Merge branch 'usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

* 'usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (232 commits)
  USB: Add USB-ID for Multiplex RC serial adapter to cp210x.c
  xhci: Clean up 32-bit build warnings.
  USB: update documentation for usbmon
  usb: usb-storage doesn't support dynamic id currently, the patch disables the feature to fix an oops
  drivers/usb/class/cdc-acm.c: clear dangling pointer
  drivers/usb/dwc3/dwc3-pci.c: introduce missing kfree
  drivers/usb/host/isp1760-if.c: introduce missing kfree
  usb: option: add ZD Incorporated HSPA modem
  usb: ch9: fix up MaxStreams helper
  USB: usb-skeleton.c: cleanup open_count
  USB: usb-skeleton.c: fix open/disconnect race
  xhci: Properly handle COMP_2ND_BW_ERR
  USB: remove dead code from suspend/resume path
  USB: add quirk for another camera
  drivers: usb: wusbcore: Fix dependency for USB_WUSB
  xhci: Better debugging for critical host errors.
  xhci: Be less verbose during URB cancellation.
  xhci: Remove debugging about ring structure allocation.
  xhci: Remove debugging about toggling cycle bits.
  xhci: Remove debugging for individual transfers.
  ...
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index fa72ccb..dbedafb 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -57,13 +57,6 @@
 
 	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
-rollback_snap
-
-	Rolls back data to the specified snapshot. This goes over the entire
-	list of rados blocks and sends a rollback command to each.
-
-	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 
 	A directory per each snapshot
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
index 9aec8ef..167d903 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
+++ b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
@@ -1,7 +1,7 @@
 What:		/sys/module/hid_logitech/drivers/hid:logitech/<dev>/range.
 Date:		July 2011
 KernelVersion:	3.2
-Contact:	Michal Malý <madcatxster@gmail.com>
+Contact:	Michal Malý <madcatxster@gmail.com>
 Description:	Display minimum, maximum and current range of the steering
 		wheel. Writing a value within min and max boundaries sets the
 		range of the wheel.
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
index 08ff908..24979f6 100644
--- a/Documentation/DocBook/debugobjects.tmpl
+++ b/Documentation/DocBook/debugobjects.tmpl
@@ -96,6 +96,7 @@
 	<listitem><para>debug_object_deactivate</para></listitem>
 	<listitem><para>debug_object_destroy</para></listitem>
 	<listitem><para>debug_object_free</para></listitem>
+	<listitem><para>debug_object_assert_init</para></listitem>
       </itemizedlist>
       Each of these functions takes the address of the real object and
       a pointer to the object type specific debug description
@@ -273,6 +274,26 @@
 	debug checks.
       </para>
     </sect1>
+
+    <sect1 id="debug_object_assert_init">
+      <title>debug_object_assert_init</title>
+      <para>
+	This function is called to assert that an object has been
+	initialized.
+      </para>
+      <para>
+	When the real object is not tracked by debugobjects, it calls
+	fixup_assert_init of the object type description structure
+	provided by the caller, with the hardcoded object state
+	ODEBUG_STATE_NOTAVAILABLE. The fixup function can correct the problem
+	by calling debug_object_init and other specific initializing
+	functions.
+      </para>
+      <para>
+	When the real object is already tracked by debugobjects it is
+	ignored.
+      </para>
+    </sect1>
   </chapter>
   <chapter id="fixupfunctions">
     <title>Fixup functions</title>
@@ -381,6 +402,35 @@
 	statistics.
       </para>
     </sect1>
+    <sect1 id="fixup_assert_init">
+      <title>fixup_assert_init</title>
+      <para>
+	This function is called from the debug code whenever a problem
+	in debug_object_assert_init is detected.
+      </para>
+      <para>
+	Called from debug_object_assert_init() with a hardcoded state
+	ODEBUG_STATE_NOTAVAILABLE when the object is not found in the
+	debug bucket.
+      </para>
+      <para>
+	The function returns 1 when the fixup was successful,
+	otherwise 0. The return value is used to update the
+	statistics.
+      </para>
+      <para>
+	Note, this function should make sure debug_object_init() is
+	called before returning.
+      </para>
+      <para>
+	The handling of statically initialized objects is a special
+	case. The fixup function should check if this is a legitimate
+	case of a statically initialized object or not. In this case only
+	debug_object_init() should be called to make the object known to
+	the tracker. Then the function should return 0 because this is not
+	a real fixup.
+      </para>
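+      <para>
+	A minimal sketch of such a fixup function for a hypothetical object
+	type with one statically allocated instance (all names here are
+	illustrative, not part of the debugobjects API):
+      </para>
+      <programlisting>
+static int my_obj_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+	struct my_obj *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_NOTAVAILABLE:
+		if (obj == &amp;my_static_obj) {
+			/* Statically initialized: only make it known. */
+			debug_object_init(obj, &amp;my_obj_debug_descr);
+			return 0;
+		}
+		/* Untracked dynamic object: perform the real fixup. */
+		debug_object_init(obj, &amp;my_obj_debug_descr);
+		my_obj_setup(obj);
+		return 1;
+	default:
+		return 0;
+	}
+}
+      </programlisting>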
+    </sect1>
   </chapter>
   <chapter id="bugs">
     <title>Known Bugs And Assumptions</title>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 81bc1a9..f7ade3b 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -275,8 +275,8 @@
 If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
 kernel is the current stable kernel.
 
-2.6.x.y are maintained by the "stable" team <stable@kernel.org>, and are
-released as needs dictate.  The normal release period is approximately 
+2.6.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+are released as needs dictate.  The normal release period is approximately
 two weeks, but it can be longer if there are no pressing problems.  A
 security-related problem, instead, can cause a release to happen almost
 instantly.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 0c134f8..bff2d8b 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -328,6 +328,12 @@
 	RCU rather than SRCU, because RCU is almost always faster and
 	easier to use than is SRCU.
 
+	If you need to enter your read-side critical section in a
+	hardirq or exception handler, and then exit that same read-side
+	critical section in the task that was interrupted, then you need
+	to use srcu_read_lock_raw() and srcu_read_unlock_raw(), which
+	avoid the lockdep checking that would otherwise flag this
+	practice as illegal.
+
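+	A minimal sketch of this pattern, using an illustrative
+	srcu_struct and index variable (not actual kernel symbols):
+
+		/* Initialized elsewhere via init_srcu_struct(). */
+		static struct srcu_struct my_srcu;
+		static int my_srcu_idx;
+
+		/* In the hardirq or exception handler: */
+		my_srcu_idx = srcu_read_lock_raw(&my_srcu);
+
+		/* Later, in the interrupted task: */
+		srcu_read_unlock_raw(&my_srcu, my_srcu_idx);
+
+	The index returned by srcu_read_lock_raw() must be passed to the
+	matching srcu_read_unlock_raw(); how that index is conveyed from
+	the handler to the task is up to the caller.
+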
 	Also unlike other forms of RCU, explicit initialization
 	and cleanup is required via init_srcu_struct() and
 	cleanup_srcu_struct().	These are passed a "struct srcu_struct"
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 3185270..bf77833 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -38,11 +38,11 @@
 
 	Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the
 	same effect, but require that the readers manipulate CPU-local
-	counters.  These counters allow limited types of blocking
-	within RCU read-side critical sections.  SRCU also uses
-	CPU-local counters, and permits general blocking within
-	RCU read-side critical sections.  These two variants of
-	RCU detect grace periods by sampling these counters.
+	counters.  These counters allow limited types of blocking within
+	RCU read-side critical sections.  SRCU also uses CPU-local
+	counters, and permits general blocking within RCU read-side
+	critical sections.  These variants of RCU detect grace periods
+	by sampling these counters.
 
 o	If I am running on a uniprocessor kernel, which can only do one
 	thing at a time, why should I wait for a grace period?
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4e95920..083d88c 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -101,6 +101,11 @@
 	CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
 	messages.
 
+o	A hardware or software issue shuts off the scheduler-clock
+	interrupt on a CPU that is not in dyntick-idle mode.  This
+	problem really has happened, and seems to be most likely to
+	result in RCU CPU stall warnings for CONFIG_NO_HZ=n kernels.
+
 o	A bug in the RCU implementation.
 
 o	A hardware failure.  This is quite unlikely, but has occurred
@@ -109,12 +114,11 @@
 	This resulted in a series of RCU CPU stall warnings, eventually
 	leading the realization that the CPU had failed.
 
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall
-warning.  SRCU does not have its own CPU stall warnings, but its
-calls to synchronize_sched() will result in RCU-sched detecting
-RCU-sched-related CPU stalls.  Please note that RCU only detects
-CPU stalls when there is a grace period in progress.  No grace period,
-no CPU stall warnings.
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall warnings.
+SRCU does not have its own CPU stall warnings, but its calls to
+synchronize_sched() will result in RCU-sched detecting RCU-sched-related
+CPU stalls.  Please note that RCU only detects CPU stalls when there is
+a grace period in progress.  No grace period, no CPU stall warnings.
 
 To diagnose the cause of the stall, inspect the stack traces.
 The offending function will usually be near the top of the stack.
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 783d6c1..d67068d 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -61,11 +61,24 @@
 		To properly exercise RCU implementations with preemptible
 		read-side critical sections.
 
+onoff_interval
+		The number of seconds between each attempt to execute a
+		randomly selected CPU-hotplug operation.  Defaults to
+		zero, which disables CPU hotplugging.  In HOTPLUG_CPU=n
+		kernels, rcutorture will silently refuse to do any
+		CPU-hotplug operations regardless of what value is
+		specified for onoff_interval.
+
 shuffle_interval
 		The number of seconds to keep the test threads affinitied
 		to a particular subset of the CPUs, defaults to 3 seconds.
 		Used in conjunction with test_no_idle_hz.
 
+shutdown_secs	The number of seconds to run the test before terminating
+		the test and powering off the system.  The default is
+		zero, which disables test termination and system shutdown.
+		This capability is useful for automated testing.
+
 stat_interval	The number of seconds between output of torture
 		statistics (via printk()).  Regardless of the interval,
 		statistics are printed when the module is unloaded.
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index aaf65f6..49587ab 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -105,14 +105,10 @@
 	or one greater than the interrupt-nesting depth otherwise.
 	The number after the second "/" is the NMI nesting depth.
 
-	This field is displayed only for CONFIG_NO_HZ kernels.
-
 o	"df" is the number of times that some other CPU has forced a
 	quiescent state on behalf of this CPU due to this CPU being in
 	dynticks-idle state.
 
-	This field is displayed only for CONFIG_NO_HZ kernels.
-
 o	"of" is the number of times that some other CPU has forced a
 	quiescent state on behalf of this CPU due to this CPU being
 	offline.  In a perfect world, this might never happen, but it
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 6ef6926..6bbe8dc 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -4,6 +4,7 @@
 1.	What is RCU, Fundamentally?  http://lwn.net/Articles/262464/
 2.	What is RCU? Part 2: Usage   http://lwn.net/Articles/263130/
 3.	RCU part 3: the RCU API      http://lwn.net/Articles/264090/
+4.	The RCU API, 2010 Edition    http://lwn.net/Articles/418853/
 
 
 What is RCU?
@@ -834,6 +835,8 @@
 
 	srcu_read_lock		synchronize_srcu	N/A
 	srcu_read_unlock	synchronize_srcu_expedited
+	srcu_read_lock_raw
+	srcu_read_unlock_raw
 	srcu_dereference
 
 SRCU:	Initialization/cleanup
@@ -855,27 +858,33 @@
 
 a.	Will readers need to block?  If so, you need SRCU.
 
-b.	What about the -rt patchset?  If readers would need to block
+b.	Is it necessary to start a read-side critical section in a
+	hardirq handler or exception handler, and then to complete
+	this read-side critical section in the task that was
+	interrupted?  If so, you need SRCU's srcu_read_lock_raw() and
+	srcu_read_unlock_raw() primitives.
+
+c.	What about the -rt patchset?  If readers would need to block
 	in an non-rt kernel, you need SRCU.  If readers would block
 	in a -rt kernel, but not in a non-rt kernel, SRCU is not
 	necessary.
 
-c.	Do you need to treat NMI handlers, hardirq handlers,
+d.	Do you need to treat NMI handlers, hardirq handlers,
 	and code segments with preemption disabled (whether
 	via preempt_disable(), local_irq_save(), local_bh_disable(),
 	or some other mechanism) as if they were explicit RCU readers?
 	If so, you need RCU-sched.
 
-d.	Do you need RCU grace periods to complete even in the face
+e.	Do you need RCU grace periods to complete even in the face
 	of softirq monopolization of one or more of the CPUs?  For
 	example, is your code subject to network-based denial-of-service
 	attacks?  If so, you need RCU-bh.
 
-e.	Is your workload too update-intensive for normal use of
+f.	Is your workload too update-intensive for normal use of
 	RCU, but inappropriate for other synchronization mechanisms?
 	If so, consider SLAB_DESTROY_BY_RCU.  But please be careful!
 
-f.	Otherwise, use RCU.
+g.	Otherwise, use RCU.
 
 Of course, this all assumes that you have determined that RCU is in fact
 the right tool for your job.
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 771d48d..208a2d4 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -51,15 +51,14 @@
 ff000000	ffbfffff	Reserved for future expansion of DMA
 				mapping region.
 
-VMALLOC_END	feffffff	Free for platform use, recommended.
-				VMALLOC_END must be aligned to a 2MB
-				boundary.
-
 VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				Memory returned by vmalloc/ioremap will
 				be dynamically placed in this region.
-				VMALLOC_START may be based upon the value
-				of the high_memory variable.
+				Machine specific static mappings are also
+				located here through iotable_init().
+				VMALLOC_START is based upon the value
+				of the high_memory variable, and VMALLOC_END
+				is equal to 0xff000000.
 
 PAGE_OFFSET	high_memory-1	Kernel direct-mapped RAM region.
 				This maps the platforms RAM, and typically
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 3bd585b..27f2b21 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -84,6 +84,93 @@
 
 *** YOU HAVE BEEN WARNED! ***
 
+Properly aligned pointers, longs, ints, and chars (and unsigned
+equivalents) may be atomically loaded from and stored to in the same
+sense as described for atomic_read() and atomic_set().  The ACCESS_ONCE()
+macro should be used to prevent the compiler from using optimizations
+that might otherwise optimize accesses out of existence on the one hand,
+or that might create unsolicited accesses on the other.
+
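+At the time of this writing, ACCESS_ONCE() is essentially a volatile cast,
+roughly:
+
+	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+which forces the compiler to emit exactly one load or store for each
+occurrence, but does not imply any ordering against other accesses.
+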
+For example consider the following code:
+
+	while (a > 0)
+		do_something();
+
+If the compiler can prove that do_something() does not store to the
+variable a, then the compiler is within its rights to transform this to
+the following:
+
+	tmp = a;
+	if (tmp > 0)
+		for (;;)
+			do_something();
+
+If you don't want the compiler to do this (and you probably don't), then
+you should use something like the following:
+
+	while (ACCESS_ONCE(a) > 0)
+		do_something();
+
+Alternatively, you could place a barrier() call in the loop.
+
+For another example, consider the following code:
+
+	tmp_a = a;
+	do_something_with(tmp_a);
+	do_something_else_with(tmp_a);
+
+If the compiler can prove that do_something_with() does not store to the
+variable a, then the compiler is within its rights to manufacture an
+additional load as follows:
+
+	tmp_a = a;
+	do_something_with(tmp_a);
+	tmp_a = a;
+	do_something_else_with(tmp_a);
+
+This could fatally confuse your code if it expected the same value
+to be passed to do_something_with() and do_something_else_with().
+
+The compiler would be likely to manufacture this additional load if
+do_something_with() was an inline function that made very heavy use
+of registers: reloading from variable a could save a flush to the
+stack and later reload.  To prevent the compiler from attacking your
+code in this manner, write the following:
+
+	tmp_a = ACCESS_ONCE(a);
+	do_something_with(tmp_a);
+	do_something_else_with(tmp_a);
+
+For a final example, consider the following code, assuming that the
+variable a is set at boot time before the second CPU is brought online
+and never changed later, so that memory barriers are not needed:
+
+	if (a)
+		b = 9;
+	else
+		b = 42;
+
+The compiler is within its rights to manufacture an additional store
+by transforming the above code into the following:
+
+	b = 42;
+	if (a)
+		b = 9;
+
+This could come as a fatal surprise to other code running concurrently
+that expected b to never have the value 42 if a was zero.  To prevent
+the compiler from doing this, write something like:
+
+	if (a)
+		ACCESS_ONCE(b) = 9;
+	else
+		ACCESS_ONCE(b) = 42;
+
+Don't even -think- about doing this without proper use of memory barriers,
+locks, or atomic operations if variable a can change at runtime!
+
+*** WARNING: ACCESS_ONCE() DOES NOT IMPLY A BARRIER! ***
+
 Now, we move onto the atomic operation interfaces typically implemented with
 the help of assembly code.
 
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index cc0ebc5..4d8774f 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -44,8 +44,8 @@
  - oom-killer disable knob and oom-notifier
  - Root cgroup has no limit controls.
 
- Kernel memory and Hugepages are not under control yet. We just manage
- pages on LRU. To add more controls, we have to take care of performance.
+ Kernel memory support is a work in progress, and the current version provides
+ basic functionality. (See Section 2.7)
 
 Brief summary of control files.
 
@@ -72,6 +72,9 @@
  memory.oom_control		 # set/show oom controls.
  memory.numa_stat		 # show the number of memory usage per numa node
 
+ memory.kmem.tcp.limit_in_bytes  # set/show hard limit for tcp buf memory
+ memory.kmem.tcp.usage_in_bytes  # show current tcp buf memory allocation
+
 1. History
 
 The memory controller has a long history. A request for comments for the memory
@@ -255,6 +258,27 @@
   per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
   zone->lru_lock, it has no lock of its own.
 
+2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+
+With the Kernel memory extension, the Memory Controller is able to limit
+the amount of kernel memory used by the system. Kernel memory is fundamentally
+different than user memory, since it can't be swapped out, which makes it
+possible to DoS the system by consuming too much of this precious resource.
+
+Kernel memory limits are not imposed for the root cgroup. Usage for the root
+cgroup may or may not be accounted.
+
+Currently no soft limit is implemented for kernel memory. It is future work
+to trigger slab reclaim when those limits are reached.
+
+2.7.1 Current Kernel Memory resources accounted
+
+* sockets memory pressure: some sockets protocols have memory pressure
+thresholds. The Memory Controller allows them to be controlled individually
+per cgroup, instead of globally.
+
+* tcp memory pressure: sockets memory pressure for the tcp protocol.
+
 3. User Interface
 
 0. Configuration
diff --git a/Documentation/cgroups/net_prio.txt b/Documentation/cgroups/net_prio.txt
new file mode 100644
index 0000000..01b3226
--- /dev/null
+++ b/Documentation/cgroups/net_prio.txt
@@ -0,0 +1,53 @@
+Network priority cgroup
+-------------------------
+
+The Network priority cgroup provides an interface to allow an administrator to
+dynamically set the priority of network traffic generated by various
+applications.
+
+Nominally, an application would set the priority of its traffic via the
+SO_PRIORITY socket option.  This however, is not always possible because:
+
+1) The application may not have been coded to set this value
+2) The priority of application traffic is often a site-specific administrative
+   decision rather than an application defined one.
+
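+For reference, this is roughly what setting the priority from within an
+application looks like (a sketch; not part of the cgroup interface itself):
+
+	int prio = 5;
+
+	setsockopt(sock_fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
+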
+This cgroup allows an administrator to assign a process to a group which defines
+the priority of egress traffic on a given interface. Network priority groups can
+be created by first mounting the cgroup filesystem.
+
+# mount -t cgroup -onet_prio none /sys/fs/cgroup/net_prio
+
+With the above step, the initial group acting as the parent accounting group
+becomes visible at '/sys/fs/cgroup/net_prio'.  This group includes all tasks in
+the system. '/sys/fs/cgroup/net_prio/tasks' lists the tasks in this cgroup.
+
+Each net_prio cgroup contains two files that are subsystem specific
+
+net_prio.prioidx
+This file is read-only, and is simply informative.  It contains a unique integer
+value that the kernel uses as an internal representation of this cgroup.
+
+net_prio.ifpriomap
+This file contains a map of the priorities assigned to traffic originating from
+processes in this group and egressing the system on various interfaces. It
+contains a list of tuples in the form <ifname priority>.  Contents of this file
+can be modified by echoing a string into the file using the same tuple format.
+For example:
+
+echo "eth0 5" > /sys/fs/cgroup/net_prio/iscsi/net_prio.ifpriomap
+
+This command would cause any traffic originating from processes belonging to
+the iscsi net_prio cgroup and egressing on interface eth0 to have its
+priority set to 5. The parent accounting group also has a
+writeable 'net_prio.ifpriomap' file that can be used to set a system default
+priority.
+
+Priorities are set immediately prior to queueing a frame to the device
+queueing discipline (qdisc) so priorities will be assigned prior to the hardware
+queue selection being made.
+
+One usage for the net_prio cgroup is with mqprio qdisc allowing application
+traffic to be steered to hardware/driver based traffic classes. These mappings
+can then be managed by administrators or other networking protocols such as
+DCBX.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index d221781..c7a2eb8 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -127,7 +127,7 @@
 echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
     >ondemand/sampling_rate
 
-show_sampling_rate_min:
+sampling_rate_min:
 The sampling rate is limited by the HW transition latency:
 transition_latency * 100
 Or by kernel restrictions:
@@ -140,8 +140,6 @@
 The highest value of kernel and HW latency restrictions is shown and
 used as the minimum sampling rate.
 
-show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
-
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency.  For example when it is set
diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
index 903a254..8a48c9b 100644
--- a/Documentation/development-process/5.Posting
+++ b/Documentation/development-process/5.Posting
@@ -271,10 +271,10 @@
    the linux-kernel list.
 
  - If you are fixing a bug, think about whether the fix should go into the
-   next stable update.  If so, stable@kernel.org should get a copy of the
-   patch.  Also add a "Cc: stable@kernel.org" to the tags within the patch
-   itself; that will cause the stable team to get a notification when your
-   fix goes into the mainline.
+   next stable update.  If so, stable@vger.kernel.org should get a copy of
+   the patch.  Also add a "Cc: stable@vger.kernel.org" to the tags within
+   the patch itself; that will cause the stable team to get a notification
+   when your fix goes into the mainline.
 
 When selecting recipients for a patch, it is good to have an idea of who
 you think will eventually accept the patch and get it merged.  While it
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index eccffe7..cec8864 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -379,7 +379,7 @@
 		162 = /dev/smbus	System Management Bus
 		163 = /dev/lik		Logitech Internet Keyboard
 		164 = /dev/ipmo		Intel Intelligent Platform Management
-		165 = /dev/vmmon	VMWare virtual machine monitor
+		165 = /dev/vmmon	VMware virtual machine monitor
 		166 = /dev/i2o/ctl	I2O configuration manager
 		167 = /dev/specialix_sxctl Specialix serial control
 		168 = /dev/tcldrv	Technology Concepts serial control
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 52916b4..9b4b82a 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -42,6 +42,10 @@
 - interrupts	: Interrupt source of the parent interrupt controller. Only
   present on secondary GICs.
 
+- cpu-offset	: per-cpu offset within the distributor and cpu interface
+  regions, used when the GIC doesn't have banked registers. The offset is
+  cpu-offset * cpu-nr.
+
 Example:
 
 	intc: interrupt-controller@fff11000 {
diff --git a/Documentation/devicetree/bindings/arm/vic.txt b/Documentation/devicetree/bindings/arm/vic.txt
new file mode 100644
index 0000000..266716b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/vic.txt
@@ -0,0 +1,29 @@
+* ARM Vectored Interrupt Controller
+
+One or more Vectored Interrupt Controllers (VICs) can be connected in an ARM
+system for interrupt routing.  For multiple controllers they can either be
+nested or have the outputs wire-OR'd together.
+
+Required properties:
+
+- compatible : should be one of
+	"arm,pl190-vic"
+	"arm,pl192-vic"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : The number of cells to define the interrupts.  Must be 1 as
+  the VIC has no configuration options for interrupt sources.  The cell is a u32
+  and defines the interrupt number.
+- reg : The register bank for the VIC.
+
+Optional properties:
+
+- interrupts : Interrupt source for parent controllers if the VIC is nested.
+
+Example:
+
+	vic0: interrupt-controller@60000 {
+		compatible = "arm,pl192-vic";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		reg = <0x60000 0x1000>;
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-designware.txt b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
new file mode 100644
index 0000000..e42a2ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
@@ -0,0 +1,22 @@
+* Synopsys DesignWare I2C
+
+Required properties :
+
+ - compatible : should be "snps,designware-i2c"
+ - reg : Offset and length of the register set for the device
+ - interrupts : <IRQ> where IRQ is the interrupt number.
+
+Recommended properties :
+
+ - clock-frequency : desired I2C bus clock frequency in Hz.
+
+Example :
+
+	i2c@f0000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "snps,designware-i2c";
+		reg = <0xf0000 0x1000>;
+		interrupts = <11>;
+		clock-frequency = <400000>;
+	};
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
new file mode 100644
index 0000000..1a85f98
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -0,0 +1,58 @@
+This is a list of trivial i2c devices that have simple device tree
+bindings, consisting only of a compatible field, an address and
+possibly an interrupt line.
+
+If a device needs more specific bindings, such as properties to
+describe some aspect of it, there needs to be a specific binding
+document for it just like any other devices.
+
+
+Compatible		Vendor / Chip
+==========		=============
+ad,ad7414		SMBus/I2C Digital Temperature Sensor in 6-Pin SOT with SMBus Alert and Over Temperature Pin
+ad,adm9240		ADM9240:  Complete System Hardware Monitor for uProcessor-Based Systems
+adi,adt7461		+/-1C TDM Extended Temp Range I.C
+adt7461			+/-1C TDM Extended Temp Range I.C
+at,24c08		i2c serial eeprom  (24cxx)
+atmel,24c02		i2c serial eeprom  (24cxx)
+catalyst,24c32		i2c serial eeprom
+dallas,ds1307		64 x 8, Serial, I2C Real-Time Clock
+dallas,ds1338		I2C RTC with 56-Byte NV RAM
+dallas,ds1339		I2C Serial Real-Time Clock
+dallas,ds1340		I2C RTC with Trickle Charger
+dallas,ds1374		I2C, 32-Bit Binary Counter Watchdog RTC with Trickle Charger and Reset Input/Output
+dallas,ds1631		High-Precision Digital Thermometer
+dallas,ds1682		Total-Elapsed-Time Recorder with Alarm
+dallas,ds1775		Tiny Digital Thermometer and Thermostat
+dallas,ds3232		Extremely Accurate I²C RTC with Integrated Crystal and SRAM
+dallas,ds4510		CPU Supervisor with Nonvolatile Memory and Programmable I/O
+dallas,ds75		Digital Thermometer and Thermostat
+dialog,da9053		DA9053: flexible system level PMIC with multicore support
+epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
+epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
+fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
+fsl,mc13892		MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
+fsl,mma8450		MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
+fsl,mpr121		MPR121: Proximity Capacitive Touch Sensor Controller
+fsl,sgtl5000		SGTL5000: Ultra Low-Power Audio Codec
+maxim,ds1050		5 Bit Programmable, Pulse-Width Modulator
+maxim,max1237		Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
+maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
+mc,rv3029c2		Real Time Clock Module with I2C-Bus
+national,lm75		I2C TEMP SENSOR
+national,lm80		Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
+national,lm92		±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
+nxp,pca9556		Octal SMBus and I2C registered interface
+nxp,pca9557		8-bit I2C-bus and SMBus I/O port with reset
+nxp,pcf8563		Real-time clock/calendar
+ovti,ov5642		OV5642: Color CMOS QSXGA (5-megapixel) Image Sensor with OmniBSI and Embedded TrueFocus
+pericom,pt7c4338	Real-time Clock Module
+plx,pex8648		48-Lane, 12-Port PCI Express Gen 2 (5.0 GT/s) Switch
+ramtron,24c64		i2c serial eeprom  (24cxx)
+ricoh,rs5c372a		I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+samsung,24ad0xd1	S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
+st-micro,24c256		i2c serial eeprom  (24cxx)
+stm,m41t00		Serial Access TIMEKEEPER
+stm,m41t62		Serial real-time clock (RTC) with alarm
+stm,m41t80		M41T80 - SERIAL ACCESS RTC WITH ALARMS
+ti,tsc2003		I2C Touch-Screen Controller
diff --git a/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
new file mode 100644
index 0000000..411727a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
@@ -0,0 +1,15 @@
+* Calxeda Highbank 10Gb XGMAC Ethernet
+
+Required properties:
+- compatible : Should be "calxeda,hb-xgmac"
+- reg : Address and length of the register set for the device
+- interrupts : Should contain 3 xgmac interrupts.  The first is the main
+  interrupt, the second is the power management interrupt, and the third is
+  the low-power state interrupt.
+
+Example:
+
+ethernet@fff50000 {
+        compatible = "calxeda,hb-xgmac";
+        reg = <0xfff50000 0x1000>;
+        interrupts = <0 77 4  0 78 4  0 79 4>;
+};
diff --git a/Documentation/devicetree/bindings/net/can/cc770.txt b/Documentation/devicetree/bindings/net/can/cc770.txt
new file mode 100644
index 0000000..77027bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/cc770.txt
@@ -0,0 +1,53 @@
+Memory mapped Bosch CC770 and Intel AN82527 CAN controller
+
+Note: The CC770 is a CAN controller from Bosch, which is 100%
+compatible with the old AN82527 from Intel, but with "bugs" being fixed.
+
+Required properties:
+
+- compatible : should be "bosch,cc770" for the CC770 and "intc,82527"
+	for the AN82527.
+
+- reg : should specify the chip select, address offset and size required
+	to map the registers of the controller. The size is usually 0x80.
+
+- interrupts : property with a value describing the interrupt source
+	(number and sensitivity) required for the controller.
+
+Optional properties:
+
+- bosch,external-clock-frequency : frequency of the external oscillator
+	clock in Hz. Note that the internal clock frequency used by the
+	controller is half of that value. If not specified, a default
+	value of 16000000 (16 MHz) is used.
+
+- bosch,clock-out-frequency : clock frequency in Hz on the CLKOUT pin.
+	If not specified or if the specified value is 0, the CLKOUT pin
+	will be disabled.
+
+- bosch,slew-rate : slew rate of the CLKOUT signal. If not specified,
+	a reasonable value will be calculated.
+
+- bosch,disconnect-rx0-input : see data sheet.
+
+- bosch,disconnect-rx1-input : see data sheet.
+
+- bosch,disconnect-tx1-output : see data sheet.
+
+- bosch,polarity-dominant : see data sheet.
+
+- bosch,divide-memory-clock : see data sheet.
+
+- bosch,iso-low-speed-mux : see data sheet.
+
+For further information, please refer to the CC770 or AN82527 data sheets.
+
+Examples:
+
+can@3,100 {
+	compatible = "bosch,cc770";
+	reg = <3 0x100 0x80>;
+	interrupts = <2 0>;
+	interrupt-parent = <&mpic>;
+	bosch,external-clock-frequency = <16000000>;
+};
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
new file mode 100644
index 0000000..b9a8a2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
@@ -0,0 +1,163 @@
+Message unit node:
+
+For SRIO controllers that implement the message unit as part of the controller
+this node is required.  For devices with RMAN this node should NOT exist.  The
+node is composed of three types of sub-nodes ("fsl-srio-msg-unit",
+"fsl-srio-dbell-unit" and "fsl-srio-port-write-unit").
+
+See srio.txt for more details about generic SRIO controller details.
+
+   - compatible
+	Usage: required
+	Value type: <string>
+	Definition: Must include "fsl,srio-rmu-vX.Y", "fsl,srio-rmu".
+
+	The version X.Y should match the general SRIO controller's IP Block
+	revision register's Major(X) and Minor (Y) value.
+
+   - reg
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: A standard property.  Specifies the physical address and
+		length of the SRIO configuration registers for message units
+		and doorbell units.
+
+   - fsl,liodn
+	Usage: optional-but-recommended (for devices with PAMU)
+	Value type: <prop-encoded-array>
+	Definition: The logical I/O device number for the PAMU (IOMMU) to be
+		correctly configured for SRIO accesses.  The property should
+		not exist on devices that do not support PAMU.
+
+		The LIODN value is associated with all RMU transactions
+		(msg-unit, doorbell, port-write).
+
+Sub-Nodes for RMU:  The RMU node is composed of multiple sub-nodes that
+correspond to the actual sub-controllers in the RMU.  The manual for a given
+SoC will detail which and how many of these sub-controllers are implemented.
+
+Message Unit:
+
+   - compatible
+	Usage: required
+	Value type: <string>
+	Definition: Must include "fsl,srio-msg-unit-vX.Y", "fsl,srio-msg-unit".
+
+	The version X.Y should match the general SRIO controller's IP Block
+	revision register's Major(X) and Minor (Y) value.
+
+   - reg
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: A standard property.  Specifies the physical address and
+		length of the SRIO configuration registers for message units
+		and doorbell units.
+
+   - interrupts
+	Usage: required
+	Value type: <prop_encoded-array>
+	Definition:  Specifies the interrupts generated by this device.  The
+		value of the interrupts property consists of one interrupt
+		specifier. The format of the specifier is defined by the
+		binding document describing the node's interrupt parent.
+
+		A pair of IRQs are specified in this property.  The first
+		element is associated with the transmit (TX) interrupt and the
+		second element is associated with the receive (RX) interrupt.
+
+Doorbell Unit:
+
+   - compatible
+	Usage: required
+	Value type: <string>
+	Definition: Must include:
+		"fsl,srio-dbell-unit-vX.Y", "fsl,srio-dbell-unit"
+
+	The version X.Y should match the general SRIO controller's IP Block
+	revision register's Major(X) and Minor (Y) value.
+
+   - reg
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: A standard property.  Specifies the physical address and
+		length of the SRIO configuration registers for message units
+		and doorbell units.
+
+   - interrupts
+	Usage: required
+	Value type: <prop_encoded-array>
+	Definition:  Specifies the interrupts generated by this device.  The
+		value of the interrupts property consists of one interrupt
+		specifier. The format of the specifier is defined by the
+		binding document describing the node's interrupt parent.
+
+		A pair of IRQs are specified in this property.  The first
+		element is associated with the transmit (TX) interrupt and the
+		second element is associated with the receive (RX) interrupt.
+
+Port-Write Unit:
+
+   - compatible
+	Usage: required
+	Value type: <string>
+	Definition: Must include:
+		 "fsl,srio-port-write-unit-vX.Y", "fsl,srio-port-write-unit"
+
+	The version X.Y should match the general SRIO controller's IP Block
+	revision register's Major(X) and Minor (Y) value.
+
+   - reg
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: A standard property.  Specifies the physical address and
+		length of the SRIO configuration registers for message units
+		and doorbell units.
+
+   - interrupts
+	Usage: required
+	Value type: <prop_encoded-array>
+	Definition:  Specifies the interrupts generated by this device.  The
+		value of the interrupts property consists of one interrupt
+		specifier. The format of the specifier is defined by the
+		binding document describing the node's interrupt parent.
+
+		A single IRQ that handles port-write conditions is
+		specified by this property.  (Typically shared with error).
+
+   Note: All other standard properties (see the ePAPR) are allowed
+   but are optional.
+
+Example:
+	rmu: rmu@d3000 {
+		compatible = "fsl,srio-rmu";
+		reg = <0xd3000 0x400>;
+		ranges = <0x0 0xd3000 0x400>;
+		fsl,liodn = <0xc8>;
+
+		message-unit@0 {
+			compatible = "fsl,srio-msg-unit";
+			reg = <0x0 0x100>;
+			interrupts = <
+				60 2 0 0  /* msg1_tx_irq */
+				61 2 0 0>;/* msg1_rx_irq */
+		};
+		message-unit@100 {
+			compatible = "fsl,srio-msg-unit";
+			reg = <0x100 0x100>;
+			interrupts = <
+				62 2 0 0  /* msg2_tx_irq */
+				63 2 0 0>;/* msg2_rx_irq */
+		};
+		doorbell-unit@400 {
+			compatible = "fsl,srio-dbell-unit";
+			reg = <0x400 0x80>;
+			interrupts = <
+				56 2 0 0  /* bell_outb_irq */
+				57 2 0 0>;/* bell_inb_irq */
+		};
+		port-write-unit@4e0 {
+			compatible = "fsl,srio-port-write-unit";
+			reg = <0x4e0 0x20>;
+			interrupts = <16 2 1 11>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
new file mode 100644
index 0000000..b039bcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
@@ -0,0 +1,103 @@
+* Freescale Serial RapidIO (SRIO) Controller
+
+RapidIO port node:
+Properties:
+   - compatible
+	Usage: required
+	Value type: <string>
+	Definition: Must include "fsl,srio" for IP blocks with IP Block
+	Revision Register (SRIO IPBRR1) Major ID equal to 0x01c0.
+
+	Optionally, a compatible string of "fsl,srio-vX.Y" where X is the Major
+	version in the IP Block Revision Register and Y is the Minor version.  If
+	this compatible string is provided it should be ordered before "fsl,srio".
+
+   - reg
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: A standard property.  Specifies the physical address and
+		length of the SRIO configuration registers.  The size should
+		be set to 0x11000.
+
+   - interrupts
+	Usage: required
+	Value type: <prop_encoded-array>
+	Definition:  Specifies the interrupts generated by this device.  The
+		value of the interrupts property consists of one interrupt
+		specifier. The format of the specifier is defined by the
+		binding document describing the node's interrupt parent.
+
+		A single IRQ that handles error conditions is specified by this
+		property.  (Typically shared with port-write).
+
+   - fsl,srio-rmu-handle:
+	Usage: required if rmu node is defined
+	Value type: <phandle>
+	Definition: A single <phandle> value that points to the RMU.
+	(See srio-rmu.txt for more details on RMU node binding)
+
+Port Child Nodes:  There should be a port child node for each port that exists in
+the controller.  The ports are numbered starting at one (1) and should have
+the following properties:
+
+   - cell-index
+	Usage: required
+	Value type: <u32>
+	Definition: A standard property.  Matches the port id.
+
+   - ranges
+	Usage: required if local access windows are present
+	Value type: <prop-encoded-array>
+	Definition: A standard property. Utilized to describe the memory mapped
+		IO space utilized by the controller.  This corresponds to the
+		setting of the local access windows that are targeted to this
+		SRIO port.
+
+   - fsl,liodn
+	Usage: optional-but-recommended (for devices with PAMU)
+	Value type: <prop-encoded-array>
+	Definition: The logical I/O device number for the PAMU (IOMMU) to be
+		correctly configured for SRIO accesses.  The property should
+		not exist on devices that do not support PAMU.
+
+		For HW (ie, the P4080) that only supports a LIODN for both
+		memory and maintenance transactions then a single LIODN is
+		represented in the property for both transactions.
+
+		For HW (ie, the P304x/P5020, etc) that supports an LIODN for
+		memory transactions and a unique LIODN for maintenance
+		transactions then a pair of LIODNs are represented in the
+		property.  Within the pair, the first element represents the
+		LIODN associated with memory transactions and the second element
+		represents the LIODN associated with maintenance transactions
+		for the port.
+
+Note: All other standard properties (see ePAPR) are allowed but are optional.
+
+Example:
+
+	rapidio: rapidio@ffe0c0000 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		reg = <0xf 0xfe0c0000 0 0x11000>;
+		compatible = "fsl,srio";
+		interrupts = <16 2 1 11>; /* err_irq */
+		fsl,srio-rmu-handle = <&rmu>;
+		ranges;
+
+		port1 {
+			cell-index = <1>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			fsl,liodn = <34>;
+			ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+		};
+
+		port2 {
+			cell-index = <2>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			fsl,liodn = <48>;
+			ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 874921e..1862696 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,7 +8,9 @@
 apm	Applied Micro Circuits Corporation (APM)
 arm	ARM Ltd.
 atmel	Atmel Corporation
+cavium	Cavium, Inc.
 chrp	Common Hardware Reference Platform
+cortina	Cortina Systems, Inc.
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
 denx	Denx Software Engineering
 epson	Seiko Epson Corp.
@@ -36,6 +38,7 @@
 sil	Silicon Image
 simtek
 sirf	SiRF Technology, Inc.
+st	STMicroelectronics
 stericsson	ST-Ericsson
 ti	Texas Instruments
 xlnx	Xilinx
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
new file mode 100644
index 0000000..510eab3
--- /dev/null
+++ b/Documentation/dma-buf-sharing.txt
@@ -0,0 +1,224 @@
+                    DMA Buffer Sharing API Guide
+                    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+                            Sumit Semwal
+                <sumit dot semwal at linaro dot org>
+                 <sumit dot semwal at ti dot com>
+
+This document serves as a guide to device-driver writers on what the dma-buf
+buffer sharing API is and how to use it for exporting and using shared buffers.
+
+Any device driver which wishes to be a part of DMA buffer sharing can do so as
+either the 'exporter' of buffers, or the 'user' of buffers.
+
+Say a driver A wants to use buffers created by driver B; then we call B the
+exporter, and A the buffer-user.
+
+The exporter
+- implements and manages operations[1] for the buffer
+- allows other users to share the buffer by using dma_buf sharing APIs,
+- manages the details of buffer allocation,
+- decides about the actual backing storage where this allocation happens,
+- takes care of any migration of scatterlist - for all (shared) users of this
+   buffer,
+
+The buffer-user
+- is one of (many) sharing users of the buffer.
+- doesn't need to worry about how the buffer is allocated, or where.
+- needs a mechanism to get access to the scatterlist that makes up this buffer
+   in memory, mapped into its own address space, so it can access the same area
+   of memory.
+
+*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
+For this first version, a buffer shared using the dma_buf sharing API:
+- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
+   this framework.
+- may be used *ONLY* by importers that do not need CPU access to the buffer.
+
+The dma_buf buffer sharing API usage contains the following steps:
+
+1. Exporter announces that it wishes to export a buffer
+2. Userspace gets the file descriptor associated with the exported buffer, and
+   passes it around to potential buffer-users based on use case
+3. Each buffer-user 'connects' itself to the buffer
+4. When needed, buffer-user requests access to the buffer from exporter
+5. When finished with its use, the buffer-user notifies end-of-DMA to exporter
+6. When the buffer-user is done using this buffer completely, it 'disconnects'
+   itself from the buffer.
+
+
+1. Exporter's announcement of buffer export
+
+   The buffer exporter announces its wish to export a buffer. In this, it
+   connects its own private buffer data, provides implementation for operations
+   that can be performed on the exported dma_buf, and flags for the file
+   associated with this buffer.
+
+   Interface:
+      struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+				     size_t size, int flags)
+
+   If this succeeds, dma_buf_export allocates a dma_buf structure and returns a
+   pointer to it. It also associates an anonymous file with this buffer,
+   so it can be exported. On failure to allocate the dma_buf object, it returns
+   NULL.
+
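+   As a rough exporter-side sketch (the my_*() names and variables are
+   illustrative only, not part of the API):
+
+      static struct dma_buf_ops my_dmabuf_ops = {
+              .attach        = my_attach,        /* optional */
+              .detach        = my_detach,        /* optional */
+              .map_dma_buf   = my_map_dma_buf,
+              .unmap_dma_buf = my_unmap_dma_buf,
+              .release       = my_release,
+      };
+
+      dmabuf = dma_buf_export(my_priv, &my_dmabuf_ops, my_size, my_flags);
+      if (!dmabuf)
+              return -ENOMEM;
+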
+2. Userspace gets a handle to pass around to potential buffer-users
+
+   The userspace entity requests a file descriptor (fd), which is a handle to the
+   anonymous file associated with the buffer. It can then share the fd with other
+   drivers and/or processes.
+
+   Interface:
+      int dma_buf_fd(struct dma_buf *dmabuf)
+
+   This API installs an fd for the anonymous file associated with this buffer;
+   returns either 'fd', or error.
+
+3. Each buffer-user 'connects' itself to the buffer
+
+   Each buffer-user now gets a reference to the buffer, using the fd passed to
+   it.
+
+   Interface:
+      struct dma_buf *dma_buf_get(int fd)
+
+   This API will return a reference to the dma_buf, and increment refcount for
+   it.
+
+   After this, the buffer-user needs to attach its device with the buffer, which
+   helps the exporter to know of device buffer constraints.
+
+   Interface:
+      struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+                                                struct device *dev)
+
+   This API returns a reference to an attachment structure, which is then used
+   for scatterlist operations. It will optionally call the 'attach' dma_buf
+   operation, if provided by the exporter.
+
+   The dma-buf sharing framework does the bookkeeping bits related to managing
+   the list of all attachments to a buffer.
+
+Until this stage, the buffer-exporter has the option to choose not to actually
+allocate the backing storage for this buffer, but wait for the first buffer-user
+to request use of buffer for allocation.
+
+
+4. When needed, buffer-user requests access to the buffer
+
+   Whenever a buffer-user wants to use the buffer for any DMA, it asks for
+   access to the buffer using dma_buf_map_attachment API. At least one attach to
+   the buffer must have happened before map_dma_buf can be called.
+
+   Interface:
+      struct sg_table * dma_buf_map_attachment(struct dma_buf_attachment *,
+                                         enum dma_data_direction);
+
+   This is a wrapper to dma_buf->ops->map_dma_buf operation, which hides the
+   "dma_buf->ops->" indirection from the users of this interface.
+
+   In struct dma_buf_ops, map_dma_buf is defined as
+      struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
+                                                enum dma_data_direction);
+
+   It is one of the buffer operations that must be implemented by the exporter.
+   It should return the sg_table containing scatterlist for this buffer, mapped
+   into caller's address space.
+
+   If this is being called for the first time, the exporter can now choose to
+   scan through the list of attachments for this buffer, collate the requirements
+   of the attached devices, and choose an appropriate backing storage for the
+   buffer.
+
+   Based on enum dma_data_direction, it might be possible to have multiple users
+   accessing at the same time (for reading, maybe), or any other kind of sharing
+   that the exporter might wish to make available to buffer-users.
+
+   map_dma_buf() operation can return -EINTR if it is interrupted by a signal.
+
+
+5. When finished, the buffer-user notifies end-of-DMA to exporter
+
+   Once the DMA for the current buffer-user is over, it signals 'end-of-DMA' to
+   the exporter using the dma_buf_unmap_attachment API.
+
+   Interface:
+      void dma_buf_unmap_attachment(struct dma_buf_attachment *,
+                                    struct sg_table *);
+
+   This is a wrapper to dma_buf->ops->unmap_dma_buf() operation, which hides the
+   "dma_buf->ops->" indirection from the users of this interface.
+
+   In struct dma_buf_ops, unmap_dma_buf is defined as
+      void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *);
+
+   unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like
+   map_dma_buf, this API also must be implemented by the exporter.
+
+
+6. When the buffer-user is done using this buffer, it 'disconnects' itself from the
+   buffer.
+
+   After the buffer-user has no more interest in using this buffer, it should
+   disconnect itself from the buffer:
+
+   - it first detaches itself from the buffer.
+
+   Interface:
+      void dma_buf_detach(struct dma_buf *dmabuf,
+                          struct dma_buf_attachment *dmabuf_attach);
+
+   This API removes the attachment from the list in dmabuf, and optionally calls
+   dma_buf->ops->detach(), if provided by exporter, for any housekeeping bits.
+
+   - Then, the buffer-user returns the buffer reference to exporter.
+
+   Interface:
+     void dma_buf_put(struct dma_buf *dmabuf);
+
+   This API then reduces the refcount for this buffer.
+
+   If, as a result of this call, the refcount becomes 0, the 'release' file
+   operation related to this fd is called. It calls the dmabuf->ops->release()
+   operation in turn, and frees the memory allocated for dmabuf when exported.
+
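+   Putting steps 3 to 6 together, a buffer-user's life cycle is roughly the
+   following (a sketch; error handling omitted and names illustrative):
+
+      struct dma_buf *dmabuf = dma_buf_get(fd);
+      struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
+      struct sg_table *sgt;
+
+      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+
+      /* ... perform DMA using the scatterlist in sgt ... */
+
+      dma_buf_unmap_attachment(attach, sgt);
+      dma_buf_detach(dmabuf, attach);
+      dma_buf_put(dmabuf);
+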
+NOTES:
+- Importance of attach-detach and {map,unmap}_dma_buf operation pairs
+   The attach-detach calls allow the exporter to figure out backing-storage
+   constraints for the currently-interested devices. This allows preferential
+   allocation, and/or migration of pages across different types of storage
+   available, if possible.
+
+   Bracketing of DMA access with {map,unmap}_dma_buf operations is essential
+   to allow just-in-time backing of storage, and migration mid-way through a
+   use-case.
+
+- Migration of backing storage if needed
+   If after
+   - at least one map_dma_buf has happened,
+   - and the backing storage has been allocated for this buffer,
+   another new buffer-user intends to attach itself to this buffer, it might
+   be allowed, if possible for the exporter.
+
+   In case it is allowed by the exporter:
+    if the new buffer-user has stricter 'backing-storage constraints', and the
+    exporter can handle these constraints, the exporter can just stall on the
+    map_dma_buf until all outstanding access is completed (as signalled by
+    unmap_dma_buf).
+    Once all users have finished accessing and have unmapped this buffer, the
+    exporter could potentially move the buffer to the stricter backing-storage,
+    and then allow further {map,unmap}_dma_buf operations from any buffer-user
+    from the migrated backing-storage.
+
+   If the exporter cannot fulfil the backing-storage constraints of the new
+   buffer-user device as requested, dma_buf_attach() would return an error to
+   denote non-compatibility of the new buffer-sharing request with the current
+   buffer.
+
+   If the exporter chooses not to allow an attach() operation once a
+   map_dma_buf() API has been called, it simply returns an error.
+
+References:
+[1] struct dma_buf_ops in include/linux/dma-buf.h
+[2] All interfaces mentioned above defined in include/linux/dma-buf.h
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index dfa6fc6..0c083c5 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -66,7 +66,6 @@
 GSYMS
 GTAGS
 Image
-Kerntypes
 Module.markers
 Module.symvers
 PENDING
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index d79aead..10c64c8 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -262,6 +262,7 @@
   devm_ioremap()
   devm_ioremap_nocache()
   devm_iounmap()
+  devm_request_and_ioremap() : checks resource, requests region, ioremaps
   pcim_iomap()
   pcim_iounmap()
   pcim_iomap_table()	: array of mapped addresses indexed by BAR
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index a7e4ac1..284b442 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -85,17 +85,6 @@
 
 ---------------------------
 
-What:	Deprecated snapshot ioctls
-When:	2.6.36
-
-Why:	The ioctls in kernel/power/user.c were marked as deprecated long time
-	ago. Now they notify users about that so that they need to replace
-	their userspace. After some more time, remove them completely.
-
-Who:	Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
 What:	The ieee80211_regdom module parameter
 When:	March 2010 / desktop catchup
 
@@ -263,8 +252,7 @@
 
 What:	Code that is now under CONFIG_WIRELESS_EXT_SYSFS
 	(in net/core/net-sysfs.c)
-When:	After the only user (hal) has seen a release with the patches
-	for enough time, probably some time in 2010.
+When:	3.5
 Why:	Over 1K .text/.data size reduction, data is available in other
 	ways (ioctls)
 Who:	Johannes Berg <johannes@sipsolutions.net>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index d819ba1..4fca82e 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -37,15 +37,15 @@
 
 --------------------------- inode_operations --------------------------- 
 prototypes:
-	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+	int (*create) (struct inode *,struct dentry *,umode_t, struct nameidata *);
 	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameid
 ata *);
 	int (*link) (struct dentry *,struct inode *,struct dentry *);
 	int (*unlink) (struct inode *,struct dentry *);
 	int (*symlink) (struct inode *,struct dentry *,const char *);
-	int (*mkdir) (struct inode *,struct dentry *,int);
+	int (*mkdir) (struct inode *,struct dentry *,umode_t);
 	int (*rmdir) (struct inode *,struct dentry *);
-	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *);
 	int (*readlink) (struct dentry *, char __user *,int);
@@ -117,7 +117,7 @@
 	int (*statfs) (struct dentry *, struct kstatfs *);
 	int (*remount_fs) (struct super_block *, int *, char *);
 	void (*umount_begin) (struct super_block *);
-	int (*show_options)(struct seq_file *, struct vfsmount *);
+	int (*show_options)(struct seq_file *, struct dentry *);
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
index dd57bb6..b40fec9 100644
--- a/Documentation/filesystems/configfs/configfs.txt
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -192,7 +192,7 @@
 	struct configfs_attribute {
 		char                    *ca_name;
 		struct module           *ca_owner;
-		mode_t                  ca_mode;
+		umode_t                  ca_mode;
 	};
 
 When a config_item wants an attribute to appear as a file in the item's
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index 742cc06..6872c91 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -35,7 +35,7 @@
 
 The most general way to create a file within a debugfs directory is with:
 
-    struct dentry *debugfs_create_file(const char *name, mode_t mode,
+    struct dentry *debugfs_create_file(const char *name, umode_t mode,
 				       struct dentry *parent, void *data,
 				       const struct file_operations *fops);
 
@@ -53,13 +53,13 @@
 for simple situations.  Files containing a single integer value can be
 created with any of:
 
-    struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u8(const char *name, umode_t mode,
 				     struct dentry *parent, u8 *value);
-    struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u16(const char *name, umode_t mode,
 				      struct dentry *parent, u16 *value);
-    struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u32(const char *name, umode_t mode,
 				      struct dentry *parent, u32 *value);
-    struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+    struct dentry *debugfs_create_u64(const char *name, umode_t mode,
 				      struct dentry *parent, u64 *value);
 
 These files support both reading and writing the given value; if a specific
@@ -67,13 +67,13 @@
 values in these files are in decimal; if hexadecimal is more appropriate,
 the following functions can be used instead:
 
-    struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x8(const char *name, umode_t mode,
 				     struct dentry *parent, u8 *value);
-    struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x16(const char *name, umode_t mode,
 				      struct dentry *parent, u16 *value);
-    struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x32(const char *name, umode_t mode,
 				      struct dentry *parent, u32 *value);
-    struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+    struct dentry *debugfs_create_x64(const char *name, umode_t mode,
 				      struct dentry *parent, u64 *value);
 
 These functions are useful as long as the developer knows the size of the
@@ -81,7 +81,7 @@
 architectures, though, complicating the situation somewhat.  There is a
 function meant to help out in one special case:
 
-    struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+    struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
 				         struct dentry *parent, 
 					 size_t *value);
 
@@ -90,21 +90,22 @@
 
 Boolean values can be placed in debugfs with:
 
-    struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+    struct dentry *debugfs_create_bool(const char *name, umode_t mode,
 				       struct dentry *parent, u32 *value);
 
 A read on the resulting file will yield either Y (for non-zero values) or
 N, followed by a newline.  If written to, it will accept either upper- or
 lower-case values, or 1 or 0.  Any other input will be silently ignored.
 
-Finally, a block of arbitrary binary data can be exported with:
+Another option is exporting a block of arbitrary binary data, with
+this structure and function:
 
     struct debugfs_blob_wrapper {
 	void *data;
 	unsigned long size;
     };
 
-    struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+    struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 				       struct dentry *parent,
 				       struct debugfs_blob_wrapper *blob);
 
@@ -115,6 +116,35 @@
 any code which does so in the mainline.  Note that all files created with
 debugfs_create_blob() are read-only.
 
+If you want to dump a block of registers (something that happens quite
+often during development, even if little such code reaches mainline),
+debugfs offers two functions: one to make a registers-only file, and
+another to insert a register block in the middle of another sequential
+file.
+
+    struct debugfs_reg32 {
+	char *name;
+	unsigned long offset;
+    };
+
+    struct debugfs_regset32 {
+	struct debugfs_reg32 *regs;
+	int nregs;
+	void __iomem *base;
+    };
+
+    struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
+				     struct dentry *parent,
+				     struct debugfs_regset32 *regset);
+
+    int debugfs_print_regs32(struct seq_file *s, struct debugfs_reg32 *regs,
+			 int nregs, void __iomem *base, char *prefix);
+
+The "base" argument may be 0, but you may want to build the reg32 array
+using __stringify, and a number of register names (macros) are actually
+byte offsets over a base for the register block.
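+
+A minimal sketch of how these helpers might be used (the FOO_* register
+offsets, foo_base and foo_dir below are purely illustrative, not taken
+from a real driver):
+
+    #define FOO_CTRL	0x00	/* hypothetical register offsets */
+    #define FOO_STATUS	0x04
+
+    #define foo_dump_register(r)		\
+    {						\
+	.name = __stringify(r),			\
+	.offset = FOO_ ## r,			\
+    }
+
+    static struct debugfs_reg32 foo_regs[] = {
+	foo_dump_register(CTRL),
+	foo_dump_register(STATUS),
+    };
+
+    static struct debugfs_regset32 foo_regset = {
+	.regs = foo_regs,
+	.nregs = ARRAY_SIZE(foo_regs),
+    };
+
+    /* in the probe path, after ioremap()ing the register block */
+    foo_regset.base = foo_base;
+    debugfs_create_regset32("registers", S_IRUGO, foo_dir, &foo_regset);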
+
+
 There are a couple of other directory-oriented helper functions:
 
     struct dentry *debugfs_rename(struct dentry *old_dir, 
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 07235ca..a6619b7 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -70,7 +70,7 @@
 struct attribute {
         char                    * name;
         struct module		*owner;
-        mode_t                  mode;
+        umode_t                 mode;
 };
 
 
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 43cbd08..3d9393b 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -225,7 +225,7 @@
         void (*clear_inode) (struct inode *);
         void (*umount_begin) (struct super_block *);
 
-        int (*show_options)(struct seq_file *, struct vfsmount *);
+        int (*show_options)(struct seq_file *, struct dentry *);
 
         ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
         ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
@@ -341,14 +341,14 @@
 filesystem. As of kernel 2.6.22, the following members are defined:
 
 struct inode_operations {
-	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+	int (*create) (struct inode *,struct dentry *, umode_t, struct nameidata *);
 	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
 	int (*link) (struct dentry *,struct inode *,struct dentry *);
 	int (*unlink) (struct inode *,struct dentry *);
 	int (*symlink) (struct inode *,struct dentry *,const char *);
-	int (*mkdir) (struct inode *,struct dentry *,int);
+	int (*mkdir) (struct inode *,struct dentry *,umode_t);
 	int (*rmdir) (struct inode *,struct dentry *);
-	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *);
 	int (*readlink) (struct dentry *, char __user *,int);
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index 15ac911..d28b591 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -2,9 +2,8 @@
 ====================
 
 Supported chips:
-  * Ericsson BMR45X series
-    DC/DC Converter
-    Prefixes: 'bmr450', 'bmr451', 'bmr453', 'bmr454'
+  * Ericsson BMR453, BMR454
+    Prefixes: 'bmr453', 'bmr454'
     Addresses scanned: -
     Datasheet:
  http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
index 7617798..51f76a1 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100
@@ -6,6 +6,10 @@
     Prefix: 'zl2004'
     Addresses scanned: -
     Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+  * Intersil / Zilker Labs ZL2005
+    Prefix: 'zl2005'
+    Addresses scanned: -
+    Datasheet: http://www.intersil.com/data/fn/fn6848.pdf
   * Intersil / Zilker Labs ZL2006
     Prefix: 'zl2006'
     Addresses scanned: -
@@ -30,6 +34,17 @@
     Prefix: 'zl6105'
     Addresses scanned: -
     Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+  * Ericsson BMR450, BMR451
+    Prefix: 'bmr450', 'bmr451'
+    Addresses scanned: -
+    Datasheet:
+http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
+  * Ericsson BMR462, BMR463, BMR464
+    Prefixes: 'bmr462', 'bmr463', 'bmr464'
+    Addresses scanned: -
+    Datasheet:
+http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
+
 
 Author: Guenter Roeck <guenter.roeck@ericsson.com>
 
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 7a9e0b4..506c739 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -17,8 +17,8 @@
 memory image to a dump file on the local disk, or across the network to
 a remote system.
 
-Kdump and kexec are currently supported on the x86, x86_64, ppc64 and ia64
-architectures.
+Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64,
+and s390x architectures.
 
 When the system kernel boots, it reserves a small section of memory for
 the dump-capture kernel. This ensures that ongoing Direct Memory Access
@@ -34,11 +34,18 @@
 booting regardless of where the kernel is loaded and to support 64K page
 size kexec backs up the first 64KB memory.
 
+For s390x, when kdump is triggered, the crashkernel region is exchanged
+with the region [0, crashkernel region size] and then the kdump kernel
+runs in [0, crashkernel region size]. Therefore no relocatable kernel is
+needed for s390x.
+
 All of the necessary information about the system kernel's core image is
 encoded in the ELF format, and stored in a reserved area of memory
 before a crash. The physical address of the start of the ELF header is
 passed to the dump-capture kernel through the elfcorehdr= boot
-parameter.
+parameter. Optionally the size of the ELF header can also be passed
+when using the elfcorehdr=[size[KMG]@]offset[KMG] syntax.
+
 
 With the dump-capture kernel, you can access the memory image, or "old
 memory," in two ways:
@@ -291,6 +298,10 @@
    The region may be automatically placed on ia64, see the
    dump-capture kernel config option notes above.
 
+   On s390x, typically use "crashkernel=xxM". The value of xx is dependent
+   on the memory consumption of the kdump system. In general this is not
+   dependent on the memory size of the production system.
+
 Load the Dump-capture Kernel
 ============================
 
@@ -308,6 +319,8 @@
 	- Use vmlinux
 For ia64:
 	- Use vmlinux or vmlinuz.gz
+For s390x:
+	- Use image or bzImage
 
 
 If you are using a uncompressed vmlinux image then use following command
@@ -337,6 +350,8 @@
 For ppc64:
 	"1 maxcpus=1 noirqdistrib reset_devices"
 
+For s390x:
+	"1 maxcpus=1 cgroup_disable=memory"
 
 Notes on loading the dump-capture kernel:
 
@@ -362,6 +377,20 @@
   dump. Hence generally it is useful either to build a UP dump-capture
   kernel or specify maxcpus=1 option while loading dump-capture kernel.
 
+* For s390x there are two kdump modes: If an ELF header is specified with
+  the elfcorehdr= kernel parameter, it is used by the kdump kernel as it
+  is done on all other architectures. If no elfcorehdr= kernel parameter is
+  specified, the s390x kdump kernel dynamically creates the header. The
+  second mode has the advantage that for CPU and memory hotplug, kdump does
+  not have to be reloaded with kexec_load().
+
+* For s390x systems with many attached devices the "cio_ignore" kernel
+  parameter should be used for the kdump kernel in order to prevent allocation
+  of kernel memory for devices that are not relevant for kdump. The same
+  applies to systems that use SCSI/FCP devices. In that case the
+  "allow_lun_scan" zfcp module parameter should be set to zero before
+  setting FCP devices online.
+
 Kernel Panic
 ============
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 60cfc25..7b2e5c5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1885,6 +1885,11 @@
 			arch_perfmon: [X86] Force use of architectural
 				perfmon on Intel CPUs instead of the
 				CPU specific event set.
+			timer: [X86] Force use of architectural NMI
+				timer mode (see also oprofile.timer
+				for generic hr timer mode)
+				[s390] Force legacy basic mode sampling
+				(report cpu_type "timer")
 
 	oops=panic	Always panic on oopses. Default is to just kill the
 			process, but there is a small probability of
@@ -2754,11 +2759,10 @@
 			functions are at fixed addresses, they make nice
 			targets for exploits that can control RIP.
 
-			emulate     Vsyscalls turn into traps and are emulated
-			            reasonably safely.
+			emulate     [default] Vsyscalls turn into traps and are
+			            emulated reasonably safely.
 
-			native      [default] Vsyscalls are native syscall
-			            instructions.
+			native      Vsyscalls are native syscall instructions.
 			            This is a little bit faster than trapping
 			            and makes a few dynamic recompilers work
 			            better than they would in emulation mode.
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index abf768c..5dbc99c 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -221,3 +221,66 @@
 table, which hash-table can be checked in a lockfree manner. If the
 locking chain occurs again later on, the hash table tells us that we
 dont have to validate the chain again.
+
+Troubleshooting:
+----------------
+
+The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes.
+Exceeding this number will trigger the following lockdep warning:
+
+	(DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+
+By default, MAX_LOCKDEP_KEYS is currently set to 8191, and typical
+desktop systems have less than 1,000 lock classes, so this warning
+normally results from lock-class leakage or failure to properly
+initialize locks.  These two problems are illustrated below:
+
+1.	Repeated module loading and unloading while running the validator
+	will result in lock-class leakage.  The issue here is that each
+	load of the module will create a new set of lock classes for
+	that module's locks, but module unloading does not remove old
+	classes (see below discussion of reuse of lock classes for why).
+	Therefore, if that module is loaded and unloaded repeatedly,
+	the number of lock classes will eventually reach the maximum.
+
+2.	Using structures such as arrays that have large numbers of
+	locks that are not explicitly initialized.  For example,
+	a hash table with 8192 buckets where each bucket has its own
+	spinlock_t will consume 8192 lock classes -unless- each spinlock
+	is explicitly initialized at runtime, for example, using the
+	run-time spin_lock_init() as opposed to compile-time initializers
+	such as __SPIN_LOCK_UNLOCKED().  Failure to properly initialize
+	the per-bucket spinlocks would guarantee lock-class overflow.
+	In contrast, a loop that called spin_lock_init() on each lock
+	would place all 8192 locks into a single lock class.
+
+	The moral of this story is that you should always explicitly
+	initialize your locks.
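+
+	As a minimal sketch of the second point above (the hash table is
+	purely illustrative), run-time initialization keeps all of the
+	per-bucket locks in a single lock class:
+
+		struct bucket {
+			spinlock_t lock;
+			struct hlist_head chain;
+		};
+		static struct bucket hashtab[8192];
+
+		static void __init hashtab_init(void)
+		{
+			int i;
+
+			for (i = 0; i < ARRAY_SIZE(hashtab); i++) {
+				spin_lock_init(&hashtab[i].lock);
+				INIT_HLIST_HEAD(&hashtab[i].chain);
+			}
+		}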
+
+One might argue that the validator should be modified to allow
+lock classes to be reused.  However, if you are tempted to make this
+argument, first review the code and think through the changes that would
+be required, keeping in mind that the lock classes to be removed are
+likely to be linked into the lock-dependency graph.  This turns out to
+be harder to do than to say.
+
+Of course, if you do run out of lock classes, the next thing to do is
+to find the offending lock classes.  First, the following command gives
+you the number of lock classes currently in use along with the maximum:
+
+	grep "lock-classes" /proc/lockdep_stats
+
+This command produces the following output on a modest system:
+
+	 lock-classes:                          748 [max: 8191]
+
+If the number allocated (748 above) increases continually over time,
+then there is likely a leak.  The following command can be used to
+identify the leaking lock classes:
+
+	grep "BD" /proc/lockdep
+
+Run the command and save the output, then compare against the output from
+a later run of this command to identify the leakers.  This same output
+can also help you find situations where runtime lock initialization has
+been omitted.
diff --git a/Documentation/md.txt b/Documentation/md.txt
index fc94770..993fba3 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -357,14 +357,14 @@
         written to, that device.
 
       state
-        A file recording the current state of the device in the array
+	A file recording the current state of the device in the array
 	which can be a comma separated list of
 	      faulty   - device has been kicked from active use due to
-                         a detected fault or it has unacknowledged bad
-                         blocks
+			 a detected fault, or it has unacknowledged bad
+			 blocks
 	      in_sync  - device is a fully in-sync member of the array
 	      writemostly - device will only be subject to read
-		         requests if there are no other options.
+			 requests if there are no other options.
 			 This applies only to raid1 arrays.
 	      blocked  - device has failed, and the failure hasn't been
 			 acknowledged yet by the metadata handler.
@@ -374,6 +374,13 @@
 			 This includes spares that are in the process
 			 of being recovered to
 	      write_error - device has ever seen a write error.
+	      want_replacement - device is (mostly) working but probably
+			 should be replaced, either due to errors or
+			 due to user request.
+	      replacement - device is a replacement for another active
+			 device with same raid_disk.
+
+
 	This list may grow in future.
 	This can be written to.
 	Writing "faulty"  simulates a failure on the device.
@@ -386,6 +393,13 @@
 	Writing "in_sync" sets the in_sync flag.
 	Writing "write_error" sets writeerrorseen flag.
 	Writing "-write_error" clears writeerrorseen flag.
+	Writing "want_replacement" is allowed at any time except to a
+		replacement device or a spare.  It sets the flag.
+	Writing "-want_replacement" is allowed at any time.  It clears
+		the flag.
+	Writing "replacement" or "-replacement" is only allowed before
+		starting the array.  It sets or clears the flag.
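+
+	For example (array and device names are illustrative), a device can
+	be marked for replacement from user space with:
+
+	   # echo want_replacement > /sys/block/md0/md/dev-sdb/state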
+
 
 	This file responds to select/poll. Any change to 'faulty'
 	or 'blocked' causes an event.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index bbce121..9ad9dde 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -144,6 +144,8 @@
 	- The Linux Near Field Communication (NFS) subsystem.
 olympic.txt
 	- IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
+openvswitch.txt
+	- Open vSwitch developer documentation.
 operstates.txt
 	- Overview of network interface operational states.
 packet_mmap.txt
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index c86d03f..221ad0c 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -200,15 +200,16 @@
 
 0 - All  debug  output  disabled
 1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or tt entry added / changed / deleted
-3 - Enable all messages
+2 - Enable messages related to route added / changed / deleted
+4 - Enable messages related to translation table operations
+7 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
 # echo 2 > /sys/class/net/bat0/mesh/log_level
 
-will enable debug messages for when routes or TTs change.
+will enable debug messages for when routes change.
 
 
 BATCTL
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 91df678..080ad26 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -196,6 +196,23 @@
 
 	The parameters are as follows:
 
+active_slave
+
+	Specifies the new active slave for modes that support it
+	(active-backup, balance-alb and balance-tlb).  Possible values
+	are the name of any currently enslaved interface, or an empty
+	string.  If a name is given, the slave and its link must be up in order
+	to be selected as the new active slave.  If an empty string is
+	specified, the current active slave is cleared, and a new active
+	slave is selected automatically.
+
+	Note that this is only available through the sysfs interface. No module
+	parameter by this name exists.
+
+	The normal value of this option is the name of the currently
+	active slave, or the empty string if there is no active slave or
+	the current mode does not use an active slave.
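+
+	For example, assuming a bond0 interface with eth1 already enslaved
+	(names are illustrative), a new active slave can be selected with:
+
+	# echo eth1 > /sys/class/net/bond0/bonding/active_slave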
+
 ad_select
 
 	Specifies the 802.3ad aggregation selection logic to use.  The
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index f41ea24..1dc1c24 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -78,3 +78,30 @@
 
 See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
 
+6LoWPAN Linux implementation
+============================
+
+The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+octets of actual MAC payload once security is turned on, on a wireless link
+with a link throughput of 250 kbps or less.  The 6LoWPAN adaptation format
+[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
+taking into account limited bandwidth, memory, or energy resources that are
+expected in applications such as wireless Sensor Networks.  [RFC4944] defines
+a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header
+to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
+compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
+relatively large IPv6 and UDP headers down to (in the best case) several bytes.
+
+In September 2011 an update to the standard was published as [RFC6282].
+It deprecates HC1 and HC2 compression and defines the IPHC encoding format,
+which is used in this Linux implementation.
+
+All the code related to 6lowpan can be found in net/ieee802154/6lowpan.*
+
+To set up a 6lowpan interface you need (busybox release > 1.17.0):
+1. Add an IEEE802.15.4 interface and initialize its PAN id;
+2. Add a 6lowpan interface with a command like:
+   # ip link add link wpan0 name lowpan0 type lowpan
+3. Set the MAC address (if needed):
+   # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be
+4. Bring up the 'lowpan0' interface
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 65968fb..ac5debb 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -539,12 +539,14 @@
 		metric = 0;
 	} else
 		metric = ifr.ifr_metric;
+	printf("The result of SIOCGIFMETRIC is %d\n", metric);
 
 	strcpy(ifr.ifr_name, ifname);
 	if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0)
 		mtu = 0;
 	else
 		mtu = ifr.ifr_mtu;
+	printf("The result of SIOCGIFMTU is %d\n", mtu);
 
 	strcpy(ifr.ifr_name, ifname);
 	if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 589f2da..ad3e80e 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -31,6 +31,16 @@
 	when using large numbers of interfaces and when communicating
 	with large numbers of directly-connected peers.
 
+neigh/default/unres_qlen_bytes - INTEGER
+	The maximum number of bytes which may be used by packets
+	queued for each unresolved address by other network layers.
+	(added in linux 3.3)
+
+neigh/default/unres_qlen - INTEGER
+	The maximum number of packets which may be queued for each
+	unresolved address by other network layers.
+	(deprecated in linux 3.3) : use unres_qlen_bytes instead.
+
 mtu_expires - INTEGER
 	Time, in seconds, that cached PMTU information is kept.
 
@@ -165,6 +175,9 @@
 	connections. The algorithm "reno" is always available, but
 	additional choices may be available based on kernel configuration.
 	Default is set as part of kernel configuration.
+	For passive connections, the listener congestion control choice
+	is inherited.
+	[see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ]
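+
+	A minimal C fragment (error handling omitted; "cubic" is only an
+	example value) showing how a listener might set the algorithm that
+	accepted connections will then inherit:
+
+	    const char alg[] = "cubic";
+	    setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, alg, strlen(alg));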
 
 tcp_cookie_size - INTEGER
 	Default size of TCP Cookie Transactions (TCPCT) option, that may be
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
new file mode 100644
index 0000000..b8a048b
--- /dev/null
+++ b/Documentation/networking/openvswitch.txt
@@ -0,0 +1,195 @@
+Open vSwitch datapath developer documentation
+=============================================
+
+The Open vSwitch kernel module allows flexible userspace control over
+flow-level packet processing on selected network devices.  It can be
+used to implement a plain Ethernet switch, network device bonding,
+VLAN processing, network access control, flow-based network control,
+and so on.
+
+The kernel module implements multiple "datapaths" (analogous to
+bridges), each of which can have multiple "vports" (analogous to ports
+within a bridge).  Each datapath also has associated with it a "flow
+table" that userspace populates with "flows" that map from keys based
+on packet headers and metadata to sets of actions.  The most common
+action forwards the packet to another vport; other actions are also
+implemented.
+
+When a packet arrives on a vport, the kernel module processes it by
+extracting its flow key and looking it up in the flow table.  If there
+is a matching flow, it executes the associated actions.  If there is
+no match, it queues the packet to userspace for processing (as part of
+its processing, userspace will likely set up a flow to handle further
+packets of the same type entirely in-kernel).
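+
+The receive path described above can be sketched in simplified C (the
+function and structure names here are illustrative and do not match the
+actual datapath code):
+
+    void example_vport_receive(struct example_datapath *dp,
+			       struct sk_buff *skb)
+    {
+	struct example_flow_key key;
+	struct example_flow *flow;
+
+	example_extract_key(skb, &key);		/* headers + metadata */
+	flow = example_flow_lookup(dp, &key);
+	if (flow)
+		example_execute_actions(dp, skb, flow);	/* e.g. output to a vport */
+	else
+		example_upcall(dp, skb, &key);	/* queue to userspace with key */
+    }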
+
+
+Flow key compatibility
+----------------------
+
+Network protocols evolve over time.  New protocols become important
+and existing protocols lose their prominence.  For the Open vSwitch
+kernel module to remain relevant, it must be possible for newer
+versions to parse additional protocols as part of the flow key.  It
+might even be desirable, someday, to drop support for parsing
+protocols that have become obsolete.  Therefore, the Netlink interface
+to Open vSwitch is designed to allow carefully written userspace
+applications to work with any version of the flow key, past or future.
+
+To support this forward and backward compatibility, whenever the
+kernel module passes a packet to userspace, it also passes along the
+flow key that it parsed from the packet.  Userspace then extracts its
+own notion of a flow key from the packet and compares it against the
+kernel-provided version:
+
+    - If userspace's notion of the flow key for the packet matches the
+      kernel's, then nothing special is necessary.
+
+    - If the kernel's flow key includes more fields than the userspace
+      version of the flow key, for example if the kernel decoded IPv6
+      headers but userspace stopped at the Ethernet type (because it
+      does not understand IPv6), then again nothing special is
+      necessary.  Userspace can still set up a flow in the usual way,
+      as long as it uses the kernel-provided flow key to do it.
+
+    - If the userspace flow key includes more fields than the
+      kernel's, for example if userspace decoded an IPv6 header but
+      the kernel stopped at the Ethernet type, then userspace can
+      forward the packet manually, without setting up a flow in the
+      kernel.  This case is bad for performance because every packet
+      that the kernel considers part of the flow must go to userspace,
+      but the forwarding behavior is correct.  (If userspace can
+      determine that the values of the extra fields would not affect
+      forwarding behavior, then it could set up a flow anyway.)
+
+How flow keys evolve over time is important to making this work, so
+the following sections go into detail.
+
+
+Flow key format
+---------------
+
+A flow key is passed over a Netlink socket as a sequence of Netlink
+attributes.  Some attributes represent packet metadata, defined as any
+information about a packet that cannot be extracted from the packet
+itself, e.g. the vport on which the packet was received.  Most
+attributes, however, are extracted from headers within the packet,
+e.g. source and destination addresses from Ethernet, IP, or TCP
+headers.
+
+The <linux/openvswitch.h> header file defines the exact format of the
+flow key attributes.  For informal explanatory purposes here, we write
+them as comma-separated strings, with parentheses indicating arguments
+and nesting.  For example, the following could represent a flow key
+corresponding to a TCP packet that arrived on vport 1:
+
+    in_port(1), eth(src=e0:91:f5:21:d0:b2, dst=00:02:e3:0f:80:a4),
+    eth_type(0x0800), ipv4(src=172.16.0.20, dst=172.18.0.52, proto=6, tos=0,
+    frag=no), tcp(src=49163, dst=80)
+
+Often we ellipsize arguments not important to the discussion, e.g.:
+
+    in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)
+
+
+Basic rule for evolving flow keys
+---------------------------------
+
+Some care is needed to really maintain forward and backward
+compatibility for applications that follow the rules listed under
+"Flow key compatibility" above.
+
+The basic rule is obvious:
+
+    ------------------------------------------------------------------
+    New network protocol support must only supplement existing flow
+    key attributes.  It must not change the meaning of already defined
+    flow key attributes.
+    ------------------------------------------------------------------
+
+This rule does have less-obvious consequences so it is worth working
+through a few examples.  Suppose, for example, that the kernel module
+did not already implement VLAN parsing.  Instead, it just interpreted
+the 802.1Q TPID (0x8100) as the Ethertype then stopped parsing the
+packet.  The flow key for any packet with an 802.1Q header would look
+essentially like this, ignoring metadata:
+
+    eth(...), eth_type(0x8100)
+
+Naively, to add VLAN support, it makes sense to add a new "vlan" flow
+key attribute to contain the VLAN tag, then continue to decode the
+encapsulated headers beyond the VLAN tag using the existing field
+definitions.  With this change, a TCP packet in VLAN 10 would have a
+flow key much like this:
+
+    eth(...), vlan(vid=10, pcp=0), eth_type(0x0800), ip(proto=6, ...), tcp(...)
+
+But this change would negatively affect a userspace application that
+has not been updated to understand the new "vlan" flow key attribute.
+The application could, following the flow compatibility rules above,
+ignore the "vlan" attribute that it does not understand and therefore
+assume that the flow contained IP packets.  This is a bad assumption
+(the flow only contains IP packets if one parses and skips over the
+802.1Q header) and it could cause the application's behavior to change
+across kernel versions even though it follows the compatibility rules.
+
+The solution is to use a set of nested attributes.  This is, for
+example, why 802.1Q support uses nested attributes.  A TCP packet in
+VLAN 10 is actually expressed as:
+
+    eth(...), eth_type(0x8100), vlan(vid=10, pcp=0), encap(eth_type(0x0800),
+    ip(proto=6, ...), tcp(...))
+
+Notice how the "eth_type", "ip", and "tcp" flow key attributes are
+nested inside the "encap" attribute.  Thus, an application that does
+not understand the "vlan" key will not see either of those attributes
+and therefore will not misinterpret them.  (Also, the outer eth_type
+is still 0x8100, not changed to 0x0800.)
+
+Handling malformed packets
+--------------------------
+
+Don't drop packets in the kernel for malformed protocol headers, bad
+checksums, etc.  This would prevent userspace from implementing a
+simple Ethernet switch that forwards every packet.
+
+Instead, in such a case, include an attribute with "empty" content.
+It doesn't matter if the empty content could be valid protocol values,
+as long as those values are rarely seen in practice, because userspace
+can always have all packets with those values sent up to it and
+handle them individually.
+
+For example, consider a packet that contains an IP header that
+indicates protocol 6 for TCP, but which is truncated just after the IP
+header, so that the TCP header is missing.  The flow key for this
+packet would include a tcp attribute with all-zero src and dst, like
+this:
+
+    eth(...), eth_type(0x0800), ip(proto=6, ...), tcp(src=0, dst=0)
+
+As another example, consider a packet with an Ethernet type of 0x8100,
+indicating that a VLAN TCI should follow, but which is truncated just
+after the Ethernet type.  The flow key for this packet would include
+an all-zero-bits vlan and an empty encap attribute, like this:
+
+    eth(...), eth_type(0x8100), vlan(0), encap()
+
+Unlike a TCP packet with source and destination ports 0, an
+all-zero-bits VLAN TCI is not that rare, so the CFI bit (aka
+VLAN_TAG_PRESENT inside the kernel) is ordinarily set in a vlan
+attribute expressly to allow this situation to be distinguished.
+Thus, the flow key in this second example unambiguously indicates a
+missing or malformed VLAN TCI.
+
+Other rules
+-----------
+
+The other rules for flow keys are much less subtle:
+
+    - Duplicate attributes are not allowed at a given nesting level.
+
+    - Ordering of attributes is not significant.
+
+    - When the kernel sends a given flow key to userspace, it always
+      composes it the same way.  This allows userspace to hash and
+      compare entire flow keys that it may not be able to fully
+      interpret.
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 4acea66..1c08a4b 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -155,7 +155,7 @@
 
  /* fill sockaddr_ll struct to prepare binding */
  my_addr.sll_family = AF_PACKET;
- my_addr.sll_protocol = ETH_P_ALL;
+ my_addr.sll_protocol = htons(ETH_P_ALL);
  my_addr.sll_ifindex =  s_ifr.ifr_ifindex;
 
  /* bind socket to eth0 */
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index a177de2..579994a 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -208,7 +208,7 @@
 CPU's backlog when a packet in this flow was last enqueued. Each backlog
 queue has a head counter that is incremented on dequeue. A tail counter
 is computed as head counter + queue length. In other words, the counter
-in rps_dev_flow_table[i] records the last element in flow i that has
+in rps_dev_flow[i] records the last element in flow i that has
 been enqueued onto the currently designated CPU for flow i (of course,
 entry i is actually selected by hash and multiple flows may hash to the
 same entry i).
@@ -224,7 +224,7 @@
 
 - The current CPU's queue head counter >= the recorded tail counter
   value in rps_dev_flow[i]
-- The current CPU is unset (equal to NR_CPUS)
+- The current CPU is unset (equal to RPS_NO_CPU)
 - The current CPU is offline
 
 After this check, the packet is sent to the (possibly updated) current
@@ -235,7 +235,7 @@
 
 ==== RFS Configuration
 
-RFS is only available if the kconfig symbol CONFIG_RFS is enabled (on
+RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
 by default for SMP). The functionality remains disabled until explicitly
 configured. The number of entries in the global flow table is set through:
 
@@ -258,7 +258,7 @@
 would normally be configured to the same value as rps_sock_flow_entries.
 For a multi-queue device, the rps_flow_cnt for each queue might be
 configured as rps_sock_flow_entries / N, where N is the number of
-queues. So for instance, if rps_flow_entries is set to 32768 and there
+queues. So for instance, if rps_sock_flow_entries is set to 32768 and there
 are 16 configured receive queues, rps_flow_cnt for each queue might be
 configured as 2048.
 
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 8d67980..d0aeead 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -4,14 +4,16 @@
 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 
 This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
-(Synopsys IP blocks); it has been fully tested on STLinux platforms.
+(Synopsys IP blocks).
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(i.e. 7xxx/5xxx SoCs) and it's known working on other platforms i.e. ARM SPEAr.
+(i.e. 7xxx/5xxx SoCs), SPEAr (ARM), Loongson1B (MIPS) and the Xilinx XC2V3000
+FF1152AMT0221 D1215994A Virtex FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing the first code
-implementation.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
+Universal version 4.0 have been used for developing this driver.
+
+This driver supports both the platform bus and PCI.
 
 Please, for more information also visit: www.stlinux.com
 
@@ -277,5 +279,5 @@
 
 6) TODO:
  o XGMAC is not supported.
- o Review the timer optimisation code to use an embedded device that will be
-  available in new chip generations.
+ o Add EEE (Energy Efficient Ethernet) support
+ o Add PTP (Precision Time Protocol) support
diff --git a/Documentation/networking/team.txt b/Documentation/networking/team.txt
new file mode 100644
index 0000000..5a01368
--- /dev/null
+++ b/Documentation/networking/team.txt
@@ -0,0 +1,2 @@
+Team devices are driven from userspace via libteam library which is here:
+	https://github.com/jpirko/libteam
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 3139fb5..20af7de 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -126,7 +126,9 @@
 pointed to by the ops member of struct dev_pm_domain, or by the pm member of
 struct bus_type, struct device_type and struct class.  They are mostly of
 interest to the people writing infrastructure for platforms and buses, like PCI
-or USB, or device type and device class drivers.
+or USB, or device type and device class drivers.  They also are relevant to the
+writers of device drivers whose subsystems (PM domains, device types, device
+classes and bus types) don't provide all power management methods.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on.  Not many people
@@ -268,32 +270,35 @@
 unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
 been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
 
-All phases use PM domain, bus, type, or class callbacks (that is, methods
-defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
-These callbacks are regarded by the PM core as mutually exclusive.  Moreover,
-PM domain callbacks always take precedence over bus, type and class callbacks,
-while type callbacks take precedence over bus and class callbacks, and class
-callbacks take precedence over bus callbacks.  To be precise, the following
-rules are used to determine which callback to execute in the given phase:
+All phases use PM domain, bus, type, class or driver callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
+dev->driver->pm).  These callbacks are regarded by the PM core as mutually
+exclusive.  Moreover, PM domain callbacks always take precedence over all of the
+other callbacks and, for example, type callbacks take precedence over bus, class
+and driver callbacks.  To be precise, the following rules are used to determine
+which callback to execute in the given phase:
 
-    1.	If dev->pm_domain is present, the PM core will attempt to execute the
-	callback included in dev->pm_domain->ops.  If that callback is not
-	present, no action will be carried out for the given device.
+    1.	If dev->pm_domain is present, the PM core will choose the callback
+	included in dev->pm_domain->ops for execution
 
     2.	Otherwise, if both dev->type and dev->type->pm are present, the callback
-	included in dev->type->pm will be executed.
+	included in dev->type->pm will be chosen for execution.
 
     3.	Otherwise, if both dev->class and dev->class->pm are present, the
-	callback included in dev->class->pm will be executed.
+	callback included in dev->class->pm will be chosen for execution.
 
     4.	Otherwise, if both dev->bus and dev->bus->pm are present, the callback
-	included in dev->bus->pm will be executed.
+	included in dev->bus->pm will be chosen for execution.
 
 This allows PM domains and device types to override callbacks provided by bus
 types or device classes if necessary.
 
-These callbacks may in turn invoke device- or driver-specific methods stored in
-dev->driver->pm, but they don't have to.
+The PM domain, type, class and bus callbacks may in turn invoke device- or
+driver-specific methods stored in dev->driver->pm, but they don't have to do
+that.
+
+If the subsystem callback chosen for execution is not present, the PM core will
+execute the corresponding method from dev->driver->pm instead if there is one.
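+
+As an illustrative sketch (the "foo" driver and its callbacks are
+hypothetical), a platform driver can provide such methods by pointing its
+driver.pm field at a struct dev_pm_ops:
+
+    static int foo_suspend(struct device *dev) { /* quiesce device */ return 0; }
+    static int foo_resume(struct device *dev) { /* reactivate device */ return 0; }
+
+    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
+
+    static struct platform_driver foo_driver = {
+	.driver = {
+		.name	= "foo",
+		.pm	= &foo_pm_ops,
+	},
+    };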
 
 
 Entering System Suspend
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 316c2ba..6ccb68f 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -21,7 +21,7 @@
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes.  A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly.  Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly.  Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default.  However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged).  From this point it is regarded as freezable
+directly is not allowed).  From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
@@ -176,3 +176,28 @@
 A driver must have all firmwares it may need in RAM before suspend() is called.
 If keeping them is not practical, for example due to their size, they must be
 requested early enough using the suspend notifier API described in notifiers.txt.
+
+VI. Are there any precautions to be taken to prevent freezing failures?
+
+Yes, there are.
+
+First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+from system-wide sleep such as suspend/hibernation is not encouraged.
+If possible, that piece of code must instead hook onto the suspend/hibernation
+notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
+(kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
+it is strongly discouraged to call mutex_[un]lock(&pm_mutex) directly, since
+that could lead to freezing failures: if the suspend/hibernate code has
+already acquired 'pm_mutex', the other task blocks on the lock in
+TASK_UNINTERRUPTIBLE state, so the freezer cannot freeze it and the freeze
+fails.
+
+The [un]lock_system_sleep() APIs, however, are safe to use in this scenario,
+because they ask the freezer to skip freezing the task: it is already
+"frozen enough" while blocked on 'pm_mutex', which is released only after
+the entire suspend/hibernation sequence is complete.
+To summarize, use [un]lock_system_sleep() instead of calling
+mutex_[un]lock(&pm_mutex) directly; that prevents freezing failures.
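+
+A minimal sketch (the function name is illustrative) of the recommended
+pattern:
+
+	static void foo_sync_with_suspend(void)
+	{
+		lock_system_sleep();
+		/* code that must not run concurrently with suspend/hibernation */
+		unlock_system_sleep();
+	}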
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index c2ae8bf..4abe83e 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -57,6 +57,10 @@
 
   4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
 
+If the subsystem chosen by applying the above rules doesn't provide the relevant
+callback, the PM core will invoke the corresponding driver callback stored in
+dev->driver->pm directly (if present).
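+
+As an illustrative sketch (the "foo" driver and the bodies of its callbacks
+are hypothetical), such a driver-supplied set of runtime PM callbacks could
+be declared as:
+
+  static int foo_runtime_suspend(struct device *dev)
+  {
+	/* put the device into a low-power state */
+	return 0;
+  }
+
+  static int foo_runtime_resume(struct device *dev)
+  {
+	/* bring the device back to the fully functional state */
+	return 0;
+  }
+
+  static const struct dev_pm_ops foo_pm_ops = {
+	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
+  };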
+
 The PM core always checks which callback to use in the order given above, so the
 priority order of callbacks from high to low is: PM domain, device type, class
 and bus type.  Moreover, the high-priority one will always take precedence over
@@ -64,86 +68,88 @@
 are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
-enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
-->runtime_idle() callbacks may be invoked in atomic context with interrupts
-disabled for a given device.  This implies that the callback routines in
-question must not block or sleep, but it also means that the synchronous helper
-functions listed at the end of Section 4 may be used for that device within an
-interrupt handler or generally in an atomic context.
+enabled.  However, the pm_runtime_irq_safe() helper function can be used to tell
+the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
+and ->runtime_idle() callbacks for the given device in atomic context with
+interrupts disabled.  This implies that the callback routines in question must
+not block or sleep, but it also means that the synchronous helper functions
+listed at the end of Section 4 may be used for that device within an interrupt
+handler or generally in an atomic context.
 
-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
-the suspend of the device as appropriate, which may, but need not include
-executing the device driver's own ->runtime_suspend() callback (from the
+The subsystem-level suspend callback, if present, is _entirely_ _responsible_
+for handling the suspend of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_suspend() callback (from the
 PM core's point of view it is not necessary to implement a ->runtime_suspend()
 callback in a device driver as long as the subsystem-level suspend callback
 knows what to do to handle the device).
 
-  * Once the subsystem-level suspend callback has completed successfully
-    for given device, the PM core regards the device as suspended, which need
-    not mean that the device has been put into a low power state.  It is
-    supposed to mean, however, that the device will not process data and will
-    not communicate with the CPU(s) and RAM until the subsystem-level resume
-    callback is executed for it.  The runtime PM status of a device after
-    successful execution of the subsystem-level suspend callback is 'suspended'.
+  * Once the subsystem-level suspend callback (or the driver suspend callback,
+    if invoked directly) has completed successfully for the given device, the PM
+    core regards the device as suspended, which need not mean that it has been
+    put into a low power state.  It is supposed to mean, however, that the
+    device will not process data and will not communicate with the CPU(s) and
+    RAM until the appropriate resume callback is executed for it.  The runtime
+    PM status of a device after successful execution of the suspend callback is
+    'suspended'.
 
-  * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
-    the device's runtime PM status is 'active', which means that the device
-    _must_ be fully operational afterwards.
+  * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
+    status remains 'active', which means that the device _must_ be fully
+    operational afterwards.
 
-  * If the subsystem-level suspend callback returns an error code different
-    from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
-    refuse to run the helper functions described in Section 4 for the device,
-    until the status of it is directly set either to 'active', or to 'suspended'
-    (the PM core provides special helper functions for this purpose).
+  * If the suspend callback returns an error code different from -EBUSY and
+    -EAGAIN, the PM core regards this as a fatal error and will refuse to run
+    the helper functions described in Section 4 for the device until its status
+    is directly set to either 'active' or 'suspended' (the PM core provides
+    special helper functions for this purpose).
 
-In particular, if the driver requires remote wake-up capability (i.e. hardware
+In particular, if the driver requires remote wakeup capability (i.e. hardware
 mechanism allowing the device to request a change of its power state, such as
 PCI PME) for proper functioning and device_run_wake() returns 'false' for the
 device, then ->runtime_suspend() should return -EBUSY.  On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a low
-power state during the execution of the subsystem-level suspend callback, it is
-expected that remote wake-up will be enabled for the device.  Generally, remote
-wake-up should be enabled for all input devices put into a low power state at
-run time.
+device_run_wake() returns 'true' for the device and the device is put into a
+low-power state during the execution of the suspend callback, it is expected
+that remote wakeup will be enabled for the device.  Generally, remote wakeup
+should be enabled for all input devices put into low-power states at run time.
 
-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
-resume of the device as appropriate, which may, but need not include executing
-the device driver's own ->runtime_resume() callback (from the PM core's point of
-view it is not necessary to implement a ->runtime_resume() callback in a device
-driver as long as the subsystem-level resume callback knows what to do to handle
-the device).
+The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
+handling the resume of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_resume() callback (from the
+PM core's point of view it is not necessary to implement a ->runtime_resume()
+callback in a device driver as long as the subsystem-level resume callback knows
+what to do to handle the device).
 
-  * Once the subsystem-level resume callback has completed successfully, the PM
-    core regards the device as fully operational, which means that the device
-    _must_ be able to complete I/O operations as needed.  The runtime PM status
-    of the device is then 'active'.
+  * Once the subsystem-level resume callback (or the driver resume callback, if
+    invoked directly) has completed successfully, the PM core regards the device
+    as fully operational, which means that the device _must_ be able to complete
+    I/O operations as needed.  The runtime PM status of the device is then
+    'active'.
 
-  * If the subsystem-level resume callback returns an error code, the PM core
-    regards this as a fatal error and will refuse to run the helper functions
-    described in Section 4 for the device, until its status is directly set
-    either to 'active' or to 'suspended' (the PM core provides special helper
-    functions for this purpose).
+  * If the resume callback returns an error code, the PM core regards this as a
+    fatal error and will refuse to run the helper functions described in Section
+    4 for the device, until its status is directly set to either 'active', or
+    'suspended' (by means of special helper functions provided by the PM core
+    for this purpose).
 
-The subsystem-level idle callback is executed by the PM core whenever the device
-appears to be idle, which is indicated to the PM core by two counters, the
-device's usage counter and the counter of 'active' children of the device.
+The idle callback (a subsystem-level one, if present, or the driver one) is
+executed by the PM core whenever the device appears to be idle, which is
+indicated to the PM core by two counters, the device's usage counter and the
+counter of 'active' children of the device.
 
   * If any of these counters is decreased using a helper function provided by
     the PM core and it turns out to be equal to zero, the other counter is
     checked.  If that counter also is equal to zero, the PM core executes the
-    subsystem-level idle callback with the device as an argument.
+    idle callback with the device as its argument.
 
-The action performed by a subsystem-level idle callback is totally dependent on
-the subsystem in question, but the expected and recommended action is to check
+The action performed by the idle callback is totally dependent on the subsystem
+(or driver) in question, but the expected and recommended action is to check
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case.  The value returned by this callback is ignored by the PM
 core.
 
 The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's runtime
-PM callbacks:
+that the following constraints are met with respect to runtime PM callbacks for
+one device:
 
 (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
     ->runtime_suspend() in parallel with ->runtime_resume() or with another
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index efe998b..462321c 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -41,7 +41,6 @@
 Debugging modules
 The proc file system
 Starting points for debugging scripting languages etc.
-Dumptool & Lcrash
 SysRq
 References
 Special Thanks
@@ -2455,39 +2454,6 @@
 
 
 
-Dumptool & Lcrash ( lkcd )
-==========================
-Michael Holzheu & others here at IBM have a fairly mature port of 
-SGI's lcrash tool which allows one to look at kernel structures in a
-running kernel.
-
-It also complements a tool called dumptool which dumps all the kernel's
-memory pages & registers to either a tape or a disk.
-This can be used by tech support or an ambitious end user do
-post mortem debugging of a machine like gdb core dumps.
-
-Going into how to use this tool in detail will be explained
-in other documentation supplied by IBM with the patches & the 
-lcrash homepage http://oss.sgi.com/projects/lkcd/ & the lcrash manpage.
-
-How they work
--------------
-Lcrash is a perfectly normal program,however, it requires 2 
-additional files, Kerntypes which is built using a patch to the 
-linux kernel sources in the linux root directory & the System.map.
-
-Kerntypes is an objectfile whose sole purpose in life
-is to provide stabs debug info to lcrash, to do this
-Kerntypes is built from kerntypes.c which just includes the most commonly
-referenced header files used when debugging, lcrash can then read the
-.stabs section of this file.
-
-Debugging a live system it uses /dev/mem
-alternatively for post mortem debugging it uses the data 
-collected by dumptool.
-
-
-
 SysRq
 =====
 This is now supported by linux for s/390 & z/Architecture.
diff --git a/Documentation/scsi/53c700.txt b/Documentation/scsi/53c700.txt
index 0da681d..e31aceb 100644
--- a/Documentation/scsi/53c700.txt
+++ b/Documentation/scsi/53c700.txt
@@ -16,32 +16,13 @@
 Compile Time Flags
 ==================
 
-The driver may be either io mapped or memory mapped.  This is
-selectable by configuration flags:
-
-CONFIG_53C700_MEM_MAPPED
-
-define if the driver is memory mapped.
-
-CONFIG_53C700_IO_MAPPED
-
-define if the driver is to be io mapped.
-
-One or other of the above flags *must* be defined.
-
-Other flags are:
+The only remaining compile time flag is:
 
 CONFIG_53C700_LE_ON_BE
 
 define if the chipset must be supported in little endian mode on a big
 endian architecture (used for the 700 on parisc).
 
-CONFIG_53C700_USE_CONSISTENT
-
-allocate consistent memory (should only be used if your architecture
-has a mixture of consistent and inconsistent memory).  Fully
-consistent or fully inconsistent architectures should not define this.
-
 
 Using the Chip Core Driver
 ==========================
diff --git a/Documentation/serial/driver b/Documentation/serial/driver
index 77ba0af..0a25a91 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver
@@ -101,7 +101,7 @@
 	Returns the current state of modem control inputs.  The state
 	of the outputs should not be returned, since the core keeps
 	track of their state.  The state information should include:
-		- TIOCM_DCD	state of DCD signal
+		- TIOCM_CAR	state of DCD signal
 		- TIOCM_CTS	state of CTS signal
 		- TIOCM_DSR	state of DSR signal
 		- TIOCM_RI	state of RI signal
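
As a hedged illustration of the convention documented above (DCD is reported through
TIOCM_CAR), a get_mctrl() implementation might look like the sketch below; the FOO_*
status bits and the register offset are invented, only the uart_port callback shape and
the TIOCM_* flags come from the serial core.

	#include <linux/io.h>
	#include <linux/serial_core.h>

	/* Hypothetical modem-status register bits for an imaginary UART. */
	#define FOO_MSR		0x18		/* made-up register offset */
	#define FOO_MSR_DCD	(1 << 0)
	#define FOO_MSR_CTS	(1 << 1)
	#define FOO_MSR_DSR	(1 << 2)
	#define FOO_MSR_RI	(1 << 3)

	static unsigned int foo_get_mctrl(struct uart_port *port)
	{
		unsigned int msr = readl(port->membase + FOO_MSR);
		unsigned int ret = 0;

		/* Only input signals are reported; the core tracks the outputs. */
		if (msr & FOO_MSR_DCD)
			ret |= TIOCM_CAR;
		if (msr & FOO_MSR_CTS)
			ret |= TIOCM_CTS;
		if (msr & FOO_MSR_DSR)
			ret |= TIOCM_DSR;
		if (msr & FOO_MSR_RI)
			ret |= TIOCM_RI;

		return ret;
	}
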
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index b510564..bb24c2a 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -191,8 +191,6 @@
 
 Currently, only exact string matches are supported.
 
-Currently, the maximum number of predicates in a filter is 16.
-
 5.2 Setting filters
 -------------------
 
diff --git a/Documentation/vgaarbiter.txt b/Documentation/vgaarbiter.txt
index b7d401e..014423e 100644
--- a/Documentation/vgaarbiter.txt
+++ b/Documentation/vgaarbiter.txt
@@ -177,7 +177,7 @@
 
 Benjamin Herrenschmidt (IBM?) started this work when he discussed such design
 with the Xorg community in 2005 [1, 2]. In the end of 2007, Paulo Zanoni and
-Tiago Vignatti (both of C3SL/Federal University of Paraná) proceeded his work
+Tiago Vignatti (both of C3SL/Federal University of Paraná) proceeded his work
 enhancing the kernel code to adapt as a kernel module and also did the
 implementation of the user space side [3]. Now (2009) Tiago Vignatti and Dave
 Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7945b0b..e2a4b52 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1100,6 +1100,15 @@
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
 
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support.  Instead it is reported via
+
+  ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
 4.47 KVM_PPC_GET_PVINFO
 
 Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device.  Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment.  The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
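
A minimal userspace sketch of the TSC-deadline probing sequence described in the hunk
above; error handling is omitted and the CPUID plumbing is left to the caller, only
KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER and the /dev/kvm node are real
interfaces.

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Probe the TSC deadline timer capability before building the CPUID table;
	 * it is deliberately not reported by KVM_GET_SUPPORTED_CPUID. */
	static int kvm_has_tsc_deadline(void)
	{
		int sys_fd = open("/dev/kvm", O_RDWR);
		int has_cap;

		has_cap = ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER);
		close(sys_fd);

		return has_cap > 0;
	}

	/* If this returns true and the VM uses KVM_CREATE_IRQCHIP (or the local APIC
	 * is emulated in userspace), ecx[24] of CPUID leaf 1 may then be set in the
	 * entries passed to KVM_SET_CPUID2 on the vcpu fd. */
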
diff --git a/MAINTAINERS b/MAINTAINERS
index b9db108..1e7d904 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1124,13 +1124,6 @@
 F:	arch/arm/mach-shmobile/
 F:	drivers/sh/
 
-ARM/TELECHIPS ARM ARCHITECTURE
-M:	"Hans J. Koch" <hjk@hansjkoch.de>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
-F:	arch/arm/plat-tcc/
-F:	arch/arm/mach-tcc8k/
-
 ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1698,11 +1691,9 @@
 
 CAN NETWORK LAYER
 M:	Oliver Hartkopp <socketcan@hartkopp.net>
-M:	Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
-M:	Urs Thuermann <urs.thuermann@volkswagen.de>
 L:	linux-can@vger.kernel.org
-L:	netdev@vger.kernel.org
-W:	http://developer.berlios.de/projects/socketcan/
+W:	http://gitorious.org/linux-can
+T:	git git://gitorious.org/linux-can/linux-can-next.git
 S:	Maintained
 F:	net/can/
 F:	include/linux/can.h
@@ -1713,9 +1704,10 @@
 
 CAN NETWORK DRIVERS
 M:	Wolfgang Grandegger <wg@grandegger.com>
+M:	Marc Kleine-Budde <mkl@pengutronix.de>
 L:	linux-can@vger.kernel.org
-L:	netdev@vger.kernel.org
-W:	http://developer.berlios.de/projects/socketcan/
+W:	http://gitorious.org/linux-can
+T:	git git://gitorious.org/linux-can/linux-can-next.git
 S:	Maintained
 F:	drivers/net/can/
 F:	include/linux/can/dev.h
@@ -2700,7 +2692,7 @@
 M:	Stefan Richter <stefanr@s5r6.in-berlin.de>
 L:	linux1394-devel@lists.sourceforge.net
 W:	http://ieee1394.wiki.kernel.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
 S:	Maintained
 F:	drivers/firewire/
 F:	include/linux/firewire*.h
@@ -2920,6 +2912,7 @@
 
 GPIO SUBSYSTEM
 M:	Grant Likely <grant.likely@secretlab.ca>
+M:	Linus Walleij <linus.walleij@stericsson.com>
 S:	Maintained
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 F:	Documentation/gpio.txt
@@ -3101,6 +3094,7 @@
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:	Thomas Gleixner <tglx@linutronix.de>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Maintained
 F:	Documentation/timers/
 F:	kernel/hrtimer.c
@@ -3188,6 +3182,16 @@
 S:	Maintained
 F:	fs/hugetlbfs/
 
+Hyper-V CORE AND DRIVERS
+M:	K. Y. Srinivasan <kys@microsoft.com>
+M:	Haiyang Zhang <haiyangz@microsoft.com>
+L:	devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/hv/
+F:	drivers/hid/hid-hyperv.c
+F:	drivers/net/hyperv/
+F:	drivers/staging/hv/
+
 I2C/SMBUS STUB DRIVER
 M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
 L:	linux-i2c@vger.kernel.org
@@ -3583,8 +3587,7 @@
 IPWIRELESS DRIVER
 M:	Jiri Kosina <jkosina@suse.cz>
 M:	David Sterba <dsterba@suse.cz>
-S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/ipwireless_cs.git
+S:	Odd Fixes
 F:	drivers/tty/ipwireless/
 
 IPX NETWORK LAYER
@@ -3610,7 +3613,7 @@
 IRQ SUBSYSTEM
 M:	Thomas Gleixner <tglx@linutronix.de>
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	kernel/irq/
 
 ISAPNP
@@ -4011,7 +4014,7 @@
 M:	Matt Porter <mporter@kernel.crashing.org>
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@lists.ozlabs.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
+T:	git git://git.infradead.org/users/jwboyer/powerpc-4xx.git
 S:	Maintained
 F:	arch/powerpc/platforms/40x/
 F:	arch/powerpc/platforms/44x/
@@ -4098,7 +4101,7 @@
 LOCKDEP AND LOCKSTAT
 M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:	Maintained
 F:	Documentation/lockdep*.txt
 F:	Documentation/lockstat.txt
@@ -4280,7 +4283,9 @@
 S:	Maintained
 F:	Documentation/dvb/
 F:	Documentation/video4linux/
+F:	Documentation/DocBook/media/
 F:	drivers/media/
+F:	drivers/staging/media/
 F:	include/media/
 F:	include/linux/dvb/
 F:	include/linux/videodev*.h
@@ -4852,6 +4857,14 @@
 T:	git git://openrisc.net/~jonas/linux
 F:	arch/openrisc
 
+OPENVSWITCH
+M:	Jesse Gross <jesse@nicira.com>
+L:	dev@openvswitch.org
+W:	http://openvswitch.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+S:	Maintained
+F:	net/openvswitch/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5086,6 +5099,7 @@
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Supported
 F:	kernel/events/*
 F:	include/linux/perf_event.h
@@ -5165,6 +5179,7 @@
 
 POSIX CLOCKS and TIMERS
 M:	Thomas Gleixner <tglx@linutronix.de>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	fs/timerfd.c
 F:	include/linux/timer*
@@ -5369,6 +5384,7 @@
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
+M:	Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:	Ron Mercer <ron.mercer@qlogic.com>
 M:	linux-driver@qlogic.com
 L:	netdev@vger.kernel.org
@@ -5680,6 +5696,7 @@
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
 M:	Thomas Gleixner <tglx@linutronix.de>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	include/linux/clocksource.h
 F:	include/linux/time.h
@@ -5704,6 +5721,7 @@
 SCHEDULER
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Peter Zijlstra <peterz@infradead.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:	Maintained
 F:	kernel/sched*
 F:	include/linux/sched.h
@@ -5886,7 +5904,6 @@
 
 SFC NETWORK DRIVER
 M:	Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:	Steve Hodgson <shodgson@solarflare.com>
 M:	Ben Hutchings <bhutchings@solarflare.com>
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -6252,7 +6269,7 @@
 
 STABLE BRANCH
 M:	Greg Kroah-Hartman <greg@kroah.com>
-L:	stable@kernel.org
+L:	stable@vger.kernel.org
 S:	Maintained
 
 STAGING SUBSYSTEM
@@ -6313,12 +6330,6 @@
 S:	Odd Fixes
 F:	drivers/staging/frontier/
 
-STAGING - HYPER-V (MICROSOFT)
-M:	Hank Janssen <hjanssen@microsoft.com>
-M:	Haiyang Zhang <haiyangz@microsoft.com>
-S:	Odd Fixes
-F:	drivers/staging/hv/
-
 STAGING - INDUSTRIAL IO
 M:	Jonathan Cameron <jic23@cam.ac.uk>
 L:	linux-iio@vger.kernel.org
@@ -6494,6 +6505,13 @@
 S:	Maintained
 F:	net/ipv4/tcp_lp.c
 
+TEAM DRIVER
+M:	Jiri Pirko <jpirko@redhat.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/team/
+F:	include/linux/if_team.h
+
 TEGRA SUPPORT
 M:	Colin Cross <ccross@android.com>
 M:	Olof Johansson <olof@lixom.net>
@@ -6631,7 +6649,7 @@
 M:	Steven Rostedt <rostedt@goodmis.org>
 M:	Frederic Weisbecker <fweisbec@gmail.com>
 M:	Ingo Molnar <mingo@redhat.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Maintained
 F:	Documentation/trace/ftrace.txt
 F:	arch/*/*/*/ftrace.h
@@ -7381,7 +7399,7 @@
 M:	Ingo Molnar <mingo@redhat.com>
 M:	"H. Peter Anvin" <hpa@zytor.com>
 M:	x86@kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:	Maintained
 F:	Documentation/x86/
 F:	arch/x86/
diff --git a/Makefile b/Makefile
index d1ea73f..adddd11 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 4b0669c..2505740 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,10 @@
 config HAVE_OPROFILE
 	bool
 
+config OPROFILE_NMI_TIMER
+	def_bool y
+	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+
 config KPROBES
 	bool "Kprobes"
 	depends on MODULES
diff --git a/arch/alpha/include/asm/ipcbuf.h b/arch/alpha/include/asm/ipcbuf.h
index d9c0e1a..84c7e51 100644
--- a/arch/alpha/include/asm/ipcbuf.h
+++ b/arch/alpha/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ALPHA_IPCBUF_H
-#define _ALPHA_IPCBUF_H
-
-/* 
- * The ipc64_perm structure for alpha architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit seq
- * - 2 miscellaneous 64-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t	key;
-	__kernel_uid_t	uid;
-	__kernel_gid_t	gid;
-	__kernel_uid_t	cuid;
-	__kernel_gid_t	cgid;
-	__kernel_mode_t	mode; 
-	unsigned short	seq;
-	unsigned short	__pad1;
-	unsigned long	__unused1;
-	unsigned long	__unused2;
-};
-
-#endif /* _ALPHA_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 06edfef..082355f 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -69,6 +69,9 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
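
A hedged sketch of how the new socket option above would be used from userspace: only
setsockopt()/SOL_SOCKET and the generic cmsg macros are known interfaces here; the
assumption that acknowledgement status arrives as SCM_WIFI_STATUS ancillary data of
type int alongside received messages is not spelled out in this hunk.

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	#ifndef SO_WIFI_STATUS
	#define SO_WIFI_STATUS	41		/* value added in the hunk above */
	#define SCM_WIFI_STATUS	SO_WIFI_STATUS
	#endif

	/* Opt in to wifi TX-status reporting on an already-open socket. */
	static int enable_wifi_status(int fd)
	{
		int one = 1;

		return setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &one, sizeof(one));
	}

	/* Assumed delivery: walk the ancillary data of a received message and look
	 * for SCM_WIFI_STATUS. */
	static void check_wifi_status(struct msghdr *msg)
	{
		struct cmsghdr *cmsg;
		int acked;

		for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
			if (cmsg->cmsg_level != SOL_SOCKET ||
			    cmsg->cmsg_type != SCM_WIFI_STATUS)
				continue;
			memcpy(&acked, CMSG_DATA(cmsg), sizeof(acked));
			printf("frame %s acknowledged\n", acked ? "was" : "was not");
		}
	}
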
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index ff73db0..28335bd 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -79,7 +79,6 @@
 #define TIF_UAC_SIGBUS		12	/* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	14	/* restore signal mask in do_signal */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return.  */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 8815443..0a05790 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -15,9 +15,4 @@
 #include <asm-generic/int-l64.h>
 #endif
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned int umode_t;
-
-#endif /* __ASSEMBLY__ */
 #endif /* _ALPHA_TYPES_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e084b7e..f72e170 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -220,8 +220,9 @@
 	  be avoided when possible.
 
 config PHYS_OFFSET
-	hex "Physical address of main memory"
+	hex "Physical address of main memory" if MMU
 	depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+	default DRAM_BASE if !MMU
 	help
 	  Please provide the physical address corresponding to the
 	  location of main memory in your system.
@@ -257,6 +258,7 @@
 	select ARCH_HAS_CPUFREQ
 	select CLKDEV_LOOKUP
 	select HAVE_MACH_CLKDEV
+	select HAVE_TCM
 	select ICST
 	select GENERIC_CLOCKEVENTS
 	select PLAT_VERSATILE
@@ -340,10 +342,12 @@
 	select ARM_AMBA
 	select ARM_GIC
 	select ARM_TIMER_SP804
+	select CACHE_L2X0
 	select CLKDEV_LOOKUP
 	select CPU_V7
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU
+	select HAVE_SMP
 	select USE_OF
 	help
 	  Support for the Calxeda Highbank SoC based boards.
@@ -361,6 +365,7 @@
 	select CPU_V6K
 	select GENERIC_CLOCKEVENTS
 	select ARM_GIC
+	select MIGHT_HAVE_CACHE_L2X0
 	select MIGHT_HAVE_PCI
 	select PCI_DOMAINS if PCI
 	help
@@ -381,6 +386,7 @@
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
 	select GENERIC_IRQ_CHIP
+	select MIGHT_HAVE_CACHE_L2X0
 	select USE_OF
 	select ZONE_DMA
 	help
@@ -633,6 +639,8 @@
 	select GENERIC_GPIO
 	select HAVE_CLK
 	select HAVE_SCHED_CLOCK
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_HAS_CPUFREQ
 	help
 	  This enables support for NVIDIA Tegra based systems (Tegra APX,
@@ -702,7 +710,9 @@
 	select HAVE_CLK
 	select CLKDEV_LOOKUP
 	select HAVE_MACH_CLKDEV
+	select HAVE_SMP
 	select GENERIC_CLOCKEVENTS
+	select MIGHT_HAVE_CACHE_L2X0
 	select NO_IOPORT
 	select SPARSE_IRQ
 	select MULTI_IRQ_HANDLER
@@ -867,16 +877,6 @@
 	  Support for the StrongARM based Digital DNARD machine, also known
 	  as "Shark" (<http://www.shark-linux.de/shark.html>).
 
-config ARCH_TCC_926
-	bool "Telechips TCC ARM926-based systems"
-	select CLKSRC_MMIO
-	select CPU_ARM926T
-	select HAVE_CLK
-	select CLKDEV_LOOKUP
-	select GENERIC_CLOCKEVENTS
-	help
-	  Support for Telechips TCC ARM926-based systems.
-
 config ARCH_U300
 	bool "ST-Ericsson U300 Series"
 	depends on MMU
@@ -904,6 +904,8 @@
 	select CLKDEV_LOOKUP
 	select ARCH_REQUIRE_GPIOLIB
 	select ARCH_HAS_CPUFREQ
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Support for ST-Ericsson's Ux500 architecture
 
@@ -914,6 +916,7 @@
 	select CPU_ARM926T
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Support for the Nomadik platform by ST-Ericsson
@@ -973,6 +976,7 @@
 	select ARM_GIC
 	select ARM_AMBA
 	select ICST
+	select MIGHT_HAVE_CACHE_L2X0
 	select USE_OF
 	help
 	  Support for Xilinx Zynq ARM Cortex A9 Platform
@@ -1059,8 +1063,6 @@
 
 source "arch/arm/plat-spear/Kconfig"
 
-source "arch/arm/plat-tcc/Kconfig"
-
 if ARCH_S3C2410
 source "arch/arm/mach-s3c2410/Kconfig"
 source "arch/arm/mach-s3c2412/Kconfig"
@@ -1125,6 +1127,11 @@
 
 source arch/arm/mm/Kconfig
 
+config ARM_NR_BANKS
+	int
+	default 16 if ARCH_EP93XX
+	default 8
+
 config IWMMXT
 	bool "Enable iWMMXt support"
 	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
@@ -1133,10 +1140,9 @@
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
 
-#  bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
 config XSCALE_PMU
 	bool
-	depends on CPU_XSCALE && !XSCALE_PMU_TIMER
+	depends on CPU_XSCALE
 	default y
 
 config CPU_HAS_PMU
@@ -1245,7 +1251,7 @@
 
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
-	depends on CPU_V7 && SMP
+	depends on CPU_V7
 	help
 	  This option enables the workaround for the 720789 Cortex-A9 (prior to
 	  r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1281,7 +1287,7 @@
 
 config ARM_ERRATA_751472
 	bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
-	depends on CPU_V7 && SMP
+	depends on CPU_V7
 	help
 	  This option enables the workaround for the 751472 Cortex-A9 (prior
 	  to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
@@ -1434,14 +1440,20 @@
 
 source "kernel/time/Kconfig"
 
+config HAVE_SMP
+	bool
+	help
+	  This option should be selected by machines which have an SMP-
+	  capable CPU.
+
+	  The only effect of this option is to make the SMP-related
+	  options available to the user for configuration.
+
 config SMP
 	bool "Symmetric Multi-Processing"
 	depends on CPU_V6K || CPU_V7
 	depends on GENERIC_CLOCKEVENTS
-	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
-		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
-		 ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
-		 ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE || ARCH_HIGHBANK || SOC_IMX6Q
+	depends on HAVE_SMP
 	depends on MMU
 	select USE_GENERIC_SMP_HELPERS
 	select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
@@ -1559,6 +1571,16 @@
 	  accounting to be spread across the timer interval, preventing a
 	  "thundering herd" at every timer tick.
 
+config ARCH_NR_GPIO
+	int
+	default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
+	default 350 if ARCH_U8500
+	default 0
+	help
+	  Maximum number of GPIOs in the system.
+
+	  If unsure, leave the default value.
+
 source kernel/Kconfig.preempt
 
 config HZ
@@ -1971,7 +1993,7 @@
 
 config XIP_KERNEL
 	bool "Kernel Execute-In-Place from ROM"
-	depends on !ZBOOT_ROM
+	depends on !ZBOOT_ROM && !ARM_LPAE
 	help
 	  Execute-In-Place allows the kernel to run from non-volatile storage
 	  directly addressable by the CPU, such as NOR flash. This saves RAM
@@ -2001,7 +2023,7 @@
 
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index dfcf3b0..40319d9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -184,7 +184,6 @@
 machine-$(CONFIG_ARCH_SA1100)		:= sa1100
 machine-$(CONFIG_ARCH_SHARK)		:= shark
 machine-$(CONFIG_ARCH_SHMOBILE) 	:= shmobile
-machine-$(CONFIG_ARCH_TCC8K)		:= tcc8k
 machine-$(CONFIG_ARCH_TEGRA)		:= tegra
 machine-$(CONFIG_ARCH_U300)		:= u300
 machine-$(CONFIG_ARCH_U8500)		:= ux500
@@ -204,7 +203,6 @@
 plat-$(CONFIG_ARCH_MXC)		:= mxc
 plat-$(CONFIG_ARCH_OMAP)	:= omap
 plat-$(CONFIG_ARCH_S3C64XX)	:= samsung
-plat-$(CONFIG_ARCH_TCC_926)	:= tcc
 plat-$(CONFIG_ARCH_ZYNQ)	:= versatile
 plat-$(CONFIG_PLAT_IOP)		:= iop
 plat-$(CONFIG_PLAT_NOMADIK)	:= nomadik
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 21f56ff..cf0a64c 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -126,7 +126,8 @@
 asflags-y := -Wa,-march=all
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
+KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
+		awk 'END{print $$3}')
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index c2effc9..c5d6025 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,6 +659,7 @@
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 #endif
+		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcr	p15, 0, r0, c1, c0, 0	@ load control register
 		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
 		mov	r0, #0
diff --git a/arch/arm/boot/dts/testcases/tests-phandle.dtsi b/arch/arm/boot/dts/testcases/tests-phandle.dtsi
new file mode 100644
index 0000000..ec0c4e6
--- /dev/null
+++ b/arch/arm/boot/dts/testcases/tests-phandle.dtsi
@@ -0,0 +1,37 @@
+
+/ {
+	testcase-data {
+		phandle-tests {
+			provider0: provider0 {
+				#phandle-cells = <0>;
+			};
+
+			provider1: provider1 {
+				#phandle-cells = <1>;
+			};
+
+			provider2: provider2 {
+				#phandle-cells = <2>;
+			};
+
+			provider3: provider3 {
+				#phandle-cells = <3>;
+			};
+
+			consumer-a {
+				phandle-list =	<&provider1 1>,
+						<&provider2 2 0>,
+						<0>,
+						<&provider3 4 4 3>,
+						<&provider2 5 100>,
+						<&provider0>,
+						<&provider1 7>;
+				phandle-list-names = "first", "second", "third";
+
+				phandle-list-bad-phandle = <12345678 0 0>;
+				phandle-list-bad-args = <&provider2 1 0>,
+							<&provider3 0>;
+			};
+		};
+	};
+};
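
These phandle-list properties are meant to be walked from C with the OF
phandle-argument parser; the sketch below shows the assumed calling pattern (the
of_phandle_args-based signature of of_parse_phandle_with_args() and the exact error
codes are assumptions here, while the property and cell names mirror the testcase).

	#include <linux/kernel.h>
	#include <linux/of.h>

	/* Walk a "phandle-list" property like the testcase above, one entry at a
	 * time, until the parser reports the end of the list. */
	static void foo_walk_phandle_list(struct device_node *np)
	{
		struct of_phandle_args args;
		int i, rc;

		for (i = 0; ; i++) {
			rc = of_parse_phandle_with_args(np, "phandle-list",
							"#phandle-cells", i, &args);
			if (rc)
				break;	/* end of list, or a malformed entry */

			pr_info("entry %d: provider %s with %d argument cells\n",
				i, args.np->full_name, args.args_count);
			of_node_put(args.np);
		}
	}
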
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
new file mode 100644
index 0000000..a7c5067
--- /dev/null
+++ b/arch/arm/boot/dts/testcases/tests.dtsi
@@ -0,0 +1 @@
+/include/ "tests-phandle.dtsi"
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 8a614e3..1664610 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -46,3 +46,5 @@
 		};
 	};
 };
+
+/include/ "testcases/tests.dtsi"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 74df9ca..81a933e 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,8 +1,14 @@
 config ARM_GIC
 	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+	bool
+
+config GIC_NON_BANKED
 	bool
 
 config ARM_VIC
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
 	bool
 
 config ARM_VIC_NR
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 410a546..b2dc2dd 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -40,13 +40,36 @@
 #include <linux/slab.h>
 
 #include <asm/irq.h>
+#include <asm/exception.h>
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>
 
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+union gic_base {
+	void __iomem *common_base;
+	void __percpu __iomem **percpu_base;
+};
 
-/* Address of GIC 0 CPU interface */
-void __iomem *gic_cpu_base_addr __read_mostly;
+struct gic_chip_data {
+	unsigned int irq_offset;
+	union gic_base dist_base;
+	union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_conf;
+#endif
+#ifdef CONFIG_IRQ_DOMAIN
+	struct irq_domain domain;
+#endif
+	unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+	void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 /*
  * Supported arch specific GIC irq extension.
@@ -67,16 +90,48 @@
 
 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
 
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+	return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+	return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+					 void __iomem *(*f)(union gic_base *))
+{
+	data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d)	((d)->dist_base.common_base)
+#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d,f)
+#endif
+
 static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
 	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-	return gic_data->dist_base;
+	return gic_data_dist_base(gic_data);
 }
 
 static inline void __iomem *gic_cpu_base(struct irq_data *d)
 {
 	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-	return gic_data->cpu_base;
+	return gic_data_cpu_base(gic_data);
 }
 
 static inline unsigned int gic_irq(struct irq_data *d)
@@ -215,6 +270,32 @@
 #define gic_set_wake	NULL
 #endif
 
+asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	u32 irqstat, irqnr;
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+	do {
+		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		irqnr = irqstat & ~0x1c00;
+
+		if (likely(irqnr > 15 && irqnr < 1021)) {
+			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+			handle_IRQ(irqnr, regs);
+			continue;
+		}
+		if (irqnr < 16) {
+			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+			handle_IPI(irqnr, regs);
+#endif
+			continue;
+		}
+		break;
+	} while (1);
+}
+
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
@@ -225,7 +306,7 @@
 	chained_irq_enter(chip, desc);
 
 	raw_spin_lock(&irq_controller_lock);
-	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
+	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
 	raw_spin_unlock(&irq_controller_lock);
 
 	gic_irq = (status & 0x3ff);
@@ -270,7 +351,7 @@
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
 	struct irq_domain *domain = &gic->domain;
-	void __iomem *base = gic->dist_base;
+	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = 0;
 
 #ifdef CONFIG_SMP
@@ -330,8 +411,8 @@
 
 static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
-	void __iomem *dist_base = gic->dist_base;
-	void __iomem *base = gic->cpu_base;
+	void __iomem *dist_base = gic_data_dist_base(gic);
+	void __iomem *base = gic_data_cpu_base(gic);
 	int i;
 
 	/*
@@ -368,7 +449,7 @@
 		BUG();
 
 	gic_irqs = gic_data[gic_nr].gic_irqs;
-	dist_base = gic_data[gic_nr].dist_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
 
 	if (!dist_base)
 		return;
@@ -403,7 +484,7 @@
 		BUG();
 
 	gic_irqs = gic_data[gic_nr].gic_irqs;
-	dist_base = gic_data[gic_nr].dist_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
 
 	if (!dist_base)
 		return;
@@ -439,8 +520,8 @@
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
-	dist_base = gic_data[gic_nr].dist_base;
-	cpu_base = gic_data[gic_nr].cpu_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
 
 	if (!dist_base || !cpu_base)
 		return;
@@ -465,8 +546,8 @@
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
-	dist_base = gic_data[gic_nr].dist_base;
-	cpu_base = gic_data[gic_nr].cpu_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
 
 	if (!dist_base || !cpu_base)
 		return;
@@ -491,6 +572,11 @@
 	int i;
 
 	for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+		/* Skip over unused GICs */
+		if (!gic_data[i].get_base)
+			continue;
+#endif
 		switch (cmd) {
 		case CPU_PM_ENTER:
 			gic_cpu_save(i);
@@ -564,8 +650,9 @@
 #endif
 };
 
-void __init gic_init(unsigned int gic_nr, int irq_start,
-	void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+			   void __iomem *dist_base, void __iomem *cpu_base,
+			   u32 percpu_offset)
 {
 	struct gic_chip_data *gic;
 	struct irq_domain *domain;
@@ -575,8 +662,36 @@
 
 	gic = &gic_data[gic_nr];
 	domain = &gic->domain;
-	gic->dist_base = dist_base;
-	gic->cpu_base = cpu_base;
+#ifdef CONFIG_GIC_NON_BANKED
+	if (percpu_offset) { /* Frankein-GIC without banked registers... */
+		unsigned int cpu;
+
+		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+		if (WARN_ON(!gic->dist_base.percpu_base ||
+			    !gic->cpu_base.percpu_base)) {
+			free_percpu(gic->dist_base.percpu_base);
+			free_percpu(gic->cpu_base.percpu_base);
+			return;
+		}
+
+		for_each_possible_cpu(cpu) {
+			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+		}
+
+		gic_set_base_accessor(gic, gic_get_percpu_base);
+	} else
+#endif
+	{			/* Normal, sane GIC... */
+		WARN(percpu_offset,
+		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+		     percpu_offset);
+		gic->dist_base.common_base = dist_base;
+		gic->cpu_base.common_base = cpu_base;
+		gic_set_base_accessor(gic, gic_get_common_base);
+	}
 
 	/*
 	 * For primary GICs, skip over SGIs.
@@ -584,8 +699,6 @@
 	 */
 	domain->hwirq_base = 32;
 	if (gic_nr == 0) {
-		gic_cpu_base_addr = cpu_base;
-
 		if ((irq_start & 31) > 0) {
 			domain->hwirq_base = 16;
 			if (irq_start != -1)
@@ -597,7 +710,7 @@
 	 * Find out how many interrupts are supported.
 	 * The GIC only supports up to 1020 interrupt sources.
 	 */
-	gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
+	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
 	gic_irqs = (gic_irqs + 1) * 32;
 	if (gic_irqs > 1020)
 		gic_irqs = 1020;
@@ -645,7 +758,7 @@
 	dsb();
 
 	/* this always happens on GIC0 */
-	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
+	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 }
 #endif
 
@@ -656,6 +769,7 @@
 {
 	void __iomem *cpu_base;
 	void __iomem *dist_base;
+	u32 percpu_offset;
 	int irq;
 	struct irq_domain *domain = &gic_data[gic_cnt].domain;
 
@@ -668,9 +782,12 @@
 	cpu_base = of_iomap(node, 1);
 	WARN(!cpu_base, "unable to map gic cpu registers\n");
 
+	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+		percpu_offset = 0;
+
 	domain->of_node = of_node_get(node);
 
-	gic_init(gic_cnt, -1, dist_base, cpu_base);
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
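
With gic_handle_irq() available as a C entry point and gic_init_bases() taking an
optional per-cpu offset, a platform can wire the GIC up roughly as sketched below; the
board name, addresses and IRQ start are hypothetical, and a banked GIC simply passes 0
as the last argument.

	#include <linux/init.h>
	#include <linux/io.h>
	#include <asm/hardware/gic.h>
	#include <asm/mach/arch.h>

	static void __init foo_init_irq(void)
	{
		/* Made-up physical addresses for distributor and CPU interface. */
		void __iomem *dist = ioremap(0x1e001000, 0x1000);
		void __iomem *cpu  = ioremap(0x1e000100, 0x100);

		/* Primary GIC, interrupts from 29, banked registers (offset 0). */
		gic_init_bases(0, 29, dist, cpu, 0);
	}

	MACHINE_START(FOO, "Hypothetical Cortex-A9 board")
		.init_irq	= foo_init_irq,
		.handle_irq	= gic_handle_irq,	/* C entry, no entry-macro.S needed */
	MACHINE_END
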
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index f407a6b..d8e44a4 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -221,17 +221,6 @@
  */
 #define MCODE_BUFF_PER_REQ	256
 
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req)	do { \
-				_emit_END(0, (req)->mc_cpu); \
-				(req)->mc_len = 0; \
-			} while (0)
-
 /* If the _pl330_req is available to the client */
 #define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
 
@@ -301,8 +290,10 @@
 	struct pl330_dmac *dmac;
 	/* Only two at a time */
 	struct _pl330_req req[2];
-	/* Index of the last submitted request */
+	/* Index of the last enqueued request */
 	unsigned lstenq;
+	/* Index of the last submitted request or -1 if the DMA is stopped */
+	int req_running;
 };
 
 enum pl330_dmac_state {
@@ -778,6 +769,22 @@
 	writel(0, regs + DBGCMD);
 }
 
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+	struct _pl330_req *req = &thrd->req[idx];
+
+	_emit_END(0, req->mc_cpu);
+	req->mc_len = 0;
+
+	thrd->req_running = -1;
+}
+
 static inline u32 _state(struct pl330_thread *thrd)
 {
 	void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@
 	}
 }
 
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
-		struct _pl330_req *req)
-{
-	void __iomem *regs = thrd->dmac->pinfo->base;
-	u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
-	if (IS_FREE(req))
-		return false;
-
-	return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
-	if (_req_active(thrd, &thrd->req[0]))
-		return 1; /* First req active */
-
-	if (_req_active(thrd, &thrd->req[1]))
-		return 2; /* Second req active */
-
-	return 0;
-}
-
 static void _stop(struct pl330_thread *thrd)
 {
 	void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@
 	struct _arg_GO go;
 	unsigned ns;
 	u8 insn[6] = {0, 0, 0, 0, 0, 0};
+	int idx;
 
 	/* Return if already ACTIVE */
 	if (_state(thrd) != PL330_STATE_STOPPED)
 		return true;
 
-	if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
-		req = &thrd->req[1 - thrd->lstenq];
-	else if (!IS_FREE(&thrd->req[thrd->lstenq]))
-		req = &thrd->req[thrd->lstenq];
-	else
-		req = NULL;
+	idx = 1 - thrd->lstenq;
+	if (!IS_FREE(&thrd->req[idx]))
+		req = &thrd->req[idx];
+	else {
+		idx = thrd->lstenq;
+		if (!IS_FREE(&thrd->req[idx]))
+			req = &thrd->req[idx];
+		else
+			req = NULL;
+	}
 
 	/* Return if no request */
 	if (!req || !req->r)
@@ -933,6 +920,8 @@
 	/* Only manager can execute GO */
 	_execute_DBGINSN(thrd, insn, true);
 
+	thrd->req_running = idx;
+
 	return true;
 }
 
@@ -1382,8 +1371,8 @@
 
 			thrd->req[0].r = NULL;
 			thrd->req[1].r = NULL;
-			MARK_FREE(&thrd->req[0]);
-			MARK_FREE(&thrd->req[1]);
+			mark_free(thrd, 0);
+			mark_free(thrd, 1);
 
 			/* Clear the reset flag */
 			pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@
 
 			thrd = &pl330->channels[id];
 
-			active = _thrd_active(thrd);
-			if (!active) /* Aborted */
+			active = thrd->req_running;
+			if (active == -1) /* Aborted */
 				continue;
 
-			active -= 1;
-
 			rqdone = &thrd->req[active];
-			MARK_FREE(rqdone);
+			mark_free(thrd, active);
 
 			/* Get going again ASAP */
 			_start(thrd);
@@ -1480,13 +1467,19 @@
 
 	/* Now that we are in no hurry, do the callbacks */
 	while (!list_empty(&pl330->req_done)) {
+		struct pl330_req *r;
+
 		rqdone = container_of(pl330->req_done.next,
 					struct _pl330_req, rqd);
 
 		list_del_init(&rqdone->rqd);
 
+		/* Detach the req */
+		r = rqdone->r;
+		rqdone->r = NULL;
+
 		spin_unlock_irqrestore(&pl330->lock, flags);
-		_callback(rqdone->r, PL330_ERR_NONE);
+		_callback(r, PL330_ERR_NONE);
 		spin_lock_irqsave(&pl330->lock, flags);
 	}
 
@@ -1509,7 +1502,7 @@
 	struct pl330_thread *thrd = ch_id;
 	struct pl330_dmac *pl330;
 	unsigned long flags;
-	int ret = 0, active;
+	int ret = 0, active = thrd->req_running;
 
 	if (!thrd || thrd->free || thrd->dmac->state == DYING)
 		return -EINVAL;
@@ -1525,28 +1518,24 @@
 
 		thrd->req[0].r = NULL;
 		thrd->req[1].r = NULL;
-		MARK_FREE(&thrd->req[0]);
-		MARK_FREE(&thrd->req[1]);
+		mark_free(thrd, 0);
+		mark_free(thrd, 1);
 		break;
 
 	case PL330_OP_ABORT:
-		active = _thrd_active(thrd);
-
 		/* Make sure the channel is stopped */
 		_stop(thrd);
 
 		/* ABORT is only for the active req */
-		if (!active)
+		if (active == -1)
 			break;
 
-		active--;
-
 		thrd->req[active].r = NULL;
-		MARK_FREE(&thrd->req[active]);
+		mark_free(thrd, active);
 
 		/* Start the next */
 	case PL330_OP_START:
-		if (!_thrd_active(thrd) && !_start(thrd))
+		if ((active == -1) && !_start(thrd))
 			ret = -EIO;
 		break;
 
@@ -1587,14 +1576,13 @@
 	else
 		pstatus->faulting = false;
 
-	active = _thrd_active(thrd);
+	active = thrd->req_running;
 
-	if (!active) {
+	if (active == -1) {
 		/* Indicate that the thread is not running */
 		pstatus->top_req = NULL;
 		pstatus->wait_req = NULL;
 	} else {
-		active--;
 		pstatus->top_req = thrd->req[active].r;
 		pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
 					? thrd->req[1 - active].r : NULL;
@@ -1659,9 +1647,9 @@
 				thrd->free = false;
 				thrd->lstenq = 1;
 				thrd->req[0].r = NULL;
-				MARK_FREE(&thrd->req[0]);
+				mark_free(thrd, 0);
 				thrd->req[1].r = NULL;
-				MARK_FREE(&thrd->req[1]);
+				mark_free(thrd, 1);
 				break;
 			}
 		}
@@ -1767,14 +1755,14 @@
 	thrd->req[0].mc_bus = pl330->mcode_bus
 				+ (thrd->id * pi->mcbufsz);
 	thrd->req[0].r = NULL;
-	MARK_FREE(&thrd->req[0]);
+	mark_free(thrd, 0);
 
 	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
 				+ pi->mcbufsz / 2;
 	thrd->req[1].mc_bus = thrd->req[0].mc_bus
 				+ pi->mcbufsz / 2;
 	thrd->req[1].r = NULL;
-	MARK_FREE(&thrd->req[1]);
+	mark_free(thrd, 1);
 }
 
 static int dmac_alloc_threads(struct pl330_dmac *pl330)
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index 2393b5b..8794a34 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -143,7 +143,6 @@
 }
 
 static struct clock_event_device sp804_clockevent = {
-	.shift		= 32,
 	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_mode	= sp804_set_mode,
 	.set_next_event	= sp804_set_next_event,
@@ -169,13 +168,9 @@
 
 	clkevt_base = base;
 	clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
-
 	evt->name = name;
 	evt->irq = irq;
-	evt->mult = div_sc(rate, NSEC_PER_SEC, evt->shift);
-	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
-	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
 
 	setup_irq(irq, &sp804_timer_irq);
-	clockevents_register_device(evt);
+	clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
 }
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 01f18a4..dcb004a 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -19,17 +19,22 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/syscore_ops.h>
 #include <linux/device.h>
 #include <linux/amba/bus.h>
 
+#include <asm/exception.h>
 #include <asm/mach/irq.h>
 #include <asm/hardware/vic.h>
 
-#ifdef CONFIG_PM
 /**
  * struct vic_device - VIC PM device
  * @irq: The IRQ number for the base of the VIC.
@@ -40,6 +45,7 @@
  * @int_enable: Save for VIC_INT_ENABLE.
  * @soft_int: Save for VIC_INT_SOFT.
  * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
  */
 struct vic_device {
 	void __iomem	*base;
@@ -50,13 +56,13 @@
 	u32		int_enable;
 	u32		soft_int;
 	u32		protect;
+	struct irq_domain domain;
 };
 
 /* we cannot allocate memory when VICs are initially registered */
 static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
 
 static int vic_id;
-#endif /* CONFIG_PM */
 
 /**
  * vic_init2 - common initialisation code
@@ -156,39 +162,50 @@
 	return 0;
 }
 late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
 
 /**
- * vic_pm_register - Register a VIC for later power management control
+ * vic_register() - Register a VIC.
  * @base: The base address of the VIC.
  * @irq: The base IRQ for the VIC.
  * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
  *
  * Register the VIC with the system device tree so that it can be notified
  * of suspend and resume requests and ensure that the correct actions are
  * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
  */
-static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
+static void __init vic_register(void __iomem *base, unsigned int irq,
+				u32 resume_sources, struct device_node *node)
 {
 	struct vic_device *v;
 
-	if (vic_id >= ARRAY_SIZE(vic_devices))
+	if (vic_id >= ARRAY_SIZE(vic_devices)) {
 		printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
-	else {
-		v = &vic_devices[vic_id];
-		v->base = base;
-		v->resume_sources = resume_sources;
-		v->irq = irq;
-		vic_id++;
+		return;
 	}
+
+	v = &vic_devices[vic_id];
+	v->base = base;
+	v->resume_sources = resume_sources;
+	v->irq = irq;
+	vic_id++;
+
+	v->domain.irq_base = irq;
+	v->domain.nr_irq = 32;
+#ifdef CONFIG_OF_IRQ
+	v->domain.of_node = of_node_get(node);
+#endif /* CONFIG_OF */
+	v->domain.ops = &irq_domain_simple_ops;
+	irq_domain_add(&v->domain);
 }
-#else
-static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
-#endif /* CONFIG_PM */
 
 static void vic_ack_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->irq & 31;
+	unsigned int irq = d->hwirq;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 	/* moreover, clear the soft-triggered, in case it was the reason */
 	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
@@ -197,14 +214,14 @@
 static void vic_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->irq & 31;
+	unsigned int irq = d->hwirq;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 }
 
 static void vic_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->irq & 31;
+	unsigned int irq = d->hwirq;
 	writel(1 << irq, base + VIC_INT_ENABLE);
 }
 
@@ -226,7 +243,7 @@
 static int vic_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct vic_device *v = vic_from_irq(d->irq);
-	unsigned int off = d->irq & 31;
+	unsigned int off = d->hwirq;
 	u32 bit = 1 << off;
 
 	if (!v)
@@ -301,7 +318,7 @@
  *  and 020 within the page. We call this "second block".
  */
 static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
-				u32 vic_sources)
+			       u32 vic_sources, struct device_node *node)
 {
 	unsigned int i;
 	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
@@ -328,17 +345,12 @@
 	}
 
 	vic_set_irq_sources(base, irq_start, vic_sources);
+	vic_register(base, irq_start, 0, node);
 }
 
-/**
- * vic_init - initialise a vectored interrupt controller
- * @base: iomem base address
- * @irq_start: starting interrupt number, must be muliple of 32
- * @vic_sources: bitmask of interrupt sources to allow
- * @resume_sources: bitmask of interrupt sources to allow for resume
- */
-void __init vic_init(void __iomem *base, unsigned int irq_start,
-		     u32 vic_sources, u32 resume_sources)
+static void __init __vic_init(void __iomem *base, unsigned int irq_start,
+			      u32 vic_sources, u32 resume_sources,
+			      struct device_node *node)
 {
 	unsigned int i;
 	u32 cellid = 0;
@@ -356,7 +368,7 @@
 
 	switch(vendor) {
 	case AMBA_VENDOR_ST:
-		vic_init_st(base, irq_start, vic_sources);
+		vic_init_st(base, irq_start, vic_sources, node);
 		return;
 	default:
 		printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
@@ -375,5 +387,81 @@
 
 	vic_set_irq_sources(base, irq_start, vic_sources);
 
-	vic_pm_register(base, irq_start, resume_sources);
+	vic_register(base, irq_start, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+		     u32 vic_sources, u32 resume_sources)
+{
+	__vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+	void __iomem *regs;
+	int irq_base;
+
+	if (WARN(parent, "non-root VICs are not supported"))
+		return -EINVAL;
+
+	regs = of_iomap(node, 0);
+	if (WARN_ON(!regs))
+		return -EIO;
+
+	irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+	if (WARN_ON(irq_base < 0))
+		goto out_unmap;
+
+	__vic_init(regs, irq_base, ~0, ~0, node);
+
+	return 0;
+
+ out_unmap:
+	iounmap(regs);
+
+	return -EIO;
+}
+#endif /* CONFIG_OF */
+
+/*
+ * Handle each interrupt in a single VIC.  Returns non-zero if we've
+ * handled at least one interrupt.  This does a single read of the
+ * status register and handles all interrupts in order from LSB first.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+	u32 stat, irq;
+	int handled = 0;
+
+	stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
+	while (stat) {
+		irq = ffs(stat) - 1;
+		handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs);
+		stat &= ~(1 << irq);
+		handled = 1;
+	}
+
+	return handled;
+}
+
+/*
+ * Keep iterating over all registered VIC's until there are no pending
+ * interrupts.
+ */
+asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+	int i, handled;
+
+	do {
+		for (i = 0, handled = 0; i < vic_id; ++i)
+			handled |= handle_one_vic(&vic_devices[i], regs);
+	} while (handled);
 }
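
With vic_of_init() and vic_handle_irq() in place, a device-tree platform can probe its
VICs generically; in the sketch below the compatible string and the use of of_irq_init()
as the dispatcher are assumptions, only vic_of_init() and the of_device_id/.data pattern
come from the code above.

	#include <linux/init.h>
	#include <linux/of_irq.h>
	#include <asm/hardware/vic.h>

	/* Assumed binding: one or more root VICs matched by compatible string. */
	static const struct of_device_id foo_irq_match[] __initconst = {
		{ .compatible = "arm,pl190-vic", .data = vic_of_init },
		{ /* sentinel */ }
	};

	static void __init foo_init_irq(void)
	{
		of_irq_init(foo_irq_match);
	}

	/* The machine descriptor then routes IRQ exceptions to the C handler:
	 *	.init_irq	= foo_init_irq,
	 *	.handle_irq	= vic_handle_irq,
	 */
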
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 11a4192..cf497ce 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -18,9 +18,10 @@
 CONFIG_ARCH_IMX_V4_V5=y
 CONFIG_ARCH_MX1ADS=y
 CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
 CONFIG_MACH_MX21ADS=y
 CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
 CONFIG_MACH_MX27ADS=y
 CONFIG_MACH_PCM038=y
 CONFIG_MACH_CPUIMX27=y
@@ -72,17 +73,16 @@
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
 CONFIG_MTD_UBI=y
 CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
 CONFIG_DM9000=y
+CONFIG_SMC91X=y
 CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +100,7 @@
 CONFIG_I2C_IMX=y
 CONFIG_SPI=y
 CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
 CONFIG_W1=y
 CONFIG_W1_MASTER_MXC=y
 CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +140,7 @@
 CONFIG_MMC_MXC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_MC13783=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 29035e8..b6e65de 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -187,6 +187,17 @@
 #endif
 
 /*
+ * Instruction barrier
+ */
+	.macro	instr_sync
+#if __LINUX_ARM_ARCH__ >= 7
+	isb
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c5, 4
+#endif
+	.endm
+
+/*
  * SMP data memory barrier
  */
 	.macro	smp_dmb mode
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 9abe7a0..fac79dc 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -32,7 +32,6 @@
 
 #define __BUG(__file, __line, __value)				\
 do {								\
-	BUILD_BUG_ON(sizeof(struct bug_entry) != 12);		\
 	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\
 		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
 		"2:\t.asciz " #__file "\n" 			\
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 0000000..a0ada3e
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,179 @@
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include	<asm/io.h>
+
+/* The registers' definition is from section 3.2 of
+ * Embedded Cross Trigger Revision: r0p0
+ */
+#define		CTICONTROL		0x000
+#define		CTISTATUS		0x004
+#define		CTILOCK			0x008
+#define		CTIPROTECTION		0x00C
+#define		CTIINTACK		0x010
+#define		CTIAPPSET		0x014
+#define		CTIAPPCLEAR		0x018
+#define		CTIAPPPULSE		0x01c
+#define		CTIINEN			0x020
+#define		CTIOUTEN		0x0A0
+#define		CTITRIGINSTATUS		0x130
+#define		CTITRIGOUTSTATUS	0x134
+#define		CTICHINSTATUS		0x138
+#define		CTICHOUTSTATUS		0x13c
+#define		CTIPERIPHID0		0xFE0
+#define		CTIPERIPHID1		0xFE4
+#define		CTIPERIPHID2		0xFE8
+#define		CTIPERIPHID3		0xFEC
+#define		CTIPCELLID0		0xFF0
+#define		CTIPCELLID1		0xFF4
+#define		CTIPCELLID2		0xFF8
+#define		CTIPCELLID3		0xFFC
+
+/* The below are from section 3.6.4 of
+ * CoreSight v1.0 Architecture Specification
+ */
+#define		LOCKACCESS		0xFB0
+#define		LOCKSTATUS		0xFB4
+
+/* writing this value to LOCKACCESS will unlock the module; any
+ * other value will lock the module
+ */
+#define		LOCKCODE		0xC5ACCE55
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: trigger out number which will cause
+ *	the @irq to happen
+ *
+ * cti struct used to operate cti registers.
+ */
+struct cti {
+	void __iomem *base;
+	int irq;
+	int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: trigger out number which will cause
+ *	the @irq to happen
+ *
+ * called by machine code to pass the board dependent
+ * @base, @irq and @trig_out to cti.
+ */
+static inline void cti_init(struct cti *cti,
+	void __iomem *base, int irq, int trig_out)
+{
+	cti->base = base;
+	cti->irq  = irq;
+	cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use the @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @channel: channel number
+ *
+ * This function maps one trigger in of @trig_in to one trigger
+ * out of @trig_out using the channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+	int trig_in, int trig_out, int chan)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINEN + trig_in * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIINEN + trig_in * 4);
+
+	val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_enable(struct cti *cti)
+{
+	__raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * disable the cti module
+ */
+static inline void cti_disable(struct cti *cti)
+{
+	__raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * clear the cti irq
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINTACK);
+	val |= BIT(cti->trig_out_for_irq);
+	__raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * unlock the cti module, or else any writes to the cti
+ * module are not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (val & 1) {
+		val = LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * lock the cti module, so any writes to the cti
+ * module will not be allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (!(val & 1)) {
+		val = ~LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+#endif
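
A hedged sketch of the intended call sequence for these CTI helpers; the base address,
IRQ number, trigger numbers and channel are all hypothetical placeholders.

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <asm/cti.h>

	static struct cti foo_cti;

	/* Route trigger input 1 to trigger output 6 over channel 2, so that the
	 * source event (e.g. a PMU overflow) is raised as IRQ 34. */
	static void __init foo_cti_setup(void)
	{
		void __iomem *base = ioremap(0x2c001000, 0x1000);	/* made-up address */

		cti_init(&foo_cti, base, 34, 6);
		cti_unlock(&foo_cti);		/* must be unlocked before programming */
		cti_map_trigger(&foo_cti, 1, 6, 2);
		cti_enable(&foo_cti);
	}

	/* The handler for IRQ 34 must acknowledge the output trigger. */
	static irqreturn_t foo_cti_isr(int irq, void *dev_id)
	{
		cti_irq_ack(&foo_cti);
		/* ... handle the originating event ... */
		return IRQ_HANDLED;
	}
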
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
new file mode 100644
index 0000000..0df7a2c
--- /dev/null
+++ b/arch/arm/include/asm/edac.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2011 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing.  It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+	unsigned int *virt_addr = va;
+	unsigned int temp, temp2;
+	unsigned int i;
+
+	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("\n"
+			"1:	ldrex	%0, [%2]\n"
+			"	strex	%1, %0, [%2]\n"
+			"	teq	%1, #0\n"
+			"	bne	1b\n"
+			: "=&r"(temp), "=&r"(temp2)
+			: "r"(virt_addr)
+			: "cc");
+	}
+#endif
+}
+
+#endif
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
deleted file mode 100644
index 3ceb85e..0000000
--- a/arch/arm/include/asm/entry-macro-vic2.S
+++ /dev/null
@@ -1,57 +0,0 @@
-/* arch/arm/include/asm/entry-macro-vic2.S
- *
- * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * Low-level IRQ helper macros for a device with two VICs
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
-*/
-
-/* This should be included from <mach/entry-macro.S> with the necessary
- * defines for virtual addresses and IRQ bases for the two vics.
- *
- * The code needs the following defined:
- *	IRQ_VIC0_BASE	IRQ number of VIC0's first IRQ
- *	IRQ_VIC1_BASE	IRQ number of VIC1's first IRQ
- *	VA_VIC0		Virtual address of VIC0
- *	VA_VIC1		Virtual address of VIC1
- *
- * Note, code assumes VIC0's virtual address is an ARM immediate constant
- * away from VIC1.
-*/
-
-#include <asm/hardware/vic.h>
-
-	.macro	disable_fiq
-	.endm
-
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =VA_VIC0
-	.endm
-
-	.macro	arch_ret_to_user, tmp1, tmp2
-	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	@ check the vic0
-	mov	\irqnr, #IRQ_VIC0_BASE + 31
-	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
-	teq	\irqstat, #0
-
-	@ otherwise try vic1
-	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	clzne	\irqstat, \irqstat
-	subne	\irqnr, \irqnr, \irqstat
-	.endm
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
index 11ad0bf..7151753 100644
--- a/arch/arm/include/asm/gpio.h
+++ b/arch/arm/include/asm/gpio.h
@@ -1,6 +1,10 @@
 #ifndef _ARCH_ARM_GPIO_H
 #define _ARCH_ARM_GPIO_H
 
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIO CONFIG_ARCH_NR_GPIO
+#endif
+
 /* not all ARM platforms necessarily support this API ... */
 #include <mach/gpio.h>
 
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index ddf07a9..436e60b 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -27,23 +27,6 @@
 
 #define arch_irq_stat_cpu	smp_irq_stat_cpu
 
-#if NR_IRQS > 512
-#define HARDIRQ_BITS	10
-#elif NR_IRQS > 256
-#define HARDIRQ_BITS	9
-#else
-#define HARDIRQ_BITS	8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have space
- * for potentially all IRQ sources in the system nesting
- * on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
deleted file mode 100644
index 74ebc80..0000000
--- a/arch/arm/include/asm/hardware/entry-macro-gic.S
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-gic.S
- *
- * Low-level IRQ helper macros for GIC
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/hardware/gic.h>
-
-#ifndef HAVE_GET_IRQNR_PREAMBLE
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =gic_cpu_base_addr
-	ldr	\base, [\base]
-	.endm
-#endif
-
-/*
- * The interrupt numbering scheme is defined in the
- * interrupt controller spec.  To wit:
- *
- * Interrupts 0-15 are IPI
- * 16-31 are local.  We allow 30 to be used for the watchdog.
- * 32-1020 are global
- * 1021-1022 are reserved
- * 1023 is "spurious" (no interrupt)
- *
- * A simple read from the controller will tell us the number of the highest
- * priority enabled interrupt.  We then just need to check whether it is in the
- * valid range for an IRQ (30-1020 inclusive).
- */
-
-	.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	ldr     \irqstat, [\base, #GIC_CPU_INTACK]
-	/* bits 12-10 = src CPU, 9-0 = int # */
-
-	ldr	\tmp, =1021
-	bic     \irqnr, \irqstat, #0x1c00
-	cmp     \irqnr, #15
-	cmpcc	\irqnr, \irqnr
-	cmpne	\irqnr, \tmp
-	cmpcs	\irqnr, \irqnr
-	.endm
-
-/* We assume that irqstat (the raw value of the IRQ acknowledge
- * register) is preserved from the macro above.
- * If there is an IPI, we immediately signal end of interrupt on the
- * controller, since this requires the original irqstat value which
- * we won't easily be able to recreate later.
- */
-
-	.macro test_for_ipi, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #16
-	strcc	\irqstat, [\base, #GIC_CPU_EOI]
-	cmpcs	\irqnr, \irqnr
-	.endm
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3e91f22..4bdfe00 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -36,30 +36,22 @@
 #include <linux/irqdomain.h>
 struct device_node;
 
-extern void __iomem *gic_cpu_base_addr;
 extern struct irq_chip gic_arch_extn;
 
-void gic_init(unsigned int, int, void __iomem *, void __iomem *);
+void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
+		    u32 offset);
 int gic_of_init(struct device_node *node, struct device_node *parent);
 void gic_secondary_init(unsigned int);
+void gic_handle_irq(struct pt_regs *regs);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
 
-struct gic_chip_data {
-	void __iomem *dist_base;
-	void __iomem *cpu_base;
-#ifdef CONFIG_CPU_PM
-	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
-	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
-	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
-	u32 __percpu *saved_ppi_enable;
-	u32 __percpu *saved_ppi_conf;
-#endif
-#ifdef CONFIG_IRQ_DOMAIN
-	struct irq_domain domain;
-#endif
-	unsigned int gic_irqs;
-};
+static inline void gic_init(unsigned int nr, int start,
+			    void __iomem *dist , void __iomem *cpu)
+{
+	gic_init_bases(nr, start, dist, cpu, 0);
+}
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 5daea29..077c323 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -234,6 +234,7 @@
 void iop3xx_map_io(void);
 void iop_init_cp6_handler(void);
 void iop_init_time(unsigned long tickrate);
+void iop3xx_restart(char, const char *);
 
 static inline u32 read_tmr0(void)
 {
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index 5d72550..f42ebd6 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -41,7 +41,15 @@
 #define VIC_PL192_VECT_ADDR		0xF00
 
 #ifndef __ASSEMBLY__
-void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-#endif
+#include <linux/compiler.h>
+#include <linux/types.h>
 
+struct device_node;
+struct pt_regs;
+
+void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
+int vic_of_init(struct device_node *node, struct device_node *parent);
+void vic_handle_irq(struct pt_regs *regs);
+
+#endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
new file mode 100644
index 0000000..bf863ed
--- /dev/null
+++ b/arch/arm/include/asm/idmap.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_IDMAP_H
+#define __ASM_IDMAP_H
+
+#include <linux/compiler.h>
+#include <asm/pgtable.h>
+
+/* Tag a function as requiring to be executed via an identity mapping. */
+#define __idmap __section(.idmap.text) noinline notrace
+
+extern pgd_t *idmap_pgd;
+
+void setup_mm_for_reboot(void);
+
+#endif	/* __ASM_IDMAP_H */
diff --git a/arch/arm/include/asm/ipcbuf.h b/arch/arm/include/asm/ipcbuf.h
index 9768397..84c7e51 100644
--- a/arch/arm/include/asm/ipcbuf.h
+++ b/arch/arm/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __ASMARM_IPCBUF_H
-#define __ASMARM_IPCBUF_H
-
-/*
- * The ipc64_perm structure for arm architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* __ASMARM_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 2b0efc3..d7692ca 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -19,7 +19,7 @@
 	unsigned int		nr;		/* architecture number	*/
 	const char		*name;		/* architecture name	*/
 	unsigned long		atag_offset;	/* tagged list (relative) */
-	const char		**dt_compat;	/* array of device tree
+	const char *const 	*dt_compat;	/* array of device tree
 						 * 'compatible' strings	*/
 
 	unsigned int		nr_irqs;	/* number of IRQs */
@@ -31,10 +31,10 @@
 	unsigned int		video_start;	/* start of video RAM	*/
 	unsigned int		video_end;	/* end of video RAM	*/
 
-	unsigned int		reserve_lp0 :1;	/* never has lp0	*/
-	unsigned int		reserve_lp1 :1;	/* never has lp1	*/
-	unsigned int		reserve_lp2 :1;	/* never has lp2	*/
-	unsigned int		soft_reboot :1;	/* soft reboot		*/
+	unsigned char		reserve_lp0 :1;	/* never has lp0	*/
+	unsigned char		reserve_lp1 :1;	/* never has lp1	*/
+	unsigned char		reserve_lp2 :1;	/* never has lp2	*/
+	char			restart_mode;	/* default restart mode	*/
 	void			(*fixup)(struct tag *, char **,
 					 struct meminfo *);
 	void			(*reserve)(void);/* reserve mem blocks	*/
@@ -46,6 +46,7 @@
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	void			(*handle_irq)(struct pt_regs *);
 #endif
+	void			(*restart)(char, const char *);
 };
 
 /*
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index d5adaae..f73c908 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -10,8 +10,6 @@
 #ifndef __ASM_ARM_MACH_TIME_H
 #define __ASM_ARM_MACH_TIME_H
 
-#include <linux/sysdev.h>
-
 /*
  * This is our kernel timer structure.
  *
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
new file mode 100644
index 0000000..c0efdd6
--- /dev/null
+++ b/arch/arm/include/asm/opcodes.h
@@ -0,0 +1,20 @@
+/*
+ *  arch/arm/include/asm/opcodes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL   0
+#define ARM_OPCODE_CONDTEST_PASS   1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+#endif /* __ASM_ARM_OPCODES_H */
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ca94653..97b440c 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,7 +151,11 @@
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-types.h>
+#else
 #include <asm/pgtable-2level-types.h>
+#endif
 
 #endif /* CONFIG_MMU */
 
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e382..99cfe36 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -32,7 +32,4 @@
 extern enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void);
 
-extern int
-armpmu_get_max_events(void);
-
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 3e08fd3..943504f 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -25,12 +25,34 @@
 #define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
 #define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
 
+#ifdef CONFIG_ARM_LPAE
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+	free_page((unsigned long)pmd);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+}
+
+#else	/* !CONFIG_ARM_LPAE */
+
 /*
  * Since we have only two-level page tables, these are trivial
  */
 #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd)		do { } while (0)
-#define pgd_populate(mm,pmd,pte)	BUG()
+#define pud_populate(mm,pmd,pte)	BUG()
+
+#endif	/* CONFIG_ARM_LPAE */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -109,7 +131,9 @@
 {
 	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
 	pmdp[0] = __pmd(pmdval);
+#ifndef CONFIG_ARM_LPAE
 	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+#endif
 	flush_pmd_entry(pmdp);
 }
 
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 470457e..2317a71 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,4 +140,45 @@
 #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
 #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
+#ifndef __ASSEMBLY__
+
+/*
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
+ */
+#define pud_none(pud)		(0)
+#define pud_bad(pud)		(0)
+#define pud_present(pud)	(1)
+#define pud_clear(pudp)		do { } while (0)
+#define set_pud(pud,pudp)	do { } while (0)
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+	return (pmd_t *)pud;
+}
+
+#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps)		\
+	do {				\
+		pmdpd[0] = pmdps[0];	\
+		pmdpd[1] = pmdps[1];	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)			\
+	do {				\
+		pmdp[0] = __pmd(0);	\
+		pmdp[1] = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
new file mode 100644
index 0000000..d795282
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-hwdef.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
+#define _ASM_PGTABLE_3LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1/2 descriptor
+ *   - common
+ */
+#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
+#define PMD_BIT4		(_AT(pmdval_t, 0))
+#define PMD_DOMAIN(x)		(_AT(pmdval_t, 0))
+
+/*
+ *   - section
+ */
+#define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
+#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_AP_WRITE	(_AT(pmdval_t, 0))
+#define PMD_SECT_AP_READ	(_AT(pmdval_t, 0))
+#define PMD_SECT_TEX(x)		(_AT(pmdval_t, 0))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0) << 2)	/* strongly ordered */
+#define PMD_SECT_BUFFERED	(_AT(pmdval_t, 1) << 2)	/* normal non-cacheable */
+#define PMD_SECT_WT		(_AT(pmdval_t, 2) << 2)	/* normal inner write-through */
+#define PMD_SECT_WB		(_AT(pmdval_t, 3) << 2)	/* normal inner write-back */
+#define PMD_SECT_WBWA		(_AT(pmdval_t, 7) << 2)	/* normal inner write-alloc */
+
+/*
+ * + Level 3 descriptor (PTE)
+ */
+#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
+#define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
+#define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
+#define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+#define PTE_EXT_AF		(_AT(pteval_t, 1) << 10)	/* Access Flag */
+#define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_EXT_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+
+/*
+ * 40-bit physical address supported.
+ */
+#define PHYS_MASK_SHIFT		(40)
+#define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
new file mode 100644
index 0000000..921aa30
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-types.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-types.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
+#define _ASM_PGTABLE_3LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x)      ((x).pte)
+#define pmd_val(x)      ((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) } )
+#define __pmd(x)        ((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)     ((pgprot_t) { (x) } )
+
+#else	/* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pgdval_t pgd_t;
+typedef pteval_t pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pmd(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif	/* STRICT_MM_TYPECHECKS */
+
+#endif	/* _ASM_PGTABLE_3LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
new file mode 100644
index 0000000..759af70
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -0,0 +1,155 @@
+/*
+ * arch/arm/include/asm/pgtable-3level.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_H
+#define _ASM_PGTABLE_3LEVEL_H
+
+/*
+ * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
+ * 8 bytes each, occupying a 4K page. The first level table covers a range of
+ * 512GB, each entry representing 1GB. Since we are limited to 4GB input
+ * address range, only 4 entries in the PGD are used.
+ *
+ * There are enough spare bits in a page table entry for the kernel specific
+ * state.
+ */
+#define PTRS_PER_PTE		512
+#define PTRS_PER_PMD		512
+#define PTRS_PER_PGD		4
+
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF		(0)
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
+
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map.
+ */
+#define PGDIR_SHIFT		30
+
+/*
+ * PMD_SHIFT determines the size a middle-level page table entry can map.
+ */
+#define PMD_SHIFT		21
+
+#define PMD_SIZE		(1UL << PMD_SHIFT)
+#define PMD_MASK		(~(PMD_SIZE-1))
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT		21
+#define SECTION_SIZE		(1UL << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions for LPAE.
+ *
+ * These bits overlap with the hardware bits but the naming is preserved for
+ * consistency with the classic page table format.
+ */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
+#define L_PTE_BUFFERABLE	(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
+#define L_PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
+#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
+#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
+#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
+#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
+#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+
+/*
+ * To be used in assembly code with the upper page attributes.
+ */
+#define L_PTE_XN_HIGH		(1 << (54 - 32))
+#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
+#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
+#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
+#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
+#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
+#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
+#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
+#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
+#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
+#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)
+
+/*
+ * Software PGD flags.
+ */
+#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */
+
+#ifndef __ASSEMBLY__
+
+#define pud_none(pud)		(!pud_val(pud))
+#define pud_bad(pud)		(!(pud_val(pud) & 2))
+#define pud_present(pud)	(pud_val(pud))
+
+#define pud_clear(pudp)			\
+	do {				\
+		*pudp = __pud(0);	\
+		clean_pmd_entry(pudp);	\
+	} while (0)
+
+#define set_pud(pudp, pud)		\
+	do {				\
+		*pudp = pud;		\
+		flush_pmd_entry(pudp);	\
+	} while (0)
+
+static inline pmd_t *pud_page_vaddr(pud_t pud)
+{
+	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+/* Find an entry in the second-level page table.. */
+#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
+
+#define copy_pmd(pmdpd,pmdps)		\
+	do {				\
+		*pmdpd = *pmdps;	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)			\
+	do {				\
+		*pmdp = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_3LEVEL_H */
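With PGDIR_SHIFT of 30 and PMD_SHIFT of 21, a 32-bit virtual address splits
into a 2-bit PGD index (4 entries), a 9-bit PMD index, a 9-bit PTE index and
the page offset. A small worked example follows, assuming the usual 4K pages
(PAGE_SHIFT of 12 is not defined in this header, so it is an assumption here).

/* Standalone sketch of the LPAE index arithmetic; builds with any C compiler. */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0xC0123456UL;

	/* prints: pgd=3 pmd=0 pte=0x123 offset=0x456 */
	printf("pgd=%lu pmd=%lu pte=0x%lx offset=0x%lx\n",
	       addr >> 30,		/* PGDIR_SHIFT */
	       (addr >> 21) & 0x1ff,	/* PMD_SHIFT, PTRS_PER_PMD - 1 */
	       (addr >> 12) & 0x1ff,	/* assumed PAGE_SHIFT, PTRS_PER_PTE - 1 */
	       addr & 0xfff);
	return 0;
}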
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index 1831111..8426229 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -10,6 +10,10 @@
 #ifndef _ASMARM_PGTABLE_HWDEF_H
 #define _ASMARM_PGTABLE_HWDEF_H
 
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-hwdef.h>
+#else
 #include <asm/pgtable-2level-hwdef.h>
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9451dce..f66626d 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,20 +11,24 @@
 #define _ASMARM_PGTABLE_H
 
 #include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
 
 #ifndef CONFIG_MMU
 
+#include <asm-generic/4level-fixup.h>
 #include "pgtable-nommu.h"
 
 #else
 
+#include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
-#include <mach/vmalloc.h>
 #include <asm/pgtable-hwdef.h>
 
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
 #include <asm/pgtable-2level.h>
+#endif
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -33,15 +37,10 @@
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
  */
-#ifndef VMALLOC_START
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END		0xff000000UL
 
 #define LIBRARY_TEXT_START	0x0c000000
 
@@ -163,39 +162,8 @@
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)		do { } while (0)
-#define set_pgd(pgd,pgdp)	do { } while (0)
-#define set_pud(pud,pudp)	do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)	((pmd_t *)(dir))
-
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
-#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps)		\
-	do {				\
-		pmdpd[0] = pmdps[0];	\
-		pmdpd[1] = pmdps[1];	\
-		flush_pmd_entry(pmdpd);	\
-	} while (0)
-
-#define pmd_clear(pmdp)			\
-	do {				\
-		pmdp[0] = __pmd(0);	\
-		pmdp[1] = __pmd(0);	\
-		clean_pmd_entry(pmdp);	\
-	} while (0)
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
@@ -204,10 +172,6 @@
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end)	(end)
-
-
 #ifndef CONFIG_HIGHPTE
 #define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte)	do { } while (0)
@@ -229,7 +193,6 @@
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
 
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
 #if __LINUX_ARM_ARCH__ < 6
@@ -336,6 +299,7 @@
  * We provide our own arch_get_unmapped_area to cope with VIPT caches.
  */
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
@@ -346,9 +310,6 @@
 
 #define pgtable_cache_init() do { } while (0)
 
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0bda22c..b5a5be2 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -27,13 +27,22 @@
 /*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
- * @handle_irq: an optional handler which will be called from the interrupt and
- * passed the address of the low level handler, and can be used to implement
- * any platform specific handling before or after calling it.
+ * @handle_irq: an optional handler which will be called from the
+ *	interrupt and passed the address of the low level handler,
+ *	and can be used to implement any platform specific handling
+ *	before or after calling it.
+ * @enable_irq: an optional handler which will be called after
+ *	request_irq and can be used to handle any platform specific
+ *	irq enablement
+ * @disable_irq: an optional handler which will be called before
+ *	free_irq and can be used to handle any platform specific
+ *	irq disablement
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
+	void (*enable_irq)(int irq);
+	void (*disable_irq)(int irq);
 };
 
 #ifdef CONFIG_CPU_HAS_PMU
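The two new hooks bracket request_irq()/free_irq() in the perf code (see the
perf_event.c hunk later in this diff), so a board can perform whatever per-IRQ
routing its PMU needs. A hedged sketch of platform data supplying them follows;
everything named example_* is made up, only the arm_pmu_platdata fields come
from the header above.

/* Hypothetical board file snippet, illustration only. */
static void example_pmu_enable_irq(int irq)
{
	/* e.g. route the PMU overflow signal onto this IRQ line */
}

static void example_pmu_disable_irq(int irq)
{
	/* undo whatever example_pmu_enable_irq() configured */
}

static struct arm_pmu_platdata example_pmu_platdata = {
	.enable_irq	= example_pmu_enable_irq,
	.disable_irq	= example_pmu_disable_irq,
};

This structure would be attached to the PMU platform_device as its
platform_data, which is what dev_get_platdata() fetches in the perf_event.c
change below.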
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 9e92cb20..f3628fb 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -65,7 +65,11 @@
 	 * Set a possibly extended PTE.  Non-extended PTEs should
 	 * ignore 'ext'.
 	 */
+#ifdef CONFIG_ARM_LPAE
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
+#else
 	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
 
 	/* Suspend/resume */
 	unsigned int suspend_size;
@@ -79,7 +83,11 @@
 extern int cpu_do_idle(void);
 extern void cpu_dcache_clean_area(void *, int);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+#ifdef CONFIG_ARM_LPAE
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 
 /* These three are private to arch/arm/kernel/suspend.c */
@@ -107,6 +115,18 @@
 
 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
 
+#ifdef CONFIG_ARM_LPAE
+#define cpu_get_pgd()	\
+	({						\
+		unsigned long pg, pg2;			\
+		__asm__("mrrc	p15, 0, %0, %1, c2"	\
+			: "=r" (pg), "=r" (pg2)		\
+			:				\
+			: "cc");			\
+		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
+		(pgd_t *)phys_to_virt(pg);		\
+	})
+#else
 #define cpu_get_pgd()	\
 	({						\
 		unsigned long pg;			\
@@ -115,6 +135,7 @@
 		pg &= ~0x3fff;				\
 		(pgd_t *)phys_to_virt(pg);		\
 	})
+#endif
 
 #endif
 
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b2d9df5..ce280b8 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -123,6 +123,8 @@
 
 #endif
 
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
 #endif
 
 #endif /* __ASM_ARM_PROCESSOR_H */
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 6f65ca8..ee03633 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -13,7 +13,6 @@
 
 #ifdef CONFIG_OF
 
-#include <asm/setup.h>
 #include <asm/irq.h>
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index c8e6ddf..e3f7572 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -8,113 +8,7 @@
 #ifndef ASM_SCHED_CLOCK
 #define ASM_SCHED_CLOCK
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct clock_data {
-	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
-	u32 mult;
-	u32 shift;
-};
-
-#define DEFINE_CLOCK_DATA(name)	struct clock_data name
-
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
-{
-	return (cyc * mult) >> shift;
-}
-
-/*
- * Atomically update the sched_clock epoch.  Your update callback will
- * be called from a timer before the counter wraps - read the current
- * counter value, and call this function to safely move the epochs
- * forward.  Only use this from the update callback.
- */
-static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
-{
-	unsigned long flags;
-	u64 ns = cd->epoch_ns +
-		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
-
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
-	raw_local_irq_save(flags);
-	cd->epoch_cyc = cyc;
-	smp_wmb();
-	cd->epoch_ns = ns;
-	smp_wmb();
-	cd->epoch_cyc_copy = cyc;
-	raw_local_irq_restore(flags);
-}
-
-/*
- * If your clock rate is known at compile time, using this will allow
- * you to optimize the mult/shift loads away.  This is paired with
- * init_fixed_sched_clock() to ensure that your mult/shift are correct.
- */
-static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
-	u32 cyc, u32 mask, u32 mult, u32 shift)
-{
-	u64 epoch_ns;
-	u32 epoch_cyc;
-
-	/*
-	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
-	 */
-	do {
-		epoch_cyc = cd->epoch_cyc;
-		smp_rmb();
-		epoch_ns = cd->epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd->epoch_cyc_copy);
-
-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
-}
-
-/*
- * Otherwise, you need to use this, which will obtain the mult/shift
- * from the clock_data structure.  Use init_sched_clock() with this.
- */
-static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
-	u32 cyc, u32 mask)
-{
-	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
-}
-
-/*
- * Initialize the clock data - calculate the appropriate multiplier
- * and shift.  Also setup a timer to ensure that the epoch is refreshed
- * at the appropriate time interval, which will call your update
- * handler.
- */
-void init_sched_clock(struct clock_data *, void (*)(void),
-	unsigned int, unsigned long);
-
-/*
- * Use this initialization function rather than init_sched_clock() if
- * you're using cyc_to_fixed_sched_clock, which will warn if your
- * constants are incorrect.
- */
-static inline void init_fixed_sched_clock(struct clock_data *cd,
-	void (*update)(void), unsigned int bits, unsigned long rate,
-	u32 mult, u32 shift)
-{
-	init_sched_clock(cd, update, bits, rate);
-	if (cd->mult != mult || cd->shift != shift) {
-		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
-			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
-			mult, shift, cd->mult, cd->shift);
-	}
-}
-
 extern void sched_clock_postinit(void);
+extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
 
 #endif
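All of the clock_data bookkeeping moves out of this header; a platform now just
hands a counter read callback to setup_sched_clock(), whose prototype is the
single line added above. A hedged sketch of registering a free-running 32-bit
counter follows; the register base, helper names and counter layout are
assumptions, not part of the patch.

/* Illustrative platform timer code only. */
#include <linux/io.h>
#include <linux/init.h>
#include <asm/sched_clock.h>

static void __iomem *example_counter_base;	/* ioremap()ed elsewhere */

static u32 example_read_sched_clock(void)
{
	return readl(example_counter_base);	/* free-running up-counter */
}

static void __init example_timer_init(unsigned long rate)
{
	/* 32 significant counter bits, ticking at "rate" Hz */
	setup_sched_clock(example_read_sched_clock, 32, rate);
}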
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 915696d..23ebc0c 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -192,11 +192,7 @@
 /*
  * Memory map description
  */
-#ifdef CONFIG_ARCH_EP93XX
-# define NR_BANKS 16
-#else
-# define NR_BANKS 8
-#endif
+#define NR_BANKS	CONFIG_ARM_NR_BANKS
 
 struct membank {
 	phys_addr_t start;
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 90ffd04..dec6f9a 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index 9997ad2..32ee164 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -24,12 +24,13 @@
 
 #if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
 
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u32 __arch_swahb32(__u32 x)
 {
 	__asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
 	return x;
 }
-#define __arch_swab16 __arch_swab16
+#define __arch_swahb32 __arch_swahb32
+#define __arch_swab16(x) ((__u16)__arch_swahb32(x))
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 984014b..e4c96cc 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -80,6 +80,14 @@
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
 		unsigned long err, unsigned long trap);
 
+#ifdef CONFIG_ARM_LPAE
+#define FAULT_CODE_ALIGNMENT	33
+#define FAULT_CODE_DEBUG	34
+#else
+#define FAULT_CODE_ALIGNMENT	1
+#define FAULT_CODE_DEBUG	2
+#endif
+
 void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 				       struct pt_regs *),
 		     int sig, int code, const char *name);
@@ -100,7 +108,7 @@
 extern int __pure cpu_architecture(void);
 extern void cpu_init(void);
 
-void arm_machine_restart(char mode, const char *cmd);
+void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(char str, const char *cmd);
 
 #define UDBG_UNDEFINED	(1 << 0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8d..0f30c3a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,7 +142,6 @@
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
 
@@ -152,7 +151,6 @@
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 265f908..5d3ed7e3 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -202,8 +202,18 @@
 	tlb_remove_page(tlb, pte);
 }
 
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+				  unsigned long addr)
+{
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, virt_to_page(pmdp));
+#endif
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
+#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
 
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
index 48192ac..28beab9 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/asm/types.h
@@ -3,12 +3,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index a5edf42..d1c3f3a 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -30,14 +30,15 @@
 };
 
 struct unwind_idx {
-	unsigned long addr;
+	unsigned long addr_offset;
 	unsigned long insn;
 };
 
 struct unwind_table {
 	struct list_head list;
-	struct unwind_idx *start;
-	struct unwind_idx *stop;
+	const struct unwind_idx *start;
+	const struct unwind_idx *origin;
+	const struct unwind_idx *stop;
 	unsigned long begin_addr;
 	unsigned long end_addr;
 };
@@ -49,15 +50,6 @@
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-	return 0;
-}
-#endif
-
 #endif	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 16eed6a..43b740d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -13,7 +13,7 @@
 
 # Object file lists.
 
-obj-y		:= elf.o entry-armv.o entry-common.o irq.o \
+obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index b145f16..3a456c6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -36,12 +36,11 @@
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	ldr	r1, =handle_arch_irq
 	mov	r0, sp
-	ldr	r1, [r1]
 	adr	lr, BSYM(9997f)
-	teq	r1, #0
-	movne	pc, r1
-#endif
+	ldr	pc, [r1]
+#else
 	arch_irq_handler_default
+#endif
 9997:
 	.endm
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 08c82fd..14e277d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -39,8 +39,14 @@
 #error KERNEL_RAM_VADDR must start at 0xXXXX8000
 #endif
 
+#ifdef CONFIG_ARM_LPAE
+	/* LPAE requires an additional page for the PGD */
+#define PG_DIR_SIZE	0x5000
+#define PMD_ORDER	3
+#else
 #define PG_DIR_SIZE	0x4000
 #define PMD_ORDER	2
+#endif
 
 	.globl	swapper_pg_dir
 	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
@@ -164,17 +170,36 @@
 	teq	r0, r6
 	bne	1b
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Build the PGD table (first level) to point to the PMD table. A PGD
+	 * entry is 64-bit wide.
+	 */
+	mov	r0, r4
+	add	r3, r4, #0x1000			@ first PMD table address
+	orr	r3, r3, #3			@ PGD block type
+	mov	r6, #4				@ PTRS_PER_PGD
+	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
+1:	str	r3, [r0], #4			@ set bottom PGD entry bits
+	str	r7, [r0], #4			@ set top PGD entry bits
+	add	r3, r3, #0x1000			@ next PMD table
+	subs	r6, r6, #1
+	bne	1b
+
+	add	r4, r4, #0x1000			@ point to the PMD tables
+#endif
+
 	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
 
 	/*
 	 * Create identity mapping to cater for __enable_mmu.
 	 * This identity mapping will be removed by paging_init().
 	 */
-	adr	r0, __enable_mmu_loc
+	adr	r0, __turn_mmu_on_loc
 	ldmia	r0, {r3, r5, r6}
 	sub	r0, r0, r3			@ virt->phys offset
-	add	r5, r5, r0			@ phys __enable_mmu
-	add	r6, r6, r0			@ phys __enable_mmu_end
+	add	r5, r5, r0			@ phys __turn_mmu_on
+	add	r6, r6, r0			@ phys __turn_mmu_on_end
 	mov	r5, r5, lsr #SECTION_SHIFT
 	mov	r6, r6, lsr #SECTION_SHIFT
 
@@ -219,8 +244,8 @@
 #endif
 
 	/*
-	 * Then map boot params address in r2 or
-	 * the first 1MB of ram if boot params address is not specified.
+	 * Then map the boot params address in r2, or the first 1MB (2MB with
+	 * LPAE) of RAM if no boot params address was specified.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -251,7 +276,15 @@
 	mov	r3, r7, lsr #SECTION_SHIFT
 	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
+#ifdef CONFIG_ARM_LPAE
+	mov	r7, #1 << (54 - 32)		@ XN
+#else
+	orr	r3, r3, #PMD_SECT_XN
+#endif
 1:	str	r3, [r0], #4
+#ifdef CONFIG_ARM_LPAE
+	str	r7, [r0], #4
+#endif
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	blo	1b
@@ -283,14 +316,17 @@
 	str	r3, [r0]
 #endif
 #endif
+#ifdef CONFIG_ARM_LPAE
+	sub	r4, r4, #0x1000		@ point to the PGD table
+#endif
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
 	.align
-__enable_mmu_loc:
+__turn_mmu_on_loc:
 	.long	.
-	.long	__enable_mmu
-	.long	__enable_mmu_end
+	.long	__turn_mmu_on
+	.long	__turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
 	__CPUINIT
@@ -374,12 +410,17 @@
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #CR_I
 #endif
+#ifdef CONFIG_ARM_LPAE
+	mov	r5, #0
+	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
+#else
 	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
+#endif
 	b	__turn_mmu_on
 ENDPROC(__enable_mmu)
 
@@ -398,15 +439,19 @@
  * other registers depend on the function called upon completion
  */
 	.align	5
-__turn_mmu_on:
+	.pushsection	.idmap.text, "ax"
+ENTRY(__turn_mmu_on)
 	mov	r0, r0
+	instr_sync
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	instr_sync
 	mov	r3, r3
 	mov	r3, r13
 	mov	pc, r3
-__enable_mmu_end:
+__turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
+	.popsection
 
 
 #ifdef CONFIG_SMP_ON_UP
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 814a52a9..d6a95ef 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1016,10 +1016,10 @@
 	}
 
 	/* Register debug fault handler. */
-	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
-			"watchpoint debug exception");
-	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
-			"breakpoint debug exception");
+	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+			TRAP_HWBKPT, "watchpoint debug exception");
+	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+			TRAP_HWBKPT, "breakpoint debug exception");
 
 	/* Register hotplug notifier. */
 	register_cpu_notifier(&dbg_reset_nb);
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index e17cdd6..1862d8f 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -202,6 +202,8 @@
 #include <linux/slab.h>
 #include <linux/kprobes.h>
 
+#include <asm/opcodes.h>
+
 #include "kprobes.h"
 #include "kprobes-test.h"
 
@@ -1050,65 +1052,9 @@
 
 static unsigned long test_check_cc(int cc, unsigned long cpsr)
 {
-	unsigned long temp;
+	int ret = arm_check_condition(cc << 28, cpsr);
 
-	switch (cc) {
-	case 0x0: /* eq */
-		return cpsr & PSR_Z_BIT;
-
-	case 0x1: /* ne */
-		return (~cpsr) & PSR_Z_BIT;
-
-	case 0x2: /* cs */
-		return cpsr & PSR_C_BIT;
-
-	case 0x3: /* cc */
-		return (~cpsr) & PSR_C_BIT;
-
-	case 0x4: /* mi */
-		return cpsr & PSR_N_BIT;
-
-	case 0x5: /* pl */
-		return (~cpsr) & PSR_N_BIT;
-
-	case 0x6: /* vs */
-		return cpsr & PSR_V_BIT;
-
-	case 0x7: /* vc */
-		return (~cpsr) & PSR_V_BIT;
-
-	case 0x8: /* hi */
-		cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
-		return cpsr & PSR_C_BIT;
-
-	case 0x9: /* ls */
-		cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
-		return (~cpsr) & PSR_C_BIT;
-
-	case 0xa: /* ge */
-		cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		return (~cpsr) & PSR_N_BIT;
-
-	case 0xb: /* lt */
-		cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		return cpsr & PSR_N_BIT;
-
-	case 0xc: /* gt */
-		temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		temp |= (cpsr << 1);	   /* PSR_N_BIT |= PSR_Z_BIT */
-		return (~temp) & PSR_N_BIT;
-
-	case 0xd: /* le */
-		temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		temp |= (cpsr << 1);	   /* PSR_N_BIT |= PSR_Z_BIT */
-		return temp & PSR_N_BIT;
-
-	case 0xe: /* al */
-	case 0xf: /* unconditional */
-		return true;
-	}
-	BUG();
-	return false;
+	return (ret != ARM_OPCODE_CONDTEST_FAIL);
 }
 
 static int is_last_scenario;
@@ -1128,7 +1074,9 @@
 
 	if (!test_case_is_thumb) {
 		/* Testing ARM code */
-		probe_should_run = test_check_cc(current_instruction >> 28, cpsr) != 0;
+		int cc = current_instruction >> 28;
+
+		probe_should_run = test_check_cc(cc, cpsr) != 0;
 		if (scenario == 15)
 			is_last_scenario = true;
 
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 0bcd383..1911dae 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -9,7 +9,7 @@
  */
 #include <linux/export.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/string.h>
 
@@ -34,8 +34,8 @@
 	{ "red",   led_red_on,   led_red_off   },
 };
 
-static ssize_t leds_store(struct sys_device *dev,
-			struct sysdev_attribute *attr,
+static ssize_t leds_store(struct device *dev,
+			struct device_attribute *attr,
 			const char *buf, size_t size)
 {
 	int ret = -EINVAL, len = strcspn(buf, " ");
@@ -69,15 +69,16 @@
 	return ret;
 }
 
-static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+static DEVICE_ATTR(event, 0200, NULL, leds_store);
 
-static struct sysdev_class leds_sysclass = {
+static struct bus_type leds_subsys = {
 	.name		= "leds",
+	.dev_name	= "leds",
 };
 
-static struct sys_device leds_device = {
+static struct device leds_device = {
 	.id		= 0,
-	.cls		= &leds_sysclass,
+	.bus		= &leds_subsys,
 };
 
 static int leds_suspend(void)
@@ -105,11 +106,11 @@
 static int __init leds_init(void)
 {
 	int ret;
-	ret = sysdev_class_register(&leds_sysclass);
+	ret = subsys_system_register(&leds_subsys, NULL);
 	if (ret == 0)
-		ret = sysdev_register(&leds_device);
+		ret = device_register(&leds_device);
 	if (ret == 0)
-		ret = sysdev_create_file(&leds_device, &attr_event);
+		ret = device_create_file(&leds_device, &dev_attr_event);
 	if (ret == 0)
 		register_syscore_ops(&leds_syscore_ops);
 	return ret;
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd4..764bd45 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -12,12 +12,11 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/system.h>
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
 
-extern void setup_mm_for_reboot(char mode);
-
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
 extern unsigned long kexec_mach_type;
@@ -111,14 +110,6 @@
 
 	if (kexec_reinit)
 		kexec_reinit();
-	local_irq_disable();
-	local_fiq_disable();
-	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
-	flush_cache_all();
-	outer_flush_all();
-	outer_disable();
-	cpu_proc_fin();
-	outer_inv_all();
-	flush_cache_all();
-	cpu_reset(reboot_code_buffer_phys);
+
+	soft_restart(reboot_code_buffer_phys);
 }
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
new file mode 100644
index 0000000..f8179c6
--- /dev/null
+++ b/arch/arm/kernel/opcodes.c
@@ -0,0 +1,72 @@
+/*
+ *  linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position within each 16-bit entry is the flags value NZCV
+ */
+static const unsigned short cc_map[16] = {
+	0xF0F0,			/* EQ == Z set            */
+	0x0F0F,			/* NE                     */
+	0xCCCC,			/* CS == C set            */
+	0x3333,			/* CC                     */
+	0xFF00,			/* MI == N set            */
+	0x00FF,			/* PL                     */
+	0xAAAA,			/* VS == V set            */
+	0x5555,			/* VC                     */
+	0x0C0C,			/* HI == C set && Z clear */
+	0xF3F3,			/* LS == C clear || Z set */
+	0xAA55,			/* GE == (N==V)           */
+	0x55AA,			/* LT == (N!=V)           */
+	0x0A05,			/* GT == (!Z && (N==V))   */
+	0xF5FA,			/* LE == (Z || (N!=V))    */
+	0xFFFF,			/* AL always              */
+	0			/* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
+{
+	u32 cc_bits  = opcode >> 28;
+	u32 psr_cond = psr >> 28;
+	unsigned int ret;
+
+	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+		if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+			ret = ARM_OPCODE_CONDTEST_PASS;
+		else
+			ret = ARM_OPCODE_CONDTEST_FAIL;
+	} else {
+		ret = ARM_OPCODE_CONDTEST_UNCOND;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
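cc_map is indexed by the 4-bit condition field, and each bit within the 16-bit
entry corresponds to one NZCV value taken from psr >> 28. Worked example: for
an EQ-conditioned opcode (condition field 0x0) with only the Z flag set,
psr >> 28 is 0b0100 = 4, and bit 4 of cc_map[0] (0xF0F0) is 1, so the check
passes. A hedged self-check sketch, not part of the patch (it would live in
kernel code and needs <asm/opcodes.h> and <linux/bug.h>):

/* Illustration only: exercises arm_check_condition() with an EQ-conditioned opcode. */
static void example_check_eq_condition(void)
{
	u32 beq_opcode = 0x0A000000;	/* B<cond=EQ>: condition field is 0x0 */

	/* Z set (PSR bit 30): psr >> 28 == 4, (0xF0F0 >> 4) & 1 == 1 -> PASS */
	WARN_ON(arm_check_condition(beq_opcode, 1 << 30) !=
		ARM_OPCODE_CONDTEST_PASS);

	/* Z clear: (0xF0F0 >> 0) & 1 == 0 -> FAIL */
	WARN_ON(arm_check_condition(beq_opcode, 0) !=
		ARM_OPCODE_CONDTEST_FAIL);
}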
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8e9c98e..5bb91bf 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,8 +59,7 @@
 }
 EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
 
-int
-armpmu_get_max_events(void)
+int perf_num_counters(void)
 {
 	int max_events = 0;
 
@@ -69,12 +68,6 @@
 
 	return max_events;
 }
-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-
-int perf_num_counters(void)
-{
-	return armpmu_get_max_events();
-}
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
 #define HW_OP_UNSUPPORTED		0xFFFF
@@ -380,6 +373,8 @@
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat =
+		dev_get_platdata(&pmu_device->dev);
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
@@ -387,8 +382,11 @@
 		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
 			continue;
 		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
+		if (irq >= 0) {
+			if (plat && plat->disable_irq)
+				plat->disable_irq(irq);
 			free_irq(irq, armpmu);
+		}
 	}
 
 	release_pmu(armpmu->type);
@@ -448,7 +446,8 @@
 				irq);
 			armpmu_release_hardware(armpmu);
 			return err;
-		}
+		} else if (plat && plat->enable_irq)
+			plat->enable_irq(irq);
 
 		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}
@@ -640,6 +639,9 @@
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+	if (!cpu_pmu)
+		return -ENODEV;
+
 	cpu_pmu->plat_device = pdev;
 	return 0;
 }
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index e63d811..533be99 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -65,13 +65,15 @@
  * accesses/misses in hardware.
  */
 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6_PERFCTR_CPU_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6_PERFCTR_INSTR_EXEC,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6_PERFCTR_BR_MISPREDICT,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -218,13 +220,15 @@
  * accesses/misses in hardware.
  */
 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6MPCORE_PERFCTR_IBUF_STALL,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1ef6d00..460bbbb 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -28,165 +28,87 @@
  * they are not available.
  */
 enum armv7_perf_types {
-	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
-	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
-	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
-	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,	/* L1 */
-	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,	/* L1 */
-	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
-	ARMV7_PERFCTR_DREAD			= 0x06,
-	ARMV7_PERFCTR_DWRITE			= 0x07,
-	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,
-	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
-	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
-	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
-	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
+	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
+	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
+	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
+	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
+	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
+	ARMV7_PERFCTR_MEM_READ				= 0x06,
+	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
+	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
+	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
+	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
+	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
+
+	/*
+	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 	 * It counts:
-	 *  - all branch instructions,
+	 *  - all (taken) branch instructions,
 	 *  - instructions that explicitly write the PC,
 	 *  - exception generating instructions.
 	 */
-	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
-	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
-	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,
-	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
+	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
+	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
+	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
+	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
+	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
+	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
+	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
 
 	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
-	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
-	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,
-	ARMV7_PERFCTR_PC_BRANCH_PRED		= 0x12,
-	ARMV7_PERFCTR_MEM_ACCESS		= 0x13,
-	ARMV7_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
-	ARMV7_PERFCTR_L1_DCACHE_WB		= 0x15,
-	ARMV7_PERFCTR_L2_DCACHE_ACCESS		= 0x16,
-	ARMV7_PERFCTR_L2_DCACHE_REFILL		= 0x17,
-	ARMV7_PERFCTR_L2_DCACHE_WB		= 0x18,
-	ARMV7_PERFCTR_BUS_ACCESS		= 0x19,
-	ARMV7_PERFCTR_MEMORY_ERROR		= 0x1A,
-	ARMV7_PERFCTR_INSTR_SPEC		= 0x1B,
-	ARMV7_PERFCTR_TTBR_WRITE		= 0x1C,
-	ARMV7_PERFCTR_BUS_CYCLES		= 0x1D,
+	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
+	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
+	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
+	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
+	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
+	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
+	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
+	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
+	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
+	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
+	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
 
-	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
+	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
 };
 
 /* ARMv7 Cortex-A8 specific event types */
 enum armv7_a8_perf_types {
-	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
-	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
-	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
-	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
-	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
-	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
-	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
-	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
-	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
-	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
-	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
-	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
-	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
-	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
-	ARMV7_PERFCTR_L2_NEON			= 0x4E,
-	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
-	ARMV7_PERFCTR_L1_INST			= 0x50,
-	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
-	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
-	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
-	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
-	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
-	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
-	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
-	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
-	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
-	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,
-
-	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
-	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
-	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
+	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
+	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
+	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
+	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
 };
 
 /* ARMv7 Cortex-A9 specific event types */
 enum armv7_a9_perf_types {
-	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
-	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
-	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,
-
-	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
-	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,
-
-	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
-	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
-	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
-	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
-	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
-	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
-	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
-	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
-	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,
-
-	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,
-
-	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
-	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
-	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
-	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
-	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,
-
-	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
-	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
-	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
-	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
-	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
-	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
-	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,
-
-	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
-	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,
-
-	ARMV7_PERFCTR_ISB_INST			= 0x90,
-	ARMV7_PERFCTR_DSB_INST			= 0x91,
-	ARMV7_PERFCTR_DMB_INST			= 0x92,
-	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,
-
-	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
-	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
-	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
-	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
-	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
-	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
+	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
+	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
+	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
 };
 
 /* ARMv7 Cortex-A5 specific event types */
 enum armv7_a5_perf_types {
-	ARMV7_PERFCTR_IRQ_TAKEN			= 0x86,
-	ARMV7_PERFCTR_FIQ_TAKEN			= 0x87,
-
-	ARMV7_PERFCTR_EXT_MEM_RQST		= 0xc0,
-	ARMV7_PERFCTR_NC_EXT_MEM_RQST		= 0xc1,
-	ARMV7_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
-	ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP	= 0xc3,
-	ARMV7_PERFCTR_ENTER_READ_ALLOC		= 0xc4,
-	ARMV7_PERFCTR_READ_ALLOC		= 0xc5,
-
-	ARMV7_PERFCTR_STALL_SB_FULL		= 0xc9,
+	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
+	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
 };
 
 /* ARMv7 Cortex-A15 specific event types */
 enum armv7_a15_perf_types {
-	ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS	= 0x40,
-	ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS	= 0x41,
-	ARMV7_PERFCTR_L1_DCACHE_READ_REFILL	= 0x42,
-	ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL	= 0x43,
+	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
+	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
+	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
+	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
 
-	ARMV7_PERFCTR_L1_DTLB_READ_REFILL	= 0x4C,
-	ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL	= 0x4D,
+	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
+	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
 
-	ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS	= 0x50,
-	ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS	= 0x51,
-	ARMV7_PERFCTR_L2_DCACHE_READ_REFILL	= 0x52,
-	ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL	= 0x53,
+	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
+	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
+	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
+	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
 
-	ARMV7_PERFCTR_SPEC_PC_WRITE		= 0x76,
+	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 };
 
 /*
@@ -197,13 +119,15 @@
  * accesses/misses in hardware.
  */
 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -217,12 +141,12 @@
 		 * combined.
 		 */
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -231,12 +155,12 @@
 	},
 	[C(L1I)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -245,12 +169,12 @@
 	},
 	[C(LL)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -274,11 +198,11 @@
 	[C(ITLB)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -287,14 +211,12 @@
 	},
 	[C(BPU)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -321,14 +243,15 @@
  * Cortex-A9 HW events mapping
  */
 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    =
-					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_DCACHE_ACCESS,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 };
 
 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -342,12 +265,12 @@
 		 * combined.
 		 */
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -357,11 +280,11 @@
 	[C(L1I)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -399,11 +322,11 @@
 	[C(ITLB)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -412,14 +335,12 @@
 	},
 	[C(BPU)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -446,13 +367,15 @@
  * Cortex-A5 HW events mapping
  */
 static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -460,42 +383,34 @@
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	[C(L1D)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_DCACHE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_DCACHE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 		},
 	},
 	[C(L1I)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		/*
 		 * The prefetch counters don't differentiate between the I
 		 * side and the D side.
 		 */
 		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 		},
 	},
 	[C(LL)] = {
@@ -529,11 +444,11 @@
 	[C(ITLB)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -543,13 +458,11 @@
 	[C(BPU)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -562,13 +475,15 @@
  * Cortex-A15 HW events mapping
  */
 static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_BUS_CYCLES,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -576,16 +491,12 @@
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	[C(L1D)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -601,11 +512,11 @@
 		 */
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -614,16 +525,12 @@
 	},
 	[C(LL)] = {
 		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 		},
 		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]
-					= ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -633,13 +540,11 @@
 	[C(DTLB)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -649,11 +554,11 @@
 	[C(ITLB)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@@ -663,13 +568,11 @@
 	[C(BPU)] = {
 		[C(OP_READ)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_WRITE)] = {
 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]
-					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 		},
 		[C(OP_PREFETCH)] = {
 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e0cca10..3b99d82 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -48,13 +48,15 @@
 };
 
 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
-	[PERF_COUNT_HW_CPU_CYCLES]	    = XSCALE_PERFCTR_CCNT,
-	[PERF_COUNT_HW_INSTRUCTIONS]	    = XSCALE_PERFCTR_INSTRUCTION,
-	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
-	[PERF_COUNT_HW_BRANCH_MISSES]	    = XSCALE_PERFCTR_BRANCH_MISS,
-	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3d0c6fb..971d65c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,7 +57,7 @@
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-extern void setup_mm_for_reboot(char mode);
+extern void setup_mm_for_reboot(void);
 
 static volatile int hlt_counter;
 
@@ -92,18 +92,24 @@
 __setup("nohlt", nohlt_setup);
 __setup("hlt", hlt_setup);
 
-void arm_machine_restart(char mode, const char *cmd)
-{
-	/* Disable interrupts first */
-	local_irq_disable();
-	local_fiq_disable();
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+typedef void (*phys_reset_t)(unsigned long);
 
-	/*
-	 * Tell the mm system that we are going to reboot -
-	 * we may need it to insert some 1:1 mappings so that
-	 * soft boot works.
-	 */
-	setup_mm_for_reboot(mode);
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
+{
+	phys_reset_t phys_reset;
+
+	/* Take out a flat memory mapping. */
+	setup_mm_for_reboot();
 
 	/* Clean and invalidate caches */
 	flush_cache_all();
@@ -114,18 +120,35 @@
 	/* Push out any further dirty data, and ensure cache is empty */
 	flush_cache_all();
 
-	/*
-	 * Now call the architecture specific reboot code.
-	 */
-	arch_reset(mode, cmd);
+	/* Switch to the identity mapping. */
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset((unsigned long)addr);
 
-	/*
-	 * Whoops - the architecture was unable to reboot.
-	 * Tell the user!
-	 */
-	mdelay(1000);
-	printk("Reboot failed -- System halted\n");
-	while (1);
+	/* Should never get here. */
+	BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+	/* Disable interrupts first */
+	local_irq_disable();
+	local_fiq_disable();
+
+	/* Disable the L2 if we're the last man standing. */
+	if (num_online_cpus() == 1)
+		outer_disable();
+
+	/* Change to the new stack and continue with the reset. */
+	call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+	/* Should never get here. */
+	BUG();
+}
+
+static void null_restart(char mode, const char *cmd)
+{
 }
 
 /*
@@ -134,7 +157,7 @@
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
+void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
 static void do_nothing(void *unused)
@@ -183,7 +206,8 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		leds_event(led_idle_start);
 		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
@@ -213,7 +237,8 @@
 			}
 		}
 		leds_event(led_idle_end);
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -253,7 +278,15 @@
 void machine_restart(char *cmd)
 {
 	machine_shutdown();
+
 	arm_pm_restart(reboot_mode, cmd);
+
+	/* Give a grace period for failure to restart of 1s */
+	mdelay(1000);
+
+	/* Whoops - the platform was unable to reboot. Tell the user! */
+	printk("Reboot failed -- System halted\n");
+	while (1);
 }
 
 void __show_regs(struct pt_regs *regs)
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 9a46370..5416c7c 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -14,61 +14,153 @@
 
 #include <asm/sched_clock.h>
 
+struct clock_data {
+	u64 epoch_ns;
+	u32 epoch_cyc;
+	u32 epoch_cyc_copy;
+	u32 mult;
+	u32 shift;
+};
+
 static void sched_clock_poll(unsigned long wrap_ticks);
 static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
-static void (*sched_clock_update_fn)(void);
+
+static struct clock_data cd = {
+	.mult	= NSEC_PER_SEC / HZ,
+};
+
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
+
+static u32 notrace jiffy_sched_clock_read(void)
+{
+	return (u32)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+	return (cyc * mult) >> shift;
+}
+
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+{
+	u64 epoch_ns;
+	u32 epoch_cyc;
+
+	/*
+	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
+	 * ensuring that we always write epoch_cyc, epoch_ns and
+	 * epoch_cyc_copy in strict order, and read them in strict order.
+	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+	 * the middle of an update, and we should repeat the load.
+	 */
+	do {
+		epoch_cyc = cd.epoch_cyc;
+		smp_rmb();
+		epoch_ns = cd.epoch_ns;
+		smp_rmb();
+	} while (epoch_cyc != cd.epoch_cyc_copy);
+
+	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+}
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+	unsigned long flags;
+	u32 cyc;
+	u64 ns;
+
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns +
+		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+			  cd.mult, cd.shift);
+	/*
+	 * Write epoch_cyc and epoch_ns in a way that the update is
+	 * detectable in cyc_to_sched_clock().
+	 */
+	raw_local_irq_save(flags);
+	cd.epoch_cyc = cyc;
+	smp_wmb();
+	cd.epoch_ns = ns;
+	smp_wmb();
+	cd.epoch_cyc_copy = cyc;
+	raw_local_irq_restore(flags);
+}
 
 static void sched_clock_poll(unsigned long wrap_ticks)
 {
 	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
-	sched_clock_update_fn();
+	update_sched_clock();
 }
 
-void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
-	unsigned int clock_bits, unsigned long rate)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
 	u64 res, wrap;
 	char r_unit;
 
-	sched_clock_update_fn = update;
+	BUG_ON(bits > 32);
+	WARN_ON(!irqs_disabled());
+	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
+	read_sched_clock = read;
+	sched_clock_mask = (1 << bits) - 1;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
+	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
 
 	r = rate;
 	if (r >= 4000000) {
 		r /= 1000000;
 		r_unit = 'M';
-	} else {
+	} else if (r >= 1000) {
 		r /= 1000;
 		r_unit = 'k';
-	}
+	} else
+		r_unit = ' ';
 
 	/* calculate how many ns until we wrap */
-	wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
+	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
 	do_div(wrap, NSEC_PER_MSEC);
 	w = wrap;
 
 	/* calculate the ns resolution of this counter */
-	res = cyc_to_ns(1ULL, cd->mult, cd->shift);
+	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
 	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
-		clock_bits, r, r_unit, res, w);
+		bits, r, r_unit, res, w);
 
 	/*
 	 * Start the timer to keep sched_clock() properly updated and
 	 * set the initial epoch.
 	 */
 	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
-	update();
+	update_sched_clock();
 
 	/*
 	 * Ensure that sched_clock() starts off at 0ns
 	 */
-	cd->epoch_ns = 0;
+	cd.epoch_ns = 0;
+
+	pr_debug("Registered %pF as sched_clock source\n", read);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+	u32 cyc = read_sched_clock();
+	return cyc_to_sched_clock(cyc, sched_clock_mask);
 }
 
 void __init sched_clock_postinit(void)
 {
+	/*
+	 * If no sched_clock function has been provided at this point,
+	 * make it the final one.
+	 */
+	if (read_sched_clock == jiffy_sched_clock_read)
+		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+
 	sched_clock_poll(sched_clock_timer.data);
 }
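A user-space sketch of the epoch publication protocol used by update_sched_clock()/cyc_to_sched_clock() above may help: the writer stores epoch_cyc first and epoch_cyc_copy last, so a reader that observes the two as equal is guaranteed a consistent (epoch_cyc, epoch_ns) pair. GCC atomic fences stand in for smp_wmb()/smp_rmb() here, and mult/shift are trivial values picked only to make the arithmetic obvious; this is a model of the pattern, not the kernel code.

#include <stdint.h>
#include <stdio.h>

struct clock_data {
	uint64_t epoch_ns;
	uint32_t epoch_cyc;
	uint32_t epoch_cyc_copy;
	uint32_t mult, shift;
};

static struct clock_data cd = { .mult = 1, .shift = 0 };

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

/* Writer: epoch_cyc first, epoch_ns next, epoch_cyc_copy last. */
static void write_epoch(uint32_t cyc, uint64_t ns)
{
	cd.epoch_cyc = cyc;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	cd.epoch_ns = ns;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	cd.epoch_cyc_copy = cyc;
}

/* Reader: retry while the writer is between its first and last store. */
static uint64_t read_ns(uint32_t cyc, uint32_t mask)
{
	uint64_t ns;
	uint32_t epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		ns = cd.epoch_ns;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

int main(void)
{
	write_epoch(100, 5000);
	/* 50 cycles past the epoch at mult=1, shift=0 -> 5050 ns */
	printf("%llu\n", (unsigned long long)read_ns(150, 0xffffffff));
	return 0;
}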
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3448a3f..129fbd5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,6 +31,7 @@
 #include <linux/memblock.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/sort.h>
 
 #include <asm/unified.h>
 #include <asm/cpu.h>
@@ -52,6 +53,7 @@
 #include <asm/mach/time.h>
 #include <asm/traps.h>
 #include <asm/unwind.h>
+#include <asm/memblock.h>
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 #include "compat.h"
@@ -890,13 +892,17 @@
 	return mdesc;
 }
 
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
 
 void __init setup_arch(char **cmdline_p)
 {
 	struct machine_desc *mdesc;
 
-	unwind_init();
-
 	setup_processor();
 	mdesc = setup_machine_fdt(__atags_pointer);
 	if (!mdesc)
@@ -904,8 +910,14 @@
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-	if (mdesc->soft_reboot)
-		reboot_setup("s");
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		extern unsigned long arm_dma_zone_size;
+		arm_dma_zone_size = mdesc->dma_zone_size;
+	}
+#endif
+	if (mdesc->restart_mode)
+		reboot_setup(&mdesc->restart_mode);
 
 	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code   = (unsigned long) _etext;
@@ -918,12 +930,16 @@
 
 	parse_early_param();
 
+	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
 	paging_init(mdesc);
 	request_standard_resources(mdesc);
 
+	if (mdesc->restart)
+		arm_pm_restart = mdesc->restart;
+
 	unflatten_device_tree();
 
 #ifdef CONFIG_SMP
@@ -934,12 +950,6 @@
 
 	tcm_init();
 
-#ifdef CONFIG_ZONE_DMA
-	if (mdesc->dma_zone_size) {
-		extern unsigned long arm_dma_zone_size;
-		arm_dma_zone_size = mdesc->dma_zone_size;
-	}
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 020e99c..1f268bd 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -54,14 +54,18 @@
  * r0 = control register value
  */
 	.align	5
+	.pushsection	.idmap.text,"ax"
 ENTRY(cpu_resume_mmu)
 	ldr	r3, =cpu_resume_after_mmu
+	instr_sync
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
 	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
+	instr_sync
 	mov	r0, r0
 	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address
 ENDPROC(cpu_resume_mmu)
+	.popsection
 cpu_resume_after_mmu:
 	bl	cpu_init		@ restore the und/abt/irq banked regs
 	mov	r0, #0			@ return zero on success
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ef5640b..57db122 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/exception.h>
+#include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
@@ -61,7 +62,6 @@
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
 	struct task_struct *idle = ci->idle;
-	pgd_t *pgd;
 	int ret;
 
 	/*
@@ -84,29 +84,11 @@
 	}
 
 	/*
-	 * Allocate initial page tables to allow the new CPU to
-	 * enable the MMU safely.  This essentially means a set
-	 * of our "standard" page tables, with the addition of
-	 * a 1:1 mapping for the physical address of the kernel.
-	 */
-	pgd = pgd_alloc(&init_mm);
-	if (!pgd)
-		return -ENOMEM;
-
-	if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
-		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
-	}
-
-	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-	secondary_data.pgdir = virt_to_phys(pgd);
+	secondary_data.pgdir = virt_to_phys(idmap_pgd);
 	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
 	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -142,16 +124,6 @@
 	secondary_data.stack = NULL;
 	secondary_data.pgdir = 0;
 
-	if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
-		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
-	}
-
-	pgd_free(&init_mm, pgd);
-
 	return ret;
 }
 
@@ -550,6 +522,10 @@
 	local_fiq_disable();
 	local_irq_disable();
 
+#ifdef CONFIG_HOTPLUG_CPU
+	platform_cpu_kill(cpu);
+#endif
+
 	while (1)
 		cpu_relax();
 }
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a8a6682..c8e9385 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,8 +10,11 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/smp.h>
 #include <linux/jiffies.h>
 #include <linux/clockchips.h>
@@ -25,6 +28,7 @@
 /* set up by the platform code */
 void __iomem *twd_base;
 
+static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
 
 static struct clock_event_device __percpu **twd_evt;
@@ -89,6 +93,52 @@
 	disable_percpu_irq(clk->irq);
 }
 
+#ifdef CONFIG_CPU_FREQ
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+	twd_timer_rate = clk_get_rate(twd_clk);
+
+	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+	unsigned long state, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+
+	/*
+	 * The twd clock events must be reprogrammed to account for the new
+	 * frequency.  The timer is local to a cpu, so cross-call to the
+	 * changing cpu.
+	 */
+	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+		smp_call_function_single(freqs->cpu, twd_update_frequency,
+			NULL, 1);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+	.notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+	if (!IS_ERR(twd_clk))
+		return cpufreq_register_notifier(&twd_cpufreq_nb,
+			CPUFREQ_TRANSITION_NOTIFIER);
+
+	return 0;
+}
+core_initcall(twd_cpufreq_init);
+
+#endif
+
 static void __cpuinit twd_calibrate_rate(void)
 {
 	unsigned long count;
@@ -140,6 +190,35 @@
 	return IRQ_NONE;
 }
 
+static struct clk *twd_get_clock(void)
+{
+	struct clk *clk;
+	int err;
+
+	clk = clk_get_sys("smp_twd", NULL);
+	if (IS_ERR(clk)) {
+		pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
+		return clk;
+	}
+
+	err = clk_prepare(clk);
+	if (err) {
+		pr_err("smp_twd: clock failed to prepare: %d\n", err);
+		clk_put(clk);
+		return ERR_PTR(err);
+	}
+
+	err = clk_enable(clk);
+	if (err) {
+		pr_err("smp_twd: clock failed to enable: %d\n", err);
+		clk_unprepare(clk);
+		clk_put(clk);
+		return ERR_PTR(err);
+	}
+
+	return clk;
+}
+
 /*
  * Setup the local clock events for a CPU.
  */
@@ -165,7 +244,13 @@
 		}
 	}
 
-	twd_calibrate_rate();
+	if (!twd_clk)
+		twd_clk = twd_get_clock();
+
+	if (!IS_ERR_OR_NULL(twd_clk))
+		twd_timer_rate = clk_get_rate(twd_clk);
+	else
+		twd_calibrate_rate();
 
 	clk->name = "local_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -173,15 +258,11 @@
 	clk->rating = 350;
 	clk->set_mode = twd_set_mode;
 	clk->set_next_event = twd_set_next_event;
-	clk->shift = 20;
-	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
-	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
-	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
 	this_cpu_clk = __this_cpu_ptr(twd_evt);
 	*this_cpu_clk = clk;
 
-	clockevents_register_device(clk);
-
+	clockevents_config_and_register(clk, twd_timer_rate,
+					0xf, 0xffffffff);
 	enable_percpu_irq(clk->irq, 0);
 }
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 93a22d2..1794cc3 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,13 +1,12 @@
 #include <linux/init.h>
 
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-static pgd_t *suspend_pgd;
-
 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
 extern void cpu_resume_mmu(void);
 
@@ -21,7 +20,7 @@
 	*save_ptr = virt_to_phys(ptr);
 
 	/* This must correspond to the LDM in cpu_resume() assembly */
-	*ptr++ = virt_to_phys(suspend_pgd);
+	*ptr++ = virt_to_phys(idmap_pgd);
 	*ptr++ = sp;
 	*ptr++ = virt_to_phys(cpu_do_resume);
 
@@ -42,7 +41,7 @@
 	struct mm_struct *mm = current->active_mm;
 	int ret;
 
-	if (!suspend_pgd)
+	if (!idmap_pgd)
 		return -EINVAL;
 
 	/*
@@ -59,14 +58,3 @@
 
 	return ret;
 }
-
-static int __init cpu_suspend_init(void)
-{
-	suspend_pgd = pgd_alloc(&init_mm);
-	if (suspend_pgd) {
-		unsigned long addr = virt_to_phys(cpu_resume_mmu);
-		identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
-	}
-	return suspend_pgd ? 0 : -ENOMEM;
-}
-core_initcall(cpu_suspend_init);
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5f452f8..df74518 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -25,6 +25,7 @@
 #include <linux/syscalls.h>
 #include <linux/perf_event.h>
 
+#include <asm/opcodes.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
@@ -185,6 +186,21 @@
 
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
+	res = arm_check_condition(instr, regs->ARM_cpsr);
+	switch (res) {
+	case ARM_OPCODE_CONDTEST_PASS:
+		break;
+	case ARM_OPCODE_CONDTEST_FAIL:
+		/* Condition failed - return to next instruction */
+		regs->ARM_pc += 4;
+		return 0;
+	case ARM_OPCODE_CONDTEST_UNCOND:
+		/* If unconditional encoding - not a SWP, undef */
+		return -EFAULT;
+	default:
+		return -EINVAL;
+	}
+
 	if (current->pid != previous_pid) {
 		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
 			 current->comm, (unsigned long)current->pid);
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30e302d..01ec453 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -180,9 +180,9 @@
  */
 void __init tcm_init(void)
 {
-	u32 tcm_status = read_cpuid_tcmstatus();
-	u8 dtcm_banks = (tcm_status >> 16) & 0x03;
-	u8 itcm_banks = (tcm_status & 0x03);
+	u32 tcm_status;
+	u8 dtcm_banks;
+	u8 itcm_banks;
 	size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
 	size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
 	char *start;
@@ -191,6 +191,22 @@
 	int ret;
 	int i;
 
+	/*
+	 * Prior to ARMv5 there is no TCM, and trying to read the status
+	 * register will hang the processor.
+	 */
+	if (cpu_architecture() < CPU_ARCH_ARMv5) {
+		if (dtcm_code_sz || itcm_code_sz)
+			pr_info("CPU TCM: %u bytes of DTCM and %u bytes of "
+				"ITCM code compiled in, but no TCM present "
+				"in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz);
+		return;
+	}
+
+	tcm_status = read_cpuid_tcmstatus();
+	dtcm_banks = (tcm_status >> 16) & 0x03;
+	itcm_banks = (tcm_status & 0x03);
+
 	/* Values greater than 2 for D/ITCM banks are "reserved" */
 	if (dtcm_banks > 2)
 		dtcm_banks = 0;
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e7e8365..00df012 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -67,7 +67,7 @@
 
 struct unwind_ctrl_block {
 	unsigned long vrs[16];		/* virtual register set */
-	unsigned long *insn;		/* pointer to the current instructions word */
+	const unsigned long *insn;	/* pointer to the current instructions word */
 	int entries;			/* number of entries left to interpret */
 	int byte;			/* current byte number in the instructions word */
 };
@@ -83,8 +83,9 @@
 	PC = 15
 };
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@
 })
 
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
-static struct unwind_idx *search_index(unsigned long addr,
-				       struct unwind_idx *first,
-				       struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+				       const struct unwind_idx *start,
+				       const struct unwind_idx *origin,
+				       const struct unwind_idx *stop)
 {
-	pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+	unsigned long addr_prel31;
 
-	if (addr < first->addr) {
-		pr_warning("unwind: Unknown symbol address %08lx\n", addr);
-		return NULL;
-	} else if (addr >= last->addr)
-		return last;
+	pr_debug("%s(%08lx, %p, %p, %p)\n",
+			__func__, addr, start, origin, stop);
 
-	while (first < last - 1) {
-		struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+	/*
+	 * only search in the section with the matching sign. This way the
+	 * prel31 numbers can be compared as unsigned longs.
+	 */
+	if (addr < (unsigned long)start)
+		/* negative offsets: [start; origin) */
+		stop = origin;
+	else
+		/* positive offsets: [origin; stop) */
+		start = origin;
 
-		if (addr < mid->addr)
-			last = mid;
-		else
-			first = mid;
+	/* prel31 for address relative to start */
+	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
+
+	while (start < stop - 1) {
+		const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+		/*
+		 * As addr_prel31 is relative to start an offset is needed to
+		 * make it relative to mid.
+		 */
+		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+				mid->addr_offset)
+			stop = mid;
+		else {
+			/* keep addr_prel31 relative to start */
+			addr_prel31 -= ((unsigned long)mid -
+					(unsigned long)start);
+			start = mid;
+		}
 	}
 
-	return first;
+	if (likely(start->addr_offset <= addr_prel31))
+		return start;
+	else {
+		pr_warning("unwind: Unknown symbol address %08lx\n", addr);
+		return NULL;
+	}
 }
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_origin(
+		const struct unwind_idx *start, const struct unwind_idx *stop)
 {
-	struct unwind_idx *idx = NULL;
+	pr_debug("%s(%p, %p)\n", __func__, start, stop);
+	while (start < stop) {
+		const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+		if (mid->addr_offset >= 0x40000000)
+			/* negative offset */
+			start = mid + 1;
+		else
+			/* positive offset */
+			stop = mid;
+	}
+	pr_debug("%s -> %p\n", __func__, stop);
+	return stop;
+}
+
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+{
+	const struct unwind_idx *idx = NULL;
 	unsigned long flags;
 
 	pr_debug("%s(%08lx)\n", __func__, addr);
 
-	if (core_kernel_text(addr))
+	if (core_kernel_text(addr)) {
+		if (unlikely(!__origin_unwind_idx))
+			__origin_unwind_idx =
+				unwind_find_origin(__start_unwind_idx,
+						__stop_unwind_idx);
+
 		/* main unwind table */
 		idx = search_index(addr, __start_unwind_idx,
-				   __stop_unwind_idx - 1);
-	else {
+				   __origin_unwind_idx,
+				   __stop_unwind_idx);
+	} else {
 		/* module unwind tables */
 		struct unwind_table *table;
 
@@ -145,7 +200,8 @@
 			if (addr >= table->begin_addr &&
 			    addr < table->end_addr) {
 				idx = search_index(addr, table->start,
-						   table->stop - 1);
+						   table->origin,
+						   table->stop);
 				/* Move-to-front to exploit common traces */
 				list_move(&table->list, &unwind_tables);
 				break;
@@ -274,7 +330,7 @@
 int unwind_frame(struct stackframe *frame)
 {
 	unsigned long high, low;
-	struct unwind_idx *idx;
+	const struct unwind_idx *idx;
 	struct unwind_ctrl_block ctrl;
 
 	/* only go to a higher address on the stack */
@@ -399,7 +455,6 @@
 				      unsigned long text_size)
 {
 	unsigned long flags;
-	struct unwind_idx *idx;
 	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
 	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@
 	if (!tab)
 		return tab;
 
-	tab->start = (struct unwind_idx *)start;
-	tab->stop = (struct unwind_idx *)(start + size);
+	tab->start = (const struct unwind_idx *)start;
+	tab->stop = (const struct unwind_idx *)(start + size);
+	tab->origin = unwind_find_origin(tab->start, tab->stop);
 	tab->begin_addr = text_addr;
 	tab->end_addr = text_addr + text_size;
 
-	/* Convert the symbol addresses to absolute values */
-	for (idx = tab->start; idx < tab->stop; idx++)
-		idx->addr = prel31_to_addr(&idx->addr);
-
 	spin_lock_irqsave(&unwind_lock, flags);
 	list_add_tail(&tab->list, &unwind_tables);
 	spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@
 
 	kfree(tab);
 }
-
-int __init unwind_init(void)
-{
-	struct unwind_idx *idx;
-
-	/* Convert the symbol addresses to absolute values */
-	for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-		idx->addr = prel31_to_addr(&idx->addr);
-
-	pr_debug("unwind: ARM stack unwinding initialised\n");
-
-	return 0;
-}
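For reference, a small sketch of the prel31 encoding the new binary search relies on (the decode helper mirrors the unwinder's prel31_to_addr() macro; the entry values are made up): each index entry stores a 31-bit, sign-extended, self-relative offset, so entries whose raw value is >= 0x40000000 decode to addresses below the table and form the "negative offset" half that unwind_find_origin() skips past.

#include <stdint.h>
#include <stdio.h>

/* Decode a prel31 field at 'ptr': sign-extend bit 30 and add the field's
 * own address.  Like the kernel macro, this assumes arithmetic right
 * shift on signed values. */
static unsigned long prel31_to_addr(const uint32_t *ptr)
{
	int32_t offset = ((int32_t)(*ptr << 1)) >> 1;

	return (unsigned long)ptr + offset;
}

int main(void)
{
	uint32_t positive = 0x00000010;	/* +16: target lies after the entry */
	uint32_t negative = 0x7ffffff0;	/* -16: bit 30 set, target lies before */

	printf("entry at %p -> %#lx\n", (void *)&positive,
	       prel31_to_addr(&positive));
	printf("entry at %p -> %#lx\n", (void *)&negative,
	       prel31_to_addr(&negative));
	return 0;
}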
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 20b3041..f76e755 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -13,6 +13,12 @@
 	*(.proc.info.init)						\
 	VMLINUX_SYMBOL(__proc_info_end) = .;
 
+#define IDMAP_TEXT							\
+	ALIGN_FUNCTION();						\
+	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
+	*(.idmap.text)							\
+	VMLINUX_SYMBOL(__idmap_text_end) = .;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
 #define ARM_CPU_KEEP(x)		x
@@ -92,6 +98,7 @@
 			SCHED_TEXT
 			LOCK_TEXT
 			KPROBES_TEXT
+			IDMAP_TEXT
 #ifdef CONFIG_MMU
 			*(.fixup)
 #endif
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index cf73a7f..0ade0ac 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -13,7 +13,8 @@
 		   testchangebit.o testclearbit.o testsetbit.o        \
 		   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
 		   ucmpdi2.o lib1funcs.o div64.o                      \
-		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
+		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o  \
+		   call_with_stack.o
 
 mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
 
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
new file mode 100644
index 0000000..916c80f
--- /dev/null
+++ b/arch/arm/lib/call_with_stack.S
@@ -0,0 +1,44 @@
+/*
+ * arch/arm/lib/call_with_stack.S
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Written by Will Deacon <will.deacon@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * void call_with_stack(void (*fn)(void *), void *arg, void *sp)
+ *
+ * Change the stack to that pointed at by sp, then invoke fn(arg) with
+ * the new stack.
+ */
+ENTRY(call_with_stack)
+	str	sp, [r2, #-4]!
+	str	lr, [r2, #-4]!
+
+	mov	sp, r2
+	mov	r2, r0
+	mov	r0, r1
+
+	adr	lr, BSYM(1f)
+	mov	pc, r2
+
+1:	ldr	lr, [sp]
+	ldr	sp, [sp, #4]
+	mov	pc, lr
+ENDPROC(call_with_stack)
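A hypothetical kernel-side caller of the new routine, just to make the calling convention explicit (the function and variable names here are invented for illustration): the sp argument is the top of the private stack, since ARM stacks grow downwards, and call_with_stack() saves the old sp/lr below it before branching to fn.

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);

static unsigned long long private_stack[16];

static void do_work(void *arg)
{
	/* Runs on private_stack; the old stack and lr are restored when
	 * this function returns. */
}

static void run_on_private_stack(void)
{
	call_with_stack(do_work, NULL,
			private_stack + ARRAY_SIZE(private_stack));
}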
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index ecdd54d..2937339 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -313,7 +313,7 @@
 	}
 };
 
-static void at91cap9_reset(void)
+static void at91cap9_restart(char mode, const char *cmd)
 {
 	at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
 }
@@ -335,7 +335,7 @@
 
 static void __init at91cap9_initialize(void)
 {
-	at91_arch_reset = at91cap9_reset;
+	arm_pm_restart = at91cap9_restart;
 	pm_power_off = at91cap9_poweroff;
 	at91_extern_irq = (1 << AT91CAP9_ID_IRQ0) | (1 << AT91CAP9_ID_IRQ1);
 
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 713d3bd..430a9fd 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -288,7 +288,7 @@
 	}
 };
 
-static void at91rm9200_reset(void)
+static void at91rm9200_restart(char mode, const char *cmd)
 {
 	/*
 	 * Perform a hardware reset with the use of the Watchdog timer.
@@ -309,7 +309,7 @@
 
 static void __init at91rm9200_initialize(void)
 {
-	at91_arch_reset = at91rm9200_reset;
+	arm_pm_restart = at91rm9200_restart;
 	at91_extern_irq = (1 << AT91RM9200_ID_IRQ0) | (1 << AT91RM9200_ID_IRQ1)
 			| (1 << AT91RM9200_ID_IRQ2) | (1 << AT91RM9200_ID_IRQ3)
 			| (1 << AT91RM9200_ID_IRQ4) | (1 << AT91RM9200_ID_IRQ5)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 0d20677..e76cd49 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -327,7 +327,7 @@
 
 static void __init at91sam9260_initialize(void)
 {
-	at91_arch_reset = at91sam9_alt_reset;
+	arm_pm_restart = at91sam9_alt_restart;
 	pm_power_off = at91sam9260_poweroff;
 	at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
 			| (1 << AT91SAM9260_ID_IRQ2);
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 658a518..19ac7c0 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -287,7 +287,7 @@
 
 static void __init at91sam9261_initialize(void)
 {
-	at91_arch_reset = at91sam9_alt_reset;
+	arm_pm_restart = at91sam9_alt_restart;
 	pm_power_off = at91sam9261_poweroff;
 	at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
 			| (1 << AT91SAM9261_ID_IRQ2);
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index f83fbb0..50d0163 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -305,7 +305,7 @@
 
 static void __init at91sam9263_initialize(void)
 {
-	at91_arch_reset = at91sam9_alt_reset;
+	arm_pm_restart = at91sam9_alt_restart;
 	pm_power_off = at91sam9263_poweroff;
 	at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
 
diff --git a/arch/arm/mach-at91/at91sam9_alt_reset.S b/arch/arm/mach-at91/at91sam9_alt_reset.S
index e0256de..d3f931c 100644
--- a/arch/arm/mach-at91/at91sam9_alt_reset.S
+++ b/arch/arm/mach-at91/at91sam9_alt_reset.S
@@ -14,20 +14,15 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/system.h>
 #include <mach/hardware.h>
 #include <mach/at91sam9_sdramc.h>
 #include <mach/at91_rstc.h>
 
 			.arm
 
-			.globl	at91sam9_alt_reset
+			.globl	at91sam9_alt_restart
 
-at91sam9_alt_reset:	mrc	p15, 0, r0, c1, c0, 0
-			orr	r0, r0, #CR_I
-			mcr	p15, 0, r0, c1, c0, 0		@ enable I-cache
-
-			ldr	r0, .at91_va_base_sdramc	@ preload constants
+at91sam9_alt_restart:	ldr	r0, .at91_va_base_sdramc	@ preload constants
 			ldr	r1, .at91_va_base_rstc_cr
 
 			mov	r2, #1
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 318b040..ff21f7a 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -317,7 +317,7 @@
 	}
 };
 
-static void at91sam9g45_reset(void)
+static void at91sam9g45_restart(char mode, const char *cmd)
 {
 	at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
 }
@@ -340,7 +340,7 @@
 
 static void __init at91sam9g45_initialize(void)
 {
-	at91_arch_reset = at91sam9g45_reset;
+	arm_pm_restart = at91sam9g45_restart;
 	pm_power_off = at91sam9g45_poweroff;
 	at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
 
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index a238105..61cbb46 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -292,7 +292,7 @@
 
 static void __init at91sam9rl_initialize(void)
 {
-	at91_arch_reset = at91sam9_alt_reset;
+	arm_pm_restart = at91sam9_alt_restart;
 	pm_power_off = at91sam9rl_poweroff;
 	at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
 
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 938b34f..7f4503b 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -57,7 +57,7 @@
 extern void at91_irq_resume(void);
 
 /* reset */
-extern void at91sam9_alt_reset(void);
+extern void at91sam9_alt_restart(char, const char *);
 
  /* GPIO */
 #define AT91RM9200_PQFP		3	/* AT91RM9200 PQFP package has 3 banks */
@@ -71,5 +71,4 @@
 extern void __init at91_gpio_init(struct at91_gpio_bank *, int nr_banks);
 extern void __init at91_gpio_irq_setup(void);
 
-extern void (*at91_arch_reset)(void);
 extern int at91_extern_irq;
diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h
index 4298e78..4ca09ef 100644
--- a/arch/arm/mach-at91/include/mach/io.h
+++ b/arch/arm/mach-at91/include/mach/io.h
@@ -30,14 +30,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifndef CONFIG_ARCH_AT91X40
-#define __arch_ioremap	at91_ioremap
-#define __arch_iounmap	at91_iounmap
-#endif
-
-void __iomem *at91_ioremap(unsigned long phys, size_t size, unsigned int type);
-void at91_iounmap(volatile void __iomem *addr);
-
 static inline unsigned int at91_sys_read(unsigned int reg_offset)
 {
 	void __iomem *addr = (void __iomem *)AT91_VA_BASE_SYS;
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index 36af14b..cbd64f3 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -47,13 +47,4 @@
 #endif
 }
 
-void (*at91_arch_reset)(void);
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/* call the CPU-specific reset function */
-	if (at91_arch_reset)
-		(at91_arch_reset)();
-}
-
 #endif
diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/arch/arm/mach-at91/include/mach/vmalloc.h
deleted file mode 100644
index 8e4a1bd..0000000
--- a/arch/arm/mach-at91/include/mach/vmalloc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#include <mach/hardware.h>
-
-#define VMALLOC_END		(AT91_VIRT_BASE & PGDIR_MASK)
-
-#endif
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index aa64294..cf98a8f 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -73,24 +73,6 @@
 	.type		= MT_DEVICE,
 };
 
-void __iomem *at91_ioremap(unsigned long p, size_t size, unsigned int type)
-{
-	if (p >= AT91_BASE_SYS && p <= (AT91_BASE_SYS + SZ_16K - 1))
-		return (void __iomem *)AT91_IO_P2V(p);
-
-	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(at91_ioremap);
-
-void at91_iounmap(volatile void __iomem *addr)
-{
-	unsigned long virt = (unsigned long)addr;
-
-	if (virt >= VMALLOC_START && virt < VMALLOC_END)
-		__iounmap(addr);
-}
-EXPORT_SYMBOL(at91_iounmap);
-
 #define AT91_DBGU0	0xfffff200
 #define AT91_DBGU1	0xffffee00
 
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 31a1435..9e5e755 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -49,7 +49,29 @@
 #endif
 
 /* sysctl */
-int bcmring_arch_warm_reboot;	/* do a warm reboot on hard reset */
+static int bcmring_arch_warm_reboot;	/* do a warm reboot on hard reset */
+
+static void bcmring_restart(char mode, const char *cmd)
+{
+	printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);
+
+	if (mode == 'h') {
+		/* Reboot configured in proc entry */
+		if (bcmring_arch_warm_reboot) {
+			printk("warm reset\n");
+			/* Issue Warm reset (do not reset ethernet switch, keep alive) */
+			chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_WARM);
+		} else {
+			/* Force reset of everything */
+			printk("force reset\n");
+			chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
+		}
+	} else {
+		/* Force reset of everything */
+		printk("force reset\n");
+		chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
+	}
+}
 
 static struct ctl_table_header *bcmring_sysctl_header;
 
@@ -173,4 +195,5 @@
 	.init_irq = bcmring_init_irq,
 	.timer = &bcmring_timer,
-	.init_machine = bcmring_init_machine
+	.init_machine = bcmring_init_machine,
+	.restart = bcmring_restart,
 MACHINE_END
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 430da12..6b67b7e 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -25,7 +25,6 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/clkdev.h>
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index f4d4d6d..1a1a27d 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -1615,7 +1615,7 @@
 {
 	unsigned long addrVal = (unsigned long)addr;
 
-	if (addrVal >= VMALLOC_END) {
+	if (addrVal >= CONSISTENT_BASE) {
 		/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
 
 		/* dma_alloc_xxx pages are physically and virtually contiguous */
diff --git a/arch/arm/mach-bcmring/include/mach/system.h b/arch/arm/mach-bcmring/include/mach/system.h
index 38b3706..cb78250 100644
--- a/arch/arm/mach-bcmring/include/mach/system.h
+++ b/arch/arm/mach-bcmring/include/mach/system.h
@@ -20,35 +20,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <mach/csp/chipcHw_inline.h>
-
-extern int bcmring_arch_warm_reboot;
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);
-
-	if (mode == 'h') {
-		/* Reboot configured in proc entry */
-		if (bcmring_arch_warm_reboot) {
-			printk("warm reset\n");
-			/* Issue Warm reset (do not reset ethernet switch, keep alive) */
-			chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_WARM);
-		} else {
-			/* Force reset of everything */
-			printk("force reset\n");
-			chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
-		}
-	} else {
-		/* Force reset of everything */
-		printk("force reset\n");
-		chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
-	}
-}
-
 #endif
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
deleted file mode 100644
index 7397bd7..0000000
--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-/*
- * Move VMALLOC_END to 0xf0000000 so that the vm space can range from
- * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
- * larger physical memory designs better.
- */
-#define VMALLOC_END       0xf0000000UL
diff --git a/arch/arm/mach-clps711x/Makefile b/arch/arm/mach-clps711x/Makefile
index 4a19731..f2f0256 100644
--- a/arch/arm/mach-clps711x/Makefile
+++ b/arch/arm/mach-clps711x/Makefile
@@ -4,7 +4,7 @@
 
 # Object file lists.
 
-obj-y			:= irq.o mm.o time.o
+obj-y			:= common.o
 obj-m			:=
 obj-n			:=
 obj-			:=
diff --git a/arch/arm/mach-clps711x/autcpu12.c b/arch/arm/mach-clps711x/autcpu12.c
index 0276091..3fb79a1 100644
--- a/arch/arm/mach-clps711x/autcpu12.c
+++ b/arch/arm/mach-clps711x/autcpu12.c
@@ -68,5 +68,6 @@
 	.map_io		= autcpu12_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-clps711x/cdb89712.c b/arch/arm/mach-clps711x/cdb89712.c
index 25b3bfd..c314f49 100644
--- a/arch/arm/mach-clps711x/cdb89712.c
+++ b/arch/arm/mach-clps711x/cdb89712.c
@@ -59,4 +59,5 @@
 	.map_io		= cdb89712_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-clps711x/ceiva.c b/arch/arm/mach-clps711x/ceiva.c
index 1df9ec6..a70147e 100644
--- a/arch/arm/mach-clps711x/ceiva.c
+++ b/arch/arm/mach-clps711x/ceiva.c
@@ -60,4 +60,5 @@
 	.map_io		= ceiva_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-clps711x/clep7312.c b/arch/arm/mach-clps711x/clep7312.c
index 80496c0..dbc7842 100644
--- a/arch/arm/mach-clps711x/clep7312.c
+++ b/arch/arm/mach-clps711x/clep7312.c
@@ -41,5 +41,6 @@
 	.map_io		= clps711x_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
new file mode 100644
index 0000000..ab1711b
--- /dev/null
+++ b/arch/arm/mach-clps711x/common.c
@@ -0,0 +1,227 @@
+/*
+ *  linux/arch/arm/mach-clps711x/common.c
+ *
+ *  Core support for the CLPS711x-based machines.
+ *
+ *  Copyright (C) 2001,2011 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/timex.h>
+
+#include <asm/sizes.h>
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <asm/leds.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/hardware/clps7111.h>
+
+/*
+ * This maps the generic CLPS711x registers
+ */
+static struct map_desc clps711x_io_desc[] __initdata = {
+	{
+		.virtual	= CLPS7111_VIRT_BASE,
+		.pfn		= __phys_to_pfn(CLPS7111_PHYS_BASE),
+		.length		= SZ_1M,
+		.type		= MT_DEVICE
+	}
+};
+
+void __init clps711x_map_io(void)
+{
+	iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
+}
+
+static void int1_mask(struct irq_data *d)
+{
+	u32 intmr1;
+
+	intmr1 = clps_readl(INTMR1);
+	intmr1 &= ~(1 << d->irq);
+	clps_writel(intmr1, INTMR1);
+}
+
+static void int1_ack(struct irq_data *d)
+{
+	u32 intmr1;
+
+	intmr1 = clps_readl(INTMR1);
+	intmr1 &= ~(1 << d->irq);
+	clps_writel(intmr1, INTMR1);
+
+	switch (d->irq) {
+	case IRQ_CSINT:  clps_writel(0, COEOI);  break;
+	case IRQ_TC1OI:  clps_writel(0, TC1EOI); break;
+	case IRQ_TC2OI:  clps_writel(0, TC2EOI); break;
+	case IRQ_RTCMI:  clps_writel(0, RTCEOI); break;
+	case IRQ_TINT:   clps_writel(0, TEOI);   break;
+	case IRQ_UMSINT: clps_writel(0, UMSEOI); break;
+	}
+}
+
+static void int1_unmask(struct irq_data *d)
+{
+	u32 intmr1;
+
+	intmr1 = clps_readl(INTMR1);
+	intmr1 |= 1 << d->irq;
+	clps_writel(intmr1, INTMR1);
+}
+
+static struct irq_chip int1_chip = {
+	.irq_ack	= int1_ack,
+	.irq_mask	= int1_mask,
+	.irq_unmask	= int1_unmask,
+};
+
+static void int2_mask(struct irq_data *d)
+{
+	u32 intmr2;
+
+	intmr2 = clps_readl(INTMR2);
+	intmr2 &= ~(1 << (d->irq - 16));
+	clps_writel(intmr2, INTMR2);
+}
+
+static void int2_ack(struct irq_data *d)
+{
+	u32 intmr2;
+
+	intmr2 = clps_readl(INTMR2);
+	intmr2 &= ~(1 << (d->irq - 16));
+	clps_writel(intmr2, INTMR2);
+
+	switch (d->irq) {
+	case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
+	}
+}
+
+static void int2_unmask(struct irq_data *d)
+{
+	u32 intmr2;
+
+	intmr2 = clps_readl(INTMR2);
+	intmr2 |= 1 << (d->irq - 16);
+	clps_writel(intmr2, INTMR2);
+}
+
+static struct irq_chip int2_chip = {
+	.irq_ack	= int2_ack,
+	.irq_mask	= int2_mask,
+	.irq_unmask	= int2_unmask,
+};
+
+void __init clps711x_init_irq(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < NR_IRQS; i++) {
+		if (INT1_IRQS & (1 << i)) {
+			irq_set_chip_and_handler(i, &int1_chip,
+						 handle_level_irq);
+			set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		}
+		if (INT2_IRQS & (1 << i)) {
+			irq_set_chip_and_handler(i, &int2_chip,
+						 handle_level_irq);
+			set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		}
+	}
+
+	/*
+	 * Disable interrupts
+	 */
+	clps_writel(0, INTMR1);
+	clps_writel(0, INTMR2);
+
+	/*
+	 * Clear down any pending interrupts
+	 */
+	clps_writel(0, COEOI);
+	clps_writel(0, TC1EOI);
+	clps_writel(0, TC2EOI);
+	clps_writel(0, RTCEOI);
+	clps_writel(0, TEOI);
+	clps_writel(0, UMSEOI);
+	clps_writel(0, SYNCIO);
+	clps_writel(0, KBDEOI);
+}
+
+/*
+ * gettimeoffset() returns time since last timer tick, in usecs.
+ *
+ * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
+ * 'tick' is usecs per jiffy.
+ */
+static unsigned long clps711x_gettimeoffset(void)
+{
+	unsigned long hwticks;
+	hwticks = LATCH - (clps_readl(TC2D) & 0xffff);	/* since last underflow */
+	return (hwticks * (tick_nsec / 1000)) / LATCH;
+}
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t p720t_timer_interrupt(int irq, void *dev_id)
+{
+	timer_tick();
+	return IRQ_HANDLED;
+}
+
+static struct irqaction clps711x_timer_irq = {
+	.name		= "CLPS711x Timer Tick",
+	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= p720t_timer_interrupt,
+};
+
+static void __init clps711x_timer_init(void)
+{
+	struct timespec tv;
+	unsigned int syscon;
+
+	syscon = clps_readl(SYSCON1);
+	syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
+	clps_writel(syscon, SYSCON1);
+
+	clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
+
+	setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
+
+	tv.tv_nsec = 0;
+	tv.tv_sec = clps_readl(RTCDR);
+	do_settimeofday(&tv);
+}
+
+struct sys_timer clps711x_timer = {
+	.init		= clps711x_timer_init,
+	.offset		= clps711x_gettimeoffset,
+};
+
+void clps711x_restart(char mode, const char *cmd)
+{
+	soft_restart(0);
+}
diff --git a/arch/arm/mach-clps711x/common.h b/arch/arm/mach-clps711x/common.h
index 2b8b801..fc0f065 100644
--- a/arch/arm/mach-clps711x/common.h
+++ b/arch/arm/mach-clps711x/common.h
@@ -9,3 +9,4 @@
 extern void clps711x_map_io(void);
 extern void clps711x_init_irq(void);
 extern struct sys_timer clps711x_timer;
+extern void clps711x_restart(char mode, const char *cmd);
diff --git a/arch/arm/mach-clps711x/edb7211-arch.c b/arch/arm/mach-clps711x/edb7211-arch.c
index 9721f61..5fad0b4 100644
--- a/arch/arm/mach-clps711x/edb7211-arch.c
+++ b/arch/arm/mach-clps711x/edb7211-arch.c
@@ -62,4 +62,5 @@
 	.reserve	= edb7211_reserve,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index d992566..3a3f0b7 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -78,4 +78,5 @@
 	.map_io		= clps711x_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-clps711x/include/mach/system.h b/arch/arm/mach-clps711x/include/mach/system.h
index f916cd7..23d6ef8 100644
--- a/arch/arm/mach-clps711x/include/mach/system.h
+++ b/arch/arm/mach-clps711x/include/mach/system.h
@@ -32,9 +32,4 @@
 	mov	r0, r0");
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	cpu_reset(0);
-}
-
 #endif
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
deleted file mode 100644
index 467b961..0000000
--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-clps711x/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-clps711x/irq.c b/arch/arm/mach-clps711x/irq.c
deleted file mode 100644
index c2eceee..0000000
--- a/arch/arm/mach-clps711x/irq.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- *  linux/arch/arm/mach-clps711x/irq.c
- *
- *  Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <asm/hardware/clps7111.h>
-
-static void int1_mask(struct irq_data *d)
-{
-	u32 intmr1;
-
-	intmr1 = clps_readl(INTMR1);
-	intmr1 &= ~(1 << d->irq);
-	clps_writel(intmr1, INTMR1);
-}
-
-static void int1_ack(struct irq_data *d)
-{
-	u32 intmr1;
-
-	intmr1 = clps_readl(INTMR1);
-	intmr1 &= ~(1 << d->irq);
-	clps_writel(intmr1, INTMR1);
-
-	switch (d->irq) {
-	case IRQ_CSINT:  clps_writel(0, COEOI);  break;
-	case IRQ_TC1OI:  clps_writel(0, TC1EOI); break;
-	case IRQ_TC2OI:  clps_writel(0, TC2EOI); break;
-	case IRQ_RTCMI:  clps_writel(0, RTCEOI); break;
-	case IRQ_TINT:   clps_writel(0, TEOI);   break;
-	case IRQ_UMSINT: clps_writel(0, UMSEOI); break;
-	}
-}
-
-static void int1_unmask(struct irq_data *d)
-{
-	u32 intmr1;
-
-	intmr1 = clps_readl(INTMR1);
-	intmr1 |= 1 << d->irq;
-	clps_writel(intmr1, INTMR1);
-}
-
-static struct irq_chip int1_chip = {
-	.irq_ack	= int1_ack,
-	.irq_mask	= int1_mask,
-	.irq_unmask	= int1_unmask,
-};
-
-static void int2_mask(struct irq_data *d)
-{
-	u32 intmr2;
-
-	intmr2 = clps_readl(INTMR2);
-	intmr2 &= ~(1 << (d->irq - 16));
-	clps_writel(intmr2, INTMR2);
-}
-
-static void int2_ack(struct irq_data *d)
-{
-	u32 intmr2;
-
-	intmr2 = clps_readl(INTMR2);
-	intmr2 &= ~(1 << (d->irq - 16));
-	clps_writel(intmr2, INTMR2);
-
-	switch (d->irq) {
-	case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
-	}
-}
-
-static void int2_unmask(struct irq_data *d)
-{
-	u32 intmr2;
-
-	intmr2 = clps_readl(INTMR2);
-	intmr2 |= 1 << (d->irq - 16);
-	clps_writel(intmr2, INTMR2);
-}
-
-static struct irq_chip int2_chip = {
-	.irq_ack	= int2_ack,
-	.irq_mask	= int2_mask,
-	.irq_unmask	= int2_unmask,
-};
-
-void __init clps711x_init_irq(void)
-{
-	unsigned int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-	        if (INT1_IRQS & (1 << i)) {
-	        	irq_set_chip_and_handler(i, &int1_chip,
-						 handle_level_irq);
-	        	set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
-		}
-		if (INT2_IRQS & (1 << i)) {
-			irq_set_chip_and_handler(i, &int2_chip,
-						 handle_level_irq);
-			set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
-		}			
-	}
-
-	/*
-	 * Disable interrupts
-	 */
-	clps_writel(0, INTMR1);
-	clps_writel(0, INTMR2);
-
-	/*
-	 * Clear down any pending interrupts
-	 */
-	clps_writel(0, COEOI);
-	clps_writel(0, TC1EOI);
-	clps_writel(0, TC2EOI);
-	clps_writel(0, RTCEOI);
-	clps_writel(0, TEOI);
-	clps_writel(0, UMSEOI);
-	clps_writel(0, SYNCIO);
-	clps_writel(0, KBDEOI);
-}
diff --git a/arch/arm/mach-clps711x/mm.c b/arch/arm/mach-clps711x/mm.c
deleted file mode 100644
index 9865921..0000000
--- a/arch/arm/mach-clps711x/mm.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  linux/arch/arm/mach-clps711x/mm.c
- *
- *  Generic MM setup for the CLPS711x-based machines.
- *
- *  Copyright (C) 2001 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/sizes.h>
-#include <mach/hardware.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/mach/map.h>
-#include <asm/hardware/clps7111.h>
-
-/*
- * This maps the generic CLPS711x registers
- */
-static struct map_desc clps711x_io_desc[] __initdata = {
-	{
-		.virtual	= CLPS7111_VIRT_BASE,
-		.pfn		= __phys_to_pfn(CLPS7111_PHYS_BASE),
-		.length		= SZ_1M,
-		.type		= MT_DEVICE
-	}
-};
-
-void __init clps711x_map_io(void)
-{
-	iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
-}
diff --git a/arch/arm/mach-clps711x/p720t.c b/arch/arm/mach-clps711x/p720t.c
index 6ecea95f..42ee8f3 100644
--- a/arch/arm/mach-clps711x/p720t.c
+++ b/arch/arm/mach-clps711x/p720t.c
@@ -93,6 +93,7 @@
 	.map_io		= p720t_map_io,
 	.init_irq	= clps711x_init_irq,
 	.timer		= &clps711x_timer,
+	.restart	= clps711x_restart,
 MACHINE_END
 
 static int p720t_hw_init(void)
diff --git a/arch/arm/mach-clps711x/time.c b/arch/arm/mach-clps711x/time.c
deleted file mode 100644
index d581ef0..0000000
--- a/arch/arm/mach-clps711x/time.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *  linux/arch/arm/mach-clps711x/time.c
- *
- *  Copyright (C) 2001 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/timex.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/leds.h>
-#include <asm/hardware/clps7111.h>
-
-#include <asm/mach/time.h>
-
-
-/*
- * gettimeoffset() returns time since last timer tick, in usecs.
- *
- * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
- * 'tick' is usecs per jiffy.
- */
-static unsigned long clps711x_gettimeoffset(void)
-{
-	unsigned long hwticks;
-	hwticks = LATCH - (clps_readl(TC2D) & 0xffff);	/* since last underflow */
-	return (hwticks * (tick_nsec / 1000)) / LATCH;
-}
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t
-p720t_timer_interrupt(int irq, void *dev_id)
-{
-	timer_tick();
-	return IRQ_HANDLED;
-}
-
-static struct irqaction clps711x_timer_irq = {
-	.name		= "CLPS711x Timer Tick",
-	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
-	.handler	= p720t_timer_interrupt,
-};
-
-static void __init clps711x_timer_init(void)
-{
-	struct timespec tv;
-	unsigned int syscon;
-
-	syscon = clps_readl(SYSCON1);
-	syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
-	clps_writel(syscon, SYSCON1);
-
-	clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
-
-	setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
-
-	tv.tv_nsec = 0;
-	tv.tv_sec = clps_readl(RTCDR);
-	do_settimeofday(&tv);
-}
-
-struct sys_timer clps711x_timer = {
-	.init		= clps711x_timer_init,
-	.offset		= clps711x_gettimeoffset,
-};
diff --git a/arch/arm/mach-cns3xxx/cns3420vb.c b/arch/arm/mach-cns3xxx/cns3420vb.c
index 55f7b4b..2c5fb4c 100644
--- a/arch/arm/mach-cns3xxx/cns3420vb.c
+++ b/arch/arm/mach-cns3xxx/cns3420vb.c
@@ -26,6 +26,7 @@
 #include <linux/mtd/partitions.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
@@ -201,5 +202,7 @@
 	.map_io		= cns3420_map_io,
 	.init_irq	= cns3xxx_init_irq,
 	.timer		= &cns3xxx_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= cns3420_init,
+	.restart	= cns3xxx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h
index fcd2253..4894b8c 100644
--- a/arch/arm/mach-cns3xxx/core.h
+++ b/arch/arm/mach-cns3xxx/core.h
@@ -22,5 +22,6 @@
 void __init cns3xxx_map_io(void);
 void __init cns3xxx_init_irq(void);
 void cns3xxx_power_off(void);
+void cns3xxx_restart(char, const char *);
 
 #endif /* __CNS3XXX_CORE_H */
diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
index d87bfc3..01c57df 100644
--- a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
+++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <asm/hardware/entry-macro-gic.S>
-
 		.macro	disable_fiq
 		.endm
 
diff --git a/arch/arm/mach-cns3xxx/include/mach/system.h b/arch/arm/mach-cns3xxx/include/mach/system.h
index 4f16c9b..9e56b7d 100644
--- a/arch/arm/mach-cns3xxx/include/mach/system.h
+++ b/arch/arm/mach-cns3xxx/include/mach/system.h
@@ -11,7 +11,6 @@
 #ifndef __MACH_SYSTEM_H
 #define __MACH_SYSTEM_H
 
-#include <linux/io.h>
 #include <asm/proc-fns.h>
 
 static inline void arch_idle(void)
@@ -23,6 +22,4 @@
 	cpu_do_idle();
 }
 
-void arch_reset(char mode, const char *cmd);
-
 #endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
deleted file mode 100644
index 1dd231d..0000000
--- a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright 2000 Russell King.
- * Copyright 2003 ARM Limited
- * Copyright 2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
-#define VMALLOC_END		0xd8000000UL
diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c
index 0c04678..3645808 100644
--- a/arch/arm/mach-cns3xxx/pm.c
+++ b/arch/arm/mach-cns3xxx/pm.c
@@ -11,9 +11,9 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
-#include <mach/system.h>
 #include <mach/cns3xxx.h>
 #include <mach/pm.h>
+#include "core.h"
 
 void cns3xxx_pwr_clk_en(unsigned int block)
 {
@@ -89,7 +89,7 @@
 }
 EXPORT_SYMBOL(cns3xxx_pwr_soft_rst);
 
-void arch_reset(char mode, const char *cmd)
+void cns3xxx_restart(char mode, const char *cmd)
 {
 	/*
 	 * To reset, we hit the on-board reset register
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 495e313..2db78bd 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -4,7 +4,7 @@
 #
 
 # Common objects
-obj-y 			:= time.o clock.o serial.o io.o psc.o \
+obj-y 			:= time.o clock.o serial.o psc.o \
 			   dma.o usb.o common.o sram.o aemif.o
 
 obj-$(CONFIG_DAVINCI_MUX)		+= mux.o
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 11c3db9..dc1afe5 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -682,4 +682,5 @@
 	.timer		= &davinci_timer,
 	.init_machine	= da830_evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6659a90..f8a682f 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1411,4 +1411,5 @@
 	.timer		= &davinci_timer,
 	.init_machine	= da850_evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 4e0e707..275341f 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -357,4 +357,5 @@
 	.timer	      = &davinci_timer,
 	.init_machine = dm355_evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index ff2d241..e99db28 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -276,4 +276,5 @@
 	.timer	      = &davinci_timer,
 	.init_machine = dm355_leopard_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 46e1f41..346e1de 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -618,5 +618,6 @@
 	.timer		= &davinci_timer,
 	.init_machine	= dm365_evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 0cf8abf..a64b49c 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -719,4 +719,5 @@
 	.timer	      = &davinci_timer,
 	.init_machine = davinci_evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 635bf77..6401755 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -799,6 +799,7 @@
 	.timer        = &davinci_timer,
 	.init_machine = evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
 
 MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
@@ -808,5 +809,6 @@
 	.timer        = &davinci_timer,
 	.init_machine = evm_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 3cfff55..672d820 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -573,4 +573,5 @@
 	.timer		= &davinci_timer,
 	.init_machine	= mityomapl138_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index e5f231a..6c4a164 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -278,4 +278,5 @@
 	.timer		= &davinci_timer,
 	.init_machine = davinci_ntosd2_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index c6701e4..e7c0c7c 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -344,4 +344,5 @@
 	.timer		= &davinci_timer,
 	.init_machine	= omapl138_hawk_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= da8xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 5dd4da9..0b136a8 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -157,4 +157,5 @@
 	.timer	      = &davinci_timer,
 	.init_machine = davinci_sffsdr_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= davinci_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index f69e40a..5f14e30 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -283,4 +283,5 @@
 	.timer		= &davinci_timer,
 	.init_machine	= tnetv107x_evm_board_init,
 	.dma_zone_size	= SZ_128M,
+	.restart	= tnetv107x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index 865ffe5..cb9b2e4 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -97,9 +97,6 @@
 	local_flush_tlb_all();
 	flush_cache_all();
 
-	if (!davinci_soc_info.reset)
-		davinci_soc_info.reset = davinci_watchdog_reset;
-
 	/*
 	 * We want to check CPU revision early for cpu_is_xxxx() macros.
 	 * IO space mapping must be initialized before we can do that.
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index a6bf5dc..deee5c2 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -1201,7 +1201,6 @@
 	.gpio_irq		= IRQ_DA8XX_GPIO0,
 	.serial_dev		= &da8xx_serial_device,
 	.emac_pdata		= &da8xx_emac_pdata,
-	.reset_device		= &da8xx_wdt_device,
 };
 
 void __init da830_init(void)
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index b047f87..0ed7fdb 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -1121,7 +1121,6 @@
 	.emac_pdata		= &da8xx_emac_pdata,
 	.sram_dma		= DA8XX_ARM_RAM_BASE,
 	.sram_len		= SZ_8K,
-	.reset_device		= &da8xx_wdt_device,
 };
 
 void __init da850_init(void)
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 68def71..42dbf3d 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -363,6 +363,11 @@
 	.resource	= da8xx_watchdog_resources,
 };
 
+void da8xx_restart(char mode, const char *cmd)
+{
+	davinci_watchdog_reset(&da8xx_wdt_device);
+}
+
 int __init da8xx_register_watchdog(void)
 {
 	return platform_device_register(&da8xx_wdt_device);
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 806a2f0..50c0156 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -291,6 +291,11 @@
 	.resource	= wdt_resources,
 };
 
+void davinci_restart(char mode, const char *cmd)
+{
+	davinci_watchdog_reset(&davinci_wdt_device);
+}
+
 static void davinci_init_wdt(void)
 {
 	platform_device_register(&davinci_wdt_device);
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index fe520d4..19667cf 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -853,7 +853,6 @@
 	.serial_dev		= &dm355_serial_device,
 	.sram_dma		= 0x00010000,
 	.sram_len		= SZ_32K,
-	.reset_device		= &davinci_wdt_device,
 };
 
 void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 679e168..f15b435 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1083,7 +1083,6 @@
 	.emac_pdata		= &dm365_emac_pdata,
 	.sram_dma		= 0x00010000,
 	.sram_len		= SZ_32K,
-	.reset_device		= &davinci_wdt_device,
 };
 
 void __init dm365_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 3470983..0800f9c 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -767,7 +767,6 @@
 	.emac_pdata		= &dm644x_emac_pdata,
 	.sram_dma		= 0x00008000,
 	.sram_len		= SZ_16K,
-	.reset_device		= &davinci_wdt_device,
 };
 
 void __init dm644x_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index af27c13..00f7743 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -854,7 +854,6 @@
 	.emac_pdata		= &dm646x_emac_pdata,
 	.sram_dma		= 0x10010000,
 	.sram_len		= SZ_32K,
-	.reset_device		= &davinci_wdt_device,
 };
 
 void __init dm646x_init_mcasp0(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index a57cba2..5cd39a4 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -77,14 +77,13 @@
 	struct emac_platform_data	*emac_pdata;
 	dma_addr_t			sram_dma;
 	unsigned			sram_len;
-	struct platform_device		*reset_device;
-	void				(*reset)(struct platform_device *);
 };
 
 extern struct davinci_soc_info davinci_soc_info;
 
 extern void davinci_common_init(struct davinci_soc_info *soc_info);
 extern void davinci_init_ide(void);
+void davinci_restart(char mode, const char *cmd);
 
 /* standard place to map on-chip SRAMs; they *may* support DMA */
 #define SRAM_VIRT	0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index eaca7d8..ee3461d 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -91,6 +91,7 @@
 void __iomem * __init da8xx_get_mem_ctlr(void);
 int da850_register_pm(struct platform_device *pdev);
 int __init da850_register_sata(unsigned long refclkpn);
+void da8xx_restart(char mode, const char *cmd);
 
 extern struct platform_device da8xx_serial_device;
 extern struct emac_platform_data da8xx_emac_pdata;
diff --git a/arch/arm/mach-davinci/include/mach/io.h b/arch/arm/mach-davinci/include/mach/io.h
index d1b9549..b2267d1 100644
--- a/arch/arm/mach-davinci/include/mach/io.h
+++ b/arch/arm/mach-davinci/include/mach/io.h
@@ -21,12 +21,4 @@
 #define __mem_pci(a)		(a)
 #define __mem_isa(a)		(a)
 
-#ifndef __ASSEMBLER__
-#define __arch_ioremap		davinci_ioremap
-#define __arch_iounmap		davinci_iounmap
-
-void __iomem *davinci_ioremap(unsigned long phys, size_t size,
-			      unsigned int type);
-void davinci_iounmap(volatile void __iomem *addr);
-#endif
 #endif /* __ASM_ARCH_IO_H */
diff --git a/arch/arm/mach-davinci/include/mach/system.h b/arch/arm/mach-davinci/include/mach/system.h
index e65629c..fcb7a01 100644
--- a/arch/arm/mach-davinci/include/mach/system.h
+++ b/arch/arm/mach-davinci/include/mach/system.h
@@ -18,10 +18,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	if (davinci_soc_info.reset)
-		davinci_soc_info.reset(davinci_soc_info.reset_device);
-}
-
 #endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-davinci/include/mach/tnetv107x.h b/arch/arm/mach-davinci/include/mach/tnetv107x.h
index 89c1fdc..83e5926 100644
--- a/arch/arm/mach-davinci/include/mach/tnetv107x.h
+++ b/arch/arm/mach-davinci/include/mach/tnetv107x.h
@@ -54,6 +54,7 @@
 extern void __init tnetv107x_init(void);
 extern void __init tnetv107x_devices_init(struct tnetv107x_device_info *);
 extern void __init tnetv107x_irq_init(void);
+void tnetv107x_restart(char mode, const char *cmd);
 
 #endif
 
diff --git a/arch/arm/mach-davinci/include/mach/vmalloc.h b/arch/arm/mach-davinci/include/mach/vmalloc.h
deleted file mode 100644
index d49646a..0000000
--- a/arch/arm/mach-davinci/include/mach/vmalloc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * DaVinci vmalloc definitions
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <mach/hardware.h>
-
-/* Allow vmalloc range until the IO virtual range minus a 2M "hole" */
-#define VMALLOC_END	  (IO_VIRT - (2<<20))
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
deleted file mode 100644
index 8ea60a8b..0000000
--- a/arch/arm/mach-davinci/io.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * DaVinci I/O mapping code
- *
- * Copyright (C) 2005-2006 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <asm/tlb.h>
-#include <asm/mach/map.h>
-
-#include <mach/common.h>
-
-/*
- * Intercept ioremap() requests for addresses in our fixed mapping regions.
- */
-void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
-{
-	struct map_desc *desc = davinci_soc_info.io_desc;
-	int desc_num = davinci_soc_info.io_desc_num;
-	int i;
-
-	for (i = 0; i < desc_num; i++, desc++) {
-		unsigned long iophys = __pfn_to_phys(desc->pfn);
-		unsigned long iosize = desc->length;
-
-		if (p >= iophys && (p + size) <= (iophys + iosize))
-			return __io(desc->virtual + p - iophys);
-	}
-
-	return __arm_ioremap_caller(p, size, type,
-					__builtin_return_address(0));
-}
-EXPORT_SYMBOL(davinci_ioremap);
-
-void davinci_iounmap(volatile void __iomem *addr)
-{
-	unsigned long virt = (unsigned long)addr;
-
-	if (virt >= VMALLOC_START && virt < VMALLOC_END)
-		__iounmap(addr);
-}
-EXPORT_SYMBOL(davinci_iounmap);
diff --git a/arch/arm/mach-davinci/tnetv107x.c b/arch/arm/mach-davinci/tnetv107x.c
index 409bb86..dc1a209 100644
--- a/arch/arm/mach-davinci/tnetv107x.c
+++ b/arch/arm/mach-davinci/tnetv107x.c
@@ -730,6 +730,11 @@
 	__raw_writel(1, &regs->kick);
 }
 
+void tnetv107x_restart(char mode, const char *cmd)
+{
+	tnetv107x_watchdog_reset(&tnetv107x_wdt_device);
+}
+
 static struct davinci_soc_info tnetv107x_soc_info = {
 	.io_desc		= io_desc,
 	.io_desc_num		= ARRAY_SIZE(io_desc),
@@ -752,8 +757,6 @@
 	.gpio_num		= TNETV107X_N_GPIO,
 	.timer_info		= &timer_info,
 	.serial_dev		= &tnetv107x_serial_device,
-	.reset			= tnetv107x_watchdog_reset,
-	.reset_device		= &tnetv107x_wdt_device,
 };
 
 void __init tnetv107x_init(void)
diff --git a/arch/arm/mach-dove/cm-a510.c b/arch/arm/mach-dove/cm-a510.c
index c8a406f..792b4e2 100644
--- a/arch/arm/mach-dove/cm-a510.c
+++ b/arch/arm/mach-dove/cm-a510.c
@@ -93,4 +93,5 @@
 	.init_early	= dove_init_early,
 	.init_irq	= dove_init_irq,
 	.timer		= &dove_timer,
+	.restart	= dove_restart,
 MACHINE_END
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index a9e0dae..13bb236 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -292,3 +292,19 @@
 	dove_xor0_init();
 	dove_xor1_init();
 }
+
+void dove_restart(char mode, const char *cmd)
+{
+	/*
+	 * Enable soft reset to assert RSTOUTn.
+	 */
+	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
+
+	/*
+	 * Assert soft reset.
+	 */
+	writel(SOFT_RESET, SYSTEM_SOFT_RESET);
+
+	while (1)
+		;
+}
diff --git a/arch/arm/mach-dove/common.h b/arch/arm/mach-dove/common.h
index 6a2046e..4202730 100644
--- a/arch/arm/mach-dove/common.h
+++ b/arch/arm/mach-dove/common.h
@@ -39,5 +39,6 @@
 void dove_i2c_init(void);
 void dove_sdio0_init(void);
 void dove_sdio1_init(void);
+void dove_restart(char, const char *);
 
 #endif
diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c
index 11ea34e..ea77ae4 100644
--- a/arch/arm/mach-dove/dove-db-setup.c
+++ b/arch/arm/mach-dove/dove-db-setup.c
@@ -100,4 +100,5 @@
 	.init_early	= dove_init_early,
 	.init_irq	= dove_init_irq,
 	.timer		= &dove_timer,
+	.restart	= dove_restart,
 MACHINE_END
diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h
index b20ec9a..ad1165d 100644
--- a/arch/arm/mach-dove/include/mach/dove.h
+++ b/arch/arm/mach-dove/include/mach/dove.h
@@ -11,8 +11,6 @@
 #ifndef __ASM_ARCH_DOVE_H
 #define __ASM_ARCH_DOVE_H
 
-#include <mach/vmalloc.h>
-
 /*
  * Marvell Dove address maps.
  *
diff --git a/arch/arm/mach-dove/include/mach/system.h b/arch/arm/mach-dove/include/mach/system.h
index 356afda..3027954 100644
--- a/arch/arm/mach-dove/include/mach/system.h
+++ b/arch/arm/mach-dove/include/mach/system.h
@@ -9,28 +9,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <mach/bridge-regs.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * Enable soft reset to assert RSTOUTn.
-	 */
-	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
-	/*
-	 * Assert soft reset.
-	 */
-	writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
-	while (1)
-		;
-}
-
-
 #endif
diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h
deleted file mode 100644
index a28792c..0000000
--- a/arch/arm/mach-dove/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfd800000UL
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index d0ce8ab..294aad0 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -278,13 +278,19 @@
 
 arch_initcall(ebsa110_init);
 
+static void ebsa110_restart(char mode, const char *cmd)
+{
+	soft_restart(0x80000000);
+}
+
 MACHINE_START(EBSA110, "EBSA110")
 	/* Maintainer: Russell King */
 	.atag_offset	= 0x400,
 	.reserve_lp0	= 1,
 	.reserve_lp2	= 1,
-	.soft_reboot	= 1,
+	.restart_mode	= 's',
 	.map_io		= ebsa110_map_io,
 	.init_irq	= ebsa110_init_irq,
 	.timer		= &ebsa110_timer,
+	.restart	= ebsa110_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ebsa110/include/mach/system.h b/arch/arm/mach-ebsa110/include/mach/system.h
index 9a26245..2e4af65 100644
--- a/arch/arm/mach-ebsa110/include/mach/system.h
+++ b/arch/arm/mach-ebsa110/include/mach/system.h
@@ -34,6 +34,4 @@
 	asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
 }
 
-#define arch_reset(mode, cmd)	cpu_reset(0x80000000)
-
 #endif
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
deleted file mode 100644
index ea141b7a..0000000
--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  arch/arm/mach-ebsa110/include/mach/vmalloc.h
- *
- *  Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END       0xdf000000UL
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index 0713448..681e939 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -16,6 +16,7 @@
 
 #include <mach/hardware.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -36,6 +37,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= adssphere_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 2432a6b..24203f9 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -906,3 +906,15 @@
 	platform_device_register(&ep93xx_ohci_device);
 	platform_device_register(&ep93xx_leds);
 }
+
+void ep93xx_restart(char mode, const char *cmd)
+{
+	/*
+	 * Set then clear the SWRST bit to initiate a software reset
+	 */
+	ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST);
+	ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST);
+
+	while (1)
+		;
+}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 70ef8c5..d115653 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -39,6 +39,7 @@
 #include <mach/ep93xx_spi.h>
 #include <mach/gpio-ep93xx.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -250,8 +251,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -261,8 +264,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -272,8 +277,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -283,8 +290,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -294,8 +303,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -305,8 +316,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -316,8 +329,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -327,7 +342,9 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= edb93xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index 45ee205..af46970 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -16,6 +16,7 @@
 
 #include <mach/hardware.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -36,6 +37,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= gesbc9312_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/include/mach/entry-macro.S b/arch/arm/mach-ep93xx/include/mach/entry-macro.S
index 96b85e2..9be6edc 100644
--- a/arch/arm/mach-ep93xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-ep93xx/include/mach/entry-macro.S
@@ -9,51 +9,9 @@
  * the Free Software Foundation; either version 2 of the License, or (at
  * your option) any later version.
  */
-#include <mach/ep93xx-regs.h>
 
 		.macro	disable_fiq
 		.endm
 
-		.macro  get_irqnr_preamble, base, tmp
-		.endm
-
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		ldr	\base, =(EP93XX_AHB_VIRT_BASE)
-		orr	\base, \base, #0x000b0000
-		mov	\irqnr, #0
-		ldr	\irqstat, [\base]		@ lower 32 interrupts
-		cmp	\irqstat, #0
-		bne	1001f
-
-		eor	\base, \base, #0x00070000
-		ldr	\irqstat, [\base]		@ upper 32 interrupts
-		cmp	\irqstat, #0
-		beq	1002f
-		mov	\irqnr, #0x20
-
-1001:
-		movs	\tmp, \irqstat, lsl #16
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #16
-
-		movs	\tmp, \irqstat, lsl #8
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #8
-
-		movs	\tmp, \irqstat, lsl #4
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #4
-
-		movs	\tmp, \irqstat, lsl #2
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #2
-
-		movs	\tmp, \irqstat, lsl #1
-		addeq	\irqnr, \irqnr, #1
-		orrs	\base, \base, #1
-
-1002:
-		.endm
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 5066045..d4c9349 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -66,4 +66,6 @@
 void ep93xx_init_devices(void);
 extern struct sys_timer ep93xx_timer;
 
+void ep93xx_restart(char, const char *);
+
 #endif
diff --git a/arch/arm/mach-ep93xx/include/mach/system.h b/arch/arm/mach-ep93xx/include/mach/system.h
index 6d661fe..b5bec7c 100644
--- a/arch/arm/mach-ep93xx/include/mach/system.h
+++ b/arch/arm/mach-ep93xx/include/mach/system.h
@@ -1,24 +1,7 @@
 /*
  * arch/arm/mach-ep93xx/include/mach/system.h
  */
-
-#include <mach/hardware.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	local_irq_disable();
-
-	/*
-	 * Set then clear the SWRST bit to initiate a software reset
-	 */
-	ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST);
-	ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST);
-
-	while (1)
-		;
-}
diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h
deleted file mode 100644
index 1b3f25d..0000000
--- a/arch/arm/mach-ep93xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfe800000UL
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index e72f736..7b98084 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -18,6 +18,7 @@
 
 #include <mach/hardware.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -80,8 +81,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -91,8 +94,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -102,8 +107,10 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
 
@@ -113,7 +120,9 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= micro9_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index 52e090d..f4e553e 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -25,6 +25,7 @@
 #include <mach/fb.h>
 #include <mach/gpio-ep93xx.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -80,6 +81,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= simone_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 8121e3a..fd84633 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -31,6 +31,7 @@
 #include <mach/fb.h>
 #include <mach/gpio-ep93xx.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -177,6 +178,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer 		= &ep93xx_timer,
 	.init_machine	= snappercl15_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 8b2f143..79f8ecf 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -23,6 +23,7 @@
 #include <mach/hardware.h>
 #include <mach/ts72xx.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
@@ -247,6 +248,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= ts72xx_map_io,
 	.init_irq	= ep93xx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= ts72xx_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index d96e4db..03dd401 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -361,4 +361,5 @@
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
 	.init_machine	= vision_init_machine,
+	.restart	= ep93xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 724ec0f..e1efbca 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -17,6 +17,8 @@
 
 config ARCH_EXYNOS4
 	bool "SAMSUNG EXYNOS4"
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Samsung EXYNOS4 SoCs based systems
 
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 59069a3..bcb9efc 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -10,15 +10,17 @@
 obj-n				:=
 obj-				:=
 
-# Core support for EXYNOS4 system
+# Core
 
-obj-$(CONFIG_ARCH_EXYNOS4)	+= cpu.o init.o clock.o irq-combiner.o setup-i2c0.o
-obj-$(CONFIG_ARCH_EXYNOS4)	+= irq-eint.o dma.o pmu.o
+obj-$(CONFIG_ARCH_EXYNOS4)	+= common.o clock.o
 obj-$(CONFIG_CPU_EXYNOS4210)	+= clock-exynos4210.o
 obj-$(CONFIG_SOC_EXYNOS4212)	+= clock-exynos4212.o
+
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 
+obj-$(CONFIG_ARCH_EXYNOS4)	+= dma.o pmu.o
+
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
 
 obj-$(CONFIG_EXYNOS4_MCT)	+= mct.o
@@ -45,6 +47,7 @@
 obj-$(CONFIG_EXYNOS4_DEV_SYSMMU)	+= dev-sysmmu.o
 obj-$(CONFIG_EXYNOS4_DEV_DWMCI)		+= dev-dwmci.o
 
+obj-$(CONFIG_ARCH_EXYNOS4)		+= setup-i2c0.o
 obj-$(CONFIG_EXYNOS4_SETUP_FIMC)	+= setup-fimc.o
 obj-$(CONFIG_EXYNOS4_SETUP_FIMD0)	+= setup-fimd0.o
 obj-$(CONFIG_EXYNOS4_SETUP_I2C1)	+= setup-i2c1.o
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index b9d5ef6..a5823a7 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -23,7 +23,6 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/exynos4.h>
 #include <plat/pm.h>
 
 #include <mach/hardware.h>
@@ -31,6 +30,8 @@
 #include <mach/regs-clock.h>
 #include <mach/exynos4-clock.h>
 
+#include "common.h"
+
 static struct sleep_save exynos4210_clock_save[] = {
 	SAVE_ITEM(S5P_CLKSRC_IMAGE),
 	SAVE_ITEM(S5P_CLKSRC_LCD1),
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 77d5dec..26a668b 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -23,7 +23,6 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/exynos4.h>
 #include <plat/pm.h>
 
 #include <mach/hardware.h>
@@ -31,6 +30,8 @@
 #include <mach/regs-clock.h>
 #include <mach/exynos4-clock.h>
 
+#include "common.h"
+
 static struct sleep_save exynos4212_clock_save[] = {
 	SAVE_ITEM(S5P_CLKSRC_IMAGE),
 	SAVE_ITEM(S5P_CLKDIV_IMAGE),
diff --git a/arch/arm/mach-exynos/clock.c b/arch/arm/mach-exynos/clock.c
index 2894f0a..83616a0 100644
--- a/arch/arm/mach-exynos/clock.c
+++ b/arch/arm/mach-exynos/clock.c
@@ -21,7 +21,6 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/exynos4.h>
 #include <plat/pm.h>
 
 #include <mach/map.h>
@@ -29,6 +28,8 @@
 #include <mach/sysmmu.h>
 #include <mach/exynos4-clock.h>
 
+#include "common.h"
+
 static struct sleep_save exynos4_clock_save[] = {
 	SAVE_ITEM(S5P_CLKDIV_LEFTBUS),
 	SAVE_ITEM(S5P_CLKGATE_IP_LEFTBUS),
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
new file mode 100644
index 0000000..b6ac6ee
--- /dev/null
+++ b/arch/arm/mach-exynos/common.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Codes for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/serial_core.h>
+
+#include <asm/proc-fns.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-pmu.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/pm.h>
+#include <plat/sdhci.h>
+#include <plat/gpio-cfg.h>
+#include <plat/adc-core.h>
+#include <plat/fb-core.h>
+#include <plat/fimc-core.h>
+#include <plat/iic-core.h>
+#include <plat/tv-core.h>
+#include <plat/regs-serial.h>
+
+#include "common.h"
+
+unsigned int gic_bank_offset __read_mostly;
+
+static const char name_exynos4210[] = "EXYNOS4210";
+static const char name_exynos4212[] = "EXYNOS4212";
+static const char name_exynos4412[] = "EXYNOS4412";
+
+static struct cpu_table cpu_ids[] __initdata = {
+	{
+		.idcode		= EXYNOS4210_CPU_ID,
+		.idmask		= EXYNOS4_CPU_MASK,
+		.map_io		= exynos4_map_io,
+		.init_clocks	= exynos4_init_clocks,
+		.init_uarts	= exynos4_init_uarts,
+		.init		= exynos_init,
+		.name		= name_exynos4210,
+	}, {
+		.idcode		= EXYNOS4212_CPU_ID,
+		.idmask		= EXYNOS4_CPU_MASK,
+		.map_io		= exynos4_map_io,
+		.init_clocks	= exynos4_init_clocks,
+		.init_uarts	= exynos4_init_uarts,
+		.init		= exynos_init,
+		.name		= name_exynos4212,
+	}, {
+		.idcode		= EXYNOS4412_CPU_ID,
+		.idmask		= EXYNOS4_CPU_MASK,
+		.map_io		= exynos4_map_io,
+		.init_clocks	= exynos4_init_clocks,
+		.init_uarts	= exynos4_init_uarts,
+		.init		= exynos_init,
+		.name		= name_exynos4412,
+	},
+};
+
+/* Initial IO mappings */
+
+static struct map_desc exynos_iodesc[] __initdata = {
+	{
+		.virtual	= (unsigned long)S5P_VA_CHIPID,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_CHIPID),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_SYS,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_TIMER,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SROMC,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_PMU,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_UART,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
+		.length		= SZ_512K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static struct map_desc exynos4_iodesc[] __initdata = {
+	{
+		.virtual	= (unsigned long)S5P_VA_CMU,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
+		.length		= SZ_128K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
+		.length		= SZ_8K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_L2CC,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_GPIO1,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO1),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_GPIO2,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO2),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_GPIO3,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO3),
+		.length		= SZ_256,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_DMC0,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static struct map_desc exynos4_iodesc0[] __initdata = {
+	{
+		.virtual	= (unsigned long)S5P_VA_SYSRAM,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static struct map_desc exynos4_iodesc1[] __initdata = {
+	{
+		.virtual	= (unsigned long)S5P_VA_SYSRAM,
+		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static void exynos_idle(void)
+{
+	if (!need_resched())
+		cpu_do_idle();
+
+	local_irq_enable();
+}
+
+void exynos4_restart(char mode, const char *cmd)
+{
+	__raw_writel(0x1, S5P_SWRESET);
+}
+
+/*
+ * exynos_map_io
+ *
+ * register the standard cpu IO areas
+ */
+
+void __init exynos_init_io(struct map_desc *mach_desc, int size)
+{
+	/* initialize the io descriptors we need for initialization */
+	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
+	if (mach_desc)
+		iotable_init(mach_desc, size);
+
+	/* detect cpu id and rev. */
+	s5p_init_cpu(S5P_VA_CHIPID);
+
+	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+void __init exynos4_map_io(void)
+{
+	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
+
+	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
+		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
+	else
+		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
+
+	/* initialize device information early */
+	exynos4_default_sdhci0();
+	exynos4_default_sdhci1();
+	exynos4_default_sdhci2();
+	exynos4_default_sdhci3();
+
+	s3c_adc_setname("samsung-adc-v3");
+
+	s3c_fimc_setname(0, "exynos4-fimc");
+	s3c_fimc_setname(1, "exynos4-fimc");
+	s3c_fimc_setname(2, "exynos4-fimc");
+	s3c_fimc_setname(3, "exynos4-fimc");
+
+	/* The I2C bus controllers are directly compatible with s3c2440 */
+	s3c_i2c0_setname("s3c2440-i2c");
+	s3c_i2c1_setname("s3c2440-i2c");
+	s3c_i2c2_setname("s3c2440-i2c");
+
+	s5p_fb_setname(0, "exynos4-fb");
+	s5p_hdmi_setname("exynos4-hdmi");
+}
+
+void __init exynos4_init_clocks(int xtal)
+{
+	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
+	s3c24xx_register_baseclocks(xtal);
+	s5p_register_clocks(xtal);
+
+	if (soc_is_exynos4210())
+		exynos4210_register_clocks();
+	else if (soc_is_exynos4212() || soc_is_exynos4412())
+		exynos4212_register_clocks();
+
+	exynos4_register_clocks();
+	exynos4_setup_clocks();
+}
+
+#define COMBINER_ENABLE_SET	0x0
+#define COMBINER_ENABLE_CLEAR	0x4
+#define COMBINER_INT_STATUS	0xC
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+	unsigned int irq_offset;
+	unsigned int irq_mask;
+	void __iomem *base;
+};
+
+static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+	struct combiner_chip_data *combiner_data =
+		irq_data_get_irq_chip_data(data);
+
+	return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+	u32 mask = 1 << (data->irq % 32);
+
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+	u32 mask = 1 << (data->irq % 32);
+
+	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_get_chip(irq);
+	unsigned int cascade_irq, combiner_irq;
+	unsigned long status;
+
+	chained_irq_enter(chip, desc);
+
+	spin_lock(&irq_controller_lock);
+	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+	spin_unlock(&irq_controller_lock);
+	status &= chip_data->irq_mask;
+
+	if (status == 0)
+		goto out;
+
+	combiner_irq = __ffs(status);
+
+	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
+	if (unlikely(cascade_irq >= NR_IRQS))
+		do_bad_IRQ(cascade_irq, desc);
+	else
+		generic_handle_irq(cascade_irq);
+
+ out:
+	chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip combiner_chip = {
+	.name		= "COMBINER",
+	.irq_mask	= combiner_mask_irq,
+	.irq_unmask	= combiner_unmask_irq,
+};
+
+static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+{
+	if (combiner_nr >= MAX_COMBINER_NR)
+		BUG();
+	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+		BUG();
+	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+}
+
+static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
+			  unsigned int irq_start)
+{
+	unsigned int i;
+
+	if (combiner_nr >= MAX_COMBINER_NR)
+		BUG();
+
+	combiner_data[combiner_nr].base = base;
+	combiner_data[combiner_nr].irq_offset = irq_start;
+	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+
+	/* Disable all interrupts */
+
+	__raw_writel(combiner_data[combiner_nr].irq_mask,
+		     base + COMBINER_ENABLE_CLEAR);
+
+	/* Setup the Linux IRQ subsystem */
+
+	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
+				+ MAX_IRQ_IN_COMBINER; i++) {
+		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
+		irq_set_chip_data(i, &combiner_data[combiner_nr]);
+		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+	}
+}
+
+static void exynos4_gic_irq_fix_base(struct irq_data *d)
+{
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+
+	gic_data->cpu_base = S5P_VA_GIC_CPU +
+			    (gic_bank_offset * smp_processor_id());
+
+	gic_data->dist_base = S5P_VA_GIC_DIST +
+			    (gic_bank_offset * smp_processor_id());
+}
+
+void __init exynos4_init_irq(void)
+{
+	int irq;
+
+	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
+
+	gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
+	gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
+	gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
+	gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;
+
+	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
+
+		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
+				COMBINER_IRQ(irq, 0));
+		combiner_cascade_irq(irq, IRQ_SPI(irq));
+	}
+
+	/*
+	 * The parameters of s5p_init_irq() are for VIC init.
+	 * These parameters should be NULL and 0 because EXYNOS4
+	 * uses GIC instead of VIC.
+	 */
+	s5p_init_irq(NULL, 0);
+}
+
+struct bus_type exynos4_subsys = {
+	.name		= "exynos4-core",
+	.dev_name	= "exynos4-core",
+};
+
+static struct device exynos4_dev = {
+	.bus	= &exynos4_subsys,
+};
+
+static int __init exynos4_core_init(void)
+{
+	return subsys_system_register(&exynos4_subsys, NULL);
+}
+core_initcall(exynos4_core_init);
+
+#ifdef CONFIG_CACHE_L2X0
+static int __init exynos4_l2x0_cache_init(void)
+{
+	/* TAG, Data Latency Control: 2cycle */
+	__raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
+
+	if (soc_is_exynos4210())
+		__raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
+	else if (soc_is_exynos4212() || soc_is_exynos4412())
+		__raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
+
+	/* L2X0 Prefetch Control */
+	__raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
+
+	/* L2X0 Power Control */
+	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
+		     S5P_VA_L2CC + L2X0_POWER_CTRL);
+
+	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
+
+	return 0;
+}
+
+early_initcall(exynos4_l2x0_cache_init);
+#endif
+
+int __init exynos_init(void)
+{
+	printk(KERN_INFO "EXYNOS: Initializing architecture\n");
+
+	/* set idle function */
+	pm_idle = exynos_idle;
+
+	return device_register(&exynos4_dev);
+}
+
+static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
+	[0] = {
+		.name		= "uclk1",
+		.divisor	= 1,
+		.min_baud	= 0,
+		.max_baud	= 0,
+	},
+};
+
+/* uart registration process */
+
+void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	struct s3c2410_uartcfg *tcfg = cfg;
+	u32 ucnt;
+
+	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
+		if (!tcfg->clocks) {
+			tcfg->has_fracval = 1;
+			tcfg->clocks = exynos4_serial_clocks;
+			tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
+		}
+		tcfg->flags |= NO_NEED_CHECK_CLKSRC;
+	}
+
+	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
+}
+
+static DEFINE_SPINLOCK(eint_lock);
+
+static unsigned int eint0_15_data[16];
+
+static unsigned int exynos4_get_irq_nr(unsigned int number)
+{
+	u32 ret = 0;
+
+	switch (number) {
+	case 0 ... 3:
+		ret = (number + IRQ_EINT0);
+		break;
+	case 4 ... 7:
+		ret = (number + (IRQ_EINT4 - 4));
+		break;
+	case 8 ... 15:
+		ret = (number + (IRQ_EINT8 - 8));
+		break;
+	default:
+		printk(KERN_ERR "number available : %d\n", number);
+	}
+
+	return ret;
+}
+
+static inline void exynos4_irq_eint_mask(struct irq_data *data)
+{
+	u32 mask;
+
+	spin_lock(&eint_lock);
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask |= eint_irq_to_bit(data->irq);
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	spin_unlock(&eint_lock);
+}
+
+static void exynos4_irq_eint_unmask(struct irq_data *data)
+{
+	u32 mask;
+
+	spin_lock(&eint_lock);
+	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	mask &= ~(eint_irq_to_bit(data->irq));
+	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+	spin_unlock(&eint_lock);
+}
+
+static inline void exynos4_irq_eint_ack(struct irq_data *data)
+{
+	__raw_writel(eint_irq_to_bit(data->irq),
+		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
+}
+
+static void exynos4_irq_eint_maskack(struct irq_data *data)
+{
+	exynos4_irq_eint_mask(data);
+	exynos4_irq_eint_ack(data);
+}
+
+static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+	int offs = EINT_OFFSET(data->irq);
+	int shift;
+	u32 ctrl, mask;
+	u32 newvalue = 0;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
+		break;
+
+	case IRQ_TYPE_EDGE_FALLING:
+		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
+		break;
+
+	case IRQ_TYPE_LEVEL_LOW:
+		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
+		break;
+
+	case IRQ_TYPE_LEVEL_HIGH:
+		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
+		break;
+
+	default:
+		printk(KERN_ERR "No such irq type %d", type);
+		return -EINVAL;
+	}
+
+	shift = (offs & 0x7) * 4;
+	mask = 0x7 << shift;
+
+	spin_lock(&eint_lock);
+	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
+	ctrl &= ~mask;
+	ctrl |= newvalue << shift;
+	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
+	spin_unlock(&eint_lock);
+
+	switch (offs) {
+	case 0 ... 7:
+		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
+		break;
+	case 8 ... 15:
+		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
+		break;
+	case 16 ... 23:
+		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
+		break;
+	case 24 ... 31:
+		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
+		break;
+	default:
+		printk(KERN_ERR "No such irq number %d", offs);
+	}
+
+	return 0;
+}
+
+static struct irq_chip exynos4_irq_eint = {
+	.name		= "exynos4-eint",
+	.irq_mask	= exynos4_irq_eint_mask,
+	.irq_unmask	= exynos4_irq_eint_unmask,
+	.irq_mask_ack	= exynos4_irq_eint_maskack,
+	.irq_ack	= exynos4_irq_eint_ack,
+	.irq_set_type	= exynos4_irq_eint_set_type,
+#ifdef CONFIG_PM
+	.irq_set_wake	= s3c_irqext_wake,
+#endif
+};
+
+/*
+ * exynos4_irq_demux_eint
+ *
+ * This function demuxes the IRQ from EINTs 16 to 31.
+ * It is designed to be inlined into the specific handler
+ * s5p_irq_demux_eintX_Y.
+ *
+ * Each EINT pend/mask register handles eight of them.
+ */
+static inline void exynos4_irq_demux_eint(unsigned int start)
+{
+	unsigned int irq;
+
+	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
+	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
+
+	status &= ~mask;
+	status &= 0xff;
+
+	while (status) {
+		irq = fls(status) - 1;
+		generic_handle_irq(irq + start);
+		status &= ~(1 << irq);
+	}
+}
+
+static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_get_chip(irq);
+	chained_irq_enter(chip, desc);
+	exynos4_irq_demux_eint(IRQ_EINT(16));
+	exynos4_irq_demux_eint(IRQ_EINT(24));
+	chained_irq_exit(chip, desc);
+}
+
+static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
+{
+	u32 *irq_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_get_chip(irq);
+
+	chained_irq_enter(chip, desc);
+	chip->irq_mask(&desc->irq_data);
+
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
+
+	generic_handle_irq(*irq_data);
+
+	chip->irq_unmask(&desc->irq_data);
+	chained_irq_exit(chip, desc);
+}
+
+int __init exynos4_init_irq_eint(void)
+{
+	int irq;
+
+	for (irq = 0 ; irq <= 31 ; irq++) {
+		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
+					 handle_level_irq);
+		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
+	}
+
+	irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
+
+	for (irq = 0 ; irq <= 15 ; irq++) {
+		eint0_15_data[irq] = IRQ_EINT(irq);
+
+		irq_set_handler_data(exynos4_get_irq_nr(irq),
+				     &eint0_15_data[irq]);
+		irq_set_chained_handler(exynos4_get_irq_nr(irq),
+					exynos4_irq_eint0_15);
+	}
+
+	return 0;
+}
+arch_initcall(exynos4_init_irq_eint);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
new file mode 100644
index 0000000..1ac49de
--- /dev/null
+++ b/arch/arm/mach-exynos/common.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for EXYNOS machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H
+#define __ARCH_ARM_MACH_EXYNOS_COMMON_H
+
+void exynos_init_io(struct map_desc *mach_desc, int size);
+void exynos4_init_irq(void);
+
+void exynos4_register_clocks(void);
+void exynos4_setup_clocks(void);
+
+void exynos4210_register_clocks(void);
+void exynos4212_register_clocks(void);
+
+void exynos4_restart(char mode, const char *cmd);
+
+extern struct sys_timer exynos4_timer;
+
+#ifdef CONFIG_ARCH_EXYNOS
+extern  int exynos_init(void);
+extern void exynos4_map_io(void);
+extern void exynos4_init_clocks(int xtal);
+extern void exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define exynos4_init_clocks NULL
+#define exynos4_init_uarts NULL
+#define exynos4_map_io NULL
+#define exynos_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
diff --git a/arch/arm/mach-exynos/cpu.c b/arch/arm/mach-exynos/cpu.c
deleted file mode 100644
index 90ec247..0000000
--- a/arch/arm/mach-exynos/cpu.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* linux/arch/arm/mach-exynos/cpu.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/sched.h>
-#include <linux/sysdev.h>
-
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <asm/proc-fns.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/hardware/gic.h>
-
-#include <plat/cpu.h>
-#include <plat/clock.h>
-#include <plat/devs.h>
-#include <plat/exynos4.h>
-#include <plat/adc-core.h>
-#include <plat/sdhci.h>
-#include <plat/fb-core.h>
-#include <plat/fimc-core.h>
-#include <plat/iic-core.h>
-#include <plat/reset.h>
-#include <plat/tv-core.h>
-
-#include <mach/regs-irq.h>
-#include <mach/regs-pmu.h>
-
-unsigned int gic_bank_offset __read_mostly;
-
-extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
-			 unsigned int irq_start);
-extern void combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq);
-
-/* Initial IO mappings */
-static struct map_desc exynos_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
-		.pfn		= __phys_to_pfn(EXYNOS_PA_SYSTIMER),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_PMU,
-		.pfn		= __phys_to_pfn(EXYNOS_PA_PMU),
-		.length		= SZ_64K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
-		.pfn		= __phys_to_pfn(EXYNOS_PA_COMBINER),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
-		.pfn		= __phys_to_pfn(EXYNOS_PA_GIC_CPU),
-		.length		= SZ_64K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
-		.pfn		= __phys_to_pfn(EXYNOS_PA_GIC_DIST),
-		.length		= SZ_64K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_UART,
-		.pfn		= __phys_to_pfn(S3C_PA_UART),
-		.length		= SZ_512K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static struct map_desc exynos4_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_CMU,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
-		.length		= SZ_128K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
-		.length		= SZ_8K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_L2CC,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_GPIO1,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO1),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_GPIO2,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO2),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_GPIO3,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO3),
-		.length		= SZ_256,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_DMC0,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_SROMC,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static struct map_desc exynos4_iodesc0[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_SYSRAM,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static struct map_desc exynos4_iodesc1[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_SYSRAM,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static void exynos_idle(void)
-{
-	if (!need_resched())
-		cpu_do_idle();
-
-	local_irq_enable();
-}
-
-static void exynos4_sw_reset(void)
-{
-	__raw_writel(0x1, S5P_SWRESET);
-}
-
-/*
- * exynos_map_io
- *
- * register the standard cpu IO areas
- */
-void __init exynos4_map_io(void)
-{
-	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
-	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
-
-	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
-		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
-	else
-		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
-
-	/* initialize device information early */
-	exynos4_default_sdhci0();
-	exynos4_default_sdhci1();
-	exynos4_default_sdhci2();
-	exynos4_default_sdhci3();
-
-	s3c_adc_setname("samsung-adc-v3");
-
-	s3c_fimc_setname(0, "exynos4-fimc");
-	s3c_fimc_setname(1, "exynos4-fimc");
-	s3c_fimc_setname(2, "exynos4-fimc");
-	s3c_fimc_setname(3, "exynos4-fimc");
-
-	/* The I2C bus controllers are directly compatible with s3c2440 */
-	s3c_i2c0_setname("s3c2440-i2c");
-	s3c_i2c1_setname("s3c2440-i2c");
-	s3c_i2c2_setname("s3c2440-i2c");
-
-	s5p_fb_setname(0, "exynos4-fb");
-	s5p_hdmi_setname("exynos4-hdmi");
-}
-
-void __init exynos4_init_clocks(int xtal)
-{
-	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
-	s3c24xx_register_baseclocks(xtal);
-	s5p_register_clocks(xtal);
-
-	if (soc_is_exynos4210())
-		exynos4210_register_clocks();
-	else if (soc_is_exynos4212() || soc_is_exynos4412())
-		exynos4212_register_clocks();
-
-	exynos4_register_clocks();
-	exynos4_setup_clocks();
-}
-
-static void exynos4_gic_irq_fix_base(struct irq_data *d)
-{
-	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-
-	gic_data->cpu_base = S5P_VA_GIC_CPU +
-			    (gic_bank_offset * smp_processor_id());
-
-	gic_data->dist_base = S5P_VA_GIC_DIST +
-			    (gic_bank_offset * smp_processor_id());
-}
-
-void __init exynos4_init_irq(void)
-{
-	int irq;
-
-	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
-
-	gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
-	gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
-	gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
-	gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;
-
-	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
-
-		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
-				COMBINER_IRQ(irq, 0));
-		combiner_cascade_irq(irq, IRQ_SPI(irq));
-	}
-
-	/* The parameters of s5p_init_irq() are for VIC init.
-	 * Theses parameters should be NULL and 0 because EXYNOS4
-	 * uses GIC instead of VIC.
-	 */
-	s5p_init_irq(NULL, 0);
-}
-
-struct sysdev_class exynos4_sysclass = {
-	.name	= "exynos4-core",
-};
-
-static struct sys_device exynos4_sysdev = {
-	.cls	= &exynos4_sysclass,
-};
-
-static int __init exynos4_core_init(void)
-{
-	return sysdev_class_register(&exynos4_sysclass);
-}
-core_initcall(exynos4_core_init);
-
-#ifdef CONFIG_CACHE_L2X0
-static int __init exynos4_l2x0_cache_init(void)
-{
-	/* TAG, Data Latency Control: 2cycle */
-	__raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
-
-	if (soc_is_exynos4210())
-		__raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
-	else if (soc_is_exynos4212() || soc_is_exynos4412())
-		__raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
-
-	/* L2X0 Prefetch Control */
-	__raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
-
-	/* L2X0 Power Control */
-	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
-		     S5P_VA_L2CC + L2X0_POWER_CTRL);
-
-	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
-
-	return 0;
-}
-
-early_initcall(exynos4_l2x0_cache_init);
-#endif
-
-int __init exynos_init(void)
-{
-	printk(KERN_INFO "EXYNOS: Initializing architecture\n");
-
-	/* set idle function */
-	pm_idle = exynos_idle;
-
-	/* set sw_reset function */
-	if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos4412())
-		s5p_reset_hook = exynos4_sw_reset;
-
-	return sysdev_register(&exynos4_sysdev);
-}
diff --git a/arch/arm/mach-exynos/include/mach/entry-macro.S b/arch/arm/mach-exynos/include/mach/entry-macro.S
index f5e9fd8..3ba4f54 100644
--- a/arch/arm/mach-exynos/include/mach/entry-macro.S
+++ b/arch/arm/mach-exynos/include/mach/entry-macro.S
@@ -9,83 +9,8 @@
  * warranty of any kind, whether express or implied.
 */
 
-#include <mach/hardware.h>
-#include <mach/map.h>
-#include <asm/hardware/gic.h>
-
 		.macro	disable_fiq
 		.endm
 
-		.macro  get_irqnr_preamble, base, tmp
-		mov	\tmp, #0
-
-		mrc	p15, 0, \base, c0, c0, 5
-		and	\base, \base, #3
-		cmp	\base, #0
-		beq	1f
-
-		ldr	\tmp, =gic_bank_offset
-		ldr	\tmp, [\tmp]
-		cmp	\base, #1
-		beq	1f
-
-		cmp	\base, #2
-		addeq	\tmp, \tmp, \tmp
-		addne	\tmp, \tmp, \tmp, LSL #1
-
-1:		ldr	\base, =gic_cpu_base_addr
-		ldr	\base, [\base]
-		add	\base, \base, \tmp
-		.endm
-
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		/*
-		 * The interrupt numbering scheme is defined in the
-		 * interrupt controller spec.  To wit:
-		 *
-		 * Interrupts 0-15 are IPI
-		 * 16-28 are reserved
-		 * 29-31 are local.  We allow 30 to be used for the watchdog.
-		 * 32-1020 are global
-		 * 1021-1022 are reserved
-		 * 1023 is "spurious" (no interrupt)
-		 *
-		 * For now, we ignore all local interrupts so only return an interrupt if it's
-		 * between 30 and 1020.  The test_for_ipi routine below will pick up on IPIs.
-		 *
-		 * A simple read from the controller will tell us the number of the highest
-                 * priority enabled interrupt.  We then just need to check whether it is in the
-		 * valid range for an IRQ (30-1020 inclusive).
-		 */
-
-		.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-		ldr     \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
-
-		ldr	\tmp, =1021
-
-		bic     \irqnr, \irqstat, #0x1c00
-
-		cmp     \irqnr, #15
-		cmpcc	\irqnr, \irqnr
-		cmpne	\irqnr, \tmp
-		cmpcs	\irqnr, \irqnr
-		addne	\irqnr, \irqnr, #32
-
-		.endm
-
-		/* We assume that irqstat (the raw value of the IRQ acknowledge
-		 * register) is preserved from the macro above.
-		 * If there is an IPI, we immediately signal end of interrupt on the
-		 * controller, since this requires the original irqstat value which
-		 * we won't easily be able to recreate later.
-		 */
-
-		.macro test_for_ipi, irqnr, irqstat, base, tmp
-		bic	\irqnr, \irqstat, #0x1c00
-		cmp	\irqnr, #16
-		strcc	\irqstat, [\base, #GIC_CPU_EOI]
-		cmpcs	\irqnr, \irqnr
-		.endm
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 058541d..d182986 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -149,7 +149,6 @@
 #define S3C_PA_WDT			EXYNOS4_PA_WATCHDOG
 #define S3C_PA_UART			EXYNOS4_PA_UART
 
-#define S5P_PA_CHIPID			EXYNOS4_PA_CHIPID
 #define S5P_PA_EHCI			EXYNOS4_PA_EHCI
 #define S5P_PA_FIMC0			EXYNOS4_PA_FIMC0
 #define S5P_PA_FIMC1			EXYNOS4_PA_FIMC1
@@ -166,26 +165,17 @@
 #define S5P_PA_ONENAND_DMA		EXYNOS4_PA_ONENAND_DMA
 #define S5P_PA_SDO			EXYNOS4_PA_SDO
 #define S5P_PA_SDRAM			EXYNOS4_PA_SDRAM
-#define S5P_PA_SROMC			EXYNOS4_PA_SROMC
-#define S5P_PA_SYSCON			EXYNOS4_PA_SYSCON
-#define S5P_PA_TIMER			EXYNOS4_PA_TIMER
 #define S5P_PA_VP			EXYNOS4_PA_VP
 
 #define SAMSUNG_PA_ADC			EXYNOS4_PA_ADC
 #define SAMSUNG_PA_ADC1			EXYNOS4_PA_ADC1
 #define SAMSUNG_PA_KEYPAD		EXYNOS4_PA_KEYPAD
 
-#define EXYNOS_PA_COMBINER		EXYNOS4_PA_COMBINER
-#define EXYNOS_PA_GIC_CPU		EXYNOS4_PA_GIC_CPU
-#define EXYNOS_PA_GIC_DIST		EXYNOS4_PA_GIC_DIST
-#define EXYNOS_PA_PMU			EXYNOS4_PA_PMU
-#define EXYNOS_PA_SYSTIMER		EXYNOS4_PA_SYSTIMER
-
 /* Compatibility UART */
 
 #define S3C_VA_UARTx(x)			(S3C_VA_UART + ((x) * S3C_UART_OFFSET))
 
-#define S5P_PA_UART(x)			(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART(x)			(EXYNOS4_PA_UART + ((x) * S3C_UART_OFFSET))
 #define S5P_PA_UART0			S5P_PA_UART(0)
 #define S5P_PA_UART1			S5P_PA_UART(1)
 #define S5P_PA_UART2			S5P_PA_UART(2)
diff --git a/arch/arm/mach-exynos/include/mach/system.h b/arch/arm/mach-exynos/include/mach/system.h
index 5e3220c..0063a6d 100644
--- a/arch/arm/mach-exynos/include/mach/system.h
+++ b/arch/arm/mach-exynos/include/mach/system.h
@@ -13,8 +13,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H __FILE__
 
-#include <plat/system-reset.h>
-
 static void arch_idle(void)
 {
 	/* nothing here yet */
diff --git a/arch/arm/mach-exynos/include/mach/vmalloc.h b/arch/arm/mach-exynos/include/mach/vmalloc.h
deleted file mode 100644
index 284330e..0000000
--- a/arch/arm/mach-exynos/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/vmalloc.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Based on arch/arm/mach-s5p6440/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * EXYNOS4 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H __FILE__
-
-#define VMALLOC_END	0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-exynos/init.c b/arch/arm/mach-exynos/init.c
deleted file mode 100644
index a8a83e3..0000000
--- a/arch/arm/mach-exynos/init.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/* linux/arch/arm/mach-exynos4/init.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/serial_core.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
-	[0] = {
-		.name		= "uclk1",
-		.divisor	= 1,
-		.min_baud	= 0,
-		.max_baud	= 0,
-	},
-};
-
-/* uart registration process */
-void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	struct s3c2410_uartcfg *tcfg = cfg;
-	u32 ucnt;
-
-	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
-		if (!tcfg->clocks) {
-			tcfg->has_fracval = 1;
-			tcfg->clocks = exynos4_serial_clocks;
-			tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
-		}
-		tcfg->flags |= NO_NEED_CHECK_CLKSRC;
-	}
-
-	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-exynos/irq-combiner.c b/arch/arm/mach-exynos/irq-combiner.c
deleted file mode 100644
index 5a2758a..0000000
--- a/arch/arm/mach-exynos/irq-combiner.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/* linux/arch/arm/mach-exynos4/irq-combiner.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Based on arch/arm/common/gic.c
- *
- * IRQ COMBINER support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/io.h>
-
-#include <asm/mach/irq.h>
-
-#define COMBINER_ENABLE_SET	0x0
-#define COMBINER_ENABLE_CLEAR	0x4
-#define COMBINER_INT_STATUS	0xC
-
-static DEFINE_SPINLOCK(irq_controller_lock);
-
-struct combiner_chip_data {
-	unsigned int irq_offset;
-	unsigned int irq_mask;
-	void __iomem *base;
-};
-
-static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
-
-static inline void __iomem *combiner_base(struct irq_data *data)
-{
-	struct combiner_chip_data *combiner_data =
-		irq_data_get_irq_chip_data(data);
-
-	return combiner_data->base;
-}
-
-static void combiner_mask_irq(struct irq_data *data)
-{
-	u32 mask = 1 << (data->irq % 32);
-
-	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
-}
-
-static void combiner_unmask_irq(struct irq_data *data)
-{
-	u32 mask = 1 << (data->irq % 32);
-
-	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
-}
-
-static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
-{
-	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
-	unsigned int cascade_irq, combiner_irq;
-	unsigned long status;
-
-	chained_irq_enter(chip, desc);
-
-	spin_lock(&irq_controller_lock);
-	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
-	spin_unlock(&irq_controller_lock);
-	status &= chip_data->irq_mask;
-
-	if (status == 0)
-		goto out;
-
-	combiner_irq = __ffs(status);
-
-	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
-	if (unlikely(cascade_irq >= NR_IRQS))
-		do_bad_IRQ(cascade_irq, desc);
-	else
-		generic_handle_irq(cascade_irq);
-
- out:
-	chained_irq_exit(chip, desc);
-}
-
-static struct irq_chip combiner_chip = {
-	.name		= "COMBINER",
-	.irq_mask	= combiner_mask_irq,
-	.irq_unmask	= combiner_unmask_irq,
-};
-
-void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
-{
-	if (combiner_nr >= MAX_COMBINER_NR)
-		BUG();
-	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
-		BUG();
-	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
-}
-
-void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
-			  unsigned int irq_start)
-{
-	unsigned int i;
-
-	if (combiner_nr >= MAX_COMBINER_NR)
-		BUG();
-
-	combiner_data[combiner_nr].base = base;
-	combiner_data[combiner_nr].irq_offset = irq_start;
-	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
-
-	/* Disable all interrupts */
-
-	__raw_writel(combiner_data[combiner_nr].irq_mask,
-		     base + COMBINER_ENABLE_CLEAR);
-
-	/* Setup the Linux IRQ subsystem */
-
-	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
-				+ MAX_IRQ_IN_COMBINER; i++) {
-		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
-		irq_set_chip_data(i, &combiner_data[combiner_nr]);
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
-	}
-}
diff --git a/arch/arm/mach-exynos/irq-eint.c b/arch/arm/mach-exynos/irq-eint.c
deleted file mode 100644
index badb8c6..0000000
--- a/arch/arm/mach-exynos/irq-eint.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/* linux/arch/arm/mach-exynos4/irq-eint.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * EXYNOS4 - IRQ EINT support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/gpio.h>
-
-#include <plat/pm.h>
-#include <plat/cpu.h>
-#include <plat/gpio-cfg.h>
-
-#include <mach/regs-gpio.h>
-
-#include <asm/mach/irq.h>
-
-static DEFINE_SPINLOCK(eint_lock);
-
-static unsigned int eint0_15_data[16];
-
-static unsigned int exynos4_get_irq_nr(unsigned int number)
-{
-	u32 ret = 0;
-
-	switch (number) {
-	case 0 ... 3:
-		ret = (number + IRQ_EINT0);
-		break;
-	case 4 ... 7:
-		ret = (number + (IRQ_EINT4 - 4));
-		break;
-	case 8 ... 15:
-		ret = (number + (IRQ_EINT8 - 8));
-		break;
-	default:
-		printk(KERN_ERR "number available : %d\n", number);
-	}
-
-	return ret;
-}
-
-static inline void exynos4_irq_eint_mask(struct irq_data *data)
-{
-	u32 mask;
-
-	spin_lock(&eint_lock);
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-	mask |= eint_irq_to_bit(data->irq);
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-	spin_unlock(&eint_lock);
-}
-
-static void exynos4_irq_eint_unmask(struct irq_data *data)
-{
-	u32 mask;
-
-	spin_lock(&eint_lock);
-	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-	mask &= ~(eint_irq_to_bit(data->irq));
-	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-	spin_unlock(&eint_lock);
-}
-
-static inline void exynos4_irq_eint_ack(struct irq_data *data)
-{
-	__raw_writel(eint_irq_to_bit(data->irq),
-		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
-}
-
-static void exynos4_irq_eint_maskack(struct irq_data *data)
-{
-	exynos4_irq_eint_mask(data);
-	exynos4_irq_eint_ack(data);
-}
-
-static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
-	int offs = EINT_OFFSET(data->irq);
-	int shift;
-	u32 ctrl, mask;
-	u32 newvalue = 0;
-
-	switch (type) {
-	case IRQ_TYPE_EDGE_RISING:
-		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
-		break;
-
-	case IRQ_TYPE_EDGE_FALLING:
-		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
-		break;
-
-	case IRQ_TYPE_EDGE_BOTH:
-		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
-		break;
-
-	case IRQ_TYPE_LEVEL_LOW:
-		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
-		break;
-
-	case IRQ_TYPE_LEVEL_HIGH:
-		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
-		break;
-
-	default:
-		printk(KERN_ERR "No such irq type %d", type);
-		return -EINVAL;
-	}
-
-	shift = (offs & 0x7) * 4;
-	mask = 0x7 << shift;
-
-	spin_lock(&eint_lock);
-	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
-	ctrl &= ~mask;
-	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
-	spin_unlock(&eint_lock);
-
-	switch (offs) {
-	case 0 ... 7:
-		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
-		break;
-	case 8 ... 15:
-		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
-		break;
-	case 16 ... 23:
-		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
-		break;
-	case 24 ... 31:
-		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
-		break;
-	default:
-		printk(KERN_ERR "No such irq number %d", offs);
-	}
-
-	return 0;
-}
-
-static struct irq_chip exynos4_irq_eint = {
-	.name		= "exynos4-eint",
-	.irq_mask	= exynos4_irq_eint_mask,
-	.irq_unmask	= exynos4_irq_eint_unmask,
-	.irq_mask_ack	= exynos4_irq_eint_maskack,
-	.irq_ack	= exynos4_irq_eint_ack,
-	.irq_set_type	= exynos4_irq_eint_set_type,
-#ifdef CONFIG_PM
-	.irq_set_wake	= s3c_irqext_wake,
-#endif
-};
-
-/* exynos4_irq_demux_eint
- *
- * This function demuxes the IRQ from from EINTs 16 to 31.
- * It is designed to be inlined into the specific handler
- * s5p_irq_demux_eintX_Y.
- *
- * Each EINT pend/mask registers handle eight of them.
- */
-static inline void exynos4_irq_demux_eint(unsigned int start)
-{
-	unsigned int irq;
-
-	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
-	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
-
-	status &= ~mask;
-	status &= 0xff;
-
-	while (status) {
-		irq = fls(status) - 1;
-		generic_handle_irq(irq + start);
-		status &= ~(1 << irq);
-	}
-}
-
-static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_get_chip(irq);
-	chained_irq_enter(chip, desc);
-	exynos4_irq_demux_eint(IRQ_EINT(16));
-	exynos4_irq_demux_eint(IRQ_EINT(24));
-	chained_irq_exit(chip, desc);
-}
-
-static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
-{
-	u32 *irq_data = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
-
-	chained_irq_enter(chip, desc);
-	chip->irq_mask(&desc->irq_data);
-
-	if (chip->irq_ack)
-		chip->irq_ack(&desc->irq_data);
-
-	generic_handle_irq(*irq_data);
-
-	chip->irq_unmask(&desc->irq_data);
-	chained_irq_exit(chip, desc);
-}
-
-int __init exynos4_init_irq_eint(void)
-{
-	int irq;
-
-	for (irq = 0 ; irq <= 31 ; irq++) {
-		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
-					 handle_level_irq);
-		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
-	}
-
-	irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
-
-	for (irq = 0 ; irq <= 15 ; irq++) {
-		eint0_15_data[irq] = IRQ_EINT(irq);
-
-		irq_set_handler_data(exynos4_get_irq_nr(irq),
-				     &eint0_15_data[irq]);
-		irq_set_chained_handler(exynos4_get_irq_nr(irq),
-					exynos4_irq_eint0_15);
-	}
-
-	return 0;
-}
-
-arch_initcall(exynos4_init_irq_eint);
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index f0ca6c1..d726fcd 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -16,11 +16,11 @@
 #include <linux/smsc911x.h>
 
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 
 #include <plat/cpu.h>
 #include <plat/devs.h>
-#include <plat/exynos4.h>
 #include <plat/gpio-cfg.h>
 #include <plat/regs-serial.h>
 #include <plat/regs-srom.h>
@@ -28,6 +28,8 @@
 
 #include <mach/map.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define ARMLEX4210_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -187,7 +189,7 @@
 
 static void __init armlex4210_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(armlex4210_uartcfgs,
 			   ARRAY_SIZE(armlex4210_uartcfgs));
@@ -210,6 +212,8 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= armlex4210_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= armlex4210_machine_init,
 	.timer		= &exynos4_timer,
+	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 236bbe1..635fb97 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -32,12 +32,12 @@
 #include <media/v4l2-mediabus.h>
 
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 
 #include <plat/adc.h>
 #include <plat/regs-fb-v4.h>
 #include <plat/regs-serial.h>
-#include <plat/exynos4.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/fb.h>
@@ -54,6 +54,8 @@
 
 #include <mach/map.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define NURI_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -1283,7 +1285,7 @@
 
 static void __init nuri_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
 }
@@ -1333,7 +1335,9 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= nuri_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= nuri_machine_init,
 	.timer		= &exynos4_timer,
 	.reserve        = &nuri_reserve,
+	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index f80b563..586eb99 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -22,13 +22,13 @@
 #include <linux/lcd.h>
 
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 
 #include <video/platform_lcd.h>
 
 #include <plat/regs-serial.h>
 #include <plat/regs-fb-v4.h>
-#include <plat/exynos4.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/sdhci.h>
@@ -43,6 +43,8 @@
 
 #include <mach/map.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define ORIGEN_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -638,7 +640,7 @@
 
 static void __init origen_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(origen_uartcfgs, ARRAY_SIZE(origen_uartcfgs));
 }
@@ -694,7 +696,9 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= origen_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= origen_machine_init,
 	.timer		= &exynos4_timer,
 	.reserve	= &origen_reserve,
+	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fcf2e0e..d00e4f0 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -21,13 +21,13 @@
 #include <linux/serial_core.h>
 
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 
 #include <plat/backlight.h>
 #include <plat/clock.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
-#include <plat/exynos4.h>
 #include <plat/gpio-cfg.h>
 #include <plat/iic.h>
 #include <plat/keypad.h>
@@ -36,6 +36,8 @@
 
 #include <mach/map.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDK4X12_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -249,7 +251,7 @@
 {
 	clk_xusbxti.rate = 24000000;
 
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(clk_xusbxti.rate);
 	s3c24xx_init_uarts(smdk4x12_uartcfgs, ARRAY_SIZE(smdk4x12_uartcfgs));
 }
@@ -287,8 +289,10 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= smdk4x12_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= smdk4x12_machine_init,
 	.timer		= &exynos4_timer,
+	.restart	= exynos4_restart,
 MACHINE_END
 
 MACHINE_START(SMDK4412, "SMDK4412")
@@ -297,6 +301,8 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= smdk4x12_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= smdk4x12_machine_init,
 	.timer		= &exynos4_timer,
+	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index cec2afa..5b36561 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -21,13 +21,13 @@
 #include <linux/pwm_backlight.h>
 
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 
 #include <video/platform_lcd.h>
 #include <plat/regs-serial.h>
 #include <plat/regs-srom.h>
 #include <plat/regs-fb-v4.h>
-#include <plat/exynos4.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/fb.h>
@@ -43,6 +43,8 @@
 
 #include <mach/map.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKV310_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -332,7 +334,7 @@
 
 static void __init smdkv310_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(smdkv310_uartcfgs, ARRAY_SIZE(smdkv310_uartcfgs));
 }
@@ -375,9 +377,11 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= smdkv310_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= smdkv310_machine_init,
 	.timer		= &exynos4_timer,
 	.reserve	= &smdkv310_reserve,
+	.restart	= exynos4_restart,
 MACHINE_END
 
 MACHINE_START(SMDKC210, "SMDKC210")
@@ -385,6 +389,8 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= smdkv310_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= smdkv310_machine_init,
 	.timer		= &exynos4_timer,
+	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index a2a177f..52aea97 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -24,10 +24,10 @@
 #include <linux/i2c/atmel_mxt_ts.h>
 
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
-#include <plat/exynos4.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/iic.h>
@@ -47,6 +47,8 @@
 #include <media/s5p_fimc.h>
 #include <media/m5mols.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define UNIVERSAL_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -992,7 +994,7 @@
 
 static void __init universal_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
 }
@@ -1058,7 +1060,9 @@
 	.atag_offset	= 0x100,
 	.init_irq	= exynos4_init_irq,
 	.map_io		= universal_map_io,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= universal_machine_init,
 	.timer		= &exynos4_timer,
 	.reserve        = &universal_reserve,
+	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 97343df..85b5527 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -44,8 +44,6 @@
 	char name[10];
 };
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
 	void __iomem *stat_addr;
@@ -264,6 +262,9 @@
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
@@ -428,9 +429,13 @@
 
 void local_timer_stop(struct clock_event_device *evt)
 {
+	unsigned int cpu = smp_processor_id();
 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 	if (mct_int_type == MCT_INT_SPI)
-		disable_irq(evt->irq);
+		if (cpu == 0)
+			remove_irq(evt->irq, &mct_tick0_event_irq);
+		else
+			remove_irq(evt->irq, &mct_tick1_event_irq);
 	else
 		disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
@@ -443,6 +448,7 @@
 
 	clk_rate = clk_get_rate(mct_clk);
 
+#ifdef CONFIG_LOCAL_TIMERS
 	if (mct_int_type == MCT_INT_PPI) {
 		int err;
 
@@ -452,6 +458,7 @@
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     IRQ_MCT_LOCALTIMER, err);
 	}
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 
 static void __init exynos4_timer_init(void)
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 69ffb2f..60bc45e 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -32,7 +32,6 @@
 
 #include <plat/cpu.h>
 
-extern unsigned int gic_bank_offset;
 extern void exynos4_secondary_startup(void);
 
 #define CPU1_BOOT_REG		(samsung_rev() == EXYNOS4210_REV_1_1 ? \
@@ -65,31 +64,6 @@
 
 static DEFINE_SPINLOCK(boot_lock);
 
-static void __cpuinit exynos4_gic_secondary_init(void)
-{
-	void __iomem *dist_base = S5P_VA_GIC_DIST +
-				(gic_bank_offset * smp_processor_id());
-	void __iomem *cpu_base = S5P_VA_GIC_CPU +
-				(gic_bank_offset * smp_processor_id());
-	int i;
-
-	/*
-	 * Deal with the banked PPI and SGI interrupts - disable all
-	 * PPI interrupts, ensure all SGI interrupts are enabled.
-	 */
-	__raw_writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
-	__raw_writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
-
-	/*
-	 * Set priority on PPI and SGI interrupts
-	 */
-	for (i = 0; i < 32; i += 4)
-		__raw_writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
-
-	__raw_writel(0xf0, cpu_base + GIC_CPU_PRIMASK);
-	__raw_writel(1, cpu_base + GIC_CPU_CTRL);
-}
-
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
 	/*
@@ -97,7 +71,7 @@
 	 * core (e.g. timer irq), then they will not have been enabled
 	 * for us: do so
 	 */
-	exynos4_gic_secondary_init();
+	gic_secondary_init(0);
 
 	/*
 	 * let the primary processor know we're out of the
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 509a435..c4f792d 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -205,7 +205,7 @@
 
 }
 
-static int exynos4_pm_add(struct sys_device *sysdev)
+static int exynos4_pm_add(struct device *dev)
 {
 	pm_cpu_prep = exynos4_pm_prepare;
 	pm_cpu_sleep = exynos4_cpu_suspend;
@@ -301,8 +301,10 @@
 	} while (epll_wait || vpll_wait);
 }
 
-static struct sysdev_driver exynos4_pm_driver = {
-	.add		= exynos4_pm_add,
+static struct subsys_interface exynos4_pm_interface = {
+	.name		= "exynos4_pm",
+	.subsys		= &exynos4_subsys,
+	.add_dev	= exynos4_pm_add,
 };
 
 static __init int exynos4_pm_drvinit(void)
@@ -325,7 +327,7 @@
 		clk_put(pll_base);
 	}
 
-	return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver);
+	return subsys_interface_register(&exynos4_pm_interface);
 }
 arch_initcall(exynos4_pm_drvinit);
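
The pm.c hunk is the driver-core side of this merge: struct sys_device and its class/driver pair go away, and per-device hooks are attached through a subsys_interface on the subsystem's bus instead. A hedged before/after sketch of the conversion, with foo_* standing in for whichever subsystem is being converted:

/* before: sysdev driver registered against a sysdev class */
static struct sysdev_driver foo_pm_driver = {
	.add	= foo_pm_add,		/* took a struct sys_device * */
};
/* ... sysdev_driver_register(&foo_sysclass, &foo_pm_driver); */

/* after: interface registered against the subsystem bus */
static struct subsys_interface foo_pm_interface = {
	.name		= "foo_pm",
	.subsys		= &foo_subsys,
	.add_dev	= foo_pm_add,	/* now takes a struct device * */
};
/* ... subsys_interface_register(&foo_pm_interface); */
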
 
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index d5f1785..25b4536 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -86,9 +86,10 @@
 MACHINE_START(CATS, "Chalice-CATS")
 	/* Maintainer: Philip Blundell */
 	.atag_offset	= 0x100,
-	.soft_reboot	= 1,
+	.restart_mode	= 's',
 	.fixup		= fixup_cats,
 	.map_io		= footbridge_map_io,
 	.init_irq	= footbridge_init_irq,
 	.timer		= &isa_timer,
+	.restart	= footbridge_restart,
 MACHINE_END
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 38a44f9..41978ee 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -199,6 +199,33 @@
 		iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
 }
 
+void footbridge_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		/* Jump into the ROM */
+		soft_restart(0x41000000);
+	} else {
+		/*
+		 * Force the watchdog to do a CPU reset.
+		 *
+		 * After making sure that the watchdog is disabled
+		 * (so we can change the timer registers) we first
+		 * enable the timer to autoreload itself.  Next, the
+		 * timer interval is set really short and any
+		 * current interrupt request is cleared (so we can
+		 * see an edge transition).  Finally, TIMER4 is
+		 * enabled as the watchdog.
+		 */
+		*CSR_SA110_CNTL &= ~(1 << 13);
+		*CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE |
+				   TIMER_CNTL_AUTORELOAD |
+				   TIMER_CNTL_DIV16;
+		*CSR_TIMER4_LOAD = 0x2;
+		*CSR_TIMER4_CLR  = 0;
+		*CSR_SA110_CNTL |= (1 << 13);
+	}
+}
+
 #ifdef CONFIG_FOOTBRIDGE_ADDIN
 
 static inline unsigned long fb_bus_sdram_offset(void)
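
Two pieces of new generic ARM infrastructure show up in the footbridge conversion: the descriptor's old .soft_reboot flag becomes a .restart_mode character, which is what normally arrives in the mode argument of the restart hook (a reboot= option on the kernel command line can still override it), and soft_restart() is the helper that does the cache/MMU teardown before branching to a physical address, replacing the raw cpu_reset() calls the deleted arch_reset() implementations relied on. A hedged sketch of the typical mode dispatch (example_pull_reset_line() is a made-up board helper):

static void example_restart(char mode, const char *cmd)
{
	if (mode == 's')			/* from .restart_mode or reboot= */
		soft_restart(0x41000000);	/* soft: MMU off, jump back into ROM */
	else
		example_pull_reset_line();	/* hard: kick a watchdog or reset line */
}
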
diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h
index b05e662..c9767b89 100644
--- a/arch/arm/mach-footbridge/common.h
+++ b/arch/arm/mach-footbridge/common.h
@@ -8,3 +8,4 @@
 extern void footbridge_init_irq(void);
 
 extern void isa_init_irq(unsigned int irq);
+extern void footbridge_restart(char, const char *);
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index 012210c..27716a7 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -21,5 +21,6 @@
 	.map_io		= footbridge_map_io,
 	.init_irq	= footbridge_init_irq,
 	.timer		= &footbridge_timer,
+	.restart	= footbridge_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-footbridge/include/mach/system.h b/arch/arm/mach-footbridge/include/mach/system.h
index 0b29315..a174a58 100644
--- a/arch/arm/mach-footbridge/include/mach/system.h
+++ b/arch/arm/mach-footbridge/include/mach/system.h
@@ -7,63 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/io.h>
-#include <asm/hardware/dec21285.h>
-#include <mach/hardware.h>
-#include <asm/leds.h>
-#include <asm/mach-types.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	if (mode == 's') {
-		/*
-		 * Jump into the ROM
-		 */
-		cpu_reset(0x41000000);
-	} else {
-		if (machine_is_netwinder()) {
-			/* open up the SuperIO chip
-			 */
-			outb(0x87, 0x370);
-			outb(0x87, 0x370);
-
-			/* aux function group 1 (logical device 7)
-			 */
-			outb(0x07, 0x370);
-			outb(0x07, 0x371);
-
-			/* set GP16 for WD-TIMER output
-			 */
-			outb(0xe6, 0x370);
-			outb(0x00, 0x371);
-
-			/* set a RED LED and toggle WD_TIMER for rebooting
-			 */
-			outb(0xc4, 0x338);
-		} else {
-			/* 
-			 * Force the watchdog to do a CPU reset.
-			 *
-			 * After making sure that the watchdog is disabled
-			 * (so we can change the timer registers) we first
-			 * enable the timer to autoreload itself.  Next, the
-			 * timer interval is set really short and any
-			 * current interrupt request is cleared (so we can
-			 * see an edge transition).  Finally, TIMER4 is
-			 * enabled as the watchdog.
-			 */
-			*CSR_SA110_CNTL &= ~(1 << 13);
-			*CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE |
-					   TIMER_CNTL_AUTORELOAD |
-					   TIMER_CNTL_DIV16;
-			*CSR_TIMER4_LOAD = 0x2;
-			*CSR_TIMER4_CLR  = 0;
-			*CSR_SA110_CNTL |= (1 << 13);
-		}
-	}
-}
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
deleted file mode 100644
index 40ba78e..0000000
--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  arch/arm/mach-footbridge/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#define VMALLOC_END       0xf0000000UL
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 0d3846f..80a1c5c 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -645,6 +645,32 @@
 #endif
 }
 
+static void netwinder_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		/* Jump into the ROM */
+		soft_restart(0x41000000);
+	} else {
+		local_irq_disable();
+		local_fiq_disable();
+
+		/* open up the SuperIO chip */
+		outb(0x87, 0x370);
+		outb(0x87, 0x370);
+
+		/* aux function group 1 (logical device 7) */
+		outb(0x07, 0x370);
+		outb(0x07, 0x371);
+
+		/* set GP16 for WD-TIMER output */
+		outb(0xe6, 0x370);
+		outb(0x00, 0x371);
+
+		/* set a RED LED and toggle WD_TIMER for rebooting */
+		outb(0xc4, 0x338);
+	}
+}
+
 MACHINE_START(NETWINDER, "Rebel-NetWinder")
 	/* Maintainer: Russell King/Rebel.com */
 	.atag_offset	= 0x100,
@@ -656,4 +682,5 @@
 	.map_io		= footbridge_map_io,
 	.init_irq	= footbridge_init_irq,
 	.timer		= &isa_timer,
+	.restart	= netwinder_restart,
 MACHINE_END
diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c
index f41dba3..e1e9990 100644
--- a/arch/arm/mach-footbridge/personal.c
+++ b/arch/arm/mach-footbridge/personal.c
@@ -19,5 +19,6 @@
 	.map_io		= footbridge_map_io,
 	.init_irq	= footbridge_init_irq,
 	.timer		= &footbridge_timer,
+	.restart	= footbridge_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h
deleted file mode 100644
index 45371eb..0000000
--- a/arch/arm/mach-gemini/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#define VMALLOC_END	0xf0000000UL
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index 51d4e44..f8a2f6b 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -242,3 +242,8 @@
 {
 	iotable_init(h720x_io_desc,ARRAY_SIZE(h720x_io_desc));
 }
+
+void h720x_restart(char mode, const char *cmd)
+{
+	CPU_REG (PMU_BASE, PMU_STAT) |= PMU_WARMRESET;
+}
diff --git a/arch/arm/mach-h720x/common.h b/arch/arm/mach-h720x/common.h
index 7dd5fa6..2489537 100644
--- a/arch/arm/mach-h720x/common.h
+++ b/arch/arm/mach-h720x/common.h
@@ -16,6 +16,7 @@
 extern unsigned long h720x_gettimeoffset(void);
 extern void __init h720x_init_irq(void);
 extern void __init h720x_map_io(void);
+extern void h720x_restart(char, const char *);
 
 #ifdef CONFIG_ARCH_H7202
 extern struct sys_timer h7202_timer;
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c
index 9886f19..5fdb20c 100644
--- a/arch/arm/mach-h720x/h7201-eval.c
+++ b/arch/arm/mach-h720x/h7201-eval.c
@@ -34,4 +34,5 @@
 	.init_irq	= h720x_init_irq,
 	.timer		= &h7201_timer,
 	.dma_zone_size	= SZ_256M,
+	.restart	= h720x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index 284a134..1696730 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -77,4 +77,5 @@
 	.timer		= &h7202_timer,
 	.init_machine	= init_eval_h7202,
 	.dma_zone_size	= SZ_256M,
+	.restart	= h720x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-h720x/include/mach/system.h b/arch/arm/mach-h720x/include/mach/system.h
index a708d24..16ac46e 100644
--- a/arch/arm/mach-h720x/include/mach/system.h
+++ b/arch/arm/mach-h720x/include/mach/system.h
@@ -24,10 +24,4 @@
 	nop();
 }
 
-
-static __inline__ void arch_reset(char mode, const char *cmd)
-{
-	CPU_REG (PMU_BASE, PMU_STAT) |= PMU_WARMRESET;
-}
-
 #endif
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
deleted file mode 100644
index 8520b4a..0000000
--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-h720x/include/mach/vmalloc.h
- */
-
-#ifndef __ARCH_ARM_VMALLOC_H
-#define __ARCH_ARM_VMALLOC_H
-
-#define VMALLOC_END       0xd0000000UL
-
-#endif
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index 7e33fc9..d8e2d0b 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -1,5 +1,6 @@
 extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
 extern void highbank_clocks_init(void);
+extern void highbank_restart(char, const char *);
 extern void __iomem *scu_base_addr;
 #ifdef CONFIG_DEBUG_HIGHBANK_UART
 extern void highbank_lluart_map_io(void);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 88660d5..804c4a5 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -144,6 +144,8 @@
 	.map_io		= highbank_map_io,
 	.init_irq	= highbank_init_irq,
 	.timer		= &highbank_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= highbank_init,
 	.dt_compat	= highbank_match,
+	.restart	= highbank_restart,
 MACHINE_END
diff --git a/arch/arm/mach-highbank/include/mach/entry-macro.S b/arch/arm/mach-highbank/include/mach/entry-macro.S
index 73c1129..a14f9e6 100644
--- a/arch/arm/mach-highbank/include/mach/entry-macro.S
+++ b/arch/arm/mach-highbank/include/mach/entry-macro.S
@@ -1,5 +1,3 @@
-#include <asm/hardware/entry-macro-gic.S>
-
 	.macro	disable_fiq
 	.endm
 
diff --git a/arch/arm/mach-highbank/include/mach/system.h b/arch/arm/mach-highbank/include/mach/system.h
index 7e81922..b1d8b5f 100644
--- a/arch/arm/mach-highbank/include/mach/system.h
+++ b/arch/arm/mach-highbank/include/mach/system.h
@@ -21,6 +21,4 @@
 	cpu_do_idle();
 }
 
-extern void arch_reset(char mode, const char *cmd);
-
 #endif
diff --git a/arch/arm/mach-highbank/include/mach/vmalloc.h b/arch/arm/mach-highbank/include/mach/vmalloc.h
deleted file mode 100644
index 1969e95..0000000
--- a/arch/arm/mach-highbank/include/mach/vmalloc.h
+++ /dev/null
@@ -1 +0,0 @@
-#define VMALLOC_END		0xFEE00000UL
diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c
index 53f0c4c..82c27230 100644
--- a/arch/arm/mach-highbank/system.c
+++ b/arch/arm/mach-highbank/system.c
@@ -20,7 +20,7 @@
 #include "core.h"
 #include "sysregs.h"
 
-void arch_reset(char mode, const char *cmd)
+void highbank_restart(char mode, const char *cmd)
 {
 	if (mode == 'h')
 		hignbank_set_pwr_hard_reset();
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c44aa97..35a218c 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -132,7 +132,7 @@
 	select IMX_HAVE_PLATFORM_MXC_NAND
 	select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
 	bool "Support Eukrea CPUIMX25 Platform"
 	select SOC_IMX25
 	select IMX_HAVE_PLATFORM_FLEXCAN
@@ -148,7 +148,7 @@
 
 choice
 	prompt "Baseboard"
-	depends on MACH_EUKREA_CPUIMX25
+	depends on MACH_EUKREA_CPUIMX25SD
 	default MACH_EUKREA_MBIMXSD25_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -542,7 +542,7 @@
 	  Include support for MX35PDK platform. This includes specific
 	  configurations for the board and its peripherals.
 
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
 	bool "Support Eukrea CPUIMX35 Platform"
 	select SOC_IMX35
 	select IMX_HAVE_PLATFORM_FLEXCAN
@@ -560,7 +560,7 @@
 
 choice
 	prompt "Baseboard"
-	depends on MACH_EUKREA_CPUIMX35
+	depends on MACH_EUKREA_CPUIMX35SD
 	default MACH_EUKREA_MBIMXSD35_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD35_BASEBOARD
@@ -596,12 +596,12 @@
 config SOC_IMX6Q
 	bool "i.MX6 Quad support"
 	select ARM_GIC
-	select CACHE_L2X0
 	select CPU_V7
 	select HAVE_ARM_SCU
 	select HAVE_IMX_GPC
 	select HAVE_IMX_MMDC
 	select HAVE_IMX_SRC
+	select HAVE_SMP
 	select USE_OF
 
 	help
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index aba7321..d97f409 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -24,7 +24,7 @@
 
 # i.MX25 based machines
 obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
 
 # i.MX27 based machines
@@ -57,7 +57,7 @@
 # i.MX35 based machines
 obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
 obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
 obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
 
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
index 8116f11..ac8238c 100644
--- a/arch/arm/mach-imx/clock-imx35.c
+++ b/arch/arm/mach-imx/clock-imx35.c
@@ -507,7 +507,7 @@
 
 int __init mx35_clocks_init()
 {
-	unsigned int cgr2 = 3 << 26, cgr3 = 0;
+	unsigned int cgr2 = 3 << 26;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
 	cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@
 	__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
 	__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
 			CCM_BASE + CCM_CGR1);
+	__raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+	__raw_writel(0, CCM_BASE + CCM_CGR3);
+
+	clk_enable(&iim_clk);
+	imx_print_silicon_rev("i.MX35", mx35_revision());
+	clk_disable(&iim_clk);
 
 	/*
 	 * Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@
 	 */
 	if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
 		/* Additionally turn on UART1, SCC, and IIM clocks */
-		cgr2 |= 3 << 16 | 3 << 4;
-		cgr3 |= 3 << 2;
+		clk_enable(&iim_clk);
+		clk_enable(&uart1_clk);
+		clk_enable(&scc_clk);
 	}
 
-	__raw_writel(cgr2, CCM_BASE + CCM_CGR2);
-	__raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
-	clk_enable(&iim_clk);
-	imx_print_silicon_rev("i.MX35", mx35_revision());
-	clk_disable(&iim_clk);
-
 #ifdef CONFIG_MXC_USE_EPIT
 	epit_timer_init(&epit1_clk,
 			MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
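
The clock-imx35.c hunk stops open-coding the CGR gate bits for the IIM, UART1 and SCC clocks and routes them through the clk API instead, so their gating is tracked together with every other user of those clocks. Assuming the usual use-count semantics of clk_enable()/clk_disable() in the legacy i.MX clock code, the benefit is roughly this (illustrative only):

static void example_clk_use_counting(void)
{
	clk_enable(&uart1_clk);		/* count 0 -> 1: hardware ungated */
	clk_enable(&uart1_clk);		/* count 1 -> 2: second user, no register write */
	clk_disable(&uart1_clk);	/* count 2 -> 1: still ungated */
	clk_disable(&uart1_clk);	/* count 1 -> 0: gated again */
}
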
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index 039a7ab..9273c2a 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1931,14 +1931,12 @@
 		val |= 0x1 << BP_CLPCR_LPM;
 		val &= ~BM_CLPCR_VSTBY;
 		val &= ~BM_CLPCR_SBYOS;
-		val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
 		break;
 	case STOP_POWER_OFF:
 		val |= 0x2 << BP_CLPCR_LPM;
 		val |= 0x3 << BP_CLPCR_STBY_COUNT;
 		val |= BM_CLPCR_VSTBY;
 		val |= BM_CLPCR_SBYOS;
-		val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
 		break;
 	default:
 		return -EINVAL;
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 1e486e6..146a4f0 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -139,4 +139,5 @@
 	.handle_irq   = imx1_handle_irq,
 	.timer        = &apf9328_timer,
 	.init_machine = apf9328_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index c9a9cf6..e4f426a 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -561,4 +561,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &armadillo5x0_timer,
 	.init_machine = armadillo5x0_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c
index 313f62d..9a98977 100644
--- a/arch/arm/mach-imx/mach-bug.c
+++ b/arch/arm/mach-imx/mach-bug.c
@@ -65,4 +65,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &bug_timer,
 	.init_machine = bug_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index edb3730..d085aea 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -318,4 +318,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &eukrea_cpuimx27_timer,
 	.init_machine = eukrea_cpuimx27_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 66af2e8..8ecc872 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -53,12 +53,18 @@
 	.bitrate =		100000,
 };
 
+#define TSC2007_IRQGPIO		IMX_GPIO_NR(3, 2)
+static int tsc2007_get_pendown_state(void)
+{
+	return !gpio_get_value(TSC2007_IRQGPIO);
+}
+
 static struct tsc2007_platform_data tsc2007_info = {
 	.model			= 2007,
 	.x_plate_ohms		= 180,
+	.get_pendown_state = tsc2007_get_pendown_state,
 };
 
-#define TSC2007_IRQGPIO		IMX_GPIO_NR(3, 2)
 static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
 	{
 		I2C_BOARD_INFO("pcf8563", 0x51),
@@ -201,4 +207,5 @@
 	.handle_irq = imx35_handle_irq,
 	.timer = &eukrea_cpuimx35_timer,
 	.init_machine = eukrea_cpuimx35_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index ab8fbcc..76a97a5 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -170,4 +170,5 @@
 	.handle_irq = imx25_handle_irq,
 	.timer = &eukrea_cpuimx25_timer,
 	.init_machine = eukrea_cpuimx25_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 38eb9e4..c2766ae 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -282,4 +282,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &visstrim_m10_timer,
 	.init_machine = visstrim_m10_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index 7052155..c9d350c 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -78,4 +78,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &mx27ipcam_timer,
 	.init_machine = mx27ipcam_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 8d6a635..1f45b91 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -84,4 +84,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &mx27lite_timer,
 	.init_machine = mx27lite_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 8deb012..05b49bb 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -10,10 +10,13 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <asm/hardware/cache-l2x0.h>
@@ -23,6 +26,36 @@
 #include <mach/common.h>
 #include <mach/hardware.h>
 
+void imx6q_restart(char mode, const char *cmd)
+{
+	struct device_node *np;
+	void __iomem *wdog_base;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-wdt");
+	wdog_base = of_iomap(np, 0);
+	if (!wdog_base)
+		goto soft;
+
+	imx_src_prepare_restart();
+
+	/* enable wdog */
+	writew_relaxed(1 << 2, wdog_base);
+	/* write twice to ensure the request will not get ignored */
+	writew_relaxed(1 << 2, wdog_base);
+
+	/* wait for reset to assert ... */
+	mdelay(500);
+
+	pr_err("Watchdog reset failed to assert reset\n");
+
+	/* delay to allow the serial port to show the message */
+	mdelay(50);
+
+soft:
+	/* we'll take a jump through zero as a poor second */
+	soft_restart(0);
+}
+
 static void __init imx6q_init_machine(void)
 {
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -83,4 +116,5 @@
 	.timer		= &imx6q_timer,
 	.init_machine	= imx6q_init_machine,
 	.dt_compat	= imx6q_dt_compat,
+	.restart	= imx6q_restart,
 MACHINE_END
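
imx6q_restart() finds the watchdog purely through the device tree rather than a hard-coded physical address, which fits the DT-only nature of the i.MX6Q port. The same lookup pattern in isolation, hedged and with a made-up compatible string ("fsl,example-wdt" is not a real binding):

static void __iomem *example_map_wdog(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "fsl,example-wdt");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);	/* map the node's first reg entry */
	of_node_put(np);	/* the mapping does not need the node reference */

	return base;
}
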
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 5f37f89..fc78e80 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -279,4 +279,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &kzm_timer,
 	.init_machine = kzm_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index fc49785..9704608 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -147,6 +147,7 @@
 	.handle_irq = imx1_handle_irq,
 	.timer = &mx1ads_timer,
 	.init_machine = mx1ads_init,
+	.restart	= mxc_restart,
 MACHINE_END
 
 MACHINE_START(MXLADS, "Freescale MXLADS")
@@ -157,4 +158,5 @@
 	.handle_irq = imx1_handle_irq,
 	.timer = &mx1ads_timer,
 	.init_machine = mx1ads_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 25f8402..8d9f955 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -312,4 +312,5 @@
 	.handle_irq = imx21_handle_irq,
 	.timer = &mx21ads_timer,
 	.init_machine = mx21ads_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index 88dccf1..f267342 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -270,4 +270,5 @@
 	.handle_irq = imx25_handle_irq,
 	.timer = &mx25pdk_timer,
 	.init_machine = mx25pdk_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index ba232d7..18f3581 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -425,4 +425,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &mx27pdk_timer,
 	.init_machine = mx27pdk_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 74dd573..0228d2e 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -351,4 +351,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &mx27ads_timer,
 	.init_machine = mx27ads_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index b8c54b8..2b565c3 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -770,4 +770,5 @@
 	.timer = &mx31_3ds_timer,
 	.init_machine = mx31_3ds_init,
 	.reserve = mx31_3ds_reserve,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index 9cc1a49..4917aab 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -542,4 +542,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &mx31ads_timer,
 	.init_machine = mx31ads_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 102ec99..02401bb 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -303,4 +303,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &mx31lilly_timer,
 	.init_machine = mx31lilly_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index 5366d2d..ef80751 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -287,4 +287,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &mx31lite_timer,
 	.init_machine = mx31lite_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index 9326915..b95981d 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -600,4 +600,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &mx31moboard_timer,
 	.init_machine = mx31moboard_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 7a46202..0af6c9c 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -224,4 +224,5 @@
 	.handle_irq = imx35_handle_irq,
 	.timer = &mx35pdk_timer,
 	.init_machine = mx35_3ds_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 125c196..8b3d3f0 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -274,4 +274,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &mxt_td60_timer,
 	.init_machine = mxt_td60_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index 26072f4..d3b9c6b 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -442,4 +442,5 @@
 	.handle_irq = imx27_handle_irq,
 	.init_machine = pca100_init,
 	.timer = &pca100_timer,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index efd6b53..d7e1516 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -696,4 +696,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &pcm037_timer,
 	.init_machine = pcm037_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index a17e9c7..16f126d 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -357,4 +357,5 @@
 	.handle_irq = imx27_handle_irq,
 	.timer = &pcm038_timer,
 	.init_machine = pcm038_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 7366c2a..06dc106 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -425,4 +425,5 @@
 	.handle_irq = imx35_handle_irq,
 	.timer = &pcm043_timer,
 	.init_machine = pcm043_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index 4ff5faf..2606210 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -273,4 +273,5 @@
 	.handle_irq = imx31_handle_irq,
 	.timer = &qong_timer,
 	.init_machine = qong_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index bb6e5b2..cb9ceae 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -144,4 +144,5 @@
 	.handle_irq = imx1_handle_irq,
 	.timer = &scb9328_timer,
 	.init_machine = scb9328_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index 6909245..033257e 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -322,4 +322,5 @@
 	.handle_irq = imx35_handle_irq,
 	.timer = &vpr200_timer,
 	.init_machine = vpr200_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index a8e3368..4bde04f 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -19,6 +19,7 @@
 
 #define SRC_SCR				0x000
 #define SRC_GPR1			0x020
+#define BP_SRC_SCR_WARM_RESET_ENABLE	0
 #define BP_SRC_SCR_CORE1_RST		14
 #define BP_SRC_SCR_CORE1_ENABLE		22
 
@@ -46,11 +47,33 @@
 		       src_base + SRC_GPR1 + cpu * 8);
 }
 
+void imx_src_prepare_restart(void)
+{
+	u32 val;
+
+	/* clear enable bits of secondary cores */
+	val = readl_relaxed(src_base + SRC_SCR);
+	val &= ~(0x7 << BP_SRC_SCR_CORE1_ENABLE);
+	writel_relaxed(val, src_base + SRC_SCR);
+
+	/* clear persistent entry register of primary core */
+	writel_relaxed(0, src_base + SRC_GPR1);
+}
+
 void __init imx_src_init(void)
 {
 	struct device_node *np;
+	u32 val;
 
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-src");
 	src_base = of_iomap(np, 0);
 	WARN_ON(!src_base);
+
+	/*
+	 * force warm reset sources to generate cold reset
+	 * for a more reliable restart
+	 */
+	val = readl_relaxed(src_base + SRC_SCR);
+	val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
+	writel_relaxed(val, src_base + SRC_SCR);
 }
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index dfd18f3..350e266 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -6,6 +6,8 @@
 	bool "Support Integrator/AP and Integrator/PP2 platforms"
 	select CLKSRC_MMIO
 	select MIGHT_HAVE_PCI
+	select SERIAL_AMBA_PL010
+	select SERIAL_AMBA_PL010_CONSOLE
 	help
 	  Include support for the ARM(R) Integrator/AP and
 	  Integrator/PP2 platforms.
@@ -15,6 +17,8 @@
 	select ARCH_CINTEGRATOR
 	select ARM_TIMER_SP804
 	select PLAT_VERSATILE_CLCD
+	select SERIAL_AMBA_PL011
+	select SERIAL_AMBA_PL011_CONSOLE
 	help
 	  Include support for the ARM(R) Integrator CP platform.
 
diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h
index a08f9b0..899561d 100644
--- a/arch/arm/mach-integrator/common.h
+++ b/arch/arm/mach-integrator/common.h
@@ -1,2 +1,3 @@
 void integrator_init_early(void);
 void integrator_reserve(void);
+void integrator_restart(char, const char *);
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 4b38e13..019f0ab 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -29,6 +29,7 @@
 #include <mach/cm.h>
 #include <asm/system.h>
 #include <asm/leds.h>
+#include <asm/mach-types.h>
 #include <asm/mach/time.h>
 #include <asm/pgtable.h>
 
@@ -44,7 +45,6 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	.irq		= { IRQ_RTCINT, NO_IRQ },
-	.periphid	= 0x00041030,
 };
 
 static struct amba_device uart0_device = {
@@ -58,7 +58,6 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	.irq		= { IRQ_UARTINT0, NO_IRQ },
-	.periphid	= 0x0041010,
 };
 
 static struct amba_device uart1_device = {
@@ -72,7 +71,6 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	.irq		= { IRQ_UARTINT1, NO_IRQ },
-	.periphid	= 0x0041010,
 };
 
 static struct amba_device kmi0_device = {
@@ -85,7 +83,6 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	.irq		= { IRQ_KMIINT0, NO_IRQ },
-	.periphid	= 0x00041050,
 };
 
 static struct amba_device kmi1_device = {
@@ -98,7 +95,6 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	.irq		= { IRQ_KMIINT1, NO_IRQ },
-	.periphid	= 0x00041050,
 };
 
 static struct amba_device *amba_devs[] __initdata = {
@@ -157,6 +153,19 @@
 {
 	int i;
 
+	/*
+	 * The Integrator/AP lacks necessary AMBA PrimeCell IDs, so we need to
+	 * hard-code them. The Integrator/CP and forward have proper cell IDs.
+	 * Otherwise we leave them undefined so the bus driver can autoprobe them.
+	 */
+	if (machine_is_integrator()) {
+		rtc_device.periphid	= 0x00041030;
+		uart0_device.periphid	= 0x00041010;
+		uart1_device.periphid	= 0x00041010;
+		kmi0_device.periphid	= 0x00041050;
+		kmi1_device.periphid	= 0x00041050;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
 		struct amba_device *d = amba_devs[i];
 		amba_device_register(d, &iomem_resource);
@@ -238,3 +247,11 @@
 {
 	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 }
+
+/*
+ * To reset, we hit the on-board reset register in the system FPGA
+ */
+void integrator_restart(char mode, const char *cmd)
+{
+	cm_control(CM_CTRL_RESET, CM_CTRL_RESET);
+}
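
The Integrator change works because an amba_device whose periphid is left at zero has its ID probed from the PrimeCell identification registers at the top of the device's 4K window; only the original Integrator/AP parts lack those registers and still need the IDs filled in at runtime. A hedged sketch of what that autoprobe boils down to (illustrative, not the bus driver's actual code):

static u32 example_read_periphid(void __iomem *base, resource_size_t size)
{
	u32 pid = 0;
	int i;

	/* four ID bytes live in the last 0x20 bytes of the window */
	for (i = 0; i < 4; i++)
		pid |= (readl(base + size - 0x20 + 4 * i) & 0xff) << (i * 8);

	return pid;
}
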
diff --git a/arch/arm/mach-integrator/include/mach/system.h b/arch/arm/mach-integrator/include/mach/system.h
index e1551b8..901514e 100644
--- a/arch/arm/mach-integrator/include/mach/system.h
+++ b/arch/arm/mach-integrator/include/mach/system.h
@@ -21,8 +21,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <mach/cm.h>
-
 static inline void arch_idle(void)
 {
 	/*
@@ -32,13 +30,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * To reset, we hit the on-board reset register
-	 * in the system FPGA
-	 */
-	cm_control(CM_CTRL_RESET, CM_CTRL_RESET);
-}
-
 #endif
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
deleted file mode 100644
index 2f5a2baf..0000000
--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-integrator/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index a1769f3..21a1d6c 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -472,4 +472,5 @@
 	.init_irq	= ap_init_irq,
 	.timer		= &ap_timer,
 	.init_machine	= ap_init,
+	.restart	= integrator_restart,
 MACHINE_END
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 5de49c3..a8b6aa6 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -14,7 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/string.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/kmi.h>
 #include <linux/amba/clcd.h>
@@ -499,4 +499,5 @@
 	.init_irq	= intcp_init_irq,
 	.timer		= &cp_timer,
 	.init_machine	= intcp_init,
+	.restart	= integrator_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
index 52b7fab..07e9ff7 100644
--- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h
+++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
@@ -10,6 +10,7 @@
 void iop13xx_platform_init(void);
 void iop13xx_add_tpmi_devices(void);
 void iop13xx_init_irq(void);
+void iop13xx_restart(char, const char *);
 
 /* CPUID CP6 R0 Page 0 */
 static inline int iop13xx_cpu_id(void)
diff --git a/arch/arm/mach-iop13xx/include/mach/system.h b/arch/arm/mach-iop13xx/include/mach/system.h
index d0c66ef..1f31ed3 100644
--- a/arch/arm/mach-iop13xx/include/mach/system.h
+++ b/arch/arm/mach-iop13xx/include/mach/system.h
@@ -7,21 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <mach/iop13xx.h>
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * Reset the internal bus (warning both cores are reset)
-	 */
-	write_wdtcr(IOP_WDTCR_EN_ARM);
-	write_wdtcr(IOP_WDTCR_EN);
-	write_wdtsr(IOP13XX_WDTSR_WRITE_EN | IOP13XX_WDTCR_IB_RESET);
-	write_wdtcr(0x1000);
-
-	for(;;);
-}
diff --git a/arch/arm/mach-iop13xx/include/mach/vmalloc.h b/arch/arm/mach-iop13xx/include/mach/vmalloc.h
deleted file mode 100644
index c534567..0000000
--- a/arch/arm/mach-iop13xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _VMALLOC_H_
-#define _VMALLOC_H_
-#define VMALLOC_END 	0xfa000000UL
-#endif
diff --git a/arch/arm/mach-iop13xx/iq81340mc.c b/arch/arm/mach-iop13xx/iq81340mc.c
index 4cf2cc4..abaee88 100644
--- a/arch/arm/mach-iop13xx/iq81340mc.c
+++ b/arch/arm/mach-iop13xx/iq81340mc.c
@@ -96,4 +96,5 @@
 	.init_irq       = iop13xx_init_irq,
 	.timer          = &iq81340mc_timer,
 	.init_machine   = iq81340mc_init,
+	.restart	= iop13xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop13xx/iq81340sc.c b/arch/arm/mach-iop13xx/iq81340sc.c
index cd9e274..690916a 100644
--- a/arch/arm/mach-iop13xx/iq81340sc.c
+++ b/arch/arm/mach-iop13xx/iq81340sc.c
@@ -98,4 +98,5 @@
 	.init_irq       = iop13xx_init_irq,
 	.timer          = &iq81340sc_timer,
 	.init_machine   = iq81340sc_init,
+	.restart	= iop13xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index a5b9897..daabb1f 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -606,3 +606,14 @@
 __setup("iop13xx_init_adma", iop13xx_init_adma_setup);
 __setup("iop13xx_init_uart", iop13xx_init_uart_setup);
 __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup);
+
+void iop13xx_restart(char mode, const char *cmd)
+{
+	/*
+	 * Reset the internal bus (warning both cores are reset)
+	 */
+	write_wdtcr(IOP_WDTCR_EN_ARM);
+	write_wdtcr(IOP_WDTCR_EN);
+	write_wdtsr(IOP13XX_WDTSR_WRITE_EN | IOP13XX_WDTCR_IB_RESET);
+	write_wdtcr(0x1000);
+}
diff --git a/arch/arm/mach-iop32x/em7210.c b/arch/arm/mach-iop32x/em7210.c
index 4325055..24069e0 100644
--- a/arch/arm/mach-iop32x/em7210.c
+++ b/arch/arm/mach-iop32x/em7210.c
@@ -208,4 +208,5 @@
 	.init_irq	= iop32x_init_irq,
 	.timer		= &em7210_timer,
 	.init_machine	= em7210_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop32x/glantank.c b/arch/arm/mach-iop32x/glantank.c
index 0edc880..204e1d1 100644
--- a/arch/arm/mach-iop32x/glantank.c
+++ b/arch/arm/mach-iop32x/glantank.c
@@ -212,4 +212,5 @@
 	.init_irq	= iop32x_init_irq,
 	.timer		= &glantank_timer,
 	.init_machine	= glantank_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop32x/include/mach/io.h b/arch/arm/mach-iop32x/include/mach/io.h
index 059c783..2d88264 100644
--- a/arch/arm/mach-iop32x/include/mach/io.h
+++ b/arch/arm/mach-iop32x/include/mach/io.h
@@ -13,15 +13,8 @@
 
 #include <asm/hardware/iop3xx.h>
 
-extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
-	unsigned int mtype);
-extern void __iop3xx_iounmap(void __iomem *addr);
-
 #define IO_SPACE_LIMIT		0xffffffff
 #define __io(p)		((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
 #define __mem_pci(a)		(a)
 
-#define __arch_ioremap	__iop3xx_ioremap
-#define __arch_iounmap	__iop3xx_iounmap
-
 #endif
diff --git a/arch/arm/mach-iop32x/include/mach/system.h b/arch/arm/mach-iop32x/include/mach/system.h
index a4b808f..4a88727 100644
--- a/arch/arm/mach-iop32x/include/mach/system.h
+++ b/arch/arm/mach-iop32x/include/mach/system.h
@@ -7,28 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <asm/mach-types.h>
-#include <asm/hardware/iop3xx.h>
-#include <mach/n2100.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	local_irq_disable();
-
-	if (machine_is_n2100()) {
-		gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW);
-		gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT);
-		while (1)
-			;
-	}
-
-	*IOP3XX_PCSR = 0x30;
-
-	/* Jump into ROM at address 0 */
-	cpu_reset(0);
-}
diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h
deleted file mode 100644
index c4862d4..0000000
--- a/arch/arm/mach-iop32x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfe000000UL
diff --git a/arch/arm/mach-iop32x/iq31244.c b/arch/arm/mach-iop32x/iq31244.c
index 9e7aacc..3eb642a 100644
--- a/arch/arm/mach-iop32x/iq31244.c
+++ b/arch/arm/mach-iop32x/iq31244.c
@@ -318,6 +318,7 @@
 	.init_irq	= iop32x_init_irq,
 	.timer		= &iq31244_timer,
 	.init_machine	= iq31244_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
 
 /* There should have been an ep80219 machine identifier from the beginning.
@@ -332,4 +333,5 @@
 	.init_irq	= iop32x_init_irq,
 	.timer		= &iq31244_timer,
 	.init_machine	= iq31244_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop32x/iq80321.c b/arch/arm/mach-iop32x/iq80321.c
index 53ea86f..2ec724b 100644
--- a/arch/arm/mach-iop32x/iq80321.c
+++ b/arch/arm/mach-iop32x/iq80321.c
@@ -191,4 +191,5 @@
 	.init_irq	= iop32x_init_irq,
 	.timer		= &iq80321_timer,
 	.init_machine	= iq80321_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index d726927..6b6d559 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -291,6 +291,14 @@
 		;
 }
 
+static void n2100_restart(char mode, const char *cmd)
+{
+	gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW);
+	gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT);
+	while (1)
+		;
+}
+
 
 static struct timer_list power_button_poll_timer;
 
@@ -332,4 +340,5 @@
 	.init_irq	= iop32x_init_irq,
 	.timer		= &n2100_timer,
 	.init_machine	= n2100_init_machine,
+	.restart	= n2100_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop33x/include/mach/io.h b/arch/arm/mach-iop33x/include/mach/io.h
index 39e893e..a8a66fc 100644
--- a/arch/arm/mach-iop33x/include/mach/io.h
+++ b/arch/arm/mach-iop33x/include/mach/io.h
@@ -13,15 +13,8 @@
 
 #include <asm/hardware/iop3xx.h>
 
-extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
-	unsigned int mtype);
-extern void __iop3xx_iounmap(void __iomem *addr);
-
 #define IO_SPACE_LIMIT		0xffffffff
 #define __io(p)		((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
 #define __mem_pci(a)		(a)
 
-#define __arch_ioremap	__iop3xx_ioremap
-#define __arch_iounmap	__iop3xx_iounmap
-
 #endif
diff --git a/arch/arm/mach-iop33x/include/mach/system.h b/arch/arm/mach-iop33x/include/mach/system.h
index f192a34..4f98e76 100644
--- a/arch/arm/mach-iop33x/include/mach/system.h
+++ b/arch/arm/mach-iop33x/include/mach/system.h
@@ -7,17 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <asm/hardware/iop3xx.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	*IOP3XX_PCSR = 0x30;
-
-	/* Jump into ROM at address 0 */
-	cpu_reset(0);
-}
diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h
deleted file mode 100644
index 48331dc..0000000
--- a/arch/arm/mach-iop33x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfe000000UL
diff --git a/arch/arm/mach-iop33x/iq80331.c b/arch/arm/mach-iop33x/iq80331.c
index 9e14ccc..abce934 100644
--- a/arch/arm/mach-iop33x/iq80331.c
+++ b/arch/arm/mach-iop33x/iq80331.c
@@ -146,4 +146,5 @@
 	.init_irq	= iop33x_init_irq,
 	.timer		= &iq80331_timer,
 	.init_machine	= iq80331_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-iop33x/iq80332.c b/arch/arm/mach-iop33x/iq80332.c
index 09c899a..7513559 100644
--- a/arch/arm/mach-iop33x/iq80332.c
+++ b/arch/arm/mach-iop33x/iq80332.c
@@ -146,4 +146,5 @@
 	.init_irq	= iop33x_init_irq,
 	.timer		= &iq80332_timer,
 	.init_machine	= iq80332_init_machine,
+	.restart	= iop3xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index 24f0fe3..81c4537 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -515,3 +515,7 @@
 	}
 }
 
+void ixp2000_restart(char mode, const char *cmd)
+{
+	ixp2000_reg_wrb(IXP2000_RESET0, RSTALL);
+}
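
ixp2000_restart() issues the final write through the _wrb accessor. If, as the name suggests, that is the usual write-then-read-back idiom, the point is to force the posted write out to the reset register before the CPU stops doing anything useful; a generic version of the idiom (an assumption, not the ixp2000 helper itself) looks like:

static inline void example_reg_wrb(volatile unsigned long *reg, unsigned long val)
{
	*reg = val;
	(void)*reg;	/* read back so the write leaves the write buffer */
}
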
diff --git a/arch/arm/mach-ixp2000/enp2611.c b/arch/arm/mach-ixp2000/enp2611.c
index af99945..ee52541 100644
--- a/arch/arm/mach-ixp2000/enp2611.c
+++ b/arch/arm/mach-ixp2000/enp2611.c
@@ -259,6 +259,7 @@
 	.init_irq	= ixp2000_init_irq,
 	.timer		= &enp2611_timer,
 	.init_machine	= enp2611_init_machine,
+	.restart	= ixp2000_restart,
 MACHINE_END
 
 
diff --git a/arch/arm/mach-ixp2000/include/mach/platform.h b/arch/arm/mach-ixp2000/include/mach/platform.h
index 42182c79..bb0f8dc 100644
--- a/arch/arm/mach-ixp2000/include/mach/platform.h
+++ b/arch/arm/mach-ixp2000/include/mach/platform.h
@@ -122,6 +122,7 @@
 void ixp2000_uart_init(void);
 void ixp2000_init_irq(void);
 void ixp2000_init_time(unsigned long);
+void ixp2000_restart(char, const char *);
 unsigned long ixp2000_gettimeoffset(void);
 
 struct pci_sys_data;
diff --git a/arch/arm/mach-ixp2000/include/mach/system.h b/arch/arm/mach-ixp2000/include/mach/system.h
index de37099..a7fb08b 100644
--- a/arch/arm/mach-ixp2000/include/mach/system.h
+++ b/arch/arm/mach-ixp2000/include/mach/system.h
@@ -8,42 +8,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	local_irq_disable();
-
-	/*
-	 * Reset flash banking register so that we are pointing at
-	 * RedBoot bank.
-	 */
-	if (machine_is_ixdp2401()) {
-		ixp2000_reg_write(IXDP2X01_CPLD_FLASH_REG,
-					((0 >> IXDP2X01_FLASH_WINDOW_BITS)
-						| IXDP2X01_CPLD_FLASH_INTERN));
-		ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0xffffffff);
-	}
-
-	/*
-	 * On IXDP2801 we need to write this magic sequence to the CPLD
-	 * to cause a complete reset of the CPU and all external devices
-	 * and move the flash bank register back to 0.
-	 */
-	if (machine_is_ixdp2801() || machine_is_ixdp28x5()) {
-		unsigned long reset_reg = *IXDP2X01_CPLD_RESET_REG;
-
-		reset_reg = 0x55AA0000 | (reset_reg & 0x0000FFFF);
-		ixp2000_reg_write(IXDP2X01_CPLD_RESET_REG, reset_reg);
-		ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0x80000000);
-	}
-
-	ixp2000_reg_wrb(IXP2000_RESET0, RSTALL);
-}
diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h
deleted file mode 100644
index 61c8dae..0000000
--- a/arch/arm/mach-ixp2000/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-ixp2000/include/mach/vmalloc.h
- *
- * Author: Naeem Afzal <naeem.m.afzal@intel.com>
- *
- * Copyright 2002 Intel Corp.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_END	    0xfb000000UL
diff --git a/arch/arm/mach-ixp2000/ixdp2400.c b/arch/arm/mach-ixp2000/ixdp2400.c
index f7dfd97..f53e911 100644
--- a/arch/arm/mach-ixp2000/ixdp2400.c
+++ b/arch/arm/mach-ixp2000/ixdp2400.c
@@ -176,5 +176,6 @@
 	.init_irq	= ixdp2400_init_irq,
 	.timer		= &ixdp2400_timer,
 	.init_machine	= ixdp2x00_init_machine,
+	.restart	= ixp2000_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-ixp2000/ixdp2800.c b/arch/arm/mach-ixp2000/ixdp2800.c
index d33bcac..a2e7c39 100644
--- a/arch/arm/mach-ixp2000/ixdp2800.c
+++ b/arch/arm/mach-ixp2000/ixdp2800.c
@@ -291,5 +291,6 @@
 	.init_irq	= ixdp2800_init_irq,
 	.timer		= &ixdp2800_timer,
 	.init_machine	= ixdp2x00_init_machine,
+	.restart	= ixp2000_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c
index 61a2867..7632bea 100644
--- a/arch/arm/mach-ixp2000/ixdp2x01.c
+++ b/arch/arm/mach-ixp2000/ixdp2x01.c
@@ -413,6 +413,35 @@
 	ixdp2x01_uart_init();
 }
 
+static void ixdp2401_restart(char mode, const char *cmd)
+{
+	/*
+	 * Reset flash banking register so that we are pointing at
+	 * RedBoot bank.
+	 */
+	ixp2000_reg_write(IXDP2X01_CPLD_FLASH_REG,
+				((0 >> IXDP2X01_FLASH_WINDOW_BITS)
+					| IXDP2X01_CPLD_FLASH_INTERN));
+	ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0xffffffff);
+
+	ixp2000_restart(mode, cmd);
+}
+
+static void ixdp280x_restart(char mode, const char *cmd)
+{
+	/*
+	 * On IXDP2801 we need to write this magic sequence to the CPLD
+	 * to cause a complete reset of the CPU and all external devices
+	 * and move the flash bank register back to 0.
+	 */
+	unsigned long reset_reg = *IXDP2X01_CPLD_RESET_REG;
+
+	reset_reg = 0x55AA0000 | (reset_reg & 0x0000FFFF);
+	ixp2000_reg_write(IXDP2X01_CPLD_RESET_REG, reset_reg);
+	ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0x80000000);
+
+	ixp2000_restart(mode, cmd);
+}
 
 #ifdef CONFIG_ARCH_IXDP2401
 MACHINE_START(IXDP2401, "Intel IXDP2401 Development Platform")
@@ -422,6 +451,7 @@
 	.init_irq	= ixdp2x01_init_irq,
 	.timer		= &ixdp2x01_timer,
 	.init_machine	= ixdp2x01_init_machine,
+	.restart	= ixdp2401_restart,
 MACHINE_END
 #endif
 
@@ -433,6 +463,7 @@
 	.init_irq	= ixdp2x01_init_irq,
 	.timer		= &ixdp2x01_timer,
 	.init_machine	= ixdp2x01_init_machine,
+	.restart	= ixdp280x_restart,
 MACHINE_END
 
 /*
@@ -446,6 +477,7 @@
 	.init_irq	= ixdp2x01_init_irq,
 	.timer		= &ixdp2x01_timer,
 	.init_machine	= ixdp2x01_init_machine,
+	.restart	= ixdp280x_restart,
 MACHINE_END
 #endif
 
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index a1bee33..0923bb9 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -444,3 +444,9 @@
 	*IXP23XX_EXP_UNIT_FUSE |= 0xf;
 	platform_add_devices(ixp23xx_devices, ARRAY_SIZE(ixp23xx_devices));
 }
+
+void ixp23xx_restart(char mode, const char *cmd)
+{
+	/* Use on-chip reset capability */
+	*IXP23XX_RESET0 |= IXP23XX_RST_ALL;
+}
diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c
index 30dd316..8f2487e 100644
--- a/arch/arm/mach-ixp23xx/espresso.c
+++ b/arch/arm/mach-ixp23xx/espresso.c
@@ -90,4 +90,5 @@
 	.timer		= &ixp23xx_timer,
 	.atag_offset	= 0x100,
 	.init_machine	= espresso_init,
+	.restart	= ixp23xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp23xx/include/mach/io.h b/arch/arm/mach-ixp23xx/include/mach/io.h
index a1749d0..4ce4353 100644
--- a/arch/arm/mach-ixp23xx/include/mach/io.h
+++ b/arch/arm/mach-ixp23xx/include/mach/io.h
@@ -20,33 +20,4 @@
 #define __io(p)		((void __iomem*)((p) + IXP23XX_PCI_IO_VIRT))
 #define __mem_pci(a)	(a)
 
-static inline void __iomem *
-ixp23xx_ioremap(unsigned long addr, unsigned long size, unsigned int mtype)
-{
-	if (addr >= IXP23XX_PCI_MEM_START &&
-		addr <= IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE) {
-		if (addr + size > IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE)
-			return NULL;
-
-		return (void __iomem *)
- 			((addr - IXP23XX_PCI_MEM_START) + IXP23XX_PCI_MEM_VIRT);
-	}
-
-	return __arm_ioremap(addr, size, mtype);
-}
-
-static inline void
-ixp23xx_iounmap(void __iomem *addr)
-{
-	if ((((u32)addr) >= IXP23XX_PCI_MEM_VIRT) &&
-	    (((u32)addr) < IXP23XX_PCI_MEM_VIRT + IXP23XX_PCI_MEM_SIZE))
-		return;
-
-	__iounmap(addr);
-}
-
-#define __arch_ioremap	ixp23xx_ioremap
-#define __arch_iounmap	ixp23xx_iounmap
-
-
 #endif
diff --git a/arch/arm/mach-ixp23xx/include/mach/platform.h b/arch/arm/mach-ixp23xx/include/mach/platform.h
index db9d941..50de558 100644
--- a/arch/arm/mach-ixp23xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp23xx/include/mach/platform.h
@@ -34,6 +34,7 @@
 void ixp23xx_map_io(void);
 void ixp23xx_init_irq(void);
 void ixp23xx_sys_init(void);
+void ixp23xx_restart(char, const char *);
 int ixp23xx_pci_setup(int, struct pci_sys_data *);
 void ixp23xx_pci_preinit(void);
 struct pci_bus *ixp23xx_pci_scan_bus(int, struct pci_sys_data*);
diff --git a/arch/arm/mach-ixp23xx/include/mach/system.h b/arch/arm/mach-ixp23xx/include/mach/system.h
index 8920ff2..277dda7 100644
--- a/arch/arm/mach-ixp23xx/include/mach/system.h
+++ b/arch/arm/mach-ixp23xx/include/mach/system.h
@@ -7,10 +7,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
 static inline void arch_idle(void)
 {
 #if 0
@@ -18,16 +14,3 @@
 		cpu_do_idle();
 #endif
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/* First try machine specific support */
-	if (machine_is_ixdp2351()) {
-		*IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_MAGIC;
-		(void) *IXDP2351_CPLD_RESET1_REG;
-		*IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_ENABLE;
-	}
-
-	/* Use on-chip reset capability */
-	*IXP23XX_RESET0 |= IXP23XX_RST_ALL;
-}
diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
deleted file mode 100644
index 896c56a..0000000
--- a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-ixp23xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2005 MontaVista Software, Inc.
- *
- * NPU mappings end at 0xf0000000 and we allocate 64MB for board
- * specific static I/O.
- */
-
-#define VMALLOC_END	(0xec000000UL)
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index b3a57e0..5d5dd3e 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -326,6 +326,17 @@
 	ixp23xx_sys_init();
 }
 
+static void ixdp2351_restart(char mode, const char *cmd)
+{
+	/* First try machine specific support */
+
+	*IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_MAGIC;
+	(void) *IXDP2351_CPLD_RESET1_REG;
+	*IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_ENABLE;
+
+	ixp23xx_restart(mode, cmd);
+}
+
 MACHINE_START(IXDP2351, "Intel IXDP2351 Development Platform")
 	/* Maintainer: MontaVista Software, Inc. */
 	.map_io		= ixdp2351_map_io,
@@ -333,4 +344,5 @@
 	.timer		= &ixp23xx_timer,
 	.atag_offset	= 0x100,
 	.init_machine	= ixdp2351_init,
+	.restart	= ixdp2351_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c
index 8f4dcbb..377283f 100644
--- a/arch/arm/mach-ixp23xx/roadrunner.c
+++ b/arch/arm/mach-ixp23xx/roadrunner.c
@@ -177,4 +177,5 @@
 	.timer		= &ixp23xx_timer,
 	.atag_offset	= 0x100,
 	.init_machine	= roadrunner_init,
+	.restart	= ixp23xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 37609a2..a7277ad 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -172,6 +172,7 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 
  /*
@@ -190,6 +191,7 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index b86a005..3841ab4 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -17,7 +17,6 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/serial.h>
-#include <linux/sched.h>
 #include <linux/tty.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
@@ -403,18 +402,9 @@
 /*
  * sched_clock()
  */
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace ixp4xx_read_sched_clock(void)
 {
-	u32 cyc = *IXP4XX_OSTS;
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace ixp4xx_update_sched_clock(void)
-{
-	u32 cyc = *IXP4XX_OSTS;
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return *IXP4XX_OSTS;
 }
 
 /*
@@ -430,7 +420,7 @@
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
-	init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
+	setup_sched_clock(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq);
 
 	clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
 			ixp4xx_clocksource_read);
@@ -501,3 +491,23 @@
 
 	clockevents_register_device(&clockevent_ixp4xx);
 }
+
+void ixp4xx_restart(char mode, const char *cmd)
+{
+	if ( 1 && mode == 's') {
+		/* Jump into ROM at address 0 */
+		soft_restart(0);
+	} else {
+		/* Use on-chip reset capability */
+
+		/* set the "key" register to enable access to
+		 * "timer" and "enable" registers
+		 */
+		*IXP4XX_OSWK = IXP4XX_WDT_KEY;
+
+		/* write 0 to the timer register for an immediate reset */
+		*IXP4XX_OSWT = 0;
+
+		*IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
+	}
+}
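
Besides gaining ixp4xx_restart(), common.c above is converted from the open-coded DEFINE_CLOCK_DATA()/init_sched_clock() pair to setup_sched_clock(), which only needs a callback returning the raw 32-bit OSTS counter and handles the cycles-to-nanoseconds bookkeeping itself. The sketch below is a user-space illustration of that bookkeeping (ns = cyc * mult >> shift); the shift value and the ~66 MHz timer rate are assumptions for the example, not values taken from the patch.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mult;
	static const uint32_t shift = 24;	/* assumed shift value */

	static void calc_mult(uint32_t rate_hz)
	{
		/* same idea as clocks_calc_mult_shift(): ns = cyc * mult >> shift */
		mult = (uint32_t)(((uint64_t)1000000000 << shift) / rate_hz);
	}

	static uint64_t cyc_to_ns(uint32_t cyc)
	{
		return ((uint64_t)cyc * mult) >> shift;
	}

	int main(void)
	{
		calc_mult(66666000);		/* assumed ~66 MHz timer rate */
		printf("66666 cycles ~= %llu ns\n",
		       (unsigned long long)cyc_to_ns(66666));
		return 0;
	}
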
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 81dfec3..a74f86c 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -117,6 +117,7 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
 
@@ -132,6 +133,7 @@
 	.timer		= &ixp4xx_timer,
 	.atag_offset	= 0x100,
 	.init_machine	= coyote_init,
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
 
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 8837fbc..67be177 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -286,4 +286,5 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 2887c35..6d58182 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -277,5 +277,6 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index d69d1b0..7ecf9b2 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -104,5 +104,6 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index bf6678d..c0e3d69 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -504,4 +504,5 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index aa029fc..a23f893 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -172,6 +172,7 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 
 
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index e824c02..df9250b 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -125,6 +125,7 @@
 extern void ixp4xx_sys_init(void);
 extern void ixp4xx_timer_init(void);
 extern struct sys_timer ixp4xx_timer;
+extern void ixp4xx_restart(char, const char *);
 extern void ixp4xx_pci_preinit(void);
 struct pci_sys_data;
 extern int ixp4xx_setup(int nr, struct pci_sys_data *sys);
diff --git a/arch/arm/mach-ixp4xx/include/mach/system.h b/arch/arm/mach-ixp4xx/include/mach/system.h
index 54c0af7..140a9be 100644
--- a/arch/arm/mach-ixp4xx/include/mach/system.h
+++ b/arch/arm/mach-ixp4xx/include/mach/system.h
@@ -8,9 +8,6 @@
  * published by the Free Software Foundation.
  *
  */
-
-#include <mach/hardware.h>
-
 static inline void arch_idle(void)
 {
 	/* ixp4xx does not implement the XScale PWRMODE register,
@@ -20,25 +17,3 @@
 	cpu_do_idle();
 #endif
 }
-
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	if ( 1 && mode == 's') {
-		/* Jump into ROM at address 0 */
-		cpu_reset(0);
-	} else {
-		/* Use on-chip reset capability */
-
-		/* set the "key" register to enable access to
-		 * "timer" and "enable" registers
-		 */
-		*IXP4XX_OSWK = IXP4XX_WDT_KEY;
-
-		/* write 0 to the timer register for an immediate reset */
-		*IXP4XX_OSWT = 0;
-
-		*IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
-	}
-}
-
diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
deleted file mode 100644
index 9bcd64d..0000000
--- a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/vmalloc.h
- */
-#define VMALLOC_END       (0xff000000UL)
-
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index f235f82..8a38b39 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -261,6 +261,7 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
 
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index de716fa..1010eb7 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -321,4 +321,5 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index ac81ccb..aa355c3 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -307,4 +307,5 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/omixp-setup.c b/arch/arm/mach-ixp4xx/omixp-setup.c
index 3b6a81a..0940869 100644
--- a/arch/arm/mach-ixp4xx/omixp-setup.c
+++ b/arch/arm/mach-ixp4xx/omixp-setup.c
@@ -246,6 +246,7 @@
 	.init_irq	= ixp4xx_init_irq,
 	.timer          = &ixp4xx_timer,
 	.init_machine	= omixp_init,
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
 
@@ -259,6 +260,7 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
 
@@ -269,5 +271,6 @@
 	.init_irq	= ixp4xx_init_irq,
 	.timer          = &ixp4xx_timer,
 	.init_machine	= omixp_init,
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 27e469e..9dec206 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -244,4 +244,5 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index b14144b..5ac0f0a 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -105,5 +105,6 @@
 #if defined(CONFIG_PCI)
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= ixp4xx_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index f3248cf..0bff4a9 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -534,3 +534,19 @@
 	return 0;
 }
 late_initcall(kirkwood_clock_gate);
+
+void kirkwood_restart(char mode, const char *cmd)
+{
+	/*
+	 * Enable soft reset to assert RSTOUTn.
+	 */
+	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
+
+	/*
+	 * Assert soft reset.
+	 */
+	writel(SOFT_RESET, SYSTEM_SOFT_RESET);
+
+	while (1)
+		;
+}
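
kirkwood_restart() above carries the bridge-register sequence over verbatim from the old inline arch_reset(): unmask RSTOUTn for soft resets, assert the soft-reset bit, then spin until the hardware pulls the reset line. A small user-space model of that two-step sequence follows; the register names and bit values are placeholders rather than the real Kirkwood layout.

	#include <stdint.h>
	#include <stdio.h>

	#define SOFT_RESET_OUT_EN	(1u << 2)	/* placeholder bit */
	#define SOFT_RESET		(1u << 0)	/* placeholder bit */

	static uint32_t rstoutn_mask;		/* stand-in for RSTOUTn_MASK      */
	static uint32_t system_soft_reset;	/* stand-in for SYSTEM_SOFT_RESET */

	static void bridge_soft_reset(void)
	{
		rstoutn_mask = SOFT_RESET_OUT_EN;	/* let soft reset drive RSTOUTn */
		system_soft_reset = SOFT_RESET;		/* assert the soft reset        */

		/* the real function loops forever here waiting for the reset */
		printf("RSTOUTn_MASK=%#x SYSTEM_SOFT_RESET=%#x\n",
		       rstoutn_mask, system_soft_reset);
	}

	int main(void)
	{
		bridge_soft_reset();
		return 0;
	}
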
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index b9b0f09..1529280 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -50,6 +50,7 @@
 void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
 void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
 void kirkwood_audio_init(void);
+void kirkwood_restart(char, const char *);
 
 extern int kirkwood_tclk;
 extern struct sys_timer kirkwood_timer;
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index f457e07..6e1bac9 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -227,4 +227,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index ff4c21c..d933593 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -103,4 +103,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/dockstar-setup.c b/arch/arm/mach-kirkwood/dockstar-setup.c
index e4d199b..61d9a55 100644
--- a/arch/arm/mach-kirkwood/dockstar-setup.c
+++ b/arch/arm/mach-kirkwood/dockstar-setup.c
@@ -108,4 +108,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c
index 6c40f78..bdaed38 100644
--- a/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -127,4 +127,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/include/mach/io.h b/arch/arm/mach-kirkwood/include/mach/io.h
index 1aaddc3..49dd0cb 100644
--- a/arch/arm/mach-kirkwood/include/mach/io.h
+++ b/arch/arm/mach-kirkwood/include/mach/io.h
@@ -19,31 +19,6 @@
 					+ KIRKWOOD_PCIE_IO_VIRT_BASE);
 }
 
-static inline void __iomem *
-__arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
-{
-	void __iomem *retval;
-	unsigned long offs = paddr - KIRKWOOD_REGS_PHYS_BASE;
-	if (mtype == MT_DEVICE && size && offs < KIRKWOOD_REGS_SIZE &&
-	    size <= KIRKWOOD_REGS_SIZE && offs + size <= KIRKWOOD_REGS_SIZE) {
-		retval = (void __iomem *)KIRKWOOD_REGS_VIRT_BASE + offs;
-	} else {
-		retval = __arm_ioremap(paddr, size, mtype);
-	}
-
-	return retval;
-}
-
-static inline void
-__arch_iounmap(void __iomem *addr)
-{
-	if (addr < (void __iomem *)KIRKWOOD_REGS_VIRT_BASE ||
-	    addr >= (void __iomem *)(KIRKWOOD_REGS_VIRT_BASE + KIRKWOOD_REGS_SIZE))
-		__iounmap(addr);
-}
-
-#define __arch_ioremap		__arch_ioremap
-#define __arch_iounmap		__arch_iounmap
 #define __io(a)			__io(a)
 #define __mem_pci(a)		(a)
 
diff --git a/arch/arm/mach-kirkwood/include/mach/system.h b/arch/arm/mach-kirkwood/include/mach/system.h
index 7568e95..5fddde0 100644
--- a/arch/arm/mach-kirkwood/include/mach/system.h
+++ b/arch/arm/mach-kirkwood/include/mach/system.h
@@ -9,28 +9,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <mach/bridge-regs.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * Enable soft reset to assert RSTOUTn.
-	 */
-	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
-	/*
-	 * Assert soft reset.
-	 */
-	writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
-	while (1)
-		;
-}
-
-
 #endif
diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h
deleted file mode 100644
index bf162ca..0000000
--- a/arch/arm/mach-kirkwood/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfe800000UL
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 9a1e917..85f6169 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -169,4 +169,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 8849bcc..e6bba01 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -264,6 +264,7 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
 
@@ -275,6 +276,7 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
 
@@ -286,5 +288,6 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index 1ba12c4..31ae8de 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -405,6 +405,7 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
 
@@ -416,5 +417,6 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index 5660ca6..01f8c89 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -220,6 +220,7 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
 
@@ -232,6 +233,7 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
 
@@ -244,5 +246,6 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index 6663869..fd2c9c8 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -85,4 +85,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index 66b3c05..ef92207 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -121,4 +121,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/sheevaplug-setup.c b/arch/arm/mach-kirkwood/sheevaplug-setup.c
index 8b102d6..4ea70e5 100644
--- a/arch/arm/mach-kirkwood/sheevaplug-setup.c
+++ b/arch/arm/mach-kirkwood/sheevaplug-setup.c
@@ -107,7 +107,7 @@
 	kirkwood_init();
 
 	/* setup gpio pin select */
-	if (machine_is_sheeva_esata())
+	if (machine_is_esata_sheevaplug())
 		kirkwood_mpp_conf(sheeva_esata_mpp_config);
 	else
 		kirkwood_mpp_conf(sheevaplug_mpp_config);
@@ -123,11 +123,11 @@
 	kirkwood_ge00_init(&sheevaplug_ge00_data);
 
 	/* honor lower power consumption for plugs with out eSATA */
-	if (machine_is_sheeva_esata())
+	if (machine_is_esata_sheevaplug())
 		kirkwood_sata_init(&sheeva_esata_sata_data);
 
 	/* enable sd wp and sd cd on plugs with esata */
-	if (machine_is_sheeva_esata())
+	if (machine_is_esata_sheevaplug())
 		kirkwood_sdio_init(&sheeva_esata_mvsdio_data);
 	else
 		kirkwood_sdio_init(&sheevaplug_mvsdio_data);
@@ -144,6 +144,7 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
 
@@ -155,5 +156,6 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/t5325-setup.c b/arch/arm/mach-kirkwood/t5325-setup.c
index ea104fb..966b2b3 100644
--- a/arch/arm/mach-kirkwood/t5325-setup.c
+++ b/arch/arm/mach-kirkwood/t5325-setup.c
@@ -207,4 +207,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
index 262c034..73e2b6c 100644
--- a/arch/arm/mach-kirkwood/ts219-setup.c
+++ b/arch/arm/mach-kirkwood/ts219-setup.c
@@ -138,4 +138,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index b68f5b4..5bbca26 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -182,4 +182,5 @@
 	.init_early	= kirkwood_init_early,
 	.init_irq	= kirkwood_init_irq,
 	.timer		= &kirkwood_timer,
+	.restart	= kirkwood_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c
index a91f99d..255502d 100644
--- a/arch/arm/mach-ks8695/board-acs5k.c
+++ b/arch/arm/mach-ks8695/board-acs5k.c
@@ -228,4 +228,5 @@
 	.init_irq	= ks8695_init_irq,
 	.init_machine	= acs5k_init,
 	.timer		= &ks8695_timer,
+	.restart	= ks8695_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ks8695/board-dsm320.c b/arch/arm/mach-ks8695/board-dsm320.c
index d24bcef..e0d36ce 100644
--- a/arch/arm/mach-ks8695/board-dsm320.c
+++ b/arch/arm/mach-ks8695/board-dsm320.c
@@ -126,4 +126,5 @@
 	.init_irq	= ks8695_init_irq,
 	.init_machine	= dsm320_init,
 	.timer		= &ks8695_timer,
+	.restart	= ks8695_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ks8695/board-micrel.c b/arch/arm/mach-ks8695/board-micrel.c
index 16c9565..a827072 100644
--- a/arch/arm/mach-ks8695/board-micrel.c
+++ b/arch/arm/mach-ks8695/board-micrel.c
@@ -58,4 +58,5 @@
 	.init_irq	= ks8695_init_irq,
 	.init_machine	= micrel_init,
 	.timer		= &ks8695_timer,
+	.restart	= ks8695_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ks8695/generic.h b/arch/arm/mach-ks8695/generic.h
index 2fbfab8..f8bdb11 100644
--- a/arch/arm/mach-ks8695/generic.h
+++ b/arch/arm/mach-ks8695/generic.h
@@ -12,4 +12,5 @@
 
 extern __init void ks8695_map_io(void);
 extern __init void ks8695_init_irq(void);
+extern void ks8695_restart(char, const char *);
 extern struct sys_timer ks8695_timer;
diff --git a/arch/arm/mach-ks8695/include/mach/system.h b/arch/arm/mach-ks8695/include/mach/system.h
index fb1dda9..59fe992 100644
--- a/arch/arm/mach-ks8695/include/mach/system.h
+++ b/arch/arm/mach-ks8695/include/mach/system.h
@@ -14,9 +14,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <linux/io.h>
-#include <mach/regs-timer.h>
-
 static void arch_idle(void)
 {
 	/*
@@ -27,22 +24,4 @@
 
 }
 
-static void arch_reset(char mode, const char *cmd)
-{
-	unsigned int reg;
-
-	if (mode == 's')
-		cpu_reset(0);
-
-	/* disable timer0 */
-	reg = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
-	__raw_writel(reg & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
-
-	/* enable watchdog mode */
-	__raw_writel((10 << 8) | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);
-
-	/* re-enable timer0 */
-	__raw_writel(reg | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
-}
-
 #endif
diff --git a/arch/arm/mach-ks8695/include/mach/vmalloc.h b/arch/arm/mach-ks8695/include/mach/vmalloc.h
deleted file mode 100644
index 744ac66..0000000
--- a/arch/arm/mach-ks8695/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-ks8695/include/mach/vmalloc.h
- *
- * Copyright (C) 2006 Ben Dooks
- * Copyright (C) 2006 Simtec Electronics <linux@simtec.co.uk>
- *
- * KS8695 vmalloc definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	  (KS8695_IO_VA & PGDIR_MASK)
-
-#endif
diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c
index a78092d..76802aa 100644
--- a/arch/arm/mach-ks8695/irq.c
+++ b/arch/arm/mach-ks8695/irq.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c
index 69c072c..37dfcd5 100644
--- a/arch/arm/mach-ks8695/time.c
+++ b/arch/arm/mach-ks8695/time.c
@@ -109,3 +109,21 @@
 	.offset		= ks8695_gettimeoffset,
 	.resume		= ks8695_timer_setup,
 };
+
+void ks8695_restart(char mode, const char *cmd)
+{
+	unsigned int reg;
+
+	if (mode == 's')
+		soft_restart(0);
+
+	/* disable timer0 */
+	reg = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
+	__raw_writel(reg & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
+
+	/* enable watchdog mode */
+	__raw_writel((10 << 8) | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);
+
+	/* re-enable timer0 */
+	__raw_writel(reg | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
+}
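
ks8695_restart() above shows the other common reset strategy in this series: if a soft reboot was requested it jumps through soft_restart(0), otherwise it re-arms timer 0 in watchdog mode with a tiny count so the chip resets itself when the watchdog fires. The sketch below models those three register steps in user space; the bit positions are invented for the illustration and do not match the real KS8695 timer block.

	#include <stdint.h>
	#include <stdio.h>

	#define TMCON_T0EN	(1u << 0)	/* invented bit position */
	#define T0TC_WATCHDOG	(1u << 5)	/* invented bit position */

	static uint32_t tmcon, t0tc;		/* stand-ins for the MMIO registers */

	static void watchdog_restart(void)
	{
		uint32_t reg = tmcon;

		tmcon = reg & ~TMCON_T0EN;		/* 1. stop timer 0                   */
		t0tc = (10 << 8) | T0TC_WATCHDOG;	/* 2. tiny count, watchdog mode      */
		tmcon = reg | TMCON_T0EN;		/* 3. re-enable; watchdog resets SoC */

		printf("TMCON=%#x T0TC=%#x\n", tmcon, t0tc);
	}

	int main(void)
	{
		watchdog_restart();
		return 0;
	}
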
diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c
index 205b2db..369b152 100644
--- a/arch/arm/mach-lpc32xx/common.c
+++ b/arch/arm/mach-lpc32xx/common.c
@@ -164,7 +164,7 @@
 /*
  * System reset via the watchdog timer
  */
-void lpc32xx_watchdog_reset(void)
+static void lpc32xx_watchdog_reset(void)
 {
 	/* Make sure WDT clocks are enabled */
 	__raw_writel(LPC32XX_CLKPWR_PWMCLK_WDOG_EN,
@@ -311,3 +311,21 @@
 {
 	iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc));
 }
+
+void lpc23xx_restart(char mode, const char *cmd)
+{
+	switch (mode) {
+	case 's':
+	case 'h':
+		lpc32xx_watchdog_reset();
+		break;
+
+	default:
+		/* Do nothing */
+		break;
+	}
+
+	/* Wait for watchdog to reset system */
+	while (1)
+		;
+}
diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h
index 5583f52..4b4e700 100644
--- a/arch/arm/mach-lpc32xx/common.h
+++ b/arch/arm/mach-lpc32xx/common.h
@@ -39,6 +39,8 @@
 extern void __init lpc32xx_map_io(void);
 extern void __init lpc32xx_serial_init(void);
 extern void __init lpc32xx_gpio_init(void);
+extern void lpc23xx_restart(char, const char *);
+
 
 /*
  * Structure used for setting up and querying the PLLS
diff --git a/arch/arm/mach-lpc32xx/include/mach/system.h b/arch/arm/mach-lpc32xx/include/mach/system.h
index df3b0de..bf176c9 100644
--- a/arch/arm/mach-lpc32xx/include/mach/system.h
+++ b/arch/arm/mach-lpc32xx/include/mach/system.h
@@ -24,29 +24,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	extern void lpc32xx_watchdog_reset(void);
-
-	switch (mode) {
-	case 's':
-	case 'h':
-		printk(KERN_CRIT "RESET: Rebooting system\n");
-
-		/* Disable interrupts */
-		local_irq_disable();
-
-		lpc32xx_watchdog_reset();
-		break;
-
-	default:
-		/* Do nothing */
-		break;
-	}
-
-	/* Wait for watchdog to reset system */
-	while (1)
-		;
-}
-
 #endif
diff --git a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h b/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
deleted file mode 100644
index 720fa43..0000000
--- a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/vmalloc.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	0xF0000000UL
-
-#endif
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 6d2f0d1..bfee5b4 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -18,7 +18,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/dma-mapping.h>
@@ -388,4 +388,5 @@
 	.init_irq	= lpc32xx_init_irq,
 	.timer		= &lpc32xx_timer,
 	.init_machine	= phy3250_board_init,
+	.restart	= lpc23xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 7a60bbb..3e6dfab 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -243,6 +243,7 @@
 	.init_irq       = pxa168_init_irq,
 	.timer          = &pxa168_timer,
 	.init_machine   = common_init,
+	.restart	= pxa168_restart,
 MACHINE_END
 
 MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform")
@@ -251,4 +252,5 @@
 	.init_irq       = pxa168_init_irq,
 	.timer          = &pxa168_timer,
 	.init_machine   = common_init,
+	.restart	= pxa168_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/avengers_lite.c b/arch/arm/mach-mmp/avengers_lite.c
index 39f0878..8de3dc6 100644
--- a/arch/arm/mach-mmp/avengers_lite.c
+++ b/arch/arm/mach-mmp/avengers_lite.c
@@ -45,4 +45,5 @@
 	.init_irq       = pxa168_init_irq,
 	.timer          = &pxa168_timer,
 	.init_machine   = avengers_lite_init,
+	.restart	= pxa168_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index 983cfb1..e16f04b 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -219,4 +219,5 @@
 	.init_irq	= mmp2_init_irq,
 	.timer		= &mmp2_timer,
 	.init_machine	= brownstone_init,
+	.restart	= mmp_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/common.c b/arch/arm/mach-mmp/common.c
index 5720674..062b5b9 100644
--- a/arch/arm/mach-mmp/common.c
+++ b/arch/arm/mach-mmp/common.c
@@ -45,3 +45,8 @@
 	/* this is early, initialize mmp_chip_id here */
 	mmp_chip_id = __raw_readl(MMP_CHIPID);
 }
+
+void mmp_restart(char mode, const char *cmd)
+{
+	soft_restart(0);
+}
diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h
index ec8d65d..1c9d6c1 100644
--- a/arch/arm/mach-mmp/common.h
+++ b/arch/arm/mach-mmp/common.h
@@ -6,3 +6,4 @@
 
 extern void __init icu_init_irq(void);
 extern void __init mmp_map_io(void);
+extern void mmp_restart(char, const char *);
diff --git a/arch/arm/mach-mmp/flint.c b/arch/arm/mach-mmp/flint.c
index c4fd806..5a6a27a 100644
--- a/arch/arm/mach-mmp/flint.c
+++ b/arch/arm/mach-mmp/flint.c
@@ -121,4 +121,5 @@
 	.init_irq       = mmp2_init_irq,
 	.timer          = &mmp2_timer,
 	.init_machine   = flint_init,
+	.restart	= mmp_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 4665767..1e3abbe 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -194,4 +194,5 @@
 	.init_irq       = pxa168_init_irq,
 	.timer          = &pxa168_timer,
 	.init_machine   = gplugd_init,
+	.restart	= pxa168_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 7fb568d..a677aa7 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -5,6 +5,7 @@
 
 extern struct sys_timer pxa168_timer;
 extern void __init pxa168_init_irq(void);
+extern void pxa168_restart(char, const char *);
 extern void pxa168_clear_keypad_wakeup(void);
 
 #include <linux/i2c.h>
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 1a8a25e..1d001ea 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,18 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
-#include <mach/cputype.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	if (cpu_is_pxa168())
-		cpu_reset(0xffff0000);
-	else
-		cpu_reset(0);
-}
 #endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h
deleted file mode 100644
index 1d0bac0..0000000
--- a/arch/arm/mach-mmp/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * linux/arch/arm/mach-mmp/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfe000000UL
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 8bfac66..96cf5c8 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -175,4 +175,5 @@
 	.init_irq       = mmp2_init_irq,
 	.timer          = &mmp2_timer,
 	.init_machine   = jasper_init,
+	.restart	= mmp_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 76ca15c..13f2386 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -214,3 +214,8 @@
 	pxa168_device_usb_host.dev.platform_data = pdata;
 	return platform_device_register(&pxa168_device_usb_host);
 }
+
+void pxa168_restart(char mode, const char *cmd)
+{
+	soft_restart(0xffff0000);
+}
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c
index eb5be87..257a212 100644
--- a/arch/arm/mach-mmp/tavorevb.c
+++ b/arch/arm/mach-mmp/tavorevb.c
@@ -103,4 +103,5 @@
 	.init_irq       = pxa910_init_irq,
 	.timer          = &pxa910_timer,
 	.init_machine   = tavorevb_init,
+	.restart	= mmp_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/teton_bga.c b/arch/arm/mach-mmp/teton_bga.c
index bbe4727..8ac22a6 100644
--- a/arch/arm/mach-mmp/teton_bga.c
+++ b/arch/arm/mach-mmp/teton_bga.c
@@ -86,4 +86,5 @@
 	.init_irq       = pxa168_init_irq,
 	.timer          = &pxa168_timer,
 	.init_machine   = teton_bga_init,
+	.restart	= pxa168_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 4e91ee6..71fc4ee 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -25,7 +25,6 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
-#include <linux/sched.h>
 
 #include <asm/sched_clock.h>
 #include <mach/addr-map.h>
@@ -42,8 +41,6 @@
 #define MAX_DELTA		(0xfffffffe)
 #define MIN_DELTA		(16)
 
-static DEFINE_CLOCK_DATA(cd);
-
 /*
  * FIXME: the timer needs some delay to stablize the counter capture
  */
@@ -59,16 +56,9 @@
 	return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1));
 }
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace mmp_read_sched_clock(void)
 {
-	u32 cyc = timer_read();
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace mmp_update_sched_clock(void)
-{
-	u32 cyc = timer_read();
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return timer_read();
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
@@ -201,7 +191,7 @@
 {
 	timer_config();
 
-	init_sched_clock(&cd, mmp_update_sched_clock, 32, CLOCK_TICK_RATE);
+	setup_sched_clock(mmp_read_sched_clock, 32, CLOCK_TICK_RATE);
 
 	ckevt.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, ckevt.shift);
 	ckevt.max_delta_ns = clockevent_delta2ns(MAX_DELTA, &ckevt);
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 176515a..f026588 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -159,4 +159,5 @@
 	.init_irq       = pxa910_init_irq,
 	.timer          = &pxa910_timer,
 	.init_machine   = ttc_dkb_init,
+	.restart	= mmp_restart,
 MACHINE_END
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index ebde97f..e6beaff 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -67,6 +67,7 @@
 	bool
 config  ARCH_MSM_SCORPIONMP
 	bool
+	select HAVE_SMP
 
 config  ARCH_MSM_ARM11
 	bool
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index 6dc1cbd..ed35981 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -99,6 +99,7 @@
 	.map_io = msm8960_map_io,
 	.init_irq = msm8960_init_irq,
 	.timer = &msm_timer,
+	.handle_irq = gic_handle_irq,
 	.init_machine = msm8960_sim_init,
 MACHINE_END
 
@@ -108,6 +109,7 @@
 	.map_io = msm8960_map_io,
 	.init_irq = msm8960_init_irq,
 	.timer = &msm_timer,
+	.handle_irq = gic_handle_irq,
 	.init_machine = msm8960_rumi3_init,
 MACHINE_END
 
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 44bf716..0a11342 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -108,6 +108,7 @@
 	.reserve = msm8x60_reserve,
 	.map_io = msm8x60_map_io,
 	.init_irq = msm8x60_init_irq,
+	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
 	.timer = &msm_timer,
 MACHINE_END
@@ -117,6 +118,7 @@
 	.reserve = msm8x60_reserve,
 	.map_io = msm8x60_map_io,
 	.init_irq = msm8x60_init_irq,
+	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
 	.timer = &msm_timer,
 MACHINE_END
@@ -126,6 +128,7 @@
 	.reserve = msm8x60_reserve,
 	.map_io = msm8x60_map_io,
 	.init_irq = msm8x60_init_irq,
+	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
 	.timer = &msm_timer,
 MACHINE_END
@@ -135,6 +138,7 @@
 	.reserve = msm8x60_reserve,
 	.map_io = msm8x60_map_io,
 	.init_irq = msm8x60_init_irq,
+	.handle_irq = gic_handle_irq,
 	.init_machine = msm8x60_init,
 	.timer = &msm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 32b4657..97b8191 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -18,7 +18,7 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 #include <linux/delay.h>
 
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
deleted file mode 100644
index 717076f..0000000
--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Low-level IRQ helper macros
- *
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/hardware/entry-macro-gic.S>
-
-	.macro	disable_fiq
-	.endm
-
-	.macro  arch_ret_to_user, tmp1, tmp2
-	.endm
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-vic.S b/arch/arm/mach-msm/include/mach/entry-macro-vic.S
deleted file mode 100644
index 70563ed..0000000
--- a/arch/arm/mach-msm/include/mach/entry-macro-vic.S
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <mach/msm_iomap.h>
-
-	.macro	disable_fiq
-	.endm
-
-	.macro	get_irqnr_preamble, base, tmp
-	@ enable imprecise aborts
-	cpsie	a
-	mov	\base, #MSM_VIC_BASE
-	.endm
-
-	.macro	arch_ret_to_user, tmp1, tmp2
-	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-	@ 0xD0 has irq# or old irq# if the irq has been handled
-	@ 0xD4 has irq# or -1 if none pending *but* if you just
-	@ read 0xD4 you never get the first irq for some reason
-	ldr	\irqnr, [\base, #0xD0]
-	ldr	\irqnr, [\base, #0xD4]
-	cmp	\irqnr, #0xffffffff
-	.endm
diff --git a/arch/arm/mach-msm/include/mach/entry-macro.S b/arch/arm/mach-msm/include/mach/entry-macro.S
index b16f082..41f7003 100644
--- a/arch/arm/mach-msm/include/mach/entry-macro.S
+++ b/arch/arm/mach-msm/include/mach/entry-macro.S
@@ -16,8 +16,27 @@
  *
  */
 
-#if defined(CONFIG_ARM_GIC)
-#include <mach/entry-macro-qgic.S>
-#else
-#include <mach/entry-macro-vic.S>
+	.macro	disable_fiq
+	.endm
+
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+
+#if !defined(CONFIG_ARM_GIC)
+#include <mach/msm_iomap.h>
+
+	.macro	get_irqnr_preamble, base, tmp
+	@ enable imprecise aborts
+	cpsie	a
+	mov	\base, #MSM_VIC_BASE
+	.endm
+
+	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+	@ 0xD0 has irq# or old irq# if the irq has been handled
+	@ 0xD4 has irq# or -1 if none pending *but* if you just
+	@ read 0xD4 you never get the first irq for some reason
+	ldr	\irqnr, [\base, #0xD0]
+	ldr	\irqnr, [\base, #0xD4]
+	cmp	\irqnr, #0xffffffff
+	.endm
 #endif
diff --git a/arch/arm/mach-msm/include/mach/system.h b/arch/arm/mach-msm/include/mach/system.h
index d2e83f4..311db2b 100644
--- a/arch/arm/mach-msm/include/mach/system.h
+++ b/arch/arm/mach-msm/include/mach/system.h
@@ -12,16 +12,8 @@
  * GNU General Public License for more details.
  *
  */
-
-#include <mach/hardware.h>
-
 void arch_idle(void);
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	for (;;) ;  /* depends on IPC w/ other core */
-}
-
 /* low level hardware reset hook -- for example, hitting the
  * PSHOLD line on the PMIC to hard reset the system
  */
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
deleted file mode 100644
index d138448..0000000
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/mach-msm/include/mach/vmalloc.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_VMALLOC_H
-#define __ASM_ARCH_MSM_VMALLOC_H
-
-#define VMALLOC_END	  0xd0000000UL
-
-#endif
-
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index 8736aff..0c56a5a 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -215,7 +215,7 @@
 	.llseek = default_llseek,
 };
 
-static void debug_create(const char *name, mode_t mode,
+static void debug_create(const char *name, umode_t mode,
 			 struct dentry *dent,
 			 int (*fill)(char *buf, int max))
 {
diff --git a/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
index 0e94268..ee74ec9 100644
--- a/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
+++ b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
@@ -151,4 +151,5 @@
 	.init_early	= mv78xx0_init_early,
 	.init_irq	= mv78xx0_init_irq,
 	.timer		= &mv78xx0_timer,
+	.restart	= mv78xx0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 23d3980..5b9632b 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -401,3 +401,19 @@
 	feroceon_l2_init(is_l2_writethrough());
 #endif
 }
+
+void mv78xx0_restart(char mode, const char *cmd)
+{
+	/*
+	 * Enable soft reset to assert RSTOUTn.
+	 */
+	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
+
+	/*
+	 * Assert soft reset.
+	 */
+	writel(SOFT_RESET, SYSTEM_SOFT_RESET);
+
+	while (1)
+		;
+}
diff --git a/arch/arm/mach-mv78xx0/common.h b/arch/arm/mach-mv78xx0/common.h
index 632e63d..07d5f8f 100644
--- a/arch/arm/mach-mv78xx0/common.h
+++ b/arch/arm/mach-mv78xx0/common.h
@@ -46,6 +46,7 @@
 void mv78xx0_uart2_init(void);
 void mv78xx0_uart3_init(void);
 void mv78xx0_i2c_init(void);
+void mv78xx0_restart(char, const char *);
 
 extern struct sys_timer mv78xx0_timer;
 
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index 50b85ae..4d6d48b 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -99,4 +99,5 @@
 	.init_early	= mv78xx0_init_early,
 	.init_irq	= mv78xx0_init_irq,
 	.timer		= &mv78xx0_timer,
+	.restart	= mv78xx0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mv78xx0/include/mach/system.h b/arch/arm/mach-mv78xx0/include/mach/system.h
index 66e7ce4..8c3a538 100644
--- a/arch/arm/mach-mv78xx0/include/mach/system.h
+++ b/arch/arm/mach-mv78xx0/include/mach/system.h
@@ -9,28 +9,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <mach/bridge-regs.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * Enable soft reset to assert RSTOUTn.
-	 */
-	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
-	/*
-	 * Assert soft reset.
-	 */
-	writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
-	while (1)
-		;
-}
-
-
 #endif
diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
deleted file mode 100644
index ba26fe9..0000000
--- a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END	0xfe000000UL
diff --git a/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c b/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
index e85222e..9a88270 100644
--- a/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
+++ b/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
@@ -84,4 +84,5 @@
 	.init_early	= mv78xx0_init_early,
 	.init_irq	= mv78xx0_init_irq,
 	.timer		= &mv78xx0_timer,
+	.restart	= mv78xx0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index 1fc1103..944025d 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -297,4 +297,5 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mxc_timer,
 	.init_machine = eukrea_cpuimx51_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c
index 52a11c1..9fbe923 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-mx5/board-cpuimx51sd.c
@@ -335,4 +335,5 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mxc_timer,
 	.init_machine = eukrea_cpuimx51sd_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c
index fc3621d..42b66e8 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-mx5/board-mx50_rdp.c
@@ -222,4 +222,5 @@
 	.handle_irq = imx50_handle_irq,
 	.timer = &mx50_rdp_timer,
 	.init_machine = mx50_rdp_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 0578390..83eab41 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -175,4 +175,5 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mx51_3ds_timer,
 	.init_machine = mx51_3ds_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 5c837603..e4b822e 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -362,7 +362,7 @@
 {
 	iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
 	iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
-		PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+		PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
 
 	imx51_soc_init();
 
@@ -426,4 +426,5 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mx51_babbage_timer,
 	.init_machine = mx51_babbage_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index a9e4866..3a5ed2d 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -182,7 +182,7 @@
 	.nbuttons = ARRAY_SIZE(mx51_efikamx_powerkey),
 };
 
-void mx51_efikamx_reset(void)
+static void mx51_efikamx_restart(char mode, const char *cmd)
 {
 	if (system_rev == 0x11)
 		gpio_direction_output(EFIKAMX_RESET1_1, 0);
@@ -292,4 +292,5 @@
 	.handle_irq = imx51_handle_irq,
 	.timer = &mx51_efikamx_timer,
 	.init_machine = mx51_efikamx_init,
+	.restart = mx51_efikamx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
index 38c4a3e..ea5f65b 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -287,4 +287,5 @@
 	.handle_irq = imx51_handle_irq,
 	.init_machine =  efikasb_board_init,
 	.timer = &mx51_efikasb_timer,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_ard.c b/arch/arm/mach-mx5/board-mx53_ard.c
index 0d7f0ff..5f224f1 100644
--- a/arch/arm/mach-mx5/board-mx53_ard.c
+++ b/arch/arm/mach-mx5/board-mx53_ard.c
@@ -257,4 +257,5 @@
 	.handle_irq = imx53_handle_irq,
 	.timer = &mx53_ard_timer,
 	.init_machine = mx53_ard_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index 6bea31a..d6ce137 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -106,7 +106,7 @@
 	gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
 	.phy = PHY_INTERFACE_MODE_RMII,
 };
 
@@ -175,4 +175,5 @@
 	.handle_irq = imx53_handle_irq,
 	.timer = &mx53_evk_timer,
 	.init_machine = mx53_evk_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 7678f77..fd8b524 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -242,7 +242,7 @@
 	gpio_set_value(LOCO_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
 	.phy = PHY_INTERFACE_MODE_RMII,
 };
 
@@ -317,4 +317,5 @@
 	.handle_irq = imx53_handle_irq,
 	.timer = &mx53_loco_timer,
 	.init_machine = mx53_loco_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 59c0845..22c53c9 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -104,7 +104,7 @@
 	gpio_set_value(SMD_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
 	.phy = PHY_INTERFACE_MODE_RMII,
 };
 
@@ -164,4 +164,5 @@
 	.handle_irq = imx53_handle_irq,
 	.timer = &mx53_smd_timer,
 	.init_machine = mx53_smd_board_init,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-mx5/imx51-dt.c
index 596edd9..e6bad17 100644
--- a/arch/arm/mach-mx5/imx51-dt.c
+++ b/arch/arm/mach-mx5/imx51-dt.c
@@ -115,4 +115,5 @@
 	.timer		= &imx51_timer,
 	.init_machine	= imx51_dt_init,
 	.dt_compat	= imx51_dt_board_compat,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c
index 85bfd5f..05ebb3e 100644
--- a/arch/arm/mach-mx5/imx53-dt.c
+++ b/arch/arm/mach-mx5/imx53-dt.c
@@ -125,4 +125,5 @@
 	.timer		= &imx53_timer,
 	.init_machine	= imx53_dt_init,
 	.dt_compat	= imx53_dt_board_compat,
+	.restart	= mxc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 635bb5d..1388485 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -16,6 +16,7 @@
 extern const u32 *mxs_get_ocotp(void);
 extern int mxs_reset_block(void __iomem *);
 extern void mxs_timer_init(struct clk *, int);
+extern void mxs_restart(char, const char *);
 
 extern int mx23_register_gpios(void);
 extern int mx23_clocks_init(void);
diff --git a/arch/arm/mach-mxs/include/mach/system.h b/arch/arm/mach-mxs/include/mach/system.h
index 0e42823..e7ad1bb 100644
--- a/arch/arm/mach-mxs/include/mach/system.h
+++ b/arch/arm/mach-mxs/include/mach/system.h
@@ -22,6 +22,4 @@
 	cpu_do_idle();
 }
 
-void arch_reset(char mode, const char *cmd);
-
 #endif /* __MACH_MXS_SYSTEM_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/vmalloc.h b/arch/arm/mach-mxs/include/mach/vmalloc.h
deleted file mode 100644
index 103b016..0000000
--- a/arch/arm/mach-mxs/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *  Copyright (C) 2000 Russell King.
- *  Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_MXS_VMALLOC_H__
-#define __MACH_MXS_VMALLOC_H__
-
-/* vmalloc ending address */
-#define VMALLOC_END       0xf4000000UL
-
-#endif /* __MACH_MXS_VMALLOC_H__ */
diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c
index 6b00577..2f27582 100644
--- a/arch/arm/mach-mxs/mach-m28evk.c
+++ b/arch/arm/mach-mxs/mach-m28evk.c
@@ -363,4 +363,5 @@
 	.init_irq	= mx28_init_irq,
 	.timer		= &m28evk_timer,
 	.init_machine	= m28evk_init,
+	.restart	= mxs_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-mx23evk.c b/arch/arm/mach-mxs/mach-mx23evk.c
index c325fbe..5ea1c57 100644
--- a/arch/arm/mach-mxs/mach-mx23evk.c
+++ b/arch/arm/mach-mxs/mach-mx23evk.c
@@ -184,4 +184,5 @@
 	.init_irq	= mx23_init_irq,
 	.timer		= &mx23evk_timer,
 	.init_machine	= mx23evk_init,
+	.restart	= mxs_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index 064ec5a..d0cc37f 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -501,4 +501,5 @@
 	.init_irq	= mx28_init_irq,
 	.timer		= &mx28evk_timer,
 	.init_machine	= mx28evk_init,
+	.restart	= mxs_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c
index 6834dea..a626c07 100644
--- a/arch/arm/mach-mxs/mach-stmp378x_devb.c
+++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c
@@ -117,4 +117,5 @@
 	.init_irq	= mx23_init_irq,
 	.timer		= &stmp378x_dvb_timer,
 	.init_machine	= stmp378x_dvb_init,
+	.restart	= mxs_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-tx28.c b/arch/arm/mach-mxs/mach-tx28.c
index 9a1f0e7..2c0862e 100644
--- a/arch/arm/mach-mxs/mach-tx28.c
+++ b/arch/arm/mach-mxs/mach-tx28.c
@@ -178,4 +178,5 @@
 	.init_irq = mx28_init_irq,
 	.timer = &tx28_timer,
 	.init_machine = tx28_stk5v3_init,
+	.restart	= mxs_restart,
 MACHINE_END
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 20ec3bd..b936633 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -42,7 +42,7 @@
 /*
  * Reset the system. It is called by machine_restart().
  */
-void arch_reset(char mode, const char *cmd)
+void mxs_restart(char mode, const char *cmd)
 {
 	/* reset the chip */
 	__mxs_setl(MXS_CLKCTRL_RESET_CHIP, mxs_clkctrl_reset_addr);
@@ -53,7 +53,7 @@
 	mdelay(50);
 
 	/* We'll take a jump through zero as a poor second */
-	cpu_reset(0);
+	soft_restart(0);
 }
 
 static int __init mxs_arch_reset_init(void)
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 00023b5..59e6797 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -187,3 +187,8 @@
 
 subsys_initcall(netx_init);
 
+void netx_restart(char mode, const char *cmd)
+{
+	writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
+	       NETX_SYSTEM_RES_CR);
+}
diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h
index ede2d35..9b91511 100644
--- a/arch/arm/mach-netx/generic.h
+++ b/arch/arm/mach-netx/generic.h
@@ -19,6 +19,7 @@
 
 extern void __init netx_map_io(void);
 extern void __init netx_init_irq(void);
+extern void netx_restart(char, const char *);
 
 struct sys_timer;
 extern struct sys_timer netx_timer;
diff --git a/arch/arm/mach-netx/include/mach/entry-macro.S b/arch/arm/mach-netx/include/mach/entry-macro.S
index 844f1f9..6e9f1cb 100644
--- a/arch/arm/mach-netx/include/mach/entry-macro.S
+++ b/arch/arm/mach-netx/include/mach/entry-macro.S
@@ -18,22 +18,9 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
-#include <mach/hardware.h>
 
 		.macro  disable_fiq
 		.endm
 
-		.macro  get_irqnr_preamble, base, tmp
-		ldr	\base, =io_p2v(0x001ff000)
-		.endm
-
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-		ldr	\irqstat, [\base, #0]
-		clz	\irqnr, \irqstat
-		rsb     \irqnr, \irqnr, #31
-		cmp	\irqstat, #0
-		.endm
-
diff --git a/arch/arm/mach-netx/include/mach/system.h b/arch/arm/mach-netx/include/mach/system.h
index dc7b4bc..b38fa36 100644
--- a/arch/arm/mach-netx/include/mach/system.h
+++ b/arch/arm/mach-netx/include/mach/system.h
@@ -19,20 +19,10 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include "netx-regs.h"
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
-	       NETX_SYSTEM_RES_CR);
-}
-
 #endif
 
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
deleted file mode 100644
index 871f1ef..0000000
--- a/arch/arm/mach-netx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *  arch/arm/mach-netx/include/mach/vmalloc.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-netx/nxdb500.c b/arch/arm/mach-netx/nxdb500.c
index 90903dd..180ea89 100644
--- a/arch/arm/mach-netx/nxdb500.c
+++ b/arch/arm/mach-netx/nxdb500.c
@@ -28,6 +28,7 @@
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
+#include <asm/hardware/vic.h>
 #include <mach/netx-regs.h>
 #include <mach/eth.h>
 
@@ -203,6 +204,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= netx_map_io,
 	.init_irq	= netx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &netx_timer,
 	.init_machine	= nxdb500_init,
+	.restart	= netx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-netx/nxdkn.c b/arch/arm/mach-netx/nxdkn.c
index c63384a..58009e2 100644
--- a/arch/arm/mach-netx/nxdkn.c
+++ b/arch/arm/mach-netx/nxdkn.c
@@ -28,6 +28,7 @@
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
+#include <asm/hardware/vic.h>
 #include <mach/netx-regs.h>
 #include <mach/eth.h>
 
@@ -96,6 +97,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= netx_map_io,
 	.init_irq	= netx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &netx_timer,
 	.init_machine	= nxdkn_init,
+	.restart	= netx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-netx/nxeb500hmi.c b/arch/arm/mach-netx/nxeb500hmi.c
index 8f548ec..122e998 100644
--- a/arch/arm/mach-netx/nxeb500hmi.c
+++ b/arch/arm/mach-netx/nxeb500hmi.c
@@ -28,6 +28,7 @@
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
+#include <asm/hardware/vic.h>
 #include <mach/netx-regs.h>
 #include <mach/eth.h>
 
@@ -180,6 +181,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= netx_map_io,
 	.init_irq	= netx_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &netx_timer,
 	.init_machine	= nxeb500hmi_init,
+	.restart	= netx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 0cbb74c..7c878bf 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -21,6 +21,7 @@
 #include <linux/mtd/onenand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
+#include <asm/hardware/vic.h>
 #include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -34,6 +35,8 @@
 #include <mach/nand.h>
 #include <mach/fsmc.h>
 
+#include "cpu-8815.h"
+
 /* Initial value for SRC control register: all timers use MXTAL/8 source */
 #define SRC_CR_INIT_MASK	0x00007fff
 #define SRC_CR_INIT_VAL		0x2aaa8000
@@ -280,6 +283,8 @@
 	.atag_offset	= 0x100,
 	.map_io		= cpu8815_map_io,
 	.init_irq	= cpu8815_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &nomadik_timer,
 	.init_machine	= nhk8815_platform_init,
+	.restart	= cpu8815_restart,
 MACHINE_END
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index dc67717..65df7b4 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -21,6 +21,7 @@
 #include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 
 #include <plat/gpio-nomadik.h>
 #include <mach/hardware.h>
@@ -32,6 +33,7 @@
 #include <asm/hardware/cache-l2x0.h>
 
 #include "clock.h"
+#include "cpu-8815.h"
 
 #define __MEM_4K_RESOURCE(x) \
 	.res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
@@ -164,3 +166,13 @@
 #endif
 	 return;
 }
+
+void cpu8815_restart(char mode, const char *cmd)
+{
+	void __iomem *src_rstsr = io_p2v(NOMADIK_SRC_BASE + 0x18);
+
+	/* FIXME: use egpio when implemented */
+
+	/* Write anything to Reset status register */
+	writel(1, src_rstsr);
+}
diff --git a/arch/arm/mach-nomadik/cpu-8815.h b/arch/arm/mach-nomadik/cpu-8815.h
new file mode 100644
index 0000000..71c21e8
--- /dev/null
+++ b/arch/arm/mach-nomadik/cpu-8815.h
@@ -0,0 +1,4 @@
+extern void cpu8815_map_io(void);
+extern void cpu8815_platform_init(void);
+extern void cpu8815_init_irq(void);
+extern void cpu8815_restart(char, const char *);
diff --git a/arch/arm/mach-nomadik/include/mach/entry-macro.S b/arch/arm/mach-nomadik/include/mach/entry-macro.S
index 49f1aa3..98ea1c1 100644
--- a/arch/arm/mach-nomadik/include/mach/entry-macro.S
+++ b/arch/arm/mach-nomadik/include/mach/entry-macro.S
@@ -6,38 +6,8 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
 	.macro	disable_fiq
 	.endm
 
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =IO_ADDRESS(NOMADIK_IC_BASE)
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	/* This stanza gets the irq mask from one of two status registers */
-	mov	\irqnr, #0
-	ldr	\irqstat, [\base, #VIC_REG_IRQSR0]	@ get masked status
-	cmp	\irqstat, #0
-	bne	1001f
-	add	\irqnr, \irqnr, #32
-	ldr	\irqstat, [\base, #VIC_REG_IRQSR1]	@ get masked status
-
-1001:	tst	\irqstat, #15
-	bne	1002f
-	add	\irqnr, \irqnr, #4
-	movs	\irqstat, \irqstat, lsr #4
-	bne	1001b
-1002:	tst	\irqstat, #1
-	bne	1003f
-	add	\irqnr, \irqnr, #1
-	movs	\irqstat, \irqstat, lsr #1
-	bne	1002b
-1003:	/* EQ will be set if no irqs pending */
-	.endm
diff --git a/arch/arm/mach-nomadik/include/mach/setup.h b/arch/arm/mach-nomadik/include/mach/setup.h
index b7897ed..bcaeaf4 100644
--- a/arch/arm/mach-nomadik/include/mach/setup.h
+++ b/arch/arm/mach-nomadik/include/mach/setup.h
@@ -12,9 +12,6 @@
 
 #ifdef CONFIG_NOMADIK_8815
 
-extern void cpu8815_map_io(void);
-extern void cpu8815_platform_init(void);
-extern void cpu8815_init_irq(void);
 extern void nmdk_timer_init(void);
 
 #endif /* NOMADIK_8815 */
diff --git a/arch/arm/mach-nomadik/include/mach/system.h b/arch/arm/mach-nomadik/include/mach/system.h
index 7119f68..25e198b 100644
--- a/arch/arm/mach-nomadik/include/mach/system.h
+++ b/arch/arm/mach-nomadik/include/mach/system.h
@@ -20,9 +20,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <linux/io.h>
-#include <mach/hardware.h>
-
 static inline void arch_idle(void)
 {
 	/*
@@ -32,14 +29,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	void __iomem *src_rstsr = io_p2v(NOMADIK_SRC_BASE + 0x18);
-
-	/* FIXME: use egpio when implemented */
-
-	/* Write anything to Reset status register */
-	writel(1, src_rstsr);
-}
-
 #endif
diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h
deleted file mode 100644
index f83d574..0000000
--- a/arch/arm/mach-nomadik/include/mach/vmalloc.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-#define VMALLOC_END       0xe8000000UL
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index b0f15d2..88909cc 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -35,7 +35,7 @@
 #include <plat/mux.h>
 #include <plat/usb.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <mach/camera.h>
 
 #include <mach/ams-delta-fiq.h>
@@ -386,6 +386,7 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= ams_delta_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
 
 EXPORT_SYMBOL(ams_delta_latch1_write);
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 2317827..0b9464b 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -32,7 +32,7 @@
 #include <plat/flash.h>
 #include <plat/fpga.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 
 /* fsample is pretty close to p2-sample */
@@ -390,4 +390,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_fsample_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index dc5b75d..9a5fe58 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -25,7 +25,7 @@
 #include <plat/mux.h>
 #include <plat/usb.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 
 /* assume no Mini-AB port */
 
@@ -89,4 +89,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_generic_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index b334b14..00ad6b2 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -43,7 +43,7 @@
 #include <plat/irda.h>
 #include <plat/usb.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/flash.h>
 
 #include "board-h2.h"
@@ -456,4 +456,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= h2_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 74ebe72..4a7f251 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -45,7 +45,7 @@
 #include <plat/usb.h>
 #include <plat/keypad.h>
 #include <plat/dma.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/flash.h>
 
 #include "board-h3.h"
@@ -444,4 +444,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= h3_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 3e91baa..731cc3d 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -41,7 +41,7 @@
 #include <asm/mach/arch.h>
 
 #include <plat/omap7xx.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 #include <plat/keypad.h>
 #include <plat/usb.h>
@@ -610,4 +610,5 @@
 	.init_irq       = omap1_init_irq,
 	.init_machine   = htcherald_init,
 	.timer          = &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 273153d..309369e 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -37,7 +37,7 @@
 #include <plat/tc.h>
 #include <plat/usb.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/mmc.h>
 
 /* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
@@ -460,4 +460,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= innovator_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 6798b84..f9efc03 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -30,7 +30,7 @@
 #include <plat/usb.h>
 #include <plat/board.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/hwa742.h>
 #include <plat/lcd_mipid.h>
 #include <plat/mmc.h>
@@ -259,4 +259,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_nokia770_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index c385927..675de06 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -51,7 +51,7 @@
 #include <plat/usb.h>
 #include <plat/mux.h>
 #include <plat/tc.h>
-#include <plat/common.h>
+#include "common.h"
 
 /* At OMAP5912 OSK the Ethernet is directly connected to CS1 */
 #define OMAP_OSK_ETHR_START		0x04800300
@@ -578,4 +578,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= osk_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index f9c44cb..81fa27f 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -41,7 +41,7 @@
 #include <plat/board.h>
 #include <plat/irda.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 
 #define PALMTE_USBDETECT_GPIO	0
 #define PALMTE_USB_OR_DC_GPIO	1
@@ -270,4 +270,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_palmte_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 11a9853..81cb821 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -39,7 +39,7 @@
 #include <plat/board.h>
 #include <plat/irda.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -317,4 +317,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_palmtt_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 4206157..e881945 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -41,7 +41,7 @@
 #include <plat/board.h>
 #include <plat/irda.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -334,4 +334,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_palmz71_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 203ae07..c000bed 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -32,7 +32,7 @@
 #include <plat/fpga.h>
 #include <plat/flash.h>
 #include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 
 static const unsigned int p2_keymap[] = {
@@ -352,4 +352,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_perseus2_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index 092a4c0..7bcd82a 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -40,7 +40,7 @@
 #include <plat/usb.h>
 #include <plat/tc.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/keypad.h>
 #include <plat/board-sx1.h>
 
@@ -416,4 +416,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= omap_sx1_init,
 	.timer		= &omap1_timer,
+	.restart	= omap1_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 61ed6cd..f83a502d 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -28,13 +28,12 @@
 #include <linux/export.h>
 
 #include <mach/hardware.h>
-#include <mach/system.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include <plat/board-voiceblue.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/flash.h>
 #include <plat/mux.h>
 #include <plat/tc.h>
@@ -221,7 +220,7 @@
 	gpio_set_value(0, wdt_gpio_state);
 }
 
-static void voiceblue_reset(char mode, const char *cmd)
+static void voiceblue_restart(char mode, const char *cmd)
 {
 	/*
 	 * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
@@ -285,8 +284,6 @@
 	 * (it is connected through invertor) */
 	omap_writeb(0x00, OMAP_LPG1_LCR);
 	omap_writeb(0x00, OMAP_LPG1_PMR);	/* Disable clock */
-
-	arch_reset = voiceblue_reset;
 }
 
 MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
@@ -298,4 +295,5 @@
 	.init_irq	= omap1_init_irq,
 	.init_machine	= voiceblue_init,
 	.timer		= &omap1_timer,
+	.restart	= voiceblue_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
new file mode 100644
index 0000000..a9a5146
--- /dev/null
+++ b/arch/arm/mach-omap1/common.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * Header for code common to all OMAP1 machines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the  GNU General Public License along
+ * with this program; if not, write  to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP1_COMMON_H
+#define __ARCH_ARM_MACH_OMAP1_COMMON_H
+
+#include <plat/common.h>
+
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+void omap7xx_map_io(void);
+#else
+static inline void omap7xx_map_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP15XX
+void omap15xx_map_io(void);
+#else
+static inline void omap15xx_map_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP16XX
+void omap16xx_map_io(void);
+#else
+static inline void omap16xx_map_io(void)
+{
+}
+#endif
+
+void omap1_init_early(void);
+void omap1_init_irq(void);
+void omap1_restart(char, const char *);
+
+extern struct sys_timer omap1_timer;
+extern bool omap_32k_timer_init(void);
+
+#endif /* __ARCH_ARM_MACH_OMAP1_COMMON_H */
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 475cb2f..1d76a63 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -22,7 +22,7 @@
 #include <mach/hardware.h>
 #include <asm/mach/map.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/tc.h>
 #include <plat/board.h>
 #include <plat/mux.h>
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
deleted file mode 100644
index 22ec4a4..0000000
--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-omap1/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END	0xd8000000UL
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 7969cfd..8e55b6f 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -121,7 +121,6 @@
 void omap1_init_early(void)
 {
 	omap_check_revision();
-	omap_ioremap_init();
 
 	/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
 	 * on a Posted Write in the TIPB Bridge".
diff --git a/arch/arm/mach-omap1/reset.c b/arch/arm/mach-omap1/reset.c
index ad951ee..91d199b 100644
--- a/arch/arm/mach-omap1/reset.c
+++ b/arch/arm/mach-omap1/reset.c
@@ -5,10 +5,9 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
-#include <mach/system.h>
 #include <plat/prcm.h>
 
-void omap1_arch_reset(char mode, const char *cmd)
+void omap1_restart(char mode, const char *cmd)
 {
 	/*
 	 * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
@@ -21,5 +20,3 @@
 
 	omap_writew(1, ARM_RSTCT1);
 }
-
-void (*arch_reset)(char, const char *) = omap1_arch_reset;
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index a183777..b8faffa 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -37,7 +37,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -54,7 +53,7 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #ifdef CONFIG_OMAP_MPU_TIMER
 
@@ -190,30 +189,9 @@
  * ---------------------------------------------------------------------------
  */
 
-static DEFINE_CLOCK_DATA(cd);
-
-static inline unsigned long long notrace _omap_mpu_sched_clock(void)
+static u32 notrace omap_mpu_read_sched_clock(void)
 {
-	u32 cyc = ~omap_mpu_timer_read(1);
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-#ifndef CONFIG_OMAP_32K_TIMER
-unsigned long long notrace sched_clock(void)
-{
-	return _omap_mpu_sched_clock();
-}
-#else
-static unsigned long long notrace omap_mpu_sched_clock(void)
-{
-	return _omap_mpu_sched_clock();
-}
-#endif
-
-static void notrace mpu_update_sched_clock(void)
-{
-	u32 cyc = ~omap_mpu_timer_read(1);
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return ~omap_mpu_timer_read(1);
 }
 
 static void __init omap_init_clocksource(unsigned long rate)
@@ -223,7 +201,7 @@
 			"%s: can't register clocksource!\n";
 
 	omap_mpu_timer_start(1, ~0, 1);
-	init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
+	setup_sched_clock(omap_mpu_read_sched_clock, 32, rate);
 
 	if (clocksource_mmio_init(&timer->read_tim, "mpu_timer2", rate,
 			300, 32, clocksource_mmio_readl_down))
@@ -254,30 +232,6 @@
 }
 #endif	/* CONFIG_OMAP_MPU_TIMER */
 
-#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
-static unsigned long long (*preferred_sched_clock)(void);
-
-unsigned long long notrace sched_clock(void)
-{
-	if (!preferred_sched_clock)
-		return 0;
-
-	return preferred_sched_clock();
-}
-
-static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
-{
-	if (use_32k_sched_clock)
-		preferred_sched_clock = omap_32k_sched_clock;
-	else
-		preferred_sched_clock = omap_mpu_sched_clock;
-}
-#else
-static inline void preferred_sched_clock_init(bool use_32k_sched_clcok)
-{
-}
-#endif
-
 static inline int omap_32k_timer_usable(void)
 {
 	int res = false;
@@ -299,12 +253,8 @@
  */
 static void __init omap1_timer_init(void)
 {
-	if (omap_32k_timer_usable()) {
-		preferred_sched_clock_init(1);
-	} else {
+	if (!omap_32k_timer_usable())
 		omap_mpu_timer_init();
-		preferred_sched_clock_init(0);
-	}
 }
 
 struct sys_timer omap1_timer = {
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 96604a5..9a54ef4 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -52,7 +52,7 @@
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/dmtimer.h>
 
 /*
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index e1293aa..4f01533 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -25,6 +25,7 @@
 	depends on ARCH_OMAP2PLUS
 	default y
 	select CPU_V6
+	select MULTI_IRQ_HANDLER
 
 config ARCH_OMAP3
 	bool "TI OMAP3"
@@ -36,13 +37,16 @@
 	select ARCH_HAS_OPP
 	select PM_OPP if PM
 	select ARM_CPU_SUSPEND if PM
+	select MULTI_IRQ_HANDLER
 
 config ARCH_OMAP4
 	bool "TI OMAP4"
 	default y
 	depends on ARCH_OMAP2PLUS
+	select CACHE_L2X0
 	select CPU_V7
 	select ARM_GIC
+	select HAVE_SMP
 	select LOCAL_TIMERS if SMP
 	select PL310_ERRATA_588369
 	select PL310_ERRATA_727915
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index d704f0a..7370983 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -34,7 +34,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <plat/gpmc-smc91x.h>
@@ -301,6 +301,8 @@
 	.map_io		= omap243x_map_io,
 	.init_early	= omap2430_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_2430sdp_init,
 	.timer		= &omap2_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 77142c1..9996334 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -33,7 +33,7 @@
 #include <plat/mcspi.h>
 #include <plat/board.h>
 #include <plat/usb.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <video/omapdss.h>
@@ -728,6 +728,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3430_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_3430sdp_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index f552305..6ef350d 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -16,7 +16,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 #include <plat/gpmc-smc91x.h>
 #include <plat/usb.h>
@@ -215,6 +215,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3630_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_sdp_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 5156468..bad5d5a 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -27,13 +27,13 @@
 #include <linux/leds_pwm.h>
 
 #include <mach/hardware.h>
-#include <mach/omap4-common.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/usb.h>
 #include <plat/mmc.h>
 #include <plat/omap4-keypad.h>
@@ -984,6 +984,8 @@
 	.map_io		= omap4_map_io,
 	.init_early	= omap4430_init_early,
 	.init_irq	= gic_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= omap_4430sdp_init,
 	.timer		= &omap4_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 7834536..c3851e8 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -27,7 +27,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/usb.h>
 
 #include "mux.h"
@@ -98,6 +98,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= am35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= am3517_crane_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index d314f03..f5a3a3f 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -32,7 +32,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/usb.h>
 #include <video/omapdss.h>
 #include <video/omap-panel-generic-dpi.h>
@@ -491,6 +491,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= am35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= am3517_evm_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index de8134b..ac77382 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -37,7 +37,7 @@
 #include <plat/led.h>
 #include <plat/usb.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 
 #include <video/omapdss.h>
@@ -354,6 +354,8 @@
 	.map_io		= omap242x_map_io,
 	.init_early	= omap2420_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_apollon_init,
 	.timer		= &omap2_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index bd1bcac..1545102 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -37,7 +37,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/nand.h>
 #include <plat/gpmc.h>
 #include <plat/usb.h>
@@ -634,8 +634,10 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= cm_t35_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 
 MACHINE_START(CM_T3730, "Compulab CM-T3730")
@@ -644,6 +646,8 @@
 	.map_io         = omap3_map_io,
 	.init_early     = omap3630_init_early,
 	.init_irq       = omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine   = cm_t3730_init,
 	.timer          = &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 3f4dc66..f36d694 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -39,7 +39,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/usb.h>
 #include <plat/nand.h>
 #include <plat/gpmc.h>
@@ -299,6 +299,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= am35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= cm_t3517_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 90154e4..e873063 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -41,7 +41,7 @@
 #include <asm/mach/flash.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -660,6 +660,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= devkit8000_init,
 	.timer		= &omap3_secure_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index fb55fa3d..f8c5b2c 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,8 +20,7 @@
 #include <asm/mach/arch.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
-#include <mach/omap4-common.h>
+#include "common.h"
 #include "common-board-devices.h"
 
 /*
@@ -107,6 +106,7 @@
 	.init_machine	= omap_generic_init,
 	.timer		= &omap2_timer,
 	.dt_compat	= omap242x_boards_compat,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 #endif
 
@@ -122,9 +122,11 @@
 	.map_io		= omap243x_map_io,
 	.init_early	= omap2430_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_generic_init,
 	.timer		= &omap2_timer,
 	.dt_compat	= omap243x_boards_compat,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 #endif
 
@@ -143,6 +145,7 @@
 	.init_machine	= omap3_init,
 	.timer		= &omap3_timer,
 	.dt_compat	= omap3_boards_compat,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 #endif
 
@@ -161,5 +164,6 @@
 	.init_machine	= omap4_init,
 	.timer		= &omap4_timer,
 	.dt_compat	= omap4_boards_compat,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 8b351d9..54af800 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -34,7 +34,7 @@
 
 #include <plat/usb.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/menelaus.h>
 #include <plat/dma.h>
 #include <plat/gpmc.h>
@@ -396,6 +396,8 @@
 	.map_io		= omap242x_map_io,
 	.init_early	= omap2420_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= omap_h4_init,
 	.timer		= &omap2_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index d0a3f78..a59ace0 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -28,7 +28,7 @@
 #include <asm/mach/arch.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <video/omapdss.h>
@@ -672,8 +672,10 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= igep_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 
 MACHINE_START(IGEP0030, "IGEP OMAP3 module")
@@ -682,6 +684,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= igep_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index e179da0..2d2a61f 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -36,7 +36,7 @@
 
 #include <plat/mcspi.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 #include <mach/board-zoom.h>
 
@@ -434,6 +434,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3430_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_ldp_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 2913692..72d76ed 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -26,7 +26,7 @@
 #include <asm/mach-types.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/menelaus.h>
 #include <mach/irqs.h>
 #include <plat/mcspi.h>
@@ -689,8 +689,10 @@
 	.map_io		= omap242x_map_io,
 	.init_early	= omap2420_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= n8x0_init_machine,
 	.timer		= &omap2_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 
 MACHINE_START(NOKIA_N810, "Nokia N810")
@@ -699,8 +701,10 @@
 	.map_io		= omap242x_map_io,
 	.init_early	= omap2420_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= n8x0_init_machine,
 	.timer		= &omap2_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 
 MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
@@ -709,6 +713,8 @@
 	.map_io		= omap242x_map_io,
 	.init_early	= omap2420_init_early,
 	.init_irq	= omap2_init_irq,
+	.handle_irq	= omap2_intc_handle_irq,
 	.init_machine	= n8x0_init_machine,
 	.timer		= &omap2_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 4a71cb7..7ffcd28 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -40,7 +40,7 @@
 #include <asm/mach/flash.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <video/omapdss.h>
 #include <video/omap-panel-dvi.h>
 #include <plat/gpmc.h>
@@ -559,6 +559,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3_beagle_init,
 	.timer		= &omap3_secure_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ec00b2e..003fe34 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -43,7 +43,7 @@
 
 #include <plat/board.h>
 #include <plat/usb.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/mcspi.h>
 #include <video/omapdss.h>
 #include <video/omap-panel-dvi.h>
@@ -681,6 +681,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3_evm_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 7c0f193..4198dd0 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -40,7 +40,7 @@
 
 #include <plat/mux.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc-smsc911x.h>
 #include <plat/gpmc.h>
 #include <plat/sdrc.h>
@@ -208,8 +208,10 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3logic_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 
 MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
@@ -217,6 +219,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3logic_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index f7811f4..1644b73 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -41,7 +41,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <mach/hardware.h>
 #include <plat/mcspi.h>
 #include <plat/usb.h>
@@ -606,6 +606,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3pandora_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index ddb7d66..cb089a4 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -35,7 +35,7 @@
 #include <asm/mach/flash.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -454,6 +454,8 @@
 	.map_io			= omap3_map_io,
 	.init_early		= omap35xx_init_early,
 	.init_irq		= omap3_init_irq,
+	.handle_irq		= omap3_intc_handle_irq,
 	.init_machine		= omap3_stalker_init,
 	.timer			= &omap3_secure_timer,
+	.restart		= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index a2d0d19..a0b851a 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -44,7 +44,7 @@
 #include <asm/mach/flash.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -381,6 +381,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3430_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap3_touchbook_init,
 	.timer		= &omap3_secure_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index a8c2c42..8b06c6a 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -30,14 +30,14 @@
 #include <linux/wl12xx.h>
 
 #include <mach/hardware.h>
-#include <mach/omap4-common.h>
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <video/omapdss.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/usb.h>
 #include <plat/mmc.h>
 #include <video/omap-panel-dvi.h>
@@ -577,6 +577,8 @@
 	.map_io		= omap4_map_io,
 	.init_early	= omap4430_init_early,
 	.init_irq	= gic_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= omap4_panda_init,
 	.timer		= &omap4_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 4cf7aea..52c0cef 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -43,7 +43,7 @@
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <video/omapdss.h>
 #include <video/omap-panel-generic-dpi.h>
 #include <video/omap-panel-dvi.h>
@@ -562,6 +562,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap35xx_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= overo_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 616fb39..8678b38 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -25,7 +25,7 @@
 #include <plat/mmc.h>
 #include <plat/usb.h>
 #include <plat/gpmc.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/onenand.h>
 
 #include "mux.h"
@@ -149,6 +149,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3630_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= rm680_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ba1aa07..108fee6 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -27,7 +27,7 @@
 
 #include <plat/mcspi.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/onenand.h>
@@ -193,7 +193,7 @@
 static void __init rx51_charger_init(void)
 {
 	WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-		GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+		GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
 	platform_device_register(&rx51_charger_device);
 }
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 4af7c4b..27f01f0 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -25,7 +25,7 @@
 
 #include <plat/mcspi.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/usb.h>
@@ -127,6 +127,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3430_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= rx51_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index e6ee884..74713e3 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -22,7 +22,7 @@
 
 #include <plat/irqs.h>
 #include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
 
 static struct omap_board_config_kernel ti8168_evm_config[] __initdata = {
 };
@@ -48,4 +48,5 @@
 	.init_irq	= ti816x_init_irq,
 	.timer		= &omap3_timer,
 	.init_machine	= ti8168_evm_init,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 6d0aa4f..8d7ce11 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -24,7 +24,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/usb.h>
 
 #include <mach/board-zoom.h>
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index be6684d..5c20bcc 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -21,7 +21,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 #include <plat/usb.h>
 
@@ -135,8 +135,10 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3430_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_zoom_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
 
 MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
@@ -145,6 +147,8 @@
 	.map_io		= omap3_map_io,
 	.init_early	= omap3630_init_early,
 	.init_irq	= omap3_init_irq,
+	.handle_irq	= omap3_intc_handle_irq,
 	.init_machine	= omap_zoom_init,
 	.timer		= &omap3_timer,
+	.restart	= omap_prcm_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.c b/arch/arm/mach-omap2/cm2xxx_3xxx.c
index 38830d8..04d39cd 100644
--- a/arch/arm/mach-omap2/cm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx_3xxx.c
@@ -18,7 +18,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "cm.h"
 #include "cm2xxx_3xxx.h"
diff --git a/arch/arm/mach-omap2/cm44xx.c b/arch/arm/mach-omap2/cm44xx.c
index e96f53e..6a83630 100644
--- a/arch/arm/mach-omap2/cm44xx.c
+++ b/arch/arm/mach-omap2/cm44xx.c
@@ -18,7 +18,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "cm.h"
 #include "cm1_44xx.h"
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index eb2a472..6204dea 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -20,7 +20,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "cm.h"
 #include "cm1_44xx.h"
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 110e5b9..684b8a7 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -17,7 +17,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 #include <plat/mux.h>
 
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
new file mode 100644
index 0000000..cda888a
--- /dev/null
+++ b/arch/arm/mach-omap2/common.h
@@ -0,0 +1,186 @@
+/*
+ * Header for code common to all OMAP2+ machines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the  GNU General Public License along
+ * with this program; if not, write  to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
+#define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
+
+#include <linux/delay.h>
+#include <plat/common.h>
+
+#ifdef CONFIG_SOC_OMAP2420
+extern void omap242x_map_common_io(void);
+#else
+static inline void omap242x_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAP2430
+extern void omap243x_map_common_io(void);
+#else
+static inline void omap243x_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+extern void omap34xx_map_common_io(void);
+#else
+static inline void omap34xx_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAPTI816X
+extern void omapti816x_map_common_io(void);
+#else
+static inline void omapti816x_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+extern void omap44xx_map_common_io(void);
+#else
+static inline void omap44xx_map_common_io(void)
+{
+}
+#endif
+
+extern void omap2_init_common_infrastructure(void);
+
+extern struct sys_timer omap2_timer;
+extern struct sys_timer omap3_timer;
+extern struct sys_timer omap3_secure_timer;
+extern struct sys_timer omap4_timer;
+
+void omap2420_init_early(void);
+void omap2430_init_early(void);
+void omap3430_init_early(void);
+void omap35xx_init_early(void);
+void omap3630_init_early(void);
+void omap3_init_early(void);	/* Do not use this one */
+void am35xx_init_early(void);
+void ti816x_init_early(void);
+void omap4430_init_early(void);
+void omap_prcm_restart(char, const char *);
+
+/*
+ * IO bases for various OMAP processors
+ * Except for the tap base, all of the I/O bases
+ * listed are physical addresses.
+ */
+struct omap_globals {
+	u32		class;		/* OMAP class to detect */
+	void __iomem	*tap;		/* Control module ID code */
+	void __iomem	*sdrc;           /* SDRAM Controller */
+	void __iomem	*sms;            /* SDRAM Memory Scheduler */
+	void __iomem	*ctrl;           /* System Control Module */
+	void __iomem	*ctrl_pad;	/* PAD Control Module */
+	void __iomem	*prm;            /* Power and Reset Management */
+	void __iomem	*cm;             /* Clock Management */
+	void __iomem	*cm2;
+};
+
+void omap2_set_globals_242x(void);
+void omap2_set_globals_243x(void);
+void omap2_set_globals_3xxx(void);
+void omap2_set_globals_443x(void);
+void omap2_set_globals_ti816x(void);
+
+/* These get called from omap2_set_globals_xxxx(), do not call these */
+void omap2_set_globals_tap(struct omap_globals *);
+void omap2_set_globals_sdrc(struct omap_globals *);
+void omap2_set_globals_control(struct omap_globals *);
+void omap2_set_globals_prcm(struct omap_globals *);
+
+void omap242x_map_io(void);
+void omap243x_map_io(void);
+void omap3_map_io(void);
+void omap4_map_io(void);
+
+/**
+ * omap_test_timeout - busy-loop, testing a condition
+ * @cond: condition to test until it evaluates to true
+ * @timeout: maximum number of microseconds in the timeout
+ * @index: loop index (integer)
+ *
+ * Loop waiting for @cond to become true or until at least @timeout
+ * microseconds have passed.  To use, define some integer @index in the
+ * calling code.  After running, if @index == @timeout, then the loop has
+ * timed out.
+ */
+#define omap_test_timeout(cond, timeout, index)			\
+({								\
+	for (index = 0; index < timeout; index++) {		\
+		if (cond)					\
+			break;					\
+		udelay(1);					\
+	}							\
+})
+
+extern struct device *omap2_get_mpuss_device(void);
+extern struct device *omap2_get_iva_device(void);
+extern struct device *omap2_get_l3_device(void);
+extern struct device *omap4_get_dsp_device(void);
+
+void omap2_init_irq(void);
+void omap3_init_irq(void);
+void ti816x_init_irq(void);
+extern int omap_irq_pending(void);
+void omap_intc_save_context(void);
+void omap_intc_restore_context(void);
+void omap3_intc_suspend(void);
+void omap3_intc_prepare_idle(void);
+void omap3_intc_resume_idle(void);
+void omap2_intc_handle_irq(struct pt_regs *regs);
+void omap3_intc_handle_irq(struct pt_regs *regs);
+
+/*
+ * wfi used in low power code. The opcode is used directly instead
+ * of the instruction to avoid a multi-omap build break.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define do_wfi()			\
+		__asm__ __volatile__ (".word	0xe320f003" : : : "memory")
+#endif
+
+#ifdef CONFIG_CACHE_L2X0
+extern void __iomem *l2cache_base;
+#endif
+
+extern void __init gic_init_irq(void);
+extern void omap_smc1(u32 fn, u32 arg);
+
+#ifdef CONFIG_SMP
+/* Needed for secondary core boot */
+extern void omap_secondary_startup(void);
+extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
+extern void omap_auxcoreboot_addr(u32 cpu_addr);
+extern u32 omap_read_auxcoreboot0(void);
+#endif
+
+#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index e34d27f..114c037 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -15,7 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/sdrc.h>
 
 #include "cm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 942bb4f..e20332f 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -34,6 +34,7 @@
 
 #include "pm.h"
 #include "control.h"
+#include "common.h"
 
 #ifdef CONFIG_CPU_IDLE
 
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index dce9905..bc6cf86 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -22,12 +22,13 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/delay.h>
 
 #include <video/omapdss.h>
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
 #include <plat/omap-pm.h>
-#include <plat/common.h>
+#include "common.h"
 
 #include "control.h"
 #include "display.h"
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index ace9994..a12e224 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -21,7 +21,7 @@
 
 #include <plat/cpu.h>
 #include <plat/i2c.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/omap_hwmod.h>
 
 #include "mux.h"
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 7f47092..27ad722 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -21,7 +21,7 @@
 
 #include <asm/cputype.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/cpu.h>
 
 #include <mach/id.h>
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index feb90a1..56964a0 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -10,146 +10,9 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <mach/hardware.h>
-#include <mach/io.h>
-#include <mach/irqs.h>
-#include <asm/hardware/gic.h>
-
-#include <plat/omap24xx.h>
-#include <plat/omap34xx.h>
-#include <plat/omap44xx.h>
-
-#include <plat/multi.h>
-
-#define OMAP2_IRQ_BASE		OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE)
-#define OMAP3_IRQ_BASE		OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
-#define OMAP4_IRQ_BASE		OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE)
-#define INTCPS_SIR_IRQ_OFFSET	0x0040	/* omap2/3 active interrupt offset */
-#define	ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
 
 		.macro	disable_fiq
 		.endm
 
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
-
-/*
- * Unoptimized irq functions for multi-omap2, 3 and 4
- */
-
-#ifdef MULTI_OMAP2
-		/*
-		 * Configure the interrupt base on the first interrupt.
-		 * See also omap_irq_base_init for setting omap_irq_base.
-		 */
-		.macro  get_irqnr_preamble, base, tmp
-		ldr	\base, =omap_irq_base	@ irq base address
-		ldr	\base, [\base, #0]	@ irq base value
-		.endm
-
-		/* Check the pending interrupts. Note that base already set */
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		tst	\base, #0x100		@ gic address?
-		bne	4401f			@ found gic
-
-		/* Handle omap2 and omap3 */
-		ldr	\irqnr, [\base, #0x98] /* IRQ pending reg 1 */
-		cmp	\irqnr, #0x0
-		bne	9998f
-		ldr	\irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
-		cmp	\irqnr, #0x0
-		bne	9998f
-		ldr	\irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
-		cmp	\irqnr, #0x0
-		bne	9998f
-
-		/*
-		 * ti816x has additional IRQ pending register. Checking this
-		 * register on omap2 & omap3 has no effect (read as 0).
-		 */
-		ldr	\irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
-		cmp	\irqnr, #0x0
-9998:
-		ldrne	\irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
-		and	\irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
-		b	9999f
-
-		/* Handle omap4 */
-4401:		ldr     \irqstat, [\base, #GIC_CPU_INTACK]
-		ldr     \tmp, =1021
-		bic     \irqnr, \irqstat, #0x1c00
-		cmp     \irqnr, #15
-		cmpcc   \irqnr, \irqnr
-		cmpne   \irqnr, \tmp
-		cmpcs   \irqnr, \irqnr
-9999:
-		.endm
-
-#ifdef CONFIG_SMP
-		/* We assume that irqstat (the raw value of the IRQ acknowledge
-		 * register) is preserved from the macro above.
-		 * If there is an IPI, we immediately signal end of interrupt
-		 * on the controller, since this requires the original irqstat
-		 * value which we won't easily be able to recreate later.
-		 */
-
-		.macro test_for_ipi, irqnr, irqstat, base, tmp
-		bic	\irqnr, \irqstat, #0x1c00
-		cmp	\irqnr, #16
-		it	cc
-		strcc	\irqstat, [\base, #GIC_CPU_EOI]
-		it	cs
-		cmpcs	\irqnr, \irqnr
-		.endm
-#endif	/* CONFIG_SMP */
-
-#else	/* MULTI_OMAP2 */
-
-
-/*
- * Optimized irq functions for omap2, 3 and 4
- */
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-		.macro  get_irqnr_preamble, base, tmp
-#ifdef CONFIG_ARCH_OMAP2
-		ldr	\base, =OMAP2_IRQ_BASE
-#else
-		ldr	\base, =OMAP3_IRQ_BASE
-#endif
-		.endm
-
-		/* Check the pending interrupts. Note that base already set */
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		ldr	\irqnr, [\base, #0x98] /* IRQ pending reg 1 */
-		cmp	\irqnr, #0x0
-		bne	9999f
-		ldr	\irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
-		cmp	\irqnr, #0x0
-		bne	9999f
-		ldr	\irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
-		cmp	\irqnr, #0x0
-#ifdef CONFIG_SOC_OMAPTI816X
-		bne	9999f
-		ldr	\irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
-		cmp	\irqnr, #0x0
-#endif
-9999:
-		ldrne	\irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
-		and	\irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
-
-		.endm
-#endif
-
-
-#ifdef CONFIG_ARCH_OMAP4
-#define HAVE_GET_IRQNR_PREAMBLE
-#include <asm/hardware/entry-macro-gic.S>
-
-		.macro  get_irqnr_preamble, base, tmp
-		ldr     \base, =OMAP4_IRQ_BASE
-		.endm
-
-#endif
-
-#endif	/* MULTI_OMAP2 */
diff --git a/arch/arm/mach-omap2/include/mach/omap4-common.h b/arch/arm/mach-omap2/include/mach/omap4-common.h
deleted file mode 100644
index e4bd87619..0000000
--- a/arch/arm/mach-omap2/include/mach/omap4-common.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * omap4-common.h: OMAP4 specific common header file
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * Author:
- *	Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef OMAP_ARCH_OMAP4_COMMON_H
-#define OMAP_ARCH_OMAP4_COMMON_H
-
-/*
- * wfi used in low power code. Directly opcode is used instead
- * of instruction to avoid mulit-omap build break
- */
-#ifdef CONFIG_THUMB2_KERNEL
-#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
-#else
-#define do_wfi()			\
-		__asm__ __volatile__ (".word	0xe320f003" : : : "memory")
-#endif
-
-#ifdef CONFIG_CACHE_L2X0
-extern void __iomem *l2cache_base;
-#endif
-
-extern void __iomem *gic_dist_base_addr;
-
-extern void __init gic_init_irq(void);
-extern void omap_smc1(u32 fn, u32 arg);
-
-#ifdef CONFIG_SMP
-/* Needed for secondary core boot */
-extern void omap_secondary_startup(void);
-extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
-extern void omap_auxcoreboot_addr(u32 cpu_addr);
-extern u32 omap_read_auxcoreboot0(void);
-#endif
-#endif
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
deleted file mode 100644
index 8663199..0000000
--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/plat-omap/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END	  0xf8000000UL
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 25d20ce..3f565dd 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -35,7 +35,7 @@
 #include "clock3xxx.h"
 #include "clock44xx.h"
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/omap-pm.h>
 #include "voltage.h"
 #include "powerdomain.h"
@@ -43,7 +43,7 @@
 #include "clockdomain.h"
 #include <plat/omap_hwmod.h>
 #include <plat/multi.h>
-#include <plat/common.h>
+#include "common.h"
 
 /*
  * The machine specific code may provide the extra mapping besides the
@@ -316,13 +316,9 @@
 	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
 }
 
-/* See irq.c, omap4-common.c and entry-macro.S */
-void __iomem *omap_irq_base;
-
 static void __init omap_common_init_early(void)
 {
 	omap2_check_revision();
-	omap_ioremap_init();
 	omap_init_consistent_dma_size();
 }
 
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 65f1be6..42b1d65 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <mach/hardware.h>
+#include <asm/exception.h>
 #include <asm/mach/irq.h>
 
 
@@ -35,6 +36,11 @@
 /* Number of IRQ state bits in each MIR register */
 #define IRQ_BITS_PER_REG	32
 
+#define OMAP2_IRQ_BASE		OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE)
+#define OMAP3_IRQ_BASE		OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
+#define INTCPS_SIR_IRQ_OFFSET	0x0040	/* omap2/3 active interrupt offset */
+#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
+
 /*
  * OMAP2 has a number of different interrupt controllers, each interrupt
  * controller is identified as its own "bank". Register definitions are
@@ -143,6 +149,7 @@
 
 static void __init omap_init_irq(u32 base, int nr_irqs)
 {
+	void __iomem *omap_irq_base;
 	unsigned long nr_of_irqs = 0;
 	unsigned int nr_banks = 0;
 	int i, j;
@@ -191,6 +198,44 @@
 	omap_init_irq(OMAP34XX_IC_BASE, 128);
 }
 
+static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
+{
+	u32 irqnr;
+
+	do {
+		irqnr = readl_relaxed(base_addr + 0x98);
+		if (irqnr)
+			goto out;
+
+		irqnr = readl_relaxed(base_addr + 0xb8);
+		if (irqnr)
+			goto out;
+
+		irqnr = readl_relaxed(base_addr + 0xd8);
+#ifdef CONFIG_SOC_OMAPTI816X
+		if (irqnr)
+			goto out;
+		irqnr = readl_relaxed(base_addr + 0xf8);
+#endif
+
+out:
+		if (!irqnr)
+			break;
+
+		irqnr = readl_relaxed(base_addr + INTCPS_SIR_IRQ_OFFSET);
+		irqnr &= ACTIVEIRQ_MASK;
+
+		if (irqnr)
+			handle_IRQ(irqnr, regs);
+	} while (irqnr);
+}
+
+asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
+{
+	void __iomem *base_addr = OMAP2_IRQ_BASE;
+	omap_intc_handle_irq(base_addr, regs);
+}
+
 #ifdef CONFIG_ARCH_OMAP3
 static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
 
@@ -263,4 +308,10 @@
 	/* Re-enable autoidle */
 	intc_bank_write_reg(1, &irq_banks[0], INTC_SYSCONFIG);
 }
+
+asmlinkage void __exception_irq_entry omap3_intc_handle_irq(struct pt_regs *regs)
+{
+	void __iomem *base_addr = OMAP3_IRQ_BASE;
+	omap_intc_handle_irq(base_addr, regs);
+}
 #endif /* CONFIG_ARCH_OMAP3 */
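
The C handlers defined above take over the interrupt decoding that the assembly in entry-macro.S used to do; with the generic multi-IRQ-handler entry path, a machine record simply points its handle_irq hook at the appropriate handler. A minimal sketch of how an OMAP3 board description could be wired up after this change (the board name is hypothetical; the callbacks are the ones declared in common.h above):

MACHINE_START(EXAMPLE_OMAP3_BOARD, "Example OMAP3 board")
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.timer		= &omap3_timer,
MACHINE_END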
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 292eee3..28fcb27 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -145,6 +145,9 @@
 		pdata->reg_size = 4;
 		pdata->has_ccr = true;
 	}
+	pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+	if (id == 1)
+		pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 
 	if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
 		if (id == 2)
@@ -174,9 +177,6 @@
 					name, oh->name);
 		return PTR_ERR(pdev);
 	}
-	pdata->set_clk_src = omap2_mcbsp_set_clk_src;
-	if (id == 1)
-		pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 	omap_mcbsp_count++;
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 4976b93..e5a1c3f 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -19,7 +19,8 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
-#include <mach/omap4-common.h>
+
+#include "common.h"
 
 int platform_cpu_kill(unsigned int cpu)
 {
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 4412ddb..e99bc6c 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -24,7 +24,8 @@
 #include <asm/hardware/gic.h>
 #include <asm/smp_scu.h>
 #include <mach/hardware.h>
-#include <mach/omap4-common.h>
+
+#include "common.h"
 
 /* SCU base address */
 static void __iomem *scu_base;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 35ac3e5..beecfdd 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -22,17 +22,18 @@
 #include <plat/irqs.h>
 
 #include <mach/hardware.h>
-#include <mach/omap4-common.h>
+
+#include "common.h"
 
 #ifdef CONFIG_CACHE_L2X0
 void __iomem *l2cache_base;
 #endif
 
-void __iomem *gic_dist_base_addr;
-
-
 void __init gic_init_irq(void)
 {
+	void __iomem *omap_irq_base;
+	void __iomem *gic_dist_base_addr;
+
 	/* Static mapping, never released */
 	gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
 	BUG_ON(!gic_dist_base_addr);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 207a2ff..529142a 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -137,7 +137,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/cpu.h>
 #include "clockdomain.h"
 #include "powerdomain.h"
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 7f8915a..eef43e2 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3247,18 +3247,14 @@
 
 /* 3430ES1-only hwmods */
 static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
-	&omap3xxx_iva_hwmod,
 	&omap3430es1_dss_core_hwmod,
-	&omap3xxx_mailbox_hwmod,
 	NULL
 };
 
 /* 3430ES2+-only hwmods */
 static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
-	&omap3xxx_iva_hwmod,
 	&omap3xxx_dss_core_hwmod,
 	&omap3xxx_usbhsotg_hwmod,
-	&omap3xxx_mailbox_hwmod,
 	NULL
 };
 
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 00bff46..1881fe9 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -18,7 +18,7 @@
 
 #include <plat/omap-pm.h>
 #include <plat/omap_device.h>
-#include <plat/common.h>
+#include "common.h"
 
 #include "voltage.h"
 #include "powerdomain.h"
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index cf0c216..ef8595c 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -42,6 +42,7 @@
 #include <plat/dma.h>
 #include <plat/board.h>
 
+#include "common.h"
 #include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
 #include "cm2xxx_3xxx.h"
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index efa6649..fa637df 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -42,6 +42,7 @@
 #include <plat/gpmc.h>
 #include <plat/dma.h>
 
+#include "common.h"
 #include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 #include "prm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 59a870b..8edb015 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -16,8 +16,8 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
+#include "common.h"
 #include "powerdomain.h"
-#include <mach/omap4-common.h>
 
 struct power_state {
 	struct powerdomain *pwrdm;
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 597e2da..626acfa 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -25,8 +25,7 @@
 #include <linux/delay.h>
 #include <linux/export.h>
 
-#include <mach/system.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/prcm.h>
 #include <plat/irqs.h>
 
@@ -59,7 +58,7 @@
 EXPORT_SYMBOL(omap_prcm_get_reset_sources);
 
 /* Resets clock rates and reboots the system. Only called from system.h */
-static void omap_prcm_arch_reset(char mode, const char *cmd)
+void omap_prcm_restart(char mode, const char *cmd)
 {
 	s16 prcm_offs = 0;
 
@@ -110,8 +109,6 @@
 	omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
 }
 
-void (*arch_reset)(char, const char *) = omap_prcm_arch_reset;
-
 /**
  * omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
  * @reg: physical address of module IDLEST register
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.c b/arch/arm/mach-omap2/prcm_mpu44xx.c
index 171fe17..ca669b5 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.c
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.c
@@ -15,7 +15,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prcm_mpu44xx.h"
 #include "cm-regbits-44xx.h"
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index f02d87f..9a08ba3 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -16,7 +16,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/cpu.h>
 #include <plat/prcm.h>
 
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 495a31a..dd885ee 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -17,7 +17,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/cpu.h>
 #include <plat/prcm.h>
 
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 3a7bab1..f6de5bc 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -16,7 +16,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm44xx.h"
 #include "prminst44xx.h"
diff --git a/arch/arm/mach-omap2/sdram-nokia.c b/arch/arm/mach-omap2/sdram-nokia.c
index 14caa22..ee3a8ad 100644
--- a/arch/arm/mach-omap2/sdram-nokia.c
+++ b/arch/arm/mach-omap2/sdram-nokia.c
@@ -18,7 +18,7 @@
 #include <linux/io.h>
 
 #include <plat/io.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/clock.h>
 #include <plat/sdrc.h>
 
diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
index 8f27828..e3d345f 100644
--- a/arch/arm/mach-omap2/sdrc.c
+++ b/arch/arm/mach-omap2/sdrc.c
@@ -23,7 +23,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/clock.h>
 #include <plat/sram.h>
 
diff --git a/arch/arm/mach-omap2/sdrc2xxx.c b/arch/arm/mach-omap2/sdrc2xxx.c
index ccdb010..791a63c 100644
--- a/arch/arm/mach-omap2/sdrc2xxx.c
+++ b/arch/arm/mach-omap2/sdrc2xxx.c
@@ -24,7 +24,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/clock.h>
 #include <plat/sram.h>
 
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 9992dbf..42c3267 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -33,7 +33,7 @@
 #include <plat/omap-serial.h>
 #endif
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/board.h>
 #include <plat/clock.h>
 #include <plat/dma.h>
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index cf246b3..9dd9345 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "pm.h"
 #include "smartreflex.h"
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 037b0d7..6eeff0e 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -41,7 +41,7 @@
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
 #include <asm/sched_clock.h>
-#include <plat/common.h>
+#include "common.h"
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
 #include <plat/omap-pm.h>
@@ -254,7 +254,6 @@
 /*
  * clocksource
  */
-static DEFINE_CLOCK_DATA(cd);
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
 	return (cycle_t)__omap_dm_timer_read_counter(&clksrc, 1);
@@ -268,23 +267,12 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void notrace dmtimer_update_sched_clock(void)
+static u32 notrace dmtimer_read_sched_clock(void)
 {
-	u32 cyc;
-
-	cyc = __omap_dm_timer_read_counter(&clksrc, 1);
-
-	update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-unsigned long long notrace sched_clock(void)
-{
-	u32 cyc = 0;
-
 	if (clksrc.reserved)
-		cyc = __omap_dm_timer_read_counter(&clksrc, 1);
+		return __omap_dm_timer_read_counter(clksrc.io_base, 1);
 
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+	return 0;
 }
 
 /* Setup free-running counter for clocksource */
@@ -301,7 +289,7 @@
 
 	__omap_dm_timer_load_start(&clksrc,
 			OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
-	init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
+	setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
 
 	if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
 		pr_err("Could not register clocksource %s\n",
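
The hunk above captures the new sched_clock() plumbing: instead of each platform overriding sched_clock() and carrying its own DEFINE_CLOCK_DATA state, it now registers a plain 32-bit counter read function with setup_sched_clock() and lets the core handle wrapping and nanosecond conversion. A generic sketch of the same pattern, with illustrative names not taken from this patch:

#include <linux/io.h>
#include <asm/sched_clock.h>

/* Hypothetical free-running 32-bit counter mapped at example_counter_base. */
static void __iomem *example_counter_base;

static u32 notrace example_read_sched_clock(void)
{
	return readl_relaxed(example_counter_base);
}

static void __init example_sched_clock_init(unsigned long rate)
{
	/* 32 counter bits; the core extends this to 64-bit nanoseconds. */
	setup_sched_clock(example_read_sched_clock, 32, rate);
}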
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index cfe348e..a5ec7f8f 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -18,7 +18,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm-regbits-34xx.h"
 #include "voltage.h"
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index 2740a96..d70b930 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -18,7 +18,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm44xx.h"
 #include "prm-regbits-44xx.h"
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index 1f8fdf7..8a36342 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -27,7 +27,7 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm-regbits-34xx.h"
 #include "prm-regbits-44xx.h"
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 071101d..474559d 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -18,7 +18,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 #include <plat/cpu.h>
 
 #include "prm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c
index c4584e9..4e11d02 100644
--- a/arch/arm/mach-omap2/voltagedomains44xx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c
@@ -21,7 +21,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm-regbits-44xx.h"
 #include "prm44xx.h"
diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
index 66bd700..807391d 100644
--- a/arch/arm/mach-omap2/vp.c
+++ b/arch/arm/mach-omap2/vp.c
@@ -1,7 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "voltage.h"
 #include "vp.h"
diff --git a/arch/arm/mach-omap2/vp3xxx_data.c b/arch/arm/mach-omap2/vp3xxx_data.c
index 260c554..bd89f80 100644
--- a/arch/arm/mach-omap2/vp3xxx_data.c
+++ b/arch/arm/mach-omap2/vp3xxx_data.c
@@ -19,7 +19,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm-regbits-34xx.h"
 #include "voltage.h"
diff --git a/arch/arm/mach-omap2/vp44xx_data.c b/arch/arm/mach-omap2/vp44xx_data.c
index b4e7704..8c031d1 100644
--- a/arch/arm/mach-omap2/vp44xx_data.c
+++ b/arch/arm/mach-omap2/vp44xx_data.c
@@ -19,7 +19,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 
-#include <plat/common.h>
+#include "common.h"
 
 #include "prm44xx.h"
 #include "prm-regbits-44xx.h"
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 22ace0b..41127e8 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -18,6 +18,7 @@
 #include <linux/mbus.h>
 #include <linux/mv643xx_i2c.h>
 #include <linux/ata_platform.h>
+#include <linux/delay.h>
 #include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/setup.h>
@@ -304,6 +305,17 @@
 	orion5x_wdt_init();
 }
 
+void orion5x_restart(char mode, const char *cmd)
+{
+	/*
+	 * Enable and issue soft reset
+	 */
+	orion5x_setbits(RSTOUTn_MASK, (1 << 2));
+	orion5x_setbits(CPU_SOFT_RESET, 1);
+	mdelay(200);
+	orion5x_clrbits(CPU_SOFT_RESET, 1);
+}
+
 /*
  * Many orion-based systems have buggy bootloader implementations.
  * This is a common fixup for bogus memory tags.
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 909489f..37ef18d 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -39,6 +39,7 @@
 void orion5x_uart0_init(void);
 void orion5x_uart1_init(void);
 void orion5x_xor_init(void);
+void orion5x_restart(char, const char *);
 
 /*
  * PCIe/PCI functions.
diff --git a/arch/arm/mach-orion5x/d2net-setup.c b/arch/arm/mach-orion5x/d2net-setup.c
index 8c83009..d75dcfa 100644
--- a/arch/arm/mach-orion5x/d2net-setup.c
+++ b/arch/arm/mach-orion5x/d2net-setup.c
@@ -343,6 +343,7 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
 #endif
 
@@ -355,6 +356,7 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
 #endif
 
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index 4b79a80..a104d5a 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -364,4 +364,5 @@
 	.init_early	= orion5x_init_early,
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 343f60e..91b0f47 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -736,4 +736,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/edmini_v2-setup.c b/arch/arm/mach-orion5x/edmini_v2-setup.c
index 70a4e92..355e962 100644
--- a/arch/arm/mach-orion5x/edmini_v2-setup.c
+++ b/arch/arm/mach-orion5x/edmini_v2-setup.c
@@ -258,4 +258,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
index c519610..e9d9afd 100644
--- a/arch/arm/mach-orion5x/include/mach/io.h
+++ b/arch/arm/mach-orion5x/include/mach/io.h
@@ -15,31 +15,6 @@
 
 #define IO_SPACE_LIMIT		0xffffffff
 
-static inline void __iomem *
-__arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
-{
-	void __iomem *retval;
-	unsigned long offs = paddr - ORION5X_REGS_PHYS_BASE;
-	if (mtype == MT_DEVICE && size && offs < ORION5X_REGS_SIZE &&
-	    size <= ORION5X_REGS_SIZE && offs + size <= ORION5X_REGS_SIZE) {
-		retval = (void __iomem *)ORION5X_REGS_VIRT_BASE + offs;
-	} else {
-		retval = __arm_ioremap(paddr, size, mtype);
-	}
-
-	return retval;
-}
-
-static inline void
-__arch_iounmap(void __iomem *addr)
-{
-	if (addr < (void __iomem *)ORION5X_REGS_VIRT_BASE ||
-	    addr >= (void __iomem *)(ORION5X_REGS_VIRT_BASE + ORION5X_REGS_SIZE))
-		__iounmap(addr);
-}
-
-#define __arch_ioremap		__arch_ioremap
-#define __arch_iounmap		__arch_iounmap
 #define __io(a)			__typesafe_io(a)
 #define __mem_pci(a)		(a)
 
diff --git a/arch/arm/mach-orion5x/include/mach/system.h b/arch/arm/mach-orion5x/include/mach/system.h
index a1d6e46..825a265 100644
--- a/arch/arm/mach-orion5x/include/mach/system.h
+++ b/arch/arm/mach-orion5x/include/mach/system.h
@@ -11,23 +11,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <mach/bridge-regs.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * Enable and issue soft reset
-	 */
-	orion5x_setbits(RSTOUTn_MASK, (1 << 2));
-	orion5x_setbits(CPU_SOFT_RESET, 1);
-	mdelay(200);
-	orion5x_clrbits(CPU_SOFT_RESET, 1);
-}
-
-
 #endif
diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h
deleted file mode 100644
index 06b50ae..0000000
--- a/arch/arm/mach-orion5x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END       0xfd800000UL
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index d3cd3f6..47587b8 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -386,6 +386,7 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
 #endif
 
@@ -399,5 +400,6 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c
index 9503fff..5272131 100644
--- a/arch/arm/mach-orion5x/ls-chl-setup.c
+++ b/arch/arm/mach-orion5x/ls-chl-setup.c
@@ -140,7 +140,7 @@
 
 static void lschl_power_off(void)
 {
-	arm_machine_restart('h', NULL);
+	orion5x_restart('h', NULL);
 }
 
 /*****************************************************************************
@@ -325,4 +325,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c
index ed6d772..9a8697b 100644
--- a/arch/arm/mach-orion5x/ls_hgl-setup.c
+++ b/arch/arm/mach-orion5x/ls_hgl-setup.c
@@ -186,7 +186,7 @@
 
 static void ls_hgl_power_off(void)
 {
-	arm_machine_restart('h', NULL);
+	orion5x_restart('h', NULL);
 }
 
 
@@ -272,4 +272,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c
index 743f7f1..09c7365 100644
--- a/arch/arm/mach-orion5x/lsmini-setup.c
+++ b/arch/arm/mach-orion5x/lsmini-setup.c
@@ -186,7 +186,7 @@
 
 static void lsmini_power_off(void)
 {
-	arm_machine_restart('h', NULL);
+	orion5x_restart('h', NULL);
 }
 
 
@@ -274,5 +274,6 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 6020e26b..65faaa3 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -267,5 +267,6 @@
 	.init_early	= orion5x_init_early,
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
-	.fixup		= tag_fixup_mem32
+	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 201ae36..c87fde4 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -234,5 +234,6 @@
 	.init_early	= orion5x_init_early,
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
-	.fixup		= tag_fixup_mem32
+	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c
index 6197c79..0180c39 100644
--- a/arch/arm/mach-orion5x/net2big-setup.c
+++ b/arch/arm/mach-orion5x/net2big-setup.c
@@ -426,5 +426,6 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index ebd6767..292038f 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -175,4 +175,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index 05db2d3..c44eaba 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -187,4 +187,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index e47fa05..96438b6 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -311,4 +311,5 @@
 	.init_early	= orion5x_init_early,
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index 6431725..2c5fab0 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -128,4 +128,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c
index 29f1526..632a861 100644
--- a/arch/arm/mach-orion5x/terastation_pro2-setup.c
+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c
@@ -364,4 +364,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index 31e51f9..5d64087 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -178,7 +178,7 @@
 
 static int __init qnap_ts209_pci_init(void)
 {
-	if (machine_is_ts_x09())
+	if (machine_is_ts209())
 		pci_common_init(&qnap_ts209_pci);
 
 	return 0;
@@ -329,4 +329,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 0fbcc14..4e6ff75 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -318,4 +318,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index b35e200..c96f374 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -627,4 +627,5 @@
 	.init_early	= orion5x_init_early,
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index b8be7d8..078c03f 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -179,4 +179,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index faf81a0..46a9778 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -267,4 +267,5 @@
 	.init_irq	= orion5x_init_irq,
 	.timer		= &orion5x_timer,
 	.fixup		= tag_fixup_mem32,
+	.restart	= orion5x_restart,
 MACHINE_END
diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c
index 34d0834..ad871bd 100644
--- a/arch/arm/mach-picoxcell/common.c
+++ b/arch/arm/mach-picoxcell/common.c
@@ -11,6 +11,7 @@
 #include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -33,22 +34,20 @@
 };
 
 static const struct of_device_id vic_of_match[] __initconst = {
-	{ .compatible = "arm,pl192-vic" },
+	{ .compatible = "arm,pl192-vic", .data = vic_of_init, },
 	{ /* Sentinel */ }
 };
 
 static void __init picoxcell_init_irq(void)
 {
-	vic_init(IO_ADDRESS(PICOXCELL_VIC0_BASE), 0, ~0, 0);
-	vic_init(IO_ADDRESS(PICOXCELL_VIC1_BASE), 32, ~0, 0);
-	irq_domain_generate_simple(vic_of_match, PICOXCELL_VIC0_BASE, 0);
-	irq_domain_generate_simple(vic_of_match, PICOXCELL_VIC1_BASE, 32);
+	of_irq_init(vic_of_match);
 }
 
 DT_MACHINE_START(PICOXCELL, "Picochip picoXcell")
 	.map_io		= picoxcell_map_io,
 	.nr_irqs	= ARCH_NR_IRQS,
 	.init_irq	= picoxcell_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &picoxcell_timer,
 	.init_machine	= picoxcell_init_machine,
 	.dt_compat	= picoxcell_dt_match,
diff --git a/arch/arm/mach-picoxcell/include/mach/entry-macro.S b/arch/arm/mach-picoxcell/include/mach/entry-macro.S
index a6b09f7..9b505ac 100644
--- a/arch/arm/mach-picoxcell/include/mach/entry-macro.S
+++ b/arch/arm/mach-picoxcell/include/mach/entry-macro.S
@@ -9,11 +9,8 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-#include <mach/map.h>
+	.macro  disable_fiq
+	.endm
 
-#define VA_VIC0		IO_ADDRESS(PICOXCELL_VIC0_BASE)
-#define VA_VIC1		IO_ADDRESS(PICOXCELL_VIC1_BASE)
-
-#include <asm/entry-macro-vic2.S>
+	.macro  arch_ret_to_user, tmp1, tmp2
+	.endm
diff --git a/arch/arm/mach-picoxcell/include/mach/system.h b/arch/arm/mach-picoxcell/include/mach/system.h
index 67c589b..1a5d8cb 100644
--- a/arch/arm/mach-picoxcell/include/mach/system.h
+++ b/arch/arm/mach-picoxcell/include/mach/system.h
@@ -23,9 +23,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(int mode, const char *cmd)
-{
-	/* Watchdog reset to go here. */
-}
-
 #endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-picoxcell/include/mach/vmalloc.h b/arch/arm/mach-picoxcell/include/mach/vmalloc.h
deleted file mode 100644
index 0216cc4..0000000
--- a/arch/arm/mach-picoxcell/include/mach/vmalloc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#define VMALLOC_END	0xfe000000UL
diff --git a/arch/arm/mach-picoxcell/time.c b/arch/arm/mach-picoxcell/time.c
index 90a554f..6c89cf8 100644
--- a/arch/arm/mach-picoxcell/time.c
+++ b/arch/arm/mach-picoxcell/time.c
@@ -11,7 +11,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/sched.h>
 
 #include <asm/mach/time.h>
 #include <asm/sched_clock.h>
@@ -66,21 +65,11 @@
 	dw_apb_clocksource_register(cs);
 }
 
-static DEFINE_CLOCK_DATA(cd);
 static void __iomem *sched_io_base;
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace picoxcell_read_sched_clock(void)
 {
-	cycle_t cyc = sched_io_base ? __raw_readl(sched_io_base) : 0;
-
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace picoxcell_update_sched_clock(void)
-{
-	cycle_t cyc = sched_io_base ? __raw_readl(sched_io_base) : 0;
-
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return __raw_readl(sched_io_base);
 }
 
 static const struct of_device_id picoxcell_rtc_ids[] __initconst = {
@@ -100,7 +89,7 @@
 	timer_get_base_and_rate(sched_timer, &sched_io_base, &rate);
 	of_node_put(sched_timer);
 
-	init_sched_clock(&cd, picoxcell_update_sched_clock, 32, rate);
+	setup_sched_clock(picoxcell_read_sched_clock, 32, rate);
 }
 
 static const struct of_device_id picoxcell_timer_ids[] __initconst = {
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index cdb95e7..4cfb40b 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -260,6 +260,11 @@
 	iotable_init(pnx4008_io_desc, ARRAY_SIZE(pnx4008_io_desc));
 }
 
+static void pnx4008_restart(char mode, const char *cmd)
+{
+	soft_restart(0);
+}
+
 extern struct sys_timer pnx4008_timer;
 
 MACHINE_START(PNX4008, "Philips PNX4008")
@@ -269,4 +274,5 @@
 	.init_irq 		= pnx4008_init_irq,
 	.init_machine 		= pnx4008_init,
 	.timer 			= &pnx4008_timer,
+	.restart		= pnx4008_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pnx4008/include/mach/system.h b/arch/arm/mach-pnx4008/include/mach/system.h
index 5dda2bb..60cfe71 100644
--- a/arch/arm/mach-pnx4008/include/mach/system.h
+++ b/arch/arm/mach-pnx4008/include/mach/system.h
@@ -21,18 +21,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
 static void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	cpu_reset(0);
-}
-
 #endif
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
deleted file mode 100644
index 184913c..0000000
--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-pnx4008/include/mach/vmalloc.h
- *
- * Author: Vitaly Wool <source@mvista.com>
- *
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index 83e5d212..b28a930 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -16,6 +16,7 @@
 
 extern void __init sirfsoc_of_irq_init(void);
 extern void __init sirfsoc_of_clk_init(void);
+extern void sirfsoc_restart(char, const char *);
 
 #ifndef CONFIG_DEBUG_LL
 static inline void sirfsoc_map_lluart(void)  {}
diff --git a/arch/arm/mach-prima2/include/mach/map.h b/arch/arm/mach-prima2/include/mach/map.h
index 66b1ae2..6f24353 100644
--- a/arch/arm/mach-prima2/include/mach/map.h
+++ b/arch/arm/mach-prima2/include/mach/map.h
@@ -9,8 +9,10 @@
 #ifndef __MACH_PRIMA2_MAP_H__
 #define __MACH_PRIMA2_MAP_H__
 
-#include <mach/vmalloc.h>
+#include <linux/const.h>
 
-#define SIRFSOC_VA(x)			(VMALLOC_END + ((x) & 0x00FFF000))
+#define SIRFSOC_VA_BASE		_AC(0xFEC00000, UL)
+
+#define SIRFSOC_VA(x)		(SIRFSOC_VA_BASE + ((x) & 0x00FFF000))
 
 #endif
diff --git a/arch/arm/mach-prima2/include/mach/system.h b/arch/arm/mach-prima2/include/mach/system.h
index 0dbd257..2c7d2a9 100644
--- a/arch/arm/mach-prima2/include/mach/system.h
+++ b/arch/arm/mach-prima2/include/mach/system.h
@@ -9,21 +9,9 @@
 #ifndef __MACH_SYSTEM_H__
 #define __MACH_SYSTEM_H__
 
-#include <linux/bitops.h>
-#include <mach/hardware.h>
-
-#define SIRFSOC_SYS_RST_BIT  BIT(31)
-
-extern void __iomem *sirfsoc_rstc_base;
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base);
-}
-
 #endif
diff --git a/arch/arm/mach-prima2/include/mach/vmalloc.h b/arch/arm/mach-prima2/include/mach/vmalloc.h
deleted file mode 100644
index c9f90fe..0000000
--- a/arch/arm/mach-prima2/include/mach/vmalloc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/ach-prima2/include/mach/vmalloc.h
- *
- * Copyright (c) 2010 – 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <linux/const.h>
-
-#define VMALLOC_END    _AC(0xFEC00000, UL)
-
-#endif
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index a12b689..02b9c05 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -40,4 +40,5 @@
 	.dma_zone_size	= SZ_256M,
 	.init_machine	= sirfsoc_mach_init,
 	.dt_compat      = prima2cb_dt_match,
+	.restart	= sirfsoc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 492cfa8..762adb7 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -68,3 +68,10 @@
 
 	return 0;
 }
+
+#define SIRFSOC_SYS_RST_BIT  BIT(31)
+
+void sirfsoc_restart(char mode, const char *cmd)
+{
+	writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base);
+}
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 4b81f59..82514f5 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -829,4 +829,5 @@
 	.timer		= &pxa_timer,
 	.init_machine	= balloon3_init,
 	.atag_offset	= 0x100,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index 4efc16d..c2f0be0 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -153,5 +153,6 @@
 	.init_irq = pxa3xx_init_irq,
 	.handle_irq = pxa3xx_handle_irq,
 	.timer = &pxa_timer,
-	.init_machine = capc7117_init
+	.init_machine = capc7117_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index f2e4190..ec170a5 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -524,4 +524,5 @@
 #ifdef CONFIG_PCI
 	.dma_zone_size	= SZ_64M,
 #endif
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index e096bba..7236974 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -858,4 +858,5 @@
 	.timer		= &pxa_timer,
 	.init_machine	= cm_x300_init,
 	.fixup		= cm_x300_fixup,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 80538b8..248804b 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -183,7 +183,7 @@
 /******************************************************************************
  * Backlight
  ******************************************************************************/
-#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM__MODULE)
+#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
 static struct platform_pwm_backlight_data income_backlight_data = {
 	.pwm_id		= 0,
 	.max_brightness	= 0x3ff,
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 05bfa1b..6a68516 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -313,6 +313,7 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 
 MACHINE_START(INCOME, "Income s.r.o. SH-Dmaster PXA270 SBC")
@@ -322,5 +323,6 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index c825e8b..c01059a 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -189,5 +189,6 @@
 	.init_irq	= pxa3xx_init_irq,
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index d23b92b..5028f23 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -259,5 +259,6 @@
 	.init_irq	= pxa3xx_init_irq,
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 549468d..9d4dc59 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -655,7 +655,7 @@
 		/* Green LED off tells the bootloader to halt */
 		gpio_set_value(CORGI_GPIO_LED_GREEN, 0);
 
-	arm_machine_restart('h', NULL);
+	pxa_restart('h', NULL);
 }
 
 static void corgi_restart(char mode, const char *cmd)
@@ -664,13 +664,12 @@
 		/* Green LED on tells the bootloader to reboot */
 		gpio_set_value(CORGI_GPIO_LED_GREEN, 1);
 
-	arm_machine_restart('h', cmd);
+	pxa_restart('h', cmd);
 }
 
 static void __init corgi_init(void)
 {
 	pm_power_off = corgi_poweroff;
-	arm_pm_restart = corgi_restart;
 
 	/* Stop 3.6MHz and drive HIGH to PCMCIA and CS */
 	PCFR |= PCFR_OPDE;
@@ -726,6 +725,7 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.init_machine	= corgi_init,
 	.timer		= &pxa_timer,
+	.restart	= corgi_restart,
 MACHINE_END
 #endif
 
@@ -737,6 +737,7 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.init_machine	= corgi_init,
 	.timer		= &pxa_timer,
+	.restart	= corgi_restart,
 MACHINE_END
 #endif
 
@@ -748,6 +749,7 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.init_machine	= corgi_init,
 	.timer		= &pxa_timer,
+	.restart	= corgi_restart,
 MACHINE_END
 #endif
 
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index 5e2cf39..fb5a51d 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -278,4 +278,5 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.init_machine   = csb726_init,
 	.timer          = &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 94acc0b..bd396ba 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1305,6 +1305,7 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= em_x270_init,
+	.restart	= pxa_restart,
 MACHINE_END
 
 MACHINE_START(EXEDA, "Compulab eXeda")
@@ -1314,4 +1315,5 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= em_x270_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index d82b7aa..69473db 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -196,6 +196,7 @@
 	.fixup		= eseries_fixup,
 	.init_machine	= e330_init,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -246,6 +247,7 @@
 	.fixup		= eseries_fixup,
 	.init_machine	= e350_init,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -369,6 +371,7 @@
 	.fixup		= eseries_fixup,
 	.init_machine	= e400_init,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -558,6 +561,7 @@
 	.fixup		= eseries_fixup,
 	.init_machine	= e740_init,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -750,6 +754,7 @@
 	.fixup		= eseries_fixup,
 	.init_machine	= e750_init,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -955,5 +960,6 @@
 	.fixup		= eseries_fixup,
 	.init_machine	= e800_init,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 8308eee..15ab253 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -804,6 +804,7 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = a780_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -870,6 +871,7 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = e680_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -936,6 +938,7 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = a1200_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -1127,6 +1130,7 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = a910_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -1193,6 +1197,7 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = e6_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -1233,5 +1238,6 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = e2_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 92a2e85..0d729e6 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -57,3 +57,5 @@
 void __init pxa_set_btuart_info(void *info);
 void __init pxa_set_stuart_info(void *info);
 void __init pxa_set_hwuart_info(void *info);
+
+void pxa_restart(char, const char *);
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index ffdd70d..ac3b1ce 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -239,4 +239,5 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= gumstix_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/h5000.c b/arch/arm/mach-pxa/h5000.c
index 4b5e110..fde6b4c 100644
--- a/arch/arm/mach-pxa/h5000.c
+++ b/arch/arm/mach-pxa/h5000.c
@@ -209,4 +209,5 @@
 	.handle_irq = pxa25x_handle_irq,
 	.timer = &pxa_timer,
 	.init_machine = h5000_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/himalaya.c b/arch/arm/mach-pxa/himalaya.c
index f2c3245..26d069a 100644
--- a/arch/arm/mach-pxa/himalaya.c
+++ b/arch/arm/mach-pxa/himalaya.c
@@ -164,4 +164,5 @@
 	.handle_irq = pxa25x_handle_irq,
 	.init_machine = himalaya_init,
 	.timer = &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 6f6368e..ce16bda 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -845,4 +845,5 @@
 	.handle_irq     = pxa27x_handle_irq,
 	.init_machine = hx4700_init,
 	.timer        = &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index f78d5db..e239b82 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -196,5 +196,6 @@
 	.init_irq	= pxa3xx_init_irq,
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= icontrol_init
+	.init_machine	= icontrol_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index ddf20e5..fbabd84 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -199,4 +199,5 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= idp_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S
index a73bc86..260c0c1 100644
--- a/arch/arm/mach-pxa/include/mach/entry-macro.S
+++ b/arch/arm/mach-pxa/include/mach/entry-macro.S
@@ -7,45 +7,9 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <mach/hardware.h>
-#include <mach/irqs.h>
 
 		.macro	disable_fiq
 		.endm
 
-		.macro  get_irqnr_preamble, base, tmp
-		.endm
-
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		mrc	p15, 0, \tmp, c0, c0, 0		@ CPUID
-		mov	\tmp, \tmp, lsr #13
-		and	\tmp, \tmp, #0x7		@ Core G
-		cmp	\tmp, #1
-		bhi	1002f
-
-		@ Core Generation 1 (PXA25x)
-		mov	\base, #io_p2v(0x40000000)	@ IIR Ctl = 0x40d00000
-		add	\base, \base, #0x00d00000
-		ldr	\irqstat, [\base, #0]		@ ICIP
-		ldr	\irqnr, [\base, #4]		@ ICMR
-
-		ands	\irqnr, \irqstat, \irqnr
-		beq	1001f
-		rsb	\irqstat, \irqnr, #0
-		and	\irqstat, \irqstat, \irqnr
-		clz	\irqnr, \irqstat
-		rsb	\irqnr, \irqnr, #(31 + PXA_IRQ(0))
-		b	1001f
-1002:
-		@ Core Generation 2 (PXA27x) or Core Generation 3 (PXA3xx)
-		mrc	p6, 0, \irqstat, c5, c0, 0	@ ICHP
-		tst	\irqstat, #0x80000000
-		beq	1001f
-		bic	\irqstat, \irqstat, #0x80000000
-		mov	\irqnr, \irqstat, lsr #16
-		add	\irqnr, \irqnr, #(PXA_IRQ(0))
-1001:
-		.endm
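
With the get_irqnr_and_base macro gone, PXA interrupt demultiplexing is done from C through the machine_desc .handle_irq hook (pxa25x_handle_irq/pxa27x_handle_irq in the board entries above). A handler of that kind has roughly the following shape; this is only a sketch, and pending_reg/IRQ_BASE are placeholders rather than real PXA symbols:

#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/exception.h>

static void __iomem *pending_reg;	/* hypothetical pending-IRQ register */
#define IRQ_BASE	0		/* hypothetical first IRQ number */

asmlinkage void __exception_irq_entry board_handle_irq(struct pt_regs *regs)
{
	u32 pending;

	/* dispatch until no source is left pending */
	while ((pending = readl_relaxed(pending_reg)) != 0)
		handle_IRQ(IRQ_BASE + __ffs(pending), regs);
}
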
diff --git a/arch/arm/mach-pxa/include/mach/system.h b/arch/arm/mach-pxa/include/mach/system.h
index d1fce8b..c5afacd 100644
--- a/arch/arm/mach-pxa/include/mach/system.h
+++ b/arch/arm/mach-pxa/include/mach/system.h
@@ -9,15 +9,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-#include <asm/proc-fns.h>
-#include "hardware.h"
-#include "pxa2xx-regs.h"
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-
-void arch_reset(char mode, const char *cmd);
diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h
deleted file mode 100644
index bfecfbf..0000000
--- a/arch/arm/mach-pxa/include/mach/vmalloc.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * arch/arm/mach-pxa/include/mach/vmalloc.h
- *
- * Author:	Nicolas Pitre
- * Copyright:	(C) 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END       (0xe8000000UL)
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 7b324ec..c337c7e 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -445,4 +445,5 @@
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= littleton_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 1dd5302..6119c01 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -505,4 +505,5 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= lpd270_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index c48ce6d..4b7a528 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -556,4 +556,5 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= lubbock_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 4b796c3..4e6774f 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -760,4 +760,5 @@
 	.handle_irq = pxa27x_handle_irq,
 	.init_machine = magician_init,
 	.timer = &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 0567d39..ca14555 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -622,4 +622,5 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= mainstone_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index b938fc2..924a3b5 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -696,13 +696,13 @@
 static void mioa701_poweroff(void)
 {
 	mioa701_machine_exit();
-	arm_machine_restart('s', NULL);
+	pxa_restart('s', NULL);
 }
 
 static void mioa701_restart(char c, const char *cmd)
 {
 	mioa701_machine_exit();
-	arm_machine_restart('s', cmd);
+	pxa_restart('s', cmd);
 }
 
 static struct gpio global_gpios[] = {
@@ -734,7 +734,6 @@
 	pxa_set_udc_info(&mioa701_udc_info);
 	pxa_set_ac97_info(&mioa701_ac97_info);
 	pm_power_off = mioa701_poweroff;
-	arm_pm_restart = mioa701_restart;
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	gsm_init();
 
@@ -752,9 +751,11 @@
 
 MACHINE_START(MIOA701, "MIO A701")
 	.atag_offset	= 0x100,
+	.restart_mode	= 's',
 	.map_io		= &pxa27x_map_io,
 	.init_irq	= &pxa27x_init_irq,
 	.handle_irq	= &pxa27x_handle_irq,
 	.init_machine	= mioa701_machine_init,
 	.timer		= &pxa_timer,
+	.restart	= mioa701_restart,
 MACHINE_END
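
The Mio A701 keeps a board-specific restart so it can shut down its GSM hardware before the SoC-level reset; any board with similar needs can use the same wrapper pattern. A minimal sketch, with hypothetical myboard_* names:

static void myboard_quiesce(void)
{
	/* e.g. power down a modem or park board-level hardware */
}

static void myboard_restart(char mode, const char *cmd)
{
	myboard_quiesce();
	pxa_restart('s', cmd);		/* then take the PXA soft-reset path */
}

/* wired up from the board's MACHINE_START block:
 *	.restart_mode	= 's',
 *	.restart	= myboard_restart,
 */
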
diff --git a/arch/arm/mach-pxa/mp900.c b/arch/arm/mach-pxa/mp900.c
index 4af5d51..169bf8f 100644
--- a/arch/arm/mach-pxa/mp900.c
+++ b/arch/arm/mach-pxa/mp900.c
@@ -98,5 +98,6 @@
 	.init_irq	= pxa25x_init_irq,
 	.handle_irq	= pxa25x_handle_irq,
 	.init_machine	= mp900c_init,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 3d4a281..1fa80f4f 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -347,5 +347,6 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= palmld_init
+	.init_machine	= palmld_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 99d6bcf..5ba1431 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -208,5 +208,6 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= palmt5_init
+	.init_machine	= palmt5_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 2c24c67..29b51b4 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -542,5 +542,6 @@
 	.init_irq	= pxa25x_init_irq,
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= palmtc_init
+	.init_machine	= palmtc_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 9376da0..5ebf49acb 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -361,5 +361,6 @@
 	.init_irq	= pxa25x_init_irq,
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= palmte2_init
+	.init_machine	= palmte2_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index 94e9708..ec82491 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -452,6 +452,7 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = treo680_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -464,5 +465,6 @@
 	.handle_irq       = pxa27x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine	= centro_init,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 4e3e459..6170d76 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -369,5 +369,6 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= palmtx_init
+	.init_machine	= palmtx_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 68e18ba..b2dff9d 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -404,5 +404,6 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= palmz72_init
+	.init_machine	= palmz72_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c
index 0b825a3..fe90544 100644
--- a/arch/arm/mach-pxa/pcm027.c
+++ b/arch/arm/mach-pxa/pcm027.c
@@ -265,4 +265,5 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= pcm027_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 50c8331..b260ce8 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -417,12 +417,7 @@
 
 static void poodle_poweroff(void)
 {
-	arm_machine_restart('h', NULL);
-}
-
-static void poodle_restart(char mode, const char *cmd)
-{
-	arm_machine_restart('h', cmd);
+	pxa_restart('h', NULL);
 }
 
 static void __init poodle_init(void)
@@ -430,7 +425,6 @@
 	int ret = 0;
 
 	pm_power_off = poodle_poweroff;
-	arm_pm_restart = poodle_restart;
 
 	PCFR |= PCFR_OPDE;
 
@@ -472,4 +466,5 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= poodle_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index f0c05f4..4962b16 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -1093,6 +1093,7 @@
 	.init_irq	= pxa3xx_init_irq,
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -1104,6 +1105,7 @@
 	.init_irq	= pxa3xx_init_irq,
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -1115,5 +1117,6 @@
 	.init_irq	= pxa3xx_init_irq,
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 01e9d64..c8497b0 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -81,14 +81,17 @@
 	OSMR3 = OSCR + 368640;	/* ... in 100 ms */
 }
 
-void arch_reset(char mode, const char *cmd)
+void pxa_restart(char mode, const char *cmd)
 {
+	local_irq_disable();
+	local_fiq_disable();
+
 	clear_reset_status(RESET_STATUS_ALL);
 
 	switch (mode) {
 	case 's':
 		/* Jump into ROM at address 0 */
-		cpu_reset(0);
+		soft_restart(0);
 		break;
 	case 'g':
 		do_gpio_reset();
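
pxa_restart() is reached through the generic ARM reboot path instead of the old arch_reset()/arm_machine_restart() pair. Simplified sketch of that path (from memory of this kernel generation, not a verbatim copy; wire_up_restart is a placeholder name):

static void wire_up_restart(const struct machine_desc *mdesc)
{
	if (mdesc->restart_mode)
		reboot_mode = mdesc->restart_mode;	/* e.g. 'g' on Spitz/Tosa */
	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;	/* e.g. pxa_restart */
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	arm_pm_restart(reboot_mode, cmd);	/* ends up in pxa_restart(mode, cmd) */
}
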
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index fc2c1e0..8787070 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -602,4 +602,5 @@
 	.handle_irq       = pxa3xx_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = saar_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/saarb.c b/arch/arm/mach-pxa/saarb.c
index 3e999e3..b6dbaca 100644
--- a/arch/arm/mach-pxa/saarb.c
+++ b/arch/arm/mach-pxa/saarb.c
@@ -111,5 +111,6 @@
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = saarb_init,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 953a919..a7f81a3 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -926,7 +926,7 @@
  ******************************************************************************/
 static void spitz_poweroff(void)
 {
-	arm_machine_restart('g', NULL);
+	pxa_restart('g', NULL);
 }
 
 static void spitz_restart(char mode, const char *cmd)
@@ -943,7 +943,6 @@
 {
 	init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0);
 	pm_power_off = spitz_poweroff;
-	arm_pm_restart = spitz_restart;
 
 	PMCR = 0x00;
 
@@ -982,33 +981,39 @@
 
 #ifdef CONFIG_MACH_SPITZ
 MACHINE_START(SPITZ, "SHARP Spitz")
+	.restart_mode	= 'g',
 	.fixup		= spitz_fixup,
 	.map_io		= pxa27x_map_io,
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.init_machine	= spitz_init,
 	.timer		= &pxa_timer,
+	.restart	= spitz_restart,
 MACHINE_END
 #endif
 
 #ifdef CONFIG_MACH_BORZOI
 MACHINE_START(BORZOI, "SHARP Borzoi")
+	.restart_mode	= 'g',
 	.fixup		= spitz_fixup,
 	.map_io		= pxa27x_map_io,
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.init_machine	= spitz_init,
 	.timer		= &pxa_timer,
+	.restart	= spitz_restart,
 MACHINE_END
 #endif
 
 #ifdef CONFIG_MACH_AKITA
 MACHINE_START(AKITA, "SHARP Akita")
+	.restart_mode	= 'g',
 	.fixup		= spitz_fixup,
 	.map_io		= pxa27x_map_io,
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.init_machine	= spitz_init,
 	.timer		= &pxa_timer,
+	.restart	= spitz_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 4c9a48b..80d7f23 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -1005,6 +1005,7 @@
 	.timer		= &pxa_timer,
 	.init_machine	= imote2_init,
 	.atag_offset	= 0x100,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
 
@@ -1017,5 +1018,6 @@
 	.timer = &pxa_timer,
 	.init_machine = stargate2_init,
 	.atag_offset = 0x100,
+	.restart	= pxa_restart,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
index ad47bb9..4fa36a3 100644
--- a/arch/arm/mach-pxa/tavorevb.c
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -495,4 +495,5 @@
 	.handle_irq       = pxa3xx_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = tavorevb_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/tavorevb3.c b/arch/arm/mach-pxa/tavorevb3.c
index fd56916..8a22879 100644
--- a/arch/arm/mach-pxa/tavorevb3.c
+++ b/arch/arm/mach-pxa/tavorevb3.c
@@ -132,4 +132,5 @@
 	.handle_irq       = pxa3xx_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine   = evb3_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index de68470..b503049 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -16,7 +16,6 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
-#include <linux/sched.h>
 
 #include <asm/div64.h>
 #include <asm/mach/irq.h>
@@ -32,18 +31,10 @@
  * long as there is always less than 582 seconds between successive
  * calls to sched_clock() which should always be the case in practice.
  */
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace pxa_read_sched_clock(void)
 {
-	u32 cyc = OSCR;
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace pxa_update_sched_clock(void)
-{
-	u32 cyc = OSCR;
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return OSCR;
 }
 
 
@@ -119,7 +110,7 @@
 	OIER = 0;
 	OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
 
-	init_sched_clock(&cd, pxa_update_sched_clock, 32, clock_tick_rate);
+	setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate);
 
 	clockevents_calc_mult_shift(&ckevt_pxa_osmr0, clock_tick_rate, 4);
 	ckevt_pxa_osmr0.max_delta_ns =
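
The DEFINE_CLOCK_DATA/init_sched_clock() boilerplate is gone: the platform now only supplies a function that reads its free-running counter, and the common ARM code handles extension and wrap-around. Minimal sketch of the pattern, with hypothetical my_* names and rate:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *my_counter;		/* hypothetical counter register */

static u32 notrace my_read_sched_clock(void)
{
	return readl_relaxed(my_counter);	/* raw 32-bit counter value */
}

static void __init my_timer_init(void)
{
	/* 32-bit counter running at 3.6864 MHz (hypothetical rate) */
	setup_sched_clock(my_read_sched_clock, 32, 3686400);
}
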
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 402b0c9..dfe40f8 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -905,7 +905,7 @@
 
 static void tosa_poweroff(void)
 {
-	arm_machine_restart('g', NULL);
+	pxa_restart('g', NULL);
 }
 
 static void tosa_restart(char mode, const char *cmd)
@@ -935,7 +935,6 @@
 	init_gpio_reset(TOSA_GPIO_ON_RESET, 0, 0);
 
 	pm_power_off = tosa_poweroff;
-	arm_pm_restart = tosa_restart;
 
 	PCFR |= PCFR_OPDE;
 
@@ -970,6 +969,7 @@
 }
 
 MACHINE_START(TOSA, "SHARP Tosa")
+	.restart_mode	= 'g',
 	.fixup          = fixup_tosa,
 	.map_io         = pxa25x_map_io,
 	.nr_irqs	= TOSA_NR_IRQS,
@@ -977,4 +977,5 @@
 	.handle_irq       = pxa25x_handle_irq,
 	.init_machine   = tosa_init,
 	.timer          = &pxa_timer,
+	.restart	= tosa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 1aaed2b..0f30af6 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -561,6 +561,7 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 
 MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module")
@@ -571,4 +572,5 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 242ddae..afe2b74 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -998,4 +998,5 @@
 	.handle_irq	= pxa25x_handle_irq,
 	.timer          = &pxa_timer,
 	.init_machine	= viper_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index ca0c661..fed5fb0 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -721,5 +721,6 @@
 	.init_irq	= pxa27x_init_irq,
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
-	.init_machine	= vpac270_init
+	.init_machine	= vpac270_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 70e1730..4bbe9a3 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -185,5 +185,6 @@
 	.init_irq	= pxa25x_init_irq,
 	.handle_irq	= pxa25x_handle_irq,
 	.timer		= &pxa_timer,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index ead32c9..d75f66a 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -725,4 +725,5 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= z2_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 498b83b..9db35a7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -911,5 +911,6 @@
 	.handle_irq	= pxa27x_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= zeus_init,
+	.restart	= pxa_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 6c39c33..7678b1b 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -430,4 +430,5 @@
 	.handle_irq	= pxa3xx_handle_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= zylonite_init,
+	.restart	= pxa_restart,
 MACHINE_END
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index dba6d0c..c593be4 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -12,6 +12,8 @@
 	bool "Support Multicore Cortex-A9 Tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V7
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Enable support for the Cortex-A9MPCore tile fitted to the
 	  Realview(R) Emulation Baseboard platform.
@@ -21,6 +23,8 @@
 	depends on MACH_REALVIEW_EB
 	select CPU_V6K
 	select ARCH_HAS_BARRIERS if SMP
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Enable support for the ARM11MPCore tile fitted to the Realview(R)
 	  Emulation Baseboard platform.
@@ -39,6 +43,8 @@
 	select CPU_V6K
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_HAS_BARRIERS if SMP
 	help
 	  Include support for the ARM(R) RealView(R) Platform Baseboard for
@@ -51,6 +57,7 @@
 	select CPU_V6
 	select ARM_GIC
 	select HAVE_TCM
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Include support for the ARM(R) RealView(R) Platform Baseboard for
 	  ARM1176JZF-S.
@@ -78,6 +85,8 @@
 	bool "Support RealView(R) Platform Baseboard Explore"
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
 	select ZONE_DMA if SPARSEMEM
 	help
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index d5ed5d4..acd329a 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -21,7 +21,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/clcd.h>
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 47259c8..735b57a 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -65,6 +65,5 @@
 extern void realview_init_early(void);
 extern void realview_fixup(struct tag *tags, char **from,
 			   struct meminfo *meminfo);
-extern void (*realview_reset)(char);
 
 #endif
diff --git a/arch/arm/mach-realview/include/mach/entry-macro.S b/arch/arm/mach-realview/include/mach/entry-macro.S
index 4071164..e8a5179 100644
--- a/arch/arm/mach-realview/include/mach/entry-macro.S
+++ b/arch/arm/mach-realview/include/mach/entry-macro.S
@@ -7,8 +7,6 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <mach/hardware.h>
-#include <asm/hardware/entry-macro-gic.S>
 
 		.macro	disable_fiq
 		.endm
diff --git a/arch/arm/mach-realview/include/mach/system.h b/arch/arm/mach-realview/include/mach/system.h
index 6657ff23..471b671 100644
--- a/arch/arm/mach-realview/include/mach/system.h
+++ b/arch/arm/mach-realview/include/mach/system.h
@@ -21,12 +21,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
-void (*realview_reset)(char mode);
-
 static inline void arch_idle(void)
 {
 	/*
@@ -36,15 +30,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/*
-	 * To reset, we hit the on-board reset register
-	 * in the system FPGA
-	 */
-	if (realview_reset)
-		realview_reset(mode);
-	dsb();
-}
-
 #endif
diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h
deleted file mode 100644
index a2a4c68..0000000
--- a/arch/arm/mach-realview/include/mach/vmalloc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  arch/arm/mach-realview/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 ARM Limited
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END		0xf8000000UL
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 026c66a..0069561 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -21,7 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl061.h>
 #include <linux/amba/mmci.h>
@@ -91,8 +91,8 @@
 
 static struct map_desc realview_eb11mp_io_desc[] __initdata = {
 	{
-		.virtual	= IO_ADDRESS(REALVIEW_EB11MP_GIC_CPU_BASE),
-		.pfn		= __phys_to_pfn(REALVIEW_EB11MP_GIC_CPU_BASE),
+		.virtual	= IO_ADDRESS(REALVIEW_EB11MP_SCU_BASE),
+		.pfn		= __phys_to_pfn(REALVIEW_EB11MP_SCU_BASE),
 		.length		= SZ_4K,
 		.type		= MT_DEVICE,
 	}, {
@@ -415,7 +415,7 @@
 	.init		= realview_eb_timer_init,
 };
 
-static void realview_eb_reset(char mode)
+static void realview_eb_restart(char mode, const char *cmd)
 {
 	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
 	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -427,6 +427,7 @@
 	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
 	if (core_tile_eb11mp())
 		__raw_writel(0x0008, reset_ctrl);
+	dsb();
 }
 
 static void __init realview_eb_init(void)
@@ -458,7 +459,6 @@
 #ifdef CONFIG_LEDS
 	leds_event = realview_leds_event;
 #endif
-	realview_reset = realview_eb_reset;
 }
 
 MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
@@ -469,8 +469,10 @@
 	.init_early	= realview_init_early,
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_eb_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= realview_eb_init,
 #ifdef CONFIG_ZONE_DMA
 	.dma_zone_size	= SZ_256M,
 #endif
+	.restart	= realview_eb_restart,
 MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index c057540..8fe3955 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -21,7 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl061.h>
 #include <linux/amba/mmci.h>
@@ -336,12 +336,13 @@
 	.init		= realview_pb1176_timer_init,
 };
 
-static void realview_pb1176_reset(char mode)
+static void realview_pb1176_restart(char mode, const char *cmd)
 {
 	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
 	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
 	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
 	__raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
+	dsb();
 }
 
 static void realview_pb1176_fixup(struct tag *tags, char **from,
@@ -381,7 +382,6 @@
 #ifdef CONFIG_LEDS
 	leds_event = realview_leds_event;
 #endif
-	realview_reset = realview_pb1176_reset;
 }
 
 MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
@@ -392,8 +392,10 @@
 	.init_early	= realview_init_early,
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb1176_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= realview_pb1176_init,
 #ifdef CONFIG_ZONE_DMA
 	.dma_zone_size	= SZ_256M,
 #endif
+	.restart	= realview_pb1176_restart,
 MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 671ad6d..34a2601 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -21,7 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl061.h>
 #include <linux/amba/mmci.h>
@@ -315,7 +315,7 @@
 	.init		= realview_pb11mp_timer_init,
 };
 
-static void realview_pb11mp_reset(char mode)
+static void realview_pb11mp_restart(char mode, const char *cmd)
 {
 	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
 	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -327,6 +327,7 @@
 	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
 	__raw_writel(0x0000, reset_ctrl);
 	__raw_writel(0x0004, reset_ctrl);
+	dsb();
 }
 
 static void __init realview_pb11mp_init(void)
@@ -355,7 +356,6 @@
 #ifdef CONFIG_LEDS
 	leds_event = realview_leds_event;
 #endif
-	realview_reset = realview_pb11mp_reset;
 }
 
 MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
@@ -366,8 +366,10 @@
 	.init_early	= realview_init_early,
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb11mp_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= realview_pb11mp_init,
 #ifdef CONFIG_ZONE_DMA
 	.dma_zone_size	= SZ_256M,
 #endif
+	.restart	= realview_pb11mp_restart,
 MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index cbf22df..d26a6de 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -21,7 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl061.h>
 #include <linux/amba/mmci.h>
@@ -271,7 +271,7 @@
 	.init		= realview_pba8_timer_init,
 };
 
-static void realview_pba8_reset(char mode)
+static void realview_pba8_restart(char mode, const char *cmd)
 {
 	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
 	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -283,6 +283,7 @@
 	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
 	__raw_writel(0x0000, reset_ctrl);
 	__raw_writel(0x0004, reset_ctrl);
+	dsb();
 }
 
 static void __init realview_pba8_init(void)
@@ -305,7 +306,6 @@
 #ifdef CONFIG_LEDS
 	leds_event = realview_leds_event;
 #endif
-	realview_reset = realview_pba8_reset;
 }
 
 MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
@@ -316,8 +316,10 @@
 	.init_early	= realview_init_early,
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pba8_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= realview_pba8_init,
 #ifdef CONFIG_ZONE_DMA
 	.dma_zone_size	= SZ_256M,
 #endif
+	.restart	= realview_pba8_restart,
 MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 63c4114..a250fb4 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -20,7 +20,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl061.h>
 #include <linux/amba/mmci.h>
@@ -98,8 +98,8 @@
 
 static struct map_desc realview_local_io_desc[] __initdata = {
 	{
-		.virtual        = IO_ADDRESS(REALVIEW_PBX_TILE_GIC_CPU_BASE),
-		.pfn            = __phys_to_pfn(REALVIEW_PBX_TILE_GIC_CPU_BASE),
+		.virtual        = IO_ADDRESS(REALVIEW_PBX_TILE_SCU_BASE),
+		.pfn            = __phys_to_pfn(REALVIEW_PBX_TILE_SCU_BASE),
 		.length         = SZ_4K,
 		.type           = MT_DEVICE,
 	}, {
@@ -339,7 +339,7 @@
 #endif
 }
 
-static void realview_pbx_reset(char mode)
+static void realview_pbx_restart(char mode, const char *cmd)
 {
 	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
 	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -351,6 +351,7 @@
 	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
 	__raw_writel(0x00F0, reset_ctrl);
 	__raw_writel(0x00F4, reset_ctrl);
+	dsb();
 }
 
 static void __init realview_pbx_init(void)
@@ -388,7 +389,6 @@
 #ifdef CONFIG_LEDS
 	leds_event = realview_leds_event;
 #endif
-	realview_reset = realview_pbx_reset;
 }
 
 MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
@@ -399,8 +399,10 @@
 	.init_early	= realview_init_early,
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pbx_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= realview_pbx_init,
 #ifdef CONFIG_ZONE_DMA
 	.dma_zone_size	= SZ_256M,
 #endif
+	.restart	= realview_pbx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-rpc/include/mach/system.h b/arch/arm/mach-rpc/include/mach/system.h
index 45c7b93..359bab9 100644
--- a/arch/arm/mach-rpc/include/mach/system.h
+++ b/arch/arm/mach-rpc/include/mach/system.h
@@ -7,21 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/hardware/iomd.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	iomd_writeb(0, IOMD_ROMCR0);
-
-	/*
-	 * Jump into the ROM
-	 */
-	cpu_reset(0);
-}
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
deleted file mode 100644
index fb70022..0000000
--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *  arch/arm/mach-rpc/include/mach/vmalloc.h
- *
- *  Copyright (C) 1997 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END       0xdc000000UL
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 8559598..3d44a59 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -24,6 +24,7 @@
 #include <asm/elf.h>
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
+#include <asm/hardware/iomd.h>
 #include <asm/page.h>
 #include <asm/domain.h>
 #include <asm/setup.h>
@@ -214,6 +215,16 @@
 
 arch_initcall(rpc_init);
 
+static void rpc_restart(char mode, const char *cmd)
+{
+	iomd_writeb(0, IOMD_ROMCR0);
+
+	/*
+	 * Jump into the ROM
+	 */
+	soft_restart(0);
+}
+
 extern struct sys_timer ioc_timer;
 
 MACHINE_START(RISCPC, "Acorn-RiscPC")
@@ -224,4 +235,5 @@
 	.map_io		= rpc_map_io,
 	.init_irq	= rpc_init_irq,
 	.timer		= &ioc_timer,
+	.restart	= rpc_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/bast-irq.c b/arch/arm/mach-s3c2410/bast-irq.c
index bc53d2d..ac7b2ad 100644
--- a/arch/arm/mach-s3c2410/bast-irq.c
+++ b/arch/arm/mach-s3c2410/bast-irq.c
@@ -24,7 +24,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <asm/mach-types.h>
diff --git a/arch/arm/mach-s3c2410/common.h b/arch/arm/mach-s3c2410/common.h
new file mode 100644
index 0000000..f65dc80
--- /dev/null
+++ b/arch/arm/mach-s3c2410/common.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for S3C2410 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C2410_COMMON_H
+#define __ARCH_ARM_MACH_S3C2410_COMMON_H
+
+void s3c2410_restart(char mode, const char *cmd);
+
+#endif /* __ARCH_ARM_MACH_S3C2410_COMMON_H */
diff --git a/arch/arm/mach-s3c2410/cpu-freq.c b/arch/arm/mach-s3c2410/cpu-freq.c
index 75189df..7dc6c46 100644
--- a/arch/arm/mach-s3c2410/cpu-freq.c
+++ b/arch/arm/mach-s3c2410/cpu-freq.c
@@ -16,7 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -115,24 +115,25 @@
 	.debug_io_show	= s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
 };
 
-static int s3c2410_cpufreq_add(struct sys_device *sysdev)
+static int s3c2410_cpufreq_add(struct device *dev)
 {
 	return s3c_cpufreq_register(&s3c2410_cpufreq_info);
 }
 
-static struct sysdev_driver s3c2410_cpufreq_driver = {
-	.add		= s3c2410_cpufreq_add,
+static struct subsys_interface s3c2410_cpufreq_interface = {
+	.name		= "s3c2410_cpufreq",
+	.subsys		= &s3c2410_subsys,
+	.add_dev	= s3c2410_cpufreq_add,
 };
 
 static int __init s3c2410_cpufreq_init(void)
 {
-	return sysdev_driver_register(&s3c2410_sysclass,
-				      &s3c2410_cpufreq_driver);
+	return subsys_interface_register(&s3c2410_cpufreq_interface);
 }
 
 arch_initcall(s3c2410_cpufreq_init);
 
-static int s3c2410a_cpufreq_add(struct sys_device *sysdev)
+static int s3c2410a_cpufreq_add(struct device *dev)
 {
 	/* alter the maximum freq settings for S3C2410A. If a board knows
 	 * it only has a maximum of 200, then it should register its own
@@ -143,17 +144,18 @@
 	s3c2410_cpufreq_info.max.pclk =  66500000;
 	s3c2410_cpufreq_info.name = "s3c2410a";
 
-	return s3c2410_cpufreq_add(sysdev);
+	return s3c2410_cpufreq_add(dev);
 }
 
-static struct sysdev_driver s3c2410a_cpufreq_driver = {
-	.add		= s3c2410a_cpufreq_add,
+static struct subsys_interface s3c2410a_cpufreq_interface = {
+	.name		= "s3c2410a_cpufreq",
+	.subsys		= &s3c2410a_subsys,
+	.add_dev	= s3c2410a_cpufreq_add,
 };
 
 static int __init s3c2410a_cpufreq_init(void)
 {
-	return sysdev_driver_register(&s3c2410a_sysclass,
-				      &s3c2410a_cpufreq_driver);
+	return subsys_interface_register(&s3c2410a_cpufreq_interface);
 }
 
 arch_initcall(s3c2410a_cpufreq_init);
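
The sysdev-to-driver-core conversion in these files follows one pattern: the old sysdev_class becomes a struct bus_type registered with subsys_system_register(), and each former sysdev_driver becomes a struct subsys_interface whose add_dev callback runs when a matching device appears. Condensed sketch with hypothetical mychip_* names (the single-argument add_dev callback is shown as these patches use it):

#include <linux/device.h>
#include <linux/init.h>

struct bus_type mychip_subsys = {
	.name		= "mychip-core",
	.dev_name	= "mychip-core",
};

static int mychip_feature_add(struct device *dev)
{
	/* per-device setup that used to live in sysdev_driver.add */
	return 0;
}

static struct subsys_interface mychip_feature_interface = {
	.name		= "mychip_feature",
	.subsys		= &mychip_subsys,
	.add_dev	= mychip_feature_add,
};

static int __init mychip_core_init(void)
{
	return subsys_system_register(&mychip_subsys, NULL);	/* was sysdev_class_register() */
}
core_initcall(mychip_core_init);

static int __init mychip_feature_init(void)
{
	return subsys_interface_register(&mychip_feature_interface);	/* was sysdev_driver_register() */
}
arch_initcall(mychip_feature_init);
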
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index dbe43df..2afd000 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -14,7 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 
 #include <mach/map.h>
@@ -132,7 +132,7 @@
 	},
 };
 
-static int __init s3c2410_dma_add(struct sys_device *sysdev)
+static int __init s3c2410_dma_add(struct device *dev)
 {
 	s3c2410_dma_init();
 	s3c24xx_dma_order_set(&s3c2410_dma_order);
@@ -140,24 +140,28 @@
 }
 
 #if defined(CONFIG_CPU_S3C2410)
-static struct sysdev_driver s3c2410_dma_driver = {
-	.add	= s3c2410_dma_add,
+static struct subsys_interface s3c2410_dma_interface = {
+	.name		= "s3c2410_dma",
+	.subsys		= &s3c2410_subsys,
+	.add_dev	= s3c2410_dma_add,
 };
 
 static int __init s3c2410_dma_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_dma_driver);
+	return subsys_interface_register(&s3c2410_dma_interface);
 }
 
 arch_initcall(s3c2410_dma_drvinit);
 
-static struct sysdev_driver s3c2410a_dma_driver = {
-	.add	= s3c2410_dma_add,
+static struct subsys_interface s3c2410a_dma_interface = {
+	.name		= "s3c2410a_dma",
+	.subsys		= &s3c2410a_subsys,
+	.add_dev	= s3c2410_dma_add,
 };
 
 static int __init s3c2410a_dma_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_dma_driver);
+	return subsys_interface_register(&s3c2410a_dma_interface);
 }
 
 arch_initcall(s3c2410a_dma_drvinit);
@@ -165,13 +169,15 @@
 
 #if defined(CONFIG_CPU_S3C2442)
 /* S3C2442 DMA contains the same selection table as the S3C2410 */
-static struct sysdev_driver s3c2442_dma_driver = {
-	.add	= s3c2410_dma_add,
+static struct subsys_interface s3c2442_dma_interface = {
+	.name		= "s3c2442_dma",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c2410_dma_add,
 };
 
 static int __init s3c2442_dma_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_dma_driver);
+	return subsys_interface_register(&s3c2442_dma_interface);
 }
 
 arch_initcall(s3c2442_dma_drvinit);
diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h
index ae8e482..acbdfec 100644
--- a/arch/arm/mach-s3c2410/include/mach/dma.h
+++ b/arch/arm/mach-s3c2410/include/mach/dma.h
@@ -13,7 +13,7 @@
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H __FILE__
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 #define MAX_DMA_TRANSFER_SIZE   0x100000 /* Data Unit is half word  */
 
@@ -202,7 +202,7 @@
 	struct s3c2410_dma_buf	*end;		/* end of queue */
 
 	/* system device */
-	struct sys_device	dev;
+	struct device	dev;
 };
 
 typedef unsigned long dma_device_t;
diff --git a/arch/arm/mach-s3c2410/include/mach/reset.h b/arch/arm/mach-s3c2410/include/mach/reset.h
deleted file mode 100644
index f8c9387..0000000
--- a/arch/arm/mach-s3c2410/include/mach/reset.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/reset.h
- *
- * Copyright (c) 2007 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *	http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2410 CPU reset controls
-*/
-
-#ifndef __ASM_ARCH_RESET_H
-#define __ASM_ARCH_RESET_H __FILE__
-
-/* This allows the over-ride of the default reset code
-*/
-
-extern void (*s3c24xx_reset_hook)(void);
-
-#endif /* __ASM_ARCH_RESET_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/system-reset.h b/arch/arm/mach-s3c2410/include/mach/system-reset.h
deleted file mode 100644
index 6faadce..0000000
--- a/arch/arm/mach-s3c2410/include/mach/system-reset.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/system-reset.h
- *
- * Copyright (c) 2008 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - System define for arch_reset() function
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/hardware.h>
-#include <plat/watchdog-reset.h>
-
-extern void (*s3c24xx_reset_hook)(void);
-
-static void
-arch_reset(char mode, const char *cmd)
-{
-	if (mode == 's') {
-		cpu_reset(0);
-	}
-
-	if (s3c24xx_reset_hook)
-		s3c24xx_reset_hook();
-
-	arch_wdt_reset();
-
-	/* we'll take a jump through zero as a poor second */
-	cpu_reset(0);
-}
diff --git a/arch/arm/mach-s3c2410/include/mach/system.h b/arch/arm/mach-s3c2410/include/mach/system.h
index a8cbca6..5e215c1 100644
--- a/arch/arm/mach-s3c2410/include/mach/system.h
+++ b/arch/arm/mach-s3c2410/include/mach/system.h
@@ -15,12 +15,10 @@
 
 #include <mach/map.h>
 #include <mach/idle.h>
-#include <mach/reset.h>
 
 #include <mach/regs-clock.h>
 
 void (*s3c24xx_idle)(void);
-void (*s3c24xx_reset_hook)(void);
 
 void s3c24xx_default_idle(void)
 {
@@ -54,5 +52,3 @@
 	else
 		s3c24xx_default_idle();
 }
-
-#include <mach/system-reset.h>
diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
deleted file mode 100644
index 7a311e8..0000000
--- a/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/vmalloc.h
- *
- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- *		      http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2410 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 7983894..4220cc6 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -63,6 +63,8 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/physmap.h>
 
+#include "common.h"
+
 static struct resource amlm5900_nor_resource = {
 		.start = 0x00000000,
 		.end   = 0x01000000 - 1,
@@ -241,4 +243,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= amlm5900_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index a20ae1a..c6133c6 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -66,6 +66,7 @@
 
 #include "usb-simtec.h"
 #include "nor-simtec.h"
+#include "common.h"
 
 #define COPYRIGHT ", Copyright 2004-2008 Simtec Electronics"
 
@@ -662,4 +663,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= bast_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index 05a7d16..41245a6 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -18,7 +18,7 @@
 #include <linux/memblock.h>
 #include <linux/timer.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -70,6 +70,8 @@
 
 #include <sound/uda1380.h>
 
+#include "common.h"
+
 #define H1940_LATCH		((void __force __iomem *)0xF8000000)
 
 #define H1940_PA_LATCH		S3C2410_CS2
@@ -751,4 +753,5 @@
 	.init_irq	= h1940_init_irq,
 	.init_machine	= h1940_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-n30.c b/arch/arm/mach-s3c2410/mach-n30.c
index 1dc3e32..383d00c 100644
--- a/arch/arm/mach-s3c2410/mach-n30.c
+++ b/arch/arm/mach-s3c2410/mach-n30.c
@@ -51,6 +51,8 @@
 #include <plat/s3c2410.h>
 #include <plat/udc.h>
 
+#include "common.h"
+
 static struct map_desc n30_iodesc[] __initdata = {
 	/* nothing here yet */
 };
@@ -591,6 +593,7 @@
 	.init_machine	= n30_init,
 	.init_irq	= s3c24xx_init_irq,
 	.map_io		= n30_map_io,
+	.restart	= s3c2410_restart,
 MACHINE_END
 
 MACHINE_START(N35, "Acer-N35")
@@ -601,4 +604,5 @@
 	.init_machine	= n30_init,
 	.init_irq	= s3c24xx_init_irq,
 	.map_io		= n30_map_io,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-otom.c b/arch/arm/mach-s3c2410/mach-otom.c
index f03f3fd..5f1e0ee 100644
--- a/arch/arm/mach-s3c2410/mach-otom.c
+++ b/arch/arm/mach-s3c2410/mach-otom.c
@@ -38,6 +38,8 @@
 #include <plat/iic.h>
 #include <plat/cpu.h>
 
+#include "common.h"
+
 static struct map_desc otom11_iodesc[] __initdata = {
   /* Device area */
 	{ (u32)OTOM_VA_CS8900A_BASE, OTOM_PA_CS8900A_BASE, SZ_16M, MT_DEVICE },
@@ -121,4 +123,5 @@
 	.init_machine	= otom11_init,
 	.init_irq	= s3c24xx_init_irq,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index 4518521..91c16d9 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -28,7 +28,7 @@
 #include <linux/timer.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
 #include <linux/spi/spi.h>
@@ -62,6 +62,8 @@
 #include <plat/cpu.h>
 #include <plat/pm.h>
 
+#include "common.h"
+
 static struct map_desc qt2410_iodesc[] __initdata = {
 	{ 0xe0000000, __phys_to_pfn(S3C2410_CS3+0x01000000), SZ_1M, MT_DEVICE }
 };
@@ -350,6 +352,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= qt2410_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
-
-
diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c
index 99c9dfd..bdc27e7 100644
--- a/arch/arm/mach-s3c2410/mach-smdk2410.c
+++ b/arch/arm/mach-s3c2410/mach-smdk2410.c
@@ -54,6 +54,8 @@
 
 #include <plat/common-smdk.h>
 
+#include "common.h"
+
 static struct map_desc smdk2410_iodesc[] __initdata = {
   /* nothing here yet */
 };
@@ -116,6 +118,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= smdk2410_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
-
-
diff --git a/arch/arm/mach-s3c2410/mach-tct_hammer.c b/arch/arm/mach-s3c2410/mach-tct_hammer.c
index e0d0b6f..1114666f 100644
--- a/arch/arm/mach-s3c2410/mach-tct_hammer.c
+++ b/arch/arm/mach-s3c2410/mach-tct_hammer.c
@@ -54,6 +54,8 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/physmap.h>
 
+#include "common.h"
+
 static struct resource tct_hammer_nor_resource = {
 		.start = 0x00000000,
 		.end   = 0x01000000 - 1,
@@ -151,4 +153,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= tct_hammer_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index df47e8e..cc7032b 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -53,6 +53,7 @@
 
 #include "usb-simtec.h"
 #include "nor-simtec.h"
+#include "common.h"
 
 /* macros for virtual address mods for the io space entries */
 #define VA_C5(item) ((unsigned long)(item) + BAST_VAM_CS5)
@@ -405,4 +406,5 @@
 	.init_machine	= vr1000_init,
 	.init_irq	= s3c24xx_init_irq,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2410_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2410/pll.c b/arch/arm/mach-s3c2410/pll.c
index 8338865..c07438b 100644
--- a/arch/arm/mach-s3c2410/pll.c
+++ b/arch/arm/mach-s3c2410/pll.c
@@ -25,7 +25,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/list.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -66,30 +66,34 @@
     { .frequency = 270000000, .index = PLLVAL(127, 1, 1),  },
 };
 
-static int s3c2410_plls_add(struct sys_device *dev)
+static int s3c2410_plls_add(struct device *dev)
 {
 	return s3c_plltab_register(pll_vals_12MHz, ARRAY_SIZE(pll_vals_12MHz));
 }
 
-static struct sysdev_driver s3c2410_plls_drv = {
-	.add	= s3c2410_plls_add,
+static struct subsys_interface s3c2410_plls_interface = {
+	.name		= "s3c2410_plls",
+	.subsys		= &s3c2410_subsys,
+	.add_dev	= s3c2410_plls_add,
 };
 
 static int __init s3c2410_pll_init(void)
 {
-	return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_plls_drv);
+	return subsys_interface_register(&s3c2410_plls_interface);
 
 }
 
 arch_initcall(s3c2410_pll_init);
 
-static struct sysdev_driver s3c2410a_plls_drv = {
-	.add	= s3c2410_plls_add,
+static struct subsys_interface s3c2410a_plls_interface = {
+	.name		= "s3c2410a_plls",
+	.subsys		= &s3c2410a_subsys,
+	.add_dev	= s3c2410_plls_add,
 };
 
 static int __init s3c2410a_pll_init(void)
 {
-	return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_plls_drv);
+	return subsys_interface_register(&s3c2410a_plls_interface);
 }
 
 arch_initcall(s3c2410a_pll_init);
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c
index 4728f9a..fda5385 100644
--- a/arch/arm/mach-s3c2410/pm.c
+++ b/arch/arm/mach-s3c2410/pm.c
@@ -24,7 +24,7 @@
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/time.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
@@ -111,7 +111,7 @@
 	.resume		= s3c2410_pm_resume,
 };
 
-static int s3c2410_pm_add(struct sys_device *dev)
+static int s3c2410_pm_add(struct device *dev)
 {
 	pm_cpu_prep = s3c2410_pm_prepare;
 	pm_cpu_sleep = s3c2410_cpu_suspend;
@@ -120,52 +120,60 @@
 }
 
 #if defined(CONFIG_CPU_S3C2410)
-static struct sysdev_driver s3c2410_pm_driver = {
-	.add		= s3c2410_pm_add,
+static struct subsys_interface s3c2410_pm_interface = {
+	.name		= "s3c2410_pm",
+	.subsys		= &s3c2410_subsys,
+	.add_dev	= s3c2410_pm_add,
 };
 
 /* register ourselves */
 
 static int __init s3c2410_pm_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_pm_driver);
+	return subsys_interface_register(&s3c2410_pm_interface);
 }
 
 arch_initcall(s3c2410_pm_drvinit);
 
-static struct sysdev_driver s3c2410a_pm_driver = {
-	.add		= s3c2410_pm_add,
+static struct subsys_interface s3c2410a_pm_interface = {
+	.name		= "s3c2410a_pm",
+	.subsys		= &s3c2410a_subsys,
+	.add_dev	= s3c2410_pm_add,
 };
 
 static int __init s3c2410a_pm_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_pm_driver);
+	return subsys_interface_register(&s3c2410a_pm_interface);
 }
 
 arch_initcall(s3c2410a_pm_drvinit);
 #endif
 
 #if defined(CONFIG_CPU_S3C2440)
-static struct sysdev_driver s3c2440_pm_driver = {
-	.add		= s3c2410_pm_add,
+static struct subsys_interface s3c2440_pm_interface = {
+	.name		= "s3c2440_pm",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2410_pm_add,
 };
 
 static int __init s3c2440_pm_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_pm_driver);
+	return subsys_interface_register(&s3c2440_pm_interface);
 }
 
 arch_initcall(s3c2440_pm_drvinit);
 #endif
 
 #if defined(CONFIG_CPU_S3C2442)
-static struct sysdev_driver s3c2442_pm_driver = {
-	.add		= s3c2410_pm_add,
+static struct subsys_interface s3c2442_pm_interface = {
+	.name		= "s3c2442_pm",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c2410_pm_add,
 };
 
 static int __init s3c2442_pm_drvinit(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_pm_driver);
+	return subsys_interface_register(&s3c2442_pm_interface);
 }
 
 arch_initcall(s3c2442_pm_drvinit);
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index 3d7ebc5..eea559e 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/gpio.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
@@ -42,6 +42,7 @@
 #include <plat/clock.h>
 #include <plat/pll.h>
 #include <plat/pm.h>
+#include <plat/watchdog-reset.h>
 
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
@@ -131,22 +132,24 @@
 	s3c24xx_register_clock(&s3c2410_armclk);
 }
 
-struct sysdev_class s3c2410_sysclass = {
+struct bus_type s3c2410_subsys = {
 	.name = "s3c2410-core",
+	.dev_name = "s3c2410-core",
 };
 
 /* Note, we would have liked to name this s3c2410-core, but we cannot
- * register two sysdev_class with the same name.
+ * register two subsystems with the same name.
  */
-struct sysdev_class s3c2410a_sysclass = {
+struct bus_type s3c2410a_subsys = {
 	.name = "s3c2410a-core",
+	.dev_name = "s3c2410a-core",
 };
 
-static struct sys_device s3c2410_sysdev = {
-	.cls		= &s3c2410_sysclass,
+static struct device s3c2410_dev = {
+	.bus		= &s3c2410_subsys,
 };
 
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
  * we also need to ensure that it has been initialised before any of the
  * drivers even try to use it (even if not on an s3c2410 based system)
  * as a driver which may support both 2410 and 2440 may try and use it.
@@ -154,14 +157,14 @@
 
 static int __init s3c2410_core_init(void)
 {
-	return sysdev_class_register(&s3c2410_sysclass);
+	return subsys_system_register(&s3c2410_subsys, NULL);
 }
 
 core_initcall(s3c2410_core_init);
 
 static int __init s3c2410a_core_init(void)
 {
-	return sysdev_class_register(&s3c2410a_sysclass);
+	return subsys_system_register(&s3c2410a_subsys, NULL);
 }
 
 core_initcall(s3c2410a_core_init);
@@ -175,11 +178,23 @@
 #endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
-	return sysdev_register(&s3c2410_sysdev);
+	return device_register(&s3c2410_dev);
 }
 
 int __init s3c2410a_init(void)
 {
-	s3c2410_sysdev.cls = &s3c2410a_sysclass;
+	s3c2410_dev.bus = &s3c2410a_subsys;
 	return s3c2410_init();
 }
+
+void s3c2410_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		soft_restart(0);
+	}
+
+	arch_wdt_reset();
+
+	/* we'll take a jump through zero as a poor second */
+	soft_restart(0);
+}
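
The mode character passed to s3c2410_restart() comes from the generic ARM reboot handling: the default is a hard reset, a machine can override it with .restart_mode, and it can be chosen on the kernel command line. Assuming the usual behaviour of this kernel generation:

	reboot=s	# request a soft restart -> soft_restart(0)
	reboot=h	# hard restart (default) -> arch_wdt_reset() via the watchdog
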
diff --git a/arch/arm/mach-s3c2412/clock.c b/arch/arm/mach-s3c2412/clock.c
index 140711d..5168816 100644
--- a/arch/arm/mach-s3c2412/clock.c
+++ b/arch/arm/mach-s3c2412/clock.c
@@ -26,7 +26,7 @@
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
diff --git a/arch/arm/mach-s3c2412/cpu-freq.c b/arch/arm/mach-s3c2412/cpu-freq.c
index eb3ea17..d8664b7 100644
--- a/arch/arm/mach-s3c2412/cpu-freq.c
+++ b/arch/arm/mach-s3c2412/cpu-freq.c
@@ -16,7 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -194,7 +194,7 @@
 	.debug_io_show  = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
 };
 
-static int s3c2412_cpufreq_add(struct sys_device *sysdev)
+static int s3c2412_cpufreq_add(struct device *dev)
 {
 	unsigned long fclk_rate;
 
@@ -244,14 +244,15 @@
 	return -ENOENT;
 }
 
-static struct sysdev_driver s3c2412_cpufreq_driver = {
-	.add		= s3c2412_cpufreq_add,
+static struct subsys_interface s3c2412_cpufreq_interface = {
+	.name		= "s3c2412_cpufreq",
+	.subsys		= &s3c2412_subsys,
+	.add_dev	= s3c2412_cpufreq_add,
 };
 
 static int s3c2412_cpufreq_init(void)
 {
-	return sysdev_driver_register(&s3c2412_sysclass,
-				      &s3c2412_cpufreq_driver);
+	return subsys_interface_register(&s3c2412_cpufreq_interface);
 }
 
 arch_initcall(s3c2412_cpufreq_init);
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index d2a7d5ef..142acd3 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -14,7 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>
 
@@ -159,19 +159,21 @@
 	.map_size	= ARRAY_SIZE(s3c2412_dma_mappings),
 };
 
-static int __init s3c2412_dma_add(struct sys_device *sysdev)
+static int __init s3c2412_dma_add(struct device *dev)
 {
 	s3c2410_dma_init();
 	return s3c24xx_dma_init_map(&s3c2412_dma_sel);
 }
 
-static struct sysdev_driver s3c2412_dma_driver = {
-	.add	= s3c2412_dma_add,
+static struct subsys_interface s3c2412_dma_interface = {
+	.name		= "s3c2412_dma",
+	.subsys		= &s3c2412_subsys,
+	.add_dev	= s3c2412_dma_add,
 };
 
 static int __init s3c2412_dma_init(void)
 {
-	return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_dma_driver);
+	return subsys_interface_register(&s3c2412_dma_interface);
 }
 
 arch_initcall(s3c2412_dma_init);
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c
index 1a1aa22..a8a46c1 100644
--- a/arch/arm/mach-s3c2412/irq.c
+++ b/arch/arm/mach-s3c2412/irq.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -170,7 +170,7 @@
 
 static struct irq_chip s3c2412_irq_rtc_chip;
 
-static int s3c2412_irq_add(struct sys_device *sysdev)
+static int s3c2412_irq_add(struct device *dev)
 {
 	unsigned int irqno;
 
@@ -200,13 +200,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2412_irq_driver = {
-	.add		= s3c2412_irq_add,
+static struct subsys_interface s3c2412_irq_interface = {
+	.name		= "s3c2412_irq",
+	.subsys		= &s3c2412_subsys,
+	.add_dev	= s3c2412_irq_add,
 };
 
 static int s3c2412_irq_init(void)
 {
-	return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_irq_driver);
+	return subsys_interface_register(&s3c2412_irq_interface);
 }
 
 arch_initcall(s3c2412_irq_init);
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 286ef17..ae73ba3 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -48,6 +48,7 @@
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
 
+#include <plat/s3c2412.h>
 #include <plat/gpio-cfg.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
@@ -661,4 +662,5 @@
 	.map_io		= jive_map_io,
 	.init_machine	= jive_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2412_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c
index f1eec1b..b11451b 100644
--- a/arch/arm/mach-s3c2412/mach-smdk2413.c
+++ b/arch/arm/mach-s3c2412/mach-smdk2413.c
@@ -134,6 +134,7 @@
 	.map_io		= smdk2413_map_io,
 	.init_machine	= smdk2413_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2412_restart,
 MACHINE_END
 
 MACHINE_START(SMDK2412, "SMDK2412")
@@ -145,6 +146,7 @@
 	.map_io		= smdk2413_map_io,
 	.init_machine	= smdk2413_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2412_restart,
 MACHINE_END
 
 MACHINE_START(SMDK2413, "SMDK2413")
@@ -156,4 +158,5 @@
 	.map_io		= smdk2413_map_io,
 	.init_machine	= smdk2413_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2412_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2412/mach-vstms.c b/arch/arm/mach-s3c2412/mach-vstms.c
index 1bbb1ef..94bfaa1 100644
--- a/arch/arm/mach-s3c2412/mach-vstms.c
+++ b/arch/arm/mach-s3c2412/mach-vstms.c
@@ -162,4 +162,5 @@
 	.init_machine	= vstms_init,
 	.map_io		= vstms_map_io,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2412_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c
index f4077ef..d1adfa6 100644
--- a/arch/arm/mach-s3c2412/pm.c
+++ b/arch/arm/mach-s3c2412/pm.c
@@ -16,7 +16,7 @@
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -56,7 +56,7 @@
 {
 }
 
-static int s3c2412_pm_add(struct sys_device *sysdev)
+static int s3c2412_pm_add(struct device *dev)
 {
 	pm_cpu_prep = s3c2412_pm_prepare;
 	pm_cpu_sleep = s3c2412_cpu_suspend;
@@ -87,13 +87,15 @@
 	SAVE_ITEM(S3C2413_GPJSLPCON),
 };
 
-static struct sysdev_driver s3c2412_pm_driver = {
-	.add		= s3c2412_pm_add,
+static struct subsys_interface s3c2412_pm_interface = {
+	.name		= "s3c2412_pm",
+	.subsys		= &s3c2412_subsys,
+	.add_dev	= s3c2412_pm_add,
 };
 
 static __init int s3c2412_pm_init(void)
 {
-	return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver);
+	return subsys_interface_register(&s3c2412_pm_interface);
 }
 
 arch_initcall(s3c2412_pm_init);
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index 57a1e01..aff6e85 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
@@ -32,7 +32,6 @@
 #include <asm/proc-fns.h>
 #include <asm/irq.h>
 
-#include <mach/reset.h>
 #include <mach/idle.h>
 
 #include <plat/cpu-freq.h>
@@ -131,8 +130,11 @@
 	cpu_do_idle();
 }
 
-static void s3c2412_hard_reset(void)
+void s3c2412_restart(char mode, const char *cmd)
 {
+	if (mode == 's')
+		soft_restart(0);
+
 	/* errata "Watch-dog/Software Reset Problem" specifies that
 	 * this reset must be done with the SYSCLK sourced from
 	 * EXTCLK instead of FOUT to avoid a glitch in the reset
@@ -164,10 +166,6 @@
 
 	s3c24xx_idle = s3c2412_idle;
 
-	/* set custom reset hook */
-
-	s3c24xx_reset_hook = s3c2412_hard_reset;
-
 	/* register our io-tables */
 
 	iotable_init(s3c2412_iodesc, ARRAY_SIZE(s3c2412_iodesc));
@@ -220,25 +218,26 @@
 	s3c2412_baseclk_add();
 }
 
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
  * we also need to ensure that it has been initialised before any of the
  * drivers even try to use it (even if not on an s3c2412 based system)
  * as a driver which may support both 2410 and 2440 may try and use it.
 */
 
-struct sysdev_class s3c2412_sysclass = {
+struct bus_type s3c2412_subsys = {
 	.name = "s3c2412-core",
+	.dev_name = "s3c2412-core",
 };
 
 static int __init s3c2412_core_init(void)
 {
-	return sysdev_class_register(&s3c2412_sysclass);
+	return subsys_system_register(&s3c2412_subsys, NULL);
 }
 
 core_initcall(s3c2412_core_init);
 
-static struct sys_device s3c2412_sysdev = {
-	.cls		= &s3c2412_sysclass,
+static struct device s3c2412_dev = {
+	.bus		= &s3c2412_subsys,
 };
 
 int __init s3c2412_init(void)
@@ -250,5 +249,5 @@
 #endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
-	return sysdev_register(&s3c2412_sysdev);
+	return device_register(&s3c2412_dev);
 }
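
The class side of the conversion is shown in full here for the S3C2412 and repeats for the 2416/2440/2442/2443 below: the sysdev_class becomes a struct bus_type registered with subsys_system_register() at core_initcall time, and the singleton sys_device becomes an ordinary struct device on that bus, registered from the SoC init path. The ordering is the point — the bus must exist before any subsys_interface attaches to it, and adding the per-SoC device (or registering an interface once the device exists) is what fires the interfaces' .add_dev callbacks, so the clock/irq/pm setup above runs only on the matching SoC. Condensed from the hunks above:

	struct bus_type s3c2412_subsys = {
		.name		= "s3c2412-core",
		.dev_name	= "s3c2412-core",
	};

	static struct device s3c2412_dev = {
		.bus		= &s3c2412_subsys,
	};

	static int __init s3c2412_core_init(void)
	{
		/* core_initcall: create the bus early so drivers can attach interfaces */
		return subsys_system_register(&s3c2412_subsys, NULL);
	}
	core_initcall(s3c2412_core_init);

	/* from s3c2412_init(): adding the device triggers the interfaces' add_dev */
	return device_register(&s3c2412_dev);
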
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 28ad20d..36df761 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -25,7 +25,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -213,7 +213,7 @@
 	return 0;
 }
 
-static int __init s3c2416_irq_add(struct sys_device *sysdev)
+static int __init s3c2416_irq_add(struct device *dev)
 {
 	printk(KERN_INFO "S3C2416: IRQ Support\n");
 
@@ -234,13 +234,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2416_irq_driver = {
-	.add		= s3c2416_irq_add,
+static struct subsys_interface s3c2416_irq_interface = {
+	.name		= "s3c2416_irq",
+	.subsys		= &s3c2416_subsys,
+	.add_dev	= s3c2416_irq_add,
 };
 
 static int __init s3c2416_irq_init(void)
 {
-	return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_irq_driver);
+	return subsys_interface_register(&s3c2416_irq_interface);
 }
 
 arch_initcall(s3c2416_irq_init);
diff --git a/arch/arm/mach-s3c2416/mach-smdk2416.c b/arch/arm/mach-s3c2416/mach-smdk2416.c
index 6345bcb..eebe1e7 100644
--- a/arch/arm/mach-s3c2416/mach-smdk2416.c
+++ b/arch/arm/mach-s3c2416/mach-smdk2416.c
@@ -252,4 +252,5 @@
 	.map_io		= smdk2416_map_io,
 	.init_machine	= smdk2416_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2416_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
index 9ec54f1..3bdb15a 100644
--- a/arch/arm/mach-s3c2416/pm.c
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -10,7 +10,7 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/io.h>
 
@@ -48,7 +48,7 @@
 	__raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
 }
 
-static int s3c2416_pm_add(struct sys_device *sysdev)
+static int s3c2416_pm_add(struct device *dev)
 {
 	pm_cpu_prep = s3c2416_pm_prepare;
 	pm_cpu_sleep = s3c2416_cpu_suspend;
@@ -56,13 +56,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2416_pm_driver = {
-	.add		= s3c2416_pm_add,
+static struct subsys_interface s3c2416_pm_interface = {
+	.name		= "s3c2416_pm",
+	.subsys		= &s3c2416_subsys,
+	.add_dev	= s3c2416_pm_add,
 };
 
 static __init int s3c2416_pm_init(void)
 {
-	return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver);
+	return subsys_interface_register(&s3c2416_pm_interface);
 }
 
 arch_initcall(s3c2416_pm_init);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index ee214bc..5287d28 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -31,7 +31,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/clk.h>
 #include <linux/io.h>
@@ -44,7 +44,6 @@
 #include <asm/proc-fns.h>
 #include <asm/irq.h>
 
-#include <mach/reset.h>
 #include <mach/idle.h>
 #include <mach/regs-s3c2443-clock.h>
 
@@ -68,16 +67,20 @@
 	IODESC_ENT(TIMER),
 };
 
-struct sysdev_class s3c2416_sysclass = {
+struct bus_type s3c2416_subsys = {
 	.name = "s3c2416-core",
+	.dev_name = "s3c2416-core",
 };
 
-static struct sys_device s3c2416_sysdev = {
-	.cls		= &s3c2416_sysclass,
+static struct device s3c2416_dev = {
+	.bus		= &s3c2416_subsys,
 };
 
-static void s3c2416_hard_reset(void)
+void s3c2416_restart(char mode, const char *cmd)
 {
+	if (mode == 's')
+		soft_restart(0);
+
 	__raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
 }
 
@@ -85,7 +88,6 @@
 {
 	printk(KERN_INFO "S3C2416: Initializing architecture\n");
 
-	s3c24xx_reset_hook = s3c2416_hard_reset;
 	/* s3c24xx_idle = s3c2416_idle;	*/
 
 	/* change WDT IRQ number */
@@ -105,7 +107,7 @@
 #endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
-	return sysdev_register(&s3c2416_sysdev);
+	return device_register(&s3c2416_dev);
 }
 
 void __init s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no)
@@ -133,7 +135,7 @@
 	iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
 }
 
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
  * we also need to ensure that it has been initialised before any of the
  * drivers even try to use it (even if not on an s3c2416 based system)
  * as a driver which may support both 2443 and 2440 may try and use it.
@@ -141,7 +143,7 @@
 
 static int __init s3c2416_core_init(void)
 {
-	return sysdev_class_register(&s3c2416_sysclass);
+	return subsys_system_register(&s3c2416_subsys, NULL);
 }
 
 core_initcall(s3c2416_core_init);
diff --git a/arch/arm/mach-s3c2440/clock.c b/arch/arm/mach-s3c2440/clock.c
index f9e6bda..d895759 100644
--- a/arch/arm/mach-s3c2440/clock.c
+++ b/arch/arm/mach-s3c2440/clock.c
@@ -28,7 +28,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/mutex.h>
@@ -108,7 +107,7 @@
 	.ctrlbit	= S3C2440_CLKCON_CAMERA,
 };
 
-static int s3c2440_clk_add(struct sys_device *sysdev)
+static int s3c2440_clk_add(struct device *dev)
 {
 	struct clk *clock_upll;
 	struct clk *clock_h;
@@ -137,13 +136,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2440_clk_driver = {
-	.add	= s3c2440_clk_add,
+static struct subsys_interface s3c2440_clk_interface = {
+	.name		= "s3c2440_clk",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2440_clk_add,
 };
 
-static __init int s3c24xx_clk_driver(void)
+static __init int s3c24xx_clk_init(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_clk_driver);
+	return subsys_interface_register(&s3c2440_clk_interface);
 }
 
-arch_initcall(s3c24xx_clk_driver);
+arch_initcall(s3c24xx_clk_init);
diff --git a/arch/arm/mach-s3c2440/common.h b/arch/arm/mach-s3c2440/common.h
new file mode 100644
index 0000000..db8a98a
--- /dev/null
+++ b/arch/arm/mach-s3c2440/common.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for S3C2440 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C2440_COMMON_H
+#define __ARCH_ARM_MACH_S3C2440_COMMON_H
+
+void s3c2440_restart(char mode, const char *cmd);
+
+#endif /* __ARCH_ARM_MACH_S3C2440_COMMON_H */
diff --git a/arch/arm/mach-s3c2440/dma.c b/arch/arm/mach-s3c2440/dma.c
index 0e73f8f..15b1ddf 100644
--- a/arch/arm/mach-s3c2440/dma.c
+++ b/arch/arm/mach-s3c2440/dma.c
@@ -14,7 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 
 #include <mach/map.h>
@@ -174,20 +174,22 @@
 	},
 };
 
-static int __init s3c2440_dma_add(struct sys_device *sysdev)
+static int __init s3c2440_dma_add(struct device *dev)
 {
 	s3c2410_dma_init();
 	s3c24xx_dma_order_set(&s3c2440_dma_order);
 	return s3c24xx_dma_init_map(&s3c2440_dma_sel);
 }
 
-static struct sysdev_driver s3c2440_dma_driver = {
-	.add	= s3c2440_dma_add,
+static struct subsys_interface s3c2440_dma_interface = {
+	.name		= "s3c2440_dma",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2440_dma_add,
 };
 
 static int __init s3c2440_dma_init(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_dma_driver);
+	return subsys_interface_register(&s3c2440_dma_interface);
 }
 
 arch_initcall(s3c2440_dma_init);
diff --git a/arch/arm/mach-s3c2440/irq.c b/arch/arm/mach-s3c2440/irq.c
index eb1cc0f..4fee9bc 100644
--- a/arch/arm/mach-s3c2440/irq.c
+++ b/arch/arm/mach-s3c2440/irq.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -92,7 +92,7 @@
 	.irq_ack	= s3c_irq_wdtac97_ack,
 };
 
-static int s3c2440_irq_add(struct sys_device *sysdev)
+static int s3c2440_irq_add(struct device *dev)
 {
 	unsigned int irqno;
 
@@ -113,13 +113,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2440_irq_driver = {
-	.add		= s3c2440_irq_add,
+static struct subsys_interface s3c2440_irq_interface = {
+	.name		= "s3c2440_irq",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2440_irq_add,
 };
 
 static int s3c2440_irq_init(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_irq_driver);
+	return subsys_interface_register(&s3c2440_irq_interface);
 }
 
 arch_initcall(s3c2440_irq_init);
diff --git a/arch/arm/mach-s3c2440/mach-anubis.c b/arch/arm/mach-s3c2440/mach-anubis.c
index 74f92fc..121ff8d 100644
--- a/arch/arm/mach-s3c2440/mach-anubis.c
+++ b/arch/arm/mach-s3c2440/mach-anubis.c
@@ -55,6 +55,8 @@
 #include <plat/cpu.h>
 #include <plat/audio-simtec.h>
 
+#include "common.h"
+
 #define COPYRIGHT ", Copyright 2005-2009 Simtec Electronics"
 
 static struct map_desc anubis_iodesc[] __initdata = {
@@ -503,4 +505,5 @@
 	.init_machine	= anubis_init,
 	.init_irq	= s3c24xx_init_irq,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c
index 38887ee..b7e334f 100644
--- a/arch/arm/mach-s3c2440/mach-at2440evb.c
+++ b/arch/arm/mach-s3c2440/mach-at2440evb.c
@@ -49,6 +49,8 @@
 #include <plat/cpu.h>
 #include <plat/mci.h>
 
+#include "common.h"
+
 static struct map_desc at2440evb_iodesc[] __initdata = {
 	/* Nothing here */
 };
@@ -238,4 +240,5 @@
 	.init_machine	= at2440evb_init,
 	.init_irq	= s3c24xx_init_irq,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index de1e0ff..5859e60 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -90,6 +90,7 @@
 #include <plat/iic.h>
 #include <plat/ts.h>
 
+#include "common.h"
 
 static struct pcf50633 *gta02_pcf;
 
@@ -600,4 +601,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= gta02_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 91fe0b4..437322f 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -60,6 +60,8 @@
 
 #include <sound/s3c24xx_uda134x.h>
 
+#include "common.h"
+
 #define MACH_MINI2440_DM9K_BASE (S3C2410_CS4 + 0x300)
 
 static struct map_desc mini2440_iodesc[] __initdata = {
@@ -681,4 +683,5 @@
 	.init_machine	= mini2440_init,
 	.init_irq	= s3c24xx_init_irq,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-nexcoder.c b/arch/arm/mach-s3c2440/mach-nexcoder.c
index 61c0bf1..40eaf84 100644
--- a/arch/arm/mach-s3c2440/mach-nexcoder.c
+++ b/arch/arm/mach-s3c2440/mach-nexcoder.c
@@ -47,6 +47,8 @@
 #include <plat/devs.h>
 #include <plat/cpu.h>
 
+#include "common.h"
+
 static struct map_desc nexcoder_iodesc[] __initdata = {
 	/* nothing here yet */
 };
@@ -156,4 +158,5 @@
 	.init_machine	= nexcoder_init,
 	.init_irq	= s3c24xx_init_irq,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index dc142eb..e795715 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -54,6 +54,8 @@
 #include <plat/devs.h>
 #include <plat/cpu.h>
 
+#include "common.h"
+
 /* onboard perihperal map */
 
 static struct map_desc osiris_iodesc[] __initdata = {
@@ -452,4 +454,5 @@
 	.init_irq	= s3c24xx_init_irq,
 	.init_machine	= osiris_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index 0d3453b..332d753 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -24,7 +24,7 @@
 #include <linux/serial_core.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/pda_power.h>
 #include <linux/pwm_backlight.h>
 #include <linux/pwm.h>
@@ -62,6 +62,8 @@
 
 #include <sound/uda1380.h>
 
+#include "common.h"
+
 #define LCD_PWM_PERIOD 192960
 #define LCD_PWM_DUTY 127353
 
@@ -832,4 +834,5 @@
 	.init_irq = s3c24xx_init_irq,
 	.init_machine = rx1950_init_machine,
 	.timer = &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx3715.c b/arch/arm/mach-s3c2440/mach-rx3715.c
index e19499c..80a0972 100644
--- a/arch/arm/mach-s3c2440/mach-rx3715.c
+++ b/arch/arm/mach-s3c2440/mach-rx3715.c
@@ -20,7 +20,7 @@
 #include <linux/init.h>
 #include <linux/tty.h>
 #include <linux/console.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
@@ -51,6 +51,8 @@
 #include <plat/cpu.h>
 #include <plat/pm.h>
 
+#include "common.h"
+
 static struct map_desc rx3715_iodesc[] __initdata = {
 	/* dump ISA space somewhere unused */
 
@@ -224,4 +226,5 @@
 	.init_irq	= rx3715_init_irq,
 	.init_machine	= rx3715_init_machine,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-smdk2440.c b/arch/arm/mach-s3c2440/mach-smdk2440.c
index 36eeb41..1deb60d 100644
--- a/arch/arm/mach-s3c2440/mach-smdk2440.c
+++ b/arch/arm/mach-s3c2440/mach-smdk2440.c
@@ -47,6 +47,8 @@
 
 #include <plat/common-smdk.h>
 
+#include "common.h"
+
 static struct map_desc smdk2440_iodesc[] __initdata = {
 	/* ISA IO Space map (memory space selected by A24) */
 
@@ -181,4 +183,5 @@
 	.map_io		= smdk2440_map_io,
 	.init_machine	= smdk2440_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2440_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
index 976002f..cf75966 100644
--- a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
+++ b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
@@ -17,7 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -270,7 +270,7 @@
 	.debug_io_show  = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
 };
 
-static int s3c2440_cpufreq_add(struct sys_device *sysdev)
+static int s3c2440_cpufreq_add(struct device *dev)
 {
 	xtal = s3c_cpufreq_clk_get(NULL, "xtal");
 	hclk = s3c_cpufreq_clk_get(NULL, "hclk");
@@ -285,27 +285,29 @@
 	return s3c_cpufreq_register(&s3c2440_cpufreq_info);
 }
 
-static struct sysdev_driver s3c2440_cpufreq_driver = {
-	.add		= s3c2440_cpufreq_add,
+static struct subsys_interface s3c2440_cpufreq_interface = {
+	.name		= "s3c2440_cpufreq",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2440_cpufreq_add,
 };
 
 static int s3c2440_cpufreq_init(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass,
-				      &s3c2440_cpufreq_driver);
+	return subsys_interface_register(&s3c2440_cpufreq_interface);
 }
 
 /* arch_initcall adds the clocks we need, so use subsys_initcall. */
 subsys_initcall(s3c2440_cpufreq_init);
 
-static struct sysdev_driver s3c2442_cpufreq_driver = {
-	.add		= s3c2440_cpufreq_add,
+static struct subsys_interface s3c2442_cpufreq_interface = {
+	.name		= "s3c2442_cpufreq",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c2440_cpufreq_add,
 };
 
 static int s3c2442_cpufreq_init(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass,
-				      &s3c2442_cpufreq_driver);
+	return subsys_interface_register(&s3c2442_cpufreq_interface);
 }
 
 subsys_initcall(s3c2442_cpufreq_init);
diff --git a/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c b/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
index f105d5e..b5368ae 100644
--- a/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
+++ b/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
@@ -14,7 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -51,7 +51,7 @@
 	{ .frequency = 400000000,	.index = PLLVAL(0x5c, 1, 1),  }, 	/* FVco 800.000000 */
 };
 
-static int s3c2440_plls12_add(struct sys_device *dev)
+static int s3c2440_plls12_add(struct device *dev)
 {
 	struct clk *xtal_clk;
 	unsigned long xtal;
@@ -72,25 +72,29 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2440_plls12_drv = {
-	.add	= s3c2440_plls12_add,
+static struct subsys_interface s3c2440_plls12_interface = {
+	.name		= "s3c2440_plls12",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2440_plls12_add,
 };
 
 static int __init s3c2440_pll_12mhz(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_plls12_drv);
+	return subsys_interface_register(&s3c2440_plls12_interface);
 
 }
 
 arch_initcall(s3c2440_pll_12mhz);
 
-static struct sysdev_driver s3c2442_plls12_drv = {
-	.add	= s3c2440_plls12_add,
+static struct subsys_interface s3c2442_plls12_interface = {
+	.name		= "s3c2442_plls12",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c2440_plls12_add,
 };
 
 static int __init s3c2442_pll_12mhz(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_plls12_drv);
+	return subsys_interface_register(&s3c2442_plls12_interface);
 
 }
 
diff --git a/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c b/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
index c8a8f90..42f2b5c 100644
--- a/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
+++ b/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
@@ -14,7 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -79,7 +79,7 @@
 	{ .frequency = 402192000,	.index = PLLVAL(87, 2, 1), 	}, 	/* FVco 804.384000 */
 };
 
-static int s3c2440_plls169344_add(struct sys_device *dev)
+static int s3c2440_plls169344_add(struct device *dev)
 {
 	struct clk *xtal_clk;
 	unsigned long xtal;
@@ -100,28 +100,28 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2440_plls169344_drv = {
-	.add	= s3c2440_plls169344_add,
+static struct subsys_interface s3c2440_plls169344_interface = {
+	.name		= "s3c2440_plls169344",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c2440_plls169344_add,
 };
 
 static int __init s3c2440_pll_16934400(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass,
-				      &s3c2440_plls169344_drv);
-
+	return subsys_interface_register(&s3c2440_plls169344_interface);
 }
 
 arch_initcall(s3c2440_pll_16934400);
 
-static struct sysdev_driver s3c2442_plls169344_drv = {
-	.add	= s3c2440_plls169344_add,
+static struct subsys_interface s3c2442_plls169344_interface = {
+	.name		= "s3c2442_plls169344",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c2440_plls169344_add,
 };
 
 static int __init s3c2442_pll_16934400(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass,
-				      &s3c2442_plls169344_drv);
-
+	return subsys_interface_register(&s3c2442_plls169344_interface);
 }
 
 arch_initcall(s3c2442_pll_16934400);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index 37f8cc6..517623a 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/gpio.h>
 #include <linux/clk.h>
@@ -35,13 +35,14 @@
 #include <plat/cpu.h>
 #include <plat/s3c244x.h>
 #include <plat/pm.h>
+#include <plat/watchdog-reset.h>
 
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
 #include <plat/gpio-cfg-helpers.h>
 
-static struct sys_device s3c2440_sysdev = {
-	.cls		= &s3c2440_sysclass,
+static struct device s3c2440_dev = {
+	.bus		= &s3c2440_subsys,
 };
 
 int __init s3c2440_init(void)
@@ -63,7 +64,7 @@
 
 	/* register our system device for everything else */
 
-	return sysdev_register(&s3c2440_sysdev);
+	return device_register(&s3c2440_dev);
 }
 
 void __init s3c2440_map_io(void)
@@ -73,3 +74,15 @@
 	s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up;
 	s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up;
 }
+
+void s3c2440_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		soft_restart(0);
+	}
+
+	arch_wdt_reset();
+
+	/* we'll take a jump through zero as a poor second */
+	soft_restart(0);
+}
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 2c822e0..8004e04 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -28,7 +28,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/syscore_ops.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -123,7 +122,7 @@
 	},
 };
 
-static int s3c2442_clk_add(struct sys_device *sysdev)
+static int s3c2442_clk_add(struct device *dev)
 {
 	struct clk *clock_upll;
 	struct clk *clock_h;
@@ -149,20 +148,22 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2442_clk_driver = {
-	.add	= s3c2442_clk_add,
+static struct subsys_interface s3c2442_clk_interface = {
+	.name		= "s3c2442_clk",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c2442_clk_add,
 };
 
 static __init int s3c2442_clk_init(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_clk_driver);
+	return subsys_interface_register(&s3c2442_clk_interface);
 }
 
 arch_initcall(s3c2442_clk_init);
 
 
-static struct sys_device s3c2442_sysdev = {
-	.cls		= &s3c2442_sysclass,
+static struct device s3c2442_dev = {
+	.bus		= &s3c2442_subsys,
 };
 
 int __init s3c2442_init(void)
@@ -175,7 +176,7 @@
 	register_syscore_ops(&s3c244x_pm_syscore_ops);
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);
 
-	return sysdev_register(&s3c2442_sysdev);
+	return device_register(&s3c2442_dev);
 }
 
 void __init s3c2442_map_io(void)
diff --git a/arch/arm/mach-s3c2440/s3c244x-clock.c b/arch/arm/mach-s3c2440/s3c244x-clock.c
index 7f5ea0a..b3fdbdd 100644
--- a/arch/arm/mach-s3c2440/s3c244x-clock.c
+++ b/arch/arm/mach-s3c2440/s3c244x-clock.c
@@ -28,7 +28,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/clk.h>
@@ -73,7 +72,7 @@
 	},
 };
 
-static int s3c244x_clk_add(struct sys_device *sysdev)
+static int s3c244x_clk_add(struct device *dev)
 {
 	unsigned long camdivn = __raw_readl(S3C2440_CAMDIVN);
 	unsigned long clkdivn;
@@ -115,24 +114,28 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2440_clk_driver = {
-	.add		= s3c244x_clk_add,
+static struct subsys_interface s3c2440_clk_interface = {
+	.name		= "s3c2440_clk",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c244x_clk_add,
 };
 
 static int s3c2440_clk_init(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_clk_driver);
+	return subsys_interface_register(&s3c2440_clk_interface);
 }
 
 arch_initcall(s3c2440_clk_init);
 
-static struct sysdev_driver s3c2442_clk_driver = {
-	.add		= s3c244x_clk_add,
+static struct subsys_interface s3c2442_clk_interface = {
+	.name		= "s3c2442_clk",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c244x_clk_add,
 };
 
 static int s3c2442_clk_init(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_clk_driver);
+	return subsys_interface_register(&s3c2442_clk_interface);
 }
 
 arch_initcall(s3c2442_clk_init);
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c
index c63e8f2..74d3dcf 100644
--- a/arch/arm/mach-s3c2440/s3c244x-irq.c
+++ b/arch/arm/mach-s3c2440/s3c244x-irq.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -91,7 +91,7 @@
 	.irq_ack	= s3c_irq_cam_ack,
 };
 
-static int s3c244x_irq_add(struct sys_device *sysdev)
+static int s3c244x_irq_add(struct device *dev)
 {
 	unsigned int irqno;
 
@@ -114,25 +114,29 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2440_irq_driver = {
-	.add		= s3c244x_irq_add,
+static struct subsys_interface s3c2440_irq_interface = {
+	.name		= "s3c2440_irq",
+	.subsys		= &s3c2440_subsys,
+	.add_dev	= s3c244x_irq_add,
 };
 
 static int s3c2440_irq_init(void)
 {
-	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_irq_driver);
+	return subsys_interface_register(&s3c2440_irq_interface);
 }
 
 arch_initcall(s3c2440_irq_init);
 
-static struct sysdev_driver s3c2442_irq_driver = {
-	.add		= s3c244x_irq_add,
+static struct subsys_interface s3c2442_irq_interface = {
+	.name		= "s3c2442_irq",
+	.subsys		= &s3c2442_subsys,
+	.add_dev	= s3c244x_irq_add,
 };
 
 
 static int s3c2442_irq_init(void)
 {
-	return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_irq_driver);
+	return subsys_interface_register(&s3c2442_irq_interface);
 }
 
 arch_initcall(s3c2442_irq_init);
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c
index 7e8a23d..36bc60f6 100644
--- a/arch/arm/mach-s3c2440/s3c244x.c
+++ b/arch/arm/mach-s3c2440/s3c244x.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/clk.h>
 #include <linux/io.h>
@@ -135,17 +135,19 @@
 	s3c2410_baseclk_add();
 }
 
-/* Since the S3C2442 and S3C2440 share  items, put both sysclasses here */
+/* Since the S3C2442 and S3C2440 share items, put both subsystems here */
 
-struct sysdev_class s3c2440_sysclass = {
+struct bus_type s3c2440_subsys = {
 	.name		= "s3c2440-core",
+	.dev_name	= "s3c2440-core",
 };
 
-struct sysdev_class s3c2442_sysclass = {
+struct bus_type s3c2442_subsys = {
 	.name		= "s3c2442-core",
+	.dev_name	= "s3c2442-core",
 };
 
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
  * we also need to ensure that it has been initialised before any of the
  * drivers even try to use it (even if not on an s3c2440 based system)
  * as a driver which may support both 2410 and 2440 may try and use it.
@@ -153,14 +155,14 @@
 
 static int __init s3c2440_core_init(void)
 {
-	return sysdev_class_register(&s3c2440_sysclass);
+	return subsys_system_register(&s3c2440_subsys, NULL);
 }
 
 core_initcall(s3c2440_core_init);
 
 static int __init s3c2442_core_init(void)
 {
-	return sysdev_class_register(&s3c2442_sysclass);
+	return subsys_system_register(&s3c2442_subsys, NULL);
 }
 
 core_initcall(s3c2442_core_init);
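
Because the S3C2440 and S3C2442 share most of their support code, the file above registers two subsystems, and the shared clock/irq/pll files earlier in this diff register two subsys_interfaces that point at the same add callback — one per bus — so the common setup runs whichever of the two SoCs registered its core device. In outline:

	/* one shared callback, attached to both buses (see s3c244x-clock.c above) */
	static struct subsys_interface s3c2440_clk_interface = {
		.name		= "s3c2440_clk",
		.subsys		= &s3c2440_subsys,
		.add_dev	= s3c244x_clk_add,
	};

	static struct subsys_interface s3c2442_clk_interface = {
		.name		= "s3c2442_clk",
		.subsys		= &s3c2442_subsys,
		.add_dev	= s3c244x_clk_add,	/* same callback, second bus */
	};
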
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 1c2c088..6dde269 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -27,7 +27,7 @@
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/mutex.h>
 #include <linux/serial_core.h>
diff --git a/arch/arm/mach-s3c2443/dma.c b/arch/arm/mach-s3c2443/dma.c
index fe52151..de6b4a2 100644
--- a/arch/arm/mach-s3c2443/dma.c
+++ b/arch/arm/mach-s3c2443/dma.c
@@ -14,7 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>
 
@@ -135,19 +135,21 @@
 	.map_size	= ARRAY_SIZE(s3c2443_dma_mappings),
 };
 
-static int __init s3c2443_dma_add(struct sys_device *sysdev)
+static int __init s3c2443_dma_add(struct device *dev)
 {
 	s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100);
 	return s3c24xx_dma_init_map(&s3c2443_dma_sel);
 }
 
-static struct sysdev_driver s3c2443_dma_driver = {
-	.add	= s3c2443_dma_add,
+static struct subsys_interface s3c2443_dma_interface = {
+	.name		= "s3c2443_dma",
+	.subsys		= &s3c2443_subsys,
+	.add_dev	= s3c2443_dma_add,
 };
 
 static int __init s3c2443_dma_init(void)
 {
-	return sysdev_driver_register(&s3c2443_sysclass, &s3c2443_dma_driver);
+	return subsys_interface_register(&s3c2443_dma_interface);
 }
 
 arch_initcall(s3c2443_dma_init);
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 83ecb11..35e4ff2 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -241,7 +241,7 @@
 	return 0;
 }
 
-static int __init s3c2443_irq_add(struct sys_device *sysdev)
+static int __init s3c2443_irq_add(struct device *dev)
 {
 	printk("S3C2443: IRQ Support\n");
 
@@ -265,13 +265,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s3c2443_irq_driver = {
-	.add		= s3c2443_irq_add,
+static struct subsys_interface s3c2443_irq_interface = {
+	.name		= "s3c2443_irq",
+	.subsys		= &s3c2443_subsys,
+	.add_dev	= s3c2443_irq_add,
 };
 
 static int __init s3c2443_irq_init(void)
 {
-	return sysdev_driver_register(&s3c2443_sysclass, &s3c2443_irq_driver);
+	return subsys_interface_register(&s3c2443_irq_interface);
 }
 
 arch_initcall(s3c2443_irq_init);
diff --git a/arch/arm/mach-s3c2443/mach-smdk2443.c b/arch/arm/mach-s3c2443/mach-smdk2443.c
index bec107e..2092369 100644
--- a/arch/arm/mach-s3c2443/mach-smdk2443.c
+++ b/arch/arm/mach-s3c2443/mach-smdk2443.c
@@ -145,4 +145,5 @@
 	.map_io		= smdk2443_map_io,
 	.init_machine	= smdk2443_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c2443_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c2443/s3c2443.c b/arch/arm/mach-s3c2443/s3c2443.c
index a22b771..b9deaeb 100644
--- a/arch/arm/mach-s3c2443/s3c2443.c
+++ b/arch/arm/mach-s3c2443/s3c2443.c
@@ -19,7 +19,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 
@@ -31,7 +31,6 @@
 #include <asm/irq.h>
 
 #include <mach/regs-s3c2443-clock.h>
-#include <mach/reset.h>
 
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
@@ -49,16 +48,20 @@
 	IODESC_ENT(TIMER),
 };
 
-struct sysdev_class s3c2443_sysclass = {
+struct bus_type s3c2443_subsys = {
 	.name = "s3c2443-core",
+	.dev_name = "s3c2443-core",
 };
 
-static struct sys_device s3c2443_sysdev = {
-	.cls		= &s3c2443_sysclass,
+static struct device s3c2443_dev = {
+	.bus		= &s3c2443_subsys,
 };
 
-static void s3c2443_hard_reset(void)
+void s3c2443_restart(char mode, const char *cmd)
 {
+	if (mode == 's')
+		soft_restart(0);
+
 	__raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
 }
 
@@ -66,8 +69,6 @@
 {
 	printk("S3C2443: Initialising architecture\n");
 
-	s3c24xx_reset_hook = s3c2443_hard_reset;
-
 	s3c_nand_setname("s3c2412-nand");
 	s3c_fb_setname("s3c2443-fb");
 
@@ -77,7 +78,7 @@
 	s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT;
 	s3c_device_wdt.resource[1].end   = IRQ_S3C2443_WDT;
 
-	return sysdev_register(&s3c2443_sysdev);
+	return device_register(&s3c2443_dev);
 }
 
 void __init s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no)
@@ -99,7 +100,7 @@
 	iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
 }
 
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
  * we also need to ensure that it has been initialised before any of the
  * drivers even try to use it (even if not on an s3c2443 based system)
  * as a driver which may support both 2443 and 2440 may try and use it.
@@ -107,7 +108,7 @@
 
 static int __init s3c2443_core_init(void)
 {
-	return sysdev_class_register(&s3c2443_sysclass);
+	return subsys_system_register(&s3c2443_subsys, NULL);
 }
 
 core_initcall(s3c2443_core_init);
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 5552e04..381586c 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -8,6 +8,7 @@
 	bool
 	depends on ARCH_S3C64XX
 	select SAMSUNG_WAKEMASK
+	select PM_GENERIC_DOMAINS
 	default y
 	help
 	  Base platform code for any Samsung S3C64XX device
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index cfc0b99..f37016c 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -10,54 +10,49 @@
 obj-n				:=
 obj-				:=
 
-# Core files
-obj-y				+= cpu.o
-obj-y				+= clock.o
+# Core
 
-# Core support for S3C6400 system
+obj-y				+= common.o clock.o
+
+# Core support
 
 obj-$(CONFIG_CPU_S3C6400)	+= s3c6400.o
 obj-$(CONFIG_CPU_S3C6410)	+= s3c6410.o
 
-obj-y				+= irq.o
-obj-y				+= irq-eint.o
+# PM
+
+obj-$(CONFIG_PM)		+= pm.o irq-pm.o sleep.o
 
 # DMA support
 
 obj-$(CONFIG_S3C64XX_DMA)	+= dma.o
 
-# Device setup
-
-obj-$(CONFIG_S3C64XX_SETUP_I2C0) += setup-i2c0.o
-obj-$(CONFIG_S3C64XX_SETUP_I2C1) += setup-i2c1.o
-obj-$(CONFIG_S3C64XX_SETUP_IDE) += setup-ide.o
-obj-$(CONFIG_S3C64XX_SETUP_KEYPAD) += setup-keypad.o
-obj-$(CONFIG_S3C64XX_SETUP_SDHCI) += setup-sdhci.o
-obj-$(CONFIG_S3C64XX_SETUP_FB_24BPP) += setup-fb-24bpp.o
-obj-$(CONFIG_S3C64XX_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
-
-# PM
-
-obj-$(CONFIG_PM)		+= pm.o
-obj-$(CONFIG_PM)		+= sleep.o
-obj-$(CONFIG_PM)		+= irq-pm.o
-
-# Machine support
-
-obj-$(CONFIG_MACH_ANW6410)	+= mach-anw6410.o
-obj-$(CONFIG_MACH_SMDK6400)	+= mach-smdk6400.o
-obj-$(CONFIG_MACH_SMDK6410)	+= mach-smdk6410.o
-obj-$(CONFIG_MACH_REAL6410)     += mach-real6410.o
-obj-$(CONFIG_MACH_MINI6410)     += mach-mini6410.o
-obj-$(CONFIG_MACH_NCP)		+= mach-ncp.o
-obj-$(CONFIG_MACH_HMT)		+= mach-hmt.o
-obj-$(CONFIG_MACH_SMARTQ)	+= mach-smartq.o
-obj-$(CONFIG_MACH_SMARTQ5)	+= mach-smartq5.o
-obj-$(CONFIG_MACH_SMARTQ7)	+= mach-smartq7.o
-obj-$(CONFIG_MACH_WLF_CRAGG_6410) += mach-crag6410.o mach-crag6410-module.o
-
-# device support
+# Device support
 
 obj-y				+= dev-uart.o
 obj-y				+= dev-audio.o
 obj-$(CONFIG_S3C64XX_DEV_SPI)	+= dev-spi.o
+
+# Device setup
+
+obj-$(CONFIG_S3C64XX_SETUP_FB_24BPP)	+= setup-fb-24bpp.o
+obj-$(CONFIG_S3C64XX_SETUP_I2C0)	+= setup-i2c0.o
+obj-$(CONFIG_S3C64XX_SETUP_I2C1)	+= setup-i2c1.o
+obj-$(CONFIG_S3C64XX_SETUP_IDE)		+= setup-ide.o
+obj-$(CONFIG_S3C64XX_SETUP_KEYPAD)	+= setup-keypad.o
+obj-$(CONFIG_S3C64XX_SETUP_SDHCI)	+= setup-sdhci.o
+obj-$(CONFIG_S3C64XX_SETUP_SDHCI_GPIO)	+= setup-sdhci-gpio.o
+
+# Machine support
+
+obj-$(CONFIG_MACH_ANW6410)		+= mach-anw6410.o
+obj-$(CONFIG_MACH_HMT)			+= mach-hmt.o
+obj-$(CONFIG_MACH_MINI6410)		+= mach-mini6410.o
+obj-$(CONFIG_MACH_NCP)			+= mach-ncp.o
+obj-$(CONFIG_MACH_REAL6410)		+= mach-real6410.o
+obj-$(CONFIG_MACH_SMARTQ)		+= mach-smartq.o
+obj-$(CONFIG_MACH_SMARTQ5)		+= mach-smartq5.o
+obj-$(CONFIG_MACH_SMARTQ7)		+= mach-smartq7.o
+obj-$(CONFIG_MACH_SMDK6400)		+= mach-smdk6400.o
+obj-$(CONFIG_MACH_SMDK6410)		+= mach-smdk6410.o
+obj-$(CONFIG_MACH_WLF_CRAGG_6410)	+= mach-crag6410.o mach-crag6410-module.o
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 39c238d..625219b 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -705,7 +705,7 @@
 
 #define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
 
-void __init_or_cpufreq s3c6400_setup_clocks(void)
+void __init_or_cpufreq s3c64xx_setup_clocks(void)
 {
 	struct clk *xtal_clk;
 	unsigned long xtal;
@@ -804,7 +804,7 @@
  * as ARMCLK as well as the necessary parent clocks.
  *
  * This call does not setup the clocks, which is left to the
- * s3c6400_setup_clocks() call which may be needed by the cpufreq
+ * s3c64xx_setup_clocks() call which may be needed by the cpufreq
  * or resume code to re-set the clocks if the bootloader has changed
  * them.
  */
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
new file mode 100644
index 0000000..4a7394d
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *	http://armlinux.simtec.co.uk/
+ *
+ * Common Codes for S3C64XX machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/hardware/vic.h>
+
+#include <mach/map.h>
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/pm.h>
+#include <plat/gpio-cfg.h>
+#include <plat/irq-uart.h>
+#include <plat/irq-vic-timer.h>
+#include <plat/regs-irqtype.h>
+#include <plat/regs-serial.h>
+#include <plat/watchdog-reset.h>
+
+#include "common.h"
+
+/* uart registration process */
+
+void __init s3c64xx_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
+}
+
+/* table of supported CPUs */
+
+static const char name_s3c6400[] = "S3C6400";
+static const char name_s3c6410[] = "S3C6410";
+
+static struct cpu_table cpu_ids[] __initdata = {
+	{
+		.idcode		= S3C6400_CPU_ID,
+		.idmask		= S3C64XX_CPU_MASK,
+		.map_io		= s3c6400_map_io,
+		.init_clocks	= s3c6400_init_clocks,
+		.init_uarts	= s3c64xx_init_uarts,
+		.init		= s3c6400_init,
+		.name		= name_s3c6400,
+	}, {
+		.idcode		= S3C6410_CPU_ID,
+		.idmask		= S3C64XX_CPU_MASK,
+		.map_io		= s3c6410_map_io,
+		.init_clocks	= s3c6410_init_clocks,
+		.init_uarts	= s3c64xx_init_uarts,
+		.init		= s3c6410_init,
+		.name		= name_s3c6410,
+	},
+};
+
+/* minimal IO mapping */
+
+/* see notes on uart map in arch/arm/mach-s3c64xx/include/mach/debug-macro.S */
+#define UART_OFFS (S3C_PA_UART & 0xfffff)
+
+static struct map_desc s3c_iodesc[] __initdata = {
+	{
+		.virtual	= (unsigned long)S3C_VA_SYS,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_SYSCON),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_MEM,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_SROM),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)(S3C_VA_UART + UART_OFFS),
+		.pfn		= __phys_to_pfn(S3C_PA_UART),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)VA_VIC0,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC0),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)VA_VIC1,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC1),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_TIMER,
+		.pfn		= __phys_to_pfn(S3C_PA_TIMER),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C64XX_VA_GPIO,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_GPIO),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C64XX_VA_MODEM,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_MODEM),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_WATCHDOG),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
+		.pfn		= __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
+		.length		= SZ_1K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static struct bus_type s3c64xx_subsys = {
+	.name		= "s3c64xx-core",
+	.dev_name	= "s3c64xx-core",
+};
+
+static struct device s3c64xx_dev = {
+	.bus	= &s3c64xx_subsys,
+};
+
+/* read cpu identification code */
+
+void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
+{
+	/* initialise the io descriptors we need for initialisation */
+	iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
+	iotable_init(mach_desc, size);
+	init_consistent_dma_size(SZ_8M);
+
+	/* detect cpu id */
+	s3c64xx_init_cpu();
+
+	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+static __init int s3c64xx_dev_init(void)
+{
+	subsys_system_register(&s3c64xx_subsys, NULL);
+	return device_register(&s3c64xx_dev);
+}
+core_initcall(s3c64xx_dev_init);
+
+/*
+ * setup the sources the vic should advertise resume
+ * for, even though it is not doing the wake
+ * (set_irq_wake needs to be valid)
+ */
+#define IRQ_VIC0_RESUME (1 << (IRQ_RTC_TIC - IRQ_VIC0_BASE))
+#define IRQ_VIC1_RESUME (1 << (IRQ_RTC_ALARM - IRQ_VIC1_BASE) |	\
+			 1 << (IRQ_PENDN - IRQ_VIC1_BASE) |	\
+			 1 << (IRQ_HSMMC0 - IRQ_VIC1_BASE) |	\
+			 1 << (IRQ_HSMMC1 - IRQ_VIC1_BASE) |	\
+			 1 << (IRQ_HSMMC2 - IRQ_VIC1_BASE))
+
+void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
+{
+	printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
+
+	/* initialise the pair of VICs */
+	vic_init(VA_VIC0, IRQ_VIC0_BASE, vic0_valid, IRQ_VIC0_RESUME);
+	vic_init(VA_VIC1, IRQ_VIC1_BASE, vic1_valid, IRQ_VIC1_RESUME);
+
+	/* add the timer sub-irqs */
+	s3c_init_vic_timer_irq(5, IRQ_TIMER0);
+}
+
+#define eint_offset(irq)	((irq) - IRQ_EINT(0))
+#define eint_irq_to_bit(irq)	((u32)(1 << eint_offset(irq)))
+
+static inline void s3c_irq_eint_mask(struct irq_data *data)
+{
+	u32 mask;
+
+	mask = __raw_readl(S3C64XX_EINT0MASK);
+	mask |= (u32)data->chip_data;
+	__raw_writel(mask, S3C64XX_EINT0MASK);
+}
+
+static void s3c_irq_eint_unmask(struct irq_data *data)
+{
+	u32 mask;
+
+	mask = __raw_readl(S3C64XX_EINT0MASK);
+	mask &= ~((u32)data->chip_data);
+	__raw_writel(mask, S3C64XX_EINT0MASK);
+}
+
+static inline void s3c_irq_eint_ack(struct irq_data *data)
+{
+	__raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND);
+}
+
+static void s3c_irq_eint_maskack(struct irq_data *data)
+{
+	/* compiler should in-line these */
+	s3c_irq_eint_mask(data);
+	s3c_irq_eint_ack(data);
+}
+
+static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+	int offs = eint_offset(data->irq);
+	int pin, pin_val;
+	int shift;
+	u32 ctrl, mask;
+	u32 newvalue = 0;
+	void __iomem *reg;
+
+	if (offs > 27)
+		return -EINVAL;
+
+	if (offs <= 15)
+		reg = S3C64XX_EINT0CON0;
+	else
+		reg = S3C64XX_EINT0CON1;
+
+	switch (type) {
+	case IRQ_TYPE_NONE:
+		printk(KERN_WARNING "No edge setting!\n");
+		break;
+
+	case IRQ_TYPE_EDGE_RISING:
+		newvalue = S3C2410_EXTINT_RISEEDGE;
+		break;
+
+	case IRQ_TYPE_EDGE_FALLING:
+		newvalue = S3C2410_EXTINT_FALLEDGE;
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		newvalue = S3C2410_EXTINT_BOTHEDGE;
+		break;
+
+	case IRQ_TYPE_LEVEL_LOW:
+		newvalue = S3C2410_EXTINT_LOWLEV;
+		break;
+
+	case IRQ_TYPE_LEVEL_HIGH:
+		newvalue = S3C2410_EXTINT_HILEV;
+		break;
+
+	default:
+		printk(KERN_ERR "No such irq type %d", type);
+		return -1;
+	}
+
+	if (offs <= 15)
+		shift = (offs / 2) * 4;
+	else
+		shift = ((offs - 16) / 2) * 4;
+	mask = 0x7 << shift;
+
+	ctrl = __raw_readl(reg);
+	ctrl &= ~mask;
+	ctrl |= newvalue << shift;
+	__raw_writel(ctrl, reg);
+
+	/* set the GPIO pin appropriately */
+
+	if (offs < 16) {
+		pin = S3C64XX_GPN(offs);
+		pin_val = S3C_GPIO_SFN(2);
+	} else if (offs < 23) {
+		pin = S3C64XX_GPL(offs + 8 - 16);
+		pin_val = S3C_GPIO_SFN(3);
+	} else {
+		pin = S3C64XX_GPM(offs - 23);
+		pin_val = S3C_GPIO_SFN(3);
+	}
+
+	s3c_gpio_cfgpin(pin, pin_val);
+
+	return 0;
+}
+
+static struct irq_chip s3c_irq_eint = {
+	.name		= "s3c-eint",
+	.irq_mask	= s3c_irq_eint_mask,
+	.irq_unmask	= s3c_irq_eint_unmask,
+	.irq_mask_ack	= s3c_irq_eint_maskack,
+	.irq_ack	= s3c_irq_eint_ack,
+	.irq_set_type	= s3c_irq_eint_set_type,
+	.irq_set_wake	= s3c_irqext_wake,
+};
+
+/* s3c_irq_demux_eint
+ *
+ * This function demuxes the IRQ from the group0 external interrupts,
+ * from IRQ_EINT(0) to IRQ_EINT(27). It is designed to be inlined into
+ * the specific handlers s3c_irq_demux_eintX_Y.
+ */
+static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
+{
+	u32 status = __raw_readl(S3C64XX_EINT0PEND);
+	u32 mask = __raw_readl(S3C64XX_EINT0MASK);
+	unsigned int irq;
+
+	status &= ~mask;
+	status >>= start;
+	status &= (1 << (end - start + 1)) - 1;
+
+	for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
+		if (status & 1)
+			generic_handle_irq(irq);
+
+		status >>= 1;
+	}
+}
+
+static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+{
+	s3c_irq_demux_eint(0, 3);
+}
+
+static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+{
+	s3c_irq_demux_eint(4, 11);
+}
+
+static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+{
+	s3c_irq_demux_eint(12, 19);
+}
+
+static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+{
+	s3c_irq_demux_eint(20, 27);
+}
+
+static int __init s3c64xx_init_irq_eint(void)
+{
+	int irq;
+
+	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
+		irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
+		irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
+		set_irq_flags(irq, IRQF_VALID);
+	}
+
+	irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
+	irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11);
+	irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19);
+	irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27);
+
+	return 0;
+}
+arch_initcall(s3c64xx_init_irq_eint);
+
+void s3c64xx_restart(char mode, const char *cmd)
+{
+	if (mode != 's')
+		arch_wdt_reset();
+
+	/* if all else fails, or mode was for soft, jump to 0 */
+	soft_restart(0);
+}
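
One detail of the external-interrupt code in the new common.c that is easy to miss: S3C64XX_EINT0CON0/EINT0CON1 pack one 3-bit trigger-type field per pair of external interrupts, with the fields spaced four bits apart, which is where the (offs / 2) * 4 shift in s3c_irq_eint_set_type() comes from. A small illustrative helper restating that arithmetic (not part of the patch):

	/* EINT10 and EINT11 share one field: (10 / 2) * 4 = 20 -> bits 22:20 of EINT0CON0.
	 * EINT20 falls in the second register: ((20 - 16) / 2) * 4 = 8 -> bits 10:8 of EINT0CON1. */
	static void eint_field(int offs, int *in_con1, int *shift)
	{
		*in_con1 = offs > 15;
		*shift   = ((*in_con1 ? offs - 16 : offs) / 2) * 4;	/* mask is 0x7 << *shift */
	}
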
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
new file mode 100644
index 0000000..5eb9c9a
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *	http://armlinux.simtec.co.uk/
+ *
+ * Common Header for S3C64XX machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C64XX_COMMON_H
+#define __ARCH_ARM_MACH_S3C64XX_COMMON_H
+
+void s3c64xx_init_irq(u32 vic0, u32 vic1);
+void s3c64xx_init_io(struct map_desc *mach_desc, int size);
+
+void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
+void s3c64xx_setup_clocks(void);
+
+void s3c64xx_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c64xx_irq_syscore_ops;
+
+#ifdef CONFIG_CPU_S3C6400
+
+extern  int s3c6400_init(void);
+extern void s3c6400_init_irq(void);
+extern void s3c6400_map_io(void);
+extern void s3c6400_init_clocks(int xtal);
+
+#else
+#define s3c6400_init_clocks NULL
+#define s3c6400_map_io NULL
+#define s3c6400_init NULL
+#endif
+
+#ifdef CONFIG_CPU_S3C6410
+
+extern  int s3c6410_init(void);
+extern void s3c6410_init_irq(void);
+extern void s3c6410_map_io(void);
+extern void s3c6410_init_clocks(int xtal);
+
+#else
+#define s3c6410_init_clocks NULL
+#define s3c6410_map_io NULL
+#define s3c6410_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
diff --git a/arch/arm/mach-s3c64xx/cpu.c b/arch/arm/mach-s3c64xx/cpu.c
deleted file mode 100644
index de085b7..0000000
--- a/arch/arm/mach-s3c64xx/cpu.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* linux/arch/arm/plat-s3c64xx/cpu.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *	http://armlinux.simtec.co.uk/
- *
- * S3C64XX CPU Support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/sysdev.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <plat/regs-serial.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-
-#include <plat/s3c6400.h>
-#include <plat/s3c6410.h>
-
-/* table of supported CPUs */
-
-static const char name_s3c6400[] = "S3C6400";
-static const char name_s3c6410[] = "S3C6410";
-
-static struct cpu_table cpu_ids[] __initdata = {
-	{
-		.idcode		= S3C6400_CPU_ID,
-		.idmask		= S3C64XX_CPU_MASK,
-		.map_io		= s3c6400_map_io,
-		.init_clocks	= s3c6400_init_clocks,
-		.init_uarts	= s3c6400_init_uarts,
-		.init		= s3c6400_init,
-		.name		= name_s3c6400,
-	}, {
-		.idcode		= S3C6410_CPU_ID,
-		.idmask		= S3C64XX_CPU_MASK,
-		.map_io		= s3c6410_map_io,
-		.init_clocks	= s3c6410_init_clocks,
-		.init_uarts	= s3c6410_init_uarts,
-		.init		= s3c6410_init,
-		.name		= name_s3c6410,
-	},
-};
-
-/* minimal IO mapping */
-
-/* see notes on uart map in arch/arm/mach-s3c6400/include/mach/debug-macro.S */
-#define UART_OFFS (S3C_PA_UART & 0xfffff)
-
-static struct map_desc s3c_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S3C_VA_SYS,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_SYSCON),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_MEM,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_SROM),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)(S3C_VA_UART + UART_OFFS),
-		.pfn		= __phys_to_pfn(S3C_PA_UART),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)VA_VIC0,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC0),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)VA_VIC1,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_VIC1),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_TIMER,
-		.pfn		= __phys_to_pfn(S3C_PA_TIMER),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C64XX_VA_GPIO,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_GPIO),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C64XX_VA_MODEM,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_MODEM),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_WATCHDOG),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
-		.pfn		= __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
-		.length		= SZ_1K,
-		.type		= MT_DEVICE,
-	},
-};
-
-
-struct sysdev_class s3c64xx_sysclass = {
-	.name	= "s3c64xx-core",
-};
-
-static struct sys_device s3c64xx_sysdev = {
-	.cls	= &s3c64xx_sysclass,
-};
-
-/* uart registration process */
-
-void __init s3c6400_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
-}
-
-/* read cpu identification code */
-
-void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
-{
-	/* initialise the io descriptors we need for initialisation */
-	iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
-	iotable_init(mach_desc, size);
-	init_consistent_dma_size(SZ_8M);
-
-	/* detect cpu id */
-	s3c64xx_init_cpu();
-
-	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
-}
-
-static __init int s3c64xx_sysdev_init(void)
-{
-	sysdev_class_register(&s3c64xx_sysclass);
-	return sysdev_register(&s3c64xx_sysdev);
-}
-
-core_initcall(s3c64xx_sysdev_init);
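
The cpu_table entries removed above are matched against the SoC ID register at boot; the selected entry supplies the map_io/init_clocks/init_uarts/init callbacks. A minimal sketch of the lookup that s3c_init_cpu() is assumed to perform (only the field names come from the table above, the loop itself is illustrative):

/* Illustrative only: pick the per-SoC callbacks by masking the ID code. */
static struct cpu_table * __init cpu_lookup(unsigned long idcode,
					    struct cpu_table *tab,
					    unsigned int count)
{
	for (; count != 0; count--, tab++)
		if ((idcode & tab->idmask) == (tab->idcode & tab->idmask))
			return tab;	/* e.g. S3C6410_CPU_ID under S3C64XX_CPU_MASK */

	return NULL;	/* unknown silicon, caller reports an error */
}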
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 17d62f4..f2a7a17 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -16,7 +16,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -35,7 +35,7 @@
 /* dma channel state information */
 
 struct s3c64xx_dmac {
-	struct sys_device	 sysdev;
+	struct device		dev;
 	struct clk		*clk;
 	void __iomem		*regs;
 	struct s3c2410_dma_chan *channels;
@@ -631,8 +631,9 @@
 	return IRQ_HANDLED;
 }
 
-static struct sysdev_class dma_sysclass = {
+static struct bus_type dma_subsys = {
 	.name		= "s3c64xx-dma",
+	.dev_name	= "s3c64xx-dma",
 };
 
 static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
@@ -651,12 +652,12 @@
 		return -ENOMEM;
 	}
 
-	dmac->sysdev.id = chno / 8;
-	dmac->sysdev.cls = &dma_sysclass;
+	dmac->dev.id = chno / 8;
+	dmac->dev.bus = &dma_subsys;
 
-	err = sysdev_register(&dmac->sysdev);
+	err = device_register(&dmac->dev);
 	if (err) {
-		printk(KERN_ERR "%s: failed to register sysdevice\n", __func__);
+		printk(KERN_ERR "%s: failed to register device\n", __func__);
 		goto err_alloc;
 	}
 
@@ -667,7 +668,7 @@
 		goto err_dev;
 	}
 
-	snprintf(clkname, sizeof(clkname), "dma%d", dmac->sysdev.id);
+	snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
 
 	dmac->clk = clk_get(NULL, clkname);
 	if (IS_ERR(dmac->clk)) {
@@ -715,7 +716,7 @@
 err_map:
 	iounmap(regs);
 err_dev:
-	sysdev_unregister(&dmac->sysdev);
+	device_unregister(&dmac->dev);
 err_alloc:
 	kfree(dmac);
 	return err;
@@ -733,9 +734,9 @@
 		return -ENOMEM;
 	}
 
-	ret = sysdev_class_register(&dma_sysclass);
+	ret = subsys_system_register(&dma_subsys, NULL);
 	if (ret) {
-		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
+		printk(KERN_ERR "%s: failed to create subsys\n", __func__);
 		return -ENOMEM;
 	}
 
diff --git a/arch/arm/mach-s3c64xx/include/mach/entry-macro.S b/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
index dd36260..dc2bc15 100644
--- a/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
@@ -12,7 +12,8 @@
  * warranty of any kind, whether express or implied.
 */
 
-#include <mach/map.h>
-#include <mach/irqs.h>
+		.macro  disable_fiq
+		.endm
 
-#include <asm/entry-macro-vic2.S>
+		.macro  arch_ret_to_user, tmp1, tmp2
+		.endm
diff --git a/arch/arm/mach-s3c64xx/include/mach/system.h b/arch/arm/mach-s3c64xx/include/mach/system.h
index 2e58cb7..353ed43 100644
--- a/arch/arm/mach-s3c64xx/include/mach/system.h
+++ b/arch/arm/mach-s3c64xx/include/mach/system.h
@@ -11,20 +11,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H __FILE__
 
-#include <plat/watchdog-reset.h>
-
 static void arch_idle(void)
 {
 	/* nothing here yet */
 }
 
-static void arch_reset(char mode, const char *cmd)
-{
-	if (mode != 's')
-		arch_wdt_reset();
-
-	/* if all else fails, or mode was for soft, jump to 0 */
-	cpu_reset(0);
-}
-
 #endif /* __ASM_ARCH_IRQ_H */
diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
deleted file mode 100644
index 23f75e5..0000000
--- a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-s3c64xx/include/mach/vmalloc.h
- *
- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- *		      http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/irq-eint.c b/arch/arm/mach-s3c64xx/irq-eint.c
deleted file mode 100644
index 4d203be..0000000
--- a/arch/arm/mach-s3c64xx/irq-eint.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* arch/arm/plat-s3c64xx/irq-eint.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * S3C64XX - Interrupt handling for IRQ_EINT(x)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/sysdev.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/hardware/vic.h>
-
-#include <plat/regs-irqtype.h>
-#include <mach/regs-gpio.h>
-#include <plat/gpio-cfg.h>
-
-#include <mach/map.h>
-#include <plat/cpu.h>
-#include <plat/pm.h>
-
-#define eint_offset(irq)	((irq) - IRQ_EINT(0))
-#define eint_irq_to_bit(irq)	((u32)(1 << eint_offset(irq)))
-
-static inline void s3c_irq_eint_mask(struct irq_data *data)
-{
-	u32 mask;
-
-	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask |= (u32)data->chip_data;
-	__raw_writel(mask, S3C64XX_EINT0MASK);
-}
-
-static void s3c_irq_eint_unmask(struct irq_data *data)
-{
-	u32 mask;
-
-	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask &= ~((u32)data->chip_data);
-	__raw_writel(mask, S3C64XX_EINT0MASK);
-}
-
-static inline void s3c_irq_eint_ack(struct irq_data *data)
-{
-	__raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND);
-}
-
-static void s3c_irq_eint_maskack(struct irq_data *data)
-{
-	/* compiler should in-line these */
-	s3c_irq_eint_mask(data);
-	s3c_irq_eint_ack(data);
-}
-
-static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
-	int offs = eint_offset(data->irq);
-	int pin, pin_val;
-	int shift;
-	u32 ctrl, mask;
-	u32 newvalue = 0;
-	void __iomem *reg;
-
-	if (offs > 27)
-		return -EINVAL;
-
-	if (offs <= 15)
-		reg = S3C64XX_EINT0CON0;
-	else
-		reg = S3C64XX_EINT0CON1;
-
-	switch (type) {
-	case IRQ_TYPE_NONE:
-		printk(KERN_WARNING "No edge setting!\n");
-		break;
-
-	case IRQ_TYPE_EDGE_RISING:
-		newvalue = S3C2410_EXTINT_RISEEDGE;
-		break;
-
-	case IRQ_TYPE_EDGE_FALLING:
-		newvalue = S3C2410_EXTINT_FALLEDGE;
-		break;
-
-	case IRQ_TYPE_EDGE_BOTH:
-		newvalue = S3C2410_EXTINT_BOTHEDGE;
-		break;
-
-	case IRQ_TYPE_LEVEL_LOW:
-		newvalue = S3C2410_EXTINT_LOWLEV;
-		break;
-
-	case IRQ_TYPE_LEVEL_HIGH:
-		newvalue = S3C2410_EXTINT_HILEV;
-		break;
-
-	default:
-		printk(KERN_ERR "No such irq type %d", type);
-		return -1;
-	}
-
-	if (offs <= 15)
-		shift = (offs / 2) * 4;
-	else
-		shift = ((offs - 16) / 2) * 4;
-	mask = 0x7 << shift;
-
-	ctrl = __raw_readl(reg);
-	ctrl &= ~mask;
-	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, reg);
-
-	/* set the GPIO pin appropriately */
-
-	if (offs < 16) {
-		pin = S3C64XX_GPN(offs);
-		pin_val = S3C_GPIO_SFN(2);
-	} else if (offs < 23) {
-		pin = S3C64XX_GPL(offs + 8 - 16);
-		pin_val = S3C_GPIO_SFN(3);
-	} else {
-		pin = S3C64XX_GPM(offs - 23);
-		pin_val = S3C_GPIO_SFN(3);
-	}
-
-	s3c_gpio_cfgpin(pin, pin_val);
-
-	return 0;
-}
-
-static struct irq_chip s3c_irq_eint = {
-	.name		= "s3c-eint",
-	.irq_mask	= s3c_irq_eint_mask,
-	.irq_unmask	= s3c_irq_eint_unmask,
-	.irq_mask_ack	= s3c_irq_eint_maskack,
-	.irq_ack	= s3c_irq_eint_ack,
-	.irq_set_type	= s3c_irq_eint_set_type,
-	.irq_set_wake	= s3c_irqext_wake,
-};
-
-/* s3c_irq_demux_eint
- *
- * This function demuxes the IRQ from the group0 external interrupts,
- * from IRQ_EINT(0) to IRQ_EINT(27). It is designed to be inlined into
- * the specific handlers s3c_irq_demux_eintX_Y.
- */
-static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
-{
-	u32 status = __raw_readl(S3C64XX_EINT0PEND);
-	u32 mask = __raw_readl(S3C64XX_EINT0MASK);
-	unsigned int irq;
-
-	status &= ~mask;
-	status >>= start;
-	status &= (1 << (end - start + 1)) - 1;
-
-	for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
-		if (status & 1)
-			generic_handle_irq(irq);
-
-		status >>= 1;
-	}
-}
-
-static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
-{
-	s3c_irq_demux_eint(0, 3);
-}
-
-static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
-{
-	s3c_irq_demux_eint(4, 11);
-}
-
-static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
-{
-	s3c_irq_demux_eint(12, 19);
-}
-
-static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
-{
-	s3c_irq_demux_eint(20, 27);
-}
-
-static int __init s3c64xx_init_irq_eint(void)
-{
-	int irq;
-
-	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
-		irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
-		irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
-		set_irq_flags(irq, IRQF_VALID);
-	}
-
-	irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
-	irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11);
-	irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19);
-	irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27);
-
-	return 0;
-}
-
-arch_initcall(s3c64xx_init_irq_eint);
diff --git a/arch/arm/mach-s3c64xx/irq.c b/arch/arm/mach-s3c64xx/irq.c
deleted file mode 100644
index b07357e..0000000
--- a/arch/arm/mach-s3c64xx/irq.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/* arch/arm/plat-s3c64xx/irq.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * S3C64XX - Interrupt handling
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/serial_core.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/hardware/vic.h>
-
-#include <mach/map.h>
-#include <plat/irq-vic-timer.h>
-#include <plat/irq-uart.h>
-#include <plat/cpu.h>
-
-/* setup the sources the vic should advertise resume for, even though it
- * is not doing the wake (set_irq_wake needs to be valid) */
-#define IRQ_VIC0_RESUME (1 << (IRQ_RTC_TIC - IRQ_VIC0_BASE))
-#define IRQ_VIC1_RESUME (1 << (IRQ_RTC_ALARM - IRQ_VIC1_BASE) |	\
-			 1 << (IRQ_PENDN - IRQ_VIC1_BASE) |	\
-			 1 << (IRQ_HSMMC0 - IRQ_VIC1_BASE) |	\
-			 1 << (IRQ_HSMMC1 - IRQ_VIC1_BASE) |	\
-			 1 << (IRQ_HSMMC2 - IRQ_VIC1_BASE))
-
-void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
-{
-	printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
-
-	/* initialise the pair of VICs */
-	vic_init(VA_VIC0, IRQ_VIC0_BASE, vic0_valid, IRQ_VIC0_RESUME);
-	vic_init(VA_VIC1, IRQ_VIC1_BASE, vic1_valid, IRQ_VIC1_RESUME);
-
-	/* add the timer sub-irqs */
-	s3c_init_vic_timer_irq(5, IRQ_TIMER0);
-}
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index 8eba88e..b86f277 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -30,6 +30,7 @@
 
 #include <video/platform_lcd.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
@@ -45,13 +46,14 @@
 #include <plat/fb.h>
 #include <plat/regs-fb-v4.h>
 
-#include <plat/s3c6410.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <mach/regs-gpio.h>
 #include <mach/regs-modem.h>
 
+#include "common.h"
+
 /* DM9000 */
 #define ANW6410_PA_DM9000	(0x18000000)
 
@@ -236,7 +238,9 @@
 	.atag_offset	= 0x100,
 
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= anw6410_map_io,
 	.init_machine	= anw6410_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
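
Every converted board file in this series gains a .handle_irq entry pointing at vic_handle_irq and a .restart hook, replacing the assembly demux in entry-macro.S and the arch_reset() helper in mach/system.h. A minimal sketch of the resulting machine descriptor (the board name and the map_io/init callbacks are placeholders):

/* Sketch of a converted board descriptor; EXAMPLE_*/example_* names are placeholders. */
MACHINE_START(EXAMPLE_BOARD, "Example S3C6410 board")
	.atag_offset	= 0x100,
	.init_irq	= s3c6410_init_irq,
	.handle_irq	= vic_handle_irq,	/* C entry point replaces entry-macro.S */
	.map_io		= example_map_io,
	.init_machine	= example_machine_init,
	.timer		= &s3c24xx_timer,
	.restart	= s3c64xx_restart,	/* replaces mach/system.h arch_reset() */
MACHINE_END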
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index d04b654..fb786b6 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -37,6 +37,7 @@
 #include <linux/mfd/wm831x/irq.h>
 #include <linux/mfd/wm831x/gpio.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
@@ -50,7 +51,6 @@
 
 #include <mach/regs-gpio-memport.h>
 
-#include <plat/s3c6410.h>
 #include <plat/regs-serial.h>
 #include <plat/regs-fb-v4.h>
 #include <plat/fb.h>
@@ -66,6 +66,8 @@
 #include <plat/iic.h>
 #include <plat/pm.h>
 
+#include "common.h"
+
 /* serial port setup */
 
 #define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
@@ -704,14 +706,16 @@
 
 	regulator_has_full_constraints();
 
-	s3c_pm_init();
+	s3c64xx_pm_init();
 }
 
 MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
 	/* Maintainer: Mark Brown <broonie@opensource.wolfsonmicro.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= crag6410_map_io,
 	.init_machine	= crag6410_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 952f75f..521e07b 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -29,6 +29,7 @@
 #include <mach/hardware.h>
 #include <mach/map.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 
@@ -37,12 +38,13 @@
 #include <plat/fb.h>
 #include <plat/nand.h>
 
-#include <plat/s3c6410.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -267,7 +269,9 @@
 	/* Maintainer: Peter Korsgaard <jacmet@sunsite.dk> */
 	.atag_offset	= 0x100,
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= hmt_map_io,
 	.init_machine	= hmt_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index 1bc85c3..c34c2ab 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -24,6 +24,7 @@
 #include <linux/serial_core.h>
 #include <linux/types.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -33,7 +34,6 @@
 #include <mach/regs-modem.h>
 #include <mach/regs-srom.h>
 
-#include <plat/s3c6410.h>
 #include <plat/adc.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
@@ -45,6 +45,8 @@
 
 #include <video/platform_lcd.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -345,7 +347,9 @@
 	/* Maintainer: Darius Augulis <augulis.darius@gmail.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= mini6410_map_io,
 	.init_machine	= mini6410_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index cb13cba..0efa2ba 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -25,6 +25,7 @@
 
 #include <video/platform_lcd.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
@@ -39,12 +40,13 @@
 #include <plat/iic.h>
 #include <plat/fb.h>
 
-#include <plat/s3c6410.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT
 #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE
 #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
@@ -99,7 +101,9 @@
 	/* Maintainer: Samsung Electronics */
 	.atag_offset	= 0x100,
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= ncp_map_io,
 	.init_machine	= ncp_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 87281e4..be2a9a2 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -25,6 +25,7 @@
 #include <linux/serial_core.h>
 #include <linux/types.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -34,7 +35,6 @@
 #include <mach/regs-modem.h>
 #include <mach/regs-srom.h>
 
-#include <plat/s3c6410.h>
 #include <plat/adc.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
@@ -46,6 +46,8 @@
 
 #include <video/platform_lcd.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -326,7 +328,9 @@
 	.atag_offset	= 0x100,
 
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= real6410_map_io,
 	.init_machine	= real6410_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index cb1ebeb..ce31db1 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -40,6 +40,8 @@
 
 #include <video/platform_lcd.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 94c831d..3f42431 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -17,19 +17,20 @@
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
 #include <mach/map.h>
 #include <mach/regs-gpio.h>
 
-#include <plat/s3c6410.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
 #include "mach-smartq.h"
 
 static struct gpio_led smartq5_leds[] = {
@@ -148,7 +149,9 @@
 	/* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smartq_map_io,
 	.init_machine	= smartq5_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index f112547..e5c09b6 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -17,19 +17,20 @@
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
 #include <mach/map.h>
 #include <mach/regs-gpio.h>
 
-#include <plat/s3c6410.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
 #include "mach-smartq.h"
 
 static struct gpio_led smartq7_leds[] = {
@@ -164,7 +165,9 @@
 	/* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smartq_map_io,
 	.init_machine	= smartq7_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 73450c2..5f09653 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -22,6 +22,7 @@
 
 #include <asm/mach-types.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
@@ -31,12 +32,13 @@
 
 #include <plat/regs-serial.h>
 
-#include <plat/s3c6400.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/iic.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
 #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
 #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
@@ -88,7 +90,9 @@
 	.atag_offset	= 0x100,
 
 	.init_irq	= s3c6400_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdk6400_map_io,
 	.init_machine	= smdk6400_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 8bc8edd..ca6fc20 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -43,6 +43,7 @@
 
 #include <video/platform_lcd.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
@@ -63,7 +64,6 @@
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 
-#include <plat/s3c6410.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -73,6 +73,8 @@
 #include <plat/backlight.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
+
 #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
 #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
 #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
@@ -700,7 +702,9 @@
 	.atag_offset	= 0x100,
 
 	.init_irq	= s3c6410_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdk6410_map_io,
 	.init_machine	= smdk6410_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s3c64xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index b375cd5..7d3e81b 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -17,10 +17,12 @@
 #include <linux/serial_core.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/pm_domain.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
 
+#include <plat/devs.h>
 #include <plat/pm.h>
 #include <plat/wakeup-mask.h>
 
@@ -31,6 +33,148 @@
 #include <mach/regs-gpio-memport.h>
 #include <mach/regs-modem.h>
 
+struct s3c64xx_pm_domain {
+	char *const name;
+	u32 ena;
+	u32 pwr_stat;
+	struct generic_pm_domain pd;
+};
+
+static int s3c64xx_pd_off(struct generic_pm_domain *domain)
+{
+	struct s3c64xx_pm_domain *pd;
+	u32 val;
+
+	pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+	val = __raw_readl(S3C64XX_NORMAL_CFG);
+	val &= ~(pd->ena);
+	__raw_writel(val, S3C64XX_NORMAL_CFG);
+
+	return 0;
+}
+
+static int s3c64xx_pd_on(struct generic_pm_domain *domain)
+{
+	struct s3c64xx_pm_domain *pd;
+	u32 val;
+	long retry = 1000000L;
+
+	pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+	val = __raw_readl(S3C64XX_NORMAL_CFG);
+	val |= pd->ena;
+	__raw_writel(val, S3C64XX_NORMAL_CFG);
+
+	/* Not all domains provide power status readback */
+	if (pd->pwr_stat) {
+		do {
+			cpu_relax();
+			if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat)
+				break;
+		} while (retry--);
+
+		if (!retry) {
+			pr_err("Failed to start domain %s\n", pd->name);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+static struct s3c64xx_pm_domain s3c64xx_pm_irom = {
+	.name = "IROM",
+	.ena = S3C64XX_NORMALCFG_IROM_ON,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_etm = {
+	.name = "ETM",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_ETM_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_ETM,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_s = {
+	.name = "S",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_S_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_S,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_f = {
+	.name = "F",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_F_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_F,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_p = {
+	.name = "P",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_P_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_P,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_i = {
+	.name = "I",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_I_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_I,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_g = {
+	.name = "G",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_G_ON,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_v = {
+	.name = "V",
+	.ena = S3C64XX_NORMALCFG_DOMAIN_V_ON,
+	.pwr_stat = S3C64XX_BLKPWRSTAT_V,
+	.pd = {
+		.power_off = s3c64xx_pd_off,
+		.power_on = s3c64xx_pd_on,
+	},
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = {
+	&s3c64xx_pm_irom,
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = {
+	&s3c64xx_pm_etm,
+	&s3c64xx_pm_g,
+	&s3c64xx_pm_v,
+	&s3c64xx_pm_i,
+	&s3c64xx_pm_p,
+	&s3c64xx_pm_s,
+	&s3c64xx_pm_f,
+};
+
 #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
 void s3c_pm_debug_smdkled(u32 set, u32 clear)
 {
@@ -89,6 +233,8 @@
 
 	SAVE_ITEM(S3C64XX_SDMA_SEL),
 	SAVE_ITEM(S3C64XX_MODEM_MIFPCON),
+
+	SAVE_ITEM(S3C64XX_NORMAL_CFG),
 };
 
 void s3c_pm_configure_extint(void)
@@ -179,7 +325,26 @@
 	__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
 }
 
-static int s3c64xx_pm_init(void)
+int __init s3c64xx_pm_init(void)
+{
+	int i;
+
+	s3c_pm_init();
+
+	for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++)
+		pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd,
+			      &pm_domain_always_on_gov, false);
+
+	for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
+		pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
+
+	if (dev_get_platdata(&s3c_device_fb.dev))
+		pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
+
+	return 0;
+}
+
+static __init int s3c64xx_pm_initcall(void)
 {
 	pm_cpu_prep = s3c64xx_pm_prepare;
 	pm_cpu_sleep = s3c64xx_cpu_suspend;
@@ -198,5 +363,12 @@
 
 	return 0;
 }
+arch_initcall(s3c64xx_pm_initcall);
 
-arch_initcall(s3c64xx_pm_init);
+static __init int s3c64xx_pm_late_initcall(void)
+{
+	pm_genpd_poweroff_unused();
+
+	return 0;
+}
+late_initcall(s3c64xx_pm_late_initcall);
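
The pm.c changes above move the S3C64XX power domains onto the generic PM domain framework: each domain wraps a generic_pm_domain whose power_on/power_off callbacks poke S3C64XX_NORMAL_CFG, pm_genpd_init() registers it, pm_genpd_add_device() ties a device to it, and pm_genpd_poweroff_unused() later switches off anything with no active consumers. A condensed sketch of that wiring, using only calls already present in the hunk (the domain and the attached device are arbitrary choices here):

/* Sketch only: one domain plus one attached device, names are placeholders. */
static struct s3c64xx_pm_domain s3c64xx_pm_example = {
	.name		= "EXAMPLE",
	.ena		= S3C64XX_NORMALCFG_DOMAIN_F_ON,	/* enable bit in NORMAL_CFG */
	.pwr_stat	= S3C64XX_BLKPWRSTAT_F,			/* readback bit, 0 if none */
	.pd		= {
		.power_off	= s3c64xx_pd_off,
		.power_on	= s3c64xx_pd_on,
	},
};

static void __init example_pm_domain_setup(void)
{
	/* third argument false: the domain starts powered on */
	pm_genpd_init(&s3c64xx_pm_example.pd, NULL, false);
	pm_genpd_add_device(&s3c64xx_pm_example.pd, &s3c_device_fb.dev);
}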
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 51c00f2..4869714 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -17,7 +17,7 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
 
@@ -38,7 +38,8 @@
 #include <plat/sdhci.h>
 #include <plat/iic-core.h>
 #include <plat/onenand-core.h>
-#include <plat/s3c6400.h>
+
+#include "common.h"
 
 void __init s3c6400_map_io(void)
 {
@@ -60,7 +61,7 @@
 void __init s3c6400_init_clocks(int xtal)
 {
 	s3c64xx_register_clocks(xtal, S3C6400_CLKDIV0_ARM_MASK);
-	s3c6400_setup_clocks();
+	s3c64xx_setup_clocks();
 }
 
 void __init s3c6400_init_irq(void)
@@ -70,17 +71,18 @@
 	s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
 }
 
-static struct sysdev_class s3c6400_sysclass = {
-	.name	= "s3c6400-core",
+static struct bus_type s3c6400_subsys = {
+	.name		= "s3c6400-core",
+	.dev_name	= "s3c6400-core",
 };
 
-static struct sys_device s3c6400_sysdev = {
-	.cls	= &s3c6400_sysclass,
+static struct device s3c6400_dev = {
+	.bus	= &s3c6400_subsys,
 };
 
 static int __init s3c6400_core_init(void)
 {
-	return sysdev_class_register(&s3c6400_sysclass);
+	return subsys_system_register(&s3c6400_subsys, NULL);
 }
 
 core_initcall(s3c6400_core_init);
@@ -89,5 +91,5 @@
 {
 	printk("S3C6400: Initialising architecture\n");
 
-	return sysdev_register(&s3c6400_sysdev);
+	return device_register(&s3c6400_dev);
 }
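
s3c6400.c here (and s3c6410.c below) follow the same mechanical conversion as dma.c earlier: the sysdev class becomes a struct bus_type registered with subsys_system_register(), and the per-SoC sys_device becomes a plain struct device on that bus. A stripped-down sketch of the new-style pattern, with placeholder names:

/* Sketch of the sysdev -> driver-core conversion; "example" names are placeholders. */
static struct bus_type example_subsys = {
	.name		= "example-core",
	.dev_name	= "example-core",
};

static struct device example_dev = {
	.bus	= &example_subsys,
};

static int __init example_core_init(void)
{
	/* previously: sysdev_class_register(&example_sysclass) */
	return subsys_system_register(&example_subsys, NULL);
}
core_initcall(example_core_init);

int __init example_init(void)
{
	/* previously: sysdev_register(&example_sysdev) */
	return device_register(&example_dev);
}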
diff --git a/arch/arm/mach-s3c64xx/s3c6410.c b/arch/arm/mach-s3c64xx/s3c6410.c
index 4117003..31c29fd 100644
--- a/arch/arm/mach-s3c64xx/s3c6410.c
+++ b/arch/arm/mach-s3c64xx/s3c6410.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
 
@@ -41,8 +41,8 @@
 #include <plat/adc-core.h>
 #include <plat/iic-core.h>
 #include <plat/onenand-core.h>
-#include <plat/s3c6400.h>
-#include <plat/s3c6410.h>
+
+#include "common.h"
 
 void __init s3c6410_map_io(void)
 {
@@ -66,7 +66,7 @@
 {
 	printk(KERN_DEBUG "%s: initialising clocks\n", __func__);
 	s3c64xx_register_clocks(xtal, S3C6410_CLKDIV0_ARM_MASK);
-	s3c6400_setup_clocks();
+	s3c64xx_setup_clocks();
 }
 
 void __init s3c6410_init_irq(void)
@@ -75,17 +75,18 @@
 	s3c64xx_init_irq(~0 & ~(1 << 7), ~0);
 }
 
-struct sysdev_class s3c6410_sysclass = {
-	.name	= "s3c6410-core",
+struct bus_type s3c6410_subsys = {
+	.name		= "s3c6410-core",
+	.dev_name	= "s3c6410-core",
 };
 
-static struct sys_device s3c6410_sysdev = {
-	.cls	= &s3c6410_sysclass,
+static struct device s3c6410_dev = {
+	.bus	= &s3c6410_subsys,
 };
 
 static int __init s3c6410_core_init(void)
 {
-	return sysdev_class_register(&s3c6410_sysclass);
+	return subsys_system_register(&s3c6410_subsys, NULL);
 }
 
 core_initcall(s3c6410_core_init);
@@ -94,5 +95,5 @@
 {
 	printk("S3C6410: Initialising architecture\n");
 
-	return sysdev_register(&s3c6410_sysdev);
+	return device_register(&s3c6410_dev);
 }
diff --git a/arch/arm/mach-s5p64x0/Makefile b/arch/arm/mach-s5p64x0/Makefile
index a1324d8..d3f7409 100644
--- a/arch/arm/mach-s5p64x0/Makefile
+++ b/arch/arm/mach-s5p64x0/Makefile
@@ -10,14 +10,16 @@
 obj-n				:=
 obj-				:=
 
-# Core support for S5P64X0 system
+# Core
 
-obj-$(CONFIG_ARCH_S5P64X0)	+= cpu.o init.o clock.o dma.o
-obj-$(CONFIG_ARCH_S5P64X0)	+= setup-i2c0.o irq-eint.o
+obj-y				+= common.o clock.o
 obj-$(CONFIG_CPU_S5P6440)	+= clock-s5p6440.o
 obj-$(CONFIG_CPU_S5P6450)	+= clock-s5p6450.o
+
 obj-$(CONFIG_PM)		+= pm.o irq-pm.o
 
+obj-y				+= dma.o
+
 # machine support
 
 obj-$(CONFIG_MACH_SMDK6440)	+= mach-smdk6440.o
@@ -28,5 +30,6 @@
 obj-y				+= dev-audio.o
 obj-$(CONFIG_S3C64XX_DEV_SPI)	+= dev-spi.o
 
+obj-y					+= setup-i2c0.o
 obj-$(CONFIG_S5P64X0_SETUP_I2C1)	+= setup-i2c1.o
 obj-$(CONFIG_S5P64X0_SETUP_FB_24BPP)	+= setup-fb-24bpp.o
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index c54c65d..eb4ffe3 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -31,7 +31,8 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/s5p6440.h>
+
+#include "common.h"
 
 static u32 epll_div[][5] = {
 	{ 36000000,	0,	48, 1, 4 },
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 2d04abf..bb7ee91 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -31,7 +31,8 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/s5p6450.h>
+
+#include "common.h"
 
 static struct clksrc_clk clk_mout_dpll = {
 	.clk	= {
diff --git a/arch/arm/mach-s5p64x0/clock.c b/arch/arm/mach-s5p64x0/clock.c
index b52c6e2..241d0e6 100644
--- a/arch/arm/mach-s5p64x0/clock.c
+++ b/arch/arm/mach-s5p64x0/clock.c
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
@@ -30,8 +30,8 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
+
+#include "common.h"
 
 struct clksrc_clk clk_mout_apll = {
 	.clk	= {
diff --git a/arch/arm/mach-s5p64x0/common.c b/arch/arm/mach-s5p64x0/common.c
new file mode 100644
index 0000000..28d0b91
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/common.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Codes for S5P64X0 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+
+#include <asm/irq.h>
+#include <asm/proc-fns.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/map.h>
+#include <mach/hardware.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/pm.h>
+#include <plat/adc-core.h>
+#include <plat/fb-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/regs-irqtype.h>
+#include <plat/regs-serial.h>
+#include <plat/watchdog-reset.h>
+
+#include "common.h"
+
+static const char name_s5p6440[] = "S5P6440";
+static const char name_s5p6450[] = "S5P6450";
+
+static struct cpu_table cpu_ids[] __initdata = {
+	{
+		.idcode		= S5P6440_CPU_ID,
+		.idmask		= S5P64XX_CPU_MASK,
+		.map_io		= s5p6440_map_io,
+		.init_clocks	= s5p6440_init_clocks,
+		.init_uarts	= s5p6440_init_uarts,
+		.init		= s5p64x0_init,
+		.name		= name_s5p6440,
+	}, {
+		.idcode		= S5P6450_CPU_ID,
+		.idmask		= S5P64XX_CPU_MASK,
+		.map_io		= s5p6450_map_io,
+		.init_clocks	= s5p6450_init_clocks,
+		.init_uarts	= s5p6450_init_uarts,
+		.init		= s5p64x0_init,
+		.name		= name_s5p6450,
+	},
+};
+
+/* Initial IO mappings */
+
+static struct map_desc s5p64x0_iodesc[] __initdata = {
+	{
+		.virtual	= (unsigned long)S5P_VA_CHIPID,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_CHIPID),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_SYS,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_SYSCON),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_TIMER,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_TIMER),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_WDT),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SROMC,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_SROMC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_GPIO,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_GPIO),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)VA_VIC0,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_VIC0),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)VA_VIC1,
+		.pfn		= __phys_to_pfn(S5P64X0_PA_VIC1),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static struct map_desc s5p6440_iodesc[] __initdata = {
+	{
+		.virtual	= (unsigned long)S3C_VA_UART,
+		.pfn		= __phys_to_pfn(S5P6440_PA_UART(0)),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static struct map_desc s5p6450_iodesc[] __initdata = {
+	{
+		.virtual	= (unsigned long)S3C_VA_UART,
+		.pfn		= __phys_to_pfn(S5P6450_PA_UART(0)),
+		.length		= SZ_512K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_UART + SZ_512K,
+		.pfn		= __phys_to_pfn(S5P6450_PA_UART(5)),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	},
+};
+
+static void s5p64x0_idle(void)
+{
+	unsigned long val;
+
+	if (!need_resched()) {
+		val = __raw_readl(S5P64X0_PWR_CFG);
+		val &= ~(0x3 << 5);
+		val |= (0x1 << 5);
+		__raw_writel(val, S5P64X0_PWR_CFG);
+
+		cpu_do_idle();
+	}
+	local_irq_enable();
+}
+
+/*
+ * s5p64x0_map_io
+ *
+ * register the standard CPU IO areas
+ */
+
+void __init s5p64x0_init_io(struct map_desc *mach_desc, int size)
+{
+	/* initialize the io descriptors we need for initialization */
+	iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
+	if (mach_desc)
+		iotable_init(mach_desc, size);
+
+	/* detect cpu id and rev. */
+	s5p_init_cpu(S5P64X0_SYS_ID);
+
+	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+void __init s5p6440_map_io(void)
+{
+	/* initialize any device information early */
+	s3c_adc_setname("s3c64xx-adc");
+	s3c_fb_setname("s5p64x0-fb");
+
+	iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc));
+	init_consistent_dma_size(SZ_8M);
+}
+
+void __init s5p6450_map_io(void)
+{
+	/* initialize any device information early */
+	s3c_adc_setname("s3c64xx-adc");
+	s3c_fb_setname("s5p64x0-fb");
+
+	iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc));
+	init_consistent_dma_size(SZ_8M);
+}
+
+/*
+ * s5p64x0_init_clocks
+ *
+ * register and setup the CPU clocks
+ */
+
+void __init s5p6440_init_clocks(int xtal)
+{
+	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
+	s3c24xx_register_baseclocks(xtal);
+	s5p_register_clocks(xtal);
+	s5p6440_register_clocks();
+	s5p6440_setup_clocks();
+}
+
+void __init s5p6450_init_clocks(int xtal)
+{
+	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
+	s3c24xx_register_baseclocks(xtal);
+	s5p_register_clocks(xtal);
+	s5p6450_register_clocks();
+	s5p6450_setup_clocks();
+}
+
+/*
+ * s5p64x0_init_irq
+ *
+ * register the CPU interrupts
+ */
+
+void __init s5p6440_init_irq(void)
+{
+	/* S5P6440 supports 2 VIC */
+	u32 vic[2];
+
+	/*
+	 * VIC0 is missing IRQ_VIC0[3, 4, 8, 10, (12-22)]
+	 * VIC1 is missing IRQ VIC1[1, 3, 4, 10, 11, 12, 14, 15, 22]
+	 */
+	vic[0] = 0xff800ae7;
+	vic[1] = 0xffbf23e5;
+
+	s5p_init_irq(vic, ARRAY_SIZE(vic));
+}
+
+void __init s5p6450_init_irq(void)
+{
+	/* S5P6450 supports only 2 VIC */
+	u32 vic[2];
+
+	/*
+	 * VIC0 is missing IRQ_VIC0[(13-15), (21-22)]
+	 * VIC1 is missing IRQ VIC1[12, 14, 23]
+	 */
+	vic[0] = 0xff9f1fff;
+	vic[1] = 0xff7fafff;
+
+	s5p_init_irq(vic, ARRAY_SIZE(vic));
+}
+
+struct bus_type s5p64x0_subsys = {
+	.name		= "s5p64x0-core",
+	.dev_name	= "s5p64x0-core",
+};
+
+static struct device s5p64x0_dev = {
+	.bus	= &s5p64x0_subsys,
+};
+
+static int __init s5p64x0_core_init(void)
+{
+	return subsys_system_register(&s5p64x0_subsys, NULL);
+}
+core_initcall(s5p64x0_core_init);
+
+int __init s5p64x0_init(void)
+{
+	printk(KERN_INFO "S5P64X0(S5P6440/S5P6450): Initializing architecture\n");
+
+	/* set idle function */
+	pm_idle = s5p64x0_idle;
+
+	return device_register(&s5p64x0_dev);
+}
+
+static struct s3c24xx_uart_clksrc s5p64x0_serial_clocks[] = {
+	[0] = {
+		.name		= "pclk_low",
+		.divisor	= 1,
+		.min_baud	= 0,
+		.max_baud	= 0,
+	},
+	[1] = {
+		.name		= "uclk1",
+		.divisor	= 1,
+		.min_baud	= 0,
+		.max_baud	= 0,
+	},
+};
+
+/* uart registration process */
+
+void __init s5p64x0_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	struct s3c2410_uartcfg *tcfg = cfg;
+	u32 ucnt;
+
+	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
+		if (!tcfg->clocks) {
+			tcfg->clocks = s5p64x0_serial_clocks;
+			tcfg->clocks_size = ARRAY_SIZE(s5p64x0_serial_clocks);
+		}
+	}
+}
+
+void __init s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	int uart;
+
+	for (uart = 0; uart < no; uart++) {
+		s5p_uart_resources[uart].resources->start = S5P6440_PA_UART(uart);
+		s5p_uart_resources[uart].resources->end = S5P6440_PA_UART(uart) + S5P_SZ_UART;
+	}
+
+	s5p64x0_common_init_uarts(cfg, no);
+	s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
+}
+
+void __init s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	s5p64x0_common_init_uarts(cfg, no);
+	s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
+}
+
+#define eint_offset(irq)	((irq) - IRQ_EINT(0))
+
+static int s5p64x0_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+	int offs = eint_offset(data->irq);
+	int shift;
+	u32 ctrl, mask;
+	u32 newvalue = 0;
+
+	if (offs > 15)
+		return -EINVAL;
+
+	switch (type) {
+	case IRQ_TYPE_NONE:
+		printk(KERN_WARNING "No edge setting!\n");
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		newvalue = S3C2410_EXTINT_RISEEDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		newvalue = S3C2410_EXTINT_FALLEDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		newvalue = S3C2410_EXTINT_BOTHEDGE;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		newvalue = S3C2410_EXTINT_LOWLEV;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		newvalue = S3C2410_EXTINT_HILEV;
+		break;
+	default:
+		printk(KERN_ERR "No such irq type %d", type);
+		return -EINVAL;
+	}
+
+	shift = (offs / 2) * 4;
+	mask = 0x7 << shift;
+
+	ctrl = __raw_readl(S5P64X0_EINT0CON0) & ~mask;
+	ctrl |= newvalue << shift;
+	__raw_writel(ctrl, S5P64X0_EINT0CON0);
+
+	/* Configure the GPIO pin for 6450 or 6440 based on CPU ID */
+	if (soc_is_s5p6450())
+		s3c_gpio_cfgpin(S5P6450_GPN(offs), S3C_GPIO_SFN(2));
+	else
+		s3c_gpio_cfgpin(S5P6440_GPN(offs), S3C_GPIO_SFN(2));
+
+	return 0;
+}
+
+/*
+ * s5p64x0_irq_demux_eint
+ *
+ * This function demuxes the IRQ from the group0 external interrupts,
+ * from IRQ_EINT(0) to IRQ_EINT(15). It is designed to be inlined into
+ * the specific handlers s5p64x0_irq_demux_eintX_Y.
+ */
+static inline void s5p64x0_irq_demux_eint(unsigned int start, unsigned int end)
+{
+	u32 status = __raw_readl(S5P64X0_EINT0PEND);
+	u32 mask = __raw_readl(S5P64X0_EINT0MASK);
+	unsigned int irq;
+
+	status &= ~mask;
+	status >>= start;
+	status &= (1 << (end - start + 1)) - 1;
+
+	for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
+		if (status & 1)
+			generic_handle_irq(irq);
+		status >>= 1;
+	}
+}
+
+static void s5p64x0_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+{
+	s5p64x0_irq_demux_eint(0, 3);
+}
+
+static void s5p64x0_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+{
+	s5p64x0_irq_demux_eint(4, 11);
+}
+
+static void s5p64x0_irq_demux_eint12_15(unsigned int irq,
+					struct irq_desc *desc)
+{
+	s5p64x0_irq_demux_eint(12, 15);
+}
+
+static int s5p64x0_alloc_gc(void)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+
+	gc = irq_alloc_generic_chip("s5p64x0-eint", 1, S5P_IRQ_EINT_BASE,
+				    S5P_VA_GPIO, handle_level_irq);
+	if (!gc) {
+		printk(KERN_ERR "%s: irq_alloc_generic_chip for group 0"
+			"external interrupts failed\n", __func__);
+		return -EINVAL;
+	}
+
+	ct = gc->chip_types;
+	ct->chip.irq_ack = irq_gc_ack_set_bit;
+	ct->chip.irq_mask = irq_gc_mask_set_bit;
+	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+	ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
+	ct->chip.irq_set_wake = s3c_irqext_wake;
+	ct->regs.ack = EINT0PEND_OFFSET;
+	ct->regs.mask = EINT0MASK_OFFSET;
+	irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
+			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+	return 0;
+}
+
+static int __init s5p64x0_init_irq_eint(void)
+{
+	int ret = s5p64x0_alloc_gc();
+	irq_set_chained_handler(IRQ_EINT0_3, s5p64x0_irq_demux_eint0_3);
+	irq_set_chained_handler(IRQ_EINT4_11, s5p64x0_irq_demux_eint4_11);
+	irq_set_chained_handler(IRQ_EINT12_15, s5p64x0_irq_demux_eint12_15);
+
+	return ret;
+}
+arch_initcall(s5p64x0_init_irq_eint);
+
+void s5p64x0_restart(char mode, const char *cmd)
+{
+	if (mode != 's')
+		arch_wdt_reset();
+
+	soft_restart(0);
+}
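
The s5p64x0_irq_eint_set_type() logic in the new common.c packs two external interrupts into each 4-bit field of EINT0CON0. A short worked fragment restating the shift arithmetic the code above performs (EINT(5) and the falling-edge type are arbitrary choices for illustration):

/* Worked example: configure EINT(5) for falling-edge triggering. */
int offs = 5;				/* eint_offset(IRQ_EINT(5)) */
int shift = (offs / 2) * 4;		/* = 8, EINT4/EINT5 share one nibble */
u32 mask = 0x7 << shift;		/* = 0x700 */
u32 ctrl = __raw_readl(S5P64X0_EINT0CON0) & ~mask;
ctrl |= S3C2410_EXTINT_FALLEDGE << shift;
__raw_writel(ctrl, S5P64X0_EINT0CON0);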
diff --git a/arch/arm/mach-s5p64x0/common.h b/arch/arm/mach-s5p64x0/common.h
new file mode 100644
index 0000000..f8a60fd
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/common.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for S5P64X0 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S5P64X0_COMMON_H
+#define __ARCH_ARM_MACH_S5P64X0_COMMON_H
+
+void s5p6440_init_irq(void);
+void s5p6450_init_irq(void);
+void s5p64x0_init_io(struct map_desc *mach_desc, int size);
+
+void s5p6440_register_clocks(void);
+void s5p6440_setup_clocks(void);
+
+void s5p6450_register_clocks(void);
+void s5p6450_setup_clocks(void);
+
+void s5p64x0_restart(char mode, const char *cmd);
+
+#ifdef CONFIG_CPU_S5P6440
+
+extern  int s5p64x0_init(void);
+extern void s5p6440_map_io(void);
+extern void s5p6440_init_clocks(int xtal);
+
+extern void s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5p6440_init_clocks NULL
+#define s5p6440_init_uarts NULL
+#define s5p6440_map_io NULL
+#define s5p64x0_init NULL
+#endif
+
+#ifdef CONFIG_CPU_S5P6450
+
+extern  int s5p64x0_init(void);
+extern void s5p6450_map_io(void);
+extern void s5p6450_init_clocks(int xtal);
+
+extern void s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5p6450_init_clocks NULL
+#define s5p6450_init_uarts NULL
+#define s5p6450_map_io NULL
+#define s5p64x0_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S5P64X0_COMMON_H */
diff --git a/arch/arm/mach-s5p64x0/cpu.c b/arch/arm/mach-s5p64x0/cpu.c
deleted file mode 100644
index ecab40c..0000000
--- a/arch/arm/mach-s5p64x0/cpu.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/cpu.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-#include <asm/proc-fns.h>
-#include <asm/irq.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/regs-serial.h>
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
-#include <plat/adc-core.h>
-#include <plat/fb-core.h>
-
-/* Initial IO mappings */
-
-static struct map_desc s5p64x0_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_GPIO,
-		.pfn		= __phys_to_pfn(S5P64X0_PA_GPIO),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)VA_VIC0,
-		.pfn		= __phys_to_pfn(S5P64X0_PA_VIC0),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)VA_VIC1,
-		.pfn		= __phys_to_pfn(S5P64X0_PA_VIC1),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static struct map_desc s5p6440_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S3C_VA_UART,
-		.pfn		= __phys_to_pfn(S5P6440_PA_UART(0)),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static struct map_desc s5p6450_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S3C_VA_UART,
-		.pfn		= __phys_to_pfn(S5P6450_PA_UART(0)),
-		.length		= SZ_512K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_UART + SZ_512K,
-		.pfn		= __phys_to_pfn(S5P6450_PA_UART(5)),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
-static void s5p64x0_idle(void)
-{
-	unsigned long val;
-
-	if (!need_resched()) {
-		val = __raw_readl(S5P64X0_PWR_CFG);
-		val &= ~(0x3 << 5);
-		val |= (0x1 << 5);
-		__raw_writel(val, S5P64X0_PWR_CFG);
-
-		cpu_do_idle();
-	}
-	local_irq_enable();
-}
-
-/*
- * s5p64x0_map_io
- *
- * register the standard CPU IO areas
- */
-
-void __init s5p6440_map_io(void)
-{
-	/* initialize any device information early */
-	s3c_adc_setname("s3c64xx-adc");
-	s3c_fb_setname("s5p64x0-fb");
-
-	iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
-	iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc));
-	init_consistent_dma_size(SZ_8M);
-}
-
-void __init s5p6450_map_io(void)
-{
-	/* initialize any device information early */
-	s3c_adc_setname("s3c64xx-adc");
-	s3c_fb_setname("s5p64x0-fb");
-
-	iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
-	iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc));
-	init_consistent_dma_size(SZ_8M);
-}
-
-/*
- * s5p64x0_init_clocks
- *
- * register and setup the CPU clocks
- */
-
-void __init s5p6440_init_clocks(int xtal)
-{
-	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
-	s3c24xx_register_baseclocks(xtal);
-	s5p_register_clocks(xtal);
-	s5p6440_register_clocks();
-	s5p6440_setup_clocks();
-}
-
-void __init s5p6450_init_clocks(int xtal)
-{
-	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
-	s3c24xx_register_baseclocks(xtal);
-	s5p_register_clocks(xtal);
-	s5p6450_register_clocks();
-	s5p6450_setup_clocks();
-}
-
-/*
- * s5p64x0_init_irq
- *
- * register the CPU interrupts
- */
-
-void __init s5p6440_init_irq(void)
-{
-	/* S5P6440 supports 2 VIC */
-	u32 vic[2];
-
-	/*
-	 * VIC0 is missing IRQ_VIC0[3, 4, 8, 10, (12-22)]
-	 * VIC1 is missing IRQ VIC1[1, 3, 4, 10, 11, 12, 14, 15, 22]
-	 */
-	vic[0] = 0xff800ae7;
-	vic[1] = 0xffbf23e5;
-
-	s5p_init_irq(vic, ARRAY_SIZE(vic));
-}
-
-void __init s5p6450_init_irq(void)
-{
-	/* S5P6450 supports only 2 VIC */
-	u32 vic[2];
-
-	/*
-	 * VIC0 is missing IRQ_VIC0[(13-15), (21-22)]
-	 * VIC1 is missing IRQ VIC1[12, 14, 23]
-	 */
-	vic[0] = 0xff9f1fff;
-	vic[1] = 0xff7fafff;
-
-	s5p_init_irq(vic, ARRAY_SIZE(vic));
-}
-
-struct sysdev_class s5p64x0_sysclass = {
-	.name	= "s5p64x0-core",
-};
-
-static struct sys_device s5p64x0_sysdev = {
-	.cls	= &s5p64x0_sysclass,
-};
-
-static int __init s5p64x0_core_init(void)
-{
-	return sysdev_class_register(&s5p64x0_sysclass);
-}
-core_initcall(s5p64x0_core_init);
-
-int __init s5p64x0_init(void)
-{
-	printk(KERN_INFO "S5P64X0(S5P6440/S5P6450): Initializing architecture\n");
-
-	/* set idle function */
-	pm_idle = s5p64x0_idle;
-
-	return sysdev_register(&s5p64x0_sysdev);
-}
diff --git a/arch/arm/mach-s5p64x0/include/mach/entry-macro.S b/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
index 10b62b4..fbb246d 100644
--- a/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
@@ -10,7 +10,8 @@
  * published by the Free Software Foundation.
 */
 
-#include <mach/map.h>
-#include <plat/irqs.h>
+		.macro  disable_fiq
+		.endm
 
-#include <asm/entry-macro-vic2.S>
+		.macro  arch_ret_to_user, tmp1, tmp2
+		.endm
diff --git a/arch/arm/mach-s5p64x0/include/mach/system.h b/arch/arm/mach-s5p64x0/include/mach/system.h
index 60f5753..cf26e09 100644
--- a/arch/arm/mach-s5p64x0/include/mach/system.h
+++ b/arch/arm/mach-s5p64x0/include/mach/system.h
@@ -13,8 +13,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H __FILE__
 
-#include <plat/system-reset.h>
-
 static void arch_idle(void)
 {
 	/* nothing here yet */
diff --git a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h b/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
deleted file mode 100644
index 38dcc71..0000000
--- a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p64x0/init.c b/arch/arm/mach-s5p64x0/init.c
deleted file mode 100644
index 79833ca..0000000
--- a/arch/arm/mach-s5p64x0/init.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/init.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5P64X0 - Init support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-
-#include <mach/map.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc s5p64x0_serial_clocks[] = {
-	[0] = {
-		.name		= "pclk_low",
-		.divisor	= 1,
-		.min_baud	= 0,
-		.max_baud	= 0,
-	},
-	[1] = {
-		.name		= "uclk1",
-		.divisor	= 1,
-		.min_baud	= 0,
-		.max_baud	= 0,
-	},
-};
-
-/* uart registration process */
-
-void __init s5p64x0_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	struct s3c2410_uartcfg *tcfg = cfg;
-	u32 ucnt;
-
-	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
-		if (!tcfg->clocks) {
-			tcfg->clocks = s5p64x0_serial_clocks;
-			tcfg->clocks_size = ARRAY_SIZE(s5p64x0_serial_clocks);
-		}
-	}
-}
-
-void __init s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	int uart;
-
-	for (uart = 0; uart < no; uart++) {
-		s5p_uart_resources[uart].resources->start = S5P6440_PA_UART(uart);
-		s5p_uart_resources[uart].resources->end = S5P6440_PA_UART(uart) + S5P_SZ_UART;
-	}
-
-	s5p64x0_common_init_uarts(cfg, no);
-	s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
-}
-
-void __init s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	s5p64x0_common_init_uarts(cfg, no);
-	s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5p64x0/irq-eint.c b/arch/arm/mach-s5p64x0/irq-eint.c
deleted file mode 100644
index 275dc74..0000000
--- a/arch/arm/mach-s5p64x0/irq-eint.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* arch/arm/mach-s5p64x0/irq-eint.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- *		http://www.samsung.com/
- *
- * Based on linux/arch/arm/mach-s3c64xx/irq-eint.c
- *
- * S5P64X0 - Interrupt handling for External Interrupts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <plat/cpu.h>
-#include <plat/regs-irqtype.h>
-#include <plat/gpio-cfg.h>
-#include <plat/pm.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/regs-clock.h>
-
-#define eint_offset(irq)	((irq) - IRQ_EINT(0))
-
-static int s5p64x0_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
-	int offs = eint_offset(data->irq);
-	int shift;
-	u32 ctrl, mask;
-	u32 newvalue = 0;
-
-	if (offs > 15)
-		return -EINVAL;
-
-	switch (type) {
-	case IRQ_TYPE_NONE:
-		printk(KERN_WARNING "No edge setting!\n");
-		break;
-	case IRQ_TYPE_EDGE_RISING:
-		newvalue = S3C2410_EXTINT_RISEEDGE;
-		break;
-	case IRQ_TYPE_EDGE_FALLING:
-		newvalue = S3C2410_EXTINT_FALLEDGE;
-		break;
-	case IRQ_TYPE_EDGE_BOTH:
-		newvalue = S3C2410_EXTINT_BOTHEDGE;
-		break;
-	case IRQ_TYPE_LEVEL_LOW:
-		newvalue = S3C2410_EXTINT_LOWLEV;
-		break;
-	case IRQ_TYPE_LEVEL_HIGH:
-		newvalue = S3C2410_EXTINT_HILEV;
-		break;
-	default:
-		printk(KERN_ERR "No such irq type %d", type);
-		return -EINVAL;
-	}
-
-	shift = (offs / 2) * 4;
-	mask = 0x7 << shift;
-
-	ctrl = __raw_readl(S5P64X0_EINT0CON0) & ~mask;
-	ctrl |= newvalue << shift;
-	__raw_writel(ctrl, S5P64X0_EINT0CON0);
-
-	/* Configure the GPIO pin for 6450 or 6440 based on CPU ID */
-	if (soc_is_s5p6450())
-		s3c_gpio_cfgpin(S5P6450_GPN(offs), S3C_GPIO_SFN(2));
-	else
-		s3c_gpio_cfgpin(S5P6440_GPN(offs), S3C_GPIO_SFN(2));
-
-	return 0;
-}
-
-/*
- * s5p64x0_irq_demux_eint
- *
- * This function demuxes the IRQ from the group0 external interrupts,
- * from IRQ_EINT(0) to IRQ_EINT(15). It is designed to be inlined into
- * the specific handlers s5p64x0_irq_demux_eintX_Y.
- */
-static inline void s5p64x0_irq_demux_eint(unsigned int start, unsigned int end)
-{
-	u32 status = __raw_readl(S5P64X0_EINT0PEND);
-	u32 mask = __raw_readl(S5P64X0_EINT0MASK);
-	unsigned int irq;
-
-	status &= ~mask;
-	status >>= start;
-	status &= (1 << (end - start + 1)) - 1;
-
-	for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
-		if (status & 1)
-			generic_handle_irq(irq);
-		status >>= 1;
-	}
-}
-
-static void s5p64x0_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
-{
-	s5p64x0_irq_demux_eint(0, 3);
-}
-
-static void s5p64x0_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
-{
-	s5p64x0_irq_demux_eint(4, 11);
-}
-
-static void s5p64x0_irq_demux_eint12_15(unsigned int irq,
-					struct irq_desc *desc)
-{
-	s5p64x0_irq_demux_eint(12, 15);
-}
-
-static int s5p64x0_alloc_gc(void)
-{
-	struct irq_chip_generic *gc;
-	struct irq_chip_type *ct;
-
-	gc = irq_alloc_generic_chip("s5p64x0-eint", 1, S5P_IRQ_EINT_BASE,
-				    S5P_VA_GPIO, handle_level_irq);
-	if (!gc) {
-		printk(KERN_ERR "%s: irq_alloc_generic_chip for group 0"
-			"external interrupts failed\n", __func__);
-		return -EINVAL;
-	}
-
-	ct = gc->chip_types;
-	ct->chip.irq_ack = irq_gc_ack_set_bit;
-	ct->chip.irq_mask = irq_gc_mask_set_bit;
-	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
-	ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
-	ct->chip.irq_set_wake = s3c_irqext_wake;
-	ct->regs.ack = EINT0PEND_OFFSET;
-	ct->regs.mask = EINT0MASK_OFFSET;
-	irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
-			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-	return 0;
-}
-
-static int __init s5p64x0_init_irq_eint(void)
-{
-	int ret = s5p64x0_alloc_gc();
-	irq_set_chained_handler(IRQ_EINT0_3, s5p64x0_irq_demux_eint0_3);
-	irq_set_chained_handler(IRQ_EINT4_11, s5p64x0_irq_demux_eint4_11);
-	irq_set_chained_handler(IRQ_EINT12_15, s5p64x0_irq_demux_eint12_15);
-
-	return ret;
-}
-arch_initcall(s5p64x0_init_irq_eint);
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 4a1250c..34d98a1 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -27,6 +27,7 @@
 
 #include <video/platform_lcd.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/irq.h>
@@ -40,7 +41,6 @@
 
 #include <plat/regs-serial.h>
 #include <plat/gpio-cfg.h>
-#include <plat/s5p6440.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -53,6 +53,8 @@
 #include <plat/fb.h>
 #include <plat/regs-fb.h>
 
+#include "common.h"
+
 #define SMDK6440_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				S3C2410_UCON_RXILEVEL |		\
 				S3C2410_UCON_TXIRQMODE |	\
@@ -201,7 +203,7 @@
 
 static void __init smdk6440_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P64X0_SYS_ID);
+	s5p64x0_init_io(NULL, 0);
 	s3c24xx_init_clocks(12000000);
 	s3c24xx_init_uarts(smdk6440_uartcfgs, ARRAY_SIZE(smdk6440_uartcfgs));
 	s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -242,7 +244,9 @@
 	.atag_offset	= 0x100,
 
 	.init_irq	= s5p6440_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdk6440_map_io,
 	.init_machine	= smdk6440_machine_init,
 	.timer		= &s5p_timer,
+	.restart	= s5p64x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index 0ab129e..135cf5d 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -27,6 +27,7 @@
 
 #include <video/platform_lcd.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/irq.h>
@@ -40,7 +41,6 @@
 
 #include <plat/regs-serial.h>
 #include <plat/gpio-cfg.h>
-#include <plat/s5p6450.h>
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
@@ -53,6 +53,8 @@
 #include <plat/fb.h>
 #include <plat/regs-fb.h>
 
+#include "common.h"
+
 #define SMDK6450_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				S3C2410_UCON_RXILEVEL |		\
 				S3C2410_UCON_TXIRQMODE |	\
@@ -221,7 +223,7 @@
 
 static void __init smdk6450_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P64X0_SYS_ID);
+	s5p64x0_init_io(NULL, 0);
 	s3c24xx_init_clocks(19200000);
 	s3c24xx_init_uarts(smdk6450_uartcfgs, ARRAY_SIZE(smdk6450_uartcfgs));
 	s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -262,7 +264,9 @@
 	.atag_offset	= 0x100,
 
 	.init_irq	= s5p6450_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdk6450_map_io,
 	.init_machine	= smdk6450_machine_init,
 	.timer		= &s5p_timer,
+	.restart	= s5p64x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5p64x0/pm.c b/arch/arm/mach-s5p64x0/pm.c
index 6992724..23f9b22 100644
--- a/arch/arm/mach-s5p64x0/pm.c
+++ b/arch/arm/mach-s5p64x0/pm.c
@@ -160,7 +160,7 @@
 
 }
 
-static int s5p64x0_pm_add(struct sys_device *sysdev)
+static int s5p64x0_pm_add(struct device *dev)
 {
 	pm_cpu_prep = s5p64x0_pm_prepare;
 	pm_cpu_sleep = s5p64x0_cpu_suspend;
@@ -169,15 +169,17 @@
 	return 0;
 }
 
-static struct sysdev_driver s5p64x0_pm_driver = {
-	.add		= s5p64x0_pm_add,
+static struct subsys_interface s5p64x0_pm_interface = {
+	.name		= "s5p64x0_pm",
+	.subsys		= &s5p64x0_subsys,
+	.add_dev	= s5p64x0_pm_add,
 };
 
 static __init int s5p64x0_pm_drvinit(void)
 {
 	s3c_pm_init();
 
-	return sysdev_driver_register(&s5p64x0_sysclass, &s5p64x0_pm_driver);
+	return subsys_interface_register(&s5p64x0_pm_interface);
 }
 arch_initcall(s5p64x0_pm_drvinit);
 
diff --git a/arch/arm/mach-s5pc100/Makefile b/arch/arm/mach-s5pc100/Makefile
index a5e6e60..c3166c4 100644
--- a/arch/arm/mach-s5pc100/Makefile
+++ b/arch/arm/mach-s5pc100/Makefile
@@ -9,28 +9,25 @@
 obj-n				:=
 obj-				:=
 
-# Core support for S5PC100 system
+# Core
 
-obj-$(CONFIG_CPU_S5PC100)	+= cpu.o init.o clock.o
-obj-$(CONFIG_CPU_S5PC100)	+= setup-i2c0.o
-obj-$(CONFIG_CPU_S5PC100)	+= dma.o
+obj-y				+= common.o clock.o
 
-# Helper and device support
-
-obj-$(CONFIG_S5PC100_SETUP_FB_24BPP)	+= setup-fb-24bpp.o
-obj-$(CONFIG_S5PC100_SETUP_I2C1)	+= setup-i2c1.o
-obj-$(CONFIG_S5PC100_SETUP_IDE)		+= setup-ide.o
-obj-$(CONFIG_S5PC100_SETUP_KEYPAD)	+= setup-keypad.o
-obj-$(CONFIG_S5PC100_SETUP_SDHCI)	+= setup-sdhci.o
-obj-$(CONFIG_S5PC100_SETUP_SDHCI_GPIO)	+= setup-sdhci-gpio.o
-
-# device support
-obj-y				+= dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI)	+= dev-spi.o
+obj-y				+= dma.o
 
 # machine support
 
 obj-$(CONFIG_MACH_SMDKC100)	+= mach-smdkc100.o
 
 # device support
+
 obj-y				+= dev-audio.o
+obj-$(CONFIG_S3C64XX_DEV_SPI)	+= dev-spi.o
+
+obj-y					+= setup-i2c0.o
+obj-$(CONFIG_S5PC100_SETUP_FB_24BPP)	+= setup-fb-24bpp.o
+obj-$(CONFIG_S5PC100_SETUP_I2C1)	+= setup-i2c1.o
+obj-$(CONFIG_S5PC100_SETUP_IDE)		+= setup-ide.o
+obj-$(CONFIG_S5PC100_SETUP_KEYPAD)	+= setup-keypad.o
+obj-$(CONFIG_S5PC100_SETUP_SDHCI)	+= setup-sdhci.o
+obj-$(CONFIG_S5PC100_SETUP_SDHCI_GPIO)	+= setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 8d47709..c4c7489 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -27,7 +27,8 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/s5pc100.h>
+
+#include "common.h"
 
 static struct clk s5p_clk_otgphy = {
 	.name		= "otg_phy",
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/common.c
similarity index 61%
rename from arch/arm/mach-s5pc100/cpu.c
rename to arch/arm/mach-s5pc100/common.c
index fd2708e..c909573 100644
--- a/arch/arm/mach-s5pc100/cpu.c
+++ b/arch/arm/mach-s5pc100/common.c
@@ -1,17 +1,16 @@
-/* linux/arch/arm/mach-s5pc100/cpu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
  * Copyright 2009 Samsung Electronics Co.
  *	Byungho Min <bhmin@samsung.com>
  *
- * Based on mach-s3c6410/cpu.c
+ * Common Codes for S5PC100
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -21,40 +20,78 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 
+#include <asm/irq.h>
+#include <asm/proc-fns.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
 
-#include <asm/proc-fns.h>
-
-#include <mach/hardware.h>
 #include <mach/map.h>
-#include <asm/irq.h>
-
-#include <plat/regs-serial.h>
+#include <mach/hardware.h>
 #include <mach/regs-clock.h>
 
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/clock.h>
-#include <plat/ata-core.h>
-#include <plat/iic-core.h>
 #include <plat/sdhci.h>
 #include <plat/adc-core.h>
-#include <plat/onenand-core.h>
+#include <plat/ata-core.h>
 #include <plat/fb-core.h>
+#include <plat/iic-core.h>
+#include <plat/onenand-core.h>
+#include <plat/regs-serial.h>
+#include <plat/watchdog-reset.h>
 
-#include <plat/s5pc100.h>
+#include "common.h"
+
+static const char name_s5pc100[] = "S5PC100";
+
+static struct cpu_table cpu_ids[] __initdata = {
+	{
+		.idcode		= S5PC100_CPU_ID,
+		.idmask		= S5PC100_CPU_MASK,
+		.map_io		= s5pc100_map_io,
+		.init_clocks	= s5pc100_init_clocks,
+		.init_uarts	= s5pc100_init_uarts,
+		.init		= s5pc100_init,
+		.name		= name_s5pc100,
+	},
+};
 
 /* Initial IO mappings */
 
 static struct map_desc s5pc100_iodesc[] __initdata = {
 	{
+		.virtual	= (unsigned long)S5P_VA_CHIPID,
+		.pfn		= __phys_to_pfn(S5PC100_PA_CHIPID),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_SYS,
+		.pfn		= __phys_to_pfn(S5PC100_PA_SYSCON),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_TIMER,
+		.pfn		= __phys_to_pfn(S5PC100_PA_TIMER),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
+		.pfn		= __phys_to_pfn(S5PC100_PA_WATCHDOG),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SROMC,
+		.pfn		= __phys_to_pfn(S5PC100_PA_SROMC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
 		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
 		.pfn		= __phys_to_pfn(S5PC100_PA_SYSTIMER),
 		.length		= SZ_16K,
@@ -100,15 +137,27 @@
 	local_irq_enable();
 }
 
-/* s5pc100_map_io
+/*
+ * s5pc100_map_io
  *
- * register the standard cpu IO areas
-*/
+ * register the standard CPU IO areas
+ */
+
+void __init s5pc100_init_io(struct map_desc *mach_desc, int size)
+{
+	/* initialize the io descriptors we need for initialization */
+	iotable_init(s5pc100_iodesc, ARRAY_SIZE(s5pc100_iodesc));
+	if (mach_desc)
+		iotable_init(mach_desc, size);
+
+	/* detect cpu id and rev. */
+	s5p_init_cpu(S5P_VA_CHIPID);
+
+	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
 
 void __init s5pc100_map_io(void)
 {
-	iotable_init(s5pc100_iodesc, ARRAY_SIZE(s5pc100_iodesc));
-
 	/* initialise device information early */
 	s5pc100_default_sdhci0();
 	s5pc100_default_sdhci1();
@@ -143,19 +192,19 @@
 	s5p_init_irq(vic, ARRAY_SIZE(vic));
 }
 
-static struct sysdev_class s5pc100_sysclass = {
-	.name	= "s5pc100-core",
+static struct bus_type s5pc100_subsys = {
+	.name		= "s5pc100-core",
+	.dev_name	= "s5pc100-core",
 };
 
-static struct sys_device s5pc100_sysdev = {
-	.cls	= &s5pc100_sysclass,
+static struct device s5pc100_dev = {
+	.bus	= &s5pc100_subsys,
 };
 
 static int __init s5pc100_core_init(void)
 {
-	return sysdev_class_register(&s5pc100_sysclass);
+	return subsys_system_register(&s5pc100_subsys, NULL);
 }
-
 core_initcall(s5pc100_core_init);
 
 int __init s5pc100_init(void)
@@ -165,5 +214,20 @@
 	/* set idle function */
 	pm_idle = s5pc100_idle;
 
-	return sysdev_register(&s5pc100_sysdev);
+	return device_register(&s5pc100_dev);
+}
+
+/* uart registration process */
+
+void __init s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
+}
+
+void s5pc100_restart(char mode, const char *cmd)
+{
+	if (mode != 's')
+		arch_wdt_reset();
+
+	soft_restart(0);
 }
diff --git a/arch/arm/mach-s5pc100/common.h b/arch/arm/mach-s5pc100/common.h
new file mode 100644
index 0000000..9fbd3ae
--- /dev/null
+++ b/arch/arm/mach-s5pc100/common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for S5PC100 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S5PC100_COMMON_H
+#define __ARCH_ARM_MACH_S5PC100_COMMON_H
+
+void s5pc100_init_io(struct map_desc *mach_desc, int size);
+void s5pc100_init_irq(void);
+
+void s5pc100_register_clocks(void);
+void s5pc100_setup_clocks(void);
+
+void s5pc100_restart(char mode, const char *cmd);
+
+#ifdef CONFIG_CPU_S5PC100
+
+extern  int s5pc100_init(void);
+extern void s5pc100_map_io(void);
+extern void s5pc100_init_clocks(int xtal);
+extern void s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5pc100_init_clocks NULL
+#define s5pc100_init_uarts NULL
+#define s5pc100_map_io NULL
+#define s5pc100_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S5PC100_COMMON_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/entry-macro.S b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
index ba76af0..b8c242e 100644
--- a/arch/arm/mach-s5pc100/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
@@ -12,39 +12,14 @@
  * warranty of any kind, whether express or implied.
 */
 
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-#include <plat/irqs.h>
-
 	.macro	disable_fiq
 	.endm
 
 	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =VA_VIC0
 	.endm
 
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
 
 	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	@ check the vic0
-	mov	\irqnr, # S5P_IRQ_OFFSET + 31
-	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
-	teq	\irqstat, #0
-
-	@ otherwise try vic1
-	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #32
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	@ otherwise try vic2
-	addeq	\tmp, \base, #(VA_VIC2 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #32
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	clzne	\irqstat, \irqstat
-	subne	\irqnr, \irqnr, \irqstat
 	.endm
diff --git a/arch/arm/mach-s5pc100/include/mach/system.h b/arch/arm/mach-s5pc100/include/mach/system.h
index a9ea57c..afc96c2 100644
--- a/arch/arm/mach-s5pc100/include/mach/system.h
+++ b/arch/arm/mach-s5pc100/include/mach/system.h
@@ -11,8 +11,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H __FILE__
 
-#include <plat/system-reset.h>
-
 static void arch_idle(void)
 {
 	/* nothing here yet */
diff --git a/arch/arm/mach-s5pc100/include/mach/vmalloc.h b/arch/arm/mach-s5pc100/include/mach/vmalloc.h
deleted file mode 100644
index 44c8e57..0000000
--- a/arch/arm/mach-s5pc100/include/mach/vmalloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5pc100/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pc100/init.c b/arch/arm/mach-s5pc100/init.c
deleted file mode 100644
index 19d7b52..0000000
--- a/arch/arm/mach-s5pc100/init.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/plat-s5pc100/s5pc100-init.c
- *
- * Copyright 2009 Samsung Electronics Co.
- *      Byungho Min <bhmin@samsung.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5pc100.h>
-
-/* uart registration process */
-void __init s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 26f5c91..674d229 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -25,6 +25,7 @@
 #include <linux/input.h>
 #include <linux/pwm_backlight.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
@@ -42,7 +43,6 @@
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
-#include <plat/s5pc100.h>
 #include <plat/fb.h>
 #include <plat/iic.h>
 #include <plat/ata.h>
@@ -53,6 +53,8 @@
 #include <plat/backlight.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKC100_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -215,7 +217,7 @@
 
 static void __init smdkc100_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	s5pc100_init_io(NULL, 0);
 	s3c24xx_init_clocks(12000000);
 	s3c24xx_init_uarts(smdkc100_uartcfgs, ARRAY_SIZE(smdkc100_uartcfgs));
 }
@@ -250,7 +252,9 @@
 	/* Maintainer: Byungho Min <bhmin@samsung.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s5pc100_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdkc100_map_io,
 	.init_machine	= smdkc100_machine_init,
 	.timer		= &s3c24xx_timer,
+	.restart	= s5pc100_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 009fbe5..4c59186 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -10,18 +10,20 @@
 obj-n				:=
 obj-				:=
 
-# Core support for S5PV210 system
+# Core
 
-obj-$(CONFIG_CPU_S5PV210)	+= cpu.o init.o clock.o dma.o
-obj-$(CONFIG_CPU_S5PV210)	+= setup-i2c0.o
+obj-y				+= common.o clock.o
+
 obj-$(CONFIG_PM)		+= pm.o
 
+obj-y				+= dma.o
+
 # machine support
 
 obj-$(CONFIG_MACH_AQUILA)	+= mach-aquila.o
-obj-$(CONFIG_MACH_SMDKV210)	+= mach-smdkv210.o
-obj-$(CONFIG_MACH_SMDKC110)	+= mach-smdkc110.o
 obj-$(CONFIG_MACH_GONI)		+= mach-goni.o
+obj-$(CONFIG_MACH_SMDKC110)	+= mach-smdkc110.o
+obj-$(CONFIG_MACH_SMDKV210)	+= mach-smdkv210.o
 obj-$(CONFIG_MACH_TORBRECK)	+= mach-torbreck.o
 
 # device support
@@ -29,11 +31,12 @@
 obj-y				+= dev-audio.o
 obj-$(CONFIG_S3C64XX_DEV_SPI)	+= dev-spi.o
 
+obj-y					+= setup-i2c0.o
 obj-$(CONFIG_S5PV210_SETUP_FB_24BPP)	+= setup-fb-24bpp.o
 obj-$(CONFIG_S5PV210_SETUP_FIMC)	+= setup-fimc.o
-obj-$(CONFIG_S5PV210_SETUP_I2C1) 	+= setup-i2c1.o
-obj-$(CONFIG_S5PV210_SETUP_I2C2) 	+= setup-i2c2.o
+obj-$(CONFIG_S5PV210_SETUP_I2C1)	+= setup-i2c1.o
+obj-$(CONFIG_S5PV210_SETUP_I2C2)	+= setup-i2c2.o
 obj-$(CONFIG_S5PV210_SETUP_IDE)		+= setup-ide.o
 obj-$(CONFIG_S5PV210_SETUP_KEYPAD)	+= setup-keypad.o
-obj-$(CONFIG_S5PV210_SETUP_SDHCI)       += setup-sdhci.o
+obj-$(CONFIG_S5PV210_SETUP_SDHCI)	+= setup-sdhci.o
 obj-$(CONFIG_S5PV210_SETUP_SDHCI_GPIO)	+= setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 4c5ac7a..04c9b57 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <mach/map.h>
@@ -29,7 +29,8 @@
 #include <plat/pll.h>
 #include <plat/s5p-clock.h>
 #include <plat/clock-clksrc.h>
-#include <plat/s5pv210.h>
+
+#include "common.h"
 
 static unsigned long xtal;
 
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/common.c
similarity index 63%
rename from arch/arm/mach-s5pv210/cpu.c
rename to arch/arm/mach-s5pv210/common.c
index 84ec746..0ec3933 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/common.c
@@ -1,12 +1,13 @@
-/* linux/arch/arm/mach-s5pv210/cpu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
+ * Common Codes for S5PV210
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -17,37 +18,78 @@
 #include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/dma-mapping.h>
+#include <linux/serial_core.h>
 
+#include <asm/proc-fns.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/irq.h>
 
-#include <asm/proc-fns.h>
 #include <mach/map.h>
 #include <mach/regs-clock.h>
 
 #include <plat/cpu.h>
-#include <plat/devs.h>
 #include <plat/clock.h>
-#include <plat/fb-core.h>
-#include <plat/s5pv210.h>
+#include <plat/devs.h>
+#include <plat/sdhci.h>
 #include <plat/adc-core.h>
 #include <plat/ata-core.h>
+#include <plat/fb-core.h>
 #include <plat/fimc-core.h>
 #include <plat/iic-core.h>
 #include <plat/keypad-core.h>
-#include <plat/sdhci.h>
-#include <plat/reset.h>
 #include <plat/tv-core.h>
+#include <plat/regs-serial.h>
+
+#include "common.h"
+
+static const char name_s5pv210[] = "S5PV210/S5PC110";
+
+static struct cpu_table cpu_ids[] __initdata = {
+	{
+		.idcode		= S5PV210_CPU_ID,
+		.idmask		= S5PV210_CPU_MASK,
+		.map_io		= s5pv210_map_io,
+		.init_clocks	= s5pv210_init_clocks,
+		.init_uarts	= s5pv210_init_uarts,
+		.init		= s5pv210_init,
+		.name		= name_s5pv210,
+	},
+};
 
 /* Initial IO mappings */
 
 static struct map_desc s5pv210_iodesc[] __initdata = {
 	{
+		.virtual	= (unsigned long)S5P_VA_CHIPID,
+		.pfn		= __phys_to_pfn(S5PV210_PA_CHIPID),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_SYS,
+		.pfn		= __phys_to_pfn(S5PV210_PA_SYSCON),
+		.length		= SZ_64K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_TIMER,
+		.pfn		= __phys_to_pfn(S5PV210_PA_TIMER),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
+		.pfn		= __phys_to_pfn(S5PV210_PA_WATCHDOG),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= (unsigned long)S5P_VA_SROMC,
+		.pfn		= __phys_to_pfn(S5PV210_PA_SROMC),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE,
+	}, {
 		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
 		.pfn		= __phys_to_pfn(S5PV210_PA_SYSTIMER),
 		.length		= SZ_4K,
@@ -108,19 +150,32 @@
 	local_irq_enable();
 }
 
-static void s5pv210_sw_reset(void)
+void s5pv210_restart(char mode, const char *cmd)
 {
 	__raw_writel(0x1, S5P_SWRESET);
 }
 
-/* s5pv210_map_io
+/*
+ * s5pv210_map_io
  *
  * register the standard cpu IO areas
-*/
+ */
+
+void __init s5pv210_init_io(struct map_desc *mach_desc, int size)
+{
+	/* initialize the io descriptors we need for initialization */
+	iotable_init(s5pv210_iodesc, ARRAY_SIZE(s5pv210_iodesc));
+	if (mach_desc)
+		iotable_init(mach_desc, size);
+
+	/* detect cpu id and rev. */
+	s5p_init_cpu(S5P_VA_CHIPID);
+
+	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
 
 void __init s5pv210_map_io(void)
 {
-	iotable_init(s5pv210_iodesc, ARRAY_SIZE(s5pv210_iodesc));
 	init_consistent_dma_size(14 << 20);
 
 	/* initialise device information early */
@@ -174,19 +229,19 @@
 	s5p_init_irq(vic, ARRAY_SIZE(vic));
 }
 
-struct sysdev_class s5pv210_sysclass = {
-	.name	= "s5pv210-core",
+struct bus_type s5pv210_subsys = {
+	.name		= "s5pv210-core",
+	.dev_name	= "s5pv210-core",
 };
 
-static struct sys_device s5pv210_sysdev = {
-	.cls	= &s5pv210_sysclass,
+static struct device s5pv210_dev = {
+	.bus	= &s5pv210_subsys,
 };
 
 static int __init s5pv210_core_init(void)
 {
-	return sysdev_class_register(&s5pv210_sysclass);
+	return subsys_system_register(&s5pv210_subsys, NULL);
 }
-
 core_initcall(s5pv210_core_init);
 
 int __init s5pv210_init(void)
@@ -196,8 +251,31 @@
 	/* set idle function */
 	pm_idle = s5pv210_idle;
 
-	/* set sw_reset function */
-	s5p_reset_hook = s5pv210_sw_reset;
+	return device_register(&s5pv210_dev);
+}
 
-	return sysdev_register(&s5pv210_sysdev);
+static struct s3c24xx_uart_clksrc s5pv210_serial_clocks[] = {
+	[0] = {
+		.name		= "pclk",
+		.divisor	= 1,
+		.min_baud	= 0,
+		.max_baud	= 0,
+	},
+};
+
+/* uart registration process */
+
+void __init s5pv210_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+	struct s3c2410_uartcfg *tcfg = cfg;
+	u32 ucnt;
+
+	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
+		if (!tcfg->clocks) {
+			tcfg->clocks = s5pv210_serial_clocks;
+			tcfg->clocks_size = ARRAY_SIZE(s5pv210_serial_clocks);
+		}
+	}
+
+	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
 }
diff --git a/arch/arm/mach-s5pv210/common.h b/arch/arm/mach-s5pv210/common.h
new file mode 100644
index 0000000..6ed2af5
--- /dev/null
+++ b/arch/arm/mach-s5pv210/common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Common Header for S5PV210 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S5PV210_COMMON_H
+#define __ARCH_ARM_MACH_S5PV210_COMMON_H
+
+void s5pv210_init_io(struct map_desc *mach_desc, int size);
+void s5pv210_init_irq(void);
+
+void s5pv210_register_clocks(void);
+void s5pv210_setup_clocks(void);
+
+void s5pv210_restart(char mode, const char *cmd);
+
+#ifdef CONFIG_CPU_S5PV210
+
+extern  int s5pv210_init(void);
+extern void s5pv210_map_io(void);
+extern void s5pv210_init_clocks(int xtal);
+extern void s5pv210_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5pv210_init_clocks NULL
+#define s5pv210_init_uarts NULL
+#define s5pv210_map_io NULL
+#define s5pv210_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S5PV210_COMMON_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/entry-macro.S b/arch/arm/mach-s5pv210/include/mach/entry-macro.S
index 3aa41ac..bebca1b 100644
--- a/arch/arm/mach-s5pv210/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5pv210/include/mach/entry-macro.S
@@ -10,45 +10,8 @@
  * published by the Free Software Foundation.
 */
 
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-#include <plat/irqs.h>
-
 	.macro	disable_fiq
 	.endm
 
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =VA_VIC0
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	@ check the vic0
-	mov	\irqnr, # S5P_IRQ_OFFSET + 31
-	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
-	teq	\irqstat, #0
-
-	@ otherwise try vic1
-	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #32
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	@ otherwise try vic2
-	addeq	\tmp, \base, #(VA_VIC2 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #32
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	@ otherwise try vic3
-	addeq	\tmp, \base, #(VA_VIC3 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #32
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	clzne	\irqstat, \irqstat
-	subne	\irqnr, \irqnr, \irqstat
-	.endm
diff --git a/arch/arm/mach-s5pv210/include/mach/system.h b/arch/arm/mach-s5pv210/include/mach/system.h
index af8a200..bf288ce 100644
--- a/arch/arm/mach-s5pv210/include/mach/system.h
+++ b/arch/arm/mach-s5pv210/include/mach/system.h
@@ -13,8 +13,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H __FILE__
 
-#include <plat/system-reset.h>
-
 static void arch_idle(void)
 {
 	/* nothing here yet */
diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
deleted file mode 100644
index a6c659d..0000000
--- a/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Based on arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * S5PV210 vmalloc definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H __FILE__
-
-#define VMALLOC_END	0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv210/init.c b/arch/arm/mach-s5pv210/init.c
deleted file mode 100644
index 4865ae2..0000000
--- a/arch/arm/mach-s5pv210/init.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/init.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5pv210.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc s5pv210_serial_clocks[] = {
-	[0] = {
-		.name		= "pclk",
-		.divisor	= 1,
-		.min_baud	= 0,
-		.max_baud	= 0,
-	},
-};
-
-/* uart registration process */
-void __init s5pv210_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
-	struct s3c2410_uartcfg *tcfg = cfg;
-	u32 ucnt;
-
-	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
-		if (!tcfg->clocks) {
-			tcfg->clocks = s5pv210_serial_clocks;
-			tcfg->clocks_size = ARRAY_SIZE(s5pv210_serial_clocks);
-		}
-	}
-
-	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 5811a96..6f7dfe9 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -22,6 +22,7 @@
 #include <linux/input.h>
 #include <linux/gpio.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
@@ -32,7 +33,6 @@
 
 #include <plat/gpio-cfg.h>
 #include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/fb.h>
@@ -41,6 +41,8 @@
 #include <plat/s5p-time.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define AQUILA_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -644,7 +646,7 @@
 
 static void __init aquila_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	s5pv210_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(aquila_uartcfgs, ARRAY_SIZE(aquila_uartcfgs));
 	s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -680,7 +682,9 @@
 	   Kyungmin Park <kyungmin.park@samsung.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s5pv210_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= aquila_map_io,
 	.init_machine	= aquila_machine_init,
 	.timer		= &s5p_timer,
+	.restart	= s5pv210_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 15edcae..12c6937 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -27,6 +27,7 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
@@ -37,7 +38,6 @@
 
 #include <plat/gpio-cfg.h>
 #include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/fb.h>
@@ -54,6 +54,8 @@
 #include <media/s5p_fimc.h>
 #include <media/noon010pc30.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define GONI_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -890,7 +892,7 @@
 
 static void __init goni_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	s5pv210_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(goni_uartcfgs, ARRAY_SIZE(goni_uartcfgs));
 	s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -956,8 +958,10 @@
 	/* Maintainers: Kyungmin Park <kyungmin.park@samsung.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s5pv210_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= goni_map_io,
 	.init_machine	= goni_machine_init,
 	.timer		= &s5p_timer,
 	.reserve	= &goni_reserve,
+	.restart	= s5pv210_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index f7266bb..b323983 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -13,8 +13,9 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 #include <linux/i2c.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
@@ -24,7 +25,6 @@
 #include <mach/regs-clock.h>
 
 #include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/ata.h>
@@ -32,6 +32,8 @@
 #include <plat/pm.h>
 #include <plat/s5p-time.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKC110_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -109,7 +111,7 @@
 
 static void __init smdkc110_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	s5pv210_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(smdkv210_uartcfgs, ARRAY_SIZE(smdkv210_uartcfgs));
 	s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -138,7 +140,9 @@
 	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s5pv210_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdkc110_map_io,
 	.init_machine	= smdkc110_machine_init,
 	.timer		= &s5p_timer,
+	.restart	= s5pv210_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index a9106c3..b4021dd 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -13,13 +13,14 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/dm9000.h>
 #include <linux/fb.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/pwm_backlight.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
@@ -33,7 +34,6 @@
 #include <plat/regs-serial.h>
 #include <plat/regs-srom.h>
 #include <plat/gpio-cfg.h>
-#include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/adc.h>
@@ -47,6 +47,8 @@
 #include <plat/backlight.h>
 #include <plat/regs-fb-v4.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define SMDKV210_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -273,11 +275,12 @@
 
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
 	.pwm_id = 3,
+	.pwm_period_ns = 1000,
 };
 
 static void __init smdkv210_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	s5pv210_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(smdkv210_uartcfgs, ARRAY_SIZE(smdkv210_uartcfgs));
 	s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
@@ -315,7 +318,9 @@
 	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s5pv210_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= smdkv210_map_io,
 	.init_machine	= smdkv210_machine_init,
 	.timer		= &s5p_timer,
+	.restart	= s5pv210_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-torbreck.c b/arch/arm/mach-s5pv210/mach-torbreck.c
index 97cc066..74e99bc 100644
--- a/arch/arm/mach-s5pv210/mach-torbreck.c
+++ b/arch/arm/mach-s5pv210/mach-torbreck.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/setup.h>
@@ -23,12 +24,13 @@
 #include <mach/regs-clock.h>
 
 #include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/iic.h>
 #include <plat/s5p-time.h>
 
+#include "common.h"
+
 /* Following are default values for UCON, ULCON and UFCON UART registers */
 #define TORBRECK_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
 				 S3C2410_UCON_RXILEVEL |	\
@@ -102,7 +104,7 @@
 
 static void __init torbreck_map_io(void)
 {
-	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+	s5pv210_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(torbreck_uartcfgs, ARRAY_SIZE(torbreck_uartcfgs));
 	s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -127,7 +129,9 @@
 	/* Maintainer: Hyunchul Ko <ghcstop@gmail.com> */
 	.atag_offset	= 0x100,
 	.init_irq	= s5pv210_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.map_io		= torbreck_map_io,
 	.init_machine	= torbreck_machine_init,
 	.timer		= &s5p_timer,
+	.restart	= s5pv210_restart,
 MACHINE_END
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index f149d27..677c71c 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -133,7 +133,7 @@
 	s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
 }
 
-static int s5pv210_pm_add(struct sys_device *sysdev)
+static int s5pv210_pm_add(struct device *dev)
 {
 	pm_cpu_prep = s5pv210_pm_prepare;
 	pm_cpu_sleep = s5pv210_cpu_suspend;
@@ -141,13 +141,15 @@
 	return 0;
 }
 
-static struct sysdev_driver s5pv210_pm_driver = {
-	.add		= s5pv210_pm_add,
+static struct subsys_interface s5pv210_pm_interface = {
+	.name		= "s5pv210_pm",
+	.subsys		= &s5pv210_subsys,
+	.add_dev	= s5pv210_pm_add,
 };
 
 static __init int s5pv210_pm_drvinit(void)
 {
-	return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver);
+	return subsys_interface_register(&s5pv210_pm_interface);
 }
 arch_initcall(s5pv210_pm_drvinit);
 
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 3dd133f..6b93e20 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -455,4 +455,5 @@
 #ifdef CONFIG_SA1111
 	.dma_zone_size	= SZ_1M,
 #endif
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index bda83e1..b07a2c0 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -309,4 +309,5 @@
 #ifdef CONFIG_SA1111
 	.dma_zone_size	= SZ_1M,
 #endif
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 7f3da4b..11bb6d0 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -139,4 +139,5 @@
 	.init_irq	= cerf_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= cerf_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 2965cc9..b9060e2 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -387,4 +387,5 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= collie_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 5fa5ae1..bb10ee2 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -126,6 +126,17 @@
 	PMCR = PMCR_SF;
 }
 
+void sa11x0_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		/* Jump into ROM at address 0 */
+		soft_restart(0);
+	} else {
+		/* Use on-chip reset capability */
+		RSRR = RSRR_SWR;
+	}
+}
+
 static void sa11x0_register_device(struct platform_device *dev, void *data)
 {
 	int err;
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index b7a9a60..33268cf 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -10,6 +10,7 @@
 extern void __init sa1100_map_io(void);
 extern void __init sa1100_init_irq(void);
 extern void __init sa1100_init_gpio(void);
+extern void sa11x0_restart(char, const char *);
 
 #define SET_BANK(__nr,__start,__size) \
 	mi->bank[__nr].start = (__start), \
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index b30733a..1e6b3c1 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -89,5 +89,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= h3100_mach_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index 6fd324d..6b58e74 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -130,5 +130,6 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= h3600_mach_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
 
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 30f4a55..c01bb36 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -200,4 +200,5 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= hackkit_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/include/mach/system.h b/arch/arm/mach-sa1100/include/mach/system.h
index ba9da9f..e17b208 100644
--- a/arch/arm/mach-sa1100/include/mach/system.h
+++ b/arch/arm/mach-sa1100/include/mach/system.h
@@ -3,20 +3,7 @@
  *
  * Copyright (c) 1999 Nicolas Pitre <nico@fluxnic.net>
  */
-#include <mach/hardware.h>
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	if (mode == 's') {
-		/* Jump into ROM at address 0 */
-		cpu_reset(0);
-	} else {
-		/* Use on-chip reset capability */
-		RSRR = RSRR_SWR;
-	}
-}
diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h
deleted file mode 100644
index b3d0023..0000000
--- a/arch/arm/mach-sa1100/include/mach/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/vmalloc.h
- */
-#define VMALLOC_END       (0xe8000000UL)
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index 77198fe..ee121d6 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -373,4 +373,5 @@
 #ifdef CONFIG_SA1111
 	.dma_zone_size	= SZ_1M,
 #endif
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index 5bc59d0..af4e276 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -66,4 +66,5 @@
 	.init_irq	= sa1100_init_irq,
 	.init_machine	= lart_init,
 	.timer		= &sa1100_timer,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c
index 032f388..85f6ee6 100644
--- a/arch/arm/mach-sa1100/nanoengine.c
+++ b/arch/arm/mach-sa1100/nanoengine.c
@@ -19,6 +19,7 @@
 
 #include <asm/mach-types.h>
 #include <asm/setup.h>
+#include <asm/page.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
@@ -116,4 +117,5 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= nanoengine_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 65161f2..9307df0 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -150,4 +150,5 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine   = pleb_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index 1cccbf5..318b2b7 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -87,4 +87,5 @@
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= shannon_init,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 4790f3f..e17c04d 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -396,4 +396,5 @@
 	.map_io		= simpad_map_io,
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
+	.restart	= sa11x0_restart,
 MACHINE_END
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index fa66024..69e3353 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/sched.h>	/* just for sched_clock() - funny that */
 #include <linux/timex.h>
 #include <linux/clockchips.h>
 
@@ -20,29 +19,9 @@
 #include <asm/sched_clock.h>
 #include <mach/hardware.h>
 
-/*
- * This is the SA11x0 sched_clock implementation.
- */
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 3.6864MHz,
- * NSEC_PER_SEC, 60).
- * This gives a resolution of about 271ns and a wrap period of about 19min.
- */
-#define SC_MULT		2275555556u
-#define SC_SHIFT	23
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace sa1100_read_sched_clock(void)
 {
-	u32 cyc = OSCR;
-	return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-static void notrace sa1100_update_sched_clock(void)
-{
-	u32 cyc = OSCR;
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return OSCR;
 }
 
 #define MIN_OSCR_DELTA 2
@@ -109,8 +88,7 @@
 	OIER = 0;
 	OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
 
-	init_fixed_sched_clock(&cd, sa1100_update_sched_clock, 32,
-			       3686400, SC_MULT, SC_SHIFT);
+	setup_sched_clock(sa1100_read_sched_clock, 32, 3686400);
 
 	clockevents_calc_mult_shift(&ckevt_sa1100_osmr0, 3686400, 4);
 	ckevt_sa1100_osmr0.max_delta_ns =
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index feda3ca..a851c25 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -26,10 +26,9 @@
 #define ROMCARD_SIZE            0x08000000
 #define ROMCARD_START           0x10000000
 
-void arch_reset(char mode, const char *cmd)
+static void shark_restart(char mode, const char *cmd)
 {
         short temp;
-        local_irq_disable();
         /* Reset the Machine via pc[3] of the sequoia chipset */
         outw(0x09,0x24);
         temp=inw(0x26);
@@ -157,4 +156,5 @@
 	.init_irq	= shark_init_irq,
 	.timer		= &shark_timer,
 	.dma_zone_size	= SZ_4M,
+	.restart	= shark_restart,
 MACHINE_END
diff --git a/arch/arm/mach-shark/include/mach/system.h b/arch/arm/mach-shark/include/mach/system.h
index 21c373b..1b2f2c5 100644
--- a/arch/arm/mach-shark/include/mach/system.h
+++ b/arch/arm/mach-shark/include/mach/system.h
@@ -6,9 +6,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-/* Found in arch/mach-shark/core.c */
-extern void arch_reset(char mode, const char *cmd);
-
 static inline void arch_idle(void)
 {
 }
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
deleted file mode 100644
index b10df98..0000000
--- a/arch/arm/mach-shark/include/mach/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/vmalloc.h
- */
-#define VMALLOC_END       0xd0000000UL
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 737bdc6..5ca1f9d 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -28,7 +28,6 @@
 obj-$(CONFIG_ARCH_SH7367)	+= entry-intc.o
 obj-$(CONFIG_ARCH_SH7377)	+= entry-intc.o
 obj-$(CONFIG_ARCH_SH7372)	+= entry-intc.o
-obj-$(CONFIG_ARCH_SH73A0)	+= entry-gic.o
 
 # PM objects
 obj-$(CONFIG_SUSPEND)		+= suspend.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index b862e9f..a4e6ca0 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -466,8 +466,6 @@
 static void __init ag5evm_map_io(void)
 {
 	iotable_init(ag5evm_io_desc, ARRAY_SIZE(ag5evm_io_desc));
-	/* DMA memory at 0xf6000000 - 0xffdfffff */
-	init_consistent_dma_size(158 << 20);
 
 	/* setup early devices and console here as well */
 	sh73a0_add_early_devices();
@@ -607,8 +605,9 @@
 
 MACHINE_START(AG5EVM, "ag5evm")
 	.map_io		= ag5evm_map_io,
+	.nr_irqs	= NR_IRQS_LEGACY,
 	.init_irq	= sh73a0_init_irq,
-	.handle_irq	= shmobile_handle_irq_gic,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= ag5evm_init,
 	.timer		= &ag5evm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 4c865ec..6a6f9f7 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1172,8 +1172,6 @@
 static void __init ap4evb_map_io(void)
 {
 	iotable_init(ap4evb_io_desc, ARRAY_SIZE(ap4evb_io_desc));
-	/* DMA memory at 0xf6000000 - 0xffdfffff */
-	init_consistent_dma_size(158 << 20);
 
 	/* setup early devices and console here as well */
 	sh7372_add_early_devices();
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 8b620bf..72d5572 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -261,8 +261,6 @@
 static void __init g3evm_map_io(void)
 {
 	iotable_init(g3evm_io_desc, ARRAY_SIZE(g3evm_io_desc));
-	/* DMA memory at 0xf6000000 - 0xffdfffff */
-	init_consistent_dma_size(158 << 20);
 
 	/* setup early devices and console here as well */
 	sh7367_add_early_devices();
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 7719ddc..2220b88 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -275,8 +275,6 @@
 static void __init g4evm_map_io(void)
 {
 	iotable_init(g4evm_io_desc, ARRAY_SIZE(g4evm_io_desc));
-	/* DMA memory at 0xf6000000 - 0xffdfffff */
-	init_consistent_dma_size(158 << 20);
 
 	/* setup early devices and console here as well */
 	sh7377_add_early_devices();
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index bd9a784..857ceee 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -33,6 +33,7 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@
 		.flags		= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start		= gic_spi(33), /* PINTA2 @ PORT144 */
+		.start		= SH73A0_PINT0_IRQ(2), /* PINTA2 */
 		.flags		= IORESOURCE_IRQ,
 	},
 };
@@ -157,10 +158,6 @@
 #define GPIO_LED(n, g) { .name = n, .gpio = g }
 
 static struct gpio_led gpio_leds[] = {
-	GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
-	GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
-	GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
-	GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
 	GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
 	GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
 	GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@
 	},
 };
 
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+	.name		= "V2513",
+	.pin_gpio_fn	= GPIO_FN_TPU1TO2,
+	.pin_gpio	= GPIO_PORT153,
+	.channel_offset = 0x90,
+	.timer_bit = 2,
+	.max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+	[0] = {
+		.name	= "TPU12",
+		.start	= 0xe6610090,
+		.end	= 0xe66100b5,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device leds_tpu12_device = {
+	.name = "leds-renesas-tpu",
+	.id = 12,
+	.dev = {
+		.platform_data  = &led_renesas_tpu12_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(tpu12_resources),
+	.resource	= tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+	.name		= "V2514",
+	.pin_gpio_fn	= GPIO_FN_TPU4TO1,
+	.pin_gpio	= GPIO_PORT199,
+	.channel_offset = 0x50,
+	.timer_bit = 1,
+	.max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+	[0] = {
+		.name	= "TPU41",
+		.start	= 0xe6640050,
+		.end	= 0xe6640075,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device leds_tpu41_device = {
+	.name = "leds-renesas-tpu",
+	.id = 41,
+	.dev = {
+		.platform_data  = &led_renesas_tpu41_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(tpu41_resources),
+	.resource	= tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+	.name		= "V2515",
+	.pin_gpio_fn	= GPIO_FN_TPU2TO1,
+	.pin_gpio	= GPIO_PORT197,
+	.channel_offset = 0x50,
+	.timer_bit = 1,
+	.max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+	[0] = {
+		.name	= "TPU21",
+		.start	= 0xe6620050,
+		.end	= 0xe6620075,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device leds_tpu21_device = {
+	.name = "leds-renesas-tpu",
+	.id = 21,
+	.dev = {
+		.platform_data  = &led_renesas_tpu21_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(tpu21_resources),
+	.resource	= tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+	.name		= "KEYLED",
+	.pin_gpio_fn	= GPIO_FN_TPU3TO0,
+	.pin_gpio	= GPIO_PORT163,
+	.channel_offset = 0x10,
+	.timer_bit = 0,
+	.max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+	[0] = {
+		.name	= "TPU30",
+		.start	= 0xe6630010,
+		.end	= 0xe6630035,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device leds_tpu30_device = {
+	.name = "leds-renesas-tpu",
+	.id = 30,
+	.dev = {
+		.platform_data  = &led_renesas_tpu30_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(tpu30_resources),
+	.resource	= tpu30_resources,
+};
+
 /* MMCIF */
 static struct resource mmcif_resources[] = {
 	[0] = {
@@ -291,6 +401,10 @@
 	&keysc_device,
 	&gpio_keys_device,
 	&gpio_leds_device,
+	&leds_tpu12_device,
+	&leds_tpu41_device,
+	&leds_tpu21_device,
+	&leds_tpu30_device,
 	&mmcif_device,
 	&sdhi0_device,
 	&sdhi1_device,
@@ -317,18 +431,6 @@
 	shmobile_setup_console();
 }
 
-#define PINTER0A	0xe69000a0
-#define PINTCR0A	0xe69000b0
-
-void __init kota2_init_irq(void)
-{
-	sh73a0_init_irq();
-
-	/* setup PINT: enable PINTA2 as active low */
-	__raw_writel(1 << 29, PINTER0A);
-	__raw_writew(2 << 10, PINTCR0A);
-}
-
 static void __init kota2_init(void)
 {
 	sh73a0_pinmux_init();
@@ -447,8 +549,9 @@
 
 MACHINE_START(KOTA2, "kota2")
 	.map_io		= kota2_map_io,
-	.init_irq	= kota2_init_irq,
-	.handle_irq	= shmobile_handle_irq_gic,
+	.nr_irqs	= NR_IRQS_LEGACY,
+	.init_irq	= sh73a0_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= kota2_init,
 	.timer		= &kota2_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 9c5e598..ed52566 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1390,8 +1390,6 @@
 static void __init mackerel_map_io(void)
 {
 	iotable_init(mackerel_io_desc, ARRAY_SIZE(mackerel_io_desc));
-	/* DMA memory at 0xf6000000 - 0xffdfffff */
-	init_consistent_dma_size(158 << 20);
 
 	/* setup early devices and console here as well */
 	sh7372_add_early_devices();
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 61a846b..1370a89 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -113,6 +113,12 @@
 	.ops		= &main_clk_ops,
 };
 
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+	.ops		= &div2_clk_ops,
+	.parent		= &main_clk,
+};
+
 /* PLL0, PLL1, PLL2, PLL3 */
 static unsigned long pll_recalc(struct clk *clk)
 {
@@ -181,6 +187,7 @@
 	&extal1_div2_clk,
 	&extal2_div2_clk,
 	&main_clk,
+	&main_div2_clk,
 	&pll0_clk,
 	&pll1_clk,
 	&pll2_clk,
@@ -243,7 +250,7 @@
 	[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
 	[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
 	[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
-	[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+	[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
 	[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
 	[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
 	[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@
 	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
 	MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
 	MSTP314, MSTP313, MSTP312, MSTP311,
+	MSTP303, MSTP302, MSTP301, MSTP300,
 	MSTP411, MSTP410, MSTP403,
 	MSTP_NR };
 
@@ -301,6 +309,10 @@
 	[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
 	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
 	[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+	[MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+	[MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+	[MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+	[MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
 	[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
 	[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
 	[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@
 	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
 	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+	CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+	CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+	CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+	CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
 	CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
diff --git a/arch/arm/mach-shmobile/entry-gic.S b/arch/arm/mach-shmobile/entry-gic.S
deleted file mode 100644
index e20239b..0000000
--- a/arch/arm/mach-shmobile/entry-gic.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * ARM Interrupt demux handler using GIC
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2011 Paul Mundt
- * Copyright (C) 2010 - 2011 Renesas Solutions Corp.
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/assembler.h>
-#include <asm/entry-macro-multi.S>
-#include <asm/hardware/gic.h>
-#include <asm/hardware/entry-macro-gic.S>
-
-	arch_irq_handler shmobile_handle_irq_gic
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 834bd6c..be78a2c 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -7,7 +7,6 @@
 struct clk;
 extern int clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
-extern void shmobile_handle_irq_gic(struct pt_regs *);
 extern struct platform_suspend_ops shmobile_suspend_ops;
 struct cpuidle_driver;
 extern void (*shmobile_cpuidle_modes[])(void);
@@ -35,8 +34,8 @@
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_resume_core_standby_a3sm(void);
-extern int sh7372_do_idle_a3sm(unsigned long unused);
+extern void sh7372_resume_core_standby_sysc(void);
+extern int sh7372_do_idle_sysc(unsigned long sleep_mode);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;
 
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S b/arch/arm/mach-shmobile/include/mach/entry-macro.S
index 8d4a416..2a57b29 100644
--- a/arch/arm/mach-shmobile/include/mach/entry-macro.S
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S
@@ -18,14 +18,5 @@
 	.macro  disable_fiq
 	.endm
 
-	.macro  get_irqnr_preamble, base, tmp
-	.endm
-
-	.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-	.endm
-
-	.macro  test_for_ipi, irqnr, irqstat, base, tmp
-	.endm
-
 	.macro  arch_ret_to_user, tmp1, tmp2
 	.endm
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 7bf0890..de795b4 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -12,8 +12,6 @@
 
 #include <linux/kernel.h>
 #include <linux/errno.h>
-
-#define ARCH_NR_GPIOS 1024
 #include <linux/sh_pfc.h>
 
 #ifdef CONFIG_GPIOLIB
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 84532f9..8254ab8 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -480,11 +480,10 @@
 struct sh7372_pm_domain {
 	struct generic_pm_domain genpd;
 	struct dev_power_governor *gov;
-	void (*suspend)(void);
+	int (*suspend)(void);
 	void (*resume)(void);
 	unsigned int bit_shift;
 	bool no_debug;
-	bool stay_on;
 };
 
 static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
@@ -499,6 +498,7 @@
 extern struct sh7372_pm_domain sh7372_a4r;
 extern struct sh7372_pm_domain sh7372_a3rv;
 extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a4s;
 extern struct sh7372_pm_domain sh7372_a3sp;
 extern struct sh7372_pm_domain sh7372_a3sg;
 
@@ -515,5 +515,7 @@
 
 extern void sh7372_intcs_suspend(void);
 extern void sh7372_intcs_resume(void);
+extern void sh7372_intca_suspend(void);
+extern void sh7372_intca_resume(void);
 
 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/system.h b/arch/arm/mach-shmobile/include/mach/system.h
index 76a687e..956ac18 100644
--- a/arch/arm/mach-shmobile/include/mach/system.h
+++ b/arch/arm/mach-shmobile/include/mach/system.h
@@ -8,7 +8,7 @@
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-	cpu_reset(0);
+	soft_restart(0);
 }
 
 #endif
diff --git a/arch/arm/mach-shmobile/include/mach/vmalloc.h b/arch/arm/mach-shmobile/include/mach/vmalloc.h
deleted file mode 100644
index 2b8fd8b..0000000
--- a/arch/arm/mach-shmobile/include/mach/vmalloc.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_MACH_VMALLOC_H
-#define __ASM_MACH_VMALLOC_H
-
-/* Vmalloc at ... - 0xe5ffffff */
-#define VMALLOC_END 0xe6000000UL
-
-#endif /* __ASM_MACH_VMALLOC_H */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 2d8856d..89afcab 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -535,6 +535,7 @@
 static struct intc_desc intcs_desc __initdata = {
 	.name = "sh7372-intcs",
 	.force_enable = ENABLED_INTCS,
+	.skip_syscore_suspend = true,
 	.resource = intcs_resources,
 	.num_resources = ARRAY_SIZE(intcs_resources),
 	.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
@@ -611,3 +612,52 @@
 	for (k = 0x80; k <= 0x9c; k += 4)
 		__raw_writeb(ffd5[k], intcs_ffd5 + k);
 }
+
+static unsigned short e694[0x200];
+static unsigned short e695[0x200];
+
+void sh7372_intca_suspend(void)
+{
+	int k;
+
+	for (k = 0x00; k <= 0x38; k += 4)
+		e694[k] = __raw_readw(0xe6940000 + k);
+
+	for (k = 0x80; k <= 0xb4; k += 4)
+		e694[k] = __raw_readb(0xe6940000 + k);
+
+	for (k = 0x180; k <= 0x1b4; k += 4)
+		e694[k] = __raw_readb(0xe6940000 + k);
+
+	for (k = 0x00; k <= 0x50; k += 4)
+		e695[k] = __raw_readw(0xe6950000 + k);
+
+	for (k = 0x80; k <= 0xa8; k += 4)
+		e695[k] = __raw_readb(0xe6950000 + k);
+
+	for (k = 0x180; k <= 0x1a8; k += 4)
+		e695[k] = __raw_readb(0xe6950000 + k);
+}
+
+void sh7372_intca_resume(void)
+{
+	int k;
+
+	for (k = 0x00; k <= 0x38; k += 4)
+		__raw_writew(e694[k], 0xe6940000 + k);
+
+	for (k = 0x80; k <= 0xb4; k += 4)
+		__raw_writeb(e694[k], 0xe6940000 + k);
+
+	for (k = 0x180; k <= 0x1b4; k += 4)
+		__raw_writeb(e694[k], 0xe6940000 + k);
+
+	for (k = 0x00; k <= 0x50; k += 4)
+		__raw_writew(e695[k], 0xe6950000 + k);
+
+	for (k = 0x80; k <= 0xa8; k += 4)
+		__raw_writeb(e695[k], 0xe6950000 + k);
+
+	for (k = 0x180; k <= 0x1a8; k += 4)
+		__raw_writeb(e695[k], 0xe6950000 + k);
+}
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 34bbcbf..77b8fc1 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -82,11 +82,12 @@
 	struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
 	unsigned int mask = 1 << sh7372_pd->bit_shift;
 
-	if (sh7372_pd->suspend)
-		sh7372_pd->suspend();
+	if (sh7372_pd->suspend) {
+		int ret = sh7372_pd->suspend();
 
-	if (sh7372_pd->stay_on)
-		return 0;
+		if (ret)
+			return ret;
+	}
 
 	if (__raw_readl(PSTR) & mask) {
 		unsigned int retry_count;
@@ -101,8 +102,8 @@
 	}
 
 	if (!sh7372_pd->no_debug)
-		pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
-			 mask, __raw_readl(PSTR));
+		pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
+			 genpd->name, mask, __raw_readl(PSTR));
 
 	return 0;
 }
@@ -113,9 +114,6 @@
 	unsigned int retry_count;
 	int ret = 0;
 
-	if (sh7372_pd->stay_on)
-		goto out;
-
 	if (__raw_readl(PSTR) & mask)
 		goto out;
 
@@ -133,8 +131,8 @@
 		ret = -EIO;
 
 	if (!sh7372_pd->no_debug)
-		pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
-			 mask, __raw_readl(PSTR));
+		pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
+			 sh7372_pd->genpd.name, mask, __raw_readl(PSTR));
 
  out:
 	if (ret == 0 && sh7372_pd->resume && do_resume)
@@ -148,35 +146,60 @@
 	 return __pd_power_up(to_sh7372_pd(genpd), true);
 }
 
-static void sh7372_a4r_suspend(void)
+static int sh7372_a4r_suspend(void)
 {
 	sh7372_intcs_suspend();
 	__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
+	return 0;
 }
 
 static bool pd_active_wakeup(struct device *dev)
 {
-	return true;
+	bool (*active_wakeup)(struct device *dev);
+
+	active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
+	return active_wakeup ? active_wakeup(dev) : true;
 }
 
-static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain)
+static int sh7372_stop_dev(struct device *dev)
 {
-	return false;
+	int (*stop)(struct device *dev);
+
+	stop = dev_gpd_data(dev)->ops.stop;
+	if (stop) {
+		int ret = stop(dev);
+		if (ret)
+			return ret;
+	}
+	return pm_clk_suspend(dev);
 }
 
-struct dev_power_governor sh7372_always_on_gov = {
-	.power_down_ok = sh7372_power_down_forbidden,
-};
+static int sh7372_start_dev(struct device *dev)
+{
+	int (*start)(struct device *dev);
+	int ret;
+
+	ret = pm_clk_resume(dev);
+	if (ret)
+		return ret;
+
+	start = dev_gpd_data(dev)->ops.start;
+	if (start)
+		ret = start(dev);
+
+	return ret;
+}
 
 void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
 {
 	struct generic_pm_domain *genpd = &sh7372_pd->genpd;
+	struct dev_power_governor *gov = sh7372_pd->gov;
 
-	pm_genpd_init(genpd, sh7372_pd->gov, false);
-	genpd->stop_device = pm_clk_suspend;
-	genpd->start_device = pm_clk_resume;
+	pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
+	genpd->dev_ops.stop = sh7372_stop_dev;
+	genpd->dev_ops.start = sh7372_start_dev;
+	genpd->dev_ops.active_wakeup = pd_active_wakeup;
 	genpd->dev_irq_safe = true;
-	genpd->active_wakeup = pd_active_wakeup;
 	genpd->power_off = pd_power_down;
 	genpd->power_on = pd_power_up;
 	__pd_power_up(sh7372_pd, false);
@@ -199,48 +222,73 @@
 }
 
 struct sh7372_pm_domain sh7372_a4lc = {
+	.genpd.name = "A4LC",
 	.bit_shift = 1,
 };
 
 struct sh7372_pm_domain sh7372_a4mp = {
+	.genpd.name = "A4MP",
 	.bit_shift = 2,
 };
 
 struct sh7372_pm_domain sh7372_d4 = {
+	.genpd.name = "D4",
 	.bit_shift = 3,
 };
 
 struct sh7372_pm_domain sh7372_a4r = {
+	.genpd.name = "A4R",
 	.bit_shift = 5,
-	.gov = &sh7372_always_on_gov,
 	.suspend = sh7372_a4r_suspend,
 	.resume = sh7372_intcs_resume,
-	.stay_on = true,
 };
 
 struct sh7372_pm_domain sh7372_a3rv = {
+	.genpd.name = "A3RV",
 	.bit_shift = 6,
 };
 
 struct sh7372_pm_domain sh7372_a3ri = {
+	.genpd.name = "A3RI",
 	.bit_shift = 8,
 };
 
-struct sh7372_pm_domain sh7372_a3sp = {
-	.bit_shift = 11,
-	.gov = &sh7372_always_on_gov,
-	.no_debug = true,
-};
-
-static void sh7372_a3sp_init(void)
+static int sh7372_a4s_suspend(void)
 {
-	/* serial consoles make use of SCIF hardware located in A3SP,
-	 * keep such power domain on if "no_console_suspend" is set.
+	/*
+	 * The A4S domain contains the CPU core and therefore it should
+	 * only be turned off if the CPU is not in use.
 	 */
-	sh7372_a3sp.stay_on = !console_suspend_enabled;
+	return -EBUSY;
 }
 
+struct sh7372_pm_domain sh7372_a4s = {
+	.genpd.name = "A4S",
+	.bit_shift = 10,
+	.gov = &pm_domain_always_on_gov,
+	.no_debug = true,
+	.suspend = sh7372_a4s_suspend,
+};
+
+static int sh7372_a3sp_suspend(void)
+{
+	/*
+	 * Serial consoles make use of SCIF hardware located in A3SP,
+	 * keep such power domain on if "no_console_suspend" is set.
+	 */
+	return console_suspend_enabled ? -EBUSY : 0;
+}
+
+struct sh7372_pm_domain sh7372_a3sp = {
+	.genpd.name = "A3SP",
+	.bit_shift = 11,
+	.gov = &pm_domain_always_on_gov,
+	.no_debug = true,
+	.suspend = sh7372_a3sp_suspend,
+};
+
 struct sh7372_pm_domain sh7372_a3sg = {
+	.genpd.name = "A3SG",
 	.bit_shift = 13,
 };
 
@@ -257,11 +305,16 @@
 	return 0;
 }
 
-static void sh7372_enter_core_standby(void)
+static void sh7372_set_reset_vector(unsigned long address)
 {
 	/* set reset vector, translate 4k */
-	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(address, SBAR);
 	__raw_writel(0, APARMBAREA);
+}
+
+static void sh7372_enter_core_standby(void)
+{
+	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
 
 	/* enter sleep mode with SYSTBCR to 0x10 */
 	__raw_writel(0x10, SYSTBCR);
@@ -274,27 +327,22 @@
 #endif
 
 #ifdef CONFIG_SUSPEND
-static void sh7372_enter_a3sm_common(int pllc0_on)
+static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
 {
-	/* set reset vector, translate 4k */
-	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
-	__raw_writel(0, APARMBAREA);
-
 	if (pllc0_on)
 		__raw_writel(0, PLLC01STPCR);
 	else
 		__raw_writel(1 << 28, PLLC01STPCR);
 
-	__raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
 	__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
-	cpu_suspend(0, sh7372_do_idle_a3sm);
+	cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
 	__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
 
 	 /* disable reset vector translation */
 	__raw_writel(0, SBAR);
 }
 
-static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
 {
 	unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
 	unsigned long msk, msk2;
@@ -382,7 +430,7 @@
 	*irqcr2p = irqcr2;
 }
 
-static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
 {
 	u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
 	unsigned long tmp;
@@ -415,6 +463,22 @@
 	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
 	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
 }
+
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
+	sh7372_enter_sysc(pllc0_on, 1 << 12);
+}
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+	sh7372_intca_suspend();
+	memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+	sh7372_set_reset_vector(SMFRAM);
+	sh7372_enter_sysc(pllc0_on, 1 << 10);
+	sh7372_intca_resume();
+}
+
 #endif
 
 #ifdef CONFIG_CPU_IDLE
@@ -448,14 +512,20 @@
 	unsigned long msk, msk2;
 
 	/* check active clocks to determine potential wakeup sources */
-	if (sh7372_a3sm_valid(&msk, &msk2)) {
-
+	if (sh7372_sysc_valid(&msk, &msk2)) {
 		/* convert INTC mask and sense to SYSC mask and sense */
-		sh7372_setup_a3sm(msk, msk2);
+		sh7372_setup_sysc(msk, msk2);
 
-		/* enter A3SM sleep with PLLC0 off */
-		pr_debug("entering A3SM\n");
-		sh7372_enter_a3sm_common(0);
+		if (!console_suspend_enabled &&
+		    sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
+			/* enter A4S sleep with PLLC0 off */
+			pr_debug("entering A4S\n");
+			sh7372_enter_a4s_common(0);
+		} else {
+			/* enter A3SM sleep with PLLC0 off */
+			pr_debug("entering A3SM\n");
+			sh7372_enter_a3sm_common(0);
+		}
 	} else {
 		/* default to Core Standby that supports all wakeup sources */
 		pr_debug("entering Core Standby\n");
@@ -464,9 +534,37 @@
 	return 0;
 }
 
+/**
+ * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
+ * @notifier: Unused.
+ * @pm_event: Event being handled.
+ * @unused: Unused.
+ */
+static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
+				 unsigned long pm_event, void *unused)
+{
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		/*
+		 * This is necessary, because the A4R domain has to be "on"
+		 * when suspend_device_irqs() and resume_device_irqs() are
+		 * executed during system suspend and resume, respectively, so
+		 * that those functions don't crash while accessing the INTCS.
+		 */
+		pm_genpd_poweron(&sh7372_a4r.genpd);
+		break;
+	case PM_POST_SUSPEND:
+		pm_genpd_poweroff_unused();
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 static void sh7372_suspend_init(void)
 {
 	shmobile_suspend_ops.enter = sh7372_enter_suspend;
+	pm_notifier(sh7372_pm_notifier_fn, 0);
 }
 #else
 static void sh7372_suspend_init(void) {}
@@ -482,8 +580,6 @@
 	/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
 	__raw_writel(0, PDNSEL);
 
-	sh7372_a3sp_init();
-
 	sh7372_suspend_init();
 	sh7372_cpuidle_init();
 }
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2380389..c197f9d 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -994,12 +994,16 @@
 	sh7372_init_pm_domain(&sh7372_a4r);
 	sh7372_init_pm_domain(&sh7372_a3rv);
 	sh7372_init_pm_domain(&sh7372_a3ri);
-	sh7372_init_pm_domain(&sh7372_a3sg);
+	sh7372_init_pm_domain(&sh7372_a4s);
 	sh7372_init_pm_domain(&sh7372_a3sp);
+	sh7372_init_pm_domain(&sh7372_a3sg);
 
 	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
 	sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
 
+	sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg);
+	sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp);
+
 	platform_add_devices(sh7372_early_devices,
 			    ARRAY_SIZE(sh7372_early_devices));
 
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index f3ab3c5..1d56467 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -37,13 +37,18 @@
 #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
 	.align	12
 	.text
-	.global sh7372_resume_core_standby_a3sm
-sh7372_resume_core_standby_a3sm:
+	.global sh7372_resume_core_standby_sysc
+sh7372_resume_core_standby_sysc:
 	ldr     pc, 1f
 1:	.long   cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
 
-	.global	sh7372_do_idle_a3sm
-sh7372_do_idle_a3sm:
+#define SPDCR 0xe6180008
+
+	/* A3SM & A4S power down */
+	.global	sh7372_do_idle_sysc
+sh7372_do_idle_sysc:
+	mov	r8, r0 /* sleep mode passed in r0 */
+
 	/*
 	 * Clear the SCTLR.C bit to prevent further data cache
 	 * allocation. Clearing SCTLR.C would make all the data accesses
@@ -80,13 +85,9 @@
 	dsb
 	dmb
 
-#define SPDCR 0xe6180008
-#define A3SM (1 << 12)
-
-	/* A3SM power down */
+	/* SYSC power down */
 	ldr     r0, =SPDCR
-	ldr     r1, =A3SM
-	str     r1, [r0]
+	str     r8, [r0]
 1:
 	b      1b
 
diff --git a/arch/arm/mach-spear3xx/include/mach/entry-macro.S b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
index 53da422..de3bb41 100644
--- a/arch/arm/mach-spear3xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
@@ -11,35 +11,8 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <asm/hardware/vic.h>
-#include <mach/hardware.h>
-
 		.macro	disable_fiq
 		.endm
 
-		.macro	get_irqnr_preamble, base, tmp
-		.endm
-
 		.macro	arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		ldr	\base, =VA_SPEAR3XX_ML1_VIC_BASE
-		ldr	\irqstat, [\base, #VIC_IRQ_STATUS]	@ get status
-		teq	\irqstat, #0
-		beq	1001f				@ this will set/reset
-							@ zero register
-		/*
-		 * Following code will find bit position of least significant
-		 * bit set in irqstat, using following equation
-		 * least significant bit set in n = (n & ~(n-1))
-		 */
-		sub	\tmp, \irqstat, #1		@ tmp = irqstat - 1
-		mvn	\tmp, \tmp			@ tmp = ~tmp
-		and	\irqstat, \irqstat, \tmp	@ irqstat &= tmp
-		/* Now, irqstat is = bit no. of 1st bit set in vic irq status */
-		clz	\tmp, \irqstat			@ tmp = leading zeros
-		rsb	\irqnr, \tmp, #0x1F		@ irqnr = 32 - tmp - 1
-
-1001:		/* EQ will be set if no irqs pending */
-		.endm
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index b8f31c3..14276e5 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -42,6 +42,8 @@
 void __init spear3xx_init_irq(void);
 void __init spear3xx_init(void);
 
+void spear_restart(char, const char *);
+
 /* pad mux declarations */
 #define PMX_FIRDA_MASK		(1 << 14)
 #define PMX_I2C_MASK		(1 << 13)
diff --git a/arch/arm/mach-spear3xx/include/mach/vmalloc.h b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
deleted file mode 100644
index df977b3..0000000
--- a/arch/arm/mach-spear3xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/include/mach/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr3xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <plat/vmalloc.h>
-
-#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear3xx/spear300_evb.c b/arch/arm/mach-spear3xx/spear300_evb.c
index a5ff98e..3462ab9 100644
--- a/arch/arm/mach-spear3xx/spear300_evb.c
+++ b/arch/arm/mach-spear3xx/spear300_evb.c
@@ -11,6 +11,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 #include <mach/generic.h>
@@ -67,6 +68,8 @@
 	.atag_offset	=	0x100,
 	.map_io		=	spear3xx_map_io,
 	.init_irq	=	spear3xx_init_irq,
+	.handle_irq	=	vic_handle_irq,
 	.timer		=	&spear3xx_timer,
 	.init_machine	=	spear300_evb_init,
+	.restart	=	spear_restart,
 MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear310_evb.c b/arch/arm/mach-spear3xx/spear310_evb.c
index 45d180d..f92c499 100644
--- a/arch/arm/mach-spear3xx/spear310_evb.c
+++ b/arch/arm/mach-spear3xx/spear310_evb.c
@@ -11,6 +11,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 #include <mach/generic.h>
@@ -73,6 +74,8 @@
 	.atag_offset	=	0x100,
 	.map_io		=	spear3xx_map_io,
 	.init_irq	=	spear3xx_init_irq,
+	.handle_irq	=	vic_handle_irq,
 	.timer		=	&spear3xx_timer,
 	.init_machine	=	spear310_evb_init,
+	.restart	=	spear_restart,
 MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear320_evb.c b/arch/arm/mach-spear3xx/spear320_evb.c
index 2287984..105334a 100644
--- a/arch/arm/mach-spear3xx/spear320_evb.c
+++ b/arch/arm/mach-spear3xx/spear320_evb.c
@@ -11,6 +11,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 #include <mach/generic.h>
@@ -71,6 +72,8 @@
 	.atag_offset	=	0x100,
 	.map_io		=	spear3xx_map_io,
 	.init_irq	=	spear3xx_init_irq,
+	.handle_irq	=	vic_handle_irq,
 	.timer		=	&spear3xx_timer,
 	.init_machine	=	spear320_evb_init,
+	.restart	=	spear_restart,
 MACHINE_END
diff --git a/arch/arm/mach-spear6xx/include/mach/entry-macro.S b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
index 8a0b0ed..d490a91 100644
--- a/arch/arm/mach-spear6xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
@@ -11,44 +11,8 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <asm/hardware/vic.h>
-#include <mach/hardware.h>
-
 		.macro	disable_fiq
 		.endm
 
-		.macro	get_irqnr_preamble, base, tmp
-		.endm
-
 		.macro	arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		ldr	\base, =VA_SPEAR6XX_CPU_VIC_PRI_BASE
-		ldr	\irqstat, [\base, #VIC_IRQ_STATUS]	@ get status
-		mov	\irqnr, #0
-		teq	\irqstat, #0
-		bne	1001f
-		ldr	\base, =VA_SPEAR6XX_CPU_VIC_SEC_BASE
-		ldr	\irqstat, [\base, #VIC_IRQ_STATUS]	@ get status
-		teq	\irqstat, #0
-		beq	1002f				@ this will set/reset
-							@ zero register
-		mov	\irqnr, #32
-1001:
-		/*
-		 * Following code will find bit position of least significant
-		 * bit set in irqstat, using following equation
-		 * least significant bit set in n = (n & ~(n-1))
-		 */
-		sub	\tmp, \irqstat, #1		@ tmp = irqstat - 1
-		mvn	\tmp, \tmp			@ tmp = ~tmp
-		and	\irqstat, \irqstat, \tmp	@ irqstat &= tmp
-		/* Now, irqstat is = bit no. of 1st bit set in vic irq status */
-		clz	\tmp, \irqstat			@ tmp = leading zeros
-
-		rsb	\tmp, \tmp, #0x1F		@ tmp = 32 - tmp - 1
-		add	\irqnr, \irqnr, \tmp
-
-1002:		/* EQ will be set if no irqs pending */
-		.endm
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 183f023..116b993 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -41,6 +41,8 @@
 void __init spear600_init(void);
 void __init spear6xx_clk_init(void);
 
+void spear_restart(char, const char *);
+
 /* Add spear600 machine device structure declarations here */
 
 #endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/vmalloc.h b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
deleted file mode 100644
index 4a0b56c..0000000
--- a/arch/arm/mach-spear6xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/include/mach/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr6xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <plat/vmalloc.h>
-
-#endif	/* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c
index 8238fe3..c6e4254 100644
--- a/arch/arm/mach-spear6xx/spear600_evb.c
+++ b/arch/arm/mach-spear6xx/spear600_evb.c
@@ -11,6 +11,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <asm/hardware/vic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 #include <mach/generic.h>
@@ -46,6 +47,8 @@
 	.atag_offset	=	0x100,
 	.map_io		=	spear6xx_map_io,
 	.init_irq	=	spear6xx_init_irq,
+	.handle_irq	=	vic_handle_irq,
 	.timer		=	&spear6xx_timer,
 	.init_machine	=	spear600_evb_init,
+	.restart	=	spear_restart,
 MACHINE_END
diff --git a/arch/arm/mach-tcc8k/Kconfig b/arch/arm/mach-tcc8k/Kconfig
deleted file mode 100644
index ad86415..0000000
--- a/arch/arm/mach-tcc8k/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-if ARCH_TCC8K
-
-comment "TCC8000 systems:"
-
-config MACH_TCC8000_SDK
-	bool "Telechips TCC8000-SDK development kit"
-	default y
-	help
-	  Support for the Telechips TCC8000-SDK board.
-
-endif
diff --git a/arch/arm/mach-tcc8k/Makefile b/arch/arm/mach-tcc8k/Makefile
deleted file mode 100644
index 9bacf31..0000000
--- a/arch/arm/mach-tcc8k/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for TCC8K boards and common files.
-#
-
-# Common support
-obj-y += clock.o irq.o time.o io.o devices.o
-
-# Board specific support
-obj-$(CONFIG_MACH_TCC8000_SDK) += board-tcc8000-sdk.o
diff --git a/arch/arm/mach-tcc8k/Makefile.boot b/arch/arm/mach-tcc8k/Makefile.boot
deleted file mode 100644
index 5e02d41..0000000
--- a/arch/arm/mach-tcc8k/Makefile.boot
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y		+= 0x20008000
-params_phys-y		:= 0x20000100
-initrd_phys-y		:= 0x20800000
diff --git a/arch/arm/mach-tcc8k/board-tcc8000-sdk.c b/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
deleted file mode 100644
index 777a5bb..0000000
--- a/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach-types.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-#include <mach/clock.h>
-#include <mach/tcc-nand.h>
-#include <mach/tcc8k-regs.h>
-
-#include "common.h"
-
-#define XI_FREQUENCY	12000000
-#define XTI_FREQUENCY	32768
-
-#ifdef CONFIG_MTD_NAND_TCC
-/* NAND */
-static struct tcc_nand_platform_data tcc8k_sdk_nand_data = {
-	.width = 1,
-	.hw_ecc = 0,
-};
-#endif
-
-static void __init tcc8k_init(void)
-{
-#ifdef CONFIG_MTD_NAND_TCC
-	tcc_nand_device.dev.platform_data = &tcc8k_sdk_nand_data;
-	platform_device_register(&tcc_nand_device);
-#endif
-}
-
-static void __init tcc8k_init_timer(void)
-{
-	tcc_clocks_init(XI_FREQUENCY, XTI_FREQUENCY);
-}
-
-static struct sys_timer tcc8k_timer = {
-	.init	= tcc8k_init_timer,
-};
-
-static void __init tcc8k_map_io(void)
-{
-	tcc8k_map_common_io();
-
-	/* set PLL0 clock to 96MHz, adapt UART0 divisor */
-	__raw_writel(0x00026003, CKC_BASE + PLL0CFG_OFFS);
-	__raw_writel(0x10000001, CKC_BASE + ACLKUART0_OFFS);
-
-	/* set PLL1 clock to 192MHz */
-	__raw_writel(0x00016003, CKC_BASE + PLL1CFG_OFFS);
-
-	/* set PLL2 clock to 48MHz */
-	__raw_writel(0x00036003, CKC_BASE + PLL2CFG_OFFS);
-
-	/* with CPU freq higher than 150 MHz, need extra DTCM wait */
-	__raw_writel(0x00000001, SCFG_BASE + DTCMWAIT_OFFS);
-
-	/* PLL locking time as specified */
-	udelay(300);
-}
-
-MACHINE_START(TCC8000_SDK, "Telechips TCC8000-SDK Demo Board")
-	.atag_offset	= 0x100,
-	.map_io		= tcc8k_map_io,
-	.init_irq	= tcc8k_init_irq,
-	.init_machine	= tcc8k_init,
-	.timer		= &tcc8k_timer,
-MACHINE_END
diff --git a/arch/arm/mach-tcc8k/clock.c b/arch/arm/mach-tcc8k/clock.c
deleted file mode 100644
index e7cdae5..0000000
--- a/arch/arm/mach-tcc8k/clock.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Lowlevel clock handling for Telechips TCC8xxx SoCs
- *
- * Copyright (C) 2010 by Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL v2
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/irqs.h>
-#include <mach/tcc8k-regs.h>
-
-#include "common.h"
-
-#define BCLKCTR0	(CKC_BASE + BCLKCTR0_OFFS)
-#define BCLKCTR1	(CKC_BASE + BCLKCTR1_OFFS)
-
-#define ACLKREF		(CKC_BASE + ACLKREF_OFFS)
-#define ACLKUART0	(CKC_BASE + ACLKUART0_OFFS)
-#define ACLKUART1	(CKC_BASE + ACLKUART1_OFFS)
-#define ACLKUART2	(CKC_BASE + ACLKUART2_OFFS)
-#define ACLKUART3	(CKC_BASE + ACLKUART3_OFFS)
-#define ACLKUART4	(CKC_BASE + ACLKUART4_OFFS)
-#define ACLKI2C		(CKC_BASE + ACLKI2C_OFFS)
-#define ACLKADC		(CKC_BASE + ACLKADC_OFFS)
-#define ACLKUSBH	(CKC_BASE + ACLKUSBH_OFFS)
-#define ACLKLCD		(CKC_BASE + ACLKLCD_OFFS)
-#define ACLKSDH0	(CKC_BASE + ACLKSDH0_OFFS)
-#define ACLKSDH1	(CKC_BASE + ACLKSDH1_OFFS)
-#define ACLKSPI0	(CKC_BASE + ACLKSPI0_OFFS)
-#define ACLKSPI1	(CKC_BASE + ACLKSPI1_OFFS)
-#define ACLKSPDIF	(CKC_BASE + ACLKSPDIF_OFFS)
-#define ACLKC3DEC	(CKC_BASE + ACLKC3DEC_OFFS)
-#define ACLKCAN0	(CKC_BASE + ACLKCAN0_OFFS)
-#define ACLKCAN1	(CKC_BASE + ACLKCAN1_OFFS)
-#define ACLKGSB0	(CKC_BASE + ACLKGSB0_OFFS)
-#define ACLKGSB1	(CKC_BASE + ACLKGSB1_OFFS)
-#define ACLKGSB2	(CKC_BASE + ACLKGSB2_OFFS)
-#define ACLKGSB3	(CKC_BASE + ACLKGSB3_OFFS)
-#define ACLKTCT		(CKC_BASE + ACLKTCT_OFFS)
-#define ACLKTCX		(CKC_BASE + ACLKTCX_OFFS)
-#define ACLKTCZ		(CKC_BASE + ACLKTCZ_OFFS)
-
-#define ACLK_MAX_DIV	(0xfff + 1)
-
-/* Crystal frequencies */
-static unsigned long xi_rate, xti_rate;
-
-static void __iomem *pll_cfg_addr(int pll)
-{
-	switch (pll) {
-	case 0: return (CKC_BASE + PLL0CFG_OFFS);
-	case 1: return (CKC_BASE + PLL1CFG_OFFS);
-	case 2: return (CKC_BASE + PLL2CFG_OFFS);
-	default:
-		BUG();
-	}
-}
-
-static int pll_enable(int pll, int enable)
-{
-	u32 reg;
-	void __iomem *addr = pll_cfg_addr(pll);
-
-	reg = __raw_readl(addr);
-	if (enable)
-		reg &= ~PLLxCFG_PD;
-	else
-		reg |= PLLxCFG_PD;
-
-	__raw_writel(reg, addr);
-	return 0;
-}
-
-static int xi_enable(int enable)
-{
-	u32 reg;
-
-	reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
-	if (enable)
-		reg |= CLKCTRL_XE;
-	else
-		reg &= ~CLKCTRL_XE;
-
-	__raw_writel(reg, CKC_BASE + CLKCTRL_OFFS);
-	return 0;
-}
-
-static int root_clk_enable(enum root_clks src)
-{
-	switch (src) {
-	case CLK_SRC_PLL0: return pll_enable(0, 1);
-	case CLK_SRC_PLL1: return pll_enable(1, 1);
-	case CLK_SRC_PLL2: return pll_enable(2, 1);
-	case CLK_SRC_XI: return xi_enable(1);
-	default:
-		BUG();
-	}
-	return 0;
-}
-
-static int root_clk_disable(enum root_clks src)
-{
-	switch (src) {
-	case CLK_SRC_PLL0: return pll_enable(0, 0);
-	case CLK_SRC_PLL1: return pll_enable(1, 0);
-	case CLK_SRC_PLL2: return pll_enable(2, 0);
-	case CLK_SRC_XI: return xi_enable(0);
-	default:
-		BUG();
-	}
-	return 0;
-}
-
-static int enable_clk(struct clk *clk)
-{
-	u32 reg;
-
-	if (clk->root_id != CLK_SRC_NOROOT)
-		return root_clk_enable(clk->root_id);
-
-	if (clk->aclkreg) {
-		reg = __raw_readl(clk->aclkreg);
-		reg |= ACLK_EN;
-		__raw_writel(reg, clk->aclkreg);
-	}
-	if (clk->bclkctr) {
-		reg = __raw_readl(clk->bclkctr);
-		reg |= 1 << clk->bclk_shift;
-		__raw_writel(reg, clk->bclkctr);
-	}
-	return 0;
-}
-
-static void disable_clk(struct clk *clk)
-{
-	u32 reg;
-
-	if (clk->root_id != CLK_SRC_NOROOT) {
-		root_clk_disable(clk->root_id);
-		return;
-	}
-
-	if (clk->bclkctr) {
-		reg = __raw_readl(clk->bclkctr);
-		reg &= ~(1 << clk->bclk_shift);
-		__raw_writel(reg, clk->bclkctr);
-	}
-	if (clk->aclkreg) {
-		reg = __raw_readl(clk->aclkreg);
-		reg &= ~ACLK_EN;
-		__raw_writel(reg, clk->aclkreg);
-	}
-}
-
-static unsigned long get_rate_pll(int pll)
-{
-	u32 reg;
-	unsigned long s, m, p;
-	void __iomem *addr = pll_cfg_addr(pll);
-
-	reg = __raw_readl(addr);
-	s = (reg >> 16) & 0x07;
-	m = (reg >> 8) & 0xff;
-	p = reg & 0x3f;
-
-	return (m * xi_rate) / (p * (1 << s));
-}
-
-static unsigned long get_rate_pll_div(int pll)
-{
-	u32 reg;
-	unsigned long div = 0;
-	void __iomem *addr;
-
-	switch (pll) {
-	case 0:
-		addr = CKC_BASE + CLKDIVC0_OFFS;
-		reg = __raw_readl(addr);
-		if (reg & CLKDIVC0_P0E)
-			div = (reg >> 24) & 0x3f;
-		break;
-	case 1:
-		addr = CKC_BASE + CLKDIVC0_OFFS;
-		reg = __raw_readl(addr);
-		if (reg & CLKDIVC0_P1E)
-			div = (reg >> 16) & 0x3f;
-		break;
-	case 2:
-		addr = CKC_BASE + CLKDIVC1_OFFS;
-		reg = __raw_readl(addr);
-		if (reg & CLKDIVC1_P2E)
-			div = reg & 0x3f;
-		break;
-	}
-	return get_rate_pll(pll) / (div + 1);
-}
-
-static unsigned long get_rate_xi_div(void)
-{
-	unsigned long div = 0;
-	u32 reg = __raw_readl(CKC_BASE + CLKDIVC0_OFFS);
-
-	if (reg & CLKDIVC0_XE)
-		div = (reg >> 8) & 0x3f;
-
-	return xi_rate / (div + 1);
-}
-
-static unsigned long get_rate_xti_div(void)
-{
-	unsigned long div = 0;
-	u32 reg = __raw_readl(CKC_BASE + CLKDIVC0_OFFS);
-
-	if (reg & CLKDIVC0_XTE)
-		div = reg & 0x3f;
-
-	return xti_rate / (div + 1);
-}
-
-static unsigned long root_clk_get_rate(enum root_clks src)
-{
-	switch (src) {
-	case CLK_SRC_PLL0: return get_rate_pll(0);
-	case CLK_SRC_PLL1: return get_rate_pll(1);
-	case CLK_SRC_PLL2: return get_rate_pll(2);
-	case CLK_SRC_PLL0DIV: return get_rate_pll_div(0);
-	case CLK_SRC_PLL1DIV: return get_rate_pll_div(1);
-	case CLK_SRC_PLL2DIV: return get_rate_pll_div(2);
-	case CLK_SRC_XI: return xi_rate;
-	case CLK_SRC_XTI: return xti_rate;
-	case CLK_SRC_XIDIV: return get_rate_xi_div();
-	case CLK_SRC_XTIDIV: return get_rate_xti_div();
-	default: return 0;
-	}
-}
-
-static unsigned long aclk_get_rate(struct clk *clk)
-{
-	u32 reg;
-	unsigned long div;
-	unsigned int src;
-
-	reg = __raw_readl(clk->aclkreg);
-	div = reg & 0x0fff;
-	src = (reg >> ACLK_SEL_SHIFT) & CLK_SRC_MASK;
-	return root_clk_get_rate(src) / (div + 1);
-}
-
-static unsigned long aclk_best_div(struct clk *clk, unsigned long rate)
-{
-	unsigned long div, src, freq, r1, r2;
-
-	if (!rate)
-		return ACLK_MAX_DIV;
-
-	src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
-	src &= CLK_SRC_MASK;
-	freq = root_clk_get_rate(src);
-	div = freq / rate;
-	if (!div)
-		return 1;
-	if (div >= ACLK_MAX_DIV)
-		return ACLK_MAX_DIV;
-	r1 = freq / div;
-	r2 = freq / (div + 1);
-	if ((rate - r2) < (r1 - rate))
-		return div + 1;
-
-	return div;
-}
-
-static unsigned long aclk_round_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned int src;
-
-	src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
-	src &= CLK_SRC_MASK;
-
-	return root_clk_get_rate(src) / aclk_best_div(clk, rate);
-}
-
-static int aclk_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 reg;
-
-	reg = __raw_readl(clk->aclkreg) & ~ACLK_DIV_MASK;
-	reg |= aclk_best_div(clk, rate) - 1;
-	__raw_writel(reg, clk->aclkreg);
-	return 0;
-}
-
-static unsigned long get_rate_sys(struct clk *clk)
-{
-	unsigned int src;
-
-	src = __raw_readl(CKC_BASE + CLKCTRL_OFFS) & CLK_SRC_MASK;
-	return root_clk_get_rate(src);
-}
-
-static unsigned long get_rate_bus(struct clk *clk)
-{
-	unsigned int reg, sdiv, bdiv, rate;
-
-	reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
-	rate = get_rate_sys(clk);
-	sdiv = (reg >> 20) & 3;
-	if (sdiv)
-		rate /= sdiv + 1;
-	bdiv = (reg >> 4) & 0xff;
-	if (bdiv)
-		rate /= bdiv + 1;
-	return rate;
-}
-
-static unsigned long get_rate_cpu(struct clk *clk)
-{
-	unsigned int reg, div, fsys, fbus;
-
-	fbus = get_rate_bus(clk);
-	reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
-	if (reg & (1 << 29))
-		return fbus;
-	fsys = get_rate_sys(clk);
-	div = (reg >> 16) & 0x0f;
-	return fbus + ((fsys - fbus) * (div + 1)) / 16;
-}
-
-static unsigned long get_rate_root(struct clk *clk)
-{
-	return root_clk_get_rate(clk->root_id);
-}
-
-static int aclk_set_parent(struct clk *clock, struct clk *parent)
-{
-	u32 reg;
-
-	if (clock->parent == parent)
-		return 0;
-
-	clock->parent = parent;
-
-	if (!parent)
-		return 0;
-
-	if (parent->root_id == CLK_SRC_NOROOT)
-		return 0;
-	reg = __raw_readl(clock->aclkreg);
-	reg &= ~ACLK_SEL_MASK;
-	reg |= (parent->root_id << ACLK_SEL_SHIFT) & ACLK_SEL_MASK;
-	__raw_writel(reg, clock->aclkreg);
-
-	return 0;
-}
-
-#define DEFINE_ROOT_CLOCK(name, ri, p)	\
-	static struct clk name = {		\
-		.root_id = ri,			\
-		.get_rate = get_rate_root,			\
-		.enable = enable_clk,		\
-		.disable = disable_clk,		\
-		.parent = p,			\
-	};
-
-#define DEFINE_SPECIAL_CLOCK(name, gr, p)	\
-	static struct clk name = {		\
-		.root_id = CLK_SRC_NOROOT,	\
-		.get_rate = gr,			\
-		.parent = p,			\
-	};
-
-#define DEFINE_ACLOCK(name, bc, bs, ar)		\
-	static struct clk name = {		\
-		.root_id = CLK_SRC_NOROOT,	\
-		.bclkctr = bc,			\
-		.bclk_shift = bs,		\
-		.aclkreg = ar,			\
-		.get_rate = aclk_get_rate,	\
-		.set_rate = aclk_set_rate,	\
-		.round_rate = aclk_round_rate,	\
-		.enable = enable_clk,		\
-		.disable = disable_clk,		\
-		.set_parent = aclk_set_parent,	\
-	};
-
-#define DEFINE_BCLOCK(name, bc, bs, gr, p)	\
-	static struct clk name = {		\
-		.root_id = CLK_SRC_NOROOT,	\
-		.bclkctr = bc,			\
-		.bclk_shift = bs,		\
-		.get_rate = gr,			\
-		.enable = enable_clk,		\
-		.disable = disable_clk,		\
-		.parent = p,			\
-	};
-
-DEFINE_ROOT_CLOCK(xi, CLK_SRC_XI, NULL)
-DEFINE_ROOT_CLOCK(xti, CLK_SRC_XTI, NULL)
-DEFINE_ROOT_CLOCK(xidiv, CLK_SRC_XIDIV, &xi)
-DEFINE_ROOT_CLOCK(xtidiv, CLK_SRC_XTIDIV, &xti)
-DEFINE_ROOT_CLOCK(pll0, CLK_SRC_PLL0, &xi)
-DEFINE_ROOT_CLOCK(pll1, CLK_SRC_PLL1, &xi)
-DEFINE_ROOT_CLOCK(pll2, CLK_SRC_PLL2, &xi)
-DEFINE_ROOT_CLOCK(pll0div, CLK_SRC_PLL0DIV, &pll0)
-DEFINE_ROOT_CLOCK(pll1div, CLK_SRC_PLL1DIV, &pll1)
-DEFINE_ROOT_CLOCK(pll2div, CLK_SRC_PLL2DIV, &pll2)
-
-/* The following 3 clocks are special and are initialized explicitly later */
-DEFINE_SPECIAL_CLOCK(sys, get_rate_sys, NULL)
-DEFINE_SPECIAL_CLOCK(bus, get_rate_bus, &sys)
-DEFINE_SPECIAL_CLOCK(cpu, get_rate_cpu, &sys)
-
-DEFINE_ACLOCK(tct, NULL, 0, ACLKTCT)
-DEFINE_ACLOCK(tcx, NULL, 0, ACLKTCX)
-DEFINE_ACLOCK(tcz, NULL, 0, ACLKTCZ)
-DEFINE_ACLOCK(ref, NULL, 0, ACLKREF)
-DEFINE_ACLOCK(uart0, BCLKCTR0, 5, ACLKUART0)
-DEFINE_ACLOCK(uart1, BCLKCTR0, 23, ACLKUART1)
-DEFINE_ACLOCK(uart2, BCLKCTR0, 6, ACLKUART2)
-DEFINE_ACLOCK(uart3, BCLKCTR0, 8, ACLKUART3)
-DEFINE_ACLOCK(uart4, BCLKCTR1, 6, ACLKUART4)
-DEFINE_ACLOCK(i2c, BCLKCTR0, 7, ACLKI2C)
-DEFINE_ACLOCK(adc, BCLKCTR0, 10, ACLKADC)
-DEFINE_ACLOCK(usbh0, BCLKCTR0, 11, ACLKUSBH)
-DEFINE_ACLOCK(lcd, BCLKCTR0, 13, ACLKLCD)
-DEFINE_ACLOCK(sd0, BCLKCTR0, 17, ACLKSDH0)
-DEFINE_ACLOCK(sd1, BCLKCTR1, 5, ACLKSDH1)
-DEFINE_ACLOCK(spi0, BCLKCTR0, 24, ACLKSPI0)
-DEFINE_ACLOCK(spi1, BCLKCTR0, 30, ACLKSPI1)
-DEFINE_ACLOCK(spdif, BCLKCTR1, 2, ACLKSPDIF)
-DEFINE_ACLOCK(c3dec, BCLKCTR1, 9, ACLKC3DEC)
-DEFINE_ACLOCK(can0, BCLKCTR1, 10, ACLKCAN0)
-DEFINE_ACLOCK(can1, BCLKCTR1, 11, ACLKCAN1)
-DEFINE_ACLOCK(gsb0, BCLKCTR1, 13, ACLKGSB0)
-DEFINE_ACLOCK(gsb1, BCLKCTR1, 14, ACLKGSB1)
-DEFINE_ACLOCK(gsb2, BCLKCTR1, 15, ACLKGSB2)
-DEFINE_ACLOCK(gsb3, BCLKCTR1, 16, ACLKGSB3)
-DEFINE_ACLOCK(usbh1, BCLKCTR1, 20, ACLKUSBH)
-
-DEFINE_BCLOCK(dai0, BCLKCTR0, 0, NULL, NULL)
-DEFINE_BCLOCK(pic, BCLKCTR0, 1, NULL, NULL)
-DEFINE_BCLOCK(tc, BCLKCTR0, 2, NULL, NULL)
-DEFINE_BCLOCK(gpio, BCLKCTR0, 3, NULL, NULL)
-DEFINE_BCLOCK(usbd, BCLKCTR0, 4, NULL, NULL)
-DEFINE_BCLOCK(ecc, BCLKCTR0, 9, NULL, NULL)
-DEFINE_BCLOCK(gdma0, BCLKCTR0, 12, NULL, NULL)
-DEFINE_BCLOCK(rtc, BCLKCTR0, 15, NULL, NULL)
-DEFINE_BCLOCK(nfc, BCLKCTR0, 16, NULL, NULL)
-DEFINE_BCLOCK(g2d, BCLKCTR0, 18, NULL, NULL)
-DEFINE_BCLOCK(gdma1, BCLKCTR0, 22, NULL, NULL)
-DEFINE_BCLOCK(mscl, BCLKCTR0, 25, NULL, NULL)
-DEFINE_BCLOCK(bdma, BCLKCTR1, 0, NULL, NULL)
-DEFINE_BCLOCK(adma0, BCLKCTR1, 1, NULL, NULL)
-DEFINE_BCLOCK(scfg, BCLKCTR1, 3, NULL, NULL)
-DEFINE_BCLOCK(cid, BCLKCTR1, 4, NULL, NULL)
-DEFINE_BCLOCK(dai1, BCLKCTR1, 7, NULL, NULL)
-DEFINE_BCLOCK(adma1, BCLKCTR1, 8, NULL, NULL)
-DEFINE_BCLOCK(gps, BCLKCTR1, 12, NULL, NULL)
-DEFINE_BCLOCK(gdma2, BCLKCTR1, 17, NULL, NULL)
-DEFINE_BCLOCK(gdma3, BCLKCTR1, 18, NULL, NULL)
-DEFINE_BCLOCK(ddrc, BCLKCTR1, 19, NULL, NULL)
-
-#define _REGISTER_CLOCK(d, n, c) \
-	{ \
-		.dev_id = d, \
-		.con_id = n, \
-		.clk = &c, \
-	},
-
-static struct clk_lookup lookups[] = {
-	_REGISTER_CLOCK(NULL, "bus", bus)
-	_REGISTER_CLOCK(NULL, "cpu", cpu)
-	_REGISTER_CLOCK(NULL, "tct", tct)
-	_REGISTER_CLOCK(NULL, "tcx", tcx)
-	_REGISTER_CLOCK(NULL, "tcz", tcz)
-	_REGISTER_CLOCK(NULL, "ref", ref)
-	_REGISTER_CLOCK(NULL, "dai0", dai0)
-	_REGISTER_CLOCK(NULL, "pic", pic)
-	_REGISTER_CLOCK(NULL, "tc", tc)
-	_REGISTER_CLOCK(NULL, "gpio", gpio)
-	_REGISTER_CLOCK(NULL, "usbd", usbd)
-	_REGISTER_CLOCK("tcc-uart.0", NULL, uart0)
-	_REGISTER_CLOCK("tcc-uart.2", NULL, uart2)
-	_REGISTER_CLOCK("tcc-i2c", NULL, i2c)
-	_REGISTER_CLOCK("tcc-uart.3", NULL, uart3)
-	_REGISTER_CLOCK(NULL, "ecc", ecc)
-	_REGISTER_CLOCK(NULL, "adc", adc)
-	_REGISTER_CLOCK("tcc-usbh.0", "usb", usbh0)
-	_REGISTER_CLOCK(NULL, "gdma0", gdma0)
-	_REGISTER_CLOCK(NULL, "lcd", lcd)
-	_REGISTER_CLOCK(NULL, "rtc", rtc)
-	_REGISTER_CLOCK(NULL, "nfc", nfc)
-	_REGISTER_CLOCK("tcc-mmc.0", NULL, sd0)
-	_REGISTER_CLOCK(NULL, "g2d", g2d)
-	_REGISTER_CLOCK(NULL, "gdma1", gdma1)
-	_REGISTER_CLOCK("tcc-uart.1", NULL, uart1)
-	_REGISTER_CLOCK("tcc-spi.0", NULL, spi0)
-	_REGISTER_CLOCK(NULL, "mscl", mscl)
-	_REGISTER_CLOCK("tcc-spi.1", NULL, spi1)
-	_REGISTER_CLOCK(NULL, "bdma", bdma)
-	_REGISTER_CLOCK(NULL, "adma0", adma0)
-	_REGISTER_CLOCK(NULL, "spdif", spdif)
-	_REGISTER_CLOCK(NULL, "scfg", scfg)
-	_REGISTER_CLOCK(NULL, "cid", cid)
-	_REGISTER_CLOCK("tcc-mmc.1", NULL, sd1)
-	_REGISTER_CLOCK("tcc-uart.4", NULL, uart4)
-	_REGISTER_CLOCK(NULL, "dai1", dai1)
-	_REGISTER_CLOCK(NULL, "adma1", adma1)
-	_REGISTER_CLOCK(NULL, "c3dec", c3dec)
-	_REGISTER_CLOCK("tcc-can.0", NULL, can0)
-	_REGISTER_CLOCK("tcc-can.1", NULL, can1)
-	_REGISTER_CLOCK(NULL, "gps", gps)
-	_REGISTER_CLOCK("tcc-gsb.0", NULL, gsb0)
-	_REGISTER_CLOCK("tcc-gsb.1", NULL, gsb1)
-	_REGISTER_CLOCK("tcc-gsb.2", NULL, gsb2)
-	_REGISTER_CLOCK("tcc-gsb.3", NULL, gsb3)
-	_REGISTER_CLOCK(NULL, "gdma2", gdma2)
-	_REGISTER_CLOCK(NULL, "gdma3", gdma3)
-	_REGISTER_CLOCK(NULL, "ddrc", ddrc)
-	_REGISTER_CLOCK("tcc-usbh.1", "usb", usbh1)
-};
-
-static struct clk *root_clk_by_index(enum root_clks src)
-{
-	switch (src) {
-	case CLK_SRC_PLL0: return &pll0;
-	case CLK_SRC_PLL1: return &pll1;
-	case CLK_SRC_PLL2: return &pll2;
-	case CLK_SRC_PLL0DIV: return &pll0div;
-	case CLK_SRC_PLL1DIV: return &pll1div;
-	case CLK_SRC_PLL2DIV: return &pll2div;
-	case CLK_SRC_XI: return &xi;
-	case CLK_SRC_XTI: return &xti;
-	case CLK_SRC_XIDIV: return &xidiv;
-	case CLK_SRC_XTIDIV: return &xtidiv;
-	default: return NULL;
-	}
-}
-
-static void find_aclk_parent(struct clk *clk)
-{
-	unsigned int src;
-	struct clk *clock;
-
-	if (!clk->aclkreg)
-		return;
-
-	src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
-	src &= CLK_SRC_MASK;
-
-	clock = root_clk_by_index(src);
-	if (!clock)
-		return;
-
-	clk->parent = clock;
-	clk->set_parent = aclk_set_parent;
-}
-
-void __init tcc_clocks_init(unsigned long xi_freq, unsigned long xti_freq)
-{
-	int i;
-
-	xi_rate = xi_freq;
-	xti_rate = xti_freq;
-
-	/* fixup parents and add the clock */
-	for (i = 0; i < ARRAY_SIZE(lookups); i++) {
-		find_aclk_parent(lookups[i].clk);
-		clkdev_add(&lookups[i]);
-	}
-	tcc8k_timer_init(&tcz, (void __iomem *)TIMER_BASE, INT_TC32);
-}
diff --git a/arch/arm/mach-tcc8k/common.h b/arch/arm/mach-tcc8k/common.h
deleted file mode 100644
index 705690a..0000000
--- a/arch/arm/mach-tcc8k/common.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef MACH_TCC8K_COMMON_H
-#define MACH_TCC8K_COMMON_H
-
-#include <linux/platform_device.h>
-
-extern struct platform_device tcc_nand_device;
-
-struct clk;
-
-extern void tcc_clocks_init(unsigned long xi_freq, unsigned long xti_freq);
-extern void tcc8k_timer_init(struct clk *clock, void __iomem *base, int irq);
-extern void tcc8k_init_irq(void);
-extern void tcc8k_map_common_io(void);
-
-#endif
diff --git a/arch/arm/mach-tcc8k/devices.c b/arch/arm/mach-tcc8k/devices.c
deleted file mode 100644
index 6722ad7..0000000
--- a/arch/arm/mach-tcc8k/devices.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * linux/arch/arm/mach-tcc8k/devices.c
- *
- * Copyright (C) Telechips, Inc.
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of GPL v2.
- *
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/mach/map.h>
-
-#include <mach/tcc8k-regs.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-static u64 tcc8k_dmamask = DMA_BIT_MASK(32);
-
-#ifdef CONFIG_MTD_NAND_TCC
-/* NAND controller */
-static struct resource tcc_nand_resources[] = {
-	{
-		.start	= (resource_size_t)NFC_BASE,
-		.end	= (resource_size_t)NFC_BASE + 0x7f,
-		.flags	= IORESOURCE_MEM,
-	}, {
-		.start	= INT_NFC,
-		.end	= INT_NFC,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-struct platform_device tcc_nand_device = {
-	.name = "tcc_nand",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(tcc_nand_resources),
-	.resource = tcc_nand_resources,
-};
-#endif
-
-#ifdef CONFIG_MMC_TCC8K
-/* MMC controller */
-static struct resource tcc8k_mmc0_resource[] = {
-	{
-		.start = INT_SD0,
-		.end   = INT_SD0,
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-static struct resource tcc8k_mmc1_resource[] = {
-	{
-		.start = INT_SD1,
-		.end   = INT_SD1,
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-struct platform_device tcc8k_mmc0_device = {
-	.name		= "tcc-mmc",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(tcc8k_mmc0_resource),
-	.resource	= tcc8k_mmc0_resource,
-	.dev		= {
-		.dma_mask		= &tcc8k_dmamask,
-		.coherent_dma_mask	= DMA_BIT_MASK(32),
-	}
-};
-
-struct platform_device tcc8k_mmc1_device = {
-	.name		= "tcc-mmc",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(tcc8k_mmc1_resource),
-	.resource	= tcc8k_mmc1_resource,
-	.dev		= {
-		.dma_mask		= &tcc8k_dmamask,
-		.coherent_dma_mask	= DMA_BIT_MASK(32),
-	}
-};
-
-static inline void tcc8k_init_mmc(void)
-{
-	u32 reg = __raw_readl(GPIOPS_BASE + GPIOPS_FS1_OFFS);
-
-	reg |= GPIOPS_FS1_SDH0_BITS | GPIOPS_FS1_SDH1_BITS;
-	__raw_writel(reg, GPIOPS_BASE + GPIOPS_FS1_OFFS);
-
-	platform_device_register(&tcc8k_mmc0_device);
-	platform_device_register(&tcc8k_mmc1_device);
-}
-#else
-static inline void tcc8k_init_mmc(void) { }
-#endif
-
-#ifdef CONFIG_USB_OHCI_HCD
-static int tcc8k_ohci_init(struct device *dev)
-{
-	u32 reg;
-
-	/* Use GPIO PK19 as VBUS control output */
-	reg = __raw_readl(GPIOPK_BASE + GPIOPK_FS0_OFFS);
-	reg &= ~(1 << 19);
-	__raw_writel(reg, GPIOPK_BASE + GPIOPK_FS0_OFFS);
-	reg = __raw_readl(GPIOPK_BASE + GPIOPK_FS1_OFFS);
-	reg &= ~(1 << 19);
-	__raw_writel(reg, GPIOPK_BASE + GPIOPK_FS1_OFFS);
-
-	reg = __raw_readl(GPIOPK_BASE + GPIOPK_DOE_OFFS);
-	reg |= (1 << 19);
-	__raw_writel(reg, GPIOPK_BASE + GPIOPK_DOE_OFFS);
-	/* Turn on VBUS */
-	reg = __raw_readl(GPIOPK_BASE + GPIOPK_DAT_OFFS);
-	reg |= (1 << 19);
-	__raw_writel(reg, GPIOPK_BASE + GPIOPK_DAT_OFFS);
-
-	return 0;
-}
-
-static struct resource tcc8k_ohci0_resources[] = {
-	[0] = {
-		.start = (resource_size_t)USBH0_BASE,
-		.end   = (resource_size_t)USBH0_BASE + 0x5c,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = INT_USBH0,
-		.end   = INT_USBH0,
-		.flags = IORESOURCE_IRQ,
-	}
-};
-
-static struct resource tcc8k_ohci1_resources[] = {
-	[0] = {
-		.start = (resource_size_t)USBH1_BASE,
-		.end   = (resource_size_t)USBH1_BASE + 0x5c,
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = INT_USBH1,
-		.end   = INT_USBH1,
-		.flags = IORESOURCE_IRQ,
-	}
-};
-
-static struct tccohci_platform_data tcc8k_ohci0_platform_data = {
-	.controller	= 0,
-	.port_mode	= PMM_PERPORT_MODE,
-	.init		= tcc8k_ohci_init,
-};
-
-static struct tccohci_platform_data tcc8k_ohci1_platform_data = {
-	.controller	= 1,
-	.port_mode	= PMM_PERPORT_MODE,
-	.init		= tcc8k_ohci_init,
-};
-
-static struct platform_device ohci0_device = {
-	.name = "tcc-ohci",
-	.id = 0,
-	.dev = {
-		.dma_mask = &tcc8k_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &tcc8k_ohci0_platform_data,
-	},
-	.num_resources  = ARRAY_SIZE(tcc8k_ohci0_resources),
-	.resource       = tcc8k_ohci0_resources,
-};
-
-static struct platform_device ohci1_device = {
-	.name = "tcc-ohci",
-	.id = 1,
-	.dev = {
-		.dma_mask = &tcc8k_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &tcc8k_ohci1_platform_data,
-	},
-	.num_resources  = ARRAY_SIZE(tcc8k_ohci1_resources),
-	.resource       = tcc8k_ohci1_resources,
-};
-
-static void __init tcc8k_init_usbhost(void)
-{
-	platform_device_register(&ohci0_device);
-	platform_device_register(&ohci1_device);
-}
-#else
-static void __init tcc8k_init_usbhost(void) { }
-#endif
-
-/* USB device controller*/
-#ifdef CONFIG_USB_GADGET_TCC8K
-static struct resource udc_resources[] = {
-	[0] = {
-		.start = INT_USBD,
-		.end   = INT_USBD,
-		.flags = IORESOURCE_IRQ,
-	},
-	[1] = {
-		.start = INT_UDMA,
-		.end   = INT_UDMA,
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device tcc8k_udc_device = {
-	.name = "tcc-udc",
-	.id = 0,
-	.resource = udc_resources,
-	.num_resources = ARRAY_SIZE(udc_resources),
-	.dev = {
-		 .dma_mask = &tcc8k_dmamask,
-		 .coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-};
-
-static void __init tcc8k_init_usb_gadget(void)
-{
-	platform_device_register(&tcc8k_udc_device);
-}
-#else
-static void __init tcc8k_init_usb_gadget(void) { }
-#endif	/* CONFIG_USB_GADGET_TCC83X */
-
-static int __init tcc8k_init_devices(void)
-{
-	tcc8k_init_mmc();
-	tcc8k_init_usbhost();
-	tcc8k_init_usb_gadget();
-	return 0;
-}
-
-arch_initcall(tcc8k_init_devices);
diff --git a/arch/arm/mach-tcc8k/io.c b/arch/arm/mach-tcc8k/io.c
deleted file mode 100644
index 9b39d7f..0000000
--- a/arch/arm/mach-tcc8k/io.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * linux/arch/arm/mach-tcc8k/io.c
- *
- * (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * derived from TCC83xx io.c
- * Copyright (C) Telechips, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-
-#include <asm/mach/map.h>
-
-#include <mach/tcc8k-regs.h>
-
-/*
- * The machine specific code may provide the extra mapping besides the
- * default mapping provided here.
- */
-static struct map_desc tcc8k_io_desc[] __initdata = {
-	{
-		.virtual	= (unsigned long)CS1_BASE_VIRT,
-		.pfn		= __phys_to_pfn(CS1_BASE),
-		.length		= CS1_SIZE,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)AHB_PERI_BASE_VIRT,
-		.pfn		= __phys_to_pfn(AHB_PERI_BASE),
-		.length		= AHB_PERI_SIZE,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)APB0_PERI_BASE_VIRT,
-		.pfn		= __phys_to_pfn(APB0_PERI_BASE),
-		.length		= APB0_PERI_SIZE,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)APB1_PERI_BASE_VIRT,
-		.pfn		= __phys_to_pfn(APB1_PERI_BASE),
-		.length		= APB1_PERI_SIZE,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)EXT_MEM_CTRL_BASE_VIRT,
-		.pfn		= __phys_to_pfn(EXT_MEM_CTRL_BASE),
-		.length		= EXT_MEM_CTRL_SIZE,
-		.type		= MT_DEVICE,
-	},
-};
-
-/*
- * Maps common IO regions for tcc8k.
- *
- */
-void __init tcc8k_map_common_io(void)
-{
-	iotable_init(tcc8k_io_desc, ARRAY_SIZE(tcc8k_io_desc));
-}
diff --git a/arch/arm/mach-tcc8k/irq.c b/arch/arm/mach-tcc8k/irq.c
deleted file mode 100644
index 209fa5c..0000000
--- a/arch/arm/mach-tcc8k/irq.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) Telechips, Inc.
- * Copyright (C) 2009-2010 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GNU GPL version 2.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-
-#include <mach/tcc8k-regs.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-/* Disable IRQ */
-static void tcc8000_mask_ack_irq0(struct irq_data *d)
-{
-	PIC0_IEN &= ~(1 << d->irq);
-	PIC0_CREQ |=  (1 << d->irq);
-}
-
-static void tcc8000_mask_ack_irq1(struct irq_data *d)
-{
-	PIC1_IEN &= ~(1 << (d->irq - 32));
-	PIC1_CREQ |= (1 << (d->irq - 32));
-}
-
-static void tcc8000_mask_irq0(struct irq_data *d)
-{
-	PIC0_IEN &= ~(1 << d->irq);
-}
-
-static void tcc8000_mask_irq1(struct irq_data *d)
-{
-	PIC1_IEN &= ~(1 << (d->irq - 32));
-}
-
-static void tcc8000_ack_irq0(struct irq_data *d)
-{
-	PIC0_CREQ |=  (1 << d->irq);
-}
-
-static void tcc8000_ack_irq1(struct irq_data *d)
-{
-	PIC1_CREQ |= (1 << (d->irq - 32));
-}
-
-/* Enable IRQ */
-static void tcc8000_unmask_irq0(struct irq_data *d)
-{
-	PIC0_IEN |= (1 << d->irq);
-	PIC0_INTOEN |= (1 << d->irq);
-}
-
-static void tcc8000_unmask_irq1(struct irq_data *d)
-{
-	PIC1_IEN |= (1 << (d->irq - 32));
-	PIC1_INTOEN |= (1 << (d->irq - 32));
-}
-
-static struct irq_chip tcc8000_irq_chip0 = {
-	.name		= "tcc_irq0",
-	.irq_mask	= tcc8000_mask_irq0,
-	.irq_ack	= tcc8000_ack_irq0,
-	.irq_mask_ack	= tcc8000_mask_ack_irq0,
-	.irq_unmask	= tcc8000_unmask_irq0,
-};
-
-static struct irq_chip tcc8000_irq_chip1 = {
-	.name		= "tcc_irq1",
-	.irq_mask	= tcc8000_mask_irq1,
-	.irq_ack	= tcc8000_ack_irq1,
-	.irq_mask_ack	= tcc8000_mask_ack_irq1,
-	.irq_unmask	= tcc8000_unmask_irq1,
-};
-
-void __init tcc8k_init_irq(void)
-{
-	int irqno;
-
-	/* Mask and clear all interrupts */
-	PIC0_IEN = 0x00000000;
-	PIC0_CREQ = 0xffffffff;
-	PIC1_IEN = 0x00000000;
-	PIC1_CREQ = 0xffffffff;
-
-	PIC0_MEN0 = 0x00000003;
-	PIC1_MEN1 = 0x00000003;
-	PIC1_MEN = 0x00000003;
-
-	/* let all IRQs be level triggered */
-	PIC0_TMODE = 0xffffffff;
-	PIC1_TMODE = 0xffffffff;
-	/* all IRQs are IRQs (not FIQs) */
-	PIC0_IRQSEL = 0xffffffff;
-	PIC1_IRQSEL = 0xffffffff;
-
-	for (irqno = 0; irqno < NR_IRQS; irqno++) {
-		if (irqno < 32)
-			irq_set_chip(irqno, &tcc8000_irq_chip0);
-		else
-			irq_set_chip(irqno, &tcc8000_irq_chip1);
-		irq_set_handler(irqno, handle_level_irq);
-		set_irq_flags(irqno, IRQF_VALID);
-	}
-}
diff --git a/arch/arm/mach-tcc8k/time.c b/arch/arm/mach-tcc8k/time.c
deleted file mode 100644
index a96babe..0000000
--- a/arch/arm/mach-tcc8k/time.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * TCC8000 system timer setup
- *
- * (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL version 2.
- *
- */
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-
-#include <asm/mach/time.h>
-
-#include <mach/tcc8k-regs.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-static void __iomem *timer_base;
-
-static int tcc_set_next_event(unsigned long evt,
-			      struct clock_event_device *unused)
-{
-	unsigned long reg = __raw_readl(timer_base + TC32MCNT_OFFS);
-
-	__raw_writel(reg + evt, timer_base + TC32CMP0_OFFS);
-	return 0;
-}
-
-static void tcc_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	unsigned long tc32irq;
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		tc32irq = __raw_readl(timer_base + TC32IRQ_OFFS);
-		tc32irq |= TC32IRQ_IRQEN0;
-		__raw_writel(tc32irq, timer_base + TC32IRQ_OFFS);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		tc32irq = __raw_readl(timer_base + TC32IRQ_OFFS);
-		tc32irq &= ~TC32IRQ_IRQEN0;
-		__raw_writel(tc32irq, timer_base + TC32IRQ_OFFS);
-		break;
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
-}
-
-static irqreturn_t tcc8k_timer_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = dev_id;
-
-	/* Acknowledge TC32 interrupt by reading TC32IRQ */
-	__raw_readl(timer_base + TC32IRQ_OFFS);
-
-	evt->event_handler(evt);
-
-	return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_tcc = {
-	.name		= "tcc_timer1",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.shift		= 32,
-	.set_mode	= tcc_set_mode,
-	.set_next_event	= tcc_set_next_event,
-	.rating		= 200,
-};
-
-static struct irqaction tcc8k_timer_irq = {
-	.name		= "TC32_timer",
-	.flags		= IRQF_DISABLED | IRQF_TIMER,
-	.handler	= tcc8k_timer_interrupt,
-	.dev_id		= &clockevent_tcc,
-};
-
-static int __init tcc_clockevent_init(struct clk *clock)
-{
-	unsigned int c = clk_get_rate(clock);
-
-	clocksource_mmio_init(timer_base + TC32MCNT_OFFS, "tcc_tc32", c,
-		200, 32, clocksource_mmio_readl_up);
-
-	clockevent_tcc.mult = div_sc(c, NSEC_PER_SEC,
-					clockevent_tcc.shift);
-	clockevent_tcc.max_delta_ns =
-			clockevent_delta2ns(0xfffffffe, &clockevent_tcc);
-	clockevent_tcc.min_delta_ns =
-			clockevent_delta2ns(0xff, &clockevent_tcc);
-
-	clockevent_tcc.cpumask = cpumask_of(0);
-
-	clockevents_register_device(&clockevent_tcc);
-
-	return 0;
-}
-
-void __init tcc8k_timer_init(struct clk *clock, void __iomem *base, int irq)
-{
-	u32 reg;
-
-	timer_base = base;
-	tcc8k_timer_irq.irq = irq;
-
-	/* Enable clocks */
-	clk_enable(clock);
-
-	/* Initialize 32-bit timer */
-	reg = __raw_readl(timer_base + TC32EN_OFFS);
-	reg &= ~TC32EN_ENABLE; /* Disable timer */
-	__raw_writel(reg, timer_base + TC32EN_OFFS);
-	/* Free running timer, counting from 0 to 0xffffffff */
-	__raw_writel(0, timer_base + TC32EN_OFFS);
-	__raw_writel(0, timer_base + TC32LDV_OFFS);
-	reg = __raw_readl(timer_base + TC32IRQ_OFFS);
-	reg |= TC32IRQ_IRQEN0; /* irq at match with CMP0 */
-	__raw_writel(reg, timer_base + TC32IRQ_OFFS);
-
-	__raw_writel(TC32EN_ENABLE, timer_base + TC32EN_OFFS);
-
-	tcc_clockevent_init(clock);
-	setup_irq(irq, &tcc8k_timer_irq);
-}
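Note on the clockevent setup in the timer file deleted above: it used the standard fixed-point mult/shift scheme. A worked example of the arithmetic, assuming purely for illustration a 12 MHz timer clock and the shift value of 32 from the removed code:

	/* div_sc(rate, NSEC_PER_SEC, shift) == (rate << shift) / NSEC_PER_SEC */
	u64 mult = ((u64)12000000 << 32) / NSEC_PER_SEC;	/* ~51539607 */
	/* programming an event: ticks = (delta_ns * mult) >> shift;
	 * clockevent_delta2ns() runs the inverse to derive max/min_delta_ns
	 * from the 0xfffffffe and 0xff tick limits used above. */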
diff --git a/arch/arm/mach-tegra/board-dt.c b/arch/arm/mach-tegra/board-dt.c
index 74743ad..e417a83 100644
--- a/arch/arm/mach-tegra/board-dt.c
+++ b/arch/arm/mach-tegra/board-dt.c
@@ -32,6 +32,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-tegra.h>
 
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
@@ -130,7 +131,9 @@
 	.map_io		= tegra_map_common_io,
 	.init_early	= tegra_init_early,
 	.init_irq	= tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer		= &tegra_timer,
 	.init_machine	= tegra_dt_init,
+	.restart	= tegra_assert_system_reset,
 	.dt_compat	= tegra_dt_board_compat,
 MACHINE_END
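This is the first of many boards in this diff that gain two machine-descriptor fields: .handle_irq points the common exception entry at a C interrupt demultiplexer (the GIC driver's here), replacing the per-platform get_irqnr_and_base assembler macros deleted further down, and .restart replaces the arch_reset() hook that used to live in each mach/system.h. A minimal sketch of the pattern; the board name and most callbacks are placeholders, only gic_handle_irq and the field names come from this diff:

	MACHINE_START(MYBOARD, "my board")		/* hypothetical board file */
		.map_io		= myboard_map_io,
		.init_irq	= myboard_init_irq,
		.handle_irq	= gic_handle_irq,	/* C IRQ entry from <asm/hardware/gic.h> */
		.timer		= &myboard_timer,
		.init_machine	= myboard_init,
		.restart	= myboard_restart,	/* void (*)(char mode, const char *cmd) */
	MACHINE_END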
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index f0bdc5e..70ee674 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -31,6 +31,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
 #include <asm/setup.h>
 
 #include <mach/tegra_wm8903_pdata.h>
@@ -187,6 +188,8 @@
 	.map_io         = tegra_map_common_io,
 	.init_early	= tegra_init_early,
 	.init_irq       = tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_harmony_init,
+	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index 55c55ba..33d6205 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -29,6 +29,7 @@
 #include <linux/gpio.h>
 #include <linux/rfkill-gpio.h>
 
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
@@ -190,6 +191,8 @@
 	.map_io         = tegra_map_common_io,
 	.init_early	= tegra_init_early,
 	.init_irq       = tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_paz00_init,
+	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index bf13ea3..c1599eb 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -34,6 +34,7 @@
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 
 #include "board.h"
 #include "board-seaboard.h"
@@ -284,8 +285,10 @@
 	.map_io         = tegra_map_common_io,
 	.init_early     = tegra_init_early,
 	.init_irq       = tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_seaboard_init,
+	.restart	= tegra_assert_system_reset,
 MACHINE_END
 
 MACHINE_START(KAEN, "kaen")
@@ -293,8 +296,10 @@
 	.map_io         = tegra_map_common_io,
 	.init_early     = tegra_init_early,
 	.init_irq       = tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_kaen_init,
+	.restart	= tegra_assert_system_reset,
 MACHINE_END
 
 MACHINE_START(WARIO, "wario")
@@ -302,6 +307,8 @@
 	.map_io         = tegra_map_common_io,
 	.init_early     = tegra_init_early,
 	.init_irq       = tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_wario_init,
+	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index 1a6617b..c242314 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -26,6 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/gpio.h>
 
+#include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/setup.h>
@@ -176,6 +177,8 @@
 	.map_io         = tegra_map_common_io,
 	.init_early	= tegra_init_early,
 	.init_irq       = tegra_init_irq,
+	.handle_irq	= gic_handle_irq,
 	.timer          = &tegra_timer,
 	.init_machine   = tegra_trimslice_init,
+	.restart	= tegra_assert_system_reset,
 MACHINE_END
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 690b888..20f396d 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -31,8 +31,6 @@
 #include "clock.h"
 #include "fuse.h"
 
-void (*arch_reset)(char mode, const char *cmd) = tegra_assert_system_reset;
-
 void tegra_assert_system_reset(char mode, const char *cmd)
 {
 	void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04);
diff --git a/arch/arm/mach-tegra/include/mach/entry-macro.S b/arch/arm/mach-tegra/include/mach/entry-macro.S
index dd165c5..ac11262 100644
--- a/arch/arm/mach-tegra/include/mach/entry-macro.S
+++ b/arch/arm/mach-tegra/include/mach/entry-macro.S
@@ -12,30 +12,15 @@
  * GNU General Public License for more details.
  *
  */
-#include <mach/iomap.h>
-#include <mach/io.h>
-
-#if defined(CONFIG_ARM_GIC)
-#define HAVE_GET_IRQNR_PREAMBLE
-#include <asm/hardware/entry-macro-gic.S>
-
-	/* Uses the GIC interrupt controller built into the cpu */
-#define ICTRL_BASE (IO_CPU_VIRT + 0x100)
 
 	.macro	disable_fiq
 	.endm
 
-	.macro	get_irqnr_preamble, base, tmp
-	movw \base, #(ICTRL_BASE & 0x0000ffff)
-	movt \base, #((ICTRL_BASE & 0xffff0000) >> 16)
+	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
 
-	.macro  arch_ret_to_user, tmp1, tmp2
-	.endm
-#else
+#if !defined(CONFIG_ARM_GIC)
 	/* legacy interrupt controller for AP16 */
-	.macro	disable_fiq
-	.endm
 
 	.macro	get_irqnr_preamble, base, tmp
 	@ enable imprecise aborts
@@ -46,9 +31,6 @@
 	orr \base, #0x0000f000
 	.endm
 
-	.macro	arch_ret_to_user, tmp1, tmp2
-	.endm
-
 	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
 	ldr \irqnr, [\base, #0x20]	@ EVT_IRQ_STS
 	cmp \irqnr, #0x80
diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
index 35a011f..f15deff 100644
--- a/arch/arm/mach-tegra/include/mach/io.h
+++ b/arch/arm/mach-tegra/include/mach/io.h
@@ -71,12 +71,6 @@
 
 #ifndef __ASSEMBLER__
 
-#define __arch_ioremap		tegra_ioremap
-#define __arch_iounmap		tegra_iounmap
-
-void __iomem *tegra_ioremap(unsigned long phys, size_t size, unsigned int type);
-void tegra_iounmap(volatile void __iomem *addr);
-
 #define IO_ADDRESS(n) (IO_TO_VIRT(n))
 
 #ifdef CONFIG_TEGRA_PCI
diff --git a/arch/arm/mach-tegra/include/mach/system.h b/arch/arm/mach-tegra/include/mach/system.h
index 027c421..a312988 100644
--- a/arch/arm/mach-tegra/include/mach/system.h
+++ b/arch/arm/mach-tegra/include/mach/system.h
@@ -21,10 +21,6 @@
 #ifndef __MACH_TEGRA_SYSTEM_H
 #define __MACH_TEGRA_SYSTEM_H
 
-#include <mach/iomap.h>
-
-extern void (*arch_reset)(char mode, const char *cmd);
-
 static inline void arch_idle(void)
 {
 }
diff --git a/arch/arm/mach-tegra/include/mach/vmalloc.h b/arch/arm/mach-tegra/include/mach/vmalloc.h
deleted file mode 100644
index fd6aa65..0000000
--- a/arch/arm/mach-tegra/include/mach/vmalloc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-tegra/include/mach/vmalloc.h
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- *	Colin Cross <ccross@google.com>
- *	Erik Gilling <konkers@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __MACH_TEGRA_VMALLOC_H
-#define __MACH_TEGRA_VMALLOC_H
-
-#include <asm/sizes.h>
-
-#define VMALLOC_END        0xFE000000UL
-
-#endif
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index 5489f8b..d23ee2d 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -60,24 +60,3 @@
 {
 	iotable_init(tegra_io_desc, ARRAY_SIZE(tegra_io_desc));
 }
-
-/*
- * Intercept ioremap() requests for addresses in our fixed mapping regions.
- */
-void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
-{
-	void __iomem *v = IO_ADDRESS(p);
-	if (v == NULL)
-		v = __arm_ioremap(p, size, type);
-	return v;
-}
-EXPORT_SYMBOL(tegra_ioremap);
-
-void tegra_iounmap(volatile void __iomem *addr)
-{
-	unsigned long virt = (unsigned long)addr;
-
-	if (virt >= VMALLOC_START && virt < VMALLOC_END)
-		__iounmap(addr);
-}
-EXPORT_SYMBOL(tegra_iounmap);
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index e2272d2..732c724 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -19,7 +19,6 @@
 
 #include <linux/init.h>
 #include <linux/err.h>
-#include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -106,25 +105,9 @@
 	.set_mode	= tegra_timer_set_mode,
 };
 
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 1MHz, NSEC_PER_SEC, 60).
- * This gives a resolution of about 1us and a wrap period of about 1h11min.
- */
-#define SC_MULT		4194304000u
-#define SC_SHIFT	22
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace tegra_read_sched_clock(void)
 {
-	u32 cyc = timer_readl(TIMERUS_CNTR_1US);
-	return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-static void notrace tegra_update_sched_clock(void)
-{
-	u32 cyc = timer_readl(TIMERUS_CNTR_1US);
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return timer_readl(TIMERUS_CNTR_1US);
 }
 
 /*
@@ -218,8 +201,7 @@
 		WARN(1, "Unknown clock rate");
 	}
 
-	init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32,
-			       1000000, SC_MULT, SC_SHIFT);
+	setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
 
 	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
 		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index ac0791e..6979307 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1888,3 +1888,23 @@
 	return mmc_init(&mmcsd_device);
 }
 module_init(core_module_init);
+
+/* Forward declare this function from the watchdog */
+void coh901327_watchdog_reset(void);
+
+void u300_restart(char mode, const char *cmd)
+{
+	switch (mode) {
+	case 's':
+	case 'h':
+#ifdef CONFIG_COH901327_WATCHDOG
+		coh901327_watchdog_reset();
+#endif
+		break;
+	default:
+		/* Do nothing */
+		break;
+	}
+	/* Wait for the system to die/reset. */
+	while (1);
+}
diff --git a/arch/arm/mach-u300/include/mach/entry-macro.S b/arch/arm/mach-u300/include/mach/entry-macro.S
index 20731ae..7181d6a 100644
--- a/arch/arm/mach-u300/include/mach/entry-macro.S
+++ b/arch/arm/mach-u300/include/mach/entry-macro.S
@@ -8,33 +8,9 @@
  * Low-level IRQ helper macros for ST-Ericsson U300
  * Author: Linus Walleij <linus.walleij@stericsson.com>
  */
-#include <mach/hardware.h>
-#include <asm/hardware/vic.h>
 
 	.macro	disable_fiq
 	.endm
 
-	.macro  get_irqnr_preamble, base, tmp
-	.endm
-
 	.macro  arch_ret_to_user, tmp1, tmp2
 	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-	ldr	\base, = U300_AHB_PER_VIRT_BASE-U300_AHB_PER_PHYS_BASE+U300_INTCON0_BASE
-	ldr	\irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
-	mov	\irqnr, #0
-	teq	\irqstat, #0
-	bne	1002f
-1001:	ldr	\base, = U300_AHB_PER_VIRT_BASE-U300_AHB_PER_PHYS_BASE+U300_INTCON1_BASE
-	ldr	\irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
-	mov	\irqnr, #32
-	teq	\irqstat, #0
-	beq	1003f
-1002:	tst	\irqstat, #1
-	bne	1003f
-	add	\irqnr, \irqnr, #1
-	movs	\irqstat, \irqstat, lsr #1
-	bne	1002b
-1003:		/* EQ will be set if no irqs pending */
-	.endm
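The hand-rolled demux loop deleted here scanned the two VIC status registers for the lowest pending source; that job now belongs to the C handler installed via .handle_irq (vic_handle_irq in the u300.c hunk below). Roughly the same logic in C, as a sketch; the base-address names are placeholders:

	u32 stat;
	int irq = 0;

	stat = readl(vic0_base + VIC_IRQ_STATUS);		/* INTCON0 */
	if (!stat) {
		stat = readl(vic1_base + VIC_IRQ_STATUS);	/* INTCON1 */
		irq = 32;
	}
	if (stat)
		irq += ffs(stat) - 1;	/* lowest pending interrupt, as the asm loop did */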
diff --git a/arch/arm/mach-u300/include/mach/platform.h b/arch/arm/mach-u300/include/mach/platform.h
index 77d9210..096333f 100644
--- a/arch/arm/mach-u300/include/mach/platform.h
+++ b/arch/arm/mach-u300/include/mach/platform.h
@@ -14,6 +14,7 @@
 void u300_map_io(void);
 void u300_init_irq(void);
 void u300_init_devices(void);
+void u300_restart(char, const char *);
 extern struct sys_timer u300_timer;
 
 #endif
diff --git a/arch/arm/mach-u300/include/mach/system.h b/arch/arm/mach-u300/include/mach/system.h
index 8daf136..574d46e 100644
--- a/arch/arm/mach-u300/include/mach/system.h
+++ b/arch/arm/mach-u300/include/mach/system.h
@@ -8,35 +8,7 @@
  * System shutdown and reset functions.
  * Author: Linus Walleij <linus.walleij@stericsson.com>
  */
-#include <mach/hardware.h>
-#include <asm/io.h>
-#include <asm/hardware/vic.h>
-#include <asm/irq.h>
-
-/* Forward declare this function from the watchdog */
-void coh901327_watchdog_reset(void);
-
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
 }
-
-static void arch_reset(char mode, const char *cmd)
-{
-	switch (mode) {
-	case 's':
-	case 'h':
-		printk(KERN_CRIT "RESET: shutting down/rebooting system\n");
-		/* Disable interrupts */
-		local_irq_disable();
-#ifdef CONFIG_COH901327_WATCHDOG
-		coh901327_watchdog_reset();
-#endif
-		break;
-	default:
-		/* Do nothing */
-		break;
-	}
-	/* Wait for system do die/reset. */
-	while (1);
-}
diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h
deleted file mode 100644
index ec423b9..0000000
--- a/arch/arm/mach-u300/include/mach/vmalloc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/include/mach/vmalloc.h
- *
- *
- * Copyright (C) 2006-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Virtual memory allocations
- * End must be above the I/O registers and on an even 2MiB boundary.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#define VMALLOC_END	0xfe800000UL
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index 5f51bde..bc1c789 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -9,7 +9,6 @@
  * Author: Linus Walleij <linus.walleij@stericsson.com>
  */
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/clockchips.h>
@@ -337,18 +336,10 @@
  * this wraps around for now, since it is just a relative time
  * stamp. (Inspired by OMAP implementation.)
  */
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace u300_read_sched_clock(void)
 {
-	u32 cyc = readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace u300_update_sched_clock(void)
-{
-	u32 cyc = readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
 }
 
 
@@ -366,7 +357,7 @@
 	clk_enable(clk);
 	rate = clk_get_rate(clk);
 
-	init_sched_clock(&cd, u300_update_sched_clock, 32, rate);
+	setup_sched_clock(u300_read_sched_clock, 32, rate);
 
 	/*
 	 * Disable the "OS" and "DD" timers - these are designed for Symbian!
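Both this U300 hunk and the Tegra timer.c change earlier follow the same conversion: the platform no longer keeps its own clock_data/mult/shift bookkeeping for sched_clock() and instead registers a plain 32-bit counter read with the common code. A rough sketch of what the common layer then does with the (bits, rate) pair it is given:

	static inline u64 cyc_to_ns_sketch(u32 cyc, u32 mult, u32 shift)
	{
		return ((u64)cyc * mult) >> shift;	/* mult/shift derived from rate */
	}

	setup_sched_clock(u300_read_sched_clock, 32, rate);	/* as in the hunk above */

The wraparound handling that DEFINE_CLOCK_DATA/update_sched_clock used to provide per platform is also done centrally now.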
diff --git a/arch/arm/mach-u300/u300.c b/arch/arm/mach-u300/u300.c
index 89422ee..def45bd 100644
--- a/arch/arm/mach-u300/u300.c
+++ b/arch/arm/mach-u300/u300.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <mach/hardware.h>
 #include <mach/platform.h>
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/memory.h>
@@ -49,6 +50,8 @@
 	.atag_offset	= BOOT_PARAMS_OFFSET,
 	.map_io		= u300_map_io,
 	.init_irq	= u300_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &u300_timer,
 	.init_machine	= u300_init_machine,
+	.restart	= u300_restart,
 MACHINE_END
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index bdd7b80..de1f5f8 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -33,6 +33,7 @@
 #include <linux/leds.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
 
 #include <plat/i2c.h>
 #include <plat/ste_dma40.h>
@@ -695,6 +696,7 @@
 	.init_irq	= ux500_init_irq,
 	/* we re-use nomadik timer here */
 	.timer		= &ux500_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= mop500_init_machine,
 MACHINE_END
 
@@ -703,6 +705,7 @@
 	.map_io		= u8500_map_io,
 	.init_irq	= ux500_init_irq,
 	.timer		= &ux500_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= hrefv60_init_machine,
 MACHINE_END
 
@@ -712,5 +715,6 @@
 	.init_irq	= ux500_init_irq,
 	/* we re-use nomadik timer here */
 	.timer		= &ux500_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= snowball_init_machine,
 MACHINE_END
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
index 82025ba..fe1569b 100644
--- a/arch/arm/mach-ux500/board-u5500.c
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -12,6 +12,7 @@
 #include <linux/i2c.h>
 #include <linux/mfd/ab5500/ab5500.h>
 
+#include <asm/hardware/gic.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
@@ -149,5 +150,6 @@
 	.map_io		= u5500_map_io,
 	.init_irq	= ux500_init_irq,
 	.timer		= &ux500_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= u5500_init_machine,
 MACHINE_END
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index 9de1af0..5323286 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -30,12 +30,11 @@
 };
 
 static struct map_desc u5500_io_desc[] __initdata = {
-	__IO_DEV_DESC(U5500_GIC_CPU_BASE, SZ_4K),
+	/* SCU base also covers GIC CPU BASE and TWD with its 4K page */
+	__IO_DEV_DESC(U5500_SCU_BASE, SZ_4K),
 	__IO_DEV_DESC(U5500_GIC_DIST_BASE, SZ_4K),
 	__IO_DEV_DESC(U5500_L2CC_BASE, SZ_4K),
-	__IO_DEV_DESC(U5500_TWD_BASE, SZ_4K),
 	__IO_DEV_DESC(U5500_MTU0_BASE, SZ_4K),
-	__IO_DEV_DESC(U5500_SCU_BASE, SZ_4K),
 	__IO_DEV_DESC(U5500_BACKUPRAM0_BASE, SZ_8K),
 
 	__IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
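The consolidation relies on the Cortex-A9 MPCore private memory region layout: the SCU, the GIC CPU interface and the private timer/watchdog (TWD) all sit inside the first 4 KB above the SCU base, so a single static mapping covers them. Offsets for reference (macro names here are mine, the offsets are from the A9 MPCore documentation):

	#define A9_MPCORE_SCU		0x0000	/* snoop control unit */
	#define A9_MPCORE_GIC_CPU	0x0100	/* GIC CPU interface */
	#define A9_MPCORE_GLOBAL_TIMER	0x0200
	#define A9_MPCORE_TWD		0x0600	/* private timer/watchdog */
	#define A9_MPCORE_GIC_DIST	0x1000	/* distributor, hence its own map entry above */

The same change is repeated for db8500 just below.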
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 13e8890..7f2729c 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -35,12 +35,11 @@
 };
 
 static struct map_desc u8500_io_desc[] __initdata = {
-	__IO_DEV_DESC(U8500_GIC_CPU_BASE, SZ_4K),
+	/* SCU base also covers GIC CPU BASE and TWD with its 4K page */
+	__IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K),
-	__IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
-	__IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
 
 	__IO_DEV_DESC(U8500_CLKRST1_BASE, SZ_4K),
diff --git a/arch/arm/mach-ux500/include/mach/entry-macro.S b/arch/arm/mach-ux500/include/mach/entry-macro.S
index 071bba9..e16299e 100644
--- a/arch/arm/mach-ux500/include/mach/entry-macro.S
+++ b/arch/arm/mach-ux500/include/mach/entry-macro.S
@@ -10,8 +10,6 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <mach/hardware.h>
-#include <asm/hardware/entry-macro-gic.S>
 
 		.macro	disable_fiq
 		.endm
diff --git a/arch/arm/mach-ux500/include/mach/gpio.h b/arch/arm/mach-ux500/include/mach/gpio.h
index 7389df9..c01ef66 100644
--- a/arch/arm/mach-ux500/include/mach/gpio.h
+++ b/arch/arm/mach-ux500/include/mach/gpio.h
@@ -1,10 +1,5 @@
 #ifndef __ASM_ARCH_GPIO_H
 #define __ASM_ARCH_GPIO_H
 
-/*
- * 288 (#267 is the highest one actually hooked up) onchip GPIOs, plus enough
- * room for a couple of GPIO expanders.
- */
-#define ARCH_NR_GPIOS	350
 
 #endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-ux500/include/mach/system.h b/arch/arm/mach-ux500/include/mach/system.h
index c0cd800..258e5c9 100644
--- a/arch/arm/mach-ux500/include/mach/system.h
+++ b/arch/arm/mach-ux500/include/mach/system.h
@@ -17,9 +17,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/* yet to be implemented - TODO */
-}
-
 #endif
diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h
deleted file mode 100644
index a4945cb..0000000
--- a/arch/arm/mach-ux500/include/mach/vmalloc.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- *  Copyright (C) 2009 ST-Ericsson
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END	0xf0000000UL
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index e340a54..02b7b93 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -22,7 +22,6 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/of_address.h>
@@ -141,11 +140,6 @@
 	},
 #ifdef CONFIG_MACH_VERSATILE_AB
  	{
-		.virtual	=  IO_ADDRESS(VERSATILE_GPIO0_BASE),
-		.pfn		= __phys_to_pfn(VERSATILE_GPIO0_BASE),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE
-	}, {
 		.virtual	=  IO_ADDRESS(VERSATILE_IB2_BASE),
 		.pfn		= __phys_to_pfn(VERSATILE_IB2_BASE),
 		.length		= SZ_64M,
@@ -745,6 +739,19 @@
 }
 #endif	/* CONFIG_LEDS */
 
+void versatile_restart(char mode, const char *cmd)
+{
+	void __iomem *sys = __io_address(VERSATILE_SYS_BASE);
+	u32 val;
+
+	val = __raw_readl(sys + VERSATILE_SYS_RESETCTL_OFFSET);
+	val |= 0x105;
+
+	__raw_writel(0xa05f, sys + VERSATILE_SYS_LOCK_OFFSET);
+	__raw_writel(val, sys + VERSATILE_SYS_RESETCTL_OFFSET);
+	__raw_writel(0, sys + VERSATILE_SYS_LOCK_OFFSET);
+}
+
 /* Early initializations */
 void __init versatile_init_early(void)
 {
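versatile_restart() reuses the sequence from the arch_reset() it replaces (removed from mach/system.h later in this diff): the system controller ignores writes to SYS_RESETCTL unless SYS_LOCK has first been written with the board's unlock key, so the reset request is bracketed by an unlock and a re-lock. A condensed sketch of that gate, with the 0xa05f key taken from the code above:

	__raw_writel(0xa05f, sys + VERSATILE_SYS_LOCK_OFFSET);		/* unlock SYS_* registers */
	__raw_writel(val, sys + VERSATILE_SYS_RESETCTL_OFFSET);	/* request the reset */
	__raw_writel(0, sys + VERSATILE_SYS_LOCK_OFFSET);		/* any other value re-locks */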
diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h
index e014227..2ef2f55 100644
--- a/arch/arm/mach-versatile/core.h
+++ b/arch/arm/mach-versatile/core.h
@@ -30,6 +30,7 @@
 extern void __init versatile_init_irq(void);
 extern void __init versatile_map_io(void);
 extern struct sys_timer versatile_timer;
+extern void versatile_restart(char, const char *);
 extern unsigned int mmc_status(struct device *dev);
 #ifdef CONFIG_OF
 extern struct of_dev_auxdata versatile_auxdata_lookup[];
diff --git a/arch/arm/mach-versatile/include/mach/entry-macro.S b/arch/arm/mach-versatile/include/mach/entry-macro.S
index e6f7c16..b6f0dbf 100644
--- a/arch/arm/mach-versatile/include/mach/entry-macro.S
+++ b/arch/arm/mach-versatile/include/mach/entry-macro.S
@@ -7,39 +7,9 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <mach/hardware.h>
-#include <mach/platform.h>
-#include <asm/hardware/vic.h>
 
 		.macro	disable_fiq
 		.endm
 
-		.macro  get_irqnr_preamble, base, tmp
-		ldr	\base, =IO_ADDRESS(VERSATILE_VIC_BASE)
-		.endm
-
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
-
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-		ldr	\irqstat, [\base, #VIC_IRQ_STATUS]	@ get masked status
-		mov	\irqnr, #0
-		teq	\irqstat, #0
-		beq	1003f
-
-1001:		tst	\irqstat, #15
-		bne	1002f
-		add	\irqnr, \irqnr, #4
-		movs	\irqstat, \irqstat, lsr #4
-		bne	1001b
-1002:		tst	\irqstat, #1
-		bne	1003f
-		add	\irqnr, \irqnr, #1
-		movs	\irqstat, \irqstat, lsr #1
-		bne	1002b
-1003:		/* EQ will be set if no irqs pending */
-
-@		clz	\irqnr, \irqstat
-@1003:		/* EQ will be set if we reach MAXIRQNUM */
-		.endm
-
diff --git a/arch/arm/mach-versatile/include/mach/system.h b/arch/arm/mach-versatile/include/mach/system.h
index 8ffc12a..f3fa347 100644
--- a/arch/arm/mach-versatile/include/mach/system.h
+++ b/arch/arm/mach-versatile/include/mach/system.h
@@ -21,10 +21,6 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
 static inline void arch_idle(void)
 {
 	/*
@@ -34,16 +30,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	u32 val;
-
-	val = __raw_readl(IO_ADDRESS(VERSATILE_SYS_RESETCTL)) & ~0x7;
-	val |= 0x105;
-
-	__raw_writel(0xa05f, IO_ADDRESS(VERSATILE_SYS_LOCK));
-	__raw_writel(val, IO_ADDRESS(VERSATILE_SYS_RESETCTL));
-	__raw_writel(0, IO_ADDRESS(VERSATILE_SYS_LOCK));
-}
-
 #endif
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
deleted file mode 100644
index 7d8e069..0000000
--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  arch/arm/mach-versatile/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 ARM Limited
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END		0xd8000000UL
diff --git a/arch/arm/mach-versatile/versatile_ab.c b/arch/arm/mach-versatile/versatile_ab.c
index fda4866..98f6549 100644
--- a/arch/arm/mach-versatile/versatile_ab.c
+++ b/arch/arm/mach-versatile/versatile_ab.c
@@ -21,12 +21,12 @@
 
 #include <linux/init.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/amba/bus.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 
 #include <asm/mach/arch.h>
@@ -39,6 +39,8 @@
 	.map_io		= versatile_map_io,
 	.init_early	= versatile_init_early,
 	.init_irq	= versatile_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &versatile_timer,
 	.init_machine	= versatile_init,
+	.restart	= versatile_restart,
 MACHINE_END
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index 54e037c..ae5ad3c 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -45,7 +46,9 @@
 	.map_io		= versatile_map_io,
 	.init_early	= versatile_init_early,
 	.init_irq	= versatile_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &versatile_timer,
 	.init_machine	= versatile_dt_init,
 	.dt_compat	= versatile_dt_match,
+	.restart	= versatile_restart,
 MACHINE_END
diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
index feaf9cb..9581c19 100644
--- a/arch/arm/mach-versatile/versatile_pb.c
+++ b/arch/arm/mach-versatile/versatile_pb.c
@@ -21,13 +21,13 @@
 
 #include <linux/init.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl061.h>
 #include <linux/amba/mmci.h>
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <asm/hardware/vic.h>
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 
@@ -107,6 +107,8 @@
 	.map_io		= versatile_map_io,
 	.init_early	= versatile_init_early,
 	.init_irq	= versatile_init_irq,
+	.handle_irq	= vic_handle_irq,
 	.timer		= &versatile_timer,
 	.init_machine	= versatile_pb_init,
+	.restart	= versatile_restart,
 MACHINE_END
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 9311484..9b3d0fb 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -8,5 +8,7 @@
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_751472
 	select ARM_ERRATA_753970
+	select HAVE_SMP
+	select MIGHT_HAVE_CACHE_L2X0
 
 endmenu
diff --git a/arch/arm/mach-vexpress/include/mach/entry-macro.S b/arch/arm/mach-vexpress/include/mach/entry-macro.S
index 73c1129..a14f9e6 100644
--- a/arch/arm/mach-vexpress/include/mach/entry-macro.S
+++ b/arch/arm/mach-vexpress/include/mach/entry-macro.S
@@ -1,5 +1,3 @@
-#include <asm/hardware/entry-macro-gic.S>
-
 	.macro	disable_fiq
 	.endm
 
diff --git a/arch/arm/mach-vexpress/include/mach/system.h b/arch/arm/mach-vexpress/include/mach/system.h
index 899a4e6..f653a8e 100644
--- a/arch/arm/mach-vexpress/include/mach/system.h
+++ b/arch/arm/mach-vexpress/include/mach/system.h
@@ -30,8 +30,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-}
-
 #endif
diff --git a/arch/arm/mach-vexpress/include/mach/vmalloc.h b/arch/arm/mach-vexpress/include/mach/vmalloc.h
deleted file mode 100644
index f43a36e..0000000
--- a/arch/arm/mach-vexpress/include/mach/vmalloc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  arch/arm/mach-vexpress/include/mach/vmalloc.h
- *
- *  Copyright (C) 2003 ARM Limited
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END		0xf8000000UL
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 1fafc32..b4a28ca 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -10,7 +10,7 @@
 #include <linux/ata_platform.h>
 #include <linux/smsc911x.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/usb/isp1760.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
@@ -23,6 +23,7 @@
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
 #include <asm/hardware/sp810.h>
+#include <asm/hardware/gic.h>
 
 #include <mach/ct-ca9x4.h>
 #include <mach/motherboard.h>
@@ -437,7 +438,6 @@
 		amba_device_register(v2m_amba_devs[i], &iomem_resource);
 
 	pm_power_off = v2m_power_off;
-	arm_pm_restart = v2m_restart;
 
 	ct_desc->init_tile();
 }
@@ -448,5 +448,7 @@
 	.init_early	= v2m_init_early,
 	.init_irq	= v2m_init_irq,
 	.timer		= &v2m_timer,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= v2m_init,
+	.restart	= v2m_restart,
 MACHINE_END
diff --git a/arch/arm/mach-vt8500/include/mach/vmalloc.h b/arch/arm/mach-vt8500/include/mach/vmalloc.h
deleted file mode 100644
index 4642290..0000000
--- a/arch/arm/mach-vt8500/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  arch/arm/mach-vt8500/include/mach/vmalloc.h
- *
- *  Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#define VMALLOC_END	0xd0000000UL
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c
index 0a235e5..604e1db 100644
--- a/arch/arm/mach-w90x900/cpu.c
+++ b/arch/arm/mach-w90x900/cpu.c
@@ -33,9 +33,11 @@
 #include <mach/regs-serial.h>
 #include <mach/regs-clock.h>
 #include <mach/regs-ebi.h>
+#include <mach/regs-timer.h>
 
 #include "cpu.h"
 #include "clock.h"
+#include "nuc9xx.h"
 
 /* Initial IO mappings */
 
@@ -222,3 +224,17 @@
 	clkdev_add_table(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs));
 }
 
+#define	WTCR	(TMR_BA + 0x1C)
+#define	WTCLK	(1 << 10)
+#define	WTE	(1 << 7)
+#define	WTRE	(1 << 1)
+
+void nuc9xx_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		/* Jump into ROM at address 0 */
+		soft_restart(0);
+	} else {
+		__raw_writel(WTE | WTRE | WTCLK, WTCR);
+	}
+}
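nuc9xx_restart() is the old arch_reset() from mach/system.h (removed below) moved into cpu.c: a soft reboot jumps back to the ROM vector, now via the common soft_restart() helper rather than a raw cpu_reset(0), and a hard reboot arms the watchdog so that it forces a chip reset. The value written to WTCR, spelled out:

	/* WTCLK (1 << 10) | WTE (1 << 7) | WTRE (1 << 1) == 0x482:
	 * select the watchdog clock, enable the watchdog and enable its
	 * reset output; with no further servicing the chip then resets. */
	__raw_writel(WTE | WTRE | WTCLK, WTCR);		/* WTCR == TMR_BA + 0x1C */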
diff --git a/arch/arm/mach-w90x900/include/mach/system.h b/arch/arm/mach-w90x900/include/mach/system.h
index ce228bd..2aaeb93 100644
--- a/arch/arm/mach-w90x900/include/mach/system.h
+++ b/arch/arm/mach-w90x900/include/mach/system.h
@@ -14,28 +14,6 @@
  * (at your option) any later version.
  *
  */
-
-#include <linux/io.h>
-#include <asm/proc-fns.h>
-#include <mach/map.h>
-#include <mach/regs-timer.h>
-
-#define	WTCR	(TMR_BA + 0x1C)
-#define	WTCLK	(1 << 10)
-#define	WTE	(1 << 7)
-#define	WTRE	(1 << 1)
-
 static void arch_idle(void)
 {
 }
-
-static void arch_reset(char mode, const char *cmd)
-{
-	if (mode == 's') {
-		/* Jump into ROM at address 0 */
-		cpu_reset(0);
-	} else {
-		__raw_writel(WTE | WTRE | WTCLK, WTCR);
-	}
-}
-
diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h
deleted file mode 100644
index b067e44..0000000
--- a/arch/arm/mach-w90x900/include/mach/vmalloc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/arm/mach-w90x900/include/mach/vmalloc.h
- *
- * Copyright (c) 2008 Nuvoton technology corporation
- * All rights reserved.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * Based on arch/arm/mach-s3c2410/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END	  (0xe0000000UL)
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c
index 7bf143c..d66d43a 100644
--- a/arch/arm/mach-w90x900/irq.c
+++ b/arch/arm/mach-w90x900/irq.c
@@ -19,7 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/ptrace.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <asm/irq.h>
@@ -28,6 +28,8 @@
 #include <mach/hardware.h>
 #include <mach/regs-irq.h>
 
+#include "nuc9xx.h"
+
 struct group_irq {
 	unsigned long		gpen;
 	unsigned int		enabled;
diff --git a/arch/arm/mach-w90x900/mach-nuc910evb.c b/arch/arm/mach-w90x900/mach-nuc910evb.c
index 31c1090..b4243e4 100644
--- a/arch/arm/mach-w90x900/mach-nuc910evb.c
+++ b/arch/arm/mach-w90x900/mach-nuc910evb.c
@@ -38,4 +38,5 @@
 	.init_irq	= nuc900_init_irq,
 	.init_machine	= nuc910evb_init,
 	.timer		= &nuc900_timer,
+	.restart	= nuc9xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-w90x900/mach-nuc950evb.c b/arch/arm/mach-w90x900/mach-nuc950evb.c
index 4062e55..067d8f9 100644
--- a/arch/arm/mach-w90x900/mach-nuc950evb.c
+++ b/arch/arm/mach-w90x900/mach-nuc950evb.c
@@ -41,4 +41,5 @@
 	.init_irq	= nuc900_init_irq,
 	.init_machine	= nuc950evb_init,
 	.timer		= &nuc900_timer,
+	.restart	= nuc9xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-w90x900/mach-nuc960evb.c b/arch/arm/mach-w90x900/mach-nuc960evb.c
index 0ab9995..cbb3adc 100644
--- a/arch/arm/mach-w90x900/mach-nuc960evb.c
+++ b/arch/arm/mach-w90x900/mach-nuc960evb.c
@@ -38,4 +38,5 @@
 	.init_irq	= nuc900_init_irq,
 	.init_machine	= nuc960evb_init,
 	.timer		= &nuc900_timer,
+	.restart	= nuc9xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-w90x900/nuc910.h b/arch/arm/mach-w90x900/nuc910.h
index 83e9ba5..b14c71a 100644
--- a/arch/arm/mach-w90x900/nuc910.h
+++ b/arch/arm/mach-w90x900/nuc910.h
@@ -12,14 +12,7 @@
  * published by the Free Software Foundation.
  *
  */
-
-struct map_desc;
-struct sys_timer;
-
-/* core initialisation functions */
-
-extern void nuc900_init_irq(void);
-extern struct sys_timer nuc900_timer;
+#include "nuc9xx.h"
 
 /* extern file from nuc910.c */
 
diff --git a/arch/arm/mach-w90x900/nuc950.h b/arch/arm/mach-w90x900/nuc950.h
index 98a1148..6e9de30 100644
--- a/arch/arm/mach-w90x900/nuc950.h
+++ b/arch/arm/mach-w90x900/nuc950.h
@@ -12,14 +12,7 @@
  * published by the Free Software Foundation.
  *
  */
-
-struct map_desc;
-struct sys_timer;
-
-/* core initialisation functions */
-
-extern void nuc900_init_irq(void);
-extern struct sys_timer nuc900_timer;
+#include "nuc9xx.h"
 
 /* extern file from nuc950.c */
 
diff --git a/arch/arm/mach-w90x900/nuc960.h b/arch/arm/mach-w90x900/nuc960.h
index f0c07cb..9f6df9a 100644
--- a/arch/arm/mach-w90x900/nuc960.h
+++ b/arch/arm/mach-w90x900/nuc960.h
@@ -12,14 +12,7 @@
  * published by the Free Software Foundation.
  *
  */
-
-struct map_desc;
-struct sys_timer;
-
-/* core initialisation functions */
-
-extern void nuc900_init_irq(void);
-extern struct sys_timer nuc900_timer;
+#include "nuc9xx.h"
 
 /* extern file from nuc960.c */
 
diff --git a/arch/arm/mach-w90x900/nuc9xx.h b/arch/arm/mach-w90x900/nuc9xx.h
new file mode 100644
index 0000000..91acb40
--- /dev/null
+++ b/arch/arm/mach-w90x900/nuc9xx.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-w90x900/nuc9xx.h
+ *
+ * Copied from nuc910.h, which had:
+ *
+ * Copyright (c) 2008 Nuvoton corporation
+ *
+ * Header file for NUC900 CPU support
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+struct map_desc;
+struct sys_timer;
+
+/* core initialisation functions */
+
+extern void nuc900_init_irq(void);
+extern struct sys_timer nuc900_timer;
+extern void nuc9xx_restart(char, const char *);
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index a2c4e2d..fa27c49 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -33,6 +33,8 @@
 #include <mach/map.h>
 #include <mach/regs-timer.h>
 
+#include "nuc9xx.h"
+
 #define RESETINT	0x1f
 #define PERIOD		(0x01 << 27)
 #define ONESHOT		(0x00 << 27)
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 73e9368..ab5cfdd 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -112,6 +112,7 @@
 MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
 	.map_io		= xilinx_map_io,
 	.init_irq	= xilinx_irq_init,
+	.handle_irq	= gic_handle_irq,
 	.init_machine	= xilinx_init_machine,
 	.timer		= &xttcpss_sys_timer,
 	.dt_compat	= xilinx_dt_match,
diff --git a/arch/arm/mach-zynq/include/mach/entry-macro.S b/arch/arm/mach-zynq/include/mach/entry-macro.S
index 3cfc01b..d621fb7 100644
--- a/arch/arm/mach-zynq/include/mach/entry-macro.S
+++ b/arch/arm/mach-zynq/include/mach/entry-macro.S
@@ -20,9 +20,6 @@
  * GNU General Public License for more details.
  */
 
-#include <mach/hardware.h>
-#include <asm/hardware/entry-macro-gic.S>
-
 		.macro  disable_fiq
 		.endm
 
diff --git a/arch/arm/mach-zynq/include/mach/system.h b/arch/arm/mach-zynq/include/mach/system.h
index 1b84d70..8e88e0b 100644
--- a/arch/arm/mach-zynq/include/mach/system.h
+++ b/arch/arm/mach-zynq/include/mach/system.h
@@ -20,9 +20,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	/* Add architecture specific reset processing here */
-}
-
 #endif
diff --git a/arch/arm/mach-zynq/include/mach/vmalloc.h b/arch/arm/mach-zynq/include/mach/vmalloc.h
deleted file mode 100644
index 2398eff..0000000
--- a/arch/arm/mach-zynq/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/vmalloc.h
- *
- *  Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_VMALLOC_H__
-#define __MACH_VMALLOC_H__
-
-#define VMALLOC_END       0xE0000000UL
-
-#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 67f75a0..4cefb57 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,6 +629,23 @@
 
 comment "Processor Features"
 
+config ARM_LPAE
+	bool "Support for the Large Physical Address Extension"
+	depends on MMU && CPU_V7
+	help
+	  Say Y if you have an ARMv7 processor supporting the LPAE page
+	  table format and you would like to access memory beyond the
+	  4GB limit. The resulting kernel image will not run on
+	  processors without the LPA extension.
+
+	  If unsure, say N.
+
+config ARCH_PHYS_ADDR_T_64BIT
+	def_bool ARM_LPAE
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+
 config ARM_THUMB
 	bool "Support Thumb user binaries"
 	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
@@ -816,14 +833,23 @@
 	  Say Y here to use the Feroceon L2 cache in writethrough mode.
 	  Unless you specifically require this, say N for writeback mode.
 
+config MIGHT_HAVE_CACHE_L2X0
+	bool
+	help
+	  This option should be selected by machines which have a L2x0
+	  or PL310 cache controller, but where its use is optional.
+
+	  The only effect of this option is to make CACHE_L2X0 and
+	  related options available to the user for configuration.
+
+	  Boards or SoCs which always require the cache controller
+	  support to be present should select CACHE_L2X0 directly
+	  instead of this option, thus preventing the user from
+	  inadvertently configuring a broken kernel.
+
 config CACHE_L2X0
-	bool "Enable the L2x0 outer cache controller"
-	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_IMX_V6_V7 || MACH_REALVIEW_PBX || \
-		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
-		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \
-		   ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX || ARCH_HIGHBANK
-	default y
+	bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
+	default MIGHT_HAVE_CACHE_L2X0
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
 	help
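ARM_LPAE selects ARCH_PHYS_ADDR_T_64BIT, so with the 3-level (long-descriptor) page tables the kernel can address physical memory above 4GB while virtual addresses stay 32-bit. A small illustration of the effect on the types:

	#include <linux/types.h>

	phys_addr_t frame = 0x100000000ULL;	/* above 4GB, representable only with LPAE */
	void *va;				/* pointers remain 32-bit */

Separately, machines whose L2x0/PL310 use is optional now select MIGHT_HAVE_CACHE_L2X0 instead of being listed in the CACHE_L2X0 dependency string, which is what the vexpress Kconfig hunk earlier relies on.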
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index c335c76..caf14dc 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -968,7 +968,7 @@
 		ai_usermode = safe_usermode(ai_usermode, false);
 	}
 
-	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
 			"alignment exception");
 
 	/*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 93aac06..ee9bb36 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -22,6 +22,21 @@
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
+#ifdef CONFIG_ARM_LPAE
+#define cpu_set_asid(asid) {						\
+	unsigned long ttbl, ttbh;					\
+	asm volatile(							\
+	"	mrrc	p15, 0, %0, %1, c2		@ read TTBR0\n"	\
+	"	mov	%1, %2, lsl #(48 - 32)		@ set ASID\n"	\
+	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"	\
+	: "=&r" (ttbl), "=&r" (ttbh)					\
+	: "r" (asid & ~ASID_MASK));					\
+}
+#else
+#define cpu_set_asid(asid) \
+	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+#endif
+
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
@@ -37,7 +52,7 @@
 static void flush_context(void)
 {
 	/* set the reserved ASID before flushing the TLB */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	cpu_set_asid(0);
 	isb();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
@@ -99,7 +114,7 @@
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	cpu_set_asid(mm->context.id);
 	isb();
 }
 
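The new cpu_set_asid() covers the LPAE case, where the ASID no longer lives in the Context ID register but in the top bits of the (now 64-bit) TTBR0, so switching ASIDs means rewriting TTBR0 with mcrr. The bit placement, as I read the macro above:

	/* mcrr writes ttbl into TTBR0[31:0] and ttbh into TTBR0[63:32];
	 * shifting the ASID left by (48 - 32) in the high word therefore
	 * lands it in TTBR0[55:48]. */
	ttbh = (u32)asid << (48 - 32);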
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index aa33949..bb7eac3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -27,19 +27,6 @@
 
 #include "fault.h"
 
-/*
- * Fault status register encodings.  We steal bit 31 for our own purposes.
- */
-#define FSR_LNX_PF		(1 << 31)
-#define FSR_WRITE		(1 << 11)
-#define FSR_FS4			(1 << 10)
-#define FSR_FS3_0		(15)
-
-static inline int fsr_fs(unsigned int fsr)
-{
-	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
-}
-
 #ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
@@ -123,8 +110,10 @@
 
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08llx", (long long)pte_val(*pte));
+#ifndef CONFIG_ARM_LPAE
 		printk(", *ppte=%08llx",
 		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
+#endif
 		pte_unmap(pte);
 	} while(0);
 
@@ -231,7 +220,7 @@
 
 static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -253,18 +242,7 @@
 		goto out;
 	}
 
-	/*
-	 * If for any reason at all we couldn't handle the fault, make
-	 * sure we exit gracefully rather than endlessly redo the fault.
-	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
-	return fault;
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -279,6 +257,9 @@
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	int write = fsr & FSR_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				(write ? FAULT_FLAG_WRITE : 0);
 
 	if (notify_page_fault(regs, fsr))
 		return 0;
@@ -305,6 +286,7 @@
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -320,14 +302,41 @@
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, tsk);
-	up_read(&mm->mmap_sem);
+	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (fault & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
-	else if (fault & VM_FAULT_MINOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					regs, addr);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					regs, addr);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
@@ -441,6 +450,12 @@
 	pmd = pmd_offset(pud, addr);
 	pmd_k = pmd_offset(pud_k, addr);
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Only one hardware entry per PMD with LPAE.
+	 */
+	index = 0;
+#else
 	/*
 	 * On ARM one Linux PGD entry contains two hardware entries (see page
 	 * tables layout in pgtable.h). We normally guarantee that we always
@@ -450,6 +465,7 @@
 	 * for the first of pair.
 	 */
 	index = (addr >> SECTION_SHIFT) & 1;
+#endif
 	if (pmd_none(pmd_k[index]))
 		goto bad_area;
 
@@ -489,55 +505,20 @@
 	return 1;
 }
 
-static struct fsr_info {
+struct fsr_info {
 	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 	int	sig;
 	int	code;
 	const char *name;
-} fsr_info[] = {
-	/*
-	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
-	 * defines these to be "precise" aborts.
-	 */
-	{ do_bad,		SIGSEGV, 0,		"vector exception"		   },
-	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
-	{ do_bad,		SIGKILL, 0,		"terminal exception"		   },
-	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
-	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
-	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
-	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
-	/*
-	 * The following are "imprecise" aborts, which are signalled by bit
-	 * 10 of the FSR, and may not be recoverable.  These are only
-	 * supported if the CPU abort handler supports bit 10.
-	 */
-	{ do_bad,		SIGBUS,  0,		"unknown 16"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 17"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 18"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 19"			   },
-	{ do_bad,		SIGBUS,  0,		"lock abort"			   }, /* xscale */
-	{ do_bad,		SIGBUS,  0,		"unknown 21"			   },
-	{ do_bad,		SIGBUS,  BUS_OBJERR,	"imprecise external abort"	   }, /* xscale */
-	{ do_bad,		SIGBUS,  0,		"unknown 23"			   },
-	{ do_bad,		SIGBUS,  0,		"dcache parity error"		   }, /* xscale */
-	{ do_bad,		SIGBUS,  0,		"unknown 25"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 26"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 27"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 28"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 29"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 30"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 31"			   }
 };
 
+/* FSR definition */
+#ifdef CONFIG_ARM_LPAE
+#include "fsr-3level.c"
+#else
+#include "fsr-2level.c"
+#endif
+
 void __init
 hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
 		int sig, int code, const char *name)
@@ -573,42 +554,6 @@
 	arm_notify_die("", regs, &info, fsr, 0);
 }
 
-
-static struct fsr_info ifsr_info[] = {
-	{ do_bad,		SIGBUS,  0,		"unknown 0"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 1"			   },
-	{ do_bad,		SIGBUS,  0,		"debug event"			   },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section access flag fault"	   },
-	{ do_bad,		SIGBUS,  0,		"unknown 4"			   },
-	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page access flag fault"	   },
-	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
-	{ do_bad,		SIGBUS,  0,		"unknown 10"			   },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
-	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
-	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
-	{ do_bad,		SIGBUS,  0,		"unknown 16"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 17"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 18"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 19"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 20"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 21"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 22"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 23"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 24"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 25"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 26"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 27"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 28"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 29"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 30"			   },
-	{ do_bad,		SIGBUS,  0,		"unknown 31"			   },
-};
-
 void __init
 hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
 		 int sig, int code, const char *name)
@@ -641,6 +586,7 @@
 	arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static int __init exceptions_init(void)
 {
 	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
@@ -663,3 +609,4 @@
 }
 
 arch_initcall(exceptions_init);
+#endif
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 49e9e38..cf08bdf 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,3 +1,28 @@
-void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+#ifndef __ARCH_ARM_FAULT_H
+#define __ARCH_ARM_FAULT_H
 
+/*
+ * Fault status register encodings.  We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF		(1 << 31)
+#define FSR_WRITE		(1 << 11)
+#define FSR_FS4			(1 << 10)
+#define FSR_FS3_0		(15)
+#define FSR_FS5_0		(0x3f)
+
+#ifdef CONFIG_ARM_LPAE
+static inline int fsr_fs(unsigned int fsr)
+{
+	return fsr & FSR_FS5_0;
+}
+#else
+static inline int fsr_fs(unsigned int fsr)
+{
+	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+#endif
+
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
+
+#endif	/* __ARCH_ARM_FAULT_H */
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
new file mode 100644
index 0000000..18ca74c
--- /dev/null
+++ b/arch/arm/mm/fsr-2level.c
@@ -0,0 +1,78 @@
+static struct fsr_info fsr_info[] = {
+	/*
+	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
+	 * defines these to be "precise" aborts.
+	 */
+	{ do_bad,		SIGSEGV, 0,		"vector exception"		   },
+	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
+	{ do_bad,		SIGKILL, 0,		"terminal exception"		   },
+	{ do_bad,		SIGBUS,	 BUS_ADRALN,	"alignment exception"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on linefetch"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
+	/*
+	 * The following are "imprecise" aborts, which are signalled by bit
+	 * 10 of the FSR, and may not be recoverable.  These are only
+	 * supported if the CPU abort handler supports bit 10.
+	 */
+	{ do_bad,		SIGBUS,  0,		"unknown 16"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 17"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 18"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 19"			   },
+	{ do_bad,		SIGBUS,  0,		"lock abort"			   }, /* xscale */
+	{ do_bad,		SIGBUS,  0,		"unknown 21"			   },
+	{ do_bad,		SIGBUS,  BUS_OBJERR,	"imprecise external abort"	   }, /* xscale */
+	{ do_bad,		SIGBUS,  0,		"unknown 23"			   },
+	{ do_bad,		SIGBUS,  0,		"dcache parity error"		   }, /* xscale */
+	{ do_bad,		SIGBUS,  0,		"unknown 25"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 26"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 27"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 28"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 29"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 30"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 31"			   },
+};
+
+static struct fsr_info ifsr_info[] = {
+	{ do_bad,		SIGBUS,  0,		"unknown 0"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 1"			   },
+	{ do_bad,		SIGBUS,  0,		"debug event"			   },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section access flag fault"	   },
+	{ do_bad,		SIGBUS,  0,		"unknown 4"			   },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault"	   },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page access flag fault"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on non-linefetch"  },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault"		   },
+	{ do_bad,		SIGBUS,  0,		"unknown 10"			   },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
+	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
+	{ do_bad,		SIGBUS,  0,		"unknown 16"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 17"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 18"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 19"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 20"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 21"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 22"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 23"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 24"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 25"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 26"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 27"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 28"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 29"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 30"			   },
+	{ do_bad,		SIGBUS,  0,		"unknown 31"			   },
+};
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
new file mode 100644
index 0000000..05a4e94
--- /dev/null
+++ b/arch/arm/mm/fsr-3level.c
@@ -0,0 +1,68 @@
+static struct fsr_info fsr_info[] = {
+	{ do_bad,		SIGBUS,  0,		"unknown 0"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 1"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 2"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 3"			},
+	{ do_bad,		SIGBUS,  0,		"reserved translation fault"	},
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
+	{ do_bad,		SIGBUS,  0,		"reserved access flag fault"	},
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
+	{ do_bad,		SIGBUS,  0,		"reserved permission fault"	},
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
+	{ do_bad,		SIGBUS,  0,		"asynchronous external abort"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
+	{ do_bad,		SIGBUS,  0,		"asynchronous parity error"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
+	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
+	{ do_bad,		SIGBUS,  0,		"debug event"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 38"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 39"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 40"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 41"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 42"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 43"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 44"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 48"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
+	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 53"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
+	{ do_bad,		SIGBUS,  0,		"implementation fault (coprocessor abort)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 61"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 62"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
+};
+
+#define ifsr_info	fsr_info
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2be9139..feacf4c 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,9 +1,38 @@
 #include <linux/kernel.h>
 
 #include <asm/cputype.h>
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
+pgd_t *idmap_pgd;
+
+#ifdef CONFIG_ARM_LPAE
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+	unsigned long prot)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
+		pmd = pmd_alloc_one(&init_mm, addr);
+		if (!pmd) {
+			pr_warning("Failed to allocate identity pmd.\n");
+			return;
+		}
+		pud_populate(&init_mm, pud, pmd);
+		pmd += pmd_index(addr);
+	} else
+		pmd = pmd_offset(pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		*pmd = __pmd((addr & PMD_MASK) | prot);
+		flush_pmd_entry(pmd);
+	} while (pmd++, addr = next, addr != end);
+}
+#else	/* !CONFIG_ARM_LPAE */
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	unsigned long prot)
 {
@@ -15,6 +44,7 @@
 	pmd[1] = __pmd(addr);
 	flush_pmd_entry(pmd);
 }
+#endif	/* CONFIG_ARM_LPAE */
 
 static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	unsigned long prot)
@@ -28,11 +58,11 @@
 	} while (pud++, addr = next, addr != end);
 }
 
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	unsigned long prot, next;
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
 		prot |= PMD_BIT4;
 
@@ -43,48 +73,41 @@
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
+extern char  __idmap_text_start[], __idmap_text_end[];
+
+static int __init init_static_idmap(void)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-	pmd_clear(pmd);
+	phys_addr_t idmap_start, idmap_end;
+
+	idmap_pgd = pgd_alloc(&init_mm);
+	if (!idmap_pgd)
+		return -ENOMEM;
+
+	/* Add an identity mapping for the physical address of the section. */
+	idmap_start = virt_to_phys((void *)__idmap_text_start);
+	idmap_end = virt_to_phys((void *)__idmap_text_end);
+
+	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
+		(long long)idmap_start, (long long)idmap_end);
+	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+
+	return 0;
 }
-
-static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	pud_t *pud = pud_offset(pgd, addr);
-	unsigned long next;
-
-	do {
-		next = pud_addr_end(addr, end);
-		idmap_del_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
-
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-
-	pgd += pgd_index(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		idmap_del_pud(pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
-}
-#endif
+early_initcall(init_static_idmap);
 
 /*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+	/* Clean and invalidate L1. */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	cpu_switch_mm(idmap_pgd, &init_mm);
+
+	/* Flush the TLB. */
 	local_flush_tlb_all();
 }
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fbdd12e..e34ea8a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,7 +20,6 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 
 #include <asm/mach-types.h>
 #include <asm/prom.h>
@@ -32,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 
 #include "mm.h"
 
@@ -134,30 +134,18 @@
 }
 
 static void __init find_limits(unsigned long *min, unsigned long *max_low,
-	unsigned long *max_high)
+			       unsigned long *max_high)
 {
 	struct meminfo *mi = &meminfo;
 	int i;
 
-	*min = -1UL;
-	*max_low = *max_high = 0;
-
-	for_each_bank (i, mi) {
-		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (*min > start)
-			*min = start;
-		if (*max_high < end)
-			*max_high = end;
-		if (bank->highmem)
-			continue;
-		if (*max_low < end)
-			*max_low = end;
-	}
+	/* This assumes the meminfo array is properly sorted */
+	*min = bank_pfn_start(&mi->bank[0]);
+	for_each_bank (i, mi)
+		if (mi->bank[i].highmem)
+			break;
+	*max_low = bank_pfn_end(&mi->bank[i - 1]);
+	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
 }
 
 static void __init arm_bootmem_init(unsigned long start_pfn,
@@ -319,20 +307,10 @@
 }
 #endif
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
-	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
 
@@ -371,7 +349,7 @@
 	if (mdesc->reserve)
 		mdesc->reserve();
 
-	memblock_analyze();
+	memblock_allow_resize();
 	memblock_dump_all();
 }
 
@@ -403,8 +381,6 @@
 	 */
 	arm_bootmem_free(min, max_low, max_high);
 
-	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
-
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c..80632e8 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING	0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -64,7 +58,7 @@
 	} while (seq != init_mm.context.kvm_seq);
 }
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Section support is unsafe on SMP - If you iounmap and ioremap a region,
  * the other CPUs will not see this change until their next context switch.
@@ -79,13 +73,16 @@
 {
 	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmdp;
 
 	flush_cache_vunmap(addr, end);
 	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmdp = pmd_offset(pud, addr);
 	do {
-		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+		pmd_t pmd = *pmdp;
 
-		pmd = *pmdp;
 		if (!pmd_none(pmd)) {
 			/*
 			 * Clear the PMD from the page table, and
@@ -104,8 +101,8 @@
 				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
 		}
 
-		addr += PGDIR_SIZE;
-		pgd++;
+		addr += PMD_SIZE;
+		pmdp += 2;
 	} while (addr < end);
 
 	/*
@@ -124,6 +121,8 @@
 {
 	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 
 	/*
 	 * Remove and free any PTE-based mapping, and
@@ -132,17 +131,17 @@
 	unmap_area_sections(virt, size);
 
 	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
 	do {
-		pmd_t *pmd = pmd_offset(pgd, addr);
-
 		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		flush_pmd_entry(pmd);
 
-		addr += PGDIR_SIZE;
-		pgd++;
+		addr += PMD_SIZE;
+		pmd += 2;
 	} while (addr < end);
 
 	return 0;
@@ -154,6 +153,8 @@
 {
 	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 
 	/*
 	 * Remove and free any PTE-based mapping, and
@@ -162,6 +163,8 @@
 	unmap_area_sections(virt, size);
 
 	pgd = pgd_offset_k(virt);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
 	do {
 		unsigned long super_pmd_val, i;
 
@@ -170,14 +173,12 @@
 		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
 		for (i = 0; i < 8; i++) {
-			pmd_t *pmd = pmd_offset(pgd, addr);
-
 			pmd[0] = __pmd(super_pmd_val);
 			pmd[1] = __pmd(super_pmd_val);
 			flush_pmd_entry(pmd);
 
-			addr += PGDIR_SIZE;
-			pgd++;
+			addr += PMD_SIZE;
+			pmd += 2;
 		}
 
 		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
@@ -195,17 +196,13 @@
 	unsigned long addr;
  	struct vm_struct * area;
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
-
-	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
-	 */
-	if (WARN_ON(pfn_valid(pfn)))
-		return NULL;
+#endif
 
 	type = get_mem_type(mtype);
 	if (!type)
@@ -216,12 +213,40 @@
 	 */
 	size = PAGE_ALIGN(offset + size);
 
+	/*
+	 * Try to reuse one of the static mappings whenever possible.
+	 */
+	read_lock(&vmlist_lock);
+	for (area = vmlist; area; area = area->next) {
+		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+			break;
+		if (!(area->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+		if (__phys_to_pfn(area->phys_addr) > pfn ||
+		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+			continue;
+		/* we can drop the lock here as we know *area is static */
+		read_unlock(&vmlist_lock);
+		addr = (unsigned long)area->addr;
+		addr += __pfn_to_phys(pfn) - area->phys_addr;
+		return (void __iomem *) (offset + addr);
+	}
+	read_unlock(&vmlist_lock);
+
+	/*
+	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 */
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
  	if (!area)
  		return NULL;
  	addr = (unsigned long)area->addr;
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
@@ -313,28 +338,34 @@
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
-	/*
-	 * If this is a section based mapping we need to handle it
-	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
-	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
-			}
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (vm->addr > addr)
+			break;
+		if (!(vm->flags & VM_IOREMAP))
+			continue;
+		/* If this is a static mapping we must leave it alone */
+		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+			read_unlock(&vmlist_lock);
+			return;
+		}
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+		/*
+		 * If this is a section based mapping we need to handle it
+		 * specially as the VM subsystem does not know how to handle
+		 * such a beast.
+		 */
+		if ((vm->addr == addr) &&
+		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+			unmap_area_sections((unsigned long)vm->addr, vm->size);
 			break;
 		}
-	}
-	write_unlock(&vmlist_lock);
 #endif
+	}
+	read_unlock(&vmlist_lock);
 
 	vunmap(addr);
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index ad7cce3..70f6d3ea 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -21,6 +21,20 @@
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+/*
+ * ARM specific vm_struct->flags bits.
+ */
+
+/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
+#define VM_ARM_SECTION_MAPPING	0x80000000
+
+/* permanent static mappings from iotable_init() */
+#define VM_ARM_STATIC_MAPPING	0x40000000
+
+/* mapping type (attributes) for permanent static mappings */
+#define VM_ARM_MTYPE(mt)		((mt) << 20)
+#define VM_ARM_MTYPE_MASK	(0x1f << 20)
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 44b628e..ce8cb19 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -11,10 +11,49 @@
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~(SHMLBA-1);
+	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
+
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+	unsigned long gap = rlimit(RLIMIT_STACK);
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches.  We need to ensure that
@@ -68,13 +107,9 @@
 	if (len > mm->cached_hole_size) {
 	        start_addr = addr = mm->free_area_cache;
 	} else {
-	        start_addr = addr = TASK_UNMAPPED_BASE;
+	        start_addr = addr = mm->mmap_base;
 	        mm->cached_hole_size = 0;
 	}
-	/* 8 bits of randomness in 20 address space bits */
-	if ((current->flags & PF_RANDOMIZE) &&
-	    !(current->personality & ADDR_NO_RANDOMIZE))
-		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
 	if (do_align)
@@ -111,6 +146,134 @@
 	}
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			const unsigned long len, const unsigned long pgoff,
+			const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_align = 0;
+	int aliasing = cache_is_vipt_aliasing();
+
+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+				(!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (addr > len) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+	}
+
+	if (mm->mmap_base < len)
+		goto bottomup;
+
+	addr = mm->mmap_base - len;
+	if (do_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (!vma || addr+len <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start - len;
+		if (do_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (len < vma->vm_start);
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	unsigned long random_factor = 0UL;
+
+	/* 8 bits of randomness in 20 address space bits */
+	if ((current->flags & PF_RANDOMIZE) &&
+	    !(current->personality & ADDR_NO_RANDOMIZE))
+		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base(random_factor);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
+}
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550..94c5a0c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -150,6 +151,7 @@
 }
 early_param("nowb", early_nowrite);
 
+#ifndef CONFIG_ARM_LPAE
 static int __init early_ecc(char *p)
 {
 	if (memcmp(p, "on", 2) == 0)
@@ -159,6 +161,7 @@
 	return 0;
 }
 early_param("ecc", early_ecc);
+#endif
 
 static int __init noalign_setup(char *__unused)
 {
@@ -228,10 +231,12 @@
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+#ifndef CONFIG_ARM_LPAE
 	[MT_MINICLEAN] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
+#endif
 	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
@@ -429,6 +434,7 @@
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
@@ -436,6 +442,7 @@
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 
 		if (is_smp()) {
 			/*
@@ -474,6 +481,18 @@
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Do not generate access flag faults for the kernel mappings.
+	 */
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		mem_types[i].prot_pte |= PTE_EXT_AF;
+		mem_types[i].prot_sect |= PMD_SECT_AF;
+	}
+	kern_pgprot |= PTE_EXT_AF;
+	vecs_pgprot |= PTE_EXT_AF;
+#endif
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
@@ -529,13 +548,18 @@
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
+	void *ptr = __va(memblock_alloc(sz, align));
 	memset(ptr, 0, sz);
 	return ptr;
 }
 
+static void __init *early_alloc(unsigned long sz)
+{
+	return early_alloc_aligned(sz, sz);
+}
+
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
@@ -572,8 +596,10 @@
 	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
+#ifndef CONFIG_ARM_LPAE
 		if (addr & SECTION_SIZE)
 			pmd++;
+#endif
 
 		do {
 			*pmd = __pmd(phys | type->prot_sect);
@@ -603,6 +629,7 @@
 	} while (pud++, addr = next, addr != end);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
@@ -662,6 +689,7 @@
 		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 	} while (addr != end);
 }
+#endif	/* !CONFIG_ARM_LPAE */
 
 /*
  * Create the page directory entries and any necessary
@@ -685,14 +713,16 @@
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+	    md->virtual >= PAGE_OFFSET &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx overlaps vmalloc space\n",
+		       " at 0x%08lx out of vmalloc space\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * Catch 36-bit addresses
 	 */
@@ -700,6 +730,7 @@
 		create_36bit_mapping(md, type);
 		return;
 	}
+#endif
 
 	addr = md->virtual & PAGE_MASK;
 	phys = __pfn_to_phys(md->pfn);
@@ -729,18 +760,33 @@
  */
 void __init iotable_init(struct map_desc *io_desc, int nr)
 {
-	int i;
+	struct map_desc *md;
+	struct vm_struct *vm;
 
-	for (i = 0; i < nr; i++)
-		create_mapping(io_desc + i);
+	if (!nr)
+		return;
+
+	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+	for (md = io_desc; nr; md++, nr--) {
+		create_mapping(md);
+		vm->addr = (void *)(md->virtual & PAGE_MASK);
+		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->flags |= VM_ARM_MTYPE(md->type);
+		vm->caller = iotable_init;
+		vm_area_add_early(vm++);
+	}
 }
 
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min =
+	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
+ * area - the default is 240m.
  */
 static int __init early_vmalloc(char *arg)
 {
@@ -775,6 +821,9 @@
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
+		if (bank->start > ULONG_MAX)
+			highmem = 1;
+
 #ifdef CONFIG_HIGHMEM
 		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
@@ -786,7 +835,7 @@
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
 		 */
-		if (__va(bank->start) < vmalloc_min &&
+		if (!highmem && __va(bank->start) < vmalloc_min &&
 		    bank->size > vmalloc_min - __va(bank->start)) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
@@ -807,6 +856,17 @@
 		bank->highmem = highmem;
 
 		/*
+		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
+		 */
+		if (highmem) {
+			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
+			       "(!CONFIG_HIGHMEM).\n",
+			       (unsigned long long)bank->start,
+			       (unsigned long long)bank->start + bank->size - 1);
+			continue;
+		}
+
+		/*
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
@@ -860,6 +920,7 @@
 	}
 #endif
 	meminfo.nr_banks = j;
+	high_memory = __va(lowmem_limit - 1) + 1;
 	memblock_set_current_limit(lowmem_limit);
 }
 
@@ -890,14 +951,20 @@
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
-	 * memory bank, up to the end of the vmalloc region.
+	 * memory bank, up to the vmalloc region.
 	 */
 	for (addr = __phys_to_virt(end);
-	     addr < VMALLOC_END; addr += PMD_SIZE)
+	     addr < VMALLOC_START; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
 
+#ifdef CONFIG_ARM_LPAE
+/* the first page is reserved for pgd */
+#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
+				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
+#else
 #define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#endif
 
 /*
  * Reserve the special regions of memory
@@ -920,8 +987,8 @@
 }
 
 /*
- * Set up device the mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
@@ -936,7 +1003,7 @@
 	 */
 	vectors_page = early_alloc(PAGE_SIZE);
 
-	for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
+	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 941a98c..4fc6794 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -29,6 +29,8 @@
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
+	high_memory = __va(end - 1) + 1;
 }
 
 /*
@@ -43,7 +45,7 @@
 /*
  * We don't need to do anything here for nommu machines.
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
 }
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b2027c1..a3e78cc 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -17,6 +18,14 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_LPAE
+#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
+#define __pgd_free(pgd)	kfree(pgd)
+#else
+#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
+#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
+#endif
+
 /*
  * need to get a 16k page for level 1
  */
@@ -27,7 +36,7 @@
 	pmd_t *new_pmd, *init_pmd;
 	pte_t *new_pte, *init_pte;
 
-	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
+	new_pgd = __pgd_alloc();
 	if (!new_pgd)
 		goto no_pgd;
 
@@ -42,10 +51,25 @@
 
 	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Allocate PMD table for modules and pkmap mappings.
+	 */
+	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
+			    MODULES_VADDR);
+	if (!new_pud)
+		goto no_pud;
+
+	new_pmd = pmd_alloc(mm, new_pud, 0);
+	if (!new_pmd)
+		goto no_pmd;
+#endif
+
 	if (!vectors_high()) {
 		/*
 		 * On ARM, first page must always be allocated since it
-		 * contains the machine vectors.
+		 * contains the machine vectors. The vectors are always high
+		 * with LPAE.
 		 */
 		new_pud = pud_alloc(mm, new_pgd, 0);
 		if (!new_pud)
@@ -74,7 +98,7 @@
 no_pmd:
 	pud_free(mm, new_pud);
 no_pud:
-	free_pages((unsigned long)new_pgd, 2);
+	__pgd_free(new_pgd);
 no_pgd:
 	return NULL;
 }
@@ -111,5 +135,24 @@
 	pgd_clear(pgd);
 	pud_free(mm, pud);
 no_pgd:
-	free_pages((unsigned long) pgd_base, 2);
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Free modules/pkmap or identity pmd tables.
+	 */
+	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		if (pgd_val(*pgd) & L_PGD_SWAPPER)
+			continue;
+		pud = pud_offset(pgd, 0);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		pmd = pmd_offset(pud, 0);
+		pud_clear(pud);
+		pmd_free(mm, pmd);
+		pgd_clear(pgd);
+		pud_free(mm, pud);
+	}
+#endif
+	__pgd_free(pgd_base);
 }
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 6746966..2349513 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -95,6 +95,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1020_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -107,6 +108,8 @@
 	bic	ip, ip, #0x1100 		@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1020_reset)
+	.popsection
 
 /*
  * cpu_arm1020_do_idle()
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4251421..c244b06 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -95,6 +95,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1020e_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -107,6 +108,8 @@
 	bic	ip, ip, #0x1100 		@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1020e_reset)
+	.popsection
 
 /*
  * cpu_arm1020e_do_idle()
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index d283cf3..38fe22e 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -84,6 +84,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1022_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -96,6 +97,8 @@
 	bic	ip, ip, #0x1100 		@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1022_reset)
+	.popsection
 
 /*
  * cpu_arm1022_do_idle()
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 678a1ce..3eb9c3c 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -84,6 +84,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm1026_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -96,6 +97,8 @@
 	bic	ip, ip, #0x1100 		@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm1026_reset)
+	.popsection
 
 /*
  * cpu_arm1026_do_idle()
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index e5b974c..4fbeb5b 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -225,6 +225,7 @@
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+		.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm6_reset)
 ENTRY(cpu_arm7_reset)
 		mov	r1, #0
@@ -235,6 +236,9 @@
 		mov	r1, #0x30
 		mcr	p15, 0, r1, c1, c0, 0		@ turn off MMU etc
 		mov	pc, r0
+ENDPROC(cpu_arm6_reset)
+ENDPROC(cpu_arm7_reset)
+		.popsection
 
 		__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 55f4e29..0ac908c 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -101,6 +101,7 @@
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+		.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm720_reset)
 		mov	ip, #0
 		mcr	p15, 0, ip, c7, c7, 0		@ invalidate cache
@@ -112,6 +113,8 @@
 		bic	ip, ip, #0x2100			@ ..v....s........
 		mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 		mov	pc, r0
+ENDPROC(cpu_arm720_reset)
+		.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 4506be3..dc5de5d 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -49,6 +49,7 @@
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm740_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c0, 0		@ invalidate cache
@@ -56,6 +57,8 @@
 	bic	ip, ip, #0x0000000c		@ ............wc..
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm740_reset)
+	.popsection
 
 	__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 7e0e1fe..6ddea3e 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -45,8 +45,11 @@
  * Params  : loc(r0)	address to jump to
  * Purpose : Sets up everything for a reset and jump to the location for soft reset.
  */
+		.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm7tdmi_reset)
 		mov	pc, r0
+ENDPROC(cpu_arm7tdmi_reset)
+		.popsection
 
 		__CPUINIT
 
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 88fb3d9..cb941ae 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -85,6 +85,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm920_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -97,6 +98,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm920_reset)
+	.popsection
 
 /*
  * cpu_arm920_do_idle()
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 490e188..4ec0e07 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -87,6 +87,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm922_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -99,6 +100,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm922_reset)
+	.popsection
 
 /*
  * cpu_arm922_do_idle()
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 51d494b..9dccd9a 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -108,6 +108,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm925_reset)
 	/* Send software reset to MPU and DSP */
 	mov	ip, #0xff000000
@@ -115,6 +116,8 @@
 	orr	ip, ip, #0x0000ce00
 	mov	r4, #1
 	strh	r4, [ip, #0x10]
+ENDPROC(cpu_arm925_reset)
+	.popsection
 
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 9f8fd91..820259b 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -77,6 +77,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm926_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -89,6 +90,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm926_reset)
+	.popsection
 
 /*
  * cpu_arm926_do_idle()
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index ac750d5..9fdc0a1 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -48,6 +48,7 @@
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm940_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
@@ -58,6 +59,8 @@
 	bic	ip, ip, #0x00001000		@ i-cache
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm940_reset)
+	.popsection
 
 /*
  * cpu_arm940_do_idle()
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 683af3a..f684cfe 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -55,6 +55,7 @@
  * Params  : r0 = address to jump to
  * Notes   : This sets up everything for a reset
  */
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm946_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
@@ -65,6 +66,8 @@
 	bic	ip, ip, #0x00001000		@ i-cache
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_arm946_reset)
+	.popsection
 
 /*
  * cpu_arm946_do_idle()
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 2120f9e..8881391 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -45,8 +45,11 @@
  * Params  : loc(r0)	address to jump to
  * Purpose : Sets up everything for a reset and jump to the location for soft reset.
  */
+		.pushsection	.idmap.text, "ax"
 ENTRY(cpu_arm9tdmi_reset)
 		mov	pc, r0
+ENDPROC(cpu_arm9tdmi_reset)
+		.popsection
 
 		__CPUINIT
 
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 4c7a571..272558a 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -57,6 +57,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	4
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_fa526_reset)
 /* TODO: Use CP8 if possible... */
 	mov	ip, #0
@@ -73,6 +74,8 @@
 	nop
 	nop
 	mov	pc, r0
+ENDPROC(cpu_fa526_reset)
+	.popsection
 
 /*
  * cpu_fa526_do_idle()
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 8a6c2f7..ba3c500 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -98,6 +98,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_feroceon_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -110,6 +111,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_feroceon_reset)
+	.popsection
 
 /*
  * cpu_feroceon_do_idle()
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 307a4def..2d8ff3a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -91,8 +91,9 @@
 #if L_PTE_SHARED != PTE_EXT_SHARED
 #error PTE shared bit mismatch
 #endif
-#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
-     L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+#if !defined (CONFIG_ARM_LPAE) && \
+	(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
+	 L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
 #error Invalid Linux PTE bit settings
 #endif
 #endif	/* CONFIG_MMU */
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index db52b0f..cdfedc5 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -69,6 +69,7 @@
  * (same as arm926)
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_mohawk_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -79,6 +80,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_mohawk_reset)
+	.popsection
 
 /*
  * cpu_mohawk_do_idle()
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index d50ada2..775d70f 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -62,6 +62,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_sa110_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -74,6 +75,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_sa110_reset)
+	.popsection
 
 /*
  * cpu_sa110_do_idle(type)
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 7d91545..3aa0da1 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -70,6 +70,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_sa1100_reset)
 	mov	ip, #0
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
@@ -82,6 +83,8 @@
 	bic	ip, ip, #0x1100			@ ...i...s........
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
+ENDPROC(cpu_sa1100_reset)
+	.popsection
 
 /*
  * cpu_sa1100_do_idle(type)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d061d2f..5900cd5 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -55,6 +55,7 @@
  *	- loc   - location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v6_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
@@ -62,6 +63,8 @@
 	mov	r1, #0
 	mcr	p15, 0, r1, c7, c5, 4		@ ISB
 	mov	pc, r0
+ENDPROC(cpu_v6_reset)
+	.popsection
 
 /*
  *	cpu_v6_do_idle()
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
new file mode 100644
index 0000000..3a4b3e7
--- /dev/null
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -0,0 +1,171 @@
+/*
+ * arch/arm/mm/proc-v7-2level.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define TTB_S		(1 << 1)
+#define TTB_RGN_NC	(0 << 3)
+#define TTB_RGN_OC_WBWA	(1 << 3)
+#define TTB_RGN_OC_WT	(2 << 3)
+#define TTB_RGN_OC_WB	(3 << 3)
+#define TTB_NOS		(1 << 5)
+#define TTB_IRGN_NC	((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA	((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT	((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB	((1 << 0) | (1 << 6))
+
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP	PMD_SECT_WB
+
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
+
+/*
+ *	cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ *	Set the translation table base pointer to be pgd_phys
+ *
+ *	- pgd_phys - physical address of new TTB
+ *
+ *	It is assumed that:
+ *	- we are not using split page tables
+ */
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+	mov	r2, #0
+	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
+#ifdef CONFIG_ARM_ERRATA_430973
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
+#endif
+#ifdef CONFIG_ARM_ERRATA_754322
+	dsb
+#endif
+	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
+	isb
+1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
+#ifdef CONFIG_ARM_ERRATA_754322
+	dsb
+#endif
+	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
+	isb
+#endif
+	mov	pc, lr
+ENDPROC(cpu_v7_switch_mm)
+
+/*
+ *	cpu_v7_set_pte_ext(ptep, pte)
+ *
+ *	Set a level 2 translation table entry.
+ *
+ *	- ptep  - pointer to level 2 translation table entry
+ *		  (hardware version is stored at +2048 bytes)
+ *	- pte   - PTE value to store
+ *	- ext	- value for extended PTE bits
+ */
+ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
+	str	r1, [r0]			@ linux version
+
+	bic	r3, r1, #0x000003f0
+	bic	r3, r3, #PTE_TYPE_MASK
+	orr	r3, r3, r2
+	orr	r3, r3, #PTE_EXT_AP0 | 2
+
+	tst	r1, #1 << 4
+	orrne	r3, r3, #PTE_EXT_TEX(1)
+
+	eor	r1, r1, #L_PTE_DIRTY
+	tst	r1, #L_PTE_RDONLY | L_PTE_DIRTY
+	orrne	r3, r3, #PTE_EXT_APX
+
+	tst	r1, #L_PTE_USER
+	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
+	tstne	r3, #PTE_EXT_APX
+	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
+
+	tst	r1, #L_PTE_XN
+	orrne	r3, r3, #PTE_EXT_XN
+
+	tst	r1, #L_PTE_YOUNG
+	tstne	r1, #L_PTE_PRESENT
+	moveq	r3, #0
+
+ ARM(	str	r3, [r0, #2048]! )
+ THUMB(	add	r0, r0, #2048 )
+ THUMB(	str	r3, [r0] )
+	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+#endif
+	mov	pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
+
+	/*
+	 * Memory region attributes with SCTLR.TRE=1
+	 *
+	 *   n = TEX[0],C,B
+	 *   TR = PRRR[2n+1:2n]		- memory type
+	 *   IR = NMRR[2n+1:2n]		- inner cacheable property
+	 *   OR = NMRR[2n+17:2n+16]	- outer cacheable property
+	 *
+	 *			n	TR	IR	OR
+	 *   UNCACHED		000	00
+	 *   BUFFERABLE		001	10	00	00
+	 *   WRITETHROUGH	010	10	10	10
+	 *   WRITEBACK		011	10	11	11
+	 *   reserved		110
+	 *   WRITEALLOC		111	10	01	01
+	 *   DEV_SHARED		100	01
+	 *   DEV_NONSHARED	100	01
+	 *   DEV_WC		001	10
+	 *   DEV_CACHED		011	10
+	 *
+	 * Other attributes:
+	 *
+	 *   DS0 = PRRR[16] = 0		- device shareable property
+	 *   DS1 = PRRR[17] = 1		- device shareable property
+	 *   NS0 = PRRR[18] = 0		- normal shareable property
+	 *   NS1 = PRRR[19] = 1		- normal shareable property
+	 *   NOS = PRRR[24+n] = 1	- not outer shareable
+	 */
+.equ	PRRR,	0xff0a81a8
+.equ	NMRR,	0x40e040e0
+
+	/*
+	 * Macro for setting up the TTBRx and TTBCR registers.
+	 * - \ttb0 and \ttb1 updated with the corresponding flags.
+	 */
+	.macro	v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+	mcr	p15, 0, \zero, c2, c0, 2	@ TTB control register
+	ALT_SMP(orr	\ttbr0, \ttbr0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	\ttbr0, \ttbr0, #TTB_FLAGS_UP)
+	ALT_SMP(orr	\ttbr1, \ttbr1, #TTB_FLAGS_SMP)
+	ALT_UP(orr	\ttbr1, \ttbr1, #TTB_FLAGS_UP)
+	mcr	p15, 0, \ttbr1, c2, c0, 1	@ load TTB1
+	.endm
+
+	__CPUINIT
+
+	/*   AT
+	 *  TFR   EV X F   I D LR    S
+	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *    1    0 110       0011 1100 .111 1101 < we want
+	 */
+	.align	2
+	.type	v7_crval, #object
+v7_crval:
+	crval	clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
+
+	.previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
new file mode 100644
index 0000000..8de0f1d
--- /dev/null
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -0,0 +1,150 @@
+/*
+ * arch/arm/mm/proc-v7-3level.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *   based on arch/arm/mm/proc-v7-2level.S
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define TTB_IRGN_NC	(0 << 8)
+#define TTB_IRGN_WBWA	(1 << 8)
+#define TTB_IRGN_WT	(2 << 8)
+#define TTB_IRGN_WB	(3 << 8)
+#define TTB_RGN_NC	(0 << 10)
+#define TTB_RGN_OC_WBWA	(1 << 10)
+#define TTB_RGN_OC_WT	(2 << 10)
+#define TTB_RGN_OC_WB	(3 << 10)
+#define TTB_S		(3 << 12)
+#define TTB_EAE		(1 << 31)
+
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS_UP	(TTB_IRGN_WB|TTB_RGN_OC_WB)
+#define PMD_FLAGS_UP	(PMD_SECT_WB)
+
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS_SMP	(TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
+#define PMD_FLAGS_SMP	(PMD_SECT_WBWA|PMD_SECT_S)
+
+/*
+ * cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ * Set the translation table base pointer to be pgd_phys (physical address of
+ * the new TTB).
+ */
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	and	r3, r1, #0xff
+	mov	r3, r3, lsl #(48 - 32)		@ ASID
+	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
+	isb
+#endif
+	mov	pc, lr
+ENDPROC(cpu_v7_switch_mm)
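
With LPAE the TTBR0 register is 64 bits wide and carries the ASID in bits [55:48]; the lsl #(48 - 32) above places the 8-bit ASID at bit 16 of the upper word handed to mcrr, which is bit 48 of the combined register. A small C sketch of the value being composed (make_ttbr0 is a made-up helper, not a kernel function):

#include <stdint.h>

static uint64_t make_ttbr0(uint64_t pgd_phys, unsigned int asid)
{
	/* low bits: page table base; bits [55:48]: ASID */
	return pgd_phys | ((uint64_t)(asid & 0xff) << 48);
}
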
+
+/*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+ * Set a level 3 translation table entry.
+ * - ptep - pointer to level 3 translation table entry
+ * - pte - PTE value to store (64-bit in r2 and r3)
+ */
+ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
+	tst	r2, #L_PTE_PRESENT
+	beq	1f
+	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
+	orreq	r2, #L_PTE_RDONLY
+1:	strd	r2, r3, [r0]
+	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+#endif
+	mov	pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
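
The tst/orreq pair above implements software dirty-bit tracking: a present but clean PTE is written out read-only so that the first store faults and gives the kernel a chance to mark it dirty. A pseudo-C sketch of the same logic (the present/read-only masks are taken as parameters because their exact values live in pgtable-3level.h, which is not part of this hunk):

#include <stdint.h>

static uint64_t v7_3level_fixup_pte(uint64_t pte, uint64_t pte_present,
				    uint64_t pte_rdonly)
{
	if ((pte & pte_present) && !(pte & (1ULL << 55)))	/* present but not dirty */
		pte |= pte_rdonly;		/* force RO; the first write will fault */
	return pte;
}
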
+
+	/*
+	 * Memory region attributes for LPAE (defined in pgtable-3level.h):
+	 *
+	 *   n = AttrIndx[2:0]
+	 *
+	 *			n	MAIR
+	 *   UNCACHED		000	00000000
+	 *   BUFFERABLE		001	01000100
+	 *   DEV_WC		001	01000100
+	 *   WRITETHROUGH	010	10101010
+	 *   WRITEBACK		011	11101110
+	 *   DEV_CACHED		011	11101110
+	 *   DEV_SHARED		100	00000100
+	 *   DEV_NONSHARED	100	00000100
+	 *   unused		101
+	 *   unused		110
+	 *   WRITEALLOC		111	11111111
+	 */
+.equ	PRRR,	0xeeaa4400			@ MAIR0
+.equ	NMRR,	0xff000004			@ MAIR1
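
Under LPAE the two words keep the PRRR/NMRR symbol names but are loaded into MAIR0/MAIR1, where each AttrIndx value simply selects one byte, as the table above lists. A standalone C sketch (illustrative only) of that lookup:

#include <stdio.h>

#define MAIR0_VAL 0xeeaa4400u	/* written via the PRRR symbol above */
#define MAIR1_VAL 0xff000004u	/* written via the NMRR symbol above */

static unsigned int mair_attr(unsigned int n)	/* n = AttrIndx[2:0] */
{
	unsigned int mair = (n < 4) ? MAIR0_VAL : MAIR1_VAL;

	return (mair >> (8 * (n & 3))) & 0xff;	/* one byte per attribute index */
}

int main(void)
{
	printf("WRITEBACK  (n=3) -> 0x%02x\n", mair_attr(3));	/* 0xee */
	printf("DEV_SHARED (n=4) -> 0x%02x\n", mair_attr(4));	/* 0x04 */
	return 0;
}
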
+
+	/*
+	 * Macro for setting up the TTBRx and TTBCR registers.
+	 * - \ttbr1 updated.
+	 */
+	.macro	v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+	ldr	\tmp, =swapper_pg_dir		@ swapper_pg_dir virtual address
+	cmp	\ttbr1, \tmp			@ PHYS_OFFSET > PAGE_OFFSET? (branch below)
+	mrc	p15, 0, \tmp, c2, c0, 2		@ TTB control register
+	orr	\tmp, \tmp, #TTB_EAE
+	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP)
+	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP)
+	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP << 16)
+	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP << 16)
+	/*
+	 * TTBR0/TTBR1 split (PAGE_OFFSET):
+	 *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+	 *   0x80000000: T0SZ = 0, T1SZ = 1
+	 *   0xc0000000: T0SZ = 0, T1SZ = 2
+	 *
+	 * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+	 * booting secondary CPUs would end up using TTBR1 for the identity
+	 * mapping set up in TTBR0.
+	 */
+	bhi	9001f				@ PHYS_OFFSET > PAGE_OFFSET?
+	orr	\tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ
+#if defined CONFIG_VMSPLIT_2G
+	/* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
+	add	\ttbr1, \ttbr1, #1 << 4		@ skip two L1 entries
+#elif defined CONFIG_VMSPLIT_3G
+	/* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
+	add	\ttbr1, \ttbr1, #4096 * (1 + 3)	@ only L2 used, skip pgd+3*pmd
+#endif
+	/* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
+9001:	mcr	p15, 0, \tmp, c2, c0, 2		@ TTB control register
+	mcrr	p15, 1, \ttbr1, \zero, c2	@ load TTBR1
+	.endm
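
The T1SZ value programmed by the "<< 16" line follows directly from PAGE_OFFSET, and the ttbr1 adjustments (+ 1 << 4 for the 2G split, + 4096 * (1 + 3) for the 3G split) then point TTBR1 at the right offset inside swapper_pg_dir, as the hunk's own comments explain. A tiny C check of the formula (illustrative only):

#include <assert.h>

#define T1SZ_FROM_PAGE_OFFSET(po)	((((unsigned long)(po)) >> 30) - 1)

int main(void)
{
	assert(T1SZ_FROM_PAGE_OFFSET(0x80000000UL) == 1);	/* CONFIG_VMSPLIT_2G */
	assert(T1SZ_FROM_PAGE_OFFSET(0xc0000000UL) == 2);	/* CONFIG_VMSPLIT_3G */
	/* the result is shifted into the TTBCR.T1SZ position by the "<< 16" above */
	return 0;
}
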
+
+	__CPUINIT
+
+	/*
+	 *   AT
+	 *  TFR   EV X F   IHD LR    S
+	 * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *   11    0 110    1  0011 1100 .111 1101 < we want
+	 */
+	.align	2
+	.type	v7_crval, #object
+v7_crval:
+	crval	clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
+
+	.previous
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 2c559ac..7e9b5bf 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -19,24 +19,11 @@
 
 #include "proc-macros.S"
 
-#define TTB_S		(1 << 1)
-#define TTB_RGN_NC	(0 << 3)
-#define TTB_RGN_OC_WBWA	(1 << 3)
-#define TTB_RGN_OC_WT	(2 << 3)
-#define TTB_RGN_OC_WB	(3 << 3)
-#define TTB_NOS		(1 << 5)
-#define TTB_IRGN_NC	((0 << 0) | (0 << 6))
-#define TTB_IRGN_WBWA	((0 << 0) | (1 << 6))
-#define TTB_IRGN_WT	((1 << 0) | (0 << 6))
-#define TTB_IRGN_WB	((1 << 0) | (1 << 6))
-
-/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS_UP	PMD_SECT_WB
-
-/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
+#ifdef CONFIG_ARM_LPAE
+#include "proc-v7-3level.S"
+#else
+#include "proc-v7-2level.S"
+#endif
 
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
@@ -63,6 +50,7 @@
  *      caches disabled.
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v7_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
@@ -71,6 +59,7 @@
 	isb
 	mov	pc, r0
 ENDPROC(cpu_v7_reset)
+	.popsection
 
 /*
  *	cpu_v7_do_idle()
@@ -97,127 +86,12 @@
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
-/*
- *	cpu_v7_switch_mm(pgd_phys, tsk)
- *
- *	Set the translation table base pointer to be pgd_phys
- *
- *	- pgd_phys - physical address of new TTB
- *
- *	It is assumed that:
- *	- we are not using split page tables
- */
-ENTRY(cpu_v7_switch_mm)
-#ifdef CONFIG_MMU
-	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
-	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
-#ifdef CONFIG_ARM_ERRATA_430973
-	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
-	isb
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
-	isb
-#endif
-	mov	pc, lr
-ENDPROC(cpu_v7_switch_mm)
-
-/*
- *	cpu_v7_set_pte_ext(ptep, pte)
- *
- *	Set a level 2 translation table entry.
- *
- *	- ptep  - pointer to level 2 translation table entry
- *		  (hardware version is stored at +2048 bytes)
- *	- pte   - PTE value to store
- *	- ext	- value for extended PTE bits
- */
-ENTRY(cpu_v7_set_pte_ext)
-#ifdef CONFIG_MMU
-	str	r1, [r0]			@ linux version
-
-	bic	r3, r1, #0x000003f0
-	bic	r3, r3, #PTE_TYPE_MASK
-	orr	r3, r3, r2
-	orr	r3, r3, #PTE_EXT_AP0 | 2
-
-	tst	r1, #1 << 4
-	orrne	r3, r3, #PTE_EXT_TEX(1)
-
-	eor	r1, r1, #L_PTE_DIRTY
-	tst	r1, #L_PTE_RDONLY | L_PTE_DIRTY
-	orrne	r3, r3, #PTE_EXT_APX
-
-	tst	r1, #L_PTE_USER
-	orrne	r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
-	@ allow kernel read/write access to read-only user pages
-	tstne	r3, #PTE_EXT_APX
-	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
-
-	tst	r1, #L_PTE_XN
-	orrne	r3, r3, #PTE_EXT_XN
-
-	tst	r1, #L_PTE_YOUNG
-	tstne	r1, #L_PTE_PRESENT
-	moveq	r3, #0
-
- ARM(	str	r3, [r0, #2048]! )
- THUMB(	add	r0, r0, #2048 )
- THUMB(	str	r3, [r0] )
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
-#endif
-	mov	pc, lr
-ENDPROC(cpu_v7_set_pte_ext)
-
 	string	cpu_v7_name, "ARMv7 Processor"
 	.align
 
-	/*
-	 * Memory region attributes with SCTLR.TRE=1
-	 *
-	 *   n = TEX[0],C,B
-	 *   TR = PRRR[2n+1:2n]		- memory type
-	 *   IR = NMRR[2n+1:2n]		- inner cacheable property
-	 *   OR = NMRR[2n+17:2n+16]	- outer cacheable property
-	 *
-	 *			n	TR	IR	OR
-	 *   UNCACHED		000	00
-	 *   BUFFERABLE		001	10	00	00
-	 *   WRITETHROUGH	010	10	10	10
-	 *   WRITEBACK		011	10	11	11
-	 *   reserved		110
-	 *   WRITEALLOC		111	10	01	01
-	 *   DEV_SHARED		100	01
-	 *   DEV_NONSHARED	100	01
-	 *   DEV_WC		001	10
-	 *   DEV_CACHED		011	10
-	 *
-	 * Other attributes:
-	 *
-	 *   DS0 = PRRR[16] = 0		- device shareable property
-	 *   DS1 = PRRR[17] = 1		- device shareable property
-	 *   NS0 = PRRR[18] = 0		- normal shareable property
-	 *   NS1 = PRRR[19] = 1		- normal shareable property
-	 *   NOS = PRRR[24+n] = 1	- not outer shareable
-	 */
-.equ	PRRR,	0xff0a81a8
-.equ	NMRR,	0x40e040e0
-
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl	cpu_v7_suspend_size
-.equ	cpu_v7_suspend_size, 4 * 7
+.equ	cpu_v7_suspend_size, 4 * 8
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
 	stmfd	sp!, {r4 - r10, lr}
@@ -226,10 +100,11 @@
 	stmia	r0!, {r4 - r5}
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mrc	p15, 0, r7, c2, c0, 1	@ TTB 1
+	mrc	p15, 0, r11, c2, c0, 2	@ TTB control register
 	mrc	p15, 0, r8, c1, c0, 0	@ Control register
 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
-	stmia	r0, {r6 - r10}
+	stmia	r0, {r6 - r11}
 	ldmfd	sp!, {r4 - r10, pc}
 ENDPROC(cpu_v7_do_suspend)
 
@@ -241,13 +116,15 @@
 	ldmia	r0!, {r4 - r5}
 	mcr	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mcr	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
-	ldmia	r0, {r6 - r10}
+	ldmia	r0, {r6 - r11}
 	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
+#ifndef CONFIG_ARM_LPAE
 	ALT_SMP(orr	r1, r1, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r1, r1, #TTB_FLAGS_UP)
+#endif
 	mcr	p15, 0, r1, c2, c0, 0	@ TTB 0
 	mcr	p15, 0, r7, c2, c0, 1	@ TTB 1
-	mcr	p15, 0, ip, c2, c0, 2	@ TTB control register
+	mcr	p15, 0, r11, c2, c0, 2	@ TTB control register
 	mrc	p15, 0, r4, c1, c0, 1	@ Read Auxiliary control register
 	teq	r4, r9			@ Is it already set?
 	mcrne	p15, 0, r9, c1, c0, 1	@ No, so write it
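
The suspend image grows from seven to eight words because TTBCR is now captured in r11; stmia/ldmia store the registers in ascending numeric order, so the new word lands last. A sketch of the implied save-area layout (the struct and field names are made up for illustration):

#include <stdint.h>

typedef uint32_t u32;

struct v7_suspend_regs {
	u32 fcse_pid;	/* CP15 c13, c0, 0 */
	u32 tid_uro;	/* CP15 c13, c0, 3, user r/o thread ID */
	u32 dacr;	/* CP15 c3,  c0, 0, domain access control */
	u32 ttbr1;	/* CP15 c2,  c0, 1 */
	u32 sctlr;	/* CP15 c1,  c0, 0 */
	u32 actlr;	/* CP15 c1,  c0, 1 */
	u32 cpacr;	/* CP15 c1,  c0, 2 */
	u32 ttbcr;	/* CP15 c2,  c0, 2, the new eighth word (r11) */
};	/* sizeof == 4 * 8, matching cpu_v7_suspend_size */
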
@@ -284,6 +161,7 @@
 __v7_ca9mp_setup:
 	mov	r10, #(1 << 0)			@ TLB ops broadcasting
 	b	1f
+__v7_ca7mp_setup:
 __v7_ca15mp_setup:
 	mov	r10, #0
 1:
@@ -363,11 +241,13 @@
 	orreq	r10, r10, #1 << 6		@ set bit #6
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
 #endif
-#ifdef CONFIG_ARM_ERRATA_751472
-	cmp	r6, #0x30			@ present prior to r3p0
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+	ALT_SMP(cmp r6, #0x30)			@ present prior to r3p0
+	ALT_UP_B(1f)
 	mrclt	p15, 0, r10, c15, c0, 1		@ read diagnostic register
 	orrlt	r10, r10, #1 << 11		@ set bit #11
 	mcrlt	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+1:
 #endif
 
 3:	mov	r10, #0
@@ -377,12 +257,7 @@
 	dsb
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
-	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
-	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
-	ALT_SMP(orr	r8, r8, #TTB_FLAGS_SMP)
-	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
-	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
+	v7_ttb_setup r10, r4, r8, r5		@ TTBCR, TTBRx setup
 	ldr	r5, =PRRR			@ PRRR
 	ldr	r6, =NMRR			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
@@ -404,16 +279,7 @@
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-	/*   AT
-	 *  TFR   EV X F   I D LR    S
-	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
-	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *    1    0 110       0011 1100 .111 1101 < we want
-	 */
-	.type	v7_crval, #object
-v7_crval:
-	crval	clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
-
+	.align	2
 __v7_setup_stack:
 	.space	4 * 11				@ 11 registers
 
@@ -435,11 +301,11 @@
 	 */
 .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
 	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
-			PMD_FLAGS_SMP | \mm_mmuflags)
+			PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
 	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
-			PMD_FLAGS_UP | \mm_mmuflags)
-	.long	PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ | \io_mmuflags
+			PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
+	.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
 	W(b)	\initfunc
 	.long	cpu_arch_name
 	.long	cpu_elf_name
@@ -452,6 +318,7 @@
 	.long	v7_cache_fns
 .endm
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * ARM Ltd. Cortex A5 processor.
 	 */
@@ -463,6 +330,16 @@
 	.size	__v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
 	/*
+	 * ARM Ltd. Cortex A7 processor.
+	 */
+	.type	__v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+	.long	0x410fc070
+	.long	0xff0ffff0
+	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
+	/*
 	 * ARM Ltd. Cortex A9 processor.
 	 */
 	.type   __v7_ca9mp_proc_info, #object
@@ -471,6 +348,7 @@
 	.long	0xff0ffff0
 	__v7_proc __v7_ca9mp_setup
 	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+#endif	/* CONFIG_ARM_LPAE */
 
 	/*
 	 * ARM Ltd. Cortex A15 processor.
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index abf0507..b0d5786 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -105,6 +105,7 @@
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_xsc3_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
@@ -119,6 +120,8 @@
 	@ already containing those two last instructions to survive.
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	mov	pc, r0
+ENDPROC(cpu_xsc3_reset)
+	.popsection
 
 /*
  * cpu_xsc3_do_idle()
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3277904..4ffebaa 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -142,6 +142,7 @@
  * Beware PXA270 erratum E7.
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_xscale_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
@@ -160,6 +161,8 @@
 	@ already containing those two last instructions to survive.
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
 	mov	pc, r0
+ENDPROC(cpu_xscale_reset)
+	.popsection
 
 /*
  * cpu_xscale_do_idle()
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index cafa183..d18dde9 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -20,6 +20,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#include <asm/opcodes.h>
+
 /* This is the kernel's entry point into the floating point emulator.
 It is called from the kernel with code similar to this:
 
@@ -81,11 +83,11 @@
 	mov	r6, r0			@ save the opcode
 emulate:
 	ldr	r1, [sp, #S_PSR]	@ fetch the PSR
-	bl	checkCondition		@ check the condition
-	cmp	r0, #0			@ r0 = 0 ==> condition failed
+	bl	arm_check_condition	@ check the condition
+	cmp	r0, #ARM_OPCODE_CONDTEST_PASS	@ condition passed?
 
 	@ if condition code failed to match, next insn
-	beq	next			@ get the next instruction;
+	bne	next			@ get the next instruction;
 
 	mov	r0, r6			@ prepare for EmulateAll()
 	bl	EmulateAll		@ emulate the instruction
diff --git a/arch/arm/nwfpe/fpopcode.c b/arch/arm/nwfpe/fpopcode.c
index 922b811..ff983467 100644
--- a/arch/arm/nwfpe/fpopcode.c
+++ b/arch/arm/nwfpe/fpopcode.c
@@ -61,29 +61,3 @@
 	0x41200000		/* single 10.0 */
 };
 
-/* condition code lookup table
- index into the table is test code: EQ, NE, ... LT, GT, AL, NV
- bit position in short is condition code: NZCV */
-static const unsigned short aCC[16] = {
-	0xF0F0,			// EQ == Z set
-	0x0F0F,			// NE
-	0xCCCC,			// CS == C set
-	0x3333,			// CC
-	0xFF00,			// MI == N set
-	0x00FF,			// PL
-	0xAAAA,			// VS == V set
-	0x5555,			// VC
-	0x0C0C,			// HI == C set && Z clear
-	0xF3F3,			// LS == C clear || Z set
-	0xAA55,			// GE == (N==V)
-	0x55AA,			// LT == (N!=V)
-	0x0A05,			// GT == (!Z && (N==V))
-	0xF5FA,			// LE == (Z || (N!=V))
-	0xFFFF,			// AL always
-	0			// NV
-};
-
-unsigned int checkCondition(const unsigned int opcode, const unsigned int ccodes)
-{
-	return (aCC[opcode >> 28] >> (ccodes >> 28)) & 1;
-}
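
For reference, the table that just went away answered "does this condition pass?" by indexing on the instruction's condition field and testing the bit selected by the current NZCV nibble; nwfpe now asks the shared arm_check_condition() helper instead (see the entry.S hunk above). A standalone C illustration of the old lookup for the EQ case:

#include <stdio.h>

int main(void)
{
	unsigned short eq_mask = 0xF0F0;	/* aCC[0], the EQ row of the removed table */
	unsigned int nzcv_z_set = 0x4;		/* N=0 Z=1 C=0 V=0 */
	unsigned int nzcv_z_clear = 0x0;	/* all flags clear */

	printf("Z set   -> %u\n", (eq_mask >> nzcv_z_set) & 1);	/* 1: EQ passes */
	printf("Z clear -> %u\n", (eq_mask >> nzcv_z_clear) & 1);	/* 0: EQ fails */
	return 0;
}
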
diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h
index 786e4c9..78f02db 100644
--- a/arch/arm/nwfpe/fpopcode.h
+++ b/arch/arm/nwfpe/fpopcode.h
@@ -475,9 +475,6 @@
 	return (nRc);
 }
 
-extern unsigned int checkCondition(const unsigned int opcode,
-				   const unsigned int ccodes);
-
 extern const float64 float64Constant[];
 extern const float32 float32Constant[];
 
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index c074e66..4e0a371 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@
 	return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
 	oprofile_perf_exit();
 }
diff --git a/arch/arm/plat-iop/Makefile b/arch/arm/plat-iop/Makefile
index 69b09c1..a99dc15 100644
--- a/arch/arm/plat-iop/Makefile
+++ b/arch/arm/plat-iop/Makefile
@@ -10,10 +10,10 @@
 obj-$(CONFIG_ARCH_IOP32X) += pci.o
 obj-$(CONFIG_ARCH_IOP32X) += setup.o
 obj-$(CONFIG_ARCH_IOP32X) += time.o
-obj-$(CONFIG_ARCH_IOP32X) += io.o
 obj-$(CONFIG_ARCH_IOP32X) += cp6.o
 obj-$(CONFIG_ARCH_IOP32X) += adma.o
 obj-$(CONFIG_ARCH_IOP32X) += pmu.o
+obj-$(CONFIG_ARCH_IOP32X) += restart.o
 
 # IOP33X
 obj-$(CONFIG_ARCH_IOP33X) += gpio.o
@@ -21,10 +21,10 @@
 obj-$(CONFIG_ARCH_IOP33X) += pci.o
 obj-$(CONFIG_ARCH_IOP33X) += setup.o
 obj-$(CONFIG_ARCH_IOP33X) += time.o
-obj-$(CONFIG_ARCH_IOP33X) += io.o
 obj-$(CONFIG_ARCH_IOP33X) += cp6.o
 obj-$(CONFIG_ARCH_IOP33X) += adma.o
 obj-$(CONFIG_ARCH_IOP33X) += pmu.o
+obj-$(CONFIG_ARCH_IOP33X) += restart.o
 
 # IOP13XX
 obj-$(CONFIG_ARCH_IOP13XX) += cp6.o
diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c
deleted file mode 100644
index e15bc17..0000000
--- a/arch/arm/plat-iop/io.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * iop3xx custom ioremap implementation
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-
-void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
-	unsigned int mtype)
-{
-	void __iomem * retval;
-
-	switch (cookie) {
-	case IOP3XX_PCI_LOWER_IO_PA ... IOP3XX_PCI_UPPER_IO_PA:
-		retval = (void *) IOP3XX_PCI_IO_PHYS_TO_VIRT(cookie);
-		break;
-	case IOP3XX_PERIPHERAL_PHYS_BASE ... IOP3XX_PERIPHERAL_UPPER_PA:
-		retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
-		break;
-	default:
-		retval = __arm_ioremap_caller(cookie, size, mtype,
-				__builtin_return_address(0));
-	}
-
-	return retval;
-}
-EXPORT_SYMBOL(__iop3xx_ioremap);
-
-void __iop3xx_iounmap(void __iomem *addr)
-{
-	extern void __iounmap(volatile void __iomem *addr);
-
-	switch ((u32) addr) {
-	case IOP3XX_PCI_LOWER_IO_VA ... IOP3XX_PCI_UPPER_IO_VA:
-	case IOP3XX_PERIPHERAL_VIRT_BASE ... IOP3XX_PERIPHERAL_UPPER_VA:
-		goto skip;
-	}
-	__iounmap(addr);
-
-skip:
-	return;
-}
-EXPORT_SYMBOL(__iop3xx_iounmap);
diff --git a/arch/arm/plat-iop/restart.c b/arch/arm/plat-iop/restart.c
new file mode 100644
index 0000000..6a85a0c
--- /dev/null
+++ b/arch/arm/plat-iop/restart.c
@@ -0,0 +1,19 @@
+/*
+ * restart.c
+ *
+ * Copyright (C) 2001 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <asm/hardware/iop3xx.h>
+#include <mach/hardware.h>
+
+void iop3xx_restart(char mode, const char *cmd)
+{
+	*IOP3XX_PCSR = 0x30;
+
+	/* Jump into ROM at address 0 */
+	soft_restart(0);
+}
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 568dd02..cbfbbe4 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -18,7 +18,6 @@
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/timex.h>
-#include <linux/sched.h>
 #include <linux/io.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
@@ -52,21 +51,12 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static DEFINE_CLOCK_DATA(cd);
-
 /*
  * IOP sched_clock() implementation via its clocksource.
  */
-unsigned long long notrace sched_clock(void)
+static u32 notrace iop_read_sched_clock(void)
 {
-	u32 cyc = 0xffffffffu - read_tcr1();
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace iop_update_sched_clock(void)
-{
-	u32 cyc = 0xffffffffu - read_tcr1();
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return 0xffffffffu - read_tcr1();
 }
 
 /*
@@ -152,7 +142,7 @@
 {
 	u32 timer_ctl;
 
-	init_sched_clock(&cd, iop_update_sched_clock, 32, tick_rate);
+	setup_sched_clock(iop_read_sched_clock, 32, tick_rate);
 
 	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
 	iop_tick_rate = tick_rate;
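
This is the first of several identical conversions in this series (mxc, nomadik, omap, orion and s5p below follow the same shape): instead of each platform carrying its own clock_data plus a sched_clock() override, it hands the generic layer a plain function that returns the raw counter value, and the core does the cyc-to-ns scaling and 32-bit wrap handling. A minimal sketch of the pattern, with my_counter_base as a made-up ioremapped register:

#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *my_counter_base;		/* hypothetical free-running up-counter */

static u32 notrace my_read_sched_clock(void)
{
	return readl_relaxed(my_counter_base);	/* just return the raw count */
}

static void __init my_timer_init(unsigned long rate_hz)
{
	/* 32-bit counter ticking at rate_hz; the core derives mult/shift */
	setup_sched_clock(my_read_sched_clock, 32, rate_hz);
}
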
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index b3a1f2b3..b30708e 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -20,6 +20,7 @@
 	bool "i.MX3, i.MX6"
 	select AUTO_ZRELADDR if !ZBOOT_ROM
 	select ARM_PATCH_PHYS_VIRT
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  This enables support for systems based on the Freescale i.MX3 and i.MX6
 	  family.
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index b9f0f5f..076db84f 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -5,7 +5,6 @@
 # Common support
 obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o
 
-obj-$(CONFIG_ARM_GIC) += gic.o
 obj-$(CONFIG_MXC_TZIC) += tzic.o
 obj-$(CONFIG_MXC_AVIC) += avic.o
 
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c
index 74aac96..73db34b 100644
--- a/arch/arm/plat-mxc/cpufreq.c
+++ b/arch/arm/plat-mxc/cpufreq.c
@@ -17,6 +17,7 @@
  * the CPU clock speed on the fly.
  */
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -97,7 +98,7 @@
 	return ret;
 }
 
-static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+static int mxc_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret;
 	int i;
diff --git a/arch/arm/plat-mxc/gic.c b/arch/arm/plat-mxc/gic.c
deleted file mode 100644
index 12f8f81..0000000
--- a/arch/arm/plat-mxc/gic.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/io.h>
-#include <asm/exception.h>
-#include <asm/localtimer.h>
-#include <asm/hardware/gic.h>
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
-
-asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
-{
-	u32 irqstat, irqnr;
-
-	do {
-		irqstat = readl_relaxed(gic_cpu_base_addr + GIC_CPU_INTACK);
-		irqnr = irqstat & 0x3ff;
-		if (irqnr == 1023)
-			break;
-
-		if (irqnr > 15 && irqnr < 1021)
-			handle_IRQ(irqnr, regs);
-#ifdef CONFIG_SMP
-		else {
-			writel_relaxed(irqstat, gic_cpu_base_addr +
-						GIC_CPU_EOI);
-			handle_IPI(irqnr, regs);
-		}
-#endif
-	} while (1);
-}
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index c75f254..83cca9b 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -71,8 +71,8 @@
 extern struct platform_device *mxc_register_gpio(char *name, int id,
 	resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
 extern void mxc_set_cpu_type(unsigned int type);
+extern void mxc_restart(char, const char *);
 extern void mxc_arch_reset_init(void __iomem *);
-extern void mx51_efikamx_reset(void);
 extern int mx53_revision(void);
 extern int mx53_display_revision(void);
 
@@ -89,7 +89,6 @@
 
 void avic_handle_irq(struct pt_regs *);
 void tzic_handle_irq(struct pt_regs *);
-void gic_handle_irq(struct pt_regs *);
 
 #define imx1_handle_irq avic_handle_irq
 #define imx21_handle_irq avic_handle_irq
@@ -122,6 +121,7 @@
 extern void imx_enable_cpu(int cpu, bool enable);
 extern void imx_set_cpu_jump(int cpu, void *jump_addr);
 extern void imx_src_init(void);
+extern void imx_src_prepare_restart(void);
 extern void imx_gpc_init(void);
 extern void imx_gpc_pre_suspend(void);
 extern void imx_gpc_post_resume(void);
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
index ca5cf26..def5d30 100644
--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
+++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
@@ -9,19 +9,8 @@
  * published by the Free Software Foundation.
  */
 
-/* Unused, we use CONFIG_MULTI_IRQ_HANDLER */
-
 	.macro	disable_fiq
 	.endm
 
-	.macro  get_irqnr_preamble, base, tmp
-	.endm
-
 	.macro  arch_ret_to_user, tmp1, tmp2
 	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-	.endm
-
-	.macro test_for_ipi, irqnr, irqstat, base, tmp
-	.endm
diff --git a/arch/arm/plat-mxc/include/mach/mx1.h b/arch/arm/plat-mxc/include/mach/mx1.h
index 97b19e7..2b7c08d 100644
--- a/arch/arm/plat-mxc/include/mach/mx1.h
+++ b/arch/arm/plat-mxc/include/mach/mx1.h
@@ -12,8 +12,6 @@
 #ifndef __MACH_MX1_H__
 #define __MACH_MX1_H__
 
-#include <mach/vmalloc.h>
-
 /*
  * Memory map
  */
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index b9895d2..13ad0df 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -22,6 +22,4 @@
 	cpu_do_idle();
 }
 
-void arch_reset(char mode, const char *cmd);
-
 #endif /* __ASM_ARCH_MXC_SYSTEM_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 88fd404..477971b 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -98,6 +98,7 @@
 	case MACH_TYPE_PCM043:
 	case MACH_TYPE_LILLY1131:
 	case MACH_TYPE_VPR200:
+	case MACH_TYPE_EUKREA_CPUIMX35SD:
 		uart_base = MX3X_UART1_BASE_ADDR;
 		break;
 	case MACH_TYPE_MAGX_ZN5:
diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h
deleted file mode 100644
index ef6379c..0000000
--- a/arch/arm/plat-mxc/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *  Copyright (C) 2000 Russell King.
- *  Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MXC_VMALLOC_H__
-#define __ASM_ARCH_MXC_VMALLOC_H__
-
-/* vmalloc ending address */
-#define VMALLOC_END       0xf4000000UL
-
-#endif /* __ASM_ARCH_MXC_VMALLOC_H__ */
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 42d74ea..e032717 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN			(1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -74,10 +77,21 @@
 		do_div(c, period_ns);
 		duty_cycles = c;
 
+		/*
+		 * According to the i.MX PWM reference manual, the real period
+		 * is the PWMPR PERIOD value plus 2.
+		 */
+		if (period_cycles > 2)
+			period_cycles -= 2;
+		else
+			period_cycles = 0;
+
 		writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
 		writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-		cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+		cr = MX3_PWMCR_PRESCALER(prescale) |
+			MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+			MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
 		if (cpu_is_mx25())
 			cr |= MX3_PWMCR_CLKSRC_IPG;
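
The subtraction added above follows from the counter behaviour the comment cites: the hardware produces PWMPR + 2 clocks per period, so a requested period of N clocks must be written as N - 2, clamped at 0 when N is too small. A one-line C restatement (illustrative only):

static unsigned long mx3_pwmpr_for(unsigned long period_cycles)
{
	return period_cycles > 2 ? period_cycles - 2 : 0;	/* hardware adds 2 back */
}
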
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index d65fb31..3599bf2 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -37,17 +37,10 @@
 /*
  * Reset the system. It is called by machine_restart().
  */
-void arch_reset(char mode, const char *cmd)
+void mxc_restart(char mode, const char *cmd)
 {
 	unsigned int wcr_enable;
 
-#ifdef CONFIG_MACH_MX51_EFIKAMX
-	if (machine_is_mx51_efikamx()) {
-		mx51_efikamx_reset();
-		return;
-	}
-#endif
-
 	if (cpu_is_mx1()) {
 		wcr_enable = (1 << 0);
 	} else {
@@ -71,7 +64,7 @@
 	mdelay(50);
 
 	/* we'll take a jump through zero as a poor second */
-	cpu_reset(0);
+	soft_restart(0);
 }
 
 void mxc_arch_reset_init(void __iomem *base)
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 4b0fe28..1c96cdb 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -108,18 +108,9 @@
 
 static void __iomem *sched_clock_reg;
 
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace mxc_read_sched_clock(void)
 {
-	cycle_t cyc = sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
-
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace mxc_update_sched_clock(void)
-{
-	cycle_t cyc = sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
 }
 
 static int __init mxc_clocksource_init(struct clk *timer_clk)
@@ -129,7 +120,7 @@
 
 	sched_clock_reg = reg;
 
-	init_sched_clock(&cd, mxc_update_sched_clock, 32, c);
+	setup_sched_clock(mxc_read_sched_clock, 32, c);
 	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
 			clocksource_mmio_readl_up);
 }
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index 30b6433..ad1b45b 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -17,7 +17,6 @@
 #include <linux/clk.h>
 #include <linux/jiffies.h>
 #include <linux/err.h>
-#include <linux/sched.h>
 #include <asm/mach/time.h>
 #include <asm/sched_clock.h>
 
@@ -79,23 +78,12 @@
  * local implementation which uses the clocksource to get some
  * better resolution when scheduling the kernel.
  */
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace nomadik_read_sched_clock(void)
 {
-	u32 cyc;
-
 	if (unlikely(!mtu_base))
 		return 0;
 
-	cyc = -readl(mtu_base + MTU_VAL(0));
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace nomadik_update_sched_clock(void)
-{
-	u32 cyc = -readl(mtu_base + MTU_VAL(0));
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return -readl(mtu_base + MTU_VAL(0));
 }
 #endif
 
@@ -231,9 +219,11 @@
 			rate, 200, 32, clocksource_mmio_readl_down))
 		pr_err("timer: failed to initialize clock source %s\n",
 		       "mtu_0");
+
 #ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
-	init_sched_clock(&cd, nomadik_update_sched_clock, 32, rate);
+	setup_sched_clock(nomadik_read_sched_clock, 32, rate);
 #endif
+
 	/* Timer 1 is used for events */
 
 	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 9852622..3df04d9 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -4,7 +4,7 @@
 
 # Common support
 obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
-	 usb.o fb.o io.o counter_32k.o
+	 usb.o fb.o counter_32k.o
 obj-m :=
 obj-n :=
 obj-  :=
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index d9f10a3..2ee6341 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 #include <linux/omapfb.h>
 
 #include <plat/common.h>
@@ -66,3 +67,10 @@
 	omap_vram_reserve_sdram_memblock();
 	omap_dsp_reserve_sdram_memblock();
 }
+
+void __init omap_init_consistent_dma_size(void)
+{
+#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
+	init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
+#endif
+}
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index a6cbb71..5f0f229 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -17,7 +17,6 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/sched.h>
 #include <linux/clocksource.h>
 
 #include <asm/sched_clock.h>
@@ -37,41 +36,9 @@
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED		0xfffbc410
 
-/*
- * Returns current time from boot in nsecs. It's OK for this to wrap
- * around for now, as it's just a relative time stamp.
- */
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 32768, NSEC_PER_SEC, 60).
- * This gives a resolution of about 30us and a wrap period of about 36hrs.
- */
-#define SC_MULT		4000000000u
-#define SC_SHIFT	17
-
-static inline unsigned long long notrace _omap_32k_sched_clock(void)
+static u32 notrace omap_32k_read_sched_clock(void)
 {
-	u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
-	return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-#if defined(CONFIG_OMAP_32K_TIMER) && !defined(CONFIG_OMAP_MPU_TIMER)
-unsigned long long notrace sched_clock(void)
-{
-	return _omap_32k_sched_clock();
-}
-#else
-unsigned long long notrace omap_32k_sched_clock(void)
-{
-	return _omap_32k_sched_clock();
-}
-#endif
-
-static void notrace omap_update_sched_clock(void)
-{
-	u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
 }
 
 /**
@@ -147,8 +114,7 @@
 					  clocksource_mmio_readl_up))
 			printk(err, "32k_counter");
 
-		init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
-				       32768, SC_MULT, SC_SHIFT);
+		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
 	}
 	return 0;
 }
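
The fixed constants being deleted were only a hand-specialised form of what setup_sched_clock() now computes: with SC_MULT = 4000000000 and SC_SHIFT = 17, one counter cycle scaled as (cyc * mult) >> shift gives 30517.578125 ns, exactly one period of the 32.768 kHz counter. A standalone check (illustrative only):

#include <stdio.h>

int main(void)
{
	double ns_per_cyc = 4000000000.0 / (1 << 17);	/* SC_MULT >> SC_SHIFT */

	printf("%.6f ns/cycle vs 1e9/32768 = %.6f\n", ns_per_cyc, 1e9 / 32768);
	return 0;
}
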
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 3ff3e36..b4d7ec3 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -27,97 +27,14 @@
 #ifndef __ARCH_ARM_MACH_OMAP_COMMON_H
 #define __ARCH_ARM_MACH_OMAP_COMMON_H
 
-#include <linux/delay.h>
-
 #include <plat/i2c.h>
 #include <plat/omap_hwmod.h>
 
-struct sys_timer;
-
-extern void omap_map_common_io(void);
-extern struct sys_timer omap1_timer;
-extern struct sys_timer omap2_timer;
-extern struct sys_timer omap3_timer;
-extern struct sys_timer omap3_secure_timer;
-extern struct sys_timer omap4_timer;
-extern bool omap_32k_timer_init(void);
 extern int __init omap_init_clocksource_32k(void);
-extern unsigned long long notrace omap_32k_sched_clock(void);
 
 extern void omap_reserve(void);
-
-void omap2420_init_early(void);
-void omap2430_init_early(void);
-void omap3430_init_early(void);
-void omap35xx_init_early(void);
-void omap3630_init_early(void);
-void omap3_init_early(void);	/* Do not use this one */
-void am35xx_init_early(void);
-void ti816x_init_early(void);
-void omap4430_init_early(void);
-
 extern int omap_dss_reset(struct omap_hwmod *);
 
 void omap_sram_init(void);
 
-/*
- * IO bases for various OMAP processors
- * Except the tap base, rest all the io bases
- * listed are physical addresses.
- */
-struct omap_globals {
-	u32		class;		/* OMAP class to detect */
-	void __iomem	*tap;		/* Control module ID code */
-	void __iomem	*sdrc;           /* SDRAM Controller */
-	void __iomem	*sms;            /* SDRAM Memory Scheduler */
-	void __iomem	*ctrl;           /* System Control Module */
-	void __iomem	*ctrl_pad;	/* PAD Control Module */
-	void __iomem	*prm;            /* Power and Reset Management */
-	void __iomem	*cm;             /* Clock Management */
-	void __iomem	*cm2;
-};
-
-void omap2_set_globals_242x(void);
-void omap2_set_globals_243x(void);
-void omap2_set_globals_3xxx(void);
-void omap2_set_globals_443x(void);
-void omap2_set_globals_ti816x(void);
-
-/* These get called from omap2_set_globals_xxxx(), do not call these */
-void omap2_set_globals_tap(struct omap_globals *);
-void omap2_set_globals_sdrc(struct omap_globals *);
-void omap2_set_globals_control(struct omap_globals *);
-void omap2_set_globals_prcm(struct omap_globals *);
-
-void omap242x_map_io(void);
-void omap243x_map_io(void);
-void omap3_map_io(void);
-void omap4_map_io(void);
-
-
-/**
- * omap_test_timeout - busy-loop, testing a condition
- * @cond: condition to test until it evaluates to true
- * @timeout: maximum number of microseconds in the timeout
- * @index: loop index (integer)
- *
- * Loop waiting for @cond to become true or until at least @timeout
- * microseconds have passed.  To use, define some integer @index in the
- * calling code.  After running, if @index == @timeout, then the loop has
- * timed out.
- */
-#define omap_test_timeout(cond, timeout, index)			\
-({								\
-	for (index = 0; index < timeout; index++) {		\
-		if (cond)					\
-			break;					\
-		udelay(1);					\
-	}							\
-})
-
-extern struct device *omap2_get_mpuss_device(void);
-extern struct device *omap2_get_iva_device(void);
-extern struct device *omap2_get_l3_device(void);
-extern struct device *omap4_get_dsp_device(void);
-
 #endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index 7f2969e..1234944 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -247,8 +247,6 @@
  * NOTE: Please use ioremap + __raw_read/write where possible instead of these
  */
 
-void omap_ioremap_init(void);
-
 extern u8 omap_readb(u32 pa);
 extern u16 omap_readw(u32 pa);
 extern u32 omap_readl(u32 pa);
@@ -257,83 +255,9 @@
 extern void omap_writel(u32 v, u32 pa);
 
 struct omap_sdrc_params;
-
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
-void omap7xx_map_io(void);
-#else
-static inline void omap_map_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP15XX
-void omap15xx_map_io(void);
-#else
-static inline void omap15xx_map_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP16XX
-void omap16xx_map_io(void);
-#else
-static inline void omap16xx_map_io(void)
-{
-}
-#endif
-
-void omap1_init_early(void);
-
-#ifdef CONFIG_SOC_OMAP2420
-extern void omap242x_map_common_io(void);
-#else
-static inline void omap242x_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_SOC_OMAP2430
-extern void omap243x_map_common_io(void);
-#else
-static inline void omap243x_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-extern void omap34xx_map_common_io(void);
-#else
-static inline void omap34xx_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_SOC_OMAPTI816X
-extern void omapti816x_map_common_io(void);
-#else
-static inline void omapti816x_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-extern void omap44xx_map_common_io(void);
-#else
-static inline void omap44xx_map_common_io(void)
-{
-}
-#endif
-
-extern void omap2_init_common_infrastructure(void);
 extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
 				      struct omap_sdrc_params *sdrc_cs1);
 
-#define __arch_ioremap	omap_ioremap
-#define __arch_iounmap	omap_iounmap
-
-void __iomem *omap_ioremap(unsigned long phys, size_t size, unsigned int type);
-void omap_iounmap(volatile void __iomem *addr);
-
 extern void __init omap_init_consistent_dma_size(void);
 
 #endif
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 30e1071..ebda738 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -436,20 +436,6 @@
 #define INTCPS_NR_MIR_REGS	3
 #define INTCPS_NR_IRQS		96
 
-#ifndef __ASSEMBLY__
-extern void __iomem *omap_irq_base;
-void omap1_init_irq(void);
-void omap2_init_irq(void);
-void omap3_init_irq(void);
-void ti816x_init_irq(void);
-extern int omap_irq_pending(void);
-void omap_intc_save_context(void);
-void omap_intc_restore_context(void);
-void omap3_intc_suspend(void);
-void omap3_intc_prepare_idle(void);
-void omap3_intc_resume_idle(void);
-#endif
-
 #include <mach/hardware.h>
 
 #ifdef CONFIG_FIQ
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 1ab9fd6..ac44bde 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -2,7 +2,7 @@
  * arch/arm/plat-omap/include/mach/serial.h
  *
  * Copyright (C) 2009 Texas Instruments
- * Addded OMAP4 support- Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Added OMAP4 support- Santosh Shilimkar <santosh.shilimkar@ti.com>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/arch/arm/plat-omap/include/plat/system.h b/arch/arm/plat-omap/include/plat/system.h
index c5fa9e9..8e5ebd7 100644
--- a/arch/arm/plat-omap/include/plat/system.h
+++ b/arch/arm/plat-omap/include/plat/system.h
@@ -12,6 +12,4 @@
 	cpu_do_idle();
 }
 
-extern void (*arch_reset)(char, const char *);
-
 #endif
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
deleted file mode 100644
index 333871f..0000000
--- a/arch/arm/plat-omap/io.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Common io.c file
- * This file is created by Russell King <rmk+kernel@arm.linux.org.uk>
- *
- * Copyright (C) 2009 Texas Instruments
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <plat/omap7xx.h>
-#include <plat/omap1510.h>
-#include <plat/omap16xx.h>
-#include <plat/omap24xx.h>
-#include <plat/omap34xx.h>
-#include <plat/omap44xx.h>
-
-#define BETWEEN(p,st,sz)	((p) >= (st) && (p) < ((st) + (sz)))
-#define XLATE(p,pst,vst)	((void __iomem *)((p) - (pst) + (vst)))
-
-static int initialized;
-
-/*
- * Intercept ioremap() requests for addresses in our fixed mapping regions.
- */
-void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
-{
-
-	WARN(!initialized, "Do not use ioremap before init_early\n");
-
-#ifdef CONFIG_ARCH_OMAP1
-	if (cpu_class_is_omap1()) {
-		if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
-			return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
-	}
-	if (cpu_is_omap7xx()) {
-		if (BETWEEN(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_SIZE))
-			return XLATE(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_START);
-
-		if (BETWEEN(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_SIZE))
-			return XLATE(p, OMAP7XX_DSPREG_BASE,
-					OMAP7XX_DSPREG_START);
-	}
-	if (cpu_is_omap15xx()) {
-		if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
-			return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);
-
-		if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
-			return XLATE(p, OMAP1510_DSPREG_BASE,
-					OMAP1510_DSPREG_START);
-	}
-	if (cpu_is_omap16xx()) {
-		if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
-			return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);
-
-		if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
-			return XLATE(p, OMAP16XX_DSPREG_BASE,
-					OMAP16XX_DSPREG_START);
-	}
-#endif
-#ifdef CONFIG_ARCH_OMAP2
-	if (cpu_is_omap24xx()) {
-		if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
-			return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
-		if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
-			return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
-	}
-	if (cpu_is_omap2420()) {
-		if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE))
-			return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT);
-		if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE))
-			return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE);
-		if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE))
-			return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT);
-	}
-	if (cpu_is_omap2430()) {
-		if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
-			return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
-		if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
-			return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT);
-		if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
-			return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT);
-		if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
-			return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
-	}
-#endif
-#ifdef CONFIG_ARCH_OMAP3
-	if (cpu_is_ti816x()) {
-		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
-			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
-	} else if (cpu_is_omap34xx()) {
-		if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
-			return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
-		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
-			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
-		if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
-			return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT);
-		if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
-			return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
-		if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
-			return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT);
-		if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
-			return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
-		if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
-			return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
-	}
-#endif
-#ifdef CONFIG_ARCH_OMAP4
-	if (cpu_is_omap44xx()) {
-		if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
-			return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
-		if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
-			return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
-		if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
-			return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT);
-		if (BETWEEN(p, OMAP44XX_EMIF1_PHYS, OMAP44XX_EMIF1_SIZE))
-			return XLATE(p, OMAP44XX_EMIF1_PHYS,		\
-							OMAP44XX_EMIF1_VIRT);
-		if (BETWEEN(p, OMAP44XX_EMIF2_PHYS, OMAP44XX_EMIF2_SIZE))
-			return XLATE(p, OMAP44XX_EMIF2_PHYS,		\
-							OMAP44XX_EMIF2_VIRT);
-		if (BETWEEN(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_SIZE))
-			return XLATE(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_VIRT);
-		if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
-			return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
-		if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
-			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
-	}
-#endif
-	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(omap_ioremap);
-
-void omap_iounmap(volatile void __iomem *addr)
-{
-	unsigned long virt = (unsigned long)addr;
-
-	if (virt >= VMALLOC_START && virt < VMALLOC_END)
-		__iounmap(addr);
-}
-EXPORT_SYMBOL(omap_iounmap);
-
-void __init omap_init_consistent_dma_size(void)
-{
-#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
-	init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
-#endif
-}
-
-void __init omap_ioremap_init(void)
-{
-	initialized++;
-}
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 41ab97e..10d1608 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -384,12 +384,16 @@
 	struct orion_gpio_chip *ochip;
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
+	char gc_label[16];
 
 	if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
 		return;
 
+	snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
+		orion_gpio_chip_count);
+
 	ochip = orion_gpio_chips + orion_gpio_chip_count;
-	ochip->chip.label = "orion_gpio";
+	ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
 	ochip->chip.request = orion_gpio_request;
 	ochip->chip.direction_input = orion_gpio_direction_input;
 	ochip->chip.get = orion_gpio_get;
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 69a6136..1ed8d13 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
@@ -60,24 +59,10 @@
  * Orion's sched_clock implementation. It has a resolution of
  * at least 7.5ns (133MHz TCLK).
  */
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace orion_read_sched_clock(void)
 {
-	u32 cyc = ~readl(timer_base + TIMER0_VAL_OFF);
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-
-static void notrace orion_update_sched_clock(void)
-{
-	u32 cyc = ~readl(timer_base + TIMER0_VAL_OFF);
-	update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void __init setup_sched_clock(unsigned long tclk)
-{
-	init_sched_clock(&cd, orion_update_sched_clock, 32, tclk);
+	return ~readl(timer_base + TIMER0_VAL_OFF);
 }
 
 /*
@@ -217,7 +202,7 @@
 	/*
 	 * Set scale and timer for sched_clock.
 	 */
-	setup_sched_clock(tclk);
+	setup_sched_clock(orion_read_sched_clock, 32, tclk);
 
 	/*
 	 * Setup free-running clocksource timer (interrupts
diff --git a/arch/arm/plat-s3c24xx/common-smdk.c b/arch/arm/plat-s3c24xx/common-smdk.c
index bcc43f3..084604b 100644
--- a/arch/arm/plat-s3c24xx/common-smdk.c
+++ b/arch/arm/plat-s3c24xx/common-smdk.c
@@ -19,7 +19,7 @@
 #include <linux/timer.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 
 #include <linux/mtd/mtd.h>
diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c
index b3d3d02..4680799 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq.c
@@ -20,7 +20,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 3c63353..1121df1 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -192,27 +192,6 @@
 	return __raw_readl(S3C2410_GSTATUS1);
 }
 
-/* Hook for arm_pm_restart to ensure we execute the reset code
- * with the caches enabled. It seems at least the S3C2440 has a problem
- * resetting if there is bus activity interrupted by the reset.
- */
-static void s3c24xx_pm_restart(char mode, const char *cmd)
-{
-	if (mode != 's') {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__cpuc_flush_kern_all();
-		__cpuc_flush_user_all();
-
-		arch_reset(mode, cmd);
-		local_irq_restore(flags);
-	}
-
-	/* fallback, or unhandled */
-	arm_machine_restart(mode, cmd);
-}
-
 void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
 {
 	/* initialise the io descriptors we need for initialisation */
@@ -226,7 +205,5 @@
 	}
 	s3c24xx_init_cpu();
 
-	arm_pm_restart = s3c24xx_pm_restart;
-
 	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
 }
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index fc8c5f8..bc42c04 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -22,7 +22,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 
 #include <asm/irq.h>
diff --git a/arch/arm/plat-s3c24xx/pm-simtec.c b/arch/arm/plat-s3c24xx/pm-simtec.c
index 663b280..68296b1 100644
--- a/arch/arm/plat-s3c24xx/pm-simtec.c
+++ b/arch/arm/plat-s3c24xx/pm-simtec.c
@@ -18,7 +18,6 @@
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
 #include <linux/device.h>
 #include <linux/io.h>
 
diff --git a/arch/arm/plat-s3c24xx/s3c2410-clock.c b/arch/arm/plat-s3c24xx/s3c2410-clock.c
index def76aa..25dc4d4 100644
--- a/arch/arm/plat-s3c24xx/s3c2410-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2410-clock.c
@@ -26,7 +26,7 @@
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
diff --git a/arch/arm/plat-s3c24xx/s3c2412-iotiming.c b/arch/arm/plat-s3c24xx/s3c2412-iotiming.c
index 0b46d38..48eee39 100644
--- a/arch/arm/plat-s3c24xx/s3c2412-iotiming.c
+++ b/arch/arm/plat-s3c24xx/s3c2412-iotiming.c
@@ -17,7 +17,7 @@
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
 #include <linux/seq_file.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/err.h>
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 9b9968f..8167ce6 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -11,6 +11,7 @@
 	default y
 	select ARM_VIC if !ARCH_EXYNOS4
 	select ARM_GIC if ARCH_EXYNOS4
+	select GIC_NON_BANKED if ARCH_EXYNOS4
 	select NO_IOPORT
 	select ARCH_REQUIRE_GPIOLIB
 	select S3C_GPIO_TRACK
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index 8763440..30d8c30 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -13,7 +13,6 @@
 # Core files
 
 obj-y				+= dev-uart.o
-obj-y				+= cpu.o
 obj-y				+= clock.o
 obj-y				+= irq.o
 obj-$(CONFIG_S5P_EXT_INT)	+= irq-eint.o
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 5f84a3f..963edea 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 #include <asm/div64.h>
 
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
deleted file mode 100644
index a56959e..0000000
--- a/arch/arm/plat-s5p/cpu.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/* linux/arch/arm/plat-s5p/cpu.c
- *
- * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5P CPU Support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/cpu.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
-#include <plat/s5pc100.h>
-#include <plat/s5pv210.h>
-#include <plat/exynos4.h>
-
-/* table of supported CPUs */
-
-static const char name_s5p6440[] = "S5P6440";
-static const char name_s5p6450[] = "S5P6450";
-static const char name_s5pc100[] = "S5PC100";
-static const char name_s5pv210[] = "S5PV210/S5PC110";
-static const char name_exynos4210[] = "EXYNOS4210";
-static const char name_exynos4212[] = "EXYNOS4212";
-static const char name_exynos4412[] = "EXYNOS4412";
-
-static struct cpu_table cpu_ids[] __initdata = {
-	{
-		.idcode		= S5P6440_CPU_ID,
-		.idmask		= S5P64XX_CPU_MASK,
-		.map_io		= s5p6440_map_io,
-		.init_clocks	= s5p6440_init_clocks,
-		.init_uarts	= s5p6440_init_uarts,
-		.init		= s5p64x0_init,
-		.name		= name_s5p6440,
-	}, {
-		.idcode		= S5P6450_CPU_ID,
-		.idmask		= S5P64XX_CPU_MASK,
-		.map_io		= s5p6450_map_io,
-		.init_clocks	= s5p6450_init_clocks,
-		.init_uarts	= s5p6450_init_uarts,
-		.init		= s5p64x0_init,
-		.name		= name_s5p6450,
-	}, {
-		.idcode		= S5PC100_CPU_ID,
-		.idmask		= S5PC100_CPU_MASK,
-		.map_io		= s5pc100_map_io,
-		.init_clocks	= s5pc100_init_clocks,
-		.init_uarts	= s5pc100_init_uarts,
-		.init		= s5pc100_init,
-		.name		= name_s5pc100,
-	}, {
-		.idcode		= S5PV210_CPU_ID,
-		.idmask		= S5PV210_CPU_MASK,
-		.map_io		= s5pv210_map_io,
-		.init_clocks	= s5pv210_init_clocks,
-		.init_uarts	= s5pv210_init_uarts,
-		.init		= s5pv210_init,
-		.name		= name_s5pv210,
-	}, {
-		.idcode		= EXYNOS4210_CPU_ID,
-		.idmask		= EXYNOS4_CPU_MASK,
-		.map_io		= exynos4_map_io,
-		.init_clocks	= exynos4_init_clocks,
-		.init_uarts	= exynos4_init_uarts,
-		.init		= exynos_init,
-		.name		= name_exynos4210,
-	}, {
-		.idcode		= EXYNOS4212_CPU_ID,
-		.idmask		= EXYNOS4_CPU_MASK,
-		.map_io		= exynos4_map_io,
-		.init_clocks	= exynos4_init_clocks,
-		.init_uarts	= exynos4_init_uarts,
-		.init		= exynos_init,
-		.name		= name_exynos4212,
-	}, {
-		.idcode		= EXYNOS4412_CPU_ID,
-		.idmask		= EXYNOS4_CPU_MASK,
-		.map_io		= exynos4_map_io,
-		.init_clocks	= exynos4_init_clocks,
-		.init_uarts	= exynos4_init_uarts,
-		.init		= exynos_init,
-		.name		= name_exynos4412,
-	},
-};
-
-/* minimal IO mapping */
-
-static struct map_desc s5p_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_CHIPID,
-		.pfn		= __phys_to_pfn(S5P_PA_CHIPID),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_SYS,
-		.pfn		= __phys_to_pfn(S5P_PA_SYSCON),
-		.length		= SZ_64K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_TIMER,
-		.pfn		= __phys_to_pfn(S5P_PA_TIMER),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
-		.pfn		= __phys_to_pfn(S3C_PA_WDT),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= (unsigned long)S5P_VA_SROMC,
-		.pfn		= __phys_to_pfn(S5P_PA_SROMC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
-/* read cpu identification code */
-
-void __init s5p_init_io(struct map_desc *mach_desc,
-			int size, void __iomem *cpuid_addr)
-{
-	/* initialize the io descriptors we need for initialization */
-	iotable_init(s5p_iodesc, ARRAY_SIZE(s5p_iodesc));
-	if (mach_desc)
-		iotable_init(mach_desc, size);
-
-	/* detect cpu id and rev. */
-	s5p_init_cpu(cpuid_addr);
-
-	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
-}
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index b5bb774..c496b35 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/gpio.h>
 
 #include <asm/hardware/vic.h>
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
index c833e7b..17c0a2c 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-s5p/s5p-time.c
@@ -10,7 +10,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/err.h>
@@ -321,26 +320,14 @@
  * this wraps around for now, since it is just a relative time
  * stamp. (Inspired by U300 implementation.)
  */
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace s5p_read_sched_clock(void)
 {
 	void __iomem *reg = s5p_timer_reg();
 
 	if (!reg)
 		return 0;
 
-	return cyc_to_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
-}
-
-static void notrace s5p_update_sched_clock(void)
-{
-	void __iomem *reg = s5p_timer_reg();
-
-	if (!reg)
-		return;
-
-	update_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
+	return ~__raw_readl(reg);
 }
 
 static void __init s5p_clocksource_init(void)
@@ -358,7 +345,7 @@
 	s5p_time_setup(timer_source.source_id, TCNT_MAX);
 	s5p_time_start(timer_source.source_id, PERIODIC);
 
-	init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
+	setup_sched_clock(s5p_read_sched_clock, 32, clock_rate);
 
 	if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
 			clock_rate, 250, 32, clocksource_mmio_readl_down))
diff --git a/arch/arm/plat-samsung/clock-clksrc.c b/arch/arm/plat-samsung/clock-clksrc.c
index ae8b850..786a410 100644
--- a/arch/arm/plat-samsung/clock-clksrc.c
+++ b/arch/arm/plat-samsung/clock-clksrc.c
@@ -16,7 +16,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/io.h>
 
 #include <plat/clock.h>
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 3b44519..10f7117 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -33,7 +33,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/clk.h>
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index e657305..a976c02 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/pwm_backlight.h>
-#include <linux/slab.h>
 
 #include <plat/devs.h>
 #include <plat/gpio-cfg.h>
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
index dac4760..95509d8 100644
--- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
+++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
@@ -202,14 +202,6 @@
 extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
 extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
 
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
-				     struct s3c_cpufreq_config *cfg,
-				     union s3c_iobank *iob);
-
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
-				     struct s3c_cpufreq_config *cfg,
-				     union s3c_iobank *iob);
-
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
 #define s3c_cpufreq_debugfs_call(x) x
 #else
@@ -226,6 +218,10 @@
 extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
 
 #ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+				     struct s3c_cpufreq_config *cfg,
+				     union s3c_iobank *iob);
+
 extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
 				 struct s3c_iotimings *iot);
 
@@ -235,6 +231,7 @@
 extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
 				 struct s3c_iotimings *iot);
 #else
+#define s3c2410_iotiming_debugfs NULL
 #define s3c2410_iotiming_calc NULL
 #define s3c2410_iotiming_get NULL
 #define s3c2410_iotiming_set NULL
@@ -242,8 +239,10 @@
 
 /* S3C2412 compatible routines */
 
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
-				struct s3c_iotimings *timings);
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+				     struct s3c_cpufreq_config *cfg,
+				     union s3c_iobank *iob);
 
 extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
 				struct s3c_iotimings *timings);
@@ -253,6 +252,12 @@
 
 extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
 				 struct s3c_iotimings *iot);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
 
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG
 #define s3c_freq_dbg(x...) printk(KERN_INFO x)
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 40fd7b6..73cb3cf 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -152,13 +152,9 @@
 /* core initialisation functions */
 
 extern void s3c24xx_init_irq(void);
-extern void s3c64xx_init_irq(u32 vic0, u32 vic1);
 extern void s5p_init_irq(u32 *vic, u32 num_vic);
 
 extern void s3c24xx_init_io(struct map_desc *mach_desc, int size);
-extern void s3c64xx_init_io(struct map_desc *mach_desc, int size);
-extern void s5p_init_io(struct map_desc *mach_desc,
-			int size, void __iomem *cpuid_addr);
 
 extern void s3c24xx_init_cpu(void);
 extern void s3c64xx_init_cpu(void);
@@ -183,22 +179,20 @@
 extern struct syscore_ops s3c2412_pm_syscore_ops;
 extern struct syscore_ops s3c2416_pm_syscore_ops;
 extern struct syscore_ops s3c244x_pm_syscore_ops;
-extern struct syscore_ops s3c64xx_irq_syscore_ops;
 
-/* system device classes */
+/* system device subsystems */
 
-extern struct sysdev_class s3c2410_sysclass;
-extern struct sysdev_class s3c2410a_sysclass;
-extern struct sysdev_class s3c2412_sysclass;
-extern struct sysdev_class s3c2416_sysclass;
-extern struct sysdev_class s3c2440_sysclass;
-extern struct sysdev_class s3c2442_sysclass;
-extern struct sysdev_class s3c2443_sysclass;
-extern struct sysdev_class s3c6410_sysclass;
-extern struct sysdev_class s3c64xx_sysclass;
-extern struct sysdev_class s5p64x0_sysclass;
-extern struct sysdev_class s5pv210_sysclass;
-extern struct sysdev_class exynos4_sysclass;
+extern struct bus_type s3c2410_subsys;
+extern struct bus_type s3c2410a_subsys;
+extern struct bus_type s3c2412_subsys;
+extern struct bus_type s3c2416_subsys;
+extern struct bus_type s3c2440_subsys;
+extern struct bus_type s3c2442_subsys;
+extern struct bus_type s3c2443_subsys;
+extern struct bus_type s3c6410_subsys;
+extern struct bus_type s5p64x0_subsys;
+extern struct bus_type s5pv210_subsys;
+extern struct bus_type exynos4_subsys;
 
 extern void (*s5pc1xx_idle)(void);
 
diff --git a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
index 1c1ed54..d015763 100644
--- a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
+++ b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
@@ -12,7 +12,7 @@
 
 #include <plat/dma-core.h>
 
-extern struct sysdev_class dma_sysclass;
+extern struct bus_type dma_subsys;
 extern struct s3c2410_dma_chan s3c2410_chans[S3C_DMA_CHANNELS];
 
 #define DMA_CH_VALID		(1<<31)
diff --git a/arch/arm/plat-samsung/include/plat/exynos4.h b/arch/arm/plat-samsung/include/plat/exynos4.h
deleted file mode 100644
index f546e88..0000000
--- a/arch/arm/plat-samsung/include/plat/exynos4.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/exynos4.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Header file for exynos4 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for EXYNOS4 related SoCs */
-
-extern void exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void exynos4_register_clocks(void);
-extern void exynos4210_register_clocks(void);
-extern void exynos4212_register_clocks(void);
-extern void exynos4_setup_clocks(void);
-
-#ifdef CONFIG_ARCH_EXYNOS
-extern  int exynos_init(void);
-extern void exynos4_init_irq(void);
-extern void exynos4_map_io(void);
-extern void exynos4_init_clocks(int xtal);
-extern struct sys_timer exynos4_timer;
-
-#define exynos4_init_uarts exynos4_common_init_uarts
-
-#else
-#define exynos4_init_clocks NULL
-#define exynos4_init_uarts NULL
-#define exynos4_map_io NULL
-#define exynos_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index dcf6870..61fc537 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -17,11 +17,12 @@
 
 #include <linux/irq.h>
 
-struct sys_device;
+struct device;
 
 #ifdef CONFIG_PM
 
 extern __init int s3c_pm_init(void);
+extern __init int s3c64xx_pm_init(void);
 
 #else
 
@@ -29,6 +30,11 @@
 {
 	return 0;
 }
+
+static inline int s3c64xx_pm_init(void)
+{
+	return 0;
+}
 #endif
 
 /* configuration for the IRQ mask over sleep */
diff --git a/arch/arm/plat-samsung/include/plat/reset.h b/arch/arm/plat-samsung/include/plat/reset.h
deleted file mode 100644
index 32ca517..0000000
--- a/arch/arm/plat-samsung/include/plat/reset.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/reset.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_RESET_H
-#define __PLAT_SAMSUNG_RESET_H __FILE__
-
-extern void (*s5p_reset_hook)(void);
-
-#endif /* __PLAT_SAMSUNG_RESET_H */
diff --git a/arch/arm/plat-samsung/include/plat/s3c2412.h b/arch/arm/plat-samsung/include/plat/s3c2412.h
index 5bcfd14..cbae50d 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2412.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2412.h
@@ -21,9 +21,12 @@
 extern void s3c2412_init_clocks(int xtal);
 
 extern  int s3c2412_baseclk_add(void);
+
+extern void s3c2412_restart(char mode, const char *cmd);
 #else
 #define s3c2412_init_clocks NULL
 #define s3c2412_init_uarts NULL
 #define s3c2412_map_io NULL
 #define s3c2412_init NULL
+#define s3c2412_restart NULL
 #endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c2416.h b/arch/arm/plat-samsung/include/plat/s3c2416.h
index a764f85..de2b5bd 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2416.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2416.h
@@ -23,9 +23,11 @@
 
 extern  int s3c2416_baseclk_add(void);
 
+extern void s3c2416_restart(char mode, const char *cmd);
 #else
 #define s3c2416_init_clocks NULL
 #define s3c2416_init_uarts NULL
 #define s3c2416_map_io NULL
 #define s3c2416_init NULL
+#define s3c2416_restart NULL
 #endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c2443.h b/arch/arm/plat-samsung/include/plat/s3c2443.h
index 7fae1a0..dce05b4 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2443.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2443.h
@@ -24,11 +24,13 @@
 
 extern  int s3c2443_baseclk_add(void);
 
+extern void s3c2443_restart(char mode, const char *cmd);
 #else
 #define s3c2443_init_clocks NULL
 #define s3c2443_init_uarts NULL
 #define s3c2443_map_io NULL
 #define s3c2443_init NULL
+#define s3c2443_restart NULL
 #endif
 
 /* common code used by s3c2443 and others.
diff --git a/arch/arm/plat-samsung/include/plat/s3c6400.h b/arch/arm/plat-samsung/include/plat/s3c6400.h
deleted file mode 100644
index 37d428a..0000000
--- a/arch/arm/plat-samsung/include/plat/s3c6400.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s3c6400.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *	http://armlinux.simtec.co.uk/
- *
- * Header file for s3c6400 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S3C6400 related SoCs */
-
-extern void s3c6400_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s3c6400_setup_clocks(void);
-
-extern void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
-
-#ifdef CONFIG_CPU_S3C6400
-
-extern  int s3c6400_init(void);
-extern void s3c6400_init_irq(void);
-extern void s3c6400_map_io(void);
-extern void s3c6400_init_clocks(int xtal);
-
-#define s3c6400_init_uarts s3c6400_common_init_uarts
-
-#else
-#define s3c6400_init_clocks NULL
-#define s3c6400_init_uarts NULL
-#define s3c6400_map_io NULL
-#define s3c6400_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c6410.h b/arch/arm/plat-samsung/include/plat/s3c6410.h
deleted file mode 100644
index 20a6675..0000000
--- a/arch/arm/plat-samsung/include/plat/s3c6410.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s3c6410.h
- *
- * Copyright 2008 Openmoko,  Inc.
- * Copyright 2008 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *	http://armlinux.simtec.co.uk/
- *
- * Header file for s3c6410 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifdef CONFIG_CPU_S3C6410
-
-extern  int s3c6410_init(void);
-extern void s3c6410_init_irq(void);
-extern void s3c6410_map_io(void);
-extern void s3c6410_init_clocks(int xtal);
-
-#define s3c6410_init_uarts s3c6400_common_init_uarts
-
-#else
-#define s3c6410_init_clocks NULL
-#define s3c6410_init_uarts NULL
-#define s3c6410_map_io NULL
-#define s3c6410_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/s5p6440.h b/arch/arm/plat-samsung/include/plat/s5p6440.h
deleted file mode 100644
index bf85ebb..0000000
--- a/arch/arm/plat-samsung/include/plat/s5p6440.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5p6440.h
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Header file for s5p6440 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
- /* Common init code for S5P6440 related SoCs */
-
-extern void s5p6440_register_clocks(void);
-extern void s5p6440_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5P6440
-
-extern  int s5p64x0_init(void);
-extern void s5p6440_init_irq(void);
-extern void s5p6440_map_io(void);
-extern void s5p6440_init_clocks(int xtal);
-
-extern void s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-
-#else
-#define s5p6440_init_clocks NULL
-#define s5p6440_init_uarts NULL
-#define s5p6440_map_io NULL
-#define s5p64x0_init NULL
-#endif
-
-/* S5P6440 timer */
-
-extern struct sys_timer s5p6440_timer;
diff --git a/arch/arm/plat-samsung/include/plat/s5p6450.h b/arch/arm/plat-samsung/include/plat/s5p6450.h
deleted file mode 100644
index da25f9a..0000000
--- a/arch/arm/plat-samsung/include/plat/s5p6450.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5p6450.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Header file for s5p6450 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5P6450 related SoCs */
-
-extern void s5p6450_register_clocks(void);
-extern void s5p6450_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5P6450
-
-extern  int s5p64x0_init(void);
-extern void s5p6450_init_irq(void);
-extern void s5p6450_map_io(void);
-extern void s5p6450_init_clocks(int xtal);
-
-extern void s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-
-#else
-#define s5p6450_init_clocks NULL
-#define s5p6450_init_uarts NULL
-#define s5p6450_map_io NULL
-#define s5p64x0_init NULL
-#endif
-
-/* S5P6450 timer */
-
-extern struct sys_timer s5p6450_timer;
diff --git a/arch/arm/plat-samsung/include/plat/s5pc100.h b/arch/arm/plat-samsung/include/plat/s5pc100.h
deleted file mode 100644
index 9a21aea..0000000
--- a/arch/arm/plat-samsung/include/plat/s5pc100.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5pc100.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Header file for s5pc100 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5PC100 related SoCs */
-
-extern void s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5pc100_register_clocks(void);
-extern void s5pc100_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5PC100
-
-extern  int s5pc100_init(void);
-extern void s5pc100_init_irq(void);
-extern void s5pc100_map_io(void);
-extern void s5pc100_init_clocks(int xtal);
-
-#define s5pc100_init_uarts s5pc100_common_init_uarts
-
-#else
-#define s5pc100_init_clocks NULL
-#define s5pc100_init_uarts NULL
-#define s5pc100_map_io NULL
-#define s5pc100_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/s5pv210.h b/arch/arm/plat-samsung/include/plat/s5pv210.h
deleted file mode 100644
index b4bc6be7..0000000
--- a/arch/arm/plat-samsung/include/plat/s5pv210.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5pv210.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Header file for s5pv210 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5PV210 related SoCs */
-
-extern void s5pv210_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5pv210_register_clocks(void);
-extern void s5pv210_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5PV210
-
-extern  int s5pv210_init(void);
-extern void s5pv210_init_irq(void);
-extern void s5pv210_map_io(void);
-extern void s5pv210_init_clocks(int xtal);
-
-#define s5pv210_init_uarts s5pv210_common_init_uarts
-
-#else
-#define s5pv210_init_clocks NULL
-#define s5pv210_init_uarts NULL
-#define s5pv210_map_io NULL
-#define s5pv210_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/system-reset.h b/arch/arm/plat-samsung/include/plat/system-reset.h
deleted file mode 100644
index a448e990..0000000
--- a/arch/arm/plat-samsung/include/plat/system-reset.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/system-reset.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Based on arch/arm/mach-s3c2410/include/mach/system-reset.h
- *
- * S5P - System define for arch_reset()
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <plat/watchdog-reset.h>
-
-void (*s5p_reset_hook)(void);
-
-static void arch_reset(char mode, const char *cmd)
-{
-	/* SWRESET support in s5p_reset_hook() */
-
-	if (s5p_reset_hook)
-		s5p_reset_hook();
-
-	/* Perform reset using Watchdog reset
-	 * if there is no s5p_reset_hook()
-	 */
-
-	arch_wdt_reset();
-}
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index 40dbb2b..f19aff1 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 static inline void arch_wdt_reset(void)
 {
diff --git a/arch/arm/plat-samsung/pm-gpio.c b/arch/arm/plat-samsung/pm-gpio.c
index 4be016e..c2ff92c 100644
--- a/arch/arm/plat-samsung/pm-gpio.c
+++ b/arch/arm/plat-samsung/pm-gpio.c
@@ -14,7 +14,7 @@
 */
 
 #include <linux/kernel.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
diff --git a/arch/arm/plat-samsung/wakeup-mask.c b/arch/arm/plat-samsung/wakeup-mask.c
index dc81403..20c3d91 100644
--- a/arch/arm/plat-samsung/wakeup-mask.c
+++ b/arch/arm/plat-samsung/wakeup-mask.c
@@ -11,7 +11,7 @@
 
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/types.h>
 #include <linux/irq.h>
 #include <linux/io.h>
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index b4f340b..e0f2e5b 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,6 @@
 #
 
 # Common support
-obj-y	:= clock.o time.o
+obj-y	:= clock.o restart.o time.o
 
 obj-$(CONFIG_ARCH_SPEAR3XX)	+= shirq.o padmux.o
diff --git a/arch/arm/plat-spear/include/plat/system.h b/arch/arm/plat-spear/include/plat/system.h
index a235fa0..86c6f83 100644
--- a/arch/arm/plat-spear/include/plat/system.h
+++ b/arch/arm/plat-spear/include/plat/system.h
@@ -14,10 +14,6 @@
 #ifndef __PLAT_SYSTEM_H
 #define __PLAT_SYSTEM_H
 
-#include <linux/io.h>
-#include <asm/hardware/sp810.h>
-#include <mach/hardware.h>
-
 static inline void arch_idle(void)
 {
 	/*
@@ -27,15 +23,4 @@
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, const char *cmd)
-{
-	if (mode == 's') {
-		/* software reset, Jump into ROM at address 0 */
-		cpu_reset(0);
-	} else {
-		/* hardware reset, Use on-chip reset capability */
-		sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
-	}
-}
-
 #endif /* __PLAT_SYSTEM_H */
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
deleted file mode 100644
index 8c8b24d..0000000
--- a/arch/arm/plat-spear/include/plat/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_VMALLOC_H
-#define __PLAT_VMALLOC_H
-
-#define VMALLOC_END		0xF0000000UL
-
-#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
new file mode 100644
index 0000000..2b4e3d8
--- /dev/null
+++ b/arch/arm/plat-spear/restart.c
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/plat-spear/restart.c
+ *
+ * SPEAr platform specific restart functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/io.h>
+#include <asm/hardware/sp810.h>
+#include <mach/hardware.h>
+#include <mach/generic.h>
+
+void spear_restart(char mode, const char *cmd)
+{
+	if (mode == 's') {
+		/* software reset, Jump into ROM at address 0 */
+		soft_restart(0);
+	} else {
+		/* hardware reset, Use on-chip reset capability */
+		sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+	}
+}
diff --git a/arch/arm/plat-tcc/Kconfig b/arch/arm/plat-tcc/Kconfig
deleted file mode 100644
index 1bf4995..0000000
--- a/arch/arm/plat-tcc/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-if ARCH_TCC_926
-
-menu "Telechips ARM926-based CPUs"
-
-choice
-	prompt "Telechips CPU type:"
-	default ARCH_TCC8K
-
-config ARCH_TCC8K
-	bool TCC8000
-	select USB_ARCH_HAS_OHCI
-	help
-	  Support for Telechips TCC8000 systems
-
-endchoice
-
-source "arch/arm/mach-tcc8k/Kconfig"
-
-endmenu
-endif
diff --git a/arch/arm/plat-tcc/Makefile b/arch/arm/plat-tcc/Makefile
deleted file mode 100644
index eceabc8..0000000
--- a/arch/arm/plat-tcc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# "Telechips Platform Common Modules"
-
-obj-y := clock.o system.o
diff --git a/arch/arm/plat-tcc/clock.c b/arch/arm/plat-tcc/clock.c
deleted file mode 100644
index f3ced10..0000000
--- a/arch/arm/plat-tcc/clock.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Clock framework for Telechips SoCs
- * Based on arch/arm/plat-mxc/clock.c
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2010 Hans J. Koch, hjk@linutronix.de
- *
- * Licensed under the terms of the GPL v2.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/string.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-
-static DEFINE_MUTEX(clocks_mutex);
-
-/*-------------------------------------------------------------------------
- * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
-
-static void __clk_disable(struct clk *clk)
-{
-	BUG_ON(clk->refcount == 0);
-
-	if (!(--clk->refcount) && clk->disable) {
-		/* Unconditionally disable the clock in hardware */
-		clk->disable(clk);
-		/* recursively disable parents */
-		if (clk->parent)
-			__clk_disable(clk->parent);
-	}
-}
-
-static int __clk_enable(struct clk *clk)
-{
-	int ret = 0;
-
-	if (clk->refcount++ == 0 && clk->enable) {
-		if (clk->parent)
-			ret = __clk_enable(clk->parent);
-		if (ret)
-			return ret;
-		else
-			return clk->enable(clk);
-	}
-
-	return 0;
-}
-
-/* This function increments the reference count on the clock and enables the
- * clock if not already enabled. The parent clock tree is recursively enabled
- */
-int clk_enable(struct clk *clk)
-{
-	int ret = 0;
-
-	if (!clk)
-		return -EINVAL;
-
-	mutex_lock(&clocks_mutex);
-	ret = __clk_enable(clk);
-	mutex_unlock(&clocks_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(clk_enable);
-
-/* This function decrements the reference count on the clock and disables
- * the clock when reference count is 0. The parent clock tree is
- * recursively disabled
- */
-void clk_disable(struct clk *clk)
-{
-	if (!clk)
-		return;
-
-	mutex_lock(&clocks_mutex);
-	__clk_disable(clk);
-	mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL_GPL(clk_disable);
-
-/* Retrieve the *current* clock rate. If the clock itself
- * does not provide a special calculation routine, ask
- * its parent and so on, until one is able to return
- * a valid clock rate
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
-	if (!clk)
-		return 0UL;
-
-	if (clk->get_rate)
-		return clk->get_rate(clk);
-
-	return clk_get_rate(clk->parent);
-}
-EXPORT_SYMBOL_GPL(clk_get_rate);
-
-/* Round the requested clock rate to the nearest supported
- * rate that is less than or equal to the requested rate.
- * This is dependent on the clock's current parent.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	if (!clk)
-		return 0;
-	if (!clk->round_rate)
-		return 0;
-
-	return clk->round_rate(clk, rate);
-}
-EXPORT_SYMBOL_GPL(clk_round_rate);
-
-/* Set the clock to the requested clock rate. The rate must
- * match a supported rate exactly based on what clk_round_rate returns
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	int ret = -EINVAL;
-
-	if (!clk)
-		return ret;
-	if (!clk->set_rate || !rate)
-		return ret;
-
-	mutex_lock(&clocks_mutex);
-	ret = clk->set_rate(clk, rate);
-	mutex_unlock(&clocks_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-/* Set the clock's parent to another clock source */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	struct clk *old;
-	int ret = -EINVAL;
-
-	if (!clk)
-		return ret;
-	if (!clk->set_parent || !parent)
-		return ret;
-
-	mutex_lock(&clocks_mutex);
-	old = clk->parent;
-	if (clk->refcount)
-		__clk_enable(parent);
-	ret = clk->set_parent(clk, parent);
-	if (ret)
-		old = parent;
-	if (clk->refcount)
-		__clk_disable(old);
-	mutex_unlock(&clocks_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_parent);
-
-/* Retrieve the clock's parent clock source */
-struct clk *clk_get_parent(struct clk *clk)
-{
-	if (!clk)
-		return NULL;
-
-	return clk->parent;
-}
-EXPORT_SYMBOL_GPL(clk_get_parent);
diff --git a/arch/arm/plat-tcc/include/mach/clock.h b/arch/arm/plat-tcc/include/mach/clock.h
deleted file mode 100644
index a12f58a..0000000
--- a/arch/arm/plat-tcc/include/mach/clock.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Low level clock header file for Telechips TCC architecture
- * (C) 2010 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the GPL v2.
- */
-
-#ifndef __ASM_ARCH_TCC_CLOCK_H__
-#define __ASM_ARCH_TCC_CLOCK_H__
-
-#ifndef __ASSEMBLY__
-
-struct clk {
-	struct clk *parent;
-	/* id number of a root clock, 0 for normal clocks */
-	int root_id;
-	/* Reference count of clock enable/disable */
-	int refcount;
-	/* Address of associated BCLKCTRx register. Must be set. */
-	void __iomem *bclkctr;
-	/* Bit position for BCLKCTRx. Must be set. */
-	int bclk_shift;
-	/* Address of ACLKxxx register, if any. */
-	void __iomem *aclkreg;
-	/* get the current clock rate (always a fresh value) */
-	unsigned long (*get_rate) (struct clk *);
-	/* Function ptr to set the clock to a new rate. The rate must match a
-	   supported rate returned from round_rate. Leave blank if clock is not
-	   programmable */
-	int (*set_rate) (struct clk *, unsigned long);
-	/* Function ptr to round the requested clock rate to the nearest
-	   supported rate that is less than or equal to the requested rate. */
-	unsigned long (*round_rate) (struct clk *, unsigned long);
-	/* Function ptr to enable the clock. Leave blank if clock can not
-	   be gated. */
-	int (*enable) (struct clk *);
-	/* Function ptr to disable the clock. Leave blank if clock can not
-	   be gated. */
-	void (*disable) (struct clk *);
-	/* Function ptr to set the parent clock of the clock. */
-	int (*set_parent) (struct clk *, struct clk *);
-};
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_ARCH_MXC_CLOCK_H__ */
diff --git a/arch/arm/plat-tcc/include/mach/debug-macro.S b/arch/arm/plat-tcc/include/mach/debug-macro.S
deleted file mode 100644
index cf17d04..0000000
--- a/arch/arm/plat-tcc/include/mach/debug-macro.S
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 1994-1999 Russell King
- * Copyright (C) 2008-2009 Telechips
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-		.macro	addruart, rp, rv, tmp
-		moveq	\rp, #0x90000000	@ physical base address
-		movne	\rv, #0xF1000000	@ virtual base
-		orr	\rp, \rp, #0x00007000	@ UART0
-		orr	\rv, \rv, #0x00007000	@ UART0
-		.endm
-
-		.macro	senduart,rd,rx
-		strb	\rd, [\rx, #0x44]
-		.endm
-
-		.macro	waituart,rd,rx
-		.endm
-
-		.macro	busyuart,rd,rx
-1001:
-		ldr \rd, [\rx, #0x14]
-		tst \rd, #0x20
-
-		beq 1001b
-		.endm
diff --git a/arch/arm/plat-tcc/include/mach/entry-macro.S b/arch/arm/plat-tcc/include/mach/entry-macro.S
deleted file mode 100644
index 748f401..0000000
--- a/arch/arm/plat-tcc/include/mach/entry-macro.S
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * include/asm-arm/arch-tcc83x/entry-macro.S
- *
- * Author : <linux@telechips.com>
- * Created: June 10, 2008
- * Description: Low-level IRQ helper macros for Telechips-based platforms
- *
- * Copyright (C) 2008-2009 Telechips
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-	.macro	disable_fiq
-	.endm
-
-	.macro  get_irqnr_preamble, base, tmp
-	.endm
-
-	.macro  arch_ret_to_user, tmp1, tmp2
-	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-		ldr	\base, =0xF2003000 @ base address of PIC registers
-
-		@@ read MREQ register of PIC0
-
-		mov	\irqnr, #0
-		ldr	\irqstat, [\base, #0x00000014 ]	@ lower 32 interrupts
-		cmp	\irqstat, #0
-		bne	1001f
-
-		@@ read MREQ register of PIC1
-
-		ldr	\irqstat, [\base, #0x00000094]	@ upper 32 interrupts
-		cmp	\irqstat, #0
-		beq	1002f
-		mov	\irqnr, #0x20
-
-1001:
-		movs	\tmp, \irqstat, lsl #16
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #16
-
-		movs	\tmp, \irqstat, lsl #8
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #8
-
-		movs	\tmp, \irqstat, lsl #4
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #4
-
-		movs	\tmp, \irqstat, lsl #2
-		movne	\irqstat, \tmp
-		addeq	\irqnr, \irqnr, #2
-
-		movs	\tmp, \irqstat, lsl #1
-		addeq	\irqnr, \irqnr, #1
-		orrs	\base, \base, #1
-1002:
-		@@ exit here, Z flag unset if IRQ
-
-	.endm
diff --git a/arch/arm/plat-tcc/include/mach/hardware.h b/arch/arm/plat-tcc/include/mach/hardware.h
deleted file mode 100644
index e70d126..0000000
--- a/arch/arm/plat-tcc/include/mach/hardware.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Author: RidgeRun, Inc. Greg Lonnon <glonnon@ridgerun.com>
- * Reorganized for Linux-2.6 by Tony Lindgren <tony@atomide.com>
- *                          and Dirk Behme <dirk.behme@de.bosch.com>
- * Rewritten by:    <linux@telechips.com>
- * Description: Hardware definitions for TCC8300 processors and boards
- *
- * Copyright (C) 2001 RidgeRun, Inc.
- * Copyright (C) 2008-2009 Telechips
- *
- * Modifications for mainline (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GNU Pulic License version 2.
- */
-
-#ifndef __ASM_ARCH_TCC_HARDWARE_H
-#define __ASM_ARCH_TCC_HARDWARE_H
-
-#include <asm/sizes.h>
-#ifndef __ASSEMBLER__
-#include <asm/types.h>
-#endif
-#include <mach/io.h>
-
-/*
- * ----------------------------------------------------------------------------
- * Clocks
- * ----------------------------------------------------------------------------
- */
-#define CLKGEN_REG_BASE		0xfffece00
-#define ARM_CKCTL		(CLKGEN_REG_BASE + 0x0)
-#define ARM_IDLECT1		(CLKGEN_REG_BASE + 0x4)
-#define ARM_IDLECT2		(CLKGEN_REG_BASE + 0x8)
-#define ARM_EWUPCT		(CLKGEN_REG_BASE + 0xC)
-#define ARM_RSTCT1		(CLKGEN_REG_BASE + 0x10)
-#define ARM_RSTCT2		(CLKGEN_REG_BASE + 0x14)
-#define ARM_SYSST		(CLKGEN_REG_BASE + 0x18)
-#define ARM_IDLECT3		(CLKGEN_REG_BASE + 0x24)
-
-/* DPLL control registers */
-#define DPLL_CTL		0xfffecf00
-
-#endif	/* __ASM_ARCH_TCC_HARDWARE_H */
diff --git a/arch/arm/plat-tcc/include/mach/io.h b/arch/arm/plat-tcc/include/mach/io.h
deleted file mode 100644
index 3e911d3..0000000
--- a/arch/arm/plat-tcc/include/mach/io.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * IO definitions for TCC8000 processors and boards
- *
- * Copyright (C) 1997-1999 Russell King
- * Copyright (C) 2008-2009 Telechips
- * Copyright (C) 2010 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GNU Public License version 2.
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a)			__typesafe_io(a)
-#define __mem_pci(a)		(a)
-
-#endif
diff --git a/arch/arm/plat-tcc/include/mach/irqs.h b/arch/arm/plat-tcc/include/mach/irqs.h
deleted file mode 100644
index da86389..0000000
--- a/arch/arm/plat-tcc/include/mach/irqs.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * IRQ definitions for TCC8xxx
- *
- * Copyright (C) 2008-2009 Telechips
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL v2.
- *
- */
-
-#ifndef __ASM_ARCH_TCC_IRQS_H
-#define __ASM_ARCH_TCC_IRQS_H
-
-#define NR_IRQS 64
-
-/* PIC0 interrupts */
-#define INT_ADMA1	0
-#define INT_BDMA	1
-#define INT_ADMA0	2
-#define INT_GDMA1	3
-#define INT_I2S0RX	4
-#define INT_I2S0TX	5
-#define INT_TC		6
-#define INT_UART0	7
-#define INT_USBD	8
-#define INT_SPI0TX	9
-#define INT_UDMA	10
-#define INT_LIRQ	11
-#define INT_GDMA2	12
-#define INT_GDMA0	13
-#define INT_TC32	14
-#define INT_LCD		15
-#define INT_ADC		16
-#define INT_I2C		17
-#define INT_RTCP	18
-#define INT_RTCA	19
-#define INT_NFC		20
-#define INT_SD0		21
-#define INT_GSB0	22
-#define INT_PK		23
-#define INT_USBH0	24
-#define INT_USBH1	25
-#define INT_G2D		26
-#define INT_ECC		27
-#define INT_SPI0RX	28
-#define INT_UART1	29
-#define INT_MSCL	30
-#define INT_GSB1	31
-/* PIC1 interrupts */
-#define INT_E0		32
-#define INT_E1		33
-#define INT_E2		34
-#define INT_E3		35
-#define INT_E4		36
-#define INT_E5		37
-#define INT_E6		38
-#define INT_E7		39
-#define INT_UART2	40
-#define INT_UART3	41
-#define INT_SPI1TX	42
-#define INT_SPI1RX	43
-#define INT_GSB2	44
-#define INT_SPDIF	45
-#define INT_CDIF	46
-#define INT_VBON	47
-#define INT_VBOFF	48
-#define INT_SD1		49
-#define INT_UART4	50
-#define INT_GDMA3	51
-#define INT_I2S1RX	52
-#define INT_I2S1TX	53
-#define INT_CAN0	54
-#define INT_CAN1	55
-#define INT_GSB3	56
-#define INT_KRST	57
-#define INT_UNUSED	58
-#define INT_SD0D3	59
-#define INT_SD1D3	60
-#define INT_GPS0	61
-#define INT_GPS1	62
-#define INT_GPS2	63
-
-#endif  /* ASM_ARCH_TCC_IRQS_H */
diff --git a/arch/arm/plat-tcc/include/mach/system.h b/arch/arm/plat-tcc/include/mach/system.h
deleted file mode 100644
index 909e603..0000000
--- a/arch/arm/plat-tcc/include/mach/system.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Author: <linux@telechips.com>
- * Created: June 10, 2008
- * Description: LINUX SYSTEM FUNCTIONS for TCC83x
- *
- * Copyright (C) 2008-2009 Telechips
- *
- * Licensed under the terms of the GPL v2.
- *
- */
-
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H
-#include <linux/clk.h>
-
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-
-extern void plat_tcc_reboot(void);
-
-static inline void arch_idle(void)
-{
-	cpu_do_idle();
-}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
-	plat_tcc_reboot();
-}
-
-#endif
diff --git a/arch/arm/plat-tcc/include/mach/tcc8k-regs.h b/arch/arm/plat-tcc/include/mach/tcc8k-regs.h
deleted file mode 100644
index 1d94282..0000000
--- a/arch/arm/plat-tcc/include/mach/tcc8k-regs.h
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * Telechips TCC8000 register definitions
- *
- * (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPLv2.
- */
-
-#ifndef TCC8K_REGS_H
-#define TCC8K_REGS_H
-
-#include <linux/types.h>
-
-#define EXT_SDRAM_BASE		0x20000000
-#define INT_SRAM_BASE		0x30000000
-#define INT_SRAM_SIZE		SZ_32K
-#define CS0_BASE		0x40000000
-#define CS1_BASE		0x50000000
-#define CS1_SIZE		SZ_64K
-#define CS2_BASE		0x60000000
-#define CS3_BASE		0x70000000
-#define AHB_PERI_BASE		0x80000000
-#define AHB_PERI_SIZE		SZ_64K
-#define APB0_PERI_BASE		0x90000000
-#define APB0_PERI_SIZE		SZ_128K
-#define APB1_PERI_BASE		0x98000000
-#define APB1_PERI_SIZE		SZ_128K
-#define DATA_TCM_BASE		0xa0000000
-#define DATA_TCM_SIZE		SZ_8K
-#define EXT_MEM_CTRL_BASE	0xf0000000
-#define EXT_MEM_CTRL_SIZE	SZ_4K
-
-#define CS1_BASE_VIRT		(void __iomem *)0xf7000000
-#define AHB_PERI_BASE_VIRT	(void __iomem *)0xf4000000
-#define APB0_PERI_BASE_VIRT	(void __iomem *)0xf1000000
-#define APB1_PERI_BASE_VIRT	(void __iomem *)0xf2000000
-#define EXT_MEM_CTRL_BASE_VIRT	(void __iomem *)0xf3000000
-#define INT_SRAM_BASE_VIRT	(void __iomem *)0xf5000000
-#define DATA_TCM_BASE_VIRT	(void __iomem *)0xf6000000
-
-#define __REG(x)     (*((volatile u32 *)(x)))
-
-/* USB Device Controller Registers */
-#define UDC_BASE	(AHB_PERI_BASE_VIRT + 0x8000)
-#define UDC_BASE_PHYS	(AHB_PERI_BASE + 0x8000)
-
-#define UDC_IR_OFFS		0x00
-#define UDC_EIR_OFFS		0x04
-#define UDC_EIER_OFFS		0x08
-#define UDC_FAR_OFFS		0x0c
-#define UDC_FNR_OFFS		0x10
-#define UDC_EDR_OFFS		0x14
-#define UDC_RT_OFFS		0x18
-#define UDC_SSR_OFFS		0x1c
-#define UDC_SCR_OFFS		0x20
-#define UDC_EP0SR_OFFS		0x24
-#define UDC_EP0CR_OFFS		0x28
-
-#define UDC_ESR_OFFS		0x2c
-#define UDC_ECR_OFFS		0x30
-#define UDC_BRCR_OFFS		0x34
-#define UDC_BWCR_OFFS		0x38
-#define UDC_MPR_OFFS		0x3c
-#define UDC_DCR_OFFS		0x40
-#define UDC_DTCR_OFFS		0x44
-#define UDC_DFCR_OFFS		0x48
-#define UDC_DTTCR1_OFFS		0x4c
-#define UDC_DTTCR2_OFFS		0x50
-#define UDC_ESR2_OFFS		0x54
-
-#define UDC_SCR2_OFFS		0x58
-#define UDC_EP0BUF_OFFS		0x60
-#define UDC_EP1BUF_OFFS		0x64
-#define UDC_EP2BUF_OFFS		0x68
-#define UDC_EP3BUF_OFFS		0x6c
-#define UDC_PLICR_OFFS		0xa0
-#define UDC_PCR_OFFS		0xa4
-
-#define UDC_UPCR0_OFFS		0xc8
-#define UDC_UPCR1_OFFS		0xcc
-#define UDC_UPCR2_OFFS		0xd0
-#define UDC_UPCR3_OFFS		0xd4
-
-/* Bits in UDC_EIR */
-#define UDC_EIR_EP0I		(1 << 0)
-#define UDC_EIR_EP1I		(1 << 1)
-#define UDC_EIR_EP2I		(1 << 2)
-#define UDC_EIR_EP3I		(1 << 3)
-#define UDC_EIR_EPI_MASK	0x0f
-
-/* Bits in UDC_EIER */
-#define UDC_EIER_EP0IE		(1 << 0)
-#define UDC_EIER_EP1IE		(1 << 1)
-#define UDC_EIER_EP2IE		(1 << 2)
-#define UDC_EIER_EP3IE		(1 << 3)
-
-/* Bits in UDC_FNR */
-#define UDC_FNR_FN_MASK		0x7ff
-#define UDC_FNR_SM		(1 << 13)
-#define UDC_FNR_FTL		(1 << 14)
-
-/* Bits in UDC_SSR */
-#define UDC_SSR_HFRES		(1 << 0)
-#define UDC_SSR_HFSUSP		(1 << 1)
-#define UDC_SSR_HFRM		(1 << 2)
-#define UDC_SSR_SDE		(1 << 3)
-#define UDC_SSR_HSP		(1 << 4)
-#define UDC_SSR_DM		(1 << 5)
-#define UDC_SSR_DP		(1 << 6)
-#define UDC_SSR_TBM		(1 << 7)
-#define UDC_SSR_VBON		(1 << 8)
-#define UDC_SSR_VBOFF		(1 << 9)
-#define UDC_SSR_EOERR		(1 << 10)
-#define UDC_SSR_DCERR		(1 << 11)
-#define UDC_SSR_TCERR		(1 << 12)
-#define UDC_SSR_BSERR		(1 << 13)
-#define UDC_SSR_TMERR		(1 << 14)
-#define UDC_SSR_BAERR		(1 << 15)
-
-/* Bits in UDC_SCR */
-#define UDC_SCR_HRESE		(1 << 0)
-#define UDC_SCR_HSSPE		(1 << 1)
-#define UDC_SCR_RRDE		(1 << 5)
-#define UDC_SCR_SPDEN		(1 << 6)
-#define UDC_SCR_DIEN		(1 << 12)
-
-/* Bits in UDC_EP0SR */
-#define UDC_EP0SR_RSR		(1 << 0)
-#define UDC_EP0SR_TST		(1 << 1)
-#define UDC_EP0SR_SHT		(1 << 4)
-#define UDC_EP0SR_LWO		(1 << 6)
-
-/* Bits in UDC_EP0CR */
-#define UDC_EP0CR_ESS		(1 << 1)
-
-/* Bits in UDC_ESR */
-#define UDC_ESR_RPS		(1 << 0)
-#define UDC_ESR_TPS		(1 << 1)
-#define UDC_ESR_LWO		(1 << 4)
-#define UDC_ESR_FFS		(1 << 6)
-
-/* Bits in UDC_ECR */
-#define UDC_ECR_ESS		(1 << 1)
-#define UDC_ECR_CDP		(1 << 2)
-
-#define UDC_ECR_FLUSH		(1 << 6)
-#define UDC_ECR_DUEN		(1 << 7)
-
-/* Bits in UDC_UPCR0 */
-#define UDC_UPCR0_VBD		(1 << 1)
-#define UDC_UPCR0_VBDS		(1 << 6)
-#define UDC_UPCR0_RCD_12	(0x0 << 9)
-#define UDC_UPCR0_RCD_24	(0x1 << 9)
-#define UDC_UPCR0_RCD_48	(0x2 << 9)
-#define UDC_UPCR0_RCS_EXT	(0x1 << 11)
-#define UDC_UPCR0_RCS_XTAL	(0x0 << 11)
-
-/* Bits in UDC_UPCR1 */
-#define UDC_UPCR1_CDT(x)	((x) << 0)
-#define UDC_UPCR1_OTGT(x)	((x) << 3)
-#define UDC_UPCR1_SQRXT(x)	((x) << 8)
-#define UDC_UPCR1_TXFSLST(x)	((x) << 12)
-
-/* Bits in UDC_UPCR2 */
-#define UDC_UPCR2_TP		(1 << 0)
-#define UDC_UPCR2_TXRT(x)	((x) << 2)
-#define UDC_UPCR2_TXVRT(x)	((x) << 5)
-#define UDC_UPCR2_OPMODE(x)	((x) << 9)
-#define UDC_UPCR2_XCVRSEL(x)	((x) << 12)
-#define UDC_UPCR2_TM		(1 << 14)
-
-/* USB Host Controller registers */
-#define USBH0_BASE	(AHB_PERI_BASE_VIRT + 0xb000)
-#define USBH1_BASE	(AHB_PERI_BASE_VIRT + 0xb800)
-
-#define OHCI_INT_ENABLE_OFFS	0x10
-
-#define RH_DESCRIPTOR_A_OFFS	0x48
-#define RH_DESCRIPTOR_B_OFFS	0x4c
-
-#define USBHTCFG0_OFFS		0x100
-#define USBHHCFG0_OFFS		0x104
-#define USBHHCFG1_OFFS		0x104
-
-/* DMA controller registers */
-#define DMAC0_BASE	(AHB_PERI_BASE + 0x4000)
-#define DMAC1_BASE	(AHB_PERI_BASE + 0xa000)
-#define DMAC2_BASE	(AHB_PERI_BASE + 0x4800)
-#define DMAC3_BASE	(AHB_PERI_BASE + 0xa800)
-
-#define DMAC_CH_OFFSET(ch)	(ch * 0x30)
-
-#define ST_SADR_OFFS		0x00
-#define SPARAM_OFFS		0x04
-#define C_SADR_OFFS		0x0c
-#define ST_DADR_OFFS		0x10
-#define DPARAM_OFFS		0x14
-#define C_DADR_OFFS		0x1c
-#define HCOUNT_OFFS		0x20
-#define CHCTRL_OFFS		0x24
-#define RPTCTRL_OFFS		0x28
-#define EXTREQ_A_OFFS		0x2c
-
-/* Bits in CHCTRL register */
-#define CHCTRL_EN		(1 << 0)
-
-#define CHCTRL_IEN		(1 << 2)
-#define CHCTRL_FLAG		(1 << 3)
-#define CHCTRL_WSIZE8		(0 << 4)
-#define CHCTRL_WSIZE16		(1 << 4)
-#define CHCTRL_WSIZE32		(2 << 4)
-
-#define CHCTRL_BSIZE1		(0 << 6)
-#define CHCTRL_BSIZE2		(1 << 6)
-#define CHCTRL_BSIZE4		(2 << 6)
-#define CHCTRL_BSIZE8		(3 << 6)
-
-#define CHCTRL_TYPE_SINGLE_E	(0 << 8)
-#define CHCTRL_TYPE_HW		(1 << 8)
-#define CHCTRL_TYPE_SW		(2 << 8)
-#define CHCTRL_TYPE_SINGLE_L	(3 << 8)
-
-#define CHCTRL_BST		(1 << 10)
-
-/* Use DMA controller 0, channel 2 for USB */
-#define USB_DMA_BASE		(DMAC0_BASE + DMAC_CH_OFFSET(2))
-
-/* NAND flash controller registers */
-#define NFC_BASE	(AHB_PERI_BASE_VIRT + 0xd000)
-#define NFC_BASE_PHYS	(AHB_PERI_BASE + 0xd000)
-
-#define NFC_CMD_OFFS		0x00
-#define NFC_LADDR_OFFS		0x04
-#define NFC_BADDR_OFFS		0x08
-#define NFC_SADDR_OFFS		0x0c
-#define NFC_WDATA_OFFS		0x10
-#define NFC_LDATA_OFFS		0x20
-#define NFC_SDATA_OFFS		0x40
-#define NFC_CTRL_OFFS		0x50
-#define NFC_PSTART_OFFS		0x54
-#define NFC_RSTART_OFFS		0x58
-#define NFC_DSIZE_OFFS		0x5c
-#define NFC_IREQ_OFFS		0x60
-#define NFC_RST_OFFS		0x64
-#define NFC_CTRL1_OFFS		0x68
-#define NFC_MDATA_OFFS		0x70
-
-#define NFC_WDATA_PHYS_ADDR	(NFC_BASE_PHYS + NFC_WDATA_OFFS)
-
-/* Bits in NFC_CTRL */
-#define NFC_CTRL_BHLD_MASK	(0xf << 0)
-#define NFC_CTRL_BPW_MASK	(0xf << 4)
-#define NFC_CTRL_BSTP_MASK	(0xf << 8)
-#define NFC_CTRL_CADDR_MASK	(0x7 << 12)
-#define NFC_CTRL_CADDR_1	(0x0 << 12)
-#define NFC_CTRL_CADDR_2	(0x1 << 12)
-#define NFC_CTRL_CADDR_3	(0x2 << 12)
-#define NFC_CTRL_CADDR_4	(0x3 << 12)
-#define NFC_CTRL_CADDR_5	(0x4 << 12)
-#define NFC_CTRL_MSK		(1 << 15)
-#define NFC_CTRL_PSIZE256	(0 << 16)
-#define NFC_CTRL_PSIZE512	(1 << 16)
-#define NFC_CTRL_PSIZE1024	(2 << 16)
-#define NFC_CTRL_PSIZE2048	(3 << 16)
-#define NFC_CTRL_PSIZE4096	(4 << 16)
-#define NFC_CTRL_PSIZE_MASK	(7 << 16)
-#define NFC_CTRL_BSIZE1		(0 << 19)
-#define NFC_CTRL_BSIZE2		(1 << 19)
-#define NFC_CTRL_BSIZE4		(2 << 19)
-#define NFC_CTRL_BSIZE8		(3 << 19)
-#define NFC_CTRL_BSIZE_MASK	(3 << 19)
-#define NFC_CTRL_RDY		(1 << 21)
-#define NFC_CTRL_CS0SEL		(1 << 22)
-#define NFC_CTRL_CS1SEL		(1 << 23)
-#define NFC_CTRL_CS2SEL		(1 << 24)
-#define NFC_CTRL_CS3SEL		(1 << 25)
-#define NFC_CTRL_CSMASK		(0xf << 22)
-#define NFC_CTRL_BW		(1 << 26)
-#define NFC_CTRL_FS		(1 << 27)
-#define NFC_CTRL_DEN		(1 << 28)
-#define NFC_CTRL_READ_IEN	(1 << 29)
-#define NFC_CTRL_PROG_IEN	(1 << 30)
-#define NFC_CTRL_RDY_IEN	(1 << 31)
-
-/* Bits in NFC_IREQ */
-#define NFC_IREQ_IRQ0		(1 << 0)
-#define NFC_IREQ_IRQ1		(1 << 1)
-#define NFC_IREQ_IRQ2		(1 << 2)
-
-#define NFC_IREQ_FLAG0		(1 << 4)
-#define NFC_IREQ_FLAG1		(1 << 5)
-#define NFC_IREQ_FLAG2		(1 << 6)
-
-/* MMC controller registers */
-#define MMC0_BASE	(AHB_PERI_BASE_VIRT + 0xe000)
-#define MMC1_BASE	(AHB_PERI_BASE_VIRT + 0xe800)
-
-/* UART base addresses */
-
-#define UART0_BASE	(APB0_PERI_BASE_VIRT + 0x07000)
-#define UART0_BASE_PHYS	(APB0_PERI_BASE + 0x07000)
-#define UART1_BASE	(APB0_PERI_BASE_VIRT + 0x08000)
-#define UART1_BASE_PHYS	(APB0_PERI_BASE + 0x08000)
-#define UART2_BASE	(APB0_PERI_BASE_VIRT + 0x09000)
-#define UART2_BASE_PHYS	(APB0_PERI_BASE + 0x09000)
-#define UART3_BASE	(APB0_PERI_BASE_VIRT + 0x0a000)
-#define UART3_BASE_PHYS	(APB0_PERI_BASE + 0x0a000)
-#define UART4_BASE	(APB0_PERI_BASE_VIRT + 0x15000)
-#define UART4_BASE_PHYS	(APB0_PERI_BASE + 0x15000)
-
-#define UART_BASE	UART0_BASE
-#define UART_BASE_PHYS	UART0_BASE_PHYS
-
-/* ECC controller */
-#define ECC_CTR_BASE	(APB0_PERI_BASE_VIRT + 0xd000)
-
-#define ECC_CTRL_OFFS		0x00
-#define ECC_BASE_OFFS		0x04
-#define ECC_MASK_OFFS		0x08
-#define ECC_CLEAR_OFFS		0x0c
-#define ECC4_0_OFFS		0x10
-#define ECC4_1_OFFS		0x14
-
-#define ECC_EADDR0_OFFS		0x50
-
-#define ECC_ERRNUM_OFFS		0x90
-#define ECC_IREQ_OFFS		0x94
-
-/* Bits in ECC_CTRL */
-#define ECC_CTRL_ECC4_DIEN	(1 << 28)
-#define ECC_CTRL_ECC8_DIEN	(1 << 29)
-#define ECC_CTRL_ECC12_DIEN	(1 << 30)
-#define ECC_CTRL_ECC_DISABLE	0x0
-#define ECC_CTRL_ECC_SLC_ENC	0x8
-#define ECC_CTRL_ECC_SLC_DEC	0x9
-#define ECC_CTRL_ECC4_ENC	0xa
-#define ECC_CTRL_ECC4_DEC	0xb
-#define ECC_CTRL_ECC8_ENC	0xc
-#define ECC_CTRL_ECC8_DEC	0xd
-#define ECC_CTRL_ECC12_ENC	0xe
-#define ECC_CTRL_ECC12_DEC	0xf
-
-/* Bits in ECC_IREQ */
-#define ECC_IREQ_E4DI		(1 << 4)
-
-#define ECC_IREQ_E4DF		(1 << 20)
-#define ECC_IREQ_E4EF		(1 << 21)
-
-/* Interrupt controller */
-
-#define PIC0_BASE	(APB1_PERI_BASE_VIRT + 0x3000)
-#define PIC0_BASE_PHYS	(APB1_PERI_BASE + 0x3000)
-
-#define PIC0_IEN_OFFS		0x00
-#define PIC0_CREQ_OFFS		0x04
-#define PIC0_IREQ_OFFS		0x08
-#define PIC0_IRQSEL_OFFS	0x0c
-#define PIC0_SRC_OFFS		0x10
-#define PIC0_MREQ_OFFS		0x14
-#define PIC0_TSTREQ_OFFS	0x18
-#define PIC0_POL_OFFS		0x1c
-#define PIC0_IRQ_OFFS		0x20
-#define PIC0_FIQ_OFFS		0x24
-#define PIC0_MIRQ_OFFS		0x28
-#define PIC0_MFIQ_OFFS		0x2c
-#define PIC0_TMODE_OFFS		0x30
-#define PIC0_SYNC_OFFS		0x34
-#define PIC0_WKUP_OFFS		0x38
-#define PIC0_TMODEA_OFFS	0x3c
-#define PIC0_INTOEN_OFFS	0x40
-#define PIC0_MEN0_OFFS		0x44
-#define PIC0_MEN_OFFS		0x48
-
-#define PIC0_IEN		__REG(PIC0_BASE + PIC0_IEN_OFFS)
-#define PIC0_IEN_PHYS		__REG(PIC0_BASE_PHYS + PIC0_IEN_OFFS)
-#define PIC0_CREQ		__REG(PIC0_BASE + PIC0_CREQ_OFFS)
-#define PIC0_CREQ_PHYS		__REG(PIC0_BASE_PHYS + PIC0_CREQ_OFFS)
-#define PIC0_IREQ		__REG(PIC0_BASE + PIC0_IREQ_OFFS)
-#define PIC0_IRQSEL		__REG(PIC0_BASE + PIC0_IRQSEL_OFFS)
-#define PIC0_IRQSEL_PHYS	__REG(PIC0_BASE_PHYS + PIC0_IRQSEL_OFFS)
-#define PIC0_SRC		__REG(PIC0_BASE + PIC0_SRC_OFFS)
-#define PIC0_MREQ		__REG(PIC0_BASE + PIC0_MREQ_OFFS)
-#define PIC0_TSTREQ		__REG(PIC0_BASE + PIC0_TSTREQ_OFFS)
-#define PIC0_POL		__REG(PIC0_BASE + PIC0_POL_OFFS)
-#define PIC0_IRQ		__REG(PIC0_BASE + PIC0_IRQ_OFFS)
-#define PIC0_FIQ		__REG(PIC0_BASE + PIC0_FIQ_OFFS)
-#define PIC0_MIRQ		__REG(PIC0_BASE + PIC0_MIRQ_OFFS)
-#define PIC0_MFIQ		__REG(PIC0_BASE + PIC0_MFIQ_OFFS)
-#define PIC0_TMODE		__REG(PIC0_BASE + PIC0_TMODE_OFFS)
-#define PIC0_TMODE_PHYS		__REG(PIC0_BASE_PHYS + PIC0_TMODE_OFFS)
-#define PIC0_SYNC		__REG(PIC0_BASE + PIC0_SYNC_OFFS)
-#define PIC0_WKUP		__REG(PIC0_BASE + PIC0_WKUP_OFFS)
-#define PIC0_TMODEA		__REG(PIC0_BASE + PIC0_TMODEA_OFFS)
-#define PIC0_INTOEN		__REG(PIC0_BASE + PIC0_INTOEN_OFFS)
-#define PIC0_MEN0		__REG(PIC0_BASE + PIC0_MEN0_OFFS)
-#define PIC0_MEN		__REG(PIC0_BASE + PIC0_MEN_OFFS)
-
-#define PIC1_BASE	(APB1_PERI_BASE_VIRT + 0x3080)
-
-#define PIC1_IEN_OFFS		0x00
-#define PIC1_CREQ_OFFS		0x04
-#define PIC1_IREQ_OFFS		0x08
-#define PIC1_IRQSEL_OFFS	0x0c
-#define PIC1_SRC_OFFS		0x10
-#define PIC1_MREQ_OFFS		0x14
-#define PIC1_TSTREQ_OFFS	0x18
-#define PIC1_POL_OFFS		0x1c
-#define PIC1_IRQ_OFFS		0x20
-#define PIC1_FIQ_OFFS		0x24
-#define PIC1_MIRQ_OFFS		0x28
-#define PIC1_MFIQ_OFFS		0x2c
-#define PIC1_TMODE_OFFS		0x30
-#define PIC1_SYNC_OFFS		0x34
-#define PIC1_WKUP_OFFS		0x38
-#define PIC1_TMODEA_OFFS	0x3c
-#define PIC1_INTOEN_OFFS	0x40
-#define PIC1_MEN1_OFFS		0x44
-#define PIC1_MEN_OFFS		0x48
-
-#define PIC1_IEN	__REG(PIC1_BASE + PIC1_IEN_OFFS)
-#define PIC1_CREQ	__REG(PIC1_BASE + PIC1_CREQ_OFFS)
-#define PIC1_IREQ	__REG(PIC1_BASE + PIC1_IREQ_OFFS)
-#define PIC1_IRQSEL	__REG(PIC1_BASE + PIC1_IRQSEL_OFFS)
-#define PIC1_SRC	__REG(PIC1_BASE + PIC1_SRC_OFFS)
-#define PIC1_MREQ	__REG(PIC1_BASE + PIC1_MREQ_OFFS)
-#define PIC1_TSTREQ	__REG(PIC1_BASE + PIC1_TSTREQ_OFFS)
-#define PIC1_POL	__REG(PIC1_BASE + PIC1_POL_OFFS)
-#define PIC1_IRQ	__REG(PIC1_BASE + PIC1_IRQ_OFFS)
-#define PIC1_FIQ	__REG(PIC1_BASE + PIC1_FIQ_OFFS)
-#define PIC1_MIRQ	__REG(PIC1_BASE + PIC1_MIRQ_OFFS)
-#define PIC1_MFIQ	__REG(PIC1_BASE + PIC1_MFIQ_OFFS)
-#define PIC1_TMODE	__REG(PIC1_BASE + PIC1_TMODE_OFFS)
-#define PIC1_SYNC	__REG(PIC1_BASE + PIC1_SYNC_OFFS)
-#define PIC1_WKUP	__REG(PIC1_BASE + PIC1_WKUP_OFFS)
-#define PIC1_TMODEA	__REG(PIC1_BASE + PIC1_TMODEA_OFFS)
-#define PIC1_INTOEN	__REG(PIC1_BASE + PIC1_INTOEN_OFFS)
-#define PIC1_MEN1	__REG(PIC1_BASE + PIC1_MEN1_OFFS)
-#define PIC1_MEN	__REG(PIC1_BASE + PIC1_MEN_OFFS)
-
-/* Timer registers */
-#define TIMER_BASE		(APB1_PERI_BASE_VIRT + 0x4000)
-#define TIMER_BASE_PHYS		(APB1_PERI_BASE + 0x4000)
-
-#define TWDCFG_OFFS		0x70
-
-#define TC32EN_OFFS		0x80
-#define TC32LDV_OFFS		0x84
-#define TC32CMP0_OFFS		0x88
-#define TC32CMP1_OFFS		0x8c
-#define TC32PCNT_OFFS		0x90
-#define TC32MCNT_OFFS		0x94
-#define TC32IRQ_OFFS		0x98
-
-/* Bits in TC32EN */
-#define TC32EN_PRESCALE_MASK	0x00ffffff
-#define TC32EN_ENABLE		(1 << 24)
-#define TC32EN_LOADZERO		(1 << 25)
-#define TC32EN_STOPMODE		(1 << 26)
-#define TC32EN_LDM0		(1 << 28)
-#define TC32EN_LDM1		(1 << 29)
-
-/* Bits in TC32IRQ */
-#define TC32IRQ_MSTAT_MASK	0x0000001f
-#define TC32IRQ_RSTAT_MASK	(0x1f << 8)
-#define TC32IRQ_IRQEN0		(1 << 16)
-#define TC32IRQ_IRQEN1		(1 << 17)
-#define TC32IRQ_IRQEN2		(1 << 18)
-#define TC32IRQ_IRQEN3		(1 << 19)
-#define TC32IRQ_IRQEN4		(1 << 20)
-#define TC32IRQ_RSYNC		(1 << 30)
-#define TC32IRQ_IRQCLR		(1 << 31)
-
-/* GPIO registers */
-#define GPIOPD_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOPD_DAT_OFFS		0x00
-#define GPIOPD_DOE_OFFS		0x04
-#define GPIOPD_FS0_OFFS		0x08
-#define GPIOPD_FS1_OFFS		0x0c
-#define GPIOPD_FS2_OFFS		0x10
-#define GPIOPD_RPU_OFFS		0x30
-#define GPIOPD_RPD_OFFS		0x34
-#define GPIOPD_DV0_OFFS		0x38
-#define GPIOPD_DV1_OFFS		0x3c
-
-#define GPIOPS_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOPS_DAT_OFFS		0x40
-#define GPIOPS_DOE_OFFS		0x44
-#define GPIOPS_FS0_OFFS		0x48
-#define GPIOPS_FS1_OFFS		0x4c
-#define GPIOPS_FS2_OFFS		0x50
-#define GPIOPS_FS3_OFFS		0x54
-#define GPIOPS_RPU_OFFS		0x70
-#define GPIOPS_RPD_OFFS		0x74
-#define GPIOPS_DV0_OFFS		0x78
-#define GPIOPS_DV1_OFFS		0x7c
-
-#define GPIOPS_FS1_SDH0_BITS	0x000000ff
-#define GPIOPS_FS1_SDH1_BITS	0x0000ff00
-
-#define GPIOPU_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOPU_DAT_OFFS		0x80
-#define GPIOPU_DOE_OFFS		0x84
-#define GPIOPU_FS0_OFFS		0x88
-#define GPIOPU_FS1_OFFS		0x8c
-#define GPIOPU_FS2_OFFS		0x90
-#define GPIOPU_RPU_OFFS		0xb0
-#define GPIOPU_RPD_OFFS		0xb4
-#define GPIOPU_DV0_OFFS		0xb8
-#define GPIOPU_DV1_OFFS		0xbc
-
-#define GPIOPU_FS0_TXD0		(1 << 0)
-#define GPIOPU_FS0_RXD0		(1 << 1)
-#define GPIOPU_FS0_CTS0		(1 << 2)
-#define GPIOPU_FS0_RTS0		(1 << 3)
-#define GPIOPU_FS0_TXD1		(1 << 4)
-#define GPIOPU_FS0_RXD1		(1 << 5)
-#define GPIOPU_FS0_CTS1		(1 << 6)
-#define GPIOPU_FS0_RTS1		(1 << 7)
-#define GPIOPU_FS0_TXD2		(1 << 8)
-#define GPIOPU_FS0_RXD2		(1 << 9)
-#define GPIOPU_FS0_CTS2		(1 << 10)
-#define GPIOPU_FS0_RTS2		(1 << 11)
-#define GPIOPU_FS0_TXD3		(1 << 12)
-#define GPIOPU_FS0_RXD3		(1 << 13)
-#define GPIOPU_FS0_CTS3		(1 << 14)
-#define GPIOPU_FS0_RTS3		(1 << 15)
-#define GPIOPU_FS0_TXD4		(1 << 16)
-#define GPIOPU_FS0_RXD4		(1 << 17)
-#define GPIOPU_FS0_CTS4		(1 << 18)
-#define GPIOPU_FS0_RTS4		(1 << 19)
-
-#define GPIOFC_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOFC_DAT_OFFS		0xc0
-#define GPIOFC_DOE_OFFS		0xc4
-#define GPIOFC_FS0_OFFS		0xc8
-#define GPIOFC_FS1_OFFS		0xcc
-#define GPIOFC_FS2_OFFS		0xd0
-#define GPIOFC_FS3_OFFS		0xd4
-#define GPIOFC_RPU_OFFS		0xf0
-#define GPIOFC_RPD_OFFS		0xf4
-#define GPIOFC_DV0_OFFS		0xf8
-#define GPIOFC_DV1_OFFS		0xfc
-
-#define GPIOFD_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOFD_DAT_OFFS		0x100
-#define GPIOFD_DOE_OFFS		0x104
-#define GPIOFD_FS0_OFFS		0x108
-#define GPIOFD_FS1_OFFS		0x10c
-#define GPIOFD_FS2_OFFS		0x110
-#define GPIOFD_RPU_OFFS		0x130
-#define GPIOFD_RPD_OFFS		0x134
-#define GPIOFD_DV0_OFFS		0x138
-#define GPIOFD_DV1_OFFS		0x13c
-
-#define GPIOLC_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOLC_DAT_OFFS		0x140
-#define GPIOLC_DOE_OFFS		0x144
-#define GPIOLC_FS0_OFFS		0x148
-#define GPIOLC_FS1_OFFS		0x14c
-#define GPIOLC_RPU_OFFS		0x170
-#define GPIOLC_RPD_OFFS		0x174
-#define GPIOLC_DV0_OFFS		0x178
-#define GPIOLC_DV1_OFFS		0x17c
-
-#define GPIOLD_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOLD_DAT_OFFS		0x180
-#define GPIOLD_DOE_OFFS		0x184
-#define GPIOLD_FS0_OFFS		0x188
-#define GPIOLD_FS1_OFFS		0x18c
-#define GPIOLD_FS2_OFFS		0x190
-#define GPIOLD_RPU_OFFS		0x1b0
-#define GPIOLD_RPD_OFFS		0x1b4
-#define GPIOLD_DV0_OFFS		0x1b8
-#define GPIOLD_DV1_OFFS		0x1bc
-
-#define GPIOAD_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOAD_DAT_OFFS		0x1c0
-#define GPIOAD_DOE_OFFS		0x1c4
-#define GPIOAD_FS0_OFFS		0x1c8
-#define GPIOAD_RPU_OFFS		0x1f0
-#define GPIOAD_RPD_OFFS		0x1f4
-#define GPIOAD_DV0_OFFS		0x1f8
-#define GPIOAD_DV1_OFFS		0x1fc
-
-#define GPIOXC_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOXC_DAT_OFFS		0x200
-#define GPIOXC_DOE_OFFS		0x204
-#define GPIOXC_FS0_OFFS		0x208
-#define GPIOXC_RPU_OFFS		0x230
-#define GPIOXC_RPD_OFFS		0x234
-#define GPIOXC_DV0_OFFS		0x238
-#define GPIOXC_DV1_OFFS		0x23c
-
-#define GPIOXC_FS0		__REG(GPIOXC_BASE + GPIOXC_FS0_OFFS)
-
-#define GPIOXC_FS0_CS0		(1 << 26)
-#define GPIOXC_FS0_CS1		(1 << 27)
-
-#define GPIOXD_BASE		(APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOXD_DAT_OFFS		0x240
-#define GPIOXD_FS0_OFFS		0x248
-#define GPIOXD_RPU_OFFS		0x270
-#define GPIOXD_RPD_OFFS		0x274
-#define GPIOXD_DV0_OFFS		0x278
-#define GPIOXD_DV1_OFFS		0x27c
-
-#define GPIOPK_BASE		(APB1_PERI_BASE_VIRT + 0x1c000)
-
-#define GPIOPK_RST_OFFS		0x008
-#define GPIOPK_DAT_OFFS		0x100
-#define GPIOPK_DOE_OFFS		0x104
-#define GPIOPK_FS0_OFFS		0x108
-#define GPIOPK_FS1_OFFS		0x10c
-#define GPIOPK_FS2_OFFS		0x110
-#define GPIOPK_IRQST_OFFS	0x210
-#define GPIOPK_IRQEN_OFFS	0x214
-#define GPIOPK_IRQPOL_OFFS	0x218
-#define GPIOPK_IRQTM0_OFFS	0x21c
-#define GPIOPK_IRQTM1_OFFS	0x220
-#define GPIOPK_CTL_OFFS		0x22c
-
-#define PMGPIO_BASE		(APB1_PERI_BASE_VIRT + 0x10000)
-#define BACKUP_RAM_BASE		PMGPIO_BASE
-
-#define PMGPIO_DAT_OFFS		0x800
-#define PMGPIO_DOE_OFFS		0x804
-#define PMGPIO_FS0_OFFS		0x808
-#define PMGPIO_RPU_OFFS		0x810
-#define PMGPIO_RPD_OFFS		0x814
-#define PMGPIO_DV0_OFFS		0x818
-#define PMGPIO_DV1_OFFS		0x81c
-#define PMGPIO_EE0_OFFS		0x820
-#define PMGPIO_EE1_OFFS		0x824
-#define PMGPIO_CTL_OFFS		0x828
-#define PMGPIO_DI_OFFS		0x82c
-#define PMGPIO_STR_OFFS		0x830
-#define PMGPIO_STF_OFFS		0x834
-#define PMGPIO_POL_OFFS		0x838
-#define PMGPIO_APB_OFFS		0x800
-
-/* Clock controller registers */
-#define CKC_BASE	((void __iomem *)(APB1_PERI_BASE_VIRT + 0x6000))
-
-#define CLKCTRL_OFFS		0x00
-#define PLL0CFG_OFFS		0x04
-#define PLL1CFG_OFFS		0x08
-#define CLKDIVC0_OFFS		0x0c
-
-#define BCLKCTR0_OFFS		0x14
-#define SWRESET0_OFFS		0x18
-
-#define BCLKCTR1_OFFS		0x60
-#define SWRESET1_OFFS		0x64
-#define PWDCTL_OFFS		0x68
-#define PLL2CFG_OFFS		0x6c
-#define CLKDIVC1_OFFS		0x70
-
-#define ACLKREF_OFFS		0x80
-#define ACLKI2C_OFFS		0x84
-#define ACLKSPI0_OFFS		0x88
-#define ACLKSPI1_OFFS		0x8c
-#define ACLKUART0_OFFS		0x90
-#define ACLKUART1_OFFS		0x94
-#define ACLKUART2_OFFS		0x98
-#define ACLKUART3_OFFS		0x9c
-#define ACLKUART4_OFFS		0xa0
-#define ACLKTCT_OFFS		0xa4
-#define ACLKTCX_OFFS		0xa8
-#define ACLKTCZ_OFFS		0xac
-#define ACLKADC_OFFS		0xb0
-#define ACLKDAI0_OFFS		0xb4
-#define ACLKDAI1_OFFS		0xb8
-#define ACLKLCD_OFFS		0xbc
-#define ACLKSPDIF_OFFS		0xc0
-#define ACLKUSBH_OFFS		0xc4
-#define ACLKSDH0_OFFS		0xc8
-#define ACLKSDH1_OFFS		0xcc
-#define ACLKC3DEC_OFFS		0xd0
-#define ACLKEXT_OFFS		0xd4
-#define ACLKCAN0_OFFS		0xd8
-#define ACLKCAN1_OFFS		0xdc
-#define ACLKGSB0_OFFS		0xe0
-#define ACLKGSB1_OFFS		0xe4
-#define ACLKGSB2_OFFS		0xe8
-#define ACLKGSB3_OFFS		0xec
-
-#define PLLxCFG_PD		(1 << 31)
-
-/* CLKCTRL bits */
-#define CLKCTRL_XE		(1 << 31)
-
-/* CLKDIVCx bits */
-#define CLKDIVC0_XTE		(1 << 7)
-#define CLKDIVC0_XE		(1 << 15)
-#define CLKDIVC0_P1E		(1 << 23)
-#define CLKDIVC0_P0E		(1 << 31)
-
-#define CLKDIVC1_P2E		(1 << 7)
-
-/* BCLKCTR0 clock bits */
-#define BCLKCTR0_USBD		(1 << 4)
-#define BCLKCTR0_ECC		(1 << 9)
-#define BCLKCTR0_USBH0		(1 << 11)
-#define BCLKCTR0_NFC		(1 << 16)
-
-/* BCLKCTR1 clock bits */
-#define BCLKCTR1_USBH1		(1 << 20)
-
-/* SWRESET0 bits */
-#define SWRESET0_USBD		(1 << 4)
-#define SWRESET0_USBH0		(1 << 11)
-
-/* SWRESET1 bits */
-#define SWRESET1_USBH1		(1 << 20)
-
-/* System clock sources.
- * Note: These are the clock sources that serve as parents for
- * all other clocks. They have no parents themselves.
- *
- * These values are used for struct clk->root_id. All clocks
- * that are not system clock sources have this value set to
- * CLK_SRC_NOROOT.
- * The values for system clocks start with CLK_SRC_PLL0 == 0
- * because this gives us exactly the values needed for the lower
- * 4 bits of ACLK_* registers. Therefore, CLK_SRC_NOROOT is
- * defined as -1 to not disturb the order.
- */
-enum root_clks {
-	CLK_SRC_NOROOT = -1,
-	CLK_SRC_PLL0 = 0,
-	CLK_SRC_PLL1,
-	CLK_SRC_PLL0DIV,
-	CLK_SRC_PLL1DIV,
-	CLK_SRC_XI,
-	CLK_SRC_XIDIV,
-	CLK_SRC_XTI,
-	CLK_SRC_XTIDIV,
-	CLK_SRC_PLL2,
-	CLK_SRC_PLL2DIV,
-	CLK_SRC_PK0,
-	CLK_SRC_PK1,
-	CLK_SRC_PK2,
-	CLK_SRC_PK3,
-	CLK_SRC_PK4,
-	CLK_SRC_48MHZ
-};
-
-#define CLK_SRC_MASK		0xf
-
-/* Bits in ACLK* registers */
-#define ACLK_EN		(1 << 28)
-#define ACLK_SEL_SHIFT		24
-#define ACLK_SEL_MASK		0x0f000000
-#define ACLK_DIV_MASK		0x00000fff
-
-/* System configuration registers */
-
-#define SCFG_BASE		(APB1_PERI_BASE_VIRT + 0x13000)
-
-#define	BMI_OFFS		0x00
-#define AHBCON0_OFFS		0x04
-#define APBPWE_OFFS		0x08
-#define DTCMWAIT_OFFS		0x0c
-#define ECCSEL_OFFS		0x10
-#define AHBCON1_OFFS		0x14
-#define SDHCFG_OFFS		0x18
-#define REMAP_OFFS		0x20
-#define LCDSIAE_OFFS		0x24
-#define XMCCFG_OFFS		0xe0
-#define IMCCFG_OFFS		0xe4
-
-/* Values for ECCSEL */
-#define ECCSEL_EXTMEM		0x0
-#define ECCSEL_DTCM		0x1
-#define ECCSEL_INT_SRAM		0x2
-#define ECCSEL_AHB		0x3
-
-/* Bits in XMCCFG */
-#define XMCCFG_NFCE		(1 << 1)
-#define XMCCFG_FDXD		(1 << 2)
-
-/* External memory controller registers */
-
-#define EMC_BASE		EXT_MEM_CTRL_BASE
-
-#define SDCFG_OFFS		0x00
-#define SDFSM_OFFS		0x04
-#define MCFG_OFFS		0x08
-
-#define CSCFG0_OFFS		0x10
-#define CSCFG1_OFFS		0x14
-#define CSCFG2_OFFS		0x18
-#define CSCFG3_OFFS		0x1c
-
-#define MCFG_SDEN		(1 << 4)
-
-#endif /* TCC8K_REGS_H */
diff --git a/arch/arm/plat-tcc/include/mach/timex.h b/arch/arm/plat-tcc/include/mach/timex.h
deleted file mode 100644
index 057acbe..0000000
--- a/arch/arm/plat-tcc/include/mach/timex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * A definition needed by arch core code.
- *
- */
-#define CLOCK_TICK_RATE		(HZ * 100000UL)
diff --git a/arch/arm/plat-tcc/include/mach/uncompress.h b/arch/arm/plat-tcc/include/mach/uncompress.h
deleted file mode 100644
index 7a3e33a..0000000
--- a/arch/arm/plat-tcc/include/mach/uncompress.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * This file is licensed under the terms of the GPL version 2.
- */
-
-#include <linux/serial_reg.h>
-#include <linux/types.h>
-
-#include <mach/tcc8k-regs.h>
-
-unsigned int system_rev;
-
-#define ID_MASK			0x7fff
-
-static void putc(int c)
-{
-	u32 *uart_lsr = (u32 *)(UART_BASE_PHYS + (UART_LSR << 2));
-	u32 *uart_tx = (u32 *)(UART_BASE_PHYS + (UART_TX << 2));
-
-	while (!(*uart_lsr & UART_LSR_THRE))
-		barrier();
-	*uart_tx = c;
-}
-
-static inline void flush(void)
-{
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
-#define arch_decomp_wdog()
diff --git a/arch/arm/plat-tcc/include/mach/vmalloc.h b/arch/arm/plat-tcc/include/mach/vmalloc.h
deleted file mode 100644
index 99414d9..0000000
--- a/arch/arm/plat-tcc/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Author: <linux@telechips.com>
- * Created: June 10, 2008
- *
- * Copyright (C) 2000 Russell King.
- * Copyright (C) 2008-2009 Telechips
- *
- * Licensed under the terms of the GPL v2.
- */
-#define VMALLOC_END	0xf0000000UL
diff --git a/arch/arm/plat-tcc/system.c b/arch/arm/plat-tcc/system.c
deleted file mode 100644
index cc208fa..0000000
--- a/arch/arm/plat-tcc/system.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * System functions for Telechips TCCxxxx SoCs
- *
- * Copyright (C) Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL v2.
- *
- */
-
-#include <linux/io.h>
-
-#include <mach/tcc8k-regs.h>
-
-/* System reboot */
-void plat_tcc_reboot(void)
-{
-	/* Make sure clocks are on */
-	__raw_writel(0xffffffff, CKC_BASE + BCLKCTR0_OFFS);
-
-	/* Enable watchdog reset */
-	__raw_writel(0x49, TIMER_BASE + TWDCFG_OFFS);
-	/* Wait for reset */
-	while(1)
-		;
-}
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
index 3d6a4c2..b33b74c 100644
--- a/arch/arm/plat-versatile/sched-clock.c
+++ b/arch/arm/plat-versatile/sched-clock.c
@@ -18,41 +18,24 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+#include <linux/kernel.h>
 #include <linux/io.h>
-#include <linux/sched.h>
 
 #include <asm/sched_clock.h>
 #include <plat/sched_clock.h>
 
-static DEFINE_CLOCK_DATA(cd);
 static void __iomem *ctr;
 
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 24MHz, NSEC_PER_SEC, 60).
- * This gives a resolution of about 41ns and a wrap period of about 178s.
- */
-#define SC_MULT		2796202667u
-#define SC_SHIFT	26
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace versatile_read_sched_clock(void)
 {
-	if (ctr) {
-		u32 cyc = readl(ctr);
-		return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0,
-						SC_MULT, SC_SHIFT);
-	} else
-		return 0;
-}
+	if (ctr)
+		return readl(ctr);
 
-static void notrace versatile_update_sched_clock(void)
-{
-	u32 cyc = readl(ctr);
-	update_sched_clock(&cd, cyc, (u32)~0);
+	return 0;
 }
 
 void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
 {
 	ctr = reg;
-	init_fixed_sched_clock(&cd, versatile_update_sched_clock,
-			       32, rate, SC_MULT, SC_SHIFT);
+	setup_sched_clock(versatile_read_sched_clock, 32, rate);
 }
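
For readers following the sched_clock conversion above: the new interface only asks for a raw counter read hook plus its width and rate, and the core derives the mult/shift itself. A minimal sketch of the pattern, assuming a hypothetical memory-mapped free-running 32-bit counter (my_counter and my_* names are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/sched_clock.h>

static void __iomem *my_counter;	/* hypothetical counter register */

static u32 notrace my_read_sched_clock(void)
{
	/* return raw counter bits; the core converts them to nanoseconds */
	return readl(my_counter);
}

static void __init my_sched_clock_init(void __iomem *reg, unsigned long rate)
{
	my_counter = reg;
	/* 32 valid counter bits, counting at 'rate' Hz */
	setup_sched_clock(my_read_sched_clock, 32, rate);
}
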
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index ccbe16f..f9c9f33 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -16,7 +16,7 @@
 # are merged into mainline or have been edited in the machine database
 # within the last 12 months.  References to machine_is_NAME() do not count!
 #
-# Last update: Sat May 7 08:48:24 2011
+# Last update: Tue Dec 6 11:07:38 2011
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -269,7 +269,7 @@
 omap3_beagle		MACH_OMAP3_BEAGLE	OMAP3_BEAGLE		1546
 nokia_n810		MACH_NOKIA_N810		NOKIA_N810		1548
 pcm038			MACH_PCM038		PCM038			1551
-ts_x09			MACH_TS209		TS209			1565
+ts209			MACH_TS209		TS209			1565
 at91cap9adk		MACH_AT91CAP9ADK	AT91CAP9ADK		1566
 mx31moboard		MACH_MX31MOBOARD	MX31MOBOARD		1574
 vision_ep9307		MACH_VISION_EP9307	VISION_EP9307		1578
@@ -321,7 +321,6 @@
 mx25_3ds		MACH_MX25_3DS		MX25_3DS		1771
 omap3530_lv_som		MACH_OMAP3530_LV_SOM	OMAP3530_LV_SOM		1773
 davinci_da830_evm	MACH_DAVINCI_DA830_EVM	DAVINCI_DA830_EVM	1781
-at572d940hfek		MACH_AT572D940HFEB	AT572D940HFEB		1783
 dove_db			MACH_DOVE_DB		DOVE_DB			1788
 overo			MACH_OVERO		OVERO			1798
 at2440evb		MACH_AT2440EVB		AT2440EVB		1799
@@ -459,7 +458,7 @@
 spear310		MACH_SPEAR310		SPEAR310		2660
 spear320		MACH_SPEAR320		SPEAR320		2661
 aquila			MACH_AQUILA		AQUILA			2676
-sheeva_esata		MACH_ESATA_SHEEVAPLUG	ESATA_SHEEVAPLUG	2678
+esata_sheevaplug	MACH_ESATA_SHEEVAPLUG	ESATA_SHEEVAPLUG	2678
 msm7x30_surf		MACH_MSM7X30_SURF	MSM7X30_SURF		2679
 ea2478devkit		MACH_EA2478DEVKIT	EA2478DEVKIT		2683
 terastation_wxl		MACH_TERASTATION_WXL	TERASTATION_WXL		2697
@@ -491,380 +490,53 @@
 eukrea_cpuimx51sd	MACH_EUKREA_CPUIMX51SD	EUKREA_CPUIMX51SD	2822
 eukrea_cpuimx51		MACH_EUKREA_CPUIMX51	EUKREA_CPUIMX51		2823
 smdkc210		MACH_SMDKC210		SMDKC210		2838
-omap3_braillo		MACH_OMAP3_BRAILLO	OMAP3_BRAILLO		2839
-spyplug			MACH_SPYPLUG		SPYPLUG			2840
-ginger			MACH_GINGER		GINGER			2841
-tny_t3530		MACH_TNY_T3530		TNY_T3530		2842
 pca102			MACH_PCA102		PCA102			2843
-spade			MACH_SPADE		SPADE			2844
-mxc25_topaz		MACH_MXC25_TOPAZ	MXC25_TOPAZ		2845
 t5325			MACH_T5325		T5325			2846
-gw2361			MACH_GW2361		GW2361			2847
-elog			MACH_ELOG		ELOG			2848
 income			MACH_INCOME		INCOME			2849
-bcm589x			MACH_BCM589X		BCM589X			2850
-etna			MACH_ETNA		ETNA			2851
-hawks			MACH_HAWKS		HAWKS			2852
-meson			MACH_MESON		MESON			2853
-xsbase255		MACH_XSBASE255		XSBASE255		2854
-pvm2030			MACH_PVM2030		PVM2030			2855
-mioa502			MACH_MIOA502		MIOA502			2856
 vvbox_sdorig2		MACH_VVBOX_SDORIG2	VVBOX_SDORIG2		2857
 vvbox_sdlite2		MACH_VVBOX_SDLITE2	VVBOX_SDLITE2		2858
 vvbox_sdpro4		MACH_VVBOX_SDPRO4	VVBOX_SDPRO4		2859
-htc_spv_m700		MACH_HTC_SPV_M700	HTC_SPV_M700		2860
 mx257sx			MACH_MX257SX		MX257SX			2861
 goni			MACH_GONI		GONI			2862
-msm8x55_svlte_ffa	MACH_MSM8X55_SVLTE_FFA	MSM8X55_SVLTE_FFA	2863
-msm8x55_svlte_surf	MACH_MSM8X55_SVLTE_SURF	MSM8X55_SVLTE_SURF	2864
-quickstep		MACH_QUICKSTEP		QUICKSTEP		2865
-dmw96			MACH_DMW96		DMW96			2866
-hammerhead		MACH_HAMMERHEAD		HAMMERHEAD		2867
-trident			MACH_TRIDENT		TRIDENT			2868
-lightning		MACH_LIGHTNING		LIGHTNING		2869
-iconnect		MACH_ICONNECT		ICONNECT		2870
-autobot			MACH_AUTOBOT		AUTOBOT			2871
-coconut			MACH_COCONUT		COCONUT			2872
-durian			MACH_DURIAN		DURIAN			2873
-cayenne			MACH_CAYENNE		CAYENNE			2874
-fuji			MACH_FUJI		FUJI			2875
-synology_6282		MACH_SYNOLOGY_6282	SYNOLOGY_6282		2876
-em1sy			MACH_EM1SY		EM1SY			2877
-m502			MACH_M502		M502			2878
-matrix518		MACH_MATRIX518		MATRIX518		2879
-tiny_gurnard		MACH_TINY_GURNARD	TINY_GURNARD		2880
-spear1310		MACH_SPEAR1310		SPEAR1310		2881
 bv07			MACH_BV07		BV07			2882
-mxt_td61		MACH_MXT_TD61		MXT_TD61		2883
 openrd_ultimate		MACH_OPENRD_ULTIMATE	OPENRD_ULTIMATE		2884
 devixp			MACH_DEVIXP		DEVIXP			2885
 miccpt			MACH_MICCPT		MICCPT			2886
 mic256			MACH_MIC256		MIC256			2887
-as1167			MACH_AS1167		AS1167			2888
-omap3_ibiza		MACH_OMAP3_IBIZA	OMAP3_IBIZA		2889
 u5500			MACH_U5500		U5500			2890
-davinci_picto		MACH_DAVINCI_PICTO	DAVINCI_PICTO		2891
-mecha			MACH_MECHA		MECHA			2892
-bubba3			MACH_BUBBA3		BUBBA3			2893
-pupitre			MACH_PUPITRE		PUPITRE			2894
-tegra_vogue		MACH_TEGRA_VOGUE	TEGRA_VOGUE		2896
-tegra_e1165		MACH_TEGRA_E1165	TEGRA_E1165		2897
-simplenet		MACH_SIMPLENET		SIMPLENET		2898
-ec4350tbm		MACH_EC4350TBM		EC4350TBM		2899
-pec_tc			MACH_PEC_TC		PEC_TC			2900
-pec_hc2			MACH_PEC_HC2		PEC_HC2			2901
-esl_mobilis_a		MACH_ESL_MOBILIS_A	ESL_MOBILIS_A		2902
-esl_mobilis_b		MACH_ESL_MOBILIS_B	ESL_MOBILIS_B		2903
-esl_wave_a		MACH_ESL_WAVE_A		ESL_WAVE_A		2904
-esl_wave_b		MACH_ESL_WAVE_B		ESL_WAVE_B		2905
-unisense_mmm		MACH_UNISENSE_MMM	UNISENSE_MMM		2906
-blueshark		MACH_BLUESHARK		BLUESHARK		2907
-e10			MACH_E10		E10			2908
-app3k_robin		MACH_APP3K_ROBIN	APP3K_ROBIN		2909
-pov15hd			MACH_POV15HD		POV15HD			2910
-stella			MACH_STELLA		STELLA			2911
 linkstation_lschl	MACH_LINKSTATION_LSCHL	LINKSTATION_LSCHL	2913
-netwalker		MACH_NETWALKER		NETWALKER		2914
-acsx106			MACH_ACSX106		ACSX106			2915
-atlas5_c1		MACH_ATLAS5_C1		ATLAS5_C1		2916
-nsb3ast			MACH_NSB3AST		NSB3AST			2917
-gnet_slc		MACH_GNET_SLC		GNET_SLC		2918
-af4000			MACH_AF4000		AF4000			2919
-ark9431			MACH_ARK9431		ARK9431			2920
-fs_s5pc100		MACH_FS_S5PC100		FS_S5PC100		2921
-omap3505nova8		MACH_OMAP3505NOVA8	OMAP3505NOVA8		2922
-omap3621_edp1		MACH_OMAP3621_EDP1	OMAP3621_EDP1		2923
-oratisaes		MACH_ORATISAES		ORATISAES		2924
 smdkv310		MACH_SMDKV310		SMDKV310		2925
-siemens_l0		MACH_SIEMENS_L0		SIEMENS_L0		2926
-ventana			MACH_VENTANA		VENTANA			2927
 wm8505_7in_netbook	MACH_WM8505_7IN_NETBOOK	WM8505_7IN_NETBOOK	2928
-ec4350sdb		MACH_EC4350SDB		EC4350SDB		2929
-mimas			MACH_MIMAS		MIMAS			2930
-titan			MACH_TITAN		TITAN			2931
 craneboard		MACH_CRANEBOARD		CRANEBOARD		2932
-es2440			MACH_ES2440		ES2440			2933
-najay_a9263		MACH_NAJAY_A9263	NAJAY_A9263		2934
-htctornado		MACH_HTCTORNADO		HTCTORNADO		2935
-dimm_mx257		MACH_DIMM_MX257		DIMM_MX257		2936
-jigen301		MACH_JIGEN		JIGEN			2937
 smdk6450		MACH_SMDK6450		SMDK6450		2938
-meno_qng		MACH_MENO_QNG		MENO_QNG		2939
-ns2416			MACH_NS2416		NS2416			2940
-rpc353			MACH_RPC353		RPC353			2941
-tq6410			MACH_TQ6410		TQ6410			2942
-sky6410			MACH_SKY6410		SKY6410			2943
-dynasty			MACH_DYNASTY		DYNASTY			2944
-vivo			MACH_VIVO		VIVO			2945
-bury_bl7582		MACH_BURY_BL7582	BURY_BL7582		2946
-bury_bps5270		MACH_BURY_BPS5270	BURY_BPS5270		2947
-basi			MACH_BASI		BASI			2948
-tn200			MACH_TN200		TN200			2949
-c2mmi			MACH_C2MMI		C2MMI			2950
-meson_6236m		MACH_MESON_6236M	MESON_6236M		2951
-meson_8626m		MACH_MESON_8626M	MESON_8626M		2952
-tube			MACH_TUBE		TUBE			2953
-messina			MACH_MESSINA		MESSINA			2954
-mx50_arm2		MACH_MX50_ARM2		MX50_ARM2		2955
-cetus9263		MACH_CETUS9263		CETUS9263		2956
 brownstone		MACH_BROWNSTONE		BROWNSTONE		2957
-vmx25			MACH_VMX25		VMX25			2958
-vmx51			MACH_VMX51		VMX51			2959
-abacus			MACH_ABACUS		ABACUS			2960
-cm4745			MACH_CM4745		CM4745			2961
-oratislink		MACH_ORATISLINK		ORATISLINK		2962
-davinci_dm365_dvr	MACH_DAVINCI_DM365_DVR	DAVINCI_DM365_DVR	2963
-netviz			MACH_NETVIZ		NETVIZ			2964
 flexibity		MACH_FLEXIBITY		FLEXIBITY		2965
-wlan_computer		MACH_WLAN_COMPUTER	WLAN_COMPUTER		2966
-lpc24xx			MACH_LPC24XX		LPC24XX			2967
-spica			MACH_SPICA		SPICA			2968
-gpsdisplay		MACH_GPSDISPLAY		GPSDISPLAY		2969
-bipnet			MACH_BIPNET		BIPNET			2970
-overo_ctu_inertial	MACH_OVERO_CTU_INERTIAL	OVERO_CTU_INERTIAL	2971
-davinci_dm355_mmm	MACH_DAVINCI_DM355_MMM	DAVINCI_DM355_MMM	2972
-pc9260_v2		MACH_PC9260_V2		PC9260_V2		2973
-ptx7545			MACH_PTX7545		PTX7545			2974
-tm_efdc			MACH_TM_EFDC		TM_EFDC			2975
-omap3_waldo1		MACH_OMAP3_WALDO1	OMAP3_WALDO1		2977
-flyer			MACH_FLYER		FLYER			2978
-tornado3240		MACH_TORNADO3240	TORNADO3240		2979
-soli_01			MACH_SOLI_01		SOLI_01			2980
-omapl138_europalc	MACH_OMAPL138_EUROPALC	OMAPL138_EUROPALC	2981
-helios_v1		MACH_HELIOS_V1		HELIOS_V1		2982
-netspace_lite_v2	MACH_NETSPACE_LITE_V2	NETSPACE_LITE_V2	2983
-ssc			MACH_SSC		SSC			2984
-premierwave_en		MACH_PREMIERWAVE_EN	PREMIERWAVE_EN		2985
-wasabi			MACH_WASABI		WASABI			2986
 mx50_rdp		MACH_MX50_RDP		MX50_RDP		2988
 universal_c210		MACH_UNIVERSAL_C210	UNIVERSAL_C210		2989
 real6410		MACH_REAL6410		REAL6410		2990
-spx_sakura		MACH_SPX_SAKURA		SPX_SAKURA		2991
-ij3k_2440		MACH_IJ3K_2440		IJ3K_2440		2992
-omap3_bc10		MACH_OMAP3_BC10		OMAP3_BC10		2993
-thebe			MACH_THEBE		THEBE			2994
-rv082			MACH_RV082		RV082			2995
-armlguest		MACH_ARMLGUEST		ARMLGUEST		2996
-tjinc1000		MACH_TJINC1000		TJINC1000		2997
 dockstar		MACH_DOCKSTAR		DOCKSTAR		2998
-ax8008			MACH_AX8008		AX8008			2999
-gnet_sgce		MACH_GNET_SGCE		GNET_SGCE		3000
-pxwnas_500_1000		MACH_PXWNAS_500_1000	PXWNAS_500_1000		3001
-ea20			MACH_EA20		EA20			3002
-awm2			MACH_AWM2		AWM2			3003
 ti8148evm		MACH_TI8148EVM		TI8148EVM		3004
 seaboard		MACH_SEABOARD		SEABOARD		3005
-linkstation_chlv2	MACH_LINKSTATION_CHLV2	LINKSTATION_CHLV2	3006
-tera_pro2_rack		MACH_TERA_PRO2_RACK	TERA_PRO2_RACK		3007
-rubys			MACH_RUBYS		RUBYS			3008
-aquarius		MACH_AQUARIUS		AQUARIUS		3009
 mx53_ard		MACH_MX53_ARD		MX53_ARD		3010
 mx53_smd		MACH_MX53_SMD		MX53_SMD		3011
-lswxl			MACH_LSWXL		LSWXL			3012
-dove_avng_v3		MACH_DOVE_AVNG_V3	DOVE_AVNG_V3		3013
-sdi_ess_9263		MACH_SDI_ESS_9263	SDI_ESS_9263		3014
-jocpu550		MACH_JOCPU550		JOCPU550		3015
 msm8x60_rumi3		MACH_MSM8X60_RUMI3	MSM8X60_RUMI3		3016
 msm8x60_ffa		MACH_MSM8X60_FFA	MSM8X60_FFA		3017
-yanomami		MACH_YANOMAMI		YANOMAMI		3018
-gta04			MACH_GTA04		GTA04			3019
 cm_a510			MACH_CM_A510		CM_A510			3020
-omap3_rfs200		MACH_OMAP3_RFS200	OMAP3_RFS200		3021
-kx33xx			MACH_KX33XX		KX33XX			3022
-ptx7510			MACH_PTX7510		PTX7510			3023
-top9000			MACH_TOP9000		TOP9000			3024
-teenote			MACH_TEENOTE		TEENOTE			3025
-ts3			MACH_TS3		TS3			3026
-a0			MACH_A0			A0			3027
-fsm9xxx_surf		MACH_FSM9XXX_SURF	FSM9XXX_SURF		3028
-fsm9xxx_ffa		MACH_FSM9XXX_FFA	FSM9XXX_FFA		3029
-frrhwcdma60w		MACH_FRRHWCDMA60W	FRRHWCDMA60W		3030
-remus			MACH_REMUS		REMUS			3031
-at91cap7xdk		MACH_AT91CAP7XDK	AT91CAP7XDK		3032
-at91cap7stk		MACH_AT91CAP7STK	AT91CAP7STK		3033
-kt_sbc_sam9_1		MACH_KT_SBC_SAM9_1	KT_SBC_SAM9_1		3034
-armada_xp_db		MACH_ARMADA_XP_DB	ARMADA_XP_DB		3036
-spdm			MACH_SPDM		SPDM			3037
-gtib			MACH_GTIB		GTIB			3038
-dgm3240			MACH_DGM3240		DGM3240			3039
-htcmega			MACH_HTCMEGA		HTCMEGA			3041
-tricorder		MACH_TRICORDER		TRICORDER		3042
 tx28			MACH_TX28		TX28			3043
-bstbrd			MACH_BSTBRD		BSTBRD			3044
-pwb3090			MACH_PWB3090		PWB3090			3045
-idea6410		MACH_IDEA6410		IDEA6410		3046
-qbc9263			MACH_QBC9263		QBC9263			3047
-borabora		MACH_BORABORA		BORABORA		3048
-valdez			MACH_VALDEZ		VALDEZ			3049
-ls9g20			MACH_LS9G20		LS9G20			3050
-mios_v1			MACH_MIOS_V1		MIOS_V1			3051
-s5pc110_crespo		MACH_S5PC110_CRESPO	S5PC110_CRESPO		3052
-controltek9g20		MACH_CONTROLTEK9G20	CONTROLTEK9G20		3053
-tin307			MACH_TIN307		TIN307			3054
-tin510			MACH_TIN510		TIN510			3055
-bluecheese		MACH_BLUECHEESE		BLUECHEESE		3057
-tem3x30			MACH_TEM3X30		TEM3X30			3058
-harvest_desoto		MACH_HARVEST_DESOTO	HARVEST_DESOTO		3059
-msm8x60_qrdc		MACH_MSM8X60_QRDC	MSM8X60_QRDC		3060
-spear900		MACH_SPEAR900		SPEAR900		3061
 pcontrol_g20		MACH_PCONTROL_G20	PCONTROL_G20		3062
-rdstor			MACH_RDSTOR		RDSTOR			3063
-usdloader		MACH_USDLOADER		USDLOADER		3064
-tsoploader		MACH_TSOPLOADER		TSOPLOADER		3065
-kronos			MACH_KRONOS		KRONOS			3066
-ffcore			MACH_FFCORE		FFCORE			3067
-mone			MACH_MONE		MONE			3068
-unit2s			MACH_UNIT2S		UNIT2S			3069
-acer_a5			MACH_ACER_A5		ACER_A5			3070
-etherpro_isp		MACH_ETHERPRO_ISP	ETHERPRO_ISP		3071
-stretchs7000		MACH_STRETCHS7000	STRETCHS7000		3072
-p87_smartsim		MACH_P87_SMARTSIM	P87_SMARTSIM		3073
-tulip			MACH_TULIP		TULIP			3074
-sunflower		MACH_SUNFLOWER		SUNFLOWER		3075
-rib			MACH_RIB		RIB			3076
-clod			MACH_CLOD		CLOD			3077
-rump			MACH_RUMP		RUMP			3078
-tenderloin		MACH_TENDERLOIN		TENDERLOIN		3079
-shortloin		MACH_SHORTLOIN		SHORTLOIN		3080
-antares			MACH_ANTARES		ANTARES			3082
-wb40n			MACH_WB40N		WB40N			3083
-herring			MACH_HERRING		HERRING			3084
-naxy400			MACH_NAXY400		NAXY400			3085
-naxy1200		MACH_NAXY1200		NAXY1200		3086
 vpr200			MACH_VPR200		VPR200			3087
-bug20			MACH_BUG20		BUG20			3088
-goflexnet		MACH_GOFLEXNET		GOFLEXNET		3089
 torbreck		MACH_TORBRECK		TORBRECK		3090
-saarb_mg1		MACH_SAARB_MG1		SAARB_MG1		3091
-callisto		MACH_CALLISTO		CALLISTO		3092
-multhsu			MACH_MULTHSU		MULTHSU			3093
-saluda			MACH_SALUDA		SALUDA			3094
-pemp_omap3_apollo	MACH_PEMP_OMAP3_APOLLO	PEMP_OMAP3_APOLLO	3095
-vc0718			MACH_VC0718		VC0718			3096
-mvblx			MACH_MVBLX		MVBLX			3097
-inhand_apeiron		MACH_INHAND_APEIRON	INHAND_APEIRON		3098
-inhand_fury		MACH_INHAND_FURY	INHAND_FURY		3099
-inhand_siren		MACH_INHAND_SIREN	INHAND_SIREN		3100
-hdnvp			MACH_HDNVP		HDNVP			3101
-softwinner		MACH_SOFTWINNER		SOFTWINNER		3102
 prima2_evb		MACH_PRIMA2_EVB		PRIMA2_EVB		3103
-nas6210			MACH_NAS6210		NAS6210			3104
-unisdev			MACH_UNISDEV		UNISDEV			3105
-sbca11			MACH_SBCA11		SBCA11			3106
-saga			MACH_SAGA		SAGA			3107
-ns_k330			MACH_NS_K330		NS_K330			3108
-tanna			MACH_TANNA		TANNA			3109
-imate8502		MACH_IMATE8502		IMATE8502		3110
-aspen			MACH_ASPEN		ASPEN			3111
-daintree_cwac		MACH_DAINTREE_CWAC	DAINTREE_CWAC		3112
-zmx25			MACH_ZMX25		ZMX25			3113
-maple1			MACH_MAPLE1		MAPLE1			3114
-qsd8x72_surf		MACH_QSD8X72_SURF	QSD8X72_SURF		3115
-qsd8x72_ffa		MACH_QSD8X72_FFA	QSD8X72_FFA		3116
-abilene			MACH_ABILENE		ABILENE			3117
-eigen_ttr		MACH_EIGEN_TTR		EIGEN_TTR		3118
-iomega_ix2_200		MACH_IOMEGA_IX2_200	IOMEGA_IX2_200		3119
-coretec_vcx7400		MACH_CORETEC_VCX7400	CORETEC_VCX7400		3120
-santiago		MACH_SANTIAGO		SANTIAGO		3121
-mx257sol		MACH_MX257SOL		MX257SOL		3122
-strasbourg		MACH_STRASBOURG		STRASBOURG		3123
-msm8x60_fluid		MACH_MSM8X60_FLUID	MSM8X60_FLUID		3124
-smartqv5		MACH_SMARTQV5		SMARTQV5		3125
-smartqv3		MACH_SMARTQV3		SMARTQV3		3126
-smartqv7		MACH_SMARTQV7		SMARTQV7		3127
 paz00			MACH_PAZ00		PAZ00			3128
 acmenetusfoxg20		MACH_ACMENETUSFOXG20	ACMENETUSFOXG20		3129
-fwbd_0404		MACH_FWBD_0404		FWBD_0404		3131
-hdgu			MACH_HDGU		HDGU			3132
-pyramid			MACH_PYRAMID		PYRAMID			3133
-epiphan			MACH_EPIPHAN		EPIPHAN			3134
-omap_bender		MACH_OMAP_BENDER	OMAP_BENDER		3135
-gurnard			MACH_GURNARD		GURNARD			3136
-gtl_it5100		MACH_GTL_IT5100		GTL_IT5100		3137
-bcm2708			MACH_BCM2708		BCM2708			3138
-mx51_ggc		MACH_MX51_GGC		MX51_GGC		3139
-sharespace		MACH_SHARESPACE		SHARESPACE		3140
-haba_knx_explorer	MACH_HABA_KNX_EXPLORER	HABA_KNX_EXPLORER	3141
-simtec_kirkmod		MACH_SIMTEC_KIRKMOD	SIMTEC_KIRKMOD		3142
-crux			MACH_CRUX		CRUX			3143
-mx51_bravo		MACH_MX51_BRAVO		MX51_BRAVO		3144
-charon			MACH_CHARON		CHARON			3145
-picocom3		MACH_PICOCOM3		PICOCOM3		3146
-picocom4		MACH_PICOCOM4		PICOCOM4		3147
-serrano			MACH_SERRANO		SERRANO			3148
-doubleshot		MACH_DOUBLESHOT		DOUBLESHOT		3149
-evsy			MACH_EVSY		EVSY			3150
-huashan			MACH_HUASHAN		HUASHAN			3151
-lausanne		MACH_LAUSANNE		LAUSANNE		3152
-emerald			MACH_EMERALD		EMERALD			3153
-tqma35			MACH_TQMA35		TQMA35			3154
-marvel			MACH_MARVEL		MARVEL			3155
-manuae			MACH_MANUAE		MANUAE			3156
-chacha			MACH_CHACHA		CHACHA			3157
-lemon			MACH_LEMON		LEMON			3158
-csc			MACH_CSC		CSC			3159
-gira_knxip_router	MACH_GIRA_KNXIP_ROUTER	GIRA_KNXIP_ROUTER	3160
-t20			MACH_T20		T20			3161
-hdmini			MACH_HDMINI		HDMINI			3162
-sciphone_g2		MACH_SCIPHONE_G2	SCIPHONE_G2		3163
-express			MACH_EXPRESS		EXPRESS			3164
-express_kt		MACH_EXPRESS_KT		EXPRESS_KT		3165
-maximasp		MACH_MAXIMASP		MAXIMASP		3166
-nitrogen_imx51		MACH_NITROGEN_IMX51	NITROGEN_IMX51		3167
-nitrogen_imx53		MACH_NITROGEN_IMX53	NITROGEN_IMX53		3168
-sunfire			MACH_SUNFIRE		SUNFIRE			3169
-arowana			MACH_AROWANA		AROWANA			3170
-tegra_daytona		MACH_TEGRA_DAYTONA	TEGRA_DAYTONA		3171
-tegra_swordfish		MACH_TEGRA_SWORDFISH	TEGRA_SWORDFISH		3172
-edison			MACH_EDISON		EDISON			3173
-svp8500v1		MACH_SVP8500V1		SVP8500V1		3174
-svp8500v2		MACH_SVP8500V2		SVP8500V2		3175
-svp5500			MACH_SVP5500		SVP5500			3176
-b5500			MACH_B5500		B5500			3177
-s5500			MACH_S5500		S5500			3178
-icon			MACH_ICON		ICON			3179
-elephant		MACH_ELEPHANT		ELEPHANT		3180
-shooter			MACH_SHOOTER		SHOOTER			3182
-spade_lte		MACH_SPADE_LTE		SPADE_LTE		3183
-philhwani		MACH_PHILHWANI		PHILHWANI		3184
-gsncomm			MACH_GSNCOMM		GSNCOMM			3185
-strasbourg_a2		MACH_STRASBOURG_A2	STRASBOURG_A2		3186
-mmm			MACH_MMM		MMM			3187
-davinci_dm365_bv	MACH_DAVINCI_DM365_BV	DAVINCI_DM365_BV	3188
 ag5evm			MACH_AG5EVM		AG5EVM			3189
-sc575plc		MACH_SC575PLC		SC575PLC		3190
-sc575hmi		MACH_SC575IPC		SC575IPC		3191
-omap3_tdm3730		MACH_OMAP3_TDM3730	OMAP3_TDM3730		3192
-top9000_eval		MACH_TOP9000_EVAL	TOP9000_EVAL		3194
-top9000_su		MACH_TOP9000_SU		TOP9000_SU		3195
-utm300			MACH_UTM300		UTM300			3196
 tsunagi			MACH_TSUNAGI		TSUNAGI			3197
-ts75xx			MACH_TS75XX		TS75XX			3198
-ts47xx			MACH_TS47XX		TS47XX			3200
-da850_k5		MACH_DA850_K5		DA850_K5		3201
-ax502			MACH_AX502		AX502			3202
-igep0032		MACH_IGEP0032		IGEP0032		3203
-antero			MACH_ANTERO		ANTERO			3204
-synergy			MACH_SYNERGY		SYNERGY			3205
 ics_if_voip		MACH_ICS_IF_VOIP	ICS_IF_VOIP		3206
 wlf_cragg_6410		MACH_WLF_CRAGG_6410	WLF_CRAGG_6410		3207
-punica			MACH_PUNICA		PUNICA			3208
 trimslice		MACH_TRIMSLICE		TRIMSLICE		3209
-mx27_wmultra		MACH_MX27_WMULTRA	MX27_WMULTRA		3210
 mackerel		MACH_MACKEREL		MACKEREL		3211
-fa9x27			MACH_FA9X27		FA9X27			3213
-ns2816tb		MACH_NS2816TB		NS2816TB		3214
-ns2816_ntpad		MACH_NS2816_NTPAD	NS2816_NTPAD		3215
-ns2816_ntnb		MACH_NS2816_NTNB	NS2816_NTNB		3216
 kaen			MACH_KAEN		KAEN			3217
-nv1000			MACH_NV1000		NV1000			3218
-nuc950ts		MACH_NUC950TS		NUC950TS		3219
 nokia_rm680		MACH_NOKIA_RM680	NOKIA_RM680		3220
-ast2200			MACH_AST2200		AST2200			3221
-lead			MACH_LEAD		LEAD			3222
-unino1			MACH_UNINO1		UNINO1			3223
-greeco			MACH_GREECO		GREECO			3224
-verdi			MACH_VERDI		VERDI			3225
 dm6446_adbox		MACH_DM6446_ADBOX	DM6446_ADBOX		3226
 quad_salsa		MACH_QUAD_SALSA		QUAD_SALSA		3227
 abb_gma_1_1		MACH_ABB_GMA_1_1	ABB_GMA_1_1		3228
@@ -949,13 +621,11 @@
 ts4800			MACH_TS4800		TS4800			3313
 tqma9263		MACH_TQMA9263		TQMA9263		3314
 holiday			MACH_HOLIDAY		HOLIDAY			3315
-dma_6410		MACH_DMA6410		DMA6410			3316
 pcats_overlay		MACH_PCATS_OVERLAY	PCATS_OVERLAY		3317
 hwgw6410		MACH_HWGW6410		HWGW6410		3318
 shenzhou		MACH_SHENZHOU		SHENZHOU		3319
 cwme9210		MACH_CWME9210		CWME9210		3320
 cwme9210js		MACH_CWME9210JS		CWME9210JS		3321
-pgs_v1			MACH_PGS_SITARA		PGS_SITARA		3322
 colibri_tegra2		MACH_COLIBRI_TEGRA2	COLIBRI_TEGRA2		3323
 w21			MACH_W21		W21			3324
 polysat1		MACH_POLYSAT1		POLYSAT1		3325
@@ -1021,13 +691,11 @@
 bockw			MACH_BOCKW		BOCKW			3386
 eva2000			MACH_EVA2000		EVA2000			3387
 steelyard		MACH_STEELYARD		STEELYARD		3388
-sdh001			MACH_MACH_SDH001	MACH_SDH001		3390
 nsslsboard		MACH_NSSLSBOARD		NSSLSBOARD		3392
 geneva_b5		MACH_GENEVA_B5		GENEVA_B5		3393
 spear1340		MACH_SPEAR1340		SPEAR1340		3394
 rexmas			MACH_REXMAS		REXMAS			3395
 msm8960_cdp		MACH_MSM8960_CDP	MSM8960_CDP		3396
-msm8960_mdp		MACH_MSM8960_MDP	MSM8960_MDP		3397
 msm8960_fluid		MACH_MSM8960_FLUID	MSM8960_FLUID		3398
 msm8960_apq		MACH_MSM8960_APQ	MSM8960_APQ		3399
 helios_v2		MACH_HELIOS_V2		HELIOS_V2		3400
@@ -1123,6 +791,381 @@
 thales_adc		MACH_THALES_ADC		THALES_ADC		3492
 ubisys_p9d_evp		MACH_UBISYS_P9D_EVP	UBISYS_P9D_EVP		3493
 atdgp318		MACH_ATDGP318		ATDGP318		3494
+dma210u			MACH_DMA210U		DMA210U			3495
+em_t3			MACH_EM_T3		EM_T3			3496
+htx3250			MACH_HTX3250		HTX3250			3497
+g50			MACH_G50		G50			3498
+eco5			MACH_ECO5		ECO5			3499
+wintergrasp		MACH_WINTERGRASP	WINTERGRASP		3500
+puro			MACH_PURO		PURO			3501
+shooter_k		MACH_SHOOTER_K		SHOOTER_K		3502
+nspire			MACH_NSPIRE		NSPIRE			3503
+mickxx			MACH_MICKXX		MICKXX			3504
+lxmb			MACH_LXMB		LXMB			3505
+adam			MACH_ADAM		ADAM			3507
+b1004			MACH_B1004		B1004			3508
+oboea			MACH_OBOEA		OBOEA			3509
+a1015			MACH_A1015		A1015			3510
+robin_vbdt30		MACH_ROBIN_VBDT30	ROBIN_VBDT30		3511
+tegra_enterprise	MACH_TEGRA_ENTERPRISE	TEGRA_ENTERPRISE	3512
+rfl108200_mk10		MACH_RFL108200_MK10	RFL108200_MK10		3513
+rfl108300_mk16		MACH_RFL108300_MK16	RFL108300_MK16		3514
+rover_v7		MACH_ROVER_V7		ROVER_V7		3515
+miphone			MACH_MIPHONE		MIPHONE			3516
+femtobts		MACH_FEMTOBTS		FEMTOBTS		3517
+monopoli		MACH_MONOPOLI		MONOPOLI		3518
+boss			MACH_BOSS		BOSS			3519
+davinci_dm368_vtam	MACH_DAVINCI_DM368_VTAM	DAVINCI_DM368_VTAM	3520
+clcon			MACH_CLCON		CLCON			3521
+nokia_rm696		MACH_NOKIA_RM696	NOKIA_RM696		3522
+tahiti			MACH_TAHITI		TAHITI			3523
+fighter			MACH_FIGHTER		FIGHTER			3524
+sgh_i710		MACH_SGH_I710		SGH_I710		3525
+integreproscb		MACH_INTEGREPROSCB	INTEGREPROSCB		3526
+monza			MACH_MONZA		MONZA			3527
+calimain		MACH_CALIMAIN		CALIMAIN		3528
+mx6q_sabreauto		MACH_MX6Q_SABREAUTO	MX6Q_SABREAUTO		3529
+gma01x			MACH_GMA01X		GMA01X			3530
+sbc51			MACH_SBC51		SBC51			3531
+fit			MACH_FIT		FIT			3532
+steelhead		MACH_STEELHEAD		STEELHEAD		3533
+panther			MACH_PANTHER		PANTHER			3534
+msm8960_liquid		MACH_MSM8960_LIQUID	MSM8960_LIQUID		3535
+lexikonct		MACH_LEXIKONCT		LEXIKONCT		3536
+ns2816_stb		MACH_NS2816_STB		NS2816_STB		3537
+sei_mm2_lpc3250		MACH_SEI_MM2_LPC3250	SEI_MM2_LPC3250		3538
+cmimx53			MACH_CMIMX53		CMIMX53			3539
+sandwich		MACH_SANDWICH		SANDWICH		3540
+chief			MACH_CHIEF		CHIEF			3541
+pogo_e02		MACH_POGO_E02		POGO_E02		3542
+mikrap_x168		MACH_MIKRAP_X168	MIKRAP_X168		3543
+htcmozart		MACH_HTCMOZART		HTCMOZART		3544
+htcgold			MACH_HTCGOLD		HTCGOLD			3545
+mt72xx			MACH_MT72XX		MT72XX			3546
+mx51_ivy		MACH_MX51_IVY		MX51_IVY		3547
+mx51_lvd		MACH_MX51_LVD		MX51_LVD		3548
+omap3_wiser2		MACH_OMAP3_WISER2	OMAP3_WISER2		3549
+dreamplug		MACH_DREAMPLUG		DREAMPLUG		3550
+cobas_c_111		MACH_COBAS_C_111	COBAS_C_111		3551
+cobas_u_411		MACH_COBAS_U_411	COBAS_U_411		3552
+hssd			MACH_HSSD		HSSD			3553
+iom35x			MACH_IOM35X		IOM35X			3554
+psom_omap		MACH_PSOM_OMAP		PSOM_OMAP		3555
+iphone_2g		MACH_IPHONE_2G		IPHONE_2G		3556
+iphone_3g		MACH_IPHONE_3G		IPHONE_3G		3557
+ipod_touch_1g		MACH_IPOD_TOUCH_1G	IPOD_TOUCH_1G		3558
+pharos_tpc		MACH_PHAROS_TPC		PHAROS_TPC		3559
+mx53_hydra		MACH_MX53_HYDRA		MX53_HYDRA		3560
+ns2816_dev_board	MACH_NS2816_DEV_BOARD	NS2816_DEV_BOARD	3561
+iphone_3gs		MACH_IPHONE_3GS		IPHONE_3GS		3562
+iphone_4		MACH_IPHONE_4		IPHONE_4		3563
+ipod_touch_4g		MACH_IPOD_TOUCH_4G	IPOD_TOUCH_4G		3564
+dragon_e1100		MACH_DRAGON_E1100	DRAGON_E1100		3565
+topside			MACH_TOPSIDE		TOPSIDE			3566
+irisiii			MACH_IRISIII		IRISIII			3567
+deto_macarm9		MACH_DETO_MACARM9	DETO_MACARM9		3568
+eti_d1			MACH_ETI_D1		ETI_D1			3569
+som3530sdk		MACH_SOM3530SDK		SOM3530SDK		3570
+oc_engine		MACH_OC_ENGINE		OC_ENGINE		3571
+apq8064_sim		MACH_APQ8064_SIM	APQ8064_SIM		3572
+alps			MACH_ALPS		ALPS			3575
+tny_t3730		MACH_TNY_T3730		TNY_T3730		3576
+geryon_nfe		MACH_GERYON_NFE		GERYON_NFE		3577
+ns2816_ref_board	MACH_NS2816_REF_BOARD	NS2816_REF_BOARD	3578
+silverstone		MACH_SILVERSTONE	SILVERSTONE		3579
+mtt2440			MACH_MTT2440		MTT2440			3580
+ynicdb			MACH_YNICDB		YNICDB			3581
+bct			MACH_BCT		BCT			3582
+tuscan			MACH_TUSCAN		TUSCAN			3583
+xbt_sam9g45		MACH_XBT_SAM9G45	XBT_SAM9G45		3584
+enbw_cmc		MACH_ENBW_CMC		ENBW_CMC		3585
+ch104mx257		MACH_CH104MX257		CH104MX257		3587
+openpri			MACH_OPENPRI		OPENPRI			3588
+am335xevm		MACH_AM335XEVM		AM335XEVM		3589
+picodmb			MACH_PICODMB		PICODMB			3590
+waluigi			MACH_WALUIGI		WALUIGI			3591
+punicag7		MACH_PUNICAG7		PUNICAG7		3592
+ipad_1g			MACH_IPAD_1G		IPAD_1G			3593
+appletv_2g		MACH_APPLETV_2G		APPLETV_2G		3594
+mach_ecog45		MACH_MACH_ECOG45	MACH_ECOG45		3595
+ait_cam_enc_4xx		MACH_AIT_CAM_ENC_4XX	AIT_CAM_ENC_4XX		3596
+runnymede		MACH_RUNNYMEDE		RUNNYMEDE		3597
+play			MACH_PLAY		PLAY			3598
+hw90260			MACH_HW90260		HW90260			3599
+tagh			MACH_TAGH		TAGH			3600
+filbert			MACH_FILBERT		FILBERT			3601
+getinge_netcomv3	MACH_GETINGE_NETCOMV3	GETINGE_NETCOMV3	3602
+cw20			MACH_CW20		CW20			3603
+cinema			MACH_CINEMA		CINEMA			3604
+cinema_tea		MACH_CINEMA_TEA		CINEMA_TEA		3605
+cinema_coffee		MACH_CINEMA_COFFEE	CINEMA_COFFEE		3606
+cinema_juice		MACH_CINEMA_JUICE	CINEMA_JUICE		3607
+mx53_mirage2		MACH_MX53_MIRAGE2	MX53_MIRAGE2		3609
+mx53_efikasb		MACH_MX53_EFIKASB	MX53_EFIKASB		3610
+stm_b2000		MACH_STM_B2000		STM_B2000		3612
 m28evk			MACH_M28EVK		M28EVK			3613
+pda			MACH_PDA		PDA			3614
+meraki_mr58		MACH_MERAKI_MR58	MERAKI_MR58		3615
+kota2			MACH_KOTA2		KOTA2			3616
+letcool			MACH_LETCOOL		LETCOOL			3617
+mx27iat			MACH_MX27IAT		MX27IAT			3618
+apollo_td		MACH_APOLLO_TD		APOLLO_TD		3619
+arena			MACH_ARENA		ARENA			3620
+gsngateway		MACH_GSNGATEWAY		GSNGATEWAY		3621
+lf2000			MACH_LF2000		LF2000			3622
+bonito			MACH_BONITO		BONITO			3623
+asymptote		MACH_ASYMPTOTE		ASYMPTOTE		3624
+bst2brd			MACH_BST2BRD		BST2BRD			3625
+tx335s			MACH_TX335S		TX335S			3626
+pelco_tesla		MACH_PELCO_TESLA	PELCO_TESLA		3627
+rrhtestplat		MACH_RRHTESTPLAT	RRHTESTPLAT		3628
+vidtonic_pro		MACH_VIDTONIC_PRO	VIDTONIC_PRO		3629
+pl_apollo		MACH_PL_APOLLO		PL_APOLLO		3630
+pl_phoenix		MACH_PL_PHOENIX		PL_PHOENIX		3631
+m28cu3			MACH_M28CU3		M28CU3			3632
+vvbox_hd		MACH_VVBOX_HD		VVBOX_HD		3633
+coreware_sam9260_	MACH_COREWARE_SAM9260_	COREWARE_SAM9260_	3634
+marmaduke		MACH_MARMADUKE		MARMADUKE		3635
+amg_xlcore_camera	MACH_AMG_XLCORE_CAMERA	AMG_XLCORE_CAMERA	3636
+omap3_egf		MACH_OMAP3_EGF		OMAP3_EGF		3637
 smdk4212		MACH_SMDK4212		SMDK4212		3638
+dnp9200			MACH_DNP9200		DNP9200			3639
+tf101			MACH_TF101		TF101			3640
+omap3silvio		MACH_OMAP3SILVIO	OMAP3SILVIO		3641
+picasso2		MACH_PICASSO2		PICASSO2		3642
+vangogh2		MACH_VANGOGH2		VANGOGH2		3643
+olpc_xo_1_75		MACH_OLPC_XO_1_75	OLPC_XO_1_75		3644
+gx400			MACH_GX400		GX400			3645
+gs300			MACH_GS300		GS300			3646
+acer_a9			MACH_ACER_A9		ACER_A9			3647
+vivow_evm		MACH_VIVOW_EVM		VIVOW_EVM		3648
+veloce_cxq		MACH_VELOCE_CXQ		VELOCE_CXQ		3649
+veloce_cxm		MACH_VELOCE_CXM		VELOCE_CXM		3650
+p1852			MACH_P1852		P1852			3651
+naxy100			MACH_NAXY100		NAXY100			3652
+taishan			MACH_TAISHAN		TAISHAN			3653
+touchlink		MACH_TOUCHLINK		TOUCHLINK		3654
+stm32f103ze		MACH_STM32F103ZE	STM32F103ZE		3655
+mcx			MACH_MCX		MCX			3656
+stm_nmhdk_fli7610	MACH_STM_NMHDK_FLI7610	STM_NMHDK_FLI7610	3657
+top28x			MACH_TOP28X		TOP28X			3658
+okl4vp_microvisor	MACH_OKL4VP_MICROVISOR	OKL4VP_MICROVISOR	3659
+pop			MACH_POP		POP			3660
+layer			MACH_LAYER		LAYER			3661
+trondheim		MACH_TRONDHEIM		TRONDHEIM		3662
+eva			MACH_EVA		EVA			3663
+trust_taurus		MACH_TRUST_TAURUS	TRUST_TAURUS		3664
+ns2816_huashan		MACH_NS2816_HUASHAN	NS2816_HUASHAN		3665
+ns2816_yangcheng	MACH_NS2816_YANGCHENG	NS2816_YANGCHENG	3666
+p852			MACH_P852		P852			3667
+flea3			MACH_FLEA3		FLEA3			3668
+bowfin			MACH_BOWFIN		BOWFIN			3669
+mv88de3100		MACH_MV88DE3100		MV88DE3100		3670
+pia_am35x		MACH_PIA_AM35X		PIA_AM35X		3671
+cedar			MACH_CEDAR		CEDAR			3672
+picasso_e		MACH_PICASSO_E		PICASSO_E		3673
+samsung_e60		MACH_SAMSUNG_E60	SAMSUNG_E60		3674
+sdvr_mini		MACH_SDVR_MINI		SDVR_MINI		3676
+omap3_ij3k		MACH_OMAP3_IJ3K		OMAP3_IJ3K		3677
+modasmc1		MACH_MODASMC1		MODASMC1		3678
+apq8064_rumi3		MACH_APQ8064_RUMI3	APQ8064_RUMI3		3679
+matrix506		MACH_MATRIX506		MATRIX506		3680
+msm9615_mtp		MACH_MSM9615_MTP	MSM9615_MTP		3681
+dm36x_spawndc		MACH_DM36X_SPAWNDC	DM36X_SPAWNDC		3682
+sff792			MACH_SFF792		SFF792			3683
+am335xiaevm		MACH_AM335XIAEVM	AM335XIAEVM		3684
+g3c2440			MACH_G3C2440		G3C2440			3685
+tion270			MACH_TION270		TION270			3686
+w22q7arm02		MACH_W22Q7ARM02		W22Q7ARM02		3687
+omap_cat		MACH_OMAP_CAT		OMAP_CAT		3688
+at91sam9n12ek		MACH_AT91SAM9N12EK	AT91SAM9N12EK		3689
+morrison		MACH_MORRISON		MORRISON		3690
+svdu			MACH_SVDU		SVDU			3691
+lpp01			MACH_LPP01		LPP01			3692
+ubc283			MACH_UBC283		UBC283			3693
+zeppelin		MACH_ZEPPELIN		ZEPPELIN		3694
+motus			MACH_MOTUS		MOTUS			3695
+neomainboard		MACH_NEOMAINBOARD	NEOMAINBOARD		3696
+devkit3250		MACH_DEVKIT3250		DEVKIT3250		3697
+devkit7000		MACH_DEVKIT7000		DEVKIT7000		3698
+fmc_uic			MACH_FMC_UIC		FMC_UIC			3699
+fmc_dcm			MACH_FMC_DCM		FMC_DCM			3700
+batwm			MACH_BATWM		BATWM			3701
+atlas6cb		MACH_ATLAS6CB		ATLAS6CB		3702
+blue			MACH_BLUE		BLUE			3705
+colorado		MACH_COLORADO		COLORADO		3706
+popc			MACH_POPC		POPC			3707
+promwad_jade		MACH_PROMWAD_JADE	PROMWAD_JADE		3708
+amp			MACH_AMP		AMP			3709
+gnet_amp		MACH_GNET_AMP		GNET_AMP		3710
+toques			MACH_TOQUES		TOQUES			3711
+dct_storm		MACH_DCT_STORM		DCT_STORM		3713
+owl			MACH_OWL		OWL			3715
+cogent_csb1741		MACH_COGENT_CSB1741	COGENT_CSB1741		3716
+adillustra610		MACH_ADILLUSTRA610	ADILLUSTRA610		3718
+ecafe_na04		MACH_ECAFE_NA04		ECAFE_NA04		3719
+popct			MACH_POPCT		POPCT			3720
+omap3_helena		MACH_OMAP3_HELENA	OMAP3_HELENA		3721
+ach			MACH_ACH		ACH			3722
+module_dtb		MACH_MODULE_DTB		MODULE_DTB		3723
+oslo_elisabeth		MACH_OSLO_ELISABETH	OSLO_ELISABETH		3725
+tt01			MACH_TT01		TT01			3726
+msm8930_cdp		MACH_MSM8930_CDP	MSM8930_CDP		3727
+msm8930_mtp		MACH_MSM8930_MTP	MSM8930_MTP		3728
+msm8930_fluid		MACH_MSM8930_FLUID	MSM8930_FLUID		3729
+ltu11			MACH_LTU11		LTU11			3730
+am1808_spawnco		MACH_AM1808_SPAWNCO	AM1808_SPAWNCO		3731
+flx6410			MACH_FLX6410		FLX6410			3732
+mx6q_qsb		MACH_MX6Q_QSB		MX6Q_QSB		3733
+mx53_plt424		MACH_MX53_PLT424	MX53_PLT424		3734
+jasmine			MACH_JASMINE		JASMINE			3735
+l138_owlboard_plus	MACH_L138_OWLBOARD_PLUS	L138_OWLBOARD_PLUS	3736
+wr21			MACH_WR21		WR21			3737
+peaboy			MACH_PEABOY		PEABOY			3739
+mx28_plato		MACH_MX28_PLATO		MX28_PLATO		3740
+kacom2			MACH_KACOM2		KACOM2			3741
+slco			MACH_SLCO		SLCO			3742
+imx51pico		MACH_IMX51PICO		IMX51PICO		3743
+glink1			MACH_GLINK1		GLINK1			3744
+diamond			MACH_DIAMOND		DIAMOND			3745
+d9000			MACH_D9000		D9000			3746
+w5300e01		MACH_W5300E01		W5300E01		3747
+im6000			MACH_IM6000		IM6000			3748
+mx51_fred51		MACH_MX51_FRED51	MX51_FRED51		3749
+stm32f2			MACH_STM32F2		STM32F2			3750
+ville			MACH_VILLE		VILLE			3751
+ptip_murnau		MACH_PTIP_MURNAU	PTIP_MURNAU		3752
+ptip_classic		MACH_PTIP_CLASSIC	PTIP_CLASSIC		3753
+mx53grb			MACH_MX53GRB		MX53GRB			3754
+gagarin			MACH_GAGARIN		GAGARIN			3755
+nas2big			MACH_NAS2BIG		NAS2BIG			3757
+superfemto		MACH_SUPERFEMTO		SUPERFEMTO		3758
+teufel			MACH_TEUFEL		TEUFEL			3759
+dinara			MACH_DINARA		DINARA			3760
+vanquish		MACH_VANQUISH		VANQUISH		3761
+zipabox1		MACH_ZIPABOX1		ZIPABOX1		3762
+u9540			MACH_U9540		U9540			3763
+jet			MACH_JET		JET			3764
 smdk4412		MACH_SMDK4412		SMDK4412		3765
+elite			MACH_ELITE		ELITE			3766
+spear320_hmi		MACH_SPEAR320_HMI	SPEAR320_HMI		3767
+ontario			MACH_ONTARIO		ONTARIO			3768
+mx6q_sabrelite		MACH_MX6Q_SABRELITE	MX6Q_SABRELITE		3769
+vc200			MACH_VC200		VC200			3770
+msm7625a_ffa		MACH_MSM7625A_FFA	MSM7625A_FFA		3771
+msm7625a_surf		MACH_MSM7625A_SURF	MSM7625A_SURF		3772
+benthossbp		MACH_BENTHOSSBP		BENTHOSSBP		3773
+smdk5210		MACH_SMDK5210		SMDK5210		3774
+empq2300		MACH_EMPQ2300		EMPQ2300		3775
+minipos			MACH_MINIPOS		MINIPOS			3776
+omap5_sevm		MACH_OMAP5_SEVM		OMAP5_SEVM		3777
+shelter			MACH_SHELTER		SHELTER			3778
+omap3_devkit8500	MACH_OMAP3_DEVKIT8500	OMAP3_DEVKIT8500	3779
+edgetd			MACH_EDGETD		EDGETD			3780
+copperyard		MACH_COPPERYARD		COPPERYARD		3781
+edge			MACH_EDGE		EDGE			3782
+edge_u			MACH_EDGE_U		EDGE_U			3783
+edge_td			MACH_EDGE_TD		EDGE_TD			3784
+wdss			MACH_WDSS		WDSS			3785
+dl_pb25			MACH_DL_PB25		DL_PB25			3786
+dss11			MACH_DSS11		DSS11			3787
+cpa			MACH_CPA		CPA			3788
+aptp2000		MACH_APTP2000		APTP2000		3789
+marzen			MACH_MARZEN		MARZEN			3790
+st_turbine		MACH_ST_TURBINE		ST_TURBINE		3791
+gtl_it3300		MACH_GTL_IT3300		GTL_IT3300		3792
+mx6_mule		MACH_MX6_MULE		MX6_MULE		3793
+v7pxa_dt		MACH_V7PXA_DT		V7PXA_DT		3794
+v7mmp_dt		MACH_V7MMP_DT		V7MMP_DT		3795
+dragon7			MACH_DRAGON7		DRAGON7			3796
+krome			MACH_KROME		KROME			3797
+oratisdante		MACH_ORATISDANTE	ORATISDANTE		3798
+fathom			MACH_FATHOM		FATHOM			3799
+dns325			MACH_DNS325		DNS325			3800
+sarnen			MACH_SARNEN		SARNEN			3801
+ubisys_g1		MACH_UBISYS_G1		UBISYS_G1		3802
+mx53_pf1		MACH_MX53_PF1		MX53_PF1		3803
+asanti			MACH_ASANTI		ASANTI			3804
+volta			MACH_VOLTA		VOLTA			3805
+knight			MACH_KNIGHT		KNIGHT			3807
+beaglebone		MACH_BEAGLEBONE		BEAGLEBONE		3808
+becker			MACH_BECKER		BECKER			3809
+fc360			MACH_FC360		FC360			3810
+pmi2_xls		MACH_PMI2_XLS		PMI2_XLS		3811
+taranto			MACH_TARANTO		TARANTO			3812
+plutux			MACH_PLUTUX		PLUTUX			3813
+ipmp_medcom		MACH_IPMP_MEDCOM	IPMP_MEDCOM		3814
+absolut			MACH_ABSOLUT		ABSOLUT			3815
+awpb3			MACH_AWPB3		AWPB3			3816
+nfp32xx_dt		MACH_NFP32XX_DT		NFP32XX_DT		3817
+dl_pb53			MACH_DL_PB53		DL_PB53			3818
+acu_ii			MACH_ACU_II		ACU_II			3819
+avalon			MACH_AVALON		AVALON			3820
+sphinx			MACH_SPHINX		SPHINX			3821
+titan_t			MACH_TITAN_T		TITAN_T			3822
+harvest_boris		MACH_HARVEST_BORIS	HARVEST_BORIS		3823
+mach_msm7x30_m3s	MACH_MACH_MSM7X30_M3S	MACH_MSM7X30_M3S	3824
+smdk5250		MACH_SMDK5250		SMDK5250		3825
+imxt_lite		MACH_IMXT_LITE		IMXT_LITE		3826
+imxt_std		MACH_IMXT_STD		IMXT_STD		3827
+imxt_log		MACH_IMXT_LOG		IMXT_LOG		3828
+imxt_nav		MACH_IMXT_NAV		IMXT_NAV		3829
+imxt_full		MACH_IMXT_FULL		IMXT_FULL		3830
+ag09015			MACH_AG09015		AG09015			3831
+am3517_mt_ventoux	MACH_AM3517_MT_VENTOUX	AM3517_MT_VENTOUX	3832
+dp1arm9			MACH_DP1ARM9		DP1ARM9			3833
+picasso_m		MACH_PICASSO_M		PICASSO_M		3834
+video_gadget		MACH_VIDEO_GADGET	VIDEO_GADGET		3835
+mtt_om3x		MACH_MTT_OM3X		MTT_OM3X		3836
+mx6q_arm2		MACH_MX6Q_ARM2		MX6Q_ARM2		3837
+picosam9g45		MACH_PICOSAM9G45	PICOSAM9G45		3838
+vpm_dm365		MACH_VPM_DM365		VPM_DM365		3839
+bonfire			MACH_BONFIRE		BONFIRE			3840
+mt2p2d			MACH_MT2P2D		MT2P2D			3841
+sigpda01		MACH_SIGPDA01		SIGPDA01		3842
+cn27			MACH_CN27		CN27			3843
+mx25_cwtap		MACH_MX25_CWTAP		MX25_CWTAP		3844
+apf28			MACH_APF28		APF28			3845
+pelco_maxwell		MACH_PELCO_MAXWELL	PELCO_MAXWELL		3846
+ge_phoenix		MACH_GE_PHOENIX		GE_PHOENIX		3847
+empc_a500		MACH_EMPC_A500		EMPC_A500		3848
+ims_arm9		MACH_IMS_ARM9		IMS_ARM9		3849
+mini2416		MACH_MINI2416		MINI2416		3850
+mini2450		MACH_MINI2450		MINI2450		3851
+mini310			MACH_MINI310		MINI310			3852
+spear_hurricane		MACH_SPEAR_HURRICANE	SPEAR_HURRICANE		3853
+mt7208			MACH_MT7208		MT7208			3854
+lpc178x			MACH_LPC178X		LPC178X			3855
+farleys			MACH_FARLEYS		FARLEYS			3856
+efm32gg_dk3750		MACH_EFM32GG_DK3750	EFM32GG_DK3750		3857
+zeus_board		MACH_ZEUS_BOARD		ZEUS_BOARD		3858
+cc51			MACH_CC51		CC51			3859
+fxi_c210		MACH_FXI_C210		FXI_C210		3860
+msm8627_cdp		MACH_MSM8627_CDP	MSM8627_CDP		3861
+msm8627_mtp		MACH_MSM8627_MTP	MSM8627_MTP		3862
+armadillo800eva		MACH_ARMADILLO800EVA	ARMADILLO800EVA		3863
+primou			MACH_PRIMOU		PRIMOU			3864
+primoc			MACH_PRIMOC		PRIMOC			3865
+primoct			MACH_PRIMOCT		PRIMOCT			3866
+a9500			MACH_A9500		A9500			3867
+pluto			MACH_PLUTO		PLUTO			3869
+acfx100			MACH_ACFX100		ACFX100			3870
+msm8625_rumi3		MACH_MSM8625_RUMI3	MSM8625_RUMI3		3871
+valente			MACH_VALENTE		VALENTE			3872
+crfs_rfeye		MACH_CRFS_RFEYE		CRFS_RFEYE		3873
+rfeye			MACH_RFEYE		RFEYE			3874
+phidget_sbc3		MACH_PHIDGET_SBC3	PHIDGET_SBC3		3875
+tcw_mika		MACH_TCW_MIKA		TCW_MIKA		3876
+imx28_egf		MACH_IMX28_EGF		IMX28_EGF		3877
+valente_wx		MACH_VALENTE_WX		VALENTE_WX		3878
+huangshans		MACH_HUANGSHANS		HUANGSHANS		3879
+bosphorus1		MACH_BOSPHORUS1		BOSPHORUS1		3880
+prima			MACH_PRIMA		PRIMA			3881
+evita_ulk		MACH_EVITA_ULK		EVITA_ULK		3884
+merisc600		MACH_MERISC600		MERISC600		3885
+dolak			MACH_DOLAK		DOLAK			3886
+sbc53			MACH_SBC53		SBC53			3887
+elite_ulk		MACH_ELITE_ULK		ELITE_ULK		3888
+pov2			MACH_POV2		POV2			3889
+ipod_touch_2g		MACH_IPOD_TOUCH_2G	IPOD_TOUCH_2G		3890
+da850_pqab		MACH_DA850_PQAB		DA850_PQAB		3891
diff --git a/arch/avr32/boards/merisc/merisc_sysfs.c b/arch/avr32/boards/merisc/merisc_sysfs.c
index df431fd..5a25231 100644
--- a/arch/avr32/boards/merisc/merisc_sysfs.c
+++ b/arch/avr32/boards/merisc/merisc_sysfs.c
@@ -13,7 +13,6 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/timer.h>
 #include <linux/err.h>
 #include <linux/ctype.h>
diff --git a/arch/avr32/include/asm/ipcbuf.h b/arch/avr32/include/asm/ipcbuf.h
index 1552c96..84c7e51 100644
--- a/arch/avr32/include/asm/ipcbuf.h
+++ b/arch/avr32/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __ASM_AVR32_IPCBUF_H
-#define __ASM_AVR32_IPCBUF_H
-
-/*
-* The user_ipc_perm structure for AVR32 architecture.
-* Note extra padding because this structure is passed back and forth
-* between kernel and user space.
-*
-* Pad space is left for:
-* - 32-bit mode_t and seq
-* - 2 miscellaneous 32-bit values
-*/
-
-struct ipc64_perm
-{
-        __kernel_key_t          key;
-        __kernel_uid32_t        uid;
-        __kernel_gid32_t        gid;
-        __kernel_uid32_t        cuid;
-        __kernel_gid32_t        cgid;
-        __kernel_mode_t         mode;
-        unsigned short          __pad1;
-        unsigned short          seq;
-        unsigned short          __pad2;
-        unsigned long           __unused1;
-        unsigned long           __unused2;
-};
-
-#endif /* __ASM_AVR32_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h
index c8d1fae..247b88c 100644
--- a/arch/avr32/include/asm/socket.h
+++ b/arch/avr32/include/asm/socket.h
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 7a9c03d..e5deda4 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -85,7 +85,6 @@
 #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */
 #define TIF_NOTIFY_RESUME	9	/* callback before returning to user */
-#define TIF_FREEZE		29
 #define TIF_DEBUG		30	/* debugging enabled */
 #define TIF_USERSPACE		31      /* true if FS sets userspace */
 
@@ -98,7 +97,6 @@
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 
diff --git a/arch/avr32/include/asm/types.h b/arch/avr32/include/asm/types.h
index 72667a3..9bb2d8b 100644
--- a/arch/avr32/include/asm/types.h
+++ b/arch/avr32/include/asm/types.h
@@ -10,12 +10,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c
index e84faff..2233be7 100644
--- a/arch/avr32/kernel/cpu.c
+++ b/arch/avr32/kernel/cpu.c
@@ -6,7 +6,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
@@ -26,16 +26,16 @@
  * XXX: If/when a SMP-capable implementation of AVR32 will ever be
  * made, we must make sure that the code executes on the correct CPU.
  */
-static ssize_t show_pc0event(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc0event(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	unsigned long pccr;
 
 	pccr = sysreg_read(PCCR);
 	return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f);
 }
-static ssize_t store_pc0event(struct sys_device *dev,
-			struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pc0event(struct device *dev,
+			struct device_attribute *attr, const char *buf,
 			      size_t count)
 {
 	unsigned long val;
@@ -48,16 +48,16 @@
 	sysreg_write(PCCR, val);
 	return count;
 }
-static ssize_t show_pc0count(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc0count(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	unsigned long pcnt0;
 
 	pcnt0 = sysreg_read(PCNT0);
 	return sprintf(buf, "%lu\n", pcnt0);
 }
-static ssize_t store_pc0count(struct sys_device *dev,
-				struct sysdev_attribute *attr,
+static ssize_t store_pc0count(struct device *dev,
+				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
 	unsigned long val;
@@ -71,16 +71,16 @@
 	return count;
 }
 
-static ssize_t show_pc1event(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc1event(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	unsigned long pccr;
 
 	pccr = sysreg_read(PCCR);
 	return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f);
 }
-static ssize_t store_pc1event(struct sys_device *dev,
-			      struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pc1event(struct device *dev,
+			      struct device_attribute *attr, const char *buf,
 			      size_t count)
 {
 	unsigned long val;
@@ -93,16 +93,16 @@
 	sysreg_write(PCCR, val);
 	return count;
 }
-static ssize_t show_pc1count(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc1count(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	unsigned long pcnt1;
 
 	pcnt1 = sysreg_read(PCNT1);
 	return sprintf(buf, "%lu\n", pcnt1);
 }
-static ssize_t store_pc1count(struct sys_device *dev,
-				struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pc1count(struct device *dev,
+				struct device_attribute *attr, const char *buf,
 			      size_t count)
 {
 	unsigned long val;
@@ -116,16 +116,16 @@
 	return count;
 }
 
-static ssize_t show_pccycles(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pccycles(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	unsigned long pccnt;
 
 	pccnt = sysreg_read(PCCNT);
 	return sprintf(buf, "%lu\n", pccnt);
 }
-static ssize_t store_pccycles(struct sys_device *dev,
-				struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pccycles(struct device *dev,
+				struct device_attribute *attr, const char *buf,
 			      size_t count)
 {
 	unsigned long val;
@@ -139,16 +139,16 @@
 	return count;
 }
 
-static ssize_t show_pcenable(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pcenable(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	unsigned long pccr;
 
 	pccr = sysreg_read(PCCR);
 	return sprintf(buf, "%c\n", (pccr & 1)?'1':'0');
 }
-static ssize_t store_pcenable(struct sys_device *dev,
-			      struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pcenable(struct device *dev,
+			      struct device_attribute *attr, const char *buf,
 			      size_t count)
 {
 	unsigned long pccr, val;
@@ -167,12 +167,12 @@
 	return count;
 }
 
-static SYSDEV_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
-static SYSDEV_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
-static SYSDEV_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
-static SYSDEV_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
-static SYSDEV_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
-static SYSDEV_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
+static DEVICE_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
+static DEVICE_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
+static DEVICE_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
+static DEVICE_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
+static DEVICE_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
+static DEVICE_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
 
 #endif /* CONFIG_PERFORMANCE_COUNTERS */
 
@@ -186,12 +186,12 @@
 		register_cpu(c, cpu);
 
 #ifdef CONFIG_PERFORMANCE_COUNTERS
-		sysdev_create_file(&c->sysdev, &attr_pc0event);
-		sysdev_create_file(&c->sysdev, &attr_pc0count);
-		sysdev_create_file(&c->sysdev, &attr_pc1event);
-		sysdev_create_file(&c->sysdev, &attr_pc1count);
-		sysdev_create_file(&c->sysdev, &attr_pccycles);
-		sysdev_create_file(&c->sysdev, &attr_pcenable);
+		device_create_file(&c->dev, &dev_attr_pc0event);
+		device_create_file(&c->dev, &dev_attr_pc0count);
+		device_create_file(&c->dev, &dev_attr_pc1event);
+		device_create_file(&c->dev, &dev_attr_pc1count);
+		device_create_file(&c->dev, &dev_attr_pccycles);
+		device_create_file(&c->dev, &dev_attr_pcenable);
 #endif
 	}
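
The sysdev to struct device conversion above follows a fixed recipe: show/store callbacks take a struct device and struct device_attribute, DEVICE_ATTR() emits a dev_attr_<name> object, and device_create_file() attaches it. A minimal sketch with a hypothetical read-only attribute (names are illustrative, not from this patch):

#include <linux/kernel.h>
#include <linux/device.h>

static ssize_t show_example(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

/* generates a struct device_attribute named dev_attr_example */
static DEVICE_ATTR(example, 0444, show_example, NULL);

static int example_add_attr(struct device *dev)
{
	/* dev must already be registered with the driver core */
	return device_create_file(dev, &dev_attr_example);
}
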
 
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index bc3aa18..900e49b 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -14,7 +14,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 /* May be overridden by platform code */
 int __weak nmi_enable(void)
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index ef5a2a0..ea33957 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -34,10 +34,12 @@
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			cpu_idle_sleep();
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
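
The idle-loop change above (and the identical blackfin one below) splits the old tick_nohz_stop_sched_tick(1)/tick_nohz_restart_sched_tick() pair into separate nohz and RCU enter/exit calls. A sketch of the resulting sequence, with arch_idle() as a stand-in for the architecture's real sleep primitive:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/rcupdate.h>

static void arch_idle(void)
{
	cpu_relax();	/* stand-in for the real sleep instruction */
}

static void example_cpu_idle(void)
{
	while (1) {
		tick_nohz_idle_enter();		/* stop the periodic tick */
		rcu_idle_enter();		/* RCU may ignore this CPU now */
		while (!need_resched())
			arch_idle();
		rcu_idle_exit();
		tick_nohz_idle_exit();		/* restart the tick */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
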
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8..53ad100 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@
 					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		6	/* is freezing for suspend */
 #define TIF_IRQ_SYNC		7	/* sync pipeline stage */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9
@@ -111,7 +110,6 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC		(1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6a80a9e..8dd0416 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -88,10 +88,12 @@
 #endif
 		if (!idle)
 			idle = default_idle;
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			idle();
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/cris/Kconfig.debug b/arch/cris/Kconfig.debug
index 0b9a630..14881e8 100644
--- a/arch/cris/Kconfig.debug
+++ b/arch/cris/Kconfig.debug
@@ -1,6 +1,5 @@
 menu "Kernel hacking"
 
-#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
 config PROFILING
 	bool "Kernel profiling support"
 
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index bb978ed..6773fc8 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -47,14 +47,12 @@
 	.rating = 300,
 	.read   = read_cont_rotime,
 	.mask   = CLOCKSOURCE_MASK(32),
-	.shift  = 10,
 	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static int __init etrax_init_cont_rotime(void)
 {
-	cont_rotime.mult = clocksource_khz2mult(100000, cont_rotime.shift);
-	clocksource_register(&cont_rotime);
+	clocksource_register_khz(&cont_rotime, 100000);
 	return 0;
 }
 arch_initcall(etrax_init_cont_rotime);
diff --git a/arch/cris/include/asm/ipcbuf.h b/arch/cris/include/asm/ipcbuf.h
index 8b0c18b..84c7e51 100644
--- a/arch/cris/include/asm/ipcbuf.h
+++ b/arch/cris/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __CRIS_IPCBUF_H__
-#define __CRIS_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for CRIS architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* __CRIS_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h
index 1a4a619..e269264 100644
--- a/arch/cris/include/asm/socket.h
+++ b/arch/cris/include/asm/socket.h
@@ -64,6 +64,9 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
 
 
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 332f19c..29b9288 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -86,7 +86,6 @@
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/arch/cris/include/asm/types.h b/arch/cris/include/asm/types.h
index 551a12c..adaf827 100644
--- a/arch/cris/include/asm/types.h
+++ b/arch/cris/include/asm/types.h
@@ -3,12 +3,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index bad27a6..c5e69ab 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -341,16 +341,6 @@
 
 source "drivers/pcmcia/Kconfig"
 
-#config MATH_EMULATION
-#	bool "Math emulation support (EXPERIMENTAL)"
-#	depends on EXPERIMENTAL
-#	help
-#	  At some point in the future, this will cause floating-point math
-#	  instructions to be emulated by the kernel on machines that lack a
-#	  floating-point math coprocessor.  Thrill-seekers and chronically
-#	  sleep-deprived psychotic hacker types can say Y now, everyone else
-#	  should probably wait a while.
-
 menu "Power management options"
 
 config ARCH_SUSPEND_POSSIBLE
diff --git a/arch/frv/include/asm/ipcbuf.h b/arch/frv/include/asm/ipcbuf.h
index b546f67..84c7e51 100644
--- a/arch/frv/include/asm/ipcbuf.h
+++ b/arch/frv/include/asm/ipcbuf.h
@@ -1,30 +1 @@
-#ifndef __ASM_IPCBUF_H__
-#define __ASM_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for FR-V architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* __ASM_IPCBUF_H__ */
-
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h
index a6b2688..ce80fda 100644
--- a/arch/frv/include/asm/socket.h
+++ b/arch/frv/include/asm/socket.h
@@ -62,5 +62,8 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index cefbe73..92d83ea 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -111,7 +111,6 @@
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/arch/frv/include/asm/types.h b/arch/frv/include/asm/types.h
index aa3e7fd..390a612 100644
--- a/arch/frv/include/asm/types.h
+++ b/arch/frv/include/asm/types.h
@@ -14,12 +14,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/h8300/include/asm/ipcbuf.h b/arch/h8300/include/asm/ipcbuf.h
index 2cd1ebc..84c7e51 100644
--- a/arch/h8300/include/asm/ipcbuf.h
+++ b/arch/h8300/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __H8300_IPCBUF_H__
-#define __H8300_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for H8/300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* __H8300_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h
index 04c0f45..cf1daab 100644
--- a/arch/h8300/include/asm/socket.h
+++ b/arch/h8300/include/asm/socket.h
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index d6f1784..9c126e0 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -90,7 +90,6 @@
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME	6	/* callback before returning to user */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
index bb2c91a..07257d9 100644
--- a/arch/h8300/include/asm/types.h
+++ b/arch/h8300/include/asm/types.h
@@ -3,27 +3,10 @@
 
 #include <asm-generic/int-ll64.h>
 
-#if !defined(__ASSEMBLY__)
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue.  However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-typedef unsigned short umode_t;
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
 #ifdef __KERNEL__
 
 #define BITS_PER_LONG 32
 
 #endif /* __KERNEL__ */
 
-#endif /* __ASSEMBLY__ */
-
 #endif /* _H8300_TYPES_H */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 27489b6..3b7a7c4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -23,6 +23,9 @@
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
+	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
 	select IRQ_PER_CPU
@@ -474,9 +477,6 @@
 	  MAX_NUMNODES will be 2^(This value).
 	  If in doubt, use the default.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 6073b18..3deac95 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -26,59 +26,53 @@
 #include <linux/jiffies.h>
 #include <asm/processor.h>
 
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime_zero			((cputime_t)0)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) +  (__b))
-#define cputime_sub(__a, __b)		((__a) -  (__b))
-#define cputime_div(__a, __n)		((__a) /  (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) >  (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) <  (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-
-#define cputime64_zero			((cputime64_t)0)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime64_sub(__a, __b)		((__a) - (__b))
-#define cputime_to_cputime64(__ct)	(__ct)
 
 /*
  * Convert cputime <-> jiffies (HZ)
  */
-#define cputime_to_jiffies(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime_to_jiffies(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif)	\
+	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)	\
+	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
 
 /*
  * Convert cputime <-> microseconds
  */
-#define cputime_to_usecs(__ct)		((__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)	((__usecs) * NSEC_PER_USEC)
+#define cputime_to_usecs(__ct)		\
+	((__force u64)(__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)	\
+	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)	\
+	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
 
 /*
  * Convert cputime <-> seconds
  */
-#define cputime_to_secs(__ct)		((__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs)		((__secs) * NSEC_PER_SEC)
+#define cputime_to_secs(__ct)		\
+	((__force u64)(__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)		\
+	(__force cputime_t)((__secs) * NSEC_PER_SEC)
 
 /*
  * Convert cputime <-> timespec (nsec)
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *val)
 {
-	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
-	return (ret + val->tv_nsec);
+	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+	return (__force cputime_t) ret;
 }
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 {
-	val->tv_sec  = ct / NSEC_PER_SEC;
-	val->tv_nsec = ct % NSEC_PER_SEC;
+	val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
+	val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
 }
 
 /*
@@ -86,25 +80,28 @@
  */
 static inline cputime_t timeval_to_cputime(struct timeval *val)
 {
-	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
-	return (ret + val->tv_usec * NSEC_PER_USEC);
+	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+	return (__force cputime_t) ret;
 }
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
 {
-	val->tv_sec = ct / NSEC_PER_SEC;
-	val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+	val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
 }
 
 /*
  * Convert cputime <-> clock (USER_HZ)
  */
-#define cputime_to_clock_t(__ct)	((__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)		((__x) * (NSEC_PER_SEC / USER_HZ))
+#define cputime_to_clock_t(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)		\
+	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
 
 /*
  * Convert cputime64 to clock.
  */
-#define cputime64_to_clock_t(__ct)      cputime_to_clock_t((cputime_t)__ct)
+#define cputime64_to_clock_t(__ct)	\
+	cputime_to_clock_t((__force cputime_t)__ct)
 
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/include/asm/ipcbuf.h b/arch/ia64/include/asm/ipcbuf.h
index 079899a..84c7e51 100644
--- a/arch/ia64/include/asm/ipcbuf.h
+++ b/arch/ia64/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_IA64_IPCBUF_H
-#define _ASM_IA64_IPCBUF_H
-
-/*
- * The ipc64_perm structure for IA-64 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit seq
- * - 2 miscellaneous 64-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t	key;
-	__kernel_uid_t	uid;
-	__kernel_gid_t	gid;
-	__kernel_uid_t	cuid;
-	__kernel_gid_t	cgid;
-	__kernel_mode_t	mode;
-	unsigned short	seq;
-	unsigned short	__pad1;
-	unsigned long	__unused1;
-	unsigned long	__unused2;
-};
-
-#endif /* _ASM_IA64_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 51427ea..4b03664 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -71,4 +71,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff0cc84..e054bcc 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -113,7 +113,6 @@
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
-#define TIF_FREEZE		20	/* is freezing for suspend */
 #define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index 82b3939..3f5b122 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -28,8 +28,6 @@
 # define __IA64_UL(x)		((unsigned long)(x))
 # define __IA64_UL_CONST(x)	x##UL
 
-typedef unsigned int umode_t;
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index c539c68..2d67317 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -24,7 +24,7 @@
  * Copyright (C) 2006, Intel Corp.  All rights reserved.
  *
  */
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
@@ -35,10 +35,10 @@
 #define ERR_DATA_BUFFER_SIZE 3 		// Three 8-byte;
 
 #define define_one_ro(name) 						\
-static SYSDEV_ATTR(name, 0444, show_##name, NULL)
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
 
 #define define_one_rw(name) 						\
-static SYSDEV_ATTR(name, 0644, show_##name, store_##name)
+static DEVICE_ATTR(name, 0644, show_##name, store_##name)
 
 static u64 call_start[NR_CPUS];
 static u64 phys_addr[NR_CPUS];
@@ -55,7 +55,7 @@
 
 #define show(name) 							\
 static ssize_t 								\
-show_##name(struct sys_device *dev, struct sysdev_attribute *attr,	\
+show_##name(struct device *dev, struct device_attribute *attr,	\
 		char *buf)						\
 {									\
 	u32 cpu=dev->id;						\
@@ -64,7 +64,7 @@
 
 #define store(name)							\
 static ssize_t 								\
-store_##name(struct sys_device *dev, struct sysdev_attribute *attr,	\
+store_##name(struct device *dev, struct device_attribute *attr,	\
 					const char *buf, size_t size)	\
 {									\
 	unsigned int cpu=dev->id;					\
@@ -78,7 +78,7 @@
  * processor. The cpu number in driver is only used for storing data.
  */
 static ssize_t
-store_call_start(struct sys_device *dev, struct sysdev_attribute *attr,
+store_call_start(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t size)
 {
 	unsigned int cpu=dev->id;
@@ -127,7 +127,7 @@
 store(err_type_info)
 
 static ssize_t
-show_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr,
+show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	unsigned int cpu=dev->id;
@@ -135,7 +135,7 @@
 }
 
 static ssize_t
-store_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr,
+store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t size)
 {
 	unsigned int cpu=dev->id;
@@ -159,8 +159,8 @@
 store(err_struct_info)
 
 static ssize_t
-show_err_data_buffer(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+show_err_data_buffer(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	unsigned int cpu=dev->id;
 
@@ -171,8 +171,8 @@
 }
 
 static ssize_t
-store_err_data_buffer(struct sys_device *dev,
-			struct sysdev_attribute *attr,
+store_err_data_buffer(struct device *dev,
+			struct device_attribute *attr,
 			const char *buf, size_t size)
 {
 	unsigned int cpu=dev->id;
@@ -209,14 +209,14 @@
 define_one_ro(resources);
 
 static struct attribute *default_attrs[] = {
-	&attr_call_start.attr,
-	&attr_virtual_to_phys.attr,
-	&attr_err_type_info.attr,
-	&attr_err_struct_info.attr,
-	&attr_err_data_buffer.attr,
-	&attr_status.attr,
-	&attr_capabilities.attr,
-	&attr_resources.attr,
+	&dev_attr_call_start.attr,
+	&dev_attr_virtual_to_phys.attr,
+	&dev_attr_err_type_info.attr,
+	&dev_attr_err_struct_info.attr,
+	&dev_attr_err_data_buffer.attr,
+	&dev_attr_status.attr,
+	&dev_attr_capabilities.attr,
+	&dev_attr_resources.attr,
 	NULL
 };
 
@@ -225,12 +225,12 @@
 	.name = "err_inject"
 };
 /* Add/Remove err_inject interface for CPU device */
-static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev)
+static int __cpuinit err_inject_add_dev(struct device * sys_dev)
 {
 	return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
 }
 
-static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev)
+static int __cpuinit err_inject_remove_dev(struct device * sys_dev)
 {
 	sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
 	return 0;
@@ -239,9 +239,9 @@
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *sys_dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
+	sys_dev = get_cpu_device(cpu);
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -283,13 +283,13 @@
 err_inject_exit(void)
 {
 	int i;
-	struct sys_device *sys_dev;
+	struct device *sys_dev;
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_INFO "Exit error injection driver.\n");
 #endif
 	for_each_online_cpu(i) {
-		sys_dev = get_cpu_sysdev(i);
+		sys_dev = get_cpu_device(i);
 		sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
 	}
 	unregister_hotcpu_notifier(&err_inject_cpu_notifier);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 89accc6..b2c65e0 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2228,7 +2228,7 @@
 	/*
 	 * allocate a new dcache entry
 	 */
-	path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+	path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
 	if (!path.dentry) {
 		iput(inode);
 		return ERR_PTR(-ENOMEM);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5e2c724..cd57d73 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -220,6 +220,23 @@
 	}
 }
 
+/* merge overlaps */
+static int __init
+merge_regions (struct rsvd_region *rsvd_region, int max)
+{
+	int i;
+	for (i = 1; i < max; ++i) {
+		if (rsvd_region[i].start >= rsvd_region[i-1].end)
+			continue;
+		if (rsvd_region[i].end > rsvd_region[i-1].end)
+			rsvd_region[i-1].end = rsvd_region[i].end;
+		--max;
+		memmove(&rsvd_region[i], &rsvd_region[i+1],
+			(max - i) * sizeof(struct rsvd_region));
+	}
+	return max;
+}
+
 /*
  * Request address space for all standard resources
  */
@@ -270,6 +287,7 @@
 	if (ret == 0 && size > 0) {
 		if (!base) {
 			sort_regions(rsvd_region, *n);
+			*n = merge_regions(rsvd_region, *n);
 			base = kdump_find_rsvd_region(size,
 					rsvd_region, *n);
 		}
@@ -373,6 +391,7 @@
 	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
 
 	sort_regions(rsvd_region, num_rsvd_regions);
+	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
 }
 
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 9be1f11..9deb21d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -350,7 +350,7 @@
 }
 
 /* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -400,7 +400,7 @@
 }
 
 /* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_remove_dev(struct device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -428,9 +428,9 @@
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *sys_dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
+	sys_dev = get_cpu_device(cpu);
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -454,7 +454,7 @@
 	int i;
 
 	for_each_online_cpu(i) {
-		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+		struct device *sys_dev = get_cpu_device((unsigned int)i);
 		cache_add_dev(sys_dev);
 	}
 
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index f114a3b..1516d1d 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -16,6 +16,7 @@
  */
 #include <linux/bootmem.h>
 #include <linux/efi.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/nmi.h>
 #include <linux/swap.h>
@@ -348,7 +349,7 @@
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 	}
 #else /* !CONFIG_VIRTUAL_MEM_MAP */
-	add_active_range(0, 0, max_low_pfn);
+	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
 	free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 00cb0e2..13df239d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -10,6 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/efi.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
@@ -557,8 +558,7 @@
 #endif
 
 	if (start < end)
-		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
-			__pa(end) >> PAGE_SHIFT);
+		memblock_add_node(__pa(start), end - start, nid);
 	return 0;
 }
 
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 485c42d9..dfac09a 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -150,12 +150,11 @@
 	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
 	 * Revert to old method.
 	 */
-	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+	new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
+			       GFP_ATOMIC);
 	if (new_irq_info == NULL)
 		return NULL;
 
-	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
 	/* Free the old PROM new_irq_info structure */
 	sn_intr_free(local_nasid, local_widget, new_irq_info);
 	unregister_intr_pda(new_irq_info);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 5698f29..8886a0b 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -127,12 +127,11 @@
 	 * Allocate kernel bus soft and copy from prom.
 	 */
 
-	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
+	soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
 	if (!soft) {
 		return NULL;
 	}
 
-	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
 	soft->pbi_buscommon.bs_base = (unsigned long)
 		ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
 			sizeof(struct pic));
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 642451e..e77c477 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -600,11 +600,11 @@
 	 * Allocate kernel bus soft and copy from prom.
 	 */
 
-	tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
+	tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
+			       GFP_KERNEL);
 	if (!tioca_common)
 		return NULL;
 
-	memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
 	tioca_common->ca_common.bs_base = (unsigned long)
 		ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
 			sizeof(struct tioca_common));
diff --git a/arch/m32r/include/asm/ipcbuf.h b/arch/m32r/include/asm/ipcbuf.h
index 8d2d7c8..84c7e51 100644
--- a/arch/m32r/include/asm/ipcbuf.h
+++ b/arch/m32r/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef _ASM_M32R_IPCBUF_H
-#define _ASM_M32R_IPCBUF_H
-
-/*
- * The ipc64_perm structure for m32r architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* _ASM_M32R_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h
index 469787c3..e8b8c5b 100644
--- a/arch/m32r/include/asm/socket.h
+++ b/arch/m32r/include/asm/socket.h
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 0227dba..bf8fa3c 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -138,7 +138,6 @@
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/arch/m32r/include/asm/types.h b/arch/m32r/include/asm/types.h
index bd00355..bb2eead 100644
--- a/arch/m32r/include/asm/types.h
+++ b/arch/m32r/include/asm/types.h
@@ -3,12 +3,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 361d540..81fdaa7 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -3,7 +3,6 @@
 	default y
 	select HAVE_IDE
 	select HAVE_AOUT if MMU
-	select GENERIC_ATOMIC64 if MMU
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_SHOW
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
@@ -41,12 +40,15 @@
 config GENERIC_IOMAP
 	def_bool MMU
 
+config GENERIC_CSUM
+	bool
+
 config TIME_LOW_RES
 	bool
 	default y
 
 config ARCH_USES_GETTIMEOFFSET
-	def_bool MMU
+	def_bool MMU && !COLDFIRE
 
 config NO_IOPORT
 	def_bool y
@@ -61,6 +63,12 @@
 config CPU_HAS_NO_BITFIELDS
 	bool
 
+config CPU_HAS_NO_MULDIV64
+	bool
+
+config CPU_HAS_ADDRESS_SPACES
+	bool
+
 config HZ
 	int
 	default 1000 if CLEOPATRA
@@ -80,9 +88,12 @@
 config MMU_MOTOROLA
 	bool
 
+config MMU_COLDFIRE
+	bool
+
 config MMU_SUN3
 	bool
-	depends on MMU && !MMU_MOTOROLA
+	depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE
 
 menu "Platform setup"
 
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index e632b2d..8a9c767 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -1,8 +1,42 @@
 comment "Processor Type"
 
+choice
+	prompt "CPU family support"
+	default M68KCLASSIC if MMU
+	default COLDFIRE if !MMU
+	help
+	  The Freescale (was Motorola) M68K family of processors implements
+	  the full 68000 processor instruction set.
+	  The Freescale ColdFire family of processors is a modern derivative
+	  of the 68000 processor family. They are mainly targeted at embedded
+	  applications, and are all System-On-Chip (SOC) devices, as opposed
+	  to stand alone CPUs. They implement a subset of the original 68000
+	  processor instruction set.
+	  If you anticipate running this kernel on a computer with a classic
+	  MC68xxx processor, select M68KCLASSIC.
+	  If you anticipate running this kernel on a computer with a ColdFire
+	  processor, select COLDFIRE.
+
+config M68KCLASSIC
+	bool "Classic M68K CPU family support"
+
+config COLDFIRE
+	bool "ColdFire CPU family support"
+	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
+	select CPU_HAS_NO_BITFIELDS
+	select CPU_HAS_NO_MULDIV64
+	select GENERIC_CSUM
+
+endchoice
+
+if M68KCLASSIC
+
 config M68000
 	bool
 	select CPU_HAS_NO_BITFIELDS
+	select CPU_HAS_NO_MULDIV64
+	select GENERIC_CSUM
 	help
 	  The Freescale (was Motorola) 68000 CPU is the first generation of
 	  the well known M68K family of processors. The CPU core as well as
@@ -18,21 +52,11 @@
 	  based on the 68020 processor. For the most part it is used in
 	  System-On-Chip parts, and does not contain a paging MMU.
 
-config COLDFIRE
-	bool
-	select GENERIC_GPIO
-	select ARCH_REQUIRE_GPIOLIB
-	select CPU_HAS_NO_BITFIELDS
-	help
-	  The Freescale ColdFire family of processors is a modern derivitive
-	  of the 68000 processor family. They are mainly targeted at embedded
-	  applications, and are all System-On-Chip (SOC) devices, as opposed
-	  to stand alone CPUs. They implement a subset of the original 68000
-	  processor instruction set.
-
 config M68020
 	bool "68020 support"
 	depends on MMU
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68020
 	  processor, say Y. Otherwise, say N. Note that the 68020 requires a
@@ -42,6 +66,8 @@
 config M68030
 	bool "68030 support"
 	depends on MMU && !MMU_SUN3
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68030
 	  processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
@@ -50,6 +76,8 @@
 config M68040
 	bool "68040 support"
 	depends on MMU && !MMU_SUN3
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68LC040
 	  or MC68040 processor, say Y. Otherwise, say N. Note that an
@@ -59,6 +87,8 @@
 config M68060
 	bool "68060 support"
 	depends on MMU && !MMU_SUN3
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68060
 	  processor, say Y. Otherwise, say N.
@@ -91,10 +121,13 @@
 	help
 	  Motorola 68360 processor support.
 
+endif # M68KCLASSIC
+
+if COLDFIRE
+
 config M5206
 	bool "MCF5206"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -103,7 +136,6 @@
 config M5206e
 	bool "MCF5206e"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -112,7 +144,6 @@
 config M520x
 	bool "MCF520x"
 	depends on !MMU
-	select COLDFIRE
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CACHE_SPLIT
 	help
@@ -121,7 +152,6 @@
 config M523x
 	bool "MCF523x"
 	depends on !MMU
-	select COLDFIRE
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -131,7 +161,6 @@
 config M5249
 	bool "MCF5249"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -143,7 +172,6 @@
 config M5271
 	bool "MCF5271"
 	depends on !MMU
-	select COLDFIRE
 	select M527x
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -154,7 +182,6 @@
 config M5272
 	bool "MCF5272"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -163,7 +190,6 @@
 config M5275
 	bool "MCF5275"
 	depends on !MMU
-	select COLDFIRE
 	select M527x
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -174,7 +200,6 @@
 config M528x
 	bool "MCF528x"
 	depends on !MMU
-	select COLDFIRE
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -184,7 +209,6 @@
 config M5307
 	bool "MCF5307"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
@@ -194,7 +218,6 @@
 config M532x
 	bool "MCF532x"
 	depends on !MMU
-	select COLDFIRE
 	select HAVE_CACHE_CB
 	help
 	  Freescale (Motorola) ColdFire 532x processor support.
@@ -202,7 +225,6 @@
 config M5407
 	bool "MCF5407"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
@@ -214,9 +236,8 @@
 
 config M547x
 	bool "MCF547x"
-	depends on !MMU
-	select COLDFIRE
 	select M54xx
+	select MMU_COLDFIRE if MMU
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
 	help
@@ -224,14 +245,15 @@
 
 config M548x
 	bool "MCF548x"
-	depends on !MMU
-	select COLDFIRE
+	select MMU_COLDFIRE if MMU
 	select M54xx
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
 	help
 	  Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
 
+endif # COLDFIRE
+
 
 comment "Processor Specific Options"
 
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 2bdb1b0..87233ac 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -2,6 +2,25 @@
 
 source "lib/Kconfig.debug"
 
+config BOOTPARAM
+	bool 'Compiled-in Kernel Boot Parameter'
+
+config BOOTPARAM_STRING
+	string 'Kernel Boot Parameter'
+	default 'console=ttyS0,19200'
+	depends on BOOTPARAM
+
+config EARLY_PRINTK
+	bool "Early printk" if EMBEDDED
+	depends on MVME16x || MAC
+	default y
+	help
+	  Write kernel log output directly to a serial port.
+
+	  This is useful for kernel debugging when your machine crashes very
+	  early before the console code is initialized.
+	  You should normally say N here, unless you want to debug such a crash.
+
 if !MMU
 
 config FULLDEBUG
@@ -15,14 +34,6 @@
 	help
 	  Use a fast secondary clock to produce profiling information.
 
-config BOOTPARAM
-	bool 'Compiled-in Kernel Boot Parameter'
-
-config BOOTPARAM_STRING
-	string 'Kernel Boot Parameter'
-	default 'console=ttyS0,19200'
-	depends on BOOTPARAM
-
 config NO_KERNEL_MSG
 	bool "Suppress Kernel BUG Messages"
 	help
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index 6033f5d..04a3d9b 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -8,8 +8,8 @@
 menu "Platform devices"
 
 config HEARTBEAT
-	bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
-	default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
+	bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || Q40
+	default y if !AMIGA && !APOLLO && !ATARI && !Q40 && HP300
 	help
 	  Use the power-on LED on your machine as a load meter.  The exact
 	  behavior is platform-dependent, but normally the flash frequency is
@@ -59,27 +59,6 @@
 
 menu "Character devices"
 
-config ATARI_MFPSER
-	tristate "Atari MFP serial support"
-	depends on ATARI
-	---help---
-	  If you like to use the MFP serial ports ("Modem1", "Serial1") under
-	  Linux, say Y. The driver equally supports all kinds of MFP serial
-	  ports and automatically detects whether Serial1 is available.
-
-	  To compile this driver as a module, choose M here.
-
-	  Note for Falcon users: You also have an MFP port, it's just not
-	  wired to the outside... But you could use the port under Linux.
-
-config ATARI_MIDI
-	tristate "Atari MIDI serial support"
-	depends on ATARI
-	help
-	  If you want to use your Atari's MIDI port in Linux, say Y.
-
-	  To compile this driver as a module, choose M here.
-
 config ATARI_DSP56K
 	tristate "Atari DSP56k support (EXPERIMENTAL)"
 	depends on ATARI && EXPERIMENTAL
@@ -99,15 +78,6 @@
 
 	  To compile this driver as a module, choose M here.
 
-config MULTIFACE_III_TTY
-	tristate "Multiface Card III serial support"
-	depends on AMIGA
-	help
-	  If you want to use a Multiface III card's serial port in Linux,
-	  answer Y.
-
-	  To compile this driver as a module, choose M here.
-
 config HPDCA
 	tristate "HP DCA serial support"
 	depends on DIO && SERIAL_8250
@@ -122,13 +92,9 @@
 	  If you want to use the internal "APCI" serial ports on an HP400
 	  machine, say Y here.
 
-config DN_SERIAL
-	bool "Support for DN serial port (dummy)"
-	depends on APOLLO
-
 config SERIAL_CONSOLE
 	bool "Support for serial port console"
-	depends on (AMIGA || ATARI || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_MIDI=y || AMIGA_BUILTIN_SERIAL=y || MULTIFACE_III_TTY=y || SERIAL=y || SERIAL167 || DN_SERIAL)
+	depends on AMIGA_BUILTIN_SERIAL=y
 	---help---
 	  If you say Y here, it will be possible to use a serial port as the
 	  system console (the system console is the device which receives all
@@ -140,10 +106,10 @@
 	  (/dev/tty0) will still be used as the system console by default, but
 	  you can alter that using a kernel command line option such as
 	  "console=ttyS1". (Try "man bootparam" or see the documentation of
-	  your boot loader (lilo or loadlin) about how to pass options to the
-	  kernel at boot time.)
+	  your boot loader about how to pass options to the kernel at boot
+	  time.)
 
-	  If you don't have a VGA card installed and you say Y here, the
+	  If you don't have a graphical console and you say Y here, the
 	  kernel will automatically use the first serial line, /dev/ttyS0, as
 	  system console.
 
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index ef4a26a..7cdf6b0 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -1,5 +1,7 @@
 comment "Machine Types"
 
+if M68KCLASSIC
+
 config AMIGA
 	bool "Amiga support"
 	depends on MMU
@@ -130,6 +132,8 @@
 
 	  If you don't want to compile a kernel exclusively for a Sun 3, say N.
 
+endif # M68KCLASSIC
+
 config PILOT
 	bool
 
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 6d196da..8048e1b 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -82,8 +82,6 @@
 
 extern void atari_microwire_cmd(int cmd);
 
-extern int atari_SCC_reset_done;
-
 static unsigned int atari_irq_startup(struct irq_data *data)
 {
 	unsigned int irq = data->irq;
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index 5a48424..a547ba9 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -202,7 +202,6 @@
 
 static void __init atari_init_scc_port(int cflag)
 {
-	extern int atari_SCC_reset_done;
 	static int clksrc_table[9] =
 		/* reg 11: 0x50 = BRG, 0x00 = RTxC, 0x28 = TRxC */
 		{ 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x00, 0x00 };
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index dbb49fc..e93fdae 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -255,7 +255,6 @@
 CONFIG_HIDRAW=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_AMIGA_BUILTIN_SERIAL=y
-CONFIG_MULTIFACE_III_TTY=m
 CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 562b221..66b26c1 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -223,8 +223,6 @@
 CONFIG_HID=m
 CONFIG_HIDRAW=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_DN_SERIAL=y
-CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 82978df..1513325 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -235,10 +235,7 @@
 CONFIG_HID=m
 CONFIG_HIDRAW=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_ATARI_MFPSER=y
-CONFIG_ATARI_MIDI=y
 CONFIG_ATARI_DSP56K=m
-CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index ad9e857..55d394e 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -318,13 +318,8 @@
 CONFIG_HID=m
 CONFIG_HIDRAW=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_ATARI_MFPSER=y
-CONFIG_ATARI_MIDI=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_AMIGA_BUILTIN_SERIAL=y
-CONFIG_MULTIFACE_III_TTY=m
-CONFIG_SERIAL167=y
-CONFIG_DN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index c45aaf3..cdb70d6 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -218,8 +218,6 @@
 CONFIG_HID=m
 CONFIG_HIDRAW=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_SERIAL167=y
-CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index c5748bb..a985a7e 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -39,7 +39,7 @@
 #define MAX_UNIT	8
 
 /* These identify the driver base version and may not be removed. */
-static const char version[] __devinitdata =
+static const char version[] __devinitconst =
 	KERN_INFO KBUILD_MODNAME ".c:v" DRV_VERSION " " DRV_RELDATE
 	" S.Opichal, M.Jurik, P.Stehlik\n"
 	KERN_INFO " http://aranym.org/\n";
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index 1c05a62..bf16af1 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -24,7 +24,8 @@
 
 unsigned long hp300_model;
 unsigned long hp300_uart_scode = -1;
-unsigned char ledstate;
+unsigned char hp300_ledstate;
+EXPORT_SYMBOL(hp300_ledstate);
 
 static char s_hp330[] __initdata = "330";
 static char s_hp340[] __initdata = "340";
diff --git a/arch/m68k/include/asm/anchor.h b/arch/m68k/include/asm/anchor.h
deleted file mode 100644
index 871c0d5..0000000
--- a/arch/m68k/include/asm/anchor.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/****************************************************************************/
-
-/*
- *	anchor.h -- Anchor CO-MEM Lite PCI host bridge part.
- *
- *	(C) Copyright 2000, Moreton Bay (www.moreton.com.au)
- */
-
-/****************************************************************************/
-#ifndef	anchor_h
-#define	anchor_h
-/****************************************************************************/
-
-/*
- *	Define basic addressing info.
- */
-#if defined(CONFIG_M5407C3)
-#define	COMEM_BASE	0xFFFF0000	/* Base of CO-MEM address space */
-#define	COMEM_IRQ	25		/* IRQ of anchor part */
-#else
-#define	COMEM_BASE	0x80000000	/* Base of CO-MEM address space */
-#define	COMEM_IRQ	25		/* IRQ of anchor part */
-#endif
-
-/****************************************************************************/
-
-/*
- *	4-byte registers of CO-MEM, so adjust register addresses for
- *	easy access. Handy macro for word access too.
- */
-#define	LREG(a)		((a) >> 2)
-#define	WREG(a)		((a) >> 1)
-
-
-/*
- *	Define base addresses within CO-MEM Lite register address space.
- */
-#define	COMEM_I2O	0x0000		/* I2O registers */
-#define	COMEM_OPREGS	0x0400		/* Operation registers */
-#define	COMEM_PCIBUS	0x2000		/* Direct access to PCI bus */
-#define	COMEM_SHMEM	0x4000		/* Shared memory region */
-
-#define	COMEM_SHMEMSIZE	0x4000		/* Size of shared memory */
-
-
-/*
- *	Define CO-MEM Registers.
- */
-#define	COMEM_I2OHISR	0x0030		/* I2O host interrupt status */
-#define	COMEM_I2OHIMR	0x0034		/* I2O host interrupt mask */
-#define	COMEM_I2OLISR	0x0038		/* I2O local interrupt status */
-#define	COMEM_I2OLIMR	0x003c		/* I2O local interrupt mask */
-#define	COMEM_IBFPFIFO	0x0040		/* I2O inbound free/post FIFO */
-#define	COMEM_OBPFFIFO	0x0044		/* I2O outbound post/free FIFO */
-#define	COMEM_IBPFFIFO	0x0048		/* I2O inbound post/free FIFO */
-#define	COMEM_OBFPFIFO	0x004c		/* I2O outbound free/post FIFO */
-
-#define	COMEM_DAHBASE	0x0460		/* Direct access base address */
-
-#define	COMEM_NVCMD	0x04a0		/* I2C serial command */
-#define	COMEM_NVREAD	0x04a4		/* I2C serial read */
-#define	COMEM_NVSTAT	0x04a8		/* I2C status */
-
-#define	COMEM_DMALBASE	0x04b0		/* DMA local base address */
-#define	COMEM_DMAHBASE	0x04b4		/* DMA host base address */
-#define	COMEM_DMASIZE	0x04b8		/* DMA size */
-#define	COMEM_DMACTL	0x04bc		/* DMA control */
-
-#define	COMEM_HCTL	0x04e0		/* Host control */
-#define	COMEM_HINT	0x04e4		/* Host interrupt control/status */
-#define	COMEM_HLDATA	0x04e8		/* Host to local data mailbox */
-#define	COMEM_LINT	0x04f4		/* Local interrupt contole status */
-#define	COMEM_LHDATA	0x04f8		/* Local to host data mailbox */
-
-#define	COMEM_LBUSCFG	0x04fc		/* Local bus configuration */
-
-
-/*
- *	Commands and flags for use with Direct Access Register.
- */
-#define	COMEM_DA_IACK	0x00000000	/* Interrupt acknowledge (read) */
-#define	COMEM_DA_SPCL	0x00000010	/* Special cycle (write) */
-#define	COMEM_DA_MEMRD	0x00000004	/* Memory read cycle */
-#define	COMEM_DA_MEMWR	0x00000004	/* Memory write cycle */
-#define	COMEM_DA_IORD	0x00000002	/* I/O read cycle */
-#define	COMEM_DA_IOWR	0x00000002	/* I/O write cycle */
-#define	COMEM_DA_CFGRD	0x00000006	/* Configuration read cycle */
-#define	COMEM_DA_CFGWR	0x00000006	/* Configuration write cycle */
-
-#define	COMEM_DA_ADDR(a)	((a) & 0xffffe000)
-
-#define	COMEM_DA_OFFSET(a)	((a) & 0x00001fff)
-
-
-/*
- *	The PCI bus will be limited in what slots will actually be used.
- *	Define valid device numbers for different boards.
- */
-#if defined(CONFIG_M5407C3)
-#define	COMEM_MINDEV	14		/* Minimum valid DEVICE */
-#define	COMEM_MAXDEV	14		/* Maximum valid DEVICE */
-#define	COMEM_BRIDGEDEV	15		/* Slot bridge is in */
-#else
-#define	COMEM_MINDEV	0		/* Minimum valid DEVICE */
-#define	COMEM_MAXDEV	3		/* Maximum valid DEVICE */
-#endif
-
-#define	COMEM_MAXPCI	(COMEM_MAXDEV+1)	/* Maximum PCI devices */
-
-
-/****************************************************************************/
-#endif	/* anchor_h */
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 0392b2865..c0cb363 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -30,6 +30,8 @@
 extern int atari_rtc_year_offset;
 extern int atari_dont_touch_floppy_select;
 
+extern int atari_SCC_reset_done;
+
 /* convenience macros for testing machine type */
 #define MACH_IS_ST	((atari_mch_cookie >> 16) == ATARI_MCH_ST)
 #define MACH_IS_STE	((atari_mch_cookie >> 16) == ATARI_MCH_STE && \
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 65c6be6..4eba796 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -55,6 +55,16 @@
 	return c != 0;
 }
 
+static inline int atomic_dec_and_test_lt(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__(
+		"subql #1,%1; slt %0"
+		: "=d" (c), "=m" (*v)
+		: "m" (*v));
+	return c != 0;
+}
+
 static inline int atomic_inc_and_test(atomic_t *v)
 {
 	char c;
diff --git a/arch/m68k/include/asm/blinken.h b/arch/m68k/include/asm/blinken.h
index 1a749cf..0626582 100644
--- a/arch/m68k/include/asm/blinken.h
+++ b/arch/m68k/include/asm/blinken.h
@@ -17,15 +17,15 @@
 
 #define HP300_LEDS		0xf001ffff
 
-extern unsigned char ledstate;
+extern unsigned char hp300_ledstate;
 
 static __inline__ void blinken_leds(int on, int off)
 {
 	if (MACH_IS_HP300)
 	{
-		ledstate |= on;
-		ledstate &= ~off;
-		out_8(HP300_LEDS, ~ledstate);
+		hp300_ledstate |= on;
+		hp300_ledstate &= ~off;
+		out_8(HP300_LEDS, ~hp300_ledstate);
 	}
 }
 
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 73de7c8..8104bd8 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -2,23 +2,89 @@
 #define _M68K_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#ifdef CONFIG_COLDFIRE
+#include <asm/mcfsim.h>
+#endif
 
 /* cache code */
 #define FLUSH_I_AND_D	(0x00000808)
 #define FLUSH_I		(0x00000008)
 
+#ifndef ICACHE_MAX_ADDR
+#define ICACHE_MAX_ADDR	0
+#define ICACHE_SET_MASK	0
+#define DCACHE_MAX_ADDR	0
+#define DCACHE_SETMASK	0
+#endif
+
+static inline void flush_cf_icache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
+static inline void flush_cf_dcache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
+static inline void flush_cf_bcache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
 /*
  * Cache handling functions
  */
 
 static inline void flush_icache(void)
 {
-	if (CPU_IS_040_OR_060)
+	if (CPU_IS_COLDFIRE) {
+		flush_cf_icache(0, ICACHE_MAX_ADDR);
+	} else if (CPU_IS_040_OR_060) {
 		asm volatile (	"nop\n"
 			"	.chip	68040\n"
 			"	cpusha	%bc\n"
 			"	.chip	68k");
-	else {
+	} else {
 		unsigned long tmp;
 		asm volatile (	"movec	%%cacr,%0\n"
 			"	or.w	%1,%0\n"
@@ -51,12 +117,14 @@
    process changes.  */
 #define __flush_cache_all()					\
 ({								\
-	if (CPU_IS_040_OR_060)					\
+	if (CPU_IS_COLDFIRE) {					\
+		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
+	} else if (CPU_IS_040_OR_060) {				\
 		__asm__ __volatile__("nop\n\t"			\
 				     ".chip 68040\n\t"		\
 				     "cpusha %dc\n\t"		\
 				     ".chip 68k");		\
-	else {							\
+	} else {						\
 		unsigned long _tmp;				\
 		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
 				     "orw %1,%0\n\t"		\
@@ -112,7 +180,17 @@
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 static inline void __flush_page_to_ram(void *vaddr)
 {
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long addr, start, end;
+		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
+		start = addr & ICACHE_SET_MASK;
+		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_bcache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_bcache(start, end);
+	} else if (CPU_IS_040_OR_060) {
 		__asm__ __volatile__("nop\n\t"
 				     ".chip 68040\n\t"
 				     "cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index ec51448..2f88d86 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -3,6 +3,10 @@
 
 #include <linux/in6.h>
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -34,30 +38,6 @@
 					      void *dst, int len,
 					      __wsum sum);
 
-
-#ifdef CONFIG_COLDFIRE
-
-/*
- *	The ColdFire cores don't support all the 68k instructions used
- *	in the optimized checksum code below. So it reverts back to using
- *	more standard C coded checksums. The fast checksum code is
- *	significantly larger than the optimized version, so it is not
- *	inlined here.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	unsigned int tmp = (__force u32)sum;
-
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-
-	return (__force __sum16)~tmp;
-}
-
-#else
-
 /*
  *	This is a version of ip_fast_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
@@ -97,8 +77,6 @@
 	return (__force __sum16)~sum;
 }
 
-#endif /* CONFIG_COLDFIRE */
-
 static inline __wsum
 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
 		  unsigned short proto, __wsum sum)
@@ -167,4 +145,5 @@
 	return csum_fold(sum);
 }
 
+#endif /* CONFIG_GENERIC_CSUM */
 #endif /* _M68K_CHECKSUM_H */
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index edb6614..444ea8a 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -1,7 +1,9 @@
 #ifndef _M68K_DIV64_H
 #define _M68K_DIV64_H
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#include <asm-generic/div64.h>
+#else
 
 #include <linux/types.h>
 
@@ -27,8 +29,6 @@
 	__rem;							\
 })
 
-#else
-#include <asm-generic/div64.h>
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_CPU_HAS_NO_MULDIV64 */
 
 #endif /* _M68K_DIV64_H */
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 01c193d..e9b7cda5 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,10 +59,10 @@
    is actually used on ASV.  */
 #define ELF_PLAT_INIT(_r, load_addr)	_r->a1 = 0
 
-#ifndef CONFIG_SUN3
-#define ELF_EXEC_PAGESIZE	4096
-#else
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
 #define ELF_EXEC_PAGESIZE	8192
+#else
+#define ELF_EXEC_PAGESIZE	4096
 #endif
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index c3c5a86..622138d 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -222,16 +222,24 @@
  * Non-MMU systems do not reserve %a2 in this way, and this definition is
  * not used for them.
  */
+#ifdef CONFIG_MMU
+
 #define curptr a2
 
 #define GET_CURRENT(tmp) get_current tmp
 .macro get_current reg=%d0
 	movel	%sp,\reg
-	andw	#-THREAD_SIZE,\reg
+	andl	#-THREAD_SIZE,\reg
 	movel	\reg,%curptr
 	movel	%curptr@,%curptr
 .endm
 
+#else
+
+#define GET_CURRENT(tmp)
+
+#endif /* CONFIG_MMU */
+
 #else /* C source */
 
 #define STR(X) STR1(X)
diff --git a/arch/m68k/include/asm/fpu.h b/arch/m68k/include/asm/fpu.h
index ffb6b8c..526db9d 100644
--- a/arch/m68k/include/asm/fpu.h
+++ b/arch/m68k/include/asm/fpu.h
@@ -12,6 +12,8 @@
 #define FPSTATESIZE (96)
 #elif defined(CONFIG_M68KFPU_EMU)
 #define FPSTATESIZE (28)
+#elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#define FPSTATESIZE (16)
 #elif defined(CONFIG_M68060)
 #define FPSTATESIZE (12)
 #else
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index b204683..00d0071 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -225,7 +225,8 @@
 
 static inline int gpio_to_irq(unsigned gpio)
 {
-	return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL;
+	return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE
+		: __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned irq)
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
index a623ea3..84c7e51 100644
--- a/arch/m68k/include/asm/ipcbuf.h
+++ b/arch/m68k/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __m68k_IPCBUF_H__
-#define __m68k_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* __m68k_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 6198df5..0e89fa0 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -25,7 +25,8 @@
 #define NR_IRQS	0
 #endif
 
-#ifdef CONFIG_MMU
+#if defined(CONFIG_M68020) || defined(CONFIG_M68030) || \
+    defined(CONFIG_M68040) || defined(CONFIG_M68060)
 
 /*
  * Interrupt source definitions
@@ -80,7 +81,7 @@
 
 #else
 #define irq_canonicalize(irq)  (irq)
-#endif /* CONFIG_MMU */
+#endif /* !(CONFIG_M68020 || CONFIG_M68030 || CONFIG_M68040 || CONFIG_M68060) */
 
 asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
 extern atomic_t irq_err_count;
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 16a1835..47906aa 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -39,8 +39,12 @@
 #define ACR_CM_OFF_PRE	0x00000040	/* No cache, precise */
 #define ACR_CM_OFF_IMP	0x00000060	/* No cache, imprecise */
 #define ACR_CM		0x00000060	/* Cache mode mask */
+#define ACR_SP		0x00000008	/* Supervisor protect */
 #define ACR_WPROTECT	0x00000004	/* Write protect */
 
+#define ACR_BA(x)	((x) & 0xff000000)
+#define ACR_ADMSK(x)	((((x) - 1) & 0xff000000) >> 8)
+
 #if defined(CONFIG_M5407)
 
 #define ICACHE_SIZE 0x4000	/* instruction - 16k */
@@ -56,6 +60,11 @@
 #define CACHE_LINE_SIZE 0x0010	/* 16 bytes */
 #define CACHE_WAYS 4		/* 4 ways */
 
+#define ICACHE_SET_MASK	((ICACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define DCACHE_SET_MASK	((DCACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define ICACHE_MAX_ADDR	ICACHE_SET_MASK
+#define DCACHE_MAX_ADDR	DCACHE_SET_MASK
+
 /*
  *	Version 4 cores have a true harvard style separate instruction
  *	and data cache. Enable data and instruction caches, also enable write
@@ -73,6 +82,27 @@
 #else
 #define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
 #endif
+#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+
+#if defined(CONFIG_MMU)
+/*
+ *	If running with the MMU enabled then we need to map the internal
+ *	register region as non-cacheable. And then we map all our RAM as
+ *	cacheable and supervisor access only.
+ */
+#define ACR0_MODE	(ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#define ACR1_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+#define ACR2_MODE	0
+#define ACR3_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+
+#else
+
+/*
+ *	For the non-MMU enabled case we map all of RAM as cacheable.
+ */
 #if defined(CONFIG_CACHE_COPYBACK)
 #define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP)
 #else
@@ -80,7 +110,6 @@
 #endif
 #define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)
 
-#define CACHE_INIT	(CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATE  (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
@@ -94,4 +123,5 @@
 #define	CACHE_PUSH
 #endif
 
+#endif /* CONFIG_MMU */
 #endif	/* m54xxacr_h */
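
A rough illustration of what the new ACR_BA()/ACR_ADMSK() helpers produce for the
MMU-enabled ACR setup above. The ADMSK field works in 16MB granules, so the 16MB
MBAR window yields a zero mask while a 64MB RAM region gets three extra granules.
The CONFIG_MBAR/CONFIG_RAMBASE/CONFIG_RAMSIZE values below are assumptions for the
sketch, not taken from any real board configuration:

#include <stdio.h>

#define ACR_BA(x)	((x) & 0xff000000)
#define ACR_ADMSK(x)	((((x) - 1) & 0xff000000) >> 8)

int main(void)
{
	unsigned long mbar    = 0xf0000000UL;	/* assumed CONFIG_MBAR */
	unsigned long rambase = 0x40000000UL;	/* assumed CONFIG_RAMBASE */
	unsigned long ramsize = 0x04000000UL;	/* assumed 64MB CONFIG_RAMSIZE */

	/* ACR0: mask 0x00000000 -> just the one 16MB MBAR granule */
	printf("ACR0 base 0x%08lx mask 0x%08lx\n",
	       ACR_BA(mbar), ACR_ADMSK(0x1000000));
	/* ACR1: mask 0x00030000 -> base granule plus 3 more, i.e. the 64MB of RAM */
	printf("ACR1 base 0x%08lx mask 0x%08lx\n",
	       ACR_BA(rambase), ACR_ADMSK(ramsize));
	return 0;
}
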
diff --git a/arch/m68k/include/asm/mac_baboon.h b/arch/m68k/include/asm/mac_baboon.h
index c2a042b..a2d32f6 100644
--- a/arch/m68k/include/asm/mac_baboon.h
+++ b/arch/m68k/include/asm/mac_baboon.h
@@ -29,4 +29,10 @@
 				 */
 };
 
+extern int baboon_present;
+
+extern void baboon_register_interrupts(void);
+extern void baboon_irq_enable(int);
+extern void baboon_irq_disable(int);
+
 #endif /* __ASSEMBLY **/
diff --git a/arch/m68k/include/asm/mac_iop.h b/arch/m68k/include/asm/mac_iop.h
index a2c7e6f..fde874a 100644
--- a/arch/m68k/include/asm/mac_iop.h
+++ b/arch/m68k/include/asm/mac_iop.h
@@ -159,4 +159,6 @@
 extern void iop_download_code(uint, __u8 *, uint, __u16);
 extern __u8 *iop_compare_code(uint, __u8 *, uint, __u16);
 
+extern void iop_register_interrupts(void);
+
 #endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/mac_oss.h b/arch/m68k/include/asm/mac_oss.h
index 3cf2b6e..425fbff 100644
--- a/arch/m68k/include/asm/mac_oss.h
+++ b/arch/m68k/include/asm/mac_oss.h
@@ -58,25 +58,6 @@
 
 #define OSS_POWEROFF	0x80
 
-/*
- * OSS Interrupt levels for various sub-systems
- *
- * This mapping is laid out with two things in mind: first, we try to keep
- * things on their own levels to avoid having to do double-dispatches. Second,
- * the levels match as closely as possible the alternate IRQ mapping mode (aka
- * "A/UX mode") available on some VIA machines.
- */
-
-#define OSS_IRQLEV_DISABLED	0
-#define OSS_IRQLEV_IOPISM	1	/* ADB? */
-#define OSS_IRQLEV_SCSI		IRQ_AUTO_2
-#define OSS_IRQLEV_NUBUS	IRQ_AUTO_3	/* keep this on its own level */
-#define OSS_IRQLEV_IOPSCC	IRQ_AUTO_4	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_SOUND	IRQ_AUTO_5	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_60HZ		6	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_VIA1		IRQ_AUTO_6	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_PARITY	7	/* matches VIA alternate mapping */
-
 #ifndef __ASSEMBLY__
 
 struct mac_oss {
@@ -91,4 +72,8 @@
 extern volatile struct mac_oss *oss;
 extern int oss_present;
 
+extern void oss_register_interrupts(void);
+extern void oss_irq_enable(int);
+extern void oss_irq_disable(int);
+
 #endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/mac_psc.h b/arch/m68k/include/asm/mac_psc.h
index 7808bb0..e5c0d71 100644
--- a/arch/m68k/include/asm/mac_psc.h
+++ b/arch/m68k/include/asm/mac_psc.h
@@ -211,6 +211,10 @@
 extern volatile __u8 *psc;
 extern int psc_present;
 
+extern void psc_register_interrupts(void);
+extern void psc_irq_enable(int);
+extern void psc_irq_disable(int);
+
 /*
  *	Access functions
  */
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index a59665e..aeeedf8 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -254,6 +254,15 @@
 extern volatile __u8 *via1,*via2;
 extern int rbv_present,via_alt_mapping;
 
+extern void via_register_interrupts(void);
+extern void via_irq_enable(int);
+extern void via_irq_disable(int);
+extern void via_nubus_irq_startup(int irq);
+extern void via_nubus_irq_shutdown(int irq);
+extern void via1_irq(unsigned int irq, struct irq_desc *desc);
+extern void via1_set_head(int);
+extern int via2_scsi_drq_pending(void);
+
 static inline int rbv_set_video_bpp(int bpp)
 {
 	char val = (bpp==1)?0:(bpp==2)?1:(bpp==4)?2:(bpp==8)?3:-1;
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 12ebe43..682a1a2 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -11,17 +11,11 @@
 extern void mac_reset(void);
 extern void mac_poweroff(void);
 extern void mac_init_IRQ(void);
-extern int mac_irq_pending(unsigned int);
+
 extern void mac_irq_enable(struct irq_data *data);
 extern void mac_irq_disable(struct irq_data *data);
 
 /*
- *	Floppy driver magic hook - probably shouldn't be here
- */
-
-extern void via1_set_head(int);
-
-/*
  *	Macintosh Table
  */
 
@@ -48,7 +42,7 @@
 #define MAC_ADB_IOP		6
 
 #define MAC_VIA_II		1
-#define MAC_VIA_IIci		2
+#define MAC_VIA_IICI		2
 #define MAC_VIA_QUADRA		3
 
 #define MAC_SCSI_NONE		0
diff --git a/arch/m68k/include/asm/macints.h b/arch/m68k/include/asm/macints.h
index ebe1b70..92aa8a4 100644
--- a/arch/m68k/include/asm/macints.h
+++ b/arch/m68k/include/asm/macints.h
@@ -104,6 +104,9 @@
 #define IRQ_PSC4_3	  (35)
 #define IRQ_MAC_MACE_DMA  IRQ_PSC4_3
 
+/* OSS Level 4 interrupts */
+#define IRQ_MAC_SCC	  (33)
+
 /* Level 5 (PSC, AV Macs only) interrupts */
 #define IRQ_PSC5_0	  (40)
 #define IRQ_PSC5_1	  (41)
@@ -131,9 +134,6 @@
 #define IRQ_BABOON_2	  (66)
 #define IRQ_BABOON_3	  (67)
 
-/* On non-PSC machines, the serial ports share an IRQ */
-#define IRQ_MAC_SCC	  IRQ_AUTO_4
-
 #define SLOT2IRQ(x)	  (x + 47)
 #define IRQ2SLOT(x)	  (x - 47)
 
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
new file mode 100644
index 0000000..313f3dd
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -0,0 +1,102 @@
+#ifndef M68K_MCF_PGALLOC_H
+#define M68K_MCF_PGALLOC_H
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long) pte);
+}
+
+extern const char bad_pmd_string[];
+
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+	unsigned long address)
+{
+	unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+
+	if (!page)
+		return NULL;
+
+	memset((void *)page, 0, PAGE_SIZE);
+	return (pte_t *) (page);
+}
+
+extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+	return (pmd_t *) pgd;
+}
+
+#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })
+
+#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
+
+#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
+	(unsigned long)(page_address(page)))
+
+#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+				  unsigned long address)
+{
+	__free_page(page);
+}
+
+#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+	unsigned long address)
+{
+	struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+	pte_t *pte;
+
+	if (!page)
+		return NULL;
+
+	pte = kmap(page);
+	if (pte) {
+		clear_page(pte);
+		__flush_page_to_ram(pte);
+		flush_tlb_kernel_page(pte);
+		nocache_page(pte);
+	}
+	kunmap(page);
+
+	return page;
+}
+
+extern inline void pte_free(struct mm_struct *mm, struct page *page)
+{
+	__free_page(page);
+}
+
+/*
+ * In our implementation, each pgd entry contains 1 pmd that is never allocated
+ * or freed.  pgd_present is always 1, so this should never be called. -NL
+ */
+#define pmd_free(mm, pmd) BUG()
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	free_page((unsigned long) pgd);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *new_pgd;
+
+	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
+	if (!new_pgd)
+		return NULL;
+	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
+	return new_pgd;
+}
+
+#define pgd_populate(mm, pmd, pte) BUG()
+
+#endif /* M68K_MCF_PGALLOC_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
new file mode 100644
index 0000000..756bde4
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -0,0 +1,425 @@
+#ifndef _MCF_PGTABLE_H
+#define _MCF_PGTABLE_H
+
+#include <asm/mcfmmu.h>
+#include <asm/page.h>
+
+/*
+ * MMUDR bits, in proper place. We write these directly into the MMUDR
+ * after masking from the pte.
+ */
+#define CF_PAGE_LOCKED		MMUDR_LK	/* 0x00000002 */
+#define CF_PAGE_EXEC		MMUDR_X		/* 0x00000004 */
+#define CF_PAGE_WRITABLE	MMUDR_W		/* 0x00000008 */
+#define CF_PAGE_READABLE	MMUDR_R		/* 0x00000010 */
+#define CF_PAGE_SYSTEM		MMUDR_SP	/* 0x00000020 */
+#define CF_PAGE_COPYBACK	MMUDR_CM_CCB	/* 0x00000040 */
+#define CF_PAGE_NOCACHE		MMUDR_CM_NCP	/* 0x00000080 */
+
+#define CF_CACHEMASK		(~MMUDR_CM_CCB)
+#define CF_PAGE_MMUDR_MASK	0x000000fe
+
+#define _PAGE_NOCACHE030	CF_PAGE_NOCACHE
+
+/*
+ * MMUTR bits, need shifting down.
+ */
+#define CF_PAGE_MMUTR_MASK	0x00000c00
+#define CF_PAGE_MMUTR_SHIFT	10
+
+#define CF_PAGE_VALID		(MMUTR_V << CF_PAGE_MMUTR_SHIFT)
+#define CF_PAGE_SHARED		(MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
+
+/*
+ * Fake bits, not implemented in CF, will get masked out before
+ * hitting hardware.
+ */
+#define CF_PAGE_DIRTY		0x00000001
+#define CF_PAGE_FILE		0x00000200
+#define CF_PAGE_ACCESSED	0x00001000
+
+#define _PAGE_CACHE040		0x020   /* 68040 cache mode, cachable, copyback */
+#define _PAGE_NOCACHE_S		0x040   /* 68040 no-cache mode, serialized */
+#define _PAGE_NOCACHE		0x060   /* 68040 cache mode, non-serialized */
+#define _PAGE_CACHE040W		0x000   /* 68040 cache mode, cachable, write-through */
+#define _DESCTYPE_MASK		0x003
+#define _CACHEMASK040		(~0x060)
+#define _PAGE_GLOBAL040		0x400   /* 68040 global bit, used for kva descs */
+
+/*
+ * Externally used page protection values.
+ */
+#define _PAGE_PRESENT	(CF_PAGE_VALID)
+#define _PAGE_ACCESSED	(CF_PAGE_ACCESSED)
+#define _PAGE_DIRTY	(CF_PAGE_DIRTY)
+#define _PAGE_READWRITE (CF_PAGE_READABLE \
+				| CF_PAGE_WRITABLE \
+				| CF_PAGE_SYSTEM \
+				| CF_PAGE_SHARED)
+
+/*
+ * Compound page protection values.
+ */
+#define PAGE_NONE	__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED)
+
+#define PAGE_SHARED     __pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_SHARED)
+
+#define PAGE_INIT	__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_WRITABLE \
+				 | CF_PAGE_EXEC \
+				 | CF_PAGE_SYSTEM)
+
+#define PAGE_KERNEL	__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_WRITABLE \
+				 | CF_PAGE_EXEC \
+				 | CF_PAGE_SYSTEM)
+
+#define PAGE_COPY	__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_DIRTY)
+
+/*
+ * Page protections for initialising protection_map. See mm/mmap.c
+ * for use. In general, the bit positions are xwr; the P-items are
+ * private and the S-items are shared.
+ */
+#define __P000		PAGE_NONE
+#define __P001		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE)
+#define __P010		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_WRITABLE)
+#define __P011		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_WRITABLE)
+#define __P100		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_EXEC)
+#define __P101		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_EXEC)
+#define __P110		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_WRITABLE \
+				 | CF_PAGE_EXEC)
+#define __P111		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_WRITABLE \
+				 | CF_PAGE_EXEC)
+
+#define __S000		PAGE_NONE
+#define __S001		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE)
+#define __S010		PAGE_SHARED
+#define __S011		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_SHARED \
+				 | CF_PAGE_READABLE)
+#define __S100		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_EXEC)
+#define __S101		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_EXEC)
+#define __S110		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_SHARED \
+				 | CF_PAGE_EXEC)
+#define __S111		__pgprot(CF_PAGE_VALID \
+				 | CF_PAGE_ACCESSED \
+				 | CF_PAGE_SHARED \
+				 | CF_PAGE_READABLE \
+				 | CF_PAGE_EXEC)
+
+#define PTE_MASK	PAGE_MASK
+#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+#define pmd_set(pmdp, ptep) do {} while (0)
+
+static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+{
+	pgd_val(*pgdp) = virt_to_phys(pmdp);
+}
+
+#define __pte_page(pte)	((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pmd_page(pmd)	((unsigned long) (pmd_val(pmd)))
+
+static inline int pte_none(pte_t pte)
+{
+	return !pte_val(pte);
+}
+
+static inline int pte_present(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_VALID;
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+	pte_t *ptep)
+{
+	pte_val(*ptep) = 0;
+}
+
+#define pte_pagenr(pte)	((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define pte_page(pte)	virt_to_page(__pte_page(pte))
+
+static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+#define pmd_none(pmd) pmd_none2(&(pmd))
+static inline int pmd_bad2(pmd_t *pmd) { return 0; }
+#define pmd_bad(pmd) pmd_bad2(&(pmd))
+#define pmd_present(pmd) (!pmd_none2(&(pmd)))
+static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
+
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgdp) {}
+
+#define pte_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pte %08lx.\n",	\
+	__FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n",	\
+	__FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n",	\
+	__FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not...
+ * [we have the full set here even if they don't change from m68k]
+ */
+static inline int pte_read(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_READABLE;
+}
+
+static inline int pte_write(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_WRITABLE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_EXEC;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_ACCESSED;
+}
+
+static inline int pte_file(pte_t pte)
+{
+	return pte_val(pte) & CF_PAGE_FILE;
+}
+
+static inline int pte_special(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~CF_PAGE_WRITABLE;
+	return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+	pte_val(pte) &= ~CF_PAGE_READABLE;
+	return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	pte_val(pte) &= ~CF_PAGE_EXEC;
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~CF_PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~CF_PAGE_ACCESSED;
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= CF_PAGE_WRITABLE;
+	return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+	pte_val(pte) |= CF_PAGE_READABLE;
+	return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	pte_val(pte) |= CF_PAGE_EXEC;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= CF_PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= CF_PAGE_ACCESSED;
+	return pte;
+}
+
+static inline pte_t pte_mknocache(pte_t pte)
+{
+	pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
+	return pte;
+}
+
+static inline pte_t pte_mkcache(pte_t pte)
+{
+	pte_val(pte) &= ~CF_PAGE_NOCACHE;
+	return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return pte;
+}
+
+#define swapper_pg_dir kernel_pg_dir
+extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
+
+/*
+ * Find an entry in a pagetable directory.
+ */
+#define pgd_index(address)	((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
+
+/*
+ * Find an entry in a kernel pagetable directory.
+ */
+#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
+
+/*
+ * Find an entry in the second-level pagetable.
+ */
+static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
+{
+	return (pmd_t *) pgd;
+}
+
+/*
+ * Find an entry in the third-level pagetable.
+ */
+#define __pte_offset(address)	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+	((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
+
+/*
+ * Disable caching for page at given kernel virtual address.
+ */
+static inline void nocache_page(void *vaddr)
+{
+	pgd_t *dir;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long addr = (unsigned long) vaddr;
+
+	dir = pgd_offset_k(addr);
+	pmdp = pmd_offset(dir, addr);
+	ptep = pte_offset_kernel(pmdp, addr);
+	*ptep = pte_mknocache(*ptep);
+}
+
+/*
+ * Enable caching for page at given kernel virtual address.
+ */
+static inline void cache_page(void *vaddr)
+{
+	pgd_t *dir;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long addr = (unsigned long) vaddr;
+
+	dir = pgd_offset_k(addr);
+	pmdp = pmd_offset(dir, addr);
+	ptep = pte_offset_kernel(pmdp, addr);
+	*ptep = pte_mkcache(*ptep);
+}
+
+#define PTE_FILE_MAX_BITS	21
+#define PTE_FILE_SHIFT		11
+
+static inline unsigned long pte_to_pgoff(pte_t pte)
+{
+	return pte_val(pte) >> PTE_FILE_SHIFT;
+}
+
+static inline pte_t pgoff_to_pte(unsigned pgoff)
+{
+	return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
+}
+
+/*
+ * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
+ */
+#define __swp_type(x)		((x).val & 0xFF)
+#define __swp_offset(x)		((x).val >> PTE_FILE_SHIFT)
+#define __swp_entry(typ, off)	((swp_entry_t) { (typ) | \
+					(off << PTE_FILE_SHIFT) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	(__pte((x).val))
+
+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
+				       __pte_offset(addr))
+#define pte_unmap(pte)		((void) 0)
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+
+#endif	/* !__ASSEMBLY__ */
+#endif	/* _MCF_PGTABLE_H */
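
The non-present pte encodings at the end of the new header are easiest to see with
a concrete value. A small user-space sketch of the file-pte round trip, with
CF_PAGE_FILE and PTE_FILE_SHIFT copied from the header above and everything else
hypothetical:

#include <stdio.h>

#define CF_PAGE_FILE	0x00000200	/* fake "file" bit from mcf_pgtable.h */
#define PTE_FILE_SHIFT	11

int main(void)
{
	unsigned long pgoff = 5;
	unsigned long pte = (pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE;	/* pgoff_to_pte() */

	/* The file bit sits below PTE_FILE_SHIFT, so shifting back recovers pgoff. */
	printf("pte=0x%08lx pgoff=%lu\n", pte, pte >> PTE_FILE_SHIFT);	/* pte=0x00002a00 pgoff=5 */
	return 0;
}
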
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
new file mode 100644
index 0000000..26cc3d5
--- /dev/null
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -0,0 +1,112 @@
+/*
+ *	mcfmmu.h -- definitions for the ColdFire v4e MMU
+ *
+ *	(C) Copyright 2011,  Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef	MCFMMU_H
+#define	MCFMMU_H
+
+/*
+ *	The MMU support registers are mapped into the address space using
+ *	the processor MMUBASE register. We used a fixed address for mapping,
+ *	the processor MMUBASE register. We use a fixed address for this
+ *	mapping; there doesn't seem to be any need to make it configurable yet.
+#define	MMUBASE		0xfe000000
+
+/*
+ *	The support registers of the MMU. Names are the same as those
+ *	used in the Freescale v4e documentation.
+ */
+#define	MMUCR		(MMUBASE + 0x00)	/* Control register */
+#define	MMUOR		(MMUBASE + 0x04)	/* Operation register */
+#define	MMUSR		(MMUBASE + 0x08)	/* Status register */
+#define	MMUAR		(MMUBASE + 0x10)	/* TLB Address register */
+#define	MMUTR		(MMUBASE + 0x14)	/* TLB Tag register */
+#define	MMUDR		(MMUBASE + 0x18)	/* TLB Data register */
+
+/*
+ *	MMU Control register bit flags
+ */
+#define	MMUCR_EN	0x00000001		/* Virtual mode enable */
+#define	MMUCR_ASM	0x00000002		/* Address space mode */
+
+/*
+ *	MMU Operation register.
+ */
+#define	MMUOR_UAA	0x00000001		/* Update allocation address */
+#define	MMUOR_ACC	0x00000002		/* TLB access */
+#define	MMUOR_RD	0x00000004		/* TLB access read */
+#define	MMUOR_WR	0x00000000		/* TLB access write */
+#define	MMUOR_ADR	0x00000008		/* TLB address select */
+#define	MMUOR_ITLB	0x00000010		/* ITLB operation */
+#define	MMUOR_CAS	0x00000020		/* Clear non-locked ASID TLBs */
+#define	MMUOR_CNL	0x00000040		/* Clear non-locked TLBs */
+#define	MMUOR_CA	0x00000080		/* Clear all TLBs */
+#define	MMUOR_STLB	0x00000100		/* Search TLBs */
+#define	MMUOR_AAN	16			/* TLB allocation address */
+#define	MMUOR_AAMASK	0xffff0000		/* AA mask */
+
+/*
+ *	MMU Status register.
+ */
+#define	MMUSR_HIT	0x00000002		/* Search TLB hit */
+#define	MMUSR_WF	0x00000008		/* Write access fault */
+#define	MMUSR_RF	0x00000010		/* Read access fault */
+#define	MMUSR_SPF	0x00000020		/* Supervisor protect fault */
+
+/*
+ *	MMU Read/Write Tag register.
+ */
+#define	MMUTR_V		0x00000001		/* Valid */
+#define	MMUTR_SG	0x00000002		/* Shared global */
+#define	MMUTR_IDN	2			/* Address Space ID */
+#define	MMUTR_IDMASK	0x000003fc		/* ASID mask */
+#define	MMUTR_VAN	10			/* Virtual Address */
+#define	MMUTR_VAMASK	0xfffffc00		/* VA mask */
+
+/*
+ *	MMU Read/Write Data register.
+ */
+#define	MMUDR_LK	0x00000002		/* Lock entry */
+#define	MMUDR_X		0x00000004		/* Execute access enable */
+#define	MMUDR_W		0x00000008		/* Write access enable */
+#define	MMUDR_R		0x00000010		/* Read access enable */
+#define	MMUDR_SP	0x00000020		/* Supervisor access enable */
+#define	MMUDR_CM_CWT	0x00000000		/* Cachable write thru */
+#define	MMUDR_CM_CCB	0x00000040		/* Cachable copy back */
+#define	MMUDR_CM_NCP	0x00000080		/* Non-cachable precise */
+#define	MMUDR_CM_NCI	0x000000c0		/* Non-cachable imprecise */
+#define	MMUDR_SZ_1MB	0x00000000		/* 1MB page size */
+#define	MMUDR_SZ_4KB	0x00000100		/* 4kB page size */
+#define	MMUDR_SZ_8KB	0x00000200		/* 8kB page size */
+#define	MMUDR_SZ_1KB	0x00000300		/* 1kB page size */
+#define	MMUDR_PAN	10			/* Physical address */
+#define	MMUDR_PAMASK	0xfffffc00		/* PA mask */
+
+#ifndef __ASSEMBLY__
+
+/*
+ *	Simple access functions for the MMU registers. Nothing fancy
+ *	currently required, just simple 32bit access.
+ */
+static inline u32 mmu_read(u32 a)
+{
+	return *((volatile u32 *) a);
+}
+
+static inline void mmu_write(u32 a, u32 v)
+{
+	*((volatile u32 *) a) = v;
+	__asm__ __volatile__ ("nop");
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
+
+#endif
+
+#endif	/* MCFMMU_H */
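
The register accessors at the bottom are what the rest of the ColdFire MMU code
builds on; the real callers live in tlbflush.h and mmu_context.h elsewhere in this
series. As a hedged, standalone model of the pattern (MMUOR is a plain variable
here rather than the memory-mapped register, and the signature is simplified):

#include <stdint.h>
#include <stdio.h>

#define MMUOR_CNL	0x00000040	/* Clear non-locked TLBs */

static volatile uint32_t fake_mmuor;

static void mmu_write(volatile uint32_t *reg, uint32_t v)
{
	*reg = v;	/* the kernel version also issues a nop so the write settles */
}

int main(void)
{
	/* What __flush_tlb()/flush_tlb_all() amount to on ColdFire. */
	mmu_write(&fake_mmuor, MMUOR_CNL);
	printf("MMUOR <- 0x%08x\n", fake_mmuor);
	return 0;
}
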
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index 7d4341e..dc3be99 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -8,7 +8,206 @@
 }
 
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
+
+#if defined(CONFIG_COLDFIRE)
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mcfmmu.h>
+#include <asm/mmu.h>
+
+#define NO_CONTEXT		256
+#define LAST_CONTEXT		255
+#define FIRST_CONTEXT		1
+
+extern unsigned long context_map[];
+extern mm_context_t next_mmu_context;
+
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	mm_context_t ctx;
+
+	if (mm->context != NO_CONTEXT)
+		return;
+	while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+		atomic_inc(&nr_free_contexts);
+		steal_context();
+	}
+	ctx = next_mmu_context;
+	while (test_and_set_bit(ctx, context_map)) {
+		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+		if (ctx > LAST_CONTEXT)
+			ctx = 0;
+	}
+	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+	mm->context = ctx;
+	context_mm[ctx] = mm;
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+#define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		clear_bit(mm->context, context_map);
+		mm->context = NO_CONTEXT;
+		atomic_inc(&nr_free_contexts);
+	}
+}
+
+static inline void set_context(mm_context_t context, pgd_t *pgd)
+{
+	__asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *tsk)
+{
+	get_mmu_context(tsk->mm);
+	set_context(tsk->mm->context, next->pgd);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *active_mm,
+	struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	set_context(mm->context, mm->pgd);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+extern void mmu_context_init(void);
+#define prepare_arch_switch(next) load_ksp_mmu(next)
+
+static inline void load_ksp_mmu(struct task_struct *task)
+{
+	unsigned long flags;
+	struct mm_struct *mm;
+	int asid;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long mmuar;
+
+	local_irq_save(flags);
+	mmuar = task->thread.ksp;
+
+	/* Search for a valid TLB entry, if one is found, don't remap */
+	mmu_write(MMUAR, mmuar);
+	mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
+	if (mmu_read(MMUSR) & MMUSR_HIT)
+		goto end;
+
+	if (mmuar >= PAGE_OFFSET) {
+		mm = &init_mm;
+	} else {
+		pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
+		mm = task->mm;
+	}
+
+	if (!mm)
+		goto bug;
+
+	pgd = pgd_offset(mm, mmuar);
+	if (pgd_none(*pgd))
+		goto bug;
+
+	pmd = pmd_offset(pgd, mmuar);
+	if (pmd_none(*pmd))
+		goto bug;
+
+	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
+				     : pte_offset_map(pmd, mmuar);
+	if (pte_none(*pte) || !pte_present(*pte))
+		goto bug;
+
+	set_pte(pte, pte_mkyoung(*pte));
+	asid = mm->context & 0xff;
+	if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
+		set_pte(pte, pte_wrprotect(*pte));
+
+	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+	mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+
+	goto end;
+
+bug:
+	pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
+end:
+	local_irq_restore(flags);
+}
+
+#elif defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+#include <linux/sched.h>
+
+extern unsigned long get_free_context(struct mm_struct *mm);
+extern void clear_context(unsigned long context);
+
+/* set the context for a new task to unmapped */
+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
+{
+	mm->context = SUN3_INVALID_CONTEXT;
+	return 0;
+}
+
+/* find the context given to this process, and if it hasn't already
+   got one, go get one for it. */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	if (mm->context == SUN3_INVALID_CONTEXT)
+		mm->context = get_free_context(mm);
+}
+
+/* flush context if allocated... */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (mm->context != SUN3_INVALID_CONTEXT)
+		clear_context(mm->context);
+}
+
+static inline void activate_context(struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	sun3_put_context(mm->context);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	activate_context(tsk->mm);
+}
+
+#define deactivate_mm(tsk, mm)	do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+			       struct mm_struct *next_mm)
+{
+	activate_context(next_mm);
+}
+
+#else
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -103,55 +302,8 @@
 		switch_mm_0460(next_mm);
 }
 
-#else  /* CONFIG_SUN3 */
-#include <asm/sun3mmu.h>
-#include <linux/sched.h>
-
-extern unsigned long get_free_context(struct mm_struct *mm);
-extern void clear_context(unsigned long context);
-
-/* set the context for a new task to unmapped */
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-	mm->context = SUN3_INVALID_CONTEXT;
-	return 0;
-}
-
-/* find the context given to this process, and if it hasn't already
-   got one, go get one for it. */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
-	if(mm->context == SUN3_INVALID_CONTEXT)
-		mm->context = get_free_context(mm);
-}
-
-/* flush context if allocated... */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	if(mm->context != SUN3_INVALID_CONTEXT)
-		clear_context(mm->context);
-}
-
-static inline void activate_context(struct mm_struct *mm)
-{
-	get_mmu_context(mm);
-	sun3_put_context(mm->context);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-	activate_context(tsk->mm);
-}
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
-static inline void activate_mm(struct mm_struct *prev_mm,
-			       struct mm_struct *next_mm)
-{
-	activate_context(next_mm);
-}
-
 #endif
+
 #else /* !CONFIG_MMU */
 
 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
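
The ColdFire get_mmu_context() above hands out 8-bit ASIDs from a bitmap, stealing
one only when all are in use. A stripped-down, single-threaded sketch of that
allocation loop -- no atomics, no steal_context(), and it assumes a free ASID
exists -- purely to make the rotation through context_map visible:

#include <stdio.h>

#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

static unsigned char context_map[LAST_CONTEXT + 1];	/* one byte per ASID for simplicity */
static unsigned int next_mmu_context = FIRST_CONTEXT;

static unsigned int get_context(void)
{
	unsigned int ctx = next_mmu_context;

	/* Mirrors the kernel's test_and_set_bit()/find_next_zero_bit() loop. */
	while (context_map[ctx]) {
		while (ctx <= LAST_CONTEXT && context_map[ctx])
			ctx++;
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	context_map[ctx] = 1;
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	return ctx;
}

int main(void)
{
	printf("%u\n", get_context());	/* 1 */
	printf("%u\n", get_context());	/* 2 */
	printf("%u\n", get_context());	/* 3 */
	return 0;
}
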
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 45bd3f5..e0fdd4d 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -8,6 +8,7 @@
 #define _PAGE_PRESENT	0x001
 #define _PAGE_SHORT	0x002
 #define _PAGE_RONLY	0x004
+#define _PAGE_READWRITE	0x000
 #define _PAGE_ACCESSED	0x008
 #define _PAGE_DIRTY	0x010
 #define _PAGE_SUPER	0x080	/* 68040 supervisor only */
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index dfebb7c..98baa82 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -6,10 +6,10 @@
 #include <asm/page_offset.h>
 
 /* PAGE_SHIFT determines the page size */
-#ifndef CONFIG_SUN3
-#define PAGE_SHIFT	(12)
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#define PAGE_SHIFT	13
 #else
-#define PAGE_SHIFT	(13)
+#define PAGE_SHIFT	12
 #endif
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
@@ -36,6 +36,10 @@
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
+extern unsigned long _rambase;
+extern unsigned long _ramstart;
+extern unsigned long _ramend;
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_MMU
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index a8d1c60..9059572 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -5,9 +5,6 @@
  
 extern unsigned long memory_start;
 extern unsigned long memory_end;
-extern unsigned long _rambase;
-extern unsigned long _ramstart;
-extern unsigned long _ramend;
 
 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)	free_page(addr)
diff --git a/arch/m68k/include/asm/page_offset.h b/arch/m68k/include/asm/page_offset.h
index 1780152..82626a8 100644
--- a/arch/m68k/include/asm/page_offset.h
+++ b/arch/m68k/include/asm/page_offset.h
@@ -1,11 +1,9 @@
 /* This handles the memory map.. */
 
-#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define PAGE_OFFSET_RAW		0x00000000
-#else
+#if defined(CONFIG_RAMBASE)
+#define PAGE_OFFSET_RAW		CONFIG_RAMBASE
+#elif defined(CONFIG_SUN3)
 #define PAGE_OFFSET_RAW		0x0E000000
-#endif
 #else
-#define	PAGE_OFFSET_RAW		CONFIG_RAMBASE
+#define PAGE_OFFSET_RAW		0x00000000
 #endif
diff --git a/arch/m68k/include/asm/pgalloc.h b/arch/m68k/include/asm/pgalloc.h
index c294aad..37bee7e 100644
--- a/arch/m68k/include/asm/pgalloc.h
+++ b/arch/m68k/include/asm/pgalloc.h
@@ -7,7 +7,9 @@
 
 #ifdef CONFIG_MMU
 #include <asm/virtconvert.h>
-#ifdef CONFIG_SUN3
+#if defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgalloc.h>
+#elif defined(CONFIG_SUN3)
 #include <asm/sun3_pgalloc.h>
 #else
 #include <asm/motorola_pgalloc.h>
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 87174c9..dc35e0e 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -40,6 +40,8 @@
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #ifdef CONFIG_SUN3
 #define PGDIR_SHIFT     17
+#elif defined(CONFIG_COLDFIRE)
+#define PGDIR_SHIFT     22
 #else
 #define PGDIR_SHIFT	25
 #endif
@@ -54,6 +56,10 @@
 #define PTRS_PER_PTE   16
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
+#elif defined(CONFIG_COLDFIRE)
+#define PTRS_PER_PTE	512
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PGD	1024
 #else
 #define PTRS_PER_PTE	1024
 #define PTRS_PER_PMD	8
@@ -66,12 +72,22 @@
 #ifdef CONFIG_SUN3
 #define KMAP_START     0x0DC00000
 #define KMAP_END       0x0E000000
+#elif defined(CONFIG_COLDFIRE)
+#define KMAP_START	0xe0000000
+#define KMAP_END	0xf0000000
 #else
 #define	KMAP_START	0xd0000000
 #define	KMAP_END	0xf0000000
 #endif
 
-#ifndef CONFIG_SUN3
+#ifdef CONFIG_SUN3
+extern unsigned long m68k_vmalloc_end;
+#define VMALLOC_START 0x0f800000
+#define VMALLOC_END m68k_vmalloc_end
+#elif defined(CONFIG_COLDFIRE)
+#define VMALLOC_START	0xd0000000
+#define VMALLOC_END	0xe0000000
+#else
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that
@@ -82,11 +98,7 @@
 #define VMALLOC_OFFSET	(8*1024*1024)
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
-#else
-extern unsigned long m68k_vmalloc_end;
-#define VMALLOC_START 0x0f800000
-#define VMALLOC_END m68k_vmalloc_end
-#endif /* CONFIG_SUN3 */
+#endif
 
 /* zero page used for uninitialized stuff */
 extern void *empty_zero_page;
@@ -130,6 +142,8 @@
 
 #ifdef CONFIG_SUN3
 #include <asm/sun3_pgtable.h>
+#elif defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgtable.h>
 #else
 #include <asm/motorola_pgtable.h>
 #endif
@@ -138,6 +152,9 @@
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
+#ifdef CONFIG_COLDFIRE
+# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
+#else
 #ifdef SUN3_PAGE_NOCACHE
 # define __SUN3_PAGE_NOCACHE	SUN3_PAGE_NOCACHE
 #else
@@ -152,6 +169,7 @@
 	    ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
 	    : (prot)))
 
+#endif /* CONFIG_COLDFIRE */
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
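
For the ColdFire values added above, the two-level layout covers the full 32-bit
space: each pgd entry maps 512 ptes of 8KB pages (4MB, matching PGDIR_SHIFT of 22),
and 1024 such entries give 4GB. A tiny sanity check of that arithmetic:

#include <stdio.h>

#define PAGE_SHIFT	13	/* 8KB pages on ColdFire */
#define PTRS_PER_PTE	512
#define PTRS_PER_PGD	1024
#define PGDIR_SHIFT	22

int main(void)
{
	unsigned long long per_pgd = (unsigned long long)PTRS_PER_PTE << PAGE_SHIFT;
	unsigned long long total   = per_pgd * PTRS_PER_PGD;

	/* per_pgd = 4194304 (== 1 << PGDIR_SHIFT), total = 4294967296 */
	printf("%llu %d %llu\n", per_pgd, 1 << PGDIR_SHIFT, total);
	return 0;
}
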
 
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 568facf..46460fa 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -48,10 +48,12 @@
  * so don't change it unless you know what you are doing.
  */
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_SIZE	(0xF0000000UL)
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_SIZE	(0xC0000000UL)
+#elif defined(CONFIG_SUN3)
 #define TASK_SIZE	(0x0E000000UL)
+#else
+#define TASK_SIZE	(0xF0000000UL)
 #endif
 #else
 #define TASK_SIZE	(0xFFFFFFFFUL)
@@ -66,10 +68,12 @@
  * space during mmap's.
  */
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_UNMAPPED_BASE	0xC0000000UL
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_UNMAPPED_BASE	0x60000000UL
+#elif defined(CONFIG_SUN3)
 #define TASK_UNMAPPED_BASE	0x0A000000UL
+#else
+#define TASK_UNMAPPED_BASE	0xC0000000UL
 #endif
 #define TASK_UNMAPPED_ALIGN(addr, off)	PAGE_ALIGN(addr)
 #else
@@ -88,14 +92,12 @@
 	unsigned long  fp[8*3];
 	unsigned long  fpcntl[3];	/* fp control regs */
 	unsigned char  fpstate[FPSTATESIZE];  /* floating point state */
-	struct thread_info info;
 };
 
 #define INIT_THREAD  {							\
 	.ksp	= sizeof(init_stack) + (unsigned long) init_stack,	\
 	.sr	= PS_S,							\
 	.fs	= __KERNEL_DS,						\
-	.info	= INIT_THREAD_INFO(init_task),				\
 }
 
 #ifdef CONFIG_MMU
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index ee95921..0fa80e9 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -22,23 +22,26 @@
 } mm_segment_t;
 
 #define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-#define USER_DS		MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS	MAKE_MM_SEG(__KERNEL_DS)
 
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
 /*
  * Get/set the SFC/DFC registers for MOVES instructions
  */
+#define USER_DS		MAKE_MM_SEG(__USER_DS)
+#define KERNEL_DS	MAKE_MM_SEG(__KERNEL_DS)
 
 static inline mm_segment_t get_fs(void)
 {
-#ifdef CONFIG_MMU
 	mm_segment_t _v;
 	__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
-
 	return _v;
-#else
-	return USER_DS;
-#endif
+}
+
+static inline void set_fs(mm_segment_t val)
+{
+	__asm__ __volatile__ ("movec %0,%/sfc\n\t"
+			      "movec %0,%/dfc\n\t"
+			      : /* no outputs */ : "r" (val.seg) : "memory");
 }
 
 static inline mm_segment_t get_ds(void)
@@ -47,14 +50,13 @@
     return KERNEL_DS;
 }
 
-static inline void set_fs(mm_segment_t val)
-{
-#ifdef CONFIG_MMU
-	__asm__ __volatile__ ("movec %0,%/sfc\n\t"
-			      "movec %0,%/dfc\n\t"
-			      : /* no outputs */ : "r" (val.seg) : "memory");
+#else
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE)
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
 #endif
-}
 
 #define segment_eq(a,b)	((a).seg == (b).seg)
 
diff --git a/arch/m68k/include/asm/serial.h b/arch/m68k/include/asm/serial.h
index 2b90d6e..7267536 100644
--- a/arch/m68k/include/asm/serial.h
+++ b/arch/m68k/include/asm/serial.h
@@ -25,9 +25,11 @@
 #define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
 #endif
 
+#ifdef CONFIG_ISA
 #define SERIAL_PORT_DFNS			\
 	/* UART CLK   PORT IRQ     FLAGS        */			\
 	{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS },	/* ttyS0 */	\
 	{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS },	/* ttyS1 */	\
 	{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS },	/* ttyS2 */	\
 	{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS },	/* ttyS3 */
+#endif
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 4dfb395..00c2c53 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -40,6 +40,7 @@
 #define MACH_HP300    9
 #define MACH_Q40     10
 #define MACH_SUN3X   11
+#define MACH_M54XX   12
 
 #define COMMAND_LINE_SIZE 256
 
@@ -211,23 +212,27 @@
 #define CPUB_68030     1
 #define CPUB_68040     2
 #define CPUB_68060     3
+#define CPUB_COLDFIRE  4
 
 #define CPU_68020      (1<<CPUB_68020)
 #define CPU_68030      (1<<CPUB_68030)
 #define CPU_68040      (1<<CPUB_68040)
 #define CPU_68060      (1<<CPUB_68060)
+#define CPU_COLDFIRE   (1<<CPUB_COLDFIRE)
 
 #define FPUB_68881     0
 #define FPUB_68882     1
 #define FPUB_68040     2                       /* Internal FPU */
 #define FPUB_68060     3                       /* Internal FPU */
 #define FPUB_SUNFPA    4                       /* Sun-3 FPA */
+#define FPUB_COLDFIRE  5                       /* ColdFire FPU */
 
 #define FPU_68881      (1<<FPUB_68881)
 #define FPU_68882      (1<<FPUB_68882)
 #define FPU_68040      (1<<FPUB_68040)
 #define FPU_68060      (1<<FPUB_68060)
 #define FPU_SUNFPA     (1<<FPUB_SUNFPA)
+#define FPU_COLDFIRE   (1<<FPUB_COLDFIRE)
 
 #define MMUB_68851     0
 #define MMUB_68030     1                       /* Internal MMU */
@@ -235,6 +240,7 @@
 #define MMUB_68060     3                       /* Internal MMU */
 #define MMUB_APOLLO    4                       /* Custom Apollo */
 #define MMUB_SUN3      5                       /* Custom Sun-3 */
+#define MMUB_COLDFIRE  6                       /* Internal MMU */
 
 #define MMU_68851      (1<<MMUB_68851)
 #define MMU_68030      (1<<MMUB_68030)
@@ -242,6 +248,7 @@
 #define MMU_68060      (1<<MMUB_68060)
 #define MMU_SUN3       (1<<MMUB_SUN3)
 #define MMU_APOLLO     (1<<MMUB_APOLLO)
+#define MMU_COLDFIRE   (1<<MMUB_COLDFIRE)
 
 #ifdef __KERNEL__
 
@@ -341,6 +348,13 @@
 #  endif
 #endif
 
+#if !defined(CONFIG_COLDFIRE)
+#  define CPU_IS_COLDFIRE (0)
+#else
+#  define CPU_IS_COLDFIRE (1)
+#  define MMU_IS_COLDFIRE (1)
+#endif
+
 #define CPU_TYPE (m68k_cputype)
 
 #ifdef CONFIG_M68KFPU_EMU
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index a29dd74..523db2a 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -15,11 +15,7 @@
 	unsigned long  sc_pc;
 	unsigned short sc_formatvec;
 #ifndef __uClinux__
-# ifdef __mcoldfire__
-	unsigned long  sc_fpregs[2][2];	/* room for two fp registers */
-# else
 	unsigned long  sc_fpregs[2*3];  /* room for two fp registers */
-# endif
 	unsigned long  sc_fpcntl[3];
 	unsigned char  sc_fpstate[216];
 #endif
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h
index 9bf49c8..d4708ce 100644
--- a/arch/m68k/include/asm/socket.h
+++ b/arch/m68k/include/asm/socket.h
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 7909889..e8665e6 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/page.h>
+#include <asm/segment.h>
 
 /*
  * On machines with 4k pages we default to an 8k thread size, though we
@@ -26,6 +27,7 @@
 	struct task_struct	*task;		/* main task structure */
 	unsigned long		flags;
 	struct exec_domain	*exec_domain;	/* execution domain */
+	mm_segment_t		addr_limit;	/* thread address space */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	__u32			cpu;		/* should always be 0 on m68k */
 	unsigned long		tp_value;	/* thread pointer */
@@ -39,6 +41,7 @@
 {						\
 	.task		= &tsk,			\
 	.exec_domain	= &default_exec_domain,	\
+	.addr_limit	= KERNEL_DS,		\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
 	.restart_block = {			\
 		.fn = do_no_restart_syscall,	\
@@ -47,34 +50,6 @@
 
 #define init_stack		(init_thread_union.stack)
 
-#ifdef CONFIG_MMU
-
-#ifndef __ASSEMBLY__
-#include <asm/current.h>
-#endif
-
-#ifdef ASM_OFFSETS_C
-#define task_thread_info(tsk)	((struct thread_info *) NULL)
-#else
-#include <asm/asm-offsets.h>
-#define task_thread_info(tsk)	((struct thread_info *)((char *)tsk+TASK_TINFO))
-#endif
-
-#define init_thread_info	(init_task.thread.info)
-#define task_stack_page(tsk)	((tsk)->stack)
-#define current_thread_info()	task_thread_info(current)
-
-#define __HAVE_THREAD_FUNCTIONS
-
-#define setup_thread_stack(p, org) ({			\
-	*(struct task_struct **)(p)->stack = (p);	\
-	task_thread_info(p)->task = (p);		\
-})
-
-#define end_of_stack(p)		((unsigned long *)(p)->stack + 1)
-
-#else /* !CONFIG_MMU */
-
 #ifndef __ASSEMBLY__
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
@@ -92,8 +67,6 @@
 
 #define init_thread_info	(init_thread_union.thread_info)
 
-#endif /* CONFIG_MMU */
-
 /* entry.S relies on these definitions!
  * bits 0-7 are tested at every exception exit
  * bits 8-15 are also tested at syscall exit
@@ -103,7 +76,6 @@
 #define TIF_DELAYED_TRACE	14	/* single step a syscall */
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */
 #define TIF_MEMDIE		16	/* is terminating due to OOM killer */
-#define TIF_FREEZE		17	/* thread is freezing for suspend */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal */
 
 #endif	/* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index a6b4ed4..965ea35 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -5,10 +5,13 @@
 #ifndef CONFIG_SUN3
 
 #include <asm/current.h>
+#include <asm/mcfmmu.h>
 
 static inline void flush_tlb_kernel_page(void *addr)
 {
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		mmu_write(MMUOR, MMUOR_CNL);
+	} else if (CPU_IS_040_OR_060) {
 		mm_segment_t old_fs = get_fs();
 		set_fs(KERNEL_DS);
 		__asm__ __volatile__(".chip 68040\n\t"
@@ -25,12 +28,15 @@
  */
 static inline void __flush_tlb(void)
 {
-	if (CPU_IS_040_OR_060)
+	if (CPU_IS_COLDFIRE) {
+		mmu_write(MMUOR, MMUOR_CNL);
+	} else if (CPU_IS_040_OR_060) {
 		__asm__ __volatile__(".chip 68040\n\t"
 				     "pflushan\n\t"
 				     ".chip 68k");
-	else if (CPU_IS_020_OR_030)
+	} else if (CPU_IS_020_OR_030) {
 		__asm__ __volatile__("pflush #0,#4");
+	}
 }
 
 static inline void __flush_tlb040_one(unsigned long addr)
@@ -43,7 +49,9 @@
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	if (CPU_IS_040_OR_060)
+	if (CPU_IS_COLDFIRE)
+		mmu_write(MMUOR, MMUOR_CNL);
+	else if (CPU_IS_040_OR_060)
 		__flush_tlb040_one(addr);
 	else if (CPU_IS_020_OR_030)
 		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
@@ -56,12 +64,15 @@
  */
 static inline void flush_tlb_all(void)
 {
-	if (CPU_IS_040_OR_060)
+	if (CPU_IS_COLDFIRE) {
+		mmu_write(MMUOR, MMUOR_CNL);
+	} else if (CPU_IS_040_OR_060) {
 		__asm__ __volatile__(".chip 68040\n\t"
 				     "pflusha\n\t"
 				     ".chip 68k");
-	else if (CPU_IS_020_OR_030)
+	} else if (CPU_IS_020_OR_030) {
 		__asm__ __volatile__("pflusha");
+	}
 }
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 151068f..4aff335 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -18,6 +18,7 @@
 
 typedef void (*e_vector)(void);
 extern e_vector vectors[];
+extern e_vector *_ramvec;
 
 asmlinkage void auto_inthandler(void);
 asmlinkage void user_inthandler(void);
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
index b17fd11..89705ad 100644
--- a/arch/m68k/include/asm/types.h
+++ b/arch/m68k/include/asm/types.h
@@ -10,12 +10,6 @@
  */
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 7107f3fb..9c80cd5 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -21,6 +21,22 @@
 }
 
 /*
+ * Not all variants of the 68k family support the notion of address spaces.
+ * The traditional 680x0 parts do, and they use the sfc/dfc registers and
+ * the "moves" instruction to access user space from kernel space. Other
+ * family members like ColdFire don't support this, and only have a single
+ * address space, and use the usual "move" instruction for user space access.
+ *
+ * Outside of this difference, the user space access functions are the same.
+ * So let's keep the code simple and just define what we need to use.
+ */
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+#define	MOVES	"moves"
+#else
+#define	MOVES	"move"
+#endif
+
+/*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
  * the address at which the program should continue.  No registers are
@@ -43,7 +59,7 @@
 
 #define __put_user_asm(res, x, ptr, bwl, reg, err)	\
 asm volatile ("\n"					\
-	"1:	moves."#bwl"	%2,%1\n"		\
+	"1:	"MOVES"."#bwl"	%2,%1\n"		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.even\n"				\
@@ -83,8 +99,8 @@
  	    {								\
  		const void __user *__pu_ptr = (ptr);			\
 		asm volatile ("\n"					\
-			"1:	moves.l	%2,(%1)+\n"			\
-			"2:	moves.l	%R2,(%1)\n"			\
+			"1:	"MOVES".l	%2,(%1)+\n"		\
+			"2:	"MOVES".l	%R2,(%1)\n"		\
 			"3:\n"						\
 			"	.section .fixup,\"ax\"\n"		\
 			"	.even\n"				\
@@ -115,12 +131,12 @@
 #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({	\
 	type __gu_val;						\
 	asm volatile ("\n"					\
-		"1:	moves."#bwl"	%2,%1\n"		\
+		"1:	"MOVES"."#bwl"	%2,%1\n"		\
 		"2:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
 		"	.even\n"				\
 		"10:	move.l	%3,%0\n"			\
-		"	sub."#bwl"	%1,%1\n"		\
+		"	sub.l	%1,%1\n"			\
 		"	jra	2b\n"				\
 		"	.previous\n"				\
 		"\n"						\
@@ -152,8 +168,8 @@
  		const void *__gu_ptr = (ptr);				\
  		u64 __gu_val;						\
 		asm volatile ("\n"					\
-			"1:	moves.l	(%2)+,%1\n"			\
-			"2:	moves.l	(%2),%R1\n"			\
+			"1:	"MOVES".l	(%2)+,%1\n"		\
+			"2:	"MOVES".l	(%2),%R1\n"		\
 			"3:\n"						\
 			"	.section .fixup,\"ax\"\n"		\
 			"	.even\n"				\
@@ -188,12 +204,12 @@
 
 #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
 	asm volatile ("\n"						\
-		"1:	moves."#s1"	(%2)+,%3\n"			\
+		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
 		"	move."#s1"	%3,(%1)+\n"			\
-		"2:	moves."#s2"	(%2)+,%3\n"			\
+		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
 		"	move."#s2"	%3,(%1)+\n"			\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"3:	moves."#s3"	(%2)+,%3\n"			\
+		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
 		"	move."#s3"	%3,(%1)+\n"			\
 		"	.endif\n"					\
 		"4:\n"							\
@@ -269,13 +285,13 @@
 #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
 	asm volatile ("\n"						\
 		"	move."#s1"	(%2)+,%3\n"			\
-		"11:	moves."#s1"	%3,(%1)+\n"			\
+		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
 		"12:	move."#s2"	(%2)+,%3\n"			\
-		"21:	moves."#s2"	%3,(%1)+\n"			\
+		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
 		"22:\n"							\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
 		"	move."#s3"	(%2)+,%3\n"			\
-		"31:	moves."#s3"	%3,(%1)+\n"			\
+		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
 		"32:\n"							\
 		"	.endif\n"					\
 		"4:\n"							\
diff --git a/arch/m68k/include/asm/ucontext.h b/arch/m68k/include/asm/ucontext.h
index 00dcc51..e4e2266 100644
--- a/arch/m68k/include/asm/ucontext.h
+++ b/arch/m68k/include/asm/ucontext.h
@@ -7,11 +7,7 @@
 
 typedef struct fpregset {
 	int f_fpcntl[3];
-#ifdef __mcoldfire__
-	int f_fpregs[8][2];
-#else
 	int f_fpregs[8*3];
-#endif
 } fpregset_t;
 
 struct mcontext {
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 303192f..ea0b502 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -132,10 +132,10 @@
 #define __NR_adjtimex		124
 #define __NR_mprotect		125
 #define __NR_sigprocmask	126
-/*#define __NR_create_module	127*/
+#define __NR_create_module	127
 #define __NR_init_module	128
 #define __NR_delete_module	129
-/*#define __NR_get_kernel_syms	130*/
+#define __NR_get_kernel_syms	130
 #define __NR_quotactl		131
 #define __NR_getpgid		132
 #define __NR_fchdir		133
@@ -172,7 +172,7 @@
 #define __NR_setresuid		164
 #define __NR_getresuid		165
 #define __NR_getpagesize	166
-/*#define __NR_query_module	167*/
+#define __NR_query_module	167
 #define __NR_poll		168
 #define __NR_nfsservctl		169
 #define __NR_setresgid		170
@@ -193,8 +193,8 @@
 #define __NR_capset		185
 #define __NR_sigaltstack	186
 #define __NR_sendfile		187
-/*#define __NR_getpmsg		188*/	/* some people actually want streams */
-/*#define __NR_putpmsg		189*/	/* some people actually want streams */
+#define __NR_getpmsg		188	/* some people actually want streams */
+#define __NR_putpmsg		189	/* some people actually want streams */
 #define __NR_vfork		190
 #define __NR_ugetrlimit		191
 #define __NR_mmap2		192
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index c569619..40d29a78 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -2,19 +2,24 @@
 # Makefile for the linux kernel.
 #
 
-extra-$(CONFIG_MMU)	:= head.o
+extra-$(CONFIG_AMIGA)	:= head.o
+extra-$(CONFIG_ATARI)	:= head.o
+extra-$(CONFIG_MAC)	:= head.o
+extra-$(CONFIG_APOLLO)	:= head.o
+extra-$(CONFIG_VME)	:= head.o
+extra-$(CONFIG_HP300)	:= head.o
+extra-$(CONFIG_Q40)	:= head.o
+extra-$(CONFIG_SUN3X)	:= head.o
 extra-$(CONFIG_SUN3)	:= sun3-head.o
 extra-y			+= vmlinux.lds
 
-obj-y	:= entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \
-	   signal.o sys_m68k.o syscalltable.o time.o traps.o
+obj-y	:= entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o
+obj-y	+= setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
 
-obj-$(CONFIG_MMU)	+= ints.o vectors.o
+obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
+obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
 
 ifndef CONFIG_MMU_SUN3
-obj-y			+= dma.o
-endif
-ifndef CONFIG_MMU
-obj-y			+= init_task.o
+obj-y	+= dma.o
 endif
 
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 983fed9..a972b00 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -24,8 +24,7 @@
 	/* offsets into the task struct */
 	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-	DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
-	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+	DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
 
 	/* offsets into the thread struct */
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 081cf96..b8daf64 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -1,4 +1,4 @@
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 #include "entry_mm.S"
 #else
 #include "entry_no.S"
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index c713f51..675a854 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -99,7 +99,8 @@
 	jra	.Lret_from_exception
 
 ENTRY(ret_from_signal)
-	tstb	%curptr@(TASK_INFO+TINFO_FLAGS+2)
+	movel	%curptr@(TASK_STACK),%a1
+	tstb	%a1@(TINFO_FLAGS+2)
 	jge	1f
 	jbsr	syscall_trace
 1:	RESTORE_SWITCH_STACK
@@ -120,11 +121,13 @@
 	SAVE_ALL_SYS
 
 	GET_CURRENT(%d1)
+	movel	%d1,%a1
+
 	| save top of frame
 	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
 
 	| syscall trace?
-	tstb	%curptr@(TASK_INFO+TINFO_FLAGS+2)
+	tstb	%a1@(TINFO_FLAGS+2)
 	jmi	do_trace_entry
 	cmpl	#NR_syscalls,%d0
 	jcc	badsys
@@ -133,7 +136,8 @@
 	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
 ret_from_syscall:
 	|oriw	#0x0700,%sr
-	movew	%curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
+	movel	%curptr@(TASK_STACK),%a1
+	movew	%a1@(TINFO_FLAGS+2),%d0
 	jne	syscall_exit_work
 1:	RESTORE_ALL
 
@@ -159,7 +163,8 @@
 	andw	#ALLOWINT,%sr
 
 resume_userspace:
-	moveb	%curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
+	movel	%curptr@(TASK_STACK),%a1
+	moveb	%a1@(TINFO_FLAGS+3),%d0
 	jne	exit_work
 1:	RESTORE_ALL
 
@@ -199,7 +204,8 @@
 ENTRY(auto_inthandler)
 	SAVE_ALL_INT
 	GET_CURRENT(%d0)
-	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+	movel	%d0,%a1
+	addqb	#1,%a1@(TINFO_PREEMPT+1)
 					|  put exception # in d0
 	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 	subw	#VEC_SPUR,%d0
@@ -211,7 +217,8 @@
 	addql	#8,%sp			|  pop parameters off stack
 
 ret_from_interrupt:
-	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+	movel	%curptr@(TASK_STACK),%a1
+	subqb	#1,%a1@(TINFO_PREEMPT+1)
 	jeq	ret_from_last_interrupt
 2:	RESTORE_ALL
 
@@ -232,7 +239,8 @@
 ENTRY(user_inthandler)
 	SAVE_ALL_INT
 	GET_CURRENT(%d0)
-	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+	movel	%d0,%a1
+	addqb	#1,%a1@(TINFO_PREEMPT+1)
 					|  put exception # in d0
 	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 user_irqvec_fixup = . + 2
@@ -243,7 +251,8 @@
 	jsr	do_IRQ			|  process the IRQ
 	addql	#8,%sp			|  pop parameters off stack
 
-	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+	movel	%curptr@(TASK_STACK),%a1
+	subqb	#1,%a1@(TINFO_PREEMPT+1)
 	jeq	ret_from_last_interrupt
 	RESTORE_ALL
 
@@ -252,13 +261,15 @@
 ENTRY(bad_inthandler)
 	SAVE_ALL_INT
 	GET_CURRENT(%d0)
-	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+	movel	%d0,%a1
+	addqb	#1,%a1@(TINFO_PREEMPT+1)
 
 	movel	%sp,%sp@-
 	jsr	handle_badint
 	addql	#4,%sp
 
-	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+	movel	%curptr@(TASK_STACK),%a1
+	subqb	#1,%a1@(TINFO_PREEMPT+1)
 	jeq	ret_from_last_interrupt
 	RESTORE_ALL
 
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
index 1b42890..d80cba4 100644
--- a/arch/m68k/kernel/entry_no.S
+++ b/arch/m68k/kernel/entry_no.S
@@ -44,8 +44,7 @@
 
 ENTRY(buserr)
 	SAVE_ALL_INT
-	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_OFF_ORIG_D0)
+	GET_CURRENT(%d0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	buserr_c
 	addql	#4,%sp
@@ -53,8 +52,7 @@
 
 ENTRY(trap)
 	SAVE_ALL_INT
-	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_OFF_ORIG_D0)
+	GET_CURRENT(%d0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	trap_c
 	addql	#4,%sp
@@ -65,8 +63,7 @@
 .globl dbginterrupt
 ENTRY(dbginterrupt)
 	SAVE_ALL_INT
-	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_OFF_ORIG_D0)
+	GET_CURRENT(%d0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	dbginterrupt_c
 	addql	#4,%sp
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 27622b3..d197e7f 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -250,9 +250,8 @@
  * USE_MFP:	Use the ST-MFP port (Modem1) for serial debug.
  *
  * Macintosh constants:
- * MAC_SERIAL_DEBUG:	Turns on serial debug output for the Macintosh.
- * MAC_USE_SCC_A:	Use the SCC port A (modem) for serial debug.
- * MAC_USE_SCC_B:	Use the SCC port B (printer) for serial debug (default).
+ * MAC_USE_SCC_A: Use SCC port A (modem) for serial debug and early console.
+ * MAC_USE_SCC_B: Use SCC port B (printer) for serial debug and early console.
  */
 
 #include <linux/linkage.h>
@@ -268,33 +267,25 @@
 
 #include <asm/machw.h>
 
-/*
- * Macintosh console support
- */
-
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE
 #define CONSOLE
 #define CONSOLE_PENGUIN
 #endif
 
-/*
- * Macintosh serial debug support; outputs boot info to the printer
- *   and/or modem serial ports
- */
-#undef MAC_SERIAL_DEBUG
+#ifdef CONFIG_EARLY_PRINTK
+#define SERIAL_DEBUG
+#else
+#undef SERIAL_DEBUG
+#endif
 
-/*
- * Macintosh serial debug port selection; define one or both;
- *   requires MAC_SERIAL_DEBUG to be defined
- */
-#define MAC_USE_SCC_A		/* Macintosh modem serial port */
-#define MAC_USE_SCC_B		/* Macintosh printer serial port */
+#else /* !CONFIG_MAC */
 
-#endif	/* CONFIG_MAC */
+#define SERIAL_DEBUG
+
+#endif /* !CONFIG_MAC */
 
 #undef MMU_PRINT
 #undef MMU_NOCACHE_KERNEL
-#define SERIAL_DEBUG
 #undef DEBUG
 
 /*
@@ -655,11 +646,11 @@
 	lea	%pc@(L(mac_rowbytes)),%a1
 	movel	%a0@,%a1@
 
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
 	get_bi_record	BI_MAC_SCCBASE
 	lea	%pc@(L(mac_sccbase)),%a1
 	movel	%a0@,%a1@
-#endif /* MAC_SERIAL_DEBUG */
+#endif
 
 #if 0
 	/*
@@ -1427,7 +1418,7 @@
 	subl	%d0,L(console_font)
 	subl	%d0,L(console_font_data)
 #endif
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
 	orl	#0x50000000,L(mac_sccbase)
 #endif
 1:
@@ -1917,7 +1908,7 @@
 	jbne	30b
 
 mmu_print_done:
-	puts	"\n\n"
+	puts	"\n"
 
 func_return	mmu_print
 
@@ -2768,7 +2759,7 @@
 	.byte	9,0		/* no interrupts */
 	.byte	10,0		/* NRZ */
 	.byte	11,0x50		/* use baud rate generator */
-	.byte	12,10,13,0	/* 9600 baud */
+	.byte	12,1,13,0	/* 38400 baud */
 	.byte	14,1		/* Baud rate generator enable */
 	.byte	3,0xc1		/* enable receiver */
 	.byte	5,0xea		/* enable transmitter */
@@ -2906,10 +2897,12 @@
 #endif
 #ifdef CONFIG_MAC
 	is_not_mac(L(serial_init_not_mac))
-#ifdef MAC_SERIAL_DEBUG
-#if !defined(MAC_USE_SCC_A) && !defined(MAC_USE_SCC_B)
-#define MAC_USE_SCC_B
-#endif
+
+#ifdef SERIAL_DEBUG
+/* You may define either or both of these. */
+#define MAC_USE_SCC_A /* Modem port */
+#define MAC_USE_SCC_B /* Printer port */
+
 #define mac_scc_cha_b_ctrl_offset	0x0
 #define mac_scc_cha_a_ctrl_offset	0x2
 #define mac_scc_cha_b_data_offset	0x4
@@ -2940,7 +2933,7 @@
 	jra	7b
 8:
 #endif	/* MAC_USE_SCC_B */
-#endif	/* MAC_SERIAL_DEBUG */
+#endif	/* SERIAL_DEBUG */
 
 	jra	L(serial_init_done)
 L(serial_init_not_mac):
@@ -3011,7 +3004,7 @@
 #ifdef CONFIG_MAC
 	is_not_mac(5f)
 
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
 
 #ifdef MAC_USE_SCC_A
 	movel	%pc@(L(mac_sccbase)),%a1
@@ -3029,7 +3022,7 @@
 	moveb	%d0,%a1@(mac_scc_cha_b_data_offset)
 #endif	/* MAC_USE_SCC_B */
 
-#endif	/* MAC_SERIAL_DEBUG */
+#endif	/* SERIAL_DEBUG */
 
 	jra	L(serial_putc_done)
 5:
@@ -3248,33 +3241,39 @@
 
 #ifdef CONFIG_MAC
 /*
- *	mac_serial_print
+ *	mac_early_print
  *
  *	This routine takes its parameters on the stack.  It then
- *	turns around and calls the internal routine.  This routine
- *	is used until the Linux console driver initializes itself.
+ *	turns around and calls the internal routines.  This routine
+ *	is used by the boot console.
  *
  *	The calling parameters are:
- *		void mac_serial_print(const char *str);
+ *		void mac_early_print(const char *str, unsigned length);
  *
  *	This routine does NOT understand variable arguments only
  *	simple strings!
  */
-ENTRY(mac_serial_print)
-	moveml	%d0/%a0,%sp@-
-#if 1
-	move	%sr,%sp@-
+ENTRY(mac_early_print)
+	moveml	%d0/%d1/%a0,%sp@-
+	movew	%sr,%sp@-
 	ori	#0x0700,%sr
-#endif
-	movel	%sp@(10),%a0		/* fetch parameter */
+	movel	%sp@(18),%a0		/* fetch parameter */
+	movel	%sp@(22),%d1		/* fetch parameter */
 	jra	2f
-1:	serial_putc	%d0
-2:	moveb	%a0@+,%d0
-	jne	1b
-#if 1
-	move	%sp@+,%sr
+1:
+#ifdef CONSOLE
+	console_putc	%d0
 #endif
-	moveml	%sp@+,%d0/%a0
+#ifdef SERIAL_DEBUG
+	serial_putc	%d0
+#endif
+	subq	#1,%d1
+2:	jeq	3f
+	moveb	%a0@+,%d0
+	jne	1b
+3:
+	movew	%sp@+,%sr
+	moveml	%sp@+,%d0/%d1/%a0
 	rts
 #endif /* CONFIG_MAC */
 
@@ -3409,10 +3408,10 @@
 	 *		a0 = pointer to boot_info
 	 *		d7 = value of boot_info fields
 	 */
-	puts	"\nMacLinux\n\n"
+	puts	"\nMacLinux\n"
 
 #ifdef SERIAL_DEBUG
-	puts	" vidaddr:"
+	puts	"\n vidaddr:"
 	putn	%pc@(L(mac_videobase))		/* video addr. */
 
 	puts	"\n  _stext:"
@@ -3423,19 +3422,21 @@
 	lea	%pc@(_end),%a0
 	putn	%a0
 
-	puts	"\ncpuid:"
+	puts	"\n   cpuid:"
 	putn	%pc@(L(cputype))
-	putc	'\n'
 
-#ifdef MAC_SERIAL_DEBUG
+#  ifdef CONFIG_MAC
+	puts	"\n sccbase:"
 	putn	%pc@(L(mac_sccbase))
+#  endif
+#  ifdef MMU_PRINT
 	putc	'\n'
-#endif
-#  if defined(MMU_PRINT)
 	jbsr	mmu_print_machine_cpu_types
-#  endif /* MMU_PRINT */
+#  endif
 #endif /* SERIAL_DEBUG */
 
+	putc	'\n'
+
 func_return	console_put_stats
 
 #ifdef CONSOLE_PENGUIN
@@ -3896,11 +3897,11 @@
 	.long	0
 L(mac_rowbytes):
 	.long	0
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
 L(mac_sccbase):
 	.long	0
-#endif /* MAC_SERIAL_DEBUG */
 #endif
+#endif /* CONFIG_MAC */
 
 #if defined (CONFIG_APOLLO)
 LSRB0        = 0x10412
diff --git a/arch/m68k/kernel/init_task.c b/arch/m68k/kernel/init_task.c
index cbf9dc3..c744cfc 100644
--- a/arch/m68k/kernel/init_task.c
+++ b/arch/m68k/kernel/init_task.c
@@ -19,7 +19,6 @@
  *
  * All other task structs will be allocated on slabs in fork.c
  */
-__asm__(".align 4");
 struct task_struct init_task = INIT_TASK(init_task);
 
 EXPORT_SYMBOL(init_task);
@@ -27,7 +26,7 @@
 /*
  * Initial thread structure.
  *
- * We need to make sure that this is 8192-byte aligned due to the
+ * We need to make sure that this is THREAD size aligned due to the
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
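The updated comment says the initial thread structure must be THREAD size aligned and that the alignment comes from a dedicated "init_task" linker map entry. The conventional declaration behind that statement is the one the process_mm.c hunk further down removes; it is reproduced here only to make the alignment requirement concrete, not as new code in this patch:

    /* The THREAD_SIZE-aligned initial stack/thread_info union, as declared
     * in the process_mm.c lines removed later in this diff. */
    union thread_union init_thread_union __init_task_data
        __attribute__((aligned(THREAD_SIZE))) =
            { INIT_THREAD_INFO(init_task) };
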
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 1b7a14d..774c1bd 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -14,7 +14,7 @@
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
 /*
  * Simpler 68k and ColdFire parts also need a few other gcc functions.
  */
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c
index 1bc223a..125f34e 100644
--- a/arch/m68k/kernel/process_mm.c
+++ b/arch/m68k/kernel/process_mm.c
@@ -33,22 +33,6 @@
 #include <asm/setup.h>
 #include <asm/pgtable.h>
 
-/*
- * Initial task/thread structure. Make this a per-architecture thing,
- * because different architectures tend to have different
- * alignment requirements and potentially different initial
- * setup.
- */
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union __init_task_data
-	__attribute__((aligned(THREAD_SIZE))) =
-		{ INIT_THREAD_INFO(init_task) };
-
-/* initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
 
 asmlinkage void ret_from_fork(void);
 
@@ -188,9 +172,7 @@
 
 	current->thread.fs = __USER_DS;
 	if (!FPU_IS_EMU)
-		asm volatile (".chip 68k/68881\n\t"
-			      "frestore %0@\n\t"
-			      ".chip 68k" : : "a" (&zero));
+		asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
 }
 
 /*
@@ -264,11 +246,28 @@
 		/* Copy the current fpu state */
 		asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
 
-		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
-		  asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
-				"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
-				: : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
-				: "memory");
+		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
+			if (CPU_IS_COLDFIRE) {
+				asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
+					      "fmovel %/fpiar,%1\n\t"
+					      "fmovel %/fpcr,%2\n\t"
+					      "fmovel %/fpsr,%3"
+					      :
+					      : "m" (p->thread.fp[0]),
+						"m" (p->thread.fpcntl[0]),
+						"m" (p->thread.fpcntl[1]),
+						"m" (p->thread.fpcntl[2])
+					      : "memory");
+			} else {
+				asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+					      "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+					      :
+					      : "m" (p->thread.fp[0]),
+						"m" (p->thread.fpcntl[0])
+					      : "memory");
+			}
+		}
+
 		/* Restore the state in case the fpu was busy */
 		asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
 	}
@@ -301,12 +300,28 @@
 	if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
 		return 0;
 
-	asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
-		:: "m" (fpu->fpcntl[0])
-		: "memory");
-	asm volatile ("fmovemx %/fp0-%/fp7,%0"
-		:: "m" (fpu->fpregs[0])
-		: "memory");
+	if (CPU_IS_COLDFIRE) {
+		asm volatile ("fmovel %/fpiar,%0\n\t"
+			      "fmovel %/fpcr,%1\n\t"
+			      "fmovel %/fpsr,%2\n\t"
+			      "fmovemd %/fp0-%/fp7,%3"
+			      :
+			      : "m" (fpu->fpcntl[0]),
+				"m" (fpu->fpcntl[1]),
+				"m" (fpu->fpcntl[2]),
+				"m" (fpu->fpregs[0])
+			      : "memory");
+	} else {
+		asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+			      :
+			      : "m" (fpu->fpcntl[0])
+			      : "memory");
+		asm volatile ("fmovemx %/fp0-%/fp7,%0"
+			      :
+			      : "m" (fpu->fpregs[0])
+			      : "memory");
+	}
+
 	return 1;
 }
 EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/m68k/kernel/ptrace_mm.c b/arch/m68k/kernel/ptrace_mm.c
index 0b25268..7bc999b 100644
--- a/arch/m68k/kernel/ptrace_mm.c
+++ b/arch/m68k/kernel/ptrace_mm.c
@@ -18,6 +18,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/signal.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -275,3 +276,20 @@
 		current->exit_code = 0;
 	}
 }
+
+#ifdef CONFIG_COLDFIRE
+asmlinkage int syscall_trace_enter(void)
+{
+	int ret = 0;
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		ret = tracehook_report_syscall_entry(task_pt_regs(current));
+	return ret;
+}
+
+asmlinkage void syscall_trace_leave(void)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(task_pt_regs(current), 0);
+}
+#endif /* CONFIG_COLDFIRE */
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index c3b4506..d872ce4 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -221,7 +221,8 @@
 #endif
 
 	/* The bootinfo is located right after the kernel bss */
-	m68k_parse_bootinfo((const struct bi_record *)_end);
+	if (!CPU_IS_COLDFIRE)
+		m68k_parse_bootinfo((const struct bi_record *)_end);
 
 	if (CPU_IS_040)
 		m68k_is040or060 = 4;
@@ -235,7 +236,7 @@
 	 *  with them, we should add a test to check_bugs() below] */
 #ifndef CONFIG_M68KFPU_EMU_ONLY
 	/* clear the fpu if we have one */
-	if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
+	if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
 		volatile int zero = 0;
 		asm volatile ("frestore %0" : : "m" (zero));
 	}
@@ -258,6 +259,10 @@
 	init_mm.end_data = (unsigned long)_edata;
 	init_mm.brk = (unsigned long)_end;
 
+#if defined(CONFIG_BOOTPARAM)
+	strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
+	m68k_command_line[CL_SIZE - 1] = 0;
+#endif /* CONFIG_BOOTPARAM */
 	*cmdline_p = m68k_command_line;
 	memcpy(boot_command_line, *cmdline_p, CL_SIZE);
 
@@ -323,6 +328,11 @@
 		config_sun3x();
 		break;
 #endif
+#ifdef CONFIG_COLDFIRE
+	case MACH_M54XX:
+		config_BSP(NULL, 0);
+		break;
+#endif
 	default:
 		panic("No configuration setup");
 	}
@@ -384,6 +394,7 @@
 #define LOOP_CYCLES_68030	(8)
 #define LOOP_CYCLES_68040	(3)
 #define LOOP_CYCLES_68060	(1)
+#define LOOP_CYCLES_COLDFIRE	(2)
 
 	if (CPU_IS_020) {
 		cpu = "68020";
@@ -397,6 +408,9 @@
 	} else if (CPU_IS_060) {
 		cpu = "68060";
 		clockfactor = LOOP_CYCLES_68060;
+	} else if (CPU_IS_COLDFIRE) {
+		cpu = "ColdFire";
+		clockfactor = LOOP_CYCLES_COLDFIRE;
 	} else {
 		cpu = "680x0";
 		clockfactor = 0;
@@ -415,6 +429,8 @@
 		fpu = "68060";
 	else if (m68k_fputype & FPU_SUNFPA)
 		fpu = "Sun FPA";
+	else if (m68k_fputype & FPU_COLDFIRE)
+		fpu = "ColdFire";
 	else
 		fpu = "none";
 #endif
@@ -431,6 +447,8 @@
 		mmu = "Sun-3";
 	else if (m68k_mmutype & MMU_APOLLO)
 		mmu = "Apollo";
+	else if (m68k_mmutype & MMU_COLDFIRE)
+		mmu = "ColdFire";
 	else
 		mmu = "unknown";
 
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 2ed8c0f..ca3df0d 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -47,7 +47,6 @@
 char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
-void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
 int (*mach_set_clock_mmss)(unsigned long);
 
 /* machine dependent reboot functions */
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c
index a0afc23..cb856f9 100644
--- a/arch/m68k/kernel/signal_mm.c
+++ b/arch/m68k/kernel/signal_mm.c
@@ -56,7 +56,11 @@
   [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
   [2]	= sizeof(((struct frame *)0)->un.fmt2),
   [3]	= sizeof(((struct frame *)0)->un.fmt3),
+#ifdef CONFIG_COLDFIRE
+  [4]	= 0,
+#else
   [4]	= sizeof(((struct frame *)0)->un.fmt4),
+#endif
   [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
   [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
   [7]	= sizeof(((struct frame *)0)->un.fmt7),
@@ -84,7 +88,11 @@
 	regs->stkadj = frame_extra_sizes[regs->format];
 	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
 	tregs->vector = regs->vector;
+#ifdef CONFIG_COLDFIRE
+	tregs->format = 4;
+#else
 	tregs->format = 0;
+#endif
 	tregs->pc = fixup->fixup;
 	tregs->sr = regs->sr;
 
@@ -195,7 +203,8 @@
 
 	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 	    /* Verify the frame format.  */
-	    if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+		 (sc->sc_fpstate[0] != fpu_version))
 		goto out;
 	    if (CPU_IS_020_OR_030) {
 		if (m68k_fputype & FPU_68881 &&
@@ -214,19 +223,43 @@
                       sc->sc_fpstate[3] == 0x60 ||
 		      sc->sc_fpstate[3] == 0xe0))
 		    goto out;
+	    } else if (CPU_IS_COLDFIRE) {
+		if (!(sc->sc_fpstate[0] == 0x00 ||
+		      sc->sc_fpstate[0] == 0x05 ||
+		      sc->sc_fpstate[0] == 0xe5))
+		    goto out;
 	    } else
 		goto out;
 
-	    __asm__ volatile (".chip 68k/68881\n\t"
-			      "fmovemx %0,%%fp0-%%fp1\n\t"
-			      "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-			      ".chip 68k"
-			      : /* no outputs */
-			      : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
+	    if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
+				  "fmovel %1,%%fpcr\n\t"
+				  "fmovel %2,%%fpsr\n\t"
+				  "fmovel %3,%%fpiar"
+				  : /* no outputs */
+				  : "m" (sc->sc_fpregs[0]),
+				    "m" (sc->sc_fpcntl[0]),
+				    "m" (sc->sc_fpcntl[1]),
+				    "m" (sc->sc_fpcntl[2]));
+	    } else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fmovemx %0,%%fp0-%%fp1\n\t"
+				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+				  ".chip 68k"
+				  : /* no outputs */
+				  : "m" (*sc->sc_fpregs),
+				    "m" (*sc->sc_fpcntl));
+	    }
 	}
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "frestore %0\n\t"
-			  ".chip 68k" : : "m" (*sc->sc_fpstate));
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "frestore %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*sc->sc_fpstate));
+	}
 	err = 0;
 
 out:
@@ -241,7 +274,7 @@
 static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 {
 	unsigned char fpstate[FPCONTEXT_SIZE];
-	int context_size = CPU_IS_060 ? 8 : 0;
+	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 	fpregset_t fpregs;
 	int err = 1;
 
@@ -260,10 +293,11 @@
 	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
 		goto out;
 	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
-		if (!CPU_IS_060)
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 			context_size = fpstate[1];
 		/* Verify the frame format.  */
-		if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+		     (fpstate[0] != fpu_version))
 			goto out;
 		if (CPU_IS_020_OR_030) {
 			if (m68k_fputype & FPU_68881 &&
@@ -282,26 +316,50 @@
 			      fpstate[3] == 0x60 ||
 			      fpstate[3] == 0xe0))
 				goto out;
+		} else if (CPU_IS_COLDFIRE) {
+			if (!(fpstate[3] == 0x00 ||
+			      fpstate[3] == 0x05 ||
+			      fpstate[3] == 0xe5))
+				goto out;
 		} else
 			goto out;
 		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
 				     sizeof(fpregs)))
 			goto out;
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %0,%%fp0-%%fp7\n\t"
-				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-				  ".chip 68k"
-				  : /* no outputs */
-				  : "m" (*fpregs.f_fpregs),
-				    "m" (*fpregs.f_fpcntl));
+
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
+					  "fmovel %1,%%fpcr\n\t"
+					  "fmovel %2,%%fpsr\n\t"
+					  "fmovel %3,%%fpiar"
+					  : /* no outputs */
+					  : "m" (fpregs.f_fpregs[0]),
+					    "m" (fpregs.f_fpcntl[0]),
+					    "m" (fpregs.f_fpcntl[1]),
+					    "m" (fpregs.f_fpcntl[2]));
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %0,%%fp0-%%fp7\n\t"
+					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+					  ".chip 68k"
+					  : /* no outputs */
+					  : "m" (*fpregs.f_fpregs),
+					    "m" (*fpregs.f_fpcntl));
+		}
 	}
 	if (context_size &&
 	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
 			     context_size))
 		goto out;
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "frestore %0\n\t"
-			  ".chip 68k" : : "m" (*fpstate));
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "frestore %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*fpstate));
+	}
 	err = 0;
 
 out:
@@ -336,8 +394,12 @@
 		regs->format = formatvec >> 12;
 		regs->vector = formatvec & 0xfff;
 #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-		__asm__ __volatile__
-			("   movel %0,%/a0\n\t"
+		__asm__ __volatile__ (
+#ifdef CONFIG_COLDFIRE
+			 "   movel %0,%/sp\n\t"
+			 "   bra ret_from_signal\n"
+#else
+			 "   movel %0,%/a0\n\t"
 			 "   subl %1,%/a0\n\t"     /* make room on stack */
 			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
 			 /* move switch_stack and pt_regs */
@@ -350,6 +412,7 @@
 			 "2: movel %4@+,%/a0@+\n\t"
 			 "   dbra %1,2b\n\t"
 			 "   bral ret_from_signal\n"
+#endif
 			 : /* no outputs, it doesn't ever return */
 			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
 			   "n" (frame_offset), "a" (buf + fsize/4)
@@ -516,10 +579,15 @@
 		return;
 	}
 
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "fsave %0\n\t"
-			  ".chip 68k"
-			  : : "m" (*sc->sc_fpstate) : "memory");
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fsave %0"
+				  : : "m" (*sc->sc_fpstate) : "memory");
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fsave %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*sc->sc_fpstate) : "memory");
+	}
 
 	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 		fpu_version = sc->sc_fpstate[0];
@@ -530,21 +598,35 @@
 			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
 				sc->sc_fpstate[0x38] |= 1 << 3;
 		}
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %%fp0-%%fp1,%0\n\t"
-				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-				  ".chip 68k"
-				  : "=m" (*sc->sc_fpregs),
-				    "=m" (*sc->sc_fpcntl)
-				  : /* no inputs */
-				  : "memory");
+
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
+					  "fmovel %%fpcr,%1\n\t"
+					  "fmovel %%fpsr,%2\n\t"
+					  "fmovel %%fpiar,%3"
+					  : "=m" (sc->sc_fpregs[0]),
+					    "=m" (sc->sc_fpcntl[0]),
+					    "=m" (sc->sc_fpcntl[1]),
+					    "=m" (sc->sc_fpcntl[2])
+					  : /* no inputs */
+					  : "memory");
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %%fp0-%%fp1,%0\n\t"
+					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+					  ".chip 68k"
+					  : "=m" (*sc->sc_fpregs),
+					    "=m" (*sc->sc_fpcntl)
+					  : /* no inputs */
+					  : "memory");
+		}
 	}
 }
 
 static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 {
 	unsigned char fpstate[FPCONTEXT_SIZE];
-	int context_size = CPU_IS_060 ? 8 : 0;
+	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 	int err = 0;
 
 	if (FPU_IS_EMU) {
@@ -557,15 +639,19 @@
 		return err;
 	}
 
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "fsave %0\n\t"
-			  ".chip 68k"
-			  : : "m" (*fpstate) : "memory");
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fsave %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*fpstate) : "memory");
+	}
 
 	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
 	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 		fpregset_t fpregs;
-		if (!CPU_IS_060)
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 			context_size = fpstate[1];
 		fpu_version = fpstate[0];
 		if (CPU_IS_020_OR_030 &&
@@ -575,14 +661,27 @@
 			if (*(unsigned short *) fpstate == 0x1f38)
 				fpstate[0x38] |= 1 << 3;
 		}
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %%fp0-%%fp7,%0\n\t"
-				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-				  ".chip 68k"
-				  : "=m" (*fpregs.f_fpregs),
-				    "=m" (*fpregs.f_fpcntl)
-				  : /* no inputs */
-				  : "memory");
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
+					  "fmovel %%fpcr,%1\n\t"
+					  "fmovel %%fpsr,%2\n\t"
+					  "fmovel %%fpiar,%3"
+					  : "=m" (fpregs.f_fpregs[0]),
+					    "=m" (fpregs.f_fpcntl[0]),
+					    "=m" (fpregs.f_fpcntl[1]),
+					    "=m" (fpregs.f_fpcntl[2])
+					  : /* no inputs */
+					  : "memory");
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %%fp0-%%fp7,%0\n\t"
+					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+					  ".chip 68k"
+					  : "=m" (*fpregs.f_fpregs),
+					    "=m" (*fpregs.f_fpcntl)
+					  : /* no inputs */
+					  : "memory");
+		}
 		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
 				    sizeof(fpregs));
 	}
@@ -679,8 +778,7 @@
 				      "cpushl %%bc,(%0)\n\t"
 				      ".chip 68k"
 				      : : "a" (temp));
-	}
-	else {
+	} else if (!CPU_IS_COLDFIRE) {
 		/*
 		 * 68030/68020 have no writeback cache;
 		 * still need to clear icache.
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index a5cf40c..75ab79b 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -1,4 +1,4 @@
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 #include "time_mm.c"
 #else
 #include "time_no.c"
diff --git a/arch/m68k/kernel/time_no.c b/arch/m68k/kernel/time_no.c
index 6623909..3ef0f77 100644
--- a/arch/m68k/kernel/time_no.c
+++ b/arch/m68k/kernel/time_no.c
@@ -26,6 +26,9 @@
 
 #define	TICK_SIZE (tick_nsec / 1000)
 
+/* machine dependent timer functions */
+void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
+
 static inline int set_rtc_mmss(unsigned long nowtime)
 {
 	if (mach_set_clock_mmss)
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 89362f2..a76452c 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -706,6 +706,88 @@
 #endif /* CPU_M68020_OR_M68030 */
 #endif /* !CONFIG_SUN3 */
 
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#include <asm/mcfmmu.h>
+
+/*
+ *	The following table converts the FS encoding of a ColdFire
+ *	exception stack frame into the error_code value needed by
+ *	do_fault.
+*/
+static const unsigned char fs_err_code[] = {
+	0,  /* 0000 */
+	0,  /* 0001 */
+	0,  /* 0010 */
+	0,  /* 0011 */
+	1,  /* 0100 */
+	0,  /* 0101 */
+	0,  /* 0110 */
+	0,  /* 0111 */
+	2,  /* 1000 */
+	3,  /* 1001 */
+	2,  /* 1010 */
+	0,  /* 1011 */
+	1,  /* 1100 */
+	1,  /* 1101 */
+	0,  /* 1110 */
+	0   /* 1111 */
+};
+
+static inline void access_errorcf(unsigned int fs, struct frame *fp)
+{
+	unsigned long mmusr, addr;
+	unsigned int err_code;
+	int need_page_fault;
+
+	mmusr = mmu_read(MMUSR);
+	addr = mmu_read(MMUAR);
+
+	/*
+	 * error_code:
+	 *	bit 0 == 0 means no page found, 1 means protection fault
+	 *	bit 1 == 0 means read, 1 means write
+	 */
+	switch (fs) {
+	case  5:  /* 0101 TLB opword X miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
+		addr = fp->ptregs.pc;
+		break;
+	case  6:  /* 0110 TLB extension word X miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
+		addr = fp->ptregs.pc + sizeof(long);
+		break;
+	case 10:  /* 1010 TLB W miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
+		break;
+	case 14: /* 1110 TLB R miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
+		break;
+	default:
+		/* 0000 Normal  */
+		/* 0001 Reserved */
+		/* 0010 Interrupt during debug service routine */
+		/* 0011 Reserved */
+		/* 0100 X Protection */
+		/* 0111 IFP in emulator mode */
+		/* 1000 W Protection*/
+		/* 1001 Write error*/
+		/* 1011 Reserved*/
+		/* 1100 R Protection*/
+		/* 1101 R Protection*/
+		/* 1111 OEP in emulator mode*/
+		need_page_fault = 1;
+		break;
+	}
+
+	if (need_page_fault) {
+		err_code = fs_err_code[fs];
+		if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
+			err_code |= 2; /* bit1 - write, bit0 - protection */
+		do_page_fault(&fp->ptregs, addr, err_code);
+	}
+}
+#endif /* CONFIG_COLDFIRE CONFIG_MMU */
+
 asmlinkage void buserr_c(struct frame *fp)
 {
 	/* Only set esp0 if coming from user mode */
@@ -716,6 +798,28 @@
 	printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
 #endif
 
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+	if (CPU_IS_COLDFIRE) {
+		unsigned int fs;
+		fs = (fp->ptregs.vector & 0x3) |
+			((fp->ptregs.vector & 0xc00) >> 8);
+		switch (fs) {
+		case 0x5:
+		case 0x6:
+		case 0x7:
+		case 0x9:
+		case 0xa:
+		case 0xd:
+		case 0xe:
+		case 0xf:
+			access_errorcf(fs, fp);
+			return;
+		default:
+			break;
+		}
+	}
+#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
+
 	switch (fp->ptregs.format) {
 #if defined (CONFIG_M68060)
 	case 4:				/* 68060 access error */
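The table and comments added above define how the four-bit FS code from a ColdFire access-error frame maps onto do_page_fault()'s error_code (bit 0: protection fault, bit 1: write access). A small self-contained sketch of that decoding, using the same bit extraction as the buserr_c() hunk (the sample vector value and the write-pending flag are made up for illustration):

    /* Standalone illustration of the FS decoding above; compiles with any
     * C compiler.  Only the table contents and the bit extraction come
     * from the patch, the rest is scaffolding. */
    #include <stdio.h>

    static const unsigned char fs_err_code[16] = {
        0, 0, 0, 0, 1, 0, 0, 0, 2, 3, 2, 0, 1, 1, 0, 0
    };

    static unsigned int cf_error_code(unsigned int vector, int write_pending)
    {
        /* FS bits 1..0 and 3..2 live in different parts of the vector
         * word, exactly as assembled in buserr_c() above. */
        unsigned int fs = (vector & 0x3) | ((vector & 0xc00) >> 8);
        unsigned int err = fs_err_code[fs];

        if (fs == 13 && write_pending)  /* read-modify-write access */
            err |= 2;                   /* bit 1: treat it as a write */
        return err;
    }

    int main(void)
    {
        /* 0xc01 is a hypothetical vector word encoding FS = 13 (R protection). */
        printf("error_code = %u\n", cf_error_code(0xc01, 1));  /* prints 3 */
        return 0;
    }
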
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux-nommu.lds
similarity index 96%
rename from arch/m68k/kernel/vmlinux.lds_no.S
rename to arch/m68k/kernel/vmlinux-nommu.lds
index 4e23893..8e66ccb 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -69,6 +69,7 @@
 		SCHED_TEXT
 		LOCK_TEXT
 		*(.text..lock)
+		*(.fixup)
 
 		. = ALIGN(16);          /* Exception table              */
 		__start___ex_table = .;
@@ -161,6 +162,13 @@
 		_edata = . ;
 	} > DATA
 
+	.m68k_fixup : {
+		__start_fixup = .;
+		*(.m68k_fixup)
+		__stop_fixup = .;
+	} > DATA
+	NOTES > DATA
+
 	.init.text : {
 		. = ALIGN(PAGE_SIZE);
 		__init_begin = .;
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index d099359..63407c8 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,7 +31,9 @@
 
   RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
 
+  _sbss = .;
   BSS_SECTION(0, 0, 0)
+  _ebss = .;
 
   _edata = .;			/* End of data section */
 
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 8080469..ad0f46d 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,7 +44,9 @@
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 
+  _sbss = .;
   BSS_SECTION(0, 0, 0)
+  _ebss = .;
 
   _end = . ;
 
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
index 030dabf..69ec796 100644
--- a/arch/m68k/kernel/vmlinux.lds.S
+++ b/arch/m68k/kernel/vmlinux.lds.S
@@ -1,5 +1,14 @@
-#ifdef CONFIG_MMU
-#include "vmlinux.lds_mm.S"
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+PHDRS
+{
+  text PT_LOAD FILEHDR PHDRS FLAGS (7);
+  data PT_LOAD FLAGS (7);
+}
+#ifdef CONFIG_SUN3
+#include "vmlinux-sun3.lds"
 #else
-#include "vmlinux.lds_no.S"
+#include "vmlinux-std.lds"
+#endif
+#else
+#include "vmlinux-nommu.lds"
 #endif
diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S
deleted file mode 100644
index 99ba315..0000000
--- a/arch/m68k/kernel/vmlinux.lds_mm.S
+++ /dev/null
@@ -1,10 +0,0 @@
-PHDRS
-{
-  text PT_LOAD FILEHDR PHDRS FLAGS (7);
-  data PT_LOAD FLAGS (7);
-}
-#ifdef CONFIG_SUN3
-#include "vmlinux-sun3.lds"
-#else
-#include "vmlinux-std.lds"
-#endif
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 1a1bd90..a9d782d 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -6,9 +6,11 @@
 lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
 	   memcpy.o memset.o memmove.o
 
-ifdef CONFIG_MMU
-lib-y	+= string.o uaccess.o checksum_mm.o
-else
-lib-y	+= mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
+lib-$(CONFIG_MMU) += string.o uaccess.o
+lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o
+lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o
+
+ifndef CONFIG_GENERIC_CSUM
+lib-y	+= checksum.o
 endif
 
diff --git a/arch/m68k/lib/checksum_mm.c b/arch/m68k/lib/checksum.c
similarity index 100%
rename from arch/m68k/lib/checksum_mm.c
rename to arch/m68k/lib/checksum.c
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c
deleted file mode 100644
index e4c6354..0000000
--- a/arch/m68k/lib/checksum_no.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		IP/TCP/UDP checksumming routines
- *
- * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
- *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *		Tom May, <ftom@netcom.com>
- *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- *		Lots of code moved from tcp.c and ip.c; see those files
- *		for more names.
- *
- * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
- *		Fixed some nasty bugs, causing some horrible crashes.
- *		A: At some points, the sum (%0) was used as
- *		length-counter instead of the length counter
- *		(%1). Thanks to Roman Hodek for pointing this out.
- *		B: GCC seems to mess up if one uses too many
- *		data-registers to hold input values and one tries to
- *		specify d0 and d1 as scratch registers. Letting gcc choose these
- *      registers itself solves the problem.
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
- */
- 
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
-   of the assembly has to go. */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
-	/* add up 16-bit and 16-bit for 16+c bit */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up carry.. */
-	x = (x & 0xffff) + (x >> 16);
-	return x;
-}
-
-static unsigned long do_csum(const unsigned char * buff, int len)
-{
-	int odd, count;
-	unsigned long result = 0;
-
-	if (len <= 0)
-		goto out;
-	odd = 1 & (unsigned long) buff;
-	if (odd) {
-		result = *buff;
-		len--;
-		buff++;
-	}
-	count = len >> 1;		/* nr of 16-bit words.. */
-	if (count) {
-		if (2 & (unsigned long) buff) {
-			result += *(unsigned short *) buff;
-			count--;
-			len -= 2;
-			buff += 2;
-		}
-		count >>= 1;		/* nr of 32-bit words.. */
-		if (count) {
-		        unsigned long carry = 0;
-			do {
-				unsigned long w = *(unsigned long *) buff;
-				count--;
-				buff += 4;
-				result += carry;
-				result += w;
-				carry = (w > result);
-			} while (count);
-			result += carry;
-			result = (result & 0xffff) + (result >> 16);
-		}
-		if (len & 2) {
-			result += *(unsigned short *) buff;
-			buff += 2;
-		}
-	}
-	if (len & 1)
-		result += (*buff << 8);
-	result = from32to16(result);
-	if (odd)
-		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
-	return result;
-}
-
-#ifdef CONFIG_COLDFIRE
-/*
- *	This is a version of ip_compute_csum() optimized for IP headers,
- *	which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-	return (__force __sum16)~do_csum(iph,ihl*4);
-}
-EXPORT_SYMBOL(ip_fast_csum);
-#endif
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-	unsigned int result = do_csum(buff, len);
-
-	/* add in old sum, and carry.. */
-	result += (__force u32)sum;
-	if ((__force u32)sum > result)
-		result += 1;
-	return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-			    int len, __wsum sum, int *csum_err)
-{
-	if (csum_err) *csum_err = 0;
-	memcpy(dst, (__force const void *)src, len);
-	return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
-	memcpy(dst, src, len);
-	return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
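The block comments in the file removed above describe csum_partial() as producing a 32-bit value obtained by folding partial sums with end-around carry, suitable for feeding back into itself or into csum_tcpudp_magic(). The folding step is the from32to16() helper deleted above; a standalone restatement for reference (the test value is arbitrary):

    /* End-around-carry fold, as in the from32to16() helper removed above. */
    #include <stdio.h>
    #include <stdint.h>

    static uint16_t from32to16(uint32_t x)
    {
        x = (x & 0xffff) + (x >> 16);   /* add the two 16-bit halves */
        x = (x & 0xffff) + (x >> 16);   /* fold in any carry from that add */
        return (uint16_t)x;
    }

    int main(void)
    {
        printf("%#x\n", from32to16(0x0001ffffu));  /* prints 0x1 */
        return 0;
    }
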
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 13854ed..5664386 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -15,17 +15,17 @@
 	asm volatile ("\n"
 		"	tst.l	%0\n"
 		"	jeq	2f\n"
-		"1:	moves.l	(%1)+,%3\n"
+		"1:	"MOVES".l	(%1)+,%3\n"
 		"	move.l	%3,(%2)+\n"
 		"	subq.l	#1,%0\n"
 		"	jne	1b\n"
 		"2:	btst	#1,%5\n"
 		"	jeq	4f\n"
-		"3:	moves.w	(%1)+,%3\n"
+		"3:	"MOVES".w	(%1)+,%3\n"
 		"	move.w	%3,(%2)+\n"
 		"4:	btst	#0,%5\n"
 		"	jeq	6f\n"
-		"5:	moves.b	(%1)+,%3\n"
+		"5:	"MOVES".b	(%1)+,%3\n"
 		"	move.b  %3,(%2)+\n"
 		"6:\n"
 		"	.section .fixup,\"ax\"\n"
@@ -68,17 +68,17 @@
 		"	tst.l	%0\n"
 		"	jeq	4f\n"
 		"1:	move.l	(%1)+,%3\n"
-		"2:	moves.l	%3,(%2)+\n"
+		"2:	"MOVES".l	%3,(%2)+\n"
 		"3:	subq.l	#1,%0\n"
 		"	jne	1b\n"
 		"4:	btst	#1,%5\n"
 		"	jeq	6f\n"
 		"	move.w	(%1)+,%3\n"
-		"5:	moves.w	%3,(%2)+\n"
+		"5:	"MOVES".w	%3,(%2)+\n"
 		"6:	btst	#0,%5\n"
 		"	jeq	8f\n"
 		"	move.b	(%1)+,%3\n"
-		"7:	moves.b  %3,(%2)+\n"
+		"7:	"MOVES".b  %3,(%2)+\n"
 		"8:\n"
 		"	.section .fixup,\"ax\"\n"
 		"	.even\n"
@@ -115,7 +115,7 @@
 		return count;
 
 	asm volatile ("\n"
-		"1:	moves.b	(%2)+,%4\n"
+		"1:	"MOVES".b	(%2)+,%4\n"
 		"	move.b	%4,(%1)+\n"
 		"	jeq	2f\n"
 		"	subq.l	#1,%3\n"
@@ -152,7 +152,7 @@
 	asm volatile ("\n"
 		"1:	subq.l	#1,%1\n"
 		"	jmi	3f\n"
-		"2:	moves.b	(%0)+,%2\n"
+		"2:	"MOVES".b	(%0)+,%2\n"
 		"	tst.b	%2\n"
 		"	jne	1b\n"
 		"	jra	4f\n"
@@ -188,15 +188,15 @@
 	asm volatile ("\n"
 		"	tst.l	%0\n"
 		"	jeq	3f\n"
-		"1:	moves.l	%2,(%1)+\n"
+		"1:	"MOVES".l	%2,(%1)+\n"
 		"2:	subq.l	#1,%0\n"
 		"	jne	1b\n"
 		"3:	btst	#1,%4\n"
 		"	jeq	5f\n"
-		"4:	moves.w	%2,(%1)+\n"
+		"4:	"MOVES".w	%2,(%1)+\n"
 		"5:	btst	#0,%4\n"
 		"	jeq	7f\n"
-		"6:	moves.b	%2,(%1)\n"
+		"6:	"MOVES".b	%2,(%1)\n"
 		"7:\n"
 		"	.section .fixup,\"ax\"\n"
 		"	.even\n"
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index b403924..3fe0e43 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -8,13 +8,8 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 
-#include <asm/traps.h>
-#include <asm/bootinfo.h>
 #include <asm/macintosh.h>
 #include <asm/macints.h>
 #include <asm/mac_baboon.h>
@@ -23,7 +18,6 @@
 
 int baboon_present;
 static volatile struct baboon *baboon;
-static unsigned char baboon_disabled;
 
 #if 0
 extern int macide_ack_intr(struct ata_channel *);
@@ -89,51 +83,32 @@
 
 void __init baboon_register_interrupts(void)
 {
-	baboon_disabled = 0;
 	irq_set_chained_handler(IRQ_NUBUS_C, baboon_irq);
 }
 
 /*
- * The means for masking individual baboon interrupts remains a mystery, so
- * enable the umbrella interrupt only when no baboon interrupt is disabled.
+ * The means for masking individual Baboon interrupts remains a mystery.
+ * However, since we only use the IDE IRQ, we can just enable/disable all
+ * Baboon interrupts. If/when we handle more than one Baboon IRQ, we must
+ * either figure out how to mask them individually or else implement the
+ * same workaround that's used for NuBus slots (see nubus_disabled and
+ * via_nubus_irq_shutdown).
  */
 
 void baboon_irq_enable(int irq)
 {
-	int irq_idx = IRQ_IDX(irq);
-
 #ifdef DEBUG_IRQUSE
 	printk("baboon_irq_enable(%d)\n", irq);
 #endif
 
-	baboon_disabled &= ~(1 << irq_idx);
-	if (!baboon_disabled)
-		mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
+	mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
 }
 
 void baboon_irq_disable(int irq)
 {
-	int irq_idx = IRQ_IDX(irq);
-
 #ifdef DEBUG_IRQUSE
 	printk("baboon_irq_disable(%d)\n", irq);
 #endif
 
-	baboon_disabled |= 1 << irq_idx;
-	if (baboon_disabled)
-		mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
-}
-
-void baboon_irq_clear(int irq)
-{
-	int irq_idx = IRQ_IDX(irq);
-
-	baboon->mb_ifr &= ~(1 << irq_idx);
-}
-
-int baboon_irq_pending(int irq)
-{
-	int irq_idx = IRQ_IDX(irq);
-
-	return baboon->mb_ifr & (1 << irq_idx);
+	mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
 }
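The rewritten comment above notes that, should more than one Baboon interrupt source ever be used, the driver would need the same disabled-source bookkeeping that the NuBus code uses. That is essentially the scheme the removed baboon_disabled code implemented; a compact sketch of it (helper names other than those appearing in the hunk are illustrative):

    /* Sketch of the per-source masking workaround the comment refers to:
     * track logically disabled sources in a bitmask and gate the single
     * umbrella IRQ (IRQ_NUBUS_C) on whether that mask is empty. */
    static unsigned char baboon_disabled;

    static void baboon_source_disable(int irq_idx)
    {
        baboon_disabled |= 1 << irq_idx;
        mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C)); /* any source off -> mask umbrella */
    }

    static void baboon_source_enable(int irq_idx)
    {
        baboon_disabled &= ~(1 << irq_idx);
        if (!baboon_disabled)           /* all sources back on -> unmask umbrella */
            mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
    }
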
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index c247de0..f60ff5f 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -71,6 +71,31 @@
 static void mac_identify(void);
 static void mac_report_hardware(void);
 
+#ifdef CONFIG_EARLY_PRINTK
+asmlinkage void __init mac_early_print(const char *s, unsigned n);
+
+static void __init mac_early_cons_write(struct console *con,
+                                 const char *s, unsigned n)
+{
+	mac_early_print(s, n);
+}
+
+static struct console __initdata mac_early_cons = {
+	.name  = "early",
+	.write = mac_early_cons_write,
+	.flags = CON_PRINTBUFFER | CON_BOOT,
+	.index = -1
+};
+
+int __init mac_unregister_early_cons(void)
+{
+	/* mac_early_print can't be used after init sections are discarded */
+	return unregister_console(&mac_early_cons);
+}
+
+late_initcall(mac_unregister_early_cons);
+#endif
+
 static void __init mac_sched_init(irq_handler_t vector)
 {
 	via_init_clock(vector);
@@ -164,6 +189,10 @@
 	mach_beep = mac_mksound;
 #endif
 
+#ifdef CONFIG_EARLY_PRINTK
+	register_console(&mac_early_cons);
+#endif
+
 	/*
 	 * Determine hardware present
 	 */
@@ -192,7 +221,7 @@
  * inaccurate, so look here if a new Mac model won't run. Example: if
  * a Mac crashes immediately after the VIA1 registers have been dumped
  * to the screen, it probably died attempting to read DirB on a RBV.
- * Meaning it should have MAC_VIA_IIci here :-)
+ * Meaning it should have MAC_VIA_IICI here :-)
  */
 
 struct mac_model *macintosh_config;
@@ -267,7 +296,7 @@
 		.ident		= MAC_MODEL_IICI,
 		.name		= "IIci",
 		.adb_type	= MAC_ADB_II,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -276,7 +305,7 @@
 		.ident		= MAC_MODEL_IIFX,
 		.name		= "IIfx",
 		.adb_type	= MAC_ADB_IOP,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_IOP,
 		.nubus_type	= MAC_NUBUS,
@@ -285,7 +314,7 @@
 		.ident		= MAC_MODEL_IISI,
 		.name		= "IIsi",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -294,7 +323,7 @@
 		.ident		= MAC_MODEL_IIVI,
 		.name		= "IIvi",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -303,7 +332,7 @@
 		.ident		= MAC_MODEL_IIVX,
 		.name		= "IIvx",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -318,7 +347,7 @@
 		.ident		= MAC_MODEL_CLII,
 		.name		= "Classic II",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -327,7 +356,7 @@
 		.ident		= MAC_MODEL_CCL,
 		.name		= "Color Classic",
 		.adb_type	= MAC_ADB_CUDA,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -336,7 +365,7 @@
 		.ident		= MAC_MODEL_CCLII,
 		.name		= "Color Classic II",
 		.adb_type	= MAC_ADB_CUDA,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -351,7 +380,7 @@
 		.ident		= MAC_MODEL_LC,
 		.name		= "LC",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -360,7 +389,7 @@
 		.ident		= MAC_MODEL_LCII,
 		.name		= "LC II",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -369,7 +398,7 @@
 		.ident		= MAC_MODEL_LCIII,
 		.name		= "LC III",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -497,7 +526,7 @@
 		.ident		= MAC_MODEL_P460,
 		.name		= "Performa 460",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -524,7 +553,7 @@
 		.ident		= MAC_MODEL_P520,
 		.name		= "Performa 520",
 		.adb_type	= MAC_ADB_CUDA,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -533,7 +562,7 @@
 		.ident		= MAC_MODEL_P550,
 		.name		= "Performa 550",
 		.adb_type	= MAC_ADB_CUDA,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -565,7 +594,7 @@
 		.ident		= MAC_MODEL_TV,
 		.name		= "TV",
 		.adb_type	= MAC_ADB_CUDA,
-		.via_type	= MAC_VIA_QUADRA,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -574,7 +603,7 @@
 		.ident		= MAC_MODEL_P600,
 		.name		= "Performa 600",
 		.adb_type	= MAC_ADB_IISI,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_II,
 		.nubus_type	= MAC_NUBUS,
@@ -645,8 +674,8 @@
 	}, {
 		.ident		= MAC_MODEL_PB150,
 		.name		= "PowerBook 150",
-		.adb_type	= MAC_ADB_PB1,
-		.via_type	= MAC_VIA_IIci,
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.ide_type	= MAC_IDE_PB,
 		.scc_type	= MAC_SCC_QUADRA,
@@ -732,17 +761,13 @@
 	 * PowerBook Duos are pretty much like normal PowerBooks
 	 * All of these probably have onboard SONIC in the Dock which
 	 * means we'll have to probe for it eventually.
-	 *
-	 * Are these really MAC_VIA_IIci? The developer notes for the
-	 * Duos show pretty much the same custom parts as in most of
-	 * the other PowerBooks which would imply MAC_VIA_QUADRA.
 	 */
 
 	{
 		.ident		= MAC_MODEL_PB210,
 		.name		= "PowerBook Duo 210",
 		.adb_type	= MAC_ADB_PB2,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_QUADRA,
 		.nubus_type	= MAC_NUBUS,
@@ -751,7 +776,7 @@
 		.ident		= MAC_MODEL_PB230,
 		.name		= "PowerBook Duo 230",
 		.adb_type	= MAC_ADB_PB2,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_QUADRA,
 		.nubus_type	= MAC_NUBUS,
@@ -760,7 +785,7 @@
 		.ident		= MAC_MODEL_PB250,
 		.name		= "PowerBook Duo 250",
 		.adb_type	= MAC_ADB_PB2,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_QUADRA,
 		.nubus_type	= MAC_NUBUS,
@@ -769,7 +794,7 @@
 		.ident		= MAC_MODEL_PB270C,
 		.name		= "PowerBook Duo 270c",
 		.adb_type	= MAC_ADB_PB2,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_QUADRA,
 		.nubus_type	= MAC_NUBUS,
@@ -778,7 +803,7 @@
 		.ident		= MAC_MODEL_PB280,
 		.name		= "PowerBook Duo 280",
 		.adb_type	= MAC_ADB_PB2,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_QUADRA,
 		.nubus_type	= MAC_NUBUS,
@@ -787,7 +812,7 @@
 		.ident		= MAC_MODEL_PB280C,
 		.name		= "PowerBook Duo 280c",
 		.adb_type	= MAC_ADB_PB2,
-		.via_type	= MAC_VIA_IIci,
+		.via_type	= MAC_VIA_IICI,
 		.scsi_type	= MAC_SCSI_OLD,
 		.scc_type	= MAC_SCC_QUADRA,
 		.nubus_type	= MAC_NUBUS,
@@ -864,8 +889,14 @@
 		scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC_B;
 		break;
 	default:
-		scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC;
-		scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC;
+		/* On non-PSC machines, the serial ports share an IRQ. */
+		if (macintosh_config->ident == MAC_MODEL_IIFX) {
+			scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC;
+			scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC;
+		} else {
+			scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_AUTO_4;
+			scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_AUTO_4;
+		}
 		break;
 	}
 
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index a5462cc..7d8d461 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -115,7 +115,6 @@
 #include <asm/macintosh.h>
 #include <asm/macints.h>
 #include <asm/mac_iop.h>
-#include <asm/mac_oss.h>
 
 /*#define DEBUG_IOP*/
 
@@ -149,8 +148,6 @@
 
 irqreturn_t iop_ism_irq(int, void *);
 
-extern void oss_irq_enable(int);
-
 /*
  * Private access functions
  */
@@ -304,11 +301,10 @@
 void __init iop_register_interrupts(void)
 {
 	if (iop_ism_present) {
-		if (oss_present) {
-			if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 0,
+		if (macintosh_config->ident == MAC_MODEL_IIFX) {
+			if (request_irq(IRQ_MAC_ADB, iop_ism_irq, 0,
 					"ISM IOP", (void *)IOP_NUM_ISM))
 				pr_err("Couldn't register ISM IOP interrupt\n");
-			oss_irq_enable(IRQ_MAC_ADB);
 		} else {
 			if (request_irq(IRQ_VIA2_0, iop_ism_irq, 0, "ISM IOP",
 					(void *)IOP_NUM_ISM))
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index ba220b7..5c1a6b2 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -26,10 +26,6 @@
  *		  - slot 6: timer 1 (not on IIci)
  *		  - slot 7: status of IRQ; signals 'any enabled int.'
  *
- *	2	- OSS (IIfx only?)
- *		  - slot 0: SCSI interrupt
- *		  - slot 1: Sound interrupt
- *
  * Levels 3-6 vary by machine type. For VIA or RBV Macintoshes:
  *
  *	3	- unused (?)
@@ -42,21 +38,30 @@
  *
  *	6	- off switch (?)
  *
- * For OSS Macintoshes (IIfx only at this point):
+ * Machines with Quadra-like VIA hardware, except PSC and PMU machines, support
+ * an alternate interrupt mapping, as used by A/UX. It spreads ethernet and
+ * sound out to their own autovector IRQs and gives VIA1 a higher priority:
  *
- *	3	- Nubus interrupt
- *		  - slot 0: Slot $9
- *		  - slot 1: Slot $A
- *		  - slot 2: Slot $B
- *		  - slot 3: Slot $C
- *		  - slot 4: Slot $D
- *		  - slot 5: Slot $E
+ *	1	- unused (?)
+ *
+ *	3	- on-board SONIC
+ *
+ *	5	- Apple Sound Chip (ASC)
+ *
+ *	6	- VIA1
+ *
+ * For OSS Macintoshes (IIfx only), we apply an interrupt mapping similar to
+ * the Quadra (A/UX) mapping:
+ *
+ *	1	- ISM IOP (ADB)
+ *
+ *	2	- SCSI
+ *
+ *	3	- NuBus
  *
  *	4	- SCC IOP
  *
- *	5	- ISM IOP (ADB?)
- *
- *	6	- unused
+ *	6	- VIA1
  *
  * For PSC Macintoshes (660AV, 840AV):
  *
@@ -100,88 +105,29 @@
  *   case. They're hidden behind the Nubus slot $C interrupt thus adding a
  *   third layer of indirection. Why oh why did the Apple engineers do that?
  *
- * - We support "fast" and "slow" handlers, just like the Amiga port. The
- *   fast handlers are called first and with all interrupts disabled. They
- *   are expected to execute quickly (hence the name). The slow handlers are
- *   called last with interrupts enabled and the interrupt level restored.
- *   They must therefore be reentrant.
- *
- *   TODO:
- *
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/interrupt.h> /* for intr_count */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/seq_file.h>
 
-#include <asm/system.h>
 #include <asm/irq.h>
-#include <asm/traps.h>
-#include <asm/bootinfo.h>
 #include <asm/macintosh.h>
+#include <asm/macints.h>
 #include <asm/mac_via.h>
 #include <asm/mac_psc.h>
-#include <asm/hwtest.h>
-#include <asm/errno.h>
-#include <asm/macints.h>
-#include <asm/irq_regs.h>
 #include <asm/mac_oss.h>
+#include <asm/mac_iop.h>
+#include <asm/mac_baboon.h>
+#include <asm/hwtest.h>
+#include <asm/irq_regs.h>
 
 #define SHUTUP_SONIC
 
 /*
- * VIA/RBV hooks
- */
-
-extern void via_register_interrupts(void);
-extern void via_irq_enable(int);
-extern void via_irq_disable(int);
-extern void via_irq_clear(int);
-extern int  via_irq_pending(int);
-
-/*
- * OSS hooks
- */
-
-extern void oss_register_interrupts(void);
-extern void oss_irq_enable(int);
-extern void oss_irq_disable(int);
-extern void oss_irq_clear(int);
-extern int  oss_irq_pending(int);
-
-/*
- * PSC hooks
- */
-
-extern void psc_register_interrupts(void);
-extern void psc_irq_enable(int);
-extern void psc_irq_disable(int);
-extern void psc_irq_clear(int);
-extern int  psc_irq_pending(int);
-
-/*
- * IOP hooks
- */
-
-extern void iop_register_interrupts(void);
-
-/*
- * Baboon hooks
- */
-
-extern int baboon_present;
-
-extern void baboon_register_interrupts(void);
-extern void baboon_irq_enable(int);
-extern void baboon_irq_disable(int);
-extern void baboon_irq_clear(int);
-
-/*
  * console_loglevel determines NMI handler function
  */
 
@@ -190,10 +136,15 @@
 
 /* #define DEBUG_MACINTS */
 
+static unsigned int mac_irq_startup(struct irq_data *);
+static void mac_irq_shutdown(struct irq_data *);
+
 static struct irq_chip mac_irq_chip = {
 	.name		= "mac",
 	.irq_enable	= mac_irq_enable,
 	.irq_disable	= mac_irq_disable,
+	.irq_startup	= mac_irq_startup,
+	.irq_shutdown	= mac_irq_shutdown,
 };
 
 void __init mac_init_IRQ(void)
@@ -239,8 +190,6 @@
 /*
  *  mac_irq_enable - enable an interrupt source
  * mac_irq_disable - disable an interrupt source
- *   mac_clear_irq - clears a pending interrupt
- * mac_irq_pending - returns the pending status of an IRQ (nonzero = pending)
  *
  * These routines are just dispatchers to the VIA/OSS/PSC routines.
  */
@@ -252,8 +201,6 @@
 
 	switch(irq_src) {
 	case 1:
-		via_irq_enable(irq);
-		break;
 	case 2:
 	case 7:
 		if (oss_present)
@@ -262,6 +209,7 @@
 			via_irq_enable(irq);
 		break;
 	case 3:
+	case 4:
 	case 5:
 	case 6:
 		if (psc_present)
@@ -269,10 +217,6 @@
 		else if (oss_present)
 			oss_irq_enable(irq);
 		break;
-	case 4:
-		if (psc_present)
-			psc_irq_enable(irq);
-		break;
 	case 8:
 		if (baboon_present)
 			baboon_irq_enable(irq);
@@ -287,8 +231,6 @@
 
 	switch(irq_src) {
 	case 1:
-		via_irq_disable(irq);
-		break;
 	case 2:
 	case 7:
 		if (oss_present)
@@ -297,6 +239,7 @@
 			via_irq_disable(irq);
 		break;
 	case 3:
+	case 4:
 	case 5:
 	case 6:
 		if (psc_present)
@@ -304,10 +247,6 @@
 		else if (oss_present)
 			oss_irq_disable(irq);
 		break;
-	case 4:
-		if (psc_present)
-			psc_irq_disable(irq);
-		break;
 	case 8:
 		if (baboon_present)
 			baboon_irq_disable(irq);
@@ -315,65 +254,27 @@
 	}
 }
 
-void mac_clear_irq(unsigned int irq)
+static unsigned int mac_irq_startup(struct irq_data *data)
 {
-	switch(IRQ_SRC(irq)) {
-	case 1:
-		via_irq_clear(irq);
-		break;
-	case 2:
-	case 7:
-		if (oss_present)
-			oss_irq_clear(irq);
-		else
-			via_irq_clear(irq);
-		break;
-	case 3:
-	case 5:
-	case 6:
-		if (psc_present)
-			psc_irq_clear(irq);
-		else if (oss_present)
-			oss_irq_clear(irq);
-		break;
-	case 4:
-		if (psc_present)
-			psc_irq_clear(irq);
-		break;
-	case 8:
-		if (baboon_present)
-			baboon_irq_clear(irq);
-		break;
-	}
-}
+	int irq = data->irq;
 
-int mac_irq_pending(unsigned int irq)
-{
-	switch(IRQ_SRC(irq)) {
-	case 1:
-		return via_irq_pending(irq);
-	case 2:
-	case 7:
-		if (oss_present)
-			return oss_irq_pending(irq);
-		else
-			return via_irq_pending(irq);
-	case 3:
-	case 5:
-	case 6:
-		if (psc_present)
-			return psc_irq_pending(irq);
-		else if (oss_present)
-			return oss_irq_pending(irq);
-		break;
-	case 4:
-		if (psc_present)
-			return psc_irq_pending(irq);
-		break;
-	}
+	if (IRQ_SRC(irq) == 7 && !oss_present)
+		via_nubus_irq_startup(irq);
+	else
+		mac_irq_enable(data);
+
 	return 0;
 }
-EXPORT_SYMBOL(mac_irq_pending);
+
+static void mac_irq_shutdown(struct irq_data *data)
+{
+	int irq = data->irq;
+
+	if (IRQ_SRC(irq) == 7 && !oss_present)
+		via_nubus_irq_shutdown(irq);
+	else
+		mac_irq_disable(data);
+}
 
 static int num_debug[8];
 
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index a4c82da..6c4c882 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -1,5 +1,5 @@
 /*
- *	OSS handling
+ *	Operating System Services (OSS) chip handling
  *	Written by Joshua M. Thompson (funaho@jurai.org)
  *
  *
@@ -30,8 +30,6 @@
 int oss_present;
 volatile struct mac_oss *oss;
 
-extern void via1_irq(unsigned int irq, struct irq_desc *desc);
-
 /*
  * Initialize the OSS
  *
@@ -51,10 +49,8 @@
 	/* do this by setting the source's interrupt level to zero. */
 
 	for (i = 0; i <= OSS_NUM_SOURCES; i++) {
-		oss->irq_level[i] = OSS_IRQLEV_DISABLED;
+		oss->irq_level[i] = 0;
 	}
-	/* If we disable VIA1 here, we never really handle it... */
-	oss->irq_level[OSS_VIA1] = OSS_IRQLEV_VIA1;
 }
 
 /*
@@ -66,17 +62,13 @@
 }
 
 /*
- * Handle miscellaneous OSS interrupts. Right now that's just sound
- * and SCSI; everything else is routed to its own autovector IRQ.
+ * Handle miscellaneous OSS interrupts.
  */
 
 static void oss_irq(unsigned int irq, struct irq_desc *desc)
 {
-	int events;
-
-	events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI);
-	if (!events)
-		return;
+	int events = oss->irq_pending &
+	             (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
 
 #ifdef DEBUG_IRQS
 	if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
@@ -84,16 +76,20 @@
 			(int) oss->irq_pending);
 	}
 #endif
-	/* FIXME: how do you clear a pending IRQ?    */
 
-	if (events & OSS_IP_SOUND) {
-		oss->irq_pending &= ~OSS_IP_SOUND;
-		/* FIXME: call sound handler */
-	} else if (events & OSS_IP_SCSI) {
+	if (events & OSS_IP_IOPSCC) {
+		oss->irq_pending &= ~OSS_IP_IOPSCC;
+		generic_handle_irq(IRQ_MAC_SCC);
+	}
+
+	if (events & OSS_IP_SCSI) {
 		oss->irq_pending &= ~OSS_IP_SCSI;
 		generic_handle_irq(IRQ_MAC_SCSI);
-	} else {
-		/* FIXME: error check here? */
+	}
+
+	if (events & OSS_IP_IOPISM) {
+		oss->irq_pending &= ~OSS_IP_IOPISM;
+		generic_handle_irq(IRQ_MAC_ADB);
 	}
 }
 
@@ -132,14 +128,29 @@
 
 /*
  * Register the OSS and NuBus interrupt dispatchers.
+ *
+ * This IRQ mapping is laid out with two things in mind: first, we try to keep
+ * things on their own levels to avoid having to do double-dispatches. Second,
+ * the levels match as closely as possible the alternate IRQ mapping mode (aka
+ * "A/UX mode") available on some VIA machines.
  */
 
+#define OSS_IRQLEV_IOPISM    IRQ_AUTO_1
+#define OSS_IRQLEV_SCSI      IRQ_AUTO_2
+#define OSS_IRQLEV_NUBUS     IRQ_AUTO_3
+#define OSS_IRQLEV_IOPSCC    IRQ_AUTO_4
+#define OSS_IRQLEV_VIA1      IRQ_AUTO_6
+
 void __init oss_register_interrupts(void)
 {
-	irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
-	irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
-	irq_set_chained_handler(OSS_IRQLEV_SOUND, oss_irq);
-	irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
+	irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_irq);
+	irq_set_chained_handler(OSS_IRQLEV_SCSI,   oss_irq);
+	irq_set_chained_handler(OSS_IRQLEV_NUBUS,  oss_nubus_irq);
+	irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_irq);
+	irq_set_chained_handler(OSS_IRQLEV_VIA1,   via1_irq);
+
+	/* OSS_VIA1 gets enabled here because it has no machspec interrupt. */
+	oss->irq_level[OSS_VIA1] = IRQ_AUTO_6;
 }
 
 /*
@@ -158,13 +169,13 @@
 	switch(irq) {
 		case IRQ_MAC_SCC:
 			oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_IOPSCC;
-			break;
+			return;
 		case IRQ_MAC_ADB:
 			oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_IOPISM;
-			break;
+			return;
 		case IRQ_MAC_SCSI:
 			oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI;
-			break;
+			return;
 		case IRQ_NUBUS_9:
 		case IRQ_NUBUS_A:
 		case IRQ_NUBUS_B:
@@ -173,13 +184,11 @@
 		case IRQ_NUBUS_E:
 			irq -= NUBUS_SOURCE_BASE;
 			oss->irq_level[irq] = OSS_IRQLEV_NUBUS;
-			break;
-#ifdef DEBUG_IRQUSE
-		default:
-			printk("%s unknown irq %d\n", __func__, irq);
-			break;
-#endif
+			return;
 	}
+
+	if (IRQ_SRC(irq) == 1)
+		via_irq_enable(irq);
 }
 
 /*
@@ -195,14 +204,14 @@
 #endif
 	switch(irq) {
 		case IRQ_MAC_SCC:
-			oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_DISABLED;
-			break;
+			oss->irq_level[OSS_IOPSCC] = 0;
+			return;
 		case IRQ_MAC_ADB:
-			oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_DISABLED;
-			break;
+			oss->irq_level[OSS_IOPISM] = 0;
+			return;
 		case IRQ_MAC_SCSI:
-			oss->irq_level[OSS_SCSI] = OSS_IRQLEV_DISABLED;
-			break;
+			oss->irq_level[OSS_SCSI] = 0;
+			return;
 		case IRQ_NUBUS_9:
 		case IRQ_NUBUS_A:
 		case IRQ_NUBUS_B:
@@ -210,72 +219,10 @@
 		case IRQ_NUBUS_D:
 		case IRQ_NUBUS_E:
 			irq -= NUBUS_SOURCE_BASE;
-			oss->irq_level[irq] = OSS_IRQLEV_DISABLED;
-			break;
-#ifdef DEBUG_IRQUSE
-		default:
-			printk("%s unknown irq %d\n", __func__, irq);
-			break;
-#endif
+			oss->irq_level[irq] = 0;
+			return;
 	}
-}
 
-/*
- * Clear an OSS interrupt
- *
- * Not sure if this works or not but it's the only method I could
- * think of based on the contents of the mac_oss structure.
- */
-
-void oss_irq_clear(int irq) {
-	/* FIXME: how to do this on OSS? */
-	switch(irq) {
-		case IRQ_MAC_SCC:
-			oss->irq_pending &= ~OSS_IP_IOPSCC;
-			break;
-		case IRQ_MAC_ADB:
-			oss->irq_pending &= ~OSS_IP_IOPISM;
-			break;
-		case IRQ_MAC_SCSI:
-			oss->irq_pending &= ~OSS_IP_SCSI;
-			break;
-		case IRQ_NUBUS_9:
-		case IRQ_NUBUS_A:
-		case IRQ_NUBUS_B:
-		case IRQ_NUBUS_C:
-		case IRQ_NUBUS_D:
-		case IRQ_NUBUS_E:
-			irq -= NUBUS_SOURCE_BASE;
-			oss->irq_pending &= ~(1 << irq);
-			break;
-	}
-}
-
-/*
- * Check to see if a specific OSS interrupt is pending
- */
-
-int oss_irq_pending(int irq)
-{
-	switch(irq) {
-		case IRQ_MAC_SCC:
-			return oss->irq_pending & OSS_IP_IOPSCC;
-			break;
-		case IRQ_MAC_ADB:
-			return oss->irq_pending & OSS_IP_IOPISM;
-			break;
-		case IRQ_MAC_SCSI:
-			return oss->irq_pending & OSS_IP_SCSI;
-			break;
-		case IRQ_NUBUS_9:
-		case IRQ_NUBUS_A:
-		case IRQ_NUBUS_B:
-		case IRQ_NUBUS_C:
-		case IRQ_NUBUS_D:
-		case IRQ_NUBUS_E:
-			irq -= NUBUS_SOURCE_BASE;
-			return oss->irq_pending & (1 << irq);
-			break;
-	}
-	return 0;
+	if (IRQ_SRC(irq) == 1)
+		via_irq_disable(irq);
 }
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index e6c2d20..6f026fc 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -180,20 +180,3 @@
 #endif
 	psc_write_byte(pIER, 1 << irq_idx);
 }
-
-void psc_irq_clear(int irq) {
-	int irq_src	= IRQ_SRC(irq);
-	int irq_idx	= IRQ_IDX(irq);
-	int pIFR	= pIERbase + (irq_src << 4);
-
-	psc_write_byte(pIFR, 1 << irq_idx);
-}
-
-int psc_irq_pending(int irq)
-{
-	int irq_src	= IRQ_SRC(irq);
-	int irq_idx	= IRQ_IDX(irq);
-	int pIFR	= pIERbase + (irq_src << 4);
-
-	return psc_read_byte(pIFR) & (1 << irq_idx);
-}
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index f1600ad..2d85662 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -63,24 +63,50 @@
 #define MAC_CLOCK_LOW		(MAC_CLOCK_TICK&0xFF)
 #define MAC_CLOCK_HIGH		(MAC_CLOCK_TICK>>8)
 
-/* To disable a NuBus slot on Quadras we make that slot IRQ line an output set
- * high. On RBV we just use the slot interrupt enable register. On Macs with
- * genuine VIA chips we must use nubus_disabled to keep track of disabled slot
- * interrupts. When any slot IRQ is disabled we mask the (edge triggered) CA1
- * or "SLOTS" interrupt. When no slot is disabled, we unmask the CA1 interrupt.
- * So, on genuine VIAs, having more than one NuBus IRQ can mean trouble,
- * because closing one of those drivers can mask all of the NuBus interrupts.
- * Also, since we can't mask the unregistered slot IRQs on genuine VIAs, it's
- * possible to get interrupts from cards that MacOS or the ROM has configured
- * but we have not. FWIW, "Designing Cards and Drivers for Macintosh II and
- * Macintosh SE", page 9-8, says, a slot IRQ with no driver would crash MacOS.
+
+/*
+ * On Macs with a genuine VIA chip there is no way to mask an individual slot
+ * interrupt. This limitation also seems to apply to VIA clone logic cores in
+ * Quadra-like ASICs. (RBV and OSS machines don't have this limitation.)
+ *
+ * We used to fake it by configuring the relevent VIA pin as an output
+ * (to mask the interrupt) or input (to unmask). That scheme did not work on
+ * (at least) the Quadra 700. A NuBus card's /NMRQ signal is an open-collector
+ * circuit (see Designing Cards and Drivers for Macintosh II and Macintosh SE,
+ * p. 10-11 etc) but VIA outputs are not (see datasheet).
+ *
+ * Driving these outputs high must cause the VIA to source current and the
+ * card to sink current when it asserts /NMRQ. Current will flow but the pin
+ * voltage is uncertain and so the /NMRQ condition may still cause a transition
+ * at the VIA2 CA1 input (which explains the lost interrupts). A side effect
+ * is that a disabled slot IRQ can never be tested as pending or not.
+ *
+ * Driving these outputs low doesn't work either. All the slot /NMRQ lines are
+ * (active low) OR'd together to generate the CA1 (aka "SLOTS") interrupt (see
+ * The Guide To Macintosh Family Hardware, 2nd edition p. 167). If we drive a
+ * disabled /NMRQ line low, the falling edge immediately triggers a CA1
+ * interrupt and all slot interrupts after that will generate no transition
+ * and therefore no interrupt, even after being re-enabled.
+ *
+ * So we make the VIA port A I/O lines inputs and use nubus_disabled to keep
+ * track of their states. When any slot IRQ becomes disabled we mask the CA1
+ * umbrella interrupt. Only when all slot IRQs become enabled do we unmask
+ * the CA1 interrupt. It must remain enabled even when cards have no interrupt
+ * handler registered. Drivers must therefore disable a slot interrupt at the
+ * device before they call free_irq (like shared and autovector interrupts).
+ *
+ * There is also a related problem when MacOS is used to boot Linux. A network
+ * card brought up by a MacOS driver may raise an interrupt while Linux boots.
+ * This can be fatal since it can't be handled until the right driver loads
+ * (if such a driver exists at all). Apparently related to this hardware
+ * limitation, "Designing Cards and Drivers", p. 9-8, says that a slot
+ * interrupt with no driver would crash MacOS (the book was written before
+ * the appearance of Macs with RBV or OSS).
  */
+
 static u8 nubus_disabled;
 
 void via_debug_dump(void);
-void via_irq_enable(int irq);
-void via_irq_disable(int irq);
-void via_irq_clear(int irq);
 
 /*
  * Initialize the VIAs
@@ -100,7 +126,7 @@
 
 		/* IIci, IIsi, IIvx, IIvi (P6xx), LC series */
 
-		case MAC_VIA_IIci:
+		case MAC_VIA_IICI:
 			via1 = (void *) VIA1_BASE;
 			if (macintosh_config->ident == MAC_MODEL_IIFX) {
 				via2 = NULL;
@@ -197,38 +223,17 @@
 	if (oss_present)
 		return;
 
-	/* Some machines support an alternate IRQ mapping that spreads  */
-	/* Ethernet and Sound out to their own autolevel IRQs and moves */
-	/* VIA1 to level 6. A/UX uses this mapping and we do too.  Note */
-	/* that the IIfx emulates this alternate mapping using the OSS. */
-
-	via_alt_mapping = 0;
-	if (macintosh_config->via_type == MAC_VIA_QUADRA)
-		switch (macintosh_config->ident) {
-		case MAC_MODEL_C660:
-		case MAC_MODEL_Q840:
-			/* not applicable */
-			break;
-		case MAC_MODEL_P588:
-		case MAC_MODEL_TV:
-		case MAC_MODEL_PB140:
-		case MAC_MODEL_PB145:
-		case MAC_MODEL_PB160:
-		case MAC_MODEL_PB165:
-		case MAC_MODEL_PB165C:
-		case MAC_MODEL_PB170:
-		case MAC_MODEL_PB180:
-		case MAC_MODEL_PB180C:
-		case MAC_MODEL_PB190:
-		case MAC_MODEL_PB520:
-			/* not yet tested */
-			break;
-		default:
-			via_alt_mapping = 1;
-			via1[vDirB] |= 0x40;
-			via1[vBufB] &= ~0x40;
-			break;
-		}
+	if ((macintosh_config->via_type == MAC_VIA_QUADRA) &&
+	    (macintosh_config->adb_type != MAC_ADB_PB1) &&
+	    (macintosh_config->adb_type != MAC_ADB_PB2) &&
+	    (macintosh_config->ident    != MAC_MODEL_C660) &&
+	    (macintosh_config->ident    != MAC_MODEL_Q840)) {
+		via_alt_mapping = 1;
+		via1[vDirB] |= 0x40;
+		via1[vBufB] &= ~0x40;
+	} else {
+		via_alt_mapping = 0;
+	}
 
 	/*
 	 * Now initialize VIA2. For RBV we just kill all interrupts;
@@ -248,22 +253,28 @@
 		via2[vACR] &= ~0x03; /* disable port A & B latches */
 	}
 
+	/* Everything below this point is VIA2 only... */
+
+	if (rbv_present)
+		return;
+
 	/*
-	 * Set vPCR for control line interrupts (but not on RBV)
+	 * Set vPCR for control line interrupts.
+	 *
+	 * CA1 (SLOTS IRQ), CB1 (ASC IRQ): negative edge trigger.
+	 *
+	 * Macs with ESP SCSI have a negative edge triggered SCSI interrupt.
+	 * Testing reveals that PowerBooks do too. However, the SE/30
+	 * schematic diagram shows an active high NCR5380 IRQ line.
 	 */
-	if (!rbv_present) {
-		/* For all VIA types, CA1 (SLOTS IRQ) and CB1 (ASC IRQ)
-		 * are made negative edge triggered here.
-		 */
-		if (macintosh_config->scsi_type == MAC_SCSI_OLD) {
-			/* CB2 (IRQ) indep. input, positive edge */
-			/* CA2 (DRQ) indep. input, positive edge */
-			via2[vPCR] = 0x66;
-		} else {
-			/* CB2 (IRQ) indep. input, negative edge */
-			/* CA2 (DRQ) indep. input, negative edge */
-			via2[vPCR] = 0x22;
-		}
+
+	pr_debug("VIA2 vPCR is 0x%02X\n", via2[vPCR]);
+	if (macintosh_config->via_type == MAC_VIA_II) {
+		/* CA2 (SCSI DRQ), CB2 (SCSI IRQ): indep. input, pos. edge */
+		via2[vPCR] = 0x66;
+	} else {
+		/* CA2 (SCSI DRQ), CB2 (SCSI IRQ): indep. input, neg. edge */
+		via2[vPCR] = 0x22;
 	}
 }
 
@@ -378,34 +389,55 @@
 		via2[gBufB] |= 0x02;
 	}
 
-	/* Disable all the slot interrupts (where possible). */
+	/*
+	 * Disable the slot interrupts. On some hardware that's not possible.
+	 * On some hardware it's unclear what all of these I/O lines do.
+	 */
 
 	switch (macintosh_config->via_type) {
 	case MAC_VIA_II:
-		/* Just make the port A lines inputs. */
-		switch(macintosh_config->ident) {
-		case MAC_MODEL_II:
-		case MAC_MODEL_IIX:
-		case MAC_MODEL_IICX:
-		case MAC_MODEL_SE30:
-			/* The top two bits are RAM size outputs. */
-			via2[vDirA] &= 0xC0;
-			break;
-		default:
-			via2[vDirA] &= 0x80;
-		}
+	case MAC_VIA_QUADRA:
+		pr_debug("VIA2 vDirA is 0x%02X\n", via2[vDirA]);
 		break;
-	case MAC_VIA_IIci:
+	case MAC_VIA_IICI:
 		/* RBV. Disable all the slot interrupts. SIER works like IER. */
 		via2[rSIER] = 0x7F;
 		break;
+	}
+}
+
+void via_nubus_irq_startup(int irq)
+{
+	int irq_idx = IRQ_IDX(irq);
+
+	switch (macintosh_config->via_type) {
+	case MAC_VIA_II:
 	case MAC_VIA_QUADRA:
-		/* Disable the inactive slot interrupts by making those lines outputs. */
-		if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
-		    (macintosh_config->adb_type != MAC_ADB_PB2)) {
-			via2[vBufA] |= 0x7F;
-			via2[vDirA] |= 0x7F;
+		/* Make the port A line an input. Probably redundant. */
+		if (macintosh_config->via_type == MAC_VIA_II) {
+			/* The top two bits are RAM size outputs. */
+			via2[vDirA] &= 0xC0 | ~(1 << irq_idx);
+		} else {
+			/* Allow NuBus slots 9 through F. */
+			via2[vDirA] &= 0x80 | ~(1 << irq_idx);
 		}
+		/* fall through */
+	case MAC_VIA_IICI:
+		via_irq_enable(irq);
+		break;
+	}
+}
+
+void via_nubus_irq_shutdown(int irq)
+{
+	switch (macintosh_config->via_type) {
+	case MAC_VIA_II:
+	case MAC_VIA_QUADRA:
+		/* Ensure that the umbrella CA1 interrupt remains enabled. */
+		via_irq_enable(irq);
+		break;
+	case MAC_VIA_IICI:
+		via_irq_disable(irq);
 		break;
 	}
 }
@@ -531,25 +563,18 @@
 	} else if (irq_src == 7) {
 		switch (macintosh_config->via_type) {
 		case MAC_VIA_II:
+		case MAC_VIA_QUADRA:
 			nubus_disabled &= ~(1 << irq_idx);
 			/* Enable the CA1 interrupt when no slot is disabled. */
 			if (!nubus_disabled)
 				via2[gIER] = IER_SET_BIT(1);
 			break;
-		case MAC_VIA_IIci:
+		case MAC_VIA_IICI:
 			/* On RBV, enable the slot interrupt.
 			 * SIER works like IER.
 			 */
 			via2[rSIER] = IER_SET_BIT(irq_idx);
 			break;
-		case MAC_VIA_QUADRA:
-			/* Make the port A line an input to enable the slot irq.
-			 * But not on PowerBooks, that's ADB.
-			 */
-			if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
-			    (macintosh_config->adb_type != MAC_ADB_PB2))
-				via2[vDirA] &= ~(1 << irq_idx);
-			break;
 		}
 	}
 }
@@ -569,60 +594,18 @@
 	} else if (irq_src == 7) {
 		switch (macintosh_config->via_type) {
 		case MAC_VIA_II:
+		case MAC_VIA_QUADRA:
 			nubus_disabled |= 1 << irq_idx;
 			if (nubus_disabled)
 				via2[gIER] = IER_CLR_BIT(1);
 			break;
-		case MAC_VIA_IIci:
+		case MAC_VIA_IICI:
 			via2[rSIER] = IER_CLR_BIT(irq_idx);
 			break;
-		case MAC_VIA_QUADRA:
-			if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
-			    (macintosh_config->adb_type != MAC_ADB_PB2))
-				via2[vDirA] |= 1 << irq_idx;
-			break;
 		}
 	}
 }
 
-void via_irq_clear(int irq) {
-	int irq_src	= IRQ_SRC(irq);
-	int irq_idx	= IRQ_IDX(irq);
-	int irq_bit	= 1 << irq_idx;
-
-	if (irq_src == 1) {
-		via1[vIFR] = irq_bit;
-	} else if (irq_src == 2) {
-		via2[gIFR] = irq_bit | rbv_clear;
-	} else if (irq_src == 7) {
-		/* FIXME: There is no way to clear an individual nubus slot
-		 * IRQ flag, other than getting the device to do it.
-		 */
-	}
-}
-
-/*
- * Returns nonzero if an interrupt is pending on the given
- * VIA/IRQ combination.
- */
-
-int via_irq_pending(int irq)
-{
-	int irq_src	= IRQ_SRC(irq);
-	int irq_idx	= IRQ_IDX(irq);
-	int irq_bit	= 1 << irq_idx;
-
-	if (irq_src == 1) {
-		return via1[vIFR] & irq_bit;
-	} else if (irq_src == 2) {
-		return via2[gIFR] & irq_bit;
-	} else if (irq_src == 7) {
-		/* Always 0 for MAC_VIA_QUADRA if the slot irq is disabled. */
-		return ~via2[gBufA] & irq_bit;
-	}
-	return 0;
-}
-
 void via1_set_head(int head)
 {
 	if (head == 0)
@@ -631,3 +614,9 @@
 		via1[vBufA] |= VIA1A_vHeadSel;
 }
 EXPORT_SYMBOL(via1_set_head);
+
+int via2_scsi_drq_pending(void)
+{
+	return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ));
+}
+EXPORT_SYMBOL(via2_scsi_drq_pending);
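The comment block added above places a burden on NuBus drivers: on genuine-VIA
and Quadra machines only the shared CA1 "SLOTS" interrupt can be masked, so a
driver has to quiet its card before it gives up the IRQ. A minimal sketch of
what that looks like from a driver's point of view (the my_card_* helpers are
hypothetical placeholders; request_irq()/free_irq() are the standard kernel
API and IRQ_NUBUS_9 comes from <asm/macints.h>):

#include <linux/interrupt.h>
#include <asm/macints.h>

static irqreturn_t my_card_isr(int irq, void *dev_id)
{
	/* Acknowledge the interrupt at the card itself; the slot line
	 * cannot be cleared individually on genuine-VIA/Quadra hardware. */
	my_card_ack_irq(dev_id);
	return IRQ_HANDLED;
}

static int my_card_attach(void *card)
{
	/* irq_startup unmasks the slot (and the CA1 umbrella if needed). */
	return request_irq(IRQ_NUBUS_9, my_card_isr, 0, "my_card", card);
}

static void my_card_detach(void *card)
{
	/* Quiet the card first; a pending /NMRQ from this slot would
	 * otherwise keep re-triggering the shared CA1 interrupt. */
	my_card_disable_irq_at_device(card);
	free_irq(IRQ_NUBUS_9, card);
}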
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 09cadf1..cfbf320 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -4,6 +4,8 @@
 
 obj-y	:= init.o
 
-obj-$(CONFIG_MMU)		+= cache.o fault.o hwtest.o
-obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
+obj-$(CONFIG_MMU)		+= cache.o fault.o
+obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o hwtest.o
+obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o hwtest.o
+obj-$(CONFIG_MMU_COLDFIRE)	+= kmap.o memory.o mcfmmu.o
+
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 5437fff..95d0bf6 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -74,8 +74,16 @@
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long start, end;
+		start = address & ICACHE_SET_MASK;
+		end = endaddr & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_icache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_icache(start, end);
+	} else if (CPU_IS_040_OR_060) {
 		address &= PAGE_MASK;
 
 		do {
@@ -100,7 +108,17 @@
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len)
 {
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long start, end;
+		start = addr & ICACHE_SET_MASK;
+		end = (addr + len) & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_icache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_icache(start, end);
+
+	} else if (CPU_IS_040_OR_060) {
 		asm volatile ("nop\n\t"
 			      ".chip 68040\n\t"
 			      "cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index bbe5254..89f3b20 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -24,6 +24,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/system.h>
+#include <asm/traps.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #ifdef CONFIG_ATARI
@@ -75,6 +76,38 @@
 
 extern pmd_t *zero_pgtable;
 
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS	&vectors[0]
+#else
+#define VECTORS	_ramvec
+#endif
+
+void __init print_memmap(void)
+{
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+	pr_notice("Virtual kernel memory layout:\n"
+		"    vector  : 0x%08lx - 0x%08lx   (%4ld KiB)\n"
+		"    kmap    : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+		"      .init : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+		"      .text : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+		"      .data : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+		"      .bss  : 0x%p" " - 0x%p" "   (%4d KiB)\n",
+		MLK(VECTORS, VECTORS + 256),
+		MLM(KMAP_START, KMAP_END),
+		MLM(VMALLOC_START, VMALLOC_END),
+		MLM(PAGE_OFFSET, (unsigned long)high_memory),
+		MLK_ROUNDUP(__init_begin, __init_end),
+		MLK_ROUNDUP(_stext, _etext),
+		MLK_ROUNDUP(_sdata, _edata),
+		MLK_ROUNDUP(_sbss, _ebss));
+}
+
 void __init mem_init(void)
 {
 	pg_data_t *pgdat;
@@ -106,7 +139,7 @@
 		}
 	}
 
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 	/* insert pointer tables allocated so far into the tablelist */
 	init_pointer_table((unsigned long)kernel_pg_dir);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -125,6 +158,7 @@
 	       codepages << (PAGE_SHIFT-10),
 	       datapages << (PAGE_SHIFT-10),
 	       initpages << (PAGE_SHIFT-10));
+	print_memmap();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 6934584..1cc2bed 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -171,7 +171,8 @@
 			break;
 		}
 	} else {
-		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
+			     _PAGE_DIRTY | _PAGE_READWRITE);
 		switch (cacheflag) {
 		case IOMAP_NOCACHE_SER:
 		case IOMAP_NOCACHE_NONSER:
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644
index 0000000..babd5a9
--- /dev/null
+++ b/arch/m68k/mm/mcfmmu.c
@@ -0,0 +1,198 @@
+/*
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the Coldfire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/mcf_pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern unsigned long num_pages;
+
+void free_initmem(void)
+{
+}
+
+/*
+ * ColdFire paging_init derived from sun3.
+ */
+void __init paging_init(void)
+{
+	pgd_t *pg_dir;
+	pte_t *pg_table;
+	unsigned long address, size;
+	unsigned long next_pgtable, bootmem_end;
+	unsigned long zones_size[MAX_NR_ZONES];
+	enum zone_type zone;
+	int i;
+
+	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+	memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+	pg_dir = swapper_pg_dir;
+	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+	size = num_pages * sizeof(pte_t);
+	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+	next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+
+	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+	address = PAGE_OFFSET;
+	while (address < (unsigned long)high_memory) {
+		pg_table = (pte_t *) next_pgtable;
+		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+		pgd_val(*pg_dir) = (unsigned long) pg_table;
+		pg_dir++;
+
+		/* now change pg_table to kernel virtual addresses */
+		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
+			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+			if (address >= (unsigned long) high_memory)
+				pte_val(pte) = 0;
+
+			set_pte(pg_table, pte);
+			address += PAGE_SIZE;
+		}
+	}
+
+	current->mm = NULL;
+
+	for (zone = 0; zone < MAX_NR_ZONES; zone++)
+		zones_size[zone] = 0x0;
+	zones_size[ZONE_DMA] = num_pages;
+	free_area_init(zones_size);
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+	unsigned long flags, mmuar;
+	struct mm_struct *mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int asid;
+
+	local_irq_save(flags);
+
+	mmuar = (dtlb) ? mmu_read(MMUAR) :
+		regs->pc + (extension_word * sizeof(long));
+
+	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+	if (!mm) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pgd = pgd_offset(mm, mmuar);
+	if (pgd_none(*pgd))  {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pmd = pmd_offset(pgd, mmuar);
+	if (pmd_none(*pmd)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+				: pte_offset_map(pmd, mmuar);
+	if (pte_none(*pte) || !pte_present(*pte)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	if (write) {
+		if (!pte_write(*pte)) {
+			local_irq_restore(flags);
+			return -1;
+		}
+		set_pte(pte, pte_mkdirty(*pte));
+	}
+
+	set_pte(pte, pte_mkyoung(*pte));
+	asid = mm->context & 0xff;
+	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+		set_pte(pte, pte_wrprotect(*pte));
+
+	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+	if (dtlb)
+		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+	else
+		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+/*
+ * Initialize the context management stuff.
+ * The following was taken from arch/ppc/mmu_context.c
+ */
+void __init mmu_context_init(void)
+{
+	/*
+	 * Some processors have too few contexts to reserve one for
+	 * init_mm, and require using context 0 for a normal task.
+	 * Other processors reserve the use of context zero for the kernel.
+	 * This code assumes FIRST_CONTEXT < 32.
+	 */
+	context_map[0] = (1 << FIRST_CONTEXT) - 1;
+	next_mmu_context = FIRST_CONTEXT;
+	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP.  If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :).  This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ *  -- paulus
+ */
+void steal_context(void)
+{
+	struct mm_struct *mm;
+	/*
+	 * free up context `next_mmu_context'
+	 * if we shouldn't free context 0, don't...
+	 */
+	if (next_mmu_context < FIRST_CONTEXT)
+		next_mmu_context = FIRST_CONTEXT;
+	mm = context_mm[next_mmu_context];
+	flush_tlb_mm(mm);
+	destroy_context(mm);
+}
+
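steal_context() above handles only the eviction side; the allocation path it
pairs with lives in the mmu_context header and is not part of this hunk. As a
rough sketch of the classic PPC-style allocator the comment refers to
(illustrative only: NO_CONTEXT and the function name are assumptions; the
context_map, next_mmu_context, nr_free_contexts and context_mm variables are
the ones declared above), assigning a context looks roughly like this:

/* Illustrative sketch of PPC-style context allocation, not the real header. */
static void assign_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)	/* NO_CONTEXT: "none assigned yet" */
		return;

	/* If every context is in use, evict one. steal_context() above
	 * flushes the victim's TLB entries and frees its slot. */
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();

	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	/* Round-robin start point; assumes LAST_CONTEXT + 1 is a power of 2. */
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;

	mm->context = ctx;
	context_mm[ctx] = mm;
}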
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 34c77ce..a5dbb74 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -203,7 +203,9 @@
 
 void cache_clear (unsigned long paddr, int len)
 {
-    if (CPU_IS_040_OR_060) {
+    if (CPU_IS_COLDFIRE) {
+	flush_cf_bcache(0, DCACHE_MAX_ADDR);
+    } else if (CPU_IS_040_OR_060) {
 	int tmp;
 
 	/*
@@ -250,7 +252,9 @@
 
 void cache_push (unsigned long paddr, int len)
 {
-    if (CPU_IS_040_OR_060) {
+    if (CPU_IS_COLDFIRE) {
+	flush_cf_bcache(0, DCACHE_MAX_ADDR);
+    } else if (CPU_IS_040_OR_060) {
 	int tmp = PAGE_SIZE;
 
 	/*
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 31a66d9..c3fb3bd 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -124,6 +124,163 @@
 #define PccSCCMICR	0x1d
 #define PccSCCTICR	0x1e
 #define PccSCCRICR	0x1f
+#define PccTPIACKR	0x25
+
+#ifdef CONFIG_EARLY_PRINTK
+
+/**** cd2401 registers ****/
+#define CD2401_ADDR	(0xfff45000)
+
+#define CyGFRCR         (0x81)
+#define CyCCR		(0x13)
+#define      CyCLR_CHAN		(0x40)
+#define      CyINIT_CHAN	(0x20)
+#define      CyCHIP_RESET	(0x10)
+#define      CyENB_XMTR		(0x08)
+#define      CyDIS_XMTR		(0x04)
+#define      CyENB_RCVR		(0x02)
+#define      CyDIS_RCVR		(0x01)
+#define CyCAR		(0xee)
+#define CyIER		(0x11)
+#define      CyMdmCh		(0x80)
+#define      CyRxExc		(0x20)
+#define      CyRxData		(0x08)
+#define      CyTxMpty		(0x02)
+#define      CyTxRdy		(0x01)
+#define CyLICR		(0x26)
+#define CyRISR		(0x89)
+#define      CyTIMEOUT		(0x80)
+#define      CySPECHAR		(0x70)
+#define      CyOVERRUN		(0x08)
+#define      CyPARITY		(0x04)
+#define      CyFRAME		(0x02)
+#define      CyBREAK		(0x01)
+#define CyREOIR		(0x84)
+#define CyTEOIR		(0x85)
+#define CyMEOIR		(0x86)
+#define      CyNOTRANS		(0x08)
+#define CyRFOC		(0x30)
+#define CyRDR		(0xf8)
+#define CyTDR		(0xf8)
+#define CyMISR		(0x8b)
+#define CyRISR		(0x89)
+#define CyTISR		(0x8a)
+#define CyMSVR1		(0xde)
+#define CyMSVR2		(0xdf)
+#define      CyDSR		(0x80)
+#define      CyDCD		(0x40)
+#define      CyCTS		(0x20)
+#define      CyDTR		(0x02)
+#define      CyRTS		(0x01)
+#define CyRTPRL		(0x25)
+#define CyRTPRH		(0x24)
+#define CyCOR1		(0x10)
+#define      CyPARITY_NONE	(0x00)
+#define      CyPARITY_E		(0x40)
+#define      CyPARITY_O		(0xC0)
+#define      Cy_5_BITS		(0x04)
+#define      Cy_6_BITS		(0x05)
+#define      Cy_7_BITS		(0x06)
+#define      Cy_8_BITS		(0x07)
+#define CyCOR2		(0x17)
+#define      CyETC		(0x20)
+#define      CyCtsAE		(0x02)
+#define CyCOR3		(0x16)
+#define      Cy_1_STOP		(0x02)
+#define      Cy_2_STOP		(0x04)
+#define CyCOR4		(0x15)
+#define      CyREC_FIFO		(0x0F)  /* Receive FIFO threshold */
+#define CyCOR5		(0x14)
+#define CyCOR6		(0x18)
+#define CyCOR7		(0x07)
+#define CyRBPR		(0xcb)
+#define CyRCOR		(0xc8)
+#define CyTBPR		(0xc3)
+#define CyTCOR		(0xc0)
+#define CySCHR1		(0x1f)
+#define CySCHR2 	(0x1e)
+#define CyTPR		(0xda)
+#define CyPILR1		(0xe3)
+#define CyPILR2		(0xe0)
+#define CyPILR3		(0xe1)
+#define CyCMR		(0x1b)
+#define      CyASYNC		(0x02)
+#define CyLICR          (0x26)
+#define CyLIVR          (0x09)
+#define CySCRL		(0x23)
+#define CySCRH		(0x22)
+#define CyTFTC		(0x80)
+
+static void cons_write(struct console *co, const char *str, unsigned count)
+{
+	volatile unsigned char *base_addr = (u_char *)CD2401_ADDR;
+	volatile u_char sink;
+	u_char ier;
+	int port;
+	u_char do_lf = 0;
+	int i = 0;
+
+	/* Ensure transmitter is enabled! */
+
+	port = 0;
+	base_addr[CyCAR] = (u_char)port;
+	while (base_addr[CyCCR])
+		;
+	base_addr[CyCCR] = CyENB_XMTR;
+
+	ier = base_addr[CyIER];
+	base_addr[CyIER] = CyTxMpty;
+
+	while (1) {
+		if (pcc2chip[PccSCCTICR] & 0x20)
+		{
+			/* We have a Tx int. Acknowledge it */
+			sink = pcc2chip[PccTPIACKR];
+			if ((base_addr[CyLICR] >> 2) == port) {
+				if (i == count) {
+					/* Last char of string is now output */
+					base_addr[CyTEOIR] = CyNOTRANS;
+					break;
+				}
+				if (do_lf) {
+					base_addr[CyTDR] = '\n';
+					str++;
+					i++;
+					do_lf = 0;
+				}
+				else if (*str == '\n') {
+					base_addr[CyTDR] = '\r';
+					do_lf = 1;
+				}
+				else {
+					base_addr[CyTDR] = *str++;
+					i++;
+				}
+				base_addr[CyTEOIR] = 0;
+			}
+			else
+				base_addr[CyTEOIR] = CyNOTRANS;
+		}
+	}
+
+	base_addr[CyIER] = ier;
+}
+
+static struct console cons_info =
+{
+	.name	= "sercon",
+	.write	= cons_write,
+	.flags	= CON_PRINTBUFFER | CON_BOOT,
+	.index	= -1,
+};
+
+static void __init mvme16x_early_console(void)
+{
+	register_console(&cons_info);
+
+	printk(KERN_INFO "MVME16x: early console registered\n");
+}
+#endif
 
 void __init config_mvme16x(void)
 {
@@ -183,6 +340,9 @@
 	pcc2chip[PccSCCMICR] = 0x10;
 	pcc2chip[PccSCCTICR] = 0x10;
 	pcc2chip[PccSCCRICR] = 0x10;
+#ifdef CONFIG_EARLY_PRINTK
+	mvme16x_early_console();
+#endif
     }
 }
 
diff --git a/arch/m68k/platform/54xx/config.c b/arch/m68k/platform/54xx/config.c
index 7813098..ee04354 100644
--- a/arch/m68k/platform/54xx/config.c
+++ b/arch/m68k/platform/54xx/config.c
@@ -13,11 +13,17 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/pgalloc.h>
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/m54xxsim.h>
 #include <asm/mcfuart.h>
 #include <asm/m54xxgpt.h>
+#ifdef CONFIG_MMU
+#include <asm/mmu_context.h>
+#endif
 
 /***************************************************************************/
 
@@ -95,8 +101,49 @@
 
 /***************************************************************************/
 
+#ifdef CONFIG_MMU
+
+unsigned long num_pages;
+
+static void __init mcf54xx_bootmem_alloc(void)
+{
+	unsigned long start_pfn;
+	unsigned long memstart;
+
+	/* _rambase and _ramend will be naturally page aligned */
+	m68k_memory[0].addr = _rambase;
+	m68k_memory[0].size = _ramend - _rambase;
+
+	/* compute total pages in system */
+	num_pages = (_ramend - _rambase) >> PAGE_SHIFT;
+
+	/* page numbers */
+	memstart = PAGE_ALIGN(_ramstart);
+	min_low_pfn = _rambase >> PAGE_SHIFT;
+	start_pfn = memstart >> PAGE_SHIFT;
+	max_low_pfn = _ramend >> PAGE_SHIFT;
+	high_memory = (void *)_ramend;
+
+	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+	module_fixup(NULL, __start_fixup, __stop_fixup);
+
+	/* setup bootmem data */
+	m68k_setup_node(0);
+	memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
+		min_low_pfn, max_low_pfn);
+	free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
+}
+
+#endif /* CONFIG_MMU */
+
+/***************************************************************************/
+
 void __init config_BSP(char *commandp, int size)
 {
+#ifdef CONFIG_MMU
+	mcf54xx_bootmem_alloc();
+	mmu_context_init();
+#endif
 	mach_reset = mcf54xx_reset;
 	m54xx_uarts_init();
 }
diff --git a/arch/m68k/platform/68328/Makefile b/arch/m68k/platform/68328/Makefile
index e4dfd8f..ee61bf8 100644
--- a/arch/m68k/platform/68328/Makefile
+++ b/arch/m68k/platform/68328/Makefile
@@ -14,12 +14,8 @@
 obj-$(CONFIG_ROM)	+= romvec.o
 
 extra-y			:= head.o
-extra-$(CONFIG_M68328)	+= bootlogo.rh head.o
-
-$(obj)/bootlogo.rh: $(src)/bootlogo.h
-	perl $(src)/bootlogo.pl < $(src)/bootlogo.h > $(obj)/bootlogo.rh
 
 $(obj)/head.o: $(obj)/$(head-y)
 	ln -sf $(head-y) $(obj)/head.o
 
-clean-files := $(obj)/bootlogo.rh $(obj)/head.o $(head-y)
+clean-files := $(obj)/head.o $(head-y)
diff --git a/arch/m68k/platform/68328/bootlogo.h b/arch/m68k/platform/68328/bootlogo.h
index 67bc2c1..b896c93 100644
--- a/arch/m68k/platform/68328/bootlogo.h
+++ b/arch/m68k/platform/68328/bootlogo.h
@@ -1,6 +1,6 @@
 #define bootlogo_width 160
 #define bootlogo_height 160
-static unsigned char bootlogo_bits[] = {
+unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/platform/68328/bootlogo.pl b/arch/m68k/platform/68328/bootlogo.pl
deleted file mode 100644
index b04ae3f..0000000
--- a/arch/m68k/platform/68328/bootlogo.pl
+++ /dev/null
@@ -1,10 +0,0 @@
-
-$_ = join("", <>);
-
-s/(0x[0-9a-f]{2})/sprintf("0x%.2x",ord(pack("b8",unpack("B8",chr(hex($1))))))/gei;
-
-s/^ /	.byte /gm;
-s/[,};]+$//gm;
-s/^static.*//gm;
-
-print $_;
diff --git a/arch/m68k/platform/68328/config.c b/arch/m68k/platform/68328/config.c
index a7bd21d..d70bf26 100644
--- a/arch/m68k/platform/68328/config.c
+++ b/arch/m68k/platform/68328/config.c
@@ -20,6 +20,9 @@
 #include <asm/system.h>
 #include <asm/machdep.h>
 #include <asm/MC68328.h>
+#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
+#include "bootlogo.h"
+#endif
 
 /***************************************************************************/
 
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index aecff53..2ebfd64 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -24,19 +24,7 @@
 .global _ramstart
 .global _ramend
 
-.global penguin_bits
-
-#ifdef CONFIG_PILOT
-
-#define IMR 0xFFFFF304
-
-	.data
-	.align 16
-
-penguin_bits:	
-#include "bootlogo.rh"
-
-#endif
+.global bootlogo_bits
 
 /*****************************************************************************/
 
@@ -185,9 +173,6 @@
 	moveq	#79, %d7
 	movel	%d0, _ramend
 
-	movel	%a3, %d0
-	movel	%d0, rom_length
-
 	pea	0
 	pea	env
 	pea	%sp@(4)
@@ -196,7 +181,7 @@
 	DBG_PUTC('H')
 
 #ifdef CONFIG_PILOT
-	movel	#penguin_bits, 0xFFFFFA00
+	movel	#bootlogo_bits, 0xFFFFFA00
 	moveb	#10, 0xFFFFFA05
 	movew	#160, 0xFFFFFA08
 	movew	#160, 0xFFFFFA0A
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index 6ec77d3..a5ff96d 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -8,7 +8,7 @@
 	.global _ramend
 
 #ifdef CONFIG_INIT_LCD
-	.global splash_bits
+	.global bootlogo_bits
 #endif
 
 	.data
@@ -29,16 +29,11 @@
 
 #define	RAMEND	(CONFIG_RAMBASE + CONFIG_RAMSIZE)
 
-#ifdef CONFIG_INIT_LCD
-splash_bits:
-#include "bootlogo.rh"
-#endif
-	
 	.text
 _start:
 _stext:	movew	#0x2700,%sr
 #ifdef CONFIG_INIT_LCD
-	movel	#splash_bits, 0xfffffA00 /* LSSA */
+	movel	#bootlogo_bits, 0xfffffA00 /* LSSA */
 	moveb	#0x28,   0xfffffA05	/* LVPW */
 	movew	#0x280,  0xFFFFFa08	/* LXMAX */
 	movew	#0x1df,  0xFFFFFa0a	/* LYMAX */
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c
index 309f7259..f267886 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68328/timers.c
@@ -93,7 +93,6 @@
 	.name	= "timer",
 	.rating	= 250,
 	.read	= m68328_read_clk,
-	.shift	= 20,
 	.mask	= CLOCKSOURCE_MASK(32),
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -115,8 +114,7 @@
 
 	/* Enable timer 1 */
 	TCTL |= TCTL_TEN;
-	m68328_clk.mult = clocksource_hz2mult(TICKS_PER_JIFFY*HZ, m68328_clk.shift);
-	clocksource_register(&m68328_clk);
+	clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
 }
 
 /***************************************************************************/
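The clocksource change just above (dropping the hand-picked .shift and the
clocksource_hz2mult() call in favour of clocksource_register_hz()) repeats in
the ColdFire dma_timer, pit, sltimers and timers drivers below. The pattern in
isolation, with the field values taken from the hunk above and the standard
<linux/clocksource.h> API:

/* Old style: the driver picked a shift and computed mult by hand. */
m68328_clk.mult = clocksource_hz2mult(TICKS_PER_JIFFY * HZ, m68328_clk.shift);
clocksource_register(&m68328_clk);

/* New style: pass the counter frequency and let the clocksource core
 * derive a suitable mult/shift pair before registering. */
clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY * HZ);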
diff --git a/arch/m68k/platform/coldfire/dma_timer.c b/arch/m68k/platform/coldfire/dma_timer.c
index a5f5628..235ad57 100644
--- a/arch/m68k/platform/coldfire/dma_timer.c
+++ b/arch/m68k/platform/coldfire/dma_timer.c
@@ -44,7 +44,6 @@
 	.rating		= 200,
 	.read		= cf_dt_get_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 20,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -60,9 +59,7 @@
 	__raw_writeb(0x00, DTER0);
 	__raw_writel(0x00000000, DTRR0);
 	__raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0);
-	clocksource_cf_dt.mult = clocksource_hz2mult(DMA_FREQ,
-						     clocksource_cf_dt.shift);
-	return clocksource_register(&clocksource_cf_dt);
+	return clocksource_register_hz(&clocksource_cf_dt, DMA_FREQ);
 }
 
 arch_initcall(init_cf_dt_clocksource);
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index 3157461..863889f 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -54,7 +54,6 @@
 .globl ret_from_signal
 .globl sys_call_table
 .globl inthandler
-.globl fasthandler
 
 enosys:
 	mov.l	#sys_ni_syscall,%d3
@@ -63,6 +62,7 @@
 ENTRY(system_call)
 	SAVE_ALL_SYS
 	move	#0x2000,%sr		/* enable intrs again */
+	GET_CURRENT(%d2)
 
 	cmpl	#NR_syscalls,%d0
 	jcc	enosys
@@ -166,6 +166,7 @@
  */
 ENTRY(inthandler)
 	SAVE_ALL_INT
+	GET_CURRENT(%d2)
 
 	movew	%sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
 	andl	#0x03fc,%d0		/* mask out vector only */
@@ -191,7 +192,9 @@
 	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
 	RDUSP					 /* movel %usp,%a3 */
 	movel	%a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
-
+#ifdef CONFIG_MMU
+	movel	%a1,%a2				 /* set new current */
+#endif
 	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
 	WRUSP					 /* movel %a3,%usp */
 	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
diff --git a/arch/m68k/platform/coldfire/gpio.c b/arch/m68k/platform/coldfire/gpio.c
index ff004579..292a1a5 100644
--- a/arch/m68k/platform/coldfire/gpio.c
+++ b/arch/m68k/platform/coldfire/gpio.c
@@ -15,7 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 #include <asm/gpio.h>
 #include <asm/pinmux.h>
@@ -115,13 +115,14 @@
 		mcf_pinmux_release(mcf_chip->gpio_to_pinmux[offset], 0);
 }
 
-struct sysdev_class mcf_gpio_sysclass = {
-	.name	= "gpio",
+struct bus_type mcf_gpio_subsys = {
+	.name		= "gpio",
+	.dev_name	= "gpio",
 };
 
 static int __init mcf_gpio_sysinit(void)
 {
-	return sysdev_class_register(&mcf_gpio_sysclass);
+	return subsys_system_register(&mcf_gpio_subsys, NULL);
 }
 
 core_initcall(mcf_gpio_sysinit);
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index c334838..38f04a3 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -3,7 +3,7 @@
 /*
  *	head.S -- common startup code for ColdFire CPUs.
  *
- *	(C) Copyright 1999-2010, Greg Ungerer <gerg@snapgear.com>.
+ *	(C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>.
  */
 
 /*****************************************************************************/
@@ -13,6 +13,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfmmu.h>
 #include <asm/thread_info.h>
 
 /*****************************************************************************/
@@ -135,6 +136,14 @@
 
 __HEAD
 
+#ifdef CONFIG_MMU
+_start0:
+	jmp	_start
+.global kernel_pg_dir
+.equ	kernel_pg_dir,_start0
+.equ	.,_start0+0x1000
+#endif
+
 /*
  *	This is the codes first entry point. This is where it all
  *	begins...
@@ -143,6 +152,9 @@
 _start:
 	nop					/* filler */
 	movew	#0x2700, %sr			/* no interrupts */
+	movel	#CACHE_INIT,%d0			/* disable cache */
+	movec	%d0,%CACR
+	nop
 #if defined(CONFIG_UBOOT)
 	movel	%sp,_init_sp			/* save initial stack pointer */
 #endif
@@ -176,9 +188,6 @@
 	 *	it is very similar. Define the exact settings in the headers
 	 *	then the code here is the same for all.
 	 */
-	movel	#CACHE_INIT,%d0			/* invalidate whole cache */
-	movec	%d0,%CACR
-	nop
 	movel	#ACR0_MODE,%d0			/* set RAM region for caching */
 	movec	%d0,%ACR0
 	movel	#ACR1_MODE,%d0			/* anything else to cache? */
@@ -193,6 +202,26 @@
 	movec	%d0,%CACR
 	nop
 
+#ifdef CONFIG_MMU
+	/*
+	 *	Identity mapping for the kernel region.
+	 */
+	movel	#(MMUBASE+1),%d0		/* enable MMUBAR registers */
+	movec	%d0,%MMUBAR
+	movel	#MMUOR_CA,%d0			/* clear TLB entries */
+	movel	%d0,MMUOR
+	movel	#0,%d0				/* set ASID to 0 */
+	movec	%d0,%asid
+
+	movel	#MMUCR_EN,%d0			/* Enable the identity map */
+	movel	%d0,MMUCR
+	nop					/* sync i-pipeline */
+
+	movel	#_vstart,%a0			/* jump to "virtual" space */
+	jmp	%a0@
+_vstart:
+#endif /* CONFIG_MMU */
+
 #ifdef CONFIG_ROMFS_FS
 	/*
 	 *	Move ROM filesystem above bss :-)
@@ -238,6 +267,22 @@
 	lea	init_thread_union,%a0
 	lea	THREAD_SIZE(%a0),%sp
 
+#ifdef CONFIG_MMU
+.global m68k_cputype
+.global m68k_mmutype
+.global m68k_fputype
+.global m68k_machtype
+	movel	#CPU_COLDFIRE,%d0
+	movel	%d0,m68k_cputype		/* Mark us as a ColdFire */
+	movel	#MMU_COLDFIRE,%d0
+	movel	%d0,m68k_mmutype
+	movel	#FPU_COLDFIRE,%d0
+	movel	%d0,m68k_fputype
+	movel	#MACH_M54XX,%d0
+	movel	%d0,m68k_machtype		/* Mark us as a 54xx machine */
+	lea	init_task,%a2			/* Set "current" init task */
+#endif
+
 	/*
 	 *	Assember start up done, start code proper.
 	 */
diff --git a/arch/m68k/platform/coldfire/pit.c b/arch/m68k/platform/coldfire/pit.c
index c2b9809..02663d2 100644
--- a/arch/m68k/platform/coldfire/pit.c
+++ b/arch/m68k/platform/coldfire/pit.c
@@ -144,7 +144,6 @@
 	.name	= "pit",
 	.rating	= 100,
 	.read	= pit_read_clk,
-	.shift	= 20,
 	.mask	= CLOCKSOURCE_MASK(32),
 };
 
@@ -162,8 +161,7 @@
 
 	setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq);
 
-	pit_clk.mult = clocksource_hz2mult(FREQ, pit_clk.shift);
-	clocksource_register(&pit_clk);
+	clocksource_register_hz(&pit_clk, FREQ);
 }
 
 /***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/sltimers.c b/arch/m68k/platform/coldfire/sltimers.c
index 6a85daf..54e1452 100644
--- a/arch/m68k/platform/coldfire/sltimers.c
+++ b/arch/m68k/platform/coldfire/sltimers.c
@@ -98,23 +98,25 @@
 static cycle_t mcfslt_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
-	u32 cycles;
-	u16 scnt;
+	u32 cycles, scnt;
 
 	local_irq_save(flags);
 	scnt = __raw_readl(TA(MCFSLT_SCNT));
 	cycles = mcfslt_cnt;
+	if (__raw_readl(TA(MCFSLT_SSR)) & MCFSLT_SSR_TE) {
+		cycles += mcfslt_cycles_per_jiffy;
+		scnt = __raw_readl(TA(MCFSLT_SCNT));
+	}
 	local_irq_restore(flags);
 
 	/* subtract because slice timers count down */
-	return cycles - scnt;
+	return cycles + ((mcfslt_cycles_per_jiffy - 1) - scnt);
 }
 
 static struct clocksource mcfslt_clk = {
 	.name	= "slt",
 	.rating	= 250,
 	.read	= mcfslt_read_clk,
-	.shift	= 20,
 	.mask	= CLOCKSOURCE_MASK(32),
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -136,8 +138,7 @@
 
 	setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq);
 
-	mcfslt_clk.mult = clocksource_hz2mult(MCF_BUSCLK, mcfslt_clk.shift);
-	clocksource_register(&mcfslt_clk);
+	clocksource_register_hz(&mcfslt_clk, MCF_BUSCLK);
 
 #ifdef CONFIG_HIGHPROFILE
 	mcfslt_profile_init();
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c
index 60242f6..0d90da3 100644
--- a/arch/m68k/platform/coldfire/timers.c
+++ b/arch/m68k/platform/coldfire/timers.c
@@ -88,7 +88,6 @@
 	.name	= "tmr",
 	.rating	= 250,
 	.read	= mcftmr_read_clk,
-	.shift	= 20,
 	.mask	= CLOCKSOURCE_MASK(32),
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -109,8 +108,7 @@
 	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
 		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
 
-	mcftmr_clk.mult = clocksource_hz2mult(FREQ, mcftmr_clk.shift);
-	clocksource_register(&mcftmr_clk);
+	clocksource_register_hz(&mcftmr_clk, FREQ);
 
 	setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq);
 
diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h
deleted file mode 100644
index 20a8e25..0000000
--- a/arch/microblaze/include/asm/memblock.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
-#define _ASM_MICROBLAZE_MEMBLOCK_H
-
-#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
-
-
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b73da2a..1a8ab6a 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -125,7 +125,6 @@
 #define TIF_MEMDIE		6	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	9       /* syscall auditing active */
 #define TIF_SECCOMP		10      /* secure computing */
-#define TIF_FREEZE		14	/* Freezing for suspend */
 
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_POLLING_NRFLAG	16
@@ -137,7 +136,6 @@
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_IRET		(1 << TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 95cc295..7dcb5bf 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -103,10 +103,12 @@
 		if (!idle)
 			idle = default_idle;
 
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			idle();
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 
 		preempt_enable_no_resched();
 		schedule();
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 977484a..80d314e 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -122,7 +122,6 @@
 	of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
 
 	/* Scan memory nodes and rebuild MEMBLOCKs */
-	memblock_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
@@ -130,7 +129,7 @@
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 	parse_early_param();
 
-	memblock_analyze();
+	memblock_allow_resize();
 
 	pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
 
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index bd8ccab..88a0163 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -19,50 +19,11 @@
 static int handle; /* reset pin handle */
 static unsigned int reset_val;
 
-static int of_reset_gpio_handle(void)
-{
-	int ret; /* variable which stored handle reset gpio pin */
-	struct device_node *root; /* root node */
-	struct device_node *gpio; /* gpio node */
-	struct gpio_chip *gc;
-	u32 flags;
-	const void *gpio_spec;
-
-	/* find out root node */
-	root = of_find_node_by_path("/");
-
-	/* give me handle for gpio node to be possible allocate pin */
-	ret = of_parse_phandles_with_args(root, "hard-reset-gpios",
-				"#gpio-cells", 0, &gpio, &gpio_spec);
-	if (ret) {
-		pr_debug("%s: can't parse gpios property\n", __func__);
-		goto err0;
-	}
-
-	gc = of_node_to_gpiochip(gpio);
-	if (!gc) {
-		pr_debug("%s: gpio controller %s isn't registered\n",
-			 root->full_name, gpio->full_name);
-		ret = -ENODEV;
-		goto err1;
-	}
-
-	ret = gc->of_xlate(gc, root, gpio_spec, &flags);
-	if (ret < 0)
-		goto err1;
-
-	ret += gc->base;
-err1:
-	of_node_put(gpio);
-err0:
-	pr_debug("%s exited with status %d\n", __func__, ret);
-	return ret;
-}
-
 void of_platform_reset_gpio_probe(void)
 {
 	int ret;
-	handle = of_reset_gpio_handle();
+	handle = of_get_named_gpio(of_find_node_by_path("/"),
+				   "hard-reset-gpios", 0);
 
 	if (!gpio_is_valid(handle)) {
 		printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n",
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d46f1da..a7636d3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -25,6 +25,9 @@
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_JUMP_LABEL
 	select IRQ_FORCED_THREADING
+	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
+	select ARCH_DISCARD_MEMBLOCK
 
 menu "Machine selection"
 
@@ -65,7 +68,6 @@
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_ZBOOT_UART16550
 	select ARCH_REQUIRE_GPIOLIB
-	select GCD
 	select VLYNQ
 	help
 	  Support for the Texas Instruments AR7 System-on-a-Chip
@@ -2064,9 +2066,6 @@
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config ARCH_SPARSEMEM_ENABLE
 	bool
 	select SPARSEMEM_STATIC
@@ -2369,10 +2368,6 @@
 	  Linux driver support status is documented at:
 	  <http://www.linux-mips.org/wiki/DECstation>
 
-#config ACCESSBUS
-#	bool "Access.Bus support"
-#	depends on TC
-
 config MMU
 	bool
 	default y
diff --git a/arch/mips/include/asm/ip32/mace.h b/arch/mips/include/asm/ip32/mace.h
index d08d7c6..c523123 100644
--- a/arch/mips/include/asm/ip32/mace.h
+++ b/arch/mips/include/asm/ip32/mace.h
@@ -95,7 +95,7 @@
  * Ethernet interface
  */
 struct mace_ethernet {
-	volatile unsigned long mac_ctrl;
+	volatile u64 mac_ctrl;
 	volatile unsigned long int_stat;
 	volatile unsigned long dma_ctrl;
 	volatile unsigned long timer;
diff --git a/arch/mips/include/asm/ipcbuf.h b/arch/mips/include/asm/ipcbuf.h
index d47d08f..84c7e51 100644
--- a/arch/mips/include/asm/ipcbuf.h
+++ b/arch/mips/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_IPCBUF_H
-#define _ASM_IPCBUF_H
-
-/*
- * The ipc64_perm structure for alpha architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit seq
- * - 2 miscellaneous 64-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t	key;
-	__kernel_uid_t	uid;
-	__kernel_gid_t	gid;
-	__kernel_uid_t	cuid;
-	__kernel_gid_t	cgid;
-	__kernel_mode_t	mode;
-	unsigned short	seq;
-	unsigned short	__pad1;
-	unsigned long	__unused1;
-	unsigned long	__unused2;
-};
-
-#endif /* _ASM_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h
index 9de5190..ad5c0a7 100644
--- a/arch/mips/include/asm/socket.h
+++ b/arch/mips/include/asm/socket.h
@@ -82,6 +82,9 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #ifdef __KERNEL__
 
 /** sock_type - Socket types
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6..0d85d8e 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -117,7 +117,6 @@
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_FIXADE		20	/* Fix address errors in software */
 #define TIF_LOGADE		21	/* Log address errors to syslog */
 #define TIF_32BIT_REGS		22	/* also implies 16/32 fprs */
@@ -141,7 +140,6 @@
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
 #define _TIF_LOGADE		(1<<TIF_LOGADE)
 #define _TIF_32BIT_REGS		(1<<TIF_32BIT_REGS)
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
index 533812b..43bf70e 100644
--- a/arch/mips/include/asm/types.h
+++ b/arch/mips/include/asm/types.h
@@ -21,12 +21,6 @@
 # include <asm-generic/int-ll64.h>
 #endif
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c47f96e..7955409 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -56,7 +56,8 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched() && cpu_online(cpu)) {
 #ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);
@@ -77,7 +78,8 @@
 		     system_state == SYSTEM_BOOTING))
 			play_dead();
 #endif
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 84af26a..b1cb8f8 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -14,6 +14,7 @@
 #include <linux/ioport.h>
 #include <linux/export.h>
 #include <linux/screen_info.h>
+#include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/root_dev.h>
@@ -352,7 +353,7 @@
 			continue;
 #endif
 
-		add_active_range(0, start, end);
+		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
 	}
 
 	/*
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
index bc5e976..4b2ea28 100644
--- a/arch/mips/sgi-ip27/Kconfig
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -1,9 +1,3 @@
-#config SGI_SN0_XXL
-#	bool "IP27 XXL"
-#	depends on SGI_IP27
-#	  This options adds support for userspace processes up to 16TB size.
-#	  Normally the limit is just .5TB.
-
 choice
 	prompt "Node addressing mode"
 	depends on SGI_IP27
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index bc12971..b105eca 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -12,6 +12,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
@@ -381,8 +382,8 @@
 				continue;
 			}
 			num_physpages += slot_psize;
-			add_active_range(node, slot_getbasepfn(node, slot),
-					 slot_getbasepfn(node, slot) + slot_psize);
+			memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
+					  PFN_PHYS(slot_psize), node);
 		}
 	}
 }
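
The two conversions above change units along with the API: add_active_range() took page frame numbers, while memblock_add_node() takes a physical base address and a size in bytes, hence the PFN_PHYS() wrappers. A minimal sketch of the pattern (illustrative only; start_pfn, end_pfn and nid stand in for whatever values the caller already has):

	/* old: PFN-based early node map */
	add_active_range(nid, start_pfn, end_pfn);

	/* new: byte-based, node-aware memblock registration */
	memblock_add_node(PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn), nid);
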
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index 3e639bd..3cd937e 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -71,7 +71,6 @@
 	bool
 	select DMA_COHERENT
 	select IRQ_CPU
-	select SIBYTE_CFE
 	select SWAP_IO_SPACE
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
diff --git a/arch/mips/txx9/generic/7segled.c b/arch/mips/txx9/generic/7segled.c
index 7f8416f..8e93b21 100644
--- a/arch/mips/txx9/generic/7segled.c
+++ b/arch/mips/txx9/generic/7segled.c
@@ -9,7 +9,7 @@
  * (C) Copyright TOSHIBA CORPORATION 2005-2007
  * All Rights Reserved.
  */
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/map_to_7segment.h>
 #include <asm/txx9/generic.h>
@@ -37,8 +37,8 @@
 	return 0;
 }
 
-static ssize_t ascii_store(struct sys_device *dev,
-			   struct sysdev_attribute *attr,
+static ssize_t ascii_store(struct device *dev,
+			   struct device_attribute *attr,
 			   const char *buf, size_t size)
 {
 	unsigned int ch = dev->id;
@@ -46,8 +46,8 @@
 	return size;
 }
 
-static ssize_t raw_store(struct sys_device *dev,
-			 struct sysdev_attribute *attr,
+static ssize_t raw_store(struct device *dev,
+			 struct device_attribute *attr,
 			 const char *buf, size_t size)
 {
 	unsigned int ch = dev->id;
@@ -55,19 +55,19 @@
 	return size;
 }
 
-static SYSDEV_ATTR(ascii, 0200, NULL, ascii_store);
-static SYSDEV_ATTR(raw, 0200, NULL, raw_store);
+static DEVICE_ATTR(ascii, 0200, NULL, ascii_store);
+static DEVICE_ATTR(raw, 0200, NULL, raw_store);
 
-static ssize_t map_seg7_show(struct sysdev_class *class,
-			     struct sysdev_class_attribute *attr,
+static ssize_t map_seg7_show(struct device *dev,
+			     struct device_attribute *attr,
 			     char *buf)
 {
 	memcpy(buf, &txx9_seg7map, sizeof(txx9_seg7map));
 	return sizeof(txx9_seg7map);
 }
 
-static ssize_t map_seg7_store(struct sysdev_class *class,
-			      struct sysdev_class_attribute *attr,
+static ssize_t map_seg7_store(struct device *dev,
+			      struct device_attribute *attr,
 			      const char *buf, size_t size)
 {
 	if (size != sizeof(txx9_seg7map))
@@ -76,10 +76,11 @@
 	return size;
 }
 
-static SYSDEV_CLASS_ATTR(map_seg7, 0600, map_seg7_show, map_seg7_store);
+static DEVICE_ATTR(map_seg7, 0600, map_seg7_show, map_seg7_store);
 
-static struct sysdev_class tx_7segled_sysdev_class = {
-	.name	= "7segled",
+static struct bus_type tx_7segled_subsys = {
+	.name		= "7segled",
+	.dev_name	= "7segled",
 };
 
 static int __init tx_7segled_init_sysfs(void)
@@ -87,26 +88,25 @@
 	int error, i;
 	if (!tx_7segled_num)
 		return -ENODEV;
-	error = sysdev_class_register(&tx_7segled_sysdev_class);
+	error = subsys_system_register(&tx_7segled_subsys, NULL);
 	if (error)
 		return error;
-	error = sysdev_class_create_file(&tx_7segled_sysdev_class,
-					 &attr_map_seg7);
+	error = device_create_file(tx_7segled_subsys.dev_root, &dev_attr_map_seg7);
 	if (error)
 		return error;
 	for (i = 0; i < tx_7segled_num; i++) {
-		struct sys_device *dev;
+		struct device *dev;
 		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 		if (!dev) {
 			error = -ENODEV;
 			break;
 		}
 		dev->id = i;
-		dev->cls = &tx_7segled_sysdev_class;
-		error = sysdev_register(dev);
+		dev->bus = &tx_7segled_subsys;

+		error = device_register(dev);
 		if (!error) {
-			sysdev_create_file(dev, &attr_ascii);
-			sysdev_create_file(dev, &attr_raw);
+			device_create_file(dev, &dev_attr_ascii);
+			device_create_file(dev, &dev_attr_raw);
 		}
 	}
 	return error;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index ec38e00..ae77a79 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -22,7 +22,7 @@
 #include <linux/serial_core.h>
 #include <linux/mtd/physmap.h>
 #include <linux/leds.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <asm/bootinfo.h>
@@ -897,10 +897,13 @@
 #endif
 }
 
-static struct sysdev_class txx9_sramc_sysdev_class;
+static struct bus_type txx9_sramc_subsys = {
+	.name = "txx9_sram",
+	.dev_name = "txx9_sram",
+};
 
-struct txx9_sramc_sysdev {
-	struct sys_device dev;
+struct txx9_sramc_dev {
+	struct device dev;
 	struct bin_attribute bindata_attr;
 	void __iomem *base;
 };
@@ -909,7 +912,7 @@
 			      struct bin_attribute *bin_attr,
 			      char *buf, loff_t pos, size_t size)
 {
-	struct txx9_sramc_sysdev *dev = bin_attr->private;
+	struct txx9_sramc_dev *dev = bin_attr->private;
 	size_t ramsize = bin_attr->size;
 
 	if (pos >= ramsize)
@@ -924,7 +927,7 @@
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t pos, size_t size)
 {
-	struct txx9_sramc_sysdev *dev = bin_attr->private;
+	struct txx9_sramc_dev *dev = bin_attr->private;
 	size_t ramsize = bin_attr->size;
 
 	if (pos >= ramsize)
@@ -937,18 +940,13 @@
 
 void __init txx9_sramc_init(struct resource *r)
 {
-	struct txx9_sramc_sysdev *dev;
+	struct txx9_sramc_dev *dev;
 	size_t size;
 	int err;
 
-	if (!txx9_sramc_sysdev_class.name) {
-		txx9_sramc_sysdev_class.name = "txx9_sram";
-		err = sysdev_class_register(&txx9_sramc_sysdev_class);
-		if (err) {
-			txx9_sramc_sysdev_class.name = NULL;
-			return;
-		}
-	}
+	err = subsys_system_register(&txx9_sramc_subsys, NULL);
+	if (err)
+		return;
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return;
@@ -956,7 +954,7 @@
 	dev->base = ioremap(r->start, size);
 	if (!dev->base)
 		goto exit;
-	dev->dev.cls = &txx9_sramc_sysdev_class;
+	dev->dev.bus = &txx9_sramc_subsys;
 	sysfs_bin_attr_init(&dev->bindata_attr);
 	dev->bindata_attr.attr.name = "bindata";
 	dev->bindata_attr.attr.mode = S_IRUSR | S_IWUSR;
@@ -964,12 +962,12 @@
 	dev->bindata_attr.write = txx9_sram_write;
 	dev->bindata_attr.size = size;
 	dev->bindata_attr.private = dev;
-	err = sysdev_register(&dev->dev);
+	err = device_register(&dev->dev);
 	if (err)
 		goto exit;
 	err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr);
 	if (err) {
-		sysdev_unregister(&dev->dev);
+		device_unregister(&dev->dev);
 		goto exit;
 	}
 	return;
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index ba3cec3..6567895 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -15,7 +15,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/ethtool.h>
 #include <linux/param.h>
 #include <linux/ptrace.h>
diff --git a/arch/mn10300/include/asm/ipcbuf.h b/arch/mn10300/include/asm/ipcbuf.h
index f6f63d4..84c7e51 100644
--- a/arch/mn10300/include/asm/ipcbuf.h
+++ b/arch/mn10300/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef _ASM_IPCBUF_H
-#define _ASM_IPCBUF_H
-
-/*
- * The ipc64_perm structure for MN10300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* _ASM_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/mn10300/include/asm/socket.h b/arch/mn10300/include/asm/socket.h
index 4e60c42..876356d 100644
--- a/arch/mn10300/include/asm/socket.h
+++ b/arch/mn10300/include/asm/socket.h
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
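
SO_WIFI_STATUS and SCM_WIFI_STATUS are the per-architecture numbers for the new socket option that lets userspace request wifi TX status notifications (delivered as SCM_WIFI_STATUS control messages on the socket's error queue). A minimal userspace sketch of enabling it, assuming a socket whose traffic leaves through a wifi device; most architectures use the generic value 41, parisc uses 0x4022:

	#include <sys/socket.h>

	#ifndef SO_WIFI_STATUS
	#define SO_WIFI_STATUS 41	/* generic value; parisc differs */
	#endif

	/* Ask the kernel to queue TX status reports for this socket. */
	static int enable_wifi_status(int fd)
	{
		int one = 1;

		return setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &one, sizeof(one));
	}
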
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 87c2130..28cf521 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,7 +165,6 @@
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	+(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	+(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@
 #define _TIF_SINGLESTEP		+(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	+(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	+(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		+(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/arch/mn10300/include/asm/types.h b/arch/mn10300/include/asm/types.h
index c1833eb..713d4ba 100644
--- a/arch/mn10300/include/asm/types.h
+++ b/arch/mn10300/include/asm/types.h
@@ -13,12 +13,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h
deleted file mode 100644
index bbe5a1c..0000000
--- a/arch/openrisc/include/asm/memblock.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others.  All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_OPENRISC_MEMBLOCK_H
-#define __ASM_OPENRISC_MEMBLOCK_H
-
-/* empty */
-
-#endif /* __ASM_OPENRISC_MEMBLOCK_H */
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index d5bc5f8..e5fc7887 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -51,7 +51,8 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -69,7 +70,8 @@
 			set_thread_flag(TIF_POLLING_NRFLAG);
 		}
 
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 1bb58ba..3d4478f 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -76,14 +76,13 @@
 	of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
 
 	/* Scan memory nodes and rebuild MEMBLOCKs */
-	memblock_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
 	/* Save command line for /proc/cmdline and then parse parameters */
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 
-	memblock_analyze();
+	memblock_allow_resize();
 
 	/* We must copy the flattend device tree from init memory to regular
 	 * memory because the device tree references the strings in it
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 6ab9580..d9dc6cd 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -136,16 +136,9 @@
  */
 static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
 {
-	struct super_block *s;
 	struct hpux_ustat tmp;  /* Changed to hpux_ustat */
 	struct kstatfs sbuf;
-	int err = -EINVAL;
-
-	s = user_get_super(dev);
-	if (s == NULL)
-		goto out;
-	err = statfs_by_dentry(s->s_root, &sbuf);
-	drop_super(s);
+	int err = vfs_ustat(dev, &sbuf);
 	if (err)
 		goto out;
 
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
index 225b7d6..d28c51b 100644
--- a/arch/parisc/include/asm/socket.h
+++ b/arch/parisc/include/asm/socket.h
@@ -61,6 +61,9 @@
 
 #define SO_RXQ_OVFL             0x4021
 
+#define SO_WIFI_STATUS		0x4022
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index aa8de72..6d9c7c7 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,7 +58,6 @@
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
-#define TIF_FREEZE		7	/* is freezing for suspend */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9	/* single stepping? */
 #define TIF_BLOCKSTEP		10	/* branch stepping? */
@@ -69,7 +68,6 @@
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
diff --git a/arch/parisc/include/asm/types.h b/arch/parisc/include/asm/types.h
index 80e415c..8866f9b 100644
--- a/arch/parisc/include/asm/types.h
+++ b/arch/parisc/include/asm/types.h
@@ -3,10 +3,4 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 #endif
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 45b7389..7c07743 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -198,8 +198,6 @@
 	.rating			= 300,
 	.read			= read_cr16,
 	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
-	.mult			= 0, /* to be set */
-	.shift			= 22,
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -270,7 +268,5 @@
 
 	/* register at clocksource framework */
 	current_cr16_khz = PAGE0->mem_10msec/10;  /* kHz */
-	clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz,
-						clocksource_cr16.shift);
-	clocksource_register(&clocksource_cr16);
+	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
 }
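
With clocksource_register_khz() the timekeeping core derives mult and shift from the supplied frequency, so the driver no longer seeds .shift and computes .mult with clocksource_khz2mult() by hand. A rough sketch of the resulting registration pattern (illustrative only, not taken from this patch; read_hw_counter() is a hypothetical stand-in for the real counter read):

	static cycle_t example_read(struct clocksource *cs)
	{
		return (cycle_t)read_hw_counter();	/* hypothetical hardware read */
	}

	static struct clocksource example_cs = {
		.name	= "example",
		.rating	= 300,
		.read	= example_read,
		.mask	= CLOCKSOURCE_MASK(64),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
		/* no .mult / .shift: the core computes them at registration */
	};

	clocksource_register_khz(&example_cs, rate_khz);
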
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 951e18f..692ac75 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -87,6 +87,10 @@
 	bool
 	default y if 64BIT
 
+config ARCH_HAS_CPU_IDLE_WAIT
+	bool
+	default y
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -117,6 +121,7 @@
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select USE_GENERIC_SMP_HELPERS if SMP
@@ -132,6 +137,7 @@
 	select IRQ_PER_CPU
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
+	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_BPF_JIT if (PPC64 && NET)
@@ -362,8 +368,9 @@
 
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
-	depends on PPC64 || 6xx || FSL_BOOKE
-	select RELOCATABLE if PPC64 || FSL_BOOKE
+	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP && !PPC_47x)
+	select RELOCATABLE if PPC64 || 44x
+	select DYNAMIC_MEMSTART if FSL_BOOKE
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
@@ -421,9 +428,6 @@
 	def_bool y
 	depends on (SMP && PPC_PSERIES) || PPC_PS3
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config SYS_SUPPORTS_HUGETLBFS
 	bool
 
@@ -687,6 +691,10 @@
 	  controller.  Also contains some common code used by
 	  drivers for specific local bus peripherals.
 
+config FSL_IFC
+	bool
+	depends on FSL_SOC
+
 config FSL_GTM
 	bool
 	depends on PPC_83xx || QUICC_ENGINE || CPM2
@@ -772,6 +780,10 @@
 
 endmenu
 
+config NONSTATIC_KERNEL
+	bool
+	default n
+
 menu "Advanced setup"
 	depends on PPC32
 
@@ -821,13 +833,32 @@
 	int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL
 	default 3
 
+config DYNAMIC_MEMSTART
+	bool "Enable page aligned dynamic load address for kernel (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x)
+	select NONSTATIC_KERNEL
+	help
+	  This option enables the kernel to be loaded at any page aligned
+	  physical address. The kernel creates a mapping from KERNELBASE to
+	  the address where the kernel is loaded. The page size here implies
+	  the TLB page size used to map the kernel on the particular platform;
+	  please refer to the init code for the TLB page size.
+
+	  DYNAMIC_MEMSTART is an easy way of implementing a pseudo-RELOCATABLE
+	  kernel image, where the only restriction is that the kernel load
+	  address be page aligned. When this option is enabled, the compile
+	  time physical address CONFIG_PHYSICAL_START is ignored.
+
+	  This option is overridden by CONFIG_RELOCATABLE.
+
 config RELOCATABLE
 	bool "Build a relocatable kernel (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || PPC_47x)
+	depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && 44x
+	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running at the
-	  location the kernel is loaded at (some alignment restrictions may
-	  exist).
+	  location the kernel is loaded at, without any alignment restrictions.
+	  This feature is a superset of DYNAMIC_MEMSTART and hence overrides it.
 
 	  One use is for the kexec on panic case where the recovery kernel
 	  must live at a different physical address than the primary
@@ -837,7 +868,11 @@
 	  it has been loaded at and the compile time physical addresses
 	  CONFIG_PHYSICAL_START is ignored.  However CONFIG_PHYSICAL_START
 	  setting can still be useful to bootwrappers that need to know the
-	  load location of the kernel (eg. u-boot/mkimage).
+	  load address of the kernel (eg. u-boot/mkimage).
+
+config RELOCATABLE_PPC32
+	def_bool y
+	depends on PPC32 && RELOCATABLE
 
 config PAGE_OFFSET_BOOL
 	bool "Set custom page offset address"
@@ -867,7 +902,7 @@
 config KERNEL_START
 	hex "Virtual address of kernel base" if KERNEL_START_BOOL
 	default PAGE_OFFSET if PAGE_OFFSET_BOOL
-	default "0xc2000000" if CRASH_DUMP && !RELOCATABLE
+	default "0xc2000000" if CRASH_DUMP && !NONSTATIC_KERNEL
 	default "0xc0000000"
 
 config PHYSICAL_START_BOOL
@@ -880,7 +915,7 @@
 
 config PHYSICAL_START
 	hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
-	default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !RELOCATABLE
+	default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !NONSTATIC_KERNEL
 	default "0x00000000"
 
 config PHYSICAL_ALIGN
@@ -926,6 +961,7 @@
 if PPC64
 config RELOCATABLE
 	bool "Build a relocatable kernel"
+	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running anywhere
 	  in the RMA (real memory area) at any 16k-aligned base address.
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 1b8a9c9..4ccb2a0 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -336,4 +336,16 @@
 	  platform probing is done, all platforms selected must
 	  share the same address.
 
+config STRICT_DEVMEM
+	def_bool y
+	prompt "Filter access to /dev/mem"
+	help
+	  This option restricts access to /dev/mem.  If this option is
+	  disabled, you allow userspace access to all memory, including
+	  kernel and userspace memory. Accidental memory access is likely
+	  to be disastrous.
+
+	  Raw memory access is only needed by experts who want to debug
+	  the kernel.
+
+	  If you are unsure, say Y.
+
 endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 70ba0c0..b8b105c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -63,9 +63,9 @@
 override AR	:= GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR)
 endif
 
-LDFLAGS_vmlinux-yy := -Bstatic
-LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie
-LDFLAGS_vmlinux	:= $(LDFLAGS_vmlinux-yy)
+LDFLAGS_vmlinux-y := -Bstatic
+LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
+LDFLAGS_vmlinux	:= $(LDFLAGS_vmlinux-y)
 
 CFLAGS-$(CONFIG_PPC64)	:= -mminimal-toc -mtraceback=no -mcall-aixdesc
 CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 -mmultiple
@@ -131,8 +131,7 @@
 endif
 
 cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
-cpu-as-$(CONFIG_6xx)		+= -Wa,-maltivec
-cpu-as-$(CONFIG_POWER4)		+= -Wa,-maltivec
+cpu-as-$(CONFIG_ALTIVEC)	+= -Wa,-maltivec
 cpu-as-$(CONFIG_E500)		+= -Wa,-me500
 cpu-as-$(CONFIG_E200)		+= -Wa,-me200
 
@@ -166,7 +165,7 @@
 
 # With make 3.82 we cannot mix normal and wildcard targets
 BOOT_TARGETS1 := zImage zImage.initrd uImage
-BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% uImage.%
 
 PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 72ee8c1..15986e7 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -45,6 +45,7 @@
 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405
 $(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
 
 
@@ -79,7 +80,8 @@
 		cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
 		virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
 		cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
-		gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c
+		gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c \
+		treeboot-currituck.c
 src-boot := $(src-wlib) $(src-plat) empty.c
 
 src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -199,6 +201,7 @@
 image-$(CONFIG_HOTFOOT)			+= cuImage.hotfoot
 image-$(CONFIG_WALNUT)			+= treeImage.walnut
 image-$(CONFIG_ACADIA)			+= cuImage.acadia
+image-$(CONFIG_OBS600)			+= uImage.obs600
 
 # Board ports in arch/powerpc/platform/44x/Kconfig
 image-$(CONFIG_EBONY)			+= treeImage.ebony cuImage.ebony
@@ -212,6 +215,7 @@
 image-$(CONFIG_YOSEMITE)		+= cuImage.yosemite
 image-$(CONFIG_ISS4xx)			+= treeImage.iss4xx \
 					   treeImage.iss4xx-mpic
+image-$(CONFIG_CURRITUCK)			+= treeImage.currituck
 
 # Board ports in arch/powerpc/platform/8xx/Kconfig
 image-$(CONFIG_MPC86XADS)		+= cuImage.mpc866ads
@@ -316,6 +320,12 @@
 $(obj)/uImage: vmlinux $(wrapperbits)
 	$(call if_changed,wrap,uboot)
 
+$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+	$(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+
+$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+	$(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb)
+
 $(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
 	$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
 
diff --git a/arch/powerpc/boot/dcr.h b/arch/powerpc/boot/dcr.h
index 645a7c9..cc73f7a 100644
--- a/arch/powerpc/boot/dcr.h
+++ b/arch/powerpc/boot/dcr.h
@@ -9,6 +9,12 @@
 	})
 #define mtdcr(rn, val) \
 	asm volatile("mtdcr %0,%1" : : "i"(rn), "r"(val))
+#define mfdcrx(rn) \
+	({	\
+		unsigned long rval; \
+		asm volatile("mfdcrx %0,%1" : "=r"(rval) : "r"(rn)); \
+		rval; \
+	})
 
 /* 440GP/440GX SDRAM controller DCRs */
 #define DCRN_SDRAM0_CFGADDR				0x010
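
Unlike mfdcr(), whose "i" constraint requires the DCR number to be a compile-time constant, the new mfdcrx() takes the DCR number in a register, so the boot wrapper can read DCRs whose numbers are computed at run time. A one-line sketch (dcr_base is a hypothetical run-time value):

	unsigned long val = mfdcrx(dcr_base + 0x20);	/* DCR number not known at compile time */
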
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index d271ab5..bbcb8a4 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -57,3 +57,55 @@
 	stw	r8,4(r3)
 	mr	r3,r6		# return the remainder in r3
 	blr
+
+/*
+ * Extended precision shifts.
+ *
+ * Updated to be valid for shift counts from 0 to 63 inclusive.
+ * -- Gabriel
+ *
+ * R3/R4 has 64 bit value
+ * R5    has shift count
+ * result in R3/R4
+ *
+ *  ashrdi3: arithmetic right shift (sign propagation)	
+ *  lshrdi3: logical right shift
+ *  ashldi3: left shift
+ */
+	.globl __ashrdi3
+__ashrdi3:
+	subfic	r6,r5,32
+	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
+	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
+	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
+	or	r4,r4,r6	# LSW |= t1
+	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
+	sraw	r3,r3,r5	# MSW = MSW >> count
+	or	r4,r4,r7	# LSW |= t2
+	blr
+
+	.globl __ashldi3
+__ashldi3:
+	subfic	r6,r5,32
+	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
+	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
+	or	r3,r3,r6	# MSW |= t1
+	slw	r4,r4,r5	# LSW = LSW << count
+	or	r3,r3,r7	# MSW |= t2
+	blr
+
+	.globl __lshrdi3
+__lshrdi3:
+	subfic	r6,r5,32
+	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
+	addi	r7,r5,32	# could be xori, or addi with -32
+	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
+	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
+	or	r4,r4,r6	# LSW |= t1
+	srw	r3,r3,r5	# MSW = MSW >> count
+	or	r4,r4,r7	# LSW |= t2
+	blr
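
The same decomposition, written out in C for reference (a sketch only, not kernel code): a 64-bit logical right shift by 0..63 is assembled from 32-bit operations on the high word (msw) and low word (lsw). The assembly gets the out-of-range sub-shifts for free from the PowerPC shift instructions, which return 0 for counts of 32 or more, whereas the C version needs explicit guards to stay within defined behaviour:

	#include <stdint.h>

	static uint64_t lshrdi3_sketch(uint32_t msw, uint32_t lsw, unsigned int count)
	{
		uint32_t lo, hi, t1, t2;

		lo = (count > 31) ? 0 : lsw >> count;				/* srw r4,r4,r5 */
		t1 = (count >= 1 && count <= 31) ? msw << (32 - count) : 0;	/* slw r6,r3,r6 */
		t2 = (count >= 32) ? msw >> (count - 32) : 0;			/* srw r7,r3,r7 */
		hi = (count > 31) ? 0 : msw >> count;				/* srw r3,r3,r5 */

		lo |= t1 | t2;
		return ((uint64_t)hi << 32) | lo;
	}

__ashldi3 mirrors the same structure with the words swapped, and __ashrdi3 additionally propagates the sign bit into the high word.
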
diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts
index 261d10c..227290d 100644
--- a/arch/powerpc/boot/dts/asp834x-redboot.dts
+++ b/arch/powerpc/boot/dts/asp834x-redboot.dts
@@ -256,7 +256,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <400000000>;
 			interrupts = <9 0x8>;
@@ -266,7 +266,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <400000000>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts
new file mode 100644
index 0000000..b801dd0
--- /dev/null
+++ b/arch/powerpc/boot/dts/currituck.dts
@@ -0,0 +1,237 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright © 2011 Tony Breeds IBM Corporation
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x01f00000 0x00100000;	// spin table
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+	model = "ibm,currituck";
+	compatible = "ibm,currituck";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		serial0 = &UART0;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,476";
+			reg = <0>;
+			clock-frequency = <1600000000>; // 1.6 GHz
+			timebase-frequency = <100000000>; // 100 MHz
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			status = "ok";
+		};
+		cpu@1 {
+			device_type = "cpu";
+			model = "PowerPC,476";
+			reg = <1>;
+			clock-frequency = <1600000000>; // 1.6 GHz
+			timebase-frequency = <100000000>; // 100 MHz
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			status = "disabled";
+			enable-method = "spin-table";
+			cpu-release-addr = <0x0 0x01f00000>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x0>; // filled in by zImage
+	};
+
+	MPIC: interrupt-controller {
+		compatible = "chrp,open-pic";
+		interrupt-controller;
+		dcr-reg = <0xffc00000 0x00040000>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+
+	};
+
+	plb {
+		compatible = "ibm,plb6";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		clock-frequency = <200000000>; // 200 MHz
+
+		POB0: opb {
+			compatible = "ibm,opb-4xx", "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			/* Wish there was a nicer way of specifying a full
+			 * 32-bit range
+			 */
+			ranges = <0x00000000 0x00000200 0x00000000 0x80000000
+				  0x80000000 0x00000200 0x80000000 0x80000000>;
+			clock-frequency = <100000000>;
+
+			UART0: serial@10000000 {
+				device_type = "serial";
+				compatible = "ns16750", "ns16550";
+				reg = <0x10000000 0x00000008>;
+				virtual-reg = <0xe1000000>;
+				clock-frequency = <1851851>; // PCIe refclk/MCGC0_CTL[UART]
+				current-speed = <115200>;
+				interrupt-parent = <&MPIC>;
+				interrupts = <34 2>;
+			};
+
+			IIC0: i2c@00000000 {
+				compatible = "ibm,iic-currituck", "ibm,iic";
+				reg = <0x0 0x00000014>;
+				interrupt-parent = <&MPIC>;
+				interrupts = <79 2>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+                                rtc@68 {
+                                        compatible = "stm,m41t80", "m41st85";
+                                        reg = <0x68>;
+                                };
+			};
+		};
+
+		PCIE0: pciex@10100000000 {		// 4xGBIF1
+			device_type = "pci";
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex";
+			primary;
+			port = <0x0>; /* port number */
+			reg = <0x00000101 0x00000000 0x0 0x10000000		/* Config space access */
+			       0x00000100 0x00000000 0x0 0x00001000>;	/* UTL Registers space access */
+			dcr-reg = <0x80 0x20>;
+
+//                                pci_space  < pci_addr          > < cpu_addr          > < size       >
+			ranges = <0x02000000 0x00000000 0x80000000 0x00000110 0x80000000 0x0 0x80000000
+			          0x01000000 0x0        0x0        0x00000140 0x0        0x0 0x00010000>;
+
+			/* Inbound starting at 0 to memsize filled in by zImage */
+			dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>;
+
+			/* This drives busses 0 to 0xf */
+			bus-range = <0x0 0xf>;
+
+			/* Legacy interrupts (note the weird polarity, the bridge seems
+			 * to invert PCIe legacy interrupts).
+			 * We are de-swizzling here because the numbers are actually for
+			 * port of the root complex virtual P2P bridge. But I want
+			 * to avoid putting a node for it in the tree, so the numbers
+			 * below are basically de-swizzled numbers.
+			 * The real slot is on idsel 0, so the swizzling is 1:1
+			 */
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <
+				0x0 0x0 0x0 0x1 &MPIC 46 0x2 /* int A */
+				0x0 0x0 0x0 0x2 &MPIC 47 0x2 /* int B */
+				0x0 0x0 0x0 0x3 &MPIC 48 0x2 /* int C */
+				0x0 0x0 0x0 0x4 &MPIC 49 0x2 /* int D */>;
+		};
+
+		PCIE1: pciex@30100000000 {		// 4xGBIF0
+			device_type = "pci";
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex";
+			primary;
+			port = <0x1>; /* port number */
+			reg = <0x00000301 0x00000000 0x0 0x10000000		/* Config space access */
+			       0x00000300 0x00000000 0x0 0x00001000>;	/* UTL Registers space access */
+			dcr-reg = <0x60 0x20>;
+
+			ranges = <0x02000000 0x00000000 0x80000000 0x00000310 0x80000000 0x0 0x80000000
+			          0x01000000 0x0        0x0        0x00000340 0x0        0x0 0x00010000>;
+
+			/* Inbound starting at 0 to memsize filled in by zImage */
+			dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>;
+
+			/* This drives busses 0 to 0xf */
+			bus-range = <0x0 0xf>;
+
+			/* Legacy interrupts (note the weird polarity, the bridge seems
+			 * to invert PCIe legacy interrupts).
+			 * We are de-swizzling here because the numbers are actually for
+			 * port of the root complex virtual P2P bridge. But I want
+			 * to avoid putting a node for it in the tree, so the numbers
+			 * below are basically de-swizzled numbers.
+			 * The real slot is on idsel 0, so the swizzling is 1:1
+			 */
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <
+				0x0 0x0 0x0 0x1 &MPIC 38 0x2 /* int A */
+				0x0 0x0 0x0 0x2 &MPIC 39 0x2 /* int B */
+				0x0 0x0 0x0 0x3 &MPIC 40 0x2 /* int C */
+				0x0 0x0 0x0 0x4 &MPIC 41 0x2 /* int D */>;
+		};
+
+		PCIE2: pciex@38100000000 {		// 2xGBIF0
+			device_type = "pci";
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex";
+			primary;
+			port = <0x2>; /* port number */
+			reg = <0x00000381 0x00000000 0x0 0x10000000		/* Config space access */
+			       0x00000380 0x00000000 0x0 0x00001000>;	/* UTL Registers space access */
+			dcr-reg = <0xA0 0x20>;
+
+			ranges = <0x02000000 0x00000000 0x80000000 0x00000390 0x80000000 0x0 0x80000000
+			          0x01000000 0x0        0x0        0x000003C0 0x0        0x0 0x00010000>;
+
+			/* Inbound starting at 0 to memsize filled in by zImage */
+			dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>;
+
+			/* This drives busses 0 to 0xf */
+			bus-range = <0x0 0xf>;
+
+			/* Legacy interrupts (note the weird polarity, the bridge seems
+			 * to invert PCIe legacy interrupts).
+			 * We are de-swizzling here because the numbers are actually for
+			 * port of the root complex virtual P2P bridge. But I want
+			 * to avoid putting a node for it in the tree, so the numbers
+			 * below are basically de-swizzled numbers.
+			 * The real slot is on idsel 0, so the swizzling is 1:1
+			 */
+			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+			interrupt-map = <
+				0x0 0x0 0x0 0x1 &MPIC 54 0x2 /* int A */
+				0x0 0x0 0x0 0x2 &MPIC 55 0x2 /* int B */
+				0x0 0x0 0x0 0x3 &MPIC 56 0x2 /* int C */
+				0x0 0x0 0x0 0x4 &MPIC 57 0x2 /* int D */>;
+		};
+
+	};
+
+	chosen {
+		linux,stdout-path = &UART0;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
new file mode 100644
index 0000000..89af626
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
@@ -0,0 +1,248 @@
+/*
+ * MPC8536 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,mpc8536-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+	compatible = "fsl,mpc8540-pci";
+	device_type = "pci";
+	interrupts = <24 0x2 0 0>;
+	bus-range = <0 0xff>;
+	#interrupt-cells = <1>;
+	#size-cells = <2>;
+	#address-cells = <3>;
+};
+
+/* controller at 0x9000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <25 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <25 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci2 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xb000 */
+&pci3 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <27 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <27 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+			>;
+	};
+};
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,mpc8536-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,mpc8536-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,mpc8536-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+	spi@7000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "pq3-gpio-0.dtsi"
+
+	/* mark compat w/8572 to get some erratum treatment */
+	gpio-controller@f000 {
+		compatible = "fsl,mpc8572-gpio", "fsl,pq3-gpio";
+	};
+
+	sata@18000 {
+		compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
+		reg = <0x18000 0x1000>;
+		cell-index = <1>;
+		interrupts = <74 0x2 0 0>;
+	};
+
+	sata@19000 {
+		compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
+		reg = <0x19000 0x1000>;
+		cell-index = <2>;
+		interrupts = <41 0x2 0 0>;
+	};
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,mpc8536-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x80000>; // L2, 512K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-timer-0.dtsi"
+
+	usb@22000 {
+		compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
+		reg = <0x22000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <28 0x2 0 0>;
+	};
+
+	usb@23000 {
+		compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
+		reg = <0x23000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <46 0x2 0 0>;
+	};
+
+	ptp_clock@24e00 {
+		interrupts = <68 2 0 0 69 2 0 0 70 2 0 0 71 2 0 0>;
+	};
+
+/include/ "pq3-etsec1-2.dtsi"
+
+	ethernet@26000 {
+		cell-index = <1>;
+	};
+
+	usb@2b000 {
+		compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr";
+		reg = <0x2b000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <60 0x2 0 0>;
+	};
+
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.0-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+	global-utilities@e0000 {
+		compatible = "fsl,mpc8536-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
new file mode 100644
index 0000000..7de45a7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
@@ -0,0 +1,63 @@
+/*
+ * MPC8536 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,MPC8536";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+		pci3 = &pci3;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,8536@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
new file mode 100644
index 0000000..b68eb11
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
@@ -0,0 +1,191 @@
+/*
+ * MPC8544 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,mpc8544-lbc", "fsl,pq3-localbus", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+	compatible = "fsl,mpc8540-pci";
+	device_type = "pci";
+	interrupts = <24 0x2 0 0>;
+	bus-range = <0 0xff>;
+	#interrupt-cells = <1>;
+	#size-cells = <2>;
+	#address-cells = <3>;
+};
+
+/* controller at 0x9000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <25 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <25 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci2 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xb000 */
+&pci3 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <27 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <27 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,mpc8544-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <10>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,mpc8544-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,mpc8544-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,mpc8544-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x40000>; // L2, 256K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+
+	ethernet@26000 {
+		cell-index = <1>;
+	};
+
+/include/ "pq3-sec2.1-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+
+	global-utilities@e0000 {
+		compatible = "fsl,mpc8544-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
new file mode 100644
index 0000000..8777f92
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
@@ -0,0 +1,63 @@
+/*
+ * MPC8544 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,MPC8544";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+		pci3 = &pci3;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,8544@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
new file mode 100644
index 0000000..9d8023a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
@@ -0,0 +1,143 @@
+/*
+ * MPC8548 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,mpc8548-lbc", "fsl,pq3-localbus", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+	compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
+	device_type = "pci";
+	interrupts = <24 0x2 0 0>;
+	bus-range = <0 0xff>;
+	#interrupt-cells = <1>;
+	#size-cells = <2>;
+	#address-cells = <3>;
+};
+
+/* controller at 0x9000 */
+&pci1 {
+	compatible = "fsl,mpc8540-pci";
+	device_type = "pci";
+	interrupts = <25 0x2 0 0>;
+	bus-range = <0 0xff>;
+	#interrupt-cells = <1>;
+	#size-cells = <2>;
+	#address-cells = <3>;
+};
+
+/* controller at 0xa000 */
+&pci2 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,mpc8548-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <10>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,mpc8548-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,mpc8548-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,mpc8548-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x80000>; // L2, 512K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-1.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+/include/ "pq3-etsec1-3.dtsi"
+
+/include/ "pq3-sec2.1-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+
+	global-utilities@e0000 {
+		compatible = "fsl,mpc8548-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
new file mode 100644
index 0000000..289f121
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
@@ -0,0 +1,62 @@
+/*
+ * MPC8548 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,MPC8548";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,8548@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi
new file mode 100644
index 0000000..64e7075
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi
@@ -0,0 +1,270 @@
+/*
+ * MPC8568 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus", "simple-bus";
+	interrupts = <19 2 0 0>;
+	sleep = <&pmc 0x08000000>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+	compatible = "fsl,mpc8540-pci";
+	device_type = "pci";
+	interrupts = <24 0x2 0 0>;
+	bus-range = <0 0xff>;
+	#interrupt-cells = <1>;
+	#size-cells = <2>;
+	#address-cells = <3>;
+	sleep = <&pmc 0x80000000>;
+};
+
+/* controller at 0xa000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+	sleep = <&pmc 0x20000000>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <48 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	fsl,srio-rmu-handle = <&rmu>;
+	sleep = <&pmc 0x00080000>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,mpc8568-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <10>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,mpc8568-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,mpc8568-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+	i2c-sleep-nexus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		sleep = <&pmc 0x00000004>;
+		ranges;
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+
+	};
+
+	duart-sleep-nexus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		sleep = <&pmc 0x00000002>;
+		ranges;
+
+/include/ "pq3-duart-0.dtsi"
+
+	};
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,mpc8568-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x80000>; // L2, 512K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+	dma@21300 {
+		sleep = <&pmc 0x00000400>;
+	};
+
+/include/ "pq3-etsec1-0.dtsi"
+	ethernet@24000 {
+		sleep = <&pmc 0x00000080>;
+	};
+
+/include/ "pq3-etsec1-1.dtsi"
+	ethernet@25000 {
+		sleep = <&pmc 0x00000040>;
+	};
+
+	par_io@e0100 {
+		reg = <0xe0100 0x100>;
+		device_type = "par_io";
+	};
+
+/include/ "pq3-sec2.1-0.dtsi"
+	crypto@30000 {
+		sleep = <&pmc 0x01000000>;
+	};
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-rmu-0.dtsi"
+	rmu@d3000 {
+		sleep = <&pmc 0x00040000>;
+	};
+
+	global-utilities@e0000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts";
+		reg = <0xe0000 0x1000>;
+		ranges = <0 0xe0000 0x1000>;
+		fsl,has-rstcr;
+
+		pmc: power@70 {
+			compatible = "fsl,mpc8568-pmc",
+				     "fsl,mpc8548-pmc";
+			reg = <0x70 0x20>;
+		};
+	};
+};
+
+&qe {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "qe";
+	compatible = "fsl,qe";
+	sleep = <&pmc 0x00000800>;
+	brg-frequency = <0>;
+	bus-frequency = <396000000>;
+	fsl,qe-num-riscs = <2>;
+	fsl,qe-num-snums = <28>;
+
+	qeic: interrupt-controller@80 {
+		interrupt-controller;
+		compatible = "fsl,qe-ic";
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		reg = <0x80 0x80>;
+		interrupts = <46 2 0 0 46 2 0 0>; //high:30 low:30
+		interrupt-parent = <&mpic>;
+	};
+
+	spi@4c0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,spi";
+		reg = <0x4c0 0x40>;
+		cell-index = <0>;
+		interrupts = <2>;
+		interrupt-parent = <&qeic>;
+	};
+
+	spi@500 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cell-index = <1>;
+		compatible = "fsl,spi";
+		reg = <0x500 0x40>;
+		interrupts = <1>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@2000 {
+		cell-index = <1>;
+		reg = <0x2000 0x200>;
+		interrupts = <32>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@3000 {
+		cell-index = <2>;
+		reg = <0x3000 0x200>;
+		interrupts = <33>;
+		interrupt-parent = <&qeic>;
+	};
+
+	muram@10000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,qe-muram", "fsl,cpm-muram";
+		ranges = <0x0 0x10000 0x10000>;
+
+		data-only@0 {
+			compatible = "fsl,qe-muram-data",
+				     "fsl,cpm-muram-data";
+			reg = <0x0 0x10000>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
new file mode 100644
index 0000000..eacd62c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
@@ -0,0 +1,65 @@
+/*
+ * MPC8568 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,MPC8568";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		ethernet3 = &enet3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,8568@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+			sleep = <&pmc 0x00008000	// core
+				 &pmc 0x00004000>;	// timebase
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi
new file mode 100644
index 0000000..3e6346a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi
@@ -0,0 +1,304 @@
+/*
+ * MPC8569 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,mpc8569-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+	sleep = <&pmc 0x08000000>;
+};
+
+/* controller at 0xa000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+	sleep = <&pmc 0x20000000>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <48 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	fsl,srio-rmu-handle = <&rmu>;
+	sleep = <&pmc 0x00080000>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+
+	port2 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <2>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,mpc8569-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <10>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,mpc8569-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,mpc8569-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+	i2c-sleep-nexus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		sleep = <&pmc 0x00000004>;
+		ranges;
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+
+	};
+
+	duart-sleep-nexus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		sleep = <&pmc 0x00000002>;
+		ranges;
+
+/include/ "pq3-duart-0.dtsi"
+
+	};
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,mpc8569-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x80000>; // L2, 512K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-esdhc-0.dtsi"
+	sdhc@2e000 {
+		sleep = <&pmc 0x00200000>;
+	};
+
+	par_io@e0100 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xe0100 0x100>;
+		ranges = <0x0 0xe0100 0x100>;
+		device_type = "par_io";
+	};
+
+/include/ "pq3-sec3.1-0.dtsi"
+	crypto@30000 {
+		sleep = <&pmc 0x01000000>;
+	};
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-rmu-0.dtsi"
+	rmu@d3000 {
+		sleep = <&pmc 0x00040000>;
+	};
+
+	global-utilities@e0000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts";
+		reg = <0xe0000 0x1000>;
+		ranges = <0 0xe0000 0x1000>;
+		fsl,has-rstcr;
+
+		pmc: power@70 {
+			compatible = "fsl,mpc8569-pmc",
+				     "fsl,mpc8548-pmc";
+			reg = <0x70 0x20>;
+		};
+	};
+};
+
+&qe {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "qe";
+	compatible = "fsl,qe";
+	sleep = <&pmc 0x00000800>;
+	brg-frequency = <0>;
+	bus-frequency = <0>;
+	fsl,qe-num-riscs = <4>;
+	fsl,qe-num-snums = <46>;
+
+	qeic: interrupt-controller@80 {
+		interrupt-controller;
+		compatible = "fsl,qe-ic";
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		reg = <0x80 0x80>;
+		interrupts = <46 2 0 0 46 2 0 0>; //high:30 low:30
+		interrupt-parent = <&mpic>;
+	};
+
+	timer@440 {
+		compatible = "fsl,mpc8569-qe-gtm",
+			     "fsl,qe-gtm", "fsl,gtm";
+		reg = <0x440 0x40>;
+		interrupts = <12 13 14 15>;
+		interrupt-parent = <&qeic>;
+		/* Filled in by U-Boot */
+		clock-frequency = <0>;
+	};
+
+	spi@4c0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,mpc8569-qe-spi", "fsl,spi";
+		reg = <0x4c0 0x40>;
+		cell-index = <0>;
+		interrupts = <2>;
+		interrupt-parent = <&qeic>;
+	};
+
+	spi@500 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cell-index = <1>;
+		compatible = "fsl,spi";
+		reg = <0x500 0x40>;
+		interrupts = <1>;
+		interrupt-parent = <&qeic>;
+	};
+
+	usb@6c0 {
+		compatible = "fsl,mpc8569-qe-usb",
+			     "fsl,mpc8323-qe-usb";
+		reg = <0x6c0 0x40 0x8b00 0x100>;
+		interrupts = <11>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@2000 {
+		cell-index = <1>;
+		reg = <0x2000 0x200>;
+		interrupts = <32>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@2200 {
+		cell-index = <3>;
+		reg = <0x2200 0x200>;
+		interrupts = <34>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@3000 {
+		cell-index = <2>;
+		reg = <0x3000 0x200>;
+		interrupts = <33>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@3200 {
+		cell-index = <4>;
+		reg = <0x3200 0x200>;
+		interrupts = <35>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@3400 {
+		cell-index = <6>;
+		reg = <0x3400 0x200>;
+		interrupts = <41>;
+		interrupt-parent = <&qeic>;
+	};
+
+	ucc@3600 {
+		cell-index = <8>;
+		reg = <0x3600 0x200>;
+		interrupts = <43>;
+		interrupt-parent = <&qeic>;
+	};
+
+	muram@10000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,qe-muram", "fsl,cpm-muram";
+		ranges = <0x0 0x10000 0x20000>;
+
+		data-only@0 {
+			compatible = "fsl,qe-muram-data",
+				     "fsl,cpm-muram-data";
+			reg = <0x0 0x20000>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
new file mode 100644
index 0000000..b07064d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
@@ -0,0 +1,64 @@
+/*
+ * MPC8569 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,MPC8569";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		ethernet3 = &enet3;
+		pci1 = &pci1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,8569@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+			sleep = <&pmc 0x00008000	// core
+				 &pmc 0x00004000>;	// timebase
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
new file mode 100644
index 0000000..d44e25a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
@@ -0,0 +1,196 @@
+/*
+ * MPC8572 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <24 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <24 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0x9000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <25 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <25 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci2 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,mpc8572-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,mpc8572-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,mpc8572-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+	memory-controller@6000 {
+		compatible = "fsl,mpc8572-memory-controller";
+		reg = <0x6000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-dma-1.dtsi"
+/include/ "pq3-gpio-0.dtsi"
+	gpio-controller@f000 {
+		compatible = "fsl,mpc8572-gpio", "fsl,pq3-gpio";
+	};
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,mpc8572-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x100000>; // L2,1M
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-timer-0.dtsi"
+
+	ptp_clock@24e00 {
+		interrupts = <68 2 0 0 69 2 0 0 70 2 0 0 71 2 0 0>;
+	};
+
+/include/ "pq3-etsec1-1.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+/include/ "pq3-etsec1-3.dtsi"
+/include/ "pq3-sec3.0-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+	global-utilities@e0000 {
+		compatible = "fsl,mpc8572-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
new file mode 100644
index 0000000..ca18832
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
@@ -0,0 +1,70 @@
+/*
+ * MPC8572 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,MPC8572";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		ethernet3 = &enet3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,8572@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+
+		PowerPC,8572@1 {
+			device_type = "cpu";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
new file mode 100644
index 0000000..bd9e163
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -0,0 +1,198 @@
+/*
+ * P1010/P1014 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&ifc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,ifc", "simple-bus";
+	interrupts = <16 2 0 0 19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+	compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci1 {
+	compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,p1010-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,p1010-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,p1010-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-espi-0.dtsi"
+	spi0: spi@7000 {
+		fsl,espi-num-chipselects = <1>;
+	};
+
+/include/ "pq3-gpio-0.dtsi"
+/include/ "pq3-sata2-0.dtsi"
+/include/ "pq3-sata2-1.dtsi"
+
+	can0: can@1c000 {
+		compatible = "fsl,p1010-flexcan";
+		reg = <0x1c000 0x1000>;
+		interrupts = <48 0x2 0 0>;
+	};
+
+	can1: can@1d000 {
+		compatible = "fsl,p1010-flexcan";
+		reg = <0x1d000 0x1000>;
+		interrupts = <61 0x2 0 0>;
+	};
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,p1010-l2-cache-controller",
+				"fsl,p1014-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x40000>; // L2,256K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-esdhc-0.dtsi"
+	sdhc@2e000 {
+		fsl,sdhci-auto-cmd12;
+	};
+
+/include/ "pq3-sec4.4-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+	enet0: ethernet@b0000 {
+		queue-group@b0000 {
+			fsl,rx-bit-map = <0xff>;
+			fsl,tx-bit-map = <0xff>;
+		};
+	};
+
+/include/ "pq3-etsec2-1.dtsi"
+	enet1: ethernet@b1000 {
+		queue-group@b1000 {
+			fsl,rx-bit-map = <0xff>;
+			fsl,tx-bit-map = <0xff>;
+		};
+	};
+
+/include/ "pq3-etsec2-2.dtsi"
+	enet2: ethernet@b2000 {
+		queue-group@b2000 {
+			fsl,rx-bit-map = <0xff>;
+			fsl,tx-bit-map = <0xff>;
+		};
+
+	};
+
+	global-utilities@e0000 {
+		compatible = "fsl,p1010-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
new file mode 100644
index 0000000..7354a8f
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
@@ -0,0 +1,64 @@
+/*
+ * P1010/P1014 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P1010";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		can0 = &can0;
+		can1 = &can1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,P1010@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
new file mode 100644
index 0000000..fc924c5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -0,0 +1,174 @@
+/*
+ * P1020/P1011 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,p1020-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,p1020-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,p1020-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+	spi@7000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "pq3-gpio-0.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,p1020-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x40000>; // L2,256K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-usb2-dr-1.dtsi"
+
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.3-0.dtsi"
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+	enet0: enet0_grp2: ethernet@b0000 {
+	};
+
+/include/ "pq3-etsec2-1.dtsi"
+	enet1: enet1_grp2: ethernet@b1000 {
+	};
+
+/include/ "pq3-etsec2-2.dtsi"
+	enet2: enet2_grp2: ethernet@b2000 {
+	};
+
+	global-utilities@e0000 {
+		compatible = "fsl,p1020-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
+
+/include/ "pq3-etsec2-grp2-0.dtsi"
+/include/ "pq3-etsec2-grp2-1.dtsi"
+/include/ "pq3-etsec2-grp2-2.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
new file mode 100644
index 0000000..6f0376e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
@@ -0,0 +1,68 @@
+/*
+ * P1020/P1011 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P1020";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,P1020@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+
+		PowerPC,P1020@1 {
+			device_type = "cpu";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
new file mode 100644
index 0000000..38ba54d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -0,0 +1,225 @@
+/*
+ * P1021/P1012 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,p1021-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,p1021-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,p1021-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+	spi@7000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "pq3-gpio-0.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,p1021-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x40000>; // L2,256K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.3-0.dtsi"
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+	enet0: enet0_grp2: ethernet@b0000 {
+	};
+
+/include/ "pq3-etsec2-1.dtsi"
+	enet1: enet1_grp2: ethernet@b1000 {
+	};
+
+/include/ "pq3-etsec2-2.dtsi"
+	enet2: enet2_grp2: ethernet@b2000 {
+	};
+
+	global-utilities@e0000 {
+		compatible = "fsl,p1021-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
+
+&qe {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "qe";
+	compatible = "fsl,qe";
+	fsl,qe-num-riscs = <1>;
+	fsl,qe-num-snums = <28>;
+
+	qeic: interrupt-controller@80 {
+		interrupt-controller;
+		compatible = "fsl,qe-ic";
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		reg = <0x80 0x80>;
+		interrupts = <63 2 0 0 60 2 0 0>; //high:47 low:44
+	};
+
+	ucc@2000 {
+		cell-index = <1>;
+		reg = <0x2000 0x200>;
+		interrupts = <32>;
+		interrupt-parent = <&qeic>;
+	};
+
+	mdio@2120 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x2120 0x18>;
+		compatible = "fsl,ucc-mdio";
+	};
+
+	ucc@2400 {
+		cell-index = <5>;
+		reg = <0x2400 0x200>;
+		interrupts = <40>;
+		interrupt-parent = <&qeic>;
+	};
+
+	muram@10000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,qe-muram", "fsl,cpm-muram";
+		ranges = <0x0 0x10000 0x6000>;
+
+		data-only@0 {
+			compatible = "fsl,qe-muram-data",
+			"fsl,cpm-muram-data";
+			reg = <0x0 0x6000>;
+		};
+	};
+};
+
+/include/ "pq3-etsec2-grp2-0.dtsi"
+/include/ "pq3-etsec2-grp2-1.dtsi"
+/include/ "pq3-etsec2-grp2-2.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
new file mode 100644
index 0000000..4abd54b
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
@@ -0,0 +1,68 @@
+/*
+ * P1021/P1012 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P1021";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,P1021@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+
+		PowerPC,P1021@1 {
+			device_type = "cpu";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
new file mode 100644
index 0000000..16239b1
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -0,0 +1,235 @@
+/*
+ * P1022/P1013 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+	compatible = "fsl,p1022-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xa000 */
+&pci1 {
+	compatible = "fsl,p1022-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0xb000 */
+&pci2 {
+	compatible = "fsl,p1022-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,p1022-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,p1022-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,p1022-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-espi-0.dtsi"
+	spi@7000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "pq3-dma-1.dtsi"
+	dma@c300 {
+		dma00: dma-channel@0 {
+			compatible = "fsl,ssi-dma-channel";
+		};
+		dma01: dma-channel@80 {
+			compatible = "fsl,ssi-dma-channel";
+		};
+	};
+
+/include/ "pq3-gpio-0.dtsi"
+
+	display@10000 {
+		compatible = "fsl,diu", "fsl,p1022-diu";
+		reg = <0x10000 1000>;
+		interrupts = <64 2 0 0>;
+	};
+
+	ssi@15000 {
+		compatible = "fsl,mpc8610-ssi";
+		cell-index = <0>;
+		reg = <0x15000 0x100>;
+		interrupts = <75 2 0 0>;
+		fsl,playback-dma = <&dma00>;
+		fsl,capture-dma = <&dma01>;
+		fsl,fifo-depth = <15>;
+	};
+
+/include/ "pq3-sata2-0.dtsi"
+/include/ "pq3-sata2-1.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,p1022-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x40000>; // L2,256K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-usb2-dr-1.dtsi"
+
+/include/ "pq3-esdhc-0.dtsi"
+	sdhc@2e000 {
+		fsl,sdhci-auto-cmd12;
+	};
+
+/include/ "pq3-sec3.3-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+	enet0: enet0_grp2: ethernet@b0000 {
+	};
+
+/include/ "pq3-etsec2-1.dtsi"
+	enet1: enet1_grp2: ethernet@b1000 {
+	};
+
+	global-utilities@e0000 {
+		compatible = "fsl,p1022-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+
+	power@e0070 {
+		compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
+		reg = <0xe0070 0x20>;
+	};
+
+};
+
+/include/ "pq3-etsec2-grp2-0.dtsi"
+/include/ "pq3-etsec2-grp2-1.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
new file mode 100644
index 0000000..e930f4f
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
@@ -0,0 +1,68 @@
+/*
+ * P1022/P1013 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P1022";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,P1022@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+
+		PowerPC,P1022@1 {
+			device_type = "cpu";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
new file mode 100644
index 0000000..b06bb4c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -0,0 +1,224 @@
+/*
+ * P1023/P1017 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0xa000 */
+&pci0 {
+	compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+	};
+};
+
+/* controller at 0x9000 */
+&pci1 {
+	compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+	};
+};
+
+/* controller at 0xb000 */
+&pci2 {
+	compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 0 0>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 0 0>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,p1023-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,p1023-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,p1023-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+	spi@7000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "pq3-gpio-0.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,p1023-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x40000>; // L2,256K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+
+	crypto: crypto@300000 {
+		compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x30000 0x10000>;
+		ranges = <0 0x30000 0x10000>;
+		interrupts = <58 2 0 0>;
+
+		sec_jr0: jr@1000 {
+			compatible = "fsl,sec-v4.2-job-ring",
+				     "fsl,sec-v4.0-job-ring";
+			reg = <0x1000 0x1000>;
+			interrupts = <45 2 0 0>;
+		};
+
+		sec_jr1: jr@2000 {
+			compatible = "fsl,sec-v4.2-job-ring",
+				     "fsl,sec-v4.0-job-ring";
+			reg = <0x2000 0x1000>;
+			interrupts = <45 2 0 0>;
+		};
+
+		sec_jr2: jr@3000 {
+			compatible = "fsl,sec-v4.2-job-ring",
+				     "fsl,sec-v4.0-job-ring";
+			reg = <0x3000 0x1000>;
+			interrupts = <57 2 0 0>;
+		};
+
+		sec_jr3: jr@4000 {
+			compatible = "fsl,sec-v4.2-job-ring",
+				     "fsl,sec-v4.0-job-ring";
+			reg = <0x4000 0x1000>;
+			interrupts = <57 2 0 0>;
+		};
+
+		rtic@6000 {
+			compatible = "fsl,sec-v4.2-rtic",
+				     "fsl,sec-v4.0-rtic";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x6000 0x100>;
+			ranges = <0x0 0x6100 0xe00>;
+
+			rtic_a: rtic-a@0 {
+				compatible = "fsl,sec-v4.2-rtic-memory",
+					     "fsl,sec-v4.0-rtic-memory";
+				reg = <0x00 0x20 0x100 0x80>;
+			};
+
+			rtic_b: rtic-b@20 {
+				compatible = "fsl,sec-v4.2-rtic-memory",
+					     "fsl,sec-v4.0-rtic-memory";
+				reg = <0x20 0x20 0x200 0x80>;
+			};
+
+			rtic_c: rtic-c@40 {
+				compatible = "fsl,sec-v4.2-rtic-memory",
+					     "fsl,sec-v4.0-rtic-memory";
+				reg = <0x40 0x20 0x300 0x80>;
+			};
+
+			rtic_d: rtic-d@60 {
+				compatible = "fsl,sec-v4.2-rtic-memory",
+					     "fsl,sec-v4.0-rtic-memory";
+				reg = <0x60 0x20 0x500 0x80>;
+			};
+		};
+	};
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+	global-utilities@e0000 {
+		compatible = "fsl,p1023-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
new file mode 100644
index 0000000..ac45f6d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
@@ -0,0 +1,76 @@
+/*
+ * P1023/P1017 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P1023";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,P1023@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+
+		PowerPC,P1023@1 {
+			device_type = "cpu";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
new file mode 100644
index 0000000..c041050
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -0,0 +1,194 @@
+/*
+ * P2020/P2010 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <19 2 0 0>;
+};
+
+/* controller at 0xa000 */
+&pci0 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <26 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <26 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0x9000 */
+&pci1 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <25 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <25 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+			>;
+	};
+};
+
+/* controller at 0x8000 */
+&pci2 {
+	compatible = "fsl,mpc8548-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 255>;
+	clock-frequency = <33333333>;
+	interrupts = <24 2 0 0>;
+
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <24 2 0 0>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+			0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+			0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+			0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+			>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "fsl,p2020-immr", "simple-bus";
+	bus-frequency = <0>;		// Filled out by uboot.
+
+	ecm-law@0 {
+		compatible = "fsl,ecm-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <12>;
+	};
+
+	ecm@1000 {
+		compatible = "fsl,p2020-ecm", "fsl,ecm";
+		reg = <0x1000 0x1000>;
+		interrupts = <17 2 0 0>;
+	};
+
+	memory-controller@2000 {
+		compatible = "fsl,p2020-memory-controller";
+		reg = <0x2000 0x1000>;
+		interrupts = <18 2 0 0>;
+	};
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-espi-0.dtsi"
+	spi0: spi@7000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "pq3-dma-1.dtsi"
+/include/ "pq3-gpio-0.dtsi"
+
+	L2: l2-cache-controller@20000 {
+		compatible = "fsl,p2020-l2-cache-controller";
+		reg = <0x20000 0x1000>;
+		cache-line-size = <32>;	// 32 bytes
+		cache-size = <0x80000>; // L2,512K
+		interrupts = <16 2 0 0>;
+	};
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-timer-0.dtsi"
+
+	ptp_clock@24e00 {
+		interrupts = <68 2 0 0 69 2 0 0 70 2 0 0>;
+	};
+
+
+/include/ "pq3-etsec1-1.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.1-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+	global-utilities@e0000 {
+		compatible = "fsl,p2020-guts";
+		reg = <0xe0000 0x1000>;
+		fsl,has-rstcr;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
new file mode 100644
index 0000000..3213288
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
@@ -0,0 +1,69 @@
+/*
+ * P2020/P2010 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P2020";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,P2020@0 {
+			device_type = "cpu";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+
+		PowerPC,P2020@1 {
+			device_type = "cpu";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
new file mode 100644
index 0000000..234a399
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -0,0 +1,325 @@
+/*
+ * P2041/P2040 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <25 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+	compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 15>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 15>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 40 1 0 0
+			0000 0 0 2 &mpic 1 1 0 0
+			0000 0 0 3 &mpic 2 1 0 0
+			0000 0 0 4 &mpic 3 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x201000 */
+&pci1 {
+	compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 14>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 14>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 41 1 0 0
+			0000 0 0 2 &mpic 5 1 0 0
+			0000 0 0 3 &mpic 6 1 0 0
+			0000 0 0 4 &mpic 7 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x202000 */
+&pci2 {
+	compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 13>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 13>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 42 1 0 0
+			0000 0 0 2 &mpic 9 1 0 0
+			0000 0 0 3 &mpic 10 1 0 0
+			0000 0 0 4 &mpic 11 1 0 0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <16 2 1 11>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+
+	port2 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <2>;
+	};
+};
+
+&dcsr {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,dcsr", "simple-bus";
+
+	dcsr-epu@0 {
+		compatible = "fsl,dcsr-epu";
+		interrupts = <52 2 0 0
+			      84 2 0 0
+			      85 2 0 0>;
+		reg = <0x0 0x1000>;
+	};
+	dcsr-npc {
+		compatible = "fsl,dcsr-npc";
+		reg = <0x1000 0x1000 0x1000000 0x8000>;
+	};
+	dcsr-nxc@2000 {
+		compatible = "fsl,dcsr-nxc";
+		reg = <0x2000 0x1000>;
+	};
+	dcsr-corenet {
+		compatible = "fsl,dcsr-corenet";
+		reg = <0x8000 0x1000 0xB0000 0x1000>;
+	};
+	dcsr-dpaa@9000 {
+		compatible = "fsl,p2041-dcsr-dpaa", "fsl,dcsr-dpaa";
+		reg = <0x9000 0x1000>;
+	};
+	dcsr-ocn@11000 {
+		compatible = "fsl,p2041-dcsr-ocn", "fsl,dcsr-ocn";
+		reg = <0x11000 0x1000>;
+	};
+	dcsr-ddr@12000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr1>;
+		reg = <0x12000 0x1000>;
+	};
+	dcsr-nal@18000 {
+		compatible = "fsl,p2041-dcsr-nal", "fsl,dcsr-nal";
+		reg = <0x18000 0x1000>;
+	};
+	dcsr-rcpm@22000 {
+		compatible = "fsl,p2041-dcsr-rcpm", "fsl,dcsr-rcpm";
+		reg = <0x22000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@40000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu0>;
+		reg = <0x40000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@41000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu1>;
+		reg = <0x41000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@42000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu2>;
+		reg = <0x42000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@43000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu3>;
+		reg = <0x43000 0x1000>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "simple-bus";
+
+	soc-sram-error {
+		compatible = "fsl,soc-sram-error";
+		interrupts = <16 2 1 29>;
+	};
+
+	corenet-law@0 {
+		compatible = "fsl,corenet-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <32>;
+	};
+
+	ddr1: memory-controller@8000 {
+		compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+		reg = <0x8000 0x1000>;
+		interrupts = <16 2 1 23>;
+	};
+
+	cpc: l3-cache-controller@10000 {
+		compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+		reg = <0x10000 0x1000>;
+		interrupts = <16 2 1 27>;
+	};
+
+	corenet-cf@18000 {
+		compatible = "fsl,corenet-cf";
+		reg = <0x18000 0x1000>;
+		interrupts = <16 2 1 31>;
+		fsl,ccf-num-csdids = <32>;
+		fsl,ccf-num-snoopids = <32>;
+	};
+
+	iommu@20000 {
+		compatible = "fsl,pamu-v1.0", "fsl,pamu";
+		reg = <0x20000 0x4000>;
+		interrupts = <
+			24 2 0 0
+			16 2 1 30>;
+	};
+
+/include/ "qoriq-mpic.dtsi"
+
+	guts: global-utilities@e0000 {
+		compatible = "fsl,qoriq-device-config-1.0";
+		reg = <0xe0000 0xe00>;
+		fsl,has-rstcr;
+		#sleep-cells = <1>;
+		fsl,liodn-bits = <12>;
+	};
+
+	pins: global-utilities@e0e00 {
+		compatible = "fsl,qoriq-pin-control-1.0";
+		reg = <0xe0e00 0x200>;
+		#sleep-cells = <2>;
+	};
+
+	clockgen: global-utilities@e1000 {
+		compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
+		reg = <0xe1000 0x1000>;
+		clock-frequency = <0>;
+	};
+
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,qoriq-rcpm-1.0";
+		reg = <0xe2000 0x1000>;
+		#sleep-cells = <1>;
+	};
+
+	sfp: sfp@e8000 {
+		compatible = "fsl,p2041-sfp", "fsl,qoriq-sfp-1.0";
+		reg	   = <0xe8000 0x1000>;
+	};
+
+	serdes: serdes@ea000 {
+		compatible = "fsl,p2041-serdes";
+		reg	   = <0xea000 0x1000>;
+	};
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+	spi@110000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "qoriq-esdhc-0.dtsi"
+	sdhc@114000 {
+		sdhci,auto-cmd12;
+	};
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+		usb0: usb@210000 {
+			phy_type = "utmi";
+			port0;
+		};
+
+/include/ "qoriq-usb2-dr-0.dtsi"
+		usb1: usb@211000 {
+			dr_mode = "host";
+			phy_type = "utmi";
+		};
+
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec4.2-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
new file mode 100644
index 0000000..2d0a40d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
@@ -0,0 +1,111 @@
+/*
+ * P2041 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P2041";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		ccsr = &soc;
+		dcsr = &dcsr;
+
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+		usb0 = &usb0;
+		usb1 = &usb1;
+		dma0 = &dma0;
+		dma1 = &dma1;
+		sdhc = &sdhc;
+		msi0 = &msi0;
+		msi1 = &msi1;
+		msi2 = &msi2;
+
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+		sec_mon = &sec_mon;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: PowerPC,e500mc@0 {
+			device_type = "cpu";
+			reg = <0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu1: PowerPC,e500mc@1 {
+			device_type = "cpu";
+			reg = <1>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu2: PowerPC,e500mc@2 {
+			device_type = "cpu";
+			reg = <2>;
+			next-level-cache = <&L2_2>;
+			L2_2: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu3: PowerPC,e500mc@3 {
+			device_type = "cpu";
+			reg = <3>;
+			next-level-cache = <&L2_3>;
+			L2_3: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
new file mode 100644
index 0000000..d41d08d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -0,0 +1,352 @@
+/*
+ * P3041 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <25 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+	compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 15>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 15>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 40 1 0 0
+			0000 0 0 2 &mpic 1 1 0 0
+			0000 0 0 3 &mpic 2 1 0 0
+			0000 0 0 4 &mpic 3 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x201000 */
+&pci1 {
+	compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 14>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 14>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 41 1 0 0
+			0000 0 0 2 &mpic 5 1 0 0
+			0000 0 0 3 &mpic 6 1 0 0
+			0000 0 0 4 &mpic 7 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x202000 */
+&pci2 {
+	compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 13>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 13>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 42 1 0 0
+			0000 0 0 2 &mpic 9 1 0 0
+			0000 0 0 3 &mpic 10 1 0 0
+			0000 0 0 4 &mpic 11 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x203000 */
+&pci3 {
+	compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 12>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 12>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 43 1 0 0
+			0000 0 0 2 &mpic 0 1 0 0
+			0000 0 0 3 &mpic 4 1 0 0
+			0000 0 0 4 &mpic 8 1 0 0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <16 2 1 11>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+
+	port2 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <2>;
+	};
+};
+
+&dcsr {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,dcsr", "simple-bus";
+
+	dcsr-epu@0 {
+		compatible = "fsl,dcsr-epu";
+		interrupts = <52 2 0 0
+			      84 2 0 0
+			      85 2 0 0>;
+		reg = <0x0 0x1000>;
+	};
+	dcsr-npc {
+		compatible = "fsl,dcsr-npc";
+		reg = <0x1000 0x1000 0x1000000 0x8000>;
+	};
+	dcsr-nxc@2000 {
+		compatible = "fsl,dcsr-nxc";
+		reg = <0x2000 0x1000>;
+	};
+	dcsr-corenet {
+		compatible = "fsl,dcsr-corenet";
+		reg = <0x8000 0x1000 0xB0000 0x1000>;
+	};
+	dcsr-dpaa@9000 {
+		compatible = "fsl,p3041-dcsr-dpaa", "fsl,dcsr-dpaa";
+		reg = <0x9000 0x1000>;
+	};
+	dcsr-ocn@11000 {
+		compatible = "fsl,p3041-dcsr-ocn", "fsl,dcsr-ocn";
+		reg = <0x11000 0x1000>;
+	};
+	dcsr-ddr@12000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr1>;
+		reg = <0x12000 0x1000>;
+	};
+	dcsr-nal@18000 {
+		compatible = "fsl,p3041-dcsr-nal", "fsl,dcsr-nal";
+		reg = <0x18000 0x1000>;
+	};
+	dcsr-rcpm@22000 {
+		compatible = "fsl,p3041-dcsr-rcpm", "fsl,dcsr-rcpm";
+		reg = <0x22000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@40000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu0>;
+		reg = <0x40000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@41000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu1>;
+		reg = <0x41000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@42000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu2>;
+		reg = <0x42000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@43000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu3>;
+		reg = <0x43000 0x1000>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "simple-bus";
+
+	soc-sram-error {
+		compatible = "fsl,soc-sram-error";
+		interrupts = <16 2 1 29>;
+	};
+
+	corenet-law@0 {
+		compatible = "fsl,corenet-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <32>;
+	};
+
+	ddr1: memory-controller@8000 {
+		compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+		reg = <0x8000 0x1000>;
+		interrupts = <16 2 1 23>;
+	};
+
+	cpc: l3-cache-controller@10000 {
+		compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+		reg = <0x10000 0x1000>;
+		interrupts = <16 2 1 27>;
+	};
+
+	corenet-cf@18000 {
+		compatible = "fsl,corenet-cf";
+		reg = <0x18000 0x1000>;
+		interrupts = <16 2 1 31>;
+		fsl,ccf-num-csdids = <32>;
+		fsl,ccf-num-snoopids = <32>;
+	};
+
+	iommu@20000 {
+		compatible = "fsl,pamu-v1.0", "fsl,pamu";
+		reg = <0x20000 0x4000>;
+		interrupts = <
+			24 2 0 0
+			16 2 1 30>;
+	};
+
+/include/ "qoriq-mpic.dtsi"
+
+	guts: global-utilities@e0000 {
+		compatible = "fsl,qoriq-device-config-1.0";
+		reg = <0xe0000 0xe00>;
+		fsl,has-rstcr;
+		#sleep-cells = <1>;
+		fsl,liodn-bits = <12>;
+	};
+
+	pins: global-utilities@e0e00 {
+		compatible = "fsl,qoriq-pin-control-1.0";
+		reg = <0xe0e00 0x200>;
+		#sleep-cells = <2>;
+	};
+
+	clockgen: global-utilities@e1000 {
+		compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
+		reg = <0xe1000 0x1000>;
+		clock-frequency = <0>;
+	};
+
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,qoriq-rcpm-1.0";
+		reg = <0xe2000 0x1000>;
+		#sleep-cells = <1>;
+	};
+
+	sfp: sfp@e8000 {
+		compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0";
+		reg	   = <0xe8000 0x1000>;
+	};
+
+	serdes: serdes@ea000 {
+		compatible = "fsl,p3041-serdes";
+		reg	   = <0xea000 0x1000>;
+	};
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+	spi@110000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "qoriq-esdhc-0.dtsi"
+	sdhc@114000 {
+		sdhci,auto-cmd12;
+	};
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+		usb0: usb@210000 {
+			phy_type = "utmi";
+			port0;
+		};
+
+/include/ "qoriq-usb2-dr-0.dtsi"
+		usb1: usb@211000 {
+			dr_mode = "host";
+			phy_type = "utmi";
+		};
+
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec4.2-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
new file mode 100644
index 0000000..136def3
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
@@ -0,0 +1,112 @@
+/*
+ * P3041 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P3041";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		ccsr = &soc;
+		dcsr = &dcsr;
+
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+		pci3 = &pci3;
+		usb0 = &usb0;
+		usb1 = &usb1;
+		dma0 = &dma0;
+		dma1 = &dma1;
+		sdhc = &sdhc;
+		msi0 = &msi0;
+		msi1 = &msi1;
+		msi2 = &msi2;
+
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+		sec_mon = &sec_mon;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: PowerPC,e500mc@0 {
+			device_type = "cpu";
+			reg = <0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu1: PowerPC,e500mc@1 {
+			device_type = "cpu";
+			reg = <1>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu2: PowerPC,e500mc@2 {
+			device_type = "cpu";
+			reg = <2>;
+			next-level-cache = <&L2_2>;
+			L2_2: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu3: PowerPC,e500mc@3 {
+			device_type = "cpu";
+			reg = <3>;
+			next-level-cache = <&L2_3>;
+			L2_3: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
new file mode 100644
index 0000000..a63edd1
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
@@ -0,0 +1,296 @@
+/*
+ * P3060 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <25 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+	compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 15>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 15>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 40 1 0 0
+			0000 0 0 2 &mpic 1 1 0 0
+			0000 0 0 3 &mpic 2 1 0 0
+			0000 0 0 4 &mpic 3 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x201000 */
+&pci1 {
+	compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 14>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 14>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 41 1 0 0
+			0000 0 0 2 &mpic 5 1 0 0
+			0000 0 0 3 &mpic 6 1 0 0
+			0000 0 0 4 &mpic 7 1 0 0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <16 2 1 11>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	fsl,srio-rmu-handle = <&rmu>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+
+	port2 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <2>;
+	};
+};
+
+&dcsr {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,dcsr", "simple-bus";
+
+	dcsr-epu@0 {
+		compatible = "fsl,dcsr-epu";
+		interrupts = <52 2 0 0
+			      84 2 0 0
+			      85 2 0 0>;
+		reg = <0x0 0x1000>;
+	};
+	dcsr-npc {
+		compatible = "fsl,dcsr-npc";
+		reg = <0x1000 0x1000 0x1000000 0x8000>;
+	};
+	dcsr-nxc@2000 {
+		compatible = "fsl,dcsr-nxc";
+		reg = <0x2000 0x1000>;
+	};
+	dcsr-corenet {
+		compatible = "fsl,dcsr-corenet";
+		reg = <0x8000 0x1000 0xB0000 0x1000>;
+	};
+	dcsr-dpaa@9000 {
+		compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa";
+		reg = <0x9000 0x1000>;
+	};
+	dcsr-ocn@11000 {
+		compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn";
+		reg = <0x11000 0x1000>;
+	};
+	dcsr-ddr@12000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr1>;
+		reg = <0x12000 0x1000>;
+	};
+	dcsr-nal@18000 {
+		compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal";
+		reg = <0x18000 0x1000>;
+	};
+	dcsr-rcpm@22000 {
+		compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm";
+		reg = <0x22000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@40000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu0>;
+		reg = <0x40000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@41000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu1>;
+		reg = <0x41000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@44000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu4>;
+		reg = <0x44000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@45000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu5>;
+		reg = <0x45000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@46000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu6>;
+		reg = <0x46000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@47000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu7>;
+		reg = <0x47000 0x1000>;
+	};
+
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "simple-bus";
+
+	soc-sram-error {
+		compatible = "fsl,soc-sram-error";
+		interrupts = <16 2 1 29>;
+	};
+
+	corenet-law@0 {
+		compatible = "fsl,corenet-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <32>;
+	};
+
+	ddr1: memory-controller@8000 {
+		compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
+		reg = <0x8000 0x1000>;
+		interrupts = <16 2 1 23>;
+	};
+
+	cpc: l3-cache-controller@10000 {
+		compatible = "fsl,p3060-l3-cache-controller", "cache";
+		reg = <0x10000 0x1000
+		       0x11000 0x1000>;
+		interrupts = <16 2 1 27
+			      16 2 1 26>;
+	};
+
+	corenet-cf@18000 {
+		compatible = "fsl,corenet-cf";
+		reg = <0x18000 0x1000>;
+		interrupts = <16 2 1 31>;
+		fsl,ccf-num-csdids = <32>;
+		fsl,ccf-num-snoopids = <32>;
+	};
+
+	iommu@20000 {
+		compatible = "fsl,pamu-v1.0", "fsl,pamu";
+		reg = <0x20000 0x5000>;
+		interrupts = <
+			24 2 0 0
+			16 2 1 30>;
+	};
+
+/include/ "qoriq-rmu-0.dtsi"
+/include/ "qoriq-mpic.dtsi"
+
+	guts: global-utilities@e0000 {
+		compatible = "fsl,qoriq-device-config-1.0";
+		reg = <0xe0000 0xe00>;
+		fsl,has-rstcr;
+		#sleep-cells = <1>;
+		fsl,liodn-bits = <12>;
+	};
+
+	pins: global-utilities@e0e00 {
+		compatible = "fsl,qoriq-pin-control-1.0";
+		reg = <0xe0e00 0x200>;
+		#sleep-cells = <2>;
+	};
+
+	clockgen: global-utilities@e1000 {
+		compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0";
+		reg = <0xe1000 0x1000>;
+		clock-frequency = <0>;
+	};
+
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,qoriq-rcpm-1.0";
+		reg = <0xe2000 0x1000>;
+		#sleep-cells = <1>;
+	};
+
+	sfp: sfp@e8000 {
+		compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0";
+		reg	   = <0xe8000 0x1000>;
+	};
+
+	serdes: serdes@ea000 {
+		compatible = "fsl,p3060-serdes";
+		reg	   = <0xea000 0x1000>;
+	};
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+	spi@110000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+/include/ "qoriq-usb2-dr-0.dtsi"
+/include/ "qoriq-sec4.1-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi
new file mode 100644
index 0000000..00c8e70
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi
@@ -0,0 +1,125 @@
+/*
+ * P3060 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P3060";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		ccsr = &soc;
+		dcsr = &dcsr;
+
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		usb0 = &usb0;
+		usb1 = &usb1;
+		dma0 = &dma0;
+		dma1 = &dma1;
+		msi0 = &msi0;
+		msi1 = &msi1;
+		msi2 = &msi2;
+
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+		sec_mon = &sec_mon;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: PowerPC,e500mc@0 {
+			device_type = "cpu";
+			reg = <0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu1: PowerPC,e500mc@1 {
+			device_type = "cpu";
+			reg = <1>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu4: PowerPC,e500mc@4 {
+			device_type = "cpu";
+			reg = <4>;
+			next-level-cache = <&L2_4>;
+			L2_4: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu5: PowerPC,e500mc@5 {
+			device_type = "cpu";
+			reg = <5>;
+			next-level-cache = <&L2_5>;
+			L2_5: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu6: PowerPC,e500mc@6 {
+			device_type = "cpu";
+			reg = <6>;
+			next-level-cache = <&L2_6>;
+			L2_6: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu7: PowerPC,e500mc@7 {
+			device_type = "cpu";
+			reg = <7>;
+			next-level-cache = <&L2_7>;
+			L2_7: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+	};
+};
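
[Editor's note: the si-pre/si-post pair added above is meant to be wrapped by a board-level .dts, which defines the labelled nodes (soc, dcsr, lbc, pci0/1, ...) that the post include then fills in. A minimal sketch of that pattern follows; it is not part of this patch, and the board name, CCSR/DCSR addresses and omitted nodes are assumptions for illustration only.]

	/include/ "fsl/p3060si-pre.dtsi"

	/ {
		model = "fsl,P3060QDS";			/* hypothetical board name */
		compatible = "fsl,P3060QDS";

		memory {
			device_type = "memory";		/* size fixed up by the bootloader */
		};

		dcsr: dcsr@f00000000 {
			ranges = <0x0 0xf 0x00000000 0x01008000>;	/* assumed 36-bit mapping */
		};

		soc: soc@ffe000000 {
			ranges = <0x0 0xf 0xfe000000 0x1000000>;	/* assumed CCSR base */
			/* board-specific devices (NOR flash, I2C slaves, PHYs) go here */
		};

		/* lbc, pci0, pci1 and rio nodes with matching labels omitted for brevity */
	};

	/include/ "fsl/p3060si-post.dtsi"
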
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
new file mode 100644
index 0000000..8d35d2c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -0,0 +1,350 @@
+/*
+ * P4080/P4040 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <25 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+	compatible = "fsl,p4080-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 15>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 15>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 40 1 0 0
+			0000 0 0 2 &mpic 1 1 0 0
+			0000 0 0 3 &mpic 2 1 0 0
+			0000 0 0 4 &mpic 3 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x201000 */
+&pci1 {
+	compatible = "fsl,p4080-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 14>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 14>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 41 1 0 0
+			0000 0 0 2 &mpic 5 1 0 0
+			0000 0 0 3 &mpic 6 1 0 0
+			0000 0 0 4 &mpic 7 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x202000 */
+&pci2 {
+	compatible = "fsl,p4080-pcie";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 13>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 13>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 42 1 0 0
+			0000 0 0 2 &mpic 9 1 0 0
+			0000 0 0 3 &mpic 10 1 0 0
+			0000 0 0 4 &mpic 11 1 0 0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <16 2 1 11>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	fsl,srio-rmu-handle = <&rmu>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+
+	port2 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <2>;
+	};
+};
+
+&dcsr {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,dcsr", "simple-bus";
+
+	dcsr-epu@0 {
+		compatible = "fsl,dcsr-epu";
+		interrupts = <52 2 0 0
+			      84 2 0 0
+			      85 2 0 0>;
+		reg = <0x0 0x1000>;
+	};
+	dcsr-npc {
+		compatible = "fsl,dcsr-npc";
+		reg = <0x1000 0x1000 0x1000000 0x8000>;
+	};
+	dcsr-nxc@2000 {
+		compatible = "fsl,dcsr-nxc";
+		reg = <0x2000 0x1000>;
+	};
+	dcsr-corenet {
+		compatible = "fsl,dcsr-corenet";
+		reg = <0x8000 0x1000 0xB0000 0x1000>;
+	};
+	dcsr-dpaa@9000 {
+		compatible = "fsl,p4080-dcsr-dpaa", "fsl,dcsr-dpaa";
+		reg = <0x9000 0x1000>;
+	};
+	dcsr-ocn@11000 {
+		compatible = "fsl,p4080-dcsr-ocn", "fsl,dcsr-ocn";
+		reg = <0x11000 0x1000>;
+	};
+	dcsr-ddr@12000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr1>;
+		reg = <0x12000 0x1000>;
+	};
+	dcsr-ddr@13000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr2>;
+		reg = <0x13000 0x1000>;
+	};
+	dcsr-nal@18000 {
+		compatible = "fsl,p4080-dcsr-nal", "fsl,dcsr-nal";
+		reg = <0x18000 0x1000>;
+	};
+	dcsr-rcpm@22000 {
+		compatible = "fsl,p4080-dcsr-rcpm", "fsl,dcsr-rcpm";
+		reg = <0x22000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@40000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu0>;
+		reg = <0x40000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@41000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu1>;
+		reg = <0x41000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@42000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu2>;
+		reg = <0x42000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@43000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu3>;
+		reg = <0x43000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@44000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu4>;
+		reg = <0x44000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@45000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu5>;
+		reg = <0x45000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@46000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu6>;
+		reg = <0x46000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@47000 {
+		compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu7>;
+		reg = <0x47000 0x1000>;
+	};
+
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "simple-bus";
+
+	soc-sram-error {
+		compatible = "fsl,soc-sram-error";
+		interrupts = <16 2 1 29>;
+	};
+
+	corenet-law@0 {
+		compatible = "fsl,corenet-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <32>;
+	};
+
+	ddr1: memory-controller@8000 {
+		compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
+		reg = <0x8000 0x1000>;
+		interrupts = <16 2 1 23>;
+	};
+
+	ddr2: memory-controller@9000 {
+		compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller";
+		reg = <0x9000 0x1000>;
+		interrupts = <16 2 1 22>;
+	};
+
+	cpc: l3-cache-controller@10000 {
+		compatible = "fsl,p4080-l3-cache-controller", "cache";
+		reg = <0x10000 0x1000
+		       0x11000 0x1000>;
+		interrupts = <16 2 1 27
+			      16 2 1 26>;
+	};
+
+	corenet-cf@18000 {
+		compatible = "fsl,corenet-cf";
+		reg = <0x18000 0x1000>;
+		interrupts = <16 2 1 31>;
+		fsl,ccf-num-csdids = <32>;
+		fsl,ccf-num-snoopids = <32>;
+	};
+
+	iommu@20000 {
+		compatible = "fsl,pamu-v1.0", "fsl,pamu";
+		reg = <0x20000 0x5000>;
+		interrupts = <
+			24 2 0 0
+			16 2 1 30>;
+	};
+
+/include/ "qoriq-rmu-0.dtsi"
+/include/ "qoriq-mpic.dtsi"
+
+	guts: global-utilities@e0000 {
+		compatible = "fsl,qoriq-device-config-1.0";
+		reg = <0xe0000 0xe00>;
+		fsl,has-rstcr;
+		#sleep-cells = <1>;
+		fsl,liodn-bits = <12>;
+	};
+
+	pins: global-utilities@e0e00 {
+		compatible = "fsl,qoriq-pin-control-1.0";
+		reg = <0xe0e00 0x200>;
+		#sleep-cells = <2>;
+	};
+
+	clockgen: global-utilities@e1000 {
+		compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
+		reg = <0xe1000 0x1000>;
+		clock-frequency = <0>;
+	};
+
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,qoriq-rcpm-1.0";
+		reg = <0xe2000 0x1000>;
+		#sleep-cells = <1>;
+	};
+
+	sfp: sfp@e8000 {
+		compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0";
+		reg	   = <0xe8000 0x1000>;
+	};
+
+	serdes: serdes@ea000 {
+		compatible = "fsl,p4080-serdes";
+		reg	   = <0xea000 0x1000>;
+	};
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+	spi@110000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "qoriq-esdhc-0.dtsi"
+	sdhc@114000 {
+		voltage-ranges = <3300 3300>;
+		sdhci,auto-cmd12;
+	};
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+/include/ "qoriq-usb2-dr-0.dtsi"
+/include/ "qoriq-sec4.0-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
new file mode 100644
index 0000000..b9556ee
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
@@ -0,0 +1,143 @@
+/*
+ * P4080/P4040 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P4080";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		ccsr = &soc;
+		dcsr = &dcsr;
+
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+		usb0 = &usb0;
+		usb1 = &usb1;
+		dma0 = &dma0;
+		dma1 = &dma1;
+		sdhc = &sdhc;
+		msi0 = &msi0;
+		msi1 = &msi1;
+		msi2 = &msi2;
+
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+		sec_mon = &sec_mon;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: PowerPC,e500mc@0 {
+			device_type = "cpu";
+			reg = <0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu1: PowerPC,e500mc@1 {
+			device_type = "cpu";
+			reg = <1>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu2: PowerPC,e500mc@2 {
+			device_type = "cpu";
+			reg = <2>;
+			next-level-cache = <&L2_2>;
+			L2_2: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu3: PowerPC,e500mc@3 {
+			device_type = "cpu";
+			reg = <3>;
+			next-level-cache = <&L2_3>;
+			L2_3: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu4: PowerPC,e500mc@4 {
+			device_type = "cpu";
+			reg = <4>;
+			next-level-cache = <&L2_4>;
+			L2_4: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu5: PowerPC,e500mc@5 {
+			device_type = "cpu";
+			reg = <5>;
+			next-level-cache = <&L2_5>;
+			L2_5: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu6: PowerPC,e500mc@6 {
+			device_type = "cpu";
+			reg = <6>;
+			next-level-cache = <&L2_6>;
+			L2_6: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu7: PowerPC,e500mc@7 {
+			device_type = "cpu";
+			reg = <7>;
+			next-level-cache = <&L2_7>;
+			L2_7: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
new file mode 100644
index 0000000..914074b
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -0,0 +1,355 @@
+/*
+ * P5020/P5010 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+	compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
+	interrupts = <25 2 0 0>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+	compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 15>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 15>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 40 1 0 0
+			0000 0 0 2 &mpic 1 1 0 0
+			0000 0 0 3 &mpic 2 1 0 0
+			0000 0 0 4 &mpic 3 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x201000 */
+&pci1 {
+	compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 14>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 14>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 41 1 0 0
+			0000 0 0 2 &mpic 5 1 0 0
+			0000 0 0 3 &mpic 6 1 0 0
+			0000 0 0 4 &mpic 7 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x202000 */
+&pci2 {
+	compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 13>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 13>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 42 1 0 0
+			0000 0 0 2 &mpic 9 1 0 0
+			0000 0 0 3 &mpic 10 1 0 0
+			0000 0 0 4 &mpic 11 1 0 0
+			>;
+	};
+};
+
+/* controller at 0x203000 */
+&pci3 {
+	compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+	device_type = "pci";
+	#size-cells = <2>;
+	#address-cells = <3>;
+	bus-range = <0x0 0xff>;
+	clock-frequency = <33333333>;
+	interrupts = <16 2 1 12>;
+	pcie@0 {
+		reg = <0 0 0 0 0>;
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		interrupts = <16 2 1 12>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <
+			/* IDSEL 0x0 */
+			0000 0 0 1 &mpic 43 1 0 0
+			0000 0 0 2 &mpic 0 1 0 0
+			0000 0 0 3 &mpic 4 1 0 0
+			0000 0 0 4 &mpic 8 1 0 0
+			>;
+	};
+};
+
+&rio {
+	compatible = "fsl,srio";
+	interrupts = <16 2 1 11>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	ranges;
+
+	port1 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <1>;
+	};
+
+	port2 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		cell-index = <2>;
+	};
+};
+
+&dcsr {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,dcsr", "simple-bus";
+
+	dcsr-epu@0 {
+		compatible = "fsl,dcsr-epu";
+		interrupts = <52 2 0 0
+			      84 2 0 0
+			      85 2 0 0>;
+		reg = <0x0 0x1000>;
+	};
+	dcsr-npc {
+		compatible = "fsl,dcsr-npc";
+		reg = <0x1000 0x1000 0x1000000 0x8000>;
+	};
+	dcsr-nxc@2000 {
+		compatible = "fsl,dcsr-nxc";
+		reg = <0x2000 0x1000>;
+	};
+	dcsr-corenet {
+		compatible = "fsl,dcsr-corenet";
+		reg = <0x8000 0x1000 0xB0000 0x1000>;
+	};
+	dcsr-dpaa@9000 {
+		compatible = "fsl,p5020-dcsr-dpaa", "fsl,dcsr-dpaa";
+		reg = <0x9000 0x1000>;
+	};
+	dcsr-ocn@11000 {
+		compatible = "fsl,p5020-dcsr-ocn", "fsl,dcsr-ocn";
+		reg = <0x11000 0x1000>;
+	};
+	dcsr-ddr@12000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr1>;
+		reg = <0x12000 0x1000>;
+	};
+	dcsr-ddr@13000 {
+		compatible = "fsl,dcsr-ddr";
+		dev-handle = <&ddr2>;
+		reg = <0x13000 0x1000>;
+	};
+	dcsr-nal@18000 {
+		compatible = "fsl,p5020-dcsr-nal", "fsl,dcsr-nal";
+		reg = <0x18000 0x1000>;
+	};
+	dcsr-rcpm@22000 {
+		compatible = "fsl,p5020-dcsr-rcpm", "fsl,dcsr-rcpm";
+		reg = <0x22000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@40000 {
+		compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu0>;
+		reg = <0x40000 0x1000>;
+	};
+	dcsr-cpu-sb-proxy@41000 {
+		compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+		cpu-handle = <&cpu1>;
+		reg = <0x41000 0x1000>;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "soc";
+	compatible = "simple-bus";
+
+	soc-sram-error {
+		compatible = "fsl,soc-sram-error";
+		interrupts = <16 2 1 29>;
+	};
+
+	corenet-law@0 {
+		compatible = "fsl,corenet-law";
+		reg = <0x0 0x1000>;
+		fsl,num-laws = <32>;
+	};
+
+	ddr1: memory-controller@8000 {
+		compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+		reg = <0x8000 0x1000>;
+		interrupts = <16 2 1 23>;
+	};
+
+	ddr2: memory-controller@9000 {
+		compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
+		reg = <0x9000 0x1000>;
+		interrupts = <16 2 1 22>;
+	};
+
+	cpc: l3-cache-controller@10000 {
+		compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+		reg = <0x10000 0x1000
+		       0x11000 0x1000>;
+		interrupts = <16 2 1 27
+			      16 2 1 26>;
+	};
+
+	corenet-cf@18000 {
+		compatible = "fsl,corenet-cf";
+		reg = <0x18000 0x1000>;
+		interrupts = <16 2 1 31>;
+		fsl,ccf-num-csdids = <32>;
+		fsl,ccf-num-snoopids = <32>;
+	};
+
+	iommu@20000 {
+		compatible = "fsl,pamu-v1.0", "fsl,pamu";
+		reg = <0x20000 0x4000>;
+		interrupts = <
+			24 2 0 0
+			16 2 1 30>;
+	};
+
+/include/ "qoriq-mpic.dtsi"
+
+	guts: global-utilities@e0000 {
+		compatible = "fsl,qoriq-device-config-1.0";
+		reg = <0xe0000 0xe00>;
+		fsl,has-rstcr;
+		#sleep-cells = <1>;
+		fsl,liodn-bits = <12>;
+	};
+
+	pins: global-utilities@e0e00 {
+		compatible = "fsl,qoriq-pin-control-1.0";
+		reg = <0xe0e00 0x200>;
+		#sleep-cells = <2>;
+	};
+
+	clockgen: global-utilities@e1000 {
+		compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
+		reg = <0xe1000 0x1000>;
+		clock-frequency = <0>;
+	};
+
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,qoriq-rcpm-1.0";
+		reg = <0xe2000 0x1000>;
+		#sleep-cells = <1>;
+	};
+
+	sfp: sfp@e8000 {
+		compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0";
+		reg	   = <0xe8000 0x1000>;
+	};
+
+	serdes: serdes@ea000 {
+		compatible = "fsl,p5020-serdes";
+		reg	   = <0xea000 0x1000>;
+	};
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+	spi@110000 {
+		fsl,espi-num-chipselects = <4>;
+	};
+
+/include/ "qoriq-esdhc-0.dtsi"
+	sdhc@114000 {
+		sdhci,auto-cmd12;
+	};
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+		usb0: usb@210000 {
+			phy_type = "utmi";
+			port0;
+		};
+
+/include/ "qoriq-usb2-dr-0.dtsi"
+		usb1: usb@211000 {
+			dr_mode = "host";
+			phy_type = "utmi";
+		};
+
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec4.2-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
new file mode 100644
index 0000000..ae823a4
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
@@ -0,0 +1,96 @@
+/*
+ * P5020/P5010 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+	compatible = "fsl,P5020";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	aliases {
+		ccsr = &soc;
+		dcsr = &dcsr;
+
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		pci0 = &pci0;
+		pci1 = &pci1;
+		pci2 = &pci2;
+		pci3 = &pci3;
+		usb0 = &usb0;
+		usb1 = &usb1;
+		dma0 = &dma0;
+		dma1 = &dma1;
+		sdhc = &sdhc;
+		msi0 = &msi0;
+		msi1 = &msi1;
+		msi2 = &msi2;
+
+		crypto = &crypto;
+		sec_jr0 = &sec_jr0;
+		sec_jr1 = &sec_jr1;
+		sec_jr2 = &sec_jr2;
+		sec_jr3 = &sec_jr3;
+		rtic_a = &rtic_a;
+		rtic_b = &rtic_b;
+		rtic_c = &rtic_c;
+		rtic_d = &rtic_d;
+		sec_mon = &sec_mon;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: PowerPC,e5500@0 {
+			device_type = "cpu";
+			reg = <0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+		cpu1: PowerPC,e5500@1 {
+			device_type = "cpu";
+			reg = <1>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+				next-level-cache = <&cpc>;
+			};
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi
new file mode 100644
index 0000000..b5b37ad
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi
@@ -0,0 +1,66 @@
+/*
+ * PQ3 DMA device tree stub [ controller @ offset 0x21000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma@21300 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,eloplus-dma";
+	reg = <0x21300 0x4>;
+	ranges = <0x0 0x21100 0x200>;
+	cell-index = <0>;
+	dma-channel@0 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x0 0x80>;
+		cell-index = <0>;
+		interrupts = <20 2 0 0>;
+	};
+	dma-channel@80 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x80 0x80>;
+		cell-index = <1>;
+		interrupts = <21 2 0 0>;
+	};
+	dma-channel@100 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x100 0x80>;
+		cell-index = <2>;
+		interrupts = <22 2 0 0>;
+	};
+	dma-channel@180 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x180 0x80>;
+		cell-index = <3>;
+		interrupts = <23 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi
new file mode 100644
index 0000000..28cb8a5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi
@@ -0,0 +1,66 @@
+/*
+ * PQ3 DMA device tree stub [ controller @ offset 0xc300 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma@c300 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,eloplus-dma";
+	reg = <0xc300 0x4>;
+	ranges = <0x0 0xc100 0x200>;
+	cell-index = <1>;
+	dma-channel@0 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x0 0x80>;
+		cell-index = <0>;
+		interrupts = <76 2 0 0>;
+	};
+	dma-channel@80 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x80 0x80>;
+		cell-index = <1>;
+		interrupts = <77 2 0 0>;
+	};
+	dma-channel@100 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x100 0x80>;
+		cell-index = <2>;
+		interrupts = <78 2 0 0>;
+	};
+	dma-channel@180 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x180 0x80>;
+		cell-index = <3>;
+		interrupts = <79 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi
new file mode 100644
index 0000000..5e268fd
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi
@@ -0,0 +1,51 @@
+/*
+ * PQ3 DUART device tree stub [ controller @ offset 0x4000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+serial0: serial@4500 {
+	cell-index = <0>;
+	device_type = "serial";
+	compatible = "fsl,ns16550", "ns16550";
+	reg = <0x4500 0x100>;
+	clock-frequency = <0>;
+	interrupts = <42 2 0 0>;
+};
+
+serial1: serial@4600 {
+	cell-index = <1>;
+	device_type = "serial";
+	compatible = "fsl,ns16550", "ns16550";
+	reg = <0x4600 0x100>;
+	clock-frequency = <0>;
+	interrupts = <42 2 0 0>;
+};
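
[Editor's note: both DUART stubs above leave clock-frequency at 0; in practice the value is fixed up by the bootloader from the platform clock, or a board file can pin it through the serial0/serial1 labels. A hedged sketch of such an override follows; the 400 MHz figure is an assumption, not taken from this patch.]

	&serial0 {
		clock-frequency = <400000000>;	/* example value; real boards usually rely on the U-Boot fixup */
	};
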
diff --git a/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi
new file mode 100644
index 0000000..5743433
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 eSDHC device tree stub [ controller @ offset 0x2e000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sdhc@2e000 {
+	compatible = "fsl,esdhc";
+	reg = <0x2e000 0x1000>;
+	interrupts = <72 0x2 0 0>;
+	/* Filled in by U-Boot */
+	clock-frequency = <0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi
new file mode 100644
index 0000000..75854b2
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 eSPI device tree stub [ controller @ offset 0x7000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+spi@7000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,mpc8536-espi";
+	reg = <0x7000 0x1000>;
+	interrupts = <59 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
new file mode 100644
index 0000000..a1979ae
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x24000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@24000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	cell-index = <0>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "gianfar";
+	reg = <0x24000 0x1000>;
+	ranges = <0x0 0x24000 0x1000>;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+	interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
+};
+
+mdio@24520 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,gianfar-mdio";
+	reg = <0x24520 0x20>;
+};
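
[Editor's note: the eTSEC stub above deliberately leaves local-mac-address zeroed and the MDIO bus empty; a board dts re-opens these nodes inside its soc node to attach PHYs and link them with phy-handle. A rough sketch of that pattern follows; the PHY address, interrupt and connection type are assumptions for illustration.]

	ethernet@24000 {
		phy-handle = <&phy0>;
		phy-connection-type = "rgmii-id";	/* assumed board wiring */
	};

	mdio@24520 {
		phy0: ethernet-phy@0 {
			interrupts = <10 1 0 0>;	/* assumed external PHY IRQ */
			reg = <0x0>;
		};
	};
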
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
new file mode 100644
index 0000000..4c4fdde
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x25000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@25000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	cell-index = <1>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "gianfar";
+	reg = <0x25000 0x1000>;
+	ranges = <0x0 0x25000 0x1000>;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+	interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
+};
+
+mdio@25520 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,gianfar-tbi";
+	reg = <0x25520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
new file mode 100644
index 0000000..4b8ab43
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x26000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@26000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	cell-index = <2>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "gianfar";
+	reg = <0x26000 0x1000>;
+	ranges = <0x0 0x26000 0x1000>;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+	interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>;
+};
+
+mdio@26520 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,gianfar-tbi";
+	reg = <0x26520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
new file mode 100644
index 0000000..40c9137
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x27000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@27000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	cell-index = <3>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "gianfar";
+	reg = <0x27000 0x1000>;
+	ranges = <0x0 0x27000 0x1000>;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+	interrupts = <37 2 0 0 38 2 0 0 39 2 0 0>;
+};
+
+mdio@27520 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,gianfar-tbi";
+	reg = <0x27520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi
new file mode 100644
index 0000000..efe2ca0
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi
@@ -0,0 +1,39 @@
+/*
+ * PQ3 eTSEC Timer (IEEE 1588) device tree stub [ @ offsets 0x24e00 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ptp_clock@24e00 {
+	compatible = "fsl,etsec-ptp";
+	reg = <0x24e00 0xb0>;
+	interrupts = <68 2 0 0 69 2 0 0>;
+};
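
The stub carries only the registers and interrupts of the 1588 timer block; the clock-dependent parameters are expected to come from the board. A hedged sketch of a board-level fragment that merges with the node above by path is shown below — the soc unit address is illustrative, the property names follow the eTSEC PTP binding, and the numeric values are placeholders rather than values from this patch:

	soc@ffe00000 {
		ptp_clock@24e00 {
			fsl,tclk-period  = <5>;
			fsl,tmr-prescale = <200>;
			fsl,tmr-add      = <0xcccccccd>;
			fsl,tmr-fiper1   = <0x3b9ac9fb>;
			fsl,tmr-fiper2   = <0x0001869b>;
			fsl,max-adj      = <249999999>;
		};
	};
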
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
new file mode 100644
index 0000000..1382fec
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
@@ -0,0 +1,60 @@
+/*
+ * PQ3 eTSEC2 device tree stub [ @ offsets 0x24000/0xb0000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+mdio@24000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,etsec2-mdio";
+	reg = <0x24000 0x1000 0xb0030 0x4>;
+};
+
+ethernet@b0000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "fsl,etsec2";
+	fsl,num_rx_queues = <0x8>;
+	fsl,num_tx_queues = <0x8>;
+	fsl,magic-packet;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+
+	queue-group@b0000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb0000 0x1000>;
+		interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
new file mode 100644
index 0000000..221cd2e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
@@ -0,0 +1,60 @@
+/*
+ * PQ3 eTSEC2 device tree stub [ @ offsets 0x25000/0xb1000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+mdio@25000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,etsec2-tbi";
+	reg = <0x25000 0x1000 0xb1030 0x4>;
+};
+
+ethernet@b1000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "fsl,etsec2";
+	fsl,num_rx_queues = <0x8>;
+	fsl,num_tx_queues = <0x8>;
+	fsl,magic-packet;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+
+	queue-group@b1000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb1000 0x1000>;
+		interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
new file mode 100644
index 0000000..61456c3
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
@@ -0,0 +1,59 @@
+/*
+ * PQ3 eTSEC2 device tree stub [ @ offsets 0x26000/0xb2000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mdio@26000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,etsec2-tbi";
+	reg = <0x26000 0x1000 0xb1030 0x4>;
+};
+
+ethernet@b2000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	device_type = "network";
+	model = "eTSEC";
+	compatible = "fsl,etsec2";
+	fsl,num_rx_queues = <0x8>;
+	fsl,num_tx_queues = <0x8>;
+	fsl,magic-packet;
+	local-mac-address = [ 00 00 00 00 00 00 ];
+
+	queue-group@b2000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb2000 0x1000>;
+		interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi
new file mode 100644
index 0000000..034ab8f
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb4000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&enet0_grp2 {
+	queue-group@b4000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb4000 0x1000>;
+		interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>;
+	};
+};
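
Unlike the other stubs, this file does not create a node of its own: it extends a node through the &enet0_grp2 label, which must already be defined by whichever .dtsi includes the stub. The sketch below only illustrates the label-reference mechanism; where the label is actually declared is up to the including SoC file and is not shown in this patch:

	/* the including file labels the ethernet node ... */
	enet0_grp2: ethernet@b0000 {
	};

	/* ... and the stub then merges an extra queue-group into it */
	&enet0_grp2 {
		queue-group@b4000 {
			reg = <0xb4000 0x1000>;
		};
	};
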
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi
new file mode 100644
index 0000000..3be9ba3
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb5000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&enet1_grp2 {
+	queue-group@b5000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb5000 0x1000>;
+		interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi
new file mode 100644
index 0000000..02a3345
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb6000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&enet2_grp2 {
+	queue-group@b6000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb6000 0x1000>;
+		interrupts = <25 2 0 0 26 2 0 0 27 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
new file mode 100644
index 0000000..72a3ef5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 GPIO device tree stub [ controller @ offset 0xf000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio-controller@f000 {
+	#gpio-cells = <2>;
+	compatible = "fsl,pq3-gpio";
+	reg = <0xf000 0x100>;
+	interrupts = <47 0x2 0 0>;
+	gpio-controller;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi
new file mode 100644
index 0000000..d1dd6fb
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 I2C device tree stub [ controller @ offset 0x3000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@3000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	cell-index = <0>;
+	compatible = "fsl-i2c";
+	reg = <0x3000 0x100>;
+	interrupts = <43 2 0 0>;
+	dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi
new file mode 100644
index 0000000..a9bd803
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 I2C device tree stub [ controller @ offset 0x3100 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@3100 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	cell-index = <1>;
+	compatible = "fsl-i2c";
+	reg = <0x3100 0x100>;
+	interrupts = <43 2 0 0>;
+	dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi
new file mode 100644
index 0000000..8734cff
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 MPIC Timer (Group B) device tree stub [ controller @ offset 0x42100 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+timer@42100 {
+	compatible = "fsl,mpic-global-timer";
+	reg = <0x42100 0x100 0x42300 4>;
+	interrupts = <4 0 3 0
+		      5 0 3 0
+		      6 0 3 0
+		      7 0 3 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
new file mode 100644
index 0000000..5c80460
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -0,0 +1,66 @@
+/*
+ * PQ3 MPIC device tree stub [ controller @ offset 0x40000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mpic: pic@40000 {
+	interrupt-controller;
+	#address-cells = <0>;
+	#interrupt-cells = <4>;
+	reg = <0x40000 0x40000>;
+	compatible = "fsl,mpic";
+	device_type = "open-pic";
+};
+
+timer@41100 {
+	compatible = "fsl,mpic-global-timer";
+	reg = <0x41100 0x100 0x41300 4>;
+	interrupts = <0 0 3 0
+		      1 0 3 0
+		      2 0 3 0
+		      3 0 3 0>;
+};
+
+msi@41600 {
+	compatible = "fsl,mpic-msi";
+	reg = <0x41600 0x80>;
+	msi-available-ranges = <0 0x100>;
+	interrupts = <
+		0xe0 0 0 0
+		0xe1 0 0 0
+		0xe2 0 0 0
+		0xe3 0 0 0
+		0xe4 0 0 0
+		0xe5 0 0 0
+		0xe6 0 0 0
+		0xe7 0 0 0>;
+};
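
The pic node sets #interrupt-cells = <4>, which is why every interrupts property in these stubs lists four cells per interrupt; assuming the fsl,mpic binding's <number sense type type-info> layout, the timer entries above use 3 in the third cell to mark them as MPIC timer interrupts. Devices resolve those specifiers against this controller through an inherited interrupt-parent, roughly as sketched here (placing it at the tree root is illustrative):

	/ {
		interrupt-parent = <&mpic>;
	};
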
diff --git a/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi
new file mode 100644
index 0000000..587ca9f
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi
@@ -0,0 +1,68 @@
+/*
+ * PQ3 RIO Message Unit device tree stub [ controller @ offset 0xd3000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+rmu: rmu@d3000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,srio-rmu";
+	reg = <0xd3000 0x500>;
+	ranges = <0x0 0xd3000 0x500>;
+
+	message-unit@0 {
+		compatible = "fsl,srio-msg-unit";
+		reg = <0x0 0x100>;
+		interrupts = <
+			53 2 0 0 /* msg1_tx_irq */
+			54 2 0 0>;/* msg1_rx_irq */
+	};
+	message-unit@100 {
+		compatible = "fsl,srio-msg-unit";
+		reg = <0x100 0x100>;
+		interrupts = <
+			55 2 0 0  /* msg2_tx_irq */
+			56 2 0 0>;/* msg2_rx_irq */
+	};
+	doorbell-unit@400 {
+		compatible = "fsl,srio-dbell-unit";
+		reg = <0x400 0x80>;
+		interrupts = <
+			49 2 0 0  /* bell_outb_irq */
+			50 2 0 0>;/* bell_inb_irq */
+	};
+	port-write-unit@4e0 {
+		compatible = "fsl,srio-port-write-unit";
+		reg = <0x4e0 0x20>;
+		interrupts = <48 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi
new file mode 100644
index 0000000..3c28dd0
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi
@@ -0,0 +1,40 @@
+/*
+ * PQ3 SATAv2 device tree stub [ controller @ offset 0x18000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@18000 {
+	compatible = "fsl,pq-sata-v2";
+	reg = <0x18000 0x1000>;
+	cell-index = <1>;
+	interrupts = <74 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi
new file mode 100644
index 0000000..eefaf28
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi
@@ -0,0 +1,40 @@
+/*
+ * PQ3 SATAv2 device tree stub [ controller @ offset 0x19000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@19000 {
+	compatible = "fsl,pq-sata-v2";
+	reg = <0x19000 0x1000>;
+	cell-index = <2>;
+	interrupts = <41 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi
new file mode 100644
index 0000000..02a5c7a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 Sec/Crypto 2.1 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+	compatible = "fsl,sec2.1", "fsl,sec2.0";
+	reg = <0x30000 0x10000>;
+	interrupts = <45 2 0 0>;
+	fsl,num-channels = <4>;
+	fsl,channel-fifo-len = <24>;
+	fsl,exec-units-mask = <0xfe>;
+	fsl,descriptor-types-mask = <0x12b0ebf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi
new file mode 100644
index 0000000..bba1ba4
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi
@@ -0,0 +1,45 @@
+/*
+ * PQ3 Sec/Crypto 3.0 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+	compatible = "fsl,sec3.0",
+		     "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
+		     "fsl,sec2.0";
+	reg = <0x30000 0x10000>;
+	interrupts = <45 2 0 0 58 2 0 0>;
+	fsl,num-channels = <4>;
+	fsl,channel-fifo-len = <24>;
+	fsl,exec-units-mask = <0x9fe>;
+	fsl,descriptor-types-mask = <0x3ab0ebf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi
new file mode 100644
index 0000000..8f0a566
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi
@@ -0,0 +1,45 @@
+/*
+ * PQ3 Sec/Crypto 3.1 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+	compatible = "fsl,sec3.1", "fsl,sec3.0",
+		     "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
+		     "fsl,sec2.0";
+	reg = <0x30000 0x10000>;
+	interrupts = <45 2 0 0 58 2 0 0>;
+	fsl,num-channels = <4>;
+	fsl,channel-fifo-len = <24>;
+	fsl,exec-units-mask = <0xbfe>;
+	fsl,descriptor-types-mask = <0x3ab0ebf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi
new file mode 100644
index 0000000..c227f27
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi
@@ -0,0 +1,45 @@
+/*
+ * PQ3 Sec/Crypto 3.3 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+	compatible = "fsl,sec3.3", "fsl,sec3.1", "fsl,sec3.0",
+		     "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
+		     "fsl,sec2.0";
+	reg = <0x30000 0x10000>;
+	interrupts = <45 2 0 0 58 2 0 0>;
+	fsl,num-channels = <4>;
+	fsl,channel-fifo-len = <24>;
+	fsl,exec-units-mask = <0x97c>;
+	fsl,descriptor-types-mask = <0x3a30abf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
new file mode 100644
index 0000000..bf957a7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
@@ -0,0 +1,65 @@
+/*
+ * PQ3 Sec/Crypto 4.4 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+	compatible = "fsl,sec4.4", "fsl,sec4.0";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg		 = <0x30000 0x10000>;
+	interrupts	 = <58 2 0 0>;
+
+	sec_jr0: jr@1000 {
+		compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+		reg	   = <0x1000 0x1000>;
+		interrupts	 = <45 2 0 0>;
+	};
+
+	sec_jr1: jr@2000 {
+		compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+		reg	   = <0x2000 0x1000>;
+		interrupts	 = <45 2 0 0>;
+	};
+
+	sec_jr2: jr@3000 {
+		compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+		reg	   = <0x3000 0x1000>;
+		interrupts	 = <45 2 0 0>;
+	};
+
+	sec_jr3: jr@4000 {
+		compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+		reg	   = <0x4000 0x1000>;
+		interrupts	 = <45 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi
new file mode 100644
index 0000000..185ab9d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 USB DR device tree stub [ controller @ offset 0x22000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@22000 {
+	compatible = "fsl-usb2-dr";
+	reg = <0x22000 0x1000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupts = <28 0x2 0 0>;
+};
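
As with the other stubs, this node describes only the controller itself; the PHY wiring is left to the board. A hedged board-level fragment, merged by node path, might look like the following — the soc unit address and the property values are illustrative, while phy_type and dr_mode are standard fsl-usb2-dr properties:

	soc@ffe00000 {
		usb@22000 {
			phy_type = "ulpi";
			dr_mode = "host";
		};
	};
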
diff --git a/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi
new file mode 100644
index 0000000..fe24cd6
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 USB DR device tree stub [ controller @ offset 0x23000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@23000 {
+	compatible = "fsl-usb2-dr";
+	reg = <0x23000 0x1000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupts = <46 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi
new file mode 100644
index 0000000..1aebf3e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi
@@ -0,0 +1,66 @@
+/*
+ * QorIQ DMA device tree stub [ controller @ offset 0x100000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma0: dma@100300 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,eloplus-dma";
+	reg = <0x100300 0x4>;
+	ranges = <0x0 0x100100 0x200>;
+	cell-index = <0>;
+	dma-channel@0 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x0 0x80>;
+		cell-index = <0>;
+		interrupts = <28 2 0 0>;
+	};
+	dma-channel@80 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x80 0x80>;
+		cell-index = <1>;
+		interrupts = <29 2 0 0>;
+	};
+	dma-channel@100 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x100 0x80>;
+		cell-index = <2>;
+		interrupts = <30 2 0 0>;
+	};
+	dma-channel@180 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x180 0x80>;
+		cell-index = <3>;
+		interrupts = <31 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi
new file mode 100644
index 0000000..ecf5e18
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi
@@ -0,0 +1,66 @@
+/*
+ * QorIQ DMA device tree stub [ controller @ offset 0x101000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma1: dma@101300 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,eloplus-dma";
+	reg = <0x101300 0x4>;
+	ranges = <0x0 0x101100 0x200>;
+	cell-index = <1>;
+	dma-channel@0 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x0 0x80>;
+		cell-index = <0>;
+		interrupts = <32 2 0 0>;
+	};
+	dma-channel@80 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x80 0x80>;
+		cell-index = <1>;
+		interrupts = <33 2 0 0>;
+	};
+	dma-channel@100 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x100 0x80>;
+		cell-index = <2>;
+		interrupts = <34 2 0 0>;
+	};
+	dma-channel@180 {
+		compatible = "fsl,eloplus-dma-channel";
+		reg = <0x180 0x80>;
+		cell-index = <3>;
+		interrupts = <35 2 0 0>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi
new file mode 100644
index 0000000..225c07b
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi
@@ -0,0 +1,51 @@
+/*
+ * QorIQ DUART device tree stub [ controller @ offset 0x11c000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+serial0: serial@11c500 {
+	cell-index = <0>;
+	device_type = "serial";
+	compatible = "fsl,ns16550", "ns16550";
+	reg = <0x11c500 0x100>;
+	clock-frequency = <0>;
+	interrupts = <36 2 0 0>;
+};
+
+serial1: serial@11c600 {
+	cell-index = <1>;
+	device_type = "serial";
+	compatible = "fsl,ns16550", "ns16550";
+	reg = <0x11c600 0x100>;
+	clock-frequency = <0>;
+	interrupts = <36 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi
new file mode 100644
index 0000000..d23233a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi
@@ -0,0 +1,51 @@
+/*
+ * QorIQ DUART device tree stub [ controller @ offset 0x11d000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+serial2: serial@11d500 {
+	cell-index = <2>;
+	device_type = "serial";
+	compatible = "fsl,ns16550", "ns16550";
+	reg = <0x11d500 0x100>;
+	clock-frequency = <0>;
+	interrupts = <37 2 0 0>;
+};
+
+serial3: serial@11d600 {
+	cell-index = <3>;
+	device_type = "serial";
+	compatible = "fsl,ns16550", "ns16550";
+	reg = <0x11d600 0x100>;
+	clock-frequency = <0>;
+	interrupts = <37 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi
new file mode 100644
index 0000000..20835ae
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi
@@ -0,0 +1,40 @@
+/*
+ * QorIQ eSDHC device tree stub [ controller @ offset 0x114000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sdhc: sdhc@114000 {
+	compatible = "fsl,esdhc";
+	reg = <0x114000 0x1000>;
+	interrupts = <48 2 0 0>;
+	clock-frequency = <0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi
new file mode 100644
index 0000000..6db0697
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ eSPI device tree stub [ controller @ offset 0x110000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+spi@110000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,mpc8536-espi";
+	reg = <0x110000 0x1000>;
+	interrupts = <53 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi
new file mode 100644
index 0000000..cf714f5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ GPIO device tree stub [ controller @ offset 0x130000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio0: gpio@130000 {
+	compatible = "fsl,qoriq-gpio";
+	reg = <0x130000 0x1000>;
+	interrupts = <55 2 0 0>;
+	#gpio-cells = <2>;
+	gpio-controller;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi
new file mode 100644
index 0000000..5f9bf7d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi
@@ -0,0 +1,53 @@
+/*
+ * QorIQ I2C device tree stub [ controller @ offset 0x118000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@118000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	cell-index = <0>;
+	compatible = "fsl-i2c";
+	reg = <0x118000 0x100>;
+	interrupts = <38 2 0 0>;
+	dfsrr;
+};
+
+i2c@118100 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	cell-index = <1>;
+	compatible = "fsl-i2c";
+	reg = <0x118100 0x100>;
+	interrupts = <38 2 0 0>;
+	dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi
new file mode 100644
index 0000000..7989bf5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi
@@ -0,0 +1,53 @@
+/*
+ * QorIQ I2C device tree stub [ controller @ offset 0x119000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@119000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	cell-index = <2>;
+	compatible = "fsl-i2c";
+	reg = <0x119000 0x100>;
+	interrupts = <39 2 0 0>;
+	dfsrr;
+};
+
+i2c@119100 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	cell-index = <3>;
+	compatible = "fsl-i2c";
+	reg = <0x119100 0x100>;
+	interrupts = <39 2 0 0>;
+	dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
new file mode 100644
index 0000000..b9bada6
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
@@ -0,0 +1,106 @@
+/*
+ * QorIQ MPIC device tree stub [ controller @ offset 0x40000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mpic: pic@40000 {
+	interrupt-controller;
+	#address-cells = <0>;
+	#interrupt-cells = <4>;
+	reg = <0x40000 0x40000>;
+	compatible = "fsl,mpic", "chrp,open-pic";
+	device_type = "open-pic";
+	clock-frequency = <0x0>;
+};
+
+timer@41100 {
+	compatible = "fsl,mpic-global-timer";
+	reg = <0x41100 0x100 0x41300 4>;
+	interrupts = <0 0 3 0
+		      1 0 3 0
+		      2 0 3 0
+		      3 0 3 0>;
+};
+
+msi0: msi@41600 {
+	compatible = "fsl,mpic-msi";
+	reg = <0x41600 0x200>;
+	msi-available-ranges = <0 0x100>;
+	interrupts = <
+		0xe0 0 0 0
+		0xe1 0 0 0
+		0xe2 0 0 0
+		0xe3 0 0 0
+		0xe4 0 0 0
+		0xe5 0 0 0
+		0xe6 0 0 0
+		0xe7 0 0 0>;
+};
+
+msi1: msi@41800 {
+	compatible = "fsl,mpic-msi";
+	reg = <0x41800 0x200>;
+	msi-available-ranges = <0 0x100>;
+	interrupts = <
+		0xe8 0 0 0
+		0xe9 0 0 0
+		0xea 0 0 0
+		0xeb 0 0 0
+		0xec 0 0 0
+		0xed 0 0 0
+		0xee 0 0 0
+		0xef 0 0 0>;
+};
+
+msi2: msi@41a00 {
+	compatible = "fsl,mpic-msi";
+	reg = <0x41a00 0x200>;
+	msi-available-ranges = <0 0x100>;
+	interrupts = <
+		0xf0 0 0 0
+		0xf1 0 0 0
+		0xf2 0 0 0
+		0xf3 0 0 0
+		0xf4 0 0 0
+		0xf5 0 0 0
+		0xf6 0 0 0
+		0xf7 0 0 0>;
+};
+
+timer@42100 {
+	compatible = "fsl,mpic-global-timer";
+	reg = <0x42100 0x100 0x42300 4>;
+	interrupts = <4 0 3 0
+		      5 0 3 0
+		      6 0 3 0
+		      7 0 3 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi
new file mode 100644
index 0000000..ca7fec79
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi
@@ -0,0 +1,68 @@
+/*
+ * QorIQ RIO Message Unit device tree stub [ controller @ offset 0xd3000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+rmu: rmu@d3000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "fsl,srio-rmu";
+	reg = <0xd3000 0x500>;
+	ranges = <0x0 0xd3000 0x500>;
+
+	message-unit@0 {
+		compatible = "fsl,srio-msg-unit";
+		reg = <0x0 0x100>;
+		interrupts = <
+			60 2 0 0  /* msg1_tx_irq */
+			61 2 0 0>;/* msg1_rx_irq */
+	};
+	message-unit@100 {
+		compatible = "fsl,srio-msg-unit";
+		reg = <0x100 0x100>;
+		interrupts = <
+			62 2 0 0  /* msg2_tx_irq */
+			63 2 0 0>;/* msg2_rx_irq */
+	};
+	doorbell-unit@400 {
+		compatible = "fsl,srio-dbell-unit";
+		reg = <0x400 0x80>;
+		interrupts = <
+			56 2 0 0  /* bell_outb_irq */
+			57 2 0 0>;/* bell_inb_irq */
+	};
+	port-write-unit@4e0 {
+		compatible = "fsl,srio-port-write-unit";
+		reg = <0x4e0 0x20>;
+		interrupts = <16 2 1 11>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi
new file mode 100644
index 0000000..b642047
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi
@@ -0,0 +1,39 @@
+/*
+ * QorIQ SATAv2 device tree stub [ controller @ offset 0x220000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@220000 {
+	compatible = "fsl,pq-sata-v2";
+	reg = <0x220000 0x1000>;
+	interrupts = <68 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi
new file mode 100644
index 0000000..c573702
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi
@@ -0,0 +1,39 @@
+/*
+ * QorIQ SATAv2 device tree stub [ controller @ offset 0x221000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@221000 {
+	compatible = "fsl,pq-sata-v2";
+	reg = <0x221000 0x1000>;
+	interrupts = <69 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
new file mode 100644
index 0000000..0cbbac3
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
@@ -0,0 +1,100 @@
+/*
+ * QorIQ Sec/Crypto 4.0 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+	compatible = "fsl,sec-v4.0";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg = <0x300000 0x10000>;
+	ranges = <0 0x300000 0x10000>;
+	interrupts = <92 2 0 0>;
+
+	sec_jr0: jr@1000 {
+		compatible = "fsl,sec-v4.0-job-ring";
+		reg = <0x1000 0x1000>;
+		interrupts = <88 2 0 0>;
+	};
+
+	sec_jr1: jr@2000 {
+		compatible = "fsl,sec-v4.0-job-ring";
+		reg = <0x2000 0x1000>;
+		interrupts = <89 2 0 0>;
+	};
+
+	sec_jr2: jr@3000 {
+		compatible = "fsl,sec-v4.0-job-ring";
+		reg = <0x3000 0x1000>;
+		interrupts = <90 2 0 0>;
+	};
+
+	sec_jr3: jr@4000 {
+		compatible = "fsl,sec-v4.0-job-ring";
+		reg = <0x4000 0x1000>;
+		interrupts = <91 2 0 0>;
+	};
+
+	rtic@6000 {
+		compatible = "fsl,sec-v4.0-rtic";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x6000 0x100>;
+		ranges = <0x0 0x6100 0xe00>;
+
+		rtic_a: rtic-a@0 {
+			compatible = "fsl,sec-v4.0-rtic-memory";
+			reg = <0x00 0x20 0x100 0x80>;
+		};
+
+		rtic_b: rtic-b@20 {
+			compatible = "fsl,sec-v4.0-rtic-memory";
+			reg = <0x20 0x20 0x200 0x80>;
+		};
+
+		rtic_c: rtic-c@40 {
+			compatible = "fsl,sec-v4.0-rtic-memory";
+			reg = <0x40 0x20 0x300 0x80>;
+		};
+
+		rtic_d: rtic-d@60 {
+			compatible = "fsl,sec-v4.0-rtic-memory";
+			reg = <0x60 0x20 0x500 0x80>;
+		};
+	};
+};
+
+sec_mon: sec_mon@314000 {
+	compatible = "fsl,sec-v4.0-mon";
+	reg = <0x314000 0x1000>;
+	interrupts = <93 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi
new file mode 100644
index 0000000..3308986
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi
@@ -0,0 +1,109 @@
+/*
+ * QorIQ Sec/Crypto 4.1 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+	compatible = "fsl,sec-v4.1", "fsl,sec-v4.0";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg		 = <0x300000 0x10000>;
+	ranges		 = <0 0x300000 0x10000>;
+	interrupts	 = <92 2 0 0>;
+
+	sec_jr0: jr@1000 {
+		compatible = "fsl,sec-v4.1-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x1000 0x1000>;
+		interrupts = <88 2 0 0>;
+	};
+
+	sec_jr1: jr@2000 {
+		compatible = "fsl,sec-v4.1-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x2000 0x1000>;
+		interrupts = <89 2 0 0>;
+	};
+
+	sec_jr2: jr@3000 {
+		compatible = "fsl,sec-v4.1-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x3000 0x1000>;
+		interrupts = <90 2 0 0>;
+	};
+
+	sec_jr3: jr@4000 {
+		compatible = "fsl,sec-v4.1-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x4000 0x1000>;
+		interrupts = <91 2 0 0>;
+	};
+
+	rtic@6000 {
+		compatible = "fsl,sec-v4.1-rtic",
+			     "fsl,sec-v4.0-rtic";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x6000 0x100>;
+		ranges = <0x0 0x6100 0xe00>;
+
+		rtic_a: rtic-a@0 {
+			compatible = "fsl,sec-v4.1-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x00 0x20 0x100 0x80>;
+		};
+
+		rtic_b: rtic-b@20 {
+			compatible = "fsl,sec-v4.1-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x20 0x20 0x200 0x80>;
+		};
+
+		rtic_c: rtic-c@40 {
+			compatible = "fsl,sec-v4.1-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x40 0x20 0x300 0x80>;
+		};
+
+		rtic_d: rtic-d@60 {
+			compatible = "fsl,sec-v4.1-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x60 0x20 0x500 0x80>;
+		};
+	};
+};
+
+sec_mon: sec_mon@314000 {
+	compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon";
+	reg = <0x314000 0x1000>;
+	interrupts = <93 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
new file mode 100644
index 0000000..7990e0d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
@@ -0,0 +1,109 @@
+/*
+ * QorIQ Sec/Crypto 4.2 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+	compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg		 = <0x300000 0x10000>;
+	ranges		 = <0 0x300000 0x10000>;
+	interrupts	 = <92 2 0 0>;
+
+	sec_jr0: jr@1000 {
+		compatible = "fsl,sec-v4.2-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x1000 0x1000>;
+		interrupts = <88 2 0 0>;
+	};
+
+	sec_jr1: jr@2000 {
+		compatible = "fsl,sec-v4.2-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x2000 0x1000>;
+		interrupts = <89 2 0 0>;
+	};
+
+	sec_jr2: jr@3000 {
+		compatible = "fsl,sec-v4.2-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x3000 0x1000>;
+		interrupts = <90 2 0 0>;
+	};
+
+	sec_jr3: jr@4000 {
+		compatible = "fsl,sec-v4.2-job-ring",
+			     "fsl,sec-v4.0-job-ring";
+		reg = <0x4000 0x1000>;
+		interrupts = <91 2 0 0>;
+	};
+
+	rtic@6000 {
+		compatible = "fsl,sec-v4.2-rtic",
+			     "fsl,sec-v4.0-rtic";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x6000 0x100>;
+		ranges = <0x0 0x6100 0xe00>;
+
+		rtic_a: rtic-a@0 {
+			compatible = "fsl,sec-v4.2-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x00 0x20 0x100 0x80>;
+		};
+
+		rtic_b: rtic-b@20 {
+			compatible = "fsl,sec-v4.2-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x20 0x20 0x200 0x80>;
+		};
+
+		rtic_c: rtic-c@40 {
+			compatible = "fsl,sec-v4.2-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x40 0x20 0x300 0x80>;
+		};
+
+		rtic_d: rtic-d@60 {
+			compatible = "fsl,sec-v4.2-rtic-memory",
+				     "fsl,sec-v4.0-rtic-memory";
+			reg = <0x60 0x20 0x500 0x80>;
+		};
+	};
+};
+
+sec_mon: sec_mon@314000 {
+	compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
+	reg = <0x314000 0x1000>;
+	interrupts = <93 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi
new file mode 100644
index 0000000..4dd6f84
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ USB DR device tree stub [ controller @ offset 0x211000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@211000 {
+	compatible = "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+	reg = <0x211000 0x1000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupts = <45 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi
new file mode 100644
index 0000000..f053835
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ USB Host device tree stub [ controller @ offset 0x210000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@210000 {
+	compatible = "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+	reg = <0x210000 0x1000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	interrupts = <44 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/gef_ppc9a.dts b/arch/powerpc/boot/dts/gef_ppc9a.dts
index 2266bbb..38dcb96 100644
--- a/arch/powerpc/boot/dts/gef_ppc9a.dts
+++ b/arch/powerpc/boot/dts/gef_ppc9a.dts
@@ -339,7 +339,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <0x2a 0x2>;
@@ -349,7 +349,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <0x1c 0x2>;
diff --git a/arch/powerpc/boot/dts/gef_sbc310.dts b/arch/powerpc/boot/dts/gef_sbc310.dts
index 429e87d..5ab8932 100644
--- a/arch/powerpc/boot/dts/gef_sbc310.dts
+++ b/arch/powerpc/boot/dts/gef_sbc310.dts
@@ -337,7 +337,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <0x2a 0x2>;
@@ -347,7 +347,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <0x1c 0x2>;
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index d81201a..d5341f5 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -337,7 +337,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <0x2a 0x2>;
@@ -347,7 +347,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <0x1c 0x2>;
diff --git a/arch/powerpc/boot/dts/klondike.dts b/arch/powerpc/boot/dts/klondike.dts
new file mode 100644
index 0000000..8c94290
--- /dev/null
+++ b/arch/powerpc/boot/dts/klondike.dts
@@ -0,0 +1,227 @@
+/*
+ * Device Tree for Klondike (APM8018X) board.
+ *
+ * Copyright (c) 2010, Applied Micro Circuits Corporation
+ * Author: Tanmay Inamdar <tinamdar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	model = "apm,klondike";
+	compatible = "apm,klondike";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		ethernet0 = &EMAC0;
+		ethernet1 = &EMAC1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,apm8018x";
+			reg = <0x00000000>;
+			clock-frequency = <300000000>; /* Filled in by U-Boot */
+			timebase-frequency = <300000000>; /* Filled in by U-Boot */
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <16384>; /* 16 kB */
+			d-cache-size = <16384>; /* 16 kB */
+			dcr-controller;
+			dcr-access-method = "native";
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x20000000>; /* Filled in by U-Boot */
+	};
+
+	UIC0: interrupt-controller {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <0>;
+		dcr-reg = <0x0c0 0x010>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+	};
+
+	UIC1: interrupt-controller1 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <1>;
+		dcr-reg = <0x0d0 0x010>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	UIC2: interrupt-controller2 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <2>;
+		dcr-reg = <0x0e0 0x010>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x0a 0x4 0x0b 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	UIC3: interrupt-controller3 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <3>;
+		dcr-reg = <0x0f0 0x010>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x10 0x4 0x11 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	plb {
+		compatible = "ibm,plb4";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		clock-frequency = <0>; /* Filled in by U-Boot */
+
+		SDRAM0: memory-controller {
+			compatible = "ibm,sdram-apm8018x";
+			dcr-reg = <0x010 0x002>;
+		};
+
+		MAL0: mcmal {
+			compatible = "ibm,mcmal2";
+			dcr-reg = <0x180 0x062>;
+			num-tx-chans = <2>;
+			num-rx-chans = <16>;
+			#address-cells = <0>;
+			#size-cells = <0>;
+			interrupt-parent = <&UIC1>;
+			interrupts = </*TXEOB*/   0x6 0x4
+					/*RXEOB*/ 0x7 0x4
+					/*SERR*/  0x1 0x4
+					/*TXDE*/  0x2 0x4
+					/*RXDE*/  0x3 0x4>;
+		};
+
+		POB0: opb {
+			compatible = "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x20000000 0x20000000 0x30000000
+				  0x50000000 0x50000000 0x10000000
+				  0x60000000 0x60000000 0x10000000
+				  0xFE000000 0xFE000000 0x00010000>;
+			dcr-reg = <0x100 0x020>;
+			clock-frequency = <300000000>; /* Filled in by U-Boot */
+
+			RGMII0: emac-rgmii@400a2000 {
+				compatible = "ibm,rgmii";
+				reg = <0x400a2000 0x00000010>;
+				has-mdio;
+			};
+
+			TAH0: emac-tah@400a3000 {
+				compatible = "ibm,tah";
+				reg = <0x400a3000 0x100>;
+			};
+
+			TAH1: emac-tah@400a4000 {
+				compatible = "ibm,tah";
+				reg = <0x400a4000 0x100>;
+			};
+
+			EMAC0: ethernet@400a0000 {
+				compatible = "ibm,emac4", "ibm-emac4sync";
+				interrupt-parent = <&EMAC0>;
+				interrupts = <0x0>;
+				#interrupt-cells = <1>;
+				#address-cells = <0>;
+				#size-cells = <0>;
+				interrupt-map = </*Status*/ 0x0 &UIC0 0x13 0x4>;
+				reg = <0x400a0000 0x00000100>;
+				local-mac-address = [000000000000]; /* Filled in by U-Boot */
+				mal-device = <&MAL0>;
+				mal-tx-channel = <0x0>;
+				mal-rx-channel = <0x0>;
+				cell-index = <0>;
+				max-frame-size = <9000>;
+				rx-fifo-size = <4096>;
+				tx-fifo-size = <2048>;
+				phy-mode = "rgmii";
+				phy-address = <0x2>;
+				turbo = "no";
+				phy-map = <0x00000000>;
+				rgmii-device = <&RGMII0>;
+				rgmii-channel = <0>;
+				tah-device = <&TAH0>;
+				tah-channel = <0>;
+				has-inverted-stacr-oc;
+				has-new-stacr-staopc;
+			};
+
+			EMAC1: ethernet@400a1000 {
+				compatible = "ibm,emac4", "ibm-emac4sync";
+				status = "disabled";
+				interrupt-parent = <&EMAC1>;
+				interrupts = <0x0>;
+				#interrupt-cells = <1>;
+				#address-cells = <0>;
+				#size-cells = <0>;
+				interrupt-map = </*Status*/ 0x0 &UIC0 0x14 0x4>;
+				reg = <0x400a1000 0x00000100>;
+				local-mac-address = [000000000000]; /* Filled in by U-Boot */
+				mal-device = <&MAL0>;
+				mal-tx-channel = <1>;
+				mal-rx-channel = <8>;
+				cell-index = <1>;
+				max-frame-size = <9000>;
+				rx-fifo-size = <4096>;
+				tx-fifo-size = <2048>;
+				phy-mode = "rgmii";
+				phy-address = <0x3>;
+				turbo = "no";
+				phy-map = <0x00000000>;
+				rgmii-device = <&RGMII0>;
+				rgmii-channel = <1>;
+				tah-device = <&TAH1>;
+				tah-channel = <0>;
+				has-inverted-stacr-oc;
+				has-new-stacr-staopc;
+				mdio-device = <&EMAC0>;
+			};
+		};
+	};
+
+	chosen {
+		linux,stdout-path = "/plb/opb/serial@50001000";
+	};
+};
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts
index d16bae1..983aee1 100644
--- a/arch/powerpc/boot/dts/kmeter1.dts
+++ b/arch/powerpc/boot/dts/kmeter1.dts
@@ -80,7 +80,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <264000000>;
 			interrupts = <9 0x8>;
diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
index 8d725d1..0a45451 100644
--- a/arch/powerpc/boot/dts/kuroboxHD.dts
+++ b/arch/powerpc/boot/dts/kuroboxHD.dts
@@ -84,7 +84,7 @@
 		serial0: serial@80004500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x80004500 0x8>;
 			clock-frequency = <97553800>;
 			current-speed = <9600>;
@@ -95,7 +95,7 @@
 		serial1: serial@80004600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x80004600 0x8>;
 			clock-frequency = <97553800>;
 			current-speed = <57600>;
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
index b13a11e..0e758b3 100644
--- a/arch/powerpc/boot/dts/kuroboxHG.dts
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -84,7 +84,7 @@
 		serial0: serial@80004500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x80004500 0x8>;
 			clock-frequency = <130041000>;
 			current-speed = <9600>;
@@ -95,7 +95,7 @@
 		serial1: serial@80004600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x80004600 0x8>;
 			clock-frequency = <130041000>;
 			current-speed = <57600>;
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 697b3f6..22b0832 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -233,7 +233,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <133333333>;
 			interrupts = <9 0x8>;
@@ -243,7 +243,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <133333333>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a0bd188..f66d10d 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -208,7 +208,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <133333333>;
 			interrupts = <9 0x8>;
@@ -218,7 +218,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <133333333>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index ac1eb32..1c836c6 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -261,7 +261,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -271,7 +271,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 4dd08c3..811848e 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -265,7 +265,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <133333333>;
 			interrupts = <9 0x8>;
@@ -275,7 +275,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <133333333>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 05ad8c9..da9c72d 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -105,7 +105,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -115,7 +115,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index f4fadb23a..ff7b15b 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -83,7 +83,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -93,7 +93,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 505dc84..2608679 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -283,7 +283,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;		// from bootloader
 			interrupts = <9 0x8>;
@@ -293,7 +293,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;		// from bootloader
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index eb73211..6cd044d 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -189,7 +189,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;		// from bootloader
 			interrupts = <9 0x8>;
@@ -199,7 +199,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;		// from bootloader
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 230febb..4552864 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -242,7 +242,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -252,7 +252,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 45cfa1c5..c0e450a 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -136,7 +136,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <264000000>;
 			interrupts = <9 0x8>;
@@ -146,7 +146,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <264000000>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index bdf4459..b6e9aec 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -102,7 +102,7 @@
 
 		serial0: serial@4500 {
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			interrupts = <9 8>;
 			interrupt-parent = <&ipic>;
@@ -112,7 +112,7 @@
 
 		serial1: serial@4600 {
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			interrupts = <10 8>;
 			interrupt-parent = <&ipic>;
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index 855782c..cfccef5 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -276,7 +276,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -286,7 +286,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index dbc1b98..353deff 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -321,7 +321,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -331,7 +331,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8377_wlan.dts b/arch/powerpc/boot/dts/mpc8377_wlan.dts
index 9ea7830..ef4a305 100644
--- a/arch/powerpc/boot/dts/mpc8377_wlan.dts
+++ b/arch/powerpc/boot/dts/mpc8377_wlan.dts
@@ -304,7 +304,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -314,7 +314,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index f70cf60..538fcb9 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -315,7 +315,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -325,7 +325,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 3447eb9..32333a9 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -321,7 +321,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -331,7 +331,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index 645ec51..5387092 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -313,7 +313,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -323,7 +323,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 15560c6..46224c2 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -319,7 +319,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -329,7 +329,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index a75c10e..c158815 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -9,24 +9,11 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8536si-pre.dtsi"
 
 / {
 	model = "fsl,mpc8536ds";
 	compatible = "fsl,mpc8536ds";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		pci3 = &pci3;
-	};
 
 	cpus {
 		#cpus = <1>;
@@ -45,403 +32,34 @@
 		reg = <0 0 0 0>;	// Filled by U-Boot
 	};
 
-	soc@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
+	lbc: localbus@ffe05000 {
+		reg = <0 0xffe05000 0 0x1000>;
+	};
+
+	board_soc: soc: soc@ffe00000 {
 		ranges = <0x0 0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8536-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8536-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 0x2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8536-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <16 0x2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 0x2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 0x2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-			rtc@68 {
-				compatible = "dallas,ds3232";
-				reg = <0x68>;
-				interrupts = <0 0x1>;
-				interrupt-parent = <&mpic>;
-			};
-		};
-
-		spi@7000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc8536-espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2>;
-			interrupt-parent = <&mpic>;
-			fsl,espi-num-chipselects = <4>;
-
-			flash@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				compatible = "spansion,s25sl12801";
-				reg = <0>;
-				spi-max-frequency = <40000000>;
-				partition@u-boot {
-					label = "u-boot";
-					reg = <0x00000000 0x00100000>;
-					read-only;
-				};
-				partition@kernel {
-					label = "kernel";
-					reg = <0x00100000 0x00500000>;
-					read-only;
-				};
-				partition@dtb {
-					label = "dtb";
-					reg = <0x00600000 0x00100000>;
-					read-only;
-				};
-				partition@fs {
-					label = "file system";
-					reg = <0x00700000 0x00900000>;
-				};
-			};
-			flash@1 {
-				compatible = "spansion,s25sl12801";
-				reg = <1>;
-				spi-max-frequency = <40000000>;
-			};
-			flash@2 {
-				compatible = "spansion,s25sl12801";
-				reg = <2>;
-				spi-max-frequency = <40000000>;
-			};
-			flash@3 {
-				compatible = "spansion,s25sl12801";
-				reg = <3>;
-				spi-max-frequency = <40000000>;
-			};
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8536-dma", "fsl,eloplus-dma";
-			reg = <0x21300 4>;
-			ranges = <0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		usb@22000 {
-			compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
-			reg = <0x22000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
-			phy_type = "ulpi";
-		};
-
-		usb@23000 {
-			compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
-			reg = <0x23000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <46 0x2>;
-			phy_type = "ulpi";
-		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi0>;
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 0x1>;
-					reg = <0>;
-					device_type = "ethernet-phy";
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 0x1>;
-					reg = <1>;
-					device_type = "ethernet-phy";
-				};
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet1: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi1>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		usb@2b000 {
-			compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr";
-			reg = <0x2b000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <60 0x2>;
-			dr_mode = "peripheral";
-			phy_type = "ulpi";
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x2>;
-			interrupt-parent = <&mpic>;
-			clock-frequency = <250000000>;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
-				     "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x9fe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		sata@18000 {
-			compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
-			reg = <0x18000 0x1000>;
-			cell-index = <1>;
-			interrupts = <74 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		sata@19000 {
-			compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
-			reg = <0x19000 0x1000>;
-			cell-index = <2>;
-			interrupts = <41 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,mpc8548-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-			big-endian;
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8536-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
 	};
 
 	pci0: pci@ffe08000 {
-		compatible = "fsl,mpc8540-pci";
-		device_type = "pci";
+		reg = <0 0xffe08000 0 0x1000>;
+		ranges = <0x02000000 0 0x80000000 0 0x80000000 0 0x10000000
+			  0x01000000 0 0x00000000 0 0xffc00000 0 0x00010000>;
+		clock-frequency = <66666666>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 
 			/* IDSEL 0x11 J17 Slot 1 */
-			0x8800 0 0 1 &mpic 1 1
-			0x8800 0 0 2 &mpic 2 1
-			0x8800 0 0 3 &mpic 3 1
-			0x8800 0 0 4 &mpic 4 1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <24 0x2>;
-		bus-range = <0 0xff>;
-		ranges = <0x02000000 0 0x80000000 0 0x80000000 0 0x10000000
-			  0x01000000 0 0x00000000 0 0xffc00000 0 0x00010000>;
-		clock-frequency = <66666666>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe08000 0 0x1000>;
+			0x8800 0 0 1 &mpic 1 1 0 0
+			0x8800 0 0 2 &mpic 2 1 0 0
+			0x8800 0 0 3 &mpic 3 1 0 0
+			0x8800 0 0 4 &mpic 4 1 0 0>;
 	};
 
 	pci1: pcie@ffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe09000 0 0x1000>;
-		bus-range = <0 0xff>;
 		ranges = <0x02000000 0 0x98000000 0 0x98000000 0 0x08000000
 			  0x01000000 0 0x00000000 0 0xffc20000 0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 0x2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 4 1
-			0000 0 0 2 &mpic 5 1
-			0000 0 0 3 &mpic 6 1
-			0000 0 0 4 &mpic 7 1
-			>;
 		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x02000000 0 0x98000000
 				  0x02000000 0 0x98000000
 				  0 0x08000000
@@ -453,31 +71,10 @@
 	};
 
 	pci2: pcie@ffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe0a000 0 0x1000>;
-		bus-range = <0 0xff>;
 		ranges = <0x02000000 0 0x90000000 0 0x90000000 0 0x08000000
 			  0x01000000 0 0x00000000 0 0xffc10000 0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 0x2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 0 1
-			0000 0 0 2 &mpic 1 1
-			0000 0 0 3 &mpic 2 1
-			0000 0 0 4 &mpic 3 1
-			>;
 		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x02000000 0 0x90000000
 				  0x02000000 0 0x90000000
 				  0 0x08000000
@@ -489,32 +86,10 @@
 	};
 
 	pci3: pcie@ffe0b000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe0b000 0 0x1000>;
-		bus-range = <0 0xff>;
 		ranges = <0x02000000 0 0xa0000000 0 0xa0000000 0 0x20000000
 			  0x01000000 0 0x00000000 0 0xffc30000 0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <27 0x2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 8 1
-			0000 0 0 2 &mpic 9 1
-			0000 0 0 3 &mpic 10 1
-			0000 0 0 4 &mpic 11 1
-			>;
-
 		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x02000000 0 0xa0000000
 				  0x02000000 0 0xa0000000
 				  0 0x20000000
@@ -525,3 +100,6 @@
 		};
 	};
 };
+
+/include/ "fsl/mpc8536si-post.dtsi"
+/include/ "mpc8536ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
new file mode 100644
index 0000000..1462e4c
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -0,0 +1,141 @@
+/*
+ * MPC8536DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_soc {
+	i2c@3100 {
+		rtc@68 {
+			compatible = "dallas,ds3232";
+			reg = <0x68>;
+			interrupts = <0 0x1 0 0>;
+		};
+	};
+
+	spi@7000 {
+		flash@0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "spansion,s25sl12801";
+			reg = <0>;
+			spi-max-frequency = <40000000>;
+			partition@u-boot {
+				label = "u-boot";
+				reg = <0x00000000 0x00100000>;
+				read-only;
+			};
+			partition@kernel {
+				label = "kernel";
+				reg = <0x00100000 0x00500000>;
+				read-only;
+			};
+			partition@dtb {
+				label = "dtb";
+				reg = <0x00600000 0x00100000>;
+				read-only;
+			};
+			partition@fs {
+				label = "file system";
+				reg = <0x00700000 0x00900000>;
+			};
+		};
+		flash@1 {
+			compatible = "spansion,s25sl12801";
+			reg = <1>;
+			spi-max-frequency = <40000000>;
+		};
+		flash@2 {
+			compatible = "spansion,s25sl12801";
+			reg = <2>;
+			spi-max-frequency = <40000000>;
+		};
+		flash@3 {
+			compatible = "spansion,s25sl12801";
+			reg = <3>;
+			spi-max-frequency = <40000000>;
+		};
+	};
+
+	usb@22000 {
+		phy_type = "ulpi";
+	};
+
+	usb@23000 {
+		phy_type = "ulpi";
+	};
+
+	enet0: ethernet@24000 {
+		tbi-handle = <&tbi0>;
+		phy-handle = <&phy1>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	mdio@24520 {
+		phy0: ethernet-phy@0 {
+			interrupts = <10 0x1 0 0>;
+			reg = <0>;
+			device_type = "ethernet-phy";
+		};
+		phy1: ethernet-phy@1 {
+			interrupts = <10 0x1 0 0>;
+			reg = <1>;
+			device_type = "ethernet-phy";
+		};
+		tbi0: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet2: ethernet@26000 {
+		tbi-handle = <&tbi1>;
+		phy-handle = <&phy0>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	mdio@26520 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,gianfar-tbi";
+		reg = <0x26520 0x20>;
+
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	usb@2b000 {
+		dr_mode = "peripheral";
+		phy_type = "ulpi";
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
index d95b260..8f4b929 100644
--- a/arch/powerpc/boot/dts/mpc8536ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
@@ -1,5 +1,5 @@
 /*
- * MPC8536 DS Device Tree Source
+ * MPC8536DS Device Tree Source (36-bit address map)
  *
  * Copyright 2008-2009 Freescale Semiconductor, Inc.
  *
@@ -9,24 +9,11 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8536si-pre.dtsi"
 
 / {
 	model = "fsl,mpc8536ds";
 	compatible = "fsl,mpc8536ds";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		pci3 = &pci3;
-	};
 
 	cpus {
 		#cpus = <1>;
@@ -45,351 +32,34 @@
 		reg = <0 0 0 0>;	// Filled by U-Boot
 	};
 
-	soc@fffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x0 0xf 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8536-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8536-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 0x2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8536-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <16 0x2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 0x2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 0x2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-			rtc@68 {
-				compatible = "dallas,ds3232";
-				reg = <0x68>;
-				interrupts = <0 0x1>;
-				interrupt-parent = <&mpic>;
-			};
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8536-dma", "fsl,eloplus-dma";
-			reg = <0x21300 4>;
-			ranges = <0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8536-dma-channel",
-					     "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		usb@22000 {
-			compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
-			reg = <0x22000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
-			phy_type = "ulpi";
-		};
-
-		usb@23000 {
-			compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
-			reg = <0x23000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <46 0x2>;
-			phy_type = "ulpi";
-		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi0>;
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 0x1>;
-					reg = <0>;
-					device_type = "ethernet-phy";
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 0x1>;
-					reg = <1>;
-					device_type = "ethernet-phy";
-				};
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet1: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi1>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		usb@2b000 {
-			compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr";
-			reg = <0x2b000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <60 0x2>;
-			dr_mode = "peripheral";
-			phy_type = "ulpi";
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x2>;
-			interrupt-parent = <&mpic>;
-			clock-frequency = <250000000>;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
-				     "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x9fe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		sata@18000 {
-			compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
-			reg = <0x18000 0x1000>;
-			cell-index = <1>;
-			interrupts = <74 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		sata@19000 {
-			compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
-			reg = <0x19000 0x1000>;
-			cell-index = <2>;
-			interrupts = <41 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,mpc8548-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-			big-endian;
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8536-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
+	lbc: localbus@ffe05000 {
+		reg = <0 0xffe05000 0 0x1000>;
 	};
 
-	pci0: pci@fffe08000 {
-		compatible = "fsl,mpc8540-pci";
-		device_type = "pci";
+	board_soc: soc: soc@fffe00000 {
+		ranges = <0x0 0xf 0xffe00000 0x100000>;
+	};
+
+	pci0: pci@ffe08000 {
+		reg = <0xf 0xffe08000 0 0x1000>;
+		ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>;
+		clock-frequency = <66666666>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 
 			/* IDSEL 0x11 J17 Slot 1 */
-			0x8800 0 0 1 &mpic 1 1
-			0x8800 0 0 2 &mpic 2 1
-			0x8800 0 0 3 &mpic 3 1
-			0x8800 0 0 4 &mpic 4 1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <24 0x2>;
-		bus-range = <0 0xff>;
-		ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000
-			  0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>;
-		clock-frequency = <66666666>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xf 0xffe08000 0 0x1000>;
+			0x8800 0 0 1 &mpic 1 1 0 0
+			0x8800 0 0 2 &mpic 2 1 0 0
+			0x8800 0 0 3 &mpic 3 1 0 0
+			0x8800 0 0 4 &mpic 4 1 0 0>;
 	};
 
-	pci1: pcie@fffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
+	pci1: pcie@ffe09000 {
 		reg = <0xf 0xffe09000 0 0x1000>;
-		bus-range = <0 0xff>;
 		ranges = <0x02000000 0 0xf8000000 0xc 0x18000000 0 0x08000000
 			  0x01000000 0 0x00000000 0xf 0xffc20000 0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 0x2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 4 1
-			0000 0 0 2 &mpic 5 1
-			0000 0 0 3 &mpic 6 1
-			0000 0 0 4 &mpic 7 1
-			>;
 		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x02000000 0 0xf8000000
 				  0x02000000 0 0xf8000000
 				  0 0x08000000
@@ -401,31 +71,10 @@
 	};
 
 	pci2: pcie@fffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe0a000 0 0x1000>;
-		bus-range = <0 0xff>;
 		ranges = <0x02000000 0 0xf8000000 0xc 0x10000000 0 0x08000000
 			  0x01000000 0 0x00000000 0xf 0xffc10000 0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 0x2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 0 1
-			0000 0 0 2 &mpic 1 1
-			0000 0 0 3 &mpic 2 1
-			0000 0 0 4 &mpic 3 1
-			>;
 		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x02000000 0 0xf8000000
 				  0x02000000 0 0xf8000000
 				  0 0x08000000
@@ -437,32 +86,10 @@
 	};
 
 	pci3: pcie@fffe0b000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe0b000 0 0x1000>;
-		bus-range = <0 0xff>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xffc30000 0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <27 0x2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 8 1
-			0000 0 0 2 &mpic 9 1
-			0000 0 0 3 &mpic 10 1
-			0000 0 0 4 &mpic 11 1
-			>;
-
 		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
 				  0 0x20000000
@@ -473,3 +100,6 @@
 		};
 	};
 };
+
+/include/ "fsl/mpc8536si-post.dtsi"
+/include/ "mpc8536ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index 8d1bf0f..f99fb11 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -243,7 +243,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -253,7 +253,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 87ff965..0f5e939 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -209,7 +209,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -219,7 +219,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index d793968..e934987 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -9,339 +9,52 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8544si-pre.dtsi"
+
 / {
 	model = "MPC8544DS";
 	compatible = "MPC8544DS", "MPC85xxDS";
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		pci3 = &pci3;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,8544@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
-	};
 
 	memory {
 		device_type = "memory";
-		reg = <0x0 0x0>;	// Filled by U-Boot
+		reg = <0 0 0 0>;	// Filled by U-Boot
 	};
 
-	soc8544@e0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
+	lbc: localbus@e0005000 {
+		reg = <0 0xe0005000 0 0x1000>;
+	};
 
-		ranges = <0x0 0xe0000000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <10>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8544-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8544-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8544-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x40000>;	// L2, 256K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8544-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8544-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8544-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8544-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8544-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "TSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-			phy-handle = <&phy0>;
-			tbi-handle = <&tbi0>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x0>;
-					device_type = "ethernet-phy";
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x1>;
-					device_type = "ethernet-phy";
-				};
-
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet1: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "TSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-			phy-handle = <&phy1>;
-			tbi-handle = <&tbi1>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,mpc8548-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0xfe>;
-			fsl,descriptor-types-mask = <0x12b0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8544-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
+	board_soc: soc: soc8544@e0000000 {
+		ranges = <0x0 0x0 0xe0000000 0x100000>;
 	};
 
 	pci0: pci@e0008000 {
-		compatible = "fsl,mpc8540-pci";
-		device_type = "pci";
+		reg = <0 0xe0008000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0 0xe1000000 0x0 0x10000>;
+		clock-frequency = <66666666>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 
 			/* IDSEL 0x11 J17 Slot 1 */
-			0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
+			0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
 
 			/* IDSEL 0x12 J16 Slot 2 */
 
-			0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0x9000 0x0 0x0 0x4 &mpic 0x1 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xe1000000 0x0 0x10000>;
-		clock-frequency = <66666666>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe0008000 0x1000>;
+			0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0x9000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0>;
 	};
 
 	pci1: pcie@e0009000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe0009000 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xe1010000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
+		reg = <0x0 0xe0009000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0 0xe1010000 0x0 0x10000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
 				  0x0 0x20000000
@@ -353,31 +66,10 @@
 	};
 
 	pci2: pcie@e000a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe000a000 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
-			  0x1000000 0x0 0x0 0xe1020000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
+		reg = <0x0 0xe000a000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x10000000
+			  0x1000000 0x0 0x00000000 0 0xe1020000 0x0 0x10000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x10000000
@@ -388,44 +80,11 @@
 		};
 	};
 
-	pci3: pcie@e000b000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe000b000 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xb0000000 0xb0000000 0x0 0x100000
-			  0x1000000 0x0 0x0 0xb0100000 0x0 0x100000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <27 2>;
-		interrupt-map-mask = <0xff00 0x0 0x0 0x1>;
-		interrupt-map = <
-			// IDSEL 0x1c  USB
-			0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
-			0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
-			0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
-			0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
-			// IDSEL 0x1d  Audio
-			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
-			// IDSEL 0x1e Legacy
-			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
-			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
-			// IDSEL 0x1f IDE/SATA
-			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
-			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-		>;
-
+	board_pci3: pci3: pcie@e000b000 {
+		reg = <0x0 0xe000b000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xb0000000 0 0xb0000000 0x0 0x100000
+			  0x1000000 0x0 0x00000000 0 0xb0100000 0x0 0x100000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xb0000000
 				  0x2000000 0x0 0xb0000000
 				  0x0 0x100000
@@ -433,70 +92,14 @@
 				  0x1000000 0x0 0x0
 				  0x1000000 0x0 0x0
 				  0x0 0x100000>;
-
-			uli1575@0 {
-				reg = <0x0 0x0 0x0 0x0 0x0>;
-				#size-cells = <2>;
-				#address-cells = <3>;
-				ranges = <0x2000000 0x0 0xb0000000
-					  0x2000000 0x0 0xb0000000
-					  0x0 0x100000
-
-					  0x1000000 0x0 0x0
-					  0x1000000 0x0 0x0
-					  0x0 0x100000>;
-				isa@1e {
-					device_type = "isa";
-					#interrupt-cells = <2>;
-					#size-cells = <1>;
-					#address-cells = <2>;
-					reg = <0xf000 0x0 0x0 0x0 0x0>;
-					ranges = <0x1 0x0
-						  0x1000000 0x0 0x0
-						  0x1000>;
-					interrupt-parent = <&i8259>;
-
-					i8259: interrupt-controller@20 {
-						reg = <0x1 0x20 0x2
-						       0x1 0xa0 0x2
-						       0x1 0x4d0 0x2>;
-						interrupt-controller;
-						device_type = "interrupt-controller";
-						#address-cells = <0>;
-						#interrupt-cells = <2>;
-						compatible = "chrp,iic";
-						interrupts = <9 2>;
-						interrupt-parent = <&mpic>;
-					};
-
-					i8042@60 {
-						#size-cells = <0>;
-						#address-cells = <1>;
-						reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
-						interrupts = <1 3 12 3>;
-						interrupt-parent = <&i8259>;
-
-						keyboard@0 {
-							reg = <0x0>;
-							compatible = "pnpPNP,303";
-						};
-
-						mouse@1 {
-							reg = <0x1>;
-							compatible = "pnpPNP,f03";
-						};
-					};
-
-					rtc@70 {
-						compatible = "pnpPNP,b00";
-						reg = <0x1 0x70 0x2>;
-					};
-
-					gpio@400 {
-						reg = <0x1 0x400 0x80>;
-					};
-				};
-			};
 		};
 	};
 };
+
+/*
+ * mpc8544ds.dtsi must be last to ensure board_pci3 overrides pci3 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/mpc8544si-post.dtsi"
+/include/ "mpc8544ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dtsi b/arch/powerpc/boot/dts/mpc8544ds.dtsi
new file mode 100644
index 0000000..270f64b
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8544ds.dtsi
@@ -0,0 +1,161 @@
+/*
+ * MPC8544DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_soc {
+	enet0: ethernet@24000 {
+		phy-handle = <&phy0>;
+		tbi-handle = <&tbi0>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	mdio@24520 {
+		phy0: ethernet-phy@0 {
+			interrupts = <10 1 0 0>;
+			reg = <0x0>;
+			device_type = "ethernet-phy";
+		};
+		phy1: ethernet-phy@1 {
+			interrupts = <10 1 0 0>;
+			reg = <0x1>;
+			device_type = "ethernet-phy";
+		};
+
+		tbi0: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet2: ethernet@26000 {
+		phy-handle = <&phy1>;
+		tbi-handle = <&tbi1>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	mdio@26520 {
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+};
+
+&board_pci3 {
+	pcie@0 {
+		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+		interrupt-map = <
+			// IDSEL 0x1c  USB
+			0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
+			0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
+			0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
+			0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
+
+			// IDSEL 0x1d  Audio
+			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+			// IDSEL 0x1e Legacy
+			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+			// IDSEL 0x1f IDE/SATA
+			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+			>;
+
+
+		uli1575@0 {
+			reg = <0x0 0x0 0x0 0x0 0x0>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			ranges = <0x2000000 0x0 0xb0000000
+				  0x2000000 0x0 0xb0000000
+				  0x0 0x100000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+			isa@1e {
+				device_type = "isa";
+				#interrupt-cells = <2>;
+				#size-cells = <1>;
+				#address-cells = <2>;
+				reg = <0xf000 0x0 0x0 0x0 0x0>;
+				ranges = <0x1 0x0 0x1000000 0x0 0x0
+					  0x1000>;
+				interrupt-parent = <&i8259>;
+
+				i8259: interrupt-controller@20 {
+					reg = <0x1 0x20 0x2
+					       0x1 0xa0 0x2
+					       0x1 0x4d0 0x2>;
+					interrupt-controller;
+					device_type = "interrupt-controller";
+					#address-cells = <0>;
+					#interrupt-cells = <2>;
+					compatible = "chrp,iic";
+					interrupts = <9 2 0 0>;
+					interrupt-parent = <&mpic>;
+				};
+
+				i8042@60 {
+					#size-cells = <0>;
+					#address-cells = <1>;
+					reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+					interrupts = <1 3 12 3>;
+					interrupt-parent =
+						<&i8259>;
+
+					keyboard@0 {
+						reg = <0x0>;
+						compatible = "pnpPNP,303";
+					};
+
+					mouse@1 {
+						reg = <0x1>;
+						compatible = "pnpPNP,f03";
+					};
+				};
+
+				rtc@70 {
+					compatible = "pnpPNP,b00";
+					reg = <0x1 0x70 0x2>;
+				};
+
+				gpio@400 {
+					reg = <0x1 0x400 0x80>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index a17a557..07b8dae0 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -9,13 +9,11 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8548si-pre.dtsi"
 
 / {
 	model = "MPC8548CDS";
 	compatible = "MPC8548CDS", "MPC85xxCDS";
-	#address-cells = <1>;
-	#size-cells = <1>;
 
 	aliases {
 		ethernet0 = &enet0;
@@ -29,76 +27,19 @@
 		pci2 = &pci2;
 	};
 
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,8548@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;	//  33 MHz, from uboot
-			bus-frequency = <0>;	// 166 MHz
-			clock-frequency = <0>;	// 825 MHz, from uboot
-			next-level-cache = <&L2>;
-		};
-	};
-
 	memory {
 		device_type = "memory";
-		reg = <0x0 0x8000000>;	// 128M at 0x0
+		reg = <0 0 0x0 0x8000000>;	// 128M at 0x0
 	};
 
-	soc8548@e0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x0 0xe0000000 0x100000>;
-		bus-frequency = <0>;
+	lbc: localbus@e0005000 {
+		reg = <0 0xe0005000 0 0x1000>;
+	};
 
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <10>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8548-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8548-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8548-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x80000>;	// L2, 512K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
+	soc: soc8548@e0000000 {
+		ranges = <0 0x0 0xe0000000 0x100000>;
 
 		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-
 			eeprom@50 {
 				compatible = "atmel,24c64";
 				reg = <0x50>;
@@ -116,351 +57,178 @@
 		};
 
 		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-
 			eeprom@50 {
 				compatible = "atmel,24c64";
 				reg = <0x50>;
 			};
 		};
 
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8548-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8548-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8548-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8548-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
 		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi0>;
 			phy-handle = <&phy0>;
+		};
 
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <5 1>;
-					reg = <0x0>;
-					device_type = "ethernet-phy";
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <5 1>;
-					reg = <0x1>;
-					device_type = "ethernet-phy";
-				};
-				phy2: ethernet-phy@2 {
-					interrupt-parent = <&mpic>;
-					interrupts = <5 1>;
-					reg = <0x2>;
-					device_type = "ethernet-phy";
-				};
-				phy3: ethernet-phy@3 {
-					interrupt-parent = <&mpic>;
-					interrupts = <5 1>;
-					reg = <0x3>;
-					device_type = "ethernet-phy";
-				};
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
+		mdio@24520 {
+			phy0: ethernet-phy@0 {
+				interrupts = <5 1 0 0>;
+				reg = <0x0>;
+				device_type = "ethernet-phy";
+			};
+			phy1: ethernet-phy@1 {
+				interrupts = <5 1 0 0>;
+				reg = <0x1>;
+				device_type = "ethernet-phy";
+			};
+			phy2: ethernet-phy@2 {
+				interrupts = <5 1 0 0>;
+				reg = <0x2>;
+				device_type = "ethernet-phy";
+			};
+			phy3: ethernet-phy@3 {
+				interrupts = <5 1 0 0>;
+				reg = <0x3>;
+				device_type = "ethernet-phy";
+			};
+			tbi0: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
 			};
 		};
 
 		enet1: ethernet@25000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x25000 0x1000>;
-			ranges = <0x0 0x25000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <35 2 36 2 40 2>;
-			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi1>;
 			phy-handle = <&phy1>;
+		};
 
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
+		mdio@25520 {
+			tbi1: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
 			};
 		};
 
 		enet2: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <2>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi2>;
 			phy-handle = <&phy2>;
+		};
 
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi2: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
+		mdio@26520 {
+			tbi2: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
 			};
 		};
 
 		enet3: ethernet@27000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <3>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x27000 0x1000>;
-			ranges = <0x0 0x27000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <37 2 38 2 39 2>;
-			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi3>;
 			phy-handle = <&phy3>;
+		};
 
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi3: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
+		mdio@27520 {
+			tbi3: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
 			};
 		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;	// reg base, size
-			clock-frequency = <0>;	// should we fill in in uboot?
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;	// reg base, size
-			clock-frequency = <0>;	// should we fill in in uboot?
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities reg
-			compatible = "fsl,mpc8548-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0xfe>;
-			fsl,descriptor-types-mask = <0x12b0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
 	};
 
 	pci0: pci@e0008000 {
+		reg = <0 0xe0008000 0 0x1000>;
+		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000
+			  0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>;
+		clock-frequency = <66666666>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x4 (PCIX Slot 2) */
-			0x2000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0x2000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0x2000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0x2000 0x0 0x0 0x4 &mpic 0x3 0x1
+			0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+			0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 			/* IDSEL 0x5 (PCIX Slot 3) */
-			0x2800 0x0 0x0 0x1 &mpic 0x1 0x1
-			0x2800 0x0 0x0 0x2 &mpic 0x2 0x1
-			0x2800 0x0 0x0 0x3 &mpic 0x3 0x1
-			0x2800 0x0 0x0 0x4 &mpic 0x0 0x1
+			0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+			0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
+			0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
+			0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
 
 			/* IDSEL 0x6 (PCIX Slot 4) */
-			0x3000 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x3000 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x3000 0x0 0x0 0x3 &mpic 0x0 0x1
-			0x3000 0x0 0x0 0x4 &mpic 0x1 0x1
+			0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+			0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
 
 			/* IDSEL 0x8 (PCIX Slot 5) */
-			0x4000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0x4000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0x4000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0x4000 0x0 0x0 0x4 &mpic 0x3 0x1
+			0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+			0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 			/* IDSEL 0xC (Tsi310 bridge) */
-			0x6000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0x6000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0x6000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0x6000 0x0 0x0 0x4 &mpic 0x3 0x1
+			0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+			0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 			/* IDSEL 0x14 (Slot 2) */
-			0xa000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0xa000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0xa000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0xa000 0x0 0x0 0x4 &mpic 0x3 0x1
+			0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+			0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 			/* IDSEL 0x15 (Slot 3) */
-			0xa800 0x0 0x0 0x1 &mpic 0x1 0x1
-			0xa800 0x0 0x0 0x2 &mpic 0x2 0x1
-			0xa800 0x0 0x0 0x3 &mpic 0x3 0x1
-			0xa800 0x0 0x0 0x4 &mpic 0x0 0x1
+			0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+			0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
+			0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
+			0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
 
 			/* IDSEL 0x16 (Slot 4) */
-			0xb000 0x0 0x0 0x1 &mpic 0x2 0x1
-			0xb000 0x0 0x0 0x2 &mpic 0x3 0x1
-			0xb000 0x0 0x0 0x3 &mpic 0x0 0x1
-			0xb000 0x0 0x0 0x4 &mpic 0x1 0x1
+			0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+			0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
 
 			/* IDSEL 0x18 (Slot 5) */
-			0xc000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0xc000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0xc000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0xc000 0x0 0x0 0x4 &mpic 0x3 0x1
+			0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+			0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 			/* IDSEL 0x1C (Tsi310 bridge PCI primary) */
-			0xe000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0xe000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0xe000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0xe000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-		bus-range = <0 0>;
-		ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x10000000
-			  0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>;
-		clock-frequency = <66666666>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe0008000 0x1000>;
-		compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
-		device_type = "pci";
+			0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+			0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
 
 		pci_bridge@1c {
 			interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 			interrupt-map = <
 
 				/* IDSEL 0x00 (PrPMC Site) */
-				0000 0x0 0x0 0x1 &mpic 0x0 0x1
-				0000 0x0 0x0 0x2 &mpic 0x1 0x1
-				0000 0x0 0x0 0x3 &mpic 0x2 0x1
-				0000 0x0 0x0 0x4 &mpic 0x3 0x1
+				0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+				0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+				0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+				0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 				/* IDSEL 0x04 (VIA chip) */
-				0x2000 0x0 0x0 0x1 &mpic 0x0 0x1
-				0x2000 0x0 0x0 0x2 &mpic 0x1 0x1
-				0x2000 0x0 0x0 0x3 &mpic 0x2 0x1
-				0x2000 0x0 0x0 0x4 &mpic 0x3 0x1
+				0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+				0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+				0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+				0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
 
 				/* IDSEL 0x05 (8139) */
-				0x2800 0x0 0x0 0x1 &mpic 0x1 0x1
+				0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
 
 				/* IDSEL 0x06 (Slot 6) */
-				0x3000 0x0 0x0 0x1 &mpic 0x2 0x1
-				0x3000 0x0 0x0 0x2 &mpic 0x3 0x1
-				0x3000 0x0 0x0 0x3 &mpic 0x0 0x1
-				0x3000 0x0 0x0 0x4 &mpic 0x1 0x1
+				0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+				0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+				0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+				0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
 
 				/* IDSEL 0x07 (Slot 7) */
-				0x3800 0x0 0x0 0x1 &mpic 0x3 0x1
-				0x3800 0x0 0x0 0x2 &mpic 0x0 0x1
-				0x3800 0x0 0x0 0x3 &mpic 0x1 0x1
-				0x3800 0x0 0x0 0x4 &mpic 0x2 0x1>;
+				0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+				0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0
+				0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+				0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>;
 
 			reg = <0xe000 0x0 0x0 0x0 0x0>;
 			#interrupt-cells = <1>;
@@ -492,7 +260,7 @@
 					#address-cells = <0>;
 					#interrupt-cells = <2>;
 					compatible = "chrp,iic";
-					interrupts = <0 1>;
+					interrupts = <0 1 0 0>;
 					interrupt-parent = <&mpic>;
 				};
 
@@ -505,56 +273,25 @@
 	};
 
 	pci1: pci@e0009000 {
+		reg = <0 0xe0009000 0 0x1000>;
+		ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000
+			  0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>;
+		clock-frequency = <66666666>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 
 			/* IDSEL 0x15 */
-			0xa800 0x0 0x0 0x1 &mpic 0xb 0x1
-			0xa800 0x0 0x0 0x2 &mpic 0x1 0x1
-			0xa800 0x0 0x0 0x3 &mpic 0x2 0x1
-			0xa800 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
-		bus-range = <0 0>;
-		ranges = <0x2000000 0x0 0x90000000 0x90000000 0x0 0x10000000
-			  0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>;
-		clock-frequency = <66666666>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe0009000 0x1000>;
-		compatible = "fsl,mpc8540-pci";
-		device_type = "pci";
+			0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
+			0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+			0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+			0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
 	};
 
 	pci2: pcie@e000a000 {
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-
-			/* IDSEL 0x0 (PEX) */
-			00000 0x0 0x0 0x1 &mpic 0x0 0x1
-			00000 0x0 0x0 0x2 &mpic 0x1 0x1
-			00000 0x0 0x0 0x3 &mpic 0x2 0x1
-			00000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xe3000000 0x0 0x100000>;
-		clock-frequency = <33333333>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe000a000 0x1000>;
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
+		reg = <0 0xe000a000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -565,3 +302,5 @@
 		};
 	};
 };
+
+/include/ "fsl/mpc8548si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index 5c5614f..fe10438 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -209,7 +209,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -219,7 +219,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 647daf8..09598bb 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -9,60 +9,25 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8568si-pre.dtsi"
 
 / {
 	model = "MPC8568EMDS";
 	compatible = "MPC8568EMDS", "MPC85xxMDS";
-	#address-cells = <1>;
-	#size-cells = <1>;
 
 	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		ethernet3 = &enet3;
-		serial0 = &serial0;
-		serial1 = &serial1;
 		pci0 = &pci0;
 		pci1 = &pci1;
-		rapidio0 = &rio0;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,8568@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			sleep = <&pmc 0x00008000	// core
-				 &pmc 0x00004000>;	// timebase
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
+		rapidio0 = &rio;
 	};
 
 	memory {
 		device_type = "memory";
-		reg = <0x0 0x10000000>;
+		reg = <0x0 0x0 0x0 0x0>;
 	};
 
-	localbus@e0005000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus",
-			     "simple-bus";
-		reg = <0xe0005000 0x1000>;
-		interrupt-parent = <&mpic>;
-		interrupts = <19 2>;
-
+	lbc: localbus@e0005000 {
+		reg = <0x0 0xe0005000 0x0 0x1000>;
 		ranges = <0x0 0x0 0xfe000000 0x02000000
 			  0x1 0x0 0xf8000000 0x00008000
 			  0x2 0x0 0xf0000000 0x04000000
@@ -104,288 +69,65 @@
 		};
 	};
 
-	soc8568@e0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x0 0xe0000000 0x100000>;
-		bus-frequency = <0>;
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <10>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8568-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8568-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8568-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x80000>;	// L2, 512K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
+	soc: soc8568@e0000000 {
+		ranges = <0x0 0x0 0xe0000000 0x100000>;
 
 		i2c-sleep-nexus {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "simple-bus";
-			sleep = <&pmc 0x00000004>;
-			ranges;
-
 			i2c@3000 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				cell-index = <0>;
-				compatible = "fsl-i2c";
-				reg = <0x3000 0x100>;
-				interrupts = <43 2>;
-				interrupt-parent = <&mpic>;
-				dfsrr;
-
 				rtc@68 {
 					compatible = "dallas,ds1374";
 					reg = <0x68>;
-					interrupts = <3 1>;
-					interrupt-parent = <&mpic>;
+					interrupts = <3 1 0 0>;
 				};
 			};
-
-			i2c@3100 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				cell-index = <1>;
-				compatible = "fsl-i2c";
-				reg = <0x3100 0x100>;
-				interrupts = <43 2>;
-				interrupt-parent = <&mpic>;
-				dfsrr;
-			};
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8568-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			sleep = <&pmc 0x00000400>;
-
-			dma-channel@0 {
-				compatible = "fsl,mpc8568-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8568-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8568-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8568-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
 		};
 
 		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
- 			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi0>;
 			phy-handle = <&phy2>;
-			sleep = <&pmc 0x00000080>;
+		};
 
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@7 {
-					interrupt-parent = <&mpic>;
-					interrupts = <1 1>;
-					reg = <0x7>;
-					device_type = "ethernet-phy";
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <2 1>;
-					reg = <0x1>;
-					device_type = "ethernet-phy";
-				};
-				phy2: ethernet-phy@2 {
-					interrupt-parent = <&mpic>;
-					interrupts = <1 1>;
-					reg = <0x2>;
-					device_type = "ethernet-phy";
-				};
-				phy3: ethernet-phy@3 {
-					interrupt-parent = <&mpic>;
-					interrupts = <2 1>;
-					reg = <0x3>;
-					device_type = "ethernet-phy";
-				};
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
+		mdio@24520 {
+			phy0: ethernet-phy@7 {
+				interrupts = <1 1 0 0>;
+				reg = <0x7>;
+				device_type = "ethernet-phy";
+			};
+			phy1: ethernet-phy@1 {
+				interrupts = <2 1 0 0>;
+				reg = <0x1>;
+				device_type = "ethernet-phy";
+			};
+			phy2: ethernet-phy@2 {
+				interrupts = <1 1 0 0>;
+				reg = <0x2>;
+				device_type = "ethernet-phy";
+			};
+			phy3: ethernet-phy@3 {
+				interrupts = <2 1 0 0>;
+				reg = <0x3>;
+				device_type = "ethernet-phy";
+			};
+			tbi0: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
 			};
 		};
 
 		enet1: ethernet@25000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x25000 0x1000>;
-			ranges = <0x0 0x25000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
- 			interrupts = <35 2 36 2 40 2>;
-			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi1>;
 			phy-handle = <&phy3>;
 			sleep = <&pmc 0x00000040>;
+		};
 
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
+		mdio@25520 {
+			tbi1: tbi-phy@11 {
+				reg = <0x11>;
+				device_type = "tbi-phy";
 			};
 		};
 
-		duart-sleep-nexus {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "simple-bus";
-			sleep = <&pmc 0x00000002>;
-			ranges;
-
-			serial0: serial@4500 {
-				cell-index = <0>;
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0x4500 0x100>;
-				clock-frequency = <0>;
-				interrupts = <42 2>;
-				interrupt-parent = <&mpic>;
-			};
-
-			serial1: serial@4600 {
-				cell-index = <1>;
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0x4600 0x100>;
-				clock-frequency = <0>;
-				interrupts = <42 2>;
-				interrupt-parent = <&mpic>;
-			};
-		};
-
-		global-utilities@e0000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts";
-			reg = <0xe0000 0x1000>;
-			ranges = <0 0xe0000 0x1000>;
-			fsl,has-rstcr;
-
-			pmc: power@70 {
-				compatible = "fsl,mpc8568-pmc",
-					     "fsl,mpc8548-pmc";
-				reg = <0x70 0x20>;
-			};
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0xfe>;
-			fsl,descriptor-types-mask = <0x12b0ebf>;
-			sleep = <&pmc 0x01000000>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8568-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
 		par_io@e0100 {
-			reg = <0xe0100 0x100>;
-			device_type = "par_io";
 			num-ports = <7>;
 
 			pio1: ucc_pin@01 {
@@ -448,57 +190,21 @@
 		};
 	};
 
-	qe@e0080000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "qe";
-		compatible = "fsl,qe";
-		ranges = <0x0 0xe0080000 0x40000>;
-		reg = <0xe0080000 0x480>;
-		sleep = <&pmc 0x00000800>;
-		brg-frequency = <0>;
-		bus-frequency = <396000000>;
-		fsl,qe-num-riscs = <2>;
-		fsl,qe-num-snums = <28>;
-
-		muram@10000 {
- 			#address-cells = <1>;
- 			#size-cells = <1>;
-			compatible = "fsl,qe-muram", "fsl,cpm-muram";
-			ranges = <0x0 0x10000 0x10000>;
-
-			data-only@0 {
-				compatible = "fsl,qe-muram-data",
-					     "fsl,cpm-muram-data";
-				reg = <0x0 0x10000>;
-			};
-		};
+	qe: qe@e0080000 {
+		ranges = <0x0 0x0 0xe0080000 0x40000>;
+		reg = <0x0 0xe0080000 0x0 0x480>;
 
 		spi@4c0 {
-			cell-index = <0>;
-			compatible = "fsl,spi";
-			reg = <0x4c0 0x40>;
-			interrupts = <2>;
-			interrupt-parent = <&qeic>;
 			mode = "cpu";
 		};
 
 		spi@500 {
-			cell-index = <1>;
-			compatible = "fsl,spi";
-			reg = <0x500 0x40>;
-			interrupts = <1>;
-			interrupt-parent = <&qeic>;
 			mode = "cpu";
 		};
 
 		enet2: ucc@2000 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <1>;
-			reg = <0x2000 0x200>;
-			interrupts = <32>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk16";
@@ -510,10 +216,6 @@
 		enet3: ucc@3000 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <2>;
-			reg = <0x3000 0x200>;
-			interrupts = <33>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk16";
@@ -532,102 +234,57 @@
 			 * gianfar's MDIO bus */
 			qe_phy0: ethernet-phy@07 {
 				interrupt-parent = <&mpic>;
-				interrupts = <1 1>;
+				interrupts = <1 1 0 0>;
 				reg = <0x7>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy1: ethernet-phy@01 {
 				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
+				interrupts = <2 1 0 0>;
 				reg = <0x1>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy2: ethernet-phy@02 {
 				interrupt-parent = <&mpic>;
-				interrupts = <1 1>;
+				interrupts = <1 1 0 0>;
 				reg = <0x2>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy3: ethernet-phy@03 {
 				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
+				interrupts = <2 1 0 0>;
 				reg = <0x3>;
 				device_type = "ethernet-phy";
 			};
 		};
-
-		qeic: interrupt-controller@80 {
-			interrupt-controller;
-			compatible = "fsl,qe-ic";
-			#address-cells = <0>;
-			#interrupt-cells = <1>;
-			reg = <0x80 0x80>;
-			big-endian;
-			interrupts = <46 2 46 2>; //high:30 low:30
-			interrupt-parent = <&mpic>;
-		};
-
 	};
 
 	pci0: pci@e0008000 {
+		reg = <0x0 0xe0008000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0x80000000 0x0 0x80000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0x0 0xe2000000 0x0 0x800000>;
+		clock-frequency = <66666666>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x12 AD18 */
-			0x9000 0x0 0x0 0x1 &mpic 0x5 0x1
-			0x9000 0x0 0x0 0x2 &mpic 0x6 0x1
-			0x9000 0x0 0x0 0x3 &mpic 0x7 0x1
-			0x9000 0x0 0x0 0x4 &mpic 0x4 0x1
+			0x9000 0x0 0x0 0x1 &mpic 0x5 0x1 0 0
+			0x9000 0x0 0x0 0x2 &mpic 0x6 0x1 0 0
+			0x9000 0x0 0x0 0x3 &mpic 0x7 0x1 0 0
+			0x9000 0x0 0x0 0x4 &mpic 0x4 0x1 0 0
 
 			/* IDSEL 0x13 AD19 */
-			0x9800 0x0 0x0 0x1 &mpic 0x6 0x1
-			0x9800 0x0 0x0 0x2 &mpic 0x7 0x1
-			0x9800 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x9800 0x0 0x0 0x4 &mpic 0x5 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>;
-		sleep = <&pmc 0x80000000>;
-		clock-frequency = <66666666>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe0008000 0x1000>;
-		compatible = "fsl,mpc8540-pci";
-		device_type = "pci";
+			0x9800 0x0 0x0 0x1 &mpic 0x6 0x1 0 0
+			0x9800 0x0 0x0 0x2 &mpic 0x7 0x1 0 0
+			0x9800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x9800 0x0 0x0 0x4 &mpic 0x5 0x1 0 0>;
 	};
 
 	/* PCI Express */
 	pci1: pcie@e000a000 {
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-
-			/* IDSEL 0x0 (PEX) */
-			00000 0x0 0x0 0x1 &mpic 0x0 0x1
-			00000 0x0 0x0 0x2 &mpic 0x1 0x1
-			00000 0x0 0x0 0x3 &mpic 0x2 0x1
-			00000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
-			  0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>;
-		sleep = <&pmc 0x20000000>;
-		clock-frequency = <33333333>;
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe000a000 0x1000>;
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
+		ranges = <0x2000000 0x0 0xa0000000 0x0 0xa0000000 0x0 0x10000000
+			  0x1000000 0x0 0x00000000 0x0 0xe2800000 0x0 0x800000>;
+		reg = <0x0 0xe000a000 0x0 0x1000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x10000000
@@ -638,22 +295,11 @@
 		};
 	};
 
-	rio0: rapidio@e00c00000 {
-		#address-cells = <2>;
-		#size-cells = <2>;
-		compatible = "fsl,mpc8568-rapidio", "fsl,rapidio-delta";
-		reg = <0xe00c0000 0x20000>;
-		ranges = <0x0 0x0 0xc0000000 0x0 0x20000000>;
-		interrupts = <48 2 /* error     */
-			      49 2 /* bell_outb */
-			      50 2 /* bell_inb  */
-			      53 2 /* msg1_tx   */
-			      54 2 /* msg1_rx   */
-			      55 2 /* msg2_tx   */
-			      56 2 /* msg2_rx   */>;
-		interrupt-parent = <&mpic>;
-		sleep = <&pmc 0x00080000   /* controller */
-			 &pmc 0x00040000>; /* message unit */
+	rio: rapidio@e00c00000 {
+		reg = <0x0 0xe00c0000 0x0 0x20000>;
+		port1 {
+			ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>;
+		};
 	};
 
 	leds {
@@ -672,3 +318,5 @@
 		};
 	};
 };
+
+/include/ "fsl/mpc8568si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 8b72eaf..7e283c8 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -9,66 +9,36 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8569si-pre.dtsi"
 
 / {
 	model = "MPC8569EMDS";
 	compatible = "fsl,MPC8569EMDS";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
 
 	aliases {
-		serial0 = &serial0;
-		serial1 = &serial1;
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
 		ethernet2 = &enet2;
 		ethernet3 = &enet3;
 		ethernet5 = &enet5;
 		ethernet7 = &enet7;
-		pci1 = &pci1;
-		rapidio0 = &rio0;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,8569@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			sleep = <&pmc 0x00008000	// core
-				 &pmc 0x00004000>;	// timebase
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
+		rapidio0 = &rio;
 	};
 
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@e0005000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc8569-elbc", "fsl,elbc", "simple-bus";
-		reg = <0xe0005000 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
-		sleep = <&pmc 0x08000000>;
+	lbc: localbus@e0005000 {
+		reg = <0x0 0xe0005000 0x0 0x1000>;
 
-		ranges = <0x0 0x0 0xfe000000 0x02000000
-			  0x1 0x0 0xf8000000 0x00008000
-			  0x2 0x0 0xf0000000 0x04000000
-			  0x3 0x0 0xfc000000 0x00008000
-			  0x4 0x0 0xf8008000 0x00008000
-			  0x5 0x0 0xf8010000 0x00008000>;
+		ranges = <0x0 0x0 0x0 0xfe000000 0x02000000
+			  0x1 0x0 0x0 0xf8000000 0x00008000
+			  0x2 0x0 0x0 0xf0000000 0x04000000
+			  0x3 0x0 0x0 0xfc000000 0x00008000
+			  0x4 0x0 0x0 0xf8008000 0x00008000
+			  0x5 0x0 0x0 0xf8010000 0x00008000>;
 
 		nor@0,0 {
 			#address-cells = <1>;
@@ -133,220 +103,25 @@
 		};
 	};
 
-	soc@e0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "fsl,mpc8569-immr", "simple-bus";
-		ranges = <0x0 0xe0000000 0x100000>;
-		bus-frequency = <0>;
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <10>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8569-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8569-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
+	soc: soc@e0000000 {
+		ranges = <0x0 0x0 0xe0000000 0x100000>;
 
 		i2c-sleep-nexus {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "simple-bus";
-			sleep = <&pmc 0x00000004>;
-			ranges;
-
 			i2c@3000 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				cell-index = <0>;
-				compatible = "fsl-i2c";
-				reg = <0x3000 0x100>;
-				interrupts = <43 2>;
-				interrupt-parent = <&mpic>;
-				dfsrr;
-
 				rtc@68 {
 					compatible = "dallas,ds1374";
 					reg = <0x68>;
-					interrupts = <3 1>;
-					interrupt-parent = <&mpic>;
+					interrupts = <3 1 0 0>;
 				};
 			};
-
-			i2c@3100 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				cell-index = <1>;
-				compatible = "fsl-i2c";
-				reg = <0x3100 0x100>;
-				interrupts = <43 2>;
-				interrupt-parent = <&mpic>;
-				dfsrr;
-			};
 		};
 
-		duart-sleep-nexus {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "simple-bus";
-			sleep = <&pmc 0x00000002>;
-			ranges;
-
-			serial0: serial@4500 {
-				cell-index = <0>;
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0x4500 0x100>;
-				clock-frequency = <0>;
-				interrupts = <42 2>;
-				interrupt-parent = <&mpic>;
-			};
-
-			serial1: serial@4600 {
-				cell-index = <1>;
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0x4600 0x100>;
-				clock-frequency = <0>;
-				interrupts = <42 2>;
-				interrupt-parent = <&mpic>;
-			};
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8569-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x80000>;	// L2, 512K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8569-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8569-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8569-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8569-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8569-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,mpc8569-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x8>;
-			interrupt-parent = <&mpic>;
-			sleep = <&pmc 0x00200000>;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
+		sdhc@2e000 {
 			status = "disabled";
 			sdhci,1-bit-only;
 		};
 
-		crypto@30000 {
-			compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
-				"fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0xbfe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-			sleep = <&pmc 0x01000000>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8568-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts";
-			reg = <0xe0000 0x1000>;
-			ranges = <0 0xe0000 0x1000>;
-			fsl,has-rstcr;
-
-			pmc: power@70 {
-				compatible = "fsl,mpc8569-pmc",
-					     "fsl,mpc8548-pmc";
-				reg = <0x70 0x20>;
-			};
-		};
-
 		par_io@e0100 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0xe0100 0x100>;
-			ranges = <0x0 0xe0100 0x100>;
-			device_type = "par_io";
 			num-ports = <7>;
 
 			qe_pio_e: gpio-controller@80 {
@@ -447,47 +222,11 @@
 		};
 	};
 
-	qe@e0080000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "qe";
-		compatible = "fsl,qe";
-		ranges = <0x0 0xe0080000 0x40000>;
-		reg = <0xe0080000 0x480>;
-		sleep = <&pmc 0x00000800>;
-		brg-frequency = <0>;
-		bus-frequency = <0>;
-		fsl,qe-num-riscs = <4>;
-		fsl,qe-num-snums = <46>;
-
-		qeic: interrupt-controller@80 {
-			interrupt-controller;
-			compatible = "fsl,qe-ic";
-			#address-cells = <0>;
-			#interrupt-cells = <1>;
-			reg = <0x80 0x80>;
-			interrupts = <46 2 46 2>; //high:30 low:30
-			interrupt-parent = <&mpic>;
-		};
-
-		timer@440 {
-			compatible = "fsl,mpc8569-qe-gtm",
-				     "fsl,qe-gtm", "fsl,gtm";
-			reg = <0x440 0x40>;
-			interrupts = <12 13 14 15>;
-			interrupt-parent = <&qeic>;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
-		};
+	qe: qe@e0080000 {
+		ranges = <0x0 0x0 0xe0080000 0x40000>;
+		reg = <0x0 0xe0080000 0x0 0x480>;
 
 		spi@4c0 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc8569-qe-spi", "fsl,spi";
-			reg = <0x4c0 0x40>;
-			cell-index = <0>;
-			interrupts = <2>;
-			interrupt-parent = <&qeic>;
 			gpios = <&qe_pio_e 30 0>;
 			mode = "cpu-qe";
 
@@ -499,20 +238,10 @@
 		};
 
 		spi@500 {
-			cell-index = <1>;
-			compatible = "fsl,spi";
-			reg = <0x500 0x40>;
-			interrupts = <1>;
-			interrupt-parent = <&qeic>;
 			mode = "cpu";
 		};
 
 		usb@6c0 {
-			compatible = "fsl,mpc8569-qe-usb",
-				     "fsl,mpc8323-qe-usb";
-			reg = <0x6c0 0x40 0x8b00 0x100>;
-			interrupts = <11>;
-			interrupt-parent = <&qeic>;
 			fsl,fullspeed-clock = "clk5";
 			fsl,lowspeed-clock = "brg10";
 			gpios = <&qe_pio_f 3 0   /* USBOE */
@@ -527,10 +256,6 @@
 		enet0: ucc@2000 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <1>;
-			reg = <0x2000 0x200>;
-			interrupts = <32>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk12";
@@ -548,35 +273,33 @@
 
 			qe_phy0: ethernet-phy@07 {
 				interrupt-parent = <&mpic>;
-				interrupts = <1 1>;
+				interrupts = <1 1 0 0>;
 				reg = <0x7>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy1: ethernet-phy@01 {
 				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
+				interrupts = <2 1 0 0>;
 				reg = <0x1>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy2: ethernet-phy@02 {
 				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
+				interrupts = <3 1 0 0>;
 				reg = <0x2>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy3: ethernet-phy@03 {
 				interrupt-parent = <&mpic>;
-				interrupts = <4 1>;
+				interrupts = <4 1 0 0>;
 				reg = <0x3>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy5: ethernet-phy@04 {
-				interrupt-parent = <&mpic>;
 				reg = <0x04>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy7: ethernet-phy@06 {
-				interrupt-parent = <&mpic>;
 				reg = <0x6>;
 				device_type = "ethernet-phy";
 			};
@@ -610,10 +333,6 @@
 		enet2: ucc@2200 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <3>;
-			reg = <0x2200 0x200>;
-			interrupts = <34>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk12";
@@ -637,10 +356,6 @@
 		enet1: ucc@3000 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <2>;
-			reg = <0x3000 0x200>;
-			interrupts = <33>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk17";
@@ -664,10 +379,6 @@
 		enet3: ucc@3200 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <4>;
-			reg = <0x3200 0x200>;
-			interrupts = <35>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk17";
@@ -691,10 +402,6 @@
 		enet5: ucc@3400 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <6>;
-			reg = <0x3400 0x200>;
-			interrupts = <41>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "none";
@@ -706,10 +413,6 @@
 		enet7: ucc@3600 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <8>;
-			reg = <0x3600 0x200>;
-			interrupts = <43>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "none";
@@ -717,50 +420,14 @@
 			phy-handle = <&qe_phy7>;
 			phy-connection-type = "sgmii";
 		};
-
-		muram@10000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,qe-muram", "fsl,cpm-muram";
-			ranges = <0x0 0x10000 0x20000>;
-
-			data-only@0 {
-				compatible = "fsl,qe-muram-data",
-					     "fsl,cpm-muram-data";
-				reg = <0x0 0x20000>;
-			};
-		};
-
 	};
 
 	/* PCI Express */
 	pci1: pcie@e000a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xe000a000 0x1000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 (PEX) */
-			00000 0x0 0x0 0x1 &mpic 0x0 0x1
-			00000 0x0 0x0 0x2 &mpic 0x1 0x1
-			00000 0x0 0x0 0x3 &mpic 0x2 0x1
-			00000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
-			  0x1000000 0x0 0x00000000 0xe2800000 0x0 0x00800000>;
-		sleep = <&pmc 0x20000000>;
-		clock-frequency = <33333333>;
+		reg = <0x0 0xe000a000 0x0 0x1000>;
+		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x10000000
+			  0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x00800000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x10000000
@@ -771,20 +438,15 @@
 		};
 	};
 
-	rio0: rapidio@e00c00000 {
-		#address-cells = <2>;
-		#size-cells = <2>;
-		compatible = "fsl,mpc8569-rapidio", "fsl,rapidio-delta";
-		reg = <0xe00c0000 0x20000>;
-		ranges = <0x0 0x0 0xc0000000 0x0 0x20000000>;
-		interrupts = <48 2 /* error     */
-			      49 2 /* bell_outb */
-			      50 2 /* bell_inb  */
-			      53 2 /* msg1_tx   */
-			      54 2 /* msg1_rx   */
-			      55 2 /* msg2_tx   */
-			      56 2 /* msg2_rx   */>;
-		interrupt-parent = <&mpic>;
-		sleep = <&pmc 0x00080000>;
+	rio: rapidio@e00c00000 {
+		reg = <0x0 0xe00c0000 0x0 0x20000>;
+		port1 {
+			ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>;
+		};
+		port2 {
+			status = "disabled";
+		};
 	};
 };
+
+/include/ "fsl/mpc8569si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index f6c04d2..0c9f295 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -9,67 +9,18 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8572si-pre.dtsi"
+
 / {
 	model = "fsl,MPC8572DS";
 	compatible = "fsl,MPC8572DS";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		ethernet3 = &enet3;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,8572@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
-
-		PowerPC,8572@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
-	};
 
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@ffe05000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+	board_lbc: lbc: localbus@ffe05000 {
 		reg = <0 0xffe05000 0 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
 
 		ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
 			  0x1 0x0 0x0 0xe0000000 0x08000000
@@ -78,601 +29,17 @@
 			  0x4 0x0 0x0 0xffa40000 0x00040000
 			  0x5 0x0 0x0 0xffa80000 0x00040000
 			  0x6 0x0 0x0 0xffac0000 0x00040000>;
-
-		nor@0,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "cfi-flash";
-			reg = <0x0 0x0 0x8000000>;
-			bank-width = <2>;
-			device-width = <1>;
-
-			ramdisk@0 {
-				reg = <0x0 0x03000000>;
-				read-only;
-			};
-
-			diagnostic@3000000 {
-				reg = <0x03000000 0x00e00000>;
-				read-only;
-			};
-
-			dink@3e00000 {
-				reg = <0x03e00000 0x00200000>;
-				read-only;
-			};
-
-			kernel@4000000 {
-				reg = <0x04000000 0x00400000>;
-				read-only;
-			};
-
-			jffs2@4400000 {
-				reg = <0x04400000 0x03b00000>;
-			};
-
-			dtb@7f00000 {
-				reg = <0x07f00000 0x00080000>;
-				read-only;
-			};
-
-			u-boot@7f80000 {
-				reg = <0x07f80000 0x00080000>;
-				read-only;
-			};
-		};
-
-		nand@2,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x2 0x0 0x40000>;
-
-			u-boot@0 {
-				reg = <0x0 0x02000000>;
-				read-only;
-			};
-
-			jffs2@2000000 {
-				reg = <0x02000000 0x10000000>;
-			};
-
-			ramdisk@12000000 {
-				reg = <0x12000000 0x08000000>;
-				read-only;
-			};
-
-			kernel@1a000000 {
-				reg = <0x1a000000 0x04000000>;
-			};
-
-			dtb@1e000000 {
-				reg = <0x1e000000 0x01000000>;
-				read-only;
-			};
-
-			empty@1f000000 {
-				reg = <0x1f000000 0x21000000>;
-			};
-		};
-
-		nand@4,0 {
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x4 0x0 0x40000>;
-		};
-
-		nand@5,0 {
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x5 0x0 0x40000>;
-		};
-
-		nand@6,0 {
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x6 0x0 0x40000>;
-		};
 	};
 
-	soc8572@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
+	board_soc: soc: soc8572@ffe00000 {
 		ranges = <0x0 0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8572-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8572-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		memory-controller@6000 {
-			compatible = "fsl,mpc8572-memory-controller";
-			reg = <0x6000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8572-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x100000>; // L2, 1M
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		dma@c300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
-			reg = <0xc300 0x4>;
-			ranges = <0x0 0xc100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <76 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <77 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <78 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <79 2>;
-			};
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		ptp_clock@24E00 {
-			compatible = "fsl,etsec-ptp";
-			reg = <0x24E00 0xB0>;
-			interrupts = <68 2 69 2 70 2 71 2>;
-			interrupt-parent = < &mpic >;
-			fsl,tclk-period = <5>;
-			fsl,tmr-prsc = <200>;
-			fsl,tmr-add = <0xAAAAAAAB>;
-			fsl,tmr-fiper1 = <0x3B9AC9FB>;
-			fsl,tmr-fiper2 = <0x3B9AC9FB>;
-			fsl,max-adj = <499999999>;
-		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi0>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x0>;
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x1>;
-				};
-				phy2: ethernet-phy@2 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x2>;
-				};
-				phy3: ethernet-phy@3 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x3>;
-				};
-
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet1: ethernet@25000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x25000 0x1000>;
-			ranges = <0x0 0x25000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <35 2 36 2 40 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi1>;
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet2: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <2>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi2>;
-			phy-handle = <&phy2>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi2: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet3: ethernet@27000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <3>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x27000 0x1000>;
-			ranges = <0x0 0x27000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <37 2 38 2 39 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi3>;
-			phy-handle = <&phy3>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi3: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,mpc8572-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
-				     "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x9fe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
 	};
 
-	pci0: pcie@ffe08000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
+	board_pci0: pci0: pcie@ffe08000 {
 		reg = <0 0xffe08000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x11 func 0 - PCI slot 1 */
-			0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 1 - PCI slot 1 */
-			0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 2 - PCI slot 1 */
-			0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 3 - PCI slot 1 */
-			0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 4 - PCI slot 1 */
-			0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 5 - PCI slot 1 */
-			0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 6 - PCI slot 1 */
-			0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 7 - PCI slot 1 */
-			0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x12 func 0 - PCI slot 2 */
-			0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 1 - PCI slot 2 */
-			0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 2 - PCI slot 2 */
-			0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 3 - PCI slot 2 */
-			0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 4 - PCI slot 2 */
-			0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 5 - PCI slot 2 */
-			0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 6 - PCI slot 2 */
-			0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 7 - PCI slot 2 */
-			0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			// IDSEL 0x1c  USB
-			0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
-			0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
-			0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
-			0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
-			// IDSEL 0x1d  Audio
-			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
-			// IDSEL 0x1e Legacy
-			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
-			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
-			// IDSEL 0x1f IDE/SATA
-			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
-			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-
-			>;
-
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
 				  0x0 0x20000000
@@ -680,99 +47,14 @@
 				  0x1000000 0x0 0x0
 				  0x1000000 0x0 0x0
 				  0x0 0x10000>;
-			uli1575@0 {
-				reg = <0x0 0x0 0x0 0x0 0x0>;
-				#size-cells = <2>;
-				#address-cells = <3>;
-				ranges = <0x2000000 0x0 0x80000000
-					  0x2000000 0x0 0x80000000
-					  0x0 0x20000000
-
-					  0x1000000 0x0 0x0
-					  0x1000000 0x0 0x0
-					  0x0 0x10000>;
-				isa@1e {
-					device_type = "isa";
-					#interrupt-cells = <2>;
-					#size-cells = <1>;
-					#address-cells = <2>;
-					reg = <0xf000 0x0 0x0 0x0 0x0>;
-					ranges = <0x1 0x0 0x1000000 0x0 0x0
-						  0x1000>;
-					interrupt-parent = <&i8259>;
-
-					i8259: interrupt-controller@20 {
-						reg = <0x1 0x20 0x2
-						       0x1 0xa0 0x2
-						       0x1 0x4d0 0x2>;
-						interrupt-controller;
-						device_type = "interrupt-controller";
-						#address-cells = <0>;
-						#interrupt-cells = <2>;
-						compatible = "chrp,iic";
-						interrupts = <9 2>;
-						interrupt-parent = <&mpic>;
-					};
-
-					i8042@60 {
-						#size-cells = <0>;
-						#address-cells = <1>;
-						reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
-						interrupts = <1 3 12 3>;
-						interrupt-parent =
-							<&i8259>;
-
-						keyboard@0 {
-							reg = <0x0>;
-							compatible = "pnpPNP,303";
-						};
-
-						mouse@1 {
-							reg = <0x1>;
-							compatible = "pnpPNP,f03";
-						};
-					};
-
-					rtc@70 {
-						compatible = "pnpPNP,b00";
-						reg = <0x1 0x70 0x2>;
-					};
-
-					gpio@400 {
-						reg = <0x1 0x400 0x80>;
-					};
-				};
-			};
 		};
-
 	};
 
 	pci1: pcie@ffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -784,31 +66,10 @@
 	};
 
 	pci2: pcie@ffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xc0000000
 				  0x2000000 0x0 0xc0000000
 				  0x0 0x20000000
@@ -819,3 +80,11 @@
 		};
 	};
 };
+
+/*
+ * mpc8572ds.dtsi must be last to ensure board_pci0 overrides pci0 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/mpc8572si-post.dtsi"
+/include/ "mpc8572ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dtsi b/arch/powerpc/boot/dts/mpc8572ds.dtsi
new file mode 100644
index 0000000..c3d4fac
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds.dtsi
@@ -0,0 +1,397 @@
+/*
+ * MPC8572DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x8000000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		ramdisk@0 {
+			reg = <0x0 0x03000000>;
+			read-only;
+		};
+
+		diagnostic@3000000 {
+			reg = <0x03000000 0x00e00000>;
+			read-only;
+		};
+
+		dink@3e00000 {
+			reg = <0x03e00000 0x00200000>;
+			read-only;
+		};
+
+		kernel@4000000 {
+			reg = <0x04000000 0x00400000>;
+			read-only;
+		};
+
+		jffs2@4400000 {
+			reg = <0x04400000 0x03b00000>;
+		};
+
+		dtb@7f00000 {
+			reg = <0x07f00000 0x00080000>;
+			read-only;
+		};
+
+		u-boot@7f80000 {
+			reg = <0x07f80000 0x00080000>;
+			read-only;
+		};
+	};
+
+	nand@2,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc8572-fcm-nand",
+			     "fsl,elbc-fcm-nand";
+		reg = <0x2 0x0 0x40000>;
+
+		u-boot@0 {
+			reg = <0x0 0x02000000>;
+			read-only;
+		};
+
+		jffs2@2000000 {
+			reg = <0x02000000 0x10000000>;
+		};
+
+		ramdisk@12000000 {
+			reg = <0x12000000 0x08000000>;
+			read-only;
+		};
+
+		kernel@1a000000 {
+			reg = <0x1a000000 0x04000000>;
+		};
+
+		dtb@1e000000 {
+			reg = <0x1e000000 0x01000000>;
+			read-only;
+		};
+
+		empty@1f000000 {
+			reg = <0x1f000000 0x21000000>;
+		};
+	};
+
+	nand@4,0 {
+		compatible = "fsl,mpc8572-fcm-nand",
+			     "fsl,elbc-fcm-nand";
+		reg = <0x4 0x0 0x40000>;
+	};
+
+	nand@5,0 {
+		compatible = "fsl,mpc8572-fcm-nand",
+			     "fsl,elbc-fcm-nand";
+		reg = <0x5 0x0 0x40000>;
+	};
+
+	nand@6,0 {
+		compatible = "fsl,mpc8572-fcm-nand",
+			     "fsl,elbc-fcm-nand";
+		reg = <0x6 0x0 0x40000>;
+	};
+};
+
+&board_soc {
+	enet0: ethernet@24000 {
+		tbi-handle = <&tbi0>;
+		phy-handle = <&phy0>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	mdio@24520 {
+		phy0: ethernet-phy@0 {
+			interrupts = <10 1 0 0>;
+			reg = <0x0>;
+		};
+		phy1: ethernet-phy@1 {
+			interrupts = <10 1 0 0>;
+			reg = <0x1>;
+		};
+		phy2: ethernet-phy@2 {
+			interrupts = <10 1 0 0>;
+			reg = <0x2>;
+		};
+		phy3: ethernet-phy@3 {
+			interrupts = <10 1 0 0>;
+			reg = <0x3>;
+		};
+
+		tbi0: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	ptp_clock@24e00 {
+		fsl,tclk-period = <5>;
+		fsl,tmr-prsc = <200>;
+		fsl,tmr-add = <0xAAAAAAAB>;
+		fsl,tmr-fiper1 = <0x3B9AC9FB>;
+		fsl,tmr-fiper2 = <0x3B9AC9FB>;
+		fsl,max-adj = <499999999>;
+	};
+
+	enet1: ethernet@25000 {
+		tbi-handle = <&tbi1>;
+		phy-handle = <&phy1>;
+		phy-connection-type = "rgmii-id";
+
+	};
+
+	mdio@25520 {
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet2: ethernet@26000 {
+		tbi-handle = <&tbi2>;
+		phy-handle = <&phy2>;
+		phy-connection-type = "rgmii-id";
+
+	};
+	mdio@26520 {
+		tbi2: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet3: ethernet@27000 {
+		tbi-handle = <&tbi3>;
+		phy-handle = <&phy3>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	mdio@27520 {
+		tbi3: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+};
+
+&board_pci0 {
+	pcie@0 {
+		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+		interrupt-map = <
+			/* IDSEL 0x11 func 0 - PCI slot 1 */
+			0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 1 - PCI slot 1 */
+			0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 2 - PCI slot 1 */
+			0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 3 - PCI slot 1 */
+			0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 4 - PCI slot 1 */
+			0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 5 - PCI slot 1 */
+			0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 6 - PCI slot 1 */
+			0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x11 func 7 - PCI slot 1 */
+			0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+			0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+			0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+			0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+			/* IDSEL 0x12 func 0 - PCI slot 2 */
+			0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 1 - PCI slot 2 */
+			0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 2 - PCI slot 2 */
+			0x9200 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 3 - PCI slot 2 */
+			0x9300 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 4 - PCI slot 2 */
+			0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 5 - PCI slot 2 */
+			0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 6 - PCI slot 2 */
+			0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			/* IDSEL 0x12 func 7 - PCI slot 2 */
+			0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+			0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+			0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+			0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+			// IDSEL 0x1c  USB
+			0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
+			0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
+			0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
+			0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
+
+			// IDSEL 0x1d  Audio
+			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+			// IDSEL 0x1e Legacy
+			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+			// IDSEL 0x1f IDE/SATA
+			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+			>;
+
+
+		uli1575@0 {
+			reg = <0x0 0x0 0x0 0x0 0x0>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			ranges = <0x2000000 0x0 0x80000000
+				  0x2000000 0x0 0x80000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x10000>;
+			isa@1e {
+				device_type = "isa";
+				#interrupt-cells = <2>;
+				#size-cells = <1>;
+				#address-cells = <2>;
+				reg = <0xf000 0x0 0x0 0x0 0x0>;
+				ranges = <0x1 0x0 0x1000000 0x0 0x0
+					  0x1000>;
+				interrupt-parent = <&i8259>;
+
+				i8259: interrupt-controller@20 {
+					reg = <0x1 0x20 0x2
+					       0x1 0xa0 0x2
+					       0x1 0x4d0 0x2>;
+					interrupt-controller;
+					device_type = "interrupt-controller";
+					#address-cells = <0>;
+					#interrupt-cells = <2>;
+					compatible = "chrp,iic";
+					interrupts = <9 2 0 0>;
+					interrupt-parent = <&mpic>;
+				};
+
+				i8042@60 {
+					#size-cells = <0>;
+					#address-cells = <1>;
+					reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+					interrupts = <1 3 12 3>;
+					interrupt-parent =
+						<&i8259>;
+
+					keyboard@0 {
+						reg = <0x0>;
+						compatible = "pnpPNP,303";
+					};
+
+					mouse@1 {
+						reg = <0x1>;
+						compatible = "pnpPNP,f03";
+					};
+				};
+
+				rtc@70 {
+					compatible = "pnpPNP,b00";
+					reg = <0x1 0x70 0x2>;
+				};
+
+				gpio@400 {
+					reg = <0x1 0x400 0x80>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc8572ds_36b.dts b/arch/powerpc/boot/dts/mpc8572ds_36b.dts
index f6365db..6c3d0b3 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_36b.dts
@@ -1,5 +1,5 @@
 /*
- * MPC8572 DS Device Tree Source
+ * MPC8572DS Device Tree Source (36-bit address map)
  *
  * Copyright 2007-2009 Freescale Semiconductor Inc.
  *
@@ -9,67 +9,18 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/mpc8572si-pre.dtsi"
+
 / {
 	model = "fsl,MPC8572DS";
 	compatible = "fsl,MPC8572DS";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		ethernet3 = &enet3;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,8572@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
-
-		PowerPC,8572@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
-		};
-	};
 
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@fffe05000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+	board_lbc: lbc: localbus@fffe05000 {
 		reg = <0xf 0xffe05000 0 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
 
 		ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
 			  0x1 0x0 0xf 0xe0000000 0x08000000
@@ -78,588 +29,17 @@
 			  0x4 0x0 0xf 0xffa40000 0x00040000
 			  0x5 0x0 0xf 0xffa80000 0x00040000
 			  0x6 0x0 0xf 0xffac0000 0x00040000>;
-
-		nor@0,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "cfi-flash";
-			reg = <0x0 0x0 0x8000000>;
-			bank-width = <2>;
-			device-width = <1>;
-
-			ramdisk@0 {
-				reg = <0x0 0x03000000>;
-				read-only;
-			};
-
-			diagnostic@3000000 {
-				reg = <0x03000000 0x00e00000>;
-				read-only;
-			};
-
-			dink@3e00000 {
-				reg = <0x03e00000 0x00200000>;
-				read-only;
-			};
-
-			kernel@4000000 {
-				reg = <0x04000000 0x00400000>;
-				read-only;
-			};
-
-			jffs2@4400000 {
-				reg = <0x04400000 0x03b00000>;
-			};
-
-			dtb@7f00000 {
-				reg = <0x07f00000 0x00080000>;
-				read-only;
-			};
-
-			u-boot@7f80000 {
-				reg = <0x07f80000 0x00080000>;
-				read-only;
-			};
-		};
-
-		nand@2,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x2 0x0 0x40000>;
-
-			u-boot@0 {
-				reg = <0x0 0x02000000>;
-				read-only;
-			};
-
-			jffs2@2000000 {
-				reg = <0x02000000 0x10000000>;
-			};
-
-			ramdisk@12000000 {
-				reg = <0x12000000 0x08000000>;
-				read-only;
-			};
-
-			kernel@1a000000 {
-				reg = <0x1a000000 0x04000000>;
-			};
-
-			dtb@1e000000 {
-				reg = <0x1e000000 0x01000000>;
-				read-only;
-			};
-
-			empty@1f000000 {
-				reg = <0x1f000000 0x21000000>;
-			};
-		};
-
-		nand@4,0 {
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x4 0x0 0x40000>;
-		};
-
-		nand@5,0 {
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x5 0x0 0x40000>;
-		};
-
-		nand@6,0 {
-			compatible = "fsl,mpc8572-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x6 0x0 0x40000>;
-		};
 	};
 
-	soc8572@fffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
+	board_soc: soc: soc8572@fffe00000 {
 		ranges = <0x0 0xf 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8572-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8572-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		memory-controller@6000 {
-			compatible = "fsl,mpc8572-memory-controller";
-			reg = <0x6000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8572-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x100000>; // L2, 1M
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		dma@c300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
-			reg = <0xc300 0x4>;
-			ranges = <0x0 0xc100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <76 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <77 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <78 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <79 2>;
-			};
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi0>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x0>;
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x1>;
-				};
-				phy2: ethernet-phy@2 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x2>;
-				};
-				phy3: ethernet-phy@3 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x3>;
-				};
-
-				tbi0: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet1: ethernet@25000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x25000 0x1000>;
-			ranges = <0x0 0x25000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <35 2 36 2 40 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi1>;
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi1: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet2: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <2>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi2>;
-			phy-handle = <&phy2>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi2: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		enet3: ethernet@27000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <3>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x27000 0x1000>;
-			ranges = <0x0 0x27000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <37 2 38 2 39 2>;
-			interrupt-parent = <&mpic>;
-			tbi-handle = <&tbi3>;
-			phy-handle = <&phy3>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-tbi";
-				reg = <0x520 0x20>;
-
-				tbi3: tbi-phy@11 {
-					reg = <0x11>;
-					device_type = "tbi-phy";
-				};
-			};
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,mpc8572-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
-				     "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x9fe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
 	};
 
-	pci0: pcie@fffe08000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
+	board_pci0: pci0: pcie@fffe08000 {
 		reg = <0xf 0xffe08000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x11 func 0 - PCI slot 1 */
-			0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 1 - PCI slot 1 */
-			0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 2 - PCI slot 1 */
-			0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 3 - PCI slot 1 */
-			0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 4 - PCI slot 1 */
-			0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 5 - PCI slot 1 */
-			0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 6 - PCI slot 1 */
-			0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 7 - PCI slot 1 */
-			0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x12 func 0 - PCI slot 2 */
-			0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 1 - PCI slot 2 */
-			0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 2 - PCI slot 2 */
-			0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 3 - PCI slot 2 */
-			0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 4 - PCI slot 2 */
-			0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 5 - PCI slot 2 */
-			0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 6 - PCI slot 2 */
-			0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 7 - PCI slot 2 */
-			0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			// IDSEL 0x1c  USB
-			0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
-			0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
-			0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
-			0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
-			// IDSEL 0x1d  Audio
-			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
-			// IDSEL 0x1e Legacy
-			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
-			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
-			// IDSEL 0x1f IDE/SATA
-			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
-			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-
-			>;
-
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xe0000000
 				  0x2000000 0x0 0xe0000000
 				  0x0 0x20000000
@@ -667,99 +47,14 @@
 				  0x1000000 0x0 0x0
 				  0x1000000 0x0 0x0
 				  0x0 0x10000>;
-			uli1575@0 {
-				reg = <0x0 0x0 0x0 0x0 0x0>;
-				#size-cells = <2>;
-				#address-cells = <3>;
-				ranges = <0x2000000 0x0 0xe0000000
-					  0x2000000 0x0 0xe0000000
-					  0x0 0x20000000
-
-					  0x1000000 0x0 0x0
-					  0x1000000 0x0 0x0
-					  0x0 0x10000>;
-				isa@1e {
-					device_type = "isa";
-					#interrupt-cells = <2>;
-					#size-cells = <1>;
-					#address-cells = <2>;
-					reg = <0xf000 0x0 0x0 0x0 0x0>;
-					ranges = <0x1 0x0 0x1000000 0x0 0x0
-						  0x1000>;
-					interrupt-parent = <&i8259>;
-
-					i8259: interrupt-controller@20 {
-						reg = <0x1 0x20 0x2
-						       0x1 0xa0 0x2
-						       0x1 0x4d0 0x2>;
-						interrupt-controller;
-						device_type = "interrupt-controller";
-						#address-cells = <0>;
-						#interrupt-cells = <2>;
-						compatible = "chrp,iic";
-						interrupts = <9 2>;
-						interrupt-parent = <&mpic>;
-					};
-
-					i8042@60 {
-						#size-cells = <0>;
-						#address-cells = <1>;
-						reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
-						interrupts = <1 3 12 3>;
-						interrupt-parent =
-							<&i8259>;
-
-						keyboard@0 {
-							reg = <0x0>;
-							compatible = "pnpPNP,303";
-						};
-
-						mouse@1 {
-							reg = <0x1>;
-							compatible = "pnpPNP,f03";
-						};
-					};
-
-					rtc@70 {
-						compatible = "pnpPNP,b00";
-						reg = <0x1 0x70 0x2>;
-					};
-
-					gpio@400 {
-						reg = <0x1 0x400 0x80>;
-					};
-				};
-			};
 		};
-
 	};
 
 	pci1: pcie@fffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xe0000000
 				  0x2000000 0x0 0xe0000000
 				  0x0 0x20000000
@@ -771,31 +66,10 @@
 	};
 
 	pci2: pcie@fffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x00010000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xe0000000
 				  0x2000000 0x0 0xe0000000
 				  0x0 0x20000000
@@ -806,3 +80,11 @@
 		};
 	};
 };
+
+/*
+ * mpc8572ds.dtsi must be last to ensure board_pci0 overrides pci0 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/mpc8572si-post.dtsi"
+/include/ "mpc8572ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
index 3375c2a..d34d127 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -14,494 +14,69 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc8572ds.dts"
+
 / {
 	model = "fsl,MPC8572DS";
 	compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		serial0 = &serial0;
-		pci0 = &pci0;
-		pci1 = &pci1;
-	};
 
 	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
 		PowerPC,8572@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
 		};
-
+		PowerPC,8572@1 {
+			status = "disabled";
+		};
 	};
 
-	memory {
-		device_type = "memory";
-		reg = <0x0 0x0>;	// Filled by U-Boot
+	localbus@ffe05000 {
+		status = "disabled";
 	};
 
 	soc8572@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
+		serial@4600 {
+			status = "disabled";
 		};
-
-		ecm@1000 {
-			compatible = "fsl,mpc8572-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
+		dma@c300 {
+			status = "disabled";
 		};
-
-		memory-controller@2000 {
-			compatible = "fsl,mpc8572-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
+		gpio-controller@f000 {
 		};
-
-		memory-controller@6000 {
-			compatible = "fsl,mpc8572-memory-controller";
-			reg = <0x6000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8572-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
+		l2-cache-controller@20000 {
 			cache-size = <0x80000>;	// L2, 512K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
 		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
+		ethernet@26000 {
+			status = "disabled";
 		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
+		mdio@26520 {
+			status = "disabled";
 		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
+		ethernet@27000 {
+			status = "disabled";
 		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-
-			mdio@520 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "fsl,gianfar-mdio";
-				reg = <0x520 0x20>;
-
-				phy0: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x0>;
-				};
-				phy1: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
-					reg = <0x1>;
-				};
-			};
+		mdio@27520 {
+			status = "disabled";
 		};
-
-		enet1: ethernet@25000 {
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x25000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <35 2 36 2 40 2>;
-			interrupt-parent = <&mpic>;
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x80>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,mpc8572-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
-				     "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x9fe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
+		pic@40000 {
 			protected-sources = <
 			31 32 33 37 38 39       /* enet2 enet3 */
 			76 77 78 79 26 42	/* dma2 pci2 serial*/
 			0xe4 0xe5 0xe6 0xe7	/* msi */
 			>;
 		};
-	};
 
-	pci0: pcie@ffe08000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xffe08000 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x11 func 0 - PCI slot 1 */
-			0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 1 - PCI slot 1 */
-			0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 2 - PCI slot 1 */
-			0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 3 - PCI slot 1 */
-			0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 4 - PCI slot 1 */
-			0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 5 - PCI slot 1 */
-			0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 6 - PCI slot 1 */
-			0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x11 func 7 - PCI slot 1 */
-			0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
-			0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
-			0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
-			0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
-
-			/* IDSEL 0x12 func 0 - PCI slot 2 */
-			0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 1 - PCI slot 2 */
-			0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 2 - PCI slot 2 */
-			0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 3 - PCI slot 2 */
-			0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 4 - PCI slot 2 */
-			0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 5 - PCI slot 2 */
-			0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 6 - PCI slot 2 */
-			0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			/* IDSEL 0x12 func 7 - PCI slot 2 */
-			0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
-			0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
-			0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
-			0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
-
-			// IDSEL 0x1c  USB
-			0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
-			0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
-			0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
-			0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
-			// IDSEL 0x1d  Audio
-			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
-			// IDSEL 0x1e Legacy
-			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
-			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
-			// IDSEL 0x1f IDE/SATA
-			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
-			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-
-			>;
-
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0x80000000
-				  0x2000000 0x0 0x80000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x10000>;
-			uli1575@0 {
-				reg = <0x0 0x0 0x0 0x0 0x0>;
-				#size-cells = <2>;
-				#address-cells = <3>;
-				ranges = <0x2000000 0x0 0x80000000
-					  0x2000000 0x0 0x80000000
-					  0x0 0x20000000
-
-					  0x1000000 0x0 0x0
-					  0x1000000 0x0 0x0
-					  0x0 0x10000>;
-				isa@1e {
-					device_type = "isa";
-					#interrupt-cells = <2>;
-					#size-cells = <1>;
-					#address-cells = <2>;
-					reg = <0xf000 0x0 0x0 0x0 0x0>;
-					ranges = <0x1 0x0 0x1000000 0x0 0x0
-						  0x1000>;
-					interrupt-parent = <&i8259>;
-
-					i8259: interrupt-controller@20 {
-						reg = <0x1 0x20 0x2
-						       0x1 0xa0 0x2
-						       0x1 0x4d0 0x2>;
-						interrupt-controller;
-						device_type = "interrupt-controller";
-						#address-cells = <0>;
-						#interrupt-cells = <2>;
-						compatible = "chrp,iic";
-						interrupts = <9 2>;
-						interrupt-parent = <&mpic>;
-					};
-
-					i8042@60 {
-						#size-cells = <0>;
-						#address-cells = <1>;
-						reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
-						interrupts = <1 3 12 3>;
-						interrupt-parent =
-							<&i8259>;
-
-						keyboard@0 {
-							reg = <0x0>;
-							compatible = "pnpPNP,303";
-						};
-
-						mouse@1 {
-							reg = <0x1>;
-							compatible = "pnpPNP,f03";
-						};
-					};
-
-					rtc@70 {
-						compatible = "pnpPNP,b00";
-						reg = <0x1 0x70 0x2>;
-					};
-
-					gpio@400 {
-						reg = <0x1 0x400 0x80>;
-					};
-				};
-			};
+		msi@41600 {
+			msi-available-ranges = <0 0x80>;
+			interrupts = <
+				0xe0 0
+				0xe1 0
+				0xe2 0
+				0xe3 0>;
 		};
-
-	};
-
-	pci1: pcie@ffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xffe09000 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0xa0000000
-				  0x2000000 0x0 0xa0000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x10000>;
+		timer@42100 {
+			status = "disabled";
 		};
 	};
+	pcie@ffe0a000 {
+		status = "disabled";
+	};
 };
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
index e7b477f..d6a8faf 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -15,169 +15,74 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc8572ds.dts"
+
 / {
 	model = "fsl,MPC8572DS";
 	compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	aliases {
-		ethernet2 = &enet2;
-		ethernet3 = &enet3;
-		serial0 = &serial0;
-		pci2 = &pci2;
-	};
 
 	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
+		PowerPC,8572@0 {
+			status = "disabled";
+		};
 		PowerPC,8572@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			d-cache-line-size = <32>;	// 32 bytes
-			i-cache-line-size = <32>;	// 32 bytes
-			d-cache-size = <0x8000>;		// L1, 32K
-			i-cache-size = <0x8000>;		// L1, 32K
-			timebase-frequency = <0>;
-			bus-frequency = <0>;
-			clock-frequency = <0>;
-			next-level-cache = <&L2>;
 		};
 	};
 
-	memory {
-		device_type = "memory";
-		reg = <0x0 0x0>;	// Filled by U-Boot
+	localbus@ffe05000 {
+		status = "disabled";
 	};
 
 	soc8572@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,mpc8572-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>; // 32 bytes
-			cache-size = <0x80000>; // L2, 512K
-			interrupt-parent = <&mpic>;
+		ecm-law@0 {
+			status = "disabled";
 		};
-
-		dma@c300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
-			reg = <0xc300 0x4>;
-			ranges = <0x0 0xc100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <76 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <77 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <78 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,mpc8572-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <79 2>;
-			};
+		ecm@1000 {
+			status = "disabled";
 		};
-
+		memory-controller@2000 {
+			status = "disabled";
+		};
+		memory-controller@6000 {
+			status = "disabled";
+		};
+		i2c@3000 {
+			status = "disabled";
+		};
+		i2c@3100 {
+			status = "disabled";
+		};
+		serial@4500 {
+			status = "disabled";
+		};
+		gpio-controller@f000 {
+			status = "disabled";
+		};
+		l2-cache-controller@20000 {
+			cache-size = <0x80000>;	// L2, 512K
+		};
+		dma@21300 {
+			status = "disabled";
+		};
+		ethernet@24000 {
+			status = "disabled";
+		};
 		mdio@24520 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,gianfar-mdio";
-			reg = <0x24520 0x20>;
-
-			phy2: ethernet-phy@2 {
-				interrupt-parent = <&mpic>;
-				reg = <0x2>;
-			};
-			phy3: ethernet-phy@3 {
-				interrupt-parent = <&mpic>;
-				reg = <0x3>;
-			};
+			status = "disabled";
 		};
-
-		enet2: ethernet@26000 {
-			cell-index = <2>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-			phy-handle = <&phy2>;
-			phy-connection-type = "rgmii-id";
+		ptp_clock@24e00 {
+			status = "disabled";
 		};
-
-		enet3: ethernet@27000 {
-			cell-index = <3>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x27000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <37 2 38 2 39 2>;
-			interrupt-parent = <&mpic>;
-			phy-handle = <&phy3>;
-			phy-connection-type = "rgmii-id";
+		ethernet@25000 {
+			status = "disabled";
 		};
-
-		msi@41600 {
-			compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0x80 0x80>;
-			interrupts = <
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
+		mdio@25520 {
+			status = "disabled";
 		};
-
-		serial0: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
+		crypto@30000 {
+			status = "disabled";
 		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
+		pic@40000 {
 			protected-sources = <
 			18 16 10 42 45 58	/* MEM L2 mdio serial crypto */
 			29 30 34 35 36 40	/* enet0 enet1 */
@@ -189,41 +94,25 @@
 			0xe0 0xe1 0xe2 0xe3	/* msi */
 			>;
 		};
-	};
-
-	pci2: pcie@ffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0xffe0a000 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
-			  0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0xc0000000
-				  0x2000000 0x0 0xc0000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x10000>;
+		timer@41100 {
+			status = "disabled";
 		};
+		msi@41600 {
+			msi-available-ranges = <0x80 0x80>;
+			interrupts = <
+				0xe4 0
+				0xe5 0
+				0xe6 0
+				0xe7 0>;
+		};
+		global-utilities@e0000 {
+			status = "disabled";
+		};
+	};
+	pcie@ffe08000 {
+		status = "disabled";
+	};
+	pcie@ffe09000 {
+		status = "disabled";
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 83c3218..6a109a0 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -175,7 +175,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -186,7 +186,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 848320e..1e8666c 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -26,13 +26,6 @@
 		serial1 = &serial1;
 		pci0 = &pci0;
 		pci1 = &pci1;
-/*
- * Only one of Rapid IO or PCI can be present due to HW limitations and
- * due to the fact that the 2 now share address space in the new memory
- * map.  The most likely case is that we have PCI, so comment out the
- * rapidio node.  Leave it here for reference.
- */
-		/* rapidio0 = &rapidio0; */
 	};
 
 	cpus {
@@ -335,7 +328,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -345,7 +338,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <28 2>;
@@ -361,6 +354,41 @@
 			device_type = "open-pic";
 		};
 
+		rmu: rmu@d3000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "fsl,srio-rmu";
+			reg = <0xd3000 0x500>;
+			ranges = <0x0 0xd3000 0x500>;
+
+			message-unit@0 {
+				compatible = "fsl,srio-msg-unit";
+				reg = <0x0 0x100>;
+				interrupts = <
+					53 2 /* msg1_tx_irq */
+					54 2>;/* msg1_rx_irq */
+			};
+			message-unit@100 {
+				compatible = "fsl,srio-msg-unit";
+				reg = <0x100 0x100>;
+				interrupts = <
+					55 2  /* msg2_tx_irq */
+					56 2>;/* msg2_rx_irq */
+			};
+			doorbell-unit@400 {
+				compatible = "fsl,srio-dbell-unit";
+				reg = <0x400 0x80>;
+				interrupts = <
+					49 2  /* bell_outb_irq */
+					50 2>;/* bell_inb_irq */
+			};
+			port-write-unit@4e0 {
+				compatible = "fsl,srio-port-write-unit";
+				reg = <0x4e0 0x20>;
+				interrupts = <48 2>;
+			};
+		};
+
 		global-utilities@e0000 {
 			compatible = "fsl,mpc8641-guts";
 			reg = <0xe0000 0x1000>;
@@ -612,16 +640,27 @@
 		};
 	};
 /*
-	rapidio0: rapidio@ffec0000 {
+ * Only one of Rapid IO or PCI can be present due to HW limitations and
+ * due to the fact that the 2 now share address space in the new memory
+ * map.  The most likely case is that we have PCI, so comment out the
+ * rapidio node.  Leave it here for reference.
+
+	rapidio@ffec0000 {
+		reg = <0xffec0000 0x11000>;
+		compatible = "fsl,srio";
+		interrupt-parent = <&mpic>;
+		interrupts = <48 2>;
 		#address-cells = <2>;
 		#size-cells = <2>;
-		compatible = "fsl,rapidio-delta";
-		reg = <0xffec0000 0x20000>;
-		ranges = <0 0 0x80000000 0 0x20000000>;
-		interrupt-parent = <&mpic>;
-		// err_irq bell_outb_irq bell_inb_irq
-		//	msg1_tx_irq msg1_rx_irq	msg2_tx_irq msg2_rx_irq
-		interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
+		fsl,srio-rmu-handle = <&rmu>;
+		ranges;
+
+		port1 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			cell-index = <1>;
+			ranges = <0 0 0x80000000 0 0x20000000>;
+		};
 	};
 */
 
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts b/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
index 8be8e70..fd4cd4d 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
@@ -328,7 +328,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -338,7 +338,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <28 2>;
diff --git a/arch/powerpc/boot/dts/obs600.dts b/arch/powerpc/boot/dts/obs600.dts
new file mode 100644
index 0000000..18e7d79
--- /dev/null
+++ b/arch/powerpc/boot/dts/obs600.dts
@@ -0,0 +1,314 @@
+/*
+ * Device Tree Source for PlatHome OpenBlockS 600 (405EX)
+ *
+ * Copyright 2011 Ben Herrenschmidt, IBM Corp.
+ *
+ * Based on Kilauea by:
+ *
+ * Copyright 2007-2009 DENX Software Engineering, Stefan Roese <sr@denx.de>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	model = "PlatHome,OpenBlockS 600";
+	compatible = "plathome,obs600";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		ethernet0 = &EMAC0;
+		ethernet1 = &EMAC1;
+		serial0 = &UART0;
+		serial1 = &UART1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,405EX";
+			reg = <0x00000000>;
+			clock-frequency = <0>; /* Filled in by U-Boot */
+			timebase-frequency = <0>; /* Filled in by U-Boot */
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <16384>; /* 16 kB */
+			d-cache-size = <16384>; /* 16 kB */
+			dcr-controller;
+			dcr-access-method = "native";
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */
+	};
+
+	UIC0: interrupt-controller {
+		compatible = "ibm,uic-405ex", "ibm,uic";
+		interrupt-controller;
+		cell-index = <0>;
+		dcr-reg = <0x0c0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+	};
+
+	UIC1: interrupt-controller1 {
+		compatible = "ibm,uic-405ex","ibm,uic";
+		interrupt-controller;
+		cell-index = <1>;
+		dcr-reg = <0x0d0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	UIC2: interrupt-controller2 {
+		compatible = "ibm,uic-405ex","ibm,uic";
+		interrupt-controller;
+		cell-index = <2>;
+		dcr-reg = <0x0e0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x0b0 0x003>;
+		unused-units = <0x00000000>;
+		idle-doze = <0x02000000>;
+		standby = <0xe3e74800>;
+	};
+
+	plb {
+		compatible = "ibm,plb-405ex", "ibm,plb4";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		clock-frequency = <0>; /* Filled in by U-Boot */
+
+		SDRAM0: memory-controller {
+			compatible = "ibm,sdram-405ex", "ibm,sdram-4xx-ddr2";
+			dcr-reg = <0x010 0x002>;
+			interrupt-parent = <&UIC2>;
+			interrupts = <0x5 0x4	/* ECC DED Error */
+				      0x6 0x4>;	/* ECC SEC Error */
+		};
+
+		CRYPTO: crypto@ef700000 {
+			compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto";
+			reg = <0xef700000 0x80400>;
+			interrupt-parent = <&UIC0>;
+			interrupts = <0x17 0x2>;
+		};
+
+		MAL0: mcmal {
+			compatible = "ibm,mcmal-405ex", "ibm,mcmal2";
+			dcr-reg = <0x180 0x062>;
+			num-tx-chans = <2>;
+			num-rx-chans = <2>;
+			interrupt-parent = <&MAL0>;
+			interrupts = <0x0 0x1 0x2 0x3 0x4>;
+			#interrupt-cells = <1>;
+			#address-cells = <0>;
+			#size-cells = <0>;
+			interrupt-map = </*TXEOB*/ 0x0 &UIC0 0xa 0x4
+					/*RXEOB*/ 0x1 &UIC0 0xb 0x4
+					/*SERR*/  0x2 &UIC1 0x0 0x4
+					/*TXDE*/  0x3 &UIC1 0x1 0x4
+					/*RXDE*/  0x4 &UIC1 0x2 0x4>;
+			interrupt-map-mask = <0xffffffff>;
+		};
+
+		POB0: opb {
+			compatible = "ibm,opb-405ex", "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x80000000 0x80000000 0x10000000
+				  0xef600000 0xef600000 0x00a00000
+				  0xf0000000 0xf0000000 0x10000000>;
+			dcr-reg = <0x0a0 0x005>;
+			clock-frequency = <0>; /* Filled in by U-Boot */
+
+			EBC0: ebc {
+				compatible = "ibm,ebc-405ex", "ibm,ebc";
+				dcr-reg = <0x012 0x002>;
+				#address-cells = <2>;
+				#size-cells = <1>;
+				clock-frequency = <0>; /* Filled in by U-Boot */
+				/* ranges property is supplied by U-Boot */
+				interrupts = <0x5 0x1>;
+				interrupt-parent = <&UIC1>;
+
+				nor_flash@0,0 {
+					compatible = "amd,s29gl512n", "cfi-flash";
+					bank-width = <2>;
+					reg = <0x00000000 0x00000000 0x08000000>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+					partition@0 {
+						label = "kernel + initrd";
+						reg = <0x00000000 0x03de0000>;
+					};
+					partition@3de0000 {
+						label = "user config area";
+						reg = <0x03de0000 0x00080000>;
+					};
+					partition@3e60000 {
+						label = "user program area";
+						reg = <0x03e60000 0x04000000>;
+					};
+					partition@7e60000 {
+						label = "flat device tree";
+						reg = <0x07e60000 0x00080000>;
+					};
+					partition@7ee0000 {
+						label = "test program";
+						reg = <0x07ee0000 0x00080000>;
+					};
+					partition@7f60000 {
+						label = "u-boot env";
+						reg = <0x07f60000 0x00040000>;
+					};
+					partition@7fa0000 {
+						label = "u-boot";
+						reg = <0x07fa0000 0x00060000>;
+					};
+				};
+			};
+
+			UART0: serial@ef600200 {
+				device_type = "serial";
+				compatible = "ns16550";
+				reg = <0xef600200 0x00000008>;
+				virtual-reg = <0xef600200>;
+				clock-frequency = <0>; /* Filled in by U-Boot */
+				current-speed = <0>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x1a 0x4>;
+			};
+
+			UART1: serial@ef600300 {
+				device_type = "serial";
+				compatible = "ns16550";
+				reg = <0xef600300 0x00000008>;
+				virtual-reg = <0xef600300>;
+				clock-frequency = <0>; /* Filled in by U-Boot */
+				current-speed = <0>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x1 0x4>;
+			};
+
+			IIC0: i2c@ef600400 {
+				compatible = "ibm,iic-405ex", "ibm,iic";
+				reg = <0xef600400 0x00000014>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x2 0x4>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				rtc@68 {
+					compatible = "dallas,ds1340";
+					reg = <0x68>;
+				};
+			};
+
+			IIC1: i2c@ef600500 {
+				compatible = "ibm,iic-405ex", "ibm,iic";
+				reg = <0xef600500 0x00000014>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x7 0x4>;
+			};
+
+			RGMII0: emac-rgmii@ef600b00 {
+				compatible = "ibm,rgmii-405ex", "ibm,rgmii";
+				reg = <0xef600b00 0x00000104>;
+				has-mdio;
+			};
+
+			EMAC0: ethernet@ef600900 {
+				linux,network-index = <0x0>;
+				device_type = "network";
+				compatible = "ibm,emac-405ex", "ibm,emac4sync";
+				interrupt-parent = <&EMAC0>;
+				interrupts = <0x0 0x1>;
+				#interrupt-cells = <1>;
+				#address-cells = <0>;
+				#size-cells = <0>;
+				interrupt-map = </*Status*/ 0x0 &UIC0 0x18 0x4
+						/*Wake*/  0x1 &UIC1 0x1d 0x4>;
+				reg = <0xef600900 0x000000c4>;
+				local-mac-address = [000000000000]; /* Filled in by U-Boot */
+				mal-device = <&MAL0>;
+				mal-tx-channel = <0>;
+				mal-rx-channel = <0>;
+				cell-index = <0>;
+				max-frame-size = <9000>;
+				rx-fifo-size = <4096>;
+				tx-fifo-size = <2048>;
+				rx-fifo-size-gige = <16384>;
+				tx-fifo-size-gige = <16384>;
+				phy-mode = "rgmii";
+				phy-map = <0x00000000>;
+				rgmii-device = <&RGMII0>;
+				rgmii-channel = <0>;
+				has-inverted-stacr-oc;
+				has-new-stacr-staopc;
+			};
+
+			EMAC1: ethernet@ef600a00 {
+				linux,network-index = <0x1>;
+				device_type = "network";
+				compatible = "ibm,emac-405ex", "ibm,emac4sync";
+				interrupt-parent = <&EMAC1>;
+				interrupts = <0x0 0x1>;
+				#interrupt-cells = <1>;
+				#address-cells = <0>;
+				#size-cells = <0>;
+				interrupt-map = </*Status*/ 0x0 &UIC0 0x19 0x4
+						/*Wake*/  0x1 &UIC1 0x1f 0x4>;
+				reg = <0xef600a00 0x000000c4>;
+				local-mac-address = [000000000000]; /* Filled in by U-Boot */
+				mal-device = <&MAL0>;
+				mal-tx-channel = <1>;
+				mal-rx-channel = <1>;
+				cell-index = <1>;
+				max-frame-size = <9000>;
+				rx-fifo-size = <4096>;
+				tx-fifo-size = <2048>;
+				rx-fifo-size-gige = <16384>;
+				tx-fifo-size-gige = <16384>;
+				phy-mode = "rgmii";
+				phy-map = <0x00000000>;
+				rgmii-device = <&RGMII0>;
+				rgmii-channel = <1>;
+				has-inverted-stacr-oc;
+				has-new-stacr-staopc;
+			};
+
+			GPIO: gpio@ef600800 {
+				device_type = "gpio";
+				compatible = "ibm,gpio-405ex", "ibm,ppc4xx-gpio";
+				reg = <0xef600800 0x50>;
+			};
+		};
+	};
+        chosen {
+                linux,stdout-path = "/plb/opb/serial@ef600200";
+        };
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
index d6c669c..b868d22 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dts
+++ b/arch/powerpc/boot/dts/p1010rdb.dts
@@ -9,230 +9,33 @@
  * option) any later version.
  */
 
-/include/ "p1010si.dtsi"
+/include/ "fsl/p1010si-pre.dtsi"
 
 / {
 	model = "fsl,P1010RDB";
 	compatible = "fsl,P1010RDB";
 
-	aliases {
-		serial0 = &serial0;
-		serial1 = &serial1;
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		can0 = &can0;
-		can1 = &can1;
-	};
-
 	memory {
 		device_type = "memory";
 	};
 
-	ifc@ffe1e000 {
+	board_ifc: ifc: ifc@ffe1e000 {
 		/* NOR, NAND Flashes and CPLD on board */
 		ranges = <0x0 0x0 0x0 0xee000000 0x02000000
 			  0x1 0x0 0x0 0xff800000 0x00010000
 			  0x3 0x0 0x0 0xffb00000 0x00000020>;
-
-		nor@0,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "cfi-flash";
-			reg = <0x0 0x0 0x2000000>;
-			bank-width = <2>;
-			device-width = <1>;
-
-			partition@40000 {
-				/* 256KB for DTB Image */
-				reg = <0x00040000 0x00040000>;
-				label = "NOR DTB Image";
-			};
-
-			partition@80000 {
-				/* 7 MB for Linux Kernel Image */
-				reg = <0x00080000 0x00700000>;
-				label = "NOR Linux Kernel Image";
-			};
-
-			partition@800000 {
-				/* 20MB for JFFS2 based Root file System */
-				reg = <0x00800000 0x01400000>;
-				label = "NOR JFFS2 Root File System";
-			};
-
-			partition@1f00000 {
-				/* This location must not be altered  */
-				/* 512KB for u-boot Bootloader Image */
-				/* 512KB for u-boot Environment Variables */
-				reg = <0x01f00000 0x00100000>;
-				label = "NOR U-Boot Image";
-				read-only;
-			};
-		};
-
-		nand@1,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,ifc-nand";
-			reg = <0x1 0x0 0x10000>;
-
-			partition@0 {
-				/* This location must not be altered  */
-				/* 1MB for u-boot Bootloader Image */
-				reg = <0x0 0x00100000>;
-				label = "NAND U-Boot Image";
-				read-only;
-			};
-
-			partition@100000 {
-				/* 1MB for DTB Image */
-				reg = <0x00100000 0x00100000>;
-				label = "NAND DTB Image";
-			};
-
-			partition@200000 {
-				/* 4MB for Linux Kernel Image */
-				reg = <0x00200000 0x00400000>;
-				label = "NAND Linux Kernel Image";
-			};
-
-			partition@600000 {
-				/* 4MB for Compressed Root file System Image */
-				reg = <0x00600000 0x00400000>;
-				label = "NAND Compressed RFS Image";
-			};
-
-			partition@a00000 {
-				/* 15MB for JFFS2 based Root file System */
-				reg = <0x00a00000 0x00f00000>;
-				label = "NAND JFFS2 Root File System";
-			};
-
-			partition@1900000 {
-				/* 7MB for User Area */
-				reg = <0x01900000 0x00700000>;
-				label = "NAND User area";
-			};
-		};
-
-		cpld@3,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p1010rdb-cpld";
-			reg = <0x3 0x0 0x0000020>;
-			bank-width = <1>;
-			device-width = <1>;
-		};
+		reg = <0x0 0xffe1e000 0 0x2000>;
 	};
 
-	soc@ffe00000 {
-		spi@7000 {
-			flash@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				compatible = "spansion,s25sl12801";
-				reg = <0>;
-				spi-max-frequency = <50000000>;
-
-				partition@0 {
-					/* 1MB for u-boot Bootloader Image */
-					/* 1MB for Environment */
-					reg = <0x0 0x00100000>;
-					label = "SPI Flash U-Boot Image";
-					read-only;
-				};
-
-				partition@100000 {
-					/* 512KB for DTB Image */
-					reg = <0x00100000 0x00080000>;
-					label = "SPI Flash DTB Image";
-				};
-
-				partition@180000 {
-					/* 4MB for Linux Kernel Image */
-					reg = <0x00180000 0x00400000>;
-					label = "SPI Flash Linux Kernel Image";
-				};
-
-				partition@580000 {
-					/* 4MB for Compressed RFS Image */
-					reg = <0x00580000 0x00400000>;
-					label = "SPI Flash Compressed RFSImage";
-				};
-
-				partition@980000 {
-					/* 6.5MB for JFFS2 based RFS */
-					reg = <0x00980000 0x00680000>;
-					label = "SPI Flash JFFS2 RFS";
-				};
-			};
-		};
-
-		usb@22000 {
-			phy_type = "utmi";
-		};
-
-		mdio@24000 {
-			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x1>;
-			};
-
-			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
-				reg = <0x0>;
-			};
-
-			phy2: ethernet-phy@2 {
-				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
-				reg = <0x2>;
-			};
-		};
-
-		enet0: ethernet@b0000 {
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-		};
-
-		enet1: ethernet@b1000 {
-			phy-handle = <&phy1>;
-			tbi-handle = <&tbi0>;
-			phy-connection-type = "sgmii";
-		};
-
-		enet2: ethernet@b2000 {
-			phy-handle = <&phy2>;
-			tbi-handle = <&tbi1>;
-			phy-connection-type = "sgmii";
-		};
+	board_soc: soc: soc@ffe00000 {
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
 	};
 
 	pci0: pcie@ffe09000 {
+		reg = <0 0xffe09000 0 0x1000>;
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-			interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-			interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
-
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -244,24 +47,10 @@
 	};
 
 	pci1: pcie@ffe0a000 {
+		reg = <0 0xffe0a000 0 0x1000>;
 		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-			interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-			interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
 				  0x0 0x20000000
@@ -272,3 +61,6 @@
 		};
 	};
 };
+
+/include/ "p1010rdb.dtsi"
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
new file mode 100644
index 0000000..d4c4a77
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -0,0 +1,234 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_ifc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x2000000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		partition@40000 {
+			/* 256KB for DTB Image */
+			reg = <0x00040000 0x00040000>;
+			label = "NOR DTB Image";
+		};
+
+		partition@80000 {
+			/* 7 MB for Linux Kernel Image */
+			reg = <0x00080000 0x00700000>;
+			label = "NOR Linux Kernel Image";
+		};
+
+		partition@800000 {
+			/* 20MB for JFFS2 based Root file System */
+			reg = <0x00800000 0x01400000>;
+			label = "NOR JFFS2 Root File System";
+		};
+
+		partition@1f00000 {
+			/* This location must not be altered  */
+			/* 512KB for u-boot Bootloader Image */
+			/* 512KB for u-boot Environment Variables */
+			reg = <0x01f00000 0x00100000>;
+			label = "NOR U-Boot Image";
+			read-only;
+		};
+	};
+
+	nand@1,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,ifc-nand";
+		reg = <0x1 0x0 0x10000>;
+
+		partition@0 {
+			/* This location must not be altered  */
+			/* 1MB for u-boot Bootloader Image */
+			reg = <0x0 0x00100000>;
+			label = "NAND U-Boot Image";
+			read-only;
+		};
+
+		partition@100000 {
+			/* 1MB for DTB Image */
+			reg = <0x00100000 0x00100000>;
+			label = "NAND DTB Image";
+		};
+
+		partition@200000 {
+			/* 4MB for Linux Kernel Image */
+			reg = <0x00200000 0x00400000>;
+			label = "NAND Linux Kernel Image";
+		};
+
+		partition@600000 {
+			/* 4MB for Compressed Root file System Image */
+			reg = <0x00600000 0x00400000>;
+			label = "NAND Compressed RFS Image";
+		};
+
+		partition@a00000 {
+			/* 15MB for JFFS2 based Root file System */
+			reg = <0x00a00000 0x00f00000>;
+			label = "NAND JFFS2 Root File System";
+		};
+
+		partition@1900000 {
+			/* 7MB for User Area */
+			reg = <0x01900000 0x00700000>;
+			label = "NAND User area";
+		};
+	};
+
+	cpld@3,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,p1010rdb-cpld";
+		reg = <0x3 0x0 0x0000020>;
+		bank-width = <1>;
+		device-width = <1>;
+	};
+};
+
+&board_soc {
+	i2c@3000 {
+		rtc@68 {
+			compatible = "pericom,pt7c4338";
+			reg = <0x68>;
+		};
+	};
+
+	spi@7000 {
+		flash@0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "spansion,s25sl12801";
+			reg = <0>;
+			spi-max-frequency = <50000000>;
+
+			partition@0 {
+				/* 1MB for u-boot Bootloader Image */
+				/* 1MB for Environment */
+				reg = <0x0 0x00100000>;
+				label = "SPI Flash U-Boot Image";
+				read-only;
+			};
+
+			partition@100000 {
+				/* 512KB for DTB Image */
+				reg = <0x00100000 0x00080000>;
+				label = "SPI Flash DTB Image";
+			};
+
+			partition@180000 {
+				/* 4MB for Linux Kernel Image */
+				reg = <0x00180000 0x00400000>;
+				label = "SPI Flash Linux Kernel Image";
+			};
+
+			partition@580000 {
+				/* 4MB for Compressed RFS Image */
+				reg = <0x00580000 0x00400000>;
+				label = "SPI Flash Compressed RFSImage";
+			};
+
+			partition@980000 {
+				/* 6.5MB for JFFS2 based RFS */
+				reg = <0x00980000 0x00680000>;
+				label = "SPI Flash JFFS2 RFS";
+			};
+		};
+	};
+
+	usb@22000 {
+		phy_type = "utmi";
+		dr_mode = "host";
+	};
+
+	mdio@24000 {
+		phy0: ethernet-phy@0 {
+			interrupts = <3 1 0 0>;
+			reg = <0x1>;
+		};
+
+		phy1: ethernet-phy@1 {
+			interrupts = <2 1 0 0>;
+			reg = <0x0>;
+		};
+
+		phy2: ethernet-phy@2 {
+			interrupts = <2 1 0 0>;
+			reg = <0x2>;
+		};
+
+		tbi-phy@3 {
+			device_type = "tbi-phy";
+			reg = <0x3>;
+		};
+	};
+
+	mdio@25000 {
+		tbi0: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	mdio@26000 {
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet0: ethernet@b0000 {
+		phy-handle = <&phy0>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	enet1: ethernet@b1000 {
+		phy-handle = <&phy1>;
+		tbi-handle = <&tbi0>;
+		phy-connection-type = "sgmii";
+	};
+
+	enet2: ethernet@b2000 {
+		phy-handle = <&phy2>;
+		tbi-handle = <&tbi1>;
+		phy-connection-type = "sgmii";
+	};
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dts b/arch/powerpc/boot/dts/p1010rdb_36b.dts
new file mode 100644
index 0000000..64776f4
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_36b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1010 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+	model = "fsl,P1010RDB";
+	compatible = "fsl,P1010RDB";
+
+	memory {
+		device_type = "memory";
+	};
+
+	board_ifc: ifc: ifc@fffe1e000 {
+		/* NOR, NAND Flashes and CPLD on board */
+		ranges = <0x0 0x0 0xf 0xee000000 0x02000000
+			  0x1 0x0 0xf 0xff800000 0x00010000
+			  0x3 0x0 0xf 0xffb00000 0x00000020>;
+		reg = <0xf 0xffe1e000 0 0x2000>;
+	};
+
+	board_soc: soc: soc@fffe00000 {
+		ranges = <0x0 0xf 0xffe00000 0x100000>;
+	};
+
+	pci0: pcie@fffe09000 {
+		reg = <0xf 0xffe09000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xc0000000
+				  0x2000000 0x0 0xc0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+
+	pci1: pcie@fffe0a000 {
+		reg = <0xf 0xffe0a000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xc0000000
+				  0x2000000 0x0 0xc0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+};
+
+/include/ "p1010rdb.dtsi"
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010si.dtsi b/arch/powerpc/boot/dts/p1010si.dtsi
deleted file mode 100644
index cabe0a4..0000000
--- a/arch/powerpc/boot/dts/p1010si.dtsi
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * P1010si Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
-	compatible = "fsl,P1010";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,P1010@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			next-level-cache = <&L2>;
-		};
-	};
-
-	ifc@ffe1e000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,ifc", "simple-bus";
-		reg = <0x0 0xffe1e000 0 0x2000>;
-		interrupts = <16 2 19 2>;
-		interrupt-parent = <&mpic>;
-	};
-
-	soc@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "fsl,p1010-immr", "simple-bus";
-		ranges = <0x0  0x0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,p1010-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <16 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,p1010-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		spi@7000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc8536-espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2>;
-			interrupt-parent = <&mpic>;
-			fsl,espi-num-chipselects = <1>;
-		};
-
-		gpio: gpio-controller@f000 {
-			#gpio-cells = <2>;
-			compatible = "fsl,mpc8572-gpio";
-			reg = <0xf000 0x100>;
-			interrupts = <47 0x2>;
-			interrupt-parent = <&mpic>;
-			gpio-controller;
-		};
-
-		sata@18000 {
-			compatible = "fsl,pq-sata-v2";
-			reg = <0x18000 0x1000>;
-			cell-index = <1>;
-			interrupts = <74 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		sata@19000 {
-			compatible = "fsl,pq-sata-v2";
-			reg = <0x19000 0x1000>;
-			cell-index = <2>;
-			interrupts = <41 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		can0: can@1c000 {
-			compatible = "fsl,p1010-flexcan";
-			reg = <0x1c000 0x1000>;
-			interrupts = <48 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		can1: can@1d000 {
-			compatible = "fsl,p1010-flexcan";
-			reg = <0x1d000 0x1000>;
-			interrupts = <61 0x2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p1010-l2-cache-controller",
-					"fsl,p1014-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x40000>; // L2,256K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p1010-dma", "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		usb@22000 {
-			compatible = "fsl-usb2-dr";
-			reg = <0x22000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
-			dr_mode = "host";
-		};
-
-		mdio@24000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-mdio";
-			reg = <0x24000 0x1000 0xb0030 0x4>;
-		};
-
-		mdio@25000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-tbi";
-			reg = <0x25000 0x1000 0xb1030 0x4>;
-			tbi0: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-		};
-
-		mdio@26000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-tbi";
-			reg = <0x26000 0x1000 0xb1030 0x4>;
-			tbi1: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x8>;
-			interrupt-parent = <&mpic>;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
-			fsl,sdhci-auto-cmd12;
-		};
-
-		enet0: ethernet@b0000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
-
-			queue-group@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb0000 0x1000>;
-				fsl,rx-bit-map = <0xff>;
-				fsl,tx-bit-map = <0xff>;
-				interrupts = <29 2 30 2 34 2>;
-			};
-
-		};
-
-		enet1: ethernet@b1000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
-
-			queue-group@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb1000 0x1000>;
-				fsl,rx-bit-map = <0xff>;
-				fsl,tx-bit-map = <0xff>;
-				interrupts = <35 2 36 2 40 2>;
-			};
-
-		};
-
-		enet2: ethernet@b2000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
-
-			queue-group@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb2000 0x1000>;
-				fsl,rx-bit-map = <0xff>;
-				fsl,tx-bit-map = <0xff>;
-				interrupts = <31 2 32 2 33 2>;
-			};
-
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,p1010-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,p1010-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-	};
-
-	pci0: pcie@ffe09000 {
-		compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
-	};
-
-	pci1: pcie@ffe0a000 {
-		compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
-	};
-};
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
index d6a8ae4..518bf99 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dts
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -9,267 +9,33 @@
  * option) any later version.
  */
 
-/include/ "p1020si.dtsi"
-
+/include/ "fsl/p1020si-pre.dtsi"
 / {
 	model = "fsl,P1020RDB";
 	compatible = "fsl,P1020RDB";
 
-	aliases {
-		serial0 = &serial0;
-		serial1 = &serial1;
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		pci0 = &pci0;
-		pci1 = &pci1;
-	};
-
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@ffe05000 {
+	board_lbc: lbc: localbus@ffe05000 {
+		reg = <0 0xffe05000 0 0x1000>;
 
 		/* NOR, NAND Flashes and Vitesse 5 port L2 switch */
 		ranges = <0x0 0x0 0x0 0xef000000 0x01000000
 			  0x1 0x0 0x0 0xffa00000 0x00040000
 			  0x2 0x0 0x0 0xffb00000 0x00020000>;
-
-		nor@0,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "cfi-flash";
-			reg = <0x0 0x0 0x1000000>;
-			bank-width = <2>;
-			device-width = <1>;
-
-			partition@0 {
-				/* This location must not be altered  */
-				/* 256KB for Vitesse 7385 Switch firmware */
-				reg = <0x0 0x00040000>;
-				label = "NOR (RO) Vitesse-7385 Firmware";
-				read-only;
-			};
-
-			partition@40000 {
-				/* 256KB for DTB Image */
-				reg = <0x00040000 0x00040000>;
-				label = "NOR (RO) DTB Image";
-				read-only;
-			};
-
-			partition@80000 {
-				/* 3.5 MB for Linux Kernel Image */
-				reg = <0x00080000 0x00380000>;
-				label = "NOR (RO) Linux Kernel Image";
-				read-only;
-			};
-
-			partition@400000 {
-				/* 11MB for JFFS2 based Root file System */
-				reg = <0x00400000 0x00b00000>;
-				label = "NOR (RW) JFFS2 Root File System";
-			};
-
-			partition@f00000 {
-				/* This location must not be altered  */
-				/* 512KB for u-boot Bootloader Image */
-				/* 512KB for u-boot Environment Variables */
-				reg = <0x00f00000 0x00100000>;
-				label = "NOR (RO) U-Boot Image";
-				read-only;
-			};
-		};
-
-		nand@1,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p1020-fcm-nand",
-				     "fsl,elbc-fcm-nand";
-			reg = <0x1 0x0 0x40000>;
-
-			partition@0 {
-				/* This location must not be altered  */
-				/* 1MB for u-boot Bootloader Image */
-				reg = <0x0 0x00100000>;
-				label = "NAND (RO) U-Boot Image";
-				read-only;
-			};
-
-			partition@100000 {
-				/* 1MB for DTB Image */
-				reg = <0x00100000 0x00100000>;
-				label = "NAND (RO) DTB Image";
-				read-only;
-			};
-
-			partition@200000 {
-				/* 4MB for Linux Kernel Image */
-				reg = <0x00200000 0x00400000>;
-				label = "NAND (RO) Linux Kernel Image";
-				read-only;
-			};
-
-			partition@600000 {
-				/* 4MB for Compressed Root file System Image */
-				reg = <0x00600000 0x00400000>;
-				label = "NAND (RO) Compressed RFS Image";
-				read-only;
-			};
-
-			partition@a00000 {
-				/* 7MB for JFFS2 based Root file System */
-				reg = <0x00a00000 0x00700000>;
-				label = "NAND (RW) JFFS2 Root File System";
-			};
-
-			partition@1100000 {
-				/* 15MB for JFFS2 based Root file System */
-				reg = <0x01100000 0x00f00000>;
-				label = "NAND (RW) Writable User area";
-			};
-		};
-
-		L2switch@2,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "vitesse-7385";
-			reg = <0x2 0x0 0x20000>;
-		};
-
 	};
 
-	soc@ffe00000 {
-		i2c@3000 {
-			rtc@68 {
-				compatible = "dallas,ds1339";
-				reg = <0x68>;
-			};
-		};
-
-		spi@7000 {
-
-			fsl_m25p80@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				compatible = "fsl,espi-flash";
-				reg = <0>;
-				linux,modalias = "fsl_m25p80";
-				modal = "s25sl128b";
-				spi-max-frequency = <50000000>;
-				mode = <0>;
-
-				partition@0 {
-					/* 512KB for u-boot Bootloader Image */
-					reg = <0x0 0x00080000>;
-					label = "SPI (RO) U-Boot Image";
-					read-only;
-				};
-
-				partition@80000 {
-					/* 512KB for DTB Image */
-					reg = <0x00080000 0x00080000>;
-					label = "SPI (RO) DTB Image";
-					read-only;
-				};
-
-				partition@100000 {
-					/* 4MB for Linux Kernel Image */
-					reg = <0x00100000 0x00400000>;
-					label = "SPI (RO) Linux Kernel Image";
-					read-only;
-				};
-
-				partition@500000 {
-					/* 4MB for Compressed RFS Image */
-					reg = <0x00500000 0x00400000>;
-					label = "SPI (RO) Compressed RFS Image";
-					read-only;
-				};
-
-				partition@900000 {
-					/* 7MB for JFFS2 based RFS */
-					reg = <0x00900000 0x00700000>;
-					label = "SPI (RW) JFFS2 RFS";
-				};
-			};
-		};
-
-		mdio@24000 {
-
-			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x0>;
-			};
-
-			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
-				reg = <0x1>;
-			};
-		};
-
-		mdio@25000 {
-
-			tbi0: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-		};
-
-		enet0: ethernet@b0000 {
-			fixed-link = <1 1 1000 0 0>;
-			phy-connection-type = "rgmii-id";
-
-		};
-
-		enet1: ethernet@b1000 {
-			phy-handle = <&phy0>;
-			tbi-handle = <&tbi0>;
-			phy-connection-type = "sgmii";
-
-		};
-
-		enet2: ethernet@b2000 {
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-
-		};
-
-		usb@22000 {
-			phy_type = "ulpi";
-		};
-
-		/* USB2 is shared with localbus, so it must be disabled
-		   by default. We can't put 'status = "disabled";' here
-		   since U-Boot doesn't clear the status property when
-		   it enables USB2. OTOH, U-Boot does create a new node
-		   when there isn't any. So, just comment it out.
-		usb@23000 {
-			phy_type = "ulpi";
-		};
-		*/
-
+	board_soc: soc: soc@ffe00000 {
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
 	};
 
 	pci0: pcie@ffe09000 {
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
+		reg = <0 0xffe09000 0 0x1000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -281,21 +47,10 @@
 	};
 
 	pci1: pcie@ffe0a000 {
+		reg = <0 0xffe0a000 0 0x1000>;
 		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
 				  0x0 0x20000000
@@ -306,3 +61,6 @@
 		};
 	};
 };
+
+/include/ "p1020rdb.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb.dtsi b/arch/powerpc/boot/dts/p1020rdb.dtsi
new file mode 100644
index 0000000..b5bd86f
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb.dtsi
@@ -0,0 +1,247 @@
+/*
+ * P1020 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x1000000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		partition@0 {
+			/* This location must not be altered  */
+			/* 256KB for Vitesse 7385 Switch firmware */
+			reg = <0x0 0x00040000>;
+			label = "NOR (RO) Vitesse-7385 Firmware";
+			read-only;
+		};
+
+		partition@40000 {
+			/* 256KB for DTB Image */
+			reg = <0x00040000 0x00040000>;
+			label = "NOR (RO) DTB Image";
+			read-only;
+		};
+
+		partition@80000 {
+			/* 3.5 MB for Linux Kernel Image */
+			reg = <0x00080000 0x00380000>;
+			label = "NOR (RO) Linux Kernel Image";
+			read-only;
+		};
+
+		partition@400000 {
+			/* 11MB for JFFS2 based Root file System */
+			reg = <0x00400000 0x00b00000>;
+			label = "NOR (RW) JFFS2 Root File System";
+		};
+
+		partition@f00000 {
+			/* This location must not be altered  */
+			/* 512KB for u-boot Bootloader Image */
+			/* 512KB for u-boot Environment Variables */
+			reg = <0x00f00000 0x00100000>;
+			label = "NOR (RO) U-Boot Image";
+			read-only;
+		};
+	};
+
+	nand@1,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,p1020-fcm-nand",
+			     "fsl,elbc-fcm-nand";
+		reg = <0x1 0x0 0x40000>;
+
+		partition@0 {
+			/* This location must not be altered  */
+			/* 1MB for u-boot Bootloader Image */
+			reg = <0x0 0x00100000>;
+			label = "NAND (RO) U-Boot Image";
+			read-only;
+		};
+
+		partition@100000 {
+			/* 1MB for DTB Image */
+			reg = <0x00100000 0x00100000>;
+			label = "NAND (RO) DTB Image";
+			read-only;
+		};
+
+		partition@200000 {
+			/* 4MB for Linux Kernel Image */
+			reg = <0x00200000 0x00400000>;
+			label = "NAND (RO) Linux Kernel Image";
+			read-only;
+		};
+
+		partition@600000 {
+			/* 4MB for Compressed Root file System Image */
+			reg = <0x00600000 0x00400000>;
+			label = "NAND (RO) Compressed RFS Image";
+			read-only;
+		};
+
+		partition@a00000 {
+			/* 7MB for JFFS2 based Root file System */
+			reg = <0x00a00000 0x00700000>;
+			label = "NAND (RW) JFFS2 Root File System";
+		};
+
+		partition@1100000 {
+			/* 15MB for JFFS2 based Root file System */
+			reg = <0x01100000 0x00f00000>;
+			label = "NAND (RW) Writable User area";
+		};
+	};
+
+	L2switch@2,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "vitesse-7385";
+		reg = <0x2 0x0 0x20000>;
+	};
+};
+
+&board_soc {
+	i2c@3000 {
+		rtc@68 {
+			compatible = "dallas,ds1339";
+			reg = <0x68>;
+		};
+	};
+
+	spi@7000 {
+		flash@0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "spansion,s25sl12801";
+			reg = <0>;
+			spi-max-frequency = <40000000>; /* input clock */
+
+			partition@u-boot {
+				/* 512KB for u-boot Bootloader Image */
+				reg = <0x0 0x00080000>;
+				label = "u-boot";
+				read-only;
+			};
+
+			partition@dtb {
+				/* 512KB for DTB Image */
+				reg = <0x00080000 0x00080000>;
+				label = "dtb";
+				read-only;
+			};
+
+			partition@kernel {
+				/* 4MB for Linux Kernel Image */
+				reg = <0x00100000 0x00400000>;
+				label = "kernel";
+				read-only;
+			};
+
+			partition@fs {
+				/* 4MB for Compressed RFS Image */
+				reg = <0x00500000 0x00400000>;
+				label = "file system";
+				read-only;
+			};
+
+			partition@jffs-fs {
+				/* 7MB for JFFS2 based RFS */
+				reg = <0x00900000 0x00700000>;
+				label = "file system jffs2";
+			};
+		};
+	};
+
+	usb@22000 {
+		phy_type = "ulpi";
+	};
+
+	/* USB2 is shared with localbus, so it must be disabled
+	   by default. We can't put 'status = "disabled";' here
+	   since U-Boot doesn't clear the status property when
+	   it enables USB2. OTOH, U-Boot does create a new node
+	   when there isn't any. So, just comment it out.
+	usb@23000 {
+		phy_type = "ulpi";
+	};
+	*/
+
+	mdio@24000 {
+		phy0: ethernet-phy@0 {
+			interrupt-parent = <&mpic>;
+			interrupts = <3 1>;
+			reg = <0x0>;
+		};
+
+		phy1: ethernet-phy@1 {
+			interrupt-parent = <&mpic>;
+			interrupts = <2 1>;
+			reg = <0x1>;
+		};
+
+		tbi-phy@2 {
+			device_type = "tbi-phy";
+			reg = <0x2>;
+		};
+	};
+
+	mdio@25000 {
+		tbi0: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	enet0: ethernet@b0000 {
+		fixed-link = <1 1 1000 0 0>;
+		phy-connection-type = "rgmii-id";
+
+	};
+
+	enet1: ethernet@b1000 {
+		phy-handle = <&phy0>;
+		tbi-handle = <&tbi0>;
+		phy-connection-type = "sgmii";
+	};
+
+	enet2: ethernet@b2000 {
+		phy-handle = <&phy1>;
+		phy-connection-type = "rgmii-id";
+	};
+};
diff --git a/arch/powerpc/boot/dts/p1020rdb_36b.dts b/arch/powerpc/boot/dts/p1020rdb_36b.dts
new file mode 100644
index 0000000..bdbdb60
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb_36b.dts
@@ -0,0 +1,66 @@
+/*
+ * P1020 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+	model = "fsl,P1020RDB";
+	compatible = "fsl,P1020RDB";
+
+	memory {
+		device_type = "memory";
+	};
+
+	board_lbc: lbc: localbus@fffe05000 {
+		reg = <0xf 0xffe05000 0 0x1000>;
+
+		/* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+		ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+			  0x1 0x0 0xf 0xffa00000 0x00040000
+			  0x2 0x0 0xf 0xffb00000 0x00020000>;
+	};
+
+	board_soc: soc: soc@fffe00000 {
+		ranges = <0x0 0xf 0xffe00000 0x100000>;
+	};
+
+	pci0: pcie@fffe09000 {
+		reg = <0xf 0xffe09000 0 0x1000>;
+		ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0xc0000000
+				  0x2000000 0x0 0xc0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+
+	pci1: pcie@fffe0a000 {
+		reg = <0xf 0xffe0a000 0 0x1000>;
+		ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+			  0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+		pcie@0 {
+			ranges = <0x2000000 0x0 0x80000000
+				  0x2000000 0x0 0x80000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x100000>;
+		};
+	};
+};
+
+/include/ "p1020rdb.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
index f0bf7f4..41b4585 100644
--- a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
@@ -16,7 +16,7 @@
  * option) any later version.
  */
 
-/include/ "p1020si.dtsi"
+/include/ "p1020rdb.dts"
 
 / {
 	model = "fsl,P1020RDB";
@@ -32,7 +32,7 @@
 
 	cpus {
 		PowerPC,P1020@1 {
-		status = "disabled";
+			status = "disabled";
 		};
 	};
 
@@ -45,169 +45,19 @@
 	};
 
 	soc@ffe00000 {
-		i2c@3000 {
-			rtc@68 {
-				compatible = "dallas,ds1339";
-				reg = <0x68>;
-			};
-		};
-
 		serial1: serial@4600 {
 			status = "disabled";
 		};
 
-		spi@7000 {
-			fsl_m25p80@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				compatible = "fsl,espi-flash";
-				reg = <0>;
-				linux,modalias = "fsl_m25p80";
-				spi-max-frequency = <40000000>;
-
-				partition@0 {
-					/* 512KB for u-boot Bootloader Image */
-					reg = <0x0 0x00080000>;
-					label = "SPI (RO) U-Boot Image";
-					read-only;
-				};
-
-				partition@80000 {
-					/* 512KB for DTB Image */
-					reg = <0x00080000 0x00080000>;
-					label = "SPI (RO) DTB Image";
-					read-only;
-				};
-
-				partition@100000 {
-					/* 4MB for Linux Kernel Image */
-					reg = <0x00100000 0x00400000>;
-					label = "SPI (RO) Linux Kernel Image";
-					read-only;
-				};
-
-				partition@500000 {
-					/* 4MB for Compressed RFS Image */
-					reg = <0x00500000 0x00400000>;
-					label = "SPI (RO) Compressed RFS Image";
-					read-only;
-				};
-
-				partition@900000 {
-					/* 7MB for JFFS2 based RFS */
-					reg = <0x00900000 0x00700000>;
-					label = "SPI (RW) JFFS2 RFS";
-				};
-			};
-		};
-
-		mdio@24000 {
-			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x0>;
-			};
-			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
-				reg = <0x1>;
-			};
-		};
-
-		mdio@25000 {
-			tbi0: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-		};
-
 		enet0: ethernet@b0000 {
 			status = "disabled";
 		};
 
-		enet1: ethernet@b1000 {
-			phy-handle = <&phy0>;
-			tbi-handle = <&tbi0>;
-			phy-connection-type = "sgmii";
-		};
-
-		enet2: ethernet@b2000 {
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-		};
-
-		usb@22000 {
-			phy_type = "ulpi";
-		};
-
-		/* USB2 is shared with localbus, so it must be disabled
-		   by default. We can't put 'status = "disabled";' here
-		   since U-Boot doesn't clear the status property when
-		   it enables USB2. OTOH, U-Boot does create a new node
-		   when there isn't any. So, just comment it out.
-		usb@23000 {
-			phy_type = "ulpi";
-		};
-		*/
-
 		mpic: pic@40000 {
 			protected-sources = <
 			42 29 30 34	/* serial1, enet0-queue-group0 */
 			17 18 24 45	/* enet0-queue-group1, crypto */
 			>;
 		};
-
-	};
-
-	pci0: pcie@ffe09000 {
-		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
-			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0xa0000000
-				  0x2000000 0x0 0xa0000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x100000>;
-		};
-	};
-
-	pci1: pcie@ffe0a000 {
-		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
-			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0x80000000
-				  0x2000000 0x0 0x80000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x100000>;
-		};
 	};
 };
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
index 6ec0220..5174538 100644
--- a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
@@ -15,7 +15,7 @@
  * option) any later version.
  */
 
-/include/ "p1020si.dtsi"
+/include/ "p1020rdb.dts"
 
 / {
 	model = "fsl,P1020RDB";
@@ -28,7 +28,7 @@
 
 	cpus {
 		PowerPC,P1020@0 {
-		status = "disabled";
+			status = "disabled";
 		};
 	};
 
@@ -85,12 +85,6 @@
 			status = "disabled";
 		};
 
-		enet0: ethernet@b0000 {
-			fixed-link = <1 1 1000 0 0>;
-			phy-connection-type = "rgmii-id";
-
-		};
-
 		enet1: ethernet@b1000 {
 			status = "disabled";
 		};
@@ -135,7 +129,6 @@
 		global-utilities@e0000 {	//global utilities block
 			status = "disabled";
 		};
-
 	};
 
 	pci0: pcie@ffe09000 {
diff --git a/arch/powerpc/boot/dts/p1020si.dtsi b/arch/powerpc/boot/dts/p1020si.dtsi
deleted file mode 100644
index 5c5acb6..0000000
--- a/arch/powerpc/boot/dts/p1020si.dtsi
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * P1020si Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
-	compatible = "fsl,P1020";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,P1020@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			next-level-cache = <&L2>;
-		};
-
-		PowerPC,P1020@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			next-level-cache = <&L2>;
-		};
-	};
-
-	localbus@ffe05000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
-		reg = <0 0xffe05000 0 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
-	};
-
-	soc@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "fsl,p1020-immr", "simple-bus";
-		ranges = <0x0  0x0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,p1020-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <16 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,p1020-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		spi@7000 {
-			cell-index = <0>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2>;
-			interrupt-parent = <&mpic>;
-			mode = "cpu";
-		};
-
-		gpio: gpio-controller@f000 {
-			#gpio-cells = <2>;
-			compatible = "fsl,mpc8572-gpio";
-			reg = <0xf000 0x100>;
-			interrupts = <47 0x2>;
-			interrupt-parent = <&mpic>;
-			gpio-controller;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p1020-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x40000>; // L2,256K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		mdio@24000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-mdio";
-			reg = <0x24000 0x1000 0xb0030 0x4>;
-
-		};
-
-		mdio@25000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-tbi";
-			reg = <0x25000 0x1000 0xb1030 0x4>;
-
-		};
-
-		enet0: ethernet@b0000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
-
-			queue-group@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb0000 0x1000>;
-				interrupts = <29 2 30 2 34 2>;
-			};
-
-			queue-group@1 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb4000 0x1000>;
-				interrupts = <17 2 18 2 24 2>;
-			};
-		};
-
-		enet1: ethernet@b1000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
-
-			queue-group@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb1000 0x1000>;
-				interrupts = <35 2 36 2 40 2>;
-			};
-
-			queue-group@1 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb5000 0x1000>;
-				interrupts = <51 2 52 2 67 2>;
-			};
-		};
-
-		enet2: ethernet@b2000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
-
-			queue-group@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb2000 0x1000>;
-				interrupts = <31 2 32 2 33 2>;
-			};
-
-			queue-group@1 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xb6000 0x1000>;
-				interrupts = <25 2 26 2 27 2>;
-			};
-		};
-
-		usb@22000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl-usb2-dr";
-			reg = <0x22000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
-		};
-
-		/* USB2 is shared with localbus, so it must be disabled
-		   by default. We can't put 'status = "disabled";' here
-		   since U-Boot doesn't clear the status property when
-		   it enables USB2. OTOH, U-Boot does create a new node
-		   when there isn't any. So, just comment it out.
-		usb@23000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl-usb2-dr";
-			reg = <0x23000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <46 0x2>;
-			phy_type = "ulpi";
-		};
-		*/
-
-		sdhci@2e000 {
-			compatible = "fsl,p1020-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x2>;
-			interrupt-parent = <&mpic>;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
-				     "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0xbfe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,p1020-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,p1020-guts","fsl,p2020-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-	};
-
-	pci0: pcie@ffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
-	};
-
-	pci1: pcie@ffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
-	};
-};
diff --git a/arch/powerpc/boot/dts/p1021mds.dts b/arch/powerpc/boot/dts/p1021mds.dts
index ad5b852..d954079 100644
--- a/arch/powerpc/boot/dts/p1021mds.dts
+++ b/arch/powerpc/boot/dts/p1021mds.dts
@@ -9,53 +9,22 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "fsl/p1021si-pre.dtsi"
 / {
 	model = "fsl,P1021";
 	compatible = "fsl,P1021MDS";
-	#address-cells = <2>;
-	#size-cells = <2>;
 
 	aliases {
-		serial0 = &serial0;
-		serial1 = &serial1;
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
 		ethernet3 = &enet3;
 		ethernet4 = &enet4;
-		pci0 = &pci0;
-		pci1 = &pci1;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,P1021@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			next-level-cache = <&L2>;
-		};
-
-		PowerPC,P1021@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			next-level-cache = <&L2>;
-		};
 	};
 
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@ffe05000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
-		reg = <0 0xffe05000 0 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
+	lbc: localbus@ffe05000 {
+		reg = <0x0 0xffe05000 0x0 0x1000>;
 
 		/* NAND Flash, BCSR, PMC0/1*/
 		ranges = <0x0 0x0 0x0 0xfc000000 0x02000000
@@ -138,99 +107,26 @@
 		};
 	};
 
-	soc@ffe00000 {
-
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
+	soc: soc@ffe00000 {
 		compatible = "fsl,p1021-immr", "simple-bus";
-		ranges = <0x0  0x0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,p1021-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <16 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,p1021-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
 
 		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
 			rtc@68 {
 				compatible = "dallas,ds1374";
 				reg = <0x68>;
 			};
 		};
 
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
 		spi@7000 {
-			cell-index = <0>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2>;
-			interrupt-parent = <&mpic>;
-			espi,num-ss-bits = <4>;
-			mode = "cpu";
 
-			fsl_m25p80@0 {
+			flash@0 {
 				#address-cells = <1>;
 				#size-cells = <1>;
-				compatible = "fsl,espi-flash";
+				compatible = "spansion,s25sl12801";
 				reg = <0>;
-				linux,modalias = "fsl_m25p80";
 				spi-max-frequency = <40000000>; /* input clock */
+
 				partition@u-boot {
 					label = "u-boot-spi";
 					reg = <0x00000000 0x00100000>;
@@ -253,237 +149,49 @@
 			};
 		};
 
-		gpio: gpio-controller@f000 {
-			#gpio-cells = <2>;
-			compatible = "fsl,mpc8572-gpio";
-			reg = <0xf000 0x100>;
-			interrupts = <47 0x2>;
-			interrupt-parent = <&mpic>;
-			gpio-controller;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p1021-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x40000>; // L2,256K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
 		usb@22000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl-usb2-dr";
-			reg = <0x22000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
 			phy_type = "ulpi";
 		};
 
-		 mdio@24000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-mdio";
-			reg = <0x24000 0x1000 0xb0030 0x4>;
-
+		mdio@24000 {
 			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <1 1>;
+				interrupts = <1 1 0 0>;
 				reg = <0x0>;
 			};
 			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <2 1>;
+				interrupts = <2 1 0 0>;
 				reg = <0x1>;
 			};
 			phy4: ethernet-phy@4 {
-				interrupt-parent = <&mpic>;
 				reg = <0x4>;
 			};
+			tbi-phy@5 {
+				device_type = "tbi-phy";
+				reg = <0x5>;
+			};
 		};
 
 		mdio@25000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-tbi";
-			reg = <0x25000 0x1000 0xb1030 0x4>;
 			tbi0: tbi-phy@11 {
 				reg = <0x11>;
 				device_type = "tbi-phy";
 			};
 		};
 
-		enet0: ethernet@B0000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
+		ethernet@b0000 {
 			phy-handle = <&phy0>;
 			phy-connection-type = "rgmii-id";
-			queue-group@0{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB0000 0x1000>;
-				interrupts = <29 2 30 2 34 2>;
-			};
-			queue-group@1{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB4000 0x1000>;
-				interrupts = <17 2 18 2 24 2>;
-			};
 		};
 
-		enet1: ethernet@B1000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
+		ethernet@b1000 {
 			phy-handle = <&phy4>;
 			tbi-handle = <&tbi0>;
 			phy-connection-type = "sgmii";
-			queue-group@0{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB1000 0x1000>;
-				interrupts = <35 2 36 2 40 2>;
-			};
-			queue-group@1{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB5000 0x1000>;
-				interrupts = <51 2 52 2 67 2>;
-			};
 		};
 
-		enet2: ethernet@B2000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupt-parent = <&mpic>;
+		ethernet@b2000 {
 			phy-handle = <&phy1>;
 			phy-connection-type = "rgmii-id";
-			queue-group@0{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB2000 0x1000>;
-				interrupts = <31 2 32 2 33 2>;
-			};
-			queue-group@1{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB6000 0x1000>;
-				interrupts = <25 2 26 2 27 2>;
-			};
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,p1021-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x2>;
-			interrupt-parent = <&mpic>;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.3", "fsl,sec3.1",
-				     "fsl,sec3.0", "fsl,sec2.4",
-				     "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x97c>;
-			fsl,descriptor-types-mask = <0x3a30abf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,p1021-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,p1021-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
 		};
 
 		par_io@e0100 {
@@ -499,8 +207,7 @@
 					0x1  0x13 0x1  0x0  0x1  0x0    /* QE_MUX_MDC */
 					0x1  0x14 0x3  0x0  0x1  0x0    /* QE_MUX_MDIO */
 					0x0  0x17 0x2  0x0  0x2  0x0    /* CLK12 */
-					0x0  0x18 0x2  0x0  0x1  0x0    /* CLK9
-*/
+					0x0  0x18 0x2  0x0  0x1  0x0    /* CLK9 */
 					0x0  0x7  0x1  0x0  0x2  0x0    /* ENET1_TXD0_SER1_TXD0 */
 					0x0  0x9  0x1  0x0  0x2  0x0    /* ENET1_TXD1_SER1_TXD1 */
 					0x0  0xb  0x1  0x0  0x2  0x0    /* ENET1_TXD2_SER1_TXD2 */
@@ -535,31 +242,10 @@
 	};
 
 	pci0: pcie@ffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 4 1
-			0000 0 0 2 &mpic 5 1
-			0000 0 0 3 &mpic 6 1
-			0000 0 0 4 &mpic 7 1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -571,31 +257,10 @@
 	};
 
 	pci1: pcie@ffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 0 1
-			0000 0 0 2 &mpic 1 1
-			0000 0 0 3 &mpic 2 1
-			0000 0 0 4 &mpic 3 1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xc0000000
 				  0x2000000 0x0 0xc0000000
 				  0x0 0x20000000
@@ -606,36 +271,16 @@
 		};
 	};
 
-	qe@ffe80000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "qe";
-		compatible = "fsl,qe";
+	qe: qe@ffe80000 {
 		ranges = <0x0 0x0 0xffe80000 0x40000>;
 		reg = <0 0xffe80000 0 0x480>;
 		brg-frequency = <0>;
 		bus-frequency = <0>;
-		fsl,qe-num-riscs = <1>;
-		fsl,qe-num-snums = <28>;
 		status = "disabled"; /* no firmware loaded */
 
-		qeic: interrupt-controller@80 {
-			interrupt-controller;
-			compatible = "fsl,qe-ic";
-			#address-cells = <0>;
-			#interrupt-cells = <1>;
-			reg = <0x80 0x80>;
-			interrupts = <63 2 60 2>; //high:47 low:44
-			interrupt-parent = <&mpic>;
-		};
-
 		enet3: ucc@2000 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <1>;
-			reg = <0x2000 0x200>;
-			interrupts = <32>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "clk12";
 			tx-clock-name = "clk9";
@@ -645,20 +290,15 @@
 		};
 
 		mdio@2120 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x2120 0x18>;
-			compatible = "fsl,ucc-mdio";
-
 			qe_phy0: ethernet-phy@0 {
 				interrupt-parent = <&mpic>;
-				interrupts = <4 1>;
+				interrupts = <4 1 0 0>;
 				reg = <0x0>;
 				device_type = "ethernet-phy";
 			};
 			qe_phy1: ethernet-phy@03 {
 				interrupt-parent = <&mpic>;
-				interrupts = <5 1>;
+				interrupts = <5 1 0 0>;
 				reg = <0x3>;
 				device_type = "ethernet-phy";
 			};
@@ -671,10 +311,6 @@
 		enet4: ucc@2400 {
 			device_type = "network";
 			compatible = "ucc_geth";
-			cell-index = <5>;
-			reg = <0x2400 0x200>;
-			interrupts = <40>;
-			interrupt-parent = <&qeic>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			rx-clock-name = "none";
 			tx-clock-name = "clk13";
@@ -682,18 +318,7 @@
 			phy-handle = <&qe_phy1>;
 			phy-connection-type = "rmii";
 		};
-
-		muram@10000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,qe-muram", "fsl,cpm-muram";
-			ranges = <0x0 0x10000 0x6000>;
-
-			data-only@0 {
-				compatible = "fsl,qe-muram-data",
-				"fsl,cpm-muram-data";
-				reg = <0x0 0x6000>;
-			};
-		};
 	};
 };
+
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index b9b8719..ef95717 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -8,57 +8,36 @@
  * kind, whether express or implied.
  */
 
-/dts-v1/;
+/include/ "fsl/p1022si-pre.dtsi"
 / {
-	model = "fsl,P1022";
+	model = "fsl,P1022DS";
 	compatible = "fsl,P1022DS";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&mpic>;
-
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,P1022@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			next-level-cache = <&L2>;
-		};
-
-		PowerPC,P1022@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			next-level-cache = <&L2>;
-		};
-	};
 
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@fffe05000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
-		reg = <0 0xffe05000 0 0x1000>;
-		interrupts = <19 2 0 0>;
-
+	lbc: localbus@fffe05000 {
+		reg = <0xf 0xffe05000 0 0x1000>;
 		ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
 			  0x1 0x0 0xf 0xe0000000 0x08000000
-			  0x2 0x0 0x0 0xffa00000 0x00040000
+			  0x2 0x0 0xf 0xff800000 0x00040000
 			  0x3 0x0 0xf 0xffdf0000 0x00008000>;
 
+		/*
+		 * This node is used to access the pixis via "indirect" mode,
+		 * which is done by writing the pixis register index to chip
+		 * select 0 and the value to/from chip select 1.  Indirect
+		 * mode is the only way to access the pixis when DIU video
+		 * is enabled.  Note that this assumes that the first column
+		 * of the 'ranges' property above is the chip select number.
+		 */
+		board-control@0,0 {
+			compatible = "fsl,p1022ds-indirect-pixis";
+			reg = <0x0 0x0 1	/* CS0 */
+			       0x1 0x0 1>;	/* CS1 */
+		};
+
 		nor@0,0 {
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -161,51 +140,10 @@
 		};
 	};
 
-	soc@fffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "fsl,p1022-immr", "simple-bus";
+	soc: soc@fffe00000 {
 		ranges = <0x0 0xf 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,p1022-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <16 2 0 0>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,p1022-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupts = <16 2 0 0>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2 0 0>;
-			dfsrr;
-		};
 
 		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2 0 0>;
-			dfsrr;
-
 			wm8776:codec@1a {
 				compatible = "wlf,wm8776";
 				reg = <0x1a>;
@@ -216,41 +154,14 @@
 			};
 		};
 
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2 0 0>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2 0 0>;
-		};
-
 		spi@7000 {
-			cell-index = <0>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2 0 0>;
-			espi,num-ss-bits = <4>;
-			mode = "cpu";
-
-			fsl_m25p80@0 {
+			flash@0 {
 				#address-cells = <1>;
 				#size-cells = <1>;
-				compatible = "fsl,espi-flash";
+				compatible = "spansion,s25sl12801";
 				reg = <0>;
-				linux,modalias = "fsl_m25p80";
 				spi-max-frequency = <40000000>; /* input clock */
+
 				partition@0 {
 					label = "u-boot-spi";
 					reg = <0x00000000 0x00100000>;
@@ -274,115 +185,20 @@
 		};
 
 		ssi@15000 {
-			compatible = "fsl,mpc8610-ssi";
-			cell-index = <0>;
-			reg = <0x15000 0x100>;
-			interrupts = <75 2 0 0>;
 			fsl,mode = "i2s-slave";
 			codec-handle = <&wm8776>;
-			fsl,playback-dma = <&dma00>;
-			fsl,capture-dma = <&dma01>;
-			fsl,fifo-depth = <15>;
 			fsl,ssi-asynchronous;
 		};
 
-		dma@c300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0xc300 0x4>;
-			ranges = <0x0 0xc100 0x200>;
-			cell-index = <1>;
-			dma00: dma-channel@0 {
-				compatible = "fsl,ssi-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <76 2 0 0>;
-			};
-			dma01: dma-channel@80 {
-				compatible = "fsl,ssi-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <77 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <78 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <79 2 0 0>;
-			};
-		};
-
-		gpio: gpio-controller@f000 {
-			#gpio-cells = <2>;
-			compatible = "fsl,mpc8572-gpio";
-			reg = <0xf000 0x100>;
-			interrupts = <47 0x2 0 0>;
-			gpio-controller;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p1022-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x40000>; // L2, 256K
-			interrupts = <16 2 0 0>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <20 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <21 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <22 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <23 2 0 0>;
-			};
-		};
-
 		usb@22000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl-usb2-dr";
-			reg = <0x22000 0x1000>;
-			interrupts = <28 0x2 0 0>;
 			phy_type = "ulpi";
 		};
 
-		mdio@24000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-mdio";
-			reg = <0x24000 0x1000 0xb0030 0x4>;
+		usb@23000 {
+			status = "disabled";
+		};
 
+		mdio@24000 {
 			phy0: ethernet-phy@0 {
 				interrupts = <3 1 0 0>;
 				reg = <0x1>;
@@ -391,189 +207,28 @@
 				interrupts = <9 1 0 0>;
 				reg = <0x2>;
 			};
+			tbi-phy@2 {
+				device_type = "tbi-phy";
+				reg = <0x2>;
+			};
 		};
 
-		mdio@25000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,etsec2-mdio";
-			reg = <0x25000 0x1000 0xb1030 0x4>;
-		};
-
-		enet0: ethernet@B0000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			fsl,magic-packet;
-			fsl,wake-on-filer;
-			local-mac-address = [ 00 00 00 00 00 00 ];
+		ethernet@b0000 {
 			phy-handle = <&phy0>;
 			phy-connection-type = "rgmii-id";
-			queue-group@0{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB0000 0x1000>;
-				interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
-			};
-			queue-group@1{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB4000 0x1000>;
-				interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>;
-			};
 		};
 
-		enet1: ethernet@B1000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "fsl,etsec2";
-			fsl,num_rx_queues = <0x8>;
-			fsl,num_tx_queues = <0x8>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
+		ethernet@b1000 {
 			phy-handle = <&phy1>;
 			phy-connection-type = "rgmii-id";
-			queue-group@0{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB1000 0x1000>;
-				interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
-			};
-			queue-group@1{
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0xB5000 0x1000>;
-				interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>;
-			};
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,p1022-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x2 0 0>;
-			fsl,sdhci-auto-cmd12;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.3", "fsl,sec3.1", "fsl,sec3.0",
-				     "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
-				     "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 0 0 58 2 0 0>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0x97c>;
-			fsl,descriptor-types-mask = <0x3a30abf>;
-		};
-
-		sata@18000 {
-			compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
-			reg = <0x18000 0x1000>;
-			cell-index = <1>;
-			interrupts = <74 0x2 0 0>;
-		};
-
-		sata@19000 {
-			compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
-			reg = <0x19000 0x1000>;
-			cell-index = <2>;
-			interrupts = <41 0x2 0 0>;
-		};
-
-		power@e0070{
-			compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
-			reg = <0xe0070 0x20>;
-		};
-
-		display@10000 {
-			compatible = "fsl,diu", "fsl,p1022-diu";
-			reg = <0x10000 1000>;
-			interrupts = <64 2 0 0>;
-		};
-
-		timer@41100 {
-			compatible = "fsl,mpic-global-timer";
-			reg = <0x41100 0x100 0x41300 4>;
-			interrupts = <0 0 3 0
-			              1 0 3 0
-			              2 0 3 0
-			              3 0 3 0>;
-		};
-
-		timer@42100 {
-			compatible = "fsl,mpic-global-timer";
-			reg = <0x42100 0x100 0x42300 4>;
-			interrupts = <4 0 3 0
-			              5 0 3 0
-			              6 0 3 0
-			              7 0 3 0>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <4>;
-			reg = <0x40000 0x40000>;
-			compatible = "fsl,mpic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,p1022-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0 0 0
-				0xe1 0 0 0
-				0xe2 0 0 0
-				0xe3 0 0 0
-				0xe4 0 0 0
-				0xe5 0 0 0
-				0xe6 0 0 0
-				0xe7 0 0 0>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,p1022-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
 		};
 	};
 
 	pci0: pcie@fffe09000 {
-		compatible = "fsl,p1022-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupts = <16 2 0 0>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 4 1
-			0000 0 0 2 &mpic 5 1
-			0000 0 0 3 &mpic 6 1
-			0000 0 0 4 &mpic 7 1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xe0000000
 				  0x2000000 0x0 0xe0000000
 				  0x0 0x20000000
@@ -585,30 +240,11 @@
 	};
 
 	pci1: pcie@fffe0a000 {
-		compatible = "fsl,p1022-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupts = <16 2 0 0>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 0 1
-			0000 0 0 2 &mpic 1 1
-			0000 0 0 3 &mpic 2 1
-			0000 0 0 4 &mpic 3 1
-			>;
 		pcie@0 {
 			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xe0000000
 				  0x2000000 0x0 0xe0000000
 				  0x0 0x20000000
@@ -619,32 +255,11 @@
 		};
 	};
 
-
 	pci2: pcie@fffe0b000 {
-		compatible = "fsl,p1022-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0xf 0xffe0b000 0 0x1000>;
-		bus-range = <0 255>;
-		ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+		ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupts = <16 2 0 0>;
-		interrupt-map-mask = <0xf800 0 0 7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0 0 1 &mpic 8 1
-			0000 0 0 2 &mpic 9 1
-			0000 0 0 3 &mpic 10 1
-			0000 0 0 4 &mpic 11 1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xe0000000
 				  0x2000000 0x0 0xe0000000
 				  0x0 0x20000000
@@ -655,3 +270,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p1022si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index d3b4782..beb6cb1 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -34,137 +34,30 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/dts-v1/;
+/include/ "fsl/p1023si-pre.dtsi"
 
 / {
 	model = "fsl,P1023";
 	compatible = "fsl,P1023RDS";
 	#address-cells = <2>;
 	#size-cells = <2>;
-
-	aliases {
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-
-		crypto = &crypto;
-		sec_jr0 = &sec_jr0;
-		sec_jr1 = &sec_jr1;
-		sec_jr2 = &sec_jr2;
-		sec_jr3 = &sec_jr3;
-		rtic_a = &rtic_a;
-		rtic_b = &rtic_b;
-		rtic_c = &rtic_c;
-		rtic_d = &rtic_d;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: PowerPC,P1023@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			next-level-cache = <&L2>;
-		};
-
-		cpu1: PowerPC,P1023@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			next-level-cache = <&L2>;
-		};
-	};
+	interrupt-parent = <&mpic>;
 
 	memory {
 		device_type = "memory";
 	};
 
-	soc@ff600000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "fsl,p1023-immr", "simple-bus";
+	soc: soc@ff600000 {
 		ranges = <0x0 0x0 0xff600000 0x200000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,p1023-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <16 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,p1023-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
 
 		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
 			rtc@68 {
 				compatible = "dallas,ds1374";
 				reg = <0x68>;
 			};
 		};
 
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
 		spi@7000 {
-			cell-index = <0>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,p1023-espi", "fsl,mpc8536-espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2>;
-			interrupt-parent = <&mpic>;
-			fsl,espi-num-chipselects = <4>;
-
 			fsl_dataflash@0 {
 				#address-cells = <1>;
 				#size-cells = <1>;
@@ -186,197 +79,14 @@
 			};
 		};
 
-		gpio: gpio-controller@f000 {
-			#gpio-cells = <2>;
-			compatible = "fsl,qoriq-gpio";
-			reg = <0xf000 0x100>;
-			interrupts = <47 0x2>;
-			interrupt-parent = <&mpic>;
-			gpio-controller;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p1023-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x40000>; // L2,256K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
 		usb@22000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl-usb2-dr";
-			reg = <0x22000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
 			dr_mode = "host";
 			phy_type = "ulpi";
 		};
-
-		crypto: crypto@300000 {
-			compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x30000 0x10000>;
-			ranges = <0 0x30000 0x10000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <58 2>;
-
-			sec_jr0: jr@1000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x1000 0x1000>;
-				interrupts = <45 2>;
-			};
-
-			sec_jr1: jr@2000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x2000 0x1000>;
-				interrupts = <45 2>;
-			};
-
-			sec_jr2: jr@3000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x3000 0x1000>;
-				interrupts = <57 2>;
-			};
-
-			sec_jr3: jr@4000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x4000 0x1000>;
-				interrupts = <57 2>;
-			};
-
-			rtic@6000 {
-				compatible = "fsl,sec-v4.2-rtic",
-					     "fsl,sec-v4.0-rtic";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0x6000 0x100>;
-				ranges = <0x0 0x6100 0xe00>;
-
-				rtic_a: rtic-a@0 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x00 0x20 0x100 0x80>;
-				};
-
-				rtic_b: rtic-b@20 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x20 0x20 0x200 0x80>;
-				};
-
-				rtic_c: rtic-c@40 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x40 0x20 0x300 0x80>;
-				};
-
-				rtic_d: rtic-d@60 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x60 0x20 0x500 0x80>;
-				};
-			};
-		};
-
-		power@e0070{
-			compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc",
-			             "fsl,p1022-pmc";
-			reg = <0xe0070 0x20>;
-			etsec1_clk: soc-clk@B0{
-				fsl,pmcdr-mask = <0x00000080>;
-			};
-			etsec2_clk: soc-clk@B1{
-				fsl,pmcdr-mask = <0x00000040>;
-			};
-			etsec3_clk: soc-clk@B2{
-				fsl,pmcdr-mask = <0x00000020>;
-			};
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,p1023-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,p1023-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
 	};
 
-	localbus@ff605000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
+	lbc: localbus@ff605000 {
 		reg = <0 0xff605000 0 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
 
 		/* NOR Flash, BCSR */
 		ranges = <0x0 0x0 0x0 0xee000000 0x02000000
@@ -428,34 +138,18 @@
 	};
 
 	pci0: pcie@ff60a000 {
-		compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
-		cell-index = <1>;
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xff60a000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-			interrupt-map-mask = <0xf800 0 0 7>;
 			/* IRQ[0:3] are pulled up on board, set to active-low */
+			interrupt-map-mask = <0xf800 0 0 7>;
 			interrupt-map = <
 				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 0 1
-				0000 0 0 2 &mpic 1 1
-				0000 0 0 3 &mpic 2 1
-				0000 0 0 4 &mpic 3 1
+				0000 0 0 1 &mpic 0 1 0 0
+				0000 0 0 2 &mpic 1 1 0 0
+				0000 0 0 3 &mpic 2 1 0 0
+				0000 0 0 4 &mpic 3 1 0 0
 				>;
 			ranges = <0x2000000 0x0 0xc0000000
 				  0x2000000 0x0 0xc0000000
@@ -467,38 +161,22 @@
 		};
 	};
 
-	pci1: pcie@ff609000 {
-		compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
-		cell-index = <2>;
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
+	board_pci1: pci1: pcie@ff609000 {
 		reg = <0 0xff609000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-			interrupt-map-mask = <0xf800 0 0 7>;
 			/*
 			 * IRQ[4:6] only for PCIe, set to active-high,
 			 * IRQ[7] is pulled up on board, set to active-low
 			 */
+			interrupt-map-mask = <0xf800 0 0 7>;
 			interrupt-map = <
 				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 4 2
-				0000 0 0 2 &mpic 5 2
-				0000 0 0 3 &mpic 6 2
-				0000 0 0 4 &mpic 7 1
+				0000 0 0 1 &mpic 4 2 0 0
+				0000 0 0 2 &mpic 5 2 0 0
+				0000 0 0 3 &mpic 6 2 0 0
+				0000 0 0 4 &mpic 7 1 0 0
 				>;
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
@@ -511,37 +189,21 @@
 	};
 
 	pci2: pcie@ff60b000 {
-		cell-index = <3>;
-		compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
 		reg = <0 0xff60b000 0 0x1000>;
-		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <16 2>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-			interrupt-map-mask = <0xf800 0 0 7>;
 			/*
 			 * IRQ[8:10] are pulled up on board, set to active-low
 			 * IRQ[11] only for PCIe, set to active-high,
 			 */
+			interrupt-map-mask = <0xf800 0 0 7>;
 			interrupt-map = <
 				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 8 1
-				0000 0 0 2 &mpic 9 1
-				0000 0 0 3 &mpic 10 1
-				0000 0 0 4 &mpic 11 2
+				0000 0 0 1 &mpic 8 1 0 0
+				0000 0 0 2 &mpic 9 1 0 0
+				0000 0 0 3 &mpic 10 1 0 0
+				0000 0 0 4 &mpic 11 2 0 0
 				>;
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
@@ -553,3 +215,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts
index 66f03d6..237310c 100644
--- a/arch/powerpc/boot/dts/p2020ds.dts
+++ b/arch/powerpc/boot/dts/p2020ds.dts
@@ -9,30 +9,17 @@
  * option) any later version.
  */
 
-/include/ "p2020si.dtsi"
+/include/ "fsl/p2020si-pre.dtsi"
 
 / {
 	model = "fsl,P2020DS";
 	compatible = "fsl,P2020DS";
 
-	aliases {
-		ethernet0 = &enet0;
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		serial0 = &serial0;
-		serial1 = &serial1;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-	};
-
-
 	memory {
 		device_type = "memory";
 	};
 
-	localbus@ffe05000 {
-		compatible = "fsl,elbc", "simple-bus";
+	board_lbc: lbc: localbus@ffe05000 {
 		ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
 			  0x1 0x0 0x0 0xe0000000 0x08000000
 			  0x2 0x0 0x0 0xffa00000 0x00040000
@@ -40,203 +27,18 @@
 			  0x4 0x0 0x0 0xffa40000 0x00040000
 			  0x5 0x0 0x0 0xffa80000 0x00040000
 			  0x6 0x0 0x0 0xffac0000 0x00040000>;
-
-		nor@0,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "cfi-flash";
-			reg = <0x0 0x0 0x8000000>;
-			bank-width = <2>;
-			device-width = <1>;
-
-			ramdisk@0 {
-				reg = <0x0 0x03000000>;
-				read-only;
-			};
-
-			diagnostic@3000000 {
-				reg = <0x03000000 0x00e00000>;
-				read-only;
-			};
-
-			dink@3e00000 {
-				reg = <0x03e00000 0x00200000>;
-				read-only;
-			};
-
-			kernel@4000000 {
-				reg = <0x04000000 0x00400000>;
-				read-only;
-			};
-
-			jffs2@4400000 {
-				reg = <0x04400000 0x03b00000>;
-			};
-
-			dtb@7f00000 {
-				reg = <0x07f00000 0x00080000>;
-				read-only;
-			};
-
-			u-boot@7f80000 {
-				reg = <0x07f80000 0x00080000>;
-				read-only;
-			};
-		};
-
-		nand@2,0 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,elbc-fcm-nand";
-			reg = <0x2 0x0 0x40000>;
-
-			u-boot@0 {
-				reg = <0x0 0x02000000>;
-				read-only;
-			};
-
-			jffs2@2000000 {
-				reg = <0x02000000 0x10000000>;
-			};
-
-			ramdisk@12000000 {
-				reg = <0x12000000 0x08000000>;
-				read-only;
-			};
-
-			kernel@1a000000 {
-				reg = <0x1a000000 0x04000000>;
-			};
-
-			dtb@1e000000 {
-				reg = <0x1e000000 0x01000000>;
-				read-only;
-			};
-
-			empty@1f000000 {
-				reg = <0x1f000000 0x21000000>;
-			};
-		};
-
-		board-control@3,0 {
-			compatible = "fsl,p2020ds-fpga", "fsl,fpga-ngpixis";
-			reg = <0x3 0x0 0x30>;
-		};
-
-		nand@4,0 {
-			compatible = "fsl,elbc-fcm-nand";
-			reg = <0x4 0x0 0x40000>;
-		};
-
-		nand@5,0 {
-			compatible = "fsl,elbc-fcm-nand";
-			reg = <0x5 0x0 0x40000>;
-		};
-
-		nand@6,0 {
-			compatible = "fsl,elbc-fcm-nand";
-			reg = <0x6 0x0 0x40000>;
-		};
+		reg = <0 0xffe05000 0 0x1000>;
 	};
 
-	soc@ffe00000 {
-
-		usb@22000 {
-			phy_type = "ulpi";
-		};
-
-		mdio@24520 {
-			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x0>;
-			};
-			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x1>;
-			};
-			phy2: ethernet-phy@2 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x2>;
-			};
-			tbi0: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-
-		};
-
-		mdio@25520 {
-			tbi1: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-		};
-
-		mdio@26520 {
-			tbi2: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-
-		};
-
-		ptp_clock@24E00 {
-			compatible = "fsl,etsec-ptp";
-			reg = <0x24E00 0xB0>;
-			interrupts = <68 2 69 2 70 2>;
-			interrupt-parent = < &mpic >;
-			fsl,tclk-period = <5>;
-			fsl,tmr-prsc = <200>;
-			fsl,tmr-add = <0xCCCCCCCD>;
-			fsl,tmr-fiper1 = <0x3B9AC9FB>;
-			fsl,tmr-fiper2 = <0x0001869B>;
-			fsl,max-adj = <249999999>;
-		};
-
-		enet0: ethernet@24000 {
-			tbi-handle = <&tbi0>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "rgmii-id";
-		};
-
-		enet1: ethernet@25000 {
-			tbi-handle = <&tbi1>;
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-
-		};
-
-		enet2: ethernet@26000 {
-			tbi-handle = <&tbi2>;
-			phy-handle = <&phy2>;
-			phy-connection-type = "rgmii-id";
-		};
-
-
-		msi@41600 {
-			compatible = "fsl,mpic-msi";
-		};
+	board_soc: soc: soc@ffe00000 {
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
 	};
 
-	pci0: pcie@ffe08000 {
+	pci2: pcie@ffe08000 {
 		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x8 0x1
-			0000 0x0 0x0 0x2 &mpic 0x9 0x1
-			0000 0x0 0x0 0x3 &mpic 0xa 0x1
-			0000 0x0 0x0 0x4 &mpic 0xb 0x1
-			>;
+		reg = <0 0xffe08000 0 0x1000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
 				  0x0 0x20000000
@@ -247,61 +49,11 @@
 		};
 	};
 
-	pci1: pcie@ffe09000 {
+	board_pci1: pci1: pcie@ffe09000 {
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
-		interrupt-map = <
-
-			// IDSEL 0x11 func 0 - PCI slot 1
-			0x8800 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8800 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 1 - PCI slot 1
-			0x8900 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8900 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 2 - PCI slot 1
-			0x8a00 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8a00 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 3 - PCI slot 1
-			0x8b00 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8b00 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 4 - PCI slot 1
-			0x8c00 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8c00 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 5 - PCI slot 1
-			0x8d00 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8d00 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 6 - PCI slot 1
-			0x8e00 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8e00 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x11 func 7 - PCI slot 1
-			0x8f00 0x0 0x0 0x1 &i8259 0x9 0x2
-			0x8f00 0x0 0x0 0x2 &i8259 0xa 0x2
-
-			// IDSEL 0x1d  Audio
-			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
-			// IDSEL 0x1e Legacy
-			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
-			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
-			// IDSEL 0x1f IDE/SATA
-			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
-			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-			>;
-
+		reg = <0 0xffe09000 0 0x1000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -309,89 +61,14 @@
 				  0x1000000 0x0 0x0
 				  0x1000000 0x0 0x0
 				  0x0 0x10000>;
-			uli1575@0 {
-				reg = <0x0 0x0 0x0 0x0 0x0>;
-				#size-cells = <2>;
-				#address-cells = <3>;
-				ranges = <0x2000000 0x0 0xa0000000
-					  0x2000000 0x0 0xa0000000
-					  0x0 0x20000000
-
-					  0x1000000 0x0 0x0
-					  0x1000000 0x0 0x0
-					  0x0 0x10000>;
-				isa@1e {
-					device_type = "isa";
-					#interrupt-cells = <2>;
-					#size-cells = <1>;
-					#address-cells = <2>;
-					reg = <0xf000 0x0 0x0 0x0 0x0>;
-					ranges = <0x1 0x0 0x1000000 0x0 0x0
-						  0x1000>;
-					interrupt-parent = <&i8259>;
-
-					i8259: interrupt-controller@20 {
-						reg = <0x1 0x20 0x2
-						       0x1 0xa0 0x2
-						       0x1 0x4d0 0x2>;
-						interrupt-controller;
-						device_type = "interrupt-controller";
-						#address-cells = <0>;
-						#interrupt-cells = <2>;
-						compatible = "chrp,iic";
-						interrupts = <4 1>;
-						interrupt-parent = <&mpic>;
-					};
-
-					i8042@60 {
-						#size-cells = <0>;
-						#address-cells = <1>;
-						reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
-						interrupts = <1 3 12 3>;
-						interrupt-parent =
-							<&i8259>;
-
-						keyboard@0 {
-							reg = <0x0>;
-							compatible = "pnpPNP,303";
-						};
-
-						mouse@1 {
-							reg = <0x1>;
-							compatible = "pnpPNP,f03";
-						};
-					};
-
-					rtc@70 {
-						compatible = "pnpPNP,b00";
-						reg = <0x1 0x70 0x2>;
-					};
-
-					gpio@400 {
-						reg = <0x1 0x400 0x80>;
-					};
-				};
-			};
 		};
-
 	};
 
-	pci2: pcie@ffe0a000 {
+	pci0: pcie@ffe0a000 {
 		ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
+		reg = <0 0xffe0a000 0 0x1000>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0xc0000000
 				  0x2000000 0x0 0xc0000000
 				  0x0 0x20000000
@@ -402,3 +79,11 @@
 		};
 	};
 };
+
+/*
+ * p2020ds.dtsi must be last to ensure board_pci1 overrides pci1 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/p2020si-post.dtsi"
+/include/ "p2020ds.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020ds.dtsi b/arch/powerpc/boot/dts/p2020ds.dtsi
new file mode 100644
index 0000000..c1cf6ce
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020ds.dtsi
@@ -0,0 +1,316 @@
+/*
+ * P2020DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x8000000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		ramdisk@0 {
+			reg = <0x0 0x03000000>;
+			read-only;
+		};
+
+		diagnostic@3000000 {
+			reg = <0x03000000 0x00e00000>;
+			read-only;
+		};
+
+		dink@3e00000 {
+			reg = <0x03e00000 0x00200000>;
+			read-only;
+		};
+
+		kernel@4000000 {
+			reg = <0x04000000 0x00400000>;
+			read-only;
+		};
+
+		jffs2@4400000 {
+			reg = <0x04400000 0x03b00000>;
+		};
+
+		dtb@7f00000 {
+			reg = <0x07f00000 0x00080000>;
+			read-only;
+		};
+
+		u-boot@7f80000 {
+			reg = <0x07f80000 0x00080000>;
+			read-only;
+		};
+	};
+
+	nand@2,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,elbc-fcm-nand";
+		reg = <0x2 0x0 0x40000>;
+
+		u-boot@0 {
+			reg = <0x0 0x02000000>;
+			read-only;
+		};
+
+		jffs2@2000000 {
+			reg = <0x02000000 0x10000000>;
+		};
+
+		ramdisk@12000000 {
+			reg = <0x12000000 0x08000000>;
+			read-only;
+		};
+
+		kernel@1a000000 {
+			reg = <0x1a000000 0x04000000>;
+		};
+
+		dtb@1e000000 {
+			reg = <0x1e000000 0x01000000>;
+			read-only;
+		};
+
+		empty@1f000000 {
+			reg = <0x1f000000 0x21000000>;
+		};
+	};
+
+	board-control@3,0 {
+		compatible = "fsl,p2020ds-fpga", "fsl,fpga-ngpixis";
+		reg = <0x3 0x0 0x30>;
+	};
+
+	nand@4,0 {
+		compatible = "fsl,elbc-fcm-nand";
+		reg = <0x4 0x0 0x40000>;
+	};
+
+	nand@5,0 {
+		compatible = "fsl,elbc-fcm-nand";
+		reg = <0x5 0x0 0x40000>;
+	};
+
+	nand@6,0 {
+		compatible = "fsl,elbc-fcm-nand";
+		reg = <0x6 0x0 0x40000>;
+	};
+};
+
+&board_soc {
+	usb@22000 {
+		phy_type = "ulpi";
+	};
+
+	mdio@24520 {
+		phy0: ethernet-phy@0 {
+			interrupts = <3 1 0 0>;
+			reg = <0x0>;
+		};
+		phy1: ethernet-phy@1 {
+			interrupts = <3 1 0 0>;
+			reg = <0x1>;
+		};
+		phy2: ethernet-phy@2 {
+			interrupts = <3 1 0 0>;
+			reg = <0x2>;
+		};
+		tbi0: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+
+	};
+
+	mdio@25520 {
+		tbi1: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+	};
+
+	mdio@26520 {
+		tbi2: tbi-phy@11 {
+			reg = <0x11>;
+			device_type = "tbi-phy";
+		};
+
+	};
+
+	ptp_clock@24e00 {
+		fsl,tclk-period = <5>;
+		fsl,tmr-prsc = <200>;
+		fsl,tmr-add = <0xCCCCCCCD>;
+		fsl,tmr-fiper1 = <0x3B9AC9FB>;
+		fsl,tmr-fiper2 = <0x0001869B>;
+		fsl,max-adj = <249999999>;
+	};
+
+	enet0: ethernet@24000 {
+		tbi-handle = <&tbi0>;
+		phy-handle = <&phy0>;
+		phy-connection-type = "rgmii-id";
+	};
+
+	enet1: ethernet@25000 {
+		tbi-handle = <&tbi1>;
+		phy-handle = <&phy1>;
+		phy-connection-type = "rgmii-id";
+
+	};
+
+	enet2: ethernet@26000 {
+		tbi-handle = <&tbi2>;
+		phy-handle = <&phy2>;
+		phy-connection-type = "rgmii-id";
+	};
+};
+
+&board_pci1 {
+	pcie@0 {
+		interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+		interrupt-map = <
+
+			// IDSEL 0x11 func 0 - PCI slot 1
+			0x8800 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8800 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 1 - PCI slot 1
+			0x8900 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8900 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 2 - PCI slot 1
+			0x8a00 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8a00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 3 - PCI slot 1
+			0x8b00 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8b00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 4 - PCI slot 1
+			0x8c00 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8c00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 5 - PCI slot 1
+			0x8d00 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8d00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 6 - PCI slot 1
+			0x8e00 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8e00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x11 func 7 - PCI slot 1
+			0x8f00 0x0 0x0 0x1 &i8259 0x9 0x2
+			0x8f00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+			// IDSEL 0x1d  Audio
+			0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+			// IDSEL 0x1e Legacy
+			0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+			0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+			// IDSEL 0x1f IDE/SATA
+			0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+			0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+			>;
+
+		uli1575@0 {
+			reg = <0x0 0x0 0x0 0x0 0x0>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			ranges = <0x2000000 0x0 0xa0000000
+				  0x2000000 0x0 0xa0000000
+				  0x0 0x20000000
+
+				  0x1000000 0x0 0x0
+				  0x1000000 0x0 0x0
+				  0x0 0x10000>;
+			isa@1e {
+				device_type = "isa";
+				#interrupt-cells = <2>;
+				#size-cells = <1>;
+				#address-cells = <2>;
+				reg = <0xf000 0x0 0x0 0x0 0x0>;
+				ranges = <0x1 0x0 0x1000000 0x0 0x0
+					  0x1000>;
+				interrupt-parent = <&i8259>;
+
+				i8259: interrupt-controller@20 {
+					reg = <0x1 0x20 0x2
+					       0x1 0xa0 0x2
+					       0x1 0x4d0 0x2>;
+					interrupt-controller;
+					device_type = "interrupt-controller";
+					#address-cells = <0>;
+					#interrupt-cells = <2>;
+					compatible = "chrp,iic";
+					interrupts = <4 1 0 0>;
+					interrupt-parent = <&mpic>;
+				};
+
+				i8042@60 {
+					#size-cells = <0>;
+					#address-cells = <1>;
+					reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+					interrupts = <1 3 12 3>;
+					interrupt-parent =
+						<&i8259>;
+
+					keyboard@0 {
+						reg = <0x0>;
+						compatible = "pnpPNP,303";
+					};
+
+					mouse@1 {
+						reg = <0x1>;
+						compatible = "pnpPNP,f03";
+					};
+				};
+
+				rtc@70 {
+					compatible = "pnpPNP,b00";
+					reg = <0x1 0x70 0x2>;
+				};
+
+				gpio@400 {
+					reg = <0x1 0x400 0x80>;
+				};
+			};
+		};
+	};
+};
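
Everything the old board file carried for the localbus now arrives through &board_lbc above, keyed by chip-select: the first reg cell of each child picks CS0 for the boot NOR, CS2 and CS4-CS6 for the FCM NAND banks, and CS3 for the ngPIXIS board-control FPGA, while the parent's ranges property (which stays in p2020ds.dts) turns each chip-select into a CPU address window. A compilable sketch of that translation; the CS3 window below is illustrative, not copied from the board:

/dts-v1/;

/ {
	#address-cells = <2>;
	#size-cells = <2>;

	localbus@ffe05000 {
		compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
		reg = <0 0xffe05000 0 0x1000>;
		#address-cells = <2>;
		#size-cells = <1>;
		/* <cs 0  parent-addr(2)  size>: CS0 -> 128 MiB NOR, CS3 -> FPGA */
		ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
			  0x3 0x0 0x0 0xffdf0000 0x00008000>;

		nor@0,0 {
			compatible = "cfi-flash";
			reg = <0x0 0x0 0x8000000>;	/* CS0, offset 0, 128 MiB */
			bank-width = <2>;
		};

		board-control@3,0 {
			compatible = "fsl,fpga-ngpixis";
			reg = <0x3 0x0 0x30>;		/* CS3, offset 0, 48 bytes */
		};
	};
};
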
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 1d7a05f..26759a5 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -9,7 +9,7 @@
  * option) any later version.
  */
 
-/include/ "p2020si.dtsi"
+/include/ "fsl/p2020si-pre.dtsi"
 
 / {
 	model = "fsl,P2020RDB";
@@ -29,7 +29,8 @@
 		device_type = "memory";
 	};
 
-	localbus@ffe05000 {
+	lbc: localbus@ffe05000 {
+		reg = <0 0xffe05000 0 0x1000>;
 
 		/* NOR and NAND Flashes */
 		ranges = <0x0 0x0 0x0 0xef000000 0x01000000
@@ -140,7 +141,9 @@
 
 	};
 
-	soc@ffe00000 {
+	soc: soc@ffe00000 {
+		ranges = <0x0 0x0 0xffe00000 0x100000>;
+
 		i2c@3000 {
 			rtc@68 {
 				compatible = "dallas,ds1339";
@@ -148,17 +151,13 @@
 			};
 		};
 
-	spi@7000 {
-
-		fsl_m25p80@0 {
+		spi@7000 {
+			flash@0 {
 				#address-cells = <1>;
 				#size-cells = <1>;
-				compatible = "fsl,espi-flash";
+				compatible = "spansion,s25sl12801";
 				reg = <0>;
-				linux,modalias = "fsl_m25p80";
-				modal = "s25sl128b";
 				spi-max-frequency = <50000000>;
-				mode = <0>;
 
 				partition@0 {
 					/* 512KB for u-boot Bootloader Image */
@@ -202,15 +201,17 @@
 
 		mdio@24520 {
 			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
+				interrupts = <3 1 0 0>;
 				reg = <0x0>;
-				};
+			};
 			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
+				interrupts = <3 1 0 0>;
 				reg = <0x1>;
-				};
+			};
+			tbi-phy@2 {
+				device_type = "tbi-phy";
+				reg = <0x2>;
+			};
 		};
 
 		mdio@25520 {
@@ -224,11 +225,7 @@
 			status = "disabled";
 		};
 
-		ptp_clock@24E00 {
-			compatible = "fsl,etsec-ptp";
-			reg = <0x24E00 0xB0>;
-			interrupts = <68 2 69 2 70 2>;
-			interrupt-parent = < &mpic >;
+		ptp_clock@24e00 {
 			fsl,tclk-period = <5>;
 			fsl,tmr-prsc = <200>;
 			fsl,tmr-add = <0xCCCCCCCD>;
@@ -252,29 +249,18 @@
 			phy-handle = <&phy1>;
 			phy-connection-type = "rgmii-id";
 		};
-
 	};
 
 	pci0: pcie@ffe08000 {
+		reg = <0 0xffe08000 0 0x1000>;
 		status = "disabled";
 	};
 
 	pci1: pcie@ffe09000 {
+		reg = <0 0xffe09000 0 0x1000>;
 		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
-			pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
+		pcie@0 {
 			ranges = <0x2000000 0x0 0xa0000000
 				  0x2000000 0x0 0xa0000000
 				  0x0 0x20000000
@@ -286,21 +272,10 @@
 	};
 
 	pci2: pcie@ffe0a000 {
+		reg = <0 0xffe0a000 0 0x1000>;
 		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
 		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
 			ranges = <0x2000000 0x0 0x80000000
 				  0x2000000 0x0 0x80000000
 				  0x0 0x20000000
@@ -311,3 +286,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
index fc8ddddf..66aac86 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
@@ -14,28 +14,16 @@
  * option) any later version.
  */
 
-/include/ "p2020si.dtsi"
+/include/ "p2020rdb.dts"
 
 / {
 	model = "fsl,P2020RDB";
 	compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
 
-	aliases {
-		ethernet1 = &enet1;
-		ethernet2 = &enet2;
-		serial0 = &serial0;
-		pci0 = &pci0;
-	};
-
 	cpus {
 		PowerPC,P2020@1 {
-		status = "disabled";
+			status = "disabled";
 		};
-
-	};
-
-	memory {
-		device_type = "memory";
 	};
 
 	localbus@ffe05000 {
@@ -43,115 +31,18 @@
 	};
 
 	soc@ffe00000 {
-		i2c@3000 {
-			rtc@68 {
-				compatible = "dallas,ds1339";
-				reg = <0x68>;
-			};
-		};
-
 		serial1: serial@4600 {
 			status = "disabled";
 		};
 
-		spi@7000 {
-
-			fsl_m25p80@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				compatible = "fsl,espi-flash";
-				reg = <0>;
-				linux,modalias = "fsl_m25p80";
-				modal = "s25sl128b";
-				spi-max-frequency = <50000000>;
-				mode = <0>;
-
-				partition@0 {
-					/* 512KB for u-boot Bootloader Image */
-					reg = <0x0 0x00080000>;
-					label = "SPI (RO) U-Boot Image";
-					read-only;
-				};
-
-				partition@80000 {
-					/* 512KB for DTB Image */
-					reg = <0x00080000 0x00080000>;
-					label = "SPI (RO) DTB Image";
-					read-only;
-				};
-
-				partition@100000 {
-					/* 4MB for Linux Kernel Image */
-					reg = <0x00100000 0x00400000>;
-					label = "SPI (RO) Linux Kernel Image";
-					read-only;
-				};
-
-				partition@500000 {
-					/* 4MB for Compressed RFS Image */
-					reg = <0x00500000 0x00400000>;
-					label = "SPI (RO) Compressed RFS Image";
-					read-only;
-				};
-
-				partition@900000 {
-					/* 7MB for JFFS2 based RFS */
-					reg = <0x00900000 0x00700000>;
-					label = "SPI (RW) JFFS2 RFS";
-				};
-			};
-		};
-
 		dma@c300 {
 			status = "disabled";
 		};
 
-		usb@22000 {
-			phy_type = "ulpi";
-		};
-
-		mdio@24520 {
-
-			phy0: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x0>;
-			};
-			phy1: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <3 1>;
-				reg = <0x1>;
-			};
-		};
-
-		mdio@25520 {
-			tbi0: tbi-phy@11 {
-				reg = <0x11>;
-				device_type = "tbi-phy";
-			};
-		};
-
-		mdio@26520 {
-			status = "disabled";
-		};
-
 		enet0: ethernet@24000 {
 			status = "disabled";
 		};
 
-		enet1: ethernet@25000 {
-			tbi-handle = <&tbi0>;
-			phy-handle = <&phy0>;
-			phy-connection-type = "sgmii";
-
-		};
-
-		enet2: ethernet@26000 {
-			phy-handle = <&phy1>;
-			phy-connection-type = "rgmii-id";
-		};
-
-
 		mpic: pic@40000 {
 			protected-sources = <
 			42 76 77 78 79 /* serial1 , dma2 */
@@ -164,40 +55,12 @@
 		msi@41600 {
 			status = "disabled";
 		};
-
-
 	};
 
 	pci0: pcie@ffe08000 {
 		status = "disabled";
 	};
 
-	pci1: pcie@ffe09000 {
-		ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
-			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x4 0x1
-			0000 0x0 0x0 0x2 &mpic 0x5 0x1
-			0000 0x0 0x0 0x3 &mpic 0x6 0x1
-			0000 0x0 0x0 0x4 &mpic 0x7 0x1
-			>;
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0xa0000000
-				  0x2000000 0x0 0xa0000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x100000>;
-		};
-	};
-
 	pci2: pcie@ffe0a000 {
 		status = "disabled";
 	};
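
The CAMP core0 tree no longer re-declares the whole SoC: it includes p2020rdb.dts and then switches off whatever belongs to the second partition with status = "disabled", while the mpic's protected-sources list (42 76 77 78 79 for serial1 and the second DMA block, per the context above) names the interrupt sources this kernel must leave alone. A minimal, compilable sketch of the pattern with a shortened, illustrative source list:

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	soc@ffe00000 {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0x0 0xffe00000 0x100000>;

		serial1: serial@4600 {
			compatible = "ns16550";
			reg = <0x4600 0x100>;
			status = "disabled";	/* owned by the other partition */
		};

		mpic: pic@40000 {
			compatible = "chrp,open-pic";
			reg = <0x40000 0x40000>;
			interrupt-controller;
			#interrupt-cells = <2>;
			#address-cells = <0>;
			/* sources reserved for the partner kernel */
			protected-sources = <42 76 77 78 79>;
		};
	};
};
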
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
index 261c34b..9bd8ef49 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
@@ -15,28 +15,18 @@
  * option) any later version.
  */
 
-/include/ "p2020si.dtsi"
+/include/ "p2020rdb.dts"
 
 / {
 	model = "fsl,P2020RDB";
 	compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
 
-	aliases {
-		ethernet0 = &enet0;
-		serial0 = &serial1;
-		pci1 = &pci1;
-	};
-
 	cpus {
 		PowerPC,P2020@0 {
-		status = "disabled";
+			status = "disabled";
 		};
 	};
 
-	memory {
-		device_type = "memory";
-	};
-
 	localbus@ffe05000 {
 		status = "disabled";
 	};
@@ -70,55 +60,10 @@
 			status = "disabled";
 		};
 
-		dma@c300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0xc300 0x4>;
-			ranges = <0x0 0xc100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <76 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <77 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <78 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <79 2>;
-			};
-		};
-
 		gpio: gpio-controller@f000 {
 			status = "disabled";
 		};
 
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p2020-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x80000>; // L2,512K
-			interrupt-parent = <&mpic>;
-		};
-
 		dma@21300 {
 			status = "disabled";
 		};
@@ -139,12 +84,6 @@
 			status = "disabled";
 		};
 
-		enet0: ethernet@24000 {
-			fixed-link = <1 1 1000 0 0>;
-			phy-connection-type = "rgmii-id";
-
-		};
-
 		enet1: ethernet@25000 {
 			status = "disabled";
 		};
@@ -170,22 +109,6 @@
 			>;
 		};
 
-		msi@41600 {
-			compatible = "fsl,p2020-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
 		global-utilities@e0000 {	//global utilities block
 			status = "disabled";
 		};
@@ -199,30 +122,4 @@
 	pci1: pcie@ffe09000 {
 		status = "disabled";
 	};
-
-	pci2: pcie@ffe0a000 {
-		ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
-			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
-		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
-		interrupt-map = <
-			/* IDSEL 0x0 */
-			0000 0x0 0x0 0x1 &mpic 0x0 0x1
-			0000 0x0 0x0 0x2 &mpic 0x1 0x1
-			0000 0x0 0x0 0x3 &mpic 0x2 0x1
-			0000 0x0 0x0 0x4 &mpic 0x3 0x1
-			>;
-		pcie@0 {
-			reg = <0x0 0x0 0x0 0x0 0x0>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			ranges = <0x2000000 0x0 0x80000000
-				  0x2000000 0x0 0x80000000
-				  0x0 0x20000000
-
-				  0x1000000 0x0 0x0
-				  0x1000000 0x0 0x0
-				  0x0 0x100000>;
-		};
-	};
 };
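
The core1 tree shrinks the same way; in particular the enet0 stanza with its fixed-link property disappears from this file and now comes from the included p2020rdb.dts. fixed-link is how a gianfar MAC with no MDIO-visible PHY (for example one wired straight to an on-board switch) is described; under the old five-cell binding used here the cells are <emulated-phy-id full-duplex speed pause asym-pause>. An illustrative sketch:

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	ethernet@24000 {
		compatible = "gianfar";
		reg = <0x24000 0x1000>;
		phy-connection-type = "rgmii-id";
		/* no PHY to poll: report a fixed 1000 Mbit/s full-duplex link */
		fixed-link = <1 1 1000 0 0>;
	};
};
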
diff --git a/arch/powerpc/boot/dts/p2020si.dtsi b/arch/powerpc/boot/dts/p2020si.dtsi
deleted file mode 100644
index 6def17f..0000000
--- a/arch/powerpc/boot/dts/p2020si.dtsi
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * P2020 Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
-	compatible = "fsl,P2020";
-	#address-cells = <2>;
-	#size-cells = <2>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,P2020@0 {
-			device_type = "cpu";
-			reg = <0x0>;
-			next-level-cache = <&L2>;
-		};
-
-		PowerPC,P2020@1 {
-			device_type = "cpu";
-			reg = <0x1>;
-			next-level-cache = <&L2>;
-		};
-	};
-
-	localbus@ffe05000 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
-		reg = <0 0xffe05000 0 0x1000>;
-		interrupts = <19 2>;
-		interrupt-parent = <&mpic>;
-	};
-
-	soc@ffe00000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "fsl,p2020-immr", "simple-bus";
-		ranges = <0x0  0x0 0xffe00000 0x100000>;
-		bus-frequency = <0>;		// Filled out by uboot.
-
-		ecm-law@0 {
-			compatible = "fsl,ecm-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <12>;
-		};
-
-		ecm@1000 {
-			compatible = "fsl,p2020-ecm", "fsl,ecm";
-			reg = <0x1000 0x1000>;
-			interrupts = <17 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		memory-controller@2000 {
-			compatible = "fsl,p2020-memory-controller";
-			reg = <0x2000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <18 2>;
-		};
-
-		i2c@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x3000 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		i2c@3100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x3100 0x100>;
-			interrupts = <43 2>;
-			interrupt-parent = <&mpic>;
-			dfsrr;
-		};
-
-		serial0: serial@4500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		serial1: serial@4600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <42 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		spi@7000 {
-			cell-index = <0>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,espi";
-			reg = <0x7000 0x1000>;
-			interrupts = <59 0x2>;
-			interrupt-parent = <&mpic>;
-			mode = "cpu";
-		};
-
-		dma@c300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0xc300 0x4>;
-			ranges = <0x0 0xc100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <76 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <77 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <78 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <79 2>;
-			};
-		};
-
-		gpio: gpio-controller@f000 {
-			#gpio-cells = <2>;
-			compatible = "fsl,mpc8572-gpio";
-			reg = <0xf000 0x100>;
-			interrupts = <47 0x2>;
-			interrupt-parent = <&mpic>;
-			gpio-controller;
-		};
-
-		L2: l2-cache-controller@20000 {
-			compatible = "fsl,p2020-l2-cache-controller";
-			reg = <0x20000 0x1000>;
-			cache-line-size = <32>;	// 32 bytes
-			cache-size = <0x80000>; // L2,512K
-			interrupt-parent = <&mpic>;
-			interrupts = <16 2>;
-		};
-
-		dma@21300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,eloplus-dma";
-			reg = <0x21300 0x4>;
-			ranges = <0x0 0x21100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupt-parent = <&mpic>;
-				interrupts = <20 2>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupt-parent = <&mpic>;
-				interrupts = <21 2>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupt-parent = <&mpic>;
-				interrupts = <22 2>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupt-parent = <&mpic>;
-				interrupts = <23 2>;
-			};
-		};
-
-		usb@22000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl-usb2-dr";
-			reg = <0x22000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <28 0x2>;
-		};
-
-		mdio@24520 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,gianfar-mdio";
-			reg = <0x24520 0x20>;
-		};
-
-		mdio@25520 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,gianfar-tbi";
-			reg = <0x26520 0x20>;
-		};
-
-		mdio@26520 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,gianfar-tbi";
-			reg = <0x520 0x20>;
-		};
-
-		enet0: ethernet@24000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <0>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x24000 0x1000>;
-			ranges = <0x0 0x24000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <29 2 30 2 34 2>;
-			interrupt-parent = <&mpic>;
-		};
-
-		enet1: ethernet@25000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <1>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x25000 0x1000>;
-			ranges = <0x0 0x25000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <35 2 36 2 40 2>;
-			interrupt-parent = <&mpic>;
-
-		};
-
-		enet2: ethernet@26000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			cell-index = <2>;
-			device_type = "network";
-			model = "eTSEC";
-			compatible = "gianfar";
-			reg = <0x26000 0x1000>;
-			ranges = <0x0 0x26000 0x1000>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <31 2 32 2 33 2>;
-			interrupt-parent = <&mpic>;
-
-		};
-
-		sdhci@2e000 {
-			compatible = "fsl,p2020-esdhc", "fsl,esdhc";
-			reg = <0x2e000 0x1000>;
-			interrupts = <72 0x2>;
-			interrupt-parent = <&mpic>;
-			/* Filled in by U-Boot */
-			clock-frequency = <0>;
-		};
-
-		crypto@30000 {
-			compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
-				     "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
-			reg = <0x30000 0x10000>;
-			interrupts = <45 2 58 2>;
-			interrupt-parent = <&mpic>;
-			fsl,num-channels = <4>;
-			fsl,channel-fifo-len = <24>;
-			fsl,exec-units-mask = <0xbfe>;
-			fsl,descriptor-types-mask = <0x3ab0ebf>;
-		};
-
-		mpic: pic@40000 {
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <2>;
-			reg = <0x40000 0x40000>;
-			compatible = "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi@41600 {
-			compatible = "fsl,p2020-msi", "fsl,mpic-msi";
-			reg = <0x41600 0x80>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0
-				0xe1 0
-				0xe2 0
-				0xe3 0
-				0xe4 0
-				0xe5 0
-				0xe6 0
-				0xe7 0>;
-			interrupt-parent = <&mpic>;
-		};
-
-		global-utilities@e0000 {	//global utilities block
-			compatible = "fsl,p2020-guts";
-			reg = <0xe0000 0x1000>;
-			fsl,has-rstcr;
-		};
-	};
-
-	pci0: pcie@ffe08000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe08000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <24 2>;
-	};
-
-	pci1: pcie@ffe09000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe09000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
-	};
-
-	pci2: pcie@ffe0a000 {
-		compatible = "fsl,mpc8548-pcie";
-		device_type = "pci";
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		reg = <0 0xffe0a000 0 0x1000>;
-		bus-range = <0 255>;
-		clock-frequency = <33333333>;
-		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
-	};
-};
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index 79b6895..4f957db 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "p2041si.dtsi"
+/include/ "fsl/p2041si-pre.dtsi"
 
 / {
 	model = "fsl,P2041RDB";
@@ -50,6 +50,8 @@
 	};
 
 	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
 		spi@110000 {
 			flash@0 {
 				#address-cells = <1>;
@@ -106,7 +108,18 @@
 		};
 	};
 
-	localbus@ffe124000 {
+	rio: rapidio@ffe0c0000 {
+		reg = <0xf 0xfe0c0000 0 0x11000>;
+
+		port1 {
+			ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+		};
+		port2 {
+			ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+		};
+	};
+
+	lbc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x1000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000>;
 
@@ -122,6 +135,7 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -137,6 +151,7 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -152,6 +167,7 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -163,3 +179,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p2041si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041si.dtsi b/arch/powerpc/boot/dts/p2041si.dtsi
deleted file mode 100644
index f7492ed..0000000
--- a/arch/powerpc/boot/dts/p2041si.dtsi
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * P2041 Silicon Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
-	compatible = "fsl,P2041";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&mpic>;
-
-	aliases {
-		ccsr = &soc;
-		dcsr = &dcsr;
-
-		serial0 = &serial0;
-		serial1 = &serial1;
-		serial2 = &serial2;
-		serial3 = &serial3;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		usb0 = &usb0;
-		usb1 = &usb1;
-		dma0 = &dma0;
-		dma1 = &dma1;
-		sdhc = &sdhc;
-		msi0 = &msi0;
-		msi1 = &msi1;
-		msi2 = &msi2;
-
-		crypto = &crypto;
-		sec_jr0 = &sec_jr0;
-		sec_jr1 = &sec_jr1;
-		sec_jr2 = &sec_jr2;
-		sec_jr3 = &sec_jr3;
-		rtic_a = &rtic_a;
-		rtic_b = &rtic_b;
-		rtic_c = &rtic_c;
-		rtic_d = &rtic_d;
-		sec_mon = &sec_mon;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: PowerPC,e500mc@0 {
-			device_type = "cpu";
-			reg = <0>;
-			next-level-cache = <&L2_0>;
-			L2_0: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu1: PowerPC,e500mc@1 {
-			device_type = "cpu";
-			reg = <1>;
-			next-level-cache = <&L2_1>;
-			L2_1: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu2: PowerPC,e500mc@2 {
-			device_type = "cpu";
-			reg = <2>;
-			next-level-cache = <&L2_2>;
-			L2_2: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu3: PowerPC,e500mc@3 {
-			device_type = "cpu";
-			reg = <3>;
-			next-level-cache = <&L2_3>;
-			L2_3: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-	};
-
-	dcsr: dcsr@f00000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,dcsr", "simple-bus";
-
-		dcsr-epu@0 {
-			compatible = "fsl,dcsr-epu";
-			interrupts = <52 2 0 0
-				      84 2 0 0
-				      85 2 0 0>;
-			interrupt-parent = <&mpic>;
-			reg = <0x0 0x1000>;
-		};
-		dcsr-npc {
-			compatible = "fsl,dcsr-npc";
-			reg = <0x1000 0x1000 0x1000000 0x8000>;
-		};
-		dcsr-nxc@2000 {
-			compatible = "fsl,dcsr-nxc";
-			reg = <0x2000 0x1000>;
-		};
-		dcsr-corenet {
-			compatible = "fsl,dcsr-corenet";
-			reg = <0x8000 0x1000 0xB0000 0x1000>;
-		};
-		dcsr-dpaa@9000 {
-			compatible = "fsl,p2041-dcsr-dpaa", "fsl,dcsr-dpaa";
-			reg = <0x9000 0x1000>;
-		};
-		dcsr-ocn@11000 {
-			compatible = "fsl,p2041-dcsr-ocn", "fsl,dcsr-ocn";
-			reg = <0x11000 0x1000>;
-		};
-		dcsr-ddr@12000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr>;
-			reg = <0x12000 0x1000>;
-		};
-		dcsr-nal@18000 {
-			compatible = "fsl,p2041-dcsr-nal", "fsl,dcsr-nal";
-			reg = <0x18000 0x1000>;
-		};
-		dcsr-rcpm@22000 {
-			compatible = "fsl,p2041-dcsr-rcpm", "fsl,dcsr-rcpm";
-			reg = <0x22000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@40000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu0>;
-			reg = <0x40000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@41000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu1>;
-			reg = <0x41000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@42000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu2>;
-			reg = <0x42000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@43000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu3>;
-			reg = <0x43000 0x1000>;
-		};
-	};
-
-	soc: soc@ffe000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
-		reg = <0xf 0xfe000000 0 0x00001000>;
-
-		soc-sram-error {
-			compatible = "fsl,soc-sram-error";
-			interrupts = <16 2 1 29>;
-		};
-
-		corenet-law@0 {
-			compatible = "fsl,corenet-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <32>;
-		};
-
-		ddr: memory-controller@8000 {
-			compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
-			reg = <0x8000 0x1000>;
-			interrupts = <16 2 1 23>;
-		};
-
-		cpc: l3-cache-controller@10000 {
-			compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
-			reg = <0x10000 0x1000>;
-			interrupts = <16 2 1 27>;
-		};
-
-		corenet-cf@18000 {
-			compatible = "fsl,corenet-cf";
-			reg = <0x18000 0x1000>;
-			interrupts = <16 2 1 31>;
-			fsl,ccf-num-csdids = <32>;
-			fsl,ccf-num-snoopids = <32>;
-		};
-
-		iommu@20000 {
-			compatible = "fsl,pamu-v1.0", "fsl,pamu";
-			reg = <0x20000 0x4000>;
-			interrupts = <
-				24 2 0 0
-				16 2 1 30>;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <4>;
-			reg = <0x40000 0x40000>;
-			compatible = "fsl,mpic", "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi0: msi@41600 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41600 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0 0 0
-				0xe1 0 0 0
-				0xe2 0 0 0
-				0xe3 0 0 0
-				0xe4 0 0 0
-				0xe5 0 0 0
-				0xe6 0 0 0
-				0xe7 0 0 0>;
-		};
-
-		msi1: msi@41800 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41800 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe8 0 0 0
-				0xe9 0 0 0
-				0xea 0 0 0
-				0xeb 0 0 0
-				0xec 0 0 0
-				0xed 0 0 0
-				0xee 0 0 0
-				0xef 0 0 0>;
-		};
-
-		msi2: msi@41a00 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41a00 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xf0 0 0 0
-				0xf1 0 0 0
-				0xf2 0 0 0
-				0xf3 0 0 0
-				0xf4 0 0 0
-				0xf5 0 0 0
-				0xf6 0 0 0
-				0xf7 0 0 0>;
-		};
-
-		guts: global-utilities@e0000 {
-			compatible = "fsl,qoriq-device-config-1.0";
-			reg = <0xe0000 0xe00>;
-			fsl,has-rstcr;
-			#sleep-cells = <1>;
-			fsl,liodn-bits = <12>;
-		};
-
-		pins: global-utilities@e0e00 {
-			compatible = "fsl,qoriq-pin-control-1.0";
-			reg = <0xe0e00 0x200>;
-			#sleep-cells = <2>;
-		};
-
-		clockgen: global-utilities@e1000 {
-			compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
-			reg = <0xe1000 0x1000>;
-			clock-frequency = <0>;
-		};
-
-		rcpm: global-utilities@e2000 {
-			compatible = "fsl,qoriq-rcpm-1.0";
-			reg = <0xe2000 0x1000>;
-			#sleep-cells = <1>;
-		};
-
-		sfp: sfp@e8000 {
-			compatible = "fsl,p2041-sfp", "fsl,qoriq-sfp-1.0";
-			reg	   = <0xe8000 0x1000>;
-		};
-
-		serdes: serdes@ea000 {
-			compatible = "fsl,p2041-serdes";
-			reg	   = <0xea000 0x1000>;
-		};
-
-		dma0: dma@100300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p2041-dma", "fsl,eloplus-dma";
-			reg = <0x100300 0x4>;
-			ranges = <0x0 0x100100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <28 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <29 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <30 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <31 2 0 0>;
-			};
-		};
-
-		dma1: dma@101300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p2041-dma", "fsl,eloplus-dma";
-			reg = <0x101300 0x4>;
-			ranges = <0x0 0x101100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <32 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <33 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <34 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p2041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <35 2 0 0>;
-			};
-		};
-
-		spi@110000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,p2041-espi", "fsl,mpc8536-espi";
-			reg = <0x110000 0x1000>;
-			interrupts = <53 0x2 0 0>;
-			fsl,espi-num-chipselects = <4>;
-		};
-
-		sdhc: sdhc@114000 {
-			compatible = "fsl,p2041-esdhc", "fsl,esdhc";
-			reg = <0x114000 0x1000>;
-			interrupts = <48 2 0 0>;
-			sdhci,auto-cmd12;
-			clock-frequency = <0>;
-		};
-
-		i2c@118000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x118000 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@118100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x118100 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <2>;
-			compatible = "fsl-i2c";
-			reg = <0x119000 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <3>;
-			compatible = "fsl-i2c";
-			reg = <0x119100 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		serial0: serial@11c500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial1: serial@11c600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial2: serial@11d500 {
-			cell-index = <2>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		serial3: serial@11d600 {
-			cell-index = <3>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		gpio0: gpio@130000 {
-			compatible = "fsl,p2041-gpio", "fsl,qoriq-gpio";
-			reg = <0x130000 0x1000>;
-			interrupts = <55 2 0 0>;
-			#gpio-cells = <2>;
-			gpio-controller;
-		};
-
-		usb0: usb@210000 {
-			compatible = "fsl,p2041-usb2-mph",
-					"fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
-			reg = <0x210000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <44 0x2 0 0>;
-			phy_type = "utmi";
-			port0;
-		};
-
-		usb1: usb@211000 {
-			compatible = "fsl,p2041-usb2-dr",
-					"fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
-			reg = <0x211000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <45 0x2 0 0>;
-			phy_type = "utmi";
-		};
-
-		sata@220000 {
-			compatible = "fsl,p2041-sata", "fsl,pq-sata-v2";
-			reg = <0x220000 0x1000>;
-			interrupts = <68 0x2 0 0>;
-		};
-
-		sata@221000 {
-			compatible = "fsl,p2041-sata", "fsl,pq-sata-v2";
-			reg = <0x221000 0x1000>;
-			interrupts = <69 0x2 0 0>;
-		};
-
-		crypto: crypto@300000 {
-			compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x300000 0x10000>;
-			ranges = <0 0x300000 0x10000>;
-			interrupts = <92 2 0 0>;
-
-			sec_jr0: jr@1000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x1000 0x1000>;
-				interrupts = <88 2 0 0>;
-			};
-
-			sec_jr1: jr@2000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x2000 0x1000>;
-				interrupts = <89 2 0 0>;
-			};
-
-			sec_jr2: jr@3000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x3000 0x1000>;
-				interrupts = <90 2 0 0>;
-			};
-
-			sec_jr3: jr@4000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x4000 0x1000>;
-				interrupts = <91 2 0 0>;
-			};
-
-			rtic@6000 {
-				compatible = "fsl,sec-v4.2-rtic",
-					     "fsl,sec-v4.0-rtic";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0x6000 0x100>;
-				ranges = <0x0 0x6100 0xe00>;
-
-				rtic_a: rtic-a@0 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x00 0x20 0x100 0x80>;
-				};
-
-				rtic_b: rtic-b@20 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x20 0x20 0x200 0x80>;
-				};
-
-				rtic_c: rtic-c@40 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x40 0x20 0x300 0x80>;
-				};
-
-				rtic_d: rtic-d@60 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x60 0x20 0x500 0x80>;
-				};
-			};
-		};
-
-		sec_mon: sec_mon@314000 {
-			compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
-			reg = <0x314000 0x1000>;
-			interrupts = <93 2 0 0>;
-		};
-
-	};
-
-	localbus@ffe124000 {
-		compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
-		interrupts = <25 2 0 0>;
-		#address-cells = <2>;
-		#size-cells = <1>;
-	};
-
-	pci0: pcie@ffe200000 {
-		compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <33333333>;
-		fsl,msi = <&msi0>;
-		interrupts = <16 2 1 15>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 15>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 40 1 0 0
-				0000 0 0 2 &mpic 1 1 0 0
-				0000 0 0 3 &mpic 2 1 0 0
-				0000 0 0 4 &mpic 3 1 0 0
-				>;
-		};
-	};
-
-	pci1: pcie@ffe201000 {
-		compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0 0xff>;
-		clock-frequency = <33333333>;
-		fsl,msi = <&msi1>;
-		interrupts = <16 2 1 14>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 14>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 41 1 0 0
-				0000 0 0 2 &mpic 5 1 0 0
-				0000 0 0 3 &mpic 6 1 0 0
-				0000 0 0 4 &mpic 7 1 0 0
-				>;
-		};
-	};
-
-	pci2: pcie@ffe202000 {
-		compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <33333333>;
-		fsl,msi = <&msi2>;
-		interrupts = <16 2 1 13>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 13>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 42 1 0 0
-				0000 0 0 2 &mpic 9 1 0 0
-				0000 0 0 3 &mpic 10 1 0 0
-				0000 0 0 4 &mpic 11 1 0 0
-				>;
-		};
-	};
-};
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index bbd113b..f469145 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "p3041si.dtsi"
+/include/ "fsl/p3041si-pre.dtsi"
 
 / {
 	model = "fsl,P3041DS";
@@ -50,6 +50,8 @@
 	};
 
 	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
 		spi@110000 {
 			flash@0 {
 				#address-cells = <1>;
@@ -99,7 +101,18 @@
 		};
 	};
 
-	localbus@ffe124000 {
+	rio: rapidio@ffe0c0000 {
+		reg = <0xf 0xfe0c0000 0 0x11000>;
+
+		port1 {
+			ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+		};
+		port2 {
+			ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+		};
+	};
+
+	lbc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x1000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
 			  2 0 0xf 0xffa00000 0x00040000
@@ -160,6 +173,7 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -175,6 +189,7 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -190,6 +205,7 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -205,6 +221,7 @@
 		reg = <0xf 0xfe203000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -216,3 +233,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p3041si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p3041si.dtsi b/arch/powerpc/boot/dts/p3041si.dtsi
deleted file mode 100644
index 87130b7..0000000
--- a/arch/powerpc/boot/dts/p3041si.dtsi
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * P3041 Silicon Device Tree Source
- *
- * Copyright 2010-2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
-	compatible = "fsl,P3041";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&mpic>;
-
-	aliases {
-		ccsr = &soc;
-		dcsr = &dcsr;
-
-		serial0 = &serial0;
-		serial1 = &serial1;
-		serial2 = &serial2;
-		serial3 = &serial3;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		pci3 = &pci3;
-		usb0 = &usb0;
-		usb1 = &usb1;
-		dma0 = &dma0;
-		dma1 = &dma1;
-		sdhc = &sdhc;
-		msi0 = &msi0;
-		msi1 = &msi1;
-		msi2 = &msi2;
-
-		crypto = &crypto;
-		sec_jr0 = &sec_jr0;
-		sec_jr1 = &sec_jr1;
-		sec_jr2 = &sec_jr2;
-		sec_jr3 = &sec_jr3;
-		rtic_a = &rtic_a;
-		rtic_b = &rtic_b;
-		rtic_c = &rtic_c;
-		rtic_d = &rtic_d;
-		sec_mon = &sec_mon;
-
-/*
-		rio0 = &rapidio0;
- */
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: PowerPC,e500mc@0 {
-			device_type = "cpu";
-			reg = <0>;
-			next-level-cache = <&L2_0>;
-			L2_0: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu1: PowerPC,e500mc@1 {
-			device_type = "cpu";
-			reg = <1>;
-			next-level-cache = <&L2_1>;
-			L2_1: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu2: PowerPC,e500mc@2 {
-			device_type = "cpu";
-			reg = <2>;
-			next-level-cache = <&L2_2>;
-			L2_2: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu3: PowerPC,e500mc@3 {
-			device_type = "cpu";
-			reg = <3>;
-			next-level-cache = <&L2_3>;
-			L2_3: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-	};
-
-	dcsr: dcsr@f00000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,dcsr", "simple-bus";
-
-		dcsr-epu@0 {
-			compatible = "fsl,dcsr-epu";
-			interrupts = <52 2 0 0
-				      84 2 0 0
-				      85 2 0 0>;
-			interrupt-parent = <&mpic>;
-			reg = <0x0 0x1000>;
-		};
-		dcsr-npc {
-			compatible = "fsl,dcsr-npc";
-			reg = <0x1000 0x1000 0x1000000 0x8000>;
-		};
-		dcsr-nxc@2000 {
-			compatible = "fsl,dcsr-nxc";
-			reg = <0x2000 0x1000>;
-		};
-		dcsr-corenet {
-			compatible = "fsl,dcsr-corenet";
-			reg = <0x8000 0x1000 0xB0000 0x1000>;
-		};
-		dcsr-dpaa@9000 {
-			compatible = "fsl,p43041-dcsr-dpaa", "fsl,dcsr-dpaa";
-			reg = <0x9000 0x1000>;
-		};
-		dcsr-ocn@11000 {
-			compatible = "fsl,p43041-dcsr-ocn", "fsl,dcsr-ocn";
-			reg = <0x11000 0x1000>;
-		};
-		dcsr-ddr@12000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr>;
-			reg = <0x12000 0x1000>;
-		};
-		dcsr-nal@18000 {
-			compatible = "fsl,p43041-dcsr-nal", "fsl,dcsr-nal";
-			reg = <0x18000 0x1000>;
-		};
-		dcsr-rcpm@22000 {
-			compatible = "fsl,p43041-dcsr-rcpm", "fsl,dcsr-rcpm";
-			reg = <0x22000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@40000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu0>;
-			reg = <0x40000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@41000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu1>;
-			reg = <0x41000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@42000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu2>;
-			reg = <0x42000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@43000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu3>;
-			reg = <0x43000 0x1000>;
-		};
-	};
-
-	soc: soc@ffe000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
-		reg = <0xf 0xfe000000 0 0x00001000>;
-
-		soc-sram-error {
-			compatible = "fsl,soc-sram-error";
-			interrupts = <16 2 1 29>;
-		};
-
-		corenet-law@0 {
-			compatible = "fsl,corenet-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <32>;
-		};
-
-		ddr: memory-controller@8000 {
-			compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
-			reg = <0x8000 0x1000>;
-			interrupts = <16 2 1 23>;
-		};
-
-		cpc: l3-cache-controller@10000 {
-			compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
-			reg = <0x10000 0x1000>;
-			interrupts = <16 2 1 27>;
-		};
-
-		corenet-cf@18000 {
-			compatible = "fsl,corenet-cf";
-			reg = <0x18000 0x1000>;
-			interrupts = <16 2 1 31>;
-			fsl,ccf-num-csdids = <32>;
-			fsl,ccf-num-snoopids = <32>;
-		};
-
-		iommu@20000 {
-			compatible = "fsl,pamu-v1.0", "fsl,pamu";
-			reg = <0x20000 0x4000>;
-			interrupts = <
-				24 2 0 0
-				16 2 1 30>;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <4>;
-			reg = <0x40000 0x40000>;
-			compatible = "fsl,mpic", "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi0: msi@41600 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41600 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0 0 0
-				0xe1 0 0 0
-				0xe2 0 0 0
-				0xe3 0 0 0
-				0xe4 0 0 0
-				0xe5 0 0 0
-				0xe6 0 0 0
-				0xe7 0 0 0>;
-		};
-
-		msi1: msi@41800 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41800 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe8 0 0 0
-				0xe9 0 0 0
-				0xea 0 0 0
-				0xeb 0 0 0
-				0xec 0 0 0
-				0xed 0 0 0
-				0xee 0 0 0
-				0xef 0 0 0>;
-		};
-
-		msi2: msi@41a00 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41a00 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xf0 0 0 0
-				0xf1 0 0 0
-				0xf2 0 0 0
-				0xf3 0 0 0
-				0xf4 0 0 0
-				0xf5 0 0 0
-				0xf6 0 0 0
-				0xf7 0 0 0>;
-		};
-
-		guts: global-utilities@e0000 {
-			compatible = "fsl,qoriq-device-config-1.0";
-			reg = <0xe0000 0xe00>;
-			fsl,has-rstcr;
-			#sleep-cells = <1>;
-			fsl,liodn-bits = <12>;
-		};
-
-		pins: global-utilities@e0e00 {
-			compatible = "fsl,qoriq-pin-control-1.0";
-			reg = <0xe0e00 0x200>;
-			#sleep-cells = <2>;
-		};
-
-		clockgen: global-utilities@e1000 {
-			compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
-			reg = <0xe1000 0x1000>;
-			clock-frequency = <0>;
-		};
-
-		rcpm: global-utilities@e2000 {
-			compatible = "fsl,qoriq-rcpm-1.0";
-			reg = <0xe2000 0x1000>;
-			#sleep-cells = <1>;
-		};
-
-		sfp: sfp@e8000 {
-			compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0";
-			reg	   = <0xe8000 0x1000>;
-		};
-
-		serdes: serdes@ea000 {
-			compatible = "fsl,p3041-serdes";
-			reg	   = <0xea000 0x1000>;
-		};
-
-		dma0: dma@100300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
-			reg = <0x100300 0x4>;
-			ranges = <0x0 0x100100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <28 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <29 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <30 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <31 2 0 0>;
-			};
-		};
-
-		dma1: dma@101300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
-			reg = <0x101300 0x4>;
-			ranges = <0x0 0x101100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <32 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <33 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <34 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p3041-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <35 2 0 0>;
-			};
-		};
-
-		spi@110000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,p3041-espi", "fsl,mpc8536-espi";
-			reg = <0x110000 0x1000>;
-			interrupts = <53 0x2 0 0>;
-			fsl,espi-num-chipselects = <4>;
-		};
-
-		sdhc: sdhc@114000 {
-			compatible = "fsl,p3041-esdhc", "fsl,esdhc";
-			reg = <0x114000 0x1000>;
-			interrupts = <48 2 0 0>;
-			sdhci,auto-cmd12;
-			clock-frequency = <0>;
-		};
-
-		i2c@118000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x118000 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@118100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x118100 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <2>;
-			compatible = "fsl-i2c";
-			reg = <0x119000 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <3>;
-			compatible = "fsl-i2c";
-			reg = <0x119100 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		serial0: serial@11c500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial1: serial@11c600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial2: serial@11d500 {
-			cell-index = <2>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		serial3: serial@11d600 {
-			cell-index = <3>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		gpio0: gpio@130000 {
-			compatible = "fsl,p3041-gpio", "fsl,qoriq-gpio";
-			reg = <0x130000 0x1000>;
-			interrupts = <55 2 0 0>;
-			#gpio-cells = <2>;
-			gpio-controller;
-		};
-
-		usb0: usb@210000 {
-			compatible = "fsl,p3041-usb2-mph",
-					"fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
-			reg = <0x210000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <44 0x2 0 0>;
-			phy_type = "utmi";
-			port0;
-		};
-
-		usb1: usb@211000 {
-			compatible = "fsl,p3041-usb2-dr",
-					"fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
-			reg = <0x211000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <45 0x2 0 0>;
-			dr_mode = "host";
-			phy_type = "utmi";
-		};
-
-		sata@220000 {
-			compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
-			reg = <0x220000 0x1000>;
-			interrupts = <68 0x2 0 0>;
-		};
-
-		sata@221000 {
-			compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
-			reg = <0x221000 0x1000>;
-			interrupts = <69 0x2 0 0>;
-		};
-
-		crypto: crypto@300000 {
-			compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg		 = <0x300000 0x10000>;
-			ranges		 = <0 0x300000 0x10000>;
-			interrupts	 = <92 2 0 0>;
-
-			sec_jr0: jr@1000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x1000 0x1000>;
-				interrupts = <88 2 0 0>;
-			};
-
-			sec_jr1: jr@2000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x2000 0x1000>;
-				interrupts = <89 2 0 0>;
-			};
-
-			sec_jr2: jr@3000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x3000 0x1000>;
-				interrupts = <90 2 0 0>;
-			};
-
-			sec_jr3: jr@4000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x4000 0x1000>;
-				interrupts = <91 2 0 0>;
-			};
-
-			rtic@6000 {
-				compatible = "fsl,sec-v4.2-rtic",
-					     "fsl,sec-v4.0-rtic";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0x6000 0x100>;
-				ranges = <0x0 0x6100 0xe00>;
-
-				rtic_a: rtic-a@0 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x00 0x20 0x100 0x80>;
-				};
-
-				rtic_b: rtic-b@20 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x20 0x20 0x200 0x80>;
-				};
-
-				rtic_c: rtic-c@40 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x40 0x20 0x300 0x80>;
-				};
-
-				rtic_d: rtic-d@60 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x60 0x20 0x500 0x80>;
-				};
-			};
-		};
-
-		sec_mon: sec_mon@314000 {
-			compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
-			reg = <0x314000 0x1000>;
-			interrupts = <93 2 0 0>;
-		};
-	};
-
-/*
-	rapidio0: rapidio@ffe0c0000
-*/
-
-	localbus@ffe124000 {
-		compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
-		interrupts = <25 2 0 0>;
-		#address-cells = <2>;
-		#size-cells = <1>;
-	};
-
-	pci0: pcie@ffe200000 {
-		compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi0>;
-		interrupts = <16 2 1 15>;
-
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 15>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 40 1 0 0
-				0000 0 0 2 &mpic 1 1 0 0
-				0000 0 0 3 &mpic 2 1 0 0
-				0000 0 0 4 &mpic 3 1 0 0
-				>;
-		};
-	};
-
-	pci1: pcie@ffe201000 {
-		compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi1>;
-		interrupts = <16 2 1 14>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 14>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 41 1 0 0
-				0000 0 0 2 &mpic 5 1 0 0
-				0000 0 0 3 &mpic 6 1 0 0
-				0000 0 0 4 &mpic 7 1 0 0
-				>;
-		};
-	};
-
-	pci2: pcie@ffe202000 {
-		compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi2>;
-		interrupts = <16 2 1 13>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 13>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 42 1 0 0
-				0000 0 0 2 &mpic 9 1 0 0
-				0000 0 0 3 &mpic 10 1 0 0
-				0000 0 0 4 &mpic 11 1 0 0
-				>;
-		};
-	};
-
-	pci3: pcie@ffe203000 {
-		compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi2>;
-		interrupts = <16 2 1 12>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 12>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 43 1 0 0
-				0000 0 0 2 &mpic 0 1 0 0
-				0000 0 0 3 &mpic 4 1 0 0
-				0000 0 0 4 &mpic 8 1 0 0
-				>;
-		};
-	};
-};
diff --git a/arch/powerpc/boot/dts/p3060qds.dts b/arch/powerpc/boot/dts/p3060qds.dts
index 08b9193..529042e 100644
--- a/arch/powerpc/boot/dts/p3060qds.dts
+++ b/arch/powerpc/boot/dts/p3060qds.dts
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "p3060si.dtsi"
+/include/ "fsl/p3060si-pre.dtsi"
 
 / {
 	model = "fsl,P3060QDS";
@@ -50,6 +50,8 @@
 	};
 
 	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
 		spi@110000 {
 			flash@0 {
 				#address-cells = <1>;
@@ -138,7 +140,7 @@
 		};
 	};
 
-	rapidio@ffe0c0000 {
+	rio: rapidio@ffe0c0000 {
 		reg = <0xf 0xfe0c0000 0 0x11000>;
 
 		port1 {
@@ -149,7 +151,7 @@
 		};
 	};
 
-	localbus@ffe124000 {
+	lbc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x1000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
 			  2 0 0xf 0xffa00000 0x00040000
@@ -210,6 +212,7 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -225,6 +228,7 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -236,3 +240,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p3060si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p3060si.dtsi b/arch/powerpc/boot/dts/p3060si.dtsi
deleted file mode 100644
index 68947e1..0000000
--- a/arch/powerpc/boot/dts/p3060si.dtsi
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * P3060 Silicon Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
-	compatible = "fsl,P3060";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&mpic>;
-
-	aliases {
-		ccsr = &soc;
-		dcsr = &dcsr;
-
-		serial0 = &serial0;
-		serial1 = &serial1;
-		serial2 = &serial2;
-		serial3 = &serial3;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		usb0 = &usb0;
-		usb1 = &usb1;
-		dma0 = &dma0;
-		dma1 = &dma1;
-		msi0 = &msi0;
-		msi1 = &msi1;
-		msi2 = &msi2;
-
-		crypto = &crypto;
-		sec_jr0 = &sec_jr0;
-		sec_jr1 = &sec_jr1;
-		sec_jr2 = &sec_jr2;
-		sec_jr3 = &sec_jr3;
-		rtic_a = &rtic_a;
-		rtic_b = &rtic_b;
-		rtic_c = &rtic_c;
-		rtic_d = &rtic_d;
-		sec_mon = &sec_mon;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: PowerPC,e500mc@0 {
-			device_type = "cpu";
-			reg = <0>;
-			next-level-cache = <&L2_0>;
-			L2_0: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu1: PowerPC,e500mc@1 {
-			device_type = "cpu";
-			reg = <1>;
-			next-level-cache = <&L2_1>;
-			L2_1: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu4: PowerPC,e500mc@4 {
-			device_type = "cpu";
-			reg = <4>;
-			next-level-cache = <&L2_4>;
-			L2_4: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu5: PowerPC,e500mc@5 {
-			device_type = "cpu";
-			reg = <5>;
-			next-level-cache = <&L2_5>;
-			L2_5: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu6: PowerPC,e500mc@6 {
-			device_type = "cpu";
-			reg = <6>;
-			next-level-cache = <&L2_6>;
-			L2_6: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu7: PowerPC,e500mc@7 {
-			device_type = "cpu";
-			reg = <7>;
-			next-level-cache = <&L2_7>;
-			L2_7: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-	};
-
-	dcsr: dcsr@f00000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,dcsr", "simple-bus";
-
-		dcsr-epu@0 {
-			compatible = "fsl,dcsr-epu";
-			interrupts = <52 2 0 0
-				      84 2 0 0
-				      85 2 0 0>;
-			interrupt-parent = <&mpic>;
-			reg = <0x0 0x1000>;
-		};
-		dcsr-npc {
-			compatible = "fsl,dcsr-npc";
-			reg = <0x1000 0x1000 0x1000000 0x8000>;
-		};
-		dcsr-nxc@2000 {
-			compatible = "fsl,dcsr-nxc";
-			reg = <0x2000 0x1000>;
-		};
-		dcsr-corenet {
-			compatible = "fsl,dcsr-corenet";
-			reg = <0x8000 0x1000 0xB0000 0x1000>;
-		};
-		dcsr-dpaa@9000 {
-			compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa";
-			reg = <0x9000 0x1000>;
-		};
-		dcsr-ocn@11000 {
-			compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn";
-			reg = <0x11000 0x1000>;
-		};
-		dcsr-ddr@12000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr>;
-			reg = <0x12000 0x1000>;
-		};
-		dcsr-nal@18000 {
-			compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal";
-			reg = <0x18000 0x1000>;
-		};
-		dcsr-rcpm@22000 {
-			compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm";
-			reg = <0x22000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@40000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu0>;
-			reg = <0x40000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@41000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu1>;
-			reg = <0x41000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@44000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu4>;
-			reg = <0x44000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@45000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu5>;
-			reg = <0x45000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@46000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu6>;
-			reg = <0x46000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@47000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu7>;
-			reg = <0x47000 0x1000>;
-		};
-	};
-
-	soc: soc@ffe000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
-		reg = <0xf 0xfe000000 0 0x00001000>;
-
-		soc-sram-error {
-			compatible = "fsl,soc-sram-error";
-			interrupts = <16 2 1 29>;
-		};
-
-		corenet-law@0 {
-			compatible = "fsl,corenet-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <32>;
-		};
-
-		ddr: memory-controller@8000 {
-			compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
-			reg = <0x8000 0x1000>;
-			interrupts = <16 2 1 23>;
-		};
-
-		cpc: l3-cache-controller@10000 {
-			compatible = "fsl,p3060-l3-cache-controller", "cache";
-			reg = <0x10000 0x1000
-			       0x11000 0x1000>;
-			interrupts = <16 2 1 27>;
-		};
-
-		corenet-cf@18000 {
-			compatible = "fsl,corenet-cf";
-			reg = <0x18000 0x1000>;
-			interrupts = <16 2 1 31>;
-			fsl,ccf-num-csdids = <32>;
-			fsl,ccf-num-snoopids = <32>;
-		};
-
-		iommu@20000 {
-			compatible = "fsl,pamu-v1.0", "fsl,pamu";
-			reg = <0x20000 0x5000>;
-			interrupts = <
-				24 2 0 0
-				16 2 1 30>;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <4>;
-			reg = <0x40000 0x40000>;
-			compatible = "fsl,mpic", "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi0: msi@41600 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41600 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0 0 0
-				0xe1 0 0 0
-				0xe2 0 0 0
-				0xe3 0 0 0
-				0xe4 0 0 0
-				0xe5 0 0 0
-				0xe6 0 0 0
-				0xe7 0 0 0>;
-		};
-
-		msi1: msi@41800 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41800 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe8 0 0 0
-				0xe9 0 0 0
-				0xea 0 0 0
-				0xeb 0 0 0
-				0xec 0 0 0
-				0xed 0 0 0
-				0xee 0 0 0
-				0xef 0 0 0>;
-		};
-
-		msi2: msi@41a00 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41a00 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xf0 0 0 0
-				0xf1 0 0 0
-				0xf2 0 0 0
-				0xf3 0 0 0
-				0xf4 0 0 0
-				0xf5 0 0 0
-				0xf6 0 0 0
-				0xf7 0 0 0>;
-		};
-
-		rmu: rmu@d3000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,srio-rmu";
-			reg = <0xd3000 0x500>;
-			ranges = <0x0 0xd3000 0x500>;
-
-			message-unit@0 {
-				compatible = "fsl,srio-msg-unit";
-				reg = <0x0 0x100>;
-				interrupts = <
-					60 2 0 0  /* msg1_tx_irq */
-					61 2 0 0>;/* msg1_rx_irq */
-			};
-			message-unit@100 {
-				compatible = "fsl,srio-msg-unit";
-				reg = <0x100 0x100>;
-				interrupts = <
-					62 2 0 0  /* msg2_tx_irq */
-					63 2 0 0>;/* msg2_rx_irq */
-			};
-			doorbell-unit@400 {
-				compatible = "fsl,srio-dbell-unit";
-				reg = <0x400 0x80>;
-				interrupts = <
-					56 2 0 0  /* bell_outb_irq */
-					57 2 0 0>;/* bell_inb_irq */
-			};
-			port-write-unit@4e0 {
-				compatible = "fsl,srio-port-write-unit";
-				reg = <0x4e0 0x20>;
-				interrupts = <16 2 1 11>;
-			};
-		};
-
-		guts: global-utilities@e0000 {
-			compatible = "fsl,qoriq-device-config-1.0";
-			reg = <0xe0000 0xe00>;
-			fsl,has-rstcr;
-			#sleep-cells = <1>;
-			fsl,liodn-bits = <12>;
-		};
-
-		pins: global-utilities@e0e00 {
-			compatible = "fsl,qoriq-pin-control-1.0";
-			reg = <0xe0e00 0x200>;
-			#sleep-cells = <2>;
-		};
-
-		clockgen: global-utilities@e1000 {
-			compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0";
-			reg = <0xe1000 0x1000>;
-			clock-frequency = <0>;
-		};
-
-		rcpm: global-utilities@e2000 {
-			compatible = "fsl,qoriq-rcpm-1.0";
-			reg = <0xe2000 0x1000>;
-			#sleep-cells = <1>;
-		};
-
-		sfp: sfp@e8000 {
-			compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0";
-			reg	   = <0xe8000 0x1000>;
-		};
-
-		serdes: serdes@ea000 {
-			compatible = "fsl,p3060-serdes";
-			reg	   = <0xea000 0x1000>;
-		};
-
-		dma0: dma@100300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p3060-dma", "fsl,eloplus-dma";
-			reg = <0x100300 0x4>;
-			ranges = <0x0 0x100100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <28 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <29 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <30 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <31 2 0 0>;
-			};
-		};
-
-		dma1: dma@101300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p3060-dma", "fsl,eloplus-dma";
-			reg = <0x101300 0x4>;
-			ranges = <0x0 0x101100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <32 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <33 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <34 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p3060-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <35 2 0 0>;
-			};
-		};
-
-		spi@110000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,p3060-espi", "fsl,mpc8536-espi";
-			reg = <0x110000 0x1000>;
-			interrupts = <53 0x2 0 0>;
-			fsl,espi-num-chipselects = <4>;
-		};
-
-		i2c@118000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x118000 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@118100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x118100 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <2>;
-			compatible = "fsl-i2c";
-			reg = <0x119000 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <3>;
-			compatible = "fsl-i2c";
-			reg = <0x119100 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		serial0: serial@11c500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial1: serial@11c600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial2: serial@11d500 {
-			cell-index = <2>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		serial3: serial@11d600 {
-			cell-index = <3>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		gpio0: gpio@130000 {
-			compatible = "fsl,p3060-gpio", "fsl,qoriq-gpio";
-			reg = <0x130000 0x1000>;
-			interrupts = <55 2 0 0>;
-			#gpio-cells = <2>;
-			gpio-controller;
-		};
-
-		usb0: usb@210000 {
-			compatible = "fsl,p3060-usb2-mph",
-					"fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
-			reg = <0x210000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <44 0x2 0 0>;
-		};
-
-		usb1: usb@211000 {
-			compatible = "fsl,p3060-usb2-dr",
-					"fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
-			reg = <0x211000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <45 0x2 0 0>;
-		};
-
-		crypto: crypto@300000 {
-			compatible = "fsl,sec-v4.1", "fsl,sec-v4.0";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x300000 0x10000>;
-			ranges = <0 0x300000 0x10000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <92 2 0 0>;
-
-			sec_jr0: jr@1000 {
-				compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
-				reg = <0x1000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <88 2 0 0>;
-			};
-
-			sec_jr1: jr@2000 {
-				compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
-				reg = <0x2000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <89 2 0 0>;
-			};
-
-			sec_jr2: jr@3000 {
-				compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
-				reg = <0x3000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <90 2 0 0>;
-			};
-
-			sec_jr3: jr@4000 {
-				compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
-				reg = <0x4000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <91 2 0 0>;
-			};
-
-			rtic@6000 {
-				compatible = "fsl,sec-v4.1-rtic", "fsl,sec-v4.0-rtic";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0x6000 0x100>;
-				ranges = <0x0 0x6100 0xe00>;
-
-				rtic_a: rtic-a@0 {
-					compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
-					reg = <0x00 0x20 0x100 0x80>;
-				};
-
-				rtic_b: rtic-b@20 {
-					compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
-					reg = <0x20 0x20 0x200 0x80>;
-				};
-
-				rtic_c: rtic-c@40 {
-					compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
-					reg = <0x40 0x20 0x300 0x80>;
-				};
-
-				rtic_d: rtic-d@60 {
-					compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
-					reg = <0x60 0x20 0x500 0x80>;
-				};
-			};
-		};
-
-		sec_mon: sec_mon@314000 {
-			compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon";
-			reg = <0x314000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <93 2 0 0>;
-		};
-	};
-
-	rapidio@ffe0c0000 {
-		compatible = "fsl,srio";
-		interrupts = <16 2 1 11>;
-		#address-cells = <2>;
-		#size-cells = <2>;
-		fsl,srio-rmu-handle = <&rmu>;
-		ranges;
-
-		port1 {
-			#address-cells = <2>;
-			#size-cells = <2>;
-			cell-index = <1>;
-		};
-
-		port2 {
-			#address-cells = <2>;
-			#size-cells = <2>;
-			cell-index = <2>;
-		};
-	};
-
-	localbus@ffe124000 {
-		compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus";
-		interrupts = <25 2 0 0>;
-		#address-cells = <2>;
-		#size-cells = <1>;
-	};
-
-	pci0: pcie@ffe200000 {
-		compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <33333333>;
-		fsl,msi = <&msi0>;
-		interrupts = <16 2 1 15>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 15>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 40 1 0 0
-				0000 0 0 2 &mpic 1 1 0 0
-				0000 0 0 3 &mpic 2 1 0 0
-				0000 0 0 4 &mpic 3 1 0 0
-				>;
-		};
-	};
-
-	pci1: pcie@ffe201000 {
-		compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0 0xff>;
-		clock-frequency = <33333333>;
-		fsl,msi = <&msi1>;
-		interrupts = <16 2 1 14>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 14>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 41 1 0 0
-				0000 0 0 2 &mpic 5 1 0 0
-				0000 0 0 3 &mpic 6 1 0 0
-				0000 0 0 4 &mpic 7 1 0 0
-				>;
-		};
-	};
-};
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index c7916dc2..6d60e54 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "p4080si.dtsi"
+/include/ "fsl/p4080si-pre.dtsi"
 
 / {
 	model = "fsl,P4080DS";
@@ -50,6 +50,9 @@
 	};
 
 	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
+
 		spi@110000 {
 			flash@0 {
 				#address-cells = <1>;
@@ -105,12 +108,18 @@
 		};
 	};
 
-	rapidio0: rapidio@ffe0c0000 {
-		reg = <0xf 0xfe0c0000 0 0x20000>;
-		ranges = <0 0 0xc 0x20000000 0 0x01000000>;
+	rio: rapidio@ffe0c0000 {
+		reg = <0xf 0xfe0c0000 0 0x11000>;
+
+		port1 {
+			ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+		};
+		port2 {
+			ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+		};
 	};
 
-	localbus@ffe124000 {
+	lbc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x1000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
 			  3 0 0xf 0xffdf0000 0x00008000>;
@@ -132,6 +141,7 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -147,6 +157,7 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -162,6 +173,7 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -174,3 +186,5 @@
 	};
 
 };
+
+/include/ "fsl/p4080si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p4080si.dtsi b/arch/powerpc/boot/dts/p4080si.dtsi
deleted file mode 100644
index f20c01a..0000000
--- a/arch/powerpc/boot/dts/p4080si.dtsi
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * P4080 Silicon Device Tree Source
- *
- * Copyright 2009-2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
-	compatible = "fsl,P4080";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&mpic>;
-
-	aliases {
-		ccsr = &soc;
-		dcsr = &dcsr;
-
-		serial0 = &serial0;
-		serial1 = &serial1;
-		serial2 = &serial2;
-		serial3 = &serial3;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		usb0 = &usb0;
-		usb1 = &usb1;
-		dma0 = &dma0;
-		dma1 = &dma1;
-		sdhc = &sdhc;
-		msi0 = &msi0;
-		msi1 = &msi1;
-		msi2 = &msi2;
-
-		crypto = &crypto;
-		sec_jr0 = &sec_jr0;
-		sec_jr1 = &sec_jr1;
-		sec_jr2 = &sec_jr2;
-		sec_jr3 = &sec_jr3;
-		rtic_a = &rtic_a;
-		rtic_b = &rtic_b;
-		rtic_c = &rtic_c;
-		rtic_d = &rtic_d;
-		sec_mon = &sec_mon;
-
-		rio0 = &rapidio0;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: PowerPC,e500mc@0 {
-			device_type = "cpu";
-			reg = <0>;
-			next-level-cache = <&L2_0>;
-			L2_0: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu1: PowerPC,e500mc@1 {
-			device_type = "cpu";
-			reg = <1>;
-			next-level-cache = <&L2_1>;
-			L2_1: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu2: PowerPC,e500mc@2 {
-			device_type = "cpu";
-			reg = <2>;
-			next-level-cache = <&L2_2>;
-			L2_2: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu3: PowerPC,e500mc@3 {
-			device_type = "cpu";
-			reg = <3>;
-			next-level-cache = <&L2_3>;
-			L2_3: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu4: PowerPC,e500mc@4 {
-			device_type = "cpu";
-			reg = <4>;
-			next-level-cache = <&L2_4>;
-			L2_4: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu5: PowerPC,e500mc@5 {
-			device_type = "cpu";
-			reg = <5>;
-			next-level-cache = <&L2_5>;
-			L2_5: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu6: PowerPC,e500mc@6 {
-			device_type = "cpu";
-			reg = <6>;
-			next-level-cache = <&L2_6>;
-			L2_6: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu7: PowerPC,e500mc@7 {
-			device_type = "cpu";
-			reg = <7>;
-			next-level-cache = <&L2_7>;
-			L2_7: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-	};
-
-	dcsr: dcsr@f00000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,dcsr", "simple-bus";
-
-		dcsr-epu@0 {
-			compatible = "fsl,dcsr-epu";
-			interrupts = <52 2 0 0
-				      84 2 0 0
-				      85 2 0 0>;
-			interrupt-parent = <&mpic>;
-			reg = <0x0 0x1000>;
-		};
-		dcsr-npc {
-			compatible = "fsl,dcsr-npc";
-			reg = <0x1000 0x1000 0x1000000 0x8000>;
-		};
-		dcsr-nxc@2000 {
-			compatible = "fsl,dcsr-nxc";
-			reg = <0x2000 0x1000>;
-		};
-		dcsr-corenet {
-			compatible = "fsl,dcsr-corenet";
-			reg = <0x8000 0x1000 0xB0000 0x1000>;
-		};
-		dcsr-dpaa@9000 {
-			compatible = "fsl,p4080-dcsr-dpaa", "fsl,dcsr-dpaa";
-			reg = <0x9000 0x1000>;
-		};
-		dcsr-ocn@11000 {
-			compatible = "fsl,p4080-dcsr-ocn", "fsl,dcsr-ocn";
-			reg = <0x11000 0x1000>;
-		};
-		dcsr-ddr@12000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr1>;
-			reg = <0x12000 0x1000>;
-		};
-		dcsr-ddr@13000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr2>;
-			reg = <0x13000 0x1000>;
-		};
-		dcsr-nal@18000 {
-			compatible = "fsl,p4080-dcsr-nal", "fsl,dcsr-nal";
-			reg = <0x18000 0x1000>;
-		};
-		dcsr-rcpm@22000 {
-			compatible = "fsl,p4080-dcsr-rcpm", "fsl,dcsr-rcpm";
-			reg = <0x22000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@40000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu0>;
-			reg = <0x40000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@41000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu1>;
-			reg = <0x41000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@42000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu2>;
-			reg = <0x42000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@43000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu3>;
-			reg = <0x43000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@44000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu4>;
-			reg = <0x44000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@45000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu5>;
-			reg = <0x45000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@46000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu6>;
-			reg = <0x46000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@47000 {
-			compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu7>;
-			reg = <0x47000 0x1000>;
-		};
-	};
-
-	soc: soc@ffe000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
-		reg = <0xf 0xfe000000 0 0x00001000>;
-
-		soc-sram-error {
-			compatible = "fsl,soc-sram-error";
-			interrupts = <16 2 1 29>;
-		};
-
-		corenet-law@0 {
-			compatible = "fsl,corenet-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <32>;
-		};
-
-		ddr1: memory-controller@8000 {
-			compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
-			reg = <0x8000 0x1000>;
-			interrupts = <16 2 1 23>;
-		};
-
-		ddr2: memory-controller@9000 {
-			compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller";
-			reg = <0x9000 0x1000>;
-			interrupts = <16 2 1 22>;
-		};
-
-		cpc: l3-cache-controller@10000 {
-			compatible = "fsl,p4080-l3-cache-controller", "cache";
-			reg = <0x10000 0x1000
-			       0x11000 0x1000>;
-			interrupts = <16 2 1 27
-				      16 2 1 26>;
-		};
-
-		corenet-cf@18000 {
-			compatible = "fsl,corenet-cf";
-			reg = <0x18000 0x1000>;
-			interrupts = <16 2 1 31>;
-			fsl,ccf-num-csdids = <32>;
-			fsl,ccf-num-snoopids = <32>;
-		};
-
-		iommu@20000 {
-			compatible = "fsl,pamu-v1.0", "fsl,pamu";
-			reg = <0x20000 0x5000>;
-			interrupts = <
-				24 2 0 0
-				16 2 1 30>;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <4>;
-			reg = <0x40000 0x40000>;
-			compatible = "fsl,mpic", "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi0: msi@41600 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41600 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0 0 0
-				0xe1 0 0 0
-				0xe2 0 0 0
-				0xe3 0 0 0
-				0xe4 0 0 0
-				0xe5 0 0 0
-				0xe6 0 0 0
-				0xe7 0 0 0>;
-		};
-
-		msi1: msi@41800 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41800 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe8 0 0 0
-				0xe9 0 0 0
-				0xea 0 0 0
-				0xeb 0 0 0
-				0xec 0 0 0
-				0xed 0 0 0
-				0xee 0 0 0
-				0xef 0 0 0>;
-		};
-
-		msi2: msi@41a00 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41a00 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xf0 0 0 0
-				0xf1 0 0 0
-				0xf2 0 0 0
-				0xf3 0 0 0
-				0xf4 0 0 0
-				0xf5 0 0 0
-				0xf6 0 0 0
-				0xf7 0 0 0>;
-		};
-
-		guts: global-utilities@e0000 {
-			compatible = "fsl,qoriq-device-config-1.0";
-			reg = <0xe0000 0xe00>;
-			fsl,has-rstcr;
-			#sleep-cells = <1>;
-			fsl,liodn-bits = <12>;
-		};
-
-		pins: global-utilities@e0e00 {
-			compatible = "fsl,qoriq-pin-control-1.0";
-			reg = <0xe0e00 0x200>;
-			#sleep-cells = <2>;
-		};
-
-		clockgen: global-utilities@e1000 {
-			compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
-			reg = <0xe1000 0x1000>;
-			clock-frequency = <0>;
-		};
-
-		rcpm: global-utilities@e2000 {
-			compatible = "fsl,qoriq-rcpm-1.0";
-			reg = <0xe2000 0x1000>;
-			#sleep-cells = <1>;
-		};
-
-		sfp: sfp@e8000 {
-			compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0";
-			reg	   = <0xe8000 0x1000>;
-		};
-
-		serdes: serdes@ea000 {
-			compatible = "fsl,p4080-serdes";
-			reg	   = <0xea000 0x1000>;
-		};
-
-		dma0: dma@100300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
-			reg = <0x100300 0x4>;
-			ranges = <0x0 0x100100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <28 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <29 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <30 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <31 2 0 0>;
-			};
-		};
-
-		dma1: dma@101300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
-			reg = <0x101300 0x4>;
-			ranges = <0x0 0x101100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <32 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <33 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <34 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p4080-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <35 2 0 0>;
-			};
-		};
-
-		spi@110000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
-			reg = <0x110000 0x1000>;
-			interrupts = <53 0x2 0 0>;
-			fsl,espi-num-chipselects = <4>;
-		};
-
-		sdhc: sdhc@114000 {
-			compatible = "fsl,p4080-esdhc", "fsl,esdhc";
-			reg = <0x114000 0x1000>;
-			interrupts = <48 2 0 0>;
-			voltage-ranges = <3300 3300>;
-			sdhci,auto-cmd12;
-			clock-frequency = <0>;
-		};
-
-		i2c@118000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x118000 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@118100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x118100 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <2>;
-			compatible = "fsl-i2c";
-			reg = <0x119000 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <3>;
-			compatible = "fsl-i2c";
-			reg = <0x119100 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		serial0: serial@11c500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial1: serial@11c600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial2: serial@11d500 {
-			cell-index = <2>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		serial3: serial@11d600 {
-			cell-index = <3>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		gpio0: gpio@130000 {
-			compatible = "fsl,p4080-gpio", "fsl,qoriq-gpio";
-			reg = <0x130000 0x1000>;
-			interrupts = <55 2 0 0>;
-			#gpio-cells = <2>;
-			gpio-controller;
-		};
-
-		usb0: usb@210000 {
-			compatible = "fsl,p4080-usb2-mph",
-					"fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
-			reg = <0x210000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <44 0x2 0 0>;
-		};
-
-		usb1: usb@211000 {
-			compatible = "fsl,p4080-usb2-dr",
-					"fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
-			reg = <0x211000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <45 0x2 0 0>;
-		};
-
-		crypto: crypto@300000 {
-			compatible = "fsl,sec-v4.0";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x300000 0x10000>;
-			ranges = <0 0x300000 0x10000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <92 2 0 0>;
-
-			sec_jr0: jr@1000 {
-				compatible = "fsl,sec-v4.0-job-ring";
-				reg = <0x1000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <88 2 0 0>;
-			};
-
-			sec_jr1: jr@2000 {
-				compatible = "fsl,sec-v4.0-job-ring";
-				reg = <0x2000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <89 2 0 0>;
-			};
-
-			sec_jr2: jr@3000 {
-				compatible = "fsl,sec-v4.0-job-ring";
-				reg = <0x3000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <90 2 0 0>;
-			};
-
-			sec_jr3: jr@4000 {
-				compatible = "fsl,sec-v4.0-job-ring";
-				reg = <0x4000 0x1000>;
-				interrupt-parent = <&mpic>;
-				interrupts = <91 2 0 0>;
-			};
-
-			rtic@6000 {
-				compatible = "fsl,sec-v4.0-rtic";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0x6000 0x100>;
-				ranges = <0x0 0x6100 0xe00>;
-
-				rtic_a: rtic-a@0 {
-					compatible = "fsl,sec-v4.0-rtic-memory";
-					reg = <0x00 0x20 0x100 0x80>;
-				};
-
-				rtic_b: rtic-b@20 {
-					compatible = "fsl,sec-v4.0-rtic-memory";
-					reg = <0x20 0x20 0x200 0x80>;
-				};
-
-				rtic_c: rtic-c@40 {
-					compatible = "fsl,sec-v4.0-rtic-memory";
-					reg = <0x40 0x20 0x300 0x80>;
-				};
-
-				rtic_d: rtic-d@60 {
-					compatible = "fsl,sec-v4.0-rtic-memory";
-					reg = <0x60 0x20 0x500 0x80>;
-				};
-			};
-		};
-
-		sec_mon: sec_mon@314000 {
-			compatible = "fsl,sec-v4.0-mon";
-			reg = <0x314000 0x1000>;
-			interrupt-parent = <&mpic>;
-			interrupts = <93 2 0 0>;
-		};
-	};
-
-	rapidio0: rapidio@ffe0c0000 {
-		#address-cells = <2>;
-		#size-cells = <2>;
-		compatible = "fsl,rapidio-delta";
-		interrupts = <
-			16 2 1 11 /* err_irq */
-			56 2 0 0  /* bell_outb_irq */
-			57 2 0 0  /* bell_inb_irq */
-			60 2 0 0  /* msg1_tx_irq */
-			61 2 0 0  /* msg1_rx_irq */
-			62 2 0 0  /* msg2_tx_irq */
-			63 2 0 0>; /* msg2_rx_irq */
-	};
-
-	localbus@ffe124000 {
-		compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
-		interrupts = <25 2 0 0>;
-		#address-cells = <2>;
-		#size-cells = <1>;
-	};
-
-	pci0: pcie@ffe200000 {
-		compatible = "fsl,p4080-pcie";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi0>;
-		interrupts = <16 2 1 15>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 15>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 40 1 0 0
-				0000 0 0 2 &mpic 1 1 0 0
-				0000 0 0 3 &mpic 2 1 0 0
-				0000 0 0 4 &mpic 3 1 0 0
-				>;
-		};
-	};
-
-	pci1: pcie@ffe201000 {
-		compatible = "fsl,p4080-pcie";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi1>;
-		interrupts = <16 2 1 14>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 14>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 41 1 0 0
-				0000 0 0 2 &mpic 5 1 0 0
-				0000 0 0 3 &mpic 6 1 0 0
-				0000 0 0 4 &mpic 7 1 0 0
-				>;
-		};
-	};
-
-	pci2: pcie@ffe202000 {
-		compatible = "fsl,p4080-pcie";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi2>;
-		interrupts = <16 2 1 13>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 13>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 42 1 0 0
-				0000 0 0 2 &mpic 9 1 0 0
-				0000 0 0 3 &mpic 10 1 0 0
-				0000 0 0 4 &mpic 11 1 0 0
-				>;
-		};
-	};
-};
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index e6d4099..1c25068 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "p5020si.dtsi"
+/include/ "fsl/p5020si-pre.dtsi"
 
 / {
 	model = "fsl,P5020DS";
@@ -50,6 +50,8 @@
 	};
 
 	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
 		spi@110000 {
 			flash@0 {
 				#address-cells = <1>;
@@ -99,7 +101,18 @@
 		};
 	};
 
-	localbus@ffe124000 {
+	rio: rapidio@ffe0c0000 {
+		reg = <0xf 0xfe0c0000 0 0x11000>;
+
+		port1 {
+			ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+		};
+		port2 {
+			ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+		};
+	};
+
+	lbc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x1000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
 			  2 0 0xf 0xffa00000 0x00040000
@@ -160,7 +173,7 @@
 		reg = <0xf 0xfe200000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-
+		fsl,msi = <&msi0>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -176,6 +189,7 @@
 		reg = <0xf 0xfe201000 0 0x1000>;
 		ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
 			  0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+		fsl,msi = <&msi1>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -191,6 +205,7 @@
 		reg = <0xf 0xfe202000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -206,6 +221,7 @@
 		reg = <0xf 0xfe203000 0 0x1000>;
 		ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
 			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+		fsl,msi = <&msi2>;
 		pcie@0 {
 			ranges = <0x02000000 0 0xe0000000
 				  0x02000000 0 0xe0000000
@@ -217,3 +233,5 @@
 		};
 	};
 };
+
+/include/ "fsl/p5020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p5020si.dtsi b/arch/powerpc/boot/dts/p5020si.dtsi
deleted file mode 100644
index e7948ad..0000000
--- a/arch/powerpc/boot/dts/p5020si.dtsi
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * P5020 Silicon Device Tree Source
- *
- * Copyright 2010-2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
-	compatible = "fsl,P5020";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&mpic>;
-
-	aliases {
-		ccsr = &soc;
-		dcsr = &dcsr;
-
-		serial0 = &serial0;
-		serial1 = &serial1;
-		serial2 = &serial2;
-		serial3 = &serial3;
-		pci0 = &pci0;
-		pci1 = &pci1;
-		pci2 = &pci2;
-		pci3 = &pci3;
-		usb0 = &usb0;
-		usb1 = &usb1;
-		dma0 = &dma0;
-		dma1 = &dma1;
-		sdhc = &sdhc;
-		msi0 = &msi0;
-		msi1 = &msi1;
-		msi2 = &msi2;
-
-		crypto = &crypto;
-		sec_jr0 = &sec_jr0;
-		sec_jr1 = &sec_jr1;
-		sec_jr2 = &sec_jr2;
-		sec_jr3 = &sec_jr3;
-		rtic_a = &rtic_a;
-		rtic_b = &rtic_b;
-		rtic_c = &rtic_c;
-		rtic_d = &rtic_d;
-		sec_mon = &sec_mon;
-
-/*
-		rio0 = &rapidio0;
- */
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: PowerPC,e5500@0 {
-			device_type = "cpu";
-			reg = <0>;
-			next-level-cache = <&L2_0>;
-			L2_0: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-		cpu1: PowerPC,e5500@1 {
-			device_type = "cpu";
-			reg = <1>;
-			next-level-cache = <&L2_1>;
-			L2_1: l2-cache {
-				next-level-cache = <&cpc>;
-			};
-		};
-	};
-
-	dcsr: dcsr@f00000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,dcsr", "simple-bus";
-
-		dcsr-epu@0 {
-			compatible = "fsl,dcsr-epu";
-			interrupts = <52 2 0 0
-				      84 2 0 0
-				      85 2 0 0>;
-			interrupt-parent = <&mpic>;
-			reg = <0x0 0x1000>;
-		};
-		dcsr-npc {
-			compatible = "fsl,dcsr-npc";
-			reg = <0x1000 0x1000 0x1000000 0x8000>;
-		};
-		dcsr-nxc@2000 {
-			compatible = "fsl,dcsr-nxc";
-			reg = <0x2000 0x1000>;
-		};
-		dcsr-corenet {
-			compatible = "fsl,dcsr-corenet";
-			reg = <0x8000 0x1000 0xB0000 0x1000>;
-		};
-		dcsr-dpaa@9000 {
-			compatible = "fsl,p5020-dcsr-dpaa", "fsl,dcsr-dpaa";
-			reg = <0x9000 0x1000>;
-		};
-		dcsr-ocn@11000 {
-			compatible = "fsl,p5020-dcsr-ocn", "fsl,dcsr-ocn";
-			reg = <0x11000 0x1000>;
-		};
-		dcsr-ddr@12000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr1>;
-			reg = <0x12000 0x1000>;
-		};
-		dcsr-ddr@13000 {
-			compatible = "fsl,dcsr-ddr";
-			dev-handle = <&ddr2>;
-			reg = <0x13000 0x1000>;
-		};
-		dcsr-nal@18000 {
-			compatible = "fsl,p5020-dcsr-nal", "fsl,dcsr-nal";
-			reg = <0x18000 0x1000>;
-		};
-		dcsr-rcpm@22000 {
-			compatible = "fsl,p5020-dcsr-rcpm", "fsl,dcsr-rcpm";
-			reg = <0x22000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@40000 {
-			compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu0>;
-			reg = <0x40000 0x1000>;
-		};
-		dcsr-cpu-sb-proxy@41000 {
-			compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
-			cpu-handle = <&cpu1>;
-			reg = <0x41000 0x1000>;
-		};
-	};
-
-	soc: soc@ffe000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		device_type = "soc";
-		compatible = "simple-bus";
-		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
-		reg = <0xf 0xfe000000 0 0x00001000>;
-
-		soc-sram-error {
-			compatible = "fsl,soc-sram-error";
-			interrupts = <16 2 1 29>;
-		};
-
-		corenet-law@0 {
-			compatible = "fsl,corenet-law";
-			reg = <0x0 0x1000>;
-			fsl,num-laws = <32>;
-		};
-
-		ddr1: memory-controller@8000 {
-			compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
-			reg = <0x8000 0x1000>;
-			interrupts = <16 2 1 23>;
-		};
-
-		ddr2: memory-controller@9000 {
-			compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
-			reg = <0x9000 0x1000>;
-			interrupts = <16 2 1 22>;
-		};
-
-		cpc: l3-cache-controller@10000 {
-			compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
-			reg = <0x10000 0x1000
-			       0x11000 0x1000>;
-			interrupts = <16 2 1 27
-				      16 2 1 26>;
-		};
-
-		corenet-cf@18000 {
-			compatible = "fsl,corenet-cf";
-			reg = <0x18000 0x1000>;
-			interrupts = <16 2 1 31>;
-			fsl,ccf-num-csdids = <32>;
-			fsl,ccf-num-snoopids = <32>;
-		};
-
-		iommu@20000 {
-			compatible = "fsl,pamu-v1.0", "fsl,pamu";
-			reg = <0x20000 0x4000>;
-			interrupts = <
-				24 2 0 0
-				16 2 1 30>;
-		};
-
-		mpic: pic@40000 {
-			clock-frequency = <0>;
-			interrupt-controller;
-			#address-cells = <0>;
-			#interrupt-cells = <4>;
-			reg = <0x40000 0x40000>;
-			compatible = "fsl,mpic", "chrp,open-pic";
-			device_type = "open-pic";
-		};
-
-		msi0: msi@41600 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41600 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe0 0 0 0
-				0xe1 0 0 0
-				0xe2 0 0 0
-				0xe3 0 0 0
-				0xe4 0 0 0
-				0xe5 0 0 0
-				0xe6 0 0 0
-				0xe7 0 0 0>;
-		};
-
-		msi1: msi@41800 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41800 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xe8 0 0 0
-				0xe9 0 0 0
-				0xea 0 0 0
-				0xeb 0 0 0
-				0xec 0 0 0
-				0xed 0 0 0
-				0xee 0 0 0
-				0xef 0 0 0>;
-		};
-
-		msi2: msi@41a00 {
-			compatible = "fsl,mpic-msi";
-			reg = <0x41a00 0x200>;
-			msi-available-ranges = <0 0x100>;
-			interrupts = <
-				0xf0 0 0 0
-				0xf1 0 0 0
-				0xf2 0 0 0
-				0xf3 0 0 0
-				0xf4 0 0 0
-				0xf5 0 0 0
-				0xf6 0 0 0
-				0xf7 0 0 0>;
-		};
-
-		guts: global-utilities@e0000 {
-			compatible = "fsl,qoriq-device-config-1.0";
-			reg = <0xe0000 0xe00>;
-			fsl,has-rstcr;
-			#sleep-cells = <1>;
-			fsl,liodn-bits = <12>;
-		};
-
-		pins: global-utilities@e0e00 {
-			compatible = "fsl,qoriq-pin-control-1.0";
-			reg = <0xe0e00 0x200>;
-			#sleep-cells = <2>;
-		};
-
-		clockgen: global-utilities@e1000 {
-			compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
-			reg = <0xe1000 0x1000>;
-			clock-frequency = <0>;
-		};
-
-		rcpm: global-utilities@e2000 {
-			compatible = "fsl,qoriq-rcpm-1.0";
-			reg = <0xe2000 0x1000>;
-			#sleep-cells = <1>;
-		};
-
-		sfp: sfp@e8000 {
-			compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0";
-			reg	   = <0xe8000 0x1000>;
-		};
-
-		serdes: serdes@ea000 {
-			compatible = "fsl,p5020-serdes";
-			reg	   = <0xea000 0x1000>;
-		};
-
-		dma0: dma@100300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
-			reg = <0x100300 0x4>;
-			ranges = <0x0 0x100100 0x200>;
-			cell-index = <0>;
-			dma-channel@0 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <28 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <29 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <30 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <31 2 0 0>;
-			};
-		};
-
-		dma1: dma@101300 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
-			reg = <0x101300 0x4>;
-			ranges = <0x0 0x101100 0x200>;
-			cell-index = <1>;
-			dma-channel@0 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x0 0x80>;
-				cell-index = <0>;
-				interrupts = <32 2 0 0>;
-			};
-			dma-channel@80 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x80 0x80>;
-				cell-index = <1>;
-				interrupts = <33 2 0 0>;
-			};
-			dma-channel@100 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x100 0x80>;
-				cell-index = <2>;
-				interrupts = <34 2 0 0>;
-			};
-			dma-channel@180 {
-				compatible = "fsl,p5020-dma-channel",
-						"fsl,eloplus-dma-channel";
-				reg = <0x180 0x80>;
-				cell-index = <3>;
-				interrupts = <35 2 0 0>;
-			};
-		};
-
-		spi@110000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,p5020-espi", "fsl,mpc8536-espi";
-			reg = <0x110000 0x1000>;
-			interrupts = <53 0x2 0 0>;
-			fsl,espi-num-chipselects = <4>;
-		};
-
-		sdhc: sdhc@114000 {
-			compatible = "fsl,p5020-esdhc", "fsl,esdhc";
-			reg = <0x114000 0x1000>;
-			interrupts = <48 2 0 0>;
-			sdhci,auto-cmd12;
-			clock-frequency = <0>;
-		};
-
-		i2c@118000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <0>;
-			compatible = "fsl-i2c";
-			reg = <0x118000 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@118100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <1>;
-			compatible = "fsl-i2c";
-			reg = <0x118100 0x100>;
-			interrupts = <38 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <2>;
-			compatible = "fsl-i2c";
-			reg = <0x119000 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		i2c@119100 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			cell-index = <3>;
-			compatible = "fsl-i2c";
-			reg = <0x119100 0x100>;
-			interrupts = <39 2 0 0>;
-			dfsrr;
-		};
-
-		serial0: serial@11c500 {
-			cell-index = <0>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial1: serial@11c600 {
-			cell-index = <1>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11c600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <36 2 0 0>;
-		};
-
-		serial2: serial@11d500 {
-			cell-index = <2>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d500 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		serial3: serial@11d600 {
-			cell-index = <3>;
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <0x11d600 0x100>;
-			clock-frequency = <0>;
-			interrupts = <37 2 0 0>;
-		};
-
-		gpio0: gpio@130000 {
-			compatible = "fsl,p5020-gpio", "fsl,qoriq-gpio";
-			reg = <0x130000 0x1000>;
-			interrupts = <55 2 0 0>;
-			#gpio-cells = <2>;
-			gpio-controller;
-		};
-
-		usb0: usb@210000 {
-			compatible = "fsl,p5020-usb2-mph",
-					"fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
-			reg = <0x210000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <44 0x2 0 0>;
-			phy_type = "utmi";
-			port0;
-		};
-
-		usb1: usb@211000 {
-			compatible = "fsl,p5020-usb2-dr",
-					"fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
-			reg = <0x211000 0x1000>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <45 0x2 0 0>;
-			dr_mode = "host";
-			phy_type = "utmi";
-		};
-
-		sata@220000 {
-			compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
-			reg = <0x220000 0x1000>;
-			interrupts = <68 0x2 0 0>;
-		};
-
-		sata@221000 {
-			compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
-			reg = <0x221000 0x1000>;
-			interrupts = <69 0x2 0 0>;
-		};
-
-		crypto: crypto@300000 {
-			compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg		 = <0x300000 0x10000>;
-			ranges		 = <0 0x300000 0x10000>;
-			interrupts	 = <92 2 0 0>;
-
-			sec_jr0: jr@1000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x1000 0x1000>;
-				interrupts = <88 2 0 0>;
-			};
-
-			sec_jr1: jr@2000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x2000 0x1000>;
-				interrupts = <89 2 0 0>;
-			};
-
-			sec_jr2: jr@3000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x3000 0x1000>;
-				interrupts = <90 2 0 0>;
-			};
-
-			sec_jr3: jr@4000 {
-				compatible = "fsl,sec-v4.2-job-ring",
-					     "fsl,sec-v4.0-job-ring";
-				reg = <0x4000 0x1000>;
-				interrupts = <91 2 0 0>;
-			};
-
-			rtic@6000 {
-				compatible = "fsl,sec-v4.2-rtic",
-					     "fsl,sec-v4.0-rtic";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				reg = <0x6000 0x100>;
-				ranges = <0x0 0x6100 0xe00>;
-
-				rtic_a: rtic-a@0 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x00 0x20 0x100 0x80>;
-				};
-
-				rtic_b: rtic-b@20 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x20 0x20 0x200 0x80>;
-				};
-
-				rtic_c: rtic-c@40 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x40 0x20 0x300 0x80>;
-				};
-
-				rtic_d: rtic-d@60 {
-					compatible = "fsl,sec-v4.2-rtic-memory",
-						     "fsl,sec-v4.0-rtic-memory";
-					reg = <0x60 0x20 0x500 0x80>;
-				};
-			};
-		};
-
-		sec_mon: sec_mon@314000 {
-			compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
-			reg = <0x314000 0x1000>;
-			interrupts = <93 2 0 0>;
-		};
-	};
-
-/*
-	rapidio0: rapidio@ffe0c0000
-*/
-
-	localbus@ffe124000 {
-		compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
-		interrupts = <25 2 0 0>;
-		#address-cells = <2>;
-		#size-cells = <1>;
-	};
-
-	pci0: pcie@ffe200000 {
-		compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi0>;
-		interrupts = <16 2 1 15>;
-
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 15>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 40 1 0 0
-				0000 0 0 2 &mpic 1 1 0 0
-				0000 0 0 3 &mpic 2 1 0 0
-				0000 0 0 4 &mpic 3 1 0 0
-				>;
-		};
-	};
-
-	pci1: pcie@ffe201000 {
-		compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi1>;
-		interrupts = <16 2 1 14>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 14>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 41 1 0 0
-				0000 0 0 2 &mpic 5 1 0 0
-				0000 0 0 3 &mpic 6 1 0 0
-				0000 0 0 4 &mpic 7 1 0 0
-				>;
-		};
-	};
-
-	pci2: pcie@ffe202000 {
-		compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi2>;
-		interrupts = <16 2 1 13>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 13>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 42 1 0 0
-				0000 0 0 2 &mpic 9 1 0 0
-				0000 0 0 3 &mpic 10 1 0 0
-				0000 0 0 4 &mpic 11 1 0 0
-				>;
-		};
-	};
-
-	pci3: pcie@ffe203000 {
-		compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
-		device_type = "pci";
-		#size-cells = <2>;
-		#address-cells = <3>;
-		bus-range = <0x0 0xff>;
-		clock-frequency = <0x1fca055>;
-		fsl,msi = <&msi2>;
-		interrupts = <16 2 1 12>;
-		pcie@0 {
-			reg = <0 0 0 0 0>;
-			#interrupt-cells = <1>;
-			#size-cells = <2>;
-			#address-cells = <3>;
-			device_type = "pci";
-			interrupts = <16 2 1 12>;
-			interrupt-map-mask = <0xf800 0 0 7>;
-			interrupt-map = <
-				/* IDSEL 0x0 */
-				0000 0 0 1 &mpic 43 1 0 0
-				0000 0 0 2 &mpic 0 1 0 0
-				0000 0 0 3 &mpic 4 1 0 0
-				0000 0 0 4 &mpic 8 1 0 0
-				>;
-		};
-	};
-};
diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
index 0dc90f9..b1e45a8 100644
--- a/arch/powerpc/boot/dts/sbc8349.dts
+++ b/arch/powerpc/boot/dts/sbc8349.dts
@@ -222,7 +222,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <9 0x8>;
@@ -232,7 +232,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
index 94a3322..77be771 100644
--- a/arch/powerpc/boot/dts/sbc8548.dts
+++ b/arch/powerpc/boot/dts/sbc8548.dts
@@ -316,7 +316,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;	// reg base, size
 			clock-frequency = <0>;	// should we fill in in uboot?
 			interrupts = <0x2a 0x2>;
@@ -326,7 +326,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>;	// should we fill in in uboot?
 			interrupts = <0x2a 0x2>;
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index ee5538f..56bebce 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -347,7 +347,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -357,7 +357,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <28 2>;
diff --git a/arch/powerpc/boot/dts/socrates.dts b/arch/powerpc/boot/dts/socrates.dts
index 38c3540..134a5ff9 100644
--- a/arch/powerpc/boot/dts/socrates.dts
+++ b/arch/powerpc/boot/dts/socrates.dts
@@ -199,7 +199,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -209,7 +209,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/storcenter.dts b/arch/powerpc/boot/dts/storcenter.dts
index eab680c..2a55573 100644
--- a/arch/powerpc/boot/dts/storcenter.dts
+++ b/arch/powerpc/boot/dts/storcenter.dts
@@ -74,7 +74,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x20>;
 			clock-frequency = <97553800>; /* Hz */
 			current-speed = <115200>;
@@ -85,7 +85,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x20>;
 			clock-frequency = <97553800>; /* Hz */
 			current-speed = <9600>;
diff --git a/arch/powerpc/boot/dts/stxssa8555.dts b/arch/powerpc/boot/dts/stxssa8555.dts
index 49efd44..4f166b0 100644
--- a/arch/powerpc/boot/dts/stxssa8555.dts
+++ b/arch/powerpc/boot/dts/stxssa8555.dts
@@ -210,7 +210,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -220,7 +220,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 0a4cedb..ed264d9 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -250,7 +250,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -260,7 +260,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index f49d091..9252421 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -224,7 +224,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -234,7 +234,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 9452c3c..6e1ac50 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -305,7 +305,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;	// reg base, size
 			clock-frequency = <0>;	// should we fill in in uboot?
 			current-speed = <115200>;
@@ -316,7 +316,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>;	// should we fill in in uboot?
 			current-speed = <115200>;
@@ -352,7 +352,7 @@
 		ranges = <
 			0 0x0 0xfc000000 0x04000000	// NOR FLASH bank 1
 			1 0x0 0xf8000000 0x08000000	// NOR FLASH bank 0
-			2 0x0 0xa3000000 0x00008000	// CAN (2 x i82527)
+			2 0x0 0xa3000000 0x00008000	// CAN (2 x CC770)
 			3 0x0 0xa3010000 0x00008000	// NAND FLASH
 
 		>;
@@ -393,18 +393,27 @@
 		};
 
 		/* Note: CAN support needs to be enabled in U-Boot */
-		can0@2,0 {
-			compatible = "intel,82527"; // Bosch CC770
+		can@2,0 {
+			compatible = "bosch,cc770"; // Bosch CC770
 			reg = <2 0x0 0x100>;
 			interrupts = <4 1>;
 			interrupt-parent = <&mpic>;
+			bosch,external-clock-frequency = <16000000>;
+			bosch,disconnect-rx1-input;
+			bosch,disconnect-tx1-output;
+			bosch,iso-low-speed-mux;
+			bosch,clock-out-frequency = <16000000>;
 		};
 
-		can1@2,100 {
-			compatible = "intel,82527"; // Bosch CC770
+		can@2,100 {
+			compatible = "bosch,cc770"; // Bosch CC770
 			reg = <2 0x100 0x100>;
 			interrupts = <4 1>;
 			interrupt-parent = <&mpic>;
+			bosch,external-clock-frequency = <16000000>;
+			bosch,disconnect-rx1-input;
+			bosch,disconnect-tx1-output;
+			bosch,iso-low-speed-mux;
 		};
 
 		/* Note: NAND support needs to be enabled in U-Boot */
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 619776f..161e75e 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -305,7 +305,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;	// reg base, size
 			clock-frequency = <0>;	// should we fill in in uboot?
 			current-speed = <115200>;
@@ -316,7 +316,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>;	// should we fill in in uboot?
 			current-speed = <115200>;
@@ -352,7 +352,7 @@
 		ranges = <
 			0 0x0 0xfc000000 0x04000000	// NOR FLASH bank 1
 			1 0x0 0xf8000000 0x08000000	// NOR FLASH bank 0
-			2 0x0 0xe3000000 0x00008000	// CAN (2 x i82527)
+			2 0x0 0xe3000000 0x00008000	// CAN (2 x CC770)
 			3 0x0 0xe3010000 0x00008000	// NAND FLASH
 
 		>;
@@ -393,18 +393,27 @@
 		};
 
 		/* Note: CAN support needs to be enabled in U-Boot */
-		can0@2,0 {
-			compatible = "intel,82527"; // Bosch CC770
+		can@2,0 {
+			compatible = "bosch,cc770"; // Bosch CC770
 			reg = <2 0x0 0x100>;
 			interrupts = <4 1>;
 			interrupt-parent = <&mpic>;
+			bosch,external-clock-frequency = <16000000>;
+			bosch,disconnect-rx1-input;
+			bosch,disconnect-tx1-output;
+			bosch,iso-low-speed-mux;
+			bosch,clock-out-frequency = <16000000>;
 		};
 
-		can1@2,100 {
-			compatible = "intel,82527"; // Bosch CC770
+		can@2,100 {
+			compatible = "bosch,cc770"; // Bosch CC770
 			reg = <2 0x100 0x100>;
 			interrupts = <4 1>;
 			interrupt-parent = <&mpic>;
+			bosch,external-clock-frequency = <16000000>;
+			bosch,disconnect-rx1-input;
+			bosch,disconnect-tx1-output;
+			bosch,iso-low-speed-mux;
 		};
 
 		/* Note: NAND support needs to be enabled in U-Boot */
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 81bad8c..aa6ff0d 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -224,7 +224,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>; 	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
@@ -234,7 +234,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;	// reg base, size
 			clock-frequency = <0>; 	// should we fill in in uboot?
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8xx.dts b/arch/powerpc/boot/dts/tqm8xx.dts
index f6da7ec..c3dba25 100644
--- a/arch/powerpc/boot/dts/tqm8xx.dts
+++ b/arch/powerpc/boot/dts/tqm8xx.dts
@@ -57,6 +57,7 @@
 
 		ranges = <
 			0x0 0x0 0x40000000 0x800000
+			0x3 0x0 0xc0000000 0x200
 		>;
 
 		flash@0,0 {
@@ -67,6 +68,30 @@
 			bank-width = <4>;
 			device-width = <2>;
 		};
+
+		/* Note: CAN support needs to be enabled in U-Boot */
+		can@3,0 {
+			compatible = "intc,82527";
+			reg = <3 0x0 0x80>;
+			interrupts = <8 1>;
+			interrupt-parent = <&PIC>;
+			bosch,external-clock-frequency = <16000000>;
+			bosch,disconnect-rx1-input;
+			bosch,disconnect-tx1-output;
+			bosch,iso-low-speed-mux;
+			bosch,clock-out-frequency = <16000000>;
+		};
+
+		can@3,100 {
+			compatible = "intc,82527";
+			reg = <3 0x100 0x80>;
+			interrupts = <8 1>;
+			interrupt-parent = <&PIC>;
+			bosch,external-clock-frequency = <16000000>;
+			bosch,disconnect-rx1-input;
+			bosch,disconnect-tx1-output;
+			bosch,iso-low-speed-mux;
+		};
 	};
 
 	soc@fff00000 {
diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts
index ac0a617..cc00f4d 100644
--- a/arch/powerpc/boot/dts/xcalibur1501.dts
+++ b/arch/powerpc/boot/dts/xcalibur1501.dts
@@ -531,7 +531,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -542,7 +542,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/xpedite5200.dts b/arch/powerpc/boot/dts/xpedite5200.dts
index c41a80c..8fd7b70 100644
--- a/arch/powerpc/boot/dts/xpedite5200.dts
+++ b/arch/powerpc/boot/dts/xpedite5200.dts
@@ -333,7 +333,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			current-speed = <115200>;
@@ -344,7 +344,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			current-speed = <115200>;
diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
index c0efcbb..0baa828 100644
--- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts
+++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
@@ -337,7 +337,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			current-speed = <9600>;
@@ -348,7 +348,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			current-speed = <9600>;
diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts
index db7faf5..53c1c6a 100644
--- a/arch/powerpc/boot/dts/xpedite5301.dts
+++ b/arch/powerpc/boot/dts/xpedite5301.dts
@@ -441,7 +441,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -452,7 +452,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts
index c364ca6..2152259 100644
--- a/arch/powerpc/boot/dts/xpedite5330.dts
+++ b/arch/powerpc/boot/dts/xpedite5330.dts
@@ -477,7 +477,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -488,7 +488,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts
index 7a8a4afd..11dbda1 100644
--- a/arch/powerpc/boot/dts/xpedite5370.dts
+++ b/arch/powerpc/boot/dts/xpedite5370.dts
@@ -439,7 +439,7 @@
 		serial0: serial@4500 {
 			cell-index = <0>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4500 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
@@ -450,7 +450,7 @@
 		serial1: serial@4600 {
 			cell-index = <1>;
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "fsl,ns16550", "ns16550";
 			reg = <0x4600 0x100>;
 			clock-frequency = <0>;
 			interrupts = <42 2>;
diff --git a/arch/powerpc/boot/treeboot-currituck.c b/arch/powerpc/boot/treeboot-currituck.c
new file mode 100644
index 0000000..925ae43
--- /dev/null
+++ b/arch/powerpc/boot/treeboot-currituck.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright © 2011 Tony Breeds IBM Corporation
+ *
+ * Based on earlier code:
+ *   Copyright (C) Paul Mackerras 1997.
+ *
+ *   Matt Porter <mporter@kernel.crashing.org>
+ *   Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *   Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *   Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ *    Copyright 2007 David Gibson, IBM Corporation.
+ *    Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
+ *    Copyright © 2011 David Kleikamp IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+#include "reg.h"
+#include "io.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "44x.h"
+#include "libfdt.h"
+
+BSS_STACK(4096);
+
+#define MAX_RANKS	0x4
+#define DDR3_MR0CF	0x80010011U
+
+static unsigned long long ibm_currituck_memsize;
+static unsigned long long ibm_currituck_detect_memsize(void)
+{
+	u32 reg;
+	unsigned i;
+	unsigned long long memsize = 0;
+
+	for(i = 0; i < MAX_RANKS; i++){
+		reg = mfdcrx(DDR3_MR0CF + i);
+
+		if (!(reg & 1))
+			continue;
+
+		reg &= 0x0000f000;
+		reg >>= 12;
+		memsize += (0x800000ULL << reg);
+	}
+
+	return memsize;
+}
+
+static void ibm_currituck_fixups(void)
+{
+	void *devp = finddevice("/");
+	u32 dma_ranges[7];
+
+	dt_fixup_memory(0x0ULL,  ibm_currituck_memsize);
+
+	while ((devp = find_node_by_devtype(devp, "pci"))) {
+		if (getprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)) < 0) {
+			printf("%s: Failed to get dma-ranges\r\n", __func__);
+			continue;
+		}
+
+		dma_ranges[5] = ibm_currituck_memsize >> 32;
+		dma_ranges[6] = ibm_currituck_memsize & 0xffffffffUL;
+
+		setprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges));
+	}
+}
+
+#define SPRN_PIR	0x11E	/* Processor Identification Register */
+void platform_init(void)
+{
+	unsigned long end_of_ram, avail_ram;
+	u32 pir_reg;
+	int node, size;
+	const u32 *timebase;
+
+	ibm_currituck_memsize = ibm_currituck_detect_memsize();
+	if (ibm_currituck_memsize >> 32)
+		end_of_ram = ~0UL;
+	else
+		end_of_ram = ibm_currituck_memsize;
+	avail_ram = end_of_ram - (unsigned long)_end;
+
+	simple_alloc_init(_end, avail_ram, 128, 64);
+	platform_ops.fixups = ibm_currituck_fixups;
+	platform_ops.exit = ibm44x_dbcr_reset;
+	pir_reg = mfspr(SPRN_PIR);
+
+	/* Make sure FDT blob is sane */
+	if (fdt_check_header(_dtb_start) != 0)
+		fatal("Invalid device tree blob\n");
+
+	node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
+	                                     "cpu", sizeof("cpu"));
+	if (!node)
+		fatal("Cannot find cpu node\n");
+	timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size);
+	if (timebase && (size == 4))
+		timebase_period_ns = 1000000000 / *timebase;
+
+	fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
+	fdt_init(_dtb_start);
+
+	serial_console_init();
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index c74531a..f090e6d 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -163,7 +163,7 @@
     link_address='0x500000'
     pie=
     ;;
-miboot|uboot)
+miboot|uboot*)
     # miboot and U-boot want just the bare bits, not an ELF binary
     ext=bin
     objflags="-O binary"
@@ -244,6 +244,9 @@
     link_address='0x600000'
     platformo="$object/$platform-head.o $object/$platform.o"
     ;;
+treeboot-currituck)
+    link_address='0x1000000'
+    ;;
 treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
@@ -257,6 +260,8 @@
 if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
     ${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
 
+    strip_size=$(stat -c %s $vmz.$$)
+
     if [ -n "$gzip" ]; then
         gzip -n -f -9 "$vmz.$$"
     fi
@@ -266,6 +271,24 @@
     else
 	vmz="$vmz.$$"
     fi
+else
+    # Calculate the vmlinux.strip size
+    ${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
+    strip_size=$(stat -c %s $vmz.$$)
+    rm -f $vmz.$$
+fi
+
+# Round the size to next higher MB limit
+round_size=$(((strip_size + 0xfffff) & 0xfff00000))
+
+round_size=0x$(printf "%x" $round_size)
+link_addr=$(printf "%d" $link_address)
+
+if [ $link_addr -lt $strip_size ]; then
+    echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \
+		"overlaps the address of the wrapper($link_address)"
+    echo "INFO: Fixing the link_address of wrapper to ($round_size)"
+    link_address=$round_size
 fi
 
 vmz="$vmz$gzip"
@@ -291,6 +314,26 @@
     fi
     exit 0
     ;;
+uboot-obs600)
+    rm -f "$ofile"
+    # obs600 wants a multi image with an initrd, so we need to put a fake
+    # one in even when building a "normal" image.
+    if [ -n "$initrd" ]; then
+	real_rd="$initrd"
+    else
+	real_rd=`mktemp`
+	echo "\0" >>"$real_rd"
+    fi
+    ${MKIMAGE} -A ppc -O linux -T multi -C gzip -a $membase -e $membase \
+	$uboot_version -d "$vmz":"$real_rd":"$dtb" "$ofile"
+    if [ -z "$initrd" ]; then
+	rm -f "$real_rd"
+    fi
+    if [ -z "$cacheit" ]; then
+	rm -f "$vmz"
+    fi
+    exit 0
+    ;;
 esac
 
 addsec() {
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
new file mode 100644
index 0000000..c0d228d
--- /dev/null
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -0,0 +1,55 @@
+CONFIG_40x=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_WALNUT is not set
+CONFIG_APM8018X=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_MIGRATION is not set
+# CONFIG_SUSPEND is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_SAS_ATTRS=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_AVERAGE=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_FTRACE is not set
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
new file mode 100644
index 0000000..91c110d
--- /dev/null
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -0,0 +1,83 @@
+CONFIG_40x=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_WALNUT is not set
+CONFIG_OBS600=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MATH_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_NDFC=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+CONFIG_NETDEVICES=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_IBM_IIC=y
+CONFIG_SENSORS_LM75=y
+CONFIG_THERMAL=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_EXT2_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
new file mode 100644
index 0000000..4192322
--- /dev/null
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -0,0 +1,110 @@
+CONFIG_44x=y
+CONFIG_SMP=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PPC_47x=y
+# CONFIG_EBONY is not set
+CONFIG_CURRITUCK=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+CONFIG_MATH_EMULATION=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_SIL24=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_E1000E=y
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_IBM_IIC=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NLS_DEFAULT="n"
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x10000000
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x200
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index a6eb6ad..ca00cf7 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -25,7 +25,8 @@
 CONFIG_CMDLINE="root=/dev/issblk0"
 # CONFIG_PCI is not set
 CONFIG_ADVANCED_OPTIONS=y
-CONFIG_RELOCATABLE=y
+CONFIG_NONSTATIC_KERNEL=y
+CONFIG_DYNAMIC_MEMSTART=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
new file mode 100644
index 0000000..acf7fb2
--- /dev/null
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -0,0 +1,307 @@
+CONFIG_PPC64=y
+CONFIG_PPC_BOOK3E_64=y
+# CONFIG_VIRT_CPU_ACCOUNTING is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=256
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=19
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_COUNTERS=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_SCOM_DEBUGFS=y
+CONFIG_PPC_A2_DD2=y
+CONFIG_KVM_GUEST=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HZ_100=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NUMA=y
+# CONFIG_MIGRATION is not set
+CONFIG_PPC_64K_PAGES=y
+CONFIG_SCHED_SMT=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_SECCOMP is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_NET_TCPPROBE=y
+# CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_DEBUG=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_LE_BYTE_SWAP=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_CDROM_PKTCDVD=y
+CONFIG_MISC_DEVICES=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_MV=y
+CONFIG_SATA_SIL=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_MARVELL=y
+CONFIG_PATA_SIL680=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_E1000E=y
+CONFIG_TIGON3=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=1024
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1511=y
+CONFIG_RTC_DRV_DS1553=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=m
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_PPC_EMULATED_STATS=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+CONFIG_VIRQ_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_VIRTUALIZATION=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index f087de6..f8aef20 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -37,6 +37,8 @@
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 # CONFIG_PCIEASPM is not set
+CONFIG_RAPIDIO=y
+CONFIG_FSL_RIO=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -94,17 +96,17 @@
 CONFIG_SATA_SIL=y
 CONFIG_PATA_SIL680=y
 CONFIG_NETDEVICES=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
+CONFIG_FSL_PQ_MDIO=y
 CONFIG_E1000=y
 CONFIG_E1000E=y
-CONFIG_FSL_PQ_MDIO=y
+CONFIG_VITESSE_PHY=y
+CONFIG_FIXED_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_EXTENDED=y
@@ -155,6 +157,7 @@
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 782822c..7ed8d4c 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -23,6 +23,8 @@
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BINFMT_MISC=m
+CONFIG_RAPIDIO=y
+CONFIG_FSL_RIO=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -57,7 +59,6 @@
 CONFIG_EEPROM_LEGACY=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -81,6 +82,7 @@
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAC_PARTITION=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index a1e5a17..f37a2ab 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -1,5 +1,4 @@
 CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -93,15 +92,14 @@
 CONFIG_PATA_ALI=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
 CONFIG_MARVELL_PHY=y
 CONFIG_DAVICOM_PHY=y
 CONFIG_CICADA_PHY=y
 CONFIG_VITESSE_PHY=y
 CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -120,6 +118,9 @@
 CONFIG_I2C=y
 CONFIG_I2C_CPM=m
 CONFIG_I2C_MPC=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_FSL_ESPI=y
 CONFIG_GPIO_MPC8XXX=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=y
@@ -163,6 +164,10 @@
 CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
 CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
 CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_RTC_CLASS=y
@@ -182,6 +187,7 @@
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_ADFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_HFS_FS=m
@@ -213,4 +219,5 @@
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
 CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index dd1e413..abdcd31 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -1,5 +1,4 @@
 CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=8
 CONFIG_EXPERIMENTAL=y
@@ -26,6 +25,7 @@
 CONFIG_MPC8536_DS=y
 CONFIG_MPC85xx_DS=y
 CONFIG_MPC85xx_RDB=y
+CONFIG_P1010_RDB=y
 CONFIG_P1022_DS=y
 CONFIG_P1023_RDS=y
 CONFIG_SOCRATES=y
@@ -94,15 +94,14 @@
 CONFIG_PATA_ALI=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
 CONFIG_MARVELL_PHY=y
 CONFIG_DAVICOM_PHY=y
 CONFIG_CICADA_PHY=y
 CONFIG_VITESSE_PHY=y
 CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -121,6 +120,9 @@
 CONFIG_I2C=y
 CONFIG_I2C_CPM=m
 CONFIG_I2C_MPC=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_FSL_ESPI=y
 CONFIG_GPIO_MPC8XXX=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=y
@@ -164,6 +166,10 @@
 CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
 CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
 CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_RTC_CLASS=y
@@ -183,6 +189,7 @@
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_ADFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_HFS_FS=m
@@ -214,4 +221,5 @@
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
 CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 535711f..2156e07 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -390,6 +390,11 @@
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 185c292..ded8678 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -6,10 +6,10 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_NAMESPACES=y
+CONFIG_SPARSE_IRQ=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
 # CONFIG_PERF_EVENTS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
@@ -17,6 +17,7 @@
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+# CONFIG_PPC_POWERNV is not set
 # CONFIG_PPC_PSERIES is not set
 # CONFIG_PPC_PMAC is not set
 CONFIG_PPC_PS3=y
@@ -27,14 +28,14 @@
 CONFIG_PS3_LPM=m
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
 CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=y
 CONFIG_KEXEC=y
-CONFIG_SPARSE_IRQ=y
 # CONFIG_SPARSEMEM_VMEMMAP is not set
 CONFIG_SCHED_SMT=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE=""
-CONFIG_PM=y
+CONFIG_PM_RUNTIME=y
 CONFIG_PM_DEBUG=y
 # CONFIG_SECCOMP is not set
 # CONFIG_PCI is not set
@@ -81,20 +82,23 @@
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=m
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
 CONFIG_GELIC_NET=y
 CONFIG_GELIC_WIRELESS=y
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_USB_USBNET=m
 # CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
 # CONFIG_USB_NET_NET1080 is not set
 # CONFIG_USB_NET_CDC_SUBSET is not set
 # CONFIG_USB_NET_ZAURUS is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_JOYDEV=m
@@ -135,22 +139,21 @@
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=m
 CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_TT_NEWSCHED=y
 # CONFIG_USB_EHCI_HCD_PPC_OF is not set
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_STORAGE=m
 CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PS3=m
+CONFIG_RTC_DRV_PS3=y
+# CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT4_FS=y
-CONFIG_INOTIFY=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -167,19 +170,17 @@
 CONFIG_NLS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_LIST=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index a72f241..30e7d0d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -304,6 +304,11 @@
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d51df17..7e313f1 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -34,3 +34,5 @@
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
+
+generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index e30442c..ad55a1c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -201,6 +201,7 @@
 #define CPU_FTR_POPCNTB			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_POPCNTD			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_ICSWX			LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_VMX_COPY		LONG_ASM_CONST(0x2000000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -425,7 +426,7 @@
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE)
+	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -437,7 +438,7 @@
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
-		     CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
+		     CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
 
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 1cf20bd..487d46f 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -29,25 +29,8 @@
 #include <asm/time.h>
 #include <asm/param.h>
 
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
-
-#define cputime_zero			((cputime_t)0)
-#define cputime_max			((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) +  (__b))
-#define cputime_sub(__a, __b)		((__a) -  (__b))
-#define cputime_div(__a, __n)		((__a) /  (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) >  (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) <  (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-
-#define cputime64_zero			((cputime64_t)0)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime64_sub(__a, __b)		((__a) - (__b))
-#define cputime_to_cputime64(__ct)	(__ct)
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
 
 #ifdef __KERNEL__
 
@@ -65,7 +48,7 @@
 
 static inline unsigned long cputime_to_jiffies(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_jiffies_factor);
+	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
 }
 
 /* Estimate the scaled cputime by scaling the real cputime based on
@@ -74,14 +57,15 @@
 {
 	if (cpu_has_feature(CPU_FTR_SPURR) &&
 	    __get_cpu_var(cputime_last_delta))
-		return ct * __get_cpu_var(cputime_scaled_last_delta) /
-			    __get_cpu_var(cputime_last_delta);
+		return (__force u64) ct *
+			__get_cpu_var(cputime_scaled_last_delta) /
+			__get_cpu_var(cputime_last_delta);
 	return ct;
 }
 
 static inline cputime_t jiffies_to_cputime(const unsigned long jif)
 {
-	cputime_t ct;
+	u64 ct;
 	unsigned long sec;
 
 	/* have to be a little careful about overflow */
@@ -93,7 +77,7 @@
 	}
 	if (sec)
 		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+	return (__force cputime_t) ct;
 }
 
 static inline void setup_cputime_one_jiffy(void)
@@ -103,7 +87,7 @@
 
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
-	cputime_t ct;
+	u64 ct;
 	u64 sec;
 
 	/* have to be a little careful about overflow */
@@ -114,28 +98,28 @@
 		do_div(ct, HZ);
 	}
 	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+		ct += (u64) sec * tb_ticks_per_sec;
+	return (__force cputime64_t) ct;
 }
 
 static inline u64 cputime64_to_jiffies64(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_jiffies_factor);
+	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
 }
 
 /*
  * Convert cputime <-> microseconds
  */
-extern u64 __cputime_msec_factor;
+extern u64 __cputime_usec_factor;
 
 static inline unsigned long cputime_to_usecs(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+	return mulhdu((__force u64) ct, __cputime_usec_factor);
 }
 
 static inline cputime_t usecs_to_cputime(const unsigned long us)
 {
-	cputime_t ct;
+	u64 ct;
 	unsigned long sec;
 
 	/* have to be a little careful about overflow */
@@ -143,13 +127,15 @@
 	sec = us / 1000000;
 	if (ct) {
 		ct *= tb_ticks_per_sec;
-		do_div(ct, 1000);
+		do_div(ct, 1000000);
 	}
 	if (sec)
 		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+	return (__force cputime_t) ct;
 }
 
+#define usecs_to_cputime64(us)		usecs_to_cputime(us)
+
 /*
  * Convert cputime <-> seconds
  */
@@ -157,12 +143,12 @@
 
 static inline unsigned long cputime_to_secs(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_sec_factor);
+	return mulhdu((__force u64) ct, __cputime_sec_factor);
 }
 
 static inline cputime_t secs_to_cputime(const unsigned long sec)
 {
-	return (cputime_t) sec * tb_ticks_per_sec;
+	return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
 }
 
 /*
@@ -170,7 +156,7 @@
  */
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
 {
-	u64 x = ct;
+	u64 x = (__force u64) ct;
 	unsigned int frac;
 
 	frac = do_div(x, tb_ticks_per_sec);
@@ -182,11 +168,11 @@
 
 static inline cputime_t timespec_to_cputime(const struct timespec *p)
 {
-	cputime_t ct;
+	u64 ct;
 
 	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
 	do_div(ct, 1000000000);
-	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
 }
 
 /*
@@ -194,7 +180,7 @@
  */
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
 {
-	u64 x = ct;
+	u64 x = (__force u64) ct;
 	unsigned int frac;
 
 	frac = do_div(x, tb_ticks_per_sec);
@@ -206,11 +192,11 @@
 
 static inline cputime_t timeval_to_cputime(const struct timeval *p)
 {
-	cputime_t ct;
+	u64 ct;
 
 	ct = (u64) p->tv_usec * tb_ticks_per_sec;
 	do_div(ct, 1000000);
-	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
 }
 
 /*
@@ -220,12 +206,12 @@
 
 static inline unsigned long cputime_to_clock_t(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_clockt_factor);
+	return mulhdu((__force u64) ct, __cputime_clockt_factor);
 }
 
 static inline cputime_t clock_t_to_cputime(const unsigned long clk)
 {
-	cputime_t ct;
+	u64 ct;
 	unsigned long sec;
 
 	/* have to be a little careful about overflow */
@@ -236,8 +222,8 @@
 		do_div(ct, USER_HZ);
 	}
 	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+		ct += (u64) sec * tb_ticks_per_sec;
+	return (__force cputime_t) ct;
 }
 
 #define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
new file mode 100644
index 0000000..b955012
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -0,0 +1,834 @@
+/* Freescale Integrated Flash Controller
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <dipen.dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_FSL_IFC_H
+#define __ASM_FSL_IFC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+
+#define FSL_IFC_BANK_COUNT 4
+
+/*
+ * CSPR - Chip Select Property Register
+ */
+#define CSPR_BA				0xFFFF0000
+#define CSPR_BA_SHIFT			16
+#define CSPR_PORT_SIZE			0x00000180
+#define CSPR_PORT_SIZE_SHIFT		7
+/* Port Size 8 bit */
+#define CSPR_PORT_SIZE_8		0x00000080
+/* Port Size 16 bit */
+#define CSPR_PORT_SIZE_16		0x00000100
+/* Port Size 32 bit */
+#define CSPR_PORT_SIZE_32		0x00000180
+/* Write Protect */
+#define CSPR_WP				0x00000040
+#define CSPR_WP_SHIFT			6
+/* Machine Select */
+#define CSPR_MSEL			0x00000006
+#define CSPR_MSEL_SHIFT			1
+/* NOR */
+#define CSPR_MSEL_NOR			0x00000000
+/* NAND */
+#define CSPR_MSEL_NAND			0x00000002
+/* GPCM */
+#define CSPR_MSEL_GPCM			0x00000004
+/* Bank Valid */
+#define CSPR_V				0x00000001
+#define CSPR_V_SHIFT			0
+
+/*
+ * Address Mask Register
+ */
+#define IFC_AMASK_MASK			0xFFFF0000
+#define IFC_AMASK_SHIFT			16
+#define IFC_AMASK(n)			(IFC_AMASK_MASK << \
+					(__ilog2(n) - IFC_AMASK_SHIFT))
+
+/*
+ * Chip Select Option Register IFC_NAND Machine
+ */
+/* Enable ECC Encoder */
+#define CSOR_NAND_ECC_ENC_EN		0x80000000
+#define CSOR_NAND_ECC_MODE_MASK		0x30000000
+/* 4 bit correction per 520 Byte sector */
+#define CSOR_NAND_ECC_MODE_4		0x00000000
+/* 8 bit correction per 528 Byte sector */
+#define CSOR_NAND_ECC_MODE_8		0x10000000
+/* Enable ECC Decoder */
+#define CSOR_NAND_ECC_DEC_EN		0x04000000
+/* Row Address Length */
+#define CSOR_NAND_RAL_MASK		0x01800000
+#define CSOR_NAND_RAL_SHIFT		20
+#define CSOR_NAND_RAL_1			0x00000000
+#define CSOR_NAND_RAL_2			0x00800000
+#define CSOR_NAND_RAL_3			0x01000000
+#define CSOR_NAND_RAL_4			0x01800000
+/* Page Size 512b, 2k, 4k */
+#define CSOR_NAND_PGS_MASK		0x00180000
+#define CSOR_NAND_PGS_SHIFT		16
+#define CSOR_NAND_PGS_512		0x00000000
+#define CSOR_NAND_PGS_2K		0x00080000
+#define CSOR_NAND_PGS_4K		0x00100000
+/* Spare region Size */
+#define CSOR_NAND_SPRZ_MASK		0x0000E000
+#define CSOR_NAND_SPRZ_SHIFT		13
+#define CSOR_NAND_SPRZ_16		0x00000000
+#define CSOR_NAND_SPRZ_64		0x00002000
+#define CSOR_NAND_SPRZ_128		0x00004000
+#define CSOR_NAND_SPRZ_210		0x00006000
+#define CSOR_NAND_SPRZ_218		0x00008000
+#define CSOR_NAND_SPRZ_224		0x0000A000
+/* Pages Per Block */
+#define CSOR_NAND_PB_MASK		0x00000700
+#define CSOR_NAND_PB_SHIFT		8
+#define CSOR_NAND_PB(n)		((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT)
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_NAND_TRHZ_MASK		0x0000001C
+#define CSOR_NAND_TRHZ_SHIFT		2
+#define CSOR_NAND_TRHZ_20		0x00000000
+#define CSOR_NAND_TRHZ_40		0x00000004
+#define CSOR_NAND_TRHZ_60		0x00000008
+#define CSOR_NAND_TRHZ_80		0x0000000C
+#define CSOR_NAND_TRHZ_100		0x00000010
+/* Buffer control disable */
+#define CSOR_NAND_BCTLD			0x00000001
+
+/*
+ * Chip Select Option Register - NOR Flash Mode
+ */
+/* Enable Address shift Mode */
+#define CSOR_NOR_ADM_SHFT_MODE_EN	0x80000000
+/* Page Read Enable from NOR device */
+#define CSOR_NOR_PGRD_EN		0x10000000
+/* AVD Toggle Enable during Burst Program */
+#define CSOR_NOR_AVD_TGL_PGM_EN		0x01000000
+/* Address Data Multiplexing Shift */
+#define CSOR_NOR_ADM_MASK		0x0003E000
+#define CSOR_NOR_ADM_SHIFT_SHIFT	13
+#define CSOR_NOR_ADM_SHIFT(n)	((n) << CSOR_NOR_ADM_SHIFT_SHIFT)
+/* Type of the NOR device hooked */
+#define CSOR_NOR_NOR_MODE_AYSNC_NOR	0x00000000
+#define CSOR_NOR_NOR_MODE_AVD_NOR	0x00000020
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_NOR_TRHZ_MASK		0x0000001C
+#define CSOR_NOR_TRHZ_SHIFT		2
+#define CSOR_NOR_TRHZ_20		0x00000000
+#define CSOR_NOR_TRHZ_40		0x00000004
+#define CSOR_NOR_TRHZ_60		0x00000008
+#define CSOR_NOR_TRHZ_80		0x0000000C
+#define CSOR_NOR_TRHZ_100		0x00000010
+/* Buffer control disable */
+#define CSOR_NOR_BCTLD			0x00000001
+
+/*
+ * Chip Select Option Register - GPCM Mode
+ */
+/* GPCM Mode - Normal */
+#define CSOR_GPCM_GPMODE_NORMAL		0x00000000
+/* GPCM Mode - GenericASIC */
+#define CSOR_GPCM_GPMODE_ASIC		0x80000000
+/* Parity Mode odd/even */
+#define CSOR_GPCM_PARITY_EVEN		0x40000000
+/* Parity Checking enable/disable */
+#define CSOR_GPCM_PAR_EN		0x20000000
+/* GPCM Timeout Count */
+#define CSOR_GPCM_GPTO_MASK		0x0F000000
+#define CSOR_GPCM_GPTO_SHIFT		24
+#define CSOR_GPCM_GPTO(n)	((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT)
+/* GPCM External Access Termination mode for read access */
+#define CSOR_GPCM_RGETA_EXT		0x00080000
+/* GPCM External Access Termination mode for write access */
+#define CSOR_GPCM_WGETA_EXT		0x00040000
+/* Address Data Multiplexing Shift */
+#define CSOR_GPCM_ADM_MASK		0x0003E000
+#define CSOR_GPCM_ADM_SHIFT_SHIFT	13
+#define CSOR_GPCM_ADM_SHIFT(n)	((n) << CSOR_GPCM_ADM_SHIFT_SHIFT)
+/* Generic ASIC Parity error indication delay */
+#define CSOR_GPCM_GAPERRD_MASK		0x00000180
+#define CSOR_GPCM_GAPERRD_SHIFT		7
+#define CSOR_GPCM_GAPERRD(n)	(((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT)
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_GPCM_TRHZ_MASK		0x0000001C
+#define CSOR_GPCM_TRHZ_20		0x00000000
+#define CSOR_GPCM_TRHZ_40		0x00000004
+#define CSOR_GPCM_TRHZ_60		0x00000008
+#define CSOR_GPCM_TRHZ_80		0x0000000C
+#define CSOR_GPCM_TRHZ_100		0x00000010
+/* Buffer control disable */
+#define CSOR_GPCM_BCTLD			0x00000001
+
+/*
+ * Ready Busy Status Register (RB_STAT)
+ */
+/* CSn is READY */
+#define IFC_RB_STAT_READY_CS0		0x80000000
+#define IFC_RB_STAT_READY_CS1		0x40000000
+#define IFC_RB_STAT_READY_CS2		0x20000000
+#define IFC_RB_STAT_READY_CS3		0x10000000
+
+/*
+ * General Control Register (GCR)
+ */
+#define IFC_GCR_MASK			0x8000F800
+/* reset all IFC hardware */
+#define IFC_GCR_SOFT_RST_ALL		0x80000000
+/* Turnaround Time of external buffer */
+#define IFC_GCR_TBCTL_TRN_TIME		0x0000F800
+#define IFC_GCR_TBCTL_TRN_TIME_SHIFT	11
+
+/*
+ * Common Event and Error Status Register (CM_EVTER_STAT)
+ */
+/* Chip select error */
+#define IFC_CM_EVTER_STAT_CSER		0x80000000
+
+/*
+ * Common Event and Error Enable Register (CM_EVTER_EN)
+ */
+/* Chip select error checking enable */
+#define IFC_CM_EVTER_EN_CSEREN		0x80000000
+
+/*
+ * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN)
+ */
+/* Chip select error interrupt enable */
+#define IFC_CM_EVTER_INTR_EN_CSERIREN	0x80000000
+
+/*
+ * Common Transfer Error Attribute Register-0 (CM_ERATTR0)
+ */
+/* transaction type of error Read/Write */
+#define IFC_CM_ERATTR0_ERTYP_READ	0x80000000
+#define IFC_CM_ERATTR0_ERAID		0x0FF00000
+#define IFC_CM_ERATTR0_ERAID_SHIFT	20
+#define IFC_CM_ERATTR0_ESRCID		0x0000FF00
+#define IFC_CM_ERATTR0_ESRCID_SHIFT	8
+
+/*
+ * Clock Control Register (CCR)
+ */
+#define IFC_CCR_MASK			0x0F0F8800
+/* Clock division ratio */
+#define IFC_CCR_CLK_DIV_MASK		0x0F000000
+#define IFC_CCR_CLK_DIV_SHIFT		24
+#define IFC_CCR_CLK_DIV(n)		((n-1) << IFC_CCR_CLK_DIV_SHIFT)
+/* IFC Clock Delay */
+#define IFC_CCR_CLK_DLY_MASK		0x000F0000
+#define IFC_CCR_CLK_DLY_SHIFT		16
+#define IFC_CCR_CLK_DLY(n)		((n) << IFC_CCR_CLK_DLY_SHIFT)
+/* Invert IFC clock before sending out */
+#define IFC_CCR_INV_CLK_EN		0x00008000
+/* Feedback IFC Clock */
+#define IFC_CCR_FB_IFC_CLK_SEL		0x00000800
+
+/*
+ * Clock Status Register (CSR)
+ */
+/* Clk is stable */
+#define IFC_CSR_CLK_STAT_STABLE		0x80000000
+
+/*
+ * IFC_NAND Machine Specific Registers
+ */
+/*
+ * NAND Configuration Register (NCFGR)
+ */
+/* Auto Boot Mode */
+#define IFC_NAND_NCFGR_BOOT		0x80000000
+/* Addressing Mode-ROW0+n/COL0 */
+#define IFC_NAND_NCFGR_ADDR_MODE_RC0	0x00000000
+/* Addressing Mode-ROW0+n/COL0+n */
+#define IFC_NAND_NCFGR_ADDR_MODE_RC1	0x00400000
+/* Number of loop iterations of FIR sequences for multi page operations */
+#define IFC_NAND_NCFGR_NUM_LOOP_MASK	0x0000F000
+#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT	12
+#define IFC_NAND_NCFGR_NUM_LOOP(n)	((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT)
+/* Number of wait cycles */
+#define IFC_NAND_NCFGR_NUM_WAIT_MASK	0x000000FF
+#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT	0
+
+/*
+ * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1)
+ */
+/* General purpose FCM flash command bytes CMD0-CMD7 */
+#define IFC_NAND_FCR0_CMD0		0xFF000000
+#define IFC_NAND_FCR0_CMD0_SHIFT	24
+#define IFC_NAND_FCR0_CMD1		0x00FF0000
+#define IFC_NAND_FCR0_CMD1_SHIFT	16
+#define IFC_NAND_FCR0_CMD2		0x0000FF00
+#define IFC_NAND_FCR0_CMD2_SHIFT	8
+#define IFC_NAND_FCR0_CMD3		0x000000FF
+#define IFC_NAND_FCR0_CMD3_SHIFT	0
+#define IFC_NAND_FCR1_CMD4		0xFF000000
+#define IFC_NAND_FCR1_CMD4_SHIFT	24
+#define IFC_NAND_FCR1_CMD5		0x00FF0000
+#define IFC_NAND_FCR1_CMD5_SHIFT	16
+#define IFC_NAND_FCR1_CMD6		0x0000FF00
+#define IFC_NAND_FCR1_CMD6_SHIFT	8
+#define IFC_NAND_FCR1_CMD7		0x000000FF
+#define IFC_NAND_FCR1_CMD7_SHIFT	0
+
+/*
+ * Flash ROW and COL Address Register (ROWn, COLn)
+ */
+/* Main/spare region locator */
+#define IFC_NAND_COL_MS			0x80000000
+/* Column Address */
+#define IFC_NAND_COL_CA_MASK		0x00000FFF
+
+/*
+ * NAND Flash Byte Count Register (NAND_BC)
+ */
+/* Byte Count for read/Write */
+#define IFC_NAND_BC			0x000001FF
+
+/*
+ * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2)
+ */
+/* NAND Machine specific opcodes OP0-OP14 */
+#define IFC_NAND_FIR0_OP0		0xFC000000
+#define IFC_NAND_FIR0_OP0_SHIFT		26
+#define IFC_NAND_FIR0_OP1		0x03F00000
+#define IFC_NAND_FIR0_OP1_SHIFT		20
+#define IFC_NAND_FIR0_OP2		0x000FC000
+#define IFC_NAND_FIR0_OP2_SHIFT		14
+#define IFC_NAND_FIR0_OP3		0x00003F00
+#define IFC_NAND_FIR0_OP3_SHIFT		8
+#define IFC_NAND_FIR0_OP4		0x000000FC
+#define IFC_NAND_FIR0_OP4_SHIFT		2
+#define IFC_NAND_FIR1_OP5		0xFC000000
+#define IFC_NAND_FIR1_OP5_SHIFT		26
+#define IFC_NAND_FIR1_OP6		0x03F00000
+#define IFC_NAND_FIR1_OP6_SHIFT		20
+#define IFC_NAND_FIR1_OP7		0x000FC000
+#define IFC_NAND_FIR1_OP7_SHIFT		14
+#define IFC_NAND_FIR1_OP8		0x00003F00
+#define IFC_NAND_FIR1_OP8_SHIFT		8
+#define IFC_NAND_FIR1_OP9		0x000000FC
+#define IFC_NAND_FIR1_OP9_SHIFT		2
+#define IFC_NAND_FIR2_OP10		0xFC000000
+#define IFC_NAND_FIR2_OP10_SHIFT	26
+#define IFC_NAND_FIR2_OP11		0x03F00000
+#define IFC_NAND_FIR2_OP11_SHIFT	20
+#define IFC_NAND_FIR2_OP12		0x000FC000
+#define IFC_NAND_FIR2_OP12_SHIFT	14
+#define IFC_NAND_FIR2_OP13		0x00003F00
+#define IFC_NAND_FIR2_OP13_SHIFT	8
+#define IFC_NAND_FIR2_OP14		0x000000FC
+#define IFC_NAND_FIR2_OP14_SHIFT	2
+
+/*
+ * Instruction opcodes to be programmed
+ * in FIR registers- 6bits
+ */
+enum ifc_nand_fir_opcodes {
+	IFC_FIR_OP_NOP,
+	IFC_FIR_OP_CA0,
+	IFC_FIR_OP_CA1,
+	IFC_FIR_OP_CA2,
+	IFC_FIR_OP_CA3,
+	IFC_FIR_OP_RA0,
+	IFC_FIR_OP_RA1,
+	IFC_FIR_OP_RA2,
+	IFC_FIR_OP_RA3,
+	IFC_FIR_OP_CMD0,
+	IFC_FIR_OP_CMD1,
+	IFC_FIR_OP_CMD2,
+	IFC_FIR_OP_CMD3,
+	IFC_FIR_OP_CMD4,
+	IFC_FIR_OP_CMD5,
+	IFC_FIR_OP_CMD6,
+	IFC_FIR_OP_CMD7,
+	IFC_FIR_OP_CW0,
+	IFC_FIR_OP_CW1,
+	IFC_FIR_OP_CW2,
+	IFC_FIR_OP_CW3,
+	IFC_FIR_OP_CW4,
+	IFC_FIR_OP_CW5,
+	IFC_FIR_OP_CW6,
+	IFC_FIR_OP_CW7,
+	IFC_FIR_OP_WBCD,
+	IFC_FIR_OP_RBCD,
+	IFC_FIR_OP_BTRD,
+	IFC_FIR_OP_RDSTAT,
+	IFC_FIR_OP_NWAIT,
+	IFC_FIR_OP_WFR,
+	IFC_FIR_OP_SBRD,
+	IFC_FIR_OP_UA,
+	IFC_FIR_OP_RB,
+};
+
+/*
+ * NAND Chip Select Register (NAND_CSEL)
+ */
+#define IFC_NAND_CSEL			0x0C000000
+#define IFC_NAND_CSEL_SHIFT		26
+#define IFC_NAND_CSEL_CS0		0x00000000
+#define IFC_NAND_CSEL_CS1		0x04000000
+#define IFC_NAND_CSEL_CS2		0x08000000
+#define IFC_NAND_CSEL_CS3		0x0C000000
+
+/*
+ * NAND Operation Sequence Start (NANDSEQ_STRT)
+ */
+/* NAND Flash Operation Start */
+#define IFC_NAND_SEQ_STRT_FIR_STRT	0x80000000
+/* Automatic Erase */
+#define IFC_NAND_SEQ_STRT_AUTO_ERS	0x00800000
+/* Automatic Program */
+#define IFC_NAND_SEQ_STRT_AUTO_PGM	0x00100000
+/* Automatic Copyback */
+#define IFC_NAND_SEQ_STRT_AUTO_CPB	0x00020000
+/* Automatic Read Operation */
+#define IFC_NAND_SEQ_STRT_AUTO_RD	0x00004000
+/* Automatic Status Read */
+#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD	0x00000800
+
+/*
+ * NAND Event and Error Status Register (NAND_EVTER_STAT)
+ */
+/* Operation Complete */
+#define IFC_NAND_EVTER_STAT_OPC		0x80000000
+/* Flash Timeout Error */
+#define IFC_NAND_EVTER_STAT_FTOER	0x08000000
+/* Write Protect Error */
+#define IFC_NAND_EVTER_STAT_WPER	0x04000000
+/* ECC Error */
+#define IFC_NAND_EVTER_STAT_ECCER	0x02000000
+/* RCW Load Done */
+#define IFC_NAND_EVTER_STAT_RCW_DN	0x00008000
+/* Boot Loader Done */
+#define IFC_NAND_EVTER_STAT_BOOT_DN	0x00004000
+/* Bad Block Indicator search select */
+#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE	0x00000800
+
+/*
+ * NAND Flash Page Read Completion Event Status Register
+ * (PGRDCMPL_EVT_STAT)
+ */
+#define PGRDCMPL_EVT_STAT_MASK		0xFFFF0000
+/* Small Page 0-15 Done */
+#define PGRDCMPL_EVT_STAT_SECTION_SP(n)	(1 << (31 - (n)))
+/* Large Page(2K) 0-3 Done */
+#define PGRDCMPL_EVT_STAT_LP_2K(n)	(0xF << (28 - (n)*4))
+/* Large Page(4K) 0-1 Done */
+#define PGRDCMPL_EVT_STAT_LP_4K(n)	(0xFF << (24 - (n)*8))
+
+/*
+ * NAND Event and Error Enable Register (NAND_EVTER_EN)
+ */
+/* Operation complete event enable */
+#define IFC_NAND_EVTER_EN_OPC_EN	0x80000000
+/* Page read complete event enable */
+#define IFC_NAND_EVTER_EN_PGRDCMPL_EN	0x20000000
+/* Flash Timeout error enable */
+#define IFC_NAND_EVTER_EN_FTOER_EN	0x08000000
+/* Write Protect error enable */
+#define IFC_NAND_EVTER_EN_WPER_EN	0x04000000
+/* ECC error logging enable */
+#define IFC_NAND_EVTER_EN_ECCER_EN	0x02000000
+
+/*
+ * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN)
+ */
+/* Enable interrupt for operation complete */
+#define IFC_NAND_EVTER_INTR_OPCIR_EN		0x80000000
+/* Enable interrupt for Page read complete */
+#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN	0x20000000
+/* Enable interrupt for Flash timeout error */
+#define IFC_NAND_EVTER_INTR_FTOERIR_EN		0x08000000
+/* Enable interrupt for Write protect error */
+#define IFC_NAND_EVTER_INTR_WPERIR_EN		0x04000000
+/* Enable interrupt for ECC error */
+#define IFC_NAND_EVTER_INTR_ECCERIR_EN		0x02000000
+
+/*
+ * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0)
+ */
+#define IFC_NAND_ERATTR0_MASK		0x0C080000
+/* Error on CS0-3 for NAND */
+#define IFC_NAND_ERATTR0_ERCS_CS0	0x00000000
+#define IFC_NAND_ERATTR0_ERCS_CS1	0x04000000
+#define IFC_NAND_ERATTR0_ERCS_CS2	0x08000000
+#define IFC_NAND_ERATTR0_ERCS_CS3	0x0C000000
+/* Transaction type of error Read/Write */
+#define IFC_NAND_ERATTR0_ERTTYPE_READ	0x00080000
+
+/*
+ * NAND Flash Status Register (NAND_FSR)
+ */
+/* First byte of data read from read status op */
+#define IFC_NAND_NFSR_RS0		0xFF000000
+/* Second byte of data read from read status op */
+#define IFC_NAND_NFSR_RS1		0x00FF0000
+
+/*
+ * ECC Error Status Registers (ECCSTAT0-ECCSTAT3)
+ */
+/* Number of ECC errors on sector n (n = 0-15) */
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK	0x0F000000
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT	24
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK	0x000F0000
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT	16
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK	0x00000F00
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT	8
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK	0x0000000F
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT	0
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK	0x0F000000
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT	24
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK	0x000F0000
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT	16
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK	0x00000F00
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT	8
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK	0x0000000F
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT	0
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK	0x0F000000
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT	24
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK	0x000F0000
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT	16
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK	0x00000F00
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT	8
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK	0x0000000F
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT	0
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK	0x0F000000
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT	24
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK	0x000F0000
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT	16
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK	0x00000F00
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT	8
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK	0x0000000F
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT	0
+
+/*
+ * NAND Control Register (NANDCR)
+ */
+#define IFC_NAND_NCR_FTOCNT_MASK	0x1E000000
+#define IFC_NAND_NCR_FTOCNT_SHIFT	25
+#define IFC_NAND_NCR_FTOCNT(n)	((__ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT)
+
+/*
+ * NAND_AUTOBOOT_TRGR
+ */
+/* Trigger RCW load */
+#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD	0x80000000
+/* Trigger Auto Boot */
+#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD	0x20000000
+
+/*
+ * NAND_MDR
+ */
+/* 1st read data byte when opcode SBRD */
+#define IFC_NAND_MDR_RDATA0		0xFF000000
+/* 2nd read data byte when opcode SBRD */
+#define IFC_NAND_MDR_RDATA1		0x00FF0000
+
+/*
+ * NOR Machine Specific Registers
+ */
+/*
+ * NOR Event and Error Status Register (NOR_EVTER_STAT)
+ */
+/* NOR Command Sequence Operation Complete */
+#define IFC_NOR_EVTER_STAT_OPC_NOR	0x80000000
+/* Write Protect Error */
+#define IFC_NOR_EVTER_STAT_WPER		0x04000000
+/* Command Sequence Timeout Error */
+#define IFC_NOR_EVTER_STAT_STOER	0x01000000
+
+/*
+ * NOR Event and Error Enable Register (NOR_EVTER_EN)
+ */
+/* NOR Command Seq complete event enable */
+#define IFC_NOR_EVTER_EN_OPCEN_NOR	0x80000000
+/* Write Protect Error Checking Enable */
+#define IFC_NOR_EVTER_EN_WPEREN		0x04000000
+/* Timeout Error Enable */
+#define IFC_NOR_EVTER_EN_STOEREN	0x01000000
+
+/*
+ * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN)
+ */
+/* Enable interrupt for OPC complete */
+#define IFC_NOR_EVTER_INTR_OPCEN_NOR	0x80000000
+/* Enable interrupt for write protect error */
+#define IFC_NOR_EVTER_INTR_WPEREN	0x04000000
+/* Enable interrupt for timeout error */
+#define IFC_NOR_EVTER_INTR_STOEREN	0x01000000
+
+/*
+ * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0)
+ */
+/* Source ID for error transaction */
+#define IFC_NOR_ERATTR0_ERSRCID		0xFF000000
+/* AXI ID for error transaction */
+#define IFC_NOR_ERATTR0_ERAID		0x000FF000
+/* Chip select corresponds to NOR error */
+#define IFC_NOR_ERATTR0_ERCS_CS0	0x00000000
+#define IFC_NOR_ERATTR0_ERCS_CS1	0x00000010
+#define IFC_NOR_ERATTR0_ERCS_CS2	0x00000020
+#define IFC_NOR_ERATTR0_ERCS_CS3	0x00000030
+/* Type of transaction read/write */
+#define IFC_NOR_ERATTR0_ERTYPE_READ	0x00000001
+
+/*
+ * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2)
+ */
+#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP	0x000F0000
+#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER	0x00000F00
+
+/*
+ * NOR Control Register (NORCR)
+ */
+#define IFC_NORCR_MASK			0x0F0F0000
+/* No. of Address/Data Phase */
+#define IFC_NORCR_NUM_PHASE_MASK	0x0F000000
+#define IFC_NORCR_NUM_PHASE_SHIFT	24
+#define IFC_NORCR_NUM_PHASE(n)	((n-1) << IFC_NORCR_NUM_PHASE_SHIFT)
+/* Sequence Timeout Count */
+#define IFC_NORCR_STOCNT_MASK		0x000F0000
+#define IFC_NORCR_STOCNT_SHIFT		16
+#define IFC_NORCR_STOCNT(n)	((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT)
+
+/*
+ * GPCM Machine specific registers
+ */
+/*
+ * GPCM Event and Error Status Register (GPCM_EVTER_STAT)
+ */
+/* Timeout error */
+#define IFC_GPCM_EVTER_STAT_TOER	0x04000000
+/* Parity error */
+#define IFC_GPCM_EVTER_STAT_PER		0x01000000
+
+/*
+ * GPCM Event and Error Enable Register (GPCM_EVTER_EN)
+ */
+/* Timeout error enable */
+#define IFC_GPCM_EVTER_EN_TOER_EN	0x04000000
+/* Parity error enable */
+#define IFC_GPCM_EVTER_EN_PER_EN	0x01000000
+
+/*
+ * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN)
+ */
+/* Enable Interrupt for timeout error */
+#define IFC_GPCM_EEIER_TOERIR_EN	0x04000000
+/* Enable Interrupt for Parity error */
+#define IFC_GPCM_EEIER_PERIR_EN		0x01000000
+
+/*
+ * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0)
+ */
+/* Source ID for error transaction */
+#define IFC_GPCM_ERATTR0_ERSRCID	0xFF000000
+/* AXI ID for error transaction */
+#define IFC_GPCM_ERATTR0_ERAID		0x000FF000
+/* Chip select corresponds to GPCM error */
+#define IFC_GPCM_ERATTR0_ERCS_CS0	0x00000000
+#define IFC_GPCM_ERATTR0_ERCS_CS1	0x00000040
+#define IFC_GPCM_ERATTR0_ERCS_CS2	0x00000080
+#define IFC_GPCM_ERATTR0_ERCS_CS3	0x000000C0
+/* Type of transaction read/Write */
+#define IFC_GPCM_ERATTR0_ERTYPE_READ	0x00000001
+
+/*
+ * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2)
+ */
+/* On which beat of address/data parity error is observed */
+#define IFC_GPCM_ERATTR2_PERR_BEAT		0x00000C00
+/* Parity Error on byte */
+#define IFC_GPCM_ERATTR2_PERR_BYTE		0x000000F0
+/* Parity Error reported in addr or data phase */
+#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE	0x00000001
+
+/*
+ * GPCM Status Register (GPCM_STAT)
+ */
+#define IFC_GPCM_STAT_BSY		0x80000000  /* GPCM is busy */
+
+/*
+ * IFC Controller NAND Machine registers
+ */
+struct fsl_ifc_nand {
+	__be32 ncfgr;
+	u32 res1[0x4];
+	__be32 nand_fcr0;
+	__be32 nand_fcr1;
+	u32 res2[0x8];
+	__be32 row0;
+	u32 res3;
+	__be32 col0;
+	u32 res4;
+	__be32 row1;
+	u32 res5;
+	__be32 col1;
+	u32 res6;
+	__be32 row2;
+	u32 res7;
+	__be32 col2;
+	u32 res8;
+	__be32 row3;
+	u32 res9;
+	__be32 col3;
+	u32 res10[0x24];
+	__be32 nand_fbcr;
+	u32 res11;
+	__be32 nand_fir0;
+	__be32 nand_fir1;
+	__be32 nand_fir2;
+	u32 res12[0x10];
+	__be32 nand_csel;
+	u32 res13;
+	__be32 nandseq_strt;
+	u32 res14;
+	__be32 nand_evter_stat;
+	u32 res15;
+	__be32 pgrdcmpl_evt_stat;
+	u32 res16[0x2];
+	__be32 nand_evter_en;
+	u32 res17[0x2];
+	__be32 nand_evter_intr_en;
+	u32 res18[0x2];
+	__be32 nand_erattr0;
+	__be32 nand_erattr1;
+	u32 res19[0x10];
+	__be32 nand_fsr;
+	u32 res20;
+	__be32 nand_eccstat[4];
+	u32 res21[0x20];
+	__be32 nanndcr;
+	u32 res22[0x2];
+	__be32 nand_autoboot_trgr;
+	u32 res23;
+	__be32 nand_mdr;
+	u32 res24[0x5C];
+};
+
+/*
+ * IFC controller NOR Machine registers
+ */
+struct fsl_ifc_nor {
+	__be32 nor_evter_stat;
+	u32 res1[0x2];
+	__be32 nor_evter_en;
+	u32 res2[0x2];
+	__be32 nor_evter_intr_en;
+	u32 res3[0x2];
+	__be32 nor_erattr0;
+	__be32 nor_erattr1;
+	__be32 nor_erattr2;
+	u32 res4[0x4];
+	__be32 norcr;
+	u32 res5[0xEF];
+};
+
+/*
+ * IFC controller GPCM Machine registers
+ */
+struct fsl_ifc_gpcm {
+	__be32 gpcm_evter_stat;
+	u32 res1[0x2];
+	__be32 gpcm_evter_en;
+	u32 res2[0x2];
+	__be32 gpcm_evter_intr_en;
+	u32 res3[0x2];
+	__be32 gpcm_erattr0;
+	__be32 gpcm_erattr1;
+	__be32 gpcm_erattr2;
+	__be32 gpcm_stat;
+	u32 res4[0x1F3];
+};
+
+/*
+ * IFC Controller Registers
+ */
+struct fsl_ifc_regs {
+	__be32 ifc_rev;
+	u32 res1[0x3];
+	struct {
+		__be32 cspr;
+		u32 res2[0x2];
+	} cspr_cs[FSL_IFC_BANK_COUNT];
+	u32 res3[0x18];
+	struct {
+		__be32 amask;
+		u32 res4[0x2];
+	} amask_cs[FSL_IFC_BANK_COUNT];
+	u32 res5[0x18];
+	struct {
+		__be32 csor;
+		u32 res6[0x2];
+	} csor_cs[FSL_IFC_BANK_COUNT];
+	u32 res7[0x18];
+	struct {
+		__be32 ftim[4];
+		u32 res8[0x8];
+	} ftim_cs[FSL_IFC_BANK_COUNT];
+	u32 res9[0x60];
+	__be32 rb_stat;
+	u32 res10[0x2];
+	__be32 ifc_gcr;
+	u32 res11[0x2];
+	__be32 cm_evter_stat;
+	u32 res12[0x2];
+	__be32 cm_evter_en;
+	u32 res13[0x2];
+	__be32 cm_evter_intr_en;
+	u32 res14[0x2];
+	__be32 cm_erattr0;
+	__be32 cm_erattr1;
+	u32 res15[0x2];
+	__be32 ifc_ccr;
+	__be32 ifc_csr;
+	u32 res16[0x2EB];
+	struct fsl_ifc_nand ifc_nand;
+	struct fsl_ifc_nor ifc_nor;
+	struct fsl_ifc_gpcm ifc_gpcm;
+};
+
+extern unsigned int convert_ifc_address(phys_addr_t addr_base);
+extern int fsl_ifc_find(phys_addr_t addr_base);
+
+/* Overview of the FSL IFC controller */
+
+struct fsl_ifc_ctrl {
+	/* device info */
+	struct device			*dev;
+	struct fsl_ifc_regs __iomem	*regs;
+	int				irq;
+	int				nand_irq;
+	spinlock_t			lock;
+	void				*nand;
+
+	u32 nand_stat;
+	wait_queue_head_t nand_wait;
+};
+
+extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+
+
+#endif /* __ASM_FSL_IFC_H */
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 8a0b5ec..420b453 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -238,8 +238,6 @@
 #define FPAR_LP_CI_SHIFT      0
 	__be32 fbcr;            /**< Flash Byte Count Register */
 #define FBCR_BC      0x00000FFF
-	u8 res11[0x8];
-	u8 res8[0xF00];
 };
 
 /*
@@ -294,6 +292,11 @@
 
 	/* status read from LTESR by irq handler */
 	unsigned int			irq_status;
+
+#ifdef CONFIG_SUSPEND
+	/* save regs when the system goes into deep sleep */
+	struct fsl_lbc_regs		*saved_regs;
+#endif
 };
 
 extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 8600493..dfdb95b 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -5,7 +5,6 @@
 #include <asm/page.h>
 
 extern struct kmem_cache *hugepte_cache;
-extern void __init reserve_hugetlb_gpages(void);
 
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
@@ -22,14 +21,14 @@
 				    unsigned pdshift)
 {
 	/*
-	 * On 32-bit, we have multiple higher-level table entries that point to
-	 * the same hugepte.  Just use the first one since they're all
+	 * On FSL BookE, we have multiple higher-level table entries that
+	 * point to the same hugepte.  Just use the first one since they're all
 	 * identical.  So for that case, idx=0.
 	 */
 	unsigned long idx = 0;
 
 	pte_t *dir = hugepd_page(*hpdp);
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC_FSL_BOOK3E
 	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
 #endif
 
@@ -53,7 +52,8 @@
 }
 #endif
 
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte);
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
@@ -124,7 +124,17 @@
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
+#ifdef HUGETLB_NEED_PRELOAD
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry.  Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
 	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
@@ -142,14 +152,24 @@
 }
 
 #else /* ! CONFIG_HUGETLB_PAGE */
-static inline void reserve_hugetlb_gpages(void)
-{
-	pr_err("Cannot reserve gpages without hugetlb enabled\n");
-}
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 				      unsigned long vmaddr)
 {
 }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
 #endif
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 45698d5..a3855b8 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -394,7 +394,7 @@
 #endif /* CONFIG_PPC32 */
 
 /* The "__do_*" operations below provide the actual "base" implementation
- * for each of the defined acccessor. Some of them use the out_* functions
+ * for each of the defined accessors. Some of them use the out_* functions
  * directly, some of them still use EEH, though we might change that in the
  * future. Those macros below provide the necessary argument swapping and
  * handling of the IO base for PIO.
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index bffd062..c977620 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -32,11 +32,11 @@
 
 #ifndef __ASSEMBLY__
 
-#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE)
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
 extern void reserve_kdump_trampoline(void);
 extern void setup_kdump_trampoline(void);
 #else
-/* !CRASH_DUMP || RELOCATABLE */
+/* !CRASH_DUMP || !NONSTATIC_KERNEL */
 static inline void reserve_kdump_trampoline(void) { ; }
 static inline void setup_kdump_trampoline(void) { ; }
 #endif
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f921eb1..16d7e33 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -49,7 +49,6 @@
 #define KEXEC_STATE_REAL_MODE 2
 
 #ifndef __ASSEMBLY__
-#include <linux/cpumask.h>
 #include <asm/reg.h>
 
 typedef void (*crash_shutdown_t)(void);
@@ -73,11 +72,6 @@
 					  master to copy new code to 0 */
 extern int crashing_cpu;
 extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
-extern cpumask_t cpus_in_sr;
-static inline int kexec_sr_activated(int cpu)
-{
-	return cpumask_test_cpu(cpu, &cpus_in_sr);
-}
 
 struct kimage;
 struct pt_regs;
@@ -94,7 +88,6 @@
 extern void machine_kexec_mask_interrupts(void);
 
 #else /* !CONFIG_KEXEC */
-static inline int kexec_sr_activated(int cpu) { return 0; }
 static inline void crash_kexec_secondary(struct pt_regs *regs) { }
 
 static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
index d8520ef..fc195d0 100644
--- a/arch/powerpc/include/asm/keylargo.h
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -51,7 +51,7 @@
 
 #define KL_GPIO_SOUND_POWER		(KEYLARGO_GPIO_0+0x05)
 
-/* Hrm... this one is only to be used on Pismo. It seeem to also
+/* Hrm... this one is only to be used on Pismo. It seems to also
  * control the timebase enable on other machines. Still to be
  * experimented... --BenH.
  */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d4df013..69c7377 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -381,39 +381,6 @@
 }
 #endif
 
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
-					     unsigned long pte_index)
-{
-	unsigned long rb, va_low;
-
-	rb = (v & ~0x7fUL) << 16;		/* AVA field */
-	va_low = pte_index >> 3;
-	if (v & HPTE_V_SECONDARY)
-		va_low = ~va_low;
-	/* xor vsid from AVA */
-	if (!(v & HPTE_V_1TB_SEG))
-		va_low ^= v >> 12;
-	else
-		va_low ^= v >> 24;
-	va_low &= 0x7ff;
-	if (v & HPTE_V_LARGE) {
-		rb |= 1;			/* L field */
-		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
-		    (r & 0xff000)) {
-			/* non-16MB large page, must be 64k */
-			/* (masks depend on page size) */
-			rb |= 0x1000;		/* page encoding in LP field */
-			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
-		}
-	} else {
-		/* 4kB page */
-		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
-	}
-	rb |= (v >> 54) & 0x300;		/* B field */
-	return rb;
-}
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index e43fe42..d0ac94f 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -29,4 +29,37 @@
 
 #define SPAPR_TCE_SHIFT		12
 
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+					     unsigned long pte_index)
+{
+	unsigned long rb, va_low;
+
+	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+	va_low = pte_index >> 3;
+	if (v & HPTE_V_SECONDARY)
+		va_low = ~va_low;
+	/* xor vsid from AVA */
+	if (!(v & HPTE_V_1TB_SEG))
+		va_low ^= v >> 12;
+	else
+		va_low ^= v >> 24;
+	va_low &= 0x7ff;
+	if (v & HPTE_V_LARGE) {
+		rb |= 1;			/* L field */
+		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		    (r & 0xff000)) {
+			/* non-16MB large page, must be 64k */
+			/* (masks depend on page size) */
+			rb |= 0x1000;		/* page encoding in LP field */
+			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
+		}
+	} else {
+		/* 4kB page */
+		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
+	}
+	rb |= (v >> 54) & 0x300;		/* B field */
+	return rb;
+}
+
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index f77c708..233f9ec 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -231,7 +231,7 @@
 LV1_CALL(write_htab_entry,                              4, 0,   1 )
 LV1_CALL(construct_virtual_address_space,               3, 2,   2 )
 LV1_CALL(invalidate_htab_entries,                       5, 0,   3 )
-LV1_CALL(get_virtual_address_space_id_of_ppe,           1, 1,   4 )
+LV1_CALL(get_virtual_address_space_id_of_ppe,           0, 1,   4 )
 LV1_CALL(query_logical_partition_address_region_info,   1, 5,   6 )
 LV1_CALL(select_virtual_address_space,                  1, 0,   7 )
 LV1_CALL(pause,                                         1, 0,   9 )
@@ -264,7 +264,7 @@
 LV1_CALL(get_spe_irq_outlet,                            2, 1,  78 )
 LV1_CALL(set_spe_privilege_state_area_1_register,       3, 0,  79 )
 LV1_CALL(create_repository_node,                        6, 0,  90 )
-LV1_CALL(get_repository_node_value,                     5, 2,  91 )
+LV1_CALL(read_repository_node,                          5, 2,  91 )
 LV1_CALL(modify_repository_node_value,                  6, 0,  92 )
 LV1_CALL(remove_repository_node,                        4, 0,  93 )
 LV1_CALL(read_htab_entries,                             2, 5,  95 )
@@ -276,7 +276,7 @@
 LV1_CALL(destruct_io_irq_outlet,                        1, 0, 121 )
 LV1_CALL(map_htab,                                      1, 1, 122 )
 LV1_CALL(unmap_htab,                                    1, 0, 123 )
-LV1_CALL(get_version_info,                              0, 1, 127 )
+LV1_CALL(get_version_info,                              0, 2, 127 )
 LV1_CALL(insert_htab_entry,                             6, 3, 158 )
 LV1_CALL(read_virtual_uart,                             3, 1, 162 )
 LV1_CALL(write_virtual_uart,                            3, 1, 163 )
@@ -294,9 +294,9 @@
 LV1_CALL(net_add_multicast_address,                     4, 0, 185 )
 LV1_CALL(net_remove_multicast_address,                  4, 0, 186 )
 LV1_CALL(net_start_tx_dma,                              4, 0, 187 )
-LV1_CALL(net_stop_tx_dma,                               3, 0, 188 )
+LV1_CALL(net_stop_tx_dma,                               2, 0, 188 )
 LV1_CALL(net_start_rx_dma,                              4, 0, 189 )
-LV1_CALL(net_stop_rx_dma,                               3, 0, 190 )
+LV1_CALL(net_stop_rx_dma,                               2, 0, 190 )
 LV1_CALL(net_set_interrupt_status_indicator,            4, 0, 191 )
 LV1_CALL(net_set_interrupt_mask,                        4, 0, 193 )
 LV1_CALL(net_control,                                   6, 2, 194 )
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index b540d6f..bf37931d 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -213,6 +213,9 @@
 	 * allow assignment/enabling of the device. */
 	int  (*pcibios_enable_device_hook)(struct pci_dev *);
 
+	/* Called after scan and before resource survey */
+	void (*pcibios_fixup_phb)(struct pci_controller *hose);
+
 	/* Called to shutdown machine specific hardware not already controlled
 	 * by other drivers.
 	 */
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
deleted file mode 100644
index 43efc34..0000000
--- a/arch/powerpc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_POWERPC_MEMBLOCK_H
-#define _ASM_POWERPC_MEMBLOCK_H
-
-#include <asm/udbg.h>
-
-#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
-
-#endif /* _ASM_POWERPC_MEMBLOCK_H */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 0260ea5..f5f89ca 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -214,6 +214,10 @@
 	unsigned int	id;
 	unsigned int	active;
 	unsigned long	vdso_base;
+#ifdef CONFIG_PPC_ICSWX
+	struct spinlock *cop_lockp;	/* guard cop related stuff */
+	unsigned long acop;		/* mask of enabled coprocessor types */
+#endif /* CONFIG_PPC_ICSWX */
 #ifdef CONFIG_PPC_MM_SLICES
 	u64 low_slices_psize;   /* SLB page size encodings */
 	u64 high_slices_psize;  /* 4 bits per slice for now */
@@ -254,6 +258,13 @@
 
 #ifdef CONFIG_PPC64
 extern unsigned long linear_map_top;
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index db645ec..412ba49 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -312,10 +312,9 @@
  * (i.e. everything above 0xC000000000000000), except the very top
  * segment, which simplifies several things.
  *
- * 	- We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses.  i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
+ *	- We allow for 16 significant bits of ESID and 19 bits of
+ * context for user addresses.  i.e. 16T (44 bits) of address space for
+ * up to half a million contexts.
  *
  * 	- The scramble function gives robust scattering in the hash
  * table (at least based on some initial results).  The previous
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index e6fae49..67b4d98 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -251,6 +251,9 @@
 /* The instance data of a given MPIC */
 struct mpic
 {
+	/* The OpenFirmware dt node for this MPIC */
+	struct device_node *node;
+
 	/* The remapper for this MPIC */
 	struct irq_host		*irqhost;
 
@@ -293,6 +296,9 @@
 	/* Register access method */
 	enum mpic_reg_type	reg_type;
 
+	/* The physical base address of the MPIC */
+	phys_addr_t paddr;
+
 	/* The various ioremap'ed bases */
 	struct mpic_reg_bank	gregs;
 	struct mpic_reg_bank	tmregs;
@@ -331,11 +337,11 @@
  * Note setting any ID (leaving those bits to 0) means standard MPIC
  */
 
-/* This is the primary controller, only that one has IPIs and
- * has afinity control. A non-primary MPIC always uses CPU0
- * registers only
+/*
+ * This is a secondary ("chained") controller; it only uses the CPU0
+ * registers.  Primary controllers have IPIs and affinity control.
  */
-#define MPIC_PRIMARY			0x00000001
+#define MPIC_SECONDARY			0x00000001
 
 /* Set this for a big-endian MPIC */
 #define MPIC_BIG_ENDIAN			0x00000002
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 2893e8f..a4b28f1 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -109,6 +109,14 @@
 #define OPAL_PCI_MAP_PE_DMA_WINDOW		44
 #define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL		45
 #define OPAL_PCI_RESET				49
+#define OPAL_PCI_GET_HUB_DIAG_DATA		50
+#define OPAL_PCI_GET_PHB_DIAG_DATA		51
+#define OPAL_PCI_FENCE_PHB			52
+#define OPAL_PCI_REINIT				53
+#define OPAL_PCI_MASK_PE_ERROR			54
+#define OPAL_SET_SLOT_LED_STATUS		55
+#define OPAL_GET_EPOW_STATUS			56
+#define OPAL_SET_SYSTEM_ATTENTION_LED		57
 
 #ifndef __ASSEMBLY__
 
@@ -169,7 +177,11 @@
 	OPAL_EVENT_NVRAM = 0x2,
 	OPAL_EVENT_RTC = 0x4,
 	OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
-	OPAL_EVENT_CONSOLE_INPUT = 0x10
+	OPAL_EVENT_CONSOLE_INPUT = 0x10,
+	OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+	OPAL_EVENT_ERROR_LOG = 0x40,
+	OPAL_EVENT_EPOW = 0x80,
+	OPAL_EVENT_LED_STATUS = 0x100
 };
 
 /* Machine check related definitions */
@@ -258,13 +270,49 @@
 	OPAL_MAP_PE = 1
 };
 
+enum OpalPeltvAction {
+	OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+	OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+	OPAL_DISABLE_MVE = 0,
+	OPAL_ENABLE_MVE = 1
+};
+
 enum OpalPciResetAndReinitScope {
 	OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
 	OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
-	OPAL_PCI_IODA_RESET = 6,
+	OPAL_PCI_IODA_TABLE_RESET = 6,
 };
 
-enum OpalPciResetState { OPAL_DEASSERT_RESET = 0, OPAL_ASSERT_RESET = 1 };
+enum OpalPciResetState {
+	OPAL_DEASSERT_RESET = 0,
+	OPAL_ASSERT_RESET = 1
+};
+
+enum OpalPciMaskAction {
+	OPAL_UNMASK_ERROR_TYPE = 0,
+	OPAL_MASK_ERROR_TYPE = 1
+};
+
+enum OpalSlotLedType {
+	OPAL_SLOT_LED_ID_TYPE = 0,
+	OPAL_SLOT_LED_FAULT_TYPE = 1
+};
+
+enum OpalLedAction {
+	OPAL_TURN_OFF_LED = 0,
+	OPAL_TURN_ON_LED = 1,
+	OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
+};
+
+enum OpalEpowStatus {
+	OPAL_EPOW_NONE = 0,
+	OPAL_EPOW_UPS = 1,
+	OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
+	OPAL_EPOW_OVER_INTERNAL_TEMP = 3
+};
 
 struct opal_machine_check_event {
 	enum OpalMCE_Version	version:8;	/* 0x00 */
@@ -314,8 +362,74 @@
 	} u;
 };
 
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+	OPAL_P7IOC_NUM_PEST_REGS = 128,
+};
+
+struct OpalIoP7IOCPhbErrorData {
+	uint32_t brdgCtl;
+
+	// P7IOC utl regs
+	uint32_t portStatusReg;
+	uint32_t rootCmplxStatus;
+	uint32_t busAgentStatus;
+
+	// P7IOC cfg regs
+	uint32_t deviceStatus;
+	uint32_t slotStatus;
+	uint32_t linkStatus;
+	uint32_t devCmdStatus;
+	uint32_t devSecStatus;
+
+	// cfg AER regs
+	uint32_t rootErrorStatus;
+	uint32_t uncorrErrorStatus;
+	uint32_t corrErrorStatus;
+	uint32_t tlpHdr1;
+	uint32_t tlpHdr2;
+	uint32_t tlpHdr3;
+	uint32_t tlpHdr4;
+	uint32_t sourceId;
+
+	uint32_t rsv3;
+
+	// Record data about the call to allocate a buffer.
+	uint64_t errorClass;
+	uint64_t correlator;
+
+	//P7IOC MMIO Error Regs
+	uint64_t p7iocPlssr;                // n120
+	uint64_t p7iocCsr;                  // n110
+	uint64_t lemFir;                    // nC00
+	uint64_t lemErrorMask;              // nC18
+	uint64_t lemWOF;                    // nC40
+	uint64_t phbErrorStatus;            // nC80
+	uint64_t phbFirstErrorStatus;       // nC88
+	uint64_t phbErrorLog0;              // nCC0
+	uint64_t phbErrorLog1;              // nCC8
+	uint64_t mmioErrorStatus;           // nD00
+	uint64_t mmioFirstErrorStatus;      // nD08
+	uint64_t mmioErrorLog0;             // nD40
+	uint64_t mmioErrorLog1;             // nD48
+	uint64_t dma0ErrorStatus;           // nD80
+	uint64_t dma0FirstErrorStatus;      // nD88
+	uint64_t dma0ErrorLog0;             // nDC0
+	uint64_t dma0ErrorLog1;             // nDC8
+	uint64_t dma1ErrorStatus;           // nE00
+	uint64_t dma1FirstErrorStatus;      // nE08
+	uint64_t dma1ErrorLog0;             // nE40
+	uint64_t dma1ErrorLog1;             // nE48
+	uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS];
+	uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
 typedef struct oppanel_line {
-	/* XXX */
+	const char * 	line;
+	uint64_t 	line_len;
 } oppanel_line_t;
 
 /* API functions */
@@ -413,6 +527,15 @@
 					uint64_t pci_mem_size);
 int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);
 
+int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, uint64_t diag_buffer_len);
+int64_t opal_pci_fence_phb(uint64_t phb_id);
+int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
+int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
+int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
+int64_t opal_get_epow_status(uint64_t *status);
+int64_t opal_set_system_attention_led(uint8_t led_action);
+
 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
 
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 17722c7..269c05a3 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -135,6 +135,7 @@
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
+	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */
 
 #ifdef CONFIG_PPC_POWERNV
 	/* Pointer to OPAL machine check event structure set by the
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index dd9c4fd..f072e97 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -92,20 +92,34 @@
 #define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
 #define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
 
-#if defined(CONFIG_RELOCATABLE)
+#if defined(CONFIG_NONSTATIC_KERNEL)
 #ifndef __ASSEMBLY__
 
 extern phys_addr_t memstart_addr;
 extern phys_addr_t kernstart_addr;
+
+#ifdef CONFIG_RELOCATABLE_PPC32
+extern long long virt_phys_offset;
 #endif
+
+#endif /* __ASSEMBLY__ */
 #define PHYSICAL_START	kernstart_addr
-#else
+
+#else	/* !CONFIG_NONSTATIC_KERNEL */
 #define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
 #endif
 
+/* See Description below for VIRT_PHYS_OFFSET */
+#ifdef CONFIG_RELOCATABLE_PPC32
+#define VIRT_PHYS_OFFSET virt_phys_offset
+#else
+#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
+#endif
+
+
 #ifdef CONFIG_PPC64
 #define MEMORY_START	0UL
-#elif defined(CONFIG_RELOCATABLE)
+#elif defined(CONFIG_NONSTATIC_KERNEL)
 #define MEMORY_START	memstart_addr
 #else
 #define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
@@ -125,12 +139,77 @@
  * determine MEMORY_START until then.  However we can determine PHYSICAL_START
  * from information at hand (program counter, TLB lookup).
  *
+ * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
+ *
+ *   With RELOCATABLE_PPC32,  we support loading the kernel at any physical 
+ *   address without any restriction on the page alignment.
+ *
+ *   We find the runtime address of _stext and relocate ourselves based on 
+ *   the following calculation:
+ *
+ *  	  virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
+ *  				MODULO(_stext.run,256M)
+ *   and create the following mapping:
+ *
+ * 	  ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
+ *
+ *   When we process relocations, we cannot depend on the
+ *   existing equation for the __va()/__pa() translations:
+ *
+ * 	   __va(x) = (x)  - PHYSICAL_START + KERNELBASE
+ *
+ *   Where:
+ *   	 PHYSICAL_START = kernstart_addr = Physical address of _stext
+ *  	 KERNELBASE = Compiled virtual address of _stext.
+ *
+ *   This formula holds true iff the kernel load address is TLB page aligned.
+ *
+ *   In our case, we also need to account for the shift in the kernel virtual
+ *   address.
+ *
+ *   E.g.,
+ *
+ *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
+ *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
+ *
+ *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
+ *                 = 0xbc100000 , which is wrong.
+ *
+ *   Rather, it should be : 0xc0000000 + 0x100000 = 0xc0100000
+ *      	according to our mapping.
+ *
+ *   Hence we use the following formula to get the translations right:
+ *
+ * 	  __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
+ *
+ * 	  Where :
+ * 		PHYSICAL_START = dynamic load address.(kernstart_addr variable)
+ * 		Effective KERNELBASE = virtual_base =
+ * 				     = ALIGN_DOWN(KERNELBASE,256M) +
+ * 						MODULO(PHYSICAL_START,256M)
+ *
+ * 	To make the cost of __va() / __pa() more lightweight, we introduce
+ * 	a new variable virt_phys_offset, which will hold :
+ *
+ * 	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
+ * 			 = ALIGN_DOWN(KERNELBASE,256M) - 
+ * 			 	ALIGN_DOWN(PHYSICAL_START,256M)
+ *
+ * 	Hence :
+ *
+ * 	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
+ * 		= x + virt_phys_offset
+ *
+ * 		and
+ * 	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
+ * 		= x - virt_phys_offset
+ * 		
  * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
  * the other definitions for __va & __pa.
  */
 #ifdef CONFIG_BOOKE
-#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + KERNELBASE))
-#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
+#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
@@ -290,6 +369,7 @@
 extern void copy_user_page(void *to, void *from, unsigned long vaddr,
 		struct page *p);
 extern int page_is_ram(unsigned long pfn);
+extern int devmem_is_allowed(unsigned long pfn);
 
 #ifdef CONFIG_PPC_SMLPAR
 void arch_free_page(struct page *page, int order);
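
The VIRT_PHYS_OFFSET arithmetic described in the comment above can be checked
with a small standalone sketch (ordinary user-space C, not kernel code; the
64MB load address and 0xc0000000 KERNELBASE are the example values used in the
comment):

#include <stdio.h>

#define ALIGN_DOWN_256M(x)	((x) & ~0x0fffffffUL)

int main(void)
{
	unsigned long kernelbase     = 0xc0000000UL;	/* compiled virtual base */
	unsigned long physical_start = 0x04000000UL;	/* kernel loaded at 64MB */

	/* Effective KERNELBASE = ALIGN_DOWN(KERNELBASE,256M) + (PHYSICAL_START % 256M) */
	unsigned long eff_kernelbase = ALIGN_DOWN_256M(kernelbase) +
				       (physical_start & 0x0fffffffUL);

	/* virt_phys_offset = Effective KERNELBASE - PHYSICAL_START */
	unsigned long virt_phys_offset = eff_kernelbase - physical_start;

	/* __va(1MB) comes out as 0xc0100000, matching the worked example above */
	unsigned long pa = 0x00100000UL;
	printf("__va(0x%lx) = 0x%lx\n", pa, pa + virt_phys_offset);
	return 0;
}
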
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fb40ede..fed85e6 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -130,7 +130,9 @@
 
 #ifdef CONFIG_HUGETLB_PAGE
 
+#ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 56b879a..882b6aa 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -153,8 +153,8 @@
 
 	int	pci_ext_config_space;	/* for pci devices */
 
-#ifdef CONFIG_EEH
 	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
+#ifdef CONFIG_EEH
 	int	class_code;		/* pci device class */
 	int	eeh_mode;		/* See eeh.h for possible EEH_MODEs */
 	int	eeh_config_addr;
@@ -164,6 +164,10 @@
 	int	eeh_false_positives;	/* # times this device reported #ff's */
 	u32	config_space[16];	/* saved PCI config space */
 #endif
+#define IODA_INVALID_PE		(-1)
+#ifdef CONFIG_PPC_POWERNV
+	int	pe_number;
+#endif
 };
 
 /* Get the pointer to a device_node's pci_dn */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 49c3de5..1c92013 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -184,8 +184,6 @@
 extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
 extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
 
-extern int pci_read_irq_line(struct pci_dev *dev);
-
 struct file;
 extern pgprot_t	pci_phys_mem_access_prot(struct file *file,
 					 unsigned long pfn,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 88b0bd9..2e0e411 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -170,6 +170,9 @@
 #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 				            _PAGE_COHERENT | _PAGE_WRITETHRU))
 
+#define pgprot_cached_noncoherent(prot) \
+		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
 #define pgprot_writecombine pgprot_noncached_wc
 
 struct file;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index eb11a44..b585bff 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -382,6 +382,9 @@
 }
 #endif
 
+extern unsigned long cpuidle_disable;
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
+
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 559da19..7fdc2c0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -951,6 +951,7 @@
 #define PVR_403GCX	0x00201400
 #define PVR_405GP	0x40110000
 #define PVR_476		0x11a52000
+#define PVR_476FPE	0x7ff50000
 #define PVR_STB03XXX	0x40310000
 #define PVR_NP405H	0x41410000
 #define PVR_NP405L	0x41610000
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 03c48e8..500fe1d 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -187,6 +187,10 @@
 #define SPRN_CSRR1	SPRN_SRR3 /* Critical Save and Restore Register 1 */
 #endif
 
+#ifdef CONFIG_PPC_ICSWX
+#define SPRN_HACOP	0x15F	/* Hypervisor Available Coprocessor Register */
+#endif
+
 /* Bit definitions for CCR1. */
 #define	CCR1_DPC	0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
 #define	CCR1_TCS	0x00000080 /* Timer Clock Select */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 41f69ae..01c143b 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -245,6 +245,12 @@
 
 extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
 
+#ifdef CONFIG_PPC_RTAS_DAEMON
+extern void rtas_cancel_event_scan(void);
+#else
+static inline void rtas_cancel_event_scan(void) { }
+#endif
+
 /* Error types logged.  */
 #define ERR_FLAG_ALREADY_LOGGED	0x0
 #define ERR_FLAG_BOOT		0x1 	/* log was pulled from NVRAM on boot */
@@ -307,5 +313,17 @@
 extern void __cpuinit rtas_give_timebase(void);
 extern void __cpuinit rtas_take_timebase(void);
 
+#ifdef CONFIG_PPC_RTAS
+static inline int page_is_rtas_user_buf(unsigned long pfn)
+{
+	unsigned long paddr = (pfn << PAGE_SHIFT);
+	if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+		return 1;
+	return 0;
+}
+#else
+static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
deleted file mode 100644
index bb1e2cd..0000000
--- a/arch/powerpc/include/asm/rwsem.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _ASM_POWERPC_RWSEM_H
-#define _ASM_POWERPC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_PPC64
-# define RWSEM_ACTIVE_MASK		0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK		0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				     (atomic_long_t *)&sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		rwsem_down_write_failed(sem);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	__down_write_nested(sem, 0);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
-	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-				 (atomic_long_t *)&sem->count) < 0))
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
-				     (atomic_long_t *)&sem->count);
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
-#endif	/* __KERNEL__ */
-#endif	/* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 866f760..2fc2af8 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -69,4 +69,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif	/* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 4e360bd..93f280e 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -25,7 +25,7 @@
 #ifdef __KERNEL__
 
 #include <linux/workqueue.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/mutex.h>
 
 #define LS_SIZE (256 * 1024)
@@ -166,7 +166,7 @@
 	/* beat only */
 	u64 shadow_int_mask_RW[3];
 
-	struct sys_device sysdev;
+	struct device dev;
 
 	int has_mem_affinity;
 	struct list_head aff_list;
@@ -237,7 +237,7 @@
 struct file;
 struct spufs_calls {
 	long (*create_thread)(const char __user *name,
-					unsigned int flags, mode_t mode,
+					unsigned int flags, umode_t mode,
 					struct file *neighbor);
 	long (*spu_run)(struct file *filp, __u32 __user *unpc,
 						__u32 __user *ustatus);
@@ -270,11 +270,11 @@
 int register_spu_syscalls(struct spufs_calls *calls);
 void unregister_spu_syscalls(struct spufs_calls *calls);
 
-int spu_add_sysdev_attr(struct sysdev_attribute *attr);
-void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+int spu_add_dev_attr(struct device_attribute *attr);
+void spu_remove_dev_attr(struct device_attribute *attr);
 
-int spu_add_sysdev_attr_group(struct attribute_group *attrs);
-void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+int spu_add_dev_attr_group(struct attribute_group *attrs);
+void spu_remove_dev_attr_group(struct attribute_group *attrs);
 
 int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		unsigned long dsisr, unsigned *flt);
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index e30a13d..c377457 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -193,8 +193,8 @@
 extern void *cacheable_memcpy(void *, const void *, unsigned int);
 extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
 extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-extern int die(const char *, struct pt_regs *, long);
 extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void die(const char *, struct pt_regs *, long);
 extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
 
 #ifdef CONFIG_BOOKE_WDT
@@ -221,6 +221,15 @@
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
+void cpu_idle_wait(void);
+
+#ifdef CONFIG_PSERIES_IDLE
+extern void update_smt_snooze_delay(int snooze);
+extern int pseries_notify_cpuidle_add_cpu(int cpu);
+#else
+static inline void update_smt_snooze_delay(int snooze) {}
+static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
+#endif
 
 /*
  * Atomic exchange
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index f663634..743f36b 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -26,10 +26,14 @@
 
 /*
  * Tces come in two formats, one for the virtual bus and a different
- * format for PCI
+ * format for PCI.  PCI TCEs can have hardware or software maintained
+ * coherency.
  */
-#define TCE_VB  0
-#define TCE_PCI 1
+#define TCE_VB			0
+#define TCE_PCI			1
+#define TCE_PCI_SWINV_CREATE	2
+#define TCE_PCI_SWINV_FREE	4
+#define TCE_PCI_SWINV_PAIR	8
 
 /* TCE page size is 4096 bytes (1 << 12) */
 
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231..96471494 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -109,7 +109,6 @@
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
 #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
-#define TIF_FREEZE		14	/* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */
 
@@ -127,7 +126,6 @@
 #define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
 #define _TIF_NOERROR		(1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index fe6f7c2..7eb10fb 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -219,5 +219,7 @@
 extern void secondary_cpu_time_init(void);
 extern void iSeries_time_init_early(void);
 
+DECLARE_PER_CPU(u64, decrementers_next_tb);
+
 #endif /* __KERNEL__ */
 #endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 1e104af..c971858 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -3,7 +3,7 @@
 #ifdef __KERNEL__
 
 
-struct sys_device;
+struct device;
 struct device_node;
 
 #ifdef CONFIG_NUMA
@@ -86,19 +86,19 @@
 
 extern void __init dump_numa_cpu_topology(void);
 
-extern int sysfs_add_device_to_node(struct sys_device *dev, int nid);
-extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid);
+extern int sysfs_add_device_to_node(struct device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
 #else
 
 static inline void dump_numa_cpu_topology(void) {}
 
-static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+static inline int sysfs_add_device_to_node(struct device *dev, int nid)
 {
 	return 0;
 }
 
-static inline void sysfs_remove_device_from_node(struct sys_device *dev,
+static inline void sysfs_remove_device_from_node(struct device *dev,
 						int nid)
 {
 }
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index 8947b98..0abf7f2 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -5,8 +5,11 @@
  * This is here because we used to use l64 for 64bit powerpc
  * and we don't want to impact user mode with our change to ll64
  * in the kernel.
+ *
+ * However, some user programs are fine with this.  They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
  */
-#if defined(__powerpc64__) && !defined(__KERNEL__)
+#if !defined(__SANE_USERSPACE_TYPES__) && defined(__powerpc64__) && !defined(__KERNEL__)
 # include <asm-generic/int-l64.h>
 #else
 # include <asm-generic/int-ll64.h>
@@ -27,12 +30,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifdef __powerpc64__
-typedef unsigned int umode_t;
-#else
-typedef unsigned short umode_t;
-#endif
-
 typedef struct {
 	__u32 u[4];
 } __attribute__((aligned(16))) __vector128;
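
As a usage note for the __SANE_USERSPACE_TYPES__ hunk above: a 64-bit powerpc
user program that wants the ll64 exact-width types (so __u64 can be printed
with %llu on both 32-bit and 64-bit builds) opts in before pulling in any UAPI
header. A minimal sketch, not part of this patch:

/* Must be defined before the first kernel UAPI header is included. */
#define __SANE_USERSPACE_TYPES__

#include <linux/types.h>
#include <stdio.h>

int main(void)
{
	__u64 v = 1ULL << 40;

	/* With int-ll64.h selected, __u64 is unsigned long long everywhere. */
	printf("%llu\n", (unsigned long long)v);
	return 0;
}
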
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ce4f7f1..ee728e4 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -85,6 +85,8 @@
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-y				+= vmlinux.lds
 
+obj-$(CONFIG_RELOCATABLE_PPC32)	+= reloc_32.o
+
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7c5324f..04caee7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -208,6 +208,7 @@
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
 	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
+	DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
 #endif /* CONFIG_PPC64 */
 
 	/* RTAS */
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index a3c684b..92c6b00 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -451,15 +451,15 @@
 static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
 {
 	struct cache_dir *cache_dir;
-	struct sys_device *sysdev;
+	struct device *dev;
 	struct kobject *kobj = NULL;
 
-	sysdev = get_cpu_sysdev(cpu_id);
-	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
-	if (!sysdev)
+	dev = get_cpu_device(cpu_id);
+	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
+	if (!dev)
 		goto err;
 
-	kobj = kobject_create_and_add("cache", &sysdev->kobj);
+	kobj = kobject_create_and_add("cache", &dev->kobj);
 	if (!kobj)
 		goto err;
 
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
index 7f818fe..ebc62f4 100644
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -41,11 +41,16 @@
 	 * core local but doing it always won't hurt
 	 */
 
-#ifdef CONFIG_PPC_WSP_COPRO
+#ifdef CONFIG_PPC_ICSWX
 	/* Make sure ACOP starts out as zero */
 	li	r3,0
 	mtspr   SPRN_ACOP,r3
 
+	/* Skip the following if we are in Guest mode */
+	mfmsr	r3
+	andis.	r0,r3,MSR_GS@h
+	bne	_icswx_skip_guest
+
 	/* Enable icswx instruction */
 	mfspr   r3,SPRN_A2_CCR2
 	ori     r3,r3,A2_CCR2_ENABLE_ICSWX
@@ -54,7 +59,8 @@
 	/* Unmask all CTs in HACOP */
 	li      r3,-1
 	mtspr   SPRN_HACOP,r3
-#endif /* CONFIG_PPC_WSP_COPRO */
+_icswx_skip_guest:
+#endif /* CONFIG_PPC_ICSWX */
 
 	/* Enable doorbell */
 	mfspr   r3,SPRN_A2_CCR2
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index edae5bb..81db9e2 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1505,6 +1505,19 @@
 		.machine_check		= machine_check_4xx,
 		.platform		= "ppc405",
 	},
+	{	/* APM8018X */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x7ff11432,
+		.cpu_name		= "APM8018X",
+		.cpu_features		= CPU_FTRS_40X,
+		.cpu_user_features	= PPC_FEATURE_32 |
+			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+		.mmu_features		= MMU_FTR_TYPE_40x,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 32,
+		.machine_check		= machine_check_4xx,
+		.platform		= "ppc405",
+	},
 	{	/* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
@@ -1830,6 +1843,20 @@
 		.machine_check		= machine_check_47x,
 		.platform		= "ppc470",
 	},
+	{ /* 476fpe */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x7ff50000,
+		.cpu_name		= "476fpe",
+		.cpu_features		= CPU_FTRS_47X | CPU_FTR_476_DD2,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
 	{ /* 476 iss */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00050000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index d879809..28be345 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -10,85 +10,85 @@
  *
  */
 
-#undef DEBUG
-
 #include <linux/kernel.h>
 #include <linux/smp.h>
 #include <linux/reboot.h>
 #include <linux/kexec.h>
-#include <linux/bootmem.h>
 #include <linux/export.h>
 #include <linux/crash_dump.h>
 #include <linux/delay.h>
-#include <linux/elf.h>
-#include <linux/elfcore.h>
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
-#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
 #include <asm/kexec.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
-#include <asm/firmware.h>
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/setjmp.h>
 
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
+/*
+ * The primary CPU waits a while for all secondary CPUs to enter. This is to
+ * avoid sending an IPI if the secondary CPUs are entering
+ * crash_kexec_secondary on their own (e.g. via a system reset).
+ *
+ * The secondary timeout has to be longer than the primary. Both timeouts are
+ * in milliseconds.
+ */
+#define PRIMARY_TIMEOUT		500
+#define SECONDARY_TIMEOUT	1000
 
-/* This keeps a track of which one is crashing cpu. */
+#define IPI_TIMEOUT		10000
+#define REAL_MODE_TIMEOUT	10000
+
+/* This keeps track of which one is the crashing cpu. */
 int crashing_cpu = -1;
-static cpumask_t cpus_in_crash = CPU_MASK_NONE;
-cpumask_t cpus_in_sr = CPU_MASK_NONE;
+static atomic_t cpus_in_crash;
+static int time_to_dump;
 
 #define CRASH_HANDLER_MAX 3
 /* NULL terminated list of shutdown handles */
 static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
 static DEFINE_SPINLOCK(crash_handlers_lock);
 
+static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;
+
+static int handle_fault(struct pt_regs *regs)
+{
+	if (crash_shutdown_cpu == smp_processor_id())
+		longjmp(crash_shutdown_buf, 1);
+	return 0;
+}
+
 #ifdef CONFIG_SMP
-static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
 void crash_ipi_callback(struct pt_regs *regs)
 {
+	static cpumask_t cpus_state_saved = CPU_MASK_NONE;
+
 	int cpu = smp_processor_id();
 
 	if (!cpu_online(cpu))
 		return;
 
 	hard_irq_disable();
-	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
 		crash_save_cpu(regs, cpu);
-	cpumask_set_cpu(cpu, &cpus_in_crash);
-
-	/*
-	 * Entered via soft-reset - could be the kdump
-	 * process is invoked using soft-reset or user activated
-	 * it if some CPU did not respond to an IPI.
-	 * For soft-reset, the secondary CPU can enter this func
-	 * twice. 1 - using IPI, and 2. soft-reset.
-	 * Tell the kexec CPU that entered via soft-reset and ready
-	 * to go down.
-	 */
-	if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
-		cpumask_clear_cpu(cpu, &cpus_in_sr);
-		atomic_inc(&enter_on_soft_reset);
+		cpumask_set_cpu(cpu, &cpus_state_saved);
 	}
 
+	atomic_inc(&cpus_in_crash);
+	smp_mb__after_atomic_inc();
+
 	/*
 	 * Starting the kdump boot.
 	 * This barrier is needed to make sure that all CPUs are stopped.
-	 * If not, soft-reset will be invoked to bring other CPUs.
 	 */
-	while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
+	while (!time_to_dump)
 		cpu_relax();
 
 	if (ppc_md.kexec_cpu_down)
@@ -103,106 +103,99 @@
 	/* NOTREACHED */
 }
 
-/*
- * Wait until all CPUs are entered via soft-reset.
- */
-static void crash_soft_reset_check(int cpu)
-{
-	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
-
-	cpumask_clear_cpu(cpu, &cpus_in_sr);
-	while (atomic_read(&enter_on_soft_reset) != ncpus)
-		cpu_relax();
-}
-
-
 static void crash_kexec_prepare_cpus(int cpu)
 {
 	unsigned int msecs;
-
 	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+	int tries = 0;
+	int (*old_handler)(struct pt_regs *regs);
+
+	printk(KERN_EMERG "Sending IPI to other CPUs\n");
 
 	crash_send_ipi(crash_ipi_callback);
 	smp_wmb();
 
+again:
 	/*
 	 * FIXME: Until we will have the way to stop other CPUs reliably,
 	 * the crash CPU will send an IPI and wait for other CPUs to
 	 * respond.
-	 * Delay of at least 10 seconds.
 	 */
-	printk(KERN_EMERG "Sending IPI to other cpus...\n");
-	msecs = 10000;
-	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
-		cpu_relax();
+	msecs = IPI_TIMEOUT;
+	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
 		mdelay(1);
-	}
 
 	/* Would it be better to replace the trap vector here? */
 
+	if (atomic_read(&cpus_in_crash) >= ncpus) {
+		printk(KERN_EMERG "IPI complete\n");
+		return;
+	}
+
+	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
+		ncpus - atomic_read(&cpus_in_crash));
+
 	/*
-	 * FIXME: In case if we do not get all CPUs, one possibility: ask the
-	 * user to do soft reset such that we get all.
-	 * Soft-reset will be used until better mechanism is implemented.
+	 * If we have a panic timeout set then we can't wait indefinitely
+	 * for someone to activate system reset. We also give up on the
+	 * second time through if system reset fails to work.
 	 */
-	if (cpumask_weight(&cpus_in_crash) < ncpus) {
-		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-			ncpus - cpumask_weight(&cpus_in_crash));
-		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
-		cpumask_clear(&cpus_in_sr);
-		atomic_set(&enter_on_soft_reset, 0);
-		while (cpumask_weight(&cpus_in_crash) < ncpus)
+	if ((panic_timeout > 0) || (tries > 0))
+		return;
+
+	/*
+	 * A system reset will cause all CPUs to take an 0x100 exception.
+	 * The primary CPU returns here via setjmp, and the secondary
+	 * CPUs reexecute the crash_kexec_secondary path.
+	 */
+	old_handler = __debugger;
+	__debugger = handle_fault;
+	crash_shutdown_cpu = smp_processor_id();
+
+	if (setjmp(crash_shutdown_buf) == 0) {
+		printk(KERN_EMERG "Activate system reset (dumprestart) "
+				  "to stop other cpu(s)\n");
+
+		/*
+		 * A system reset will force all CPUs to execute the
+		 * crash code again. We need to reset cpus_in_crash so we
+		 * wait for everyone to do this.
+		 */
+		atomic_set(&cpus_in_crash, 0);
+		smp_mb();
+
+		while (atomic_read(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
-	/*
-	 * Make sure all CPUs are entered via soft-reset if the kdump is
-	 * invoked using soft-reset.
-	 */
-	if (cpumask_test_cpu(cpu, &cpus_in_sr))
-		crash_soft_reset_check(cpu);
-	/* Leave the IPI callback set */
+
+	crash_shutdown_cpu = -1;
+	__debugger = old_handler;
+
+	tries++;
+	goto again;
 }
 
 /*
- * This function will be called by secondary cpus or by kexec cpu
- * if soft-reset is activated to stop some CPUs.
+ * This function will be called by secondary cpus.
  */
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
 	unsigned long flags;
-	int msecs = 5;
+	int msecs = SECONDARY_TIMEOUT;
 
 	local_irq_save(flags);
-	/* Wait 5ms if the kexec CPU is not entered yet. */
+
+	/* Wait for the primary crash CPU to signal its progress */
 	while (crashing_cpu < 0) {
 		if (--msecs < 0) {
-			/*
-			 * Either kdump image is not loaded or
-			 * kdump process is not started - Probably xmon
-			 * exited using 'x'(exit and recover) or
-			 * kexec_should_crash() failed for all running tasks.
-			 */
-			cpumask_clear_cpu(cpu, &cpus_in_sr);
+			/* No response, kdump image may not have been loaded */
 			local_irq_restore(flags);
 			return;
 		}
+
 		mdelay(1);
-		cpu_relax();
 	}
-	if (cpu == crashing_cpu) {
-		/*
-		 * Panic CPU will enter this func only via soft-reset.
-		 * Wait until all secondary CPUs entered and
-		 * then start kexec boot.
-		 */
-		crash_soft_reset_check(cpu);
-		cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
-		if (ppc_md.kexec_cpu_down)
-			ppc_md.kexec_cpu_down(1, 0);
-		machine_kexec(kexec_crash_image);
-		/* NOTREACHED */
-	}
+
 	crash_ipi_callback(regs);
 }
 
@@ -211,7 +204,7 @@
 static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
-	 * move the secondarys to us so that we can copy
+	 * move the secondaries to us so that we can copy
 	 * the new kernel 0-0x100 safely
 	 *
 	 * do this if kexec in setup.c ?
@@ -225,7 +218,6 @@
 
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-	cpumask_clear(&cpus_in_sr);
 }
 #endif	/* CONFIG_SMP */
 
@@ -236,7 +228,7 @@
 	unsigned int msecs;
 	int i;
 
-	msecs = 10000;
+	msecs = REAL_MODE_TIMEOUT;
 	for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
 		if (i == cpu)
 			continue;
@@ -308,22 +300,11 @@
 }
 EXPORT_SYMBOL(crash_shutdown_unregister);
 
-static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
-static int crash_shutdown_cpu = -1;
-
-static int handle_fault(struct pt_regs *regs)
-{
-	if (crash_shutdown_cpu == smp_processor_id())
-		longjmp(crash_shutdown_buf, 1);
-	return 0;
-}
-
 void default_machine_crash_shutdown(struct pt_regs *regs)
 {
 	unsigned int i;
 	int (*old_handler)(struct pt_regs *regs);
 
-
 	/*
 	 * This function is only called after the system
 	 * has panicked or is otherwise in a critical state.
@@ -341,15 +322,26 @@
 	 * such that another IPI will not be sent.
 	 */
 	crashing_cpu = smp_processor_id();
-	crash_save_cpu(regs, crashing_cpu);
+
+	/*
+	 * If we came in via system reset, wait a while for the secondary
+	 * CPUs to enter.
+	 */
+	if (TRAP(regs) == 0x100)
+		mdelay(PRIMARY_TIMEOUT);
+
 	crash_kexec_prepare_cpus(crashing_cpu);
-	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
+
+	crash_save_cpu(regs, crashing_cpu);
+
+	time_to_dump = 1;
+
 	crash_kexec_wait_realmode(crashing_cpu);
 
 	machine_kexec_mask_interrupts();
 
 	/*
-	 * Call registered shutdown routines savely.  Swap out
+	 * Call registered shutdown routines safely.  Swap out
 	 * __debugger_fault_handler, and replace on exit.
 	 */
 	old_handler = __debugger_fault_handler;
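
The reworked CPU rendezvous above replaces the soft-reset bookkeeping with an
atomic counter plus millisecond timeouts. A simplified sketch of that shape
(plain C11 atomics, for illustration only; the kernel version counts CPUs
arriving via IPI or system reset and waits with mdelay()):

#include <stdatomic.h>

#define IPI_TIMEOUT	10000	/* milliseconds, mirroring the new constant */

static atomic_int cpus_in_crash;

/* Each secondary "CPU" bumps the counter once its state is saved. */
static void secondary_enter(void)
{
	atomic_fetch_add(&cpus_in_crash, 1);
}

/* The primary waits up to IPI_TIMEOUT ms for ncpus secondaries to check in
 * and reports how many never arrived, the same structure as
 * crash_kexec_prepare_cpus() above. */
static int primary_wait(int ncpus, void (*delay_1ms)(void))
{
	int msecs = IPI_TIMEOUT;

	while (atomic_load(&cpus_in_crash) < ncpus && --msecs > 0)
		delay_1ms();

	return ncpus - atomic_load(&cpus_in_crash);	/* 0 on success */
}
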
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 424afb6..b3ba516 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -28,7 +28,7 @@
 #define DBG(fmt...)
 #endif
 
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
 void __init reserve_kdump_trampoline(void)
 {
 	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
@@ -67,7 +67,7 @@
 
 	DBG(" <- setup_kdump_trampoline()\n");
 }
-#endif /* CONFIG_RELOCATABLE */
+#endif /* CONFIG_NONSTATIC_KERNEL */
 
 static int __init parse_savemaxmem(char *p)
 {
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cf9c69b..d4be7bb 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -65,7 +65,7 @@
 	lbz	r0,PACAPROCSTART(r13)
 	cmpwi	r0,0x80
 	bne	1f
-	li	r0,0
+	li	r0,1
 	stb	r0,PACAPROCSTART(r13)
 	b	kvm_start_guest
 1:
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index b725dab..7dd2981 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -64,6 +64,35 @@
 	mr	r31,r3		/* save device tree ptr */
 	li	r24,0		/* CPU number */
 
+#ifdef CONFIG_RELOCATABLE
+/*
+ * Relocate ourselves to the current runtime address.
+ * This is called only by the Boot CPU.
+ * "relocate" is called with our current runtime virtual
+ * address.
+ * r21 will be loaded with the physical runtime address of _stext
+ */
+	bl	0f				/* Get our runtime address */
+0:	mflr	r21				/* Make it accessible */
+	addis	r21,r21,(_stext - 0b)@ha
+	addi	r21,r21,(_stext - 0b)@l 	/* Get our current runtime base */
+
+	/*
+	 * We have the runtime (virtual) address of our base.
+	 * We calculate our offset from the start of a 256M page.
+	 * We could map the 256M page we belong to at PAGE_OFFSET and
+	 * get going from there.
+	 */
+	lis	r4,KERNELBASE@h
+	ori	r4,r4,KERNELBASE@l
+	rlwinm	r6,r21,0,4,31			/* r6 = PHYS_START % 256M */
+	rlwinm	r5,r4,0,4,31			/* r5 = KERNELBASE % 256M */
+	subf	r3,r5,r6			/* r3 = r6 - r5 */
+	add	r3,r4,r3			/* Required Virtual Address */
+
+	bl	relocate
+#endif
+
 	bl	init_cpu_state
 
 	/*
@@ -88,6 +117,65 @@
 
 #ifdef CONFIG_RELOCATABLE
 	/*
+	 * Relocatable kernel support based on processing of dynamic
+	 * relocation entries.
+	 *
+	 * r25 will contain RPN/ERPN for the start address of memory
+	 * r21 will contain the current offset of _stext
+	 */
+	lis	r3,kernstart_addr@ha
+	la	r3,kernstart_addr@l(r3)
+
+	/*
+	 * Compute the kernstart_addr.
+	 * kernstart_addr => (r6,r8)
+	 * kernstart_addr & ~0xfffffff => (r6,r7)
+	 */
+	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
+	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
+	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
+	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */
+
+	/* Store kernstart_addr */
+	stw	r6,0(r3)	/* higher 32bit */
+	stw	r8,4(r3)	/* lower 32bit  */
+
+	/*
+	 * Compute the virt_phys_offset :
+	 * virt_phys_offset = stext.run - kernstart_addr
+	 *
+	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
+	 * When we relocate, we have :
+	 *
+	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
+	 *
+	 * hence:
+	 *  virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
+	 *
+	 */
+
+	/* KERNELBASE&~0xfffffff => (r4,r5) */
+	li	r4, 0		/* higher 32bit */
+	lis	r5,KERNELBASE@h
+	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */
+
+	/*
+	 * 64bit subtraction.
+	 */
+	subfc	r5,r7,r5
+	subfe	r4,r6,r4
+
+	/* Store virt_phys_offset */
+	lis	r3,virt_phys_offset@ha
+	la	r3,virt_phys_offset@l(r3)
+
+	stw	r4,0(r3)
+	stw	r5,4(r3)
+
+#elif defined(CONFIG_DYNAMIC_MEMSTART)
+	/*
+	 * Mapping based, page aligned dynamic kernel loading.
+	 *
 	 * r25 will contain RPN/ERPN for the start address of memory
 	 *
 	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
@@ -732,6 +820,8 @@
 	/* We use the PVR to differenciate 44x cores from 476 */
 	mfspr	r3,SPRN_PVR
 	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476FPE@h
+	beq	head_start_47x
 	cmplwi	cr0,r3,PVR_476@h
 	beq	head_start_47x
 	cmplwi	cr0,r3,PVR_476_ISS@h
@@ -800,12 +890,29 @@
 /*
  * Configure and load pinned entry into TLB slot 63.
  */
+#ifdef CONFIG_NONSTATIC_KERNEL
+	/*
+	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
+	 * entries of the initial mapping set by the boot loader.
+	 * The XLAT entry is stored in r25
+	 */
+
+	/* Read the XLAT entry for our current mapping */
+	tlbre	r25,r23,PPC44x_TLB_XLAT
+
+	lis	r3,KERNELBASE@h
+	ori	r3,r3,KERNELBASE@l
+
+	/* Use our current RPN entry */
+	mr	r4,r25
+#else
 
 	lis	r3,PAGE_OFFSET@h
 	ori	r3,r3,PAGE_OFFSET@l
 
 	/* Kernel is at the base of RAM */
 	li r4, 0			/* Load the kernel physical address */
+#endif
 
 	/* Load the kernel PID = 0 */
 	li	r0,0
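
The 64-bit virt_phys_offset computation above is done as a two-word
subtraction: subfc produces the low word and the carry, subfe consumes the
carry for the high word. An equivalent C sketch of that two-word subtraction
(illustrative only; the real code runs long before C is available):

#include <stdint.h>

/* result = (hi_a:lo_a) - (hi_b:lo_b), as subfc/subfe do it in head_44x.S.
 * For virt_phys_offset: hi_a = 0, lo_a = KERNELBASE & ~0xfffffff,
 * hi_b = ERPN, lo_b = RPN (both 256M aligned). */
static void sub64(uint32_t hi_a, uint32_t lo_a,
		  uint32_t hi_b, uint32_t lo_b,
		  uint32_t *hi_r, uint32_t *lo_r)
{
	uint32_t borrow = (lo_a < lo_b);	/* subfc records this in CA */

	*lo_r = lo_a - lo_b;
	*hi_r = hi_a - hi_b - borrow;		/* subfe folds the carry back in */
}
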
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 9f5d210..d5d78c4 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -197,7 +197,7 @@
 
 	bl	early_init
 
-#ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_DYNAMIC_MEMSTART
 	lis	r3,kernstart_addr@ha
 	la	r3,kernstart_addr@l(r3)
 #ifdef CONFIG_PHYS_64BIT
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 39a2baa..7c66ce13 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,13 +39,23 @@
 #define cpu_should_die()	0
 #endif
 
+unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
+EXPORT_SYMBOL(cpuidle_disable);
+
 static int __init powersave_off(char *arg)
 {
 	ppc_md.power_save = NULL;
+	cpuidle_disable = IDLE_POWERSAVE_OFF;
 	return 0;
 }
 __setup("powersave=off", powersave_off);
 
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_TRACEPOINTS)
+static const bool idle_uses_rcu = 1;
+#else
+static const bool idle_uses_rcu;
+#endif
+
 /*
  * The body of the idle task.
  */
@@ -56,7 +66,10 @@
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		if (!idle_uses_rcu)
+			rcu_idle_enter();
+
 		while (!need_resched() && !cpu_should_die()) {
 			ppc64_runlatch_off();
 
@@ -93,7 +106,9 @@
 
 		HMT_medium();
 		ppc64_runlatch_on();
-		tick_nohz_restart_sched_tick();
+		if (!idle_uses_rcu)
+			rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		if (cpu_should_die())
 			cpu_die();
@@ -102,6 +117,29 @@
 	}
 }
 
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
+ * idle loop and start using the new idle loop.
+ * Required while changing idle handler on SMP systems.
+ * Caller must have changed idle handler to the new value before the call.
+ * This window may be larger on shared systems.
+ */
+void cpu_idle_wait(void)
+{
+	int cpu;
+	smp_mb();
+
+	/* kick all the CPUs so that they exit out of old idle routine */
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			smp_send_reschedule(cpu);
+	}
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
 int powersave_nap;
 
 #ifdef CONFIG_SYSCTL
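
A hedged usage sketch for the new cpu_idle_wait(): the expected pattern is to
publish the new idle handler first and then call cpu_idle_wait() so every CPU
drops out of the old loop. The wrapper below is illustrative only (the real
callers are the pseries cpuidle bits referenced in system.h):

#include <asm/machdep.h>	/* ppc_md */
#include <asm/system.h>		/* cpu_idle_wait() declaration added above */

/* Illustrative: swap the platform idle routine and make sure no CPU is
 * still executing the old one. */
static void switch_idle_handler(void (*new_idle)(void))
{
	ppc_md.power_save = new_idle;	/* handler must change before the wait */
	cpu_idle_wait();		/* kick every CPU out of the old idle loop */
}
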
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 3a70845..fcdff19 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -54,6 +54,7 @@
 	li	r0,0
 	stb	r0,PACASOFTIRQEN(r13)	/* we'll hard-enable shortly */
 	stb	r0,PACAHARDIRQEN(r13)
+	stb	r0,PACA_NAPSTATELOST(r13)
 
 	/* Continue saving state */
 	SAVE_GPR(2, r1)
@@ -86,6 +87,9 @@
 	rfid
 
 _GLOBAL(power7_wakeup_noloss)
+	lbz	r0,PACA_NAPSTATELOST(r13)
+	cmpwi	r0,0
+	bne	.power7_wakeup_loss
 	ld	r1,PACAR1(r13)
 	ld	r4,_MSR(r1)
 	ld	r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5c3c469..701d4ac 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -115,6 +115,15 @@
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
+static inline notrace void decrementer_check_overflow(void)
+{
+	u64 now = get_tb_or_rtc();
+	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+
+	if (now >= *next_tb)
+		set_dec(1);
+}
+
 notrace void arch_local_irq_restore(unsigned long en)
 {
 	/*
@@ -164,24 +173,21 @@
 	 */
 	local_paca->hard_enabled = en;
 
-#ifndef CONFIG_BOOKE
-	/* On server, re-trigger the decrementer if it went negative since
-	 * some processors only trigger on edge transitions of the sign bit.
-	 *
-	 * BookE has a level sensitive decrementer (latches in TSR) so we
-	 * don't need that
+	/*
+	 * Trigger the decrementer if we have a pending event. Some processors
+	 * only trigger on edge transitions of the sign bit. We might also
+	 * have disabled interrupts long enough that the decrementer wrapped
+	 * to positive.
 	 */
-	if ((int)mfspr(SPRN_DEC) < 0)
-		mtspr(SPRN_DEC, 1);
-#endif /* CONFIG_BOOKE */
+	decrementer_check_overflow();
 
 	/*
 	 * Force the delivery of pending soft-disabled interrupts on PS3.
 	 * Any HV call will have this side effect.
 	 */
 	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
-		u64 tmp;
-		lv1_get_version_info(&tmp);
+		u64 tmp, tmp2;
+		lv1_get_version_info(&tmp, &tmp2);
 	}
 
 	__hard_irq_enable();
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c7b5afe..3fea368 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -441,6 +441,9 @@
 		return;
 
 	port->irq = virq;
+
+	if (of_device_is_compatible(np, "fsl,ns16550"))
+		port->handle_irq = fsl8250_handle_irq;
 }
 
 static void __init fixup_port_pio(int index,
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 84daabe..578f35f 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -783,7 +783,7 @@
 static int __init lparcfg_init(void)
 {
 	struct proc_dir_entry *ent;
-	mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+	umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
 
 	/* Allow writing if we have FW_FEATURE_SPLPAR */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR) &&
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 9ce1672..c957b12 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -107,9 +107,6 @@
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	/* this is necessary because of memblock_phys_mem_size() */
-	memblock_analyze();
-
 	/* use common parsing */
 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 			&crash_size, &crash_base);
@@ -128,7 +125,7 @@
 
 	crash_size = resource_size(&crashk_res);
 
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
 	if (crashk_res.start != KDUMP_KERNELBASE)
 		printk("Crash kernel location must be 0x%x\n",
 				KDUMP_KERNELBASE);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 458ed3b..fa4a573 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -214,7 +214,7 @@
  * If the interrupt is used, then gets the interrupt line from the
  * openfirmware and sets it in the pci_dev and pci_config line.
  */
-int pci_read_irq_line(struct pci_dev *pci_dev)
+static int pci_read_irq_line(struct pci_dev *pci_dev)
 {
 	struct of_irq oirq;
 	unsigned int virq;
@@ -283,7 +283,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(pci_read_irq_line);
 
 /*
  * Platform support for /proc/bus/pci/X/Y mmap()s,
@@ -921,18 +920,22 @@
 		struct resource *res = dev->resource + i;
 		if (!res->flags)
 			continue;
-		/* On platforms that have PCI_PROBE_ONLY set, we don't
-		 * consider 0 as an unassigned BAR value. It's technically
-		 * a valid value, but linux doesn't like it... so when we can
-		 * re-assign things, we do so, but if we can't, we keep it
-		 * around and hope for the best...
+
+		/* If we're going to re-assign everything, we mark all resources
+		 * as unset (and 0-base them). In addition, we mark BARs starting
+		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
+		 * since in that case, we don't want to re-assign anything
 		 */
-		if (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY)) {
-			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n",
-				 pci_name(dev), i,
-				 (unsigned long long)res->start,
-				 (unsigned long long)res->end,
-				 (unsigned int)res->flags);
+		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
+		    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+			/* Only print message if not re-assigning */
+			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
+				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
+					 "is unassigned\n",
+					 pci_name(dev), i,
+					 (unsigned long long)res->start,
+					 (unsigned long long)res->end,
+					 (unsigned int)res->flags);
 			res->end -= res->start;
 			res->start = 0;
 			res->flags |= IORESOURCE_UNSET;
@@ -1042,6 +1045,16 @@
 		if (i >= 3 && bus->self->transparent)
 			continue;
 
+		/* If we are going to re-assign everything, mark the resource
+		 * as unset and move it down to 0
+		 */
+		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
+			res->flags |= IORESOURCE_UNSET;
+			res->end -= res->start;
+			res->start = 0;
+			continue;
+		}
+
 		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
 			 pci_name(dev), i,
 			 (unsigned long long)res->start,\
@@ -1262,18 +1275,15 @@
 	pci_bus_for_each_resource(bus, res, i) {
 		if (!res || !res->flags || res->start > res->end || res->parent)
 			continue;
+
+		/* If the resource was left unset at this point, we clear it */
+		if (res->flags & IORESOURCE_UNSET)
+			goto clear_resource;
+
 		if (bus->parent == NULL)
 			pr = (res->flags & IORESOURCE_IO) ?
 				&ioport_resource : &iomem_resource;
 		else {
-			/* Don't bother with non-root busses when
-			 * re-assigning all resources. We clear the
-			 * resource flags as if they were colliding
-			 * and as such ensure proper re-allocation
-			 * later.
-			 */
-			if (pci_has_flag(PCI_REASSIGN_ALL_RSRC))
-				goto clear_resource;
 			pr = pci_find_parent_resource(bus->self, res);
 			if (pr == res) {
 				/* this happens when the generic PCI
@@ -1304,9 +1314,9 @@
 			if (reparent_resources(pr, res) == 0)
 				continue;
 		}
-		printk(KERN_WARNING "PCI: Cannot allocate resource region "
-		       "%d of PCI bridge %d, will remap\n", i, bus->number);
-clear_resource:
+		pr_warning("PCI: Cannot allocate resource region "
+			   "%d of PCI bridge %d, will remap\n", i, bus->number);
+	clear_resource:
 		res->start = res->end = 0;
 		res->flags = 0;
 	}
@@ -1451,16 +1461,11 @@
 {
 	struct pci_bus *b;
 
-	/* Allocate and assign resources. If we re-assign everything, then
-	 * we skip the allocate phase
-	 */
+	/* Allocate and assign resources */
 	list_for_each_entry(b, &pci_root_buses, node)
 		pcibios_allocate_bus_resources(b);
-
-	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
-		pcibios_allocate_resources(0);
-		pcibios_allocate_resources(1);
-	}
+	pcibios_allocate_resources(0);
+	pcibios_allocate_resources(1);
 
 	/* Before we start assigning unassigned resource, we try to reserve
 	 * the low IO area and the VGA memory area if they intersect the
@@ -1732,6 +1737,12 @@
 	if (mode == PCI_PROBE_NORMAL)
 		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
 
+	/* Platform gets a chance to do some global fixups before
+	 * we proceed to resource allocation
+	 */
+	if (ppc_md.pcibios_fixup_phb)
+		ppc_md.pcibios_fixup_phb(hose);
+
 	/* Configure PCI Express settings */
 	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
 		struct pci_bus *child;
@@ -1747,10 +1758,13 @@
 static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
 {
 	int i, class = dev->class >> 8;
+	/* When configured as agent, programming interface = 1 */
+	int prog_if = dev->class & 0xf;
 
 	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
 	     class == PCI_CLASS_BRIDGE_OTHER) &&
 		(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
+		(prog_if == 0) &&
 		(dev->bus->parent == NULL)) {
 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 			dev->resource[i].start = 0;
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 4e69deb..dd9e4a0 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -50,6 +50,9 @@
 	dn->data = pdn;
 	pdn->node = dn;
 	pdn->phb = phb;
+#ifdef CONFIG_PPC_POWERNV
+	pdn->pe_number = IODA_INVALID_PE;
+#endif
 	regs = of_get_property(dn, "reg", NULL);
 	if (regs) {
 		/* First register entry is addr (00BBSS00)  */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6457574..ebe5766 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -584,16 +584,32 @@
 	unsigned long bit;
 	const char *name;
 } msr_bits[] = {
+#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
+	{MSR_SF,	"SF"},
+	{MSR_HV,	"HV"},
+#endif
+	{MSR_VEC,	"VEC"},
+	{MSR_VSX,	"VSX"},
+#ifdef CONFIG_BOOKE
+	{MSR_CE,	"CE"},
+#endif
 	{MSR_EE,	"EE"},
 	{MSR_PR,	"PR"},
 	{MSR_FP,	"FP"},
-	{MSR_VEC,	"VEC"},
-	{MSR_VSX,	"VSX"},
 	{MSR_ME,	"ME"},
-	{MSR_CE,	"CE"},
+#ifdef CONFIG_BOOKE
 	{MSR_DE,	"DE"},
+#else
+	{MSR_SE,	"SE"},
+	{MSR_BE,	"BE"},
+#endif
 	{MSR_IR,	"IR"},
 	{MSR_DR,	"DR"},
+	{MSR_PMM,	"PMM"},
+#ifndef CONFIG_BOOKE
+	{MSR_RI,	"RI"},
+	{MSR_LE,	"LE"},
+#endif
 	{0,		NULL}
 };
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fa1235b..abe405d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -733,8 +733,6 @@
 	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
 
 	/* Scan memory nodes and rebuild MEMBLOCKs */
-	memblock_init();
-
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
@@ -756,20 +754,14 @@
 	early_reserve_mem();
 	phyp_dump_reserve_mem();
 
-	limit = memory_limit;
-	if (! limit) {
-		phys_addr_t memsize;
-
-		/* Ensure that total memory size is page-aligned, because
-		 * otherwise mark_bootmem() gets upset. */
-		memblock_analyze();
-		memsize = memblock_phys_mem_size();
-		if ((memsize & PAGE_MASK) != memsize)
-			limit = memsize & PAGE_MASK;
-	}
+	/*
+	 * Ensure that total memory size is page-aligned, because otherwise
+	 * mark_bootmem() gets upset.
+	 */
+	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
 	memblock_enforce_memory_limit(limit);
 
-	memblock_analyze();
+	memblock_allow_resize();
 	memblock_dump_all();
 
 	DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cc58486..eca626e 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -742,7 +742,7 @@
 	W(0xffffffff),			/* virt_base */
 	W(0xffffffff),			/* virt_size */
 	W(0xffffffff),			/* load_base */
-	W(64),				/* 64MB min RMA */
+	W(256),				/* 256MB min RMA */
 	W(0xffffffff),			/* full client load */
 	0,				/* min RMA percentage of total RAM */
 	48,				/* max log_2(hash table size) */
@@ -1224,14 +1224,6 @@
 
 	RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
 
-	/* Check if we have an initrd after the kernel, if we do move our bottom
-	 * point to after it
-	 */
-	if (RELOC(prom_initrd_start)) {
-		if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
-			RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
-	}
-
 	/*
 	 * If prom_memory_limit is set we reduce the upper limits *except* for
 	 * alloc_top_high. This must be the real top of RAM so we can put
@@ -1269,6 +1261,15 @@
 	RELOC(alloc_top) = RELOC(rmo_top);
 	RELOC(alloc_top_high) = RELOC(ram_top);
 
+	/*
+	 * Check if we have an initrd after the kernel but still inside
+	 * the RMO.  If we do, move our bottom point to after it.
+	 */
+	if (RELOC(prom_initrd_start) &&
+	    RELOC(prom_initrd_start) < RELOC(rmo_top) &&
+	    RELOC(prom_initrd_end) > RELOC(alloc_bottom))
+		RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
+
 	prom_printf("memory layout at init:\n");
 	prom_printf("  memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
 	prom_printf("  alloc_bottom : %x\n", RELOC(alloc_bottom));
@@ -2079,7 +2080,7 @@
 		/* Setup a usable color table when the appropriate
 		 * method is available. Should update this to set-colors */
 		clut = RELOC(default_colors);
-		for (i = 0; i < 32; i++, clut += 3)
+		for (i = 0; i < 16; i++, clut += 3)
 			if (prom_set_color(ih, i, clut[0], clut[1],
 					   clut[2]) != 0)
 				break;
@@ -2844,7 +2845,7 @@
 	RELOC(of_platform) = prom_find_machine_type();
 	prom_printf("Detected machine type: %x\n", RELOC(of_platform));
 
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
 	/* Bail if this is a kdump kernel. */
 	if (PHYSICAL_START > 0)
 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
@@ -2969,9 +2970,11 @@
 	/*
 	 * in case stdin is USB and still active on IBM machines...
 	 * Unfortunately quiesce crashes on some powermacs if we have
-	 * closed stdin already (in particular the powerbook 101).
+	 * closed stdin already (in particular the powerbook 101). It
+	 * appears that the OPAL version of OFW doesn't like it either.
 	 */
-	if (RELOC(of_platform) != PLATFORM_POWERMAC)
+	if (RELOC(of_platform) != PLATFORM_POWERMAC &&
+	    RELOC(of_platform) != PLATFORM_OPAL)
 		prom_close_stdin();
 
 	/*
@@ -2987,8 +2990,12 @@
 	 * is common to us and kexec
 	 */
 	hdr = RELOC(dt_header_start);
-	prom_printf("returning from prom_init\n");
-	prom_debug("->dt_header_start=0x%x\n", hdr);
+
+	/* Don't print anything after quiesce under OPAL, it crashes OFW */
+	if (RELOC(of_platform) != PLATFORM_OPAL) {
+		prom_printf("returning from prom_init\n");
+		prom_debug("->dt_header_start=0x%x\n", hdr);
+	}
 
 #ifdef CONFIG_PPC32
 	reloc_got2(-offset);
diff --git a/arch/powerpc/kernel/reloc_32.S b/arch/powerpc/kernel/reloc_32.S
new file mode 100644
index 0000000..ef46ba6
--- /dev/null
+++ b/arch/powerpc/kernel/reloc_32.S
@@ -0,0 +1,208 @@
+/*
+ * Code to process dynamic relocations for PPC32.
+ *
+ * Copyright (C) IBM Corporation, 2011.
+ *	Author: Suzuki Poulose <suzuki@in.ibm.com>
+ *
+ *  - Based on ppc64 code - reloc_64.S
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+
+/* Dynamic section table entry tags */
+DT_RELA = 7			/* Tag for Elf32_Rela section */
+DT_RELASZ = 8			/* Size of the Rela relocs */
+DT_RELAENT = 9			/* Size of one Rela reloc entry */
+
+STN_UNDEF = 0			/* Undefined symbol index */
+STB_LOCAL = 0			/* Local binding for the symbol */
+
+R_PPC_ADDR16_LO = 4		/* Lower half of (S+A) */
+R_PPC_ADDR16_HI = 5		/* Upper half of (S+A) */
+R_PPC_ADDR16_HA = 6		/* High Adjusted (S+A) */
+R_PPC_RELATIVE = 22
+
+/*
+ * r3 = desired final address
+ */
+
+_GLOBAL(relocate)
+
+	mflr	r0		/* Save our LR */
+	bl	0f		/* Find our current runtime address */
+0:	mflr	r12		/* Make it accessible */
+	mtlr	r0
+
+	lwz	r11, (p_dyn - 0b)(r12)
+	add	r11, r11, r12	/* runtime address of .dynamic section */
+	lwz	r9, (p_rela - 0b)(r12)
+	add	r9, r9, r12	/* runtime address of .rela.dyn section */
+	lwz	r10, (p_st - 0b)(r12)
+	add	r10, r10, r12	/* runtime address of _stext section */
+	lwz	r13, (p_sym - 0b)(r12)
+	add	r13, r13, r12	/* runtime address of .dynsym section */
+
+	/*
+	 * Scan the dynamic section for the RELA, RELASZ and RELAENT entries
+	 */
+	li	r6, 0
+	li	r7, 0
+	li	r8, 0
+1:	lwz	r5, 0(r11)	/* ELF_Dyn.d_tag */
+	cmpwi	r5, 0		/* End of ELF_Dyn[] */
+	beq	eodyn
+	cmpwi	r5, DT_RELA
+	bne	relasz
+	lwz	r7, 4(r11)	/* r7 = rela.link */
+	b	skip
+relasz:
+	cmpwi	r5, DT_RELASZ
+	bne	relaent
+	lwz	r8, 4(r11)	/* r8 = Total Rela relocs size */
+	b	skip
+relaent:
+	cmpwi	r5, DT_RELAENT
+	bne	skip
+	lwz	r6, 4(r11)	/* r6 = Size of one Rela reloc */
+skip:
+	addi	r11, r11, 8
+	b	1b
+eodyn:				/* End of Dyn Table scan */
+
+	/* Check if we have found all the entries */
+	cmpwi	r7, 0
+	beq	done
+	cmpwi	r8, 0
+	beq	done
+	cmpwi	r6, 0
+	beq	done
+
+
+	/*
+	 * Work out the current offset from the link time address of .rela
+	 * section.
+	 *  cur_offset[r7] = rela.run[r9] - rela.link[r7]
+	 *  _stext.link[r12] = _stext.run[r10] - cur_offset[r7]
+	 *  final_offset[r3] = _stext.final[r3] - _stext.link[r12]
+	 */
+	subf	r7, r7, r9	/* cur_offset */
+	subf	r12, r7, r10
+	subf	r3, r12, r3	/* final_offset */
+
+	subf	r8, r6, r8	/* relasz -= relaent */
+	/*
+	 * Scan through the .rela table and process each entry
+	 * r9	- points to the current .rela table entry
+	 * r13	- points to the symbol table
+	 */
+
+	/*
+	 * Check if we have a relocation based on a symbol;
+	 * r5 will hold the value of the symbol.
+	 */
+applyrela:
+	lwz	r4, 4(r9)		/* r4 = rela.r_info */
+	srwi	r5, r4, 8		/* ELF32_R_SYM(r_info) */
+	cmpwi	r5, STN_UNDEF	/* sym == STN_UNDEF ? */
+	beq	get_type	/* value = 0 */
+	/* Find the value of the symbol at index(r5) */
+	slwi	r5, r5, 4		/* r5 = r5 * sizeof(Elf32_Sym) */
+	add	r12, r13, r5	/* r12 = &__dyn_sym[Index] */
+
+	/*
+	 * GNU ld has a bug: for dynamic relocs based on
+	 * STB_LOCAL symbols, the value should be assumed
+	 * to be zero. - Alan Modra
+	 */
+	/* XXX: Do we need to check if we are using GNU ld ? */
+	lbz	r5, 12(r12)	/* r5 = dyn_sym[Index].st_info */
+	extrwi	r5, r5, 4, 24	/* r5 = ELF32_ST_BIND(r5) */
+	cmpwi	r5, STB_LOCAL	/* st_value = 0, ld bug */
+	beq	get_type	/* We have r5 = 0 */
+	lwz	r5, 4(r12)	/* r5 = __dyn_sym[Index].st_value */
+
+get_type:
+	/* Load the relocation type to r4 */
+	extrwi	r4, r4, 8, 24	/* r4 = ELF32_R_TYPE(r_info) = ((char*)r4)[3] */
+
+	/* R_PPC_RELATIVE */
+	cmpwi	r4, R_PPC_RELATIVE
+	bne	hi16
+	lwz	r4, 0(r9)	/* r_offset */
+	lwz	r0, 8(r9)	/* r_addend */
+	add	r0, r0, r3	/* final addend */
+	stwx	r0, r4, r7	/* memory[r4+r7] = (u32)r0 */
+	b	nxtrela		/* continue */
+
+	/* R_PPC_ADDR16_HI */
+hi16:
+	cmpwi	r4, R_PPC_ADDR16_HI
+	bne	ha16
+	lwz	r4, 0(r9)	/* r_offset */
+	lwz	r0, 8(r9)	/* r_addend */
+	add	r0, r0, r3
+	add	r0, r0, r5	/* r0 = (S+A+Offset) */
+	extrwi	r0, r0, 16, 0	/* r0 = (r0 >> 16) */
+	b	store_half
+
+	/* R_PPC_ADDR16_HA */
+ha16:
+	cmpwi	r4, R_PPC_ADDR16_HA
+	bne	lo16
+	lwz	r4, 0(r9)	/* r_offset */
+	lwz	r0, 8(r9)	/* r_addend */
+	add	r0, r0, r3
+	add	r0, r0, r5	/* r0 = (S+A+Offset) */
+	extrwi	r5, r0, 1, 16	/* Extract bit 16 */
+	extrwi	r0, r0, 16, 0	/* r0 = (r0 >> 16) */
+	add	r0, r0, r5	/* Add it to r0 */
+	b	store_half
+
+	/* R_PPC_ADDR16_LO */
+lo16:
+	cmpwi	r4, R_PPC_ADDR16_LO
+	bne	nxtrela
+	lwz	r4, 0(r9)	/* r_offset */
+	lwz	r0, 8(r9)	/* r_addend */
+	add	r0, r0, r3
+	add	r0, r0, r5	/* r0 = (S+A+Offset) */
+	extrwi	r0, r0, 16, 16	/* r0 &= 0xffff */
+	/* Fall through to store_half */
+
+	/* Store half word */
+store_half:
+	sthx	r0, r4, r7	/* memory[r4+r7] = (u16)r0 */
+
+nxtrela:
+	/*
+	 * We have to flush the modified instructions to the
+	 * main storage from the d-cache, and invalidate any copies
+	 * of the modified instructions in the i-cache.
+	 *
+	 * We delay the sync / isync operation till the end, since
+	 * we won't be executing the modified instructions until
+	 * we return from here.
+	 */
+	dcbst	r4,r7
+	sync			/* Ensure the data is flushed before icbi */
+	icbi	r4,r7
+	cmpwi	r8, 0		/* relasz = 0 ? */
+	ble	done
+	add	r9, r9, r6	/* move to next entry in the .rela table */
+	subf	r8, r6, r8	/* relasz -= relaent */
+	b	applyrela
+
+done:
+	sync			/* Wait for the flush to finish */
+	isync			/* Discard prefetched instructions */
+	blr
+
+p_dyn:		.long	__dynamic_start - 0b
+p_rela:		.long	__rela_dyn_start - 0b
+p_sym:		.long	__dynamic_symtab - 0b
+p_st:		.long	_stext - 0b
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index e037c74..4174b4b 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -568,6 +568,12 @@
 	}
 
 	/*
+	 * Just before starting the firmware flash, cancel the event scan work
+	 * to avoid any soft lockup issues.
+	 */
+	rtas_cancel_event_scan();
+
+	/*
 	 * NOTE: the "first" block must be under 4GB, so we create
 	 * an entry with no data blocks in the reserved buffer in
 	 * the kernel data segment.
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 481ef06..1045ff4 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -472,6 +472,13 @@
 				 &event_scan_work, event_scan_delay);
 }
 
+/* Cancel the rtas event scan work */
+void rtas_cancel_event_scan(void)
+{
+	cancel_delayed_work_sync(&event_scan_work);
+}
+EXPORT_SYMBOL_GPL(rtas_cancel_event_scan);
+
 static int __init rtas_init(void)
 {
 	struct proc_dir_entry *entry;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index fb9bb46..4cb8f1e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -35,6 +35,8 @@
 #include <linux/pci.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
+#include <linux/hugetlb.h>
+
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -64,6 +66,7 @@
 #include <asm/mmu_context.h>
 #include <asm/code-patching.h>
 #include <asm/kvm_ppc.h>
+#include <asm/hugetlb.h>
 
 #include "setup.h"
 
@@ -217,6 +220,13 @@
 	/* Initialize the hash table or TLB handling */
 	early_init_mmu();
 
+	/*
+	 * Reserve any gigantic pages requested on the command line.
+	 * memblock needs to have been initialized by the time this is
+	 * called since this will reserve memory.
+	 */
+	reserve_hugetlb_gpages();
+
 	DBG(" <- early_setup()\n");
 }
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6df7090..46695fe 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -27,7 +27,7 @@
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/topology.h>
@@ -187,7 +187,8 @@
 		return 1;
 	}
 #endif
-	err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
+	err = request_irq(virq, smp_ipi_action[msg],
+			  IRQF_PERCPU | IRQF_NO_THREAD,
 			  smp_ipi_name[msg], 0);
 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 		virq, smp_ipi_name[msg], err);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index ce035c1..883e74c 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -1,4 +1,4 @@
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -18,6 +18,7 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
+#include <asm/system.h>
 
 #include "cacheinfo.h"
 
@@ -37,12 +38,12 @@
 /* Time in microseconds we delay before sleeping in the idle loop */
 DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
 
-static ssize_t store_smt_snooze_delay(struct sys_device *dev,
-				      struct sysdev_attribute *attr,
+static ssize_t store_smt_snooze_delay(struct device *dev,
+				      struct device_attribute *attr,
 				      const char *buf,
 				      size_t count)
 {
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	ssize_t ret;
 	long snooze;
 
@@ -50,21 +51,22 @@
 	if (ret != 1)
 		return -EINVAL;
 
-	per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;
+	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
+	update_smt_snooze_delay(snooze);
 
 	return count;
 }
 
-static ssize_t show_smt_snooze_delay(struct sys_device *dev,
-				     struct sysdev_attribute *attr,
+static ssize_t show_smt_snooze_delay(struct device *dev,
+				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
-	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
+	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
 }
 
-static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
+static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
 		   store_smt_snooze_delay);
 
 static int __init setup_smt_snooze_delay(char *str)
@@ -117,25 +119,25 @@
 	ppc_enable_pmcs(); \
 	mtspr(ADDRESS, *(unsigned long *)val);	\
 } \
-static ssize_t show_##NAME(struct sys_device *dev, \
-			struct sysdev_attribute *attr, \
+static ssize_t show_##NAME(struct device *dev, \
+			struct device_attribute *attr, \
 			char *buf) \
 { \
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
+	struct cpu *cpu = container_of(dev, struct cpu, dev); \
 	unsigned long val; \
-	smp_call_function_single(cpu->sysdev.id, read_##NAME, &val, 1);	\
+	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
 	return sprintf(buf, "%lx\n", val); \
 } \
 static ssize_t __used \
-	store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \
+	store_##NAME(struct device *dev, struct device_attribute *attr, \
 			const char *buf, size_t count) \
 { \
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
+	struct cpu *cpu = container_of(dev, struct cpu, dev); \
 	unsigned long val; \
 	int ret = sscanf(buf, "%lx", &val); \
 	if (ret != 1) \
 		return -EINVAL; \
-	smp_call_function_single(cpu->sysdev.id, write_##NAME, &val, 1); \
+	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
 	return count; \
 }
 
@@ -177,23 +179,25 @@
 SYSFS_PMCSETUP(purr, SPRN_PURR);
 SYSFS_PMCSETUP(spurr, SPRN_SPURR);
 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
+SYSFS_PMCSETUP(pir, SPRN_PIR);
 
-static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
-static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
-static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
-static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+static DEVICE_ATTR(spurr, 0600, show_spurr, NULL);
+static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
+static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(pir, 0400, show_pir, NULL);
 
 unsigned long dscr_default = 0;
 EXPORT_SYMBOL(dscr_default);
 
-static ssize_t show_dscr_default(struct sysdev_class *class,
-		struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_dscr_default(struct device *dev,
+		struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%lx\n", dscr_default);
 }
 
-static ssize_t __used store_dscr_default(struct sysdev_class *class,
-		struct sysdev_class_attribute *attr, const char *buf,
+static ssize_t __used store_dscr_default(struct device *dev,
+		struct device_attribute *attr, const char *buf,
 		size_t count)
 {
 	unsigned long val;
@@ -207,15 +211,14 @@
 	return count;
 }
 
-static SYSDEV_CLASS_ATTR(dscr_default, 0600,
+static DEVICE_ATTR(dscr_default, 0600,
 		show_dscr_default, store_dscr_default);
 
 static void sysfs_create_dscr_default(void)
 {
 	int err = 0;
 	if (cpu_has_feature(CPU_FTR_DSCR))
-		err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
-			&attr_dscr_default.attr);
+		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
 }
 #endif /* CONFIG_PPC64 */
 
@@ -259,72 +262,72 @@
 #endif /* HAS_PPC_PMC_PA6T */
 
 #ifdef HAS_PPC_PMC_IBM
-static struct sysdev_attribute ibm_common_attrs[] = {
-	_SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
-	_SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
+static struct device_attribute ibm_common_attrs[] = {
+	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
+	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
 };
 #endif /* HAS_PPC_PMC_G4 */
 
 #ifdef HAS_PPC_PMC_G4
-static struct sysdev_attribute g4_common_attrs[] = {
-	_SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
-	_SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
-	_SYSDEV_ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
+static struct device_attribute g4_common_attrs[] = {
+	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
+	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
+	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
 };
 #endif /* HAS_PPC_PMC_G4 */
 
-static struct sysdev_attribute classic_pmc_attrs[] = {
-	_SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1),
-	_SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2),
-	_SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3),
-	_SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4),
-	_SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5),
-	_SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6),
+static struct device_attribute classic_pmc_attrs[] = {
+	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
+	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
+	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
+	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
+	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
+	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
 #ifdef CONFIG_PPC64
-	_SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7),
-	_SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8),
+	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
+	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
 #endif
 };
 
 #ifdef HAS_PPC_PMC_PA6T
-static struct sysdev_attribute pa6t_attrs[] = {
-	_SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
-	_SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
-	_SYSDEV_ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
-	_SYSDEV_ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
-	_SYSDEV_ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
-	_SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
-	_SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
-	_SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
+static struct device_attribute pa6t_attrs[] = {
+	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
+	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
+	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
+	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
+	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
+	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
+	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
+	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
 #ifdef CONFIG_DEBUG_KERNEL
-	_SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0),
-	_SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1),
-	_SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4),
-	_SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5),
-	_SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0),
-	_SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1),
-	_SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2),
-	_SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3),
-	_SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4),
-	_SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5),
-	_SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6),
-	_SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7),
-	_SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8),
-	_SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9),
-	_SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat),
-	_SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr),
-	_SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr),
-	_SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr),
-	_SYSDEV_ATTR(der, 0600, show_der, store_der),
-	_SYSDEV_ATTR(mer, 0600, show_mer, store_mer),
-	_SYSDEV_ATTR(ber, 0600, show_ber, store_ber),
-	_SYSDEV_ATTR(ier, 0600, show_ier, store_ier),
-	_SYSDEV_ATTR(sier, 0600, show_sier, store_sier),
-	_SYSDEV_ATTR(siar, 0600, show_siar, store_siar),
-	_SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0),
-	_SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1),
-	_SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2),
-	_SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3),
+	__ATTR(hid0, 0600, show_hid0, store_hid0),
+	__ATTR(hid1, 0600, show_hid1, store_hid1),
+	__ATTR(hid4, 0600, show_hid4, store_hid4),
+	__ATTR(hid5, 0600, show_hid5, store_hid5),
+	__ATTR(ima0, 0600, show_ima0, store_ima0),
+	__ATTR(ima1, 0600, show_ima1, store_ima1),
+	__ATTR(ima2, 0600, show_ima2, store_ima2),
+	__ATTR(ima3, 0600, show_ima3, store_ima3),
+	__ATTR(ima4, 0600, show_ima4, store_ima4),
+	__ATTR(ima5, 0600, show_ima5, store_ima5),
+	__ATTR(ima6, 0600, show_ima6, store_ima6),
+	__ATTR(ima7, 0600, show_ima7, store_ima7),
+	__ATTR(ima8, 0600, show_ima8, store_ima8),
+	__ATTR(ima9, 0600, show_ima9, store_ima9),
+	__ATTR(imaat, 0600, show_imaat, store_imaat),
+	__ATTR(btcr, 0600, show_btcr, store_btcr),
+	__ATTR(pccr, 0600, show_pccr, store_pccr),
+	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
+	__ATTR(der, 0600, show_der, store_der),
+	__ATTR(mer, 0600, show_mer, store_mer),
+	__ATTR(ber, 0600, show_ber, store_ber),
+	__ATTR(ier, 0600, show_ier, store_ier),
+	__ATTR(sier, 0600, show_sier, store_sier),
+	__ATTR(siar, 0600, show_siar, store_siar),
+	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
+	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
+	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
+	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
 #endif /* CONFIG_DEBUG_KERNEL */
 };
 #endif /* HAS_PPC_PMC_PA6T */
@@ -333,14 +336,14 @@
 static void __cpuinit register_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
-	struct sys_device *s = &c->sysdev;
-	struct sysdev_attribute *attrs, *pmc_attrs;
+	struct device *s = &c->dev;
+	struct device_attribute *attrs, *pmc_attrs;
 	int i, nattrs;
 
 #ifdef CONFIG_PPC64
 	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
 			cpu_has_feature(CPU_FTR_SMT))
-		sysdev_create_file(s, &attr_smt_snooze_delay);
+		device_create_file(s, &dev_attr_smt_snooze_delay);
 #endif
 
 	/* PMC stuff */
@@ -348,14 +351,14 @@
 #ifdef HAS_PPC_PMC_IBM
 	case PPC_PMC_IBM:
 		attrs = ibm_common_attrs;
-		nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
+		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
 		pmc_attrs = classic_pmc_attrs;
 		break;
 #endif /* HAS_PPC_PMC_IBM */
 #ifdef HAS_PPC_PMC_G4
 	case PPC_PMC_G4:
 		attrs = g4_common_attrs;
-		nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
+		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
 		pmc_attrs = classic_pmc_attrs;
 		break;
 #endif /* HAS_PPC_PMC_G4 */
@@ -363,7 +366,7 @@
 	case PPC_PMC_PA6T:
 		/* PA Semi starts counting at PMC0 */
 		attrs = pa6t_attrs;
-		nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
+		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
 		pmc_attrs = NULL;
 		break;
 #endif /* HAS_PPC_PMC_PA6T */
@@ -374,24 +377,27 @@
 	}
 
 	for (i = 0; i < nattrs; i++)
-		sysdev_create_file(s, &attrs[i]);
+		device_create_file(s, &attrs[i]);
 
 	if (pmc_attrs)
 		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
-			sysdev_create_file(s, &pmc_attrs[i]);
+			device_create_file(s, &pmc_attrs[i]);
 
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_MMCRA))
-		sysdev_create_file(s, &attr_mmcra);
+		device_create_file(s, &dev_attr_mmcra);
 
 	if (cpu_has_feature(CPU_FTR_PURR))
-		sysdev_create_file(s, &attr_purr);
+		device_create_file(s, &dev_attr_purr);
 
 	if (cpu_has_feature(CPU_FTR_SPURR))
-		sysdev_create_file(s, &attr_spurr);
+		device_create_file(s, &dev_attr_spurr);
 
 	if (cpu_has_feature(CPU_FTR_DSCR))
-		sysdev_create_file(s, &attr_dscr);
+		device_create_file(s, &dev_attr_dscr);
+
+	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
+		device_create_file(s, &dev_attr_pir);
 #endif /* CONFIG_PPC64 */
 
 	cacheinfo_cpu_online(cpu);
@@ -401,8 +407,8 @@
 static void unregister_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
-	struct sys_device *s = &c->sysdev;
-	struct sysdev_attribute *attrs, *pmc_attrs;
+	struct device *s = &c->dev;
+	struct device_attribute *attrs, *pmc_attrs;
 	int i, nattrs;
 
 	BUG_ON(!c->hotpluggable);
@@ -410,7 +416,7 @@
 #ifdef CONFIG_PPC64
 	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
 			cpu_has_feature(CPU_FTR_SMT))
-		sysdev_remove_file(s, &attr_smt_snooze_delay);
+		device_remove_file(s, &dev_attr_smt_snooze_delay);
 #endif
 
 	/* PMC stuff */
@@ -418,14 +424,14 @@
 #ifdef HAS_PPC_PMC_IBM
 	case PPC_PMC_IBM:
 		attrs = ibm_common_attrs;
-		nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
+		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
 		pmc_attrs = classic_pmc_attrs;
 		break;
 #endif /* HAS_PPC_PMC_IBM */
 #ifdef HAS_PPC_PMC_G4
 	case PPC_PMC_G4:
 		attrs = g4_common_attrs;
-		nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
+		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
 		pmc_attrs = classic_pmc_attrs;
 		break;
 #endif /* HAS_PPC_PMC_G4 */
@@ -433,7 +439,7 @@
 	case PPC_PMC_PA6T:
 		/* PA Semi starts counting at PMC0 */
 		attrs = pa6t_attrs;
-		nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
+		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
 		pmc_attrs = NULL;
 		break;
 #endif /* HAS_PPC_PMC_PA6T */
@@ -444,24 +450,27 @@
 	}
 
 	for (i = 0; i < nattrs; i++)
-		sysdev_remove_file(s, &attrs[i]);
+		device_remove_file(s, &attrs[i]);
 
 	if (pmc_attrs)
 		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
-			sysdev_remove_file(s, &pmc_attrs[i]);
+			device_remove_file(s, &pmc_attrs[i]);
 
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_MMCRA))
-		sysdev_remove_file(s, &attr_mmcra);
+		device_remove_file(s, &dev_attr_mmcra);
 
 	if (cpu_has_feature(CPU_FTR_PURR))
-		sysdev_remove_file(s, &attr_purr);
+		device_remove_file(s, &dev_attr_purr);
 
 	if (cpu_has_feature(CPU_FTR_SPURR))
-		sysdev_remove_file(s, &attr_spurr);
+		device_remove_file(s, &dev_attr_spurr);
 
 	if (cpu_has_feature(CPU_FTR_DSCR))
-		sysdev_remove_file(s, &attr_dscr);
+		device_remove_file(s, &dev_attr_dscr);
+
+	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
+		device_remove_file(s, &dev_attr_pir);
 #endif /* CONFIG_PPC64 */
 
 	cacheinfo_cpu_offline(cpu);
@@ -513,70 +522,70 @@
 
 static DEFINE_MUTEX(cpu_mutex);
 
-int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
+int cpu_add_dev_attr(struct device_attribute *attr)
 {
 	int cpu;
 
 	mutex_lock(&cpu_mutex);
 
 	for_each_possible_cpu(cpu) {
-		sysdev_create_file(get_cpu_sysdev(cpu), attr);
+		device_create_file(get_cpu_device(cpu), attr);
 	}
 
 	mutex_unlock(&cpu_mutex);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
+EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
 
-int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
+int cpu_add_dev_attr_group(struct attribute_group *attrs)
 {
 	int cpu;
-	struct sys_device *sysdev;
+	struct device *dev;
 	int ret;
 
 	mutex_lock(&cpu_mutex);
 
 	for_each_possible_cpu(cpu) {
-		sysdev = get_cpu_sysdev(cpu);
-		ret = sysfs_create_group(&sysdev->kobj, attrs);
+		dev = get_cpu_device(cpu);
+		ret = sysfs_create_group(&dev->kobj, attrs);
 		WARN_ON(ret != 0);
 	}
 
 	mutex_unlock(&cpu_mutex);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
 
 
-void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
+void cpu_remove_dev_attr(struct device_attribute *attr)
 {
 	int cpu;
 
 	mutex_lock(&cpu_mutex);
 
 	for_each_possible_cpu(cpu) {
-		sysdev_remove_file(get_cpu_sysdev(cpu), attr);
+		device_remove_file(get_cpu_device(cpu), attr);
 	}
 
 	mutex_unlock(&cpu_mutex);
 }
-EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
+EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
 
-void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
+void cpu_remove_dev_attr_group(struct attribute_group *attrs)
 {
 	int cpu;
-	struct sys_device *sysdev;
+	struct device *dev;
 
 	mutex_lock(&cpu_mutex);
 
 	for_each_possible_cpu(cpu) {
-		sysdev = get_cpu_sysdev(cpu);
-		sysfs_remove_group(&sysdev->kobj, attrs);
+		dev = get_cpu_device(cpu);
+		sysfs_remove_group(&dev->kobj, attrs);
 	}
 
 	mutex_unlock(&cpu_mutex);
 }
-EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
 
 
 /* NUMA stuff */
@@ -590,18 +599,18 @@
 		register_one_node(i);
 }
 
-int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+int sysfs_add_device_to_node(struct device *dev, int nid)
 {
 	struct node *node = &node_devices[nid];
-	return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
+	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
 			kobject_name(&dev->kobj));
 }
 EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
 
-void sysfs_remove_device_from_node(struct sys_device *dev, int nid)
+void sysfs_remove_device_from_node(struct device *dev, int nid)
 {
 	struct node *node = &node_devices[nid];
-	sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
+	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
 }
 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
 
@@ -614,14 +623,14 @@
 #endif
 
 /* Only valid if CPU is present. */
-static ssize_t show_physical_id(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_physical_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
-	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
+	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
 }
-static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
+static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
 
 static int __init topology_init(void)
 {
@@ -646,7 +655,7 @@
 		if (cpu_online(cpu) || c->hotpluggable) {
 			register_cpu(c, cpu);
 
-			sysdev_create_file(&c->sysdev, &attr_physical_id);
+			device_create_file(&c->dev, &dev_attr_physical_id);
 		}
 
 		if (cpu_online(cpu))
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 522bb1d..567dd7c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -86,8 +86,6 @@
 	.rating       = 400,
 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
 	.mask         = CLOCKSOURCE_MASK(64),
-	.shift        = 22,
-	.mult         = 0,	/* To be filled in */
 	.read         = rtc_read,
 };
 
@@ -97,8 +95,6 @@
 	.rating       = 400,
 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
 	.mask         = CLOCKSOURCE_MASK(64),
-	.shift        = 22,
-	.mult         = 0,	/* To be filled in */
 	.read         = timebase_read,
 };
 
@@ -110,22 +106,16 @@
 				 struct clock_event_device *dev);
 
 static struct clock_event_device decrementer_clockevent = {
-       .name           = "decrementer",
-       .rating         = 200,
-       .shift          = 0,	/* To be filled in */
-       .mult           = 0,	/* To be filled in */
-       .irq            = 0,
-       .set_next_event = decrementer_set_next_event,
-       .set_mode       = decrementer_set_mode,
-       .features       = CLOCK_EVT_FEAT_ONESHOT,
+	.name           = "decrementer",
+	.rating         = 200,
+	.irq            = 0,
+	.set_next_event = decrementer_set_next_event,
+	.set_mode       = decrementer_set_mode,
+	.features       = CLOCK_EVT_FEAT_ONESHOT,
 };
 
-struct decrementer_clock {
-	struct clock_event_device event;
-	u64 next_tb;
-};
-
-static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
+DEFINE_PER_CPU(u64, decrementers_next_tb);
+static DEFINE_PER_CPU(struct clock_event_device, decrementers);
 
 #ifdef CONFIG_PPC_ISERIES
 static unsigned long __initdata iSeries_recal_titan;
@@ -168,13 +158,13 @@
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
  * Factors for converting from cputime_t (timebase ticks) to
- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
  * These are all stored as 0.64 fixed-point binary fractions.
  */
 u64 __cputime_jiffies_factor;
 EXPORT_SYMBOL(__cputime_jiffies_factor);
-u64 __cputime_msec_factor;
-EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_usec_factor;
+EXPORT_SYMBOL(__cputime_usec_factor);
 u64 __cputime_sec_factor;
 EXPORT_SYMBOL(__cputime_sec_factor);
 u64 __cputime_clockt_factor;
@@ -192,8 +182,8 @@
 
 	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
 	__cputime_jiffies_factor = res.result_low;
-	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
-	__cputime_msec_factor = res.result_low;
+	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
+	__cputime_usec_factor = res.result_low;
 	div128_by_32(1, 0, tb_ticks_per_sec, &res);
 	__cputime_sec_factor = res.result_low;
 	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
@@ -441,7 +431,7 @@
 /* 
  * This function recalibrates the timebase based on the 49-bit time-of-day
  * value in the Titan chip.  The Titan is much more accurate than the value
- * returned by the service processor for the timebase frequency.  
+ * returned by the service processor for the timebase frequency.
  */
 
 static int __init iSeries_tb_recal(void)
@@ -576,9 +566,8 @@
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	struct decrementer_clock *decrementer =  &__get_cpu_var(decrementers);
-	struct clock_event_device *evt = &decrementer->event;
-	u64 now;
+	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	struct clock_event_device *evt = &__get_cpu_var(decrementers);
 
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions.
@@ -613,16 +602,9 @@
 		get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	now = get_tb_or_rtc();
-	if (now >= decrementer->next_tb) {
-		decrementer->next_tb = ~(u64)0;
-		if (evt->event_handler)
-			evt->event_handler(evt);
-	} else {
-		now = decrementer->next_tb - now;
-		if (now <= DECREMENTER_MAX)
-			set_dec((int)now);
-	}
+	*next_tb = ~(u64)0;
+	if (evt->event_handler)
+		evt->event_handler(evt);
 
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
@@ -650,9 +632,9 @@
 	 * with suspending.
 	 */
 
-	set_dec(0x7fffffff);
+	set_dec(DECREMENTER_MAX);
 	local_irq_disable();
-	set_dec(0x7fffffff);
+	set_dec(DECREMENTER_MAX);
 }
 
 static void generic_suspend_enable_irqs(void)
@@ -824,9 +806,8 @@
 	++vdso_data->tb_update_count;
 	smp_mb();
 
-	/* XXX this assumes clock->shift == 22 */
-	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
-	new_tb_to_xs = (u64) mult * 4611686018ULL;
+	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
+	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
 	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 	do_div(new_stamp_xsec, 1000000000);
 	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
@@ -877,9 +858,7 @@
 	else
 		clock = &clocksource_timebase;
 
-	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
-
-	if (clocksource_register(clock)) {
+	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
 		printk(KERN_ERR "clocksource: %s is already registered\n",
 		       clock->name);
 		return;
@@ -892,7 +871,7 @@
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
-	__get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
+	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
 	set_dec(evt);
 	return 0;
 }
@@ -904,34 +883,9 @@
 		decrementer_set_next_event(DECREMENTER_MAX, dev);
 }
 
-static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
-				int shift)
-{
-	uint64_t tmp = ((uint64_t)ticks) << shift;
-
-	do_div(tmp, nsec);
-	return tmp;
-}
-
-static void __init setup_clockevent_multiplier(unsigned long hz)
-{
-	u64 mult, shift = 32;
-
-	while (1) {
-		mult = div_sc64(hz, NSEC_PER_SEC, shift);
-		if (mult && (mult >> 32UL) == 0UL)
-			break;
-
-		shift--;
-	}
-
-	decrementer_clockevent.shift = shift;
-	decrementer_clockevent.mult = mult;
-}
-
 static void register_decrementer_clockevent(int cpu)
 {
-	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
+	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
 
 	*dec = decrementer_clockevent;
 	dec->cpumask = cpumask_of(cpu);
@@ -946,7 +900,8 @@
 {
 	int cpu = smp_processor_id();
 
-	setup_clockevent_multiplier(ppc_tb_freq);
+	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
+
 	decrementer_clockevent.max_delta_ns =
 		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
 	decrementer_clockevent.min_delta_ns =
@@ -1014,10 +969,10 @@
 	boot_tb = get_tb_or_rtc();
 
 	/* If platform provided a timezone (pmac), we correct the time */
-        if (timezone_offset) {
+	if (timezone_offset) {
 		sys_tz.tz_minuteswest = -timezone_offset / 60;
 		sys_tz.tz_dsttime = 0;
-        }
+	}
 
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5459d14..c091527 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -98,18 +98,14 @@
 static inline void pmac_backlight_unblank(void) { }
 #endif
 
-int die(const char *str, struct pt_regs *regs, long err)
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
+static int die_counter;
+
+static unsigned __kprobes long oops_begin(struct pt_regs *regs)
 {
-	static struct {
-		raw_spinlock_t lock;
-		u32 lock_owner;
-		int lock_owner_depth;
-	} die = {
-		.lock =			__RAW_SPIN_LOCK_UNLOCKED(die.lock),
-		.lock_owner =		-1,
-		.lock_owner_depth =	0
-	};
-	static int die_counter;
+	int cpu;
 	unsigned long flags;
 
 	if (debugger(regs))
@@ -117,66 +113,109 @@
 
 	oops_enter();
 
-	if (die.lock_owner != raw_smp_processor_id()) {
-		console_verbose();
-		raw_spin_lock_irqsave(&die.lock, flags);
-		die.lock_owner = smp_processor_id();
-		die.lock_owner_depth = 0;
-		bust_spinlocks(1);
-		if (machine_is(powermac))
-			pmac_backlight_unblank();
-	} else {
-		local_save_flags(flags);
+	/* racy, but better than risking deadlock. */
+	raw_local_irq_save(flags);
+	cpu = smp_processor_id();
+	if (!arch_spin_trylock(&die_lock)) {
+		if (cpu == die_owner)
+			/* nested oops. should stop eventually */;
+		else
+			arch_spin_lock(&die_lock);
 	}
+	die_nest_count++;
+	die_owner = cpu;
+	console_verbose();
+	bust_spinlocks(1);
+	if (machine_is(powermac))
+		pmac_backlight_unblank();
+	return flags;
+}
 
-	if (++die.lock_owner_depth < 3) {
-		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
-		printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
-		printk("SMP NR_CPUS=%d ", NR_CPUS);
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		printk("DEBUG_PAGEALLOC ");
-#endif
-#ifdef CONFIG_NUMA
-		printk("NUMA ");
-#endif
-		printk("%s\n", ppc_md.name ? ppc_md.name : "");
-
-		if (notify_die(DIE_OOPS, str, regs, err, 255,
-			       SIGSEGV) == NOTIFY_STOP)
-			return 1;
-
-		print_modules();
-		show_regs(regs);
-	} else {
-		printk("Recursive die() failure, output suppressed\n");
-	}
-
+static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+			       int signr)
+{
 	bust_spinlocks(0);
-	die.lock_owner = -1;
+	die_owner = -1;
 	add_taint(TAINT_DIE);
-	raw_spin_unlock_irqrestore(&die.lock, flags);
+	die_nest_count--;
+	oops_exit();
+	printk("\n");
+	if (!die_nest_count)
+		/* Nest count reaches zero, release the lock. */
+		arch_spin_unlock(&die_lock);
+	raw_local_irq_restore(flags);
 
-	if (kexec_should_crash(current) ||
-		kexec_sr_activated(smp_processor_id()))
+	/*
+	 * A system reset (0x100) is a request to dump, so we always send
+	 * it through the crashdump code.
+	 */
+	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
 		crash_kexec(regs);
-	crash_kexec_secondary(regs);
+
+		/*
+		 * We aren't the primary crash CPU. We need to send it
+		 * to a holding pattern to avoid it ending up in the panic
+		 * code.
+		 */
+		crash_kexec_secondary(regs);
+	}
+
+	if (!signr)
+		return;
+
+	/*
+	 * While our oops output is serialised by a spinlock, output
+	 * from panic() called below can race and corrupt it. If we
+	 * know we are going to panic, delay for 1 second so we have a
+	 * chance to get clean backtraces from all CPUs that are oopsing.
+	 */
+	if (in_interrupt() || panic_on_oops || !current->pid ||
+	    is_global_init(current)) {
+		mdelay(MSEC_PER_SEC);
+	}
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
-
 	if (panic_on_oops)
 		panic("Fatal exception");
+	do_exit(signr);
+}
 
-	oops_exit();
-	do_exit(err);
+static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+{
+	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+	printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+	printk("SMP NR_CPUS=%d ", NR_CPUS);
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	printk("DEBUG_PAGEALLOC ");
+#endif
+#ifdef CONFIG_NUMA
+	printk("NUMA ");
+#endif
+	printk("%s\n", ppc_md.name ? ppc_md.name : "");
+
+	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
+		return 1;
+
+	print_modules();
+	show_regs(regs);
 
 	return 0;
 }
 
+void die(const char *str, struct pt_regs *regs, long err)
+{
+	unsigned long flags = oops_begin(regs);
+
+	if (__die(str, regs, err))
+		err = 0;
+	oops_end(flags, regs, err);
+}
+
 void user_single_step_siginfo(struct task_struct *tsk,
 				struct pt_regs *regs, siginfo_t *info)
 {
@@ -195,10 +234,11 @@
 			"at %016lx nip %016lx lr %016lx code %x\n";
 
 	if (!user_mode(regs)) {
-		if (die("Exception in kernel mode", regs, signr))
-			return;
-	} else if (show_unhandled_signals &&
-		   unhandled_signal(current, signr)) {
+		die("Exception in kernel mode", regs, signr);
+		return;
+	}
+
+	if (show_unhandled_signals && unhandled_signal(current, signr)) {
 		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
 				   current->comm, current->pid, signr,
 				   addr, regs->nip, regs->link, code);
@@ -220,25 +260,8 @@
 			return;
 	}
 
-#ifdef CONFIG_KEXEC
-	cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
-#endif
-
 	die("System Reset", regs, SIGABRT);
 
-	/*
-	 * Some CPUs when released from the debugger will execute this path.
-	 * These CPUs entered the debugger via a soft-reset. If the CPU was
-	 * hung before entering the debugger it will return to the hung
-	 * state when exiting this function.  This causes a problem in
-	 * kdump since the hung CPU(s) will not respond to the IPI sent
-	 * from kdump. To prevent the problem we call crash_kexec_secondary()
-	 * here. If a kdump had not been initiated or we exit the debugger
-	 * with the "exit and recover" command (x) crash_kexec_secondary()
-	 * will return after 5ms and the CPU returns to its previous state.
-	 */
-	crash_kexec_secondary(regs);
-
 	/* Must die if the interrupt is not recoverable */
 	if (!(regs->msr & MSR_RI))
 		panic("Unrecoverable System Reset");
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f65af61..8b08629 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1406,7 +1406,6 @@
 	.match = vio_bus_match,
 	.probe = vio_bus_probe,
 	.remove = vio_bus_remove,
-	.pm = GENERIC_SUBSYS_PM_OPS,
 };
 
 /**
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 920276c..710a540 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -170,7 +170,13 @@
 	}
 #ifdef CONFIG_RELOCATABLE
 	. = ALIGN(8);
-	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
+	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
+	{
+#ifdef CONFIG_RELOCATABLE_PPC32
+		__dynamic_symtab = .;
+#endif
+		*(.dynsym)
+	}
 	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
 	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
 	{
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cb137a..336983d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -538,7 +538,7 @@
 	tpaca->kvm_hstate.napping = 0;
 	vcpu->cpu = vc->pcpu;
 	smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (vcpu->arch.ptid) {
 		tpaca->cpu_start = 0x80;
 		wmb();
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 44d8829..5c8b261 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -112,6 +112,9 @@
 	stbcix	r0, r5, r6		/* clear it */
 	stwcix	r8, r5, r7		/* EOI it */
 
+	/* NV GPR values from power7_idle() will no longer be valid */
+	stb	r0, PACA_NAPSTATELOST(r13)
+
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 3c791e1..e2cfb9e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -658,10 +658,12 @@
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
 			int i;
 
+#ifdef CONFIG_KVM_BOOK3S_64_PR
 			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
 				r = RESUME_GUEST;
 				break;
 			}
+#endif
 
 			run->papr_hcall.nr = cmd;
 			for (i = 0; i < 9; ++i) {
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 26d2090..8c0d45a 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -15,6 +15,7 @@
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 166a6a0..7735a2c 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,13 +16,15 @@
 
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
-			   checksum_wrappers_64.o hweight_64.o
+			   checksum_wrappers_64.o hweight_64.o \
+			   copyuser_power7.o
 obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
 obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
 
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
+obj-$(CONFIG_ALTIVEC)	+= copyuser_power7_vmx.o
 endif
 
 obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 578b625..773d38f 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -11,6 +11,12 @@
 
 	.align	7
 _GLOBAL(__copy_tofrom_user)
+BEGIN_FTR_SECTION
+	nop
+FTR_SECTION_ELSE
+	b	__copy_tofrom_user_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
+_GLOBAL(__copy_tofrom_user_base)
 	/* first check for a whole page copy on a page boundary */
 	cmpldi	cr1,r5,16
 	cmpdi	cr6,r5,4096
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
new file mode 100644
index 0000000..497db7b
--- /dev/null
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -0,0 +1,683 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+
+#define STACKFRAMESIZE	256
+#define STK_REG(i)	(112 + ((i)-14)*8)
+
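+/*
+ * The errN macros mark the user access that follows them with an
+ * __ex_table entry, so a fault on that load or store branches to the
+ * matching .Ldo_errN fixup below.
+ */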
+	.macro err1
+100:
+	.section __ex_table,"a"
+	.align 3
+	.llong 100b,.Ldo_err1
+	.previous
+	.endm
+
+	.macro err2
+200:
+	.section __ex_table,"a"
+	.align 3
+	.llong 200b,.Ldo_err2
+	.previous
+	.endm
+
+#ifdef CONFIG_ALTIVEC
+	.macro err3
+300:
+	.section __ex_table,"a"
+	.align 3
+	.llong 300b,.Ldo_err3
+	.previous
+	.endm
+
+	.macro err4
+400:
+	.section __ex_table,"a"
+	.align 3
+	.llong 400b,.Ldo_err4
+	.previous
+	.endm
+
+
+.Ldo_err4:
+	ld	r16,STK_REG(r16)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r14,STK_REG(r14)(r1)
+.Ldo_err3:
+	bl	.exit_vmx_copy
+	ld	r0,STACKFRAMESIZE+16(r1)
+	mtlr	r0
+	b	.Lexit
+#endif /* CONFIG_ALTIVEC */
+
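+/*
+ * Fault fixups: restore any saved non-volatile registers, unwind the
+ * stack frame where one was created, then reload the original source,
+ * destination and length and redo the copy with the base (non-VMX)
+ * routine.
+ */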
+.Ldo_err2:
+	ld	r22,STK_REG(r22)(r1)
+	ld	r21,STK_REG(r21)(r1)
+	ld	r20,STK_REG(r20)(r1)
+	ld	r19,STK_REG(r19)(r1)
+	ld	r18,STK_REG(r18)(r1)
+	ld	r17,STK_REG(r17)(r1)
+	ld	r16,STK_REG(r16)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r14,STK_REG(r14)(r1)
+.Lexit:
+	addi	r1,r1,STACKFRAMESIZE
+.Ldo_err1:
+	ld	r3,48(r1)
+	ld	r4,56(r1)
+	ld	r5,64(r1)
+	b	__copy_tofrom_user_base
+
+
+_GLOBAL(__copy_tofrom_user_power7)
+#ifdef CONFIG_ALTIVEC
+	cmpldi	r5,16
+	cmpldi	cr1,r5,4096
+
+	std	r3,48(r1)
+	std	r4,56(r1)
+	std	r5,64(r1)
+
+	blt	.Lshort_copy
+	bgt	cr1,.Lvmx_copy
+#else
+	cmpldi	r5,16
+
+	std	r3,48(r1)
+	std	r4,56(r1)
+	std	r5,64(r1)
+
+	blt	.Lshort_copy
+#endif
+
+.Lnonvmx_copy:
+	/* Get the source 8B aligned */
+	neg	r6,r4
+	mtocrf	0x01,r6
+	clrldi	r6,r6,(64-3)
+
+	bf	cr7*4+3,1f
+err1;	lbz	r0,0(r4)
+	addi	r4,r4,1
+err1;	stb	r0,0(r3)
+	addi	r3,r3,1
+
+1:	bf	cr7*4+2,2f
+err1;	lhz	r0,0(r4)
+	addi	r4,r4,2
+err1;	sth	r0,0(r3)
+	addi	r3,r3,2
+
+2:	bf	cr7*4+1,3f
+err1;	lwz	r0,0(r4)
+	addi	r4,r4,4
+err1;	stw	r0,0(r3)
+	addi	r3,r3,4
+
+3:	sub	r5,r5,r6
+	cmpldi	r5,128
+	blt	5f
+
+	mflr	r0
+	stdu	r1,-STACKFRAMESIZE(r1)
+	std	r14,STK_REG(r14)(r1)
+	std	r15,STK_REG(r15)(r1)
+	std	r16,STK_REG(r16)(r1)
+	std	r17,STK_REG(r17)(r1)
+	std	r18,STK_REG(r18)(r1)
+	std	r19,STK_REG(r19)(r1)
+	std	r20,STK_REG(r20)(r1)
+	std	r21,STK_REG(r21)(r1)
+	std	r22,STK_REG(r22)(r1)
+	std	r0,STACKFRAMESIZE+16(r1)
+
+	srdi	r6,r5,7
+	mtctr	r6
+
+	/* Now do cacheline (128B) sized loads and stores. */
+	.align	5
+4:
+err2;	ld	r0,0(r4)
+err2;	ld	r6,8(r4)
+err2;	ld	r7,16(r4)
+err2;	ld	r8,24(r4)
+err2;	ld	r9,32(r4)
+err2;	ld	r10,40(r4)
+err2;	ld	r11,48(r4)
+err2;	ld	r12,56(r4)
+err2;	ld	r14,64(r4)
+err2;	ld	r15,72(r4)
+err2;	ld	r16,80(r4)
+err2;	ld	r17,88(r4)
+err2;	ld	r18,96(r4)
+err2;	ld	r19,104(r4)
+err2;	ld	r20,112(r4)
+err2;	ld	r21,120(r4)
+	addi	r4,r4,128
+err2;	std	r0,0(r3)
+err2;	std	r6,8(r3)
+err2;	std	r7,16(r3)
+err2;	std	r8,24(r3)
+err2;	std	r9,32(r3)
+err2;	std	r10,40(r3)
+err2;	std	r11,48(r3)
+err2;	std	r12,56(r3)
+err2;	std	r14,64(r3)
+err2;	std	r15,72(r3)
+err2;	std	r16,80(r3)
+err2;	std	r17,88(r3)
+err2;	std	r18,96(r3)
+err2;	std	r19,104(r3)
+err2;	std	r20,112(r3)
+err2;	std	r21,120(r3)
+	addi	r3,r3,128
+	bdnz	4b
+
+	clrldi	r5,r5,(64-7)
+
+	ld	r14,STK_REG(r14)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r16,STK_REG(r16)(r1)
+	ld	r17,STK_REG(r17)(r1)
+	ld	r18,STK_REG(r18)(r1)
+	ld	r19,STK_REG(r19)(r1)
+	ld	r20,STK_REG(r20)(r1)
+	ld	r21,STK_REG(r21)(r1)
+	ld	r22,STK_REG(r22)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+
+	/* Up to 127B to go */
+5:	srdi	r6,r5,4
+	mtocrf	0x01,r6
+
+6:	bf	cr7*4+1,7f
+err1;	ld	r0,0(r4)
+err1;	ld	r6,8(r4)
+err1;	ld	r7,16(r4)
+err1;	ld	r8,24(r4)
+err1;	ld	r9,32(r4)
+err1;	ld	r10,40(r4)
+err1;	ld	r11,48(r4)
+err1;	ld	r12,56(r4)
+	addi	r4,r4,64
+err1;	std	r0,0(r3)
+err1;	std	r6,8(r3)
+err1;	std	r7,16(r3)
+err1;	std	r8,24(r3)
+err1;	std	r9,32(r3)
+err1;	std	r10,40(r3)
+err1;	std	r11,48(r3)
+err1;	std	r12,56(r3)
+	addi	r3,r3,64
+
+	/* Up to 63B to go */
+7:	bf	cr7*4+2,8f
+err1;	ld	r0,0(r4)
+err1;	ld	r6,8(r4)
+err1;	ld	r7,16(r4)
+err1;	ld	r8,24(r4)
+	addi	r4,r4,32
+err1;	std	r0,0(r3)
+err1;	std	r6,8(r3)
+err1;	std	r7,16(r3)
+err1;	std	r8,24(r3)
+	addi	r3,r3,32
+
+	/* Up to 31B to go */
+8:	bf	cr7*4+3,9f
+err1;	ld	r0,0(r4)
+err1;	ld	r6,8(r4)
+	addi	r4,r4,16
+err1;	std	r0,0(r3)
+err1;	std	r6,8(r3)
+	addi	r3,r3,16
+
+9:	clrldi	r5,r5,(64-4)
+
+	/* Up to 15B to go */
+.Lshort_copy:
+	mtocrf	0x01,r5
+	bf	cr7*4+0,12f
+err1;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+err1;	lwz	r6,4(r4)
+	addi	r4,r4,8
+err1;	stw	r0,0(r3)
+err1;	stw	r6,4(r3)
+	addi	r3,r3,8
+
+12:	bf	cr7*4+1,13f
+err1;	lwz	r0,0(r4)
+	addi	r4,r4,4
+err1;	stw	r0,0(r3)
+	addi	r3,r3,4
+
+13:	bf	cr7*4+2,14f
+err1;	lhz	r0,0(r4)
+	addi	r4,r4,2
+err1;	sth	r0,0(r3)
+	addi	r3,r3,2
+
+14:	bf	cr7*4+3,15f
+err1;	lbz	r0,0(r4)
+err1;	stb	r0,0(r3)
+
+15:	li	r3,0
+	blr
+
+.Lunwind_stack_nonvmx_copy:
+	addi	r1,r1,STACKFRAMESIZE
+	b	.Lnonvmx_copy
+
+#ifdef CONFIG_ALTIVEC
+.Lvmx_copy:
+	mflr	r0
+	std	r0,16(r1)
+	stdu	r1,-STACKFRAMESIZE(r1)
+	bl	.enter_vmx_copy
+	cmpwi	r3,0
+	ld	r0,STACKFRAMESIZE+16(r1)
+	ld	r3,STACKFRAMESIZE+48(r1)
+	ld	r4,STACKFRAMESIZE+56(r1)
+	ld	r5,STACKFRAMESIZE+64(r1)
+	mtlr	r0
+
+	beq	.Lunwind_stack_nonvmx_copy
+
+	/*
+	 * If source and destination are not relatively aligned we use a
+	 * slower permute loop.
+	 */
+	xor	r6,r4,r3
+	rldicl.	r6,r6,0,(64-4)
+	bne	.Lvmx_unaligned_copy
+
+	/* Get the destination 16B aligned */
+	neg	r6,r3
+	mtocrf	0x01,r6
+	clrldi	r6,r6,(64-4)
+
+	bf	cr7*4+3,1f
+err3;	lbz	r0,0(r4)
+	addi	r4,r4,1
+err3;	stb	r0,0(r3)
+	addi	r3,r3,1
+
+1:	bf	cr7*4+2,2f
+err3;	lhz	r0,0(r4)
+	addi	r4,r4,2
+err3;	sth	r0,0(r3)
+	addi	r3,r3,2
+
+2:	bf	cr7*4+1,3f
+err3;	lwz	r0,0(r4)
+	addi	r4,r4,4
+err3;	stw	r0,0(r3)
+	addi	r3,r3,4
+
+3:	bf	cr7*4+0,4f
+err3;	ld	r0,0(r4)
+	addi	r4,r4,8
+err3;	std	r0,0(r3)
+	addi	r3,r3,8
+
+4:	sub	r5,r5,r6
+
+	/* Get the destination 128B aligned */
+	neg	r6,r3
+	srdi	r7,r6,4
+	mtocrf	0x01,r7
+	clrldi	r6,r6,(64-7)
+
+	li	r9,16
+	li	r10,32
+	li	r11,48
+
+	bf	cr7*4+3,5f
+err3;	lvx	vr1,r0,r4
+	addi	r4,r4,16
+err3;	stvx	vr1,r0,r3
+	addi	r3,r3,16
+
+5:	bf	cr7*4+2,6f
+err3;	lvx	vr1,r0,r4
+err3;	lvx	vr0,r4,r9
+	addi	r4,r4,32
+err3;	stvx	vr1,r0,r3
+err3;	stvx	vr0,r3,r9
+	addi	r3,r3,32
+
+6:	bf	cr7*4+1,7f
+err3;	lvx	vr3,r0,r4
+err3;	lvx	vr2,r4,r9
+err3;	lvx	vr1,r4,r10
+err3;	lvx	vr0,r4,r11
+	addi	r4,r4,64
+err3;	stvx	vr3,r0,r3
+err3;	stvx	vr2,r3,r9
+err3;	stvx	vr1,r3,r10
+err3;	stvx	vr0,r3,r11
+	addi	r3,r3,64
+
+7:	sub	r5,r5,r6
+	srdi	r6,r5,7
+
+	std	r14,STK_REG(r14)(r1)
+	std	r15,STK_REG(r15)(r1)
+	std	r16,STK_REG(r16)(r1)
+
+	li	r12,64
+	li	r14,80
+	li	r15,96
+	li	r16,112
+
+	mtctr	r6
+
+	/*
+	 * Now do cacheline sized loads and stores. By this stage the
+	 * cacheline stores are also cacheline aligned.
+	 */
+	.align	5
+8:
+err4;	lvx	vr7,r0,r4
+err4;	lvx	vr6,r4,r9
+err4;	lvx	vr5,r4,r10
+err4;	lvx	vr4,r4,r11
+err4;	lvx	vr3,r4,r12
+err4;	lvx	vr2,r4,r14
+err4;	lvx	vr1,r4,r15
+err4;	lvx	vr0,r4,r16
+	addi	r4,r4,128
+err4;	stvx	vr7,r0,r3
+err4;	stvx	vr6,r3,r9
+err4;	stvx	vr5,r3,r10
+err4;	stvx	vr4,r3,r11
+err4;	stvx	vr3,r3,r12
+err4;	stvx	vr2,r3,r14
+err4;	stvx	vr1,r3,r15
+err4;	stvx	vr0,r3,r16
+	addi	r3,r3,128
+	bdnz	8b
+
+	ld	r14,STK_REG(r14)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r16,STK_REG(r16)(r1)
+
+	/* Up to 127B to go */
+	clrldi	r5,r5,(64-7)
+	srdi	r6,r5,4
+	mtocrf	0x01,r6
+
+	bf	cr7*4+1,9f
+err3;	lvx	vr3,r0,r4
+err3;	lvx	vr2,r4,r9
+err3;	lvx	vr1,r4,r10
+err3;	lvx	vr0,r4,r11
+	addi	r4,r4,64
+err3;	stvx	vr3,r0,r3
+err3;	stvx	vr2,r3,r9
+err3;	stvx	vr1,r3,r10
+err3;	stvx	vr0,r3,r11
+	addi	r3,r3,64
+
+9:	bf	cr7*4+2,10f
+err3;	lvx	vr1,r0,r4
+err3;	lvx	vr0,r4,r9
+	addi	r4,r4,32
+err3;	stvx	vr1,r0,r3
+err3;	stvx	vr0,r3,r9
+	addi	r3,r3,32
+
+10:	bf	cr7*4+3,11f
+err3;	lvx	vr1,r0,r4
+	addi	r4,r4,16
+err3;	stvx	vr1,r0,r3
+	addi	r3,r3,16
+
+	/* Up to 15B to go */
+11:	clrldi	r5,r5,(64-4)
+	mtocrf	0x01,r5
+	bf	cr7*4+0,12f
+err3;	ld	r0,0(r4)
+	addi	r4,r4,8
+err3;	std	r0,0(r3)
+	addi	r3,r3,8
+
+12:	bf	cr7*4+1,13f
+err3;	lwz	r0,0(r4)
+	addi	r4,r4,4
+err3;	stw	r0,0(r3)
+	addi	r3,r3,4
+
+13:	bf	cr7*4+2,14f
+err3;	lhz	r0,0(r4)
+	addi	r4,r4,2
+err3;	sth	r0,0(r3)
+	addi	r3,r3,2
+
+14:	bf	cr7*4+3,15f
+err3;	lbz	r0,0(r4)
+err3;	stb	r0,0(r3)
+
+15:	addi	r1,r1,STACKFRAMESIZE
+	b	.exit_vmx_copy		/* tail call optimise */
+
+.Lvmx_unaligned_copy:
+	/* Get the destination 16B aligned */
+	neg	r6,r3
+	mtocrf	0x01,r6
+	clrldi	r6,r6,(64-4)
+
+	bf	cr7*4+3,1f
+err3;	lbz	r0,0(r4)
+	addi	r4,r4,1
+err3;	stb	r0,0(r3)
+	addi	r3,r3,1
+
+1:	bf	cr7*4+2,2f
+err3;	lhz	r0,0(r4)
+	addi	r4,r4,2
+err3;	sth	r0,0(r3)
+	addi	r3,r3,2
+
+2:	bf	cr7*4+1,3f
+err3;	lwz	r0,0(r4)
+	addi	r4,r4,4
+err3;	stw	r0,0(r3)
+	addi	r3,r3,4
+
+3:	bf	cr7*4+0,4f
+err3;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+err3;	lwz	r7,4(r4)
+	addi	r4,r4,8
+err3;	stw	r0,0(r3)
+err3;	stw	r7,4(r3)
+	addi	r3,r3,8
+
+4:	sub	r5,r5,r6
+
+	/* Get the destination 128B aligned */
+	neg	r6,r3
+	srdi	r7,r6,4
+	mtocrf	0x01,r7
+	clrldi	r6,r6,(64-7)
+
+	li	r9,16
+	li	r10,32
+	li	r11,48
+
+	lvsl	vr16,0,r4	/* Setup permute control vector */
+err3;	lvx	vr0,0,r4
+	addi	r4,r4,16
+
+	bf	cr7*4+3,5f
+err3;	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+	addi	r4,r4,16
+err3;	stvx	vr8,r0,r3
+	addi	r3,r3,16
+	vor	vr0,vr1,vr1
+
+5:	bf	cr7*4+2,6f
+err3;	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+err3;	lvx	vr0,r4,r9
+	vperm	vr9,vr1,vr0,vr16
+	addi	r4,r4,32
+err3;	stvx	vr8,r0,r3
+err3;	stvx	vr9,r3,r9
+	addi	r3,r3,32
+
+6:	bf	cr7*4+1,7f
+err3;	lvx	vr3,r0,r4
+	vperm	vr8,vr0,vr3,vr16
+err3;	lvx	vr2,r4,r9
+	vperm	vr9,vr3,vr2,vr16
+err3;	lvx	vr1,r4,r10
+	vperm	vr10,vr2,vr1,vr16
+err3;	lvx	vr0,r4,r11
+	vperm	vr11,vr1,vr0,vr16
+	addi	r4,r4,64
+err3;	stvx	vr8,r0,r3
+err3;	stvx	vr9,r3,r9
+err3;	stvx	vr10,r3,r10
+err3;	stvx	vr11,r3,r11
+	addi	r3,r3,64
+
+7:	sub	r5,r5,r6
+	srdi	r6,r5,7
+
+	std	r14,STK_REG(r14)(r1)
+	std	r15,STK_REG(r15)(r1)
+	std	r16,STK_REG(r16)(r1)
+
+	li	r12,64
+	li	r14,80
+	li	r15,96
+	li	r16,112
+
+	mtctr	r6
+
+	/*
+	 * Now do cacheline sized loads and stores. By this stage the
+	 * cacheline stores are also cacheline aligned.
+	 */
+	.align	5
+8:
+err4;	lvx	vr7,r0,r4
+	vperm	vr8,vr0,vr7,vr16
+err4;	lvx	vr6,r4,r9
+	vperm	vr9,vr7,vr6,vr16
+err4;	lvx	vr5,r4,r10
+	vperm	vr10,vr6,vr5,vr16
+err4;	lvx	vr4,r4,r11
+	vperm	vr11,vr5,vr4,vr16
+err4;	lvx	vr3,r4,r12
+	vperm	vr12,vr4,vr3,vr16
+err4;	lvx	vr2,r4,r14
+	vperm	vr13,vr3,vr2,vr16
+err4;	lvx	vr1,r4,r15
+	vperm	vr14,vr2,vr1,vr16
+err4;	lvx	vr0,r4,r16
+	vperm	vr15,vr1,vr0,vr16
+	addi	r4,r4,128
+err4;	stvx	vr8,r0,r3
+err4;	stvx	vr9,r3,r9
+err4;	stvx	vr10,r3,r10
+err4;	stvx	vr11,r3,r11
+err4;	stvx	vr12,r3,r12
+err4;	stvx	vr13,r3,r14
+err4;	stvx	vr14,r3,r15
+err4;	stvx	vr15,r3,r16
+	addi	r3,r3,128
+	bdnz	8b
+
+	ld	r14,STK_REG(r14)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r16,STK_REG(r16)(r1)
+
+	/* Up to 127B to go */
+	clrldi	r5,r5,(64-7)
+	srdi	r6,r5,4
+	mtocrf	0x01,r6
+
+	bf	cr7*4+1,9f
+err3;	lvx	vr3,r0,r4
+	vperm	vr8,vr0,vr3,vr16
+err3;	lvx	vr2,r4,r9
+	vperm	vr9,vr3,vr2,vr16
+err3;	lvx	vr1,r4,r10
+	vperm	vr10,vr2,vr1,vr16
+err3;	lvx	vr0,r4,r11
+	vperm	vr11,vr1,vr0,vr16
+	addi	r4,r4,64
+err3;	stvx	vr8,r0,r3
+err3;	stvx	vr9,r3,r9
+err3;	stvx	vr10,r3,r10
+err3;	stvx	vr11,r3,r11
+	addi	r3,r3,64
+
+9:	bf	cr7*4+2,10f
+err3;	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+err3;	lvx	vr0,r4,r9
+	vperm	vr9,vr1,vr0,vr16
+	addi	r4,r4,32
+err3;	stvx	vr8,r0,r3
+err3;	stvx	vr9,r3,r9
+	addi	r3,r3,32
+
+10:	bf	cr7*4+3,11f
+err3;	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+	addi	r4,r4,16
+err3;	stvx	vr8,r0,r3
+	addi	r3,r3,16
+
+	/* Up to 15B to go */
+11:	clrldi	r5,r5,(64-4)
+	addi	r4,r4,-16	/* Unwind the +16 load offset */
+	mtocrf	0x01,r5
+	bf	cr7*4+0,12f
+err3;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+err3;	lwz	r6,4(r4)
+	addi	r4,r4,8
+err3;	stw	r0,0(r3)
+err3;	stw	r6,4(r3)
+	addi	r3,r3,8
+
+12:	bf	cr7*4+1,13f
+err3;	lwz	r0,0(r4)
+	addi	r4,r4,4
+err3;	stw	r0,0(r3)
+	addi	r3,r3,4
+
+13:	bf	cr7*4+2,14f
+err3;	lhz	r0,0(r4)
+	addi	r4,r4,2
+err3;	sth	r0,0(r3)
+	addi	r3,r3,2
+
+14:	bf	cr7*4+3,15f
+err3;	lbz	r0,0(r4)
+err3;	stb	r0,0(r3)
+
+15:	addi	r1,r1,STACKFRAMESIZE
+	b	.exit_vmx_copy		/* tail call optimise */
+#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/copyuser_power7_vmx.c b/arch/powerpc/lib/copyuser_power7_vmx.c
new file mode 100644
index 0000000..6e1efad
--- /dev/null
+++ b/arch/powerpc/lib/copyuser_power7_vmx.c
@@ -0,0 +1,50 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
+ *          Anton Blanchard <anton@au.ibm.com>
+ */
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+
+int enter_vmx_copy(void)
+{
+	if (in_interrupt())
+		return 0;
+
+	/* This acts as preempt_disable() as well, which is what
+	 * enable_kernel_altivec() needs. We need to disable page faults
+	 * as they can call schedule and thus make us lose the VMX
+	 * context. So on page faults, we just fail which will cause
+	 * a fallback to the normal non-vmx copy.
+	 */
+	pagefault_disable();
+
+	enable_kernel_altivec();
+
+	return 1;
+}
+
+/*
+ * This function must return 0 because we tail call optimise when calling
+ * from __copy_tofrom_user_power7 which returns 0 on success.
+ */
+int exit_vmx_copy(void)
+{
+	pagefault_enable();
+	return 0;
+}
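A minimal C sketch of how the two helpers above are meant to be paired by a
caller; the real caller is the __copy_tofrom_user_power7 assembly above, and
the copy body and the fallback_copy() name here are hypothetical placeholders,
not part of the patch.

static unsigned long vmx_copy_sketch(void *to, const void *from,
				     unsigned long n)
{
	/* In interrupt context VMX can't be used, so fall back. */
	if (!enter_vmx_copy())
		return fallback_copy(to, from, n);

	/*
	 * Page faults are disabled between enter/exit, so a faulting
	 * access bails out and the caller retries with the non-VMX path.
	 */
	/* ... Altivec (VMX) loads and stores go here ... */

	return exit_vmx_copy();		/* always returns 0, i.e. success */
}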
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index f60e006..388b95e 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -78,11 +78,7 @@
 		"tlbwe	%1,%3,%5\n"
 		"tlbwe	%0,%3,%6\n"
 	:
-#ifdef CONFIG_PPC47x
-	: "r" (PPC47x_TLB2_S_RWX),
-#else
 	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
-#endif
 	  "r" (phys),
 	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
 	  "r" (entry),
@@ -221,7 +217,7 @@
 {
 	u64 size;
 
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
 	/* We don't currently support the first MEMBLOCK not mapping 0
 	 * physical on those processors
 	 */
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 991ee81..3787b61 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -21,6 +21,8 @@
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   tlb_hash$(CONFIG_WORD_SIZE).o \
 				   mmu_context_hash$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC_ICSWX)		+= icswx.o
+obj-$(CONFIG_PPC_ICSWX_PID)	+= icswx_pid.o
 obj-$(CONFIG_40x)		+= 40x_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5efe8c9..2f0d1b0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -44,6 +44,8 @@
 #include <asm/siginfo.h>
 #include <mm/mmu_decl.h>
 
+#include "icswx.h"
+
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs)
 {
@@ -143,6 +145,21 @@
 	is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
+#ifdef CONFIG_PPC_ICSWX
+	/*
+	 * we need to do this early because this "data storage
+	 * interrupt" does not update the DAR/DEAR so we don't want to
+	 * look at it
+	 */
+	if (error_code & ICSWX_DSI_UCT) {
+		int ret;
+
+		ret = acop_handle_fault(regs, address, error_code);
+		if (ret)
+			return ret;
+	}
+#endif
+
 	if (notify_page_fault(regs))
 		return 0;
 
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 343ad0b..3bc7006 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -37,31 +37,32 @@
 	return found;
 }
 
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte)
 {
 	unsigned long mas1, mas2;
 	u64 mas7_3;
 	unsigned long psize, tsize, shift;
 	unsigned long flags;
+	struct mm_struct *mm;
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
-	int index, lz, ncams;
-	struct vm_area_struct *vma;
+	int index, ncams;
 #endif
 
 	if (unlikely(is_kernel_addr(ea)))
 		return;
 
+	mm = vma->vm_mm;
+
 #ifdef CONFIG_PPC_MM_SLICES
-	psize = mmu_get_tsize(get_slice_psize(mm, ea));
-	tsize = mmu_get_psize(psize);
+	psize = get_slice_psize(mm, ea);
+	tsize = mmu_get_tsize(psize);
 	shift = mmu_psize_defs[psize].shift;
 #else
-	vma = find_vma(mm, ea);
-	psize = vma_mmu_pagesize(vma);	/* returns actual size in bytes */
-	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize));
-	shift = 31 - lz;
-	tsize = 21 - lz;
+	psize = vma_mmu_pagesize(vma);
+	shift = __ilog2(psize);
+	tsize = shift - 10;
 #endif
 
 	/*
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8558b57..a8b3cc7 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -29,22 +29,22 @@
 
 /*
  * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready.  On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array.  FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
-#ifdef CONFIG_PPC64
-#define MAX_NUMBER_GPAGES	1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
 struct psize_gpages {
 	u64 gpage_list[MAX_NUMBER_GPAGES];
 	unsigned int nr_gpages;
 };
 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
+#else
+#define MAX_NUMBER_GPAGES	1024
+static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+static unsigned nr_gpages;
 #endif
 
 static inline int shift_to_mmu_psize(unsigned int shift)
@@ -115,12 +115,12 @@
 	struct kmem_cache *cachep;
 	pte_t *new;
 
-#ifdef CONFIG_PPC64
-	cachep = PGT_CACHE(pdshift - pshift);
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	int i;
 	int num_hugepd = 1 << (pshift - pdshift);
 	cachep = hugepte_cache;
+#else
+	cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
 	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
@@ -132,12 +132,7 @@
 		return -ENOMEM;
 
 	spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC64
-	if (!hugepd_none(*hpdp))
-		kmem_cache_free(cachep, new);
-	else
-		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	/*
 	 * We have multiple higher-level entries that point to the same
 	 * actual pte location.  Fill in each as we go and backtrack on error.
@@ -156,11 +151,28 @@
 			hpdp->pd = 0;
 		kmem_cache_free(cachep, new);
 	}
+#else
+	if (!hugepd_none(*hpdp))
+		kmem_cache_free(cachep, new);
+	else
+		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 #endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
 
+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT PUD_SHIFT
+#define HUGEPD_PUD_SHIFT PMD_SHIFT
+#endif
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
@@ -173,12 +185,13 @@
 	addr &= ~(sz-1);
 
 	pg = pgd_offset(mm, addr);
-	if (pshift >= PUD_SHIFT) {
+
+	if (pshift >= HUGEPD_PGD_SHIFT) {
 		hpdp = (hugepd_t *)pg;
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
-		if (pshift >= PMD_SHIFT) {
+		if (pshift >= HUGEPD_PUD_SHIFT) {
 			hpdp = (hugepd_t *)pu;
 		} else {
 			pdshift = PMD_SHIFT;
@@ -198,7 +211,7 @@
 	return hugepte_offset(hpdp, addr, pdshift);
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
  */
@@ -318,7 +331,7 @@
 	}
 }
 
-#else /* PPC64 */
+#else /* !PPC_FSL_BOOK3E */
 
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
@@ -356,7 +369,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
 
@@ -416,11 +429,11 @@
 	unsigned long pdmask = ~((1UL << pdshift) - 1);
 	unsigned int num_hugepd = 1;
 
-#ifdef CONFIG_PPC64
-	unsigned int shift = hugepd_shift(*hpdp);
-#else
-	/* Note: On 32-bit the hpdp may be the first of several */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* Note: On fsl the hpdp may be the first of several */
 	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
+#else
+	unsigned int shift = hugepd_shift(*hpdp);
 #endif
 
 	start &= pdmask;
@@ -438,10 +451,11 @@
 		hpdp->pd = 0;
 
 	tlb->need_flush = 1;
-#ifdef CONFIG_PPC64
-	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#else
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	hugepd_free(tlb, hugepte);
+#else
+	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 #endif
 }
 
@@ -454,14 +468,23 @@
 	unsigned long start;
 
 	start = addr;
-	pmd = pmd_offset(pud, addr);
 	do {
+		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd))
 			continue;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+		/*
+		 * Increment next by the size of the huge mapping since
+		 * there may be more than one entry at this level for a
+		 * single hugepage, but all of them point to
+		 * the same kmem cache that holds the hugepte.
+		 */
+		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
 		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 				  addr, next, floor, ceiling);
-	} while (pmd++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PUD_MASK;
 	if (start < floor)
@@ -488,8 +511,8 @@
 	unsigned long start;
 
 	start = addr;
-	pud = pud_offset(pgd, addr);
 	do {
+		pud = pud_offset(pgd, addr);
 		next = pud_addr_end(addr, end);
 		if (!is_hugepd(pud)) {
 			if (pud_none_or_clear_bad(pud))
@@ -497,10 +520,19 @@
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 					       ceiling);
 		} else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+			/*
+			 * Increment next by the size of the huge mapping since
+			 * there may be more than one entry at this level for a
+			 * single hugepage, but all of them point to
+			 * the same kmem cache that holds the hugepte.
+			 */
+			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 					  addr, next, floor, ceiling);
 		}
-	} while (pud++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PGDIR_MASK;
 	if (start < floor)
@@ -555,12 +587,12 @@
 				continue;
 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 		} else {
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 			/*
 			 * Increment next by the size of the huge mapping since
-			 * on 32-bit there may be more than one entry at the pgd
-			 * level for a single hugepage, but all of them point to
-			 * the same kmem cache that holds the hugepte.
+			 * there may be more than one entry at the pgd level
+			 * for a single hugepage, but all of them point to the
+			 * same kmem cache that holds the hugepte.
 			 */
 			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
 #endif
@@ -698,19 +730,17 @@
 	return 1;
 }
 
+#ifdef CONFIG_PPC_MM_SLICES
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-#ifdef CONFIG_PPC_MM_SLICES
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 
 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
-#else
-	return get_unmapped_area(file, addr, len, pgoff, flags);
-#endif
 }
+#endif
 
 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
@@ -784,7 +814,7 @@
 }
 __setup("hugepagesz=", hugepage_setup_sz);
 
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct kmem_cache *hugepte_cache;
 static int __init hugetlbpage_init(void)
 {
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
new file mode 100644
index 0000000..5d9a59e
--- /dev/null
+++ b/arch/powerpc/mm/icswx.c
@@ -0,0 +1,273 @@
+/*
+ *  ICSWX and ACOP Management
+ *
+ *  Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "icswx.h"
+
+/*
+ * The processor and its L2 cache cause the icswx instruction to
+ * generate a COP_REQ transaction on PowerBus. The transaction has no
+ * address, and the processor does not perform an MMU access to
+ * authenticate the transaction. The command portion of the PowerBus
+ * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor
+ * Process ID (PID), which the coprocessor compares to the authorized
+ * LPID and PID held in the coprocessor, to determine if the process
+ * is authorized to generate the transaction.  The data of the COP_REQ
+ * transaction is 128-byte or less in size and is placed in cacheable
+ * memory on a 128-byte cache line boundary.
+ *
+ * A task that wants to use a coprocessor should use use_cop() to mark
+ * the use of the Coprocessor Type (CT) and context switching. On a
+ * server class processor, the PID register is used only for coprocessor
+ * management and so a coprocessor PID is allocated before executing
+ * the icswx instruction. Drop_cop() is used to free the coprocessor
+ * PID.
+ *
+ * Example:
+ * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
+ * Each HFI has multiple windows. Each HFI window serves as a
+ * network device sending to and receiving from HFI network.
+ * HFI immediate send function uses icswx instruction. The immediate
+ * send function allows small (single cache-line) packets to be sent
+ * without using the regular HFI send FIFO and doorbell, which are
+ * much slower than immediate send.
+ *
+ * For each task intending to use HFI immediate send, the HFI driver
+ * calls use_cop() to obtain a coprocessor PID for the task.
+ * The HFI driver then allocates a free HFI window and saves the
+ * coprocessor PID to the HFI window to allow the task to use the
+ * HFI window.
+ *
+ * The HFI driver repeatedly creates immediate send packets and
+ * issues icswx instruction to send data through the HFI window.
+ * The HFI compares the coprocessor PID in the CPU PID register
+ * to the PID held in the HFI window to determine if the transaction
+ * is allowed.
+ *
+ * When the task wants to release the HFI window, the HFI driver calls
+ * drop_cop() to release the coprocessor PID.
+ */
+
+void switch_cop(struct mm_struct *next)
+{
+#ifdef CONFIG_ICSWX_PID
+	mtspr(SPRN_PID, next->context.cop_pid);
+#endif
+	mtspr(SPRN_ACOP, next->context.acop);
+}
+
+/**
+ * Start using a coprocessor.
+ * @acop: mask of coprocessor to be used.
+ * @mm: The mm to associate the coprocessor with. Most likely the current mm.
+ *
+ * Return a positive PID if successful. Negative errno otherwise.
+ * The returned PID will be fed to the coprocessor to determine if an
+ * icswx transaction is authenticated.
+ */
+int use_cop(unsigned long acop, struct mm_struct *mm)
+{
+	int ret;
+
+	if (!cpu_has_feature(CPU_FTR_ICSWX))
+		return -ENODEV;
+
+	if (!mm || !acop)
+		return -EINVAL;
+
+	/* The page_table_lock ensures mm_users won't change under us */
+	spin_lock(&mm->page_table_lock);
+	spin_lock(mm->context.cop_lockp);
+
+	ret = get_cop_pid(mm);
+	if (ret < 0)
+		goto out;
+
+	/* update acop */
+	mm->context.acop |= acop;
+
+	sync_cop(mm);
+
+	/*
+	 * If this is a threaded process then there might be other threads
+	 * running. We need to send an IPI to force them to pick up any
+	 * change in PID and ACOP.
+	 */
+	if (atomic_read(&mm->mm_users) > 1)
+		smp_call_function(sync_cop, mm, 1);
+
+out:
+	spin_unlock(mm->context.cop_lockp);
+	spin_unlock(&mm->page_table_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(use_cop);
+
+/**
+ * Stop using a coprocessor.
+ * @acop: mask of coprocessor to be stopped.
+ * @mm: The mm the coprocessor is associated with.
+ */
+void drop_cop(unsigned long acop, struct mm_struct *mm)
+{
+	int free_pid;
+
+	if (!cpu_has_feature(CPU_FTR_ICSWX))
+		return;
+
+	if (WARN_ON_ONCE(!mm))
+		return;
+
+	/* The page_table_lock ensures mm_users won't change under us */
+	spin_lock(&mm->page_table_lock);
+	spin_lock(mm->context.cop_lockp);
+
+	mm->context.acop &= ~acop;
+
+	free_pid = disable_cop_pid(mm);
+	sync_cop(mm);
+
+	/*
+	 * If this is a threaded process then there might be other threads
+	 * running. We need to send an IPI to force them to pick up any
+	 * change in PID and ACOP.
+	 */
+	if (atomic_read(&mm->mm_users) > 1)
+		smp_call_function(sync_cop, mm, 1);
+
+	if (free_pid != COP_PID_NONE)
+		free_cop_pid(free_pid);
+
+	spin_unlock(mm->context.cop_lockp);
+	spin_unlock(&mm->page_table_lock);
+}
+EXPORT_SYMBOL_GPL(drop_cop);
+
+static int acop_use_cop(int ct)
+{
+	/* todo */
+	return -1;
+}
+
+/*
+ * Get the instruction word at the NIP
+ */
+static u32 acop_get_inst(struct pt_regs *regs)
+{
+	u32 inst;
+	u32 __user *p;
+
+	p = (u32 __user *)regs->nip;
+	if (!access_ok(VERIFY_READ, p, sizeof(*p)))
+		return 0;
+
+	if (__get_user(inst, p))
+		return 0;
+
+	return inst;
+}
+
+/**
+ * @regs: registers at time of interrupt
+ * @address: storage address
+ * @error_code: Fault code, usually the DSISR or ESR depending on
+ *		processor type
+ *
+ * Return 0 if we are able to resolve the data storage fault that
+ * results from a CT miss in the ACOP register.
+ */
+int acop_handle_fault(struct pt_regs *regs, unsigned long address,
+		      unsigned long error_code)
+{
+	int ct;
+	u32 inst = 0;
+
+	if (!cpu_has_feature(CPU_FTR_ICSWX)) {
+		pr_info("No coprocessors available");
+		_exception(SIGILL, regs, ILL_ILLOPN, address);
+	}
+
+	if (!user_mode(regs)) {
+		/* this could happen if the HV denies the
+		 * kernel access, for now we just die */
+		die("ICSWX from kernel failed", regs, SIGSEGV);
+	}
+
+	/* Some implementations leave us a hint for the CT */
+	ct = ICSWX_GET_CT_HINT(error_code);
+	if (ct < 0) {
+		/* we have to peek at the instruction word to figure out CT */
+		u32 ccw;
+		u32 rs;
+
+		inst = acop_get_inst(regs);
+		if (inst == 0)
+			return -1;
+
+		rs = (inst >> (31 - 10)) & 0x1f;
+		ccw = regs->gpr[rs];
+		ct = (ccw >> 16) & 0x3f;
+	}
+
+	if (!acop_use_cop(ct))
+		return 0;
+
+	/* at this point the CT is unknown to the system */
+	pr_warn("%s[%d]: Coprocessor %d is unavailable",
+		current->comm, current->pid, ct);
+
+	/* get inst if we don't already have it */
+	if (inst == 0) {
+		inst = acop_get_inst(regs);
+		if (inst == 0)
+			return -1;
+	}
+
+	/* Check if the instruction is the "record form" */
+	if (inst & 1) {
+		/*
+		 * the instruction is "record" form so we can reject
+		 * using CR0
+		 */
+		regs->ccr &= ~(0xful << 28);
+		regs->ccr |= ICSWX_RC_NOT_FOUND << 28;
+
+		/* Move on to the next instruction */
+		regs->nip += 4;
+	} else {
+		/*
+		 * There is no architected mechanism to report a bad
+		 * CT so we could either SIGILL or report nothing.
+		 * Since the non-record version should only be used
+		 * for "hints" or "don't care" we should probably do
+		 * nothing.  However, I could see how some people
+		 * might want a SIGILL, so it is here if you want it.
+		 */
+#ifdef CONFIG_PPC_ICSWX_USE_SIGILL
+		_exception(SIGILL, regs, ILL_ILLOPN, address);
+#else
+		regs->nip += 4;
+#endif
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acop_handle_fault);
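A short sketch of the driver-side calling pattern that the HFI example in the
comment above describes; only use_cop() and drop_cop() come from this file,
while the window structure, the CT mask and the helper names are hypothetical.

/* Hypothetical coprocessor type mask for the example device. */
#define EXAMPLE_ACOP_MASK	(1UL << 0)

struct example_window {
	int cop_pid;
};

static int example_open_window(struct example_window *win,
			       struct mm_struct *mm)
{
	int pid = use_cop(EXAMPLE_ACOP_MASK, mm);

	if (pid < 0)
		return pid;		/* no icswx support, or no PID free */

	/* The PID is what the coprocessor compares against on icswx. */
	win->cop_pid = pid;
	return 0;
}

static void example_close_window(struct example_window *win,
				 struct mm_struct *mm)
{
	drop_cop(EXAMPLE_ACOP_MASK, mm);
	win->cop_pid = 0;		/* COP_PID_NONE */
}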
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h
new file mode 100644
index 0000000..42176bd
--- /dev/null
+++ b/arch/powerpc/mm/icswx.h
@@ -0,0 +1,62 @@
+#ifndef _ARCH_POWERPC_MM_ICSWX_H_
+#define _ARCH_POWERPC_MM_ICSWX_H_
+
+/*
+ *  ICSWX and ACOP Management
+ *
+ *  Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/mmu_context.h>
+
+/* also used to denote that PIDs are not used */
+#define COP_PID_NONE 0
+
+static inline void sync_cop(void *arg)
+{
+	struct mm_struct *mm = arg;
+
+	if (mm == current->active_mm)
+		switch_cop(current->active_mm);
+}
+
+#ifdef CONFIG_PPC_ICSWX_PID
+extern int get_cop_pid(struct mm_struct *mm);
+extern int disable_cop_pid(struct mm_struct *mm);
+extern void free_cop_pid(int free_pid);
+#else
+#define get_cop_pid(m) (COP_PID_NONE)
+#define disable_cop_pid(m) (COP_PID_NONE)
+#define free_cop_pid(p)
+#endif
+
+/*
+ * These are implementation bits for architected registers.  If this
+ * ever becomes architected, these should be moved to reg.h et al.
+ */
+/* UCT is the same bit for Server and Embedded */
+#define ICSWX_DSI_UCT		0x00004000  /* Unavailable Coprocessor Type */
+
+#ifdef CONFIG_PPC_BOOK3E
+/* Embedded implementation gives us no hints as to what the CT is */
+#define ICSWX_GET_CT_HINT(x) (-1)
+#else
+/* Server implementation contains the CT value in the DSISR */
+#define ICSWX_DSISR_CTMASK	0x00003f00
+#define ICSWX_GET_CT_HINT(x)	(((x) & ICSWX_DSISR_CTMASK) >> 8)
+#endif
+
+#define ICSWX_RC_STARTED	0x8	/* The request has been started */
+#define ICSWX_RC_NOT_IDLE	0x4	/* No coprocessor found idle */
+#define ICSWX_RC_NOT_FOUND	0x2	/* No coprocessor found */
+#define ICSWX_RC_UNDEFINED	0x1	/* Reserved */
+
+extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
+			     unsigned long error_code);
+#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
diff --git a/arch/powerpc/mm/icswx_pid.c b/arch/powerpc/mm/icswx_pid.c
new file mode 100644
index 0000000..91e30eb
--- /dev/null
+++ b/arch/powerpc/mm/icswx_pid.c
@@ -0,0 +1,87 @@
+/*
+ *  ICSWX and ACOP/PID Management
+ *
+ *  Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include "icswx.h"
+
+#define COP_PID_MIN (COP_PID_NONE + 1)
+#define COP_PID_MAX (0xFFFF)
+
+static DEFINE_SPINLOCK(mmu_context_acop_lock);
+static DEFINE_IDA(cop_ida);
+
+static int new_cop_pid(struct ida *ida, int min_id, int max_id,
+		       spinlock_t *lock)
+{
+	int index;
+	int err;
+
+again:
+	if (!ida_pre_get(ida, GFP_KERNEL))
+		return -ENOMEM;
+
+	spin_lock(lock);
+	err = ida_get_new_above(ida, min_id, &index);
+	spin_unlock(lock);
+
+	if (err == -EAGAIN)
+		goto again;
+	else if (err)
+		return err;
+
+	if (index > max_id) {
+		spin_lock(lock);
+		ida_remove(ida, index);
+		spin_unlock(lock);
+		return -ENOMEM;
+	}
+
+	return index;
+}
+
+int get_cop_pid(struct mm_struct *mm)
+{
+	int pid;
+
+	if (mm->context.cop_pid == COP_PID_NONE) {
+		pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
+				  &mmu_context_acop_lock);
+		if (pid >= 0)
+			mm->context.cop_pid = pid;
+	}
+	return mm->context.cop_pid;
+}
+
+int disable_cop_pid(struct mm_struct *mm)
+{
+	int free_pid = COP_PID_NONE;
+
+	if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
+		free_pid = mm->context.cop_pid;
+		mm->context.cop_pid = COP_PID_NONE;
+	}
+	return free_pid;
+}
+
+void free_cop_pid(int free_pid)
+{
+	spin_lock(&mmu_context_acop_lock);
+	ida_remove(&cop_ida, free_pid);
+	spin_unlock(&mmu_context_acop_lock);
+}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 161cefd..6157be2 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -65,6 +65,13 @@
 EXPORT_SYMBOL(memstart_addr);
 phys_addr_t kernstart_addr;
 EXPORT_SYMBOL(kernstart_addr);
+
+#ifdef CONFIG_RELOCATABLE_PPC32
+/* Used in __va()/__pa() */
+long long virt_phys_offset;
+EXPORT_SYMBOL(virt_phys_offset);
+#endif
+
 phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
@@ -134,8 +141,7 @@
 
 	if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-		memblock.memory.cnt = 1;
-		memblock_analyze();
+		memblock_enforce_memory_limit(memblock.memory.regions[0].size);
 		printk(KERN_WARNING "Only using first contiguous memory region");
 #else
 		wii_memory_fixups();
@@ -158,7 +164,6 @@
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
 		memblock_enforce_memory_limit(total_lowmem);
-		memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2dd6bdd..d974b79 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -51,6 +51,7 @@
 #include <asm/vdso.h>
 #include <asm/fixmap.h>
 #include <asm/swiotlb.h>
+#include <asm/rtas.h>
 
 #include "mmu_decl.h"
 
@@ -199,7 +200,7 @@
 		unsigned long start_pfn, end_pfn;
 		start_pfn = memblock_region_memory_base_pfn(reg);
 		end_pfn = memblock_region_memory_end_pfn(reg);
-		add_active_range(0, start_pfn, end_pfn);
+		memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
 	}
 
 	/* Add all physical memory to the bootmem map, mark each area
@@ -553,7 +554,7 @@
 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 	&& defined(CONFIG_HUGETLB_PAGE)
 	if (is_vm_hugetlb_page(vma))
-		book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
+		book3e_hugetlb_preload(vma, address, *ptep);
 #endif
 }
 
@@ -585,3 +586,23 @@
 	return 0;
 }
 subsys_initcall(add_system_ram_resources);
+
+#ifdef CONFIG_STRICT_DEVMEM
+/*
+ * devmem_is_allowed(): check to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+		return 0;
+	if (!page_is_ram(pfn))
+		return 1;
+	if (page_is_rtas_user_buf(pfn))
+		return 1;
+	return 0;
+}
+#endif /* CONFIG_STRICT_DEVMEM */
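For context, a rough sketch (not from this patch) of how a /dev/mem style
caller consults devmem_is_allowed() one page at a time when
CONFIG_STRICT_DEVMEM is enabled; the generic version lives in
drivers/char/mem.c and the helper name here is made up.

static int example_range_is_allowed(unsigned long start_pfn,
				    unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		if (!devmem_is_allowed(pfn))
			return 0;	/* reject the whole access */

	return 1;			/* every page may be mapped/read */
}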
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
index 5a783d8..67a42ed 100644
--- a/arch/powerpc/mm/mmap_64.c
+++ b/arch/powerpc/mm/mmap_64.c
@@ -53,14 +53,6 @@
 	return sysctl_legacy_va_layout;
 }
 
-/*
- * Since get_random_int() returns the same value within a 1 jiffy window,
- * we will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will
- * be the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
 static unsigned long mmap_rnd(void)
 {
 	unsigned long rnd = 0;
@@ -68,11 +60,11 @@
 	if (current->flags & PF_RANDOMIZE) {
 		/* 8MB for 32bit, 1GB for 64bit */
 		if (is_32bit_task())
-			rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT)));
+			rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
 		else
-			rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT)));
+			rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
 	}
-	return (rnd << PAGE_SHIFT) * 2;
+	return rnd << PAGE_SHIFT;
 }
 
 static inline unsigned long mmap_base(void)
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index ca988a3..40677aa 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,200 +24,7 @@
 
 #include <asm/mmu_context.h>
 
-#ifdef CONFIG_PPC_ICSWX
-/*
- * The processor and its L2 cache cause the icswx instruction to
- * generate a COP_REQ transaction on PowerBus. The transaction has
- * no address, and the processor does not perform an MMU access
- * to authenticate the transaction. The command portion of the
- * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and
- * the coprocessor Process ID (PID), which the coprocessor compares
- * to the authorized LPID and PID held in the coprocessor, to determine
- * if the process is authorized to generate the transaction.
- * The data of the COP_REQ transaction is 128-byte or less and is
- * placed in cacheable memory on a 128-byte cache line boundary.
- *
- * The task to use a coprocessor should use use_cop() to allocate
- * a coprocessor PID before executing icswx instruction. use_cop()
- * also enables the coprocessor context switching. Drop_cop() is
- * used to free the coprocessor PID.
- *
- * Example:
- * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
- * Each HFI have multiple windows. Each HFI window serves as a
- * network device sending to and receiving from HFI network.
- * HFI immediate send function uses icswx instruction. The immediate
- * send function allows small (single cache-line) packets be sent
- * without using the regular HFI send FIFO and doorbell, which are
- * much slower than immediate send.
- *
- * For each task intending to use HFI immediate send, the HFI driver
- * calls use_cop() to obtain a coprocessor PID for the task.
- * The HFI driver then allocate a free HFI window and save the
- * coprocessor PID to the HFI window to allow the task to use the
- * HFI window.
- *
- * The HFI driver repeatedly creates immediate send packets and
- * issues icswx instruction to send data through the HFI window.
- * The HFI compares the coprocessor PID in the CPU PID register
- * to the PID held in the HFI window to determine if the transaction
- * is allowed.
- *
- * When the task to release the HFI window, the HFI driver calls
- * drop_cop() to release the coprocessor PID.
- */
-
-#define COP_PID_NONE 0
-#define COP_PID_MIN (COP_PID_NONE + 1)
-#define COP_PID_MAX (0xFFFF)
-
-static DEFINE_SPINLOCK(mmu_context_acop_lock);
-static DEFINE_IDA(cop_ida);
-
-void switch_cop(struct mm_struct *next)
-{
-	mtspr(SPRN_PID, next->context.cop_pid);
-	mtspr(SPRN_ACOP, next->context.acop);
-}
-
-static int new_cop_pid(struct ida *ida, int min_id, int max_id,
-		       spinlock_t *lock)
-{
-	int index;
-	int err;
-
-again:
-	if (!ida_pre_get(ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(lock);
-	err = ida_get_new_above(ida, min_id, &index);
-	spin_unlock(lock);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > max_id) {
-		spin_lock(lock);
-		ida_remove(ida, index);
-		spin_unlock(lock);
-		return -ENOMEM;
-	}
-
-	return index;
-}
-
-static void sync_cop(void *arg)
-{
-	struct mm_struct *mm = arg;
-
-	if (mm == current->active_mm)
-		switch_cop(current->active_mm);
-}
-
-/**
- * Start using a coprocessor.
- * @acop: mask of coprocessor to be used.
- * @mm: The mm the coprocessor to associate with. Most likely current mm.
- *
- * Return a positive PID if successful. Negative errno otherwise.
- * The returned PID will be fed to the coprocessor to determine if an
- * icswx transaction is authenticated.
- */
-int use_cop(unsigned long acop, struct mm_struct *mm)
-{
-	int ret;
-
-	if (!cpu_has_feature(CPU_FTR_ICSWX))
-		return -ENODEV;
-
-	if (!mm || !acop)
-		return -EINVAL;
-
-	/* The page_table_lock ensures mm_users won't change under us */
-	spin_lock(&mm->page_table_lock);
-	spin_lock(mm->context.cop_lockp);
-
-	if (mm->context.cop_pid == COP_PID_NONE) {
-		ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
-				  &mmu_context_acop_lock);
-		if (ret < 0)
-			goto out;
-
-		mm->context.cop_pid = ret;
-	}
-	mm->context.acop |= acop;
-
-	sync_cop(mm);
-
-	/*
-	 * If this is a threaded process then there might be other threads
-	 * running. We need to send an IPI to force them to pick up any
-	 * change in PID and ACOP.
-	 */
-	if (atomic_read(&mm->mm_users) > 1)
-		smp_call_function(sync_cop, mm, 1);
-
-	ret = mm->context.cop_pid;
-
-out:
-	spin_unlock(mm->context.cop_lockp);
-	spin_unlock(&mm->page_table_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(use_cop);
-
-/**
- * Stop using a coprocessor.
- * @acop: mask of coprocessor to be stopped.
- * @mm: The mm the coprocessor associated with.
- */
-void drop_cop(unsigned long acop, struct mm_struct *mm)
-{
-	int free_pid = COP_PID_NONE;
-
-	if (!cpu_has_feature(CPU_FTR_ICSWX))
-		return;
-
-	if (WARN_ON_ONCE(!mm))
-		return;
-
-	/* The page_table_lock ensures mm_users won't change under us */
-	spin_lock(&mm->page_table_lock);
-	spin_lock(mm->context.cop_lockp);
-
-	mm->context.acop &= ~acop;
-
-	if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
-		free_pid = mm->context.cop_pid;
-		mm->context.cop_pid = COP_PID_NONE;
-	}
-
-	sync_cop(mm);
-
-	/*
-	 * If this is a threaded process then there might be other threads
-	 * running. We need to send an IPI to force them to pick up any
-	 * change in PID and ACOP.
-	 */
-	if (atomic_read(&mm->mm_users) > 1)
-		smp_call_function(sync_cop, mm, 1);
-
-	if (free_pid != COP_PID_NONE) {
-		spin_lock(&mmu_context_acop_lock);
-		ida_remove(&cop_ida, free_pid);
-		spin_unlock(&mmu_context_acop_lock);
-	}
-
-	spin_unlock(mm->context.cop_lockp);
-	spin_unlock(&mm->page_table_lock);
-}
-EXPORT_SYMBOL_GPL(drop_cop);
-
-#endif /* CONFIG_PPC_ICSWX */
+#include "icswx.h"
 
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b22a83a..4ff3d8e 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -127,45 +127,25 @@
 }
 
 /*
- * get_active_region_work_fn - A helper function for get_node_active_region
- *	Returns datax set to the start_pfn and end_pfn if they contain
- *	the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- *	goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
- */
-static int __init get_active_region_work_fn(unsigned long start_pfn,
-					unsigned long end_pfn, void *datax)
-{
-	struct node_active_region *data;
-	data = (struct node_active_region *)datax;
-
-	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
-		data->start_pfn = start_pfn;
-		data->end_pfn = end_pfn;
-		return 1;
-	}
-	return 0;
-
-}
-
-/*
- * get_node_active_region - Return active region containing start_pfn
+ * get_node_active_region - Return active region containing pfn
  * Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
  */
-static void __init get_node_active_region(unsigned long start_pfn,
-		       struct node_active_region *node_ar)
+static void __init get_node_active_region(unsigned long pfn,
+					  struct node_active_region *node_ar)
 {
-	int nid = early_pfn_to_nid(start_pfn);
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
-	node_ar->nid = nid;
-	node_ar->start_pfn = start_pfn;
-	node_ar->end_pfn = start_pfn;
-	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		if (pfn >= start_pfn && pfn < end_pfn) {
+			node_ar->nid = nid;
+			node_ar->start_pfn = start_pfn;
+			node_ar->end_pfn = end_pfn;
+			break;
+		}
+	}
 }
 
 static void map_cpu_to_node(int cpu, int node)
@@ -406,7 +386,7 @@
 	of_node_put(memory);
 }
 
-static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
+static unsigned long read_n_cells(int n, const unsigned int **buf)
 {
 	unsigned long result = 0;
 
@@ -521,7 +501,7 @@
 	aa->n_arrays = *prop++;
 	aa->array_sz = *prop++;
 
-	/* Now that we know the number of arrrays and size of each array,
+	/* Now that we know the number of arrays and size of each array,
 	 * revalidate the size of the property read in.
 	 */
 	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
@@ -710,9 +690,7 @@
 			node_set_online(nid);
 			sz = numa_enforce_memory_limit(base, size);
 			if (sz)
-				add_active_range(nid, base >> PAGE_SHIFT,
-						 (base >> PAGE_SHIFT)
-						 + (sz >> PAGE_SHIFT));
+				memblock_set_node(base, sz, nid);
 		} while (--ranges);
 	}
 }
@@ -802,8 +780,7 @@
 				continue;
 		}
 
-		add_active_range(nid, start >> PAGE_SHIFT,
-				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+		memblock_set_node(start, size, nid);
 
 		if (--ranges)
 			goto new_range;
@@ -839,7 +816,8 @@
 		end_pfn = memblock_region_memory_end_pfn(reg);
 
 		fake_numa_create_new_node(end_pfn, &nid);
-		add_active_range(nid, start_pfn, end_pfn);
+		memblock_set_node(PFN_PHYS(start_pfn),
+				  PFN_PHYS(end_pfn - start_pfn), nid);
 		node_set_online(nid);
 	}
 }
@@ -969,7 +947,7 @@
 	.priority = 1 /* Must run before sched domains notifier. */
 };
 
-static void mark_reserved_regions_for_nid(int nid)
+static void __init mark_reserved_regions_for_nid(int nid)
 {
 	struct pglist_data *node = NODE_DATA(nid);
 	struct memblock_region *reg;
@@ -1462,7 +1440,7 @@
 {
 	int cpu, nid, old_nid;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
-	struct sys_device *sysdev;
+	struct device *dev;
 
 	for_each_cpu(cpu,&cpu_associativity_changes_mask) {
 		vphn_get_associativity(cpu, associativity);
@@ -1483,9 +1461,9 @@
 		register_cpu_under_node(cpu, nid);
 		put_online_cpus();
 
-		sysdev = get_cpu_sysdev(cpu);
-		if (sysdev)
-			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+		dev = get_cpu_device(cpu);
+		if (dev)
+			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 
 	return 1;
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index dc4a5f3..ff672bd 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -94,11 +94,11 @@
 
 	srdi	r15,r16,60		/* get region */
 	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
-	bne-	dtlb_miss_fault_bolted
+	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */
 
 	rlwinm	r10,r11,32-19,27,27
 	rlwimi	r10,r11,32-16,19,19
-	cmpwi	r15,0
+	cmpwi	r15,0			/* user vs kernel check */
 	ori	r10,r10,_PAGE_PRESENT
 	oris	r11,r10,_PAGE_ACCESSED@h
 
@@ -120,44 +120,38 @@
 	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
 	cmpldi	cr0,r14,0
 	clrrdi	r15,r15,3
-	beq	tlb_miss_fault_bolted
+	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */
 
 BEGIN_MMU_FTR_SECTION
 	/* Set the TLB reservation and search for existing entry. Then load
 	 * the entry.
 	 */
 	PPC_TLBSRX_DOT(0,r16)
-	ldx	r14,r14,r15
-	beq	normal_tlb_miss_done
+	ldx	r14,r14,r15		/* grab pgd entry */
+	beq	normal_tlb_miss_done	/* tlb exists already, bail */
 MMU_FTR_SECTION_ELSE
-	ldx	r14,r14,r15
+	ldx	r14,r14,r15		/* grab pgd entry */
 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
 
 #ifndef CONFIG_PPC_64K_PAGES
 	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
+	ldx	r14,r14,r15		/* grab pud entry */
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab pmd entry */
 
 	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */
 
 	/* Check if required permissions are met */
 	andc.	r15,r11,r14
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 4e13d6f..df32a83 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -52,7 +52,7 @@
  * indirect page table entries.
  */
 #ifdef CONFIG_PPC_BOOK3E_MMU
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
@@ -615,7 +615,6 @@
 
 		/* limit memory so we dont have linear faults */
 		memblock_enforce_memory_limit(linear_map_top);
-		memblock_analyze();
 
 		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
 		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 1530229..a392d12 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -1,19 +1,3 @@
-#config BUBINGA
-#	bool "Bubinga"
-#	depends on 40x
-#	default n
-#	select 405EP
-#	help
-#	  This option enables support for the IBM 405EP evaluation board.
-
-#config CPCI405
-#	bool "CPCI405"
-#	depends on 40x
-#	default n
-#	select 405GP
-#	help
-#	  This option enables support for the CPCI405 board.
-
 config ACADIA
 	bool "Acadia"
 	depends on 40x
@@ -65,14 +49,6 @@
 	help
 	  This option enables support for the AMCC PPC405EX board.
 
-#config SYCAMORE
-#	bool "Sycamore"
-#	depends on 40x
-#	default n
-#	select 405GPR
-#	help
-#	  This option enables support for the IBM PPC405GPr evaluation board.
-
 config WALNUT
 	bool "Walnut"
 	depends on 40x
@@ -100,6 +76,16 @@
 	  Most Virtex designs should use this unless it needs to do some
 	  special configuration at board probe time.
 
+config OBS600
+	bool "OpenBlockS 600"
+	depends on 40x
+	default n
+	select 405EX
+	select PPC40x_SIMPLE
+	help
+	  This option enables support for the PlatHome OpenBlockS 600 server.
+
+
 config PPC40x_SIMPLE
 	bool "Simple PowerPC 40x board support"
 	depends on 40x
@@ -173,16 +159,11 @@
 config IBM405_ERR51
 	bool
 
-#config BIOS_FIXUP
-#	bool
-#	depends on BUBINGA || EP405 || SYCAMORE || WALNUT
-#	default y
-
-#config PPC4xx_DMA
-#	bool "PPC4xx DMA controller support"
-#	depends on 4xx
-
-#config PPC4xx_EDMA
-#	bool
-#	depends on !STB03xxx && PPC4xx_DMA
-#	default y
+config APM8018X
+	bool "APM8018X"
+	depends on 40x
+	default n
+	select PPC40x_SIMPLE
+	help
+	  This option enables support for the AppliedMicro APM8018X evaluation
+	  board.
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index e8dd5c5..9761206 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -55,7 +55,9 @@
 	"amcc,haleakala",
 	"amcc,kilauea",
 	"amcc,makalu",
-	"est,hotfoot"
+	"apm,klondike",
+	"est,hotfoot",
+	"plathome,obs600"
 };
 
 static int __init ppc40x_probe(void)
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 762322c..fcf6bf2 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -75,7 +75,7 @@
 	select PCI
 	select PPC4xx_PCI_EXPRESS
 	select PCI_MSI
-	select PCC4xx_MSI
+	select PPC4xx_MSI
 	help
 	  This option enables support for the AMCC PPC440SPe evaluation board.
 
@@ -186,6 +186,16 @@
 	help
 	  This option enables support for the IBM ISS simulation environment
 
+config CURRITUCK
+	bool "IBM Currituck (476fpe) Support"
+	depends on PPC_47x
+	default n
+	select SWIOTLB
+	select 476FPE
+	select PPC4xx_PCI_EXPRESS
+	help
+	  This option enables support for the IBM Currituck (476fpe) evaluation board
+
 config ICON
 	bool "Icon"
 	depends on 44x
@@ -197,22 +207,6 @@
 	help
 	  This option enables support for the AMCC PPC440SPe evaluation board.
 
-#config LUAN
-#	bool "Luan"
-#	depends on 44x
-#	default n
-#	select 440SP
-#	help
-#	  This option enables support for the IBM PPC440SP evaluation board.
-
-#config OCOTEA
-#	bool "Ocotea"
-#	depends on 44x
-#	default n
-#	select 440GX
-#	help
-#	  This option enables support for the IBM PPC440GX evaluation board.
-
 config XILINX_VIRTEX440_GENERIC_BOARD
 	bool "Generic Xilinx Virtex 5 FXT board support"
 	depends on 44x
@@ -308,6 +302,10 @@
 	select IBM_EMAC_ZMII
 	select IBM_EMAC_TAH
 
+config 476FPE
+	bool
+	select PPC_FPU
+
 config APM821xx
 	bool
 	select PPC_FPU
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 553db60..d03833a 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
 obj-$(CONFIG_ISS4xx)	+= iss4xx.o
 obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
+obj-$(CONFIG_CURRITUCK)	+= currituck.o
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
new file mode 100644
index 0000000..3f6229b
--- /dev/null
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -0,0 +1,204 @@
+/*
+ * Currituck board specific routines
+ *
+ * Copyright © 2011 Tony Breeds IBM Corporation
+ *
+ * Based on earlier code:
+ *    Matt Porter <mporter@kernel.crashing.org>
+ *    Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *    Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *    Copyright (c) 2003-2005 Zultys Technologies
+ *
+ *    Rewritten and ported to the merged powerpc tree:
+ *    Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *    Copyright © 2011 David Kliekamp IBM Corporation
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+#include <linux/pci.h>
+
+static __initdata struct of_device_id ppc47x_of_bus[] = {
+	{ .compatible = "ibm,plb4", },
+	{ .compatible = "ibm,plb6", },
+	{ .compatible = "ibm,opb", },
+	{ .compatible = "ibm,ebc", },
+	{},
+};
+
+/* The EEPROM is missing and the default values are bogus.  This forces USB
+ * into EHCI mode */
+static void __devinit quirk_ppc_currituck_usb_fixup(struct pci_dev *dev)
+{
+	if (of_machine_is_compatible("ibm,currituck")) {
+		pci_write_config_dword(dev, 0xe0, 0x0114231f);
+		pci_write_config_dword(dev, 0xe4, 0x00006c40);
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup);
+
+static int __init ppc47x_device_probe(void)
+{
+	of_platform_bus_probe(NULL, ppc47x_of_bus, NULL);
+
+	return 0;
+}
+machine_device_initcall(ppc47x, ppc47x_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init ppc47x_init_irq(void)
+{
+	struct device_node *np;
+
+	/* Find top level interrupt controller */
+	for_each_node_with_property(np, "interrupt-controller") {
+		if (of_get_property(np, "interrupts", NULL) == NULL)
+			break;
+	}
+	if (np == NULL)
+		panic("Can't find top level interrupt controller");
+
+	/* Check type and do appropriate initialization */
+	if (of_device_is_compatible(np, "chrp,open-pic")) {
+		/* The MPIC driver will get everything it needs from the
+		 * device-tree, just pass 0 to all arguments
+		 */
+		struct mpic *mpic =
+			mpic_alloc(np, 0, 0, 0, 0, " MPIC     ");
+		BUG_ON(mpic == NULL);
+		mpic_init(mpic);
+		ppc_md.get_irq = mpic_get_irq;
+	} else
+		panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_ppc47x_setup_cpu(int cpu)
+{
+	mpic_setup_this_cpu();
+}
+
+static int __cpuinit smp_ppc47x_kick_cpu(int cpu)
+{
+	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+	const u64 *spin_table_addr_prop;
+	u32 *spin_table;
+	extern void start_secondary_47x(void);
+
+	BUG_ON(cpunode == NULL);
+
+	/* Assume spin table. We could test for the enable-method in
+	 * the device-tree but currently there's little point as it's
+	 * our only supported method
+	 */
+	spin_table_addr_prop =
+		of_get_property(cpunode, "cpu-release-addr", NULL);
+
+	if (spin_table_addr_prop == NULL) {
+		pr_err("CPU%d: Can't start, missing cpu-release-addr !\n",
+		       cpu);
+		return 1;
+	}
+
+	/* Assume it's mapped as part of the linear mapping. This is a bit
+	 * fishy but will work fine for now
+	 *
+	 * XXX: Is there any reason to assume differently?
+	 */
+	spin_table = (u32 *)__va(*spin_table_addr_prop);
+	pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+	spin_table[3] = cpu;
+	smp_wmb();
+	spin_table[1] = __pa(start_secondary_47x);
+	mb();
+
+	return 0;
+}
+
+static struct smp_ops_t ppc47x_smp_ops = {
+	.probe		= smp_mpic_probe,
+	.message_pass	= smp_mpic_message_pass,
+	.setup_cpu	= smp_ppc47x_setup_cpu,
+	.kick_cpu	= smp_ppc47x_kick_cpu,
+	.give_timebase	= smp_generic_give_timebase,
+	.take_timebase	= smp_generic_take_timebase,
+};
+
+static void __init ppc47x_smp_init(void)
+{
+	if (mmu_has_feature(MMU_FTR_TYPE_47x))
+		smp_ops = &ppc47x_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init ppc47x_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init ppc47x_setup_arch(void)
+{
+
+	/* No need to check the DMA config as we /know/ our windows are all of
+	 * RAM.  Let's hope that doesn't change */
+#ifdef CONFIG_SWIOTLB
+	if (memblock_end_of_DRAM() > 0xffffffff) {
+		ppc_swiotlb_enable = 1;
+		set_pci_dma_ops(&swiotlb_dma_ops);
+		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+	}
+#endif
+	ppc47x_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init ppc47x_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	if (!of_flat_dt_is_compatible(root, "ibm,currituck"))
+		return 0;
+
+	return 1;
+}
+
+/* The USB controller should have been hardware swizzled but it wasn't :( */
+static void ppc47x_pci_irq_fixup(struct pci_dev *dev)
+{
+	if (dev->vendor == 0x1033 && (dev->device == 0x0035 ||
+	                              dev->device == 0x00e0)) {
+		dev->irq = irq_create_mapping(NULL, 47);
+		pr_info("%s: Mapping irq 47 %d\n", __func__, dev->irq);
+	}
+}
+
+define_machine(ppc47x) {
+	.name			= "PowerPC 47x",
+	.probe			= ppc47x_probe,
+	.progress		= udbg_progress,
+	.init_IRQ		= ppc47x_init_irq,
+	.setup_arch		= ppc47x_setup_arch,
+	.pci_irq_fixup		= ppc47x_pci_irq_fixup,
+	.restart		= ppc4xx_reset_system,
+	.calibrate_decr		= generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index 19395f1..5b8cdbb 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -71,7 +71,7 @@
 		/* The MPIC driver will get everything it needs from the
 		 * device-tree, just pass 0 to all arguments
 		 */
-		struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+		struct mpic *mpic = mpic_alloc(np, 0, 0, 0, 0,
 					       " MPIC     ");
 		BUG_ON(mpic == NULL);
 		mpic_init(mpic);
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index b3ebce1..c169998 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -12,7 +12,6 @@
 	bool "Freescale MPC5121E ADS"
 	depends on PPC_MPC512x
 	select DEFAULT_UIMAGE
-	select MPC5121_ADS_CPLD
 	help
 	  This option enables support for the MPC5121E ADS board.
 
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index aa0d84d..464ea8e 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -36,38 +36,7 @@
 	mpc834x_usb_cfg();
 }
 
-static void __init asp834x_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	of_node_put(np);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
-
-static struct __initdata of_device_id asp8347_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init asp8347_declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, asp8347_ids, NULL);
-	return 0;
-}
-machine_device_initcall(asp834x, asp8347_declare_of_platform_devices);
+machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices);
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
@@ -82,7 +51,7 @@
 	.name			= "ASP8347E",
 	.probe			= asp834x_probe,
 	.setup_arch		= asp834x_setup_arch,
-	.init_IRQ		= asp834x_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index c55129f..65eb792 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -51,15 +51,14 @@
  */
 static void __init mpc83xx_km_setup_arch(void)
 {
+#ifdef CONFIG_QUICC_ENGINE
 	struct device_node *np;
+#endif
 
 	if (ppc_md.progress)
 		ppc_md.progress("kmpbec83xx_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
 	qe_reset();
@@ -122,54 +121,7 @@
 #endif				/* CONFIG_QUICC_ENGINE */
 }
 
-static struct of_device_id kmpbec83xx_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .type = "qe", },
-	{ .compatible = "fsl,qe", },
-	{},
-};
-
-static int __init kmeter_declare_of_platform_devices(void)
-{
-	/* Publish the QE devices */
-	of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices);
-
-static void __init mpc83xx_km_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,pq2pro-pic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "ipic");
-		if (!np)
-			return;
-	}
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-	of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "qeic");
-		if (!np)
-			return;
-	}
-	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
-	of_node_put(np);
-#endif				/* CONFIG_QUICC_ENGINE */
-}
+machine_device_initcall(mpc83xx_km, mpc83xx_declare_of_platform_devices);
 
 /* list of the supported boards */
 static char *board[] __initdata = {
@@ -198,7 +150,7 @@
 	.name		= "mpc83xx-km-platform",
 	.probe		= mpc83xx_km_probe,
 	.setup_arch	= mpc83xx_km_setup_arch,
-	.init_IRQ	= mpc83xx_km_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index f01806c..125336f 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -11,10 +11,15 @@
 
 #include <linux/stddef.h>
 #include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
 
 #include <asm/io.h>
 #include <asm/hw_irq.h>
+#include <asm/ipic.h>
+#include <asm/qe_ic.h>
 #include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
 
 #include "mpc83xx.h"
 
@@ -65,3 +70,75 @@
 
 	return 0;
 }
+
+void __init mpc83xx_ipic_init_IRQ(void)
+{
+	struct device_node *np;
+
+	/* looking for fsl,pq2pro-pic which is also compatible with fsl,ipic */
+	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
+	if (!np)
+		np = of_find_node_by_type(NULL, "ipic");
+	if (!np)
+		return;
+
+	ipic_init(np, 0);
+
+	of_node_put(np);
+
+	/* Initialize the default interrupt mapping priorities,
+	 * in case the boot rom changed something on us.
+	 */
+	ipic_set_default_priority();
+}
+
+#ifdef CONFIG_QUICC_ENGINE
+void __init mpc83xx_qe_init_IRQ(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+	if (!np) {
+		np = of_find_node_by_type(NULL, "qeic");
+		if (!np)
+			return;
+	}
+	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
+	of_node_put(np);
+}
+
+void __init mpc83xx_ipic_and_qe_init_IRQ(void)
+{
+	mpc83xx_ipic_init_IRQ();
+	mpc83xx_qe_init_IRQ();
+}
+#endif /* CONFIG_QUICC_ENGINE */
+
+static struct of_device_id __initdata of_bus_ids[] = {
+	{ .type = "soc", },
+	{ .compatible = "soc", },
+	{ .compatible = "simple-bus" },
+	{ .compatible = "gianfar" },
+	{ .compatible = "gpio-leds", },
+	{ .type = "qe", },
+	{ .compatible = "fsl,qe", },
+	{},
+};
+
+int __init mpc83xx_declare_of_platform_devices(void)
+{
+	of_platform_bus_probe(NULL, of_bus_ids, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_PCI
+void __init mpc83xx_setup_pci(void)
+{
+	struct device_node *np;
+
+	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
+		mpc83xx_add_bridge(np);
+	for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
+		mpc83xx_add_bridge(np);
+}
+#endif
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index d0c4e15..4f2d9fe 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -27,36 +27,13 @@
  */
 static void __init mpc830x_rdb_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc830x_rdb_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8308-pcie")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 	mpc831x_usb_cfg();
 }
 
-static void __init mpc830x_rdb_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
-
 static const char *board[] __initdata = {
 	"MPC8308RDB",
 	"fsl,mpc8308rdb",
@@ -72,24 +49,13 @@
 	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .compatible = "simple-bus" },
-	{ .compatible = "gianfar" },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-	return 0;
-}
-machine_device_initcall(mpc830x_rdb, declare_of_platform_devices);
+machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices);
 
 define_machine(mpc830x_rdb) {
 	.name			= "MPC830x RDB",
 	.probe			= mpc830x_rdb_probe,
 	.setup_arch		= mpc830x_rdb_setup_arch,
-	.init_IRQ		= mpc830x_rdb_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index f859ead..fa25977 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -28,38 +28,13 @@
  */
 static void __init mpc831x_rdb_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-	for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 	mpc831x_usb_cfg();
 }
 
-static void __init mpc831x_rdb_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
-
 static const char *board[] __initdata = {
 	"MPC8313ERDB",
 	"fsl,mpc8315erdb",
@@ -74,25 +49,13 @@
 	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .compatible = "simple-bus" },
-	{ .compatible = "gianfar" },
-	{ .compatible = "gpio-leds", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-	return 0;
-}
-machine_device_initcall(mpc831x_rdb, declare_of_platform_devices);
+machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices);
 
 define_machine(mpc831x_rdb) {
 	.name			= "MPC831x RDB",
 	.probe			= mpc831x_rdb_probe,
 	.setup_arch		= mpc831x_rdb_setup_arch,
-	.init_IRQ		= mpc831x_rdb_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index 32a5289..e36bc61 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -72,10 +72,7 @@
 		of_node_put(np);
 	}
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
 	qe_reset();
@@ -101,51 +98,7 @@
 #endif				/* CONFIG_QUICC_ENGINE */
 }
 
-static struct of_device_id mpc832x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .type = "qe", },
-	{ .compatible = "fsl,qe", },
-	{},
-};
-
-static int __init mpc832x_declare_of_platform_devices(void)
-{
-	/* Publish the QE devices */
-	of_platform_bus_probe(NULL, mpc832x_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc832x_mds, mpc832x_declare_of_platform_devices);
-
-static void __init mpc832x_sys_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-	of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "qeic");
-		if (!np)
-			return;
-	}
-	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
-	of_node_put(np);
-#endif				/* CONFIG_QUICC_ENGINE */
-}
+machine_device_initcall(mpc832x_mds, mpc83xx_declare_of_platform_devices);
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
@@ -161,7 +114,7 @@
 	.name 		= "MPC832x MDS",
 	.probe 		= mpc832x_sys_probe,
 	.setup_arch 	= mpc832x_sys_setup_arch,
-	.init_IRQ 	= mpc832x_sys_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
 	.get_irq 	= ipic_get_irq,
 	.restart 	= mpc83xx_restart,
 	.time_init 	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 17f9974..eff5baa 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -193,17 +193,14 @@
  */
 static void __init mpc832x_rdb_setup_arch(void)
 {
-#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE)
+#if defined(CONFIG_QUICC_ENGINE)
 	struct device_node *np;
 #endif
 
 	if (ppc_md.progress)
 		ppc_md.progress("mpc832x_rdb_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
 	qe_reset();
@@ -218,52 +215,7 @@
 #endif				/* CONFIG_QUICC_ENGINE */
 }
 
-static struct of_device_id mpc832x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .type = "qe", },
-	{ .compatible = "fsl,qe", },
-	{},
-};
-
-static int __init mpc832x_declare_of_platform_devices(void)
-{
-	/* Publish the QE devices */
-	of_platform_bus_probe(NULL, mpc832x_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
-
-static void __init mpc832x_rdb_init_IRQ(void)
-{
-
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-	of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "qeic");
-		if (!np)
-			return;
-	}
-	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
-	of_node_put(np);
-#endif				/* CONFIG_QUICC_ENGINE */
-}
+machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices);
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
@@ -279,7 +231,7 @@
 	.name		= "MPC832x RDB",
 	.probe		= mpc832x_rdb_probe,
 	.setup_arch	= mpc832x_rdb_setup_arch,
-	.init_IRQ	= mpc832x_rdb_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 6b45969..39849dd 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -41,13 +41,12 @@
 
 static struct of_device_id __initdata mpc834x_itx_ids[] = {
 	{ .compatible = "fsl,pq2pro-localbus", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
 	{},
 };
 
 static int __init mpc834x_itx_declare_of_platform_devices(void)
 {
+	mpc83xx_declare_of_platform_devices();
 	return of_platform_bus_probe(NULL, mpc834x_itx_ids, NULL);
 }
 machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
@@ -59,37 +58,14 @@
  */
 static void __init mpc834x_itx_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc834x_itx_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 
 	mpc834x_usb_cfg();
 }
 
-static void __init mpc834x_itx_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
-
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
@@ -104,7 +80,7 @@
 	.name			= "MPC834x ITX",
 	.probe			= mpc834x_itx_probe,
 	.setup_arch		= mpc834x_itx_setup_arch,
-	.init_IRQ		= mpc834x_itx_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 041c517..5828d8e 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -77,51 +77,15 @@
  */
 static void __init mpc834x_mds_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc834x_mds_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 
 	mpc834xemds_usb_cfg();
 }
 
-static void __init mpc834x_mds_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
-
-static struct of_device_id mpc834x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init mpc834x_declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, mpc834x_ids, NULL);
-	return 0;
-}
-machine_device_initcall(mpc834x_mds, mpc834x_declare_of_platform_devices);
+machine_device_initcall(mpc834x_mds, mpc83xx_declare_of_platform_devices);
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
@@ -137,7 +101,7 @@
 	.name			= "MPC834x MDS",
 	.probe			= mpc834x_mds_probe,
 	.setup_arch		= mpc834x_mds_setup_arch,
-	.init_IRQ		= mpc834x_mds_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 934cc8c4..ad8e4bc 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -80,10 +80,7 @@
 		of_node_put(np);
 	}
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
 	qe_reset();
@@ -144,23 +141,7 @@
 #endif				/* CONFIG_QUICC_ENGINE */
 }
 
-static struct of_device_id mpc836x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .type = "qe", },
-	{ .compatible = "fsl,qe", },
-	{},
-};
-
-static int __init mpc836x_declare_of_platform_devices(void)
-{
-	/* Publish the QE devices */
-	of_platform_bus_probe(NULL, mpc836x_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
+machine_device_initcall(mpc836x_mds, mpc83xx_declare_of_platform_devices);
 
 #ifdef CONFIG_QE_USB
 static int __init mpc836x_usb_cfg(void)
@@ -226,34 +207,6 @@
 machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
 #endif /* CONFIG_QE_USB */
 
-static void __init mpc836x_mds_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-	of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np) {
-		np = of_find_node_by_type(NULL, "qeic");
-		if (!np)
-			return;
-	}
-	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
-	of_node_put(np);
-#endif				/* CONFIG_QUICC_ENGINE */
-}
-
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
@@ -268,7 +221,7 @@
 	.name		= "MPC836x MDS",
 	.probe		= mpc836x_mds_probe,
 	.setup_arch	= mpc836x_mds_setup_arch,
-	.init_IRQ	= mpc836x_mds_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index b0090aa..f8769d7 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -27,61 +27,19 @@
 
 #include "mpc83xx.h"
 
-static struct of_device_id __initdata mpc836x_rdk_ids[] = {
-	{ .compatible = "simple-bus", },
-	{},
-};
-
-static int __init mpc836x_rdk_declare_of_platform_devices(void)
-{
-	return of_platform_bus_probe(NULL, mpc836x_rdk_ids, NULL);
-}
-machine_device_initcall(mpc836x_rdk, mpc836x_rdk_declare_of_platform_devices);
+machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices);
 
 static void __init mpc836x_rdk_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 #ifdef CONFIG_QUICC_ENGINE
 	qe_reset();
 #endif
 }
 
-static void __init mpc836x_rdk_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/*
-	 * Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-	of_node_put(np);
-#ifdef CONFIG_QUICC_ENGINE
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-	if (!np)
-		return;
-
-	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
-	of_node_put(np);
-#endif
-}
-
 /*
  * Called very early, MMU is off, device-tree isn't unflattened.
  */
@@ -96,7 +54,7 @@
 	.name		= "MPC836x RDK",
 	.probe		= mpc836x_rdk_probe,
 	.setup_arch	= mpc836x_rdk_setup_arch,
-	.init_IRQ	= mpc836x_rdk_init_IRQ,
+	.init_IRQ	= mpc83xx_ipic_and_qe_init_IRQ,
 	.get_irq	= ipic_get_irq,
 	.restart	= mpc83xx_restart,
 	.time_init	= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 8306832..e53a60b 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -79,54 +79,14 @@
  */
 static void __init mpc837x_mds_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc837x_mds_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-	for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 	mpc837xmds_usb_cfg();
 }
 
-static struct of_device_id mpc837x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init mpc837x_declare_of_platform_devices(void)
-{
-	/* Publish platform_device */
-	of_platform_bus_probe(NULL, mpc837x_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc837x_mds, mpc837x_declare_of_platform_devices);
-
-static void __init mpc837x_mds_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
+machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices);
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
@@ -142,7 +102,7 @@
 	.name			= "MPC837x MDS",
 	.probe			= mpc837x_mds_probe,
 	.setup_arch		= mpc837x_mds_setup_arch,
-	.init_IRQ		= mpc837x_mds_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 7bafbf2..16c9c9c 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -50,56 +50,15 @@
  */
 static void __init mpc837x_rdb_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-	for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
-		mpc83xx_add_bridge(np);
-#endif
+	mpc83xx_setup_pci();
 	mpc837x_usb_cfg();
 	mpc837x_rdb_sd_cfg();
 }
 
-static struct of_device_id mpc837x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{ .compatible = "gpio-leds", },
-	{},
-};
-
-static int __init mpc837x_declare_of_platform_devices(void)
-{
-	/* Publish platform_device */
-	of_platform_bus_probe(NULL, mpc837x_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc837x_rdb, mpc837x_declare_of_platform_devices);
-
-static void __init mpc837x_rdb_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-}
+machine_device_initcall(mpc837x_rdb, mpc83xx_declare_of_platform_devices);
 
 static const char *board[] __initdata = {
 	"fsl,mpc8377rdb",
@@ -121,7 +80,7 @@
 	.name			= "MPC837x RDB/WLAN",
 	.probe			= mpc837x_rdb_probe,
 	.setup_arch		= mpc837x_rdb_setup_arch,
-	.init_IRQ		= mpc837x_rdb_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 82a4345..0cf74d7 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -70,5 +70,21 @@
 extern int mpc837x_usb_cfg(void);
 extern int mpc834x_usb_cfg(void);
 extern int mpc831x_usb_cfg(void);
+extern void mpc83xx_ipic_init_IRQ(void);
+#ifdef CONFIG_QUICC_ENGINE
+extern void mpc83xx_qe_init_IRQ(void);
+extern void mpc83xx_ipic_and_qe_init_IRQ(void);
+#else
+static inline void __init mpc83xx_qe_init_IRQ(void) {}
+#define mpc83xx_ipic_and_qe_init_IRQ mpc83xx_ipic_init_IRQ
+#endif /* CONFIG_QUICC_ENGINE */
+
+#ifdef CONFIG_PCI
+extern void mpc83xx_setup_pci(void);
+#else
+#define mpc83xx_setup_pci()	do {} while (0)
+#endif
+
+extern int mpc83xx_declare_of_platform_devices(void);
 
 #endif				/* __MPC83XX_H__ */
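Not part of the patch: an illustrative sketch of what a minimal 83xx board port looks like once it uses the helpers declared above. The "myboard" machine, compatible string and board name are hypothetical; the helpers (mpc83xx_setup_pci, mpc83xx_ipic_init_IRQ, mpc83xx_declare_of_platform_devices) are the ones this series consolidates into misc.c/mpc83xx.h.

#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/time.h>

#include "mpc83xx.h"

static void __init myboard_setup_arch(void)
{
	/* PCI bridges now come from the shared helper (no-op without CONFIG_PCI) */
	mpc83xx_setup_pci();
}

/* Shared bus-id table publishes soc/simple-bus/gianfar/qe platform devices */
machine_device_initcall(myboard, mpc83xx_declare_of_platform_devices);

static int __init myboard_probe(void)
{
	return of_flat_dt_is_compatible(of_get_flat_dt_root(), "vendor,myboard");
}

define_machine(myboard) {
	.name		= "Hypothetical 83xx board",
	.probe		= myboard_probe,
	.setup_arch	= myboard_setup_arch,
	.init_IRQ	= mpc83xx_ipic_init_IRQ, /* or mpc83xx_ipic_and_qe_init_IRQ */
	.get_irq	= ipic_get_irq,
	.restart	= mpc83xx_restart,
	.time_init	= mpc83xx_time_init,
	.calibrate_decr	= generic_calibrate_decr,
};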
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index af41d8c..8a81d76 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -48,52 +48,13 @@
  */
 static void __init sbc834x_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-	struct device_node *np;
-#endif
-
 	if (ppc_md.progress)
 		ppc_md.progress("sbc834x_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
-		mpc83xx_add_bridge(np);
-#endif
-
+	mpc83xx_setup_pci();
 }
 
-static void __init sbc834x_init_IRQ(void)
-{
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "ipic");
-	if (!np)
-		return;
-
-	ipic_init(np, 0);
-
-	/* Initialize the default interrupt mapping priorities,
-	 * in case the boot rom changed something on us.
-	 */
-	ipic_set_default_priority();
-
-	of_node_put(np);
-}
-
-static struct __initdata of_device_id sbc834x_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init sbc834x_declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, sbc834x_ids, NULL);
-	return 0;
-}
-machine_device_initcall(sbc834x, sbc834x_declare_of_platform_devices);
+machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
@@ -102,14 +63,14 @@
 {
 	unsigned long root = of_get_flat_dt_root();
 
-	return of_flat_dt_is_compatible(root, "SBC834x");
+	return of_flat_dt_is_compatible(root, "SBC834xE");
 }
 
 define_machine(sbc834x) {
-	.name			= "SBC834x",
+	.name			= "SBC834xE",
 	.probe			= sbc834x_probe,
 	.setup_arch		= sbc834x_setup_arch,
-	.init_IRQ		= sbc834x_init_IRQ,
+	.init_IRQ		= mpc83xx_ipic_init_IRQ,
 	.get_irq		= ipic_get_irq,
 	.restart		= mpc83xx_restart,
 	.time_init		= mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index bc5acb9..9cb2d43 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -3,6 +3,8 @@
 #
 obj-$(CONFIG_SMP) += smp.o
 
+obj-y += common.o
+
 obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
 obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
 obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
new file mode 100644
index 0000000..9fef530
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -0,0 +1,66 @@
+/*
+ * Routines common to most mpc85xx-based boards.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of_platform.h>
+
+#include <sysdev/cpm2_pic.h>
+
+#include "mpc85xx.h"
+
+static struct of_device_id __initdata mpc85xx_common_ids[] = {
+	{ .type = "soc", },
+	{ .compatible = "soc", },
+	{ .compatible = "simple-bus", },
+	{ .name = "cpm", },
+	{ .name = "localbus", },
+	{ .compatible = "gianfar", },
+	{ .compatible = "fsl,qe", },
+	{ .compatible = "fsl,cpm2", },
+	{ .compatible = "fsl,srio", },
+	{},
+};
+
+int __init mpc85xx_common_publish_devices(void)
+{
+	return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
+}
+#ifdef CONFIG_CPM2
+static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int cascade_irq;
+
+	while ((cascade_irq = cpm2_get_irq()) >= 0)
+		generic_handle_irq(cascade_irq);
+
+	chip->irq_eoi(&desc->irq_data);
+}
+
+
+void __init mpc85xx_cpm2_pic_init(void)
+{
+	struct device_node *np;
+	int irq;
+
+	/* Setup CPM2 PIC */
+	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
+	if (np == NULL) {
+		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
+		return;
+	}
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq == NO_IRQ) {
+		of_node_put(np);
+		printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
+		return;
+	}
+
+	cpm2_pic_init(np);
+	of_node_put(np);
+	irq_set_chained_handler(irq, cpm2_cascade);
+}
+#endif
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 802ad11..07e3e6c 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -31,32 +31,18 @@
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
+#include "smp.h"
 
 void __init corenet_ds_pic_init(void)
 {
 	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-	unsigned int flags = MPIC_PRIMARY | MPIC_BIG_ENDIAN |
+	unsigned int flags = MPIC_BIG_ENDIAN |
 				MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU;
 
-	np = of_find_node_by_type(np, "open-pic");
-
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
 	if (ppc_md.get_irq == mpic_get_coreint_irq)
 		flags |= MPIC_ENABLE_COREINT;
 
-	mpic = mpic_alloc(np, r.start, flags, 0, 256, " OpenPIC  ");
+	mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 
 	mpic_init(mpic);
@@ -65,10 +51,6 @@
 /*
  * Setup the architecture
  */
-#ifdef CONFIG_SMP
-void __init mpc85xx_smp_init(void);
-#endif
-
 void __init corenet_ds_setup_arch(void)
 {
 #ifdef CONFIG_PCI
@@ -77,9 +59,7 @@
 #endif
 	dma_addr_t max = 0xffffffff;
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
 
 #ifdef CONFIG_PCI
 	for_each_node_by_type(np, "pci") {
@@ -112,7 +92,7 @@
 		.compatible	= "simple-bus"
 	},
 	{
-		.compatible	= "fsl,rapidio-delta",
+		.compatible	= "fsl,srio",
 	},
 	{
 		.compatible	= "fsl,p4080-pcie",
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index c46f935..20f75d7 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -35,6 +35,7 @@
 #include <asm/cpm2.h>
 #include <sysdev/cpm2_pic.h>
 
+#include "mpc85xx.h"
 
 #define KSI8560_CPLD_HVR		0x04 /* Hardware Version Register */
 #define KSI8560_CPLD_PVR		0x08 /* PLD Version Register */
@@ -54,60 +55,15 @@
 	for (;;);
 }
 
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	int cascade_irq;
-
-	while ((cascade_irq = cpm2_get_irq()) >= 0)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
 static void __init ksi8560_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-#ifdef CONFIG_CPM2
-	int irq;
-#endif
-
-	np = of_find_node_by_type(NULL, "open-pic");
-
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Could not map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
-#ifdef CONFIG_CPM2
-	/* Setup CPM2 PIC */
-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
-		return;
-	}
-	irq = irq_of_parse_and_map(np, 0);
-
-	cpm2_pic_init(np);
-	of_node_put(np);
-	irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+	mpc85xx_cpm2_pic_init();
 }
 
 #ifdef CONFIG_CPM2
@@ -215,22 +171,7 @@
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .type = "soc", },
-	{ .type = "simple-bus", },
-	{ .name = "cpm", },
-	{ .name = "localbus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(ksi8560, declare_of_platform_devices);
+machine_device_initcall(ksi8560, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index f79f2f1..cf26682 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -32,31 +32,15 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 void __init mpc8536_ds_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			  MPIC_PRIMARY | MPIC_WANTS_RESET |
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			  MPIC_WANTS_RESET |
 			  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 }
 
@@ -104,19 +88,7 @@
 	printk("MPC8536 DS board from Freescale Semiconductor\n");
 }
 
-static struct of_device_id __initdata mpc8536_ds_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init mpc8536_ds_publish_devices(void)
-{
-	return of_platform_bus_probe(NULL, mpc8536_ds_ids, NULL);
-}
-machine_device_initcall(mpc8536_ds, mpc8536_ds_publish_devices);
+machine_device_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
 
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
new file mode 100644
index 0000000..2aa7c5d
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -0,0 +1,11 @@
+#ifndef MPC85xx_H
+#define MPC85xx_H
+extern int mpc85xx_common_publish_devices(void);
+
+#ifdef CONFIG_CPM2
+extern void mpc85xx_cpm2_pic_init(void);
+#else
+static inline void __init mpc85xx_cpm2_pic_init(void) {}
+#endif /* CONFIG_CPM2 */
+
+#endif
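Likewise not part of the patch: a sketch of the 85xx pattern once common.c and this header are in place. A board's PIC init only allocates and initializes its MPIC and then calls the shared CPM2 hook, which compiles to a no-op stub when CONFIG_CPM2 is off; platform devices come from the common id table. The "myboard" machine name is made up.

#include <asm/mpic.h>

#include "mpc85xx.h"

static void __init myboard_pic_init(void)
{
	/* mpic_alloc(NULL, ...) now locates the open-pic node itself */
	struct mpic *mpic = mpic_alloc(NULL, 0,
			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
			0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	mpic_init(mpic);

	/* Chains the CPM2 cascade when CONFIG_CPM2=y, otherwise does nothing */
	mpc85xx_cpm2_pic_init();
}

/* Publishes soc/simple-bus/cpm/localbus/gianfar/... from the common id table */
machine_device_initcall(myboard, mpc85xx_common_publish_devices);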
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 3b2c9bb..3bebb51 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -35,6 +35,8 @@
 #include <sysdev/cpm2_pic.h>
 #endif
 
+#include "mpc85xx.h"
+
 #ifdef CONFIG_PCI
 static int mpc85xx_exclude_device(struct pci_controller *hose,
 				   u_char bus, u_char devfn)
@@ -46,63 +48,15 @@
 }
 #endif /* CONFIG_PCI */
 
-#ifdef CONFIG_CPM2
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	int cascade_irq;
-
-	while ((cascade_irq = cpm2_get_irq()) >= 0)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* CONFIG_CPM2 */
-
 static void __init mpc85xx_ads_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-#ifdef CONFIG_CPM2
-	int irq;
-#endif
-
-	np = of_find_node_by_type(np, "open-pic");
-	if (!np) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Could not map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
-#ifdef CONFIG_CPM2
-	/* Setup CPM2 PIC */
-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
-		return;
-	}
-	irq = irq_of_parse_and_map(np, 0);
-
-	cpm2_pic_init(np);
-	of_node_put(np);
-	irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+	mpc85xx_cpm2_pic_init();
 }
 
 /*
@@ -221,23 +175,7 @@
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .name = "soc", },
-	{ .type = "soc", },
-	{ .name = "cpm", },
-	{ .name = "localbus", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(mpc85xx_ads, declare_of_platform_devices);
+machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 66cb8d6..40f03da 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -46,6 +46,8 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 /* CADMUS info */
 /* xxx - galak, move into device tree */
 #define CADMUS_BASE (0xf8004000)
@@ -177,7 +179,7 @@
 
 static struct irqaction mpc85xxcds_8259_irqaction = {
 	.handler = mpc85xx_8259_cascade_action,
-	.flags = IRQF_SHARED,
+	.flags = IRQF_SHARED | IRQF_NO_THREAD,
 	.name = "8259 cascade",
 };
 #endif /* PPC_I8259 */
@@ -186,30 +188,10 @@
 static void __init mpc85xx_cds_pic_init(void)
 {
 	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-
-	np = of_find_node_by_type(np, "open-pic");
-
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-
-	/* Return the mpic node */
-	of_node_put(np);
-
 	mpic_init(mpic);
 }
 
@@ -330,19 +312,7 @@
         return of_flat_dt_is_compatible(root, "MPC85xxCDS");
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	return of_platform_bus_probe(NULL, of_bus_ids, NULL);
-}
-machine_device_initcall(mpc85xx_cds, declare_of_platform_devices);
+machine_device_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
 
 define_machine(mpc85xx_cds) {
 	.name		= "MPC85xx CDS",
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 1b9a8cf..eefbb91 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -35,6 +35,9 @@
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
 
 #undef DEBUG
 
@@ -60,43 +63,27 @@
 void __init mpc85xx_ds_pic_init(void)
 {
 	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
 #ifdef CONFIG_PPC_I8259
+	struct device_node *np;
 	struct device_node *cascade_node = NULL;
 	int cascade_irq;
 #endif
 	unsigned long root = of_get_flat_dt_root();
 
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
 	if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
-		mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY |
+		mpic = mpic_alloc(NULL, 0,
 			MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
 			MPIC_SINGLE_DEST_CPU,
 			0, 256, " OpenPIC  ");
 	} else {
-		mpic = mpic_alloc(np, r.start,
-			  MPIC_PRIMARY | MPIC_WANTS_RESET |
+		mpic = mpic_alloc(NULL, 0,
+			  MPIC_WANTS_RESET |
 			  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
 			  MPIC_SINGLE_DEST_CPU,
 			0, 256, " OpenPIC  ");
 	}
 
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
 #ifdef CONFIG_PPC_I8259
@@ -152,9 +139,6 @@
 /*
  * Setup the architecture
  */
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
 static void __init mpc85xx_ds_setup_arch(void)
 {
 #ifdef CONFIG_PCI
@@ -187,9 +171,7 @@
 	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
 
 #ifdef CONFIG_SWIOTLB
 	if (memblock_end_of_DRAM() > max) {
@@ -219,21 +201,9 @@
 	return 0;
 }
 
-static struct of_device_id __initdata mpc85xxds_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init mpc85xxds_publish_devices(void)
-{
-	return of_platform_bus_probe(NULL, mpc85xxds_ids, NULL);
-}
-machine_device_initcall(mpc8544_ds, mpc85xxds_publish_devices);
-machine_device_initcall(mpc8572_ds, mpc85xxds_publish_devices);
-machine_device_initcall(p2020_ds, mpc85xxds_publish_devices);
+machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
+machine_device_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
+machine_device_initcall(p2020_ds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
 machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index a23a3ff..1d15a0c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -51,6 +51,9 @@
 #include <asm/qe_ic.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
 
 #undef DEBUG
 #ifdef DEBUG
@@ -153,30 +156,7 @@
  * Setup the architecture
  *
  */
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
-
 #ifdef CONFIG_QUICC_ENGINE
-static struct of_device_id mpc85xx_qe_ids[] __initdata = {
-	{ .type = "qe", },
-	{ .compatible = "fsl,qe", },
-	{ },
-};
-
-static void __init mpc85xx_publish_qe_devices(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,qe");
-	if (!of_device_is_available(np)) {
-		of_node_put(np);
-		return;
-	}
-
-	of_platform_bus_probe(NULL, mpc85xx_qe_ids, NULL);
-}
-
 static void __init mpc85xx_mds_reset_ucc_phys(void)
 {
 	struct device_node *np;
@@ -347,7 +327,6 @@
 	of_node_put(np);
 }
 #else
-static void __init mpc85xx_publish_qe_devices(void) { }
 static void __init mpc85xx_mds_qe_init(void) { }
 static void __init mpc85xx_mds_qeic_init(void) { }
 #endif	/* CONFIG_QUICC_ENGINE */
@@ -381,9 +360,7 @@
 	}
 #endif
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
 
 	mpc85xx_mds_qe_init();
 
@@ -429,24 +406,11 @@
 machine_arch_initcall(mpc8569_mds, board_fixups);
 
 static struct of_device_id mpc85xx_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{ .compatible = "fsl,rapidio-delta", },
 	{ .compatible = "fsl,mpc8548-guts", },
 	{ .compatible = "gpio-leds", },
 	{},
 };
 
-static struct of_device_id p1021_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
 static int __init mpc85xx_publish_devices(void)
 {
 	if (machine_is(mpc8568_mds))
@@ -454,23 +418,15 @@
 	if (machine_is(mpc8569_mds))
 		simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
 
+	mpc85xx_common_publish_devices();
 	of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
-	mpc85xx_publish_qe_devices();
-
-	return 0;
-}
-
-static int __init p1021_publish_devices(void)
-{
-	of_platform_bus_probe(NULL, p1021_ids, NULL);
-	mpc85xx_publish_qe_devices();
 
 	return 0;
 }
 
 machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
 machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices);
-machine_device_initcall(p1021_mds, p1021_publish_devices);
+machine_device_initcall(p1021_mds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
 machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
@@ -478,26 +434,11 @@
 
 static void __init mpc85xx_mds_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (!np)
-		return;
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
 			MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
 
 	mpic_init(mpic);
 	mpc85xx_mds_qeic_init();
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index f5ff911..ccf520e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -29,6 +29,9 @@
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
 
 #undef DEBUG
 
@@ -42,49 +45,28 @@
 void __init mpc85xx_rdb_pic_init(void)
 {
 	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
 	unsigned long root = of_get_flat_dt_root();
 
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
 	if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
-		mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY |
+		mpic = mpic_alloc(NULL, 0,
 			MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
 			MPIC_SINGLE_DEST_CPU,
 			0, 256, " OpenPIC  ");
 	} else {
-		mpic = mpic_alloc(np, r.start,
-		  MPIC_PRIMARY | MPIC_WANTS_RESET |
+		mpic = mpic_alloc(NULL, 0,
+		  MPIC_WANTS_RESET |
 		  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
 		  MPIC_SINGLE_DEST_CPU,
 		  0, 256, " OpenPIC  ");
 	}
 
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
-
 }
 
 /*
  * Setup the architecture
  */
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
 static void __init mpc85xx_rdb_setup_arch(void)
 {
 #ifdef CONFIG_PCI
@@ -102,27 +84,12 @@
 
 #endif
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
-
 	printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
 }
 
-static struct of_device_id __initdata mpc85xxrdb_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init mpc85xxrdb_publish_devices(void)
-{
-	return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL);
-}
-machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices);
-machine_device_initcall(p1020_rdb, mpc85xxrdb_publish_devices);
+machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices);
+machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index d7387fa..538bc3f 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -28,33 +28,18 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 void __init p1010_rdb_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET |
-	  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+	  MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+	  MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
 	  0, 256, " OpenPIC  ");
 
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
 
 	mpic_init(mpic);
-
 }
 
 
@@ -81,18 +66,7 @@
 	printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
 }
 
-static struct of_device_id __initdata p1010rdb_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{},
-};
-
-static int __init p1010rdb_publish_devices(void)
-{
-	return of_platform_bus_probe(NULL, p1010rdb_ids, NULL);
-}
-machine_device_initcall(p1010_rdb, p1010rdb_publish_devices);
+machine_device_initcall(p1010_rdb, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
 
 /*
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index fda1571..bb3d84f 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -26,6 +26,9 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <asm/fsl_guts.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
 
 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
 
@@ -238,38 +241,15 @@
 
 void __init p1022_ds_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (!np) {
-		pr_err("Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		pr_err("Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-		MPIC_PRIMARY | MPIC_WANTS_RESET |
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+		MPIC_WANTS_RESET |
 		MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
 		MPIC_SINGLE_DEST_CPU,
 		0, 256, " OpenPIC  ");
-
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 }
 
-#ifdef CONFIG_SMP
-void __init mpc85xx_smp_init(void);
-#endif
-
 /*
  * Setup the architecture
  */
@@ -309,9 +289,7 @@
 	diu_ops.valid_monitor_port	= p1022ds_valid_monitor_port;
 #endif
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
 
 #ifdef CONFIG_SWIOTLB
 	if (memblock_end_of_DRAM() > max) {
@@ -325,10 +303,6 @@
 }
 
 static struct of_device_id __initdata p1022_ds_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
 	/* So that the DMA channel nodes can be probed individually: */
 	{ .compatible = "fsl,eloplus-dma", },
 	{},
@@ -336,6 +310,7 @@
 
 static int __init p1022_ds_publish_devices(void)
 {
+	mpc85xx_common_publish_devices();
 	return of_platform_bus_probe(NULL, p1022_ds_ids, NULL);
 }
 machine_device_initcall(p1022_ds, p1022_ds_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 835e0b3..d951e70 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -30,19 +30,18 @@
 #include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
+#include "smp.h"
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 /* ************************************************************************
  *
  * Setup the architecture
  *
  */
-#ifdef CONFIG_SMP
-void __init mpc85xx_smp_init(void);
-#endif
-
 static void __init mpc85xx_rds_setup_arch(void)
 {
 	struct device_node *np;
@@ -87,53 +86,19 @@
 		fsl_add_bridge(np, 0);
 #endif
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
 }
 
-static struct of_device_id p1023_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{},
-};
-
-
-static int __init p1023_publish_devices(void)
-{
-	of_platform_bus_probe(NULL, p1023_ids, NULL);
-
-	return 0;
-}
-
-machine_device_initcall(p1023_rds, p1023_publish_devices);
+machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices);
 
 static void __init mpc85xx_rds_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (!np) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-		MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+		MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
 		MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
 		0, 256, " OpenPIC  ");
 
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
 
 	mpic_init(mpic);
 }
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 14632a9..184a507 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -48,35 +48,16 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 static int sbc_rev;
 
 static void __init sbc8548_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-
-	np = of_find_node_by_type(np, "open-pic");
-
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-
-	/* Return the mpic node */
-	of_node_put(np);
-
 	mpic_init(mpic);
 }
 
@@ -149,21 +130,7 @@
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .name = "soc", },
-	{ .type = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(sbc8548, declare_of_platform_devices);
+machine_device_initcall(sbc8548, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index cebd786..940752e 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -32,68 +32,22 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 #ifdef CONFIG_CPM2
 #include <asm/cpm2.h>
 #include <sysdev/cpm2_pic.h>
 #endif
 
-#ifdef CONFIG_CPM2
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	int cascade_irq;
-
-	while ((cascade_irq = cpm2_get_irq()) >= 0)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* CONFIG_CPM2 */
-
 static void __init sbc8560_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np = NULL;
-#ifdef CONFIG_CPM2
-	int irq;
-#endif
-
-	np = of_find_node_by_type(np, "open-pic");
-	if (!np) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Could not map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
-#ifdef CONFIG_CPM2
-	/* Setup CPM2 PIC */
-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
-		return;
-	}
-	irq = irq_of_parse_and_map(np, 0);
-
-	cpm2_pic_init(np);
-	of_node_put(np);
-	irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+	mpc85xx_cpm2_pic_init();
 }
 
 /*
@@ -208,23 +162,7 @@
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .name = "soc", },
-	{ .type = "soc", },
-	{ .name = "cpm", },
-	{ .name = "localbus", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(sbc8560, declare_of_platform_devices);
+machine_device_initcall(sbc8560, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 2df4785..ff42490 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -27,6 +27,7 @@
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/mpic.h>
+#include "smp.h"
 
 extern void __early_start(void);
 
diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
new file mode 100644
index 0000000..e2b4493
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/smp.h
@@ -0,0 +1,15 @@
+#ifndef POWERPC_85XX_SMP_H_
+#define POWERPC_85XX_SMP_H_ 1
+
+#include <linux/init.h>
+
+#ifdef CONFIG_SMP
+void __init mpc85xx_smp_init(void);
+#else
+static inline void mpc85xx_smp_init(void)
+{
+	/* Nothing to do */
+}
+#endif
+
+#endif /* not POWERPC_85XX_SMP_H_ */
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index 747d8fb..18f6359 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -41,32 +41,17 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
 #include "socrates_fpga_pic.h"
 
 static void __init socrates_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
 	struct device_node *np;
 
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (!np) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Could not map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
 	np = of_find_compatible_node(NULL, NULL, "abb,socrates-fpga-pic");
@@ -96,17 +81,7 @@
 #endif
 }
 
-static struct of_device_id __initdata socrates_of_bus_ids[] = {
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init socrates_publish_devices(void)
-{
-	return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
-}
-machine_device_initcall(socrates, socrates_publish_devices);
+machine_device_initcall(socrates, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 5387e9f..e9e5234 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -40,70 +40,21 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 #ifdef CONFIG_CPM2
 #include <asm/cpm2.h>
-#include <sysdev/cpm2_pic.h>
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	int cascade_irq;
-
-	while ((cascade_irq = cpm2_get_irq()) >= 0)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
 #endif /* CONFIG_CPM2 */
 
 static void __init stx_gp3_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-#ifdef CONFIG_CPM2
-	int irq;
-#endif
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (!np) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Could not map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
-#ifdef CONFIG_CPM2
-	/* Setup CPM2 PIC */
-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
-		return;
-	}
-	irq = irq_of_parse_and_map(np, 0);
-
-	if (irq == NO_IRQ) {
-		of_node_put(np);
-		printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
-		return;
-	}
-
-	cpm2_pic_init(np);
-	of_node_put(np);
-	irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+	mpc85xx_cpm2_pic_init();
 }
 
 /*
@@ -144,19 +95,7 @@
 	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(stx_gp3, declare_of_platform_devices);
+machine_device_initcall(stx_gp3, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 325de77..bf7c89f 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -38,70 +38,21 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
+#include "mpc85xx.h"
+
 #ifdef CONFIG_CPM2
 #include <asm/cpm2.h>
-#include <sysdev/cpm2_pic.h>
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	int cascade_irq;
-
-	while ((cascade_irq = cpm2_get_irq()) >= 0)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
 #endif /* CONFIG_CPM2 */
 
 static void __init tqm85xx_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-#ifdef CONFIG_CPM2
-	int irq;
-#endif
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (!np) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Could not map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 
-#ifdef CONFIG_CPM2
-	/* Setup CPM2 PIC */
-	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
-		return;
-	}
-	irq = irq_of_parse_and_map(np, 0);
-
-	if (irq == NO_IRQ) {
-		of_node_put(np);
-		printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
-		return;
-	}
-
-	cpm2_pic_init(np);
-	of_node_put(np);
-	irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+	mpc85xx_cpm2_pic_init();
 }
 
 /*
@@ -173,19 +124,7 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
 		tqm85xx_ti1520_fixup);
 
-static struct of_device_id __initdata of_bus_ids[] = {
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init declare_of_platform_devices(void)
-{
-	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
-	return 0;
-}
-machine_device_initcall(tqm85xx, declare_of_platform_devices);
+machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices);
 
 static const char *board[] __initdata = {
 	"tqc,tqm8540",
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index a9dc5e7..3a69f8b 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -32,6 +32,9 @@
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
 
 /* A few bit definitions needed for fixups on some boards */
 #define MPC85xx_L2CTL_L2E		0x80000000 /* L2 enable */
@@ -40,29 +43,11 @@
 
 void __init xes_mpc85xx_pic_init(void)
 {
-	struct mpic *mpic;
-	struct resource r;
-	struct device_node *np;
-
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (np == NULL) {
-		printk(KERN_ERR "Could not find open-pic node\n");
-		return;
-	}
-
-	if (of_address_to_resource(np, 0, &r)) {
-		printk(KERN_ERR "Failed to map mpic register space\n");
-		of_node_put(np);
-		return;
-	}
-
-	mpic = mpic_alloc(np, r.start,
-			  MPIC_PRIMARY | MPIC_WANTS_RESET |
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			  MPIC_WANTS_RESET |
 			  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
-	of_node_put(np);
-
 	mpic_init(mpic);
 }
 
@@ -136,9 +121,6 @@
 /*
  * Setup the architecture
  */
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
 static void __init xes_mpc85xx_setup_arch(void)
 {
 #ifdef CONFIG_PCI
@@ -172,26 +154,12 @@
 	}
 #endif
 
-#ifdef CONFIG_SMP
 	mpc85xx_smp_init();
-#endif
 }
 
-static struct of_device_id __initdata xes_mpc85xx_ids[] = {
-	{ .type = "soc", },
-	{ .compatible = "soc", },
-	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
-	{},
-};
-
-static int __init xes_mpc85xx_publish_devices(void)
-{
-	return of_platform_bus_probe(NULL, xes_mpc85xx_ids, NULL);
-}
-machine_device_initcall(xes_mpc8572, xes_mpc85xx_publish_devices);
-machine_device_initcall(xes_mpc8548, xes_mpc85xx_publish_devices);
-machine_device_initcall(xes_mpc8540, xes_mpc85xx_publish_devices);
+machine_device_initcall(xes_mpc8572, mpc85xx_common_publish_devices);
+machine_device_initcall(xes_mpc8548, mpc85xx_common_publish_devices);
+machine_device_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index b11c353..569262c 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -161,7 +161,7 @@
 
 static __initdata struct of_device_id of_bus_ids[] = {
 	{ .compatible = "simple-bus", },
-	{ .compatible = "fsl,rapidio-delta", },
+	{ .compatible = "fsl,srio", },
 	{ .compatible = "gianfar", },
 	{},
 };
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 8ef8960..52bbfa0 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -31,26 +31,16 @@
 
 void __init mpc86xx_init_irq(void)
 {
-	struct mpic *mpic;
-	struct device_node *np;
-	struct resource res;
 #ifdef CONFIG_PPC_I8259
+	struct device_node *np;
 	struct device_node *cascade_node = NULL;
 	int cascade_irq;
 #endif
 
-	/* Determine PIC address. */
-	np = of_find_node_by_type(NULL, "open-pic");
-	if (np == NULL)
-		return;
-	of_address_to_resource(np, 0, &res);
-
-	mpic = mpic_alloc(np, res.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET |
-			MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
-			MPIC_SINGLE_DEST_CPU,
+	struct mpic *mpic = mpic_alloc(NULL, 0,
+			MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+			MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
 			0, 256, " MPIC     ");
-	of_node_put(np);
 	BUG_ON(mpic == NULL);
 
 	mpic_init(mpic);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 3fe6d92..31e1ade 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -211,6 +211,12 @@
 
 endmenu
 
+menu "CPUIdle driver"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 config PPC601_SYNC_FIX
 	bool "Workarounds for PPC601 bugs"
 	depends on 6xx && (PPC_PREP || PPC_PMAC)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index fbecae0..425db18 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -174,7 +174,6 @@
 config FSL_BOOKE
 	bool
 	depends on (E200 || E500) && PPC32
-	select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT
 	default y
 
 # this is for common code between PPC32 & PPC64 FSL BOOKE
@@ -182,6 +181,7 @@
 	bool
 	select FSL_EMB_PERFMON
 	select PPC_SMP_MUXED_IPI
+	select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
 	default y if FSL_BOOKE
 
 config PTE_64BIT
@@ -236,7 +236,7 @@
 
 config PPC_ICSWX
 	bool "Support for PowerPC icswx coprocessor instruction"
-	depends on POWER4
+	depends on POWER4 || PPC_A2
 	default n
 	---help---
 
@@ -252,6 +252,25 @@
 
 	  If in doubt, say N here.
 
+config PPC_ICSWX_PID
+	bool "icswx requires direct PID management"
+	depends on PPC_ICSWX && POWER4
+	default y
+	---help---
+	  On server processors, the PID register is used explicitly for
+	  ICSWX.  In embedded systems, PID management is done by the system.
+
+config PPC_ICSWX_USE_SIGILL
+	bool "Should a bad CT cause a SIGILL?"
+	depends on PPC_ICSWX
+	default n
+	---help---
+	  Should a bad CT used for "non-record form ICSWX" cause an
+	  illegal instruction signal, or should it be silent as
+	  architected?
+
+	  If in doubt, say N here.
+
 config SPE
 	bool "SPE Support"
 	depends on E200 || (E500 && !PPC_E500MC)
@@ -290,7 +309,7 @@
 
 config PPC_MM_SLICES
 	bool
-	default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
+	default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
 	default n
 
 config VIRT_CPU_ACCOUNTING
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 4d4c8c1..94560db 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -46,7 +46,7 @@
  */
 
 #include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/cpu.h>
 #include <asm/spu.h>
@@ -59,8 +59,8 @@
 #define TEMP_MIN 65
 #define TEMP_MAX 125
 
-#define SYSDEV_PREFIX_ATTR(_prefix,_name,_mode)			\
-struct sysdev_attribute attr_ ## _prefix ## _ ## _name = {	\
+#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode)			\
+struct device_attribute attr_ ## _prefix ## _ ## _name = {	\
 	.attr = { .name = __stringify(_name), .mode = _mode },	\
 	.show	= _prefix ## _show_ ## _name,			\
 	.store	= _prefix ## _store_ ## _name,			\
@@ -76,36 +76,36 @@
 	return ((temp - TEMP_MIN) >> 1) & 0x3f;
 }
 
-static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev)
 {
 	struct spu *spu;
 
-	spu = container_of(sysdev, struct spu, sysdev);
+	spu = container_of(dev, struct spu, dev);
 
 	return cbe_get_pmd_regs(spu_devnode(spu));
 }
 
 /* returns the value for a given spu in a given register */
-static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
+static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg)
 {
 	union spe_reg value;
 	struct spu *spu;
 
-	spu = container_of(sysdev, struct spu, sysdev);
+	spu = container_of(dev, struct spu, dev);
 	value.val = in_be64(&reg->val);
 
 	return value.spe[spu->spe_id];
 }
 
-static ssize_t spu_show_temp(struct sys_device *sysdev, struct sysdev_attribute *attr,
+static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	u8 value;
 	struct cbe_pmd_regs __iomem *pmd_regs;
 
-	pmd_regs = get_pmd_regs(sysdev);
+	pmd_regs = get_pmd_regs(dev);
 
-	value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
+	value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1);
 
 	return sprintf(buf, "%d\n", reg_to_temp(value));
 }
@@ -147,48 +147,48 @@
 	return size;
 }
 
-static ssize_t spu_show_throttle_end(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_show_throttle_end(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return show_throttle(get_pmd_regs(sysdev), buf, 0);
+	return show_throttle(get_pmd_regs(dev), buf, 0);
 }
 
-static ssize_t spu_show_throttle_begin(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_show_throttle_begin(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return show_throttle(get_pmd_regs(sysdev), buf, 8);
+	return show_throttle(get_pmd_regs(dev), buf, 8);
 }
 
-static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_show_throttle_full_stop(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return show_throttle(get_pmd_regs(sysdev), buf, 16);
+	return show_throttle(get_pmd_regs(dev), buf, 16);
 }
 
-static ssize_t spu_store_throttle_end(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t spu_store_throttle_end(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t size)
 {
-	return store_throttle(get_pmd_regs(sysdev), buf, size, 0);
+	return store_throttle(get_pmd_regs(dev), buf, size, 0);
 }
 
-static ssize_t spu_store_throttle_begin(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t spu_store_throttle_begin(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t size)
 {
-	return store_throttle(get_pmd_regs(sysdev), buf, size, 8);
+	return store_throttle(get_pmd_regs(dev), buf, size, 8);
 }
 
-static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t spu_store_throttle_full_stop(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t size)
 {
-	return store_throttle(get_pmd_regs(sysdev), buf, size, 16);
+	return store_throttle(get_pmd_regs(dev), buf, size, 16);
 }
 
-static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
+static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos)
 {
 	struct cbe_pmd_regs __iomem *pmd_regs;
 	u64 value;
 
-	pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+	pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
 	value = in_be64(&pmd_regs->ts_ctsr2);
 
 	value = (value >> pos) & 0x3f;
@@ -199,64 +199,64 @@
 
 /* shows the temperature of the DTS on the PPE,
  * located near the linear thermal sensor */
-static ssize_t ppe_show_temp0(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_temp0(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return ppe_show_temp(sysdev, buf, 32);
+	return ppe_show_temp(dev, buf, 32);
 }
 
 /* shows the temperature of the second DTS on the PPE */
-static ssize_t ppe_show_temp1(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_temp1(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return ppe_show_temp(sysdev, buf, 0);
+	return ppe_show_temp(dev, buf, 0);
 }
 
-static ssize_t ppe_show_throttle_end(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_throttle_end(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 32);
+	return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32);
 }
 
-static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_throttle_begin(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 40);
+	return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40);
 }
 
-static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_throttle_full_stop(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
-	return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 48);
+	return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48);
 }
 
-static ssize_t ppe_store_throttle_end(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t ppe_store_throttle_end(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t size)
 {
-	return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 32);
+	return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32);
 }
 
-static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t ppe_store_throttle_begin(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t size)
 {
-	return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 40);
+	return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40);
 }
 
-static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev,
-			struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t ppe_store_throttle_full_stop(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t size)
 {
-	return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 48);
+	return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48);
 }
 
 
-static struct sysdev_attribute attr_spu_temperature = {
+static struct device_attribute attr_spu_temperature = {
 	.attr = {.name = "temperature", .mode = 0400 },
 	.show = spu_show_temp,
 };
 
-static SYSDEV_PREFIX_ATTR(spu, throttle_end, 0600);
-static SYSDEV_PREFIX_ATTR(spu, throttle_begin, 0600);
-static SYSDEV_PREFIX_ATTR(spu, throttle_full_stop, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600);
 
 
 static struct attribute *spu_attributes[] = {
@@ -272,19 +272,19 @@
 	.attrs	= spu_attributes,
 };
 
-static struct sysdev_attribute attr_ppe_temperature0 = {
+static struct device_attribute attr_ppe_temperature0 = {
 	.attr = {.name = "temperature0", .mode = 0400 },
 	.show = ppe_show_temp0,
 };
 
-static struct sysdev_attribute attr_ppe_temperature1 = {
+static struct device_attribute attr_ppe_temperature1 = {
 	.attr = {.name = "temperature1", .mode = 0400 },
 	.show = ppe_show_temp1,
 };
 
-static SYSDEV_PREFIX_ATTR(ppe, throttle_end, 0600);
-static SYSDEV_PREFIX_ATTR(ppe, throttle_begin, 0600);
-static SYSDEV_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
 
 static struct attribute *ppe_attributes[] = {
 	&attr_ppe_temperature0.attr,
@@ -307,7 +307,7 @@
 {
 	int cpu;
 	struct cbe_pmd_regs __iomem *pmd_regs;
-	struct sys_device *sysdev;
+	struct device *dev;
 	union ppe_spe_reg tpr;
 	union spe_reg str1;
 	u64 str2;
@@ -349,14 +349,14 @@
 
 	for_each_possible_cpu (cpu) {
 		pr_debug("processing cpu %d\n", cpu);
-		sysdev = get_cpu_sysdev(cpu);
+		dev = get_cpu_device(cpu);
 
-		if (!sysdev) {
-			pr_info("invalid sysdev pointer for cbe_thermal\n");
+		if (!dev) {
+			pr_info("invalid dev pointer for cbe_thermal\n");
 			return -EINVAL;
 		}
 
-		pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+		pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
 
 		if (!pmd_regs) {
 			pr_info("invalid CBE regs pointer for cbe_thermal\n");
@@ -379,8 +379,8 @@
 	int rc = init_default_values();
 
 	if (rc == 0) {
-		spu_add_sysdev_attr_group(&spu_attribute_group);
-		cpu_add_sysdev_attr_group(&ppe_attribute_group);
+		spu_add_dev_attr_group(&spu_attribute_group);
+		cpu_add_dev_attr_group(&ppe_attribute_group);
 	}
 
 	return rc;
@@ -389,8 +389,8 @@
 
 static void __exit thermal_exit(void)
 {
-	spu_remove_sysdev_attr_group(&spu_attribute_group);
-	cpu_remove_sysdev_attr_group(&ppe_attribute_group);
+	spu_remove_dev_attr_group(&spu_attribute_group);
+	cpu_remove_dev_attr_group(&ppe_attribute_group);
 }
 module_exit(thermal_exit);
 
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 592c3d5..ae9fc7b 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1037,6 +1037,8 @@
 
 	/* The fixed mapping is only supported on axon machines */
 	np = of_find_node_by_name(NULL, "axon");
+	of_node_put(np);
+
 	if (!np) {
 		pr_debug("iommu: fixed mapping disabled, no axons found\n");
 		return -1;
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 0fc9b72..62002a7 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -184,24 +184,10 @@
 }
 machine_subsys_initcall(cell, cell_publish_devices);
 
-static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct mpic *mpic = irq_desc_get_handler_data(desc);
-	unsigned int virq;
-
-	virq = mpic_get_one_irq(mpic);
-	if (virq != NO_IRQ)
-		generic_handle_irq(virq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
 static void __init mpic_init_IRQ(void)
 {
 	struct device_node *dn;
 	struct mpic *mpic;
-	unsigned int virq;
 
 	for (dn = NULL;
 	     (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
@@ -211,19 +197,10 @@
 		/* The MPIC driver will get everything it needs from the
 		 * device-tree, just pass 0 to all arguments
 		 */
-		mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC     ");
+		mpic = mpic_alloc(dn, 0, MPIC_SECONDARY, 0, 0, " MPIC     ");
 		if (mpic == NULL)
 			continue;
 		mpic_init(mpic);
-
-		virq = irq_of_parse_and_map(dn, 0);
-		if (virq == NO_IRQ)
-			continue;
-
-		printk(KERN_INFO "%s : hooking up to IRQ %d\n",
-		       dn->full_name, virq);
-		irq_set_handler_data(virq, mpic);
-		irq_set_chained_handler(virq, cell_mpic_cascade);
 	}
 }
 
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index f5c5c76..4a255cf 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -23,7 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 
 #include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e94d3ec..8b12139 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -519,31 +519,32 @@
 }
 EXPORT_SYMBOL_GPL(spu_init_channels);
 
-static struct sysdev_class spu_sysdev_class = {
+static struct bus_type spu_subsys = {
 	.name = "spu",
+	.dev_name = "spu",
 };
 
-int spu_add_sysdev_attr(struct sysdev_attribute *attr)
+int spu_add_dev_attr(struct device_attribute *attr)
 {
 	struct spu *spu;
 
 	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
-		sysdev_create_file(&spu->sysdev, attr);
+		device_create_file(&spu->dev, attr);
 	mutex_unlock(&spu_full_list_mutex);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
+EXPORT_SYMBOL_GPL(spu_add_dev_attr);
 
-int spu_add_sysdev_attr_group(struct attribute_group *attrs)
+int spu_add_dev_attr_group(struct attribute_group *attrs)
 {
 	struct spu *spu;
 	int rc = 0;
 
 	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list) {
-		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);
+		rc = sysfs_create_group(&spu->dev.kobj, attrs);
 
 		/* we're in trouble here, but try unwinding anyway */
 		if (rc) {
@@ -552,7 +553,7 @@
 
 			list_for_each_entry_continue_reverse(spu,
 					&spu_full_list, full_list)
-				sysfs_remove_group(&spu->sysdev.kobj, attrs);
+				sysfs_remove_group(&spu->dev.kobj, attrs);
 			break;
 		}
 	}
@@ -561,45 +562,45 @@
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);
 
 
-void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
+void spu_remove_dev_attr(struct device_attribute *attr)
 {
 	struct spu *spu;
 
 	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
-		sysdev_remove_file(&spu->sysdev, attr);
+		device_remove_file(&spu->dev, attr);
 	mutex_unlock(&spu_full_list_mutex);
 }
-EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
+EXPORT_SYMBOL_GPL(spu_remove_dev_attr);
 
-void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
+void spu_remove_dev_attr_group(struct attribute_group *attrs)
 {
 	struct spu *spu;
 
 	mutex_lock(&spu_full_list_mutex);
 	list_for_each_entry(spu, &spu_full_list, full_list)
-		sysfs_remove_group(&spu->sysdev.kobj, attrs);
+		sysfs_remove_group(&spu->dev.kobj, attrs);
 	mutex_unlock(&spu_full_list_mutex);
 }
-EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
 
-static int spu_create_sysdev(struct spu *spu)
+static int spu_create_dev(struct spu *spu)
 {
 	int ret;
 
-	spu->sysdev.id = spu->number;
-	spu->sysdev.cls = &spu_sysdev_class;
-	ret = sysdev_register(&spu->sysdev);
+	spu->dev.id = spu->number;
+	spu->dev.bus = &spu_subsys;
+	ret = device_register(&spu->dev);
 	if (ret) {
 		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
 				spu->number);
 		return ret;
 	}
 
-	sysfs_add_device_to_node(&spu->sysdev, spu->node);
+	sysfs_add_device_to_node(&spu->dev, spu->node);
 
 	return 0;
 }
@@ -635,7 +636,7 @@
 	if (ret)
 		goto out_destroy;
 
-	ret = spu_create_sysdev(spu);
+	ret = spu_create_dev(spu);
 	if (ret)
 		goto out_free_irqs;
 
@@ -692,10 +693,10 @@
 }
 
 
-static ssize_t spu_stat_show(struct sys_device *sysdev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_stat_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
-	struct spu *spu = container_of(sysdev, struct spu, sysdev);
+	struct spu *spu = container_of(dev, struct spu, dev);
 
 	return sprintf(buf, "%s %llu %llu %llu %llu "
 		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -714,7 +715,7 @@
 		spu->stats.libassist);
 }
 
-static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
+static DEVICE_ATTR(stat, 0644, spu_stat_show, NULL);
 
 #ifdef CONFIG_KEXEC
 
@@ -813,8 +814,8 @@
 	if (!spu_management_ops)
 		goto out;
 
-	/* create sysdev class for spus */
-	ret = sysdev_class_register(&spu_sysdev_class);
+	/* create system subsystem for spus */
+	ret = subsys_system_register(&spu_subsys, NULL);
 	if (ret)
 		goto out;
 
@@ -823,7 +824,7 @@
 	if (ret < 0) {
 		printk(KERN_WARNING "%s: Error initializing spus\n",
 			__func__);
-		goto out_unregister_sysdev_class;
+		goto out_unregister_subsys;
 	}
 
 	if (ret > 0)
@@ -833,15 +834,15 @@
 	xmon_register_spus(&spu_full_list);
 	crash_register_spus(&spu_full_list);
 	mutex_unlock(&spu_full_list_mutex);
-	spu_add_sysdev_attr(&attr_stat);
+	spu_add_dev_attr(&dev_attr_stat);
 	register_syscore_ops(&spu_syscore_ops);
 
 	spu_init_affinity();
 
 	return 0;
 
- out_unregister_sysdev_class:
-	sysdev_class_unregister(&spu_sysdev_class);
+ out_unregister_subsys:
+	bus_unregister(&spu_subsys);
  out:
 	return ret;
 }
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 75530d9..714bbfc 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -65,8 +65,8 @@
 
 #endif /* CONFIG_SPU_FS_MODULE */
 
-asmlinkage long sys_spu_create(const char __user *name,
-		unsigned int flags, mode_t mode, int neighbor_fd)
+SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
+	umode_t, mode, int, neighbor_fd)
 {
 	long ret;
 	struct file *neighbor;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index e481f6b..d4a094c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -74,7 +74,6 @@
 static void spufs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
 }
 
@@ -92,7 +91,7 @@
 }
 
 static struct inode *
-spufs_new_inode(struct super_block *sb, int mode)
+spufs_new_inode(struct super_block *sb, umode_t mode)
 {
 	struct inode *inode;
 
@@ -124,7 +123,7 @@
 
 static int
 spufs_new_file(struct super_block *sb, struct dentry *dentry,
-		const struct file_operations *fops, int mode,
+		const struct file_operations *fops, umode_t mode,
 		size_t size, struct spu_context *ctx)
 {
 	static const struct inode_operations spufs_file_iops = {
@@ -194,7 +193,7 @@
 }
 
 static int spufs_fill_dir(struct dentry *dir,
-		const struct spufs_tree_descr *files, int mode,
+		const struct spufs_tree_descr *files, umode_t mode,
 		struct spu_context *ctx)
 {
 	struct dentry *dentry, *tmp;
@@ -264,7 +263,7 @@
 
 static int
 spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
-		int mode)
+		umode_t mode)
 {
 	int ret;
 	struct inode *inode;
@@ -447,7 +446,7 @@
 
 static int
 spufs_create_context(struct inode *inode, struct dentry *dentry,
-			struct vfsmount *mnt, int flags, int mode,
+			struct vfsmount *mnt, int flags, umode_t mode,
 			struct file *aff_filp)
 {
 	int ret;
@@ -521,7 +520,7 @@
 }
 
 static int
-spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
+spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int ret;
 	struct inode *inode;
@@ -584,7 +583,7 @@
 
 static int spufs_create_gang(struct inode *inode,
 			struct dentry *dentry,
-			struct vfsmount *mnt, int mode)
+			struct vfsmount *mnt, umode_t mode)
 {
 	int ret;
 
@@ -612,7 +611,7 @@
 static struct file_system_type spufs_type;
 
 long spufs_create(struct path *path, struct dentry *dentry,
-		unsigned int flags, mode_t mode, struct file *filp)
+		unsigned int flags, umode_t mode, struct file *filp)
 {
 	int ret;
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 099245f..67852ad 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -237,7 +237,7 @@
 struct spufs_tree_descr {
 	const char *name;
 	const struct file_operations *ops;
-	int mode;
+	umode_t mode;
 	size_t size;
 };
 
@@ -249,7 +249,7 @@
 extern struct spufs_calls spufs_calls;
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
 long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
-			mode_t mode, struct file *filp);
+			umode_t mode, struct file *filp);
 /* ELF coredump callbacks for writing SPU ELF notes */
 extern int spufs_coredump_extra_notes_size(void);
 extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 71a5b52..8591bb6 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -60,7 +60,7 @@
 }
 
 static long do_spu_create(const char __user *pathname, unsigned int flags,
-		mode_t mode, struct file *neighbor)
+		umode_t mode, struct file *neighbor)
 {
 	struct path path;
 	struct dentry *dentry;
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 1227864..f1f17bb 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -435,8 +435,7 @@
 	if (len > 1)
 		isu_size = iranges[3];
 
-	chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY,
-			       isu_size, 0, " MPIC    ");
+	chrp_mpic = mpic_alloc(np, opaddr, 0, isu_size, 0, " MPIC    ");
 	if (chrp_mpic == NULL) {
 		printk(KERN_ERR "Failed to allocate MPIC structure\n");
 		goto bail;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 2e9bcf6..9cfcf20 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -148,30 +148,14 @@
 static void __init holly_init_IRQ(void)
 {
 	struct mpic *mpic;
-	phys_addr_t mpic_paddr = 0;
-	struct device_node *tsi_pic;
 #ifdef CONFIG_PCI
 	unsigned int cascade_pci_irq;
 	struct device_node *tsi_pci;
 	struct device_node *cascade_node = NULL;
 #endif
 
-	tsi_pic = of_find_node_by_type(NULL, "open-pic");
-	if (tsi_pic) {
-		unsigned int size;
-		const void *prop = of_get_property(tsi_pic, "reg", &size);
-		mpic_paddr = of_translate_address(tsi_pic, prop);
-	}
-
-	if (mpic_paddr == 0) {
-		printk(KERN_ERR "%s: No tsi108 PIC found !\n", __func__);
-		return;
-	}
-
-	pr_debug("%s: tsi108 pic phys_addr = 0x%x\n", __func__, (u32) mpic_paddr);
-
-	mpic = mpic_alloc(tsi_pic, mpic_paddr,
-			MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
+	mpic = mpic_alloc(NULL, 0,
+			MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
 			MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
 			24,
 			NR_IRQS-4, /* num_sources used */
@@ -179,7 +163,7 @@
 
 	BUG_ON(mpic == NULL);
 
-	mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+	mpic_assign_isu(mpic, 0, mpic->paddr + 0x100);
 
 	mpic_init(mpic);
 
@@ -204,7 +188,6 @@
 #endif
 	/* Configure MPIC outputs to CPU0 */
 	tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
-	of_node_put(tsi_pic);
 }
 
 void holly_show_cpuinfo(struct seq_file *m)
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 244f997..bcfad92 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -81,29 +81,19 @@
 static void __init linkstation_init_IRQ(void)
 {
 	struct mpic *mpic;
-	struct device_node *dnp;
-	const u32 *prop;
-	int size;
-	phys_addr_t paddr;
 
-	dnp = of_find_node_by_type(NULL, "open-pic");
-	if (dnp == NULL)
-		return;
-
-	prop = of_get_property(dnp, "reg", &size);
-	paddr = (phys_addr_t)of_translate_address(dnp, prop);
-
-	mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC     ");
+	mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET,
+			4, 32, " EPIC     ");
 	BUG_ON(mpic == NULL);
 
 	/* PCI IRQs */
-	mpic_assign_isu(mpic, 0, paddr + 0x10200);
+	mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
 
 	/* I2C */
-	mpic_assign_isu(mpic, 1, paddr + 0x11000);
+	mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
 
 	/* ttyS0, ttyS1 */
-	mpic_assign_isu(mpic, 2, paddr + 0x11100);
+	mpic_assign_isu(mpic, 2, mpic->paddr + 0x11100);
 
 	mpic_init(mpic);
 }
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index f8f33e1..f3350d7 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -102,31 +102,14 @@
 static void __init mpc7448_hpc2_init_IRQ(void)
 {
 	struct mpic *mpic;
-	phys_addr_t mpic_paddr = 0;
-	struct device_node *tsi_pic;
 #ifdef CONFIG_PCI
 	unsigned int cascade_pci_irq;
 	struct device_node *tsi_pci;
 	struct device_node *cascade_node = NULL;
 #endif
 
-	tsi_pic = of_find_node_by_type(NULL, "open-pic");
-	if (tsi_pic) {
-		unsigned int size;
-		const void *prop = of_get_property(tsi_pic, "reg", &size);
-		mpic_paddr = of_translate_address(tsi_pic, prop);
-	}
-
-	if (mpic_paddr == 0) {
-		printk("%s: No tsi108 PIC found !\n", __func__);
-		return;
-	}
-
-	DBG("%s: tsi108 pic phys_addr = 0x%x\n", __func__,
-	    (u32) mpic_paddr);
-
-	mpic = mpic_alloc(tsi_pic, mpic_paddr,
-			MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
+	mpic = mpic_alloc(NULL, 0,
+			MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
 			MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
 			24,
 			NR_IRQS-4, /* num_sources used */
@@ -134,7 +117,7 @@
 
 	BUG_ON(mpic == NULL);
 
-	mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+	mpic_assign_isu(mpic, 0, mpic->paddr + 0x100);
 
 	mpic_init(mpic);
 
@@ -159,7 +142,6 @@
 #endif
 	/* Configure MPIC outputs to CPU0 */
 	tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
-	of_node_put(tsi_pic);
 }
 
 void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index f1eebca..afa6388 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -83,35 +83,17 @@
 static void __init storcenter_init_IRQ(void)
 {
 	struct mpic *mpic;
-	struct device_node *dnp;
-	const void *prop;
-	int size;
-	phys_addr_t paddr;
 
-	dnp = of_find_node_by_type(NULL, "open-pic");
-	if (dnp == NULL)
-		return;
-
-	prop = of_get_property(dnp, "reg", &size);
-	if (prop == NULL) {
-		of_node_put(dnp);
-		return;
-	}
-
-	paddr = (phys_addr_t)of_translate_address(dnp, prop);
-	mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET,
+	mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET,
 			16, 32, " OpenPIC  ");
-
-	of_node_put(dnp);
-
 	BUG_ON(mpic == NULL);
 
 	/*
 	 * 16 Serial Interrupts followed by 16 Internal Interrupts.
 	 * I2C is the second internal, so it is at 17, 0x11020.
 	 */
-	mpic_assign_isu(mpic, 0, paddr + 0x10200);
-	mpic_assign_isu(mpic, 1, paddr + 0x11000);
+	mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
+	mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
 
 	mpic_init(mpic);
 }
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 1b5dc1a..6d8dadf 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -79,24 +79,19 @@
 	BUG_ON(memblock.memory.cnt != 2);
 	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
 
-	p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
-	p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);
+	/* trim unaligned tail */
+	memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
+			(phys_addr_t)ULLONG_MAX);
 
-	wii_hole_start = p[0].base + p[0].size;
+	/* determine hole, add & reserve them */
+	wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
 	wii_hole_size = p[1].base - wii_hole_start;
-
-	pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
-	pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
-	pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
-
-	p[0].size += wii_hole_size + p[1].size;
-
-	memblock.memory.cnt = 1;
-	memblock_analyze();
-
-	/* reserve the hole */
+	memblock_add(wii_hole_start, wii_hole_size);
 	memblock_reserve(wii_hole_start, wii_hole_size);
 
+	BUG_ON(memblock.memory.cnt != 1);
+	__memblock_dump_all();
+
 	/* allow ioremapping the address space in the hole */
 	__allow_ioremap_reserved = 1;
 }
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index ea0acbd..8fc6258 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -563,7 +563,8 @@
 static void iseries_shared_idle(void)
 {
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched() && !hvlpevent_is_pending()) {
 			local_irq_disable();
 			ppc64_runlatch_off();
@@ -577,7 +578,8 @@
 		}
 
 		ppc64_runlatch_on();
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 
 		if (hvlpevent_is_pending())
 			process_iSeries_events();
@@ -593,7 +595,8 @@
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		if (!need_resched()) {
 			while (!need_resched()) {
 				ppc64_runlatch_off();
@@ -610,7 +613,8 @@
 		}
 
 		ppc64_runlatch_on();
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 7e2a551..02df49f 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -24,7 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 
 #include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index dd2e48b..401e3f3 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -207,6 +207,54 @@
 		return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset);
 }
 
+static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset,
+				  int len, u32 *val)
+{
+	volatile void __iomem *addr;
+
+	addr = hose->cfg_addr;
+	addr += ((offset & ~3) << 2) + (4 - len - (offset & 3));
+
+	switch (len) {
+	case 1:
+		*val = in_8(addr);
+		break;
+	case 2:
+		*val = in_be16(addr);
+		break;
+	default:
+		*val = in_be32(addr);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset,
+				  int len, u32 val)
+{
+	volatile void __iomem *addr;
+
+	addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3));
+
+	if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST)
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (len) {
+	case 1:
+		out_8(addr, val);
+		break;
+	case 2:
+		out_be16(addr, val);
+		break;
+	default:
+		out_be32(addr, val);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
 static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
 			     int offset, int len, u32 *val)
 {
@@ -217,6 +265,9 @@
 	if (hose == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
+	if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
+		return u3_ht_root_read_config(hose, offset, len, val);
+
 	if (offset > 0xff)
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
@@ -252,6 +303,9 @@
 	if (hose == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
+	if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
+		return u3_ht_root_write_config(hose, offset, len, val);
+
 	if (offset > 0xff)
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
@@ -428,6 +482,7 @@
 	 * reg_property and using some accessor functions instead
 	 */
 	hose->cfg_data = ioremap(0xf2000000, 0x02000000);
+	hose->cfg_addr = ioremap(0xf8070000, 0x1000);
 
 	hose->first_busno = 0;
 	hose->last_busno = 0xef;
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 4c37204..0bcbfe7 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -221,7 +221,7 @@
 	unsigned long openpic_addr = 0;
 	int naddr, n, i, opplen, has_isus = 0;
 	struct mpic *mpic;
-	unsigned int flags = MPIC_PRIMARY;
+	unsigned int flags = 0;
 
 	/* Locate MPIC in the device-tree. Note that there is a bug
 	 * in Maple device-tree where the type of the controller is
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 6f35582..98b7a7c 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -224,7 +224,7 @@
 	openpic_addr = of_read_number(opprop, naddr);
 	printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
 
-	mpic_flags = MPIC_PRIMARY | MPIC_LARGE_VECTORS | MPIC_NO_BIAS;
+	mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS;
 
 	nmiprop = of_get_property(mpic_node, "nmi-source", NULL);
 	if (nmiprop)
@@ -234,7 +234,7 @@
 			  mpic_flags, 0, 0, "PASEMI-OPIC");
 	BUG_ON(!mpic);
 
-	mpic_assign_isu(mpic, 0, openpic_addr + 0x10000);
+	mpic_assign_isu(mpic, 0, mpic->paddr + 0x10000);
 	mpic_init(mpic);
 	/* The NMI/MCK source needs to be prio 15 */
 	if (nmiprop) {
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 04af5f4..1fc386a 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -23,7 +23,7 @@
 #include <linux/pmu.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/hardirq.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 901bfbd..7761aab 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -52,13 +52,8 @@
 /* Default addresses */
 static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
 
-#define GC_LEVEL_MASK		0x3ff00000
-#define OHARE_LEVEL_MASK	0x1ff00000
-#define HEATHROW_LEVEL_MASK	0x1ff00000
-
 static int max_irqs;
 static int max_real_irqs;
-static u32 level_mask[4];
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
@@ -217,8 +212,7 @@
 	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
 		int i = irq >> 5;
 		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
-		/* We must read level interrupts from the level register */
-		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+		bits |= in_le32(&pmac_irq_hw[i]->level);
 		bits &= ppc_cached_irq_mask[i];
 		if (bits == 0)
 			continue;
@@ -248,8 +242,7 @@
 	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
 		int i = irq >> 5;
 		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
-		/* We must read level interrupts from the level register */
-		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+		bits |= in_le32(&pmac_irq_hw[i]->level);
 		bits &= ppc_cached_irq_mask[i];
 		if (bits == 0)
 			continue;
@@ -284,19 +277,14 @@
 static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
 			     irq_hw_number_t hw)
 {
-	int level;
-
 	if (hw >= max_irqs)
 		return -EINVAL;
 
 	/* Mark level interrupts, set delayed disable for edge ones and set
 	 * handlers
 	 */
-	level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
-	if (level)
-		irq_set_status_flags(virq, IRQ_LEVEL);
-	irq_set_chip_and_handler(virq, &pmac_pic,
-				 level ? handle_level_irq : handle_edge_irq);
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
 	return 0;
 }
 
@@ -334,21 +322,14 @@
 
 	if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
 		max_irqs = max_real_irqs = 32;
-		level_mask[0] = GC_LEVEL_MASK;
 	} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
 		max_irqs = max_real_irqs = 32;
-		level_mask[0] = OHARE_LEVEL_MASK;
-
 		/* We might have a second cascaded ohare */
 		slave = of_find_node_by_name(NULL, "pci106b,7");
-		if (slave) {
+		if (slave)
 			max_irqs = 64;
-			level_mask[1] = OHARE_LEVEL_MASK;
-		}
 	} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
 		max_irqs = max_real_irqs = 64;
-		level_mask[0] = HEATHROW_LEVEL_MASK;
-		level_mask[1] = 0;
 
 		/* We might have a second cascaded heathrow */
 		slave = of_find_node_by_name(master, "mac-io");
@@ -363,11 +344,8 @@
 		}
 
 		/* We found a slave */
-		if (slave) {
+		if (slave)
 			max_irqs = 128;
-			level_mask[2] = HEATHROW_LEVEL_MASK;
-			level_mask[3] = 0;
-		}
 	}
 	BUG_ON(master == NULL);
 
@@ -464,18 +442,6 @@
 }
 #endif /* CONFIG_PPC32 */
 
-static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct mpic *mpic = irq_desc_get_handler_data(desc);
-	unsigned int cascade_irq = mpic_get_one_irq(mpic);
-
-	if (cascade_irq != NO_IRQ)
-		generic_handle_irq(cascade_irq);
-
-	chip->irq_eoi(&desc->irq_data);
-}
-
 static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
 {
 #if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
@@ -498,14 +464,8 @@
 						int master)
 {
 	const char *name = master ? " MPIC 1   " : " MPIC 2   ";
-	struct resource r;
 	struct mpic *mpic;
-	unsigned int flags = master ? MPIC_PRIMARY : 0;
-	int rc;
-
-	rc = of_address_to_resource(np, 0, &r);
-	if (rc)
-		return NULL;
+	unsigned int flags = master ? 0 : MPIC_SECONDARY;
 
 	pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
 
@@ -519,7 +479,7 @@
 	if (master && (flags & MPIC_BIG_ENDIAN))
 		flags |= MPIC_U3_HT_IRQS;
 
-	mpic = mpic_alloc(np, r.start, flags, 0, 0, name);
+	mpic = mpic_alloc(np, 0, flags, 0, 0, name);
 	if (mpic == NULL)
 		return NULL;
 
@@ -532,7 +492,6 @@
 {
 	struct mpic *mpic1, *mpic2;
 	struct device_node *np, *master = NULL, *slave = NULL;
-	unsigned int cascade;
 
 	/* We can have up to 2 MPICs cascaded */
 	for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
@@ -568,27 +527,14 @@
 
 	of_node_put(master);
 
-	/* No slave, let's go out */
-	if (slave == NULL)
-		return 0;
-
-	/* Get/Map slave interrupt */
-	cascade = irq_of_parse_and_map(slave, 0);
-	if (cascade == NO_IRQ) {
-		printk(KERN_ERR "Failed to map cascade IRQ\n");
-		return 0;
-	}
-
-	mpic2 = pmac_setup_one_mpic(slave, 0);
-	if (mpic2 == NULL) {
-		printk(KERN_ERR "Failed to setup slave MPIC\n");
+	/* Set up a cascaded controller, if present */
+	if (slave) {
+		mpic2 = pmac_setup_one_mpic(slave, 0);
+		if (mpic2 == NULL)
+			printk(KERN_ERR "Failed to setup slave MPIC\n");
 		of_node_put(slave);
-		return 0;
 	}
-	irq_set_handler_data(cascade, mpic2);
-	irq_set_chained_handler(cascade, pmac_u3_cascade);
 
-	of_node_put(slave);
 	return 0;
 }
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 96580b1..970ea1d 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -494,11 +494,15 @@
 		return -1;
 
 	np = of_find_node_by_name(NULL, "valkyrie");
-	if (np)
+	if (np) {
 		of_platform_device_create(np, "valkyrie", NULL);
+		of_node_put(np);
+	}
 	np = of_find_node_by_name(NULL, "platinum");
-	if (np)
+	if (np) {
 		of_platform_device_create(np, "platinum", NULL);
+		of_node_put(np);
+	}
         np = of_find_node_by_type(NULL, "smu");
         if (np) {
 		of_platform_device_create(np, "smu", NULL);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 9b6a820..44d7692 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -200,7 +200,7 @@
 
 	if (psurge_secondary_virq)
 		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
-			IRQF_PERCPU, "IPI", NULL);
+			IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);
 
 	if (rc)
 		pr_err("Failed to setup secondary cpu IPI\n");
@@ -408,13 +408,13 @@
 
 static struct irqaction psurge_irqaction = {
 	.handler = psurge_ipi_intr,
-	.flags = IRQF_PERCPU,
+	.flags = IRQF_PERCPU | IRQF_NO_THREAD,
 	.name = "primary IPI",
 };
 
 static void __init smp_psurge_setup_cpu(int cpu_nr)
 {
-	if (cpu_nr != 0)
+	if (cpu_nr != 0 || !psurge_start)
 		return;
 
 	/* reset the entry point so if we get another intr we won't
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 3185300..bcc3cb4 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -2,4 +2,4 @@
 obj-y			+= opal-rtc.o opal-nvram.o
 
 obj-$(CONFIG_SMP)	+= smp.o
-obj-$(CONFIG_PCI)	+= pci.o pci-p5ioc2.o
+obj-$(CONFIG_PCI)	+= pci.o pci-p5ioc2.o pci-ioda.o
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 4a3f46d..3bb07e5 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -99,3 +99,11 @@
 OPAL_CALL(opal_pci_map_pe_dma_window,		OPAL_PCI_MAP_PE_DMA_WINDOW);
 OPAL_CALL(opal_pci_map_pe_dma_window_real,	OPAL_PCI_MAP_PE_DMA_WINDOW_REAL);
 OPAL_CALL(opal_pci_reset,			OPAL_PCI_RESET);
+OPAL_CALL(opal_pci_get_hub_diag_data,		OPAL_PCI_GET_HUB_DIAG_DATA);
+OPAL_CALL(opal_pci_get_phb_diag_data,		OPAL_PCI_GET_PHB_DIAG_DATA);
+OPAL_CALL(opal_pci_fence_phb,			OPAL_PCI_FENCE_PHB);
+OPAL_CALL(opal_pci_reinit,			OPAL_PCI_REINIT);
+OPAL_CALL(opal_pci_mask_pe_error,		OPAL_PCI_MASK_PE_ERROR);
+OPAL_CALL(opal_set_slot_led_status,		OPAL_SET_SLOT_LED_STATUS);
+OPAL_CALL(opal_get_epow_status,			OPAL_GET_EPOW_STATUS);
+OPAL_CALL(opal_set_system_attention_led,	OPAL_SET_SYSTEM_ATTENTION_LED);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
new file mode 100644
index 0000000..f31162c
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -0,0 +1,1330 @@
+/*
+ * Support PCI/PCIe on PowerNV platforms
+ *
+ * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/opal.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+
+#include "powernv.h"
+#include "pci.h"
+
+struct resource_wrap {
+	struct list_head	link;
+	resource_size_t		size;
+	resource_size_t		align;
+	struct pci_dev		*dev;	/* Set if it's a device */
+	struct pci_bus		*bus;	/* Set if it's a bridge */
+};
+
+static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
+		       struct va_format *vaf)
+{
+	char pfix[32];
+
+	if (pe->pdev)
+		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
+	else
+		sprintf(pfix, "%04x:%02x     ",
+			pci_domain_nr(pe->pbus), pe->pbus->number);
+	return printk("pci %s%s: [PE# %.3d] %pV", level, pfix, pe->pe_number, vaf);
+}
+
+#define define_pe_printk_level(func, kern_level)		\
+static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
+{								\
+	struct va_format vaf;					\
+	va_list args;						\
+	int r;							\
+								\
+	va_start(args, fmt);					\
+								\
+	vaf.fmt = fmt;						\
+	vaf.va = &args;						\
+								\
+	r = __pe_printk(kern_level, pe, &vaf);			\
+	va_end(args);						\
+								\
+	return r;						\
+}								\
+
+define_pe_printk_level(pe_err, KERN_ERR);
+define_pe_printk_level(pe_warn, KERN_WARNING);
+define_pe_printk_level(pe_info, KERN_INFO);
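+/* pe_err/pe_warn/pe_info print a message tagged with the device name (or the
+ * domain:bus pair for a bus PE) and the PE number, e.g.:
+ *
+ *	pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+ */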
+
+
+/* Calculate resource usage & alignment requirements of a single
+ * device. This also assigns all resources of the given type within
+ * the device, starting at 0 with the biggest one and continuing in
+ * decreasing order of size.
+ */
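+/* For example, a device whose BARs of the requested type are 64K, 16K and 4K
+ * ends up with them assigned at 0x0, 0x10000 and 0x14000 respectively, with
+ * *size = 0x15000 and *align = 0x10000.
+ */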
+static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
+					resource_size_t *size,
+					resource_size_t *align)
+{
+	resource_size_t start;
+	struct resource *r;
+	int i;
+
+	pr_devel("  -> CDR %s\n", pci_name(dev));
+
+	*size = *align = 0;
+
+	/* Clear the resources out and mark them all unset */
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		r = &dev->resource[i];
+		if (!(r->flags & flags))
+		    continue;
+		if (r->start) {
+			r->end -= r->start;
+			r->start = 0;
+		}
+		r->flags |= IORESOURCE_UNSET;
+	}
+
+	/* We currently keep all memory resources together; we will
+	 * handle prefetch & 64-bit separately in the future, but for
+	 * now we stick everything in M32
+	 */
+	start = 0;
+	for (;;) {
+		resource_size_t max_size = 0;
+		int max_no = -1;
+
+		/* Find next biggest resource */
+		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+			r = &dev->resource[i];
+			if (!(r->flags & IORESOURCE_UNSET) ||
+			    !(r->flags & flags))
+				continue;
+			if (resource_size(r) > max_size) {
+				max_size = resource_size(r);
+				max_no = i;
+			}
+		}
+		if (max_no < 0)
+			break;
+		r = &dev->resource[max_no];
+		if (max_size > *align)
+			*align = max_size;
+		*size += max_size;
+		r->start = start;
+		start += max_size;
+		r->end = r->start + max_size - 1;
+		r->flags &= ~IORESOURCE_UNSET;
+		pr_devel("  ->     R%d %016llx..%016llx\n",
+			 max_no, r->start, r->end);
+	}
+	pr_devel("  <- CDR %s size=%llx align=%llx\n",
+		 pci_name(dev), *size, *align);
+}
+
+/* Allocate a resource "wrap" for a given device or bridge and
+ * insert it at the right position in the sorted list
+ */
+static void __devinit pnv_ioda_add_wrap(struct list_head *list,
+					struct pci_bus *bus,
+					struct pci_dev *dev,
+					resource_size_t size,
+					resource_size_t align)
+{
+	struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
+
+	w->size = size;
+	w->align = align;
+	w->dev = dev;
+	w->bus = bus;
+
+	list_for_each_entry(w1, list, link) {
+		if (w1->align < align) {
+			list_add_tail(&w->link, &w1->link);
+			return;
+		}
+	}
+	list_add_tail(&w->link, list);
+}
+
+/* Offset device resources of a given type */
+static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
+					  unsigned int flags,
+					  resource_size_t offset)
+{
+	struct resource *r;
+	int i;
+
+	pr_devel("  -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
+
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		r = &dev->resource[i];
+		if (r->flags & flags) {
+			dev->resource[i].start += offset;
+			dev->resource[i].end += offset;
+		}
+	}
+
+	pr_devel("  <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
+}
+
+/* Offset bus resources (& all children) of a given type */
+static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
+					  unsigned int flags,
+					  resource_size_t offset)
+{
+	struct resource *r;
+	struct pci_dev *dev;
+	struct pci_bus *cbus;
+	int i;
+
+	pr_devel("  -> OBR %s [%x] +%016llx\n",
+		 bus->self ? pci_name(bus->self) : "root", flags, offset);
+
+	for (i = 0; i < 2; i++) {
+		r = bus->resource[i];
+		if (r && (r->flags & flags)) {
+			bus->resource[i]->start += offset;
+			bus->resource[i]->end += offset;
+		}
+	}
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		pnv_ioda_offset_dev(dev, flags, offset);
+	list_for_each_entry(cbus, &bus->children, node)
+		pnv_ioda_offset_bus(cbus, flags, offset);
+
+	pr_devel("  <- OBR %s [%x]\n",
+		 bus->self ? pci_name(bus->self) : "root", flags);
+}
+
+/* This is the guts of our IODA resource allocation. It is called
+ * recursively for each bus in the system. It calculates the size and
+ * alignment requirements for all children and assigns them resources
+ * such that:
+ *
+ *   - Each function fits in its own contiguous set of IO/M32
+ *     segments
+ *
+ *   - All segments behind a P2P bridge are contiguous and obey the
+ *     alignment constraints of those bridges
+ */
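+/* Children are aligned to at least one segment (io_segsize or m32_segsize)
+ * so that every segment maps to a single PE; bridge windows are additionally
+ * aligned to 4K for IO and 1M for M32.
+ */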
+static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
+					resource_size_t *size,
+					resource_size_t *align)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct pnv_phb *phb = hose->private_data;
+	resource_size_t dev_size, dev_align, start;
+	resource_size_t min_align, min_balign;
+	struct pci_dev *cdev;
+	struct pci_bus *cbus;
+	struct list_head head;
+	struct resource_wrap *w;
+	unsigned int bres;
+
+	*size = *align = 0;
+
+	pr_devel("-> CBR %s [%x]\n",
+		 bus->self ? pci_name(bus->self) : "root", flags);
+
+	/* Calculate alignment requirements based on the type
+	 * of resource we are working on
+	 */
+	if (flags & IORESOURCE_IO) {
+		bres = 0;
+		min_align = phb->ioda.io_segsize;
+		min_balign = 0x1000;
+	} else {
+		bres = 1;
+		min_align = phb->ioda.m32_segsize;
+		min_balign = 0x100000;
+	}
+	/* Gather all of our children's resources, ordered by alignment */
+	/* Gather all our children resources ordered by alignment */
+	INIT_LIST_HEAD(&head);
+
+	/*   - Busses */
+	list_for_each_entry(cbus, &bus->children, node) {
+		pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
+		pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
+	}
+
+	/*   - Devices */
+	list_for_each_entry(cdev, &bus->devices, bus_list) {
+		pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
+		/* Align them to segment size */
+		if (dev_align < min_align)
+			dev_align = min_align;
+		pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
+	}
+	if (list_empty(&head))
+		goto empty;
+
+	/* Now we can do two things: assign offsets to them within that
+	 * level and get our total alignment & size requirements. The
+	 * assignment algorithm is uber-trivial for now; we can try to be
+	 * smarter about filling holes later.
+	 */
+	start = bus->self ? 0 : bus->resource[bres]->start;
+
+	/* Don't hand out IO 0 */
+	if ((flags & IORESOURCE_IO) && !bus->self)
+		start += 0x1000;
+
+	while(!list_empty(&head)) {
+		w = list_first_entry(&head, struct resource_wrap, link);
+		list_del(&w->link);
+		if (w->size) {
+			if (start) {
+				start = ALIGN(start, w->align);
+				if (w->dev)
+					pnv_ioda_offset_dev(w->dev,flags,start);
+				else if (w->bus)
+					pnv_ioda_offset_bus(w->bus,flags,start);
+			}
+			if (w->align > *align)
+				*align = w->align;
+		}
+		start += w->size;
+		kfree(w);
+	}
+	*size = start;
+
+	/* Align and setup bridge resources */
+	*align = max_t(resource_size_t, *align,
+		       max_t(resource_size_t, min_align, min_balign));
+	*size = ALIGN(*size,
+		      max_t(resource_size_t, min_align, min_balign));
+ empty:
+	/* Only setup P2P's, not the PHB itself */
+	if (bus->self) {
+		WARN_ON(bus->resource[bres] == NULL);
+		bus->resource[bres]->start = 0;
+		bus->resource[bres]->flags = (*size) ? flags : 0;
+		bus->resource[bres]->end = (*size) ? (*size - 1) : 0;
+
+		/* Clear prefetch bus resources for now */
+		bus->resource[2]->flags = 0;
+	}
+
+	pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
+		 bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
+}
+
+static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
+{
+	struct device_node *np;
+
+	np = pci_device_to_OF_node(dev);
+	if (!np)
+		return NULL;
+	return PCI_DN(np);
+}
+
+static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	struct pnv_phb *phb = hose->private_data;
+	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+	unsigned int pe, i;
+	resource_size_t pos;
+	struct resource io_res;
+	struct resource m32_res;
+	struct pci_bus_region region;
+	int rc;
+
+	/* Anything not referenced in the device-tree gets PE#0 */
+	pe = pdn ? pdn->pe_number : 0;
+
+	/* Calculate the device min/max */
+	io_res.start = m32_res.start = (resource_size_t)-1;
+	io_res.end = m32_res.end = 0;
+	io_res.flags = IORESOURCE_IO;
+	m32_res.flags = IORESOURCE_MEM;
+
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		struct resource *r = NULL;
+		if (dev->resource[i].flags & IORESOURCE_IO)
+			r = &io_res;
+		if (dev->resource[i].flags & IORESOURCE_MEM)
+			r = &m32_res;
+		if (!r)
+			continue;
+		if (dev->resource[i].start < r->start)
+			r->start = dev->resource[i].start;
+		if (dev->resource[i].end > r->end)
+			r->end = dev->resource[i].end;
+	}
+
+	/* Setup IO segments */
+	if (io_res.start < io_res.end) {
+		pcibios_resource_to_bus(dev, &region, &io_res);
+		pos = region.start;
+		i = pos / phb->ioda.io_segsize;
+		while(i < phb->ioda.total_pe && pos <= region.end) {
+			if (phb->ioda.io_segmap[i]) {
+				pr_err("%s: Trying to use IO seg #%d which is"
+				       " already used by PE# %d\n",
+				       pci_name(dev), i,
+				       phb->ioda.io_segmap[i]);
+				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
+				break;
+			}
+			phb->ioda.io_segmap[i] = pe;
+			rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
+							 OPAL_IO_WINDOW_TYPE,
+							 0, i);
+			if (rc != OPAL_SUCCESS) {
+				pr_err("%s: OPAL error %d setting up mapping"
+				       " for IO seg# %d\n",
+				       pci_name(dev), rc, i);
+				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
+				break;
+			}
+			pos += phb->ioda.io_segsize;
+			i++;
+		}
+	}
+
+	/* Setup M32 segments */
+	if (m32_res.start < m32_res.end) {
+		pcibios_resource_to_bus(dev, &region, &m32_res);
+		pos = region.start;
+		i = pos / phb->ioda.m32_segsize;
+		while(i < phb->ioda.total_pe && pos <= region.end) {
+			if (phb->ioda.m32_segmap[i]) {
+				pr_err("%s: Trying to use M32 seg #%d which is"
+				       " already used by PE# %d\n",
+				       pci_name(dev), i,
+				       phb->ioda.m32_segmap[i]);
+				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
+				break;
+			}
+			phb->ioda.m32_segmap[i] = pe;
+			rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
+							 OPAL_M32_WINDOW_TYPE,
+							 0, i);
+			if (rc != OPAL_SUCCESS) {
+				pr_err("%s: OPAL error %d setting up mapping"
+				       " for M32 seg# %d\n",
+				       pci_name(dev), rc, i);
+				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
+				break;
+			}
+			pos += phb->ioda.m32_segsize;
+			i++;
+		}
+	}
+}
+
+/* Check if a resource still fits in the total IO or M32 range
+ * for a given PHB
+ */
+static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
+					   struct resource *r)
+{
+	struct resource *bounds;
+
+	if (r->flags & IORESOURCE_IO)
+		bounds = &hose->io_resource;
+	else if (r->flags & IORESOURCE_MEM)
+		bounds = &hose->mem_resources[0];
+	else
+		return 1;
+
+	if (r->start >= bounds->start && r->end <= bounds->end)
+		return 1;
+	r->flags = 0;
+	return 0;
+}
+
+static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct pci_bus *cbus;
+	struct pci_dev *cdev;
+	unsigned int i;
+
+	/* We used to clear all device enables here. However, it looks like
+	 * clearing the MEM enable causes Obsidian (IPR SCS) to go bonkers
+	 * and shoot fatal errors to the PHB, which in turn fences itself
+	 * and we can't recover from that ... yet. So for now, let's leave
+	 * the enables as-is and hope for the best.
+	 */
+
+	/* Check if bus resources fit in our IO or M32 range */
+	for (i = 0; bus->self && (i < 2); i++) {
+		struct resource *r = bus->resource[i];
+		if (r && !pnv_ioda_resource_fit(hose, r))
+			pr_err("%s: Bus %d resource %d disabled, no room\n",
+			       pci_name(bus->self), bus->number, i);
+	}
+
+	/* Update self if it's not a PHB */
+	if (bus->self)
+		pci_setup_bridge(bus);
+
+	/* Update child devices */
+	list_for_each_entry(cdev, &bus->devices, bus_list) {
+		/* Check if the resource fits; if not, disable it */
+		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+			struct resource *r = &cdev->resource[i];
+			if (!pnv_ioda_resource_fit(hose, r))
+				pr_err("%s: Resource %d disabled, no room\n",
+				       pci_name(cdev), i);
+		}
+
+		/* Assign segments */
+		pnv_ioda_setup_pe_segments(cdev);
+
+		/* Update HW BARs */
+		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+			pci_update_resource(cdev, i);
+	}
+
+	/* Update child busses */
+	list_for_each_entry(cbus, &bus->children, node)
+		pnv_ioda_update_resources(cbus);
+}
+
+static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
+{
+	unsigned long pe;
+
+	do {
+		pe = find_next_zero_bit(phb->ioda.pe_alloc,
+					phb->ioda.total_pe, 0);
+		if (pe >= phb->ioda.total_pe)
+			return IODA_INVALID_PE;
+	} while(test_and_set_bit(pe, phb->ioda.pe_alloc));
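+	/* find_next_zero_bit() plus the atomic test_and_set_bit() retry makes
+	 * the PE number allocation safe against concurrent callers.
+	 */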
+
+	phb->ioda.pe_array[pe].pe_number = pe;
+	return pe;
+}
+
+static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
+{
+	WARN_ON(phb->ioda.pe_array[pe].pdev);
+
+	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
+	clear_bit(pe, phb->ioda.pe_alloc);
+}
+
+/* Currently these two are only used when MSIs are enabled. This will
+ * change, but in the meantime we need to protect them to avoid warnings.
+ */
+#ifdef CONFIG_PCI_MSI
+static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	struct pnv_phb *phb = hose->private_data;
+	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+
+	if (!pdn)
+		return NULL;
+	if (pdn->pe_number == IODA_INVALID_PE)
+		return NULL;
+	return &phb->ioda.pe_array[pdn->pe_number];
+}
+
+static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
+{
+	struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);
+
+	while (!pe && dev->bus->self) {
+		dev = dev->bus->self;
+		pe = __pnv_ioda_get_one_pe(dev);
+		if (pe)
+			pe = pe->bus_pe;
+	}
+	return pe;
+}
+#endif /* CONFIG_PCI_MSI */
+
+static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
+					   struct pnv_ioda_pe *pe)
+{
+	struct pci_dev *parent;
+	uint8_t bcomp, dcomp, fcomp;
+	long rc, rid_end, rid;
+
+	/* Bus validation ? */
+	if (pe->pbus) {
+		int count;
+
+		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
+		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
+		parent = pe->pbus->self;
+		count = pe->pbus->subordinate - pe->pbus->secondary + 1;
+		switch(count) {
+		case  1: bcomp = OpalPciBusAll;		break;
+		case  2: bcomp = OpalPciBus7Bits;	break;
+		case  4: bcomp = OpalPciBus6Bits;	break;
+		case  8: bcomp = OpalPciBus5Bits;	break;
+		case 16: bcomp = OpalPciBus4Bits;	break;
+		case 32: bcomp = OpalPciBus3Bits;	break;
+		default:
+			pr_err("%s: Number of subordinate busses %d"
+			       " unsupported\n",
+			       pci_name(pe->pbus->self), count);
+			/* Do an exact match only */
+			bcomp = OpalPciBusAll;
+		}
+		rid_end = pe->rid + (count << 8);
+	} else {
+		parent = pe->pdev->bus->self;
+		bcomp = OpalPciBusAll;
+		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
+		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
+		rid_end = pe->rid + 1;
+	}
+
+	/* Associate PE in PELT */
+	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
+			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
+	if (rc) {
+		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+		return -ENXIO;
+	}
+	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+	/* Add to all parents PELT-V */
+	while (parent) {
+		struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
+			/* XXX What to do in case of error ? */
+		}
+		parent = parent->bus->self;
+	}
+	/* Setup reverse map */
+	for (rid = pe->rid; rid < rid_end; rid++)
+		phb->ioda.pe_rmap[rid] = pe->pe_number;
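+	/* pe_rmap is indexed by RID, ie. (bus << 8) | devfn, which is how
+	 * pnv_ioda_bdfn_to_pe() maps a device back to its PE.
+	 */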
+
+	/* Set up one MVE for the PE on IODA1 */
+	if (phb->type == PNV_PHB_IODA1) {
+		pe->mve_number = pe->pe_number;
+		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
+				      pe->pe_number);
+		if (rc) {
+			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+			       rc, pe->mve_number);
+			pe->mve_number = -1;
+		} else {
+			rc = opal_pci_set_mve_enable(phb->opal_id,
+						     pe->mve_number, OPAL_ENABLE_MVE);
+			if (rc) {
+				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
+				       rc, pe->mve_number);
+				pe->mve_number = -1;
+			}
+		}
+	} else if (phb->type == PNV_PHB_IODA2)
+		pe->mve_number = 0;
+
+	return 0;
+}
+
+static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
+						 struct pnv_ioda_pe *pe)
+{
+	struct pnv_ioda_pe *lpe;
+
+	list_for_each_entry(lpe, &phb->ioda.pe_list, link) {
+		if (lpe->dma_weight < pe->dma_weight) {
+			list_add_tail(&pe->link, &lpe->link);
+			return;
+		}
+	}
+	list_add_tail(&pe->link, &phb->ioda.pe_list);
+}
+
+static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
+{
+	/* This is quite simplistic. The "base" weight of a device
+	 * is 10. A weight of 0 means no DMA is accounted for the device.
+	 */
+
+	/* If it's a bridge, no DMA */
+	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+		return 0;
+
+	/* Reduce the weight of slow USB controllers */
+	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
+	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
+	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+		return 3;
+
+	/* Increase the weight of RAID (includes Obsidian) */
+	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
+		return 15;
+
+	/* Default */
+	return 10;
+}
+
+static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	struct pnv_phb *phb = hose->private_data;
+	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+	struct pnv_ioda_pe *pe;
+	int pe_num;
+
+	if (!pdn) {
+		pr_err("%s: Device tree node not associated properly\n",
+			   pci_name(dev));
+		return NULL;
+	}
+	if (pdn->pe_number != IODA_INVALID_PE)
+		return NULL;
+
+	/* PE#0 has been pre-set */
+	if (dev->bus->number == 0)
+		pe_num = 0;
+	else
+		pe_num = pnv_ioda_alloc_pe(phb);
+	if (pe_num == IODA_INVALID_PE) {
+		pr_warning("%s: Not enough PE# available, disabling device\n",
+			   pci_name(dev));
+		return NULL;
+	}
+
+	/* NOTE: We get only one ref to the pci_dev for the pdn, not one for
+	 * the pointer in the PE data structure; both should be destroyed at
+	 * the same time. However, this needs to be looked at more closely
+	 * again once we actually start removing things (Hotplug, SR-IOV, ...)
+	 *
+	 * At some point we want to remove the PDN completely anyway.
+	 */
+	pe = &phb->ioda.pe_array[pe_num];
+	pci_dev_get(dev);
+	pdn->pcidev = dev;
+	pdn->pe_number = pe_num;
+	pe->pdev = dev;
+	pe->pbus = NULL;
+	pe->tce32_seg = -1;
+	pe->mve_number = -1;
+	pe->rid = dev->bus->number << 8 | pdn->devfn;
+
+	pe_info(pe, "Associated device to PE\n");
+
+	if (pnv_ioda_configure_pe(phb, pe)) {
+		/* XXX What do we do here ? */
+		if (pe_num)
+			pnv_ioda_free_pe(phb, pe_num);
+		pdn->pe_number = IODA_INVALID_PE;
+		pe->pdev = NULL;
+		pci_dev_put(dev);
+		return NULL;
+	}
+
+	/* Assign a DMA weight to the device */
+	pe->dma_weight = pnv_ioda_dma_weight(dev);
+	if (pe->dma_weight != 0) {
+		phb->ioda.dma_weight += pe->dma_weight;
+		phb->ioda.dma_pe_count++;
+	}
+
+	/* Link the PE */
+	pnv_ioda_link_pe_by_weight(phb, pe);
+
+	return pe;
+}
+
+static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+
+		if (pdn == NULL) {
+			pr_warn("%s: No device node associated with device !\n",
+				pci_name(dev));
+			continue;
+		}
+		pci_dev_get(dev);
+		pdn->pcidev = dev;
+		pdn->pe_number = pe->pe_number;
+		pe->dma_weight += pnv_ioda_dma_weight(dev);
+		if (dev->subordinate)
+			pnv_ioda_setup_same_PE(dev->subordinate, pe);
+	}
+}
+
+static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
+					    struct pnv_ioda_pe *ppe)
+{
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+	struct pnv_phb *phb = hose->private_data;
+	struct pci_bus *bus = dev->subordinate;
+	struct pnv_ioda_pe *pe;
+	int pe_num;
+
+	if (!bus) {
+		pr_warning("%s: Bridge without a subordinate bus !\n",
+			   pci_name(dev));
+		return;
+	}
+	pe_num = pnv_ioda_alloc_pe(phb);
+	if (pe_num == IODA_INVALID_PE) {
+		pr_warning("%s: Not enough PE# available, disabling bus\n",
+			   pci_name(dev));
+		return;
+	}
+
+	pe = &phb->ioda.pe_array[pe_num];
+	ppe->bus_pe = pe;
+	pe->pbus = bus;
+	pe->pdev = NULL;
+	pe->tce32_seg = -1;
+	pe->mve_number = -1;
+	pe->rid = bus->secondary << 8;
+	pe->dma_weight = 0;
+
+	pe_info(pe, "Secondary busses %d..%d associated with PE\n",
+		bus->secondary, bus->subordinate);
+
+	if (pnv_ioda_configure_pe(phb, pe)) {
+		/* XXX What do we do here ? */
+		if (pe_num)
+			pnv_ioda_free_pe(phb, pe_num);
+		pe->pbus = NULL;
+		return;
+	}
+
+	/* Associate it with all child devices */
+	pnv_ioda_setup_same_PE(bus, pe);
+
+	/* Account for one DMA PE if at least one DMA-capable device exists
+	 * below the bridge
+	 */
+	if (pe->dma_weight != 0) {
+		phb->ioda.dma_weight += pe->dma_weight;
+		phb->ioda.dma_pe_count++;
+	}
+
+	/* Link the PE */
+	pnv_ioda_link_pe_by_weight(phb, pe);
+}
+
+static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+	struct pnv_ioda_pe *pe;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pe = pnv_ioda_setup_dev_PE(dev);
+		if (pe == NULL)
+			continue;
+		/* Leaving the PCIe domain ... single PE# */
+		if (dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+			pnv_ioda_setup_bus_PE(dev, pe);
+		else if (dev->subordinate)
+			pnv_ioda_setup_PEs(dev->subordinate);
+	}
+}
+
+static void __devinit pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb,
+						 struct pci_dev *dev)
+{
+	/* We delay DMA setup until after we have assigned all PE#s */
+}
+
+static void __devinit pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
+					     struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		set_iommu_table_base(&dev->dev, &pe->tce32_table);
+		if (dev->subordinate)
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+	}
+}
+
+static void __devinit pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
+						struct pnv_ioda_pe *pe,
+						unsigned int base,
+						unsigned int segs)
+{
+
+	struct page *tce_mem = NULL;
+	const __be64 *swinvp;
+	struct iommu_table *tbl;
+	unsigned int i;
+	int64_t rc;
+	void *addr;
+
+	/* 256M DMA window, 4K TCE pages, 8 bytes per TCE */
+#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
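+	/* ie. 65536 TCEs of 8 bytes each, 512KB of table per 256M segment */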
+
+	/* XXX FIXME: Handle 64-bit only DMA devices */
+	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
+	/* XXX FIXME: Allocate multi-level tables on PHB3 */
+
+	/* We shouldn't already have a 32-bit DMA associated */
+	if (WARN_ON(pe->tce32_seg >= 0))
+		return;
+
+	/* Grab a 32-bit TCE table */
+	pe->tce32_seg = base;
+	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
+		(base << 28), ((base + segs) << 28) - 1);
+
+	/* XXX Currently, we allocate one big contiguous table for the
+	 * TCEs. We only really need one chunk per 256M of TCE space
+	 * (ie. per segment), but that's an optimization for later; it
+	 * requires some added smarts in our get/put_tce implementation.
+	 */
+	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
+				   get_order(TCE32_TABLE_SIZE * segs));
+	if (!tce_mem) {
+		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
+		goto fail;
+	}
+	addr = page_address(tce_mem);
+	memset(addr, 0, TCE32_TABLE_SIZE * segs);
+
+	/* Configure HW */
+	for (i = 0; i < segs; i++) {
+		rc = opal_pci_map_pe_dma_window(phb->opal_id,
+					      pe->pe_number,
+					      base + i, 1,
+					      __pa(addr) + TCE32_TABLE_SIZE * i,
+					      TCE32_TABLE_SIZE, 0x1000);
+		if (rc) {
+			pe_err(pe, " Failed to configure 32-bit TCE table,"
+			       " err %ld\n", rc);
+			goto fail;
+		}
+	}
+
+	/* Setup linux iommu table */
+	tbl = &pe->tce32_table;
+	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
+				  base << 28);
+
+	/* OPAL variant of P7IOC SW invalidated TCEs */
+	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
+	if (swinvp) {
+		/* We need a couple more fields -- an address and a data
+		 * value to OR in.  Since the bus number is only printed out
+		 * on table free errors, and on the first pass the data will
+		 * be a relative bus number, print that out instead.
+		 */
+		tbl->it_busno = 0;
+		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE
+			| TCE_PCI_SWINV_PAIR;
+	}
+	iommu_init_table(tbl, phb->hose->node);
+
+	if (pe->pdev)
+		set_iommu_table_base(&pe->pdev->dev, tbl);
+	else
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
+	return;
+ fail:
+	/* XXX Failure: Try to fallback to 64-bit only ? */
+	if (pe->tce32_seg >= 0)
+		pe->tce32_seg = -1;
+	if (tce_mem)
+		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
+}
+
+static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
+{
+	struct pci_controller *hose = phb->hose;
+	unsigned int residual, remaining, segs, tw, base;
+	struct pnv_ioda_pe *pe;
+
+	/* If we have more PE#s than segments available, hand out one
+	 * per PE until we run out and let the rest fail. If not,
+	 * then we assign at least one segment per PE, plus more based
+	 * on the relative DMA weight of the devices under that PE.
+	 */
+	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
+		residual = 0;
+	else
+		residual = phb->ioda.tce32_count -
+			phb->ioda.dma_pe_count;
+
+	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
+		hose->global_number, phb->ioda.tce32_count);
+	pr_info("PCI: %d PE# for a total weight of %d\n",
+		phb->ioda.dma_pe_count, phb->ioda.dma_weight);
+
+	/* Walk our PE list and configure their DMA segments, hand them
+	 * out one base segment plus any residual segments based on
+	 * weight
+	 */
+	remaining = phb->ioda.tce32_count;
+	tw = phb->ioda.dma_weight;
+	base = 0;
+	list_for_each_entry(pe, &phb->ioda.pe_list, link) {
+		if (!pe->dma_weight)
+			continue;
+		if (!remaining) {
+			pe_warn(pe, "No DMA32 resources available\n");
+			continue;
+		}
+		segs = 1;
+		if (residual) {
+			segs += ((pe->dma_weight * residual)  + (tw / 2)) / tw;
+			if (segs > remaining)
+				segs = remaining;
+		}
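+		/* e.g. with a total weight of 40, 12 residual segments and a
+		 * PE weight of 10, this hands out 1 + (10 * 12 + 20) / 40 = 4
+		 * segments, ie. nearest-integer rounding of the PE's share.
+		 */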
+		pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
+			pe->dma_weight, segs);
+		pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
+		remaining -= segs;
+		base += segs;
+	}
+}
+
+#ifdef CONFIG_PCI_MSI
+static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
+				  unsigned int hwirq, unsigned int is_64,
+				  struct msi_msg *msg)
+{
+	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+	unsigned int xive_num = hwirq - phb->msi_base;
+	uint64_t addr64;
+	uint32_t addr32, data;
+	int rc;
+
+	/* No PE assigned ? bail out ... no MSI for you ! */
+	if (pe == NULL)
+		return -ENXIO;
+
+	/* Check if we have an MVE */
+	if (pe->mve_number < 0)
+		return -ENXIO;
+
+	/* Assign XIVE to PE */
+	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
+	if (rc) {
+		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
+			pci_name(dev), rc, xive_num);
+		return -EIO;
+	}
+
+	if (is_64) {
+		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
+				     &addr64, &data);
+		if (rc) {
+			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
+				pci_name(dev), rc);
+			return -EIO;
+		}
+		msg->address_hi = addr64 >> 32;
+		msg->address_lo = addr64 & 0xfffffffful;
+	} else {
+		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
+				     &addr32, &data);
+		if (rc) {
+			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
+				pci_name(dev), rc);
+			return -EIO;
+		}
+		msg->address_hi = 0;
+		msg->address_lo = addr32;
+	}
+	msg->data = data;
+
+	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
+		 " address=%x_%08x data=%x PE# %d\n",
+		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
+		 msg->address_hi, msg->address_lo, data, pe->pe_number);
+
+	return 0;
+}
+
+static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
+{
+	unsigned int bmap_size;
+	const __be32 *prop = of_get_property(phb->hose->dn,
+					     "ibm,opal-msi-ranges", NULL);
+	if (!prop) {
+		/* BML Fallback */
+		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
+	}
+	if (!prop)
+		return;
+
+	phb->msi_base = be32_to_cpup(prop);
+	phb->msi_count = be32_to_cpup(prop + 1);
+	bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
+	phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
+	if (!phb->msi_map) {
+		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
+		       phb->hose->global_number);
+		return;
+	}
+	phb->msi_setup = pnv_pci_ioda_msi_setup;
+	phb->msi32_support = 1;
+	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
+		phb->msi_count, phb->msi_base);
+}
+#else
+static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
+#endif /* CONFIG_PCI_MSI */
+
+/* This is the starting point of our IODA specific resource
+ * allocation process
+ */
+static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose)
+{
+	resource_size_t size, align;
+	struct pci_bus *child;
+
+	/* Associate PEs per function */
+	pnv_ioda_setup_PEs(hose->bus);
+
+	/* Calculate all resources */
+	pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align);
+	pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align);
+
+	/* Apply them to HW */
+	pnv_ioda_update_resources(hose->bus);
+
+	/* Setup DMA */
+	pnv_ioda_setup_dma(hose->private_data);
+
+	/* Configure PCI Express settings */
+	list_for_each_entry(child, &hose->bus->children, node) {
+		struct pci_dev *self = child->self;
+		if (!self)
+			continue;
+		pcie_bus_configure_settings(child, self->pcie_mpss);
+	}
+}
+
+/* Prevent enabling devices for which we couldn't properly
+ * assign a PE
+ */
+static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
+{
+	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+
+	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+		return -EINVAL;
+	return 0;
+}
+
+static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
+			       u32 devfn)
+{
+	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
+}
+
+void __init pnv_pci_init_ioda1_phb(struct device_node *np)
+{
+	struct pci_controller *hose;
+	static int primary = 1;
+	struct pnv_phb *phb;
+	unsigned long size, m32map_off, iomap_off, pemap_off;
+	const u64 *prop64;
+	u64 phb_id;
+	void *aux;
+	long rc;
+
+	pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name);
+
+	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
+	if (!prop64) {
+		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
+		return;
+	}
+	phb_id = be64_to_cpup(prop64);
+	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);
+
+	phb = alloc_bootmem(sizeof(struct pnv_phb));
+	if (phb) {
+		memset(phb, 0, sizeof(struct pnv_phb));
+		phb->hose = hose = pcibios_alloc_controller(np);
+	}
+	if (!phb || !phb->hose) {
+		pr_err("PCI: Failed to allocate PCI controller for %s\n",
+		       np->full_name);
+		return;
+	}
+
+	spin_lock_init(&phb->lock);
+	/* XXX Use device-tree */
+	hose->first_busno = 0;
+	hose->last_busno = 0xff;
+	hose->private_data = phb;
+	phb->opal_id = phb_id;
+	phb->type = PNV_PHB_IODA1;
+
+	/* Detect specific models for error handling */
+	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
+		phb->model = PNV_PHB_MODEL_P7IOC;
+	else
+		phb->model = PNV_PHB_MODEL_UNKNOWN;
+
+	/* We parse "ranges" now since we need to deduce the register base
+	 * from the IO base
+	 */
+	pci_process_bridge_OF_ranges(phb->hose, np, primary);
+	primary = 0;
+
+	/* Magic formula from Milton */
+	phb->regs = of_iomap(np, 0);
+	if (phb->regs == NULL)
+		pr_err("  Failed to map registers !\n");
+
+
+	/* XXX This is hack-a-thon. This needs to be changed so that:
+	 *  - we obtain stuff like PE# etc... from device-tree
+	 *  - we properly re-allocate M32 ourselves
+	 *    (the OFW one isn't very good)
+	 */
+
+	/* Initialize more IODA stuff */
+	phb->ioda.total_pe = 128;
+
+	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
+	/* OFW has already chopped off the top 64k of M32 space (MSI space) */
+	phb->ioda.m32_size += 0x10000;
+
+	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
+	phb->ioda.m32_pci_base = hose->mem_resources[0].start -
+		hose->pci_mem_offset;
+	phb->ioda.io_size = hose->pci_io_size;
+	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
+	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
+
+	/* Allocate aux data & arrays */
+	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
+	m32map_off = size;
+	size += phb->ioda.total_pe;
+	iomap_off = size;
+	size += phb->ioda.total_pe;
+	pemap_off = size;
+	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
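+	/* One bootmem blob holds, in order: the PE allocation bitmap, the M32
+	 * and IO segment maps, and the PE array itself.
+	 */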
+	aux = alloc_bootmem(size);
+	memset(aux, 0, size);
+	phb->ioda.pe_alloc = aux;
+	phb->ioda.m32_segmap = aux + m32map_off;
+	phb->ioda.io_segmap = aux + iomap_off;
+	phb->ioda.pe_array = aux + pemap_off;
+	set_bit(0, phb->ioda.pe_alloc);
+
+	INIT_LIST_HEAD(&phb->ioda.pe_list);
+
+	/* Calculate how many 32-bit TCE segments we have */
+	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
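+	/* ie. one 256M DMA segment for every 256M of PCI space below the M32
+	 * window, see pnv_pci_ioda_setup_dma_pe().
+	 */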
+
+	/* Clear unusable m64 */
+	hose->mem_resources[1].flags = 0;
+	hose->mem_resources[1].start = 0;
+	hose->mem_resources[1].end = 0;
+	hose->mem_resources[2].flags = 0;
+	hose->mem_resources[2].start = 0;
+	hose->mem_resources[2].end = 0;
+
+#if 0
+	rc = opal_pci_set_phb_mem_window(opal->phb_id,
+					 window_type,
+					 window_num,
+					 starting_real_address,
+					 starting_pci_address,
+					 segment_size);
+#endif
+
+	pr_info("  %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
+		phb->ioda.total_pe,
+		phb->ioda.m32_size, phb->ioda.m32_segsize,
+		phb->ioda.io_size, phb->ioda.io_segsize);
+
+	if (phb->regs)  {
+		pr_devel(" BUID     = 0x%016llx\n", in_be64(phb->regs + 0x100));
+		pr_devel(" PHB2_CR  = 0x%016llx\n", in_be64(phb->regs + 0x160));
+		pr_devel(" IO_BAR   = 0x%016llx\n", in_be64(phb->regs + 0x170));
+		pr_devel(" IO_BAMR  = 0x%016llx\n", in_be64(phb->regs + 0x178));
+		pr_devel(" IO_SAR   = 0x%016llx\n", in_be64(phb->regs + 0x180));
+		pr_devel(" M32_BAR  = 0x%016llx\n", in_be64(phb->regs + 0x190));
+		pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198));
+		pr_devel(" M32_SAR  = 0x%016llx\n", in_be64(phb->regs + 0x1a0));
+	}
+	phb->hose->ops = &pnv_pci_ops;
+
+	/* Setup RID -> PE mapping function */
+	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
+
+	/* Setup TCEs */
+	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+
+	/* Setup MSI support */
+	pnv_pci_init_ioda_msis(phb);
+
+	/* We set both probe_only and PCI_REASSIGN_ALL_RSRC. This is an
+	 * odd combination which essentially means that we skip all resource
+	 * fixups and assignments in the generic code, and do it all
+	 * ourselves here
+	 */
+	pci_probe_only = 1;
+	ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb;
+	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
+	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+
+	/* Reset IODA tables to a clean state */
+	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
+	if (rc)
+		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);
+	opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
+}
+
+void __init pnv_pci_init_ioda_hub(struct device_node *np)
+{
+	struct device_node *phbn;
+	const u64 *prop64;
+	u64 hub_id;
+
+	pr_info("Probing IODA IO-Hub %s\n", np->full_name);
+
+	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
+	if (!prop64) {
+		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
+		return;
+	}
+	hub_id = be64_to_cpup(prop64);
+	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
+
+	/* Count child PHBs */
+	for_each_child_of_node(np, phbn) {
+		/* Look for IODA1 PHBs */
+		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
+			pnv_pci_init_ioda1_phb(phbn);
+	}
+}
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 4c80f7c..2649677 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -137,6 +137,7 @@
 	phb->hose->private_data = phb;
 	phb->opal_id = phb_id;
 	phb->type = PNV_PHB_P5IOC2;
+	phb->model = PNV_PHB_MODEL_P5IOC2;
 
 	phb->regs = of_iomap(np, 0);
 
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 85bb66d..a70bc1e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -144,6 +144,112 @@
 }
 #endif /* CONFIG_PCI_MSI */
 
+static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
+{
+	struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
+	int i;
+
+	pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);
+
+	pr_info("  brdgCtl              = 0x%08x\n", data->brdgCtl);
+
+	pr_info("  portStatusReg        = 0x%08x\n", data->portStatusReg);
+	pr_info("  rootCmplxStatus      = 0x%08x\n", data->rootCmplxStatus);
+	pr_info("  busAgentStatus       = 0x%08x\n", data->busAgentStatus);
+
+	pr_info("  deviceStatus         = 0x%08x\n", data->deviceStatus);
+	pr_info("  slotStatus           = 0x%08x\n", data->slotStatus);
+	pr_info("  linkStatus           = 0x%08x\n", data->linkStatus);
+	pr_info("  devCmdStatus         = 0x%08x\n", data->devCmdStatus);
+	pr_info("  devSecStatus         = 0x%08x\n", data->devSecStatus);
+
+	pr_info("  rootErrorStatus      = 0x%08x\n", data->rootErrorStatus);
+	pr_info("  uncorrErrorStatus    = 0x%08x\n", data->uncorrErrorStatus);
+	pr_info("  corrErrorStatus      = 0x%08x\n", data->corrErrorStatus);
+	pr_info("  tlpHdr1              = 0x%08x\n", data->tlpHdr1);
+	pr_info("  tlpHdr2              = 0x%08x\n", data->tlpHdr2);
+	pr_info("  tlpHdr3              = 0x%08x\n", data->tlpHdr3);
+	pr_info("  tlpHdr4              = 0x%08x\n", data->tlpHdr4);
+	pr_info("  sourceId             = 0x%08x\n", data->sourceId);
+
+	pr_info("  errorClass           = 0x%016llx\n", data->errorClass);
+	pr_info("  correlator           = 0x%016llx\n", data->correlator);
+
+	pr_info("  p7iocPlssr           = 0x%016llx\n", data->p7iocPlssr);
+	pr_info("  p7iocCsr             = 0x%016llx\n", data->p7iocCsr);
+	pr_info("  lemFir               = 0x%016llx\n", data->lemFir);
+	pr_info("  lemErrorMask         = 0x%016llx\n", data->lemErrorMask);
+	pr_info("  lemWOF               = 0x%016llx\n", data->lemWOF);
+	pr_info("  phbErrorStatus       = 0x%016llx\n", data->phbErrorStatus);
+	pr_info("  phbFirstErrorStatus  = 0x%016llx\n", data->phbFirstErrorStatus);
+	pr_info("  phbErrorLog0         = 0x%016llx\n", data->phbErrorLog0);
+	pr_info("  phbErrorLog1         = 0x%016llx\n", data->phbErrorLog1);
+	pr_info("  mmioErrorStatus      = 0x%016llx\n", data->mmioErrorStatus);
+	pr_info("  mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
+	pr_info("  mmioErrorLog0        = 0x%016llx\n", data->mmioErrorLog0);
+	pr_info("  mmioErrorLog1        = 0x%016llx\n", data->mmioErrorLog1);
+	pr_info("  dma0ErrorStatus      = 0x%016llx\n", data->dma0ErrorStatus);
+	pr_info("  dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
+	pr_info("  dma0ErrorLog0        = 0x%016llx\n", data->dma0ErrorLog0);
+	pr_info("  dma0ErrorLog1        = 0x%016llx\n", data->dma0ErrorLog1);
+	pr_info("  dma1ErrorStatus      = 0x%016llx\n", data->dma1ErrorStatus);
+	pr_info("  dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
+	pr_info("  dma1ErrorLog0        = 0x%016llx\n", data->dma1ErrorLog0);
+	pr_info("  dma1ErrorLog1        = 0x%016llx\n", data->dma1ErrorLog1);
+
+	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
+		if ((data->pestA[i] >> 63) == 0 &&
+		    (data->pestB[i] >> 63) == 0)
+			continue;
+		pr_info("  PE[%3d] PESTA        = 0x%016llx\n", i, data->pestA[i]);
+		pr_info("          PESTB        = 0x%016llx\n", data->pestB[i]);
+	}
+}
+
+static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
+{
+	switch(phb->model) {
+	case PNV_PHB_MODEL_P7IOC:
+		pnv_pci_dump_p7ioc_diag_data(phb);
+		break;
+	default:
+		pr_warning("PCI %d: Can't decode this PHB diag data\n",
+			   phb->hose->global_number);
+	}
+}
+
+static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
+{
+	unsigned long flags, rc;
+	int has_diag;
+
+	spin_lock_irqsave(&phb->lock, flags);
+
+	rc = opal_pci_get_phb_diag_data(phb->opal_id, phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
+	has_diag = (rc == OPAL_SUCCESS);
+
+	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
+				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+	if (rc) {
+		pr_warning("PCI %d: Failed to clear EEH freeze state"
+			   " for PE#%d, err %ld\n",
+			   phb->hose->global_number, pe_no, rc);
+
+		/* For now, let's only display the diag buffer when we fail to clear
+		 * the EEH status. We'll do more sensible things later when we have
+		 * proper EEH support. We need to make sure we don't pollute ourselves
+		 * with the normal errors generated when probing empty slots
+		 */
+		if (has_diag)
+			pnv_pci_dump_phb_diag_data(phb);
+		else
+			pr_warning("PCI %d: No diag data available\n",
+				   phb->hose->global_number);
+	}
+
+	spin_unlock_irqrestore(&phb->lock, flags);
+}
+
 static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
 				     u32 bdfn)
 {
@@ -165,15 +271,8 @@
 	}
 	cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
 		bdfn, pe_no, fstate);
-	if (fstate != 0) {
-		rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
-					      OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
-		if (rc) {
-			pr_warning("PCI %d: Failed to clear EEH freeze state"
-				   " for PE#%d, err %lld\n",
-				   phb->hose->global_number, pe_no, rc);
-		}
-	}
+	if (fstate != 0)
+		pnv_pci_handle_eeh_config(phb, pe_no);
 }
 
 static int pnv_pci_read_config(struct pci_bus *bus,
@@ -257,12 +356,54 @@
 	.write = pnv_pci_write_config,
 };
 
+
+static void pnv_tce_invalidate(struct iommu_table *tbl,
+			       u64 *startp, u64 *endp)
+{
+	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+	unsigned long start, end, inc;
+
+	start = __pa(startp);
+	end = __pa(endp);
+
+
+	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
+	if (tbl->it_busno) {
+		start <<= 12;
+		end <<= 12;
+		inc = 128 << 12;
+		start |= tbl->it_busno;
+		end |= tbl->it_busno;
+	}
+	/* p7ioc-style invalidation, 2 TCEs per write */
+	else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
+		start |= (1ull << 63);
+		end |= (1ull << 63);
+		inc = 16;
+	}
+	/* Default (older HW) */
+	else
+		inc = 128;
+
+	end |= inc - 1;		/* round up end to be different than start */
+
+	mb(); /* Ensure above stores are visible */
+	while (start <= end) {
+		__raw_writeq(start, invalidate);
+		start += inc;
+	}
+	/* The iommu layer will do another mb() for us on build(), and
+	 * we don't need one on free()
+	 */
+}
+
+
 static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
 			 unsigned long uaddr, enum dma_data_direction direction,
 			 struct dma_attrs *attrs)
 {
 	u64 proto_tce;
-	u64 *tcep;
+	u64 *tcep, *tces;
 	u64 rpn;
 
 	proto_tce = TCE_PCI_READ; // Read allowed
@@ -270,25 +411,33 @@
 	if (direction != DMA_TO_DEVICE)
 		proto_tce |= TCE_PCI_WRITE;
 
-	tcep = ((u64 *)tbl->it_base) + index;
+	tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+	rpn = __pa(uaddr) >> TCE_SHIFT;
 
-	while (npages--) {
-		/* can't move this out since we might cross LMB boundary */
-		rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
-		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+	while (npages--)
+		*(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT);
 
-		uaddr += TCE_PAGE_SIZE;
-		tcep++;
-	}
+	/* Some implementations won't cache invalid TCEs and thus may not
+	 * need that flush. We'll probably turn it_type into a bit mask
+	 * of flags if that becomes the case
+	 */
+	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
+		pnv_tce_invalidate(tbl, tces, tcep - 1);
+
 	return 0;
 }
 
 static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
 {
-	u64 *tcep = ((u64 *)tbl->it_base) + index;
+	u64 *tcep, *tces;
+
+	tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
 
 	while (npages--)
 		*(tcep++) = 0;
+
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
+		pnv_tce_invalidate(tbl, tces, tcep - 1);
 }
 
 void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
@@ -308,13 +457,14 @@
 pnv_pci_setup_bml_iommu(struct pci_controller *hose)
 {
 	struct iommu_table *tbl;
-	const __be64 *basep;
+	const __be64 *basep, *swinvp;
 	const __be32 *sizep;
 
 	basep = of_get_property(hose->dn, "linux,tce-base", NULL);
 	sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
 	if (basep == NULL || sizep == NULL) {
-		pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name);
+		pr_err("PCI: %s has missing tce entries !\n",
+		       hose->dn->full_name);
 		return NULL;
 	}
 	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
@@ -323,6 +473,15 @@
 	pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
 				  be32_to_cpup(sizep), 0);
 	iommu_init_table(tbl, hose->node);
+
+	/* Deal with SW invalidated TCEs when needed (BML way) */
+	swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
+				 NULL);
+	if (swinvp) {
+		tbl->it_busno = swinvp[1];
+		tbl->it_index = (unsigned long)ioremap(swinvp[0], 8);
+		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
+	}
 	return tbl;
 }
 
@@ -356,6 +515,13 @@
 		pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+/* Fixup wrong class code in p7ioc root complex */
+static void __devinit pnv_p7ioc_rc_quirk(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
+
 static int pnv_pci_probe_mode(struct pci_bus *bus)
 {
 	struct pci_controller *hose = pci_bus_to_host(bus);
@@ -400,12 +566,24 @@
 		init_pci_config_tokens();
 		find_and_init_phbs();
 #endif /* CONFIG_PPC_POWERNV_RTAS */
-	} else {
-		/* OPAL is here, do our normal stuff */
+	}
+	/* OPAL is here, do our normal stuff */
+	else {
+		int found_ioda = 0;
+
+		/* Look for IODA IO-Hubs. We don't support mixing IODA
+		 * and p5ioc2 due to the need to change some global
+		 * probing flags
+		 */
+		for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
+			pnv_pci_init_ioda_hub(np);
+			found_ioda = 1;
+		}
 
 		/* Look for p5ioc2 IO-Hubs */
-		for_each_compatible_node(np, NULL, "ibm,p5ioc2")
-			pnv_pci_init_p5ioc2_hub(np);
+		if (!found_ioda)
+			for_each_compatible_node(np, NULL, "ibm,p5ioc2")
+				pnv_pci_init_p5ioc2_hub(np);
 	}
 
 	/* Setup the linkage between OF nodes and PHBs */
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index d4dbc49..8bc4796 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -9,9 +9,63 @@
 	PNV_PHB_IODA2,
 };
 
+/* Precise PHB model for error management */
+enum pnv_phb_model {
+	PNV_PHB_MODEL_UNKNOWN,
+	PNV_PHB_MODEL_P5IOC2,
+	PNV_PHB_MODEL_P7IOC,
+};
+
+#define PNV_PCI_DIAG_BUF_SIZE	4096
+
+/* Data associated with a PE, including IOMMU tracking etc.. */
+struct pnv_ioda_pe {
+	/* A PE can be associated with a single device or an
+	 * entire bus (& children). In the former case, pdev
+	 * is populated; in the latter case, pbus is.
+	 */
+	struct pci_dev		*pdev;
+	struct pci_bus		*pbus;
+
+	/* Effective RID (device RID for a device PE and base bus
+	 * RID with devfn 0 for a bus PE)
+	 */
+	unsigned int		rid;
+
+	/* PE number */
+	unsigned int		pe_number;
+
+	/* "Weight" assigned to the PE for the sake of DMA resource
+	 * allocations
+	 */
+	unsigned int		dma_weight;
+
+	/* If this is a PCI-E -> PCI-X bridge, this points to the
+	 * corresponding bus PE
+	 */
+	struct pnv_ioda_pe	*bus_pe;
+
+	/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
+	int			tce32_seg;
+	int			tce32_segcount;
+	struct iommu_table	tce32_table;
+
+	/* XXX TODO: Add support for additional 64-bit iommus */
+
+	/* MSIs. MVE index is identical for 32 and 64 bit MSI
+	 * and -1 if not supported. (It's actually identical to the
+	 * PE number)
+	 */
+	int			mve_number;
+
+	/* Link in list of PE#s */
+	struct list_head	link;
+};
+
 struct pnv_phb {
 	struct pci_controller	*hose;
 	enum pnv_phb_type	type;
+	enum pnv_phb_model	model;
 	u64			opal_id;
 	void __iomem		*regs;
 	spinlock_t		lock;
@@ -34,7 +88,52 @@
 		struct {
 			struct iommu_table iommu_table;
 		} p5ioc2;
+
+		struct {
+			/* Global bridge info */
+			unsigned int		total_pe;
+			unsigned int		m32_size;
+			unsigned int		m32_segsize;
+			unsigned int		m32_pci_base;
+			unsigned int		io_size;
+			unsigned int		io_segsize;
+			unsigned int		io_pci_base;
+
+			/* PE allocation bitmap */
+			unsigned long		*pe_alloc;
+
+			/* M32 & IO segment maps */
+			unsigned int		*m32_segmap;
+			unsigned int		*io_segmap;
+			struct pnv_ioda_pe	*pe_array;
+
+			/* Reverse map of PEs; will have to be extended if
+			 * we are to support more than 256 PEs. Indexed by
+			 * { bus, devfn }
+			 */
+			unsigned char		pe_rmap[0x10000];
+
+			/* 32-bit TCE tables allocation */
+			unsigned long		tce32_count;
+
+			/* Total "weight" for the sake of DMA resource
+			 * allocation
+			 */
+			unsigned int		dma_weight;
+			unsigned int		dma_pe_count;
+
+			/* List of used PEs, sorted at boot
+			 * for resource allocation purposes
+			 */
+			struct list_head	pe_list;
+		} ioda;
 	};
+
+	/* PHB status structure */
+	union {
+		unsigned char			blob[PNV_PCI_DIAG_BUF_SIZE];
+		struct OpalIoP7IOCPhbErrorData	p7ioc;
+	} diag;
 };
 
 extern struct pci_ops pnv_pci_ops;
@@ -43,6 +142,7 @@
 				      void *tce_mem, u64 tce_size,
 				      u64 dma_offset);
 extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
+extern void pnv_pci_init_ioda_hub(struct device_node *np);
 
 
 #endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index e877366..17210c5 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -75,7 +75,7 @@
 	/* On OPAL v2 the CPU are still spinning inside OPAL itself,
 	 * get them back now
 	 */
-	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
+	if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
 		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
 		rc = opal_start_cpu(pcpu, start_here);
 		if (rc != OPAL_SUCCESS)
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 1d6f4f4..617efa1 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -31,18 +31,18 @@
 
 #if defined(DEBUG)
 #define DBG udbg_printf
+#define FAIL udbg_printf
 #else
-#define DBG pr_debug
+#define DBG pr_devel
+#define FAIL pr_debug
 #endif
 
 /**
  * struct ps3_bmp - a per cpu irq status and mask bitmap structure
  * @status: 256 bit status bitmap indexed by plug
- * @unused_1:
+ * @unused_1: Alignment
  * @mask: 256 bit mask bitmap indexed by plug
- * @unused_2:
- * @lock:
- * @ipi_debug_brk_mask:
+ * @unused_2: Alignment
  *
  * The HV maintains per SMT thread mappings of HV outlet to HV plug on
  * behalf of the guest.  These mappings are implemented as 256 bit guest
@@ -73,21 +73,24 @@
 		unsigned long mask;
 		u64 unused_2[3];
 	};
-	u64 ipi_debug_brk_mask;
-	spinlock_t lock;
 };
 
 /**
  * struct ps3_private - a per cpu data structure
  * @bmp: ps3_bmp structure
+ * @bmp_lock: Synchronize access to bmp.
+ * @ipi_debug_brk_mask: Mask for debug break IPIs
  * @ppe_id: HV logical_ppe_id
  * @thread_id: HV thread_id
+ * @ipi_mask: Mask of IPI virqs
  */
 
 struct ps3_private {
 	struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
+	spinlock_t bmp_lock;
 	u64 ppe_id;
 	u64 thread_id;
+	unsigned long ipi_debug_brk_mask;
 	unsigned long ipi_mask;
 };
 
@@ -105,7 +108,7 @@
 	struct ps3_private *pd = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 
-	pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
+	DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
 		pd->thread_id, d->irq);
 
 	local_irq_save(flags);
@@ -126,7 +129,7 @@
 	struct ps3_private *pd = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 
-	pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
+	DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
 		pd->thread_id, d->irq);
 
 	local_irq_save(flags);
@@ -190,19 +193,19 @@
 	*virq = irq_create_mapping(NULL, outlet);
 
 	if (*virq == NO_IRQ) {
-		pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n",
+		FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
 			__func__, __LINE__, outlet);
 		result = -ENOMEM;
 		goto fail_create;
 	}
 
-	pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
+	DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
 		outlet, cpu, *virq);
 
 	result = irq_set_chip_data(*virq, pd);
 
 	if (result) {
-		pr_debug("%s:%d: irq_set_chip_data failed\n",
+		FAIL("%s:%d: irq_set_chip_data failed\n",
 			__func__, __LINE__);
 		goto fail_set;
 	}
@@ -228,13 +231,13 @@
 {
 	const struct ps3_private *pd = irq_get_chip_data(virq);
 
-	pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
+	DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
 		__LINE__, pd->ppe_id, pd->thread_id, virq);
 
 	irq_set_chip_data(virq, NULL);
 	irq_dispose_mapping(virq);
 
-	pr_debug("%s:%d <-\n", __func__, __LINE__);
+	DBG("%s:%d <-\n", __func__, __LINE__);
 	return 0;
 }
 
@@ -257,7 +260,7 @@
 	result = ps3_virq_setup(cpu, outlet, virq);
 
 	if (result) {
-		pr_debug("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
+		FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
 		goto fail_setup;
 	}
 
@@ -269,7 +272,7 @@
 		outlet, 0);
 
 	if (result) {
-		pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
+		FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
 		__func__, __LINE__, ps3_result(result));
 		result = -EPERM;
 		goto fail_connect;
@@ -298,7 +301,7 @@
 	int result;
 	const struct ps3_private *pd = irq_get_chip_data(virq);
 
-	pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
+	DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
 		__LINE__, pd->ppe_id, pd->thread_id, virq);
 
 	ps3_chip_mask(irq_get_irq_data(virq));
@@ -306,7 +309,7 @@
 	result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
 
 	if (result)
-		pr_info("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
+		FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
 		__func__, __LINE__, ps3_result(result));
 
 	ps3_virq_destroy(virq);
@@ -334,7 +337,7 @@
 	result = lv1_construct_event_receive_port(&outlet);
 
 	if (result) {
-		pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
+		FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		*virq = NO_IRQ;
 		return result;
@@ -360,14 +363,14 @@
 {
 	int result;
 
-	pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
+	DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
 
 	ps3_chip_mask(irq_get_irq_data(virq));
 
 	result = lv1_destruct_event_receive_port(virq_to_hw(virq));
 
 	if (result)
-		pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
+		FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
 	/*
@@ -375,7 +378,7 @@
 	 * calls from interrupt context (smp_call_function) when kexecing.
 	 */
 
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	DBG(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
@@ -411,7 +414,7 @@
 		dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
 
 	if (result) {
-		pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
+		FAIL("%s:%d: lv1_connect_interrupt_event_receive_port"
 			" failed: %s\n", __func__, __LINE__,
 			ps3_result(result));
 		ps3_event_receive_port_destroy(*virq);
@@ -419,7 +422,7 @@
 		return result;
 	}
 
-	pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+	DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
 		dev->interrupt_id, *virq);
 
 	return 0;
@@ -433,14 +436,14 @@
 
 	int result;
 
-	pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+	DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
 		dev->interrupt_id, virq);
 
 	result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
 		dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
 
 	if (result)
-		pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
+		FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port"
 			" failed: %s\n", __func__, __LINE__,
 			ps3_result(result));
 
@@ -455,7 +458,7 @@
 	result = ps3_virq_destroy(virq);
 	BUG_ON(result);
 
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	DBG(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
@@ -480,7 +483,7 @@
 	result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
 
 	if (result) {
-		pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
+		FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		return result;
 	}
@@ -510,7 +513,7 @@
 	result = lv1_destruct_io_irq_outlet(outlet);
 
 	if (result)
-		pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
+		FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
 	return result;
@@ -542,7 +545,7 @@
 	result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
 
 	if (result) {
-		pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+		FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		return result;
 	}
@@ -562,7 +565,7 @@
 	result = lv1_deconfigure_virtual_uart_irq();
 
 	if (result) {
-		pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+		FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		return result;
 	}
@@ -595,7 +598,7 @@
 	result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
 
 	if (result) {
-		pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
+		FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		return result;
 	}
@@ -626,7 +629,7 @@
 static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
 	const char* func, int line)
 {
-	pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
+	pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n",
 		func, line, header, cpu,
 		*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
 		*p & 0xffff);
@@ -635,7 +638,7 @@
 static void __maybe_unused _dump_256_bmp(const char *header,
 	const u64 *p, unsigned cpu, const char* func, int line)
 {
-	pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
+	pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n",
 		func, line, header, cpu, p[0], p[1], p[2], p[3]);
 }
 
@@ -644,10 +647,10 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pd->bmp.lock, flags);
+	spin_lock_irqsave(&pd->bmp_lock, flags);
 	_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
-	_dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
-	spin_unlock_irqrestore(&pd->bmp.lock, flags);
+	_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
+	spin_unlock_irqrestore(&pd->bmp_lock, flags);
 }
 
 #define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
@@ -656,9 +659,9 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pd->bmp.lock, flags);
-	_dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
-	spin_unlock_irqrestore(&pd->bmp.lock, flags);
+	spin_lock_irqsave(&pd->bmp_lock, flags);
+	_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
+	spin_unlock_irqrestore(&pd->bmp_lock, flags);
 }
 #else
 static void dump_bmp(struct ps3_private* pd) {};
@@ -667,7 +670,7 @@
 static int ps3_host_map(struct irq_host *h, unsigned int virq,
 	irq_hw_number_t hwirq)
 {
-	pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
+	DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
 		virq);
 
 	irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
@@ -690,10 +693,10 @@
 {
 	struct ps3_private *pd = &per_cpu(ps3_private, cpu);
 
-	pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
+	set_bit(63 - virq, &pd->ipi_debug_brk_mask);
 
-	pr_debug("%s:%d: cpu %u, virq %u, mask %llxh\n", __func__, __LINE__,
-		cpu, virq, pd->bmp.ipi_debug_brk_mask);
+	DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
+		cpu, virq, pd->ipi_debug_brk_mask);
 }
 
 void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
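The debug-break hunk above swaps the open-coded MSB-first mask for set_bit() on the new ipi_debug_brk_mask field. Both forms address the same bit; set_bit() just ORs it in atomically instead of overwriting the whole word. A small equivalence sketch, illustrative only and not part of the patch:

#include <linux/kernel.h>
#include <linux/bitops.h>

/* For 0 <= virq < 64, BIT(63 - virq) == 0x8000000000000000UL >> virq,
 * so set_bit(63 - virq, &mask) marks exactly the bit the old code assigned.
 */
static inline unsigned long ipi_debug_brk_bit(unsigned int virq)
{
	BUILD_BUG_ON(BITS_PER_LONG != 64);
	return BIT(63 - virq);
}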
@@ -714,14 +717,14 @@
 
 	/* check for ipi break first to stop this cpu ASAP */
 
-	if (x & pd->bmp.ipi_debug_brk_mask)
-		x &= pd->bmp.ipi_debug_brk_mask;
+	if (x & pd->ipi_debug_brk_mask)
+		x &= pd->ipi_debug_brk_mask;
 
 	asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
 	plug &= 0x3f;
 
 	if (unlikely(plug == NO_IRQ)) {
-		pr_debug("%s:%d: no plug found: thread_id %llu\n", __func__,
+		DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
 			__LINE__, pd->thread_id);
 		dump_bmp(&per_cpu(ps3_private, 0));
 		dump_bmp(&per_cpu(ps3_private, 1));
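For readers without the cntlzd mnemonic to hand: the inline asm above counts the leading zeros of the pending-interrupt word, so the plug number falls straight out of the bitmap (bit 63 pending gives plug 0; a zero word gives 64, which the & 0x3f mask folds back to 0, i.e. NO_IRQ). An equivalent using a compiler builtin, purely illustrative and not part of the patch:

#include <linux/types.h>

static inline unsigned int ps3_plug_from_bmp(u64 x)
{
	/* cntlzd: 0 for bit 63 set, 63 for bit 0 set, 64 for x == 0 */
	unsigned int plug = x ? __builtin_clzll(x) : 64;

	return plug & 0x3f;
}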
@@ -760,9 +763,9 @@
 
 		lv1_get_logical_ppe_id(&pd->ppe_id);
 		pd->thread_id = get_hard_smp_processor_id(cpu);
-		spin_lock_init(&pd->bmp.lock);
+		spin_lock_init(&pd->bmp_lock);
 
-		pr_debug("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
+		DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
 			__func__, __LINE__, pd->ppe_id, pd->thread_id,
 			ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
 
@@ -770,7 +773,7 @@
 			pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
 
 		if (result)
-			pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
+			FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:"
 				" %s\n", __func__, __LINE__,
 				ps3_result(result));
 	}
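The interrupt.c hunks above convert pr_debug()/pr_info() calls to platform-local DBG() and FAIL() macros. Their definitions sit near the top of interrupt.c and are not visible in this diff; one plausible shape, assuming udbg_printf() on DEBUG builds and the pr_*() helpers otherwise (an assumption, not taken from the patch):

#if defined(DEBUG)
#define DBG	udbg_printf
#define FAIL	udbg_printf
#else
#define DBG	pr_devel
#define FAIL	pr_debug
#endif

The point of the split is that failure paths get a distinct macro whose level can be raised in one place, while routine tracing drops to pr_devel() and compiles away on non-DEBUG builds.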
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 72714ad..8bd6ba5 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -319,7 +319,6 @@
 	}
 
 	memblock_add(start_addr, map.r1.size);
-	memblock_analyze();
 
 	result = online_pages(start_pfn, nr_pages);
 
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index ca40f6a..7bdfea3 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -44,7 +44,7 @@
 		s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
 	s[i] = 0;
 
-	pr_debug("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s);
+	pr_devel("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s);
 #endif
 }
 
@@ -53,7 +53,7 @@
 static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
 	u64 n4, const char *func, int line)
 {
-	pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+	pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
 	_dump_field("n1: ", n1, func, line);
 	_dump_field("n2: ", n2, func, line);
 	_dump_field("n3: ", n3, func, line);
@@ -65,13 +65,13 @@
 static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
 	u64 v1, u64 v2, const char *func, int line)
 {
-	pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+	pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
 	_dump_field("n1: ", n1, func, line);
 	_dump_field("n2: ", n2, func, line);
 	_dump_field("n3: ", n3, func, line);
 	_dump_field("n4: ", n4, func, line);
-	pr_debug("%s:%d: v1: %016llx\n", func, line, v1);
-	pr_debug("%s:%d: v2: %016llx\n", func, line, v2);
+	pr_devel("%s:%d: v1: %016llx\n", func, line, v1);
+	pr_devel("%s:%d: v2: %016llx\n", func, line, v2);
 }
 
 /**
@@ -131,11 +131,11 @@
 		lpar_id = id;
 	}
 
-	result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1,
+	result = lv1_read_repository_node(lpar_id, n1, n2, n3, n4, &v1,
 		&v2);
 
 	if (result) {
-		pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
+		pr_warn("%s:%d: lv1_read_repository_node failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 		dump_node_name(lpar_id, n1, n2, n3, n4);
 		return -ENOENT;
@@ -149,10 +149,10 @@
 		*_v2 = v2;
 
 	if (v1 && !_v1)
-		pr_debug("%s:%d: warning: discarding non-zero v1: %016llx\n",
+		pr_devel("%s:%d: warning: discarding non-zero v1: %016llx\n",
 			__func__, __LINE__, v1);
 	if (v2 && !_v2)
-		pr_debug("%s:%d: warning: discarding non-zero v2: %016llx\n",
+		pr_devel("%s:%d: warning: discarding non-zero v2: %016llx\n",
 			__func__, __LINE__, v2);
 
 	return 0;
@@ -323,16 +323,16 @@
 	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
 
 	if (result) {
-		pr_debug("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
+		pr_devel("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
 		return result;
 	}
 
-	pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
+	pr_devel("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
 		__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
 		num_dev);
 
 	if (tmp.dev_index >= num_dev) {
-		pr_debug("%s:%d: no device found\n", __func__, __LINE__);
+		pr_devel("%s:%d: no device found\n", __func__, __LINE__);
 		return -ENODEV;
 	}
 
@@ -340,7 +340,7 @@
 		&tmp.dev_type);
 
 	if (result) {
-		pr_debug("%s:%d read_dev_type failed\n", __func__, __LINE__);
+		pr_devel("%s:%d read_dev_type failed\n", __func__, __LINE__);
 		return result;
 	}
 
@@ -348,12 +348,12 @@
 		&tmp.dev_id);
 
 	if (result) {
-		pr_debug("%s:%d ps3_repository_read_dev_id failed\n", __func__,
+		pr_devel("%s:%d ps3_repository_read_dev_id failed\n", __func__,
 		__LINE__);
 		return result;
 	}
 
-	pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
+	pr_devel("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
 		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
 
 	*repo = tmp;
@@ -367,14 +367,14 @@
 	struct ps3_repository_device tmp;
 	unsigned int num_dev;
 
-	pr_debug(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
+	pr_devel(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
 		 bus_id, dev_id);
 
 	for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) {
 		result = ps3_repository_read_bus_id(tmp.bus_index,
 						    &tmp.bus_id);
 		if (result) {
-			pr_debug("%s:%u read_bus_id(%u) failed\n", __func__,
+			pr_devel("%s:%u read_bus_id(%u) failed\n", __func__,
 				 __LINE__, tmp.bus_index);
 			return result;
 		}
@@ -382,23 +382,23 @@
 		if (tmp.bus_id == bus_id)
 			goto found_bus;
 
-		pr_debug("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
+		pr_devel("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
 			 tmp.bus_id);
 	}
-	pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
+	pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
 	return result;
 
 found_bus:
 	result = ps3_repository_read_bus_type(tmp.bus_index, &tmp.bus_type);
 	if (result) {
-		pr_debug("%s:%u read_bus_type(%u) failed\n", __func__,
+		pr_devel("%s:%u read_bus_type(%u) failed\n", __func__,
 			 __LINE__, tmp.bus_index);
 		return result;
 	}
 
 	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
 	if (result) {
-		pr_debug("%s:%u read_bus_num_dev failed\n", __func__,
+		pr_devel("%s:%u read_bus_num_dev failed\n", __func__,
 			 __LINE__);
 		return result;
 	}
@@ -408,7 +408,7 @@
 						    tmp.dev_index,
 						    &tmp.dev_id);
 		if (result) {
-			pr_debug("%s:%u read_dev_id(%u:%u) failed\n", __func__,
+			pr_devel("%s:%u read_dev_id(%u:%u) failed\n", __func__,
 				 __LINE__, tmp.bus_index, tmp.dev_index);
 			return result;
 		}
@@ -416,21 +416,21 @@
 		if (tmp.dev_id == dev_id)
 			goto found_dev;
 
-		pr_debug("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
+		pr_devel("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
 			 tmp.dev_id);
 	}
-	pr_debug(" <- %s:%u: dev not found\n", __func__, __LINE__);
+	pr_devel(" <- %s:%u: dev not found\n", __func__, __LINE__);
 	return result;
 
 found_dev:
 	result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
 					      &tmp.dev_type);
 	if (result) {
-		pr_debug("%s:%u read_dev_type failed\n", __func__, __LINE__);
+		pr_devel("%s:%u read_dev_type failed\n", __func__, __LINE__);
 		return result;
 	}
 
-	pr_debug(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
+	pr_devel(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
 		 __func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index,
 		 tmp.dev_index, tmp.bus_id, tmp.dev_id);
 	*repo = tmp;
@@ -443,18 +443,18 @@
 	int result = 0;
 	struct ps3_repository_device repo;
 
-	pr_debug(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
+	pr_devel(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
 
 	repo.bus_type = bus_type;
 	result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
 	if (result) {
-		pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
+		pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
 		return result;
 	}
 
 	result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
 	if (result) {
-		pr_debug("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
+		pr_devel("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
 			 repo.bus_index);
 		return result;
 	}
@@ -469,13 +469,13 @@
 
 		result = callback(&repo);
 		if (result) {
-			pr_debug("%s:%d: abort at callback\n", __func__,
+			pr_devel("%s:%d: abort at callback\n", __func__,
 				__LINE__);
 			break;
 		}
 	}
 
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	pr_devel(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
@@ -489,7 +489,7 @@
 	for (i = from; i < 10; i++) {
 		error = ps3_repository_read_bus_type(i, &type);
 		if (error) {
-			pr_debug("%s:%d read_bus_type failed\n",
+			pr_devel("%s:%d read_bus_type failed\n",
 				__func__, __LINE__);
 			*bus_index = UINT_MAX;
 			return error;
@@ -509,7 +509,7 @@
 	int result = 0;
 	unsigned int res_index;
 
-	pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
+	pr_devel("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
 
 	*interrupt_id = UINT_MAX;
 
@@ -521,7 +521,7 @@
 			repo->dev_index, res_index, &t, &id);
 
 		if (result) {
-			pr_debug("%s:%d read_dev_intr failed\n",
+			pr_devel("%s:%d read_dev_intr failed\n",
 				__func__, __LINE__);
 			return result;
 		}
@@ -535,7 +535,7 @@
 	if (res_index == 10)
 		return -ENODEV;
 
-	pr_debug("%s:%d: found intr_type %u at res_index %u\n",
+	pr_devel("%s:%d: found intr_type %u at res_index %u\n",
 		__func__, __LINE__, intr_type, res_index);
 
 	return result;
@@ -547,7 +547,7 @@
 	int result = 0;
 	unsigned int res_index;
 
-	pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
+	pr_devel("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
 
 	*bus_addr = *len = 0;
 
@@ -560,7 +560,7 @@
 			repo->dev_index, res_index, &t, &a, &l);
 
 		if (result) {
-			pr_debug("%s:%d read_dev_reg failed\n",
+			pr_devel("%s:%d read_dev_reg failed\n",
 				__func__, __LINE__);
 			return result;
 		}
@@ -575,7 +575,7 @@
 	if (res_index == 10)
 		return -ENODEV;
 
-	pr_debug("%s:%d: found reg_type %u at res_index %u\n",
+	pr_devel("%s:%d: found reg_type %u at res_index %u\n",
 		__func__, __LINE__, reg_type, res_index);
 
 	return result;
@@ -1009,7 +1009,7 @@
 	int result = 0;
 	unsigned int res_index;
 
-	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+	pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
 		repo->bus_index, repo->dev_index);
 
 	for (res_index = 0; res_index < 10; res_index++) {
@@ -1021,13 +1021,13 @@
 
 		if (result) {
 			if (result !=  LV1_NO_ENTRY)
-				pr_debug("%s:%d ps3_repository_read_dev_intr"
+				pr_devel("%s:%d ps3_repository_read_dev_intr"
 					" (%u:%u) failed\n", __func__, __LINE__,
 					repo->bus_index, repo->dev_index);
 			break;
 		}
 
-		pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
+		pr_devel("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
 			__func__, __LINE__, repo->bus_index, repo->dev_index,
 			intr_type, interrupt_id);
 	}
@@ -1042,18 +1042,18 @@
 
 		if (result) {
 			if (result !=  LV1_NO_ENTRY)
-				pr_debug("%s:%d ps3_repository_read_dev_reg"
+				pr_devel("%s:%d ps3_repository_read_dev_reg"
 					" (%u:%u) failed\n", __func__, __LINE__,
 					repo->bus_index, repo->dev_index);
 			break;
 		}
 
-		pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
+		pr_devel("%s:%d (%u:%u) reg_type %u, bus_addr %llxh, len %llxh\n",
 			__func__, __LINE__, repo->bus_index, repo->dev_index,
 			reg_type, bus_addr, len);
 	}
 
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	pr_devel(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
@@ -1063,22 +1063,22 @@
 	unsigned int num_regions, region_index;
 	u64 port, blk_size, num_blocks;
 
-	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+	pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
 		repo->bus_index, repo->dev_index);
 
 	result = ps3_repository_read_stor_dev_info(repo->bus_index,
 		repo->dev_index, &port, &blk_size, &num_blocks, &num_regions);
 	if (result) {
-		pr_debug("%s:%d ps3_repository_read_stor_dev_info"
+		pr_devel("%s:%d ps3_repository_read_stor_dev_info"
 			" (%u:%u) failed\n", __func__, __LINE__,
 			repo->bus_index, repo->dev_index);
 		goto out;
 	}
 
-	pr_debug("%s:%d  (%u:%u): port %lu, blk_size %lu, num_blocks "
-		 "%lu, num_regions %u\n",
-		 __func__, __LINE__, repo->bus_index, repo->dev_index, port,
-		 blk_size, num_blocks, num_regions);
+	pr_devel("%s:%d  (%u:%u): port %llu, blk_size %llu, num_blocks "
+		 "%llu, num_regions %u\n",
+		 __func__, __LINE__, repo->bus_index, repo->dev_index,
+		port, blk_size, num_blocks, num_regions);
 
 	for (region_index = 0; region_index < num_regions; region_index++) {
 		unsigned int region_id;
@@ -1088,19 +1088,20 @@
 			repo->dev_index, region_index, &region_id,
 			&region_start, &region_size);
 		if (result) {
-			 pr_debug("%s:%d ps3_repository_read_stor_dev_region"
+			 pr_devel("%s:%d ps3_repository_read_stor_dev_region"
 				  " (%u:%u) failed\n", __func__, __LINE__,
 				  repo->bus_index, repo->dev_index);
 			break;
 		}
 
-		pr_debug("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
+		pr_devel("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
 			__func__, __LINE__, repo->bus_index, repo->dev_index,
-			region_id, region_start, region_size);
+			region_id, (unsigned long)region_start,
+			(unsigned long)region_size);
 	}
 
 out:
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	pr_devel(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
@@ -1109,7 +1110,7 @@
 {
 	int result = 0;
 
-	pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);
+	pr_devel(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);
 
 	for (repo->dev_index = 0; repo->dev_index < num_dev;
 		repo->dev_index++) {
@@ -1118,7 +1119,7 @@
 			repo->dev_index, &repo->dev_type);
 
 		if (result) {
-			pr_debug("%s:%d ps3_repository_read_dev_type"
+			pr_devel("%s:%d ps3_repository_read_dev_type"
 				" (%u:%u) failed\n", __func__, __LINE__,
 				repo->bus_index, repo->dev_index);
 			break;
@@ -1128,15 +1129,15 @@
 			repo->dev_index, &repo->dev_id);
 
 		if (result) {
-			pr_debug("%s:%d ps3_repository_read_dev_id"
+			pr_devel("%s:%d ps3_repository_read_dev_id"
 				" (%u:%u) failed\n", __func__, __LINE__,
 				repo->bus_index, repo->dev_index);
 			continue;
 		}
 
-		pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %lu\n", __func__,
+		pr_devel("%s:%d  (%u:%u): dev_type %u, dev_id %lu\n", __func__,
 			__LINE__, repo->bus_index, repo->dev_index,
-			repo->dev_type, repo->dev_id);
+			repo->dev_type, (unsigned long)repo->dev_id);
 
 		ps3_repository_dump_resource_info(repo);
 
@@ -1144,7 +1145,7 @@
 			dump_stor_dev_info(repo);
 	}
 
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	pr_devel(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
@@ -1153,7 +1154,7 @@
 	int result = 0;
 	struct ps3_repository_device repo;
 
-	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+	pr_devel(" -> %s:%d\n", __func__, __LINE__);
 
 	memset(&repo, 0, sizeof(repo));
 
@@ -1164,7 +1165,7 @@
 			&repo.bus_type);
 
 		if (result) {
-			pr_debug("%s:%d read_bus_type(%u) failed\n",
+			pr_devel("%s:%d read_bus_type(%u) failed\n",
 				__func__, __LINE__, repo.bus_index);
 			break;
 		}
@@ -1173,32 +1174,32 @@
 			&repo.bus_id);
 
 		if (result) {
-			pr_debug("%s:%d read_bus_id(%u) failed\n",
+			pr_devel("%s:%d read_bus_id(%u) failed\n",
 				__func__, __LINE__, repo.bus_index);
 			continue;
 		}
 
 		if (repo.bus_index != repo.bus_id)
-			pr_debug("%s:%d bus_index != bus_id\n",
+			pr_devel("%s:%d bus_index != bus_id\n",
 				__func__, __LINE__);
 
 		result = ps3_repository_read_bus_num_dev(repo.bus_index,
 			&num_dev);
 
 		if (result) {
-			pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
+			pr_devel("%s:%d read_bus_num_dev(%u) failed\n",
 				__func__, __LINE__, repo.bus_index);
 			continue;
 		}
 
-		pr_debug("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
+		pr_devel("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
 			__func__, __LINE__, repo.bus_index, repo.bus_type,
-			repo.bus_id, num_dev);
+			(unsigned long)repo.bus_id, num_dev);
 
 		dump_device_info(&repo, num_dev);
 	}
 
-	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	pr_devel(" <- %s:%d\n", __func__, __LINE__);
 	return result;
 }
 
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index e8ec1b2..2d664c5 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -193,10 +193,12 @@
 
 static void __init ps3_setup_arch(void)
 {
+	u64 tmp;
 
 	DBG(" -> %s:%d\n", __func__, __LINE__);
 
-	lv1_get_version_info(&ps3_firmware_version.raw);
+	lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
+
 	printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
 	       ps3_firmware_version.major, ps3_firmware_version.minor,
 	       ps3_firmware_version.rev);
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index efc1cd8..4b35166 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -57,7 +57,7 @@
 			" (%d)\n", __func__, __LINE__, cpu, msg, result);
 }
 
-static int ps3_smp_probe(void)
+static int __init ps3_smp_probe(void)
 {
 	int cpu;
 
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 451fad1..e17fa14 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -154,7 +154,7 @@
 	u64 id;
 
 	lv1_get_logical_ppe_id(&id);
-	lv1_get_virtual_address_space_id_of_ppe(id, &id);
+	lv1_get_virtual_address_space_id_of_ppe(&id);
 
 	return id;
 }
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index c81f6bb..ae7b6d4 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -120,3 +120,12 @@
 	  which are accessible through a debugfs file.
 
 	  Say N if you are unsure.
+
+config PSERIES_IDLE
+	tristate "Cpuidle driver for pSeries platforms"
+	depends on CPU_IDLE
+	depends on PPC_PSERIES
+	default y
+	help
+	  Select this option to enable processor idle state management
+	  through cpuidle subsystem.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 3556e40..236db46b 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_DTL)		+= dtl.o
 obj-$(CONFIG_IO_EVENT_IRQ)	+= io_event_irq.o
+obj-$(CONFIG_PSERIES_IDLE)	+= processor_idle.o
 
 ifeq ($(CONFIG_PPC_PSERIES),y)
 obj-$(CONFIG_SUSPEND)		+= suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 3cafc30..c638535 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -33,7 +33,7 @@
 #include <linux/sched.h>
 #include <linux/stringify.h>
 #include <linux/swap.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <asm/firmware.h>
 #include <asm/hvcall.h>
 #include <asm/mmu.h>
@@ -65,7 +65,7 @@
 static unsigned int cmm_debug = CMM_DEBUG;
 static unsigned int cmm_disabled = CMM_DISABLE;
 static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
-static struct sys_device cmm_sysdev;
+static struct device cmm_dev;
 
 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
@@ -347,25 +347,25 @@
 }
 
 #define CMM_SHOW(name, format, args...)			\
-	static ssize_t show_##name(struct sys_device *dev,	\
-				   struct sysdev_attribute *attr,	\
+	static ssize_t show_##name(struct device *dev,	\
+				   struct device_attribute *attr,	\
 				   char *buf)			\
 	{							\
 		return sprintf(buf, format, ##args);		\
 	}							\
-	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
 CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
 CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
 
-static ssize_t show_oom_pages(struct sys_device *dev,
-			      struct sysdev_attribute *attr, char *buf)
+static ssize_t show_oom_pages(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
 }
 
-static ssize_t store_oom_pages(struct sys_device *dev,
-			       struct sysdev_attribute *attr,
+static ssize_t store_oom_pages(struct device *dev,
+			       struct device_attribute *attr,
 			       const char *buf, size_t count)
 {
 	unsigned long val = simple_strtoul (buf, NULL, 10);
@@ -379,17 +379,18 @@
 	return count;
 }
 
-static SYSDEV_ATTR(oom_freed_kb, S_IWUSR| S_IRUGO,
+static DEVICE_ATTR(oom_freed_kb, S_IWUSR | S_IRUGO,
 		   show_oom_pages, store_oom_pages);
 
-static struct sysdev_attribute *cmm_attrs[] = {
-	&attr_loaned_kb,
-	&attr_loaned_target_kb,
-	&attr_oom_freed_kb,
+static struct device_attribute *cmm_attrs[] = {
+	&dev_attr_loaned_kb,
+	&dev_attr_loaned_target_kb,
+	&dev_attr_oom_freed_kb,
 };
 
-static struct sysdev_class cmm_sysdev_class = {
+static struct bus_type cmm_subsys = {
 	.name = "cmm",
+	.dev_name = "cmm",
 };
 
 /**
@@ -398,21 +399,21 @@
  * Return value:
  * 	0 on success / other on failure
  **/
-static int cmm_sysfs_register(struct sys_device *sysdev)
+static int cmm_sysfs_register(struct device *dev)
 {
 	int i, rc;
 
-	if ((rc = sysdev_class_register(&cmm_sysdev_class)))
+	if ((rc = subsys_system_register(&cmm_subsys, NULL)))
 		return rc;
 
-	sysdev->id = 0;
-	sysdev->cls = &cmm_sysdev_class;
+	dev->id = 0;
+	dev->bus = &cmm_subsys;
 
-	if ((rc = sysdev_register(sysdev)))
-		goto class_unregister;
+	if ((rc = device_register(dev)))
+		goto subsys_unregister;
 
 	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
-		if ((rc = sysdev_create_file(sysdev, cmm_attrs[i])))
+		if ((rc = device_create_file(dev, cmm_attrs[i])))
 			goto fail;
 	}
 
@@ -420,10 +421,10 @@
 
 fail:
 	while (--i >= 0)
-		sysdev_remove_file(sysdev, cmm_attrs[i]);
-	sysdev_unregister(sysdev);
-class_unregister:
-	sysdev_class_unregister(&cmm_sysdev_class);
+		device_remove_file(dev, cmm_attrs[i]);
+	device_unregister(dev);
+subsys_unregister:
+	bus_unregister(&cmm_subsys);
 	return rc;
 }
 
@@ -431,14 +432,14 @@
  * cmm_unregister_sysfs - Unregister from sysfs
  *
  **/
-static void cmm_unregister_sysfs(struct sys_device *sysdev)
+static void cmm_unregister_sysfs(struct device *dev)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
-		sysdev_remove_file(sysdev, cmm_attrs[i]);
-	sysdev_unregister(sysdev);
-	sysdev_class_unregister(&cmm_sysdev_class);
+		device_remove_file(dev, cmm_attrs[i]);
+	device_unregister(dev);
+	bus_unregister(&cmm_subsys);
 }
 
 /**
@@ -657,7 +658,7 @@
 	if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
 		goto out_oom_notifier;
 
-	if ((rc = cmm_sysfs_register(&cmm_sysdev)))
+	if ((rc = cmm_sysfs_register(&cmm_dev)))
 		goto out_reboot_notifier;
 
 	if (register_memory_notifier(&cmm_mem_nb) ||
@@ -678,7 +679,7 @@
 out_unregister_notifier:
 	unregister_memory_notifier(&cmm_mem_nb);
 	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
-	cmm_unregister_sysfs(&cmm_sysdev);
+	cmm_unregister_sysfs(&cmm_dev);
 out_reboot_notifier:
 	unregister_reboot_notifier(&cmm_reboot_nb);
 out_oom_notifier:
@@ -701,7 +702,7 @@
 	unregister_memory_notifier(&cmm_mem_nb);
 	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
 	cmm_free_pages(loaned_pages);
-	cmm_unregister_sysfs(&cmm_sysdev);
+	cmm_unregister_sysfs(&cmm_dev);
 }
 
 /**
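The cmm.c hunks above, and the pseries_energy.c and suspend.c hunks later in this series, all follow the same sysdev-to-device conversion recipe: a struct bus_type registered through subsys_system_register() replaces the old sysdev class, and DEVICE_ATTR()/device_create_file() replace the SYSDEV_* helpers. A minimal sketch of the target pattern, with illustrative names that are not taken from the patch (kernel context assumed):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>

static ssize_t value_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder payload */
}
static DEVICE_ATTR(value, S_IRUGO, value_show, NULL);

static struct bus_type example_subsys = {
	.name = "example",
	.dev_name = "example",
};

static int __init example_sysfs_init(void)
{
	int rc = subsys_system_register(&example_subsys, NULL);

	if (rc)
		return rc;
	return device_create_file(example_subsys.dev_root, &dev_attr_value);
}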
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index f106662..c9311cf 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -109,7 +109,7 @@
 	if (opcode > MAX_HCALL_OPCODE)
 		return;
 
-	h = &get_cpu_var(hcall_stats)[opcode / 4];
+	h = &__get_cpu_var(hcall_stats)[opcode / 4];
 	h->tb_start = mftb();
 	h->purr_start = mfspr(SPRN_PURR);
 }
@@ -126,8 +126,6 @@
 	h->num_calls++;
 	h->tb_total += mftb() - h->tb_start;
 	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
-
-	put_cpu_var(hcall_stats);
 }
 
 static int __init hcall_inst_init(void)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b719d97..c442f2b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -52,13 +52,42 @@
 #include "plpar_wrappers.h"
 
 
+static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
+				      u64 *startp, u64 *endp)
+{
+	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+	unsigned long start, end, inc;
+
+	start = __pa(startp);
+	end = __pa(endp);
+	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */
+
+	/* If this is non-zero, change the format.  We shift the
+	 * address and OR in the magic from the device tree. */

+	if (tbl->it_busno) {
+		start <<= 12;
+		end <<= 12;
+		inc <<= 12;
+		start |= tbl->it_busno;
+		end |= tbl->it_busno;
+	}
+
+	end |= inc - 1; /* round up end to be different than start */
+
+	mb(); /* Make sure TCEs in memory are written */
+	while (start <= end) {
+		out_be64(invalidate, start);
+		start += inc;
+	}
+}
+
 static int tce_build_pSeries(struct iommu_table *tbl, long index,
 			      long npages, unsigned long uaddr,
 			      enum dma_data_direction direction,
 			      struct dma_attrs *attrs)
 {
 	u64 proto_tce;
-	u64 *tcep;
+	u64 *tcep, *tces;
 	u64 rpn;
 
 	proto_tce = TCE_PCI_READ; // Read allowed
@@ -66,7 +95,7 @@
 	if (direction != DMA_TO_DEVICE)
 		proto_tce |= TCE_PCI_WRITE;
 
-	tcep = ((u64 *)tbl->it_base) + index;
+	tces = tcep = ((u64 *)tbl->it_base) + index;
 
 	while (npages--) {
 		/* can't move this out since we might cross MEMBLOCK boundary */
@@ -76,18 +105,24 @@
 		uaddr += TCE_PAGE_SIZE;
 		tcep++;
 	}
+
+	if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 	return 0;
 }
 
 
 static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
 {
-	u64 *tcep;
+	u64 *tcep, *tces;
 
-	tcep = ((u64 *)tbl->it_base) + index;
+	tces = tcep = ((u64 *)tbl->it_base) + index;
 
 	while (npages--)
 		*(tcep++) = 0;
+
+	if (tbl->it_type == TCE_PCI_SWINV_FREE)
+		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
 static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
@@ -425,7 +460,7 @@
 				 struct iommu_table *tbl)
 {
 	struct device_node *node;
-	const unsigned long *basep;
+	const unsigned long *basep, *sw_inval;
 	const u32 *sizep;
 
 	node = phb->dn;
@@ -462,6 +497,22 @@
 	tbl->it_index = 0;
 	tbl->it_blocksize = 16;
 	tbl->it_type = TCE_PCI;
+
+	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
+	if (sw_inval) {
+		/*
+		 * This property contains information on how to
+		 * invalidate the TCE entry.  The first property is
+		 * the base MMIO address used to invalidate entries.
+		 * The second property tells us the format of the TCE
+		 * invalidate (whether it needs to be shifted) and
+		 * some magic routing info to add to our invalidate
+		 * command.
+		 */
+		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
+		tbl->it_busno = sw_inval[1]; /* overload this with magic */
+		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
+	}
 }
 
 /*
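In the software-invalidate helper added above, one out_be64() is issued per cacheline of TCEs, and the statement end |= inc - 1 rounds the end address up so the final (possibly partial) cacheline is still covered and end always differs from start. A worked sketch of the arithmetic under the same rules, with purely illustrative values and assuming it_busno == 0:

/* How many MMIO stores the invalidate loop issues for a TCE range.
 * Example: start = 0x1000, end = 0x1008 (two TCEs), inc = 128:
 * end rounds up to 0x107f, so a single store covers the cacheline.
 */
static unsigned long tce_inval_store_count(unsigned long start,
					   unsigned long end,
					   unsigned long inc)
{
	end |= inc - 1;
	return (end - start) / inc + 1;
}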
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 27a4950..948e0e3 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -554,7 +554,10 @@
 		goto out;
 
 	(*depth)++;
+	preempt_disable();
 	trace_hcall_entry(opcode, args);
+	if (opcode == H_CEDE)
+		rcu_idle_enter();
 	(*depth)--;
 
 out:
@@ -575,7 +578,10 @@
 		goto out;
 
 	(*depth)++;
+	if (opcode == H_CEDE)
+		rcu_idle_exit();
 	trace_hcall_exit(opcode, retval, retbuf);
+	preempt_enable();
 	(*depth)--;
 
 out:
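Two things happen in the tracepoint wrappers above: preemption is now held off across trace_hcall_entry()/trace_hcall_exit(), which is what allowed the hvCall_inst.c hunk earlier in this series to drop its get_cpu_var()/put_cpu_var() pair, and RCU is told the CPU is going idle right after an H_CEDE entry is traced (and woken again before the exit is traced), so the tracepoints never run while RCU believes the CPU is idle. A sketch of a probe relying on the caller-side preempt_disable(); the body mirrors hvCall_inst.c, and the signature and symbols are assumed to match that file:

static void probe_hcall_entry(void *ignored, unsigned long opcode,
			      unsigned long *args)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	/* Raw per-cpu access is safe: the caller disabled preemption. */
	h = &__get_cpu_var(hcall_stats)[opcode / 4];
	h->tb_start = mftb();
	h->purr_start = mfspr(SPRN_PURR);
}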
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index a76b228..330a57b 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -625,6 +625,8 @@
 {
 	static unsigned int oops_count = 0;
 	static bool panicking = false;
+	static DEFINE_SPINLOCK(lock);
+	unsigned long flags;
 	size_t text_len;
 	unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
 	int rc = -1;
@@ -655,6 +657,9 @@
 	if (clobbering_unread_rtas_event())
 		return;
 
+	if (!spin_trylock_irqsave(&lock, flags))
+		return;
+
 	if (big_oops_buf) {
 		text_len = capture_last_msgs(old_msgs, old_len,
 			new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
@@ -670,4 +675,6 @@
 
 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
 		(int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count);
+
+	spin_unlock_irqrestore(&lock, flags);
 }
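The nvram hunk above serializes oops_to_nvram() with a local lock and deliberately uses spin_trylock_irqsave(): on a panic several CPUs may arrive here at once, and blocking on a lock held by another dying CPU would hang the box instead of capturing the log. A generic sketch of the trylock-on-panic pattern (names illustrative, not from the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(panic_log_lock);

static void panic_log_write(const char *buf, size_t len)
{
	unsigned long flags;

	/* Never block on a panic path; if another CPU is already writing,
	 * drop our copy rather than risk a deadlock.
	 */
	if (!spin_trylock_irqsave(&panic_log_lock, flags))
		return;

	/* ... copy buf/len to the persistent store here ... */

	spin_unlock_irqrestore(&panic_log_lock, flags);
}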
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
new file mode 100644
index 0000000..085fd3f
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -0,0 +1,329 @@
+/*
+ *  processor_idle - idle state cpuidle driver.
+ *  Adapted from drivers/idle/intel_idle.c and
+ *  drivers/acpi/processor_idle.c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+
+#include <asm/paca.h>
+#include <asm/reg.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+
+#include "plpar_wrappers.h"
+#include "pseries.h"
+
+struct cpuidle_driver pseries_idle_driver = {
+	.name =		"pseries_idle",
+	.owner =	THIS_MODULE,
+};
+
+#define MAX_IDLE_STATE_COUNT	2
+
+static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
+static struct cpuidle_device __percpu *pseries_cpuidle_devices;
+static struct cpuidle_state *cpuidle_state_table;
+
+void update_smt_snooze_delay(int snooze)
+{
+	struct cpuidle_driver *drv = cpuidle_get_driver();
+	if (drv)
+		drv->states[0].target_residency = snooze;
+}
+
+static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
+{
+
+	*kt_before = ktime_get_real();
+	*in_purr = mfspr(SPRN_PURR);
+	/*
+	 * Indicate to the HV that we are idle. Now would be
+	 * a good time to find other work to dispatch.
+	 */
+	get_lppaca()->idle = 1;
+}
+
+static inline  s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
+{
+	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
+	get_lppaca()->idle = 0;
+
+	return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
+}
+
+static int snooze_loop(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	unsigned long in_purr;
+	ktime_t kt_before;
+	unsigned long start_snooze;
+	long snooze = drv->states[0].target_residency;
+
+	idle_loop_prolog(&in_purr, &kt_before);
+
+	if (snooze) {
+		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
+		local_irq_enable();
+		set_thread_flag(TIF_POLLING_NRFLAG);
+
+		while ((snooze < 0) || (get_tb() < start_snooze)) {
+			if (need_resched() || cpu_is_offline(dev->cpu))
+				goto out;
+			ppc64_runlatch_off();
+			HMT_low();
+			HMT_very_low();
+		}
+
+		HMT_medium();
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+		smp_mb();
+		local_irq_disable();
+	}
+
+out:
+	HMT_medium();
+	dev->last_residency =
+		(int)idle_loop_epilog(in_purr, kt_before);
+	return index;
+}
+
+static int dedicated_cede_loop(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv,
+				int index)
+{
+	unsigned long in_purr;
+	ktime_t kt_before;
+
+	idle_loop_prolog(&in_purr, &kt_before);
+	get_lppaca()->donate_dedicated_cpu = 1;
+
+	ppc64_runlatch_off();
+	HMT_medium();
+	cede_processor();
+
+	get_lppaca()->donate_dedicated_cpu = 0;
+	dev->last_residency =
+		(int)idle_loop_epilog(in_purr, kt_before);
+	return index;
+}
+
+static int shared_cede_loop(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	unsigned long in_purr;
+	ktime_t kt_before;
+
+	idle_loop_prolog(&in_purr, &kt_before);
+
+	/*
+	 * Yield the processor to the hypervisor.  We return if
+	 * an external interrupt occurs (which are driven prior
+	 * to returning here) or if a prod occurs from another
+	 * processor. When returning here, external interrupts
+	 * are enabled.
+	 */
+	cede_processor();
+
+	dev->last_residency =
+		(int)idle_loop_epilog(in_purr, kt_before);
+	return index;
+}
+
+/*
+ * States for dedicated partition case.
+ */
+static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
+	{ /* Snooze */
+		.name = "snooze",
+		.desc = "snooze",
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 0,
+		.target_residency = 0,
+		.enter = &snooze_loop },
+	{ /* CEDE */
+		.name = "CEDE",
+		.desc = "CEDE",
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 10,
+		.enter = &dedicated_cede_loop },
+};
+
+/*
+ * States for shared partition case.
+ */
+static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
+	{ /* Shared Cede */
+		.name = "Shared Cede",
+		.desc = "Shared Cede",
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 0,
+		.target_residency = 0,
+		.enter = &shared_cede_loop },
+};
+
+int pseries_notify_cpuidle_add_cpu(int cpu)
+{
+	struct cpuidle_device *dev =
+			per_cpu_ptr(pseries_cpuidle_devices, cpu);
+	if (dev && cpuidle_get_driver()) {
+		cpuidle_disable_device(dev);
+		cpuidle_enable_device(dev);
+	}
+	return 0;
+}
+
+/*
+ * pseries_cpuidle_driver_init()
+ */
+static int pseries_cpuidle_driver_init(void)
+{
+	int idle_state;
+	struct cpuidle_driver *drv = &pseries_idle_driver;
+
+	drv->state_count = 0;
+
+	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
+
+		if (idle_state > max_idle_state)
+			break;
+
+		/* is the state not enabled? */
+		if (cpuidle_state_table[idle_state].enter == NULL)
+			continue;
+
+		drv->states[drv->state_count] =	/* structure copy */
+			cpuidle_state_table[idle_state];
+
+		if (cpuidle_state_table == dedicated_states)
+			drv->states[drv->state_count].target_residency =
+				__get_cpu_var(smt_snooze_delay);
+
+		drv->state_count += 1;
+	}
+
+	return 0;
+}
+
+/* pseries_idle_devices_uninit(void)
+ * unregister cpuidle devices and de-allocate memory
+ */
+static void pseries_idle_devices_uninit(void)
+{
+	int i;
+	struct cpuidle_device *dev;
+
+	for_each_possible_cpu(i) {
+		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
+		cpuidle_unregister_device(dev);
+	}
+
+	free_percpu(pseries_cpuidle_devices);
+	return;
+}
+
+/* pseries_idle_devices_init()
+ * allocate, initialize and register cpuidle device
+ */
+static int pseries_idle_devices_init(void)
+{
+	int i;
+	struct cpuidle_driver *drv = &pseries_idle_driver;
+	struct cpuidle_device *dev;
+
+	pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+	if (pseries_cpuidle_devices == NULL)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
+		dev->state_count = drv->state_count;
+		dev->cpu = i;
+		if (cpuidle_register_device(dev)) {
+			printk(KERN_DEBUG \
+				"cpuidle_register_device %d failed!\n", i);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * pseries_idle_probe()
+ * Choose state table for shared versus dedicated partition
+ */
+static int pseries_idle_probe(void)
+{
+
+	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+		return -ENODEV;
+
+	if (cpuidle_disable != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
+	if (max_idle_state == 0) {
+		printk(KERN_DEBUG "pseries processor idle disabled.\n");
+		return -EPERM;
+	}
+
+	if (get_lppaca()->shared_proc)
+		cpuidle_state_table = shared_states;
+	else
+		cpuidle_state_table = dedicated_states;
+
+	return 0;
+}
+
+static int __init pseries_processor_idle_init(void)
+{
+	int retval;
+
+	retval = pseries_idle_probe();
+	if (retval)
+		return retval;
+
+	pseries_cpuidle_driver_init();
+	retval = cpuidle_register_driver(&pseries_idle_driver);
+	if (retval) {
+		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
+		return retval;
+	}
+
+	retval = pseries_idle_devices_init();
+	if (retval) {
+		pseries_idle_devices_uninit();
+		cpuidle_unregister_driver(&pseries_idle_driver);
+		return retval;
+	}
+
+	printk(KERN_DEBUG "pseries_idle_driver registered\n");
+
+	return 0;
+}
+
+static void __exit pseries_processor_idle_exit(void)
+{
+
+	pseries_idle_devices_uninit();
+	cpuidle_unregister_driver(&pseries_idle_driver);
+
+	return;
+}
+
+module_init(pseries_processor_idle_init);
+module_exit(pseries_processor_idle_exit);
+
+MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Cpuidle driver for POWER");
+MODULE_LICENSE("GPL");
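The new driver never calls its ->enter() hooks itself; the generic cpuidle core does, after a governor has picked a state index. A highly simplified sketch of that dispatch, only to show where snooze_loop(), dedicated_cede_loop() and shared_cede_loop() plug in (the real code lives in drivers/cpuidle/ and does considerably more):

#include <linux/cpuidle.h>

static int cpuidle_enter_state_sketch(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
{
	/* index comes from the governor; each loop above returns it and
	 * records dev->last_residency, which the governor uses next time.
	 */
	return drv->states[index].enter(dev, drv, index);
}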
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 24c7162..9a3dda0 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -57,4 +57,7 @@
 extern int dlpar_attach_node(struct device_node *);
 extern int dlpar_detach_node(struct device_node *);
 
+/* Snooze Delay, pseries_idle */
+DECLARE_PER_CPU(long, smt_snooze_delay);
+
 #endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index c8b3c69..af281dc 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -15,7 +15,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <asm/cputhreads.h>
@@ -184,7 +184,7 @@
 	return s-page;
 }
 
-static ssize_t get_best_energy_data(struct sys_device *dev,
+static ssize_t get_best_energy_data(struct device *dev,
 					char *page, int activate)
 {
 	int rc;
@@ -207,26 +207,26 @@
 
 /* Wrapper functions */
 
-static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
-			struct sysdev_class_attribute *attr, char *page)
+static ssize_t cpu_activate_hint_list_show(struct device *dev,
+			struct device_attribute *attr, char *page)
 {
 	return get_best_energy_list(page, 1);
 }
 
-static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
-			struct sysdev_class_attribute *attr, char *page)
+static ssize_t cpu_deactivate_hint_list_show(struct device *dev,
+			struct device_attribute *attr, char *page)
 {
 	return get_best_energy_list(page, 0);
 }
 
-static ssize_t percpu_activate_hint_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *page)
+static ssize_t percpu_activate_hint_show(struct device *dev,
+			struct device_attribute *attr, char *page)
 {
 	return get_best_energy_data(dev, page, 1);
 }
 
-static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *page)
+static ssize_t percpu_deactivate_hint_show(struct device *dev,
+			struct device_attribute *attr, char *page)
 {
 	return get_best_energy_data(dev, page, 0);
 }
@@ -241,48 +241,48 @@
  *	Per-cpu value of the hint
  */
 
-struct sysdev_class_attribute attr_cpu_activate_hint_list =
-		_SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
+struct device_attribute attr_cpu_activate_hint_list =
+		__ATTR(pseries_activate_hint_list, 0444,
 		cpu_activate_hint_list_show, NULL);
 
-struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
-		_SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
+struct device_attribute attr_cpu_deactivate_hint_list =
+		__ATTR(pseries_deactivate_hint_list, 0444,
 		cpu_deactivate_hint_list_show, NULL);
 
-struct sysdev_attribute attr_percpu_activate_hint =
-		_SYSDEV_ATTR(pseries_activate_hint, 0444,
+struct device_attribute attr_percpu_activate_hint =
+		__ATTR(pseries_activate_hint, 0444,
 		percpu_activate_hint_show, NULL);
 
-struct sysdev_attribute attr_percpu_deactivate_hint =
-		_SYSDEV_ATTR(pseries_deactivate_hint, 0444,
+struct device_attribute attr_percpu_deactivate_hint =
+		__ATTR(pseries_deactivate_hint, 0444,
 		percpu_deactivate_hint_show, NULL);
 
 static int __init pseries_energy_init(void)
 {
 	int cpu, err;
-	struct sys_device *cpu_sys_dev;
+	struct device *cpu_dev;
 
 	if (!check_for_h_best_energy()) {
 		printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
 		return 0;
 	}
 	/* Create the sysfs files */
-	err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
-				&attr_cpu_activate_hint_list.attr);
+	err = device_create_file(cpu_subsys.dev_root,
+				&attr_cpu_activate_hint_list);
 	if (!err)
-		err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
-				&attr_cpu_deactivate_hint_list.attr);
+		err = device_create_file(cpu_subsys.dev_root,
+				&attr_cpu_deactivate_hint_list);
 
 	if (err)
 		return err;
 	for_each_possible_cpu(cpu) {
-		cpu_sys_dev = get_cpu_sysdev(cpu);
-		err = sysfs_create_file(&cpu_sys_dev->kobj,
-				&attr_percpu_activate_hint.attr);
+		cpu_dev = get_cpu_device(cpu);
+		err = device_create_file(cpu_dev,
+				&attr_percpu_activate_hint);
 		if (err)
 			break;
-		err = sysfs_create_file(&cpu_sys_dev->kobj,
-				&attr_percpu_deactivate_hint.attr);
+		err = device_create_file(cpu_dev,
+				&attr_percpu_deactivate_hint);
 		if (err)
 			break;
 	}
@@ -298,23 +298,20 @@
 static void __exit pseries_energy_cleanup(void)
 {
 	int cpu;
-	struct sys_device *cpu_sys_dev;
+	struct device *cpu_dev;
 
 	if (!sysfs_entries)
 		return;
 
 	/* Remove the sysfs files */
-	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
-				&attr_cpu_activate_hint_list.attr);
-
-	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
-				&attr_cpu_deactivate_hint_list.attr);
+	device_remove_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list);
+	device_remove_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list);
 
 	for_each_possible_cpu(cpu) {
-		cpu_sys_dev = get_cpu_sysdev(cpu);
-		sysfs_remove_file(&cpu_sys_dev->kobj,
+		cpu_dev = get_cpu_device(cpu);
+		sysfs_remove_file(&cpu_dev->kobj,
 				&attr_percpu_activate_hint.attr);
-		sysfs_remove_file(&cpu_sys_dev->kobj,
+		sysfs_remove_file(&cpu_dev->kobj,
 				&attr_percpu_deactivate_hint.attr);
 	}
 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c3408ca..f79f127 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -39,6 +39,7 @@
 #include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
+#include <linux/cpuidle.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -74,9 +75,6 @@
 
 int fwnmi_active;  /* TRUE if an FWNMI handler is present */
 
-static void pseries_shared_idle_sleep(void);
-static void pseries_dedicated_idle_sleep(void);
-
 static struct device_node *pSeries_mpic_node;
 
 static void pSeries_show_cpuinfo(struct seq_file *m)
@@ -192,8 +190,7 @@
 	BUG_ON(openpic_addr == 0);
 
 	/* Setup the openpic driver */
-	mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
-			  MPIC_PRIMARY,
+	mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 0,
 			  16, 250, /* isu size, irq count */
 			  " MPIC     ");
 	BUG_ON(mpic == NULL);
@@ -352,8 +349,25 @@
 }
 early_initcall(alloc_dispatch_log_kmem_cache);
 
+static void pSeries_idle(void)
+{
+	/* This would call on the cpuidle framework, and the back-end pseries
+	 * driver to go to idle states
+	 */
+	if (cpuidle_idle_call()) {
+		/* On error, execute default handler
+		 * to go into low thread priority and possibly
+		 * low power mode.
+		 */
+		HMT_low();
+		HMT_very_low();
+	}
+}
+
 static void __init pSeries_setup_arch(void)
 {
+	panic_timeout = 10;
+
 	/* Discover PIC type and setup ppc_md accordingly */
 	pseries_discover_pic();
 
@@ -374,18 +388,9 @@
 
 	pSeries_nvram_init();
 
-	/* Choose an idle loop */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
 		vpa_init(boot_cpuid);
-		if (get_lppaca()->shared_proc) {
-			printk(KERN_DEBUG "Using shared processor idle loop\n");
-			ppc_md.power_save = pseries_shared_idle_sleep;
-		} else {
-			printk(KERN_DEBUG "Using dedicated idle loop\n");
-			ppc_md.power_save = pseries_dedicated_idle_sleep;
-		}
-	} else {
-		printk(KERN_DEBUG "Using default idle loop\n");
+		ppc_md.power_save = pSeries_idle;
 	}
 
 	if (firmware_has_feature(FW_FEATURE_LPAR))
@@ -586,80 +591,6 @@
 	return 1;
 }
 
-
-DECLARE_PER_CPU(long, smt_snooze_delay);
-
-static void pseries_dedicated_idle_sleep(void)
-{ 
-	unsigned int cpu = smp_processor_id();
-	unsigned long start_snooze;
-	unsigned long in_purr, out_purr;
-	long snooze = __get_cpu_var(smt_snooze_delay);
-
-	/*
-	 * Indicate to the HV that we are idle. Now would be
-	 * a good time to find other work to dispatch.
-	 */
-	get_lppaca()->idle = 1;
-	get_lppaca()->donate_dedicated_cpu = 1;
-	in_purr = mfspr(SPRN_PURR);
-
-	/*
-	 * We come in with interrupts disabled, and need_resched()
-	 * has been checked recently.  If we should poll for a little
-	 * while, do so.
-	 */
-	if (snooze) {
-		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
-		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
-
-		while ((snooze < 0) || (get_tb() < start_snooze)) {
-			if (need_resched() || cpu_is_offline(cpu))
-				goto out;
-			ppc64_runlatch_off();
-			HMT_low();
-			HMT_very_low();
-		}
-
-		HMT_medium();
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb();
-		local_irq_disable();
-		if (need_resched() || cpu_is_offline(cpu))
-			goto out;
-	}
-
-	cede_processor();
-
-out:
-	HMT_medium();
-	out_purr = mfspr(SPRN_PURR);
-	get_lppaca()->wait_state_cycles += out_purr - in_purr;
-	get_lppaca()->donate_dedicated_cpu = 0;
-	get_lppaca()->idle = 0;
-}
-
-static void pseries_shared_idle_sleep(void)
-{
-	/*
-	 * Indicate to the HV that we are idle. Now would be
-	 * a good time to find other work to dispatch.
-	 */
-	get_lppaca()->idle = 1;
-
-	/*
-	 * Yield the processor to the hypervisor.  We return if
-	 * an external interrupt occurs (which are driven prior
-	 * to returning here) or if a prod occurs from another
-	 * processor. When returning here, external interrupts
-	 * are enabled.
-	 */
-	cede_processor();
-
-	get_lppaca()->idle = 0;
-}
-
 static int pSeries_pci_probe_mode(struct pci_bus *bus)
 {
 	if (firmware_has_feature(FW_FEATURE_LPAR))
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 26e93fd..eadba95 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -22,7 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 
 #include <asm/ptrace.h>
@@ -148,6 +148,7 @@
 	set_cpu_current_state(cpu, CPU_STATE_ONLINE);
 	set_default_offline_state(cpu);
 #endif
+	pseries_notify_cpuidle_add_cpu(cpu);
 }
 
 static int __devinit smp_pSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index d3de084..b84a8b2 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -26,7 +26,7 @@
 #include <asm/rtas.h>
 
 static u64 stream_id;
-static struct sys_device suspend_sysdev;
+static struct device suspend_dev;
 static DECLARE_COMPLETION(suspend_work);
 static struct rtas_suspend_me_data suspend_data;
 static atomic_t suspending;
@@ -110,8 +110,8 @@
 
 /**
  * store_hibernate - Initiate partition hibernation
- * @classdev:	sysdev class struct
- * @attr:		class device attribute struct
+ * @dev:		subsys root device
+ * @attr:		device attribute struct
  * @buf:		buffer
  * @count:		buffer size
  *
@@ -121,8 +121,8 @@
  * Return value:
  * 	number of bytes printed to buffer / other on failure
  **/
-static ssize_t store_hibernate(struct sysdev_class *classdev,
-			       struct sysdev_class_attribute *attr,
+static ssize_t store_hibernate(struct device *dev,
+			       struct device_attribute *attr,
 			       const char *buf, size_t count)
 {
 	int rc;
@@ -148,10 +148,11 @@
 	return rc;
 }
 
-static SYSDEV_CLASS_ATTR(hibernate, S_IWUSR, NULL, store_hibernate);
+static DEVICE_ATTR(hibernate, S_IWUSR, NULL, store_hibernate);
 
-static struct sysdev_class suspend_sysdev_class = {
+static struct bus_type suspend_subsys = {
 	.name = "power",
+	.dev_name = "power",
 };
 
 static const struct platform_suspend_ops pseries_suspend_ops = {
@@ -167,23 +168,23 @@
  * Return value:
  * 	0 on success / other on failure
  **/
-static int pseries_suspend_sysfs_register(struct sys_device *sysdev)
+static int pseries_suspend_sysfs_register(struct device *dev)
 {
 	int rc;
 
-	if ((rc = sysdev_class_register(&suspend_sysdev_class)))
+	if ((rc = subsys_system_register(&suspend_subsys, NULL)))
 		return rc;
 
-	sysdev->id = 0;
-	sysdev->cls = &suspend_sysdev_class;
+	dev->id = 0;
+	dev->bus = &suspend_subsys;
 
-	if ((rc = sysdev_class_create_file(&suspend_sysdev_class, &attr_hibernate)))
-		goto class_unregister;
+	if ((rc = device_create_file(suspend_subsys.dev_root, &dev_attr_hibernate)))
+		goto subsys_unregister;
 
 	return 0;
 
-class_unregister:
-	sysdev_class_unregister(&suspend_sysdev_class);
+subsys_unregister:
+	bus_unregister(&suspend_subsys);
 	return rc;
 }
 
@@ -204,7 +205,7 @@
 	if (suspend_data.token == RTAS_UNKNOWN_SERVICE)
 		return 0;
 
-	if ((rc = pseries_suspend_sysfs_register(&suspend_sysdev)))
+	if ((rc = pseries_suspend_sysfs_register(&suspend_dev)))
 		return rc;
 
 	ppc_md.suspend_disable_cpu = pseries_suspend_cpu;
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index bd560c7..57d22a2 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -1,20 +1,28 @@
 config PPC_WSP
 	bool
 	select PPC_A2
+	select GENERIC_TBSYNC
+	select PPC_ICSWX
 	select PPC_SCOM
 	select PPC_XICS
 	select PPC_ICP_NATIVE
 	select PCI
 	select PPC_IO_WORKAROUNDS if PCI
 	select PPC_INDIRECT_PIO if PCI
+	select PPC_WSP_COPRO
 	default n
 
 menu "WSP platform selection"
 	depends on PPC_BOOK3E_64
 
 config PPC_PSR2
-	bool "PSR-2 platform"
-	select GENERIC_TBSYNC
+	bool "PowerEN System Reference Platform 2"
+	select EPAPR_BOOT
+	select PPC_WSP
+	default y
+
+config PPC_CHROMA
+	bool "PowerEN PCIe Chroma Card"
 	select EPAPR_BOOT
 	select PPC_WSP
 	default y
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
index a1486b4..56817ac 100644
--- a/arch/powerpc/platforms/wsp/Makefile
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -1,8 +1,10 @@
 ccflags-y			+= -mno-minimal-toc
 
-obj-y				+= setup.o ics.o
-obj-$(CONFIG_PPC_PSR2)		+= psr2.o opb_pic.o
+obj-y				+= setup.o ics.o wsp.o
+obj-$(CONFIG_PPC_PSR2)		+= psr2.o
+obj-$(CONFIG_PPC_CHROMA)	+= chroma.o h8.o
+obj-$(CONFIG_PPC_WSP)		+= opb_pic.o
 obj-$(CONFIG_PPC_WSP)		+= scom_wsp.o
 obj-$(CONFIG_SMP)		+= smp.o scom_smp.o
 obj-$(CONFIG_PCI)		+= wsp_pci.o
-obj-$(CONFIG_PCI_MSI)		+= msi.o
\ No newline at end of file
+obj-$(CONFIG_PCI_MSI)		+= msi.o
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
new file mode 100644
index 0000000..ca6fa26
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/chroma.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/time.h>
+
+#include <asm/machdep.h>
+#include <asm/system.h>
+#include <asm/udbg.h>
+
+#include "ics.h"
+#include "wsp.h"
+
+void __init chroma_setup_arch(void)
+{
+	wsp_setup_arch();
+	wsp_setup_h8();
+
+}
+
+static int __init chroma_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma"))
+		return 0;
+
+	return 1;
+}
+
+define_machine(chroma_md) {
+	.name			= "Chroma PCIe",
+	.probe			= chroma_probe,
+	.setup_arch		= chroma_setup_arch,
+	.restart		= wsp_h8_restart,
+	.power_off		= wsp_h8_power_off,
+	.halt			= wsp_halt,
+	.calibrate_decr		= generic_calibrate_decr,
+	.init_IRQ		= wsp_setup_irq,
+	.progress		= udbg_progress,
+	.power_save		= book3e_idle,
+};
+
+machine_arch_initcall(chroma_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c
new file mode 100644
index 0000000..d18e6cc
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/h8.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/io.h>
+
+#include "wsp.h"
+
+/*
+ * The UART connection to the H8 is over ttyS1, which is just a 16550.
+ * We assume that FW has it set up right and no one messes with it.
+ */
+
+
+static u8 __iomem *h8;
+
+#define RBR 0		/* Receiver Buffer Register */
+#define THR 0		/* Transmitter Holding Register */
+#define LSR 5		/* Line Status Register */
+#define LSR_DR 0x01	/* LSR value for Data-Ready */
+#define LSR_THRE 0x20	/* LSR value for Transmitter-Holding-Register-Empty */
+static void wsp_h8_putc(int c)
+{
+	u8 lsr;
+
+	do {
+		lsr = readb(h8 + LSR);
+	} while ((lsr & LSR_THRE) != LSR_THRE);
+	writeb(c, h8 + THR);
+}
+
+static int wsp_h8_getc(void)
+{
+	u8 lsr;
+
+	do {
+		lsr = readb(h8 + LSR);
+	} while ((lsr & LSR_DR) != LSR_DR);
+
+	return readb(h8 + RBR);
+}
+
+static void wsp_h8_puts(const char *s, int sz)
+{
+	int i;
+
+	for (i = 0; i < sz; i++) {
+		wsp_h8_putc(s[i]);
+
+		/* no flow control so wait for echo */
+		wsp_h8_getc();
+	}
+	wsp_h8_putc('\r');
+	wsp_h8_putc('\n');
+}
+
+static void wsp_h8_terminal_cmd(const char *cmd, int sz)
+{
+	hard_irq_disable();
+	wsp_h8_puts(cmd, sz);
+	/* should never return, but just in case */
+	for (;;)
+		continue;
+}
+
+
+void wsp_h8_restart(char *cmd)
+{
+	static const char restart[] = "warm-reset";
+
+	(void)cmd;
+	wsp_h8_terminal_cmd(restart, sizeof(restart) - 1);
+}
+
+void wsp_h8_power_off(void)
+{
+	static const char off[] = "power-off";
+
+	wsp_h8_terminal_cmd(off, sizeof(off) - 1);
+}
+
+static void __iomem *wsp_h8_getaddr(void)
+{
+	struct device_node *aliases;
+	struct device_node *uart;
+	struct property *path;
+	void __iomem *va = NULL;
+
+	/*
+	 * there is nothing in the devtree to tell us which UART is mapped
+	 * to the H8, but we know it is the second serial port.
+	 */
+
+	aliases = of_find_node_by_path("/aliases");
+	if (aliases == NULL)
+		return NULL;
+
+	path = of_find_property(aliases, "serial1", NULL);
+	if (path == NULL)
+		goto out;
+
+	uart = of_find_node_by_path(path->value);
+	if (uart == NULL)
+		goto out;
+
+	va = of_iomap(uart, 0);
+
+	/* remove it so no one messes with it */
+	of_detach_node(uart);
+	of_node_put(uart);
+
+out:
+	of_node_put(aliases);
+
+	return va;
+}
+
+void __init wsp_setup_h8(void)
+{
+	h8 = wsp_h8_getaddr();
+
+	/* Devtree change? Let's hard-map it anyway */
+	if (h8 == NULL) {
+		pr_warn("UART to H8 could not be found\n");
+		h8 = ioremap(0xffc0008000ULL, 0x100);
+	}
+}
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index be05631..19f353d 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -320,7 +320,8 @@
 		}
 
 		/* Attach opb interrupt handler to new virtual IRQ */
-		rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
+		rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
+				 "OPB LS Cascade", opb);
 		if (rc) {
 			printk("opb: request_irq failed: %d\n", rc);
 			continue;
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 166f2e4..0c1ae06 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -14,10 +14,10 @@
 #include <linux/mm.h>
 #include <linux/of.h>
 #include <linux/smp.h>
+#include <linux/time.h>
 
 #include <asm/machdep.h>
 #include <asm/system.h>
-#include <asm/time.h>
 #include <asm/udbg.h>
 
 #include "ics.h"
@@ -27,7 +27,8 @@
 static void psr2_spin(void)
 {
 	hard_irq_disable();
-	for (;;) ;
+	for (;;)
+		continue;
 }
 
 static void psr2_restart(char *cmd)
@@ -35,65 +36,32 @@
 	psr2_spin();
 }
 
-static int psr2_probe_devices(void)
-{
-	struct device_node *np;
-
-	/* Our RTC is a ds1500. It seems to be programatically compatible
-	 * with the ds1511 for which we have a driver so let's use that
-	 */
-	np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
-	if (np != NULL) {
-		struct resource res;
-		if (of_address_to_resource(np, 0, &res) == 0)
-			platform_device_register_simple("ds1511", 0, &res, 1);
-	}
-	return 0;
-}
-machine_arch_initcall(psr2_md, psr2_probe_devices);
-
-static void __init psr2_setup_arch(void)
-{
-	/* init to some ~sane value until calibrate_delay() runs */
-	loops_per_jiffy = 50000000;
-
-	scom_init_wsp();
-
-	/* Setup SMP callback */
-#ifdef CONFIG_SMP
-	a2_setup_smp();
-#endif
-#ifdef CONFIG_PCI
-	wsp_setup_pci();
-#endif
-
-}
-
 static int __init psr2_probe(void)
 {
 	unsigned long root = of_get_flat_dt_root();
 
+	if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
+		/* chroma systems also claim they are psr2s */
+		return 0;
+	}
+
 	if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
 		return 0;
 
 	return 1;
 }
 
-static void __init psr2_init_irq(void)
-{
-	wsp_init_irq();
-	opb_pic_init();
-}
-
 define_machine(psr2_md) {
 	.name			= "PSR2 A2",
 	.probe			= psr2_probe,
-	.setup_arch		= psr2_setup_arch,
+	.setup_arch		= wsp_setup_arch,
 	.restart		= psr2_restart,
 	.power_off		= psr2_spin,
 	.halt			= psr2_spin,
 	.calibrate_decr		= generic_calibrate_decr,
-	.init_IRQ		= psr2_init_irq,
+	.init_IRQ		= wsp_setup_irq,
 	.progress		= udbg_progress,
 	.power_save		= book3e_idle,
 };
+
+machine_arch_initcall(psr2_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
new file mode 100644
index 0000000..d25cc96
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+#include <asm/scom.h>
+
+#include "wsp.h"
+#include "ics.h"
+
+#define WSP_SOC_COMPATIBLE	"ibm,wsp-soc"
+#define PBIC_COMPATIBLE		"ibm,wsp-pbic"
+#define COPRO_COMPATIBLE	"ibm,wsp-coprocessor"
+
+static int __init wsp_probe_buses(void)
+{
+	static __initdata struct of_device_id bus_ids[] = {
+		/*
+		 * every node in between needs to be here or you won't
+		 * find it
+		 */
+		{ .compatible = WSP_SOC_COMPATIBLE, },
+		{ .compatible = PBIC_COMPATIBLE, },
+		{ .compatible = COPRO_COMPATIBLE, },
+		{},
+	};
+	of_platform_bus_probe(NULL, bus_ids, NULL);
+
+	return 0;
+}
+
+void __init wsp_setup_arch(void)
+{
+	/* init to some ~sane value until calibrate_delay() runs */
+	loops_per_jiffy = 50000000;
+
+	scom_init_wsp();
+
+	/* Setup SMP callback */
+#ifdef CONFIG_SMP
+	a2_setup_smp();
+#endif
+#ifdef CONFIG_PCI
+	wsp_setup_pci();
+#endif
+}
+
+void __init wsp_setup_irq(void)
+{
+	wsp_init_irq();
+	opb_pic_init();
+}
+
+
+int __init wsp_probe_devices(void)
+{
+	struct device_node *np;
+
+	/* Our RTC is a ds1500. It seems to be programmatically compatible
+	 * with the ds1511 for which we have a driver so let's use that
+	 */
+	np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
+	if (np != NULL) {
+		struct resource res;
+		if (of_address_to_resource(np, 0, &res) == 0)
+			platform_device_register_simple("ds1511", 0, &res, 1);
+	}
+
+	wsp_probe_buses();
+
+	return 0;
+}
+
+void wsp_halt(void)
+{
+	u64 val;
+	scom_map_t m;
+	struct device_node *dn;
+	struct device_node *mine;
+	struct device_node *me;
+
+	me = of_get_cpu_node(smp_processor_id(), NULL);
+	mine = scom_find_parent(me);
+
+	/* This will halt all the A2s but not power off the chip */
+	for_each_node_with_property(dn, "scom-controller") {
+		if (dn == mine)
+			continue;
+		m = scom_map(dn, 0, 1);
+
+		/* read-modify-write it so the HW probe does not get
+		 * confused */
+		val = scom_read(m, 0);
+		val |= 1;
+		scom_write(m, 0, val);
+		scom_unmap(m);
+	}
+	m = scom_map(mine, 0, 1);
+	val = scom_read(m, 0);
+	val |= 1;
+	scom_write(m, 0, val);
+	/* should never return */
+	scom_unmap(m);
+}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
index 3347981..10c1d1f 100644
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -6,15 +6,25 @@
 /* Devtree compatible strings for major devices */
 #define PCIE_COMPATIBLE     "ibm,wsp-pciex"
 
+extern void wsp_setup_arch(void);
+extern void wsp_setup_irq(void);
+extern int wsp_probe_devices(void);
+extern void wsp_halt(void);
+
 extern void wsp_setup_pci(void);
 extern void scom_init_wsp(void);
 
 extern void a2_setup_smp(void);
 extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
 			       struct device_node *np);
-int smp_a2_cpu_bootable(unsigned int nr);
-int __devinit smp_a2_kick_cpu(int nr);
+extern int smp_a2_cpu_bootable(unsigned int nr);
+extern int __devinit smp_a2_kick_cpu(int nr);
 
-void opb_pic_init(void);
+extern void opb_pic_init(void);
+
+/* chroma-specific management */
+extern void wsp_h8_restart(char *cmd);
+extern void wsp_h8_power_off(void);
+extern void __init wsp_setup_h8(void);
 
 #endif /*  __WSP_H */
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
index d257109..7f5b838 100755
--- a/arch/powerpc/relocs_check.pl
+++ b/arch/powerpc/relocs_check.pl
@@ -32,8 +32,18 @@
 	next if (!/\s+R_/);
 
 	# These relocations are okay
-	next if (/R_PPC64_RELATIVE/ or /R_PPC64_NONE/ or
-	         /R_PPC64_ADDR64\s+mach_/);
+	# On PPC64:
+	# 	R_PPC64_RELATIVE, R_PPC64_NONE, R_PPC64_ADDR64
+	# On PPC:
+	# 	R_PPC_RELATIVE, R_PPC_ADDR16_HI,
+	# 	R_PPC_ADDR16_HA, R_PPC_ADDR16_LO,
+	# 	R_PPC_NONE
+
+	next if (/\bR_PPC64_RELATIVE\b/ or /\bR_PPC64_NONE\b/ or
+	         /\bR_PPC64_ADDR64\s+mach_/);
+	next if (/\bR_PPC_ADDR16_LO\b/ or /\bR_PPC_ADDR16_HI\b/ or
+		 /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or
+		 /\bR_PPC_NONE\b/);
 
 	# If we see this type of relocation it's an indication that
 	# we /may/ be using an old version of binutils.
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 84e1325..5e37b47 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,10 +17,11 @@
 obj-$(CONFIG_FSL_PCI)		+= fsl_pci.o $(fsl-msi-obj-y)
 obj-$(CONFIG_FSL_PMC)		+= fsl_pmc.o
 obj-$(CONFIG_FSL_LBC)		+= fsl_lbc.o
+obj-$(CONFIG_FSL_IFC)		+= fsl_ifc.o
 obj-$(CONFIG_FSL_GTM)		+= fsl_gtm.o
 obj-$(CONFIG_FSL_85XX_CACHE_SRAM)	+= fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
 obj-$(CONFIG_SIMPLE_GPIO)	+= simple_gpio.o
-obj-$(CONFIG_FSL_RIO)		+= fsl_rio.o
+obj-$(CONFIG_FSL_RIO)		+= fsl_rio.o fsl_rmu.o
 obj-$(CONFIG_TSI108_BRIDGE)	+= tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE)	+= qe_lib/
 obj-$(CONFIG_PPC_BESTCOMM)	+= bestcomm/
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ba42719..1c16141 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -25,7 +25,6 @@
 
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
new file mode 100644
index 0000000..b31f19f
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Freescale Integrated Flash Controller
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <asm/prom.h>
+#include <asm/fsl_ifc.h>
+
+struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
+
+/*
+ * convert_ifc_address - convert the base address to CSPR "Base address" format
+ * @addr_base:	base address of the memory bank
+ */
+unsigned int convert_ifc_address(phys_addr_t addr_base)
+{
+	return addr_base & CSPR_BA;
+}
+EXPORT_SYMBOL(convert_ifc_address);
+
+/*
+ * fsl_ifc_find - find IFC bank
+ * @addr_base:	base address of the memory bank
+ *
+ * This function walks the IFC banks comparing the "Base address" field of the
+ * CSPR registers with the supplied addr_base argument. When the bases match,
+ * this function returns the bank number (starting with 0); otherwise it
+ * returns an appropriate errno value.
+ */
+int fsl_ifc_find(phys_addr_t addr_base)
+{
+	int i = 0;
+
+	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+		return -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+		__be32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+		if (cspr & CSPR_V && (cspr & CSPR_BA) ==
+				convert_ifc_address(addr_base))
+			return i;
+	}
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL(fsl_ifc_find);
+
+static int __devinit fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
+{
+	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+	/*
+	 * Clear all the common status and event registers
+	 */
+	if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+	/* enable all error and events */
+	out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+
+	/* enable all error and event interrupts */
+	out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
+	out_be32(&ifc->cm_erattr0, 0x0);
+	out_be32(&ifc->cm_erattr1, 0x0);
+
+	return 0;
+}
+
+static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+{
+	struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
+
+	free_irq(ctrl->nand_irq, ctrl);
+	free_irq(ctrl->irq, ctrl);
+
+	irq_dispose_mapping(ctrl->nand_irq);
+	irq_dispose_mapping(ctrl->irq);
+
+	iounmap(ctrl->regs);
+
+	dev_set_drvdata(&dev->dev, NULL);
+	kfree(ctrl);
+
+	return 0;
+}
+
+/*
+ * NAND events are split between an operational interrupt which only
+ * receives OPC, and an error interrupt that receives everything else,
+ * including non-NAND errors.  Whichever interrupt gets to it first
+ * records the status and wakes the wait queue.
+ */
+static DEFINE_SPINLOCK(nand_irq_lock);
+
+static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
+{
+	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	unsigned long flags;
+	u32 stat;
+
+	spin_lock_irqsave(&nand_irq_lock, flags);
+
+	stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+	if (stat) {
+		out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+		ctrl->nand_stat = stat;
+		wake_up(&ctrl->nand_wait);
+	}
+
+	spin_unlock_irqrestore(&nand_irq_lock, flags);
+
+	return stat;
+}
+
+static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
+{
+	struct fsl_ifc_ctrl *ctrl = data;
+
+	if (check_nand_stat(ctrl))
+		return IRQ_HANDLED;
+
+	return IRQ_NONE;
+}
+
+/*
+ * NOTE: This interrupt is used to report ifc events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+{
+	struct fsl_ifc_ctrl *ctrl = data;
+	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	u32 err_axiid, err_srcid, status, cs_err, err_addr;
+	irqreturn_t ret = IRQ_NONE;
+
+	/* read for chip select error */
+	cs_err = in_be32(&ifc->cm_evter_stat);
+	if (cs_err) {
+		dev_err(ctrl->dev, "transaction sent to IFC is not mapped to "
+				"any memory bank 0x%08X\n", cs_err);
+		/* clear the chip select error */
+		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+		/* read the error attribute registers and print the error information */
+		status = in_be32(&ifc->cm_erattr0);
+		err_addr = in_be32(&ifc->cm_erattr1);
+
+		if (status & IFC_CM_ERATTR0_ERTYP_READ)
+			dev_err(ctrl->dev, "Read transaction error "
+				"CM_ERATTR0 0x%08X\n", status);
+		else
+			dev_err(ctrl->dev, "Write transaction error "
+				"CM_ERATTR0 0x%08X\n", status);
+
+		err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
+					IFC_CM_ERATTR0_ERAID_SHIFT;
+		dev_err(ctrl->dev, "AXI ID of the error "
+					"transaction 0x%08X\n", err_axiid);
+
+		err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
+					IFC_CM_ERATTR0_ESRCID_SHIFT;
+		dev_err(ctrl->dev, "SRC ID of the error "
+					"transaction 0x%08X\n", err_srcid);
+
+		dev_err(ctrl->dev, "Transaction Address corresponding to error "
+					"ERADDR 0x%08X\n", err_addr);
+
+		ret = IRQ_HANDLED;
+	}
+
+	if (check_nand_stat(ctrl))
+		ret = IRQ_HANDLED;
+
+	return ret;
+}
+
+/*
+ * fsl_ifc_ctrl_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one this driver can handle. This code allocates all of
+ * the resources needed for the controller only.  The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+ */
+static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev)
+{
+	int ret = 0;
+
+
+	dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+
+	fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
+	if (!fsl_ifc_ctrl_dev)
+		return -ENOMEM;
+
+	dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
+
+	/* IOMAP the entire IFC region */
+	fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+	if (!fsl_ifc_ctrl_dev->regs) {
+		dev_err(&dev->dev, "failed to get memory region\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* get the Controller level irq */
+	fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+	if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+		dev_err(&dev->dev, "failed to get irq resource "
+							"for IFC\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* get the nand machine irq */
+	fsl_ifc_ctrl_dev->nand_irq =
+			irq_of_parse_and_map(dev->dev.of_node, 1);
+	if (fsl_ifc_ctrl_dev->nand_irq == NO_IRQ) {
+		dev_err(&dev->dev, "failed to get irq resource "
+						"for NAND Machine\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	fsl_ifc_ctrl_dev->dev = &dev->dev;
+
+	ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
+	if (ret < 0)
+		goto err;
+
+	init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
+
+	ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED,
+			  "fsl-ifc", fsl_ifc_ctrl_dev);
+	if (ret != 0) {
+		dev_err(&dev->dev, "failed to install irq (%d)\n",
+			fsl_ifc_ctrl_dev->irq);
+		goto err_irq;
+	}
+
+	ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0,
+			  "fsl-ifc-nand", fsl_ifc_ctrl_dev);
+	if (ret != 0) {
+		dev_err(&dev->dev, "failed to install irq (%d)\n",
+			fsl_ifc_ctrl_dev->nand_irq);
+		goto err_nandirq;
+	}
+
+	return 0;
+
+err_nandirq:
+	free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
+	irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
+err_irq:
+	free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+	irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+err:
+	return ret;
+}
+
+static const struct of_device_id fsl_ifc_match[] = {
+	{
+		.compatible = "fsl,ifc",
+	},
+	{},
+};
+
+static struct platform_driver fsl_ifc_ctrl_driver = {
+	.driver = {
+		.name	= "fsl-ifc",
+		.of_match_table = fsl_ifc_match,
+	},
+	.probe       = fsl_ifc_ctrl_probe,
+	.remove      = fsl_ifc_ctrl_remove,
+};
+
+module_platform_driver(fsl_ifc_ctrl_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
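
fsl_ifc_find() above translates a chip-select base address into an IFC bank index; a hypothetical caller (the surrounding helper and its error handling are assumptions for illustration, not part of this patch) would use it roughly like this:

/*
 * Hedged sketch of a fsl_ifc_find() consumer.  Only the
 * of_address_to_resource()/fsl_ifc_find() calls come from the code above;
 * the helper name is illustrative.
 */
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/fsl_ifc.h>

static int example_ifc_bank(struct device_node *node)
{
	struct resource res;
	int bank;

	if (of_address_to_resource(node, 0, &res))
		return -ENODEV;

	/* map the chip-select base address back to an IFC bank number */
	bank = fsl_ifc_find(res.start);
	if (bank < 0)
		return bank;	/* -ENODEV without an IFC, -ENOENT if unmapped */

	return bank;
}
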
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index d5c3c90..483126d 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -332,6 +332,38 @@
 	return ret;
 }
 
+#ifdef CONFIG_SUSPEND
+
+/* save lbc registers */
+static int fsl_lbc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+	ctrl->saved_regs = kmalloc(sizeof(struct fsl_lbc_regs), GFP_KERNEL);
+	if (!ctrl->saved_regs)
+		return -ENOMEM;
+
+	_memcpy_fromio(ctrl->saved_regs, lbc, sizeof(struct fsl_lbc_regs));
+	return 0;
+}
+
+/* restore lbc registers */
+static int fsl_lbc_resume(struct platform_device *pdev)
+{
+	struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+	if (ctrl->saved_regs) {
+		_memcpy_toio(lbc, ctrl->saved_regs,
+				sizeof(struct fsl_lbc_regs));
+		kfree(ctrl->saved_regs);
+		ctrl->saved_regs = NULL;
+	}
+	return 0;
+}
+#endif /* CONFIG_SUSPEND */
+
 static const struct of_device_id fsl_lbc_match[] = {
 	{ .compatible = "fsl,elbc", },
 	{ .compatible = "fsl,pq3-localbus", },
@@ -346,6 +378,10 @@
 		.of_match_table = fsl_lbc_match,
 	},
 	.probe = fsl_lbc_ctrl_probe,
+#ifdef CONFIG_SUSPEND
+	.suspend     = fsl_lbc_suspend,
+	.resume      = fsl_lbc_resume,
+#endif
 };
 
 static int __init fsl_lbc_init(void)
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e5c344d..ecb5c19 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -23,6 +23,8 @@
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/mpic.h>
+#include <asm/fsl_hcalls.h>
+
 #include "fsl_msi.h"
 #include "fsl_pci.h"
 
@@ -148,14 +150,49 @@
 
 static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 {
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct device_node *np;
+	phandle phandle = 0;
 	int rc, hwirq = -ENOMEM;
 	unsigned int virq;
 	struct msi_desc *entry;
 	struct msi_msg msg;
 	struct fsl_msi *msi_data;
 
+	/*
+	 * If the PCI node has an fsl,msi property, then we need to use it
+	 * to find the specific MSI.
+	 */
+	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
+	if (np) {
+		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
+		    of_device_is_compatible(np, "fsl,vmpic-msi"))
+			phandle = np->phandle;
+		else {
+			dev_err(&pdev->dev,
+				"node %s has an invalid fsl,msi phandle %u\n",
+				hose->dn->full_name, np->phandle);
+			return -EINVAL;
+		}
+	}
+
 	list_for_each_entry(entry, &pdev->msi_list, list) {
+		/*
+		 * Loop over all the MSI devices until we find one that has an
+		 * available interrupt.
+		 */
 		list_for_each_entry(msi_data, &msi_head, list) {
+			/*
+			 * If the PCI node has an fsl,msi property, then we
+			 * restrict our search to the corresponding MSI node.
+			 * The simplest way is to skip over MSI nodes with the
+			 * wrong phandle. Under the Freescale hypervisor, this
+			 * has the additional benefit of skipping over MSI
+			 * nodes that are not mapped in the PAMU.
+			 */
+			if (phandle && (phandle != msi_data->phandle))
+				continue;
+
 			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
 			if (hwirq >= 0)
 				break;
@@ -163,16 +200,14 @@
 
 		if (hwirq < 0) {
 			rc = hwirq;
-			pr_debug("%s: fail allocating msi interrupt\n",
-					__func__);
+			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
 			goto out_free;
 		}
 
 		virq = irq_create_mapping(msi_data->irqhost, hwirq);
 
 		if (virq == NO_IRQ) {
-			pr_debug("%s: fail mapping hwirq 0x%x\n",
-					__func__, hwirq);
+			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
 			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 			rc = -ENOSPC;
 			goto out_free;
@@ -201,6 +236,7 @@
 	u32 intr_index;
 	u32 have_shift = 0;
 	struct fsl_msi_cascade_data *cascade_data;
+	unsigned int ret;
 
 	cascade_data = irq_get_handler_data(irq);
 	msi_data = cascade_data->msi_data;
@@ -232,6 +268,14 @@
 	case FSL_PIC_IP_IPIC:
 		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
 		break;
+	case FSL_PIC_IP_VMPIC:
+		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
+		if (ret) {
+			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
+			       "irq %u (ret=%u)\n", irq, ret);
+			msir_value = 0;
+		}
+		break;
 	}
 
 	while (msir_value) {
@@ -249,6 +293,7 @@
 
 	switch (msi_data->feature & FSL_PIC_IP_MASK) {
 	case FSL_PIC_IP_MPIC:
+	case FSL_PIC_IP_VMPIC:
 		chip->irq_eoi(idata);
 		break;
 	case FSL_PIC_IP_IPIC:
@@ -278,7 +323,8 @@
 	}
 	if (msi->bitmap.bitmap)
 		msi_bitmap_free(&msi->bitmap);
-	iounmap(msi->msi_regs);
+	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
+		iounmap(msi->msi_regs);
 	kfree(msi);
 
 	return 0;
@@ -350,25 +396,37 @@
 		goto error_out;
 	}
 
-	/* Get the MSI reg base */
-	err = of_address_to_resource(dev->dev.of_node, 0, &res);
-	if (err) {
-		dev_err(&dev->dev, "%s resource error!\n",
+	/*
+	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
+	 * property.  Instead, we use hypercalls to access the MSI.
+	 */
+	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
+		err = of_address_to_resource(dev->dev.of_node, 0, &res);
+		if (err) {
+			dev_err(&dev->dev, "invalid resource for node %s\n",
 				dev->dev.of_node->full_name);
-		goto error_out;
-	}
+			goto error_out;
+		}
 
-	msi->msi_regs = ioremap(res.start, resource_size(&res));
-	if (!msi->msi_regs) {
-		dev_err(&dev->dev, "ioremap problem failed\n");
-		goto error_out;
+		msi->msi_regs = ioremap(res.start, resource_size(&res));
+		if (!msi->msi_regs) {
+			dev_err(&dev->dev, "could not map node %s\n",
+				dev->dev.of_node->full_name);
+			goto error_out;
+		}
+		msi->msiir_offset =
+			features->msiir_offset + (res.start & 0xfffff);
 	}
 
 	msi->feature = features->fsl_pic_ip;
 
 	msi->irqhost->host_data = msi;
 
-	msi->msiir_offset = features->msiir_offset + (res.start & 0xfffff);
+	/*
+	 * Remember the phandle, so that we can match with any PCI nodes
+	 * that have an "fsl,msi" property.
+	 */
+	msi->phandle = dev->dev.of_node->phandle;
 
 	rc = fsl_msi_init_allocator(msi);
 	if (rc) {
@@ -437,6 +495,11 @@
 	.msiir_offset = 0x38,
 };
 
+static const struct fsl_msi_feature vmpic_msi_feature = {
+	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
+	.msiir_offset = 0,
+};
+
 static const struct of_device_id fsl_of_msi_ids[] = {
 	{
 		.compatible = "fsl,mpic-msi",
@@ -446,6 +509,10 @@
 		.compatible = "fsl,ipic-msi",
 		.data = (void *)&ipic_msi_feature,
 	},
+	{
+		.compatible = "fsl,vmpic-msi",
+		.data = (void *)&vmpic_msi_feature,
+	},
 	{}
 };
 
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 1313abb..f6c646a 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -13,15 +13,17 @@
 #ifndef _POWERPC_SYSDEV_FSL_MSI_H
 #define _POWERPC_SYSDEV_FSL_MSI_H
 
+#include <linux/of.h>
 #include <asm/msi_bitmap.h>
 
 #define NR_MSI_REG		8
 #define IRQS_PER_MSI_REG	32
 #define NR_MSI_IRQS	(NR_MSI_REG * IRQS_PER_MSI_REG)
 
-#define FSL_PIC_IP_MASK	0x0000000F
-#define FSL_PIC_IP_MPIC	0x00000001
-#define FSL_PIC_IP_IPIC	0x00000002
+#define FSL_PIC_IP_MASK   0x0000000F
+#define FSL_PIC_IP_MPIC   0x00000001
+#define FSL_PIC_IP_IPIC   0x00000002
+#define FSL_PIC_IP_VMPIC  0x00000003
 
 struct fsl_msi {
 	struct irq_host *irqhost;
@@ -36,6 +38,8 @@
 	struct msi_bitmap bitmap;
 
 	struct list_head list;          /* support multiple MSI banks */
+
+	phandle phandle;
 };
 
 #endif /* _POWERPC_SYSDEV_FSL_MSI_H */
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4ce547e..3b61e8c 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -65,6 +65,30 @@
 }
 
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+#define MAX_PHYS_ADDR_BITS	40
+static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+
+static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	/*
+	 * Fix up PCI devices that are able to DMA above the physical
+	 * address width of the SoC, so that any internal SoC address
+	 * can be reached from across PCI if needed
+	 */
+	if ((dev->bus == &pci_bus_type) &&
+	    dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
+		set_dma_ops(dev, &dma_direct_ops);
+		set_dma_offset(dev, pci64_dma_offset);
+	}
+
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
 static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
 	unsigned int index, const struct resource *res,
 	resource_size_t offset)
@@ -113,6 +137,8 @@
 	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 	char *name = hose->dn->full_name;
+	const u64 *reg;
+	int len;
 
 	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 		 (u64)rsrc->start, (u64)resource_size(rsrc));
@@ -205,6 +231,33 @@
 
 	/* Setup inbound mem window */
 	mem = memblock_end_of_DRAM();
+
+	/*
+	 * The msi-address-64 property, if it exists, indicates the physical
+	 * address of the MSIIR register.  Normally, this register is located
+	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
+	 * this property exists, then we normally need to create a new ATMU
+	 * for it.  For now, however, we cheat.  The only entity that creates
+	 * this property is the Freescale hypervisor, and the address is
+	 * specified in the partition configuration.  Typically, the address
+	 * is located in the page immediately after the end of DDR.  If so, we
+	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
+	 * page.
+	 */
+	reg = of_get_property(hose->dn, "msi-address-64", &len);
+	if (reg && (len == sizeof(u64))) {
+		u64 address = be64_to_cpup(reg);
+
+		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
+			pr_info("%s: extending DDR ATMU to cover MSIIR\n", name);
+			mem += PAGE_SIZE;
+		} else {
+			/* TODO: Create a new ATMU for MSIIR */
+			pr_warn("%s: msi-address-64 address of %llx is "
+				"unsupported\n", name, address);
+		}
+	}
+
 	sz = min(mem, paddr_lo);
 	mem_log = __ilog2_u64(sz);
 
@@ -228,6 +281,37 @@
 
 		hose->dma_window_base_cur = 0x00000000;
 		hose->dma_window_size = (resource_size_t)sz;
+
+		/*
+		 * if we have >4G of memory, set up a second PCI inbound window
+		 * so that devices that are 64-bit address capable can work
+		 * without SWIOTLB and access the full range of memory
+		 */
+		if (sz != mem) {
+			mem_log = __ilog2_u64(mem);
+
+			/* Size window up if we don't fit in an exact power-of-2 */
+			if ((1ull << mem_log) != mem)
+				mem_log++;
+
+			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
+
+			/* Setup inbound memory window */
+			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
+			out_be32(&pci->piw[win_idx].piwbear,
+					pci64_dma_offset >> 44);
+			out_be32(&pci->piw[win_idx].piwbar,
+					pci64_dma_offset >> 12);
+			out_be32(&pci->piw[win_idx].piwar,  piwar);
+
+			/*
+			 * install our own dma_set_mask handler to fixup dma_ops
+			 * and dma_offset
+			 */
+			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
+
+			pr_info("%s: Setup 64-bit PCI DMA window\n", name);
+		}
 	} else {
 		u64 paddr = 0;
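
The second inbound window set up above must cover all of memory, so its size is rounded up to the next power of two before being encoded in PIWAR. A small standalone sketch of that arithmetic (the helper name and standalone form are assumptions, not from the patch) follows:

/*
 * Hedged sketch of the inbound-window sizing used above, pulled out for
 * clarity.  It assumes powerpc's __ilog2_u64() (floor of log2), as used in
 * the hunk itself.
 */
#include <linux/bitops.h>
#include <linux/types.h>

static unsigned int example_piwar_size_field(u64 mem)
{
	unsigned int mem_log = __ilog2_u64(mem);

	/* round up when memory is not an exact power of two,
	 * e.g. 6 GiB of RAM gets an 8 GiB inbound window */
	if ((1ull << mem_log) != mem)
		mem_log++;

	/* PIWAR encodes the window size as log2(size) - 1 */
	return mem_log - 1;
}
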
 
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 22ffccd..a4c4f4a 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
  * - Added Port-Write message handling
  * - Added Machine Check exception handling
  *
- * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
  * Zhang Wei <wei.zhang@freescale.com>
  *
  * Copyright 2005 MontaVista Software, Inc.
@@ -28,240 +28,33 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
-#include <linux/rio.h>
-#include <linux/rio_drv.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/kfifo.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
 #include <asm/machdep.h>
-#include <asm/uaccess.h>
+
+#include "fsl_rio.h"
 
 #undef DEBUG_PW	/* Port-Write debugging */
 
-/* RapidIO definition irq, which read from OF-tree */
-#define IRQ_RIO_BELL(m)		(((struct rio_priv *)(m->priv))->bellirq)
-#define IRQ_RIO_TX(m)		(((struct rio_priv *)(m->priv))->txirq)
-#define IRQ_RIO_RX(m)		(((struct rio_priv *)(m->priv))->rxirq)
-#define IRQ_RIO_PW(m)		(((struct rio_priv *)(m->priv))->pwirq)
-
-#define IPWSR_CLEAR		0x98
-#define OMSR_CLEAR		0x1cb3
-#define IMSR_CLEAR		0x491
-#define IDSR_CLEAR		0x91
-#define ODSR_CLEAR		0x1c00
-#define LTLEECSR_ENABLE_ALL	0xFFC000FC
-#define ESCSR_CLEAR		0x07120204
-#define IECSR_CLEAR		0x80000000
-
 #define RIO_PORT1_EDCSR		0x0640
 #define RIO_PORT2_EDCSR		0x0680
 #define RIO_PORT1_IECSR		0x10130
 #define RIO_PORT2_IECSR		0x101B0
-#define RIO_IM0SR		0x13064
-#define RIO_IM1SR		0x13164
-#define RIO_OM0SR		0x13004
-#define RIO_OM1SR		0x13104
 
-#define RIO_ATMU_REGS_OFFSET	0x10c00
-#define RIO_P_MSG_REGS_OFFSET	0x11000
-#define RIO_S_MSG_REGS_OFFSET	0x13000
 #define RIO_GCCSR		0x13c
 #define RIO_ESCSR		0x158
+#define ESCSR_CLEAR		0x07120204
 #define RIO_PORT2_ESCSR		0x178
 #define RIO_CCSR		0x15c
-#define RIO_LTLEDCSR		0x0608
 #define RIO_LTLEDCSR_IER	0x80000000
 #define RIO_LTLEDCSR_PRT	0x01000000
-#define RIO_LTLEECSR		0x060c
-#define RIO_EPWISR		0x10010
+#define IECSR_CLEAR		0x80000000
 #define RIO_ISR_AACR		0x10120
 #define RIO_ISR_AACR_AA		0x1	/* Accept All ID */
-#define RIO_MAINT_WIN_SIZE	0x400000
-#define RIO_DBELL_WIN_SIZE	0x1000
-
-#define RIO_MSG_OMR_MUI		0x00000002
-#define RIO_MSG_OSR_TE		0x00000080
-#define RIO_MSG_OSR_QOI		0x00000020
-#define RIO_MSG_OSR_QFI		0x00000010
-#define RIO_MSG_OSR_MUB		0x00000004
-#define RIO_MSG_OSR_EOMI	0x00000002
-#define RIO_MSG_OSR_QEI		0x00000001
-
-#define RIO_MSG_IMR_MI		0x00000002
-#define RIO_MSG_ISR_TE		0x00000080
-#define RIO_MSG_ISR_QFI		0x00000010
-#define RIO_MSG_ISR_DIQI	0x00000001
-
-#define RIO_IPWMR_SEN		0x00100000
-#define RIO_IPWMR_QFIE		0x00000100
-#define RIO_IPWMR_EIE		0x00000020
-#define RIO_IPWMR_CQ		0x00000002
-#define RIO_IPWMR_PWE		0x00000001
-
-#define RIO_IPWSR_QF		0x00100000
-#define RIO_IPWSR_TE		0x00000080
-#define RIO_IPWSR_QFI		0x00000010
-#define RIO_IPWSR_PWD		0x00000008
-#define RIO_IPWSR_PWB		0x00000004
-
-/* EPWISR Error match value */
-#define RIO_EPWISR_PINT1	0x80000000
-#define RIO_EPWISR_PINT2	0x40000000
-#define RIO_EPWISR_MU		0x00000002
-#define RIO_EPWISR_PW		0x00000001
-
-#define RIO_MSG_DESC_SIZE	32
-#define RIO_MSG_BUFFER_SIZE	4096
-#define RIO_MIN_TX_RING_SIZE	2
-#define RIO_MAX_TX_RING_SIZE	2048
-#define RIO_MIN_RX_RING_SIZE	2
-#define RIO_MAX_RX_RING_SIZE	2048
-
-#define DOORBELL_DMR_DI		0x00000002
-#define DOORBELL_DSR_TE		0x00000080
-#define DOORBELL_DSR_QFI	0x00000010
-#define DOORBELL_DSR_DIQI	0x00000001
-#define DOORBELL_TID_OFFSET	0x02
-#define DOORBELL_SID_OFFSET	0x04
-#define DOORBELL_INFO_OFFSET	0x06
-
-#define DOORBELL_MESSAGE_SIZE	0x08
-#define DBELL_SID(x)		(*(u16 *)(x + DOORBELL_SID_OFFSET))
-#define DBELL_TID(x)		(*(u16 *)(x + DOORBELL_TID_OFFSET))
-#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))
-
-struct rio_atmu_regs {
-	u32 rowtar;
-	u32 rowtear;
-	u32 rowbar;
-	u32 pad2;
-	u32 rowar;
-	u32 pad3[3];
-};
-
-struct rio_msg_regs {
-	u32 omr;	/* 0xD_3000 - Outbound message 0 mode register */
-	u32 osr;	/* 0xD_3004 - Outbound message 0 status register */
-	u32 pad1;
-	u32 odqdpar;	/* 0xD_300C - Outbound message 0 descriptor queue
-			   dequeue pointer address register */
-	u32 pad2;
-	u32 osar;	/* 0xD_3014 - Outbound message 0 source address
-			   register */
-	u32 odpr;	/* 0xD_3018 - Outbound message 0 destination port
-			   register */
-	u32 odatr;	/* 0xD_301C - Outbound message 0 destination attributes
-			   Register*/
-	u32 odcr;	/* 0xD_3020 - Outbound message 0 double-word count
-			   register */
-	u32 pad3;
-	u32 odqepar;	/* 0xD_3028 - Outbound message 0 descriptor queue
-			   enqueue pointer address register */
-	u32 pad4[13];
-	u32 imr;	/* 0xD_3060 - Inbound message 0 mode register */
-	u32 isr;	/* 0xD_3064 - Inbound message 0 status register */
-	u32 pad5;
-	u32 ifqdpar;	/* 0xD_306C - Inbound message 0 frame queue dequeue
-			   pointer address register*/
-	u32 pad6;
-	u32 ifqepar;	/* 0xD_3074 - Inbound message 0 frame queue enqueue
-			   pointer address register */
-	u32 pad7[226];
-	u32 odmr;	/* 0xD_3400 - Outbound doorbell mode register */
-	u32 odsr;	/* 0xD_3404 - Outbound doorbell status register */
-	u32 res0[4];
-	u32 oddpr;	/* 0xD_3418 - Outbound doorbell destination port
-			   register */
-	u32 oddatr;	/* 0xD_341c - Outbound doorbell destination attributes
-			   register */
-	u32 res1[3];
-	u32 odretcr;	/* 0xD_342C - Outbound doorbell retry error threshold
-			   configuration register */
-	u32 res2[12];
-	u32 dmr;	/* 0xD_3460 - Inbound doorbell mode register */
-	u32 dsr;	/* 0xD_3464 - Inbound doorbell status register */
-	u32 pad8;
-	u32 dqdpar;	/* 0xD_346C - Inbound doorbell queue dequeue Pointer
-			   address register */
-	u32 pad9;
-	u32 dqepar;	/* 0xD_3474 - Inbound doorbell Queue enqueue pointer
-			   address register */
-	u32 pad10[26];
-	u32 pwmr;	/* 0xD_34E0 - Inbound port-write mode register */
-	u32 pwsr;	/* 0xD_34E4 - Inbound port-write status register */
-	u32 epwqbar;	/* 0xD_34E8 - Extended Port-Write Queue Base Address
-			   register */
-	u32 pwqbar;	/* 0xD_34EC - Inbound port-write queue base address
-			   register */
-};
-
-struct rio_tx_desc {
-	u32 res1;
-	u32 saddr;
-	u32 dport;
-	u32 dattr;
-	u32 res2;
-	u32 res3;
-	u32 dwcnt;
-	u32 res4;
-};
-
-struct rio_dbell_ring {
-	void *virt;
-	dma_addr_t phys;
-};
-
-struct rio_msg_tx_ring {
-	void *virt;
-	dma_addr_t phys;
-	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
-	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
-	int tx_slot;
-	int size;
-	void *dev_id;
-};
-
-struct rio_msg_rx_ring {
-	void *virt;
-	dma_addr_t phys;
-	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
-	int rx_slot;
-	int size;
-	void *dev_id;
-};
-
-struct rio_port_write_msg {
-	void *virt;
-	dma_addr_t phys;
-	u32 msg_count;
-	u32 err_count;
-	u32 discard_count;
-};
-
-struct rio_priv {
-	struct device *dev;
-	void __iomem *regs_win;
-	struct rio_atmu_regs __iomem *atmu_regs;
-	struct rio_atmu_regs __iomem *maint_atmu_regs;
-	struct rio_atmu_regs __iomem *dbell_atmu_regs;
-	void __iomem *dbell_win;
-	void __iomem *maint_win;
-	struct rio_msg_regs __iomem *msg_regs;
-	struct rio_dbell_ring dbell_ring;
-	struct rio_msg_tx_ring msg_tx_ring;
-	struct rio_msg_rx_ring msg_rx_ring;
-	struct rio_port_write_msg port_write_msg;
-	int bellirq;
-	int txirq;
-	int rxirq;
-	int pwirq;
-	struct work_struct pw_work;
-	struct kfifo pw_fifo;
-	spinlock_t pw_fifo_lock;
-};
 
 #define __fsl_read_rio_config(x, addr, err, op)		\
 	__asm__ __volatile__(				\
@@ -279,7 +72,12 @@
 		: "=r" (err), "=r" (x)			\
 		: "b" (addr), "i" (-EFAULT), "0" (err))
 
-static void __iomem *rio_regs_win;
+void __iomem *rio_regs_win;
+void __iomem *rmu_regs_win;
+resource_size_t rio_law_start;
+
+struct fsl_rio_dbell *dbell;
+struct fsl_rio_pw *pw;
 
 #ifdef CONFIG_E500
 int fsl_rio_mcheck_exception(struct pt_regs *regs)
@@ -311,42 +109,6 @@
 #endif
 
 /**
- * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
- * @mport: RapidIO master port info
- * @index: ID of RapidIO interface
- * @destid: Destination ID of target device
- * @data: 16-bit info field of RapidIO doorbell message
- *
- * Sends a MPC85xx doorbell message. Returns %0 on success or
- * %-EINVAL on failure.
- */
-static int fsl_rio_doorbell_send(struct rio_mport *mport,
-				int index, u16 destid, u16 data)
-{
-	struct rio_priv *priv = mport->priv;
-	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
-		 index, destid, data);
-	switch (mport->phy_type) {
-	case RIO_PHY_PARALLEL:
-		out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
-		out_be16(priv->dbell_win, data);
-		break;
-	case RIO_PHY_SERIAL:
-		/* In the serial version silicons, such as MPC8548, MPC8641,
-		 * below operations is must be.
-		 */
-		out_be32(&priv->msg_regs->odmr, 0x00000000);
-		out_be32(&priv->msg_regs->odretcr, 0x00000004);
-		out_be32(&priv->msg_regs->oddpr, destid << 16);
-		out_be32(&priv->msg_regs->oddatr, data);
-		out_be32(&priv->msg_regs->odmr, 0x00000001);
-		break;
-	}
-
-	return 0;
-}
-
-/**
  * fsl_local_config_read - Generate a MPC85xx local config space read
  * @mport: RapidIO master port info
  * @index: ID of RapdiIO interface
@@ -384,8 +146,8 @@
 {
 	struct rio_priv *priv = mport->priv;
 	pr_debug
-	    ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
-	     index, offset, data);
+		("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
+		index, offset, data);
 	out_be32(priv->regs_win + offset, data);
 
 	return 0;
@@ -413,8 +175,9 @@
 	u32 rval, err = 0;
 
 	pr_debug
-	    ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
-	     index, destid, hopcount, offset, len);
+		("fsl_rio_config_read:"
+		" index %d destid %d hopcount %d offset %8.8x len %d\n",
+		index, destid, hopcount, offset, len);
 
 	/* 16MB maintenance window possible */
 	/* allow only aligned access to maintenance registers */
@@ -423,7 +186,7 @@
 
 	out_be32(&priv->maint_atmu_regs->rowtar,
 		 (destid << 22) | (hopcount << 12) | (offset >> 12));
-	out_be32(&priv->maint_atmu_regs->rowtear,  (destid >> 10));
+	out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
 
 	data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
 	switch (len) {
@@ -470,8 +233,9 @@
 	struct rio_priv *priv = mport->priv;
 	u8 *data;
 	pr_debug
-	    ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
-	     index, destid, hopcount, offset, len, val);
+		("fsl_rio_config_write:"
+		" index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
+		index, destid, hopcount, offset, len, val);
 
 	/* 16MB maintenance windows possible */
 	/* allow only aligned access to maintenance registers */
@@ -480,7 +244,7 @@
 
 	out_be32(&priv->maint_atmu_regs->rowtar,
 		 (destid << 22) | (hopcount << 12) | (offset >> 12));
-	out_be32(&priv->maint_atmu_regs->rowtear,  (destid >> 10));
+	out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
 
 	data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
 	switch (len) {
@@ -500,590 +264,7 @@
 	return 0;
 }
 
-/**
- * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
- * @mport: Master port with outbound message queue
- * @rdev: Target of outbound message
- * @mbox: Outbound mailbox
- * @buffer: Message to add to outbound queue
- * @len: Length of message
- *
- * Adds the @buffer message to the MPC85xx outbound message queue. Returns
- * %0 on success or %-EINVAL on failure.
- */
-static int
-fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
-			void *buffer, size_t len)
-{
-	struct rio_priv *priv = mport->priv;
-	u32 omr;
-	struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
-					+ priv->msg_tx_ring.tx_slot;
-	int ret = 0;
-
-	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
-		 "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
-
-	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Copy and clear rest of buffer */
-	memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
-			len);
-	if (len < (RIO_MAX_MSG_SIZE - 4))
-		memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
-				+ len, 0, RIO_MAX_MSG_SIZE - len);
-
-	switch (mport->phy_type) {
-	case RIO_PHY_PARALLEL:
-		/* Set mbox field for message */
-		desc->dport = mbox & 0x3;
-
-		/* Enable EOMI interrupt, set priority, and set destid */
-		desc->dattr = 0x28000000 | (rdev->destid << 2);
-		break;
-	case RIO_PHY_SERIAL:
-		/* Set mbox field for message, and set destid */
-		desc->dport = (rdev->destid << 16) | (mbox & 0x3);
-
-		/* Enable EOMI interrupt and priority */
-		desc->dattr = 0x28000000;
-		break;
-	}
-
-	/* Set transfer size aligned to next power of 2 (in double words) */
-	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
-
-	/* Set snooping and source buffer address */
-	desc->saddr = 0x00000004
-		| priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];
-
-	/* Increment enqueue pointer */
-	omr = in_be32(&priv->msg_regs->omr);
-	out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
-
-	/* Go to next descriptor */
-	if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
-		priv->msg_tx_ring.tx_slot = 0;
-
-      out:
-	return ret;
-}
-
-/**
- * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles outbound message interrupts. Executes a register outbound
- * mailbox event handler and acks the interrupt occurrence.
- */
-static irqreturn_t
-fsl_rio_tx_handler(int irq, void *dev_instance)
-{
-	int osr;
-	struct rio_mport *port = (struct rio_mport *)dev_instance;
-	struct rio_priv *priv = port->priv;
-
-	osr = in_be32(&priv->msg_regs->osr);
-
-	if (osr & RIO_MSG_OSR_TE) {
-		pr_info("RIO: outbound message transmission error\n");
-		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
-		goto out;
-	}
-
-	if (osr & RIO_MSG_OSR_QOI) {
-		pr_info("RIO: outbound message queue overflow\n");
-		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
-		goto out;
-	}
-
-	if (osr & RIO_MSG_OSR_EOMI) {
-		u32 dqp = in_be32(&priv->msg_regs->odqdpar);
-		int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
-		port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
-				slot);
-
-		/* Ack the end-of-message interrupt */
-		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
-	}
-
-      out:
-	return IRQ_HANDLED;
-}
-
-/**
- * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
- * @mport: Master port implementing the outbound message unit
- * @dev_id: Device specific pointer to pass on event
- * @mbox: Mailbox to open
- * @entries: Number of entries in the outbound mailbox ring
- *
- * Initializes buffer ring, request the outbound message interrupt,
- * and enables the outbound message unit. Returns %0 on success and
- * %-EINVAL or %-ENOMEM on failure.
- */
-static int
-fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
-{
-	int i, j, rc = 0;
-	struct rio_priv *priv = mport->priv;
-
-	if ((entries < RIO_MIN_TX_RING_SIZE) ||
-	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	/* Initialize shadow copy ring */
-	priv->msg_tx_ring.dev_id = dev_id;
-	priv->msg_tx_ring.size = entries;
-
-	for (i = 0; i < priv->msg_tx_ring.size; i++) {
-		priv->msg_tx_ring.virt_buffer[i] =
-			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
-				&priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
-		if (!priv->msg_tx_ring.virt_buffer[i]) {
-			rc = -ENOMEM;
-			for (j = 0; j < priv->msg_tx_ring.size; j++)
-				if (priv->msg_tx_ring.virt_buffer[j])
-					dma_free_coherent(priv->dev,
-							RIO_MSG_BUFFER_SIZE,
-							priv->msg_tx_ring.
-							virt_buffer[j],
-							priv->msg_tx_ring.
-							phys_buffer[j]);
-			goto out;
-		}
-	}
-
-	/* Initialize outbound message descriptor ring */
-	priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
-				priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-				&priv->msg_tx_ring.phys, GFP_KERNEL);
-	if (!priv->msg_tx_ring.virt) {
-		rc = -ENOMEM;
-		goto out_dma;
-	}
-	memset(priv->msg_tx_ring.virt, 0,
-			priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
-	priv->msg_tx_ring.tx_slot = 0;
-
-	/* Point dequeue/enqueue pointers at first entry in ring */
-	out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
-	out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);
-
-	/* Configure for snooping */
-	out_be32(&priv->msg_regs->osar, 0x00000004);
-
-	/* Clear interrupt status */
-	out_be32(&priv->msg_regs->osr, 0x000000b3);
-
-	/* Hook up outbound message handler */
-	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
-			 "msg_tx", (void *)mport);
-	if (rc < 0)
-		goto out_irq;
-
-	/*
-	 * Configure outbound message unit
-	 *      Snooping
-	 *      Interrupts (all enabled, except QEIE)
-	 *      Chaining mode
-	 *      Disable
-	 */
-	out_be32(&priv->msg_regs->omr, 0x00100220);
-
-	/* Set number of entries */
-	out_be32(&priv->msg_regs->omr,
-		 in_be32(&priv->msg_regs->omr) |
-		 ((get_bitmask_order(entries) - 2) << 12));
-
-	/* Now enable the unit */
-	out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);
-
-      out:
-	return rc;
-
-      out_irq:
-	dma_free_coherent(priv->dev,
-			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
-
-      out_dma:
-	for (i = 0; i < priv->msg_tx_ring.size; i++)
-		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
-				  priv->msg_tx_ring.virt_buffer[i],
-				  priv->msg_tx_ring.phys_buffer[i]);
-
-	return rc;
-}
-
-/**
- * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
- * @mport: Master port implementing the outbound message unit
- * @mbox: Mailbox to close
- *
- * Disables the outbound message unit, free all buffers, and
- * frees the outbound message interrupt.
- */
-static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
-{
-	struct rio_priv *priv = mport->priv;
-	/* Disable inbound message unit */
-	out_be32(&priv->msg_regs->omr, 0);
-
-	/* Free ring */
-	dma_free_coherent(priv->dev,
-			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
-
-	/* Free interrupt */
-	free_irq(IRQ_RIO_TX(mport), (void *)mport);
-}
-
-/**
- * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles inbound message interrupts. Executes a registered inbound
- * mailbox event handler and acks the interrupt occurrence.
- */
-static irqreturn_t
-fsl_rio_rx_handler(int irq, void *dev_instance)
-{
-	int isr;
-	struct rio_mport *port = (struct rio_mport *)dev_instance;
-	struct rio_priv *priv = port->priv;
-
-	isr = in_be32(&priv->msg_regs->isr);
-
-	if (isr & RIO_MSG_ISR_TE) {
-		pr_info("RIO: inbound message reception error\n");
-		out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
-		goto out;
-	}
-
-	/* XXX Need to check/dispatch until queue empty */
-	if (isr & RIO_MSG_ISR_DIQI) {
-		/*
-		 * We implement *only* mailbox 0, but can receive messages
-		 * for any mailbox/letter to that mailbox destination. So,
-		 * make the callback with an unknown/invalid mailbox number
-		 * argument.
-		 */
-		port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);
-
-		/* Ack the queueing interrupt */
-		out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
-	}
-
-      out:
-	return IRQ_HANDLED;
-}
-
-/**
- * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
- * @mport: Master port implementing the inbound message unit
- * @dev_id: Device specific pointer to pass on event
- * @mbox: Mailbox to open
- * @entries: Number of entries in the inbound mailbox ring
- *
- * Initializes buffer ring, request the inbound message interrupt,
- * and enables the inbound message unit. Returns %0 on success
- * and %-EINVAL or %-ENOMEM on failure.
- */
-static int
-fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
-{
-	int i, rc = 0;
-	struct rio_priv *priv = mport->priv;
-
-	if ((entries < RIO_MIN_RX_RING_SIZE) ||
-	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	/* Initialize client buffer ring */
-	priv->msg_rx_ring.dev_id = dev_id;
-	priv->msg_rx_ring.size = entries;
-	priv->msg_rx_ring.rx_slot = 0;
-	for (i = 0; i < priv->msg_rx_ring.size; i++)
-		priv->msg_rx_ring.virt_buffer[i] = NULL;
-
-	/* Initialize inbound message ring */
-	priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
-				priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
-				&priv->msg_rx_ring.phys, GFP_KERNEL);
-	if (!priv->msg_rx_ring.virt) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	/* Point dequeue/enqueue pointers at first entry in ring */
-	out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
-	out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);
-
-	/* Clear interrupt status */
-	out_be32(&priv->msg_regs->isr, 0x00000091);
-
-	/* Hook up inbound message handler */
-	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
-			 "msg_rx", (void *)mport);
-	if (rc < 0) {
-		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
-				  priv->msg_tx_ring.virt_buffer[i],
-				  priv->msg_tx_ring.phys_buffer[i]);
-		goto out;
-	}
-
-	/*
-	 * Configure inbound message unit:
-	 *      Snooping
-	 *      4KB max message size
-	 *      Unmask all interrupt sources
-	 *      Disable
-	 */
-	out_be32(&priv->msg_regs->imr, 0x001b0060);
-
-	/* Set number of queue entries */
-	setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
-
-	/* Now enable the unit */
-	setbits32(&priv->msg_regs->imr, 0x1);
-
-      out:
-	return rc;
-}
-
-/**
- * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
- * @mport: Master port implementing the inbound message unit
- * @mbox: Mailbox to close
- *
- * Disables the inbound message unit, free all buffers, and
- * frees the inbound message interrupt.
- */
-static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
-{
-	struct rio_priv *priv = mport->priv;
-	/* Disable inbound message unit */
-	out_be32(&priv->msg_regs->imr, 0);
-
-	/* Free ring */
-	dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
-			  priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
-
-	/* Free interrupt */
-	free_irq(IRQ_RIO_RX(mport), (void *)mport);
-}
-
-/**
- * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
- * @mport: Master port implementing the inbound message unit
- * @mbox: Inbound mailbox number
- * @buf: Buffer to add to inbound queue
- *
- * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
- * %0 on success or %-EINVAL on failure.
- */
-static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
-{
-	int rc = 0;
-	struct rio_priv *priv = mport->priv;
-
-	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
-		 priv->msg_rx_ring.rx_slot);
-
-	if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
-		printk(KERN_ERR
-		       "RIO: error adding inbound buffer %d, buffer exists\n",
-		       priv->msg_rx_ring.rx_slot);
-		rc = -EINVAL;
-		goto out;
-	}
-
-	priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
-	if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
-		priv->msg_rx_ring.rx_slot = 0;
-
-      out:
-	return rc;
-}
-
-/**
- * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
- * @mport: Master port implementing the inbound message unit
- * @mbox: Inbound mailbox number
- *
- * Gets the next available inbound message from the inbound message queue.
- * A pointer to the message is returned on success or NULL on failure.
- */
-static void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
-{
-	struct rio_priv *priv = mport->priv;
-	u32 phys_buf, virt_buf;
-	void *buf = NULL;
-	int buf_idx;
-
-	phys_buf = in_be32(&priv->msg_regs->ifqdpar);
-
-	/* If no more messages, then bail out */
-	if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
-		goto out2;
-
-	virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
-						- priv->msg_rx_ring.phys);
-	buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
-	buf = priv->msg_rx_ring.virt_buffer[buf_idx];
-
-	if (!buf) {
-		printk(KERN_ERR
-		       "RIO: inbound message copy failed, no buffers\n");
-		goto out1;
-	}
-
-	/* Copy max message size, caller is expected to allocate that big */
-	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
-
-	/* Clear the available buffer */
-	priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;
-
-      out1:
-	setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);
-
-      out2:
-	return buf;
-}
-
-/**
- * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles doorbell interrupts. Parses a list of registered
- * doorbell event handlers and executes a matching event handler.
- */
-static irqreturn_t
-fsl_rio_dbell_handler(int irq, void *dev_instance)
-{
-	int dsr;
-	struct rio_mport *port = (struct rio_mport *)dev_instance;
-	struct rio_priv *priv = port->priv;
-
-	dsr = in_be32(&priv->msg_regs->dsr);
-
-	if (dsr & DOORBELL_DSR_TE) {
-		pr_info("RIO: doorbell reception error\n");
-		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
-		goto out;
-	}
-
-	if (dsr & DOORBELL_DSR_QFI) {
-		pr_info("RIO: doorbell queue full\n");
-		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
-	}
-
-	/* XXX Need to check/dispatch until queue empty */
-	if (dsr & DOORBELL_DSR_DIQI) {
-		u32 dmsg =
-		    (u32) priv->dbell_ring.virt +
-		    (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
-		struct rio_dbell *dbell;
-		int found = 0;
-
-		pr_debug
-		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
-		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
-
-		list_for_each_entry(dbell, &port->dbells, node) {
-			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
-			    (dbell->res->end >= DBELL_INF(dmsg))) {
-				found = 1;
-				break;
-			}
-		}
-		if (found) {
-			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
-				    DBELL_INF(dmsg));
-		} else {
-			pr_debug
-			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
-			     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
-		}
-		setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
-		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
-	}
-
-      out:
-	return IRQ_HANDLED;
-}
-
-/**
- * fsl_rio_doorbell_init - MPC85xx doorbell interface init
- * @mport: Master port implementing the inbound doorbell unit
- *
- * Initializes doorbell unit hardware and inbound DMA buffer
- * ring. Called from fsl_rio_setup(). Returns %0 on success
- * or %-ENOMEM on failure.
- */
-static int fsl_rio_doorbell_init(struct rio_mport *mport)
-{
-	struct rio_priv *priv = mport->priv;
-	int rc = 0;
-
-	/* Map outbound doorbell window immediately after maintenance window */
-	priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
-			    RIO_DBELL_WIN_SIZE);
-	if (!priv->dbell_win) {
-		printk(KERN_ERR
-		       "RIO: unable to map outbound doorbell window\n");
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	/* Initialize inbound doorbells */
-	priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 *
-		    DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
-	if (!priv->dbell_ring.virt) {
-		printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
-		rc = -ENOMEM;
-		iounmap(priv->dbell_win);
-		goto out;
-	}
-
-	/* Point dequeue/enqueue pointers at first entry in ring */
-	out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
-	out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);
-
-	/* Clear interrupt status */
-	out_be32(&priv->msg_regs->dsr, 0x00000091);
-
-	/* Hook up doorbell handler */
-	rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
-			 "dbell_rx", (void *)mport);
-	if (rc < 0) {
-		iounmap(priv->dbell_win);
-		dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE,
-				  priv->dbell_ring.virt, priv->dbell_ring.phys);
-		printk(KERN_ERR
-		       "MPC85xx RIO: unable to request inbound doorbell irq");
-		goto out;
-	}
-
-	/* Configure doorbells for snooping, 512 entries, and enable */
-	out_be32(&priv->msg_regs->dmr, 0x00108161);
-
-      out:
-	return rc;
-}
-
-static void port_error_handler(struct rio_mport *port, int offset)
+void fsl_rio_port_error_handler(int offset)
 {
 	/*XXX: Error recovery is not implemented, we just clear errors */
 	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
@@ -1098,263 +279,6 @@
 		out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
 	}
 }
-
-static void msg_unit_error_handler(struct rio_mport *port)
-{
-	struct rio_priv *priv = port->priv;
-
-	/*XXX: Error recovery is not implemented, we just clear errors */
-	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
-
-	out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
-	out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
-	out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
-	out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
-
-	out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
-	out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
-
-	out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
-}
-
-/**
- * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles port write interrupts. Parses a list of registered
- * port write event handlers and executes a matching event handler.
- */
-static irqreturn_t
-fsl_rio_port_write_handler(int irq, void *dev_instance)
-{
-	u32 ipwmr, ipwsr;
-	struct rio_mport *port = (struct rio_mport *)dev_instance;
-	struct rio_priv *priv = port->priv;
-	u32 epwisr, tmp;
-
-	epwisr = in_be32(priv->regs_win + RIO_EPWISR);
-	if (!(epwisr & RIO_EPWISR_PW))
-		goto pw_done;
-
-	ipwmr = in_be32(&priv->msg_regs->pwmr);
-	ipwsr = in_be32(&priv->msg_regs->pwsr);
-
-#ifdef DEBUG_PW
-	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
-	if (ipwsr & RIO_IPWSR_QF)
-		pr_debug(" QF");
-	if (ipwsr & RIO_IPWSR_TE)
-		pr_debug(" TE");
-	if (ipwsr & RIO_IPWSR_QFI)
-		pr_debug(" QFI");
-	if (ipwsr & RIO_IPWSR_PWD)
-		pr_debug(" PWD");
-	if (ipwsr & RIO_IPWSR_PWB)
-		pr_debug(" PWB");
-	pr_debug(" )\n");
-#endif
-	/* Schedule deferred processing if PW was received */
-	if (ipwsr & RIO_IPWSR_QFI) {
-		/* Save PW message (if there is room in FIFO),
-		 * otherwise discard it.
-		 */
-		if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
-			priv->port_write_msg.msg_count++;
-			kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
-				 RIO_PW_MSG_SIZE);
-		} else {
-			priv->port_write_msg.discard_count++;
-			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
-				 priv->port_write_msg.discard_count);
-		}
-		/* Clear interrupt and issue Clear Queue command. This allows
-		 * another port-write to be received.
-		 */
-		out_be32(&priv->msg_regs->pwsr,	RIO_IPWSR_QFI);
-		out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
-
-		schedule_work(&priv->pw_work);
-	}
-
-	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
-		priv->port_write_msg.err_count++;
-		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
-			 priv->port_write_msg.err_count);
-		/* Clear Transaction Error: port-write controller should be
-		 * disabled when clearing this error
-		 */
-		out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
-		out_be32(&priv->msg_regs->pwsr,	RIO_IPWSR_TE);
-		out_be32(&priv->msg_regs->pwmr, ipwmr);
-	}
-
-	if (ipwsr & RIO_IPWSR_PWD) {
-		priv->port_write_msg.discard_count++;
-		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
-			 priv->port_write_msg.discard_count);
-		out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
-	}
-
-pw_done:
-	if (epwisr & RIO_EPWISR_PINT1) {
-		tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
-		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
-		port_error_handler(port, 0);
-	}
-
-	if (epwisr & RIO_EPWISR_PINT2) {
-		tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
-		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
-		port_error_handler(port, 1);
-	}
-
-	if (epwisr & RIO_EPWISR_MU) {
-		tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
-		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
-		msg_unit_error_handler(port);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static void fsl_pw_dpc(struct work_struct *work)
-{
-	struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
-	unsigned long flags;
-	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
-
-	/*
-	 * Process port-write messages
-	 */
-	spin_lock_irqsave(&priv->pw_fifo_lock, flags);
-	while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
-			 RIO_PW_MSG_SIZE)) {
-		/* Process one message */
-		spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
-#ifdef DEBUG_PW
-		{
-		u32 i;
-		pr_debug("%s : Port-Write Message:", __func__);
-		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
-			if ((i%4) == 0)
-				pr_debug("\n0x%02x: 0x%08x", i*4,
-					 msg_buffer[i]);
-			else
-				pr_debug(" 0x%08x", msg_buffer[i]);
-		}
-		pr_debug("\n");
-		}
-#endif
-		/* Pass the port-write message to RIO core for processing */
-		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
-		spin_lock_irqsave(&priv->pw_fifo_lock, flags);
-	}
-	spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
-}
-
-/**
- * fsl_rio_pw_enable - enable/disable port-write interface init
- * @mport: Master port implementing the port write unit
- * @enable:    1=enable; 0=disable port-write message handling
- */
-static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
-{
-	struct rio_priv *priv = mport->priv;
-	u32 rval;
-
-	rval = in_be32(&priv->msg_regs->pwmr);
-
-	if (enable)
-		rval |= RIO_IPWMR_PWE;
-	else
-		rval &= ~RIO_IPWMR_PWE;
-
-	out_be32(&priv->msg_regs->pwmr, rval);
-
-	return 0;
-}
-
-/**
- * fsl_rio_port_write_init - MPC85xx port write interface init
- * @mport: Master port implementing the port write unit
- *
- * Initializes port write unit hardware and DMA buffer
- * ring. Called from fsl_rio_setup(). Returns %0 on success
- * or %-ENOMEM on failure.
- */
-static int fsl_rio_port_write_init(struct rio_mport *mport)
-{
-	struct rio_priv *priv = mport->priv;
-	int rc = 0;
-
-	/* Following configurations require a disabled port write controller */
-	out_be32(&priv->msg_regs->pwmr,
-		 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
-
-	/* Initialize port write */
-	priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
-					RIO_PW_MSG_SIZE,
-					&priv->port_write_msg.phys, GFP_KERNEL);
-	if (!priv->port_write_msg.virt) {
-		pr_err("RIO: unable allocate port write queue\n");
-		return -ENOMEM;
-	}
-
-	priv->port_write_msg.err_count = 0;
-	priv->port_write_msg.discard_count = 0;
-
-	/* Point dequeue/enqueue pointers at first entry */
-	out_be32(&priv->msg_regs->epwqbar, 0);
-	out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
-
-	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
-		 in_be32(&priv->msg_regs->epwqbar),
-		 in_be32(&priv->msg_regs->pwqbar));
-
-	/* Clear interrupt status IPWSR */
-	out_be32(&priv->msg_regs->pwsr,
-		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
-	/* Configure port write contoller for snooping enable all reporting,
-	   clear queue full */
-	out_be32(&priv->msg_regs->pwmr,
-		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
-
-
-	/* Hook up port-write handler */
-	rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
-			IRQF_SHARED, "port-write", (void *)mport);
-	if (rc < 0) {
-		pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
-		goto err_out;
-	}
-	/* Enable Error Interrupt */
-	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
-
-	INIT_WORK(&priv->pw_work, fsl_pw_dpc);
-	spin_lock_init(&priv->pw_fifo_lock);
-	if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
-		pr_err("FIFO allocation failed\n");
-		rc = -ENOMEM;
-		goto err_out_irq;
-	}
-
-	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
-		 in_be32(&priv->msg_regs->pwmr),
-		 in_be32(&priv->msg_regs->pwsr));
-
-	return rc;
-
-err_out_irq:
-	free_irq(IRQ_RIO_PW(mport), (void *)mport);
-err_out:
-	dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
-			  priv->port_write_msg.virt,
-			  priv->port_write_msg.phys);
-	return rc;
-}
-
 static inline void fsl_rio_info(struct device *dev, u32 ccsr)
 {
 	const char *str;
@@ -1411,16 +335,21 @@
 	struct rio_mport *port;
 	struct rio_priv *priv;
 	int rc = 0;
-	const u32 *dt_range, *cell;
-	struct resource regs;
+	const u32 *dt_range, *cell, *port_index;
+	u32 active_ports = 0;
+	struct resource regs, rmu_regs;
+	struct device_node *np, *rmu_node;
 	int rlen;
 	u32 ccsr;
-	u64 law_start, law_size;
+	u64 range_start, range_size;
 	int paw, aw, sw;
+	u32 i;
+	static int tmp;
+	struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL};
 
 	if (!dev->dev.of_node) {
 		dev_err(&dev->dev, "Device OF-Node is NULL");
-		return -EFAULT;
+		return -ENODEV;
 	}
 
 	rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
@@ -1429,37 +358,17 @@
 				dev->dev.of_node->full_name);
 		return -EFAULT;
 	}
-	dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name);
+	dev_info(&dev->dev, "Of-device full name %s\n",
+			dev->dev.of_node->full_name);
 	dev_info(&dev->dev, "Regs: %pR\n", &regs);
 
-	dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
-	if (!dt_range) {
-		dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
-				dev->dev.of_node->full_name);
-		return -EFAULT;
+	rio_regs_win = ioremap(regs.start, resource_size(&regs));
+	if (!rio_regs_win) {
+		dev_err(&dev->dev, "Unable to map rio register window\n");
+		rc = -ENOMEM;
+		goto err_rio_regs;
 	}
 
-	/* Get node address wide */
-	cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
-	if (cell)
-		aw = *cell;
-	else
-		aw = of_n_addr_cells(dev->dev.of_node);
-	/* Get node size wide */
-	cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
-	if (cell)
-		sw = *cell;
-	else
-		sw = of_n_size_cells(dev->dev.of_node);
-	/* Get parent address wide wide */
-	paw = of_n_addr_cells(dev->dev.of_node);
-
-	law_start = of_read_number(dt_range + aw, paw);
-	law_size = of_read_number(dt_range + aw + paw, sw);
-
-	dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
-			law_start, law_size);
-
 	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
 	if (!ops) {
 		rc = -ENOMEM;
@@ -1479,143 +388,257 @@
 	ops->add_inb_buffer = fsl_add_inb_buffer;
 	ops->get_inb_message = fsl_get_inb_message;
 
-	port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
-	if (!port) {
+	rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
+	if (!rmu_node)
+		goto err_rmu;
+	rc = of_address_to_resource(rmu_node, 0, &rmu_regs);
+	if (rc) {
+		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+				rmu_node->full_name);
+		goto err_rmu;
+	}
+	rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
+	if (!rmu_regs_win) {
+		dev_err(&dev->dev, "Unable to map rmu register window\n");
 		rc = -ENOMEM;
-		goto err_port;
+		goto err_rmu;
 	}
-	port->index = 0;
+	for_each_compatible_node(np, NULL, "fsl,srio-msg-unit") {
+		rmu_np[tmp] = np;
+		tmp++;
+	}
 
-	priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
-	if (!priv) {
-		printk(KERN_ERR "Can't alloc memory for 'priv'\n");
+	/*set up doorbell node*/
+	np = of_find_compatible_node(NULL, NULL, "fsl,srio-dbell-unit");
+	if (!np) {
+		rc = -ENODEV;
+		goto err_dbell;
+	}
+	dbell = kzalloc(sizeof(struct fsl_rio_dbell), GFP_KERNEL);
+	if (!(dbell)) {
+		dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_dbell'\n");
 		rc = -ENOMEM;
-		goto err_priv;
+		goto err_dbell;
 	}
+	dbell->dev = &dev->dev;
+	dbell->bellirq = irq_of_parse_and_map(np, 1);
+	dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq);
 
-	INIT_LIST_HEAD(&port->dbells);
-	port->iores.start = law_start;
-	port->iores.end = law_start + law_size - 1;
-	port->iores.flags = IORESOURCE_MEM;
-	port->iores.name = "rio_io_win";
-
-	if (request_resource(&iomem_resource, &port->iores) < 0) {
-		dev_err(&dev->dev, "RIO: Error requesting master port region"
-			" 0x%016llx-0x%016llx\n",
-			(u64)port->iores.start, (u64)port->iores.end);
-			rc = -ENOMEM;
-			goto err_res;
+	aw = of_n_addr_cells(np);
+	dt_range = of_get_property(np, "reg", &rlen);
+	if (!dt_range) {
+		pr_err("%s: unable to find 'reg' property\n",
+			np->full_name);
+		rc = -ENOMEM;
+		goto err_pw;
 	}
+	range_start = of_read_number(dt_range, aw);
+	dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win +
+				(u32)range_start);
 
-	priv->pwirq   = irq_of_parse_and_map(dev->dev.of_node, 0);
-	priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
-	priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
-	priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
-	dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
-		 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
-
-	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
-	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
-	rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
-	strcpy(port->name, "RIO0 mport");
-
-	priv->dev = &dev->dev;
-
-	port->ops = ops;
-	port->priv = priv;
-	port->phys_efptr = 0x100;
-
-	priv->regs_win = ioremap(regs.start, resource_size(&regs));
-	rio_regs_win = priv->regs_win;
-
-	/* Probe the master port phy type */
-	ccsr = in_be32(priv->regs_win + RIO_CCSR);
-	port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
-	dev_info(&dev->dev, "RapidIO PHY type: %s\n",
-			(port->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
-			((port->phy_type == RIO_PHY_SERIAL) ? "serial" :
-			 "unknown"));
-	/* Checking the port training status */
-	if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
-		dev_err(&dev->dev, "Port is not ready. "
-				   "Try to restart connection...\n");
-		switch (port->phy_type) {
-		case RIO_PHY_SERIAL:
-			/* Disable ports */
-			out_be32(priv->regs_win + RIO_CCSR, 0);
-			/* Set 1x lane */
-			setbits32(priv->regs_win + RIO_CCSR, 0x02000000);
-			/* Enable ports */
-			setbits32(priv->regs_win + RIO_CCSR, 0x00600000);
-			break;
-		case RIO_PHY_PARALLEL:
-			/* Disable ports */
-			out_be32(priv->regs_win + RIO_CCSR, 0x22000000);
-			/* Enable ports */
-			out_be32(priv->regs_win + RIO_CCSR, 0x44000000);
-			break;
-		}
-		msleep(100);
-		if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
-			dev_err(&dev->dev, "Port restart failed.\n");
-			rc = -ENOLINK;
-			goto err;
-		}
-		dev_info(&dev->dev, "Port restart success!\n");
+	/*set up port write node*/
+	np = of_find_compatible_node(NULL, NULL, "fsl,srio-port-write-unit");
+	if (!np) {
+		rc = -ENODEV;
+		goto err_pw;
 	}
-	fsl_rio_info(&dev->dev, ccsr);
-
-	port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
-					& RIO_PEF_CTLS) >> 4;
-	dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
-			port->sys_size ? 65536 : 256);
-
-	if (rio_register_mport(port))
+	pw = kzalloc(sizeof(struct fsl_rio_pw), GFP_KERNEL);
+	if (!(pw)) {
+		dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_pw'\n");
+		rc = -ENOMEM;
+		goto err_pw;
+	}
+	pw->dev = &dev->dev;
+	pw->pwirq = irq_of_parse_and_map(np, 0);
+	dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq);
+	aw = of_n_addr_cells(np);
+	dt_range = of_get_property(np, "reg", &rlen);
+	if (!dt_range) {
+		pr_err("%s: unable to find 'reg' property\n",
+			np->full_name);
+		rc = -ENOMEM;
 		goto err;
+	}
+	range_start = of_read_number(dt_range, aw);
+	pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start);
 
-	if (port->host_deviceid >= 0)
-		out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
-			RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
-	else
-		out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
+	/*set up ports node*/
+	for_each_child_of_node(dev->dev.of_node, np) {
+		port_index = of_get_property(np, "cell-index", NULL);
+		if (!port_index) {
+			dev_err(&dev->dev, "Can't get %s property 'cell-index'\n",
+					np->full_name);
+			continue;
+		}
 
-	priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
-					+ RIO_ATMU_REGS_OFFSET);
-	priv->maint_atmu_regs = priv->atmu_regs + 1;
-	priv->dbell_atmu_regs = priv->atmu_regs + 2;
-	priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win +
-				((port->phy_type == RIO_PHY_SERIAL) ?
-				RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET));
+		dt_range = of_get_property(np, "ranges", &rlen);
+		if (!dt_range) {
+			dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
+					np->full_name);
+			continue;
+		}
 
-	/* Set to receive any dist ID for serial RapidIO controller. */
-	if (port->phy_type == RIO_PHY_SERIAL)
-		out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);
+		/* Get node address width */
+		cell = of_get_property(np, "#address-cells", NULL);
+		if (cell)
+			aw = *cell;
+		else
+			aw = of_n_addr_cells(np);
+		/* Get node size width */
+		cell = of_get_property(np, "#size-cells", NULL);
+		if (cell)
+			sw = *cell;
+		else
+			sw = of_n_size_cells(np);
+		/* Get parent address width */
+		paw = of_n_addr_cells(np);
+		range_start = of_read_number(dt_range + aw, paw);
+		range_size = of_read_number(dt_range + aw + paw, sw);
 
-	/* Configure maintenance transaction window */
-	out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
-	out_be32(&priv->maint_atmu_regs->rowar,
-		 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
+		dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n",
+				np->full_name, range_start, range_size);
 
-	priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
+		port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
+		if (!port)
+			continue;
 
-	/* Configure outbound doorbell window */
-	out_be32(&priv->dbell_atmu_regs->rowbar,
-			(law_start + RIO_MAINT_WIN_SIZE) >> 12);
-	out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b);	/* 4k */
-	fsl_rio_doorbell_init(port);
-	fsl_rio_port_write_init(port);
+		i = *port_index - 1;
+		port->index = (unsigned char)i;
+
+		priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
+		if (!priv) {
+			dev_err(&dev->dev, "Can't alloc memory for 'priv'\n");
+			kfree(port);
+			continue;
+		}
+
+		INIT_LIST_HEAD(&port->dbells);
+		port->iores.start = range_start;
+		port->iores.end = port->iores.start + range_size - 1;
+		port->iores.flags = IORESOURCE_MEM;
+		port->iores.name = "rio_io_win";
+
+		if (request_resource(&iomem_resource, &port->iores) < 0) {
+			dev_err(&dev->dev, "RIO: Error requesting master port region"
+				" 0x%016llx-0x%016llx\n",
+				(u64)port->iores.start, (u64)port->iores.end);
+				kfree(priv);
+				kfree(port);
+				continue;
+		}
+		sprintf(port->name, "RIO mport %d", i);
+
+		priv->dev = &dev->dev;
+		port->ops = ops;
+		port->priv = priv;
+		port->phys_efptr = 0x100;
+		priv->regs_win = rio_regs_win;
+
+		/* Probe the master port phy type */
+		ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20);
+		port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
+		if (port->phy_type == RIO_PHY_PARALLEL) {
+			dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n");
+			release_resource(&port->iores);
+			kfree(priv);
+			kfree(port);
+			continue;
+		}
+		dev_info(&dev->dev, "RapidIO PHY type: Serial\n");
+		/* Checking the port training status */
+		if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) {
+			dev_err(&dev->dev, "Port %d is not ready. "
+			"Try to restart connection...\n", i);
+			/* Disable ports */
+			out_be32(priv->regs_win
+				+ RIO_CCSR + i*0x20, 0);
+			/* Set 1x lane */
+			setbits32(priv->regs_win
+				+ RIO_CCSR + i*0x20, 0x02000000);
+			/* Enable ports */
+			setbits32(priv->regs_win
+				+ RIO_CCSR + i*0x20, 0x00600000);
+			msleep(100);
+			if (in_be32((priv->regs_win
+					+ RIO_ESCSR + i*0x20)) & 1) {
+				dev_err(&dev->dev,
+					"Port %d restart failed.\n", i);
+				release_resource(&port->iores);
+				kfree(priv);
+				kfree(port);
+				continue;
+			}
+			dev_info(&dev->dev, "Port %d restart success!\n", i);
+		}
+		fsl_rio_info(&dev->dev, ccsr);
+
+		port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
+					& RIO_PEF_CTLS) >> 4;
+		dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
+				port->sys_size ? 65536 : 256);
+
+		if (rio_register_mport(port)) {
+			release_resource(&port->iores);
+			kfree(priv);
+			kfree(port);
+			continue;
+		}
+		if (port->host_deviceid >= 0)
+			out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+				RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+		else
+			out_be32(priv->regs_win + RIO_GCCSR,
+				RIO_PORT_GEN_MASTER);
+
+		priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+			+ ((i == 0) ? RIO_ATMU_REGS_PORT1_OFFSET :
+			RIO_ATMU_REGS_PORT2_OFFSET));
+
+		priv->maint_atmu_regs = priv->atmu_regs + 1;
+
+		/* Set to receive any dist ID for serial RapidIO controller. */
+		if (port->phy_type == RIO_PHY_SERIAL)
+			out_be32((priv->regs_win
+				+ RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA);
+
+		/* Configure maintenance transaction window */
+		out_be32(&priv->maint_atmu_regs->rowbar,
+			port->iores.start >> 12);
+		out_be32(&priv->maint_atmu_regs->rowar,
+			 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
+
+		priv->maint_win = ioremap(port->iores.start,
+				RIO_MAINT_WIN_SIZE);
+
+		rio_law_start = range_start;
+
+		fsl_rio_setup_rmu(port, rmu_np[i]);
+
+		dbell->mport[i] = port;
+
+		active_ports++;
+	}
+
+	if (!active_ports) {
+		rc = -ENOLINK;
+		goto err;
+	}
+
+	fsl_rio_doorbell_init(dbell);
+	fsl_rio_port_write_init(pw);
 
 	return 0;
 err:
-	iounmap(priv->regs_win);
-	release_resource(&port->iores);
-err_res:
-	kfree(priv);
-err_priv:
-	kfree(port);
-err_port:
+	kfree(pw);
+err_pw:
+	kfree(dbell);
+err_dbell:
+	iounmap(rmu_regs_win);
+err_rmu:
 	kfree(ops);
 err_ops:
+	iounmap(rio_regs_win);
+err_rio_regs:
 	return rc;
 }
 
@@ -1631,7 +654,7 @@
 
 static const struct of_device_id fsl_of_rio_rpn_ids[] = {
 	{
-		.compatible = "fsl,rapidio-delta",
+		.compatible = "fsl,srio",
 	},
 	{},
 };
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
new file mode 100644
index 0000000..ae8e274
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rio.h
@@ -0,0 +1,135 @@
+/*
+ * Freescale MPC85xx/MPC86xx RapidIO support
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
+ * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
+ * Zhang Wei <wei.zhang@freescale.com>
+ * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
+ * Liu Gang <Gang.Liu@freescale.com>
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __FSL_RIO_H
+#define __FSL_RIO_H
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/kfifo.h>
+
+#define RIO_REGS_WIN(mport)	(((struct rio_priv *)(mport->priv))->regs_win)
+
+#define RIO_MAINT_WIN_SIZE	0x400000
+#define RIO_LTLEDCSR		0x0608
+
+#define DOORBELL_ROWAR_EN	0x80000000
+#define DOORBELL_ROWAR_TFLOWLV	0x08000000 /* highest priority level */
+#define DOORBELL_ROWAR_PCI	0x02000000 /* PCI window */
+#define DOORBELL_ROWAR_NREAD	0x00040000 /* NREAD */
+#define DOORBELL_ROWAR_MAINTRD	0x00070000  /* maintenance read */
+#define DOORBELL_ROWAR_RES	0x00002000 /* wrtpy: reserved */
+#define DOORBELL_ROWAR_MAINTWD	0x00007000
+#define DOORBELL_ROWAR_SIZE	0x0000000b /* window size is 4k */
+
+#define RIO_ATMU_REGS_PORT1_OFFSET	0x10c00
+#define RIO_ATMU_REGS_PORT2_OFFSET	0x10e00
+#define RIO_S_DBELL_REGS_OFFSET	0x13400
+#define RIO_S_PW_REGS_OFFSET	0x134e0
+#define RIO_ATMU_REGS_DBELL_OFFSET	0x10C40
+
+#define MAX_MSG_UNIT_NUM	2
+#define MAX_PORT_NUM		4
+
+struct rio_atmu_regs {
+	 u32 rowtar;
+	 u32 rowtear;
+	 u32 rowbar;
+	 u32 pad1;
+	 u32 rowar;
+	 u32 pad2[3];
+};
+
+struct rio_dbell_ring {
+	void *virt;
+	dma_addr_t phys;
+};
+
+struct rio_port_write_msg {
+	 void *virt;
+	 dma_addr_t phys;
+	 u32 msg_count;
+	 u32 err_count;
+	 u32 discard_count;
+};
+
+struct fsl_rio_dbell {
+	struct rio_mport *mport[MAX_PORT_NUM];
+	struct device *dev;
+	struct rio_dbell_regs __iomem *dbell_regs;
+	struct rio_dbell_ring dbell_ring;
+	int bellirq;
+};
+
+struct fsl_rio_pw {
+	struct device *dev;
+	struct rio_pw_regs __iomem *pw_regs;
+	struct rio_port_write_msg port_write_msg;
+	int pwirq;
+	struct work_struct pw_work;
+	struct kfifo pw_fifo;
+	spinlock_t pw_fifo_lock;
+};
+
+struct rio_priv {
+	struct device *dev;
+	void __iomem *regs_win;
+	struct rio_atmu_regs __iomem *atmu_regs;
+	struct rio_atmu_regs __iomem *maint_atmu_regs;
+	void __iomem *maint_win;
+	void *rmm_handle; /* RapidIO message manager(unit) Handle */
+};
+
+extern void __iomem *rio_regs_win;
+extern void __iomem *rmu_regs_win;
+
+extern resource_size_t rio_law_start;
+
+extern struct fsl_rio_dbell *dbell;
+extern struct fsl_rio_pw *pw;
+
+extern int fsl_rio_setup_rmu(struct rio_mport *mport,
+	struct device_node *node);
+extern int fsl_rio_port_write_init(struct fsl_rio_pw *pw);
+extern int fsl_rio_pw_enable(struct rio_mport *mport, int enable);
+extern void fsl_rio_port_error_handler(int offset);
+extern int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell);
+
+extern int fsl_rio_doorbell_send(struct rio_mport *mport,
+				int index, u16 destid, u16 data);
+extern int fsl_add_outb_message(struct rio_mport *mport,
+	struct rio_dev *rdev,
+	int mbox, void *buffer, size_t len);
+extern int fsl_open_outb_mbox(struct rio_mport *mport,
+	void *dev_id, int mbox, int entries);
+extern void fsl_close_outb_mbox(struct rio_mport *mport, int mbox);
+extern int fsl_open_inb_mbox(struct rio_mport *mport,
+	void *dev_id, int mbox, int entries);
+extern void fsl_close_inb_mbox(struct rio_mport *mport, int mbox);
+extern int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf);
+extern void *fsl_get_inb_message(struct rio_mport *mport, int mbox);
+
+#endif
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
new file mode 100644
index 0000000..1548578
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -0,0 +1,1104 @@
+/*
+ * Freescale MPC85xx/MPC86xx RapidIO RMU support
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
+ * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
+ * Zhang Wei <wei.zhang@freescale.com>
+ * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
+ * Liu Gang <Gang.Liu@freescale.com>
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include "fsl_rio.h"
+
+#define GET_RMM_HANDLE(mport) \
+		(((struct rio_priv *)(mport->priv))->rmm_handle)
+
+/* RapidIO IRQ definitions, read from the OF device tree */
+#define IRQ_RIO_PW(m)		(((struct fsl_rio_pw *)(m))->pwirq)
+#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
+#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
+#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
+
+#define RIO_MIN_TX_RING_SIZE	2
+#define RIO_MAX_TX_RING_SIZE	2048
+#define RIO_MIN_RX_RING_SIZE	2
+#define RIO_MAX_RX_RING_SIZE	2048
+
+#define RIO_IPWMR_SEN		0x00100000
+#define RIO_IPWMR_QFIE		0x00000100
+#define RIO_IPWMR_EIE		0x00000020
+#define RIO_IPWMR_CQ		0x00000002
+#define RIO_IPWMR_PWE		0x00000001
+
+#define RIO_IPWSR_QF		0x00100000
+#define RIO_IPWSR_TE		0x00000080
+#define RIO_IPWSR_QFI		0x00000010
+#define RIO_IPWSR_PWD		0x00000008
+#define RIO_IPWSR_PWB		0x00000004
+
+#define RIO_EPWISR		0x10010
+/* EPWISR Error match value */
+#define RIO_EPWISR_PINT1	0x80000000
+#define RIO_EPWISR_PINT2	0x40000000
+#define RIO_EPWISR_MU		0x00000002
+#define RIO_EPWISR_PW		0x00000001
+
+#define IPWSR_CLEAR		0x98
+#define OMSR_CLEAR		0x1cb3
+#define IMSR_CLEAR		0x491
+#define IDSR_CLEAR		0x91
+#define ODSR_CLEAR		0x1c00
+#define LTLEECSR_ENABLE_ALL	0xFFC000FC
+#define RIO_LTLEECSR		0x060c
+
+#define RIO_IM0SR		0x64
+#define RIO_IM1SR		0x164
+#define RIO_OM0SR		0x4
+#define RIO_OM1SR		0x104
+
+#define RIO_DBELL_WIN_SIZE	0x1000
+
+#define RIO_MSG_OMR_MUI		0x00000002
+#define RIO_MSG_OSR_TE		0x00000080
+#define RIO_MSG_OSR_QOI		0x00000020
+#define RIO_MSG_OSR_QFI		0x00000010
+#define RIO_MSG_OSR_MUB		0x00000004
+#define RIO_MSG_OSR_EOMI	0x00000002
+#define RIO_MSG_OSR_QEI		0x00000001
+
+#define RIO_MSG_IMR_MI		0x00000002
+#define RIO_MSG_ISR_TE		0x00000080
+#define RIO_MSG_ISR_QFI		0x00000010
+#define RIO_MSG_ISR_DIQI	0x00000001
+
+#define RIO_MSG_DESC_SIZE	32
+#define RIO_MSG_BUFFER_SIZE	4096
+
+#define DOORBELL_DMR_DI		0x00000002
+#define DOORBELL_DSR_TE		0x00000080
+#define DOORBELL_DSR_QFI	0x00000010
+#define DOORBELL_DSR_DIQI	0x00000001
+#define DOORBELL_TID_OFFSET	0x02
+#define DOORBELL_SID_OFFSET	0x04
+#define DOORBELL_INFO_OFFSET	0x06
+
+#define DOORBELL_MESSAGE_SIZE	0x08
+#define DBELL_SID(x)		(*(u16 *)(x + DOORBELL_SID_OFFSET))
+#define DBELL_TID(x)		(*(u16 *)(x + DOORBELL_TID_OFFSET))
+#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))
+
+struct rio_msg_regs {
+	u32 omr;
+	u32 osr;
+	u32 pad1;
+	u32 odqdpar;
+	u32 pad2;
+	u32 osar;
+	u32 odpr;
+	u32 odatr;
+	u32 odcr;
+	u32 pad3;
+	u32 odqepar;
+	u32 pad4[13];
+	u32 imr;
+	u32 isr;
+	u32 pad5;
+	u32 ifqdpar;
+	u32 pad6;
+	u32 ifqepar;
+};
+
+struct rio_dbell_regs {
+	u32 odmr;
+	u32 odsr;
+	u32 pad1[4];
+	u32 oddpr;
+	u32 oddatr;
+	u32 pad2[3];
+	u32 odretcr;
+	u32 pad3[12];
+	u32 dmr;
+	u32 dsr;
+	u32 pad4;
+	u32 dqdpar;
+	u32 pad5;
+	u32 dqepar;
+};
+
+struct rio_pw_regs {
+	u32 pwmr;
+	u32 pwsr;
+	u32 epwqbar;
+	u32 pwqbar;
+};
+
+
+struct rio_tx_desc {
+	u32 pad1;
+	u32 saddr;
+	u32 dport;
+	u32 dattr;
+	u32 pad2;
+	u32 pad3;
+	u32 dwcnt;
+	u32 pad4;
+};
+
+struct rio_msg_tx_ring {
+	void *virt;
+	dma_addr_t phys;
+	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
+	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
+	int tx_slot;
+	int size;
+	void *dev_id;
+};
+
+struct rio_msg_rx_ring {
+	void *virt;
+	dma_addr_t phys;
+	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
+	int rx_slot;
+	int size;
+	void *dev_id;
+};
+
+struct fsl_rmu {
+	struct rio_msg_regs __iomem *msg_regs;
+	struct rio_msg_tx_ring msg_tx_ring;
+	struct rio_msg_rx_ring msg_rx_ring;
+	int txirq;
+	int rxirq;
+};
+
+/**
+ * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles outbound message interrupts. Executes a registered outbound
+ * mailbox event handler and acks the interrupt occurrence.
+ */
+static irqreturn_t
+fsl_rio_tx_handler(int irq, void *dev_instance)
+{
+	int osr;
+	struct rio_mport *port = (struct rio_mport *)dev_instance;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
+
+	osr = in_be32(&rmu->msg_regs->osr);
+
+	if (osr & RIO_MSG_OSR_TE) {
+		pr_info("RIO: outbound message transmission error\n");
+		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
+		goto out;
+	}
+
+	if (osr & RIO_MSG_OSR_QOI) {
+		pr_info("RIO: outbound message queue overflow\n");
+		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
+		goto out;
+	}
+
+	if (osr & RIO_MSG_OSR_EOMI) {
+		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
+		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
+		if (port->outb_msg[0].mcback != NULL) {
+			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
+					-1,
+					slot);
+		}
+		/* Ack the end-of-message interrupt */
+		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
+	}
+
+out:
+	return IRQ_HANDLED;
+}
+
+/**
+ * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles inbound message interrupts. Executes a registered inbound
+ * mailbox event handler and acks the interrupt occurrence.
+ */
+static irqreturn_t
+fsl_rio_rx_handler(int irq, void *dev_instance)
+{
+	int isr;
+	struct rio_mport *port = (struct rio_mport *)dev_instance;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
+
+	isr = in_be32(&rmu->msg_regs->isr);
+
+	if (isr & RIO_MSG_ISR_TE) {
+		pr_info("RIO: inbound message reception error\n");
+		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
+		goto out;
+	}
+
+	/* XXX Need to check/dispatch until queue empty */
+	if (isr & RIO_MSG_ISR_DIQI) {
+		/*
+		* Can receive messages for any mailbox/letter to that
+		* mailbox destination. So, make the callback with an
+		* unknown/invalid mailbox number argument.
+		*/
+		if (port->inb_msg[0].mcback != NULL)
+			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
+				-1,
+				-1);
+
+		/* Ack the queueing interrupt */
+		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
+	}
+
+out:
+	return IRQ_HANDLED;
+}
+
+/**
+ * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles doorbell interrupts. Parses a list of registered
+ * doorbell event handlers and executes a matching event handler.
+ */
+static irqreturn_t
+fsl_rio_dbell_handler(int irq, void *dev_instance)
+{
+	int dsr;
+	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
+	int i;
+
+	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
+
+	if (dsr & DOORBELL_DSR_TE) {
+		pr_info("RIO: doorbell reception error\n");
+		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
+		goto out;
+	}
+
+	if (dsr & DOORBELL_DSR_QFI) {
+		pr_info("RIO: doorbell queue full\n");
+		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
+	}
+
+	/* XXX Need to check/dispatch until queue empty */
+	if (dsr & DOORBELL_DSR_DIQI) {
+		u32 dmsg =
+			(u32) fsl_dbell->dbell_ring.virt +
+			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
+		struct rio_dbell *dbell;
+		int found = 0;
+
+		pr_debug
+			("RIO: processing doorbell,"
+			" sid %2.2x tid %2.2x info %4.4x\n",
+			DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+
+		for (i = 0; i < MAX_PORT_NUM; i++) {
+			if (fsl_dbell->mport[i]) {
+				list_for_each_entry(dbell,
+					&fsl_dbell->mport[i]->dbells, node) {
+					if ((dbell->res->start
+						<= DBELL_INF(dmsg))
+						&& (dbell->res->end
+						>= DBELL_INF(dmsg))) {
+						found = 1;
+						break;
+					}
+				}
+				if (found && dbell->dinb) {
+					dbell->dinb(fsl_dbell->mport[i],
+						dbell->dev_id, DBELL_SID(dmsg),
+						DBELL_TID(dmsg),
+						DBELL_INF(dmsg));
+					break;
+				}
+			}
+		}
+
+		if (!found) {
+			pr_debug
+				("RIO: spurious doorbell,"
+				" sid %2.2x tid %2.2x info %4.4x\n",
+				DBELL_SID(dmsg), DBELL_TID(dmsg),
+				DBELL_INF(dmsg));
+		}
+		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
+		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
+	}
+
+out:
+	return IRQ_HANDLED;
+}
+
+void msg_unit_error_handler(void)
+{
+
+	/*XXX: Error recovery is not implemented, we just clear errors */
+	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
+
+	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
+	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
+	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
+	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
+
+	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
+	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
+
+	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
+}
+
+/**
+ * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles port write interrupts. Parses a list of registered
+ * port write event handlers and executes a matching event handler.
+ */
+static irqreturn_t
+fsl_rio_port_write_handler(int irq, void *dev_instance)
+{
+	u32 ipwmr, ipwsr;
+	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
+	u32 epwisr, tmp;
+
+	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
+	if (!(epwisr & RIO_EPWISR_PW))
+		goto pw_done;
+
+	ipwmr = in_be32(&pw->pw_regs->pwmr);
+	ipwsr = in_be32(&pw->pw_regs->pwsr);
+
+#ifdef DEBUG_PW
+	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
+	if (ipwsr & RIO_IPWSR_QF)
+		pr_debug(" QF");
+	if (ipwsr & RIO_IPWSR_TE)
+		pr_debug(" TE");
+	if (ipwsr & RIO_IPWSR_QFI)
+		pr_debug(" QFI");
+	if (ipwsr & RIO_IPWSR_PWD)
+		pr_debug(" PWD");
+	if (ipwsr & RIO_IPWSR_PWB)
+		pr_debug(" PWB");
+	pr_debug(" )\n");
+#endif
+	/* Schedule deferred processing if PW was received */
+	if (ipwsr & RIO_IPWSR_QFI) {
+		/* Save PW message (if there is room in FIFO),
+		 * otherwise discard it.
+		 */
+		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
+			pw->port_write_msg.msg_count++;
+			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
+				 RIO_PW_MSG_SIZE);
+		} else {
+			pw->port_write_msg.discard_count++;
+			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+				 pw->port_write_msg.discard_count);
+		}
+		/* Clear interrupt and issue Clear Queue command. This allows
+		 * another port-write to be received.
+		 */
+		out_be32(&pw->pw_regs->pwsr,	RIO_IPWSR_QFI);
+		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
+		schedule_work(&pw->pw_work);
+	}
+
+	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+		pw->port_write_msg.err_count++;
+		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+			 pw->port_write_msg.err_count);
+		/* Clear Transaction Error: port-write controller should be
+		 * disabled when clearing this error
+		 */
+		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+		out_be32(&pw->pw_regs->pwsr,	RIO_IPWSR_TE);
+		out_be32(&pw->pw_regs->pwmr, ipwmr);
+	}
+
+	if (ipwsr & RIO_IPWSR_PWD) {
+		pw->port_write_msg.discard_count++;
+		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+			 pw->port_write_msg.discard_count);
+		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
+	}
+
+pw_done:
+	if (epwisr & RIO_EPWISR_PINT1) {
+		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
+		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+		fsl_rio_port_error_handler(0);
+	}
+
+	if (epwisr & RIO_EPWISR_PINT2) {
+		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
+		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+		fsl_rio_port_error_handler(1);
+	}
+
+	if (epwisr & RIO_EPWISR_MU) {
+		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
+		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+		msg_unit_error_handler();
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void fsl_pw_dpc(struct work_struct *work)
+{
+	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
+	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
+
+	/*
+	 * Process port-write messages
+	 */
+	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer,
+			 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
+		/* Process one message */
+#ifdef DEBUG_PW
+		{
+		u32 i;
+		pr_debug("%s : Port-Write Message:", __func__);
+		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
+			if ((i%4) == 0)
+				pr_debug("\n0x%02x: 0x%08x", i*4,
+					 msg_buffer[i]);
+			else
+				pr_debug(" 0x%08x", msg_buffer[i]);
+		}
+		pr_debug("\n");
+		}
+#endif
+		/* Pass the port-write message to RIO core for processing */
+		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+	}
+}
+
+/**
+ * fsl_rio_pw_enable - enable/disable port-write message handling
+ * @mport: Master port implementing the port write unit
+ * @enable:    1=enable; 0=disable port-write message handling
+ */
+int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
+{
+	u32 rval;
+
+	rval = in_be32(&pw->pw_regs->pwmr);
+
+	if (enable)
+		rval |= RIO_IPWMR_PWE;
+	else
+		rval &= ~RIO_IPWMR_PWE;
+
+	out_be32(&pw->pw_regs->pwmr, rval);
+
+	return 0;
+}
+
+/**
+ * fsl_rio_port_write_init - MPC85xx port write interface init
+ * @pw: fsl_rio_pw structure describing the port write unit
+ *
+ * Initializes port write unit hardware and DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+
+int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
+{
+	int rc = 0;
+
+	/* Following configurations require a disabled port write controller */
+	out_be32(&pw->pw_regs->pwmr,
+		 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
+
+	/* Initialize port write */
+	pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
+					RIO_PW_MSG_SIZE,
+					&pw->port_write_msg.phys, GFP_KERNEL);
+	if (!pw->port_write_msg.virt) {
+		pr_err("RIO: unable allocate port write queue\n");
+		return -ENOMEM;
+	}
+
+	pw->port_write_msg.err_count = 0;
+	pw->port_write_msg.discard_count = 0;
+
+	/* Point dequeue/enqueue pointers at first entry */
+	out_be32(&pw->pw_regs->epwqbar, 0);
+	out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
+
+	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
+		 in_be32(&pw->pw_regs->epwqbar),
+		 in_be32(&pw->pw_regs->pwqbar));
+
+	/* Clear interrupt status IPWSR */
+	out_be32(&pw->pw_regs->pwsr,
+		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+	/* Configure port write controller for snooping, enable all reporting,
+	   clear queue full */
+	out_be32(&pw->pw_regs->pwmr,
+		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
+
+
+	/* Hook up port-write handler */
+	rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
+			IRQF_SHARED, "port-write", (void *)pw);
+	if (rc < 0) {
+		pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
+		goto err_out;
+	}
+	/* Enable Error Interrupt */
+	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
+
+	INIT_WORK(&pw->pw_work, fsl_pw_dpc);
+	spin_lock_init(&pw->pw_fifo_lock);
+	if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+		pr_err("FIFO allocation failed\n");
+		rc = -ENOMEM;
+		goto err_out_irq;
+	}
+
+	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
+		 in_be32(&pw->pw_regs->pwmr),
+		 in_be32(&pw->pw_regs->pwsr));
+
+	return rc;
+
+err_out_irq:
+	free_irq(IRQ_RIO_PW(pw), (void *)pw);
+err_out:
+	dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
+		pw->port_write_msg.virt,
+		pw->port_write_msg.phys);
+	return rc;
+}
+
+/**
+ * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of target device
+ * @data: 16-bit info field of RapidIO doorbell message
+ *
+ * Sends a MPC85xx doorbell message. Returns %0 on success or
+ * %-EINVAL on failure.
+ */
+int fsl_rio_doorbell_send(struct rio_mport *mport,
+				int index, u16 destid, u16 data)
+{
+	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
+		 index, destid, data);
+
+	/* On serial RapidIO silicon, such as MPC8548 and MPC8641,
+	 * the operations below are required.
+	 */
+	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
+	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
+	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
+	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
+	out_be32(&dbell->dbell_regs->odmr, 0x00000001);
+
+	return 0;
+}
+
+/**
+ * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ *
+ * Adds the @buffer message to the MPC85xx outbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
+ */
+int
+fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+			void *buffer, size_t len)
+{
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+	u32 omr;
+	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
+					+ rmu->msg_tx_ring.tx_slot;
+	int ret = 0;
+
+	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
+		 "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
+	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Copy and clear rest of buffer */
+	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
+			len);
+	if (len < (RIO_MAX_MSG_SIZE - 4))
+		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
+				+ len, 0, RIO_MAX_MSG_SIZE - len);
+
+	/* Set mbox field for message, and set destid */
+	desc->dport = (rdev->destid << 16) | (mbox & 0x3);
+
+	/* Enable EOMI interrupt and priority */
+	desc->dattr = 0x28000000 | ((mport->index) << 20);
+
+	/* Set transfer size aligned to next power of 2 (in double words) */
+	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
+
+	/* Set snooping and source buffer address */
+	desc->saddr = 0x00000004
+		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
+
+	/* Increment enqueue pointer */
+	omr = in_be32(&rmu->msg_regs->omr);
+	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
+
+	/* Go to next descriptor */
+	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
+		rmu->msg_tx_ring.tx_slot = 0;
+
+out:
+	return ret;
+}
+
+/**
+ * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the outbound message interrupt,
+ * and enables the outbound message unit. Returns %0 on success and
+ * %-EINVAL or %-ENOMEM on failure.
+ */
+int
+fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+	int i, j, rc = 0;
+	struct rio_priv *priv = mport->priv;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+	if ((entries < RIO_MIN_TX_RING_SIZE) ||
+		(entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Initialize shadow copy ring */
+	rmu->msg_tx_ring.dev_id = dev_id;
+	rmu->msg_tx_ring.size = entries;
+
+	for (i = 0; i < rmu->msg_tx_ring.size; i++) {
+		rmu->msg_tx_ring.virt_buffer[i] =
+			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
+				&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
+		if (!rmu->msg_tx_ring.virt_buffer[i]) {
+			rc = -ENOMEM;
+			for (j = 0; j < rmu->msg_tx_ring.size; j++)
+				if (rmu->msg_tx_ring.virt_buffer[j])
+					dma_free_coherent(priv->dev,
+							RIO_MSG_BUFFER_SIZE,
+							rmu->msg_tx_ring.
+							virt_buffer[j],
+							rmu->msg_tx_ring.
+							phys_buffer[j]);
+			goto out;
+		}
+	}
+
+	/* Initialize outbound message descriptor ring */
+	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+				&rmu->msg_tx_ring.phys, GFP_KERNEL);
+	if (!rmu->msg_tx_ring.virt) {
+		rc = -ENOMEM;
+		goto out_dma;
+	}
+	memset(rmu->msg_tx_ring.virt, 0,
+			rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
+	rmu->msg_tx_ring.tx_slot = 0;
+
+	/* Point dequeue/enqueue pointers at first entry in ring */
+	out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
+	out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
+
+	/* Configure for snooping */
+	out_be32(&rmu->msg_regs->osar, 0x00000004);
+
+	/* Clear interrupt status */
+	out_be32(&rmu->msg_regs->osr, 0x000000b3);
+
+	/* Hook up outbound message handler */
+	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
+			 "msg_tx", (void *)mport);
+	if (rc < 0)
+		goto out_irq;
+
+	/*
+	 * Configure outbound message unit
+	 *      Snooping
+	 *      Interrupts (all enabled, except QEIE)
+	 *      Chaining mode
+	 *      Disable
+	 */
+	out_be32(&rmu->msg_regs->omr, 0x00100220);
+
+	/* Set number of entries */
+	out_be32(&rmu->msg_regs->omr,
+		 in_be32(&rmu->msg_regs->omr) |
+		 ((get_bitmask_order(entries) - 2) << 12));
+
+	/* Now enable the unit */
+	out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
+
+out:
+	return rc;
+
+out_irq:
+	dma_free_coherent(priv->dev,
+		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
+
+out_dma:
+	for (i = 0; i < rmu->msg_tx_ring.size; i++)
+		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
+		rmu->msg_tx_ring.virt_buffer[i],
+		rmu->msg_tx_ring.phys_buffer[i]);
+
+	return rc;
+}
+
+/**
+ * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the outbound message unit, frees all buffers, and
+ * frees the outbound message interrupt.
+ */
+void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct rio_priv *priv = mport->priv;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+	/* Disable outbound message unit */
+	out_be32(&rmu->msg_regs->omr, 0);
+
+	/* Free ring */
+	dma_free_coherent(priv->dev,
+	rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+	rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
+
+	/* Free interrupt */
+	free_irq(IRQ_RIO_TX(mport), (void *)mport);
+}
+
+/**
+ * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the inbound message interrupt,
+ * and enables the inbound message unit. Returns %0 on success
+ * and %-EINVAL or %-ENOMEM on failure.
+ */
+int
+fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+	int i, rc = 0;
+	struct rio_priv *priv = mport->priv;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+	if ((entries < RIO_MIN_RX_RING_SIZE) ||
+		(entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Initialize client buffer ring */
+	rmu->msg_rx_ring.dev_id = dev_id;
+	rmu->msg_rx_ring.size = entries;
+	rmu->msg_rx_ring.rx_slot = 0;
+	for (i = 0; i < rmu->msg_rx_ring.size; i++)
+		rmu->msg_rx_ring.virt_buffer[i] = NULL;
+
+	/* Initialize inbound message ring */
+	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
+				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+				&rmu->msg_rx_ring.phys, GFP_KERNEL);
+	if (!rmu->msg_rx_ring.virt) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Point dequeue/enqueue pointers at first entry in ring */
+	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
+	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);
+
+	/* Clear interrupt status */
+	out_be32(&rmu->msg_regs->isr, 0x00000091);
+
+	/* Hook up inbound message handler */
+	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
+			 "msg_rx", (void *)mport);
+	if (rc < 0) {
+		/* free the inbound ring allocated above */
+		dma_free_coherent(priv->dev,
+			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
+		goto out;
+	}
+
+	/*
+	 * Configure inbound message unit:
+	 *      Snooping
+	 *      4KB max message size
+	 *      Unmask all interrupt sources
+	 *      Disable
+	 */
+	out_be32(&rmu->msg_regs->imr, 0x001b0060);
+
+	/* Set number of queue entries */
+	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
+
+	/* Now enable the unit */
+	setbits32(&rmu->msg_regs->imr, 0x1);
+
+out:
+	return rc;
+}
+
+/**
+ * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the inbound message unit, frees all buffers, and
+ * frees the inbound message interrupt.
+ */
+void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct rio_priv *priv = mport->priv;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+	/* Disable inbound message unit */
+	out_be32(&rmu->msg_regs->imr, 0);
+
+	/* Free ring */
+	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+	rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
+
+	/* Free interrupt */
+	free_irq(IRQ_RIO_RX(mport), (void *)mport);
+}
+
+/**
+ * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ *
+ * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
+ */
+int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+	int rc = 0;
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
+		 rmu->msg_rx_ring.rx_slot);
+
+	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
+		printk(KERN_ERR
+			"RIO: error adding inbound buffer %d, buffer exists\n",
+			rmu->msg_rx_ring.rx_slot);
+		rc = -EINVAL;
+		goto out;
+	}
+
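+	/* Store the buffer; advance rx_slot, wrapping at end of ring */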
+	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
+	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
+		rmu->msg_rx_ring.rx_slot = 0;
+
+out:
+	return rc;
+}
+
+/**
+ * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ *
+ * Gets the next available inbound message from the inbound message queue.
+ * A pointer to the message is returned on success or NULL on failure.
+ */
+void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
+{
+	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+	u32 phys_buf, virt_buf;
+	void *buf = NULL;
+	int buf_idx;
+
+	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);
+
+	/* If no more messages, then bail out */
+	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
+		goto out2;
+
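+	/* Translate HW dequeue pointer to a virtual address and ring index */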
+	virt_buf = (u32) rmu->msg_rx_ring.virt + (phys_buf
+						- rmu->msg_rx_ring.phys);
+	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
+	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
+
+	if (!buf) {
+		printk(KERN_ERR
+			"RIO: inbound message copy failed, no buffers\n");
+		goto out1;
+	}
+
+	/* Copy max message size; caller must provide a buffer that large */
+	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
+
+	/* Clear the available buffer */
+	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
+
+out1:
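+	/* Set the message-in increment (MI) bit in the inbound mode register */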
+	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);
+
+out2:
+	return buf;
+}
+
+/**
+ * fsl_rio_doorbell_init - MPC85xx doorbell interface init
+ * @dbell: Inbound doorbell interface to initialize
+ *
+ * Initializes doorbell unit hardware and inbound DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
+{
+	int rc = 0;
+
+	/* Initialize inbound doorbells */
+	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
+		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
+	if (!dbell->dbell_ring.virt) {
+		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Point dequeue/enqueue pointers at first entry in ring */
+	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
+	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);
+
+	/* Clear interrupt status */
+	out_be32(&dbell->dbell_regs->dsr, 0x00000091);
+
+	/* Hook up doorbell handler */
+	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
+			 "dbell_rx", (void *)dbell);
+	if (rc < 0) {
+		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
+			 dbell->dbell_ring.virt, dbell->dbell_ring.phys);
+		printk(KERN_ERR
+			"MPC85xx RIO: unable to request inbound doorbell irq\n");
+		goto out;
+	}
+
+	/* Configure doorbells for snooping, 512 entries, and enable */
+	out_be32(&dbell->dbell_regs->dmr, 0x00108161);
+
+out:
+	return rc;
+}
+
+int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
+{
+	struct rio_priv *priv;
+	struct fsl_rmu *rmu;
+	u64 msg_start;
+	const u32 *msg_addr;
+	int mlen;
+	int aw;
+
+	if (!mport || !mport->priv)
+		return -EINVAL;
+
+	priv = mport->priv;
+
+	if (!node) {
+		dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
+			priv->dev->of_node->full_name);
+		return -EINVAL;
+	}
+
+	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
+	if (!rmu)
+		return -ENOMEM;
+
+	aw = of_n_addr_cells(node);
+	msg_addr = of_get_property(node, "reg", &mlen);
+	if (!msg_addr) {
+		pr_err("%s: unable to find 'reg' property of message-unit\n",
+			node->full_name);
+		kfree(rmu);
+		return -ENOMEM;
+	}
+	msg_start = of_read_number(msg_addr, aw);
+
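+	/* 'reg' gives the message unit offset within the RMU register window */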
+	rmu->msg_regs = (struct rio_msg_regs *)
+			(rmu_regs_win + (u32)msg_start);
+
+	rmu->txirq = irq_of_parse_and_map(node, 0);
+	rmu->rxirq = irq_of_parse_and_map(node, 1);
+	printk(KERN_INFO "%s: txirq: %d, rxirq: %d\n",
+		node->full_name, rmu->txirq, rmu->rxirq);
+
+	priv->rmm_handle = rmu;
+
+	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
+	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+
+	return 0;
+}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 8c7e852..4e9ccb1 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -154,7 +154,7 @@
 {
 	unsigned int cpu = 0;
 
-	if (mpic->flags & MPIC_PRIMARY)
+	if (!(mpic->flags & MPIC_SECONDARY))
 		cpu = hard_smp_processor_id();
 
 	return cpu;
@@ -315,29 +315,25 @@
 }
 
 #ifdef CONFIG_PPC_DCR
-static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
-			  struct mpic_reg_bank *rb,
+static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
 			  unsigned int offset, unsigned int size)
 {
-	const u32 *dbasep;
-
-	dbasep = of_get_property(node, "dcr-reg", NULL);
-
-	rb->dhost = dcr_map(node, *dbasep + offset, size);
+	phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
+	rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
 	BUG_ON(!DCR_MAP_OK(rb->dhost));
 }
 
-static inline void mpic_map(struct mpic *mpic, struct device_node *node,
+static inline void mpic_map(struct mpic *mpic,
 			    phys_addr_t phys_addr, struct mpic_reg_bank *rb,
 			    unsigned int offset, unsigned int size)
 {
 	if (mpic->flags & MPIC_USES_DCR)
-		_mpic_map_dcr(mpic, node, rb, offset, size);
+		_mpic_map_dcr(mpic, rb, offset, size);
 	else
 		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
 }
 #else /* CONFIG_PPC_DCR */
-#define mpic_map(m,n,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
+#define mpic_map(m,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
 #endif /* !CONFIG_PPC_DCR */
 
 
@@ -901,7 +897,7 @@
 	if (vold != vnew)
 		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
 
-	return IRQ_SET_MASK_OK_NOCOPY;;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
 void mpic_set_vector(unsigned int virq, unsigned int vector)
@@ -990,7 +986,7 @@
 
 #ifdef CONFIG_SMP
 	else if (hw >= mpic->ipi_vecs[0]) {
-		WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+		WARN_ON(mpic->flags & MPIC_SECONDARY);
 
 		DBG("mpic: mapping as IPI\n");
 		irq_set_chip_data(virq, mpic);
@@ -1001,7 +997,7 @@
 #endif /* CONFIG_SMP */
 
 	if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
-		WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+		WARN_ON(mpic->flags & MPIC_SECONDARY);
 
 		DBG("mpic: mapping as timer\n");
 		irq_set_chip_data(virq, mpic);
@@ -1115,17 +1111,28 @@
 	return 0;
 }
 
+/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
+static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct mpic *mpic = irq_desc_get_handler_data(desc);
+	unsigned int virq;
+
+	BUG_ON(!(mpic->flags & MPIC_SECONDARY));
+
+	virq = mpic_get_one_irq(mpic);
+	if (virq != NO_IRQ)
+		generic_handle_irq(virq);
+
+	chip->irq_eoi(&desc->irq_data);
+}
+
 static struct irq_host_ops mpic_host_ops = {
 	.match = mpic_host_match,
 	.map = mpic_host_map,
 	.xlate = mpic_host_xlate,
 };
 
-static int mpic_reset_prohibited(struct device_node *node)
-{
-	return node && of_get_property(node, "pic-no-reset", NULL);
-}
-
 /*
  * Exported functions
  */
@@ -1137,27 +1144,60 @@
 				unsigned int irq_count,
 				const char *name)
 {
-	struct mpic	*mpic;
-	u32		greg_feature;
-	const char	*vers;
-	int		i;
-	int		intvec_top;
-	u64		paddr = phys_addr;
+	int i, psize, intvec_top;
+	struct mpic *mpic;
+	u32 greg_feature;
+	const char *vers;
+	const u32 *psrc;
+
+	/* Default MPIC search parameters */
+	static const struct of_device_id __initconst mpic_device_id[] = {
+		{ .type	      = "open-pic", },
+		{ .compatible = "open-pic", },
+		{},
+	};
+
+	/*
+	 * If we were not passed a device-tree node, then perform the default
+	 * search for a standardized OpenPIC.
+	 */
+	if (node) {
+		node = of_node_get(node);
+	} else {
+		node = of_find_matching_node(NULL, mpic_device_id);
+		if (!node)
+			return NULL;
+	}
+
+	/* Pick the physical address from the device tree if unspecified */
+	if (!phys_addr) {
+		/* Check if it is DCR-based */
+		if (of_get_property(node, "dcr-reg", NULL)) {
+			flags |= MPIC_USES_DCR;
+		} else {
+			struct resource r;
+			if (of_address_to_resource(node, 0, &r))
+				goto err_of_node_put;
+			phys_addr = r.start;
+		}
+	}
 
 	mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
 	if (mpic == NULL)
-		return NULL;
+		goto err_of_node_put;
 
 	mpic->name = name;
+	mpic->node = node;
+	mpic->paddr = phys_addr;
 
 	mpic->hc_irq = mpic_irq_chip;
 	mpic->hc_irq.name = name;
-	if (flags & MPIC_PRIMARY)
+	if (!(flags & MPIC_SECONDARY))
 		mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
 #ifdef CONFIG_MPIC_U3_HT_IRQS
 	mpic->hc_ht_irq = mpic_irq_ht_chip;
 	mpic->hc_ht_irq.name = name;
-	if (flags & MPIC_PRIMARY)
+	if (!(flags & MPIC_SECONDARY))
 		mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
 #endif /* CONFIG_MPIC_U3_HT_IRQS */
 
@@ -1194,28 +1234,22 @@
 	mpic->spurious_vec  = intvec_top;
 
 	/* Check for "big-endian" in device-tree */
-	if (node && of_get_property(node, "big-endian", NULL) != NULL)
+	if (of_get_property(mpic->node, "big-endian", NULL) != NULL)
 		mpic->flags |= MPIC_BIG_ENDIAN;
-	if (node && of_device_is_compatible(node, "fsl,mpic"))
+	if (of_device_is_compatible(mpic->node, "fsl,mpic"))
 		mpic->flags |= MPIC_FSL;
 
 	/* Look for protected sources */
-	if (node) {
-		int psize;
-		unsigned int bits, mapsize;
-		const u32 *psrc =
-			of_get_property(node, "protected-sources", &psize);
-		if (psrc) {
-			psize /= 4;
-			bits = intvec_top + 1;
-			mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
-			mpic->protected = kzalloc(mapsize, GFP_KERNEL);
-			BUG_ON(mpic->protected == NULL);
-			for (i = 0; i < psize; i++) {
-				if (psrc[i] > intvec_top)
-					continue;
-				__set_bit(psrc[i], mpic->protected);
-			}
+	psrc = of_get_property(mpic->node, "protected-sources", &psize);
+	if (psrc) {
+		/* Allocate a bitmap with one bit per interrupt */
+		unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
+		mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
+		BUG_ON(mpic->protected == NULL);
+		for (i = 0; i < psize/sizeof(u32); i++) {
+			if (psrc[i] > intvec_top)
+				continue;
+			__set_bit(psrc[i], mpic->protected);
 		}
 	}
 
@@ -1224,42 +1258,32 @@
 #endif
 
 	/* default register type */
-	mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
-		mpic_access_mmio_be : mpic_access_mmio_le;
+	if (flags & MPIC_BIG_ENDIAN)
+		mpic->reg_type = mpic_access_mmio_be;
+	else
+		mpic->reg_type = mpic_access_mmio_le;
 
-	/* If no physical address is passed in, a device-node is mandatory */
-	BUG_ON(paddr == 0 && node == NULL);
-
-	/* If no physical address passed in, check if it's dcr based */
-	if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) {
+	/*
+	 * An MPIC with a "dcr-reg" property must be accessed that way, but
+	 * only if the kernel includes DCR support.
+	 */
 #ifdef CONFIG_PPC_DCR
-		mpic->flags |= MPIC_USES_DCR;
+	if (flags & MPIC_USES_DCR)
 		mpic->reg_type = mpic_access_dcr;
 #else
-		BUG();
-#endif /* CONFIG_PPC_DCR */
-	}
-
-	/* If the MPIC is not DCR based, and no physical address was passed
-	 * in, try to obtain one
-	 */
-	if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
-		const u32 *reg = of_get_property(node, "reg", NULL);
-		BUG_ON(reg == NULL);
-		paddr = of_translate_address(node, reg);
-		BUG_ON(paddr == OF_BAD_ADDR);
-	}
+	BUG_ON(flags & MPIC_USES_DCR);
+#endif
 
 	/* Map the global registers */
-	mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
-	mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+	mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+	mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
 
 	/* Reset */
 
 	/* When using a device-node, reset requests are only honored if the MPIC
 	 * is allowed to reset.
 	 */
-	if (mpic_reset_prohibited(node))
+	if (of_get_property(mpic->node, "pic-no-reset", NULL))
 		mpic->flags |= MPIC_NO_RESET;
 
 	if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
@@ -1307,7 +1331,7 @@
 	for_each_possible_cpu(i) {
 		unsigned int cpu = get_hard_smp_processor_id(i);
 
-		mpic_map(mpic, node, paddr, &mpic->cpuregs[cpu],
+		mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
 			 MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
 			 0x1000);
 	}
@@ -1315,16 +1339,21 @@
 	/* Initialize main ISU if none provided */
 	if (mpic->isu_size == 0) {
 		mpic->isu_size = mpic->num_sources;
-		mpic_map(mpic, node, paddr, &mpic->isus[0],
+		mpic_map(mpic, mpic->paddr, &mpic->isus[0],
 			 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
 	}
 	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
 	mpic->isu_mask = (1 << mpic->isu_shift) - 1;
 
-	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
+	mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR,
 				       isu_size ? isu_size : mpic->num_sources,
 				       &mpic_host_ops,
 				       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
+
+	/*
+	 * FIXME: The code leaks the MPIC object and mappings here; this
+	 * is very unlikely to fail but it ought to be fixed anyway.
+	 */
 	if (mpic->irqhost == NULL)
 		return NULL;
 
@@ -1347,19 +1376,23 @@
 	}
 	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
 	       " max %d CPUs\n",
-	       name, vers, (unsigned long long)paddr, num_possible_cpus());
+	       name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
 	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
 	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
 
 	mpic->next = mpics;
 	mpics = mpic;
 
-	if (flags & MPIC_PRIMARY) {
+	if (!(flags & MPIC_SECONDARY)) {
 		mpic_primary = mpic;
 		irq_set_default_host(mpic->irqhost);
 	}
 
 	return mpic;
+
+err_of_node_put:
+	of_node_put(node);
+	return NULL;
 }
 
 void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
@@ -1369,7 +1402,7 @@
 
 	BUG_ON(isu_num >= MPIC_MAX_ISU);
 
-	mpic_map(mpic, mpic->irqhost->of_node,
+	mpic_map(mpic,
 		 paddr, &mpic->isus[isu_num], 0,
 		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
 
@@ -1385,8 +1418,7 @@
 
 void __init mpic_init(struct mpic *mpic)
 {
-	int i;
-	int cpu;
+	int i, cpu;
 
 	BUG_ON(mpic->num_sources == 0);
 
@@ -1424,7 +1456,7 @@
 
 	/* Do the HT PIC fixups on U3 broken mpic */
 	DBG("MPIC flags: %x\n", mpic->flags);
-	if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) {
+	if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
 		mpic_scan_ht_pics(mpic);
 		mpic_u3msi_init(mpic);
 	}
@@ -1471,6 +1503,17 @@
 				  GFP_KERNEL);
 	BUG_ON(mpic->save_data == NULL);
 #endif
+
+	/* Check if this MPIC is chained from a parent interrupt controller */
+	if (mpic->flags & MPIC_SECONDARY) {
+		int virq = irq_of_parse_and_map(mpic->node, 0);
+		if (virq != NO_IRQ) {
+			printk(KERN_INFO "%s: hooking up to IRQ %d\n",
+					mpic->node->full_name, virq);
+			irq_set_handler_data(virq, mpic);
+			irq_set_chained_handler(virq, &mpic_cascade);
+		}
+	}
 }
 
 void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
index 73b86cc..82e2cfe 100644
--- a/arch/powerpc/sysdev/ppc4xx_cpm.c
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -179,12 +179,12 @@
 
 static void cpm_idle_config_sysfs(void)
 {
-	struct sys_device *sys_dev;
+	struct device *dev;
 	unsigned long ret;
 
-	sys_dev = get_cpu_sysdev(0);
+	dev = get_cpu_device(0);
 
-	ret = sysfs_create_file(&sys_dev->kobj,
+	ret = sysfs_create_file(&dev->kobj,
 				&cpm_idle_attr.attr);
 	if (ret)
 		printk(KERN_WARNING
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 862f11b..4f05f75 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -185,9 +185,15 @@
  out:
 	dma_offset_set = 1;
 	pci_dram_offset = res->start;
+	hose->dma_window_base_cur = res->start;
+	hose->dma_window_size = resource_size(res);
 
 	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
 	       pci_dram_offset);
+	printk(KERN_INFO "4xx PCI DMA window base set to 0x%016llx\n",
+	       (unsigned long long)hose->dma_window_base_cur);
+	printk(KERN_INFO "DMA window size 0x%016llx\n",
+	       (unsigned long long)hose->dma_window_size);
 	return 0;
 }
 
@@ -647,6 +653,7 @@
 
 struct ppc4xx_pciex_hwops
 {
+	bool want_sdr;
 	int (*core_init)(struct device_node *np);
 	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
 	int (*setup_utl)(struct ppc4xx_pciex_port *port);
@@ -916,6 +923,7 @@
 
 static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
 {
+	.want_sdr	= true,
 	.core_init	= ppc440spe_pciex_core_init,
 	.port_init_hw	= ppc440speA_pciex_init_port_hw,
 	.setup_utl	= ppc440speA_pciex_init_utl,
@@ -924,6 +932,7 @@
 
 static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
 {
+	.want_sdr	= true,
 	.core_init	= ppc440spe_pciex_core_init,
 	.port_init_hw	= ppc440speB_pciex_init_port_hw,
 	.setup_utl	= ppc440speB_pciex_init_utl,
@@ -1034,6 +1043,7 @@
 
 static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
 {
+	.want_sdr	= true,
 	.core_init	= ppc460ex_pciex_core_init,
 	.port_init_hw	= ppc460ex_pciex_init_port_hw,
 	.setup_utl	= ppc460ex_pciex_init_utl,
@@ -1181,6 +1191,7 @@
 }
 
 static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
+	.want_sdr	= true,
 	.core_init	= ppc460sx_pciex_core_init,
 	.port_init_hw	= ppc460sx_pciex_init_port_hw,
 	.setup_utl	= ppc460sx_pciex_init_utl,
@@ -1276,6 +1287,7 @@
 
 static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
 {
+	.want_sdr	= true,
 	.core_init	= ppc405ex_pciex_core_init,
 	.port_init_hw	= ppc405ex_pciex_init_port_hw,
 	.setup_utl	= ppc405ex_pciex_init_utl,
@@ -1284,6 +1296,52 @@
 
 #endif /* CONFIG_40x */
 
+#ifdef CONFIG_476FPE
+static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
+{
+	return 4;
+}
+
+static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
+{
+	u32 timeout_ms = 20;
+	u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
+	void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
+	                              0x1000);
+
+	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
+
+	if (mbase == NULL) {
+		printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
+		                    port->index);
+		return;
+	}
+
+	while (timeout_ms--) {
+		val = in_le32(mbase + PECFG_TLDLP);
+
+		if ((val & mask) == mask)
+			break;
+		msleep(10);
+	}
+
+	if (val & PECFG_TLDLP_PRESENT) {
+		printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
+		port->link = 1;
+	} else
+		printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
+
+	iounmap(mbase);
+	return;
+}
+
+static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
+{
+	.core_init	= ppc_476fpe_pciex_core_init,
+	.check_link	= ppc_476fpe_pciex_check_link,
+};
+#endif /* CONFIG_476FPE */
+
 /* Check that the core has been initied and if not, do it */
 static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
 {
@@ -1309,6 +1367,10 @@
 	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
 		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
 #endif
+#ifdef CONFIG_476FPE
+	if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe"))
+		ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
+#endif
 	if (ppc4xx_pciex_hwops == NULL) {
 		printk(KERN_WARNING "PCIE: unknown host type %s\n",
 		       np->full_name);
@@ -1617,6 +1679,10 @@
 			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
 				sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
 					| DCRO_PEGPL_OMRxMSKL_VAL);
+		else if (of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe"))
+			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
+				sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
+					| DCRO_PEGPL_OMRxMSKL_VAL);
 		else
 			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
 				sa | DCRO_PEGPL_OMR1MSKL_UOT
@@ -1739,9 +1805,10 @@
 		/* Calculate window size */
 		sa = (0xffffffffffffffffull << ilog2(size));
 		if (res->flags & IORESOURCE_PREFETCH)
-			sa |= 0x8;
+			sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
 
-		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
+		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
+		    of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe"))
 			sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
 
 		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
@@ -1972,13 +2039,15 @@
 	}
 
 	port->node = of_node_get(np);
-	pval = of_get_property(np, "sdr-base", NULL);
-	if (pval == NULL) {
-		printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
-		       np->full_name);
-		return;
+	if (ppc4xx_pciex_hwops->want_sdr) {
+		pval = of_get_property(np, "sdr-base", NULL);
+		if (pval == NULL) {
+			printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
+			       np->full_name);
+			return;
+		}
+		port->sdr_base = *pval;
 	}
-	port->sdr_base = *pval;
 
 	/* Check if device_type property is set to "pci" or "pci-endpoint".
 	 * Resulting from this setup this PCIe port will be configured
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
index 32ce763..bb48219 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/sysdev/ppc4xx_pci.h
@@ -476,6 +476,13 @@
 #define DCRO_PEGPL_OMR1MSKL_UOT	 0x00000002
 #define DCRO_PEGPL_OMR3MSKL_IO	 0x00000002
 
+/* 476FPE */
+#define PCCFG_LCPA			0x270
+#define PECFG_TLDLP			0x3F8
+#define PECFG_TLDLP_LNKUP		0x00000008
+#define PECFG_TLDLP_PRESENT		0x00000010
+#define DCRO_PEGPL_476FPE_OMR1MSKL_UOT	 0x00000004
+
 /* SDR Bit Mappings */
 #define PESDRx_RCSSET_HLDPLB	0x10000000
 #define PESDRx_RCSSET_RSTGU	0x01000000
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
index e23f23c..521e67a 100644
--- a/arch/powerpc/sysdev/qe_lib/gpio.c
+++ b/arch/powerpc/sysdev/qe_lib/gpio.c
@@ -139,14 +139,10 @@
 struct qe_pin *qe_pin_request(struct device_node *np, int index)
 {
 	struct qe_pin *qe_pin;
-	struct device_node *gpio_np;
 	struct gpio_chip *gc;
 	struct of_mm_gpio_chip *mm_gc;
 	struct qe_gpio_chip *qe_gc;
 	int err;
-	int size;
-	const void *gpio_spec;
-	const u32 *gpio_cells;
 	unsigned long flags;
 
 	qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
@@ -155,45 +151,25 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
-					  &gpio_np, &gpio_spec);
-	if (err) {
-		pr_debug("%s: can't parse gpios property\n", __func__);
+	err = of_get_gpio(np, index);
+	if (err < 0)
 		goto err0;
-	}
+	gc = gpio_to_chip(err);
+	if (WARN_ON(!gc))
+		goto err0;
 
-	if (!of_device_is_compatible(gpio_np, "fsl,mpc8323-qe-pario-bank")) {
+	if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
 		pr_debug("%s: tried to get a non-qe pin\n", __func__);
 		err = -EINVAL;
-		goto err1;
+		goto err0;
 	}
 
-	gc = of_node_to_gpiochip(gpio_np);
-	if (!gc) {
-		pr_debug("%s: gpio controller %s isn't registered\n",
-			 np->full_name, gpio_np->full_name);
-		err = -ENODEV;
-		goto err1;
-	}
-
-	gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
-	if (!gpio_cells || size != sizeof(*gpio_cells) ||
-			*gpio_cells != gc->of_gpio_n_cells) {
-		pr_debug("%s: wrong #gpio-cells for %s\n",
-			 np->full_name, gpio_np->full_name);
-		err = -EINVAL;
-		goto err1;
-	}
-
-	err = gc->of_xlate(gc, np, gpio_spec, NULL);
-	if (err < 0)
-		goto err1;
-
 	mm_gc = to_of_mm_gpio_chip(gc);
 	qe_gc = to_qe_gpio_chip(mm_gc);
 
 	spin_lock_irqsave(&qe_gc->lock, flags);
 
+	err -= gc->base;
 	if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
 		qe_pin->controller = qe_gc;
 		qe_pin->num = err;
@@ -206,8 +182,6 @@
 
 	if (!err)
 		return qe_pin;
-err1:
-	of_node_put(gpio_np);
 err0:
 	kfree(qe_pin);
 	pr_debug("%s failed with status %d\n", __func__, err);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 18e75ca..73034bd 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -22,7 +22,6 @@
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
-#include <linux/sysdev.h>
 #include <linux/device.h>
 #include <linux/bootmem.h>
 #include <linux/spinlock.h>
@@ -484,13 +483,14 @@
 	return 0;
 }
 
-static struct sysdev_class qe_ic_sysclass = {
+static struct bus_type qe_ic_subsys = {
 	.name = "qe_ic",
+	.dev_name = "qe_ic",
 };
 
-static struct sys_device device_qe_ic = {
+static struct device device_qe_ic = {
 	.id = 0,
-	.cls = &qe_ic_sysclass,
+	.bus = &qe_ic_subsys,
 };
 
 static int __init init_qe_ic_sysfs(void)
@@ -499,12 +499,12 @@
 
 	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
 
-	rc = sysdev_class_register(&qe_ic_sysclass);
+	rc = subsys_system_register(&qe_ic_subsys, NULL);
 	if (rc) {
 		printk(KERN_ERR "Failed registering qe_ic sys class\n");
 		return -ENODEV;
 	}
-	rc = sysdev_register(&device_qe_ic);
+	rc = device_register(&device_qe_ic);
 	if (rc) {
 		printk(KERN_ERR "Failed registering qe_ic sys device\n");
 		return -ENODEV;
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 3330fec..063c901 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -18,7 +18,6 @@
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
-#include <linux/sysdev.h>
 #include <linux/device.h>
 #include <linux/bootmem.h>
 #include <linux/spinlock.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 9518d36..253dce9 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -27,33 +27,50 @@
 {
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 	long rc;
+	unsigned int ret = XICS_IRQ_SPURIOUS;
 
 	rc = plpar_hcall(H_XIRR, retbuf, cppr);
-	if (rc != H_SUCCESS)
-		panic(" bad return code xirr - rc = %lx\n", rc);
-	return (unsigned int)retbuf[0];
-}
+	if (rc == H_SUCCESS) {
+		ret = (unsigned int)retbuf[0];
+	} else {
+		pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
+			__func__, cppr, rc);
+		WARN_ON_ONCE(1);
+	}
 
-static inline void icp_hv_set_xirr(unsigned int value)
-{
-	long rc = plpar_hcall_norets(H_EOI, value);
-	if (rc != H_SUCCESS)
-		panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
+	return ret;
 }
 
 static inline void icp_hv_set_cppr(u8 value)
 {
 	long rc = plpar_hcall_norets(H_CPPR, value);
-	if (rc != H_SUCCESS)
-		panic("bad return code cppr - rc = %lx\n", rc);
+	if (rc != H_SUCCESS) {
+		pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
+			__func__, value, rc);
+		WARN_ON_ONCE(1);
+	}
+}
+
+static inline void icp_hv_set_xirr(unsigned int value)
+{
+	long rc = plpar_hcall_norets(H_EOI, value);
+	if (rc != H_SUCCESS) {
+		pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
+			__func__, value, rc);
+		WARN_ON_ONCE(1);
+		icp_hv_set_cppr(value >> 24);
+	}
 }
 
 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
 {
-	long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
-				     value);
-	if (rc != H_SUCCESS)
-		panic("bad return code qirr - rc = %lx\n", rc);
+	int hw_cpu = get_hard_smp_processor_id(n_cpu);
+	long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+	if (rc != H_SUCCESS) {
+		pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
+			"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
+		WARN_ON_ONCE(1);
+	}
 }
 
 static void icp_hv_eoi(struct irq_data *d)
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 63762c6..d72eda6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -137,7 +137,7 @@
 	 * IPIs are marked IRQF_PERCPU. The handler was set in map.
 	 */
 	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
-			   IRQF_PERCPU, "IPI", NULL));
+			   IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
 }
 
 int __init xics_smp_probe(void)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 03a217a..cb95eea 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -228,13 +228,11 @@
   t	print backtrace\n\
   x	exit monitor and recover\n\
   X	exit monitor and dont recover\n"
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E)
 "  u	dump segment table or SLB\n"
-#endif
-#ifdef CONFIG_PPC_STD_MMU_32
+#elif defined(CONFIG_PPC_STD_MMU_32)
 "  u	dump segment registers\n"
-#endif
-#ifdef CONFIG_44x
+#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E)
 "  u	dump TLB\n"
 #endif
 "  ?	help\n"
@@ -340,7 +338,7 @@
 
 static inline int unrecoverable_excp(struct pt_regs *regs)
 {
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOK3E)
+#if defined(CONFIG_4xx) || defined(CONFIG_PPC_BOOK3E)
 	/* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */
 	return 0;
 #else
@@ -885,13 +883,11 @@
 		case 'u':
 			dump_segments();
 			break;
-#endif
-#ifdef CONFIG_4xx
+#elif defined(CONFIG_4xx)
 		case 'u':
 			dump_tlb_44x();
 			break;
-#endif
-#ifdef CONFIG_PPC_BOOK3E
+#elif defined(CONFIG_PPC_BOOK3E)
 		case 'u':
 			dump_tlb_book3e();
 			break;
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index ae4b010..9858476 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -1,6 +1,7 @@
-obj-y += kernel/
-obj-y += mm/
-obj-y += crypto/
-obj-y += appldata/
-obj-y += hypfs/
-obj-y += kvm/
+obj-y				+= kernel/
+obj-y				+= mm/
+obj-$(CONFIG_KVM)		+= kvm/
+obj-$(CONFIG_CRYPTO_HW)		+= crypto/
+obj-$(CONFIG_S390_HYPFS_FS)	+= hypfs/
+obj-$(CONFIG_APPLDATA_BASE)	+= appldata/
+obj-$(CONFIG_MATHEMU)		+= math-emu/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373679b..d172758 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -87,11 +87,13 @@
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_XZ
-	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
+	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
@@ -191,18 +193,13 @@
 	  Say N if you want to disable CPU hotplug.
 
 config SCHED_MC
-	def_bool y
-	prompt "Multi-core scheduler support"
-	depends on SMP
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places.
+	def_bool n
 
 config SCHED_BOOK
 	def_bool y
 	prompt "Book scheduler support"
-	depends on SMP && SCHED_MC
+	depends on SMP
+	select SCHED_MC
 	help
 	  Book scheduler support improves the CPU scheduler's decision making
 	  when dealing with machines that have several books.
@@ -345,9 +342,6 @@
 
 	  Say N if you are unsure.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 27a0b5d..e9f3533 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -99,7 +99,6 @@
 
 libs-y		+= arch/s390/lib/
 drivers-y	+= drivers/s390/
-drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
 
 # must be linked after kernel
 drivers-$(CONFIG_OPROFILE)	+= arch/s390/oprofile/
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 92f1cb7..4de031d6 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -115,21 +115,21 @@
 	j = 0;
 	for_each_online_cpu(i) {
 		os_data->os_cpu[j].per_cpu_user =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.user);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
 		os_data->os_cpu[j].per_cpu_nice =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
 		os_data->os_cpu[j].per_cpu_system =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.system);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
 		os_data->os_cpu[j].per_cpu_idle =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
 		os_data->os_cpu[j].per_cpu_irq =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
 		os_data->os_cpu[j].per_cpu_softirq =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
 		os_data->os_cpu[j].per_cpu_iowait =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
 		os_data->os_cpu[j].per_cpu_steal =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.steal);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
 		os_data->os_cpu[j].cpu_id = i;
 		j++;
 	}
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 635d677..f2737a0 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -23,4 +23,4 @@
 
 install: $(CONFIGURE) $(obj)/image
 	sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
-	      System.map Kerntypes "$(INSTALL_PATH)"
+	      System.map "$(INSTALL_PATH)"
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 481f4f7..8a2a887 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -97,7 +97,7 @@
 	}
 }
 
-static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
+static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
 {
 	struct inode *ret = new_inode(sb);
 
@@ -107,7 +107,7 @@
 		ret->i_uid = hypfs_info->uid;
 		ret->i_gid = hypfs_info->gid;
 		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
-		if (mode & S_IFDIR)
+		if (S_ISDIR(mode))
 			set_nlink(ret, 2);
 	}
 	return ret;
@@ -259,9 +259,9 @@
 	return 0;
 }
 
-static int hypfs_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int hypfs_show_options(struct seq_file *s, struct dentry *root)
 {
-	struct hypfs_sb_info *hypfs_info = mnt->mnt_sb->s_fs_info;
+	struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info;
 
 	seq_printf(s, ",uid=%u", hypfs_info->uid);
 	seq_printf(s, ",gid=%u", hypfs_info->gid);
@@ -333,7 +333,7 @@
 
 static struct dentry *hypfs_create_file(struct super_block *sb,
 					struct dentry *parent, const char *name,
-					char *data, mode_t mode)
+					char *data, umode_t mode)
 {
 	struct dentry *dentry;
 	struct inode *inode;
@@ -350,13 +350,13 @@
 		dentry = ERR_PTR(-ENOMEM);
 		goto fail;
 	}
-	if (mode & S_IFREG) {
+	if (S_ISREG(mode)) {
 		inode->i_fop = &hypfs_file_ops;
 		if (data)
 			inode->i_size = strlen(data);
 		else
 			inode->i_size = 0;
-	} else if (mode & S_IFDIR) {
+	} else if (S_ISDIR(mode)) {
 		inode->i_op = &simple_dir_inode_operations;
 		inode->i_fop = &simple_dir_operations;
 		inc_nlink(parent->d_inode);
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 0814348..c23c390 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -16,114 +16,100 @@
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
-typedef unsigned long long cputime_t;
-typedef unsigned long long cputime64_t;
+typedef unsigned long long __nocast cputime_t;
+typedef unsigned long long __nocast cputime64_t;
 
-#ifndef __s390x__
-
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
+static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
+#ifndef __s390x__
 	register_pair rp;
 
 	rp.pair = n >> 1;
 	asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
 	return rp.subreg.odd;
-}
-
 #else /* __s390x__ */
-
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
-{
 	return n / base;
+#endif /* __s390x__ */
 }
 
-#endif /* __s390x__ */
-
-#define cputime_zero			(0ULL)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) +  (__b))
-#define cputime_sub(__a, __b)		((__a) -  (__b))
-#define cputime_div(__a, __n) ({		\
-	unsigned long long __div = (__a);	\
-	do_div(__div,__n);			\
-	__div;					\
-})
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) >  (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) <  (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-#define cputime_to_jiffies(__ct)	(__div((__ct), 4096000000ULL / HZ))
-#define cputime_to_scaled(__ct)		(__ct)
-#define jiffies_to_cputime(__hz)	((cputime_t)(__hz) * (4096000000ULL / HZ))
 
-#define cputime64_zero			(0ULL)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime_to_cputime64(__ct)	(__ct)
-
-static inline u64
-cputime64_to_jiffies64(cputime64_t cputime)
+/*
+ * Convert cputime to jiffies and back.
+ */
+static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
 {
-	do_div(cputime, 4096000000ULL / HZ);
-	return cputime;
+	return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned int jif)
+{
+	return (__force cputime_t)(jif * (4096000000ULL / HZ));
+}
+
+static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
+{
+	unsigned long long jif = (__force unsigned long long) cputime;
+	do_div(jif, 4096000000ULL / HZ);
+	return jif;
+}
+
+static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+{
+	return (__force cputime64_t)(jif * (4096000000ULL / HZ));
 }
 
 /*
  * Convert cputime to microseconds and back.
  */
-static inline unsigned int
-cputime_to_usecs(const cputime_t cputime)
+static inline unsigned int cputime_to_usecs(const cputime_t cputime)
 {
-	return cputime_div(cputime, 4096);
+	return (__force unsigned long long) cputime >> 12;
 }
 
-static inline cputime_t
-usecs_to_cputime(const unsigned int m)
+static inline cputime_t usecs_to_cputime(const unsigned int m)
 {
-	return (cputime_t) m * 4096;
+	return (__force cputime_t)(m * 4096ULL);
 }
 
+#define usecs_to_cputime64(m)		usecs_to_cputime(m)
+
 /*
  * Convert cputime to milliseconds and back.
  */
-static inline unsigned int
-cputime_to_secs(const cputime_t cputime)
+static inline unsigned int cputime_to_secs(const cputime_t cputime)
 {
-	return __div(cputime, 2048000000) >> 1;
+	return __div((__force unsigned long long) cputime, 2048000000) >> 1;
 }
 
-static inline cputime_t
-secs_to_cputime(const unsigned int s)
+static inline cputime_t secs_to_cputime(const unsigned int s)
 {
-	return (cputime_t) s * 4096000000ULL;
+	return (__force cputime_t)(s * 4096000000ULL);
 }
 
 /*
  * Convert cputime to timespec and back.
  */
-static inline cputime_t
-timespec_to_cputime(const struct timespec *value)
+static inline cputime_t timespec_to_cputime(const struct timespec *value)
 {
-	return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
+	unsigned long long ret = value->tv_sec * 4096000000ULL;
+	return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
 }
 
-static inline void
-cputime_to_timespec(const cputime_t cputime, struct timespec *value)
+static inline void cputime_to_timespec(const cputime_t cputime,
+				       struct timespec *value)
 {
+	unsigned long long __cputime = (__force unsigned long long) cputime;
 #ifndef __s390x__
 	register_pair rp;
 
-	rp.pair = cputime >> 1;
+	rp.pair = __cputime >> 1;
 	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
 	value->tv_nsec = rp.subreg.even * 1000 / 4096;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
-	value->tv_sec = cputime / 4096000000ULL;
+	value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
+	value->tv_sec = __cputime / 4096000000ULL;
 #endif
 }
 
@@ -132,50 +118,52 @@
  * Since cputime and timeval have the same resolution (microseconds)
  * this is easy.
  */
-static inline cputime_t
-timeval_to_cputime(const struct timeval *value)
+static inline cputime_t timeval_to_cputime(const struct timeval *value)
 {
-	return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
+	unsigned long long ret = value->tv_sec * 4096000000ULL;
+	return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
 }
 
-static inline void
-cputime_to_timeval(const cputime_t cputime, struct timeval *value)
+static inline void cputime_to_timeval(const cputime_t cputime,
+				      struct timeval *value)
 {
+	unsigned long long __cputime = (__force unsigned long long) cputime;
 #ifndef __s390x__
 	register_pair rp;
 
-	rp.pair = cputime >> 1;
+	rp.pair = __cputime >> 1;
 	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
 	value->tv_usec = rp.subreg.even / 4096;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_usec = (cputime % 4096000000ULL) / 4096;
-	value->tv_sec = cputime / 4096000000ULL;
+	value->tv_usec = (__cputime % 4096000000ULL) / 4096;
+	value->tv_sec = __cputime / 4096000000ULL;
 #endif
 }
 
 /*
  * Convert cputime to clock and back.
  */
-static inline clock_t
-cputime_to_clock_t(cputime_t cputime)
+static inline clock_t cputime_to_clock_t(cputime_t cputime)
 {
-	return cputime_div(cputime, 4096000000ULL / USER_HZ);
+	unsigned long long clock = (__force unsigned long long) cputime;
+	do_div(clock, 4096000000ULL / USER_HZ);
+	return clock;
 }
 
-static inline cputime_t
-clock_t_to_cputime(unsigned long x)
+static inline cputime_t clock_t_to_cputime(unsigned long x)
 {
-	return (cputime_t) x * (4096000000ULL / USER_HZ);
+	return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
 }
 
 /*
  * Convert cputime64 to clock.
  */
-static inline clock_t
-cputime64_to_clock_t(cputime64_t cputime)
+static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
 {
-       return cputime_div(cputime, 4096000000ULL / USER_HZ);
+	unsigned long long clock = (__force unsigned long long) cputime;
+	do_div(clock, 4096000000ULL / USER_HZ);
+	return clock;
 }
 
 struct s390_idle_data {
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 18124b7..9d88db1 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -73,7 +73,7 @@
 	struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
 	struct debug_view* views[DEBUG_MAX_VIEWS];	
 	char name[DEBUG_MAX_NAME_LEN];
-	mode_t mode;
+	umode_t mode;
 } debug_info_t;
 
 typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -124,7 +124,7 @@
                              int buf_size);
 
 debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
-				  int buf_size, mode_t mode, uid_t uid,
+				  int buf_size, umode_t mode, uid_t uid,
 				  gid_t gid);
 
 void debug_unregister(debug_info_t* id);
diff --git a/arch/s390/include/asm/kdebug.h b/arch/s390/include/asm/kdebug.h
index 40db27c..5c1abd4 100644
--- a/arch/s390/include/asm/kdebug.h
+++ b/arch/s390/include/asm/kdebug.h
@@ -22,6 +22,6 @@
 	DIE_NMI_IPI,
 };
 
-extern void die(const char *, struct pt_regs *, long);
+extern void die(struct pt_regs *, const char *);
 
 #endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 9e13c7d..707f230 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -97,47 +97,52 @@
 	__u32	gpregs_save_area[16];		/* 0x0180 */
 	__u32	cregs_save_area[16];		/* 0x01c0 */
 
+	/* Save areas. */
+	__u32	save_area_sync[8];		/* 0x0200 */
+	__u32	save_area_async[8];		/* 0x0220 */
+	__u32	save_area_restart[1];		/* 0x0240 */
+	__u8	pad_0x0244[0x0248-0x0244];	/* 0x0244 */
+
 	/* Return psws. */
-	__u32	save_area[16];			/* 0x0200 */
-	psw_t	return_psw;			/* 0x0240 */
-	psw_t	return_mcck_psw;		/* 0x0248 */
+	psw_t	return_psw;			/* 0x0248 */
+	psw_t	return_mcck_psw;		/* 0x0250 */
 
 	/* CPU time accounting values */
-	__u64	sync_enter_timer;		/* 0x0250 */
-	__u64	async_enter_timer;		/* 0x0258 */
-	__u64	mcck_enter_timer;		/* 0x0260 */
-	__u64	exit_timer;			/* 0x0268 */
-	__u64	user_timer;			/* 0x0270 */
-	__u64	system_timer;			/* 0x0278 */
-	__u64	steal_timer;			/* 0x0280 */
-	__u64	last_update_timer;		/* 0x0288 */
-	__u64	last_update_clock;		/* 0x0290 */
+	__u64	sync_enter_timer;		/* 0x0258 */
+	__u64	async_enter_timer;		/* 0x0260 */
+	__u64	mcck_enter_timer;		/* 0x0268 */
+	__u64	exit_timer;			/* 0x0270 */
+	__u64	user_timer;			/* 0x0278 */
+	__u64	system_timer;			/* 0x0280 */
+	__u64	steal_timer;			/* 0x0288 */
+	__u64	last_update_timer;		/* 0x0290 */
+	__u64	last_update_clock;		/* 0x0298 */
 
 	/* Current process. */
-	__u32	current_task;			/* 0x0298 */
-	__u32	thread_info;			/* 0x029c */
-	__u32	kernel_stack;			/* 0x02a0 */
+	__u32	current_task;			/* 0x02a0 */
+	__u32	thread_info;			/* 0x02a4 */
+	__u32	kernel_stack;			/* 0x02a8 */
 
 	/* Interrupt and panic stack. */
-	__u32	async_stack;			/* 0x02a4 */
-	__u32	panic_stack;			/* 0x02a8 */
+	__u32	async_stack;			/* 0x02ac */
+	__u32	panic_stack;			/* 0x02b0 */
 
 	/* Address space pointer. */
-	__u32	kernel_asce;			/* 0x02ac */
-	__u32	user_asce;			/* 0x02b0 */
-	__u32	current_pid;			/* 0x02b4 */
+	__u32	kernel_asce;			/* 0x02b4 */
+	__u32	user_asce;			/* 0x02b8 */
+	__u32	current_pid;			/* 0x02bc */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x02b8 */
-	__u32	softirq_pending;		/* 0x02bc */
-	__u32	percpu_offset;			/* 0x02c0 */
-	__u32	ext_call_fast;			/* 0x02c4 */
-	__u64	int_clock;			/* 0x02c8 */
-	__u64	mcck_clock;			/* 0x02d0 */
-	__u64	clock_comparator;		/* 0x02d8 */
-	__u32	machine_flags;			/* 0x02e0 */
-	__u32	ftrace_func;			/* 0x02e4 */
-	__u8	pad_0x02e8[0x0300-0x02e8];	/* 0x02e8 */
+	__u32	cpu_nr;				/* 0x02c0 */
+	__u32	softirq_pending;		/* 0x02c4 */
+	__u32	percpu_offset;			/* 0x02c8 */
+	__u32	ext_call_fast;			/* 0x02cc */
+	__u64	int_clock;			/* 0x02d0 */
+	__u64	mcck_clock;			/* 0x02d8 */
+	__u64	clock_comparator;		/* 0x02e0 */
+	__u32	machine_flags;			/* 0x02e8 */
+	__u32	ftrace_func;			/* 0x02ec */
+	__u8	pad_0x02f8[0x0300-0x02f0];	/* 0x02f0 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -229,57 +234,62 @@
 	psw_t	mcck_new_psw;			/* 0x01e0 */
 	psw_t	io_new_psw;			/* 0x01f0 */
 
-	/* Entry/exit save area & return psws. */
-	__u64	save_area[16];			/* 0x0200 */
-	psw_t	return_psw;			/* 0x0280 */
-	psw_t	return_mcck_psw;		/* 0x0290 */
+	/* Save areas. */
+	__u64	save_area_sync[8];		/* 0x0200 */
+	__u64	save_area_async[8];		/* 0x0240 */
+	__u64	save_area_restart[1];		/* 0x0280 */
+	__u8	pad_0x0288[0x0290-0x0288];	/* 0x0288 */
+
+	/* Return psws. */
+	psw_t	return_psw;			/* 0x0290 */
+	psw_t	return_mcck_psw;		/* 0x02a0 */
 
 	/* CPU accounting and timing values. */
-	__u64	sync_enter_timer;		/* 0x02a0 */
-	__u64	async_enter_timer;		/* 0x02a8 */
-	__u64	mcck_enter_timer;		/* 0x02b0 */
-	__u64	exit_timer;			/* 0x02b8 */
-	__u64	user_timer;			/* 0x02c0 */
-	__u64	system_timer;			/* 0x02c8 */
-	__u64	steal_timer;			/* 0x02d0 */
-	__u64	last_update_timer;		/* 0x02d8 */
-	__u64	last_update_clock;		/* 0x02e0 */
+	__u64	sync_enter_timer;		/* 0x02b0 */
+	__u64	async_enter_timer;		/* 0x02b8 */
+	__u64	mcck_enter_timer;		/* 0x02c0 */
+	__u64	exit_timer;			/* 0x02c8 */
+	__u64	user_timer;			/* 0x02d0 */
+	__u64	system_timer;			/* 0x02d8 */
+	__u64	steal_timer;			/* 0x02e0 */
+	__u64	last_update_timer;		/* 0x02e8 */
+	__u64	last_update_clock;		/* 0x02f0 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x02e8 */
-	__u64	thread_info;			/* 0x02f0 */
-	__u64	kernel_stack;			/* 0x02f8 */
+	__u64	current_task;			/* 0x02f8 */
+	__u64	thread_info;			/* 0x0300 */
+	__u64	kernel_stack;			/* 0x0308 */
 
 	/* Interrupt and panic stack. */
-	__u64	async_stack;			/* 0x0300 */
-	__u64	panic_stack;			/* 0x0308 */
+	__u64	async_stack;			/* 0x0310 */
+	__u64	panic_stack;			/* 0x0318 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0310 */
-	__u64	user_asce;			/* 0x0318 */
-	__u64	current_pid;			/* 0x0320 */
+	__u64	kernel_asce;			/* 0x0320 */
+	__u64	user_asce;			/* 0x0328 */
+	__u64	current_pid;			/* 0x0330 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0328 */
-	__u32	softirq_pending;		/* 0x032c */
-	__u64	percpu_offset;			/* 0x0330 */
-	__u64	ext_call_fast;			/* 0x0338 */
-	__u64	int_clock;			/* 0x0340 */
-	__u64	mcck_clock;			/* 0x0348 */
-	__u64	clock_comparator;		/* 0x0350 */
-	__u64	vdso_per_cpu_data;		/* 0x0358 */
-	__u64	machine_flags;			/* 0x0360 */
-	__u64	ftrace_func;			/* 0x0368 */
-	__u64	gmap;				/* 0x0370 */
-	__u64	cmf_hpp;			/* 0x0378 */
+	__u32	cpu_nr;				/* 0x0338 */
+	__u32	softirq_pending;		/* 0x033c */
+	__u64	percpu_offset;			/* 0x0340 */
+	__u64	ext_call_fast;			/* 0x0348 */
+	__u64	int_clock;			/* 0x0350 */
+	__u64	mcck_clock;			/* 0x0358 */
+	__u64	clock_comparator;		/* 0x0360 */
+	__u64	vdso_per_cpu_data;		/* 0x0368 */
+	__u64	machine_flags;			/* 0x0370 */
+	__u64	ftrace_func;			/* 0x0378 */
+	__u64	gmap;				/* 0x0380 */
+	__u8	pad_0x0388[0x0400-0x0388];	/* 0x0388 */
 
 	/* Interrupt response block. */
-	__u8	irb[64];			/* 0x0380 */
+	__u8	irb[64];			/* 0x0400 */
 
 	/* Per cpu primary space access list */
-	__u32	paste[16];			/* 0x03c0 */
+	__u32	paste[16];			/* 0x0440 */
 
-	__u8	pad_0x0400[0x0e00-0x0400];	/* 0x0400 */
+	__u8	pad_0x0480[0x0e00-0x0480];	/* 0x0480 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4f289ff..011358c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -128,28 +128,11 @@
  * effect, this also makes sure that 64 bit module code cannot be used
  * as system call address.
  */
-
 extern unsigned long VMALLOC_START;
+extern unsigned long VMALLOC_END;
+extern struct page *vmemmap;
 
-#ifndef __s390x__
-#define VMALLOC_SIZE	(96UL << 20)
-#define VMALLOC_END	0x7e000000UL
-#define VMEM_MAP_END	0x80000000UL
-#else /* __s390x__ */
-#define VMALLOC_SIZE	(128UL << 30)
-#define VMALLOC_END	0x3e000000000UL
-#define VMEM_MAP_END	0x40000000000UL
-#endif /* __s390x__ */
-
-/*
- * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
- * mapping. This needs to be calculated at compile time since the size of the
- * VMEM_MAP is static but the size of struct page can change.
- */
-#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
-#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
-#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define vmemmap		((struct page *) VMALLOC_END)
+#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
 
 /*
  * A 31 bit pagetable entry of S390 has following format:
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5f33d37..27272f6 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,8 +80,6 @@
 	unsigned int  acrs[NUM_ACRS];
         unsigned long ksp;              /* kernel stack pointer             */
 	mm_segment_t mm_segment;
-        unsigned long prot_addr;        /* address of protection-excep.     */
-        unsigned int trap_no;
 	unsigned long gmap_addr;	/* address of last gmap fault. */
 	struct per_regs per_user;	/* User specified PER registers */
 	struct per_event per_event;	/* Cause of the last PER trap */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index a658463..56da355 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -324,7 +324,8 @@
 	psw_t psw;
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
-	unsigned int svc_code;
+	unsigned int int_code;
+	unsigned long int_parm_long;
 };
 
 /*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index e63d13d..d75c8e7 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -352,7 +352,7 @@
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
  * @output_handler: handler to be called for output queues
- * @queue_start_poll: polling handlers (one per input queue or NULL)
+ * @queue_start_poll_array: polling handlers (one per input queue or NULL)
  * @int_parm: interruption parameter
  * @input_sbal_addr_array:  address of no_input_qs * 128 pointers
  * @output_sbal_addr_array: address of no_output_qs * 128 pointers
@@ -372,7 +372,8 @@
 	unsigned int no_output_qs;
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
-	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
+	void (**queue_start_poll_array) (struct ccw_device *, int,
+					  unsigned long);
 	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index e3bffd4..7040b85 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -56,6 +56,7 @@
 	ec_schedule = 0,
 	ec_call_function,
 	ec_call_function_single,
+	ec_stop_cpu,
 };
 
 /*
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ab47a69..c32e912 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -23,7 +23,6 @@
 extern int __cpu_up (unsigned int cpu);
 
 extern struct mutex smp_cpu_state_mutex;
-extern int smp_cpu_polarization[];
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h
index fdff1e9..67b5c1b 100644
--- a/arch/s390/include/asm/socket.h
+++ b/arch/s390/include/asm/socket.h
@@ -70,4 +70,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 545d219..0fb3402 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,8 +4,8 @@
 #ifdef CONFIG_64BIT
 
 #define SECTION_SIZE_BITS	28
-#define MAX_PHYSADDR_BITS	42
-#define MAX_PHYSMEM_BITS	42
+#define MAX_PHYSADDR_BITS	46
+#define MAX_PHYSMEM_BITS	46
 
 #else
 
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index b239ff5..fb214dd 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -27,7 +27,7 @@
 				  struct pt_regs *regs)
 {
 	return test_tsk_thread_flag(task, TIF_SYSCALL) ?
-		(regs->svc_code & 0xffff) : -1;
+		(regs->int_code & 0xffff) : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index ef573c1..d73cc6b 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,8 +20,6 @@
 
 struct task_struct;
 
-extern int sysctl_userprocess_debug;
-
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_per_regs(struct task_struct *task);
 
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a231834..a730381 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -102,7 +102,6 @@
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP		20	/* This task is single stepped */
-#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
 #define is_32bit_task()		(test_thread_flag(TIF_31BIT))
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 005d77d..0837de8 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -4,6 +4,10 @@
 #include <linux/cpumask.h>
 #include <asm/sysinfo.h>
 
+struct cpu;
+
+#ifdef CONFIG_SCHED_BOOK
+
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
@@ -16,8 +20,6 @@
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 #define mc_capable()			(1)
 
-#ifdef CONFIG_SCHED_BOOK
-
 extern unsigned char cpu_book_id[NR_CPUS];
 extern cpumask_t cpu_book_map[NR_CPUS];
 
@@ -29,19 +31,45 @@
 #define topology_book_id(cpu)		(cpu_book_id[cpu])
 #define topology_book_cpumask(cpu)	(&cpu_book_map[cpu])
 
-#endif /* CONFIG_SCHED_BOOK */
-
+int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
 
-#define POLARIZATION_UNKNWN	(-1)
+#else /* CONFIG_SCHED_BOOK */
+
+static inline void topology_schedule_update(void) { }
+static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
+
+#endif /* CONFIG_SCHED_BOOK */
+
+#define POLARIZATION_UNKNOWN	(-1)
 #define POLARIZATION_HRZ	(0)
 #define POLARIZATION_VL		(1)
 #define POLARIZATION_VM		(2)
 #define POLARIZATION_VH		(3)
 
-#ifdef CONFIG_SMP
+extern int cpu_polarization[];
+
+static inline void cpu_set_polarization(int cpu, int val)
+{
+#ifdef CONFIG_SCHED_BOOK
+	cpu_polarization[cpu] = val;
+#endif
+}
+
+static inline int cpu_read_polarization(int cpu)
+{
+#ifdef CONFIG_SCHED_BOOK
+	return cpu_polarization[cpu];
+#else
+	return POLARIZATION_HRZ;
+#endif
+}
+
+#ifdef CONFIG_SCHED_BOOK
 void s390_init_cpu_topology(void);
 #else
 static inline void s390_init_cpu_topology(void)
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index eeb52cc..05ebbcd 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -13,8 +13,6 @@
 
 #ifndef __ASSEMBLY__
 
-typedef unsigned short umode_t;
-
 /* A address type so that arithmetic can be done on it & it can be upgraded to
    64 bit when necessary 
 */
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 58de4c9..8a8008f 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -398,6 +398,7 @@
 #define __ARCH_WANT_SYS_SIGNAL
 #define __ARCH_WANT_SYS_UTIME
 #define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_FADVISE64
 #define __ARCH_WANT_SYS_GETPGRP
 #define __ARCH_WANT_SYS_LLSEEK
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dd4f076..7d9ec92 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,8 @@
 extra-y				+= $(if $(CONFIG_64BIT),head64.o,head31.o)
 
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
-obj-$(CONFIG_SMP)		+= smp.o topology.o
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SCHED_BOOK)	+= topology.o
 obj-$(CONFIG_SMP)		+= $(if $(CONFIG_64BIT),switch_cpu64.o, \
 							switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp_asm64.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 7513187..6e6a72e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,7 +45,8 @@
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
-	DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code));
+	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
+	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -108,7 +109,9 @@
 	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
 	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
 	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
-	DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
+	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
+	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
+	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
 	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
 	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
 	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
@@ -150,7 +153,6 @@
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
-	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
 	return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f8828d3..3aa4d00 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -33,7 +33,7 @@
 	.previous
 
 ENTRY(s390_base_ext_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA
+	stmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_ext_handler_fn
@@ -41,7 +41,7 @@
 	ltgr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-1:	lmg	%r0,%r15,__LC_SAVE_AREA
+1:	lmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpswe	__LC_EXT_OLD_PSW
 
@@ -53,7 +53,7 @@
 	.previous
 
 ENTRY(s390_base_pgm_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA
+	stmg	%r0,%r15,__LC_SAVE_AREA_SYNC
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_pgm_handler_fn
@@ -61,7 +61,7 @@
 	ltgr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-	lmg	%r0,%r15,__LC_SAVE_AREA
+	lmg	%r0,%r15,__LC_SAVE_AREA_SYNC
 	lpswe	__LC_PGM_OLD_PSW
 1:	lpswe	disabled_wait_psw-0b(%r13)
 
@@ -142,7 +142,7 @@
 	.previous
 
 ENTRY(s390_base_ext_handler)
-	stm	%r0,%r15,__LC_SAVE_AREA
+	stm	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
 	l	%r1,2f-0b(%r13)
@@ -150,7 +150,7 @@
 	ltr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-1:	lm	%r0,%r15,__LC_SAVE_AREA
+1:	lm	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpsw	__LC_EXT_OLD_PSW
 
@@ -164,7 +164,7 @@
 	.previous
 
 ENTRY(s390_base_pgm_handler)
-	stm	%r0,%r15,__LC_SAVE_AREA
+	stm	%r0,%r15,__LC_SAVE_AREA_SYNC
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
 	l	%r1,2f-0b(%r13)
@@ -172,7 +172,7 @@
 	ltr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-	lm	%r0,%r15,__LC_SAVE_AREA
+	lm	%r0,%r15,__LC_SAVE_AREA_SYNC
 	lpsw	__LC_PGM_OLD_PSW
 
 1:	lpsw	disabled_wait_psw-0b(%r13)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 84a9828..ab64bdb 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -278,9 +278,6 @@
 {
 	if (call >> 16)		/* hack for backward compatibility */
 		return -EINVAL;
-
-	call &= 0xffff;
-
 	switch (call) {
 	case SEMTIMEDOP:
 		return compat_sys_semtimedop(first, compat_ptr(ptr),
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4f68c81..6fe78c2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -501,8 +501,12 @@
 
 	/* We forgot to include these in the sigcontext.
 	   To avoid breaking binary compatibility, they are passed as args. */
-	regs->gprs[4] = current->thread.trap_no;
-	regs->gprs[5] = current->thread.prot_addr;
+	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+	    sig == SIGTRAP || sig == SIGFPE) {
+		/* set extra registers only for synchronous signals */
+		regs->gprs[4] = regs->int_code & 127;
+		regs->gprs[5] = regs->int_parm_long;
+	}
 
 	/* Place signal number on stack to allow backtrace from handler.  */
 	if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
@@ -544,9 +548,9 @@
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
+		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode;
+		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
 		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
 				  (u16 __force __user *)(frame->retcode));
 	}
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 5ad6bc0..6848828 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -74,7 +74,7 @@
 static int debug_open(struct inode *inode, struct file *file);
 static int debug_close(struct inode *inode, struct file *file);
 static debug_info_t *debug_info_create(const char *name, int pages_per_area,
-			int nr_areas, int buf_size, mode_t mode);
+			int nr_areas, int buf_size, umode_t mode);
 static void debug_info_get(debug_info_t *);
 static void debug_info_put(debug_info_t *);
 static int debug_prolog_level_fn(debug_info_t * id,
@@ -330,7 +330,7 @@
 
 static debug_info_t*
 debug_info_create(const char *name, int pages_per_area, int nr_areas,
-		  int buf_size, mode_t mode)
+		  int buf_size, umode_t mode)
 {
 	debug_info_t* rc;
 
@@ -688,7 +688,7 @@
  */
 
 debug_info_t *debug_register_mode(const char *name, int pages_per_area,
-				  int nr_areas, int buf_size, mode_t mode,
+				  int nr_areas, int buf_size, umode_t mode,
 				  uid_t uid, gid_t gid)
 {
 	debug_info_t *rc = NULL;
@@ -1090,7 +1090,7 @@
 	int rc = 0;
 	int i;
 	unsigned long flags;
-	mode_t mode;
+	umode_t mode;
 	struct dentry *pde;
 
 	if (!id)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 45df6d4..e2f8475 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1578,10 +1578,15 @@
 	ptr += sprintf(ptr, "%s Code:", mode);
 	hops = 0;
 	while (start < end && hops < 8) {
-		*ptr++ = (start == 32) ? '>' : ' ';
+		opsize = insn_length(code[start]);
+		if  (start + opsize == 32)
+			*ptr++ = '#';
+		else if (start == 32)
+			*ptr++ = '>';
+		else
+			*ptr++ = ' ';
 		addr = regs->psw.addr + start - 32;
 		ptr += sprintf(ptr, ONELONG, addr);
-		opsize = insn_length(code[start]);
 		if (start + opsize >= end)
 			break;
 		for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c9ffe00..52098d6 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -434,18 +434,22 @@
 	}
 }
 
-static void __init setup_boot_command_line(void)
+static inline int has_ebcdic_char(const char *str)
 {
 	int i;
 
-	/* convert arch command line to ascii */
-	for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
-		if (COMMAND_LINE[i] & 0x80)
-			break;
-	if (i < ARCH_COMMAND_LINE_SIZE)
-		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
-	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
+	for (i = 0; str[i]; i++)
+		if (str[i] & 0x80)
+			return 1;
+	return 0;
+}
 
+static void __init setup_boot_command_line(void)
+{
+	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+	/* convert arch command line to ascii if necessary */
+	if (has_ebcdic_char(COMMAND_LINE))
+		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
 	/* copy arch command line */
 	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
 		ARCH_COMMAND_LINE_SIZE);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b131570..3705700 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -19,32 +19,22 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS    =	STACK_FRAME_OVERHEAD
-SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 4
-SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 12
-SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 20
-SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 28
-SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 36
-SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 44
-SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 52
-SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 60
-SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE  =	STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0      =	__PT_GPRS
+__PT_R1      =	__PT_GPRS + 4
+__PT_R2      =	__PT_GPRS + 8
+__PT_R3      =	__PT_GPRS + 12
+__PT_R4      =	__PT_GPRS + 16
+__PT_R5      =	__PT_GPRS + 20
+__PT_R6      =	__PT_GPRS + 24
+__PT_R7      =	__PT_GPRS + 28
+__PT_R8      =	__PT_GPRS + 32
+__PT_R9      =	__PT_GPRS + 36
+__PT_R10     =	__PT_GPRS + 40
+__PT_R11     =	__PT_GPRS + 44
+__PT_R12     =	__PT_GPRS + 48
+__PT_R13     =	__PT_GPRS + 52
+__PT_R14     =	__PT_GPRS + 56
+__PT_R15     =	__PT_GPRS + 60
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -58,133 +48,91 @@
 
 #define BASED(name) name-system_call(%r13)
 
-#ifdef CONFIG_TRACE_IRQFLAGS
 	.macro	TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Ltrace_irq_on_caller)
-	basr	%r14,%r1
+	l	%r1,BASED(.Lhardirqs_on)
+	basr	%r14,%r1		# call trace_hardirqs_on_caller
+#endif
 	.endm
 
 	.macro	TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Ltrace_irq_off_caller)
-	basr	%r14,%r1
-	.endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
+	l	%r1,BASED(.Lhardirqs_off)
+	basr	%r14,%r1		# call trace_hardirqs_off_caller
 #endif
+	.endm
 
-#ifdef CONFIG_LOCKDEP
 	.macro	LOCKDEP_SYS_EXIT
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	jz	0f
+#ifdef CONFIG_LOCKDEP
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jz	.+10
 	l	%r1,BASED(.Llockdep_sys_exit)
-	basr	%r14,%r1
-0:
-	.endm
-#else
-#define LOCKDEP_SYS_EXIT
+	basr	%r14,%r1		# call lockdep_sys_exit
 #endif
-
-/*
- * Register usage in interrupt handlers:
- *    R9  - pointer to current task structure
- *    R13 - pointer to literal pool
- *    R14 - return register for function calls
- *    R15 - kernel stack pointer
- */
-
-	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
-	lm	%r10,%r11,\lc_from
-	sl	%r10,\lc_to
-	sl	%r11,\lc_to+4
-	bc	3,BASED(0f)
-	sl	%r10,BASED(.Lc_1)
-0:	al	%r10,\lc_sum
-	al	%r11,\lc_sum+4
-	bc	12,BASED(1f)
-	al	%r10,BASED(.Lc_1)
-1:	stm	%r10,%r11,\lc_sum
 	.endm
 
-	.macro	SAVE_ALL_SVC psworg,savearea
-	stm	%r12,%r15,\savearea
-	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	.endm
-
-	.macro	SAVE_ALL_BASE savearea
-	stm	%r12,%r15,\savearea
-	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
-	.endm
-
-	.macro	SAVE_ALL_PGM psworg,savearea
-	tm	\psworg+1,0x01		# test problem state bit
+	.macro	CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
-	bnz	BASED(1f)
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bnz	BASED(2f)
-	la	%r12,\psworg
-	b	BASED(stack_overflow)
-#else
-	bz	BASED(2f)
+	tml	%r15,\stacksize - CONFIG_STACK_GUARD
+	la	%r14,\savearea
+	jz	stack_overflow
 #endif
-1:	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
-	.macro	SAVE_ALL_ASYNC psworg,savearea
-	stm	%r12,%r15,\savearea
-	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
-	la	%r12,\psworg
-	tm	\psworg+1,0x01		# test problem state bit
-	bnz	BASED(1f)		# from user -> load async stack
-	clc	\psworg+4(4),BASED(.Lcritical_end)
-	bhe	BASED(0f)
-	clc	\psworg+4(4),BASED(.Lcritical_start)
-	bl	BASED(0f)
-	l	%r14,BASED(.Lcleanup_critical)
-	basr	%r14,%r14
-	tm	1(%r12),0x01		# retest problem state after cleanup
-	bnz	BASED(1f)
-0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
+	.macro	SWITCH_ASYNC savearea,stack,shift
+	tmh	%r8,0x0001		# interrupting from user ?
+	jnz	1f
+	lr	%r14,%r9
+	sl	%r14,BASED(.Lcritical_start)
+	cl	%r14,BASED(.Lcritical_length)
+	jhe	0f
+	la	%r11,\savearea		# inside critical section, do cleanup
+	bras	%r14,cleanup_critical
+	tmh	%r8,0x0001		# retest problem state after cleanup
+	jnz	1f
+0:	l	%r14,\stack		# are we already on the target stack?
 	slr	%r14,%r15
-	sra	%r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
-	bnz	BASED(1f)
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bnz	BASED(2f)
-	b	BASED(stack_overflow)
-#else
-	bz	BASED(2f)
-#endif
-1:	l	%r15,__LC_ASYNC_STACK
-2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
+	sra	%r14,\shift
+	jnz	1f
+	CHECK_STACK 1<<\shift,\savearea
+	j	2f
+1:	l	%r15,\stack		# load target stack
+2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
 
-	.macro	CREATE_STACK_FRAME savearea
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
-	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	.macro	ADD64 high,low,timer
+	al	\high,\timer
+	al	\low,\timer+4
+	brc	12,.+8
+	ahi	\high,1
 	.endm
 
-	.macro	RESTORE_ALL psworg,sync
-	mvc	\psworg(8),SP_PSW(%r15) # move user PSW to lowcore
-	.if !\sync
-	ni	\psworg+1,0xfd		# clear wait state bit
-	.endif
-	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
-	stpt	__LC_EXIT_TIMER
-	lpsw	\psworg			# back to caller
+	.macro	SUB64 high,low,timer
+	sl	\high,\timer
+	sl	\low,\timer+4
+	brc	3,.+8
+	ahi	\high,-1
+	.endm
+
+	.macro	UPDATE_VTIME high,low,enter_timer
+	lm	\high,\low,__LC_EXIT_TIMER
+	SUB64	\high,\low,\enter_timer
+	ADD64	\high,\low,__LC_USER_TIMER
+	stm	\high,\low,__LC_USER_TIMER
+	lm	\high,\low,__LC_LAST_UPDATE_TIMER
+	SUB64	\high,\low,__LC_EXIT_TIMER
+	ADD64	\high,\low,__LC_SYSTEM_TIMER
+	stm	\high,\low,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 	.endm
 
 	.macro REENABLE_IRQS
-	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
-	ni	__SF_EMPTY(%r15),0xbf
-	ssm	__SF_EMPTY(%r15)
+	st	%r8,__LC_RETURN_PSW
+	ni	__LC_RETURN_PSW,0xbf
+	ssm	__LC_RETURN_PSW
 	.endm
 
 	.section .kprobes.text, "ax"
@@ -197,14 +145,13 @@
  *  gpr2 = prev
  */
 ENTRY(__switch_to)
-	basr	%r1,0
-0:	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
 	l	%r5,__THREAD_info(%r3)		# get thread_info of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
-	bz	1f-0b(%r1)
+	jz	0f
 	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
-1:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+0:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
 	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
@@ -224,48 +171,55 @@
 
 ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
-sysc_saveall:
-	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+3(%r12),_TIF_SYSCALL
+sysc_stm:
+	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+sysc_per:
+	l	%r15,__LC_KERNEL_STACK
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
 sysc_vtime:
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 sysc_do_svc:
-	xr	%r7,%r7
-	icm	%r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0
-	bnz	BASED(sysc_nr_ok)	# svc number > 0
+	oi	__TI_flags+3(%r12),_TIF_SYSCALL
+	lh	%r8,__PT_INT_CODE+2(%r11)
+	sla	%r8,2				# shift and test for svc0
+	jnz	sysc_nr_ok
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
-	bnl	BASED(sysc_nr_ok)
-	sth	%r1,SP_SVC_CODE+2(%r15)
-	lr	%r7,%r1 	  # copy svc number to %r7
+	jnl	sysc_nr_ok
+	sth	%r1,__PT_INT_CODE+2(%r11)
+	lr	%r8,%r1
+	sla	%r8,2
 sysc_nr_ok:
-	sll	%r7,2		  # svc number *4
-	l	%r10,BASED(.Lsysc_table)
+	l	%r10,BASED(.Lsys_call_table)	# 31 bit system call table
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	st	%r2,__PT_ORIG_GPR2(%r11)
+	st	%r7,STACK_FRAME_OVERHEAD(%r15)
+	l	%r9,0(%r8,%r10)			# get system call addr.
 	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
-	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
-	l	%r8,0(%r7,%r10)	  # get system call addr.
-	bnz	BASED(sysc_tracesys)
-	basr	%r14,%r8	  # call sys_xxxx
-	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
+	jnz	sysc_tracesys
+	basr	%r14,%r9			# call sys_xxxx
+	st	%r2,__PT_R2(%r11)		# store return value
 
 sysc_return:
 	LOCKDEP_SYS_EXIT
 sysc_tif:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	bno	BASED(sysc_restore)
+	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
+	jno	sysc_restore
 	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
-	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
+	jnz	sysc_work			# check for work
 	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
 sysc_restore:
-	RESTORE_ALL __LC_RETURN_PSW,1
+	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
+	stpt	__LC_EXIT_TIMER
+	lm	%r0,%r15,__PT_R0(%r11)
+	lpsw	__LC_RETURN_PSW
 sysc_done:
 
 #
@@ -273,16 +227,16 @@
 #
 sysc_work:
 	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
-	bo	BASED(sysc_mcck_pending)
+	jo	sysc_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	bo	BASED(sysc_reschedule)
+	jo	sysc_reschedule
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	bo	BASED(sysc_sigpending)
+	jo	sysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	bo	BASED(sysc_notify_resume)
+	jo	sysc_notify_resume
 	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
-	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_return)	# beware of critical section cleanup
+	jo	sysc_singlestep
+	j	sysc_return		# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -290,13 +244,13 @@
 sysc_reschedule:
 	l	%r1,BASED(.Lschedule)
 	la	%r14,BASED(sysc_return)
-	br	%r1			# call scheduler
+	br	%r1			# call schedule
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
 sysc_mcck_pending:
-	l	%r1,BASED(.Ls390_handle_mcck)
+	l	%r1,BASED(.Lhandle_mcck)
 	la	%r14,BASED(sysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
@@ -305,23 +259,24 @@
 #
 sysc_sigpending:
 	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r1,BASED(.Ldo_signal)
 	basr	%r14,%r1		# call do_signal
 	tm	__TI_flags+3(%r12),_TIF_SYSCALL
-	bno	BASED(sysc_return)
-	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	xr	%r7,%r7			# svc 0 returns -ENOSYS
-	clc	SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2)
-	bnl	BASED(sysc_nr_ok)	# invalid svc number -> do svc 0
-	icm	%r7,3,SP_SVC_CODE+2(%r15)# load new svc number
-	b	BASED(sysc_nr_ok)	# restart svc
+	jno	sysc_return
+	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	xr	%r8,%r8			# svc 0 returns -ENOSYS
+	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
+	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
+	sla	%r8,2
+	j	sysc_nr_ok		# restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
 sysc_notify_resume:
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r1,BASED(.Ldo_notify_resume)
 	la	%r14,BASED(sysc_return)
 	br	%r1			# call do_notify_resume
@@ -331,56 +286,57 @@
 #
 sysc_singlestep:
 	ni	__TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	la	%r14,BASED(sysc_return)	# load adr. of system return
-	br	%r1			# branch to do_per_trap
+	lr	%r2,%r11		# pass pointer to pt_regs
+	l	%r1,BASED(.Ldo_per_trap)
+	la	%r14,BASED(sysc_return)
+	br	%r1			# call do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
 sysc_tracesys:
-	l	%r1,BASED(.Ltrace_entry)
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Ltrace_enter)
+	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r3,0
 	xr	%r0,%r0
-	icm	%r0,3,SP_SVC_CODE(%r15)
-	st	%r0,SP_R2(%r15)
-	basr	%r14,%r1
+	icm	%r0,3,__PT_INT_CODE+2(%r11)
+	st	%r0,__PT_R2(%r11)
+	basr	%r14,%r1		# call do_syscall_trace_enter
 	cl	%r2,BASED(.Lnr_syscalls)
-	bnl	BASED(sysc_tracenogo)
-	lr	%r7,%r2
-	sll	%r7,2			# svc number *4
-	l	%r8,0(%r7,%r10)
+	jnl	sysc_tracenogo
+	lr	%r8,%r2
+	sll	%r8,2
+	l	%r9,0(%r8,%r10)
 sysc_tracego:
-	lm	%r3,%r6,SP_R3(%r15)
-	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
-	l	%r2,SP_ORIG_R2(%r15)
-	basr	%r14,%r8		# call sys_xxx
-	st	%r2,SP_R2(%r15)		# store return value
+	lm	%r3,%r7,__PT_R3(%r11)
+	st	%r7,STACK_FRAME_OVERHEAD(%r15)
+	l	%r2,__PT_ORIG_GPR2(%r11)
+	basr	%r14,%r9		# call sys_xxx
+	st	%r2,__PT_R2(%r11)	# store return value
 sysc_tracenogo:
 	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
-	bz	BASED(sysc_return)
+	jz	sysc_return
 	l	%r1,BASED(.Ltrace_exit)
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r14,BASED(sysc_return)
-	br	%r1
+	br	%r1			# call do_syscall_trace_exit
 
 #
 # a new process exits the kernel with ret_from_fork
 #
 ENTRY(ret_from_fork)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
-	bo	BASED(0f)
-	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
-0:	l	%r1,BASED(.Lschedtail)
-	basr	%r14,%r1
+	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
+	jo	0f
+	st	%r15,__PT_R15(%r11)	# store stack pointer for new kthread
+0:	l	%r1,BASED(.Lschedule_tail)
+	basr	%r14,%r1		# call schedule_tail
 	TRACE_IRQS_ON
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	b	BASED(sysc_tracenogo)
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	j	sysc_tracenogo
 
 #
 # kernel_execve function needs to deal with pt_regs that is not
@@ -390,153 +346,98 @@
 	stm	%r12,%r15,48(%r15)
 	lr	%r14,%r15
 	l	%r13,__LC_SVC_NEW_PSW+4
-	s	%r15,BASED(.Lc_spsize)
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	st	%r14,__SF_BACKCHAIN(%r15)
-	la	%r12,SP_PTREGS(%r15)
+	la	%r12,STACK_FRAME_OVERHEAD(%r15)
 	xc	0(__PT_SIZE,%r12),0(%r12)
 	l	%r1,BASED(.Ldo_execve)
 	lr	%r5,%r12
-	basr	%r14,%r1
+	basr	%r14,%r1		# call do_execve
 	ltr	%r2,%r2
-	be	BASED(0f)
-	a	%r15,BASED(.Lc_spsize)
+	je	0f
+	ahi	%r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	lm	%r12,%r15,48(%r15)
 	br	%r14
 	# execve succeeded.
-0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
+0:	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	l	%r15,__LC_KERNEL_STACK	# load ksp
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	mvc	0(__PT_SIZE,%r11),0(%r12)	# copy pt_regs
 	l	%r12,__LC_THREAD_INFO
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	l	%r1,BASED(.Lexecve_tail)
-	basr	%r14,%r1
-	b	BASED(sysc_return)
+	basr	%r14,%r1		# call execve_tail
+	j	sysc_return
 
 /*
  * Program check handler routine
  */
 
 ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
 	stpt	__LC_SYNC_ENTER_TIMER
-	SAVE_ALL_BASE __LC_SAVE_AREA
-	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-	bnz	BASED(pgm_per)		# got per exception -> special case
-	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(pgm_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime:
-	l	%r3,__LC_PGM_ILC	# load program interruption code
-	l	%r4,__LC_TRANS_EXC_CODE
-	REENABLE_IRQS
-	la	%r8,0x7f
-	nr	%r8,%r3
-	sll	%r8,2
-	l	%r1,BASED(.Ljump_table)
-	l	%r1,0(%r8,%r1)		# load address of handler routine
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r1		# branch to interrupt-handler
-pgm_exit:
-	b	BASED(sysc_return)
-
-#
-# handle per exception
-#
-pgm_per:
-	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
-	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
-	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
-	be	BASED(pgm_svcper)
-# no interesting special case, ignore PER event
-	lm	%r12,%r15,__LC_SAVE_AREA
-	lpsw	0x28
-
-#
-# Normal per exception
-#
-pgm_per_std:
-	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(pgm_no_vtime2)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime2:
+	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_PGM_OLD_PSW
+	tmh	%r8,0x0001		# test problem state bit
+	jnz	1f			# -> fault in user space
+	tmh	%r8,0x4000		# PER bit set in old PSW ?
+	jnz	0f			# -> enabled, can't be a double fault
+	tm	__LC_PGM_ILC+3,0x80	# check for per exception
+	jnz	pgm_svcper		# -> single stepped svc
+0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+	j	2f
+1:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+	l	%r15,__LC_KERNEL_STACK
+2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
+	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
+	tm	__LC_PGM_ILC+3,0x80	# check for per exception
+	jz	0f
 	l	%r1,__TI_task(%r12)
-	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
-	bz	BASED(kernel_per)
-	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	tmh	%r8,0x0001		# kernel per event ?
+	jz	pgm_kprobe
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
 	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
-	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
-	l	%r3,__LC_PGM_ILC	# load program interruption code
-	l	%r4,__LC_TRANS_EXC_CODE
-	REENABLE_IRQS
-	la	%r8,0x7f
-	nr	%r8,%r3 		# clear per-event-bit and ilc
-	be	BASED(pgm_exit2)	# only per or per+check ?
-	sll	%r8,2
+0:	REENABLE_IRQS
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ljump_table)
-	l	%r1,0(%r8,%r1)		# load address of handler routine
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	la	%r10,0x7f
+	n	%r10,__PT_INT_CODE(%r11)
+	je	sysc_return
+	sll	%r10,2
+	l	%r1,0(%r10,%r1)		# load address of handler routine
+	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-pgm_exit2:
-	b	BASED(sysc_return)
+	j	sysc_return
 
 #
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
+#
+pgm_kprobe:
+	REENABLE_IRQS
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	l	%r1,BASED(.Ldo_per_trap)
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call do_per_trap
+	j	sysc_return
+
+#
+# single stepped system call
 #
 pgm_svcper:
-	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	l	%r8,__TI_task(%r12)
-	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
-	mvc	__THREAD_per_address(4,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	b	BASED(sysc_do_svc)
-
-#
-# per was called from kernel, must be kprobes
-#
-kernel_per:
-	REENABLE_IRQS
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	basr	%r14,%r1		# branch to do_single_step
-	b	BASED(pgm_exit)
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
+	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
+	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
 
 /*
  * IO interrupt handler routine
@@ -545,28 +446,35 @@
 ENTRY(io_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
-	CREATE_STACK_FRAME __LC_SAVE_AREA+16
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(io_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-io_no_vtime:
+	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_IO_OLD_PSW
+	tmh	%r8,0x0001		# interrupting from user ?
+	jz	io_skip
+	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+io_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+	stm	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r1		# branch to standard irq handler
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	l	%r1,BASED(.Ldo_IRQ)
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call do_IRQ
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
 io_tif:
 	tm	__TI_flags+3(%r12),_TIF_WORK_INT
-	bnz	BASED(io_work)		# there is work to do (signals etc.)
+	jnz	io_work			# there is work to do (signals etc.)
 io_restore:
-	RESTORE_ALL __LC_RETURN_PSW,0
+	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
+	ni	__LC_RETURN_PSW+1,0xfd	# clean wait state bit
+	stpt	__LC_EXIT_TIMER
+	lm	%r0,%r15,__PT_R0(%r11)
+	lpsw	__LC_RETURN_PSW
 io_done:
 
 #
@@ -577,28 +485,29 @@
 # Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	bo	BASED(io_work_user)	# yes -> do resched & signal
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jo	io_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r12)
-	bnz	BASED(io_restore)	# preemption disabled
+	jnz	io_restore		# preemption disabled
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	bno	BASED(io_restore)
+	jno	io_restore
 	# switch to kernel stack
-	l	%r1,SP_R15(%r15)
-	s	%r1,BASED(.Lc_spsize)
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	l	%r1,__PT_R15(%r11)
+	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lr	%r15,%r1
 	# TRACE_IRQS_ON already done at io_return, call
 	# TRACE_IRQS_OFF to keep things symmetrical
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Lpreempt_schedule_irq)
+	l	%r1,BASED(.Lpreempt_irq)
 	basr	%r14,%r1		# call preempt_schedule_irq
-	b	BASED(io_return)
+	j	io_return
 #else
-	b	BASED(io_restore)
+	j	io_restore
 #endif
 
 #
@@ -606,9 +515,10 @@
 #
 io_work_user:
 	l	%r1,__LC_KERNEL_STACK
-	s	%r1,BASED(.Lc_spsize)
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lr	%r15,%r1
 
 #
@@ -618,24 +528,24 @@
 #
 io_work_tif:
 	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
-	bo	BASED(io_mcck_pending)
+	jo	io_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	bo	BASED(io_reschedule)
+	jo	io_reschedule
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	bo	BASED(io_sigpending)
+	jo	io_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	bo	BASED(io_notify_resume)
-	b	BASED(io_return)	# beware of critical section cleanup
+	jo	io_notify_resume
+	j	io_return		# beware of critical section cleanup
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
 io_mcck_pending:
 	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Ls390_handle_mcck)
+	l	%r1,BASED(.Lhandle_mcck)
 	basr	%r14,%r1		# TIF bit will be cleared by handler
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -643,37 +553,37 @@
 io_reschedule:
 	# TRACE_IRQS_ON already done at io_return
 	l	%r1,BASED(.Lschedule)
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	basr	%r14,%r1		# call scheduler
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
 io_sigpending:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_signal)
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_signal
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
 io_notify_resume:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_notify_resume)
-	basr	%r14,%r1		# call do_signal
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call do_notify_resume
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 /*
  * External interrupt handler routine
@@ -682,23 +592,25 @@
 ENTRY(ext_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
-	CREATE_STACK_FRAME __LC_SAVE_AREA+16
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(ext_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-ext_no_vtime:
+	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_EXT_OLD_PSW
+	tmh	%r8,0x0001		# interrupting from user ?
+	jz	ext_skip
+	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ext_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+	stm	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
 	l	%r4,__LC_EXT_PARAMS	# get external parameters
 	l	%r1,BASED(.Ldo_extint)
-	basr	%r14,%r1
-	b	BASED(io_return)
+	basr	%r14,%r1		# call do_extint
+	j	io_return
 
 __critical_end:
 
@@ -710,82 +622,74 @@
 	stck	__LC_MCCK_CLOCK
 	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
-	SAVE_ALL_BASE __LC_SAVE_AREA+32
-	la	%r12,__LC_MCK_OLD_PSW
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
-	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
-	mvc	__LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+	jo	mcck_panic		# yes -> rest of mcck code invalid
+	la	%r14,__LC_CPU_TIMER_SAVE_AREA
+	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
-	bo	BASED(1f)
+	jo	3f
 	la	%r14,__LC_SYNC_ENTER_TIMER
 	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
-	bl	BASED(0f)
+	jl	0f
 	la	%r14,__LC_ASYNC_ENTER_TIMER
 0:	clc	0(8,%r14),__LC_EXIT_TIMER
-	bl	BASED(0f)
+	jl	1f
 	la	%r14,__LC_EXIT_TIMER
-0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
-	bl	BASED(0f)
+1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
+	jl	2f
 	la	%r14,__LC_LAST_UPDATE_TIMER
-0:	spt	0(%r14)
+2:	spt	0(%r14)
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
-1:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
-	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
-	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
-	bnz	BASED(mcck_int_main)	# from user -> load async stack
-	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
-	bhe	BASED(mcck_int_main)
-	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
-	bl	BASED(mcck_int_main)
-	l	%r14,BASED(.Lcleanup_critical)
-	basr	%r14,%r14
-mcck_int_main:
-	l	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
-	slr	%r14,%r15
-	sra	%r14,PAGE_SHIFT
-	be	BASED(0f)
-	l	%r15,__LC_PANIC_STACK	# load panic stack
-0:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	CREATE_STACK_FRAME __LC_SAVE_AREA+32
-	mvc	SP_PSW(8,%r15),0(%r12)
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
-	bno	BASED(mcck_no_vtime)	# no -> skip cleanup critical
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(mcck_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
-mcck_no_vtime:
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	l	%r1,BASED(.Ls390_mcck)
-	basr	%r14,%r1		# call machine check handler
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	bno	BASED(mcck_return)
+3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
+	jno	mcck_panic		# no -> skip cleanup critical
+	tm	%r8,0x0001		# interrupting from user ?
+	jz	mcck_skip
+	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+mcck_skip:
+	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
+	mvc	__PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+	stm	%r8,%r9,__PT_PSW(%r11)
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	l	%r1,BASED(.Ldo_machine_check)
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call s390_do_machine_check
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	mcck_return
 	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
-	s	%r1,BASED(.Lc_spsize)
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	lr	%r15,%r1
-	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
+	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
-	bno	BASED(mcck_return)
+	jno	mcck_return
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Ls390_handle_mcck)
-	basr	%r14,%r1		# call machine check handler
+	l	%r1,BASED(.Lhandle_mcck)
+	basr	%r14,%r1		# call s390_handle_mcck
 	TRACE_IRQS_ON
 mcck_return:
-	mvc	__LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
+	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
 	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
-	bno	BASED(0f)
-	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
+	jno	0f
+	lm	%r0,%r15,__PT_R0(%r11)
 	stpt	__LC_EXIT_TIMER
-	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
-0:	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
-	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
+	lpsw	__LC_RETURN_MCCK_PSW
+0:	lm	%r0,%r15,__PT_R0(%r11)
+	lpsw	__LC_RETURN_MCCK_PSW
 
-	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+mcck_panic:
+	l	%r14,__LC_PANIC_STACK
+	slr	%r14,%r15
+	sra	%r14,PAGE_SHIFT
+	jz	0f
+	l	%r15,__LC_PANIC_STACK
+0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	j	mcck_skip
 
 /*
  * Restart interruption handler, kick starter for additional CPUs
@@ -799,18 +703,18 @@
 	stck	__LC_LAST_UPDATE_CLOCK
 	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
 	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-	l	%r15,__LC_SAVE_AREA+60	# load ksp
+	l	%r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
 	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
 	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
-	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
+	lm	%r6,%r15,__SF_GPRS(%r15)# load registers from clone
 	l	%r1,__LC_THREAD_INFO
 	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
 	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
 	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
+	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 	basr	%r14,0
 	l	%r14,restart_addr-.(%r14)
-	basr	%r14,%r14		# branch to start_secondary
+	basr	%r14,%r14		# call start_secondary
 restart_addr:
 	.long	start_secondary
 	.align	8
@@ -835,19 +739,19 @@
 # PSW restart interrupt handler
 #
 ENTRY(psw_restart_int_handler)
-	st	%r15,__LC_SAVE_AREA+48(%r0)	# save r15
+	st	%r15,__LC_SAVE_AREA_RESTART
 	basr	%r15,0
 0:	l	%r15,.Lrestart_stack-0b(%r15)	# load restart stack
 	l	%r15,0(%r15)
-	ahi	%r15,-SP_SIZE			# make room for pt_regs
-	stm	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
-	mvc	SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack
-	mvc	SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+	ahi	%r15,-__PT_SIZE			# create pt_regs on stack
+	stm	%r0,%r14,__PT_R0(%r15)
+	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
+	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
+	ahi	%r15,-STACK_FRAME_OVERHEAD
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	basr	%r14,0
 1:	l	%r14,.Ldo_restart-1b(%r14)
 	basr	%r14,%r14
-
 	basr	%r14,0				# load disabled wait PSW if
 2:	lpsw	restart_psw_crash-2b(%r14)	# do_restart returns
 	.align 4
@@ -869,215 +773,174 @@
  */
 stack_overflow:
 	l	%r15,__LC_PANIC_STACK	# change to panic stack
-	sl	%r15,BASED(.Lc_spsize)
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
-	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
-	la	%r1,__LC_SAVE_AREA
-	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
-	be	BASED(0f)
-	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
-	be	BASED(0f)
-	la	%r1,__LC_SAVE_AREA+16
-0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
-	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	br	%r1
+	ahi	%r15,-__PT_SIZE		# create pt_regs
+	stm	%r0,%r7,__PT_R0(%r15)
+	stm	%r8,%r9,__PT_PSW(%r15)
+	mvc	__PT_R8(32,%r11),0(%r14)
+	lr	%r15,%r11
+	ahi	%r15,-STACK_FRAME_OVERHEAD
+	l	%r1,BASED(1f)
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	lr	%r2,%r11		# pass pointer to pt_regs
+	br	%r1			# branch to kernel_stack_overflow
 1:	.long	kernel_stack_overflow
 #endif
 
-cleanup_table_system_call:
-	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_tif:
-	.long	sysc_tif + 0x80000000, sysc_restore + 0x80000000
-cleanup_table_sysc_restore:
-	.long	sysc_restore + 0x80000000, sysc_done + 0x80000000
-cleanup_table_io_tif:
-	.long	io_tif + 0x80000000, io_restore + 0x80000000
-cleanup_table_io_restore:
-	.long	io_restore + 0x80000000, io_done + 0x80000000
+cleanup_table:
+	.long	system_call + 0x80000000
+	.long	sysc_do_svc + 0x80000000
+	.long	sysc_tif + 0x80000000
+	.long	sysc_restore + 0x80000000
+	.long	sysc_done + 0x80000000
+	.long	io_tif + 0x80000000
+	.long	io_restore + 0x80000000
+	.long	io_done + 0x80000000
 
 cleanup_critical:
-	clc	4(4,%r12),BASED(cleanup_table_system_call)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
-	bl	BASED(cleanup_system_call)
-0:
-	clc	4(4,%r12),BASED(cleanup_table_sysc_tif)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_sysc_tif+4)
-	bl	BASED(cleanup_sysc_tif)
-0:
-	clc	4(4,%r12),BASED(cleanup_table_sysc_restore)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_sysc_restore+4)
-	bl	BASED(cleanup_sysc_restore)
-0:
-	clc	4(4,%r12),BASED(cleanup_table_io_tif)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_io_tif+4)
-	bl	BASED(cleanup_io_tif)
-0:
-	clc	4(4,%r12),BASED(cleanup_table_io_restore)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_io_restore+4)
-	bl	BASED(cleanup_io_restore)
-0:
-	br	%r14
+	cl	%r9,BASED(cleanup_table)	# system_call
+	jl	0f
+	cl	%r9,BASED(cleanup_table+4)	# sysc_do_svc
+	jl	cleanup_system_call
+	cl	%r9,BASED(cleanup_table+8)	# sysc_tif
+	jl	0f
+	cl	%r9,BASED(cleanup_table+12)	# sysc_restore
+	jl	cleanup_sysc_tif
+	cl	%r9,BASED(cleanup_table+16)	# sysc_done
+	jl	cleanup_sysc_restore
+	cl	%r9,BASED(cleanup_table+20)	# io_tif
+	jl	0f
+	cl	%r9,BASED(cleanup_table+24)	# io_restore
+	jl	cleanup_io_tif
+	cl	%r9,BASED(cleanup_table+28)	# io_done
+	jl	cleanup_io_restore
+0:	br	%r14
 
 cleanup_system_call:
-	mvc	__LC_RETURN_PSW(8),0(%r12)
-	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
-	bh	BASED(0f)
-	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-	c	%r12,BASED(.Lmck_old_psw)
-	be	BASED(0f)
+	# check if stpt has been executed
+	cl	%r9,BASED(cleanup_system_call_insn)
+	jh	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:	c	%r12,BASED(.Lmck_old_psw)
-	la	%r12,__LC_SAVE_AREA+32
-	be	BASED(0f)
-	la	%r12,__LC_SAVE_AREA+16
-0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
-	bhe	BASED(cleanup_vtime)
-	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
-	bh	BASED(0f)
-	mvc	__LC_SAVE_AREA(16),0(%r12)
-0:	st	%r13,4(%r12)
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	st	%r15,12(%r12)
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	0(4,%r12),__LC_THREAD_INFO
-	l	%r12,__LC_THREAD_INFO
-	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+3(%r12),_TIF_SYSCALL
-cleanup_vtime:
-	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
-	bhe	BASED(cleanup_stime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-cleanup_stime:
-	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
-	bh	BASED(cleanup_update)
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-cleanup_update:
+	chi	%r11,__LC_SAVE_AREA_ASYNC
+	je	0f
+	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	# check if stm has been executed
+	cl	%r9,BASED(cleanup_system_call_insn+4)
+	jh	0f
+	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
+0:	# set up saved registers r12, and r13
+	st	%r12,16(%r11)		# r12 thread-info pointer
+	st	%r13,20(%r11)		# r13 literal-pool pointer
+	# check if the user time calculation has been done
+	cl	%r9,BASED(cleanup_system_call_insn+8)
+	jh	0f
+	l	%r10,__LC_EXIT_TIMER
+	l	%r15,__LC_EXIT_TIMER+4
+	SUB64	%r10,%r15,__LC_SYNC_ENTER_TIMER
+	ADD64	%r10,%r15,__LC_USER_TIMER
+	st	%r10,__LC_USER_TIMER
+	st	%r15,__LC_USER_TIMER+4
+0:	# check if the system time calculation has been done
+	cl	%r9,BASED(cleanup_system_call_insn+12)
+	jh	0f
+	l	%r10,__LC_LAST_UPDATE_TIMER
+	l	%r15,__LC_LAST_UPDATE_TIMER+4
+	SUB64	%r10,%r15,__LC_EXIT_TIMER
+	ADD64	%r10,%r15,__LC_SYSTEM_TIMER
+	st	%r10,__LC_SYSTEM_TIMER
+	st	%r15,__LC_SYSTEM_TIMER+4
+0:	# update accounting time stamp
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
-	la	%r12,__LC_RETURN_PSW
+	# set up saved register 11
+	l	%r15,__LC_KERNEL_STACK
+	ahi	%r15,-__PT_SIZE
+	st	%r15,12(%r11)		# r11 pt_regs pointer
+	# fill pt_regs
+	mvc	__PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
+	stm	%r0,%r7,__PT_R0(%r15)
+	mvc	__PT_PSW(8,%r15),__LC_SVC_OLD_PSW
+	mvc	__PT_INT_CODE(4,%r15),__LC_SVC_ILC
+	# setup saved register 15
+	ahi	%r15,-STACK_FRAME_OVERHEAD
+	st	%r15,28(%r11)		# r15 stack pointer
+	# set new psw address and exit
+	l	%r9,BASED(cleanup_table+4)	# sysc_do_svc + 0x80000000
 	br	%r14
 cleanup_system_call_insn:
-	.long	sysc_saveall + 0x80000000
 	.long	system_call + 0x80000000
-	.long	sysc_vtime + 0x80000000
-	.long	sysc_stime + 0x80000000
-	.long	sysc_update + 0x80000000
+	.long	sysc_stm + 0x80000000
+	.long	sysc_vtime + 0x80000000 + 36
+	.long	sysc_vtime + 0x80000000 + 76
 
 cleanup_sysc_tif:
-	mvc	__LC_RETURN_PSW(4),0(%r12)
-	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
-	la	%r12,__LC_RETURN_PSW
+	l	%r9,BASED(cleanup_table+8)	# sysc_tif + 0x80000000
 	br	%r14
 
 cleanup_sysc_restore:
-	clc	4(4,%r12),BASED(cleanup_sysc_restore_insn)
-	be	BASED(2f)
-	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-	c	%r12,BASED(.Lmck_old_psw)
-	be	BASED(0f)
-	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:	clc	4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
-	be	BASED(2f)
-	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
-	c	%r12,BASED(.Lmck_old_psw)
-	la	%r12,__LC_SAVE_AREA+32
-	be	BASED(1f)
-	la	%r12,__LC_SAVE_AREA+16
-1:	mvc	0(16,%r12),SP_R12(%r15)
-	lm	%r0,%r11,SP_R0(%r15)
-	l	%r15,SP_R15(%r15)
-2:	la	%r12,__LC_RETURN_PSW
+	cl	%r9,BASED(cleanup_sysc_restore_insn)
+	jhe	0f
+	l	%r9,12(%r11)		# get saved pointer to pt_regs
+	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
+	mvc	0(32,%r11),__PT_R8(%r9)
+	lm	%r0,%r7,__PT_R0(%r9)
+0:	lm	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 cleanup_sysc_restore_insn:
 	.long	sysc_done - 4 + 0x80000000
-	.long	sysc_done - 8 + 0x80000000
 
 cleanup_io_tif:
-	mvc	__LC_RETURN_PSW(4),0(%r12)
-	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
-	la	%r12,__LC_RETURN_PSW
+	l	%r9,BASED(cleanup_table+20)	# io_tif + 0x80000000
 	br	%r14
 
 cleanup_io_restore:
-	clc	4(4,%r12),BASED(cleanup_io_restore_insn)
-	be	BASED(1f)
-	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-	clc	4(4,%r12),BASED(cleanup_io_restore_insn+4)
-	be	BASED(1f)
-	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
-	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
-	lm	%r0,%r11,SP_R0(%r15)
-	l	%r15,SP_R15(%r15)
-1:	la	%r12,__LC_RETURN_PSW
+	cl	%r9,BASED(cleanup_io_restore_insn)
+	jhe	0f
+	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
+	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	mvc	0(32,%r11),__PT_R8(%r9)
+	lm	%r0,%r7,__PT_R0(%r9)
+0:	lm	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 cleanup_io_restore_insn:
 	.long	io_done - 4 + 0x80000000
-	.long	io_done - 8 + 0x80000000
 
 /*
  * Integer constants
  */
-		.align	4
-.Lc_spsize:	.long	SP_SIZE
-.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
-.Lnr_syscalls:	.long	NR_syscalls
-.L0x018:	.short	0x018
-.L0x020:	.short	0x020
-.L0x028:	.short	0x028
-.L0x030:	.short	0x030
-.L0x038:	.short	0x038
-.Lc_1:		.long	1
+	.align	4
+.Lnr_syscalls:		.long	NR_syscalls
 
 /*
  * Symbol constants
  */
-.Ls390_mcck:	.long	s390_do_machine_check
-.Ls390_handle_mcck:
-		.long	s390_handle_mcck
-.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
-.Ldo_IRQ:	.long	do_IRQ
-.Ldo_extint:	.long	do_extint
-.Ldo_signal:	.long	do_signal
-.Ldo_notify_resume:
-		.long	do_notify_resume
-.Lhandle_per:	.long	do_per_trap
-.Ldo_execve:	.long	do_execve
-.Lexecve_tail:	.long	execve_tail
-.Ljump_table:	.long	pgm_check_table
-.Lschedule:	.long	schedule
+.Ldo_machine_check:	.long	s390_do_machine_check
+.Lhandle_mcck:		.long	s390_handle_mcck
+.Ldo_IRQ:		.long	do_IRQ
+.Ldo_extint:		.long	do_extint
+.Ldo_signal:		.long	do_signal
+.Ldo_notify_resume:	.long	do_notify_resume
+.Ldo_per_trap:		.long	do_per_trap
+.Ldo_execve:		.long	do_execve
+.Lexecve_tail:		.long	execve_tail
+.Ljump_table:		.long	pgm_check_table
+.Lschedule:		.long	schedule
 #ifdef CONFIG_PREEMPT
-.Lpreempt_schedule_irq:
-		.long	preempt_schedule_irq
+.Lpreempt_irq:		.long	preempt_schedule_irq
 #endif
-.Ltrace_entry:	.long	do_syscall_trace_enter
-.Ltrace_exit:	.long	do_syscall_trace_exit
-.Lschedtail:	.long	schedule_tail
-.Lsysc_table:	.long	sys_call_table
+.Ltrace_enter:		.long	do_syscall_trace_enter
+.Ltrace_exit:		.long	do_syscall_trace_exit
+.Lschedule_tail:	.long	schedule_tail
+.Lsys_call_table:	.long	sys_call_table
+.Lsysc_per:		.long	sysc_per + 0x80000000
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on_caller:
-		.long	trace_hardirqs_on_caller
-.Ltrace_irq_off_caller:
-		.long	trace_hardirqs_off_caller
+.Lhardirqs_on:		.long	trace_hardirqs_on_caller
+.Lhardirqs_off:		.long	trace_hardirqs_off_caller
 #endif
 #ifdef CONFIG_LOCKDEP
-.Llockdep_sys_exit:
-		.long	lockdep_sys_exit
+.Llockdep_sys_exit:	.long	lockdep_sys_exit
 #endif
-.Lcritical_start:
-		.long	__critical_start + 0x80000000
-.Lcritical_end:
-		.long	__critical_end + 0x80000000
-.Lcleanup_critical:
-		.long	cleanup_critical
+.Lcritical_start:	.long	__critical_start + 0x80000000
+.Lcritical_length:	.long	__critical_end - __critical_start
 
 		.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index ef8fb1d..bf538aa 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -6,15 +6,15 @@
 #include <asm/ptrace.h>
 
 
-extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
+extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 
-void do_protection_exception(struct pt_regs *, long, unsigned long);
-void do_dat_exception(struct pt_regs *, long, unsigned long);
-void do_asce_exception(struct pt_regs *, long, unsigned long);
+void do_protection_exception(struct pt_regs *regs);
+void do_dat_exception(struct pt_regs *regs);
+void do_asce_exception(struct pt_regs *regs);
 
 void do_per_trap(struct pt_regs *regs);
 void syscall_trace(struct pt_regs *regs, int entryexit);
@@ -28,7 +28,7 @@
 void do_restart(void);
 int __cpuinit start_secondary(void *cpuvoid);
 void __init startup_init(void);
-void die(const char * str, struct pt_regs * regs, long err);
+void die(struct pt_regs *regs, const char *str);
 
 void __init time_init(void);
 
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 83a9374..412a7b8 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -19,32 +19,22 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS    =	STACK_FRAME_OVERHEAD
-SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 64
-SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 72
-SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 80
-SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 88
-SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 96
-SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 104
-SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 112
-SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 120
-SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE  =	STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0      =	__PT_GPRS
+__PT_R1      =	__PT_GPRS + 8
+__PT_R2      =	__PT_GPRS + 16
+__PT_R3      =	__PT_GPRS + 24
+__PT_R4      =	__PT_GPRS + 32
+__PT_R5      =	__PT_GPRS + 40
+__PT_R6      =	__PT_GPRS + 48
+__PT_R7      =	__PT_GPRS + 56
+__PT_R8      =	__PT_GPRS + 64
+__PT_R9      =	__PT_GPRS + 72
+__PT_R10     =	__PT_GPRS + 80
+__PT_R11     =	__PT_GPRS + 88
+__PT_R12     =	__PT_GPRS + 96
+__PT_R13     =	__PT_GPRS + 104
+__PT_R14     =	__PT_GPRS + 112
+__PT_R15     =	__PT_GPRS + 120
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -59,6 +49,28 @@
 
 #define BASED(name) name-system_call(%r13)
 
+	.macro	TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
+	basr	%r2,%r0
+	brasl	%r14,trace_hardirqs_on_caller
+#endif
+	.endm
+
+	.macro	TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
+	basr	%r2,%r0
+	brasl	%r14,trace_hardirqs_off_caller
+#endif
+	.endm
+
+	.macro	LOCKDEP_SYS_EXIT
+#ifdef CONFIG_LOCKDEP
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jz	.+10
+	brasl	%r14,lockdep_sys_exit
+#endif
+	.endm
+
 	.macro SPP newpp
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_SPP
@@ -67,146 +79,73 @@
 #endif
 	.endm
 
-	.macro	HANDLE_SIE_INTERCEPT
+	.macro	HANDLE_SIE_INTERCEPT scratch
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 	tm	__TI_flags+6(%r12),_TIF_SIE>>8
-	jz	0f
-	SPP	__LC_CMF_HPP			# set host id
-	clc	SP_PSW+8(8,%r15),BASED(.Lsie_loop)
-	jl	0f
-	clc	SP_PSW+8(8,%r15),BASED(.Lsie_done)
-	jhe	0f
-	mvc	SP_PSW+8(8,%r15),BASED(.Lsie_loop)
-0:
+	jz	.+42
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_SPP
+	jz	.+8
+	.insn	s,0xb2800000,BASED(.Lhost_id)	# set host id
+	lgr	\scratch,%r9
+	slg	\scratch,BASED(.Lsie_loop)
+	clg	\scratch,BASED(.Lsie_length)
+	jhe	.+10
+	lg	%r9,BASED(.Lsie_loop)
 #endif
 	.endm
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	.macro	TRACE_IRQS_ON
-	basr	%r2,%r0
-	brasl	%r14,trace_hardirqs_on_caller
-	.endm
-
-	.macro	TRACE_IRQS_OFF
-	basr	%r2,%r0
-	brasl	%r14,trace_hardirqs_off_caller
-	.endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
-#endif
-
-#ifdef CONFIG_LOCKDEP
-	.macro	LOCKDEP_SYS_EXIT
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	jz	0f
-	brasl	%r14,lockdep_sys_exit
-0:
-	.endm
-#else
-#define LOCKDEP_SYS_EXIT
-#endif
-
-	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
-	lg	%r10,\lc_from
-	slg	%r10,\lc_to
-	alg	%r10,\lc_sum
-	stg	%r10,\lc_sum
-	.endm
-
-/*
- * Register usage in interrupt handlers:
- *    R9  - pointer to current task structure
- *    R13 - pointer to literal pool
- *    R14 - return register for function calls
- *    R15 - kernel stack pointer
- */
-
-	.macro	SAVE_ALL_SVC psworg,savearea
-	stmg	%r11,%r15,\savearea
-	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	aghi	%r15,-SP_SIZE		# make room for registers & psw
-	lg	%r11,__LC_LAST_BREAK
-	.endm
-
-	.macro	SAVE_ALL_PGM psworg,savearea
-	stmg	%r11,%r15,\savearea
-	tm	\psworg+1,0x01		# test problem state bit
+	.macro	CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
-	jnz	1f
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	jnz	2f
-	la	%r12,\psworg
-	j	stack_overflow
-#else
-	jz	2f
+	tml	%r15,\stacksize - CONFIG_STACK_GUARD
+	lghi	%r14,\savearea
+	jz	stack_overflow
 #endif
-1:	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-2:	aghi	%r15,-SP_SIZE		# make room for registers & psw
-	larl	%r13,system_call
-	lg	%r11,__LC_LAST_BREAK
 	.endm
 
-	.macro	SAVE_ALL_ASYNC psworg,savearea
-	stmg	%r11,%r15,\savearea
-	larl	%r13,system_call
-	lg	%r11,__LC_LAST_BREAK
-	la	%r12,\psworg
-	tm	\psworg+1,0x01		# test problem state bit
-	jnz	1f			# from user -> load kernel stack
-	clc	\psworg+8(8),BASED(.Lcritical_end)
+	.macro	SWITCH_ASYNC savearea,stack,shift
+	tmhh	%r8,0x0001		# interrupting from user ?
+	jnz	1f
+	lgr	%r14,%r9
+	slg	%r14,BASED(.Lcritical_start)
+	clg	%r14,BASED(.Lcritical_length)
 	jhe	0f
-	clc	\psworg+8(8),BASED(.Lcritical_start)
-	jl	0f
+	lghi	%r11,\savearea		# inside critical section, do cleanup
 	brasl	%r14,cleanup_critical
-	tm	1(%r12),0x01		# retest problem state after cleanup
+	tmhh	%r8,0x0001		# retest problem state after cleanup
 	jnz	1f
-0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async. stack ?
+0:	lg	%r14,\stack		# are we already on the target stack?
 	slgr	%r14,%r15
-	srag	%r14,%r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
+	srag	%r14,%r14,\shift
 	jnz	1f
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	jnz	2f
-	j	stack_overflow
-#else
-	jz	2f
-#endif
-1:	lg	%r15,__LC_ASYNC_STACK	# load async stack
-2:	aghi	%r15,-SP_SIZE		# make room for registers & psw
+	CHECK_STACK 1<<\shift,\savearea
+	j	2f
+1:	lg	%r15,\stack		# load target stack
+2:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
 
-	.macro	CREATE_STACK_FRAME savearea
-	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	mvc	SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
-	stmg	%r0,%r10,SP_R0(%r15)	# store gprs %r0-%r10 to kernel stack
+	.macro UPDATE_VTIME scratch,enter_timer
+	lg	\scratch,__LC_EXIT_TIMER
+	slg	\scratch,\enter_timer
+	alg	\scratch,__LC_USER_TIMER
+	stg	\scratch,__LC_USER_TIMER
+	lg	\scratch,__LC_LAST_UPDATE_TIMER
+	slg	\scratch,__LC_EXIT_TIMER
+	alg	\scratch,__LC_SYSTEM_TIMER
+	stg	\scratch,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 	.endm
 
-	.macro	RESTORE_ALL psworg,sync
-	mvc	\psworg(16),SP_PSW(%r15) # move user PSW to lowcore
-	.if !\sync
-	ni	\psworg+1,0xfd		# clear wait state bit
-	.endif
-	lg	%r14,__LC_VDSO_PER_CPU
-	lmg	%r0,%r13,SP_R0(%r15)	# load gprs 0-13 of user
-	stpt	__LC_EXIT_TIMER
-	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-	lmg	%r14,%r15,SP_R14(%r15)	# load grps 14-15 of user
-	lpswe	\psworg			# back to caller
-	.endm
-
-	.macro	LAST_BREAK
-	srag	%r10,%r11,23
-	jz	0f
-	stg	%r11,__TI_last_break(%r12)
-0:
+	.macro	LAST_BREAK scratch
+	srag	\scratch,%r10,23
+	jz	.+10
+	stg	%r10,__TI_last_break(%r12)
 	.endm
 
 	.macro REENABLE_IRQS
-	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
-	ni	__SF_EMPTY(%r15),0xbf
-	ssm	__SF_EMPTY(%r15)
+	stg	%r8,__LC_RETURN_PSW
+	ni	__LC_RETURN_PSW,0xbf
+	ssm	__LC_RETURN_PSW
 	.endm
 
 	.section .kprobes.text, "ax"
@@ -245,55 +184,66 @@
 
 ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
-sysc_saveall:
-	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	mvc	SP_PSW(16,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+7(%r12),_TIF_SYSCALL
+sysc_stmg:
+	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+	lg	%r10,__LC_LAST_BREAK
+	lg	%r12,__LC_THREAD_INFO
+	larl	%r13,system_call
+sysc_per:
+	lg	%r15,__LC_KERNEL_STACK
+	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
 sysc_vtime:
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	LAST_BREAK
+	UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+	LAST_BREAK %r13
+	stmg	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 sysc_do_svc:
-	llgh	%r7,SP_SVC_CODE+2(%r15)
-	slag	%r7,%r7,2	# shift and test for svc 0
+	oi	__TI_flags+7(%r12),_TIF_SYSCALL
+	llgh	%r8,__PT_INT_CODE+2(%r11)
+	slag	%r8,%r8,2			# shift and test for svc 0
 	jnz	sysc_nr_ok
 	# svc 0: system call number in %r1
-	llgfr	%r1,%r1		# clear high word in r1
+	llgfr	%r1,%r1				# clear high word in r1
 	cghi	%r1,NR_syscalls
 	jnl	sysc_nr_ok
-	sth	%r1,SP_SVC_CODE+2(%r15)
-	slag	%r7,%r1,2	# shift and test for svc 0
+	sth	%r1,__PT_INT_CODE+2(%r11)
+	slag	%r8,%r1,2
 sysc_nr_ok:
-	larl	%r10,sys_call_table
+	larl	%r10,sys_call_table		# 64 bit system call table
 #ifdef CONFIG_COMPAT
-	tm	__TI_flags+5(%r12),(_TIF_31BIT>>16)  # running in 31 bit mode ?
+	tm	__TI_flags+5(%r12),(_TIF_31BIT>>16)
 	jno	sysc_noemu
-	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
+	larl	%r10,sys_call_table_emu		# 31 bit system call table
 sysc_noemu:
 #endif
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	stg	%r2,__PT_ORIG_GPR2(%r11)
+	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
+	lgf	%r9,0(%r8,%r10)			# get system call add.
 	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8
-	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
-	lgf	%r8,0(%r7,%r10) # load address of system call routine
 	jnz	sysc_tracesys
-	basr	%r14,%r8	# call sys_xxxx
-	stg	%r2,SP_R2(%r15) # store return value (change R2 on stack)
+	basr	%r14,%r9			# call sys_xxxx
+	stg	%r2,__PT_R2(%r11)		# store return value
 
 sysc_return:
 	LOCKDEP_SYS_EXIT
 sysc_tif:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
 	jno	sysc_restore
 	tm	__TI_flags+7(%r12),_TIF_WORK_SVC
-	jnz	sysc_work	# there is work to do (signals etc.)
+	jnz	sysc_work			# check for work
 	ni	__TI_flags+7(%r12),255-_TIF_SYSCALL
 sysc_restore:
-	RESTORE_ALL __LC_RETURN_PSW,1
+	lg	%r14,__LC_VDSO_PER_CPU
+	lmg	%r0,%r10,__PT_R0(%r11)
+	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+	stpt	__LC_EXIT_TIMER
+	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+	lmg	%r11,%r15,__PT_R11(%r11)
+	lpswe	__LC_RETURN_PSW
 sysc_done:
 
 #
@@ -317,7 +267,7 @@
 #
 sysc_reschedule:
 	larl	%r14,sysc_return
-	jg	schedule		# return point is sysc_return
+	jg	schedule
 
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -331,33 +281,33 @@
 #
 sysc_sigpending:
 	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	brasl	%r14,do_signal		# call do_signal
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	brasl	%r14,do_signal
 	tm	__TI_flags+7(%r12),_TIF_SYSCALL
 	jno	sysc_return
-	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	lghi	%r7,0			# svc 0 returns -ENOSYS
-	lh	%r1,SP_SVC_CODE+2(%r15)	# load new svc number
+	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	lghi	%r8,0			# svc 0 returns -ENOSYS
+	lh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
 	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
-	slag	%r7,%r1,2
+	slag	%r8,%r1,2
 	j	sysc_nr_ok		# restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
 sysc_notify_resume:
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lgr	%r2,%r11		# pass pointer to pt_regs
 	larl	%r14,sysc_return
-	jg	do_notify_resume	# call do_notify_resume
+	jg	do_notify_resume
 
 #
 # _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
 	ni	__TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	larl	%r14,sysc_return	# load adr. of system return
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	larl	%r14,sysc_return
 	jg	do_per_trap
 
 #
@@ -365,41 +315,41 @@
 # and after the system call
 #
 sysc_tracesys:
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lgr	%r2,%r11		# pass pointer to pt_regs
 	la	%r3,0
-	llgh	%r0,SP_SVC_CODE+2(%r15)
-	stg	%r0,SP_R2(%r15)
+	llgh	%r0,__PT_INT_CODE+2(%r11)
+	stg	%r0,__PT_R2(%r11)
 	brasl	%r14,do_syscall_trace_enter
 	lghi	%r0,NR_syscalls
 	clgr	%r0,%r2
 	jnh	sysc_tracenogo
-	sllg	%r7,%r2,2		# svc number *4
-	lgf	%r8,0(%r7,%r10)
+	sllg	%r8,%r2,2
+	lgf	%r9,0(%r8,%r10)
 sysc_tracego:
-	lmg	%r3,%r6,SP_R3(%r15)
-	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
-	lg	%r2,SP_ORIG_R2(%r15)
-	basr	%r14,%r8		# call sys_xxx
-	stg	%r2,SP_R2(%r15)		# store return value
+	lmg	%r3,%r7,__PT_R3(%r11)
+	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
+	lg	%r2,__PT_ORIG_GPR2(%r11)
+	basr	%r14,%r9		# call sys_xxx
+	stg	%r2,__PT_R2(%r11)	# store return value
 sysc_tracenogo:
 	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8
 	jz	sysc_return
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	larl	%r14,sysc_return	# return point is sysc_return
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	larl	%r14,sysc_return
 	jg	do_syscall_trace_exit
 
 #
 # a new process exits the kernel with ret_from_fork
 #
 ENTRY(ret_from_fork)
-	lg	%r13,__LC_SVC_NEW_PSW+8
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	lg	%r12,__LC_THREAD_INFO
+	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
 	jo	0f
-	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
+	stg	%r15,__PT_R15(%r11)	# store stack pointer for new kthread
 0:	brasl	%r14,schedule_tail
 	TRACE_IRQS_ON
-	stosm	24(%r15),0x03		# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	j	sysc_tracenogo
 
 #
@@ -409,26 +359,26 @@
 ENTRY(kernel_execve)
 	stmg	%r12,%r15,96(%r15)
 	lgr	%r14,%r15
-	aghi	%r15,-SP_SIZE
+	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	stg	%r14,__SF_BACKCHAIN(%r15)
-	la	%r12,SP_PTREGS(%r15)
+	la	%r12,STACK_FRAME_OVERHEAD(%r15)
 	xc	0(__PT_SIZE,%r12),0(%r12)
 	lgr	%r5,%r12
 	brasl	%r14,do_execve
 	ltgfr	%r2,%r2
 	je	0f
-	aghi	%r15,SP_SIZE
+	aghi	%r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	lmg	%r12,%r15,96(%r15)
 	br	%r14
 	# execve succeeded.
-0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
+0:	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	lg	%r15,__LC_KERNEL_STACK	# load ksp
-	aghi	%r15,-SP_SIZE		# make room for registers & psw
-	lg	%r13,__LC_SVC_NEW_PSW+8
-	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
+	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	mvc	0(__PT_SIZE,%r11),0(%r12)	# copy pt_regs
 	lg	%r12,__LC_THREAD_INFO
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	brasl	%r14,execve_tail
 	j	sysc_return
 
@@ -437,127 +387,72 @@
  */
 
 ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
 	stpt	__LC_SYNC_ENTER_TIMER
-	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-	jnz	pgm_per 		 # got per exception -> special case
-	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	SP_PSW(16,%r15),__LC_PGM_OLD_PSW
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	HANDLE_SIE_INTERCEPT
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	jz	pgm_no_vtime
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	LAST_BREAK
-pgm_no_vtime:
-	stg	%r11,SP_ARGS(%r15)
-	lgf	%r3,__LC_PGM_ILC	# load program interruption code
-	lg	%r4,__LC_TRANS_EXC_CODE
-	REENABLE_IRQS
-	lghi	%r8,0x7f
-	ngr	%r8,%r3
-	sll	%r8,3
-	larl	%r1,pgm_check_table
-	lg	%r1,0(%r8,%r1)		# load address of handler routine
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r1		# branch to interrupt-handler
-pgm_exit:
-	j	sysc_return
-
-#
-# handle per exception
-#
-pgm_per:
-	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
-	jnz	pgm_per_std		# ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
-	clc	__LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
-	je	pgm_svcper
-# no interesting special case, ignore PER event
-	lpswe	__LC_PGM_OLD_PSW
-
-#
-# Normal per exception
-#
-pgm_per_std:
-	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	SP_PSW(16,%r15),__LC_PGM_OLD_PSW
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	HANDLE_SIE_INTERCEPT
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	jz	pgm_no_vtime2
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	LAST_BREAK
-pgm_no_vtime2:
+	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+	lg	%r10,__LC_LAST_BREAK
+	lg	%r12,__LC_THREAD_INFO
+	larl	%r13,system_call
+	lmg	%r8,%r9,__LC_PGM_OLD_PSW
+	HANDLE_SIE_INTERCEPT %r14
+	tmhh	%r8,0x0001		# test problem state bit
+	jnz	1f			# -> fault in user space
+	tmhh	%r8,0x4000		# PER bit set in old PSW ?
+	jnz	0f			# -> enabled, can't be a double fault
+	tm	__LC_PGM_ILC+3,0x80	# check for per exception
+	jnz	pgm_svcper		# -> single stepped svc
+0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+	j	2f
+1:	UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+	LAST_BREAK %r14
+	lg	%r15,__LC_KERNEL_STACK
+2:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	stmg	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+	stmg	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
+	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+	stg	%r10,__PT_ARGS(%r11)
+	tm	__LC_PGM_ILC+3,0x80	# check for per exception
+	jz	0f
 	lg	%r1,__TI_task(%r12)
-	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
-	jz	kernel_per
-	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	tmhh	%r8,0x0001		# kernel per event ?
+	jz	pgm_kprobe
+	oi	__TI_flags+7(%r12),_TIF_PER_TRAP
 	mvc	__THREAD_per_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
 	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
-	oi	__TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
-	lgf	%r3,__LC_PGM_ILC	# load program interruption code
-	lg	%r4,__LC_TRANS_EXC_CODE
-	REENABLE_IRQS
-	lghi	%r8,0x7f
-	ngr	%r8,%r3			# clear per-event-bit and ilc
-	je	pgm_exit2
-	sll	%r8,3
+0:	REENABLE_IRQS
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	larl	%r1,pgm_check_table
-	lg	%r1,0(%r8,%r1)		# load address of handler routine
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	llgh	%r10,__PT_INT_CODE+2(%r11)
+	nill	%r10,0x007f
+	sll	%r10,3
+	je	sysc_return
+	lg	%r1,0(%r10,%r1)		# load address of handler routine
+	lgr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-pgm_exit2:
 	j	sysc_return
 
 #
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
+#
+pgm_kprobe:
+	REENABLE_IRQS
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	brasl	%r14,do_per_trap
+	j	sysc_return
+
+#
+# single stepped system call
 #
 pgm_svcper:
-	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	mvc	SP_PSW(16,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	LAST_BREAK
-	lg	%r8,__TI_task(%r12)
-	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
-	mvc	__THREAD_per_address(8,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	j	sysc_do_svc
-
-#
-# per was called from kernel, must be kprobes
-#
-kernel_per:
-	REENABLE_IRQS
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	brasl	%r14,do_per_trap
-	j	pgm_exit
+	oi	__TI_flags+7(%r12),_TIF_PER_TRAP
+	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+	larl	%r14,sysc_per
+	stg	%r14,__LC_RETURN_PSW+8
+	lpswe	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
 
 /*
  * IO interrupt handler routine
@@ -565,21 +460,25 @@
 ENTRY(io_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
-	CREATE_STACK_FRAME __LC_SAVE_AREA+40
-	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	HANDLE_SIE_INTERCEPT
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	jz	io_no_vtime
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-	LAST_BREAK
-io_no_vtime:
+	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
+	lg	%r10,__LC_LAST_BREAK
+	lg	%r12,__LC_THREAD_INFO
+	larl	%r13,system_call
+	lmg	%r8,%r9,__LC_IO_OLD_PSW
+	HANDLE_SIE_INTERCEPT %r14
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+	tmhh	%r8,0x0001		# interrupting from user?
+	jz	io_skip
+	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+	LAST_BREAK %r14
+io_skip:
+	stmg	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+	stmg	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	brasl	%r14,do_IRQ		# call standard irq handler
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	brasl	%r14,do_IRQ
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
@@ -587,7 +486,14 @@
 	tm	__TI_flags+7(%r12),_TIF_WORK_INT
 	jnz	io_work 		# there is work to do (signals etc.)
 io_restore:
-	RESTORE_ALL __LC_RETURN_PSW,0
+	lg	%r14,__LC_VDSO_PER_CPU
+	lmg	%r0,%r10,__PT_R0(%r11)
+	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	stpt	__LC_EXIT_TIMER
+	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+	lmg	%r11,%r15,__PT_R11(%r11)
+	lpswe	__LC_RETURN_PSW
 io_done:
 
 #
@@ -600,7 +506,7 @@
 # Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 	jo	io_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
@@ -609,10 +515,11 @@
 	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
 	jno	io_restore
 	# switch to kernel stack
-	lg	%r1,SP_R15(%r15)
-	aghi	%r1,-SP_SIZE
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lg	%r1,__PT_R15(%r11)
+	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lgr	%r15,%r1
 	# TRACE_IRQS_ON already done at io_return, call
 	# TRACE_IRQS_OFF to keep things symmetrical
@@ -628,9 +535,10 @@
 #
 io_work_user:
 	lg	%r1,__LC_KERNEL_STACK
-	aghi	%r1,-SP_SIZE
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lgr	%r15,%r1
 
 #
@@ -663,9 +571,9 @@
 #
 io_reschedule:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	brasl	%r14,schedule		# call scheduler
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
 	j	io_return
 
@@ -674,10 +582,10 @@
 #
 io_sigpending:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	brasl	%r14,do_signal		# call do_signal
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	brasl	%r14,do_signal
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
 	j	io_return
 
@@ -686,10 +594,10 @@
 #
 io_notify_resume:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	brasl	%r14,do_notify_resume	# call do_notify_resume
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	lgr	%r2,%r11		# pass pointer to pt_regs
+	brasl	%r14,do_notify_resume
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
 	j	io_return
 
@@ -699,21 +607,24 @@
 ENTRY(ext_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
-	CREATE_STACK_FRAME __LC_SAVE_AREA+40
-	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	HANDLE_SIE_INTERCEPT
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	jz	ext_no_vtime
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-	LAST_BREAK
-ext_no_vtime:
+	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
+	lg	%r10,__LC_LAST_BREAK
+	lg	%r12,__LC_THREAD_INFO
+	larl	%r13,system_call
+	lmg	%r8,%r9,__LC_EXT_OLD_PSW
+	HANDLE_SIE_INTERCEPT %r14
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+	tmhh	%r8,0x0001		# interrupting from user ?
+	jz	ext_skip
+	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+	LAST_BREAK %r14
+ext_skip:
+	stmg	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+	stmg	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
 	lghi	%r1,4096
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	lgr	%r2,%r11		# pass pointer to pt_regs
 	llgf	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
 	llgf	%r4,__LC_EXT_PARAMS	# get external parameter
 	lg	%r5,__LC_EXT_PARAMS2-4096(%r1)	# get 64 bit external parameter
@@ -730,81 +641,77 @@
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
-	stmg	%r11,%r15,__LC_SAVE_AREA+80
+	lg	%r10,__LC_LAST_BREAK
+	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
-	lg	%r11,__LC_LAST_BREAK
-	la	%r12,__LC_MCK_OLD_PSW
+	lmg	%r8,%r9,__LC_MCK_OLD_PSW
+	HANDLE_SIE_INTERCEPT %r14
 	tm	__LC_MCCK_CODE,0x80	# system damage?
-	jo	mcck_int_main		# yes -> rest of mcck code invalid
-	la	%r14,4095
-	mvc	__LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+	jo	mcck_panic		# yes -> rest of mcck code invalid
+	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
+	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
-	jo	1f
+	jo	3f
 	la	%r14,__LC_SYNC_ENTER_TIMER
 	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
 	jl	0f
 	la	%r14,__LC_ASYNC_ENTER_TIMER
 0:	clc	0(8,%r14),__LC_EXIT_TIMER
-	jl	0f
+	jl	1f
 	la	%r14,__LC_EXIT_TIMER
-0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
-	jl	0f
+1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
+	jl	2f
 	la	%r14,__LC_LAST_UPDATE_TIMER
-0:	spt	0(%r14)
+2:	spt	0(%r14)
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
-1:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
-	jno	mcck_int_main		# no -> skip cleanup critical
-	tm	__LC_MCK_OLD_PSW+1,0x01 # test problem state bit
-	jnz	mcck_int_main		# from user -> load kernel stack
-	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
-	jhe	mcck_int_main
-	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
-	jl	mcck_int_main
-	brasl	%r14,cleanup_critical
-mcck_int_main:
-	lg	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
-	slgr	%r14,%r15
-	srag	%r14,%r14,PAGE_SHIFT
-	jz	0f
-	lg	%r15,__LC_PANIC_STACK	# load panic stack
-0:	aghi	%r15,-SP_SIZE		# make room for registers & psw
-	CREATE_STACK_FRAME __LC_SAVE_AREA+80
-	mvc	SP_PSW(16,%r15),0(%r12)
-	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
-	jno	mcck_no_vtime		# no -> no timer update
-	HANDLE_SIE_INTERCEPT
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	jz	mcck_no_vtime
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
-	LAST_BREAK
-mcck_no_vtime:
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
+	jno	mcck_panic		# no -> skip cleanup critical
+	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
+	tm	%r8,0x0001		# interrupting from user ?
+	jz	mcck_skip
+	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+	LAST_BREAK %r14
+mcck_skip:
+	lghi	%r14,__LC_GPREGS_SAVE_AREA
+	mvc	__PT_R0(128,%r11),0(%r14)
+	stmg	%r8,%r9,__PT_PSW(%r11)
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,s390_do_machine_check
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 	jno	mcck_return
 	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
-	aghi	%r1,-SP_SIZE
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lgr	%r15,%r1
-	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
+	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
 	jno	mcck_return
 	TRACE_IRQS_OFF
 	brasl	%r14,s390_handle_mcck
 	TRACE_IRQS_ON
 mcck_return:
-	mvc	__LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
+	lg	%r14,__LC_VDSO_PER_CPU
+	lmg	%r0,%r10,__PT_R0(%r11)
+	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
-	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
 	stpt	__LC_EXIT_TIMER
-0:	lpswe	__LC_RETURN_MCCK_PSW	# back to caller
-mcck_done:
+	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0:	lmg	%r11,%r15,__PT_R11(%r11)
+	lpswe	__LC_RETURN_MCCK_PSW
+
+mcck_panic:
+	lg	%r14,__LC_PANIC_STACK
+	slgr	%r14,%r15
+	srag	%r14,%r14,PAGE_SHIFT
+	jz	0f
+	lg	%r15,__LC_PANIC_STACK
+0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	j	mcck_skip
 
 /*
  * Restart interruption handler, kick starter for additional CPUs
@@ -818,17 +725,18 @@
 	stck	__LC_LAST_UPDATE_CLOCK
 	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
 	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-	lg	%r15,__LC_SAVE_AREA+120 # load ksp
+	lghi	%r10,__LC_GPREGS_SAVE_AREA
+	lg	%r15,120(%r10)		# load ksp
 	lghi	%r10,__LC_CREGS_SAVE_AREA
-	lctlg	%c0,%c15,0(%r10) # get new ctl regs
+	lctlg	%c0,%c15,0(%r10)	# get new ctl regs
 	lghi	%r10,__LC_AREGS_SAVE_AREA
 	lam	%a0,%a15,0(%r10)
-	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
+	lmg	%r6,%r15,__SF_GPRS(%r15)# load registers from clone
 	lg	%r1,__LC_THREAD_INFO
 	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
 	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
 	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
+	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 	brasl	%r14,start_secondary
 	.align	8
 restart_vtime:
@@ -852,16 +760,16 @@
 # PSW restart interrupt handler
 #
 ENTRY(psw_restart_int_handler)
-	stg	%r15,__LC_SAVE_AREA+120(%r0)	# save r15
+	stg	%r15,__LC_SAVE_AREA_RESTART
 	larl	%r15,restart_stack		# load restart stack
 	lg	%r15,0(%r15)
-	aghi	%r15,-SP_SIZE			# make room for pt_regs
-	stmg	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
-	mvc	SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack
-	mvc	SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
-	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
+	stmg	%r0,%r14,__PT_R0(%r15)
+	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	brasl	%r14,do_restart
-
 	larl	%r14,restart_psw_crash		# load disabled wait PSW if
 	lpswe	0(%r14)				# do_restart returns
 	.align 8
@@ -877,172 +785,153 @@
  * Setup a pt_regs so that show_trace can provide a good call trace.
  */
 stack_overflow:
-	lg	%r15,__LC_PANIC_STACK	# change to panic stack
-	aghi	%r15,-SP_SIZE
-	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
-	stmg	%r0,%r10,SP_R0(%r15)	# store gprs %r0-%r10 to kernel stack
-	la	%r1,__LC_SAVE_AREA
-	chi	%r12,__LC_SVC_OLD_PSW
-	je	0f
-	chi	%r12,__LC_PGM_OLD_PSW
-	je	0f
-	la	%r1,__LC_SAVE_AREA+40
-0:	mvc	SP_R11(40,%r15),0(%r1)	# move %r11-%r15 to stack
-	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
-	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lg	%r11,__LC_PANIC_STACK	# change to panic stack
+	aghi	%r11,-__PT_SIZE		# create pt_regs
+	stmg	%r0,%r7,__PT_R0(%r11)
+	stmg	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_R8(64,%r11),0(%r14)
+	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+	lgr	%r15,%r11
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	lgr	%r2,%r11		# pass pointer to pt_regs
 	jg	kernel_stack_overflow
 #endif
 
-cleanup_table_system_call:
-	.quad	system_call, sysc_do_svc
-cleanup_table_sysc_tif:
-	.quad	sysc_tif, sysc_restore
-cleanup_table_sysc_restore:
-	.quad	sysc_restore, sysc_done
-cleanup_table_io_tif:
-	.quad	io_tif, io_restore
-cleanup_table_io_restore:
-	.quad	io_restore, io_done
+	.align	8
+cleanup_table:
+	.quad	system_call
+	.quad	sysc_do_svc
+	.quad	sysc_tif
+	.quad	sysc_restore
+	.quad	sysc_done
+	.quad	io_tif
+	.quad	io_restore
+	.quad	io_done
 
 cleanup_critical:
-	clc	8(8,%r12),BASED(cleanup_table_system_call)
+	clg	%r9,BASED(cleanup_table)	# system_call
 	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_system_call+8)
+	clg	%r9,BASED(cleanup_table+8)	# sysc_do_svc
 	jl	cleanup_system_call
-0:
-	clc	8(8,%r12),BASED(cleanup_table_sysc_tif)
+	clg	%r9,BASED(cleanup_table+16)	# sysc_tif
 	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_sysc_tif+8)
+	clg	%r9,BASED(cleanup_table+24)	# sysc_restore
 	jl	cleanup_sysc_tif
-0:
-	clc	8(8,%r12),BASED(cleanup_table_sysc_restore)
-	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_sysc_restore+8)
+	clg	%r9,BASED(cleanup_table+32)	# sysc_done
 	jl	cleanup_sysc_restore
-0:
-	clc	8(8,%r12),BASED(cleanup_table_io_tif)
+	clg	%r9,BASED(cleanup_table+40)	# io_tif
 	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_io_tif+8)
+	clg	%r9,BASED(cleanup_table+48)	# io_restore
 	jl	cleanup_io_tif
-0:
-	clc	8(8,%r12),BASED(cleanup_table_io_restore)
-	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_io_restore+8)
+	clg	%r9,BASED(cleanup_table+56)	# io_done
 	jl	cleanup_io_restore
-0:
-	br	%r14
+0:	br	%r14
+
 
 cleanup_system_call:
-	mvc	__LC_RETURN_PSW(16),0(%r12)
-	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+	# check if stpt has been executed
+	clg	%r9,BASED(cleanup_system_call_insn)
 	jh	0f
-	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-	cghi	%r12,__LC_MCK_OLD_PSW
-	je	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:	cghi	%r12,__LC_MCK_OLD_PSW
-	la	%r12,__LC_SAVE_AREA+80
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
-	la	%r12,__LC_SAVE_AREA+40
-0:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
-	jhe	cleanup_vtime
-	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	# check if stmg has been executed
+	clg	%r9,BASED(cleanup_system_call_insn+8)
 	jh	0f
-	mvc	__LC_SAVE_AREA(40),0(%r12)
-0:	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	aghi	%r15,-SP_SIZE		# make room for registers & psw
-	stg	%r15,32(%r12)
-	stg	%r11,0(%r12)
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	8(8,%r12),__LC_THREAD_INFO
-	lg	%r12,__LC_THREAD_INFO
-	mvc	SP_PSW(16,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+7(%r12),_TIF_SYSCALL
-cleanup_vtime:
-	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
-	jhe	cleanup_stime
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-cleanup_stime:
-	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
-	jh	cleanup_update
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-cleanup_update:
+	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
+0:	# check if base register setup + TIF bit load has been done
+	clg	%r9,BASED(cleanup_system_call_insn+16)
+	jhe	0f
+	# set up saved registers r10 and r12
+	stg	%r10,16(%r11)		# r10 last break
+	stg	%r12,32(%r11)		# r12 thread-info pointer
+0:	# check if the user time update has been done
+	clg	%r9,BASED(cleanup_system_call_insn+24)
+	jh	0f
+	lg	%r15,__LC_EXIT_TIMER
+	slg	%r15,__LC_SYNC_ENTER_TIMER
+	alg	%r15,__LC_USER_TIMER
+	stg	%r15,__LC_USER_TIMER
+0:	# check if the system time update has been done
+	clg	%r9,BASED(cleanup_system_call_insn+32)
+	jh	0f
+	lg	%r15,__LC_LAST_UPDATE_TIMER
+	slg	%r15,__LC_EXIT_TIMER
+	alg	%r15,__LC_SYSTEM_TIMER
+	stg	%r15,__LC_SYSTEM_TIMER
+0:	# update accounting time stamp
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	srag	%r12,%r11,23
-	lg	%r12,__LC_THREAD_INFO
+	# do LAST_BREAK
+	lg	%r9,16(%r11)
+	srag	%r9,%r9,23
 	jz	0f
-	stg	%r11,__TI_last_break(%r12)
-0:	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
-	la	%r12,__LC_RETURN_PSW
+	mvc	__TI_last_break(8,%r12),16(%r11)
+0:	# set up saved register r11
+	lg	%r15,__LC_KERNEL_STACK
+	aghi	%r15,-__PT_SIZE
+	stg	%r15,24(%r11)		# r11 pt_regs pointer
+	# fill pt_regs
+	mvc	__PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
+	stmg	%r0,%r7,__PT_R0(%r15)
+	mvc	__PT_PSW(16,%r15),__LC_SVC_OLD_PSW
+	mvc	__PT_INT_CODE(4,%r15),__LC_SVC_ILC
+	# setup saved register r15
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	stg	%r15,56(%r11)		# r15 stack pointer
+	# set new psw address and exit
+	larl	%r9,sysc_do_svc
 	br	%r14
 cleanup_system_call_insn:
-	.quad	sysc_saveall
 	.quad	system_call
-	.quad	sysc_vtime
-	.quad	sysc_stime
-	.quad	sysc_update
+	.quad	sysc_stmg
+	.quad	sysc_per
+	.quad	sysc_vtime+18
+	.quad	sysc_vtime+42
 
 cleanup_sysc_tif:
-	mvc	__LC_RETURN_PSW(8),0(%r12)
-	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
-	la	%r12,__LC_RETURN_PSW
+	larl	%r9,sysc_tif
 	br	%r14
 
 cleanup_sysc_restore:
-	clc	8(8,%r12),BASED(cleanup_sysc_restore_insn)
-	je	2f
-	clc	8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
-	jhe	0f
-	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-	cghi	%r12,__LC_MCK_OLD_PSW
+	clg	%r9,BASED(cleanup_sysc_restore_insn)
 	je	0f
-	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
-	cghi	%r12,__LC_MCK_OLD_PSW
-	la	%r12,__LC_SAVE_AREA+80
-	je	1f
-	la	%r12,__LC_SAVE_AREA+40
-1:	mvc	0(40,%r12),SP_R11(%r15)
-	lmg	%r0,%r10,SP_R0(%r15)
-	lg	%r15,SP_R15(%r15)
-2:	la	%r12,__LC_RETURN_PSW
+	lg	%r9,24(%r11)		# get saved pointer to pt_regs
+	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
+	mvc	0(64,%r11),__PT_R8(%r9)
+	lmg	%r0,%r7,__PT_R0(%r9)
+0:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 cleanup_sysc_restore_insn:
 	.quad	sysc_done - 4
-	.quad	sysc_done - 16
 
 cleanup_io_tif:
-	mvc	__LC_RETURN_PSW(8),0(%r12)
-	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
-	la	%r12,__LC_RETURN_PSW
+	larl	%r9,io_tif
 	br	%r14
 
 cleanup_io_restore:
-	clc	8(8,%r12),BASED(cleanup_io_restore_insn)
-	je	1f
-	clc	8(8,%r12),BASED(cleanup_io_restore_insn+8)
-	jhe	0f
-	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
-	mvc	__LC_SAVE_AREA+80(40),SP_R11(%r15)
-	lmg	%r0,%r10,SP_R0(%r15)
-	lg	%r15,SP_R15(%r15)
-1:	la	%r12,__LC_RETURN_PSW
+	clg	%r9,BASED(cleanup_io_restore_insn)
+	je	0f
+	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
+	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	mvc	0(64,%r11),__PT_R8(%r9)
+	lmg	%r0,%r7,__PT_R0(%r9)
+0:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 cleanup_io_restore_insn:
 	.quad	io_done - 4
-	.quad	io_done - 16
 
 /*
  * Integer constants
  */
-		.align	4
+	.align	8
 .Lcritical_start:
-		.quad	__critical_start
-.Lcritical_end:
-		.quad	__critical_end
+	.quad	__critical_start
+.Lcritical_length:
+	.quad	__critical_end - __critical_start
+
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 /*
@@ -1054,6 +943,7 @@
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
+	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 	oi	__TI_flags+6(%r14),_TIF_SIE>>8
@@ -1070,7 +960,7 @@
 	SPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
 sie_done:
-	SPP	__LC_CMF_HPP			# set host id
+	SPP	__SF_EMPTY+16(%r15)		# set host id
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 sie_exit:
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
@@ -1093,8 +983,10 @@
 	.align	8
 .Lsie_loop:
 	.quad	sie_loop
-.Lsie_done:
-	.quad	sie_done
+.Lsie_length:
+	.quad	sie_done - sie_loop
+.Lhost_id:
+	.quad	0
 
 	.section __ex_table,"a"
 	.quad	sie_loop,sie_fault
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 900068d..c27a072 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -329,8 +329,8 @@
 #
 # reset files in VM reader
 #
-	stidp	__LC_SAVE_AREA		# store cpuid
-	tm	__LC_SAVE_AREA,0xff	# running VM ?
+	stidp	__LC_SAVE_AREA_SYNC	# store cpuid
+	tm	__LC_SAVE_AREA_SYNC,0xff# running VM ?
 	bno	.Lnoreset
 	la	%r2,.Lreset
 	lhi	%r3,26
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3cd0f25..47b168f 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -208,6 +208,7 @@
 void arch_crash_save_vmcoreinfo(void)
 {
 	VMCOREINFO_SYMBOL(lowcore_ptr);
+	VMCOREINFO_SYMBOL(high_memory);
 	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
 }
 
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 19b4568..22d502e 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -64,70 +64,82 @@
 EXPORT_SYMBOL(detect_memory_layout);
 
 /*
+ * Move memory chunks array from index "from" to index "to"
+ */
+static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
+{
+	int cnt = MEMORY_CHUNKS - to;
+
+	memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
+}
+
+/*
+ * Initialize memory chunk
+ */
+static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
+			   unsigned long size, int type)
+{
+	chunk->type = type;
+	chunk->addr = addr;
+	chunk->size = size;
+}
+
+/*
  * Create memory hole with given address, size, and type
  */
-void create_mem_hole(struct mem_chunk chunks[], unsigned long addr,
+void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
 		     unsigned long size, int type)
 {
-	unsigned long start, end, new_size;
-	int i;
+	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
+	int i, ch_type;
 
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		if (chunks[i].size == 0)
+		if (chunk[i].size == 0)
 			continue;
-		if (addr + size < chunks[i].addr)
-			continue;
-		if (addr >= chunks[i].addr + chunks[i].size)
-			continue;
-		start = max(addr, chunks[i].addr);
-		end = min(addr + size, chunks[i].addr + chunks[i].size);
-		new_size = end - start;
-		if (new_size == 0)
-			continue;
-		if (start == chunks[i].addr &&
-		    end == chunks[i].addr + chunks[i].size) {
-			/* Remove chunk */
-			chunks[i].type = type;
-		} else if (start == chunks[i].addr) {
-			/* Make chunk smaller at start */
-			if (i >= MEMORY_CHUNKS - 1)
-				panic("Unable to create memory hole");
-			memmove(&chunks[i + 1], &chunks[i],
-				sizeof(struct mem_chunk) *
-				(MEMORY_CHUNKS - (i + 1)));
-			chunks[i + 1].addr = chunks[i].addr + new_size;
-			chunks[i + 1].size = chunks[i].size - new_size;
-			chunks[i].size = new_size;
-			chunks[i].type = type;
+
+		/* Define chunk properties */
+		ch_start = chunk[i].addr;
+		ch_size = chunk[i].size;
+		ch_end = ch_start + ch_size - 1;
+		ch_type = chunk[i].type;
+
+		/* Is memory chunk hit by memory hole? */
+		if (addr + size <= ch_start)
+			continue; /* No: memory hole in front of chunk */
+		if (addr > ch_end)
+			continue; /* No: memory hole after chunk */
+
+		/* Yes: Define local hole properties */
+		lh_start = max(addr, chunk[i].addr);
+		lh_end = min(addr + size - 1, ch_end);
+		lh_size = lh_end - lh_start + 1;
+
+		if (lh_start == ch_start && lh_end == ch_end) {
+			/* Hole covers complete memory chunk */
+			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
+		} else if (lh_end == ch_end) {
+			/* Hole starts in memory chunk and covers chunk end */
+			mem_chunk_move(chunk, i + 1, i);
+			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
+				       ch_type);
+			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
 			i += 1;
-		} else if (end == chunks[i].addr + chunks[i].size) {
-			/* Make chunk smaller at end */
-			if (i >= MEMORY_CHUNKS - 1)
-				panic("Unable to create memory hole");
-			memmove(&chunks[i + 1], &chunks[i],
-				sizeof(struct mem_chunk) *
-				(MEMORY_CHUNKS - (i + 1)));
-			chunks[i + 1].addr = start;
-			chunks[i + 1].size = new_size;
-			chunks[i + 1].type = type;
-			chunks[i].size -= new_size;
-			i += 1;
+		} else if (lh_start == ch_start) {
+			/* Hole ends in memory chunk */
+			mem_chunk_move(chunk, i + 1, i);
+			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
+			mem_chunk_init(&chunk[i + 1], lh_end + 1,
+				       ch_size - lh_size, ch_type);
+			break;
 		} else {
-			/* Create memory hole */
-			if (i >= MEMORY_CHUNKS - 2)
-				panic("Unable to create memory hole");
-			memmove(&chunks[i + 2], &chunks[i],
-				sizeof(struct mem_chunk) *
-				(MEMORY_CHUNKS - (i + 2)));
-			chunks[i + 1].addr = addr;
-			chunks[i + 1].size = size;
-			chunks[i + 1].type = type;
-			chunks[i + 2].addr = addr + size;
-			chunks[i + 2].size =
-				chunks[i].addr + chunks[i].size - (addr + size);
-			chunks[i + 2].type = chunks[i].type;
-			chunks[i].size = addr - chunks[i].addr;
-			i += 2;
+			/* Hole splits memory chunk */
+			mem_chunk_move(chunk, i + 2, i);
+			mem_chunk_init(&chunk[i], ch_start,
+				       lh_start - ch_start, ch_type);
+			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
+			mem_chunk_init(&chunk[i + 2], lh_end + 1,
+				       ch_end - lh_end, ch_type);
+			break;
 		}
 	}
 }
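
The rewritten create_mem_hole() above reduces the logic to four overlap cases between the requested hole and each existing chunk: the hole covers the whole chunk, only its tail, only its head, or a region in the middle (splitting the chunk in three). The following stand-alone C sketch mirrors that case analysis; it is not kernel code, struct mem_chunk is simplified here, and MEMORY_CHUNKS and the type constants are illustrative placeholders.

/*
 * Stand-alone illustration of the four overlap cases handled by the
 * reworked create_mem_hole().  Not kernel code: struct mem_chunk is
 * simplified and the constants below are placeholders.
 */
#include <stdio.h>
#include <string.h>

#define MEMORY_CHUNKS	 8
#define CHUNK_READ_WRITE 0
#define CHUNK_HOLE	 4	/* placeholder type for the hole */

struct mem_chunk {
	unsigned long addr;
	unsigned long size;
	int type;
};

/* Shift the chunk array to make room for split entries. */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
	memmove(&chunk[to], &chunk[from],
		(MEMORY_CHUNKS - to) * sizeof(struct mem_chunk));
}

static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
			   unsigned long size, int type)
{
	chunk->addr = addr;
	chunk->size = size;
	chunk->type = type;
}

static void create_hole(struct mem_chunk chunk[], unsigned long addr,
			unsigned long size, int type)
{
	for (int i = 0; i < MEMORY_CHUNKS; i++) {
		unsigned long ch_start = chunk[i].addr;
		unsigned long ch_size = chunk[i].size;
		unsigned long ch_end = ch_start + ch_size - 1;
		int ch_type = chunk[i].type;
		unsigned long lh_start, lh_end, lh_size;

		if (ch_size == 0 || addr + size <= ch_start || addr > ch_end)
			continue;	/* hole does not touch this chunk */

		lh_start = addr > ch_start ? addr : ch_start;
		lh_end = addr + size - 1 < ch_end ? addr + size - 1 : ch_end;
		lh_size = lh_end - lh_start + 1;

		if (lh_start == ch_start && lh_end == ch_end) {
			/* hole covers the complete chunk: retype in place */
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
		} else if (lh_end == ch_end) {
			/* hole covers the chunk tail: split into two entries */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			i += 1;
		} else if (lh_start == ch_start) {
			/* hole covers the chunk head: split into two entries */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 1], lh_end + 1, ch_size - lh_size, ch_type);
			break;
		} else {
			/* hole inside the chunk: split into three entries */
			mem_chunk_move(chunk, i + 2, i);
			mem_chunk_init(&chunk[i], ch_start, lh_start - ch_start, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 2], lh_end + 1, ch_end - lh_end, ch_type);
			break;
		}
	}
}

int main(void)
{
	struct mem_chunk chunk[MEMORY_CHUNKS] = {
		{ 0x00000000, 0x10000000, CHUNK_READ_WRITE },	/* one 256 MiB chunk */
	};

	create_hole(chunk, 0x04000000, 0x01000000, CHUNK_HOLE);

	for (int i = 0; i < MEMORY_CHUNKS && chunk[i].size; i++)
		printf("chunk %d: addr=%#010lx size=%#010lx type=%d\n",
		       i, chunk[i].addr, chunk[i].size, chunk[i].type);
	return 0;
}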
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9451b21..3201ae4 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,10 +91,12 @@
 void cpu_idle(void)
 {
 	for (;;) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched())
 			default_idle();
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 732a793..36b3265 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -17,11 +17,11 @@
 #
 ENTRY(store_status)
 	/* Save register one and load save area base */
-	stg	%r1,__LC_SAVE_AREA+120(%r0)
+	stg	%r1,__LC_SAVE_AREA_RESTART
 	lghi	%r1,SAVE_AREA_BASE
 	/* General purpose registers */
 	stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	lg	%r2,__LC_SAVE_AREA+120(%r0)
+	lg	%r2,__LC_SAVE_AREA_RESTART
 	stg	%r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
 	/* Control registers */
 	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e54c4ff..354de07 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
@@ -94,6 +95,15 @@
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
 
+unsigned long VMALLOC_START;
+EXPORT_SYMBOL(VMALLOC_START);
+
+unsigned long VMALLOC_END;
+EXPORT_SYMBOL(VMALLOC_END);
+
+struct page *vmemmap;
+EXPORT_SYMBOL(vmemmap);
+
 /* An array with a pointer to the lowcore of every CPU. */
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
@@ -277,6 +287,15 @@
 }
 early_param("mem", early_parse_mem);
 
+static int __init parse_vmalloc(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
+	return 0;
+}
+early_param("vmalloc", parse_vmalloc);
+
 unsigned int user_mode = HOME_SPACE_MODE;
 EXPORT_SYMBOL_GPL(user_mode);
 
@@ -382,7 +401,6 @@
 		__ctl_set_bit(14, 29);
 	}
 #else
-	lc->cmf_hpp = -1ULL;
 	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
 #endif
 	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -478,8 +496,7 @@
 
 static void __init setup_memory_end(void)
 {
-	unsigned long memory_size;
-	unsigned long max_mem;
+	unsigned long vmax, vmalloc_size, tmp;
 	int i;
 
 
@@ -489,12 +506,9 @@
 		memory_end_set = 1;
 	}
 #endif
-	memory_size = 0;
+	real_memory_size = 0;
 	memory_end &= PAGE_MASK;
 
-	max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
-	memory_end = min(max_mem, memory_end);
-
 	/*
 	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
 	 * extra checks that HOLES_IN_ZONE would require.
@@ -514,23 +528,48 @@
 			chunk->addr = start;
 			chunk->size = end - start;
 		}
+		real_memory_size = max(real_memory_size,
+				       chunk->addr + chunk->size);
 	}
 
+	/* Choose kernel address space layout: 2, 3, or 4 levels. */
+#ifdef CONFIG_64BIT
+	vmalloc_size = VMALLOC_END ?: 128UL << 30;
+	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
+	if (tmp <= (1UL << 42))
+		vmax = 1UL << 42;	/* 3-level kernel page table */
+	else
+		vmax = 1UL << 53;	/* 4-level kernel page table */
+#else
+	vmalloc_size = VMALLOC_END ?: 96UL << 20;
+	vmax = 1UL << 31;		/* 2-level kernel page table */
+#endif
+	/* vmalloc area is at the end of the kernel address space. */
+	VMALLOC_END = vmax;
+	VMALLOC_START = vmax - vmalloc_size;
+
+	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
+	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+	tmp = VMALLOC_START - tmp * sizeof(struct page);
+	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
+	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
+	vmemmap = (struct page *) tmp;
+
+	/* Take care that memory_end is set and <= vmemmap */
+	memory_end = min(memory_end ?: real_memory_size, tmp);
+
+	/* Fixup memory chunk array to fit into 0..memory_end */
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		struct mem_chunk *chunk = &memory_chunk[i];
 
-		real_memory_size = max(real_memory_size,
-				       chunk->addr + chunk->size);
-		if (chunk->addr >= max_mem) {
+		if (chunk->addr >= memory_end) {
 			memset(chunk, 0, sizeof(*chunk));
 			continue;
 		}
-		if (chunk->addr + chunk->size > max_mem)
-			chunk->size = max_mem - chunk->addr;
-		memory_size = max(memory_size, chunk->addr + chunk->size);
+		if (chunk->addr + chunk->size > memory_end)
+			chunk->size = memory_end - chunk->addr;
 	}
-	if (!memory_end)
-		memory_end = memory_size;
 }
 
 void *restart_stack __attribute__((__section__(".data")));
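
The layout selection above estimates how much kernel virtual space is needed for the identity mapping, the vmemmap array, and the vmalloc area, and picks a 3-level or 4-level kernel page table accordingly. A stand-alone sketch of that decision follows; sizeof(struct page) is assumed to be 64 bytes here, and the 2^42 / 2^53 limits follow the hunk above.

/* Sketch of the 64-bit address space layout decision in setup_memory_end(). */
#include <stdio.h>

#define PAGE_SIZE	 4096UL
#define STRUCT_PAGE_SIZE 64UL	/* assumed sizeof(struct page) */

static unsigned long pick_vmax(unsigned long mem_size, unsigned long vmalloc_size)
{
	/* identity mapping + vmemmap array + vmalloc area */
	unsigned long pages = mem_size / PAGE_SIZE;
	unsigned long need = pages * (STRUCT_PAGE_SIZE + PAGE_SIZE) + vmalloc_size;

	return need <= (1UL << 42) ? 1UL << 42	/* 3-level kernel page table */
				   : 1UL << 53;	/* 4-level kernel page table */
}

int main(void)
{
	unsigned long vmalloc_size = 128UL << 30;	/* default vmalloc size */
	unsigned long mem = 64UL << 30;			/* 64 GiB of memory */
	unsigned long vmax = pick_vmax(mem, vmalloc_size);

	/* The vmalloc area ends at vmax and grows downwards from there. */
	printf("vmax = %#lx, vmalloc area = [%#lx, %#lx)\n",
	       vmax, vmax - vmalloc_size, vmax);
	return 0;
}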
@@ -654,7 +693,6 @@
 static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
 					 int type)
 {
-
 	create_mem_hole(memory_chunk, addr, size, type);
 }
 
@@ -820,7 +858,8 @@
 		end_chunk = min(end_chunk, end_pfn);
 		if (start_chunk >= end_chunk)
 			continue;
-		add_active_range(0, start_chunk, end_chunk);
+		memblock_add_node(PFN_PHYS(start_chunk),
+				  PFN_PHYS(end_chunk - start_chunk), 0);
 		pfn = max(start_chunk, start_pfn);
 		for (; pfn < end_chunk; pfn++)
 			page_set_storage_key(PFN_PHYS(pfn),
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 7f6f9f3..a8ba840 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -302,9 +302,13 @@
 
 	/* We forgot to include these in the sigcontext.
 	   To avoid breaking binary compatibility, they are passed as args. */
-	regs->gprs[4] = current->thread.trap_no;
-	regs->gprs[5] = current->thread.prot_addr;
-	regs->gprs[6] = task_thread_info(current)->last_break;
+	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+	    sig == SIGTRAP || sig == SIGFPE) {
+		/* set extra registers only for synchronous signals */
+		regs->gprs[4] = regs->int_code & 127;
+		regs->gprs[5] = regs->int_parm_long;
+		regs->gprs[6] = task_thread_info(current)->last_break;
+	}
 
 	/* Place signal number on stack to allow backtrace from handler.  */
 	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
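
The signal.c hunk above only passes the extra fault information (interruption code, long interruption parameter, last breaking-event address) in the signal frame for synchronous signals. A trivial sketch of that predicate, not kernel code:

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

/* Same signal set the patch treats as synchronous (fault-style) signals. */
static bool is_synchronous_signal(int sig)
{
	return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
	       sig == SIGTRAP || sig == SIGFPE;
}

int main(void)
{
	printf("SIGSEGV synchronous: %d\n", is_synchronous_signal(SIGSEGV));
	printf("SIGINT  synchronous: %d\n", is_synchronous_signal(SIGINT));
	return 0;
}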
@@ -434,13 +438,13 @@
 	 * call information.
 	 */
 	current_thread_info()->system_call =
-		test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0;
+		test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0;
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
 		if (current_thread_info()->system_call) {
-			regs->svc_code = current_thread_info()->system_call;
+			regs->int_code = current_thread_info()->system_call;
 			/* Check for system call restarting. */
 			switch (regs->gprs[2]) {
 			case -ERESTART_RESTARTBLOCK:
@@ -457,7 +461,7 @@
 				regs->gprs[2] = regs->orig_gpr2;
 				regs->psw.addr =
 					__rewind_psw(regs->psw,
-						     regs->svc_code >> 16);
+						     regs->int_code >> 16);
 				break;
 			}
 		}
@@ -488,11 +492,11 @@
 	/* No handlers present - check for system call restart */
 	clear_thread_flag(TIF_SYSCALL);
 	if (current_thread_info()->system_call) {
-		regs->svc_code = current_thread_info()->system_call;
+		regs->int_code = current_thread_info()->system_call;
 		switch (regs->gprs[2]) {
 		case -ERESTART_RESTARTBLOCK:
 			/* Restart with sys_restart_syscall */
-			regs->svc_code = __NR_restart_syscall;
+			regs->int_code = __NR_restart_syscall;
 		/* fallthrough */
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
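
The signal.c hunks above stop publishing stale trap details to asynchronous signal handlers: the extra registers are only filled in for signals raised synchronously by the trapping instruction, and their values now come straight from regs->int_code and regs->int_parm_long instead of per-thread copies. A trivial stand-alone restatement of that predicate (user-space C, signal numbers from <signal.h>):

	#include <signal.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* synchronous == generated by the instruction that just trapped */
	static bool is_sync_signal(int sig)
	{
		return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
		       sig == SIGTRAP || sig == SIGFPE;
	}

	int main(void)
	{
		printf("SIGSEGV sync? %d, SIGINT sync? %d\n",
		       is_sync_signal(SIGSEGV), is_sync_signal(SIGINT));
		return 0;
	}
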
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3ea8728..2398ce6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -69,9 +69,7 @@
 };
 
 DEFINE_MUTEX(smp_cpu_state_mutex);
-int smp_cpu_polarization[NR_CPUS];
 static int smp_cpu_state[NR_CPUS];
-static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
@@ -149,29 +147,59 @@
 	sp -= sizeof(struct pt_regs);
 	regs = (struct pt_regs *) sp;
 	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
-	regs->psw = lc->psw_save_area;
+	regs->psw = current_lc->psw_save_area;
 	sp -= STACK_FRAME_OVERHEAD;
 	sf = (struct stack_frame *) sp;
-	sf->back_chain = regs->gprs[15];
+	sf->back_chain = 0;
 	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
 }
 
+static void smp_stop_cpu(void)
+{
+	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
+		cpu_relax();
+}
+
 void smp_send_stop(void)
 {
-	int cpu, rc;
+	cpumask_t cpumask;
+	int cpu;
+	u64 end;
 
 	/* Disable all interrupts/machine checks */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
 	trace_hardirqs_off();
 
-	/* stop all processors */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		do {
-			rc = sigp(cpu, sigp_stop);
-		} while (rc == sigp_busy);
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 
+	if (oops_in_progress) {
+		/*
+		 * Give the other cpus the opportunity to complete
+		 * outstanding interrupts before stopping them.
+		 */
+		end = get_clock() + (1000000UL << 12);
+		for_each_cpu(cpu, &cpumask) {
+			set_bit(ec_stop_cpu, (unsigned long *)
+				&lowcore_ptr[cpu]->ext_call_fast);
+			while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
+			       get_clock() < end)
+				cpu_relax();
+		}
+		while (get_clock() < end) {
+			for_each_cpu(cpu, &cpumask)
+				if (cpu_stopped(cpu))
+					cpumask_clear_cpu(cpu, &cpumask);
+			if (cpumask_empty(&cpumask))
+				break;
+			cpu_relax();
+		}
+	}
+
+	/* stop all processors */
+	for_each_cpu(cpu, &cpumask) {
+		while (sigp(cpu, sigp_stop) == sigp_busy)
+			cpu_relax();
 		while (!cpu_stopped(cpu))
 			cpu_relax();
 	}
@@ -187,7 +215,7 @@
 {
 	unsigned long bits;
 
-	if (ext_int_code == 0x1202)
+	if ((ext_int_code & 0xffff) == 0x1202)
 		kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++;
 	else
 		kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++;
@@ -196,6 +224,9 @@
 	 */
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
+	if (test_bit(ec_stop_cpu, &bits))
+		smp_stop_cpu();
+
 	if (test_bit(ec_schedule, &bits))
 		scheduler_ipi();
 
@@ -204,6 +235,7 @@
 
 	if (test_bit(ec_call_function_single, &bits))
 		generic_smp_call_function_single_interrupt();
+
 }
 
 /*
@@ -369,7 +401,7 @@
 		if (cpu_known(cpu_id))
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
-		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
+		cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
 		if (!cpu_stopped(logical_cpu))
 			continue;
 		set_cpu_present(logical_cpu, true);
@@ -403,7 +435,7 @@
 		if (cpu_known(cpu_id))
 			continue;
 		__cpu_logical_map[logical_cpu] = cpu_id;
-		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
+		cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
 		set_cpu_present(logical_cpu, true);
 		if (cpu >= info->configured)
 			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -656,7 +688,7 @@
 				     - sizeof(struct stack_frame));
 	memset(sf, 0, sizeof(struct stack_frame));
 	sf->gprs[9] = (unsigned long) sf;
-	cpu_lowcore->save_area[15] = (unsigned long) sf;
+	cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
 	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
 	atomic_inc(&init_mm.context.attach_count);
 	asm volatile(
@@ -806,7 +838,7 @@
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
 	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
-	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
+	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -831,8 +863,8 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static ssize_t cpu_configure_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t cpu_configure_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	ssize_t count;
 
@@ -842,8 +874,8 @@
 	return count;
 }
 
-static ssize_t cpu_configure_store(struct sys_device *dev,
-				  struct sysdev_attribute *attr,
+static ssize_t cpu_configure_store(struct device *dev,
+				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
 	int cpu = dev->id;
@@ -868,7 +900,8 @@
 			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
-				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
@@ -877,7 +910,8 @@
 			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
-				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
@@ -889,52 +923,21 @@
 	put_online_cpus();
 	return rc ? rc : count;
 }
-static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static ssize_t cpu_polarization_show(struct sys_device *dev,
-				     struct sysdev_attribute *attr, char *buf)
-{
-	int cpu = dev->id;
-	ssize_t count;
-
-	mutex_lock(&smp_cpu_state_mutex);
-	switch (smp_cpu_polarization[cpu]) {
-	case POLARIZATION_HRZ:
-		count = sprintf(buf, "horizontal\n");
-		break;
-	case POLARIZATION_VL:
-		count = sprintf(buf, "vertical:low\n");
-		break;
-	case POLARIZATION_VM:
-		count = sprintf(buf, "vertical:medium\n");
-		break;
-	case POLARIZATION_VH:
-		count = sprintf(buf, "vertical:high\n");
-		break;
-	default:
-		count = sprintf(buf, "unknown\n");
-		break;
-	}
-	mutex_unlock(&smp_cpu_state_mutex);
-	return count;
-}
-static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
-
-static ssize_t show_cpu_address(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_cpu_address(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
 }
-static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
-
+static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
 
 static struct attribute *cpu_common_attrs[] = {
 #ifdef CONFIG_HOTPLUG_CPU
-	&attr_configure.attr,
+	&dev_attr_configure.attr,
 #endif
-	&attr_address.attr,
-	&attr_polarization.attr,
+	&dev_attr_address.attr,
 	NULL,
 };
 
@@ -942,8 +945,8 @@
 	.attrs = cpu_common_attrs,
 };
 
-static ssize_t show_capability(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_capability(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	unsigned int capability;
 	int rc;
@@ -953,10 +956,10 @@
 		return rc;
 	return sprintf(buf, "%u\n", capability);
 }
-static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
+static DEVICE_ATTR(capability, 0444, show_capability, NULL);
 
-static ssize_t show_idle_count(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_idle_count(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct s390_idle_data *idle;
 	unsigned long long idle_count;
@@ -976,10 +979,10 @@
 		goto repeat;
 	return sprintf(buf, "%llu\n", idle_count);
 }
-static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
+static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
 
-static ssize_t show_idle_time(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_idle_time(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct s390_idle_data *idle;
 	unsigned long long now, idle_time, idle_enter;
@@ -1001,12 +1004,12 @@
 		goto repeat;
 	return sprintf(buf, "%llu\n", idle_time >> 12);
 }
-static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
 static struct attribute *cpu_online_attrs[] = {
-	&attr_capability.attr,
-	&attr_idle_count.attr,
-	&attr_idle_time_us.attr,
+	&dev_attr_capability.attr,
+	&dev_attr_idle_count.attr,
+	&dev_attr_idle_time_us.attr,
 	NULL,
 };
 
@@ -1019,7 +1022,7 @@
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
-	struct sys_device *s = &c->sysdev;
+	struct device *s = &c->dev;
 	struct s390_idle_data *idle;
 	int err = 0;
 
@@ -1045,7 +1048,7 @@
 static int __devinit smp_add_present_cpu(int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
-	struct sys_device *s = &c->sysdev;
+	struct device *s = &c->dev;
 	int rc;
 
 	c->hotpluggable = 1;
@@ -1055,11 +1058,20 @@
 	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
 	if (rc)
 		goto out_cpu;
-	if (!cpu_online(cpu))
-		goto out;
-	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
-	if (!rc)
-		return 0;
+	if (cpu_online(cpu)) {
+		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+		if (rc)
+			goto out_online;
+	}
+	rc = topology_cpu_init(c);
+	if (rc)
+		goto out_topology;
+	return 0;
+
+out_topology:
+	if (cpu_online(cpu))
+		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+out_online:
 	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
 out_cpu:
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1098,8 +1110,8 @@
 	return rc;
 }
 
-static ssize_t __ref rescan_store(struct sysdev_class *class,
-				  struct sysdev_class_attribute *attr,
+static ssize_t __ref rescan_store(struct device *dev,
+				  struct device_attribute *attr,
 				  const char *buf,
 				  size_t count)
 {
@@ -1108,64 +1120,19 @@
 	rc = smp_rescan_cpus();
 	return rc ? rc : count;
 }
-static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
+static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static ssize_t dispatching_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
-				char *buf)
+static int __init s390_smp_init(void)
 {
-	ssize_t count;
-
-	mutex_lock(&smp_cpu_state_mutex);
-	count = sprintf(buf, "%d\n", cpu_management);
-	mutex_unlock(&smp_cpu_state_mutex);
-	return count;
-}
-
-static ssize_t dispatching_store(struct sysdev_class *dev,
-				 struct sysdev_class_attribute *attr,
-				 const char *buf,
-				 size_t count)
-{
-	int val, rc;
-	char delim;
-
-	if (sscanf(buf, "%d %c", &val, &delim) != 1)
-		return -EINVAL;
-	if (val != 0 && val != 1)
-		return -EINVAL;
-	rc = 0;
-	get_online_cpus();
-	mutex_lock(&smp_cpu_state_mutex);
-	if (cpu_management == val)
-		goto out;
-	rc = topology_set_cpu_management(val);
-	if (!rc)
-		cpu_management = val;
-out:
-	mutex_unlock(&smp_cpu_state_mutex);
-	put_online_cpus();
-	return rc ? rc : count;
-}
-static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
-			 dispatching_store);
-
-static int __init topology_init(void)
-{
-	int cpu;
-	int rc;
+	int cpu, rc;
 
 	register_cpu_notifier(&smp_cpu_nb);
-
 #ifdef CONFIG_HOTPLUG_CPU
-	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
+	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
 	if (rc)
 		return rc;
 #endif
-	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
-	if (rc)
-		return rc;
 	for_each_present_cpu(cpu) {
 		rc = smp_add_present_cpu(cpu);
 		if (rc)
@@ -1173,4 +1140,4 @@
 	}
 	return 0;
 }
-subsys_initcall(topology_init);
+subsys_initcall(s390_smp_init);
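
Besides the sysdev-to-device conversion, smp_send_stop() above now special-cases the oops path: the other CPUs first get an emergency-signal request and a bounded window (roughly one second of TOD clock time) to finish outstanding interrupts and stop themselves, and only the stragglers are then stopped forcibly with sigp stop. A stand-alone sketch of that "ask, wait with a deadline, then force" pattern (plain C with illustrative names, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	static unsigned long long now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	}

	static bool peer_stopped;	/* stands in for cpu_stopped(cpu) */

	int main(void)
	{
		unsigned long long end = now_ns() + 10 * 1000000ULL;	/* 10ms grace */

		/* "request stop" would go here (the emergency signal in the patch) */
		while (now_ns() < end && !peer_stopped)
			;	/* cpu_relax() in the kernel */
		if (!peer_stopped)
			puts("grace period over - forcing stop (sigp stop in the patch)");
		return 0;
	}
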
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 4760814..78ea194 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -60,74 +60,22 @@
 }
 
 /*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
  */
 SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
 		unsigned long, third, void __user *, ptr)
 {
-        struct ipc_kludge tmp;
-	int ret;
-
-        switch (call) {
-        case SEMOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr,
-				       (unsigned)second, NULL);
-	case SEMTIMEDOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr,
-				       (unsigned)second,
-				       (const struct timespec __user *) third);
-        case SEMGET:
-                return sys_semget(first, (int)second, third);
-        case SEMCTL: {
-                union semun fourth;
-                if (!ptr)
-                        return -EINVAL;
-                if (get_user(fourth.__pad, (void __user * __user *) ptr))
-                        return -EFAULT;
-                return sys_semctl(first, (int)second, third, fourth);
-        }
-        case MSGSND:
-		return sys_msgsnd (first, (struct msgbuf __user *) ptr,
-                                   (size_t)second, third);
-		break;
-        case MSGRCV:
-                if (!ptr)
-                        return -EINVAL;
-                if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr,
-                                    sizeof (struct ipc_kludge)))
-                        return -EFAULT;
-                return sys_msgrcv (first, tmp.msgp,
-                                   (size_t)second, tmp.msgtyp, third);
-        case MSGGET:
-                return sys_msgget((key_t)first, (int)second);
-        case MSGCTL:
-                return sys_msgctl(first, (int)second,
-				   (struct msqid_ds __user *)ptr);
-
-	case SHMAT: {
-		ulong raddr;
-		ret = do_shmat(first, (char __user *)ptr,
-				(int)second, &raddr);
-		if (ret)
-			return ret;
-		return put_user (raddr, (ulong __user *) third);
-		break;
-        }
-	case SHMDT:
-		return sys_shmdt ((char __user *)ptr);
-	case SHMGET:
-		return sys_shmget(first, (size_t)second, third);
-	case SHMCTL:
-		return sys_shmctl(first, (int)second,
-                                   (struct shmid_ds __user *) ptr);
-	default:
-		return -ENOSYS;
-
-	}
-
-	return -EINVAL;
+	if (call >> 16)
+		return -EINVAL;
+	/* The s390 sys_ipc variant has only five parameters instead of six
+	 * like the generic variant. The only difference is the handling of
+	 * the SEMTIMEDOP subcall where on s390 the third parameter is used
+	 * as a pointer to a struct timespec where the generic variant uses
+	 * the fifth parameter.
+	 * Therefore we can call the generic variant by simply passing the
+	 * third parameter also as fifth parameter.
+	 */
+	return sys_ipc(call, first, second, third, ptr, third);
 }
 
 #ifdef CONFIG_64BIT
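
The rewrite above drops the hand-rolled demultiplexer in favour of the generic sys_ipc(). The upper 16 bits of "call" carry the IPC version field, which this entry point does not accept, hence the "call >> 16" check; and since SEMTIMEDOP is the only subcall for which the generic code reads a fifth argument, forwarding "third" in both slots reproduces the old s390 calling convention. A small stand-alone model of that mapping (illustrative only; the SEMTIMEDOP value is from <linux/ipc.h>):

	#include <stdio.h>

	#define SEMTIMEDOP 4	/* subcall number from <linux/ipc.h> */

	/* models the generic demultiplexer: only SEMTIMEDOP reads the fifth slot */
	static const void *timeout_seen_by_generic_code(unsigned int call,
							const void *third,
							const void *fifth)
	{
		return call == SEMTIMEDOP ? fifth : NULL;
	}

	int main(void)
	{
		int dummy_timespec;
		/* the s390 wrapper forwards "third" as the fifth argument as well */
		const void *p = timeout_seen_by_generic_code(SEMTIMEDOP,
							     &dummy_timespec,
							     &dummy_timespec);

		printf("generic code sees the s390 third argument: %s\n",
		       p == &dummy_timespec ? "yes" : "no");
		return 0;
	}
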
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ebbfab3..fa02f44 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -27,7 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/stop_machine.h>
 #include <linux/time.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/smp.h>
@@ -1116,34 +1116,35 @@
 /*
  * Sysfs interface functions
  */
-static struct sysdev_class etr_sysclass = {
-	.name	= "etr",
+static struct bus_type etr_subsys = {
+	.name		= "etr",
+	.dev_name	= "etr",
 };
 
-static struct sys_device etr_port0_dev = {
+static struct device etr_port0_dev = {
 	.id	= 0,
-	.cls	= &etr_sysclass,
+	.bus	= &etr_subsys,
 };
 
-static struct sys_device etr_port1_dev = {
+static struct device etr_port1_dev = {
 	.id	= 1,
-	.cls	= &etr_sysclass,
+	.bus	= &etr_subsys,
 };
 
 /*
- * ETR class attributes
+ * ETR subsys attributes
  */
-static ssize_t etr_stepping_port_show(struct sysdev_class *class,
-					struct sysdev_class_attribute *attr,
+static ssize_t etr_stepping_port_show(struct device *dev,
+					struct device_attribute *attr,
 					char *buf)
 {
 	return sprintf(buf, "%i\n", etr_port0.esw.p);
 }
 
-static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
+static DEVICE_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
 
-static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
-				      	struct sysdev_class_attribute *attr,
+static ssize_t etr_stepping_mode_show(struct device *dev,
+					struct device_attribute *attr,
 					char *buf)
 {
 	char *mode_str;
@@ -1157,12 +1158,12 @@
 	return sprintf(buf, "%s\n", mode_str);
 }
 
-static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
+static DEVICE_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
 
 /*
  * ETR port attributes
  */
-static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
+static inline struct etr_aib *etr_aib_from_dev(struct device *dev)
 {
 	if (dev == &etr_port0_dev)
 		return etr_port0_online ? &etr_port0 : NULL;
@@ -1170,8 +1171,8 @@
 		return etr_port1_online ? &etr_port1 : NULL;
 }
 
-static ssize_t etr_online_show(struct sys_device *dev,
-				struct sysdev_attribute *attr,
+static ssize_t etr_online_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	unsigned int online;
@@ -1180,8 +1181,8 @@
 	return sprintf(buf, "%i\n", online);
 }
 
-static ssize_t etr_online_store(struct sys_device *dev,
-				struct sysdev_attribute *attr,
+static ssize_t etr_online_store(struct device *dev,
+				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
 	unsigned int value;
@@ -1218,20 +1219,20 @@
 	return count;
 }
 
-static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
+static DEVICE_ATTR(online, 0600, etr_online_show, etr_online_store);
 
-static ssize_t etr_stepping_control_show(struct sys_device *dev,
-					struct sysdev_attribute *attr,
+static ssize_t etr_stepping_control_show(struct device *dev,
+					struct device_attribute *attr,
 					char *buf)
 {
 	return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
 		       etr_eacr.e0 : etr_eacr.e1);
 }
 
-static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
+static DEVICE_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
 
-static ssize_t etr_mode_code_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_mode_code_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	if (!etr_port0_online && !etr_port1_online)
 		/* Status word is not uptodate if both ports are offline. */
@@ -1240,10 +1241,10 @@
 		       etr_port0.esw.psc0 : etr_port0.esw.psc1);
 }
 
-static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
+static DEVICE_ATTR(state_code, 0400, etr_mode_code_show, NULL);
 
-static ssize_t etr_untuned_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_untuned_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1252,10 +1253,10 @@
 	return sprintf(buf, "%i\n", aib->edf1.u);
 }
 
-static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
+static DEVICE_ATTR(untuned, 0400, etr_untuned_show, NULL);
 
-static ssize_t etr_network_id_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_network_id_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1264,10 +1265,10 @@
 	return sprintf(buf, "%i\n", aib->edf1.net_id);
 }
 
-static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
+static DEVICE_ATTR(network, 0400, etr_network_id_show, NULL);
 
-static ssize_t etr_id_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_id_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1276,10 +1277,10 @@
 	return sprintf(buf, "%i\n", aib->edf1.etr_id);
 }
 
-static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
+static DEVICE_ATTR(id, 0400, etr_id_show, NULL);
 
-static ssize_t etr_port_number_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_port_number_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1288,10 +1289,10 @@
 	return sprintf(buf, "%i\n", aib->edf1.etr_pn);
 }
 
-static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
+static DEVICE_ATTR(port, 0400, etr_port_number_show, NULL);
 
-static ssize_t etr_coupled_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_coupled_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1300,10 +1301,10 @@
 	return sprintf(buf, "%i\n", aib->edf3.c);
 }
 
-static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
+static DEVICE_ATTR(coupled, 0400, etr_coupled_show, NULL);
 
-static ssize_t etr_local_time_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_local_time_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1312,10 +1313,10 @@
 	return sprintf(buf, "%i\n", aib->edf3.blto);
 }
 
-static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
+static DEVICE_ATTR(local_time, 0400, etr_local_time_show, NULL);
 
-static ssize_t etr_utc_offset_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_utc_offset_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct etr_aib *aib = etr_aib_from_dev(dev);
 
@@ -1324,64 +1325,64 @@
 	return sprintf(buf, "%i\n", aib->edf3.buo);
 }
 
-static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
+static DEVICE_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
 
-static struct sysdev_attribute *etr_port_attributes[] = {
-	&attr_online,
-	&attr_stepping_control,
-	&attr_state_code,
-	&attr_untuned,
-	&attr_network,
-	&attr_id,
-	&attr_port,
-	&attr_coupled,
-	&attr_local_time,
-	&attr_utc_offset,
+static struct device_attribute *etr_port_attributes[] = {
+	&dev_attr_online,
+	&dev_attr_stepping_control,
+	&dev_attr_state_code,
+	&dev_attr_untuned,
+	&dev_attr_network,
+	&dev_attr_id,
+	&dev_attr_port,
+	&dev_attr_coupled,
+	&dev_attr_local_time,
+	&dev_attr_utc_offset,
 	NULL
 };
 
-static int __init etr_register_port(struct sys_device *dev)
+static int __init etr_register_port(struct device *dev)
 {
-	struct sysdev_attribute **attr;
+	struct device_attribute **attr;
 	int rc;
 
-	rc = sysdev_register(dev);
+	rc = device_register(dev);
 	if (rc)
 		goto out;
 	for (attr = etr_port_attributes; *attr; attr++) {
-		rc = sysdev_create_file(dev, *attr);
+		rc = device_create_file(dev, *attr);
 		if (rc)
 			goto out_unreg;
 	}
 	return 0;
 out_unreg:
 	for (; attr >= etr_port_attributes; attr--)
-		sysdev_remove_file(dev, *attr);
-	sysdev_unregister(dev);
+		device_remove_file(dev, *attr);
+	device_unregister(dev);
 out:
 	return rc;
 }
 
-static void __init etr_unregister_port(struct sys_device *dev)
+static void __init etr_unregister_port(struct device *dev)
 {
-	struct sysdev_attribute **attr;
+	struct device_attribute **attr;
 
 	for (attr = etr_port_attributes; *attr; attr++)
-		sysdev_remove_file(dev, *attr);
-	sysdev_unregister(dev);
+		device_remove_file(dev, *attr);
+	device_unregister(dev);
 }
 
 static int __init etr_init_sysfs(void)
 {
 	int rc;
 
-	rc = sysdev_class_register(&etr_sysclass);
+	rc = subsys_system_register(&etr_subsys, NULL);
 	if (rc)
 		goto out;
-	rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
+	rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_port);
 	if (rc)
-		goto out_unreg_class;
-	rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
+		goto out_unreg_subsys;
+	rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_mode);
 	if (rc)
 		goto out_remove_stepping_port;
 	rc = etr_register_port(&etr_port0_dev);
@@ -1395,11 +1396,11 @@
 out_remove_port0:
 	etr_unregister_port(&etr_port0_dev);
 out_remove_stepping_mode:
-	sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
+	device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_mode);
 out_remove_stepping_port:
-	sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
-out_unreg_class:
-	sysdev_class_unregister(&etr_sysclass);
+	device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_port);
+out_unreg_subsys:
+	bus_unregister(&etr_subsys);
 out:
 	return rc;
 }
@@ -1599,14 +1600,15 @@
 }
 
 /*
- * STP class sysfs interface functions
+ * STP subsys sysfs interface functions
  */
-static struct sysdev_class stp_sysclass = {
-	.name	= "stp",
+static struct bus_type stp_subsys = {
+	.name		= "stp",
+	.dev_name	= "stp",
 };
 
-static ssize_t stp_ctn_id_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_ctn_id_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online)
@@ -1615,10 +1617,10 @@
 		       *(unsigned long long *) stp_info.ctnid);
 }
 
-static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
+static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
 
-static ssize_t stp_ctn_type_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_ctn_type_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online)
@@ -1626,10 +1628,10 @@
 	return sprintf(buf, "%i\n", stp_info.ctn);
 }
 
-static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
+static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
 
-static ssize_t stp_dst_offset_show(struct sysdev_class *class,
-				   struct sysdev_class_attribute *attr,
+static ssize_t stp_dst_offset_show(struct device *dev,
+				   struct device_attribute *attr,
 				   char *buf)
 {
 	if (!stp_online || !(stp_info.vbits & 0x2000))
@@ -1637,10 +1639,10 @@
 	return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
 }
 
-static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
+static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
 
-static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
-					struct sysdev_class_attribute *attr,
+static ssize_t stp_leap_seconds_show(struct device *dev,
+					struct device_attribute *attr,
 					char *buf)
 {
 	if (!stp_online || !(stp_info.vbits & 0x8000))
@@ -1648,10 +1650,10 @@
 	return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
 }
 
-static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
+static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
 
-static ssize_t stp_stratum_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_stratum_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online)
@@ -1659,10 +1661,10 @@
 	return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
 }
 
-static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
+static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
 
-static ssize_t stp_time_offset_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_time_offset_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online || !(stp_info.vbits & 0x0800))
@@ -1670,10 +1672,10 @@
 	return sprintf(buf, "%i\n", (int) stp_info.tto);
 }
 
-static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
+static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
 
-static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_time_zone_offset_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online || !(stp_info.vbits & 0x4000))
@@ -1681,11 +1683,11 @@
 	return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
 }
 
-static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
+static DEVICE_ATTR(time_zone_offset, 0400,
 			 stp_time_zone_offset_show, NULL);
 
-static ssize_t stp_timing_mode_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_timing_mode_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online)
@@ -1693,10 +1695,10 @@
 	return sprintf(buf, "%i\n", stp_info.tmd);
 }
 
-static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
+static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
 
-static ssize_t stp_timing_state_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_timing_state_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	if (!stp_online)
@@ -1704,17 +1706,17 @@
 	return sprintf(buf, "%i\n", stp_info.tst);
 }
 
-static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
+static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
 
-static ssize_t stp_online_show(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_online_show(struct device *dev,
+				struct device_attribute *attr,
 				char *buf)
 {
 	return sprintf(buf, "%i\n", stp_online);
 }
 
-static ssize_t stp_online_store(struct sysdev_class *class,
-				struct sysdev_class_attribute *attr,
+static ssize_t stp_online_store(struct device *dev,
+				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
 	unsigned int value;
@@ -1736,47 +1738,47 @@
 }
 
 /*
- * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
- * stp/online but attr_online already exists in this file ..
+ * Can't use DEVICE_ATTR because the attribute should be named
+ * stp/online but dev_attr_online already exists in this file ..
  */
-static struct sysdev_class_attribute attr_stp_online = {
+static struct device_attribute dev_attr_stp_online = {
 	.attr = { .name = "online", .mode = 0600 },
 	.show	= stp_online_show,
 	.store	= stp_online_store,
 };
 
-static struct sysdev_class_attribute *stp_attributes[] = {
-	&attr_ctn_id,
-	&attr_ctn_type,
-	&attr_dst_offset,
-	&attr_leap_seconds,
-	&attr_stp_online,
-	&attr_stratum,
-	&attr_time_offset,
-	&attr_time_zone_offset,
-	&attr_timing_mode,
-	&attr_timing_state,
+static struct device_attribute *stp_attributes[] = {
+	&dev_attr_ctn_id,
+	&dev_attr_ctn_type,
+	&dev_attr_dst_offset,
+	&dev_attr_leap_seconds,
+	&dev_attr_stp_online,
+	&dev_attr_stratum,
+	&dev_attr_time_offset,
+	&dev_attr_time_zone_offset,
+	&dev_attr_timing_mode,
+	&dev_attr_timing_state,
 	NULL
 };
 
 static int __init stp_init_sysfs(void)
 {
-	struct sysdev_class_attribute **attr;
+	struct device_attribute **attr;
 	int rc;
 
-	rc = sysdev_class_register(&stp_sysclass);
+	rc = subsys_system_register(&stp_subsys, NULL);
 	if (rc)
 		goto out;
 	for (attr = stp_attributes; *attr; attr++) {
-		rc = sysdev_class_create_file(&stp_sysclass, *attr);
+		rc = device_create_file(stp_subsys.dev_root, *attr);
 		if (rc)
 			goto out_unreg;
 	}
 	return 0;
 out_unreg:
 	for (; attr >= stp_attributes; attr--)
-		sysdev_class_remove_file(&stp_sysclass, *attr);
-	sysdev_class_unregister(&stp_sysclass);
+		device_remove_file(stp_subsys.dev_root, *attr);
+	bus_unregister(&stp_subsys);
 out:
 	return rc;
 }
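
All of the etr/stp sysfs plumbing above is the same mechanical conversion: sysdev_class becomes a bus_type registered with subsys_system_register(), sys_device becomes struct device, and SYSDEV_(CLASS_)ATTR becomes DEVICE_ATTR with files created on the subsystem's dev_root. A minimal sketch of the resulting pattern (kernel code shown only to illustrate the shape of the new API; the "example" names are made up):

	#include <linux/device.h>
	#include <linux/init.h>

	static struct bus_type example_subsys = {
		.name		= "example",
		.dev_name	= "example",
	};

	static ssize_t state_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%i\n", 1);
	}
	static DEVICE_ATTR(state, 0400, state_show, NULL);

	static int __init example_init(void)
	{
		int rc;

		rc = subsys_system_register(&example_subsys, NULL);
		if (rc)
			return rc;
		rc = device_create_file(example_subsys.dev_root, &dev_attr_state);
		if (rc)
			bus_unregister(&example_subsys);
		return rc;
	}
	device_initcall(example_init);
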
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index fdb5b8c..7370a41 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,22 +1,22 @@
 /*
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007,2011
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/bootmem.h>
-#include <linux/sched.h>
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
+#include <linux/cpuset.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/cpuset.h>
-#include <asm/delay.h>
+#include <linux/mm.h>
 
 #define PTF_HORIZONTAL	(0UL)
 #define PTF_VERTICAL	(1UL)
@@ -31,7 +31,6 @@
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -41,11 +40,12 @@
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-#ifdef CONFIG_SCHED_BOOK
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
 unsigned char cpu_book_id[NR_CPUS];
-#endif
+
+/* smp_cpu_state_mutex must be held when accessing this array */
+int cpu_polarization[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -71,7 +71,7 @@
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 					  struct mask_info *book,
 					  struct mask_info *core,
-					  int z10)
+					  int one_core_per_cpu)
 {
 	unsigned int cpu;
 
@@ -85,18 +85,16 @@
 		for_each_present_cpu(lcpu) {
 			if (cpu_logical_map(lcpu) != rcpu)
 				continue;
-#ifdef CONFIG_SCHED_BOOK
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
-#endif
 			cpumask_set_cpu(lcpu, &core->mask);
-			if (z10) {
+			if (one_core_per_cpu) {
 				cpu_core_id[lcpu] = rcpu;
 				core = core->next;
 			} else {
 				cpu_core_id[lcpu] = core->id;
 			}
-			smp_cpu_polarization[lcpu] = tl_cpu->pp;
+			cpu_set_polarization(lcpu, tl_cpu->pp);
 		}
 	}
 	return core;
@@ -111,13 +109,11 @@
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
-#ifdef CONFIG_SCHED_BOOK
 	info = &book_info;
 	while (info) {
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
-#endif
 }
 
 static union topology_entry *next_tle(union topology_entry *tle)
@@ -127,66 +123,75 @@
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
-	struct mask_info *book = &book_info;
-	struct cpuid cpu_id;
-#else
-	struct mask_info *book = NULL;
-#endif
 	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
-	int z10 = 0;
 
-#ifdef CONFIG_SCHED_BOOK
-	get_cpu_id(&cpu_id);
-	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-#endif
-	spin_lock_irq(&topology_lock);
-	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
-#ifdef CONFIG_SCHED_BOOK
-		if (z10) {
-			switch (tle->nl) {
-			case 1:
-				book = book->next;
-				book->id = tle->container.id;
-				break;
-			case 0:
-				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
-				break;
-			default:
-				clear_masks();
-				goto out;
-			}
-			tle = next_tle(tle);
-			continue;
-		}
-#endif
 		switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
 		case 2:
 			book = book->next;
 			book->id = tle->container.id;
 			break;
-#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core, z10);
+			add_cpus_to_mask(&tle->cpu, book, core, 0);
 			break;
 		default:
 			clear_masks();
-			goto out;
+			return;
 		}
 		tle = next_tle(tle);
 	}
-out:
+}
+
+static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+{
+	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
+	union topology_entry *tle, *end;
+
+	tle = info->tle;
+	end = (union topology_entry *)((unsigned long)info + info->length);
+	while (tle < end) {
+		switch (tle->nl) {
+		case 1:
+			book = book->next;
+			book->id = tle->container.id;
+			break;
+		case 0:
+			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+			break;
+		default:
+			clear_masks();
+			return;
+		}
+		tle = next_tle(tle);
+	}
+}
+
+static void tl_to_cores(struct sysinfo_15_1_x *info)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	spin_lock_irq(&topology_lock);
+	clear_masks();
+	switch (cpu_id.machine) {
+	case 0x2097:
+	case 0x2098:
+		__tl_to_cores_z10(info);
+		break;
+	default:
+		__tl_to_cores_generic(info);
+	}
 	spin_unlock_irq(&topology_lock);
 }
 
@@ -196,7 +201,7 @@
 
 	mutex_lock(&smp_cpu_state_mutex);
 	for_each_possible_cpu(cpu)
-		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+		cpu_set_polarization(cpu, POLARIZATION_HRZ);
 	mutex_unlock(&smp_cpu_state_mutex);
 }
 
@@ -215,8 +220,7 @@
 
 int topology_set_cpu_management(int fc)
 {
-	int cpu;
-	int rc;
+	int cpu, rc;
 
 	if (!MACHINE_HAS_TOPOLOGY)
 		return -EOPNOTSUPP;
@@ -227,7 +231,7 @@
 	if (rc)
 		return -EBUSY;
 	for_each_possible_cpu(cpu)
-		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
 	return rc;
 }
 
@@ -239,29 +243,25 @@
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
 		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
 		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
 }
 
 void store_topology(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
 	int rc;
 
 	rc = stsi(info, 15, 1, 3);
 	if (rc != -ENOSYS)
 		return;
-#endif
 	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct sys_device *sysdev;
+	struct device *dev;
 	int cpu;
 
 	if (!MACHINE_HAS_TOPOLOGY) {
@@ -273,8 +273,8 @@
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
-		sysdev = get_cpu_sysdev(cpu);
-		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	return 1;
 }
@@ -296,12 +296,30 @@
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll at a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
@@ -313,23 +331,6 @@
 }
 early_param("topology", early_parse_topology);
 
-static int __init init_topology_update(void)
-{
-	int rc;
-
-	rc = 0;
-	if (!MACHINE_HAS_TOPOLOGY) {
-		topology_update_polarization_simple();
-		goto out;
-	}
-	init_timer_deferrable(&topology_timer);
-	set_topology_timer();
-out:
-	update_cpu_core_map();
-	return rc;
-}
-__initcall(init_topology_update);
-
 static void __init alloc_masks(struct sysinfo_15_1_x *info,
 			       struct mask_info *mask, int offset)
 {
@@ -357,10 +358,108 @@
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		printk(" %d", info->mag[i]);
-	printk(" / %d\n", info->mnest);
+		printk(KERN_CONT " %d", info->mag[i]);
+	printk(KERN_CONT " / %d\n", info->mnest);
 	alloc_masks(info, &core_info, 1);
-#ifdef CONFIG_SCHED_BOOK
 	alloc_masks(info, &book_info, 2);
-#endif
 }
+
+static int cpu_management;
+
+static ssize_t dispatching_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", cpu_management);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t dispatching_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	rc = 0;
+	get_online_cpus();
+	mutex_lock(&smp_cpu_state_mutex);
+	if (cpu_management == val)
+		goto out;
+	rc = topology_set_cpu_management(val);
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
+out:
+	mutex_unlock(&smp_cpu_state_mutex);
+	put_online_cpus();
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(dispatching, 0644, dispatching_show,
+			 dispatching_store);
+
+static ssize_t cpu_polarization_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int cpu = dev->id;
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	switch (cpu_read_polarization(cpu)) {
+	case POLARIZATION_HRZ:
+		count = sprintf(buf, "horizontal\n");
+		break;
+	case POLARIZATION_VL:
+		count = sprintf(buf, "vertical:low\n");
+		break;
+	case POLARIZATION_VM:
+		count = sprintf(buf, "vertical:medium\n");
+		break;
+	case POLARIZATION_VH:
+		count = sprintf(buf, "vertical:high\n");
+		break;
+	default:
+		count = sprintf(buf, "unknown\n");
+		break;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
+static struct attribute *topology_cpu_attrs[] = {
+	&dev_attr_polarization.attr,
+	NULL,
+};
+
+static struct attribute_group topology_cpu_attr_group = {
+	.attrs = topology_cpu_attrs,
+};
+
+int topology_cpu_init(struct cpu *cpu)
+{
+	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+}
+
+static int __init topology_init(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY) {
+		topology_update_polarization_simple();
+		goto out;
+	}
+	set_topology_timer();
+out:
+	update_cpu_core_map();
+	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
+}
+device_initcall(topology_init);
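
The replacement for the fixed 60-second timer above is a small credit scheme: topology_expect_change() tops up an atomic counter (skipped once it is already above 60), and set_topology_timer() consumes one credit per expiry, rearming at HZ/10 while credits remain and falling back to the old 60-second period once they are gone. A stand-alone model of that heuristic (C11 atomics, illustrative only):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int poll_credits;

	static unsigned int next_interval_ms(void)
	{
		int old = atomic_load(&poll_credits);

		/* models atomic_add_unless(&topology_poll, -1, 0) */
		while (old > 0 &&
		       !atomic_compare_exchange_weak(&poll_credits, &old, old - 1))
			;
		return old > 0 ? 100 : 60000;	/* HZ/10 vs. 60 * HZ in the patch */
	}

	static void expect_change(void)
	{
		if (atomic_load(&poll_credits) <= 60)	/* cap, as in the patch */
			atomic_fetch_add(&poll_credits, 60);
	}

	int main(void)
	{
		printf("idle: next poll in %u ms\n", next_interval_ms());
		expect_change();	/* e.g. a cpu was (de)configured */
		printf("after a change: next poll in %u ms\n", next_interval_ms());
		return 0;
	}
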
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a9807dd..5ce3750 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -43,9 +43,9 @@
 #include <asm/debug.h>
 #include "entry.h"
 
-void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
+void (*pgm_check_table[128])(struct pt_regs *regs);
 
-int show_unhandled_signals;
+int show_unhandled_signals = 1;
 
 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
 
@@ -234,7 +234,7 @@
 
 static DEFINE_SPINLOCK(die_lock);
 
-void die(const char * str, struct pt_regs * regs, long err)
+void die(struct pt_regs *regs, const char *str)
 {
 	static int die_counter;
 
@@ -243,7 +243,7 @@
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
-	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
 #ifdef CONFIG_PREEMPT
 	printk("PREEMPT ");
 #endif
@@ -254,7 +254,7 @@
 	printk("DEBUG_PAGEALLOC");
 #endif
 	printk("\n");
-	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
+	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
 	show_regs(regs);
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
@@ -267,8 +267,7 @@
 	do_exit(SIGSEGV);
 }
 
-static void inline report_user_fault(struct pt_regs *regs, long int_code,
-				     int signr)
+static inline void report_user_fault(struct pt_regs *regs, int signr)
 {
 	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
 		return;
@@ -276,7 +275,7 @@
 		return;
 	if (!printk_ratelimit())
 		return;
-	printk("User process fault: interruption code 0x%lX ", int_code);
+	printk("User process fault: interruption code 0x%X ", regs->int_code);
 	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
 	printk("\n");
 	show_regs(regs);
@@ -287,19 +286,28 @@
 	return 1;
 }
 
-static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
-				     struct pt_regs *regs, siginfo_t *info)
+static inline void __user *get_psw_address(struct pt_regs *regs)
 {
-	if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
-		       pgm_int_code, signr) == NOTIFY_STOP)
+	return (void __user *)
+		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
+}
+
+static void __kprobes do_trap(struct pt_regs *regs,
+			      int si_signo, int si_code, char *str)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, str, regs, 0,
+		       regs->int_code, si_signo) == NOTIFY_STOP)
 		return;
 
         if (regs->psw.mask & PSW_MASK_PSTATE) {
-                struct task_struct *tsk = current;
-
-		tsk->thread.trap_no = pgm_int_code & 0xffff;
-		force_sig_info(signr, info, tsk);
-		report_user_fault(regs, pgm_int_code, signr);
+		info.si_signo = si_signo;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = get_psw_address(regs);
+		force_sig_info(si_signo, &info, current);
+		report_user_fault(regs, si_signo);
         } else {
                 const struct exception_table_entry *fixup;
                 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -311,18 +319,11 @@
 			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
 			if (btt == BUG_TRAP_TYPE_WARN)
 				return;
-			die(str, regs, pgm_int_code);
+			die(regs, str);
 		}
         }
 }
 
-static inline void __user *get_psw_address(struct pt_regs *regs,
-					   long pgm_int_code)
-{
-	return (void __user *)
-		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
-}
-
 void __kprobes do_per_trap(struct pt_regs *regs)
 {
 	siginfo_t info;
@@ -339,26 +340,19 @@
 	force_sig_info(SIGTRAP, &info, current);
 }
 
-static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
-				 unsigned long trans_exc_code)
+static void default_trap_handler(struct pt_regs *regs)
 {
         if (regs->psw.mask & PSW_MASK_PSTATE) {
-		report_user_fault(regs, pgm_int_code, SIGSEGV);
+		report_user_fault(regs, SIGSEGV);
 		do_exit(SIGSEGV);
 	} else
-		die("Unknown program exception", regs, pgm_int_code);
+		die(regs, "Unknown program exception");
 }
 
 #define DO_ERROR_INFO(name, signr, sicode, str) \
-static void name(struct pt_regs *regs, long pgm_int_code, \
-		 unsigned long trans_exc_code) \
+static void name(struct pt_regs *regs) \
 { \
-        siginfo_t info; \
-        info.si_signo = signr; \
-        info.si_errno = 0; \
-        info.si_code = sicode; \
-	info.si_addr = get_psw_address(regs, pgm_int_code); \
-	do_trap(pgm_int_code, signr, str, regs, &info);	    \
+	do_trap(regs, signr, sicode, str); \
 }
 
 DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
@@ -388,42 +382,34 @@
 DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
 	      "translation exception")
 
-static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
-			      int fpc, long pgm_int_code)
+static inline void do_fp_trap(struct pt_regs *regs, int fpc)
 {
-	siginfo_t si;
-
-	si.si_signo = SIGFPE;
-	si.si_errno = 0;
-	si.si_addr = location;
-	si.si_code = 0;
+	int si_code = 0;
 	/* FPC[2] is Data Exception Code */
 	if ((fpc & 0x00000300) == 0) {
 		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
 		if (fpc & 0x8000) /* invalid fp operation */
-			si.si_code = FPE_FLTINV;
+			si_code = FPE_FLTINV;
 		else if (fpc & 0x4000) /* div by 0 */
-			si.si_code = FPE_FLTDIV;
+			si_code = FPE_FLTDIV;
 		else if (fpc & 0x2000) /* overflow */
-			si.si_code = FPE_FLTOVF;
+			si_code = FPE_FLTOVF;
 		else if (fpc & 0x1000) /* underflow */
-			si.si_code = FPE_FLTUND;
+			si_code = FPE_FLTUND;
 		else if (fpc & 0x0800) /* inexact */
-			si.si_code = FPE_FLTRES;
+			si_code = FPE_FLTRES;
 	}
-	do_trap(pgm_int_code, SIGFPE,
-		"floating point exception", regs, &si);
+	do_trap(regs, SIGFPE, si_code, "floating point exception");
 }
 
-static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
-				 unsigned long trans_exc_code)
+static void __kprobes illegal_op(struct pt_regs *regs)
 {
 	siginfo_t info;
         __u8 opcode[6];
 	__u16 __user *location;
 	int signal = 0;
 
-	location = get_psw_address(regs, pgm_int_code);
+	location = get_psw_address(regs);
 
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
@@ -467,44 +453,31 @@
 		 * If we get an illegal op in kernel mode, send it through the
 		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
 		 */
-		if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
+		if (notify_die(DIE_BPT, "bpt", regs, 0,
 			       3, SIGTRAP) != NOTIFY_STOP)
 			signal = SIGILL;
 	}
 
 #ifdef CONFIG_MATHEMU
         if (signal == SIGFPE)
-		do_fp_trap(regs, location,
-			   current->thread.fp_regs.fpc, pgm_int_code);
-        else if (signal == SIGSEGV) {
-		info.si_signo = signal;
-		info.si_errno = 0;
-		info.si_code = SEGV_MAPERR;
-		info.si_addr = (void __user *) location;
-		do_trap(pgm_int_code, signal,
-			"user address fault", regs, &info);
-	} else
+		do_fp_trap(regs, current->thread.fp_regs.fpc);
+	else if (signal == SIGSEGV)
+		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
+	else
 #endif
-        if (signal) {
-		info.si_signo = signal;
-		info.si_errno = 0;
-		info.si_code = ILL_ILLOPC;
-		info.si_addr = (void __user *) location;
-		do_trap(pgm_int_code, signal,
-			"illegal operation", regs, &info);
-	}
+	if (signal)
+		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
 }
 
 
 #ifdef CONFIG_MATHEMU
-void specification_exception(struct pt_regs *regs, long pgm_int_code,
-			     unsigned long trans_exc_code)
+void specification_exception(struct pt_regs *regs)
 {
         __u8 opcode[6];
 	__u16 __user *location = NULL;
 	int signal = 0;
 
-	location = (__u16 __user *) get_psw_address(regs, pgm_int_code);
+	location = (__u16 __user *) get_psw_address(regs);
 
         if (regs->psw.mask & PSW_MASK_PSTATE) {
 		get_user(*((__u16 *) opcode), location);
@@ -539,30 +512,21 @@
 		signal = SIGILL;
 
         if (signal == SIGFPE)
-		do_fp_trap(regs, location,
-			   current->thread.fp_regs.fpc, pgm_int_code);
-        else if (signal) {
-		siginfo_t info;
-		info.si_signo = signal;
-		info.si_errno = 0;
-		info.si_code = ILL_ILLOPN;
-		info.si_addr = location;
-		do_trap(pgm_int_code, signal,
-			"specification exception", regs, &info);
-	}
+		do_fp_trap(regs, current->thread.fp_regs.fpc);
+	else if (signal)
+		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
 }
 #else
 DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
 	      "specification exception");
 #endif
 
-static void data_exception(struct pt_regs *regs, long pgm_int_code,
-			   unsigned long trans_exc_code)
+static void data_exception(struct pt_regs *regs)
 {
 	__u16 __user *location;
 	int signal = 0;
 
-	location = get_psw_address(regs, pgm_int_code);
+	location = get_psw_address(regs);
 
 	if (MACHINE_HAS_IEEE)
 		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
@@ -627,32 +591,18 @@
 	else
 		signal = SIGILL;
         if (signal == SIGFPE)
-		do_fp_trap(regs, location,
-			   current->thread.fp_regs.fpc, pgm_int_code);
-        else if (signal) {
-		siginfo_t info;
-		info.si_signo = signal;
-		info.si_errno = 0;
-		info.si_code = ILL_ILLOPN;
-		info.si_addr = location;
-		do_trap(pgm_int_code, signal, "data exception", regs, &info);
-	}
+		do_fp_trap(regs, current->thread.fp_regs.fpc);
+	else if (signal)
+		do_trap(regs, signal, ILL_ILLOPN, "data exception");
 }
 
-static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
-				   unsigned long trans_exc_code)
+static void space_switch_exception(struct pt_regs *regs)
 {
-        siginfo_t info;
-
 	/* Set user psw back to home space mode. */
 	if (regs->psw.mask & PSW_MASK_PSTATE)
 		regs->psw.mask |= PSW_ASC_HOME;
 	/* Send SIGILL. */
-        info.si_signo = SIGILL;
-        info.si_errno = 0;
-        info.si_code = ILL_PRVOPC;
-	info.si_addr = get_psw_address(regs, pgm_int_code);
-	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
+	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
 }
 
 void __kprobes kernel_stack_overflow(struct pt_regs * regs)
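
With the program-check parameters now stashed in pt_regs (int_code, int_parm_long), the trap handlers above lose two arguments and do_trap() builds the siginfo itself from the signal number, si_code and the rewound PSW address. For reference, this is what the slimmed-down DO_ERROR_INFO() macro in the hunk expands to for one of its users:

	static void addressing_exception(struct pt_regs *regs)
	{
		do_trap(regs, SIGILL, ILL_ILLADR, "addressing exception");
	}
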
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a9a3018..354dd39 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -125,8 +125,7 @@
 	return trans_exc_code != 3;
 }
 
-static inline void report_user_fault(struct pt_regs *regs, long int_code,
-				     int signr, unsigned long address)
+static inline void report_user_fault(struct pt_regs *regs, long signr)
 {
 	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
 		return;
@@ -134,10 +133,12 @@
 		return;
 	if (!printk_ratelimit())
 		return;
-	printk("User process fault: interruption code 0x%lX ", int_code);
+	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
+	       regs->int_code);
 	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
-	printk("\n");
-	printk("failing address: %lX\n", address);
+	printk(KERN_CONT "\n");
+	printk(KERN_ALERT "failing address: %lX\n",
+	       regs->int_parm_long & __FAIL_ADDR_MASK);
 	show_regs(regs);
 }
 
@@ -145,24 +146,18 @@
  * Send SIGSEGV to task.  This is an external routine
  * to keep the stack usage of do_page_fault small.
  */
-static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
-				int si_code, unsigned long trans_exc_code)
+static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
 {
 	struct siginfo si;
-	unsigned long address;
 
-	address = trans_exc_code & __FAIL_ADDR_MASK;
-	current->thread.prot_addr = address;
-	current->thread.trap_no = int_code;
-	report_user_fault(regs, int_code, SIGSEGV, address);
+	report_user_fault(regs, SIGSEGV);
 	si.si_signo = SIGSEGV;
 	si.si_code = si_code;
-	si.si_addr = (void __user *) address;
+	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
 	force_sig_info(SIGSEGV, &si, current);
 }
 
-static noinline void do_no_context(struct pt_regs *regs, long int_code,
-				   unsigned long trans_exc_code)
+static noinline void do_no_context(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
 	unsigned long address;
@@ -178,55 +173,48 @@
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	address = trans_exc_code & __FAIL_ADDR_MASK;
-	if (!user_space_fault(trans_exc_code))
+	address = regs->int_parm_long & __FAIL_ADDR_MASK;
+	if (!user_space_fault(regs->int_parm_long))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request"
 		       " at virtual user address %p\n", (void *)address);
 
-	die("Oops", regs, int_code);
+	die(regs, "Oops");
 	do_exit(SIGKILL);
 }
 
-static noinline void do_low_address(struct pt_regs *regs, long int_code,
-				    unsigned long trans_exc_code)
+static noinline void do_low_address(struct pt_regs *regs)
 {
 	/* Low-address protection hit in kernel mode means
 	   NULL pointer write access in kernel mode.  */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		/* Low-address protection hit in user mode 'cannot happen'. */
-		die ("Low-address protection", regs, int_code);
+		die (regs, "Low-address protection");
 		do_exit(SIGKILL);
 	}
 
-	do_no_context(regs, int_code, trans_exc_code);
+	do_no_context(regs);
 }
 
-static noinline void do_sigbus(struct pt_regs *regs, long int_code,
-			       unsigned long trans_exc_code)
+static noinline void do_sigbus(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	unsigned long address;
 	struct siginfo si;
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	address = trans_exc_code & __FAIL_ADDR_MASK;
-	tsk->thread.prot_addr = address;
-	tsk->thread.trap_no = int_code;
 	si.si_signo = SIGBUS;
 	si.si_errno = 0;
 	si.si_code = BUS_ADRERR;
-	si.si_addr = (void __user *) address;
+	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
 	force_sig_info(SIGBUS, &si, tsk);
 }
 
-static noinline void do_fault_error(struct pt_regs *regs, long int_code,
-				    unsigned long trans_exc_code, int fault)
+static noinline void do_fault_error(struct pt_regs *regs, int fault)
 {
 	int si_code;
 
@@ -238,24 +226,24 @@
 			/* User mode accesses just cause a SIGSEGV */
 			si_code = (fault == VM_FAULT_BADMAP) ?
 				SEGV_MAPERR : SEGV_ACCERR;
-			do_sigsegv(regs, int_code, si_code, trans_exc_code);
+			do_sigsegv(regs, si_code);
 			return;
 		}
 	case VM_FAULT_BADCONTEXT:
-		do_no_context(regs, int_code, trans_exc_code);
+		do_no_context(regs);
 		break;
 	default: /* fault & VM_FAULT_ERROR */
 		if (fault & VM_FAULT_OOM) {
 			if (!(regs->psw.mask & PSW_MASK_PSTATE))
-				do_no_context(regs, int_code, trans_exc_code);
+				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
 			if (!(regs->psw.mask & PSW_MASK_PSTATE))
-				do_no_context(regs, int_code, trans_exc_code);
+				do_no_context(regs);
 			else
-				do_sigbus(regs, int_code, trans_exc_code);
+				do_sigbus(regs);
 		} else
 			BUG();
 		break;
@@ -273,12 +261,12 @@
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline int do_exception(struct pt_regs *regs, int access,
-			       unsigned long trans_exc_code)
+static inline int do_exception(struct pt_regs *regs, int access)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
+	unsigned long trans_exc_code;
 	unsigned long address;
 	unsigned int flags;
 	int fault;
@@ -288,6 +276,7 @@
 
 	tsk = current;
 	mm = tsk->mm;
+	trans_exc_code = regs->int_parm_long;
 
 	/*
 	 * Verify that the fault happened in user space, that
@@ -387,45 +376,46 @@
 	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
-				       unsigned long trans_exc_code)
+void __kprobes do_protection_exception(struct pt_regs *regs)
 {
+	unsigned long trans_exc_code;
 	int fault;
 
+	trans_exc_code = regs->int_parm_long;
 	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16);
+	regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
 	/*
 	 * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
 	if (unlikely(!(trans_exc_code & 4))) {
-		do_low_address(regs, pgm_int_code, trans_exc_code);
+		do_low_address(regs);
 		return;
 	}
-	fault = do_exception(regs, VM_WRITE, trans_exc_code);
+	fault = do_exception(regs, VM_WRITE);
 	if (unlikely(fault))
-		do_fault_error(regs, 4, trans_exc_code, fault);
+		do_fault_error(regs, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
-				unsigned long trans_exc_code)
+void __kprobes do_dat_exception(struct pt_regs *regs)
 {
 	int access, fault;
 
 	access = VM_READ | VM_EXEC | VM_WRITE;
-	fault = do_exception(regs, access, trans_exc_code);
+	fault = do_exception(regs, access);
 	if (unlikely(fault))
-		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
+		do_fault_error(regs, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
-				 unsigned long trans_exc_code)
+void __kprobes do_asce_exception(struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	unsigned long trans_exc_code;
 
+	trans_exc_code = regs->int_parm_long;
 	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
@@ -440,12 +430,12 @@
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
+		do_sigsegv(regs, SEGV_MAPERR);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, pgm_int_code, trans_exc_code);
+	do_no_context(regs);
 }
 #endif
 
@@ -459,14 +449,15 @@
 		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
 	regs.psw.addr = (unsigned long) __builtin_return_address(0);
 	regs.psw.addr |= PSW_ADDR_AMODE;
-	uaddr &= PAGE_MASK;
+	regs.int_code = pgm_int_code;
+	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
 	access = write ? VM_WRITE : VM_READ;
-	fault = do_exception(&regs, access, uaddr | 2);
+	fault = do_exception(&regs, access);
 	if (unlikely(fault)) {
 		if (fault & VM_FAULT_OOM)
 			return -EFAULT;
 		else if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(&regs, pgm_int_code, uaddr);
+			do_sigbus(&regs);
 	}
 	return fault ? -EFAULT : 0;
 }
@@ -509,7 +500,7 @@
 		.reserved = __PF_RES_FIELD };
         int rc;
 
-	if (!MACHINE_IS_VM || pfault_disable)
+	if (pfault_disable)
 		return -1;
 	asm volatile(
 		"	diag	%1,%0,0x258\n"
@@ -530,7 +521,7 @@
 		.refversn = 2,
 	};
 
-	if (!MACHINE_IS_VM || pfault_disable)
+	if (pfault_disable)
 		return;
 	asm volatile(
 		"	diag	%0,0,0x258\n"
@@ -643,8 +634,6 @@
 {
 	int rc;
 
-	if (!MACHINE_IS_VM)
-		return 0;
 	rc = register_external_interrupt(0x2603, pfault_interrupt);
 	if (rc)
 		goto out_extint;
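The fault.c changes above drop the (pgm_int_code, trans_exc_code) argument pair: the low-level entry code now stores the program interruption code and the translation-exception identification in struct pt_regs, and every helper derives the faulting address from regs alone. A minimal user-space sketch of that convention follows; the struct is a simplified stand-in and FAIL_ADDR_MASK is a placeholder, only the int_code/int_parm_long field names come from the patch.

/* Simplified stand-in for the pt_regs fields used by the handlers above. */
#include <stdio.h>

#define FAIL_ADDR_MASK	0xfffffffffffff000UL	/* placeholder for __FAIL_ADDR_MASK */

struct fake_pt_regs {
	unsigned int  int_code;		/* program interruption code */
	unsigned long int_parm_long;	/* translation-exception identification */
};

static unsigned long fault_address(const struct fake_pt_regs *regs)
{
	/* same derivation as do_sigsegv()/do_sigbus() above */
	return regs->int_parm_long & FAIL_ADDR_MASK;
}

int main(void)
{
	struct fake_pt_regs regs = { .int_code = 0x0010,
				     .int_parm_long = 0x12345678UL | 2 };

	printf("failing address: %#lx\n", fault_address(&regs));
	return 0;
}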
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d4b9fb4..5d63301 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -93,18 +93,22 @@
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long pgd_type;
+	unsigned long pgd_type, asce_bits;
 
 	init_mm.pgd = swapper_pg_dir;
-	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
 #ifdef CONFIG_64BIT
-	/* A three level page table (4TB) is enough for the kernel space. */
-	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
-	pgd_type = _REGION3_ENTRY_EMPTY;
+	if (VMALLOC_END > (1UL << 42)) {
+		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+		pgd_type = _REGION2_ENTRY_EMPTY;
+	} else {
+		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+		pgd_type = _REGION3_ENTRY_EMPTY;
+	}
 #else
-	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+	asce_bits = _ASCE_TABLE_LENGTH;
 	pgd_type = _SEGMENT_ENTRY_EMPTY;
 #endif
+	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
 	vmem_map_init();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 301c84d..9a4d02f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -33,17 +33,6 @@
 #define FRAG_MASK	0x03
 #endif
 
-unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
-EXPORT_SYMBOL(VMALLOC_START);
-
-static int __init parse_vmalloc(char *arg)
-{
-	if (!arg)
-		return -EINVAL;
-	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
-	return 0;
-}
-early_param("vmalloc", parse_vmalloc);
 
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
@@ -267,7 +256,10 @@
 	struct page *page;
 	unsigned long *new;
 
+	/* since we don't free the gmap table until gmap_free we can unlock */
+	spin_unlock(&gmap->mm->page_table_lock);
 	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	spin_lock(&gmap->mm->page_table_lock);
 	if (!page)
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
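The page_table_lock juggling above exists because alloc_pages(GFP_KERNEL) may sleep, which is not allowed under a spinlock; the gmap table being extended is only freed in gmap_free(), so dropping the lock around the allocation is safe. A generic, hypothetical kernel-style sketch of the pattern:

/* Hypothetical sketch: drop a spinlock around a sleeping allocation and
 * retake it afterwards.  The caller must tolerate the protected state
 * changing while the lock was not held. */
#include <linux/gfp.h>
#include <linux/spinlock.h>

static struct page *alloc_page_dropping_lock(spinlock_t *lock, unsigned int order)
{
	struct page *page;

	spin_unlock(lock);
	page = alloc_pages(GFP_KERNEL, order);	/* may sleep */
	spin_lock(lock);
	return page;	/* may be NULL; caller re-checks its state */
}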
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index f43c0e4..9daee91 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -22,6 +22,7 @@
 #include <asm/irq.h>
 
 #include "hwsampler.h"
+#include "op_counter.h"
 
 #define MAX_NUM_SDB 511
 #define MIN_NUM_SDB 1
@@ -896,6 +897,8 @@
 		if (sample_data_ptr->P == 1) {
 			/* userspace sample */
 			unsigned int pid = sample_data_ptr->prim_asn;
+			if (!counter_config.user)
+				goto skip_sample;
 			rcu_read_lock();
 			tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
 			if (tsk)
@@ -903,6 +906,8 @@
 			rcu_read_unlock();
 		} else {
 			/* kernelspace sample */
+			if (!counter_config.kernel)
+				goto skip_sample;
 			regs = task_pt_regs(current);
 		}
 
@@ -910,7 +915,7 @@
 		oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
 				!sample_data_ptr->P, tsk);
 		mutex_unlock(&hws_sem);
-
+	skip_sample:
 		sample_data_ptr++;
 	}
 }
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 6efc18b..2297be4 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -2,10 +2,11 @@
  * arch/s390/oprofile/init.c
  *
  * S390 Version
- *   Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *   Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *   Author(s): Thomas Spatzier (tspat@de.ibm.com)
  *   Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
  *   Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
+ *   Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
  *
  * @remark Copyright 2002-2011 OProfile authors
  */
@@ -14,6 +15,8 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/module.h>
+#include <asm/processor.h>
 
 #include "../../../drivers/oprofile/oprof.h"
 
@@ -22,6 +25,7 @@
 #ifdef CONFIG_64BIT
 
 #include "hwsampler.h"
+#include "op_counter.h"
 
 #define DEFAULT_INTERVAL	4127518
 
@@ -35,16 +39,41 @@
 static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
 static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
 
-static int hwsampler_file;
+static int hwsampler_enabled;
 static int hwsampler_running;	/* start_mutex must be held to change */
+static int hwsampler_available;
 
 static struct oprofile_operations timer_ops;
 
+struct op_counter_config counter_config;
+
+enum __force_cpu_type {
+	reserved = 0,		/* do not force */
+	timer,
+};
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
+{
+	if (!strcmp(str, "timer")) {
+		force_cpu_type = timer;
+		printk(KERN_INFO "oprofile: forcing timer to be returned "
+		                 "as cpu type\n");
+	} else {
+		force_cpu_type = 0;
+	}
+
+	return 0;
+}
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
+MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling "
+		           "(report cpu_type \"timer\")");
+
 static int oprofile_hwsampler_start(void)
 {
 	int retval;
 
-	hwsampler_running = hwsampler_file;
+	hwsampler_running = hwsampler_enabled;
 
 	if (!hwsampler_running)
 		return timer_ops.start();
@@ -72,10 +101,16 @@
 	return;
 }
 
+/*
+ * File ops used for:
+ * /dev/oprofile/0/enabled
+ * /dev/oprofile/hwsampling/hwsampler  (cpu_type = timer)
+ */
+
 static ssize_t hwsampler_read(struct file *file, char __user *buf,
 		size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset);
+	return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset);
 }
 
 static ssize_t hwsampler_write(struct file *file, char const __user *buf,
@@ -88,9 +123,12 @@
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
 	if (oprofile_started)
 		/*
 		 * safe to do without locking as we set
@@ -99,7 +137,7 @@
 		 */
 		return -EBUSY;
 
-	hwsampler_file = val;
+	hwsampler_enabled = val;
 
 	return count;
 }
@@ -109,38 +147,311 @@
 	.write		= hwsampler_write,
 };
 
-static int oprofile_create_hwsampling_files(struct super_block *sb,
-						struct dentry *root)
+/*
+ * File ops used for:
+ * /dev/oprofile/0/count
+ * /dev/oprofile/hwsampling/hw_interval  (cpu_type = timer)
+ *
+ * Make sure that the value is within the hardware range.
+ */
+
+static ssize_t hw_interval_read(struct file *file, char __user *buf,
+				size_t count, loff_t *offset)
 {
-	struct dentry *hw_dir;
+	return oprofilefs_ulong_to_user(oprofile_hw_interval, buf,
+					count, offset);
+}
 
-	/* reinitialize default values */
-	hwsampler_file = 1;
+static ssize_t hw_interval_write(struct file *file, char const __user *buf,
+				 size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int retval;
 
-	hw_dir = oprofilefs_mkdir(sb, root, "hwsampling");
-	if (!hw_dir)
+	if (*offset)
+		return -EINVAL;
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+	if (val < oprofile_min_interval)
+		oprofile_hw_interval = oprofile_min_interval;
+	else if (val > oprofile_max_interval)
+		oprofile_hw_interval = oprofile_max_interval;
+	else
+		oprofile_hw_interval = val;
+
+	return count;
+}
+
+static const struct file_operations hw_interval_fops = {
+	.read		= hw_interval_read,
+	.write		= hw_interval_write,
+};
+
+/*
+ * File ops used for:
+ * /dev/oprofile/0/event
+ * Only a single event with number 0 is supported with this counter.
+ *
+ * /dev/oprofile/0/unit_mask
+ * This is a dummy file needed by the user space tools.
+ * No value other than 0 is accepted or returned.
+ */
+
+static ssize_t hwsampler_zero_read(struct file *file, char __user *buf,
+				    size_t count, loff_t *offset)
+{
+	return oprofilefs_ulong_to_user(0, buf, count, offset);
+}
+
+static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
+				     size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
 		return -EINVAL;
 
-	oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops);
-	oprofilefs_create_ulong(sb, hw_dir, "hw_interval",
-				&oprofile_hw_interval);
-	oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval",
-				&oprofile_min_interval);
-	oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval",
-				&oprofile_max_interval);
-	oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks",
-				&oprofile_sdbt_blocks);
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+	if (val != 0)
+		return -EINVAL;
+	return count;
+}
 
+static const struct file_operations zero_fops = {
+	.read		= hwsampler_zero_read,
+	.write		= hwsampler_zero_write,
+};
+
+/* /dev/oprofile/0/kernel file ops.  */
+
+static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf,
+				     size_t count, loff_t *offset)
+{
+	return oprofilefs_ulong_to_user(counter_config.kernel,
+					buf, count, offset);
+}
+
+static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
+				      size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	counter_config.kernel = val;
+
+	return count;
+}
+
+static const struct file_operations kernel_fops = {
+	.read		= hwsampler_kernel_read,
+	.write		= hwsampler_kernel_write,
+};
+
+/* /dev/oprofile/0/user file ops. */
+
+static ssize_t hwsampler_user_read(struct file *file, char __user *buf,
+				   size_t count, loff_t *offset)
+{
+	return oprofilefs_ulong_to_user(counter_config.user,
+					buf, count, offset);
+}
+
+static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
+				    size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	counter_config.user = val;
+
+	return count;
+}
+
+static const struct file_operations user_fops = {
+	.read		= hwsampler_user_read,
+	.write		= hwsampler_user_write,
+};
+
+
+/*
+ * File ops used for: /dev/oprofile/timer/enabled
+ * The value always has to be the inverted value of hwsampler_enabled. So
+ * no separate variable is created. That way we do not need locking.
+ */
+
+static ssize_t timer_enabled_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *offset)
+{
+	return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset);
+}
+
+static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
+				   size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	/* Timer cannot be disabled without having hardware sampling.  */
+	if (val == 0 && !hwsampler_available)
+		return -EINVAL;
+
+	if (oprofile_started)
+		/*
+		 * safe to do without locking as we set
+		 * hwsampler_running in start() when start_mutex is
+		 * held
+		 */
+		return -EBUSY;
+
+	hwsampler_enabled = !val;
+
+	return count;
+}
+
+static const struct file_operations timer_enabled_fops = {
+	.read		= timer_enabled_read,
+	.write		= timer_enabled_write,
+};
+
+
+static int oprofile_create_hwsampling_files(struct super_block *sb,
+					    struct dentry *root)
+{
+	struct dentry *dir;
+
+	dir = oprofilefs_mkdir(sb, root, "timer");
+	if (!dir)
+		return -EINVAL;
+
+	oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops);
+
+	if (!hwsampler_available)
+		return 0;
+
+	/* reinitialize default values */
+	hwsampler_enabled = 1;
+	counter_config.kernel = 1;
+	counter_config.user = 1;
+
+	if (!force_cpu_type) {
+		/*
+		 * Create the counter file system.  A single virtual
+		 * counter is created which can be used to
+		 * enable/disable hardware sampling dynamically from
+		 * user space.  The user space will configure a single
+		 * counter with a single event.  The value of 'event'
+		 * and 'unit_mask' are not evaluated by the kernel code
+		 * and can only be set to 0.
+		 */
+
+		dir = oprofilefs_mkdir(sb, root, "0");
+		if (!dir)
+			return -EINVAL;
+
+		oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops);
+		oprofilefs_create_file(sb, dir, "event", &zero_fops);
+		oprofilefs_create_file(sb, dir, "count", &hw_interval_fops);
+		oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops);
+		oprofilefs_create_file(sb, dir, "kernel", &kernel_fops);
+		oprofilefs_create_file(sb, dir, "user", &user_fops);
+		oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+					&oprofile_sdbt_blocks);
+
+	} else {
+		/*
+		 * Hardware sampling can be used but the cpu_type is
+		 * forced to timer in order to deal with legacy user
+		 * space tools.  The /dev/oprofile/hwsampling fs is
+		 * provided in that case.
+		 */
+		dir = oprofilefs_mkdir(sb, root, "hwsampling");
+		if (!dir)
+			return -EINVAL;
+
+		oprofilefs_create_file(sb, dir, "hwsampler",
+				       &hwsampler_fops);
+		oprofilefs_create_file(sb, dir, "hw_interval",
+				       &hw_interval_fops);
+		oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval",
+					   &oprofile_min_interval);
+		oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval",
+					   &oprofile_max_interval);
+		oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+					&oprofile_sdbt_blocks);
+	}
 	return 0;
 }
 
 static int oprofile_hwsampler_init(struct oprofile_operations *ops)
 {
+	/*
+	 * Initialize the timer mode infrastructure as well in order
+	 * to be able to switch back dynamically.  oprofile_timer_init
+	 * is not supposed to fail.
+	 */
+	if (oprofile_timer_init(ops))
+		BUG();
+
+	memcpy(&timer_ops, ops, sizeof(timer_ops));
+	ops->create_files = oprofile_create_hwsampling_files;
+
+	/*
+	 * If the user space tools do not support newer cpu types,
+	 * the force_cpu_type module parameter
+	 * can be used to always return "timer" as cpu type.
+	 */
+	if (force_cpu_type != timer) {
+		struct cpuid id;
+
+		get_cpu_id (&id);
+
+		switch (id.machine) {
+		case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
+		case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
+		default: return -ENODEV;
+		}
+	}
+
 	if (hwsampler_setup())
 		return -ENODEV;
 
 	/*
-	 * create hwsampler files only if hwsampler_setup() succeeds.
+	 * Query the range for the sampling interval from the
+	 * hardware.
 	 */
 	oprofile_min_interval = hwsampler_query_min_interval();
 	if (oprofile_min_interval == 0)
@@ -155,23 +466,17 @@
 	if (oprofile_hw_interval > oprofile_max_interval)
 		oprofile_hw_interval = oprofile_max_interval;
 
-	if (oprofile_timer_init(ops))
-		return -ENODEV;
-
-	printk(KERN_INFO "oprofile: using hardware sampling\n");
-
-	memcpy(&timer_ops, ops, sizeof(timer_ops));
+	printk(KERN_INFO "oprofile: System z hardware sampling "
+	       "facility found.\n");
 
 	ops->start = oprofile_hwsampler_start;
 	ops->stop = oprofile_hwsampler_stop;
-	ops->create_files = oprofile_create_hwsampling_files;
 
 	return 0;
 }
 
 static void oprofile_hwsampler_exit(void)
 {
-	oprofile_timer_exit();
 	hwsampler_shutdown();
 }
 
@@ -182,7 +487,15 @@
 	ops->backtrace = s390_backtrace;
 
 #ifdef CONFIG_64BIT
-	return oprofile_hwsampler_init(ops);
+
+	/*
+	 * -ENODEV is not reported to the caller.  The module itself
+	 * will use the timer mode sampling as fallback and this is
+	 * always available.
+	 */
+	hwsampler_available = oprofile_hwsampler_init(ops) == 0;
+
+	return 0;
 #else
 	return -ENODEV;
 #endif
diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h
new file mode 100644
index 0000000..1a8d3ca
--- /dev/null
+++ b/arch/s390/oprofile/op_counter.h
@@ -0,0 +1,23 @@
+/**
+ * arch/s390/oprofile/op_counter.h
+ *
+ *   Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *   Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
+ *
+ * @remark Copyright 2011 OProfile authors
+ */
+
+#ifndef OP_COUNTER_H
+#define OP_COUNTER_H
+
+struct op_counter_config {
+	/* `enabled' maps to the hwsampler_enabled variable.  */
+	/* `count' maps to the oprofile_hw_interval variable.  */
+	/* `event' and `unit_mask' are unused. */
+	unsigned long kernel;
+	unsigned long user;
+};
+
+extern struct op_counter_config counter_config;
+
+#endif /* OP_COUNTER_H */
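Taken together, the init.c and op_counter.h changes expose a single virtual counter whose enabled/kernel/user files accept only 0 or 1. A user-space illustration (not from the tree) of driving those files, assuming oprofilefs is mounted at /dev/oprofile and the non-forced counter "0" layout is active:

/* Illustrative only: enable kernel-mode-only hardware sampling through
 * the virtual counter files described above. */
#include <fcntl.h>
#include <unistd.h>

static int write_flag(const char *path, char flag)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, &flag, 1);
	close(fd);
	return n == 1 ? 0 : -1;
}

int main(void)
{
	write_flag("/dev/oprofile/0/user", '0');	/* skip user-space samples */
	write_flag("/dev/oprofile/0/kernel", '1');	/* keep kernel samples */
	write_flag("/dev/oprofile/0/enabled", '1');	/* turn the counter on */
	return 0;
}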
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index df169e8..8b0c946 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -4,6 +4,9 @@
        def_bool y
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
+       select HAVE_MEMBLOCK
+       select HAVE_MEMBLOCK_NODE_MAP
+       select ARCH_DISCARD_MEMBLOCK
 
 choice
 	prompt "System type"
@@ -60,9 +63,6 @@
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 source "mm/Kconfig"
 
 config MEMORY_START
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c
index 6f898c0..b48459a 100644
--- a/arch/score/kernel/setup.c
+++ b/arch/score/kernel/setup.c
@@ -26,6 +26,7 @@
 #include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/ioport.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 #include <linux/screen_info.h>
@@ -54,7 +55,8 @@
 	/* Initialize the boot-time allocator with low memory only. */
 	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
 					 min_low_pfn, max_low_pfn);
-	add_active_range(0, min_low_pfn, max_low_pfn);
+	memblock_add_node(PFN_PHYS(min_low_pfn),
+			  PFN_PHYS(max_low_pfn - min_low_pfn), 0);
 
 	free_bootmem(PFN_PHYS(start_pfn),
 		     (max_low_pfn - start_pfn) << PAGE_SHIFT);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5629e20..47a2f1c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -4,6 +4,7 @@
 	select CLKDEV_LOOKUP
 	select HAVE_IDE if HAS_IOPORT
 	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index ec8c84c..895e337 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -50,9 +50,9 @@
 #define GBECONT		0xffc10100
 #define GBECONT_RMII1	BIT(17)
 #define GBECONT_RMII0	BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
 {
-	if ((addr & 0x00000fff) < 0x0800)
+	if (((unsigned long)addr & 0x00000fff) < 0x0800)
 		writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
 	else
 		writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@
 	},
 };
 
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
 {
-	if ((addr & 0x00000fff) < 0x0800) {
+	if (((unsigned long)addr & 0x00000fff) < 0x0800) {
 		gpio_set_value(GPIO_PTT4, 1);
 		writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
 	} else {
@@ -210,8 +210,12 @@
 };
 
 static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
-	.chan_priv_tx	= SHDMA_SLAVE_MMCIF_TX,
-	.chan_priv_rx	= SHDMA_SLAVE_MMCIF_RX,
+	.chan_priv_tx	= {
+		.slave_id = SHDMA_SLAVE_MMCIF_TX,
+	},
+	.chan_priv_rx	= {
+		.slave_id = SHDMA_SLAVE_MMCIF_RX,
+	}
 };
 
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 83cc704..b1cb271 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -12,18 +12,19 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/stat.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <asm/dma.h>
 
-static struct sysdev_class dma_sysclass = {
+static struct bus_type dma_subsys = {
 	.name = "dma",
+	.dev_name = "dma",
 };
 
-static ssize_t dma_show_devices(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t dma_show_devices(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	ssize_t len = 0;
 	int i;
@@ -43,29 +44,29 @@
 	return len;
 }
 
-static SYSDEV_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
+static DEVICE_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
 
-static int __init dma_sysclass_init(void)
+static int __init dma_subsys_init(void)
 {
 	int ret;
 
-	ret = sysdev_class_register(&dma_sysclass);
+	ret = subsys_system_register(&dma_subsys, NULL);
 	if (unlikely(ret))
 		return ret;
 
-	return sysfs_create_file(&dma_sysclass.kset.kobj, &attr_devices.attr);
+	return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr);
 }
-postcore_initcall(dma_sysclass_init);
+postcore_initcall(dma_subsys_init);
 
-static ssize_t dma_show_dev_id(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t dma_show_dev_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct dma_channel *channel = to_dma_channel(dev);
 	return sprintf(buf, "%s\n", channel->dev_id);
 }
 
-static ssize_t dma_store_dev_id(struct sys_device *dev,
-				struct sysdev_attribute *attr,
+static ssize_t dma_store_dev_id(struct device *dev,
+				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
 	struct dma_channel *channel = to_dma_channel(dev);
@@ -73,10 +74,10 @@
 	return count;
 }
 
-static SYSDEV_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
+static DEVICE_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
 
-static ssize_t dma_store_config(struct sys_device *dev,
-				struct sysdev_attribute *attr,
+static ssize_t dma_store_config(struct device *dev,
+				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
 	struct dma_channel *channel = to_dma_channel(dev);
@@ -88,17 +89,17 @@
 	return count;
 }
 
-static SYSDEV_ATTR(config, S_IWUSR, NULL, dma_store_config);
+static DEVICE_ATTR(config, S_IWUSR, NULL, dma_store_config);
 
-static ssize_t dma_show_mode(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t dma_show_mode(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct dma_channel *channel = to_dma_channel(dev);
 	return sprintf(buf, "0x%08x\n", channel->mode);
 }
 
-static ssize_t dma_store_mode(struct sys_device *dev,
-			      struct sysdev_attribute *attr,
+static ssize_t dma_store_mode(struct device *dev,
+			      struct device_attribute *attr,
 			      const char *buf, size_t count)
 {
 	struct dma_channel *channel = to_dma_channel(dev);
@@ -106,38 +107,38 @@
 	return count;
 }
 
-static SYSDEV_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
 
 #define dma_ro_attr(field, fmt)						\
-static ssize_t dma_show_##field(struct sys_device *dev, 		\
-				struct sysdev_attribute *attr, char *buf)\
+static ssize_t dma_show_##field(struct device *dev,		\
+				struct device_attribute *attr, char *buf)\
 {									\
 	struct dma_channel *channel = to_dma_channel(dev);		\
 	return sprintf(buf, fmt, channel->field);			\
 }									\
-static SYSDEV_ATTR(field, S_IRUGO, dma_show_##field, NULL);
+static DEVICE_ATTR(field, S_IRUGO, dma_show_##field, NULL);
 
 dma_ro_attr(count, "0x%08x\n");
 dma_ro_attr(flags, "0x%08lx\n");
 
 int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
 {
-	struct sys_device *dev = &chan->dev;
+	struct device *dev = &chan->dev;
 	char name[16];
 	int ret;
 
 	dev->id  = chan->vchan;
-	dev->cls = &dma_sysclass;
+	dev->bus = &dma_subsys;
 
-	ret = sysdev_register(dev);
+	ret = device_register(dev);
 	if (ret)
 		return ret;
 
-	ret |= sysdev_create_file(dev, &attr_dev_id);
-	ret |= sysdev_create_file(dev, &attr_count);
-	ret |= sysdev_create_file(dev, &attr_mode);
-	ret |= sysdev_create_file(dev, &attr_flags);
-	ret |= sysdev_create_file(dev, &attr_config);
+	ret |= device_create_file(dev, &dev_attr_dev_id);
+	ret |= device_create_file(dev, &dev_attr_count);
+	ret |= device_create_file(dev, &dev_attr_mode);
+	ret |= device_create_file(dev, &dev_attr_flags);
+	ret |= device_create_file(dev, &dev_attr_config);
 
 	if (unlikely(ret)) {
 		dev_err(&info->pdev->dev, "Failed creating attrs\n");
@@ -150,17 +151,17 @@
 
 void dma_remove_sysfs_files(struct dma_channel *chan, struct dma_info *info)
 {
-	struct sys_device *dev = &chan->dev;
+	struct device *dev = &chan->dev;
 	char name[16];
 
-	sysdev_remove_file(dev, &attr_dev_id);
-	sysdev_remove_file(dev, &attr_count);
-	sysdev_remove_file(dev, &attr_mode);
-	sysdev_remove_file(dev, &attr_flags);
-	sysdev_remove_file(dev, &attr_config);
+	device_remove_file(dev, &dev_attr_dev_id);
+	device_remove_file(dev, &dev_attr_count);
+	device_remove_file(dev, &dev_attr_mode);
+	device_remove_file(dev, &dev_attr_flags);
+	device_remove_file(dev, &dev_attr_config);
 
 	snprintf(name, sizeof(name), "dma%d", chan->chan);
 	sysfs_remove_link(&info->pdev->dev.kobj, name);
 
-	sysdev_unregister(dev);
+	device_unregister(dev);
 }
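The dma-sysfs.c conversion above is the first of several near-identical sysdev-to-driver-core conversions in this series (sh sq.c, sparc sysfs.c, tile sysfs.c): a sysdev_class becomes a bus_type registered with subsys_system_register(), and SYSDEV_ATTR files become DEVICE_ATTR files created on the subsystem's dev_root. A minimal, hypothetical sketch of the pattern, with illustrative identifiers only:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct bus_type example_subsys = {
	.name		= "example",
	.dev_name	= "example",
};

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR(state, 0444, state_show, NULL);

static int __init example_subsys_init(void)
{
	int ret = subsys_system_register(&example_subsys, NULL);

	if (ret)
		return ret;
	return device_create_file(example_subsys.dev_root, &dev_attr_state);
}
postcore_initcall(example_subsys_init);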
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 07373a0..6aa2080 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -14,7 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <cpu/dma.h>
 #include <asm-generic/dma.h>
 
@@ -91,7 +91,7 @@
 
 	wait_queue_head_t wait_queue;
 
-	struct sys_device dev;
+	struct device dev;
 	void *priv_data;
 };
 
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
deleted file mode 100644
index e87063f..0000000
--- a/arch/sh/include/asm/memblock.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASM_SH_MEMBLOCK_H
-#define __ASM_SH_MEMBLOCK_H
-
-#endif /* __ASM_SH_MEMBLOCK_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ea2d508..20ee40a 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -122,7 +122,6 @@
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19	/* Freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /*
  * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index f090799..a8140f0 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/bitmap.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -337,9 +337,9 @@
 	.default_attrs	= sq_sysfs_attrs,
 };
 
-static int __devinit sq_sysdev_add(struct sys_device *sysdev)
+static int __devinit sq_dev_add(struct device *dev)
 {
-	unsigned int cpu = sysdev->id;
+	unsigned int cpu = dev->id;
 	struct kobject *kobj;
 	int error;
 
@@ -348,25 +348,27 @@
 		return -ENOMEM;
 
 	kobj = sq_kobject[cpu];
-	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
+	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
 				     "%s", "sq");
 	if (!error)
 		kobject_uevent(kobj, KOBJ_ADD);
 	return error;
 }
 
-static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+static int __devexit sq_dev_remove(struct device *dev)
 {
-	unsigned int cpu = sysdev->id;
+	unsigned int cpu = dev->id;
 	struct kobject *kobj = sq_kobject[cpu];
 
 	kobject_put(kobj);
 	return 0;
 }
 
-static struct sysdev_driver sq_sysdev_driver = {
-	.add		= sq_sysdev_add,
-	.remove		= __devexit_p(sq_sysdev_remove),
+static struct subsys_interface sq_interface = {
+	.name		= "sq",
+	.subsys		= &cpu_subsys,
+	.add_dev	= sq_dev_add,
+	.remove_dev	= __devexit_p(sq_dev_remove),
 };
 
 static int __init sq_api_init(void)
@@ -386,7 +388,7 @@
 	if (unlikely(!sq_bitmap))
 		goto out;
 
-	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
+	ret = subsys_interface_register(&sq_interface);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -401,7 +403,7 @@
 
 static void __exit sq_api_exit(void)
 {
-	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
+	subsys_interface_unregister(&sq_interface);
 	kfree(sq_bitmap);
 	kmem_cache_destroy(sq_cache);
 }
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index db4ecd7..406508d 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -89,7 +89,8 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -111,7 +112,8 @@
 			start_critical_timings();
 		}
 
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
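The same idle-loop conversion recurs in the sparc64 and tile hunks further down: tick_nohz_stop_sched_tick(1)/tick_nohz_restart_sched_tick() become tick_nohz_idle_enter()/tick_nohz_idle_exit(), with an rcu_idle_enter()/rcu_idle_exit() pair nested inside them. A condensed sketch of the resulting shape (arch-specific wait body elided, not buildable stand-alone):

#include <linux/tick.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void idle_loop_shape(void)
{
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched())
			cpu_relax();	/* stands in for the arch-specific wait */

		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}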
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index c5a33f0..9fea49f 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -157,9 +157,6 @@
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	/* this is necessary because of memblock_phys_mem_size() */
-	memblock_analyze();
-
 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 			&crash_size, &crash_base);
 	if (ret == 0 && crash_size > 0) {
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 1a0e946..7b57bf1 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -230,7 +230,8 @@
 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
 			 PAGE_KERNEL);
 
-	add_active_range(nid, start_pfn, end_pfn);
+	memblock_set_node(PFN_PHYS(start_pfn),
+			  PFN_PHYS(end_pfn - start_pfn), nid);
 }
 
 void __init __weak plat_early_device_setup(void)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index c3e61b3..cb8f992 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -143,9 +143,6 @@
 		       CPU_SUBTYPE_SH7785)
 	default "1"
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 939ca0f..82cc576 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -324,7 +324,6 @@
 	unsigned long vaddr, end;
 	int nid;
 
-	memblock_init();
 	sh_mv.mv_mem_init();
 
 	early_reserve_mem();
@@ -337,7 +336,7 @@
 		sh_mv.mv_mem_reserve();
 
 	memblock_enforce_memory_limit(memory_limit);
-	memblock_analyze();
+	memblock_allow_resize();
 
 	memblock_dump_all();
 
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index b4c2d2b..e4dd5d5 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -49,7 +49,7 @@
 	return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
 	oprofile_perf_exit();
 	kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@
 	ops->backtrace = sh_backtrace;
 	return -ENODEV;
 }
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index f92602e..70ae9d8 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -352,9 +353,6 @@
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y if SPARC64
-
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y if SPARC64
 
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
deleted file mode 100644
index c67b047..0000000
--- a/arch/sparc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _SPARC64_MEMBLOCK_H
-#define _SPARC64_MEMBLOCK_H
-
-#include <asm/oplib.h>
-
-#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
-
-#endif /* !(_SPARC64_MEMBLOCK_H) */
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h
index 98d6ebb..dbfc1a3 100644
--- a/arch/sparc/include/asm/posix_types.h
+++ b/arch/sparc/include/asm/posix_types.h
@@ -20,7 +20,6 @@
 typedef unsigned int           __kernel_gid_t;
 typedef unsigned long          __kernel_ino_t;
 typedef unsigned int           __kernel_mode_t;
-typedef unsigned short         __kernel_umode_t;
 typedef unsigned int           __kernel_nlink_t;
 typedef int                    __kernel_daddr_t;
 typedef long                   __kernel_off_t;
@@ -55,7 +54,6 @@
 typedef unsigned short         __kernel_gid_t;
 typedef unsigned long          __kernel_ino_t;
 typedef unsigned short         __kernel_mode_t;
-typedef unsigned short         __kernel_umode_t;
 typedef short                  __kernel_nlink_t;
 typedef long                   __kernel_daddr_t;
 typedef long                   __kernel_off_t;
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h
index 9d3fefc..8af1b64 100644
--- a/arch/sparc/include/asm/socket.h
+++ b/arch/sparc/include/asm/socket.h
@@ -58,6 +58,9 @@
 
 #define SO_RXQ_OVFL             0x0024
 
+#define SO_WIFI_STATUS		0x0025
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT	0x5002
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fa57532..5cc5888 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -133,7 +133,6 @@
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling
 					 * TIF_NEED_RESCHED */
 #define TIF_MEMDIE		10	/* is terminating due to OOM killer */
-#define TIF_FREEZE		11	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
 					 _TIF_SIGPENDING | \
 					 _TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 60d86be..01d057f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -225,7 +225,6 @@
 /* flag bit 12 is available */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	14
-#define TIF_FREEZE		15	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
 				 _TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index 91e5a03..383d156 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -12,12 +12,6 @@
 
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* defined(__sparc__) */
 
 #endif /* defined(_SPARC_TYPES_H) */
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 7429b47..381edcd 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1181,13 +1181,11 @@
 
 	dp->rcv_buf_len = 4096;
 
-	dp->ds_states = kzalloc(sizeof(ds_states_template),
-				GFP_KERNEL);
+	dp->ds_states = kmemdup(ds_states_template,
+				sizeof(ds_states_template), GFP_KERNEL);
 	if (!dp->ds_states)
 		goto out_free_rcv_buf;
 
-	memcpy(dp->ds_states, ds_states_template,
-	       sizeof(ds_states_template));
 	dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 
 	for (i = 0; i < dp->num_ds_states; i++)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b272cda..af5755d 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -849,10 +849,10 @@
 	if (!irq)
 		return -ENOMEM;
 
-	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-		return -EINVAL;
 	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
 		return -EINVAL;
+	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+		return -EINVAL;
 
 	return irq;
 }
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 3739a06..39d8b05 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -95,12 +95,14 @@
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while(1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 
 		while (!need_resched() && !cpu_is_offline(cpu))
 			sparc64_yield(cpu);
 
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 
 		preempt_enable_no_resched();
 
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 4661480..741df91 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -58,12 +58,10 @@
 	void *new_val;
 	int err;
 
-	new_val = kmalloc(len, GFP_KERNEL);
+	new_val = kmemdup(val, len, GFP_KERNEL);
 	if (!new_val)
 		return -ENOMEM;
 
-	memcpy(new_val, val, len);
-
 	err = -ENODEV;
 
 	mutex_lock(&of_set_property_mutex);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index fe1e3fc..ffb883d 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -84,7 +84,7 @@
 
 	prom_printf("PROM SYNC COMMAND...\n");
 	show_free_areas(0);
-	if(current->pid != 0) {
+	if (!is_idle_task(current)) {
 		local_irq_enable();
 		sys_sync();
 		local_irq_disable();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 7560772..3b1bd7c 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -840,7 +840,7 @@
 	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
 	struct mm_struct *mm = info;
 
-	/* It is not valid to test "currrent->active_mm == mm" here.
+	/* It is not valid to test "current->active_mm == mm" here.
 	 *
 	 * The value of "current" is not changed atomically with
 	 * switch_mm().  But that's OK, we just need to check the
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 441521a..232df99 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -368,11 +368,11 @@
 	if (current->flags & PF_RANDOMIZE) {
 		unsigned long val = get_random_int();
 		if (test_thread_flag(TIF_32BIT))
-			rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
+			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
 		else
-			rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
+			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
 	}
-	return (rnd << PAGE_SHIFT) * 2;
+	return rnd << PAGE_SHIFT;
 }
 
 void arch_pick_mmap_layout(struct mm_struct *mm)
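Worked numbers for the mmap_rnd() change above, assuming sparc64's 8 KB pages (PAGE_SHIFT = 13): the old code drew rnd from 2^(22-13) = 512 values for 32-bit tasks or 2^(29-13) = 65536 values for 64-bit tasks and returned (rnd << PAGE_SHIFT) * 2, giving an 8 MB or 1 GB span restricted to even page offsets. The new code draws from 2^(23-13) = 1024 or 2^(30-13) = 131072 values and returns rnd << PAGE_SHIFT, covering the same 8 MB or 1 GB span while allowing every page offset, so the number of distinct placements doubles without changing the span.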
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 7408201..654e8aa 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -3,7 +3,7 @@
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
 #include <linux/sched.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -16,13 +16,13 @@
 static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
 
 #define SHOW_MMUSTAT_ULONG(NAME) \
-static ssize_t show_##NAME(struct sys_device *dev, \
-			struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##NAME(struct device *dev, \
+			struct device_attribute *attr, char *buf) \
 { \
 	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
 	return sprintf(buf, "%lu\n", p->NAME); \
 } \
-static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)
+static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL)
 
 SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
 SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
@@ -58,38 +58,38 @@
 SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
 
 static struct attribute *mmu_stat_attrs[] = {
-	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
-	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
-	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
-	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
-	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
-	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
-	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
-	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
-	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
-	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
-	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
-	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
-	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
-	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
-	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
-	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
-	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
-	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
-	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
-	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
-	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
-	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
-	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
-	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
-	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
-	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
-	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
-	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
-	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
-	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
-	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
-	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
+	&dev_attr_immu_tsb_hits_ctx0_8k_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr,
+	&dev_attr_immu_tsb_hits_ctx0_64k_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr,
+	&dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
+	&dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
+	&dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
+	&dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
+	&dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
+	&dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
+	&dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
+	&dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
+	&dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
 	NULL,
 };
 
@@ -139,15 +139,15 @@
 	return sun4v_mmustat_conf(ra, &orig_ra);
 }
 
-static ssize_t show_mmustat_enable(struct sys_device *s,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mmustat_enable(struct device *s,
+				struct device_attribute *attr, char *buf)
 {
 	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
 	return sprintf(buf, "%lx\n", val);
 }
 
-static ssize_t store_mmustat_enable(struct sys_device *s,
-			struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_mmustat_enable(struct device *s,
+			struct device_attribute *attr, const char *buf,
 			size_t count)
 {
 	unsigned long val, err;
@@ -163,39 +163,39 @@
 	return count;
 }
 
-static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
+static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
 
 static int mmu_stats_supported;
 
-static int register_mmu_stats(struct sys_device *s)
+static int register_mmu_stats(struct device *s)
 {
 	if (!mmu_stats_supported)
 		return 0;
-	sysdev_create_file(s, &attr_mmustat_enable);
+	device_create_file(s, &dev_attr_mmustat_enable);
 	return sysfs_create_group(&s->kobj, &mmu_stat_group);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void unregister_mmu_stats(struct sys_device *s)
+static void unregister_mmu_stats(struct device *s)
 {
 	if (!mmu_stats_supported)
 		return;
 	sysfs_remove_group(&s->kobj, &mmu_stat_group);
-	sysdev_remove_file(s, &attr_mmustat_enable);
+	device_remove_file(s, &dev_attr_mmustat_enable);
 }
 #endif
 
 #define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
-static ssize_t show_##NAME(struct sys_device *dev, \
-		struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##NAME(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
 { \
 	cpuinfo_sparc *c = &cpu_data(dev->id); \
 	return sprintf(buf, "%lu\n", c->MEMBER); \
 }
 
 #define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
-static ssize_t show_##NAME(struct sys_device *dev, \
-		struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##NAME(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
 { \
 	cpuinfo_sparc *c = &cpu_data(dev->id); \
 	return sprintf(buf, "%u\n", c->MEMBER); \
@@ -209,14 +209,14 @@
 SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
 SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
 
-static struct sysdev_attribute cpu_core_attrs[] = {
-	_SYSDEV_ATTR(clock_tick,          0444, show_clock_tick, NULL),
-	_SYSDEV_ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
-	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
-	_SYSDEV_ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
-	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
-	_SYSDEV_ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
-	_SYSDEV_ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
+static struct device_attribute cpu_core_attrs[] = {
+	__ATTR(clock_tick,          0444, show_clock_tick, NULL),
+	__ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
+	__ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
+	__ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
+	__ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
+	__ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
+	__ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
 };
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
@@ -224,11 +224,11 @@
 static void register_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
-	struct sys_device *s = &c->sysdev;
+	struct device *s = &c->dev;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
-		sysdev_create_file(s, &cpu_core_attrs[i]);
+		device_create_file(s, &cpu_core_attrs[i]);
 
 	register_mmu_stats(s);
 }
@@ -237,12 +237,12 @@
 static void unregister_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
-	struct sys_device *s = &c->sysdev;
+	struct device *s = &c->dev;
 	int i;
 
 	unregister_mmu_stats(s);
 	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
-		sysdev_remove_file(s, &cpu_core_attrs[i]);
+		device_remove_file(s, &cpu_core_attrs[i]);
 }
 #endif
 
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2..8a7f817 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@
 				case 'i':	/* INT */
 					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
 						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-					else if ((insn & 0x80002000) == 0x80002000 &&
-					         (insn & 0x01800000) != 0x01800000) /* %LO */
+					else if ((insn & 0x80002000) == 0x80002000) /* %LO */
 						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
 					else {
 						prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 8e073d8..b3f5e7d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -790,7 +790,7 @@
 	return -1;
 }
 
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -808,7 +808,7 @@
 	return start;
 }
 #else
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = 0;
 	return end;
@@ -816,7 +816,7 @@
 #endif
 
 /* This must be invoked after performing all of the necessary
- * add_active_range() calls for 'nid'.  We need to be able to get
+ * memblock_set_node() calls for 'nid'.  We need to be able to get
  * correct data from get_pfn_range_for_nid().
  */
 static void __init allocate_node_data(int nid)
@@ -987,14 +987,11 @@
 
 			this_end = memblock_nid_range(start, end, &nid);
 
-			numadbg("Adding active range nid[%d] "
+			numadbg("Setting memblock NUMA node nid[%d] "
 				"start[%lx] end[%lx]\n",
 				nid, start, this_end);
 
-			add_active_range(nid,
-					 start >> PAGE_SHIFT,
-					 this_end >> PAGE_SHIFT);
-
+			memblock_set_node(start, this_end - start, nid);
 			start = this_end;
 		}
 	}
@@ -1282,7 +1279,6 @@
 {
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
-	struct memblock_region *reg;
 
 	numadbg("bootmem_init_nonnuma()\n");
 
@@ -1292,20 +1288,8 @@
 	       (top_of_ram - total_ram) >> 20);
 
 	init_node_masks_nonnuma();
-
-	for_each_memblock(memory, reg) {
-		unsigned long start_pfn, end_pfn;
-
-		if (!reg->size)
-			continue;
-
-		start_pfn = memblock_region_memory_base_pfn(reg);
-		end_pfn = memblock_region_memory_end_pfn(reg);
-		add_active_range(0, start_pfn, end_pfn);
-	}
-
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
 	allocate_node_data(0);
-
 	node_set_online(0);
 }
 
@@ -1769,8 +1753,6 @@
 		sun4v_ktsb_init();
 	}
 
-	memblock_init();
-
 	/* Find available physical memory...
 	 *
 	 * Read it twice in order to work around a bug in openfirmware.
@@ -1796,7 +1778,7 @@
 
 	memblock_enforce_memory_limit(cmdline_memory_size);
 
-	memblock_analyze();
+	memblock_allow_resize();
 	memblock_dump_all();
 
 	set_bit(0, mmu_context_bmap);
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 9c45d8b..4c1ac6e 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -85,7 +85,8 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched()) {
 			if (cpu_is_offline(cpu))
 				BUG();  /* no HOTPLUG_CPU */
@@ -105,7 +106,8 @@
 				local_irq_enable();
 			current_thread_info()->status |= TS_POLLING;
 		}
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 6029082..f862b00 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -14,7 +14,7 @@
  * /sys entry support.
  */
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
@@ -32,55 +32,55 @@
 	return n;
 }
 
-static ssize_t chip_width_show(struct sysdev_class *dev,
-			       struct sysdev_class_attribute *attr,
+static ssize_t chip_width_show(struct device *dev,
+			       struct device_attribute *attr,
 			       char *page)
 {
 	return sprintf(page, "%u\n", smp_width);
 }
-static SYSDEV_CLASS_ATTR(chip_width, 0444, chip_width_show, NULL);
+static DEVICE_ATTR(chip_width, 0444, chip_width_show, NULL);
 
-static ssize_t chip_height_show(struct sysdev_class *dev,
-				struct sysdev_class_attribute *attr,
+static ssize_t chip_height_show(struct device *dev,
+				struct device_attribute *attr,
 				char *page)
 {
 	return sprintf(page, "%u\n", smp_height);
 }
-static SYSDEV_CLASS_ATTR(chip_height, 0444, chip_height_show, NULL);
+static DEVICE_ATTR(chip_height, 0444, chip_height_show, NULL);
 
-static ssize_t chip_serial_show(struct sysdev_class *dev,
-				struct sysdev_class_attribute *attr,
+static ssize_t chip_serial_show(struct device *dev,
+				struct device_attribute *attr,
 				char *page)
 {
 	return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
 }
-static SYSDEV_CLASS_ATTR(chip_serial, 0444, chip_serial_show, NULL);
+static DEVICE_ATTR(chip_serial, 0444, chip_serial_show, NULL);
 
-static ssize_t chip_revision_show(struct sysdev_class *dev,
-				  struct sysdev_class_attribute *attr,
+static ssize_t chip_revision_show(struct device *dev,
+				  struct device_attribute *attr,
 				  char *page)
 {
 	return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
 }
-static SYSDEV_CLASS_ATTR(chip_revision, 0444, chip_revision_show, NULL);
+static DEVICE_ATTR(chip_revision, 0444, chip_revision_show, NULL);
 
 
-static ssize_t type_show(struct sysdev_class *dev,
-			    struct sysdev_class_attribute *attr,
+static ssize_t type_show(struct device *dev,
+			    struct device_attribute *attr,
 			    char *page)
 {
 	return sprintf(page, "tilera\n");
 }
-static SYSDEV_CLASS_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR(type, 0444, type_show, NULL);
 
 #define HV_CONF_ATTR(name, conf)					\
-	static ssize_t name ## _show(struct sysdev_class *dev,		\
-				     struct sysdev_class_attribute *attr, \
+	static ssize_t name ## _show(struct device *dev,		\
+				     struct device_attribute *attr, \
 				     char *page)			\
 	{								\
 		return get_hv_confstr(page, conf);			\
 	}								\
-	static SYSDEV_CLASS_ATTR(name, 0444, name ## _show, NULL);
+	static DEVICE_ATTR(name, 0444, name ## _show, NULL);
 
 HV_CONF_ATTR(version,		HV_CONFSTR_HV_SW_VER)
 HV_CONF_ATTR(config_version,	HV_CONFSTR_HV_CONFIG_VER)
@@ -96,15 +96,15 @@
 HV_CONF_ATTR(switch_control,	HV_CONFSTR_SWITCH_CONTROL)
 
 static struct attribute *board_attrs[] = {
-	&attr_board_part.attr,
-	&attr_board_serial.attr,
-	&attr_board_revision.attr,
-	&attr_board_description.attr,
-	&attr_mezz_part.attr,
-	&attr_mezz_serial.attr,
-	&attr_mezz_revision.attr,
-	&attr_mezz_description.attr,
-	&attr_switch_control.attr,
+	&dev_attr_board_part.attr,
+	&dev_attr_board_serial.attr,
+	&dev_attr_board_revision.attr,
+	&dev_attr_board_description.attr,
+	&dev_attr_mezz_part.attr,
+	&dev_attr_mezz_serial.attr,
+	&dev_attr_mezz_revision.attr,
+	&dev_attr_mezz_description.attr,
+	&dev_attr_switch_control.attr,
 	NULL
 };
 
@@ -151,12 +151,11 @@
 
 static int __init create_sysfs_entries(void)
 {
-	struct sysdev_class *cls = &cpu_sysdev_class;
 	int err = 0;
 
 #define create_cpu_attr(name)						\
 	if (!err)							\
-		err = sysfs_create_file(&cls->kset.kobj, &attr_##name.attr);
+		err = device_create_file(cpu_subsys.dev_root, &dev_attr_##name);
 	create_cpu_attr(chip_width);
 	create_cpu_attr(chip_height);
 	create_cpu_attr(chip_serial);
@@ -164,7 +163,7 @@
 
 #define create_hv_attr(name)						\
 	if (!err)							\
-		err = sysfs_create_file(hypervisor_kobj, &attr_##name.attr);
+		err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name);
 	create_hv_attr(type);
 	create_hv_attr(version);
 	create_hv_attr(config_version);
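The sysdev-to-struct-device conversion in this file follows a generic pattern; a hedged, self-contained sketch (attribute name and value are made up, not taken from this driver) looks like:

#include <linux/device.h>
#include <linux/cpu.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static int __init example_init(void)
{
	/* Attributes now hang off cpu_subsys.dev_root instead of
	 * cpu_sysdev_class.kset.kobj. */
	return device_create_file(cpu_subsys.dev_root, &dev_attr_example);
}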
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 25b7b90..c1eaaa1 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -54,7 +54,7 @@
 	if (unlikely(tsk->pid < 2)) {
 		panic("Signal %d (code %d) at %#lx sent to %s!",
 		      si_signo, si_code & 0xffff, address,
-		      tsk->pid ? "init" : "the idle task");
+		      is_idle_task(tsk) ? "the idle task" : "init");
 	}
 
 	info.si_signo = si_signo;
@@ -515,7 +515,7 @@
 
 	if (unlikely(tsk->pid < 2)) {
 		panic("Kernel page fault running %s!",
-		      tsk->pid ? "init" : "the idle task");
+		      is_idle_task(tsk) ? "the idle task" : "init");
 	}
 
 	/*
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 5bd1bad..200c4ab 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -71,7 +71,6 @@
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	6
 #define TIF_RESTORE_SIGMASK	7
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c533835..69f2490 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -246,10 +246,12 @@
 		if (need_resched())
 			schedule();
 
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		nsecs = disable_timer();
 		idle_sleep(nsecs);
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 	}
 }
 
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index a08d9fa..82a6e22 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -75,8 +75,6 @@
 	.rating		= 300,
 	.read		= itimer_read,
 	.mask		= CLOCKSOURCE_MASK(64),
-	.mult		= 1000,
-	.shift		= 0,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -94,9 +92,9 @@
 		clockevent_delta2ns(60 * HZ, &itimer_clockevent);
 	itimer_clockevent.min_delta_ns =
 		clockevent_delta2ns(1, &itimer_clockevent);
-	err = clocksource_register(&itimer_clocksource);
+	err = clocksource_register_hz(&itimer_clocksource, USEC_PER_SEC);
 	if (err) {
-		printk(KERN_ERR "clocksource_register returned %d\n", err);
+		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
 		return;
 	}
 	clockevents_register_device(&itimer_clockevent);
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index c270e9e..89f7557 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -135,14 +135,12 @@
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_MEMDIE		18
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 
 /*
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index ba401df..52edc2b 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -55,7 +55,8 @@
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched()) {
 			local_irq_disable();
 			stop_critical_timings();
@@ -63,7 +64,8 @@
 			local_irq_enable();
 			start_critical_timings();
 		}
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/unicore32/kernel/puv3-core.c b/arch/unicore32/kernel/puv3-core.c
index 1a505a7..254adee 100644
--- a/arch/unicore32/kernel/puv3-core.c
+++ b/arch/unicore32/kernel/puv3-core.c
@@ -13,7 +13,6 @@
 
 #include <linux/init.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/amba/bus.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c
index e731c56..37b12a0 100644
--- a/arch/unicore32/kernel/puv3-nb0916.c
+++ b/arch/unicore32/kernel/puv3-nb0916.c
@@ -13,7 +13,6 @@
 
 #include <linux/init.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
 #include <linux/io.h>
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 471b6bc..673d7a8 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -37,6 +37,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
+#include <asm/memblock.h>
 
 #include "setup.h"
 
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 3b379cd..de186bd 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -26,6 +26,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 #include <mach/map.h>
 
 #include "mm.h"
@@ -245,7 +246,6 @@
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
 		meminfo_cmp, NULL);
 
-	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
 
@@ -264,7 +264,7 @@
 
 	uc32_mm_memblock_reserve();
 
-	memblock_analyze();
+	memblock_allow_resize();
 	memblock_dump_all();
 }
 
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 3e5c3e5..43c20b4 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -25,6 +25,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 
 #include <mach/map.h>
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index efb4294..1d2a69d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,8 @@
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
+	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_DMA_ATTRS
@@ -204,9 +206,6 @@
 	bool
 	default X86_64
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 config AUDIT_ARCH
 	bool
 	default X86_64
@@ -343,6 +342,7 @@
 
 	  If you enable this option then you'll be able to select support
 	  for the following (non-PC) 64 bit x86 platforms:
+		Numascale NumaChip
 		ScaleMP vSMP
 		SGI Ultraviolet
 
@@ -351,6 +351,18 @@
 endif
 # This is an alphabetically sorted list of 64 bit extended platforms
 # Please maintain the alphabetic order if and when there are additions
+config X86_NUMACHIP
+	bool "Numascale NumaChip"
+	depends on X86_64
+	depends on X86_EXTENDED_PLATFORM
+	depends on NUMA
+	depends on SMP
+	depends on X86_X2APIC
+	depends on !EDAC_AMD64
+	---help---
+	  Adds support for Numascale NumaChip large-SMP systems. Needed to
+	  enable more than ~168 cores.
+	  If you don't have one of these, you should say N here.
 
 config X86_VSMP
 	bool "ScaleMP vSMP"
@@ -1730,7 +1742,7 @@
 
 config X86_APM_BOOT
 	def_bool y
-	depends on APM || APM_MODULE
+	depends on APM
 
 menuconfig APM
 	tristate "APM (Advanced Power Management) BIOS support"
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index a6253ec..3e27456 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -134,7 +134,7 @@
 	CFI_REL_OFFSET rsp,0
 	pushfq_cfi
 	/*CFI_REL_OFFSET rflags,0*/
-	movl	8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
+	movl	TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
 	CFI_REGISTER rip,r10
 	pushq_cfi $__USER32_CS
 	/*CFI_REL_OFFSET cs,0*/
@@ -150,9 +150,8 @@
  	.section __ex_table,"a"
  	.quad 1b,ia32_badarg
  	.previous	
-	GET_THREAD_INFO(%r10)
-	orl    $TS_COMPAT,TI_status(%r10)
-	testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
 	cmpq	$(IA32_NR_syscalls-1),%rax
@@ -162,13 +161,12 @@
 sysenter_dispatch:
 	call	*ia32_sys_call_table(,%rax,8)
 	movq	%rax,RAX-ARGOFFSET(%rsp)
-	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl	$_TIF_ALLWORK_MASK,TI_flags(%r10)
+	testl	$_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz	sysexit_audit
 sysexit_from_sys_call:
-	andl    $~TS_COMPAT,TI_status(%r10)
+	andl    $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	/* clear IF, that popfq doesn't enable interrupts early */
 	andl  $~0x200,EFLAGS-R11(%rsp) 
 	movl	RIP-R11(%rsp),%edx		/* User %eip */
@@ -205,7 +203,7 @@
 	.endm
 
 	.macro auditsys_exit exit
-	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz ia32_ret_from_sys_call
 	TRACE_IRQS_ON
 	sti
@@ -215,12 +213,11 @@
 	movzbl %al,%edi		/* zero-extend that into %edi */
 	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
 	call audit_syscall_exit
-	GET_THREAD_INFO(%r10)
 	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall return value */
 	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
 	cli
 	TRACE_IRQS_OFF
-	testl %edi,TI_flags(%r10)
+	testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jz \exit
 	CLEAR_RREGS -ARGOFFSET
 	jmp int_with_check
@@ -238,7 +235,7 @@
 
 sysenter_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jz	sysenter_auditsys
 #endif
 	SAVE_REST
@@ -309,9 +306,8 @@
 	.section __ex_table,"a"
 	.quad 1b,ia32_badarg
 	.previous	
-	GET_THREAD_INFO(%r10)
-	orl   $TS_COMPAT,TI_status(%r10)
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
 	cmpq $IA32_NR_syscalls-1,%rax
@@ -321,13 +317,12 @@
 cstar_dispatch:
 	call *ia32_sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
-	GET_THREAD_INFO(%r10)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz sysretl_audit
 sysretl_from_sys_call:
-	andl $~TS_COMPAT,TI_status(%r10)
+	andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	RESTORE_ARGS 0,-ARG_SKIP,0,0,0
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx
@@ -355,7 +350,7 @@
 
 cstar_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jz cstar_auditsys
 #endif
 	xchgl %r9d,%ebp
@@ -420,9 +415,8 @@
 	/* note the registers are not zero extended to the sf.
 	   this could be a problem. */
 	SAVE_ARGS 0,1,0
-	GET_THREAD_INFO(%r10)
-	orl   $TS_COMPAT,TI_status(%r10)
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+	orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz ia32_tracesys
 	cmpq $(IA32_NR_syscalls-1),%rax
 	ja ia32_badsys
@@ -459,8 +453,8 @@
 	CFI_ENDPROC
 	
 	.macro PTREGSCALL label, func, arg
-	.globl \label
-\label:
+	ALIGN
+GLOBAL(\label)
 	leaq \func(%rip),%rax
 	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
 	jmp  ia32_ptregs_common	
@@ -477,7 +471,8 @@
 	PTREGSCALL stub32_vfork, sys_vfork, %rdi
 	PTREGSCALL stub32_iopl, sys_iopl, %rsi
 
-ENTRY(ia32_ptregs_common)
+	ALIGN
+ia32_ptregs_common:
 	popq %r11
 	CFI_ENDPROC
 	CFI_STARTPROC32	simple
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 091508b..952bd01 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -4,10 +4,10 @@
 
 #ifdef CONFIG_SMP
 	.macro LOCK_PREFIX
-1:	lock
+672:	lock
 	.section .smp_locks,"a"
 	.balign 4
-	.long 1b - .
+	.long 672b - .
 	.previous
 	.endm
 #else
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1a6c09a..3ab9bdd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -176,6 +176,7 @@
 }
 
 extern int x2apic_phys;
+extern int x2apic_preenabled;
 extern void check_x2apic(void);
 extern void enable_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
@@ -198,6 +199,9 @@
 	x2apic_phys = 1;
 }
 #else
+static inline void disable_x2apic(void)
+{
+}
 static inline void check_x2apic(void)
 {
 }
@@ -212,6 +216,7 @@
 {
 }
 
+#define	nox2apic	0
 #define	x2apic_preenabled 0
 #define	x2apic_supported()	0
 #endif
@@ -410,6 +415,7 @@
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
+
 static inline u32 apic_read(u32 reg)
 {
 	return apic->read(reg);
diff --git a/arch/x86/include/asm/apic_flat_64.h b/arch/x86/include/asm/apic_flat_64.h
new file mode 100644
index 0000000..a2d3127
--- /dev/null
+++ b/arch/x86/include/asm/apic_flat_64.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_APIC_FLAT_64_H
+#define _ASM_X86_APIC_FLAT_64_H
+
+extern void flat_init_apic_ldr(void);
+
+#endif
+
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 3925d80..134bba0 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -144,6 +144,7 @@
 
 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
 #define APIC_BASE_MSR	0x800
+#define XAPIC_ENABLE	(1UL << 11)
 #define X2APIC_ENABLE	(1UL << 10)
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 1775d6e..b97596e 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -380,6 +380,8 @@
 	return word;
 }
 
+#undef ADDR
+
 #ifdef __KERNEL__
 /**
  * ffs - find first set bit in word
@@ -395,10 +397,25 @@
 static inline int ffs(int x)
 {
 	int r;
-#ifdef CONFIG_X86_CMOV
+
+#ifdef CONFIG_X86_64
+	/*
+	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architect says its
+	 * value is written to set it to the same as before, except that the
+	 * top 32 bits will be cleared.
+	 *
+	 * We cannot do this on 32 bits because at the very least some
+	 * 486 CPUs did not behave this way.
+	 */
+	long tmp = -1;
+	asm("bsfl %1,%0"
+	    : "=r" (r)
+	    : "rm" (x), "0" (tmp));
+#elif defined(CONFIG_X86_CMOV)
 	asm("bsfl %1,%0\n\t"
 	    "cmovzl %2,%0"
-	    : "=r" (r) : "rm" (x), "r" (-1));
+	    : "=&r" (r) : "rm" (x), "r" (-1));
 #else
 	asm("bsfl %1,%0\n\t"
 	    "jnz 1f\n\t"
@@ -422,7 +439,22 @@
 static inline int fls(int x)
 {
 	int r;
-#ifdef CONFIG_X86_CMOV
+
+#ifdef CONFIG_X86_64
+	/*
+	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architect says its
+	 * value is written to set it to the same as before, except that the
+	 * top 32 bits will be cleared.
+	 *
+	 * We cannot do this on 32 bits because at the very least some
+	 * 486 CPUs did not behave this way.
+	 */
+	long tmp = -1;
+	asm("bsrl %1,%0"
+	    : "=r" (r)
+	    : "rm" (x), "0" (tmp));
+#elif defined(CONFIG_X86_CMOV)
 	asm("bsrl %1,%0\n\t"
 	    "cmovzl %2,%0"
 	    : "=&r" (r) : "rm" (x), "rm" (-1));
@@ -434,11 +466,35 @@
 #endif
 	return r + 1;
 }
-#endif /* __KERNEL__ */
 
-#undef ADDR
-
-#ifdef __KERNEL__
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#ifdef CONFIG_X86_64
+static __always_inline int fls64(__u64 x)
+{
+	long bitpos = -1;
+	/*
+	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architect says its
+	 * value is written to set it to the same as before.
+	 */
+	asm("bsrq %1,%0"
+	    : "+r" (bitpos)
+	    : "rm" (x));
+	return bitpos + 1;
+}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
 
 #include <asm-generic/bitops/find.h>
 
@@ -450,12 +506,6 @@
 
 #include <asm-generic/bitops/const_hweight.h>
 
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls64.h>
-
-#ifdef __KERNEL__
-
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
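The return conventions of these helpers are unchanged; only how the result is produced on x86-64 differs. An illustrative self-check (not part of the patch):

static void __init bitops_demo(void)
{
	BUG_ON(ffs(0) != 0);			/* no bit set -> 0 */
	BUG_ON(ffs(8) != 4);			/* lowest set bit, 1-based */
	BUG_ON(fls(0) != 0);
	BUG_ON(fls(8) != 4);			/* highest set bit, 1-based */
	BUG_ON(fls64(0) != 0);
	BUG_ON(fls64(1ULL << 63) != 64);	/* new inline 64-bit variant */
}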
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5d3acdf..0c9fa27 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -31,60 +33,47 @@
 #define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+	        __typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
@@ -165,36 +154,6 @@
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
-	({								\
-	        __typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		default:						\
-			__xadd_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
@@ -203,8 +162,72 @@
  * xadd_sync() is always locked
  * xadd_local() is never locked
  */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock)						\
+	({								\
+	        __typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
+#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
+({									\
+	bool __ret;							\
+	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
+	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
+	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
+	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
+	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
+	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
+	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
+		     : "=a" (__ret), "+d" (__old2),			\
+		       "+m" (*(p1)), "+m" (*(p2))			\
+		     : "i" (2 * sizeof(long)), "a" (__old1),		\
+		       "b" (__new1), "c" (__new2));			\
+	__ret;								\
+})
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
+
 #endif	/* ASM_X86_CMPXCHG_H */
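A hedged usage sketch for the consolidated primitives; every variable here is illustrative, nothing is taken from a real caller:

static void cmpxchg_demo(void)
{
	int counter = 0;
	int old;
	struct { unsigned long a, b; } pair __aligned(2 * sizeof(long)) = { 1, 2 };
	bool ok;

	old = xadd(&counter, 1);	/* atomic add, returns the previous value (0) */
	add_smp(&counter, 2);		/* locked add; old value not needed */

	/* Updates two adjacent, 2*sizeof(long)-aligned words only if both
	 * still hold their expected old values. */
	ok = cmpxchg_double(&pair.a, &pair.b, 1, 2, 10, 20);
	(void)old;
	(void)ok;
}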
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index fbebb07..53f4b21 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -166,52 +166,6 @@
 
 #endif
 
-#define cmpxchg8b(ptr, o1, o2, n1, n2)				\
-({								\
-	char __ret;						\
-	__typeof__(o2) __dummy;					\
-	__typeof__(*(ptr)) __old1 = (o1);			\
-	__typeof__(o2) __old2 = (o2);				\
-	__typeof__(*(ptr)) __new1 = (n1);			\
-	__typeof__(o2) __new2 = (n2);				\
-	asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1"	\
-		       : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
-		       : "a" (__old1), "d"(__old2),		\
-		         "b" (__new1), "c" (__new2)		\
-		       : "memory");				\
-	__ret; })
-
-
-#define cmpxchg8b_local(ptr, o1, o2, n1, n2)			\
-({								\
-	char __ret;						\
-	__typeof__(o2) __dummy;					\
-	__typeof__(*(ptr)) __old1 = (o1);			\
-	__typeof__(o2) __old2 = (o2);				\
-	__typeof__(*(ptr)) __new1 = (n1);			\
-	__typeof__(o2) __new2 = (n2);				\
-	asm volatile("cmpxchg8b %2; setz %1"			\
-		       : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
-		       : "a" (__old), "d"(__old2),		\
-		         "b" (__new1), "c" (__new2),		\
-		       : "memory");				\
-	__ret; })
-
-
-#define cmpxchg_double(ptr, o1, o2, n1, n2)				\
-({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
-	VM_BUG_ON((unsigned long)(ptr) % 8);				\
-	cmpxchg8b((ptr), (o1), (o2), (n1), (n2));			\
-})
-
-#define cmpxchg_double_local(ptr, o1, o2, n1, n2)			\
-({									\
-       BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
-       VM_BUG_ON((unsigned long)(ptr) % 8);				\
-       cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2));			\
-})
-
 #define system_has_cmpxchg_double() cpu_has_cx8
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 285da02..614be87 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -20,49 +20,6 @@
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#define cmpxchg16b(ptr, o1, o2, n1, n2)				\
-({								\
-	char __ret;						\
-	__typeof__(o2) __junk;					\
-	__typeof__(*(ptr)) __old1 = (o1);			\
-	__typeof__(o2) __old2 = (o2);				\
-	__typeof__(*(ptr)) __new1 = (n1);			\
-	__typeof__(o2) __new2 = (n2);				\
-	asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1"	\
-		       : "=d"(__junk), "=a"(__ret), "+m" (*ptr)	\
-		       : "b"(__new1), "c"(__new2),		\
-		         "a"(__old1), "d"(__old2));		\
-	__ret; })
-
-
-#define cmpxchg16b_local(ptr, o1, o2, n1, n2)			\
-({								\
-	char __ret;						\
-	__typeof__(o2) __junk;					\
-	__typeof__(*(ptr)) __old1 = (o1);			\
-	__typeof__(o2) __old2 = (o2);				\
-	__typeof__(*(ptr)) __new1 = (n1);			\
-	__typeof__(o2) __new2 = (n2);				\
-	asm volatile("cmpxchg16b %2;setz %1"			\
-		       : "=d"(__junk), "=a"(__ret), "+m" (*ptr)	\
-		       : "b"(__new1), "c"(__new2),		\
-		         "a"(__old1), "d"(__old2));		\
-	__ret; })
-
-#define cmpxchg_double(ptr, o1, o2, n1, n2)				\
-({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	VM_BUG_ON((unsigned long)(ptr) % 16);				\
-	cmpxchg16b((ptr), (o1), (o2), (n1), (n2));			\
-})
-
-#define cmpxchg_double_local(ptr, o1, o2, n1, n2)			\
-({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	VM_BUG_ON((unsigned long)(ptr) % 16);				\
-	cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2));		\
-})
-
 #define system_has_cmpxchg_double() cpu_has_cx16
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 9a2d644..ced283a 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -4,6 +4,7 @@
 #ifdef CONFIG_X86_32
 
 #include <linux/types.h>
+#include <linux/log2.h>
 
 /*
  * do_div() is NOT a C function. It wants to return
@@ -21,15 +22,20 @@
 ({								\
 	unsigned long __upper, __low, __high, __mod, __base;	\
 	__base = (base);					\
-	asm("":"=a" (__low), "=d" (__high) : "A" (n));		\
-	__upper = __high;					\
-	if (__high) {						\
-		__upper = __high % (__base);			\
-		__high = __high / (__base);			\
+	if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
+		__mod = n & (__base - 1);			\
+		n >>= ilog2(__base);				\
+	} else {						\
+		asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
+		__upper = __high;				\
+		if (__high) {					\
+			__upper = __high % (__base);		\
+			__high = __high / (__base);		\
+		}						\
+		asm("divl %2" : "=a" (__low), "=d" (__mod)	\
+			: "rm" (__base), "0" (__low), "1" (__upper));	\
+		asm("" : "=A" (n) : "a" (__low), "d" (__high));	\
 	}							\
-	asm("divl %2":"=a" (__low), "=d" (__mod)		\
-	    : "rm" (__base), "0" (__low), "1" (__upper));	\
-	asm("":"=A" (n) : "a" (__low), "d" (__high));		\
 	__mod;							\
 })
 
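The new fast path only fires when the divisor is a compile-time constant power of two; the division then becomes a mask and a shift. Illustrative only:

static void div_demo(void)
{
	u64 n = 1000;
	u32 rem;

	rem = do_div(n, 8);	/* with this patch: rem = n & 7; n >>= 3; */
	/* Now n == 125 and rem == 0; a variable or non-power-of-two base
	 * still goes through the divl sequence. */
	(void)rem;
}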
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index c954703..3778256 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -53,13 +53,6 @@
  */
 #define E820_RESERVED_KERN        128
 
-/*
- * Address ranges that need to be mapped by the kernel direct
- * mapping. This is used to make sure regions such as
- * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
- */
-#define E820_RESERVED_EFI         129
-
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 struct e820entry {
@@ -122,10 +115,9 @@
 }
 #endif
 
-extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+extern u64 early_reserve_e820(u64 sizet, u64 align);
 
 void memblock_x86_fill(void);
 void memblock_find_dma_reserve(void);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index b8d8bfc..7093e4a 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -33,6 +33,8 @@
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
+#define efi_ioremap(addr, size, type)		ioremap_cache(addr, size)
+
 #else /* !CONFIG_X86_32 */
 
 extern u64 efi_call0(void *fp);
@@ -82,6 +84,9 @@
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+				 u32 type);
+
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 55e4de6..da0b3ca 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -11,6 +11,7 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 	unsigned int apic_timer_irqs;	/* arch dependent */
 	unsigned int irq_spurious_count;
+	unsigned int icr_read_retry_count;
 #endif
 	unsigned int x86_platform_ipis;	/* arch dependent */
 	unsigned int apic_perf_irqs;
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index c9e09ea..6919e93 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -218,7 +218,7 @@
 #ifdef CONFIG_SMP
 #define safe_address (__per_cpu_offset[0])
 #else
-#define safe_address (kstat_cpu(0).cpustat.user)
+#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
 #endif
 
 /*
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 88c765e..74df3f1 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -137,6 +137,13 @@
 	return (insn->vex_prefix.value != 0);
 }
 
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+		insn->displacement.got && insn->immediate.got;
+}
+
 static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
 {
 	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
diff --git a/arch/x86/include/asm/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 8537285..88d0c3c 100644
--- a/arch/x86/include/asm/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
@@ -15,7 +15,7 @@
 
 #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
 #define CALIBRATE_LATCH	\
-	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
+	((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 
 static inline void mach_prepare_counter(void)
 {
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 01fdf56..0e8e85b 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -81,8 +81,8 @@
 #else
 #define lock_cmos_prefix(reg) do {} while (0)
 #define lock_cmos_suffix(reg) do {} while (0)
-#define lock_cmos(reg)
-#define unlock_cmos()
+#define lock_cmos(reg) do { } while (0)
+#define unlock_cmos() do { } while (0)
 #define do_i_have_lock_cmos() 0
 #define current_lock_cmos_reg() 0
 #endif
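lock_cmos()/unlock_cmos() here, like flush_tlb_fix_spurious_fault() further down, become do { } while (0) stubs so that compiling them away still leaves a well-formed statement rather than a bare ';' (which newer gcc flags with -Wempty-body). A minimal sketch of the idiom, with a made-up stub name:

#define nop_stmt()	do { } while (0)	/* stub that is still one statement */

static void demo(int cond)
{
	if (cond)
		nop_stmt();	/* the trailing ';' terminates a real statement */
	else
		return;
}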
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 0e8ae57..f35ce43 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -50,10 +50,11 @@
 #define MCJ_CTX_MASK		3
 #define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
 #define MCJ_CTX_RANDOM		0    /* inject context: random */
-#define MCJ_CTX_PROCESS		1    /* inject context: process */
-#define MCJ_CTX_IRQ		2    /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST	4    /* do NMI broadcasting */
-#define MCJ_EXCEPTION		8    /* raise as exception */
+#define MCJ_CTX_PROCESS		0x1  /* inject context: process */
+#define MCJ_CTX_IRQ		0x2  /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST	0x4  /* do NMI broadcasting */
+#define MCJ_EXCEPTION		0x8  /* raise as exception */
+#define MCJ_IRQ_BRAODCAST	0x10 /* do IRQ broadcasting */
 
 /* Fields are zero when not available */
 struct mce {
@@ -120,7 +121,8 @@
 
 #ifdef __KERNEL__
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern void mce_register_decode_chain(struct notifier_block *nb);
+extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
 #include <linux/percpu.h>
 #include <linux/init.h>
@@ -149,7 +151,7 @@
 
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
-DECLARE_PER_CPU(struct sys_device, mce_sysdev);
+DECLARE_PER_CPU(struct device, mce_device);
 
 /*
  * Maximum banks number.
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
deleted file mode 100644
index 0cd3800..0000000
--- a/arch/x86/include/asm/memblock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _X86_MEMBLOCK_H
-#define _X86_MEMBLOCK_H
-
-#define ARCH_DISCARD_MEMBLOCK
-
-u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-
-void memblock_x86_reserve_range(u64 start, u64 end, char *name);
-void memblock_x86_free_range(u64 start, u64 end);
-struct range;
-int __get_free_all_memory_range(struct range **range, int nodeid,
-			 unsigned long start_pfn, unsigned long end_pfn);
-int get_free_all_memory_range(struct range **rangep, int nodeid);
-
-void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long last_pfn);
-u64 memblock_x86_hole_size(u64 start, u64 end);
-u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
-u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
-u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
-bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
-
-#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 2421507..4ebe157 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -48,6 +48,7 @@
 
 #ifdef CONFIG_MICROCODE_AMD
 extern struct microcode_ops * __init init_amd_microcode(void);
+extern void __exit exit_amd_microcode(void);
 
 static inline void get_ucode_data(void *to, const u8 *from, size_t n)
 {
@@ -59,6 +60,7 @@
 {
 	return NULL;
 }
+static inline void __exit exit_amd_microcode(void) {}
 #endif
 
 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 93f7909..0a0a954 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -67,7 +67,7 @@
 extern void mrst_early_console_init(void);
 
 extern struct console early_hsu_console;
-extern void hsu_early_console_init(void);
+extern void hsu_early_console_init(const char *);
 
 extern void intel_scu_devices_create(void);
 extern void intel_scu_devices_destroy(void);
diff --git a/arch/x86/include/asm/numachip/numachip_csr.h b/arch/x86/include/asm/numachip/numachip_csr.h
new file mode 100644
index 0000000..660f843
--- /dev/null
+++ b/arch/x86/include/asm/numachip/numachip_csr.h
@@ -0,0 +1,167 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-Specific Header file
+ *
+ * Copyright (C) 2011 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
+#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <linux/io.h>
+#include <linux/swab.h>
+#include <asm/types.h>
+#include <asm/processor.h>
+
+#define CSR_NODE_SHIFT		16
+#define CSR_NODE_BITS(p)	(((unsigned long)(p)) << CSR_NODE_SHIFT)
+#define CSR_NODE_MASK		0x0fff		/* 4K nodes */
+
+/* 32K CSR space, b15 indicates geo/non-geo */
+#define CSR_OFFSET_MASK	0x7fffUL
+
+/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */
+#define NUMACHIP_GCSR_BASE	0x3fff00000000ULL
+#define NUMACHIP_GCSR_LIM	0x3fff0fffffffULL
+#define NUMACHIP_GCSR_SIZE	(NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1)
+
+/*
+ * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however
+ * when using the direct mapping on x86_64, both start and size need to be
+ * aligned to PMD_SIZE, which is 2M
+ */
+#define NUMACHIP_LCSR_BASE	0x3ffffe000000ULL
+#define NUMACHIP_LCSR_LIM	0x3fffffffffffULL
+#define NUMACHIP_LCSR_SIZE	(NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1)
+
+static inline void *gcsr_address(int node, unsigned long offset)
+{
+	return __va(NUMACHIP_GCSR_BASE | (1UL << 15) |
+		CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK));
+}
+
+static inline void *lcsr_address(unsigned long offset)
+{
+	return __va(NUMACHIP_LCSR_BASE | (1UL << 15) |
+		CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK));
+}
+
+static inline unsigned int read_gcsr(int node, unsigned long offset)
+{
+	return swab32(readl(gcsr_address(node, offset)));
+}
+
+static inline void write_gcsr(int node, unsigned long offset, unsigned int val)
+{
+	writel(swab32(val), gcsr_address(node, offset));
+}
+
+static inline unsigned int read_lcsr(unsigned long offset)
+{
+	return swab32(readl(lcsr_address(offset)));
+}
+
+static inline void write_lcsr(unsigned long offset, unsigned int val)
+{
+	writel(swab32(val), lcsr_address(offset));
+}
+
+/* ========================================================================= */
+/*                   CSR_G0_STATE_CLEAR                                      */
+/* ========================================================================= */
+
+#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12))
+union numachip_csr_g0_state_clear {
+	unsigned int v;
+	struct numachip_csr_g0_state_clear_s {
+		unsigned int _state:2;
+		unsigned int _rsvd_2_6:5;
+		unsigned int _lost:1;
+		unsigned int _rsvd_8_31:24;
+	} s;
+};
+
+/* ========================================================================= */
+/*                   CSR_G0_NODE_IDS                                         */
+/* ========================================================================= */
+
+#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
+union numachip_csr_g0_node_ids {
+	unsigned int v;
+	struct numachip_csr_g0_node_ids_s {
+		unsigned int _initialid:16;
+		unsigned int _nodeid:12;
+		unsigned int _rsvd_28_31:4;
+	} s;
+};
+
+/* ========================================================================= */
+/*                   CSR_G3_EXT_IRQ_GEN                                      */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
+union numachip_csr_g3_ext_irq_gen {
+	unsigned int v;
+	struct numachip_csr_g3_ext_irq_gen_s {
+		unsigned int _vector:8;
+		unsigned int _msgtype:3;
+		unsigned int _index:5;
+		unsigned int _destination_apic_id:16;
+	} s;
+};
+
+/* ========================================================================= */
+/*                   CSR_G3_EXT_IRQ_STATUS                                   */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12))
+union numachip_csr_g3_ext_irq_status {
+	unsigned int v;
+	struct numachip_csr_g3_ext_irq_status_s {
+		unsigned int _result:32;
+	} s;
+};
+
+/* ========================================================================= */
+/*                   CSR_G3_EXT_IRQ_DEST                                     */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12))
+union numachip_csr_g3_ext_irq_dest {
+	unsigned int v;
+	struct numachip_csr_g3_ext_irq_dest_s {
+		unsigned int _irq:8;
+		unsigned int _rsvd_8_31:24;
+	} s;
+};
+
+/* ========================================================================= */
+/*                   CSR_G3_NC_ATT_MAP_SELECT                                */
+/* ========================================================================= */
+
+#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12))
+union numachip_csr_g3_nc_att_map_select {
+	unsigned int v;
+	struct numachip_csr_g3_nc_att_map_select_s {
+		unsigned int _upper_address_bits:4;
+		unsigned int _select_ram:4;
+		unsigned int _rsvd_8_31:24;
+	} s;
+};
+
+/* ========================================================================= */
+/*                   CSR_G3_NC_ATT_MAP_SELECT_0-255                          */
+/* ========================================================================= */
+
+#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12))
+
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
+
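The accessors are intended to be used along these lines; the node id below is a made-up example, and only CSR_G0_NODE_IDS from this header is referenced:

static void __init numachip_demo(void)
{
	union numachip_csr_g0_node_ids ids;

	ids.v = read_lcsr(CSR_G0_NODE_IDS);		/* local chip */
	pr_info("local nodeid %u\n", ids.s._nodeid);

	ids.v = read_gcsr(3, CSR_G0_NODE_IDS);		/* hypothetical node 3 */
	pr_info("node 3 initial id %u\n", ids.s._initialid);
}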
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3470c9d..529bf07e 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -451,23 +451,20 @@
 #endif /* !CONFIG_M386 */
 
 #ifdef CONFIG_X86_CMPXCHG64
-#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
 ({									\
-	char __ret;							\
-	typeof(o1) __o1 = o1;						\
-	typeof(o1) __n1 = n1;						\
-	typeof(o2) __o2 = o2;						\
-	typeof(o2) __n2 = n2;						\
-	typeof(o2) __dummy = n2;					\
+	bool __ret;							\
+	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
+	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
 	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
-		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
-		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
+		    :  "b" (__n1), "c" (__n2), "a" (__o1));		\
 	__ret;								\
 })
 
-#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define __this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
+#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
+#define irqsafe_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
 #endif /* CONFIG_X86_CMPXCHG64 */
 
 /*
@@ -508,31 +505,23 @@
  * it in software.  The address used in the cmpxchg16 instruction must be
  * aligned to a 16 byte boundary.
  */
-#ifdef CONFIG_SMP
-#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
-#else
-#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
-#endif
-#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
 ({									\
-	char __ret;							\
-	typeof(o1) __o1 = o1;						\
-	typeof(o1) __n1 = n1;						\
-	typeof(o2) __o2 = o2;						\
-	typeof(o2) __n2 = n2;						\
-	typeof(o2) __dummy;						\
-	alternative_io(CMPXCHG16B_EMU_CALL,				\
-		       "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t",	\
+	bool __ret;							\
+	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
+	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
+	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
+		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
 		       X86_FEATURE_CX16,				\
-		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
-		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
-		       "a"(__o1), "d"(__o2) : "memory");		\
+		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
+				   "+m" (pcp2), "+d" (__o2)),		\
+		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
 	__ret;								\
 })
 
-#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define __this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
+#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
+#define irqsafe_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
 
 #endif
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index f61c62f..096c975 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -57,6 +57,7 @@
 		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
 
 #define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
+#define ARCH_PERFMON_EVENTS_COUNT			7
 
 /*
  * Intel "Architectural Performance Monitoring" CPUID
@@ -72,6 +73,19 @@
 	unsigned int full;
 };
 
+union cpuid10_ebx {
+	struct {
+		unsigned int no_unhalted_core_cycles:1;
+		unsigned int no_instructions_retired:1;
+		unsigned int no_unhalted_reference_cycles:1;
+		unsigned int no_llc_reference:1;
+		unsigned int no_llc_misses:1;
+		unsigned int no_branch_instruction_retired:1;
+		unsigned int no_branch_misses_retired:1;
+	} split;
+	unsigned int full;
+};
+
 union cpuid10_edx {
 	struct {
 		unsigned int num_counters_fixed:5;
@@ -81,6 +95,15 @@
 	unsigned int full;
 };
 
+struct x86_pmu_capability {
+	int		version;
+	int		num_counters_gp;
+	int		num_counters_fixed;
+	int		bit_width_gp;
+	int		bit_width_fixed;
+	unsigned int	events_mask;
+	int		events_mask_len;
+};
 
 /*
  * Fixed-purpose performance events:
@@ -89,23 +112,24 @@
 /*
  * All 3 fixed-mode PMCs are configured via this single MSR:
  */
-#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d
 
 /*
  * The counts are available in three separate MSRs:
  */
 
 /* Instr_Retired.Any: */
-#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)
+#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
+#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)
 
 /* CPU_CLK_Unhalted.Core: */
-#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)
+#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
+#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)
 
 /* CPU_CLK_Unhalted.Ref: */
-#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
-#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)
+#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
+#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
+#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
 
 /*
  * We model BTS tracing as another fixed-mode PMC.
@@ -202,6 +226,7 @@
 };
 
 extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
+extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
 #else
 static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
 {
@@ -209,6 +234,11 @@
 	return NULL;
 }
 
+static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+	memset(cap, 0, sizeof(*cap));
+}
+
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 18601c8..49afb3f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -703,7 +703,7 @@
 	pte_update(mm, addr, ptep);
 }
 
-#define flush_tlb_fix_spurious_fault(vma, address)
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
 
 #define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
 
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 2dddb31..f8ab3ea 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -6,6 +6,7 @@
  * EFLAGS bits
  */
 #define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
+#define X86_EFLAGS_BIT1	0x00000002 /* Bit 1 - always on */
 #define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
 #define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b650435..aa9088c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -99,7 +99,6 @@
 	u16			apicid;
 	u16			initial_apicid;
 	u16			x86_clflush_size;
-#ifdef CONFIG_SMP
 	/* number of cores as seen by the OS: */
 	u16			booted_cores;
 	/* Physical processor id: */
@@ -110,7 +109,6 @@
 	u8			compute_unit_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
-#endif
 	u32			microcode;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 972c260..a82c2bf 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a1fe5c1..7404715 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,7 +40,8 @@
 						*/
 	__u8			supervisor_stack[0];
 #endif
-	int			uaccess_err;
+	int			sig_on_uaccess_error:1;
+	int			uaccess_err:1;	/* uaccess failed */
 };
 
 #define INIT_THREAD_INFO(tsk)			\
@@ -90,7 +91,6 @@
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_DEBUG		21	/* uses debug registers */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
-#define TIF_FREEZE		23	/* is freezing for suspend */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
@@ -112,7 +112,6 @@
 #define _TIF_FORK		(1 << TIF_FORK)
 #define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
@@ -231,6 +230,12 @@
 	movq PER_CPU_VAR(kernel_stack),reg ; \
 	subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
 
+/*
+ * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
+ * a certain register (to be used in assembler memory operands).
+ */
+#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
+
 #endif
 
 #endif /* !X86_32 */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c006924..800f77c 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -130,10 +130,8 @@
 	.balance_interval	= 1,					\
 }
 
-#ifdef CONFIG_X86_64
 extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
-#endif
 
 #else /* !CONFIG_NUMA */
 
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 83e2efd..15d9915 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,6 +51,8 @@
 extern int check_tsc_unstable(void);
 extern unsigned long native_calibrate_tsc(void);
 
+extern int tsc_clocksource_reliable;
+
 /*
  * Boot-time check whether the TSCs are synchronized across
  * all CPUs/cores:
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 36361bf..8be5f54 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -462,7 +462,7 @@
 	barrier();
 
 #define uaccess_catch(err)						\
-	(err) |= current_thread_info()->uaccess_err;			\
+	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
 	current_thread_info()->uaccess_err = prev_err;			\
 } while (0)
 
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 1971e65..1ac860a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -7,6 +7,7 @@
 struct mpc_bus;
 struct mpc_cpu;
 struct mpc_table;
+struct cpuinfo_x86;
 
 /**
  * struct x86_init_mpparse - platform specific mpparse ops
@@ -147,6 +148,7 @@
  */
 struct x86_cpuinit_ops {
 	void (*setup_percpu_clockev)(void);
+	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
 };
 
 /**
@@ -186,5 +188,6 @@
 
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
+extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
 
 #endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4558f0d..ce664f3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -219,6 +219,8 @@
 acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 {
 	struct acpi_madt_local_x2apic *processor = NULL;
+	int apic_id;
+	u8 enabled;
 
 	processor = (struct acpi_madt_local_x2apic *)header;
 
@@ -227,6 +229,8 @@
 
 	acpi_table_print_madt_entry(header);
 
+	apic_id = processor->local_apic_id;
+	enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
 #ifdef CONFIG_X86_X2APIC
 	/*
 	 * We need to register disabled CPU as well to permit
@@ -235,8 +239,10 @@
 	 * to not preallocating memory for all NR_CPUS
 	 * when we use CPU hotplug.
 	 */
-	acpi_register_lapic(processor->local_apic_id,	/* APIC ID */
-			    processor->lapic_flags & ACPI_MADT_ENABLED);
+	if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled)
+		printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+	else
+		acpi_register_lapic(apic_id, enabled);
 #else
 	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
 #endif
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4c39baa..013c181 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -123,16 +123,14 @@
 {
 	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
 	unsigned int mask;
-	int cuid = 0;
+	int cuid;
 
 	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return 0;
 
 	pci_read_config_dword(link, 0x1d4, &mask);
 
-#ifdef CONFIG_SMP
 	cuid = cpu_data(cpu).compute_unit_id;
-#endif
 	return (mask >> (4 * cuid)) & 0xf;
 }
 
@@ -141,7 +139,7 @@
 	static unsigned int reset, ban;
 	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
 	unsigned int reg;
-	int cuid = 0;
+	int cuid;
 
 	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
 		return -EINVAL;
@@ -159,9 +157,7 @@
 		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
 	}
 
-#ifdef CONFIG_SMP
 	cuid = cpu_data(cpu).compute_unit_id;
-#endif
 	mask <<= 4 * cuid;
 	mask |= (0xf ^ (1 << cuid)) << 26;
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3d2661c..6e76c19 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -88,13 +88,13 @@
 	 */
 	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
 				      aper_size, aper_size);
-	if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
+	if (!addr || addr + aper_size > GART_MAX_ADDR) {
 		printk(KERN_ERR
 			"Cannot allocate aperture memory hole (%lx,%uK)\n",
 				addr, aper_size>>10);
 		return 0;
 	}
-	memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
+	memblock_reserve(addr, aper_size);
 	/*
 	 * Kmemleak should not scan this block as it may not be mapped via the
 	 * kernel direct mapping.
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 767fd04..0ae0323 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -10,6 +10,7 @@
 
 ifeq ($(CONFIG_X86_64),y)
 # APIC probe will depend on the listing order here
+obj-$(CONFIG_X86_NUMACHIP)	+= apic_numachip.o
 obj-$(CONFIG_X86_UV)		+= x2apic_uv_x.o
 obj-$(CONFIG_X86_X2APIC)	+= x2apic_phys.o
 obj-$(CONFIG_X86_X2APIC)	+= x2apic_cluster.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f98d84c..2eec05b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -146,16 +146,26 @@
 int x2apic_mode;
 #ifdef CONFIG_X86_X2APIC
 /* x2apic enabled before OS handover */
-static int x2apic_preenabled;
+int x2apic_preenabled;
+static int x2apic_disabled;
+static int nox2apic;
 static __init int setup_nox2apic(char *str)
 {
 	if (x2apic_enabled()) {
-		pr_warning("Bios already enabled x2apic, "
-			   "can't enforce nox2apic");
-		return 0;
-	}
+		int apicid = native_apic_msr_read(APIC_ID);
 
-	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+		if (apicid >= 255) {
+			pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
+				   apicid);
+			return 0;
+		}
+
+		pr_warning("x2apic already enabled. will disable it\n");
+	} else
+		setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+
+	nox2apic = 1;
+
 	return 0;
 }
 early_param("nox2apic", setup_nox2apic);
@@ -250,6 +260,7 @@
 		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
 		if (!send_status)
 			break;
+		inc_irq_stat(icr_read_retry_count);
 		udelay(100);
 	} while (timeout++ < 1000);
 
@@ -876,8 +887,8 @@
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	local_apic_timer_interrupt();
 	irq_exit();
 
@@ -1431,6 +1442,45 @@
 }
 
 #ifdef CONFIG_X86_X2APIC
+/*
+ * Need to disable xapic and x2apic at the same time and then enable xapic mode
+ */
+static inline void __disable_x2apic(u64 msr)
+{
+	wrmsrl(MSR_IA32_APICBASE,
+	       msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
+	wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
+}
+
+static __init void disable_x2apic(void)
+{
+	u64 msr;
+
+	if (!cpu_has_x2apic)
+		return;
+
+	rdmsrl(MSR_IA32_APICBASE, msr);
+	if (msr & X2APIC_ENABLE) {
+		u32 x2apic_id = read_apic_id();
+
+		if (x2apic_id >= 255)
+			panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
+
+		pr_info("Disabling x2apic\n");
+		__disable_x2apic(msr);
+
+		if (nox2apic) {
+			clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
+			setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+		}
+
+		x2apic_disabled = 1;
+		x2apic_mode = 0;
+
+		register_lapic_address(mp_lapic_addr);
+	}
+}
+
 void check_x2apic(void)
 {
 	if (x2apic_enabled()) {
@@ -1441,15 +1491,20 @@
 
 void enable_x2apic(void)
 {
-	int msr, msr2;
+	u64 msr;
+
+	rdmsrl(MSR_IA32_APICBASE, msr);
+	if (x2apic_disabled) {
+		__disable_x2apic(msr);
+		return;
+	}
 
 	if (!x2apic_mode)
 		return;
 
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
 	if (!(msr & X2APIC_ENABLE)) {
 		printk_once(KERN_INFO "Enabling x2apic\n");
-		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, msr2);
+		wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
 	}
 }
 #endif /* CONFIG_X86_X2APIC */
@@ -1486,25 +1541,34 @@
 	ret = save_ioapic_entries();
 	if (ret) {
 		pr_info("Saving IO-APIC state failed: %d\n", ret);
-		goto out;
+		return;
 	}
 
 	local_irq_save(flags);
 	legacy_pic->mask_all();
 	mask_ioapic_entries();
 
+	if (x2apic_preenabled && nox2apic)
+		disable_x2apic();
+
 	if (dmar_table_init_ret)
 		ret = -1;
 	else
 		ret = enable_IR();
 
+	if (!x2apic_supported())
+		goto skip_x2apic;
+
 	if (ret < 0) {
 		/* IR is required if there is APIC ID > 255 even when running
 		 * under KVM
 		 */
 		if (max_physical_apicid > 255 ||
-		    !hypervisor_x2apic_available())
-			goto nox2apic;
+		    !hypervisor_x2apic_available()) {
+			if (x2apic_preenabled)
+				disable_x2apic();
+			goto skip_x2apic;
+		}
 		/*
 		 * without IR all CPUs can be addressed by IOAPIC/MSI
 		 * only in physical mode
@@ -1512,8 +1576,10 @@
 		x2apic_force_phys();
 	}
 
-	if (ret == IRQ_REMAP_XAPIC_MODE)
-		goto nox2apic;
+	if (ret == IRQ_REMAP_XAPIC_MODE) {
+		pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
+		goto skip_x2apic;
+	}
 
 	x2apic_enabled = 1;
 
@@ -1523,22 +1589,11 @@
 		pr_info("Enabled x2apic\n");
 	}
 
-nox2apic:
+skip_x2apic:
 	if (ret < 0) /* IR enabling failed */
 		restore_ioapic_entries();
 	legacy_pic->restore_mask();
 	local_irq_restore(flags);
-
-out:
-	if (x2apic_enabled || !x2apic_supported())
-		return;
-
-	if (x2apic_preenabled)
-		panic("x2apic: enabled by BIOS but kernel init failed.");
-	else if (ret == IRQ_REMAP_XAPIC_MODE)
-		pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
-	else if (ret < 0)
-		pr_info("x2apic not enabled, IRQ remapping init failed\n");
 }
 
 #ifdef CONFIG_X86_64
@@ -1809,8 +1864,8 @@
 {
 	u32 v;
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
 	 * if it is a vectored one.  Just in case...
@@ -1846,8 +1901,8 @@
 		"Illegal register address",	/* APIC Error Bit 7 */
 	};
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v0 = apic_read(APIC_ESR);
 	apic_write(APIC_ESR, 0);
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index f7a41e4..8c3cdde 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -62,7 +62,7 @@
  * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
  * document number 292116).  So here it goes...
  */
-static void flat_init_apic_ldr(void)
+void flat_init_apic_ldr(void)
 {
 	unsigned long val;
 	unsigned long num, id;
@@ -171,9 +171,14 @@
 	return initial_apic_id >> index_msb;
 }
 
+static int flat_probe(void)
+{
+	return 1;
+}
+
 static struct apic apic_flat =  {
 	.name				= "flat",
-	.probe				= NULL,
+	.probe				= flat_probe,
 	.acpi_madt_oem_check		= flat_acpi_madt_oem_check,
 	.apic_id_registered		= flat_apic_id_registered,
 
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
new file mode 100644
index 0000000..09d3d8c
--- /dev/null
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -0,0 +1,294 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-Specific APIC Code
+ *
+ * Copyright (C) 2011 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+
+#include <asm/numachip/numachip_csr.h>
+#include <asm/smp.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+#include <asm/apic_flat_64.h>
+
+static int numachip_system __read_mostly;
+
+static struct apic apic_numachip __read_mostly;
+
+static unsigned int get_apic_id(unsigned long x)
+{
+	unsigned long value;
+	unsigned int id;
+
+	rdmsrl(MSR_FAM10H_NODE_ID, value);
+	id = ((x >> 24) & 0xffU) | ((value << 2) & 0x3f00U);
+
+	return id;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+	unsigned long x;
+
+	x = ((id & 0xffU) << 24);
+	return x;
+}
+
+static unsigned int read_xapic_id(void)
+{
+	return get_apic_id(apic_read(APIC_ID));
+}
+
+static int numachip_apic_id_registered(void)
+{
+	return physid_isset(read_xapic_id(), phys_cpu_present_map);
+}
+
+static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+	return initial_apic_id >> index_msb;
+}
+
+static const struct cpumask *numachip_target_cpus(void)
+{
+	return cpu_online_mask;
+}
+
+static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
+}
+
+static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+{
+	union numachip_csr_g3_ext_irq_gen int_gen;
+
+	int_gen.s._destination_apic_id = phys_apicid;
+	int_gen.s._vector = 0;
+	int_gen.s._msgtype = APIC_DM_INIT >> 8;
+	int_gen.s._index = 0;
+
+	write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+
+	int_gen.s._msgtype = APIC_DM_STARTUP >> 8;
+	int_gen.s._vector = start_rip >> 12;
+
+	write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+
+	atomic_set(&init_deasserted, 1);
+	return 0;
+}
+
+static void numachip_send_IPI_one(int cpu, int vector)
+{
+	union numachip_csr_g3_ext_irq_gen int_gen;
+	int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+
+	int_gen.s._destination_apic_id = apicid;
+	int_gen.s._vector = vector;
+	int_gen.s._msgtype = (vector == NMI_VECTOR ? APIC_DM_NMI : APIC_DM_FIXED) >> 8;
+	int_gen.s._index = 0;
+
+	write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+}
+
+static void numachip_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask)
+		numachip_send_IPI_one(cpu, vector);
+}
+
+static void numachip_send_IPI_mask_allbutself(const struct cpumask *mask,
+						int vector)
+{
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (cpu != this_cpu)
+			numachip_send_IPI_one(cpu, vector);
+	}
+}
+
+static void numachip_send_IPI_allbutself(int vector)
+{
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != this_cpu)
+			numachip_send_IPI_one(cpu, vector);
+	}
+}
+
+static void numachip_send_IPI_all(int vector)
+{
+	numachip_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static void numachip_send_IPI_self(int vector)
+{
+	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+}
+
+static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	cpu = cpumask_first(cpumask);
+	if (likely((unsigned)cpu < nr_cpu_ids))
+		return per_cpu(x86_cpu_to_apicid, cpu);
+
+	return BAD_APICID;
+}
+
+static unsigned int
+numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+				const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	}
+	return per_cpu(x86_cpu_to_apicid, cpu);
+}
+
+static int __init numachip_probe(void)
+{
+	return apic == &apic_numachip;
+}
+
+static void __init map_csrs(void)
+{
+	printk(KERN_INFO "NumaChip: Mapping local CSR space (%016llx - %016llx)\n",
+		NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_BASE + NUMACHIP_LCSR_SIZE - 1);
+	init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE);
+
+	printk(KERN_INFO "NumaChip: Mapping global CSR space (%016llx - %016llx)\n",
+		NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_BASE + NUMACHIP_GCSR_SIZE - 1);
+	init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE);
+}
+
+static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
+{
+	c->phys_proc_id = node;
+	per_cpu(cpu_llc_id, smp_processor_id()) = node;
+}
+
+static int __init numachip_system_init(void)
+{
+	unsigned int val;
+
+	if (!numachip_system)
+		return 0;
+
+	x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
+
+	map_csrs();
+
+	val = read_lcsr(CSR_G0_NODE_IDS);
+	printk(KERN_INFO "NumaChip: Local NodeID = %08x\n", val);
+
+	return 0;
+}
+early_initcall(numachip_system_init);
+
+static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	if (!strncmp(oem_id, "NUMASC", 6)) {
+		numachip_system = 1;
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct apic apic_numachip __refconst = {
+
+	.name				= "NumaConnect system",
+	.probe				= numachip_probe,
+	.acpi_madt_oem_check		= numachip_acpi_madt_oem_check,
+	.apic_id_registered		= numachip_apic_id_registered,
+
+	.irq_delivery_mode		= dest_Fixed,
+	.irq_dest_mode			= 0, /* physical */
+
+	.target_cpus			= numachip_target_cpus,
+	.disable_esr			= 0,
+	.dest_logical			= 0,
+	.check_apicid_used		= NULL,
+	.check_apicid_present		= NULL,
+
+	.vector_allocation_domain	= numachip_vector_allocation_domain,
+	.init_apic_ldr			= flat_init_apic_ldr,
+
+	.ioapic_phys_id_map		= NULL,
+	.setup_apic_routing		= NULL,
+	.multi_timer_check		= NULL,
+	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
+	.apicid_to_cpu_present		= NULL,
+	.setup_portio_remap		= NULL,
+	.check_phys_apicid_present	= default_check_phys_apicid_present,
+	.enable_apic_mode		= NULL,
+	.phys_pkg_id			= numachip_phys_pkg_id,
+	.mps_oem_check			= NULL,
+
+	.get_apic_id			= get_apic_id,
+	.set_apic_id			= set_apic_id,
+	.apic_id_mask			= 0xffU << 24,
+
+	.cpu_mask_to_apicid		= numachip_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and		= numachip_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask			= numachip_send_IPI_mask,
+	.send_IPI_mask_allbutself	= numachip_send_IPI_mask_allbutself,
+	.send_IPI_allbutself		= numachip_send_IPI_allbutself,
+	.send_IPI_all			= numachip_send_IPI_all,
+	.send_IPI_self			= numachip_send_IPI_self,
+
+	.wakeup_secondary_cpu		= numachip_wakeup_secondary,
+	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
+	.wait_for_init_deassert		= NULL,
+	.smp_callin_clear_local_apic	= NULL,
+	.inquire_remote_apic		= NULL, /* REMRD not supported */
+
+	.read				= native_apic_mem_read,
+	.write				= native_apic_mem_write,
+	.icr_read			= native_apic_icr_read,
+	.icr_write			= native_apic_icr_write,
+	.wait_icr_idle			= native_apic_wait_icr_idle,
+	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
+};
+apic_driver(apic_numachip);
+
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d939d7..fb07275 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2421,8 +2421,8 @@
 	unsigned vector, me;
 
 	ack_APIC_irq();
-	exit_idle();
 	irq_enter();
+	exit_idle();
 
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -2948,6 +2948,10 @@
 	}
 	local_irq_disable();
 	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
+	if (x2apic_preenabled)
+		apic_printk(APIC_QUIET, KERN_INFO
+			    "Perhaps problem with the pre-enabled x2apic mode\n"
+			    "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
 	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
 		"report.  Then try booting with the 'noapic' option.\n");
 out:
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 452932d..5da1269 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -62,7 +62,8 @@
 
 void __init setup_bios_corruption_check(void)
 {
-	u64 addr = PAGE_SIZE;	/* assume first page is reserved anyway */
+	phys_addr_t start, end;
+	u64 i;
 
 	if (memory_corruption_check == -1) {
 		memory_corruption_check =
@@ -82,28 +83,23 @@
 
 	corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
 
-	while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
-		u64 size;
-		addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+		start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
+				PAGE_SIZE, corruption_check_size);
+		end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
+			      PAGE_SIZE, corruption_check_size);
+		if (start >= end)
+			continue;
 
-		if (addr == MEMBLOCK_ERROR)
-			break;
-
-		if (addr >= corruption_check_size)
-			break;
-
-		if ((addr + size) > corruption_check_size)
-			size = corruption_check_size - addr;
-
-		memblock_x86_reserve_range(addr, addr + size, "SCAN RAM");
-		scan_areas[num_scan_areas].addr = addr;
-		scan_areas[num_scan_areas].size = size;
-		num_scan_areas++;
+		memblock_reserve(start, end - start);
+		scan_areas[num_scan_areas].addr = start;
+		scan_areas[num_scan_areas].size = end - start;
 
 		/* Assume we've already mapped this early memory */
-		memset(__va(addr), 0, size);
+		memset(__va(start), 0, end - start);
 
-		addr += size;
+		if (++num_scan_areas >= MAX_SCAN_AREAS)
+			break;
 	}
 
 	if (num_scan_areas)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0bab2b1..f4773f4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -148,7 +148,6 @@
 
 static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
 		return;
@@ -192,7 +191,6 @@
 
 valid_k7:
 	;
-#endif
 }
 
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -353,6 +351,13 @@
 	if (node == NUMA_NO_NODE)
 		node = per_cpu(cpu_llc_id, cpu);
 
+	/*
+	 * If core numbers are inconsistent, it's likely a multi-fabric platform,
+	 * so invoke platform-specific handler
+	 */
+	if (c->phys_proc_id != node)
+		x86_cpuinit.fixup_cpu_id(c, node);
+
 	if (!node_online(node)) {
 		/*
 		 * Two possibilities here:
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e58d978..159103c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -278,7 +278,7 @@
 	}
 #ifdef CONFIG_X86_32
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
-	if (c->x86_model >= 6 && c->x86_model <= 9) {
+	if (c->x86_model >= 6 && c->x86_model <= 13) {
 		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= (1<<1 | 1<<7);
 		wrmsr(MSR_VIA_FCR, lo, hi);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index aa003b1..850f296 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -676,9 +676,7 @@
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-#ifdef CONFIG_SMP
 	c->cpu_index = 0;
-#endif
 	filter_cpuid_features(c, false);
 
 	setup_smep(c);
@@ -764,10 +762,7 @@
 		c->apicid = c->initial_apicid;
 # endif
 #endif
-
-#ifdef CONFIG_X86_HT
 		c->phys_proc_id = c->initial_apicid;
-#endif
 	}
 
 	setup_smep(c);
@@ -1141,6 +1136,15 @@
 #endif /* ! CONFIG_KGDB */
 
 /*
+ * Prints an error when the NUMA node and the configured core number mismatch
+ * and the platform didn't override this handler to fix it up
+ */
+void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
+{
+	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
+}
+
+/*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 1b22dcc..8bacc78 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -1,5 +1,4 @@
 #ifndef ARCH_X86_CPU_H
-
 #define ARCH_X86_CPU_H
 
 struct cpu_model_info {
@@ -35,6 +34,4 @@
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
-extern void get_cpu_cap(struct cpuinfo_x86 *c);
-
-#endif
+#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5231312..3e6ff6c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -181,7 +181,6 @@
 
 static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
 		return;
@@ -198,7 +197,6 @@
 		WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
 				    "with B stepping processors.\n");
 	}
-#endif
 }
 
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a3b0811..6b45e5e 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -844,8 +844,7 @@
 
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
-
-extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
+#include <linux/cpu.h>
 
 /* pointer to kobject for cpuX/cache */
 static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
@@ -1073,9 +1072,9 @@
 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct device *dev)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	unsigned long i, j;
 	struct _index_kobject *this_object;
 	struct _cpuid4_info   *this_leaf;
@@ -1087,7 +1086,7 @@
 
 	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
 				      &ktype_percpu_entry,
-				      &sys_dev->kobj, "%s", "cache");
+				      &dev->kobj, "%s", "cache");
 	if (retval < 0) {
 		cpuid4_cache_sysfs_exit(cpu);
 		return retval;
@@ -1124,9 +1123,9 @@
 	return 0;
 }
 
-static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+static void __cpuinit cache_remove_dev(struct device *dev)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	unsigned long i;
 
 	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
@@ -1145,17 +1144,17 @@
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
+	dev = get_cpu_device(cpu);
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		cache_add_dev(sys_dev);
+		cache_add_dev(dev);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		cache_remove_dev(sys_dev);
+		cache_remove_dev(dev);
 		break;
 	}
 	return NOTIFY_OK;
@@ -1174,9 +1173,9 @@
 
 	for_each_online_cpu(i) {
 		int err;
-		struct sys_device *sys_dev = get_cpu_sysdev(i);
+		struct device *dev = get_cpu_device(i);
 
-		err = cache_add_dev(sys_dev);
+		err = cache_add_dev(dev);
 		if (err)
 			return err;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 319882e..fc4beb3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/fs.h>
+#include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/notifier.h>
 #include <linux/kdebug.h>
@@ -92,6 +93,18 @@
 	return NMI_HANDLED;
 }
 
+static void mce_irq_ipi(void *info)
+{
+	int cpu = smp_processor_id();
+	struct mce *m = &__get_cpu_var(injectm);
+
+	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
+			m->inject_flags & MCJ_EXCEPTION) {
+		cpumask_clear_cpu(cpu, mce_inject_cpumask);
+		raise_exception(m, NULL);
+	}
+}
+
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
@@ -139,9 +152,10 @@
 		return;
 
 #ifdef CONFIG_X86_LOCAL_APIC
-	if (m->inject_flags & MCJ_NMI_BROADCAST) {
+	if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
 		unsigned long start;
 		int cpu;
+
 		get_online_cpus();
 		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
 		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
@@ -151,13 +165,25 @@
 			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
 				cpumask_clear_cpu(cpu, mce_inject_cpumask);
 		}
-		if (!cpumask_empty(mce_inject_cpumask))
-			apic->send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
+		if (!cpumask_empty(mce_inject_cpumask)) {
+			if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
+				/*
+				 * don't wait because mce_irq_ipi needs to be
+				 * in sync with the following raise_local
+				 */
+				preempt_disable();
+				smp_call_function_many(mce_inject_cpumask,
+					mce_irq_ipi, NULL, 0);
+				preempt_enable();
+			} else if (m->inject_flags & MCJ_NMI_BROADCAST)
+				apic->send_IPI_mask(mce_inject_cpumask,
+						NMI_VECTOR);
+		}
 		start = jiffies;
 		while (!cpumask_empty(mce_inject_cpumask)) {
 			if (!time_before(jiffies, start + 2*HZ)) {
 				printk(KERN_ERR
-				"Timeout waiting for mce inject NMI %lx\n",
+				"Timeout waiting for mce inject %lx\n",
 					*cpumask_bits(mce_inject_cpumask));
 				break;
 			}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index fefcc69..ed44c8a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,4 +1,4 @@
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <asm/mce.h>
 
 enum severity_level {
@@ -17,7 +17,7 @@
 struct mce_bank {
 	u64			ctl;			/* subevents to enable */
 	unsigned char init;				/* initialise bank? */
-	struct sysdev_attribute attr;			/* sysdev attribute */
+	struct device_attribute attr;			/* device attribute */
 	char			attrname[ATTR_LEN];	/* attribute name */
 };
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2af127d..f22a9f7 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -19,7 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/string.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/delay.h>
 #include <linux/ctype.h>
@@ -95,13 +95,6 @@
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int			cpu_missing;
 
-/*
- * CPU/chipset specific EDAC code can register a notifier call here to print
- * MCE errors in a human-readable form.
- */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
-EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
-
 /* MCA banks polled by the period polling timer for corrected events */
 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
@@ -109,6 +102,12 @@
 
 static DEFINE_PER_CPU(struct work_struct, mce_work);
 
+/*
+ * CPU/chipset specific EDAC code can register a notifier call here to print
+ * MCE errors in a human-readable form.
+ */
+ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
 {
@@ -119,9 +118,7 @@
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
 	m->cpuid = cpuid_eax(1);
-#ifdef CONFIG_SMP
 	m->socketid = cpu_data(m->extcpu).phys_proc_id;
-#endif
 	m->apicid = cpu_data(m->extcpu).initial_apicid;
 	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
 }
@@ -190,6 +187,57 @@
 	set_bit(0, &mce_need_notify);
 }
 
+static void drain_mcelog_buffer(void)
+{
+	unsigned int next, i, prev = 0;
+
+	next = rcu_dereference_check_mce(mcelog.next);
+
+	do {
+		struct mce *m;
+
+		/* drain what was logged during boot */
+		for (i = prev; i < next; i++) {
+			unsigned long start = jiffies;
+			unsigned retries = 1;
+
+			m = &mcelog.entry[i];
+
+			while (!m->finished) {
+				if (time_after_eq(jiffies, start + 2*retries))
+					retries++;
+
+				cpu_relax();
+
+				if (!m->finished && retries >= 4) {
+					pr_err("MCE: skipping error being logged currently!\n");
+					break;
+				}
+			}
+			smp_rmb();
+			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+		}
+
+		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
+		prev = next;
+		next = cmpxchg(&mcelog.next, prev, 0);
+	} while (next != prev);
+}
+
+
+void mce_register_decode_chain(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+	drain_mcelog_buffer();
+}
+EXPORT_SYMBOL_GPL(mce_register_decode_chain);
+
+void mce_unregister_decode_chain(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+}
+EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
+
 static void print_mce(struct mce *m)
 {
 	int ret = 0;
@@ -1770,7 +1818,7 @@
 };
 
 /*
- * mce_sysdev: Sysfs support
+ * mce_device: Sysfs support
  */
 
 static void mce_cpu_restart(void *data)
@@ -1806,27 +1854,28 @@
 		__mcheck_cpu_init_timer();
 }
 
-static struct sysdev_class mce_sysdev_class = {
+static struct bus_type mce_subsys = {
 	.name		= "machinecheck",
+	.dev_name	= "machinecheck",
 };
 
-DEFINE_PER_CPU(struct sys_device, mce_sysdev);
+DEFINE_PER_CPU(struct device, mce_device);
 
 __cpuinitdata
 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
-static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
+static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
 {
 	return container_of(attr, struct mce_bank, attr);
 }
 
-static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+static ssize_t show_bank(struct device *s, struct device_attribute *attr,
 			 char *buf)
 {
 	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
 }
 
-static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+static ssize_t set_bank(struct device *s, struct device_attribute *attr,
 			const char *buf, size_t size)
 {
 	u64 new;
@@ -1841,14 +1890,14 @@
 }
 
 static ssize_t
-show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
+show_trigger(struct device *s, struct device_attribute *attr, char *buf)
 {
 	strcpy(buf, mce_helper);
 	strcat(buf, "\n");
 	return strlen(mce_helper) + 1;
 }
 
-static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
+static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
 				const char *buf, size_t siz)
 {
 	char *p;
@@ -1863,8 +1912,8 @@
 	return strlen(mce_helper) + !!p;
 }
 
-static ssize_t set_ignore_ce(struct sys_device *s,
-			     struct sysdev_attribute *attr,
+static ssize_t set_ignore_ce(struct device *s,
+			     struct device_attribute *attr,
 			     const char *buf, size_t size)
 {
 	u64 new;
@@ -1887,8 +1936,8 @@
 	return size;
 }
 
-static ssize_t set_cmci_disabled(struct sys_device *s,
-				 struct sysdev_attribute *attr,
+static ssize_t set_cmci_disabled(struct device *s,
+				 struct device_attribute *attr,
 				 const char *buf, size_t size)
 {
 	u64 new;
@@ -1910,108 +1959,107 @@
 	return size;
 }
 
-static ssize_t store_int_with_restart(struct sys_device *s,
-				      struct sysdev_attribute *attr,
+static ssize_t store_int_with_restart(struct device *s,
+				      struct device_attribute *attr,
 				      const char *buf, size_t size)
 {
-	ssize_t ret = sysdev_store_int(s, attr, buf, size);
+	ssize_t ret = device_store_int(s, attr, buf, size);
 	mce_restart();
 	return ret;
 }
 
-static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
-static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
-static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
-static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
+static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
+static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
+static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
+static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
 
-static struct sysdev_ext_attribute attr_check_interval = {
-	_SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
-		     store_int_with_restart),
+static struct dev_ext_attribute dev_attr_check_interval = {
+	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
 	&check_interval
 };
 
-static struct sysdev_ext_attribute attr_ignore_ce = {
-	_SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
+static struct dev_ext_attribute dev_attr_ignore_ce = {
+	__ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
 	&mce_ignore_ce
 };
 
-static struct sysdev_ext_attribute attr_cmci_disabled = {
-	_SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
+static struct dev_ext_attribute dev_attr_cmci_disabled = {
+	__ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
 	&mce_cmci_disabled
 };
 
-static struct sysdev_attribute *mce_sysdev_attrs[] = {
-	&attr_tolerant.attr,
-	&attr_check_interval.attr,
-	&attr_trigger,
-	&attr_monarch_timeout.attr,
-	&attr_dont_log_ce.attr,
-	&attr_ignore_ce.attr,
-	&attr_cmci_disabled.attr,
+static struct device_attribute *mce_device_attrs[] = {
+	&dev_attr_tolerant.attr,
+	&dev_attr_check_interval.attr,
+	&dev_attr_trigger,
+	&dev_attr_monarch_timeout.attr,
+	&dev_attr_dont_log_ce.attr,
+	&dev_attr_ignore_ce.attr,
+	&dev_attr_cmci_disabled.attr,
 	NULL
 };
 
-static cpumask_var_t mce_sysdev_initialized;
+static cpumask_var_t mce_device_initialized;
 
-/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_sysdev_create(unsigned int cpu)
+/* Per cpu device init. All of the cpus still share the same ctrl bank: */
+static __cpuinit int mce_device_create(unsigned int cpu)
 {
-	struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
+	struct device *dev = &per_cpu(mce_device, cpu);
 	int err;
 	int i, j;
 
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
 
-	memset(&sysdev->kobj, 0, sizeof(struct kobject));
-	sysdev->id  = cpu;
-	sysdev->cls = &mce_sysdev_class;
+	memset(&dev->kobj, 0, sizeof(struct kobject));
+	dev->id  = cpu;
+	dev->bus = &mce_subsys;
 
-	err = sysdev_register(sysdev);
+	err = device_register(dev);
 	if (err)
 		return err;
 
-	for (i = 0; mce_sysdev_attrs[i]; i++) {
-		err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
+	for (i = 0; mce_device_attrs[i]; i++) {
+		err = device_create_file(dev, mce_device_attrs[i]);
 		if (err)
 			goto error;
 	}
 	for (j = 0; j < banks; j++) {
-		err = sysdev_create_file(sysdev, &mce_banks[j].attr);
+		err = device_create_file(dev, &mce_banks[j].attr);
 		if (err)
 			goto error2;
 	}
-	cpumask_set_cpu(cpu, mce_sysdev_initialized);
+	cpumask_set_cpu(cpu, mce_device_initialized);
 
 	return 0;
 error2:
 	while (--j >= 0)
-		sysdev_remove_file(sysdev, &mce_banks[j].attr);
+		device_remove_file(dev, &mce_banks[j].attr);
 error:
 	while (--i >= 0)
-		sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
+		device_remove_file(dev, mce_device_attrs[i]);
 
-	sysdev_unregister(sysdev);
+	device_unregister(dev);
 
 	return err;
 }
 
-static __cpuinit void mce_sysdev_remove(unsigned int cpu)
+static __cpuinit void mce_device_remove(unsigned int cpu)
 {
-	struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
+	struct device *dev = &per_cpu(mce_device, cpu);
 	int i;
 
-	if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
+	if (!cpumask_test_cpu(cpu, mce_device_initialized))
 		return;
 
-	for (i = 0; mce_sysdev_attrs[i]; i++)
-		sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
+	for (i = 0; mce_device_attrs[i]; i++)
+		device_remove_file(dev, mce_device_attrs[i]);
 
 	for (i = 0; i < banks; i++)
-		sysdev_remove_file(sysdev, &mce_banks[i].attr);
+		device_remove_file(dev, &mce_banks[i].attr);
 
-	sysdev_unregister(sysdev);
-	cpumask_clear_cpu(cpu, mce_sysdev_initialized);
+	device_unregister(dev);
+	cpumask_clear_cpu(cpu, mce_device_initialized);
 }
 
 /* Make sure there are no machine checks on offlined CPUs. */
@@ -2061,7 +2109,7 @@
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		mce_sysdev_create(cpu);
+		mce_device_create(cpu);
 		if (threshold_cpu_callback)
 			threshold_cpu_callback(action, cpu);
 		break;
@@ -2069,7 +2117,7 @@
 	case CPU_DEAD_FROZEN:
 		if (threshold_cpu_callback)
 			threshold_cpu_callback(action, cpu);
-		mce_sysdev_remove(cpu);
+		mce_device_remove(cpu);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -2103,7 +2151,7 @@
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
-		struct sysdev_attribute *a = &b->attr;
+		struct device_attribute *a = &b->attr;
 
 		sysfs_attr_init(&a->attr);
 		a->attr.name	= b->attrname;
@@ -2123,16 +2171,16 @@
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
 
-	zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);
+	zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
 
 	mce_init_banks();
 
-	err = sysdev_class_register(&mce_sysdev_class);
+	err = subsys_system_register(&mce_subsys, NULL);
 	if (err)
 		return err;
 
 	for_each_online_cpu(i) {
-		err = mce_sysdev_create(i);
+		err = mce_device_create(i);
 		if (err)
 			return err;
 	}
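
The decoder notifier chain is no longer exported directly; EDAC-style consumers register through mce_register_decode_chain(), which also replays any entries already collected in the mcelog buffer during early boot. A minimal sketch of such a consumer (all names hypothetical, assuming an ordinary kernel module):

static int my_decode_mce(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct mce *m = data;

	pr_info("decoded MCE: CPU%d bank %d status 0x%llx\n",
		m->extcpu, m->bank, m->status);
	return NOTIFY_OK;
}

static struct notifier_block my_mce_decoder = {
	.notifier_call	= my_decode_mce,
};

static int __init my_decoder_init(void)
{
	mce_register_decode_chain(&my_mce_decoder);
	return 0;
}

static void __exit my_decoder_exit(void)
{
	mce_unregister_decode_chain(&my_mce_decoder);
}
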
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index f547421..ba0b94a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -17,7 +17,6 @@
 #include <linux/notifier.h>
 #include <linux/kobject.h>
 #include <linux/percpu.h>
-#include <linux/sysdev.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/sysfs.h>
@@ -64,11 +63,9 @@
 };
 static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
 
-#ifdef CONFIG_SMP
 static unsigned char shared_bank[NR_BANKS] = {
 	0, 0, 0, 0, 1
 };
-#endif
 
 static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */
 
@@ -202,10 +199,9 @@
 
 			if (!block)
 				per_cpu(bank_map, cpu) |= (1 << bank);
-#ifdef CONFIG_SMP
 			if (shared_bank[bank] && c->cpu_core_id)
 				break;
-#endif
+
 			offset = setup_APIC_mce(offset,
 						(high & MASK_LVTOFF_HI) >> 20);
 
@@ -531,7 +527,6 @@
 
 	sprintf(name, "threshold_bank%i", bank);
 
-#ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
 		i = cpumask_first(cpu_llc_shared_mask(cpu));
 
@@ -548,7 +543,7 @@
 		if (!b)
 			goto out;
 
-		err = sysfs_create_link(&per_cpu(mce_sysdev, cpu).kobj,
+		err = sysfs_create_link(&per_cpu(mce_device, cpu).kobj,
 					b->kobj, name);
 		if (err)
 			goto out;
@@ -558,7 +553,6 @@
 
 		goto out;
 	}
-#endif
 
 	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
 	if (!b) {
@@ -571,7 +565,7 @@
 		goto out;
 	}
 
-	b->kobj = kobject_create_and_add(name, &per_cpu(mce_sysdev, cpu).kobj);
+	b->kobj = kobject_create_and_add(name, &per_cpu(mce_device, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;
 
@@ -591,7 +585,7 @@
 		if (i == cpu)
 			continue;
 
-		err = sysfs_create_link(&per_cpu(mce_sysdev, i).kobj,
+		err = sysfs_create_link(&per_cpu(mce_device, i).kobj,
 					b->kobj, name);
 		if (err)
 			goto out;
@@ -669,7 +663,7 @@
 #ifdef CONFIG_SMP
 	/* sibling symlink */
 	if (shared_bank[bank] && b->blocks->cpu != cpu) {
-		sysfs_remove_link(&per_cpu(mce_sysdev, cpu).kobj, name);
+		sysfs_remove_link(&per_cpu(mce_device, cpu).kobj, name);
 		per_cpu(threshold_banks, cpu)[bank] = NULL;
 
 		return;
@@ -681,7 +675,7 @@
 		if (i == cpu)
 			continue;
 
-		sysfs_remove_link(&per_cpu(mce_sysdev, i).kobj, name);
+		sysfs_remove_link(&per_cpu(mce_device, i).kobj, name);
 		per_cpu(threshold_banks, i)[bank] = NULL;
 	}
 
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 787e06c..67bb17a 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/export.h>
-#include <linux/sysdev.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/smp.h>
@@ -69,16 +68,16 @@
 static u32 lvtthmr_init __read_mostly;
 
 #ifdef CONFIG_SYSFS
-#define define_therm_throt_sysdev_one_ro(_name)				\
-	static SYSDEV_ATTR(_name, 0444,					\
-			   therm_throt_sysdev_show_##_name,		\
+#define define_therm_throt_device_one_ro(_name)				\
+	static DEVICE_ATTR(_name, 0444,					\
+			   therm_throt_device_show_##_name,		\
 				   NULL)				\
 
-#define define_therm_throt_sysdev_show_func(event, name)		\
+#define define_therm_throt_device_show_func(event, name)		\
 									\
-static ssize_t therm_throt_sysdev_show_##event##_##name(		\
-			struct sys_device *dev,				\
-			struct sysdev_attribute *attr,			\
+static ssize_t therm_throt_device_show_##event##_##name(		\
+			struct device *dev,				\
+			struct device_attribute *attr,			\
 			char *buf)					\
 {									\
 	unsigned int cpu = dev->id;					\
@@ -95,20 +94,20 @@
 	return ret;							\
 }
 
-define_therm_throt_sysdev_show_func(core_throttle, count);
-define_therm_throt_sysdev_one_ro(core_throttle_count);
+define_therm_throt_device_show_func(core_throttle, count);
+define_therm_throt_device_one_ro(core_throttle_count);
 
-define_therm_throt_sysdev_show_func(core_power_limit, count);
-define_therm_throt_sysdev_one_ro(core_power_limit_count);
+define_therm_throt_device_show_func(core_power_limit, count);
+define_therm_throt_device_one_ro(core_power_limit_count);
 
-define_therm_throt_sysdev_show_func(package_throttle, count);
-define_therm_throt_sysdev_one_ro(package_throttle_count);
+define_therm_throt_device_show_func(package_throttle, count);
+define_therm_throt_device_one_ro(package_throttle_count);
 
-define_therm_throt_sysdev_show_func(package_power_limit, count);
-define_therm_throt_sysdev_one_ro(package_power_limit_count);
+define_therm_throt_device_show_func(package_power_limit, count);
+define_therm_throt_device_one_ro(package_power_limit_count);
 
 static struct attribute *thermal_throttle_attrs[] = {
-	&attr_core_throttle_count.attr,
+	&dev_attr_core_throttle_count.attr,
 	NULL
 };
 
@@ -223,36 +222,36 @@
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+static __cpuinit int thermal_throttle_add_dev(struct device *dev,
 				unsigned int cpu)
 {
 	int err;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
+	err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
 	if (err)
 		return err;
 
 	if (cpu_has(c, X86_FEATURE_PLN))
-		err = sysfs_add_file_to_group(&sys_dev->kobj,
-					      &attr_core_power_limit_count.attr,
+		err = sysfs_add_file_to_group(&dev->kobj,
+					      &dev_attr_core_power_limit_count.attr,
 					      thermal_attr_group.name);
 	if (cpu_has(c, X86_FEATURE_PTS)) {
-		err = sysfs_add_file_to_group(&sys_dev->kobj,
-					      &attr_package_throttle_count.attr,
+		err = sysfs_add_file_to_group(&dev->kobj,
+					      &dev_attr_package_throttle_count.attr,
 					      thermal_attr_group.name);
 		if (cpu_has(c, X86_FEATURE_PLN))
-			err = sysfs_add_file_to_group(&sys_dev->kobj,
-					&attr_package_power_limit_count.attr,
+			err = sysfs_add_file_to_group(&dev->kobj,
+					&dev_attr_package_power_limit_count.attr,
 					thermal_attr_group.name);
 	}
 
 	return err;
 }
 
-static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
+static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
 {
-	sysfs_remove_group(&sys_dev->kobj, &thermal_attr_group);
+	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
 }
 
 /* Mutex protecting device creation against CPU hotplug: */
@@ -265,16 +264,16 @@
 			      void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *dev;
 	int err = 0;
 
-	sys_dev = get_cpu_sysdev(cpu);
+	dev = get_cpu_device(cpu);
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		mutex_lock(&therm_cpu_lock);
-		err = thermal_throttle_add_dev(sys_dev, cpu);
+		err = thermal_throttle_add_dev(dev, cpu);
 		mutex_unlock(&therm_cpu_lock);
 		WARN_ON(err);
 		break;
@@ -283,7 +282,7 @@
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		mutex_lock(&therm_cpu_lock);
-		thermal_throttle_remove_dev(sys_dev);
+		thermal_throttle_remove_dev(dev);
 		mutex_unlock(&therm_cpu_lock);
 		break;
 	}
@@ -310,7 +309,7 @@
 #endif
 	/* connect live CPUs to sysfs */
 	for_each_online_cpu(cpu) {
-		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
+		err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
 		WARN_ON(err);
 	}
 #ifdef CONFIG_HOTPLUG_CPU
@@ -323,17 +322,6 @@
 
 #endif /* CONFIG_SYSFS */
 
-/*
- * Set up the most two significant bit to notify mce log that this thermal
- * event type.
- * This is a temp solution. May be changed in the future with mce log
- * infrasture.
- */
-#define CORE_THROTTLED		(0)
-#define CORE_POWER_LIMIT	((__u64)1 << 62)
-#define PACKAGE_THROTTLED	((__u64)2 << 62)
-#define PACKAGE_POWER_LIMIT	((__u64)3 << 62)
-
 static void notify_thresholds(__u64 msr_val)
 {
 	/* check whether the interrupt handler is defined;
@@ -363,27 +351,23 @@
 	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
 				THERMAL_THROTTLING_EVENT,
 				CORE_LEVEL) != 0)
-		mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
+		mce_log_therm_throt_event(msr_val);
 
 	if (this_cpu_has(X86_FEATURE_PLN))
-		if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+		therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
 					POWER_LIMIT_EVENT,
-					CORE_LEVEL) != 0)
-			mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
+					CORE_LEVEL);
 
 	if (this_cpu_has(X86_FEATURE_PTS)) {
 		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
-		if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
 					THERMAL_THROTTLING_EVENT,
-					PACKAGE_LEVEL) != 0)
-			mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
+					PACKAGE_LEVEL);
 		if (this_cpu_has(X86_FEATURE_PLN))
-			if (therm_throt_process(msr_val &
+			therm_throt_process(msr_val &
 					PACKAGE_THERM_STATUS_POWER_LIMIT,
 					POWER_LIMIT_EVENT,
-					PACKAGE_LEVEL) != 0)
-				mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
-							  | msr_val);
+					PACKAGE_LEVEL);
 	}
 }
 
@@ -397,8 +381,8 @@
 
 asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
 {
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	inc_irq_stat(irq_thermal_count);
 	smp_thermal_vector();
 	irq_exit();
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index d746df2..aa578ca 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -19,8 +19,8 @@
 
 asmlinkage void smp_threshold_interrupt(void)
 {
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
 	irq_exit();
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bda212..5adce10 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -484,18 +484,195 @@
 	return event->pmu == &pmu;
 }
 
+/*
+ * Event scheduler state:
+ *
+ * Assign events iterating over all events and counters, beginning
+ * with events with least weights first. Keep the current iterator
+ * state in struct sched_state.
+ */
+struct sched_state {
+	int	weight;
+	int	event;		/* event index */
+	int	counter;	/* counter index */
+	int	unassigned;	/* number of events to be assigned left */
+	unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+};
+
+/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
+#define	SCHED_STATES_MAX	2
+
+struct perf_sched {
+	int			max_weight;
+	int			max_events;
+	struct event_constraint	**constraints;
+	struct sched_state	state;
+	int			saved_states;
+	struct sched_state	saved[SCHED_STATES_MAX];
+};
+
+/*
+ * Initialize the iterator that runs through all events and counters.
+ */
+static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
+			    int num, int wmin, int wmax)
+{
+	int idx;
+
+	memset(sched, 0, sizeof(*sched));
+	sched->max_events	= num;
+	sched->max_weight	= wmax;
+	sched->constraints	= c;
+
+	for (idx = 0; idx < num; idx++) {
+		if (c[idx]->weight == wmin)
+			break;
+	}
+
+	sched->state.event	= idx;		/* start with min weight */
+	sched->state.weight	= wmin;
+	sched->state.unassigned	= num;
+}
+
+static void perf_sched_save_state(struct perf_sched *sched)
+{
+	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
+		return;
+
+	sched->saved[sched->saved_states] = sched->state;
+	sched->saved_states++;
+}
+
+static bool perf_sched_restore_state(struct perf_sched *sched)
+{
+	if (!sched->saved_states)
+		return false;
+
+	sched->saved_states--;
+	sched->state = sched->saved[sched->saved_states];
+
+	/* continue with next counter: */
+	clear_bit(sched->state.counter++, sched->state.used);
+
+	return true;
+}
+
+/*
+ * Select a counter for the current event to schedule. Return true on
+ * success.
+ */
+static bool __perf_sched_find_counter(struct perf_sched *sched)
+{
+	struct event_constraint *c;
+	int idx;
+
+	if (!sched->state.unassigned)
+		return false;
+
+	if (sched->state.event >= sched->max_events)
+		return false;
+
+	c = sched->constraints[sched->state.event];
+
+	/* Prefer fixed purpose counters */
+	if (x86_pmu.num_counters_fixed) {
+		idx = X86_PMC_IDX_FIXED;
+		for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
+			if (!__test_and_set_bit(idx, sched->state.used))
+				goto done;
+		}
+	}
+	/* Grab the first unused counter starting with idx */
+	idx = sched->state.counter;
+	for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+		if (!__test_and_set_bit(idx, sched->state.used))
+			goto done;
+	}
+
+	return false;
+
+done:
+	sched->state.counter = idx;
+
+	if (c->overlap)
+		perf_sched_save_state(sched);
+
+	return true;
+}
+
+static bool perf_sched_find_counter(struct perf_sched *sched)
+{
+	while (!__perf_sched_find_counter(sched)) {
+		if (!perf_sched_restore_state(sched))
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Go through all unassigned events and find the next one to schedule.
+ * Take events with the least weight first. Return true on success.
+ */
+static bool perf_sched_next_event(struct perf_sched *sched)
+{
+	struct event_constraint *c;
+
+	if (!sched->state.unassigned || !--sched->state.unassigned)
+		return false;
+
+	do {
+		/* next event */
+		sched->state.event++;
+		if (sched->state.event >= sched->max_events) {
+			/* next weight */
+			sched->state.event = 0;
+			sched->state.weight++;
+			if (sched->state.weight > sched->max_weight)
+				return false;
+		}
+		c = sched->constraints[sched->state.event];
+	} while (c->weight != sched->state.weight);
+
+	sched->state.counter = 0;	/* start with first counter */
+
+	return true;
+}
+
+/*
+ * Assign a counter for each event.
+ */
+static int perf_assign_events(struct event_constraint **constraints, int n,
+			      int wmin, int wmax, int *assign)
+{
+	struct perf_sched sched;
+
+	perf_sched_init(&sched, constraints, n, wmin, wmax);
+
+	do {
+		if (!perf_sched_find_counter(&sched))
+			break;	/* failed */
+		if (assign)
+			assign[sched.state.event] = sched.state.counter;
+	} while (perf_sched_next_event(&sched));
+
+	return sched.state.unassigned;
+}
+
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
 	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
 	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	int i, j, w, wmax, num = 0;
+	int i, wmin, wmax, num = 0;
 	struct hw_perf_event *hwc;
 
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
-	for (i = 0; i < n; i++) {
+	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
 		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
 		constraints[i] = c;
+		wmin = min(wmin, c->weight);
+		wmax = max(wmax, c->weight);
 	}
 
 	/*
@@ -521,59 +698,11 @@
 		if (assign)
 			assign[i] = hwc->idx;
 	}
-	if (i == n)
-		goto done;
 
-	/*
-	 * begin slow path
-	 */
+	/* slow path */
+	if (i != n)
+		num = perf_assign_events(constraints, n, wmin, wmax, assign);
 
-	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
-
-	/*
-	 * weight = number of possible counters
-	 *
-	 * 1    = most constrained, only works on one counter
-	 * wmax = least constrained, works on any counter
-	 *
-	 * assign events to counters starting with most
-	 * constrained events.
-	 */
-	wmax = x86_pmu.num_counters;
-
-	/*
-	 * when fixed event counters are present,
-	 * wmax is incremented by 1 to account
-	 * for one more choice
-	 */
-	if (x86_pmu.num_counters_fixed)
-		wmax++;
-
-	for (w = 1, num = n; num && w <= wmax; w++) {
-		/* for each event */
-		for (i = 0; num && i < n; i++) {
-			c = constraints[i];
-			hwc = &cpuc->event_list[i]->hw;
-
-			if (c->weight != w)
-				continue;
-
-			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
-				if (!test_bit(j, used_mask))
-					break;
-			}
-
-			if (j == X86_PMC_IDX_MAX)
-				break;
-
-			__set_bit(j, used_mask);
-
-			if (assign)
-				assign[i] = j;
-			num--;
-		}
-	}
-done:
 	/*
 	 * scheduling failed or is just a simulation,
 	 * free resources if necessary
@@ -1119,6 +1248,7 @@
 
 static int __init init_hw_perf_events(void)
 {
+	struct x86_pmu_quirk *quirk;
 	struct event_constraint *c;
 	int err;
 
@@ -1147,8 +1277,8 @@
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
-	if (x86_pmu.quirks)
-		x86_pmu.quirks();
+	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
+		quirk->func();
 
 	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
@@ -1171,12 +1301,18 @@
 
 	unconstrained = (struct event_constraint)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
-				   0, x86_pmu.num_counters);
+				   0, x86_pmu.num_counters, 0);
 
 	if (x86_pmu.event_constraints) {
+		/*
+		 * event on fixed counter2 (REF_CYCLES) only works on this
+		 * counter, so do not extend mask to generic counters
+		 */
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != X86_RAW_EVENT_MASK)
+			if (c->cmask != X86_RAW_EVENT_MASK
+			    || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
 				continue;
+			}
 
 			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
 			c->weight += x86_pmu.num_counters;
@@ -1566,3 +1702,15 @@
 
 	return misc;
 }
+
+void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+	cap->version		= x86_pmu.version;
+	cap->num_counters_gp	= x86_pmu.num_counters;
+	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
+	cap->bit_width_gp	= x86_pmu.cntval_bits;
+	cap->bit_width_fixed	= x86_pmu.cntval_bits;
+	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
+	cap->events_mask_len	= x86_pmu.events_mask_len;
+}
+EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
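
perf_get_x86_pmu_capability() gives the rest of the kernel a snapshot of the host PMU. A minimal sketch of a consumer (the caller and its policy are hypothetical; only the field names come from the structure filled in above):

	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);

	/* e.g. refuse to expose a virtual PMU without an architectural PMU */
	if (!cap.version || !cap.num_counters_gp)
		return -ENODEV;

	pr_info("host PMU v%d: %d GP counters (%d bits), %d fixed counters\n",
		cap.version, cap.num_counters_gp, cap.bit_width_gp,
		cap.num_counters_fixed);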
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index b9698d4..8944062 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -45,6 +45,7 @@
 	u64	code;
 	u64	cmask;
 	int	weight;
+	int	overlap;
 };
 
 struct amd_nb {
@@ -151,15 +152,40 @@
 	void				*kfree_on_online;
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
 	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
+	.overlap = (o),			\
 }
 
 #define EVENT_CONSTRAINT(c, n, m)	\
-	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
+
+/*
+ * The overlap flag marks event constraints with overlapping counter
+ * masks. This is the case if the counter mask of such an event is not
+ * a subset of any other counter mask of a constraint with an equal or
+ * higher weight, e.g.:
+ *
+ *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
+ *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
+ *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
+ *
+ * The event scheduler may not select the correct counter in the first
+ * cycle because it does not yet know which events will be scheduled
+ * afterwards, and may then fail to schedule them at all. So we set the
+ * overlap flag for such constraints to give the scheduler a hint which
+ * events to consider for counter rescheduling.
+ *
+ * Care must be taken as the rescheduling algorithm is O(n!), which can
+ * increase scheduling cycles dramatically on an over-committed system.
+ * The number of such EVENT_CONSTRAINT_OVERLAP() macros and their
+ * counter masks must therefore be kept to a minimum.
+ */
+#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
 
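
To make the overlap case concrete, take the masks from the comment above with one event using c_overlaps and three events using c_another1 (an illustrative scenario; the counter numbering is arbitrary, not a real PMU layout):

	counters          0  1  2  3  4  5
	c_overlaps 0x09:  x        x           (weight 2)
	c_another1 0x07:  x  x  x              (weight 3)

	greedy, by weight:  c_overlaps -> counter 0
	                    the three c_another1 events need {0,1,2}, only
	                    {1,2} are left -> scheduling fails
	valid assignment:   c_overlaps -> counter 3, c_another1 -> {0,1,2}

The overlap flag tells the scheduler that moving the 0x09 event off its first choice is worth retrying before giving up.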
 /*
  * Constraint on the Event code.
@@ -235,6 +261,11 @@
 	u64	capabilities;
 };
 
+struct x86_pmu_quirk {
+	struct x86_pmu_quirk *next;
+	void (*func)(void);
+};
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
@@ -259,6 +290,11 @@
 	int		num_counters_fixed;
 	int		cntval_bits;
 	u64		cntval_mask;
+	union {
+			unsigned long events_maskl;
+			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
+	};
+	int		events_mask_len;
 	int		apic;
 	u64		max_period;
 	struct event_constraint *
@@ -268,7 +304,7 @@
 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
 						 struct perf_event *event);
 	struct event_constraint *event_constraints;
-	void		(*quirks)(void);
+	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
 
 	int		(*cpu_prepare)(int cpu);
@@ -309,6 +345,15 @@
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
 };
 
+#define x86_add_quirk(func_)						\
+do {									\
+	static struct x86_pmu_quirk __quirk __initdata = {		\
+		.func = func_,						\
+	};								\
+	__quirk.next = x86_pmu.quirks;					\
+	x86_pmu.quirks = &__quirk;					\
+} while (0)
+
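
A quirk registered with this macro is a static __initdata node pushed onto x86_pmu.quirks; init_hw_perf_events() walks the list and calls each func() (see the perf_event.c hunk above). A minimal sketch, with a hypothetical quirk modelled on the intel_*_quirk() users added elsewhere in this series:

	static __init void example_pmu_quirk(void)
	{
		/* adjust x86_pmu fields before the PMU is finalized */
		x86_pmu.max_period >>= 1;
	}

	/* in the model-specific setup path: */
	x86_add_quirk(example_pmu_quirk);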
 #define ERF_NO_HT_SHARING	1
 #define ERF_HAS_RSP_1		2
 
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index aeefd45..0397b23 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -492,7 +492,7 @@
 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
 static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
-static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
+static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8d601b1..3bd37bd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -28,6 +28,7 @@
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
+  [PERF_COUNT_HW_REF_CPU_CYCLES]	= 0x0300, /* pseudo-encoding */
 };
 
 static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -45,12 +46,7 @@
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	/*
-	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
-	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
-	 * ratio between these counters.
-	 */
-	/* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -68,7 +64,7 @@
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -90,7 +86,7 @@
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -102,7 +98,7 @@
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
@@ -125,7 +121,7 @@
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
 
@@ -1169,7 +1165,7 @@
 		 */
 		c = &unconstrained;
 	} else if (intel_try_alt_er(event, orig_idx)) {
-		raw_spin_unlock(&era->lock);
+		raw_spin_unlock_irqrestore(&era->lock, flags);
 		goto again;
 	}
 	raw_spin_unlock_irqrestore(&era->lock, flags);
@@ -1519,7 +1515,7 @@
 	.guest_get_msrs		= intel_guest_get_msrs,
 };
 
-static void intel_clovertown_quirks(void)
+static __init void intel_clovertown_quirk(void)
 {
 	/*
 	 * PEBS is unreliable due to:
@@ -1545,19 +1541,60 @@
 	x86_pmu.pebs_constraints = NULL;
 }
 
-static void intel_sandybridge_quirks(void)
+static __init void intel_sandybridge_quirk(void)
 {
 	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
 	x86_pmu.pebs = 0;
 	x86_pmu.pebs_constraints = NULL;
 }
 
+static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
+	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
+	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
+	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
+	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
+	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
+	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
+	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
+};
+
+static __init void intel_arch_events_quirk(void)
+{
+	int bit;
+
+	/* disable events that CPUID reports as not present */
+	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
+		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
+		printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
+				intel_arch_events_map[bit].name);
+	}
+}
+
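+
The quirk relies on the CPUID leaf 0xA convention that a set bit in EBX marks the corresponding architectural event as not available, with eax.split.mask_length giving the number of valid bits. Worked through with an illustrative (made-up) value:

	EBX = 0x44 = 0b0100_0100, mask_length = 7  ->  bits 2 and 6 set
	bit 2 -> intel_arch_events_map[2].id = PERF_COUNT_HW_BUS_CYCLES
	bit 6 -> intel_arch_events_map[6].id = PERF_COUNT_HW_BRANCH_MISSES

The loop above then zeroes both entries in intel_perfmon_event_map, so those generic hardware events can no longer be opened on this CPU.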
+static __init void intel_nehalem_quirk(void)
+{
+	union cpuid10_ebx ebx;
+
+	ebx.full = x86_pmu.events_maskl;
+	if (ebx.split.no_branch_misses_retired) {
+		/*
+		 * Erratum AAJ80 detected, we work it around by using
+		 * the BR_MISP_EXEC.ANY event. This will over-count
+		 * branch-misses, but it's still much better than the
+		 * architectural event which is often completely bogus:
+		 */
+		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+		ebx.split.no_branch_misses_retired = 0;
+		x86_pmu.events_maskl = ebx.full;
+		printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
+	}
+}
+
 __init int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
+	union cpuid10_ebx ebx;
 	unsigned int unused;
-	unsigned int ebx;
 	int version;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
@@ -1574,8 +1611,8 @@
 	 * Check whether the Architectural PerfMon supports
 	 * Branch Misses Retired hw_event or not.
 	 */
-	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
-	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
+	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
 		return -ENODEV;
 
 	version = eax.split.version_id;
@@ -1589,6 +1626,9 @@
 	x86_pmu.cntval_bits		= eax.split.bit_width;
 	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
 
+	x86_pmu.events_maskl		= ebx.full;
+	x86_pmu.events_mask_len		= eax.split.mask_length;
+
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
@@ -1608,6 +1648,8 @@
 
 	intel_ds_init();
 
+	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
+
 	/*
 	 * Install the hw-cache-events table:
 	 */
@@ -1617,7 +1659,7 @@
 		break;
 
 	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
-		x86_pmu.quirks = intel_clovertown_quirks;
+		x86_add_quirk(intel_clovertown_quirk);
 	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
 	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
 	case 29: /* six-core 45 nm xeon "Dunnington" */
@@ -1651,17 +1693,8 @@
 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
 
-		if (ebx & 0x40) {
-			/*
-			 * Erratum AAJ80 detected, we work it around by using
-			 * the BR_MISP_EXEC.ANY event. This will over-count
-			 * branch-misses, but it's still much better than the
-			 * architectural event which is often completely bogus:
-			 */
-			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+		x86_add_quirk(intel_nehalem_quirk);
 
-			pr_cont("erratum AAJ80 worked around, ");
-		}
 		pr_cont("Nehalem events, ");
 		break;
 
@@ -1701,7 +1734,7 @@
 		break;
 
 	case 42: /* SandyBridge */
-		x86_pmu.quirks = intel_sandybridge_quirks;
+		x86_add_quirk(intel_sandybridge_quirk);
 	case 45: /* SandyBridge, "Romley-EP" */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
@@ -1738,5 +1771,6 @@
 			break;
 		}
 	}
+
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c
index 5abbea2..7b3fe56 100644
--- a/arch/x86/kernel/cpu/powerflags.c
+++ b/arch/x86/kernel/cpu/powerflags.c
@@ -16,5 +16,6 @@
 	"100mhzsteps",
 	"hwpstate",
 	"",	/* tsc invariant mapped to constant_tsc */
-		/* nothing */
+	"cpb",  /* core performance boost */
+	"eff_freq_ro", /* Readonly aperf/mperf */
 };
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 14b2314..8022c66 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -64,12 +64,10 @@
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	struct cpuinfo_x86 *c = v;
-	unsigned int cpu = 0;
+	unsigned int cpu;
 	int i;
 
-#ifdef CONFIG_SMP
 	cpu = c->cpu_index;
-#endif
 	seq_printf(m, "processor\t: %u\n"
 		   "vendor_id\t: %s\n"
 		   "cpu family\t: %d\n"
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 212a6a4..a524353 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -177,7 +177,7 @@
 	.notifier_call = cpuid_class_cpu_callback,
 };
 
-static char *cpuid_devnode(struct device *dev, mode_t *mode)
+static char *cpuid_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
 }
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 3b97a80..c99f9ed 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -116,16 +116,16 @@
 		for (i = 0; i < code_len; i++, ip++) {
 			if (ip < (u8 *)PAGE_OFFSET ||
 					probe_kernel_address(ip, c)) {
-				printk(" Bad EIP value.");
+				printk(KERN_CONT " Bad EIP value.");
 				break;
 			}
 			if (ip == (u8 *)regs->ip)
-				printk("<%02x> ", c);
+				printk(KERN_CONT "<%02x> ", c);
 			else
-				printk("%02x ", c);
+				printk(KERN_CONT "%02x ", c);
 		}
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 19853ad..6d728d9 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -284,16 +284,16 @@
 		for (i = 0; i < code_len; i++, ip++) {
 			if (ip < (u8 *)PAGE_OFFSET ||
 					probe_kernel_address(ip, c)) {
-				printk(" Bad RIP value.");
+				printk(KERN_CONT " Bad RIP value.");
 				break;
 			}
 			if (ip == (u8 *)regs->ip)
-				printk("<%02x> ", c);
+				printk(KERN_CONT "<%02x> ", c);
 			else
-				printk("%02x ", c);
+				printk(KERN_CONT "%02x ", c);
 		}
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 65ffd11..8071e2f 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -135,7 +135,6 @@
 		printk(KERN_CONT "(usable)");
 		break;
 	case E820_RESERVED:
-	case E820_RESERVED_EFI:
 		printk(KERN_CONT "(reserved)");
 		break;
 	case E820_ACPI:
@@ -739,35 +738,17 @@
 /*
  * pre allocated 4k and reserved it in memblock and e820_saved
  */
-u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
+u64 __init early_reserve_e820(u64 size, u64 align)
 {
-	u64 size = 0;
 	u64 addr;
-	u64 start;
 
-	for (start = startt; ; start += size) {
-		start = memblock_x86_find_in_range_size(start, &size, align);
-		if (start == MEMBLOCK_ERROR)
-			return 0;
-		if (size >= sizet)
-			break;
+	addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+	if (addr) {
+		e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
+		printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+		update_e820_saved();
 	}
 
-#ifdef CONFIG_X86_32
-	if (start >= MAXMEM)
-		return 0;
-	if (start + size > MAXMEM)
-		size = MAXMEM - start;
-#endif
-
-	addr = round_down(start + size - sizet, align);
-	if (addr < start)
-		return 0;
-	memblock_x86_reserve_range(addr, addr + sizet, "new next");
-	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-	printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
-	update_e820_saved();
-
 	return addr;
 }
 
@@ -784,7 +765,7 @@
 /*
  * Find the highest page frame number we have available
  */
-unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
 {
 	int i;
 	unsigned long last_pfn = 0;
@@ -1091,7 +1072,7 @@
 	 * We are safe to enable resizing, because memblock_x86_fill()
 	 * runs rather late for x86
 	 */
-	memblock_can_resize = 1;
+	memblock_allow_resize();
 
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
@@ -1106,22 +1087,36 @@
 		memblock_add(ei->addr, ei->size);
 	}
 
-	memblock_analyze();
 	memblock_dump_all();
 }
 
 void __init memblock_find_dma_reserve(void)
 {
 #ifdef CONFIG_X86_64
-	u64 free_size_pfn;
-	u64 mem_size_pfn;
+	u64 nr_pages = 0, nr_free_pages = 0;
+	unsigned long start_pfn, end_pfn;
+	phys_addr_t start, end;
+	int i;
+	u64 u;
+
 	/*
 	 * need to find out used area below MAX_DMA_PFN
 	 * need to use memblock to get free size in [0, MAX_DMA_PFN]
 	 * at first, and assume boot_mem will not take below MAX_DMA_PFN
 	 */
-	mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
-	free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
-	set_dma_reserve(mem_size_pfn - free_size_pfn);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+		start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
+		end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
+		nr_pages += end_pfn - start_pfn;
+	}
+
+	for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+		start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
+		end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
+		if (start_pfn < end_pfn)
+			nr_free_pages += end_pfn - start_pfn;
+	}
+
+	set_dma_reserve(nr_pages - nr_free_pages);
 #endif
 }
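
The value handed to set_dma_reserve() is simply "pages that exist below MAX_DMA_PFN but are no longer free in memblock", now derived from the generic iterators instead of the removed memblock_x86_*_in_range() helpers. With illustrative numbers:

	nr_pages      = 4096   RAM pages below MAX_DMA_PFN
	nr_free_pages = 3840   of them still free in memblock
	set_dma_reserve(4096 - 3840)  ->  256 pages accounted as already
	                                  used when the DMA zone is sized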
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index cd28a35..9d42a52 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -247,7 +247,7 @@
 		}
 
 		if (!strncmp(buf, "hsu", 3)) {
-			hsu_early_console_init();
+			hsu_early_console_init(buf + 3);
 			early_console_register(&early_hsu_console, keep);
 		}
 #endif
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f3f6f53..22d0e21 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -625,6 +625,8 @@
 	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	xorl %edx, %edx
 	call do_notify_resume
 	jmp resume_userspace_sig
@@ -638,6 +640,8 @@
 #else
 	movl %esp, %eax
 #endif
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	xorl %edx, %edx
 	call do_notify_resume
 	jmp resume_userspace_sig
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index faf8d5e..a20e1cb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -221,7 +221,7 @@
 	/*CFI_REL_OFFSET	ss,0*/
 	pushq_cfi %rax /* rsp */
 	CFI_REL_OFFSET	rsp,0
-	pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
+	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
 	/*CFI_REL_OFFSET	rflags,0*/
 	pushq_cfi $__KERNEL_CS /* cs */
 	/*CFI_REL_OFFSET	cs,0*/
@@ -411,7 +411,7 @@
 	RESTORE_REST
 
 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
-	je   int_ret_from_sys_call
+	jz   retint_restore_args
 
 	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
 	jnz  int_ret_from_sys_call
@@ -465,7 +465,7 @@
 	 * after the swapgs, so that it can do the swapgs
 	 * for the guest and jump here on syscall.
 	 */
-ENTRY(system_call_after_swapgs)
+GLOBAL(system_call_after_swapgs)
 
 	movq	%rsp,PER_CPU_VAR(old_rsp)
 	movq	PER_CPU_VAR(kernel_stack),%rsp
@@ -478,8 +478,7 @@
 	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
-	GET_THREAD_INFO(%rcx)
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz tracesys
 system_call_fastpath:
 	cmpq $__NR_syscall_max,%rax
@@ -496,10 +495,9 @@
 	/* edi:	flagmask */
 sysret_check:
 	LOCKDEP_SYS_EXIT
-	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	movl TI_flags(%rcx),%edx
+	movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
 	andl %edi,%edx
 	jnz  sysret_careful
 	CFI_REMEMBER_STATE
@@ -583,7 +581,7 @@
 	/* Do syscall tracing */
 tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
+	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jz auditsys
 #endif
 	SAVE_REST
@@ -612,8 +610,6 @@
 GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl $3,CS-ARGOFFSET(%rsp)
-	je retint_restore_args
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	mask to check */
 GLOBAL(int_with_check)
@@ -953,6 +949,7 @@
 ENTRY(\sym)
 	INTR_FRAME
 	pushq_cfi $~(\num)
+.Lcommon_\sym:
 	interrupt \do_sym
 	jmp ret_from_intr
 	CFI_ENDPROC
@@ -976,13 +973,21 @@
 	x86_platform_ipi smp_x86_platform_ipi
 
 #ifdef CONFIG_SMP
-.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+	ALIGN
+	INTR_FRAME
+.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
 	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
 .if NUM_INVALIDATE_TLB_VECTORS > \idx
-apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
-	invalidate_interrupt\idx smp_invalidate_interrupt
+ENTRY(invalidate_interrupt\idx)
+	pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx)
+	jmp .Lcommon_invalidate_interrupt0
+	CFI_ADJUST_CFA_OFFSET -8
+END(invalidate_interrupt\idx)
 .endif
 .endr
+	CFI_ENDPROC
+apicinterrupt INVALIDATE_TLB_VECTOR_START, \
+	invalidate_interrupt0, smp_invalidate_interrupt
 #endif
 
 apicinterrupt THRESHOLD_APIC_VECTOR \
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index af0699b..48d9d4e 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -52,5 +52,5 @@
 		lowmem = 0x9f000;
 
 	/* reserve all memory between lowmem and the 1MB mark */
-	memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+	memblock_reserve(lowmem, 0x100000 - lowmem);
 }
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3bb0850..51ff186 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -31,9 +31,8 @@
 
 void __init i386_start_kernel(void)
 {
-	memblock_init();
-
-	memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+	memblock_reserve(__pa_symbol(&_text),
+			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
@@ -42,7 +41,7 @@
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
 		u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
 	}
 #endif
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5655c22..3a3b779 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,9 +98,8 @@
 {
 	copy_bootdata(__va(real_mode_data));
 
-	memblock_init();
-
-	memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+	memblock_reserve(__pa_symbol(&_text),
+			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
@@ -109,7 +108,7 @@
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
 		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
 	}
 #endif
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 1bb0bf4..ad0de0c 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -2,7 +2,6 @@
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/export.h>
-#include <linux/sysdev.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/i8253.h>
@@ -32,8 +31,6 @@
 #define HPET_MIN_CYCLES			128
 #define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
 
-#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
-
 /*
  * HPET address is set in acpi/boot.c, when an ACPI entry exists
  */
@@ -55,6 +52,11 @@
 	char				name[10];
 };
 
+inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
+{
+	return container_of(evtdev, struct hpet_dev, evt);
+}
+
 inline unsigned int hpet_readl(unsigned int a)
 {
 	return readl(hpet_virt_address + a);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 429e0c9..7943e0c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -74,6 +74,10 @@
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
 	seq_printf(p, "  IRQ work interrupts\n");
+	seq_printf(p, "%*s: ", prec, "RTR");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
+	seq_printf(p, "  APIC ICR read retries\n");
 #endif
 	if (x86_platform_ipi_callback) {
 		seq_printf(p, "%*s: ", prec, "PLT");
@@ -136,6 +140,7 @@
 	sum += irq_stats(cpu)->irq_spurious_count;
 	sum += irq_stats(cpu)->apic_perf_irqs;
 	sum += irq_stats(cpu)->apic_irq_work_irqs;
+	sum += irq_stats(cpu)->icr_read_retry_count;
 #endif
 	if (x86_platform_ipi_callback)
 		sum += irq_stats(cpu)->x86_platform_ipis;
@@ -181,8 +186,8 @@
 	unsigned vector = ~regs->orig_ax;
 	unsigned irq;
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 
 	irq = __this_cpu_read(vector_irq[vector]);
 
@@ -209,10 +214,10 @@
 
 	ack_APIC_irq();
 
-	exit_idle();
-
 	irq_enter();
 
+	exit_idle();
+
 	inc_irq_stat(x86_platform_ipis);
 
 	if (x86_platform_ipi_callback)
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index b3300e6..313fb5c 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include <linux/init.h>
 #include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/bitops.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index ea9d5f2f..2889b3d 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -50,7 +50,7 @@
 	put_online_cpus();
 }
 
-void arch_jump_label_transform_static(struct jump_entry *entry,
+__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
 				      enum jump_label_type type)
 {
 	__jump_label_transform(entry, type, text_poke_early);
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index d494799..fe86493 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -1,14 +1,18 @@
 /*
  *  AMD CPU Microcode Update Driver for Linux
- *  Copyright (C) 2008 Advanced Micro Devices Inc.
+ *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
  *
  *  Author: Peter Oruba <peter.oruba@amd.com>
  *
  *  Based on work by:
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
- *  This driver allows to upgrade microcode on AMD
- *  family 0x10 and 0x11 processors.
+ *  Maintainers:
+ *  Andreas Herrmann <andreas.herrmann3@amd.com>
+ *  Borislav Petkov <borislav.petkov@amd.com>
+ *
+ *  This driver allows upgrading microcode on AMD F10h
+ *  and later CPUs.
  *
  *  Licensed under the terms of the GNU General Public
  *  License version 2. See file COPYING for details.
@@ -71,6 +75,9 @@
 
 static struct equiv_cpu_entry *equiv_cpu_table;
 
+/* page-sized ucode patch buffer */
+void *patch;
+
 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -86,27 +93,76 @@
 	return 0;
 }
 
-static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr,
-				  int rev)
+static unsigned int verify_ucode_size(int cpu, u32 patch_size,
+				      unsigned int size)
 {
-	unsigned int current_cpu_id;
-	u16 equiv_cpu_id = 0;
-	unsigned int i = 0;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	u32 max_size;
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+#define F15H_MPB_MAX_SIZE 4096
+
+	switch (c->x86) {
+	case 0x14:
+		max_size = F14H_MPB_MAX_SIZE;
+		break;
+	case 0x15:
+		max_size = F15H_MPB_MAX_SIZE;
+		break;
+	default:
+		max_size = F1XH_MPB_MAX_SIZE;
+		break;
+	}
+
+	if (patch_size > min_t(u32, size, max_size)) {
+		pr_err("patch size mismatch\n");
+		return 0;
+	}
+
+	return patch_size;
+}
+
+static u16 find_equiv_id(void)
+{
+	unsigned int current_cpu_id, i = 0;
 
 	BUG_ON(equiv_cpu_table == NULL);
+
 	current_cpu_id = cpuid_eax(0x00000001);
 
 	while (equiv_cpu_table[i].installed_cpu != 0) {
-		if (current_cpu_id == equiv_cpu_table[i].installed_cpu) {
-			equiv_cpu_id = equiv_cpu_table[i].equiv_cpu;
-			break;
-		}
+		if (current_cpu_id == equiv_cpu_table[i].installed_cpu)
+			return equiv_cpu_table[i].equiv_cpu;
+
 		i++;
 	}
+	return 0;
+}
 
+/*
+ * We signal that a good patch was found by returning its size (> 0).
+ */
+static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
+				  unsigned int leftover_size, int rev,
+				  unsigned int *current_size)
+{
+	struct microcode_header_amd *mc_hdr;
+	unsigned int actual_size;
+	u16 equiv_cpu_id;
+
+	/* size of the patch we are currently looking at */
+	*current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE;
+
+	equiv_cpu_id = find_equiv_id();
 	if (!equiv_cpu_id)
 		return 0;
 
+	/*
+	 * let's look at the patch header itself now
+	 */
+	mc_hdr = (struct microcode_header_amd *)(ucode_ptr + SECTION_HDR_SIZE);
+
 	if (mc_hdr->processor_rev_id != equiv_cpu_id)
 		return 0;
 
@@ -120,7 +176,20 @@
 	if (mc_hdr->patch_id <= rev)
 		return 0;
 
-	return 1;
+	/*
+	 * now that the header looks sane, verify its size
+	 */
+	actual_size = verify_ucode_size(cpu, *current_size, leftover_size);
+	if (!actual_size)
+		return 0;
+
+	/* clear the patch buffer */
+	memset(patch, 0, PAGE_SIZE);
+
+	/* all looks ok, get the binary patch */
+	get_ucode_data(patch, ucode_ptr + SECTION_HDR_SIZE, actual_size);
+
+	return actual_size;
 }
 
 static int apply_microcode_amd(int cpu)
@@ -155,63 +224,6 @@
 	return 0;
 }
 
-static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-	u32 max_size, actual_size;
-
-#define F1XH_MPB_MAX_SIZE 2048
-#define F14H_MPB_MAX_SIZE 1824
-#define F15H_MPB_MAX_SIZE 4096
-
-	switch (c->x86) {
-	case 0x14:
-		max_size = F14H_MPB_MAX_SIZE;
-		break;
-	case 0x15:
-		max_size = F15H_MPB_MAX_SIZE;
-		break;
-	default:
-		max_size = F1XH_MPB_MAX_SIZE;
-		break;
-	}
-
-	actual_size = *(u32 *)(buf + 4);
-
-	if (actual_size + SECTION_HDR_SIZE > size || actual_size > max_size) {
-		pr_err("section size mismatch\n");
-		return 0;
-	}
-
-	return actual_size;
-}
-
-static struct microcode_header_amd *
-get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
-{
-	struct microcode_header_amd *mc = NULL;
-	unsigned int actual_size = 0;
-
-	if (*(u32 *)buf != UCODE_UCODE_TYPE) {
-		pr_err("invalid type field in container file section header\n");
-		goto out;
-	}
-
-	actual_size = verify_ucode_size(cpu, buf, size);
-	if (!actual_size)
-		goto out;
-
-	mc = vzalloc(actual_size);
-	if (!mc)
-		goto out;
-
-	get_ucode_data(mc, buf + SECTION_HDR_SIZE, actual_size);
-	*mc_size = actual_size + SECTION_HDR_SIZE;
-
-out:
-	return mc;
-}
-
 static int install_equiv_cpu_table(const u8 *buf)
 {
 	unsigned int *ibuf = (unsigned int *)buf;
@@ -247,36 +259,38 @@
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	struct microcode_header_amd *mc_hdr = NULL;
-	unsigned int mc_size, leftover;
+	unsigned int mc_size, leftover, current_size = 0;
 	int offset;
 	const u8 *ucode_ptr = data;
 	void *new_mc = NULL;
 	unsigned int new_rev = uci->cpu_sig.rev;
-	enum ucode_state state = UCODE_OK;
+	enum ucode_state state = UCODE_ERROR;
 
 	offset = install_equiv_cpu_table(ucode_ptr);
 	if (offset < 0) {
 		pr_err("failed to create equivalent cpu table\n");
-		return UCODE_ERROR;
+		goto out;
 	}
-
 	ucode_ptr += offset;
 	leftover = size - offset;
 
+	if (*(u32 *)ucode_ptr != UCODE_UCODE_TYPE) {
+		pr_err("invalid type field in container file section header\n");
+		goto free_table;
+	}
+
 	while (leftover) {
-		mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size);
-		if (!mc_hdr)
-			break;
-
-		if (get_matching_microcode(cpu, mc_hdr, new_rev)) {
-			vfree(new_mc);
+		mc_size = get_matching_microcode(cpu, ucode_ptr, leftover,
+						 new_rev, &current_size);
+		if (mc_size) {
+			mc_hdr  = patch;
+			new_mc  = patch;
 			new_rev = mc_hdr->patch_id;
-			new_mc  = mc_hdr;
-		} else
-			vfree(mc_hdr);
+			goto out_ok;
+		}
 
-		ucode_ptr += mc_size;
-		leftover  -= mc_size;
+		ucode_ptr += current_size;
+		leftover  -= current_size;
 	}
 
 	if (!new_mc) {
@@ -284,19 +298,16 @@
 		goto free_table;
 	}
 
-	if (!leftover) {
-		vfree(uci->mc);
-		uci->mc = new_mc;
-		pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
-			 cpu, uci->cpu_sig.rev, new_rev);
-	} else {
-		vfree(new_mc);
-		state = UCODE_ERROR;
-	}
+out_ok:
+	uci->mc = new_mc;
+	state = UCODE_OK;
+	pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
+		 cpu, uci->cpu_sig.rev, new_rev);
 
 free_table:
 	free_equiv_cpu_table();
 
+out:
 	return state;
 }
 
@@ -337,7 +348,6 @@
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-	vfree(uci->mc);
 	uci->mc = NULL;
 }
 
@@ -351,5 +361,14 @@
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
+	patch = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!patch)
+		return NULL;
+
 	return &microcode_amd_ops;
 }
+
+void __exit exit_amd_microcode(void)
+{
+	free_page((unsigned long)patch);
+}
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 9d46f5e..fda91c3 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -292,8 +292,8 @@
 	return err;
 }
 
-static ssize_t reload_store(struct sys_device *dev,
-			    struct sysdev_attribute *attr,
+static ssize_t reload_store(struct device *dev,
+			    struct device_attribute *attr,
 			    const char *buf, size_t size)
 {
 	unsigned long val;
@@ -318,30 +318,30 @@
 	return ret;
 }
 
-static ssize_t version_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t version_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
 
 	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
 }
 
-static ssize_t pf_show(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t pf_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
 
 	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
 }
 
-static SYSDEV_ATTR(reload, 0200, NULL, reload_store);
-static SYSDEV_ATTR(version, 0400, version_show, NULL);
-static SYSDEV_ATTR(processor_flags, 0400, pf_show, NULL);
+static DEVICE_ATTR(reload, 0200, NULL, reload_store);
+static DEVICE_ATTR(version, 0400, version_show, NULL);
+static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
 
 static struct attribute *mc_default_attrs[] = {
-	&attr_reload.attr,
-	&attr_version.attr,
-	&attr_processor_flags.attr,
+	&dev_attr_reload.attr,
+	&dev_attr_version.attr,
+	&dev_attr_processor_flags.attr,
 	NULL
 };
 
@@ -405,43 +405,45 @@
 	return ustate;
 }
 
-static int mc_sysdev_add(struct sys_device *sys_dev)
+static int mc_device_add(struct device *dev, struct subsys_interface *sif)
 {
-	int err, cpu = sys_dev->id;
+	int err, cpu = dev->id;
 
 	if (!cpu_online(cpu))
 		return 0;
 
 	pr_debug("CPU%d added\n", cpu);
 
-	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
 	if (err)
 		return err;
 
 	if (microcode_init_cpu(cpu) == UCODE_ERROR) {
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+		sysfs_remove_group(&dev->kobj, &mc_attr_group);
 		return -EINVAL;
 	}
 
 	return err;
 }
 
-static int mc_sysdev_remove(struct sys_device *sys_dev)
+static int mc_device_remove(struct device *dev, struct subsys_interface *sif)
 {
-	int cpu = sys_dev->id;
+	int cpu = dev->id;
 
 	if (!cpu_online(cpu))
 		return 0;
 
 	pr_debug("CPU%d removed\n", cpu);
 	microcode_fini_cpu(cpu);
-	sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+	sysfs_remove_group(&dev->kobj, &mc_attr_group);
 	return 0;
 }
 
-static struct sysdev_driver mc_sysdev_driver = {
-	.add			= mc_sysdev_add,
-	.remove			= mc_sysdev_remove,
+static struct subsys_interface mc_cpu_interface = {
+	.name			= "microcode",
+	.subsys			= &cpu_subsys,
+	.add_dev		= mc_device_add,
+	.remove_dev		= mc_device_remove,
 };
 
 /**
@@ -464,9 +466,9 @@
 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
+	dev = get_cpu_device(cpu);
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -474,13 +476,13 @@
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		pr_debug("CPU%d added\n", cpu);
-		if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
+		if (sysfs_create_group(&dev->kobj, &mc_attr_group))
 			pr_err("Failed to create group for CPU%d\n", cpu);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		/* Suspend is in progress, only remove the interface */
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+		sysfs_remove_group(&dev->kobj, &mc_attr_group);
 		pr_debug("CPU%d removed\n", cpu);
 		break;
 
@@ -525,7 +527,7 @@
 	get_online_cpus();
 	mutex_lock(&microcode_mutex);
 
-	error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
+	error = subsys_interface_register(&mc_cpu_interface);
 
 	mutex_unlock(&microcode_mutex);
 	put_online_cpus();
@@ -535,7 +537,7 @@
 
 	error = microcode_dev_init();
 	if (error)
-		goto out_sysdev_driver;
+		goto out_driver;
 
 	register_syscore_ops(&mc_syscore_ops);
 	register_hotcpu_notifier(&mc_cpu_notifier);
@@ -545,11 +547,11 @@
 
 	return 0;
 
-out_sysdev_driver:
+out_driver:
 	get_online_cpus();
 	mutex_lock(&microcode_mutex);
 
-	sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+	subsys_interface_unregister(&mc_cpu_interface);
 
 	mutex_unlock(&microcode_mutex);
 	put_online_cpus();
@@ -563,6 +565,8 @@
 
 static void __exit microcode_exit(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
 	microcode_dev_exit();
 
 	unregister_hotcpu_notifier(&mc_cpu_notifier);
@@ -571,7 +575,7 @@
 	get_online_cpus();
 	mutex_lock(&microcode_mutex);
 
-	sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+	subsys_interface_unregister(&mc_cpu_interface);
 
 	mutex_unlock(&microcode_mutex);
 	put_online_cpus();
@@ -580,6 +584,9 @@
 
 	microcode_ops = NULL;
 
+	if (c->x86_vendor == X86_VENDOR_AMD)
+		exit_amd_microcode();
+
 	pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
 }
 module_exit(microcode_exit);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 0741b062..ca470e4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -564,9 +564,7 @@
 
 static void __init smp_reserve_memory(struct mpf_intel *mpf)
 {
-	unsigned long size = get_mpc_size(mpf->physptr);
-
-	memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
+	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
 }
 
 static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -595,7 +593,7 @@
 			       mpf, (u64)virt_to_phys(mpf));
 
 			mem = virt_to_phys(mpf);
-			memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf");
+			memblock_reserve(mem, sizeof(*mpf));
 			if (mpf->physptr)
 				smp_reserve_memory(mpf);
 
@@ -836,10 +834,8 @@
 
 void __init early_reserve_e820_mpc_new(void)
 {
-	if (enable_update_mptable && alloc_mptable) {
-		u64 startt = 0;
-		mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
-	}
+	if (enable_update_mptable && alloc_mptable)
+		mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
 }
 
 static int __init update_mp_table(void)
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 12fcbe2..9635676 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -236,7 +236,7 @@
 	.notifier_call = msr_class_cpu_callback,
 };
 
-static char *msr_devnode(struct device *dev, mode_t *mode)
+static char *msr_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ee5d4fb..15763af 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -293,7 +293,7 @@
 	regs.orig_ax = -1;
 	regs.ip = (unsigned long) kernel_thread_helper;
 	regs.cs = __KERNEL_CS | get_kernel_rpl();
-	regs.flags = X86_EFLAGS_IF | 0x2;
+	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
 
 	/* Ok, create the new process.. */
 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 795b79f..485204f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -99,7 +99,8 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
 		while (!need_resched()) {
 
 			check_pgt_cache();
@@ -116,7 +117,8 @@
 				pm_idle();
 			start_critical_timings();
 		}
-		tick_nohz_restart_sched_tick();
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3bd7e6e..9b9fe4a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -122,7 +122,7 @@
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		tick_nohz_stop_sched_tick(1);
+		tick_nohz_idle_enter();
 		while (!need_resched()) {
 
 			rmb();
@@ -139,8 +139,14 @@
 			enter_idle();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
+
+			/* enter_idle() needs RCU for notifiers */
+			rcu_idle_enter();
+
 			if (cpuidle_idle_call())
 				pm_idle();
+
+			rcu_idle_exit();
 			start_critical_timings();
 
 			/* In many cases the interrupt that ended idle
@@ -149,7 +155,7 @@
 			__exit_idle();
 		}
 
-		tick_nohz_restart_sched_tick();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -293,13 +299,12 @@
 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
-		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
+						  IO_BITMAP_BYTES, GFP_KERNEL);
 		if (!p->thread.io_bitmap_ptr) {
 			p->thread.io_bitmap_max = 0;
 			return -ENOMEM;
 		}
-		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
-				IO_BITMAP_BYTES);
 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
 	}
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8252879..89a04c7 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -749,7 +749,8 @@
 /*
  * Handle PTRACE_POKEUSR calls for the debug register area.
  */
-int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
+static int ptrace_set_debugreg(struct task_struct *tsk, int n,
+			       unsigned long val)
 {
 	struct thread_struct *thread = &(tsk->thread);
 	int rc = 0;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9a9e40f..d05444a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -306,7 +306,8 @@
 static void __init reserve_brk(void)
 {
 	if (_brk_end > _brk_start)
-		memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
+		memblock_reserve(__pa(_brk_start),
+				 __pa(_brk_end) - __pa(_brk_start));
 
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
@@ -331,13 +332,13 @@
 	ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
 					 PAGE_SIZE);
 
-	if (ramdisk_here == MEMBLOCK_ERROR)
+	if (!ramdisk_here)
 		panic("Cannot find place for new RAMDISK of size %lld\n",
 			 ramdisk_size);
 
 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
+	memblock_reserve(ramdisk_here, area_size);
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
 	printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -393,7 +394,7 @@
 	initrd_start = 0;
 
 	if (ramdisk_size >= (end_of_lowmem>>1)) {
-		memblock_x86_free_range(ramdisk_image, ramdisk_end);
+		memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 		printk(KERN_ERR "initrd too large to handle, "
 		       "disabling initrd\n");
 		return;
@@ -416,7 +417,7 @@
 
 	relocate_initrd();
 
-	memblock_x86_free_range(ramdisk_image, ramdisk_end);
+	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
 static void __init reserve_initrd(void)
@@ -490,15 +491,13 @@
 {
 	struct setup_data *data;
 	u64 pa_data;
-	char buf[32];
 
 	if (boot_params.hdr.version < 0x0209)
 		return;
 	pa_data = boot_params.hdr.setup_data;
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
-		sprintf(buf, "setup data %x", data->type);
-		memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
+		memblock_reserve(pa_data, sizeof(*data) + data->len);
 		pa_data = data->next;
 		early_iounmap(data, sizeof(*data));
 	}
@@ -554,7 +553,7 @@
 		crash_base = memblock_find_in_range(alignment,
 			       CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
 
-		if (crash_base == MEMBLOCK_ERROR) {
+		if (!crash_base) {
 			pr_info("crashkernel reservation failed - No suitable area found.\n");
 			return;
 		}
@@ -568,7 +567,7 @@
 			return;
 		}
 	}
-	memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
+	memblock_reserve(crash_base, crash_size);
 
 	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
 			"for crashkernel (System RAM: %ldMB)\n",
@@ -626,7 +625,7 @@
 	addr = find_ibft_region(&size);
 
 	if (size)
-		memblock_x86_reserve_range(addr, addr + size, "* ibft");
+		memblock_reserve(addr, size);
 }
 
 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
@@ -691,8 +690,6 @@
 
 void __init setup_arch(char **cmdline_p)
 {
-	unsigned long end_pfn;
-
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();
@@ -934,24 +931,7 @@
 	init_gbpages();
 
 	/* max_pfn_mapped is updated here */
-	end_pfn = max_low_pfn;
-
-#ifdef CONFIG_X86_64
-	/*
-	 * There may be regions after the last E820_RAM region that we
-	 * want to include in the kernel direct mapping, such as
-	 * EFI_RUNTIME_SERVICES_DATA.
-	 */
-	if (efi_enabled) {
-		unsigned long efi_end;
-
-		efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
-		if (efi_end > max_low_pfn)
-			end_pfn = efi_end;
-	}
-#endif
-
-	max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9f548cb..e38e217 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -840,7 +840,8 @@
 	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 
 	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
-	    !physid_isset(apicid, phys_cpu_present_map)) {
+	    !physid_isset(apicid, phys_cpu_present_map) ||
+	    (!x2apic_mode && apicid >= 255)) {
 		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
 		return -EINVAL;
 	}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a91ae77..a73b610 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -14,11 +14,11 @@
 
 	/* Has to be in very low memory so we can execute real-mode AP code. */
 	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-	if (mem == MEMBLOCK_ERROR)
+	if (!mem)
 		panic("Cannot allocate trampoline\n");
 
 	x86_trampoline_base = __va(mem);
-	memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+	memblock_reserve(mem, size);
 
 	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
 	       x86_trampoline_base, (unsigned long long)mem, size);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a8e3eb8..fa1191fb 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -306,15 +306,10 @@
 			== NOTIFY_STOP)
 		return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
-#ifdef CONFIG_KPROBES
+
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
 			== NOTIFY_STOP)
 		return;
-#else
-	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
-			== NOTIFY_STOP)
-		return;
-#endif
 
 	preempt_conditional_sti(regs);
 	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index db48336..2c9cf0f 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -35,7 +35,7 @@
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-static int tsc_clocksource_reliable;
+int tsc_clocksource_reliable;
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -178,11 +178,11 @@
 }
 
 #define CAL_MS		10
-#define CAL_LATCH	(CLOCK_TICK_RATE / (1000 / CAL_MS))
+#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
 #define CAL_PIT_LOOPS	1000
 
 #define CAL2_MS		50
-#define CAL2_LATCH	(CLOCK_TICK_RATE / (1000 / CAL2_MS))
+#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
 #define CAL2_PIT_LOOPS	5000
 
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 0aa5fed8..9eba29b 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -113,7 +113,7 @@
 	if (unsynchronized_tsc())
 		return;
 
-	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+	if (tsc_clocksource_reliable) {
 		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
 			pr_info(
 			"Skipped synchronization checks as TSC is reliable.\n");
@@ -172,7 +172,7 @@
 {
 	int cpus = 2;
 
-	if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+	if (unsynchronized_tsc() || tsc_clocksource_reliable)
 		return;
 
 	/*
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e4d4a22..b07ba93 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -57,7 +57,7 @@
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
 };
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
 
 static int __init vsyscall_setup(char *str)
 {
@@ -140,11 +140,40 @@
 	return nr;
 }
 
+static bool write_ok_or_segv(unsigned long ptr, size_t size)
+{
+	/*
+	 * XXX: if access_ok, get_user, and put_user handled
+	 * sig_on_uaccess_error, this could go away.
+	 */
+
+	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
+		siginfo_t info;
+		struct thread_struct *thread = &current->thread;
+
+		thread->error_code	= 6;  /* user fault, no page, write */
+		thread->cr2		= ptr;
+		thread->trap_no		= 14;
+
+		memset(&info, 0, sizeof(info));
+		info.si_signo		= SIGSEGV;
+		info.si_errno		= 0;
+		info.si_code		= SEGV_MAPERR;
+		info.si_addr		= (void __user *)ptr;
+
+		force_sig_info(SIGSEGV, &info, current);
+		return false;
+	} else {
+		return true;
+	}
+}
+
 bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
 	struct task_struct *tsk;
 	unsigned long caller;
 	int vsyscall_nr;
+	int prev_sig_on_uaccess_error;
 	long ret;
 
 	/*
@@ -180,35 +209,65 @@
 	if (seccomp_mode(&tsk->seccomp))
 		do_exit(SIGKILL);
 
+	/*
+	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
+	 * preserve that behavior to make writing exploits harder.
+	 */
+	prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+	current_thread_info()->sig_on_uaccess_error = 1;
+
+	/*
+	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * 64-bit, so we don't need to special-case it here.  For all the
+	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * address 0".
+	 */
+	ret = -EFAULT;
 	switch (vsyscall_nr) {
 	case 0:
+		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
+		    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
+			break;
+
 		ret = sys_gettimeofday(
 			(struct timeval __user *)regs->di,
 			(struct timezone __user *)regs->si);
 		break;
 
 	case 1:
+		if (!write_ok_or_segv(regs->di, sizeof(time_t)))
+			break;
+
 		ret = sys_time((time_t __user *)regs->di);
 		break;
 
 	case 2:
+		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+		    !write_ok_or_segv(regs->si, sizeof(unsigned)))
+			break;
+
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
 				 0);
 		break;
 	}
 
+	current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+
 	if (ret == -EFAULT) {
-		/*
-		 * Bad news -- userspace fed a bad pointer to a vsyscall.
-		 *
-		 * With a real vsyscall, that would have caused SIGSEGV.
-		 * To make writing reliable exploits using the emulated
-		 * vsyscalls harder, generate SIGSEGV here as well.
-		 */
+		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall fault (exploit attempt?)");
-		goto sigsegv;
+
+		/*
+		 * If we failed to generate a signal for any reason,
+		 * generate one here.  (This should be impossible.)
+		 */
+		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
+				 !sigismember(&tsk->pending.signal, SIGSEGV)))
+			goto sigsegv;
+
+		return true;  /* Don't emulate the ret. */
 	}
 
 	regs->ax = ret;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index c1d6cd5..91f83e2 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -92,6 +92,7 @@
 
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
+	.fixup_cpu_id			= x86_default_fixup_cpu_id,
 };
 
 static void default_nmi_init(void) { };
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 76e3f1c..405f262 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -338,11 +338,15 @@
 		return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 {
+	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
 	struct kvm_timer *pt = &ps->pit_timer;
 	s64 interval;
 
+	if (!irqchip_in_kernel(kvm))
+		return;
+
 	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
 	pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@
         /* FIXME: enhance mode 4 precision */
 	case 4:
 		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-			create_pit_timer(ps, val, 0);
+			create_pit_timer(kvm, val, 0);
 		}
 		break;
 	case 2:
 	case 3:
 		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-			create_pit_timer(ps, val, 1);
+			create_pit_timer(kvm, val, 1);
 		}
 		break;
 	default:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c38efd7..4c938da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -602,7 +602,6 @@
 {
 	struct kvm_cpuid_entry2 *best;
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	u32 timer_mode_mask;
 
 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
 	if (!best)
@@ -615,15 +614,12 @@
 			best->ecx |= bit(X86_FEATURE_OSXSAVE);
 	}
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-		best->function == 0x1) {
-		best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
-		timer_mode_mask = 3 << 17;
-	} else
-		timer_mode_mask = 1 << 17;
-
-	if (apic)
-		apic->lapic_timer.timer_mode_mask = timer_mode_mask;
+	if (apic) {
+		if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+			apic->lapic_timer.timer_mode_mask = 3 << 17;
+		else
+			apic->lapic_timer.timer_mode_mask = 1 << 17;
+	}
 }
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -2135,6 +2131,9 @@
 	case KVM_CAP_TSC_CONTROL:
 		r = kvm_has_tsc_control;
 		break;
+	case KVM_CAP_TSC_DEADLINE_TIMER:
+		r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+		break;
 	default:
 		r = 0;
 		break;
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c
index 46fc4ee..88ad5fb 100644
--- a/arch/x86/lib/inat.c
+++ b/arch/x86/lib/inat.c
@@ -82,9 +82,16 @@
 	const insn_attr_t *table;
 	if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
 		return 0;
-	table = inat_avx_tables[vex_m][vex_p];
+	/* At first, this checks the master table */
+	table = inat_avx_tables[vex_m][0];
 	if (!table)
 		return 0;
+	if (!inat_is_group(table[opcode]) && vex_p) {
+		/* If this is not a group, get attribute directly */
+		table = inat_avx_tables[vex_m][vex_p];
+		if (!table)
+			return 0;
+	}
 	return table[opcode];
 }
 
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 374562e..5a1f9f3 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -202,7 +202,7 @@
 		m = insn_vex_m_bits(insn);
 		p = insn_vex_p_bits(insn);
 		insn->attr = inat_get_avx_attribute(op, m, p);
-		if (!inat_accept_vex(insn->attr))
+		if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr))
 			insn->attr = 0;	/* This instruction is bad */
 		goto end;	/* VEX has only 1 byte for opcode */
 	}
@@ -249,6 +249,8 @@
 			pfx = insn_last_prefix(insn);
 			insn->attr = inat_get_group_attribute(mod, pfx,
 							      insn->attr);
+			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+				insn->attr = 0;	/* This is bad */
 		}
 	}
 
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 82004d2..bd59090 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -164,15 +164,13 @@
 size_t strlen(const char *s)
 {
 	int d0;
-	int res;
+	size_t res;
 	asm volatile("repne\n\t"
-		"scasb\n\t"
-		"notl %0\n\t"
-		"decl %0"
+		"scasb"
 		: "=c" (res), "=&D" (d0)
 		: "1" (s), "a" (0), "0" (0xffffffffu)
 		: "memory");
-	return res;
+	return ~res - 1;
 }
 EXPORT_SYMBOL(strlen);
 #endif
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index a793da5..5b83c51 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1,5 +1,11 @@
 # x86 Opcode Maps
 #
+# This is (mostly) based on the following documentation:
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2
+#   (#325383-040US, October 2011)
+# - Intel(R) Advanced Vector Extensions Programming Reference
+#   (#319433-011, June 2011).
+#
 #<Opcode maps>
 # Table: table-name
 # Referrer: escaped-name
@@ -15,10 +21,13 @@
 # EndTable
 #
 # AVX Superscripts
-#  (VEX): this opcode can accept VEX prefix.
-#  (oVEX): this opcode requires VEX prefix.
-#  (o128): this opcode only supports 128bit VEX.
-#  (o256): this opcode only supports 256bit VEX.
+#  (v): this opcode requires VEX prefix.
+#  (v1): this opcode only supports 128bit VEX.
+#
+# Last Prefix Superscripts
+#  - (66): the last prefix is 0x66
+#  - (F3): the last prefix is 0xF3
+#  - (F2): the last prefix is 0xF2
 #
 
 Table: one byte opcode
@@ -199,8 +208,8 @@
 a1: MOV rAX,Ov
 a2: MOV Ob,AL
 a3: MOV Ov,rAX
-a4: MOVS/B Xb,Yb
-a5: MOVS/W/D/Q Xv,Yv
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
 a6: CMPS/B Xb,Yb
 a7: CMPS/W/D Xv,Yv
 a8: TEST AL,Ib
@@ -233,8 +242,8 @@
 c1: Grp2 Ev,Ib (1A)
 c2: RETN Iw (f64)
 c3: RETN
-c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix)
-c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
 c6: Grp11 Eb,Ib (1A)
 c7: Grp11 Ev,Iz (1A)
 c8: ENTER Iw,Ib
@@ -320,14 +329,19 @@
 # 3DNow! uses the last imm byte as opcode extension.
 0f: 3DNow! Pq,Qq,Ib
 # 0x0f 0x10-0x1f
-10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128)
-11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128)
-12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX)
-13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128)
-14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX)
-15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX)
-16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX)
-17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128)
+# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
+# operands, but they actually do. Also, vmovss and vmovsd only accept 128bit.
+# MOVSS/MOVSD have too many forms (3) in the SDM; this map just shows a typical form.
+# Many AVX instructions lack the v1 superscript, according to the Intel AVX
+# Programming Reference A.1.
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
 18: Grp16 (1A)
 19:
 1a:
@@ -345,14 +359,14 @@
 25:
 26:
 27:
-28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX)
-29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX)
-2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128)
-2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX)
-2c: cvttps2pi Ppi,Wps | cvttss2si  Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128)
-2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128)
-2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd  Vsd,Wsd (66),(VEX),(o128)
-2f: comiss Vss,Wss (VEX),(o128) | comisd  Vsd,Wsd (66),(VEX),(o128)
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd  Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd  Vsd,Wsd (66),(v1)
 # 0x0f 0x30-0x3f
 30: WRMSR
 31: RDTSC
@@ -388,65 +402,66 @@
 4e: CMOVLE/NG Gv,Ev
 4f: CMOVNLE/G Gv,Ev
 # 0x0f 0x50-0x5f
-50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX)
-51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128)
-52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128)
-53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128)
-54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX)
-55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX)
-56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX)
-57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX)
-58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128)
-59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128)
-5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128)
-5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX)
-5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128)
-5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128)
-5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128)
-5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128)
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
 # 0x0f 0x60-0x6f
-60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128)
-61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128)
-62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128)
-63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128)
-64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128)
-65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128)
-66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128)
-67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128)
-68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128)
-69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128)
-6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128)
-6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128)
-6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128)
-6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128)
-6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128)
-6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX)
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3)
 # 0x0f 0x70-0x7f
-70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128)
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
 71: Grp12 (1A)
 72: Grp13 (1A)
 73: Grp14 (1A)
-74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128)
-75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128)
-76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128)
-77: emms/vzeroupper/vzeroall (VEX)
-78: VMREAD Ed/q,Gd/q
-79: VMWRITE Gd/q,Ed/q
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy
+79: VMWRITE Gy,Ey
 7a:
 7b:
-7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX)
-7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX)
-7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128)
-7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX)
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3)
 # 0x0f 0x80-0x8f
 80: JO Jz (f64)
 81: JNO Jz (f64)
-82: JB/JNAE/JC Jz (f64)
-83: JNB/JAE/JNC Jz (f64)
-84: JZ/JE Jz (f64)
-85: JNZ/JNE Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
 86: JBE/JNA Jz (f64)
-87: JNBE/JA Jz (f64)
+87: JA/JNBE Jz (f64)
 88: JS Jz (f64)
 89: JNS Jz (f64)
 8a: JP/JPE Jz (f64)
@@ -502,18 +517,18 @@
 b9: Grp10 (1A)
 ba: Grp8 Ev,Ib (1A)
 bb: BTC Ev,Gv
-bc: BSF Gv,Ev
-bd: BSR Gv,Ev
+bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
 be: MOVSX Gv,Eb
 bf: MOVSX Gv,Ew
 # 0x0f 0xc0-0xcf
 c0: XADD Eb,Gb
 c1: XADD Ev,Gv
-c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX)
-c3: movnti Md/q,Gd/q
-c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128)
-c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128)
-c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX)
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
 c7: Grp9 (1A)
 c8: BSWAP RAX/EAX/R8/R8D
 c9: BSWAP RCX/ECX/R9/R9D
@@ -524,55 +539,55 @@
 ce: BSWAP RSI/ESI/R14/R14D
 cf: BSWAP RDI/EDI/R15/R15D
 # 0x0f 0xd0-0xdf
-d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX)
-d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128)
-d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128)
-d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128)
-d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128)
-d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128)
-d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
-d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128)
-d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128)
-d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128)
-da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128)
-db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128)
-dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128)
-dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128)
-de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128)
-df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128)
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1)
 # 0x0f 0xe0-0xef
-e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128)
-e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128)
-e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128)
-e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128)
-e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128)
-e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128)
-e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX)
-e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX)
-e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128)
-e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128)
-ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128)
-eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128)
-ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128)
-ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128)
-ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128)
-ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128)
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1)
 # 0x0f 0xf0-0xff
-f0: lddqu Vdq,Mdq (F2),(VEX)
-f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128)
-f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128)
-f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128)
-f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128)
-f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128)
-f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128)
-f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128)
-f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128)
-f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128)
-fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128)
-fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128)
-fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128)
-fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128)
-fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128)
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
 ff:
 EndTable
 
@@ -580,155 +595,193 @@
 Referrer: 3-byte escape 1
 AVXcode: 2
 # 0x0f 0x38 0x00-0x0f
-00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128)
-01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128)
-02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128)
-03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128)
-04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128)
-05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128)
-06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128)
-07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128)
-08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128)
-09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128)
-0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128)
-0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128)
-0c: Vpermilps /r (66),(oVEX)
-0d: Vpermilpd /r (66),(oVEX)
-0e: vtestps /r (66),(oVEX)
-0f: vtestpd /r (66),(oVEX)
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
 # 0x0f 0x38 0x10-0x1f
 10: pblendvb Vdq,Wdq (66)
 11:
 12:
-13:
+13: vcvtph2ps Vx,Wx,Ib (66),(v)
 14: blendvps Vdq,Wdq (66)
 15: blendvpd Vdq,Wdq (66)
-16:
-17: ptest Vdq,Wdq (66),(VEX)
-18: vbroadcastss /r (66),(oVEX)
-19: vbroadcastsd /r (66),(oVEX),(o256)
-1a: vbroadcastf128 /r (66),(oVEX),(o256)
+16: vpermps Vqq,Hqq,Wqq (66),(v)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v)
+1a: vbroadcastf128 Vqq,Mdq (66),(v)
 1b:
-1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128)
-1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128)
-1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128)
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
 1f:
 # 0x0f 0x38 0x20-0x2f
-20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128)
-21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128)
-22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128)
-23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128)
-24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128)
-25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128)
+20: vpmovsxbw Vx,Ux/Mq (66),(v1)
+21: vpmovsxbd Vx,Ux/Md (66),(v1)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1)
+24: vpmovsxwq Vx,Ux/Md (66),(v1)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1)
 26:
 27:
-28: pmuldq Vdq,Wdq (66),(VEX),(o128)
-29: pcmpeqq Vdq,Wdq (66),(VEX),(o128)
-2a: movntdqa Vdq,Mdq (66),(VEX),(o128)
-2b: packusdw Vdq,Wdq (66),(VEX),(o128)
-2c: vmaskmovps(ld) /r (66),(oVEX)
-2d: vmaskmovpd(ld) /r (66),(oVEX)
-2e: vmaskmovps(st) /r (66),(oVEX)
-2f: vmaskmovpd(st) /r (66),(oVEX)
+28: vpmuldq Vx,Hx,Wx (66),(v1)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1)
+2a: vmovntdqa Vx,Mx (66),(v1)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
 # 0x0f 0x38 0x30-0x3f
-30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128)
-31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128)
-32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128)
-33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128)
-34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128)
-35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128)
-36:
-37: pcmpgtq Vdq,Wdq (66),(VEX),(o128)
-38: pminsb Vdq,Wdq (66),(VEX),(o128)
-39: pminsd Vdq,Wdq (66),(VEX),(o128)
-3a: pminuw Vdq,Wdq (66),(VEX),(o128)
-3b: pminud Vdq,Wdq (66),(VEX),(o128)
-3c: pmaxsb Vdq,Wdq (66),(VEX),(o128)
-3d: pmaxsd Vdq,Wdq (66),(VEX),(o128)
-3e: pmaxuw Vdq,Wdq (66),(VEX),(o128)
-3f: pmaxud Vdq,Wdq (66),(VEX),(o128)
+30: vpmovzxbw Vx,Ux/Mq (66),(v1)
+31: vpmovzxbd Vx,Ux/Md (66),(v1)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1)
+34: vpmovzxwq Vx,Ux/Md (66),(v1)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1)
+36: vpermd Vqq,Hqq,Wqq (66),(v)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1)
+39: vpminsd Vx,Hx,Wx (66),(v1)
+3a: vpminuw Vx,Hx,Wx (66),(v1)
+3b: vpminud Vx,Hx,Wx (66),(v1)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1)
 # 0x0f 0x38 0x40-0x8f
-40: pmulld Vdq,Wdq (66),(VEX),(o128)
-41: phminposuw Vdq,Wdq (66),(VEX),(o128)
-80: INVEPT Gd/q,Mdq (66)
-81: INVPID Gd/q,Mdq (66)
+40: vpmulld Vx,Hx,Wx (66),(v1)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42:
+43:
+44:
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v)
+5a: vbroadcasti128 Vqq,Mdq (66),(v)
+# Skip 0x5b-0x77
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+# Skip 0x7a-0x7f
+80: INVEPT Gy,Mdq (66)
+81: INVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
 # 0x0f 0x38 0x90-0xbf (FMA)
-96: vfmaddsub132pd/ps /r (66),(VEX)
-97: vfmsubadd132pd/ps /r (66),(VEX)
-98: vfmadd132pd/ps /r (66),(VEX)
-99: vfmadd132sd/ss /r (66),(VEX),(o128)
-9a: vfmsub132pd/ps /r (66),(VEX)
-9b: vfmsub132sd/ss /r (66),(VEX),(o128)
-9c: vfnmadd132pd/ps /r (66),(VEX)
-9d: vfnmadd132sd/ss /r (66),(VEX),(o128)
-9e: vfnmsub132pd/ps /r (66),(VEX)
-9f: vfnmsub132sd/ss /r (66),(VEX),(o128)
-a6: vfmaddsub213pd/ps /r (66),(VEX)
-a7: vfmsubadd213pd/ps /r (66),(VEX)
-a8: vfmadd213pd/ps /r (66),(VEX)
-a9: vfmadd213sd/ss /r (66),(VEX),(o128)
-aa: vfmsub213pd/ps /r (66),(VEX)
-ab: vfmsub213sd/ss /r (66),(VEX),(o128)
-ac: vfnmadd213pd/ps /r (66),(VEX)
-ad: vfnmadd213sd/ss /r (66),(VEX),(o128)
-ae: vfnmsub213pd/ps /r (66),(VEX)
-af: vfnmsub213sd/ss /r (66),(VEX),(o128)
-b6: vfmaddsub231pd/ps /r (66),(VEX)
-b7: vfmsubadd231pd/ps /r (66),(VEX)
-b8: vfmadd231pd/ps /r (66),(VEX)
-b9: vfmadd231sd/ss /r (66),(VEX),(o128)
-ba: vfmsub231pd/ps /r (66),(VEX)
-bb: vfmsub231sd/ss /r (66),(VEX),(o128)
-bc: vfnmadd231pd/ps /r (66),(VEX)
-bd: vfnmadd231sd/ss /r (66),(VEX),(o128)
-be: vfnmsub231pd/ps /r (66),(VEX)
-bf: vfnmsub231sd/ss /r (66),(VEX),(o128)
+90: vgatherdd/q Vx,Hx,Wx (66),(v)
+91: vgatherqd/q Vx,Hx,Wx (66),(v)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
 # 0x0f 0x38 0xc0-0xff
-db: aesimc Vdq,Wdq (66),(VEX),(o128)
-dc: aesenc Vdq,Wdq (66),(VEX),(o128)
-dd: aesenclast Vdq,Wdq (66),(VEX),(o128)
-de: aesdec Vdq,Wdq (66),(VEX),(o128)
-df: aesdeclast Vdq,Wdq (66),(VEX),(o128)
-f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2)
-f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2)
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
+f3: ANDN Gy,By,Ey (v)
+f4: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
 EndTable
 
 Table: 3-byte opcode 2 (0x0f 0x3a)
 Referrer: 3-byte escape 2
 AVXcode: 3
 # 0x0f 0x3a 0x00-0xff
-04: vpermilps /r,Ib (66),(oVEX)
-05: vpermilpd /r,Ib (66),(oVEX)
-06: vperm2f128 /r,Ib (66),(oVEX),(o256)
-08: roundps Vdq,Wdq,Ib (66),(VEX)
-09: roundpd Vdq,Wdq,Ib (66),(VEX)
-0a: roundss Vss,Wss,Ib (66),(VEX),(o128)
-0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128)
-0c: blendps Vdq,Wdq,Ib (66),(VEX)
-0d: blendpd Vdq,Wdq,Ib (66),(VEX)
-0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128)
-0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128)
-14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128)
-15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128)
-16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128)
-17: extractps Ed,Vdq,Ib (66),(VEX),(o128)
-18: vinsertf128 /r,Ib (66),(oVEX),(o256)
-19: vextractf128 /r,Ib (66),(oVEX),(o256)
-20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128)
-21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128)
-22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128)
-40: dpps Vdq,Wdq,Ib (66),(VEX)
-41: dppd Vdq,Wdq,Ib (66),(VEX),(o128)
-42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128)
-44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128)
-4a: vblendvps /r,Ib (66),(oVEX)
-4b: vblendvpd /r,Ib (66),(oVEX)
-4c: vpblendvb /r,Ib (66),(oVEX),(o128)
-60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128)
-61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128)
-62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128)
-63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128)
-df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128)
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03:
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66)
+09: vroundpd Vx,Wx,Ib (66)
+0a: vroundss Vss,Wss,Ib (66),(v1)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v)
+19: vextractf128 Wdq,Vqq,Ib (66),(v)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v)
+39: vextracti128 Wdq,Vqq,Ib (66),(v)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
 EndTable
 
 GrpTable: Grp1
@@ -790,7 +843,7 @@
 2: CALLN Ev (f64)
 3: CALLF Ep
 4: JMPN Ev (f64)
-5: JMPF Ep
+5: JMPF Mp
 6: PUSH Ev (d64)
 7:
 EndTable
@@ -807,7 +860,7 @@
 GrpTable: Grp7
 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B)
 3: LIDT Ms
 4: SMSW Mw/Rv
 5:
@@ -824,44 +877,45 @@
 
 GrpTable: Grp9
 1: CMPXCHG8B/16B Mq/Mdq
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3)
-7: VMPTRST Mq
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3)
 EndTable
 
 GrpTable: Grp10
 EndTable
 
 GrpTable: Grp11
+# Note: the operands are given by the group opcode
 0: MOV
 EndTable
 
 GrpTable: Grp12
-2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128)
-4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128)
-6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128)
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
 EndTable
 
 GrpTable: Grp13
-2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128)
-4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128)
-6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128)
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
 EndTable
 
 GrpTable: Grp14
-2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128)
-3: psrldq Udq,Ib (66),(11B),(VEX),(o128)
-6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128)
-7: pslldq Udq,Ib (66),(11B),(VEX),(o128)
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
 EndTable
 
 GrpTable: Grp15
-0: fxsave
-1: fxstor
-2: ldmxcsr (VEX)
-3: stmxcsr (VEX)
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
 4: XSAVE
 5: XRSTOR | lfence (11B)
-6: mfence (11B)
+6: XSAVEOPT | mfence (11B)
 7: clflush | sfence (11B)
 EndTable
 
@@ -872,6 +926,12 @@
 3: prefetch T2
 EndTable
 
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
 # AMD's Prefetch Group
 GrpTable: GrpP
 0: PREFETCH
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 3d11327..23d8e5f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -27,6 +27,4 @@
 obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_HAVE_MEMBLOCK)		+= memblock.o
-
 obj-$(CONFIG_MEMTEST)		+= memtest.o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index d0474ad..1fb85db 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -25,7 +25,7 @@
 	if (fixup) {
 		/* If fixup is less than 16, it means uaccess error */
 		if (fixup->fixup < 16) {
-			current_thread_info()->uaccess_err = -EFAULT;
+			current_thread_info()->uaccess_err = 1;
 			regs->ip += fixup->fixup;
 			return 1;
 		}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5db0490..9d74824 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -626,7 +626,7 @@
 
 static noinline void
 no_context(struct pt_regs *regs, unsigned long error_code,
-	   unsigned long address)
+	   unsigned long address, int signal, int si_code)
 {
 	struct task_struct *tsk = current;
 	unsigned long *stackend;
@@ -634,8 +634,17 @@
 	int sig;
 
 	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs))
+	if (fixup_exception(regs)) {
+		if (current_thread_info()->sig_on_uaccess_error && signal) {
+			tsk->thread.trap_no = 14;
+			tsk->thread.error_code = error_code | PF_USER;
+			tsk->thread.cr2 = address;
+
+			/* XXX: hwpoison faults will set the wrong code. */
+			force_sig_info_fault(signal, si_code, address, tsk, 0);
+		}
 		return;
+	}
 
 	/*
 	 * 32-bit:
@@ -755,7 +764,7 @@
 	if (is_f00f_bug(regs, address))
 		return;
 
-	no_context(regs, error_code, address);
+	no_context(regs, error_code, address, SIGSEGV, si_code);
 }
 
 static noinline void
@@ -819,7 +828,7 @@
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!(error_code & PF_USER)) {
-		no_context(regs, error_code, address);
+		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 		return;
 	}
 
@@ -854,7 +863,7 @@
 		if (!(fault & VM_FAULT_RETRY))
 			up_read(&current->mm->mmap_sem);
 		if (!(error_code & PF_USER))
-			no_context(regs, error_code, address);
+			no_context(regs, error_code, address, 0, 0);
 		return 1;
 	}
 	if (!(fault & VM_FAULT_ERROR))
@@ -864,7 +873,8 @@
 		/* Kernel mode? Handle exceptions or die: */
 		if (!(error_code & PF_USER)) {
 			up_read(&current->mm->mmap_sem);
-			no_context(regs, error_code, address);
+			no_context(regs, error_code, address,
+				   SIGSEGV, SEGV_MAPERR);
 			return 1;
 		}
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 87488b9..a298914 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -67,7 +67,7 @@
 	good_end = max_pfn_mapped << PAGE_SHIFT;
 
 	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
-	if (base == MEMBLOCK_ERROR)
+	if (!base)
 		panic("Cannot find space for the kernel page tables");
 
 	pgt_buf_start = base >> PAGE_SHIFT;
@@ -80,7 +80,7 @@
 
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
-	memblock_x86_reserve_range(start, end, "PGTABLE");
+	memblock_reserve(start, end - start);
 }
 
 struct map_range {
@@ -279,8 +279,8 @@
 	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
 	 * so that they can be reused for other purposes.
 	 *
-	 * On native it just means calling memblock_x86_reserve_range, on Xen it
-	 * also means marking RW the pagetable pages that we allocated before
+	 * On native it just means calling memblock_reserve, on Xen it also
+	 * means marking RW the pagetable pages that we allocated before
 	 * but that haven't been used.
 	 *
 	 * In fact on xen we mark RO the whole range pgt_buf_start -
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 29f7c6d9..0c1da39 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,23 +427,17 @@
 void __init add_highpages_with_active_regions(int nid,
 			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct range *range;
-	int nr_range;
-	int i;
+	phys_addr_t start, end;
+	u64 i;
 
-	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
-
-	for (i = 0; i < nr_range; i++) {
-		struct page *page;
-		int node_pfn;
-
-		for (node_pfn = range[i].start; node_pfn < range[i].end;
-		     node_pfn++) {
-			if (!pfn_valid(node_pfn))
-				continue;
-			page = pfn_to_page(node_pfn);
-			add_one_highpage_init(page);
-		}
+	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
+					    start_pfn, end_pfn);
+		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
+					      start_pfn, end_pfn);
+		for ( ; pfn < e_pfn; pfn++)
+			if (pfn_valid(pfn))
+				add_one_highpage_init(pfn_to_page(pfn));
 	}
 }
 #else
@@ -650,18 +644,18 @@
 	highstart_pfn = highend_pfn = max_pfn;
 	if (max_pfn > max_low_pfn)
 		highstart_pfn = max_low_pfn;
-	memblock_x86_register_active_regions(0, 0, highend_pfn);
-	sparse_memory_present_with_active_regions(0);
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 		pages_to_mb(highend_pfn - highstart_pfn));
 	num_physpages = highend_pfn;
 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-	memblock_x86_register_active_regions(0, 0, max_low_pfn);
-	sparse_memory_present_with_active_regions(0);
 	num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
+
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+	sparse_memory_present_with_active_regions(0);
+
 #ifdef CONFIG_FLATMEM
 	max_mapnr = num_physpages;
 #endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bbaaa00..a8a56ce 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -608,7 +608,7 @@
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
-	memblock_x86_register_active_regions(0, 0, max_pfn);
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
 }
 #endif
 
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
deleted file mode 100644
index 992da5e..0000000
--- a/arch/x86/mm/memblock.c
+++ /dev/null
@@ -1,348 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-/* Check for already reserved areas */
-bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
-{
-	struct memblock_region *r;
-	u64 addr = *addrp, last;
-	u64 size = *sizep;
-	bool changed = false;
-
-again:
-	last = addr + size;
-	for_each_memblock(reserved, r) {
-		if (last > r->base && addr < r->base) {
-			size = r->base - addr;
-			changed = true;
-			goto again;
-		}
-		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
-			addr = round_up(r->base + r->size, align);
-			size = last - addr;
-			changed = true;
-			goto again;
-		}
-		if (last <= (r->base + r->size) && addr >= r->base) {
-			*sizep = 0;
-			return false;
-		}
-	}
-	if (changed) {
-		*addrp = addr;
-		*sizep = size;
-	}
-	return changed;
-}
-
-/*
- * Find next free range after start, and size is returned in *sizep
- */
-u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
-{
-	struct memblock_region *r;
-
-	for_each_memblock(memory, r) {
-		u64 ei_start = r->base;
-		u64 ei_last = ei_start + r->size;
-		u64 addr;
-
-		addr = round_up(ei_start, align);
-		if (addr < start)
-			addr = round_up(start, align);
-		if (addr >= ei_last)
-			continue;
-		*sizep = ei_last - addr;
-		while (memblock_x86_check_reserved_size(&addr, sizep, align))
-			;
-
-		if (*sizep)
-			return addr;
-	}
-
-	return MEMBLOCK_ERROR;
-}
-
-static __init struct range *find_range_array(int count)
-{
-	u64 end, size, mem;
-	struct range *range;
-
-	size = sizeof(struct range) * count;
-	end = memblock.current_limit;
-
-	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
-	if (mem == MEMBLOCK_ERROR)
-		panic("can not find more space for range array");
-
-	/*
-	 * This range is tempoaray, so don't reserve it, it will not be
-	 * overlapped because We will not alloccate new buffer before
-	 * We discard this one
-	 */
-	range = __va(mem);
-	memset(range, 0, size);
-
-	return range;
-}
-
-static void __init memblock_x86_subtract_reserved(struct range *range, int az)
-{
-	u64 final_start, final_end;
-	struct memblock_region *r;
-
-	/* Take out region array itself at first*/
-	memblock_free_reserved_regions();
-
-	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
-
-	for_each_memblock(reserved, r) {
-		memblock_dbg("  [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
-		final_start = PFN_DOWN(r->base);
-		final_end = PFN_UP(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		subtract_range(range, az, final_start, final_end);
-	}
-
-	/* Put region array back ? */
-	memblock_reserve_reserved_regions();
-}
-
-struct count_data {
-	int nr;
-};
-
-static int __init count_work_fn(unsigned long start_pfn,
-				unsigned long end_pfn, void *datax)
-{
-	struct count_data *data = datax;
-
-	data->nr++;
-
-	return 0;
-}
-
-static int __init count_early_node_map(int nodeid)
-{
-	struct count_data data;
-
-	data.nr = 0;
-	work_with_active_regions(nodeid, count_work_fn, &data);
-
-	return data.nr;
-}
-
-int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
-			 unsigned long start_pfn, unsigned long end_pfn)
-{
-	int count;
-	struct range *range;
-	int nr_range;
-
-	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
-
-	range = find_range_array(count);
-	nr_range = 0;
-
-	/*
-	 * Use early_node_map[] and memblock.reserved.region to get range array
-	 * at first
-	 */
-	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-	subtract_range(range, count, 0, start_pfn);
-	subtract_range(range, count, end_pfn, -1ULL);
-
-	memblock_x86_subtract_reserved(range, count);
-	nr_range = clean_sort_range(range, count);
-
-	*rangep = range;
-	return nr_range;
-}
-
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
-{
-	unsigned long end_pfn = -1UL;
-
-#ifdef CONFIG_X86_32
-	end_pfn = max_low_pfn;
-#endif
-	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
-}
-
-static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
-{
-	int i, count;
-	struct range *range;
-	int nr_range;
-	u64 final_start, final_end;
-	u64 free_size;
-	struct memblock_region *r;
-
-	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;
-
-	range = find_range_array(count);
-	nr_range = 0;
-
-	addr = PFN_UP(addr);
-	limit = PFN_DOWN(limit);
-
-	for_each_memblock(memory, r) {
-		final_start = PFN_UP(r->base);
-		final_end = PFN_DOWN(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		if (final_start >= limit || final_end <= addr)
-			continue;
-
-		nr_range = add_range(range, count, nr_range, final_start, final_end);
-	}
-	subtract_range(range, count, 0, addr);
-	subtract_range(range, count, limit, -1ULL);
-
-	/* Subtract memblock.reserved.region in range ? */
-	if (!get_free)
-		goto sort_and_count_them;
-	for_each_memblock(reserved, r) {
-		final_start = PFN_DOWN(r->base);
-		final_end = PFN_UP(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		if (final_start >= limit || final_end <= addr)
-			continue;
-
-		subtract_range(range, count, final_start, final_end);
-	}
-
-sort_and_count_them:
-	nr_range = clean_sort_range(range, count);
-
-	free_size = 0;
-	for (i = 0; i < nr_range; i++)
-		free_size += range[i].end - range[i].start;
-
-	return free_size << PAGE_SHIFT;
-}
-
-u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
-{
-	return __memblock_x86_memory_in_range(addr, limit, true);
-}
-
-u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
-{
-	return __memblock_x86_memory_in_range(addr, limit, false);
-}
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
-	if (start == end)
-		return;
-
-	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-
-	memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-
-	memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
-	if (start == end)
-		return;
-
-	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-
-	memblock_dbg("       memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-
-	memblock_free(start, end - start);
-}
-
-/*
- * Need to call this function after memblock_x86_register_active_regions,
- * so early_node_map[] is filled already.
- */
-u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
-	u64 addr;
-	addr = find_memory_core_early(nid, size, align, start, end);
-	if (addr != MEMBLOCK_ERROR)
-		return addr;
-
-	/* Fallback, should already have start end within node range */
-	return memblock_find_in_range(start, end, size, align);
-}
-
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
- */
-static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
-				  unsigned long start_pfn,
-				  unsigned long last_pfn,
-				  unsigned long *ei_startpfn,
-				  unsigned long *ei_endpfn)
-{
-	u64 align = PAGE_SIZE;
-
-	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
-	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
-
-	/* Skip map entries smaller than a page */
-	if (*ei_startpfn >= *ei_endpfn)
-		return 0;
-
-	/* Skip if map is outside the node */
-	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
-		return 0;
-
-	/* Check for overlaps */
-	if (*ei_startpfn < start_pfn)
-		*ei_startpfn = start_pfn;
-	if (*ei_endpfn > last_pfn)
-		*ei_endpfn = last_pfn;
-
-	return 1;
-}
-
-/* Walk the memblock.memory map and register active regions within a node */
-void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long last_pfn)
-{
-	unsigned long ei_startpfn;
-	unsigned long ei_endpfn;
-	struct memblock_region *r;
-
-	for_each_memblock(memory, r)
-		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
-					   &ei_startpfn, &ei_endpfn))
-			add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init memblock_x86_hole_size(u64 start, u64 end)
-{
-	unsigned long start_pfn = start >> PAGE_SHIFT;
-	unsigned long last_pfn = end >> PAGE_SHIFT;
-	unsigned long ei_startpfn, ei_endpfn, ram = 0;
-	struct memblock_region *r;
-
-	for_each_memblock(memory, r)
-		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
-					   &ei_startpfn, &ei_endpfn))
-			ram += ei_endpfn - ei_startpfn;
-
-	return end - start - ((u64)ram << PAGE_SHIFT);
-}
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 92faf3a..c80b9fb 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -34,7 +34,7 @@
 	       (unsigned long long) pattern,
 	       (unsigned long long) start_bad,
 	       (unsigned long long) end_bad);
-	memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+	memblock_reserve(start_bad, end_bad - start_bad);
 }
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -70,24 +70,19 @@
 
 static void __init do_one_pass(u64 pattern, u64 start, u64 end)
 {
-	u64 size = 0;
+	u64 i;
+	phys_addr_t this_start, this_end;
 
-	while (start < end) {
-		start = memblock_x86_find_in_range_size(start, &size, 1);
-
-		/* done ? */
-		if (start >= end)
-			break;
-		if (start + size > end)
-			size = end - start;
-
-		printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
-		       (unsigned long long) start,
-		       (unsigned long long) start + size,
-		       (unsigned long long) cpu_to_be64(pattern));
-		memtest(pattern, start, size);
-
-		start += size;
+	for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+		this_start = clamp_t(phys_addr_t, this_start, start, end);
+		this_end = clamp_t(phys_addr_t, this_end, start, end);
+		if (this_start < this_end) {
+			printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
+			       (unsigned long long)this_start,
+			       (unsigned long long)this_end,
+			       (unsigned long long)cpu_to_be64(pattern));
+			memtest(pattern, this_start, this_end - this_start);
+		}
 	}
 }
 
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index fbeaaf4..496f494 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -192,8 +192,6 @@
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start, u64 end)
 {
-	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
-	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 	bool remapped = false;
 	u64 nd_pa;
@@ -224,17 +222,12 @@
 		nd_pa = __pa(nd);
 		remapped = true;
 	} else {
-		nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
-						nd_size, SMP_CACHE_BYTES);
-		if (nd_pa == MEMBLOCK_ERROR)
-			nd_pa = memblock_find_in_range(nd_low, nd_high,
-						nd_size, SMP_CACHE_BYTES);
-		if (nd_pa == MEMBLOCK_ERROR) {
+		nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+		if (!nd_pa) {
 			pr_err("Cannot find %zu bytes in node %d\n",
 			       nd_size, nid);
 			return;
 		}
-		memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
 		nd = __va(nd_pa);
 	}
 
@@ -371,8 +364,7 @@
 
 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_x86_free_range(__pa(numa_distance),
-					__pa(numa_distance) + size);
+		memblock_free(__pa(numa_distance), size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }
@@ -395,13 +387,13 @@
 
 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 				      size, PAGE_SIZE);
-	if (phys == MEMBLOCK_ERROR) {
+	if (!phys) {
 		pr_warning("NUMA: Warning: can't allocate distance table!\n");
 		/* don't retry until explicitly reset */
 		numa_distance = (void *)1LU;
 		return -ENOMEM;
 	}
-	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+	memblock_reserve(phys, size);
 
 	numa_distance = __va(phys);
 	numa_distance_cnt = cnt;
@@ -482,8 +474,8 @@
 			numaram = 0;
 	}
 
-	e820ram = max_pfn - (memblock_x86_hole_size(0,
-					PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
 	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
 		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
@@ -505,13 +497,10 @@
 	if (WARN_ON(nodes_empty(node_possible_map)))
 		return -EINVAL;
 
-	for (i = 0; i < mi->nr_blks; i++)
-		memblock_x86_register_active_regions(mi->blk[i].nid,
-					mi->blk[i].start >> PAGE_SHIFT,
-					mi->blk[i].end >> PAGE_SHIFT);
-
-	/* for out of order entries */
-	sort_node_map();
+	for (i = 0; i < mi->nr_blks; i++) {
+		struct numa_memblk *mb = &mi->blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+	}
 
 	/*
 	 * If sections array is gonna be used for pfn -> nid mapping, check
@@ -545,6 +534,8 @@
 			setup_node_data(nid, start, end);
 	}
 
+	/* Dump memblock with node info and return. */
+	memblock_dump_all();
 	return 0;
 }
 
@@ -582,7 +573,7 @@
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-	remove_all_active_ranges();
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
 	numa_reset_distance();
 
 	ret = init_func();
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3adebe7..534255a 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -199,23 +199,23 @@
 
 	/* allocate node memory and the lowmem remap area */
 	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
-	if (node_pa == MEMBLOCK_ERROR) {
+	if (!node_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
 			   size, nid);
 		return;
 	}
-	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+	memblock_reserve(node_pa, size);
 
 	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
 					  max_low_pfn << PAGE_SHIFT,
 					  size, LARGE_PAGE_BYTES);
-	if (remap_pa == MEMBLOCK_ERROR) {
+	if (!remap_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
 			   size, nid);
-		memblock_x86_free_range(node_pa, node_pa + size);
+		memblock_free(node_pa, size);
 		return;
 	}
-	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+	memblock_reserve(remap_pa, size);
 	remap_va = phys_to_virt(remap_pa);
 
 	/* perform actual remap */
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index dd27f40..92e2711 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -19,7 +19,7 @@
 	for_each_online_node(i)
 		pages += free_all_bootmem_node(NODE_DATA(i));
 
-	pages += free_all_memory_core_early(MAX_NUMNODES);
+	pages += free_low_memory_core_early(MAX_NUMNODES);
 
 	return pages;
 }
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index d0ed086..46db568 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -28,6 +28,16 @@
 	return -ENOENT;
 }
 
+static u64 mem_hole_size(u64 start, u64 end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = PFN_DOWN(end);
+
+	if (start_pfn < end_pfn)
+		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+	return 0;
+}
+
 /*
  * Sets up nid to range from @start to @end.  The return value is -errno if
  * something went wrong, 0 otherwise.
@@ -89,7 +99,7 @@
 	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
 	 * the division in ulong number of pages and convert back.
 	 */
-	size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+	size = max_addr - addr - mem_hole_size(addr, max_addr);
 	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
 
 	/*
@@ -135,8 +145,7 @@
 			 * Continue to add memory to this fake node if its
 			 * non-reserved memory is less than the per-node size.
 			 */
-			while (end - start -
-			       memblock_x86_hole_size(start, end) < size) {
+			while (end - start - mem_hole_size(start, end) < size) {
 				end += FAKE_NODE_MIN_SIZE;
 				if (end > limit) {
 					end = limit;
@@ -150,7 +159,7 @@
 			 * this one must extend to the boundary.
 			 */
 			if (end < dma32_end && dma32_end - end -
-			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 				end = dma32_end;
 
 			/*
@@ -158,8 +167,7 @@
 			 * next node, this one must extend to the end of the
 			 * physical node.
 			 */
-			if (limit - end -
-			    memblock_x86_hole_size(end, limit) < size)
+			if (limit - end - mem_hole_size(end, limit) < size)
 				end = limit;
 
 			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@
 {
 	u64 end = start + size;
 
-	while (end - start - memblock_x86_hole_size(start, end) < size) {
+	while (end - start - mem_hole_size(start, end) < size) {
 		end += FAKE_NODE_MIN_SIZE;
 		if (end > max_addr) {
 			end = max_addr;
@@ -211,8 +219,7 @@
 	 * creates a uniform distribution of node sizes across the entire
 	 * machine (but not necessarily over physical nodes).
 	 */
-	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-						MAX_NUMNODES;
+	min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
 	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
 	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
 		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@
 			 * this one must extend to the boundary.
 			 */
 			if (end < dma32_end && dma32_end - end -
-			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 				end = dma32_end;
 
 			/*
@@ -260,8 +267,7 @@
 			 * next node, this one must extend to the end of the
 			 * physical node.
 			 */
-			if (limit - end -
-			    memblock_x86_hole_size(end, limit) < size)
+			if (limit - end - mem_hole_size(end, limit) < size)
 				end = limit;
 
 			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -351,11 +357,11 @@
 
 		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 					      phys_size, PAGE_SIZE);
-		if (phys == MEMBLOCK_ERROR) {
+		if (!phys) {
 			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
 			goto no_emu;
 		}
-		memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+		memblock_reserve(phys, phys_size);
 		phys_dist = __va(phys);
 
 		for (i = 0; i < numa_dist_cnt; i++)
@@ -424,7 +430,7 @@
 
 	/* free the copied physical distance table */
 	if (phys_dist)
-		memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+		memblock_free(__pa(phys_dist), phys_size);
 	return;
 
 no_emu:
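
A note on the new helper: because of the PFN_UP/PFN_DOWN rounding, mem_hole_size() only counts whole pages that lie strictly inside the range, so a sub-page span reports no hole at all. A minimal sketch of the rounding, assuming 4 KiB pages (the addresses are illustrative, not from this patch):

	/* PFN_UP(0x1100) = 2, PFN_DOWN(0x1f00) = 1 -> start_pfn >= end_pfn  */
	mem_hole_size(0x1100, 0x1f00);	/* 0: no whole page inside the range */
	/* PFN_UP(0x1000) = 1, PFN_DOWN(0x3000) = 3 -> pages 1 and 2 checked */
	mem_hole_size(0x1000, 0x3000);	/* bytes reported absent in those pages */

The fake-node sizing above then operates on "span - mem_hole_size(span)", so holes do not inflate a node's share of memory.
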
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f9e5267..eda2acb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -998,7 +998,7 @@
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-int _set_memory_array(unsigned long *addr, int addrinarray,
+static int _set_memory_array(unsigned long *addr, int addrinarray,
 		unsigned long new_type)
 {
 	int i, j;
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 81dbfde..fd61b3f 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -69,6 +69,12 @@
 	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
 		return;
 	pxm = pa->proximity_domain;
+	apic_id = pa->apic_id;
+	if (!cpu_has_x2apic && (apic_id >= 0xff)) {
+		printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n",
+			 pxm, apic_id);
+		return;
+	}
 	node = setup_node(pxm);
 	if (node < 0) {
 		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -76,7 +82,6 @@
 		return;
 	}
 
-	apic_id = pa->apic_id;
 	if (apic_id >= MAX_LOCAL_APIC) {
 		printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
 		return;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bfab3fa..7b65f75 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -568,8 +568,8 @@
 					break;
 				}
 				if (filter[i].jt != 0) {
-					if (filter[i].jf)
-						t_offset += is_near(f_offset) ? 2 : 6;
+					if (filter[i].jf && f_offset)
+						t_offset += is_near(f_offset) ? 2 : 5;
 					EMIT_COND_JMP(t_op, t_offset);
 					if (filter[i].jf)
 						EMIT_JMP(f_offset);
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 446902b..1599f56 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -4,9 +4,8 @@
 		oprof.o cpu_buffer.o buffer_sync.o \
 		event_buffer.o oprofile_files.o \
 		oprofilefs.o oprofile_stats.o  \
-		timer_int.o )
+		timer_int.o nmi_timer_int.o )
 
 oprofile-y				:= $(DRIVER_OBJS) init.o backtrace.o
 oprofile-$(CONFIG_X86_LOCAL_APIC) 	+= nmi_int.o op_model_amd.o \
 					   op_model_ppro.o op_model_p4.o
-oprofile-$(CONFIG_X86_IO_APIC)		+= nmi_timer_int.o
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index f148cf6..9e138d0 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -16,37 +16,23 @@
  * with the NMI mode driver.
  */
 
+#ifdef CONFIG_X86_LOCAL_APIC
 extern int op_nmi_init(struct oprofile_operations *ops);
-extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
-extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+#else
+static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
+static void op_nmi_exit(void) { }
+#endif
 
-static int nmi_timer;
+extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	int ret;
-
-	ret = -ENODEV;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	ret = op_nmi_init(ops);
-#endif
-	nmi_timer = (ret != 0);
-#ifdef CONFIG_X86_IO_APIC
-	if (nmi_timer)
-		ret = op_nmi_timer_init(ops);
-#endif
 	ops->backtrace = x86_backtrace;
-
-	return ret;
+	return op_nmi_init(ops);
 }
 
-
 void oprofile_arch_exit(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
-	if (!nmi_timer)
-		op_nmi_exit();
-#endif
+	op_nmi_exit();
 }
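
The conversion above follows the usual kernel idiom of giving an optional facility stub definitions, so the caller stays free of #ifdefs. A generic sketch of the pattern, with placeholder names (CONFIG_FOO, foo_init and foo_exit are illustrative only, not from this patch):

	#ifdef CONFIG_FOO
	extern int  foo_init(void);
	extern void foo_exit(void);
	#else
	static inline int  foo_init(void) { return -ENODEV; }
	static inline void foo_exit(void) { }
	#endif
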
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 75f9528..26b8a85 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -595,24 +595,36 @@
 	return 0;
 }
 
-static int force_arch_perfmon;
-static int force_cpu_type(const char *str, struct kernel_param *kp)
+enum __force_cpu_type {
+	reserved = 0,		/* do not force */
+	timer,
+	arch_perfmon,
+};
+
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
 {
-	if (!strcmp(str, "arch_perfmon")) {
-		force_arch_perfmon = 1;
+	if (!strcmp(str, "timer")) {
+		force_cpu_type = timer;
+		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
+	} else if (!strcmp(str, "arch_perfmon")) {
+		force_cpu_type = arch_perfmon;
 		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+	} else {
+		force_cpu_type = 0;
 	}
 
 	return 0;
 }
-module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
 
 static int __init ppro_init(char **cpu_type)
 {
 	__u8 cpu_model = boot_cpu_data.x86_model;
 	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */
 
-	if (force_arch_perfmon && cpu_has_arch_perfmon)
+	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
 		return 0;
 
 	/*
@@ -679,6 +691,9 @@
 	if (!cpu_has_apic)
 		return -ENODEV;
 
+	if (force_cpu_type == timer)
+		return -ENODEV;
+
 	switch (vendor) {
 	case X86_VENDOR_AMD:
 		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
deleted file mode 100644
index 7f8052c..0000000
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * @file nmi_timer_int.c
- *
- * @remark Copyright 2003 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo <zwane@linuxpower.ca>
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/rcupdate.h>
-#include <linux/kdebug.h>
-
-#include <asm/nmi.h>
-#include <asm/apic.h>
-#include <asm/ptrace.h>
-
-static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
-{
-	oprofile_add_sample(regs, 0);
-	return NMI_HANDLED;
-}
-
-static int timer_start(void)
-{
-	if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
-					0, "oprofile-timer"))
-		return 1;
-	return 0;
-}
-
-
-static void timer_stop(void)
-{
-	unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
-	synchronize_sched();  /* Allow already-started NMIs to complete. */
-}
-
-
-int __init op_nmi_timer_init(struct oprofile_operations *ops)
-{
-	ops->start = timer_start;
-	ops->stop = timer_stop;
-	ops->cpu_type = "timer";
-	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
-	return 0;
-}
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index db0e9a5..da8fe05 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -44,7 +44,7 @@
 	pcibios_enabled = 1;
 	set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
 	if (__supported_pte_mask & _PAGE_NX)
-		printk(KERN_INFO "PCI : PCI BIOS aera is rw and x. Use pci=nobios if you want it NX.\n");
+		printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
 }
 
 /*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c9718a1..4cf9bd0 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -238,7 +238,8 @@
 
 	spin_lock_irqsave(&rtc_lock, flags);
 	efi_call_phys_prelog();
-	status = efi_call_phys2(efi_phys.get_time, tm, tc);
+	status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+				virt_to_phys(tc));
 	efi_call_phys_epilog();
 	spin_unlock_irqrestore(&rtc_lock, flags);
 	return status;
@@ -323,13 +324,10 @@
 		case EFI_UNUSABLE_MEMORY:
 			e820_type = E820_UNUSABLE;
 			break;
-		case EFI_RUNTIME_SERVICES_DATA:
-			e820_type = E820_RESERVED_EFI;
-			break;
 		default:
 			/*
 			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
-			 * EFI_MEMORY_MAPPED_IO
+			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
 			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
 			 */
 			e820_type = E820_RESERVED;
@@ -355,8 +353,7 @@
 		boot_params.efi_info.efi_memdesc_size;
 	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
 	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
-	memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
-		      "EFI memmap");
+	memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
 }
 
 #if EFI_DEBUG
@@ -400,16 +397,14 @@
 		if ((start+size >= virt_to_phys(_text)
 				&& start <= virt_to_phys(_end)) ||
 			!e820_all_mapped(start, start+size, E820_RAM) ||
-			memblock_x86_check_reserved_size(&start, &size,
-							1<<EFI_PAGE_SHIFT)) {
+			memblock_is_region_reserved(start, size)) {
 			/* Could not reserve, skip it */
 			md->num_pages = 0;
 			memblock_dbg(PFX "Could not reserve boot range "
 					"[0x%010llx-0x%010llx]\n",
 						start, start+size-1);
 		} else
-			memblock_x86_reserve_range(start, start+size,
-							"EFI Boot");
+			memblock_reserve(start, size);
 	}
 }
 
@@ -674,21 +669,10 @@
 		end_pfn = PFN_UP(end);
 		if (end_pfn <= max_low_pfn_mapped
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped)) {
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
-
-			if (!(md->attribute & EFI_MEMORY_WB)) {
-				addr = (u64) (unsigned long)va;
-				npages = md->num_pages;
-				memrange_efi_to_native(&addr, &npages);
-				set_memory_uc(addr, npages);
-			}
-		} else {
-			if (!(md->attribute & EFI_MEMORY_WB))
-				va = ioremap_nocache(md->phys_addr, size);
-			else
-				va = ioremap_cache(md->phys_addr, size);
-		}
+		else
+			va = efi_ioremap(md->phys_addr, size, md->type);
 
 		md->virt_addr = (u64) (unsigned long) va;
 
@@ -698,6 +682,13 @@
 			continue;
 		}
 
+		if (!(md->attribute & EFI_MEMORY_WB)) {
+			addr = md->virt_addr;
+			npages = md->num_pages;
+			memrange_efi_to_native(&addr, &npages);
+			set_memory_uc(addr, npages);
+		}
+
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
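
The memblock conversions in this file (and in the numa and xen hunks elsewhere in this series) all follow the same mechanical rule: the old helpers took a start address, an exclusive end address and a debug label, while the generic replacements take a base and a size. A schematic comparison; the old functions no longer exist after this series, so this is illustration rather than compilable code:

	memblock_x86_reserve_range(start, start + size, "EFI Boot");	/* old: start, end, label */
	memblock_reserve(start, size);					/* new: base, size        */

	memblock_x86_free_range(base, base + size);			/* old */
	memblock_free(base, size);					/* new */
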
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index e36bf71..40e4469 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -39,43 +39,14 @@
  */
 
 static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
 
 void efi_call_phys_prelog(void)
 {
-	unsigned long cr4;
-	unsigned long temp;
 	struct desc_ptr gdt_descr;
 
 	local_irq_save(efi_rt_eflags);
 
-	/*
-	 * If I don't have PAE, I should just duplicate two entries in page
-	 * directory. If I have PAE, I just need to duplicate one entry in
-	 * page directory.
-	 */
-	cr4 = read_cr4_safe();
-
-	if (cr4 & X86_CR4_PAE) {
-		efi_bak_pg_dir_pointer[0].pgd =
-		    swapper_pg_dir[pgd_index(0)].pgd;
-		swapper_pg_dir[0].pgd =
-		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-	} else {
-		efi_bak_pg_dir_pointer[0].pgd =
-		    swapper_pg_dir[pgd_index(0)].pgd;
-		efi_bak_pg_dir_pointer[1].pgd =
-		    swapper_pg_dir[pgd_index(0x400000)].pgd;
-		swapper_pg_dir[pgd_index(0)].pgd =
-		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-		temp = PAGE_OFFSET + 0x400000;
-		swapper_pg_dir[pgd_index(0x400000)].pgd =
-		    swapper_pg_dir[pgd_index(temp)].pgd;
-	}
-
-	/*
-	 * After the lock is released, the original page table is restored.
-	 */
+	load_cr3(initial_page_table);
 	__flush_tlb_all();
 
 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@
 
 void efi_call_phys_epilog(void)
 {
-	unsigned long cr4;
 	struct desc_ptr gdt_descr;
 
 	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 
-	cr4 = read_cr4_safe();
-
-	if (cr4 & X86_CR4_PAE) {
-		swapper_pg_dir[pgd_index(0)].pgd =
-		    efi_bak_pg_dir_pointer[0].pgd;
-	} else {
-		swapper_pg_dir[pgd_index(0)].pgd =
-		    efi_bak_pg_dir_pointer[0].pgd;
-		swapper_pg_dir[pgd_index(0x400000)].pgd =
-		    efi_bak_pg_dir_pointer[1].pgd;
-	}
-
-	/*
-	 * After the lock is released, the original page table is restored.
-	 */
+	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 
 	local_irq_restore(efi_rt_eflags);
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 312250c..ac3aa54 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -80,3 +80,20 @@
 	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
 }
+
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+				 u32 type)
+{
+	unsigned long last_map_pfn;
+
+	if (type == EFI_MEMORY_MAPPED_IO)
+		return ioremap(phys_addr, size);
+
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
+
+	return (void __iomem *)__va(phys_addr);
+}
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 25bfdbb..3c6e328 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -245,16 +245,24 @@
  * Following is the early console based on Medfield HSU (High
  * Speed UART) device.
  */
-#define HSU_PORT2_PADDR		0xffa28180
+#define HSU_PORT_BASE		0xffa28080
 
 static void __iomem *phsu;
 
-void hsu_early_console_init(void)
+void hsu_early_console_init(const char *s)
 {
+	unsigned long paddr, port = 0;
 	u8 lcr;
 
-	phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
-							HSU_PORT2_PADDR);
+	/*
+	 * Select the early HSU console port if specified by user in the
+	 * kernel command line.
+	 */
+	if (*s && !kstrtoul(s, 10, &port))
+		port = clamp_val(port, 0, 2);
+
+	paddr = HSU_PORT_BASE + port * 0x80;
+	phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
 
 	/* Disable FIFO */
 	writeb(0x0, phsu + UART_FCR);
diff --git a/arch/x86/platform/uv/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
index 309c70f..5d4ba30 100644
--- a/arch/x86/platform/uv/uv_sysfs.c
+++ b/arch/x86/platform/uv/uv_sysfs.c
@@ -19,7 +19,7 @@
  *  Copyright (c) Russ Anderson
  */
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <asm/uv/bios.h>
 #include <asm/uv/uv.h>
 
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index f820826..d511aa9 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -18,14 +18,21 @@
 quiet_cmd_posttest = TEST    $@
       cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose)
 
-posttest: $(obj)/test_get_len vmlinux
-	$(call cmd,posttest)
+quiet_cmd_sanitytest = TEST    $@
+      cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
 
-hostprogs-y	:= test_get_len
+posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
+	$(call cmd,posttest)
+	$(call cmd,sanitytest)
+
+hostprogs-y	+= test_get_len insn_sanity
 
 # -I needed for generated C source and C source which in the kernel tree.
 HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
 
+HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+
 # Dependencies are also needed.
 $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
 
+$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index eaf11f5..5f6a5b6 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -47,7 +47,7 @@
 	sep_expr = "^\\|$"
 	group_expr = "^Grp[0-9A-Za-z]+"
 
-	imm_expr = "^[IJAO][a-z]"
+	imm_expr = "^[IJAOL][a-z]"
 	imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
 	imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
 	imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
@@ -59,6 +59,7 @@
 	imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
 	imm_flag["Ob"] = "INAT_MOFFSET"
 	imm_flag["Ov"] = "INAT_MOFFSET"
+	imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
 
 	modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
 	force64_expr = "\\([df]64\\)"
@@ -70,8 +71,12 @@
 	lprefix3_expr = "\\(F2\\)"
 	max_lprefix = 4
 
-	vexok_expr = "\\(VEX\\)"
-	vexonly_expr = "\\(oVEX\\)"
+	# All opcodes starting with lower-case 'v' or with (v1) superscript
+	# accept a VEX prefix
+	vexok_opcode_expr = "^v.*"
+	vexok_expr = "\\(v1\\)"
+	# All opcodes with (v) superscript support *only* the VEX prefix
+	vexonly_expr = "\\(v\\)"
 
 	prefix_expr = "\\(Prefix\\)"
 	prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
@@ -85,8 +90,8 @@
 	prefix_num["SEG=GS"] = "INAT_PFX_GS"
 	prefix_num["SEG=SS"] = "INAT_PFX_SS"
 	prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
-	prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2"
-	prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3"
+	prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+	prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
 
 	clear_vars()
 }
@@ -310,12 +315,10 @@
 		if (match(opcode, fpu_expr))
 			flags = add_flags(flags, "INAT_MODRM")
 
-		# check VEX only code
+		# check VEX codes
 		if (match(ext, vexonly_expr))
 			flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
-
-		# check VEX only code
-		if (match(ext, vexok_expr))
+		else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
 			flags = add_flags(flags, "INAT_VEXOK")
 
 		# check prefixes
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
new file mode 100644
index 0000000..cc2f8c1
--- /dev/null
+++ b/arch/x86/tools/insn_sanity.c
@@ -0,0 +1,275 @@
+/*
+ * x86 decoder sanity test - based on test_get_insn.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ * Copyright (C) Hitachi, Ltd., 2011
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define unlikely(cond) (cond)
+#define ARRAY_SIZE(a)	(sizeof(a)/sizeof(a[0]))
+
+#include <asm/insn.h>
+#include <inat.c>
+#include <insn.c>
+
+/*
+ * Test of instruction analysis against tampering.
+ * Feed random binary to instruction decoder and ensure not to
+ * access out-of-instruction-buffer.
+ */
+
+#define DEFAULT_MAX_ITER	10000
+#define INSN_NOP 0x90
+
+static const char	*prog;		/* Program name */
+static int		verbose;	/* Verbosity */
+static int		x86_64;		/* x86-64 bit mode flag */
+static unsigned int	seed;		/* Random seed */
+static unsigned long	iter_start;	/* Start of iteration number */
+static unsigned long	iter_end = DEFAULT_MAX_ITER;	/* End of iteration number */
+static FILE		*input_file;	/* Input file name */
+
+static void usage(const char *err)
+{
+	if (err)
+		fprintf(stderr, "Error: %s\n\n", err);
+	fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
+	fprintf(stderr, "\t-y	64bit mode\n");
+	fprintf(stderr, "\t-n	32bit mode\n");
+	fprintf(stderr, "\t-v	Verbosity(-vv dumps any decoded result)\n");
+	fprintf(stderr, "\t-s	Give a random seed (and iteration number)\n");
+	fprintf(stderr, "\t-m	Give a maximum iteration number\n");
+	fprintf(stderr, "\t-i	Give an input file with decoded binary\n");
+	exit(1);
+}
+
+static void dump_field(FILE *fp, const char *name, const char *indent,
+		       struct insn_field *field)
+{
+	fprintf(fp, "%s.%s = {\n", indent, name);
+	fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n",
+		indent, field->value, field->bytes[0], field->bytes[1],
+		field->bytes[2], field->bytes[3]);
+	fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent,
+		field->got, field->nbytes);
+}
+
+static void dump_insn(FILE *fp, struct insn *insn)
+{
+	fprintf(fp, "Instruction = {\n");
+	dump_field(fp, "prefixes", "\t",	&insn->prefixes);
+	dump_field(fp, "rex_prefix", "\t",	&insn->rex_prefix);
+	dump_field(fp, "vex_prefix", "\t",	&insn->vex_prefix);
+	dump_field(fp, "opcode", "\t",		&insn->opcode);
+	dump_field(fp, "modrm", "\t",		&insn->modrm);
+	dump_field(fp, "sib", "\t",		&insn->sib);
+	dump_field(fp, "displacement", "\t",	&insn->displacement);
+	dump_field(fp, "immediate1", "\t",	&insn->immediate1);
+	dump_field(fp, "immediate2", "\t",	&insn->immediate2);
+	fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n",
+		insn->attr, insn->opnd_bytes, insn->addr_bytes);
+	fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n",
+		insn->length, insn->x86_64, insn->kaddr);
+}
+
+static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter,
+			unsigned char *insn_buf, struct insn *insn)
+{
+	int i;
+
+	fprintf(fp, "%s:\n", msg);
+
+	dump_insn(fp, insn);
+
+	fprintf(fp, "You can reproduce this with below command(s);\n");
+
+	/* Input a decoded instruction sequence directly */
+	fprintf(fp, " $ echo ");
+	for (i = 0; i < MAX_INSN_SIZE; i++)
+		fprintf(fp, " %02x", insn_buf[i]);
+	fprintf(fp, " | %s -i -\n", prog);
+
+	if (!input_file) {
+		fprintf(fp, "Or \n");
+		/* Give a seed and iteration number */
+		fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter);
+	}
+}
+
+static void init_random_seed(void)
+{
+	int fd;
+
+	fd = open("/dev/urandom", O_RDONLY);
+	if (fd < 0)
+		goto fail;
+
+	if (read(fd, &seed, sizeof(seed)) != sizeof(seed))
+		goto fail;
+
+	close(fd);
+	return;
+fail:
+	usage("Failed to open /dev/urandom");
+}
+
+/* Read given instruction sequence from the input file */
+static int read_next_insn(unsigned char *insn_buf)
+{
+	char buf[256]  = "", *tmp;
+	int i;
+
+	tmp = fgets(buf, ARRAY_SIZE(buf), input_file);
+	if (tmp == NULL || feof(input_file))
+		return 0;
+
+	for (i = 0; i < MAX_INSN_SIZE; i++) {
+		insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16);
+		if (*tmp != ' ')
+			break;
+	}
+
+	return i;
+}
+
+static int generate_insn(unsigned char *insn_buf)
+{
+	int i;
+
+	if (input_file)
+		return read_next_insn(insn_buf);
+
+	/* Fills buffer with random binary up to MAX_INSN_SIZE */
+	for (i = 0; i < MAX_INSN_SIZE - 1; i += 2)
+		*(unsigned short *)(&insn_buf[i]) = random() & 0xffff;
+
+	while (i < MAX_INSN_SIZE)
+		insn_buf[i++] = random() & 0xff;
+
+	return i;
+}
+
+static void parse_args(int argc, char **argv)
+{
+	int c;
+	char *tmp = NULL;
+	int set_seed = 0;
+
+	prog = argv[0];
+	while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) {
+		switch (c) {
+		case 'y':
+			x86_64 = 1;
+			break;
+		case 'n':
+			x86_64 = 0;
+			break;
+		case 'v':
+			verbose++;
+			break;
+		case 'i':
+			if (strcmp("-", optarg) == 0)
+				input_file = stdin;
+			else
+				input_file = fopen(optarg, "r");
+			if (!input_file)
+				usage("Failed to open input file");
+			break;
+		case 's':
+			seed = (unsigned int)strtoul(optarg, &tmp, 0);
+			if (*tmp == ',') {
+				optarg = tmp + 1;
+				iter_start = strtoul(optarg, &tmp, 0);
+			}
+			if (*tmp != '\0' || tmp == optarg)
+				usage("Failed to parse seed");
+			set_seed = 1;
+			break;
+		case 'm':
+			iter_end = strtoul(optarg, &tmp, 0);
+			if (*tmp != '\0' || tmp == optarg)
+				usage("Failed to parse max_iter");
+			break;
+		default:
+			usage(NULL);
+		}
+	}
+
+	/* Check errors */
+	if (iter_end < iter_start)
+		usage("Max iteration number must be bigger than iter-num");
+
+	if (set_seed && input_file)
+		usage("Don't use input file (-i) with random seed (-s)");
+
+	/* Initialize random seed */
+	if (!input_file) {
+		if (!set_seed)	/* No seed is given */
+			init_random_seed();
+		srand(seed);
+	}
+}
+
+int main(int argc, char **argv)
+{
+	struct insn insn;
+	int insns = 0;
+	int errors = 0;
+	unsigned long i;
+	unsigned char insn_buf[MAX_INSN_SIZE * 2];
+
+	parse_args(argc, argv);
+
+	/* Prepare stop bytes with NOPs */
+	memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE);
+
+	for (i = 0; i < iter_end; i++) {
+		if (generate_insn(insn_buf) <= 0)
+			break;
+
+		if (i < iter_start)	/* Skip to given iteration number */
+			continue;
+
+		/* Decode an instruction */
+		insn_init(&insn, insn_buf, x86_64);
+		insn_get_length(&insn);
+
+		if (insn.next_byte <= insn.kaddr ||
+		    insn.kaddr + MAX_INSN_SIZE < insn.next_byte) {
+			/* Access out-of-range memory */
+			dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn);
+			errors++;
+		} else if (verbose && !insn_complete(&insn))
+			dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn);
+		else if (verbose >= 2)
+			dump_insn(stdout, &insn);
+		insns++;
+	}
+
+	fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+
+	return errors ? 1 : 0;
+}
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index 7c0fedd..ef1db19 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -109,7 +109,7 @@
 	.llseek = no_llseek,
 };
 
-struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
 					    struct dentry *parent,
 					    u32 *array, unsigned elements)
 {
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
index e281320..78d2549 100644
--- a/arch/x86/xen/debugfs.h
+++ b/arch/x86/xen/debugfs.h
@@ -3,7 +3,7 @@
 
 struct dentry * __init xen_init_debugfs(void);
 
-struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
 					    struct dentry *parent,
 					    u32 *array, unsigned elements);
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1f92865..12eb07b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1215,8 +1215,6 @@
 	local_irq_disable();
 	early_boot_irqs_disabled = true;
 
-	memblock_init();
-
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 	xen_ident_map_ISA();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 87f6673..f4bf8aa 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1774,10 +1774,8 @@
 	__xen_write_cr3(true, __pa(pgd));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 
-	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
+	memblock_reserve(__pa(xen_start_info->pt_base),
+			 xen_start_info->nr_pt_frames * PAGE_SIZE);
 
 	return pgd;
 }
@@ -1853,10 +1851,8 @@
 			  PFN_DOWN(__pa(initial_page_table)));
 	xen_write_cr3(__pa(initial_page_table));
 
-	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
+	memblock_reserve(__pa(xen_start_info->pt_base),
+			 xen_start_info->nr_pt_frames * PAGE_SIZE);
 
 	return initial_page_table;
 }
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1093f80..e03c636 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -75,7 +75,7 @@
 	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
 		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
 
-	memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
+	memblock_reserve(start, size);
 
 	xen_max_p2m_pfn = PFN_DOWN(start + size);
 
@@ -173,9 +173,21 @@
 	domid_t domid = DOMID_SELF;
 	int ret;
 
-	ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-	if (ret > 0)
-		max_pages = ret;
+	/*
+	 * For the initial domain we use the maximum reservation as
+	 * the maximum page.
+	 *
+	 * For guest domains the current maximum reservation reflects
+	 * the current maximum rather than the static maximum. In this
+	 * case the e820 map provided to us will cover the static
+	 * maximum region.
+	 */
+	if (xen_initial_domain()) {
+		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+		if (ret > 0)
+			max_pages = ret;
+	}
+
 	return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
@@ -299,9 +311,8 @@
 	 *  - xen_start_info
 	 * See comment above "struct start_info" in <xen/interface/xen.h>
 	 */
-	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
-		      __pa(xen_start_info->pt_base),
-			"XEN START INFO");
+	memblock_reserve(__pa(xen_start_info->mfn_list),
+			 xen_start_info->pt_base - xen_start_info->mfn_list);
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h
index cbdf2ff..bb06968 100644
--- a/arch/xtensa/include/asm/socket.h
+++ b/arch/xtensa/include/asm/socket.h
@@ -73,4 +73,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+
 #endif	/* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be8acc..6abbedd 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -132,7 +132,6 @@
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE		17	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
index b1c981e..6d4db7e 100644
--- a/arch/xtensa/include/asm/types.h
+++ b/arch/xtensa/include/asm/types.h
@@ -23,8 +23,6 @@
 
 #ifndef __ASSEMBLY__
 
-typedef unsigned short umode_t;
-
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index f3e5eb4..ac62f9c 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -41,14 +41,6 @@
 	.rating = 200,
 	.read = ccount_read,
 	.mask = CLOCKSOURCE_MASK(32),
-	/*
-	 * With a shift of 22 the lower limit of the cpu clock is
-	 * 1MHz, where NSEC_PER_CCOUNT is 1000 or a bit less than
-	 * 2^10: Since we have 32 bits and the multiplicator can
-	 * already take up as much as 10 bits, this leaves us with
-	 * remaining upper 22 bits.
-	 */
-	.shift = 22,
 };
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id);
@@ -66,10 +58,7 @@
 	printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
 			(int)(ccount_per_jiffy/(10000/HZ))%100);
 #endif
-	ccount_clocksource.mult =
-		clocksource_hz2mult(CCOUNT_PER_JIFFY * HZ,
-				ccount_clocksource.shift);
-	clocksource_register(&ccount_clocksource);
+	clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
 
 	/* Initialize the linux timer interrupt. */
 
diff --git a/block/Kconfig b/block/Kconfig
index e97934e..09acf1b 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,6 +99,12 @@
 
 	See Documentation/cgroups/blkio-controller.txt for more information.
 
+menu "Partition Types"
+
+source "block/partitions/Kconfig"
+
+endmenu
+
 endif # BLOCK
 
 config BLOCK_COMPAT
diff --git a/block/Makefile b/block/Makefile
index 514c6e4..39b76ba 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,8 @@
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
+			blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \
+			partition-generic.o partitions/
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)	+= bsg-lib.o
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c..15de223 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,7 +366,14 @@
 		if (drain_all)
 			blk_throtl_drain(q);
 
-		__blk_run_queue(q);
+		/*
+		 * This function might be called on a queue which failed
+		 * driver init after queue creation.  Some drivers
+		 * (e.g. fd) get unhappy in such cases.  Kick queue iff
+		 * dispatch queue has something on it.
+		 */
+		if (!list_empty(&q->queue_head))
+			__blk_run_queue(q);
 
 		if (drain_all)
 			nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	q->backing_dev_info.name = "block";
+	q->node = node_id;
 
 	err = bdi_init(&q->backing_dev_info);
 	if (err) {
@@ -551,7 +559,7 @@
 	if (!uninit_q)
 		return NULL;
 
-	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
 		blk_cleanup_queue(uninit_q);
 
@@ -563,18 +571,9 @@
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 			 spinlock_t *lock)
 {
-	return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-			      spinlock_t *lock, int node_id)
-{
 	if (!q)
 		return NULL;
 
-	q->node = node_id;
 	if (blk_init_free_list(q))
 		return NULL;
 
@@ -604,7 +603,7 @@
 
 	return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
diff --git a/block/blk-map.c b/block/blk-map.c
index 164cd00..623e1cd 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -311,7 +311,7 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (rq_data_dir(rq) == WRITE)
+	if (!reading)
 		bio->bi_rw |= REQ_WRITE;
 
 	if (do_copy)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e74d6d1..4af6f5c 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -282,18 +282,9 @@
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
-	int tag = rq->tag;
+	unsigned tag = rq->tag; /* negative tags invalid */
 
-	BUG_ON(tag == -1);
-
-	if (unlikely(tag >= bqt->max_depth)) {
-		/*
-		 * This can happen after tag depth has been reduced.
-		 * But tag shouldn't be larger than real_max_depth.
-		 */
-		WARN_ON(tag >= bqt->real_max_depth);
-		return;
-	}
+	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
 	rq->cmd_flags &= ~REQ_QUEUED;
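
The type change above is what makes the single BUG_ON sufficient: once the tag is held in an unsigned variable, the old "untagged" value of -1 wraps around and is caught by the same upper-bound check. A standalone sketch of that property (the real_max_depth value is arbitrary):

	#include <assert.h>

	int main(void)
	{
		unsigned int real_max_depth = 64;	/* arbitrary illustrative depth */
		int rq_tag = -1;			/* "request has no tag"         */
		unsigned int tag = rq_tag;		/* wraps to UINT_MAX            */

		assert(tag >= real_max_depth);		/* also covers the old BUG_ON(tag == -1) */
		return 0;
	}
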
diff --git a/block/bsg.c b/block/bsg.c
index 702f131..9651ec7 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1070,7 +1070,7 @@
 
 static struct cdev bsg_cdev;
 
-static char *bsg_devnode(struct device *dev, mode_t *mode)
+static char *bsg_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 16ace89..3548705 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1655,6 +1655,8 @@
 		    struct request *next)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
 	/*
 	 * reposition in fifo if next is older than rq
 	 */
@@ -1669,6 +1671,16 @@
 	cfq_remove_request(next);
 	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(next), rq_is_sync(next));
+
+	cfqq = RQ_CFQQ(next);
+	/*
+	 * all requests of this queue are merged to other queues, delete it
+	 * from the service tree. If it's the active_queue,
+	 * cfq_dispatch_requests() will choose to expire it or do idle
+	 */
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+	    cfqq != cfqd->active_queue)
+		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -3184,7 +3196,7 @@
 		}
 	}
 
-	if (ret)
+	if (ret && ret != -EEXIST)
 		printk(KERN_ERR "cfq: cic link failed!\n");
 
 	return ret;
@@ -3200,6 +3212,7 @@
 {
 	struct io_context *ioc = NULL;
 	struct cfq_io_context *cic;
+	int ret;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -3207,6 +3220,7 @@
 	if (!ioc)
 		return NULL;
 
+retry:
 	cic = cfq_cic_lookup(cfqd, ioc);
 	if (cic)
 		goto out;
@@ -3215,7 +3229,12 @@
 	if (cic == NULL)
 		goto err;
 
-	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+	ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+	if (ret == -EEXIST) {
+		/* someone has linked cic to ioc already */
+		cfq_cic_free(cic);
+		goto retry;
+	} else if (ret)
 		goto err_free;
 
 out:
@@ -4036,6 +4055,11 @@
 
 	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
 		kfree(cfqg);
+
+		spin_lock(&cic_index_lock);
+		ida_remove(&cic_index_ida, cfqd->cic_index);
+		spin_unlock(&cic_index_lock);
+
 		kfree(cfqd);
 		return NULL;
 	}
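
The -EEXIST handling added above is the usual lookup/alloc/link/retry shape for racy insertions. A generic sketch of that shape, with placeholder helpers (lookup(), alloc_obj(), link() and free_obj() are not kernel APIs):

	for (;;) {
		obj = lookup(key);
		if (obj)
			break;			/* another task linked one first   */
		obj = alloc_obj();
		if (!obj)
			return -ENOMEM;
		ret = link(key, obj);		/* may race with a concurrent link */
		if (!ret)
			break;			/* we won the race                 */
		free_obj(obj);			/* lost the race, or a real error  */
		if (ret != -EEXIST)
			return ret;
		/* -EEXIST: someone else linked theirs; retry and reuse it */
	}
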
diff --git a/block/genhd.c b/block/genhd.c
index 02e9fca..83e7c04 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/kmod.h>
 #include <linux/kobj_map.h>
-#include <linux/buffer_head.h>
 #include <linux/mutex.h>
 #include <linux/idr.h>
 #include <linux/log2.h>
@@ -507,7 +506,7 @@
 	return 0;
 }
 
-void register_disk(struct gendisk *disk)
+static void register_disk(struct gendisk *disk)
 {
 	struct device *ddev = disk_to_dev(disk);
 	struct block_device *bdev;
@@ -1109,7 +1108,7 @@
 	.name		= "block",
 };
 
-static char *block_devnode(struct device *dev, mode_t *mode)
+static char *block_devnode(struct device *dev, umode_t *mode)
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
diff --git a/block/ioctl.c b/block/ioctl.c
index ca939fc..4828fa3 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,7 +5,7 @@
 #include <linux/blkpg.h>
 #include <linux/hdreg.h>
 #include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
+#include <linux/fs.h>
 #include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 
@@ -180,6 +180,26 @@
 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
 
 /*
+ * Is it an unrecognized ioctl? The correct returns are either
+ * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
+ * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
+ * code before returning.
+ *
+ * Confused drivers sometimes return EINVAL, which is wrong. It
+ * means "I understood the ioctl command, but the parameters to
+ * it were wrong".
+ *
+ * We should aim to just fix the broken drivers, the EINVAL case
+ * should go away.
+ */
+static inline int is_unrecognized_ioctl(int ret)
+{
+	return	ret == -EINVAL ||
+		ret == -ENOTTY ||
+		ret == -ENOIOCTLCMD;
+}
+
+/*
  * always keep this in sync with compat_blkdev_ioctl()
  */
 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -196,8 +216,7 @@
 			return -EACCES;
 
 		ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
-		/* -EINVAL to handle old uncorrected drivers */
-		if (ret != -EINVAL && ret != -ENOTTY)
+		if (!is_unrecognized_ioctl(ret))
 			return ret;
 
 		fsync_bdev(bdev);
@@ -206,8 +225,7 @@
 
 	case BLKROSET:
 		ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
-		/* -EINVAL to handle old uncorrected drivers */
-		if (ret != -EINVAL && ret != -ENOTTY)
+		if (!is_unrecognized_ioctl(ret))
 			return ret;
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
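
The helper introduced above centralizes the "did the driver actually handle this?" test: generic block-layer handling should only run when the driver's ioctl came back unrecognized. A sketch of the intended call pattern (the names come from the hunk; the surrounding flow is illustrative):

	ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
	if (!is_unrecognized_ioctl(ret))
		return ret;	/* the driver handled it, or failed for real */
	/* otherwise fall through to the generic handling of the command */
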
diff --git a/fs/partitions/check.c b/block/partition-generic.c
similarity index 79%
rename from fs/partitions/check.c
rename to block/partition-generic.c
index e3c63d1..d06ec1c 100644
--- a/fs/partitions/check.c
+++ b/block/partition-generic.c
@@ -1,6 +1,4 @@
 /*
- *  fs/partitions/check.c
- *
  *  Code extracted from drivers/block/genhd.c
  *  Copyright (C) 1991-1998  Linus Torvalds
  *  Re-organised Feb 1998 Russell King
@@ -9,8 +7,6 @@
  *  block drivers, which allows all the partition code to
  *  be grouped in one location, and it to be mostly self
  *  contained.
- *
- *  Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
  */
 
 #include <linux/init.h>
@@ -22,98 +18,11 @@
 #include <linux/genhd.h>
 #include <linux/blktrace_api.h>
 
-#include "check.h"
-
-#include "acorn.h"
-#include "amiga.h"
-#include "atari.h"
-#include "ldm.h"
-#include "mac.h"
-#include "msdos.h"
-#include "osf.h"
-#include "sgi.h"
-#include "sun.h"
-#include "ibm.h"
-#include "ultrix.h"
-#include "efi.h"
-#include "karma.h"
-#include "sysv68.h"
+#include "partitions/check.h"
 
 #ifdef CONFIG_BLK_DEV_MD
 extern void md_autodetect_dev(dev_t dev);
 #endif
-
-int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
-
-static int (*check_part[])(struct parsed_partitions *) = {
-	/*
-	 * Probe partition formats with tables at disk address 0
-	 * that also have an ADFS boot block at 0xdc0.
-	 */
-#ifdef CONFIG_ACORN_PARTITION_ICS
-	adfspart_check_ICS,
-#endif
-#ifdef CONFIG_ACORN_PARTITION_POWERTEC
-	adfspart_check_POWERTEC,
-#endif
-#ifdef CONFIG_ACORN_PARTITION_EESOX
-	adfspart_check_EESOX,
-#endif
-
-	/*
-	 * Now move on to formats that only have partition info at
-	 * disk address 0xdc0.  Since these may also have stale
-	 * PC/BIOS partition tables, they need to come before
-	 * the msdos entry.
-	 */
-#ifdef CONFIG_ACORN_PARTITION_CUMANA
-	adfspart_check_CUMANA,
-#endif
-#ifdef CONFIG_ACORN_PARTITION_ADFS
-	adfspart_check_ADFS,
-#endif
-
-#ifdef CONFIG_EFI_PARTITION
-	efi_partition,		/* this must come before msdos */
-#endif
-#ifdef CONFIG_SGI_PARTITION
-	sgi_partition,
-#endif
-#ifdef CONFIG_LDM_PARTITION
-	ldm_partition,		/* this must come before msdos */
-#endif
-#ifdef CONFIG_MSDOS_PARTITION
-	msdos_partition,
-#endif
-#ifdef CONFIG_OSF_PARTITION
-	osf_partition,
-#endif
-#ifdef CONFIG_SUN_PARTITION
-	sun_partition,
-#endif
-#ifdef CONFIG_AMIGA_PARTITION
-	amiga_partition,
-#endif
-#ifdef CONFIG_ATARI_PARTITION
-	atari_partition,
-#endif
-#ifdef CONFIG_MAC_PARTITION
-	mac_partition,
-#endif
-#ifdef CONFIG_ULTRIX_PARTITION
-	ultrix_partition,
-#endif
-#ifdef CONFIG_IBM_PARTITION
-	ibm_partition,
-#endif
-#ifdef CONFIG_KARMA_PARTITION
-	karma_partition,
-#endif
-#ifdef CONFIG_SYSV68_PARTITION
-	sysv68_partition,
-#endif
-	NULL
-};
  
 /*
  * disk_name() is used by partition check code and the genhd driver.
@@ -155,65 +64,6 @@
 
 EXPORT_SYMBOL(__bdevname);
 
-static struct parsed_partitions *
-check_partition(struct gendisk *hd, struct block_device *bdev)
-{
-	struct parsed_partitions *state;
-	int i, res, err;
-
-	state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
-	if (!state)
-		return NULL;
-	state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!state->pp_buf) {
-		kfree(state);
-		return NULL;
-	}
-	state->pp_buf[0] = '\0';
-
-	state->bdev = bdev;
-	disk_name(hd, 0, state->name);
-	snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
-	if (isdigit(state->name[strlen(state->name)-1]))
-		sprintf(state->name, "p");
-
-	state->limit = disk_max_parts(hd);
-	i = res = err = 0;
-	while (!res && check_part[i]) {
-		memset(&state->parts, 0, sizeof(state->parts));
-		res = check_part[i++](state);
-		if (res < 0) {
-			/* We have hit an I/O error which we don't report now.
-		 	* But record it, and let the others do their job.
-		 	*/
-			err = res;
-			res = 0;
-		}
-
-	}
-	if (res > 0) {
-		printk(KERN_INFO "%s", state->pp_buf);
-
-		free_page((unsigned long)state->pp_buf);
-		return state;
-	}
-	if (state->access_beyond_eod)
-		err = -ENOSPC;
-	if (err)
-	/* The partition is unrecognized. So report I/O errors if there were any */
-		res = err;
-	if (!res)
-		strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
-	else if (warn_no_part)
-		strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
-
-	printk(KERN_INFO "%s", state->pp_buf);
-
-	free_page((unsigned long)state->pp_buf);
-	kfree(state);
-	return ERR_PTR(res);
-}
-
 static ssize_t part_partition_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
diff --git a/fs/partitions/Kconfig b/block/partitions/Kconfig
similarity index 100%
rename from fs/partitions/Kconfig
rename to block/partitions/Kconfig
diff --git a/fs/partitions/Makefile b/block/partitions/Makefile
similarity index 100%
rename from fs/partitions/Makefile
rename to block/partitions/Makefile
diff --git a/fs/partitions/acorn.c b/block/partitions/acorn.c
similarity index 100%
rename from fs/partitions/acorn.c
rename to block/partitions/acorn.c
diff --git a/fs/partitions/acorn.h b/block/partitions/acorn.h
similarity index 100%
rename from fs/partitions/acorn.h
rename to block/partitions/acorn.h
diff --git a/fs/partitions/amiga.c b/block/partitions/amiga.c
similarity index 100%
rename from fs/partitions/amiga.c
rename to block/partitions/amiga.c
diff --git a/fs/partitions/amiga.h b/block/partitions/amiga.h
similarity index 100%
rename from fs/partitions/amiga.h
rename to block/partitions/amiga.h
diff --git a/fs/partitions/atari.c b/block/partitions/atari.c
similarity index 100%
rename from fs/partitions/atari.c
rename to block/partitions/atari.c
diff --git a/fs/partitions/atari.h b/block/partitions/atari.h
similarity index 100%
rename from fs/partitions/atari.h
rename to block/partitions/atari.h
diff --git a/block/partitions/check.c b/block/partitions/check.c
new file mode 100644
index 0000000..bc90867
--- /dev/null
+++ b/block/partitions/check.c
@@ -0,0 +1,166 @@
+/*
+ *  fs/partitions/check.c
+ *
+ *  Code extracted from drivers/block/genhd.c
+ *  Copyright (C) 1991-1998  Linus Torvalds
+ *  Re-organised Feb 1998 Russell King
+ *
+ *  We now have independent partition support from the
+ *  block drivers, which allows all the partition code to
+ *  be grouped in one location, and it to be mostly self
+ *  contained.
+ *
+ *  Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
+ */
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
+
+#include "check.h"
+
+#include "acorn.h"
+#include "amiga.h"
+#include "atari.h"
+#include "ldm.h"
+#include "mac.h"
+#include "msdos.h"
+#include "osf.h"
+#include "sgi.h"
+#include "sun.h"
+#include "ibm.h"
+#include "ultrix.h"
+#include "efi.h"
+#include "karma.h"
+#include "sysv68.h"
+
+int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
+
+static int (*check_part[])(struct parsed_partitions *) = {
+	/*
+	 * Probe partition formats with tables at disk address 0
+	 * that also have an ADFS boot block at 0xdc0.
+	 */
+#ifdef CONFIG_ACORN_PARTITION_ICS
+	adfspart_check_ICS,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+	adfspart_check_POWERTEC,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+	adfspart_check_EESOX,
+#endif
+
+	/*
+	 * Now move on to formats that only have partition info at
+	 * disk address 0xdc0.  Since these may also have stale
+	 * PC/BIOS partition tables, they need to come before
+	 * the msdos entry.
+	 */
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+	adfspart_check_CUMANA,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+	adfspart_check_ADFS,
+#endif
+
+#ifdef CONFIG_EFI_PARTITION
+	efi_partition,		/* this must come before msdos */
+#endif
+#ifdef CONFIG_SGI_PARTITION
+	sgi_partition,
+#endif
+#ifdef CONFIG_LDM_PARTITION
+	ldm_partition,		/* this must come before msdos */
+#endif
+#ifdef CONFIG_MSDOS_PARTITION
+	msdos_partition,
+#endif
+#ifdef CONFIG_OSF_PARTITION
+	osf_partition,
+#endif
+#ifdef CONFIG_SUN_PARTITION
+	sun_partition,
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+	amiga_partition,
+#endif
+#ifdef CONFIG_ATARI_PARTITION
+	atari_partition,
+#endif
+#ifdef CONFIG_MAC_PARTITION
+	mac_partition,
+#endif
+#ifdef CONFIG_ULTRIX_PARTITION
+	ultrix_partition,
+#endif
+#ifdef CONFIG_IBM_PARTITION
+	ibm_partition,
+#endif
+#ifdef CONFIG_KARMA_PARTITION
+	karma_partition,
+#endif
+#ifdef CONFIG_SYSV68_PARTITION
+	sysv68_partition,
+#endif
+	NULL
+};
+
+struct parsed_partitions *
+check_partition(struct gendisk *hd, struct block_device *bdev)
+{
+	struct parsed_partitions *state;
+	int i, res, err;
+
+	state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+	if (!state)
+		return NULL;
+	state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!state->pp_buf) {
+		kfree(state);
+		return NULL;
+	}
+	state->pp_buf[0] = '\0';
+
+	state->bdev = bdev;
+	disk_name(hd, 0, state->name);
+	snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
+	if (isdigit(state->name[strlen(state->name)-1]))
+		sprintf(state->name, "p");
+
+	state->limit = disk_max_parts(hd);
+	i = res = err = 0;
+	while (!res && check_part[i]) {
+		memset(&state->parts, 0, sizeof(state->parts));
+		res = check_part[i++](state);
+		if (res < 0) {
+			/* We have hit an I/O error which we don't report now.
+		 	* But record it, and let the others do their job.
+		 	*/
+			err = res;
+			res = 0;
+		}
+
+	}
+	if (res > 0) {
+		printk(KERN_INFO "%s", state->pp_buf);
+
+		free_page((unsigned long)state->pp_buf);
+		return state;
+	}
+	if (state->access_beyond_eod)
+		err = -ENOSPC;
+	if (err)
+	/* The partition is unrecognized. So report I/O errors if there were any */
+		res = err;
+	if (!res)
+		strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
+	else if (warn_no_part)
+		strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
+
+	printk(KERN_INFO "%s", state->pp_buf);
+
+	free_page((unsigned long)state->pp_buf);
+	kfree(state);
+	return ERR_PTR(res);
+}
diff --git a/fs/partitions/check.h b/block/partitions/check.h
similarity index 92%
rename from fs/partitions/check.h
rename to block/partitions/check.h
index d68bf4d..52b1003 100644
--- a/fs/partitions/check.h
+++ b/block/partitions/check.h
@@ -22,6 +22,9 @@
 	char *pp_buf;
 };
 
+struct parsed_partitions *
+check_partition(struct gendisk *, struct block_device *);
+
 static inline void *read_part_sector(struct parsed_partitions *state,
 				     sector_t n, Sector *p)
 {
diff --git a/fs/partitions/efi.c b/block/partitions/efi.c
similarity index 100%
rename from fs/partitions/efi.c
rename to block/partitions/efi.c
diff --git a/fs/partitions/efi.h b/block/partitions/efi.h
similarity index 100%
rename from fs/partitions/efi.h
rename to block/partitions/efi.h
diff --git a/fs/partitions/ibm.c b/block/partitions/ibm.c
similarity index 100%
rename from fs/partitions/ibm.c
rename to block/partitions/ibm.c
diff --git a/fs/partitions/ibm.h b/block/partitions/ibm.h
similarity index 100%
rename from fs/partitions/ibm.h
rename to block/partitions/ibm.h
diff --git a/fs/partitions/karma.c b/block/partitions/karma.c
similarity index 100%
rename from fs/partitions/karma.c
rename to block/partitions/karma.c
diff --git a/fs/partitions/karma.h b/block/partitions/karma.h
similarity index 100%
rename from fs/partitions/karma.h
rename to block/partitions/karma.h
diff --git a/fs/partitions/ldm.c b/block/partitions/ldm.c
similarity index 100%
rename from fs/partitions/ldm.c
rename to block/partitions/ldm.c
diff --git a/fs/partitions/ldm.h b/block/partitions/ldm.h
similarity index 100%
rename from fs/partitions/ldm.h
rename to block/partitions/ldm.h
diff --git a/fs/partitions/mac.c b/block/partitions/mac.c
similarity index 100%
rename from fs/partitions/mac.c
rename to block/partitions/mac.c
diff --git a/fs/partitions/mac.h b/block/partitions/mac.h
similarity index 100%
rename from fs/partitions/mac.h
rename to block/partitions/mac.h
diff --git a/fs/partitions/msdos.c b/block/partitions/msdos.c
similarity index 100%
rename from fs/partitions/msdos.c
rename to block/partitions/msdos.c
diff --git a/fs/partitions/msdos.h b/block/partitions/msdos.h
similarity index 100%
rename from fs/partitions/msdos.h
rename to block/partitions/msdos.h
diff --git a/fs/partitions/osf.c b/block/partitions/osf.c
similarity index 100%
rename from fs/partitions/osf.c
rename to block/partitions/osf.c
diff --git a/fs/partitions/osf.h b/block/partitions/osf.h
similarity index 100%
rename from fs/partitions/osf.h
rename to block/partitions/osf.h
diff --git a/fs/partitions/sgi.c b/block/partitions/sgi.c
similarity index 100%
rename from fs/partitions/sgi.c
rename to block/partitions/sgi.c
diff --git a/fs/partitions/sgi.h b/block/partitions/sgi.h
similarity index 100%
rename from fs/partitions/sgi.h
rename to block/partitions/sgi.h
diff --git a/fs/partitions/sun.c b/block/partitions/sun.c
similarity index 100%
rename from fs/partitions/sun.c
rename to block/partitions/sun.c
diff --git a/fs/partitions/sun.h b/block/partitions/sun.h
similarity index 100%
rename from fs/partitions/sun.h
rename to block/partitions/sun.h
diff --git a/fs/partitions/sysv68.c b/block/partitions/sysv68.c
similarity index 100%
rename from fs/partitions/sysv68.c
rename to block/partitions/sysv68.c
diff --git a/fs/partitions/sysv68.h b/block/partitions/sysv68.h
similarity index 100%
rename from fs/partitions/sysv68.h
rename to block/partitions/sysv68.h
diff --git a/fs/partitions/ultrix.c b/block/partitions/ultrix.c
similarity index 100%
rename from fs/partitions/ultrix.c
rename to block/partitions/ultrix.c
diff --git a/fs/partitions/ultrix.h b/block/partitions/ultrix.h
similarity index 100%
rename from fs/partitions/ultrix.h
rename to block/partitions/ultrix.h
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 527a857..ae9c3ce 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -329,7 +329,6 @@
 
 config CRYPTO_GHASH
 	tristate "GHASH digest algorithm"
-	select CRYPTO_SHASH
 	select CRYPTO_GF128MUL
 	help
 	  GHASH is message digest algorithm for GCM (Galois/Counter Mode).
@@ -477,7 +476,6 @@
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
 	depends on X86 && 64BIT
-	select CRYPTO_SHASH
 	select CRYPTO_CRYPTD
 	help
 	  GHASH is message digest algorithm for GCM (Galois/Counter Mode).
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b5e6f24..5afe5d1 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -116,6 +116,8 @@
 
 source "drivers/virtio/Kconfig"
 
+source "drivers/hv/Kconfig"
+
 source "drivers/xen/Kconfig"
 
 source "drivers/staging/Kconfig"
@@ -132,8 +134,6 @@
 
 source "drivers/virt/Kconfig"
 
-source "drivers/hv/Kconfig"
-
 source "drivers/devfreq/Kconfig"
 
 endmenu
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index de0e3df..7556913 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -374,7 +374,7 @@
 	depends on DEBUG_FS
 	default n
 	help
-	  This debug facility allows ACPI AML methods to me inserted and/or
+	  This debug facility allows ACPI AML methods to be inserted and/or
 	  replaced without rebooting the system. For details refer to:
 	  Documentation/acpi/method-customizing.txt.
 
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c2793a8..d707756 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -356,7 +356,7 @@
  *
  * PARAMETERS:  register_id     - ID of ACPI Bit Register to access
  *              Value           - Value to write to the register, in bit
- *                                position zero. The bit is automaticallly
+ *                                position zero. The bit is automatically
  *                                shifted to the correct position.
  *
  * RETURN:      Status
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 631b947..6a9e3ba 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -934,7 +934,8 @@
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
 			   struct timespec *time, char **buf,
 			   struct pstore_info *psi);
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+		       u64 *id, unsigned int part,
 		       size_t size, struct pstore_info *psi);
 static int erst_clearer(enum pstore_type_id type, u64 id,
 			struct pstore_info *psi);
@@ -1053,7 +1054,8 @@
 	return (rc < 0) ? rc : (len - sizeof(*rcd));
 }
 
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+		       u64 *id, unsigned int part,
 		       size_t size, struct pstore_info *psi)
 {
 	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7711d94..86933ca 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -873,7 +873,7 @@
 
 static const struct battery_file {
 	struct file_operations ops;
-	mode_t mode;
+	umode_t mode;
 	const char *name;
 } acpi_battery_file[] = {
 	FILE_DESCRIPTION_RO(info),
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c47ae9..b258cab 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -105,7 +105,7 @@
 {
 	struct dentry *dev_dir;
 	char name[64];
-	mode_t mode = 0400;
+	umode_t mode = 0400;
 
 	if (ec_device_count == 0) {
 		acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d7bc9f..20a68ca 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -446,7 +446,7 @@
 {
 	struct acpi_processor *pr = NULL;
 	int result = 0;
-	struct sys_device *sysdev;
+	struct device *dev;
 
 	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
 	if (!pr)
@@ -491,8 +491,8 @@
 
 	per_cpu(processors, pr->id) = pr;
 
-	sysdev = get_cpu_sysdev(pr->id);
-	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
+	dev = get_cpu_device(pr->id);
+	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
 		result = -EFAULT;
 		goto err_free_cpumask;
 	}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 870550d..3b599ab 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,7 +30,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
-#include <linux/sysdev.h>
 
 #include <asm/uaccess.h>
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab..0a7ed69 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -476,6 +476,22 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Asus K54C",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Asus K54HR",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e8..54eaf96 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -52,6 +52,10 @@
 	int retval = 0;
 
 	retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+	if (retval)
+		return retval;
+
+	retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
 	return retval;
 }
 #else
@@ -109,31 +113,7 @@
 	return ret;
 }
 
-static int amba_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare		NULL
-#define amba_pm_complete		NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
@@ -155,22 +135,6 @@
 	return ret;
 }
 
-static int amba_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -189,28 +153,10 @@
 	return ret;
 }
 
-static int amba_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #else /* !CONFIG_SUSPEND */
 
 #define amba_pm_suspend		NULL
 #define amba_pm_resume		NULL
-#define amba_pm_suspend_noirq	NULL
-#define amba_pm_resume_noirq	NULL
 
 #endif /* !CONFIG_SUSPEND */
 
@@ -234,22 +180,6 @@
 	return ret;
 }
 
-static int amba_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -268,22 +198,6 @@
 	return ret;
 }
 
-static int amba_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -302,22 +216,6 @@
 	return ret;
 }
 
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 static int amba_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -336,32 +234,12 @@
 	return ret;
 }
 
-static int amba_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze		NULL
 #define amba_pm_thaw		NULL
 #define amba_pm_poweroff		NULL
 #define amba_pm_restore		NULL
-#define amba_pm_freeze_noirq	NULL
-#define amba_pm_thaw_noirq		NULL
-#define amba_pm_poweroff_noirq	NULL
-#define amba_pm_restore_noirq	NULL
 
 #endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
@@ -402,20 +280,12 @@
 #ifdef CONFIG_PM
 
 static const struct dev_pm_ops amba_pm = {
-	.prepare	= amba_pm_prepare,
-	.complete	= amba_pm_complete,
 	.suspend	= amba_pm_suspend,
 	.resume		= amba_pm_resume,
 	.freeze		= amba_pm_freeze,
 	.thaw		= amba_pm_thaw,
 	.poweroff	= amba_pm_poweroff,
 	.restore	= amba_pm_restore,
-	.suspend_noirq	= amba_pm_suspend_noirq,
-	.resume_noirq	= amba_pm_resume_noirq,
-	.freeze_noirq	= amba_pm_freeze_noirq,
-	.thaw_noirq	= amba_pm_thaw_noirq,
-	.poweroff_noirq	= amba_pm_poweroff_noirq,
-	.restore_noirq	= amba_pm_restore_noirq,
 	SET_RUNTIME_PM_OPS(
 		amba_pm_runtime_suspend,
 		amba_pm_runtime_resume,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6bdedd7..cf047c4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -820,7 +820,7 @@
 
 config PATA_OF_PLATFORM
 	tristate "OpenFirmware platform device PATA support"
-	depends on PATA_PLATFORM && OF
+	depends on PATA_PLATFORM && OF && OF_IRQ
 	help
 	  This option enables support for generic directly connected ATA
 	  devices commonly found on embedded systems with OpenFirmware
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 3d0c2b0..9e373ba 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1320,8 +1320,8 @@
           if (ia_vcc == NULL)
           {
              atomic_inc(&vcc->stats->rx_err);
+             atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
-             atm_return(vcc, atm_guess_pdu2truesize(len));
              goto INCR_DLE;
            }
           // get real pkt length  pwang_test
@@ -1334,8 +1334,8 @@
              atomic_inc(&vcc->stats->rx_err);
              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
                                                             length, skb->len);)
+             atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
-             atm_return(vcc, atm_guess_pdu2truesize(len));
              goto INCR_DLE;
           }
           skb_trim(skb, length);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 21cf46f..e95c67e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -174,4 +174,15 @@
 
 source "drivers/base/regmap/Kconfig"
 
+config DMA_SHARED_BUFFER
+	bool "Buffer framework to be shared between drivers"
+	default n
+	select ANON_INODES
+	depends on EXPERIMENTAL
+	help
+	  This option enables the framework for buffer-sharing between
+	  multiple drivers. A buffer is associated with a file using driver
+	  API extensions; the file's descriptor can then be passed on to other
+	  drivers.
+
 endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 99a375a..2c8272d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -3,16 +3,17 @@
 obj-y			:= core.o sys.o bus.o dd.o syscore.o \
 			   driver.o class.o platform.o \
 			   cpu.o firmware.o init.o map.o devres.o \
-			   attribute_container.o transport_class.o
+			   attribute_container.o transport_class.o \
+			   topology.o
 obj-$(CONFIG_DEVTMPFS)	+= devtmpfs.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
 obj-$(CONFIG_ISA)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
-obj-$(CONFIG_SMP)	+= topology.o
 ifeq ($(CONFIG_SYSFS),y)
 obj-$(CONFIG_MODULES)	+= module.o
 endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 21c1b96..7a6ae42 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -4,7 +4,9 @@
  * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
  *
  * @subsys - the struct kset that defines this subsystem
- * @devices_kset - the list of devices associated
+ * @devices_kset - the subsystem's 'devices' directory
+ * @interfaces - list of subsystem interfaces associated
+ * @mutex - protect the devices and interfaces lists.
  *
  * @drivers_kset - the list of drivers associated
  * @klist_devices - the klist to iterate over the @devices_kset
@@ -14,10 +16,8 @@
  * @bus - pointer back to the struct bus_type that this structure is associated
  *        with.
  *
- * @class_interfaces - list of class_interfaces associated
  * @glue_dirs - "glue" directory to put in-between the parent device to
  *              avoid namespace conflicts
- * @class_mutex - mutex to protect the children, devices, and interfaces lists.
  * @class - pointer back to the struct class that this structure is associated
  *          with.
  *
@@ -28,6 +28,8 @@
 struct subsys_private {
 	struct kset subsys;
 	struct kset *devices_kset;
+	struct list_head interfaces;
+	struct mutex mutex;
 
 	struct kset *drivers_kset;
 	struct klist klist_devices;
@@ -36,9 +38,7 @@
 	unsigned int drivers_autoprobe:1;
 	struct bus_type *bus;
 
-	struct list_head class_interfaces;
 	struct kset glue_dirs;
-	struct mutex class_mutex;
 	struct class *class;
 };
 #define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
@@ -94,7 +94,6 @@
 static inline int hypervisor_init(void) { return 0; }
 #endif
 extern int platform_bus_init(void);
-extern int system_bus_init(void);
 extern int cpu_dev_init(void);
 
 extern int bus_add_device(struct device *dev);
@@ -116,6 +115,7 @@
 
 extern int devres_release_all(struct device *dev);
 
+/* /sys/devices directory */
 extern struct kset *devices_kset;
 
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 000e7b2..99dc592 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -16,9 +16,14 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/string.h>
+#include <linux/mutex.h>
 #include "base.h"
 #include "power/power.h"
 
+/* /sys/devices/system */
+/* FIXME: make static after drivers/base/sys.c is deleted */
+struct kset *system_kset;
+
 #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
 
 /*
@@ -360,6 +365,47 @@
 }
 EXPORT_SYMBOL_GPL(bus_find_device_by_name);
 
+/**
+ * subsys_find_device_by_id - find a device with a specific enumeration number
+ * @subsys: subsystem
+ * @id: index 'id' in struct device
+ * @hint: device to check first
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to a full list search. Either way a reference for
+ * the returned object is taken.
+ */
+struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
+					struct device *hint)
+{
+	struct klist_iter i;
+	struct device *dev;
+
+	if (!subsys)
+		return NULL;
+
+	if (hint) {
+		klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
+		dev = next_device(&i);
+		if (dev && dev->id == id && get_device(dev)) {
+			klist_iter_exit(&i);
+			return dev;
+		}
+		klist_iter_exit(&i);
+	}
+
+	klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
+	while ((dev = next_device(&i))) {
+		if (dev->id == id && get_device(dev)) {
+			klist_iter_exit(&i);
+			return dev;
+		}
+	}
+	klist_iter_exit(&i);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
+
 static struct device_driver *next_driver(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
@@ -487,38 +533,59 @@
 void bus_probe_device(struct device *dev)
 {
 	struct bus_type *bus = dev->bus;
+	struct subsys_interface *sif;
 	int ret;
 
-	if (bus && bus->p->drivers_autoprobe) {
+	if (!bus)
+		return;
+
+	if (bus->p->drivers_autoprobe) {
 		ret = device_attach(dev);
 		WARN_ON(ret < 0);
 	}
+
+	mutex_lock(&bus->p->mutex);
+	list_for_each_entry(sif, &bus->p->interfaces, node)
+		if (sif->add_dev)
+			sif->add_dev(dev, sif);
+	mutex_unlock(&bus->p->mutex);
 }
 
 /**
  * bus_remove_device - remove device from bus
  * @dev: device to be removed
  *
- * - Remove symlink from bus's directory.
+ * - Remove device from all interfaces.
+ * - Remove symlink from bus' directory.
  * - Delete device from bus's list.
  * - Detach from its driver.
  * - Drop reference taken in bus_add_device().
  */
 void bus_remove_device(struct device *dev)
 {
-	if (dev->bus) {
-		sysfs_remove_link(&dev->kobj, "subsystem");
-		sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
-				  dev_name(dev));
-		device_remove_attrs(dev->bus, dev);
-		if (klist_node_attached(&dev->p->knode_bus))
-			klist_del(&dev->p->knode_bus);
+	struct bus_type *bus = dev->bus;
+	struct subsys_interface *sif;
 
-		pr_debug("bus: '%s': remove device %s\n",
-			 dev->bus->name, dev_name(dev));
-		device_release_driver(dev);
-		bus_put(dev->bus);
-	}
+	if (!bus)
+		return;
+
+	mutex_lock(&bus->p->mutex);
+	list_for_each_entry(sif, &bus->p->interfaces, node)
+		if (sif->remove_dev)
+			sif->remove_dev(dev, sif);
+	mutex_unlock(&bus->p->mutex);
+
+	sysfs_remove_link(&dev->kobj, "subsystem");
+	sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
+			  dev_name(dev));
+	device_remove_attrs(dev->bus, dev);
+	if (klist_node_attached(&dev->p->knode_bus))
+		klist_del(&dev->p->knode_bus);
+
+	pr_debug("bus: '%s': remove device %s\n",
+		 dev->bus->name, dev_name(dev));
+	device_release_driver(dev);
+	bus_put(dev->bus);
 }
 
 static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
@@ -847,14 +914,14 @@
 static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
 
 /**
- * bus_register - register a bus with the system.
+ * __bus_register - register a driver-core subsystem
  * @bus: bus.
  *
  * Once we have that, we registered the bus with the kobject
  * infrastructure, then register the children subsystems it has:
- * the devices and drivers that belong to the bus.
+ * the devices and drivers that belong to the subsystem.
  */
-int bus_register(struct bus_type *bus)
+int __bus_register(struct bus_type *bus, struct lock_class_key *key)
 {
 	int retval;
 	struct subsys_private *priv;
@@ -898,6 +965,8 @@
 		goto bus_drivers_fail;
 	}
 
+	INIT_LIST_HEAD(&priv->interfaces);
+	__mutex_init(&priv->mutex, "subsys mutex", key);
 	klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
 	klist_init(&priv->klist_drivers, NULL, NULL);
 
@@ -927,7 +996,7 @@
 	bus->p = NULL;
 	return retval;
 }
-EXPORT_SYMBOL_GPL(bus_register);
+EXPORT_SYMBOL_GPL(__bus_register);
 
 /**
  * bus_unregister - remove a bus from the system
@@ -939,6 +1008,8 @@
 void bus_unregister(struct bus_type *bus)
 {
 	pr_debug("bus: '%s': unregistering\n", bus->name);
+	if (bus->dev_root)
+		device_unregister(bus->dev_root);
 	bus_remove_attrs(bus);
 	remove_probe_files(bus);
 	kset_unregister(bus->p->drivers_kset);
@@ -1028,10 +1099,194 @@
 }
 EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
 
+/**
+ * subsys_dev_iter_init - initialize subsys device iterator
+ * @iter: subsys iterator to initialize
+ * @subsys: the subsys we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize subsys iterator @iter such that it iterates over devices
+ * of @subsys.  If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
+			  struct device *start, const struct device_type *type)
+{
+	struct klist_node *start_knode = NULL;
+
+	if (start)
+		start_knode = &start->p->knode_bus;
+	klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+	iter->type = type;
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
+
+/**
+ * subsys_dev_iter_next - iterate to the next device
+ * @iter: subsys iterator to proceed
+ *
+ * Proceed @iter to the next device and return it.  Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released till
+ * the iterator proceeds to the next device or exits.  The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into subsys code.
+ */
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+{
+	struct klist_node *knode;
+	struct device *dev;
+
+	for (;;) {
+		knode = klist_next(&iter->ki);
+		if (!knode)
+			return NULL;
+		dev = container_of(knode, struct device_private, knode_bus)->device;
+		if (!iter->type || iter->type == dev->type)
+			return dev;
+	}
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
+
+/**
+ * subsys_dev_iter_exit - finish iteration
+ * @iter: subsys iterator to finish
+ *
+ * Finish an iteration.  Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+{
+	klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
+
+int subsys_interface_register(struct subsys_interface *sif)
+{
+	struct bus_type *subsys;
+	struct subsys_dev_iter iter;
+	struct device *dev;
+
+	if (!sif || !sif->subsys)
+		return -ENODEV;
+
+	subsys = bus_get(sif->subsys);
+	if (!subsys)
+		return -EINVAL;
+
+	mutex_lock(&subsys->p->mutex);
+	list_add_tail(&sif->node, &subsys->p->interfaces);
+	if (sif->add_dev) {
+		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+		while ((dev = subsys_dev_iter_next(&iter)))
+			sif->add_dev(dev, sif);
+		subsys_dev_iter_exit(&iter);
+	}
+	mutex_unlock(&subsys->p->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(subsys_interface_register);
+
+void subsys_interface_unregister(struct subsys_interface *sif)
+{
+	struct bus_type *subsys = sif->subsys;
+	struct subsys_dev_iter iter;
+	struct device *dev;
+
+	if (!sif)
+		return;
+
+	mutex_lock(&subsys->p->mutex);
+	list_del_init(&sif->node);
+	if (sif->remove_dev) {
+		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+		while ((dev = subsys_dev_iter_next(&iter)))
+			sif->remove_dev(dev, sif);
+		subsys_dev_iter_exit(&iter);
+	}
+	mutex_unlock(&subsys->p->mutex);
+
+	bus_put(subsys);
+}
+EXPORT_SYMBOL_GPL(subsys_interface_unregister);
+
+static void system_root_device_release(struct device *dev)
+{
+	kfree(dev);
+}
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys - system subsystem
+ * @groups - default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new, it exists for compatibility
+ * with bad ideas only. New subsystems should use plain subsystems; the
+ * subsystem-wide attributes should be added to the subsystem directory
+ * itself and not to a fake root device placed in
+ * /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+			   const struct attribute_group **groups)
+{
+	struct device *dev;
+	int err;
+
+	err = bus_register(subsys);
+	if (err < 0)
+		return err;
+
+	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_dev;
+	}
+
+	err = dev_set_name(dev, "%s", subsys->name);
+	if (err < 0)
+		goto err_name;
+
+	dev->kobj.parent = &system_kset->kobj;
+	dev->groups = groups;
+	dev->release = system_root_device_release;
+
+	err = device_register(dev);
+	if (err < 0)
+		goto err_dev_reg;
+
+	subsys->dev_root = dev;
+	return 0;
+
+err_dev_reg:
+	put_device(dev);
+	dev = NULL;
+err_name:
+	kfree(dev);
+err_dev:
+	bus_unregister(subsys);
+	return err;
+}
+EXPORT_SYMBOL_GPL(subsys_system_register);
+
 int __init buses_init(void)
 {
 	bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
 	if (!bus_kset)
 		return -ENOMEM;
+
+	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
+	if (!system_kset)
+		return -ENOMEM;
+
 	return 0;
 }
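
The bus.c changes above introduce subsystem interfaces and /sys/devices/system/<name>
root devices as the replacement for sysdev classes. A minimal sketch of how a converted
subsystem might use the new entry points is shown below, mirroring the cpu.c conversion
further down in this patch; the "widget" names are hypothetical, and the struct
subsys_interface layout (including the int return of add_dev) is assumed from the
matching <linux/device.h> change, which is outside this hunk.

#include <linux/device.h>
#include <linux/init.h>

/* Hypothetical 'widget' system subsystem: root device at
 * /sys/devices/system/widget, devices enumerated as widget0, widget1, ...
 */
static struct bus_type widget_subsys = {
	.name = "widget",
	.dev_name = "widget",
};

/* Return type assumed from the <linux/device.h> counterpart of this patch. */
static int widget_add_dev(struct device *dev, struct subsys_interface *sif)
{
	dev_info(dev, "widget interface attached\n");
	return 0;
}

static struct subsys_interface widget_interface = {
	.subsys		= &widget_subsys,
	.add_dev	= widget_add_dev,
};

static int __init widget_init(void)
{
	int err;

	/* Creates the /sys/devices/system/widget root device. */
	err = subsys_system_register(&widget_subsys, NULL);
	if (err)
		return err;

	/* add_dev() is called for every device already on the subsystem
	 * and for every device added later (see bus_probe_device() above).
	 */
	return subsys_interface_register(&widget_interface);
}
device_initcall(widget_init);
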
diff --git a/drivers/base/class.c b/drivers/base/class.c
index b80d91c..03243d4 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -184,9 +184,9 @@
 	if (!cp)
 		return -ENOMEM;
 	klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
-	INIT_LIST_HEAD(&cp->class_interfaces);
+	INIT_LIST_HEAD(&cp->interfaces);
 	kset_init(&cp->glue_dirs);
-	__mutex_init(&cp->class_mutex, "struct class mutex", key);
+	__mutex_init(&cp->mutex, "subsys mutex", key);
 	error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
 	if (error) {
 		kfree(cp);
@@ -460,15 +460,15 @@
 	if (!parent)
 		return -EINVAL;
 
-	mutex_lock(&parent->p->class_mutex);
-	list_add_tail(&class_intf->node, &parent->p->class_interfaces);
+	mutex_lock(&parent->p->mutex);
+	list_add_tail(&class_intf->node, &parent->p->interfaces);
 	if (class_intf->add_dev) {
 		class_dev_iter_init(&iter, parent, NULL, NULL);
 		while ((dev = class_dev_iter_next(&iter)))
 			class_intf->add_dev(dev, class_intf);
 		class_dev_iter_exit(&iter);
 	}
-	mutex_unlock(&parent->p->class_mutex);
+	mutex_unlock(&parent->p->mutex);
 
 	return 0;
 }
@@ -482,7 +482,7 @@
 	if (!parent)
 		return;
 
-	mutex_lock(&parent->p->class_mutex);
+	mutex_lock(&parent->p->mutex);
 	list_del_init(&class_intf->node);
 	if (class_intf->remove_dev) {
 		class_dev_iter_init(&iter, parent, NULL, NULL);
@@ -490,7 +490,7 @@
 			class_intf->remove_dev(dev, class_intf);
 		class_dev_iter_exit(&iter);
 	}
-	mutex_unlock(&parent->p->class_mutex);
+	mutex_unlock(&parent->p->mutex);
 
 	class_put(parent);
 }
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 919daa7..4a67cc0 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -118,6 +118,56 @@
 	.store	= dev_attr_store,
 };
 
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+ssize_t device_store_ulong(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	char *end;
+	unsigned long new = simple_strtoul(buf, &end, 0);
+	if (end == buf)
+		return -EINVAL;
+	*(unsigned long *)(ea->var) = new;
+	/* Always return full write size even if we didn't consume all */
+	return size;
+}
+EXPORT_SYMBOL_GPL(device_store_ulong);
+
+ssize_t device_show_ulong(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_ulong);
+
+ssize_t device_store_int(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t size)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	char *end;
+	long new = simple_strtol(buf, &end, 0);
+	if (end == buf || new > INT_MAX || new < INT_MIN)
+		return -EINVAL;
+	*(int *)(ea->var) = new;
+	/* Always return full write size even if we didn't consume all */
+	return size;
+}
+EXPORT_SYMBOL_GPL(device_store_int);
+
+ssize_t device_show_int(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_int);
 
 /**
  *	device_release - free device structure.
@@ -198,7 +248,7 @@
 	if (MAJOR(dev->devt)) {
 		const char *tmp;
 		const char *name;
-		mode_t mode = 0;
+		umode_t mode = 0;
 
 		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
 		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
@@ -464,7 +514,7 @@
 static struct device_attribute devt_attr =
 	__ATTR(dev, S_IRUGO, show_dev, NULL);
 
-/* kset to create /sys/devices/  */
+/* /sys/devices/ */
 struct kset *devices_kset;
 
 /**
@@ -711,6 +761,10 @@
 		return k;
 	}
 
+	/* subsystems can specify a default root directory for their devices */
+	if (!parent && dev->bus && dev->bus->dev_root)
+		return &dev->bus->dev_root->kobj;
+
 	if (parent)
 		return &parent->kobj;
 	return NULL;
@@ -731,14 +785,6 @@
 	cleanup_glue_dir(dev, dev->kobj.parent);
 }
 
-static void setup_parent(struct device *dev, struct device *parent)
-{
-	struct kobject *kobj;
-	kobj = get_device_parent(dev, parent);
-	if (kobj)
-		dev->kobj.parent = kobj;
-}
-
 static int device_add_class_symlinks(struct device *dev)
 {
 	int error;
@@ -891,6 +937,7 @@
 int device_add(struct device *dev)
 {
 	struct device *parent = NULL;
+	struct kobject *kobj;
 	struct class_interface *class_intf;
 	int error = -EINVAL;
 
@@ -914,6 +961,10 @@
 		dev->init_name = NULL;
 	}
 
+	/* subsystems can specify simple device enumeration */
+	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
+		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+
 	if (!dev_name(dev)) {
 		error = -EINVAL;
 		goto name_error;
@@ -922,7 +973,9 @@
 	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
 
 	parent = get_device(dev->parent);
-	setup_parent(dev, parent);
+	kobj = get_device_parent(dev, parent);
+	if (kobj)
+		dev->kobj.parent = kobj;
 
 	/* use parent numa_node */
 	if (parent)
@@ -982,17 +1035,17 @@
 			       &parent->p->klist_children);
 
 	if (dev->class) {
-		mutex_lock(&dev->class->p->class_mutex);
+		mutex_lock(&dev->class->p->mutex);
 		/* tie the class to the device */
 		klist_add_tail(&dev->knode_class,
 			       &dev->class->p->klist_devices);
 
 		/* notify any interfaces that the device is here */
 		list_for_each_entry(class_intf,
-				    &dev->class->p->class_interfaces, node)
+				    &dev->class->p->interfaces, node)
 			if (class_intf->add_dev)
 				class_intf->add_dev(dev, class_intf);
-		mutex_unlock(&dev->class->p->class_mutex);
+		mutex_unlock(&dev->class->p->mutex);
 	}
 done:
 	put_device(dev);
@@ -1107,15 +1160,15 @@
 	if (dev->class) {
 		device_remove_class_symlinks(dev);
 
-		mutex_lock(&dev->class->p->class_mutex);
+		mutex_lock(&dev->class->p->mutex);
 		/* notify any interfaces that the device is now gone */
 		list_for_each_entry(class_intf,
-				    &dev->class->p->class_interfaces, node)
+				    &dev->class->p->interfaces, node)
 			if (class_intf->remove_dev)
 				class_intf->remove_dev(dev, class_intf);
 		/* remove the device from the class list */
 		klist_del(&dev->knode_class);
-		mutex_unlock(&dev->class->p->class_mutex);
+		mutex_unlock(&dev->class->p->mutex);
 	}
 	device_remove_file(dev, &uevent_attr);
 	device_remove_attrs(dev);
@@ -1182,7 +1235,7 @@
  * freed by the caller.
  */
 const char *device_get_devnode(struct device *dev,
-			       mode_t *mode, const char **tmp)
+			       umode_t *mode, const char **tmp)
 {
 	char *s;
 
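
The device_show_ulong()/device_store_ulong() and *_int() helpers added above are meant
to be used through an extended attribute whose ->var points at the variable being
exposed. In the sketch below, the struct dev_ext_attribute layout (a struct
device_attribute plus a void *var) is inferred from the to_ext_attr() container_of()
above, with the real definition living in <linux/device.h> outside this hunk, and the
poll_interval attribute is purely illustrative.

#include <linux/device.h>

/* Value exported read/write through sysfs. */
static unsigned long poll_interval = 100;

/* ->var points at the variable; show/store use the new generic helpers. */
static struct dev_ext_attribute poll_interval_attr = {
	.attr	= __ATTR(poll_interval, 0644, device_show_ulong,
			 device_store_ulong),
	.var	= &poll_interval,
};

static int example_add_attr(struct device *dev)
{
	/* Creates <device>/poll_interval backed directly by poll_interval. */
	return device_create_file(dev, &poll_interval_attr.attr);
}
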
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 251acea..9a5578e 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -1,8 +1,7 @@
 /*
- * drivers/base/cpu.c - basic CPU class support
+ * CPU subsystem support
  */
 
-#include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -14,40 +13,40 @@
 
 #include "base.h"
 
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];
-
-struct sysdev_class cpu_sysdev_class = {
+struct bus_type cpu_subsys = {
 	.name = "cpu",
-	.attrs = cpu_sysdev_class_attrs,
+	.dev_name = "cpu",
 };
-EXPORT_SYMBOL(cpu_sysdev_class);
+EXPORT_SYMBOL_GPL(cpu_subsys);
 
-static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
+static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_online(struct device *dev,
+			   struct device_attribute *attr,
 			   char *buf)
 {
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
-	return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
+	return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
 }
 
-static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
-				 const char *buf, size_t count)
+static ssize_t __ref store_online(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
 {
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	ssize_t ret;
 
 	cpu_hotplug_driver_lock();
 	switch (buf[0]) {
 	case '0':
-		ret = cpu_down(cpu->sysdev.id);
+		ret = cpu_down(cpu->dev.id);
 		if (!ret)
 			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
 		break;
 	case '1':
-		ret = cpu_up(cpu->sysdev.id);
+		ret = cpu_up(cpu->dev.id);
 		if (!ret)
 			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
 		break;
@@ -60,44 +59,44 @@
 		ret = count;
 	return ret;
 }
-static SYSDEV_ATTR(online, 0644, show_online, store_online);
+static DEVICE_ATTR(online, 0644, show_online, store_online);
 
 static void __cpuinit register_cpu_control(struct cpu *cpu)
 {
-	sysdev_create_file(&cpu->sysdev, &attr_online);
+	device_create_file(&cpu->dev, &dev_attr_online);
 }
 void unregister_cpu(struct cpu *cpu)
 {
-	int logical_cpu = cpu->sysdev.id;
+	int logical_cpu = cpu->dev.id;
 
 	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
 
-	sysdev_remove_file(&cpu->sysdev, &attr_online);
+	device_remove_file(&cpu->dev, &dev_attr_online);
 
-	sysdev_unregister(&cpu->sysdev);
+	device_unregister(&cpu->dev);
 	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
 	return;
 }
 
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct sysdev_class *class,
-			       struct sysdev_class_attribute *attr,
+static ssize_t cpu_probe_store(struct device *dev,
+			       struct device_attribute *attr,
 			       const char *buf,
 			       size_t count)
 {
 	return arch_cpu_probe(buf, count);
 }
 
-static ssize_t cpu_release_store(struct sysdev_class *class,
-				 struct sysdev_class_attribute *attr,
+static ssize_t cpu_release_store(struct device *dev,
+				 struct device_attribute *attr,
 				 const char *buf,
 				 size_t count)
 {
 	return arch_cpu_release(buf, count);
 }
 
-static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
 
 #else /* ... !CONFIG_HOTPLUG_CPU */
@@ -109,15 +108,15 @@
 #ifdef CONFIG_KEXEC
 #include <linux/kexec.h>
 
-static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
 				char *buf)
 {
-	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	ssize_t rc;
 	unsigned long long addr;
 	int cpunum;
 
-	cpunum = cpu->sysdev.id;
+	cpunum = cpu->dev.id;
 
 	/*
 	 * Might be reading other cpu's data based on which cpu read thread
@@ -129,7 +128,7 @@
 	rc = sprintf(buf, "%Lx\n", addr);
 	return rc;
 }
-static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 #endif
 
 /*
@@ -137,12 +136,12 @@
  */
 
 struct cpu_attr {
-	struct sysdev_class_attribute attr;
+	struct device_attribute attr;
 	const struct cpumask *const * const map;
 };
 
-static ssize_t show_cpus_attr(struct sysdev_class *class,
-			      struct sysdev_class_attribute *attr,
+static ssize_t show_cpus_attr(struct device *dev,
+			      struct device_attribute *attr,
 			      char *buf)
 {
 	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
@@ -153,10 +152,10 @@
 	return n;
 }
 
-#define _CPU_ATTR(name, map)						\
-	{ _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }
+#define _CPU_ATTR(name, map) \
+	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }
 
-/* Keep in sync with cpu_sysdev_class_attrs */
+/* Keep in sync with cpu_subsys_attrs */
 static struct cpu_attr cpu_attrs[] = {
 	_CPU_ATTR(online, &cpu_online_mask),
 	_CPU_ATTR(possible, &cpu_possible_mask),
@@ -166,19 +165,19 @@
 /*
  * Print values for NR_CPUS and offlined cpus
  */
-static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
-				     struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_kernel_max(struct device *dev,
+				     struct device_attribute *attr, char *buf)
 {
 	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
 	return n;
 }
-static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
 
 /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
 unsigned int total_cpus;
 
-static ssize_t print_cpus_offline(struct sysdev_class *class,
-				  struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_offline(struct device *dev,
+				  struct device_attribute *attr, char *buf)
 {
 	int n = 0, len = PAGE_SIZE-2;
 	cpumask_var_t offline;
@@ -205,7 +204,7 @@
 	n += snprintf(&buf[n], len - n, "\n");
 	return n;
 }
-static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
 
 /*
  * register_cpu - Setup a sysfs device for a CPU.
@@ -218,57 +217,73 @@
 int __cpuinit register_cpu(struct cpu *cpu, int num)
 {
 	int error;
+
 	cpu->node_id = cpu_to_node(num);
-	cpu->sysdev.id = num;
-	cpu->sysdev.cls = &cpu_sysdev_class;
-
-	error = sysdev_register(&cpu->sysdev);
-
+	cpu->dev.id = num;
+	cpu->dev.bus = &cpu_subsys;
+	error = device_register(&cpu->dev);
 	if (!error && cpu->hotpluggable)
 		register_cpu_control(cpu);
 	if (!error)
-		per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
+		per_cpu(cpu_sys_devices, num) = &cpu->dev;
 	if (!error)
 		register_cpu_under_node(num, cpu_to_node(num));
 
 #ifdef CONFIG_KEXEC
 	if (!error)
-		error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
+		error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
 #endif
 	return error;
 }
 
-struct sys_device *get_cpu_sysdev(unsigned cpu)
+struct device *get_cpu_device(unsigned cpu)
 {
 	if (cpu < nr_cpu_ids && cpu_possible(cpu))
 		return per_cpu(cpu_sys_devices, cpu);
 	else
 		return NULL;
 }
-EXPORT_SYMBOL_GPL(get_cpu_sysdev);
+EXPORT_SYMBOL_GPL(get_cpu_device);
+
+static struct attribute *cpu_root_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+	&dev_attr_probe.attr,
+	&dev_attr_release.attr,
+#endif
+	&cpu_attrs[0].attr.attr,
+	&cpu_attrs[1].attr.attr,
+	&cpu_attrs[2].attr.attr,
+	&dev_attr_kernel_max.attr,
+	&dev_attr_offline.attr,
+	NULL
+};
+
+static struct attribute_group cpu_root_attr_group = {
+	.attrs = cpu_root_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+	&cpu_root_attr_group,
+	NULL,
+};
+
+bool cpu_is_hotpluggable(unsigned cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
 
 int __init cpu_dev_init(void)
 {
 	int err;
 
-	err = sysdev_class_register(&cpu_sysdev_class);
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	if (!err)
-		err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
-#endif
+	err = subsys_system_register(&cpu_subsys, cpu_root_attr_groups);
+	if (err)
+		return err;
 
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+	err = sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
+#endif
 	return err;
 }
-
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-	&attr_probe,
-	&attr_release,
-#endif
-	&cpu_attrs[0].attr,
-	&cpu_attrs[1].attr,
-	&cpu_attrs[2].attr,
-	&attr_kernel_max,
-	&attr_offline,
-	NULL
-};
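
After this conversion a CPU is an ordinary device on cpu_subsys, named cpu<N> under
/sys/devices/system/cpu. A rough sketch of how architecture code might register its
CPUs and query the new cpu_is_hotpluggable() helper follows; the struct cpu layout is
only inferred from the fields this file touches (dev, hotpluggable), and the policy of
keeping cpu0 non-hotpluggable is illustrative.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/printk.h>

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init example_topology_init(void)
{
	int cpu, err;

	for_each_present_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/* Expose the 'online' attribute only where offlining works. */
		c->hotpluggable = cpu != 0;

		/* Registers /sys/devices/system/cpu/cpu<N>. */
		err = register_cpu(c, cpu);
		if (err)
			return err;

		pr_info("cpu%d hotpluggable: %d\n", cpu,
			cpu_is_hotpluggable(cpu));
	}
	return 0;
}
subsys_initcall(example_topology_init);
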
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 65cd748..524bf96 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -639,7 +639,7 @@
  * @dev: Device this memory belongs to
  * @p: Memory to free
  *
- * Free memory allocated with dev_kzalloc().
+ * Free memory allocated with devm_kzalloc().
  */
 void devm_kfree(struct device *dev, void *p)
 {
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a4760e0..8493536 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -40,7 +40,7 @@
 	struct completion done;
 	int err;
 	const char *name;
-	mode_t mode;	/* 0 => delete */
+	umode_t mode;	/* 0 => delete */
 	struct device *dev;
 } *requests;
 
@@ -142,7 +142,7 @@
 	return req.err;
 }
 
-static int dev_mkdir(const char *name, mode_t mode)
+static int dev_mkdir(const char *name, umode_t mode)
 {
 	struct dentry *dentry;
 	struct path path;
@@ -189,7 +189,7 @@
 	return err;
 }
 
-static int handle_create(const char *nodename, mode_t mode, struct device *dev)
+static int handle_create(const char *nodename, umode_t mode, struct device *dev)
 {
 	struct dentry *dentry;
 	struct path path;
@@ -378,7 +378,7 @@
 
 static DECLARE_COMPLETION(setup_done);
 
-static int handle(const char *name, mode_t mode, struct device *dev)
+static int handle(const char *name, umode_t mode, struct device *dev)
 {
 	if (mode)
 		return handle_create(name, mode, dev);
@@ -413,10 +413,9 @@
 			}
 			spin_lock(&req_lock);
 		}
-		set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock(&req_lock);
 		schedule();
-		__set_current_state(TASK_RUNNING);
 	}
 	return 0;
 out:
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
new file mode 100644
index 0000000..e38ad24
--- /dev/null
+++ b/drivers/base/dma-buf.c
@@ -0,0 +1,291 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to the linaro-mm-sig list, and especially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+
+static inline int is_dma_buf_file(struct file *);
+
+static int dma_buf_release(struct inode *inode, struct file *file)
+{
+	struct dma_buf *dmabuf;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	dmabuf = file->private_data;
+
+	dmabuf->ops->release(dmabuf);
+	kfree(dmabuf);
+	return 0;
+}
+
+static const struct file_operations dma_buf_fops = {
+	.release	= dma_buf_release,
+};
+
+/*
+ * is_dma_buf_file - Check if struct file* is associated with dma_buf
+ */
+static inline int is_dma_buf_file(struct file *file)
+{
+	return file->f_op == &dma_buf_fops;
+}
+
+/**
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * with this buffer, so it can be exported.
+ * Also connect the allocator specific data and ops to the buffer.
+ *
+ * @priv:	[in]	Attach private data of allocator to this buffer
+ * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer.
+ * @size:	[in]	Size of the buffer
+ * @flags:	[in]	mode flags for the file.
+ *
+ * Returns, on success, a newly created dma_buf object, which wraps the
+ * supplied private data and operations for dma_buf_ops. On missing ops or
+ * an error allocating the struct dma_buf, a negative error pointer is returned.
+ *
+ */
+struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+				size_t size, int flags)
+{
+	struct dma_buf *dmabuf;
+	struct file *file;
+
+	if (WARN_ON(!priv || !ops
+			  || !ops->map_dma_buf
+			  || !ops->unmap_dma_buf
+			  || !ops->release)) {
+		return ERR_PTR(-EINVAL);
+	}
+
+	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+	if (dmabuf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	dmabuf->priv = priv;
+	dmabuf->ops = ops;
+	dmabuf->size = size;
+
+	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+
+	dmabuf->file = file;
+
+	mutex_init(&dmabuf->lock);
+	INIT_LIST_HEAD(&dmabuf->attachments);
+
+	return dmabuf;
+}
+EXPORT_SYMBOL_GPL(dma_buf_export);
+
+
+/**
+ * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
+ *
+ * On success, returns an associated 'fd'. Else, returns error.
+ */
+int dma_buf_fd(struct dma_buf *dmabuf)
+{
+	int error, fd;
+
+	if (!dmabuf || !dmabuf->file)
+		return -EINVAL;
+
+	error = get_unused_fd();
+	if (error < 0)
+		return error;
+	fd = error;
+
+	fd_install(fd, dmabuf->file);
+
+	return fd;
+}
+EXPORT_SYMBOL_GPL(dma_buf_fd);
+
+/**
+ * dma_buf_get - returns the dma_buf structure related to an fd
+ * @fd:	[in]	fd associated with the dma_buf to be returned
+ *
+ * On success, returns the dma_buf structure associated with an fd; uses
+ * file's refcounting done by fget to increase refcount. Returns ERR_PTR
+ * otherwise.
+ */
+struct dma_buf *dma_buf_get(int fd)
+{
+	struct file *file;
+
+	file = fget(fd);
+
+	if (!file)
+		return ERR_PTR(-EBADF);
+
+	if (!is_dma_buf_file(file)) {
+		fput(file);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return file->private_data;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get);
+
+/**
+ * dma_buf_put - decreases refcount of the buffer
+ * @dmabuf:	[in]	buffer to reduce refcount of
+ *
+ * Uses file's refcounting done implicitly by fput()
+ */
+void dma_buf_put(struct dma_buf *dmabuf)
+{
+	if (WARN_ON(!dmabuf || !dmabuf->file))
+		return;
+
+	fput(dmabuf->file);
+}
+EXPORT_SYMBOL_GPL(dma_buf_put);
+
+/**
+ * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * @dmabuf:	[in]	buffer to attach device to.
+ * @dev:	[in]	device to be attached.
+ *
+ * Returns struct dma_buf_attachment * for this attachment; may return negative
+ * error codes.
+ *
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+					  struct device *dev)
+{
+	struct dma_buf_attachment *attach;
+	int ret;
+
+	if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+		return ERR_PTR(-EINVAL);
+
+	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
+	if (attach == NULL)
+		goto err_alloc;
+
+	mutex_lock(&dmabuf->lock);
+
+	attach->dev = dev;
+	attach->dmabuf = dmabuf;
+	if (dmabuf->ops->attach) {
+		ret = dmabuf->ops->attach(dmabuf, dev, attach);
+		if (ret)
+			goto err_attach;
+	}
+	list_add(&attach->node, &dmabuf->attachments);
+
+	mutex_unlock(&dmabuf->lock);
+	return attach;
+
+err_alloc:
+	return ERR_PTR(-ENOMEM);
+err_attach:
+	kfree(attach);
+	mutex_unlock(&dmabuf->lock);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_attach);
+
+/**
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
+ * optionally calls detach() of dma_buf_ops for device-specific detach
+ * @dmabuf:	[in]	buffer to detach from.
+ * @attach:	[in]	attachment to be detached; is free'd after this call.
+ *
+ */
+void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+{
+	if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+		return;
+
+	mutex_lock(&dmabuf->lock);
+	list_del(&attach->node);
+	if (dmabuf->ops->detach)
+		dmabuf->ops->detach(dmabuf, attach);
+
+	mutex_unlock(&dmabuf->lock);
+	kfree(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_detach);
+
+/**
+ * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach:	[in]	attachment whose scatterlist is to be returned
+ * @direction:	[in]	direction of DMA transfer
+ *
+ * Returns sg_table containing the scatterlist to be returned; may return NULL
+ * or ERR_PTR.
+ *
+ */
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+					enum dma_data_direction direction)
+{
+	struct sg_table *sg_table = ERR_PTR(-EINVAL);
+
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&attach->dmabuf->lock);
+	if (attach->dmabuf->ops->map_dma_buf)
+		sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	mutex_unlock(&attach->dmabuf->lock);
+
+	return sg_table;
+}
+EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+
+/**
+ * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
+ * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach:	[in]	attachment to unmap buffer from
+ * @sg_table:	[in]	scatterlist info of the buffer to unmap
+ *
+ */
+void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+				struct sg_table *sg_table)
+{
+	if (WARN_ON(!attach || !attach->dmabuf || !sg_table
+			    || !attach->dmabuf->ops))
+		return;
+
+	mutex_lock(&attach->dmabuf->lock);
+	if (attach->dmabuf->ops->unmap_dma_buf)
+		attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
+	mutex_unlock(&attach->dmabuf->lock);
+
+}
+EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
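
dma-buf.c above is a new file and carries the whole initial API, so a short sketch of
the intended flow may help: an exporter wraps its buffer with dma_buf_export() and
hands userspace an fd from dma_buf_fd(); an importing driver resolves that fd and maps
the backing storage as below. All calls used here are defined in the new file; only the
error-handling policy and the DMA direction are illustrative.

#include <linux/dma-buf.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Importer side: resolve an fd handed over by the exporting driver,
 * attach the importing device and map the buffer for DMA.
 */
static int example_import(int fd, struct device *dev)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto out_put;
	}

	/* May return NULL or ERR_PTR, as documented above. */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto out_detach;
	}

	/* ... program the device using the scatterlist in sgt ... */

	dma_buf_unmap_attachment(attach, sgt);
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);
	return ret;
}
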
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 06ed6b4..26ab358 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -226,13 +226,13 @@
 	int loading = simple_strtol(buf, NULL, 10);
 	int i;
 
+	mutex_lock(&fw_lock);
+
+	if (!fw_priv->fw)
+		goto out;
+
 	switch (loading) {
 	case 1:
-		mutex_lock(&fw_lock);
-		if (!fw_priv->fw) {
-			mutex_unlock(&fw_lock);
-			break;
-		}
 		firmware_free_data(fw_priv->fw);
 		memset(fw_priv->fw, 0, sizeof(struct firmware));
 		/* If the pages are not owned by 'struct firmware' */
@@ -243,7 +243,6 @@
 		fw_priv->page_array_size = 0;
 		fw_priv->nr_pages = 0;
 		set_bit(FW_STATUS_LOADING, &fw_priv->status);
-		mutex_unlock(&fw_lock);
 		break;
 	case 0:
 		if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
@@ -274,7 +273,8 @@
 		fw_load_abort(fw_priv);
 		break;
 	}
-
+out:
+	mutex_unlock(&fw_lock);
 	return count;
 }
 
@@ -534,6 +534,8 @@
 		return 0;
 	}
 
+	read_lock_usermodehelper();
+
 	if (WARN_ON(usermodehelper_is_disabled())) {
 		dev_err(device, "firmware: %s will not be loaded\n", name);
 		retval = -EBUSY;
@@ -572,6 +574,8 @@
 	fw_destroy_instance(fw_priv);
 
 out:
+	read_unlock_usermodehelper();
+
 	if (retval) {
 		release_firmware(firmware);
 		*firmware_p = NULL;
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c8a934e..c16f0b8 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -31,7 +31,6 @@
 	 * core core pieces.
 	 */
 	platform_bus_init();
-	system_bus_init();
 	cpu_dev_init();
 	memory_dev_init();
 }
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8272d92..f17e3ea 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -1,5 +1,5 @@
 /*
- * drivers/base/memory.c - basic Memory class support
+ * Memory subsystem support
  *
  * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
  *            Dave Hansen <haveblue@us.ibm.com>
@@ -10,7 +10,6 @@
  * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
  */
 
-#include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/topology.h>
@@ -38,26 +37,9 @@
 	return section_nr / sections_per_block;
 }
 
-static struct sysdev_class memory_sysdev_class = {
+static struct bus_type memory_subsys = {
 	.name = MEMORY_CLASS_NAME,
-};
-
-static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
-{
-	return MEMORY_CLASS_NAME;
-}
-
-static int memory_uevent(struct kset *kset, struct kobject *obj,
-			struct kobj_uevent_env *env)
-{
-	int retval = 0;
-
-	return retval;
-}
-
-static const struct kset_uevent_ops memory_uevent_ops = {
-	.name		= memory_uevent_name,
-	.uevent		= memory_uevent,
+	.dev_name = MEMORY_CLASS_NAME,
 };
 
 static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -96,21 +78,21 @@
 {
 	int error;
 
-	memory->sysdev.cls = &memory_sysdev_class;
-	memory->sysdev.id = memory->start_section_nr / sections_per_block;
+	memory->dev.bus = &memory_subsys;
+	memory->dev.id = memory->start_section_nr / sections_per_block;
 
-	error = sysdev_register(&memory->sysdev);
+	error = device_register(&memory->dev);
 	return error;
 }
 
 static void
 unregister_memory(struct memory_block *memory)
 {
-	BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
+	BUG_ON(memory->dev.bus != &memory_subsys);
 
 	/* drop the ref. we got in remove_memory_block() */
-	kobject_put(&memory->sysdev.kobj);
-	sysdev_unregister(&memory->sysdev);
+	kobject_put(&memory->dev.kobj);
+	device_unregister(&memory->dev);
 }
 
 unsigned long __weak memory_block_size_bytes(void)
@@ -138,22 +120,22 @@
  * uses.
  */
 
-static ssize_t show_mem_start_phys_index(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_start_phys_index(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct memory_block *mem =
-		container_of(dev, struct memory_block, sysdev);
+		container_of(dev, struct memory_block, dev);
 	unsigned long phys_index;
 
 	phys_index = mem->start_section_nr / sections_per_block;
 	return sprintf(buf, "%08lx\n", phys_index);
 }
 
-static ssize_t show_mem_end_phys_index(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_end_phys_index(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct memory_block *mem =
-		container_of(dev, struct memory_block, sysdev);
+		container_of(dev, struct memory_block, dev);
 	unsigned long phys_index;
 
 	phys_index = mem->end_section_nr / sections_per_block;
@@ -163,13 +145,13 @@
 /*
  * Show whether the section of memory is likely to be hot-removable
  */
-static ssize_t show_mem_removable(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_removable(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	unsigned long i, pfn;
 	int ret = 1;
 	struct memory_block *mem =
-		container_of(dev, struct memory_block, sysdev);
+		container_of(dev, struct memory_block, dev);
 
 	for (i = 0; i < sections_per_block; i++) {
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
@@ -182,11 +164,11 @@
 /*
  * online, offline, going offline, etc.
  */
-static ssize_t show_mem_state(struct sys_device *dev,
-			struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_state(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct memory_block *mem =
-		container_of(dev, struct memory_block, sysdev);
+		container_of(dev, struct memory_block, dev);
 	ssize_t len = 0;
 
 	/*
@@ -324,13 +306,13 @@
 }
 
 static ssize_t
-store_mem_state(struct sys_device *dev,
-		struct sysdev_attribute *attr, const char *buf, size_t count)
+store_mem_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct memory_block *mem;
 	int ret = -EINVAL;
 
-	mem = container_of(dev, struct memory_block, sysdev);
+	mem = container_of(dev, struct memory_block, dev);
 
 	if (!strncmp(buf, "online", min((int)count, 6)))
 		ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -351,41 +333,41 @@
  * s.t. if I offline all of these sections I can then
  * remove the physical device?
  */
-static ssize_t show_phys_device(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t show_phys_device(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct memory_block *mem =
-		container_of(dev, struct memory_block, sysdev);
+		container_of(dev, struct memory_block, dev);
 	return sprintf(buf, "%d\n", mem->phys_device);
 }
 
-static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
-static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
-static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
-static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
+static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
+static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
+static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
+static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
+static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
 
 #define mem_create_simple_file(mem, attr_name)	\
-	sysdev_create_file(&mem->sysdev, &attr_##attr_name)
+	device_create_file(&mem->dev, &dev_attr_##attr_name)
 #define mem_remove_simple_file(mem, attr_name)	\
-	sysdev_remove_file(&mem->sysdev, &attr_##attr_name)
+	device_remove_file(&mem->dev, &dev_attr_##attr_name)
 
 /*
  * Block size attribute stuff
  */
 static ssize_t
-print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
+print_block_size(struct device *dev, struct device_attribute *attr,
 		 char *buf)
 {
 	return sprintf(buf, "%lx\n", get_memory_block_size());
 }
 
-static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
 
 static int block_size_init(void)
 {
-	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
-				&attr_block_size_bytes.attr);
+	return device_create_file(memory_subsys.dev_root,
+				  &dev_attr_block_size_bytes);
 }
 
 /*
@@ -396,7 +378,7 @@
  */
 #ifdef CONFIG_ARCH_MEMORY_PROBE
 static ssize_t
-memory_probe_store(struct class *class, struct class_attribute *attr,
+memory_probe_store(struct device *dev, struct device_attribute *attr,
 		   const char *buf, size_t count)
 {
 	u64 phys_addr;
@@ -423,12 +405,11 @@
 out:
 	return ret;
 }
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
 
 static int memory_probe_init(void)
 {
-	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
-				&class_attr_probe.attr);
+	return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
 }
 #else
 static inline int memory_probe_init(void)
@@ -444,8 +425,8 @@
 
 /* Soft offline a page */
 static ssize_t
-store_soft_offline_page(struct class *class,
-			struct class_attribute *attr,
+store_soft_offline_page(struct device *dev,
+			struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	int ret;
@@ -463,8 +444,8 @@
 
 /* Forcibly offline a page, including killing processes. */
 static ssize_t
-store_hard_offline_page(struct class *class,
-			struct class_attribute *attr,
+store_hard_offline_page(struct device *dev,
+			struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	int ret;
@@ -478,18 +459,18 @@
 	return ret ? ret : count;
 }
 
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
 
 static __init int memory_fail_init(void)
 {
 	int err;
 
-	err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
-				&class_attr_soft_offline_page.attr);
+	err = device_create_file(memory_subsys.dev_root,
+				&dev_attr_soft_offline_page);
 	if (!err)
-		err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
-				&class_attr_hard_offline_page.attr);
+		err = device_create_file(memory_subsys.dev_root,
+				&dev_attr_hard_offline_page);
 	return err;
 }
 #else
@@ -509,31 +490,23 @@
 	return 0;
 }
 
+/*
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
 struct memory_block *find_memory_block_hinted(struct mem_section *section,
 					      struct memory_block *hint)
 {
-	struct kobject *kobj;
-	struct sys_device *sysdev;
-	struct memory_block *mem;
-	char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
 	int block_id = base_memory_block_id(__section_nr(section));
+	struct device *hintdev = hint ? &hint->dev : NULL;
+	struct device *dev;
 
-	kobj = hint ? &hint->sysdev.kobj : NULL;
-
-	/*
-	 * This only works because we know that section == sysdev->id
-	 * slightly redundant with sysdev_register()
-	 */
-	sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id);
-
-	kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
-	if (!kobj)
+	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
+	if (hint)
+		put_device(&hint->dev);
+	if (!dev)
 		return NULL;
-
-	sysdev = container_of(kobj, struct sys_device, kobj);
-	mem = container_of(sysdev, struct memory_block, sysdev);
-
-	return mem;
+	return container_of(dev, struct memory_block, dev);
 }
 
 /*
@@ -542,7 +515,7 @@
  * this gets to be a real problem, we can always use a radix
  * tree or something here.
  *
- * This could be made generic for all sysdev classes.
+ * This could be made generic for all device subsystems.
  */
 struct memory_block *find_memory_block(struct mem_section *section)
 {
@@ -598,7 +571,7 @@
 	mem = find_memory_block(section);
 	if (mem) {
 		mem->section_count++;
-		kobject_put(&mem->sysdev.kobj);
+		kobject_put(&mem->dev.kobj);
 	} else
 		ret = init_memory_block(&mem, section, state);
 
@@ -631,7 +604,7 @@
 		unregister_memory(mem);
 		kfree(mem);
 	} else
-		kobject_put(&mem->sysdev.kobj);
+		kobject_put(&mem->dev.kobj);
 
 	mutex_unlock(&mem_sysfs_mutex);
 	return 0;
@@ -664,8 +637,7 @@
 	int err;
 	unsigned long block_sz;
 
-	memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
-	ret = sysdev_class_register(&memory_sysdev_class);
+	ret = subsys_system_register(&memory_subsys, NULL);
 	if (ret)
 		goto out;
 
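The memory_block hunks above follow the standard shape of this conversion: the embedded struct sys_device becomes an embedded struct device, and the sysfs callbacks recover the containing object with container_of() against the new member. A minimal sketch of that shape, using an invented "widget" object rather than anything in this patch:

  #include <linux/device.h>
  #include <linux/kernel.h>

  struct widget_block {
          unsigned long state;
          struct device dev;              /* was: struct sys_device sysdev */
  };

  static ssize_t show_state(struct device *dev,
                            struct device_attribute *attr, char *buf)
  {
          struct widget_block *blk = container_of(dev, struct widget_block, dev);

          return sprintf(buf, "%lu\n", blk->state);
  }

  static ssize_t store_state(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
  {
          struct widget_block *blk = container_of(dev, struct widget_block, dev);

          if (kstrtoul(buf, 0, &blk->state))
                  return -EINVAL;
          return count;
  }

  static DEVICE_ATTR(state, 0644, show_state, store_state);

  /* After device_register(&blk->dev), the file is created with:
   *         device_create_file(&blk->dev, &dev_attr_state);
   */
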
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 5693ece..44f427a 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -1,8 +1,7 @@
 /*
- * drivers/base/node.c - basic Node class support
+ * Basic Node interface support
  */
 
-#include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -19,18 +18,16 @@
 #include <linux/swap.h>
 #include <linux/slab.h>
 
-static struct sysdev_class_attribute *node_state_attrs[];
-
-static struct sysdev_class node_class = {
+static struct bus_type node_subsys = {
 	.name = "node",
-	.attrs = node_state_attrs,
+	.dev_name = "node",
 };
 
 
-static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
 {
 	struct node *node_dev = to_node(dev);
-	const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
+	const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
 	int len;
 
 	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
@@ -44,23 +41,23 @@
 	return len;
 }
 
-static inline ssize_t node_read_cpumask(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpumask(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	return node_read_cpumap(dev, 0, buf);
 }
-static inline ssize_t node_read_cpulist(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpulist(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	return node_read_cpumap(dev, 1, buf);
 }
 
-static SYSDEV_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
-static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
+static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
-static ssize_t node_read_meminfo(struct sys_device * dev,
-			struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_meminfo(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	int n;
 	int nid = dev->id;
@@ -157,10 +154,10 @@
 }
 
 #undef K
-static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
 
-static ssize_t node_read_numastat(struct sys_device * dev,
-				struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_numastat(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf,
 		       "numa_hit %lu\n"
@@ -176,10 +173,10 @@
 		       node_page_state(dev->id, NUMA_LOCAL),
 		       node_page_state(dev->id, NUMA_OTHER));
 }
-static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 
-static ssize_t node_read_vmstat(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+static ssize_t node_read_vmstat(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	int nid = dev->id;
 	int i;
@@ -191,10 +188,10 @@
 
 	return n;
 }
-static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
 
-static ssize_t node_read_distance(struct sys_device * dev,
-			struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_distance(struct device *dev,
+			struct device_attribute *attr, char * buf)
 {
 	int nid = dev->id;
 	int len = 0;
@@ -212,7 +209,7 @@
 	len += sprintf(buf + len, "\n");
 	return len;
 }
-static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
 
 #ifdef CONFIG_HUGETLBFS
 /*
@@ -230,7 +227,7 @@
 static inline bool hugetlb_register_node(struct node *node)
 {
 	if (__hugetlb_register_node &&
-			node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+			node_state(node->dev.id, N_HIGH_MEMORY)) {
 		__hugetlb_register_node(node);
 		return true;
 	}
@@ -266,17 +263,17 @@
 {
 	int error;
 
-	node->sysdev.id = num;
-	node->sysdev.cls = &node_class;
-	error = sysdev_register(&node->sysdev);
+	node->dev.id = num;
+	node->dev.bus = &node_subsys;
+	error = device_register(&node->dev);
 
 	if (!error){
-		sysdev_create_file(&node->sysdev, &attr_cpumap);
-		sysdev_create_file(&node->sysdev, &attr_cpulist);
-		sysdev_create_file(&node->sysdev, &attr_meminfo);
-		sysdev_create_file(&node->sysdev, &attr_numastat);
-		sysdev_create_file(&node->sysdev, &attr_distance);
-		sysdev_create_file(&node->sysdev, &attr_vmstat);
+		device_create_file(&node->dev, &dev_attr_cpumap);
+		device_create_file(&node->dev, &dev_attr_cpulist);
+		device_create_file(&node->dev, &dev_attr_meminfo);
+		device_create_file(&node->dev, &dev_attr_numastat);
+		device_create_file(&node->dev, &dev_attr_distance);
+		device_create_file(&node->dev, &dev_attr_vmstat);
 
 		scan_unevictable_register_node(node);
 
@@ -296,17 +293,17 @@
  */
 void unregister_node(struct node *node)
 {
-	sysdev_remove_file(&node->sysdev, &attr_cpumap);
-	sysdev_remove_file(&node->sysdev, &attr_cpulist);
-	sysdev_remove_file(&node->sysdev, &attr_meminfo);
-	sysdev_remove_file(&node->sysdev, &attr_numastat);
-	sysdev_remove_file(&node->sysdev, &attr_distance);
-	sysdev_remove_file(&node->sysdev, &attr_vmstat);
+	device_remove_file(&node->dev, &dev_attr_cpumap);
+	device_remove_file(&node->dev, &dev_attr_cpulist);
+	device_remove_file(&node->dev, &dev_attr_meminfo);
+	device_remove_file(&node->dev, &dev_attr_numastat);
+	device_remove_file(&node->dev, &dev_attr_distance);
+	device_remove_file(&node->dev, &dev_attr_vmstat);
 
 	scan_unevictable_unregister_node(node);
 	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
 
-	sysdev_unregister(&node->sysdev);
+	device_unregister(&node->dev);
 }
 
 struct node node_devices[MAX_NUMNODES];
@@ -317,41 +314,41 @@
 int register_cpu_under_node(unsigned int cpu, unsigned int nid)
 {
 	int ret;
-	struct sys_device *obj;
+	struct device *obj;
 
 	if (!node_online(nid))
 		return 0;
 
-	obj = get_cpu_sysdev(cpu);
+	obj = get_cpu_device(cpu);
 	if (!obj)
 		return 0;
 
-	ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+	ret = sysfs_create_link(&node_devices[nid].dev.kobj,
 				&obj->kobj,
 				kobject_name(&obj->kobj));
 	if (ret)
 		return ret;
 
 	return sysfs_create_link(&obj->kobj,
-				 &node_devices[nid].sysdev.kobj,
-				 kobject_name(&node_devices[nid].sysdev.kobj));
+				 &node_devices[nid].dev.kobj,
+				 kobject_name(&node_devices[nid].dev.kobj));
 }
 
 int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 {
-	struct sys_device *obj;
+	struct device *obj;
 
 	if (!node_online(nid))
 		return 0;
 
-	obj = get_cpu_sysdev(cpu);
+	obj = get_cpu_device(cpu);
 	if (!obj)
 		return 0;
 
-	sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+	sysfs_remove_link(&node_devices[nid].dev.kobj,
 			  kobject_name(&obj->kobj));
 	sysfs_remove_link(&obj->kobj,
-			  kobject_name(&node_devices[nid].sysdev.kobj));
+			  kobject_name(&node_devices[nid].dev.kobj));
 
 	return 0;
 }
@@ -393,15 +390,15 @@
 			continue;
 		if (page_nid != nid)
 			continue;
-		ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
-					&mem_blk->sysdev.kobj,
-					kobject_name(&mem_blk->sysdev.kobj));
+		ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+					&mem_blk->dev.kobj,
+					kobject_name(&mem_blk->dev.kobj));
 		if (ret)
 			return ret;
 
-		return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
-				&node_devices[nid].sysdev.kobj,
-				kobject_name(&node_devices[nid].sysdev.kobj));
+		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+				&node_devices[nid].dev.kobj,
+				kobject_name(&node_devices[nid].dev.kobj));
 	}
 	/* mem section does not span the specified node */
 	return 0;
@@ -434,10 +431,10 @@
 			continue;
 		if (node_test_and_set(nid, *unlinked_nodes))
 			continue;
-		sysfs_remove_link(&node_devices[nid].sysdev.kobj,
-			 kobject_name(&mem_blk->sysdev.kobj));
-		sysfs_remove_link(&mem_blk->sysdev.kobj,
-			 kobject_name(&node_devices[nid].sysdev.kobj));
+		sysfs_remove_link(&node_devices[nid].dev.kobj,
+			 kobject_name(&mem_blk->dev.kobj));
+		sysfs_remove_link(&mem_blk->dev.kobj,
+			 kobject_name(&node_devices[nid].dev.kobj));
 	}
 	NODEMASK_FREE(unlinked_nodes);
 	return 0;
@@ -468,7 +465,7 @@
 	}
 
 	if (mem_blk)
-		kobject_put(&mem_blk->sysdev.kobj);
+		kobject_put(&mem_blk->dev.kobj);
 	return err;
 }
 
@@ -596,19 +593,19 @@
 }
 
 struct node_attr {
-	struct sysdev_class_attribute attr;
+	struct device_attribute attr;
 	enum node_states state;
 };
 
-static ssize_t show_node_state(struct sysdev_class *class,
-			       struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_node_state(struct device *dev,
+			       struct device_attribute *attr, char *buf)
 {
 	struct node_attr *na = container_of(attr, struct node_attr, attr);
 	return print_nodes_state(na->state, buf);
 }
 
 #define _NODE_ATTR(name, state) \
-	{ _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
+	{ __ATTR(name, 0444, show_node_state, NULL), state }
 
 static struct node_attr node_state_attr[] = {
 	_NODE_ATTR(possible, N_POSSIBLE),
@@ -620,17 +617,26 @@
 #endif
 };
 
-static struct sysdev_class_attribute *node_state_attrs[] = {
-	&node_state_attr[0].attr,
-	&node_state_attr[1].attr,
-	&node_state_attr[2].attr,
-	&node_state_attr[3].attr,
+static struct attribute *node_state_attrs[] = {
+	&node_state_attr[0].attr.attr,
+	&node_state_attr[1].attr.attr,
+	&node_state_attr[2].attr.attr,
+	&node_state_attr[3].attr.attr,
 #ifdef CONFIG_HIGHMEM
-	&node_state_attr[4].attr,
+	&node_state_attr[4].attr.attr,
 #endif
 	NULL
 };
 
+static struct attribute_group memory_root_attr_group = {
+	.attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+	&memory_root_attr_group,
+	NULL,
+};
+
 #define NODE_CALLBACK_PRI	2	/* lower than SLAB */
 static int __init register_node_type(void)
 {
@@ -639,7 +645,7 @@
  	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
  	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
 
-	ret = sysdev_class_register(&node_class);
+	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
 	if (!ret) {
 		hotplug_memory_notifier(node_memory_callback,
 					NODE_CALLBACK_PRI);
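The node_state_attrs change above also shows how the old per-class attributes become root attribute groups handed to subsys_system_register(). A hedged sketch of the same wiring for an invented subsystem (every name below is made up for illustration):

  #include <linux/device.h>
  #include <linux/init.h>
  #include <linux/kernel.h>

  static ssize_t show_possible(struct device *dev,
                               struct device_attribute *attr, char *buf)
  {
          return sprintf(buf, "0-3\n");           /* made-up contents */
  }
  static DEVICE_ATTR(possible, 0444, show_possible, NULL);

  /* struct device_attribute embeds a struct attribute, hence ".attr". */
  static struct attribute *example_root_attrs[] = {
          &dev_attr_possible.attr,
          NULL
  };

  static struct attribute_group example_root_attr_group = {
          .attrs = example_root_attrs,
  };

  static const struct attribute_group *example_root_attr_groups[] = {
          &example_root_attr_group,
          NULL,
  };

  static struct bus_type example_subsys = {
          .name = "example",
          .dev_name = "example",
  };

  static int __init example_init(void)
  {
          /* The group's files appear under /sys/devices/system/example/. */
          return subsys_system_register(&example_subsys, example_root_attr_groups);
  }
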
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7a24895..f0c605e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -383,7 +383,7 @@
  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
 struct platform_device *platform_device_register_full(
-		struct platform_device_info *pdevinfo)
+		const struct platform_device_info *pdevinfo)
 {
 	int ret = -ENOMEM;
 	struct platform_device *pdev;
@@ -700,25 +700,6 @@
 	return ret;
 }
 
-int platform_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@
 	return ret;
 }
 
-int platform_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@
 	return ret;
 }
 
-int platform_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@
 	return ret;
 }
 
-int platform_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@
 	return ret;
 }
 
-int platform_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@
 	return ret;
 }
 
-int platform_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@
 	return ret;
 }
 
-int platform_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
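The deleted platform_pm_*_noirq() and prepare/complete wrappers only forwarded to the driver's own dev_pm_ops; with the PM core gaining a driver fallback later in this merge (see the drivers/base/power/main.c hunks below), a platform driver's callbacks can be picked up directly. A speculative sketch of a driver relying on that, with invented names and stub callbacks:

  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/pm.h>

  /* Invented driver; the callbacks are stubs, only the wiring matters. */
  static int foo_suspend(struct device *dev)        { return 0; }
  static int foo_resume(struct device *dev)         { return 0; }
  static int foo_suspend_noirq(struct device *dev)  { return 0; }
  static int foo_resume_noirq(struct device *dev)   { return 0; }

  static const struct dev_pm_ops foo_pm_ops = {
          .suspend        = foo_suspend,
          .resume         = foo_resume,
          /* With the bus-level noirq wrappers gone, the PM core invokes
           * these driver callbacks itself when the bus supplies none. */
          .suspend_noirq  = foo_suspend_noirq,
          .resume_noirq   = foo_resume_noirq,
  };

  static struct platform_driver foo_driver = {
          .driver = {
                  .name   = "foo",
                  .owner  = THIS_MODULE,
                  .pm     = &foo_pm_ops,
          },
  };

  static int __init foo_init(void)
  {
          return platform_driver_register(&foo_driver);
  }
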
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd..2e58ebb 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o domain_governor.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7..92e6a90 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
+({								\
+	type (*__routine)(struct device *__d); 			\
+	type __ret = (type)0;					\
+								\
+	__routine = genpd->dev_ops.callback; 			\
+	if (__routine) {					\
+		__ret = __routine(dev); 			\
+	} else {						\
+		__routine = dev_gpd_data(dev)->ops.callback;	\
+		if (__routine) 					\
+			__ret = __routine(dev);			\
+	}							\
+	__ret;							\
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
+({										\
+	ktime_t __start = ktime_get();						\
+	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
+	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
+	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
+	if (__elapsed > __gpd_data->td.field) {					\
+		__gpd_data->td.field = __elapsed;				\
+		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
+			__elapsed);						\
+	}									\
+	__retval;								\
+})
 
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
 #ifdef CONFIG_PM
 
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
 {
 	if (IS_ERR_OR_NULL(dev->pm_domain))
 		return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@
 	return pd_to_genpd(dev->pm_domain);
 }
 
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+					stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+					start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+					save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+					restore_state_latency_ns,
+					"state restore");
+}
+
 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
 	bool ret = false;
@@ -145,9 +201,21 @@
 	}
 
 	if (genpd->power_on) {
+		ktime_t time_start = ktime_get();
+		s64 elapsed_ns;
+
 		ret = genpd->power_on(genpd);
 		if (ret)
 			goto err;
+
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > genpd->power_on_latency_ns) {
+			genpd->power_on_latency_ns = elapsed_ns;
+			if (genpd->name)
+				pr_warning("%s: Power-on latency exceeded, "
+					"new value %lld ns\n", genpd->name,
+					elapsed_ns);
+		}
 	}
 
 	genpd_set_active(genpd);
@@ -190,7 +258,6 @@
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
 	if (gpd_data->need_restore)
@@ -198,15 +265,9 @@
 
 	mutex_unlock(&genpd->lock);
 
-	if (drv && drv->pm && drv->pm->runtime_suspend) {
-		if (genpd->start_device)
-			genpd->start_device(dev);
-
-		ret = drv->pm->runtime_suspend(dev);
-
-		if (genpd->stop_device)
-			genpd->stop_device(dev);
-	}
+	genpd_start_dev(genpd, dev);
+	ret = genpd_save_dev(genpd, dev);
+	genpd_stop_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 
@@ -227,22 +288,15 @@
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	struct device_driver *drv = dev->driver;
 
 	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
 
-	if (drv && drv->pm && drv->pm->runtime_resume) {
-		if (genpd->start_device)
-			genpd->start_device(dev);
-
-		drv->pm->runtime_resume(dev);
-
-		if (genpd->stop_device)
-			genpd->stop_device(dev);
-	}
+	genpd_start_dev(genpd, dev);
+	genpd_restore_dev(genpd, dev);
+	genpd_stop_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 
@@ -354,11 +408,16 @@
 	}
 
 	if (genpd->power_off) {
+		ktime_t time_start;
+		s64 elapsed_ns;
+
 		if (atomic_read(&genpd->sd_count) > 0) {
 			ret = -EBUSY;
 			goto out;
 		}
 
+		time_start = ktime_get();
+
 		/*
 		 * If sd_count > 0 at this point, one of the subdomains hasn't
 		 * managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@
 			genpd_set_active(genpd);
 			goto out;
 		}
+
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > genpd->power_off_latency_ns) {
+			genpd->power_off_latency_ns = elapsed_ns;
+			if (genpd->name)
+				pr_warning("%s: Power-off latency exceeded, "
+					"new value %lld ns\n", genpd->name,
+					elapsed_ns);
+		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
+	genpd->power_off_time = ktime_get();
+
+	/* Update PM QoS information for devices in the domain. */
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+		pm_runtime_update_max_time_suspended(pdd->dev,
+					td->start_latency_ns +
+					td->restore_state_latency_ns +
+					genpd->power_on_latency_ns);
+	}
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
+	bool (*stop_ok)(struct device *__dev);
+	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -422,11 +503,16 @@
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
-	if (genpd->stop_device) {
-		int ret = genpd->stop_device(dev);
-		if (ret)
-			return ret;
-	}
+	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+	if (stop_ok && !stop_ok(dev))
+		return -EBUSY;
+
+	ret = genpd_stop_dev(genpd, dev);
+	if (ret)
+		return ret;
+
+	pm_runtime_update_max_time_suspended(dev,
+				dev_gpd_data(dev)->td.start_latency_ns);
 
 	/*
 	 * If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@
 	mutex_unlock(&genpd->lock);
 
  out:
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
 	return 0;
 }
@@ -534,6 +619,52 @@
 
 #ifdef CONFIG_PM_SLEEP
 
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+				    struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
 /**
  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@
 	if (!device_can_wakeup(dev))
 		return false;
 
-	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
 }
 
@@ -646,7 +777,7 @@
 	/*
 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
 	 * so pm_genpd_poweron() will return immediately, but if the device
-	 * is suspended (e.g. it's been stopped by .stop_device()), we need
+	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
 	 * to make it operational.
 	 */
 	pm_runtime_resume(dev);
@@ -685,7 +816,7 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
 }
 
 /**
@@ -710,16 +841,14 @@
 	if (genpd->suspend_power_off)
 		return 0;
 
-	ret = pm_generic_suspend_noirq(dev);
+	ret = genpd_suspend_late(genpd, dev);
 	if (ret)
 		return ret;
 
-	if (dev->power.wakeup_path
-	    && genpd->active_wakeup && genpd->active_wakeup(dev))
+	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 		return 0;
 
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
+	genpd_stop_dev(genpd, dev);
 
 	/*
 	 * Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@
 	 */
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_resume_noirq(dev);
+	return genpd_resume_early(genpd, dev);
 }
 
 /**
@@ -785,7 +913,7 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
 }
 
 /**
@@ -806,7 +934,7 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
 }
 
 /**
@@ -832,12 +960,11 @@
 	if (genpd->suspend_power_off)
 		return 0;
 
-	ret = pm_generic_freeze_noirq(dev);
+	ret = genpd_freeze_late(genpd, dev);
 	if (ret)
 		return ret;
 
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
+	genpd_stop_dev(genpd, dev);
 
 	return 0;
 }
@@ -864,10 +991,9 @@
 	if (genpd->suspend_power_off)
 		return 0;
 
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_thaw_noirq(dev);
+	return genpd_thaw_early(genpd, dev);
 }
 
 /**
@@ -888,72 +1014,7 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-	int ret;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	if (genpd->suspend_power_off)
-		return 0;
-
-	ret = pm_generic_poweroff_noirq(dev);
-	if (ret)
-		return ret;
-
-	if (dev->power.wakeup_path
-	    && genpd->active_wakeup && genpd->active_wakeup(dev))
-		return 0;
-
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
-
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
-	genpd->suspended_count++;
-	pm_genpd_sync_poweroff(genpd);
-
-	return 0;
+	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
 }
 
 /**
@@ -993,31 +1054,9 @@
 
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+	return genpd_resume_early(genpd, dev);
 }
 
 /**
@@ -1067,20 +1106,19 @@
 #define pm_genpd_freeze_noirq		NULL
 #define pm_genpd_thaw_noirq		NULL
 #define pm_genpd_thaw			NULL
-#define pm_genpd_dev_poweroff_noirq	NULL
-#define pm_genpd_dev_poweroff		NULL
 #define pm_genpd_restore_noirq		NULL
-#define pm_genpd_restore		NULL
 #define pm_genpd_complete		NULL
 
 #endif /* CONFIG_PM_SLEEP */
 
 /**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
  * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
  */
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+			  struct gpd_timing_data *td)
 {
 	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@
 	gpd_data->base.dev = dev;
 	gpd_data->need_restore = false;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	if (td)
+		gpd_data->td = *td;
 
  out:
 	genpd_release_lock(genpd);
@@ -1280,6 +1320,204 @@
 }
 
 /**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+			   struct gpd_timing_data *td)
+{
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	if (!(dev && dev->power.subsys_data && ops))
+		return -EINVAL;
+
+	pm_runtime_disable(dev);
+	device_pm_lock();
+
+	pdd = dev->power.subsys_data->domain_data;
+	if (pdd) {
+		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+		gpd_data->ops = *ops;
+		if (td)
+			gpd_data->td = *td;
+	} else {
+		ret = -EINVAL;
+	}
+
+	device_pm_unlock();
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	if (!(dev && dev->power.subsys_data))
+		return -EINVAL;
+
+	pm_runtime_disable(dev);
+	device_pm_lock();
+
+	pdd = dev->power.subsys_data->domain_data;
+	if (pdd) {
+		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+		gpd_data->ops = (struct gpd_dev_ops){ 0 };
+		if (clear_td)
+			gpd_data->td = (struct gpd_timing_data){ 0 };
+	} else {
+		ret = -EINVAL;
+	}
+
+	device_pm_unlock();
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+	struct device_driver *drv = dev->driver;
+
+	cb = dev_gpd_data(dev)->ops.save_state;
+	if (cb)
+		return cb(dev);
+
+	if (drv && drv->pm && drv->pm->runtime_suspend)
+		return drv->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default PM domains "restore device state".
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+	struct device_driver *drv = dev->driver;
+
+	cb = dev_gpd_data(dev)->ops.restore_state;
+	if (cb)
+		return cb(dev);
+
+	if (drv && drv->pm && drv->pm->runtime_resume)
+		return drv->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+	return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+	return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+	return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+	return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+	return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+	return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+	return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+	return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+/**
  * pm_genpd_init - Initialize a generic I/O PM domain object.
  * @genpd: PM domain object to initialize.
  * @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1543,7 @@
 	genpd->resume_count = 0;
 	genpd->device_count = 0;
 	genpd->suspended_count = 0;
+	genpd->max_off_time_ns = -1;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1556,21 @@
 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
 	genpd->domain.ops.thaw = pm_genpd_thaw;
-	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
-	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+	genpd->domain.ops.poweroff = pm_genpd_suspend;
+	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
-	genpd->domain.ops.restore = pm_genpd_restore;
+	genpd->domain.ops.restore = pm_genpd_resume;
 	genpd->domain.ops.complete = pm_genpd_complete;
+	genpd->dev_ops.save_state = pm_genpd_default_save_state;
+	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+	genpd->dev_ops.suspend = pm_genpd_default_suspend;
+	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+	genpd->dev_ops.resume = pm_genpd_default_resume;
+	genpd->dev_ops.freeze = pm_genpd_default_freeze;
+	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+	genpd->dev_ops.thaw = pm_genpd_default_thaw;
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
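Putting the new genpd interfaces above together, a hedged usage sketch: attach a device with its PM QoS timing data via __pm_genpd_add_device() and then override one of the default device operations with pm_genpd_add_callbacks(). The device, domain, and all latency figures below are invented for illustration:

  #include <linux/pm_domain.h>

  /* Invented device callback: a device-specific "stop" used in place of
   * the domain-wide default. */
  static int foo_dev_stop(struct device *dev)
  {
          return 0;
  }

  static struct gpd_dev_ops foo_dev_ops = {
          .stop = foo_dev_stop,
  };

  static struct gpd_timing_data foo_td = {
          .stop_latency_ns          = 20000,      /* invented figures */
          .start_latency_ns         = 50000,
          .save_state_latency_ns    = 100000,
          .restore_state_latency_ns = 150000,
          .break_even_ns            = 300000,
  };

  static int foo_attach(struct generic_pm_domain *genpd, struct device *dev)
  {
          int ret;

          /* Attach the device together with its timing data ... */
          ret = __pm_genpd_add_device(genpd, dev, &foo_td);
          if (ret)
                  return ret;

          /* ... then install the per-device callback overrides. */
          return pm_genpd_add_callbacks(dev, &foo_dev_ops, NULL);
  }
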
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 0000000..51527ee
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,156 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+		return true;
+
+	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+		&& td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct gpd_link *link;
+	struct pm_domain_data *pdd;
+	s64 min_dev_off_time_ns;
+	s64 off_on_time_ns;
+	ktime_t time_now = ktime_get();
+
+	off_on_time_ns = genpd->power_off_latency_ns +
+				genpd->power_on_latency_ns;
+	/*
+	 * It doesn't make sense to remove power from the domain if saving
+	 * the state of all devices in it and the power off/power on operations
+	 * take too much time.
+	 *
+	 * All devices in this domain have been stopped already at this point.
+	 */
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev->driver)
+			off_on_time_ns +=
+				to_gpd_data(pdd)->td.save_state_latency_ns;
+	}
+
+	/*
+	 * Check if subdomains can be off for enough time.
+	 *
+	 * All subdomains have been powered off already at this point.
+	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		struct generic_pm_domain *sd = link->slave;
+		s64 sd_max_off_ns = sd->max_off_time_ns;
+
+		if (sd_max_off_ns < 0)
+			continue;
+
+		sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+						       sd->power_off_time));
+		/*
+		 * Check if the subdomain is allowed to be off long enough for
+		 * the current domain to turn off and on (that's how much time
+		 * it will have to wait worst case).
+		 */
+		if (sd_max_off_ns <= off_on_time_ns)
+			return false;
+	}
+
+	/*
+	 * Check if the devices in the domain can be off for long enough.
+	 */
+	min_dev_off_time_ns = -1;
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td;
+		struct device *dev = pdd->dev;
+		s64 dev_off_time_ns;
+
+		if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+			continue;
+
+		td = &to_gpd_data(pdd)->td;
+		dev_off_time_ns = dev->power.max_time_suspended_ns -
+			(td->start_latency_ns + td->restore_state_latency_ns +
+				ktime_to_ns(ktime_sub(time_now,
+						dev->power.suspend_time)));
+		if (dev_off_time_ns <= off_on_time_ns)
+			return false;
+
+		if (min_dev_off_time_ns > dev_off_time_ns
+		    || min_dev_off_time_ns < 0)
+			min_dev_off_time_ns = dev_off_time_ns;
+	}
+
+	if (min_dev_off_time_ns < 0) {
+		/*
+		 * There are no latency constraints, so the domain can spend
+		 * arbitrary time in the "off" state.
+		 */
+		genpd->max_off_time_ns = -1;
+		return true;
+	}
+
+	/*
+	 * The difference between the computed minimum delta and the time needed
+	 * to turn the domain on is the maximum theoretical time this domain can
+	 * spend in the "off" state.
+	 */
+	min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+	/*
+	 * If the difference between the computed minimum delta and the time
+	 * needed to turn the domain off and back on is smaller than the
+	 * domain's power break even time, removing power from the domain is not
+	 * worth it.
+	 */
+	if (genpd->break_even_ns >
+	    min_dev_off_time_ns - genpd->power_off_latency_ns)
+		return false;
+
+	genpd->max_off_time_ns = min_dev_off_time_ns;
+	return true;
+}
+
+struct dev_power_governor simple_qos_governor = {
+	.stop_ok = default_stop_ok,
+	.power_down_ok = default_power_down_ok,
+};
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+	return false;
+}
+
+/**
+ * pm_genpd_gov_always_on - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+	.power_down_ok = always_on_power_down_ok,
+	.stop_ok = default_stop_ok,
+};
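To make the governor arithmetic concrete with invented numbers: if stopping a device costs 20 us and restarting it 50 us, default_stop_ok() only permits the stop when break_even_ns exceeds that 70 us combined latency and itself fits within the device's max_time_suspended_ns. A speculative sketch of wiring the new simple_qos_governor into a domain (assuming it is declared in <linux/pm_domain.h> as part of this series; the domain and its latencies are made up):

  #include <linux/init.h>
  #include <linux/pm_domain.h>

  static struct generic_pm_domain foo_domain = {
          .name = "foo",                          /* invented domain */
  };

  static void __init foo_domain_setup(void)
  {
          pm_genpd_init(&foo_domain, &simple_qos_governor, true);

          /* Measured (here: invented) worst-case latencies and the point
           * at which powering the domain off starts to pay for itself. */
          foo_domain.power_off_latency_ns = 200000;       /* 200 us */
          foo_domain.power_on_latency_ns  = 300000;       /* 300 us */
          foo_domain.break_even_ns        = 1000000;      /* 1 ms   */
  }
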
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee..10bdd79 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@
  * @event: PM transition of the system under way.
  * @bool: Whether or not this is the "noirq" stage.
  *
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code.  Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code.  Return 0 if the callback is
+ * not present.
  */
 static int __pm_generic_call(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
 
-	if (!pm || pm_runtime_suspended(dev))
+	if (!pm)
 		return 0;
 
 	switch (event) {
@@ -119,9 +119,15 @@
 	case PM_EVENT_HIBERNATE:
 		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
 		break;
+	case PM_EVENT_RESUME:
+		callback = noirq ? pm->resume_noirq : pm->resume;
+		break;
 	case PM_EVENT_THAW:
 		callback = noirq ? pm->thaw_noirq : pm->thaw;
 		break;
+	case PM_EVENT_RESTORE:
+		callback = noirq ? pm->restore_noirq : pm->restore;
+		break;
 	default:
 		callback = NULL;
 		break;
@@ -211,56 +217,12 @@
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
 /**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined.  If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int (*callback)(struct device *);
-	int ret;
-
-	if (!pm)
-		return 0;
-
-	switch (event) {
-	case PM_EVENT_RESUME:
-		callback = noirq ? pm->resume_noirq : pm->resume;
-		break;
-	case PM_EVENT_RESTORE:
-		callback = noirq ? pm->restore_noirq : pm->restore;
-		break;
-	default:
-		callback = NULL;
-		break;
-	}
-
-	if (!callback)
-		return 0;
-
-	ret = callback(dev);
-	if (!ret && !noirq && pm_runtime_enabled(dev)) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
-}
-
-/**
  * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 
@@ -270,7 +232,7 @@
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 
@@ -280,7 +242,7 @@
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 
@@ -290,7 +252,7 @@
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
@@ -314,28 +276,3 @@
 	pm_runtime_idle(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-	.prepare = pm_generic_prepare,
-	.suspend = pm_generic_suspend,
-	.suspend_noirq = pm_generic_suspend_noirq,
-	.resume = pm_generic_resume,
-	.resume_noirq = pm_generic_resume_noirq,
-	.freeze = pm_generic_freeze,
-	.freeze_noirq = pm_generic_freeze_noirq,
-	.thaw = pm_generic_thaw,
-	.thaw_noirq = pm_generic_thaw_noirq,
-	.poweroff = pm_generic_poweroff,
-	.poweroff_noirq = pm_generic_poweroff_noirq,
-	.restore = pm_generic_restore,
-	.restore_noirq = pm_generic_restore_noirq,
-	.complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
-	.runtime_suspend = pm_generic_runtime_suspend,
-	.runtime_resume = pm_generic_runtime_resume,
-	.runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
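With generic_subsys_pm_ops removed, a subsystem that still wants the fully generic behaviour can carry an equivalent table of its own built from the exported pm_generic_* helpers (or simply leave its callbacks unset and rely on the PM core's new driver fallback). A sketch of such a local table, on the assumption that the subsystem really needs one:

  #include <linux/pm.h>
  #include <linux/pm_runtime.h>

  static const struct dev_pm_ops foo_bus_pm_ops = {
  #ifdef CONFIG_PM_SLEEP
          .prepare        = pm_generic_prepare,
          .suspend        = pm_generic_suspend,
          .suspend_noirq  = pm_generic_suspend_noirq,
          .resume         = pm_generic_resume,
          .resume_noirq   = pm_generic_resume_noirq,
          .freeze         = pm_generic_freeze,
          .freeze_noirq   = pm_generic_freeze_noirq,
          .thaw           = pm_generic_thaw,
          .thaw_noirq     = pm_generic_thaw_noirq,
          .poweroff       = pm_generic_poweroff,
          .poweroff_noirq = pm_generic_poweroff_noirq,
          .restore        = pm_generic_restore,
          .restore_noirq  = pm_generic_restore_noirq,
          .complete       = pm_generic_complete,
  #endif
  #ifdef CONFIG_PM_RUNTIME
          .runtime_suspend = pm_generic_runtime_suspend,
          .runtime_resume  = pm_generic_runtime_resume,
          .runtime_idle    = pm_generic_runtime_idle,
  #endif
  };
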
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfc..e2cc3d2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
 #include "../base.h"
 #include "power.h"
 
+typedef int (*pm_callback_t)(struct device *);
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@
 	ktime_t calltime = ktime_set(0, 0);
 
 	if (initcall_debug) {
-		pr_info("calling  %s+ @ %i\n",
-				dev_name(dev), task_pid_nr(current));
+		pr_info("calling  %s+ @ %i, parent: %s\n",
+			dev_name(dev), task_pid_nr(current),
+			dev->parent ? dev_name(dev->parent) : "none");
 		calltime = ktime_get();
 	}
 
@@ -211,151 +214,69 @@
 }
 
 /**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  */
-static int pm_op(struct device *dev,
-		 const struct dev_pm_ops *ops,
-		 pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend) {
-			error = ops->suspend(dev);
-			suspend_report_result(ops->suspend, error);
-		}
-		break;
+		return ops->suspend;
 	case PM_EVENT_RESUME:
-		if (ops->resume) {
-			error = ops->resume(dev);
-			suspend_report_result(ops->resume, error);
-		}
-		break;
+		return ops->resume;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze) {
-			error = ops->freeze(dev);
-			suspend_report_result(ops->freeze, error);
-		}
-		break;
+		return ops->freeze;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff) {
-			error = ops->poweroff(dev);
-			suspend_report_result(ops->poweroff, error);
-		}
-		break;
+		return ops->poweroff;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw) {
-			error = ops->thaw(dev);
-			suspend_report_result(ops->thaw, error);
-		}
+		return ops->thaw;
 		break;
 	case PM_EVENT_RESTORE:
-		if (ops->restore) {
-			error = ops->restore(dev);
-			suspend_report_result(ops->restore, error);
-		}
-		break;
+		return ops->restore;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
 	}
 
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
+	return NULL;
 }
 
 /**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  *
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int pm_noirq_op(struct device *dev,
-			const struct dev_pm_ops *ops,
-			pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
-	if (initcall_debug) {
-		pr_info("calling  %s+ @ %i, parent: %s\n",
-				dev_name(dev), task_pid_nr(current),
-				dev->parent ? dev_name(dev->parent) : "none");
-		calltime = ktime_get();
-	}
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend_noirq) {
-			error = ops->suspend_noirq(dev);
-			suspend_report_result(ops->suspend_noirq, error);
-		}
-		break;
+		return ops->suspend_noirq;
 	case PM_EVENT_RESUME:
-		if (ops->resume_noirq) {
-			error = ops->resume_noirq(dev);
-			suspend_report_result(ops->resume_noirq, error);
-		}
-		break;
+		return ops->resume_noirq;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze_noirq) {
-			error = ops->freeze_noirq(dev);
-			suspend_report_result(ops->freeze_noirq, error);
-		}
-		break;
+		return ops->freeze_noirq;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff_noirq) {
-			error = ops->poweroff_noirq(dev);
-			suspend_report_result(ops->poweroff_noirq, error);
-		}
-		break;
+		return ops->poweroff_noirq;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw_noirq) {
-			error = ops->thaw_noirq(dev);
-			suspend_report_result(ops->thaw_noirq, error);
-		}
-		break;
+		return ops->thaw_noirq;
 	case PM_EVENT_RESTORE:
-		if (ops->restore_noirq) {
-			error = ops->restore_noirq(dev);
-			suspend_report_result(ops->restore_noirq, error);
-		}
-		break;
+		return ops->restore_noirq;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
 	}
 
-	if (initcall_debug) {
-		rettime = ktime_get();
-		delta = ktime_sub(rettime, calltime);
-		printk("initcall %s_i+ returned %d after %Ld usecs\n",
-			dev_name(dev), error,
-			(unsigned long long)ktime_to_ns(delta) >> 10);
-	}
-
-	return error;
+	return NULL;
 }
 
 static char *pm_verb(int event)
@@ -413,6 +334,26 @@
 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 }
 
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+			    pm_message_t state, char *info)
+{
+	ktime_t calltime;
+	int error;
+
+	if (!cb)
+		return 0;
+
+	calltime = initcall_debug_start(dev);
+
+	pm_dev_dbg(dev, state, info);
+	error = cb(dev);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, error);
+
+	return error;
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -425,25 +366,34 @@
  */
 static int device_resume_noirq(struct device *dev, pm_message_t state)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "EARLY power domain ");
-		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+		info = "EARLY power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "EARLY type ");
-		error = pm_noirq_op(dev, dev->type->pm, state);
+		info = "EARLY type ";
+		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "EARLY class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
+		info = "EARLY class ";
+		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "EARLY ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
+		info = "EARLY bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "EARLY driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
 	TRACE_RESUME(error);
 	return error;
 }
@@ -486,26 +436,6 @@
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 
 /**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
-	int error;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
-	error = cb(dev);
-	suspend_report_result(cb, error);
-
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
-}
-
-/**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@
  */
 static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 	bool put = false;
 
@@ -535,40 +467,48 @@
 	put = true;
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pm_domain->ops, state);
-		goto End;
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Driver;
 	}
 
 	if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "type ");
-		error = pm_op(dev, dev->type->pm, state);
-		goto End;
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Driver;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
-			pm_dev_dbg(dev, state, "class ");
-			error = pm_op(dev, dev->class->pm, state);
-			goto End;
+			info = "class ";
+			callback = pm_op(dev->class->pm, state);
+			goto Driver;
 		} else if (dev->class->resume) {
-			pm_dev_dbg(dev, state, "legacy class ");
-			error = legacy_resume(dev, dev->class->resume);
+			info = "legacy class ";
+			callback = dev->class->resume;
 			goto End;
 		}
 	}
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
 		} else if (dev->bus->resume) {
-			pm_dev_dbg(dev, state, "legacy ");
-			error = legacy_resume(dev, dev->bus->resume);
+			info = "legacy bus ";
+			callback = dev->bus->resume;
+			goto End;
 		}
 	}
 
+ Driver:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
  End:
+	error = dpm_run_callback(callback, dev, state, info);
 	dev->power.is_suspended = false;
 
  Unlock:
@@ -660,24 +600,33 @@
  */
 static void device_complete(struct device *dev, pm_message_t state)
 {
+	void (*callback)(struct device *) = NULL;
+	char *info = NULL;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "completing power domain ");
-		if (dev->pm_domain->ops.complete)
-			dev->pm_domain->ops.complete(dev);
+		info = "completing power domain ";
+		callback = dev->pm_domain->ops.complete;
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "completing type ");
-		if (dev->type->pm->complete)
-			dev->type->pm->complete(dev);
+		info = "completing type ";
+		callback = dev->type->pm->complete;
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "completing class ");
-		if (dev->class->pm->complete)
-			dev->class->pm->complete(dev);
+		info = "completing class ";
+		callback = dev->class->pm->complete;
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "completing ");
-		if (dev->bus->pm->complete)
-			dev->bus->pm->complete(dev);
+		info = "completing bus ";
+		callback = dev->bus->pm->complete;
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "completing driver ";
+		callback = dev->driver->pm->complete;
+	}
+
+	if (callback) {
+		pm_dev_dbg(dev, state, info);
+		callback(dev);
 	}
 
 	device_unlock(dev);
@@ -763,31 +712,29 @@
  */
 static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
-	int error;
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
-		if (error)
-			return error;
+		info = "LATE power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "LATE type ");
-		error = pm_noirq_op(dev, dev->type->pm, state);
-		if (error)
-			return error;
+		info = "LATE type ";
+		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "LATE class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
-		if (error)
-			return error;
+		info = "LATE class ";
+		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "LATE ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
-		if (error)
-			return error;
+		info = "LATE bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
-	return 0;
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "LATE driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+	return dpm_run_callback(callback, dev, state, info);
 }
 
 /**
@@ -864,6 +811,8 @@
  */
 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 {
+	pm_callback_t callback = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@
 	device_lock(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pm_domain->ops, state);
-		goto End;
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Run;
 	}
 
 	if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "type ");
-		error = pm_op(dev, dev->type->pm, state);
-		goto End;
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Run;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
-			pm_dev_dbg(dev, state, "class ");
-			error = pm_op(dev, dev->class->pm, state);
-			goto End;
+			info = "class ";
+			callback = pm_op(dev->class->pm, state);
+			goto Run;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
 		} else if (dev->bus->suspend) {
-			pm_dev_dbg(dev, state, "legacy ");
+			pm_dev_dbg(dev, state, "legacy bus ");
 			error = legacy_suspend(dev, state, dev->bus->suspend);
+			goto End;
 		}
 	}
 
+ Run:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
  End:
 	if (!error) {
 		dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@
  */
 static int device_prepare(struct device *dev, pm_message_t state)
 {
+	int (*callback)(struct device *) = NULL;
+	char *info = NULL;
 	int error = 0;
 
 	device_lock(dev);
@@ -1029,34 +989,29 @@
 	dev->power.wakeup_path = device_may_wakeup(dev);
 
 	if (dev->pm_domain) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		if (dev->pm_domain->ops.prepare)
-			error = dev->pm_domain->ops.prepare(dev);
-		suspend_report_result(dev->pm_domain->ops.prepare, error);
-		if (error)
-			goto End;
+		info = "preparing power domain ";
+		callback = dev->pm_domain->ops.prepare;
 	} else if (dev->type && dev->type->pm) {
-		pm_dev_dbg(dev, state, "preparing type ");
-		if (dev->type->pm->prepare)
-			error = dev->type->pm->prepare(dev);
-		suspend_report_result(dev->type->pm->prepare, error);
-		if (error)
-			goto End;
+		info = "preparing type ";
+		callback = dev->type->pm->prepare;
 	} else if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "preparing class ");
-		if (dev->class->pm->prepare)
-			error = dev->class->pm->prepare(dev);
-		suspend_report_result(dev->class->pm->prepare, error);
-		if (error)
-			goto End;
+		info = "preparing class ";
+		callback = dev->class->pm->prepare;
 	} else if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "preparing ");
-		if (dev->bus->pm->prepare)
-			error = dev->bus->pm->prepare(dev);
-		suspend_report_result(dev->bus->pm->prepare, error);
+		info = "preparing bus ";
+		callback = dev->bus->pm->prepare;
 	}
 
- End:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "preparing driver ";
+		callback = dev->driver->pm->prepare;
+	}
+
+	if (callback) {
+		error = callback(dev);
+		suspend_report_result(callback, error);
+	}
+
 	device_unlock(dev);
 
 	return error;
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c5..c5d3588 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@
 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
 
 /**
- * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+	struct pm_qos_constraints *c = dev->power.constraints;
+
+	return c ? pm_qos_read_value(c) : 0;
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
  * @dev: Device to get the PM QoS constraint value for.
  */
 s32 dev_pm_qos_read_value(struct device *dev)
 {
-	struct pm_qos_constraints *c;
 	unsigned long flags;
-	s32 ret = 0;
+	s32 ret;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-
-	c = dev->power.constraints;
-	if (c)
-		ret = pm_qos_read_value(c);
-
+	ret = __dev_pm_qos_read_value(dev);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return ret;
@@ -412,3 +420,28 @@
 	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value)
+{
+	struct device *ancestor = dev->parent;
+	int error = -ENODEV;
+
+	while (ancestor && !ancestor->power.ignore_children)
+		ancestor = ancestor->parent;
+
+	if (ancestor)
+		error = dev_pm_qos_add_request(ancestor, req, value);
+
+	if (error)
+		req->dev = NULL;
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
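
A minimal usage sketch for the new helper: a driver pins the latency of the nearest ancestor that does not have power.ignore_children set (for example its I2C controller). The foo_* names and the 100 us value are illustrative, not taken from this patch.

	static struct dev_pm_qos_request foo_latency_req;

	static int foo_probe(struct device *dev)
	{
		int ret;

		/* Keep the relevant ancestor within a 100 us latency budget. */
		ret = dev_pm_qos_add_ancestor_request(dev, &foo_latency_req, 100);
		if (ret < 0)
			return ret;	/* -ENODEV if no suitable ancestor exists */

		/* ... rest of probe ... */
		return 0;
	}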
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443..541f821 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_idle;
+
 	if (callback)
 		__rpm_callback(callback, dev);
 
@@ -279,6 +282,47 @@
 	return retval != -EACCES ? retval : -EIO;
 }
 
+struct rpm_qos_data {
+	ktime_t time_now;
+	s64 constraint_ns;
+};
+
+/**
+ * rpm_update_qos_constraint - Update a given PM QoS constraint data.
+ * @dev: Device whose timing data to use.
+ * @data: PM QoS constraint data to update.
+ *
+ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
+ * to by @data.
+ */
+static int rpm_update_qos_constraint(struct device *dev, void *data)
+{
+	struct rpm_qos_data *qos = data;
+	unsigned long flags;
+	s64 delta_ns;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (dev->power.max_time_suspended_ns < 0)
+		goto out;
+
+	delta_ns = dev->power.max_time_suspended_ns -
+		ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
+	if (delta_ns <= 0) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
+		qos->constraint_ns = delta_ns;
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return ret;
+}
+
 /**
  * rpm_suspend - Carry out runtime suspend of given device.
  * @dev: Device to suspend.
@@ -305,6 +349,7 @@
 {
 	int (*callback)(struct device *);
 	struct device *parent = NULL;
+	struct rpm_qos_data qos;
 	int retval;
 
 	trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@
 		goto out;
 	}
 
+	qos.constraint_ns = __dev_pm_qos_read_value(dev);
+	if (qos.constraint_ns < 0) {
+		/* Negative constraint means "never suspend". */
+		retval = -EPERM;
+		goto out;
+	}
+	qos.constraint_ns *= NSEC_PER_USEC;
+	qos.time_now = ktime_get();
+
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
+	if (!dev->power.ignore_children) {
+		if (dev->power.irq_safe)
+			spin_unlock(&dev->power.lock);
+		else
+			spin_unlock_irq(&dev->power.lock);
+
+		retval = device_for_each_child(dev, &qos,
+					       rpm_update_qos_constraint);
+
+		if (dev->power.irq_safe)
+			spin_lock(&dev->power.lock);
+		else
+			spin_lock_irq(&dev->power.lock);
+
+		if (retval)
+			goto fail;
+	}
+
+	dev->power.suspend_time = qos.time_now;
+	dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
+
 	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.runtime_suspend;
 	else if (dev->type && dev->type->pm)
@@ -413,28 +488,13 @@
 	else
 		callback = NULL;
 
-	retval = rpm_callback(callback, dev);
-	if (retval) {
-		__update_runtime_status(dev, RPM_ACTIVE);
-		dev->power.deferred_resume = false;
-		if (retval == -EAGAIN || retval == -EBUSY) {
-			dev->power.runtime_error = 0;
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_suspend;
 
-			/*
-			 * If the callback routine failed an autosuspend, and
-			 * if the last_busy time has been updated so that there
-			 * is a new autosuspend expiration time, automatically
-			 * reschedule another autosuspend.
-			 */
-			if ((rpmflags & RPM_AUTO) &&
-			    pm_runtime_autosuspend_expiration(dev) != 0)
-				goto repeat;
-		} else {
-			pm_runtime_cancel_pending(dev);
-		}
-		wake_up_all(&dev->power.wait_queue);
-		goto out;
-	}
+	retval = rpm_callback(callback, dev);
+	if (retval)
+		goto fail;
+
  no_callback:
 	__update_runtime_status(dev, RPM_SUSPENDED);
 	pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@
 	trace_rpm_return_int(dev, _THIS_IP_, retval);
 
 	return retval;
+
+ fail:
+	__update_runtime_status(dev, RPM_ACTIVE);
+	dev->power.suspend_time = ktime_set(0, 0);
+	dev->power.max_time_suspended_ns = -1;
+	dev->power.deferred_resume = false;
+	if (retval == -EAGAIN || retval == -EBUSY) {
+		dev->power.runtime_error = 0;
+
+		/*
+		 * If the callback routine failed an autosuspend, and
+		 * if the last_busy time has been updated so that there
+		 * is a new autosuspend expiration time, automatically
+		 * reschedule another autosuspend.
+		 */
+		if ((rpmflags & RPM_AUTO) &&
+		    pm_runtime_autosuspend_expiration(dev) != 0)
+			goto repeat;
+	} else {
+		pm_runtime_cancel_pending(dev);
+	}
+	wake_up_all(&dev->power.wait_queue);
+	goto out;
 }
 
 /**
@@ -620,6 +703,9 @@
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
 
+	dev->power.suspend_time = ktime_set(0, 0);
+	dev->power.max_time_suspended_ns = -1;
+
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pm_domain)
@@ -633,6 +719,9 @@
 	else
 		callback = NULL;
 
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->runtime_resume;
+
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@
 	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
 			(unsigned long)dev);
 
+	dev->power.suspend_time = ktime_set(0, 0);
+	dev->power.max_time_suspended_ns = -1;
+
 	init_waitqueue_head(&dev->power.wait_queue);
 }
 
@@ -1296,3 +1388,28 @@
 	if (dev->power.irq_safe && dev->parent)
 		pm_runtime_put_sync(dev->parent);
 }
+
+/**
+ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
+ * @dev: Device to handle.
+ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
+ *
+ * Update the device's power.max_time_suspended_ns field by subtracting
+ * @delta_ns from it.  The resulting value of power.max_time_suspended_ns is
+ * never negative.
+ */
+void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
+		if (dev->power.max_time_suspended_ns > delta_ns)
+			dev->power.max_time_suspended_ns -= delta_ns;
+		else
+			dev->power.max_time_suspended_ns = 0;
+	}
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
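
pm_runtime_update_max_time_suspended() has no caller in the hunks shown; presumably subsystems or power domains use it to charge measured wakeup latencies against the budget that rpm_suspend() now derives from the PM QoS constraint. A hypothetical sketch (names illustrative, not from this patch):

	static int foo_domain_runtime_resume(struct device *dev)
	{
		ktime_t start = ktime_get();
		int ret;

		ret = pm_generic_runtime_resume(dev);

		/* Charge the observed resume latency against the suspend budget. */
		pm_runtime_update_max_time_suspended(dev,
				ktime_to_ns(ktime_sub(ktime_get(), start)));
		return ret;
	}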
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 2fc6a66..0f6c7fb 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -13,3 +13,6 @@
 
 config REGMAP_SPI
 	tristate
+
+config REGMAP_IRQ
+	bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0573c8a..defd579 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
 obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 348ff02..1a02b75 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -74,6 +74,7 @@
 	struct reg_default *reg_defaults;
 	const void *reg_defaults_raw;
 	void *cache;
+	bool cache_dirty;
 };
 
 struct regcache_ops {
@@ -105,7 +106,7 @@
 #endif
 
 /* regcache core declarations */
-int regcache_init(struct regmap *map);
+int regcache_init(struct regmap *map, const struct regmap_config *config);
 void regcache_exit(struct regmap *map);
 int regcache_read(struct regmap *map,
 		       unsigned int reg, unsigned int *value);
@@ -118,10 +119,7 @@
 bool regcache_set_val(void *base, unsigned int idx,
 		      unsigned int val, unsigned int word_size);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
-			unsigned int val);
 
-extern struct regcache_ops regcache_indexed_ops;
 extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c
deleted file mode 100644
index 507731a..0000000
--- a/drivers/base/regmap/regcache-indexed.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Register cache access API - indexed caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-
-#include "internal.h"
-
-static int regcache_indexed_read(struct regmap *map, unsigned int reg,
-				 unsigned int *value)
-{
-	int ret;
-
-	ret = regcache_lookup_reg(map, reg);
-	if (ret >= 0)
-		*value = map->reg_defaults[ret].def;
-
-	return ret;
-}
-
-static int regcache_indexed_write(struct regmap *map, unsigned int reg,
-				  unsigned int value)
-{
-	int ret;
-
-	ret = regcache_lookup_reg(map, reg);
-	if (ret < 0)
-		return regcache_insert_reg(map, reg, value);
-	map->reg_defaults[ret].def = value;
-	return 0;
-}
-
-static int regcache_indexed_sync(struct regmap *map)
-{
-	unsigned int i;
-	int ret;
-
-	for (i = 0; i < map->num_reg_defaults; i++) {
-		ret = _regmap_write(map, map->reg_defaults[i].reg,
-				    map->reg_defaults[i].def);
-		if (ret < 0)
-			return ret;
-		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
-			map->reg_defaults[i].reg,
-			map->reg_defaults[i].def);
-	}
-	return 0;
-}
-
-struct regcache_ops regcache_indexed_ops = {
-	.type = REGCACHE_INDEXED,
-	.name = "indexed",
-	.read = regcache_indexed_read,
-	.write = regcache_indexed_write,
-	.sync = regcache_indexed_sync
-};
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 066aeec..b7d1614 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -15,6 +15,8 @@
 
 #include "internal.h"
 
+static int regcache_lzo_exit(struct regmap *map);
+
 struct regcache_lzo_ctx {
 	void *wmem;
 	void *dst;
@@ -27,7 +29,7 @@
 };
 
 #define LZO_BLOCK_NUM 8
-static int regcache_lzo_block_count(void)
+static int regcache_lzo_block_count(struct regmap *map)
 {
 	return LZO_BLOCK_NUM;
 }
@@ -106,19 +108,22 @@
 					    unsigned int reg)
 {
 	return (reg * map->cache_word_size) /
-		DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+		DIV_ROUND_UP(map->cache_size_raw,
+			     regcache_lzo_block_count(map));
 }
 
 static inline int regcache_lzo_get_blkpos(struct regmap *map,
 					  unsigned int reg)
 {
-	return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) /
+	return reg % (DIV_ROUND_UP(map->cache_size_raw,
+				   regcache_lzo_block_count(map)) /
 		      map->cache_word_size);
 }
 
 static inline int regcache_lzo_get_blksize(struct regmap *map)
 {
-	return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+	return DIV_ROUND_UP(map->cache_size_raw,
+			    regcache_lzo_block_count(map));
 }
 
 static int regcache_lzo_init(struct regmap *map)
@@ -131,7 +136,7 @@
 
 	ret = 0;
 
-	blkcount = regcache_lzo_block_count();
+	blkcount = regcache_lzo_block_count(map);
 	map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
 			     GFP_KERNEL);
 	if (!map->cache)
@@ -190,7 +195,7 @@
 
 	return 0;
 err:
-	regcache_exit(map);
+	regcache_lzo_exit(map);
 	return ret;
 }
 
@@ -203,7 +208,7 @@
 	if (!lzo_blocks)
 		return 0;
 
-	blkcount = regcache_lzo_block_count();
+	blkcount = regcache_lzo_block_count(map);
 	/*
 	 * the pointer to the bitmap used for syncing the cache
 	 * is shared amongst all lzo_blocks.  Ensure it is freed
@@ -351,7 +356,7 @@
 }
 
 struct regcache_ops regcache_lzo_ops = {
-	.type = REGCACHE_LZO,
+	.type = REGCACHE_COMPRESSED,
 	.name = "lzo",
 	.init = regcache_lzo_init,
 	.exit = regcache_lzo_exit,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e314984..32620c4 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -11,12 +11,15 @@
  */
 
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 #include <linux/rbtree.h>
+#include <linux/seq_file.h>
 
 #include "internal.h"
 
 static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 				 unsigned int value);
+static int regcache_rbtree_exit(struct regmap *map);
 
 struct regcache_rbtree_node {
 	/* the actual rbtree node holding this block */
@@ -124,6 +127,60 @@
 	return 1;
 }
 
+#ifdef CONFIG_DEBUG_FS
+static int rbtree_show(struct seq_file *s, void *ignored)
+{
+	struct regmap *map = s->private;
+	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+	struct regcache_rbtree_node *n;
+	struct rb_node *node;
+	unsigned int base, top;
+	int nodes = 0;
+	int registers = 0;
+
+	mutex_lock(&map->lock);
+
+	for (node = rb_first(&rbtree_ctx->root); node != NULL;
+	     node = rb_next(node)) {
+		n = container_of(node, struct regcache_rbtree_node, node);
+
+		regcache_rbtree_get_base_top_reg(n, &base, &top);
+		seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
+
+		nodes++;
+		registers += top - base + 1;
+	}
+
+	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
+		   nodes, registers, registers / nodes);
+
+	mutex_unlock(&map->lock);
+
+	return 0;
+}
+
+static int rbtree_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rbtree_show, inode->i_private);
+}
+
+static const struct file_operations rbtree_fops = {
+	.open		= rbtree_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void rbtree_debugfs_init(struct regmap *map)
+{
+	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
+}
+#else
+static void rbtree_debugfs_init(struct regmap *map)
+{
+}
+#endif
+
 static int regcache_rbtree_init(struct regmap *map)
 {
 	struct regcache_rbtree_ctx *rbtree_ctx;
@@ -146,10 +203,12 @@
 			goto err;
 	}
 
+	rbtree_debugfs_init(map);
+
 	return 0;
 
 err:
-	regcache_exit(map);
+	regcache_rbtree_exit(map);
 	return ret;
 }
 
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 666f6f5..1ead661 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -19,7 +19,6 @@
 #include "internal.h"
 
 static const struct regcache_ops *cache_types[] = {
-	&regcache_indexed_ops,
 	&regcache_rbtree_ops,
 	&regcache_lzo_ops,
 };
@@ -61,8 +60,10 @@
 
 	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
 				      GFP_KERNEL);
-	if (!map->reg_defaults)
-		return -ENOMEM;
+	if (!map->reg_defaults) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	/* fill the reg_defaults */
 	map->num_reg_defaults = count;
@@ -77,9 +78,15 @@
 	}
 
 	return 0;
+
+err_free:
+	if (map->cache_free)
+		kfree(map->reg_defaults_raw);
+
+	return ret;
 }
 
-int regcache_init(struct regmap *map)
+int regcache_init(struct regmap *map, const struct regmap_config *config)
 {
 	int ret;
 	int i;
@@ -100,6 +107,12 @@
 		return -EINVAL;
 	}
 
+	map->num_reg_defaults = config->num_reg_defaults;
+	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+	map->reg_defaults_raw = config->reg_defaults_raw;
+	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+
 	map->cache = NULL;
 	map->cache_ops = cache_types[i];
 
@@ -112,10 +125,10 @@
 	 * won't vanish from under us.  We'll need to make
 	 * a copy of it.
 	 */
-	if (map->reg_defaults) {
+	if (config->reg_defaults) {
 		if (!map->num_reg_defaults)
 			return -EINVAL;
-		tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
 				  sizeof(struct reg_default), GFP_KERNEL);
 		if (!tmp_buf)
 			return -ENOMEM;
@@ -136,9 +149,18 @@
 	if (map->cache_ops->init) {
 		dev_dbg(map->dev, "Initializing %s cache\n",
 			map->cache_ops->name);
-		return map->cache_ops->init(map);
+		ret = map->cache_ops->init(map);
+		if (ret)
+			goto err_free;
 	}
 	return 0;
+
+err_free:
+	kfree(map->reg_defaults);
+	if (map->cache_free)
+		kfree(map->reg_defaults_raw);
+
+	return ret;
 }
 
 void regcache_exit(struct regmap *map)
@@ -171,16 +193,21 @@
 int regcache_read(struct regmap *map,
 		  unsigned int reg, unsigned int *value)
 {
+	int ret;
+
 	if (map->cache_type == REGCACHE_NONE)
 		return -ENOSYS;
 
 	BUG_ON(!map->cache_ops);
 
-	if (!regmap_readable(map, reg))
-		return -EIO;
+	if (!regmap_volatile(map, reg)) {
+		ret = map->cache_ops->read(map, reg, value);
 
-	if (!regmap_volatile(map, reg))
-		return map->cache_ops->read(map, reg, value);
+		if (ret == 0)
+			trace_regmap_reg_read_cache(map->dev, reg, *value);
+
+		return ret;
+	}
 
 	return -EINVAL;
 }
@@ -241,6 +268,8 @@
 		map->cache_ops->name);
 	name = map->cache_ops->name;
 	trace_regcache_sync(map->dev, name, "start");
+	if (!map->cache_dirty)
+		goto out;
 	if (map->cache_ops->sync) {
 		ret = map->cache_ops->sync(map);
 	} else {
@@ -291,6 +320,23 @@
 EXPORT_SYMBOL_GPL(regcache_cache_only);
 
 /**
+ * regcache_mark_dirty: Mark the register cache as dirty
+ *
+ * @map: map to mark
+ *
+ * Mark the register cache as dirty, for example due to the device
+ * having been powered down for suspend.  If the cache is not marked
+ * as dirty then the cache sync will be suppressed.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+	mutex_lock(&map->lock);
+	map->cache_dirty = true;
+	mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
+
+/**
  * regcache_cache_bypass: Put a register map into cache bypass mode
  *
  * @map: map to configure
@@ -381,22 +427,3 @@
 	else
 		return -ENOENT;
 }
-
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
-			unsigned int val)
-{
-	void *tmp;
-
-	tmp = krealloc(map->reg_defaults,
-		       (map->num_reg_defaults + 1) * sizeof(struct reg_default),
-		       GFP_KERNEL);
-	if (!tmp)
-		return -ENOMEM;
-	map->reg_defaults = tmp;
-	map->num_reg_defaults++;
-	map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
-	map->reg_defaults[map->num_reg_defaults - 1].def = val;
-	sort(map->reg_defaults, map->num_reg_defaults,
-	     sizeof(struct reg_default), regcache_default_cmp, NULL);
-	return 0;
-}
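
With the new cache_dirty flag, regcache_sync() becomes a no-op unless the map was dirtied, either by a write made while in cache-only mode or by an explicit regcache_mark_dirty(). A hypothetical driver suspend/resume flow showing the intended use (foo_* names are illustrative):

	static int foo_suspend(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		regcache_cache_only(foo->regmap, true);
		regcache_mark_dirty(foo->regmap);	/* register state will be lost */
		foo_power_off(foo);			/* illustrative */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		foo_power_on(foo);			/* illustrative */
		regcache_cache_only(foo->regmap, false);
		return regcache_sync(foo->regmap);	/* skipped if never dirtied */
	}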
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
new file mode 100644
index 0000000..428836f
--- /dev/null
+++ b/drivers/base/regmap/regmap-irq.c
@@ -0,0 +1,302 @@
+/*
+ * regmap based irq_chip
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+	struct mutex lock;
+
+	struct regmap *map;
+	struct regmap_irq_chip *chip;
+
+	int irq_base;
+
+	void *status_reg_buf;
+	unsigned int *status_buf;
+	unsigned int *mask_buf;
+	unsigned int *mask_buf_def;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+				     int irq)
+{
+	return &data->chip->irqs[irq - data->irq_base];
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&d->lock);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	int i, ret;
+
+	/*
+	 * If there's been a change in the mask write it back to the
+	 * hardware.  We rely on the use of the regmap core cache to
+	 * suppress pointless writes.
+	 */
+	for (i = 0; i < d->chip->num_regs; i++) {
+		ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+					 d->mask_buf_def[i], d->mask_buf[i]);
+		if (ret != 0)
+			dev_err(d->map->dev, "Failed to sync masks in %x\n",
+				d->chip->mask_base + i);
+	}
+
+	mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+	d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+	d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+}
+
+static struct irq_chip regmap_irq_chip = {
+	.name			= "regmap",
+	.irq_bus_lock		= regmap_irq_lock,
+	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
+	.irq_disable		= regmap_irq_disable,
+	.irq_enable		= regmap_irq_enable,
+};
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+	struct regmap_irq_chip_data *data = d;
+	struct regmap_irq_chip *chip = data->chip;
+	struct regmap *map = data->map;
+	int ret, i;
+	u8 *buf8 = data->status_reg_buf;
+	u16 *buf16 = data->status_reg_buf;
+	u32 *buf32 = data->status_reg_buf;
+	bool handled = false;
+
+	ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
+			       chip->num_regs);
+	if (ret != 0) {
+		dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowledging the
+	 * interrupt.  We assume that typically few of the interrupts
+	 * will fire simultaneously so don't worry about overhead from
+	 * doing a write per register.
+	 */
+	for (i = 0; i < data->chip->num_regs; i++) {
+		switch (map->format.val_bytes) {
+		case 1:
+			data->status_buf[i] = buf8[i];
+			break;
+		case 2:
+			data->status_buf[i] = buf16[i];
+			break;
+		case 4:
+			data->status_buf[i] = buf32[i];
+			break;
+		default:
+			BUG();
+			return IRQ_NONE;
+		}
+
+		data->status_buf[i] &= ~data->mask_buf[i];
+
+		if (data->status_buf[i] && chip->ack_base) {
+			ret = regmap_write(map, chip->ack_base + i,
+					   data->status_buf[i]);
+			if (ret != 0)
+				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+					chip->ack_base + i, ret);
+		}
+	}
+
+	for (i = 0; i < chip->num_irqs; i++) {
+		if (data->status_buf[chip->irqs[i].reg_offset] &
+		    chip->irqs[i].mask) {
+			handle_nested_irq(data->irq_base + i);
+			handled = true;
+		}
+	}
+
+	if (handled)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+/**
+ * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ *
+ * @map:       The regmap for the device.
+ * @irq:       The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base:  Base IRQ number to allocate descriptors at, or a negative
+ *             value for dynamic allocation (passed to irq_alloc_descs()).
+ * @chip:      Configuration for the interrupt controller.
+ * @data:      Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache.  The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+			int irq_base, struct regmap_irq_chip *chip,
+			struct regmap_irq_chip_data **data)
+{
+	struct regmap_irq_chip_data *d;
+	int cur_irq, i;
+	int ret = -ENOMEM;
+
+	irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+	if (irq_base < 0) {
+		dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+			 irq_base);
+		return irq_base;
+	}
+
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+				GFP_KERNEL);
+	if (!d->status_buf)
+		goto err_alloc;
+
+	d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
+				    GFP_KERNEL);
+	if (!d->status_reg_buf)
+		goto err_alloc;
+
+	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+			      GFP_KERNEL);
+	if (!d->mask_buf)
+		goto err_alloc;
+
+	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
+				  GFP_KERNEL);
+	if (!d->mask_buf_def)
+		goto err_alloc;
+
+	d->map = map;
+	d->chip = chip;
+	d->irq_base = irq_base;
+	mutex_init(&d->lock);
+
+	for (i = 0; i < chip->num_irqs; i++)
+		d->mask_buf_def[chip->irqs[i].reg_offset]
+			|= chip->irqs[i].mask;
+
+	/* Mask all the interrupts by default */
+	for (i = 0; i < chip->num_regs; i++) {
+		d->mask_buf[i] = d->mask_buf_def[i];
+		ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+				chip->mask_base + i, ret);
+			goto err_alloc;
+		}
+	}
+
+	/* Register them with genirq */
+	for (cur_irq = irq_base;
+	     cur_irq < chip->num_irqs + irq_base;
+	     cur_irq++) {
+		irq_set_chip_data(cur_irq, d);
+		irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
+					 handle_edge_irq);
+		irq_set_nested_thread(cur_irq, 1);
+
+		/* ARM needs us to explicitly flag the IRQ as valid
+		 * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+		set_irq_flags(cur_irq, IRQF_VALID);
+#else
+		irq_set_noprobe(cur_irq);
+#endif
+	}
+
+	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+				   chip->name, d);
+	if (ret != 0) {
+		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+		goto err_alloc;
+	}
+
+	return 0;
+
+err_alloc:
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
+	kfree(d->status_buf);
+	kfree(d);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
+
+/**
+ * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+	if (!d)
+		return;
+
+	free_irq(irq, d);
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
+	kfree(d->status_buf);
+	kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ *
+ * Useful for drivers to request their own IRQs.
+ *
+ * @data: regmap_irq controller to operate on.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+	return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
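
A hypothetical consumer of the new irq_chip helper, showing how a chip driver might describe its interrupt registers; all names, register addresses and flags below are illustrative rather than taken from this patch.

	/*
	 * Hypothetical chip: two status/mask register pairs and three
	 * interrupt sources.
	 */
	static struct regmap_irq foo_irqs[] = {
		{ .reg_offset = 0, .mask = BIT(0) },	/* source 0 */
		{ .reg_offset = 0, .mask = BIT(1) },	/* source 1 */
		{ .reg_offset = 1, .mask = BIT(0) },	/* source 2 */
	};

	static struct regmap_irq_chip foo_irq_chip = {
		.name		= "foo",
		.status_base	= 0x10,
		.mask_base	= 0x12,
		.ack_base	= 0x10,		/* status regs are write-to-ack */
		.num_regs	= 2,
		.irqs		= foo_irqs,
		.num_irqs	= ARRAY_SIZE(foo_irqs),
	};

	/* In probe, with foo->regmap already set up (ideally with a cache): */
	ret = regmap_add_irq_chip(foo->regmap, foo->irq, IRQF_ONESHOT, -1,
				  &foo_irq_chip, &foo->irq_data);
	if (ret)
		return ret;

	/* Children request their lines relative to the allocated base. */
	child_irq = regmap_irq_chip_get_base(foo->irq_data) + 2;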
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bf441db..be10a4f 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -64,6 +64,18 @@
 	return false;
 }
 
+static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
+	unsigned int num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++)
+		if (!regmap_volatile(map, reg + i))
+			return false;
+
+	return true;
+}
+
 static void regmap_format_4_12_write(struct regmap *map,
 				     unsigned int reg, unsigned int val)
 {
@@ -78,6 +90,16 @@
 	*out = cpu_to_be16((reg << 9) | val);
 }
 
+static void regmap_format_10_14_write(struct regmap *map,
+				    unsigned int reg, unsigned int val)
+{
+	u8 *out = map->work_buf;
+
+	out[2] = val;
+	out[1] = (val >> 8) | (reg << 6);
+	out[0] = reg >> 2;
+}
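
A worked example of the 10/14 packing above (values illustrative):

	/*
	 * Worked example: reg = 0x2A5, val = 0x1234.
	 *
	 *	out[0] = 0x2A5 >> 2                             = 0xA9  (reg bits 9..2)
	 *	out[1] = (0x1234 >> 8) | ((0x2A5 << 6) & 0xff)
	 *	       = 0x12 | 0x40                            = 0x52  (reg bits 1..0, val bits 13..8)
	 *	out[2] = 0x1234 & 0xff                          = 0x34  (val bits 7..0)
	 *
	 * i.e. the 10-bit register address and 14-bit value are packed
	 * big-endian into three bytes as rrrrrrrr rrvvvvvv vvvvvvvv; the
	 * store into a u8 performs the masking shown explicitly here.
	 */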
+
 static void regmap_format_8(void *buf, unsigned int val)
 {
 	u8 *b = buf;
@@ -127,7 +149,7 @@
 	int ret = -EINVAL;
 
 	if (!bus || !config)
-		return NULL;
+		goto err;
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL) {
@@ -147,12 +169,6 @@
 	map->volatile_reg = config->volatile_reg;
 	map->precious_reg = config->precious_reg;
 	map->cache_type = config->cache_type;
-	map->reg_defaults = config->reg_defaults;
-	map->num_reg_defaults = config->num_reg_defaults;
-	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
-	map->reg_defaults_raw = config->reg_defaults_raw;
-	map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw;
-	map->cache_word_size = config->val_bits / 8;
 
 	if (config->read_flag_mask || config->write_flag_mask) {
 		map->read_flag_mask = config->read_flag_mask;
@@ -182,6 +198,16 @@
 		}
 		break;
 
+	case 10:
+		switch (config->val_bits) {
+		case 14:
+			map->format.format_write = regmap_format_10_14_write;
+			break;
+		default:
+			goto err_map;
+		}
+		break;
+
 	case 8:
 		map->format.format_reg = regmap_format_8;
 		break;
@@ -215,14 +241,16 @@
 		goto err_map;
 	}
 
-	ret = regcache_init(map);
-	if (ret < 0)
-		goto err_map;
-
 	regmap_debugfs_init(map);
 
+	ret = regcache_init(map, config);
+	if (ret < 0)
+		goto err_free_workbuf;
+
 	return map;
 
+err_free_workbuf:
+	kfree(map->work_buf);
 err_map:
 	kfree(map);
 err:
@@ -231,6 +259,39 @@
 EXPORT_SYMBOL_GPL(regmap_init);
 
 /**
+ * regmap_reinit_cache(): Reinitialise the current register cache
+ *
+ * @map: Register map to operate on.
+ * @config: New configuration.  Only the cache data will be used.
+ *
+ * Discard any existing register cache for the map and initialize a
+ * new cache.  This can be used to restore the cache to defaults or to
+ * update the cache configuration to reflect runtime discovery of the
+ * hardware.
+ */
+int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
+{
+	int ret;
+
+	mutex_lock(&map->lock);
+
+	regcache_exit(map);
+
+	map->max_register = config->max_register;
+	map->writeable_reg = config->writeable_reg;
+	map->readable_reg = config->readable_reg;
+	map->volatile_reg = config->volatile_reg;
+	map->precious_reg = config->precious_reg;
+	map->cache_type = config->cache_type;
+
+	ret = regcache_init(map, config);
+
+	mutex_unlock(&map->lock);
+
+	return ret;
+}
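
A hypothetical use of regmap_reinit_cache(): a probe routine that identifies the silicon revision at runtime and swaps in the matching defaults table. FOO_REV_REG and the two config structures are illustrative, not part of this patch.

	ret = regmap_read(foo->regmap, FOO_REV_REG, &rev);
	if (ret)
		return ret;

	/* Discard the provisional cache and rebuild it with the right defaults. */
	ret = regmap_reinit_cache(foo->regmap,
				  rev ? &foo_rev_b_config : &foo_rev_a_config);
	if (ret)
		return ret;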
+
+/**
  * regmap_exit(): Free a previously allocated register map
  */
 void regmap_exit(struct regmap *map)
@@ -306,8 +367,10 @@
 		ret = regcache_write(map, reg, val);
 		if (ret != 0)
 			return ret;
-		if (map->cache_only)
+		if (map->cache_only) {
+			map->cache_dirty = true;
 			return 0;
+		}
 	}
 
 	trace_regmap_reg_write(map->dev, reg, val);
@@ -375,9 +438,11 @@
 int regmap_raw_write(struct regmap *map, unsigned int reg,
 		     const void *val, size_t val_len)
 {
+	size_t val_count = val_len / map->format.val_bytes;
 	int ret;
 
-	WARN_ON(map->cache_type != REGCACHE_NONE);
+	WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+		map->cache_type != REGCACHE_NONE);
 
 	mutex_lock(&map->lock);
 
@@ -422,15 +487,15 @@
 {
 	int ret;
 
-	if (!map->format.parse_val)
-		return -EINVAL;
-
 	if (!map->cache_bypass) {
 		ret = regcache_read(map, reg, val);
 		if (ret == 0)
 			return 0;
 	}
 
+	if (!map->format.parse_val)
+		return -EINVAL;
+
 	if (map->cache_only)
 		return -EBUSY;
 
@@ -481,15 +546,11 @@
 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 		    size_t val_len)
 {
+	size_t val_count = val_len / map->format.val_bytes;
 	int ret;
-	int i;
-	bool vol = true;
 
-	for (i = 0; i < val_len / map->format.val_bytes; i++)
-		if (!regmap_volatile(map, reg + i))
-			vol = false;
-
-	WARN_ON(!vol && map->cache_type != REGCACHE_NONE);
+	WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+		map->cache_type != REGCACHE_NONE);
 
 	mutex_lock(&map->lock);
 
@@ -517,16 +578,11 @@
 {
 	int ret, i;
 	size_t val_bytes = map->format.val_bytes;
-	bool vol = true;
+	bool vol = regmap_volatile_range(map, reg, val_count);
 
 	if (!map->format.parse_val)
 		return -EINVAL;
 
-	/* Is this a block of volatile registers? */
-	for (i = 0; i < val_count; i++)
-		if (!regmap_volatile(map, reg + i))
-			vol = false;
-
 	if (vol || map->cache_type == REGCACHE_NONE) {
 		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
 		if (ret != 0)
@@ -546,8 +602,37 @@
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_read);
 
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+			       unsigned int mask, unsigned int val,
+			       bool *change)
+{
+	int ret;
+	unsigned int tmp, orig;
+
+	mutex_lock(&map->lock);
+
+	ret = _regmap_read(map, reg, &orig);
+	if (ret != 0)
+		goto out;
+
+	tmp = orig & ~mask;
+	tmp |= val & mask;
+
+	if (tmp != orig) {
+		ret = _regmap_write(map, reg, tmp);
+		*change = true;
+	} else {
+		*change = false;
+	}
+
+out:
+	mutex_unlock(&map->lock);
+
+	return ret;
+}
+
 /**
- * remap_update_bits: Perform a read/modify/write cycle on the register map
+ * regmap_update_bits: Perform a read/modify/write cycle on the register map
  *
  * @map: Register map to update
  * @reg: Register to update
@@ -559,27 +644,31 @@
 int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val)
 {
-	int ret;
-	unsigned int tmp;
-
-	mutex_lock(&map->lock);
-
-	ret = _regmap_read(map, reg, &tmp);
-	if (ret != 0)
-		goto out;
-
-	tmp &= ~mask;
-	tmp |= val & mask;
-
-	ret = _regmap_write(map, reg, tmp);
-
-out:
-	mutex_unlock(&map->lock);
-
-	return ret;
+	bool change;
+	return _regmap_update_bits(map, reg, mask, val, &change);
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
+/**
+ * regmap_update_bits_check: Perform a read/modify/write cycle on the
+ *                           register map and report if updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+			     unsigned int mask, unsigned int val,
+			     bool *change)
+{
+	return _regmap_update_bits(map, reg, mask, val, change);
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+
 static int __init regmap_initcall(void)
 {
 	regmap_debugfs_initcall();
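
A short sketch of the new regmap_update_bits_check() in use, acting only when the read-modify-write actually changed the register; FOO_* names are illustrative.

	bool changed;
	int ret;

	ret = regmap_update_bits_check(foo->regmap, FOO_CTRL,
				       FOO_CTRL_EN, FOO_CTRL_EN, &changed);
	if (ret)
		return ret;

	if (changed)
		foo_handle_enable(foo);		/* illustrative follow-up work */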
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 9dff77b..409f5ce 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -126,7 +126,7 @@
 }
 EXPORT_SYMBOL_GPL(sysdev_class_remove_file);
 
-static struct kset *system_kset;
+extern struct kset *system_kset;
 
 int sysdev_class_register(struct sysdev_class *cls)
 {
@@ -331,14 +331,6 @@
 EXPORT_SYMBOL_GPL(sysdev_register);
 EXPORT_SYMBOL_GPL(sysdev_unregister);
 
-int __init system_bus_init(void)
-{
-	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
-	if (!system_kset)
-		return -ENOMEM;
-	return 0;
-}
-
 #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr)
 
 ssize_t sysdev_store_ulong(struct sys_device *sysdev,
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index f6f37a0..ae989c5 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  */
-#include <linux/sysdev.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
@@ -32,14 +31,14 @@
 #include <linux/topology.h>
 
 #define define_one_ro_named(_name, _func)				\
-static SYSDEV_ATTR(_name, 0444, _func, NULL)
+	static DEVICE_ATTR(_name, 0444, _func, NULL)
 
 #define define_one_ro(_name)				\
-static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
+	static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name)				\
-static ssize_t show_##name(struct sys_device *dev,		\
-		struct sysdev_attribute *attr, char *buf)	\
+static ssize_t show_##name(struct device *dev,			\
+		struct device_attribute *attr, char *buf)	\
 {								\
 	unsigned int cpu = dev->id;				\
 	return sprintf(buf, "%d\n", topology_##name(cpu));	\
@@ -65,16 +64,16 @@
 
 #ifdef arch_provides_topology_pointers
 #define define_siblings_show_map(name)					\
-static ssize_t show_##name(struct sys_device *dev,			\
-			   struct sysdev_attribute *attr, char *buf)	\
+static ssize_t show_##name(struct device *dev,				\
+			   struct device_attribute *attr, char *buf)	\
 {									\
 	unsigned int cpu = dev->id;					\
 	return show_cpumap(0, topology_##name(cpu), buf);		\
 }
 
 #define define_siblings_show_list(name)					\
-static ssize_t show_##name##_list(struct sys_device *dev,		\
-				  struct sysdev_attribute *attr,	\
+static ssize_t show_##name##_list(struct device *dev,			\
+				  struct device_attribute *attr,	\
 				  char *buf)				\
 {									\
 	unsigned int cpu = dev->id;					\
@@ -83,15 +82,15 @@
 
 #else
 #define define_siblings_show_map(name)					\
-static ssize_t show_##name(struct sys_device *dev,			\
-			   struct sysdev_attribute *attr, char *buf)	\
+static ssize_t show_##name(struct device *dev,				\
+			   struct device_attribute *attr, char *buf)	\
 {									\
 	return show_cpumap(0, topology_##name(dev->id), buf);		\
 }
 
 #define define_siblings_show_list(name)					\
-static ssize_t show_##name##_list(struct sys_device *dev,		\
-				  struct sysdev_attribute *attr,	\
+static ssize_t show_##name##_list(struct device *dev,			\
+				  struct device_attribute *attr,	\
 				  char *buf)				\
 {									\
 	return show_cpumap(1, topology_##name(dev->id), buf);		\
@@ -124,16 +123,16 @@
 #endif
 
 static struct attribute *default_attrs[] = {
-	&attr_physical_package_id.attr,
-	&attr_core_id.attr,
-	&attr_thread_siblings.attr,
-	&attr_thread_siblings_list.attr,
-	&attr_core_siblings.attr,
-	&attr_core_siblings_list.attr,
+	&dev_attr_physical_package_id.attr,
+	&dev_attr_core_id.attr,
+	&dev_attr_thread_siblings.attr,
+	&dev_attr_thread_siblings_list.attr,
+	&dev_attr_core_siblings.attr,
+	&dev_attr_core_siblings_list.attr,
 #ifdef CONFIG_SCHED_BOOK
-	&attr_book_id.attr,
-	&attr_book_siblings.attr,
-	&attr_book_siblings_list.attr,
+	&dev_attr_book_id.attr,
+	&dev_attr_book_siblings.attr,
+	&dev_attr_book_siblings_list.attr,
 #endif
 	NULL
 };
@@ -146,16 +145,16 @@
 /* Add/Remove cpu_topology interface for CPU device */
 static int __cpuinit topology_add_dev(unsigned int cpu)
 {
-	struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+	struct device *dev = get_cpu_device(cpu);
 
-	return sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
+	return sysfs_create_group(&dev->kobj, &topology_attr_group);
 }
 
 static void __cpuinit topology_remove_dev(unsigned int cpu)
 {
-	struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+	struct device *dev = get_cpu_device(cpu);
 
-	sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
+	sysfs_remove_group(&dev->kobj, &topology_attr_group);
 }
 
 static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 30a3085..fda56bd 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -18,6 +18,9 @@
 int __init bcma_bus_early_register(struct bcma_bus *bus,
 				   struct bcma_device *core_cc,
 				   struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
 
 /* scan.c */
 int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1b51d8b..443b83a 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -21,48 +21,58 @@
 	pr_debug("Switched to core: 0x%X\n", core->id.id);
 }
 
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
 {
+	switch (core->id.id) {
+	case BCMA_CORE_CHIPCOMMON:
+		return 3 * BCMA_CORE_SIZE;
+	case BCMA_CORE_PCIE:
+		return 2 * BCMA_CORE_SIZE;
+	}
+
 	if (core->bus->mapped_core != core)
 		bcma_host_pci_switch_core(core);
+	return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
 	return ioread8(core->bus->mmio + offset);
 }
 
 static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	return ioread16(core->bus->mmio + offset);
 }
 
 static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	return ioread32(core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
 				 u8 value)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	iowrite8(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
 				 u16 value)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	iowrite16(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
 				 u32 value)
 {
-	if (core->bus->mapped_core != core)
-		bcma_host_pci_switch_core(core);
+	offset += bcma_host_pci_provide_access_to_core(core);
 	iowrite32(value, core->bus->mmio + offset);
 }
 
@@ -224,6 +234,41 @@
 	pci_set_drvdata(dev, NULL);
 }
 
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+	/* Host specific */
+	pci_save_state(dev);
+	pci_disable_device(dev);
+	pci_set_power_state(dev, pci_choose_state(dev, state));
+
+	return 0;
+}
+
+static int bcma_host_pci_resume(struct pci_dev *dev)
+{
+	struct bcma_bus *bus = pci_get_drvdata(dev);
+	int err;
+
+	/* Host specific */
+	pci_set_power_state(dev, 0);
+	err = pci_enable_device(dev);
+	if (err)
+		return err;
+	pci_restore_state(dev);
+
+	/* Bus specific */
+	err = bcma_bus_resume(bus);
+	if (err)
+		return err;
+
+	return 0;
+}
+#else /* CONFIG_PM */
+# define bcma_host_pci_suspend	NULL
+# define bcma_host_pci_resume	NULL
+#endif /* CONFIG_PM */
+
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -239,6 +284,8 @@
 	.id_table = bcma_pci_bridge_tbl,
 	.probe = bcma_host_pci_probe,
 	.remove = bcma_host_pci_remove,
+	.suspend = bcma_host_pci_suspend,
+	.resume = bcma_host_pci_resume,
 };
 
 int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 70c84b9..10f92b3 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -240,6 +240,22 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+	struct bcma_device *core;
+
+	/* Init CC core */
+	core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+	if (core) {
+		bus->drv_cc.setup_done = false;
+		bcma_core_chipcommon_init(&bus->drv_cc);
+	}
+
+	return 0;
+}
+#endif
+
 int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
 {
 	drv->drv.name = drv->name;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index d729239..6f230fb 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -129,6 +129,9 @@
 	u16 v;
 	int i;
 
+	bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+		SSB_SPROM_REVISION_REV;
+
 	for (i = 0; i < 3; i++) {
 		v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
 		*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@
 
 	bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
 
+	bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+	     SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+	bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+	     SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+	bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+	     SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+	bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+	     SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+
+	bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+	     SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+	bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+	     SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+	bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+	     SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+	bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+	     SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+
+	bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+	     SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+	bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+	     SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+	bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+	     SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+	bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+	     SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+
+	bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+	     SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+	bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+	     SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+	bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+	     SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+	bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+	     SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+
 	bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
 	bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
 	bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
 	bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
 
 	bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+
+	bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+		SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+	bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+		SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+	bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+		SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+	bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+		SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+	bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+		SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+
+	bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+		SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+	bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+		SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+	bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+		SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+	bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+		SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+	bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+		SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
 }
 
 int bcma_sprom_get(struct bcma_bus *bus)
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8eba86b..386146d 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -63,7 +63,7 @@
 #include <linux/mutex.h>
 #include <linux/amifdreg.h>
 #include <linux/amifd.h>
-#include <linux/buffer_head.h>
+#include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/interrupt.h>
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 5f8e39c..e86d206 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -270,7 +270,7 @@
 	.llseek = noop_llseek,
 };
 
-static char *aoe_devnode(struct device *dev, mode_t *mode)
+static char *aoe_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
 }
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d22119d..ec24643 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -17,7 +17,7 @@
 #include <linux/highmem.h>
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
-#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
+#include <linux/fs.h>
 #include <linux/slab.h>
 
 #include <asm/uaccess.h>
@@ -402,14 +402,13 @@
 	error = -EBUSY;
 	if (bdev->bd_openers <= 1) {
 		/*
-		 * Invalidate the cache first, so it isn't written
-		 * back to the device.
+		 * Kill the cache first, so it isn't written back to the
+		 * device.
 		 *
 		 * Another thread might instantiate more buffercache here,
 		 * but there is not much we can do to close that race.
 		 */
-		invalidate_bh_lrus();
-		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+		kill_bdev(bdev);
 		brd_free_pages(brd);
 		error = 0;
 	}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8004ac3..587cce5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2601,6 +2601,8 @@
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = BMIC_WRITE;
 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+			c->Request.CDB[7] = (size >> 8) & 0xFF;
+			c->Request.CDB[8] = size & 0xFF;
 			break;
 		case TEST_UNIT_READY:
 			c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@
 {
 	if (h->msix_vector || h->msi_vector) {
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
-				IRQF_DISABLED, h->devname, h))
+				0, h->devname, h))
 			return 0;
 		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
 			" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@
 	}
 
 	if (!request_irq(h->intr[h->intr_mode], intxhandler,
-			IRQF_DISABLED, h->devname, h))
+			IRQF_SHARED, h->devname, h))
 		return 0;
 	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
 		h->intr[h->intr_mode], h->devname);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9955a53..510fb10 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -188,7 +188,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/mod_devicetable.h>
-#include <linux/buffer_head.h>	/* for invalidate_buffers() */
 #include <linux/mutex.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68b205a..f002577 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -69,7 +69,6 @@
 #include <linux/freezer.h>
 #include <linux/mutex.h>
 #include <linux/writeback.h>
-#include <linux/buffer_head.h>		/* for invalidate_bdev() */
 #include <linux/completion.h>
 #include <linux/highmem.h>
 #include <linux/kthread.h>
@@ -422,7 +421,7 @@
 
 		/*
 		 * We use punch hole to reclaim the free space used by the
-		 * image a.k.a. discard. However we do support discard if
+		 * image a.k.a. discard. However we do not support discard if
 		 * encryption is enabled, because it may give an attacker
 		 * useful information.
 		 */
@@ -797,7 +796,7 @@
 	}
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
-	q->limits.discard_alignment = inode->i_sb->s_blocksize;
+	q->limits.discard_alignment = 0;
 	q->limits.max_discard_sectors = UINT_MAX >> 9;
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a63b0a2..d59edea 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2817,7 +2817,7 @@
 	.check_events =		pkt_check_events,
 };
 
-static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
+static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424..148ab94 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf,
@@ -461,6 +457,10 @@
 	u32 snap_count = le32_to_cpu(ondisk->snap_count);
 	int ret = -ENOMEM;
 
+	if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+		return -ENXIO;
+	}
+
 	init_rwsem(&header->snap_rwsem);
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@
 }
 
 /*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-				     u64 snapid,
-				     const char *obj)
-{
-	struct ceph_osd_req_op *ops;
-	int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-	if (ret < 0)
-		return ret;
-
-	ops[0].snap.snapid = snapid;
-
-	ret = rbd_req_sync_op(dev, NULL,
-			       CEPH_NOSNAP,
-			       0,
-			       CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-			       ops,
-			       1, obj, 0, 0, NULL, NULL, NULL);
-
-	rbd_destroy_ops(ops);
-
-	return ret;
-}
-
-/*
  * Request sync osd read
  */
 static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@
 			goto out_dh;
 
 		rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-		if (rc < 0)
+		if (rc < 0) {
+			if (rc == -ENXIO) {
+				pr_warning("unrecognized header format"
+					   " for image %s", rbd_dev->obj);
+			}
 			goto out_dh;
+		}
 
 		if (snap_count != header->total_snaps) {
 			snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
 	&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@
 	&dev_attr_current_snap.attr,
 	&dev_attr_refresh.attr,
 	&dev_attr_create_snap.attr,
-	&dev_attr_rollback_snap.attr,
 	NULL
 };
 
@@ -2424,64 +2401,6 @@
 	return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t count)
-{
-	struct rbd_device *rbd_dev = dev_to_rbd(dev);
-	int ret;
-	u64 snapid;
-	u64 cur_ofs;
-	char *seg_name = NULL;
-	char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!snap_name)
-		return ret;
-
-	/* parse snaps add command */
-	snprintf(snap_name, count, "%s", buf);
-	seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-	if (!seg_name)
-		goto done;
-
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-	if (ret < 0)
-		goto done_unlock;
-
-	dout("snapid=%lld\n", snapid);
-
-	cur_ofs = 0;
-	while (cur_ofs < rbd_dev->header.image_size) {
-		cur_ofs += rbd_get_segment(&rbd_dev->header,
-					   rbd_dev->obj,
-					   cur_ofs, (u64)-1,
-					   seg_name, NULL);
-		dout("seg_name=%s\n", seg_name);
-
-		ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-		if (ret < 0)
-			pr_warning("could not roll back obj %s err=%d\n",
-				   seg_name, ret);
-	}
-
-	ret = __rbd_update_snaps(rbd_dev);
-	if (ret < 0)
-		goto done_unlock;
-
-	ret = count;
-
-done_unlock:
-	mutex_unlock(&ctl_mutex);
-done:
-	kfree(seg_name);
-	kfree(snap_name);
-
-	return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
 	__ATTR(add, S_IWUSR, NULL, rbd_add),
 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index fd5adcd..6d5a914 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -26,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 
-#include <asm/macintosh.h>
 #include <asm/mac_via.h>
 
 #define CARDNAME "swim"
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167..89ddab1 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
  * handle GCR disks
  */
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -36,13 +38,11 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES	2
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
 	idle,
 	locating,
@@ -177,7 +177,6 @@
 
 struct floppy_state {
 	enum swim_state	state;
-	spinlock_t lock;
 	struct swim3 __iomem *swim3;	/* hardware registers */
 	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
 	int	swim3_intr;	/* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@
 	int	wanted;
 	struct macio_dev *mdev;
 	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+	int	index;
+	struct request *cur_req;
 };
 
+#define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...)	do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@
 	0, 0, 0, 0, 0, 0
 };
 
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-			void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,20 +256,23 @@
 					unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-	if (__blk_end_request(fd_req, err, nr_bytes))
+	struct request *req = fs->cur_req;
+	int rc;
+
+	swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
+		  err, nr_bytes, req);
+
+	if (err)
+		nr_bytes = blk_rq_cur_bytes(req);
+	rc = __blk_end_request(req, err, nr_bytes);
+	if (rc)
 		return true;
-
-	fd_req = NULL;
+	fs->cur_req = NULL;
 	return false;
 }
 
-static bool swim3_end_request_cur(int err)
-{
-	return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
-}
-
 static void swim3_select(struct floppy_state *fs, int sel)
 {
 	struct swim3 __iomem *sw = fs->swim3;
@@ -303,50 +308,53 @@
 	return (stat & DATA) == 0;
 }
 
-static void do_fd_request(struct request_queue * q)
-{
-	int i;
-
-	for(i=0; i<floppy_count; i++) {
-		struct floppy_state *fs = &floppy_states[i];
-		if (fs->mdev->media_bay &&
-		    check_media_bay(fs->mdev->media_bay) != MB_FD)
-			continue;
-		start_request(fs);
-	}
-}
-
 static void start_request(struct floppy_state *fs)
 {
 	struct request *req;
 	unsigned long x;
 
+	swim3_dbg("start request, initial state=%d\n", fs->state);
+
 	if (fs->state == idle && fs->wanted) {
 		fs->state = available;
 		wake_up(&fs->wait);
 		return;
 	}
 	while (fs->state == idle) {
-		if (!fd_req) {
-			fd_req = blk_fetch_request(swim3_queue);
-			if (!fd_req)
+		swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+		if (!fs->cur_req) {
+			fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+			swim3_dbg("  fetched request %p\n", fs->cur_req);
+			if (!fs->cur_req)
 				break;
 		}
-		req = fd_req;
-#if 0
-		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-		       req->rq_disk->disk_name, req->cmd,
-		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-		printk("           errors=%d current_nr_sectors=%u\n",
-		       req->errors, blk_rq_cur_sectors(req));
+		req = fs->cur_req;
+
+		if (fs->mdev->media_bay &&
+		    check_media_bay(fs->mdev->media_bay) != MB_FD) {
+			swim3_dbg("%s", "  media bay absent, dropping req\n");
+			swim3_end_request(fs, -ENODEV, 0);
+			continue;
+		}
+
+#if 0 /* This is really too verbose */
+		swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+			  req->rq_disk->disk_name, req->cmd,
+			  (long)blk_rq_pos(req), blk_rq_sectors(req),
+			  req->buffer);
+		swim3_dbg("           errors=%d current_nr_sectors=%u\n",
+			  req->errors, blk_rq_cur_sectors(req));
 #endif
 
 		if (blk_rq_pos(req) >= fs->total_secs) {
-			swim3_end_request_cur(-EIO);
+			swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
+				  (long)blk_rq_pos(req), (long)fs->total_secs);
+			swim3_end_request(fs, -EIO, 0);
 			continue;
 		}
 		if (fs->ejected) {
-			swim3_end_request_cur(-EIO);
+			swim3_dbg("%s", "  disk ejected\n");
+			swim3_end_request(fs, -EIO, 0);
 			continue;
 		}
 
@@ -354,7 +362,8 @@
 			if (fs->write_prot < 0)
 				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
 			if (fs->write_prot) {
-				swim3_end_request_cur(-EIO);
+				swim3_dbg("%s", "  try to write, disk write protected\n");
+				swim3_end_request(fs, -EIO, 0);
 				continue;
 			}
 		}
@@ -369,7 +378,6 @@
 		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
 		fs->head = x / fs->secpertrack;
 		fs->req_sector = x % fs->secpertrack + 1;
-		fd_req = req;
 		fs->state = do_transfer;
 		fs->retries = 0;
 
@@ -377,12 +385,14 @@
 	}
 }
 
+static void do_fd_request(struct request_queue * q)
+{
+	start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
 			void (*proc)(unsigned long))
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&fs->lock, flags);
 	if (fs->timeout_pending)
 		del_timer(&fs->timeout);
 	fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@
 	fs->timeout.data = (unsigned long) fs;
 	add_timer(&fs->timeout);
 	fs->timeout_pending = 1;
-	spin_unlock_irqrestore(&fs->lock, flags);
 }
 
 static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_cmd *cp = fs->dma_cmd;
 	struct dbdma_regs __iomem *dr = fs->dma;
+	struct request *req = fs->cur_req;
 
-	if (blk_rq_cur_sectors(fd_req) <= 0) {
-		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+	if (blk_rq_cur_sectors(req) <= 0) {
+		swim3_warn("%s", "Transfer 0 sectors ?\n");
 		return;
 	}
-	if (rq_data_dir(fd_req) == WRITE)
+	if (rq_data_dir(req) == WRITE)
 		n = 1;
 	else {
 		n = fs->secpertrack - fs->req_sector + 1;
-		if (n > blk_rq_cur_sectors(fd_req))
-			n = blk_rq_cur_sectors(fd_req);
+		if (n > blk_rq_cur_sectors(req))
+			n = blk_rq_cur_sectors(req);
 	}
+
+	swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
+		  fs->req_sector, fs->secpertrack, fs->head, n);
+
 	fs->scount = n;
 	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
 	out_8(&sw->sector, fs->req_sector);
 	out_8(&sw->nsect, n);
 	out_8(&sw->gap3, 0);
 	out_le32(&dr->cmdptr, virt_to_bus(cp));
-	if (rq_data_dir(fd_req) == WRITE) {
+	if (rq_data_dir(req) == WRITE) {
 		/* Set up 3 dma commands: write preamble, data, postamble */
 		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
 		++cp;
-		init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+		init_dma(cp, OUTPUT_MORE, req->buffer, 512);
 		++cp;
 		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
 	} else {
-		init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+		init_dma(cp, INPUT_LAST, req->buffer, n * 512);
 	}
 	++cp;
 	out_le16(&cp->command, DBDMA_STOP);
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
 	in_8(&sw->error);
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-	if (rq_data_dir(fd_req) == WRITE)
+	if (rq_data_dir(req) == WRITE)
 		out_8(&sw->control_bis, WRITE_SECTORS);
 	in_8(&sw->intr);
 	out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@
 static void act(struct floppy_state *fs)
 {
 	for (;;) {
+		swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+			  fs->state, fs->req_cyl, fs->cur_cyl);
+
 		switch (fs->state) {
 		case idle:
 			return;		/* XXX shouldn't get here */
 
 		case locating:
 			if (swim3_readbit(fs, TRACK_ZERO)) {
+				swim3_dbg("%s", "    locate track 0\n");
 				fs->cur_cyl = 0;
 				if (fs->req_cyl == 0)
 					fs->state = do_transfer;
@@ -511,7 +529,7 @@
 				break;
 			}
 			if (fs->req_cyl == fs->cur_cyl) {
-				printk("whoops, seeking 0\n");
+				swim3_warn("%s", "Whoops, seeking 0\n");
 				fs->state = do_transfer;
 				break;
 			}
@@ -527,7 +545,9 @@
 		case do_transfer:
 			if (fs->cur_cyl != fs->req_cyl) {
 				if (fs->retries > 5) {
-					swim3_end_request_cur(-EIO);
+					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+						  fs->req_cyl, fs->cur_cyl);
+					swim3_end_request(fs, -EIO, 0);
 					fs->state = idle;
 					return;
 				}
@@ -542,7 +562,7 @@
 			return;
 
 		default:
-			printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+			swim3_err("Unknown state %d\n", fs->state);
 			return;
 		}
 	}
@@ -552,59 +572,75 @@
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
 
+	swim3_dbg("* scan timeout, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
 	fs->cur_cyl = -1;
 	if (fs->retries > 5) {
-		swim3_end_request_cur(-EIO);
+		swim3_end_request(fs, -EIO, 0);
 		fs->state = idle;
 		start_request(fs);
 	} else {
 		fs->state = jogging;
 		act(fs);
 	}
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void seek_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
 
+	swim3_dbg("* seek timeout, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_8(&sw->control_bic, DO_SEEK);
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
-	printk(KERN_ERR "swim3: seek timeout\n");
-	swim3_end_request_cur(-EIO);
+	swim3_err("%s", "Seek timeout\n");
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void settle_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
 
+	swim3_dbg("* settle timeout, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	if (swim3_readbit(fs, SEEK_COMPLETE)) {
 		out_8(&sw->select, RELAX);
 		fs->state = locating;
 		act(fs);
-		return;
+		goto unlock;
 	}
 	out_8(&sw->select, RELAX);
 	if (fs->settle_time < 2*HZ) {
 		++fs->settle_time;
 		set_timeout(fs, 1, settle_timeout);
-		return;
+		goto unlock;
 	}
-	printk(KERN_ERR "swim3: seek settle timeout\n");
-	swim3_end_request_cur(-EIO);
+	swim3_err("%s", "Seek settle timeout\n");
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+ unlock:
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_regs __iomem *dr = fs->dma;
+	unsigned long flags;
 	int n;
 
+	swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_le32(&dr->control, RUN << 16);
 	/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@
 	out_8(&sw->intr_enable, 0);
 	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
 	out_8(&sw->select, RELAX);
-	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-	       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-	       (long)blk_rq_pos(fd_req));
-	swim3_end_request_cur(-EIO);
+	swim3_err("Timeout %sing sector %ld\n",
+	       (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+	       (long)blk_rq_pos(fs->cur_req));
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@
 	int stat, resid;
 	struct dbdma_regs __iomem *dr;
 	struct dbdma_cmd *cp;
+	unsigned long flags;
+	struct request *req = fs->cur_req;
 
+	swim3_dbg("* interrupt, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	intr = in_8(&sw->intr);
 	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
 	if ((intr & ERROR_INTR) && fs->state != do_transfer)
-		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-		       fs->state, rq_data_dir(fd_req), intr, err);
+		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+			  fs->state, rq_data_dir(req), intr, err);
 	switch (fs->state) {
 	case locating:
 		if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@
 			del_timer(&fs->timeout);
 			fs->timeout_pending = 0;
 			if (sw->ctrack == 0xff) {
-				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+				swim3_err("%s", "Seen sector but cyl=ff?\n");
 				fs->cur_cyl = -1;
 				if (fs->retries > 5) {
-					swim3_end_request_cur(-EIO);
+					swim3_end_request(fs, -EIO, 0);
 					fs->state = idle;
 					start_request(fs);
 				} else {
@@ -668,8 +714,8 @@
 			fs->cur_cyl = sw->ctrack;
 			fs->cur_sector = sw->csect;
 			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-				       fs->expect_cyl, fs->cur_cyl);
+				swim3_err("Expected cyl %d, got %d\n",
+					  fs->expect_cyl, fs->cur_cyl);
 			fs->state = do_transfer;
 			act(fs);
 		}
@@ -704,7 +750,7 @@
 		fs->timeout_pending = 0;
 		dr = fs->dma;
 		cp = fs->dma_cmd;
-		if (rq_data_dir(fd_req) == WRITE)
+		if (rq_data_dir(req) == WRITE)
 			++cp;
 		/*
 		 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@
 		if (intr & ERROR_INTR) {
 			n = fs->scount - 1 - resid / 512;
 			if (n > 0) {
-				blk_update_request(fd_req, 0, n << 9);
+				blk_update_request(req, 0, n << 9);
 				fs->req_sector += n;
 			}
 			if (fs->retries < 5) {
 				++fs->retries;
 				act(fs);
 			} else {
-				printk("swim3: error %sing block %ld (err=%x)\n",
-				       rq_data_dir(fd_req) == WRITE? "writ": "read",
-				       (long)blk_rq_pos(fd_req), err);
-				swim3_end_request_cur(-EIO);
+				swim3_err("Error %sing block %ld (err=%x)\n",
+				       rq_data_dir(req) == WRITE? "writ": "read",
+				       (long)blk_rq_pos(req), err);
+				swim3_end_request(fs, -EIO, 0);
 				fs->state = idle;
 			}
 		} else {
 			if ((stat & ACTIVE) == 0 || resid != 0) {
 				/* musta been an error */
-				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-				printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-				       fs->state, rq_data_dir(fd_req), intr, err);
-				swim3_end_request_cur(-EIO);
+				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+				swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
+					  fs->state, rq_data_dir(req), intr, err);
+				swim3_end_request(fs, -EIO, 0);
 				fs->state = idle;
 				start_request(fs);
 				break;
 			}
-			if (swim3_end_request(0, fs->scount << 9)) {
+			fs->retries = 0;
+			if (swim3_end_request(fs, 0, fs->scount << 9)) {
 				fs->req_sector += fs->scount;
 				if (fs->req_sector > fs->secpertrack) {
 					fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@
 			start_request(fs);
 		break;
 	default:
-		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+		swim3_err("Don't know what to do in state %d\n", fs->state);
 	}
+	spin_unlock_irqrestore(&swim3_lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -781,26 +829,31 @@
 }
 */
 
+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
 		      int interruptible)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&fs->lock, flags);
-	if (fs->state != idle) {
+	swim3_dbg("%s", "-> grab drive\n");
+
+	spin_lock_irqsave(&swim3_lock, flags);
+	if (fs->state != idle && fs->state != available) {
 		++fs->wanted;
 		while (fs->state != available) {
+			spin_unlock_irqrestore(&swim3_lock, flags);
 			if (interruptible && signal_pending(current)) {
 				--fs->wanted;
-				spin_unlock_irqrestore(&fs->lock, flags);
 				return -EINTR;
 			}
 			interruptible_sleep_on(&fs->wait);
+			spin_lock_irqsave(&swim3_lock, flags);
 		}
 		--fs->wanted;
 	}
 	fs->state = state;
-	spin_unlock_irqrestore(&fs->lock, flags);
+	spin_unlock_irqrestore(&swim3_lock, flags);
+
 	return 0;
 }
 
@@ -808,10 +861,12 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&fs->lock, flags);
+	swim3_dbg("%s", "-> release drive\n");
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->state = idle;
 	start_request(fs);
-	spin_unlock_irqrestore(&fs->lock, flags);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@
 {
 	struct floppy_state *fs = disk->private_data;
 	struct swim3 __iomem *sw = fs->swim3;
+
 	mutex_lock(&swim3_mutex);
 	if (fs->ref_count > 0 && --fs->ref_count == 0) {
 		swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@
 	.revalidate_disk= floppy_revalidate,
 };
 
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+	struct floppy_state *fs = macio_get_drvdata(mdev);
+	struct swim3 __iomem *sw = fs->swim3;
+
+	if (!fs)
+		return;
+	if (mb_state != MB_FD)
+		return;
+
+	/* Clear state */
+	out_8(&sw->intr_enable, 0);
+	in_8(&sw->intr);
+	in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
 	struct device_node *swim = mdev->ofdev.dev.of_node;
 	struct floppy_state *fs = &floppy_states[index];
 	int rc = -EBUSY;
 
+	/* Do this first for message macros */
+	memset(fs, 0, sizeof(*fs));
+	fs->mdev = mdev;
+	fs->index = index;
+
 	/* Check & Request resources */
 	if (macio_resource_count(mdev) < 2) {
-		printk(KERN_WARNING "ifd%d: no address for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "No address in device-tree\n");
 		return -ENXIO;
 	}
-	if (macio_irq_count(mdev) < 2) {
-		printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-			index, swim->full_name);
+	if (macio_irq_count(mdev) < 1) {
+		swim3_err("%s", "No interrupt in device-tree\n");
+		return -ENXIO;
 	}
 	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-		printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Can't request mmio resource\n");
 		return -EBUSY;
 	}
 	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-		printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Can't request dma resource\n");
 		macio_release_resource(mdev, 0);
 		return -EBUSY;
 	}
@@ -1063,22 +1137,18 @@
 	if (mdev->media_bay == NULL)
 		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
 	
-	memset(fs, 0, sizeof(*fs));
-	spin_lock_init(&fs->lock);
 	fs->state = idle;
 	fs->swim3 = (struct swim3 __iomem *)
 		ioremap(macio_resource_start(mdev, 0), 0x200);
 	if (fs->swim3 == NULL) {
-		printk("fd%d: couldn't map registers for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Couldn't map mmio registers\n");
 		rc = -ENOMEM;
 		goto out_release;
 	}
 	fs->dma = (struct dbdma_regs __iomem *)
 		ioremap(macio_resource_start(mdev, 1), 0x200);
 	if (fs->dma == NULL) {
-		printk("fd%d: couldn't map DMA for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Couldn't map dma registers\n");
 		iounmap(fs->swim3);
 		rc = -ENOMEM;
 		goto out_release;
@@ -1090,31 +1160,25 @@
 	fs->secpercyl = 36;
 	fs->secpertrack = 18;
 	fs->total_secs = 2880;
-	fs->mdev = mdev;
 	init_waitqueue_head(&fs->wait);
 
 	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
 	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
 	st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
 
+	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+		swim3_mb_event(mdev, MB_FD);
+
 	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-		printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-		       index, fs->swim3_intr, swim->full_name);
+		swim3_err("%s", "Couldn't request interrupt\n");
 		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
 		goto out_unmap;
 		return -EBUSY;
 	}
-/*
-	if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-		printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-		       fs->dma_intr);
-		return -EBUSY;
-	}
-*/
 
 	init_timer(&fs->timeout);
 
-	printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+	swim3_info("SWIM3 floppy controller %s\n",
 		mdev->media_bay ? "in media bay" : "");
 
 	return 0;
@@ -1132,41 +1196,42 @@
 
 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-	int i, rc;
 	struct gendisk *disk;
+	int index, rc;
+
+	index = floppy_count++;
+	if (index >= MAX_FLOPPIES)
+		return -ENXIO;
 
 	/* Add the drive */
-	rc = swim3_add_device(mdev, floppy_count);
+	rc = swim3_add_device(mdev, index);
 	if (rc)
 		return rc;
+	/* Now register that disk. Same comment about failure handling */
+	disk = disks[index] = alloc_disk(1);
+	if (disk == NULL)
+		return -ENOMEM;
+	disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+	if (disk->queue == NULL) {
+		put_disk(disk);
+		return -ENOMEM;
+	}
+	disk->queue->queuedata = &floppy_states[index];
 
-	/* Now create the queue if not there yet */
-	if (swim3_queue == NULL) {
+	if (index == 0) {
 		/* If we failed, there isn't much we can do as the driver is still
 		 * too dumb to remove the device, just bail out
 		 */
 		if (register_blkdev(FLOPPY_MAJOR, "fd"))
 			return 0;
-		swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-		if (swim3_queue == NULL) {
-			unregister_blkdev(FLOPPY_MAJOR, "fd");
-			return 0;
-		}
 	}
 
-	/* Now register that disk. Same comment about failure handling */
-	i = floppy_count++;
-	disk = disks[i] = alloc_disk(1);
-	if (disk == NULL)
-		return 0;
-
 	disk->major = FLOPPY_MAJOR;
-	disk->first_minor = i;
+	disk->first_minor = index;
 	disk->fops = &floppy_fops;
-	disk->private_data = &floppy_states[i];
-	disk->queue = swim3_queue;
+	disk->private_data = &floppy_states[index];
 	disk->flags |= GENHD_FL_REMOVABLE;
-	sprintf(disk->disk_name, "fd%d", i);
+	sprintf(disk->disk_name, "fd%d", index);
 	set_capacity(disk, 2880);
 	add_disk(disk);
 
@@ -1194,6 +1259,9 @@
 		.of_match_table	= swim3_match,
 	},
 	.probe		= swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+	.mediabay_event	= swim3_mb_event,
+#endif
 #if 0
 	.suspend	= swim3_suspend,
 	.resume		= swim3_resume,
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f759ad4..8069322 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -613,7 +613,7 @@
 	case XenbusStateConnected:
 		/*
 		 * Ensure we connect even when two watches fire in
-		 * close successsion and we miss the intermediate value
+		 * close succession and we miss the intermediate value
 		 * of frontend_state.
 		 */
 		if (dev->state == XenbusStateConnected)
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd..5ccf142 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@
 	  The core driver to support Marvell Bluetooth devices.
 
 	  This driver is required if you want to support
-	  Marvell Bluetooth devices, such as 8688/8787.
+	  Marvell Bluetooth devices, such as 8688/8787/8797.
 
 	  Say Y here to compile Marvell Bluetooth driver
 	  into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@
 	  The driver for Marvell Bluetooth chipsets with SDIO interface.
 
 	  This driver is required if you want to use Marvell Bluetooth
-	  devices with SDIO interface. Currently SD8688/SD8787 chipsets are
-	  supported.
+	  devices with SDIO interface. Currently SD8688/SD8787/SD8797
+	  chipsets are supported.
 
 	  Say Y here to compile support for Marvell BT-over-SDIO driver
 	  into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 106beb1..07f14d1 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -30,6 +30,7 @@
 #include <net/bluetooth/bluetooth.h>
 
 #define VERSION "1.0"
+#define ATH3K_FIRMWARE	"ath3k-1.fw"
 
 #define ATH3K_DNLOAD				0x01
 #define ATH3K_GETSTATE				0x05
@@ -400,9 +401,15 @@
 		return 0;
 	}
 
-	if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
-		BT_ERR("Error loading firmware");
-		return -EIO;
+	ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			BT_ERR("Firmware file \"%s\" not found",
+							ATH3K_FIRMWARE);
+		else
+			BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+							ATH3K_FIRMWARE, ret);
+		return ret;
 	}
 
 	ret = ath3k_load_firmware(udev, firmware);
@@ -423,22 +430,10 @@
 	.id_table	= ath3k_table,
 };
 
-static int __init ath3k_init(void)
-{
-	BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
-	return usb_register(&ath3k_driver);
-}
-
-static void __exit ath3k_exit(void)
-{
-	usb_deregister(&ath3k_driver);
-}
-
-module_init(ath3k_init);
-module_exit(ath3k_exit);
+module_usb_driver(ath3k_driver);
 
 MODULE_AUTHOR("Atheros Communications");
 MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("ath3k-1.fw");
+MODULE_FIRMWARE(ATH3K_FIRMWARE);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 54952ab..1e742a5 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -281,26 +281,7 @@
 	.id_table	= bcm203x_table,
 };
 
-static int __init bcm203x_init(void)
-{
-	int err;
-
-	BT_INFO("Broadcom Blutonium firmware driver ver %s", VERSION);
-
-	err = usb_register(&bcm203x_driver);
-	if (err < 0)
-		BT_ERR("Failed to register USB driver");
-
-	return err;
-}
-
-static void __exit bcm203x_exit(void)
-{
-	usb_deregister(&bcm203x_driver);
-}
-
-module_init(bcm203x_init);
-module_exit(bcm203x_exit);
+module_usb_driver(bcm203x_driver);
 
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 61b5914..a323bae 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -751,9 +751,7 @@
 
 	bfusb_close(hdev);
 
-	if (hci_unregister_dev(hdev) < 0)
-		BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 }
 
@@ -764,26 +762,7 @@
 	.id_table	= bfusb_table,
 };
 
-static int __init bfusb_init(void)
-{
-	int err;
-
-	BT_INFO("BlueFRITZ! USB driver ver %s", VERSION);
-
-	err = usb_register(&bfusb_driver);
-	if (err < 0)
-		BT_ERR("Failed to register BlueFRITZ! USB driver");
-
-	return err;
-}
-
-static void __exit bfusb_exit(void)
-{
-	usb_deregister(&bfusb_driver);
-}
-
-module_init(bfusb_init);
-module_exit(bfusb_exit);
+module_usb_driver(bfusb_driver);
 
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index aed1904..c6a0c61 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -844,9 +844,7 @@
 	/* Turn FPGA off */
 	outb(0x80, iobase + 0x30);
 
-	if (hci_unregister_dev(hdev) < 0)
-		BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 751b338..6283160 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -521,20 +521,7 @@
 	.id_table	= bpa10x_table,
 };
 
-static int __init bpa10x_init(void)
-{
-	BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION);
-
-	return usb_register(&bpa10x_driver);
-}
-
-static void __exit bpa10x_exit(void)
-{
-	usb_deregister(&bpa10x_driver);
-}
-
-module_init(bpa10x_init);
-module_exit(bpa10x_exit);
+module_usb_driver(bpa10x_driver);
 
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4fc0194..0c97e5d 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -636,9 +636,7 @@
 
 	bt3c_hci_close(hdev);
 
-	if (hci_unregister_dev(hdev) < 0)
-		BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c..6c3defa 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@
 
 	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		add_wait_queue(&thread->wait_q, &wait);
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef4816..27b74b0 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@
 	.io_port_1 = 0x01,
 	.io_port_2 = 0x02,
 };
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
 	.cfg = 0x00,
 	.host_int_mask = 0x02,
 	.host_intstatus = 0x03,
@@ -92,7 +92,14 @@
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
 	.helper		= NULL,
 	.firmware	= "mrvl/sd8787_uapsta.bin",
-	.reg		= &btmrvl_reg_8787,
+	.reg		= &btmrvl_reg_87xx,
+	.sd_blksz_fw_dl	= 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+	.helper		= NULL,
+	.firmware	= "mrvl/sd8797_uapsta.bin",
+	.reg		= &btmrvl_reg_87xx,
 	.sd_blksz_fw_dl	= 256,
 };
 
@@ -103,6 +110,9 @@
 	/* Marvell SD8787 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
 			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+	/* Marvell SD8797 Bluetooth device */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+			.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
 
 	{ }	/* Terminating entry */
 };
@@ -1076,3 +1086,4 @@
 MODULE_FIRMWARE("sd8688_helper.bin");
 MODULE_FIRMWARE("sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 526b618..200b3a2 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -565,9 +565,7 @@
 
 	spin_unlock_irqrestore(&(info->lock), flags);
 
-	if (hci_unregister_dev(hdev) < 0)
-		BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fe4ebc3..55ac349 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,6 +101,7 @@
 	{ USB_DEVICE(0x0c10, 0x0000) },
 
 	/* Broadcom BCM20702A0 */
+	{ USB_DEVICE(0x0a5c, 0x21e3) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 
 	{ }	/* Terminating entry */
@@ -315,7 +316,8 @@
 
 	err = usb_submit_urb(urb, mem_flags);
 	if (err < 0) {
-		BT_ERR("%s urb %p submission failed (%d)",
+		if (err != -EPERM && err != -ENODEV)
+			BT_ERR("%s urb %p submission failed (%d)",
 						hdev->name, urb, -err);
 		usb_unanchor_urb(urb);
 	}
@@ -400,7 +402,8 @@
 
 	err = usb_submit_urb(urb, mem_flags);
 	if (err < 0) {
-		BT_ERR("%s urb %p submission failed (%d)",
+		if (err != -EPERM && err != -ENODEV)
+			BT_ERR("%s urb %p submission failed (%d)",
 						hdev->name, urb, -err);
 		usb_unanchor_urb(urb);
 	}
@@ -506,15 +509,10 @@
 
 	pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
 
-	urb->dev      = data->udev;
-	urb->pipe     = pipe;
-	urb->context  = hdev;
-	urb->complete = btusb_isoc_complete;
-	urb->interval = data->isoc_rx_ep->bInterval;
+	usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
+				hdev, data->isoc_rx_ep->bInterval);
 
 	urb->transfer_flags  = URB_FREE_BUFFER | URB_ISO_ASAP;
-	urb->transfer_buffer = buf;
-	urb->transfer_buffer_length = size;
 
 	__fill_isoc_descriptor(urb, size,
 			le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
@@ -523,7 +521,8 @@
 
 	err = usb_submit_urb(urb, mem_flags);
 	if (err < 0) {
-		BT_ERR("%s urb %p submission failed (%d)",
+		if (err != -EPERM && err != -ENODEV)
+			BT_ERR("%s urb %p submission failed (%d)",
 						hdev->name, urb, -err);
 		usb_unanchor_urb(urb);
 	}
@@ -727,6 +726,9 @@
 		usb_fill_bulk_urb(urb, data->udev, pipe,
 				skb->data, skb->len, btusb_tx_complete, skb);
 
+		if (skb->priority >= HCI_PRIO_MAX - 1)
+			urb->transfer_flags  = URB_ISO_ASAP;
+
 		hdev->stat.acl_tx++;
 		break;
 
@@ -770,16 +772,17 @@
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err < 0) {
-		BT_ERR("%s urb %p submission failed", hdev->name, urb);
+		if (err != -EPERM && err != -ENODEV)
+			BT_ERR("%s urb %p submission failed (%d)",
+						hdev->name, urb, -err);
 		kfree(urb->setup_packet);
 		usb_unanchor_urb(urb);
 	} else {
 		usb_mark_last_busy(data->udev);
 	}
 
-	usb_free_urb(urb);
-
 done:
+	usb_free_urb(urb);
 	return err;
 }
 
@@ -1223,20 +1226,7 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init btusb_init(void)
-{
-	BT_INFO("Generic Bluetooth USB driver ver %s", VERSION);
-
-	return usb_register(&btusb_driver);
-}
-
-static void __exit btusb_exit(void)
-{
-	usb_deregister(&btusb_driver);
-}
-
-module_init(btusb_init);
-module_exit(btusb_exit);
+module_usb_driver(btusb_driver);
 
 module_param(ignore_dga, bool, 0644);
 MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 5e4c2de..969bb22 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -551,9 +551,7 @@
 
 	spin_unlock_irqrestore(&(info->lock), flags);
 
-	if (hci_unregister_dev(hdev) < 0)
-		BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 67c180c..2ed6ab1 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,6 +41,8 @@
 
 #define VERSION "1.3"
 
+static bool amp;
+
 struct vhci_data {
 	struct hci_dev *hdev;
 
@@ -239,6 +241,9 @@
 	hdev->bus = HCI_VIRTUAL;
 	hdev->driver_data = data;
 
+	if (amp)
+		hdev->dev_type = HCI_AMP;
+
 	hdev->open     = vhci_open_dev;
 	hdev->close    = vhci_close_dev;
 	hdev->flush    = vhci_flush;
@@ -264,10 +269,7 @@
 	struct vhci_data *data = file->private_data;
 	struct hci_dev *hdev = data->hdev;
 
-	if (hci_unregister_dev(hdev) < 0) {
-		BT_ERR("Can't unregister HCI device %s", hdev->name);
-	}
-
+	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 
 	file->private_data = NULL;
@@ -306,6 +308,9 @@
 module_init(vhci_init);
 module_exit(vhci_exit);
 
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index f997c27..2118211 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -267,7 +267,6 @@
 
 #include <linux/module.h>
 #include <linux/fs.h>
-#include <linux/buffer_head.h>
 #include <linux/major.h>
 #include <linux/types.h>
 #include <linux/errno.h>
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 52e08ca..3d3c1e6 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -95,6 +95,8 @@
 	{0, 0},
 };
 
+MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
+
 static struct amba_driver nmk_rng_driver = {
 	.drv = {
 		.owner = THIS_MODULE,
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 3ed20e8..cdd4c09f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -560,7 +560,7 @@
 		BT_CONTROL(BT_H_BUSY);		/* set */
 
 		/*
-		 * Uncached, ordered writes should just proceeed serially but
+		 * Uncached, ordered writes should just proceed serially but
 		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
 		 * workaround without too much penalty to the general case.
 		 */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffa..34767a6 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -139,6 +139,8 @@
 #define IPMI_WDOG_SET_TIMER		0x24
 #define IPMI_WDOG_GET_TIMER		0x25
 
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP	0x80
+
 /* These are here until the real ones get into the watchdog.h interface. */
 #ifndef WDIOC_GETTIMEOUT
 #define	WDIOC_GETTIMEOUT        _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@
 	struct kernel_ipmi_msg            msg;
 	int                               rv;
 	struct ipmi_system_interface_addr addr;
+	int				  timeout_retries = 0;
 
 	if (ipmi_ignore_heartbeat)
 		return 0;
@@ -616,6 +619,7 @@
 
 	mutex_lock(&heartbeat_lock);
 
+restart:
 	atomic_set(&heartbeat_tofree, 2);
 
 	/*
@@ -653,7 +657,33 @@
 	/* Wait for the heartbeat to be sent. */
 	wait_for_completion(&heartbeat_wait);
 
-	if (heartbeat_recv_msg.msg.data[0] != 0) {
+	if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+		timeout_retries++;
+		if (timeout_retries > 3) {
+			printk(KERN_ERR PFX ": Unable to restore the IPMI"
+			       " watchdog's settings, giving up.\n");
+			rv = -EIO;
+			goto out_unlock;
+		}
+
+		/*
+		 * The timer was not initialized, that means the BMC was
+		 * probably reset and lost the watchdog information.  Attempt
+		 * to restore the timer's info.  Note that we still hold
+		 * the heartbeat lock, to keep a heartbeat from happening
+		 * in this process, so must say no heartbeat to avoid a
+		 * deadlock on this mutex.
+		 */
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		if (rv) {
+			printk(KERN_ERR PFX ": Unable to send the command to"
+			       " set the watchdog's settings, giving up.\n");
+			goto out_unlock;
+		}
+
+		/* We might need a new heartbeat, so do it now */
+		goto restart;
+	} else if (heartbeat_recv_msg.msg.data[0] != 0) {
 		/*
 		 * Got an error in the heartbeat response.  It was already
 		 * reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@
 		rv = -EINVAL;
 	}
 
+out_unlock:
 	mutex_unlock(&heartbeat_lock);
 
 	return rv;
@@ -922,11 +953,15 @@
 static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
 				  void                 *handler_data)
 {
-	if (msg->msg.data[0] != 0) {
+	if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+			msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+		printk(KERN_INFO PFX "response: The IPMI controller appears"
+		       " to have been reset, will attempt to reinitialize"
+		       " the watchdog timer\n");
+	else if (msg->msg.data[0] != 0)
 		printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
 		       msg->msg.data[0],
 		       msg->msg.cmd);
-	}
 
 	ipmi_free_recv_msg(msg);
 }
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1451790..d6e9d08 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -847,7 +847,7 @@
 
 static const struct memdev {
 	const char *name;
-	mode_t mode;
+	umode_t mode;
 	const struct file_operations *fops;
 	struct backing_dev_info *dev_info;
 } devlist[] = {
@@ -901,7 +901,7 @@
 	.llseek = noop_llseek,
 };
 
-static char *mem_devnode(struct device *dev, mode_t *mode)
+static char *mem_devnode(struct device *dev, umode_t *mode)
 {
 	if (mode && devlist[MINOR(dev->devt)].mode)
 		*mode = devlist[MINOR(dev->devt)].mode;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 778273c..522136d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -258,7 +258,7 @@
 EXPORT_SYMBOL(misc_register);
 EXPORT_SYMBOL(misc_deregister);
 
-static char *misc_devnode(struct device *dev, mode_t *mode)
+static char *misc_devnode(struct device *dev, umode_t *mode)
 {
 	struct miscdevice *c = dev_get_drvdata(dev);
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6035ab8..85da874 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -624,8 +624,8 @@
 static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 {
 	struct {
-		cycles_t cycles;
 		long jiffies;
+		unsigned cycles;
 		unsigned num;
 	} sample;
 	long delta, delta2, delta3;
@@ -637,7 +637,11 @@
 		goto out;
 
 	sample.jiffies = jiffies;
-	sample.cycles = get_cycles();
+
+	/* Use arch random value, fall back to cycles */
+	if (!arch_get_random_int(&sample.cycles))
+		sample.cycles = get_cycles();
+
 	sample.num = num;
 	mix_pool_bytes(&input_pool, &sample, sizeof(sample));
 
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index b6de2c0..54a3a6d 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -308,7 +308,7 @@
 
 static struct cdev raw_cdev;
 
-static char *raw_devnode(struct device *dev, mode_t *mode)
+static char *raw_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
 }
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index cf3ee00..4dc0194 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -329,7 +329,7 @@
 	__ATTR_NULL
 };
 
-static char *srom_devnode(struct device *dev, mode_t *mode)
+static char *srom_devnode(struct device *dev, umode_t *mode)
 {
 	*mode = S_IRUGO | S_IWUSR;
 	return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index effe797..6b5cf02 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@
 #ifndef CONFIG_X86_64
 #include <asm/mach_timer.h>
 #define PMTMR_EXPECTED_RATE \
-  ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
+  ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))
 /*
  * Some boards have the PMTMR running way too fast. We check
  * the PMTMR rate against PIT channel 2 to catch these cases.
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 59feefe..fb6b6d2 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -58,25 +58,15 @@
 };
 
 #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static DEFINE_CLOCK_DATA(cd);
 
-unsigned long long notrace sched_clock(void)
+static u32 notrace dbx500_prcmu_sched_clock_read(void)
 {
-	u32 cyc;
-
 	if (unlikely(!clksrc_dbx500_timer_base))
 		return 0;
 
-	cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
-
-	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+	return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
 }
 
-static void notrace clksrc_dbx500_prcmu_update_sched_clock(void)
-{
-	u32 cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
-	update_sched_clock(&cd, cyc, (u32)~0);
-}
 #endif
 
 void __init clksrc_dbx500_prcmu_init(void __iomem *base)
@@ -97,7 +87,7 @@
 		       clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
 	}
 #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-	init_sched_clock(&cd, clksrc_dbx500_prcmu_update_sched_clock,
+	setup_sched_clock(dbx500_prcmu_sched_clock_read,
 			 32, RATE_32K);
 #endif
 	clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 27c49e6..e7cab2d 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -53,7 +53,7 @@
 	count |= inb_p(PIT_CH0) << 8;
 
 	/* VIA686a test code... reset the latch if count > max + 1 */
-	if (count > LATCH) {
+	if (count > PIT_LATCH) {
 		outb_p(0x34, PIT_MODE);
 		outb_p(PIT_LATCH & 0xff, PIT_CH0);
 		outb_p(PIT_LATCH >> 8, PIT_CH0);
@@ -114,8 +114,8 @@
 	case CLOCK_EVT_MODE_PERIODIC:
 		/* binary, mode 2, LSB/MSB, ch 0 */
 		outb_p(0x34, PIT_MODE);
-		outb_p(LATCH & 0xff , PIT_CH0);	/* LSB */
-		outb_p(LATCH >> 8 , PIT_CH0);		/* MSB */
+		outb_p(PIT_LATCH & 0xff , PIT_CH0);	/* LSB */
+		outb_p(PIT_LATCH >> 8 , PIT_CH0);		/* MSB */
 		break;
 
 	case CLOCK_EVT_MODE_SHUTDOWN:
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 79c47e8..55d0f95 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -59,7 +59,6 @@
 	.rating         = 200,
 	.read           = tc_get_cycles,
 	.mask           = CLOCKSOURCE_MASK(32),
-	.shift          = 18,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -256,7 +255,6 @@
 		best_divisor_idx = i;
 	}
 
-	clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift);
 
 	printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
 			divided_rate / 1000000,
@@ -292,7 +290,7 @@
 	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
 
 	/* and away we go! */
-	clocksource_register(&clksrc);
+	clocksource_register_hz(&clksrc, divided_rate);
 
 	/* channel 2:  periodic and oneshot timer support */
 	setup_clkevents(tc, clk32k_divisor_idx);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 987a165..8c2df34 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -679,7 +679,7 @@
  */
 static int cpufreq_add_dev_policy(unsigned int cpu,
 				  struct cpufreq_policy *policy,
-				  struct sys_device *sys_dev)
+				  struct device *dev)
 {
 	int ret = 0;
 #ifdef CONFIG_SMP
@@ -728,7 +728,7 @@
 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 			pr_debug("CPU already managed, adding link\n");
-			ret = sysfs_create_link(&sys_dev->kobj,
+			ret = sysfs_create_link(&dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
 			if (ret)
@@ -761,7 +761,7 @@
 
 	for_each_cpu(j, policy->cpus) {
 		struct cpufreq_policy *managed_policy;
-		struct sys_device *cpu_sys_dev;
+		struct device *cpu_dev;
 
 		if (j == cpu)
 			continue;
@@ -770,8 +770,8 @@
 
 		pr_debug("CPU %u already managed, adding link\n", j);
 		managed_policy = cpufreq_cpu_get(cpu);
-		cpu_sys_dev = get_cpu_sysdev(j);
-		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+		cpu_dev = get_cpu_device(j);
+		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 					"cpufreq");
 		if (ret) {
 			cpufreq_cpu_put(managed_policy);
@@ -783,7 +783,7 @@
 
 static int cpufreq_add_dev_interface(unsigned int cpu,
 				     struct cpufreq_policy *policy,
-				     struct sys_device *sys_dev)
+				     struct device *dev)
 {
 	struct cpufreq_policy new_policy;
 	struct freq_attr **drv_attr;
@@ -793,7 +793,7 @@
 
 	/* prepare interface data */
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-				   &sys_dev->kobj, "cpufreq");
+				   &dev->kobj, "cpufreq");
 	if (ret)
 		return ret;
 
@@ -866,9 +866,9 @@
  * with cpu hotplugging and all hell will break loose. Tried to clean this
  * mess up, but more thorough testing is needed. - Mathieu
  */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	int ret = 0, found = 0;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
@@ -947,7 +947,7 @@
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+	ret = cpufreq_add_dev_policy(cpu, policy, dev);
 	if (ret) {
 		if (ret > 0)
 			/* This is a managed cpu, symlink created,
@@ -956,7 +956,7 @@
 		goto err_unlock_policy;
 	}
 
-	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+	ret = cpufreq_add_dev_interface(cpu, policy, dev);
 	if (ret)
 		goto err_out_unregister;
 
@@ -999,15 +999,15 @@
  * Caller should already have policy_rwsem in write mode for this CPU.
  * This routine frees the rwsem before returning.
  */
-static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	unsigned long flags;
 	struct cpufreq_policy *data;
 	struct kobject *kobj;
 	struct completion *cmp;
 #ifdef CONFIG_SMP
-	struct sys_device *cpu_sys_dev;
+	struct device *cpu_dev;
 	unsigned int j;
 #endif
 
@@ -1032,7 +1032,7 @@
 		pr_debug("removing link\n");
 		cpumask_clear_cpu(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		kobj = &sys_dev->kobj;
+		kobj = &dev->kobj;
 		cpufreq_cpu_put(data);
 		unlock_policy_rwsem_write(cpu);
 		sysfs_remove_link(kobj, "cpufreq");
@@ -1071,8 +1071,8 @@
 			strncpy(per_cpu(cpufreq_cpu_governor, j),
 				data->governor->name, CPUFREQ_NAME_LEN);
 #endif
-			cpu_sys_dev = get_cpu_sysdev(j);
-			kobj = &cpu_sys_dev->kobj;
+			cpu_dev = get_cpu_device(j);
+			kobj = &cpu_dev->kobj;
 			unlock_policy_rwsem_write(cpu);
 			sysfs_remove_link(kobj, "cpufreq");
 			lock_policy_rwsem_write(cpu);
@@ -1112,11 +1112,11 @@
 	if (unlikely(cpumask_weight(data->cpus) > 1)) {
 		/* first sibling now owns the new sysfs dir */
 		cpumask_clear_cpu(cpu, data->cpus);
-		cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
+		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
 
 		/* finally remove our own symlink */
 		lock_policy_rwsem_write(cpu);
-		__cpufreq_remove_dev(sys_dev);
+		__cpufreq_remove_dev(dev, sif);
 	}
 #endif
 
@@ -1128,9 +1128,9 @@
 }
 
 
-static int cpufreq_remove_dev(struct sys_device *sys_dev)
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	int retval;
 
 	if (cpu_is_offline(cpu))
@@ -1139,7 +1139,7 @@
 	if (unlikely(lock_policy_rwsem_write(cpu)))
 		BUG();
 
-	retval = __cpufreq_remove_dev(sys_dev);
+	retval = __cpufreq_remove_dev(dev, sif);
 	return retval;
 }
 
@@ -1271,9 +1271,11 @@
 }
 EXPORT_SYMBOL(cpufreq_get);
 
-static struct sysdev_driver cpufreq_sysdev_driver = {
-	.add		= cpufreq_add_dev,
-	.remove		= cpufreq_remove_dev,
+static struct subsys_interface cpufreq_interface = {
+	.name		= "cpufreq",
+	.subsys		= &cpu_subsys,
+	.add_dev	= cpufreq_add_dev,
+	.remove_dev	= cpufreq_remove_dev,
 };
 
 
@@ -1765,25 +1767,25 @@
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
-	if (sys_dev) {
+	dev = get_cpu_device(cpu);
+	if (dev) {
 		switch (action) {
 		case CPU_ONLINE:
 		case CPU_ONLINE_FROZEN:
-			cpufreq_add_dev(sys_dev);
+			cpufreq_add_dev(dev, NULL);
 			break;
 		case CPU_DOWN_PREPARE:
 		case CPU_DOWN_PREPARE_FROZEN:
 			if (unlikely(lock_policy_rwsem_write(cpu)))
 				BUG();
 
-			__cpufreq_remove_dev(sys_dev);
+			__cpufreq_remove_dev(dev, NULL);
 			break;
 		case CPU_DOWN_FAILED:
 		case CPU_DOWN_FAILED_FROZEN:
-			cpufreq_add_dev(sys_dev);
+			cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -1830,8 +1832,7 @@
 	cpufreq_driver = driver_data;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	ret = sysdev_driver_register(&cpu_sysdev_class,
-					&cpufreq_sysdev_driver);
+	ret = subsys_interface_register(&cpufreq_interface);
 	if (ret)
 		goto err_null_driver;
 
@@ -1850,7 +1851,7 @@
 		if (ret) {
 			pr_debug("no CPU initialized for driver %s\n",
 							driver_data->name);
-			goto err_sysdev_unreg;
+			goto err_if_unreg;
 		}
 	}
 
@@ -1858,9 +1859,8 @@
 	pr_debug("driver %s up and running\n", driver_data->name);
 
 	return 0;
-err_sysdev_unreg:
-	sysdev_driver_unregister(&cpu_sysdev_class,
-			&cpufreq_sysdev_driver);
+err_if_unreg:
+	subsys_interface_unregister(&cpufreq_interface);
 err_null_driver:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
@@ -1887,7 +1887,7 @@
 
 	pr_debug("unregistering driver %s\n", driver->name);
 
-	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+	subsys_interface_unregister(&cpufreq_interface);
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1907,8 +1907,7 @@
 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
 	}
 
-	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
-						&cpu_sysdev_class.kset.kobj);
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);
 
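The cpufreq hunks above move the driver from the old sysdev driver model to a subsys_interface on cpu_subsys: the add_dev/remove_dev callbacks take a struct device (whose ->id is the CPU number) plus the interface pointer, and registration goes through subsys_interface_register(). A minimal sketch of that pattern follows; the example_* names are hypothetical, not part of the patch:

	#include <linux/init.h>
	#include <linux/cpu.h>
	#include <linux/device.h>

	/* illustrative sketch of a subsys_interface client, not kernel code */
	static int example_add(struct device *dev, struct subsys_interface *sif)
	{
		/* for devices on cpu_subsys, dev->id is the CPU number */
		pr_info("example: cpu%u added\n", dev->id);
		return 0;
	}

	static int example_remove(struct device *dev, struct subsys_interface *sif)
	{
		pr_info("example: cpu%u removed\n", dev->id);
		return 0;
	}

	static struct subsys_interface example_interface = {
		.name		= "example",
		.subsys		= &cpu_subsys,
		.add_dev	= example_add,
		.remove_dev	= example_remove,
	};

	static int __init example_init(void)
	{
		/* invokes .add_dev for every CPU device already on the bus */
		return subsys_interface_register(&example_interface);
	}
	device_initcall(example_init);

subsys_interface_unregister() reverses this and calls .remove_dev for each device, which is the path cpufreq_unregister_driver() now uses.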
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468..235a340 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@
 	.freq_step = 5,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
@@ -353,20 +352,20 @@
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -501,10 +500,9 @@
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
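Both governors replace the cputime64_add()/kstat_cpu() arithmetic with plain u64 sums over the per-CPU kcpustat array. A minimal sketch of the new accessor pattern, mirroring get_cpu_idle_time_jiffy() above; the helper name is hypothetical:

	#include <linux/kernel_stat.h>
	#include <linux/jiffies.h>

	/* illustrative sketch: sum of non-idle cputime for one CPU, in usecs */
	static u64 example_busy_usecs(unsigned int cpu)
	{
		u64 busy;

		busy  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
		busy += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
		busy += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
		busy += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
		busy += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
		busy += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

		return jiffies_to_usecs(busy);
	}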
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4e..3d679ee 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 	}
 	return count;
@@ -442,24 +441,24 @@
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
-				j_dbs_info->prev_cpu_iowait);
+		iowait_time = (unsigned int)
+			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
 		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -646,10 +645,9 @@
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->cpu = cpu;
 		this_dbs_info->rate_mult = 1;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a9..b40ee14 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -11,7 +11,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/sysfs.h>
 #include <linux/cpufreq.h>
@@ -61,9 +60,8 @@
 	spin_lock(&cpufreq_stats_lock);
 	stat = per_cpu(cpufreq_stats_table, cpu);
 	if (stat->time_in_state)
-		stat->time_in_state[stat->last_index] =
-			cputime64_add(stat->time_in_state[stat->last_index],
-				      cputime_sub(cur_time, stat->last_time));
+		stat->time_in_state[stat->last_index] +=
+			cur_time - stat->last_time;
 	stat->last_time = cur_time;
 	spin_unlock(&cpufreq_stats_lock);
 	return 0;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 06ce268..59f4261 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -291,10 +291,10 @@
 static int __cpuidle_register_device(struct cpuidle_device *dev)
 {
 	int ret;
-	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
 
-	if (!sys_dev)
+	if (!cpu_dev)
 		return -EINVAL;
 	if (!try_module_get(cpuidle_driver->owner))
 		return -EINVAL;
@@ -303,7 +303,7 @@
 
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
-	if ((ret = cpuidle_add_sysfs(sys_dev))) {
+	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
 		module_put(cpuidle_driver->owner);
 		return ret;
 	}
@@ -344,7 +344,7 @@
  */
 void cpuidle_unregister_device(struct cpuidle_device *dev)
 {
-	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
 
 	if (dev->registered == 0)
@@ -354,7 +354,7 @@
 
 	cpuidle_disable_device(dev);
 
-	cpuidle_remove_sysfs(sys_dev);
+	cpuidle_remove_sysfs(cpu_dev);
 	list_del(&dev->device_list);
 	wait_for_completion(&dev->kobj_unregister);
 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
@@ -411,7 +411,7 @@
 	if (cpuidle_disabled())
 		return -ENODEV;
 
-	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
+	ret = cpuidle_add_interface(cpu_subsys.dev_root);
 	if (ret)
 		return ret;
 
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 38c3fd8..7db1866 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,7 +5,7 @@
 #ifndef __DRIVER_CPUIDLE_H
 #define __DRIVER_CPUIDLE_H
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 /* For internal use only */
 extern struct cpuidle_governor *cpuidle_curr_governor;
@@ -23,11 +23,11 @@
 extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
 
 /* sysfs */
-extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
-extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
+extern int cpuidle_add_interface(struct device *dev);
+extern void cpuidle_remove_interface(struct device *dev);
 extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
 extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct sys_device *sysdev);
-extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
+extern int cpuidle_add_sysfs(struct device *dev);
+extern void cpuidle_remove_sysfs(struct device *dev);
 
 #endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 1e756e1..3fe41fe 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -22,8 +22,8 @@
 }
 __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
 
-static ssize_t show_available_governors(struct sysdev_class *class,
-					struct sysdev_class_attribute *attr,
+static ssize_t show_available_governors(struct device *dev,
+					struct device_attribute *attr,
 					char *buf)
 {
 	ssize_t i = 0;
@@ -42,8 +42,8 @@
 	return i;
 }
 
-static ssize_t show_current_driver(struct sysdev_class *class,
-				   struct sysdev_class_attribute *attr,
+static ssize_t show_current_driver(struct device *dev,
+				   struct device_attribute *attr,
 				   char *buf)
 {
 	ssize_t ret;
@@ -59,8 +59,8 @@
 	return ret;
 }
 
-static ssize_t show_current_governor(struct sysdev_class *class,
-				     struct sysdev_class_attribute *attr,
+static ssize_t show_current_governor(struct device *dev,
+				     struct device_attribute *attr,
 				     char *buf)
 {
 	ssize_t ret;
@@ -75,8 +75,8 @@
 	return ret;
 }
 
-static ssize_t store_current_governor(struct sysdev_class *class,
-				      struct sysdev_class_attribute *attr,
+static ssize_t store_current_governor(struct device *dev,
+				      struct device_attribute *attr,
 				      const char *buf, size_t count)
 {
 	char gov_name[CPUIDLE_NAME_LEN];
@@ -109,50 +109,48 @@
 		return count;
 }
 
-static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
-static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
-			 NULL);
+static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
+static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
 
-static struct attribute *cpuclass_default_attrs[] = {
-	&attr_current_driver.attr,
-	&attr_current_governor_ro.attr,
+static struct attribute *cpuidle_default_attrs[] = {
+	&dev_attr_current_driver.attr,
+	&dev_attr_current_governor_ro.attr,
 	NULL
 };
 
-static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
-			 NULL);
-static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
-			 store_current_governor);
+static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
+static DEVICE_ATTR(current_governor, 0644, show_current_governor,
+		   store_current_governor);
 
-static struct attribute *cpuclass_switch_attrs[] = {
-	&attr_available_governors.attr,
-	&attr_current_driver.attr,
-	&attr_current_governor.attr,
+static struct attribute *cpuidle_switch_attrs[] = {
+	&dev_attr_available_governors.attr,
+	&dev_attr_current_driver.attr,
+	&dev_attr_current_governor.attr,
 	NULL
 };
 
-static struct attribute_group cpuclass_attr_group = {
-	.attrs = cpuclass_default_attrs,
+static struct attribute_group cpuidle_attr_group = {
+	.attrs = cpuidle_default_attrs,
 	.name = "cpuidle",
 };
 
 /**
- * cpuidle_add_class_sysfs - add CPU global sysfs attributes
+ * cpuidle_add_interface - add CPU global sysfs attributes
  */
-int cpuidle_add_class_sysfs(struct sysdev_class *cls)
+int cpuidle_add_interface(struct device *dev)
 {
 	if (sysfs_switch)
-		cpuclass_attr_group.attrs = cpuclass_switch_attrs;
+		cpuidle_attr_group.attrs = cpuidle_switch_attrs;
 
-	return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group);
+	return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
 }
 
 /**
- * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes
+ * cpuidle_remove_interface - remove CPU global sysfs attributes
  */
-void cpuidle_remove_class_sysfs(struct sysdev_class *cls)
+void cpuidle_remove_interface(struct device *dev)
 {
-	sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group);
+	sysfs_remove_group(&dev->kobj, &cpuidle_attr_group);
 }
 
 struct cpuidle_attr {
@@ -365,16 +363,16 @@
 
 /**
  * cpuidle_add_sysfs - creates a sysfs instance for the target device
- * @sysdev: the target device
+ * @dev: the target device
  */
-int cpuidle_add_sysfs(struct sys_device *sysdev)
+int cpuidle_add_sysfs(struct device *cpu_dev)
 {
-	int cpu = sysdev->id;
+	int cpu = cpu_dev->id;
 	struct cpuidle_device *dev;
 	int error;
 
 	dev = per_cpu(cpuidle_devices, cpu);
-	error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj,
+	error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
 				     "cpuidle");
 	if (!error)
 		kobject_uevent(&dev->kobj, KOBJ_ADD);
@@ -383,11 +381,11 @@
 
 /**
  * cpuidle_remove_sysfs - deletes a sysfs instance on the target device
- * @sysdev: the target device
+ * @dev: the target device
  */
-void cpuidle_remove_sysfs(struct sys_device *sysdev)
+void cpuidle_remove_sysfs(struct device *cpu_dev)
 {
-	int cpu = sysdev->id;
+	int cpu = cpu_dev->id;
 	struct cpuidle_device *dev;
 
 	dev = per_cpu(cpuidle_devices, cpu);
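The cpuidle sysfs hunks above convert SYSDEV_CLASS_ATTR to DEVICE_ATTR, whose show/store callbacks take a struct device and which emits a dev_attr_<name> variable for the attribute arrays. A minimal sketch of the converted attribute shape, with a hypothetical "example" attribute:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* illustrative sketch, not part of the patch */
	static ssize_t show_example(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "hello\n");
	}

	/* DEVICE_ATTR(example, ...) defines dev_attr_example */
	static DEVICE_ATTR(example, 0444, show_example, NULL);

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL
	};

	static struct attribute_group example_attr_group = {
		.attrs	= example_attrs,
		.name	= "example",	/* optional subdirectory name */
	};

The group is then created under a device's kobject with sysfs_create_group(&dev->kobj, &example_attr_group) and torn down with sysfs_remove_group(), which is what cpuidle_add_interface() and cpuidle_remove_interface() do above.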
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f04910..464fa21 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@
 
 comment "DEVFREQ Drivers"
 
+config ARM_EXYNOS4_BUS_DEVFREQ
+	bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+	depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+	select ARCH_HAS_OPP
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	help
+	  This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+	  and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+	  It reads PPMU counters of memory controllers and adjusts
+	  the operating frequencies and voltages with OPP support.
+	  To operate with optimal voltages, ASV support is required
+	  (CONFIG_EXYNOS_ASV).
+
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89..8c46423 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@
 obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)	+= exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9..c189b82 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@
 		if (!IS_ERR(devfreq)) {
 			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
 			err = -EINVAL;
-			goto out;
+			goto err_out;
 		}
 	}
 
@@ -356,7 +356,7 @@
 		dev_err(dev, "%s: Unable to create devfreq for the device\n",
 			__func__);
 		err = -ENOMEM;
-		goto out;
+		goto err_out;
 	}
 
 	mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@
 				   devfreq->next_polling);
 	}
 	mutex_unlock(&devfreq_list_lock);
-	goto out;
+out:
+	return devfreq;
+
 err_init:
 	device_unregister(&devfreq->dev);
 err_dev:
 	mutex_unlock(&devfreq->lock);
 	kfree(devfreq);
-out:
-	if (err)
-		return ERR_PTR(err);
-	else
-		return devfreq;
+err_out:
+	return ERR_PTR(err);
 }
 
 /**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 0000000..6460577
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
+/* drivers/devfreq/exynos4210_memorybus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ *	This version supports EXYNOS4210 only. This changes bus frequencies
+ *	and vddint voltages. Exynos4412/4212 should be able to be supported
+ *	with minor modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+#ifdef CONFIG_EXYNOS_ASV
+extern unsigned int exynos_result_of_asv;
+#endif
+
+#include <mach/regs-clock.h>
+
+#include <plat/map-s5p.h>
+
+#define MAX_SAFEVOLT	1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+	TYPE_BUSF_EXYNOS4210,
+	TYPE_BUSF_EXYNOS4x12,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% */
+#define BUS_SATURATION_RATIO	40
+
+enum ppmu_counter {
+	PPMU_PMNCNT0 = 0,
+	PPMU_PMCCNT1,
+	PPMU_PMNCNT2,
+	PPMU_PMNCNT3,
+	PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu {
+	void __iomem *hw_base;
+	unsigned int ccnt;
+	unsigned int event;
+	unsigned int count[PPMU_PMNCNT_MAX];
+	bool ccnt_overflow;
+	bool count_overflow[PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+	LV_0 = 0,
+	LV_1,
+	LV_2,
+	LV_3,
+	LV_4,
+	_LV_END
+};
+#define EX4210_LV_MAX	LV_2
+#define EX4x12_LV_MAX	LV_4
+#define EX4210_LV_NUM	(LV_2 + 1)
+#define EX4x12_LV_NUM	(LV_4 + 1)
+
+struct busfreq_data {
+	enum exynos4_busf_type type;
+	struct device *dev;
+	struct devfreq *devfreq;
+	bool disabled;
+	struct regulator *vdd_int;
+	struct regulator *vdd_mif; /* Exynos4412/4212 only */
+	struct opp *curr_opp;
+	struct exynos4_ppmu dmc[2];
+
+	struct notifier_block pm_notifier;
+	struct mutex lock;
+
+	/* Dividers calculated at boot/probe-time */
+	unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+	unsigned int top_divtable[_LV_END];
+};
+
+struct bus_opp_table {
+	unsigned int idx;
+	unsigned long clk;
+	unsigned long volt;
+};
+
+/* 4210 controls clock of mif and voltage of int */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+	{LV_0, 400000, 1150000},
+	{LV_1, 267000, 1050000},
+	{LV_2, 133000, 1025000},
+	{0, 0, 0},
+};
+
+/*
+ * The MIF clock is the main control knob for Exynos4x12 MIF/INT;
+ * the clock and voltage of both MIF and INT are controlled through it.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+	{LV_0, 400000, 1100000},
+	{LV_1, 267000, 1000000},
+	{LV_2, 160000, 950000},
+	{LV_3, 133000, 950000},
+	{LV_4, 100000, 950000},
+	{0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12. LV_x is not meant to represent
+ * the current performance. (MIF does)
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+	{LV_0, 200000, 1000000},
+	{LV_1, 160000, 950000},
+	{LV_2, 133000, 925000},
+	{LV_3, 100000, 900000},
+	{0, 0, 0},
+};
+
+/* TODO: asv volt definitions are "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+	{1150000, 1050000, 1050000},
+	{1125000, 1025000, 1025000},
+	{1100000, 1000000, 1000000},
+	{1075000, 975000, 975000},
+	{1050000, 950000, 950000},
+};
+
+static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
+	/* 400      267     160     133     100 */
+	{1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
+	{1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
+	{1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+	{1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
+	{1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
+	{1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
+	{1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
+	{1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
+	{1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
+	/* 200    160      133     100 */
+	{1000000, 950000, 925000, 900000}, /* ASV0 */
+	{975000,  925000, 925000, 900000}, /* ASV1 */
+	{950000,  925000, 900000, 875000}, /* ASV2 */
+	{950000,  900000, 900000, 875000}, /* ASV3 */
+	{925000,  875000, 875000, 875000}, /* ASV4 */
+	{900000,  850000, 850000, 850000}, /* ASV5 */
+	{900000,  850000, 850000, 850000}, /* ASV6 */
+	{900000,  850000, 850000, 850000}, /* ASV7 */
+	{900000,  850000, 850000, 850000}, /* ASV8 */
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+	 *		DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+	 */
+
+	/* DMC L0: 400MHz */
+	{ 3, 1, 1, 1, 1, 1, 3, 1 },
+	/* DMC L1: 266.7MHz */
+	{ 4, 1, 1, 2, 1, 1, 3, 1 },
+	/* DMC L2: 133MHz */
+	{ 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+	 */
+	/* ACLK200 L0: 200MHz */
+	{ 3, 7, 4, 5, 1 },
+	/* ACLK200 L1: 160MHz */
+	{ 4, 7, 5, 6, 1 },
+	/* ACLK200 L2: 133MHz */
+	{ 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVGDL/R, DIVGPL/R }
+	 */
+	/* ACLK_GDL/R L1: 200MHz */
+	{ 3, 1 },
+	/* ACLK_GDL/R L2: 160MHz */
+	{ 4, 1 },
+	/* ACLK_GDL/R L3: 133MHz */
+	{ 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+	 *              DIVDMCP}
+	 */
+
+	/* DMC L0: 400MHz */
+	{3, 1, 1, 1, 1, 1},
+	/* DMC L1: 266.7MHz */
+	{4, 1, 1, 2, 1, 1},
+	/* DMC L2: 160MHz */
+	{5, 1, 1, 4, 1, 1},
+	/* DMC L3: 133MHz */
+	{5, 1, 1, 5, 1, 1},
+	/* DMC L4: 100MHz */
+	{7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
+	/*
+	 * Clock divider value for following
+	 * { G2DACP, DIVC2C, DIVC2C_ACLK }
+	 */
+
+	/* DMC L0: 400MHz */
+	{3, 1, 1},
+	/* DMC L1: 266.7MHz */
+	{4, 2, 1},
+	/* DMC L2: 160MHz */
+	{5, 4, 1},
+	/* DMC L3: 133MHz */
+	{5, 5, 1},
+	/* DMC L4: 100MHz */
+	{7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+		DIVACLK133, DIVONENAND }
+	 */
+
+	/* ACLK_GDL/R L0: 200MHz */
+	{2, 7, 4, 5, 1},
+	/* ACLK_GDL/R L1: 200MHz */
+	{2, 7, 4, 5, 1},
+	/* ACLK_GDL/R L2: 160MHz */
+	{4, 7, 5, 7, 1},
+	/* ACLK_GDL/R L3: 133MHz */
+	{4, 7, 5, 7, 1},
+	/* ACLK_GDL/R L4: 100MHz */
+	{7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVGDL/R, DIVGPL/R }
+	 */
+
+	/* ACLK_GDL/R L0: 200MHz */
+	{3, 1},
+	/* ACLK_GDL/R L1: 200MHz */
+	{3, 1},
+	/* ACLK_GDL/R L2: 160MHz */
+	{4, 1},
+	/* ACLK_GDL/R L3: 133MHz */
+	{5, 1},
+	/* ACLK_GDL/R L4: 100MHz */
+	{7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+	/*
+	 * Clock divider value for following
+	 * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+	 */
+
+	/* SCLK_MFC: 200MHz */
+	{3, 3, 4},
+	/* SCLK_MFC: 200MHz */
+	{3, 3, 4},
+	/* SCLK_MFC: 160MHz */
+	{4, 4, 5},
+	/* SCLK_MFC: 133MHz */
+	{5, 5, 5},
+	/* SCLK_MFC: 100MHz */
+	{7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+	unsigned int index;
+	unsigned int tmp;
+
+	for (index = LV_0; index < EX4210_LV_NUM; index++)
+		if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+			break;
+
+	if (index == EX4210_LV_NUM)
+		return -EINVAL;
+
+	/* Change Divider - DMC0 */
+	tmp = data->dmc_divtable[index];
+
+	__raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+	} while (tmp & 0x11111111);
+
+	/* Change Divider - TOP */
+	tmp = data->top_divtable[index];
+
+	__raw_writel(tmp, S5P_CLKDIV_TOP);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+	} while (tmp & 0x11111);
+
+	/* Change Divider - LEFTBUS */
+	tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+				S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(exynos4210_clkdiv_lr_bus[index][1] <<
+				S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+	} while (tmp & 0x11);
+
+	/* Change Divider - RIGHTBUS */
+	tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+				S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(exynos4210_clkdiv_lr_bus[index][1] <<
+				S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+	} while (tmp & 0x11);
+
+	return 0;
+}
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+	unsigned int index;
+	unsigned int tmp;
+
+	for (index = LV_0; index < EX4x12_LV_NUM; index++)
+		if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+			break;
+
+	if (index == EX4x12_LV_NUM)
+		return -EINVAL;
+
+	/* Change Divider - DMC0 */
+	tmp = data->dmc_divtable[index];
+
+	__raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+	} while (tmp & 0x11111111);
+
+	/* Change Divider - DMC1 */
+	tmp = __raw_readl(S5P_CLKDIV_DMC1);
+
+	tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
+		S5P_CLKDIV_DMC1_C2C_MASK |
+		S5P_CLKDIV_DMC1_C2CACLK_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+				S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+		(exynos4x12_clkdiv_dmc1[index][1] <<
+				S5P_CLKDIV_DMC1_C2C_SHIFT) |
+		(exynos4x12_clkdiv_dmc1[index][2] <<
+				S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_DMC1);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+	} while (tmp & 0x111111);
+
+	/* Change Divider - TOP */
+	tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+	tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
+		S5P_CLKDIV_TOP_ACLK100_MASK |
+		S5P_CLKDIV_TOP_ACLK160_MASK |
+		S5P_CLKDIV_TOP_ACLK133_MASK |
+		S5P_CLKDIV_TOP_ONENAND_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+				S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+		(exynos4x12_clkdiv_top[index][1] <<
+				S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+		(exynos4x12_clkdiv_top[index][2] <<
+				S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+		(exynos4x12_clkdiv_top[index][3] <<
+				S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+		(exynos4x12_clkdiv_top[index][4] <<
+				S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_TOP);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+	} while (tmp & 0x11111);
+
+	/* Change Divider - LEFTBUS */
+	tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+				S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(exynos4x12_clkdiv_lr_bus[index][1] <<
+				S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+	} while (tmp & 0x11);
+
+	/* Change Divider - RIGHTBUS */
+	tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+	tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+				S5P_CLKDIV_BUS_GDLR_SHIFT) |
+		(exynos4x12_clkdiv_lr_bus[index][1] <<
+				S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+	} while (tmp & 0x11);
+
+	/* Change Divider - MFC */
+	tmp = __raw_readl(S5P_CLKDIV_MFC);
+
+	tmp &= ~(S5P_CLKDIV_MFC_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+				S5P_CLKDIV_MFC_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_MFC);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+	} while (tmp & 0x1);
+
+	/* Change Divider - JPEG */
+	tmp = __raw_readl(S5P_CLKDIV_CAM1);
+
+	tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+				S5P_CLKDIV_CAM1_JPEG_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_CAM1);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+	} while (tmp & 0x1);
+
+	/* Change Divider - FIMC0~3 */
+	tmp = __raw_readl(S5P_CLKDIV_CAM);
+
+	tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
+		S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+
+	tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+				S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+		(exynos4x12_clkdiv_sclkip[index][2] <<
+				S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+		(exynos4x12_clkdiv_sclkip[index][2] <<
+				S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+		(exynos4x12_clkdiv_sclkip[index][2] <<
+				S5P_CLKDIV_CAM_FIMC3_SHIFT));
+
+	__raw_writel(tmp, S5P_CLKDIV_CAM);
+
+	do {
+		tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+	} while (tmp & 0x1111);
+
+	return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+	unsigned int i;
+
+	for (i = 0; i < 2; i++) {
+		void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+		/* Reset PPMU */
+		__raw_writel(0x8000000f, ppmu_base + 0xf010);
+		__raw_writel(0x8000000f, ppmu_base + 0xf050);
+		__raw_writel(0x6, ppmu_base + 0xf000);
+		__raw_writel(0x0, ppmu_base + 0xf100);
+
+		/* Set PPMU Event */
+		data->dmc[i].event = 0x6;
+		__raw_writel(((data->dmc[i].event << 12) | 0x1),
+			     ppmu_base + 0xfc);
+
+		/* Start PPMU */
+		__raw_writel(0x1, ppmu_base + 0xf000);
+	}
+}
+
+static void exynos4_read_ppmu(struct busfreq_data *data)
+{
+	int i, j;
+
+	for (i = 0; i < 2; i++) {
+		void __iomem *ppmu_base = data->dmc[i].hw_base;
+		u32 overflow;
+
+		/* Stop PPMU */
+		__raw_writel(0x0, ppmu_base + 0xf000);
+
+		/* Update local data from PPMU */
+		overflow = __raw_readl(ppmu_base + 0xf050);
+
+		data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+		data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+		for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+			data->dmc[i].count[j] = __raw_readl(
+					ppmu_base + (0xf110 + (0x10 * j)));
+			data->dmc[i].count_overflow[j] = overflow & (1 << j);
+		}
+	}
+
+	busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+	int i = 0;
+
+	while (exynos4x12_intclk_table[i].clk) {
+		if (exynos4x12_intclk_table[i].clk <= mifclk)
+			return i;
+		i++;
+	}
+
+	return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+			       struct opp *oldopp)
+{
+	int err = 0, tmp;
+	unsigned long volt = opp_get_voltage(opp);
+
+	switch (data->type) {
+	case TYPE_BUSF_EXYNOS4210:
+		/* OPP represents DMC clock + INT voltage */
+		err = regulator_set_voltage(data->vdd_int, volt,
+					    MAX_SAFEVOLT);
+		break;
+	case TYPE_BUSF_EXYNOS4x12:
+		/* OPP represents MIF clock + MIF voltage */
+		err = regulator_set_voltage(data->vdd_mif, volt,
+					    MAX_SAFEVOLT);
+		if (err)
+			break;
+
+		tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+		if (tmp < 0) {
+			err = tmp;
+			regulator_set_voltage(data->vdd_mif,
+					      opp_get_voltage(oldopp),
+					      MAX_SAFEVOLT);
+			break;
+		}
+		err = regulator_set_voltage(data->vdd_int,
+					    exynos4x12_intclk_table[tmp].volt,
+					    MAX_SAFEVOLT);
+		/*  Try to recover */
+		if (err)
+			regulator_set_voltage(data->vdd_mif,
+					      opp_get_voltage(oldopp),
+					      MAX_SAFEVOLT);
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
+{
+	int err = 0;
+	struct platform_device *pdev = container_of(dev, struct platform_device,
+						    dev);
+	struct busfreq_data *data = platform_get_drvdata(pdev);
+	struct opp *opp = devfreq_recommended_opp(dev, _freq);
+	unsigned long old_freq = opp_get_freq(data->curr_opp);
+	unsigned long freq = opp_get_freq(opp);
+
+	if (old_freq == freq)
+		return 0;
+
+	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+
+	mutex_lock(&data->lock);
+
+	if (data->disabled)
+		goto out;
+
+	if (old_freq < freq)
+		err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+	if (err)
+		goto out;
+
+	if (old_freq != freq) {
+		switch (data->type) {
+		case TYPE_BUSF_EXYNOS4210:
+			err = exynos4210_set_busclk(data, opp);
+			break;
+		case TYPE_BUSF_EXYNOS4x12:
+			err = exynos4x12_set_busclk(data, opp);
+			break;
+		default:
+			err = -EINVAL;
+		}
+	}
+	if (err)
+		goto out;
+
+	if (old_freq > freq)
+		err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+	if (err)
+		goto out;
+
+	data->curr_opp = opp;
+out:
+	mutex_unlock(&data->lock);
+	return err;
+}
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+	u64 p0 = data->dmc[0].count[0];
+	u64 p1 = data->dmc[1].count[0];
+
+	p0 *= data->dmc[1].ccnt;
+	p1 *= data->dmc[0].ccnt;
+
+	if (data->dmc[1].ccnt == 0)
+		return 0;
+
+	if (p0 > p1)
+		return 0;
+	return 1;
+}
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+				      struct devfreq_dev_status *stat)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device,
+						    dev);
+	struct busfreq_data *data = platform_get_drvdata(pdev);
+	int busier_dmc;
+	int cycles_x2 = 2; /* 2 x cycles */
+	void __iomem *addr;
+	u32 timing;
+	u32 memctrl;
+
+	exynos4_read_ppmu(data);
+	busier_dmc = exynos4_get_busier_dmc(data);
+	stat->current_frequency = opp_get_freq(data->curr_opp);
+
+	if (busier_dmc)
+		addr = S5P_VA_DMC1;
+	else
+		addr = S5P_VA_DMC0;
+
+	memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+	timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+	switch ((memctrl >> 8) & 0xf) {
+	case 0x4: /* DDR2 */
+		cycles_x2 = ((timing >> 16) & 0xf) * 2;
+		break;
+	case 0x5: /* LPDDR2 */
+	case 0x6: /* DDR3 */
+		cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+		break;
+	default:
+		pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+		       (memctrl >> 8) & 0xf);
+		return -EINVAL;
+	}
+
+	/* Number of cycles spent on memory access */
+	stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+	stat->total_time = data->dmc[busier_dmc].ccnt;
+
+	/* If the counters have overflown, retry */
+	if (data->dmc[busier_dmc].ccnt_overflow ||
+	    data->dmc[busier_dmc].count_overflow[0])
+		return -EAGAIN;
+
+	return 0;
+}
+
+static void exynos4_bus_exit(struct device *dev)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device,
+						    dev);
+	struct busfreq_data *data = platform_get_drvdata(pdev);
+
+	devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+	.initial_freq	= 400000,
+	.polling_ms	= 50,
+	.target		= exynos4_bus_target,
+	.get_dev_status	= exynos4_bus_get_dev_status,
+	.exit		= exynos4_bus_exit,
+};
+
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+	u32 tmp;
+	int mgrp;
+	int i, err = 0;
+
+	tmp = __raw_readl(S5P_CLKDIV_DMC0);
+	for (i = LV_0; i < EX4210_LV_NUM; i++) {
+		tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+			S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+			S5P_CLKDIV_DMC0_DPHY_MASK |
+			S5P_CLKDIV_DMC0_DMC_MASK |
+			S5P_CLKDIV_DMC0_DMCD_MASK |
+			S5P_CLKDIV_DMC0_DMCP_MASK |
+			S5P_CLKDIV_DMC0_COPY2_MASK |
+			S5P_CLKDIV_DMC0_CORETI_MASK);
+
+		tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+					S5P_CLKDIV_DMC0_ACP_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][1] <<
+					S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][2] <<
+					S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][3] <<
+					S5P_CLKDIV_DMC0_DMC_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][4] <<
+					S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][5] <<
+					S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][6] <<
+					S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+			(exynos4210_clkdiv_dmc0[i][7] <<
+					S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+		data->dmc_divtable[i] = tmp;
+	}
+
+	tmp = __raw_readl(S5P_CLKDIV_TOP);
+	for (i = LV_0; i <  EX4210_LV_NUM; i++) {
+		tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
+			S5P_CLKDIV_TOP_ACLK100_MASK |
+			S5P_CLKDIV_TOP_ACLK160_MASK |
+			S5P_CLKDIV_TOP_ACLK133_MASK |
+			S5P_CLKDIV_TOP_ONENAND_MASK);
+
+		tmp |= ((exynos4210_clkdiv_top[i][0] <<
+					S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+			(exynos4210_clkdiv_top[i][1] <<
+					S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+			(exynos4210_clkdiv_top[i][2] <<
+					S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+			(exynos4210_clkdiv_top[i][3] <<
+					S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+			(exynos4210_clkdiv_top[i][4] <<
+					S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+		data->top_divtable[i] = tmp;
+	}
+
+#ifdef CONFIG_EXYNOS_ASV
+	tmp = exynos_result_of_asv;
+#else
+	tmp = 0; /* Max voltages for the reliability of the unknown */
+#endif
+
+	pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+	/* Use merged grouping for voltage */
+	switch (tmp) {
+	case 0:
+		mgrp = 0;
+		break;
+	case 1:
+	case 2:
+		mgrp = 1;
+		break;
+	case 3:
+	case 4:
+		mgrp = 2;
+		break;
+	case 5:
+	case 6:
+		mgrp = 3;
+		break;
+	case 7:
+		mgrp = 4;
+		break;
+	default:
+		pr_warn("Unknown ASV Group. Use max voltage.\n");
+		mgrp = 0;
+	}
+
+	for (i = LV_0; i < EX4210_LV_NUM; i++)
+		exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+	for (i = LV_0; i < EX4210_LV_NUM; i++) {
+		err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+			      exynos4210_busclk_table[i].volt);
+		if (err) {
+			dev_err(data->dev, "Cannot add opp entries.\n");
+			return err;
+		}
+	}
+
+
+	return 0;
+}
+
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+	unsigned int i;
+	unsigned int tmp;
+	int ret;
+
+	/* Enable pause function for DREX2 DVFS */
+	tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
+	tmp |= DMC_PAUSE_ENABLE;
+	__raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+
+	tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+	for (i = 0; i <  EX4x12_LV_NUM; i++) {
+		tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+			S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+			S5P_CLKDIV_DMC0_DPHY_MASK |
+			S5P_CLKDIV_DMC0_DMC_MASK |
+			S5P_CLKDIV_DMC0_DMCD_MASK |
+			S5P_CLKDIV_DMC0_DMCP_MASK);
+
+		tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+					S5P_CLKDIV_DMC0_ACP_SHIFT) |
+			(exynos4x12_clkdiv_dmc0[i][1] <<
+					S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+			(exynos4x12_clkdiv_dmc0[i][2] <<
+					S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+			(exynos4x12_clkdiv_dmc0[i][3] <<
+					S5P_CLKDIV_DMC0_DMC_SHIFT) |
+			(exynos4x12_clkdiv_dmc0[i][4] <<
+					S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+			(exynos4x12_clkdiv_dmc0[i][5] <<
+					S5P_CLKDIV_DMC0_DMCP_SHIFT));
+
+		data->dmc_divtable[i] = tmp;
+	}
+
+#ifdef CONFIG_EXYNOS_ASV
+	tmp = exynos_result_of_asv;
+#else
+	tmp = 0; /* Max voltages for the reliability of the unknown */
+#endif
+
+	if (tmp > 8)
+		tmp = 0;
+	pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
+
+	for (i = 0; i < EX4x12_LV_NUM; i++) {
+		exynos4x12_mifclk_table[i].volt =
+			exynos4x12_mif_step_50[tmp][i];
+		exynos4x12_intclk_table[i].volt =
+			exynos4x12_int_volt[tmp][i];
+	}
+
+	for (i = 0; i < EX4x12_LV_NUM; i++) {
+		ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+			      exynos4x12_mifclk_table[i].volt);
+		if (ret) {
+			dev_err(data->dev, "Failed to add opp entries.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	struct busfreq_data *data = container_of(this, struct busfreq_data,
+						 pm_notifier);
+	struct opp *opp;
+	unsigned long maxfreq = ULONG_MAX;
+	int err = 0;
+
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		/* Set Fastest and Deactivate DVFS */
+		mutex_lock(&data->lock);
+
+		data->disabled = true;
+
+		opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+		err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+		if (err)
+			goto unlock;
+
+		switch (data->type) {
+		case TYPE_BUSF_EXYNOS4210:
+			err = exynos4210_set_busclk(data, opp);
+			break;
+		case TYPE_BUSF_EXYNOS4x12:
+			err = exynos4x12_set_busclk(data, opp);
+			break;
+		default:
+			err = -EINVAL;
+		}
+		if (err)
+			goto unlock;
+
+		data->curr_opp = opp;
+unlock:
+		mutex_unlock(&data->lock);
+		if (err)
+			return err;
+		return NOTIFY_OK;
+	case PM_POST_RESTORE:
+	case PM_POST_SUSPEND:
+		/* Reactivate */
+		mutex_lock(&data->lock);
+		data->disabled = false;
+		mutex_unlock(&data->lock);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+	struct busfreq_data *data;
+	struct opp *opp;
+	struct device *dev = &pdev->dev;
+	int err = 0;
+
+	data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(dev, "Cannot allocate memory.\n");
+		return -ENOMEM;
+	}
+
+	data->type = pdev->id_entry->driver_data;
+	data->dmc[0].hw_base = S5P_VA_DMC0;
+	data->dmc[1].hw_base = S5P_VA_DMC1;
+	data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+	data->dev = dev;
+	mutex_init(&data->lock);
+
+	switch (data->type) {
+	case TYPE_BUSF_EXYNOS4210:
+		err = exynos4210_init_tables(data);
+		break;
+	case TYPE_BUSF_EXYNOS4x12:
+		err = exynos4x12_init_tables(data);
+		break;
+	default:
+		dev_err(dev, "Cannot determine the device id %d\n", data->type);
+		err = -EINVAL;
+	}
+	if (err)
+		goto err_regulator;
+
+	data->vdd_int = regulator_get(dev, "vdd_int");
+	if (IS_ERR(data->vdd_int)) {
+		dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+		err = PTR_ERR(data->vdd_int);
+		goto err_regulator;
+	}
+	if (data->type == TYPE_BUSF_EXYNOS4x12) {
+		data->vdd_mif = regulator_get(dev, "vdd_mif");
+		if (IS_ERR(data->vdd_mif)) {
+			dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+			err = PTR_ERR(data->vdd_mif);
+			regulator_put(data->vdd_int);
+			goto err_regulator;
+
+		}
+	}
+
+	opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+		       exynos4_devfreq_profile.initial_freq);
+		err = PTR_ERR(opp);
+		goto err_opp_add;
+	}
+	data->curr_opp = opp;
+
+	platform_set_drvdata(pdev, data);
+
+	busfreq_mon_reset(data);
+
+	data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+					   &devfreq_simple_ondemand, NULL);
+	if (IS_ERR(data->devfreq)) {
+		err = PTR_ERR(data->devfreq);
+		goto err_opp_add;
+	}
+
+	devfreq_register_opp_notifier(dev, data->devfreq);
+
+	err = register_pm_notifier(&data->pm_notifier);
+	if (err) {
+		dev_err(dev, "Failed to setup pm notifier\n");
+		goto err_devfreq_add;
+	}
+
+	return 0;
+err_devfreq_add:
+	devfreq_remove_device(data->devfreq);
+err_opp_add:
+	if (data->vdd_mif)
+		regulator_put(data->vdd_mif);
+	regulator_put(data->vdd_int);
+err_regulator:
+	kfree(data);
+	return err;
+}
+
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+	struct busfreq_data *data = platform_get_drvdata(pdev);
+
+	unregister_pm_notifier(&data->pm_notifier);
+	devfreq_remove_device(data->devfreq);
+	regulator_put(data->vdd_int);
+	if (data->vdd_mif)
+		regulator_put(data->vdd_mif);
+	kfree(data);
+
+	return 0;
+}
+
+static int exynos4_busfreq_resume(struct device *dev)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device,
+						    dev);
+	struct busfreq_data *data = platform_get_drvdata(pdev);
+
+	busfreq_mon_reset(data);
+	return 0;
+}
+
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+	.resume	= exynos4_busfreq_resume,
+};
+
+static const struct platform_device_id exynos4_busfreq_id[] = {
+	{ "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+	{ "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
+	{ "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
+	{ },
+};
+
+static struct platform_driver exynos4_busfreq_driver = {
+	.probe	= exynos4_busfreq_probe,
+	.remove	= __devexit_p(exynos4_busfreq_remove),
+	.id_table = exynos4_busfreq_id,
+	.driver = {
+		.name	= "exynos4-busfreq",
+		.owner	= THIS_MODULE,
+		.pm	= &exynos4_busfreq_pm,
+	},
+};
+
+static int __init exynos4_busfreq_init(void)
+{
+	return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+static void __exit exynos4_busfreq_exit(void)
+{
+	platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos4-busfreq");
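The new exynos4_bus.c driver binds by platform-device name through exynos4_busfreq_id[]; instantiating that device is left to platform code outside this diff. A hypothetical board-file sketch (device name taken from the id table, everything else assumed):

	#include <linux/init.h>
	#include <linux/platform_device.h>

	/* illustrative sketch: name must match an exynos4_busfreq_id[] entry */
	static struct platform_device example_busfreq_device = {
		.name	= "exynos4210-busfreq",
		.id	= -1,
	};

	static int __init example_add_busfreq(void)
	{
		return platform_device_register(&example_busfreq_device);
	}
	device_initcall(example_add_busfreq);

The probe then reads pdev->id_entry->driver_data to choose TYPE_BUSF_EXYNOS4210 or TYPE_BUSF_EXYNOS4x12, as exynos4_busfreq_probe() shows.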
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ab8f469..5a99bb3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@
 
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
-	depends on ARCH_MX3
+	depends on SOC_IMX31 || SOC_IMX35
 	select DMA_ENGINE
 	default y
 	help
@@ -216,7 +216,7 @@
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
-	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
 	select DMA_ENGINE
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b7cbd1a..0698695 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2054,6 +2054,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl08x_ids);
+
 static struct amba_driver pl08x_amba_driver = {
 	.drv.name	= DRIVER_NAME,
 	.id_table	= pl08x_ids,
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d864..2b8661b 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@
 	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*
@@ -235,7 +244,9 @@
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
+	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;
@@ -252,7 +263,7 @@
 	int			i;
 
 	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
 	ret = -ENOMEM;
 
@@ -306,9 +317,6 @@
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -391,9 +399,9 @@
 			continue;
 		}
 
-		init_completion(&cmp);
+		done.done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@
 		}
 		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-								end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait.  To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing.  For now, just
+			 * leave it dangling.
+			 */
 			pr_warning("%s: #%u: test timed out\n",
 				   thread_name, total_tests - 1);
 			failed_tests++;
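The dmatest hunks above drop the struct completion in favor of a boolean flag plus an on-stack wait queue, so the thread can block in wait_event_freezable_timeout() instead of looping around wait_for_completion_interruptible_timeout() and try_to_freeze(). A minimal sketch of the same wait pattern, with hypothetical sample_* names:

	#include <linux/freezer.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	/* illustrative sketch of the flag-plus-waitqueue pattern */
	struct sample_done {
		bool			done;
		wait_queue_head_t	*wait;
	};

	/* completion-style callback: set the flag, then wake the waiter */
	static void sample_callback(void *arg)
	{
		struct sample_done *d = arg;

		d->done = true;
		wake_up_all(d->wait);
	}

	static bool sample_wait(struct sample_done *d, unsigned int timeout_ms)
	{
		/* sleeps freezably until d->done is set or the timeout expires */
		wait_event_freezable_timeout(*d->wait, d->done,
					     msecs_to_jiffies(timeout_ms));
		return d->done;
	}

Typical setup in the waiting thread mirrors dmatest_func(): DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait), a struct sample_done initialized with .wait = &wait, and the pair handed to the descriptor via tx->callback / tx->callback_param.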
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 57104147..2d8d1b0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -990,6 +990,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl330_ids);
+
 #ifdef CONFIG_PM_RUNTIME
 static int pl330_runtime_suspend(struct device *dev)
 {
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index fe90cd4..e48ab31 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -32,7 +32,6 @@
 #include <linux/completion.h>
 #include <linux/kobject.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
 #include <linux/workqueue.h>
 #include <linux/edac.h>
 
@@ -243,8 +242,8 @@
 	 */
 	struct edac_dev_sysfs_attribute *sysfs_attributes;
 
-	/* pointer to main 'edac' class in sysfs */
-	struct sysdev_class *edac_class;
+	/* pointer to main 'edac' subsys in sysfs */
+	struct bus_type *edac_subsys;
 
 	/* the internal state of this controller instance */
 	int op_state;
@@ -342,7 +341,7 @@
 
 	int pci_idx;
 
-	struct sysdev_class *edac_class;	/* pointer to class */
+	struct bus_type *edac_subsys;	/* pointer to subsystem */
 
 	/* the internal state of this controller instance */
 	int op_state;
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index c3f6743..4b15459 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -23,7 +23,6 @@
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
-#include <linux/sysdev.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
 #include <asm/uaccess.h>
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 86649df..b4ea185 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,5 +1,5 @@
 /*
- * file for managing the edac_device class of devices for EDAC
+ * file for managing the edac_device subsystem of devices for EDAC
  *
  * (C) 2007 SoftwareBitMaker
  *
@@ -230,21 +230,21 @@
  */
 int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
 {
-	struct sysdev_class *edac_class;
+	struct bus_type *edac_subsys;
 	int err;
 
 	debugf1("%s()\n", __func__);
 
 	/* get the /sys/devices/system/edac reference */
-	edac_class = edac_get_sysfs_class();
-	if (edac_class == NULL) {
-		debugf1("%s() no edac_class error\n", __func__);
+	edac_subsys = edac_get_sysfs_subsys();
+	if (edac_subsys == NULL) {
+		debugf1("%s() no edac_subsys error\n", __func__);
 		err = -ENODEV;
 		goto err_out;
 	}
 
-	/* Point to the 'edac_class' this instance 'reports' to */
-	edac_dev->edac_class = edac_class;
+	/* Point to the 'edac_subsys' this instance 'reports' to */
+	edac_dev->edac_subsys = edac_subsys;
 
 	/* Init the devices's kobject */
 	memset(&edac_dev->kobj, 0, sizeof(struct kobject));
@@ -261,7 +261,7 @@
 
 	/* register */
 	err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
-				   &edac_class->kset.kobj,
+				   &edac_subsys->dev_root->kobj,
 				   "%s", edac_dev->name);
 	if (err) {
 		debugf1("%s()Failed to register '.../edac/%s'\n",
@@ -284,7 +284,7 @@
 	module_put(edac_dev->owner);
 
 err_mod_get:
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 
 err_out:
 	return err;
@@ -308,7 +308,7 @@
 	 *   b) 'kfree' the memory
 	 */
 	kobject_put(&dev->kobj);
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 }
 
 /* edac_dev -> instance information */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d69144a..ca6c04d 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -25,7 +25,6 @@
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
-#include <linux/sysdev.h>
 #include <linux/ctype.h>
 #include <linux/edac.h>
 #include <asm/uaccess.h>
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 29ffa35..d56e634 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1021,19 +1021,19 @@
 int edac_sysfs_setup_mc_kset(void)
 {
 	int err = -EINVAL;
-	struct sysdev_class *edac_class;
+	struct bus_type *edac_subsys;
 
 	debugf1("%s()\n", __func__);
 
-	/* get the /sys/devices/system/edac class reference */
-	edac_class = edac_get_sysfs_class();
-	if (edac_class == NULL) {
-		debugf1("%s() no edac_class error=%d\n", __func__, err);
+	/* get the /sys/devices/system/edac subsys reference */
+	edac_subsys = edac_get_sysfs_subsys();
+	if (edac_subsys == NULL) {
+		debugf1("%s() no edac_subsys error=%d\n", __func__, err);
 		goto fail_out;
 	}
 
 	/* Init the MC's kobject */
-	mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
+	mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
 	if (!mc_kset) {
 		err = -ENOMEM;
 		debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
@@ -1045,7 +1045,7 @@
 	return 0;
 
 fail_kset:
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 
 fail_out:
 	return err;
@@ -1059,6 +1059,6 @@
 void edac_sysfs_teardown_mc_kset(void)
 {
 	kset_unregister(mc_kset);
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 }
 
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 17aabb7..00f81b4 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -10,8 +10,6 @@
 #ifndef	__EDAC_MODULE_H__
 #define	__EDAC_MODULE_H__
 
-#include <linux/sysdev.h>
-
 #include "edac_core.h"
 
 /*
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 2b378207..63af1c5 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -19,7 +19,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
-#include <linux/sysdev.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
 #include <asm/uaccess.h>
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 495198a..97f5064 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -338,12 +338,12 @@
  * edac_pci_main_kobj_setup()
  *
  *	setup the sysfs for EDAC PCI attributes
- *	assumes edac_class has already been initialized
+ *	assumes edac_subsys has already been initialized
  */
 static int edac_pci_main_kobj_setup(void)
 {
 	int err;
-	struct sysdev_class *edac_class;
+	struct bus_type *edac_subsys;
 
 	debugf0("%s()\n", __func__);
 
@@ -354,9 +354,9 @@
 	/* First time, so create the main kobject and its
 	 * controls and attributes
 	 */
-	edac_class = edac_get_sysfs_class();
-	if (edac_class == NULL) {
-		debugf1("%s() no edac_class\n", __func__);
+	edac_subsys = edac_get_sysfs_subsys();
+	if (edac_subsys == NULL) {
+		debugf1("%s() no edac_subsys\n", __func__);
 		err = -ENODEV;
 		goto decrement_count_fail;
 	}
@@ -381,7 +381,7 @@
 	/* Instanstiate the pci object */
 	err = kobject_init_and_add(edac_pci_top_main_kobj,
 				   &ktype_edac_pci_main_kobj,
-				   &edac_class->kset.kobj, "pci");
+				   &edac_subsys->dev_root->kobj, "pci");
 	if (err) {
 		debugf1("Failed to register '.../edac/pci'\n");
 		goto kobject_init_and_add_fail;
@@ -404,7 +404,7 @@
 	module_put(THIS_MODULE);
 
 mod_get_fail:
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 
 decrement_count_fail:
 	/* if are on this error exit, nothing to tear down */
@@ -432,7 +432,7 @@
 			__func__);
 		kobject_put(edac_pci_top_main_kobj);
 	}
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 }
 
 /*
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 86ad2ee..670c448 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -26,7 +26,7 @@
 int edac_err_assert = 0;
 EXPORT_SYMBOL_GPL(edac_err_assert);
 
-static atomic_t edac_class_valid = ATOMIC_INIT(0);
+static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
 
 /*
  * called to determine if there is an EDAC driver interested in
@@ -54,36 +54,37 @@
  * sysfs object: /sys/devices/system/edac
  *	need to export to other files
  */
-struct sysdev_class edac_class = {
+struct bus_type edac_subsys = {
 	.name = "edac",
+	.dev_name = "edac",
 };
-EXPORT_SYMBOL_GPL(edac_class);
+EXPORT_SYMBOL_GPL(edac_subsys);
 
 /* return pointer to the 'edac' node in sysfs */
-struct sysdev_class *edac_get_sysfs_class(void)
+struct bus_type *edac_get_sysfs_subsys(void)
 {
 	int err = 0;
 
-	if (atomic_read(&edac_class_valid))
+	if (atomic_read(&edac_subsys_valid))
 		goto out;
 
 	/* create the /sys/devices/system/edac directory */
-	err = sysdev_class_register(&edac_class);
+	err = subsys_system_register(&edac_subsys, NULL);
 	if (err) {
 		printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
 		return NULL;
 	}
 
 out:
-	atomic_inc(&edac_class_valid);
-	return &edac_class;
+	atomic_inc(&edac_subsys_valid);
+	return &edac_subsys;
 }
-EXPORT_SYMBOL_GPL(edac_get_sysfs_class);
+EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys);
 
-void edac_put_sysfs_class(void)
+void edac_put_sysfs_subsys(void)
 {
 	/* last user unregisters it */
-	if (atomic_dec_and_test(&edac_class_valid))
-		sysdev_class_unregister(&edac_class);
+	if (atomic_dec_and_test(&edac_subsys_valid))
+		bus_unregister(&edac_subsys);
 }
-EXPORT_SYMBOL_GPL(edac_put_sysfs_class);
+EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 70ad892..8568d9b 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2234,7 +2234,7 @@
 	if (pvt->enable_scrub)
 		disable_sdram_scrub_setting(mci);
 
-	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+	mce_unregister_decode_chain(&i7_mce_dec);
 
 	/* Disable EDAC polling */
 	i7core_pci_ctl_release(pvt);
@@ -2336,7 +2336,7 @@
 	/* DCLK for scrub rate setting */
 	pvt->dclk_freq = get_dclk_freq();
 
-	atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+	mce_register_decode_chain(&i7_mce_dec);
 
 	return 0;
 
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index a5da732..4184e01 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -277,11 +277,9 @@
 static int i82975x_process_error_info(struct mem_ctl_info *mci,
 		struct i82975x_error_info *info, int handle_errors)
 {
-	int row, multi_chan, chan;
+	int row, chan;
 	unsigned long offst, page;
 
-	multi_chan = mci->csrows[0].nr_channels - 1;
-
 	if (!(info->errsts2 & 0x0003))
 		return 0;
 
@@ -294,20 +292,30 @@
 	}
 
 	page = (unsigned long) info->eap;
-	if (info->xeap & 1)
-		page |= 0x100000000ul;
-	chan = page & 1;
 	page >>= 1;
-	offst = page & ((1 << PAGE_SHIFT) - 1);
-	page >>= PAGE_SHIFT;
+	if (info->xeap & 1)
+		page |= 0x80000000;
+	page >>= (PAGE_SHIFT - 1);
 	row = edac_mc_find_csrow_by_page(mci, page);
 
+	if (row == -1)	{
+		i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
+			"\tXEAP=%u\n"
+			"\t EAP=0x%08x\n"
+			"\tPAGE=0x%08x\n",
+			(info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
+		return 0;
+	}
+	chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
+	offst = info->eap
+			& ((1 << PAGE_SHIFT) -
+				(1 << mci->csrows[row].grain));
+
 	if (info->errsts & 0x0002)
 		edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
 	else
 		edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
-				multi_chan ? chan : 0,
-				"i82975x CE");
+				chan, "i82975x CE");
 
 	return 1;
 }
@@ -410,7 +418,7 @@
 		csrow->last_page = cumul_size - 1;
 		csrow->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 6;	/* I82975X_EAP has 64B resolution */
+		csrow->grain = 1 << 7;	/* 128Byte cache-line resolution */
 		csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
 		csrow->dtype = i82975x_dram_type(mch_window, index);
 		csrow->edac_mode = EDAC_SECDED; /* only supported */
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index d0864d9..bd926ea 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -884,7 +884,7 @@
 
 	pr_info("MCE: In-kernel MCE decoding enabled.\n");
 
-	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+	mce_register_decode_chain(&amd_mce_dec_nb);
 
 	return 0;
 }
@@ -893,7 +893,7 @@
 #ifdef MODULE
 static void __exit mce_amd_exit(void)
 {
-	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+	mce_unregister_decode_chain(&amd_mce_dec_nb);
 	kfree(fam_ops);
 }
 
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 73c3e26..885e8ad 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kobject.h>
-#include <linux/sysdev.h>
 #include <linux/edac.h>
 #include <linux/module.h>
 #include <asm/mce.h>
@@ -116,14 +115,14 @@
 
 static int __init edac_init_mce_inject(void)
 {
-	struct sysdev_class *edac_class = NULL;
+	struct bus_type *edac_subsys = NULL;
 	int i, err = 0;
 
-	edac_class = edac_get_sysfs_class();
-	if (!edac_class)
+	edac_subsys = edac_get_sysfs_subsys();
+	if (!edac_subsys)
 		return -EINVAL;
 
-	mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj);
+	mce_kobj = kobject_create_and_add("mce", &edac_subsys->dev_root->kobj);
 	if (!mce_kobj) {
 		printk(KERN_ERR "Error creating a mce kset.\n");
 		err = -ENOMEM;
@@ -147,7 +146,7 @@
 	kobject_del(mce_kobj);
 
 err_mce_kobj:
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 
 	return err;
 }
@@ -161,7 +160,7 @@
 
 	kobject_del(mce_kobj);
 
-	edac_put_sysfs_class();
+	edac_put_sysfs_subsys();
 }
 
 module_init(edac_init_mce_inject);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3840096..fc75706 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -142,7 +142,7 @@
 
 /*
  * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
- * indirectly acccessed and have a base and length defined by the
+ * indirectly accessed and have a base and length defined by the
  * device tree. The base can be anything; however, we expect the
  * length to be precisely two registers, the first for the address
  * window and the second for the data window.
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 7a402bf..1dc118d 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1609,11 +1609,9 @@
 		mce->cpuvendor, mce->cpuid, mce->time,
 		mce->socketid, mce->apicid);
 
-#ifdef CONFIG_SMP
 	/* Only handle if it is the right mc controller */
 	if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
 		return NOTIFY_DONE;
-#endif
 
 	smp_rmb();
 	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
@@ -1661,8 +1659,7 @@
 	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
 		__func__, mci, &sbridge_dev->pdev[0]->dev);
 
-	atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
-					 &sbridge_mce_dec);
+	mce_unregister_decode_chain(&sbridge_mce_dec);
 
 	/* Remove MC sysfs nodes */
 	edac_mc_del_mc(mci->dev);
@@ -1731,8 +1728,7 @@
 		goto fail0;
 	}
 
-	atomic_notifier_chain_register(&x86_mce_decoder_chain,
-				       &sbridge_mce_dec);
+	mce_register_decode_chain(&sbridge_mce_dec);
 	return 0;
 
 fail0:
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index b0a8117..d25599f 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -495,7 +495,8 @@
 	return 0;
 }
 
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+		enum kmsg_dump_reason reason, u64 *id,
 		unsigned int part, size_t size, struct pstore_info *psi)
 {
 	char name[DUMP_NAME_LEN];
@@ -565,7 +566,7 @@
 static int efi_pstore_erase(enum pstore_type_id type, u64 id,
 			    struct pstore_info *psi)
 {
-	efi_pstore_write(type, &id, (unsigned int)id, 0, psi);
+	efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);
 
 	return 0;
 }
@@ -587,7 +588,8 @@
 	return -1;
 }
 
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+		enum kmsg_dump_reason reason, u64 *id,
 		unsigned int part, size_t size, struct pstore_info *psi)
 {
 	return 0;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c4e7c59..91ddf0f 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -345,7 +345,8 @@
 		memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
 
 		/* The size reported is the min of all of our buffers */
-		*data_size = min(*data_size, gsmi_dev.data_buf->length);
+		*data_size = min_t(unsigned long, *data_size,
+						gsmi_dev.data_buf->length);
 		*data_size = min_t(unsigned long, *data_size, param.data_len);
 
 		/* Copy data back to return buffer. */
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb1..3ee852c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -433,11 +433,11 @@
 * Helper routines to determine if the entry is valid
  * in the proper iBFT structure.
  */
-static mode_t ibft_check_nic_for(void *data, int type)
+static umode_t ibft_check_nic_for(void *data, int type)
 {
 	struct ibft_kobject *entry = data;
 	struct ibft_nic *nic = entry->nic;
-	mode_t rc = 0;
+	umode_t rc = 0;
 
 	switch (type) {
 	case ISCSI_BOOT_ETH_INDEX:
@@ -488,11 +488,11 @@
 	return rc;
 }
 
-static mode_t __init ibft_check_tgt_for(void *data, int type)
+static umode_t __init ibft_check_tgt_for(void *data, int type)
 {
 	struct ibft_kobject *entry = data;
 	struct ibft_tgt *tgt = entry->tgt;
-	mode_t rc = 0;
+	umode_t rc = 0;
 
 	switch (type) {
 	case ISCSI_BOOT_TGT_INDEX:
@@ -524,11 +524,11 @@
 	return rc;
 }
 
-static mode_t __init ibft_check_initiator_for(void *data, int type)
+static umode_t __init ibft_check_initiator_for(void *data, int type)
 {
 	struct ibft_kobject *entry = data;
 	struct ibft_initiator *init = entry->initiator;
-	mode_t rc = 0;
+	umode_t rc = 0;
 
 	switch (type) {
 	case ISCSI_BOOT_INI_INDEX:
@@ -746,6 +746,37 @@
 	ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+	char *sign;
+} ibft_signs[] = {
+	/*
+	 * One spec says "IBFT", the other says "iBFT". We have to check
+	 * for both.
+	 */
+	{ ACPI_SIG_IBFT },
+	{ "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+	int i;
+	struct acpi_table_header *table = NULL;
+
+	if (acpi_disabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+		acpi_get_table(ibft_signs[i].sign, 0, &table);
+		ibft_addr = (struct acpi_table_ibft *)table;
+	}
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@
 {
 	int rc = 0;
 
+	/*
+	 * On UEFI systems setup_arch()/find_ibft_region() runs before the
+	 * ACPI tables are parsed, so it only performs the legacy memory
+	 * scan.
+	 */
+	if (!ibft_addr)
+		acpi_find_ibft_region();
+
 	if (ibft_addr) {
-		printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-		       (u64)isa_virt_to_bus(ibft_addr));
+		pr_info("iBFT detected.\n");
 
 		rc = ibft_check_device();
 		if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe7232..4da4eb9 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@
 static const struct {
 	char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-	/*
-	 * One spec says "IBFT", the other says "iBFT". We have to check
-	 * for both.
-	 */
-	{ ACPI_SIG_IBFT },
-#endif
 	{ "iBFT" },
 	{ "BIFT" },	/* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-	ibft_addr = (struct acpi_table_ibft *)header;
-	return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
 	unsigned long pos;
@@ -94,6 +79,7 @@
 				 * the table cannot be valid. */
 				if (pos + len <= (IBFT_END-1)) {
 					ibft_addr = (struct acpi_table_ibft *)virt;
+					pr_info("iBFT found at 0x%lx.\n", pos);
 					goto done;
 				}
 			}
@@ -108,20 +94,12 @@
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-	int i;
-#endif
 	ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-		acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
 	/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
 	 * only use ACPI for this */
 
-	if (!ibft_addr && !efi_enabled)
+	if (!efi_enabled)
 		find_ibft_in_mem();
 
 	if (ibft_addr) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8482a23..5099681 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,7 +70,7 @@
 
 config GPIO_DA9052
 	tristate "Dialog DA9052 GPIO"
-	depends on PMIC_DA9052
+	depends on PMIC_DA9052 && BROKEN
 	help
 	  Say yes here to enable the GPIO driver for the DA9052 chip.
 
@@ -356,7 +356,7 @@
 
 config GPIO_CS5535
 	tristate "AMD CS5535/CS5536 GPIO support"
-	depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
+	depends on PCI && X86 && MFD_CS5535
 	help
 	  The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
 	  can be used for quite a number of things.  The CS5535/6 is found on
@@ -387,7 +387,7 @@
 	  Say Y here to support Intel Langwell/Penwell GPIO.
 
 config GPIO_PCH
-	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
+	tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
 	depends on PCI && X86
 	select GENERIC_IRQ_CHIP
 	help
@@ -395,11 +395,12 @@
 	  which is an IOH(Input/Output Hub) for x86 embedded processor.
 	  This driver can access PCH GPIO device.
 
-	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-	  Output Hub), ML7223.
+	  This driver can also be used for the LAPIS Semiconductor IOH (Input/
+	  Output Hub) devices, ML7223 and ML7831.
 	  ML7223 IOH is for MP(Media Phone) use.
-	  ML7223 is companion chip for Intel Atom E6xx series.
-	  ML7223 is completely compatible for Intel EG20T PCH.
+	  ML7831 IOH is for general purpose use.
+	  ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
+	  ML7223/ML7831 is fully compatible with the Intel EG20T PCH.
 
 config GPIO_ML_IOH
 	tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 9f27815..2f263cc 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -193,17 +193,7 @@
 	.remove		= __devexit_p(adp5520_gpio_remove),
 };
 
-static int __init adp5520_gpio_init(void)
-{
-	return platform_driver_register(&adp5520_gpio_driver);
-}
-module_init(adp5520_gpio_init);
-
-static void __exit adp5520_gpio_exit(void)
-{
-	platform_driver_unregister(&adp5520_gpio_driver);
-}
-module_exit(adp5520_gpio_exit);
+module_platform_driver(adp5520_gpio_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("GPIO ADP5520 Driver");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3525ad9..9ad1703 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -418,9 +418,8 @@
 	if (ret)
 		goto err_irq;
 
-	dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
-			gc->base, gc->base + gc->ngpio - 1,
-			pdata->irq_base, client->name, revid);
+	dev_info(&client->dev, "IRQ Base: %d Rev.: %d\n",
+			pdata->irq_base, revid);
 
 	if (pdata->setup) {
 		ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index ec57936..5ca4098 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -223,9 +223,6 @@
 		goto err_release_mem;
 	}
 
-	printk(KERN_INFO "bt8xxgpio: Abusing BT8xx card for GPIOs %d to %d\n",
-	       bg->gpio.base, bg->gpio.base + BT8XXGPIO_NR_GPIOS - 1);
-
 	return 0;
 
 err_release_mem:
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6e16cba..19eda1b 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -347,7 +347,6 @@
 	if (err)
 		goto release_region;
 
-	dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
 	return 0;
 
 release_region:
@@ -382,18 +381,7 @@
 	.remove = __devexit_p(cs5535_gpio_remove),
 };
 
-static int __init cs5535_gpio_init(void)
-{
-	return platform_driver_register(&cs5535_gpio_driver);
-}
-
-static void __exit cs5535_gpio_exit(void)
-{
-	platform_driver_unregister(&cs5535_gpio_driver);
-}
-
-module_init(cs5535_gpio_init);
-module_exit(cs5535_gpio_exit);
+module_platform_driver(cs5535_gpio_driver);
 
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb..56dd047 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
 
 #define DA9052_INPUT				1
 #define DA9052_OUTPUT_OPENDRAIN		2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE		0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE		0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT		4
+#define DA9052_IRQ_GPI0			16
+#define DA9052_GPIO_ODD_SHIFT			7
+#define DA9052_GPIO_EVEN_SHIFT			3
 
 struct da9052_gpio {
 	struct da9052 *da9052;
@@ -104,33 +106,26 @@
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
 	struct da9052_gpio *gpio = to_da9052_gpio(gc);
-	unsigned char register_value = 0;
 	int ret;
 
 	if (da9052_gpio_port_odd(offset)) {
-		if (value) {
-			register_value = DA9052_GPIO_ODD_PORT_MODE;
 			ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
 						DA9052_GPIO_0_1_REG,
 						DA9052_GPIO_ODD_PORT_MODE,
-						register_value);
+						value << DA9052_GPIO_ODD_SHIFT);
 			if (ret != 0)
 				dev_err(gpio->da9052->dev,
 					"Failed to updated gpio odd reg,%d",
 					ret);
-		}
 	} else {
-		if (value) {
-			register_value = DA9052_GPIO_EVEN_PORT_MODE;
 			ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
 						DA9052_GPIO_0_1_REG,
 						DA9052_GPIO_EVEN_PORT_MODE,
-						register_value);
+						value << DA9052_GPIO_EVEN_SHIFT);
 			if (ret != 0)
 				dev_err(gpio->da9052->dev,
 					"Failed to updated gpio even reg,%d",
 					ret);
-		}
 	}
 }
 
@@ -201,9 +196,9 @@
 	.direction_input = da9052_gpio_direction_input,
 	.direction_output = da9052_gpio_direction_output,
 	.to_irq = da9052_gpio_to_irq,
-	.can_sleep = 1;
-	.ngpio = 16;
-	.base = -1;
+	.can_sleep = 1,
+	.ngpio = 16,
+	.base = -1,
 };
 
 static int __devinit da9052_gpio_probe(struct platform_device *pdev)
@@ -259,17 +254,7 @@
 	},
 };
 
-static int __init da9052_gpio_init(void)
-{
-	return platform_driver_register(&da9052_gpio_driver);
-}
-module_init(da9052_gpio_init);
-
-static void __exit da9052_gpio_exit(void)
-{
-	return platform_driver_unregister(&da9052_gpio_driver);
-}
-module_exit(da9052_gpio_exit);
+module_platform_driver(da9052_gpio_driver);
 
 MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
 MODULE_DESCRIPTION("DA9052 GPIO Device Driver");
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 4e24436..e38dd0c 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -524,17 +524,7 @@
 	.remove = __devexit_p(bgpio_pdev_remove),
 };
 
-static int __init bgpio_platform_init(void)
-{
-	return platform_driver_register(&bgpio_driver);
-}
-module_init(bgpio_platform_init);
-
-static void __exit bgpio_platform_exit(void)
-{
-	platform_driver_unregister(&bgpio_driver);
-}
-module_exit(bgpio_platform_exit);
+module_platform_driver(bgpio_driver);
 
 #endif /* CONFIG_GPIO_GENERIC_PLATFORM */
 
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 813ac07..f2f000d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -201,8 +201,6 @@
 		goto out_iounmap_regs;
 	}
 
-	dev_info(&pdev->dev, "module %d: registered GPIO device\n",
-			     pdata->modno);
 	return 0;
 
 out_iounmap_regs:
@@ -239,20 +237,9 @@
 	.remove		= __devexit_p(ttl_remove),
 };
 
-static int __init ttl_init(void)
-{
-	return platform_driver_register(&ttl_driver);
-}
-
-static void __exit ttl_exit(void)
-{
-	platform_driver_unregister(&ttl_driver);
-}
+module_platform_driver(ttl_driver);
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
 MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:janz-ttl");
-
-module_init(ttl_init);
-module_exit(ttl_exit);
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e738..461958f 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@
 		  &chip->reg->regs[chip->ch].imask);
 }
 
+static void ioh_irq_disable(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct ioh_gpio *chip = gc->private;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&chip->spinlock, flags);
+	ien = ioread32(&chip->reg->regs[chip->ch].ien);
+	ien &= ~(1 << (d->irq - chip->irq_base));
+	iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+	spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct ioh_gpio *chip = gc->private;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&chip->spinlock, flags);
+	ien = ioread32(&chip->reg->regs[chip->ch].ien);
+	ien |= 1 << (d->irq - chip->irq_base);
+	iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+	spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
 	struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@
 	int i, j;
 	int ret = IRQ_NONE;
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++, chip++) {
 		reg_val = ioread32(&chip->reg->regs[i].istatus);
 		for (j = 0; j < num_ports[i]; j++) {
 			if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@
 	ct->chip.irq_mask = ioh_irq_mask;
 	ct->chip.irq_unmask = ioh_irq_unmask;
 	ct->chip.irq_set_type = ioh_irq_type;
+	ct->chip.irq_disable = ioh_irq_disable;
+	ct->chip.irq_enable = ioh_irq_enable;
 
 	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
 			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0..5cd04b6 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@
 	return 0;
 }
 
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	/* GPIO 28..31 are input only on MPC5121 */
+	if (gpio >= 28)
+		return -EINVAL;
+
+	return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
 	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@
 	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
 	gc->ngpio = MPC8XXX_GPIO_PINS;
 	gc->direction_input = mpc8xxx_gpio_dir_in;
-	gc->direction_output = mpc8xxx_gpio_dir_out;
-	if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-		gc->get = mpc8572_gpio_get;
-	else
-		gc->get = mpc8xxx_gpio_get;
+	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+		mpc8572_gpio_get : mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
 	gc->to_irq = mpc8xxx_gpio_to_irq;
 
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 1ebedfb..839624f 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -1150,8 +1150,8 @@
 
 	nmk_gpio_init_irq(nmk_chip);
 
-	dev_info(&dev->dev, "Bits %i-%i at address %p\n",
-		 nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+	dev_info(&dev->dev, "at address %p\n",
+		 nmk_chip->addr);
 	return 0;
 
 out_free:
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 3e1f1ec..2d1de9e 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -290,10 +290,7 @@
 	 * methods can't be called from sleeping contexts.
 	 */
 
-	dev_info(&client->dev, "gpios %d..%d on a %s%s\n",
-			gpio->chip.base,
-			gpio->chip.base + gpio->chip.ngpio - 1,
-			client->name,
+	dev_info(&client->dev, "%s\n",
 			client->irq ? " (irq ignored)" : "");
 
 	/* Let platform code set up the GPIOs and their users.
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index a6008e1..f060329 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -49,8 +49,8 @@
 
 enum pch_type_t {
 	INTEL_EG20T_PCH,
-	OKISEMI_ML7223m_IOH, /* OKISEMI ML7223 IOH PCIe Bus-m */
-	OKISEMI_ML7223n_IOH  /* OKISEMI ML7223 IOH PCIe Bus-n */
+	OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
+	OKISEMI_ML7223n_IOH  /* LAPIS Semiconductor ML7223 IOH PCIe Bus-n */
 };
 
 /* Specifies number of GPIO PINS */
@@ -524,6 +524,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90b..8f79c03 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@
 	int ret, irq, i;
 	static DECLARE_BITMAP(init_irq, NR_IRQS);
 
-	pdata = dev->dev.platform_data;
-	if (pdata == NULL)
-		return -ENODEV;
-
 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
 	if (chip == NULL)
 		return -ENOMEM;
@@ -350,6 +346,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl061_ids);
+
 static struct amba_driver pl061_gpio_driver = {
 	.drv = {
 		.name	= "pl061_gpio",
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 2762698..e97016a 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -227,18 +227,7 @@
 	.remove		= __devexit_p(rdc321x_gpio_remove),
 };
 
-static int __init rdc321x_gpio_init(void)
-{
-	return platform_driver_register(&rdc321x_gpio_driver);
-}
-
-static void __exit rdc321x_gpio_exit(void)
-{
-	platform_driver_unregister(&rdc321x_gpio_driver);
-}
-
-module_init(rdc321x_gpio_init);
-module_exit(rdc321x_gpio_exit);
+module_platform_driver(rdc321x_gpio_driver);
 
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
 MODULE_DESCRIPTION("RDC321x GPIO driver");
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 8662518..ab098ba 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -22,7 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/ioport.h>
 
 #include <asm/irq.h>
@@ -230,7 +230,7 @@
  * @chip: The gpio chip that is being configured.
  * @off: The offset for the GPIO being configured.
  *
- * The reverse of samsung_gpio_setcfg_2bit(). Will return a value whicg
+ * The reverse of samsung_gpio_setcfg_2bit(). Will return a value which
  * could be directly passed back to samsung_gpio_setcfg_2bit(), from the
  * S3C_GPIO_SPECIAL() macro.
  */
@@ -467,33 +467,42 @@
 #endif
 
 static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
-	{
+	[0] = {
 		.cfg_eint	= 0x0,
-	}, {
+	},
+	[1] = {
 		.cfg_eint	= 0x3,
-	}, {
+	},
+	[2] = {
 		.cfg_eint	= 0x7,
-	}, {
+	},
+	[3] = {
 		.cfg_eint	= 0xF,
-	}, {
+	},
+	[4] = {
 		.cfg_eint	= 0x0,
 		.set_config	= samsung_gpio_setcfg_2bit,
 		.get_config	= samsung_gpio_getcfg_2bit,
-	}, {
+	},
+	[5] = {
 		.cfg_eint	= 0x2,
 		.set_config	= samsung_gpio_setcfg_2bit,
 		.get_config	= samsung_gpio_getcfg_2bit,
-	}, {
+	},
+	[6] = {
 		.cfg_eint	= 0x3,
 		.set_config	= samsung_gpio_setcfg_2bit,
 		.get_config	= samsung_gpio_getcfg_2bit,
-	}, {
+	},
+	[7] = {
 		.set_config	= samsung_gpio_setcfg_2bit,
 		.get_config	= samsung_gpio_getcfg_2bit,
-	}, {
+	},
+	[8] = {
 		.set_pull	= exynos4_gpio_setpull,
 		.get_pull	= exynos4_gpio_getpull,
-	}, {
+	},
+	[9] = {
 		.cfg_eint	= 0x3,
 		.set_pull	= exynos4_gpio_setpull,
 		.get_pull	= exynos4_gpio_getpull,
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 1635158..8cadf4d 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -297,18 +297,7 @@
 	.remove		= __devexit_p(sch_gpio_remove),
 };
 
-static int __init sch_gpio_init(void)
-{
-	return platform_driver_register(&sch_gpio_driver);
-}
-
-static void __exit sch_gpio_exit(void)
-{
-	platform_driver_unregister(&sch_gpio_driver);
-}
-
-module_init(sch_gpio_init);
-module_exit(sch_gpio_exit);
+module_platform_driver(sch_gpio_driver);
 
 MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
 MODULE_DESCRIPTION("GPIO interface for Intel Poulsbo SCH");
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index c593bd4..031c6ad 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -359,18 +359,7 @@
 
 /*--------------------------------------------------------------------------*/
 
-static int __init timbgpio_init(void)
-{
-	return platform_driver_register(&timbgpio_platform_driver);
-}
-
-static void __exit timbgpio_exit(void)
-{
-	platform_driver_unregister(&timbgpio_platform_driver);
-}
-
-module_init(timbgpio_init);
-module_exit(timbgpio_exit);
+module_platform_driver(timbgpio_platform_driver);
 
 MODULE_DESCRIPTION("Timberdale GPIO driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 50e6bd1..26405ef 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -103,23 +103,12 @@
 	},
 };
 
-static int __init ucb1400_gpio_init(void)
-{
-	return platform_driver_register(&ucb1400_gpio_driver);
-}
-
-static void __exit ucb1400_gpio_exit(void)
-{
-	platform_driver_unregister(&ucb1400_gpio_driver);
-}
-
 void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data)
 {
 	ucbdata = data;
 }
 
-module_init(ucb1400_gpio_init);
-module_exit(ucb1400_gpio_exit);
+module_platform_driver(ucb1400_gpio_driver);
 
 MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 98723cb..82d5c20 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -571,15 +571,4 @@
 	},
 };
 
-static int __init vr41xx_giu_init(void)
-{
-	return platform_driver_register(&giu_device_driver);
-}
-
-static void __exit vr41xx_giu_exit(void)
-{
-	platform_driver_unregister(&giu_device_driver);
-}
-
-module_init(vr41xx_giu_init);
-module_exit(vr41xx_giu_exit);
+module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index ef5aabd..76ebfe5 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -315,17 +315,7 @@
 	.remove		= __devexit_p(vx855gpio_remove),
 };
 
-static int vx855gpio_init(void)
-{
-	return platform_driver_register(&vx855gpio_driver);
-}
-module_init(vx855gpio_init);
-
-static void vx855gpio_exit(void)
-{
-	platform_driver_unregister(&vx855gpio_driver);
-}
-module_exit(vx855gpio_exit);
+module_platform_driver(vx855gpio_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 96198f3..92ea535 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -117,6 +117,60 @@
 
 
 #ifdef CONFIG_DEBUG_FS
+static const char *wm8994_gpio_fn(u16 fn)
+{
+	switch (fn) {
+	case WM8994_GP_FN_PIN_SPECIFIC:
+		return "pin-specific";
+	case WM8994_GP_FN_GPIO:
+		return "GPIO";
+	case WM8994_GP_FN_SDOUT:
+		return "SDOUT";
+	case WM8994_GP_FN_IRQ:
+		return "IRQ";
+	case WM8994_GP_FN_TEMPERATURE:
+		return "Temperature";
+	case WM8994_GP_FN_MICBIAS1_DET:
+		return "MICBIAS1 detect";
+	case WM8994_GP_FN_MICBIAS1_SHORT:
+		return "MICBIAS1 short";
+	case WM8994_GP_FN_MICBIAS2_DET:
+		return "MICBIAS2 detect";
+	case WM8994_GP_FN_MICBIAS2_SHORT:
+		return "MICBIAS2 short";
+	case WM8994_GP_FN_FLL1_LOCK:
+		return "FLL1 lock";
+	case WM8994_GP_FN_FLL2_LOCK:
+		return "FLL2 lock";
+	case WM8994_GP_FN_SRC1_LOCK:
+		return "SRC1 lock";
+	case WM8994_GP_FN_SRC2_LOCK:
+		return "SRC2 lock";
+	case WM8994_GP_FN_DRC1_ACT:
+		return "DRC1 activity";
+	case WM8994_GP_FN_DRC2_ACT:
+		return "DRC2 activity";
+	case WM8994_GP_FN_DRC3_ACT:
+		return "DRC3 activity";
+	case WM8994_GP_FN_WSEQ_STATUS:
+		return "Write sequencer";
+	case WM8994_GP_FN_FIFO_ERROR:
+		return "FIFO error";
+	case WM8994_GP_FN_OPCLK:
+		return "OPCLK";
+	case WM8994_GP_FN_THW:
+		return "Thermal warning";
+	case WM8994_GP_FN_DCS_DONE:
+		return "DC servo";
+	case WM8994_GP_FN_FLL1_OUT:
+		return "FLL1 output";
+	case WM8994_GP_FN_FLL2_OUT:
+		return "FLL1 output";
+	default:
+		return "Unknown";
+	}
+}
+
 static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
 	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -148,8 +202,29 @@
 			continue;
 		}
 
-		/* No decode yet; note that GPIO2 is special */
-		seq_printf(s, "(%x)\n", reg);
+		if (reg & WM8994_GPN_DIR)
+			seq_printf(s, "in ");
+		else
+			seq_printf(s, "out ");
+
+		if (reg & WM8994_GPN_PU)
+			seq_printf(s, "pull up ");
+
+		if (reg & WM8994_GPN_PD)
+			seq_printf(s, "pull down ");
+
+		if (reg & WM8994_GPN_POL)
+			seq_printf(s, "inverted ");
+		else
+			seq_printf(s, "noninverted ");
+
+		if (reg & WM8994_GPN_OP_CFG)
+			seq_printf(s, "open drain ");
+		else
+			seq_printf(s, "CMOS ");
+
+		seq_printf(s, "%s (%x)\n",
+			   wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
 	}
 }
 #else
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 0ce6ac9..79b0fe8 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -206,7 +206,6 @@
 		       np->full_name, status);
 		return status;
 	}
-	pr_info("XGpio: %s: registered\n", np->full_name);
 	return 0;
 }
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a971e3d..17fdf4b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -114,7 +114,7 @@
 }
 
 /* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+struct gpio_chip *gpio_to_chip(unsigned gpio)
 {
 	return gpio_desc[gpio].chip;
 }
@@ -1089,6 +1089,10 @@
 	if (status)
 		goto fail;
 
+	pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+		chip->base, chip->base + chip->ngpio - 1,
+		chip->label ? : "generic");
+
 	return 0;
 fail:
 	/* failures here can mean systems won't boot... */
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0f9ef9b..62c3675 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -72,7 +72,7 @@
 	return 0;
 }
 
-static char *drm_devnode(struct device *dev, mode_t *mode)
+static char *drm_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
 }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e0..004b048 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@
 	const struct intel_device_info *info = INTEL_INFO(dev);
 
 	seq_printf(m, "gen: %d\n", info->gen);
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
 	B(is_mobile);
 	B(is_i85x);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c5..a9ae374 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@
 
 	diff1 = now - dev_priv->last_time1;
 
+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->chipset_power;
+
 	count1 = I915_READ(DMIEC);
 	count2 = I915_READ(DDREC);
 	count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@
 	dev_priv->last_count1 = total_count;
 	dev_priv->last_time1 = now;
 
+	dev_priv->chipset_power = ret;
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa91..a1103fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@
 MODULE_PARM_DESC(powersave,
 		"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-		"Use semaphores for inter-ring sync (default: false)");
+		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6 (default: true)");
+		"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@
 	}
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -344,6 +344,22 @@
 		udelay(10);
 }
 
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+		udelay(10);
+
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+	POSTING_READ(FORCEWAKE_MT);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+		udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@
 
 	/* Forcewake is atomic in case we get in here without the lock */
 	if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-		__gen6_gt_force_wake_get(dev_priv);
+		dev_priv->display.force_wake_get(dev_priv);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+	POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (atomic_dec_and_test(&dev_priv->forcewake_count))
-		__gen6_gt_force_wake_put(dev_priv);
+		dev_priv->display.force_wake_put(dev_priv);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	(((dev_priv)->info->gen >= 6) && \
-	((reg) < 0x40000) && \
-	((reg) != FORCEWAKE))
+	 ((reg) < 0x40000) &&		 \
+	 ((reg) != FORCEWAKE) &&	 \
+	 ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b9..554bef7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;
 
 struct intel_opregion {
 	struct opregion_header *header;
@@ -221,6 +222,8 @@
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
+	void (*force_wake_get)(struct drm_i915_private *dev_priv);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -710,6 +713,7 @@
 
 	u64 last_count1;
 	unsigned long last_time1;
+	unsigned long chipset_power;
 	u64 last_count2;
 	struct timespec last_time2;
 	unsigned long gfx_power;
@@ -998,11 +1002,11 @@
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
@@ -1308,6 +1312,11 @@
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	(((dev_priv)->info->gen >= 6) && \
-	((reg) < 0x40000) && \
-	((reg) != FORCEWAKE))
+	 ((reg) < 0x40000) &&		 \
+	 ((reg) != FORCEWAKE) &&	 \
+	 ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60ff1b6..8359dc7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2026,13 +2026,8 @@
 	 * to handle this, the waiter on a request often wants an associated
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
-	 *
-	 * To avoid a recursion with the ilk VT-d workaround (that calls
-	 * gpu_idle when unbinding objects with interruptible==false) don't
-	 * retire requests in that case (because it might call unbind if the
-	 * active list holds the last reference to the object).
 	 */
-	if (ret == 0 && dev_priv->mm.interruptible)
+	if (ret == 0)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83..b9da890 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
 	uint32_t invalidate_domains;
@@ -746,6 +747,22 @@
 	return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	if (i915_semaphores >= 0)
+		return i915_semaphores;
+
+	/* Disable semaphores on SNB */
+	if (INTEL_INFO(dev)->gen == 6)
+		return 0;
+
+	return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@
 		return 0;
 
 	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+	if (!intel_enable_semaphores(obj->base.dev))
 		return i915_gem_object_wait_rendering(obj);
 
 	idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc8..a26d5b0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3303,10 +3303,10 @@
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER_A   (0)
-#define  TRANSCODER_B   (1 << 30)
-#define  TRANSCODER(pipe)	((pipe) << 30)
-#define  TRANSCODER_MASK   (1 << 30)
+#define  TRANSCODER(pipe)       ((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
+#define  TRANSCODER_MASK        (1 << 30)
+#define  TRANSCODER_MASK_CPT    (3 << 29)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
@@ -3447,8 +3447,30 @@
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB		(0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB		(0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB		(0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB		(0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB		(0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB		(0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB		(0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
+
 #define  FORCEWAKE				0xA18C
 #define  FORCEWAKE_ACK				0x130090
+#define  FORCEWAKE_MT				0xa188 /* multi-threaded */
+#define  FORCEWAKE_MT_ACK			0x130040
+#define  ECOBUS					0xa180
+#define    FORCEWAKE_MT_ENABLE			(1<<5)
 
 #define  GT_FIFO_FREE_ENTRIES			0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863..daa5743 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -4670,6 +4670,7 @@
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs.  Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@
  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
  *    Displays may support a restricted set as well, check EDID and clamp as
  *      appropriate.
+ *    DP may want to dither down to 6bpc to fit larger modes
  *
  * RETURNS:
  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-					 unsigned int *pipe_bpp)
+					 unsigned int *pipe_bpp,
+					 struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@
 		}
 	}
 
+	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+		display_bpc = 6;
+	}
+
 	/*
 	 * We could just drive the pipe at the highest bpc all the time and
 	 * enable dithering as needed, but that costs bandwidth.  So choose
@@ -5019,6 +5027,16 @@
 			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
+	/* default to 8bpc */
+	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+	if (is_dp) {
+		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+			pipeconf |= PIPECONF_BPP_6 |
+				    PIPECONF_DITHER_EN |
+				    PIPECONF_DITHER_TYPE_SP;
+		}
+	}
+
 	dpll |= DPLL_VCO_ENABLE;
 
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@
 	/* determine panel color depth */
 	temp = I915_READ(PIPECONF(pipe));
 	temp &= ~PIPE_BPC_MASK;
-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
 	switch (pipe_bpp) {
 	case 18:
 		temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
+	ret = drm_vblank_get(dev, intel_crtc->pipe);
+	if (ret)
+		goto free_work;
+
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
+		drm_vblank_put(dev, intel_crtc->pipe);
 
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		return -EBUSY;
@@ -7212,10 +7235,6 @@
 
 	crtc->fb = fb;
 
-	ret = drm_vblank_get(dev, intel_crtc->pipe);
-	if (ret)
-		goto cleanup_objs;
-
 	work->pending_flip_obj = obj;
 
 	work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@
 
 cleanup_pending:
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@
 	intel_crtc->unpin_work = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
+	drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
 	kfree(work);
 
 	return ret;
@@ -7887,6 +7907,31 @@
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+	/*
+	 * Respect the kernel parameter if it is set
+	 */
+	if (i915_enable_rc6 >= 0)
+		return i915_enable_rc6;
+
+	/*
+	 * Disable RC6 on Ironlake
+	 */
+	if (INTEL_INFO(dev)->gen == 5)
+		return 0;
+
+	/*
+	 * Disable rc6 on Sandybridge
+	 */
+	if (INTEL_INFO(dev)->gen == 6) {
+		DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+		return 0;
+	}
+	DRM_DEBUG_DRIVER("RC6 enabled\n");
+	return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7968,7 @@
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-	if (i915_enable_rc6)
+	if (intel_enable_rc6(dev_priv->dev))
 		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
 			GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8372,7 +8417,7 @@
 	/* rc6 disabled by default due to repeated reports of hanging during
 	 * boot and resume.
 	 */
-	if (!i915_enable_rc6)
+	if (!intel_enable_rc6(dev))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8536,28 @@
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+		/* IVB configs may use multi-threaded forcewake */
+		if (IS_IVYBRIDGE(dev)) {
+			u32	ecobus;
+
+			mutex_lock(&dev->struct_mutex);
+			__gen6_gt_force_wake_mt_get(dev_priv);
+			ecobus = I915_READ(ECOBUS);
+			__gen6_gt_force_wake_mt_put(dev_priv);
+			mutex_unlock(&dev->struct_mutex);
+
+			if (ecobus & FORCEWAKE_MT_ENABLE) {
+				DRM_DEBUG_KMS("Using MT version of forcewake\n");
+				dev_priv->display.force_wake_get =
+					__gen6_gt_force_wake_mt_get;
+				dev_priv->display.force_wake_put =
+					__gen6_gt_force_wake_mt_put;
+			}
+		}
+
 		if (HAS_PCH_IBX(dev))
 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
 		else if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358f..92b041b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@
  */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int bpp = 24;
 
-	if (intel_crtc)
+	if (check_bpp)
+		bpp = check_bpp;
+	else if (intel_crtc)
 		bpp = intel_crtc->bpp;
 
 	return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
 	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_rate, mode_rate;
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
 		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@
 			return MODE_PANEL;
 	}
 
-	if (intel_dp_link_required(intel_dp, mode->clock)
-	    > intel_dp_max_data_rate(max_link_clock, max_lanes))
-		return MODE_CLOCK_HIGH;
+	mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+	if (mode_rate > max_rate) {
+		mode_rate = intel_dp_link_required(intel_dp,
+						   mode->clock, 18);
+		if (mode_rate > max_rate)
+			return MODE_CLOCK_HIGH;
+		else
+			mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+	}
 
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@
 	 * clock divider.
 	 */
 	if (is_cpu_edp(intel_dp)) {
-		if (IS_GEN6(dev))
-			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+		if (IS_GEN6(dev) || IS_GEN7(dev))
+			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
 		else
 			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
 	} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+	int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-			if (intel_dp_link_required(intel_dp, mode->clock)
+			if (intel_dp_link_required(intel_dp, mode->clock, bpp)
 					<= link_avail) {
 				intel_dp->link_bw = bws[clock];
 				intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@
 	}
 
 	/*
-	 * There are three kinds of DP registers:
+	 * There are four kinds of DP registers:
 	 *
 	 * 	IBX PCH
-	 * 	CPU
+	 * 	SNB CPU
+	 *	IVB CPU
 	 * 	CPT PCH
 	 *
 	 * IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		intel_dp->DP |= intel_crtc->pipe << 29;
+
+		/* don't forget the required PLL settings for eDP */
+		intel_dp->DP |= DP_PLL_ENABLE;
+		if (adjusted_mode->clock < 200000)
+			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+		else
+			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
 		intel_dp->DP |= intel_dp->color_range;
 
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
-#define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT	    DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-	case DP_TRAIN_VOLTAGE_SWING_400:
-		return DP_TRAIN_PRE_EMPHASIS_6;
-	case DP_TRAIN_VOLTAGE_SWING_600:
-		return DP_TRAIN_PRE_EMPHASIS_6;
-	case DP_TRAIN_VOLTAGE_SWING_800:
-		return DP_TRAIN_PRE_EMPHASIS_3_5;
-	case DP_TRAIN_VOLTAGE_SWING_1200:
-	default:
-		return DP_TRAIN_PRE_EMPHASIS_0;
+	struct drm_device *dev = intel_dp->base.base.dev;
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_800;
+	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_1200;
+	else
+		return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+	struct drm_device *dev = intel_dp->base.base.dev;
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
 	}
 }
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
 	uint8_t	*adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
-	int voltage_max;
+	uint8_t voltage_max;
+	uint8_t preemph_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
 		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@
 			p = this_p;
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-		voltage_max = I830_DP_VOLTAGE_MAX_CPT;
-	else
-		voltage_max = I830_DP_VOLTAGE_MAX;
+	voltage_max = intel_dp_voltage_max(intel_dp);
 	if (v >= voltage_max)
 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-	if (p >= intel_dp_pre_emphasis_max(v))
-		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+	if (p >= preemph_max)
+		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
 	for (lane = 0; lane < 4; lane++)
 		intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@
 	}
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_600MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_500MV_0DB_IVB;
+	}
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
@@ -1599,7 +1684,8 @@
 				  DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 	else
 		DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@
 		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
 		uint32_t    signal_levels;
 
-		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
@@ -1622,7 +1712,7 @@
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@
 			break;
 		}
 
-		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
@@ -1711,7 +1804,7 @@
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
 	else
 		reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@
 		udelay(100);
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
 	} else {
@@ -1794,7 +1887,7 @@
 	msleep(17);
 
 	if (is_edp(intel_dp)) {
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			DP |= DP_LINK_TRAIN_OFF_CPT;
 		else
 			DP |= DP_LINK_TRAIN_OFF;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604..a1b4343 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a..e441911 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus AT5NM10T-I",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7..04d79fd 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
 	} else {
-		if (IS_PINEVIEW(dev)) {
+		if (INTEL_INFO(dev)->gen < 4)
 			max >>= 17;
-		} else {
+		else
 			max >>= 16;
-			if (INTEL_INFO(dev)->gen < 4)
-				max &= ~1;
-		}
 
 		if (is_backlight_combination_mode(dev))
 			max *= 0xff;
@@ -203,13 +200,12 @@
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-		if (IS_PINEVIEW(dev))
+		if (INTEL_INFO(dev)->gen < 4)
 			val >>= 1;
 
 		if (is_backlight_combination_mode(dev)) {
 			u8 lbpc;
 
-			val &= ~1;
 			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
 			val *= lbpc;
 		}
@@ -246,11 +242,9 @@
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);
-	if (IS_PINEVIEW(dev)) {
-		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+	if (INTEL_INFO(dev)->gen < 4)
 		level <<= 1;
-	} else
-		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb2..f7b9268 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
 #define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@
 		}
 		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
 	}
-	if (intel_crtc->pipe == 1)
-		sdvox |= SDVO_PIPE_B_SELECT;
+
+	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+		sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+	else
+		sdvox |= TRANSCODER(intel_crtc->pipe);
+
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@
 	return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+				  struct edid *edid)
+{
+	bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+	bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+	DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+		      connector_is_digital, monitor_is_digital);
+	return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@
 		if (edid == NULL)
 			edid = intel_sdvo_get_analog_edid(connector);
 		if (edid != NULL) {
-			if (edid->input & DRM_EDID_INPUT_DIGITAL)
-				ret = connector_status_disconnected;
-			else
+			if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+							      edid))
 				ret = connector_status_connected;
+			else
+				ret = connector_status_disconnected;
+
 			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
@@ -1402,11 +1421,8 @@
 		edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-		if (connector_is_digital == monitor_is_digital) {
+		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+						      edid)) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 4aa6f34..6b7b22f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2006-2007 Intel Corporation
+ * Copyright © 2006-2007 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5e00d16..92c9628 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3276,6 +3276,18 @@
 			rdev->accel_working = false;
 		}
 	}
+
+	/* Don't start up if the MC ucode is missing on BTC parts.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+			DRM_ERROR("radeon: MC ucode required for NI+.\n");
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d24baf3..5082d17 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2560,7 +2560,11 @@
 
 	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
 	rdev->pm.current_clock_mode_index = 0;
-	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	if (rdev->pm.default_power_state_index >= 0)
+		rdev->pm.current_vddc =
+			rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	else
+		rdev->pm.current_vddc = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a..dc27970 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@
 			     struct ttm_object_file *tfile,
 			     int id,
 			     struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+				  struct ttm_object_file *tfile,
+				  uint32_t handle,
+				  struct vmw_surface **out_surf,
+				  struct vmw_dma_buffer **out_buf);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
 			    struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a..a0c2f12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
+	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
@@ -41,7 +42,12 @@
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
 
-	hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+	hwversion = ioread32(fifo_mem +
+			     ((fifo->capabilities &
+			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+			      SVGA_FIFO_3D_HWVERSION_REVISED :
+			      SVGA_FIFO_3D_HWVERSION));
+
 	if (hwversion == 0)
 		return false;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5ff561d..66917c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
-		param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+		param->value =
+			ioread32(fifo_mem +
+				 ((fifo->capabilities &
+				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				  SVGA_FIFO_3D_HWVERSION_REVISED :
+				  SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
 	default:
@@ -166,13 +172,7 @@
 		ret = -EINVAL;
 		goto out_no_fb;
 	}
-
 	vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-	if (!vfb->dmabuf) {
-		DRM_ERROR("Framebuffer not dmabuf backed.\n");
-		ret = -EINVAL;
-		goto out_no_fb;
-	}
 
 	ret = ttm_read_lock(&vmaster->lock, true);
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 37d4054..f94b33a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+			int num_rects,
+			struct vmw_clip_rect clip,
+			SVGASignedRect *out_rects,
+			int *out_num)
+{
+	int i, k;
+
+	for (i = 0, k = 0; i < num_rects; i++) {
+		int x1 = max_t(int, clip.x1, rects[i].x1);
+		int y1 = max_t(int, clip.y1, rects[i].y1);
+		int x2 = min_t(int, clip.x2, rects[i].x2);
+		int y2 = min_t(int, clip.y2, rects[i].y2);
+
+		if (x1 >= x2)
+			continue;
+		if (y1 >= y2)
+			continue;
+
+		out_rects[k].left   = x1;
+		out_rects[k].top    = y1;
+		out_rects[k].right  = x2;
+		out_rects[k].bottom = y2;
+		k++;
+	}
+
+	*out_num = k;
+}
+
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
 	if (du->cursor_surface)
@@ -82,6 +120,43 @@
 	return 0;
 }
 
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+			     struct vmw_dma_buffer *dmabuf,
+			     u32 width, u32 height,
+			     u32 hotspotX, u32 hotspotY)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_num;
+	void *virtual;
+	bool dummy;
+	int ret;
+
+	kmap_offset = 0;
+	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("reserve failed\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto err_unreserve;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+				      hotspotX, hotspotY);
+
+	ttm_bo_kunmap(&map);
+err_unreserve:
+	ttm_bo_unreserve(&dmabuf->base);
+
+	return ret;
+}
+
+
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y)
 {
@@ -110,24 +185,21 @@
 		return -EINVAL;
 
 	if (handle) {
-		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-						     handle, &surface);
-		if (!ret) {
-			if (!surface->snooper.image) {
-				DRM_ERROR("surface not suitable for cursor\n");
-				vmw_surface_unreference(&surface);
-				return -EINVAL;
-			}
-		} else {
-			ret = vmw_user_dmabuf_lookup(tfile,
-						     handle, &dmabuf);
-			if (ret) {
-				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
-				return -EINVAL;
-			}
+		ret = vmw_user_lookup_handle(dev_priv, tfile,
+					     handle, &surface, &dmabuf);
+		if (ret) {
+			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+			return -EINVAL;
 		}
 	}
 
+	/* need to do this before taking down old image */
+	if (surface && !surface->snooper.image) {
+		DRM_ERROR("surface not suitable for cursor\n");
+		vmw_surface_unreference(&surface);
+		return -EINVAL;
+	}
+
 	/* takedown old cursor */
 	if (du->cursor_surface) {
 		du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@
 		vmw_cursor_update_image(dev_priv, surface->snooper.image,
 					64, 64, du->hotspot_x, du->hotspot_y);
 	} else if (dmabuf) {
-		struct ttm_bo_kmap_obj map;
-		unsigned long kmap_offset;
-		unsigned long kmap_num;
-		void *virtual;
-		bool dummy;
-
 		/* vmw_user_surface_lookup takes one reference */
 		du->cursor_dmabuf = dmabuf;
 
-		kmap_offset = 0;
-		kmap_num = (64*64*4) >> PAGE_SHIFT;
-
-		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("reserve failed\n");
-			return -EINVAL;
-		}
-
-		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
-		if (unlikely(ret != 0))
-			goto err_unreserve;
-
-		virtual = ttm_kmap_obj_virtual(&map, &dummy);
-		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
-					du->hotspot_x, du->hotspot_y);
-
-		ttm_bo_kunmap(&map);
-err_unreserve:
-		ttm_bo_unreserve(&dmabuf->base);
-
+		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+					       du->hotspot_x, du->hotspot_y);
 	} else {
 		vmw_cursor_update_position(dev_priv, false, 0, 0);
 		return 0;
@@ -377,8 +424,9 @@
 				struct drm_clip_rect *clips,
 				unsigned num_clips, int inc)
 {
-	struct drm_clip_rect *clips_ptr;
 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *clips_ptr;
+	struct drm_clip_rect *tmp;
 	struct drm_crtc *crtc;
 	size_t fifo_size;
 	int i, num_units;
@@ -391,7 +439,6 @@
 	} *cmd;
 	SVGASignedRect *blits;
 
-
 	num_units = 0;
 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
 			    head) {
@@ -402,13 +449,24 @@
 
 	BUG_ON(!clips || !num_clips);
 
+	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+	if (unlikely(tmp == NULL)) {
+		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
 	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
 	cmd = kzalloc(fifo_size, GFP_KERNEL);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Temporary fifo memory alloc failed.\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_free_tmp;
 	}
 
+	/* setup blits pointer */
+	blits = (SVGASignedRect *)&cmd[1];
+
+	/* initial clip region */
 	left = clips->x1;
 	right = clips->x2;
 	top = clips->y1;
@@ -434,45 +492,60 @@
 	cmd->body.srcRect.bottom = bottom;
 
 	clips_ptr = clips;
-	blits = (SVGASignedRect *)&cmd[1];
 	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-		blits[i].left   = clips_ptr->x1 - left;
-		blits[i].right  = clips_ptr->x2 - left;
-		blits[i].top    = clips_ptr->y1 - top;
-		blits[i].bottom = clips_ptr->y2 - top;
+		tmp[i].x1 = clips_ptr->x1 - left;
+		tmp[i].x2 = clips_ptr->x2 - left;
+		tmp[i].y1 = clips_ptr->y1 - top;
+		tmp[i].y2 = clips_ptr->y2 - top;
 	}
 
 	/* do per unit writing, reuse fifo for each */
 	for (i = 0; i < num_units; i++) {
 		struct vmw_display_unit *unit = units[i];
-		int clip_x1 = left - unit->crtc.x;
-		int clip_y1 = top - unit->crtc.y;
-		int clip_x2 = right - unit->crtc.x;
-		int clip_y2 = bottom - unit->crtc.y;
+		struct vmw_clip_rect clip;
+		int num;
+
+		clip.x1 = left - unit->crtc.x;
+		clip.y1 = top - unit->crtc.y;
+		clip.x2 = right - unit->crtc.x;
+		clip.y2 = bottom - unit->crtc.y;
 
 		/* skip any crtcs that misses the clip region */
-		if (clip_x1 >= unit->crtc.mode.hdisplay ||
-		    clip_y1 >= unit->crtc.mode.vdisplay ||
-		    clip_x2 <= 0 || clip_y2 <= 0)
+		if (clip.x1 >= unit->crtc.mode.hdisplay ||
+		    clip.y1 >= unit->crtc.mode.vdisplay ||
+		    clip.x2 <= 0 || clip.y2 <= 0)
 			continue;
 
+		/*
+		 * In order for the clip rects to be correctly scaled
+		 * the src and dest rects need to be the same size.
+		 */
+		cmd->body.destRect.left = clip.x1;
+		cmd->body.destRect.right = clip.x2;
+		cmd->body.destRect.top = clip.y1;
+		cmd->body.destRect.bottom = clip.y2;
+
+		/* create a clip rect of the crtc in dest coords */
+		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+		clip.x1 = 0 - clip.x1;
+		clip.y1 = 0 - clip.y1;
+
 		/* need to reset sid as it is changed by execbuf */
 		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
 		cmd->body.destScreenId = unit->unit;
 
-		/*
-		 * The blit command is a lot more resilient then the
-		 * readback command when it comes to clip rects. So its
-		 * okay to go out of bounds.
-		 */
+		/* clip and write blits to cmd stream */
+		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-		cmd->body.destRect.left = clip_x1;
-		cmd->body.destRect.right = clip_x2;
-		cmd->body.destRect.top = clip_y1;
-		cmd->body.destRect.bottom = clip_y2;
+		/* if no cliprects hit skip this */
+		if (num == 0)
+			continue;
 
 
+		/* recalculate package length */
+		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
 					  fifo_size, 0, NULL);
 
@@ -480,7 +553,10 @@
 			break;
 	}
 
+
 	kfree(cmd);
+out_free_tmp:
+	kfree(tmp);
 
 	return ret;
 }
@@ -556,6 +632,10 @@
 	 * Sanity checks.
 	 */
 
+	/* Surface must be marked as a scanout. */
+	if (unlikely(!surface->scanout))
+		return -EINVAL;
+
 	if (unlikely(surface->mip_levels[0] != 1 ||
 		     surface->num_sizes != 1 ||
 		     surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@
 			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
 			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
 			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+			int move_x, move_y;
 
 			/* skip any crtcs that misses the clip region */
 			if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@
 			    clip_x2 <= 0 || clip_y2 <= 0)
 				continue;
 
+			/* clip size to crtc size */
+			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+			/* translate both src and dest to bring clip into screen */
+			move_x = min_t(int, clip_x1, 0);
+			move_y = min_t(int, clip_y1, 0);
+
+			/* actual translate done here */
 			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
 			blits[hit_num].body.destScreenId = unit->unit;
-			blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
-			blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
-			blits[hit_num].body.destRect.left = clip_x1;
-			blits[hit_num].body.destRect.top = clip_y1;
+			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+			blits[hit_num].body.destRect.left = clip_x1 - move_x;
+			blits[hit_num].body.destRect.top = clip_y1 - move_y;
 			blits[hit_num].body.destRect.right = clip_x2;
 			blits[hit_num].body.destRect.bottom = clip_y2;
 			hit_num++;
@@ -1003,7 +1093,6 @@
 	struct vmw_surface *surface = NULL;
 	struct vmw_dma_buffer *bo = NULL;
 	struct ttm_base_object *user_obj;
-	u64 required_size;
 	int ret;
 
 	/**
@@ -1012,8 +1101,9 @@
 	 * requested framebuffer.
 	 */
 
-	required_size = mode_cmd->pitch * mode_cmd->height;
-	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+	if (!vmw_kms_validate_mode_vram(dev_priv,
+					mode_cmd->pitch,
+					mode_cmd->height)) {
 		DRM_ERROR("VRAM size is too small for requested mode.\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1033,46 +1123,29 @@
 		return ERR_PTR(-ENOENT);
 	}
 
-	/**
-	 * End conditioned code.
-	 */
-
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-					     mode_cmd->handle, &surface);
+	/* returns either a dmabuf or surface */
+	ret = vmw_user_lookup_handle(dev_priv, tfile,
+				     mode_cmd->handle,
+				     &surface, &bo);
 	if (ret)
-		goto try_dmabuf;
+		goto err_out;
 
-	if (!surface->scanout)
-		goto err_not_scanout;
+	/* Create the new framebuffer depending on what we got back */
+	if (bo)
+		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+						     mode_cmd);
+	else if (surface)
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+						      surface, &vfb, mode_cmd);
+	else
+		BUG();
 
-	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
-					      &vfb, mode_cmd);
-
-	/* vmw_user_surface_lookup takes one ref so does new_fb */
-	vmw_surface_unreference(&surface);
-
-	if (ret) {
-		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
-		ttm_base_object_unref(&user_obj);
-		return ERR_PTR(ret);
-	} else
-		vfb->user_obj = user_obj;
-	return &vfb->base;
-
-try_dmabuf:
-	DRM_INFO("%s: trying buffer\n", __func__);
-
-	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
-	if (ret) {
-		DRM_ERROR("failed to find buffer: %i\n", ret);
-		return ERR_PTR(-ENOENT);
-	}
-
-	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-					     mode_cmd);
-
-	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
-	vmw_dmabuf_unreference(&bo);
+err_out:
+	/* vmw_user_lookup_handle takes one ref so does new_fb */
+	if (bo)
+		vmw_dmabuf_unreference(&bo);
+	if (surface)
+		vmw_surface_unreference(&surface);
 
 	if (ret) {
 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@
 		vfb->user_obj = user_obj;
 
 	return &vfb->base;
-
-err_not_scanout:
-	DRM_ERROR("surface not marked as scanout\n");
-	/* vmw_user_surface_lookup takes one ref */
-	vmw_surface_unreference(&surface);
-	ttm_base_object_unref(&user_obj);
-
-	return ERR_PTR(-EINVAL);
 }
 
 static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@
 		    uint32_t num_clips)
 {
 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *tmp;
 	struct drm_crtc *crtc;
 	size_t fifo_size;
 	int i, k, num_units;
 	int ret = 0; /* silence warning */
+	int left, right, top, bottom;
 
 	struct {
 		SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@
 	BUG_ON(surface == NULL);
 	BUG_ON(!clips || !num_clips);
 
+	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+	if (unlikely(tmp == NULL)) {
+		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
 	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
 	cmd = kmalloc(fifo_size, GFP_KERNEL);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_free_tmp;
+	}
+
+	left = clips->x;
+	right = clips->x + clips->w;
+	top = clips->y;
+	bottom = clips->y + clips->h;
+
+	for (i = 1; i < num_clips; i++) {
+		left = min_t(int, left, (int)clips[i].x);
+		right = max_t(int, right, (int)clips[i].x + clips[i].w);
+		top = min_t(int, top, (int)clips[i].y);
+		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
 	}
 
 	/* only need to do this once */
 	memset(cmd, 0, fifo_size);
 	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-	cmd->body.srcRect.left = 0;
-	cmd->body.srcRect.right = surface->sizes[0].width;
-	cmd->body.srcRect.top = 0;
-	cmd->body.srcRect.bottom = surface->sizes[0].height;
 
 	blits = (SVGASignedRect *)&cmd[1];
+
+	cmd->body.srcRect.left = left;
+	cmd->body.srcRect.right = right;
+	cmd->body.srcRect.top = top;
+	cmd->body.srcRect.bottom = bottom;
+
 	for (i = 0; i < num_clips; i++) {
-		blits[i].left   = clips[i].x;
-		blits[i].right  = clips[i].x + clips[i].w;
-		blits[i].top    = clips[i].y;
-		blits[i].bottom = clips[i].y + clips[i].h;
+		tmp[i].x1 = clips[i].x - left;
+		tmp[i].x2 = clips[i].x + clips[i].w - left;
+		tmp[i].y1 = clips[i].y - top;
+		tmp[i].y2 = clips[i].y + clips[i].h - top;
 	}
 
 	for (k = 0; k < num_units; k++) {
 		struct vmw_display_unit *unit = units[k];
-		int clip_x1 = destX - unit->crtc.x;
-		int clip_y1 = destY - unit->crtc.y;
-		int clip_x2 = clip_x1 + surface->sizes[0].width;
-		int clip_y2 = clip_y1 + surface->sizes[0].height;
+		struct vmw_clip_rect clip;
+		int num;
+
+		clip.x1 = left + destX - unit->crtc.x;
+		clip.y1 = top + destY - unit->crtc.y;
+		clip.x2 = right + destX - unit->crtc.x;
+		clip.y2 = bottom + destY - unit->crtc.y;
 
 		/* skip any crtcs that misses the clip region */
-		if (clip_x1 >= unit->crtc.mode.hdisplay ||
-		    clip_y1 >= unit->crtc.mode.vdisplay ||
-		    clip_x2 <= 0 || clip_y2 <= 0)
+		if (clip.x1 >= unit->crtc.mode.hdisplay ||
+		    clip.y1 >= unit->crtc.mode.vdisplay ||
+		    clip.x2 <= 0 || clip.y2 <= 0)
 			continue;
 
+		/*
+		 * In order for the clip rects to be correctly scaled
+		 * the src and dest rects need to be the same size.
+		 */
+		cmd->body.destRect.left = clip.x1;
+		cmd->body.destRect.right = clip.x2;
+		cmd->body.destRect.top = clip.y1;
+		cmd->body.destRect.bottom = clip.y2;
+
+		/* create a clip rect of the crtc in dest coords */
+		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+		clip.x1 = 0 - clip.x1;
+		clip.y1 = 0 - clip.y1;
+
 		/* need to reset sid as it is changed by execbuf */
 		cmd->body.srcImage.sid = sid;
-
 		cmd->body.destScreenId = unit->unit;
 
-		/*
-		 * The blit command is a lot more resilient then the
-		 * readback command when it comes to clip rects. So its
-		 * okay to go out of bounds.
-		 */
+		/* clip and write blits to cmd stream */
+		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-		cmd->body.destRect.left = clip_x1;
-		cmd->body.destRect.right = clip_x2;
-		cmd->body.destRect.top = clip_y1;
-		cmd->body.destRect.bottom = clip_y2;
+		/* if no cliprects hit skip this */
+		if (num == 0)
+			continue;
 
+		/* recalculate package length */
+		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
 					  fifo_size, 0, NULL);
 
@@ -1189,6 +1291,8 @@
 	}
 
 	kfree(cmd);
+out_free_tmp:
+	kfree(tmp);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5..e1cb855 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -62,9 +62,14 @@
 int vmw_cursor_update_image(struct vmw_private *dev_priv,
 			    u32 *image, u32 width, u32 height,
 			    u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+			     struct vmw_dma_buffer *dmabuf,
+			     u32 width, u32 height,
+			     u32 hotspotX, u32 hotspotY);
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y);
 
+
 /**
  * Base class display unit.
  *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e39..8f8dbd4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@
 {
 	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
 	struct vmw_legacy_display_unit *entry;
+	struct vmw_display_unit *du = NULL;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_crtc *crtc = NULL;
-	int i = 0;
+	int i = 0, ret;
 
 	/* If there is no display topology the host just assumes
 	 * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@
 
 	lds->last_num_active = lds->num_active;
 
+
+	/* Find the first du with a cursor. */
+	list_for_each_entry(entry, &lds->active, active) {
+		du = &entry->base;
+
+		if (!du->cursor_dmabuf)
+			continue;
+
+		ret = vmw_cursor_update_dmabuf(dev_priv,
+					       du->cursor_dmabuf,
+					       64, 64,
+					       du->hotspot_x,
+					       du->hotspot_y);
+		if (ret == 0)
+			break;
+
+		DRM_ERROR("Could not update cursor image\n");
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4c..1c7f09e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@
 		write_unlock(lock);
 }
 
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   struct vmw_surface **out_surf,
+			   struct vmw_dma_buffer **out_buf)
+{
+	int ret;
+
+	BUG_ON(*out_surf || *out_buf);
+
+	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+	if (!ret)
+		return 0;
+
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	return ret;
+}
+
 
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
 				   struct ttm_object_file *tfile,
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 070f93a..47ed74c 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -9,10 +9,10 @@
  *   - contains two reports, one for each port (HID_QUIRK_MULTI_INPUT)
  *
  *  0e8f:0003 "GreenAsia Inc.    USB Joystick     "
- *   - tested with König Gaming gamepad
+ *   - tested with König Gaming gamepad
  *
  *  0e8f:0003 "GASIA USB Gamepad"
- *   - another version of the König gamepad
+ *   - another version of the König gamepad
  *
  *  Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
  */
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index c40afc5..f23456b 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -3,7 +3,7 @@
  *
  * Based on hid-gyration.c
  *
- * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
+ * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
  */
 
 /*
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 4ef02b2..7c297d3 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -859,7 +859,7 @@
 	.llseek		= noop_llseek,
 };
 
-static char *hiddev_devnode(struct device *dev, mode_t *mode)
+static char *hiddev_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
 }
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 0658173..bc445d7 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -354,19 +354,4 @@
 	.id_table =	usb_kbd_id_table,
 };
 
-static int __init usb_kbd_init(void)
-{
-	int result = usb_register(&usb_kbd_driver);
-	if (result == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-				DRIVER_DESC "\n");
-	return result;
-}
-
-static void __exit usb_kbd_exit(void)
-{
-	usb_deregister(&usb_kbd_driver);
-}
-
-module_init(usb_kbd_init);
-module_exit(usb_kbd_exit);
+module_usb_driver(usb_kbd_driver);
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 79b2bf8..0f6be45 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -241,19 +241,4 @@
 	.id_table	= usb_mouse_id_table,
 };
 
-static int __init usb_mouse_init(void)
-{
-	int retval = usb_register(&usb_mouse_driver);
-	if (retval == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-				DRIVER_DESC "\n");
-	return retval;
-}
-
-static void __exit usb_mouse_exit(void)
-{
-	usb_deregister(&usb_mouse_driver);
-}
-
-module_init(usb_mouse_init);
-module_exit(usb_mouse_exit);
+module_usb_driver(usb_mouse_driver);
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 9fa09ac..70f5dde 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -1,3 +1,5 @@
+menu "Microsoft Hyper-V guest support"
+
 config HYPERV
 	tristate "Microsoft Hyper-V client drivers"
 	depends on X86 && ACPI && PCI
@@ -11,4 +13,4 @@
 	help
 	  Select this option to enable the Hyper-V Utilities.
 
-
+endmenu
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 12b85ff..36484db 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -223,6 +223,17 @@
 	vmbus_device_unregister(channel->device_obj);
 }
 
+void vmbus_free_channels(void)
+{
+	struct vmbus_channel *channel;
+
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		vmbus_device_unregister(channel->device_obj);
+		kfree(channel->device_obj);
+		free_channel(channel);
+	}
+}
+
 /*
  * vmbus_process_offer - Process the offer by creating a channel/device
  * associated with this offer
@@ -287,6 +298,7 @@
 		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 		list_del(&newchannel->listentry);
 		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+		kfree(newchannel->device_obj);
 
 		free_channel(newchannel);
 	} else {
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 0fb100e..12aa97f 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -164,11 +164,6 @@
 
 	max_leaf = query_hypervisor_info();
 
-	rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
-
-	if (hv_context.guestid != 0)
-		goto cleanup;
-
 	/* Write our OS info */
 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
 	hv_context.guestid = HV_LINUX_GUEST_ID;
@@ -237,6 +232,9 @@
 {
 	union hv_x64_msr_hypercall_contents hypercall_msr;
 
+	/* Reset our OS id */
+	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+
 	kfree(hv_context.signal_event_buffer);
 	hv_context.signal_event_buffer = NULL;
 	hv_context.signal_event_param = NULL;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0aee112..6d7d286 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -611,6 +611,7 @@
 
 struct vmbus_channel *relid2channel(u32 relid);
 
+void vmbus_free_channels(void);
 
 /* Connection interface */
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0c048dd..a220e57 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -62,6 +62,14 @@
 	struct hv_dev_port_info outbound;
 };
 
+static int vmbus_exists(void)
+{
+	if (hv_acpi_dev == NULL)
+		return -ENODEV;
+
+	return 0;
+}
+
 
 static void get_channel_info(struct hv_device *device,
 			     struct hv_device_info *info)
@@ -590,6 +598,10 @@
 
 	pr_info("registering driver %s\n", hv_driver->name);
 
+	ret = vmbus_exists();
+	if (ret < 0)
+		return ret;
+
 	hv_driver->driver.name = hv_driver->name;
 	hv_driver->driver.owner = owner;
 	hv_driver->driver.mod_name = mod_name;
@@ -614,8 +626,8 @@
 {
 	pr_info("unregistering driver %s\n", hv_driver->name);
 
-	driver_unregister(&hv_driver->driver);
-
+	if (!vmbus_exists())
+		driver_unregister(&hv_driver->driver);
 }
 EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
 
@@ -776,11 +788,23 @@
 
 cleanup:
 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
+	hv_acpi_dev = NULL;
 	return ret;
 }
 
+static void __exit vmbus_exit(void)
+{
+
+	free_irq(irq, hv_acpi_dev);
+	vmbus_free_channels();
+	bus_unregister(&hv_bus);
+	hv_cleanup();
+	acpi_bus_unregister_driver(&vmbus_acpi_driver);
+}
+
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION(HV_DRV_VERSION);
 
-module_init(hv_acpi_init);
+subsys_initcall(hv_acpi_init);
+module_exit(vmbus_exit);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 91be41f..cb351d3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -367,11 +367,11 @@
 	  will be called f71882fg.
 
 config SENSORS_F75375S
-	tristate "Fintek F75375S/SP and F75373"
+	tristate "Fintek F75375S/SP, F75373 and F75387"
 	depends on I2C
 	help
 	  If you say yes here you get support for hardware monitoring
-	  features of the Fintek F75375S/SP and F75373
+	  features of the Fintek F75375S/SP, F75373 and F75387
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called f75375s.
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 66f6729..522860a 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -170,7 +170,7 @@
 	unsigned long long data;
 	acpi_status status;
 
-	res = strict_strtoul(buf, 10, &temp);
+	res = kstrtoul(buf, 10, &temp);
 	if (res)
 		return res;
 
@@ -241,7 +241,7 @@
 	unsigned long long data;
 	acpi_status status;
 
-	res = strict_strtoul(buf, 10, &temp);
+	res = kstrtoul(buf, 10, &temp);
 	if (res)
 		return res;
 
@@ -311,7 +311,7 @@
 	int res;
 	unsigned long temp;
 
-	res = strict_strtoul(buf, 10, &temp);
+	res = kstrtoul(buf, 10, &temp);
 	if (res)
 		return res;
 
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index b2cacbe..ceb24a3 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -125,7 +125,7 @@
 	struct adcxx *adc = spi_get_drvdata(spi);
 	unsigned long value;
 
-	if (strict_strtoul(buf, 10, &value))
+	if (kstrtoul(buf, 10, &value))
 		return -EINVAL;
 
 	if (mutex_lock_interruptible(&adc->lock))
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0683e6b..e6291da 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -767,7 +767,7 @@
 	int i, err;
 	u8 reg;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
 
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 9e234b9..3f63f5f 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -503,7 +503,7 @@
 	struct adm9240_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) || val != 0)
+	if (kstrtoul(buf, 10, &val) || val != 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 5cc3e37..5b02f7a 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -197,7 +197,7 @@
 	int ret;
 	unsigned long flag;
 
-	ret = strict_strtoul(buf, 0, &flag);
+	ret = kstrtoul(buf, 0, &flag);
 	if (ret || flag > 1)
 		return -EINVAL;
 
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 2af0c7b..7a14948 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -833,7 +833,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+	if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -871,7 +871,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+	if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -935,7 +935,7 @@
 	int x = voltage_multiplier(data, attr->index);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !x)
+	if (kstrtol(buf, 10, &temp) || !x)
 		return -EINVAL;
 
 	temp *= 1000; /* convert mV to uV */
@@ -977,7 +977,7 @@
 	int x = voltage_multiplier(data, attr->index);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !x)
+	if (kstrtol(buf, 10, &temp) || !x)
 		return -EINVAL;
 
 	temp *= 1000; /* convert mV to uV */
@@ -1066,7 +1066,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !temp ||
+	if (kstrtol(buf, 10, &temp) || !temp ||
 	    !fan_enabled(data, attr->index))
 		return -EINVAL;
 
@@ -1115,7 +1115,7 @@
 	long temp;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -1147,7 +1147,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1177,7 +1177,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1209,7 +1209,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1243,7 +1243,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -1289,7 +1289,7 @@
 	int tmin, trange_value;
 	long trange;
 
-	if (strict_strtol(buf, 10, &trange))
+	if (kstrtol(buf, 10, &trange))
 		return -EINVAL;
 
 	/* trange = tmax - tmin */
@@ -1330,7 +1330,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -1387,7 +1387,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	switch (temp) {
@@ -1446,7 +1446,7 @@
 	struct adt7462_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c6d1ce0..5e10c79 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -449,7 +449,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 60000);
@@ -478,7 +478,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, -1, 10);
@@ -511,7 +511,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -545,7 +545,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -600,7 +600,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !temp)
+	if (kstrtol(buf, 10, &temp) || !temp)
 		return -EINVAL;
 
 	temp = FAN_RPM_TO_PERIOD(temp);
@@ -637,7 +637,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp) || !temp)
+	if (kstrtol(buf, 10, &temp) || !temp)
 		return -EINVAL;
 
 	temp = FAN_RPM_TO_PERIOD(temp);
@@ -682,7 +682,7 @@
 	long temp;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -714,7 +714,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 255);
@@ -746,7 +746,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 255);
@@ -779,7 +779,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = SENSORS_LIMIT(temp, 0, 255);
@@ -822,7 +822,7 @@
 	struct adt7470_data *data = i2c_get_clientdata(client);
 	long temp;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -859,7 +859,7 @@
 	long temp;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	if (attr->index % 2)
@@ -919,7 +919,7 @@
 	long temp;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &temp))
+	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
 	temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index b5fcd87..7dab354 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -343,7 +343,7 @@
 	unsigned char reg;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -432,7 +432,7 @@
 	int temp;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -546,7 +546,7 @@
 	int temp;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -602,7 +602,7 @@
 	struct adt7475_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -653,7 +653,7 @@
 	unsigned char reg = 0;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -758,7 +758,7 @@
 	int r;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -781,7 +781,7 @@
 	int r;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -819,7 +819,7 @@
 	int out;
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
@@ -853,7 +853,7 @@
 	struct adt7475_data *data = i2c_get_clientdata(client);
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 	if (val != 0 && val != 1)
 		return -EINVAL;
@@ -883,7 +883,7 @@
 	struct adt7475_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 	if (val < 0 || val > 255)
 		return -EINVAL;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 4033974..89a6b9d 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -238,7 +238,7 @@
 	int ix = to_sensor_dev_attr(attr)->index;
 	long val;
 
-	int ret = strict_strtol(buf, 10, &val);
+	int ret = kstrtol(buf, 10, &val);
 	if (ret)
 		return ret;
 	val = SENSORS_LIMIT(val / 1000, -128, 127);
@@ -327,7 +327,7 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct amc6821_data *data = i2c_get_clientdata(client);
 	long val;
-	int ret = strict_strtol(buf, 10, &val);
+	int ret = kstrtol(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -356,7 +356,7 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct amc6821_data *data = i2c_get_clientdata(client);
 	long val;
-	int config = strict_strtol(buf, 10, &val);
+	int config = kstrtol(buf, 10, &val);
 	if (config)
 		return config;
 
@@ -477,7 +477,7 @@
 	u8 reg;
 	int dpwm;
 	long val;
-	int ret = strict_strtol(buf, 10, &val);
+	int ret = kstrtol(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -556,7 +556,7 @@
 	struct amc6821_data *data = i2c_get_clientdata(client);
 	int dpwm;
 	long val;
-	int ret = strict_strtol(buf, 10, &val);
+	int ret = kstrtol(buf, 10, &val);
 	if (ret)
 		return ret;
 
@@ -623,7 +623,7 @@
 	struct amc6821_data *data = i2c_get_clientdata(client);
 	long val;
 	int ix = to_sensor_dev_attr(attr)->index;
-	int ret = strict_strtol(buf, 10, &val);
+	int ret = kstrtol(buf, 10, &val);
 	if (ret)
 		return ret;
 	val = 1 > val ? 0xFFFF : 6000000/val;
@@ -665,7 +665,7 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct amc6821_data *data = i2c_get_clientdata(client);
 	long val;
-	int config = strict_strtol(buf, 10, &val);
+	int config = kstrtol(buf, 10, &val);
 	if (config)
 		return config;
 
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 4c07436..b989553 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -782,7 +782,7 @@
 	char newkey[5];
 	u8 buffer[2];
 
-	if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
+	if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
 		return -EINVAL;		/* Bigger than a 14-bit value */
 
 	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
@@ -822,7 +822,7 @@
 	unsigned long input;
 	u16 val;
 
-	if (strict_strtoul(sysfsbuf, 10, &input) < 0)
+	if (kstrtoul(sysfsbuf, 10, &input) < 0)
 		return -EINVAL;
 
 	ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
@@ -977,7 +977,7 @@
 {
 	unsigned long newkey;
 
-	if (strict_strtoul(sysfsbuf, 10, &newkey) < 0
+	if (kstrtoul(sysfsbuf, 10, &newkey) < 0
 	    || newkey >= smcreg.key_count)
 		return -EINVAL;
 
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index d2596ce..3efd324 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -188,7 +188,7 @@
 	SETUP_STORE_data_param(dev, attr);
 	long reqval;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	reqval = SENSORS_LIMIT(reqval, 0, 255);
@@ -221,7 +221,7 @@
 	long reqval;
 	u8 currval;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
@@ -265,7 +265,7 @@
 	SETUP_STORE_data_param(dev, attr);
 	long reqval;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	/* If a minimum RPM of zero is requested, then we set the register to
@@ -338,7 +338,7 @@
 	long reqval;
 	u8 nr = sda->index;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
@@ -371,7 +371,7 @@
 	long reqval;
 	s8 temp;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	reqval = SENSORS_LIMIT(reqval, -127000, 127000);
@@ -427,7 +427,7 @@
 	long reqval, i, f;
 	s8 temp;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	reqval = SENSORS_LIMIT(reqval, -32000, 31750);
@@ -482,7 +482,7 @@
 	int i;
 	u8 currval, newval = 0;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -538,7 +538,7 @@
 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
 	};
 
-	if (strict_strtoul(buf, 10, &reqval))
+	if (kstrtoul(buf, 10, &reqval))
 		return -EINVAL;
 
 	if (reqval > 31)
@@ -601,7 +601,7 @@
 	long reqval;
 	u8 currval, config, altbit, newval, minoff = 255;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	switch (reqval) {
@@ -675,7 +675,7 @@
 	u8 currval, newval = 255;
 	int i;
 
-	if (strict_strtoul(buf, 10, &reqval))
+	if (kstrtoul(buf, 10, &reqval))
 		return -EINVAL;
 
 	for (i = 0; i < ARRAY_SIZE(asc7621_pwm_freq_map); i++) {
@@ -724,7 +724,7 @@
 	u8 currval, newval = 255;
 	u32 i;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	for (i = 0; i < ARRAY_SIZE(asc7621_pwm_auto_spinup_map); i++) {
@@ -771,7 +771,7 @@
 	u8 currval, newval = 255;
 	u32 i;
 
-	if (strict_strtol(buf, 10, &reqval))
+	if (kstrtol(buf, 10, &reqval))
 		return -EINVAL;
 
 	for (i = 0; i < ARRAY_SIZE(asc7621_temp_smoothing_time_map); i++) {
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 104b376..1fdef88 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -57,16 +57,15 @@
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
-#ifdef CONFIG_SMP
 #define TO_PHYS_ID(cpu)		cpu_data(cpu).phys_proc_id
 #define TO_CORE_ID(cpu)		cpu_data(cpu).cpu_core_id
+#define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+#ifdef CONFIG_SMP
 #define for_each_sibling(i, cpu)	for_each_cpu(i, cpu_sibling_mask(cpu))
 #else
-#define TO_PHYS_ID(cpu)		(cpu)
-#define TO_CORE_ID(cpu)		(cpu)
 #define for_each_sibling(i, cpu)	for (i = 0; false; )
 #endif
-#define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
 
 /*
  * Per-Core Temperature Data
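
Net effect of the coretemp.c hunk above: TO_PHYS_ID(), TO_CORE_ID() and TO_ATTR_NO() are now defined identically for SMP and UP builds (both go through cpu_data()), and only the sibling iterator stays conditional:

#define TO_PHYS_ID(cpu)		cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu)		cpu_data(cpu).cpu_core_id
#define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu)	for_each_cpu(i, cpu_sibling_mask(cpu))
#else
#define for_each_sibling(i, cpu)	for (i = 0; false; )
#endif
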
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index d9c5927..d980395 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1223,7 +1223,7 @@
 }
 
 static struct attribute *dme1737_pwm_chmod_attr[];
-static void dme1737_chmod_file(struct device*, struct attribute*, mode_t);
+static void dme1737_chmod_file(struct device*, struct attribute*, umode_t);
 
 static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
 		       const char *buf, size_t count)
@@ -1961,7 +1961,7 @@
 static int dme1737_i2c_get_features(int, struct dme1737_data*);
 
 static void dme1737_chmod_file(struct device *dev,
-			       struct attribute *attr, mode_t mode)
+			       struct attribute *attr, umode_t mode)
 {
 	if (sysfs_chmod_file(&dev->kobj, attr, mode)) {
 		dev_warn(dev, "Failed to change permissions of %s.\n",
@@ -1971,7 +1971,7 @@
 
 static void dme1737_chmod_group(struct device *dev,
 				const struct attribute_group *group,
-				mode_t mode)
+				umode_t mode)
 {
 	struct attribute **attr;
 
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 225ae4f..300c3d4 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -161,7 +161,7 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct ds620_data *data = i2c_get_clientdata(client);
 
-	res = strict_strtol(buf, 10, &val);
+	res = kstrtol(buf, 10, &val);
 
 	if (res)
 		return res;
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index cd2a6e4..270ffab 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -80,7 +80,7 @@
 	unsigned long val;
 	int retval;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 	retval = i2c_smbus_write_byte_data(client, sda->index,
 					DIV_ROUND_CLOSEST(val, 1000));
@@ -98,7 +98,7 @@
 	unsigned long val;
 	int retval;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->mutex);
@@ -151,7 +151,7 @@
 	int hyst;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->mutex);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index af914ad..848a2b0 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -244,7 +244,7 @@
 	struct emc2103_data *data = i2c_get_clientdata(client);
 	long val;
 
-	int result = strict_strtol(buf, 10, &val);
+	int result = kstrtol(buf, 10, &val);
 	if (result < 0)
 		return -EINVAL;
 
@@ -268,7 +268,7 @@
 	struct emc2103_data *data = i2c_get_clientdata(client);
 	long val;
 
-	int result = strict_strtol(buf, 10, &val);
+	int result = kstrtol(buf, 10, &val);
 	if (result < 0)
 		return -EINVAL;
 
@@ -314,7 +314,7 @@
 	int new_range_bits, old_div = 8 / data->fan_multiplier;
 	long new_div;
 
-	int status = strict_strtol(buf, 10, &new_div);
+	int status = kstrtol(buf, 10, &new_div);
 	if (status < 0)
 		return -EINVAL;
 
@@ -388,7 +388,7 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	long rpm_target;
 
-	int result = strict_strtol(buf, 10, &rpm_target);
+	int result = kstrtol(buf, 10, &rpm_target);
 	if (result < 0)
 		return -EINVAL;
 
@@ -434,7 +434,7 @@
 	long new_value;
 	u8 conf_reg;
 
-	int result = strict_strtol(buf, 10, &new_value);
+	int result = kstrtol(buf, 10, &new_value);
 	if (result < 0)
 		return -EINVAL;
 
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 0064432..6ebb9b7 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -212,7 +212,7 @@
 	long val;
 	u8 reg;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -249,7 +249,7 @@
 	long val;
 	u8 reg;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -291,7 +291,7 @@
 	int err;
 	unsigned long val;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 59dd881..e503058 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1333,7 +1333,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1367,7 +1367,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	unsigned long val;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1420,7 +1420,7 @@
 	int err;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1454,7 +1454,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	unsigned long val;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1524,7 +1524,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1566,7 +1566,7 @@
 	u8 reg;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1609,7 +1609,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1670,7 +1670,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	unsigned long val;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1737,7 +1737,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1788,7 +1788,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1835,7 +1835,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1915,7 +1915,7 @@
 	int point = to_sensor_dev_attr_2(devattr)->nr;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1969,7 +1969,7 @@
 	u8 reg;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -2015,7 +2015,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	unsigned long val;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -2055,7 +2055,7 @@
 	int err, nr = to_sensor_dev_attr_2(devattr)->index;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -2106,7 +2106,7 @@
 	int point = to_sensor_dev_attr_2(devattr)->nr;
 	long val;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err)
 		return err;
 
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 95cbfb3..eedf574 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -1,16 +1,19 @@
 /*
- * f75375s.c - driver for the Fintek F75375/SP and F75373
- *             hardware monitoring features
+ * f75375s.c - driver for the Fintek F75375/SP, F75373 and
+ *             F75387SG/RG hardware monitoring features
  * Copyright (C) 2006-2007  Riku Voipio
  *
  * Datasheets available at:
  *
  * f75375:
- * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf 
+ * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
  *
  * f75373:
  * http://www.fintek.com.tw/files/productfiles/F75373_V025P.pdf
  *
+ * f75387:
+ * http://www.fintek.com.tw/files/productfiles/F75387_V027P.pdf
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -40,7 +43,7 @@
 /* Addresses to scan */
 static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
 
-enum chips { f75373, f75375 };
+enum chips { f75373, f75375, f75387 };
 
 /* Fintek F75375 registers  */
 #define F75375_REG_CONFIG0		0x0
@@ -59,6 +62,7 @@
 #define F75375_REG_VOLT_LOW(nr)		(0x21 + (nr) * 2)
 
 #define F75375_REG_TEMP(nr)		(0x14 + (nr))
+#define F75387_REG_TEMP11_LSB(nr)	(0x1a + (nr))
 #define F75375_REG_TEMP_HIGH(nr)	(0x28 + (nr) * 2)
 #define F75375_REG_TEMP_HYST(nr)	(0x29 + (nr) * 2)
 
@@ -78,8 +82,11 @@
 #define F75375_REG_PWM1_DROP_DUTY	0x6B
 #define F75375_REG_PWM2_DROP_DUTY	0x6C
 
-#define FAN_CTRL_LINEAR(nr)		(4 + nr)
+#define F75375_FAN_CTRL_LINEAR(nr)	(4 + nr)
+#define F75387_FAN_CTRL_LINEAR(nr)	(1 + ((nr) * 4))
 #define FAN_CTRL_MODE(nr)		(4 + ((nr) * 2))
+#define F75387_FAN_DUTY_MODE(nr)	(2 + ((nr) * 4))
+#define F75387_FAN_MANU_MODE(nr)	((nr) * 4)
 
 /*
  * Data structures and manipulation thereof
@@ -102,13 +109,18 @@
 	u8 in_min[4];
 	u16 fan[2];
 	u16 fan_min[2];
-	u16 fan_full[2];
-	u16 fan_exp[2];
+	u16 fan_max[2];
+	u16 fan_target[2];
 	u8 fan_timer;
 	u8 pwm[2];
 	u8 pwm_mode[2];
 	u8 pwm_enable[2];
-	s8 temp[2];
+	/*
+	 * f75387: For remote temperature reading, it uses signed 11-bit
+	 * values with LSB = 0.125 degree Celsius, left-justified in 16-bit
+	 * registers. For original 8-bit temp readings, the LSB is simply 0.
+	 */
+	s16 temp11[2];
 	s8 temp_high[2];
 	s8 temp_max_hyst[2];
 };
@@ -122,6 +134,7 @@
 static const struct i2c_device_id f75375_id[] = {
 	{ "f75373", f75373 },
 	{ "f75375", f75375 },
+	{ "f75387", f75387 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, f75375_id);
@@ -146,8 +159,8 @@
 /* in most cases, should be called while holding update_lock */
 static inline u16 f75375_read16(struct i2c_client *client, u8 reg)
 {
-	return ((i2c_smbus_read_byte_data(client, reg) << 8)
-		| i2c_smbus_read_byte_data(client, reg + 1));
+	return (i2c_smbus_read_byte_data(client, reg) << 8)
+		| i2c_smbus_read_byte_data(client, reg + 1);
 }
 
 static inline void f75375_write8(struct i2c_client *client, u8 reg,
@@ -181,11 +194,11 @@
 				f75375_read8(client, F75375_REG_TEMP_HIGH(nr));
 			data->temp_max_hyst[nr] =
 				f75375_read8(client, F75375_REG_TEMP_HYST(nr));
-			data->fan_full[nr] =
+			data->fan_max[nr] =
 				f75375_read16(client, F75375_REG_FAN_FULL(nr));
 			data->fan_min[nr] =
 				f75375_read16(client, F75375_REG_FAN_MIN(nr));
-			data->fan_exp[nr] =
+			data->fan_target[nr] =
 				f75375_read16(client, F75375_REG_FAN_EXP(nr));
 			data->pwm[nr] =	f75375_read8(client,
 				F75375_REG_FAN_PWM_DUTY(nr));
@@ -205,8 +218,14 @@
 	if (time_after(jiffies, data->last_updated + 2 * HZ)
 		|| !data->valid) {
 		for (nr = 0; nr < 2; nr++) {
-			data->temp[nr] =
-				f75375_read8(client, F75375_REG_TEMP(nr));
+			/* assign MSB, therefore shift it by 8 bits */
+			data->temp11[nr] =
+				f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
+			if (data->kind == f75387)
+				/* merge F75387's temperature LSB (11-bit) */
+				data->temp11[nr] |=
+					f75375_read8(client,
+						     F75387_REG_TEMP11_LSB(nr));
 			data->fan[nr] =
 				f75375_read16(client, F75375_REG_FAN(nr));
 		}
@@ -226,14 +245,14 @@
 {
 	if (reg == 0 || reg == 0xffff)
 		return 0;
-	return (1500000 / reg);
+	return 1500000 / reg;
 }
 
 static inline u16 rpm_to_reg(int rpm)
 {
 	if (rpm < 367 || rpm > 0xffff)
 		return 0xffff;
-	return (1500000 / rpm);
+	return 1500000 / rpm;
 }
 
 static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
@@ -242,7 +261,12 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
 
 	mutex_lock(&data->update_lock);
 	data->fan_min[nr] = rpm_to_reg(val);
@@ -251,17 +275,22 @@
 	return count;
 }
 
-static ssize_t set_fan_exp(struct device *dev, struct device_attribute *attr,
+static ssize_t set_fan_target(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
 
 	mutex_lock(&data->update_lock);
-	data->fan_exp[nr] = rpm_to_reg(val);
-	f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_exp[nr]);
+	data->fan_target[nr] = rpm_to_reg(val);
+	f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_target[nr]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -272,7 +301,12 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
 
 	mutex_lock(&data->update_lock);
 	data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
@@ -294,28 +328,54 @@
 	struct f75375_data *data = i2c_get_clientdata(client);
 	u8 fanmode;
 
-	if (val < 0 || val > 4)
+	if (val < 0 || val > 3)
 		return -EINVAL;
 
 	fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
-	fanmode &= ~(3 << FAN_CTRL_MODE(nr));
-
-	switch (val) {
-	case 0: /* Full speed */
-		fanmode  |= (3 << FAN_CTRL_MODE(nr));
-		data->pwm[nr] = 255;
-		f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
-				data->pwm[nr]);
-		break;
-	case 1: /* PWM */
-		fanmode  |= (3 << FAN_CTRL_MODE(nr));
-		break;
-	case 2: /* AUTOMATIC*/
-		fanmode  |= (2 << FAN_CTRL_MODE(nr));
-		break;
-	case 3: /* fan speed */
-		break;
+	if (data->kind == f75387) {
+		/* clear each fanX_mode bit before setting them properly */
+		fanmode &= ~(1 << F75387_FAN_DUTY_MODE(nr));
+		fanmode &= ~(1 << F75387_FAN_MANU_MODE(nr));
+		switch (val) {
+		case 0: /* full speed */
+			fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+			fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+			data->pwm[nr] = 255;
+			f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+					data->pwm[nr]);
+			break;
+		case 1: /* PWM */
+			fanmode  |= (1 << F75387_FAN_MANU_MODE(nr));
+			fanmode  |= (1 << F75387_FAN_DUTY_MODE(nr));
+			break;
+		case 2: /* AUTOMATIC */
+			fanmode  |= (1 << F75387_FAN_DUTY_MODE(nr));
+			break;
+		case 3: /* fan speed */
+			fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+			break;
+		}
+	} else {
+		/* clear each fanX_mode bit before setting them properly */
+		fanmode &= ~(3 << FAN_CTRL_MODE(nr));
+		switch (val) {
+		case 0: /* full speed */
+			fanmode  |= (3 << FAN_CTRL_MODE(nr));
+			data->pwm[nr] = 255;
+			f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+					data->pwm[nr]);
+			break;
+		case 1: /* PWM */
+			fanmode  |= (3 << FAN_CTRL_MODE(nr));
+			break;
+		case 2: /* AUTOMATIC */
+			fanmode  |= (2 << FAN_CTRL_MODE(nr));
+			break;
+		case 3: /* fan speed */
+			break;
+		}
 	}
+
 	f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
 	data->pwm_enable[nr] = val;
 	return 0;
@@ -327,8 +387,12 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
-	int err = 0;
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
 
 	mutex_lock(&data->update_lock);
 	err = set_pwm_enable_direct(client, nr, val);
@@ -342,20 +406,39 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
-	u8 conf = 0;
+	unsigned long val;
+	int err;
+	u8 conf;
+	char reg, ctrl;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
 
 	if (!(val == 0 || val == 1))
 		return -EINVAL;
 
+	/* F75373 does not support DC (linear voltage) fan control mode */
+	if (data->kind == f75373 && val == 0)
+		return -EINVAL;
+
+	/* take care for different registers */
+	if (data->kind == f75387) {
+		reg = F75375_REG_FAN_TIMER;
+		ctrl = F75387_FAN_CTRL_LINEAR(nr);
+	} else {
+		reg = F75375_REG_CONFIG1;
+		ctrl = F75375_FAN_CTRL_LINEAR(nr);
+	}
+
 	mutex_lock(&data->update_lock);
-	conf = f75375_read8(client, F75375_REG_CONFIG1);
-	conf &= ~(1 << FAN_CTRL_LINEAR(nr));
+	conf = f75375_read8(client, reg);
+	conf &= ~(1 << ctrl);
 
 	if (val == 0)
-		conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
+		conf |= (1 << ctrl);
 
-	f75375_write8(client, F75375_REG_CONFIG1, conf);
+	f75375_write8(client, reg, conf);
 	data->pwm_mode[nr] = val;
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -410,7 +493,13 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
+
 	val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
 	mutex_lock(&data->update_lock);
 	data->in_max[nr] = val;
@@ -425,7 +514,13 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtoul(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
+
 	val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
 	mutex_lock(&data->update_lock);
 	data->in_min[nr] = val;
@@ -435,13 +530,14 @@
 }
 #define TEMP_FROM_REG(val) ((val) * 1000)
 #define TEMP_TO_REG(val) ((val) / 1000)
+#define TEMP11_FROM_REG(reg)	((reg) / 32 * 125)
 
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t show_temp11(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct f75375_data *data = f75375_update_device(dev);
-	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
+	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[nr]));
 }
 
 static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
@@ -466,7 +562,13 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtol(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
+
 	val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
 	mutex_lock(&data->update_lock);
 	data->temp_high[nr] = val;
@@ -481,7 +583,13 @@
 	int nr = to_sensor_dev_attr(attr)->index;
 	struct i2c_client *client = to_i2c_client(dev);
 	struct f75375_data *data = i2c_get_clientdata(client);
-	int val = simple_strtol(buf, NULL, 10);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err < 0)
+		return err;
+
 	val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
 	mutex_lock(&data->update_lock);
 	data->temp_max_hyst[nr] = val;
@@ -502,8 +610,8 @@
 
 show_fan(fan);
 show_fan(fan_min);
-show_fan(fan_full);
-show_fan(fan_exp);
+show_fan(fan_max);
+show_fan(fan_target);
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
 static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO|S_IWUSR,
@@ -525,28 +633,28 @@
 	show_in_max, set_in_max, 3);
 static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO|S_IWUSR,
 	show_in_min, set_in_min, 3);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO|S_IWUSR,
 	show_temp_max_hyst, set_temp_max_hyst, 0);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO|S_IWUSR,
 	show_temp_max, set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 1);
 static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO|S_IWUSR,
 	show_temp_max_hyst, set_temp_max_hyst, 1);
 static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO|S_IWUSR,
 	show_temp_max, set_temp_max, 1);
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_full, S_IRUGO, show_fan_full, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, show_fan_max, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO|S_IWUSR,
 	show_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan1_exp, S_IRUGO|S_IWUSR,
-	show_fan_exp, set_fan_exp, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO|S_IWUSR,
+	show_fan_target, set_fan_target, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan2_full, S_IRUGO, show_fan_full, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_max, S_IRUGO, show_fan_max, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO|S_IWUSR,
 	show_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan2_exp, S_IRUGO|S_IWUSR,
-	show_fan_exp, set_fan_exp, 1);
+static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO|S_IWUSR,
+	show_fan_target, set_fan_target, 1);
 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR,
 	show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR,
@@ -568,13 +676,13 @@
 	&sensor_dev_attr_temp2_max.dev_attr.attr,
 	&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
-	&sensor_dev_attr_fan1_full.dev_attr.attr,
+	&sensor_dev_attr_fan1_max.dev_attr.attr,
 	&sensor_dev_attr_fan1_min.dev_attr.attr,
-	&sensor_dev_attr_fan1_exp.dev_attr.attr,
+	&sensor_dev_attr_fan1_target.dev_attr.attr,
 	&sensor_dev_attr_fan2_input.dev_attr.attr,
-	&sensor_dev_attr_fan2_full.dev_attr.attr,
+	&sensor_dev_attr_fan2_max.dev_attr.attr,
 	&sensor_dev_attr_fan2_min.dev_attr.attr,
-	&sensor_dev_attr_fan2_exp.dev_attr.attr,
+	&sensor_dev_attr_fan2_target.dev_attr.attr,
 	&sensor_dev_attr_pwm1.dev_attr.attr,
 	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
 	&sensor_dev_attr_pwm1_mode.dev_attr.attr,
@@ -604,6 +712,51 @@
 		struct f75375s_platform_data *f75375s_pdata)
 {
 	int nr;
+
+	if (!f75375s_pdata) {
+		u8 conf, mode;
+		int nr;
+
+		conf = f75375_read8(client, F75375_REG_CONFIG1);
+		mode = f75375_read8(client, F75375_REG_FAN_TIMER);
+		for (nr = 0; nr < 2; nr++) {
+			if (data->kind == f75387) {
+				bool manu, duty;
+
+				if (!(conf & (1 << F75387_FAN_CTRL_LINEAR(nr))))
+					data->pwm_mode[nr] = 1;
+
+				manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
+				duty = ((mode >> F75387_FAN_DUTY_MODE(nr)) & 1);
+				if (manu && duty)
+					/* speed */
+					data->pwm_enable[nr] = 3;
+				else if (!manu && duty)
+					/* automatic */
+					data->pwm_enable[nr] = 2;
+				else
+					/* manual */
+					data->pwm_enable[nr] = 1;
+			} else {
+				if (!(conf & (1 << F75375_FAN_CTRL_LINEAR(nr))))
+					data->pwm_mode[nr] = 1;
+
+				switch ((mode >> FAN_CTRL_MODE(nr)) & 3) {
+				case 0:		/* speed */
+					data->pwm_enable[nr] = 3;
+					break;
+				case 1:		/* automatic */
+					data->pwm_enable[nr] = 2;
+					break;
+				default:	/* manual */
+					data->pwm_enable[nr] = 1;
+					break;
+				}
+			}
+		}
+		return;
+	}
+
 	set_pwm_enable_direct(client, 0, f75375s_pdata->pwm_enable[0]);
 	set_pwm_enable_direct(client, 1, f75375s_pdata->pwm_enable[1]);
 	for (nr = 0; nr < 2; nr++) {
@@ -624,14 +777,16 @@
 	if (!i2c_check_functionality(client->adapter,
 				I2C_FUNC_SMBUS_BYTE_DATA))
 		return -EIO;
-	if (!(data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL)))
+	data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;
 
 	i2c_set_clientdata(client, data);
 	mutex_init(&data->update_lock);
 	data->kind = id->driver_data;
 
-	if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group)))
+	err = sysfs_create_group(&client->dev.kobj, &f75375_group);
+	if (err)
 		goto exit_free;
 
 	if (data->kind == f75375) {
@@ -653,8 +808,7 @@
 		goto exit_remove;
 	}
 
-	if (f75375s_pdata != NULL)
-		f75375_init(client, data, f75375s_pdata);
+	f75375_init(client, data, f75375s_pdata);
 
 	return 0;
 
@@ -685,10 +839,15 @@
 
 	vendid = f75375_read16(client, F75375_REG_VENDOR);
 	chipid = f75375_read16(client, F75375_CHIP_ID);
-	if (chipid == 0x0306 && vendid == 0x1934)
+	if (vendid != 0x1934)
+		return -ENODEV;
+
+	if (chipid == 0x0306)
 		name = "f75375";
-	else if (chipid == 0x0204 && vendid == 0x1934)
+	else if (chipid == 0x0204)
 		name = "f75373";
+	else if (chipid == 0x0410)
+		name = "f75387";
 	else
 		return -ENODEV;
 
@@ -711,7 +870,7 @@
 
 MODULE_AUTHOR("Riku Voipio");
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("F75373/F75375 hardware monitoring driver");
+MODULE_DESCRIPTION("F75373/F75375/F75387 hardware monitoring driver");
 
 module_init(sensors_f75375_init);
 module_exit(sensors_f75375_exit);
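
On the F75387 the remote temperature is a signed 11-bit value, left-justified in a 16-bit register with an LSB of 0.125 degree Celsius, so TEMP11_FROM_REG() divides by 32 (dropping the five unused low bits; s16 arithmetic keeps the sign) and multiplies by 125 to return millidegrees. A worked round trip:

/* 25.5 degC:   raw = 25.5 / 0.125 = 204, reg = 204 << 5 = 0x1980
 *              TEMP11_FROM_REG(0x1980) = 6528 / 32 * 125 = 204 * 125 = 25500 mdegC
 *
 * -0.125 degC: raw = -1, reg = 0xFFE0 (-32 as s16)
 *              TEMP11_FROM_REG(-32) = -32 / 32 * 125 = -125 mdegC
 */
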
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 1d6a6fa..781277d 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -166,7 +166,7 @@
 	struct g760a_data *data = g760a_update_client(dev);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 9ba38f3..2ce8c44 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -224,7 +224,7 @@
 	int speed_index;
 	int ret = count;
 
-	if (strict_strtoul(buf, 10, &pwm) || pwm > 255)
+	if (kstrtoul(buf, 10, &pwm) || pwm > 255)
 		return -EINVAL;
 
 	mutex_lock(&fan_data->lock);
@@ -257,7 +257,7 @@
 	struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) || val > 1)
+	if (kstrtoul(buf, 10, &val) || val > 1)
 		return -EINVAL;
 
 	if (fan_data->pwm_enable == val)
@@ -314,7 +314,7 @@
 	unsigned long rpm;
 	int ret = count;
 
-	if (strict_strtoul(buf, 10, &rpm))
+	if (kstrtoul(buf, 10, &rpm))
 		return -EINVAL;
 
 	mutex_lock(&fan_data->lock);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 6a967d7..cc2981f 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -904,7 +904,7 @@
 	unsigned long temp;
 	int res;
 
-	res = strict_strtoul(buf, 10, &temp);
+	res = kstrtoul(buf, 10, &temp);
 	if (res)
 		return res;
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d912649..38c0b87 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -444,7 +444,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -463,7 +463,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -539,7 +539,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -557,7 +557,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -604,7 +604,7 @@
 	long val;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
@@ -718,7 +718,7 @@
 	long val;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -751,7 +751,7 @@
 	int min;
 	u8 old;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -820,7 +820,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 2)
+	if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
 		return -EINVAL;
 
 	/* Check trip points before switching to automatic mode */
@@ -866,7 +866,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+	if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -900,7 +900,7 @@
 	unsigned long val;
 	int i;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	/* Search for the nearest available frequency */
@@ -949,7 +949,7 @@
 		return -EINVAL;
 	}
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	switch (val) {
@@ -1001,7 +1001,7 @@
 	int point = sensor_attr->index;
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+	if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1034,7 +1034,7 @@
 	int point = sensor_attr->index;
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
+	if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1126,7 +1126,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1180,7 +1180,7 @@
 	long val;
 	int config;
 
-	if (strict_strtol(buf, 10, &val) < 0 || val != 0)
+	if (kstrtol(buf, 10, &val) < 0 || val != 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1231,7 +1231,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0
+	if (kstrtol(buf, 10, &val) < 0
 	 || (val != 0 && val != 1))
 		return -EINVAL;
 
@@ -1278,7 +1278,7 @@
 	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	data->vrm = val;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 2d3d728..28c09ee 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -309,7 +309,7 @@
 	struct jc42_data *data = i2c_get_clientdata(client);		\
 	int err, ret = count;						\
 	long val;							\
-	if (strict_strtol(buf, 10, &val) < 0)				\
+	if (kstrtol(buf, 10, &val) < 0)				\
 		return -EINVAL;						\
 	mutex_lock(&data->update_lock);					\
 	data->value = jc42_temp_to_reg(val, data->extended);		\
@@ -337,7 +337,7 @@
 	int err;
 	int ret = count;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	diff = jc42_temp_from_reg(data->temp_crit) - val;
@@ -413,7 +413,7 @@
 	NULL
 };
 
-static mode_t jc42_attribute_mode(struct kobject *kobj,
+static umode_t jc42_attribute_mode(struct kobject *kobj,
 				  struct attribute *attr, int index)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
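
The mode_t to umode_t change in jc42_attribute_mode() (and in max1668 and max6650 below) follows the kernel-wide switch of sysfs permission types to umode_t. A sketch of the resulting .is_visible callback shape, with illustrative names (example_read_only and example_attributes are placeholders, not identifiers from these drivers):

static umode_t example_attr_is_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	umode_t mode = attr->mode;

	/* e.g. drop write permission when the device is read-only */
	if (example_read_only)
		mode &= ~(S_IWUSR | S_IWGRP);

	return mode;
}

static const struct attribute_group example_group = {
	.attrs		= example_attributes,
	.is_visible	= example_attr_is_visible,
};
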
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 7a48b1e..5253d23 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@
 {
 	struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
 	struct completion *completion = &hwmon->read_completion;
-	unsigned long t;
+	long t;
 	unsigned long val;
 	int ret;
 
@@ -203,7 +203,7 @@
 	return 0;
 }
 
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
 	.probe	= jz4740_hwmon_probe,
 	.remove = __devexit_p(jz4740_hwmon_remove),
 	.driver = {
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9e64d96..9c8093c 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -50,7 +50,7 @@
 	long temp;
 	short value;
 
-	int status = strict_strtol(buf, 10, &temp);
+	int status = kstrtol(buf, 10, &temp);
 	if (status < 0)
 		return status;
 
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 1888dd0..b3311b1 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -93,6 +93,10 @@
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
 	struct lm75_data *data = lm75_update_device(dev);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
 	return sprintf(buf, "%d\n",
 		       LM75_TEMP_FROM_REG(data->temp[attr->index]));
 }
@@ -107,7 +111,7 @@
 	long temp;
 	int error;
 
-	error = strict_strtol(buf, 10, &temp);
+	error = kstrtol(buf, 10, &temp);
 	if (error)
 		return error;
 
@@ -402,6 +406,7 @@
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lm75_data *data = i2c_get_clientdata(client);
+	struct lm75_data *ret = data;
 
 	mutex_lock(&data->update_lock);
 
@@ -414,19 +419,23 @@
 			int status;
 
 			status = lm75_read_value(client, LM75_REG_TEMP[i]);
-			if (status < 0)
-				dev_dbg(&client->dev, "reg %d, err %d\n",
-						LM75_REG_TEMP[i], status);
-			else
-				data->temp[i] = status;
+			if (unlikely(status < 0)) {
+				dev_dbg(dev,
+					"LM75: Failed to read value: reg %d, error %d\n",
+					LM75_REG_TEMP[i], status);
+				ret = ERR_PTR(status);
+				data->valid = 0;
+				goto abort;
+			}
+			data->temp[i] = status;
 		}
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}
 
+abort:
 	mutex_unlock(&data->update_lock);
-
-	return data;
+	return ret;
 }
 
 /*-----------------------------------------------------------------------*/
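
lm75_update_device() now reports SMBus read failures through ERR_PTR() instead of silently serving stale data, so every reader of the cached values has to check the pointer, as show_temp() above now does. A minimal sketch of the consumer side, with illustrative names:

static ssize_t show_example(struct device *dev,
			    struct device_attribute *da, char *buf)
{
	struct example_data *data = example_update_device(dev);

	/* update helpers of this style return ERR_PTR(-errno) on failure */
	if (IS_ERR(data))
		return PTR_ERR(data);

	return sprintf(buf, "%d\n", data->value);
}
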
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index e547a3e..89aa909 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -1,6 +1,6 @@
 /*
     lm75.h - Part of lm_sensors, Linux kernel modules for hardware
-             monitoring
+	      monitoring
     Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
 
     This program is free software; you can redistribute it and/or modify
@@ -37,7 +37,7 @@
 static inline u16 LM75_TEMP_TO_REG(long temp)
 {
 	int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
-	ntemp += (ntemp<0 ? -250 : 250);
+	ntemp += (ntemp < 0 ? -250 : 250);
 	return (u16)((ntemp / 500) << 7);
 }
 
@@ -47,4 +47,3 @@
 	   guarantee arithmetic shift and preserve the sign */
 	return ((s16)reg / 128) * 500;
 }
-
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 18a0e6c..0891b38 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -66,19 +66,19 @@
    these macros are called: arguments may be evaluated more than once.
    Fixing this is just not worth it. */
 
-#define IN_TO_REG(val)		(SENSORS_LIMIT(((val)+5)/10,0,255))
-#define IN_FROM_REG(val)	((val)*10)
+#define IN_TO_REG(val)		(SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_FROM_REG(val)	((val) * 10)
 
 static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
 {
 	if (rpm == 0)
 		return 255;
 	rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-	return SENSORS_LIMIT((1350000 + rpm*div / 2) / (rpm*div), 1, 254);
+	return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
-#define FAN_FROM_REG(val,div)	((val)==0?-1:\
-				(val)==255?0:1350000/((div)*(val)))
+#define FAN_FROM_REG(val, div)	((val) == 0 ? -1 : \
+				(val) == 255 ? 0 : 1350000/((div) * (val)))
 
 static inline long TEMP_FROM_REG(u16 temp)
 {
@@ -93,10 +93,11 @@
 	return res / 10;
 }
 
-#define TEMP_LIMIT_FROM_REG(val)	(((val)>0x80?(val)-0x100:(val))*1000)
+#define TEMP_LIMIT_FROM_REG(val)	(((val) > 0x80 ? \
+	(val) - 0x100 : (val)) * 1000)
 
-#define TEMP_LIMIT_TO_REG(val)		SENSORS_LIMIT((val)<0?\
-					((val)-500)/1000:((val)+500)/1000,0,255)
+#define TEMP_LIMIT_TO_REG(val)		SENSORS_LIMIT((val) < 0 ? \
+	((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
 
 #define DIV_FROM_REG(val)		(1 << (val))
 
@@ -164,7 +165,8 @@
  */
 
 #define show_in(suffix, value) \
-static ssize_t show_in_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_in_##suffix(struct device *dev, \
+	struct device_attribute *attr, char *buf) \
 { \
 	int nr = to_sensor_dev_attr(attr)->index; \
 	struct lm80_data *data = lm80_update_device(dev); \
@@ -175,14 +177,14 @@
 show_in(input, in)
 
 #define set_in(suffix, value, reg) \
-static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
-	size_t count) \
+static ssize_t set_in_##suffix(struct device *dev, \
+	struct device_attribute *attr, const char *buf, size_t count) \
 { \
 	int nr = to_sensor_dev_attr(attr)->index; \
 	struct i2c_client *client = to_i2c_client(dev); \
 	struct lm80_data *data = i2c_get_clientdata(client); \
 	long val = simple_strtol(buf, NULL, 10); \
- \
+\
 	mutex_lock(&data->update_lock);\
 	data->value[nr] = IN_TO_REG(val); \
 	lm80_write_value(client, reg(nr), data->value[nr]); \
@@ -193,7 +195,8 @@
 set_in(max, in_max, LM80_REG_IN_MAX)
 
 #define show_fan(suffix, value) \
-static ssize_t show_fan_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_fan_##suffix(struct device *dev, \
+	struct device_attribute *attr, char *buf) \
 { \
 	int nr = to_sensor_dev_attr(attr)->index; \
 	struct lm80_data *data = lm80_update_device(dev); \
@@ -245,10 +248,18 @@
 			   DIV_FROM_REG(data->fan_div[nr]));
 
 	switch (val) {
-	case 1: data->fan_div[nr] = 0; break;
-	case 2: data->fan_div[nr] = 1; break;
-	case 4: data->fan_div[nr] = 2; break;
-	case 8: data->fan_div[nr] = 3; break;
+	case 1:
+		data->fan_div[nr] = 0;
+		break;
+	case 2:
+		data->fan_div[nr] = 1;
+		break;
+	case 4:
+		data->fan_div[nr] = 2;
+		break;
+	case 8:
+		data->fan_div[nr] = 3;
+		break;
 	default:
 		dev_err(&client->dev, "fan_div value %ld not "
 			"supported. Choose one of 1, 2, 4 or 8!\n", val);
@@ -268,14 +279,16 @@
 	return count;
 }
 
-static ssize_t show_temp_input1(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_temp_input1(struct device *dev,
+	struct device_attribute *attr, char *buf)
 {
 	struct lm80_data *data = lm80_update_device(dev);
 	return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp));
 }
 
 #define show_temp(suffix, value) \
-static ssize_t show_temp_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_temp_##suffix(struct device *dev, \
+	struct device_attribute *attr, char *buf) \
 { \
 	struct lm80_data *data = lm80_update_device(dev); \
 	return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \
@@ -286,13 +299,13 @@
 show_temp(os_hyst, temp_os_hyst);
 
 #define set_temp(suffix, value, reg) \
-static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
-	size_t count) \
+static ssize_t set_temp_##suffix(struct device *dev, \
+	struct device_attribute *attr, const char *buf, size_t count) \
 { \
 	struct i2c_client *client = to_i2c_client(dev); \
 	struct lm80_data *data = i2c_get_clientdata(client); \
 	long val = simple_strtoul(buf, NULL, 10); \
- \
+\
 	mutex_lock(&data->update_lock); \
 	data->value = TEMP_LIMIT_TO_REG(val); \
 	lm80_write_value(client, reg, data->value); \
@@ -366,13 +379,13 @@
 		show_fan_div, set_fan_div, 1);
 static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
 static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max,
-    set_temp_hot_max);
+	set_temp_hot_max);
 static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst,
-    set_temp_hot_hyst);
+	set_temp_hot_hyst);
 static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max,
-    set_temp_os_max);
+	set_temp_os_max);
 static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst,
-    set_temp_os_hyst);
+	set_temp_os_hyst);
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
@@ -459,7 +472,7 @@
 		if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
 		 || (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
 		 || (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
-		    return -ENODEV;
+			return -ENODEV;
 	}
 
 	strlcpy(info->type, "lm80", I2C_NAME_SIZE);
@@ -490,7 +503,8 @@
 	data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
 
 	/* Register sysfs hooks */
-	if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group)))
+	err = sysfs_create_group(&client->dev.kobj, &lm80_group);
+	if (err)
 		goto error_free;
 
 	data->hwmon_dev = hwmon_device_register(&client->dev);
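
The reformatted FAN_TO_REG()/FAN_FROM_REG() macros keep the usual 1350000 / (count * divisor) conversion, rounding to the nearest count on the way in; for example:

/* FAN_TO_REG(3000, 2)  = (1350000 + 3000 * 2 / 2) / (3000 * 2) = 225
 * FAN_FROM_REG(225, 2) = 1350000 / (2 * 225)                   = 3000 RPM
 *
 * reg == 255 reads back as 0 RPM (fan stopped), reg == 0 as -1 (invalid)
 */
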
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 615bc4f..bdfd675 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -730,7 +730,7 @@
 	long val;
 	int err;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -798,7 +798,7 @@
 	long val;
 	int err;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -859,7 +859,7 @@
 	int err;
 	int temp;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -912,7 +912,7 @@
 	unsigned long val;
 	int err;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
 
@@ -1080,7 +1080,7 @@
 	long val;
 	int err;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 513901d..70bca67 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -169,7 +169,7 @@
 	int shift;
 	u8 mask = to_sensor_dev_attr(attr)->index;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	if (val != 1 && val != 2)
 		return -EINVAL;
@@ -216,7 +216,7 @@
 	struct lm95241_data *data = i2c_get_clientdata(client);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 	if (val < -128000)
 		return -EINVAL;
@@ -254,7 +254,7 @@
 	struct lm95241_data *data = i2c_get_clientdata(client);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 	if (val >= 256000)
 		return -EINVAL;
@@ -290,7 +290,7 @@
 	struct lm95241_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	data->interval = val * HZ / 1000;
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index dce9e68..5e5fc1b 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -254,7 +254,7 @@
 	int index = to_sensor_dev_attr(attr)->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	val /= 1000;
@@ -279,7 +279,7 @@
 	struct lm95245_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	val /= 1000;
@@ -316,7 +316,7 @@
 	struct lm95245_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	if (val != 1 && val != 2)
 		return -EINVAL;
@@ -363,7 +363,7 @@
 	struct lm95245_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 4b50601..ce52355 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -85,6 +85,7 @@
 					"Failed to read ADC value: error %d\n",
 					val);
 				ret = ERR_PTR(val);
+				data->valid = 0;
 				goto abort;
 			}
 			data->regs[i] = val;
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index c97b78e..84ef3a8 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2004-2005 Richard Purdie
  *
  * Copyright (C) 2008 Marvell International Ltd.
- * 	Eric Miao <eric.miao@marvell.com>
+ *	Eric Miao <eric.miao@marvell.com>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 385886a..f8e323a 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -230,7 +230,7 @@
 	int err;
 	int limit;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (unlikely(err < 0))
 		return err;
 
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 20d1b2d..6914195 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -335,10 +335,10 @@
 	NULL
 };
 
-static mode_t max1668_attribute_mode(struct kobject *kobj,
+static umode_t max1668_attribute_mode(struct kobject *kobj,
 				     struct attribute *attr, int index)
 {
-	int ret = S_IRUGO;
+	umode_t ret = S_IRUGO;
 	if (read_only)
 		return ret;
 	if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index f20d997..e10a092 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -208,7 +208,7 @@
 	unsigned long val;
 	int res;
 
-	res = strict_strtoul(buf, 10, &val);
+	res = kstrtoul(buf, 10, &val);
 	if (res)
 		return res;
 
@@ -241,7 +241,7 @@
 	unsigned long val;
 	int res;
 
-	res = strict_strtoul(buf, 10, &val);
+	res = kstrtoul(buf, 10, &val);
 	if (res)
 		return res;
 
@@ -275,7 +275,7 @@
 	unsigned long val;
 	int res;
 
-	res = strict_strtoul(buf, 10, &val);
+	res = kstrtoul(buf, 10, &val);
 	if (res)
 		return res;
 
@@ -308,7 +308,7 @@
 	unsigned long val;
 	int res;
 
-	res = strict_strtoul(buf, 10, &val);
+	res = kstrtoul(buf, 10, &val);
 	if (res)
 		return res;
 
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index e855d3b..209e8a5 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -234,7 +234,7 @@
 	struct max6642_data *data = i2c_get_clientdata(client);
 	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index ece3aaf..2fc034a 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -464,7 +464,7 @@
 static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
 			  MAX6650_ALRM_GPIO2);
 
-static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
+static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
 				    int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 8da2181..cb35461 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -418,7 +418,7 @@
 	unsigned long val;
 	int iobase = data->address[LD_FAN];
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -572,7 +572,7 @@
 	int nr = to_sensor_dev_attr(devattr)->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0 || val > 2)
+	if (kstrtoul(buf, 10, &val) < 0 || val > 2)
 		return -EINVAL;
 	/* Can't go to automatic mode if it isn't configured */
 	if (val == 2 && !(data->pwm_auto_ok & (1 << nr)))
@@ -604,7 +604,7 @@
 	int iobase = data->address[LD_FAN];
 	u8 mode;
 
-	if (strict_strtoul(buf, 10, &val) < 0 || val > 0xff)
+	if (kstrtoul(buf, 10, &val) < 0 || val > 0xff)
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 4b26f51..cfec923 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -19,8 +19,8 @@
 	default y
 	help
 	  If you say yes here you get hardware monitoring support for generic
-	  PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
-	  BMR453, BMR454, NCP4200, and NCP4208.
+	  PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
+	  NCP4200, and NCP4208.
 
 	  This driver can also be built as a module. If so, the module will
 	  be called pmbus.
@@ -113,8 +113,9 @@
 	default n
 	help
 	  If you say yes here you get hardware monitoring support for Intersil
-	  ZL2004, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105 Digital
-	  DC/DC Controllers.
+	  ZL2004, ZL2005, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105
+	  Digital DC/DC Controllers, as well as for Ericsson BMR450, BMR451,
+	  BMR462, BMR463, and BMR464.
 
 	  This driver can also be built as a module. If so, the module will
 	  be called zl6100.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 980a4d9..81c7c2e 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -170,35 +170,71 @@
 	return ret;
 }
 
+static const struct i2c_device_id adm1275_id[] = {
+	{ "adm1275", adm1275 },
+	{ "adm1276", adm1276 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, adm1275_id);
+
 static int adm1275_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
+	u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
 	int config, device_config;
 	int ret;
 	struct pmbus_driver_info *info;
 	struct adm1275_data *data;
+	const struct i2c_device_id *mid;
 
 	if (!i2c_check_functionality(client->adapter,
-				     I2C_FUNC_SMBUS_READ_BYTE_DATA))
+				     I2C_FUNC_SMBUS_READ_BYTE_DATA
+				     | I2C_FUNC_SMBUS_BLOCK_DATA))
 		return -ENODEV;
 
+	ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, block_buffer);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+		return ret;
+	}
+	if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
+		dev_err(&client->dev, "Unsupported Manufacturer ID\n");
+		return -ENODEV;
+	}
+
+	ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, block_buffer);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+		return ret;
+	}
+	for (mid = adm1275_id; mid->name[0]; mid++) {
+		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
+			break;
+	}
+	if (!mid->name[0]) {
+		dev_err(&client->dev, "Unsupported device\n");
+		return -ENODEV;
+	}
+
+	if (id->driver_data != mid->driver_data)
+		dev_notice(&client->dev,
+			   "Device mismatch: Configured %s, detected %s\n",
+			   id->name, mid->name);
+
+	config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+	if (config < 0)
+		return config;
+
+	device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
+	if (device_config < 0)
+		return device_config;
+
 	data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
-	if (config < 0) {
-		ret = config;
-		goto err_mem;
-	}
+	data->id = mid->driver_data;
 
-	device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
-	if (device_config < 0) {
-		ret = device_config;
-		goto err_mem;
-	}
-
-	data->id = id->driver_data;
 	info = &data->info;
 
 	info->pages = 1;
@@ -233,7 +269,7 @@
 	if (device_config & ADM1275_IOUT_WARN2_SELECT)
 		data->have_oc_fault = true;
 
-	switch (id->driver_data) {
+	switch (data->id) {
 	case adm1275:
 		if (config & ADM1275_VIN_VOUT_SELECT)
 			info->func[0] |=
@@ -281,13 +317,6 @@
 	return 0;
 }
 
-static const struct i2c_device_id adm1275_id[] = {
-	{ "adm1275", adm1275 },
-	{ "adm1276", adm1276 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, adm1275_id);
-
 static struct i2c_driver adm1275_driver = {
 	.driver = {
 		   .name = "adm1275",
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 995e873..18a385e 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -200,8 +200,6 @@
  */
 static const struct i2c_device_id pmbus_id[] = {
 	{"adp4000", 1},
-	{"bmr450", 1},
-	{"bmr451", 1},
 	{"bmr453", 1},
 	{"bmr454", 1},
 	{"ncp4200", 1},
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 2bc9800..48c7b4a 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -28,7 +28,7 @@
 #include <linux/delay.h>
 #include "pmbus.h"
 
-enum chips { zl2004, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
+enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
 
 struct zl6100_data {
 	int id;
@@ -38,8 +38,11 @@
 
 #define to_zl6100_data(x)  container_of(x, struct zl6100_data, info)
 
+#define ZL6100_MFR_CONFIG		0xd0
 #define ZL6100_DEVICE_ID		0xe4
 
+#define ZL6100_MFR_XTEMP_ENABLE		(1 << 7)
+
 #define ZL6100_WAIT_TIME		1000	/* uS	*/
 
 static ushort delay = ZL6100_WAIT_TIME;
@@ -65,6 +68,19 @@
 	if (page || reg >= PMBUS_VIRT_BASE)
 		return -ENXIO;
 
+	if (data->id == zl2005) {
+		/*
+		 * Limit register detection is not reliable on ZL2005.
+		 * Make sure registers are not erroneously detected.
+		 */
+		switch (reg) {
+		case PMBUS_VOUT_OV_WARN_LIMIT:
+		case PMBUS_VOUT_UV_WARN_LIMIT:
+		case PMBUS_IOUT_OC_WARN_LIMIT:
+			return -ENXIO;
+		}
+	}
+
 	zl6100_wait(data);
 	ret = pmbus_read_word_data(client, page, reg);
 	data->access = ktime_get();
@@ -122,7 +138,13 @@
 }
 
 static const struct i2c_device_id zl6100_id[] = {
+	{"bmr450", zl2005},
+	{"bmr451", zl2005},
+	{"bmr462", zl2008},
+	{"bmr463", zl2008},
+	{"bmr464", zl2008},
 	{"zl2004", zl2004},
+	{"zl2005", zl2005},
 	{"zl2006", zl2006},
 	{"zl2008", zl2008},
 	{"zl2105", zl2105},
@@ -143,7 +165,7 @@
 	const struct i2c_device_id *mid;
 
 	if (!i2c_check_functionality(client->adapter,
-				     I2C_FUNC_SMBUS_READ_BYTE_DATA
+				     I2C_FUNC_SMBUS_READ_WORD_DATA
 				     | I2C_FUNC_SMBUS_READ_BLOCK_DATA))
 		return -ENODEV;
 
@@ -177,8 +199,9 @@
 	data->id = mid->driver_data;
 
 	/*
-	 * ZL2008, ZL2105, and ZL6100 are known to require a wait time
+	 * ZL2005, ZL2008, ZL2105, and ZL6100 are known to require a wait time
 	 * between I2C accesses. ZL2004 and ZL6105 are known to be safe.
+	 * Other chips have not yet been tested.
 	 *
 	 * Only clear the wait time for chips known to be safe. The wait time
 	 * can be cleared later for additional chips if tests show that it
@@ -190,12 +213,9 @@
 	/*
 	 * Since there was a direct I2C device access above, wait before
 	 * accessing the chip again.
-	 * Set the timestamp, wait, then set it again. This should provide
-	 * enough buffer time to be safe.
 	 */
 	data->access = ktime_get();
 	zl6100_wait(data);
-	data->access = ktime_get();
 
 	info = &data->info;
 
@@ -203,7 +223,16 @@
 	info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
 	  | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
 	  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
-	  | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+	  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+
+	ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
+	if (ret < 0)
+		goto err_mem;
+	if (ret & ZL6100_MFR_XTEMP_ENABLE)
+		info->func[0] |= PMBUS_HAVE_TEMP2;
+
+	data->access = ktime_get();
+	zl6100_wait(data);
 
 	info->read_word_data = zl6100_read_word_data;
 	info->read_byte_data = zl6100_read_byte_data;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index fe4104c..6ddeae0 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -683,7 +683,7 @@
 	long value;
 	u8 status;
 
-	if (strict_strtol(buf, 10, &value))
+	if (kstrtol(buf, 10, &value))
 		return -EINVAL;
 
 	mutex_lock(&data->read_lock);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 643aa8c..c08eee2 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -112,7 +112,7 @@
 	long val;
 	int status;
 
-	if (strict_strtol(buf, 10, &val) < 0)
+	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 	val = SENSORS_LIMIT(val, -256000, 255000);
 
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ad8d535..8b9a7748 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -334,7 +334,7 @@
 	long val;
 	u16 reg;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	reg = tmp401_temp_to_register(val, data->config);
@@ -361,7 +361,7 @@
 	long val;
 	u16 reg;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	reg = tmp401_temp_to_register(val, data->config);
@@ -388,7 +388,7 @@
 	long val;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	reg = tmp401_crit_temp_to_register(val, data->config);
@@ -413,7 +413,7 @@
 	long val;
 	u8 reg;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	if (data->config & TMP401_CONFIG_RANGE)
@@ -447,7 +447,7 @@
 {
 	long val;
 
-	if (strict_strtol(buf, 10, &val))
+	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
 	if (val != 1) {
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 0517a8f..c48381f 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -157,7 +157,7 @@
 		return sprintf(buf, "0\n");
 }
 
-static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
+static umode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
 				int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 93f5fc7..0e0af04 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -937,7 +937,7 @@
 	int nr = sensor_attr->index; \
 	unsigned long val; \
 	int err; \
-	err = strict_strtoul(buf, 10, &val); \
+	err = kstrtoul(buf, 10, &val); \
 	if (err < 0) \
 		return err; \
 	mutex_lock(&data->update_lock); \
@@ -1054,7 +1054,7 @@
 	unsigned int reg;
 	u8 new_div;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -1199,7 +1199,7 @@
 	int nr = sensor_attr->index; \
 	int err; \
 	long val; \
-	err = strict_strtol(buf, 10, &val); \
+	err = kstrtol(buf, 10, &val); \
 	if (err < 0) \
 		return err; \
 	mutex_lock(&data->update_lock); \
@@ -1324,7 +1324,7 @@
 	int err;
 	u16 reg;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -1351,7 +1351,7 @@
 	unsigned long val;
 	int err;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -1376,7 +1376,7 @@
 	int err;
 	u16 reg;
 
-	err = strict_strtoul(buf, 10, &val);
+	err = kstrtoul(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -1430,7 +1430,7 @@
 	long val;
 	int err;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -1455,7 +1455,7 @@
 	long val;
 	int err;
 
-	err = strict_strtol(buf, 10, &val);
+	err = kstrtol(buf, 10, &val);
 	if (err < 0)
 		return err;
 
@@ -1556,7 +1556,7 @@
 	int nr = sensor_attr->index; \
 	unsigned long val; \
 	int err; \
-	err = strict_strtoul(buf, 10, &val); \
+	err = kstrtoul(buf, 10, &val); \
 	if (err < 0) \
 		return err; \
 	val = SENSORS_LIMIT(val, 1, 255); \
@@ -1595,7 +1595,7 @@
 	int nr = sensor_attr->index; \
 	unsigned long val; \
 	int err; \
-	err = strict_strtoul(buf, 10, &val); \
+	err = kstrtoul(buf, 10, &val); \
 	if (err < 0) \
 		return err; \
 	val = step_time_to_reg(val, data->pwm_mode[nr]); \
@@ -1702,7 +1702,7 @@
 	unsigned long val;
 	u16 reg, mask;
 
-	if (strict_strtoul(buf, 10, &val) || val != 0)
+	if (kstrtoul(buf, 10, &val) || val != 0)
 		return -EINVAL;
 
 	mask = to_sensor_dev_attr_2(attr)->nr;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 8c2844e..6e5d0ae 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -711,7 +711,7 @@
 	int nr = sensor_attr->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -756,7 +756,7 @@
 	u8 val_shift = 0;
 	u8 keep_mask = 0;
 
-	int ret = strict_strtoul(buf, 10, &val);
+	int ret = kstrtoul(buf, 10, &val);
 
 	if (ret || val < 1 || val > 3)
 		return -EINVAL;
@@ -819,7 +819,7 @@
 	unsigned long val;
 	u8 target_mask;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -863,7 +863,7 @@
 	u8 val_shift = 0;
 	u8 keep_mask = 0;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 
 	switch (nr) {
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index f3e7130..9ded133 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -749,7 +749,7 @@
 	unsigned long val;
 	u8 reg;
 
-	if (strict_strtoul(buf, 10, &val) || val != 0)
+	if (kstrtoul(buf, 10, &val) || val != 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 854f911..3cc6fef 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -450,7 +450,7 @@
 	unsigned long val;
 	u8 reg;
 
-	if (strict_strtoul(buf, 10, &val) || val != 0)
+	if (kstrtoul(buf, 10, &val) || val != 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 845232d..3ee398d 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -730,7 +730,7 @@
 	u8 beep_bit = 1 << shift;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	if (val != 0 && val != 1)
 		return -EINVAL;
@@ -755,7 +755,7 @@
 	struct w83795_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0 || val != 0)
+	if (kstrtoul(buf, 10, &val) < 0 || val != 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -801,7 +801,7 @@
 	struct w83795_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val))
+	if (kstrtoul(buf, 10, &val))
 		return -EINVAL;
 	val = fan_to_reg(val);
 
@@ -863,7 +863,7 @@
 	int index = sensor_attr->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -924,7 +924,7 @@
 	unsigned long val;
 	int i;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	if (val < 1 || val > 2)
 		return -EINVAL;
@@ -1021,7 +1021,7 @@
 	unsigned long channel;
 	u8 val = index / 2;
 
-	if (strict_strtoul(buf, 10, &channel) < 0 ||
+	if (kstrtoul(buf, 10, &channel) < 0 ||
 	    channel < 1 || channel > 14)
 		return -EINVAL;
 
@@ -1088,7 +1088,7 @@
 	int index = sensor_attr->index;
 	unsigned long tmp;
 
-	if (strict_strtoul(buf, 10, &tmp) < 0)
+	if (kstrtoul(buf, 10, &tmp) < 0)
 		return -EINVAL;
 
 	switch (nr) {
@@ -1149,7 +1149,7 @@
 	int index = sensor_attr->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1198,7 +1198,7 @@
 	unsigned long val;
 	u8 tmp;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	val /= 1000;
 
@@ -1257,7 +1257,7 @@
 	int index = sensor_attr->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1293,7 +1293,7 @@
 	int index = sensor_attr->index;
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	val /= 1000;
 
@@ -1333,7 +1333,7 @@
 	struct w83795_data *data = i2c_get_clientdata(client);
 	long tmp;
 
-	if (strict_strtol(buf, 10, &tmp) < 0)
+	if (kstrtol(buf, 10, &tmp) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1394,7 +1394,7 @@
 	struct w83795_data *data = i2c_get_clientdata(client);
 	long tmp;
 
-	if (strict_strtol(buf, 10, &tmp) < 0)
+	if (kstrtol(buf, 10, &tmp) < 0)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -1436,7 +1436,7 @@
 	unsigned long val;
 	u8 tmp;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	if ((val != 4) && (val != 3))
 		return -EINVAL;
@@ -1512,7 +1512,7 @@
 	u8 tmp;
 	u8 lsb_idx;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 	val = in_to_reg(index, val);
 
@@ -1569,7 +1569,7 @@
 	struct w83795_data *data = i2c_get_clientdata(client);
 	unsigned long val;
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	switch (nr) {
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2d3657a..5244c47 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <linux/of_i2c.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/slab.h>
@@ -137,6 +138,7 @@
 			sizeof(adap->name));
 	adap->algo = &i2c_dw_algo;
 	adap->dev.parent = &pdev->dev;
+	adap->dev.of_node = pdev->dev.of_node;
 
 	adap->nr = pdev->id;
 	r = i2c_add_numbered_adapter(adap);
@@ -144,6 +146,7 @@
 		dev_err(&pdev->dev, "failure adding adapter\n");
 		goto err_free_irq;
 	}
+	of_i2c_register_devices(adap);
 
 	return 0;
 
@@ -187,6 +190,14 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id dw_i2c_of_match[] = {
+	{ .compatible = "snps,designware-i2c", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
+#endif
+
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:i2c_designware");
 
@@ -195,6 +206,7 @@
 	.driver		= {
 		.name	= "i2c_designware",
 		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(dw_i2c_of_match),
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index 7636671..7eb19a5 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -515,20 +515,7 @@
 	.id_table = diolan_u2c_table,
 };
 
-static int __init diolan_u2c_init(void)
-{
-	/* register this driver with the USB subsystem */
-	return usb_register(&diolan_u2c_driver);
-}
-
-static void __exit diolan_u2c_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&diolan_u2c_driver);
-}
-
-module_init(diolan_u2c_init);
-module_exit(diolan_u2c_exit);
+module_usb_driver(diolan_u2c_driver);
 
 MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
 MODULE_DESCRIPTION(DRIVER_NAME " driver");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef4..18936ac 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@
 	/* Set the number of I2C channel instance */
 	adap_info->ch_num = id->driver_data;
 
+	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+		  KBUILD_MODNAME, adap_info);
+	if (ret) {
+		pch_pci_err(pdev, "request_irq FAILED\n");
+		goto err_request_irq;
+	}
+
 	for (i = 0; i < adap_info->ch_num; i++) {
 		pch_adap = &adap_info->pch_data[i].pch_adapter;
 		adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@
 
 		pch_adap->dev.parent = &pdev->dev;
 
+		pch_i2c_init(&adap_info->pch_data[i]);
 		ret = i2c_add_adapter(pch_adap);
 		if (ret) {
 			pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
-			goto err_i2c_add_adapter;
+			goto err_add_adapter;
 		}
-
-		pch_i2c_init(&adap_info->pch_data[i]);
-	}
-	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-		  KBUILD_MODNAME, adap_info);
-	if (ret) {
-		pch_pci_err(pdev, "request_irq FAILED\n");
-		goto err_i2c_add_adapter;
 	}
 
 	pci_set_drvdata(pdev, adap_info);
 	pch_pci_dbg(pdev, "returns %d.\n", ret);
 	return 0;
 
-err_i2c_add_adapter:
+err_add_adapter:
 	for (j = 0; j < i; j++)
 		i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+	free_irq(pdev->irq, adap_info);
+err_request_irq:
 	pci_iounmap(pdev, base_addr);
 err_pci_iomap:
 	pci_release_regions(pdev);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d002..fa23faa 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@
 		 * size. This is to ensure that we can handle the status on int
 		 * call back latencies.
 		 */
-		if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
-			dev->fifo_size = 0;
+
+		dev->fifo_size = (dev->fifo_size / 2);
+
+		if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
 			dev->b_hw = 0; /* Disable hardware fixes */
-		} else {
-			dev->fifo_size = (dev->fifo_size / 2);
+		else
 			dev->b_hw = 1; /* Enable hardware fixes */
-		}
+
 		/* calculate wakeup latency constraint for MPU */
 		if (dev->set_mpu_wkup_lat != NULL)
 			dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef..4c17180 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@
 
 	/* first, try busy waiting briefly */
 	do {
+		cpu_relax();
 		iicstat = readl(i2c->regs + S3C2410_IICSTAT);
 	} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
 
@@ -786,7 +787,7 @@
 #else
 static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
-	return -EINVAL;
+	return 0;
 }
 
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index d03b040..f07307f 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -262,20 +262,7 @@
 	.id_table	= i2c_tiny_usb_table,
 };
 
-static int __init usb_i2c_tiny_usb_init(void)
-{
-	/* register this driver with the USB subsystem */
-	return usb_register(&i2c_tiny_usb_driver);
-}
-
-static void __exit usb_i2c_tiny_usb_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&i2c_tiny_usb_driver);
-}
-
-module_init(usb_i2c_tiny_usb_init);
-module_exit(usb_i2c_tiny_usb_exit);
+module_usb_driver(i2c_tiny_usb_driver);
 
 /* ----- end of usb layer ------------------------------------------------ */
 
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4bb68f3..ac083a2 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -426,7 +426,7 @@
 			xiic_wakeup(i2c, STATE_ERROR);
 
 	} else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
-		/* Transmit register/FIFO is empty or ½ empty */
+		/* Transmit register/FIFO is empty or ½ empty */
 
 		clr = pend &
 			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index eb0e2cc..73d4531 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -343,7 +343,7 @@
 {
 	dev->addr_len		= IEEE802154_ADDR_LEN;
 	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-	dev->features		= NETIF_F_NO_CSUM;
+	dev->features		= NETIF_F_HW_CSUM;
 	dev->needed_tailroom	= 2; /* FCS */
 	dev->mtu		= 127;
 	dev->tx_queue_len	= 10;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e9cf51b..1612cfd 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -178,6 +178,25 @@
 	mutex_unlock(&lock);
 }
 
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+	struct neighbour *n;
+	int ret;
+
+	rcu_read_lock();
+	n = dst_get_neighbour_noref(dst);
+	if (!n || !(n->nud_state & NUD_VALID)) {
+		if (n)
+			neigh_event_send(n, NULL);
+		ret = -ENODATA;
+	} else {
+		ret = rdma_copy_addr(addr, dst->dev, n->ha);
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static int addr4_resolve(struct sockaddr_in *src_in,
 			 struct sockaddr_in *dst_in,
 			 struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@
 	__be32 src_ip = src_in->sin_addr.s_addr;
 	__be32 dst_ip = dst_in->sin_addr.s_addr;
 	struct rtable *rt;
-	struct neighbour *neigh;
 	struct flowi4 fl4;
 	int ret;
 
@@ -214,20 +232,7 @@
 		goto put;
 	}
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
-	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-		rcu_read_lock();
-		neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-		rcu_read_unlock();
-		ret = -ENODATA;
-		if (neigh)
-			goto release;
-		goto put;
-	}
-
-	ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
-	neigh_release(neigh);
+	ret = dst_fetch_ha(&rt->dst, addr);
 put:
 	ip_rt_put(rt);
 out:
@@ -240,13 +245,12 @@
 			 struct rdma_dev_addr *addr)
 {
 	struct flowi6 fl6;
-	struct neighbour *neigh;
 	struct dst_entry *dst;
 	int ret;
 
 	memset(&fl6, 0, sizeof fl6);
-	ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
-	ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+	fl6.daddr = dst_in->sin6_addr;
+	fl6.saddr = src_in->sin6_addr;
 	fl6.flowi6_oif = addr->bound_dev_if;
 
 	dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -260,7 +264,7 @@
 			goto put;
 
 		src_in->sin6_family = AF_INET6;
-		ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+		src_in->sin6_addr = fl6.saddr;
 	}
 
 	if (dst->dev->flags & IFF_LOOPBACK) {
@@ -276,16 +280,7 @@
 		goto put;
 	}
 
-	rcu_read_lock();
-	neigh = dst_get_neighbour(dst);
-	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-		if (neigh)
-			neigh_event_send(neigh, NULL);
-		ret = -ENODATA;
-	} else {
-		ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
-	}
-	rcu_read_unlock();
+	ret = dst_fetch_ha(dst, addr);
 put:
 	dst_release(dst);
 	return ret;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 8b72f39..c889aae 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3659,7 +3659,7 @@
 	.release = cm_release_port_obj
 };
 
-static char *cm_devnode(struct device *dev, mode_t *mode)
+static char *cm_devnode(struct device *dev, umode_t *mode)
 {
 	if (mode)
 		*mode = 0666;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 505db2a..7da9b21 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -799,6 +799,7 @@
 
 	u8 info_length;
 	u8 ap_status;
+	__be16 rsvd;
 	u8 info[IB_CM_APR_INFO_LENGTH];
 
 	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821..e3e470f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1110,7 +1110,7 @@
 	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
 		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
 		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
-		ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
 	} else {
 		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
 					&rt->addr.dev_addr);
@@ -2005,11 +2005,11 @@
 	if (cma_zero_addr(src)) {
 		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
 		if ((src->sa_family = dst->sa_family) == AF_INET) {
-			((struct sockaddr_in *) src)->sin_addr.s_addr =
-				((struct sockaddr_in *) dst)->sin_addr.s_addr;
+			((struct sockaddr_in *)src)->sin_addr =
+				((struct sockaddr_in *)dst)->sin_addr;
 		} else {
-			ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
-				       &((struct sockaddr_in6 *) dst)->sin6_addr);
+			((struct sockaddr_in6 *)src)->sin6_addr =
+				((struct sockaddr_in6 *)dst)->sin6_addr;
 		}
 	}
 
@@ -2513,6 +2513,9 @@
 
 	req.private_data_len = sizeof(struct cma_hdr) +
 			       conn_param->private_data_len;
+	if (req.private_data_len < conn_param->private_data_len)
+		return -EINVAL;
+
 	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
 	if (!req.private_data)
 		return -ENOMEM;
@@ -2562,6 +2565,9 @@
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv->id.ps);
 	req.private_data_len = offset + conn_param->private_data_len;
+	if (req.private_data_len < conn_param->private_data_len)
+		return -EINVAL;
+
 	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
 	if (!private_data)
 		return -ENOMEM;
@@ -2920,7 +2926,7 @@
 	mutex_lock(&id_priv->qp_mutex);
 	if (!status && id_priv->id.qp)
 		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
-					 multicast->rec.mlid);
+					 be16_to_cpu(multicast->rec.mlid));
 	mutex_unlock(&id_priv->qp_mutex);
 
 	memset(&event, 0, sizeof event);
@@ -3181,7 +3187,7 @@
 			if (id->qp)
 				ib_detach_mcast(id->qp,
 						&mc->multicast.ib->rec.mgid,
-						mc->multicast.ib->rec.mlid);
+						be16_to_cpu(mc->multicast.ib->rec.mlid));
 			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
 				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
 				case IB_LINK_LAYER_INFINIBAND:
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index b8a0b4a..06f0871 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -106,9 +106,6 @@
 	IB_UCM_MAX_DEVICES = 32
 };
 
-/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-extern struct class cm_class;
-
 #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
 
 static void ib_ucm_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 07db229..f0d588f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1175,7 +1175,7 @@
 	kref_put(&umad_dev->ref, ib_umad_release_dev);
 }
 
-static char *umad_devnode(struct device *dev, mode_t *mode)
+static char *umad_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 254f164..b930da4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -241,11 +241,24 @@
 	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
 }
 
+static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
+{
+	struct ib_uobject *uobj;
+
+	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
+	return uobj ? uobj->object : NULL;
+}
+
 static void put_qp_read(struct ib_qp *qp)
 {
 	put_uobj_read(qp->uobject);
 }
 
+static void put_qp_write(struct ib_qp *qp)
+{
+	put_uobj_write(qp->uobject);
+}
+
 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
 {
 	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
@@ -2375,7 +2388,7 @@
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
 	if (!qp)
 		return -EINVAL;
 
@@ -2404,7 +2417,7 @@
 		kfree(mcast);
 
 out_put:
-	put_qp_read(qp);
+	put_qp_write(qp);
 
 	return ret ? ret : in_len;
 }
@@ -2422,7 +2435,7 @@
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
 	if (!qp)
 		return -EINVAL;
 
@@ -2441,14 +2454,14 @@
 		}
 
 out_put:
-	put_qp_read(qp);
+	put_qp_write(qp);
 
 	return ret ? ret : in_len;
 }
 
-int __uverbs_create_xsrq(struct ib_uverbs_file *file,
-			 struct ib_uverbs_create_xsrq *cmd,
-			 struct ib_udata *udata)
+static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
+				struct ib_uverbs_create_xsrq *cmd,
+				struct ib_udata *udata)
 {
 	struct ib_uverbs_create_srq_resp resp;
 	struct ib_usrq_object           *obj;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 8796367..604556d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -846,7 +846,7 @@
 	kfree(uverbs_dev);
 }
 
-static char *uverbs_devnode(struct device *dev, mode_t *mode)
+static char *uverbs_devnode(struct device *dev, umode_t *mode)
 {
 	if (mode)
 		*mode = 0666;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c88b12b..740dcc0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1338,7 +1338,6 @@
 	struct iwch_ep *child_ep, *parent_ep = ctx;
 	struct cpl_pass_accept_req *req = cplhdr(skb);
 	unsigned int hwtid = GET_TID(req);
-	struct neighbour *neigh;
 	struct dst_entry *dst;
 	struct l2t_entry *l2t;
 	struct rtable *rt;
@@ -1375,10 +1374,7 @@
 		goto reject;
 	}
 	dst = &rt->dst;
-	rcu_read_lock();
-	neigh = dst_get_neighbour(dst);
-	l2t = t3_l2t_get(tdev, neigh, neigh->dev);
-	rcu_read_unlock();
+	l2t = t3_l2t_get(tdev, dst, NULL);
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
 		       __func__);
@@ -1889,7 +1885,6 @@
 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
 	struct iwch_dev *h = to_iwch_dev(cm_id->device);
-	struct neighbour *neigh;
 	struct iwch_ep *ep;
 	struct rtable *rt;
 	int err = 0;
@@ -1947,13 +1942,7 @@
 		goto fail3;
 	}
 	ep->dst = &rt->dst;
-
-	rcu_read_lock();
-	neigh = dst_get_neighbour(ep->dst);
-
-	/* get a l2t entry */
-	ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
-	rcu_read_unlock();
+	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
 	if (!ep->l2t) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
 		err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0747004..0668bb3 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1556,6 +1556,67 @@
 	return;
 }
 
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+		     struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+	struct neighbour *n;
+	int err, step;
+
+	rcu_read_lock();
+	n = dst_get_neighbour_noref(dst);
+	err = -ENODEV;
+	if (!n)
+		goto out;
+	err = -ENOMEM;
+	if (n->dev->flags & IFF_LOOPBACK) {
+		struct net_device *pdev;
+
+		pdev = ip_dev_find(&init_net, peer_ip);
+		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+					n, pdev, 0);
+		if (!ep->l2t)
+			goto out;
+		ep->mtu = pdev->mtu;
+		ep->tx_chan = cxgb4_port_chan(pdev);
+		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+		step = cdev->rdev.lldi.ntxq /
+			cdev->rdev.lldi.nchan;
+		ep->txq_idx = cxgb4_port_idx(pdev) * step;
+		step = cdev->rdev.lldi.nrxq /
+			cdev->rdev.lldi.nchan;
+		ep->ctrlq_idx = cxgb4_port_idx(pdev);
+		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+			cxgb4_port_idx(pdev) * step];
+		dev_put(pdev);
+	} else {
+		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+					n, n->dev, 0);
+		if (!ep->l2t)
+			goto out;
+		ep->mtu = dst_mtu(ep->dst);
+		ep->tx_chan = cxgb4_port_chan(n->dev);
+		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+		step = cdev->rdev.lldi.ntxq /
+			cdev->rdev.lldi.nchan;
+		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+		step = cdev->rdev.lldi.nrxq /
+			cdev->rdev.lldi.nchan;
+		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+			cxgb4_port_idx(n->dev) * step];
+
+		if (clear_mpa_v1) {
+			ep->retry_with_mpa_v1 = 0;
+			ep->tried_with_mpa_v1 = 0;
+		}
+	}
+	err = 0;
+out:
+	rcu_read_unlock();
+
+	return err;
+}
+
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *child_ep, *parent_ep;
@@ -1563,18 +1624,11 @@
 	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int hwtid = GET_TID(req);
-	struct neighbour *neigh;
 	struct dst_entry *dst;
-	struct l2t_entry *l2t;
 	struct rtable *rt;
 	__be32 local_ip, peer_ip;
 	__be16 local_port, peer_port;
-	struct net_device *pdev;
-	u32 tx_chan, smac_idx;
-	u16 rss_qid;
-	u32 mtu;
-	int step;
-	int txq_idx, ctrlq_idx;
+	int err;
 
 	parent_ep = lookup_stid(t, stid);
 	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1596,49 +1650,24 @@
 		goto reject;
 	}
 	dst = &rt->dst;
-	rcu_read_lock();
-	neigh = dst_get_neighbour(dst);
-	if (neigh->dev->flags & IFF_LOOPBACK) {
-		pdev = ip_dev_find(&init_net, peer_ip);
-		BUG_ON(!pdev);
-		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
-		mtu = pdev->mtu;
-		tx_chan = cxgb4_port_chan(pdev);
-		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-		txq_idx = cxgb4_port_idx(pdev) * step;
-		ctrlq_idx = cxgb4_port_idx(pdev);
-		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
-		dev_put(pdev);
-	} else {
-		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
-		mtu = dst_mtu(dst);
-		tx_chan = cxgb4_port_chan(neigh->dev);
-		smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-		txq_idx = cxgb4_port_idx(neigh->dev) * step;
-		ctrlq_idx = cxgb4_port_idx(neigh->dev);
-		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-		rss_qid = dev->rdev.lldi.rxq_ids[
-			  cxgb4_port_idx(neigh->dev) * step];
-	}
-	rcu_read_unlock();
-	if (!l2t) {
-		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
-		       __func__);
-		dst_release(dst);
-		goto reject;
-	}
 
 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
 	if (!child_ep) {
 		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
 		       __func__);
-		cxgb4_l2t_release(l2t);
 		dst_release(dst);
 		goto reject;
 	}
+
+	err = import_ep(child_ep, peer_ip, dst, dev, false);
+	if (err) {
+		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+		       __func__);
+		dst_release(dst);
+		kfree(child_ep);
+		goto reject;
+	}
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -1651,18 +1680,11 @@
 	c4iw_get_ep(&parent_ep->com);
 	child_ep->parent_ep = parent_ep;
 	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
-	child_ep->l2t = l2t;
 	child_ep->dst = dst;
 	child_ep->hwtid = hwtid;
-	child_ep->tx_chan = tx_chan;
-	child_ep->smac_idx = smac_idx;
-	child_ep->rss_qid = rss_qid;
-	child_ep->mtu = mtu;
-	child_ep->txq_idx = txq_idx;
-	child_ep->ctrlq_idx = ctrlq_idx;
 
 	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
-	     tx_chan, smac_idx, rss_qid);
+	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
 	init_timer(&child_ep->timer);
 	cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1792,11 +1814,8 @@
 
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
-	int err = 0;
 	struct rtable *rt;
-	struct net_device *pdev;
-	struct neighbour *neigh;
-	int step;
+	int err = 0;
 
 	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
 	init_timer(&ep->timer);
@@ -1824,47 +1843,10 @@
 	}
 	ep->dst = &rt->dst;
 
-	rcu_read_lock();
-	neigh = dst_get_neighbour(ep->dst);
-
-	/* get a l2t entry */
-	if (neigh->dev->flags & IFF_LOOPBACK) {
-		PDBG("%s LOOPBACK\n", __func__);
-		pdev = ip_dev_find(&init_net,
-				   ep->com.cm_id->remote_addr.sin_addr.s_addr);
-		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-					neigh, pdev, 0);
-		ep->mtu = pdev->mtu;
-		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-		step = ep->com.dev->rdev.lldi.ntxq /
-			ep->com.dev->rdev.lldi.nchan;
-		ep->txq_idx = cxgb4_port_idx(pdev) * step;
-		step = ep->com.dev->rdev.lldi.nrxq /
-			ep->com.dev->rdev.lldi.nchan;
-		ep->ctrlq_idx = cxgb4_port_idx(pdev);
-		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-			cxgb4_port_idx(pdev) * step];
-		dev_put(pdev);
-	} else {
-		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-					neigh, neigh->dev, 0);
-		ep->mtu = dst_mtu(ep->dst);
-		ep->tx_chan = cxgb4_port_chan(neigh->dev);
-		ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-		step = ep->com.dev->rdev.lldi.ntxq /
-			ep->com.dev->rdev.lldi.nchan;
-		ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-		ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-		step = ep->com.dev->rdev.lldi.nrxq /
-			ep->com.dev->rdev.lldi.nchan;
-		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-			cxgb4_port_idx(neigh->dev) * step];
-	}
-	rcu_read_unlock();
-	if (!ep->l2t) {
+	err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+			ep->dst, ep->com.dev, false);
+	if (err) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		err = -ENOMEM;
 		goto fail4;
 	}
 
@@ -2240,13 +2222,10 @@
 
 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
-	int err = 0;
 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
 	struct c4iw_ep *ep;
 	struct rtable *rt;
-	struct net_device *pdev;
-	struct neighbour *neigh;
-	int step;
+	int err = 0;
 
 	if ((conn_param->ord > c4iw_max_read_depth) ||
 	    (conn_param->ird > c4iw_max_read_depth)) {
@@ -2307,49 +2286,10 @@
 	}
 	ep->dst = &rt->dst;
 
-	rcu_read_lock();
-	neigh = dst_get_neighbour(ep->dst);
-
-	/* get a l2t entry */
-	if (neigh->dev->flags & IFF_LOOPBACK) {
-		PDBG("%s LOOPBACK\n", __func__);
-		pdev = ip_dev_find(&init_net,
-				   cm_id->remote_addr.sin_addr.s_addr);
-		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-					neigh, pdev, 0);
-		ep->mtu = pdev->mtu;
-		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-		step = ep->com.dev->rdev.lldi.ntxq /
-		       ep->com.dev->rdev.lldi.nchan;
-		ep->txq_idx = cxgb4_port_idx(pdev) * step;
-		step = ep->com.dev->rdev.lldi.nrxq /
-		       ep->com.dev->rdev.lldi.nchan;
-		ep->ctrlq_idx = cxgb4_port_idx(pdev);
-		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-			      cxgb4_port_idx(pdev) * step];
-		dev_put(pdev);
-	} else {
-		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-					neigh, neigh->dev, 0);
-		ep->mtu = dst_mtu(ep->dst);
-		ep->tx_chan = cxgb4_port_chan(neigh->dev);
-		ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-		step = ep->com.dev->rdev.lldi.ntxq /
-		       ep->com.dev->rdev.lldi.nchan;
-		ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-		ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-		step = ep->com.dev->rdev.lldi.nrxq /
-		       ep->com.dev->rdev.lldi.nchan;
-		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-			      cxgb4_port_idx(neigh->dev) * step];
-		ep->retry_with_mpa_v1 = 0;
-		ep->tried_with_mpa_v1 = 0;
-	}
-	rcu_read_unlock();
-	if (!ep->l2t) {
+	err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+			ep->dst, ep->com.dev, true);
+	if (err) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		err = -ENOMEM;
 		goto fail4;
 	}
 
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 31ae1b1..b7d4216 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -46,7 +46,7 @@
 static struct super_block *ipath_super;
 
 static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
-			 int mode, const struct file_operations *fops,
+			 umode_t mode, const struct file_operations *fops,
 			 void *data)
 {
 	int error;
@@ -61,7 +61,7 @@
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	inode->i_private = data;
-	if ((mode & S_IFMT) == S_IFDIR) {
+	if (S_ISDIR(mode)) {
 		inode->i_op = &simple_dir_inode_operations;
 		inc_nlink(inode);
 		inc_nlink(dir);
@@ -76,7 +76,7 @@
 	return error;
 }
 
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
 		       struct dentry *parent, struct dentry **dentry,
 		       const struct file_operations *fops, void *data)
 {
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 4b8f9c4..a251bec 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -126,7 +126,7 @@
 		ah->av.ib.dlid = cpu_to_be16(0xc000);
 
 	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
-	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
 
 	return &ah->ibah;
 }
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e8df155..5ecf38d 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -715,13 +715,17 @@
 		}
 
 		wc->slid	   = be16_to_cpu(cqe->rlid);
-		wc->sl		   = be16_to_cpu(cqe->sl_vid) >> 12;
 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
 		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+		if (rdma_port_get_link_layer(wc->qp->device,
+				(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
+		else
+			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f36da99..95c94d8 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -109,7 +109,8 @@
 
 	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
 			   in_modifier, op_modifier,
-			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
 
 	if (!err)
 		memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@
 		return IB_MAD_RESULT_FAILURE;
 
 	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
-			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_WRAPPED);
 	if (err)
 		err = IB_MAD_RESULT_FAILURE;
 	else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc..7b445df 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,7 +177,7 @@
 {
 	struct mlx4_dev *dev = to_mdev(device)->dev;
 
-	return dev->caps.port_mask & (1 << (port_num - 1)) ?
+	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 }
 
@@ -434,7 +434,7 @@
 	memset(mailbox->buf, 0, 256);
 	memcpy(mailbox->buf, props->node_desc, 64);
 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
-		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
 	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
@@ -463,7 +463,7 @@
 	}
 
 	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B);
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
 	return err;
@@ -899,7 +899,8 @@
 	memcpy(gids, gw->gids, sizeof gw->gids);
 
 	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
 	if (err)
 		printk(KERN_WARNING "set port command failed\n");
 	else {
@@ -1074,6 +1075,11 @@
 
 	printk_once(KERN_INFO "%s", mlx4_ib_version);
 
+	if (mlx4_is_mfunc(dev)) {
+		printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+		return NULL;
+	}
+
 	mlx4_foreach_ib_transport_port(i, dev)
 		num_ports++;
 
@@ -1244,7 +1250,8 @@
 
 err_counter:
 	for (; i; --i)
-		mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+		if (ibdev->counters[i - 1] != -1)
+			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
 
 err_map:
 	iounmap(ibdev->uar_map);
@@ -1275,7 +1282,8 @@
 	}
 	iounmap(ibdev->uar_map);
 	for (p = 0; p < ibdev->num_ports; ++p)
-		mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+		if (ibdev->counters[p] != -1)
+			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a16f0c8..aa2aefa 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -962,7 +962,7 @@
 
 	if (is_eth) {
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-			((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+			((port - 1) << 6) | ((ah->sl & 7) << 3);
 
 		if (!(ah->ah_flags & IB_AH_GRH))
 			return -1;
@@ -1437,7 +1437,7 @@
 			u16 pcp;
 
 			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
-			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
 		}
 	} else {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 0a52d72..425065b 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1348,7 +1348,8 @@
 	else
 		netdev = nesvnic->netdev;
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+	rcu_read_lock();
+	neigh = dst_get_neighbour_noref(&rt->dst);
 	if (neigh) {
 		if (neigh->nud_state & NUD_VALID) {
 			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@
 				if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
 					    neigh->ha, ETH_ALEN)) {
 					/* Mac address same as in nes_arp_table */
-					neigh_release(neigh);
 					ip_rt_put(rt);
 					return rc;
 				}
@@ -1373,15 +1373,11 @@
 					     dst_ip, NES_ARP_ADD);
 			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
 					   NES_ARP_RESOLVE);
+		} else {
+			neigh_event_send(neigh, NULL);
 		}
-		neigh_release(neigh);
 	}
-
-	if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
-		rcu_read_lock();
-		neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-		rcu_read_unlock();
-	}
+	rcu_read_unlock();
 	ip_rt_put(rt);
 	return rc;
 }
@@ -2838,6 +2834,7 @@
 		issue_disconn = 1;
 		issue_close = 1;
 		nesqp->cm_id = NULL;
+		del_timer(&nesqp->terminate_timer);
 		if (nesqp->flush_issued == 0) {
 			nesqp->flush_issued = 1;
 			issue_flush = 1;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 7c0ff19..055f4b5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1529,7 +1529,7 @@
 	} else {
 		/* setup 10G MDIO operation */
 		tx_config &= 0xFFFFFFE3;
-		tx_config |= 0x15;
+		tx_config |= 0x1D;
 	}
 	nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
 
@@ -3619,10 +3619,6 @@
 			}
 			break;
 		case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
-			if (nesqp->term_flags) {
-				nes_terminate_done(nesqp, 0);
-				return;
-			}
 			spin_lock_irqsave(&nesqp->lock, flags);
 			nesqp->hw_iwarp_state = iwarp_state;
 			nesqp->hw_tcp_state = tcp_state;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index c00d2f3..4b3fa71 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1589,7 +1589,7 @@
 	.set_pauseparam = nes_netdev_set_pauseparam,
 };
 
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
 {
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 u32temp;
@@ -1610,7 +1610,7 @@
 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 }
 
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@
 	return features;
 }
 
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index cd10968..8b4c2ff 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -56,7 +56,7 @@
 u32 mh_detected;
 u32 mh_pauses_sent;
 
-u32 nes_set_pau(struct nes_device *nesdev)
+static u32 nes_set_pau(struct nes_device *nesdev)
 {
 	u32 ret = 0;
 	u32 counter;
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
index 21f374a..a5356cb 100644
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -97,7 +97,7 @@
 	u64 iblnkerrsnap;
 	u64 ibcctrl; /* kr_ibcctrl shadow */
 	u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
-	u64 chase_end;
+	unsigned long chase_end;
 	u32 last_delay_mult;
 };
 
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index c90a55f..6fc9365 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -371,9 +371,8 @@
 						lnh == QIB_LRH_GRH,
 						qp,
 						be32_to_cpu(ohdr->bth[0]));
-				if (ruc_res) {
+				if (ruc_res)
 					goto unlock;
-				}
 
 				/* Only deal with RDMA Writes for now */
 				if (opcode <
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600e..a740324 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@
 	strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
 	ctxt_fp(fp) = rcd;
 	qib_stats.sps_ctxts++;
-	dd->freectxts++;
+	dd->freectxts--;
 	ret = 0;
 	goto bail;
 
@@ -1794,7 +1794,7 @@
 		if (dd->pageshadow)
 			unlock_expected_tids(rcd);
 		qib_stats.sps_ctxts--;
-		dd->freectxts--;
+		dd->freectxts++;
 	}
 
 	mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index df7fa25..05e0f17 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -47,7 +47,7 @@
 #define private2dd(file) ((file)->f_dentry->d_inode->i_private)
 
 static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
-		       int mode, const struct file_operations *fops,
+		       umode_t mode, const struct file_operations *fops,
 		       void *data)
 {
 	int error;
@@ -67,7 +67,7 @@
 	inode->i_mtime = inode->i_atime;
 	inode->i_ctime = inode->i_atime;
 	inode->i_private = data;
-	if ((mode & S_IFMT) == S_IFDIR) {
+	if (S_ISDIR(mode)) {
 		inode->i_op = &simple_dir_inode_operations;
 		inc_nlink(inode);
 		inc_nlink(dir);
@@ -82,7 +82,7 @@
 	return error;
 }
 
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
 		       struct dentry *parent, struct dentry **dentry,
 		       const struct file_operations *fops, void *data)
 {
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 781a802..4f18e2d 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2076,9 +2076,11 @@
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
 				    u32 updegr, u32 egrhd, u32 npkts)
 {
-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+	mmiowb();
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	mmiowb();
 }
 
 static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 439d3c5..3c722f7 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1051,7 +1051,7 @@
 static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
 {
 	u8 ibclt;
-	u64 tnow;
+	unsigned long tnow;
 
 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
 
@@ -1066,9 +1066,9 @@
 	case IB_7220_LT_STATE_CFGWAITRMT:
 	case IB_7220_LT_STATE_TXREVLANES:
 	case IB_7220_LT_STATE_CFGENH:
-		tnow = get_jiffies_64();
+		tnow = jiffies;
 		if (ppd->cpspec->chase_end &&
-		    time_after64(tnow, ppd->cpspec->chase_end)) {
+		    time_after(tnow, ppd->cpspec->chase_end)) {
 			ppd->cpspec->chase_end = 0;
 			qib_set_ib_7220_lstate(ppd,
 				QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2725,9 +2725,11 @@
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
 				    u32 updegr, u32 egrhd, u32 npkts)
 {
-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+	mmiowb();
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	mmiowb();
 }
 
 static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 1d58959..41e9208 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -615,8 +615,8 @@
 	u64 ibmalfsnap;
 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
-	u64 qdr_dfe_time;
-	u64 chase_end;
+	unsigned long qdr_dfe_time;
+	unsigned long chase_end;
 	u32 autoneg_tries;
 	u32 recovery_init;
 	u32 qdr_dfe_on;
@@ -1672,7 +1672,8 @@
 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
 }
 
-static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
+		u8 ibclt)
 {
 	ppd->cpspec->chase_end = 0;
 
@@ -1688,7 +1689,7 @@
 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 {
 	u8 ibclt;
-	u64 tnow;
+	unsigned long tnow;
 
 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
 
@@ -1703,9 +1704,9 @@
 	case IB_7322_LT_STATE_CFGWAITRMT:
 	case IB_7322_LT_STATE_TXREVLANES:
 	case IB_7322_LT_STATE_CFGENH:
-		tnow = get_jiffies_64();
+		tnow = jiffies;
 		if (ppd->cpspec->chase_end &&
-		     time_after64(tnow, ppd->cpspec->chase_end))
+		     time_after(tnow, ppd->cpspec->chase_end))
 			disable_chase(ppd, tnow, ibclt);
 		else if (!ppd->cpspec->chase_end)
 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
@@ -2714,7 +2715,7 @@
 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
 			if (!(pins & mask)) {
 				++handled;
-				qd->t_insert = get_jiffies_64();
+				qd->t_insert = jiffies;
 				queue_work(ib_wq, &qd->work);
 			}
 		}
@@ -3602,7 +3603,7 @@
 	if (qib_rcvhdrcnt)
 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
 	else
-		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
 				    dd->num_pports > 1 ? 1024U : 2048U);
 }
 
@@ -4082,10 +4083,12 @@
 	 */
 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
 		adjust_rcv_timeout(rcd, npkts);
-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+	mmiowb();
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	mmiowb();
 }
 
 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
@@ -4794,7 +4797,7 @@
 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
 				    QIBL_LINKACTIVE)) &&
 		    ppd->cpspec->qdr_dfe_time &&
-		    time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
 			ppd->cpspec->qdr_dfe_on = 0;
 
 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
@@ -5240,7 +5243,7 @@
 			/* schedule the qsfp refresh which should turn the link
 			   off */
 			if (ppd->dd->flags & QIB_HAS_QSFP) {
-				qd->t_insert = get_jiffies_64();
+				qd->t_insert = jiffies;
 				queue_work(ib_wq, &qd->work);
 			}
 			spin_lock_irqsave(&ppd->sdma_lock, flags);
@@ -5592,7 +5595,7 @@
 {
 	struct qib_qsfp_data *qd;
 	struct qib_pportdata *ppd;
-	u64 pwrup;
+	unsigned long pwrup;
 	unsigned long flags;
 	int ret;
 	u32 le2;
@@ -5620,8 +5623,7 @@
 		 * to insertion.
 		 */
 		while (1) {
-			u64 now = get_jiffies_64();
-			if (time_after64(now, pwrup))
+			if (time_is_before_jiffies(pwrup))
 				break;
 			msleep(20);
 		}
@@ -7506,7 +7508,7 @@
 
 static int serdes_7322_init_new(struct qib_pportdata *ppd)
 {
-	u64 tstart;
+	unsigned long tend;
 	u32 le_val, rxcaldone;
 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
 
@@ -7611,10 +7613,8 @@
 	msleep(20);
 	/*       Start Calibration */
 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
-	tstart = get_jiffies_64();
-	while (chan_done &&
-	       !time_after64(get_jiffies_64(),
-			tstart + msecs_to_jiffies(500))) {
+	tend = jiffies + msecs_to_jiffies(500);
+	while (chan_done && !time_is_before_jiffies(tend)) {
 		msleep(20);
 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 58b0f8a..cf0cd30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1015,7 +1015,7 @@
 #define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
 #define PFX QIB_DRV_NAME ": "
 
-static const struct pci_device_id qib_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
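
The qib_pci_tbl hunk switches the ID table to the DEFINE_PCI_DEVICE_TABLE() macro. At the time of this merge the macro expands roughly as below (a sketch, not the verbatim definition), so the table keeps its type but gains the __devinitconst annotation and can be discarded with other init-only data when hotplug support is compiled out:

	/* approximate definition, from include/linux/pci.h of this era */
	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst
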
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 97a8bdf..f695061 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -560,9 +560,9 @@
  * BIOS may not set PCIe bus-utilization parameters for best performance.
  * Check and optionally adjust them to maximize our throughput.
  */
-static int qib_pcie_caps;
+static int qib_pcie_caps = 0x51;
 module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
 
 static int qib_tune_pcie_caps(struct qib_devdata *dd)
 {
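
The new 0x51 default for pcie_caps packs two 4-bit fields, as the corrected MODULE_PARM_DESC above now states: bits 0..3 select MaxPayload and bits 4..7 select MaxReadReq, each as the usual PCIe size exponent (size = 128 << field). A small decoding sketch; the helper is illustrative only, but under that reading 0x51 requests a 256-byte payload and a 4096-byte read request:

	#include <linux/printk.h>

	static void decode_pcie_caps(int caps)
	{
		unsigned int payload = 128U << (caps & 0xf);	    /* 0x51 -> 256  */
		unsigned int readreq = 128U << ((caps >> 4) & 0xf); /* 0x51 -> 4096 */

		pr_info("MaxPayload %u, MaxReadReq %u\n", payload, readreq);
	}
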
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 46002a9..91908f5 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -177,7 +177,7 @@
 	struct qib_pportdata *ppd;
 	struct work_struct work;
 	struct qib_qsfp_cache cache;
-	u64 t_insert;
+	unsigned long t_insert;
 	u8 modpresent;
 };
 
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index de1a4b2..ac065dd 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -300,7 +300,7 @@
 }
 
 static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
-       const char *where)
+	const char *where)
 {
 	int ret, chn, baduns;
 	u64 val;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 78fbd56..dae5160 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -150,7 +150,7 @@
  * For userland compatibility, these offsets must remain fixed.
  * They are strings for QIB_STATUS_*
  */
-static const char *qib_status_str[] = {
+static const char * const qib_status_str[] = {
 	"Initted",
 	"",
 	"",
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a894762..7b6c3bf 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -913,8 +913,8 @@
 		__raw_writel(last, piobuf);
 }
 
-static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
-					 struct qib_qp *qp, int *retp)
+static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
+					   struct qib_qp *qp)
 {
 	struct qib_verbs_txreq *tx;
 	unsigned long flags;
@@ -926,8 +926,9 @@
 		struct list_head *l = dev->txreq_free.next;
 
 		list_del(l);
+		spin_unlock(&dev->pending_lock);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
-		*retp = 0;
 	} else {
 		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
 		    list_empty(&qp->iowait)) {
@@ -935,14 +936,33 @@
 			qp->s_flags |= QIB_S_WAIT_TX;
 			list_add_tail(&qp->iowait, &dev->txwait);
 		}
-		tx = NULL;
 		qp->s_flags &= ~QIB_S_BUSY;
-		*retp = -EBUSY;
+		spin_unlock(&dev->pending_lock);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		tx = ERR_PTR(-EBUSY);
 	}
+	return tx;
+}
 
-	spin_unlock(&dev->pending_lock);
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+					 struct qib_qp *qp)
+{
+	struct qib_verbs_txreq *tx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	/* assume the list is non-empty */
+	if (likely(!list_empty(&dev->txreq_free))) {
+		struct list_head *l = dev->txreq_free.next;
+
+		list_del(l);
+		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+	} else {
+		/* call slow path to get the extra lock */
+		spin_unlock_irqrestore(&dev->pending_lock, flags);
+	tx = __get_txreq(dev, qp);
+	}
 	return tx;
 }
 
@@ -1122,9 +1142,9 @@
 		goto bail;
 	}
 
-	tx = get_txreq(dev, qp, &ret);
-	if (!tx)
-		goto bail;
+	tx = get_txreq(dev, qp);
+	if (IS_ERR(tx))
+		goto bail_tx;
 
 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
 				       be16_to_cpu(hdr->lrh[0]) >> 12);
@@ -1195,6 +1215,9 @@
 	ibp->n_unaligned++;
 bail:
 	return ret;
+bail_tx:
+	ret = PTR_ERR(tx);
+	goto bail;
 }
 
 /*
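
The get_txreq() rework above splits allocation into an inline fast path and a noinline __get_txreq() slow path, and replaces the int out-parameter with an ERR_PTR-encoded return that the caller unpacks at the new bail_tx label. A minimal sketch of that calling convention; struct thing and try_fast_alloc() are illustrative stand-ins:

	#include <linux/err.h>

	static struct thing *get_thing(void)
	{
		struct thing *t = try_fast_alloc();	/* assumed helper */

		if (!t)
			return ERR_PTR(-EBUSY);		/* encode errno in the pointer */
		return t;
	}

	static int use_thing(void)
	{
		struct thing *t = get_thing();

		if (IS_ERR(t))
			return PTR_ERR(t);		/* recover the errno */
		/* ... use t ... */
		return 0;
	}
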
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 83695b4..3514ca0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -171,7 +171,7 @@
 	return 0;
 }
 
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -556,15 +556,13 @@
 }
 
 /* called with rcu_read_lock */
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
-	struct neighbour *n;
 	unsigned long flags;
 
-	n = dst_get_neighbour(skb_dst(skb));
 	neigh = ipoib_neigh_alloc(n, skb->dev);
 	if (!neigh) {
 		++dev->stats.tx_dropped;
@@ -638,16 +636,13 @@
 }
 
 /* called with rcu_read_lock */
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
-	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *n;
 
 	/* Look up path record for unicasts */
-	n = dst_get_neighbour(dst);
 	if (n->ha[4] != 0xff) {
-		neigh_add_path(skb, dev);
+		neigh_add_path(skb, n, dev);
 		return;
 	}
 
@@ -723,12 +718,17 @@
 	unsigned long flags;
 
 	rcu_read_lock();
-	if (likely(skb_dst(skb)))
-		n = dst_get_neighbour(skb_dst(skb));
-
+	if (likely(skb_dst(skb))) {
+		n = dst_get_neighbour_noref(skb_dst(skb));
+		if (!n) {
+			++dev->stats.tx_dropped;
+			dev_kfree_skb_any(skb);
+			goto unlock;
+		}
+	}
 	if (likely(n)) {
 		if (unlikely(!*to_ipoib_neigh(n))) {
-			ipoib_path_lookup(skb, dev);
+			ipoib_path_lookup(skb, n, dev);
 			goto unlock;
 		}
 
@@ -751,7 +751,7 @@
 			list_del(&neigh->list);
 			ipoib_neigh_free(dev, neigh);
 			spin_unlock_irqrestore(&priv->lock, flags);
-			ipoib_path_lookup(skb, dev);
+			ipoib_path_lookup(skb, n, dev);
 			goto unlock;
 		}
 
@@ -841,7 +841,7 @@
 	dst = skb_dst(skb);
 	n = NULL;
 	if (dst)
-		n = dst_get_neighbour_raw(dst);
+		n = dst_get_neighbour_noref_raw(dst);
 	if ((!dst || !n) && daddr) {
 		struct ipoib_pseudoheader *phdr =
 			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@ -1222,6 +1222,8 @@
 	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
 	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
 
+	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
 	result = ib_query_pkey(hca, port, 0, &priv->pkey);
 	if (result) {
 		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 873bff9..f7ff9dd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -269,7 +269,7 @@
 
 		skb->dev = dev;
 		if (dst)
-			n = dst_get_neighbour_raw(dst);
+			n = dst_get_neighbour_noref_raw(dst);
 		if (!dst || !n) {
 			/* put pseudoheader back on for next time */
 			skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -728,7 +728,7 @@
 
 		rcu_read_lock();
 		if (dst)
-			n = dst_get_neighbour(dst);
+			n = dst_get_neighbour_noref(dst);
 		if (n && !*to_ipoib_neigh(n)) {
 			struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
 								      skb->dev);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7e7373a..9a43cb0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -638,7 +638,7 @@
 	iser_conn_terminate(ib_conn);
 }
 
-static mode_t iser_attr_is_visible(int param_type, int param)
+static umode_t iser_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_HOST_PARAM:
diff --git a/drivers/input/input.c b/drivers/input/input.c
index da38d97..1f78c957 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1624,7 +1624,7 @@
 #endif
 };
 
-static char *input_devnode(struct device *dev, mode_t *mode)
+static char *input_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
 }
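
The mode_t to umode_t conversions in this merge (input_devnode() here, iser_attr_is_visible() above, and the ad7877/tsc2005 is_visible callbacks below) follow a tree-wide switch of these callback prototypes to the kernel-internal umode_t type. A sketch of the two affected callback shapes, assuming the 3.3-era prototypes; the foo_* names are illustrative:

	#include <linux/kernel.h>
	#include <linux/device.h>
	#include <linux/gfp.h>
	#include <linux/sysfs.h>

	static char *foo_devnode(struct device *dev, umode_t *mode)
	{
		if (mode)
			*mode = 0600;		/* restrict the device node */
		return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
	}

	static umode_t foo_attr_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
	{
		return attr->mode;		/* or 0 to hide the attribute */
	}
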
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d728875..32bbd4c 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1041,18 +1041,7 @@
 	.id_table	= xpad_table,
 };
 
-static int __init usb_xpad_init(void)
-{
-	return usb_register(&xpad_driver);
-}
-
-static void __exit usb_xpad_exit(void)
-{
-	usb_deregister(&xpad_driver);
-}
-
-module_init(usb_xpad_init);
-module_exit(usb_xpad_exit);
+module_usb_driver(xpad_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
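
This xpad.c change is the first of many conversions in this merge that drop hand-written init/exit boilerplate in favour of the new module_usb_driver() helper. The macro generates roughly the code below (a sketch of the expansion, not the literal definition), so the register/deregister calls still happen; what is lost is only driver-specific extras such as the version banners some of the drivers below used to print:

	/* approximately what module_usb_driver(xpad_driver) generates */
	static int __init xpad_driver_init(void)
	{
		return usb_register(&xpad_driver);
	}
	module_init(xpad_driver_init);

	static void __exit xpad_driver_exit(void)
	{
		usb_deregister(&xpad_driver);
	}
	module_exit(xpad_driver_exit);
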
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 8d345e8..874a51c 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -1010,23 +1010,4 @@
 	return r;
 }
 
-static int __init ati_remote2_init(void)
-{
-	int r;
-
-	r = usb_register(&ati_remote2_driver);
-	if (r)
-		printk(KERN_ERR "ati_remote2: usb_register() = %d\n", r);
-	else
-		printk(KERN_INFO "ati_remote2: " DRIVER_DESC " " DRIVER_VERSION "\n");
-
-	return r;
-}
-
-static void __exit ati_remote2_exit(void)
-{
-	usb_deregister(&ati_remote2_driver);
-}
-
-module_init(ati_remote2_init);
-module_exit(ati_remote2_exit);
+module_usb_driver(ati_remote2_driver);
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1..06517e6 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@
 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
 {
 	struct cma3000_accl_data *data = dev_id;
-	int datax, datay, dataz;
-	u8 ctrl, mode, range, intr_status;
+	int datax, datay, dataz, intr_status;
+	u8 ctrl, mode, range;
 
 	intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
 	if (intr_status < 0)
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index fc62256..d99151a 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -580,26 +580,7 @@
 	.id_table =	keyspan_table
 };
 
-static int __init usb_keyspan_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&keyspan_driver);
-	if (result)
-		err("usb_register failed. Error number %d\n", result);
-
-	return result;
-}
-
-static void __exit usb_keyspan_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&keyspan_driver);
-}
-
-module_init(usb_keyspan_init);
-module_exit(usb_keyspan_exit);
+module_usb_driver(keyspan_driver);
 
 MODULE_DEVICE_TABLE(usb, keyspan_table);
 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index f459471..538f704 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -441,18 +441,7 @@
         .id_table =     powermate_devices,
 };
 
-static int __init powermate_init(void)
-{
-	return usb_register(&powermate_driver);
-}
-
-static void __exit powermate_cleanup(void)
-{
-	usb_deregister(&powermate_driver);
-}
-
-module_init(powermate_init);
-module_exit(powermate_cleanup);
+module_usb_driver(powermate_driver);
 
 MODULE_AUTHOR( "William R Sowerbutts" );
 MODULE_DESCRIPTION( "Griffin Technology, Inc PowerMate driver" );
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 41201c6..f4776e7 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -988,22 +988,7 @@
 	.id_table	= usb_table,
 };
 
-static int __init yealink_dev_init(void)
-{
-	int ret = usb_register(&yealink_driver);
-	if (ret == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-		       DRIVER_DESC "\n");
-	return ret;
-}
-
-static void __exit yealink_dev_exit(void)
-{
-	usb_deregister(&yealink_driver);
-}
-
-module_init(yealink_dev_init);
-module_exit(yealink_dev_exit);
+module_usb_driver(yealink_driver);
 
 MODULE_DEVICE_TABLE (usb, usb_table);
 
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index b77f999..0acbc7d 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -938,15 +938,4 @@
 	.id_table	= atp_table,
 };
 
-static int __init atp_init(void)
-{
-	return usb_register(&atp_driver);
-}
-
-static void __exit atp_exit(void)
-{
-	usb_deregister(&atp_driver);
-}
-
-module_init(atp_init);
-module_exit(atp_exit);
+module_usb_driver(atp_driver);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 5ec617e..cf87f8b 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -940,16 +940,4 @@
 	.supports_autosuspend	= 1,
 };
 
-static int __init bcm5974_init(void)
-{
-	return usb_register(&bcm5974_driver);
-}
-
-static void __exit bcm5974_exit(void)
-{
-	usb_deregister(&bcm5974_driver);
-}
-
-module_init(bcm5974_init);
-module_exit(bcm5974_exit);
-
+module_usb_driver(bcm5974_driver);
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index c5b12d2..86d6f39 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@
 	ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
 
 	if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-		return -1;
+		goto out;
 
 	if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
 		/* inversion is required */
@@ -261,7 +261,7 @@
 	ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
 
 	if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-		return -1;
+		goto out;
 
 	if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
 		ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@
 	};
 	int val;
 
-	if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+	if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
 		return -EIO;
 
 	*btn = buttons[(val & 0x30) >> 4];
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index ed1395a..2e4af24 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
 /* Finger-sensing Pad control registers */
 #define	FSP_REG_SYSCTL1		0x10
 #define	FSP_BIT_EN_REG_CLK	BIT(5)
+#define	FSP_REG_TMOD_STATUS	0x20
 #define	FSP_REG_OPC_QDOWN	0x31
 #define	FSP_BIT_EN_OPC_TAG	BIT(7)
 #define	FSP_REG_OPTZ_XLO	0x34
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b82..a6dcd18 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/input/mt.h>
 #include <linux/serio.h>
@@ -1220,6 +1221,16 @@
 
 	do {
 		psmouse_reset(psmouse);
+		if (retry) {
+			/*
+			 * On some boxes, right after resuming, the touchpad
+			 * needs some time to finish initializing (I assume
+			 * it needs time to calibrate) and start responding
+			 * to Synaptics-specific queries, so let's wait a
+			 * bit.
+			 */
+			ssleep(1);
+		}
 		error = synaptics_detect(psmouse, 0);
 	} while (error && ++retry < 3);
 
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 12abc50..8407d5b 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -195,6 +195,8 @@
 	{ 0, 0 }
 };
 
+MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
+
 static struct amba_driver ambakmi_driver = {
 	.drv		= {
 		.name	= "kmi-pl050",
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index d94f7e9..f8b0b1d 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -269,19 +269,4 @@
 	.id_table =	usb_acecad_id_table,
 };
 
-static int __init usb_acecad_init(void)
-{
-	int result = usb_register(&usb_acecad_driver);
-	if (result == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-		       DRIVER_DESC "\n");
-	return result;
-}
-
-static void __exit usb_acecad_exit(void)
-{
-	usb_deregister(&usb_acecad_driver);
-}
-
-module_init(usb_acecad_init);
-module_exit(usb_acecad_exit);
+module_usb_driver(usb_acecad_driver);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 6d89fd1..d5ef3de 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1919,21 +1919,7 @@
 	.id_table = aiptek_ids,
 };
 
-static int __init aiptek_init(void)
-{
-	int result = usb_register(&aiptek_driver);
-	if (result == 0) {
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-		       DRIVER_DESC "\n");
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_AUTHOR "\n");
-	}
-	return result;
-}
-
-static void __exit aiptek_exit(void)
-{
-	usb_deregister(&aiptek_driver);
-}
+module_usb_driver(aiptek_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
@@ -1943,6 +1929,3 @@
 MODULE_PARM_DESC(programmableDelay, "delay used during tablet programming");
 module_param(jitterDelay, int, 0);
 MODULE_PARM_DESC(jitterDelay, "stylus/mouse settlement delay");
-
-module_init(aiptek_init);
-module_exit(aiptek_exit);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 8ea6afe..89a2978 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -1022,33 +1022,7 @@
 	.disconnect	= gtco_disconnect,
 };
 
-/*
- *  Register this module with the USB subsystem
- */
-static int __init gtco_init(void)
-{
-	int error;
-
-	error = usb_register(&gtco_driverinfo_table);
-	if (error) {
-		err("usb_register() failed rc=0x%x", error);
-		return error;
-	}
-
-	printk("GTCO usb driver version: %s", GTCO_VERSION);
-	return 0;
-}
-
-/*
- *   Deregister this module with the USB subsystem
- */
-static void __exit gtco_exit(void)
-{
-	usb_deregister(&gtco_driverinfo_table);
-}
-
-module_init(gtco_init);
-module_exit(gtco_exit);
+module_usb_driver(gtco_driverinfo_table);
 
 MODULE_DESCRIPTION("GTCO digitizer USB driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 6504b62..b2db3cf 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -432,15 +432,4 @@
 	.id_table	= hanwang_ids,
 };
 
-static int __init hanwang_init(void)
-{
-	return usb_register(&hanwang_driver);
-}
-
-static void __exit hanwang_exit(void)
-{
-	usb_deregister(&hanwang_driver);
-}
-
-module_init(hanwang_init);
-module_exit(hanwang_exit);
+module_usb_driver(hanwang_driver);
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 290f4e5..85a5b40 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -198,22 +198,4 @@
 	.id_table =	kbtab_ids,
 };
 
-static int __init kbtab_init(void)
-{
-	int retval;
-	retval = usb_register(&kbtab_driver);
-	if (retval)
-		goto out;
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-out:
-	return retval;
-}
-
-static void __exit kbtab_exit(void)
-{
-	usb_deregister(&kbtab_driver);
-}
-
-module_init(kbtab_init);
-module_exit(kbtab_exit);
+module_usb_driver(kbtab_driver);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 1c1b7b4..8f9cde3 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -919,21 +919,4 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init wacom_init(void)
-{
-	int result;
-
-	result = usb_register(&wacom_driver);
-	if (result == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-		       DRIVER_DESC "\n");
-	return result;
-}
-
-static void __exit wacom_exit(void)
-{
-	usb_deregister(&wacom_driver);
-}
-
-module_init(wacom_init);
-module_exit(wacom_exit);
+module_usb_driver(wacom_driver);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e..2ee47d0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1470,6 +1470,9 @@
 static const struct wacom_features wacom_features_0xE6 =
 	{ "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
 	  0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+	{ "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
+	  0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x47 =
 	{ "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023,
 	  31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@
 	{ USB_DEVICE_WACOM(0xE2) },
 	{ USB_DEVICE_WACOM(0xE3) },
 	{ USB_DEVICE_WACOM(0xE6) },
+	{ USB_DEVICE_WACOM(0xEC) },
 	{ USB_DEVICE_WACOM(0x47) },
 	{ USB_DEVICE_LENOVO(0x6004) },
 	{ }
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 400131d..baa43df 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -612,10 +612,10 @@
 	NULL
 };
 
-static mode_t ad7877_attr_is_visible(struct kobject *kobj,
+static umode_t ad7877_attr_is_visible(struct kobject *kobj,
 				     struct attribute *attr, int n)
 {
-	mode_t mode = attr->mode;
+	umode_t mode = attr->mode;
 
 	if (attr == &dev_attr_aux3.attr) {
 		if (gpio3)
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 4ab3713..8825fe3 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -23,6 +23,7 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -46,6 +47,7 @@
 	struct i2c_client *client;
 	struct input_dev *input_dev;
 	struct st1232_ts_finger finger[MAX_FINGERS];
+	struct dev_pm_qos_request low_latency_req;
 };
 
 static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -118,8 +120,17 @@
 	}
 
 	/* SYN_MT_REPORT only if no contact */
-	if (!count)
+	if (!count) {
 		input_mt_sync(input_dev);
+		if (ts->low_latency_req.dev) {
+			dev_pm_qos_remove_request(&ts->low_latency_req);
+			ts->low_latency_req.dev = NULL;
+		}
+	} else if (!ts->low_latency_req.dev) {
+		/* First contact, request 100 us latency. */
+		dev_pm_qos_add_ancestor_request(&ts->client->dev,
+						&ts->low_latency_req, 100);
+	}
 
 	/* SYN_REPORT */
 	input_sync(input_dev);
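
The st1232 hunk above uses the then-new device PM QoS "ancestor request": on the first contact it asks the nearest power-managed ancestor (here the I2C controller) to keep resume latency at or below 100 us, and it drops the request once all contacts are released, using .dev as the "request active" marker just as the driver does. A minimal sketch of that pairing, assuming the 3.3-era dev_pm_qos API:

	#include <linux/device.h>
	#include <linux/pm_qos.h>

	static struct dev_pm_qos_request low_lat;	/* illustrative only */

	static void touch_begin(struct device *dev)
	{
		if (!low_lat.dev)
			dev_pm_qos_add_ancestor_request(dev, &low_lat, 100);
	}

	static void touch_end(void)
	{
		if (low_lat.dev) {
			dev_pm_qos_remove_request(&low_lat);
			low_lat.dev = NULL;
		}
	}
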
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index cbf0ff3..067d956 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -450,13 +450,13 @@
 	NULL
 };
 
-static mode_t tsc2005_attr_is_visible(struct kobject *kobj,
+static umode_t tsc2005_attr_is_visible(struct kobject *kobj,
 				      struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct spi_device *spi = to_spi_device(dev);
 	struct tsc2005 *ts = spi_get_drvdata(spi);
-	mode_t mode = attr->mode;
+	umode_t mode = attr->mode;
 
 	if (attr == &dev_attr_selftest.attr) {
 		if (!ts->set_reset)
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 73fd664..e539d92 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1580,18 +1580,7 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init usbtouch_init(void)
-{
-	return usb_register(&usbtouch_driver);
-}
-
-static void __exit usbtouch_cleanup(void)
-{
-	usb_deregister(&usbtouch_driver);
-}
-
-module_init(usbtouch_init);
-module_exit(usbtouch_cleanup);
+module_usb_driver(usbtouch_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a004c39..31053a9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,6 +41,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
+#include <linux/memblock.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -405,6 +406,9 @@
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -2185,18 +2189,6 @@
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
-static int __init si_domain_work_fn(unsigned long start_pfn,
-				    unsigned long end_pfn, void *datax)
-{
-	int *ret = datax;
-
-	*ret = iommu_domain_identity_map(si_domain,
-					 (uint64_t)start_pfn << PAGE_SHIFT,
-					 (uint64_t)end_pfn << PAGE_SHIFT);
-	return *ret;
-
-}
-
 static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2228,9 +2220,15 @@
 		return 0;
 
 	for_each_online_node(nid) {
-		work_with_active_regions(nid, si_domain_work_fn, &ret);
-		if (ret)
-			return ret;
+		unsigned long start_pfn, end_pfn;
+		int i;
+
+		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+			ret = iommu_domain_identity_map(si_domain,
+					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
@@ -3647,6 +3645,8 @@
 
 	bus_register_notifier(&pci_bus_type, &device_nb);
 
+	intel_iommu_enabled = 1;
+
 	return 0;
 }
 
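
The intel-iommu hunk above replaces the work_with_active_regions() callback walk with direct iteration over memblock ranges via for_each_mem_pfn_range(), which is why <linux/memblock.h> is now included. A minimal sketch of that iteration; the iommu_domain_identity_map() call and si_domain are the driver's own symbols taken from the hunk, the wrapper function is illustrative:

	#include <linux/memblock.h>
	#include <linux/pfn.h>

	static int __init map_node_memory(int nid)
	{
		unsigned long start_pfn, end_pfn;
		int i, ret;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
		return 0;
	}
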
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963..5b5fa5c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -90,7 +90,7 @@
 	if (bus == NULL || bus->iommu_ops == NULL)
 		return NULL;
 
-	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 04231cb..1793ba1 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -624,8 +624,6 @@
 {
 	isdn_if *iif;
 
-	pr_info("ISDN4Linux interface\n");
-
 	iif = kmalloc(sizeof *iif, GFP_KERNEL);
 	if (!iif) {
 		pr_err("out of memory\n");
@@ -684,6 +682,7 @@
  */
 void gigaset_isdn_regdrv(void)
 {
+	pr_info("ISDN4Linux interface\n");
 	/* nothing to do */
 }
 
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 71a8eb6..0e1f4d5 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -2154,30 +2154,4 @@
 	.disconnect = hfcsusb_disconnect,
 };
 
-static int __init
-hfcsusb_init(void)
-{
-	printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n",
-	    hfcsusb_rev, debug, poll);
-
-	if (usb_register(&hfcsusb_drv)) {
-		printk(KERN_INFO DRIVER_NAME
-		    ": Unable to register hfcsusb module at usb stack\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void __exit
-hfcsusb_cleanup(void)
-{
-	if (debug & DBG_HFC_CALL_TRACE)
-		printk(KERN_INFO DRIVER_NAME ": %s\n", __func__);
-
-	/* unregister Hardware */
-	usb_deregister(&hfcsusb_drv);	/* release our driver */
-}
-
-module_init(hfcsusb_init);
-module_exit(hfcsusb_cleanup);
+module_usb_driver(hfcsusb_drv);
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index 26264ab..f55d29d 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -333,7 +333,7 @@
 	cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
 
 	/* Reset an */
-	cs->hw.njet.ctrl_reg = 0x07;  // geändert von 0xff
+	cs->hw.njet.ctrl_reg = 0x07;  // geändert von 0xff
 	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
 	/* 20 ms Pause */
 	mdelay(20);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff203a4..1b75a56 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -347,7 +347,8 @@
 config LEDS_NS2
 	tristate "LED support for Network Space v2 GPIO LEDs"
 	depends on LEDS_CLASS
-	depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2
+	depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || \
+		   MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2
 	default y
 	help
 	  This option enable support for the dual-GPIO LED found on the
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6d5628b..0c8739c 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -15,7 +15,6 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/timer.h>
 #include <linux/err.h>
 #include <linux/ctype.h>
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6f1ff93..46b4c76 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -17,7 +17,6 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
 #include <linux/timer.h>
 #include <linux/rwsem.h>
 #include <linux/leds.h>
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 0dc30ff..595d731 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -381,6 +381,11 @@
 	return PTR_ERR(vqs[i]);
 }
 
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+	return "";
+}
+
 /* The ops structure which hooks everything together. */
 static struct virtio_config_ops lguest_config_ops = {
 	.get_features = lg_get_features,
@@ -392,6 +397,7 @@
 	.reset = lg_reset,
 	.find_vqs = lg_find_vqs,
 	.del_vqs = lg_del_vqs,
+	.bus_name = lg_bus_name,
 };
 
 /*
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 65af42f..39809035 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -697,7 +697,7 @@
 	 * interrupts are enabled.  We always leave interrupts enabled while
 	 * running the Guest.
 	 */
-	regs->eflags = X86_EFLAGS_IF | 0x2;
+	regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
 
 	/*
 	 * The "Extended Instruction Pointer" register says where the Guest is
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c13..6dc26b6 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@
  */
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	cputime64_t retval;
+	u64 retval;
 
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+		 kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 
 	if (rackmeter_ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+		retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
 	return retval;
 }
@@ -220,13 +220,11 @@
 	int i, offset, load, cumm, pause;
 
 	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-	total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
-						  rcpu->prev_wall);
+	total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
 	rcpu->prev_wall = cur_jiffies;
 
 	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
-				rcpu->prev_idle);
+	idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
 	rcpu->prev_idle = total_idle_ticks;
 
 	/* We do a very dumb calculation to update the LEDs for now,
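
The rack-meter hunks above track the cputime accounting rework: per-cpu statistics move from the kstat_cpu(cpu).cpustat.idle style fields to the kcpustat_cpu(cpu).cpustat[] array indexed by CPUTIME_* constants, and cputime64_add()/cputime64_sub() give way to plain u64 arithmetic. A minimal sketch of the new idle-time read, mirroring the hunk (the optional nice component is handled the same way):

	#include <linux/kernel_stat.h>

	static u64 cpu_idle_plus_iowait(unsigned int cpu)
	{
		return kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
		       kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
	}
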
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 116a49c..54ac7ff 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -32,7 +32,6 @@
 #include <linux/completion.h>
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
-#include <linux/sysdev.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
@@ -681,9 +680,6 @@
 static int __init smu_init_sysfs(void)
 {
 	/*
-	 * Due to sysfs bogosity, a sysdev is not a real device, so
-	 * we should in fact create both if we want sysdev semantics
-	 * for power management.
 	 * For now, we don't power manage machines with an SMU chip,
 	 * I'm a bit too far from figuring out how that works with those
 	 * new chipsets, but that will come back and bite us
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b690711..cdf36b1 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1149,12 +1149,12 @@
 		return;
 	}
 	if (time_before(jiffies, bitmap->daemon_lastrun
-			+ bitmap->mddev->bitmap_info.daemon_sleep))
+			+ mddev->bitmap_info.daemon_sleep))
 		goto done;
 
 	bitmap->daemon_lastrun = jiffies;
 	if (bitmap->allclean) {
-		bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
 		goto done;
 	}
 	bitmap->allclean = 1;
@@ -1206,7 +1206,7 @@
 			 * sure that events_cleared is up-to-date.
 			 */
 			if (bitmap->need_sync &&
-			    bitmap->mddev->bitmap_info.external == 0) {
+			    mddev->bitmap_info.external == 0) {
 				bitmap_super_t *sb;
 				bitmap->need_sync = 0;
 				sb = kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -1270,8 +1270,8 @@
 
  done:
 	if (bitmap->allclean == 0)
-		bitmap->mddev->thread->timeout =
-			bitmap->mddev->bitmap_info.daemon_sleep;
+		mddev->thread->timeout =
+			mddev->bitmap_info.daemon_sleep;
 	mutex_unlock(&mddev->bitmap_info.mutex);
 }
 
@@ -1393,9 +1393,6 @@
 			 atomic_read(&bitmap->behind_writes),
 			 bitmap->mddev->bitmap_info.max_write_behind);
 	}
-	if (bitmap->mddev->degraded)
-		/* Never clear bits or update events_cleared when degraded */
-		success = 0;
 
 	while (sectors) {
 		sector_t blocks;
@@ -1409,7 +1406,7 @@
 			return;
 		}
 
-		if (success &&
+		if (success && !bitmap->mddev->degraded &&
 		    bitmap->events_cleared < bitmap->mddev->events) {
 			bitmap->events_cleared = bitmap->mddev->events;
 			bitmap->need_sync = 1;
@@ -1590,7 +1587,7 @@
 	}
 	if (!*bmc) {
 		struct page *page;
-		*bmc = 1 | (needed ? NEEDED_MASK : 0);
+		*bmc = 2 | (needed ? NEEDED_MASK : 0);
 		bitmap_count_page(bitmap, offset, 1);
 		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
 		set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4720f68..b89c548 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -14,7 +14,6 @@
 #include <linux/moduleparam.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
-#include <linux/buffer_head.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273ef..6274565 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@
 		return -EINVAL;
 
 	rdev->raid_disk = rdev->saved_raid_disk;
+	rdev->saved_raid_disk = -1;
 
 	newconf = linear_conf(mddev,mddev->raid_disks+1);
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ee98173..ca8527f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,8 +36,7 @@
 #include <linux/blkdev.h>
 #include <linux/sysctl.h>
 #include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev */
+#include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
@@ -1714,6 +1713,8 @@
 		}
 		if (sb->devflags & WriteMostly1)
 			set_bit(WriteMostly, &rdev->flags);
+		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
+			set_bit(Replacement, &rdev->flags);
 	} else /* MULTIPATH are always insync */
 		set_bit(In_sync, &rdev->flags);
 
@@ -1767,6 +1768,9 @@
 		sb->recovery_offset =
 			cpu_to_le64(rdev->recovery_offset);
 	}
+	if (test_bit(Replacement, &rdev->flags))
+		sb->feature_map |=
+			cpu_to_le32(MD_FEATURE_REPLACEMENT);
 
 	if (mddev->reshape_position != MaxSector) {
 		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
@@ -2560,6 +2564,15 @@
 		len += sprintf(page+len, "%swrite_error", sep);
 		sep = ",";
 	}
+	if (test_bit(WantReplacement, &rdev->flags)) {
+		len += sprintf(page+len, "%swant_replacement", sep);
+		sep = ",";
+	}
+	if (test_bit(Replacement, &rdev->flags)) {
+		len += sprintf(page+len, "%sreplacement", sep);
+		sep = ",";
+	}
+
 	return len+sprintf(page+len, "\n");
 }
 
@@ -2628,6 +2641,42 @@
 	} else if (cmd_match(buf, "-write_error")) {
 		clear_bit(WriteErrorSeen, &rdev->flags);
 		err = 0;
+	} else if (cmd_match(buf, "want_replacement")) {
+		/* Any non-spare device that is not a replacement can
+		 * become want_replacement at any time, but we then need to
+		 * check if recovery is needed.
+		 */
+		if (rdev->raid_disk >= 0 &&
+		    !test_bit(Replacement, &rdev->flags))
+			set_bit(WantReplacement, &rdev->flags);
+		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+		md_wakeup_thread(rdev->mddev->thread);
+		err = 0;
+	} else if (cmd_match(buf, "-want_replacement")) {
+		/* Clearing 'want_replacement' is always allowed.
+		 * Once replacement starts it is too late though.
+		 */
+		err = 0;
+		clear_bit(WantReplacement, &rdev->flags);
+	} else if (cmd_match(buf, "replacement")) {
+		/* Can only set a device as a replacement when array has not
+		 * yet been started.  Once running, replacement is automatic
+		 * from spares, or by assigning 'slot'.
+		 */
+		if (rdev->mddev->pers)
+			err = -EBUSY;
+		else {
+			set_bit(Replacement, &rdev->flags);
+			err = 0;
+		}
+	} else if (cmd_match(buf, "-replacement")) {
+		/* Similarly, can only clear Replacement before start */
+		if (rdev->mddev->pers)
+			err = -EBUSY;
+		else {
+			clear_bit(Replacement, &rdev->flags);
+			err = 0;
+		}
 	}
 	if (!err)
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2689,7 +2738,7 @@
 		if (rdev->mddev->pers->hot_remove_disk == NULL)
 			return -EINVAL;
 		err = rdev->mddev->pers->
-			hot_remove_disk(rdev->mddev, rdev->raid_disk);
+			hot_remove_disk(rdev->mddev, rdev);
 		if (err)
 			return err;
 		sysfs_unlink_rdev(rdev->mddev, rdev);
@@ -2697,7 +2746,6 @@
 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
 		md_wakeup_thread(rdev->mddev->thread);
 	} else if (rdev->mddev->pers) {
-		struct md_rdev *rdev2;
 		/* Activating a spare .. or possibly reactivating
 		 * if we ever get bitmaps working here.
 		 */
@@ -2711,10 +2759,6 @@
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
-		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
-			if (rdev2->raid_disk == slot)
-				return -EEXIST;
-
 		if (slot >= rdev->mddev->raid_disks &&
 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
 			return -ENOSPC;
@@ -6054,8 +6098,15 @@
 	struct mddev *mddev = NULL;
 	int ro;
 
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
+	switch (cmd) {
+	case RAID_VERSION:
+	case GET_ARRAY_INFO:
+	case GET_DISK_INFO:
+		break;
+	default:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+	}
 
 	/*
 	 * Commands dealing with the RAID driver but not any
@@ -6715,8 +6766,11 @@
 			if (test_bit(Faulty, &rdev->flags)) {
 				seq_printf(seq, "(F)");
 				continue;
-			} else if (rdev->raid_disk < 0)
+			}
+			if (rdev->raid_disk < 0)
 				seq_printf(seq, "(S)"); /* spare */
+			if (test_bit(Replacement, &rdev->flags))
+				seq_printf(seq, "(R)");
 			sectors += rdev->sectors;
 		}
 
@@ -7338,30 +7392,27 @@
 		     ! test_bit(In_sync, &rdev->flags)) &&
 		    atomic_read(&rdev->nr_pending)==0) {
 			if (mddev->pers->hot_remove_disk(
-				    mddev, rdev->raid_disk)==0) {
+				    mddev, rdev) == 0) {
 				sysfs_unlink_rdev(mddev, rdev);
 				rdev->raid_disk = -1;
 			}
 		}
 
-	if (mddev->degraded) {
-		list_for_each_entry(rdev, &mddev->disks, same_set) {
-			if (rdev->raid_disk >= 0 &&
-			    !test_bit(In_sync, &rdev->flags) &&
-			    !test_bit(Faulty, &rdev->flags))
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
+		if (rdev->raid_disk >= 0 &&
+		    !test_bit(In_sync, &rdev->flags) &&
+		    !test_bit(Faulty, &rdev->flags))
+			spares++;
+		if (rdev->raid_disk < 0
+		    && !test_bit(Faulty, &rdev->flags)) {
+			rdev->recovery_offset = 0;
+			if (mddev->pers->
+			    hot_add_disk(mddev, rdev) == 0) {
+				if (sysfs_link_rdev(mddev, rdev))
+					/* failure here is OK */;
 				spares++;
-			if (rdev->raid_disk < 0
-			    && !test_bit(Faulty, &rdev->flags)) {
-				rdev->recovery_offset = 0;
-				if (mddev->pers->
-				    hot_add_disk(mddev, rdev) == 0) {
-					if (sysfs_link_rdev(mddev, rdev))
-						/* failure here is OK */;
-					spares++;
-					md_new_event(mddev);
-					set_bit(MD_CHANGE_DEVS, &mddev->flags);
-				} else
-					break;
+				md_new_event(mddev);
+				set_bit(MD_CHANGE_DEVS, &mddev->flags);
 			}
 		}
 	}
@@ -7476,7 +7527,7 @@
 				    test_bit(Faulty, &rdev->flags) &&
 				    atomic_read(&rdev->nr_pending)==0) {
 					if (mddev->pers->hot_remove_disk(
-						    mddev, rdev->raid_disk)==0) {
+						    mddev, rdev) == 0) {
 						sysfs_unlink_rdev(mddev, rdev);
 						rdev->raid_disk = -1;
 					}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf742d9..44c63df 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -72,34 +72,7 @@
 	 * This reduces the burden of testing multiple flags in many cases
 	 */
 
-	unsigned long	flags;
-#define	Faulty		1		/* device is known to have a fault */
-#define	In_sync		2		/* device is in_sync with rest of array */
-#define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	AutoDetected	7		/* added by auto-detect */
-#define Blocked		8		/* An error occurred but has not yet
-					 * been acknowledged by the metadata
-					 * handler, so don't allow writes
-					 * until it is cleared */
-#define WriteErrorSeen	9		/* A write error has been seen on this
-					 * device
-					 */
-#define FaultRecorded	10		/* Intermediate state for clearing
-					 * Blocked.  The Fault is/will-be
-					 * recorded in the metadata, but that
-					 * metadata hasn't been stored safely
-					 * on disk yet.
-					 */
-#define BlockedBadBlocks 11		/* A writer is blocked because they
-					 * found an unacknowledged bad-block.
-					 * This can safely be cleared at any
-					 * time, and the writer will re-check.
-					 * It may be set at any time, and at
-					 * worst the writer will timeout and
-					 * re-check.  So setting it as
-					 * accurately as possible is good, but
-					 * not absolutely critical.
-					 */
+	unsigned long	flags;	/* bit set of 'enum flag_bits' bits. */
 	wait_queue_head_t blocked_wait;
 
 	int desc_nr;			/* descriptor index in the superblock */
@@ -152,6 +125,44 @@
 		sector_t size;		/* in sectors */
 	} badblocks;
 };
+enum flag_bits {
+	Faulty,			/* device is known to have a fault */
+	In_sync,		/* device is in_sync with rest of array */
+	WriteMostly,		/* Avoid reading if at all possible */
+	AutoDetected,		/* added by auto-detect */
+	Blocked,		/* An error occurred but has not yet
+				 * been acknowledged by the metadata
+				 * handler, so don't allow writes
+				 * until it is cleared */
+	WriteErrorSeen,		/* A write error has been seen on this
+				 * device
+				 */
+	FaultRecorded,		/* Intermediate state for clearing
+				 * Blocked.  The Fault is/will-be
+				 * recorded in the metadata, but that
+				 * metadata hasn't been stored safely
+				 * on disk yet.
+				 */
+	BlockedBadBlocks,	/* A writer is blocked because they
+				 * found an unacknowledged bad-block.
+				 * This can safely be cleared at any
+				 * time, and the writer will re-check.
+				 * It may be set at any time, and at
+				 * worst the writer will timeout and
+				 * re-check.  So setting it as
+				 * accurately as possible is good, but
+				 * not absolutely critical.
+				 */
+	WantReplacement,	/* This device is a candidate to be
+				 * hot-replaced, either because it has
+				 * reported some faults, or because
+				 * of explicit request.
+				 */
+	Replacement,		/* This device is a replacement for
+				 * a want_replacement device with same
+				 * raid_disk number.
+				 */
+};
 
 #define BB_LEN_MASK	(0x00000000000001FFULL)
 #define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
@@ -428,7 +439,7 @@
 	 */
 	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
 	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
-	int (*hot_remove_disk) (struct mddev *mddev, int number);
+	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*spare_active) (struct mddev *mddev);
 	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
 	int (*resize) (struct mddev *mddev, sector_t sectors);
@@ -482,15 +493,20 @@
 static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char nm[20];
-	sprintf(nm, "rd%d", rdev->raid_disk);
-	return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+	if (!test_bit(Replacement, &rdev->flags)) {
+		sprintf(nm, "rd%d", rdev->raid_disk);
+		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+	} else
+		return 0;
 }
 
 static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char nm[20];
-	sprintf(nm, "rd%d", rdev->raid_disk);
-	sysfs_remove_link(&mddev->kobj, nm);
+	if (!test_bit(Replacement, &rdev->flags)) {
+		sprintf(nm, "rd%d", rdev->raid_disk);
+		sysfs_remove_link(&mddev->kobj, nm);
+	}
 }
 
 /*
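
The md.h change above turns the rdev flag #defines into 'enum flag_bits' and adds two new bits, WantReplacement and Replacement, which the raid1 hunks below manipulate with the usual test_bit()/set_bit() helpers on rdev->flags. A minimal sketch of how a personality flags a device for hot replacement on a write error, modelled on those hunks (the wrapper function is illustrative; the flag and field names come from the diff):

	/* assumes drivers/md/md.h for struct md_rdev and enum flag_bits */
	static void note_write_error(struct md_rdev *rdev)
	{
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
	}
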
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 5899246f..a222f51 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -292,17 +292,16 @@
 	return err;
 }
 
-static int multipath_remove_disk(struct mddev *mddev, int number)
+static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
 	struct mpconf *conf = mddev->private;
 	int err = 0;
-	struct md_rdev *rdev;
+	int number = rdev->raid_disk;
 	struct multipath_info *p = conf->multipaths + number;
 
 	print_multipath_conf(conf);
 
-	rdev = p->rdev;
-	if (rdev) {
+	if (rdev == p->rdev) {
 		if (test_bit(In_sync, &rdev->flags) ||
 		    atomic_read(&rdev->nr_pending)) {
 			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ede2461..cc24f0c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -135,7 +135,7 @@
 			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
 	j = -1;
 out_free_bio:
-	while ( ++j < pi->raid_disks )
+	while (++j < pi->raid_disks)
 		bio_put(r1_bio->bios[j]);
 	r1bio_pool_free(r1_bio, data);
 	return NULL;
@@ -164,7 +164,7 @@
 {
 	int i;
 
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct bio **bio = r1_bio->bios + i;
 		if (!BIO_SPECIAL(*bio))
 			bio_put(*bio);
@@ -185,7 +185,7 @@
 	struct r1conf *conf = r1_bio->mddev->private;
 	int i;
 
-	for (i=0; i<conf->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct bio *bio = r1_bio->bios[i];
 		if (bio->bi_end_io)
 			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
@@ -277,13 +277,14 @@
 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 {
 	int mirror;
-	int raid_disks = r1_bio->mddev->raid_disks;
+	struct r1conf *conf = r1_bio->mddev->private;
+	int raid_disks = conf->raid_disks;
 
-	for (mirror = 0; mirror < raid_disks; mirror++)
+	for (mirror = 0; mirror < raid_disks * 2; mirror++)
 		if (r1_bio->bios[mirror] == bio)
 			break;
 
-	BUG_ON(mirror == raid_disks);
+	BUG_ON(mirror == raid_disks * 2);
 	update_head_pos(mirror, r1_bio);
 
 	return mirror;
@@ -390,6 +391,11 @@
 	if (!uptodate) {
 		set_bit(WriteErrorSeen,
 			&conf->mirrors[mirror].rdev->flags);
+		if (!test_and_set_bit(WantReplacement,
+				      &conf->mirrors[mirror].rdev->flags))
+			set_bit(MD_RECOVERY_NEEDED, &
+				conf->mddev->recovery);
+
 		set_bit(R1BIO_WriteError, &r1_bio->state);
 	} else {
 		/*
@@ -505,7 +511,7 @@
 		start_disk = conf->last_used;
 	}
 
-	for (i = 0 ; i < conf->raid_disks ; i++) {
+	for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
 		sector_t dist;
 		sector_t first_bad;
 		int bad_sectors;
@@ -609,7 +615,7 @@
 		return 1;
 
 	rcu_read_lock();
-	for (i = 0; i < mddev->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -974,7 +980,7 @@
 	 */
 	plugged = mddev_check_plugged(mddev);
 
-	disks = conf->raid_disks;
+	disks = conf->raid_disks * 2;
  retry_write:
 	blocked_rdev = NULL;
 	rcu_read_lock();
@@ -988,7 +994,8 @@
 		}
 		r1_bio->bios[i] = NULL;
 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
-			set_bit(R1BIO_Degraded, &r1_bio->state);
+			if (i < conf->raid_disks)
+				set_bit(R1BIO_Degraded, &r1_bio->state);
 			continue;
 		}
 
@@ -1263,6 +1270,25 @@
 	 */
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
+		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+		if (repl
+		    && repl->recovery_offset == MaxSector
+		    && !test_bit(Faulty, &repl->flags)
+		    && !test_and_set_bit(In_sync, &repl->flags)) {
+			/* replacement has just become active */
+			if (!rdev ||
+			    !test_and_clear_bit(In_sync, &rdev->flags))
+				count++;
+			if (rdev) {
+				/* Replaced device not technically
+				 * faulty, but we need to be sure
+				 * it gets removed and never re-added
+				 */
+				set_bit(Faulty, &rdev->flags);
+				sysfs_notify_dirent_safe(
+					rdev->sysfs_state);
+			}
+		}
 		if (rdev
 		    && !test_bit(Faulty, &rdev->flags)
 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
@@ -1286,7 +1312,7 @@
 	int mirror = 0;
 	struct mirror_info *p;
 	int first = 0;
-	int last = mddev->raid_disks - 1;
+	int last = conf->raid_disks - 1;
 
 	if (mddev->recovery_disabled == conf->recovery_disabled)
 		return -EBUSY;
@@ -1294,8 +1320,9 @@
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
-	for (mirror = first; mirror <= last; mirror++)
-		if ( !(p=conf->mirrors+mirror)->rdev) {
+	for (mirror = first; mirror <= last; mirror++) {
+		p = conf->mirrors+mirror;
+		if (!p->rdev) {
 
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
@@ -1322,21 +1349,35 @@
 			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
+		if (test_bit(WantReplacement, &p->rdev->flags) &&
+		    p[conf->raid_disks].rdev == NULL) {
+			/* Add this device as a replacement */
+			clear_bit(In_sync, &rdev->flags);
+			set_bit(Replacement, &rdev->flags);
+			rdev->raid_disk = mirror;
+			err = 0;
+			conf->fullsync = 1;
+			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
+			break;
+		}
+	}
 	md_integrity_add_rdev(rdev, mddev);
 	print_conf(conf);
 	return err;
 }
 
-static int raid1_remove_disk(struct mddev *mddev, int number)
+static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
 	struct r1conf *conf = mddev->private;
 	int err = 0;
-	struct md_rdev *rdev;
+	int number = rdev->raid_disk;
 	struct mirror_info *p = conf->mirrors+ number;
 
+	if (rdev != p->rdev)
+		p = conf->mirrors + conf->raid_disks + number;
+
 	print_conf(conf);
-	rdev = p->rdev;
-	if (rdev) {
+	if (rdev == p->rdev) {
 		if (test_bit(In_sync, &rdev->flags) ||
 		    atomic_read(&rdev->nr_pending)) {
 			err = -EBUSY;
@@ -1358,7 +1399,21 @@
 			err = -EBUSY;
 			p->rdev = rdev;
 			goto abort;
-		}
+		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
+			/* We just removed a device that is being replaced.
+			 * Move down the replacement.  We drain all IO before
+			 * doing this to avoid confusion.
+			 */
+			struct md_rdev *repl =
+				conf->mirrors[conf->raid_disks + number].rdev;
+			raise_barrier(conf);
+			clear_bit(Replacement, &repl->flags);
+			p->rdev = repl;
+			conf->mirrors[conf->raid_disks + number].rdev = NULL;
+			lower_barrier(conf);
+			clear_bit(WantReplacement, &rdev->flags);
+		} else
+			clear_bit(WantReplacement, &rdev->flags);
 		err = md_integrity_register(mddev);
 	}
 abort:
@@ -1411,6 +1466,10 @@
 		} while (sectors_to_go > 0);
 		set_bit(WriteErrorSeen,
 			&conf->mirrors[mirror].rdev->flags);
+		if (!test_and_set_bit(WantReplacement,
+				      &conf->mirrors[mirror].rdev->flags))
+			set_bit(MD_RECOVERY_NEEDED, &
+				mddev->recovery);
 		set_bit(R1BIO_WriteError, &r1_bio->state);
 	} else if (is_badblock(conf->mirrors[mirror].rdev,
 			       r1_bio->sector,
@@ -1441,8 +1500,13 @@
 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
 		/* success */
 		return 1;
-	if (rw == WRITE)
+	if (rw == WRITE) {
 		set_bit(WriteErrorSeen, &rdev->flags);
+		if (!test_and_set_bit(WantReplacement,
+				      &rdev->flags))
+			set_bit(MD_RECOVERY_NEEDED, &
+				rdev->mddev->recovery);
+	}
 	/* need to record an error - either for the block or the device */
 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 		md_error(rdev->mddev, rdev);
@@ -1493,7 +1557,7 @@
 				}
 			}
 			d++;
-			if (d == conf->raid_disks)
+			if (d == conf->raid_disks * 2)
 				d = 0;
 		} while (!success && d != r1_bio->read_disk);
 
@@ -1510,7 +1574,7 @@
 			       mdname(mddev),
 			       bdevname(bio->bi_bdev, b),
 			       (unsigned long long)r1_bio->sector);
-			for (d = 0; d < conf->raid_disks; d++) {
+			for (d = 0; d < conf->raid_disks * 2; d++) {
 				rdev = conf->mirrors[d].rdev;
 				if (!rdev || test_bit(Faulty, &rdev->flags))
 					continue;
@@ -1536,7 +1600,7 @@
 		/* write it back and re-read */
 		while (d != r1_bio->read_disk) {
 			if (d == 0)
-				d = conf->raid_disks;
+				d = conf->raid_disks * 2;
 			d--;
 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
 				continue;
@@ -1551,7 +1615,7 @@
 		d = start;
 		while (d != r1_bio->read_disk) {
 			if (d == 0)
-				d = conf->raid_disks;
+				d = conf->raid_disks * 2;
 			d--;
 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
 				continue;
@@ -1584,7 +1648,7 @@
 	int primary;
 	int i;
 
-	for (primary = 0; primary < conf->raid_disks; primary++)
+	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
 			r1_bio->bios[primary]->bi_end_io = NULL;
@@ -1592,7 +1656,7 @@
 			break;
 		}
 	r1_bio->read_disk = primary;
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 		struct bio *pbio = r1_bio->bios[primary];
@@ -1656,7 +1720,7 @@
 {
 	struct r1conf *conf = mddev->private;
 	int i;
-	int disks = conf->raid_disks;
+	int disks = conf->raid_disks * 2;
 	struct bio *bio, *wbio;
 
 	bio = r1_bio->bios[r1_bio->read_disk];
@@ -1737,7 +1801,7 @@
 				success = 1;
 			else {
 				d++;
-				if (d == conf->raid_disks)
+				if (d == conf->raid_disks * 2)
 					d = 0;
 			}
 		} while (!success && d != read_disk);
@@ -1753,7 +1817,7 @@
 		start = d;
 		while (d != read_disk) {
 			if (d==0)
-				d = conf->raid_disks;
+				d = conf->raid_disks * 2;
 			d--;
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
@@ -1765,7 +1829,7 @@
 		while (d != read_disk) {
 			char b[BDEVNAME_SIZE];
 			if (d==0)
-				d = conf->raid_disks;
+				d = conf->raid_disks * 2;
 			d--;
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
@@ -1887,7 +1951,7 @@
 {
 	int m;
 	int s = r1_bio->sectors;
-	for (m = 0; m < conf->raid_disks ; m++) {
+	for (m = 0; m < conf->raid_disks * 2 ; m++) {
 		struct md_rdev *rdev = conf->mirrors[m].rdev;
 		struct bio *bio = r1_bio->bios[m];
 		if (bio->bi_end_io == NULL)
@@ -1909,7 +1973,7 @@
 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 {
 	int m;
-	for (m = 0; m < conf->raid_disks ; m++)
+	for (m = 0; m < conf->raid_disks * 2 ; m++)
 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
 			struct md_rdev *rdev = conf->mirrors[m].rdev;
 			rdev_clear_badblocks(rdev,
@@ -2184,7 +2248,7 @@
 	r1_bio->state = 0;
 	set_bit(R1BIO_IsSync, &r1_bio->state);
 
-	for (i=0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct md_rdev *rdev;
 		bio = r1_bio->bios[i];
 
@@ -2203,7 +2267,8 @@
 		rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev == NULL ||
 		    test_bit(Faulty, &rdev->flags)) {
-			still_degraded = 1;
+			if (i < conf->raid_disks)
+				still_degraded = 1;
 		} else if (!test_bit(In_sync, &rdev->flags)) {
 			bio->bi_rw = WRITE;
 			bio->bi_end_io = end_sync_write;
@@ -2254,7 +2319,7 @@
 		 * need to mark them bad on all write targets
 		 */
 		int ok = 1;
-		for (i = 0 ; i < conf->raid_disks ; i++)
+		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
 				struct md_rdev *rdev =
 					rcu_dereference(conf->mirrors[i].rdev);
@@ -2323,7 +2388,7 @@
 				len = sync_blocks<<9;
 		}
 
-		for (i=0 ; i < conf->raid_disks; i++) {
+		for (i = 0 ; i < conf->raid_disks * 2; i++) {
 			bio = r1_bio->bios[i];
 			if (bio->bi_end_io) {
 				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
@@ -2356,7 +2421,7 @@
 	 */
 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		atomic_set(&r1_bio->remaining, read_targets);
-		for (i=0; i<conf->raid_disks; i++) {
+		for (i = 0; i < conf->raid_disks * 2; i++) {
 			bio = r1_bio->bios[i];
 			if (bio->bi_end_io == end_sync_read) {
 				md_sync_acct(bio->bi_bdev, nr_sectors);
@@ -2393,7 +2458,8 @@
 	if (!conf)
 		goto abort;
 
-	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+	conf->mirrors = kzalloc(sizeof(struct mirror_info)
+				* mddev->raid_disks * 2,
 				 GFP_KERNEL);
 	if (!conf->mirrors)
 		goto abort;
@@ -2405,7 +2471,7 @@
 	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
 	if (!conf->poolinfo)
 		goto abort;
-	conf->poolinfo->raid_disks = mddev->raid_disks;
+	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
 					  r1bio_pool_free,
 					  conf->poolinfo);
@@ -2414,14 +2480,20 @@
 
 	conf->poolinfo->mddev = mddev;
 
+	err = -EINVAL;
 	spin_lock_init(&conf->device_lock);
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		int disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
 		    || disk_idx < 0)
 			continue;
-		disk = conf->mirrors + disk_idx;
+		if (test_bit(Replacement, &rdev->flags))
+			disk = conf->mirrors + conf->raid_disks + disk_idx;
+		else
+			disk = conf->mirrors + disk_idx;
 
+		if (disk->rdev)
+			goto abort;
 		disk->rdev = rdev;
 
 		disk->head_position = 0;
@@ -2437,11 +2509,27 @@
 	conf->pending_count = 0;
 	conf->recovery_disabled = mddev->recovery_disabled - 1;
 
+	err = -EIO;
 	conf->last_used = -1;
-	for (i = 0; i < conf->raid_disks; i++) {
+	for (i = 0; i < conf->raid_disks * 2; i++) {
 
 		disk = conf->mirrors + i;
 
+		if (i < conf->raid_disks &&
+		    disk[conf->raid_disks].rdev) {
+			/* This slot has a replacement. */
+			if (!disk->rdev) {
+				/* No original, just make the replacement
+				 * a recovering spare
+				 */
+				disk->rdev =
+					disk[conf->raid_disks].rdev;
+				disk[conf->raid_disks].rdev = NULL;
+			} else if (!test_bit(In_sync, &disk->rdev->flags))
+				/* Original is not in_sync - bad */
+				goto abort;
+		}
+
 		if (!disk->rdev ||
 		    !test_bit(In_sync, &disk->rdev->flags)) {
 			disk->head_position = 0;
@@ -2455,7 +2543,6 @@
 			conf->last_used = i;
 	}
 
-	err = -EIO;
 	if (conf->last_used < 0) {
 		printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
 		       mdname(mddev));
@@ -2665,7 +2752,7 @@
 	if (!newpoolinfo)
 		return -ENOMEM;
 	newpoolinfo->mddev = mddev;
-	newpoolinfo->raid_disks = raid_disks;
+	newpoolinfo->raid_disks = raid_disks * 2;
 
 	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
 				 r1bio_pool_free, newpoolinfo);
@@ -2673,7 +2760,8 @@
 		kfree(newpoolinfo);
 		return -ENOMEM;
 	}
-	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
+	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+			     GFP_KERNEL);
 	if (!newmirrors) {
 		kfree(newpoolinfo);
 		mempool_destroy(newpool);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c732b6c..80ded13 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -12,6 +12,9 @@
  * pool was allocated for, so they know how much to allocate and free.
  * mddev->raid_disks cannot be used, as it can change while a pool is active
  * These two datums are stored in a kmalloced struct.
+ * The 'raid_disks' here is twice the raid_disks in r1conf.
+ * This allows space so that each 'real' device can have a replacement
+ * in the second half of the array.
  */
 
 struct pool_info {
@@ -21,7 +24,9 @@
 
 struct r1conf {
 	struct mddev		*mddev;
-	struct mirror_info		*mirrors;
+	struct mirror_info	*mirrors;	/* twice 'raid_disks' to
+						 * allow for replacements.
+						 */
 	int			raid_disks;
 
 	/* When choose the best device for a read (read_balance())
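
The comments above spell out the central convention of the RAID1 half of this series: the mirrors array and the r1bio pool are sized for twice raid_disks, and the replacement for device i lives at index i + conf->raid_disks. The following is a small userspace sketch, not part of the patch and using made-up names (mirror_slot, replacement_index), that shows nothing more than this indexing:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, userspace-only illustration of the layout described above:
 * the array holds 2 * raid_disks slots; slot i is the original device and
 * slot i + raid_disks is its replacement, if any.
 */
struct mirror_slot {
	const char *dev_name;		/* NULL means the slot is empty */
};

static int replacement_index(int disk_idx, int raid_disks)
{
	return disk_idx + raid_disks;	/* second half of the array */
}

int main(void)
{
	int raid_disks = 2;
	struct mirror_slot *mirrors = calloc(2 * raid_disks, sizeof(*mirrors));
	int i;

	mirrors[0].dev_name = "sda";	/* original for slot 0 */
	mirrors[1].dev_name = "sdb";	/* original for slot 1 */
	/* replacement for slot 1 goes in the second half */
	mirrors[replacement_index(1, raid_disks)].dev_name = "sdc";

	for (i = 0; i < raid_disks; i++) {
		int r = replacement_index(i, raid_disks);

		printf("slot %d: %-3s  replacement: %s\n", i,
		       mirrors[i].dev_name ? mirrors[i].dev_name : "-",
		       mirrors[r].dev_name ? mirrors[r].dev_name : "-");
	}
	free(mirrors);
	return 0;
}
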
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 685ddf3..6e8aa21 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -73,7 +73,8 @@
 	struct r10conf *conf = data;
 	int size = offsetof(struct r10bio, devs[conf->copies]);
 
-	/* allocate a r10bio with room for raid_disks entries in the bios array */
+	/* allocate a r10bio with room for raid_disks entries in the
+	 * bios array */
 	return kzalloc(size, gfp_flags);
 }
 
@@ -123,12 +124,19 @@
 		if (!bio)
 			goto out_free_bio;
 		r10_bio->devs[j].bio = bio;
+		if (!conf->have_replacement)
+			continue;
+		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+		if (!bio)
+			goto out_free_bio;
+		r10_bio->devs[j].repl_bio = bio;
 	}
 	/*
 	 * Allocate RESYNC_PAGES data pages and attach them
 	 * where needed.
 	 */
 	for (j = 0 ; j < nalloc; j++) {
+		struct bio *rbio = r10_bio->devs[j].repl_bio;
 		bio = r10_bio->devs[j].bio;
 		for (i = 0; i < RESYNC_PAGES; i++) {
 			if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
@@ -143,6 +151,8 @@
 				goto out_free_pages;
 
 			bio->bi_io_vec[i].bv_page = page;
+			if (rbio)
+				rbio->bi_io_vec[i].bv_page = page;
 		}
 	}
 
@@ -156,8 +166,11 @@
 			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
 	j = -1;
 out_free_bio:
-	while ( ++j < nalloc )
+	while (++j < nalloc) {
 		bio_put(r10_bio->devs[j].bio);
+		if (r10_bio->devs[j].repl_bio)
+			bio_put(r10_bio->devs[j].repl_bio);
+	}
 	r10bio_pool_free(r10_bio, conf);
 	return NULL;
 }
@@ -178,6 +191,9 @@
 			}
 			bio_put(bio);
 		}
+		bio = r10bio->devs[j].repl_bio;
+		if (bio)
+			bio_put(bio);
 	}
 	r10bio_pool_free(r10bio, conf);
 }
@@ -191,6 +207,10 @@
 		if (!BIO_SPECIAL(*bio))
 			bio_put(*bio);
 		*bio = NULL;
+		bio = &r10_bio->devs[i].repl_bio;
+		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
+			bio_put(*bio);
+		*bio = NULL;
 	}
 }
 
@@ -275,19 +295,27 @@
  * Find the disk number which triggered given bio
  */
 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
-			 struct bio *bio, int *slotp)
+			 struct bio *bio, int *slotp, int *replp)
 {
 	int slot;
+	int repl = 0;
 
-	for (slot = 0; slot < conf->copies; slot++)
+	for (slot = 0; slot < conf->copies; slot++) {
 		if (r10_bio->devs[slot].bio == bio)
 			break;
+		if (r10_bio->devs[slot].repl_bio == bio) {
+			repl = 1;
+			break;
+		}
+	}
 
 	BUG_ON(slot == conf->copies);
 	update_head_pos(slot, r10_bio);
 
 	if (slotp)
 		*slotp = slot;
+	if (replp)
+		*replp = repl;
 	return r10_bio->devs[slot].devnum;
 }
 
@@ -296,11 +324,13 @@
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct r10bio *r10_bio = bio->bi_private;
 	int slot, dev;
+	struct md_rdev *rdev;
 	struct r10conf *conf = r10_bio->mddev->private;
 
 
 	slot = r10_bio->read_slot;
 	dev = r10_bio->devs[slot].devnum;
+	rdev = r10_bio->devs[slot].rdev;
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
@@ -318,7 +348,7 @@
 		 */
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 		raid_end_bio_io(r10_bio);
-		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+		rdev_dec_pending(rdev, conf->mddev);
 	} else {
 		/*
 		 * oops, read error - keep the refcount on the rdev
@@ -327,7 +357,7 @@
 		printk_ratelimited(KERN_ERR
 				   "md/raid10:%s: %s: rescheduling sector %llu\n",
 				   mdname(conf->mddev),
-				   bdevname(conf->mirrors[dev].rdev->bdev, b),
+				   bdevname(rdev->bdev, b),
 				   (unsigned long long)r10_bio->sector);
 		set_bit(R10BIO_ReadError, &r10_bio->state);
 		reschedule_retry(r10_bio);
@@ -366,17 +396,35 @@
 	int dev;
 	int dec_rdev = 1;
 	struct r10conf *conf = r10_bio->mddev->private;
-	int slot;
+	int slot, repl;
+	struct md_rdev *rdev = NULL;
 
-	dev = find_bio_disk(conf, r10_bio, bio, &slot);
+	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 
+	if (repl)
+		rdev = conf->mirrors[dev].replacement;
+	if (!rdev) {
+		smp_rmb();
+		repl = 0;
+		rdev = conf->mirrors[dev].rdev;
+	}
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
 	if (!uptodate) {
-		set_bit(WriteErrorSeen,	&conf->mirrors[dev].rdev->flags);
-		set_bit(R10BIO_WriteError, &r10_bio->state);
-		dec_rdev = 0;
+		if (repl)
+			/* Never record new bad blocks to replacement,
+			 * just fail it.
+			 */
+			md_error(rdev->mddev, rdev);
+		else {
+			set_bit(WriteErrorSeen,	&rdev->flags);
+			if (!test_and_set_bit(WantReplacement, &rdev->flags))
+				set_bit(MD_RECOVERY_NEEDED,
+					&rdev->mddev->recovery);
+			set_bit(R10BIO_WriteError, &r10_bio->state);
+			dec_rdev = 0;
+		}
 	} else {
 		/*
 		 * Set R10BIO_Uptodate in our master bio, so that
@@ -393,12 +441,15 @@
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 
 		/* Maybe we can clear some bad blocks. */
-		if (is_badblock(conf->mirrors[dev].rdev,
+		if (is_badblock(rdev,
 				r10_bio->devs[slot].addr,
 				r10_bio->sectors,
 				&first_bad, &bad_sectors)) {
 			bio_put(bio);
-			r10_bio->devs[slot].bio = IO_MADE_GOOD;
+			if (repl)
+				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+			else
+				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 			dec_rdev = 0;
 			set_bit(R10BIO_MadeGood, &r10_bio->state);
 		}
@@ -414,7 +465,6 @@
 		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
 
-
 /*
  * RAID10 layout manager
  * As well as the chunksize and raid_disks count, there are two
@@ -562,14 +612,16 @@
  * FIXME: possibly should rethink readbalancing and do it differently
  * depending on near_copies / far_copies geometry.
  */
-static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_sectors)
+static struct md_rdev *read_balance(struct r10conf *conf,
+				    struct r10bio *r10_bio,
+				    int *max_sectors)
 {
 	const sector_t this_sector = r10_bio->sector;
 	int disk, slot;
 	int sectors = r10_bio->sectors;
 	int best_good_sectors;
 	sector_t new_distance, best_dist;
-	struct md_rdev *rdev;
+	struct md_rdev *rdev, *best_rdev;
 	int do_balance;
 	int best_slot;
 
@@ -578,6 +630,7 @@
 retry:
 	sectors = r10_bio->sectors;
 	best_slot = -1;
+	best_rdev = NULL;
 	best_dist = MaxSector;
 	best_good_sectors = 0;
 	do_balance = 1;
@@ -599,10 +652,16 @@
 		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 			continue;
 		disk = r10_bio->devs[slot].devnum;
-		rdev = rcu_dereference(conf->mirrors[disk].rdev);
+		rdev = rcu_dereference(conf->mirrors[disk].replacement);
+		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+			rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (rdev == NULL)
 			continue;
-		if (!test_bit(In_sync, &rdev->flags))
+		if (test_bit(Faulty, &rdev->flags))
+			continue;
+		if (!test_bit(In_sync, &rdev->flags) &&
+		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 			continue;
 
 		dev_sector = r10_bio->devs[slot].addr;
@@ -627,6 +686,7 @@
 				if (good_sectors > best_good_sectors) {
 					best_good_sectors = good_sectors;
 					best_slot = slot;
+					best_rdev = rdev;
 				}
 				if (!do_balance)
 					/* Must read from here */
@@ -655,16 +715,15 @@
 		if (new_distance < best_dist) {
 			best_dist = new_distance;
 			best_slot = slot;
+			best_rdev = rdev;
 		}
 	}
-	if (slot == conf->copies)
+	if (slot >= conf->copies) {
 		slot = best_slot;
+		rdev = best_rdev;
+	}
 
 	if (slot >= 0) {
-		disk = r10_bio->devs[slot].devnum;
-		rdev = rcu_dereference(conf->mirrors[disk].rdev);
-		if (!rdev)
-			goto retry;
 		atomic_inc(&rdev->nr_pending);
 		if (test_bit(Faulty, &rdev->flags)) {
 			/* Cannot risk returning a device that failed
@@ -675,11 +734,11 @@
 		}
 		r10_bio->read_slot = slot;
 	} else
-		disk = -1;
+		rdev = NULL;
 	rcu_read_unlock();
 	*max_sectors = best_good_sectors;
 
-	return disk;
+	return rdev;
 }
 
 static int raid10_congested(void *data, int bits)
@@ -846,7 +905,6 @@
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r10conf *conf = mddev->private;
-	struct mirror_info *mirror;
 	struct r10bio *r10_bio;
 	struct bio *read_bio;
 	int i;
@@ -945,27 +1003,27 @@
 		/*
 		 * read balancing logic:
 		 */
-		int disk;
+		struct md_rdev *rdev;
 		int slot;
 
 read_again:
-		disk = read_balance(conf, r10_bio, &max_sectors);
-		slot = r10_bio->read_slot;
-		if (disk < 0) {
+		rdev = read_balance(conf, r10_bio, &max_sectors);
+		if (!rdev) {
 			raid_end_bio_io(r10_bio);
 			return;
 		}
-		mirror = conf->mirrors + disk;
+		slot = r10_bio->read_slot;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 		md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
 			    max_sectors);
 
 		r10_bio->devs[slot].bio = read_bio;
+		r10_bio->devs[slot].rdev = rdev;
 
 		read_bio->bi_sector = r10_bio->devs[slot].addr +
-			mirror->rdev->data_offset;
-		read_bio->bi_bdev = mirror->rdev->bdev;
+			rdev->data_offset;
+		read_bio->bi_bdev = rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
 		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r10_bio;
@@ -1025,6 +1083,7 @@
 	 */
 	plugged = mddev_check_plugged(mddev);
 
+	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
 	raid10_find_phys(conf, r10_bio);
 retry_write:
 	blocked_rdev = NULL;
@@ -1034,12 +1093,25 @@
 	for (i = 0;  i < conf->copies; i++) {
 		int d = r10_bio->devs[i].devnum;
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+		struct md_rdev *rrdev = rcu_dereference(
+			conf->mirrors[d].replacement);
+		if (rdev == rrdev)
+			rrdev = NULL;
 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
 			atomic_inc(&rdev->nr_pending);
 			blocked_rdev = rdev;
 			break;
 		}
+		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+			atomic_inc(&rrdev->nr_pending);
+			blocked_rdev = rrdev;
+			break;
+		}
+		if (rrdev && test_bit(Faulty, &rrdev->flags))
+			rrdev = NULL;
+
 		r10_bio->devs[i].bio = NULL;
+		r10_bio->devs[i].repl_bio = NULL;
 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 			set_bit(R10BIO_Degraded, &r10_bio->state);
 			continue;
@@ -1088,6 +1160,10 @@
 		}
 		r10_bio->devs[i].bio = bio;
 		atomic_inc(&rdev->nr_pending);
+		if (rrdev) {
+			r10_bio->devs[i].repl_bio = bio;
+			atomic_inc(&rrdev->nr_pending);
+		}
 	}
 	rcu_read_unlock();
 
@@ -1096,11 +1172,23 @@
 		int j;
 		int d;
 
-		for (j = 0; j < i; j++)
+		for (j = 0; j < i; j++) {
 			if (r10_bio->devs[j].bio) {
 				d = r10_bio->devs[j].devnum;
 				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 			}
+			if (r10_bio->devs[j].repl_bio) {
+				struct md_rdev *rdev;
+				d = r10_bio->devs[j].devnum;
+				rdev = conf->mirrors[d].replacement;
+				if (!rdev) {
+					/* Race with remove_disk */
+					smp_mb();
+					rdev = conf->mirrors[d].rdev;
+				}
+				rdev_dec_pending(rdev, mddev);
+			}
+		}
 		allow_barrier(conf);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		wait_barrier(conf);
@@ -1147,6 +1235,31 @@
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+
+		if (!r10_bio->devs[i].repl_bio)
+			continue;
+
+		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+			    max_sectors);
+		r10_bio->devs[i].repl_bio = mbio;
+
+		/* We are actively writing to the original device
+		 * so it cannot disappear, so the replacement cannot
+		 * become NULL here
+		 */
+		mbio->bi_sector	= (r10_bio->devs[i].addr+
+				   conf->mirrors[d].replacement->data_offset);
+		mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
+		mbio->bi_end_io	= raid10_end_write_request;
+		mbio->bi_rw = WRITE | do_sync | do_fua;
+		mbio->bi_private = r10_bio;
+
+		atomic_inc(&r10_bio->remaining);
+		spin_lock_irqsave(&conf->device_lock, flags);
+		bio_list_add(&conf->pending_bio_list, mbio);
+		conf->pending_count++;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
 	/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1309,9 +1422,27 @@
 	 */
 	for (i = 0; i < conf->raid_disks; i++) {
 		tmp = conf->mirrors + i;
-		if (tmp->rdev
-		    && !test_bit(Faulty, &tmp->rdev->flags)
-		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+		if (tmp->replacement
+		    && tmp->replacement->recovery_offset == MaxSector
+		    && !test_bit(Faulty, &tmp->replacement->flags)
+		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+			/* Replacement has just become active */
+			if (!tmp->rdev
+			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+				count++;
+			if (tmp->rdev) {
+				/* Replaced device not technically faulty,
+				 * but we need to be sure it gets removed
+				 * and never re-added.
+				 */
+				set_bit(Faulty, &tmp->rdev->flags);
+				sysfs_notify_dirent_safe(
+					tmp->rdev->sysfs_state);
+			}
+			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+		} else if (tmp->rdev
+			   && !test_bit(Faulty, &tmp->rdev->flags)
+			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
 			count++;
 			sysfs_notify_dirent(tmp->rdev->sysfs_state);
 		}
@@ -1353,8 +1484,25 @@
 		struct mirror_info *p = &conf->mirrors[mirror];
 		if (p->recovery_disabled == mddev->recovery_disabled)
 			continue;
-		if (p->rdev)
-			continue;
+		if (p->rdev) {
+			if (!test_bit(WantReplacement, &p->rdev->flags) ||
+			    p->replacement != NULL)
+				continue;
+			clear_bit(In_sync, &rdev->flags);
+			set_bit(Replacement, &rdev->flags);
+			rdev->raid_disk = mirror;
+			err = 0;
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
+			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+				blk_queue_max_segments(mddev->queue, 1);
+				blk_queue_segment_boundary(mddev->queue,
+							   PAGE_CACHE_SIZE - 1);
+			}
+			conf->fullsync = 1;
+			rcu_assign_pointer(p->replacement, rdev);
+			break;
+		}
 
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
@@ -1385,40 +1533,61 @@
 	return err;
 }
 
-static int raid10_remove_disk(struct mddev *mddev, int number)
+static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
 	struct r10conf *conf = mddev->private;
 	int err = 0;
-	struct md_rdev *rdev;
-	struct mirror_info *p = conf->mirrors+ number;
+	int number = rdev->raid_disk;
+	struct md_rdev **rdevp;
+	struct mirror_info *p = conf->mirrors + number;
 
 	print_conf(conf);
-	rdev = p->rdev;
-	if (rdev) {
-		if (test_bit(In_sync, &rdev->flags) ||
-		    atomic_read(&rdev->nr_pending)) {
-			err = -EBUSY;
-			goto abort;
-		}
-		/* Only remove faulty devices in recovery
-		 * is not possible.
-		 */
-		if (!test_bit(Faulty, &rdev->flags) &&
-		    mddev->recovery_disabled != p->recovery_disabled &&
-		    enough(conf, -1)) {
-			err = -EBUSY;
-			goto abort;
-		}
-		p->rdev = NULL;
-		synchronize_rcu();
-		if (atomic_read(&rdev->nr_pending)) {
-			/* lost the race, try later */
-			err = -EBUSY;
-			p->rdev = rdev;
-			goto abort;
-		}
-		err = md_integrity_register(mddev);
+	if (rdev == p->rdev)
+		rdevp = &p->rdev;
+	else if (rdev == p->replacement)
+		rdevp = &p->replacement;
+	else
+		return 0;
+
+	if (test_bit(In_sync, &rdev->flags) ||
+	    atomic_read(&rdev->nr_pending)) {
+		err = -EBUSY;
+		goto abort;
 	}
+	/* Only remove faulty devices if recovery
+	 * is not possible.
+	 */
+	if (!test_bit(Faulty, &rdev->flags) &&
+	    mddev->recovery_disabled != p->recovery_disabled &&
+	    (!p->replacement || p->replacement == rdev) &&
+	    enough(conf, -1)) {
+		err = -EBUSY;
+		goto abort;
+	}
+	*rdevp = NULL;
+	synchronize_rcu();
+	if (atomic_read(&rdev->nr_pending)) {
+		/* lost the race, try later */
+		err = -EBUSY;
+		*rdevp = rdev;
+		goto abort;
+	} else if (p->replacement) {
+		/* We must have just cleared 'rdev' */
+		p->rdev = p->replacement;
+		clear_bit(Replacement, &p->replacement->flags);
+		smp_mb(); /* Make sure other CPUs may see both as identical
+			   * but will never see neither -- if they are careful.
+			   */
+		p->replacement = NULL;
+		clear_bit(WantReplacement, &rdev->flags);
+	} else
+		/* We might have just removed the Replacement as faulty.
+		 * Clear the flag just in case.
+		 */
+		clear_bit(WantReplacement, &rdev->flags);
+
+	err = md_integrity_register(mddev);
+
 abort:
 
 	print_conf(conf);
@@ -1432,7 +1601,7 @@
 	struct r10conf *conf = r10_bio->mddev->private;
 	int d;
 
-	d = find_bio_disk(conf, r10_bio, bio, NULL);
+	d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
 
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -1493,19 +1662,34 @@
 	sector_t first_bad;
 	int bad_sectors;
 	int slot;
+	int repl;
+	struct md_rdev *rdev = NULL;
 
-	d = find_bio_disk(conf, r10_bio, bio, &slot);
+	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+	if (repl)
+		rdev = conf->mirrors[d].replacement;
+	if (!rdev) {
+		smp_mb();
+		rdev = conf->mirrors[d].rdev;
+	}
 
 	if (!uptodate) {
-		set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
-		set_bit(R10BIO_WriteError, &r10_bio->state);
-	} else if (is_badblock(conf->mirrors[d].rdev,
+		if (repl)
+			md_error(mddev, rdev);
+		else {
+			set_bit(WriteErrorSeen, &rdev->flags);
+			if (!test_and_set_bit(WantReplacement, &rdev->flags))
+				set_bit(MD_RECOVERY_NEEDED,
+					&rdev->mddev->recovery);
+			set_bit(R10BIO_WriteError, &r10_bio->state);
+		}
+	} else if (is_badblock(rdev,
 			     r10_bio->devs[slot].addr,
 			     r10_bio->sectors,
 			     &first_bad, &bad_sectors))
 		set_bit(R10BIO_MadeGood, &r10_bio->state);
 
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+	rdev_dec_pending(rdev, mddev);
 
 	end_sync_request(r10_bio);
 }
@@ -1609,6 +1793,29 @@
 		generic_make_request(tbio);
 	}
 
+	/* Now write out to any replacement devices
+	 * that are active
+	 */
+	for (i = 0; i < conf->copies; i++) {
+		int j, d;
+		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
+
+		tbio = r10_bio->devs[i].repl_bio;
+		if (!tbio || !tbio->bi_end_io)
+			continue;
+		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
+		    && r10_bio->devs[i].bio != fbio)
+			for (j = 0; j < vcnt; j++)
+				memcpy(page_address(tbio->bi_io_vec[j].bv_page),
+				       page_address(fbio->bi_io_vec[j].bv_page),
+				       PAGE_SIZE);
+		d = r10_bio->devs[i].devnum;
+		atomic_inc(&r10_bio->remaining);
+		md_sync_acct(conf->mirrors[d].replacement->bdev,
+			     tbio->bi_size >> 9);
+		generic_make_request(tbio);
+	}
+
 done:
 	if (atomic_dec_and_test(&r10_bio->remaining)) {
 		md_done_sync(mddev, r10_bio->sectors, 1);
@@ -1668,8 +1875,13 @@
 					  s << 9,
 					  bio->bi_io_vec[idx].bv_page,
 					  WRITE, false);
-			if (!ok)
+			if (!ok) {
 				set_bit(WriteErrorSeen, &rdev->flags);
+				if (!test_and_set_bit(WantReplacement,
+						      &rdev->flags))
+					set_bit(MD_RECOVERY_NEEDED,
+						&rdev->mddev->recovery);
+			}
 		}
 		if (!ok) {
 			/* We don't worry if we cannot set a bad block -
@@ -1709,7 +1921,7 @@
 {
 	struct r10conf *conf = mddev->private;
 	int d;
-	struct bio *wbio;
+	struct bio *wbio, *wbio2;
 
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
 		fix_recovery_read_error(r10_bio);
@@ -1721,12 +1933,20 @@
 	 * share the pages with the first bio
 	 * and submit the write request
 	 */
-	wbio = r10_bio->devs[1].bio;
 	d = r10_bio->devs[1].devnum;
-
-	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
-	generic_make_request(wbio);
+	wbio = r10_bio->devs[1].bio;
+	wbio2 = r10_bio->devs[1].repl_bio;
+	if (wbio->bi_end_io) {
+		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		generic_make_request(wbio);
+	}
+	if (wbio2 && wbio2->bi_end_io) {
+		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+		md_sync_acct(conf->mirrors[d].replacement->bdev,
+			     wbio2->bi_size >> 9);
+		generic_make_request(wbio2);
+	}
 }
 
 
@@ -1779,8 +1999,12 @@
 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
 		/* success */
 		return 1;
-	if (rw == WRITE)
+	if (rw == WRITE) {
 		set_bit(WriteErrorSeen, &rdev->flags);
+		if (!test_and_set_bit(WantReplacement, &rdev->flags))
+			set_bit(MD_RECOVERY_NEEDED,
+				&rdev->mddev->recovery);
+	}
 	/* need to record an error - either for the block or the device */
 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 		md_error(rdev->mddev, rdev);
@@ -2060,10 +2284,9 @@
 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 {
 	int slot = r10_bio->read_slot;
-	int mirror = r10_bio->devs[slot].devnum;
 	struct bio *bio;
 	struct r10conf *conf = mddev->private;
-	struct md_rdev *rdev;
+	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
 	char b[BDEVNAME_SIZE];
 	unsigned long do_sync;
 	int max_sectors;
@@ -2081,15 +2304,15 @@
 		fix_read_error(conf, mddev, r10_bio);
 		unfreeze_array(conf);
 	}
-	rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+	rdev_dec_pending(rdev, mddev);
 
 	bio = r10_bio->devs[slot].bio;
 	bdevname(bio->bi_bdev, b);
 	r10_bio->devs[slot].bio =
 		mddev->ro ? IO_BLOCKED : NULL;
 read_more:
-	mirror = read_balance(conf, r10_bio, &max_sectors);
-	if (mirror == -1) {
+	rdev = read_balance(conf, r10_bio, &max_sectors);
+	if (rdev == NULL) {
 		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
 		       " read error for block %llu\n",
 		       mdname(mddev), b,
@@ -2103,7 +2326,6 @@
 	if (bio)
 		bio_put(bio);
 	slot = r10_bio->read_slot;
-	rdev = conf->mirrors[mirror].rdev;
 	printk_ratelimited(
 		KERN_ERR
 		"md/raid10:%s: %s: redirecting"
@@ -2117,6 +2339,7 @@
 		    r10_bio->sector - bio->bi_sector,
 		    max_sectors);
 	r10_bio->devs[slot].bio = bio;
+	r10_bio->devs[slot].rdev = rdev;
 	bio->bi_sector = r10_bio->devs[slot].addr
 		+ rdev->data_offset;
 	bio->bi_bdev = rdev->bdev;
@@ -2187,6 +2410,22 @@
 					    r10_bio->sectors, 0))
 					md_error(conf->mddev, rdev);
 			}
+			rdev = conf->mirrors[dev].replacement;
+			if (r10_bio->devs[m].repl_bio == NULL)
+				continue;
+			if (test_bit(BIO_UPTODATE,
+				     &r10_bio->devs[m].repl_bio->bi_flags)) {
+				rdev_clear_badblocks(
+					rdev,
+					r10_bio->devs[m].addr,
+					r10_bio->sectors);
+			} else {
+				if (!rdev_set_badblocks(
+					    rdev,
+					    r10_bio->devs[m].addr,
+					    r10_bio->sectors, 0))
+					md_error(conf->mddev, rdev);
+			}
 		}
 		put_buf(r10_bio);
 	} else {
@@ -2209,6 +2448,15 @@
 				}
 				rdev_dec_pending(rdev, conf->mddev);
 			}
+			bio = r10_bio->devs[m].repl_bio;
+			rdev = conf->mirrors[dev].replacement;
+			if (rdev && bio == IO_MADE_GOOD) {
+				rdev_clear_badblocks(
+					rdev,
+					r10_bio->devs[m].addr,
+					r10_bio->sectors);
+				rdev_dec_pending(rdev, conf->mddev);
+			}
 		}
 		if (test_bit(R10BIO_WriteError,
 			     &r10_bio->state))
@@ -2272,9 +2520,14 @@
 static int init_resync(struct r10conf *conf)
 {
 	int buffs;
+	int i;
 
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
 	BUG_ON(conf->r10buf_pool);
+	conf->have_replacement = 0;
+	for (i = 0; i < conf->raid_disks; i++)
+		if (conf->mirrors[i].replacement)
+			conf->have_replacement = 1;
 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
 	if (!conf->r10buf_pool)
 		return -ENOMEM;
@@ -2355,9 +2608,22 @@
 				bitmap_end_sync(mddev->bitmap, sect,
 						&sync_blocks, 1);
 			}
-		} else /* completed sync */
+		} else {
+			/* completed sync */
+			if ((!mddev->bitmap || conf->fullsync)
+			    && conf->have_replacement
+			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+				/* Completed a full sync so the replacements
+				 * are now fully recovered.
+				 */
+				for (i = 0; i < conf->raid_disks; i++)
+					if (conf->mirrors[i].replacement)
+						conf->mirrors[i].replacement
+							->recovery_offset
+							= MaxSector;
+			}
 			conf->fullsync = 0;
-
+		}
 		bitmap_close_sync(mddev->bitmap);
 		close_sync(conf);
 		*skipped = 1;
@@ -2414,23 +2680,30 @@
 			sector_t sect;
 			int must_sync;
 			int any_working;
+			struct mirror_info *mirror = &conf->mirrors[i];
 
-			if (conf->mirrors[i].rdev == NULL ||
-			    test_bit(In_sync, &conf->mirrors[i].rdev->flags)) 
+			if ((mirror->rdev == NULL ||
+			     test_bit(In_sync, &mirror->rdev->flags))
+			    &&
+			    (mirror->replacement == NULL ||
+			     test_bit(Faulty,
+				      &mirror->replacement->flags)))
 				continue;
 
 			still_degraded = 0;
 			/* want to reconstruct this device */
 			rb2 = r10_bio;
 			sect = raid10_find_virt(conf, sector_nr, i);
-			/* Unless we are doing a full sync, we only need
-			 * to recover the block if it is set in the bitmap
+			/* Unless we are doing a full sync or a replacement,
+			 * we only need to recover the block if it is set in
+			 * the bitmap
 			 */
 			must_sync = bitmap_start_sync(mddev->bitmap, sect,
 						      &sync_blocks, 1);
 			if (sync_blocks < max_sync)
 				max_sync = sync_blocks;
 			if (!must_sync &&
+			    mirror->replacement == NULL &&
 			    !conf->fullsync) {
 				/* yep, skip the sync_blocks here, but don't assume
 				 * that there will never be anything to do here
@@ -2500,33 +2773,60 @@
 				bio->bi_end_io = end_sync_read;
 				bio->bi_rw = READ;
 				from_addr = r10_bio->devs[j].addr;
-				bio->bi_sector = from_addr +
-					conf->mirrors[d].rdev->data_offset;
-				bio->bi_bdev = conf->mirrors[d].rdev->bdev;
-				atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-				atomic_inc(&r10_bio->remaining);
-				/* and we write to 'i' */
+				bio->bi_sector = from_addr + rdev->data_offset;
+				bio->bi_bdev = rdev->bdev;
+				atomic_inc(&rdev->nr_pending);
+				/* and we write to 'i' (if not in_sync) */
 
 				for (k=0; k<conf->copies; k++)
 					if (r10_bio->devs[k].devnum == i)
 						break;
 				BUG_ON(k == conf->copies);
-				bio = r10_bio->devs[1].bio;
-				bio->bi_next = biolist;
-				biolist = bio;
-				bio->bi_private = r10_bio;
-				bio->bi_end_io = end_sync_write;
-				bio->bi_rw = WRITE;
 				to_addr = r10_bio->devs[k].addr;
-				bio->bi_sector = to_addr +
-					conf->mirrors[i].rdev->data_offset;
-				bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
 				r10_bio->devs[0].devnum = d;
 				r10_bio->devs[0].addr = from_addr;
 				r10_bio->devs[1].devnum = i;
 				r10_bio->devs[1].addr = to_addr;
 
+				rdev = mirror->rdev;
+				if (!test_bit(In_sync, &rdev->flags)) {
+					bio = r10_bio->devs[1].bio;
+					bio->bi_next = biolist;
+					biolist = bio;
+					bio->bi_private = r10_bio;
+					bio->bi_end_io = end_sync_write;
+					bio->bi_rw = WRITE;
+					bio->bi_sector = to_addr
+						+ rdev->data_offset;
+					bio->bi_bdev = rdev->bdev;
+					atomic_inc(&r10_bio->remaining);
+				} else
+					r10_bio->devs[1].bio->bi_end_io = NULL;
+
+				/* and maybe write to replacement */
+				bio = r10_bio->devs[1].repl_bio;
+				if (bio)
+					bio->bi_end_io = NULL;
+				rdev = mirror->replacement;
+				/* Note: if rdev != NULL, then bio
+				 * cannot be NULL as r10buf_pool_alloc will
+				 * have allocated it.
+				 * So the second test here is pointless.
+				 * But it keeps semantic-checkers happy, and
+				 * this comment keeps human reviewers
+				 * happy.
+				 */
+				if (rdev == NULL || bio == NULL ||
+				    test_bit(Faulty, &rdev->flags))
+					break;
+				bio->bi_next = biolist;
+				biolist = bio;
+				bio->bi_private = r10_bio;
+				bio->bi_end_io = end_sync_write;
+				bio->bi_rw = WRITE;
+				bio->bi_sector = to_addr + rdev->data_offset;
+				bio->bi_bdev = rdev->bdev;
+				atomic_inc(&r10_bio->remaining);
 				break;
 			}
 			if (j == conf->copies) {
@@ -2544,8 +2844,16 @@
 					for (k = 0; k < conf->copies; k++)
 						if (r10_bio->devs[k].devnum == i)
 							break;
-					if (!rdev_set_badblocks(
-						    conf->mirrors[i].rdev,
+					if (!test_bit(In_sync,
+						      &mirror->rdev->flags)
+					    && !rdev_set_badblocks(
+						    mirror->rdev,
+						    r10_bio->devs[k].addr,
+						    max_sync, 0))
+						any_working = 0;
+					if (mirror->replacement &&
+					    !rdev_set_badblocks(
+						    mirror->replacement,
 						    r10_bio->devs[k].addr,
 						    max_sync, 0))
 						any_working = 0;
@@ -2556,7 +2864,7 @@
 						printk(KERN_INFO "md/raid10:%s: insufficient "
 						       "working devices for recovery.\n",
 						       mdname(mddev));
-					conf->mirrors[i].recovery_disabled
+					mirror->recovery_disabled
 						= mddev->recovery_disabled;
 				}
 				break;
@@ -2605,6 +2913,9 @@
 			sector_t first_bad, sector;
 			int bad_sectors;
 
+			if (r10_bio->devs[i].repl_bio)
+				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
+
 			bio = r10_bio->devs[i].bio;
 			bio->bi_end_io = NULL;
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -2635,6 +2946,27 @@
 				conf->mirrors[d].rdev->data_offset;
 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
 			count++;
+
+			if (conf->mirrors[d].replacement == NULL ||
+			    test_bit(Faulty,
+				     &conf->mirrors[d].replacement->flags))
+				continue;
+
+			/* Need to set up for writing to the replacement */
+			bio = r10_bio->devs[i].repl_bio;
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+			sector = r10_bio->devs[i].addr;
+			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+			bio->bi_next = biolist;
+			biolist = bio;
+			bio->bi_private = r10_bio;
+			bio->bi_end_io = end_sync_write;
+			bio->bi_rw = WRITE;
+			bio->bi_sector = sector +
+				conf->mirrors[d].replacement->data_offset;
+			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+			count++;
 		}
 
 		if (count < 2) {
@@ -2643,6 +2975,11 @@
 				if (r10_bio->devs[i].bio->bi_end_io)
 					rdev_dec_pending(conf->mirrors[d].rdev,
 							 mddev);
+				if (r10_bio->devs[i].repl_bio &&
+				    r10_bio->devs[i].repl_bio->bi_end_io)
+					rdev_dec_pending(
+						conf->mirrors[d].replacement,
+						mddev);
 			}
 			put_buf(r10_bio);
 			biolist = NULL;
@@ -2896,6 +3233,16 @@
 			continue;
 		disk = conf->mirrors + disk_idx;
 
+		if (test_bit(Replacement, &rdev->flags)) {
+			if (disk->replacement)
+				goto out_free_conf;
+			disk->replacement = rdev;
+		} else {
+			if (disk->rdev)
+				goto out_free_conf;
+			disk->rdev = rdev;
+		}
+
 		disk->rdev = rdev;
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
@@ -2923,6 +3270,13 @@
 
 		disk = conf->mirrors + i;
 
+		if (!disk->rdev && disk->replacement) {
+			/* The replacement is all we have - use it */
+			disk->rdev = disk->replacement;
+			disk->replacement = NULL;
+			clear_bit(Replacement, &disk->rdev->flags);
+		}
+
 		if (!disk->rdev ||
 		    !test_bit(In_sync, &disk->rdev->flags)) {
 			disk->head_position = 0;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 7facfdf..7c615613 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -2,7 +2,7 @@
 #define _RAID10_H
 
 struct mirror_info {
-	struct md_rdev	*rdev;
+	struct md_rdev	*rdev, *replacement;
 	sector_t	head_position;
 	int		recovery_disabled;	/* matches
 						 * mddev->recovery_disabled
@@ -18,12 +18,13 @@
 	spinlock_t		device_lock;
 
 	/* geometry */
-	int			near_copies;  /* number of copies laid out raid0 style */
+	int			near_copies;  /* number of copies laid out
+					       * raid0 style */
 	int 			far_copies;   /* number of copies laid out
 					       * at large strides across drives
 					       */
-	int			far_offset;   /* far_copies are offset by 1 stripe
-					       * instead of many
+	int			far_offset;   /* far_copies are offset by 1
+					       * stripe instead of many
 					       */
 	int			copies;	      /* near_copies * far_copies.
 					       * must be <= raid_disks
@@ -34,10 +35,11 @@
 					       * 1 stripe.
 					       */
 
-	sector_t		dev_sectors;  /* temp copy of mddev->dev_sectors */
+	sector_t		dev_sectors;  /* temp copy of
+					       * mddev->dev_sectors */
 
-	int chunk_shift; /* shift from chunks to sectors */
-	sector_t chunk_mask;
+	int			chunk_shift; /* shift from chunks to sectors */
+	sector_t		chunk_mask;
 
 	struct list_head	retry_list;
 	/* queue pending writes and submit them on unplug */
@@ -45,20 +47,22 @@
 	int			pending_count;
 
 	spinlock_t		resync_lock;
-	int nr_pending;
-	int nr_waiting;
-	int nr_queued;
-	int barrier;
+	int			nr_pending;
+	int			nr_waiting;
+	int			nr_queued;
+	int			barrier;
 	sector_t		next_resync;
 	int			fullsync;  /* set to 1 if a full sync is needed,
 					    * (fresh device added).
 					    * Cleared when a sync completes.
 					    */
-
+	int			have_replacement; /* There is at least one
+						   * replacement device.
+						   */
 	wait_queue_head_t	wait_barrier;
 
-	mempool_t *r10bio_pool;
-	mempool_t *r10buf_pool;
+	mempool_t		*r10bio_pool;
+	mempool_t		*r10buf_pool;
 	struct page		*tmppage;
 
 	/* When taking over an array from a different personality, we store
@@ -98,11 +102,18 @@
 	 * When resyncing we also use one for each copy.
 	 * When reconstructing, we use 2 bios, one for read, one for write.
 	 * We choose the number when they are allocated.
+	 * We sometimes need an extra bio to write to the replacement.
 	 */
 	struct {
-		struct bio		*bio;
-		sector_t addr;
-		int devnum;
+		struct bio	*bio;
+		union {
+			struct bio	*repl_bio; /* used for resync and
+						    * writes */
+			struct md_rdev	*rdev;	   /* used for reads
+						    * (read_slot >= 0) */
+		};
+		sector_t	addr;
+		int		devnum;
 	} devs[0];
 };
 
@@ -121,17 +132,19 @@
 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
 
 /* bits for r10bio.state */
-#define	R10BIO_Uptodate	0
-#define	R10BIO_IsSync	1
-#define	R10BIO_IsRecover 2
-#define	R10BIO_Degraded 3
+enum r10bio_state {
+	R10BIO_Uptodate,
+	R10BIO_IsSync,
+	R10BIO_IsRecover,
+	R10BIO_Degraded,
 /* Set ReadError on bios that experience a read error
  * so that raid10d knows what to do with them.
  */
-#define	R10BIO_ReadError 4
+	R10BIO_ReadError,
 /* If a write for this request means we can clear some
  * known-bad-block records, we set this flag.
  */
-#define	R10BIO_MadeGood 5
-#define	R10BIO_WriteError 6
+	R10BIO_MadeGood,
+	R10BIO_WriteError,
+};
 #endif
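
The union added to the r10bio devs[] entry above overlays repl_bio and rdev; read_slot decides which member is meaningful, and the new "r10_bio->read_slot = -1" assignment in make_request exists precisely so that put_all_bios can tell the two uses apart. Purely as illustration, here is a userspace mock-up of that discriminated-union pattern with stand-in types and invented names (the kernel of course uses struct bio and struct md_rdev):

#include <stdio.h>

/* Hypothetical mock-up of the devs[] entry above (C11 anonymous union):
 * repl_bio is the live member for writes and resync, rdev is the live
 * member for reads, and read_slot >= 0 is the discriminator.
 */
struct dev_entry {
	void *bio;
	union {
		void *repl_bio;		/* used for resync and writes */
		void *rdev;		/* used for reads (read_slot >= 0) */
	};
	long long addr;
	int devnum;
};

struct fake_r10bio {
	int read_slot;			/* < 0: write/resync, >= 0: read */
	struct dev_entry devs[2];
};

static void describe(const struct fake_r10bio *rb, int i)
{
	if (rb->read_slot >= 0)
		printf("dev %d: read path, rdev member is live (%p)\n",
		       i, rb->devs[i].rdev);
	else
		printf("dev %d: write/resync path, repl_bio member is live (%p)\n",
		       i, rb->devs[i].repl_bio);
}

int main(void)
{
	struct fake_r10bio wr = { .read_slot = -1 };
	struct fake_r10bio rd = { .read_slot = 0 };

	describe(&wr, 0);
	describe(&rd, 0);
	return 0;
}
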
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31670f8..360f2b9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -370,12 +370,10 @@
  * of the two sections, and some non-in_sync devices may
  * be insync in the section most affected by failed devices.
  */
-static int has_failed(struct r5conf *conf)
+static int calc_degraded(struct r5conf *conf)
 {
-	int degraded;
+	int degraded, degraded2;
 	int i;
-	if (conf->mddev->reshape_position == MaxSector)
-		return conf->mddev->degraded > conf->max_degraded;
 
 	rcu_read_lock();
 	degraded = 0;
@@ -399,14 +397,14 @@
 				degraded++;
 	}
 	rcu_read_unlock();
-	if (degraded > conf->max_degraded)
-		return 1;
+	if (conf->raid_disks == conf->previous_raid_disks)
+		return degraded;
 	rcu_read_lock();
-	degraded = 0;
+	degraded2 = 0;
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (!rdev || test_bit(Faulty, &rdev->flags))
-			degraded++;
+			degraded2++;
 		else if (test_bit(In_sync, &rdev->flags))
 			;
 		else
@@ -416,9 +414,22 @@
 			 * almost certainly hasn't.
 			 */
 			if (conf->raid_disks <= conf->previous_raid_disks)
-				degraded++;
+				degraded2++;
 	}
 	rcu_read_unlock();
+	if (degraded2 > degraded)
+		return degraded2;
+	return degraded;
+}
+
+static int has_failed(struct r5conf *conf)
+{
+	int degraded;
+
+	if (conf->mddev->reshape_position == MaxSector)
+		return conf->mddev->degraded > conf->max_degraded;
+
+	degraded = calc_degraded(conf);
 	if (degraded > conf->max_degraded)
 		return 1;
 	return 0;
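
The refactoring above splits the old has_failed() into calc_degraded(), which counts missing or not-yet-recovered devices over the current geometry and, during a reshape, over the previous geometry as well, returning the larger count; error() can then keep mddev->degraded accurate while has_failed() only compares against max_degraded. A simplified userspace sketch of that counting, with invented names and without the In_sync/recovery_offset subtleties:

#include <stdio.h>

/* Illustrative, userspace-only model of the calc_degraded()/has_failed()
 * split: count unusable slots over the current geometry and, when a
 * reshape changed the disk count, over the previous geometry too, then
 * report the worse of the two counts.  Real md checks rdev flags; here a
 * slot is simply usable (1) or not (0).
 */
static int count_degraded(const int *usable, int ndisks)
{
	int i, degraded = 0;

	for (i = 0; i < ndisks; i++)
		if (!usable[i])
			degraded++;
	return degraded;
}

static int calc_degraded_demo(const int *usable, int raid_disks,
			      int previous_raid_disks)
{
	int d1 = count_degraded(usable, raid_disks);
	int d2;

	if (raid_disks == previous_raid_disks)
		return d1;
	d2 = count_degraded(usable, previous_raid_disks);
	return d2 > d1 ? d2 : d1;
}

int main(void)
{
	int usable[5] = { 1, 1, 0, 1, 1 };	/* disk 2 has failed */
	int max_degraded = 1;			/* raid5-style limit */
	int degraded = calc_degraded_demo(usable, 5, 4);

	printf("degraded=%d array_has_failed=%d\n",
	       degraded, degraded > max_degraded);
	return 0;
}
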
@@ -492,8 +503,9 @@
 
 	for (i = disks; i--; ) {
 		int rw;
-		struct bio *bi;
-		struct md_rdev *rdev;
+		int replace_only = 0;
+		struct bio *bi, *rbi;
+		struct md_rdev *rdev, *rrdev = NULL;
 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
 				rw = WRITE_FUA;
@@ -501,27 +513,57 @@
 				rw = WRITE;
 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
 			rw = READ;
-		else
+		else if (test_and_clear_bit(R5_WantReplace,
+					    &sh->dev[i].flags)) {
+			rw = WRITE;
+			replace_only = 1;
+		} else
 			continue;
 
 		bi = &sh->dev[i].req;
+		rbi = &sh->dev[i].rreq; /* For writing to replacement */
 
 		bi->bi_rw = rw;
-		if (rw & WRITE)
+		rbi->bi_rw = rw;
+		if (rw & WRITE) {
 			bi->bi_end_io = raid5_end_write_request;
-		else
+			rbi->bi_end_io = raid5_end_write_request;
+		} else
 			bi->bi_end_io = raid5_end_read_request;
 
 		rcu_read_lock();
+		rrdev = rcu_dereference(conf->disks[i].replacement);
+		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
 		rdev = rcu_dereference(conf->disks[i].rdev);
+		if (!rdev) {
+			rdev = rrdev;
+			rrdev = NULL;
+		}
+		if (rw & WRITE) {
+			if (replace_only)
+				rdev = NULL;
+			if (rdev == rrdev)
+				/* We raced and saw duplicates */
+				rrdev = NULL;
+		} else {
+			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+				rdev = rrdev;
+			rrdev = NULL;
+		}
+
 		if (rdev && test_bit(Faulty, &rdev->flags))
 			rdev = NULL;
 		if (rdev)
 			atomic_inc(&rdev->nr_pending);
+		if (rrdev && test_bit(Faulty, &rrdev->flags))
+			rrdev = NULL;
+		if (rrdev)
+			atomic_inc(&rrdev->nr_pending);
 		rcu_read_unlock();
 
 		/* We have already checked bad blocks for reads.  Now
-		 * need to check for writes.
+		 * need to check for writes.  We never accept write errors
+		 * on the replacement, so we don't need to check rrdev.
 		 */
 		while ((rw & WRITE) && rdev &&
 		       test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -551,7 +593,8 @@
 		}
 
 		if (rdev) {
-			if (s->syncing || s->expanding || s->expanded)
+			if (s->syncing || s->expanding || s->expanded
+			    || s->replacing)
 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
@@ -563,16 +606,38 @@
 			atomic_inc(&sh->count);
 			bi->bi_sector = sh->sector + rdev->data_offset;
 			bi->bi_flags = 1 << BIO_UPTODATE;
-			bi->bi_vcnt = 1;
-			bi->bi_max_vecs = 1;
 			bi->bi_idx = 0;
-			bi->bi_io_vec = &sh->dev[i].vec;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
 			bi->bi_next = NULL;
+			if (rrdev)
+				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 			generic_make_request(bi);
-		} else {
+		}
+		if (rrdev) {
+			if (s->syncing || s->expanding || s->expanded
+			    || s->replacing)
+				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
+
+			set_bit(STRIPE_IO_STARTED, &sh->state);
+
+			rbi->bi_bdev = rrdev->bdev;
+			pr_debug("%s: for %llu schedule op %ld on "
+				 "replacement disc %d\n",
+				__func__, (unsigned long long)sh->sector,
+				rbi->bi_rw, i);
+			atomic_inc(&sh->count);
+			rbi->bi_sector = sh->sector + rrdev->data_offset;
+			rbi->bi_flags = 1 << BIO_UPTODATE;
+			rbi->bi_idx = 0;
+			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+			rbi->bi_io_vec[0].bv_offset = 0;
+			rbi->bi_size = STRIPE_SIZE;
+			rbi->bi_next = NULL;
+			generic_make_request(rbi);
+		}
+		if (!rdev && !rrdev) {
 			if (rw & WRITE)
 				set_bit(STRIPE_DEGRADED, &sh->state);
 			pr_debug("skip op %ld on disc %d for sector %llu\n",
@@ -1583,7 +1648,7 @@
 	int disks = sh->disks, i;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 	char b[BDEVNAME_SIZE];
-	struct md_rdev *rdev;
+	struct md_rdev *rdev = NULL;
 
 
 	for (i=0 ; i<disks; i++)
@@ -1597,11 +1662,23 @@
 		BUG();
 		return;
 	}
+	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+		/* If replacement finished while this request was outstanding,
+		 * 'replacement' might be NULL already.
+		 * In that case it moved down to 'rdev'.
+		 * rdev is not removed until all requests are finished.
+		 */
+		rdev = conf->disks[i].replacement;
+	if (!rdev)
+		rdev = conf->disks[i].rdev;
 
 	if (uptodate) {
 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
-			rdev = conf->disks[i].rdev;
+			/* Note that this cannot happen on a
+			 * replacement device.  We just fail those on
+			 * any error
+			 */
 			printk_ratelimited(
 				KERN_INFO
 				"md/raid:%s: read error corrected"
@@ -1614,16 +1691,24 @@
 			clear_bit(R5_ReadError, &sh->dev[i].flags);
 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
 		}
-		if (atomic_read(&conf->disks[i].rdev->read_errors))
-			atomic_set(&conf->disks[i].rdev->read_errors, 0);
+		if (atomic_read(&rdev->read_errors))
+			atomic_set(&rdev->read_errors, 0);
 	} else {
-		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
+		const char *bdn = bdevname(rdev->bdev, b);
 		int retry = 0;
-		rdev = conf->disks[i].rdev;
 
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
 		atomic_inc(&rdev->read_errors);
-		if (conf->mddev->degraded >= conf->max_degraded)
+		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+			printk_ratelimited(
+				KERN_WARNING
+				"md/raid:%s: read error on replacement device "
+				"(sector %llu on %s).\n",
+				mdname(conf->mddev),
+				(unsigned long long)(sh->sector
+						     + rdev->data_offset),
+				bdn);
+		else if (conf->mddev->degraded >= conf->max_degraded)
 			printk_ratelimited(
 				KERN_WARNING
 				"md/raid:%s: read error not correctable "
@@ -1657,7 +1742,7 @@
 			md_error(conf->mddev, rdev);
 		}
 	}
-	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
+	rdev_dec_pending(rdev, conf->mddev);
 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
@@ -1668,14 +1753,30 @@
 	struct stripe_head *sh = bi->bi_private;
 	struct r5conf *conf = sh->raid_conf;
 	int disks = sh->disks, i;
+	struct md_rdev *uninitialized_var(rdev);
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 	sector_t first_bad;
 	int bad_sectors;
+	int replacement = 0;
 
-	for (i=0 ; i<disks; i++)
-		if (bi == &sh->dev[i].req)
+	for (i = 0 ; i < disks; i++) {
+		if (bi == &sh->dev[i].req) {
+			rdev = conf->disks[i].rdev;
 			break;
-
+		}
+		if (bi == &sh->dev[i].rreq) {
+			rdev = conf->disks[i].replacement;
+			if (rdev)
+				replacement = 1;
+			else
+				/* rdev was removed and 'replacement'
+				 * replaced it.  rdev is not removed
+				 * until all requests are finished.
+				 */
+				rdev = conf->disks[i].rdev;
+			break;
+		}
+	}
 	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
 		uptodate);
@@ -1684,21 +1785,33 @@
 		return;
 	}
 
-	if (!uptodate) {
-		set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
-		set_bit(R5_WriteError, &sh->dev[i].flags);
-	} else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
-			       &first_bad, &bad_sectors))
-		set_bit(R5_MadeGood, &sh->dev[i].flags);
+	if (replacement) {
+		if (!uptodate)
+			md_error(conf->mddev, rdev);
+		else if (is_badblock(rdev, sh->sector,
+				     STRIPE_SECTORS,
+				     &first_bad, &bad_sectors))
+			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+	} else {
+		if (!uptodate) {
+			set_bit(WriteErrorSeen, &rdev->flags);
+			set_bit(R5_WriteError, &sh->dev[i].flags);
+			if (!test_and_set_bit(WantReplacement, &rdev->flags))
+				set_bit(MD_RECOVERY_NEEDED,
+					&rdev->mddev->recovery);
+		} else if (is_badblock(rdev, sh->sector,
+				       STRIPE_SECTORS,
+				       &first_bad, &bad_sectors))
+			set_bit(R5_MadeGood, &sh->dev[i].flags);
+	}
+	rdev_dec_pending(rdev, conf->mddev);
 
-	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
-	
-	clear_bit(R5_LOCKED, &sh->dev[i].flags);
+	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
+		clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
 
-
 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
 	
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
@@ -1709,12 +1822,15 @@
 	dev->req.bi_io_vec = &dev->vec;
 	dev->req.bi_vcnt++;
 	dev->req.bi_max_vecs++;
-	dev->vec.bv_page = dev->page;
-	dev->vec.bv_len = STRIPE_SIZE;
-	dev->vec.bv_offset = 0;
-
-	dev->req.bi_sector = sh->sector;
 	dev->req.bi_private = sh;
+	dev->vec.bv_page = dev->page;
+
+	bio_init(&dev->rreq);
+	dev->rreq.bi_io_vec = &dev->rvec;
+	dev->rreq.bi_vcnt++;
+	dev->rreq.bi_max_vecs++;
+	dev->rreq.bi_private = sh;
+	dev->rvec.bv_page = dev->page;
 
 	dev->flags = 0;
 	dev->sector = compute_blocknr(sh, i, previous);
@@ -1724,18 +1840,15 @@
 {
 	char b[BDEVNAME_SIZE];
 	struct r5conf *conf = mddev->private;
+	unsigned long flags;
 	pr_debug("raid456: error called\n");
 
-	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
-		mddev->degraded++;
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-		/*
-		 * if recovery was running, make sure it aborts.
-		 */
-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-	}
+	spin_lock_irqsave(&conf->device_lock, flags);
+	clear_bit(In_sync, &rdev->flags);
+	mddev->degraded = calc_degraded(conf);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
+	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2362,8 +2475,9 @@
 	md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
 	clear_bit(STRIPE_SYNCING, &sh->state);
 	s->syncing = 0;
+	s->replacing = 0;
 	/* There is nothing more to do for sync/check/repair.
-	 * For recover we need to record a bad block on all
+	 * For recover/replace we need to record a bad block on all
 	 * non-sync devices, or abort the recovery
 	 */
 	if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
@@ -2373,12 +2487,18 @@
 	 */
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->disks[i].rdev;
-		if (!rdev
-		    || test_bit(Faulty, &rdev->flags)
-		    || test_bit(In_sync, &rdev->flags))
-			continue;
-		if (!rdev_set_badblocks(rdev, sh->sector,
-					STRIPE_SECTORS, 0))
+		if (rdev
+		    && !test_bit(Faulty, &rdev->flags)
+		    && !test_bit(In_sync, &rdev->flags)
+		    && !rdev_set_badblocks(rdev, sh->sector,
+					   STRIPE_SECTORS, 0))
+			abort = 1;
+		rdev = conf->disks[i].replacement;
+		if (rdev
+		    && !test_bit(Faulty, &rdev->flags)
+		    && !test_bit(In_sync, &rdev->flags)
+		    && !rdev_set_badblocks(rdev, sh->sector,
+					   STRIPE_SECTORS, 0))
 			abort = 1;
 	}
 	if (abort) {
@@ -2387,6 +2507,22 @@
 	}
 }
 
+static int want_replace(struct stripe_head *sh, int disk_idx)
+{
+	struct md_rdev *rdev;
+	int rv = 0;
+	/* Doing recovery so rcu locking not required */
+	rdev = sh->raid_conf->disks[disk_idx].replacement;
+	if (rdev
+	    && !test_bit(Faulty, &rdev->flags)
+	    && !test_bit(In_sync, &rdev->flags)
+	    && (rdev->recovery_offset <= sh->sector
+		|| rdev->mddev->recovery_cp <= sh->sector))
+		rv = 1;
+
+	return rv;
+}
+
 /* fetch_block - checks the given member device to see if its data needs
  * to be read or computed to satisfy a request.
  *
@@ -2406,6 +2542,7 @@
 	    (dev->toread ||
 	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
 	     s->syncing || s->expanding ||
+	     (s->replacing && want_replace(sh, disk_idx)) ||
 	     (s->failed >= 1 && fdev[0]->toread) ||
 	     (s->failed >= 2 && fdev[1]->toread) ||
 	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
@@ -2959,22 +3096,18 @@
 	}
 }
 
-
 /*
  * handle_stripe - do things to a stripe.
  *
- * We lock the stripe and then examine the state of various bits
- * to see what needs to be done.
+ * We lock the stripe by setting STRIPE_ACTIVE and then examine the
+ * state of various bits to see what needs to be done.
  * Possible results:
- *    return some read request which now have data
- *    return some write requests which are safely on disc
+ *    return some read requests which now have data
+ *    return some write requests which are safely on storage
  *    schedule a read on some buffers
  *    schedule a write of some buffers
  *    return confirmation of parity correctness
  *
- * buffers are taken off read_list or write_list, and bh_cache buffers
- * get BH_Lock set before the stripe lock is released.
- *
  */
 
 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
@@ -2983,10 +3116,10 @@
 	int disks = sh->disks;
 	struct r5dev *dev;
 	int i;
+	int do_recovery = 0;
 
 	memset(s, 0, sizeof(*s));
 
-	s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
 	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
 	s->failed_num[0] = -1;
@@ -3004,7 +3137,8 @@
 		dev = &sh->dev[i];
 
 		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
-			i, dev->flags, dev->toread, dev->towrite, dev->written);
+			 i, dev->flags,
+			 dev->toread, dev->towrite, dev->written);
 		/* maybe we can reply to a read
 		 *
 		 * new wantfill requests are only permitted while
@@ -3035,7 +3169,21 @@
 		}
 		if (dev->written)
 			s->written++;
-		rdev = rcu_dereference(conf->disks[i].rdev);
+		/* Prefer to use the replacement for reads, but only
+		 * if it is recovered enough and has no bad blocks.
+		 */
+		rdev = rcu_dereference(conf->disks[i].replacement);
+		if (rdev && !test_bit(Faulty, &rdev->flags) &&
+		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
+		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+				 &first_bad, &bad_sectors))
+			set_bit(R5_ReadRepl, &dev->flags);
+		else {
+			if (rdev)
+				set_bit(R5_NeedReplace, &dev->flags);
+			rdev = rcu_dereference(conf->disks[i].rdev);
+			clear_bit(R5_ReadRepl, &dev->flags);
+		}
 		if (rdev && test_bit(Faulty, &rdev->flags))
 			rdev = NULL;
 		if (rdev) {
@@ -3065,26 +3213,50 @@
 			}
 		} else if (test_bit(In_sync, &rdev->flags))
 			set_bit(R5_Insync, &dev->flags);
-		else {
+		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
 			/* in sync if before recovery_offset */
-			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-				set_bit(R5_Insync, &dev->flags);
-		}
+			set_bit(R5_Insync, &dev->flags);
+		else if (test_bit(R5_UPTODATE, &dev->flags) &&
+			 test_bit(R5_Expanded, &dev->flags))
+			/* If we've reshaped into here, we assume it is Insync.
+			 * We will shortly update recovery_offset to make
+			 * it official.
+			 */
+			set_bit(R5_Insync, &dev->flags);
+
 		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
-			clear_bit(R5_Insync, &dev->flags);
-			if (!test_bit(Faulty, &rdev->flags)) {
+			/* This flag does not apply to '.replacement',
+			 * only to .rdev, so make sure to check that */
+			struct md_rdev *rdev2 = rcu_dereference(
+				conf->disks[i].rdev);
+			if (rdev2 == rdev)
+				clear_bit(R5_Insync, &dev->flags);
+			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
 				s->handle_bad_blocks = 1;
-				atomic_inc(&rdev->nr_pending);
+				atomic_inc(&rdev2->nr_pending);
 			} else
 				clear_bit(R5_WriteError, &dev->flags);
 		}
 		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
-			if (!test_bit(Faulty, &rdev->flags)) {
+			/* This flag does not apply to '.replacement',
+			 * only to .rdev, so make sure to check that */
+			struct md_rdev *rdev2 = rcu_dereference(
+				conf->disks[i].rdev);
+			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
 				s->handle_bad_blocks = 1;
-				atomic_inc(&rdev->nr_pending);
+				atomic_inc(&rdev2->nr_pending);
 			} else
 				clear_bit(R5_MadeGood, &dev->flags);
 		}
+		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
+			struct md_rdev *rdev2 = rcu_dereference(
+				conf->disks[i].replacement);
+			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
+				s->handle_bad_blocks = 1;
+				atomic_inc(&rdev2->nr_pending);
+			} else
+				clear_bit(R5_MadeGoodRepl, &dev->flags);
+		}
 		if (!test_bit(R5_Insync, &dev->flags)) {
 			/* The ReadError flag will just be confusing now */
 			clear_bit(R5_ReadError, &dev->flags);
@@ -3096,9 +3268,25 @@
 			if (s->failed < 2)
 				s->failed_num[s->failed] = i;
 			s->failed++;
+			if (rdev && !test_bit(Faulty, &rdev->flags))
+				do_recovery = 1;
 		}
 	}
 	spin_unlock_irq(&conf->device_lock);
+	if (test_bit(STRIPE_SYNCING, &sh->state)) {
+		/* If there is a failed device being replaced,
+		 *     we must be recovering.
+		 * else if we are after recovery_cp, we must be syncing
+		 * else we can only be replacing
+		 * sync and recovery both need to read all devices, and so
+		 * use the same flag.
+		 */
+		if (do_recovery ||
+		    sh->sector >= conf->mddev->recovery_cp)
+			s->syncing = 1;
+		else
+			s->replacing = 1;
+	}
 	rcu_read_unlock();
 }
 
@@ -3140,7 +3328,7 @@
 
 	if (unlikely(s.blocked_rdev)) {
 		if (s.syncing || s.expanding || s.expanded ||
-		    s.to_write || s.written) {
+		    s.replacing || s.to_write || s.written) {
 			set_bit(STRIPE_HANDLE, &sh->state);
 			goto finish;
 		}
@@ -3166,7 +3354,7 @@
 		sh->reconstruct_state = 0;
 		if (s.to_read+s.to_write+s.written)
 			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
-		if (s.syncing)
+		if (s.syncing || s.replacing)
 			handle_failed_sync(conf, sh, &s);
 	}
 
@@ -3197,7 +3385,9 @@
 	 */
 	if (s.to_read || s.non_overwrite
 	    || (conf->level == 6 && s.to_write && s.failed)
-	    || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+	    || (s.syncing && (s.uptodate + s.compute < disks))
+	    || s.replacing
+	    || s.expanding)
 		handle_stripe_fill(sh, &s, disks);
 
 	/* Now we check to see if any write operations have recently
@@ -3259,7 +3449,20 @@
 			handle_parity_checks5(conf, sh, &s, disks);
 	}
 
-	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+	if (s.replacing && s.locked == 0
+	    && !test_bit(STRIPE_INSYNC, &sh->state)) {
+		/* Write out to replacement devices where possible */
+		for (i = 0; i < conf->raid_disks; i++)
+			if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
+			    test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+				set_bit(R5_WantReplace, &sh->dev[i].flags);
+				set_bit(R5_LOCKED, &sh->dev[i].flags);
+				s.locked++;
+			}
+		set_bit(STRIPE_INSYNC, &sh->state);
+	}
+	if ((s.syncing || s.replacing) && s.locked == 0 &&
+	    test_bit(STRIPE_INSYNC, &sh->state)) {
 		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
 		clear_bit(STRIPE_SYNCING, &sh->state);
 	}
@@ -3357,6 +3560,15 @@
 						     STRIPE_SECTORS);
 				rdev_dec_pending(rdev, conf->mddev);
 			}
+			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
+				rdev = conf->disks[i].replacement;
+				if (!rdev)
+					/* rdev has been moved down */
+					rdev = conf->disks[i].rdev;
+				rdev_clear_badblocks(rdev, sh->sector,
+						     STRIPE_SECTORS);
+				rdev_dec_pending(rdev, conf->mddev);
+			}
 		}
 
 	if (s.ops_request)
@@ -3580,6 +3792,7 @@
 	int dd_idx;
 	struct bio* align_bi;
 	struct md_rdev *rdev;
+	sector_t end_sector;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
 		pr_debug("chunk_aligned_read : non aligned\n");
@@ -3604,9 +3817,19 @@
 						    0,
 						    &dd_idx, NULL);
 
+	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
 	rcu_read_lock();
-	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
-	if (rdev && test_bit(In_sync, &rdev->flags)) {
+	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
+	if (!rdev || test_bit(Faulty, &rdev->flags) ||
+	    rdev->recovery_offset < end_sector) {
+		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+		if (rdev &&
+		    (test_bit(Faulty, &rdev->flags) ||
+		    !(test_bit(In_sync, &rdev->flags) ||
+		      rdev->recovery_offset >= end_sector)))
+			rdev = NULL;
+	}
+	if (rdev) {
 		sector_t first_bad;
 		int bad_sectors;
 
@@ -4131,7 +4354,6 @@
 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
 	}
 
-
 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
 
 	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
@@ -4202,7 +4424,6 @@
 			return handled;
 		}
 
-		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
 			release_stripe(sh);
 			raid5_set_bi_hw_segments(raid_bio, scnt);
@@ -4629,7 +4850,15 @@
 			continue;
 		disk = conf->disks + raid_disk;
 
-		disk->rdev = rdev;
+		if (test_bit(Replacement, &rdev->flags)) {
+			if (disk->replacement)
+				goto abort;
+			disk->replacement = rdev;
+		} else {
+			if (disk->rdev)
+				goto abort;
+			disk->rdev = rdev;
+		}
 
 		if (test_bit(In_sync, &rdev->flags)) {
 			char b[BDEVNAME_SIZE];
@@ -4718,6 +4947,7 @@
 	int dirty_parity_disks = 0;
 	struct md_rdev *rdev;
 	sector_t reshape_offset = 0;
+	int i;
 
 	if (mddev->recovery_cp != MaxSector)
 		printk(KERN_NOTICE "md/raid:%s: not clean"
@@ -4807,12 +5037,25 @@
 	conf->thread = NULL;
 	mddev->private = conf;
 
-	/*
-	 * 0 for a fully functional array, 1 or 2 for a degraded array.
-	 */
-	list_for_each_entry(rdev, &mddev->disks, same_set) {
-		if (rdev->raid_disk < 0)
+	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
+	     i++) {
+		rdev = conf->disks[i].rdev;
+		if (!rdev && conf->disks[i].replacement) {
+			/* The replacement is all we have yet */
+			rdev = conf->disks[i].replacement;
+			conf->disks[i].replacement = NULL;
+			clear_bit(Replacement, &rdev->flags);
+			conf->disks[i].rdev = rdev;
+		}
+		if (!rdev)
 			continue;
+		if (conf->disks[i].replacement &&
+		    conf->reshape_progress != MaxSector) {
+			/* replacements and reshape simply do not mix. */
+			printk(KERN_ERR "md: cannot handle concurrent "
+			       "replacement and reshape.\n");
+			goto abort;
+		}
 		if (test_bit(In_sync, &rdev->flags)) {
 			working_disks++;
 			continue;
@@ -4846,8 +5089,10 @@
 		dirty_parity_disks++;
 	}
 
-	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
-			   - working_disks);
+	/*
+	 * 0 for a fully functional array, 1 or 2 for a degraded array.
+	 */
+	mddev->degraded = calc_degraded(conf);
 
 	if (has_failed(conf)) {
 		printk(KERN_ERR "md/raid:%s: not enough operational devices"
@@ -5010,7 +5255,25 @@
 
 	for (i = 0; i < conf->raid_disks; i++) {
 		tmp = conf->disks + i;
-		if (tmp->rdev
+		if (tmp->replacement
+		    && tmp->replacement->recovery_offset == MaxSector
+		    && !test_bit(Faulty, &tmp->replacement->flags)
+		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+			/* Replacement has just become active. */
+			if (!tmp->rdev
+			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+				count++;
+			if (tmp->rdev) {
+				/* Replaced device is not technically faulty,
+				 * but we need to be sure it gets removed
+				 * and never re-added.
+				 */
+				set_bit(Faulty, &tmp->rdev->flags);
+				sysfs_notify_dirent_safe(
+					tmp->rdev->sysfs_state);
+			}
+			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+		} else if (tmp->rdev
 		    && tmp->rdev->recovery_offset == MaxSector
 		    && !test_bit(Faulty, &tmp->rdev->flags)
 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
@@ -5019,49 +5282,68 @@
 		}
 	}
 	spin_lock_irqsave(&conf->device_lock, flags);
-	mddev->degraded -= count;
+	mddev->degraded = calc_degraded(conf);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	print_raid5_conf(conf);
 	return count;
 }
 
-static int raid5_remove_disk(struct mddev *mddev, int number)
+static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
 	struct r5conf *conf = mddev->private;
 	int err = 0;
-	struct md_rdev *rdev;
+	int number = rdev->raid_disk;
+	struct md_rdev **rdevp;
 	struct disk_info *p = conf->disks + number;
 
 	print_raid5_conf(conf);
-	rdev = p->rdev;
-	if (rdev) {
-		if (number >= conf->raid_disks &&
-		    conf->reshape_progress == MaxSector)
-			clear_bit(In_sync, &rdev->flags);
+	if (rdev == p->rdev)
+		rdevp = &p->rdev;
+	else if (rdev == p->replacement)
+		rdevp = &p->replacement;
+	else
+		return 0;
 
-		if (test_bit(In_sync, &rdev->flags) ||
-		    atomic_read(&rdev->nr_pending)) {
-			err = -EBUSY;
-			goto abort;
-		}
-		/* Only remove non-faulty devices if recovery
-		 * isn't possible.
-		 */
-		if (!test_bit(Faulty, &rdev->flags) &&
-		    mddev->recovery_disabled != conf->recovery_disabled &&
-		    !has_failed(conf) &&
-		    number < conf->raid_disks) {
-			err = -EBUSY;
-			goto abort;
-		}
-		p->rdev = NULL;
-		synchronize_rcu();
-		if (atomic_read(&rdev->nr_pending)) {
-			/* lost the race, try later */
-			err = -EBUSY;
-			p->rdev = rdev;
-		}
+	if (number >= conf->raid_disks &&
+	    conf->reshape_progress == MaxSector)
+		clear_bit(In_sync, &rdev->flags);
+
+	if (test_bit(In_sync, &rdev->flags) ||
+	    atomic_read(&rdev->nr_pending)) {
+		err = -EBUSY;
+		goto abort;
 	}
+	/* Only remove non-faulty devices if recovery
+	 * isn't possible.
+	 */
+	if (!test_bit(Faulty, &rdev->flags) &&
+	    mddev->recovery_disabled != conf->recovery_disabled &&
+	    !has_failed(conf) &&
+	    (!p->replacement || p->replacement == rdev) &&
+	    number < conf->raid_disks) {
+		err = -EBUSY;
+		goto abort;
+	}
+	*rdevp = NULL;
+	synchronize_rcu();
+	if (atomic_read(&rdev->nr_pending)) {
+		/* lost the race, try later */
+		err = -EBUSY;
+		*rdevp = rdev;
+	} else if (p->replacement) {
+		/* We must have just cleared 'rdev' */
+		p->rdev = p->replacement;
+		clear_bit(Replacement, &p->replacement->flags);
+		smp_mb(); /* Make sure other CPUs might see both pointing at
+			   * the same device, but never see neither set - if
+			   * they are careful */
+		p->replacement = NULL;
+		clear_bit(WantReplacement, &rdev->flags);
+	} else
+		/* We might have just removed the Replacement as faulty -
+		 * clear the bit just in case.
+		 */
+		clear_bit(WantReplacement, &rdev->flags);
 abort:
 
 	print_raid5_conf(conf);
@@ -5097,8 +5379,9 @@
 		disk = rdev->saved_raid_disk;
 	else
 		disk = first;
-	for ( ; disk <= last ; disk++)
-		if ((p=conf->disks + disk)->rdev == NULL) {
+	for ( ; disk <= last ; disk++) {
+		p = conf->disks + disk;
+		if (p->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
 			rdev->raid_disk = disk;
 			err = 0;
@@ -5107,6 +5390,17 @@
 			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
+		if (test_bit(WantReplacement, &p->rdev->flags) &&
+		    p->replacement == NULL) {
+			clear_bit(In_sync, &rdev->flags);
+			set_bit(Replacement, &rdev->flags);
+			rdev->raid_disk = disk;
+			err = 0;
+			conf->fullsync = 1;
+			rcu_assign_pointer(p->replacement, rdev);
+			break;
+		}
+	}
 	print_raid5_conf(conf);
 	return err;
 }
@@ -5280,8 +5574,7 @@
 		 * pre and post number of devices.
 		 */
 		spin_lock_irqsave(&conf->device_lock, flags);
-		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
-			- added_devices;
+		mddev->degraded = calc_degraded(conf);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	mddev->raid_disks = conf->raid_disks;
@@ -5350,17 +5643,15 @@
 			revalidate_disk(mddev->gendisk);
 		} else {
 			int d;
-			mddev->degraded = conf->raid_disks;
-			for (d = 0; d < conf->raid_disks ; d++)
-				if (conf->disks[d].rdev &&
-				    test_bit(In_sync,
-					     &conf->disks[d].rdev->flags))
-					mddev->degraded--;
+			spin_lock_irq(&conf->device_lock);
+			mddev->degraded = calc_degraded(conf);
+			spin_unlock_irq(&conf->device_lock);
 			for (d = conf->raid_disks ;
 			     d < conf->raid_disks - mddev->delta_disks;
 			     d++) {
 				struct md_rdev *rdev = conf->disks[d].rdev;
-				if (rdev && raid5_remove_disk(mddev, d) == 0) {
+				if (rdev &&
+				    raid5_remove_disk(mddev, rdev) == 0) {
 					sysfs_unlink_rdev(mddev, rdev);
 					rdev->raid_disk = -1;
 				}
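
The raid5.c hunks above repeatedly replace open-coded degraded-device arithmetic with a single calc_degraded() helper whose definition falls outside this excerpt. As a rough, hypothetical sketch only - assuming the helper simply counts members that are missing, Faulty or not In_sync over both the old and the new geometry and reports the worse of the two - it could look something like:

/* Hypothetical sketch, not the actual md helper. */
static int calc_degraded_sketch(struct r5conf *conf)
{
	int degraded = 0, degraded2 = 0;
	int i;

	rcu_read_lock();
	/* Failed or out-of-sync members in the pre-reshape geometry. */
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

		if (!rdev || test_bit(Faulty, &rdev->flags) ||
		    !test_bit(In_sync, &rdev->flags))
			degraded++;
	}
	/* Same count for the post-reshape geometry. */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

		if (!rdev || test_bit(Faulty, &rdev->flags) ||
		    !test_bit(In_sync, &rdev->flags))
			degraded2++;
	}
	rcu_read_unlock();

	/* Report the worst case so callers see a conservative value. */
	return degraded2 > degraded ? degraded2 : degraded;
}

The callers in the hunks above take conf->device_lock around the assignment to mddev->degraded, so a helper like this needs nothing beyond the RCU read section itself.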
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index e10c553..8d8e139 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -27,7 +27,7 @@
  * The possible state transitions are:
  *
  *  Empty -> Want   - on read or write to get old data for  parity calc
- *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
+ *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
  *  Empty -> Clean  - on compute_block when computing a block for failed drive
  *  Want  -> Empty  - on failed read
  *  Want  -> Clean  - on successful completion of read request
@@ -226,8 +226,11 @@
 		#endif
 	} ops;
 	struct r5dev {
-		struct bio	req;
-		struct bio_vec	vec;
+		/* rreq and rvec are used for the replacement device when
+		 * writing data to both devices.
+		 */
+		struct bio	req, rreq;
+		struct bio_vec	vec, rvec;
 		struct page	*page;
 		struct bio	*toread, *read, *towrite, *written;
 		sector_t	sector;			/* sector of this page */
@@ -239,7 +242,13 @@
  *     for handle_stripe.
  */
 struct stripe_head_state {
-	int syncing, expanding, expanded;
+	/* 'syncing' means that we need to read all devices, either
+	 * to check/correct parity, or to reconstruct a missing device.
+	 * 'replacing' means we are replacing one or more drives and
+	 * the source is valid at this point so we don't need to
+	 * read all devices, just the replacement targets.
+	 */
+	int syncing, expanding, expanded, replacing;
 	int locked, uptodate, to_read, to_write, failed, written;
 	int to_fill, compute, req_compute, non_overwrite;
 	int failed_num[2];
@@ -252,38 +261,41 @@
 	int handle_bad_blocks;
 };
 
-/* Flags */
-#define	R5_UPTODATE	0	/* page contains current data */
-#define	R5_LOCKED	1	/* IO has been submitted on "req" */
-#define	R5_OVERWRITE	2	/* towrite covers whole page */
+/* Flags for struct r5dev.flags */
+enum r5dev_flags {
+	R5_UPTODATE,	/* page contains current data */
+	R5_LOCKED,	/* IO has been submitted on "req" */
+	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
+	R5_OVERWRITE,	/* towrite covers whole page */
 /* and some that are internal to handle_stripe */
-#define	R5_Insync	3	/* rdev && rdev->in_sync at start */
-#define	R5_Wantread	4	/* want to schedule a read */
-#define	R5_Wantwrite	5
-#define	R5_Overlap	7	/* There is a pending overlapping request on this block */
-#define	R5_ReadError	8	/* seen a read error here recently */
-#define	R5_ReWrite	9	/* have tried to over-write the readerror */
+	R5_Insync,	/* rdev && rdev->in_sync at start */
+	R5_Wantread,	/* want to schedule a read */
+	R5_Wantwrite,
+	R5_Overlap,	/* There is a pending overlapping request
+			 * on this block */
+	R5_ReadError,	/* seen a read error here recently */
+	R5_ReWrite,	/* have tried to over-write the readerror */
 
-#define	R5_Expanded	10	/* This block now has post-expand data */
-#define	R5_Wantcompute	11	/* compute_block in progress treat as
-				 * uptodate
-				 */
-#define	R5_Wantfill	12	/* dev->toread contains a bio that needs
-				 * filling
-				 */
-#define	R5_Wantdrain	13	/* dev->towrite needs to be drained */
-#define	R5_WantFUA	14	/* Write should be FUA */
-#define	R5_WriteError	15	/* got a write error - need to record it */
-#define	R5_MadeGood	16	/* A bad block has been fixed by writing to it*/
-/*
- * Write method
- */
-#define RECONSTRUCT_WRITE	1
-#define READ_MODIFY_WRITE	2
-/* not a write method, but a compute_parity mode */
-#define	CHECK_PARITY		3
-/* Additional compute_parity mode -- updates the parity w/o LOCKING */
-#define UPDATE_PARITY		4
+	R5_Expanded,	/* This block now has post-expand data */
+	R5_Wantcompute,	/* compute_block in progress treat as
+			 * uptodate
+			 */
+	R5_Wantfill,	/* dev->toread contains a bio that needs
+			 * filling
+			 */
+	R5_Wantdrain,	/* dev->towrite needs to be drained */
+	R5_WantFUA,	/* Write should be FUA */
+	R5_WriteError,	/* got a write error - need to record it */
+	R5_MadeGood,	/* A bad block has been fixed by writing to it */
+	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
+	R5_MadeGoodRepl,/* A bad block on the replacement device has been
+			 * fixed by writing to it */
+	R5_NeedReplace,	/* This device has a replacement which is not
+			 * up-to-date at this stripe. */
+	R5_WantReplace, /* We need to update the replacement, we have read
+			 * data in, and now is a good time to write it out.
+			 */
+};
 
 /*
  * Stripe state
@@ -311,13 +323,14 @@
 /*
  * Operation request flags
  */
-#define STRIPE_OP_BIOFILL	0
-#define STRIPE_OP_COMPUTE_BLK	1
-#define STRIPE_OP_PREXOR	2
-#define STRIPE_OP_BIODRAIN	3
-#define STRIPE_OP_RECONSTRUCT	4
-#define STRIPE_OP_CHECK	5
-
+enum {
+	STRIPE_OP_BIOFILL,
+	STRIPE_OP_COMPUTE_BLK,
+	STRIPE_OP_PREXOR,
+	STRIPE_OP_BIODRAIN,
+	STRIPE_OP_RECONSTRUCT,
+	STRIPE_OP_CHECK,
+};
 /*
  * Plugging:
  *
@@ -344,13 +357,12 @@
 
 
 struct disk_info {
-	struct md_rdev	*rdev;
+	struct md_rdev	*rdev, *replacement;
 };
 
 struct r5conf {
 	struct hlist_head	*stripe_hashtbl;
 	struct mddev		*mddev;
-	struct disk_info	*spare;
 	int			chunk_sectors;
 	int			level, algorithm;
 	int			max_degraded;
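
The #define-to-enum conversion in raid5.h above is behaviour-neutral: the R5_* values are still plain bit numbers fed to the atomic bitops on dev->flags (an unsigned long). A minimal, hypothetical helper illustrating that usage, mirroring the handle_stripe() hunk shown earlier (the function name is made up for illustration):

/* Illustrative only: schedule a catch-up write to a replacement device. */
static inline void want_replacement_write(struct r5dev *dev)
{
	if (test_bit(R5_UPTODATE, &dev->flags) &&
	    test_bit(R5_NeedReplace, &dev->flags)) {
		set_bit(R5_WantReplace, &dev->flags);
		set_bit(R5_LOCKED, &dev->flags);
	}
}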
diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
index 7eb1bf7..5d02221 100644
--- a/drivers/media/common/tuners/mxl5007t.c
+++ b/drivers/media/common/tuners/mxl5007t.c
@@ -488,9 +488,10 @@
 
 static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
 {
+	u8 buf[2] = { 0xfb, reg };
 	struct i2c_msg msg[] = {
 		{ .addr = state->i2c_props.addr, .flags = 0,
-		  .buf = &reg, .len = 1 },
+		  .buf = buf, .len = 2 },
 		{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
 		  .buf = val, .len = 1 },
 	};
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
index aacfe23..4fc2973 100644
--- a/drivers/media/common/tuners/tda18218.c
+++ b/drivers/media/common/tuners/tda18218.c
@@ -141,7 +141,7 @@
 	switch (params->u.ofdm.bandwidth) {
 	case BANDWIDTH_6_MHZ:
 		LP_Fc = 0;
-		LO_Frac = params->frequency + 4000000;
+		LO_Frac = params->frequency + 3000000;
 		break;
 	case BANDWIDTH_7_MHZ:
 		LP_Fc = 1;
diff --git a/drivers/media/dvb/b2c2/flexcop-usb.c b/drivers/media/dvb/b2c2/flexcop-usb.c
index bedcfb6..26c666d 100644
--- a/drivers/media/dvb/b2c2/flexcop-usb.c
+++ b/drivers/media/dvb/b2c2/flexcop-usb.c
@@ -583,25 +583,7 @@
 	.id_table	= flexcop_usb_table,
 };
 
-/* module stuff */
-static int __init flexcop_usb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&flexcop_usb_driver))) {
-		err("usb_register failed. (%d)", result);
-		return result;
-	}
-	return 0;
-}
-
-static void __exit flexcop_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&flexcop_usb_driver);
-}
-
-module_init(flexcop_usb_module_init);
-module_exit(flexcop_usb_module_exit);
+module_usb_driver(flexcop_usb_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_NAME);
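
The long run of conversions that starts here replaces each driver's hand-rolled init/exit pair with the module_usb_driver() helper. To a first approximation - the authoritative definition lives in <linux/usb.h> and may differ in detail - the macro expands to the same boilerplate being deleted:

/* Approximate expansion of module_usb_driver(); see <linux/usb.h>. */
#define module_usb_driver(__usb_driver) \
static int __init __usb_driver##_init(void) \
{ \
	return usb_register(&(__usb_driver)); \
} \
module_init(__usb_driver##_init); \
static void __exit __usb_driver##_exit(void) \
{ \
	usb_deregister(&(__usb_driver)); \
} \
module_exit(__usb_driver##_exit);

Each hunk below is therefore purely mechanical: the driver-specific error message disappears, but registration and deregistration behave as before.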
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index ba9a643..d1e91bc 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -1480,7 +1480,7 @@
 	.open           = ddb_open,
 };
 
-static char *ddb_devnode(struct device *device, mode_t *mode)
+static char *ddb_devnode(struct device *device, umode_t *mode)
 {
 	struct ddb *dev = dev_get_drvdata(device);
 
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index f732877..00a6732 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -450,7 +450,7 @@
 	return 0;
 }
 
-static char *dvb_devnode(struct device *dev, mode_t *mode)
+static char *dvb_devnode(struct device *dev, umode_t *mode)
 {
 	struct dvb_device *dvbdev = dev_get_drvdata(dev);
 
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 2aef3c8..8d7fef8 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -183,26 +183,7 @@
 	.id_table	= a800_table,
 };
 
-/* module stuff */
-static int __init a800_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&a800_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit a800_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&a800_driver);
-}
-
-module_init (a800_module_init);
-module_exit (a800_module_exit);
+module_usb_driver(a800_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)");
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index c6c275b..56cbd36 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -1713,25 +1713,7 @@
 	.id_table = af9015_usb_table,
 };
 
-/* module stuff */
-static int __init af9015_usb_module_init(void)
-{
-	int ret;
-	ret = usb_register(&af9015_usb_driver);
-	if (ret)
-		err("module init failed:%d", ret);
-
-	return ret;
-}
-
-static void __exit af9015_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&af9015_usb_driver);
-}
-
-module_init(af9015_usb_module_init);
-module_exit(af9015_usb_module_exit);
+module_usb_driver(af9015_usb_driver);
 
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("Driver for Afatech AF9015 DVB-T");
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 5f2278b..b39f14f 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -1091,26 +1091,7 @@
 	.id_table   = anysee_table,
 };
 
-/* module stuff */
-static int __init anysee_module_init(void)
-{
-	int ret;
-
-	ret = usb_register(&anysee_driver);
-	if (ret)
-		err("%s: usb_register failed. Error number %d", __func__, ret);
-
-	return ret;
-}
-
-static void __exit anysee_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&anysee_driver);
-}
-
-module_init(anysee_module_init);
-module_exit(anysee_module_exit);
+module_usb_driver(anysee_driver);
 
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/au6610.c b/drivers/media/dvb/dvb-usb/au6610.c
index b779949..16210c0 100644
--- a/drivers/media/dvb/dvb-usb/au6610.c
+++ b/drivers/media/dvb/dvb-usb/au6610.c
@@ -244,26 +244,7 @@
 	.id_table   = au6610_table,
 };
 
-/* module stuff */
-static int __init au6610_module_init(void)
-{
-	int ret;
-
-	ret = usb_register(&au6610_driver);
-	if (ret)
-		err("usb_register failed. Error number %d", ret);
-
-	return ret;
-}
-
-static void __exit au6610_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&au6610_driver);
-}
-
-module_init(au6610_module_init);
-module_exit(au6610_module_exit);
+module_usb_driver(au6610_driver);
 
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index bf67b4d..5e45ae6 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -1174,28 +1174,7 @@
 	.id_table 	= az6027_usb_table,
 };
 
-/* module stuff */
-static int __init az6027_usb_module_init(void)
-{
-	int result;
-
-	result = usb_register(&az6027_usb_driver);
-	if (result) {
-		err("usb_register failed. (%d)", result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit az6027_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&az6027_usb_driver);
-}
-
-module_init(az6027_usb_module_init);
-module_exit(az6027_usb_module_exit);
+module_usb_driver(az6027_usb_driver);
 
 MODULE_AUTHOR("Adams Xu <Adams.xu@azwave.com.cn>");
 MODULE_DESCRIPTION("Driver for AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)");
diff --git a/drivers/media/dvb/dvb-usb/ce6230.c b/drivers/media/dvb/dvb-usb/ce6230.c
index 57afb5a..fa63725 100644
--- a/drivers/media/dvb/dvb-usb/ce6230.c
+++ b/drivers/media/dvb/dvb-usb/ce6230.c
@@ -317,27 +317,7 @@
 	.id_table   = ce6230_table,
 };
 
-/* module stuff */
-static int __init ce6230_module_init(void)
-{
-	int ret;
-	deb_info("%s:\n", __func__);
-	ret = usb_register(&ce6230_driver);
-	if (ret)
-		err("usb_register failed with error:%d", ret);
-
-	return ret;
-}
-
-static void __exit ce6230_module_exit(void)
-{
-	deb_info("%s:\n", __func__);
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&ce6230_driver);
-}
-
-module_init(ce6230_module_init);
-module_exit(ce6230_module_exit);
+module_usb_driver(ce6230_driver);
 
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("Driver for Intel CE6230 DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index f9d9050..0a98548 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -247,25 +247,7 @@
 	.id_table	= cinergyt2_usb_table
 };
 
-static int __init cinergyt2_usb_init(void)
-{
-	int err;
-
-	err = usb_register(&cinergyt2_driver);
-	if (err) {
-		err("usb_register() failed! (err %i)\n", err);
-		return err;
-	}
-	return 0;
-}
-
-static void __exit cinergyt2_usb_exit(void)
-{
-	usb_deregister(&cinergyt2_driver);
-}
-
-module_init(cinergyt2_usb_init);
-module_exit(cinergyt2_usb_exit);
+module_usb_driver(cinergyt2_driver);
 
 MODULE_DESCRIPTION("Terratec Cinergy T2 DVB-T driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 9f2a02c..949ea1b 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -2034,26 +2034,7 @@
 	.id_table	= cxusb_table,
 };
 
-/* module stuff */
-static int __init cxusb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&cxusb_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit cxusb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&cxusb_driver);
-}
-
-module_init (cxusb_module_init);
-module_exit (cxusb_module_exit);
+module_usb_driver(cxusb_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 156cbfc..2069994 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -832,27 +832,7 @@
 	.id_table   = dib0700_usb_id_table,
 };
 
-/* module stuff */
-static int __init dib0700_module_init(void)
-{
-	int result;
-	info("loaded with support for %d different device-types", dib0700_device_count);
-	if ((result = usb_register(&dib0700_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit dib0700_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&dib0700_driver);
-}
-
-module_init (dib0700_module_init);
-module_exit (dib0700_module_exit);
+module_usb_driver(dib0700_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
 MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 7270791..a4ac37e 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -463,26 +463,7 @@
 	.id_table	= dibusb_dib3000mb_table,
 };
 
-/* module stuff */
-static int __init dibusb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&dibusb_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit dibusb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&dibusb_driver);
-}
-
-module_init (dibusb_module_init);
-module_exit (dibusb_module_exit);
+module_usb_driver(dibusb_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)");
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index 9c165e2..9d1a59d0 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -141,26 +141,7 @@
 	.id_table	= dibusb_dib3000mc_table,
 };
 
-/* module stuff */
-static int __init dibusb_mc_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&dibusb_mc_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit dibusb_mc_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&dibusb_mc_driver);
-}
-
-module_init (dibusb_mc_module_init);
-module_exit (dibusb_mc_module_exit);
+module_usb_driver(dibusb_mc_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index f718411..0a9a798 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -346,26 +346,7 @@
 	.id_table	= digitv_table,
 };
 
-/* module stuff */
-static int __init digitv_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&digitv_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit digitv_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&digitv_driver);
-}
-
-module_init (digitv_module_init);
-module_exit (digitv_module_exit);
+module_usb_driver(digitv_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 106dfd5..66f205c 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -360,26 +360,7 @@
 	.id_table	= dtt200u_usb_table,
 };
 
-/* module stuff */
-static int __init dtt200u_usb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&dtt200u_usb_driver))) {
-		err("usb_register failed. (%d)",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit dtt200u_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&dtt200u_usb_driver);
-}
-
-module_init(dtt200u_usb_module_init);
-module_exit(dtt200u_usb_module_exit);
+module_usb_driver(dtt200u_usb_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices");
diff --git a/drivers/media/dvb/dvb-usb/dtv5100.c b/drivers/media/dvb/dvb-usb/dtv5100.c
index 7373132..3d11df4 100644
--- a/drivers/media/dvb/dvb-usb/dtv5100.c
+++ b/drivers/media/dvb/dvb-usb/dtv5100.c
@@ -217,26 +217,7 @@
 	.id_table	= dtv5100_table,
 };
 
-/* module stuff */
-static int __init dtv5100_module_init(void)
-{
-	int ret;
-
-	ret = usb_register(&dtv5100_driver);
-	if (ret)
-		err("usb_register failed. Error number %d", ret);
-
-	return ret;
-}
-
-static void __exit dtv5100_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&dtv5100_driver);
-}
-
-module_init(dtv5100_module_init);
-module_exit(dtv5100_module_exit);
+module_usb_driver(dtv5100_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index f103ec1..ff941d2 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1928,22 +1928,7 @@
 	.id_table = dw2102_table,
 };
 
-static int __init dw2102_module_init(void)
-{
-	int ret =  usb_register(&dw2102_driver);
-	if (ret)
-		err("usb_register failed. Error number %d", ret);
-
-	return ret;
-}
-
-static void __exit dw2102_module_exit(void)
-{
-	usb_deregister(&dw2102_driver);
-}
-
-module_init(dw2102_module_init);
-module_exit(dw2102_module_exit);
+module_usb_driver(dw2102_driver);
 
 MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
 MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
diff --git a/drivers/media/dvb/dvb-usb/ec168.c b/drivers/media/dvb/dvb-usb/ec168.c
index 78442fe..b4989ba 100644
--- a/drivers/media/dvb/dvb-usb/ec168.c
+++ b/drivers/media/dvb/dvb-usb/ec168.c
@@ -428,27 +428,7 @@
 	.id_table   = ec168_id,
 };
 
-/* module stuff */
-static int __init ec168_module_init(void)
-{
-	int ret;
-	deb_info("%s:\n", __func__);
-	ret = usb_register(&ec168_driver);
-	if (ret)
-		err("module init failed:%d", ret);
-
-	return ret;
-}
-
-static void __exit ec168_module_exit(void)
-{
-	deb_info("%s:\n", __func__);
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&ec168_driver);
-}
-
-module_init(ec168_module_init);
-module_exit(ec168_module_exit);
+module_usb_driver(ec168_driver);
 
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("E3C EC168 DVB-T USB2.0 driver");
diff --git a/drivers/media/dvb/dvb-usb/friio.c b/drivers/media/dvb/dvb-usb/friio.c
index b092dc2..474a17e 100644
--- a/drivers/media/dvb/dvb-usb/friio.c
+++ b/drivers/media/dvb/dvb-usb/friio.c
@@ -514,28 +514,7 @@
 	.id_table	= friio_table,
 };
 
-
-/* module stuff */
-static int __init friio_module_init(void)
-{
-	int ret;
-
-	ret = usb_register(&friio_driver);
-	if (ret)
-		err("usb_register failed. Error number %d", ret);
-
-	return ret;
-}
-
-
-static void __exit friio_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&friio_driver);
-}
-
-module_init(friio_module_init);
-module_exit(friio_module_exit);
+module_usb_driver(friio_driver);
 
 MODULE_AUTHOR("Akihiro Tsukada <tskd2@yahoo.co.jp>");
 MODULE_DESCRIPTION("Driver for Friio ISDB-T USB2.0 Receiver");
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index 63681df..c1f5582 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -209,26 +209,7 @@
 	.id_table	= gl861_table,
 };
 
-/* module stuff */
-static int __init gl861_module_init(void)
-{
-	int ret;
-
-	ret = usb_register(&gl861_driver);
-	if (ret)
-		err("usb_register failed. Error number %d", ret);
-
-	return ret;
-}
-
-static void __exit gl861_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&gl861_driver);
-}
-
-module_init(gl861_module_init);
-module_exit(gl861_module_exit);
+module_usb_driver(gl861_driver);
 
 MODULE_AUTHOR("Carl Lundqvist <comabug@gmail.com>");
 MODULE_DESCRIPTION("Driver MSI Mega Sky 580 DVB-T USB2.0 / GL861");
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 5f71284..5d0384d 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -320,26 +320,7 @@
 	.id_table	= gp8psk_usb_table,
 };
 
-/* module stuff */
-static int __init gp8psk_usb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&gp8psk_usb_driver))) {
-		err("usb_register failed. (%d)",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit gp8psk_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&gp8psk_usb_driver);
-}
-
-module_init(gp8psk_usb_module_init);
-module_exit(gp8psk_usb_module_exit);
+module_usb_driver(gp8psk_usb_driver);
 
 MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
 MODULE_DESCRIPTION("Driver for Genpix DVB-S");
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index c462261..67094b8 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -675,26 +675,7 @@
 	.id_table	= it913x_table,
 };
 
-/* module stuff */
-static int __init it913x_module_init(void)
-{
-	int result = usb_register(&it913x_driver);
-	if (result) {
-		err("usb_register failed. Error number %d", result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit it913x_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&it913x_driver);
-}
-
-module_init(it913x_module_init);
-module_exit(it913x_module_exit);
+module_usb_driver(it913x_driver);
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("it913x USB 2 Driver");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index b922824..1a876a6 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -1289,26 +1289,7 @@
 	.id_table	= lme2510_table,
 };
 
-/* module stuff */
-static int __init lme2510_module_init(void)
-{
-	int result = usb_register(&lme2510_driver);
-	if (result) {
-		err("usb_register failed. Error number %d", result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit lme2510_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&lme2510_driver);
-}
-
-module_init(lme2510_module_init);
-module_exit(lme2510_module_exit);
+module_usb_driver(lme2510_driver);
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index a1e1287..288af29 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -1086,27 +1086,7 @@
 	.id_table	= m920x_table,
 };
 
-/* module stuff */
-static int __init m920x_module_init(void)
-{
-	int ret;
-
-	if ((ret = usb_register(&m920x_driver))) {
-		err("usb_register failed. Error number %d", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static void __exit m920x_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&m920x_driver);
-}
-
-module_init (m920x_module_init);
-module_exit (m920x_module_exit);
+module_usb_driver(m920x_driver);
 
 MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>");
 MODULE_DESCRIPTION("DVB Driver for ULI M920x");
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
index b5c98da..825a8b2 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -1055,24 +1055,7 @@
 	.id_table	= mxl111sf_table,
 };
 
-static int __init mxl111sf_module_init(void)
-{
-	int result = usb_register(&mxl111sf_driver);
-	if (result) {
-		err("usb_register failed. Error number %d", result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit mxl111sf_module_exit(void)
-{
-	usb_deregister(&mxl111sf_driver);
-}
-
-module_init(mxl111sf_module_init);
-module_exit(mxl111sf_module_exit);
+module_usb_driver(mxl111sf_driver);
 
 MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
 MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index 21384da..6c55384 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -225,26 +225,7 @@
 	.id_table	= nova_t_table,
 };
 
-/* module stuff */
-static int __init nova_t_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&nova_t_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit nova_t_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&nova_t_driver);
-}
-
-module_init (nova_t_module_init);
-module_exit (nova_t_module_exit);
+module_usb_driver(nova_t_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2");
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 98fd9a6..c8a9504 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -574,22 +574,7 @@
 	.id_table = opera1_table,
 };
 
-static int __init opera1_module_init(void)
-{
-	int result = 0;
-	if ((result = usb_register(&opera1_driver))) {
-		err("usb_register failed. Error number %d", result);
-	}
-	return result;
-}
-
-static void __exit opera1_module_exit(void)
-{
-	usb_deregister(&opera1_driver);
-}
-
-module_init(opera1_module_init);
-module_exit(opera1_module_exit);
+module_usb_driver(opera1_driver);
 
 MODULE_AUTHOR("Mario Hlawitschka (c) dh1pa@amsat.org");
 MODULE_AUTHOR("Marco Gittler (c) g.marco@freenet.de");
diff --git a/drivers/media/dvb/dvb-usb/pctv452e.c b/drivers/media/dvb/dvb-usb/pctv452e.c
index f9aec5c..f526eb0 100644
--- a/drivers/media/dvb/dvb-usb/pctv452e.c
+++ b/drivers/media/dvb/dvb-usb/pctv452e.c
@@ -1055,22 +1055,7 @@
 	.id_table   = pctv452e_usb_table,
 };
 
-static int __init pctv452e_usb_init(void)
-{
-	int ret = usb_register(&pctv452e_usb_driver);
-	if (ret)
-		err("%s: usb_register failed! Error %d", __FILE__, ret);
-
-	return ret;
-}
-
-static void __exit pctv452e_usb_exit(void)
-{
-	usb_deregister(&pctv452e_usb_driver);
-}
-
-module_init(pctv452e_usb_init);
-module_exit(pctv452e_usb_exit);
+module_usb_driver(pctv452e_usb_driver);
 
 MODULE_AUTHOR("Dominik Kuhlen <dkuhlen@gmx.net>");
 MODULE_AUTHOR("Andre Weidemann <Andre.Weidemann@web.de>");
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 0998fe9..acefaa8 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -781,25 +781,7 @@
 	.id_table   = technisat_usb2_id_table,
 };
 
-/* module stuff */
-static int __init technisat_usb2_module_init(void)
-{
-	int result = usb_register(&technisat_usb2_driver);
-	if (result) {
-		err("usb_register failed. Code %d", result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit technisat_usb2_module_exit(void)
-{
-	usb_deregister(&technisat_usb2_driver);
-}
-
-module_init(technisat_usb2_module_init);
-module_exit(technisat_usb2_module_exit);
+module_usb_driver(technisat_usb2_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
 MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device");
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index ea4eab8..56acf8e 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -799,26 +799,7 @@
 	.id_table	= ttusb2_table,
 };
 
-/* module stuff */
-static int __init ttusb2_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&ttusb2_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit ttusb2_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&ttusb2_driver);
-}
-
-module_init (ttusb2_module_init);
-module_exit (ttusb2_module_exit);
+module_usb_driver(ttusb2_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index 463673a..9b04229 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -143,26 +143,7 @@
 	.id_table	= umt_table,
 };
 
-/* module stuff */
-static int __init umt_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&umt_driver))) {
-		err("usb_register failed. Error number %d",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit umt_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&umt_driver);
-}
-
-module_init (umt_module_init);
-module_exit (umt_module_exit);
+module_usb_driver(umt_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index 45e31f2..07c673a 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -436,26 +436,7 @@
 	.id_table	= vp702x_usb_table,
 };
 
-/* module stuff */
-static int __init vp702x_usb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&vp702x_usb_driver))) {
-		err("usb_register failed. (%d)",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit vp702x_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&vp702x_usb_driver);
-}
-
-module_init(vp702x_usb_module_init);
-module_exit(vp702x_usb_module_exit);
+module_usb_driver(vp702x_usb_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones");
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 90873af..d750724 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -294,26 +294,7 @@
 	.id_table	= vp7045_usb_table,
 };
 
-/* module stuff */
-static int __init vp7045_usb_module_init(void)
-{
-	int result;
-	if ((result = usb_register(&vp7045_usb_driver))) {
-		err("usb_register failed. (%d)",result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit vp7045_usb_module_exit(void)
-{
-	/* deregister this driver from the USB subsystem */
-	usb_deregister(&vp7045_usb_driver);
-}
-
-module_init(vp7045_usb_module_init);
-module_exit(vp7045_usb_module_exit);
+module_usb_driver(vp7045_usb_driver);
 
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
 MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0");
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 51c7121..b1fe513 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -557,26 +557,7 @@
 	.resume			= smsusb_resume,
 };
 
-static int __init smsusb_module_init(void)
-{
-	int rc = usb_register(&smsusb_driver);
-	if (rc)
-		sms_err("usb_register failed. Error number %d", rc);
-
-	sms_debug("");
-
-	return rc;
-}
-
-static void __exit smsusb_module_exit(void)
-{
-	/* Regular USB Cleanup */
-	usb_deregister(&smsusb_driver);
-	sms_info("end");
-}
-
-module_init(smsusb_module_init);
-module_exit(smsusb_module_exit);
+module_usb_driver(smsusb_driver);
 
 MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
 MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 420bb42..e90192f 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1794,26 +1794,7 @@
       .id_table		= ttusb_table,
 };
 
-static int __init ttusb_init(void)
-{
-	int err;
-
-	if ((err = usb_register(&ttusb_driver)) < 0) {
-		printk("%s: usb_register failed! Error number %d",
-		       __FILE__, err);
-		return err;
-	}
-
-	return 0;
-}
-
-static void __exit ttusb_exit(void)
-{
-	usb_deregister(&ttusb_driver);
-}
-
-module_init(ttusb_init);
-module_exit(ttusb_exit);
+module_usb_driver(ttusb_driver);
 
 MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>");
 MODULE_DESCRIPTION("TTUSB DVB Driver");
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index f893bff..504c812 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1756,26 +1756,7 @@
 	.id_table	= ttusb_dec_table,
 };
 
-static int __init ttusb_dec_init(void)
-{
-	int result;
-
-	if ((result = usb_register(&ttusb_dec_driver)) < 0) {
-		printk("%s: initialisation failed: error %d.\n", __func__,
-		       result);
-		return result;
-	}
-
-	return 0;
-}
-
-static void __exit ttusb_dec_exit(void)
-{
-	usb_deregister(&ttusb_dec_driver);
-}
-
-module_init(ttusb_dec_init);
-module_exit(ttusb_dec_exit);
+module_usb_driver(ttusb_dec_driver);
 
 MODULE_AUTHOR("Alex Woods <linux-dvb@giblets.org>");
 MODULE_DESCRIPTION(DRIVER_NAME);
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 25e58cb..f36905b 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -624,21 +624,7 @@
 	return 0;
 }
 
-static int __init dsbr100_init(void)
-{
-	int retval = usb_register(&usb_dsbr100_driver);
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-	return retval;
-}
-
-static void __exit dsbr100_exit(void)
-{
-	usb_deregister(&usb_dsbr100_driver);
-}
-
-module_init (dsbr100_init);
-module_exit (dsbr100_exit);
+module_usb_driver(usb_dsbr100_driver);
 
 MODULE_AUTHOR( DRIVER_AUTHOR );
 MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 1742bd8..a860a72 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -659,25 +659,4 @@
 	return retval;
 }
 
-static int __init amradio_init(void)
-{
-	int retval = usb_register(&usb_amradio_driver);
-
-	pr_info(KBUILD_MODNAME
-		": version " DRIVER_VERSION " " DRIVER_DESC "\n");
-
-	if (retval)
-		pr_err(KBUILD_MODNAME
-			": usb_register failed. Error number %d\n", retval);
-
-	return retval;
-}
-
-static void __exit amradio_exit(void)
-{
-	usb_deregister(&usb_amradio_driver);
-}
-
-module_init(amradio_init);
-module_exit(amradio_exit);
-
+module_usb_driver(usb_amradio_driver);
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index a6ad707..b7debb6 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -861,33 +861,7 @@
 	.supports_autosuspend	= 1,
 };
 
-
-
-/**************************************************************************
- * Module Interface
- **************************************************************************/
-
-/*
- * si470x_module_init - module init
- */
-static int __init si470x_module_init(void)
-{
-	printk(KERN_INFO DRIVER_DESC ", Version " DRIVER_VERSION "\n");
-	return usb_register(&si470x_usb_driver);
-}
-
-
-/*
- * si470x_module_exit - module exit
- */
-static void __exit si470x_module_exit(void)
-{
-	usb_deregister(&si470x_usb_driver);
-}
-
-
-module_init(si470x_module_init);
-module_exit(si470x_module_exit);
+module_usb_driver(si470x_usb_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 303f22e..baf907b 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -189,7 +189,7 @@
 	dma_addr_t inbuf_dma;
 	dma_addr_t outbuf_dma;
 
-	unsigned char old_data[2];  /* Detect duplicate events */
+	unsigned char old_data;     /* Detect duplicate events */
 	unsigned long old_jiffies;
 	unsigned long acc_jiffies;  /* handle acceleration */
 	unsigned long first_jiffies;
@@ -221,35 +221,35 @@
 /* Translation table from hardware messages to input events. */
 static const struct {
 	short kind;
-	unsigned char data1, data2;
+	unsigned char data;
 	int type;
 	unsigned int code;
 	int value;
 }  ati_remote_tbl[] = {
 	/* Directional control pad axes */
-	{KIND_ACCEL,   0x35, 0x70, EV_REL, REL_X, -1},	 /* left */
-	{KIND_ACCEL,   0x36, 0x71, EV_REL, REL_X, 1},    /* right */
-	{KIND_ACCEL,   0x37, 0x72, EV_REL, REL_Y, -1},	 /* up */
-	{KIND_ACCEL,   0x38, 0x73, EV_REL, REL_Y, 1},    /* down */
+	{KIND_ACCEL,   0x70, EV_REL, REL_X, -1},   /* left */
+	{KIND_ACCEL,   0x71, EV_REL, REL_X, 1},    /* right */
+	{KIND_ACCEL,   0x72, EV_REL, REL_Y, -1},   /* up */
+	{KIND_ACCEL,   0x73, EV_REL, REL_Y, 1},    /* down */
 	/* Directional control pad diagonals */
-	{KIND_LU,      0x39, 0x74, EV_REL, 0, 0},        /* left up */
-	{KIND_RU,      0x3a, 0x75, EV_REL, 0, 0},        /* right up */
-	{KIND_LD,      0x3c, 0x77, EV_REL, 0, 0},        /* left down */
-	{KIND_RD,      0x3b, 0x76, EV_REL, 0, 0},        /* right down */
+	{KIND_LU,      0x74, EV_REL, 0, 0},        /* left up */
+	{KIND_RU,      0x75, EV_REL, 0, 0},        /* right up */
+	{KIND_LD,      0x77, EV_REL, 0, 0},        /* left down */
+	{KIND_RD,      0x76, EV_REL, 0, 0},        /* right down */
 
 	/* "Mouse button" buttons */
-	{KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
-	{KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
-	{KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
-	{KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+	{KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+	{KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+	{KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+	{KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
 
 	/* Artificial "doubleclick" events are generated by the hardware.
 	 * They are mapped to the "side" and "extra" mouse buttons here. */
-	{KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
-	{KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+	{KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+	{KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
 
 	/* Non-mouse events are handled by rc-core */
-	{KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+	{KIND_END, 0x00, EV_MAX + 1, 0, 0}
 };
 
 /* Local function prototypes */
@@ -397,25 +397,6 @@
 }
 
 /*
- *	ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
-	int i;
-
-	for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
-		/*
-		 * Decide if the table entry matches the remote input.
-		 */
-		if (ati_remote_tbl[i].data1 == d1 &&
-		    ati_remote_tbl[i].data2 == d2)
-			return i;
-
-	}
-	return -1;
-}
-
-/*
  *	ati_remote_compute_accel
  *
  * Implements acceleration curve for directional control pad
@@ -463,7 +444,15 @@
 	int index = -1;
 	int acc;
 	int remote_num;
-	unsigned char scancode[2];
+	unsigned char scancode;
+	int i;
+
+	/*
+	 * data[0] = 0x14
+	 * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+	 * data[2] = the key code (with toggle bit in MSB with some models)
+	 * data[3] = channel << 4 (the low 4 bits must be zero)
+	 */
 
 	/* Deal with strange looking inputs */
 	if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@
 		return;
 	}
 
+	if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+		dbginfo(&ati_remote->interface->dev,
+			"wrong checksum in input: %02x %02x %02x %02x\n",
+			data[0], data[1], data[2], data[3]);
+		return;
+	}
+
 	/* Mask unwanted remote channels.  */
 	/* note: remote_num is 0-based, channel 1 on remote == 0 here */
 	remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@
 		return;
 	}
 
-	scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
 	/*
-	 * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
-	 * so we have to clear them. The first bit is a bit tricky as the
-	 * "non-toggled" state depends on remote_num, so we xor it with the
-	 * second bit which is only used for toggle.
+	 * MSB is a toggle code, though only used by some devices
+	 * (e.g. SnapStream Firefly)
 	 */
-	scancode[0] ^= (data[2] & 0x80);
+	scancode = data[2] & 0x7f;
 
-	scancode[1] = data[2] & ~0x80;
-
-	/* Look up event code index in mouse translation table. */
-	index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+	/* Look up event code index in the mouse translation table. */
+	for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+		if (scancode == ati_remote_tbl[i].data) {
+			index = i;
+			break;
+		}
+	}
 
 	if (index >= 0) {
 		dbginfo(&ati_remote->interface->dev,
-			"channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
-			remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+			"channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+			remote_num, data[2], index, ati_remote_tbl[index].code);
 		if (!dev)
 			return; /* no mouse device */
 	} else
 		dbginfo(&ati_remote->interface->dev,
-			"channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
-			remote_num, data[1], data[2], scancode[0], scancode[1]);
+			"channel 0x%02x; key data %02x, scancode %02x\n",
+			remote_num, data[2], scancode);
 
 
 	if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@
 		unsigned long now = jiffies;
 
 		/* Filter duplicate events which happen "too close" together. */
-		if (ati_remote->old_data[0] == data[1] &&
-		    ati_remote->old_data[1] == data[2] &&
+		if (ati_remote->old_data == data[2] &&
 		    time_before(now, ati_remote->old_jiffies +
 				     msecs_to_jiffies(repeat_filter))) {
 			ati_remote->repeat_count++;
@@ -533,8 +527,7 @@
 			ati_remote->first_jiffies = now;
 		}
 
-		ati_remote->old_data[0] = data[1];
-		ati_remote->old_data[1] = data[2];
+		ati_remote->old_data = data[2];
 		ati_remote->old_jiffies = now;
 
 		/* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@
 
 		if (index < 0) {
 			/* Not a mouse event, hand it to rc-core. */
-			u32 rc_code = (scancode[0] << 8) | scancode[1];
 
 			/*
 			 * We don't use the rc-core repeat handling yet as
 			 * it would cause ghost repeats which would be a
 			 * regression for this driver.
 			 */
-			rc_keydown_notimeout(ati_remote->rdev, rc_code,
+			rc_keydown_notimeout(ati_remote->rdev, scancode,
 					     data[2]);
 			rc_keyup(ati_remote->rdev);
 			return;
@@ -607,8 +599,7 @@
 		input_sync(dev);
 
 		ati_remote->old_jiffies = jiffies;
-		ati_remote->old_data[0] = data[1];
-		ati_remote->old_data[1] = data[2];
+		ati_remote->old_data = data[2];
 	}
 }
 
@@ -908,38 +899,7 @@
 	kfree(ati_remote);
 }
 
-/*
- *	ati_remote_init
- */
-static int __init ati_remote_init(void)
-{
-	int result;
-
-	result = usb_register(&ati_remote_driver);
-	if (result)
-		printk(KERN_ERR KBUILD_MODNAME
-		       ": usb_register error #%d\n", result);
-	else
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-		       DRIVER_DESC "\n");
-
-	return result;
-}
-
-/*
- *	ati_remote_exit
- */
-static void __exit ati_remote_exit(void)
-{
-	usb_deregister(&ati_remote_driver);
-}
-
-/*
- *	module specification
- */
-
-module_init(ati_remote_init);
-module_exit(ati_remote_exit);
+module_usb_driver(ati_remote_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index cf10ecf..860c112 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -324,7 +324,7 @@
 		return dev->extra_buf2_address + r_pointer;
 	}
 
-	dbg("attempt to read beyong ring bufer end");
+	dbg("attempt to read beyond ring buffer end");
 	return 0;
 }
 
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index fd108d9..6f978e8 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -227,7 +227,7 @@
 
 	/* TX buffer */
 	unsigned *tx_buffer;			/* input samples buffer*/
-	int tx_pos;				/* position in that bufer */
+	int tx_pos;				/* position in that buffer */
 	int tx_len;				/* current len of tx buffer */
 	int tx_done;				/* done transmitting */
 						/* one more sample pending*/
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6ed9646..3aeb29a 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2458,23 +2458,4 @@
 	return rc;
 }
 
-static int __init imon_init(void)
-{
-	int rc;
-
-	rc = usb_register(&imon_driver);
-	if (rc) {
-		pr_err("usb register failed(%d)\n", rc);
-		rc = -ENODEV;
-	}
-
-	return rc;
-}
-
-static void __exit imon_exit(void)
-{
-	usb_deregister(&imon_driver);
-}
-
-module_init(imon_init);
-module_exit(imon_exit);
+module_usb_driver(imon_driver);
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index e1b8b26..81506440 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -27,55 +27,55 @@
 #include <media/rc-map.h>
 
 static struct rc_map_table ati_x10[] = {
-	{ 0xd20d, KEY_1 },
-	{ 0xd30e, KEY_2 },
-	{ 0xd40f, KEY_3 },
-	{ 0xd510, KEY_4 },
-	{ 0xd611, KEY_5 },
-	{ 0xd712, KEY_6 },
-	{ 0xd813, KEY_7 },
-	{ 0xd914, KEY_8 },
-	{ 0xda15, KEY_9 },
-	{ 0xdc17, KEY_0 },
-	{ 0xc500, KEY_A },
-	{ 0xc601, KEY_B },
-	{ 0xde19, KEY_C },
-	{ 0xe01b, KEY_D },
-	{ 0xe621, KEY_E },
-	{ 0xe823, KEY_F },
+	{ 0x0d, KEY_1 },
+	{ 0x0e, KEY_2 },
+	{ 0x0f, KEY_3 },
+	{ 0x10, KEY_4 },
+	{ 0x11, KEY_5 },
+	{ 0x12, KEY_6 },
+	{ 0x13, KEY_7 },
+	{ 0x14, KEY_8 },
+	{ 0x15, KEY_9 },
+	{ 0x17, KEY_0 },
+	{ 0x00, KEY_A },
+	{ 0x01, KEY_B },
+	{ 0x19, KEY_C },
+	{ 0x1b, KEY_D },
+	{ 0x21, KEY_E },
+	{ 0x23, KEY_F },
 
-	{ 0xdd18, KEY_KPENTER },    /* "check" */
-	{ 0xdb16, KEY_MENU },       /* "menu" */
-	{ 0xc702, KEY_POWER },      /* Power */
-	{ 0xc803, KEY_TV },         /* TV */
-	{ 0xc904, KEY_DVD },        /* DVD */
-	{ 0xca05, KEY_WWW },        /* WEB */
-	{ 0xcb06, KEY_BOOKMARKS },  /* "book" */
-	{ 0xcc07, KEY_EDIT },       /* "hand" */
-	{ 0xe11c, KEY_COFFEE },     /* "timer" */
-	{ 0xe520, KEY_FRONT },      /* "max" */
-	{ 0xe21d, KEY_LEFT },       /* left */
-	{ 0xe41f, KEY_RIGHT },      /* right */
-	{ 0xe722, KEY_DOWN },       /* down */
-	{ 0xdf1a, KEY_UP },         /* up */
-	{ 0xe31e, KEY_OK },         /* "OK" */
-	{ 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
-	{ 0xcd08, KEY_VOLUMEUP },   /* VOL - */
-	{ 0xcf0a, KEY_MUTE },       /* MUTE  */
-	{ 0xd00b, KEY_CHANNELUP },  /* CH + */
-	{ 0xd10c, KEY_CHANNELDOWN },/* CH - */
-	{ 0xec27, KEY_RECORD },     /* ( o) red */
-	{ 0xea25, KEY_PLAY },       /* ( >) */
-	{ 0xe924, KEY_REWIND },     /* (<<) */
-	{ 0xeb26, KEY_FORWARD },    /* (>>) */
-	{ 0xed28, KEY_STOP },       /* ([]) */
-	{ 0xee29, KEY_PAUSE },      /* ('') */
-	{ 0xf02b, KEY_PREVIOUS },   /* (<-) */
-	{ 0xef2a, KEY_NEXT },       /* (>+) */
-	{ 0xf22d, KEY_INFO },       /* PLAYING */
-	{ 0xf32e, KEY_HOME },       /* TOP */
-	{ 0xf42f, KEY_END },        /* END */
-	{ 0xf530, KEY_SELECT },     /* SELECT */
+	{ 0x18, KEY_KPENTER },    /* "check" */
+	{ 0x16, KEY_MENU },       /* "menu" */
+	{ 0x02, KEY_POWER },      /* Power */
+	{ 0x03, KEY_TV },         /* TV */
+	{ 0x04, KEY_DVD },        /* DVD */
+	{ 0x05, KEY_WWW },        /* WEB */
+	{ 0x06, KEY_BOOKMARKS },  /* "book" */
+	{ 0x07, KEY_EDIT },       /* "hand" */
+	{ 0x1c, KEY_COFFEE },     /* "timer" */
+	{ 0x20, KEY_FRONT },      /* "max" */
+	{ 0x1d, KEY_LEFT },       /* left */
+	{ 0x1f, KEY_RIGHT },      /* right */
+	{ 0x22, KEY_DOWN },       /* down */
+	{ 0x1a, KEY_UP },         /* up */
+	{ 0x1e, KEY_OK },         /* "OK" */
+	{ 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+	{ 0x08, KEY_VOLUMEUP },   /* VOL - */
+	{ 0x0a, KEY_MUTE },       /* MUTE  */
+	{ 0x0b, KEY_CHANNELUP },  /* CH + */
+	{ 0x0c, KEY_CHANNELDOWN },/* CH - */
+	{ 0x27, KEY_RECORD },     /* ( o) red */
+	{ 0x25, KEY_PLAY },       /* ( >) */
+	{ 0x24, KEY_REWIND },     /* (<<) */
+	{ 0x26, KEY_FORWARD },    /* (>>) */
+	{ 0x28, KEY_STOP },       /* ([]) */
+	{ 0x29, KEY_PAUSE },      /* ('') */
+	{ 0x2b, KEY_PREVIOUS },   /* (<-) */
+	{ 0x2a, KEY_NEXT },       /* (>+) */
+	{ 0x2d, KEY_INFO },       /* PLAYING */
+	{ 0x2e, KEY_HOME },       /* TOP */
+	{ 0x2f, KEY_END },        /* END */
+	{ 0x30, KEY_SELECT },     /* SELECT */
 };
 
 static struct rc_map_list ati_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 09e2cc0..479cdb8 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -25,70 +25,70 @@
 #include <media/rc-map.h>
 
 static struct rc_map_table medion_x10[] = {
-	{ 0xf12c, KEY_TV },    /* TV */
-	{ 0xf22d, KEY_VCR },   /* VCR */
-	{ 0xc904, KEY_DVD },   /* DVD */
-	{ 0xcb06, KEY_AUDIO }, /* MUSIC */
+	{ 0x2c, KEY_TV },    /* TV */
+	{ 0x2d, KEY_VCR },   /* VCR */
+	{ 0x04, KEY_DVD },   /* DVD */
+	{ 0x06, KEY_AUDIO }, /* MUSIC */
 
-	{ 0xf32e, KEY_RADIO },     /* RADIO */
-	{ 0xca05, KEY_DIRECTORY }, /* PHOTO */
-	{ 0xf42f, KEY_INFO },      /* TV-PREVIEW */
-	{ 0xf530, KEY_LIST },      /* CHANNEL-LST */
+	{ 0x2e, KEY_RADIO },     /* RADIO */
+	{ 0x05, KEY_DIRECTORY }, /* PHOTO */
+	{ 0x2f, KEY_INFO },      /* TV-PREVIEW */
+	{ 0x30, KEY_LIST },      /* CHANNEL-LST */
 
-	{ 0xe01b, KEY_SETUP }, /* SETUP */
-	{ 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
+	{ 0x1b, KEY_SETUP }, /* SETUP */
+	{ 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
 
-	{ 0xcd08, KEY_VOLUMEDOWN },  /* VOL - */
-	{ 0xce09, KEY_VOLUMEUP },    /* VOL + */
-	{ 0xd00b, KEY_CHANNELUP },   /* CHAN + */
-	{ 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
-	{ 0xc500, KEY_MUTE },        /* MUTE */
+	{ 0x08, KEY_VOLUMEDOWN },  /* VOL - */
+	{ 0x09, KEY_VOLUMEUP },    /* VOL + */
+	{ 0x0b, KEY_CHANNELUP },   /* CHAN + */
+	{ 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+	{ 0x00, KEY_MUTE },        /* MUTE */
 
-	{ 0xf732, KEY_RED }, /* red */
-	{ 0xf833, KEY_GREEN }, /* green */
-	{ 0xf934, KEY_YELLOW }, /* yellow */
-	{ 0xfa35, KEY_BLUE }, /* blue */
-	{ 0xdb16, KEY_TEXT }, /* TXT */
+	{ 0x32, KEY_RED }, /* red */
+	{ 0x33, KEY_GREEN }, /* green */
+	{ 0x34, KEY_YELLOW }, /* yellow */
+	{ 0x35, KEY_BLUE }, /* blue */
+	{ 0x16, KEY_TEXT }, /* TXT */
 
-	{ 0xd20d, KEY_1 },
-	{ 0xd30e, KEY_2 },
-	{ 0xd40f, KEY_3 },
-	{ 0xd510, KEY_4 },
-	{ 0xd611, KEY_5 },
-	{ 0xd712, KEY_6 },
-	{ 0xd813, KEY_7 },
-	{ 0xd914, KEY_8 },
-	{ 0xda15, KEY_9 },
-	{ 0xdc17, KEY_0 },
-	{ 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
-	{ 0xe520, KEY_DELETE }, /* DELETE */
+	{ 0x0d, KEY_1 },
+	{ 0x0e, KEY_2 },
+	{ 0x0f, KEY_3 },
+	{ 0x10, KEY_4 },
+	{ 0x11, KEY_5 },
+	{ 0x12, KEY_6 },
+	{ 0x13, KEY_7 },
+	{ 0x14, KEY_8 },
+	{ 0x15, KEY_9 },
+	{ 0x17, KEY_0 },
+	{ 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+	{ 0x20, KEY_DELETE }, /* DELETE */
 
-	{ 0xfb36, KEY_KEYBOARD }, /* RENAME */
-	{ 0xdd18, KEY_SCREEN },   /* SNAPSHOT */
+	{ 0x36, KEY_KEYBOARD }, /* RENAME */
+	{ 0x18, KEY_SCREEN },   /* SNAPSHOT */
 
-	{ 0xdf1a, KEY_UP },    /* up */
-	{ 0xe722, KEY_DOWN },  /* down */
-	{ 0xe21d, KEY_LEFT },  /* left */
-	{ 0xe41f, KEY_RIGHT }, /* right */
-	{ 0xe31e, KEY_OK },    /* OK */
+	{ 0x1a, KEY_UP },    /* up */
+	{ 0x22, KEY_DOWN },  /* down */
+	{ 0x1d, KEY_LEFT },  /* left */
+	{ 0x1f, KEY_RIGHT }, /* right */
+	{ 0x1e, KEY_OK },    /* OK */
 
-	{ 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
-	{ 0xfd38, KEY_EDIT },   /* EDIT IMAGE */
+	{ 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+	{ 0x38, KEY_EDIT },   /* EDIT IMAGE */
 
-	{ 0xe924, KEY_REWIND },   /* rewind  (<<) */
-	{ 0xea25, KEY_PLAY },     /* play    ( >) */
-	{ 0xeb26, KEY_FORWARD },  /* forward (>>) */
-	{ 0xec27, KEY_RECORD },   /* record  ( o) */
-	{ 0xed28, KEY_STOP },     /* stop    ([]) */
-	{ 0xee29, KEY_PAUSE },    /* pause   ('') */
+	{ 0x24, KEY_REWIND },   /* rewind  (<<) */
+	{ 0x25, KEY_PLAY },     /* play    ( >) */
+	{ 0x26, KEY_FORWARD },  /* forward (>>) */
+	{ 0x27, KEY_RECORD },   /* record  ( o) */
+	{ 0x28, KEY_STOP },     /* stop    ([]) */
+	{ 0x29, KEY_PAUSE },    /* pause   ('') */
 
-	{ 0xe621, KEY_PREVIOUS },        /* prev */
-	{ 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
-	{ 0xe823, KEY_NEXT },            /* next */
-	{ 0xde19, KEY_MENU },            /* MENU */
-	{ 0xff3a, KEY_LANGUAGE },        /* AUDIO */
+	{ 0x21, KEY_PREVIOUS },        /* prev */
+	{ 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+	{ 0x23, KEY_NEXT },            /* next */
+	{ 0x19, KEY_MENU },            /* MENU */
+	{ 0x3a, KEY_LANGUAGE },        /* AUDIO */
 
-	{ 0xc702, KEY_POWER }, /* POWER */
+	{ 0x02, KEY_POWER }, /* POWER */
 };
 
 static struct rc_map_list medion_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index ef14652..c7f33ec 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -22,63 +22,63 @@
 #include <media/rc-map.h>
 
 static struct rc_map_table snapstream_firefly[] = {
-	{ 0xf12c, KEY_ZOOM },       /* Maximize */
-	{ 0xc702, KEY_CLOSE },
+	{ 0x2c, KEY_ZOOM },       /* Maximize */
+	{ 0x02, KEY_CLOSE },
 
-	{ 0xd20d, KEY_1 },
-	{ 0xd30e, KEY_2 },
-	{ 0xd40f, KEY_3 },
-	{ 0xd510, KEY_4 },
-	{ 0xd611, KEY_5 },
-	{ 0xd712, KEY_6 },
-	{ 0xd813, KEY_7 },
-	{ 0xd914, KEY_8 },
-	{ 0xda15, KEY_9 },
-	{ 0xdc17, KEY_0 },
-	{ 0xdb16, KEY_BACK },
-	{ 0xdd18, KEY_KPENTER },    /* ent */
+	{ 0x0d, KEY_1 },
+	{ 0x0e, KEY_2 },
+	{ 0x0f, KEY_3 },
+	{ 0x10, KEY_4 },
+	{ 0x11, KEY_5 },
+	{ 0x12, KEY_6 },
+	{ 0x13, KEY_7 },
+	{ 0x14, KEY_8 },
+	{ 0x15, KEY_9 },
+	{ 0x17, KEY_0 },
+	{ 0x16, KEY_BACK },
+	{ 0x18, KEY_KPENTER },    /* ent */
 
-	{ 0xce09, KEY_VOLUMEUP },
-	{ 0xcd08, KEY_VOLUMEDOWN },
-	{ 0xcf0a, KEY_MUTE },
-	{ 0xd00b, KEY_CHANNELUP },
-	{ 0xd10c, KEY_CHANNELDOWN },
-	{ 0xc500, KEY_VENDOR },     /* firefly */
+	{ 0x09, KEY_VOLUMEUP },
+	{ 0x08, KEY_VOLUMEDOWN },
+	{ 0x0a, KEY_MUTE },
+	{ 0x0b, KEY_CHANNELUP },
+	{ 0x0c, KEY_CHANNELDOWN },
+	{ 0x00, KEY_VENDOR },     /* firefly */
 
-	{ 0xf32e, KEY_INFO },
-	{ 0xf42f, KEY_OPTION },
+	{ 0x2e, KEY_INFO },
+	{ 0x2f, KEY_OPTION },
 
-	{ 0xe21d, KEY_LEFT },
-	{ 0xe41f, KEY_RIGHT },
-	{ 0xe722, KEY_DOWN },
-	{ 0xdf1a, KEY_UP },
-	{ 0xe31e, KEY_OK },
+	{ 0x1d, KEY_LEFT },
+	{ 0x1f, KEY_RIGHT },
+	{ 0x22, KEY_DOWN },
+	{ 0x1a, KEY_UP },
+	{ 0x1e, KEY_OK },
 
-	{ 0xe11c, KEY_MENU },
-	{ 0xe520, KEY_EXIT },
+	{ 0x1c, KEY_MENU },
+	{ 0x20, KEY_EXIT },
 
-	{ 0xec27, KEY_RECORD },
-	{ 0xea25, KEY_PLAY },
-	{ 0xed28, KEY_STOP },
-	{ 0xe924, KEY_REWIND },
-	{ 0xeb26, KEY_FORWARD },
-	{ 0xee29, KEY_PAUSE },
-	{ 0xf02b, KEY_PREVIOUS },
-	{ 0xef2a, KEY_NEXT },
+	{ 0x27, KEY_RECORD },
+	{ 0x25, KEY_PLAY },
+	{ 0x28, KEY_STOP },
+	{ 0x24, KEY_REWIND },
+	{ 0x26, KEY_FORWARD },
+	{ 0x29, KEY_PAUSE },
+	{ 0x2b, KEY_PREVIOUS },
+	{ 0x2a, KEY_NEXT },
 
-	{ 0xcb06, KEY_AUDIO },      /* Music */
-	{ 0xca05, KEY_IMAGES },     /* Photos */
-	{ 0xc904, KEY_DVD },
-	{ 0xc803, KEY_TV },
-	{ 0xcc07, KEY_VIDEO },
+	{ 0x06, KEY_AUDIO },      /* Music */
+	{ 0x05, KEY_IMAGES },     /* Photos */
+	{ 0x04, KEY_DVD },
+	{ 0x03, KEY_TV },
+	{ 0x07, KEY_VIDEO },
 
-	{ 0xc601, KEY_HELP },
-	{ 0xf22d, KEY_MODE },       /* Mouse */
+	{ 0x01, KEY_HELP },
+	{ 0x2d, KEY_MODE },       /* Mouse */
 
-	{ 0xde19, KEY_A },
-	{ 0xe01b, KEY_B },
-	{ 0xe621, KEY_C },
-	{ 0xe823, KEY_D },
+	{ 0x19, KEY_A },
+	{ 0x1b, KEY_B },
+	{ 0x21, KEY_C },
+	{ 0x23, KEY_D },
 };
 
 static struct rc_map_list snapstream_firefly_map = {
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 60d3c1e..20bb12d 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1448,25 +1448,7 @@
 	.id_table =	mceusb_dev_table
 };
 
-static int __init mceusb_dev_init(void)
-{
-	int ret;
-
-	ret = usb_register(&mceusb_dev_driver);
-	if (ret < 0)
-		printk(KERN_ERR DRIVER_NAME
-		       ": usb register failed, result = %d\n", ret);
-
-	return ret;
-}
-
-static void __exit mceusb_dev_exit(void)
-{
-	usb_deregister(&mceusb_dev_driver);
-}
-
-module_init(mceusb_dev_init);
-module_exit(mceusb_dev_exit);
+module_usb_driver(mceusb_dev_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 29f9000..f5db8b9 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -715,7 +715,7 @@
 }
 
 /* class for /sys/class/rc */
-static char *ir_devnode(struct device *dev, mode_t *mode)
+static char *ir_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
 }
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 61287fc..07322fb 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1300,25 +1300,7 @@
 	.id_table	= redrat3_dev_table
 };
 
-static int __init redrat3_dev_init(void)
-{
-	int ret;
-
-	ret = usb_register(&redrat3_dev_driver);
-	if (ret < 0)
-		pr_err(DRIVER_NAME
-		       ": usb register failed, result = %d\n", ret);
-
-	return ret;
-}
-
-static void __exit redrat3_dev_exit(void)
-{
-	usb_deregister(&redrat3_dev_driver);
-}
-
-module_init(redrat3_dev_init);
-module_exit(redrat3_dev_exit);
+module_usb_driver(redrat3_dev_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index e435d94..b1d29d0 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -523,33 +523,7 @@
 	return 0;
 }
 
-/**
- *	streamzap_init
- */
-static int __init streamzap_init(void)
-{
-	int ret;
-
-	/* register this driver with the USB subsystem */
-	ret = usb_register(&streamzap_driver);
-	if (ret < 0)
-		printk(KERN_ERR DRIVER_NAME ": usb register failed, "
-		       "result = %d\n", ret);
-
-	return ret;
-}
-
-/**
- *	streamzap_exit
- */
-static void __exit streamzap_exit(void)
-{
-	usb_deregister(&streamzap_driver);
-}
-
-
-module_init(streamzap_init);
-module_exit(streamzap_exit);
+module_usb_driver(streamzap_driver);
 
 MODULE_AUTHOR("Jarod Wilson <jarod@wilsonet.com>");
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 13f54b5..e7f7a57 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1176,6 +1176,6 @@
 module_init(wbcir_init);
 module_exit(wbcir_exit);
 
-MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
 MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 39fc923..1c6015a 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -162,11 +162,14 @@
 	switch (tv.model) {
 	case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
 	case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+	case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+	case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
 	case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
 	case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
 	case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
 	case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
 	case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+	case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
 	case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
 	case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
 		break;
@@ -324,6 +327,10 @@
 		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
 	{ USB_DEVICE(0x2040, 0x8200),
 		.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+	{ USB_DEVICE(0x2040, 0x7260),
+		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+	{ USB_DEVICE(0x2040, 0x7213),
+		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
 	{ },
 };
 
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 53dae2a..60b021e 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1385,26 +1385,4 @@
 	.id_table = cx231xx_id_table,
 };
 
-static int __init cx231xx_module_init(void)
-{
-	int result;
-
-	printk(KERN_INFO DRIVER_NAME " v4l2 driver loaded.\n");
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&cx231xx_usb_driver);
-	if (result)
-		cx231xx_err(DRIVER_NAME
-			    " usb_register failed. Error number %d.\n", result);
-
-	return result;
-}
-
-static void __exit cx231xx_module_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&cx231xx_usb_driver);
-}
-
-module_init(cx231xx_module_init);
-module_exit(cx231xx_module_exit);
+module_usb_driver(cx231xx_usb_driver);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 9b747c2..93807dc 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -3325,26 +3325,4 @@
 	.id_table = em28xx_id_table,
 };
 
-static int __init em28xx_module_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&em28xx_usb_driver);
-	if (result)
-		em28xx_err(DRIVER_NAME
-			   " usb_register failed. Error number %d.\n", result);
-
-	printk(KERN_INFO DRIVER_NAME " driver loaded\n");
-
-	return result;
-}
-
-static void __exit em28xx_module_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&em28xx_usb_driver);
-}
-
-module_init(em28xx_module_init);
-module_exit(em28xx_module_exit);
+module_usb_driver(em28xx_usb_driver);
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index d3777c8..40f214a 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -2680,27 +2680,4 @@
 	.disconnect = et61x251_usb_disconnect,
 };
 
-/*****************************************************************************/
-
-static int __init et61x251_module_init(void)
-{
-	int err = 0;
-
-	KDBG(2, ET61X251_MODULE_NAME " v" ET61X251_MODULE_VERSION);
-	KDBG(3, ET61X251_MODULE_AUTHOR);
-
-	if ((err = usb_register(&et61x251_usb_driver)))
-		KDBG(1, "usb_register() failed");
-
-	return err;
-}
-
-
-static void __exit et61x251_module_exit(void)
-{
-	usb_deregister(&et61x251_usb_driver);
-}
-
-
-module_init(et61x251_module_init);
-module_exit(et61x251_module_exit);
+module_usb_driver(et61x251_usb_driver);
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 6ae2616..636627b 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -288,15 +288,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 4c56dbe..ea17b5d 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1067,15 +1067,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index f9b86b2..8f33bbd 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2132,15 +2132,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 0357d6d..81a4adb 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -895,16 +895,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index ea48200..0107513 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -290,16 +290,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index 2ced3b7..a8f54c2 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -524,22 +524,7 @@
 
 /*====================== Init and Exit module functions ====================*/
 
-static int __init sd_mod_init(void)
-{
-	PDEBUG(D_PROBE, "driver startup - version %s", DRIVER_VERSION);
-
-	if (usb_register(&sd_driver) < 0)
-		return -1;
-	return 0;
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
 
 /*==========================================================================*/
 
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 881e04c..2ca10df 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -838,13 +838,13 @@
 	gspca_dev->usb_err = 0;
 
 	/* do the specific subdriver stuff before endpoint selection */
-	gspca_dev->alt = 0;
+	intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+	gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
 	if (gspca_dev->sd_desc->isoc_init) {
 		ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
 		if (ret < 0)
 			goto unlock;
 	}
-	intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
 	xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
 				   : USB_ENDPOINT_XFER_ISOC;
 
@@ -957,7 +957,7 @@
 				ret = -EIO;
 				goto out;
 			}
-			alt = ep_tb[--alt_idx].alt;
+			gspca_dev->alt = ep_tb[--alt_idx].alt;
 		}
 	}
 out:
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 8e3dabe..5ab3f7e 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -582,16 +582,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 4fe51fd..e8e8f2f 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -413,16 +413,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index f3f7fe0..b1da7f4 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -634,15 +634,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 67533e5..9fe3816b 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -404,19 +404,7 @@
 	.disconnect = m5602_disconnect
 };
 
-/* -- module insert / remove -- */
-static int __init mod_m5602_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit mod_m5602_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(mod_m5602_init);
-module_exit(mod_m5602_exit);
+module_usb_driver(sd_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index ef45fa5..5c2ea05c 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -517,15 +517,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 473e813..d73e5bd 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1259,15 +1259,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 7681814..d4bec93 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -2118,18 +2118,7 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
 
 module_param(webcam, int, 0644);
 MODULE_PARM_DESC(webcam,
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 6a01b35..08b8ce1 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -5056,18 +5056,7 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
 
 module_param(frame_rate, int, 0644);
 MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)");
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 76907ec..0475339 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -1533,16 +1533,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b3b1ea6..f30060d 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1432,16 +1432,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 1600df1..ece8b1e 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -569,15 +569,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 1c44f78..2811195 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1220,15 +1220,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 7509d05..1ac1111 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -868,15 +868,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
index 3b71bbc..1494e18 100644
--- a/drivers/media/video/gspca/se401.c
+++ b/drivers/media/video/gspca/se401.c
@@ -766,15 +766,4 @@
 	.post_reset = sd_post_reset,
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 48aae39..478533c 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -737,16 +737,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 86e07a1..33cabc3 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2554,15 +2554,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 146b459..ddb392d 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1527,15 +1527,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index c746bf1..afa3186 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3104,15 +3104,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 6956731..070b9c3 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -590,15 +590,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index bb82c94..1039847 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1092,15 +1092,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 7aaac72..9c16821 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2188,15 +2188,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 16722dc..1320f35 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -815,15 +815,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 89fec4c..54eed87 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -714,21 +714,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	int ret;
-	ret = usb_register(&sd_driver);
-	if (ret < 0)
-		return ret;
-	PDEBUG(D_PROBE, "registered");
-	return 0;
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-	PDEBUG(D_PROBE, "deregistered");
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index a44fe3d..df4e169 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1544,15 +1544,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index c82fd53..259a0c7 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1106,15 +1106,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index df805f7..2fe3c29 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -432,16 +432,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index c2c0560..ae78363 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -339,16 +339,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index e4255b4..1a8ba9b 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1197,15 +1197,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 42a7a28..4ae7cc8 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -519,15 +519,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index 4dcc7e3..461ed64 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -355,15 +355,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index b1fca7d..0ab425f 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -612,18 +612,7 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
 
 module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index c890977..c80f0c0 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1211,15 +1211,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 90f0877..ea44deb 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1438,15 +1438,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index 29596c5..b2695b1 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -4971,18 +4971,7 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
 
 module_param(force_sensor, int, 0644);
 MODULE_PARM_DESC(force_sensor,
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 933ef2c..c8922c5 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -418,16 +418,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 7ee2c82..208f6b2 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4230,15 +4230,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 81dd4c9..d12ea15 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -368,16 +368,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 3aed42a..fbb6ed2 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3325,15 +3325,4 @@
 #endif
 };
 
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 30ea1e4..0202fea 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -7050,18 +7050,7 @@
 #endif
 };
 
-static int __init sd_mod_init(void)
-{
-	return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
-	usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
 
 module_param(force_sensor, int, 0644);
 MODULE_PARM_DESC(force_sensor,
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 441dacf..3f1a5b1 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -452,26 +452,7 @@
 	.id_table =	hdpvr_table,
 };
 
-static int __init hdpvr_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&hdpvr_usb_driver);
-	if (result)
-		err("usb_register failed. Error number %d", result);
-
-	return result;
-}
-
-static void __exit hdpvr_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&hdpvr_usb_driver);
-}
-
-module_init(hdpvr_init);
-module_exit(hdpvr_exit);
+module_usb_driver(hdpvr_usb_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.2.1");
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 89d09a8..82c8817 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -162,7 +162,6 @@
  * @pad: media pad
  * @ffmt: current fmt according to resolution type
  * @res_type: current resolution type
- * @code: current code
  * @irq_waitq: waitqueue for the capture
  * @work_irq: workqueue for the IRQ
  * @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@
 	struct media_pad pad;
 	struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
 	int res_type;
-	enum v4l2_mbus_pixelcode code;
 	wait_queue_head_t irq_waitq;
 	struct work_struct work_irq;
 	unsigned long flags;
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 05ab370..e0f09e5 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -334,7 +334,7 @@
 	int ret = -EINVAL;
 	u8 reg;
 
-	if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+	if (mode < REG_PARAMETER || mode > REG_CAPTURE)
 		return ret;
 
 	ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@
 	struct m5mols_info *info = to_m5mols(sd);
 	struct v4l2_mbus_framefmt *format;
 
-	if (fmt->pad != 0)
-		return -EINVAL;
-
 	format = __find_format(info, fh, fmt->which, info->res_type);
 	if (!format)
 		return -EINVAL;
@@ -532,9 +529,6 @@
 	u32 resolution = 0;
 	int ret;
 
-	if (fmt->pad != 0)
-		return -EINVAL;
-
 	ret = __find_resolution(sd, format, &type, &resolution);
 	if (ret < 0)
 		return ret;
@@ -543,13 +537,14 @@
 	if (!sfmt)
 		return 0;
 
-	*sfmt		= m5mols_default_ffmt[type];
-	sfmt->width	= format->width;
-	sfmt->height	= format->height;
+
+	format->code = m5mols_default_ffmt[type].code;
+	format->colorspace = V4L2_COLORSPACE_JPEG;
+	format->field = V4L2_FIELD_NONE;
 
 	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+		*sfmt = *format;
 		info->resolution = resolution;
-		info->code = format->code;
 		info->res_type = type;
 	}
 
@@ -626,13 +621,14 @@
 static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
 {
 	struct m5mols_info *info = to_m5mols(sd);
+	u32 code = info->ffmt[info->res_type].code;
 
 	if (enable) {
 		int ret = -EINVAL;
 
-		if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+		if (is_code(code, M5MOLS_RESTYPE_MONITOR))
 			ret = m5mols_start_monitor(info);
-		if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+		if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
 			ret = m5mols_start_capture(info);
 
 		return ret;
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cf2c0fb..398f96f 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -955,6 +955,7 @@
 	mt9m111->rect.height	= MT9M111_MAX_HEIGHT;
 	mt9m111->fmt		= &mt9m111_colour_fmts[0];
 	mt9m111->lastpage	= -1;
+	mutex_init(&mt9m111->power_lock);
 
 	ret = mt9m111_video_probe(client);
 	if (ret) {
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 32114a3..7b34b11 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1083,8 +1083,10 @@
 	v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
 
 	ret = mt9t112_camera_probe(client);
-	if (ret)
+	if (ret) {
 		kfree(priv);
+		return ret;
+	}
 
 	/* Cannot fail: using the default supported pixel code */
 	mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 9c5c19f..ee0d0b3 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -38,6 +38,7 @@
 #include <linux/irq.h>
 #include <linux/videodev2.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 
 #include <media/videobuf-dma-contig.h>
 #include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@
 	vid_dev->num_displays = 0;
 	for_each_dss_dev(dssdev) {
 		omap_dss_get_device(dssdev);
+
+		if (!dssdev->driver) {
+			dev_warn(&pdev->dev, "no driver for display: %s\n",
+					dssdev->name);
+			omap_dss_put_device(dssdev);
+			continue;
+		}
+
 		vid_dev->displays[vid_dev->num_displays++] = dssdev;
 	}
 
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e87ae2f..6a6cf38 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 1d54b86..3ea38a8 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -506,7 +506,7 @@
 	unsigned long flags;
 	struct sgdma_state *sg_state;
 
-	if ((sglen < 0) || ((sglen > 0) & !sglist))
+	if ((sglen < 0) || ((sglen > 0) && !sglist))
 		return -EINVAL;
 
 	spin_lock_irqsave(&sgdma->lock, flags);
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5..54a4a3f 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1408,7 +1408,7 @@
 {
 	struct isp_pipeline *pipe =
 		to_isp_pipeline(&ccdc->video_out.video.entity);
-	struct video_device *vdev = &ccdc->subdev.devnode;
+	struct video_device *vdev = ccdc->subdev.devnode;
 	struct v4l2_event event;
 
 	memset(&event, 0, sizeof(event));
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d5394..bc0b2c7 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -496,7 +496,7 @@
 
 static void isp_stat_queue_event(struct ispstat *stat, int err)
 {
-	struct video_device *vdev = &stat->subdev.devnode;
+	struct video_device *vdev = stat->subdev.devnode;
 	struct v4l2_event event;
 	struct omap3isp_stat_event_status *status = (void *)event.u.data;
 
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index d100072..f229057 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -26,6 +26,7 @@
 #include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 9f2d26b..6806345 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -540,7 +540,7 @@
 static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
 	struct soc_camera_sense *sense = icd->sense;
 	struct ov6650 *priv = to_ov6650(client);
 	bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 803c9c8..c1bef61 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -2682,25 +2682,7 @@
 	.id_table = s2255_table,
 };
 
-static int __init usb_s2255_init(void)
-{
-	int result;
-	/* register this driver with the USB subsystem */
-	result = usb_register(&s2255_driver);
-	if (result)
-		pr_err(KBUILD_MODNAME
-		       ": usb_register failed. Error number %d\n", result);
-	dprintk(2, "%s\n", __func__);
-	return result;
-}
-
-static void __exit usb_s2255_exit(void)
-{
-	usb_deregister(&s2255_driver);
-}
-
-module_init(usb_s2255_init);
-module_exit(usb_s2255_exit);
+module_usb_driver(s2255_driver);
 
 MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
 MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index c8d91b0..2cc3b91 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -98,6 +98,10 @@
 			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
 	}
 	set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+	fimc_hw_reset(fimc);
+	cap->buf_index = 0;
+
 	spin_unlock_irqrestore(&fimc->slock, flags);
 
 	if (streaming)
@@ -137,7 +141,7 @@
 	struct fimc_dev *fimc = ctx->fimc_dev;
 	int ret;
 
-	if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+	if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
 		return 0;
 
 	spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@
 		fimc_hw_set_rotation(ctx);
 		fimc_prepare_dma_offset(ctx, &ctx->d_frame);
 		fimc_hw_set_out_dma(ctx);
-		set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+		clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
 	}
 	spin_unlock(&ctx->slock);
 	return ret;
@@ -164,7 +168,6 @@
 	int min_bufs;
 	int ret;
 
-	fimc_hw_reset(fimc);
 	vid_cap->frame_count = 0;
 
 	ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@
 	max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
 	min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
 	min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
-	if (fimc->id == 1 && var->pix_hoff)
+	if (var->min_vsize_align == 1 && !rotation)
 		align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
 
 	depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@
 
 	mutex_lock(&fimc->lock);
 	set_frame_bounds(ff, mf->width, mf->height);
+	fimc->vid_cap.mf = *mf;
 	ff->fmt = ffmt;
 
 	/* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@
 	media_entity_cleanup(&sd->entity);
 	v4l2_device_unregister_subdev(sd);
 	kfree(sd);
-	sd = NULL;
+	fimc->vid_cap.subdev = NULL;
 }
 
 /* Set default format at the sensor and host interface */
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 19ca6db..07c6254 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -37,7 +37,7 @@
 static struct fimc_fmt fimc_formats[] = {
 	{
 		.name		= "RGB565",
-		.fourcc		= V4L2_PIX_FMT_RGB565X,
+		.fourcc		= V4L2_PIX_FMT_RGB565,
 		.depth		= { 16 },
 		.color		= S5P_FIMC_RGB565,
 		.memplanes	= 1,
@@ -1038,12 +1038,11 @@
 		mod_x = 6; /* 64 x 32 pixels tile */
 		mod_y = 5;
 	} else {
-		if (fimc->id == 1 && variant->pix_hoff)
+		if (variant->min_vsize_align == 1)
 			mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
 		else
-			mod_y = mod_x;
+			mod_y = ffs(variant->min_vsize_align) - 1;
 	}
-	dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
 
 	v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
 		&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@
 		fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
 
 	/* Get pixel alignment constraints. */
-	if (fimc->id == 1 && fimc->variant->pix_hoff)
+	if (fimc->variant->min_vsize_align == 1)
 		halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
 	else
-		halign = ffs(min_size) - 1;
+		halign = ffs(fimc->variant->min_vsize_align) - 1;
 
 	for (i = 0; i < f->fmt->colplanes; i++)
 		depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@
 	pdata = pdev->dev.platform_data;
 	fimc->pdata = pdata;
 
-	set_bit(ST_LPM, &fimc->state);
 
 	init_waitqueue_head(&fimc->irq_queue);
 	spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@
 	/* Enable clocks and perform basic initalization */
 	clk_enable(fimc->clock[CLK_GATE]);
 	fimc_hw_reset(fimc);
-	if (fimc->variant->out_buf_count > 4)
-		fimc_hw_set_dma_seq(fimc, 0xF);
 
 	/* Resume the capture or mem-to-mem device */
 	if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@
 		return 0;
 	}
 	fimc_hw_reset(fimc);
-	if (fimc->variant->out_buf_count > 4)
-		fimc_hw_set_dma_seq(fimc, 0xF);
 	spin_unlock_irqrestore(&fimc->slock, flags);
 
 	if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@
 	struct fimc_dev *fimc = platform_get_drvdata(pdev);
 
 	pm_runtime_disable(&pdev->dev);
-	fimc_runtime_suspend(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
 
 	vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
 	.out_buf_count	 = 4,
 	.pix_limit	 = &s5p_pix_limit[0],
 };
@@ -1849,6 +1843,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
 	.out_buf_count	 = 4,
 	.pix_limit = &s5p_pix_limit[1],
 };
@@ -1861,6 +1856,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
 	.out_buf_count	 = 4,
 	.pix_limit	 = &s5p_pix_limit[1],
 };
@@ -1874,6 +1870,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 1,
+	.min_vsize_align = 1,
 	.out_buf_count	 = 4,
 	.pix_limit	 = &s5p_pix_limit[2],
 };
@@ -1884,6 +1881,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
 	.out_buf_count	 = 4,
 	.pix_limit	 = &s5p_pix_limit[2],
 };
@@ -1898,6 +1896,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 2,
+	.min_vsize_align = 1,
 	.out_buf_count	 = 32,
 	.pix_limit	 = &s5p_pix_limit[1],
 };
@@ -1910,6 +1909,7 @@
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 16,
 	.hor_offs_align	 = 2,
+	.min_vsize_align = 1,
 	.out_buf_count	 = 32,
 	.pix_limit	 = &s5p_pix_limit[3],
 };
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a6936da..c7f01c4 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -377,6 +377,7 @@
  * @min_inp_pixsize: minimum input pixel size
  * @min_out_pixsize: minimum output pixel size
  * @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
  * @out_buf_count: the number of buffers in output DMA sequence
  */
 struct samsung_fimc_variant {
@@ -390,6 +391,7 @@
 	u16		min_inp_pixsize;
 	u16		min_out_pixsize;
 	u16		hor_offs_align;
+	u16		min_vsize_align;
 	u16		out_buf_count;
 };
 
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index cc337b1..615c862 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -220,6 +220,7 @@
 	sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
 				       s_info->pdata->board_info, NULL);
 	if (IS_ERR_OR_NULL(sd)) {
+		i2c_put_adapter(adapter);
 		v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
 		return NULL;
 	}
@@ -234,12 +235,15 @@
 static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct i2c_adapter *adapter;
 
 	if (!client)
 		return;
 	v4l2_device_unregister_subdev(sd);
+	adapter = client->adapter;
 	i2c_unregister_device(client);
-	i2c_put_adapter(client->adapter);
+	if (adapter)
+		i2c_put_adapter(adapter);
 }
 
 static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@
 
 static int fimc_md_register_video_nodes(struct fimc_md *fmd)
 {
+	struct video_device *vdev;
 	int i, ret = 0;
 
 	for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
 		if (!fmd->fimc[i])
 			continue;
 
-		if (fmd->fimc[i]->m2m.vfd)
-			ret = video_register_device(fmd->fimc[i]->m2m.vfd,
-						    VFL_TYPE_GRABBER, -1);
-		if (ret)
-			break;
-		if (fmd->fimc[i]->vid_cap.vfd)
-			ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
-						    VFL_TYPE_GRABBER, -1);
+		vdev = fmd->fimc[i]->m2m.vfd;
+		if (vdev) {
+			ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+			if (ret)
+				break;
+			v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+				  vdev->name, video_device_node_name(vdev));
+		}
+
+		vdev = fmd->fimc[i]->vid_cap.vfd;
+		if (vdev == NULL)
+			continue;
+		ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+		v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+			  vdev->name, video_device_node_name(vdev));
 	}
 
 	return ret;
@@ -502,7 +514,7 @@
 			if (WARN(csis == NULL,
 				 "MIPI-CSI interface specified "
 				 "but s5p-csis module is not loaded!\n"))
-				continue;
+				return -EINVAL;
 
 			ret = media_entity_create_link(&sensor->entity, 0,
 					      &csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@
 	struct fimc_md *fmd;
 	int ret;
 
-	if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
-		return -EINVAL;
-
 	fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
 	if (!fmd)
 		return -ENOMEM;
@@ -782,9 +791,11 @@
 	if (ret)
 		goto err3;
 
-	ret = fimc_md_register_sensor_entities(fmd);
-	if (ret)
-		goto err3;
+	if (pdev->dev.platform_data) {
+		ret = fimc_md_register_sensor_entities(fmd);
+		if (ret)
+			goto err3;
+	}
 	ret = fimc_md_create_links(fmd);
 	if (ret)
 		goto err3;
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 20e664e..44f5c2d 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -35,6 +35,9 @@
 	cfg = readl(dev->regs + S5P_CIGCTRL);
 	cfg &= ~S5P_CIGCTRL_SWRST;
 	writel(cfg, dev->regs + S5P_CIGCTRL);
+
+	if (dev->variant->out_buf_count > 4)
+		fimc_hw_set_dma_seq(dev, 0xF);
 }
 
 static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@
 	struct fimc_scaler *sc = &ctx->scaler;
 	struct fimc_frame *src_frame = &ctx->s_frame;
 	struct fimc_frame *dst_frame = &ctx->d_frame;
-	u32 cfg = 0;
+
+	u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+	cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+		 S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+		 S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+		 S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+		 S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
 
 	if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
 		cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@
 	fimc_hw_set_scaler(ctx);
 
 	cfg = readl(dev->regs + S5P_CISCCTRL);
+	cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 
 	if (variant->has_mainscaler_ext) {
-		cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 		cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
 		cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
 		writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@
 		cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
 		writel(cfg, dev->regs + S5P_CIEXTEN);
 	} else {
-		cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 		cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
 		cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
 		writel(cfg, dev->regs + S5P_CISCCTRL);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 1e8cdb7..dff9dc7 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -61,7 +61,7 @@
 		.num_planes = 1,
 	},
 	{
-		.name = "H264 Encoded Stream",
+		.name = "H263 Encoded Stream",
 		.fourcc = V4L2_PIX_FMT_H263,
 		.codec_mode = S5P_FIMV_CODEC_H263_ENC,
 		.type = MFC_FMT_ENC,
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index e16d3a4..b47d0c0 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -16,6 +16,7 @@
 #include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/version.h>
 #include <linux/timer.h>
 #include <media/videobuf2-dma-contig.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f390682..c51decf 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -566,8 +566,10 @@
 	ret = sh_mobile_ceu_soft_reset(pcdev);
 
 	csi2_sd = find_csi2(pcdev);
-	if (csi2_sd)
-		csi2_sd->grp_id = (long)icd;
+	if (csi2_sd) {
+		csi2_sd->grp_id = soc_camera_grp_id(icd);
+		v4l2_set_subdev_hostdata(csi2_sd, icd);
+	}
 
 	ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
 	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@
 {
 	if (pcdev->csi2_pdev) {
 		struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-		if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+		if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
 			return csi2_sd;
 	}
 
@@ -1089,8 +1091,9 @@
 			/* Try 2560x1920, 1280x960, 640x480, 320x240 */
 			mf.width	= 2560 >> shift;
 			mf.height	= 1920 >> shift;
-			ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-							 s_mbus_fmt, &mf);
+			ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					s_mbus_fmt, &mf);
 			if (ret < 0)
 				return ret;
 			shift++;
@@ -1389,7 +1392,8 @@
 	bool ceu_1to1;
 	int ret;
 
-	ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+	ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					 soc_camera_grp_id(icd), video,
 					 s_mbus_fmt, mf);
 	if (ret < 0)
 		return ret;
@@ -1426,8 +1430,9 @@
 		tmp_h = min(2 * tmp_h, max_height);
 		mf->width = tmp_w;
 		mf->height = tmp_h;
-		ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-						 s_mbus_fmt, mf);
+		ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					s_mbus_fmt, mf);
 		dev_geo(dev, "Camera scaled to %ux%u\n",
 			mf->width, mf->height);
 		if (ret < 0) {
@@ -1580,8 +1585,9 @@
 	}
 
 	if (interm_width < icd->user_width || interm_height < icd->user_height) {
-		ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
-						 s_mbus_fmt, &mf);
+		ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					s_mbus_fmt, &mf);
 		if (ret < 0)
 			return ret;
 
@@ -1867,7 +1873,8 @@
 	mf.code		= xlate->code;
 	mf.colorspace	= pix->colorspace;
 
-	ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+	ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+					 video, try_mbus_fmt, &mf);
 	if (ret < 0)
 		return ret;
 
@@ -1891,8 +1898,9 @@
 			 */
 			mf.width = 2560;
 			mf.height = 1920;
-			ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-							 try_mbus_fmt, &mf);
+			ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					try_mbus_fmt, &mf);
 			if (ret < 0) {
 				/* Shouldn't actually happen... */
 				dev_err(icd->parent,
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index ea4f047..8a652b5 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -143,7 +143,7 @@
 				 const struct v4l2_mbus_config *cfg)
 {
 	struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-	struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
 	struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
 	struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
 					      .flags = priv->mipi_flags};
@@ -202,7 +202,7 @@
 static int sh_csi2_client_connect(struct sh_csi2 *priv)
 {
 	struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-	struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
 	struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
 	struct device *dev = v4l2_get_subdevdata(&priv->subdev);
 	struct v4l2_mbus_config cfg;
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 16cb07c5..7025be1 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -3420,27 +3420,4 @@
 	.disconnect = sn9c102_usb_disconnect,
 };
 
-/*****************************************************************************/
-
-static int __init sn9c102_module_init(void)
-{
-	int err = 0;
-
-	KDBG(2, SN9C102_MODULE_NAME " v" SN9C102_MODULE_VERSION);
-	KDBG(3, SN9C102_MODULE_AUTHOR);
-
-	if ((err = usb_register(&sn9c102_usb_driver)))
-		KDBG(1, "usb_register() failed");
-
-	return err;
-}
-
-
-static void __exit sn9c102_module_exit(void)
-{
-	usb_deregister(&sn9c102_usb_driver);
-}
-
-
-module_init(sn9c102_module_init);
-module_exit(sn9c102_module_exit);
+module_usb_driver(sn9c102_usb_driver);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b72580c..62e4312 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1103,7 +1103,8 @@
 	}
 
 	sd = soc_camera_to_subdev(icd);
-	sd->grp_id = (long)icd;
+	sd->grp_id = soc_camera_grp_id(icd);
+	v4l2_set_subdev_hostdata(sd, icd);
 
 	if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
 		goto ectrl;
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index cbc105f..b7fb5a5 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1377,25 +1377,4 @@
 #endif
 };
 
-
-static int __init stk_camera_init(void)
-{
-	int result;
-
-	result = usb_register(&stk_camera_driver);
-	if (result)
-		STK_ERROR("usb_register failed ! Error number %d\n", result);
-
-
-	return result;
-}
-
-static void __exit stk_camera_exit(void)
-{
-	usb_deregister(&stk_camera_driver);
-}
-
-module_init(stk_camera_init);
-module_exit(stk_camera_exit);
-
-
+module_usb_driver(stk_camera_driver);
diff --git a/drivers/media/video/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index ec2578a..ff939bc 100644
--- a/drivers/media/video/tm6000/tm6000-cards.c
+++ b/drivers/media/video/tm6000/tm6000-cards.c
@@ -1371,31 +1371,7 @@
 		.id_table = tm6000_id_table,
 };
 
-static int __init tm6000_module_init(void)
-{
-	int result;
-
-	printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n",
-	       (TM6000_VERSION  >> 16) & 0xff,
-	       (TM6000_VERSION  >> 8) & 0xff, TM6000_VERSION  & 0xff);
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&tm6000_usb_driver);
-	if (result)
-		printk(KERN_ERR "tm6000"
-			   " usb_register failed. Error number %d.\n", result);
-
-	return result;
-}
-
-static void __exit tm6000_module_exit(void)
-{
-	/* deregister at USB subsystem */
-	usb_deregister(&tm6000_usb_driver);
-}
-
-module_init(tm6000_module_init);
-module_exit(tm6000_module_exit);
+module_usb_driver(tm6000_usb_driver);
 
 MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index d4d05d2..f6d2641 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1550,7 +1550,7 @@
 		if (zoran_formats[i].flags & flag && num++ == fmt->index) {
 			strncpy(fmt->description, zoran_formats[i].name,
 				sizeof(fmt->description) - 1);
-			/* fmt struct pre-zeroed, so adding '\0' not neeed */
+			/* fmt struct pre-zeroed, so adding '\0' not needed */
 			fmt->pixelformat = zoran_formats[i].fourcc;
 			if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
 				fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index e78cf94..cd2e39f 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -1695,28 +1695,7 @@
 	.id_table = device_table
 };
 
-
-static int __init zr364xx_init(void)
-{
-	int retval;
-	retval = usb_register(&zr364xx_driver);
-	if (retval)
-		printk(KERN_ERR KBUILD_MODNAME ": usb_register failed!\n");
-	else
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
-	return retval;
-}
-
-
-static void __exit zr364xx_exit(void)
-{
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC " module unloaded\n");
-	usb_deregister(&zr364xx_driver);
-}
-
-
-module_init(zr364xx_init);
-module_exit(zr364xx_exit);
+module_usb_driver(zr364xx_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index fd62228..19fb21b 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -857,7 +857,7 @@
 #define MPI_EVENT_SAS_DSCVRY_PHY_BITS_MASK                  (0xFFFF0000)
 #define MPI_EVENT_SAS_DSCVRY_PHY_BITS_SHIFT                 (16)
 
-/* SAS Discovery Errror Event data */
+/* SAS Discovery Error Event data */
 
 typedef struct _EVENT_DATA_DISCOVERY_ERROR
 {
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 07dbeaf..6d115c7 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -56,7 +56,7 @@
 /* Structure used to define /proc entries */
 typedef struct _i2o_proc_entry_t {
 	char *name;		/* entry name */
-	mode_t mode;		/* mode */
+	umode_t mode;		/* mode */
 	const struct file_operations *fops;	/* open function */
 } i2o_proc_entry;
 
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f1391c2..c8322ee 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -328,6 +328,34 @@
 	  individual components like LCD backlight, voltage regulators,
 	  LEDs and battery-charger under the corresponding menus.
 
+config PMIC_DA9052
+	bool
+	select MFD_CORE
+
+config MFD_DA9052_SPI
+	bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
+	select REGMAP_SPI
+	select REGMAP_IRQ
+	select PMIC_DA9052
+	depends on SPI_MASTER=y
+	help
+	  Support for the Dialog Semiconductor DA9052 PMIC
+	  when controlled using SPI. This driver provides common support
+	  for accessing the device; additional drivers must be enabled in
+	  order to use the functionality of the device.
+
+config MFD_DA9052_I2C
+	bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	select PMIC_DA9052
+	depends on I2C=y
+	help
+	  Support for the Dialog Semiconductor DA9052 PMIC
+	  when controlled using I2C. This driver provides common support
+	  for accessing the device; additional drivers must be enabled in
+	  order to use the functionality of the device.
+
 config PMIC_ADP5520
 	bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
 	depends on I2C=y
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2292eb..d5f5743 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -67,6 +67,11 @@
 obj-$(CONFIG_UCB1400_CORE)	+= ucb1400_core.o
 
 obj-$(CONFIG_PMIC_DA903X)	+= da903x.o
+
+obj-$(CONFIG_PMIC_DA9052)	+= da9052-core.o
+obj-$(CONFIG_MFD_DA9052_SPI)	+= da9052-spi.o
+obj-$(CONFIG_MFD_DA9052_I2C)	+= da9052-i2c.o
+
 max8925-objs			:= max8925-core.o max8925-i2c.o
 obj-$(CONFIG_MFD_MAX8925)	+= max8925.o
 obj-$(CONFIG_MFD_MAX8997)	+= max8997.o max8997-irq.o
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 43c0ebb..b7b2d348 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1e91738..d3d572b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -620,6 +620,7 @@
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
 	{
 		.name	= "IRQ_FIRST",
@@ -634,6 +635,7 @@
 		.flags	= IORESOURCE_IRQ,
 	},
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
 	{
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f1d8848..8d816cc 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -109,7 +109,7 @@
 
 	ret = __adp5520_read(chip->client, reg, &reg_val);
 
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = __adp5520_write(chip->client, reg, reg_val);
 	}
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1b79c37..1924b85 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -182,7 +182,7 @@
 	if (ret)
 		goto out;
 
-	if ((reg_val & bit_mask) == 0) {
+	if ((reg_val & bit_mask) != bit_mask) {
 		reg_val |= bit_mask;
 		ret = __da903x_write(chip->client, reg, reg_val);
 	}
@@ -549,6 +549,7 @@
 	struct da903x_chip *chip = i2c_get_clientdata(client);
 
 	da903x_remove_subdevs(chip);
+	free_irq(client->irq, chip);
 	kfree(chip);
 	return 0;
 }
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
new file mode 100644
index 0000000..5ddde2a
--- /dev/null
+++ b/drivers/mfd/da9052-core.c
@@ -0,0 +1,694 @@
+/*
+ * Device access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS		4
+#define DA9052_IRQ_MASK_POS_1		0x01
+#define DA9052_IRQ_MASK_POS_2		0x02
+#define DA9052_IRQ_MASK_POS_3		0x04
+#define DA9052_IRQ_MASK_POS_4		0x08
+#define DA9052_IRQ_MASK_POS_5		0x10
+#define DA9052_IRQ_MASK_POS_6		0x20
+#define DA9052_IRQ_MASK_POS_7		0x40
+#define DA9052_IRQ_MASK_POS_8		0x80
+
+static bool da9052_reg_readable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case DA9052_PAGE0_CON_REG:
+	case DA9052_STATUS_A_REG:
+	case DA9052_STATUS_B_REG:
+	case DA9052_STATUS_C_REG:
+	case DA9052_STATUS_D_REG:
+	case DA9052_EVENT_A_REG:
+	case DA9052_EVENT_B_REG:
+	case DA9052_EVENT_C_REG:
+	case DA9052_EVENT_D_REG:
+	case DA9052_FAULTLOG_REG:
+	case DA9052_IRQ_MASK_A_REG:
+	case DA9052_IRQ_MASK_B_REG:
+	case DA9052_IRQ_MASK_C_REG:
+	case DA9052_IRQ_MASK_D_REG:
+	case DA9052_CONTROL_A_REG:
+	case DA9052_CONTROL_B_REG:
+	case DA9052_CONTROL_C_REG:
+	case DA9052_CONTROL_D_REG:
+	case DA9052_PDDIS_REG:
+	case DA9052_INTERFACE_REG:
+	case DA9052_RESET_REG:
+	case DA9052_GPIO_0_1_REG:
+	case DA9052_GPIO_2_3_REG:
+	case DA9052_GPIO_4_5_REG:
+	case DA9052_GPIO_6_7_REG:
+	case DA9052_GPIO_14_15_REG:
+	case DA9052_ID_0_1_REG:
+	case DA9052_ID_2_3_REG:
+	case DA9052_ID_4_5_REG:
+	case DA9052_ID_6_7_REG:
+	case DA9052_ID_8_9_REG:
+	case DA9052_ID_10_11_REG:
+	case DA9052_ID_12_13_REG:
+	case DA9052_ID_14_15_REG:
+	case DA9052_ID_16_17_REG:
+	case DA9052_ID_18_19_REG:
+	case DA9052_ID_20_21_REG:
+	case DA9052_SEQ_STATUS_REG:
+	case DA9052_SEQ_A_REG:
+	case DA9052_SEQ_B_REG:
+	case DA9052_SEQ_TIMER_REG:
+	case DA9052_BUCKA_REG:
+	case DA9052_BUCKB_REG:
+	case DA9052_BUCKCORE_REG:
+	case DA9052_BUCKPRO_REG:
+	case DA9052_BUCKMEM_REG:
+	case DA9052_BUCKPERI_REG:
+	case DA9052_LDO1_REG:
+	case DA9052_LDO2_REG:
+	case DA9052_LDO3_REG:
+	case DA9052_LDO4_REG:
+	case DA9052_LDO5_REG:
+	case DA9052_LDO6_REG:
+	case DA9052_LDO7_REG:
+	case DA9052_LDO8_REG:
+	case DA9052_LDO9_REG:
+	case DA9052_LDO10_REG:
+	case DA9052_SUPPLY_REG:
+	case DA9052_PULLDOWN_REG:
+	case DA9052_CHGBUCK_REG:
+	case DA9052_WAITCONT_REG:
+	case DA9052_ISET_REG:
+	case DA9052_BATCHG_REG:
+	case DA9052_CHG_CONT_REG:
+	case DA9052_INPUT_CONT_REG:
+	case DA9052_CHG_TIME_REG:
+	case DA9052_BBAT_CONT_REG:
+	case DA9052_BOOST_REG:
+	case DA9052_LED_CONT_REG:
+	case DA9052_LEDMIN123_REG:
+	case DA9052_LED1_CONF_REG:
+	case DA9052_LED2_CONF_REG:
+	case DA9052_LED3_CONF_REG:
+	case DA9052_LED1CONT_REG:
+	case DA9052_LED2CONT_REG:
+	case DA9052_LED3CONT_REG:
+	case DA9052_LED_CONT_4_REG:
+	case DA9052_LED_CONT_5_REG:
+	case DA9052_ADC_MAN_REG:
+	case DA9052_ADC_CONT_REG:
+	case DA9052_ADC_RES_L_REG:
+	case DA9052_ADC_RES_H_REG:
+	case DA9052_VDD_RES_REG:
+	case DA9052_VDD_MON_REG:
+	case DA9052_ICHG_AV_REG:
+	case DA9052_ICHG_THD_REG:
+	case DA9052_ICHG_END_REG:
+	case DA9052_TBAT_RES_REG:
+	case DA9052_TBAT_HIGHP_REG:
+	case DA9052_TBAT_HIGHN_REG:
+	case DA9052_TBAT_LOW_REG:
+	case DA9052_T_OFFSET_REG:
+	case DA9052_ADCIN4_RES_REG:
+	case DA9052_AUTO4_HIGH_REG:
+	case DA9052_AUTO4_LOW_REG:
+	case DA9052_ADCIN5_RES_REG:
+	case DA9052_AUTO5_HIGH_REG:
+	case DA9052_AUTO5_LOW_REG:
+	case DA9052_ADCIN6_RES_REG:
+	case DA9052_AUTO6_HIGH_REG:
+	case DA9052_AUTO6_LOW_REG:
+	case DA9052_TJUNC_RES_REG:
+	case DA9052_TSI_CONT_A_REG:
+	case DA9052_TSI_CONT_B_REG:
+	case DA9052_TSI_X_MSB_REG:
+	case DA9052_TSI_Y_MSB_REG:
+	case DA9052_TSI_LSB_REG:
+	case DA9052_TSI_Z_MSB_REG:
+	case DA9052_COUNT_S_REG:
+	case DA9052_COUNT_MI_REG:
+	case DA9052_COUNT_H_REG:
+	case DA9052_COUNT_D_REG:
+	case DA9052_COUNT_MO_REG:
+	case DA9052_COUNT_Y_REG:
+	case DA9052_ALARM_MI_REG:
+	case DA9052_ALARM_H_REG:
+	case DA9052_ALARM_D_REG:
+	case DA9052_ALARM_MO_REG:
+	case DA9052_ALARM_Y_REG:
+	case DA9052_SECOND_A_REG:
+	case DA9052_SECOND_B_REG:
+	case DA9052_SECOND_C_REG:
+	case DA9052_SECOND_D_REG:
+	case DA9052_PAGE1_CON_REG:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool da9052_reg_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case DA9052_PAGE0_CON_REG:
+	case DA9052_EVENT_A_REG:
+	case DA9052_EVENT_B_REG:
+	case DA9052_EVENT_C_REG:
+	case DA9052_EVENT_D_REG:
+	case DA9052_IRQ_MASK_A_REG:
+	case DA9052_IRQ_MASK_B_REG:
+	case DA9052_IRQ_MASK_C_REG:
+	case DA9052_IRQ_MASK_D_REG:
+	case DA9052_CONTROL_A_REG:
+	case DA9052_CONTROL_B_REG:
+	case DA9052_CONTROL_C_REG:
+	case DA9052_CONTROL_D_REG:
+	case DA9052_PDDIS_REG:
+	case DA9052_RESET_REG:
+	case DA9052_GPIO_0_1_REG:
+	case DA9052_GPIO_2_3_REG:
+	case DA9052_GPIO_4_5_REG:
+	case DA9052_GPIO_6_7_REG:
+	case DA9052_GPIO_14_15_REG:
+	case DA9052_ID_0_1_REG:
+	case DA9052_ID_2_3_REG:
+	case DA9052_ID_4_5_REG:
+	case DA9052_ID_6_7_REG:
+	case DA9052_ID_8_9_REG:
+	case DA9052_ID_10_11_REG:
+	case DA9052_ID_12_13_REG:
+	case DA9052_ID_14_15_REG:
+	case DA9052_ID_16_17_REG:
+	case DA9052_ID_18_19_REG:
+	case DA9052_ID_20_21_REG:
+	case DA9052_SEQ_STATUS_REG:
+	case DA9052_SEQ_A_REG:
+	case DA9052_SEQ_B_REG:
+	case DA9052_SEQ_TIMER_REG:
+	case DA9052_BUCKA_REG:
+	case DA9052_BUCKB_REG:
+	case DA9052_BUCKCORE_REG:
+	case DA9052_BUCKPRO_REG:
+	case DA9052_BUCKMEM_REG:
+	case DA9052_BUCKPERI_REG:
+	case DA9052_LDO1_REG:
+	case DA9052_LDO2_REG:
+	case DA9052_LDO3_REG:
+	case DA9052_LDO4_REG:
+	case DA9052_LDO5_REG:
+	case DA9052_LDO6_REG:
+	case DA9052_LDO7_REG:
+	case DA9052_LDO8_REG:
+	case DA9052_LDO9_REG:
+	case DA9052_LDO10_REG:
+	case DA9052_SUPPLY_REG:
+	case DA9052_PULLDOWN_REG:
+	case DA9052_CHGBUCK_REG:
+	case DA9052_WAITCONT_REG:
+	case DA9052_ISET_REG:
+	case DA9052_BATCHG_REG:
+	case DA9052_CHG_CONT_REG:
+	case DA9052_INPUT_CONT_REG:
+	case DA9052_BBAT_CONT_REG:
+	case DA9052_BOOST_REG:
+	case DA9052_LED_CONT_REG:
+	case DA9052_LEDMIN123_REG:
+	case DA9052_LED1_CONF_REG:
+	case DA9052_LED2_CONF_REG:
+	case DA9052_LED3_CONF_REG:
+	case DA9052_LED1CONT_REG:
+	case DA9052_LED2CONT_REG:
+	case DA9052_LED3CONT_REG:
+	case DA9052_LED_CONT_4_REG:
+	case DA9052_LED_CONT_5_REG:
+	case DA9052_ADC_MAN_REG:
+	case DA9052_ADC_CONT_REG:
+	case DA9052_ADC_RES_L_REG:
+	case DA9052_ADC_RES_H_REG:
+	case DA9052_VDD_RES_REG:
+	case DA9052_VDD_MON_REG:
+	case DA9052_ICHG_THD_REG:
+	case DA9052_ICHG_END_REG:
+	case DA9052_TBAT_HIGHP_REG:
+	case DA9052_TBAT_HIGHN_REG:
+	case DA9052_TBAT_LOW_REG:
+	case DA9052_T_OFFSET_REG:
+	case DA9052_AUTO4_HIGH_REG:
+	case DA9052_AUTO4_LOW_REG:
+	case DA9052_AUTO5_HIGH_REG:
+	case DA9052_AUTO5_LOW_REG:
+	case DA9052_AUTO6_HIGH_REG:
+	case DA9052_AUTO6_LOW_REG:
+	case DA9052_TSI_CONT_A_REG:
+	case DA9052_TSI_CONT_B_REG:
+	case DA9052_COUNT_S_REG:
+	case DA9052_COUNT_MI_REG:
+	case DA9052_COUNT_H_REG:
+	case DA9052_COUNT_D_REG:
+	case DA9052_COUNT_MO_REG:
+	case DA9052_COUNT_Y_REG:
+	case DA9052_ALARM_MI_REG:
+	case DA9052_ALARM_H_REG:
+	case DA9052_ALARM_D_REG:
+	case DA9052_ALARM_MO_REG:
+	case DA9052_ALARM_Y_REG:
+	case DA9052_PAGE1_CON_REG:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case DA9052_STATUS_A_REG:
+	case DA9052_STATUS_B_REG:
+	case DA9052_STATUS_C_REG:
+	case DA9052_STATUS_D_REG:
+	case DA9052_EVENT_A_REG:
+	case DA9052_EVENT_B_REG:
+	case DA9052_EVENT_C_REG:
+	case DA9052_EVENT_D_REG:
+	case DA9052_FAULTLOG_REG:
+	case DA9052_CHG_TIME_REG:
+	case DA9052_ADC_RES_L_REG:
+	case DA9052_ADC_RES_H_REG:
+	case DA9052_VDD_RES_REG:
+	case DA9052_ICHG_AV_REG:
+	case DA9052_TBAT_RES_REG:
+	case DA9052_ADCIN4_RES_REG:
+	case DA9052_ADCIN5_RES_REG:
+	case DA9052_ADCIN6_RES_REG:
+	case DA9052_TJUNC_RES_REG:
+	case DA9052_TSI_X_MSB_REG:
+	case DA9052_TSI_Y_MSB_REG:
+	case DA9052_TSI_LSB_REG:
+	case DA9052_TSI_Z_MSB_REG:
+	case DA9052_COUNT_S_REG:
+	case DA9052_COUNT_MI_REG:
+	case DA9052_COUNT_H_REG:
+	case DA9052_COUNT_D_REG:
+	case DA9052_COUNT_MO_REG:
+	case DA9052_COUNT_Y_REG:
+	case DA9052_ALARM_MI_REG:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static struct resource da9052_rtc_resource = {
+	.name = "ALM",
+	.start = DA9052_IRQ_ALARM,
+	.end   = DA9052_IRQ_ALARM,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_onkey_resource = {
+	.name = "ONKEY",
+	.start = DA9052_IRQ_NONKEY,
+	.end   = DA9052_IRQ_NONKEY,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_bat_resources[] = {
+	{
+		.name = "BATT TEMP",
+		.start = DA9052_IRQ_TBAT,
+		.end   = DA9052_IRQ_TBAT,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "DCIN DET",
+		.start = DA9052_IRQ_DCIN,
+		.end   = DA9052_IRQ_DCIN,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "DCIN REM",
+		.start = DA9052_IRQ_DCINREM,
+		.end   = DA9052_IRQ_DCINREM,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS DET",
+		.start = DA9052_IRQ_VBUS,
+		.end   = DA9052_IRQ_VBUS,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS REM",
+		.start = DA9052_IRQ_VBUSREM,
+		.end   = DA9052_IRQ_VBUSREM,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "CHG END",
+		.start = DA9052_IRQ_CHGEND,
+		.end   = DA9052_IRQ_CHGEND,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource da9052_tsi_resources[] = {
+	{
+		.name = "PENDWN",
+		.start = DA9052_IRQ_PENDOWN,
+		.end   = DA9052_IRQ_PENDOWN,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "TSIRDY",
+		.start = DA9052_IRQ_TSIREADY,
+		.end   = DA9052_IRQ_TSIREADY,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell __devinitdata da9052_subdev_info[] = {
+	{
+		.name = "da9052-regulator",
+		.id = 1,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 2,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 3,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 4,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 5,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 6,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 7,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 8,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 9,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 10,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 11,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 12,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 13,
+	},
+	{
+		.name = "da9052-regulator",
+		.id = 14,
+	},
+	{
+		.name = "da9052-onkey",
+		.resources = &da9052_onkey_resource,
+		.num_resources = 1,
+	},
+	{
+		.name = "da9052-rtc",
+		.resources = &da9052_rtc_resource,
+		.num_resources = 1,
+	},
+	{
+		.name = "da9052-gpio",
+	},
+	{
+		.name = "da9052-hwmon",
+	},
+	{
+		.name = "da9052-leds",
+	},
+	{
+		.name = "da9052-wled1",
+	},
+	{
+		.name = "da9052-wled2",
+	},
+	{
+		.name = "da9052-wled3",
+	},
+	{
+		.name = "da9052-tsi",
+		.resources = da9052_tsi_resources,
+		.num_resources = ARRAY_SIZE(da9052_tsi_resources),
+	},
+	{
+		.name = "da9052-bat",
+		.resources = da9052_bat_resources,
+		.num_resources = ARRAY_SIZE(da9052_bat_resources),
+	},
+	{
+		.name = "da9052-watchdog",
+	},
+};
+
+static struct regmap_irq da9052_irqs[] = {
+	[DA9052_IRQ_DCIN] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_1,
+	},
+	[DA9052_IRQ_VBUS] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_2,
+	},
+	[DA9052_IRQ_DCINREM] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_3,
+	},
+	[DA9052_IRQ_VBUSREM] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_4,
+	},
+	[DA9052_IRQ_VDDLOW] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_5,
+	},
+	[DA9052_IRQ_ALARM] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_6,
+	},
+	[DA9052_IRQ_SEQRDY] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_7,
+	},
+	[DA9052_IRQ_COMP1V2] = {
+		.reg_offset = 0,
+		.mask = DA9052_IRQ_MASK_POS_8,
+	},
+	[DA9052_IRQ_NONKEY] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_1,
+	},
+	[DA9052_IRQ_IDFLOAT] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_2,
+	},
+	[DA9052_IRQ_IDGND] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_3,
+	},
+	[DA9052_IRQ_CHGEND] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_4,
+	},
+	[DA9052_IRQ_TBAT] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_5,
+	},
+	[DA9052_IRQ_ADC_EOM] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_6,
+	},
+	[DA9052_IRQ_PENDOWN] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_7,
+	},
+	[DA9052_IRQ_TSIREADY] = {
+		.reg_offset = 1,
+		.mask = DA9052_IRQ_MASK_POS_8,
+	},
+	[DA9052_IRQ_GPI0] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_1,
+	},
+	[DA9052_IRQ_GPI1] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_2,
+	},
+	[DA9052_IRQ_GPI2] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_3,
+	},
+	[DA9052_IRQ_GPI3] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_4,
+	},
+	[DA9052_IRQ_GPI4] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_5,
+	},
+	[DA9052_IRQ_GPI5] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_6,
+	},
+	[DA9052_IRQ_GPI6] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_7,
+	},
+	[DA9052_IRQ_GPI7] = {
+		.reg_offset = 2,
+		.mask = DA9052_IRQ_MASK_POS_8,
+	},
+	[DA9052_IRQ_GPI8] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_1,
+	},
+	[DA9052_IRQ_GPI9] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_2,
+	},
+	[DA9052_IRQ_GPI10] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_3,
+	},
+	[DA9052_IRQ_GPI11] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_4,
+	},
+	[DA9052_IRQ_GPI12] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_5,
+	},
+	[DA9052_IRQ_GPI13] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_6,
+	},
+	[DA9052_IRQ_GPI14] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_7,
+	},
+	[DA9052_IRQ_GPI15] = {
+		.reg_offset = 3,
+		.mask = DA9052_IRQ_MASK_POS_8,
+	},
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+	.name = "da9052_irq",
+	.status_base = DA9052_EVENT_A_REG,
+	.mask_base = DA9052_IRQ_MASK_A_REG,
+	.ack_base = DA9052_EVENT_A_REG,
+	.num_regs = DA9052_NUM_IRQ_REGS,
+	.irqs = da9052_irqs,
+	.num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+struct regmap_config da9052_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.cache_type = REGCACHE_RBTREE,
+
+	.max_register = DA9052_PAGE1_CON_REG,
+	.readable_reg = da9052_reg_readable,
+	.writeable_reg = da9052_reg_writeable,
+	.volatile_reg = da9052_reg_volatile,
+};
+EXPORT_SYMBOL_GPL(da9052_regmap_config);
+
+int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
+{
+	struct da9052_pdata *pdata = da9052->dev->platform_data;
+	struct irq_desc *desc;
+	int ret;
+
+	mutex_init(&da9052->io_lock);
+
+	if (pdata && pdata->init != NULL)
+		pdata->init(da9052);
+
+	da9052->chip_id = chip_id;
+
+	if (!pdata || !pdata->irq_base)
+		da9052->irq_base = -1;
+	else
+		da9052->irq_base = pdata->irq_base;
+
+	ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				  da9052->irq_base, &da9052_regmap_irq_chip,
+				  NULL);
+	if (ret < 0)
+		goto regmap_err;
+
+	desc = irq_to_desc(da9052->chip_irq);
+	da9052->irq_base = regmap_irq_chip_get_base(desc->action->dev_id);
+
+	ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+			      ARRAY_SIZE(da9052_subdev_info), NULL, 0);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	mfd_remove_devices(da9052->dev);
+regmap_err:
+	return ret;
+}
+
+void da9052_device_exit(struct da9052 *da9052)
+{
+	regmap_del_irq_chip(da9052->chip_irq,
+			    irq_get_irq_data(da9052->irq_base)->chip_data);
+	mfd_remove_devices(da9052->dev);
+}
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 MFD Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
new file mode 100644
index 0000000..44b97c7
--- /dev/null
+++ b/drivers/mfd/da9052-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * I2C access for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+{
+	int reg_val, ret;
+
+	ret = regmap_read(da9052->regmap, DA9052_CONTROL_B_REG, &reg_val);
+	if (ret < 0)
+		return ret;
+
+	if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
+		reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+		ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
+				   reg_val);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int __devinit da9052_i2c_probe(struct i2c_client *client,
+				       const struct i2c_device_id *id)
+{
+	struct da9052 *da9052;
+	int ret;
+
+	da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+	if (!da9052)
+		return -ENOMEM;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
+			 __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	da9052->dev = &client->dev;
+	da9052->chip_irq = client->irq;
+
+	i2c_set_clientdata(client, da9052);
+
+	da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+	if (IS_ERR(da9052->regmap)) {
+		ret = PTR_ERR(da9052->regmap);
+		dev_err(&client->dev, "Failed to allocate register map: %d\n",
+			ret);
+		goto err;
+	}
+
+	ret = da9052_i2c_enable_multiwrite(da9052);
+	if (ret < 0)
+		goto err;
+
+	ret = da9052_device_init(da9052, id->driver_data);
+	if (ret != 0)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(da9052);
+	return ret;
+}
+
+static int da9052_i2c_remove(struct i2c_client *client)
+{
+	struct da9052 *da9052 = i2c_get_clientdata(client);
+
+	da9052_device_exit(da9052);
+	kfree(da9052);
+
+	return 0;
+}
+
+static struct i2c_device_id da9052_i2c_id[] = {
+	{"da9052", DA9052},
+	{"da9053-aa", DA9053_AA},
+	{"da9053-ba", DA9053_BA},
+	{"da9053-bb", DA9053_BB},
+	{}
+};
+
+static struct i2c_driver da9052_i2c_driver = {
+	.probe = da9052_i2c_probe,
+	.remove = da9052_i2c_remove,
+	.id_table = da9052_i2c_id,
+	.driver = {
+		.name = "da9052",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init da9052_i2c_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&da9052_i2c_driver);
+	if (ret != 0) {
+		pr_err("DA9052 I2C registration failed %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+subsys_initcall(da9052_i2c_init);
+
+static void __exit da9052_i2c_exit(void)
+{
+	i2c_del_driver(&da9052_i2c_driver);
+}
+module_exit(da9052_i2c_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("I2C driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
new file mode 100644
index 0000000..cdbc7ca
--- /dev/null
+++ b/drivers/mfd/da9052-spi.c
@@ -0,0 +1,115 @@
+/*
+ * SPI access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+
+static int da9052_spi_probe(struct spi_device *spi)
+{
+	int ret;
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+
+	if (!da9052)
+		return -ENOMEM;
+
+	spi->mode = SPI_MODE_0 | SPI_CPOL;
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+
+	da9052->dev = &spi->dev;
+	da9052->chip_irq = spi->irq;
+
+	dev_set_drvdata(&spi->dev, da9052);
+
+	da9052_regmap_config.read_flag_mask = 1;
+	da9052_regmap_config.write_flag_mask = 0;
+
+	da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+	if (IS_ERR(da9052->regmap)) {
+		ret = PTR_ERR(da9052->regmap);
+		dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+			ret);
+		goto err;
+	}
+
+	ret = da9052_device_init(da9052, id->driver_data);
+	if (ret != 0)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(da9052);
+	return ret;
+}
+
+static int da9052_spi_remove(struct spi_device *spi)
+{
+	struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
+
+	da9052_device_exit(da9052);
+	kfree(da9052);
+
+	return 0;
+}
+
+static struct spi_device_id da9052_spi_id[] = {
+	{"da9052", DA9052},
+	{"da9053-aa", DA9053_AA},
+	{"da9053-ba", DA9053_BA},
+	{"da9053-bb", DA9053_BB},
+	{}
+};
+
+static struct spi_driver da9052_spi_driver = {
+	.probe = da9052_spi_probe,
+	.remove = __devexit_p(da9052_spi_remove),
+	.id_table = da9052_spi_id,
+	.driver = {
+		.name = "da9052",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init da9052_spi_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&da9052_spi_driver);
+	if (ret != 0) {
+		pr_err("Failed to register DA9052 SPI driver, %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+subsys_initcall(da9052_spi_init);
+
+static void __exit da9052_spi_exit(void)
+{
+	spi_unregister_driver(&da9052_spi_driver);
+}
+module_exit(da9052_spi_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("SPI driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 1e9ee53..ef39528 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d9..a5ddf31 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -197,7 +197,7 @@
 	if (ret)
 		goto out;
 
-	if ((reg_val & bit_mask) == 0) {
+	if ((reg_val & bit_mask) != bit_mask) {
 		reg_val |= bit_mask;
 		ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
 	}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6f5b8cf..c1da84b 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -120,7 +120,7 @@
 		goto out;
 	}
 
-	data &= mask;
+	data &= ~mask;
 	err = tps65910_i2c_write(tps65910, reg, 1, &data);
 	if (err)
 		dev_err(tps65910->dev, "write to reg %x failed\n", reg);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bfbd660..61e70cf 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -363,13 +363,13 @@
 		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
 		return -EPERM;
 	}
+	if (unlikely(!inuse)) {
+		pr_err("%s: not initialized\n", DRIVER_NAME);
+		return -EPERM;
+	}
 	sid = twl_map[mod_no].sid;
 	twl = &twl_modules[sid];
 
-	if (unlikely(!inuse)) {
-		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
-		return -EPERM;
-	}
 	mutex_lock(&twl->xfer_lock);
 	/*
 	 * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@
 		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
 		return -EPERM;
 	}
+	if (unlikely(!inuse)) {
+		pr_err("%s: not initialized\n", DRIVER_NAME);
+		return -EPERM;
+	}
 	sid = twl_map[mod_no].sid;
 	twl = &twl_modules[sid];
 
-	if (unlikely(!inuse)) {
-		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
-		return -EPERM;
-	}
 	mutex_lock(&twl->xfer_lock);
 	/* [MSG1] fill the register address data */
 	msg = &twl->xfer_msg[0];
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index f062c8c..29f11e0 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -432,6 +432,7 @@
 	u32			edge_change;
 
 	struct mutex		irq_lock;
+	char			*irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
 	struct sih_agent *agent = irq_get_handler_data(irq);
 	const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@
 		pr_err("twl4030: %s SIH, read ISR error %d\n",
 			sih->name, isr);
 		/* REVISIT:  recover; eventually mask it all, etc */
-		return;
+		return IRQ_HANDLED;
 	}
 
 	while (isr) {
@@ -616,6 +617,7 @@
 			pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
 				sih->name, irq);
 	}
+	return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@
 		activate_irq(irq);
 	}
 
-	status = irq_base;
 	twl4030_irq_next += i;
 
 	/* replace generic PIH handler (handle_simple_irq) */
 	irq = sih_mod + twl4030_irq_base;
 	irq_set_handler_data(irq, agent);
-	irq_set_chained_handler(irq, handle_twl4030_sih);
+	agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+	status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+				      agent->irq_name ?: sih->name, NULL);
 
 	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
 			irq, irq_base, twl4030_irq_next - 1);
 
-	return status;
+	return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@
 	}
 
 	/* install an irq handler to demultiplex the TWL4030 interrupt */
-	status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-					"TWL4030-PIH", NULL);
+	status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+				      IRQF_ONESHOT,
+				      "TWL4030-PIH", NULL);
 	if (status < 0) {
 		pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
 		goto fail_rqirq;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45f..c6b456a 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@
 	static const unsigned max_i2c_errors = 100;
 	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 		int i;
 		union {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d6ba13..61894fc 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -239,6 +239,7 @@
 
 	switch (wm8994->type) {
 	case WM8958:
+	case WM1811:
 		ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
 		if (ret < 0) {
 			dev_err(dev, "Failed to read power status: %d\n", ret);
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index a39e055..83adab6 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the Analog Devices digital potentiometers (I2C bus)
  *
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -11,7 +11,6 @@
 
 #include "ad525x_dpot.h"
 
-/* ------------------------------------------------------------------------- */
 /* I2C bus functions */
 static int write_d8(void *client, u8 val)
 {
@@ -60,18 +59,13 @@
 		.bops = &bops,
 	};
 
-	struct ad_dpot_id dpot_id = {
-		.name = (char *) &id->name,
-		.devid = id->driver_data,
-	};
-
 	if (!i2c_check_functionality(client->adapter,
 				     I2C_FUNC_SMBUS_WORD_DATA)) {
 		dev_err(&client->dev, "SMBUS Word Data not Supported\n");
 		return -EIO;
 	}
 
-	return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+	return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
 }
 
 static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 7f9a55a..822749e 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -1,7 +1,7 @@
 /*
  * Driver for the Analog Devices digital potentiometers (SPI bus)
  *
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -11,40 +11,6 @@
 
 #include "ad525x_dpot.h"
 
-static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
-	{.name = "ad5160", .devid = AD5160_ID},
-	{.name = "ad5161", .devid = AD5161_ID},
-	{.name = "ad5162", .devid = AD5162_ID},
-	{.name = "ad5165", .devid = AD5165_ID},
-	{.name = "ad5200", .devid = AD5200_ID},
-	{.name = "ad5201", .devid = AD5201_ID},
-	{.name = "ad5203", .devid = AD5203_ID},
-	{.name = "ad5204", .devid = AD5204_ID},
-	{.name = "ad5206", .devid = AD5206_ID},
-	{.name = "ad5207", .devid = AD5207_ID},
-	{.name = "ad5231", .devid = AD5231_ID},
-	{.name = "ad5232", .devid = AD5232_ID},
-	{.name = "ad5233", .devid = AD5233_ID},
-	{.name = "ad5235", .devid = AD5235_ID},
-	{.name = "ad5260", .devid = AD5260_ID},
-	{.name = "ad5262", .devid = AD5262_ID},
-	{.name = "ad5263", .devid = AD5263_ID},
-	{.name = "ad5290", .devid = AD5290_ID},
-	{.name = "ad5291", .devid = AD5291_ID},
-	{.name = "ad5292", .devid = AD5292_ID},
-	{.name = "ad5293", .devid = AD5293_ID},
-	{.name = "ad7376", .devid = AD7376_ID},
-	{.name = "ad8400", .devid = AD8400_ID},
-	{.name = "ad8402", .devid = AD8402_ID},
-	{.name = "ad8403", .devid = AD8403_ID},
-	{.name = "adn2850", .devid = ADN2850_ID},
-	{.name = "ad5270", .devid = AD5270_ID},
-	{.name = "ad5271", .devid = AD5271_ID},
-	{}
-};
-
-/* ------------------------------------------------------------------------- */
-
 /* SPI bus functions */
 static int write8(void *client, u8 val)
 {
@@ -109,36 +75,16 @@
 	.write_r8d8	= write16,
 	.write_r8d16	= write24,
 };
-
-static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
-						char *name)
-{
-	while (id->name && id->name[0]) {
-		if (strcmp(name, id->name) == 0)
-			return id;
-		id++;
-	}
-	return NULL;
-}
-
 static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
 {
-	char *name = spi->dev.platform_data;
-	const struct ad_dpot_id *dpot_id;
-
 	struct ad_dpot_bus_data bdata = {
 		.client = spi,
 		.bops = &bops,
 	};
 
-	dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
-
-	if (dpot_id == NULL) {
-		dev_err(&spi->dev, "%s not in supported device list", name);
-		return -ENODEV;
-	}
-
-	return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+	return ad_dpot_probe(&spi->dev, &bdata,
+			     spi_get_device_id(spi)->driver_data,
+			     spi_get_device_id(spi)->name);
 }
 
 static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
@@ -146,14 +92,47 @@
 	return ad_dpot_remove(&spi->dev);
 }
 
+static const struct spi_device_id ad_dpot_spi_id[] = {
+	{"ad5160", AD5160_ID},
+	{"ad5161", AD5161_ID},
+	{"ad5162", AD5162_ID},
+	{"ad5165", AD5165_ID},
+	{"ad5200", AD5200_ID},
+	{"ad5201", AD5201_ID},
+	{"ad5203", AD5203_ID},
+	{"ad5204", AD5204_ID},
+	{"ad5206", AD5206_ID},
+	{"ad5207", AD5207_ID},
+	{"ad5231", AD5231_ID},
+	{"ad5232", AD5232_ID},
+	{"ad5233", AD5233_ID},
+	{"ad5235", AD5235_ID},
+	{"ad5260", AD5260_ID},
+	{"ad5262", AD5262_ID},
+	{"ad5263", AD5263_ID},
+	{"ad5290", AD5290_ID},
+	{"ad5291", AD5291_ID},
+	{"ad5292", AD5292_ID},
+	{"ad5293", AD5293_ID},
+	{"ad7376", AD7376_ID},
+	{"ad8400", AD8400_ID},
+	{"ad8402", AD8402_ID},
+	{"ad8403", AD8403_ID},
+	{"adn2850", ADN2850_ID},
+	{"ad5270", AD5270_ID},
+	{"ad5271", AD5271_ID},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
+
 static struct spi_driver ad_dpot_spi_driver = {
 	.driver = {
 		.name	= "ad_dpot",
-		.bus	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= ad_dpot_spi_probe,
 	.remove		= __devexit_p(ad_dpot_spi_remove),
+	.id_table	= ad_dpot_spi_id,
 };
 
 static int __init ad_dpot_spi_init(void)
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 7cb9110..1d1d426 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -64,7 +64,7 @@
  * Author: Chris Verges <chrisv@cyberswitching.com>
  *
  * derived from ad5252.c
- * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
  * Licensed under the GPL-2 or later.
  */
@@ -76,8 +76,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#define DRIVER_VERSION			"0.2"
-
 #include "ad525x_dpot.h"
 
 /*
@@ -687,8 +685,9 @@
 	}
 }
 
-__devinit int ad_dpot_probe(struct device *dev,
-		struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
+int __devinit ad_dpot_probe(struct device *dev,
+		struct ad_dpot_bus_data *bdata, unsigned long devid,
+			    const char *name)
 {
 
 	struct dpot_data *data;
@@ -704,13 +703,13 @@
 	mutex_init(&data->update_lock);
 
 	data->bdata = *bdata;
-	data->devid = id->devid;
+	data->devid = devid;
 
-	data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+	data->max_pos = 1 << DPOT_MAX_POS(devid);
 	data->rdac_mask = data->max_pos - 1;
-	data->feat = DPOT_FEAT(data->devid);
-	data->uid = DPOT_UID(data->devid);
-	data->wipers = DPOT_WIPERS(data->devid);
+	data->feat = DPOT_FEAT(devid);
+	data->uid = DPOT_UID(devid);
+	data->wipers = DPOT_WIPERS(devid);
 
 	for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
 		if (data->wipers & (1 << i)) {
@@ -731,7 +730,7 @@
 	}
 
 	dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
-		 id->name, data->max_pos);
+		 name, data->max_pos);
 
 	return 0;
 
@@ -745,7 +744,7 @@
 	dev_set_drvdata(dev, NULL);
 exit:
 	dev_err(dev, "failed to create client for %s ID 0x%lX\n",
-			id->name, id->devid);
+		name, devid);
 	return err;
 }
 EXPORT_SYMBOL(ad_dpot_probe);
@@ -770,4 +769,3 @@
 	      "Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Digital potentiometer driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 82b2cb7..6bd1eba 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -208,12 +208,8 @@
 	const struct ad_dpot_bus_ops *bops;
 };
 
-struct ad_dpot_id {
-	char *name;
-	unsigned long devid;
-};
-
-int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
+		  unsigned long devid, const char *name);
 int ad_dpot_remove(struct device *dev);
 
 #endif
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 5f898cb..b29a2be 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -216,7 +216,7 @@
 		*temperature = (x1+x2+8) >> 4;
 
 exit:
-	return status;;
+	return status;
 }
 
 /*
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 7b33de9..0ff4b02 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -63,6 +63,7 @@
 	eeprom->reg_data_out = 0;
 	eeprom->reg_data_clock = 0;
 	eeprom->reg_chip_select = 1;
+	eeprom->drive_data = 1;
 	eeprom->register_write(eeprom);
 
 	/*
@@ -101,6 +102,7 @@
 	 */
 	eeprom->reg_data_in = 0;
 	eeprom->reg_data_out = 0;
+	eeprom->drive_data = 1;
 
 	/*
 	 * Start writing all bits.
@@ -140,6 +142,7 @@
 	 */
 	eeprom->reg_data_in = 0;
 	eeprom->reg_data_out = 0;
+	eeprom->drive_data = 0;
 
 	/*
 	 * Start reading all bits.
@@ -231,3 +234,88 @@
 }
 EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
 
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+	u16 command;
+
+	/* start the command */
+	eeprom_93cx6_startup(eeprom);
+
+	/* create command to enable/disable */
+
+	command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+	command <<= (eeprom->width - 2);
+
+	eeprom_93cx6_write_bits(eeprom, command,
+				PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write @data to the specified @addr in the EEPROM and wait
+ * for the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we delay between parts of the operation to avoid using excessive
+ * amounts of CPU time busy-waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+	int timeout = 100;
+	u16 command;
+
+	/* start the command */
+	eeprom_93cx6_startup(eeprom);
+
+	command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+	command |= addr;
+
+	/* send write command */
+	eeprom_93cx6_write_bits(eeprom, command,
+				PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	/* send data */
+	eeprom_93cx6_write_bits(eeprom, data, 16);
+
+	/* get ready to check for busy */
+	eeprom->drive_data = 0;
+	eeprom->reg_chip_select = 1;
+	eeprom->register_write(eeprom);
+
+	/* wait at least 250ns for DO to become the busy signal */
+	usleep_range(1000, 2000);
+
+	/* wait for DO to go high to signify finish */
+
+	while (true) {
+		eeprom->register_read(eeprom);
+
+		if (eeprom->reg_data_out)
+			break;
+
+		usleep_range(1000, 2000);
+
+		if (--timeout <= 0) {
+			printk(KERN_ERR "%s: timeout\n", __func__);
+			break;
+		}
+	}
+
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
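The two exports above add a write path to the 93cx6 EEPROM helper. A minimal usage sketch, assuming the caller has already filled in the register_read/register_write callbacks and bus width; the wrapper function itself is hypothetical:

#include <linux/eeprom_93cx6.h>

/* Hypothetical caller: unlock, program one word, lock again. */
static void example_eeprom_update(struct eeprom_93cx6 *eeprom, u8 word, u16 val)
{
	eeprom_93cx6_wren(eeprom, true);	/* EWEN: allow writes */
	eeprom_93cx6_write(eeprom, word, val);	/* polls DO until the write completes */
	eeprom_93cx6_wren(eeprom, false);	/* EWDS: write-protect again */
}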
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 5c766b4..7d56f45 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 3dd2dfb..d7b2ca3 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 6cbba1a..fc9fc9d 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 76bfda1..8e540f4 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 1bc4306..9074637 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index bf2c738..2e9566d 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 4d8a4e2..9b08344 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 8994772..3536175 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index 4b2398e..5319ea2 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index 7667665..e97848f 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index a234d96..1ccedb71 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  * This driver is based on code originally written by Pete Reynolds
  * and others.
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 2de487a..232034f 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index 00dbf1d4..a7729ef 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  * Originally written by Pete Reynolds
  */
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 93baa35..1dcb9ae 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
  *
  * Copyright (C) IBM Corporation, 2004
  *
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
  *
  */
 
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 307aada..3d6cce6 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -158,7 +158,7 @@
 		dev_err(&client->dev, "default write failed.");
 		return retval;
 	}
-	return 0;;
+	return 0;
 }
 
 static int  isl29020_probe(struct i2c_client *client,
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 7768b87..950dbe9 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -324,7 +324,7 @@
 
 static struct proc_entry {
 	char *name;
-	int mode;
+	umode_t mode;
 	const struct file_operations *fops;
 	struct proc_dir_entry *entry;
 } proc_files[] = {
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 42f0673..3fac67a 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -576,7 +576,7 @@
 	 * report an error if the data is not retrievable and the
 	 * packet will be dropped.
 	 */
-	xpnet_device->features = NETIF_F_NO_CSUM;
+	xpnet_device->features = NETIF_F_HW_CSUM;
 
 	result = register_netdev(xpnet_device);
 	if (result != 0) {
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index ba168a7..2b62232 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -137,6 +137,8 @@
  * st_reg_complete -
  * to call registration complete callbacks
  * of all protocol stack drivers
+ * This function is called with the spin lock held; protocol drivers are
+ * only expected to complete their waits and do nothing more.
  */
 void st_reg_complete(struct st_data_s *st_gdata, char err)
 {
@@ -538,11 +540,12 @@
 		set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
 		st_recv = st_kim_recv;
 
+		/* enable the ST LL - to set default chip state */
+		st_ll_enable(st_gdata);
+
 		/* release lock previously held - re-locked below */
 		spin_unlock_irqrestore(&st_gdata->lock, flags);
 
-		/* enable the ST LL - to set default chip state */
-		st_ll_enable(st_gdata);
 		/* this may take a while to complete
 		 * since it involves BT fw download
 		 */
@@ -553,10 +556,13 @@
 			    (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
 				pr_err(" KIM failure complete callback ");
 				st_reg_complete(st_gdata, err);
+				clear_bit(ST_REG_PENDING, &st_gdata->st_state);
 			}
 			return -EINVAL;
 		}
 
+		spin_lock_irqsave(&st_gdata->lock, flags);
+
 		clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
 		st_recv = st_int_recv;
 
@@ -576,10 +582,10 @@
 		if (st_gdata->is_registered[new_proto->chnl_id] == true) {
 			pr_err(" proto %d already registered ",
 				   new_proto->chnl_id);
+			spin_unlock_irqrestore(&st_gdata->lock, flags);
 			return -EALREADY;
 		}
 
-		spin_lock_irqsave(&st_gdata->lock, flags);
 		add_channel_to_table(st_gdata, new_proto);
 		st_gdata->protos_registered++;
 		new_proto->write = st_write;
@@ -619,7 +625,7 @@
 
 	spin_lock_irqsave(&st_gdata->lock, flags);
 
-	if (st_gdata->list[proto->chnl_id] == NULL) {
+	if (st_gdata->is_registered[proto->chnl_id] == false) {
 		pr_err(" chnl_id %d not registered", proto->chnl_id);
 		spin_unlock_irqrestore(&st_gdata->lock, flags);
 		return -EPROTONOSUPPORT;
@@ -629,6 +635,10 @@
 	remove_channel_from_table(st_gdata, proto);
 	spin_unlock_irqrestore(&st_gdata->lock, flags);
 
+	/* paranoid check */
+	if (st_gdata->protos_registered < ST_EMPTY)
+		st_gdata->protos_registered = ST_EMPTY;
+
 	if ((st_gdata->protos_registered == ST_EMPTY) &&
 	    (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
 		pr_info(" all chnl_ids unregistered ");
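The st_core.c hunks above re-order st_ll_enable() and the spin_lock_irqsave()/spin_unlock_irqrestore() calls so that st_gdata->lock is never held across operations that may sleep or call back into protocol drivers. A condensed sketch of the resulting pattern; the wrapper function is illustrative, not part of the patch:

#include <linux/ti_wilink_st.h>

static long example_register_path(struct st_data_s *st_gdata)
{
	unsigned long flags;
	long err;

	spin_lock_irqsave(&st_gdata->lock, flags);
	set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
	st_ll_enable(st_gdata);			/* cheap, done under the lock */
	spin_unlock_irqrestore(&st_gdata->lock, flags);

	err = st_kim_start(st_gdata->kim_data);	/* may sleep: fw download */

	spin_lock_irqsave(&st_gdata->lock, flags);
	clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
	spin_unlock_irqrestore(&st_gdata->lock, flags);

	return err;
}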
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 43ef8d1..a7a861c 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -469,37 +469,21 @@
 		/* wait for ldisc to be installed */
 		err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
 				msecs_to_jiffies(LDISC_TIME));
-		if (!err) {	/* timeout */
-			pr_err("line disc installation timed out ");
-			kim_gdata->ldisc_install = 0;
-			pr_info("ldisc_install = 0");
-			sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
-					NULL, "install");
-			/* the following wait is never going to be completed,
-			 * since the ldisc was never installed, hence serving
-			 * as a mdelay of LDISC_TIME msecs */
-			err = wait_for_completion_timeout
-				(&kim_gdata->ldisc_installed,
-				 msecs_to_jiffies(LDISC_TIME));
-			err = -ETIMEDOUT;
+		if (!err) {
+			/* ldisc installation timeout,
+			 * flush uart, power cycle BT_EN */
+			pr_err("ldisc installation timeout");
+			err = st_kim_stop(kim_gdata);
 			continue;
 		} else {
 			/* ldisc installed now */
-			pr_info(" line discipline installed ");
+			pr_info("line discipline installed");
 			err = download_firmware(kim_gdata);
 			if (err != 0) {
+				/* ldisc installed but fw download failed,
+				 * flush uart & power cycle BT_EN */
 				pr_err("download firmware failed");
-				kim_gdata->ldisc_install = 0;
-				pr_info("ldisc_install = 0");
-				sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
-						NULL, "install");
-				/* this wait might be completed, though in the
-				 * tty_close() since the ldisc is already
-				 * installed */
-				err = wait_for_completion_timeout
-					(&kim_gdata->ldisc_installed,
-					 msecs_to_jiffies(LDISC_TIME));
-				err = -EINVAL;
+				err = st_kim_stop(kim_gdata);
 				continue;
 			} else {	/* on success don't retry */
 				break;
@@ -510,8 +494,14 @@
 }
 
 /**
- * st_kim_stop - called from ST Core, on the last un-registration
- *	toggle low the chip enable gpio
+ * st_kim_stop - stop communication with the chip.
+ *	This can be called from ST Core/KIM on either
+ *	(a) the last un-registration, when the chip need not be powered
+ *	    thereafter, or (b) failure to install the ldisc or download firmware.
+ *	The function is responsible for (a) notifying UIM about un-installation,
+ *	(b) flushing the UART if the ldisc was installed,
+ *	(c) resetting BT_EN - pulling nshutdown low at the end, and
+ *	(d) invoking the platform's chip-disabling routine.
  */
 long st_kim_stop(void *kim_data)
 {
@@ -519,12 +509,16 @@
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
 	struct ti_st_plat_data	*pdata =
 		kim_gdata->kim_pdev->dev.platform_data;
+	struct tty_struct	*tty = kim_gdata->core_data->tty;
 
 	INIT_COMPLETION(kim_gdata->ldisc_installed);
 
-	/* Flush any pending characters in the driver and discipline. */
-	tty_ldisc_flush(kim_gdata->core_data->tty);
-	tty_driver_flush_buffer(kim_gdata->core_data->tty);
+	if (tty) {	/* can be called before ldisc is installed */
+		/* Flush any pending characters in the driver and discipline. */
+		tty_ldisc_flush(tty);
+		tty_driver_flush_buffer(tty);
+		tty->ops->flush_buffer(tty);
+	}
 
 	/* send uninstall notification to UIM */
 	pr_info("ldisc_install = 0");
@@ -579,6 +573,28 @@
 	return sprintf(buf, "%d\n", kim_data->ldisc_install);
 }
 
+#ifdef DEBUG
+static ssize_t store_dev_name(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kim_data_s *kim_data = dev_get_drvdata(dev);
+	pr_debug("storing dev name >%s<", buf);
+	strncpy(kim_data->dev_name, buf, count);
+	pr_debug("stored dev name >%s<", kim_data->dev_name);
+	return count;
+}
+
+static ssize_t store_baud_rate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kim_data_s *kim_data = dev_get_drvdata(dev);
+	pr_debug("storing baud rate >%s<", buf);
+	sscanf(buf, "%ld", &kim_data->baud_rate);
+	pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
+	return count;
+}
+#endif	/* if DEBUG */
+
 static ssize_t show_dev_name(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -605,10 +621,18 @@
 __ATTR(install, 0444, (void *)show_install, NULL);
 
 static struct kobj_attribute uart_dev_name =
+#ifdef DEBUG	/* TODO: move this to debug-fs if possible */
+__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
+#else
 __ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
+#endif
 
 static struct kobj_attribute uart_baud_rate =
+#ifdef DEBUG	/* TODO: move to debugfs */
+__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
+#else
 __ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
+#endif
 
 static struct kobj_attribute uart_flow_cntrl =
 __ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f..1e0e27c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@
 		  MMC_QUIRK_BLK_NO_CMD23),
 	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_BLK_NO_CMD23),
+
+	/*
+	 * Some Micron MMC cards need a longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
 	END_FIXUP
 };
 
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b038c4a..e99bdc1 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2949,7 +2949,7 @@
 }
 
 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
-	const char *name, mode_t mode, const struct file_operations *fops)
+	const char *name, umode_t mode, const struct file_operations *fops)
 {
 	struct dentry *file = NULL;
 	struct mmc_test_dbgfs_file *df;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb..950b97d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@
 			data->timeout_clks = 0;
 		}
 	}
+
+	/*
+	 * Some cards require a longer data read timeout than indicated in CSD.
+	 * Address this by setting the read timeout to a "reasonably high"
+	 * value. For the cards tested, 300ms has proven enough. This value
+	 * can be increased further if other problematic cards require it.
+	 */
+	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+		data->timeout_ns = 300000000;
+		data->timeout_clks = 0;
+	}
+
 	/*
 	 * Some cards need very high timeouts if driven in SPI mode.
 	 * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@
 	mmc_host_clk_release(host);
 }
 
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+	struct mmc_card *card;
+	unsigned int timeout;
+	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+	int err = 0;
+
+	card = host->card;
+
+	/*
+	 * Send the power notify command only if the card
+	 * is MMC and the notify state is powered ON
+	 */
+	if (card && mmc_card_mmc(card) &&
+	    (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+			notify_type = EXT_CSD_POWER_OFF_SHORT;
+			timeout = card->ext_csd.generic_cmd6_time;
+			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+		} else {
+			notify_type = EXT_CSD_POWER_OFF_LONG;
+			timeout = card->ext_csd.power_off_longtime;
+			card->poweroff_notify_state = MMC_POWEROFF_LONG;
+		}
+
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_POWER_OFF_NOTIFICATION,
+				 notify_type, timeout);
+
+		if (err && err != -EBADMSG)
+			pr_err("Device failed to respond within %d poweroff "
+			       "time. Forcefully powering down the device\n",
+			       timeout);
+
+		/* Set the card state to no notification after the poweroff */
+		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+	}
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@
 
 void mmc_power_off(struct mmc_host *host)
 {
-	struct mmc_card *card;
-	unsigned int notify_type;
-	unsigned int timeout;
-	int err;
-
 	mmc_host_clk_hold(host);
 
-	card = host->card;
 	host->ios.clock = 0;
 	host->ios.vdd = 0;
 
-	if (card && mmc_card_mmc(card) &&
-	    (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
-		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
-			notify_type = EXT_CSD_POWER_OFF_SHORT;
-			timeout = card->ext_csd.generic_cmd6_time;
-			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
-		} else {
-			notify_type = EXT_CSD_POWER_OFF_LONG;
-			timeout = card->ext_csd.power_off_longtime;
-			card->poweroff_notify_state = MMC_POWEROFF_LONG;
-		}
-
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_POWER_OFF_NOTIFICATION,
-				 notify_type, timeout);
-
-		if (err && err != -EBADMSG)
-			pr_err("Device failed to respond within %d poweroff "
-			       "time. Forcefully powering down the device\n",
-			       timeout);
-
-		/* Set the card state to no notification after the poweroff */
-		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
-	}
+	mmc_poweroff_notify(host);
 
 	/*
 	 * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@
 
 	mmc_bus_get(host);
 
-	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
 		err = host->bus_ops->sleep(host);
 
 	mmc_bus_put(host);
@@ -2302,8 +2324,17 @@
 		 * pre-claim the host.
 		 */
 		if (mmc_try_claim_host(host)) {
-			if (host->bus_ops->suspend)
+			if (host->bus_ops->suspend) {
+				/*
+				 * For eMMC 4.5 devices, send the notify command
+				 * before sleep; in the sleep state, eMMC 4.5
+				 * devices respond only to RESET and AWAKE cmds.
+				 */
+				mmc_poweroff_notify(host);
 				err = host->bus_ops->suspend(host);
+			}
+			mmc_do_release_host(host);
+
 			if (err == -ENOSYS || !host->bus_ops->resume) {
 				/*
 				 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@
 				host->pm_flags = 0;
 				err = 0;
 			}
-			mmc_do_release_host(host);
 		} else {
 			err = -EBUSY;
 		}
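The core.c changes factor the EXT_CSD poweroff-notification sequence into mmc_poweroff_notify() and also issue it before suspend. How a host controller driver opts in is not shown in this hunk; a minimal, hypothetical fragment based on the capability and notify-type fields used above:

#include <linux/mmc/host.h>

/* Illustrative host-driver setup: request poweroff notification and use
 * the short notify variant (MMC_HOST_PW_NOTIFY_LONG is the alternative).
 */
static void example_host_caps(struct mmc_host *mmc)
{
	mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
	mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
}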
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8a5eb3..d31c78b 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -302,17 +302,6 @@
 	host->max_blk_size = 512;
 	host->max_blk_count = PAGE_CACHE_SIZE / 512;
 
-	/*
-	 * Enable runtime power management by default. This flag was added due
-	 * to runtime power management causing disruption for some users, but
-	 * the power on/off code has been improved since then.
-	 *
-	 * We'll enable this flag by default as an experiment, and if no
-	 * problems are reported, we will follow up later and remove the flag
-	 * altogether.
-	 */
-	host->caps = MMC_CAP_POWER_OFF_CARD;
-
 	return host;
 
 free:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a..d240427 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@
 	 * set the notification byte in the ext_csd register of device
 	 */
 	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-	    (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+	    (card->ext_csd.rev >= 6)) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_POWER_OFF_NOTIFICATION,
 				 EXT_CSD_POWER_ON,
 				 card->ext_csd.generic_cmd6_time);
 		if (err && err != -EBADMSG)
 			goto free_card;
-	}
 
-	if (!err)
-		card->poweroff_notify_state = MMC_POWERED_ON;
+		/*
+		 * The err can be -EBADMSG or 0,
+		 * so check for success and update the flag
+		 */
+		if (!err)
+			card->poweroff_notify_state = MMC_POWERED_ON;
+	}
 
 	/*
 	 * Activate high speed (if supported)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50b5f99..fa8dd2f 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -675,7 +675,8 @@
 	      unsigned int status)
 {
 	/* First check for errors */
-	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		u32 remain, success;
 
 		/* Terminate the DMA transfer */
@@ -754,8 +755,12 @@
 	}
 
 	if (!cmd->data || cmd->error) {
-		if (host->data)
+		if (host->data) {
+			/* Terminate the DMA transfer */
+			if (dma_inprogress(host))
+				mmci_dma_data_error(host);
 			mmci_stop_data(host);
+		}
 		mmci_request_end(host, cmd->mrq);
 	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
 		mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@
 		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
 		data = host->data;
-		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
-			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+			      MCI_DATABLOCKEND) && data)
 			mmci_data_irq(host, data, status);
 
 		cmd = host->cmd;
@@ -1496,6 +1502,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, mmci_ids);
+
 static struct amba_driver mmci_driver = {
 	.drv		= {
 		.name	= DRIVER_NAME,
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61..8e0fbe9 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@
 				"failed to config DMA channel. Falling back to PIO\n");
 			dma_release_channel(host->dma);
 			host->do_dma = 0;
+			host->dma = NULL;
 		}
 	}
 
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 2dba999..887c0e5 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -2,7 +2,7 @@
  *  linux/drivers/mmc/host/omap.c
  *
  *  Copyright (C) 2004 Nokia Corporation
- *  Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
+ *  Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
  *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
  *  Other hacks (DMA, SD, etc) by David Brownell
  *
@@ -1634,4 +1634,4 @@
 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31..d5fe43d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@
 			host->data->sg_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
 		omap_free_dma(dma_ch);
+		host->data->host_cookie = 0;
 	}
 	host->data = NULL;
 }
@@ -1575,8 +1576,10 @@
 	struct mmc_data *data = mrq->data;
 
 	if (host->use_dma) {
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, data));
+		if (data->host_cookie)
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+				     data->sg_len,
+				     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
 }
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7..b4257e7 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mmc/host.h>
+#include <linux/module.h>
 #include <mach/cns3xxx.h>
 #include "sdhci-pltfm.h"
 
@@ -108,13 +109,10 @@
 	.driver		= {
 		.name	= "sdhci-cns3xxx",
 		.owner	= THIS_MODULE,
+		.pm	= SDHCI_PLTFM_PMOPS,
 	},
 	.probe		= sdhci_cns3xxx_probe,
 	.remove		= __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
-	.suspend	= sdhci_pltfm_suspend,
-	.resume		= sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_cns3xxx_init(void)
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index f2d29dc..a81312c 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -82,13 +82,10 @@
 	.driver		= {
 		.name	= "sdhci-dove",
 		.owner	= THIS_MODULE,
+		.pm	= SDHCI_PLTFM_PMOPS,
 	},
 	.probe		= sdhci_dove_probe,
 	.remove		= __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
-	.suspend	= sdhci_pltfm_suspend,
-	.resume		= sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_dove_init(void)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4b976f0..38ebc4e 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -599,14 +599,11 @@
 		.name	= "sdhci-esdhc-imx",
 		.owner	= THIS_MODULE,
 		.of_match_table = imx_esdhc_dt_ids,
+		.pm	= SDHCI_PLTFM_PMOPS,
 	},
 	.id_table	= imx_esdhc_devtype,
 	.probe		= sdhci_esdhc_imx_probe,
 	.remove		= __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
-	.suspend	= sdhci_pltfm_suspend,
-	.resume		= sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_imx_init(void)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 59e9d00..01e5f62 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -125,13 +125,10 @@
 		.name = "sdhci-esdhc",
 		.owner = THIS_MODULE,
 		.of_match_table = sdhci_esdhc_of_match,
+		.pm = SDHCI_PLTFM_PMOPS,
 	},
 	.probe = sdhci_esdhc_probe,
 	.remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
-	.suspend = sdhci_pltfm_suspend,
-	.resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_init(void)
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 9b0d794..3619adc 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -87,13 +87,10 @@
 		.name = "sdhci-hlwd",
 		.owner = THIS_MODULE,
 		.of_match_table = sdhci_hlwd_of_match,
+		.pm = SDHCI_PLTFM_PMOPS,
 	},
 	.probe = sdhci_hlwd_probe,
 	.remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
-	.suspend = sdhci_pltfm_suspend,
-	.resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_hlwd_init(void)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d833d9c..6878a946 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -54,8 +54,7 @@
 	int			(*probe_slot) (struct sdhci_pci_slot *);
 	void			(*remove_slot) (struct sdhci_pci_slot *, int);
 
-	int			(*suspend) (struct sdhci_pci_chip *,
-					pm_message_t);
+	int			(*suspend) (struct sdhci_pci_chip *);
 	int			(*resume) (struct sdhci_pci_chip *);
 };
 
@@ -549,7 +548,7 @@
 		jmicron_enable_mmc(slot->host, 0);
 }
 
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
 {
 	int i;
 
@@ -993,8 +992,9 @@
 
 #ifdef CONFIG_PM
 
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sdhci_pci_chip *chip;
 	struct sdhci_pci_slot *slot;
 	mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@
 		if (!slot)
 			continue;
 
-		ret = sdhci_suspend_host(slot->host, state);
+		ret = sdhci_suspend_host(slot->host);
 
 		if (ret) {
 			for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@
 	}
 
 	if (chip->fixes && chip->fixes->suspend) {
-		ret = chip->fixes->suspend(chip, state);
+		ret = chip->fixes->suspend(chip);
 		if (ret) {
 			for (i = chip->num_slots - 1; i >= 0; i--)
 				sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@
 		}
 		pci_set_power_state(pdev, PCI_D3hot);
 	} else {
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+		pci_enable_wake(pdev, PCI_D3hot, 0);
 		pci_disable_device(pdev);
-		pci_set_power_state(pdev, pci_choose_state(pdev, state));
+		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
 	return 0;
 }
 
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sdhci_pci_chip *chip;
 	struct sdhci_pci_slot *slot;
 	int i, ret;
@@ -1099,7 +1100,6 @@
 	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
 	struct sdhci_pci_chip *chip;
 	struct sdhci_pci_slot *slot;
-	pm_message_t state = { .event = PM_EVENT_SUSPEND };
 	int i, ret;
 
 	chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@
 	}
 
 	if (chip->fixes && chip->fixes->suspend) {
-		ret = chip->fixes->suspend(chip, state);
+		ret = chip->fixes->suspend(chip);
 		if (ret) {
 			for (i = chip->num_slots - 1; i >= 0; i--)
 				sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@
 #endif
 
 static const struct dev_pm_ops sdhci_pci_pm_ops = {
+	.suspend = sdhci_pci_suspend,
+	.resume = sdhci_pci_resume,
 	.runtime_suspend = sdhci_pci_runtime_suspend,
 	.runtime_resume = sdhci_pci_runtime_resume,
 	.runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@
 	.id_table =	pci_ids,
 	.probe =	sdhci_pci_probe,
 	.remove =	__devexit_p(sdhci_pci_remove),
-	.suspend =	sdhci_pci_suspend,
-	.resume	=	sdhci_pci_resume,
 	.driver =	{
 		.pm =   &sdhci_pci_pm_ops
 	},
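sdhci-pci now registers its system-sleep callbacks through dev_pm_ops instead of the legacy pci_driver .suspend/.resume pair that took a pm_message_t. The general shape of that conversion for any PCI driver, sketched with made-up names:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int foo_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return 0;
}

static const struct dev_pm_ops foo_pci_pm_ops = {
	.suspend	= foo_pci_suspend,
	.resume		= foo_pci_resume,
};

static struct pci_driver foo_pci_driver = {
	.name	= "foo-pci",
	/* .id_table, .probe and .remove omitted for brevity */
	.driver	= {
		.pm = &foo_pci_pm_ops,
	},
};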
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a9e12ea..03970bc 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -194,21 +194,25 @@
 EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
 
 #ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
 {
-	struct sdhci_host *host = platform_get_drvdata(dev);
+	struct sdhci_host *host = dev_get_drvdata(dev);
 
-	return sdhci_suspend_host(host, state);
+	return sdhci_suspend_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
 
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
 {
-	struct sdhci_host *host = platform_get_drvdata(dev);
+	struct sdhci_host *host = dev_get_drvdata(dev);
 
 	return sdhci_resume_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+	.suspend	= sdhci_pltfm_suspend,
+	.resume		= sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
 #endif	/* CONFIG_PM */
 
 static int __init sdhci_pltfm_drv_init(void)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f..37e0e18 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,8 +99,10 @@
 extern int sdhci_pltfm_unregister(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
 #endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
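With sdhci_pltfm_pmops exported and SDHCI_PLTFM_PMOPS defined to NULL when CONFIG_PM is off, the platform glue drivers converted below only have to point .driver.pm at the shared ops. A minimal hypothetical glue driver; the names and the all-default pltfm_data are assumptions:

#include "sdhci-pltfm.h"

static struct sdhci_pltfm_data sdhci_foo_pdata;	/* default ops/quirks assumed */

static int __devinit sdhci_foo_probe(struct platform_device *pdev)
{
	return sdhci_pltfm_register(pdev, &sdhci_foo_pdata);
}

static int __devexit sdhci_foo_remove(struct platform_device *pdev)
{
	return sdhci_pltfm_unregister(pdev);
}

static struct platform_driver sdhci_foo_driver = {
	.driver		= {
		.name	= "sdhci-foo",
		.owner	= THIS_MODULE,
		.pm	= SDHCI_PLTFM_PMOPS,	/* no #ifdef CONFIG_PM needed */
	},
	.probe		= sdhci_foo_probe,
	.remove		= __devexit_p(sdhci_foo_remove),
};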
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d4bf6d3..7a039c3 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -218,13 +218,10 @@
 	.driver		= {
 		.name	= "sdhci-pxav2",
 		.owner	= THIS_MODULE,
+		.pm	= SDHCI_PLTFM_PMOPS,
 	},
 	.probe		= sdhci_pxav2_probe,
 	.remove		= __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
-	.suspend	= sdhci_pltfm_suspend,
-	.resume		= sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav2_init(void)
 {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index cff4ad3..15673a7 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -264,13 +264,10 @@
 	.driver		= {
 		.name	= "sdhci-pxav3",
 		.owner	= THIS_MODULE,
+		.pm	= SDHCI_PLTFM_PMOPS,
 	},
 	.probe		= sdhci_pxav3_probe,
 	.remove		= __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
-	.suspend	= sdhci_pltfm_suspend,
-	.resume		= sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav3_init(void)
 {
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e72..0d33ff0 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -622,33 +622,38 @@
 
 #ifdef CONFIG_PM
 
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
 {
-	struct sdhci_host *host = platform_get_drvdata(dev);
+	struct sdhci_host *host = dev_get_drvdata(dev);
 
-	return sdhci_suspend_host(host, pm);
+	return sdhci_suspend_host(host);
 }
 
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
 {
-	struct sdhci_host *host = platform_get_drvdata(dev);
+	struct sdhci_host *host = dev_get_drvdata(dev);
 
 	return sdhci_resume_host(host);
 }
 
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+	.suspend	= sdhci_s3c_suspend,
+	.resume		= sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
 #else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
 #endif
 
 static struct platform_driver sdhci_s3c_driver = {
 	.probe		= sdhci_s3c_probe,
 	.remove		= __devexit_p(sdhci_s3c_remove),
-	.suspend	= sdhci_s3c_suspend,
-	.resume	        = sdhci_s3c_resume,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "s3c-sdhci",
+		.pm	= SDHCI_S3C_PMOPS,
 	},
 };
 
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 89699e8..e2e18d3 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,13 +318,10 @@
 		.name	= "sdhci-tegra",
 		.owner	= THIS_MODULE,
 		.of_match_table = sdhci_tegra_dt_match,
+		.pm	= SDHCI_PLTFM_PMOPS,
 	},
 	.probe		= sdhci_tegra_probe,
 	.remove		= __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
-	.suspend	= sdhci_pltfm_suspend,
-	.resume		= sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_tegra_init(void)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d8eea3..19ed580 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2327,7 +2327,7 @@
 
 #ifdef CONFIG_PM
 
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
 {
 	int ret;
 
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a5b654..a04d4d0 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -374,7 +374,7 @@
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
 
 #ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
 extern int sdhci_resume_host(struct sdhci_host *host);
 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
 #endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c..d5505f3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@
 		if (host->power) {
 			pm_runtime_put(&host->pd->dev);
 			host->power = false;
-			if (p->down_pwr)
+			if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
 				p->down_pwr(host->pd);
 		}
 		host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60c..4208b39 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@
 		/* start bus clock */
 		tmio_mmc_clk_start(host);
 	} else if (ios->power_mode != MMC_POWER_UP) {
-		if (host->set_pwr)
+		if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
 			host->set_pwr(host->pdev, 0);
 		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
 		    pdata->power) {
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f08f944..c0105a2 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -562,17 +562,7 @@
 	.disconnect = ushc_disconnect,
 };
 
-static int __init ushc_init(void)
-{
-	return usb_register(&ushc_driver);
-}
-module_init(ushc_init);
-
-static void __exit ushc_exit(void)
-{
-	usb_deregister(&ushc_driver);
-}
-module_exit(ushc_exit);
+module_usb_driver(ushc_driver);
 
 MODULE_DESCRIPTION("USB SD Host Controller driver");
 MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
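module_usb_driver() generates the module_init()/module_exit() pair that the hunk above deletes. A self-contained sketch of a trivial USB driver using it; the vendor/product IDs and all names are made up:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id foo_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },		/* made-up VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, foo_id_table);

static int foo_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	dev_info(&intf->dev, "foo device bound\n");
	return 0;
}

static void foo_disconnect(struct usb_interface *intf)
{
	dev_info(&intf->dev, "foo device gone\n");
}

static struct usb_driver foo_driver = {
	.name		= "foo",
	.probe		= foo_probe,
	.disconnect	= foo_disconnect,
	.id_table	= foo_id_table,
};

/* Expands to the usb_register()/usb_deregister() boilerplate removed above. */
module_usb_driver(foo_driver);

MODULE_LICENSE("GPL");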
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e8f6e65..2ec978b 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -259,7 +259,7 @@
 static int firmware_rom_wait_states = 0x1C;
 #endif
 
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
 MODULE_PARM_DESC(firmware_rom_wait_states,
 		 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
 
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index b78f231..ebeabc7 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -14,7 +14,6 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/mtd/mtd.h>
-#include <linux/buffer_head.h>
 #include <linux/mutex.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 94f5534..45876d0 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -227,10 +227,14 @@
 	if (!err)
 		dev_info(&pdev->dev, "registered mtd device\n");
 
-	/* add the whole device. */
-	err = mtd_device_register(info->mtd, NULL, 0);
-	if (err)
-		dev_err(&pdev->dev, "failed to register the entire device\n");
+	if (pdata->nr_partitions) {
+		/* add the whole device. */
+		err = mtd_device_register(info->mtd, NULL, 0);
+		if (err) {
+			dev_err(&pdev->dev,
+				"failed to register the entire device\n");
+		}
+	}
 
 	return err;
 
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 411a17d..2a25b67 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+	mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
 
 	platform_set_drvdata(pdev, info);
 	return 0;
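mtd_device_parse_register() first runs the listed partition parsers and only falls back to the caller-supplied table, so passing the board's partitions (as the fix above does) keeps them usable when no command-line partitions are given. A minimal sketch with hypothetical names:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *example_probes[] = { "cmdlinepart", NULL };

static int example_register(struct mtd_info *mtd,
			    const struct mtd_partition *parts, int nr_parts)
{
	/* parser list first, static fallback partitions last */
	return mtd_device_parse_register(mtd, example_probes, NULL,
					 parts, nr_parts);
}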
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index cce7b70..dd02792 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -420,7 +420,6 @@
 config MTD_NAND_GPMI_NAND
         bool "GPMI NAND Flash Controller driver"
         depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
-	select MTD_PARTITIONS
 	select MTD_CMDLINE_PARTS
         help
 	 Enables NAND Flash support for IMX23 or IMX28.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index eb40ea8..6a5ff64 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -717,17 +717,6 @@
 	.id_table =	alauda_table,
 };
 
-static int __init alauda_init(void)
-{
-	return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
-	usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
+module_usb_driver(alauda_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 071b634..493ec2f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -21,9 +21,9 @@
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
-
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ee17139..f8aacf4 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -188,7 +188,7 @@
 	if (!flash_np)
 		return -ENODEV;
 
-	ppdata->of_node = flash_np;
+	ppdata.of_node = flash_np;
 	ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
 			dev_name(&ndfc->ofdev->dev), flash_np->name);
 	if (!ndfc->mtd.name) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 654a5e9..9845afb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -125,6 +125,8 @@
 	  'ifb1' etc.
 	  Look at the iproute2 documentation directory for usage etc
 
+source "drivers/net/team/Kconfig"
+
 config MACVLAN
 	tristate "MAC-VLAN support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -241,6 +243,8 @@
 
 source "drivers/net/caif/Kconfig"
 
+source "drivers/net/dsa/Kconfig"
+
 source "drivers/net/ethernet/Kconfig"
 
 source "drivers/net/fddi/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd..1988881 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
 obj-$(CONFIG_PHYLIB) += phy/
 obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
 obj-$(CONFIG_ETHERNET) += ethernet/
 obj-$(CONFIG_FDDI) += fddi/
 obj-$(CONFIG_HIPPI) += hippi/
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644
index 027a0ee..0000000
--- a/drivers/net/bonding/bond_ipv6.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
-	struct inet6_dev *idev;
-
-	if (!dev)
-		return;
-
-	idev = in6_dev_get(dev);
-	if (!idev)
-		return;
-
-	read_lock_bh(&idev->lock);
-	if (!list_empty(&idev->addr_list)) {
-		struct inet6_ifaddr *ifa
-			= list_first_entry(&idev->addr_list,
-					   struct inet6_ifaddr, if_list);
-		ipv6_addr_copy(addr, &ifa->addr);
-	} else
-		ipv6_addr_set(addr, 0, 0, 0, 0);
-
-	read_unlock_bh(&idev->lock);
-
-	in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
-			 struct in6_addr *daddr,
-			 int router,
-			 unsigned short vlan_id)
-{
-	struct in6_addr mcaddr;
-	struct icmp6hdr icmp6h = {
-		.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
-	};
-	struct sk_buff *skb;
-
-	icmp6h.icmp6_router = router;
-	icmp6h.icmp6_solicited = 0;
-	icmp6h.icmp6_override = 1;
-
-	addrconf_addr_solict_mult(daddr, &mcaddr);
-
-	pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
-		 slave_dev->name, &mcaddr, daddr);
-
-	skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
-			      ND_OPT_TARGET_LL_ADDR);
-
-	if (!skb) {
-		pr_err("NA packet allocation failed\n");
-		return;
-	}
-
-	if (vlan_id) {
-		/* The Ethernet header is not present yet, so it is
-		 * too early to insert a VLAN tag.  Force use of an
-		 * out-of-line tag here and let dev_hard_start_xmit()
-		 * insert it if the slave hardware can't.
-		 */
-		skb = __vlan_hwaccel_put_tag(skb, vlan_id);
-		if (!skb) {
-			pr_err("failed to insert VLAN tag\n");
-			return;
-		}
-	}
-
-	ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master.  This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
-	struct slave *slave = bond->curr_active_slave;
-	struct vlan_entry *vlan;
-	struct inet6_dev *idev;
-	int is_router;
-
-	pr_debug("%s: bond %s slave %s\n", bond->dev->name,
-		 __func__, slave ? slave->dev->name : "NULL");
-
-	if (!slave || !bond->send_unsol_na ||
-	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
-		return;
-
-	bond->send_unsol_na--;
-
-	idev = in6_dev_get(bond->dev);
-	if (!idev)
-		return;
-
-	is_router = !!idev->cnf.forwarding;
-
-	in6_dev_put(idev);
-
-	if (!ipv6_addr_any(&bond->master_ipv6))
-		bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
-			bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
-				     vlan->vlan_id);
-		}
-	}
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
-				unsigned long event,
-				void *ptr)
-{
-	struct inet6_ifaddr *ifa = ptr;
-	struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
-	struct bonding *bond;
-	struct vlan_entry *vlan;
-	struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
-	list_for_each_entry(bond, &bn->dev_list, bond_list) {
-		if (bond->dev == event_dev) {
-			switch (event) {
-			case NETDEV_UP:
-				if (ipv6_addr_any(&bond->master_ipv6))
-					ipv6_addr_copy(&bond->master_ipv6,
-						       &ifa->addr);
-				return NOTIFY_OK;
-			case NETDEV_DOWN:
-				if (ipv6_addr_equal(&bond->master_ipv6,
-						    &ifa->addr))
-					bond_glean_dev_ipv6(bond->dev,
-							    &bond->master_ipv6);
-				return NOTIFY_OK;
-			default:
-				return NOTIFY_DONE;
-			}
-		}
-
-		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-			rcu_read_lock();
-			vlan_dev = __vlan_find_dev_deep(bond->dev,
-							vlan->vlan_id);
-			rcu_read_unlock();
-			if (vlan_dev == event_dev) {
-				switch (event) {
-				case NETDEV_UP:
-					if (ipv6_addr_any(&vlan->vlan_ipv6))
-						ipv6_addr_copy(&vlan->vlan_ipv6,
-							       &ifa->addr);
-					return NOTIFY_OK;
-				case NETDEV_DOWN:
-					if (ipv6_addr_equal(&vlan->vlan_ipv6,
-							    &ifa->addr))
-						bond_glean_dev_ipv6(vlan_dev,
-								    &vlan->vlan_ipv6);
-					return NOTIFY_OK;
-				default:
-					return NOTIFY_DONE;
-				}
-			}
-		}
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
-	.notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
-	register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
-	unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7f87568..435984a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -428,27 +428,34 @@
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being added
  */
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave;
+	struct slave *slave, *stop_at;
 	int i, res;
 
 	bond_for_each_slave(bond, slave, i) {
-		struct net_device *slave_dev = slave->dev;
-		const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-		if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-		    slave_ops->ndo_vlan_rx_add_vid) {
-			slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
-		}
+		res = vlan_vid_add(slave->dev, vid);
+		if (res)
+			goto unwind;
 	}
 
 	res = bond_add_vlan(bond, vid);
 	if (res) {
 		pr_err("%s: Error: Failed to add vlan id %d\n",
 		       bond_dev->name, vid);
+		return res;
 	}
+
+	return 0;
+
+unwind:
+	/* unwind from head to the slave that failed */
+	stop_at = slave;
+	bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+		vlan_vid_del(slave->dev, vid);
+
+	return res;
 }
 
 /**
@@ -456,56 +463,48 @@
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being removed
  */
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave;
 	int i, res;
 
-	bond_for_each_slave(bond, slave, i) {
-		struct net_device *slave_dev = slave->dev;
-		const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-		if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-		    slave_ops->ndo_vlan_rx_kill_vid) {
-			slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
-		}
-	}
+	bond_for_each_slave(bond, slave, i)
+		vlan_vid_del(slave->dev, vid);
 
 	res = bond_del_vlan(bond, vid);
 	if (res) {
 		pr_err("%s: Error: Failed to remove vlan id %d\n",
 		       bond_dev->name, vid);
+		return res;
 	}
+
+	return 0;
 }
 
 static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
 {
 	struct vlan_entry *vlan;
-	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
+	int res;
 
-	if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-	    !(slave_ops->ndo_vlan_rx_add_vid))
-		return;
-
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
-		slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+		res = vlan_vid_add(slave_dev, vlan->vlan_id);
+		if (res)
+			pr_warning("%s: Failed to add vlan id %d to device %s\n",
+				   bond->dev->name, vlan->vlan_id,
+				   slave_dev->name);
+	}
 }
 
 static void bond_del_vlans_from_slave(struct bonding *bond,
 				      struct net_device *slave_dev)
 {
-	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
 	struct vlan_entry *vlan;
 
-	if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-	    !(slave_ops->ndo_vlan_rx_kill_vid))
-		return;
-
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 		if (!vlan->vlan_id)
 			continue;
-		slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+		vlan_vid_del(slave_dev, vlan->vlan_id);
 	}
 }
 
@@ -1325,11 +1324,12 @@
 	return 0;
 }
 
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct slave *slave;
 	struct bonding *bond = netdev_priv(dev);
-	u32 mask;
+	netdev_features_t mask;
 	int i;
 
 	read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@
 {
 	struct slave *slave;
 	struct net_device *bond_dev = bond->dev;
-	u32 vlan_features = BOND_VLAN_FEATURES;
+	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
 	unsigned short max_hard_header_len = ETH_HLEN;
 	int i;
 
@@ -1822,7 +1822,7 @@
 				 "but new slave device does not support netpoll.\n",
 				 bond_dev->name);
 			res = -EBUSY;
-			goto err_close;
+			goto err_detach;
 		}
 	}
 #endif
@@ -1831,7 +1831,7 @@
 
 	res = bond_create_slave_symlinks(bond_dev, slave_dev);
 	if (res)
-		goto err_close;
+		goto err_detach;
 
 	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
 					 new_slave);
@@ -1852,6 +1852,11 @@
 err_dest_symlinks:
 	bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
+err_detach:
+	write_lock_bh(&bond->lock);
+	bond_detach_slave(bond, new_slave);
+	write_unlock_bh(&bond->lock);
+
 err_close:
 	dev_close(slave_dev);
 
@@ -1897,7 +1902,7 @@
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *oldcurrent;
 	struct sockaddr addr;
-	u32 old_features = bond_dev->features;
+	netdev_features_t old_features = bond_dev->features;
 
 	/* slave is not a slave or master is not master of this slave */
 	if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -4339,7 +4344,7 @@
 				NETIF_F_HW_VLAN_RX |
 				NETIF_F_HW_VLAN_FILTER;
 
-	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
 	bond_dev->features |= bond_dev->hw_features;
 }
 
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4ef7e2f..aef42f0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/sched.h>
-#include <linux/sysdev.h>
 #include <linux/fs.h>
 #include <linux/types.h>
 #include <linux/string.h>
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 0733525..0a4fc62 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -117,15 +117,6 @@
 	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
 		__func__);
 
-
-	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
-	if (ret) {
-		dev_warn(&cfhsi->ndev->dev,
-			"%s: can't wake up HSI interface: %d.\n",
-			__func__, ret);
-		return ret;
-	}
-
 	do {
 		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 				&fifo_occupancy);
@@ -168,8 +159,6 @@
 		}
 	} while (1);
 
-	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
 	return ret;
 }
 
@@ -944,7 +933,7 @@
 
 		/* Create HSI frame. */
 		len = cfhsi_tx_frm(desc, cfhsi);
-		BUG_ON(!len);
+		WARN_ON(!len);
 
 		/* Set up new transfer. */
 		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 23406e6..8a3054b 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -38,15 +38,15 @@
 /*This list is protected by the rtnl lock. */
 static LIST_HEAD(ser_list);
 
-static int ser_loop;
+static bool ser_loop;
 module_param(ser_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
 
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
 module_param(ser_use_stx, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
 
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
 
 module_param(ser_use_fcs, bool, S_IRUGO);
 MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@
 		skb_pull(skb, tty_wr);
 		if (skb->len == 0) {
 			struct sk_buff *tmp = skb_dequeue(&ser->head);
-			BUG_ON(tmp != skb);
+			WARN_ON(tmp != skb);
 			if (in_interrupt())
 				dev_kfree_skb_irq(skb);
 			else
@@ -305,7 +305,7 @@
 
 	ser = tty->disc_data;
 	BUG_ON(ser == NULL);
-	BUG_ON(ser->tty != tty);
+	WARN_ON(ser->tty != tty);
 	handle_tx(ser);
 }
 
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index d4b26fb..5b20413 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -238,11 +238,11 @@
 		if ((avail_emptybuff > HIGH_WATERMARK) &&
 					(!pshm_drv->tx_empty_available)) {
 			pshm_drv->tx_empty_available = 1;
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
 			pshm_drv->cfdev.flowctrl
 					(pshm_drv->pshm_dev->pshm_netdev,
 								CAIF_FLOW_ON);
 
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
 			/* Schedule the work queue. if required */
 			if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@
 			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
 					list);
 		list_del_init(&pbuf->list);
+		spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
 		/* Retrieve pointer to start of the packet descriptor area. */
 		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@
 			/* Get a suitable CAIF packet and copy in data. */
 			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
 							frm_pck_len + 1);
-			BUG_ON(skb == NULL);
+
+			if (skb == NULL) {
+				pr_info("OOM: Try next frame in descriptor\n");
+				break;
+			}
 
 			p = skb_put(skb, frm_pck_len);
 			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@
 			pck_desc++;
 		}
 
+		spin_lock_irqsave(&pshm_drv->lock, flags);
 		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
 
 		spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@
 
 		if (skb == NULL)
 			goto send_msg;
-
 		/* Check the available no. of buffers in the empty list */
 		list_for_each(pos, &pshm_drv->tx_empty_list)
 			avail_emptybuff++;
@@ -421,9 +426,11 @@
 					pshm_drv->tx_empty_available) {
 			/* Update blocking condition. */
 			pshm_drv->tx_empty_available = 0;
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
 			pshm_drv->cfdev.flowctrl
 					(pshm_drv->pshm_dev->pshm_netdev,
 					CAIF_FLOW_OFF);
+			spin_lock_irqsave(&pshm_drv->lock, flags);
 		}
 		/*
 		 * We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@
 			}
 
 			skb = skb_dequeue(&pshm_drv->sk_qhead);
+			if (skb == NULL)
+				break;
 			/* Copy in CAIF frame. */
 			skb_copy_bits(skb, 0, pbuf->desc_vptr +
 					pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@
 			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
 			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
 									frmlen;
-			dev_kfree_skb(skb);
+			dev_kfree_skb_irq(skb);
 
 			/* Fill in the shared memory packet descriptor area. */
 			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@
 static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
 {
 	struct shmdrv_layer *pshm_drv;
-	unsigned long flags = 0;
 
 	pshm_drv = netdev_priv(shm_netdev);
 
-	spin_lock_irqsave(&pshm_drv->lock, flags);
-
 	skb_queue_tail(&pshm_drv->sk_qhead, skb);
 
-	spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
 	/* Schedule Tx work queue. for deferred processing of skbs*/
 	if (!work_pending(&pshm_drv->shm_tx_work))
 		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@
 		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
 						(NR_TX_BUF * TX_BUF_SZ);
 
+	spin_lock_init(&pshm_drv->lock);
 	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
 	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
 	INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@
 		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
 
 		if (pshm_dev->shm_loopback)
-			tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
 		else
 			tx_buf->desc_vptr =
 					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@
 		rx_buf->len = RX_BUF_SZ;
 
 		if (pshm_dev->shm_loopback)
-			rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
 		else
 			rx_buf->desc_vptr =
 					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 05e791f..96391c3 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,7 +35,7 @@
 /* Returns the number of padding bytes for alignment. */
 #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
 
-static int spi_loop;
+static bool spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
 
@@ -226,7 +226,7 @@
 			"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
 
 	len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
-			   cfspi->xfer.va_tx,
+			   cfspi->xfer.va_tx[0],
 			   (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
 
 	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@
 	netif_stop_queue(dev);
 	return 0;
 }
-static const struct net_device_ops cfspi_ops = {
-	.ndo_open = cfspi_open,
-	.ndo_stop = cfspi_close,
-	.ndo_start_xmit = cfspi_xmit
-};
 
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
 {
+	int res = 0;
 	struct cfspi *cfspi = netdev_priv(dev);
-	dev->features = 0;
-	dev->netdev_ops = &cfspi_ops;
-	dev->type = ARPHRD_CAIF;
-	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-	dev->tx_queue_len = 0;
-	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
-	dev->destructor = free_netdev;
-	skb_queue_head_init(&cfspi->qhead);
-	skb_queue_head_init(&cfspi->chead);
-	cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
-	cfspi->cfdev.use_frag = false;
-	cfspi->cfdev.use_stx = false;
-	cfspi->cfdev.use_fcs = false;
-	cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
-	struct cfspi *cfspi = NULL;
-	struct net_device *ndev;
-	struct cfspi_dev *dev;
-	int res;
-	dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
-	ndev = alloc_netdev(sizeof(struct cfspi),
-			"cfspi%d", cfspi_setup);
-	if (!ndev)
-		return -ENOMEM;
-
-	cfspi = netdev_priv(ndev);
-	netif_stop_queue(ndev);
-	cfspi->ndev = ndev;
-	cfspi->pdev = pdev;
 
 	/* Set flow info. */
 	cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@
 		cfspi->slave_talked = false;
 	}
 
-	/* Assign the SPI device. */
-	cfspi->dev = dev;
-	/* Assign the device ifc to this SPI interface. */
-	dev->ifc = &cfspi->ifc;
-
 	/* Allocate DMA buffers. */
-	cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
-	if (!cfspi->xfer.va_tx) {
+	cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+	if (!cfspi->xfer.va_tx[0]) {
 		res = -ENODEV;
-		goto err_dma_alloc_tx;
+		goto err_dma_alloc_tx_0;
 	}
 
 	cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@
 	/* Schedule the work queue. */
 	queue_work(cfspi->wq, &cfspi->work);
 
+	return 0;
+
+ err_create_wq:
+	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+	return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+	struct cfspi *cfspi = netdev_priv(dev);
+
+	/* Remove from list. */
+	spin_lock(&cfspi_list_lock);
+	list_del(&cfspi->list);
+	spin_unlock(&cfspi_list_lock);
+
+	cfspi->ndev = NULL;
+	/* Free DMA buffers. */
+	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+	set_bit(SPI_TERMINATE, &cfspi->state);
+	wake_up_interruptible(&cfspi->wait);
+	destroy_workqueue(cfspi->wq);
+	/* Destroy debugfs directory and files. */
+	dev_debugfs_rem(cfspi);
+	return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+	.ndo_open = cfspi_open,
+	.ndo_stop = cfspi_close,
+	.ndo_init = cfspi_init,
+	.ndo_uninit = cfspi_uninit,
+	.ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+	struct cfspi *cfspi = netdev_priv(dev);
+	dev->features = 0;
+	dev->netdev_ops = &cfspi_ops;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+	dev->tx_queue_len = 0;
+	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+	dev->destructor = free_netdev;
+	skb_queue_head_init(&cfspi->qhead);
+	skb_queue_head_init(&cfspi->chead);
+	cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+	cfspi->cfdev.use_frag = false;
+	cfspi->cfdev.use_stx = false;
+	cfspi->cfdev.use_fcs = false;
+	cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+	struct cfspi *cfspi = NULL;
+	struct net_device *ndev;
+	struct cfspi_dev *dev;
+	int res;
+	dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+	ndev = alloc_netdev(sizeof(struct cfspi),
+			"cfspi%d", cfspi_setup);
+	if (!ndev)
+		return -ENOMEM;
+	if (!dev)
+		return -ENODEV;
+
+	cfspi = netdev_priv(ndev);
+	netif_stop_queue(ndev);
+	cfspi->ndev = ndev;
+	cfspi->pdev = pdev;
+
+	/* Assign the SPI device. */
+	cfspi->dev = dev;
+	/* Assign the device ifc to this SPI interface. */
+	dev->ifc = &cfspi->ifc;
+
 	/* Register network device. */
 	res = register_netdev(ndev);
 	if (res) {
@@ -723,15 +762,6 @@
 	return res;
 
  err_net_reg:
-	dev_debugfs_rem(cfspi);
-	set_bit(SPI_TERMINATE, &cfspi->state);
-	wake_up_interruptible(&cfspi->wait);
-	destroy_workqueue(cfspi->wq);
- err_create_wq:
-	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
-	dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
 	free_netdev(ndev);
 
 	return res;
@@ -739,34 +769,8 @@
 
 int cfspi_spi_remove(struct platform_device *pdev)
 {
-	struct list_head *list_node;
-	struct list_head *n;
-	struct cfspi *cfspi = NULL;
-	struct cfspi_dev *dev;
-
-	dev = (struct cfspi_dev *)pdev->dev.platform_data;
-	spin_lock(&cfspi_list_lock);
-	list_for_each_safe(list_node, n, &cfspi_list) {
-		cfspi = list_entry(list_node, struct cfspi, list);
-		/* Find the corresponding device. */
-		if (cfspi->dev == dev) {
-			/* Remove from list. */
-			list_del(list_node);
-			/* Free DMA buffers. */
-			dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
-			dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
-			set_bit(SPI_TERMINATE, &cfspi->state);
-			wake_up_interruptible(&cfspi->wait);
-			destroy_workqueue(cfspi->wq);
-			/* Destroy debugfs directory and files. */
-			dev_debugfs_rem(cfspi);
-			unregister_netdev(cfspi->ndev);
-			spin_unlock(&cfspi_list_lock);
-			return 0;
-		}
-	}
-	spin_unlock(&cfspi_list_lock);
-	return -ENODEV;
+	/* Everything is done in cfspi_uninit(). */
+	return 0;
 }
 
 static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@
 
 	list_for_each_safe(list_node, n, &cfspi_list) {
 		cfspi = list_entry(list_node, struct cfspi, list);
-		platform_device_unregister(cfspi->pdev);
+		unregister_netdev(cfspi->ndev);
 	}
 
 	/* Destroy sysfs files. */
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f6c98fb..ab45758 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -116,6 +116,8 @@
 
 source "drivers/net/can/c_can/Kconfig"
 
+source "drivers/net/can/cc770/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
 source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 24ebfe8..938be37 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
 obj-$(CONFIG_CAN_MSCAN)		+= mscan/
 obj-$(CONFIG_CAN_C_CAN)		+= c_can/
+obj-$(CONFIG_CAN_CC770)		+= cc770/
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 044ea06..6ea905c 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1383,18 +1383,7 @@
 	.id_table = at91_can_id_table,
 };
 
-static int __init at91_can_module_init(void)
-{
-	return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
-	platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
 
 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a1c5abc..349e0fa 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -676,17 +676,7 @@
 	},
 };
 
-static int __init bfin_can_init(void)
-{
-	return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
-	platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 0b5c6f8..5e1a5ff 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -197,17 +197,7 @@
 	.remove = __devexit_p(c_can_plat_remove),
 };
 
-static int __init c_can_plat_init(void)
-{
-	return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
-	platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
 
 MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644
index 0000000..22c07a8
--- /dev/null
+++ b/drivers/net/can/cc770/Kconfig
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+	tristate "Bosch CC770 and Intel AN82527 devices"
+	depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+	tristate "ISA Bus based legacy CC770 driver"
+	---help---
+	  This driver adds legacy support for CC770 and AN82527 chips
+	  connected to the ISA bus using I/O port, memory mapped or
+	  indirect access.
+
+config CAN_CC770_PLATFORM
+	tristate "Generic Platform Bus based CC770 driver"
+	---help---
+	  This driver adds support for the CC770 and AN82527 chips
+	  connected to the "platform bus" (the Linux abstraction for devices
+	  attached directly to the processor).
+
+endif
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644
index 0000000..9fb8321
--- /dev/null
+++ b/drivers/net/can/cc770/Makefile
@@ -0,0 +1,9 @@
+#
+#  Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644
index 0000000..7668967
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.c
@@ -0,0 +1,881 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses the additional functionality 3. on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames, so it is
+ * set to 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while the message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load, so it makes sense to use this
+ * message object for that use case. The frame type (EFF/SFF) for
+ * message object 15 can be selected via the kernel module parameter
+ * "msgobj15_eff". If not equal to 0, it will receive 29-bit EFF frames,
+ * otherwise 11-bit SFF messages.
+ */
+static int msgobj15_eff;
+module_param(msgobj15_eff, int, S_IRUGO);
+MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
+		 "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
+		 "without using additional functions");
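+
+/*
+ * For example, receiving 29-bit EFF frames on message object 15 while
+ * keeping the additional CC770 functions enabled:
+ *
+ *   insmod cc770.ko msgobj15_eff=1 i82527_compat=0
+ */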
+
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structures below allow configuring and assigning them to the real
+ * message objects.
+ */
+static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
+	[CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
+	[CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
+	[CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
+	[CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
+			      CC770_OBJ_FLAG_EFF,
+	[CC770_OBJ_TX] = 0,
+};
+
+static struct can_bittiming_const cc770_bittiming_const = {
+	.name = KBUILD_MODNAME,
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+
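+/*
+ * Map an interrupt register id to the driver's object index, e.g. intid 2
+ * maps to index 0 (message object 15) and intid 13 maps to CC770_OBJ_TX
+ * (message object 11).
+ */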
+static inline int intid2obj(unsigned int intid)
+{
+	if (intid == 2)
+		return 0;
+	else
+		return MSGOBJ_LAST + 2 - intid;
+}
+
+static void enable_all_objs(const struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	u8 msgcfg;
+	unsigned char obj_flags;
+	unsigned int o, mo;
+
+	for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+		obj_flags = priv->obj_flags[o];
+		mo = obj2msgobj(o);
+
+		if (obj_flags & CC770_OBJ_FLAG_RX) {
+			/*
+			 * We don't need extra objects for RTR and EFF if
+			 * the additional CC770 functions are enabled.
+			 */
+			if (priv->control_normal_mode & CTRL_EAF) {
+				if (o > 0)
+					continue;
+				netdev_dbg(dev, "Message object %d for "
+					   "RX data, RTR, SFF and EFF\n", mo);
+			} else {
+				netdev_dbg(dev,
+					   "Message object %d for RX %s %s\n",
+					   mo, obj_flags & CC770_OBJ_FLAG_RTR ?
+					   "RTR" : "data",
+					   obj_flags & CC770_OBJ_FLAG_EFF ?
+					   "EFF" : "SFF");
+			}
+
+			if (obj_flags & CC770_OBJ_FLAG_EFF)
+				msgcfg = MSGCFG_XTD;
+			else
+				msgcfg = 0;
+			if (obj_flags & CC770_OBJ_FLAG_RTR)
+				msgcfg |= MSGCFG_DIR;
+
+			cc770_write_reg(priv, msgobj[mo].config, msgcfg);
+			cc770_write_reg(priv, msgobj[mo].ctrl0,
+					MSGVAL_SET | TXIE_RES |
+					RXIE_SET | INTPND_RES);
+
+			if (obj_flags & CC770_OBJ_FLAG_RTR)
+				cc770_write_reg(priv, msgobj[mo].ctrl1,
+						NEWDAT_RES | CPUUPD_SET |
+						TXRQST_RES | RMTPND_RES);
+			else
+				cc770_write_reg(priv, msgobj[mo].ctrl1,
+						NEWDAT_RES | MSGLST_RES |
+						TXRQST_RES | RMTPND_RES);
+		} else {
+			netdev_dbg(dev, "Message object %d for "
+				   "TX data, RTR, SFF and EFF\n", mo);
+
+			cc770_write_reg(priv, msgobj[mo].ctrl1,
+					RMTPND_RES | TXRQST_RES |
+					CPUUPD_RES | NEWDAT_RES);
+			cc770_write_reg(priv, msgobj[mo].ctrl0,
+					MSGVAL_RES | TXIE_RES |
+					RXIE_RES | INTPND_RES);
+		}
+	}
+}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+	int o, mo;
+
+	for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+		mo = obj2msgobj(o);
+
+		if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+			if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+				continue;
+
+			cc770_write_reg(priv, msgobj[mo].ctrl1,
+					NEWDAT_RES | MSGLST_RES |
+					TXRQST_RES | RMTPND_RES);
+			cc770_write_reg(priv, msgobj[mo].ctrl0,
+					MSGVAL_RES | TXIE_RES |
+					RXIE_RES | INTPND_RES);
+		} else {
+			/* Clear message object for send */
+			cc770_write_reg(priv, msgobj[mo].ctrl1,
+					RMTPND_RES | TXRQST_RES |
+					CPUUPD_RES | NEWDAT_RES);
+			cc770_write_reg(priv, msgobj[mo].ctrl0,
+					MSGVAL_RES | TXIE_RES |
+					RXIE_RES | INTPND_RES);
+		}
+	}
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+
+	/* Enable configuration, put chip in bus-off and disable interrupts */
+	cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+
+	priv->can.state = CAN_STATE_STOPPED;
+
+	/* Clear interrupts */
+	cc770_read_reg(priv, interrupt);
+
+	/* Clear status register */
+	cc770_write_reg(priv, status, 0);
+
+	/* Disable all used message objects */
+	disable_all_objs(priv);
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+
+	/* Clear interrupts */
+	cc770_read_reg(priv, interrupt);
+
+	/* Clear status register and pre-set last error code */
+	cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+	/* Enable all used message objects */
+	enable_all_objs(dev);
+
+	/*
+	 * Clear bus-off, interrupts only for errors,
+	 * not for status change
+	 */
+	cc770_write_reg(priv, control, priv->control_normal_mode);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void chipset_init(struct cc770_priv *priv)
+{
+	int mo, id, data;
+
+	/* Enable configuration and put chip in bus-off, disable interrupts */
+	cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));
+
+	/* Set CLKOUT divider and slew rates */
+	cc770_write_reg(priv, clkout, priv->clkout);
+
+	/* Configure CPU interface / CLKOUT enable */
+	cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+	/* Set bus configuration */
+	cc770_write_reg(priv, bus_config, priv->bus_config);
+
+	/* Clear interrupts */
+	cc770_read_reg(priv, interrupt);
+
+	/* Clear status register */
+	cc770_write_reg(priv, status, 0);
+
+	/* Clear and invalidate message objects */
+	for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
+		cc770_write_reg(priv, msgobj[mo].ctrl0,
+				INTPND_UNC | RXIE_RES |
+				TXIE_RES | MSGVAL_RES);
+		cc770_write_reg(priv, msgobj[mo].ctrl0,
+				INTPND_RES | RXIE_RES |
+				TXIE_RES | MSGVAL_RES);
+		cc770_write_reg(priv, msgobj[mo].ctrl1,
+				NEWDAT_RES | MSGLST_RES |
+				TXRQST_RES | RMTPND_RES);
+		for (data = 0; data < 8; data++)
+			cc770_write_reg(priv, msgobj[mo].data[data], 0);
+		for (id = 0; id < 4; id++)
+			cc770_write_reg(priv, msgobj[mo].id[id], 0);
+		cc770_write_reg(priv, msgobj[mo].config, 0);
+	}
+
+	/* Set all global ID masks to "don't care" */
+	cc770_write_reg(priv, global_mask_std[0], 0);
+	cc770_write_reg(priv, global_mask_std[1], 0);
+	cc770_write_reg(priv, global_mask_ext[0], 0);
+	cc770_write_reg(priv, global_mask_ext[1], 0);
+	cc770_write_reg(priv, global_mask_ext[2], 0);
+	cc770_write_reg(priv, global_mask_ext[3], 0);
+
+}
+
+static int cc770_probe_chip(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+
+	/* Enable configuration, put chip in bus-off, disable ints */
+	cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
+	/* Configure cpu interface / CLKOUT disable */
+	cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+	/*
+	 * Check if the hardware reset is still active or maybe there
+	 * is no chip in this address space
+	 */
+	if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
+		netdev_info(dev, "probing @0x%p failed (reset)\n",
+			    priv->reg_base);
+		return -ENODEV;
+	}
+
+	/* Write and read back test pattern (some arbitrary values) */
+	cc770_write_reg(priv, msgobj[1].data[1], 0x25);
+	cc770_write_reg(priv, msgobj[2].data[3], 0x52);
+	cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
+	if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
+	    (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
+	    (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
+		netdev_info(dev, "probing @0x%p failed (pattern)\n",
+			    priv->reg_base);
+		return -ENODEV;
+	}
+
+	/* Check if this chip is a CC770 supporting additional functions */
+	if (cc770_read_reg(priv, control) & CTRL_EAF)
+		priv->control_normal_mode |= CTRL_EAF;
+
+	return 0;
+}
+
+static void cc770_start(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+
+	/* enter reset mode */
+	if (priv->can.state != CAN_STATE_STOPPED)
+		set_reset_mode(dev);
+
+	/* leave reset mode */
+	set_normal_mode(dev);
+}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		cc770_start(dev);
+		netif_wake_queue(dev);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int cc770_set_bittiming(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct can_bittiming *bt = &priv->can.bittiming;
+	u8 btr0, btr1;
+
+	btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+	btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+		(((bt->phase_seg2 - 1) & 0x7) << 4);
+	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+		btr1 |= 0x80;
+
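+	/*
+	 * e.g. brp = 4, sjw = 1, prop_seg + phase_seg1 = 13 and
+	 * phase_seg2 = 2 result in BTR0 = 0x03 and BTR1 = 0x1c.
+	 */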
+	netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+	cc770_write_reg(priv, bit_timing_0, btr0);
+	cc770_write_reg(priv, bit_timing_1, btr1);
+
+	return 0;
+}
+
+static int cc770_get_berr_counter(const struct net_device *dev,
+				  struct can_berr_counter *bec)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+
+	bec->txerr = cc770_read_reg(priv, tx_error_counter);
+	bec->rxerr = cc770_read_reg(priv, rx_error_counter);
+
+	return 0;
+}
+
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+	u8 dlc, rtr;
+	u32 id;
+	int i;
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	if ((cc770_read_reg(priv,
+			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+		netdev_err(dev, "TX register is still occupied!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	netif_stop_queue(dev);
+
+	dlc = cf->can_dlc;
+	id = cf->can_id;
+	if (cf->can_id & CAN_RTR_FLAG)
+		rtr = 0;
+	else
+		rtr = MSGCFG_DIR;
+	cc770_write_reg(priv, msgobj[mo].ctrl1,
+			RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+	cc770_write_reg(priv, msgobj[mo].ctrl0,
+			MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+	if (id & CAN_EFF_FLAG) {
+		id &= CAN_EFF_MASK;
+		cc770_write_reg(priv, msgobj[mo].config,
+				(dlc << 4) | rtr | MSGCFG_XTD);
+		cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
+		cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
+		cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
+		cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
+	} else {
+		id &= CAN_SFF_MASK;
+		cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
+		cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
+		cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
+	}
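+	/*
+	 * e.g. an SFF can_id of 0x123 ends up left-aligned in the chip
+	 * registers: id[0] = 0x24, id[1] = 0x60.
+	 */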
+
+	for (i = 0; i < dlc; i++)
+		cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+	cc770_write_reg(priv, msgobj[mo].ctrl1,
+			RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+
+	stats->tx_bytes += dlc;
+
+	can_put_echo_skb(skb, dev, 0);
+
+	/*
+	 * HM: We had some cases of repeated IRQs, so make sure the
+	 * INT is acknowledged. I know it's already done further up,
+	 * but doing it again fixed the issue.
+	 */
+	cc770_write_reg(priv, msgobj[mo].ctrl0,
+			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+	return NETDEV_TX_OK;
+}
+
+static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u8 config;
+	u32 id;
+	int i;
+
+	skb = alloc_can_skb(dev, &cf);
+	if (!skb)
+		return;
+
+	config = cc770_read_reg(priv, msgobj[mo].config);
+
+	if (ctrl1 & RMTPND_SET) {
+		/*
+		 * Unfortunately, the chip does not store the real message
+		 * identifier of the received remote transmission request
+		 * frame. Therefore we set it to 0.
+		 */
+		cf->can_id = CAN_RTR_FLAG;
+		if (config & MSGCFG_XTD)
+			cf->can_id |= CAN_EFF_FLAG;
+		cf->can_dlc = 0;
+	} else {
+		if (config & MSGCFG_XTD) {
+			id = cc770_read_reg(priv, msgobj[mo].id[3]);
+			id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
+			id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
+			id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
+			id >>= 3;
+			id |= CAN_EFF_FLAG;
+		} else {
+			id = cc770_read_reg(priv, msgobj[mo].id[1]);
+			id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
+			id >>= 5;
+		}
+
+		cf->can_id = id;
+		cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
+		for (i = 0; i < cf->can_dlc; i++)
+			cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
+	}
+	netif_rx(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+static int cc770_err(struct net_device *dev, u8 status)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u8 lec;
+
+	netdev_dbg(dev, "status interrupt (%#x)\n", status);
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (!skb)
+		return -ENOMEM;
+
+	/* Use extended functions of the CC770 */
+	if (priv->control_normal_mode & CTRL_EAF) {
+		cf->data[6] = cc770_read_reg(priv, tx_error_counter);
+		cf->data[7] = cc770_read_reg(priv, rx_error_counter);
+	}
+
+	if (status & STAT_BOFF) {
+		/* Disable interrupts */
+		cc770_write_reg(priv, control, CTRL_INI);
+		cf->can_id |= CAN_ERR_BUSOFF;
+		priv->can.state = CAN_STATE_BUS_OFF;
+		can_bus_off(dev);
+	} else if (status & STAT_WARN) {
+		cf->can_id |= CAN_ERR_CRTL;
+		/* Only the CC770 shows error passive */
+		if (cf->data[7] > 127) {
+			cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
+				CAN_ERR_CRTL_TX_PASSIVE;
+			priv->can.state = CAN_STATE_ERROR_PASSIVE;
+			priv->can.can_stats.error_passive++;
+		} else {
+			cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
+				CAN_ERR_CRTL_TX_WARNING;
+			priv->can.state = CAN_STATE_ERROR_WARNING;
+			priv->can.can_stats.error_warning++;
+		}
+	} else {
+		/* Back to error active */
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+	}
+
+	lec = status & STAT_LEC_MASK;
+	if (lec < 7 && lec > 0) {
+		if (lec == STAT_LEC_ACK) {
+			cf->can_id |= CAN_ERR_ACK;
+		} else {
+			cf->can_id |= CAN_ERR_PROT;
+			switch (lec) {
+			case STAT_LEC_STUFF:
+				cf->data[2] |= CAN_ERR_PROT_STUFF;
+				break;
+			case STAT_LEC_FORM:
+				cf->data[2] |= CAN_ERR_PROT_FORM;
+				break;
+			case STAT_LEC_BIT1:
+				cf->data[2] |= CAN_ERR_PROT_BIT1;
+				break;
+			case STAT_LEC_BIT0:
+				cf->data[2] |= CAN_ERR_PROT_BIT0;
+				break;
+			case STAT_LEC_CRC:
+				cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+				break;
+			}
+		}
+	}
+
+	netif_rx(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+
+	return 0;
+}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	u8 status;
+
+	status = cc770_read_reg(priv, status);
+	/* Reset the status register including RXOK and TXOK */
+	cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+	if (status & (STAT_WARN | STAT_BOFF) ||
+	    (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+		cc770_err(dev, status);
+		return status & STAT_BOFF;
+	}
+
+	return 0;
+}
+
+static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned int mo = obj2msgobj(o);
+	u8 ctrl1;
+	int n = CC770_MAX_MSG;
+
+	while (n--) {
+		ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+		if (!(ctrl1 & NEWDAT_SET))  {
+			/* Check for RTR if additional functions are enabled */
+			if (priv->control_normal_mode & CTRL_EAF) {
+				if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
+				      INTPND_SET))
+					break;
+			} else {
+				break;
+			}
+		}
+
+		if (ctrl1 & MSGLST_SET) {
+			stats->rx_over_errors++;
+			stats->rx_errors++;
+		}
+		if (mo < MSGOBJ_LAST)
+			cc770_write_reg(priv, msgobj[mo].ctrl1,
+					NEWDAT_RES | MSGLST_RES |
+					TXRQST_UNC | RMTPND_UNC);
+		cc770_rx(dev, mo, ctrl1);
+
+		cc770_write_reg(priv, msgobj[mo].ctrl0,
+				MSGVAL_SET | TXIE_RES |
+				RXIE_SET | INTPND_RES);
+		cc770_write_reg(priv, msgobj[mo].ctrl1,
+				NEWDAT_RES | MSGLST_RES |
+				TXRQST_RES | RMTPND_RES);
+	}
+}
+
+static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	unsigned int mo = obj2msgobj(o);
+	u8 ctrl0, ctrl1;
+	int n = CC770_MAX_MSG;
+
+	while (n--) {
+		ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
+		if (!(ctrl0 & INTPND_SET))
+			break;
+
+		ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+		cc770_rx(dev, mo, ctrl1);
+
+		cc770_write_reg(priv, msgobj[mo].ctrl0,
+				MSGVAL_SET | TXIE_RES |
+				RXIE_SET | INTPND_RES);
+		cc770_write_reg(priv, msgobj[mo].ctrl1,
+				NEWDAT_RES | CPUUPD_SET |
+				TXRQST_RES | RMTPND_RES);
+	}
+}
+
+static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned int mo = obj2msgobj(o);
+
+	/* Nothing more to send, switch off interrupts */
+	cc770_write_reg(priv, msgobj[mo].ctrl0,
+			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+	/*
+	 * We had some cases of repeated IRQs, so make sure the
+	 * INT is acknowledged.
+	 */
+	cc770_write_reg(priv, msgobj[mo].ctrl0,
+			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+	stats->tx_packets++;
+	can_get_echo_skb(dev, 0);
+	netif_wake_queue(dev);
+}
+
+irqreturn_t cc770_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct cc770_priv *priv = netdev_priv(dev);
+	u8 intid;
+	int o, n = 0;
+
+	/* Shared interrupts and IRQ off? */
+	if (priv->can.state == CAN_STATE_STOPPED)
+		return IRQ_NONE;
+
+	if (priv->pre_irq)
+		priv->pre_irq(priv);
+
+	while (n < CC770_MAX_IRQ) {
+		/* Read the highest pending interrupt request */
+		intid = cc770_read_reg(priv, interrupt);
+		if (!intid)
+			break;
+		n++;
+
+		if (intid == 1) {
+			/* Exit in case of bus-off */
+			if (cc770_status_interrupt(dev))
+				break;
+		} else {
+			o = intid2obj(intid);
+
+			if (o >= CC770_OBJ_MAX) {
+				netdev_err(dev, "Unexpected interrupt id %d\n",
+					   intid);
+				continue;
+			}
+
+			if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
+				cc770_rtr_interrupt(dev, o);
+			else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
+				cc770_rx_interrupt(dev, o);
+			else
+				cc770_tx_interrupt(dev, o);
+		}
+	}
+
+	if (priv->post_irq)
+		priv->post_irq(priv);
+
+	if (n >= CC770_MAX_IRQ)
+		netdev_dbg(dev, "%d messages handled in ISR\n", n);
+
+	return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cc770_open(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	int err;
+
+	/* set chip into reset mode */
+	set_reset_mode(dev);
+
+	/* common open */
+	err = open_candev(dev);
+	if (err)
+		return err;
+
+	err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+			  dev->name, dev);
+	if (err) {
+		close_candev(dev);
+		return -EAGAIN;
+	}
+
+	/* init and start chip */
+	cc770_start(dev);
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static int cc770_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	set_reset_mode(dev);
+
+	free_irq(dev->irq, dev);
+	close_candev(dev);
+
+	return 0;
+}
+
+struct net_device *alloc_cc770dev(int sizeof_priv)
+{
+	struct net_device *dev;
+	struct cc770_priv *priv;
+
+	dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
+			   CC770_ECHO_SKB_MAX);
+	if (!dev)
+		return NULL;
+
+	priv = netdev_priv(dev);
+
+	priv->dev = dev;
+	priv->can.bittiming_const = &cc770_bittiming_const;
+	priv->can.do_set_bittiming = cc770_set_bittiming;
+	priv->can.do_set_mode = cc770_set_mode;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+	memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+	if (sizeof_priv)
+		priv->priv = (void *)priv + sizeof(struct cc770_priv);
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
+void free_cc770dev(struct net_device *dev)
+{
+	free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_cc770dev);
+
+static const struct net_device_ops cc770_netdev_ops = {
+	.ndo_open = cc770_open,
+	.ndo_stop = cc770_close,
+	.ndo_start_xmit = cc770_start_xmit,
+};
+
+int register_cc770dev(struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = cc770_probe_chip(dev);
+	if (err)
+		return err;
+
+	dev->netdev_ops = &cc770_netdev_ops;
+
+	dev->flags |= IFF_ECHO;	/* we support local echo */
+
+	/* Should we use additional functions? */
+	if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
+		priv->can.do_get_berr_counter = cc770_get_berr_counter;
+		priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
+		netdev_dbg(dev, "i82527 mode with additional functions\n");
+	} else {
+		priv->control_normal_mode = CTRL_IE | CTRL_EIE;
+		netdev_dbg(dev, "strict i82527 compatibility mode\n");
+	}
+
+	chipset_init(priv);
+	set_reset_mode(dev);
+
+	return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_cc770dev);
+
+void unregister_cc770dev(struct net_device *dev)
+{
+	set_reset_mode(dev);
+	unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
+static __init int cc770_init(void)
+{
+	if (msgobj15_eff) {
+		cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
+		cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
+	}
+
+	pr_info("CAN netdevice driver\n");
+
+	return 0;
+}
+module_init(cc770_init);
+
+static __exit void cc770_exit(void)
+{
+	pr_info("driver removed\n");
+}
+module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644
index 0000000..a1739db
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.h
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+	u8 ctrl0;
+	u8 ctrl1;
+	u8 id[4];
+	u8 config;
+	u8 data[8];
+	u8 dontuse;		/* padding */
+} __packed;
+
+struct cc770_regs {
+	union {
+		struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+		struct {
+			u8 control;		/* Control Register */
+			u8 status;		/* Status Register */
+			u8 cpu_interface;	/* CPU Interface Register */
+			u8 dontuse1;
+			u8 high_speed_read[2];	/* High Speed Read */
+			u8 global_mask_std[2];	/* Standard Global Mask */
+			u8 global_mask_ext[4];	/* Extended Global Mask */
+			u8 msg15_mask[4];	/* Message 15 Mask */
+			u8 dontuse2[15];
+			u8 clkout;		/* Clock Out Register */
+			u8 dontuse3[15];
+			u8 bus_config;		/* Bus Configuration Register */
+			u8 dontuse4[15];
+			u8 bit_timing_0;	/* Bit Timing Register byte 0 */
+			u8 dontuse5[15];
+			u8 bit_timing_1;	/* Bit Timing Register byte 1 */
+			u8 dontuse6[15];
+			u8 interrupt;		/* Interrupt Register */
+			u8 dontuse7[15];
+			u8 rx_error_counter;	/* Receive Error Counter */
+			u8 dontuse8[15];
+			u8 tx_error_counter;	/* Transmit Error Counter */
+			u8 dontuse9[31];
+			u8 p1_conf;
+			u8 dontuse10[15];
+			u8 p2_conf;
+			u8 dontuse11[15];
+			u8 p1_in;
+			u8 dontuse12[15];
+			u8 p2_in;
+			u8 dontuse13[15];
+			u8 p1_out;
+			u8 dontuse14[15];
+			u8 p2_out;
+			u8 dontuse15[15];
+			u8 serial_reset_addr;
+		};
+	};
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI	0x01	/* Initialization */
+#define CTRL_IE		0x02	/* Interrupt Enable */
+#define CTRL_SIE	0x04	/* Status Interrupt Enable */
+#define CTRL_EIE	0x08	/* Error Interrupt Enable */
+#define CTRL_EAF	0x20	/* Enable additional functions */
+#define CTRL_CCE	0x40	/* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF	0x01	/* Stuff error */
+#define STAT_LEC_FORM	0x02	/* Form error */
+#define STAT_LEC_ACK	0x03	/* Acknowledgement error */
+#define STAT_LEC_BIT1	0x04	/* Bit1 error */
+#define STAT_LEC_BIT0	0x05	/* Bit0 error */
+#define STAT_LEC_CRC	0x06	/* CRC error */
+#define STAT_LEC_MASK	0x07	/* Last Error Code mask */
+#define STAT_TXOK	0x08	/* Transmit Message Successfully */
+#define STAT_RXOK	0x10	/* Receive Message Successfully */
+#define STAT_WAKE	0x20	/* Wake Up Status */
+#define STAT_WARN	0x40	/* Warning Status */
+#define STAT_BOFF	0x80	/* Bus Off Status */
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES	0x01	/* No Interrupt pending */
+#define INTPND_SET	0x02	/* Interrupt pending */
+#define INTPND_UNC	0x03
+#define RXIE_RES	0x04	/* Receive Interrupt Disable */
+#define RXIE_SET	0x08	/* Receive Interrupt Enable */
+#define RXIE_UNC	0x0c
+#define TXIE_RES	0x10	/* Transmit Interrupt Disable */
+#define TXIE_SET	0x20	/* Transmit Interrupt Enable */
+#define TXIE_UNC	0x30
+#define MSGVAL_RES	0x40	/* Message Invalid */
+#define MSGVAL_SET	0x80	/* Message Valid */
+#define MSGVAL_UNC	0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES	0x01	/* No New Data */
+#define NEWDAT_SET	0x02	/* New Data */
+#define NEWDAT_UNC	0x03
+#define MSGLST_RES	0x04	/* No Message Lost */
+#define MSGLST_SET	0x08	/* Message Lost */
+#define MSGLST_UNC	0x0c
+#define CPUUPD_RES	0x04	/* No CPU Updating */
+#define CPUUPD_SET	0x08	/* CPU Updating */
+#define CPUUPD_UNC	0x0c
+#define TXRQST_RES	0x10	/* No Transmission Request */
+#define TXRQST_SET	0x20	/* Transmission Request */
+#define TXRQST_UNC	0x30
+#define RMTPND_RES	0x40	/* No Remote Request Pending */
+#define RMTPND_SET	0x80	/* Remote Request Pending */
+#define RMTPND_UNC	0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD	0x04	/* Extended Identifier */
+#define MSGCFG_DIR	0x08	/* Direction is Transmit */
+
+#define MSGOBJ_FIRST	1
+#define MSGOBJ_LAST	15
+
+#define CC770_IO_SIZE	0x100
+#define CC770_MAX_IRQ	20	/* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG	4	/* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX	1
+
+#define cc770_read_reg(priv, member)					\
+	priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value)				\
+	priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
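+
+/*
+ * e.g. cc770_read_reg(priv, msgobj[15].ctrl0) reads control register 0 of
+ * message object 15 through the board-specific read_reg() callback.
+ */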
+
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX	0x01
+#define CC770_OBJ_FLAG_RTR	0x02
+#define CC770_OBJ_FLAG_EFF	0x04
+
+enum {
+	CC770_OBJ_RX0 = 0,	/* for receiving normal messages */
+	CC770_OBJ_RX1,		/* for receiving normal messages */
+	CC770_OBJ_RX_RTR0,	/* for receiving remote transmission requests */
+	CC770_OBJ_RX_RTR1,	/* for receiving remote transmission requests */
+	CC770_OBJ_TX,		/* for sending messages */
+	CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o)	(MSGOBJ_LAST - (o)) /* message object 11..15 */
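+/* e.g. obj2msgobj(CC770_OBJ_RX0) is 15 and obj2msgobj(CC770_OBJ_TX) is 11 */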
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+	struct can_priv can;	/* must be the first member */
+	struct sk_buff *echo_skb;
+
+	/* the lower-layer is responsible for appropriate locking */
+	u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+	void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+	void (*pre_irq)(const struct cc770_priv *priv);
+	void (*post_irq)(const struct cc770_priv *priv);
+
+	void *priv;		/* for board-specific data */
+	struct net_device *dev;
+
+	void __iomem *reg_base;	 /* ioremap'ed address to registers */
+	unsigned long irq_flags; /* for request_irq() */
+
+	unsigned char obj_flags[CC770_OBJ_MAX];
+	u8 control_normal_mode;	/* Control register for normal mode */
+	u8 cpu_interface;	/* CPU interface register */
+	u8 clkout;		/* Clock out register */
+	u8 bus_config;		/* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644
index 0000000..4be5fe2
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -0,0 +1,367 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ *   insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ *   insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
+ *
+ * Furthermore, the following mode parameter can be defined:
+ *
+ *   clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ *   cir: CPU interface register (default=0x40 [DSC])
+ *   bcr: Bus configuration register (default=0x40 [CBY])
+ *   cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT	16000000	/* 16 MHz */
+#define COR_DEFAULT	0x00
+#define BCR_DEFAULT	BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+		 "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE          0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+	return readb(priv->reg_base + reg);
+}
+
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+				      int reg, u8 val)
+{
+	writeb(val, priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+	return inb((unsigned long)priv->reg_base + reg);
+}
+
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+				       int reg, u8 val)
+{
+	outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+					     int reg)
+{
+	unsigned long base = (unsigned long)priv->reg_base;
+
+	outb(reg, base);
+	return inb(base + 1);
+}
+
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+						int reg, u8 val)
+{
+	unsigned long base = (unsigned long)priv->reg_base;
+
+	outb(reg, base);
+	outb(val, base + 1);
+}
+
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct cc770_priv *priv;
+	void __iomem *base = NULL;
+	int iosize = CC770_IOSIZE;
+	int idx = pdev->id;
+	int err;
+	u32 clktmp;
+
+	dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+		idx, port[idx], mem[idx], irq[idx]);
+	if (mem[idx]) {
+		if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+			err = -EBUSY;
+			goto exit;
+		}
+		base = ioremap_nocache(mem[idx], iosize);
+		if (!base) {
+			err = -ENOMEM;
+			goto exit_release;
+		}
+	} else {
+		if (indirect[idx] > 0 ||
+		    (indirect[idx] == -1 && indirect[0] > 0))
+			iosize = CC770_IOSIZE_INDIRECT;
+		if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+			err = -EBUSY;
+			goto exit;
+		}
+	}
+
+	dev = alloc_cc770dev(0);
+	if (!dev) {
+		err = -ENOMEM;
+		goto exit_unmap;
+	}
+	priv = netdev_priv(dev);
+
+	dev->irq = irq[idx];
+	priv->irq_flags = IRQF_SHARED;
+	if (mem[idx]) {
+		priv->reg_base = base;
+		dev->base_addr = mem[idx];
+		priv->read_reg = cc770_isa_mem_read_reg;
+		priv->write_reg = cc770_isa_mem_write_reg;
+	} else {
+		priv->reg_base = (void __iomem *)port[idx];
+		dev->base_addr = port[idx];
+
+		if (iosize == CC770_IOSIZE_INDIRECT) {
+			priv->read_reg = cc770_isa_port_read_reg_indirect;
+			priv->write_reg = cc770_isa_port_write_reg_indirect;
+		} else {
+			priv->read_reg = cc770_isa_port_read_reg;
+			priv->write_reg = cc770_isa_port_write_reg;
+		}
+	}
+
+	if (clk[idx])
+		clktmp = clk[idx];
+	else if (clk[0])
+		clktmp = clk[0];
+	else
+		clktmp = CLK_DEFAULT;
+	priv->can.clock.freq = clktmp;
+
+	if (cir[idx] != 0xff) {
+		priv->cpu_interface = cir[idx];
+	} else if (cir[0] != 0xff) {
+		priv->cpu_interface = cir[0];
+	} else {
+		/* The system clock may not exceed 10 MHz */
+		if (clktmp > 10000000) {
+			priv->cpu_interface |= CPUIF_DSC;
+			clktmp /= 2;
+		}
+		/* The memory clock may not exceed 8 MHz */
+		if (clktmp > 8000000)
+			priv->cpu_interface |= CPUIF_DMC;
+	}
+
+	if (priv->cpu_interface & CPUIF_DSC)
+		priv->can.clock.freq /= 2;
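+	/*
+	 * e.g. with the default 16 MHz clock and no cir override, CPUIF_DSC
+	 * is set above and the resulting CAN clock frequency is 8 MHz.
+	 */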
+
+	if (bcr[idx] != 0xff)
+		priv->bus_config = bcr[idx];
+	else if (bcr[0] != 0xff)
+		priv->bus_config = bcr[0];
+	else
+		priv->bus_config = BCR_DEFAULT;
+
+	if (cor[idx] != 0xff)
+		priv->clkout = cor[idx];
+	else if (cor[0] != 0xff)
+		priv->clkout = cor[0];
+	else
+		priv->clkout = COR_DEFAULT;
+
+	dev_set_drvdata(&pdev->dev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_cc770dev(dev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"couldn't register device (err=%d)\n", err);
+		goto exit_unmap;
+	}
+
+	dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+		 priv->reg_base, dev->irq);
+	return 0;
+
+ exit_unmap:
+	if (mem[idx])
+		iounmap(base);
+ exit_release:
+	if (mem[idx])
+		release_mem_region(mem[idx], iosize);
+	else
+		release_region(port[idx], iosize);
+ exit:
+	return err;
+}
+
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+	struct cc770_priv *priv = netdev_priv(dev);
+	int idx = pdev->id;
+
+	unregister_cc770dev(dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (mem[idx]) {
+		iounmap(priv->reg_base);
+		release_mem_region(mem[idx], CC770_IOSIZE);
+	} else {
+		if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+			release_region(port[idx], CC770_IOSIZE_INDIRECT);
+		else
+			release_region(port[idx], CC770_IOSIZE);
+	}
+	free_cc770dev(dev);
+
+	return 0;
+}
+
+static struct platform_driver cc770_isa_driver = {
+	.probe = cc770_isa_probe,
+	.remove = __devexit_p(cc770_isa_remove),
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init cc770_isa_init(void)
+{
+	int idx, err;
+
+	for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+		if ((port[idx] || mem[idx]) && irq[idx]) {
+			cc770_isa_devs[idx] =
+				platform_device_alloc(KBUILD_MODNAME, idx);
+			if (!cc770_isa_devs[idx]) {
+				err = -ENOMEM;
+				goto exit_free_devices;
+			}
+			err = platform_device_add(cc770_isa_devs[idx]);
+			if (err) {
+				platform_device_put(cc770_isa_devs[idx]);
+				goto exit_free_devices;
+			}
+			pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+				 "irq=%d\n",
+				 idx, port[idx], mem[idx], irq[idx]);
+		} else if (idx == 0 || port[idx] || mem[idx]) {
+			pr_err("insufficient parameters supplied\n");
+			err = -EINVAL;
+			goto exit_free_devices;
+		}
+	}
+
+	err = platform_driver_register(&cc770_isa_driver);
+	if (err)
+		goto exit_free_devices;
+
+	pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+	return 0;
+
+exit_free_devices:
+	while (--idx >= 0) {
+		if (cc770_isa_devs[idx])
+			platform_device_unregister(cc770_isa_devs[idx]);
+	}
+
+	return err;
+}
+module_init(cc770_isa_init);
+
+static void __exit cc770_isa_exit(void)
+{
+	int idx;
+
+	platform_driver_unregister(&cc770_isa_driver);
+	for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+		if (cc770_isa_devs[idx])
+			platform_device_unregister(cc770_isa_devs[idx]);
+	}
+}
+module_exit(cc770_isa_exit);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644
index 0000000..53115ee
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data are used you should have similar definitions
+ * in your board-specific code:
+ *
+ *   static struct cc770_platform_data myboard_cc770_pdata = {
+ *           .osc_freq = 16000000,
+ *           .cir = 0x41,
+ *           .cor = 0x20,
+ *           .bcr = 0x40,
+ *   };
+ *
+ * Please see include/linux/can/platform/cc770.h for a description of
+ * the above fields.
+ *
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ *   can@3,100 {
+ *           compatible = "bosch,cc770";
+ *           reg = <3 0x100 0x80>;
+ *           interrupts = <2 0>;
+ *           interrupt-parent = <&mpic>;
+ *           bosch,external-clock-frequency = <16000000>;
+ *   };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK  16000000
+
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+	return ioread8(priv->reg_base + reg);
+}
+
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+				     u8 val)
+{
+	iowrite8(val, priv->reg_base + reg);
+}
+
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+					    struct cc770_priv *priv)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const u32 *prop;
+	int prop_size;
+	u32 clkext;
+
+	prop = of_get_property(np, "bosch,external-clock-frequency",
+			       &prop_size);
+	if (prop && (prop_size == sizeof(u32)))
+		clkext = *prop;
+	else
+		clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+	priv->can.clock.freq = clkext;
+
+	/* The system clock may not exceed 10 MHz */
+	if (priv->can.clock.freq > 10000000) {
+		priv->cpu_interface |= CPUIF_DSC;
+		priv->can.clock.freq /= 2;
+	}
+
+	/* The memory clock may not exceed 8 MHz */
+	if (priv->can.clock.freq > 8000000)
+		priv->cpu_interface |= CPUIF_DMC;
+
+	if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+		priv->cpu_interface |= CPUIF_DMC;
+	if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+		priv->cpu_interface |= CPUIF_MUX;
+
+	if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+		priv->bus_config |= BUSCFG_CBY;
+	if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+		priv->bus_config |= BUSCFG_DR0;
+	if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+		priv->bus_config |= BUSCFG_DR1;
+	if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+		priv->bus_config |= BUSCFG_DT1;
+	if (of_get_property(np, "bosch,polarity-dominant", NULL))
+		priv->bus_config |= BUSCFG_POL;
+
+	prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+	if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+		u32 cdv = clkext / *prop;
+		int slew;
+
+		if (cdv > 0 && cdv < 16) {
+			priv->cpu_interface |= CPUIF_CEN;
+			priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+			prop = of_get_property(np, "bosch,slew-rate",
+					       &prop_size);
+			if (prop && (prop_size == sizeof(u32))) {
+				slew = *prop;
+			} else {
+				/* Determine default slew rate */
+				slew = (CLKOUT_SL_MASK >>
+					CLKOUT_SL_SHIFT) -
+					((cdv * clkext - 1) / 8000000);
+				if (slew < 0)
+					slew = 0;
+			}
+			priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+				CLKOUT_SL_MASK;
+		} else {
+			dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+		}
+	}
+
+	return 0;
+}
+
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+					     struct cc770_priv *priv)
+{
+	struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+	priv->can.clock.freq = pdata->osc_freq;
+	priv->clkout = pdata->cor;
+	priv->bus_config = pdata->bcr;
+	priv->cpu_interface = pdata->cir;
+
+	/* The CAN clock is halved if the divide-system-clock bit is set */
+	if (priv->cpu_interface & CPUIF_DSC)
+		priv->can.clock.freq /= 2;
+
+	return 0;
+}
+
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct cc770_priv *priv;
+	struct resource *mem;
+	resource_size_t mem_size;
+	void __iomem *base;
+	int err, irq;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!mem || irq <= 0)
+		return -ENODEV;
+
+	mem_size = resource_size(mem);
+	if (!request_mem_region(mem->start, mem_size, pdev->name))
+		return -EBUSY;
+
+	base = ioremap(mem->start, mem_size);
+	if (!base) {
+		err = -ENOMEM;
+		goto exit_release_mem;
+	}
+
+	dev = alloc_cc770dev(0);
+	if (!dev) {
+		err = -ENOMEM;
+		goto exit_unmap_mem;
+	}
+
+	dev->irq = irq;
+	priv = netdev_priv(dev);
+	priv->read_reg = cc770_platform_read_reg;
+	priv->write_reg = cc770_platform_write_reg;
+	priv->irq_flags = IRQF_SHARED;
+	priv->reg_base = base;
+
+	if (pdev->dev.of_node)
+		err = cc770_get_of_node_data(pdev, priv);
+	else if (pdev->dev.platform_data)
+		err = cc770_get_platform_data(pdev, priv);
+	else
+		err = -ENODEV;
+	if (err)
+		goto exit_free_cc770;
+
+	dev_dbg(&pdev->dev,
+		 "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+		 "bus_config=0x%02x clkout=0x%02x\n",
+		 priv->reg_base, dev->irq, priv->can.clock.freq,
+		 priv->cpu_interface, priv->bus_config, priv->clkout);
+
+	dev_set_drvdata(&pdev->dev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_cc770dev(dev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"couldn't register CC770 device (err=%d)\n", err);
+		goto exit_free_cc770;
+	}
+
+	return 0;
+
+exit_free_cc770:
+	free_cc770dev(dev);
+exit_unmap_mem:
+	iounmap(base);
+exit_release_mem:
+	release_mem_region(mem->start, mem_size);
+
+	return err;
+}
+
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+	struct cc770_priv *priv = netdev_priv(dev);
+	struct resource *mem;
+
+	unregister_cc770dev(dev);
+	iounmap(priv->reg_base);
+	free_cc770dev(dev);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+
+	return 0;
+}
+
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+	{.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+	{.compatible = "intc,82527"},  /* AN82527 from Intel CP */
+	{},
+};
+
+static struct platform_driver cc770_platform_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cc770_platform_table,
+	},
+	.probe = cc770_platform_probe,
+	.remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
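The hand-rolled module init/exit pairs removed throughout this series are taken over by the module_platform_driver() helper. As a rough sketch of what the macro stands in for (the driver name below is a placeholder, not a symbol from these patches):

    /* approximately what module_platform_driver(my_driver) generates */
    static int __init my_driver_init(void)
    {
            return platform_driver_register(&my_driver);
    }
    module_init(my_driver_init);

    static void __exit my_driver_exit(void)
    {
            platform_driver_unregister(&my_driver);
    }
    module_exit(my_driver_exit);

Because the generated wrappers do nothing beyond registering and unregistering the driver, conversions whose init/exit functions only printed a banner lose that message, as seen in the flexcan and ti_hecc hunks below.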
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 25695bd..120f1ab 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -454,7 +454,7 @@
 
 	/* New-style flags. */
 	dev->flags = IFF_NOARP;
-	dev->features = NETIF_F_NO_CSUM;
+	dev->features = NETIF_F_HW_CSUM;
 }
 
 struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index e023379..165a4c7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1060,20 +1060,7 @@
 	.remove = __devexit_p(flexcan_remove),
 };
 
-static int __init flexcan_init(void)
-{
-	pr_info("%s netdevice driver\n", DRV_NAME);
-	return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
-	platform_driver_unregister(&flexcan_driver);
-	pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
 
 MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
 	      "Marc Kleine-Budde <kernel@pengutronix.de>");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32778d5..08c893c 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1803,20 +1803,9 @@
 	.remove		= __devexit_p(ican3_remove),
 };
 
-static int __init ican3_init(void)
-{
-	return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
-	platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
 MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5fedc33..5caa572 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -411,17 +411,7 @@
 #endif
 };
 
-static int __init mpc5xxx_can_init(void)
-{
-	return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
-	platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
 
 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
 MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index ec4a311..1c82dd8 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -581,7 +581,10 @@
 
 	priv->open_time = jiffies;
 
-	clrbits8(&regs->canctl1, MSCAN_LISTEN);
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		setbits8(&regs->canctl1, MSCAN_LISTEN);
+	else
+		clrbits8(&regs->canctl1, MSCAN_LISTEN);
 
 	ret = mscan_start(dev);
 	if (ret)
@@ -690,7 +693,8 @@
 	priv->can.bittiming_const = &mscan_bittiming_const;
 	priv->can.do_set_bittiming = mscan_do_set_bittiming;
 	priv->can.do_set_mode = mscan_do_set_mode;
-	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+		CAN_CTRLMODE_LISTENONLY;
 
 	for (i = 0; i < TX_QUEUE_SIZE; i++) {
 		priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index fe9e64d..36e9d59 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -6,7 +6,6 @@
 
 config CAN_SJA1000_ISA
 	tristate "ISA Bus based legacy SJA1000 driver"
-	depends on ISA
 	---help---
 	  This driver adds legacy support for SJA1000 chips connected to
 	  the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 496223e..90c5c2d 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -17,7 +17,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/delay.h>
@@ -44,9 +44,9 @@
 static unsigned long mem[MAXDEV];
 static int __devinitdata irq[MAXDEV];
 static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
 
 module_param_array(port, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@
 module_param_array(mem, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(mem, "I/O memory address");
 
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
 MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
 
 module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@
 #define SJA1000_IOSIZE          0x20
 #define SJA1000_IOSIZE_INDIRECT 0x02
 
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
 static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
 {
 	return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@
 	outb(val, base + 1);
 }
 
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
-	if (port[idx] || mem[idx]) {
-		if (irq[idx])
-			return 1;
-	} else if (idx)
-		return 0;
-
-	dev_err(pdev, "insufficient parameters supplied\n");
-	return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct sja1000_priv *priv;
 	void __iomem *base = NULL;
 	int iosize = SJA1000_IOSIZE;
+	int idx = pdev->id;
 	int err;
 
+	dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+		idx, port[idx], mem[idx], irq[idx]);
+
 	if (mem[idx]) {
 		if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
 			err = -EBUSY;
@@ -189,31 +183,31 @@
 	else
 		priv->can.clock.freq = CLK_DEFAULT / 2;
 
-	if (ocr[idx] != -1)
-		priv->ocr = ocr[idx] & 0xff;
-	else if (ocr[0] != -1)
-		priv->ocr = ocr[0] & 0xff;
+	if (ocr[idx] != 0xff)
+		priv->ocr = ocr[idx];
+	else if (ocr[0] != 0xff)
+		priv->ocr = ocr[0];
 	else
 		priv->ocr = OCR_DEFAULT;
 
-	if (cdr[idx] != -1)
-		priv->cdr = cdr[idx] & 0xff;
-	else if (cdr[0] != -1)
-		priv->cdr = cdr[0] & 0xff;
+	if (cdr[idx] != 0xff)
+		priv->cdr = cdr[idx];
+	else if (cdr[0] != 0xff)
+		priv->cdr = cdr[0];
 	else
 		priv->cdr = CDR_DEFAULT;
 
-	dev_set_drvdata(pdev, dev);
-	SET_NETDEV_DEV(dev, pdev);
+	dev_set_drvdata(&pdev->dev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	err = register_sja1000dev(dev);
 	if (err) {
-		dev_err(pdev, "registering %s failed (err=%d)\n",
+		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
 			DRV_NAME, err);
 		goto exit_unmap;
 	}
 
-	dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+	dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
 		 DRV_NAME, priv->reg_base, dev->irq);
 	return 0;
 
@@ -229,13 +223,14 @@
 	return err;
 }
 
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
 {
-	struct net_device *dev = dev_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(&pdev->dev);
 	struct sja1000_priv *priv = netdev_priv(dev);
+	int idx = pdev->id;
 
 	unregister_sja1000dev(dev);
-	dev_set_drvdata(pdev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
 
 	if (mem[idx]) {
 		iounmap(priv->reg_base);
@@ -251,29 +246,70 @@
 	return 0;
 }
 
-static struct isa_driver sja1000_isa_driver = {
-	.match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
 	.probe = sja1000_isa_probe,
 	.remove = __devexit_p(sja1000_isa_remove),
 	.driver = {
 		.name = DRV_NAME,
+		.owner = THIS_MODULE,
 	},
 };
 
 static int __init sja1000_isa_init(void)
 {
-	int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+	int idx, err;
 
-	if (!err)
-		printk(KERN_INFO
-		       "Legacy %s driver for max. %d devices registered\n",
-		       DRV_NAME, MAXDEV);
+	for (idx = 0; idx < MAXDEV; idx++) {
+		if ((port[idx] || mem[idx]) && irq[idx]) {
+			sja1000_isa_devs[idx] =
+				platform_device_alloc(DRV_NAME, idx);
+			if (!sja1000_isa_devs[idx]) {
+				err = -ENOMEM;
+				goto exit_free_devices;
+			}
+			err = platform_device_add(sja1000_isa_devs[idx]);
+			if (err) {
+				platform_device_put(sja1000_isa_devs[idx]);
+				goto exit_free_devices;
+			}
+			pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+				 "irq=%d\n",
+				 DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+		} else if (idx == 0 || port[idx] || mem[idx]) {
+			pr_err("%s: insufficient parameters supplied\n",
+			       DRV_NAME);
+			err = -EINVAL;
+			goto exit_free_devices;
+		}
+	}
+
+	err = platform_driver_register(&sja1000_isa_driver);
+	if (err)
+		goto exit_free_devices;
+
+	pr_info("Legacy %s driver for max. %d devices registered\n",
+		DRV_NAME, MAXDEV);
+
+	return 0;
+
+exit_free_devices:
+	while (--idx >= 0) {
+		if (sja1000_isa_devs[idx])
+			platform_device_unregister(sja1000_isa_devs[idx]);
+	}
+
 	return err;
 }
 
 static void __exit sja1000_isa_exit(void)
 {
-	isa_unregister_driver(&sja1000_isa_driver);
+	int idx;
+
+	platform_driver_unregister(&sja1000_isa_driver);
+	for (idx = 0; idx < MAXDEV; idx++) {
+		if (sja1000_isa_devs[idx])
+			platform_device_unregister(sja1000_isa_devs[idx]);
+	}
 }
 
 module_init(sja1000_isa_init);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index c3dd9d0..f2683eb 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -220,14 +220,4 @@
 	.remove = __devexit_p(sja1000_ofp_remove),
 };
 
-static int __init sja1000_ofp_init(void)
-{
-	return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
-	return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d9fadc4..4f50145 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -185,15 +185,4 @@
 	},
 };
 
-static int __init sp_init(void)
-{
-	return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
-	platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a979b00..3f1ebcc 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -387,7 +387,7 @@
 
 	/* New-style flags. */
 	dev->flags		= IFF_NOARP;
-	dev->features           = NETIF_F_NO_CSUM;
+	dev->features           = NETIF_F_HW_CSUM;
 }
 
 /******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 09a8b86..a7c77c74 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -874,21 +874,9 @@
 	.remove = __devexit_p(softing_pdev_remove),
 };
 
+module_platform_driver(softing_driver);
+
 MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
-	return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
-	platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
-
 MODULE_DESCRIPTION("Softing DPRAM CAN driver");
 MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2adc294..df809e3 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1037,20 +1037,7 @@
 	.resume = ti_hecc_resume,
 };
 
-static int __init ti_hecc_init_driver(void)
-{
-	printk(KERN_INFO DRV_DESC "\n");
-	return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
-	printk(KERN_INFO DRV_DESC " unloaded\n");
-	platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
 
 MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index a72c7bf..9697c14 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1115,28 +1115,4 @@
 	.id_table = ems_usb_table,
 };
 
-static int __init ems_usb_init(void)
-{
-	int err;
-
-	printk(KERN_INFO "CPC-USB kernel driver loaded\n");
-
-	/* register this driver with the USB subsystem */
-	err = usb_register(&ems_usb_driver);
-
-	if (err) {
-		err("usb_register failed. Error number %d\n", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static void __exit ems_usb_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&ems_usb_driver);
-}
-
-module_init(ems_usb_init);
-module_exit(ems_usb_exit);
+module_usb_driver(ems_usb_driver);
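module_usb_driver() plays the same role for USB drivers; a minimal sketch under the same naming assumption:

    static int __init my_usb_driver_init(void)
    {
            return usb_register(&my_usb_driver);
    }
    module_init(my_usb_driver_init);

    static void __exit my_usb_driver_exit(void)
    {
            usb_deregister(&my_usb_driver);
    }
    module_exit(my_usb_driver_exit);

The error message that ems_usb_init() printed on a failed usb_register() goes away, but the error code is still propagated out of module init and reported by modprobe.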
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index eb8b0e6..9277463 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1108,25 +1108,4 @@
 	.id_table = esd_usb2_table,
 };
 
-static int __init esd_usb2_init(void)
-{
-	int err;
-
-	/* register this driver with the USB subsystem */
-	err = usb_register(&esd_usb2_driver);
-
-	if (err) {
-		err("usb_register failed. Error number %d\n", err);
-		return err;
-	}
-
-	return 0;
-}
-module_init(esd_usb2_init);
-
-static void __exit esd_usb2_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&esd_usb2_driver);
-}
-module_exit(esd_usb2_exit);
+module_usb_driver(esd_usb2_driver);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f93e2d6..ea2d942 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -63,7 +63,7 @@
  * See Documentation/networking/can.txt for details.
  */
 
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
 module_param(echo, bool, S_IRUGO);
 MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
 
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644
index 0000000..dd151d5
--- /dev/null
+++ b/drivers/net/dsa/Kconfig
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+	depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+	tristate
+	default n
+
+config NET_DSA_MV88E6060
+	tristate "Marvell 88E6060 ethernet switch chip support"
+	select NET_DSA_TAG_TRAILER
+	---help---
+	  This enables support for the Marvell 88E6060 ethernet switch
+	  chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+	bool
+	default n
+
+config NET_DSA_MV88E6131
+	tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+	select NET_DSA_MV88E6XXX
+	select NET_DSA_MV88E6XXX_NEED_PPU
+	select NET_DSA_TAG_DSA
+	---help---
+	  This enables support for the Marvell 88E6085/6095/6095F/6131
+	  ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+	tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+	select NET_DSA_MV88E6XXX
+	select NET_DSA_TAG_EDSA
+	---help---
+	  This enables support for the Marvell 88E6123/6161/6165
+	  ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644
index 0000000..f3bda05
--- /dev/null
+++ b/drivers/net/dsa/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
diff --git a/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
similarity index 96%
rename from net/dsa/mv88e6060.c
rename to drivers/net/dsa/mv88e6060.c
index 8f4ff5a..7fc4e81 100644
--- a/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 
 #define REG_PORT(p)		(8 + (p))
 #define REG_GLOBAL		0x0f
@@ -286,3 +286,8 @@
 	unregister_switch_driver(&mv88e6060_switch_driver);
 }
 module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
diff --git a/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
similarity index 96%
rename from net/dsa/mv88e6123_61_65.c
rename to drivers/net/dsa/mv88e6123_61_65.c
index 52faaa2..c0a458f 100644
--- a/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
@@ -419,7 +419,7 @@
 	return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
 }
 
-static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
 	.tag_protocol		= cpu_to_be16(ETH_P_EDSA),
 	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
 	.probe			= mv88e6123_61_65_probe,
@@ -433,15 +433,6 @@
 	.get_sset_count		= mv88e6123_61_65_get_sset_count,
 };
 
-static int __init mv88e6123_61_65_init(void)
-{
-	register_switch_driver(&mv88e6123_61_65_switch_driver);
-	return 0;
-}
-module_init(mv88e6123_61_65_init);
-
-static void __exit mv88e6123_61_65_cleanup(void)
-{
-	unregister_switch_driver(&mv88e6123_61_65_switch_driver);
-}
-module_exit(mv88e6123_61_65_cleanup);
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
diff --git a/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
similarity index 96%
rename from net/dsa/mv88e6131.c
rename to drivers/net/dsa/mv88e6131.c
index 9bd1061..e0eb6824 100644
--- a/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 /*
@@ -415,7 +415,7 @@
 	return ARRAY_SIZE(mv88e6131_hw_stats);
 }
 
-static struct dsa_switch_driver mv88e6131_switch_driver = {
+struct dsa_switch_driver mv88e6131_switch_driver = {
 	.tag_protocol		= cpu_to_be16(ETH_P_DSA),
 	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
 	.probe			= mv88e6131_probe,
@@ -429,15 +429,7 @@
 	.get_sset_count		= mv88e6131_get_sset_count,
 };
 
-static int __init mv88e6131_init(void)
-{
-	register_switch_driver(&mv88e6131_switch_driver);
-	return 0;
-}
-module_init(mv88e6131_init);
-
-static void __exit mv88e6131_cleanup(void)
-{
-	unregister_switch_driver(&mv88e6131_switch_driver);
-}
-module_exit(mv88e6131_cleanup);
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
diff --git a/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
similarity index 93%
rename from net/dsa/mv88e6xxx.c
rename to drivers/net/dsa/mv88e6xxx.c
index efe661a..5467c04 100644
--- a/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 /*
@@ -520,3 +520,30 @@
 
 	mutex_unlock(&ps->stats_mutex);
 }
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+	register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+	register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+	return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+	unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+	unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
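The conditional registration above uses IS_ENABLED(), which is true when the given Kconfig symbol is either built in (=y) or modular (=m). In essence it behaves like:

    #define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

so each sub-driver is registered by mv88e6xxx_init() whenever its option is selected, regardless of whether mv88e6xxx_drv itself is built in or loaded as a module.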
diff --git a/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
similarity index 95%
rename from net/dsa/mv88e6xxx.h
rename to drivers/net/dsa/mv88e6xxx.h
index 61156ca2..fc2cd7b 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -71,6 +71,9 @@
 				 int nr_stats, struct mv88e6xxx_hw_stat *stats,
 				 int port, uint64_t *data);
 
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
 #define REG_READ(addr, reg)						\
 	({								\
 		int __ret;						\
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index a7c5e88..087648e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -134,7 +134,7 @@
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
 	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
-	dev->features	|= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+	dev->features	|= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
 	random_ether_addr(dev->dev_addr);
 }
 
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 972f80e..da410f0 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -468,9 +468,10 @@
 static void netdev_get_drvinfo(struct net_device *dev,
 			       struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	snprintf(info->bus_info, sizeof(info->bus_info),
+		"PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
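The strcpy()/sprintf() to strlcpy()/snprintf() conversions in the ethtool get_drvinfo handlers bound every copy to the size of the destination field; the driver, version, fw_version and bus_info members of struct ethtool_drvinfo are fixed 32-byte arrays, so unbounded copies can silently overflow them. A minimal sketch of the pattern (handler name hypothetical):

    static void example_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
    {
            strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
            strlcpy(info->version, DRV_VERSION, sizeof(info->version));
            snprintf(info->bus_info, sizeof(info->bus_info),
                     "PCMCIA 0x%lx", dev->base_addr);
    }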
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index b42c06b..8153a3e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2929,15 +2929,17 @@
 {
 	struct vortex_private *vp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	if (VORTEX_PCI(vp)) {
-		strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+		strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+			sizeof(info->bus_info));
 	} else {
 		if (VORTEX_EISA(vp))
-			strcpy(info->bus_info, dev_name(vp->gendev));
+			strlcpy(info->bus_info, dev_name(vp->gendev),
+				sizeof(info->bus_info));
 		else
-			sprintf(info->bus_info, "EISA 0x%lx %d",
-					dev->base_addr, dev->irq);
+			snprintf(info->bus_info, sizeof(info->bus_info),
+				"EISA 0x%lx %d", dev->base_addr, dev->irq);
 	}
 }
 
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 20ea075..6d6bc75 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -988,21 +988,23 @@
 
 	smp_rmb();
 	if(tp->card_state == Sleeping) {
-		strcpy(info->fw_version, "Sleep image");
+		strlcpy(info->fw_version, "Sleep image",
+			sizeof(info->fw_version));
 	} else {
 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
 		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
-			strcpy(info->fw_version, "Unknown runtime");
+			strlcpy(info->fw_version, "Unknown runtime",
+				sizeof(info->fw_version));
 		} else {
 			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
-			snprintf(info->fw_version, 32, "%02x.%03x.%03x",
-				 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
-				 sleep_ver & 0xfff);
+			snprintf(info->fw_version, sizeof(info->fw_version),
+				"%02x.%03x.%03x", sleep_ver >> 24,
+				(sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
 		}
 	}
 
-	strcpy(info->driver, KBUILD_MODNAME);
-	strcpy(info->bus_info, pci_name(pci_dev));
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static int
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 58a12e4..ef325ff 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -14,8 +14,6 @@
 
 #define TX_PAGES 12	/* Two Tx slots */
 
-#define ETHER_ADDR_LEN 6
-
 /* The 8390 specific per-packet-header format. */
 struct e8390_pkt_hdr {
   unsigned char status; /* status */
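Dropping the private ETHER_ADDR_LEN definition is safe because the 8390 drivers already get the canonical constant through their <linux/netdevice.h> include chain:

    /* from include/linux/if_ether.h */
    #define ETH_ALEN	6	/* Octets in one ethernet addr */

The loops converted below are therefore byte-for-byte identical; only the spelling of the length changes.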
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 5477373..3ad5d2f 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -318,7 +318,7 @@
     i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
     if (i) return i;
 
-    for(i = 0; i < ETHER_ADDR_LEN; i++)
+    for (i = 0; i < ETH_ALEN; i++)
 	dev->dev_addr[i] = SA_prom[i];
 
     printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e9f8432..9e8ba4f 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -735,15 +735,14 @@
 	if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
 		ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
 			ei_local->mem + E8390_CMD); /* 0x61 */
-		for (i = 0; i < ETHER_ADDR_LEN; i++)
+		for (i = 0; i < ETH_ALEN; i++)
 			dev->dev_addr[i] =
 				ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
 	}
 
 	if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
 	    ax->plat->mac_addr)
-		memcpy(dev->dev_addr, ax->plat->mac_addr,
-		       ETHER_ADDR_LEN);
+		memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
 
 	ax_reset_8390(dev);
 
@@ -991,18 +990,7 @@
 	.resume		= ax_resume,
 };
 
-static int __init axdrv_init(void)
-{
-	return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
-	platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
 
 MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
index 7a09575..6428f9e 100644
--- a/drivers/net/ethernet/8390/es3210.c
+++ b/drivers/net/ethernet/8390/es3210.c
@@ -195,7 +195,7 @@
 		goto out;
 	}
 
-	for (i = 0; i < ETHER_ADDR_LEN ; i++)
+	for (i = 0; i < ETH_ALEN ; i++)
 		dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
 
 /*	Check the Racal vendor ID as well. */
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
index eeac843..d42938b 100644
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -202,7 +202,7 @@
 	/* Retrieve and checksum the station address. */
 	outw(MAC_Page, ioaddr + HP_PAGING);
 
-	for(i = 0; i < ETHER_ADDR_LEN; i++) {
+	for(i = 0; i < ETH_ALEN; i++) {
 		unsigned char inval = inb(ioaddr + 8 + i);
 		dev->dev_addr[i] = inval;
 		checksum += inval;
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
index 18564d4..113f1e0 100644
--- a/drivers/net/ethernet/8390/hp.c
+++ b/drivers/net/ethernet/8390/hp.c
@@ -156,7 +156,7 @@
 
 	printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
 
-	for(i = 0; i < ETHER_ADDR_LEN; i++)
+	for(i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = inb(ioaddr + i);
 
 	printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 3dac937..5370c88 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -129,7 +129,7 @@
     if (!dev)
 	return -ENOMEM;
 
-    for(j = 0; j < ETHER_ADDR_LEN; j++)
+    for (j = 0; j < ETH_ALEN; j++)
 	dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
 
     /* We must set the 8390 for word mode. */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
index f9888d2..69490ae 100644
--- a/drivers/net/ethernet/8390/lne390.c
+++ b/drivers/net/ethernet/8390/lne390.c
@@ -191,14 +191,14 @@
 		|| inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
 		|| inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
 		printk("lne390.c: card not found");
-		for(i = 0; i < ETHER_ADDR_LEN; i++)
+		for (i = 0; i < ETH_ALEN; i++)
 			printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
 		printk(" (invalid prefix).\n");
 		return -ENODEV;
 	}
 #endif
 
-	for(i = 0; i < ETHER_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
 	printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
 	       0xa+revision, ioaddr/0x1000, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
index cd36a6a..9b9c77d 100644
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -312,7 +312,7 @@
 
 	dev->base_addr = ioaddr;
 
-	for(i = 0; i < ETHER_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = SA_prom[i];
 	printk(" %pM\n", dev->dev_addr);
 
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1063093..f92ea2a 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -503,12 +503,12 @@
 #ifdef CONFIG_PLAT_MAPPI
 	outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
 		ioaddr + E8390_CMD); /* 0x61 */
-	for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+	for (i = 0; i < ETH_ALEN; i++) {
 		dev->dev_addr[i] = SA_prom[i]
 			= inb_p(ioaddr + EN1_PHYS_SHIFT(i));
 	}
 #else
-	for(i = 0; i < ETHER_ADDR_LEN; i++) {
+	for (i = 0; i < ETH_ALEN; i++) {
 		dev->dev_addr[i] = SA_prom[i];
 	}
 #endif
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
index 70cdc69..922b320 100644
--- a/drivers/net/ethernet/8390/ne2.c
+++ b/drivers/net/ethernet/8390/ne2.c
@@ -460,7 +460,7 @@
 
 	dev->base_addr = base_addr;
 
-	for(i = 0; i < ETHER_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = SA_prom[i];
 
 	printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 3992342..3fab04a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -639,9 +639,9 @@
 	struct ei_device *ei = netdev_priv(dev);
 	struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops ne2k_pci_ethtool_ops = {
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index 243ed2a..2a3e805 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -125,7 +125,7 @@
 #endif
 
 	port_index = inb(ioaddr + NE3210_CFG2) >> 6;
-	for(i = 0; i < ETHER_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
 	printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
 		edev->slot, ifmap[port_index], dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index d85f0a8..3b90375 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -114,7 +114,7 @@
 #ifdef CONFIG_SH_STANDARD_BIOS
   sh_bios_get_node_addr (stnic_eadr);
 #endif
-  for (i = 0; i < ETHER_ADDR_LEN; i++)
+  for (i = 0; i < ETH_ALEN; i++)
     dev->dev_addr[i] = stnic_eadr[i];
 
   /* Set the base address to point to the NIC, not the "real" base! */
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 3aa9fe9..bcd2732 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -365,7 +365,7 @@
 	if (i)
 		return i;
 
-	for (i = 0; i < ETHER_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = SA_prom[i];
 
 	pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 597f4d45..3474a61 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -28,6 +28,7 @@
 source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index be5dde0..cd6d69a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 6d9f691..cb4f38a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -607,7 +607,7 @@
 
 
 #ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct netdev_private *np = netdev_priv(dev);
 
@@ -617,9 +617,11 @@
 	set_bit(vid, np->active_vlans);
 	set_rx_mode(dev);
 	spin_unlock(&np->lock);
+
+	return 0;
 }
 
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct netdev_private *np = netdev_priv(dev);
 
@@ -629,6 +631,8 @@
 	clear_bit(vid, np->active_vlans);
 	set_rx_mode(dev);
 	spin_unlock(&np->lock);
+
+	return 0;
 }
 #endif /* VLAN_SUPPORT */
 
@@ -1842,9 +1846,9 @@
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(np->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 442fefa..c885aa9 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1623,18 +1623,7 @@
 	.remove = __devexit_p(greth_of_remove),
 };
 
-static int __init greth_init(void)
-{
-	return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
-	platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
 
 MODULE_AUTHOR("Aeroflex Gaisler AB.");
 MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a9745f4..33e0a8c 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -499,7 +499,7 @@
 	writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
 
 	/* Setting the MAC address to the device */
-	for(i = 0; i < ETH_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		writeb( dev->dev_addr[i], mmio + PADR + i );
 
 	/* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@
 {
 	struct amd8111e_priv *lp = netdev_priv(dev);
 	struct pci_dev *pci_dev = lp->pci_dev;
-	strcpy (info->driver, MODULE_NAME);
-	strcpy (info->version, MODULE_VERS);
-	sprintf(info->fw_version,"%u",chip_version);
-	strcpy (info->bus_info, pci_name(pci_dev));
+	strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+	snprintf(info->fw_version, sizeof(info->fw_version),
+		"%u", chip_version);
+	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	spin_lock_irq(&lp->lock);
 	/* Setting the MAC address to the device */
-	for(i = 0; i < ETH_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		writeb( dev->dev_addr[i], lp->mmio + PADR + i );
 
 	spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@
 	}
 
 	/* Initializing MAC address */
-	for(i = 0; i < ETH_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = readb(lp->mmio + PADR + i);
 
 	/* Setting user defined parametrs */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 2ff2e7a..8baa352 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -586,7 +586,6 @@
 
 #define PKT_BUFF_SZ			1536
 #define MIN_PKT_LEN			60
-#define ETH_ADDR_LEN			6
 
 #define  AMD8111E_TX_TIMEOUT		(3 * HZ)/* 3 sec */
 #define SOFT_TIMER_FREQ 		0xBEBC  /* 0.5 sec */
@@ -808,8 +807,8 @@
 
 static int card_idx;
 static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
 static unsigned int chip_version;
 
 #endif /* _AMD8111E_H */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 4865ff1..cc9262b 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1339,18 +1339,7 @@
 		.owner  = THIS_MODULE,
 	},
 };
+
+module_platform_driver(au1000_eth_driver);
+
 MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
-	return platform_driver_register(&au1000_eth_driver);
-}
-
-static void __exit au1000_exit_module(void)
-{
-	platform_driver_unregister(&au1000_eth_driver);
-}
-
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 3accd5d..6be0dd6 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -160,8 +160,6 @@
 Defines
 ---------------------------------------------------------------------------- */
 
-#define ETHER_ADDR_LEN			ETH_ALEN
-					/* 6 bytes in an Ethernet Address */
 #define MACE_LADRF_LEN			8
 					/* 8 bytes in Logical Address Filter */
 
@@ -600,7 +598,7 @@
   	}
   }
   /* Set PADR register */
-  for (i = 0; i < ETHER_ADDR_LEN; i++)
+  for (i = 0; i < ETH_ALEN; i++)
     mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
 
   /* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@
 
   /* Read the ethernet address from the CIS. */
   len = pcmcia_get_tuple(link, 0x80, &buf);
-  if (!buf || len < ETHER_ADDR_LEN) {
+  if (!buf || len < ETH_ALEN) {
 	  kfree(buf);
 	  goto failed;
   }
-  memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+  memcpy(dev->dev_addr, buf, ETH_ALEN);
   kfree(buf);
 
   /* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@
 static void netdev_get_drvinfo(struct net_device *dev,
 			       struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	snprintf(info->bus_info, sizeof(info->bus_info),
+		"PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@
 static void set_multicast_list(struct net_device *dev)
 {
   mace_private *lp = netdev_priv(dev);
-  int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+  int adr[ETH_ALEN] = {0}; /* Ethernet address */
   struct netdev_hw_addr *ha;
 
 #ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@
     /* Calculate multicast logical address filter */
     memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
     netdev_for_each_mc_addr(ha, dev) {
-      memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+      memcpy(adr, ha->addr, ETH_ALEN);
       BuildLAF(lp->multicast_ladrf, adr);
     }
   }
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f92bc6e..20e6dab 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -711,12 +711,14 @@
 {
 	struct pcnet32_private *lp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	if (lp->pci_dev)
-		strcpy(info->bus_info, pci_name(lp->pci_dev));
+		strlcpy(info->bus_info, pci_name(lp->pci_dev),
+			sizeof(info->bus_info));
 	else
-		sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+		snprintf(info->bus_info, sizeof(info->bus_info),
+			"VLB 0x%lx", dev->base_addr);
 }
 
 static u32 pcnet32_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 8fda457..7ea16d3 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1540,17 +1540,4 @@
 	.remove		= __devexit_p(sunlance_sbus_remove),
 };
 
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
-	return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
-	platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 7be884d..0a9326a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -232,7 +232,6 @@
 	strlcpy(drvinfo->driver,  atl1c_driver_name, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, atl1c_driver_version,
 		sizeof(drvinfo->version));
-	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 02c7ed8..b859124 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -411,7 +411,7 @@
 	}
 }
 
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
 	if (features & NETIF_F_HW_VLAN_RX) {
 		/* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@
 	}
 }
 
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@
 		roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
 }
 
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@
 	return features;
 }
 
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
-	u32 changed = netdev->features ^ features;
+	netdev_features_t changed = netdev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		atl1c_vlan_mode(netdev, features);
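The u32 to netdev_features_t changes in the atl1c/atl1e/atl2 (and atlx) feature callbacks follow the core networking change that widened the device feature mask:

    /* from include/linux/netdev_features.h */
    typedef u64 netdev_features_t;

Keeping the callbacks and the local "changed" masks in the wider type avoids silently truncating feature bits above bit 31.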
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6269438..6e61f9f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -310,10 +310,12 @@
 {
 	struct atl1e_adapter *adapter = netdev_priv(netdev);
 
-	strncpy(drvinfo->driver,  atl1e_driver_name, 32);
-	strncpy(drvinfo->version, atl1e_driver_version, 32);
-	strncpy(drvinfo->fw_version, "L1e", 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->driver,  atl1e_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, atl1e_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = 0;
 	drvinfo->testinfo_len = 0;
 	drvinfo->regdump_len = atl1e_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 95483bc..c915c08 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,7 +313,7 @@
 	}
 }
 
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
 	if (features & NETIF_F_HW_VLAN_RX) {
 		/* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@
 	}
 }
 
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct atl1e_adapter *adapter = netdev_priv(netdev);
 	u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@
 	return 0;
 }
 
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@
 	return features;
 }
 
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
-	u32 changed = netdev->features ^ features;
+	netdev_features_t changed = netdev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		atl1e_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 33a4e35..9bd2049 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3365,7 +3365,6 @@
 	strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
 		sizeof(drvinfo->version));
-	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
 	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1feae59..071f4c8 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -361,7 +361,7 @@
     synchronize_irq(adapter->pdev->irq);
 }
 
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
 {
 	if (features & NETIF_F_HW_VLAN_RX) {
 		/* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@
 	}
 }
 
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct atl2_adapter *adapter = netdev_priv(netdev);
 	u32 ctrl;
@@ -391,7 +392,8 @@
 	atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
 }
 
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@
 	return features;
 }
 
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
-	u32 changed = netdev->features ^ features;
+	netdev_features_t changed = netdev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@
 {
 	struct atl2_adapter *adapter = netdev_priv(netdev);
 
-	strncpy(drvinfo->driver,  atl2_driver_name, 32);
-	strncpy(drvinfo->version, atl2_driver_version, 32);
-	strncpy(drvinfo->fw_version, "L2", 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->driver,  atl2_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, atl2_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = 0;
 	drvinfo->testinfo_len = 0;
 	drvinfo->regdump_len = atl2_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index aabcf4b..8ff7411 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -211,7 +211,7 @@
 	spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
 {
 	if (features & NETIF_F_HW_VLAN_RX) {
 		/* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@
 	}
 }
 
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct atlx_adapter *adapter = netdev_priv(netdev);
 	unsigned long flags;
@@ -242,7 +243,8 @@
 	atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
 }
 
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@
 	return features;
 }
 
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
-	u32 changed = netdev->features ^ features;
+	netdev_features_t changed = netdev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		atlx_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c723..021fb81 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -57,11 +57,11 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.1.11"
-#define DRV_MODULE_RELDATE	"July 20, 2011"
-#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION	"2.2.1"
+#define DRV_MODULE_RELDATE	"Dec 18, 2011"
+#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
 
@@ -409,7 +409,7 @@
 	mutex_lock(&bp->cnic_lock);
 	cp->drv_state = 0;
 	bnapi->cnic_present = 0;
-	rcu_assign_pointer(bp->cnic_ops, NULL);
+	RCU_INIT_POINTER(bp->cnic_ops, NULL);
 	mutex_unlock(&bp->cnic_lock);
 	synchronize_rcu();
 	return 0;
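
Clearing cnic_ops switches from rcu_assign_pointer() to RCU_INIT_POINTER(): storing NULL publishes no new structure, so the write barrier implied by rcu_assign_pointer() buys nothing. A sketch of the pattern under that assumption; foo_ops, foo_lock and foo_unregister() are hypothetical:

	static DEFINE_MUTEX(foo_lock);
	static struct foo_ops __rcu *foo_ops;

	static void foo_unregister(void)
	{
		mutex_lock(&foo_lock);
		RCU_INIT_POINTER(foo_ops, NULL);	/* NULL store: no barrier needed */
		mutex_unlock(&foo_lock);
		synchronize_rcu();			/* wait for readers still using it */
	}
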
@@ -2054,8 +2054,8 @@
 
 	if (bp->autoneg & AUTONEG_SPEED) {
 		u32 adv_reg, adv1000_reg;
-		u32 new_adv_reg = 0;
-		u32 new_adv1000_reg = 0;
+		u32 new_adv = 0;
+		u32 new_adv1000 = 0;
 
 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@
 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
 		adv1000_reg &= PHY_ALL_1000_SPEED;
 
-		if (bp->advertising & ADVERTISED_10baseT_Half)
-			new_adv_reg |= ADVERTISE_10HALF;
-		if (bp->advertising & ADVERTISED_10baseT_Full)
-			new_adv_reg |= ADVERTISE_10FULL;
-		if (bp->advertising & ADVERTISED_100baseT_Half)
-			new_adv_reg |= ADVERTISE_100HALF;
-		if (bp->advertising & ADVERTISED_100baseT_Full)
-			new_adv_reg |= ADVERTISE_100FULL;
-		if (bp->advertising & ADVERTISED_1000baseT_Full)
-			new_adv1000_reg |= ADVERTISE_1000FULL;
+		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+		new_adv |= ADVERTISE_CSMA;
+		new_adv |= bnx2_phy_get_pause_adv(bp);
 
-		new_adv_reg |= ADVERTISE_CSMA;
+		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
 
-		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
-		if ((adv1000_reg != new_adv1000_reg) ||
-			(adv_reg != new_adv_reg) ||
+		if ((adv1000_reg != new_adv1000) ||
+			(adv_reg != new_adv) ||
 			((bmcr & BMCR_ANENABLE) == 0)) {
 
-			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
-			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+			bnx2_write_phy(bp, bp->mii_adv, new_adv);
+			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
 				BMCR_ANENABLE);
 		}
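
The open-coded ADVERTISED_* to ADVERTISE_* translation deleted above moves into the generic ethtool_adv_to_mii_adv_t() and ethtool_adv_to_mii_ctrl1000_t() helpers. A sketch of what they are assumed to compute for the speed/duplex bits (the real helpers in <linux/mii.h> also cover the pause bits, which bnx2 still adds via bnx2_phy_get_pause_adv()):

	static u32 sketch_adv_to_mii_adv_t(u32 ethadv)
	{
		u32 adv = 0;

		if (ethadv & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ethadv & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ethadv & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ethadv & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;

		return adv;
	}

	static u32 sketch_adv_to_mii_ctrl1000_t(u32 ethadv)
	{
		u32 adv = 0;

		if (ethadv & ADVERTISED_1000baseT_Half)
			adv |= ADVERTISE_1000HALF;
		if (ethadv & ADVERTISED_1000baseT_Full)
			adv |= ADVERTISE_1000FULL;

		return adv;
	}
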
@@ -2734,31 +2725,27 @@
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
-	struct sk_buff *skb;
+	u8 *data;
 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
 	dma_addr_t mapping;
 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
-	unsigned long align;
 
-	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
-	if (skb == NULL) {
+	data = kmalloc(bp->rx_buf_size, gfp);
+	if (!data)
 		return -ENOMEM;
-	}
 
-	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
-		skb_reserve(skb, BNX2_RX_ALIGN - align);
-
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+	mapping = dma_map_single(&bp->pdev->dev,
+				 get_l2_fhdr(data),
+				 bp->rx_buf_use_size,
 				 PCI_DMA_FROMDEVICE);
 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-		dev_kfree_skb(skb);
+		kfree(data);
 		return -EIO;
 	}
 
-	rx_buf->skb = skb;
-	rx_buf->desc = (struct l2_fhdr *) skb->data;
+	rx_buf->data = data;
 	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@
 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	u16 hw_cons, sw_cons, sw_ring_cons;
 	int tx_pkt = 0, index;
+	unsigned int tx_bytes = 0;
 	struct netdev_queue *txq;
 
 	index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@
 
 		sw_cons = NEXT_TX_BD(sw_cons);
 
+		tx_bytes += skb->len;
 		dev_kfree_skb(skb);
 		tx_pkt++;
 		if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@
 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}
 
+	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
 	txr->hw_tx_cons = hw_cons;
 	txr->tx_cons = sw_cons;
 
@@ -2965,8 +2955,8 @@
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
-		  struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+		   u8 *data, u16 cons, u16 prod)
 {
 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
 	struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@
 
 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
-	prod_rx_buf->skb = skb;
-	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+	prod_rx_buf->data = data;
 
 	if (cons == prod)
 		return;
@@ -2995,33 +2984,39 @@
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
 	    u32 ring_idx)
 {
 	int err;
 	u16 prod = ring_idx & 0xffff;
+	struct sk_buff *skb;
 
-	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
 	if (unlikely(err)) {
-		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
 		if (hdr_len) {
 			unsigned int raw_len = len + 4;
 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
 		}
-		return err;
+		return NULL;
 	}
 
-	skb_reserve(skb, BNX2_RX_OFFSET);
 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
 			 PCI_DMA_FROMDEVICE);
-
+	skb = build_skb(data);
+	if (!skb) {
+		kfree(data);
+		goto error;
+	}
+	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
 	if (hdr_len == 0) {
 		skb_put(skb, len);
-		return 0;
+		return skb;
 	} else {
 		unsigned int i, frag_len, frag_size, pages;
 		struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@
 					skb_frag_size_sub(frag, tail);
 					skb->data_len -= tail;
 				}
-				return 0;
+				return skb;
 			}
 			rx_pg = &rxr->rx_pg_ring[pg_cons];
 
@@ -3074,7 +3069,7 @@
 				rxr->rx_pg_prod = pg_prod;
 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
 							pages - i);
-				return err;
+				return NULL;
 			}
 
 			dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@
 		rxr->rx_pg_prod = pg_prod;
 		rxr->rx_pg_cons = pg_cons;
 	}
-	return 0;
+	return skb;
 }
 
 static inline u16
@@ -3130,19 +3125,17 @@
 		struct sw_bd *rx_buf, *next_rx_buf;
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
+		u8 *data;
 
 		sw_ring_cons = RX_RING_IDX(sw_cons);
 		sw_ring_prod = RX_RING_IDX(sw_prod);
 
 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
-		skb = rx_buf->skb;
-		prefetchw(skb);
+		data = rx_buf->data;
+		rx_buf->data = NULL;
 
-		next_rx_buf =
-			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
-		prefetch(next_rx_buf->desc);
-
-		rx_buf->skb = NULL;
+		rx_hdr = get_l2_fhdr(data);
+		prefetch(rx_hdr);
 
 		dma_addr = dma_unmap_addr(rx_buf, mapping);
 
@@ -3150,7 +3143,10 @@
 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
 			PCI_DMA_FROMDEVICE);
 
-		rx_hdr = rx_buf->desc;
+		next_rx_buf =
+			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+		prefetch(get_l2_fhdr(next_rx_buf->data));
+
 		len = rx_hdr->l2_fhdr_pkt_len;
 		status = rx_hdr->l2_fhdr_status;
 
@@ -3169,7 +3165,7 @@
 				       L2_FHDR_ERRORS_TOO_SHORT |
 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
 
-			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
 					  sw_ring_prod);
 			if (pg_ring_used) {
 				int pages;
@@ -3184,30 +3180,29 @@
 		len -= 4;
 
 		if (len <= bp->rx_copy_thresh) {
-			struct sk_buff *new_skb;
-
-			new_skb = netdev_alloc_skb(bp->dev, len + 6);
-			if (new_skb == NULL) {
-				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+			skb = netdev_alloc_skb(bp->dev, len + 6);
+			if (skb == NULL) {
+				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
 						  sw_ring_prod);
 				goto next_rx;
 			}
 
 			/* aligned copy */
-			skb_copy_from_linear_data_offset(skb,
-							 BNX2_RX_OFFSET - 6,
-				      new_skb->data, len + 6);
-			skb_reserve(new_skb, 6);
-			skb_put(new_skb, len);
+			memcpy(skb->data,
+			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+			       len + 6);
+			skb_reserve(skb, 6);
+			skb_put(skb, len);
 
-			bnx2_reuse_rx_skb(bp, rxr, skb,
+			bnx2_reuse_rx_data(bp, rxr, data,
 				sw_ring_cons, sw_ring_prod);
 
-			skb = new_skb;
-		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
-			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
-			goto next_rx;
-
+		} else {
+			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+					  (sw_ring_cons << 16) | sw_ring_prod);
+			if (!skb)
+				goto next_rx;
+		}
 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
 			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@
 
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_num, i, bp->rx_ring_size);
 			break;
@@ -5329,7 +5324,7 @@
 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
 
 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
-		sizeof(struct skb_shared_info);
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
 	bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@
 	}
 
 	bp->rx_buf_use_size = rx_size;
-	/* hw alignment */
-	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+	/* hw alignment + build_skb() overhead */
+	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
 	bp->rx_ring_size = size;
 	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
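
The larger rx_buf_size makes the kmalloc()'ed receive buffer usable with build_skb(): hardware-aligned frame data up front and a cache-aligned struct skb_shared_info reserved at the tail. A userspace sketch of the arithmetic; the cache-line size, NET_SKB_PAD, shared-info size, BNX2_RX_ALIGN and BNX2_RX_OFFSET values below are illustrative assumptions, not values taken from this patch:

	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define CACHE_LINE	64	/* assumed SMP_CACHE_BYTES */
	#define NET_SKB_PAD	64	/* assumed headroom */
	#define SHINFO_SIZE	320	/* assumed sizeof(struct skb_shared_info) */
	#define RX_ALIGN	16	/* assumed BNX2_RX_ALIGN */

	int main(void)
	{
		unsigned int use_size = 1500 + 14 + 18 + 8; /* mtu + ETH_HLEN + assumed offset + 8 */

		/* mirrors: SKB_DATA_ALIGN(use_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		 *          SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
		 */
		unsigned int buf_size = ALIGN_UP(use_size + RX_ALIGN, CACHE_LINE) +
					NET_SKB_PAD + ALIGN_UP(SHINFO_SIZE, CACHE_LINE);

		printf("rx_buf_use_size=%u -> kmalloc size=%u\n", use_size, buf_size);
		return 0;
	}
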
@@ -5400,6 +5396,7 @@
 			}
 			dev_kfree_skb(skb);
 		}
+		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
 	}
 }
 
@@ -5418,9 +5415,9 @@
 
 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
 			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
-			struct sk_buff *skb = rx_buf->skb;
+			u8 *data = rx_buf->data;
 
-			if (skb == NULL)
+			if (data == NULL)
 				continue;
 
 			dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@
 					 bp->rx_buf_use_size,
 					 PCI_DMA_FROMDEVICE);
 
-			rx_buf->skb = NULL;
+			rx_buf->data = NULL;
 
-			dev_kfree_skb(skb);
+			kfree(data);
 		}
 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
 			bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@
 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 {
 	unsigned int pkt_size, num_pkts, i;
-	struct sk_buff *skb, *rx_skb;
+	struct sk_buff *skb;
+	u8 *data;
 	unsigned char *packet;
 	u16 rx_start_idx, rx_idx;
 	dma_addr_t map;
@@ -5828,14 +5826,14 @@
 	}
 
 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
-	rx_skb = rx_buf->skb;
+	data = rx_buf->data;
 
-	rx_hdr = rx_buf->desc;
-	skb_reserve(rx_skb, BNX2_RX_OFFSET);
+	rx_hdr = get_l2_fhdr(data);
+	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
 
 	dma_sync_single_for_cpu(&bp->pdev->dev,
 		dma_unmap_addr(rx_buf, mapping),
-		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
 	if (rx_hdr->l2_fhdr_status &
 		(L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@
 	}
 
 	for (i = 14; i < pkt_size; i++) {
-		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+		if (*(data + i) != (unsigned char) (i & 0xff)) {
 			goto loopback_test_done;
 		}
 	}
@@ -6552,6 +6550,8 @@
 	}
 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
 
+	netdev_tx_sent_queue(txq, skb->len);
+
 	prod = NEXT_TX_BD(prod);
 	txr->tx_prod_bseq += skb->len;
 
@@ -6873,10 +6873,10 @@
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
-	strcpy(info->bus_info, pci_name(bp->pdev));
-	strcpy(info->fw_version, bp->fw_version);
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
 }
 
 #define BNX2_REGDUMP_LEN		(32 * 1024)
@@ -7571,8 +7571,8 @@
 	return 0;
 }
 
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
@@ -7583,7 +7583,7 @@
 }
 
 static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 99d31a7..1db2d51 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6563,12 +6563,25 @@
 #define MB_TX_CID_ADDR	MB_GET_CID_ADDR(TX_CID)
 #define MB_RX_CID_ADDR	MB_GET_CID_ADDR(RX_CID)
 
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains only a pointer to kmalloc()'ed data,
+ * and skbs are built only after the hardware has filled the frame.
+ */
 struct sw_bd {
-	struct sk_buff		*skb;
-	struct l2_fhdr		*desc;
+	u8			*data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
+/* It's faster to compute this from data than to store it in sw_bd
+ * (fewer cache misses)
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+	return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
 struct sw_pg {
 	struct page		*page;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
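
Because sw_bd now holds only the raw kmalloc() pointer, the l2_fhdr position is recomputed on demand rather than cached. A userspace sketch of the pointer arithmetic in get_l2_fhdr(); PTR_ALIGN is approximated locally, and the BNX2_RX_ALIGN/NET_SKB_PAD values are assumptions for the example:

	#include <stdint.h>
	#include <stdio.h>

	#define PTR_ALIGN(p, a)	((char *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))
	#define BNX2_RX_ALIGN	16	/* assumed */
	#define NET_SKB_PAD	64	/* assumed */

	int main(void)
	{
		char buf[4096];
		/* frame header lives NET_SKB_PAD bytes past the aligned buffer start */
		char *l2_fhdr = PTR_ALIGN(buf, BNX2_RX_ALIGN) + NET_SKB_PAD;

		printf("data=%p l2_fhdr=%p\n", (void *)buf, (void *)l2_fhdr);
		return 0;
	}
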
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index aec7212..8c73d34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.70.30-0"
-#define DRV_MODULE_RELDATE      "2011/10/25"
+#define DRV_MODULE_VERSION      "1.70.35-0"
+#define DRV_MODULE_RELDATE      "2011/11/10"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@
 #define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp))
 
 /* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains only a pointer to kmalloc()'ed data,
+ * and skbs are built only after the hardware has filled the frame.
+ */
 struct sw_rx_bd {
-	struct sk_buff	*skb;
+	u8		*data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -411,8 +416,7 @@
 
 
 /* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN			((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
-					 BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN			(NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
@@ -425,8 +429,8 @@
 
 struct bnx2x_agg_info {
 	/*
-	 * First aggregation buffer is an skb, the following - are pages.
-	 * We will preallocate the skbs for each aggregation when
+	 * First aggregation buffer is a data buffer, the following are pages.
+	 * We will preallocate the data buffer for each aggregation when
 	 * we open the interface and will replace the BD at the consumer
 	 * with this one when we receive the TPA_START CQE in order to
 	 * keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@
 	u16			parsing_flags;
 	u16			vlan_tag;
 	u16			len_on_bd;
+	u32			rxhash;
 };
 
 #define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@
 	__le16			fp_hc_idx;
 
 	u8			index;		/* number in fp array */
+	u8			rx_queue;	/* index for skb_record */
 	u8			cl_id;		/* eth client id */
 	u8			cl_qzone_id;
 	u8			fw_sb_id;	/* status block number in FW */
@@ -881,6 +887,8 @@
 #define CHIP_PORT_MODE_NONE			0x2
 #define CHIP_MODE(bp)			(bp->common.chip_port_mode)
 #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+	u32			boot_mode;
 };
 
 /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@
 
 	u32				wb_comp;
 	u32				wb_data[4];
+
+	union drv_info_to_mcp		drv_info_to_mcp;
 };
 
 #define bnx2x_sp(bp, var)		(&bp->slowpath->var)
@@ -1122,18 +1132,21 @@
 enum {
 	BNX2X_PORT_QUERY_IDX,
 	BNX2X_PF_QUERY_IDX,
+	BNX2X_FCOE_QUERY_IDX,
 	BNX2X_FIRST_QUEUE_QUERY_IDX,
 };
 
 struct bnx2x_fw_stats_req {
 	struct stats_query_header hdr;
-	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+	struct stats_query_entry query[FP_SB_MAX_E1x+
+		BNX2X_FIRST_QUEUE_QUERY_IDX];
 };
 
 struct bnx2x_fw_stats_data {
 	struct stats_counter	storm_counters;
 	struct per_port_stats	port;
 	struct per_pf_stats	pf;
+	struct fcoe_statistics_params	fcoe;
 	struct per_queue_stats  queue_stats[1];
 };
 
@@ -1141,6 +1154,7 @@
 enum {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
+	BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
 
@@ -1186,10 +1200,20 @@
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 
 	/* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT		((L1_CACHE_SHIFT < 8) ? \
-					 L1_CACHE_SHIFT : 8)
-	/* FW use 2 Cache lines Alignment for start packet and size  */
-#define BNX2X_FW_RX_ALIGN		(2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT		min(8, L1_CACHE_SHIFT)
+
+	/* FW uses 2 cache lines of alignment for the start packet and size
+	 *
+	 * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+	 * at the end of skb->data, to avoid wasting a full cache line.
+	 * This reduces memory use (skb->truesize).
+	 */
+#define BNX2X_FW_RX_ALIGN_START	(1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END					\
+	max(1UL << BNX2X_RX_ALIGN_SHIFT, 			\
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 #define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
 
 	struct host_sp_status_block *def_status_blk;
@@ -1249,6 +1273,7 @@
 #define NO_ISCSI_OOO_FLAG		(1 << 13)
 #define NO_ISCSI_FLAG			(1 << 14)
 #define NO_FCOE_FLAG			(1 << 15)
+#define BC_SUPPORTS_PFC_STATS		(1 << 17)
 
 #define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@
 #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
 			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
 
-#define RSS_FLAGS(bp) \
-		(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
-		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
-		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
-		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
-		 (bp->multi_mode << \
-		  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK			0x7f
 
 
@@ -2055,6 +2073,8 @@
 #define BNX2X_VPD_LEN			128
 #define VENDOR_ID_LEN			4
 
+int bnx2x_close(struct net_device *dev);
+
 /* Congestion management fairness mode */
 #define CMNG_FNS_NONE		0
 #define CMNG_FNS_MINMAX		1
@@ -2072,4 +2092,16 @@
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev);
 void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+	((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+	(BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44e..2b731b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -79,19 +79,21 @@
  * @to:		destination FP index
  *
  * Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy()ing the entire
+ * source onto the target.
  */
 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 {
 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
-	struct napi_struct orig_napi = to_fp->napi;
+
+	/* Copy the NAPI object as it has been already initialized */
+	from_fp->napi = to_fp->napi;
+
 	/* Move bnx2x_fastpath contents */
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
-
-	/* Restore the NAPI object as it has been already initialized */
-	to_fp->napi = orig_napi;
 }
 
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -100,7 +102,8 @@
  * return idx of last bd freed
  */
 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
-			     u16 idx)
+			     u16 idx, unsigned int *pkts_compl,
+			     unsigned int *bytes_compl)
 {
 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 	struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@
 
 	/* release skb */
 	WARN_ON(!skb);
+	if (skb) {
+		(*pkts_compl)++;
+		(*bytes_compl) += skb->len;
+	}
 	dev_kfree_skb_any(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;
@@ -168,6 +175,7 @@
 {
 	struct netdev_queue *txq;
 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -187,10 +195,14 @@
 				      " pkt_cons %u\n",
 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 
-		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+		    &pkts_compl, &bytes_compl);
+
 		sw_cons++;
 	}
 
+	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
 	txdata->tx_pkt_cons = sw_cons;
 	txdata->tx_bd_cons = bd_cons;
 
@@ -292,8 +304,21 @@
 	   fp->last_max_sge, fp->rx_sge_prod);
 }
 
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+			    const struct eth_fast_path_rx_cqe *cqe)
+{
+	/* Set Toeplitz hash from CQE */
+	if ((bp->dev->features & NETIF_F_RXHASH) &&
+	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+		return le32_to_cpu(cqe->rss_hash_result);
+	return 0;
+}
+
 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-			    struct sk_buff *skb, u16 cons, u16 prod,
+			    u16 cons, u16 prod,
 			    struct eth_fast_path_rx_cqe *cqe)
 {
 	struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@
 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 
-	/* Try to map an empty skb from the aggregation info  */
+	/* Try to map an empty data buffer from the aggregation info  */
 	mapping = dma_map_single(&bp->pdev->dev,
-				 first_buf->skb->data,
+				 first_buf->data + NET_SKB_PAD,
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	/*
 	 *  ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@
 
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		/* Move the BD from the consumer to the producer */
-		bnx2x_reuse_rx_skb(fp, cons, prod);
+		bnx2x_reuse_rx_data(fp, cons, prod);
 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
 		return;
 	}
 
-	/* move empty skb from pool to prod */
-	prod_rx_buf->skb = first_buf->skb;
+	/* move empty data from pool to prod */
+	prod_rx_buf->data = first_buf->data;
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-	/* point prod_bd to new skb */
+	/* point prod_bd to new data */
 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 
@@ -342,6 +367,7 @@
 	tpa_info->tpa_state = BNX2X_TPA_START;
 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 	tpa_info->placement_offset = cqe->placement_offset;
+	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@
 {
 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
-	u8 pad = tpa_info->placement_offset;
+	u32 pad = tpa_info->placement_offset;
 	u16 len = tpa_info->len_on_bd;
-	struct sk_buff *skb = rx_buf->skb;
+	struct sk_buff *skb = NULL;
+	u8 *data = rx_buf->data;
 	/* alloc new skb */
-	struct sk_buff *new_skb;
+	u8 *new_data;
 	u8 old_tpa_state = tpa_info->tpa_state;
 
 	tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +511,18 @@
 	if (old_tpa_state == BNX2X_TPA_ERROR)
 		goto drop;
 
-	/* Try to allocate the new skb */
-	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+	/* Try to allocate the new data */
+	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
+	if (likely(new_data))
+		skb = build_skb(data);
 
-	if (likely(new_skb)) {
-		prefetch(skb);
-		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+	if (likely(skb)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > fp->rx_buf_size) {
@@ -507,8 +534,9 @@
 		}
 #endif
 
-		skb_reserve(skb, pad);
+		skb_reserve(skb, pad + NET_SKB_PAD);
 		skb_put(skb, len);
+		skb->rxhash = tpa_info->rxhash;
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +552,8 @@
 		}
 
 
-		/* put new skb in bin */
-		rx_buf->skb = new_skb;
+		/* put new data in bin */
+		rx_buf->data = new_data;
 
 		return;
 	}
@@ -537,19 +565,6 @@
 	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
-					struct sk_buff *skb)
-{
-	/* Set Toeplitz hash from CQE */
-	if ((bp->dev->features & NETIF_F_RXHASH) &&
-	    (cqe->fast_path_cqe.status_flags &
-	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-		skb->rxhash =
-		le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -592,6 +607,7 @@
 		u8 cqe_fp_flags;
 		enum eth_rx_cqe_type cqe_fp_type;
 		u16 len, pad;
+		u8 *data;
 
 #ifdef BNX2X_STOP_ON_ERROR
 		if (unlikely(bp->panic))
@@ -602,13 +618,6 @@
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
 
-		/* Prefetch the page containing the BD descriptor
-		   at producer's index. It will be needed when new skb is
-		   allocated */
-		prefetch((void *)(PAGE_ALIGN((unsigned long)
-					     (&fp->rx_desc_ring[bd_prod])) -
-				  PAGE_SIZE + 1));
-
 		cqe = &fp->rx_comp_ring[comp_ring_cons];
 		cqe_fp = &cqe->fast_path_cqe;
 		cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +633,123 @@
 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 			bnx2x_sp_event(fp, cqe);
 			goto next_cqe;
+		}
+		rx_buf = &fp->rx_buf_ring[bd_cons];
+		data = rx_buf->data;
 
-		/* this is an rx packet */
-		} else {
-			rx_buf = &fp->rx_buf_ring[bd_cons];
-			skb = rx_buf->skb;
-			prefetch(skb);
-
-			if (!CQE_TYPE_FAST(cqe_fp_type)) {
+		if (!CQE_TYPE_FAST(cqe_fp_type)) {
 #ifdef BNX2X_STOP_ON_ERROR
-				/* sanity check */
-				if (fp->disable_tpa &&
-				    (CQE_TYPE_START(cqe_fp_type) ||
-				     CQE_TYPE_STOP(cqe_fp_type)))
-					BNX2X_ERR("START/STOP packet while "
-						  "disable_tpa type %x\n",
-						  CQE_TYPE(cqe_fp_type));
+			/* sanity check */
+			if (fp->disable_tpa &&
+			    (CQE_TYPE_START(cqe_fp_type) ||
+			     CQE_TYPE_STOP(cqe_fp_type)))
+				BNX2X_ERR("START/STOP packet while "
+					  "disable_tpa type %x\n",
+					  CQE_TYPE(cqe_fp_type));
 #endif
 
-				if (CQE_TYPE_START(cqe_fp_type)) {
-					u16 queue = cqe_fp->queue_index;
-					DP(NETIF_MSG_RX_STATUS,
-					   "calling tpa_start on queue %d\n",
-					   queue);
+			if (CQE_TYPE_START(cqe_fp_type)) {
+				u16 queue = cqe_fp->queue_index;
+				DP(NETIF_MSG_RX_STATUS,
+				   "calling tpa_start on queue %d\n",
+				   queue);
 
-					bnx2x_tpa_start(fp, queue, skb,
-							bd_cons, bd_prod,
-							cqe_fp);
+				bnx2x_tpa_start(fp, queue,
+						bd_cons, bd_prod,
+						cqe_fp);
+				goto next_rx;
+			} else {
+				u16 queue =
+					cqe->end_agg_cqe.queue_index;
+				DP(NETIF_MSG_RX_STATUS,
+				   "calling tpa_stop on queue %d\n",
+				   queue);
 
-					/* Set Toeplitz hash for LRO skb */
-					bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-					goto next_rx;
-
-				} else {
-					u16 queue =
-						cqe->end_agg_cqe.queue_index;
-					DP(NETIF_MSG_RX_STATUS,
-					   "calling tpa_stop on queue %d\n",
-					   queue);
-
-					bnx2x_tpa_stop(bp, fp, queue,
-						       &cqe->end_agg_cqe,
-						       comp_ring_cons);
+				bnx2x_tpa_stop(bp, fp, queue,
+					       &cqe->end_agg_cqe,
+					       comp_ring_cons);
 #ifdef BNX2X_STOP_ON_ERROR
-					if (bp->panic)
-						return 0;
+				if (bp->panic)
+					return 0;
 #endif
 
-					bnx2x_update_sge_prod(fp, cqe_fp);
-					goto next_cqe;
-				}
+				bnx2x_update_sge_prod(fp, cqe_fp);
+				goto next_cqe;
 			}
-			/* non TPA */
-			len = le16_to_cpu(cqe_fp->pkt_len);
-			pad = cqe_fp->placement_offset;
-			dma_sync_single_for_cpu(&bp->pdev->dev,
+		}
+		/* non TPA */
+		len = le16_to_cpu(cqe_fp->pkt_len);
+		pad = cqe_fp->placement_offset;
+		dma_sync_single_for_cpu(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						       pad + RX_COPY_THRESH,
-						       DMA_FROM_DEVICE);
-			prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+					pad + RX_COPY_THRESH,
+					DMA_FROM_DEVICE);
+		pad += NET_SKB_PAD;
+		prefetch(data + pad); /* speedup eth_type_trans() */
+		/* is this an error packet? */
+		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+			DP(NETIF_MSG_RX_ERR,
+			   "ERROR  flags %x  rx packet %u\n",
+			   cqe_fp_flags, sw_comp_cons);
+			fp->eth_q_stats.rx_err_discard_pkt++;
+			goto reuse_rx;
+		}
 
-			/* is this an error packet? */
-			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+		/* Since we don't have a jumbo ring
+		 * copy small packets if mtu > 1500
+		 */
+		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+		    (len <= RX_COPY_THRESH)) {
+			skb = netdev_alloc_skb_ip_align(bp->dev, len);
+			if (skb == NULL) {
 				DP(NETIF_MSG_RX_ERR,
-				   "ERROR  flags %x  rx packet %u\n",
-				   cqe_fp_flags, sw_comp_cons);
-				fp->eth_q_stats.rx_err_discard_pkt++;
+				   "ERROR  packet dropped because of alloc failure\n");
+				fp->eth_q_stats.rx_skb_alloc_failed++;
 				goto reuse_rx;
 			}
-
-			/* Since we don't have a jumbo ring
-			 * copy small packets if mtu > 1500
-			 */
-			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
-			    (len <= RX_COPY_THRESH)) {
-				struct sk_buff *new_skb;
-
-				new_skb = netdev_alloc_skb(bp->dev, len + pad);
-				if (new_skb == NULL) {
-					DP(NETIF_MSG_RX_ERR,
-					   "ERROR  packet dropped "
-					   "because of alloc failure\n");
-					fp->eth_q_stats.rx_skb_alloc_failed++;
-					goto reuse_rx;
-				}
-
-				/* aligned copy */
-				skb_copy_from_linear_data_offset(skb, pad,
-						    new_skb->data + pad, len);
-				skb_reserve(new_skb, pad);
-				skb_put(new_skb, len);
-
-				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
-				skb = new_skb;
-
-			} else
-			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+			memcpy(skb->data, data + pad, len);
+			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+		} else {
+			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
-					dma_unmap_addr(rx_buf, mapping),
+						 dma_unmap_addr(rx_buf, mapping),
 						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
+				skb = build_skb(data);
+				if (unlikely(!skb)) {
+					kfree(data);
+					fp->eth_q_stats.rx_skb_alloc_failed++;
+					goto next_rx;
+				}
 				skb_reserve(skb, pad);
-				skb_put(skb, len);
-
 			} else {
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  packet dropped because "
 				   "of alloc failure\n");
 				fp->eth_q_stats.rx_skb_alloc_failed++;
 reuse_rx:
-				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 				goto next_rx;
 			}
-
-			skb->protocol = eth_type_trans(skb, bp->dev);
-
-			/* Set Toeplitz hash for a none-LRO skb */
-			bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-			skb_checksum_none_assert(skb);
-
-			if (bp->dev->features & NETIF_F_RXCSUM) {
-
-				if (likely(BNX2X_RX_CSUM_OK(cqe)))
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				else
-					fp->eth_q_stats.hw_csum_err++;
-			}
 		}
 
-		skb_record_rx_queue(skb, fp->index);
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, bp->dev);
+
+		/* Set Toeplitz hash for a non-LRO skb */
+		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+
+		skb_checksum_none_assert(skb);
+
+		if (bp->dev->features & NETIF_F_RXCSUM) {
+
+			if (likely(BNX2X_RX_CSUM_OK(cqe)))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			else
+				fp->eth_q_stats.hw_csum_err++;
+		}
+
+		skb_record_rx_queue(skb, fp->rx_queue);
 
 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 		    PARSING_FLAGS_VLAN)
@@ -765,7 +759,7 @@
 
 
 next_rx:
-		rx_buf->skb = NULL;
+		rx_buf->data = NULL;
 
 		bd_cons = NEXT_RX_IDX(bd_cons);
 		bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1005,9 @@
 				struct sw_rx_bd *first_buf =
 					&tpa_info->first_buf;
 
-				first_buf->skb = netdev_alloc_skb(bp->dev,
-						       fp->rx_buf_size);
-				if (!first_buf->skb) {
+				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+							  GFP_ATOMIC);
+				if (!first_buf->data) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
 						  "disabling TPA on this "
@@ -1093,16 +1087,18 @@
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		for_each_cos_in_tx_queue(fp, cos) {
 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+			unsigned pkts_compl = 0, bytes_compl = 0;
 
-			u16 bd_cons = txdata->tx_bd_cons;
 			u16 sw_prod = txdata->tx_pkt_prod;
 			u16 sw_cons = txdata->tx_pkt_cons;
 
 			while (sw_cons != sw_prod) {
-				bd_cons = bnx2x_free_tx_pkt(bp, txdata,
-							    TX_BD(sw_cons));
+				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+				    &pkts_compl, &bytes_compl);
 				sw_cons++;
 			}
+			netdev_tx_reset_queue(
+			    netdev_get_tx_queue(bp->dev, txdata->txq_index));
 		}
 	}
 }
@@ -1118,16 +1114,16 @@
 
 	for (i = 0; i < NUM_RX_BD; i++) {
 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-		struct sk_buff *skb = rx_buf->skb;
+		u8 *data = rx_buf->data;
 
-		if (skb == NULL)
+		if (data == NULL)
 			continue;
 		dma_unmap_single(&bp->pdev->dev,
 				 dma_unmap_addr(rx_buf, mapping),
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
 
-		rx_buf->skb = NULL;
-		dev_kfree_skb(skb);
+		rx_buf->data = NULL;
+		kfree(data);
 	}
 }
 
@@ -1445,6 +1441,11 @@
 		break;
 	}
 
+#ifdef BCM_CNIC
+	/* override in ISCSI SD mod */
+	if (IS_MF_ISCSI_SD(bp))
+		bp->num_queues = 1;
+#endif
 	/* Add special queues */
 	bp->num_queues += NON_ETH_CONTEXT_USE;
 }
@@ -1509,6 +1510,7 @@
 
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
+		u32 mtu;
 
 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
 		if (IS_FCOE_IDX(i))
@@ -1518,13 +1520,15 @@
 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
 			 * overrun attack.
 			 */
-			fp->rx_buf_size =
-				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
-				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
 		else
-			fp->rx_buf_size =
-				bp->dev->mtu + ETH_OVREHEAD +
-				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+			mtu = bp->dev->mtu;
+		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+				  IP_HEADER_ALIGNMENT_PADDING +
+				  ETH_OVREHEAD +
+				  mtu +
+				  BNX2X_FW_RX_ALIGN_END;
+		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
 	}
 }
 
@@ -1541,7 +1545,8 @@
 	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
 		for (i = 0; i < sizeof(ind_table); i++)
 			ind_table[i] =
-				bp->fp->cl_id +	(i % num_eth_queues);
+				bp->fp->cl_id +
+				ethtool_rxfh_indir_default(i, num_eth_queues);
 	}
 
 	/*
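
The default RSS spread in the hunk above now comes from ethtool_rxfh_indir_default() instead of an open-coded modulo; the helper is assumed to reduce to a plain round-robin over the ethernet queues, so the seeded table is the same as before:

	/* assumed behaviour of the helper (see <linux/ethtool.h>) */
	static inline u32 sketch_rxfh_indir_default(u32 index, u32 n_rx_rings)
	{
		return index % n_rx_rings;
	}
	/* i.e. ind_table[i] = bp->fp->cl_id + (i % num_eth_queues) */
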
@@ -1929,13 +1934,17 @@
 		break;
 	}
 
-	if (!bp->port.pmf)
+	if (bp->port.pmf)
+		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+	else
 		bnx2x__link_status_update(bp);
 
 	/* start the timer */
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
 #ifdef BCM_CNIC
+	/* re-read iscsi info */
+	bnx2x_get_iscsi_info(bp);
 	bnx2x_setup_cnic_irq_info(bp);
 	if (bp->state == BNX2X_STATE_OPEN)
 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2808,7 @@
 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
 					   skb_frag_size(frag), DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+			unsigned int pkts_compl = 0, bytes_compl = 0;
 
 			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
 						"dropping packet...\n");
@@ -2810,7 +2820,8 @@
 			 */
 			first_bd->nbd = cpu_to_le16(nbd);
 			bnx2x_free_tx_pkt(bp, txdata,
-					  TX_BD(txdata->tx_pkt_prod));
+					  TX_BD(txdata->tx_pkt_prod),
+					  &pkts_compl, &bytes_compl);
 			return NETDEV_TX_OK;
 		}
 
@@ -2871,6 +2882,8 @@
 		   pbd_e2->parsing_data);
 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 
+	netdev_tx_sent_queue(txq, skb->len);
+
 	txdata->tx_pkt_prod++;
 	/*
 	 * Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2994,14 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	int rc = 0;
 
-	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
 		return -EINVAL;
 
+#ifdef BCM_CNIC
+	if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+		return -EINVAL;
+#endif
+
 	if (netif_running(dev))  {
 		rc = bnx2x_set_eth_mac(bp, false);
 		if (rc)
@@ -3098,7 +3116,12 @@
 	u8 cos;
 	int rx_ring_size = 0;
 
-	/* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+	if (IS_MF_ISCSI_SD(bp)) {
+		rx_ring_size = MIN_RX_SIZE_NONTPA;
+		bp->rx_ring_size = rx_ring_size;
+	} else
+#endif
 	if (!bp->rx_ring_size) {
 
 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3131,7 @@
 				     MIN_RX_SIZE_TPA, rx_ring_size);
 
 		bp->rx_ring_size = rx_ring_size;
-	} else
+	} else /* if rx_ring_size specified - use it */
 		rx_ring_size = bp->rx_ring_size;
 
 	/* Common */
@@ -3278,14 +3301,14 @@
 	msix_table_size = bp->igu_sb_cnt + 1;
 
 	/* fp array: RSS plus CNIC related L2 queues */
-	fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+	fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
 		     sizeof(*fp), GFP_KERNEL);
 	if (!fp)
 		goto alloc_err;
 	bp->fp = fp;
 
 	/* msix table */
-	tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
 	if (!tbl)
 		goto alloc_err;
 	bp->msix_table = tbl;
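
kzalloc(n * size, ...) becomes kcalloc(n, size, ...): the memory is still zeroed, but the multiplication is now overflow-checked by the allocator. A minimal sketch of the idiom; "foo" and nr_entries are placeholders:

	struct foo *arr;

	/* returns NULL (rather than a short buffer) if nr_entries * sizeof(*arr) would overflow */
	arr = kcalloc(nr_entries, sizeof(*arr), GFP_KERNEL);
	if (!arr)
		return -ENOMEM;
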
@@ -3409,7 +3432,8 @@
 	return bnx2x_reload_if_running(dev);
 }
 
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -3420,7 +3444,7 @@
 	return features;
 }
 
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 flags = bp->flags;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 283d663..bf27c54 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 
 #include "bnx2x.h"
@@ -533,8 +534,9 @@
  */
 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
 #endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+	netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
 
 /**
  * bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@
 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 {
 	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
-	memset(fp->sge_mask, 0xff,
-	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
 
 	/* Clear the two last indices in the page to 1:
 	   these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@
 	return 0;
 }
 
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+				      struct bnx2x_fastpath *fp, u16 index)
 {
-	struct sk_buff *skb;
+	u8 *data;
 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
-	if (unlikely(skb == NULL))
+	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	if (unlikely(data == NULL))
 		return -ENOMEM;
 
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		dev_kfree_skb_any(skb);
+		kfree(data);
 		return -ENOMEM;
 	}
 
-	rx_buf->skb = skb;
+	rx_buf->data = data;
 	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@
 	return 0;
 }
 
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
  * we are just moving one from cons to prod
  * we are not creating a new mapping,
  * so there is no need to check for dma_mapping_error().
  */
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
 				      u16 cons, u16 prod)
 {
 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@
 
 	dma_unmap_addr_set(prod_rx_buf, mapping,
 			   dma_unmap_addr(cons_rx_buf, mapping));
-	prod_rx_buf->skb = cons_rx_buf->skb;
+	prod_rx_buf->data = cons_rx_buf->data;
 	*prod_bd = *cons_bd;
 }
 
@@ -1030,9 +1032,9 @@
 	for (i = 0; i < last; i++) {
 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-		struct sk_buff *skb = first_buf->skb;
+		u8 *data = first_buf->data;
 
-		if (skb == NULL) {
+		if (data == NULL) {
 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
 			continue;
 		}
@@ -1040,8 +1042,8 @@
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(first_buf, mapping),
 					 fp->rx_buf_size, DMA_FROM_DEVICE);
-		dev_kfree_skb(skb);
-		first_buf->skb = NULL;
+		kfree(data);
+		first_buf->data = NULL;
 	}
 }
 
@@ -1149,7 +1151,7 @@
 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
 	 */
 	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
 			fp->eth_q_stats.rx_skb_alloc_failed++;
 			continue;
 		}
@@ -1318,6 +1320,7 @@
 	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 	unsigned long q_type = 0;
 
+	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
 	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
 						     BNX2X_FCOE_ETH_CL_ID_IDX);
 	/** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
@@ -1488,4 +1491,77 @@
 	return max_cfg;
 }
 
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp:		driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp:		driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+	int func;
+	int vn;
+
+	/* Set the attention towards other drivers on the same port */
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		if (vn == BP_VN(bp))
+			continue;
+
+		func = func_by_vn(bp, vn);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+	}
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp:		driver handle
+ * @flags:	flags to update
+ * @set:	set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+	if (SHMEM2_HAS(bp, drv_flags)) {
+		u32 drv_flags;
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+		drv_flags = SHMEM2_RD(bp, drv_flags);
+
+		if (set)
+			SET_FLAGS(drv_flags, flags);
+		else
+			RESET_FLAGS(drv_flags, flags);
+
+		SHMEM2_WR(bp, drv_flags, drv_flags);
+		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+	}
+}
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+	if (is_valid_ether_addr(addr))
+		return true;
+#ifdef BCM_CNIC
+	if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+		return true;
+#endif
+	return false;
+}
+
 #endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 51bd748..5051cf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -685,24 +685,6 @@
 }
 #endif
 
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
-	if (SHMEM2_HAS(bp, drv_flags)) {
-		u32 drv_flags;
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-		drv_flags = SHMEM2_RD(bp, drv_flags);
-
-		if (set)
-			SET_FLAGS(drv_flags, flags);
-		else
-			RESET_FLAGS(drv_flags, flags);
-
-		SHMEM2_WR(bp, drv_flags, drv_flags);
-		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
-		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-	}
-}
-
 static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 {
 	u8 prio, cos;
@@ -755,18 +737,26 @@
 			/* mark DCBX result for PMF migration */
 			bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 #ifdef BCM_DCBNL
-			/**
+			/*
 			 * Add new app tlvs to dcbnl
 			 */
 			bnx2x_dcbnl_update_applist(bp, false);
 #endif
-			bnx2x_dcbx_stop_hw_tx(bp);
-
-			/* reconfigure the netdevice with the results of the new
+			/*
+			 * reconfigure the netdevice with the results of the new
 			 * dcbx negotiation.
 			 */
 			bnx2x_dcbx_update_tc_mapping(bp);
 
+			/*
+			 * allow other functions to update their netdevices
+			 * accordingly
+			 */
+			if (IS_MF(bp))
+				bnx2x_link_sync_notify(bp);
+
+			bnx2x_dcbx_stop_hw_tx(bp);
+
 			return;
 		}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@
 
 		bnx2x_dcbx_update_ets_params(bp);
 		bnx2x_dcbx_resume_hw_tx(bp);
+
 		return;
 	case BNX2X_DCBX_STATE_TX_RELEASED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@
 		/*For IEEE admin_recommendation_bw_precentage
 		 *For IEEE admin_recommendation_ets_pg */
 		af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
 			if (dp->admin_priority_app_table[i].valid) {
 				struct bnx2x_admin_priority_app_table *table =
 					dp->admin_priority_app_table;
@@ -923,7 +914,7 @@
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 {
-	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		bp->dcb_state = dcb_on;
 		bp->dcbx_enabled = dcbx_enabled;
 	} else {
@@ -1863,7 +1854,7 @@
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
 	/* if we need to syncronize DCBX result from prev PMF
-	 * read it from shmem and update bp accordingly
+	 * read it from shmem and update bp and netdev accordingly
 	 */
 	if (SHMEM2_HAS(bp, drv_flags) &&
 	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@
 					  bp->dcbx_error);
 		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 					 bp->dcbx_error);
+#ifdef BCM_DCBNL
+		/*
+		 * Add new app tlvs to dcbnl
+		 */
+		bnx2x_dcbnl_update_applist(bp, false);
+		/*
+		 * Send a notification for the new negotiated parameters
+		 */
+		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+		/*
+		 * reconfigure the netdevice with the results of the new
+		 * dcbx negotiation.
+		 */
+		bnx2x_dcbx_update_tc_mapping(bp);
+
 	}
 }
 
@@ -2242,7 +2249,7 @@
 	int i, ff;
 
 	/* iterate over the app entries looking for idtype and idval */
-	for (i = 0, ff = -1; i < 4; i++) {
+	for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
 		struct bnx2x_admin_priority_app_table *app_ent =
 			&bp->dcbx_config_params.admin_priority_app_table[i];
 		if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@
 		if (ff < 0 && !app_ent->valid)
 			ff = i;
 	}
-	if (i < 4)
+	if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
 		/* if found overwrite up */
 		bp->dcbx_config_params.
 			admin_priority_app_table[i].priority = up;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bc..2ab9254 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -90,6 +90,7 @@
 		u32 app_id;
 };
 
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
 struct bnx2x_config_dcbx_params {
 	u32 overwrite_settings;
 	u32 admin_dcbx_version;
@@ -109,7 +110,8 @@
 	u32 admin_recommendation_bw_precentage[8];
 	u32 admin_recommendation_ets_pg[8];
 	u32 admin_pfc_bitmap;
-	struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+	struct bnx2x_admin_priority_app_table
+		admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
 	u32 admin_default_priority;
 };
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f0ca8b2..a688b9d97 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -107,6 +107,10 @@
 				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
 	{ STATS_OFFSET32(mf_tag_discard),
 				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+	{ STATS_OFFSET32(pfc_frames_received_hi),
+				8, STATS_FLAGS_PORT, "pfc_frames_received" },
+	{ STATS_OFFSET32(pfc_frames_sent_hi),
+				8, STATS_FLAGS_PORT, "pfc_frames_sent" },
 	{ STATS_OFFSET32(brb_drop_hi),
 				8, STATS_FLAGS_PORT, "rx_brb_discard" },
 	{ STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@
 		DP(NETIF_MSG_LINK, "Unsupported port type\n");
 		return -EINVAL;
 	}
-	/* Save new config in case command complete successuly */
+	/* Save new config in case the command completes successfully */
 	new_multi_phy_config = bp->link_params.multi_phy_config;
 	/* Get the new cfg_idx */
 	cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -761,8 +765,8 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	u8 phy_fw_ver[PHY_FW_VER_LEN];
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 
 	phy_fw_ver[0] = '\0';
 	if (bp->port.pmf) {
@@ -773,14 +777,14 @@
 		bnx2x_release_phy_lock(bp);
 	}
 
-	strncpy(info->fw_version, bp->fw_ver, 32);
+	strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
 	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
 		 "bc %d.%d.%d%s%s",
 		 (bp->common.bc_ver & 0xff0000) >> 16,
 		 (bp->common.bc_ver & 0xff00) >> 8,
 		 (bp->common.bc_ver & 0xff),
 		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
-	strcpy(info->bus_info, pci_name(bp->pdev));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
 	info->n_stats = BNX2X_NUM_STATS;
 	info->testinfo_len = BNX2X_NUM_TESTS;
 	info->eedump_len = bp->common.flash_size;
@@ -1740,6 +1744,8 @@
 	struct sw_rx_bd *rx_buf;
 	u16 len;
 	int rc = -ENODEV;
+	u8 *data;
+	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 
 	/* check the loopback mode */
 	switch (loopback_mode) {
@@ -1748,8 +1754,18 @@
 			return -EINVAL;
 		break;
 	case BNX2X_MAC_LOOPBACK:
-		bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
-						LOOPBACK_XMAC : LOOPBACK_BMAC;
+		if (CHIP_IS_E3(bp)) {
+			int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+			if (bp->port.supported[cfg_idx] &
+			    (SUPPORTED_10000baseT_Full |
+			     SUPPORTED_20000baseMLD2_Full |
+			     SUPPORTED_20000baseKR2_Full))
+				bp->link_params.loopback_mode = LOOPBACK_XMAC;
+			else
+				bp->link_params.loopback_mode = LOOPBACK_UMAC;
+		} else
+			bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		break;
 	default:
@@ -1784,6 +1800,8 @@
 	tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
 	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 
+	netdev_tx_sent_queue(txq, skb->len);
+
 	pkt_prod = txdata->tx_pkt_prod++;
 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
 	tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1865,10 +1883,9 @@
 	dma_sync_single_for_cpu(&bp->pdev->dev,
 				   dma_unmap_addr(rx_buf, mapping),
 				   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
-	skb = rx_buf->skb;
-	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+	data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
 	for (i = ETH_HLEN; i < pkt_size; i++)
-		if (*(skb->data + i) != (unsigned char) (i & 0xff))
+		if (*(data + i) != (unsigned char) (i & 0xff))
 			goto test_loopback_rx_exit;
 
 	rc = 0;
@@ -2285,18 +2302,20 @@
 	}
 }
 
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
-				struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	size_t copy_size =
-		min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+
+	return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+		0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 	size_t i;
 
-	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-		return -EOPNOTSUPP;
-
 	/* Get the current configuration of the RSS indirection table */
 	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
 
@@ -2309,33 +2328,19 @@
 	 * align the returned table to the Client ID of the leading RSS
 	 * queue.
 	 */
-	for (i = 0; i < copy_size; i++)
-		indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
-	indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+		indir[i] = ind_table[i] - bp->fp->cl_id;
 
 	return 0;
 }
 
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
-				const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t i;
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
-	u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
-	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-		return -EOPNOTSUPP;
-
-	/* validate the size */
-	if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
-		return -EINVAL;
 
 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
-		/* validate the indices */
-		if (indir->ring_index[i] >= num_eth_queues)
-			return -EINVAL;
 		/*
 		 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
 		 * as an internal storage of an indirection table is a u8 array
@@ -2345,7 +2350,7 @@
 		 * align the received table to the Client ID of the leading RSS
 		 * queue
 		 */
-		ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+		ind_table[i] = indir[i] + bp->fp->cl_id;
 	}
 
 	return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2378,6 +2383,7 @@
 	.set_phys_id		= bnx2x_set_phys_id,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
 	.get_rxnfc		= bnx2x_get_rxnfc,
+	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
 	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
 	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
 };
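
[Editor's note] The rxfh hunks above track the ethtool core rework in which the RSS indirection table size is reported through a dedicated get_rxfh_indir_size() callback and the table itself is exchanged as a plain u32 array: the core now allocates the buffer and validates its size and ring indices, so the driver callbacks shrink to a size query plus a copy. A minimal sketch of the new callback shape, with hypothetical example_* names standing in for a generic driver:

	/* Sketch (hypothetical driver) of the post-change ethtool RXFH contract. */
	static u32 example_get_rxfh_indir_size(struct net_device *dev)
	{
		/* Returning 0 tells the core RSS is unavailable (old -EOPNOTSUPP). */
		return example_rss_enabled(dev) ? EXAMPLE_INDIR_TABLE_SIZE : 0;
	}

	static int example_get_rxfh_indir(struct net_device *dev, u32 *indir)
	{
		struct example_priv *priv = netdev_priv(dev);
		u32 i;

		/* The core sized 'indir' from get_rxfh_indir_size() before calling. */
		for (i = 0; i < EXAMPLE_INDIR_TABLE_SIZE; i++)
			indir[i] = priv->indir_table[i];
		return 0;
	}
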
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index fc754cb..3e30c86 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1247,11 +1247,14 @@
 	#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
 	#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
 	#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+	#define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
 	#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
 	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
 	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+	#define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
+	#define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
 	#define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
 	#define REQ_BC_VER_4_SET_MF_BW                  0x00060202
@@ -1304,6 +1307,8 @@
 	#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
 	#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
 	#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+	#define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
+	#define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
 	#define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
 	#define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
@@ -1360,6 +1365,7 @@
 
 	#define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
 	#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+	#define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
 	u32 virt_mac_upper;
 	#define VIRT_MAC_SIGN_MASK                      0xffff0000
@@ -1964,9 +1970,38 @@
 	u32 extended_dev_info_shared_addr;
 	u32 ncsi_oem_data_addr;
 
-	u32 ocsd_host_addr;
-	u32 ocbb_host_addr;
-	u32 ocsd_req_update_interval;
+	u32 ocsd_host_addr; /* initialized by option ROM */
+	u32 ocbb_host_addr; /* initialized by option ROM */
+	u32 ocsd_req_update_interval; /* initialized by option ROM */
+	u32 temperature_in_half_celsius;
+	u32 glob_struct_in_host;
+
+	u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE			0x00000000
+
+	u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+
+	u32 extended_dev_info_shared_cfg_size;
+
+	u32 dcbx_en[PORT_MAX];
+
+	/* The offset points to the multi threaded meta structure */
+	u32 multi_thread_data_offset;
+
+	/* address of a DMA-able host buffer holding values from the drivers */
+	u32 drv_info_host_addr_lo;
+	u32 drv_info_host_addr_hi;
+
+	/* general values written by the MFW (such as current version) */
+	u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK          0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT         0
+#define DRV_INFO_CONTROL_OP_CODE_MASK      0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT     8
 };
 
 
@@ -2501,14 +2536,18 @@
 #define MAC_STX_IDX_MAX                     2
 
 struct host_port_stats {
-	u32            host_port_stats_start;
+	u32            host_port_stats_counter;
 
 	struct mac_stx mac_stx[MAC_STX_IDX_MAX];
 
 	u32            brb_drop_hi;
 	u32            brb_drop_lo;
 
-	u32            host_port_stats_end;
+	u32            not_used; /* obsolete */
+	u32            pfc_frames_tx_hi;
+	u32            pfc_frames_tx_lo;
+	u32            pfc_frames_rx_hi;
+	u32            pfc_frames_rx_lo;
 };
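
[Editor's note] The new pfc_frames_* fields follow the hi/lo split used throughout these statistics structures: each 64-bit counter is carried as two 32-bit words. A one-line sketch of folding such a pair back into a u64 (the helper name is illustrative):

	/* Illustrative fold of a hi/lo statistics pair into one 64-bit counter. */
	static inline u64 example_stat64(u32 hi, u32 lo)
	{
		return ((u64)hi << 32) | lo;
	}
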
 
 
@@ -2548,6 +2587,118 @@
 /* VIC definitions */
 #define VICSTATST_UIF_INDEX 2
 
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+	ETH_STATS_OPCODE,
+	FCOE_STATS_OPCODE,
+	ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN	12
+/*  Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+	/* Function's Driver Version. padded to 12 */
+	u8 version[ETH_STAT_INFO_VERSION_LEN];
+	/* Locally Admin Addr. Big-endian EUI-48. Actual size is 6 bytes */
+	u8 mac_local[8];
+	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
+	u8 mac_add2[8];		/* Additional Programmed MAC Addr 2. */
+	u32 mtu_size;		/* MTU Size. Note: negotiated MTU */
+	u32 feature_flags;	/* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK		0x01
+#define FEATURE_ETH_LSO_MASK			0x02
+#define FEATURE_ETH_BOOTMODE_MASK		0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT		2
+#define FEATURE_ETH_BOOTMODE_NONE		(0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE		(0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI		(0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE		(0x3 << 2)
+#define FEATURE_ETH_TOE_MASK			0x20
+	u32 lso_max_size;	/* LSO MaxOffloadSize. */
+	u32 lso_min_seg_cnt;	/* LSO MinSegmentCount. */
+	/* Num Offloaded Connections TCP_IPv4. */
+	u32 ipv4_ofld_cnt;
+	/* Num Offloaded Connections TCP_IPv6. */
+	u32 ipv6_ofld_cnt;
+	u32 promiscuous_mode;	/* Promiscuous Mode. non-zero true */
+	u32 txq_size;		/* TX Descriptors Queue Size */
+	u32 rxq_size;		/* RX Descriptors Queue Size */
+	/* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+	u32 txq_avg_depth;
+	/* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+	u32 rxq_avg_depth;
+	/* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+	u32 iov_offload;
+	/* Number of NetQueue/VMQ Config'd. */
+	u32 netq_cnt;
+	u32 vf_cnt;		/* Num VF assigned to this PF. */
+};
+
+/*  Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+	u8 version[12];		/* Function's Driver Version. */
+	u8 mac_local[8];	/* Locally Admin Addr. */
+	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
+	u8 mac_add2[8];		/* Additional Programmed MAC Addr 2. */
+	/* QoS Priority (per 802.1p). 0-7255 */
+	u32 qos_priority;
+	u32 txq_size;		/* FCoE TX Descriptors Queue Size. */
+	u32 rxq_size;		/* FCoE RX Descriptors Queue Size. */
+	/* FCoE TX Descriptor Queue Avg Depth. */
+	u32 txq_avg_depth;
+	/* FCoE RX Descriptors Queue Avg Depth. */
+	u32 rxq_avg_depth;
+	u32 rx_frames_lo;	/* FCoE RX Frames received. */
+	u32 rx_frames_hi;	/* FCoE RX Frames received. */
+	u32 rx_bytes_lo;	/* FCoE RX Bytes received. */
+	u32 rx_bytes_hi;	/* FCoE RX Bytes received. */
+	u32 tx_frames_lo;	/* FCoE TX Frames sent. */
+	u32 tx_frames_hi;	/* FCoE TX Frames sent. */
+	u32 tx_bytes_lo;	/* FCoE TX Bytes sent. */
+	u32 tx_bytes_hi;	/* FCoE TX Bytes sent. */
+};
+
+/* Per PCI  Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+	u8 version[12];		/* Function's Driver Version. */
+	u8 mac_local[8];	/* Locally Admin iSCSI MAC Addr. */
+	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
+	/* QoS Priority (per 802.1p). 0-7255 */
+	u32 qos_priority;
+	u8 initiator_name[64];	/* iSCSI Boot Initiator Node name. */
+	u8 ww_port_name[64];	/* iSCSI World wide port name */
+	u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+	u8 boot_target_ip[16];	/* iSCSI Boot Target IP. */
+	u32 boot_target_portal;	/* iSCSI Boot Target Portal. */
+	u8 boot_init_ip[16];	/* iSCSI Boot Initiator IP Address. */
+	u32 max_frame_size;	/* Max Frame Size. bytes */
+	u32 txq_size;		/* PDU TX Descriptors Queue Size. */
+	u32 rxq_size;		/* PDU RX Descriptors Queue Size. */
+	u32 txq_avg_depth;	/* PDU TX Descriptor Queue Avg Depth. */
+	u32 rxq_avg_depth;	/* PDU RX Descriptors Queue Avg Depth. */
+	u32 rx_pdus_lo;		/* iSCSI PDUs received. */
+	u32 rx_pdus_hi;		/* iSCSI PDUs received. */
+	u32 rx_bytes_lo;	/* iSCSI RX Bytes received. */
+	u32 rx_bytes_hi;	/* iSCSI RX Bytes received. */
+	u32 tx_pdus_lo;		/* iSCSI PDUs sent. */
+	u32 tx_pdus_hi;		/* iSCSI PDUs sent. */
+	u32 tx_bytes_lo;	/* iSCSI PDU TX Bytes sent. */
+	u32 tx_bytes_hi;	/* iSCSI PDU TX Bytes sent. */
+	u32 pcp_prior_map_tbl;	/* C-PCP to S-PCP Priority MapTable.
+				 * 9 nibbles, the position of each nibble
+				 * represents the C-PCP value, the value
+				 * of the nibble = S-PCP value.
+				 */
+};
+
+union drv_info_to_mcp {
+	struct eth_stats_info	ether_stat;
+	struct fcoe_stats_info	fcoe_stat;
+	struct iscsi_stats_info	iscsi_stat;
+};
 #define BCM_5710_FW_MAJOR_VERSION			7
 #define BCM_5710_FW_MINOR_VERSION			0
 #define BCM_5710_FW_REVISION_VERSION		29
@@ -4161,8 +4312,62 @@
 
 
 /*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
  */
+struct fcoe_rx_stat_params_section0 {
+	__le32 fcoe_rx_pkt_cnt;
+	__le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+	__le32 fcoe_ver_cnt;
+	__le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+	__le32 fc_crc_cnt;
+	__le32 eofa_del_cnt;
+	__le32 miss_frame_cnt;
+	__le32 seq_timeout_cnt;
+	__le32 drop_seq_cnt;
+	__le32 fcoe_rx_drop_pkt_cnt;
+	__le32 fcp_rx_pkt_cnt;
+	__le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+	__le32 fcoe_tx_pkt_cnt;
+	__le32 fcoe_tx_byte_cnt;
+	__le32 fcp_tx_pkt_cnt;
+	__le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+	struct fcoe_tx_stat_params tx_stat;
+	struct fcoe_rx_stat_params_section0 rx_stat0;
+	struct fcoe_rx_stat_params_section1 rx_stat1;
+	struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+ */
 struct cfc_del_event_data {
 	u32 cid;
 	u32 reserved0;
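
[Editor's note] The shmem2 additions above define a small request/response protocol between the management firmware and the driver: the MFW raises DRV_STATUS_DRV_INFO_REQ, encodes the wanted opcode in drv_info_control, expects the driver to fill a union drv_info_to_mcp in the DMA-able buffer published through drv_info_host_addr_lo/hi, and then to answer with DRV_MSG_CODE_DRV_INFO_ACK (or NACK). A hedged sketch of the opcode dispatch, using only the types and masks from this header; the function name and the literal values are illustrative, not the driver's actual handler:

	/* Illustrative dispatch on the drv_info opcode (example_* is hypothetical). */
	static void example_fill_drv_info(union drv_info_to_mcp *buf,
					  u32 drv_info_control)
	{
		u32 op = (drv_info_control & DRV_INFO_CONTROL_OP_CODE_MASK) >>
			 DRV_INFO_CONTROL_OP_CODE_SHIFT;

		memset(buf, 0, sizeof(*buf));
		switch (op) {
		case ETH_STATS_OPCODE:
			memcpy(buf->ether_stat.version, "example 1.0",
			       ETH_STAT_INFO_VERSION_LEN);
			buf->ether_stat.mtu_size = 1500;	/* placeholder value */
			break;
		case FCOE_STATS_OPCODE:
			/* fill buf->fcoe_stat ... */
			break;
		case ISCSI_STATS_OPCODE:
			/* fill buf->iscsi_stat ... */
			break;
		}
		/* Followed by DRV_MSG_CODE_DRV_INFO_ACK (or _NACK) back to the MFW. */
	}
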
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 882f48f..4df9505 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,7 +27,6 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 
-
 /********************************************************/
 #define ETH_HLEN			14
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -163,6 +162,11 @@
 #define EDC_MODE_LIMITING				0x0044
 #define EDC_MODE_PASSIVE_DAC			0x0055
 
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR	170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR		250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR		10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR		50
 
 /* BRB thresholds for E2*/
 #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE		170
@@ -177,6 +181,12 @@
 #define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE			50
 #define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE		250
 
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR	290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR	410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR	10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR	50
+
 /* BRB thresholds for E3A0 */
 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE		290
 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE		0
@@ -190,6 +200,11 @@
 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE		50
 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE		410
 
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR	330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR	490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR	15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR	55
 
 /* BRB thresholds for E3B0 2 port mode*/
 #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE		1025
@@ -239,18 +254,29 @@
 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE		50
 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE	384
 
-
 /* only for E3B0*/
 #define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR			304
 #define PFC_E3B0_4P_BRB_FULL_LB_XON_THR			384
-#define PFC_E3B0_4P_LB_GUART				120
+#define PFC_E3B0_4P_LB_GUART		120
 
 #define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART		120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST		80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST	80
 
 #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART		80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST		120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST	120
 
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR			330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR			490
+#define DEFAULT_E3B0_LB_GUART		40
+
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART		40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST	0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART		40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST	0
+
+/* ETS defines*/
 #define DCBX_INVALID_COS					(0xFF)
 
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
@@ -440,7 +466,7 @@
 	u32 min_w_val = 0;
 	/* Calculate min_w_val.*/
 	if (vars->link_up) {
-		if (SPEED_20000 == vars->line_speed)
+		if (vars->line_speed == SPEED_20000)
 			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
 		else
 			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
 		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
 
-	if (0 == port) {
+	if (!port) {
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
 			credit_upper_bound);
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@
 		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
 		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
-	if (0 == port) {
+	if (!port) {
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@
 	* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
 	* port mode port1 has COS0-2 that can be used for WFQ.
 	*/
-	if (0 == port) {
+	if (!port) {
 		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
 		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
 	} else {
@@ -674,7 +700,7 @@
 	* In 2 port mode port0 has COS0-5 that can be used for WFQ.
 	* In 4 port mode port1 has COS0-2 that can be used for WFQ.
 	*/
-	if (0 == port) {
+	if (!port) {
 		base_weight = PBF_REG_COS0_WEIGHT_P0;
 		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
 	} else {
@@ -846,34 +872,47 @@
 ******************************************************************************/
 static int bnx2x_ets_e3b0_get_total_bw(
 	const struct link_params *params,
-	const struct bnx2x_ets_params *ets_params,
+	struct bnx2x_ets_params *ets_params,
 	u16 *total_bw)
 {
 	struct bnx2x *bp = params->bp;
 	u8 cos_idx = 0;
+	u8 is_bw_cos_exist = 0;
 
 	*total_bw = 0 ;
+
 	/* Calculate total BW requested */
 	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
-		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+		if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+			is_bw_cos_exist = 1;
+			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+						   "was set to 0\n");
+				/*
+				 * This is to prevent a state in which ramrods
+				 * cannot be sent.
+				 */
+				ets_params->cos[cos_idx].params.bw_params.bw
+					 = 1;
+			}
 			*total_bw +=
 				ets_params->cos[cos_idx].params.bw_params.bw;
 		}
 	}
 
 	/* Check total BW is valid */
-	if ((100 != *total_bw) || (0 == *total_bw)) {
-		if (0 == *total_bw) {
+	if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+		if (*total_bw == 0) {
 			DP(NETIF_MSG_LINK,
-			   "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+			   "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
 			return -EINVAL;
 		}
 		DP(NETIF_MSG_LINK,
-		   "bnx2x_ets_E3B0_config toatl BW should be 100\n");
-		/**
-		*   We can handle a case whre the BW isn't 100 this can happen
-		*   if the TC are joined.
-		*/
+		   "bnx2x_ets_E3B0_config total BW should be 100\n");
+		/*
+		 * We can handle a case where the BW isn't 100; this can happen
+		 * if the TCs are joined.
+		 */
 	}
 	return 0;
 }
@@ -904,7 +943,7 @@
 	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
 		DCBX_E3B0_MAX_NUM_COS_PORT0;
 
-	if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+	if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
 		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
 				   "parameter There can't be two COS's with "
 				   "the same strict pri\n");
@@ -913,7 +952,7 @@
 
 	if (pri > max_num_of_cos) {
 		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
-			       "parameter Illegal strict priority\n");
+		   "parameter Illegal strict priority\n");
 	    return -EINVAL;
 	}
 
@@ -995,8 +1034,8 @@
 
 	/* Set all the strict priority first */
 	for (i = 0; i < max_num_of_cos; i++) {
-		if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
-			if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+		if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+			if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
 				DP(NETIF_MSG_LINK,
 					   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
 					   "invalid cos entry\n");
@@ -1010,7 +1049,7 @@
 			    sp_pri_to_cos[i], pri_set);
 			pri_bitmask = 1 << sp_pri_to_cos[i];
 			/* COS is used remove it from bitmap.*/
-			if (0 == (pri_bitmask & cos_bit_to_set)) {
+			if (!(pri_bitmask & cos_bit_to_set)) {
 				DP(NETIF_MSG_LINK,
 					"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
 					"invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@
 ******************************************************************************/
 int bnx2x_ets_e3b0_config(const struct link_params *params,
 			 const struct link_vars *vars,
-			 const struct bnx2x_ets_params *ets_params)
+			 struct bnx2x_ets_params *ets_params)
 {
 	struct bnx2x *bp = params->bp;
 	int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@
 	/* Prepare BW parameters*/
 	bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
 						   &total_bw);
-	if (0 != bnx2x_status) {
+	if (bnx2x_status) {
 		DP(NETIF_MSG_LINK,
 		   "bnx2x_ets_E3B0_config get_total_bw failed\n");
 		return -EINVAL;
 	}
 
-	/**
-	 *  Upper bound is set according to current link speed (min_w_val
-	 *  should be the same for upper bound and COS credit val).
+	/*
+	 * Upper bound is set according to current link speed (min_w_val
+	 * should be the same for upper bound and COS credit val).
 	 */
 	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
 	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@
 	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
 		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
 			cos_bw_bitmap |= (1 << cos_entry);
-			/**
+			/*
 			 * The function also sets the BW in HW(not the mappin
 			 * yet)
 			 */
@@ -1146,7 +1185,7 @@
 			   "bnx2x_ets_e3b0_config cos state not valid\n");
 			return -EINVAL;
 		}
-		if (0 != bnx2x_status) {
+		if (bnx2x_status) {
 			DP(NETIF_MSG_LINK,
 			   "bnx2x_ets_e3b0_config set cos bw failed\n");
 			return bnx2x_status;
@@ -1157,7 +1196,7 @@
 	bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
 							 sp_pri_to_cos);
 
-	if (0 != bnx2x_status) {
+	if (bnx2x_status) {
 		DP(NETIF_MSG_LINK,
 		   "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
 		return bnx2x_status;
@@ -1168,7 +1207,7 @@
 					      cos_sp_bitmap,
 					      cos_bw_bitmap);
 
-	if (0 != bnx2x_status) {
+	if (bnx2x_status) {
 		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
 		return bnx2x_status;
 	}
@@ -1232,9 +1271,9 @@
 
 	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
 
-	if ((0 == total_bw) ||
-	    (0 == cos0_bw) ||
-	    (0 == cos1_bw)) {
+	if ((!total_bw) ||
+	    (!cos0_bw) ||
+	    (!cos1_bw)) {
 		DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
 		return;
 	}
@@ -1290,7 +1329,7 @@
 	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
 	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
 	 */
-	val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+	val = (!strict_cos) ? 0x2318 : 0x22E0;
 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
 
 	return 0;
@@ -1298,7 +1337,6 @@
 /******************************************************************/
 /*			PFC section				  */
 /******************************************************************/
-
 static void bnx2x_update_pfc_xmac(struct link_params *params,
 				  struct link_vars *vars,
 				  u8 is_lb)
@@ -1401,7 +1439,7 @@
 	if (!vars->link_up)
 		return;
 
-	if (MAC_TYPE_EMAC == vars->mac_type) {
+	if (vars->mac_type == MAC_TYPE_EMAC) {
 		DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
 		bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
 					pfc_frames_received);
@@ -1435,6 +1473,18 @@
 
 	udelay(40);
 }
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+	u32 port4mode_ovwr_val;
+	/* Check 4-port override enabled */
+	port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+	if (port4mode_ovwr_val & (1<<0)) {
+		/* Return 4-port mode override value */
+		return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+	}
+	/* Return 4-port mode from input pin */
+	return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
 
 static void bnx2x_emac_init(struct link_params *params,
 			    struct link_vars *vars)
@@ -1601,31 +1651,18 @@
 
 }
 
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
-	u32 port4mode_ovwr_val;
-	/* Check 4-port override enabled */
-	port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
-	if (port4mode_ovwr_val & (1<<0)) {
-		/* Return 4-port mode override value */
-		return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
-	}
-	/* Return 4-port mode from input pin */
-	return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
 /* Define the XMAC mode */
 static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
 {
 	struct bnx2x *bp = params->bp;
 	u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 
-	/**
-	* In 4-port mode, need to set the mode only once, so if XMAC is
-	* already out of reset, it means the mode has already been set,
-	* and it must not* reset the XMAC again, since it controls both
-	* ports of the path
-	**/
+	/*
+	 * In 4-port mode, need to set the mode only once, so if XMAC is
+	 * already out of reset, it means the mode has already been set,
+	 * and it must not* reset the XMAC again, since it controls both
+	 * ports of the path
+	 */
 
 	if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
 	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@
 
 	return 0;
 }
+
 static int bnx2x_emac_enable(struct link_params *params,
 			     struct link_vars *vars, u8 lb)
 {
@@ -1999,7 +2037,6 @@
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-
 /* PFC BRB internal port configuration params */
 struct bnx2x_pfc_brb_threshold_val {
 	u32 pause_xoff;
@@ -2009,6 +2046,8 @@
 };
 
 struct bnx2x_pfc_brb_e3b0_val {
+	u32 per_class_guaranty_mode;
+	u32 lb_guarantied_hyst;
 	u32 full_lb_xoff_th;
 	u32 full_lb_xon_threshold;
 	u32 lb_guarantied;
@@ -2021,6 +2060,9 @@
 struct bnx2x_pfc_brb_th_val {
 	struct bnx2x_pfc_brb_threshold_val pauseable_th;
 	struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+	struct bnx2x_pfc_brb_threshold_val default_class0;
+	struct bnx2x_pfc_brb_threshold_val default_class1;
+
 };
 static int bnx2x_pfc_brb_get_config_params(
 				struct link_params *params,
@@ -2028,140 +2070,200 @@
 {
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+	config_val->default_class1.pause_xoff = 0;
+	config_val->default_class1.pause_xon = 0;
+	config_val->default_class1.full_xoff = 0;
+	config_val->default_class1.full_xon = 0;
+
 	if (CHIP_IS_E2(bp)) {
+		/*  class0 defaults */
+		config_val->default_class0.pause_xoff =
+			DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+		config_val->default_class0.pause_xon =
+			DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+		config_val->default_class0.full_xoff =
+			DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+		config_val->default_class0.full_xon =
+			DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+		/* pauseable */
 		config_val->pauseable_th.pause_xoff =
-		    PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 		config_val->pauseable_th.pause_xon =
-		    PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
 		config_val->pauseable_th.full_xoff =
-		    PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
 		config_val->pauseable_th.full_xon =
-		    PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+			PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
 		/* non pause able*/
 		config_val->non_pauseable_th.pause_xoff =
-		    PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 		config_val->non_pauseable_th.pause_xon =
-		    PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 		config_val->non_pauseable_th.full_xoff =
-		    PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 		config_val->non_pauseable_th.full_xon =
-		    PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+			PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 	} else if (CHIP_IS_E3A0(bp)) {
+		/*  class0 defaults */
+		config_val->default_class0.pause_xoff =
+			DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+		config_val->default_class0.pause_xon =
+			DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+		config_val->default_class0.full_xoff =
+			DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+		config_val->default_class0.full_xon =
+			DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+		/* pauseable */
 		config_val->pauseable_th.pause_xoff =
-		    PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 		config_val->pauseable_th.pause_xon =
-		    PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
 		config_val->pauseable_th.full_xoff =
-		    PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
 		config_val->pauseable_th.full_xon =
-		    PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+			PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
 		/* non pause able*/
 		config_val->non_pauseable_th.pause_xoff =
-		    PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 		config_val->non_pauseable_th.pause_xon =
-		    PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 		config_val->non_pauseable_th.full_xoff =
-		    PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 		config_val->non_pauseable_th.full_xon =
-		    PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+			PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 	} else if (CHIP_IS_E3B0(bp)) {
+		/*  class0 defaults */
+		config_val->default_class0.pause_xoff =
+			DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+		config_val->default_class0.pause_xon =
+		    DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+		config_val->default_class0.full_xoff =
+		    DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+		config_val->default_class0.full_xon =
+		    DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
 		if (params->phy[INT_PHY].flags &
 		    FLAGS_4_PORT_MODE) {
 			config_val->pauseable_th.pause_xoff =
-			    PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+				PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 			config_val->pauseable_th.pause_xon =
-			    PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+				PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
 			config_val->pauseable_th.full_xoff =
-			    PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+				PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
 			config_val->pauseable_th.full_xon =
-			    PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+				PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
 			/* non pause able*/
 			config_val->non_pauseable_th.pause_xoff =
-			    PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
 			config_val->non_pauseable_th.pause_xon =
-			    PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
 			config_val->non_pauseable_th.full_xoff =
-			    PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
 			config_val->non_pauseable_th.full_xon =
-			    PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-	    } else {
-		config_val->pauseable_th.pause_xoff =
-		    PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-		config_val->pauseable_th.pause_xon =
-		    PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-		config_val->pauseable_th.full_xoff =
-		    PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-		config_val->pauseable_th.full_xon =
-			PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
-		/* non pause able*/
-		config_val->non_pauseable_th.pause_xoff =
-		    PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-		config_val->non_pauseable_th.pause_xon =
-		    PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-		config_val->non_pauseable_th.full_xoff =
-		    PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-		config_val->non_pauseable_th.full_xon =
-		    PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-	    }
+			PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+		} else {
+			config_val->pauseable_th.pause_xoff =
+				PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			config_val->pauseable_th.pause_xon =
+				PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			config_val->pauseable_th.full_xoff =
+				PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			config_val->pauseable_th.full_xon =
+				PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+			/* non-pauseable */
+			config_val->non_pauseable_th.pause_xoff =
+				PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.pause_xon =
+				PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xoff =
+				PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xon =
+				PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+		}
 	} else
 	    return -EINVAL;
 
 	return 0;
 }
 
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
-						 struct bnx2x_pfc_brb_e3b0_val
-						 *e3b0_val,
-						 u32 cos0_pauseable,
-						 u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+		struct link_params *params,
+		struct bnx2x_pfc_brb_e3b0_val
+		*e3b0_val,
+		struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+		const u8 pfc_enabled)
 {
-	if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
-		e3b0_val->full_lb_xoff_th =
-		    PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
-		e3b0_val->full_lb_xon_threshold =
-		    PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
-		e3b0_val->lb_guarantied =
-		    PFC_E3B0_4P_LB_GUART;
-		e3b0_val->mac_0_class_t_guarantied =
-		    PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
-		e3b0_val->mac_0_class_t_guarantied_hyst =
-		    PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
-		e3b0_val->mac_1_class_t_guarantied =
-		    PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
-		e3b0_val->mac_1_class_t_guarantied_hyst =
-		    PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
-	} else {
-		e3b0_val->full_lb_xoff_th =
-		    PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
-		e3b0_val->full_lb_xon_threshold =
-		    PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
-		e3b0_val->mac_0_class_t_guarantied_hyst =
-		    PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
-		e3b0_val->mac_1_class_t_guarantied =
-		    PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
-		e3b0_val->mac_1_class_t_guarantied_hyst =
-		    PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+	if (pfc_enabled && pfc_params) {
+		e3b0_val->per_class_guaranty_mode = 1;
+		e3b0_val->lb_guarantied_hyst = 80;
 
-		if (cos0_pauseable != cos1_pauseable) {
-			/* nonpauseable= Lossy + pauseable = Lossless*/
+		if (params->phy[INT_PHY].flags &
+		    FLAGS_4_PORT_MODE) {
+			e3b0_val->full_lb_xoff_th =
+				PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+			e3b0_val->full_lb_xon_threshold =
+				PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
 			e3b0_val->lb_guarantied =
-			    PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+				PFC_E3B0_4P_LB_GUART;
 			e3b0_val->mac_0_class_t_guarantied =
-			    PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
-		} else if (cos0_pauseable) {
-			/* Lossless +Lossless*/
-			e3b0_val->lb_guarantied =
-			    PFC_E3B0_2P_PAUSE_LB_GUART;
-			e3b0_val->mac_0_class_t_guarantied =
-			    PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+				PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+			e3b0_val->mac_0_class_t_guarantied_hyst =
+				PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+			e3b0_val->mac_1_class_t_guarantied =
+				PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+			e3b0_val->mac_1_class_t_guarantied_hyst =
+				PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
 		} else {
-			/* Lossy +Lossy*/
-			e3b0_val->lb_guarantied =
-			    PFC_E3B0_2P_NON_PAUSE_LB_GUART;
-			e3b0_val->mac_0_class_t_guarantied =
-			    PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+			e3b0_val->full_lb_xoff_th =
+				PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+			e3b0_val->full_lb_xon_threshold =
+				PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+			e3b0_val->mac_0_class_t_guarantied_hyst =
+				PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+			e3b0_val->mac_1_class_t_guarantied =
+				PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+			e3b0_val->mac_1_class_t_guarantied_hyst =
+				PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+			if (pfc_params->cos0_pauseable !=
+				pfc_params->cos1_pauseable) {
+				/* nonpauseable= Lossy + pauseable = Lossless*/
+				e3b0_val->lb_guarantied =
+					PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+				e3b0_val->mac_0_class_t_guarantied =
+			       PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+			} else if (pfc_params->cos0_pauseable) {
+				/* Lossless +Lossless*/
+				e3b0_val->lb_guarantied =
+					PFC_E3B0_2P_PAUSE_LB_GUART;
+				e3b0_val->mac_0_class_t_guarantied =
+				   PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+			} else {
+				/* Lossy +Lossy*/
+				e3b0_val->lb_guarantied =
+					PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+				e3b0_val->mac_0_class_t_guarantied =
+			       PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+			}
 		}
+	} else {
+		e3b0_val->per_class_guaranty_mode = 0;
+		e3b0_val->lb_guarantied_hyst = 0;
+		e3b0_val->full_lb_xoff_th =
+			DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
+		e3b0_val->full_lb_xon_threshold =
+			DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
+		e3b0_val->lb_guarantied =
+			DEFAULT_E3B0_LB_GUART;
+		e3b0_val->mac_0_class_t_guarantied =
+			DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
+		e3b0_val->mac_0_class_t_guarantied_hyst =
+			DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
+		e3b0_val->mac_1_class_t_guarantied =
+			DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
+		e3b0_val->mac_1_class_t_guarantied_hyst =
+			DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
 	}
 }
 static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@
 	struct bnx2x *bp = params->bp;
 	struct bnx2x_pfc_brb_th_val config_val = { {0} };
 	struct bnx2x_pfc_brb_threshold_val *reg_th_config =
-	    &config_val.pauseable_th;
+		&config_val.pauseable_th;
 	struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
-	int set_pfc = params->feature_config_flags &
+	const int set_pfc = params->feature_config_flags &
 		FEATURE_CONFIG_PFC_ENABLED;
+	const u8 pfc_enabled = (set_pfc && pfc_params);
 	int bnx2x_status = 0;
 	u8 port = params->port;
 
 	/* default - pause configuration */
 	reg_th_config = &config_val.pauseable_th;
 	bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
-	if (0 != bnx2x_status)
+	if (bnx2x_status)
 		return bnx2x_status;
 
-	if (set_pfc && pfc_params)
+	if (pfc_enabled) {
 		/* First COS */
-		if (!pfc_params->cos0_pauseable)
+		if (pfc_params->cos0_pauseable)
+			reg_th_config = &config_val.pauseable_th;
+		else
 			reg_th_config = &config_val.non_pauseable_th;
+	} else
+		reg_th_config = &config_val.default_class0;
 	/*
 	 * The number of free blocks below which the pause signal to class 0
 	 * of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@
 	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
 	       BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
 
-	if (set_pfc && pfc_params) {
+	if (pfc_enabled) {
 		/* Second COS */
 		if (pfc_params->cos1_pauseable)
 			reg_th_config = &config_val.pauseable_th;
 		else
 			reg_th_config = &config_val.non_pauseable_th;
+	} else
+		reg_th_config = &config_val.default_class1;
+	/*
+	 * The number of free blocks below which the pause signal to
+	 * class 1 of MAC #n is asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+	       BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+	       reg_th_config->pause_xoff);
+
+	/*
+	 * The number of free blocks above which the pause signal to
+	 * class 1 of MAC #n is de-asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+	       BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+	       reg_th_config->pause_xon);
+	/*
+	 * The number of free blocks below which the full signal to
+	 * class 1 of MAC #n is asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+	       BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+	       reg_th_config->full_xoff);
+	/*
+	 * The number of free blocks above which the full signal to
+	 * class 1 of MAC #n is de-asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+	       BRB1_REG_FULL_1_XON_THRESHOLD_0,
+	       reg_th_config->full_xon);
+
+	if (CHIP_IS_E3B0(bp)) {
+		bnx2x_pfc_brb_get_e3b0_config_params(
+			params,
+			&e3b0_val,
+			pfc_params,
+			pfc_enabled);
+
+		REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+			   e3b0_val.per_class_guaranty_mode);
+
 		/*
-		 * The number of free blocks below which the pause signal to
-		 * class 1 of MAC #n is asserted. n=0,1
-		**/
-		REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
-		       BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
-		       reg_th_config->pause_xoff);
-		/*
-		 * The number of free blocks above which the pause signal to
-		 * class 1 of MAC #n is de-asserted. n=0,1
+		 * The hysteresis on the guarantied buffer space for the Lb
+		 * port before signaling XON.
 		 */
-		REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
-		       BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
-		       reg_th_config->pause_xon);
+		REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+			   e3b0_val.lb_guarantied_hyst);
+
 		/*
-		 * The number of free blocks below which the full signal to
-		 * class 1 of MAC #n is asserted. n=0,1
+		 * The number of free blocks below which the full signal to the
+		 * LB port is asserted.
 		 */
-		REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
-		       BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
-		       reg_th_config->full_xoff);
+		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+		       e3b0_val.full_lb_xoff_th);
 		/*
-		 * The number of free blocks above which the full signal to
-		 * class 1 of MAC #n is de-asserted. n=0,1
+		 * The number of free blocks above which the full signal to the
+		 * LB port is de-asserted.
 		 */
-		REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
-		       BRB1_REG_FULL_1_XON_THRESHOLD_0,
-		       reg_th_config->full_xon);
+		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+		       e3b0_val.full_lb_xon_threshold);
+		/*
+		 * The number of blocks guarantied for the MAC #n port. n=0,1
+		 */
 
+		/* The number of blocks guarantied for the LB port.*/
+		REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+		       e3b0_val.lb_guarantied);
 
-		if (CHIP_IS_E3B0(bp)) {
-			/*Should be done by init tool */
-			/*
-			* BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
-			* reset value
-			* 944
-			*/
+		/*
+		 * The number of blocks guarantied for the MAC #n port.
+		 */
+		REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+		       2 * e3b0_val.mac_0_class_t_guarantied);
+		REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+		       2 * e3b0_val.mac_1_class_t_guarantied);
+		/*
+		 * The number of blocks guarantied for class #t in MAC0. t=0,1
+		 */
+		REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+		       e3b0_val.mac_0_class_t_guarantied);
+		REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+		       e3b0_val.mac_0_class_t_guarantied);
+		/*
+		 * The hysteresis on the guarantied buffer space for class in
+		 * MAC0.  t=0,1
+		 */
+		REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+		       e3b0_val.mac_0_class_t_guarantied_hyst);
+		REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+		       e3b0_val.mac_0_class_t_guarantied_hyst);
 
-			/**
-			 * The hysteresis on the guarantied buffer space for the Lb port
-			 * before signaling XON.
-			 **/
-			REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
-			bnx2x_pfc_brb_get_e3b0_config_params(
-			    params,
-			    &e3b0_val,
-			    pfc_params->cos0_pauseable,
-			    pfc_params->cos1_pauseable);
-			/**
-			 * The number of free blocks below which the full signal to the
-			 * LB port is asserted.
-			*/
-			REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
-				   e3b0_val.full_lb_xoff_th);
-			/**
-			 * The number of free blocks above which the full signal to the
-			 * LB port is de-asserted.
-			*/
-			REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
-				   e3b0_val.full_lb_xon_threshold);
-			/**
-			* The number of blocks guarantied for the MAC #n port. n=0,1
-			*/
-
-			/*The number of blocks guarantied for the LB port.*/
-			REG_WR(bp, BRB1_REG_LB_GUARANTIED,
-			       e3b0_val.lb_guarantied);
-
-			/**
-			 * The number of blocks guarantied for the MAC #n port.
-			*/
-			REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
-				   2 * e3b0_val.mac_0_class_t_guarantied);
-			REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
-				   2 * e3b0_val.mac_1_class_t_guarantied);
-			/**
-			 * The number of blocks guarantied for class #t in MAC0. t=0,1
-			*/
-			REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
-			       e3b0_val.mac_0_class_t_guarantied);
-			REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
-			       e3b0_val.mac_0_class_t_guarantied);
-			/**
-			 * The hysteresis on the guarantied buffer space for class in
-			 * MAC0.  t=0,1
-			*/
-			REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
-			       e3b0_val.mac_0_class_t_guarantied_hyst);
-			REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
-			       e3b0_val.mac_0_class_t_guarantied_hyst);
-
-			/**
-			 * The number of blocks guarantied for class #t in MAC1.t=0,1
-			*/
-			REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
-			       e3b0_val.mac_1_class_t_guarantied);
-			REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
-			       e3b0_val.mac_1_class_t_guarantied);
-			/**
-			 * The hysteresis on the guarantied buffer space for class #t
-			* in MAC1.  t=0,1
-			*/
-			REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
-			       e3b0_val.mac_1_class_t_guarantied_hyst);
-			REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
-			       e3b0_val.mac_1_class_t_guarantied_hyst);
-
-	    }
-
+		/*
+		 * The number of blocks guarantied for class #t in MAC1.t=0,1
+		 */
+		REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+		       e3b0_val.mac_1_class_t_guarantied);
+		REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+		       e3b0_val.mac_1_class_t_guarantied);
+		/*
+		 * The hysteresis on the guarantied buffer space for class #t
+		 * in MAC1.  t=0,1
+		 */
+		REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+		       e3b0_val.mac_1_class_t_guarantied_hyst);
+		REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+		       e3b0_val.mac_1_class_t_guarantied_hyst);
 	}
 
 	return bnx2x_status;
@@ -2515,7 +2619,7 @@
 
 	/* update BRB params */
 	bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
-	if (0 != bnx2x_status)
+	if (bnx2x_status)
 		return bnx2x_status;
 
 	if (!vars->link_up)
@@ -2533,7 +2637,6 @@
 			bnx2x_emac_enable(params, vars, 0);
 			return bnx2x_status;
 		}
-
 		if (CHIP_IS_E2(bp))
 			bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
 		else
@@ -3053,7 +3156,6 @@
 		DP(NETIF_MSG_LINK, "write phy register failed\n");
 		netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 		rc = -EFAULT;
-
 	} else {
 		/* data */
 		tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3192,6 @@
 			       EMAC_MDIO_STATUS_10MB);
 	return rc;
 }
-
-
 /******************************************************************/
 /*			BSC access functions from E3	          */
 /******************************************************************/
@@ -3339,7 +3439,7 @@
 		aer_val = 0x3800 + offset - 1;
 	else
 		aer_val = 0x3800 + offset;
-	DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, aer_val);
 
@@ -3942,13 +4042,11 @@
 
 static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
 					   struct link_params *params,
-					   u8 fiber_mode)
+					   u8 fiber_mode,
+					   u8 always_autoneg)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val16, digctrl_kx1, digctrl_kx2;
-	u8 lane;
-
-	lane = bnx2x_get_warpcore_lane(phy, params);
 
 	/* Clear XFI clock comp in non-10G single lane mode. */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4054,7 @@
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
 
-	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+	if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
 		/* SGMII Autoneg */
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4065,7 @@
 	} else {
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-		val16 &= 0xcfbf;
+		val16 &= 0xcebf;
 		switch (phy->req_line_speed) {
 		case SPEED_10:
 			break;
@@ -4043,9 +4141,7 @@
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_DIGITAL5_MISC6, &val);
 }
-
-
-	/* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
 static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
 				      struct link_params *params,
 				      u16 lane)
@@ -4250,7 +4346,7 @@
 		vars->phy_flags |= PHY_SGMII_FLAG;
 		DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
 		bnx2x_warpcore_clear_regs(phy, params, lane);
-		bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+		bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
 	} else {
 		switch (serdes_net_if) {
 		case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4374,8 @@
 				}
 				bnx2x_warpcore_set_sgmii_speed(phy,
 								params,
-								fiber_mode);
+								fiber_mode,
+								0);
 			}
 
 			break;
@@ -4291,7 +4388,8 @@
 				bnx2x_warpcore_set_10G_XFI(phy, params, 0);
 			} else if (vars->line_speed == SPEED_1000) {
 				DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
-				bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+				bnx2x_warpcore_set_sgmii_speed(
+						phy, params, 1, 0);
 			}
 			/* Issue Module detection */
 			if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4526,6 @@
 
 		/* Switch back to 4-copy registers */
 		bnx2x_set_aer_mmd(params, phy);
-		/* Global loopback, not recommended. */
-		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
-				0x4000);
 	} else {
 		/* 10G & 20G */
 		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4542,14 @@
 }
 
 
-void bnx2x_link_status_update(struct link_params *params,
-			      struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+			   struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u8 link_10g_plus;
-	u8 port = params->port;
-	u32 sync_offset, media_types;
-	/* Update PHY configuration */
-	set_phy_vars(params, vars);
-
-	vars->link_status = REG_RD(bp, params->shmem_base +
-				   offsetof(struct shmem_region,
-					    port_mb[port].link_status));
-
-	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
-	vars->phy_flags = PHY_XGXS_FLAG;
 	if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
 		vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
 	if (vars->link_up) {
 		DP(NETIF_MSG_LINK, "phy link up\n");
 
@@ -4563,7 +4644,23 @@
 		if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
 			vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
 	}
+}
 
+void bnx2x_link_status_update(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 sync_offset, media_types;
+	/* Update PHY configuration */
+	set_phy_vars(params, vars);
+
+	vars->link_status = REG_RD(bp, params->shmem_base +
+				   offsetof(struct shmem_region,
+					    port_mb[port].link_status));
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+	bnx2x_sync_link(params, vars);
 	/* Sync media type */
 	sync_offset = params->shmem_base +
 			offsetof(struct shmem_region,
@@ -4602,7 +4699,6 @@
 		 vars->line_speed, vars->duplex, vars->flow_ctrl);
 }
 
-
 static void bnx2x_set_master_ln(struct link_params *params,
 				struct bnx2x_phy *phy)
 {
@@ -4676,11 +4772,8 @@
 	 *  Each two bits represents a lane number:
 	 *  No swap is 0123 => 0x1b no need to enable the swap
 	 */
-	u16 ser_lane, rx_lane_swap, tx_lane_swap;
+	u16 rx_lane_swap, tx_lane_swap;
 
-	ser_lane = ((params->lane_config &
-		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-		    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 	rx_lane_swap = ((params->lane_config &
 			 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
 			PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5449,6 @@
 				      struct link_params *params,
 				      struct link_vars *vars)
 {
-
 	struct bnx2x *bp = params->bp;
 
 	u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5495,7 @@
 				     struct link_params *params,
 				     struct link_vars *vars)
 {
-
 	struct bnx2x *bp = params->bp;
-
 	u8 lane;
 	u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
 	int rc = 0;
@@ -6678,7 +6768,6 @@
 	return rc;
 }
 
-
 /*****************************************************************************/
 /*			    External Phy section			     */
 /*****************************************************************************/
@@ -8103,7 +8192,15 @@
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
 				    struct link_params *params)
 {
+	struct bnx2x *bp = params->bp;
 	bnx2x_warpcore_power_module(params, phy, 0);
+	/* Put Warpcore in low power mode */
+	REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+	/* Put LCPLL in low power mode */
+	REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+	REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+	REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
 }
 
 static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9137,13 @@
 			DP(NETIF_MSG_LINK,
 			   "8727 Power fault has been detected on port %d\n",
 			   oc_port);
-			netdev_err(bp->dev, "Error:  Power fault on Port %d has"
-					    " been detected and the power to "
-					    "that SFP+ module has been removed"
-					    " to prevent failure of the card."
-					    " Please remove the SFP+ module and"
-					    " restart the system to clear this"
-					    " error.\n",
+			netdev_err(bp->dev, "Error: Power fault on Port %d has "
+					    "been detected and the power to "
+					    "that SFP+ module has been removed "
+					    "to prevent failure of the card. "
+					    "Please remove the SFP+ module and "
+					    "restart the system to clear this "
+					    "error.\n",
 			 oc_port);
 			/* Disable all RX_ALARMs except for mod_abs */
 			bnx2x_cl45_write(bp, phy,
@@ -9228,7 +9325,7 @@
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
-	u16 val;
+	u16 val, offset;
 
 	/* PHYC_CTL_LED_CTL */
 	bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9360,22 @@
 			MDIO_PMA_REG_8481_LED3_BLINK,
 			0);
 
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
-	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
-
+	/* Configure the blink rate to ~15.9 Hz */
 	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+			MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+	else
+		offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, offset, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, offset, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -9283,7 +9388,7 @@
 				       struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 autoneg_val, an_1000_val, an_10_100_val;
+	u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
 	u16 tmp_req_line_speed;
 
 	tmp_req_line_speed = phy->req_line_speed;
@@ -9378,6 +9483,8 @@
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
 				 (1<<15 | 1<<9 | 7<<0));
+		/* The PHY needs this set even for forced link. */
+		an_10_100_val |= (1<<8) | (1<<7);
 		DP(NETIF_MSG_LINK, "Setting 100M force\n");
 	}
 	if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,9 +9522,17 @@
 			DP(NETIF_MSG_LINK, "Advertising 10G\n");
 			/* Restart autoneg for 10G*/
 
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+					&an_10g_val);
 			bnx2x_cl45_write(bp, phy,
-				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
-				 0x3200);
+					 MDIO_AN_DEVAD,
+					 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+					 an_10g_val | 0x1000);
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+					 0x3200);
 	} else
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD,
@@ -9449,18 +9564,74 @@
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
+				   struct link_params *params,
+		   u16 fw_cmd,
+		   u16 cmd_args[])
+{
+	u32 idx;
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_CMD_HDLR_STATUS,
+			PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+	for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_CMD_HDLR_STATUS, &val);
+		if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
+			break;
+		msleep(1);
+	}
+	if (idx >= PHY84833_CMDHDLR_WAIT) {
+		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+		return -EINVAL;
+	}
 
-#define PHY84833_HDSHK_WAIT 300
+	/* Prepare argument(s) and issue command */
+	for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_CMD_HDLR_DATA1 + idx,
+				cmd_args[idx]);
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+	for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_CMD_HDLR_STATUS, &val);
+		if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+			(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+			break;
+		msleep(1);
+	}
+	if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+		(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+		return -EINVAL;
+	}
+	/* Gather returning data */
+	for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_CMD_HDLR_DATA1 + idx,
+				&cmd_args[idx]);
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_CMD_HDLR_STATUS,
+			PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+	return 0;
+}
+
+
 static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
 				   struct link_params *params,
 				   struct link_vars *vars)
 {
-	u32 idx;
 	u32 pair_swap;
-	u16 val;
-	u16 data;
+	u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+	int status;
 	struct bnx2x *bp = params->bp;
-	/* Do pair swap */
 
 	/* Check for configuration. */
 	pair_swap = REG_RD(bp, params->shmem_base +
@@ -9471,52 +9642,17 @@
 	if (pair_swap == 0)
 		return 0;
 
-	data = (u16)pair_swap;
+	/* Only the second argument is used for this command */
+	data[1] = (u16)pair_swap;
 
-	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
-	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_TOP_CFG_SCRATCH_REG2,
-			PHY84833_CMD_OPEN_OVERRIDE);
-	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-		if (val == PHY84833_CMD_OPEN_FOR_CMDS)
-			break;
-		msleep(1);
-	}
-	if (idx >= PHY84833_HDSHK_WAIT) {
-		DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
-		return -EINVAL;
-	}
+	status = bnx2x_84833_cmd_hdlr(phy, params,
+		PHY84833_CMD_SET_PAIR_SWAP, data);
+	if (status == 0)
+		DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
 
-	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_TOP_CFG_SCRATCH_REG4,
-			data);
-	/* Issue pair swap command */
-	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_TOP_CFG_SCRATCH_REG0,
-			PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
-	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-		if ((val == PHY84833_CMD_COMPLETE_PASS) ||
-			(val == PHY84833_CMD_COMPLETE_ERROR))
-			break;
-		msleep(1);
-	}
-	if ((idx >= PHY84833_HDSHK_WAIT) ||
-		(val == PHY84833_CMD_COMPLETE_ERROR)) {
-		DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
-		return -EINVAL;
-	}
-	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_TOP_CFG_SCRATCH_REG2,
-			PHY84833_CMD_CLEAR_COMPLETE);
-	DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
-	return 0;
+	return status;
 }
 
-
 static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
 				      u32 shmem_base_path[],
 				      u32 chip_id)
@@ -9579,24 +9715,6 @@
 	return 0;
 }
 
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
-						u32 shmem_base_path[],
-						u32 chip_id)
-{
-	u8 reset_gpios;
-
-	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
-	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
-	udelay(10);
-	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
-	msleep(800);
-	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
-		reset_gpios);
-
-	return 0;
-}
-
 #define PHY84833_CONSTANT_LATENCY 1193
 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 				   struct link_params *params,
@@ -9605,8 +9723,8 @@
 	struct bnx2x *bp = params->bp;
 	u8 port, initialize = 1;
 	u16 val;
-	u16 temp;
-	u32 actual_phy_selection, cms_enable, idx;
+	u32 actual_phy_selection, cms_enable;
+	u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
 	int rc = 0;
 
 	msleep(1);
@@ -9625,6 +9743,13 @@
 		bnx2x_cl45_write(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_CTRL, 0x8000);
+	}
+
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
 		/* Bring PHY out of super isolate mode */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_CTL_DEVAD,
@@ -9633,26 +9758,19 @@
 		bnx2x_cl45_write(bp, phy,
 				MDIO_CTL_DEVAD,
 				MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-	}
-
-	bnx2x_wait_reset_complete(bp, phy, params);
-
-	/* Wait for GPHY to come out of reset */
-	msleep(50);
-
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
 		bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
-	/*
-	 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
-	 */
-	temp = vars->line_speed;
-	vars->line_speed = SPEED_10000;
-	bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
-	bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
-	vars->line_speed = temp;
-
-	/* Set dual-media configuration according to configuration */
+	} else {
+		/*
+		 * BCM84823 requires that XGXS links up first @ 10G for normal
+		 * behavior.
+		 */
+		u16 temp;
+		temp = vars->line_speed;
+		vars->line_speed = SPEED_10000;
+		bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+		bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+		vars->line_speed = temp;
+	}
 
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 			MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9700,64 +9818,18 @@
 
 	/* AutogrEEEn */
 	if (params->feature_config_flags &
-		FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
-		/* Ensure that f/w is ready */
-		for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-			bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-					MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-			if (val == PHY84833_CMD_OPEN_FOR_CMDS)
-				break;
-			usleep_range(1000, 1000);
-		}
-		if (idx >= PHY84833_HDSHK_WAIT) {
-			DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
-			return -EINVAL;
-		}
+		FEATURE_CONFIG_AUTOGREEEN_ENABLED)
+		cmd_args[0] = 0x2;
+	else
+		cmd_args[0] = 0x0;
 
-		/* Select EEE mode */
-		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_SCRATCH_REG3,
-				0x2);
-
-		/* Set Idle and Latency */
-		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_SCRATCH_REG4,
-				PHY84833_CONSTANT_LATENCY + 1);
-
-		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_DATA3_REG,
-				PHY84833_CONSTANT_LATENCY + 1);
-
-		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_DATA4_REG,
-				PHY84833_CONSTANT_LATENCY);
-
-		/* Send EEE instruction to command register */
-		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_TOP_CFG_SCRATCH_REG0,
-				PHY84833_DIAG_CMD_SET_EEE_MODE);
-
-		/* Ensure that the command has completed */
-		for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-			bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-					MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-			if ((val == PHY84833_CMD_COMPLETE_PASS) ||
-				(val == PHY84833_CMD_COMPLETE_ERROR))
-				break;
-			usleep_range(1000, 1000);
-		}
-		if ((idx >= PHY84833_HDSHK_WAIT) ||
-			(val == PHY84833_CMD_COMPLETE_ERROR)) {
-			DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
-			return -EINVAL;
-		}
-
-		/* Reset command handler */
-		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			    MDIO_84833_TOP_CFG_SCRATCH_REG2,
-			    PHY84833_CMD_CLEAR_COMPLETE);
-	}
-
+	cmd_args[1] = 0x0;
+	cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+	cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+	rc = bnx2x_84833_cmd_hdlr(phy, params,
+		PHY84833_CMD_SET_EEE_MODE, cmd_args);
+	if (rc != 0)
+		DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
 	if (initialize)
 		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
 	else
@@ -10144,8 +10216,10 @@
 	DP(NETIF_MSG_LINK, "54618SE cfg init\n");
 	usleep_range(1000, 1000);
 
-	/* This works with E3 only, no need to check the chip
-	   before determining the port. */
+	/*
+	 * This works with E3 only, no need to check the chip
+	 * before determining the port.
+	 */
 	port = params->port;
 
 	cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -11218,7 +11292,9 @@
 				       offsetof(struct shmem_region,
 			dev_info.port_feature_config[port].link_config)) &
 			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
-	chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+	chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+		((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
 	DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
 	if (USES_WARPCORE(bp)) {
 		u32 serdes_net_if;
@@ -11397,6 +11473,10 @@
 		return -EINVAL;
 	default:
 		*phy = phy_null;
+		/* In case external PHY wasn't found */
+		if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+		    (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+			return -EINVAL;
 		return 0;
 	}
 
@@ -11570,7 +11650,7 @@
 
 int bnx2x_phy_probe(struct link_params *params)
 {
-	u8 phy_index, actual_phy_idx, link_cfg_idx;
+	u8 phy_index, actual_phy_idx;
 	u32 phy_config_swapped, sync_offset, media_types;
 	struct bnx2x *bp = params->bp;
 	struct bnx2x_phy *phy;
@@ -11581,7 +11661,6 @@
 
 	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
 	      phy_index++) {
-		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
 		actual_phy_idx = phy_index;
 		if (phy_config_swapped) {
 			if (phy_index == EXT_PHY1)
@@ -12247,6 +12326,63 @@
 	return 0;
 }
 
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+						u32 shmem_base_path[],
+						u32 shmem2_base_path[],
+						u8 phy_index,
+						u32 chip_id)
+{
+	u8 reset_gpios;
+	struct bnx2x_phy phy;
+	u32 shmem_base, shmem2_base, cnt;
+	s8 port = 0;
+	u16 val;
+
+	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+	udelay(10);
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+		reset_gpios);
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		/* This PHY is for E2 and E3. */
+		shmem_base = shmem_base_path[port];
+		shmem2_base = shmem2_base_path[port];
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       0, &phy) != 0) {
+			DP(NETIF_MSG_LINK, "populate_phy failed\n");
+			return -EINVAL;
+		}
+
+		/* Wait for FW completing its initialization. */
+		for (cnt = 0; cnt < 1000; cnt++) {
+			bnx2x_cl45_read(bp, &phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, &val);
+			if (!(val & (1<<15)))
+				break;
+			msleep(1);
+		}
+		if (cnt >= 1000)
+			DP(NETIF_MSG_LINK,
+				"84833 Cmn reset timeout (%d)\n", port);
+
+		/* Put the port in super isolate mode. */
+		bnx2x_cl45_read(bp, &phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+		val |= MDIO_84833_SUPER_ISOLATE;
+		bnx2x_cl45_write(bp, &phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+	}
+
+	return 0;
+}
+
+
 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
 				     u32 shmem2_base_path[], u8 phy_index,
 				     u32 ext_phy_type, u32 chip_id)
@@ -12281,7 +12417,9 @@
 		 * GPIO3's are linked, and so both need to be toggled
 		 * to obtain required 2us pulse.
 		 */
-		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
 		rc = -EINVAL;
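
For readers tracing the new bnx2x_84833_cmd_hdlr() helper above, here is a minimal, self-contained C sketch of the same open/issue/poll/clear mailbox handshake, run against a mocked status register rather than real MDIO accesses. Only the flow and the status values mirror the diff; mock_read_status(), the register variables and main() are invented purely for illustration.

/* Sketch of the 84833 firmware command-handler handshake: open the mailbox,
 * stage the arguments, issue the command, poll for completion, read back the
 * results, then clear the status word.
 */
#include <stdio.h>
#include <stdint.h>

#define CMD_OPEN_OVERRIDE	0xa5a5
#define CMD_OPEN_FOR_CMDS	0x0010
#define CMD_COMPLETE_PASS	0x0004
#define CMD_COMPLETE_ERROR	0x0008
#define CMD_CLEAR_COMPLETE	0x0080
#define MAX_ARGS		5
#define WAIT_ITERATIONS		300

static uint16_t status_reg, command_reg, data_reg[MAX_ARGS];

/* Mock firmware: opens the mailbox on request and completes any command. */
static uint16_t mock_read_status(void)
{
	if (status_reg == CMD_OPEN_OVERRIDE)
		status_reg = CMD_OPEN_FOR_CMDS;
	else if (command_reg != 0)
		status_reg = CMD_COMPLETE_PASS;
	return status_reg;
}

static int cmd_hdlr(uint16_t fw_cmd, uint16_t args[MAX_ARGS])
{
	int idx;

	status_reg = CMD_OPEN_OVERRIDE;		/* request mailbox access */
	for (idx = 0; idx < WAIT_ITERATIONS; idx++)
		if (mock_read_status() == CMD_OPEN_FOR_CMDS)
			break;
	if (idx >= WAIT_ITERATIONS)
		return -1;			/* FW not ready */

	for (idx = 0; idx < MAX_ARGS; idx++)	/* stage the arguments */
		data_reg[idx] = args[idx];
	command_reg = fw_cmd;			/* issue the command */

	for (idx = 0; idx < WAIT_ITERATIONS; idx++) {
		uint16_t val = mock_read_status();

		if (val == CMD_COMPLETE_PASS || val == CMD_COMPLETE_ERROR)
			break;
	}
	if (idx >= WAIT_ITERATIONS || status_reg == CMD_COMPLETE_ERROR)
		return -1;			/* FW cmd failed */

	for (idx = 0; idx < MAX_ARGS; idx++)	/* gather returned data */
		args[idx] = data_reg[idx];
	status_reg = CMD_CLEAR_COMPLETE;	/* release the mailbox */
	return 0;
}

int main(void)
{
	uint16_t args[MAX_ARGS] = { 0x2, 0x0, 1194, 1193, 0x0 };

	printf("status %d, arg0 returned 0x%x\n",
	       cmd_hdlr(0x8009 /* e.g. SET_EEE_MODE */, args), args[0]);
	return 0;
}
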
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 2a46e63..e02a68a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -479,7 +479,7 @@
 /*  Configure the COS to ETS according to BW and SP settings.*/
 int bnx2x_ets_e3b0_config(const struct link_params *params,
 			 const struct link_vars *vars,
-			 const struct bnx2x_ets_params *ets_params);
+			 struct bnx2x_ets_params *ets_params);
 /* Read pfc statistic*/
 void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 						 u32 pfc_frames_sent[2],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f6361e..ffeaaa9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2318,12 +2318,6 @@
 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-	return 2 * vn + BP_PORT(bp);
-}
-
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@
 	   "rate shaping and fairness are disabled\n");
 }
 
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
-	int func;
-	int vn;
-
-	/* Set the attention towards other drivers on the same port */
-	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-		if (vn == BP_VN(bp))
-			continue;
-
-		func = func_by_vn(bp, vn);
-		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-	}
-}
-
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2549,6 +2527,9 @@
 	if (bp->state != BNX2X_STATE_OPEN)
 		return;
 
+	/* read updated dcb configuration */
+	bnx2x_dcbx_pmf_update(bp);
+
 	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 
 	if (bp->link_vars.link_up)
@@ -2643,15 +2624,6 @@
 	return rc;
 }
 
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
-	/* Statistics are not supported for CNIC Clients at the moment */
-	if (IS_FCOE_FP(fp))
-		return false;
-#endif
-	return true;
-}
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
@@ -2695,11 +2667,11 @@
 	 *  parent connection). The statistics are zeroed when the parent
 	 *  connection is initialized.
 	 */
-	if (stat_counter_valid(bp, fp)) {
-		__set_bit(BNX2X_Q_FLG_STATS, &flags);
-		if (zero_stats)
-			__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
-	}
+
+	__set_bit(BNX2X_Q_FLG_STATS, &flags);
+	if (zero_stats)
+		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
 
 	return flags;
 }
@@ -2808,8 +2780,8 @@
 	/* This should be a maximum number of data bytes that may be
 	 * placed on the BD (not including paddings).
 	 */
-	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
-		IP_HEADER_ALIGNMENT_PADDING;
+	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+		BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
 
 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
 	rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2912,143 @@
 	 */
 }
 
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+	struct eth_stats_info *ether_stat =
+		&bp->slowpath->drv_info_to_mcp.ether_stat;
+
+	/* leave last char as NULL */
+	memcpy(ether_stat->version, DRV_MODULE_VERSION,
+	       ETH_STAT_INFO_VERSION_LEN - 1);
+
+	bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+					 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+					 ether_stat->mac_local);
+
+	ether_stat->mtu_size = bp->dev->mtu;
+
+	if (bp->dev->features & NETIF_F_RXCSUM)
+		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+	if (bp->dev->features & NETIF_F_TSO)
+		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+	ether_stat->feature_flags |= bp->common.boot_mode;
+
+	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+	ether_stat->txq_size = bp->tx_ring_size;
+	ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+	struct fcoe_stats_info *fcoe_stat =
+		&bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+	memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+	fcoe_stat->qos_priority =
+		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+	/* insert FCoE stats from ramrod response */
+	if (!NO_FCOE(bp)) {
+		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+			&bp->fw_stats_data->queue_stats[FCOE_IDX].
+			tstorm_queue_statistics;
+
+		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+			&bp->fw_stats_data->queue_stats[FCOE_IDX].
+			xstorm_queue_statistics;
+
+		struct fcoe_statistics_params *fw_fcoe_stat =
+			&bp->fw_stats_data->fcoe;
+
+		ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+		       fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+		ADD_64(fcoe_stat->rx_bytes_hi,
+		       fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+		       fcoe_stat->rx_bytes_lo,
+		       fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+		ADD_64(fcoe_stat->rx_bytes_hi,
+		       fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+		       fcoe_stat->rx_bytes_lo,
+		       fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+		ADD_64(fcoe_stat->rx_bytes_hi,
+		       fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+		       fcoe_stat->rx_bytes_lo,
+		       fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+		       fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+		       fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+		       fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+		       fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+		ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+		       fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+		ADD_64(fcoe_stat->tx_bytes_hi,
+		       fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+		       fcoe_stat->tx_bytes_lo,
+		       fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+		ADD_64(fcoe_stat->tx_bytes_hi,
+		       fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+		       fcoe_stat->tx_bytes_lo,
+		       fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+		ADD_64(fcoe_stat->tx_bytes_hi,
+		       fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+		       fcoe_stat->tx_bytes_lo,
+		       fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+		       fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+		       fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+		       fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+		       fcoe_q_xstorm_stats->mcast_pkts_sent);
+	}
+
+	/* ask L5 driver to add data to the struct */
+	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+	struct iscsi_stats_info *iscsi_stat =
+		&bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+	memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+	iscsi_stat->qos_priority =
+		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+	/* ask L5 driver to add data to the struct */
+	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
 /* called due to MCP event (on pmf):
  *	reread new bandwidth configuration
  *	configure FW
@@ -2960,6 +3069,50 @@
 	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 }
 
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+	enum drv_info_opcode op_code;
+	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+	/* if drv_info version supported by MFW doesn't match - send NACK */
+	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+		return;
+	}
+
+	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+		  DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+
+	switch (op_code) {
+	case ETH_STATS_OPCODE:
+		bnx2x_drv_info_ether_stat(bp);
+		break;
+	case FCOE_STATS_OPCODE:
+		bnx2x_drv_info_fcoe_stat(bp);
+		break;
+	case ISCSI_STATS_OPCODE:
+		bnx2x_drv_info_iscsi_stat(bp);
+		break;
+	default:
+		/* if op code isn't supported - send NACK */
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+		return;
+	}
+
+	/* if we got drv_info attn from MFW then these fields are defined in
+	 * shmem2 for sure
+	 */
+	SHMEM2_WR(bp, drv_info_host_addr_lo,
+		U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+	SHMEM2_WR(bp, drv_info_host_addr_hi,
+		U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
 	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3471,17 @@
 	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
 	       " the driver to shutdown the card to prevent permanent"
 	       " damage.  Please contact OEM Support for assistance\n");
+
+	/*
+	 * Schedule a device reset (unload).
+	 * Some boards consume enough power with the driver up to overheat
+	 * if the fan fails.
+	 */
+	smp_mb__before_clear_bit();
+	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3620,8 @@
 			if (val & DRV_STATUS_SET_MF_BW)
 				bnx2x_set_mf_bw(bp);
 
+			if (val & DRV_STATUS_DRV_INFO_REQ)
+				bnx2x_handle_drv_info_req(bp);
 			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 				bnx2x_pmf_update(bp);
 
@@ -5247,7 +5413,7 @@
 	u8 cos;
 	unsigned long q_type = 0;
 	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+	fp->rx_queue = fp_idx;
 	fp->cid = fp_idx;
 	fp->cl_id = bnx2x_fp_cl_id(fp);
 	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7022,16 @@
 static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
 	int num_groups;
+	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
-	/* number of eth_queues */
-	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+	/* number of queues for statistics is number of eth queues + FCoE */
+	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 
 	/* Total number of FW statistics requests =
-	 * 1 for port stats + 1 for PF stats + num_eth_queues */
-	bp->fw_stats_num = 2 + num_queue_stats;
+	 * 1 for port stats + 1 for PF stats + potentially 1 for FCoE stats +
+	 * num of queues
+	 */
+	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
 
 	/* Request is built from stats_query_header and an array of
@@ -6870,8 +7039,8 @@
 	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
 	 * configured in the stats_query_header.
 	 */
-	num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
-		(((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+	num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+		     (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 
 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
 			num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7049,13 @@
 	 *
 	 * stats_counter holds per-STORM counters that are incremented
 	 * when STORM has finished with the current request.
+	 *
+	 * memory for FCoE offloaded statistics is counted anyway,
+	 * even if it will not be sent.
 	 */
 	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
 		sizeof(struct per_pf_stats) +
+		sizeof(struct fcoe_statistics_params) +
 		sizeof(struct per_queue_stats) * num_queue_stats +
 		sizeof(struct stats_counter);
 
@@ -7025,6 +7198,13 @@
 {
 	unsigned long ramrod_flags = 0;
 
+#ifdef BCM_CNIC
+	if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+		DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+		return 0;
+	}
+#endif
+
 	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8702,17 @@
 	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 
+	/*
+	 * In case of fan failure we need to reset it if the "stop on error"
+	 * debug flag is set, since we are trying to prevent permanent
+	 * overheating damage.
+	 */
+	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+		DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+		netif_device_detach(bp->dev);
+		bnx2x_close(bp->dev);
+	}
+
 sp_rtnl_exit:
 	rtnl_unlock();
 }
@@ -8708,7 +8899,7 @@
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-	u32 val, val2, val3, val4, id;
+	u32 val, val2, val3, val4, id, boot_mode;
 	u16 pmc;
 
 	/* Get the chip revision id and number. */
@@ -8817,6 +9008,26 @@
 	bp->link_params.feature_config_flags |=
 		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
 		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+	bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+			BC_SUPPORTS_PFC_STATS : 0;
+
+	boot_mode = SHMEM_RD(bp,
+			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+			PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+	switch (boot_mode) {
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+		break;
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+		break;
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+		break;
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+		break;
+	}
 
 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
 	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9478,43 @@
 							bp->common.shmem2_base);
 }
 
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
 	int port = BP_PORT(bp);
-	int func = BP_ABS_FUNC(bp);
 
 	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 				drv_lic_key[port].max_iscsi_conn);
-	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
-				drv_lic_key[port].max_fcoe_conn);
 
-	/* Get the number of maximum allowed iSCSI and FCoE connections */
+	/* Get the number of maximum allowed iSCSI connections */
 	bp->cnic_eth_dev.max_iscsi_conn =
 		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
 		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 
+	BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+		       bp->cnic_eth_dev.max_iscsi_conn);
+
+	/*
+	 * If maximum allowed number of connections is zero -
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_iscsi_conn)
+		bp->flags |= NO_ISCSI_FLAG;
+#else
+	bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+	int port = BP_PORT(bp);
+	int func = BP_ABS_FUNC(bp);
+
+	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[port].max_fcoe_conn);
+
+	/* Get the number of maximum allowed FCoE connections */
 	bp->cnic_eth_dev.max_fcoe_conn =
 		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
 		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9566,29 @@
 		}
 	}
 
-	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
-		       bp->cnic_eth_dev.max_iscsi_conn,
-		       bp->cnic_eth_dev.max_fcoe_conn);
+	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
 
 	/*
 	 * If maximum allowed number of connections is zero -
 	 * disable the feature.
 	 */
-	if (!bp->cnic_eth_dev.max_iscsi_conn)
-		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
 	if (!bp->cnic_eth_dev.max_fcoe_conn)
 		bp->flags |= NO_FCOE_FLAG;
-}
+#else
+	bp->flags |= NO_FCOE_FLAG;
 #endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+	/*
+	 * iSCSI may be dynamically disabled, but reading the
+	 * info here lets the driver decrease its memory usage
+	 * if the feature is disabled for good.
+	 */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_get_fcoe_info(bp);
+}
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
@@ -9374,7 +9614,8 @@
 			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-		/* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+		/*
+		 * iSCSI and FCoE NPAR MACs: if there is no iSCSI or
 		 * FCoE MAC then the appropriate feature should be disabled.
 		 */
 		if (IS_MF_SI(bp)) {
@@ -9396,11 +9637,22 @@
 				val = MF_CFG_RD(bp, func_ext_config[func].
 						    fcoe_mac_addr_lower);
 				bnx2x_set_mac_buf(fip_mac, val, val2);
-				BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+				BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
 					       fip_mac);
 
 			} else
 				bp->flags |= NO_FCOE_FLAG;
+		} else { /* SD mode */
+			if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+				/* use primary mac as iscsi mac */
+				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+				/* Zero primary MAC configuration */
+				memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+				BNX2X_DEV_INFO("SD ISCSI MODE\n");
+				BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+					       iscsi_mac);
+			}
 		}
 #endif
 	} else {
@@ -9449,7 +9701,7 @@
 	}
 #endif
 
-	if (!is_valid_ether_addr(bp->dev->dev_addr))
+	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
 		dev_err(&bp->pdev->dev,
 			"bad Ethernet MAC address configuration: "
 			"%pM, change it manually before bringing up "
@@ -9661,9 +9913,7 @@
 	/* Get MAC addresses */
 	bnx2x_get_mac_hwinfo(bp);
 
-#ifdef BCM_CNIC
 	bnx2x_get_cnic_info(bp);
-#endif
 
 	/* Get current FW pulse sequence */
 	if (!BP_NOMCP(bp)) {
@@ -9681,30 +9931,49 @@
 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 {
 	int cnt, i, block_end, rodi;
-	char vpd_data[BNX2X_VPD_LEN+1];
+	char vpd_start[BNX2X_VPD_LEN+1];
 	char str_id_reg[VENDOR_ID_LEN+1];
 	char str_id_cap[VENDOR_ID_LEN+1];
+	char *vpd_data;
+	char *vpd_extended_data = NULL;
 	u8 len;
 
-	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
 	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
 	if (cnt < BNX2X_VPD_LEN)
 		goto out_not_found;
 
-	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+	/* The VPD RO tag should be the first tag after the identifier string,
+	 * hence we should be able to find it in the first BNX2X_VPD_LEN chars
+	 */
+	i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
 			     PCI_VPD_LRDT_RO_DATA);
 	if (i < 0)
 		goto out_not_found;
 
-
 	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-		    pci_vpd_lrdt_size(&vpd_data[i]);
+		    pci_vpd_lrdt_size(&vpd_start[i]);
 
 	i += PCI_VPD_LRDT_TAG_SIZE;
 
-	if (block_end > BNX2X_VPD_LEN)
-		goto out_not_found;
+	if (block_end > BNX2X_VPD_LEN) {
+		vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+		if (vpd_extended_data == NULL)
+			goto out_not_found;
+
+		/* read rest of vpd image into vpd_extended_data */
+		memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+		cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+				   block_end - BNX2X_VPD_LEN,
+				   vpd_extended_data + BNX2X_VPD_LEN);
+		if (cnt < (block_end - BNX2X_VPD_LEN))
+			goto out_not_found;
+		vpd_data = vpd_extended_data;
+	} else
+		vpd_data = vpd_start;
+
+	/* now vpd_data holds full vpd content in both cases */
 
 	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
 				   PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10005,11 @@
 				bp->fw_ver[len] = ' ';
 			}
 		}
+		kfree(vpd_extended_data);
 		return;
 	}
 out_not_found:
+	kfree(vpd_extended_data);
 	return;
 }
 
@@ -9840,15 +10111,20 @@
 
 	bp->multi_mode = multi_mode;
 
+	bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+	bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
 	/* Set TPA flags */
-	if (disable_tpa) {
+	if (bp->disable_tpa) {
 		bp->flags &= ~TPA_ENABLE_FLAG;
 		bp->dev->features &= ~NETIF_F_LRO;
 	} else {
 		bp->flags |= TPA_ENABLE_FLAG;
 		bp->dev->features |= NETIF_F_LRO;
 	}
-	bp->disable_tpa = disable_tpa;
 
 	if (CHIP_IS_E1(bp))
 		bp->dropless_fc = 0;
@@ -9965,7 +10241,7 @@
 }
 
 /* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -10119,6 +10395,11 @@
 	}
 
 	bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+	/* handle ISCSI SD mode */
+	if (IS_MF_ISCSI_SD(bp))
+		bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
 
 	/* Schedule the rx_mode command */
 	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10479,15 @@
 }
 #endif
 
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+		return -EADDRNOTAVAIL;
+	return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_open		= bnx2x_open,
 	.ndo_stop		= bnx2x_close,
@@ -10205,7 +10495,7 @@
 	.ndo_select_queue	= bnx2x_select_queue,
 	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
 	.ndo_set_mac_address	= bnx2x_change_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_validate_addr	= bnx2x_validate_addr,
 	.ndo_do_ioctl		= bnx2x_ioctl,
 	.ndo_change_mtu		= bnx2x_change_mtu,
 	.ndo_fix_features	= bnx2x_fix_features,
@@ -10823,8 +11113,8 @@
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
 #ifdef BCM_CNIC
-	/* disable FCOE L2 queue for E1x and E3*/
-	if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+	/* disable FCOE L2 queue for E1x */
+	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
 
 #endif
@@ -11486,6 +11776,38 @@
 		smp_mb__after_atomic_inc();
 		break;
 	}
+	case DRV_CTL_ULP_REGISTER_CMD: {
+		int ulp_type = ctl->data.ulp_type;
+
+		if (CHIP_IS_E3(bp)) {
+			int idx = BP_FW_MB_IDX(bp);
+			u32 cap;
+
+			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+			if (ulp_type == CNIC_ULP_ISCSI)
+				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+			else if (ulp_type == CNIC_ULP_FCOE)
+				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+		}
+		break;
+	}
+	case DRV_CTL_ULP_UNREGISTER_CMD: {
+		int ulp_type = ctl->data.ulp_type;
+
+		if (CHIP_IS_E3(bp)) {
+			int idx = BP_FW_MB_IDX(bp);
+			u32 cap;
+
+			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+			if (ulp_type == CNIC_ULP_ISCSI)
+				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+			else if (ulp_type == CNIC_ULP_FCOE)
+				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+		}
+		break;
+	}
 
 	default:
 		BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11561,7 +11883,7 @@
 
 	mutex_lock(&bp->cnic_mutex);
 	cp->drv_state = 0;
-	rcu_assign_pointer(bp->cnic_ops, NULL);
+	RCU_INIT_POINTER(bp->cnic_ops, NULL);
 	mutex_unlock(&bp->cnic_mutex);
 	synchronize_rcu();
 	kfree(bp->cnic_kwq);
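
As a quick illustration of the drv_info flow added above (bnx2x_handle_drv_info_req() plus the three bnx2x_drv_info_*_stat() collectors), the standalone sketch below shows the same version-check, opcode-dispatch and ACK-or-NACK pattern. The enum values and the fw_command() stub are illustrative stand-ins, not the driver's real symbols.

/* Standalone sketch of the drv_info request dispatch: verify the interface
 * version, collect the requested statistics block, then ACK, or NACK on any
 * version mismatch or unsupported opcode.
 */
#include <stdio.h>

#define DRV_INFO_CUR_VER 1

enum drv_info_op { ETH_STATS_OP, FCOE_STATS_OP, ISCSI_STATS_OP, BAD_OP };
enum fw_reply { DRV_INFO_ACK, DRV_INFO_NACK };

static void fw_command(enum fw_reply reply)
{
	printf("reply to MFW: %s\n", reply == DRV_INFO_ACK ? "ACK" : "NACK");
}

static void handle_drv_info_req(unsigned int version, enum drv_info_op op)
{
	/* version supported by the MFW doesn't match - send NACK */
	if (version != DRV_INFO_CUR_VER) {
		fw_command(DRV_INFO_NACK);
		return;
	}

	switch (op) {
	case ETH_STATS_OP:
		printf("collect ethernet stats\n");
		break;
	case FCOE_STATS_OP:
		printf("collect FCoE stats\n");
		break;
	case ISCSI_STATS_OP:
		printf("collect iSCSI stats\n");
		break;
	default:
		/* opcode isn't supported - send NACK */
		fw_command(DRV_INFO_NACK);
		return;
	}

	/* publish the buffer address to shmem2 (elided here), then ACK */
	fw_command(DRV_INFO_ACK);
}

int main(void)
{
	handle_drv_info_req(DRV_INFO_CUR_VER, FCOE_STATS_OP);
	handle_drv_info_req(DRV_INFO_CUR_VER, BAD_OP);
	return 0;
}
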
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index e58073e..44609de 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -160,8 +160,11 @@
 #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 			 0x6007c
 /* [RW 10] Write client 0: Assert pause threshold. */
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_0				 0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1				 0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates whether to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE			 0x60268
+/* [R 24] The number of full blocks occupied by port. */
 #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0				 0x60094
 /* [RW 1] Reset the design by software. */
 #define BRB1_REG_SOFT_RESET					 0x600dc
@@ -1619,6 +1622,14 @@
    register bits. */
 #define MISC_REG_LCPLL_CTRL_1					 0xa2a4
 #define MISC_REG_LCPLL_CTRL_REG_2				 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN				 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA				 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG				 0xaa7c
 /* [RW 4] Interrupt mask register #0 read/write */
 #define MISC_REG_MISC_INT_MASK					 0xa388
 /* [RW 1] Parity mask register #0 read/write */
@@ -1754,6 +1765,7 @@
  * is compared to the value on ctrl_md_devad. Drives output
  * misc_xgxs0_phy_addr. Global register. */
 #define MISC_REG_WC0_CTRL_PHY_ADDR				 0xa9cc
+#define MISC_REG_WC0_RESET					 0xac30
 /* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
    side. This should be less than or equal to phy_port_mode; if some of the
    ports are not used. This enables reduction of frequency on the core side.
@@ -6823,11 +6835,13 @@
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER		0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G			0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG		0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS		0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG			0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS			0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH		0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ	0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1			0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1			0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN			0x0080
 
 /* BCM84833 only */
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1			0x401a
@@ -6838,27 +6852,36 @@
 #define MDIO_84833_TOP_CFG_SCRATCH_REG2			0x4007
 #define MDIO_84833_TOP_CFG_SCRATCH_REG3			0x4008
 #define MDIO_84833_TOP_CFG_SCRATCH_REG4			0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG			0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG			0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26		0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27		0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28		0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29		0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30		0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31		0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND	MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS	MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1	MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2	MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3	MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4	MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5	MDIO_84833_TOP_CFG_SCRATCH_REG31
 
 /* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE		0x2
+#define PHY84833_CMD_SET_PAIR_SWAP			0x8001
+#define PHY84833_CMD_GET_EEE_MODE			0x8008
+#define PHY84833_CMD_SET_EEE_MODE			0x8009
 /* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED				0x0001
-#define PHY84833_CMD_IN_PROGRESS			0x0002
-#define PHY84833_CMD_COMPLETE_PASS			0x0004
-#define PHY84833_CMD_COMPLETE_ERROR			0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS			0x0010
-#define PHY84833_CMD_SYSTEM_BOOT			0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS			0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE			0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE			0xa5a5
+#define PHY84833_STATUS_CMD_RECEIVED			0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS			0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS		0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR		0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS		0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT			0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS		0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE		0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE		0xa5a5
 
 
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE			0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE			0x28
-
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD					0x3
 #define MDIO_WC_REG_IEEE0BLK_MIICNTL			0x0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 1451769..5ac6160 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,6 +30,8 @@
 
 #define BNX2X_MAX_EMUL_MULTI		16
 
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
 /**** Exe Queue interfaces ****/
 
 /**
@@ -441,6 +443,36 @@
 	return true;
 }
 
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+				int n, u8 *buf)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	u8 *next = buf;
+	int counter = 0;
+
+	/* traverse list */
+	list_for_each_entry(pos, &o->head, link) {
+		if (counter < n) {
+			/* place leading zeroes in buffer */
+			memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+			/* place mac after leading zeroes*/
+			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+			       ETH_ALEN);
+
+			/* calculate address of next element and
+			 * advance counter
+			 */
+			counter++;
+			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+			   counter, next, pos->u.mac.mac);
+		}
+	}
+	return counter * ETH_ALEN;
+}
+
 /* check_add() callbacks */
 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
 			       union bnx2x_classification_ramrod_data *data)
@@ -1886,6 +1918,7 @@
 		mac_obj->check_move        = bnx2x_check_move;
 		mac_obj->ramrod_cmd        =
 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+		mac_obj->get_n_elements    = bnx2x_get_n_elements;
 
 		/* Exe Queue */
 		bnx2x_exe_queue_init(bp,
@@ -3342,7 +3375,7 @@
 		if (!list_empty(&o->registry.exact_match.macs))
 			return 0;
 
-		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
 		if (!elem) {
 			BNX2X_ERR("Failed to allocate registry memory\n");
 			return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 9a517c2..992308f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,19 @@
 	/* RAMROD command to be used */
 	int				ramrod_cmd;
 
+	/* copy first n elements onto preallocated buffer
+	 *
+	 * @param n number of elements to get
+	 * @param buf buffer preallocated by caller into which elements
+	 *            will be copied. Note elements are 4-byte aligned
+	 *            so buffer size must be able to accommodate the
+	 *            aligned elements.
+	 *
+	 * @return number of copied bytes
+	 */
+	int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+			      int n, u8 *buf);
+
 	/**
 	 * Checks if ADD-ramrod with the given params may be performed.
 	 *
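
One practical point behind the get_n_elements() contract documented above: each MAC is stored with leading zeroes in a slot aligned to 4 bytes, so callers size the destination buffer on the aligned stride rather than on ETH_ALEN. Below is a small standalone sketch of that sizing; only the alignment rule comes from the diff, the names and the sample MAC are illustrative.

/* Sketch of sizing a buffer for the get_n_elements() contract: each 6-byte
 * MAC occupies ALIGN(ETH_ALEN, 4) = 8 bytes, padded with 2 leading zeroes.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(unsigned int)) - ETH_ALEN)

int main(void)
{
	size_t n = 3;					/* MACs requested */
	size_t stride = ALIGN(ETH_ALEN, sizeof(unsigned int));
	unsigned char *buf = calloc(n, stride);
	unsigned char mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	if (!buf)
		return 1;

	/* first slot: leading zeroes, then the MAC, as the object stores it */
	memset(buf, 0, MAC_LEADING_ZERO_CNT);
	memcpy(buf + MAC_LEADING_ZERO_CNT, mac, ETH_ALEN);

	printf("%zu MACs need %zu bytes (stride %zu)\n", n, n * stride, stride);
	free(buf);
	return 0;
}
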
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 02ac6a7..bc0121a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,6 +39,17 @@
 #endif
 }
 
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+	u16 res = sizeof(struct host_port_stats) >> 2;
+
+	/* if PFC stats are not supported by the MFW, don't DMA them */
+	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
+		res -= (sizeof(u32)*4) >> 2;
+
+	return res;
+}
+
 /*
  * Init service functions
  */
@@ -178,7 +189,8 @@
 				   DMAE_LEN32_RD_MAX * 4);
 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
 				   DMAE_LEN32_RD_MAX * 4);
-	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 	dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@
 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 		dmae->dst_addr_lo = bp->port.port_stx >> 2;
 		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct host_port_stats) >> 2;
+		dmae->len = bnx2x_get_port_stats_dma_len(bp);
 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 		dmae->comp_addr_hi = 0;
 		dmae->comp_val = 1;
@@ -540,6 +552,25 @@
 		UPDATE_STAT64(tx_stat_gterr,
 				tx_stat_dot3statsinternalmactransmiterrors);
 		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+		/* collect PFC stats */
+		DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+			pstats->pfc_frames_tx_hi,
+			diff.lo, new->tx_stat_gtpp_lo,
+			pstats->pfc_frames_tx_lo);
+		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+		ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+			pstats->pfc_frames_tx_lo, diff.lo);
+
+		DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+			pstats->pfc_frames_rx_hi,
+			diff.lo, new->rx_stat_grpp_lo,
+			pstats->pfc_frames_rx_lo);
+		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+		ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+			pstats->pfc_frames_rx_lo, diff.lo);
 	}
 
 	estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@
 				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
 	estats->pause_frames_sent_lo =
 				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+	estats->pfc_frames_received_hi =
+				pstats->pfc_frames_rx_hi;
+	estats->pfc_frames_received_lo =
+				pstats->pfc_frames_rx_lo;
+	estats->pfc_frames_sent_hi =
+				pstats->pfc_frames_tx_hi;
+	estats->pfc_frames_sent_lo =
+				pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@
 	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
 	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
 
+	/* collect pfc stats */
+	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
 
 	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
 	ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@
 				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
 	estats->pause_frames_sent_lo =
 				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+	estats->pfc_frames_received_hi =
+				pstats->pfc_frames_rx_hi;
+	estats->pfc_frames_received_lo =
+				pstats->pfc_frames_rx_lo;
+	estats->pfc_frames_sent_hi =
+				pstats->pfc_frames_tx_hi;
+	estats->pfc_frames_sent_lo =
+				pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@
 	estats->brb_drop_hi = pstats->brb_drop_hi;
 	estats->brb_drop_lo = pstats->brb_drop_lo;
 
-	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+	pstats->host_port_stats_counter++;
 
 	if (!BP_NOMCP(bp)) {
 		u32 nig_timer_max =
@@ -1265,7 +1319,7 @@
 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 		dmae->dst_addr_lo = bp->port.port_stx >> 2;
 		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct host_port_stats) >> 2;
+		dmae->len = bnx2x_get_port_stats_dma_len(bp);
 		if (bp->func_stx) {
 			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 			dmae->comp_addr_hi = 0;
@@ -1349,12 +1403,14 @@
 	enum bnx2x_stats_state state;
 	if (unlikely(bp->panic))
 		return;
-	bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 	spin_unlock_bh(&bp->stats_lock);
 
+	bnx2x_stats_stm[state][event].action(bp);
+
 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
 		   state, event, bp->stats_state);
@@ -1380,7 +1436,7 @@
 	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 	dmae->dst_addr_lo = bp->port.port_stx >> 2;
 	dmae->dst_addr_hi = 0;
-	dmae->len = sizeof(struct host_port_stats) >> 2;
+	dmae->len = bnx2x_get_port_stats_dma_len(bp);
 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 	dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1513,7 @@
 static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
 	int i;
+	int first_queue_query_index;
 	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
 
 	dma_addr_t cur_data_offset;
@@ -1512,14 +1569,40 @@
 	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
 	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
 
+	/**** FCoE FW statistics data ****/
+	if (!NO_FCOE(bp)) {
+		cur_data_offset = bp->fw_stats_data_mapping +
+			offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+		cur_query_entry =
+			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+		cur_query_entry->kind = STATS_TYPE_FCOE;
+		/* For FCoE query index is a DONT CARE */
+		cur_query_entry->index = BP_PORT(bp);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+	}
+
 	/**** Clients' queries ****/
 	cur_data_offset = bp->fw_stats_data_mapping +
 		offsetof(struct bnx2x_fw_stats_data, queue_stats);
 
+	/* first queue query index depends on whether FCoE offloaded request will
+	 * be included in the ramrod
+	 */
+	if (!NO_FCOE(bp))
+		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+	else
+		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
 	for_each_eth_queue(bp, i) {
 		cur_query_entry =
 			&bp->fw_stats_req->
-					query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+					query[first_queue_query_index + i];
 
 		cur_query_entry->kind = STATS_TYPE_QUEUE;
 		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1614,21 @@
 
 		cur_data_offset += sizeof(struct per_queue_stats);
 	}
+
+	/* add FCoE queue query if needed */
+	if (!NO_FCOE(bp)) {
+		cur_query_entry =
+			&bp->fw_stats_req->
+					query[first_queue_query_index + i];
+
+		cur_query_entry->kind = STATS_TYPE_QUEUE;
+		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+	}
 }
 
 void bnx2x_stats_init(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 5d8ce2f..683deb0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -193,6 +193,12 @@
 	u32 total_tpa_aggregated_frames_lo;
 	u32 total_tpa_bytes_hi;
 	u32 total_tpa_bytes_lo;
+
+	/* PFC */
+	u32 pfc_frames_received_hi;
+	u32 pfc_frames_received_lo;
+	u32 pfc_frames_sent_hi;
+	u32 pfc_frames_sent_lo;
 };
 
 
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6f10c69..dd3a0a2 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -250,6 +250,21 @@
 	return io->data;
 }
 
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+
+	if (reg)
+		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+	else
+		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+	info.data.ulp_type = ulp_type;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
 static int cnic_in_use(struct cnic_sock *csk)
 {
 	return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@
 	}
 	read_unlock(&cnic_dev_lock);
 
-	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
@@ -563,6 +578,8 @@
 
 	mutex_unlock(&cnic_lock);
 
+	cnic_ulp_ctl(dev, ulp_type, true);
+
 	return 0;
 
 }
@@ -579,7 +596,7 @@
 	}
 	mutex_lock(&cnic_lock);
 	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 		cnic_put(dev);
 	} else {
 		pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@
 	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
+	cnic_ulp_ctl(dev, ulp_type, false);
+
 	return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1342,7 +1361,7 @@
 	if (ret == 1)
 		return 0;
 
-	return -EBUSY;
+	return ret;
 }
 
 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
@@ -1830,7 +1849,7 @@
 done:
 	cqes[0] = (struct kcqe *) &kcqe;
 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
-	return ret;
+	return 0;
 }
 
 
@@ -1928,7 +1947,7 @@
 	cqes[0] = (struct kcqe *) &kcqe;
 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
 
-	return ret;
+	return 0;
 }
 
 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
@@ -2494,6 +2513,57 @@
 	return ret;
 }
 
+static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kcqe kcqe;
+	struct kcqe *cqes[1];
+	u32 cid;
+	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
+	int ulp_type;
+
+	cid = kwqe->kwqe_info0;
+	memset(&kcqe, 0, sizeof(kcqe));
+
+	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
+		ulp_type = CNIC_ULP_ISCSI;
+		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
+			cid = kwqe->kwqe_info1;
+
+		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
+		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
+		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR;
+		kcqe.kcqe_info2 = cid;
+		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
+
+	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
+		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
+		u32 kcqe_op;
+
+		ulp_type = CNIC_ULP_L4;
+		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
+			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
+		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
+			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
+			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		else
+			return;
+
+		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
+				    KCQE_FLAGS_LAYER_MASK_L4;
+		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR;
+		l4kcqe->cid = cid;
+		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
+	} else {
+		return;
+	}
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
+}
+
 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
 					 struct kwqe *wqes[], u32 num_wqes)
 {
@@ -2551,9 +2621,17 @@
 				   opcode);
 			break;
 		}
-		if (ret < 0)
+		if (ret < 0) {
 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
 				   opcode);
+
+			/* Possibly bnx2x parity error, send completion
+			 * to ulp drivers with error code to speed up
+			 * cleanup and reset recovery.
+			 */
+			if (ret == -EIO || ret == -EAGAIN)
+				cnic_bnx2x_kwqe_err(dev, kwqe);
+		}
 		i += work;
 	}
 	return 0;
@@ -3052,9 +3130,26 @@
 	}
 }
 
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+	int rc;
+
+	mutex_lock(&cnic_lock);
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	if (ulp_ops && ulp_ops->cnic_get_stats)
+		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+	else
+		rc = -ENODEV;
+	mutex_unlock(&cnic_lock);
+	return rc;
+}
+
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 {
 	struct cnic_dev *dev = data;
+	int ulp_type = CNIC_ULP_ISCSI;
 
 	switch (info->cmd) {
 	case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3195,15 @@
 		}
 		break;
 	}
+	case CNIC_CTL_FCOE_STATS_GET_CMD:
+		ulp_type = CNIC_ULP_FCOE;
+		/* fall through */
+	case CNIC_CTL_ISCSI_STATS_GET_CMD:
+		cnic_hold(dev);
+		cnic_copy_ulp_stats(dev, ulp_type);
+		cnic_put(dev);
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -3475,7 +3579,7 @@
 	struct flowi6 fl6;
 
 	memset(&fl6, 0, sizeof(fl6));
-	ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+	fl6.daddr = dst_addr->sin6_addr;
 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
 		fl6.flowi6_oif = dst_addr->sin6_scope_id;
 
@@ -3804,6 +3908,9 @@
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+			set_bit(SK_F_HW_ERR, &csk->flags);
+
 		cp->close_conn(csk, opcode);
 		break;
 
@@ -3931,7 +4038,9 @@
 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
 		if (cnic_ready_to_close(csk, opcode)) {
-			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			if (test_bit(SK_F_HW_ERR, &csk->flags))
+				close_complete = 1;
+			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
 				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
 			else
 				close_complete = 1;
@@ -4824,6 +4933,7 @@
 	int func = CNIC_FUNC(cp), ret;
 	u32 pfid;
 
+	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
 	cp->port_mode = CHIP_PORT_MODE_NONE;
 
 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
@@ -5134,7 +5244,7 @@
 		}
 		cnic_shutdown_rings(dev);
 		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
-		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
 		synchronize_rcu();
 		cnic_cm_shutdown(dev);
 		cp->stop_hw(dev);
@@ -5288,6 +5398,8 @@
 	cdev->pcidev = pdev;
 	cp->chip_id = ethdev->chip_id;
 
+	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 239de89..86936f6 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -85,6 +85,7 @@
 
 /* KCQ (kernel completion queue) completion status */
 #define L4_KCQE_COMPLETION_STATUS_SUCCESS           (0)
+#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR         (4)
 #define L4_KCQE_COMPLETION_STATUS_TIMEOUT           (0x93)
 
 #define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL    (0x83)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 79443e0..1517763 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.5.7"
-#define CNIC_MODULE_RELDATE	"July 20, 2011"
+#define CNIC_MODULE_VERSION	"2.5.8"
+#define CNIC_MODULE_RELDATE	"Jan 3, 2012"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -86,6 +86,8 @@
 #define CNIC_CTL_START_CMD		2
 #define CNIC_CTL_COMPLETION_CMD		3
 #define CNIC_CTL_STOP_ISCSI_CMD		4
+#define CNIC_CTL_FCOE_STATS_GET_CMD	5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD	6
 
 #define DRV_CTL_IO_WR_CMD		0x101
 #define DRV_CTL_IO_RD_CMD		0x102
@@ -96,6 +98,8 @@
 #define DRV_CTL_STOP_L2_CMD		0x107
 #define DRV_CTL_RET_L2_SPQ_CREDIT_CMD	0x10c
 #define DRV_CTL_ISCSI_STOPPED_CMD	0x10d
+#define DRV_CTL_ULP_REGISTER_CMD	0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD	0x10f
 
 struct cnic_ctl_completion {
 	u32	cid;
@@ -133,6 +137,7 @@
 		struct drv_ctl_spq_credit credit;
 		struct drv_ctl_io io;
 		struct drv_ctl_l2_ring ring;
+		int ulp_type;
 		char bytes[MAX_DRV_CTL_DATA];
 	} data;
 };
@@ -201,6 +206,7 @@
 					       struct kwqe_16 *[], u32);
 	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
 	unsigned long	reserved1[2];
+	union drv_info_to_mcp	*addr_drv_info_to_mcp;
 };
 
 struct cnic_sockaddr {
@@ -255,6 +261,7 @@
 #define SK_F_CONNECT_START	4
 #define SK_F_IPV6		5
 #define SK_F_CLOSING		7
+#define SK_F_HW_ERR		8
 
 	atomic_t ref_count;
 	u32 state;
@@ -297,6 +304,8 @@
 	int		max_fcoe_conn;
 	int		max_rdma_conn;
 
+	union drv_info_to_mcp	*stats_addr;
+
 	void		*cnic_priv;
 };
 
@@ -326,6 +335,7 @@
 	void (*cm_remote_abort)(struct cnic_sock *);
 	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
 				  char *data, u16 data_size);
+	int (*cnic_get_stats)(void *ulp_ctx);
 	struct module *owner;
 	atomic_t ref_count;
 };
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 0a1d7f2..8fa7abc 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -163,7 +163,6 @@
 #define SBMAC_MAX_TXDESCR	256
 #define SBMAC_MAX_RXDESCR	256
 
-#define ETHER_ADDR_LEN		6
 #define ENET_PACKET_SIZE	1518
 /*#define ENET_PACKET_SIZE	9216 */
 
@@ -266,7 +265,7 @@
 	int			sbm_pause;	/* current pause setting */
 	int			sbm_link;	/* current link state */
 
-	unsigned char		sbm_hwaddr[ETHER_ADDR_LEN];
+	unsigned char		sbm_hwaddr[ETH_ALEN];
 
 	struct sbmacdma		sbm_txdma;	/* only channel 0 for now */
 	struct sbmacdma		sbm_rxdma;
@@ -2676,15 +2675,4 @@
 	},
 };
 
-static int __init sbmac_init_module(void)
-{
-	return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
-	platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf40741..076e02a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			121
+#define TG3_MIN_NUM			122
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"November 2, 2011"
+#define DRV_MODULE_RELDATE	"December 7, 2011"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -135,7 +135,6 @@
 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
-#define TG3_RSS_INDIR_TBL_SIZE		128
 
 /* Do not place this n-ring entries value into the tp struct itself,
  * we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@
 #if (NET_IP_ALIGN != 0)
 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
 #else
-#define TG3_RX_OFFSET(tp)	0
+#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
 #endif
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX		4096
+#define TG3_TX_BD_DMA_MAX_2K		2048
+#define TG3_TX_BD_DMA_MAX_4K		4096
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -1670,22 +1670,6 @@
 	}
 }
 
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
-	u16 miireg;
-
-	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
-		miireg = ADVERTISE_PAUSE_CAP;
-	else if (flow_ctrl & FLOW_CTRL_TX)
-		miireg = ADVERTISE_PAUSE_ASYM;
-	else if (flow_ctrl & FLOW_CTRL_RX)
-		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-	else
-		miireg = 0;
-
-	return miireg;
-}
-
 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 {
 	u16 miireg;
@@ -1706,18 +1690,12 @@
 {
 	u8 cap = 0;
 
-	if (lcladv & ADVERTISE_1000XPAUSE) {
-		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-			if (rmtadv & LPA_1000XPAUSE)
-				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-			else if (rmtadv & LPA_1000XPAUSE_ASYM)
-				cap = FLOW_CTRL_RX;
-		} else {
-			if (rmtadv & LPA_1000XPAUSE)
-				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-		}
-	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+		if (lcladv & ADVERTISE_1000XPAUSE)
+			cap = FLOW_CTRL_RX;
+		if (rmtadv & ADVERTISE_1000XPAUSE)
 			cap = FLOW_CTRL_TX;
 	}
 
@@ -1792,7 +1770,7 @@
 		if (phydev->duplex == DUPLEX_HALF)
 			mac_mode |= MAC_MODE_HALF_DUPLEX;
 		else {
-			lcl_adv = tg3_advert_flowctrl_1000T(
+			lcl_adv = mii_advertise_flowctrl(
 				  tp->link_config.flowctrl);
 
 			if (phydev->pause)
@@ -2160,7 +2138,7 @@
 	if (tp->link_config.active_speed == SPEED_1000 &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+	     tg3_flag(tp, 57765_CLASS)) &&
 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 		val = MII_TG3_DSP_TAP26_ALNOKO |
 		      MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@
 	bool need_vaux = false;
 
 	/* The GPIOs do something completely different on 57765. */
-	if (!tg3_flag(tp, IS_NIC) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
 		return;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@
 	u32 val, new_adv;
 
 	new_adv = ADVERTISE_CSMA;
-	if (advertise & ADVERTISED_10baseT_Half)
-		new_adv |= ADVERTISE_10HALF;
-	if (advertise & ADVERTISED_10baseT_Full)
-		new_adv |= ADVERTISE_10FULL;
-	if (advertise & ADVERTISED_100baseT_Half)
-		new_adv |= ADVERTISE_100HALF;
-	if (advertise & ADVERTISED_100baseT_Full)
-		new_adv |= ADVERTISE_100FULL;
-
-	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+	new_adv |= mii_advertise_flowctrl(flowctrl);
 
 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
 	if (err)
 		goto done;
 
-	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-		goto done;
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-	new_adv = 0;
-	if (advertise & ADVERTISED_1000baseT_Half)
-		new_adv |= ADVERTISE_1000HALF;
-	if (advertise & ADVERTISED_1000baseT_Full)
-		new_adv |= ADVERTISE_1000FULL;
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
-	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
-		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
-
-	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
-	if (err)
-		goto done;
+		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+		if (err)
+			goto done;
+	}
 
 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 		goto done;
@@ -3650,6 +3614,7 @@
 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
 		case ASIC_REV_5717:
 		case ASIC_REV_57765:
+		case ASIC_REV_57766:
 		case ASIC_REV_5719:
 			/* If we advertised any eee advertisements above... */
 			if (val)
@@ -3786,76 +3751,61 @@
 	return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 {
-	u32 adv_reg, all_mask = 0;
+	u32 advmsk, tgtadv, advertising;
 
-	if (mask & ADVERTISED_10baseT_Half)
-		all_mask |= ADVERTISE_10HALF;
-	if (mask & ADVERTISED_10baseT_Full)
-		all_mask |= ADVERTISE_10FULL;
-	if (mask & ADVERTISED_100baseT_Half)
-		all_mask |= ADVERTISE_100HALF;
-	if (mask & ADVERTISED_100baseT_Full)
-		all_mask |= ADVERTISE_100FULL;
+	advertising = tp->link_config.advertising;
+	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 
-	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
-		return 0;
+	advmsk = ADVERTISE_ALL;
+	if (tp->link_config.active_duplex == DUPLEX_FULL) {
+		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+	}
 
-	if ((adv_reg & ADVERTISE_ALL) != all_mask)
-		return 0;
+	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+		return false;
+
+	if ((*lcladv & advmsk) != tgtadv)
+		return false;
 
 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 		u32 tg3_ctrl;
 
-		all_mask = 0;
-		if (mask & ADVERTISED_1000baseT_Half)
-			all_mask |= ADVERTISE_1000HALF;
-		if (mask & ADVERTISED_1000baseT_Full)
-			all_mask |= ADVERTISE_1000FULL;
+		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 
 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
-			return 0;
+			return false;
 
 		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
-		if (tg3_ctrl != all_mask)
-			return 0;
+		if (tg3_ctrl != tgtadv)
+			return false;
 	}
 
-	return 1;
+	return true;
 }
 
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 {
-	u32 curadv, reqadv;
+	u32 lpeth = 0;
 
-	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
-		return 1;
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		u32 val;
 
-	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+		if (tg3_readphy(tp, MII_STAT1000, &val))
+			return false;
 
-	if (tp->link_config.active_duplex == DUPLEX_FULL) {
-		if (curadv != reqadv)
-			return 0;
-
-		if (tg3_flag(tp, PAUSE_AUTONEG))
-			tg3_readphy(tp, MII_LPA, rmtadv);
-	} else {
-		/* Reprogram the advertisement register, even if it
-		 * does not affect the current link.  If the link
-		 * gets renegotiated in the future, we can save an
-		 * additional renegotiation cycle by advertising
-		 * it correctly in the first place.
-		 */
-		if (curadv != reqadv) {
-			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
-				     ADVERTISE_PAUSE_ASYM);
-			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
-		}
+		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
 	}
 
-	return 1;
+	if (tg3_readphy(tp, MII_LPA, rmtadv))
+		return false;
+
+	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+	tp->link_config.rmt_adv = lpeth;
+
+	return true;
 }
 
 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@
 	current_link_up = 0;
 	current_speed = SPEED_INVALID;
 	current_duplex = DUPLEX_INVALID;
+	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+	tp->link_config.rmt_adv = 0;
 
 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 		err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@
 
 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 			if ((bmcr & BMCR_ANENABLE) &&
-			    tg3_copper_is_advertising_all(tp,
-						tp->link_config.advertising)) {
-				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
-								  &rmt_adv))
-					current_link_up = 1;
-			}
+			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+				current_link_up = 1;
 		} else {
 			if (!(bmcr & BMCR_ANENABLE) &&
 			    tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@
 		}
 
 		if (current_link_up == 1 &&
-		    tp->link_config.active_duplex == DUPLEX_FULL)
+		    tp->link_config.active_duplex == DUPLEX_FULL) {
+			u32 reg, bit;
+
+			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+				reg = MII_TG3_FET_GEN_STAT;
+				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+			} else {
+				reg = MII_TG3_EXT_STAT;
+				bit = MII_TG3_EXT_STAT_MDIX;
+			}
+
+			if (!tg3_readphy(tp, reg, &val) && (val & bit))
+				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+		}
 	}
 
 relink:
@@ -4643,6 +4606,9 @@
 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
 				remote_adv |= LPA_1000XPAUSE_ASYM;
 
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+
 			tg3_setup_flow_control(tp, local_adv, remote_adv);
 			current_link_up = 1;
 			tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@
 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
 				remote_adv |= LPA_1000XPAUSE_ASYM;
 
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+
 			tg3_setup_flow_control(tp, local_adv, remote_adv);
 
 			current_link_up = 1;
@@ -4796,6 +4765,7 @@
 	udelay(40);
 
 	current_link_up = 0;
+	tp->link_config.rmt_adv = 0;
 	mac_status = tr32(MAC_STATUS);
 
 	if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@
 	current_link_up = 0;
 	current_speed = SPEED_INVALID;
 	current_duplex = DUPLEX_INVALID;
+	tp->link_config.rmt_adv = 0;
 
 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@
 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 		/* do nothing, just check for link up at the end */
 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
-		u32 adv, new_adv;
+		u32 adv, newadv;
 
 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
-		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
-				  ADVERTISE_1000XPAUSE |
-				  ADVERTISE_1000XPSE_ASYM |
-				  ADVERTISE_SLCT);
+		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+				 ADVERTISE_1000XPAUSE |
+				 ADVERTISE_1000XPSE_ASYM |
+				 ADVERTISE_SLCT);
 
-		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 
-		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
-			new_adv |= ADVERTISE_1000XHALF;
-		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
-			new_adv |= ADVERTISE_1000XFULL;
-
-		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
-			tg3_writephy(tp, MII_ADVERTISE, new_adv);
+		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+			tg3_writephy(tp, MII_ADVERTISE, newadv);
 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 			tg3_writephy(tp, MII_BMCR, bmcr);
 
@@ -4997,6 +4964,9 @@
 					current_duplex = DUPLEX_FULL;
 				else
 					current_duplex = DUPLEX_HALF;
+
+				tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
 			} else if (!tg3_flag(tp, 5780_CLASS)) {
 				/* Link is up via parallel detect */
 			} else {
@@ -5320,6 +5290,7 @@
 	u32 sw_idx = tnapi->tx_cons;
 	struct netdev_queue *txq;
 	int index = tnapi - tp->napi;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 	if (tg3_flag(tp, ENABLE_TSS))
 		index--;
@@ -5370,6 +5341,9 @@
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
+		pkts_compl++;
+		bytes_compl += skb->len;
+
 		dev_kfree_skb(skb);
 
 		if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@
 		}
 	}
 
+	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
 	tnapi->tx_cons = sw_idx;
 
 	/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@
 	}
 }
 
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 {
-	if (!ri->skb)
+	if (!ri->data)
 		return;
 
 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 			 map_sz, PCI_DMA_FROMDEVICE);
-	dev_kfree_skb_any(ri->skb);
-	ri->skb = NULL;
+	kfree(ri->data);
+	ri->data = NULL;
 }
 
 /* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 			    u32 opaque_key, u32 dest_idx_unmasked)
 {
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map;
-	struct sk_buff *skb;
+	u8 *data;
 	dma_addr_t mapping;
-	int skb_size, dest_idx;
+	int skb_size, data_size, dest_idx;
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
-		skb_size = tp->rx_pkt_map_sz;
+		data_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
-		skb_size = TG3_RX_JMB_MAP_SZ;
+		data_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
 	default:
@@ -5453,31 +5429,33 @@
 	 * Callers depend upon this behavior and assume that
 	 * we leave everything unchanged if we fail.
 	 */
-	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
-	if (skb == NULL)
+	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc(skb_size, GFP_ATOMIC);
+	if (!data)
 		return -ENOMEM;
 
-	skb_reserve(skb, TG3_RX_OFFSET(tp));
-
-	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+	mapping = pci_map_single(tp->pdev,
+				 data + TG3_RX_OFFSET(tp),
+				 data_size,
 				 PCI_DMA_FROMDEVICE);
 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
-		dev_kfree_skb(skb);
+		kfree(data);
 		return -EIO;
 	}
 
-	map->skb = skb;
+	map->data = data;
 	dma_unmap_addr_set(map, mapping, mapping);
 
 	desc->addr_hi = ((u64)mapping >> 32);
 	desc->addr_lo = ((u64)mapping & 0xffffffff);
 
-	return skb_size;
+	return data_size;
 }
 
 /* We only need to move over in the address because the other
  * members of the RX descriptor are invariant.  See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
  */
 static void tg3_recycle_rx(struct tg3_napi *tnapi,
 			   struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@
 		return;
 	}
 
-	dest_map->skb = src_map->skb;
+	dest_map->data = src_map->data;
 	dma_unmap_addr_set(dest_map, mapping,
 			   dma_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@
 	 */
 	smp_wmb();
 
-	src_map->skb = NULL;
+	src_map->data = NULL;
 }
 
 /* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u32 opaque_key, desc_idx, *post_ptr;
+		u8 *data;
 
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
 			dma_addr = dma_unmap_addr(ri, mapping);
-			skb = ri->skb;
+			data = ri->data;
 			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
 			dma_addr = dma_unmap_addr(ri, mapping);
-			skb = ri->skb;
+			data = ri->data;
 			post_ptr = &jmb_prod_idx;
 		} else
 			goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@
 			goto next_pkt;
 		}
 
+		prefetch(data + TG3_RX_OFFSET(tp));
 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 		      ETH_FCS_LEN;
 
 		if (len > TG3_RX_COPY_THRESH(tp)) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
 						    *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
@@ -5620,35 +5600,37 @@
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
-			/* Ensure that the update to the skb happens
+			skb = build_skb(data);
+			if (!skb) {
+				kfree(data);
+				goto drop_it_no_recycle;
+			}
+			skb_reserve(skb, TG3_RX_OFFSET(tp));
+			/* Ensure that the update to the data happens
 			 * after the usage of the old DMA mapping.
 			 */
 			smp_wmb();
 
-			ri->skb = NULL;
+			ri->data = NULL;
 
-			skb_put(skb, len);
 		} else {
-			struct sk_buff *copy_skb;
-
 			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
-			copy_skb = netdev_alloc_skb(tp->dev, len +
-						    TG3_RAW_IP_ALIGN);
-			if (copy_skb == NULL)
+			skb = netdev_alloc_skb(tp->dev,
+					       len + TG3_RAW_IP_ALIGN);
+			if (skb == NULL)
 				goto drop_it_no_recycle;
 
-			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
-			skb_put(copy_skb, len);
+			skb_reserve(skb, TG3_RAW_IP_ALIGN);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-			skb_copy_from_linear_data(skb, copy_skb->data, len);
+			memcpy(skb->data,
+			       data + TG3_RX_OFFSET(tp),
+			       len);
 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
-			/* We'll reuse the original ring buffer. */
-			skb = copy_skb;
 		}
 
+		skb_put(skb, len);
 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@
 		di = dpr->rx_std_prod_idx;
 
 		for (i = di; i < di + cpycnt; i++) {
-			if (dpr->rx_std_buffers[i].skb) {
+			if (dpr->rx_std_buffers[i].data) {
 				cpycnt = i - di;
 				err = -ENOSPC;
 				break;
@@ -5845,7 +5827,7 @@
 		di = dpr->rx_jmb_prod_idx;
 
 		for (i = di; i < di + cpycnt; i++) {
-			if (dpr->rx_jmb_buffers[i].skb) {
+			if (dpr->rx_jmb_buffers[i].data) {
 				cpycnt = i - di;
 				err = -ENOSPC;
 				break;
@@ -6443,25 +6425,25 @@
 	bool hwbug = false;
 
 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
-		hwbug = 1;
+		hwbug = true;
 
 	if (tg3_4g_overflow_test(map, len))
-		hwbug = 1;
+		hwbug = true;
 
 	if (tg3_40bit_overflow_test(tp, map, len))
-		hwbug = 1;
+		hwbug = true;
 
-	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+	if (tp->dma_limit) {
 		u32 prvidx = *entry;
 		u32 tmp_flag = flags & ~TXD_FLAG_END;
-		while (len > TG3_TX_BD_DMA_MAX && *budget) {
-			u32 frag_len = TG3_TX_BD_DMA_MAX;
-			len -= TG3_TX_BD_DMA_MAX;
+		while (len > tp->dma_limit && *budget) {
+			u32 frag_len = tp->dma_limit;
+			len -= tp->dma_limit;
 
 			/* Avoid the 8byte DMA problem */
 			if (len <= 8) {
-				len += TG3_TX_BD_DMA_MAX / 2;
-				frag_len = TG3_TX_BD_DMA_MAX / 2;
+				len += tp->dma_limit / 2;
+				frag_len = tp->dma_limit / 2;
 			}
 
 			tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@
 				*budget -= 1;
 				*entry = NEXT_TX(*entry);
 			} else {
-				hwbug = 1;
+				hwbug = true;
 				tnapi->tx_buffers[prvidx].fragmented = false;
 			}
 		}
@@ -6816,6 +6798,7 @@
 	}
 
 	skb_tx_timestamp(skb);
+	netdev_sent_queue(tp->dev, skb->len);
 
 	/* Packets are ready, update Tx producer idx local and on card. */
 	tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6968,7 +6951,7 @@
 	return 0;
 }
 
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
@@ -6994,7 +6977,8 @@
 	}
 }
 
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
@@ -7004,9 +6988,9 @@
 	return features;
 }
 
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 {
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
 		tg3_set_loopback(dev, features);
@@ -7082,14 +7066,14 @@
 	if (tpr != &tp->napi[0].prodring) {
 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
 		     i = (i + 1) & tp->rx_std_ring_mask)
-			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 					tp->rx_pkt_map_sz);
 
 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
 			for (i = tpr->rx_jmb_cons_idx;
 			     i != tpr->rx_jmb_prod_idx;
 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
-				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 						TG3_RX_JMB_MAP_SZ);
 			}
 		}
@@ -7098,12 +7082,12 @@
 	}
 
 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
-		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 				tp->rx_pkt_map_sz);
 
 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
-			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 					TG3_RX_JMB_MAP_SZ);
 	}
 }
@@ -7159,7 +7143,7 @@
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX standard ring. Only "
 				    "%d out of %d buffers were allocated "
@@ -7191,7 +7175,7 @@
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX jumbo ring. Only %d "
 				    "out of %d buffers were allocated "
@@ -7297,6 +7281,7 @@
 			dev_kfree_skb_any(skb);
 		}
 	}
+	netdev_reset_queue(tp->dev);
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7576,6 @@
 		if (tnapi->hw_status)
 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 	}
-	if (tp->hw_stats)
-		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
 	return err;
 }
@@ -7626,15 +7609,11 @@
 
 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
-		if (tg3_flag(tp, PCI_EXPRESS))
-			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-		else {
-			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
-					      tp->pci_cacheline_sz);
-			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
-					      tp->pci_lat_timer);
-		}
+	if (!tg3_flag(tp, PCI_EXPRESS)) {
+		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+				      tp->pci_cacheline_sz);
+		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+				      tp->pci_lat_timer);
 	}
 
 	/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7798,6 @@
 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
 				      val16);
 
-		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
 		/* Clear error status */
 		pci_write_config_word(tp->pdev,
 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7891,11 @@
 	return 0;
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+						 struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+						struct tg3_ethtool_stats *);
+
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
@@ -7931,6 +7913,15 @@
 	tg3_write_sig_legacy(tp, kind);
 	tg3_write_sig_post_reset(tp, kind);
 
+	if (tp->hw_stats) {
+		/* Save the stats across chip resets... */
+		tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+		tg3_get_estats(tp, &tp->estats_prev);
+
+		/* And make sure the next sample is new data */
+		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+	}
+
 	if (err)
 		return err;
 
@@ -8074,7 +8065,7 @@
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
 	else if (tg3_flag(tp, 5717_PLUS))
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	else if (tg3_flag(tp, 57765_CLASS))
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
 	else
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8082,7 @@
 	else if (!tg3_flag(tp, 5705_PLUS))
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		 tg3_flag(tp, 57765_CLASS))
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
 	else
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8188,8 @@
 	if (!tg3_flag(tp, 5750_PLUS) ||
 	    tg3_flag(tp, 5780_CLASS) ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    tg3_flag(tp, 57765_PLUS))
 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@
 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 		return;
 
-	if (!tg3_flag(tp, 5705_PLUS))
-		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
-	else
-		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 
 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 
@@ -8231,6 +8220,54 @@
 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 }
 
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		tp->rss_ind_tbl[i] =
+			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+	int i;
+
+	if (!tg3_flag(tp, SUPPORT_MSIX))
+		return;
+
+	if (tp->irq_cnt <= 2) {
+		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+		return;
+	}
+
+	/* Validate table against current IRQ count */
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+			break;
+	}
+
+	if (i != TG3_RSS_INDIR_TBL_SIZE)
+		tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+	int i = 0;
+	u32 reg = MAC_RSS_INDIR_TBL_0;
+
+	while (i < TG3_RSS_INDIR_TBL_SIZE) {
+		u32 val = tp->rss_ind_tbl[i];
+		i++;
+		for (; i % 8; i++) {
+			val <<= 4;
+			val |= tp->rss_ind_tbl[i];
+		}
+		tw32(reg, val);
+		reg += 4;
+	}
+}
+
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
@@ -8337,7 +8374,7 @@
 		tw32(GRC_MODE, grc_mode);
 	}
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+	if (tg3_flag(tp, 57765_CLASS)) {
 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 			u32 grc_mode = tr32(GRC_MODE);
 
@@ -8425,7 +8462,7 @@
 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+		if (!tg3_flag(tp, 57765_CLASS) &&
 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8609,7 @@
 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 			     val | BDINFO_FLAGS_USE_EXT_RECV);
 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
-			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+			    tg3_flag(tp, 57765_CLASS))
 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 		} else {
@@ -8581,10 +8618,7 @@
 		}
 
 		if (tg3_flag(tp, 57765_PLUS)) {
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
-				val = TG3_RX_STD_MAX_SIZE_5700;
-			else
-				val = TG3_RX_STD_MAX_SIZE_5717;
+			val = TG3_RX_STD_RING_SIZE(tp);
 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
 			val |= (TG3_RX_STD_DMA_SZ << 2);
 		} else
@@ -8661,6 +8695,9 @@
 	if (tg3_flag(tp, PCI_EXPRESS))
 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+		rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
 	if (tg3_flag(tp, HW_TSO_1) ||
 	    tg3_flag(tp, HW_TSO_2) ||
 	    tg3_flag(tp, HW_TSO_3))
@@ -8924,28 +8961,7 @@
 	udelay(100);
 
 	if (tg3_flag(tp, ENABLE_RSS)) {
-		int i = 0;
-		u32 reg = MAC_RSS_INDIR_TBL_0;
-
-		if (tp->irq_cnt == 2) {
-			for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
-				tw32(reg, 0x0);
-				reg += 4;
-			}
-		} else {
-			u32 val;
-
-			while (i < TG3_RSS_INDIR_TBL_SIZE) {
-				val = i % (tp->irq_cnt - 1);
-				i++;
-				for (; i % 8; i++) {
-					val <<= 4;
-					val |= (i % (tp->irq_cnt - 1));
-				}
-				tw32(reg, val);
-				reg += 4;
-			}
-		}
+		tg3_rss_write_indir_tbl(tp);
 
 		/* Setup the "secret" hash key. */
 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9018,7 @@
 	/* Prevent chip from dropping frames when flow control
 	 * is enabled.
 	 */
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	if (tg3_flag(tp, 57765_CLASS))
 		val = 1;
 	else
 		val = 2;
@@ -9217,7 +9233,7 @@
 	spin_lock(&tp->lock);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	    tg3_flag(tp, 57765_CLASS))
 		tg3_chk_missed_msi(tp);
 
 	if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9669,6 +9685,8 @@
 	 */
 	tg3_ints_init(tp);
 
+	tg3_rss_check_indir_tbl(tp);
+
 	/* The placement of this call is tied
 	 * to the setup and use of Host TX descriptors.
 	 */
@@ -9700,8 +9718,8 @@
 		tg3_free_rings(tp);
 	} else {
 		if (tg3_flag(tp, TAGGED_STATUS) &&
-			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+		    !tg3_flag(tp, 57765_CLASS))
 			tp->timer_offset = HZ;
 		else
 			tp->timer_offset = HZ / 10;
@@ -9782,10 +9800,6 @@
 	return err;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
-						 struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
 static int tg3_close(struct net_device *dev)
 {
 	int i;
@@ -9817,10 +9831,9 @@
 
 	tg3_ints_fini(tp);
 
-	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
-	memcpy(&tp->estats_prev, tg3_get_estats(tp),
-	       sizeof(tp->estats_prev));
+	/* Clear stats across close / open calls */
+	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
 	tg3_napi_fini(tp);
 
@@ -9868,9 +9881,9 @@
 	estats->member =	old_estats->member + \
 				get_stat64(&hw_stats->member)
 
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+					       struct tg3_ethtool_stats *estats)
 {
-	struct tg3_ethtool_stats *estats = &tp->estats;
 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
@@ -10318,12 +10331,20 @@
 			cmd->advertising |= ADVERTISED_Asym_Pause;
 		}
 	}
-	if (netif_running(dev)) {
+	if (netif_running(dev) && netif_carrier_ok(dev)) {
 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
 		cmd->duplex = tp->link_config.active_duplex;
+		cmd->lp_advertising = tp->link_config.rmt_adv;
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+				cmd->eth_tp_mdix = ETH_TP_MDI_X;
+			else
+				cmd->eth_tp_mdix = ETH_TP_MDI;
+		}
 	} else {
 		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
 		cmd->duplex = DUPLEX_INVALID;
+		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 	}
 	cmd->phy_address = tp->phy_addr;
 	cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10449,10 @@
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
-	strcpy(info->fw_version, tp->fw_ver);
-	strcpy(info->bus_info, pci_name(tp->pdev));
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
 }
 
 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10611,12 @@
 
 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
 
-	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
 		epause->rx_pause = 1;
 	else
 		epause->rx_pause = 0;
 
-	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
 		epause->tx_pause = 1;
 	else
 		epause->tx_pause = 0;
@@ -10715,6 +10736,78 @@
 	}
 }
 
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			 u32 *rules __always_unused)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!tg3_flag(tp, SUPPORT_MSIX))
+		return -EOPNOTSUPP;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		if (netif_running(tp->dev))
+			info->data = tp->irq_cnt;
+		else {
+			info->data = num_online_cpus();
+			if (info->data > TG3_IRQ_MAX_VECS_RSS)
+				info->data = TG3_IRQ_MAX_VECS_RSS;
+		}
+
+		/* The first interrupt vector only
+		 * handles link interrupts.
+		 */
+		info->data -= 1;
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+	u32 size = 0;
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_flag(tp, SUPPORT_MSIX))
+		size = TG3_RSS_INDIR_TBL_SIZE;
+
+	return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		indir[i] = tp->rss_ind_tbl[i];
+
+	return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	size_t i;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		tp->rss_ind_tbl[i] = indir[i];
+
+	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+		return 0;
+
+	/* It is legal to write the indirection
+	 * table while the device is running.
+	 */
+	tg3_full_lock(tp, 0);
+	tg3_rss_write_indir_tbl(tp);
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	switch (stringset) {
@@ -10769,7 +10862,8 @@
 				   struct ethtool_stats *estats, u64 *tmp_stats)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+	tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
 }
 
 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11446,7 @@
 
 	if (tg3_flag(tp, 5717_PLUS))
 		mem_tbl = mem_tbl_5717;
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	else if (tg3_flag(tp, 57765_CLASS))
 		mem_tbl = mem_tbl_57765;
 	else if (tg3_flag(tp, 5755_PLUS))
 		mem_tbl = mem_tbl_5755;
@@ -11400,8 +11494,8 @@
 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
 	u32 budget;
-	struct sk_buff *skb, *rx_skb;
-	u8 *tx_data;
+	struct sk_buff *skb;
+	u8 *tx_data, *rx_data;
 	dma_addr_t map;
 	int num_pkts, tx_len, rx_len, i, err;
 	struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11663,11 @@
 		}
 
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+			rx_data = tpr->rx_std_buffers[desc_idx].data;
 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
 					     mapping);
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
 					     mapping);
 		} else
@@ -11582,15 +11676,16 @@
 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
 					    PCI_DMA_FROMDEVICE);
 
+		rx_data += TG3_RX_OFFSET(tp);
 		for (i = data_off; i < rx_len; i++, val++) {
-			if (*(rx_skb->data + i) != (u8) (val & 0xff))
+			if (*(rx_data + i) != (u8) (val & 0xff))
 				goto out;
 		}
 	}
 
 	err = 0;
 
-	/* tg3_free_rings will unmap and free the rx_skb */
+	/* tg3_free_rings will unmap and free the rx_data */
 out:
 	return err;
 }
@@ -11943,6 +12038,10 @@
 	.get_coalesce		= tg3_get_coalesce,
 	.set_coalesce		= tg3_set_coalesce,
 	.get_sset_count		= tg3_get_sset_count,
+	.get_rxnfc		= tg3_get_rxnfc,
+	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
+	.get_rxfh_indir		= tg3_get_rxfh_indir,
+	.set_rxfh_indir		= tg3_set_rxfh_indir,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12612,7 +12711,7 @@
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 			tg3_get_5906_nvram_info(tp);
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+			 tg3_flag(tp, 57765_CLASS))
 			tg3_get_57780_nvram_info(tp);
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13317,7 @@
 
 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
 {
-	u32 adv = ADVERTISED_Autoneg |
-		  ADVERTISED_Pause;
+	u32 adv = ADVERTISED_Autoneg;
 
 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 		adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13420,7 @@
 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
 	    !tg3_flag(tp, ENABLE_APE) &&
 	    !tg3_flag(tp, ENABLE_ASF)) {
-		u32 bmsr, mask;
+		u32 bmsr, dummy;
 
 		tg3_readphy(tp, MII_BMSR, &bmsr);
 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13433,7 @@
 
 		tg3_phy_set_wirespeed(tp);
 
-		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
-		if (!tg3_copper_is_advertising_all(tp, mask)) {
+		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
 					    tp->link_config.flowctrl);
 
@@ -13460,6 +13555,17 @@
 			strcpy(tp->board_part_number, "BCM57795");
 		else
 			goto nomatch;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+			strcpy(tp->board_part_number, "BCM57762");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+			strcpy(tp->board_part_number, "BCM57766");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+			strcpy(tp->board_part_number, "BCM57782");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+			strcpy(tp->board_part_number, "BCM57786");
+		else
+			goto nomatch;
 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 		strcpy(tp->board_part_number, "BCM95906");
 	} else {
@@ -13798,7 +13904,11 @@
 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
-			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
 			pci_read_config_dword(tp->pdev,
 					      TG3PCI_GEN15_PRODID_ASICREV,
 					      &prod_id_asic_rev);
@@ -13945,7 +14055,10 @@
 		tg3_flag_set(tp, 5717_PLUS);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
-	    tg3_flag(tp, 5717_PLUS))
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+		tg3_flag_set(tp, 57765_CLASS);
+
+	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
 		tg3_flag_set(tp, 57765_PLUS);
 
 	/* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14110,13 @@
 	if (tg3_flag(tp, HW_TSO_1) ||
 	    tg3_flag(tp, HW_TSO_2) ||
 	    tg3_flag(tp, HW_TSO_3) ||
-	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+	    tp->fw_needed) {
+		/* For firmware TSO, assume ASF is disabled.
+		 * We'll disable TSO later if we discover ASF
+		 * is enabled in tg3_get_eeprom_hw_cfg().
+		 */
 		tg3_flag_set(tp, TSO_CAPABLE);
-	else {
+	} else {
 		tg3_flag_clear(tp, TSO_CAPABLE);
 		tg3_flag_clear(tp, TSO_BUG);
 		tp->fw_needed = NULL;
@@ -14027,6 +14144,7 @@
 		if (tg3_flag(tp, 57765_PLUS)) {
 			tg3_flag_set(tp, SUPPORT_MSIX);
 			tp->irq_max = TG3_IRQ_MAX_VECS;
+			tg3_rss_init_dflt_indir_tbl(tp);
 		}
 	}
 
@@ -14034,9 +14152,13 @@
 		tg3_flag_set(tp, SHORT_DMA_BUG);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
-		tg3_flag_set(tp, 4K_FIFO_LIMIT);
+		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+		tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
 
-	if (tg3_flag(tp, 5717_PLUS))
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
 
 	if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14178,11 @@
 
 		tg3_flag_set(tp, PCI_EXPRESS);
 
-		tp->pcie_readrq = 4096;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
-			tp->pcie_readrq = 2048;
-
-		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+			int readrq = pcie_get_readrq(tp->pdev);
+			if (readrq > 2048)
+				pcie_set_readrq(tp->pdev, 2048);
+		}
 
 		pci_read_config_word(tp->pdev,
 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14394,12 @@
 	 */
 	tg3_get_eeprom_hw_cfg(tp);
 
+	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+		tg3_flag_clear(tp, TSO_CAPABLE);
+		tg3_flag_clear(tp, TSO_BUG);
+		tp->fw_needed = NULL;
+	}
+
 	if (tg3_flag(tp, ENABLE_APE)) {
 		/* Allow reads and writes to the
 		 * APE register and memory space.
@@ -14311,7 +14438,7 @@
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	    tg3_flag(tp, 57765_CLASS))
 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 
 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14675,11 @@
 	else
 		tg3_flag_clear(tp, POLL_SERDES);
 
-	tp->rx_offset = NET_IP_ALIGN;
+	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 	    tg3_flag(tp, PCIX_MODE)) {
-		tp->rx_offset = 0;
+		tp->rx_offset = NET_SKB_PAD;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 		tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -15313,7 +15440,7 @@
 	u32 sndmbx, rcvmbx, intmbx;
 	char str[40];
 	u64 dma_mask, persist_dma_mask;
-	u32 features = 0;
+	netdev_features_t features = 0;
 
 	printk_once(KERN_INFO "%s\n", version);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 94b4bd0..aea8f72 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -31,6 +31,8 @@
 #define TG3_RX_RET_MAX_SIZE_5705	512
 #define TG3_RX_RET_MAX_SIZE_5717	4096
 
+#define TG3_RSS_INDIR_TBL_SIZE		128
+
 /* First 256 bytes are a mirror of PCI config space. */
 #define TG3PCI_VENDOR			0x00000000
 #define  TG3PCI_VENDOR_BROADCOM		 0x14e4
@@ -57,6 +59,10 @@
 #define  TG3PCI_DEVICE_TIGON3_57795	 0x16b6
 #define  TG3PCI_DEVICE_TIGON3_5719	 0x1657
 #define  TG3PCI_DEVICE_TIGON3_5720	 0x165f
+#define  TG3PCI_DEVICE_TIGON3_57762	 0x1682
+#define  TG3PCI_DEVICE_TIGON3_57766	 0x1686
+#define  TG3PCI_DEVICE_TIGON3_57786	 0x16b3
+#define  TG3PCI_DEVICE_TIGON3_57782	 0x16b7
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644
@@ -168,6 +174,7 @@
 #define   ASIC_REV_57765		 0x57785
 #define   ASIC_REV_5719			 0x5719
 #define   ASIC_REV_5720			 0x5720
+#define   ASIC_REV_57766		 0x57766
 #define  GET_CHIP_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX		 0x70
 #define   CHIPREV_5700_BX		 0x71
@@ -1340,6 +1347,7 @@
 #define  RDMAC_MODE_MBUF_SBD_CRPT_ENAB	 0x00002000
 #define  RDMAC_MODE_FIFO_SIZE_128	 0x00020000
 #define  RDMAC_MODE_FIFO_LONG_BURST	 0x00030000
+#define  RDMAC_MODE_JMB_2K_MMRR		 0x00800000
 #define  RDMAC_MODE_MULT_DMA_RD_DIS	 0x01000000
 #define  RDMAC_MODE_IPV4_LSO_EN		 0x08000000
 #define  RDMAC_MODE_IPV6_LSO_EN		 0x10000000
@@ -2174,6 +2182,7 @@
 #define  MII_TG3_EXT_CTRL_TBI		0x8000
 
 #define MII_TG3_EXT_STAT		0x11 /* Extended status register */
+#define  MII_TG3_EXT_STAT_MDIX		0x2000
 #define  MII_TG3_EXT_STAT_LPASS		0x0100
 
 #define MII_TG3_RXR_COUNTERS		0x14 /* Local/Remote Receiver Counts */
@@ -2277,6 +2286,9 @@
 #define  MII_TG3_FET_PTEST_FRC_TX_LINK	0x1000
 #define  MII_TG3_FET_PTEST_FRC_TX_LOCK	0x0800
 
+#define MII_TG3_FET_GEN_STAT		0x1c
+#define  MII_TG3_FET_GEN_STAT_MDIXSTAT	0x2000
+
 #define MII_TG3_FET_TEST		0x1f
 #define  MII_TG3_FET_SHADOW_EN		0x0080
 
@@ -2662,9 +2674,13 @@
 /* 'mapping' is superfluous as the chip does not write into
  * the tx/rx post rings so we could just fetch it from there.
  * But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer holds a pointer to kmalloc()'d data only,
+ * and skbs are built only after the hardware has filled the frame.
  */
 struct ring_info {
-	struct sk_buff			*skb;
+	u8				*data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -2690,6 +2706,7 @@
 #define DUPLEX_INVALID		0xff
 #define AUTONEG_INVALID		0xff
 	u16				active_speed;
+	u32				rmt_adv;
 
 	/* When we go in and out of low power mode we need
 	 * to swap with this state.
@@ -2865,6 +2882,8 @@
 	TG3_FLAG_NVRAM_BUFFERED,
 	TG3_FLAG_SUPPORT_MSI,
 	TG3_FLAG_SUPPORT_MSIX,
+	TG3_FLAG_USING_MSI,
+	TG3_FLAG_USING_MSIX,
 	TG3_FLAG_PCIX_MODE,
 	TG3_FLAG_PCI_HIGH_SPEED,
 	TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@
 	TG3_FLAG_CHIP_RESETTING,
 	TG3_FLAG_INIT_COMPLETE,
 	TG3_FLAG_TSO_BUG,
-	TG3_FLAG_IS_5788,
 	TG3_FLAG_MAX_RXPEND_64,
 	TG3_FLAG_TSO_CAPABLE,
 	TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@
 	TG3_FLAG_IS_NIC,
 	TG3_FLAG_FLASH,
 	TG3_FLAG_HW_TSO_1,
-	TG3_FLAG_5705_PLUS,
-	TG3_FLAG_5750_PLUS,
-	TG3_FLAG_HW_TSO_3,
-	TG3_FLAG_USING_MSI,
-	TG3_FLAG_USING_MSIX,
-	TG3_FLAG_ICH_WORKAROUND,
-	TG3_FLAG_5780_CLASS,
 	TG3_FLAG_HW_TSO_2,
+	TG3_FLAG_HW_TSO_3,
+	TG3_FLAG_ICH_WORKAROUND,
 	TG3_FLAG_1SHOT_MSI,
 	TG3_FLAG_NO_FWARE_REPORTED,
 	TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@
 	TG3_FLAG_RGMII_EXT_IBND_RX_EN,
 	TG3_FLAG_RGMII_EXT_IBND_TX_EN,
 	TG3_FLAG_CLKREQ_BUG,
-	TG3_FLAG_5755_PLUS,
 	TG3_FLAG_NO_NVRAM,
 	TG3_FLAG_ENABLE_RSS,
 	TG3_FLAG_ENABLE_TSS,
 	TG3_FLAG_SHORT_DMA_BUG,
 	TG3_FLAG_USE_JUMBO_BDFLAG,
 	TG3_FLAG_L1PLLPD_EN,
-	TG3_FLAG_57765_PLUS,
 	TG3_FLAG_APE_HAS_NCSI,
-	TG3_FLAG_5717_PLUS,
 	TG3_FLAG_4K_FIFO_LIMIT,
 	TG3_FLAG_RESET_TASK_PENDING,
+	TG3_FLAG_5705_PLUS,
+	TG3_FLAG_IS_5788,
+	TG3_FLAG_5750_PLUS,
+	TG3_FLAG_5780_CLASS,
+	TG3_FLAG_5755_PLUS,
+	TG3_FLAG_57765_PLUS,
+	TG3_FLAG_57765_CLASS,
+	TG3_FLAG_5717_PLUS,
 
 	/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
 	TG3_FLAG_NUMBER_OF_FLAGS,	/* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@
 	/* begin "tx thread" cacheline section */
 	void				(*write32_tx_mbox) (struct tg3 *, u32,
 							    u32);
+	u32				dma_limit;
 
 	/* begin "rx thread" cacheline section */
 	struct tg3_napi			napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@
 	unsigned long			rx_dropped;
 	unsigned long			tx_dropped;
 	struct rtnl_link_stats64	net_stats_prev;
-	struct tg3_ethtool_stats	estats;
 	struct tg3_ethtool_stats	estats_prev;
 
 	DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@
 #define TG3_PHYFLG_SERDES_PREEMPHASIS	0x00010000
 #define TG3_PHYFLG_PARALLEL_DETECT	0x00020000
 #define TG3_PHYFLG_EEE_CAP		0x00040000
+#define TG3_PHYFLG_MDIX_STATE		0x00200000
 
 	u32				led_ctrl;
 	u32				phy_otp;
 	u32				setlpicnt;
+	u8				rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
 
 #define TG3_BPN_SIZE			24
 	char				board_part_number[TG3_BPN_SIZE];
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 74d3abc..6027302 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_BNA) += bna.o
 
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
 
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 8e62718..29f284f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -185,6 +185,41 @@
 }
 
 /**
+ * bfa_cee_get_attr()
+ *
+ * @brief	Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in]	cee	Pointer to the CEE module data structure.
+ *
+ * @return	Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+		    bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_cee_get_req *cmd;
+
+	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+	if (!bfa_nw_ioc_is_operational(cee->ioc))
+		return BFA_STATUS_IOC_FAILURE;
+
+	if (cee->get_attr_pending == true)
+		return  BFA_STATUS_DEVBUSY;
+
+	cee->get_attr_pending = true;
+	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+	cee->attr = attr;
+	cee->cbfn.get_attr_cbfn = cbfn;
+	cee->cbfn.get_attr_cbarg = cbarg;
+	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+		    bfa_ioc_portid(cee->ioc));
+	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+	bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+	return BFA_STATUS_OK;
+}
+
+/**
  * bfa_cee_isrs()
  *
  * @brief Handles Mail-box interrupts for CEE module.
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 58d54e9..93fde63 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -59,5 +59,7 @@
 void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
 	u64 dma_pa);
 void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+				struct bfa_cee_attr *attr,
+				bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
 #endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 2f12d68..871c630 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -219,41 +219,39 @@
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block {
-	u8		version;	/*!< manufacturing block version */
-	u8		mfg_sig[3];	/*!< characters 'M', 'F', 'G' */
-	u16	mfgsize;	/*!< mfg block size */
-	u16	u16_chksum;	/*!< old u16 checksum */
-	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
-	char		brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
-	u8		mfg_day;	/*!< manufacturing day */
-	u8		mfg_month;	/*!< manufacturing month */
-	u16	mfg_year;	/*!< manufacturing year */
-	u64		mfg_wwn;	/*!< wwn base for this adapter */
-	u8		num_wwn;	/*!< number of wwns assigned */
-	u8		mfg_speeds;	/*!< speeds allowed for this adapter */
-	u8		rsv[2];
-	char		supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
-	char		supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
-	char
-		supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
-	char
-		supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
-	mac_t		mfg_mac;	/*!< mac address */
-	u8		num_mac;	/*!< number of mac addresses */
-	u8		rsv2;
-	u32		card_type;	/*!< card type */
-	char		cap_nic;	/*!< capability nic */
-	char		cap_cna;	/*!< capability cna */
-	char		cap_hba;	/*!< capability hba */
-	char		cap_fc16g;	/*!< capability fc 16g */
-	char		cap_sriov;	/*!< capability sriov */
-	char		cap_mezz;	/*!< capability mezz */
-	u8		rsv3;
-	u8		mfg_nports;	/*!< number of ports */
-	char		media[8];	/*!< xfi/xaui */
-	char		initial_mode[8];/*!< initial mode: hba/cna/nic */
-	u8		rsv4[84];
-	u8		md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+	u8	version;	/* manufacturing block version */
+	u8	mfg_sig[3];	/* characters 'M', 'F', 'G' */
+	u16	mfgsize;	/* mfg block size */
+	u16	u16_chksum;	/* old u16 checksum */
+	char	brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+	char	brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+	u8	mfg_day;	/* manufacturing day */
+	u8	mfg_month;	/* manufacturing month */
+	u16	mfg_year;	/* manufacturing year */
+	u64	mfg_wwn;	/* wwn base for this adapter */
+	u8	num_wwn;	/* number of wwns assigned */
+	u8	mfg_speeds;	/* speeds allowed for this adapter */
+	u8	rsv[2];
+	char	supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+	char	supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+	char	supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+	char	supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+	mac_t	mfg_mac;	/* base mac address */
+	u8	num_mac;	/* number of mac addresses */
+	u8	rsv2;
+	u32	card_type;	/* card type          */
+	char	cap_nic;	/* capability nic     */
+	char	cap_cna;	/* capability cna     */
+	char	cap_hba;	/* capability hba     */
+	char	cap_fc16g;	/* capability fc 16g      */
+	char	cap_sriov;	/* capability sriov       */
+	char	cap_mezz;	/* capability mezz        */
+	u8	rsv3;
+	u8	mfg_nports;	/* number of ports        */
+	char	media[8];	/* xfi/xaui           */
+	char	initial_mode[8]; /* initial mode: hba/cna/nic */
+	u8	rsv4[84];
+	u8	md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
 };
 
 #pragma pack()
@@ -293,4 +291,34 @@
 	BFA_MODE_NIC		= 3
 };
 
+/*
+ *	Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE	32	/* partition entry size */
+#define BFA_FLASH_PART_MAX		32	/* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE		0x400000
+#define BFA_FLASH_PART_MFG		7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+	u32	part_type;	/* partition type */
+	u32	part_instance;	/* partition instance */
+	u32	part_off;	/* partition offset */
+	u32	part_size;	/* partition size */
+	u32	part_len;	/* partition content length */
+	u32	part_status;	/* partition status */
+	char	rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+	u32	status;	/* flash overall status */
+	u32	npart;  /* num of partitions */
+	struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
 #endif /* __BFA_DEFS_H__ */
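For reference, each bfa_flash_part_attr entry above occupies exactly BFA_FLASH_PART_ENTRY_SIZE bytes: the six u32 fields take 24 bytes and the reserved tail pads the entry to 32, so the whole bfa_flash_attr table works out to 2 * 4 + 32 * 32 = 1032 bytes of DMA-readable attribute data.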
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b0307a0..abfad27 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -74,6 +74,7 @@
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@
 static void
 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
 {
+	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
 	bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -1743,6 +1745,114 @@
 		bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in]  ioc     memory for IOC
+ * @param[in]  tbuf    app memory to store data from smem
+ * @param[in]  soff    smem offset
+ * @param[in]  sz      number of bytes to read from smem
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+	u32 pgnum, loff, r32;
+	int i, len;
+	u32 *buf = tbuf;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+	loff = PSS_SMEM_PGOFF(soff);
+
+	/*
+	 *  Hold semaphore to serialize pll init and fwtrc.
+	 */
+	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+		return 1;
+
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	len = sz/sizeof(u32);
+	for (i = 0; i < len; i++) {
+		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+		buf[i] = be32_to_cpu(r32);
+		loff += sizeof(u32);
+
+		/**
+		 * handle page offset wrap around
+		 */
+		loff = PSS_SMEM_PGOFF(loff);
+		if (loff == 0) {
+			pgnum++;
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+		}
+	}
+
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+	       ioc->ioc_regs.host_page_num_fn);
+
+	/*
+	 * release semaphore
+	 */
+	readl(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+	return 0;
+}
+
+/**
+ * Retrieve the current firmware trace from IOC memory (SMEM).
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+	u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+	int tlen, status = 0;
+
+	tlen = *trclen;
+	if (tlen > BNA_DBG_FWTRC_LEN)
+		tlen = BNA_DBG_FWTRC_LEN;
+
+	status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+	*trclen = tlen;
+	return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+	int tlen;
+
+	if (ioc->dbg_fwsave_once) {
+		ioc->dbg_fwsave_once = 0;
+		if (ioc->dbg_fwsave_len) {
+			tlen = ioc->dbg_fwsave_len;
+			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+		}
+	}
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+	int tlen;
+
+	if (ioc->dbg_fwsave_len == 0)
+		return BFA_STATUS_ENOFSAVE;
+
+	tlen = *trclen;
+	if (tlen > ioc->dbg_fwsave_len)
+		tlen = ioc->dbg_fwsave_len;
+
+	memcpy(trcdata, ioc->dbg_fwsave, tlen);
+	*trclen = tlen;
+	return BFA_STATUS_OK;
+}
+
 static void
 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
 {
@@ -1751,6 +1861,7 @@
 	 */
 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
 	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+	bfa_nw_ioc_debug_save_ftrc(ioc);
 }
 
 /**
@@ -2058,6 +2169,16 @@
 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+	ioc->dbg_fwsave = dbg_fwsave;
+	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
 static u32
 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
 {
@@ -2172,6 +2293,15 @@
 }
 
 /**
+ * return true if IOC is operational
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+/**
  * Add to IOC heartbeat failure notification queue. To be used by common
  * modules such as cee, port, diag.
  */
@@ -2471,3 +2601,366 @@
 			msecs_to_jiffies(BFA_IOC_POLL_TOV));
 	}
 }
+
+/*
+ *	Flash module specific
+ */
+
+/*
+ * The FLASH DMA buffer should be big enough to hold both the MFG block and
+ * the ASIC block (64k) at the same time, and should also be 2k aligned so
+ * that a write segment does not cross a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ	2048
+#define BFA_FLASH_DMA_BUF_SZ	\
+	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+	flash->op_busy = 0;
+	if (flash->cbfn)
+		flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+	struct bfa_flash *flash = cbarg;
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (flash->op_busy) {
+			flash->status = BFA_STATUS_IOC_FAILURE;
+			flash->cbfn(flash->cbarg, flash->status);
+			flash->op_busy = 0;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] flash - flash structure
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+	struct bfi_flash_write_req *msg =
+			(struct bfi_flash_write_req *) flash->mb.msg;
+	u32	len;
+
+	msg->type = be32_to_cpu(flash->type);
+	msg->instance = flash->instance;
+	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
+	msg->length = be32_to_cpu(len);
+
+	/* indicate if it's the last msg of the whole write operation */
+	msg->last = (len == flash->residue) ? 1 : 0;
+
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+		    bfa_ioc_portid(flash->ioc));
+	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+	flash->residue -= len;
+	flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+	struct bfa_flash *flash = cbarg;
+	struct bfi_flash_read_req *msg =
+			(struct bfi_flash_read_req *) flash->mb.msg;
+	u32	len;
+
+	msg->type = be32_to_cpu(flash->type);
+	msg->instance = flash->instance;
+	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
+	msg->length = be32_to_cpu(len);
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+		    bfa_ioc_portid(flash->ioc));
+	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+	struct bfa_flash *flash = flasharg;
+	u32	status;
+
+	union {
+		struct bfi_flash_query_rsp *query;
+		struct bfi_flash_write_rsp *write;
+		struct bfi_flash_read_rsp *read;
+		struct bfi_mbmsg   *msg;
+	} m;
+
+	m.msg = msg;
+
+	/* receiving response after ioc failure */
+	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+		return;
+
+	switch (msg->mh.msg_id) {
+	case BFI_FLASH_I2H_QUERY_RSP:
+		status = be32_to_cpu(m.query->status);
+		if (status == BFA_STATUS_OK) {
+			u32	i;
+			struct bfa_flash_attr *attr, *f;
+
+			attr = (struct bfa_flash_attr *) flash->ubuf;
+			f = (struct bfa_flash_attr *) flash->dbuf_kva;
+			attr->status = be32_to_cpu(f->status);
+			attr->npart = be32_to_cpu(f->npart);
+			for (i = 0; i < attr->npart; i++) {
+				attr->part[i].part_type =
+					be32_to_cpu(f->part[i].part_type);
+				attr->part[i].part_instance =
+					be32_to_cpu(f->part[i].part_instance);
+				attr->part[i].part_off =
+					be32_to_cpu(f->part[i].part_off);
+				attr->part[i].part_size =
+					be32_to_cpu(f->part[i].part_size);
+				attr->part[i].part_len =
+					be32_to_cpu(f->part[i].part_len);
+				attr->part[i].part_status =
+					be32_to_cpu(f->part[i].part_status);
+			}
+		}
+		flash->status = status;
+		bfa_flash_cb(flash);
+		break;
+	case BFI_FLASH_I2H_WRITE_RSP:
+		status = be32_to_cpu(m.write->status);
+		if (status != BFA_STATUS_OK || flash->residue == 0) {
+			flash->status = status;
+			bfa_flash_cb(flash);
+		} else
+			bfa_flash_write_send(flash);
+		break;
+	case BFI_FLASH_I2H_READ_RSP:
+		status = be32_to_cpu(m.read->status);
+		if (status != BFA_STATUS_OK) {
+			flash->status = status;
+			bfa_flash_cb(flash);
+		} else {
+			u32 len = be32_to_cpu(m.read->length);
+			memcpy(flash->ubuf + flash->offset,
+			       flash->dbuf_kva, len);
+			flash->residue -= len;
+			flash->offset += len;
+			if (flash->residue == 0) {
+				flash->status = status;
+				bfa_flash_cb(flash);
+			} else
+				bfa_flash_read_send(flash);
+		}
+		break;
+	case BFI_FLASH_I2H_BOOT_VER_RSP:
+	case BFI_FLASH_I2H_EVENT:
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+	flash->ioc = ioc;
+	flash->cbfn = NULL;
+	flash->cbarg = NULL;
+	flash->op_busy = 0;
+
+	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+	bfa_q_qe_init(&flash->ioc_notify);
+	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+	flash->dbuf_kva = dm_kva;
+	flash->dbuf_pa = dm_pa;
+	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+	dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+	dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+		      bfa_cb_flash cbfn, void *cbarg)
+{
+	struct bfi_flash_query_req *msg =
+			(struct bfi_flash_query_req *) flash->mb.msg;
+
+	if (!bfa_nw_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (flash->op_busy)
+		return BFA_STATUS_DEVBUSY;
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->ubuf = (u8 *) attr;
+
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+		    bfa_ioc_portid(flash->ioc));
+	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+			 void *buf, u32 len, u32 offset,
+			 bfa_cb_flash cbfn, void *cbarg)
+{
+	if (!bfa_nw_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * 'len' must be in word (4-byte) boundary
+	 */
+	if (!len || (len & 0x03))
+		return BFA_STATUS_FLASH_BAD_LEN;
+
+	if (type == BFA_FLASH_PART_MFG)
+		return BFA_STATUS_EINVAL;
+
+	if (flash->op_busy)
+		return BFA_STATUS_DEVBUSY;
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->type = type;
+	flash->instance = instance;
+	flash->residue = len;
+	flash->offset = 0;
+	flash->addr_off = offset;
+	flash->ubuf = buf;
+
+	bfa_flash_write_send(flash);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+		       void *buf, u32 len, u32 offset,
+		       bfa_cb_flash cbfn, void *cbarg)
+{
+	if (!bfa_nw_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * 'len' must be in word (4-byte) boundary
+	 */
+	if (!len || (len & 0x03))
+		return BFA_STATUS_FLASH_BAD_LEN;
+
+	if (flash->op_busy)
+		return BFA_STATUS_DEVBUSY;
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->type = type;
+	flash->instance = instance;
+	flash->residue = len;
+	flash->offset = 0;
+	flash->addr_off = offset;
+	flash->ubuf = buf;
+
+	bfa_flash_read_send(flash);
+
+	return BFA_STATUS_OK;
+}
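The flash read and write paths above never move more than BFA_FLASH_DMA_BUF_SZ bytes per mailbox request; the residue/offset pair walks the caller's buffer and the response handler re-issues the next request until residue reaches zero, with msg->last flagging the final write. A minimal stand-alone C sketch of that chunking pattern (illustrative only, with a made-up chunk size and none of the mailbox or DMA plumbing):

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	#define CHUNK_SZ 2048	/* stand-in for BFA_FLASH_DMA_BUF_SZ */

	/* Copy 'len' bytes in CHUNK_SZ pieces, mirroring the residue/offset
	 * bookkeeping of bfa_flash_write_send()/bfa_flash_intr(). */
	static void chunked_copy(unsigned char *dst, const unsigned char *src,
				 size_t len)
	{
		size_t residue = len, offset = 0;

		while (residue) {
			size_t n = residue < CHUNK_SZ ? residue : CHUNK_SZ;
			int last = (n == residue);	/* like msg->last */

			memcpy(dst + offset, src + offset, n);
			residue -= n;
			offset += n;
			printf("chunk of %zu bytes at offset %zu%s\n",
			       n, offset - n, last ? " (last)" : "");
		}
	}

	int main(void)
	{
		static unsigned char src[5000], dst[5000];

		chunked_copy(dst, src, sizeof(src));
		return 0;
	}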
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index ca158d1..3b4460f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -27,6 +27,8 @@
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
 #define BFA_IOC_POLL_TOV	200	/* msecs */
+#define BNA_DBG_FWTRC_LEN      (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+				BFI_IOC_TRC_HDR_SZ)
 
 /**
  * PCI device information required by IOC
@@ -68,6 +70,16 @@
 	dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
 }
 
+#define bfa_alen_set(__alen, __len, __pa)	\
+	__bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+	alen->al_len = cpu_to_be32(len);
+	bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
 struct bfa_ioc_regs {
 	void __iomem *hfn_mbox_cmd;
 	void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@
 
 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
 bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
 void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
 	struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@
 bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
 			struct bfi_ioc_image_hdr *fwhdr);
 mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
 
 /*
  * Timeout APIs
@@ -322,4 +338,42 @@
 u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
 u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
 
+/*
+ *	Flash module specific
+ */
+typedef void	(*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+	struct bfa_ioc *ioc;		/* back pointer to ioc */
+	u32		type;		/* partition type */
+	u8		instance;	/* partition instance */
+	u8		rsv[3];
+	u32		op_busy;	/*  operation busy flag */
+	u32		residue;	/*  residual length */
+	u32		offset;		/*  offset */
+	enum bfa_status	status;		/*  status */
+	u8		*dbuf_kva;	/*  dma buf virtual address */
+	u64		dbuf_pa;	/*  dma buf physical address */
+	bfa_cb_flash	cbfn;		/*  user callback function */
+	void		*cbarg;		/*  user callback arg */
+	u8		*ubuf;		/*  user supplied buffer */
+	u32		addr_off;	/*  partition address offset */
+	struct bfa_mbox_cmd mb;		/*  mailbox */
+	struct bfa_ioc_notify ioc_notify; /*  ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+			struct bfa_flash_attr *attr,
+			bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+			u32 type, u8 instance, void *buf, u32 len, u32 offset,
+			bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+			u32 type, u8 instance, void *buf, u32 len, u32 offset,
+			bfa_cb_flash cbfn, void *cbarg);
+u32	bfa_nw_flash_meminfo(void);
+void	bfa_nw_flash_attach(struct bfa_flash *flash,
+			    struct bfa_ioc *ioc, void *dev);
+void	bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
 #endif /* __BFA_IOC_H__ */
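Plugging in the trace constants from bfi.h (256 entries of 16 bytes plus a 32-byte header), BNA_DBG_FWTRC_LEN works out to 256 * 16 + 32 = 4128 bytes, which is the size of the host trace buffer claimed by bfa_nw_ioc_debug_memclaim() and requested via BNA_RES_MEM_T_FWTRC.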
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 7a1393a..0d9df69 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -83,6 +83,14 @@
 	} a32;
 };
 
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+	union bfi_addr_u	al_addr;	/* DMA addr of buffer	*/
+	u32			al_len;		/* length of buffer */
+};
+
 /*
  * Large Message structure - 128 Bytes size Msgs
  */
@@ -249,6 +257,8 @@
  */
 #define BFI_IOC_TRC_OFF		(0x4b00)
 #define BFI_IOC_TRC_ENTS	256
+#define BFI_IOC_TRC_ENT_SZ	16
+#define BFI_IOC_TRC_HDR_SZ	32
 
 #define BFI_IOC_FW_SIGNATURE	(0xbfadbfad)
 #define BFI_IOC_MD5SUM_SZ	4
@@ -476,6 +486,93 @@
 	u16     len;
 };
 
+/*
+ *      FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+	BFI_FLASH_H2I_QUERY_REQ = 1,
+	BFI_FLASH_H2I_ERASE_REQ = 2,
+	BFI_FLASH_H2I_WRITE_REQ = 3,
+	BFI_FLASH_H2I_READ_REQ = 4,
+	BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+	BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+	BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+	BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+	BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+	BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+	BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+	struct bfi_mhdr mh;   /* Common msg header */
+	struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+	struct bfi_mhdr mh;	/* Common msg header */
+	struct bfi_alen alen;
+	u32	type;   /* partition type */
+	u8	instance; /* partition instance */
+	u8	last;
+	u8	rsv[2];
+	u32	offset;
+	u32	length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+	struct bfi_mhdr mh;	/* Common msg header */
+	u32	type;		/* partition type */
+	u8	instance;	/* partition instance */
+	u8	rsv[3];
+	u32	offset;
+	u32	length;
+	struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+	struct bfi_mhdr mh;	/* Common msg header */
+	u32	status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+	struct bfi_mhdr mh;	/* Common msg header */
+	u32	type;		/* partition type */
+	u8	instance;	/* partition instance */
+	u8	rsv[3];
+	u32	status;
+	u32	length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+	struct bfi_mhdr mh;	/* Common msg header */
+	u32	type;		/* partition type */
+	u8	instance;	/* partition instance */
+	u8	rsv[3];
+	u32	status;
+	u32	length;
+};
+
 #pragma pack()
 
 #endif /* __BFI_H__ */
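The three new flash mailbox requests share one pattern: a common bfi_mhdr, the partition type/instance being addressed, and a bfi_alen descriptor giving the DMA address and length of the host staging buffer; the responses echo the type/instance and report a 32-bit status plus the transferred length.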
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 26f5c5a..9ccc586 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1727,6 +1727,7 @@
 	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
 
 	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
 
 	/**
 	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@
 	kva += bfa_nw_cee_meminfo();
 	dma += bfa_nw_cee_meminfo();
 
+	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+	kva += bfa_nw_flash_meminfo();
+	dma += bfa_nw_flash_meminfo();
+
 	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
 	bfa_msgq_memclaim(&bna->msgq, kva, dma);
 	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
 				(bfa_nw_cee_meminfo() +
-				bfa_msgq_meminfo()), PAGE_SIZE);
+				 bfa_nw_flash_meminfo() +
+				 bfa_msgq_meminfo()), PAGE_SIZE);
 
 	/* DMA memory for retrieving IOC attributes */
 	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@
 	/* Virtual memory for retrieving fw_trc */
 	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
 	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
-	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
-	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
 
 	/* DMA memory for retrieving stats */
 	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d090fbf..e8d3ab7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -427,7 +427,7 @@
 
 /* Doorbell structure */
 struct bna_ib_dbell {
-	void *__iomem doorbell_addr;
+	void __iomem   *doorbell_addr;
 	u32		doorbell_ack;
 };
 
@@ -463,7 +463,7 @@
 	u32		consumer_index;
 	volatile u32	*hw_consumer_index;
 	u32		q_depth;
-	void *__iomem q_dbell;
+	void __iomem   *q_dbell;
 	struct bna_ib_dbell *i_dbell;
 	int			page_idx;
 	int			page_count;
@@ -599,7 +599,7 @@
 	u32		producer_index;
 	u32		consumer_index;
 	u32		q_depth;
-	void *__iomem q_dbell;
+	void __iomem   *q_dbell;
 	int			page_idx;
 	int			page_count;
 	/* Control path */
@@ -966,6 +966,7 @@
 
 	struct bna_ioceth ioceth;
 	struct bfa_cee cee;
+	struct bfa_flash flash;
 	struct bfa_msgq msgq;
 
 	struct bna_ethport ethport;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7f3091e..be7d91e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -44,11 +44,18 @@
 module_param(bnad_ioc_auto_recover, uint, 0444);
 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
 
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+		 " Range[false:0|true:1]");
+
 /*
  * Global variables
  */
 u32 bnad_rxqs_per_cq = 2;
-
+static u32 bna_id;
+static struct mutex bnad_list_mutex;
+static LIST_HEAD(bnad_list);
 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
 /*
@@ -75,6 +82,23 @@
 
 #define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
 
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+	mutex_lock(&bnad_list_mutex);
+	list_add_tail(&bnad->list_entry, &bnad_list);
+	bnad->id = bna_id++;
+	mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+	mutex_lock(&bnad_list_mutex);
+	list_del(&bnad->list_entry);
+	mutex_unlock(&bnad_list_mutex);
+}
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -723,7 +747,7 @@
 bnad_cb_ethport_link_status(struct bnad *bnad,
 			enum bna_link_status link_status)
 {
-	bool link_up = 0;
+	bool link_up = false;
 
 	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 
@@ -1084,6 +1108,16 @@
 	complete(&bnad->bnad_completions.mtu_comp);
 }
 
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+	struct bnad_iocmd_comp *iocmd_comp =
+			(struct bnad_iocmd_comp *)arg;
+
+	iocmd_comp->comp_status = (u32) status;
+	complete(&iocmd_comp->comp);
+}
+
 /* Resource allocation, free functions */
 
 static void
@@ -2968,7 +3002,7 @@
 	return err;
 }
 
-static void
+static int
 bnad_vlan_rx_add_vid(struct net_device *netdev,
 				 unsigned short vid)
 {
@@ -2976,7 +3010,7 @@
 	unsigned long flags;
 
 	if (!bnad->rx_info[0].rx)
-		return;
+		return 0;
 
 	mutex_lock(&bnad->conf_mutex);
 
@@ -2986,9 +3020,11 @@
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
 }
 
-static void
+static int
 bnad_vlan_rx_kill_vid(struct net_device *netdev,
 				  unsigned short vid)
 {
@@ -2996,7 +3032,7 @@
 	unsigned long flags;
 
 	if (!bnad->rx_info[0].rx)
-		return;
+		return 0;
 
 	mutex_lock(&bnad->conf_mutex);
 
@@ -3006,6 +3042,8 @@
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	mutex_unlock(&bnad->conf_mutex);
+
+	return 0;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@
 {
 	spin_lock_init(&bnad->bna_lock);
 	mutex_init(&bnad->conf_mutex);
+	mutex_init(&bnad_list_mutex);
 }
 
 static void
 bnad_lock_uninit(struct bnad *bnad)
 {
 	mutex_destroy(&bnad->conf_mutex);
+	mutex_destroy(&bnad_list_mutex);
 }
 
 /* PCI Initialization */
@@ -3186,7 +3226,7 @@
 		goto disable_device;
 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-		*using_dac = 1;
+		*using_dac = true;
 	} else {
 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
@@ -3195,7 +3235,7 @@
 			if (err)
 				goto release_regions;
 		}
-		*using_dac = 0;
+		*using_dac = false;
 	}
 	pci_set_master(pdev);
 	return 0;
@@ -3249,8 +3289,8 @@
 		return err;
 	}
 	bnad = netdev_priv(netdev);
-
 	bnad_lock_init(bnad);
+	bnad_add_to_list(bnad);
 
 	mutex_lock(&bnad->conf_mutex);
 	/*
@@ -3277,6 +3317,10 @@
 	/* Set link to down state */
 	netif_carrier_off(netdev);
 
+	/* Setup the debugfs node for this bnad */
+	if (bna_debugfs_enable)
+		bnad_debugfs_init(bnad);
+
 	/* Get resource requirement form bna */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@
 res_free:
 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
 drv_uninit:
+	/* Remove the debugfs node for this bnad */
+	kfree(bnad->regdata);
+	bnad_debugfs_uninit(bnad);
 	bnad_uninit(bnad);
 pci_uninit:
 	bnad_pci_uninit(pdev);
 unlock_mutex:
 	mutex_unlock(&bnad->conf_mutex);
+	bnad_remove_from_list(bnad);
 	bnad_lock_uninit(bnad);
 	free_netdev(netdev);
 	return err;
@@ -3441,7 +3489,11 @@
 	bnad_disable_msix(bnad);
 	bnad_pci_uninit(pdev);
 	mutex_unlock(&bnad->conf_mutex);
+	bnad_remove_from_list(bnad);
 	bnad_lock_uninit(bnad);
+	/* Remove the debugfs node for this bnad */
+	kfree(bnad->regdata);
+	bnad_debugfs_uninit(bnad);
 	bnad_uninit(bnad);
 	free_netdev(netdev);
 }
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 5487ca4..55824d9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -124,6 +124,12 @@
 	BNAD_LS_UP		= 1
 };
 
+struct bnad_iocmd_comp {
+	struct bnad		*bnad;
+	struct completion	comp;
+	int			comp_status;
+};
+
 struct bnad_completion {
 	struct completion	ioc_comp;
 	struct completion	ucast_comp;
@@ -251,6 +257,8 @@
 
 struct bnad {
 	struct net_device	*netdev;
+	u32			id;
+	struct list_head	list_entry;
 
 	/* Data path */
 	struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,12 +328,26 @@
 	char			adapter_name[BNAD_NAME_LEN];
 	char			port_name[BNAD_NAME_LEN];
 	char			mbox_irq_name[BNAD_NAME_LEN];
+
+	/* debugfs specific data */
+	char	*regdata;
+	u32	reglen;
+	struct dentry *bnad_dentry_files[5];
+	struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+	struct bfa_ioc_attr  ioc_attr;
+	struct bfa_cee_attr  cee_attr;
+	struct bfa_flash_attr flash_attr;
+	u32	cee_status;
+	u32	flash_status;
 };
 
 /*
  * EXTERN VARIABLES
  */
-extern struct firmware *bfi_fw;
+extern const struct firmware *bfi_fw;
 extern u32		bnad_rxqs_per_cq;
 
 /*
@@ -340,6 +362,7 @@
 extern int bnad_enable_default_bcast(struct bnad *bnad);
 extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
 extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
 
 /* Configuration & setup */
 extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@
 extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
 		struct rtnl_link_stats64 *stats);
 
+/* Debugfs */
+void	bnad_debugfs_init(struct bnad *bnad);
+void	bnad_debugfs_uninit(struct bnad *bnad);
+
 /**
  * MACROS
  */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644
index 0000000..592ad39
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debugfs interface
+ *
+ * To access the interface, the debugfs file system should be mounted
+ * (if it is not already) using:
+ *	mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ *	- bna/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging services available per pci_dev:
+ *	fwtrc:  To collect the current firmware trace.
+ *	fwsave: To collect the last saved firmware trace after a firmware crash.
+ *	regwr:  To write one word to a chip register.
+ *	regrd:  To read one or more words from a chip register.
+ */
+
+struct bnad_debug_info {
+	char *debug_buffer;
+	void *i_private;
+	int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+	struct bnad *bnad = inode->i_private;
+	struct bnad_debug_info *fw_debug;
+	unsigned long flags;
+	int rc;
+
+	fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+	if (!fw_debug)
+		return -ENOMEM;
+
+	fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+	fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+	if (!fw_debug->debug_buffer) {
+		kfree(fw_debug);
+		fw_debug = NULL;
+		pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+			fw_debug->debug_buffer,
+			&fw_debug->buffer_len);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	if (rc != BFA_STATUS_OK) {
+		kfree(fw_debug->debug_buffer);
+		fw_debug->debug_buffer = NULL;
+		kfree(fw_debug);
+		fw_debug = NULL;
+		pr_warn("bnad %s: Failed to collect fwtrc\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	file->private_data = fw_debug;
+
+	return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+	struct bnad *bnad = inode->i_private;
+	struct bnad_debug_info *fw_debug;
+	unsigned long flags;
+	int rc;
+
+	fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+	if (!fw_debug)
+		return -ENOMEM;
+
+	fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+	fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+	if (!fw_debug->debug_buffer) {
+		kfree(fw_debug);
+		fw_debug = NULL;
+		pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+			fw_debug->debug_buffer,
+			&fw_debug->buffer_len);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+		kfree(fw_debug->debug_buffer);
+		fw_debug->debug_buffer = NULL;
+		kfree(fw_debug);
+		fw_debug = NULL;
+		pr_warn("bna %s: Failed to collect fwsave\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	file->private_data = fw_debug;
+
+	return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+	struct bnad_debug_info *reg_debug;
+
+	reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+	if (!reg_debug)
+		return -ENOMEM;
+
+	reg_debug->i_private = inode->i_private;
+
+	file->private_data = reg_debug;
+
+	return 0;
+}
+
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+	struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+	struct bnad_iocmd_comp fcomp;
+	unsigned long flags = 0;
+	int ret = BFA_STATUS_FAILED;
+
+	/* Get IOC info */
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	/* Retrieve CEE related info */
+	fcomp.bnad = bnad;
+	fcomp.comp_status = 0;
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+				bnad_cb_completion, &fcomp);
+	if (ret != BFA_STATUS_OK) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	wait_for_completion(&fcomp.comp);
+	drvinfo->cee_status = fcomp.comp_status;
+
+	/* Retrieve flash partition info */
+	fcomp.comp_status = 0;
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+				bnad_cb_completion, &fcomp);
+	if (ret != BFA_STATUS_OK) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	wait_for_completion(&fcomp.comp);
+	drvinfo->flash_status = fcomp.comp_status;
+out:
+	return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+	struct bnad *bnad = inode->i_private;
+	struct bnad_debug_info *drv_info;
+	int rc;
+
+	drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+	if (!drv_info)
+		return -ENOMEM;
+
+	drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+	drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+	if (!drv_info->debug_buffer) {
+		kfree(drv_info);
+		drv_info = NULL;
+		pr_warn("bna %s: Failed to allocate drv info buffer\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	mutex_lock(&bnad->conf_mutex);
+	rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+				drv_info->buffer_len);
+	mutex_unlock(&bnad->conf_mutex);
+	if (rc != BFA_STATUS_OK) {
+		kfree(drv_info->debug_buffer);
+		drv_info->debug_buffer = NULL;
+		kfree(drv_info);
+		drv_info = NULL;
+		pr_warn("bna %s: Failed to collect drvinfo\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	file->private_data = drv_info;
+
+	return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+	loff_t pos = file->f_pos;
+	struct bnad_debug_info *debug = file->private_data;
+
+	if (!debug)
+		return -EINVAL;
+
+	switch (orig) {
+	case 0:
+		file->f_pos = offset;
+		break;
+	case 1:
+		file->f_pos += offset;
+		break;
+	case 2:
+		file->f_pos = debug->buffer_len - offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+		file->f_pos = pos;
+		return -EINVAL;
+	}
+
+	return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+		  size_t nbytes, loff_t *pos)
+{
+	struct bnad_debug_info *debug = file->private_data;
+
+	if (!debug || !debug->debug_buffer)
+		return 0;
+
+	return simple_read_from_buffer(buf, nbytes, pos,
+				debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ	(0x40000)
+#define BFA_REG_CB_ADDRSZ	(0x20000)
+#define BFA_REG_ADDRSZ(__ioc)	\
+	((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?  \
+	 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc)	(BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+	u8 area;
+
+	/* check [16:15] */
+	area = (offset >> 15) & 0x7;
+	if (area == 0) {
+		/* PCIe core register */
+		if ((offset + (len<<2)) > 0x8000)	/* 8k dwords or 32KB */
+			return BFA_STATUS_EINVAL;
+	} else if (area == 0x1) {
+		/* CB 32 KB memory page */
+		if ((offset + (len<<2)) > 0x10000)	/* 8k dwords or 32KB */
+			return BFA_STATUS_EINVAL;
+	} else {
+		/* CB register space 64KB */
+		if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+			return BFA_STATUS_EINVAL;
+	}
+	return BFA_STATUS_OK;
+}
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+			size_t nbytes, loff_t *pos)
+{
+	struct bnad_debug_info *regrd_debug = file->private_data;
+	struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+	ssize_t rc;
+
+	if (!bnad->regdata)
+		return 0;
+
+	rc = simple_read_from_buffer(buf, nbytes, pos,
+			bnad->regdata, bnad->reglen);
+
+	if ((*pos + nbytes) >= bnad->reglen) {
+		kfree(bnad->regdata);
+		bnad->regdata = NULL;
+		bnad->reglen = 0;
+	}
+
+	return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+		size_t nbytes, loff_t *ppos)
+{
+	struct bnad_debug_info *regrd_debug = file->private_data;
+	struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+	struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+	int addr, len, rc, i;
+	u32 *regbuf;
+	void __iomem *rb, *reg_addr;
+	unsigned long flags;
+	void *kern_buf;
+
+	/* Allocate memory to store the user space buf */
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+	if (!kern_buf) {
+		pr_warn("bna %s: Failed to allocate user buffer\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+	if (rc < 2) {
+		pr_warn("bna %s: Failed to read user buffer\n",
+			pci_name(bnad->pcidev));
+		kfree(kern_buf);
+		return -EINVAL;
+	}
+
+	kfree(kern_buf);
+	kfree(bnad->regdata);
+	bnad->regdata = NULL;
+	bnad->reglen = 0;
+
+	bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+	if (!bnad->regdata) {
+		pr_warn("bna %s: Failed to allocate regrd buffer\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	bnad->reglen = len << 2;
+	rb = bfa_ioc_bar0(ioc);
+	addr &= BFA_REG_ADDRMSK(ioc);
+
+	/* offset and len sanity check */
+	rc = bna_reg_offset_check(ioc, addr, len);
+	if (rc) {
+		pr_warn("bna %s: Failed reg offset check\n",
+			pci_name(bnad->pcidev));
+		kfree(bnad->regdata);
+		bnad->regdata = NULL;
+		bnad->reglen = 0;
+		return -EINVAL;
+	}
+
+	reg_addr = rb + addr;
+	regbuf =  (u32 *)bnad->regdata;
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	for (i = 0; i < len; i++) {
+		*regbuf = readl(reg_addr);
+		regbuf++;
+		reg_addr += sizeof(u32);
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+		size_t nbytes, loff_t *ppos)
+{
+	struct bnad_debug_info *debug = file->private_data;
+	struct bnad *bnad = (struct bnad *)debug->i_private;
+	struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+	int addr, val, rc;
+	void __iomem *reg_addr;
+	unsigned long flags;
+	void *kern_buf;
+
+	/* Allocate memory to store the user space buf */
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+	if (!kern_buf) {
+		pr_warn("bna %s: Failed to allocate user buffer\n",
+			pci_name(bnad->pcidev));
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+	if (rc < 2) {
+		pr_warn("bna %s: Failed to read user buffer\n",
+			pci_name(bnad->pcidev));
+		kfree(kern_buf);
+		return -EINVAL;
+	}
+	kfree(kern_buf);
+
+	addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+	/* offset and len sanity check */
+	rc = bna_reg_offset_check(ioc, addr, 1);
+	if (rc) {
+		pr_warn("bna %s: Failed reg offset check\n",
+			pci_name(bnad->pcidev));
+		return -EINVAL;
+	}
+
+	reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	writel(val, reg_addr);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct bnad_debug_info *debug = file->private_data;
+
+	if (!debug)
+		return 0;
+
+	file->private_data = NULL;
+	kfree(debug);
+	return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+	struct bnad_debug_info *debug = file->private_data;
+
+	if (!debug)
+		return 0;
+
+	kfree(debug->debug_buffer);
+
+	file->private_data = NULL;
+	kfree(debug);
+	debug = NULL;
+	return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+	.owner		=	THIS_MODULE,
+	.open		=	bnad_debugfs_open_fwtrc,
+	.llseek		=	bnad_debugfs_lseek,
+	.read		=	bnad_debugfs_read,
+	.release	=	bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+	.owner		=	THIS_MODULE,
+	.open		=	bnad_debugfs_open_fwsave,
+	.llseek		=	bnad_debugfs_lseek,
+	.read		=	bnad_debugfs_read,
+	.release	=	bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+	.owner		=       THIS_MODULE,
+	.open		=	bnad_debugfs_open_reg,
+	.llseek		=	bnad_debugfs_lseek,
+	.read		=	bnad_debugfs_read_regrd,
+	.write		=	bnad_debugfs_write_regrd,
+	.release	=	bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+	.owner		=	THIS_MODULE,
+	.open		=	bnad_debugfs_open_reg,
+	.llseek		=	bnad_debugfs_lseek,
+	.write		=	bnad_debugfs_write_regwr,
+	.release	=	bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+	.owner		=	THIS_MODULE,
+	.open		=	bnad_debugfs_open_drvinfo,
+	.llseek		=	bnad_debugfs_lseek,
+	.read		=	bnad_debugfs_read,
+	.release	=	bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+	const char *name;
+	mode_t  mode;
+	const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+	{ "fwtrc",  S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+	{ "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+	{ "regrd",  S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+	{ "regwr",  S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+	{ "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+	const struct bnad_debugfs_entry *file;
+	char name[64];
+	int i;
+
+	/* Setup the BNA debugfs root directory */
+	if (!bna_debugfs_root) {
+		bna_debugfs_root = debugfs_create_dir("bna", NULL);
+		atomic_set(&bna_debugfs_port_count, 0);
+		if (!bna_debugfs_root) {
+			pr_warn("BNA: debugfs root dir creation failed\n");
+			return;
+		}
+	}
+
+	/* Setup the pci_dev debugfs directory for the port */
+	snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+	if (!bnad->port_debugfs_root) {
+		bnad->port_debugfs_root =
+			debugfs_create_dir(name, bna_debugfs_root);
+		if (!bnad->port_debugfs_root) {
+			pr_warn("bna pci_dev %s: root dir creation failed\n",
+				pci_name(bnad->pcidev));
+			return;
+		}
+
+		atomic_inc(&bna_debugfs_port_count);
+
+		for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+			file = &bnad_debugfs_files[i];
+			bnad->bnad_dentry_files[i] =
+					debugfs_create_file(file->name,
+							file->mode,
+							bnad->port_debugfs_root,
+							bnad,
+							file->fops);
+			if (!bnad->bnad_dentry_files[i]) {
+				pr_warn(
+				     "BNA pci_dev:%s: create %s entry failed\n",
+				     pci_name(bnad->pcidev), file->name);
+				return;
+			}
+		}
+	}
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+		if (bnad->bnad_dentry_files[i]) {
+			debugfs_remove(bnad->bnad_dentry_files[i]);
+			bnad->bnad_dentry_files[i] = NULL;
+		}
+	}
+
+	/* Remove the pci_dev debugfs directory for the port */
+	if (bnad->port_debugfs_root) {
+		debugfs_remove(bnad->port_debugfs_root);
+		bnad->port_debugfs_root = NULL;
+		atomic_dec(&bna_debugfs_port_count);
+	}
+
+	/* Remove the BNA debugfs root directory */
+	if (atomic_read(&bna_debugfs_port_count) == 0) {
+		debugfs_remove(bna_debugfs_root);
+		bna_debugfs_root = NULL;
+	}
+}
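Both register files parse their input with sscanf("%x:%x", ...), so the expected write format is two hex fields separated by a colon: an offset and a word count for regrd, and an offset and a value for regwr. An illustrative session (the offset and value shown are arbitrary, not taken from the hardware manual):

	$ cd /sys/kernel/debug/bna/pci_dev:<pci_name>
	$ echo "4000:4" > regrd		# capture 4 words starting at offset 0x4000
	$ cat regrd > regs.bin		# dump the captured words (raw binary)
	$ echo "4000:deadbeef" > regwr	# write one word at offset 0x4000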
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index fd3dcc1..9b44ec8 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -38,7 +38,7 @@
 	sizeof(struct bnad_drv_stats) / sizeof(u64) +		\
 	offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
 
-static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
 	"rx_packets",
 	"tx_packets",
 	"rx_bytes",
@@ -296,8 +296,8 @@
 	struct bfa_ioc_attr *ioc_attr;
 	unsigned long flags;
 
-	strcpy(drvinfo->driver, BNAD_NAME);
-	strcpy(drvinfo->version, BNAD_VERSION);
+	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
 
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
@@ -305,12 +305,13 @@
 		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
-			sizeof(drvinfo->fw_version) - 1);
+		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+			sizeof(drvinfo->fw_version));
 		kfree(ioc_attr);
 	}
 
-	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+		sizeof(drvinfo->bus_info));
 }
 
 static void
@@ -934,7 +935,144 @@
 	}
 }
 
-static struct ethtool_ops bnad_ethtool_ops = {
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+				u32 *base_offset)
+{
+	struct bfa_flash_attr *flash_attr;
+	struct bnad_iocmd_comp fcomp;
+	u32 i, flash_part = 0, ret;
+	unsigned long flags = 0;
+
+	flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+	if (!flash_attr)
+		return -ENOMEM;
+
+	fcomp.bnad = bnad;
+	fcomp.comp_status = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+				bnad_cb_completion, &fcomp);
+	if (ret != BFA_STATUS_OK) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		kfree(flash_attr);
+		goto out_err;
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	wait_for_completion(&fcomp.comp);
+	ret = fcomp.comp_status;
+
+	/* Check for the flash type & base offset value */
+	if (ret == BFA_STATUS_OK) {
+		for (i = 0; i < flash_attr->npart; i++) {
+			if (offset >= flash_attr->part[i].part_off &&
+			    offset < (flash_attr->part[i].part_off +
+				      flash_attr->part[i].part_size)) {
+				flash_part = flash_attr->part[i].part_type;
+				*base_offset = flash_attr->part[i].part_off;
+				break;
+			}
+		}
+	}
+	kfree(flash_attr);
+	return flash_part;
+out_err:
+	return -EINVAL;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+	return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+		u8 *bytes)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_iocmd_comp fcomp;
+	u32 flash_part = 0, base_offset = 0;
+	unsigned long flags = 0;
+	int ret = 0;
+
+	/* Check if the flash read request is valid */
+	if (eeprom->magic != (bnad->pcidev->vendor |
+			     (bnad->pcidev->device << 16)))
+		return -EFAULT;
+
+	/* Query the flash partition based on the offset */
+	flash_part = bnad_get_flash_partition_by_offset(bnad,
+				eeprom->offset, &base_offset);
+	if (flash_part <= 0)
+		return -EFAULT;
+
+	fcomp.bnad = bnad;
+	fcomp.comp_status = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+				bnad->id, bytes, eeprom->len,
+				eeprom->offset - base_offset,
+				bnad_cb_completion, &fcomp);
+	if (ret != BFA_STATUS_OK) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		goto done;
+	}
+
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	wait_for_completion(&fcomp.comp);
+	ret = fcomp.comp_status;
+done:
+	return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+		u8 *bytes)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_iocmd_comp fcomp;
+	u32 flash_part = 0, base_offset = 0;
+	unsigned long flags = 0;
+	int ret = 0;
+
+	/* Check if the flash update request is valid */
+	if (eeprom->magic != (bnad->pcidev->vendor |
+			     (bnad->pcidev->device << 16)))
+		return -EINVAL;
+
+	/* Query the flash partition based on the offset */
+	flash_part = bnad_get_flash_partition_by_offset(bnad,
+				eeprom->offset, &base_offset);
+	if (flash_part <= 0)
+		return -EFAULT;
+
+	fcomp.bnad = bnad;
+	fcomp.comp_status = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+				bnad->id, bytes, eeprom->len,
+				eeprom->offset - base_offset,
+				bnad_cb_completion, &fcomp);
+	if (ret != BFA_STATUS_OK) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		goto done;
+	}
+
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	wait_for_completion(&fcomp.comp);
+	ret = fcomp.comp_status;
+done:
+	return ret;
+}
+
+static const struct ethtool_ops bnad_ethtool_ops = {
 	.get_settings = bnad_get_settings,
 	.set_settings = bnad_set_settings,
 	.get_drvinfo = bnad_get_drvinfo,
@@ -948,7 +1086,10 @@
 	.set_pauseparam = bnad_set_pauseparam,
 	.get_strings = bnad_get_strings,
 	.get_ethtool_stats = bnad_get_ethtool_stats,
-	.get_sset_count = bnad_get_sset_count
+	.get_sset_count = bnad_get_sset_count,
+	.get_eeprom_len = bnad_get_eeprom_len,
+	.get_eeprom = bnad_get_eeprom,
+	.set_eeprom = bnad_set_eeprom,
 };
 
 void
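One detail worth noting about the new ethtool EEPROM hooks: both bnad_get_eeprom() and bnad_set_eeprom() require eeprom->magic to encode the adapter's PCI IDs, i.e. magic = vendor | (device << 16), and the flash partition is chosen by mapping eeprom->offset onto the partition table returned by bfa_nw_flash_get_attr(); requests whose offset does not fall inside a known partition are rejected.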
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 1b3e90d..32e8f17 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -43,8 +43,7 @@
 
 #pragma pack(1)
 
-#define MAC_ADDRLEN	(6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
 
 #pragma pack()
 
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index 725b9ff..cfc22a6 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -16,6 +16,7 @@
  * www.brocade.com
  */
 #include <linux/firmware.h>
+#include "bnad.h"
 #include "bfi.h"
 #include "cna.h"
 
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644
index 0000000..aba435c
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+	tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+	depends on HAS_IOMEM
+	select CRC32
+	help
+	  This is the driver for the XGMAC Ethernet IP block found on Calxeda
+	  Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644
index 0000000..f0ef080
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644
index 0000000..1fce186
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL		0x00000000	/* MAC Configuration */
+#define XGMAC_FRAME_FILTER	0x00000004	/* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL		0x00000018	/* MAC Flow Control */
+#define XGMAC_VLAN_TAG		0x0000001C	/* VLAN Tags */
+#define XGMAC_VERSION		0x00000020	/* Version */
+#define XGMAC_VLAN_INCL		0x00000024	/* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL		0x00000028	/* LPI Control and Status */
+#define XGMAC_LPI_TIMER		0x0000002C	/* LPI Timers Control */
+#define XGMAC_TX_PACE		0x00000030	/* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH		0x00000034	/* VLAN Hash Table */
+#define XGMAC_DEBUG		0x00000038	/* Debug */
+#define XGMAC_INT_STAT		0x0000003C	/* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg)	(0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg)	(0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n)		(0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH		16
+#define XGMAC_OMR		0x00000400
+#define XGMAC_REMOTE_WAKE	0x00000700	/* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT		0x00000704	/* PMT Control and Status */
+#define XGMAC_MMC_CTRL		0x00000800	/* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX	0x00000804	/* Receive Interrupt */
+#define XGMAC_MMC_INTR_TX	0x00000808	/* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX	0x0000080c	/* Receive Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX	0x00000810	/* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO	0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI	0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO	0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI	0x00000820
+#define XGMAC_MMC_TXBCFRAME_G	0x00000824
+#define XGMAC_MMC_TXMCFRAME_G	0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB	0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB	0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB	0x00000874
+#define XGMAC_MMC_TXUNDERFLOW	0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO	0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI	0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO	0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI	0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME	0x00000894
+#define XGMAC_MMC_TXVLANFRAME	0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO	0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI	0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO	0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI	0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO	0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI	0x00000914
+#define XGMAC_MMC_RXBCFRAME_G	0x00000918
+#define XGMAC_MMC_RXMCFRAME_G	0x00000920
+#define XGMAC_MMC_RXCRCERR	0x00000928
+#define XGMAC_MMC_RXRUNT	0x00000930
+#define XGMAC_MMC_RXJABBER	0x00000934
+#define XGMAC_MMC_RXUCFRAME_G	0x00000970
+#define XGMAC_MMC_RXLENGTHERR	0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME	0x00000988
+#define XGMAC_MMC_RXOVERFLOW	0x00000990
+#define XGMAC_MMC_RXVLANFRAME	0x00000998
+#define XGMAC_MMC_RXWATCHDOG	0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE	0x00000f00	/* Bus Mode */
+#define XGMAC_DMA_TX_POLL	0x00000f04	/* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL	0x00000f08	/* Receive Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR	0x00000f0c	/* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR	0x00000f10	/* Transmit List Base */
+#define XGMAC_DMA_STATUS	0x00000f14	/* Status Register */
+#define XGMAC_DMA_CONTROL	0x00000f18	/* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA	0x00000f1c	/* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20	/* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER	0x00000f24	/* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS	0x00000f28	/* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS	0x00000f2C	/* AXI Status */
+#define XGMAC_DMA_HW_FEATURE	0x00000f58	/* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE		0x80000000
+#define XGMAC_MAX_FILTER_ADDR	31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET	0x80000000
+#define XGMAC_PMT_GLBL_UNICAST	0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM	0x00000040
+#define XGMAC_PMT_MAGIC_PKT	0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN	0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN	0x00000002
+#define XGMAC_PMT_POWERDOWN	0x00000001
+
+#define XGMAC_CONTROL_SPD	0x40000000	/* Speed control */
+#define XGMAC_CONTROL_SPD_MASK	0x60000000
+#define XGMAC_CONTROL_SPD_1G	0x60000000
+#define XGMAC_CONTROL_SPD_2_5G	0x40000000
+#define XGMAC_CONTROL_SPD_10G	0x00000000
+#define XGMAC_CONTROL_SARC	0x10000000	/* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK	0x18000000
+#define XGMAC_CONTROL_CAR	0x04000000	/* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK	0x06000000
+#define XGMAC_CONTROL_DP	0x01000000	/* Disable Padding */
+#define XGMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD	0x00400000	/* Jabber disable */
+#define XGMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
+#define XGMAC_CONTROL_LM	0x00001000	/* Loop-back mode */
+#define XGMAC_CONTROL_IPC	0x00000400	/* Checksum Offload */
+#define XGMAC_CONTROL_ACS	0x00000080	/* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC	0x00000010	/* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE	0x00000008	/* Transmitter Enable */
+#define XGMAC_CONTROL_RE	0x00000004	/* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF	0x00000100	/* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF	0x00000800	/* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF	0x00001000	/* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT	16
+#define XGMAC_FLOW_CTRL_DZQP	0x00000080	/* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT	0x00000020	/* Pause Low Threshold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030	/* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP	0x00000008	/* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT	2		/* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT	8
+#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT	17
+#define DMA_BUS_MODE_USP	0x00800000
+#define DMA_BUS_MODE_8PBL	0x01000000
+#define DMA_BUS_MODE_AAL	0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT	14
+#define DMA_BUS_FB		0x00010000	/* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
+#define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
+#define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
+#define DMA_INTR_ENA_AIE	0x00008000	/* Abnormal Summary */
+#define DMA_INTR_ENA_ERE	0x00004000	/* Early Receive */
+#define DMA_INTR_ENA_FBE	0x00002000	/* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE	0x00000400	/* Early Transmit */
+#define DMA_INTR_ENA_RWE	0x00000200	/* Receive Watchdog */
+#define DMA_INTR_ENA_RSE	0x00000100	/* Receive Stopped */
+#define DMA_INTR_ENA_RUE	0x00000080	/* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE	0x00000040	/* Receive Interrupt */
+#define DMA_INTR_ENA_UNE	0x00000020	/* Tx Underflow */
+#define DMA_INTR_ENA_OVE	0x00000010	/* Receive Overflow */
+#define DMA_INTR_ENA_TJE	0x00000008	/* Transmit Jabber */
+#define DMA_INTR_ENA_TUE	0x00000004	/* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE	0x00000002	/* Transmit Stopped */
+#define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL		(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+				 DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+				 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+				 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+				 DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
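+
+/* Note that DMA_INTR_ENA_TIE is not part of the default mask; the TX
+ * completion interrupt is only enabled from xgmac_xmit() when the ring
+ * runs out of space, so TX reclaim is normally driven by NAPI polling. */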
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
+#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT	20
+#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
+#define DMA_STATUS_RS_SHIFT	17
+#define DMA_STATUS_NIS		0x00010000	/* Normal Interrupt Summary */
+#define DMA_STATUS_AIS		0x00008000	/* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI		0x00004000	/* Early Receive Interrupt */
+#define DMA_STATUS_FBI		0x00002000	/* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI		0x00000400	/* Early Transmit Interrupt */
+#define DMA_STATUS_RWT		0x00000200	/* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS		0x00000100	/* Receive Process Stopped */
+#define DMA_STATUS_RU		0x00000080	/* Receive Buffer Unavailable */
+#define DMA_STATUS_RI		0x00000040	/* Receive Interrupt */
+#define DMA_STATUS_UNF		0x00000020	/* Transmit Underflow */
+#define DMA_STATUS_OVF		0x00000010	/* Receive Overflow */
+#define DMA_STATUS_TJT		0x00000008	/* Transmit Jabber Timeout */
+#define DMA_STATUS_TU		0x00000004	/* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS		0x00000002	/* Transmit Process Stopped */
+#define DMA_STATUS_TI		0x00000001	/* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
+#define MAC_ENABLE_RX		0x00000004	/* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF		0x00200000	/* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF		0x00100000	/* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
+#define XGMAC_OMR_TTC_MASK	0x00030000
+#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
+#define XGMAC_OMR_EFC		0x00000100	/* Enable Hardware FC */
+#define XGMAC_OMR_FEF		0x00000080	/* Forward Error Frames */
+#define XGMAC_OMR_DT		0x00000040	/* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF		0x00000020	/* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC		0x00000010	/* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL	0x00010000	/* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ	0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ		(0x2000 - 8)
+
+#define RXDESC_EXT_STATUS	0x00000001
+#define RXDESC_CRC_ERR		0x00000002
+#define RXDESC_RX_ERR		0x00000008
+#define RXDESC_RX_WDOG		0x00000010
+#define RXDESC_FRAME_TYPE	0x00000020
+#define RXDESC_GIANT_FRAME	0x00000080
+#define RXDESC_LAST_SEG		0x00000100
+#define RXDESC_FIRST_SEG	0x00000200
+#define RXDESC_VLAN_FRAME	0x00000400
+#define RXDESC_OVERFLOW_ERR	0x00000800
+#define RXDESC_LENGTH_ERR	0x00001000
+#define RXDESC_SA_FILTER_FAIL	0x00002000
+#define RXDESC_DESCRIPTOR_ERR	0x00004000
+#define RXDESC_ERROR_SUMMARY	0x00008000
+#define RXDESC_FRAME_LEN_OFFSET	16
+#define RXDESC_FRAME_LEN_MASK	0x3fff0000
+#define RXDESC_DA_FILTER_FAIL	0x40000000
+
+#define RXDESC1_END_RING	0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK	0x00000003
+#define RXDESC_IP_PAYLOAD_UDP	0x00000001
+#define RXDESC_IP_PAYLOAD_TCP	0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP	0x00000003
+#define RXDESC_IP_HEADER_ERR	0x00000008
+#define RXDESC_IP_PAYLOAD_ERR	0x00000010
+#define RXDESC_IPV4_PACKET	0x00000040
+#define RXDESC_IPV6_PACKET	0x00000080
+#define TXDESC_UNDERFLOW_ERR	0x00000001
+#define TXDESC_JABBER_TIMEOUT	0x00000002
+#define TXDESC_LOCAL_FAULT	0x00000004
+#define TXDESC_REMOTE_FAULT	0x00000008
+#define TXDESC_VLAN_FRAME	0x00000010
+#define TXDESC_FRAME_FLUSHED	0x00000020
+#define TXDESC_IP_HEADER_ERR	0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR	0x00000080
+#define TXDESC_ERROR_SUMMARY	0x00008000
+#define TXDESC_SA_CTRL_INSERT	0x00040000
+#define TXDESC_SA_CTRL_REPLACE	0x00080000
+#define TXDESC_2ND_ADDR_CHAINED	0x00100000
+#define TXDESC_END_RING		0x00200000
+#define TXDESC_CSUM_IP		0x00400000
+#define TXDESC_CSUM_IP_PAYLD	0x00800000
+#define TXDESC_CSUM_ALL		0x00C00000
+#define TXDESC_CRC_EN_REPLACE	0x01000000
+#define TXDESC_CRC_EN_APPEND	0x02000000
+#define TXDESC_DISABLE_PAD	0x04000000
+#define TXDESC_FIRST_SEG	0x10000000
+#define TXDESC_LAST_SEG		0x20000000
+#define TXDESC_INTERRUPT	0x40000000
+
+#define DESC_OWN		0x80000000
+#define DESC_BUFFER1_SZ_MASK	0x00001fff
+#define DESC_BUFFER2_SZ_MASK	0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET	16
+
+struct xgmac_dma_desc {
+	__le32 flags;
+	__le32 buf_size;
+	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
+	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
+	__le32 ext_status;
+	__le32 res[3];
+};
+
+struct xgmac_extra_stats {
+	/* Transmit errors */
+	unsigned long tx_jabber;
+	unsigned long tx_frame_flushed;
+	unsigned long tx_payload_error;
+	unsigned long tx_ip_header_error;
+	unsigned long tx_local_fault;
+	unsigned long tx_remote_fault;
+	/* Receive errors */
+	unsigned long rx_watchdog;
+	unsigned long rx_da_filter_fail;
+	unsigned long rx_sa_filter_fail;
+	unsigned long rx_payload_error;
+	unsigned long rx_ip_header_error;
+	/* Tx/Rx IRQ errors */
+	unsigned long tx_undeflow;
+	unsigned long tx_process_stopped;
+	unsigned long rx_buf_unav;
+	unsigned long rx_process_stopped;
+	unsigned long tx_early;
+	unsigned long fatal_bus_error;
+};
+
+struct xgmac_priv {
+	struct xgmac_dma_desc *dma_rx;
+	struct sk_buff **rx_skbuff;
+	unsigned int rx_tail;
+	unsigned int rx_head;
+
+	struct xgmac_dma_desc *dma_tx;
+	struct sk_buff **tx_skbuff;
+	unsigned int tx_head;
+	unsigned int tx_tail;
+
+	void __iomem *base;
+	struct sk_buff_head rx_recycle;
+	unsigned int dma_buf_sz;
+	dma_addr_t dma_rx_phy;
+	dma_addr_t dma_tx_phy;
+
+	struct net_device *dev;
+	struct device *device;
+	struct napi_struct napi;
+
+	struct xgmac_extra_stats xstats;
+
+	spinlock_t stats_lock;
+	int pmt_irq;
+	char rx_pause;
+	char tx_pause;
+	int wolopts;
+};
+
+/* XGMAC Configuration Settings */
+#define MAX_MTU			9000
+#define PAUSE_TIME		0x400
+
+#define DMA_RX_RING_SZ		256
+#define DMA_TX_RING_SZ		128
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TX_THRESH		(DMA_TX_RING_SZ/4)
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
+#define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
+#define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
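+
+/* The ring helpers assume the ring size is a power of two (both
+ * DMA_RX_RING_SZ and DMA_TX_RING_SZ below are), since dma_ring_incr()
+ * wraps by masking with (size - 1): with a 128-entry ring,
+ * (127 + 1) & 127 == 0. */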
+
+/* XGMAC Descriptor Access Helpers */
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
+	if (buf_sz > MAX_DESC_BUF_SZ)
+		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+	else
+		p->buf_size = cpu_to_le32(buf_sz);
+}
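+
+/* A descriptor addresses two buffers; desc_set_buf_len() splits a single
+ * contiguous buffer across them once it exceeds MAX_DESC_BUF_SZ (8184
+ * bytes).  For example, a 9000-byte jumbo buffer ends up as 8184 bytes in
+ * buffer 1 and 816 bytes in buffer 2 (see desc_set_buf_addr() below). */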
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+	u32 len = le32_to_cpu(p->buf_size);
+	return (len & DESC_BUFFER1_SZ_MASK) +
+		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+				     int buf_sz)
+{
+	struct xgmac_dma_desc *end = p + ring_size - 1;
+
+	memset(p, 0, sizeof(*p) * ring_size);
+
+	for (; p <= end; p++)
+		desc_set_buf_len(p, buf_sz);
+
+	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+	memset(p, 0, sizeof(*p) * ring_size);
+	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
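+
+/* The END_RING bit in the last descriptor makes the DMA engine wrap back
+ * to the ring base address instead of walking past the end of the ring. */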
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
+static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
+{
+	/* Clear all fields and set the owner */
+	p->flags = cpu_to_le32(DESC_OWN);
+}
+
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	tmpflags |= flags | DESC_OWN;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
+static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->buf1_addr);
+}
+
+static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
+				     u32 paddr, int len)
+{
+	p->buf1_addr = cpu_to_le32(paddr);
+	if (len > MAX_DESC_BUF_SZ)
+		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
+}
+
+static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
+					      u32 paddr, int len)
+{
+	desc_set_buf_len(p, len);
+	desc_set_buf_addr(p, paddr, len);
+}
+
+static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
+{
+	u32 data = le32_to_cpu(p->flags);
+	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
+	if (data & RXDESC_FRAME_TYPE)
+		len -= ETH_FCS_LEN;
+
+	return len;
+}
+
+static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+	int timeout = 1000;
+	u32 reg = readl(ioaddr + XGMAC_OMR);
+	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
+
+	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
+		udelay(1);
+}
+
+static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+	struct xgmac_extra_stats *x = &priv->xstats;
+	u32 status = le32_to_cpu(p->flags);
+
+	if (!(status & TXDESC_ERROR_SUMMARY))
+		return 0;
+
+	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
+	if (status & TXDESC_JABBER_TIMEOUT)
+		x->tx_jabber++;
+	if (status & TXDESC_FRAME_FLUSHED)
+		x->tx_frame_flushed++;
+	if (status & TXDESC_UNDERFLOW_ERR)
+		xgmac_dma_flush_tx_fifo(priv->base);
+	if (status & TXDESC_IP_HEADER_ERR)
+		x->tx_ip_header_error++;
+	if (status & TXDESC_LOCAL_FAULT)
+		x->tx_local_fault++;
+	if (status & TXDESC_REMOTE_FAULT)
+		x->tx_remote_fault++;
+	if (status & TXDESC_PAYLOAD_CSUM_ERR)
+		x->tx_payload_error++;
+
+	return -1;
+}
+
+static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+	struct xgmac_extra_stats *x = &priv->xstats;
+	int ret = CHECKSUM_UNNECESSARY;
+	u32 status = le32_to_cpu(p->flags);
+	u32 ext_status = le32_to_cpu(p->ext_status);
+
+	if (status & RXDESC_DA_FILTER_FAIL) {
+		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
+		x->rx_da_filter_fail++;
+		return -1;
+	}
+
+	/* The hardware only verifies the checksum for recognised IP payloads
+	 * (TCP/UDP/ICMP); anything else falls back to CHECKSUM_NONE. */
+	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+		!(ext_status & RXDESC_IP_PAYLOAD_MASK))
+		ret = CHECKSUM_NONE;
+
+	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
+		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
+
+	if (!(status & RXDESC_ERROR_SUMMARY))
+		return ret;
+
+	/* Handle any errors */
+	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
+		RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
+		return -1;
+
+	if (status & RXDESC_EXT_STATUS) {
+		if (ext_status & RXDESC_IP_HEADER_ERR)
+			x->rx_ip_header_error++;
+		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
+			x->rx_payload_error++;
+		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
+			   ext_status);
+		return CHECKSUM_NONE;
+	}
+
+	return ret;
+}
+
+static inline void xgmac_mac_enable(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + XGMAC_CONTROL);
+	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+	writel(value, ioaddr + XGMAC_CONTROL);
+
+	value = readl(ioaddr + XGMAC_DMA_CONTROL);
+	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
+	writel(value, ioaddr + XGMAC_DMA_CONTROL);
+}
+
+static inline void xgmac_mac_disable(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
+	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+	writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+	value = readl(ioaddr + XGMAC_CONTROL);
+	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+	writel(value, ioaddr + XGMAC_CONTROL);
+}
+
+static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+			       int num)
+{
+	u32 data;
+
+	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+			       int num)
+{
+	u32 hi_addr, lo_addr;
+
+	/* Read the MAC address from the hardware */
+	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+	/* Extract the MAC address from the high and low words */
+	addr[0] = lo_addr & 0xff;
+	addr[1] = (lo_addr >> 8) & 0xff;
+	addr[2] = (lo_addr >> 16) & 0xff;
+	addr[3] = (lo_addr >> 24) & 0xff;
+	addr[4] = hi_addr & 0xff;
+	addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
+{
+	u32 reg;
+	unsigned int flow = 0;
+
+	priv->rx_pause = rx;
+	priv->tx_pause = tx;
+
+	if (rx || tx) {
+		if (rx)
+			flow |= XGMAC_FLOW_CTRL_RFE;
+		if (tx)
+			flow |= XGMAC_FLOW_CTRL_TFE;
+
+		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
+		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
+
+		writel(flow, priv->base + XGMAC_FLOW_CTRL);
+
+		reg = readl(priv->base + XGMAC_OMR);
+		reg |= XGMAC_OMR_EFC;
+		writel(reg, priv->base + XGMAC_OMR);
+	} else {
+		writel(0, priv->base + XGMAC_FLOW_CTRL);
+
+		reg = readl(priv->base + XGMAC_OMR);
+		reg &= ~XGMAC_OMR_EFC;
+		writel(reg, priv->base + XGMAC_OMR);
+	}
+
+	return 0;
+}
+
+static void xgmac_rx_refill(struct xgmac_priv *priv)
+{
+	struct xgmac_dma_desc *p;
+	dma_addr_t paddr;
+
+	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
+		int entry = priv->rx_head;
+		struct sk_buff *skb;
+
+		p = priv->dma_rx + entry;
+
+		if (priv->rx_skbuff[entry] != NULL)
+			continue;
+
+		skb = __skb_dequeue(&priv->rx_recycle);
+		if (skb == NULL)
+			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+		if (unlikely(skb == NULL))
+			break;
+
+		priv->rx_skbuff[entry] = skb;
+		paddr = dma_map_single(priv->device, skb->data,
+					 priv->dma_buf_sz, DMA_FROM_DEVICE);
+		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+
+		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
+			priv->rx_head, priv->rx_tail);
+
+		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
+		/* Ensure descriptor is in memory before handing to h/w */
+		wmb();
+		desc_set_rx_owner(p);
+	}
+}
+
+/**
+ * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description:  this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	unsigned int bfsize;
+
+	/* Set the buffer size according to the MTU;
+	 * jumbo frames need the buffer size bumped up.
+	 */
+	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+		       64);
+
+	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+				  GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		return -ENOMEM;
+
+	priv->dma_rx = dma_alloc_coherent(priv->device,
+					  DMA_RX_RING_SZ *
+					  sizeof(struct xgmac_dma_desc),
+					  &priv->dma_rx_phy,
+					  GFP_KERNEL);
+	if (!priv->dma_rx)
+		goto err_dma_rx;
+
+	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+				  GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto err_tx_skb;
+
+	priv->dma_tx = dma_alloc_coherent(priv->device,
+					  DMA_TX_RING_SZ *
+					  sizeof(struct xgmac_dma_desc),
+					  &priv->dma_tx_phy,
+					  GFP_KERNEL);
+	if (!priv->dma_tx)
+		goto err_dma_tx;
+
+	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+	    priv->dma_rx, priv->dma_tx,
+	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+	priv->rx_tail = 0;
+	priv->rx_head = 0;
+	priv->dma_buf_sz = bfsize;
+	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+	xgmac_rx_refill(priv);
+
+	priv->tx_tail = 0;
+	priv->tx_head = 0;
+	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+	return 0;
+
+err_dma_tx:
+	kfree(priv->tx_skbuff);
+err_tx_skb:
+	dma_free_coherent(priv->device,
+			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+			  priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+	kfree(priv->rx_skbuff);
+	return -ENOMEM;
+}
+
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+	int i;
+	struct xgmac_dma_desc *p;
+
+	if (!priv->rx_skbuff)
+		return;
+
+	for (i = 0; i < DMA_RX_RING_SZ; i++) {
+		if (priv->rx_skbuff[i] == NULL)
+			continue;
+
+		p = priv->dma_rx + i;
+		dma_unmap_single(priv->device, desc_get_buf_addr(p),
+				 priv->dma_buf_sz, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx_skbuff[i]);
+		priv->rx_skbuff[i] = NULL;
+	}
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+	int i, f;
+	struct xgmac_dma_desc *p;
+
+	if (!priv->tx_skbuff)
+		return;
+
+	for (i = 0; i < DMA_TX_RING_SZ; i++) {
+		if (priv->tx_skbuff[i] == NULL)
+			continue;
+
+		p = priv->dma_tx + i;
+		dma_unmap_single(priv->device, desc_get_buf_addr(p),
+				 desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+			p = priv->dma_tx + i++;
+			dma_unmap_page(priv->device, desc_get_buf_addr(p),
+				       desc_get_buf_len(p), DMA_TO_DEVICE);
+		}
+
+		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		priv->tx_skbuff[i] = NULL;
+	}
+}
+
+static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
+{
+	/* Release the DMA TX/RX socket buffers */
+	xgmac_free_rx_skbufs(priv);
+	xgmac_free_tx_skbufs(priv);
+
+	/* Free the consistent memory allocated for descriptor rings */
+	if (priv->dma_tx) {
+		dma_free_coherent(priv->device,
+				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
+				  priv->dma_tx, priv->dma_tx_phy);
+		priv->dma_tx = NULL;
+	}
+	if (priv->dma_rx) {
+		dma_free_coherent(priv->device,
+				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+				  priv->dma_rx, priv->dma_rx_phy);
+		priv->dma_rx = NULL;
+	}
+	kfree(priv->rx_skbuff);
+	priv->rx_skbuff = NULL;
+	kfree(priv->tx_skbuff);
+	priv->tx_skbuff = NULL;
+}
+
+/**
+ * xgmac_tx_complete:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void xgmac_tx_complete(struct xgmac_priv *priv)
+{
+	int i;
+	void __iomem *ioaddr = priv->base;
+
+	writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
+
+	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
+		unsigned int entry = priv->tx_tail;
+		struct sk_buff *skb = priv->tx_skbuff[entry];
+		struct xgmac_dma_desc *p = priv->dma_tx + entry;
+
+		/* Check if the descriptor is owned by the DMA. */
+		if (desc_get_owner(p))
+			break;
+
+		/* Verify tx error by looking at the last segment */
+		if (desc_get_tx_ls(p))
+			desc_get_tx_status(priv, p);
+
+		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
+			priv->tx_head, priv->tx_tail);
+
+		dma_unmap_single(priv->device, desc_get_buf_addr(p),
+				 desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		priv->tx_skbuff[entry] = NULL;
+		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+		if (!skb)
+			continue;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
+							      DMA_TX_RING_SZ);
+			p = priv->dma_tx + priv->tx_tail;
+
+			dma_unmap_page(priv->device, desc_get_buf_addr(p),
+				       desc_get_buf_len(p), DMA_TO_DEVICE);
+		}
+
+		/*
+		 * If there's room in the queue (limit it to size)
+		 * we add this skb back into the pool,
+		 * if it's the right size.
+		 */
+		if ((skb_queue_len(&priv->rx_recycle) <
+			DMA_RX_RING_SZ) &&
+			skb_recycle_check(skb, priv->dma_buf_sz))
+			__skb_queue_head(&priv->rx_recycle, skb);
+		else
+			dev_kfree_skb(skb);
+	}
+
+	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+	    TX_THRESH)
+		netif_wake_queue(priv->dev);
+}
+
+/**
+ * xgmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void xgmac_tx_err(struct xgmac_priv *priv)
+{
+	u32 reg, value, inten;
+
+	netif_stop_queue(priv->dev);
+
+	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
+	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+	reg = readl(priv->base + XGMAC_DMA_CONTROL);
+	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
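+	/*
+	 * 0x700000 is the transmit process state field (DMA_STATUS_TS_MASK);
+	 * wait for the TX engine to report stopped (0) or, presumably,
+	 * suspended (0x600000) before re-initializing the ring.
+	 */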
+	do {
+		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
+	} while (value && (value != 0x600000));
+
+	xgmac_free_tx_skbufs(priv);
+	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+	priv->tx_tail = 0;
+	priv->tx_head = 0;
+	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+
+	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
+		priv->base + XGMAC_DMA_STATUS);
+	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+
+	netif_wake_queue(priv->dev);
+}
+
+static int xgmac_hw_init(struct net_device *dev)
+{
+	u32 value, ctrl;
+	int limit;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+
+	/* Save the ctrl register value */
+	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
+
+	/* SW reset */
+	value = DMA_BUS_MODE_SFT_RESET;
+	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+	limit = 15000;
+	while (limit-- &&
+		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+		cpu_relax();
+	if (limit < 0)
+		return -EBUSY;
+
+	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
+		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
+		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
+	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
+	writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+
+	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
+		XGMAC_CONTROL_CAR;
+	if (dev->features & NETIF_F_RXCSUM)
+		ctrl |= XGMAC_CONTROL_IPC;
+	writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+	value = DMA_CONTROL_DFF;
+	writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+	/* Set the HW DMA mode and the COE */
+	writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+		ioaddr + XGMAC_OMR);
+
+	/* Reset the MMC counters */
+	writel(1, ioaddr + XGMAC_MMC_CTRL);
+	return 0;
+}
+
+/**
+ *  xgmac_open - open entry point of the driver
+ *  @dev : pointer to the device structure.
+ *  Description:
+ *  This function is the open entry point of the driver.
+ *  Return value:
+ *  0 on success and a negative errno value on failure.
+ */
+static int xgmac_open(struct net_device *dev)
+{
+	int ret;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+
+	/* Check that the MAC address is valid.  If it's not, use a random
+	 * address so the interface can still come up; the user may set a
+	 * proper address later with:
+	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		random_ether_addr(dev->dev_addr);
+		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
+			dev->dev_addr);
+	}
+
+	skb_queue_head_init(&priv->rx_recycle);
+	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+
+	/* Initialize the XGMAC and descriptors */
+	xgmac_hw_init(dev);
+	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
+	ret = xgmac_dma_desc_rings_init(dev);
+	if (ret < 0)
+		return ret;
+
+	/* Enable the MAC Rx/Tx */
+	xgmac_mac_enable(ioaddr);
+
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+/**
+ *  xgmac_stop - close entry point of the driver
+ *  @dev : device pointer.
+ *  Description:
+ *  This is the stop entry point of the driver.
+ */
+static int xgmac_stop(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
+		napi_disable(&priv->napi);
+
+	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+	skb_queue_purge(&priv->rx_recycle);
+
+	/* Disable the MAC core */
+	xgmac_mac_disable(priv->base);
+
+	/* Release and free the Rx/Tx resources */
+	xgmac_free_dma_desc_rings(priv);
+
+	return 0;
+}
+
+/**
+ *  xgmac_xmit:
+ *  @skb : the socket buffer
+ *  @dev : device pointer
+ *  Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	unsigned int entry;
+	int i;
+	int nfrags = skb_shinfo(skb)->nr_frags;
+	struct xgmac_dma_desc *desc, *first;
+	unsigned int desc_flags;
+	unsigned int len;
+	dma_addr_t paddr;
+
+	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+	    (nfrags + 1)) {
+		writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+			priv->base + XGMAC_DMA_INTR_ENA);
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+		TXDESC_CSUM_ALL : 0;
+	entry = priv->tx_head;
+	desc = priv->dma_tx + entry;
+	first = desc;
+
+	len = skb_headlen(skb);
+	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->device, paddr)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+	priv->tx_skbuff[entry] = skb;
+	desc_set_buf_addr_and_size(desc, paddr, len);
+
+	for (i = 0; i < nfrags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		len = frag->size;
+
+		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+					 DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, paddr)) {
+			dev_kfree_skb(skb);
+			return -EIO;
+		}
+
+		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+		desc = priv->dma_tx + entry;
+		priv->tx_skbuff[entry] = NULL;
+
+		desc_set_buf_addr_and_size(desc, paddr, len);
+		if (i < (nfrags - 1))
+			desc_set_tx_owner(desc, desc_flags);
+	}
+
+	/* Interrupt on completion only for the last segment */
+	if (desc != first)
+		desc_set_tx_owner(desc, desc_flags |
+			TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+	else
+		desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+	/* Set owner on first desc last to avoid race condition */
+	wmb();
+	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
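+	/* A write to the transmit poll demand register makes the DMA engine
+	 * re-read the descriptor list and pick up the new frame. */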
+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+	return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+	unsigned int entry;
+	unsigned int count = 0;
+	struct xgmac_dma_desc *p;
+
+	while (count < limit) {
+		int ip_checksum;
+		struct sk_buff *skb;
+		int frame_len;
+
+		writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+		       priv->base + XGMAC_DMA_STATUS);
+
+		entry = priv->rx_tail;
+		p = priv->dma_rx + entry;
+		if (desc_get_owner(p))
+			break;
+
+		count++;
+		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+		/* read the status of the incoming frame */
+		ip_checksum = desc_get_rx_status(priv, p);
+		if (ip_checksum < 0)
+			continue;
+
+		skb = priv->rx_skbuff[entry];
+		if (unlikely(!skb)) {
+			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+			break;
+		}
+		priv->rx_skbuff[entry] = NULL;
+
+		frame_len = desc_get_rx_frame_len(p);
+		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+			frame_len, ip_checksum);
+
+		skb_put(skb, frame_len);
+		dma_unmap_single(priv->device, desc_get_buf_addr(p),
+				 frame_len, DMA_FROM_DEVICE);
+
+		skb->protocol = eth_type_trans(skb, priv->dev);
+		skb->ip_summed = ip_checksum;
+		if (ip_checksum == CHECKSUM_NONE)
+			netif_receive_skb(skb);
+		else
+			napi_gro_receive(&priv->napi, skb);
+	}
+
+	xgmac_rx_refill(priv);
+
+	writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+	return count;
+}
+
+/**
+ *  xgmac_poll - xgmac poll method (NAPI)
+ *  @napi : pointer to the napi structure.
+ *  @budget : maximum number of packets that the current CPU can receive from
+ *	      all interfaces.
+ *  Description :
+ *   This function implements the reception process.
+ *   It also handles TX completions.
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+	struct xgmac_priv *priv = container_of(napi,
+				       struct xgmac_priv, napi);
+	int work_done = 0;
+
+	xgmac_tx_complete(priv);
+	work_done = xgmac_rx(priv, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+	}
+	return work_done;
+}
+
+/**
+ *  xgmac_tx_timeout
+ *  @dev : Pointer to net device structure
+ *  Description: this function is called when a packet transmission fails to
+ *   complete within a reasonable time. The driver will mark the error in the
+ *   netdev structure and arrange for the device to be reset to a sane state
+ *   in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	/* Clear Tx resources and restart transmitting again */
+	xgmac_tx_err(priv);
+}
+
+/**
+ *  xgmac_set_rx_mode - entry point for multicast addressing
+ *  @dev : pointer to the device structure
+ *  Description:
+ *  This function is a driver entry point which gets called by the kernel
+ *  whenever multicast addresses must be enabled/disabled.
+ *  Return value:
+ *  void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+	int i;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+	unsigned int value = 0;
+	u32 hash_filter[XGMAC_NUM_HASH];
+	int reg = 1;
+	struct netdev_hw_addr *ha;
+	bool use_hash = false;
+
+	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+		 netdev_mc_count(dev), netdev_uc_count(dev));
+
+	if (dev->flags & IFF_PROMISC) {
+		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
+		return;
+	}
+
+	memset(hash_filter, 0, sizeof(hash_filter));
+
+	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+		use_hash = true;
+		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+	}
+	netdev_for_each_uc_addr(ha, dev) {
+		if (use_hash) {
+			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+			/* The most significant 4 bits determine which of the
+			 * 16 hash registers to use, while the lower 5 bits
+			 * determine the bit within the register. */
+			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		} else {
+			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		value |= XGMAC_FRAME_FILTER_PM;
+		goto out;
+	}
+
+	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+		use_hash = true;
+		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	}
+	netdev_for_each_mc_addr(ha, dev) {
+		if (use_hash) {
+			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+			/* The most significant 4 bits determine which of the
+			 * 16 hash registers to use, while the lower 5 bits
+			 * determine the bit within the register. */
+			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		} else {
+			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+
+out:
+	for (i = 0; i < XGMAC_NUM_HASH; i++)
+		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+	writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
+/**
+ *  xgmac_change_mtu - entry point to change MTU size for the device.
+ *  @dev : device pointer.
+ *  @new_mtu : the new MTU size for the device.
+ *  Description: the Maximum Transmission Unit (MTU) is used by the network
+ *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
+ *  (ETH_DATA_LEN). This value can be changed with ifconfig.
+ *  Return value:
+ *  0 on success and a negative errno value on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	int old_mtu;
+
+	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+		return -EINVAL;
+	}
+
+	old_mtu = dev->mtu;
+	dev->mtu = new_mtu;
+
+	/* return early if the buffer sizes will not change */
+	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+		return 0;
+	if (old_mtu == new_mtu)
+		return 0;
+
+	/* Stop everything, get ready to change the MTU */
+	if (!netif_running(dev))
+		return 0;
+
+	/* Bring the interface down and then back up */
+	xgmac_stop(dev);
+	return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+	u32 intr_status;
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+
+	intr_status = readl(ioaddr + XGMAC_INT_STAT);
+	if (intr_status & XGMAC_INT_STAT_PMT) {
+		netdev_dbg(priv->dev, "received Magic frame\n");
+		/* clear the PMT bits 5 and 6 by reading the PMT */
+		readl(ioaddr + XGMAC_PMT);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+	u32 intr_status;
+	bool tx_err = false;
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	struct xgmac_extra_stats *x = &priv->xstats;
+
+	/* read the status register (CSR5) */
+	intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+	intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+	writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+	/* Handle ABNORMAL interrupts (DMA process states, CSR5 register) */
+	if (unlikely(intr_status & DMA_STATUS_AIS)) {
+		if (intr_status & DMA_STATUS_TJT) {
+			netdev_err(priv->dev, "transmit jabber\n");
+			x->tx_jabber++;
+		}
+		if (intr_status & DMA_STATUS_RU)
+			x->rx_buf_unav++;
+		if (intr_status & DMA_STATUS_RPS) {
+			netdev_err(priv->dev, "receive process stopped\n");
+			x->rx_process_stopped++;
+		}
+		if (intr_status & DMA_STATUS_ETI) {
+			netdev_err(priv->dev, "transmit early interrupt\n");
+			x->tx_early++;
+		}
+		if (intr_status & DMA_STATUS_TPS) {
+			netdev_err(priv->dev, "transmit process stopped\n");
+			x->tx_process_stopped++;
+			tx_err = true;
+		}
+		if (intr_status & DMA_STATUS_FBI) {
+			netdev_err(priv->dev, "fatal bus error\n");
+			x->fatal_bus_error++;
+			tx_err = true;
+		}
+
+		if (tx_err)
+			xgmac_tx_err(priv);
+	}
+
+	/* TX/RX NORMAL interrupts */
+	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+		writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+		napi_schedule(&priv->napi);
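+		/* RX/TX-complete interrupts stay masked until xgmac_poll()
+		 * finishes under budget and restores DMA_INTR_DEFAULT_MASK. */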
+	}
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void xgmac_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	xgmac_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+		       struct rtnl_link_stats64 *storage)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *base = priv->base;
+	u32 count;
+
+	spin_lock_bh(&priv->stats_lock);
+	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
+	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+	storage->tx_packets = count;
+	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+	writel(0, base + XGMAC_MMC_CTRL);
+	spin_unlock_bh(&priv->stats_lock);
+	return storage;
+}
+
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+	return 0;
+}
+
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+	u32 ctrl;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+	u32 changed = dev->features ^ features;
+
+	if (!(changed & NETIF_F_RXCSUM))
+		return 0;
+
+	ctrl = readl(ioaddr + XGMAC_CONTROL);
+	if (features & NETIF_F_RXCSUM)
+		ctrl |= XGMAC_CONTROL_IPC;
+	else
+		ctrl &= ~XGMAC_CONTROL_IPC;
+	writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+	return 0;
+}
+
+static const struct net_device_ops xgmac_netdev_ops = {
+	.ndo_open = xgmac_open,
+	.ndo_start_xmit = xgmac_xmit,
+	.ndo_stop = xgmac_stop,
+	.ndo_change_mtu = xgmac_change_mtu,
+	.ndo_set_rx_mode = xgmac_set_rx_mode,
+	.ndo_tx_timeout = xgmac_tx_timeout,
+	.ndo_get_stats64 = xgmac_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = xgmac_poll_controller,
+#endif
+	.ndo_set_mac_address = xgmac_set_mac_address,
+	.ndo_set_features = xgmac_set_features,
+};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+					  struct ethtool_cmd *cmd)
+{
+	cmd->autoneg = 0;
+	cmd->duplex = DUPLEX_FULL;
+	ethtool_cmd_speed_set(cmd, 10000);
+	cmd->supported = 0;
+	cmd->advertising = 0;
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+				      struct ethtool_pauseparam *pause)
+{
+	struct xgmac_priv *priv = netdev_priv(netdev);
+
+	pause->rx_pause = priv->rx_pause;
+	pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+				     struct ethtool_pauseparam *pause)
+{
+	struct xgmac_priv *priv = netdev_priv(netdev);
+
+	if (pause->autoneg)
+		return -EINVAL;
+
+	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+struct xgmac_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_offset;
+	bool is_reg;
+};
+
+#define XGMAC_STAT(m)	\
+	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
+#define XGMAC_HW_STAT(m, reg_offset)	\
+	{ #m, reg_offset, true }
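+
+/* XGMAC_HW_STAT entries hold a hardware MMC register offset that is read
+ * with readl(), while XGMAC_STAT entries hold an offset into the software
+ * counters in struct xgmac_priv (see xgmac_get_ethtool_stats()). */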
+
+static const struct xgmac_stats xgmac_gstrings_stats[] = {
+	XGMAC_STAT(tx_frame_flushed),
+	XGMAC_STAT(tx_payload_error),
+	XGMAC_STAT(tx_ip_header_error),
+	XGMAC_STAT(tx_local_fault),
+	XGMAC_STAT(tx_remote_fault),
+	XGMAC_STAT(tx_early),
+	XGMAC_STAT(tx_process_stopped),
+	XGMAC_STAT(tx_jabber),
+	XGMAC_STAT(rx_buf_unav),
+	XGMAC_STAT(rx_process_stopped),
+	XGMAC_STAT(rx_payload_error),
+	XGMAC_STAT(rx_ip_header_error),
+	XGMAC_STAT(rx_da_filter_fail),
+	XGMAC_STAT(rx_sa_filter_fail),
+	XGMAC_STAT(fatal_bus_error),
+	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
+	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
+	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
+	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
+	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
+};
+#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+					 struct ethtool_stats *dummy,
+					 u64 *data)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void *p = priv;
+	int i;
+
+	for (i = 0; i < XGMAC_STATS_LEN; i++) {
+		if (xgmac_gstrings_stats[i].is_reg)
+			*data++ = readl(priv->base +
+				xgmac_gstrings_stats[i].stat_offset);
+		else
+			*data++ = *(u32 *)(p +
+				xgmac_gstrings_stats[i].stat_offset);
+	}
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return XGMAC_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+				   u8 *data)
+{
+	int i;
+	u8 *p = data;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < XGMAC_STATS_LEN; i++) {
+			memcpy(p, xgmac_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+			       struct ethtool_wolinfo *wol)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	if (device_can_wakeup(priv->device)) {
+		wol->supported = WAKE_MAGIC | WAKE_UCAST;
+		wol->wolopts = priv->wolopts;
+	}
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+			      struct ethtool_wolinfo *wol)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+	if (!device_can_wakeup(priv->device))
+		return -ENOTSUPP;
+
+	if (wol->wolopts & ~support)
+		return -EINVAL;
+
+	priv->wolopts = wol->wolopts;
+
+	if (wol->wolopts) {
+		device_set_wakeup_enable(priv->device, 1);
+		enable_irq_wake(dev->irq);
+	} else {
+		device_set_wakeup_enable(priv->device, 0);
+		disable_irq_wake(dev->irq);
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops xgmac_ethtool_ops = {
+	.get_settings = xgmac_ethtool_getsettings,
+	.get_link = ethtool_op_get_link,
+	.get_pauseparam = xgmac_get_pauseparam,
+	.set_pauseparam = xgmac_set_pauseparam,
+	.get_ethtool_stats = xgmac_get_ethtool_stats,
+	.get_strings = xgmac_get_strings,
+	.get_wol = xgmac_get_wol,
+	.set_wol = xgmac_set_wol,
+	.get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	struct net_device *ndev = NULL;
+	struct xgmac_priv *priv = NULL;
+	u32 uid;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name))
+		return -EBUSY;
+
+	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	priv = netdev_priv(ndev);
+	platform_set_drvdata(pdev, ndev);
+	ether_setup(ndev);
+	ndev->netdev_ops = &xgmac_netdev_ops;
+	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+	spin_lock_init(&priv->stats_lock);
+
+	priv->device = &pdev->dev;
+	priv->dev = ndev;
+	priv->rx_pause = 1;
+	priv->tx_pause = 1;
+
+	priv->base = ioremap(res->start, resource_size(res));
+	if (!priv->base) {
+		netdev_err(ndev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_io;
+	}
+
+	uid = readl(priv->base + XGMAC_VERSION);
+	netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
+	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+	ndev->irq = platform_get_irq(pdev, 0);
+	if (ndev->irq == -ENXIO) {
+		netdev_err(ndev, "No irq resource\n");
+		ret = ndev->irq;
+		goto err_irq;
+	}
+
+	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+			  dev_name(&pdev->dev), ndev);
+	if (ret < 0) {
+		netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+			ndev->irq, ret);
+		goto err_irq;
+	}
+
+	priv->pmt_irq = platform_get_irq(pdev, 1);
+	if (priv->pmt_irq == -ENXIO) {
+		netdev_err(ndev, "No pmt irq resource\n");
+		ret = priv->pmt_irq;
+		goto err_pmt_irq;
+	}
+
+	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+			  dev_name(&pdev->dev), ndev);
+	if (ret < 0) {
+		netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+			priv->pmt_irq, ret);
+		goto err_pmt_irq;
+	}
+
+	device_set_wakeup_capable(&pdev->dev, 1);
+	if (device_can_wakeup(priv->device))
+		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				     NETIF_F_RXCSUM;
+	ndev->features |= ndev->hw_features;
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* Get the MAC address */
+	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		netdev_warn(ndev, "MAC address %pM not valid",
+			 ndev->dev_addr);
+
+	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+	ret = register_netdev(ndev);
+	if (ret)
+		goto err_reg;
+
+	return 0;
+
+err_reg:
+	netif_napi_del(&priv->napi);
+	free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+	free_irq(ndev->irq, ndev);
+err_irq:
+	iounmap(priv->base);
+err_io:
+	free_netdev(ndev);
+err_alloc:
+	release_mem_region(res->start, resource_size(res));
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+/**
+ * xgmac_remove
+ * @pdev: platform device pointer
+ * Description: this function disables the MAC RX/TX, frees the IRQ lines,
+ * unregisters the net device and unmaps the allocated memory.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct xgmac_priv *priv = netdev_priv(ndev);
+	struct resource *res;
+
+	xgmac_mac_disable(priv->base);
+
+	/* Free the IRQ lines */
+	free_irq(ndev->irq, ndev);
+	free_irq(priv->pmt_irq, ndev);
+
+	platform_set_drvdata(pdev, NULL);
+	unregister_netdev(ndev);
+	netif_napi_del(&priv->napi);
+
+	iounmap(priv->base);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	free_netdev(ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+	unsigned int pmt = 0;
+
+	if (mode & WAKE_MAGIC)
+		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+	if (mode & WAKE_UCAST)
+		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+	writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+	struct xgmac_priv *priv = netdev_priv(ndev);
+	u32 value;
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	netif_device_detach(ndev);
+	napi_disable(&priv->napi);
+	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+	if (device_may_wakeup(priv->device)) {
+		/* Stop TX/RX DMA Only */
+		value = readl(priv->base + XGMAC_DMA_CONTROL);
+		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+		writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+		xgmac_pmt(priv->base, priv->wolopts);
+	} else
+		xgmac_mac_disable(priv->base);
+
+	return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+	struct xgmac_priv *priv = netdev_priv(ndev);
+	void __iomem *ioaddr = priv->base;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	xgmac_pmt(ioaddr, 0);
+
+	/* Enable the MAC and DMA */
+	xgmac_mac_enable(ioaddr);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+	netif_device_attach(ndev);
+	napi_enable(&priv->napi);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
+#define XGMAC_PM_OPS (&xgmac_pm_ops)
+#else
+#define XGMAC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgmac_of_match[] = {
+	{ .compatible = "calxeda,hb-xgmac", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+	.driver = {
+		.name = "calxedaxgmac",
+		.of_match_table = xgmac_of_match,
+	},
+	.probe = xgmac_probe,
+	.remove = xgmac_remove,
+	.driver.pm = XGMAC_PM_OPS,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index ca26d97..1d17c92 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -434,10 +434,10 @@
 {
 	struct adapter *adapter = dev->ml_priv;
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->fw_version, "N/A");
-	strcpy(info->bus_info, pci_name(adapter->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
 }
 
 static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@
 	return 0;
 }
 
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@
 	return features;
 }
 
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
 {
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 	struct adapter *adapter = dev->ml_priv;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index f9b6023..47a8435 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -742,7 +742,7 @@
 /*
  * Enable/disable VLAN acceleration.
  */
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
 {
 	struct sge *sge = adapter->sge;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index e03980b..b9bf16b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -79,7 +79,7 @@
 int t1_poll(struct napi_struct *, int);
 
 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
 void t1_sge_start(struct sge *);
 void t1_sge_stop(struct sge *);
 int t1_sge_intr_error_handler(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 4d15c8f..857cc25 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1576,12 +1576,11 @@
 	t3_get_tp_version(adapter, &tp_vers);
 	spin_unlock(&adapter->stats_lock);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(adapter->pdev));
-	if (!fw_vers)
-		strcpy(info->fw_version, "N/A");
-	else {
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
+	if (fw_vers)
 		snprintf(info->fw_version, sizeof(info->fw_version),
 			 "%s %u.%u.%u TP %u.%u.%u",
 			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@
 			 G_TP_VERSION_MAJOR(tp_vers),
 			 G_TP_VERSION_MINOR(tp_vers),
 			 G_TP_VERSION_MICRO(tp_vers));
-	}
 }
 
 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@
 	}
 }
 
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@
 	t3_synchronize_rx(adapter, pi);
 }
 
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@
 	return features;
 }
 
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		cxgb_vlan_mode(dev, features);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 90ff131..65e4b28 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -969,7 +969,7 @@
 	case (NETEVENT_REDIRECT):{
 		struct netevent_redirect *nr = ctx;
 		cxgb_redirect(nr->old, nr->new);
-		cxgb_neigh_update(dst_get_neighbour(nr->new));
+		cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
 		break;
 	}
 	default:
@@ -1072,8 +1072,11 @@
 
 static void cxgb_neigh_update(struct neighbour *neigh)
 {
-	struct net_device *dev = neigh->dev;
+	struct net_device *dev;
 
+	if (!neigh)
+		return;
+	dev = neigh->dev;
 	if (dev && (is_offloading(dev))) {
 		struct t3cdev *tdev = dev2t3cdev(dev);
 
@@ -1107,6 +1110,7 @@
 static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 {
 	struct net_device *olddev, *newdev;
+	struct neighbour *n;
 	struct tid_info *ti;
 	struct t3cdev *tdev;
 	u32 tid;
@@ -1114,8 +1118,16 @@
 	struct l2t_entry *e;
 	struct t3c_tid_entry *te;
 
-	olddev = dst_get_neighbour(old)->dev;
-	newdev = dst_get_neighbour(new)->dev;
+	n = dst_get_neighbour_noref(old);
+	if (!n)
+		return;
+	olddev = n->dev;
+
+	n = dst_get_neighbour_noref(new);
+	if (!n)
+		return;
+	newdev = n->dev;
+
 	if (!is_offloading(olddev))
 		return;
 	if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@
 	}
 
 	/* Add new L2T entry */
-	e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+	e = t3_l2t_get(tdev, new, newdev);
 	if (!e) {
 		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
 		       __func__);
@@ -1301,7 +1313,7 @@
 
 out_free_l2t:
 	t3_free_l2t(L2DATA(dev));
-	rcu_assign_pointer(dev->l2opt, NULL);
+	RCU_INIT_POINTER(dev->l2opt, NULL);
 out_free:
 	kfree(t);
 	return err;
@@ -1329,7 +1341,7 @@
 	rcu_read_lock();
 	d = L2DATA(tdev);
 	rcu_read_unlock();
-	rcu_assign_pointer(tdev->l2opt, NULL);
+	RCU_INIT_POINTER(tdev->l2opt, NULL);
 	call_rcu(&d->rcu_head, clean_l2_data);
 	if (t->nofail_skb)
 		kfree_skb(t->nofail_skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 70fec8b..3fa3c88 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -298,18 +298,31 @@
 	spin_unlock(&e->lock);
 }
 
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
 			     struct net_device *dev)
 {
 	struct l2t_entry *e = NULL;
+	struct neighbour *neigh;
+	struct port_info *p;
 	struct l2t_data *d;
 	int hash;
-	u32 addr = *(u32 *) neigh->primary_key;
-	int ifidx = neigh->dev->ifindex;
-	struct port_info *p = netdev_priv(dev);
-	int smt_idx = p->port_id;
+	u32 addr;
+	int ifidx;
+	int smt_idx;
 
 	rcu_read_lock();
+	neigh = dst_get_neighbour_noref(dst);
+	if (!neigh)
+		goto done_rcu;
+
+	addr = *(u32 *) neigh->primary_key;
+	ifidx = neigh->dev->ifindex;
+
+	if (!dev)
+		dev = neigh->dev;
+	p = netdev_priv(dev);
+	smt_idx = p->port_id;
+
 	d = L2DATA(cdev);
 	if (!d)
 		goto done_rcu;
@@ -323,7 +336,7 @@
 			l2t_hold(d, e);
 			if (atomic_read(&e->refcnt) == 1)
 				reuse_entry(e, neigh);
-			goto done;
+			goto done_unlock;
 		}
 
 	/* Need to allocate a new entry */
@@ -344,7 +357,7 @@
 			e->vlan = VLAN_NONE;
 		spin_unlock(&e->lock);
 	}
-done:
+done_unlock:
 	write_unlock_bh(&d->lock);
 done_rcu:
 	rcu_read_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c5f5479..c4e8643 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -109,7 +109,7 @@
 
 void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
 void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
 			     struct net_device *dev);
 int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
 		     struct l2t_entry *e);
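  The l2t/offload changes above follow the dst_get_neighbour() ->
  dst_get_neighbour_noref() transition: the neighbour hanging off a dst_entry is
  now looked up without taking a reference, is only stable under RCU, and may
  legitimately be NULL, which is why every caller grows a NULL check. A minimal
  sketch of the access pattern (foo_* is illustrative):

	#include <net/dst.h>
	#include <net/neighbour.h>
	#include <linux/rcupdate.h>

	static int foo_dst_ifindex(struct dst_entry *dst)
	{
		struct neighbour *n;
		int ifidx = -1;

		rcu_read_lock();
		n = dst_get_neighbour_noref(dst);
		if (n)
			ifidx = n->dev->ifindex;	/* only valid under RCU */
		rcu_read_unlock();

		return ifidx;
	}
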
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4c8f42a..e83d12c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -243,7 +243,7 @@
 MODULE_PARM_DESC(intr_cnt,
 		 "thresholds 1..3 for queue interrupt packet counters");
 
-static int vf_acls;
+static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
 module_param(vf_acls, bool, 0644);
@@ -1002,13 +1002,12 @@
 {
 	struct adapter *adapter = netdev2adap(dev);
 
-	strcpy(info->driver, KBUILD_MODNAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(adapter->pdev));
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
 
-	if (!adapter->params.fw_vers)
-		strcpy(info->fw_version, "N/A");
-	else
+	if (adapter->params.fw_vers)
 		snprintf(info->fw_version, sizeof(info->fw_version),
 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1854,10 @@
 	return err;
 }
 
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
 	const struct port_info *pi = netdev_priv(dev);
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 	int err;
 
 	if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -1872,30 +1871,30 @@
 	return err;
 }
 
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
 {
 	const struct port_info *pi = netdev_priv(dev);
-	unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
 
-	p->size = pi->rss_size;
+	return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	unsigned int n = pi->rss_size;
+
 	while (n--)
-		p->ring_index[n] = pi->rss[n];
+		p[n] = pi->rss[n];
 	return 0;
 }
 
-static int set_rss_table(struct net_device *dev,
-			 const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
 {
 	unsigned int i;
 	struct port_info *pi = netdev_priv(dev);
 
-	if (p->size != pi->rss_size)
-		return -EINVAL;
-	for (i = 0; i < p->size; i++)
-		if (p->ring_index[i] >= pi->nqsets)
-			return -EINVAL;
-	for (i = 0; i < p->size; i++)
-		pi->rss[i] = p->ring_index[i];
+	for (i = 0; i < pi->rss_size; i++)
+		pi->rss[i] = p[i];
 	if (pi->adapter->flags & FULL_INIT_DONE)
 		return write_rss(pi, pi->rss);
 	return 0;
@@ -1964,7 +1963,7 @@
 	return -EOPNOTSUPP;
 }
 
-static struct ethtool_ops cxgb_ethtool_ops = {
+static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_settings      = get_settings,
 	.set_settings      = set_settings,
 	.get_drvinfo       = get_drvinfo,
@@ -1990,6 +1989,7 @@
 	.get_wol           = get_wol,
 	.set_wol           = set_wol,
 	.get_rxnfc         = get_rxnfc,
+	.get_rxfh_indir_size = get_rss_table_size,
 	.get_rxfh_indir    = get_rss_table,
 	.set_rxfh_indir    = set_rss_table,
 	.flash_device      = set_flash,
@@ -3449,7 +3449,7 @@
 		if (!pi->rss)
 			return -ENOMEM;
 		for (j = 0; j < pi->rss_size; j++)
-			pi->rss[j] = j % pi->nqsets;
+			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
 	}
 	return 0;
 }
@@ -3537,7 +3537,7 @@
 {
 	int func, i, err;
 	struct port_info *pi;
-	unsigned int highdma = 0;
+	bool highdma = false;
 	struct adapter *adapter = NULL;
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3563,7 @@
 	}
 
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		highdma = NETIF_F_HIGHDMA;
+		highdma = true;
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3637,9 @@
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
 			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		netdev->features |= netdev->hw_features | highdma;
+		if (highdma)
+			netdev->hw_features |= NETIF_F_HIGHDMA;
+		netdev->features |= netdev->hw_features;
 		netdev->vlan_features = netdev->features & VLAN_FEAT;
 
 		netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 140254c..2dae795 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -491,7 +491,7 @@
 	__be64 *d = &q->desc[q->pidx];
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-	gfp |= __GFP_NOWARN;         /* failures are expected */
+	gfp |= __GFP_NOWARN | __GFP_COLD;
 
 #if FL_PG_ORDER > 0
 	/*
@@ -528,7 +528,7 @@
 #endif
 
 	while (n--) {
-		pg = __netdev_alloc_page(adap->port[0], gfp);
+		pg = alloc_page(gfp);
 		if (unlikely(!pg)) {
 			q->alloc_failed++;
 			break;
@@ -537,7 +537,7 @@
 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
 				       PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-			netdev_free_page(adap->port[0], pg);
+			put_page(pg);
 			goto out;
 		}
 		*d++ = cpu_to_be64(mapping);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index da9072b..e53365a7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1092,7 +1092,8 @@
 	return ret;
 }
 
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@
 	return features;
 }
 
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct port_info *pi = netdev_priv(dev);
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@
 {
 	struct adapter *adapter = netdev2adap(dev);
 
-	strcpy(drvinfo->driver, KBUILD_MODNAME);
-	strcpy(drvinfo->version, DRV_VERSION);
-	strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+		sizeof(drvinfo->bus_info));
 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
 		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
@@ -1561,7 +1564,7 @@
  */
 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 
-static struct ethtool_ops cxgb4vf_ethtool_ops = {
+static const struct ethtool_ops cxgb4vf_ethtool_ops = {
 	.get_settings		= cxgb4vf_get_settings,
 	.get_drvinfo		= cxgb4vf_get_drvinfo,
 	.get_msglevel		= cxgb4vf_get_msglevel,
@@ -2000,7 +2003,7 @@
  */
 struct cxgb4vf_debugfs_entry {
 	const char *name;		/* name of debugfs node */
-	mode_t mode;			/* file system mode */
+	umode_t mode;			/* file system mode */
 	const struct file_operations *fops;
 };
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8d5d55a..0bd585b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -653,8 +653,7 @@
 
 alloc_small_pages:
 	while (n--) {
-		page = __netdev_alloc_page(adapter->port[0],
-					   gfp | __GFP_NOWARN);
+		page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
 		if (unlikely(!page)) {
 			fl->alloc_failed++;
 			break;
@@ -664,7 +663,7 @@
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
 				       PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-			netdev_free_page(adapter->port[0], page);
+			put_page(page);
 			break;
 		}
 		*d++ = cpu_to_be64(dma_addr);
@@ -684,7 +683,7 @@
 	/*
 	 * Update our accounting state to incorporate the new Free List
 	 * buffers, tell the hardware about them and return the number of
-	 * bufers which we were able to allocate.
+	 * buffers which we were able to allocate.
 	 */
 	cred = fl->avail - cred;
 	fl->pend_cred += cred;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index fd6247b..bf0fc56 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,23 +212,29 @@
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct enic *enic = netdev_priv(netdev);
+	int err;
 
 	spin_lock(&enic->devcmd_lock);
-	enic_add_vlan(enic, vid);
+	err = enic_add_vlan(enic, vid);
 	spin_unlock(&enic->devcmd_lock);
+
+	return err;
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct enic *enic = netdev_priv(netdev);
+	int err;
 
 	spin_lock(&enic->devcmd_lock);
-	enic_del_vlan(enic, vid);
+	err = enic_del_vlan(enic, vid);
 	spin_unlock(&enic->devcmd_lock);
+
+	return err;
 }
 
 int enic_dev_enable2(struct enic *enic, int active)
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 1f83a47..da1cba3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@
 	int broadcast, int promisc, int allmulti);
 int enic_dev_add_addr(struct enic *enic, u8 *addr);
 int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 int enic_dev_notify_unset(struct enic *enic);
 int enic_dev_hang_notify(struct enic *enic);
 int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
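  The enic prototype changes follow the switch of ndo_vlan_rx_add_vid() and
  ndo_vlan_rx_kill_vid() to return int, so a failed device command can be
  propagated to the VLAN core instead of being dropped on the floor, as the
  enic_dev.c hunk above now does. A minimal hedged sketch of the resulting
  handler shape (foo_* names and the hardware helper are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	struct foo_priv {
		spinlock_t cmd_lock;
	};

	/* stand-in for the real firmware/devcmd call */
	static int foo_hw_vlan_add(struct foo_priv *priv, u16 vid)
	{
		return 0;
	}

	static int foo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
	{
		struct foo_priv *priv = netdev_priv(netdev);
		int err;

		spin_lock(&priv->cmd_lock);
		err = foo_hw_vlan_add(priv, vid);
		spin_unlock(&priv->cmd_lock);

		return err;	/* 0 or -errno is now visible to the stack */
	}
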
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c3786fd..2fd9db4 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -217,11 +217,11 @@
 
 	enic_dev_fw_info(enic, &fw_info);
 
-	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-	strncpy(drvinfo->fw_version, fw_info->fw_version,
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, fw_info->fw_version,
 		sizeof(drvinfo->fw_version));
-	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
 		sizeof(drvinfo->bus_info));
 }
 
@@ -2379,7 +2379,7 @@
 
 #endif
 	/* Allocate structure for port profiles */
-	enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
 	if (!enic->pp) {
 		pr_err("port profile alloc failed, aborting\n");
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 2a22f52..f801754 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -474,10 +474,11 @@
 	return mii_nway_restart(&dm->mii);
 }
 
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	board_info_t *dm = to_dm9000_board(dev);
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 	unsigned long flags;
 
 	if (!(changed & NETIF_F_RXCSUM))
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1427739..1eb46a0 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1598,9 +1598,9 @@
 {
 	struct de_private *de = netdev_priv(dev);
 
-	strcpy (info->driver, DRV_NAME);
-	strcpy (info->version, DRV_VERSION);
-	strcpy (info->bus_info, pci_name(de->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
 	info->eedump_len = DE_EEPROM_SIZE;
 }
 
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 871bcaa..4d71f5a 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2127,14 +2127,9 @@
     u_long iobase = 0;                     /* Clear upper 32 bits in Alphas */
     int i, j;
     struct de4x5_private *lp = netdev_priv(dev);
-    struct list_head *walk;
+    struct pci_dev *this_dev;
 
-    list_for_each(walk, &pdev->bus_list) {
-	struct pci_dev *this_dev = pci_dev_b(walk);
-
-	/* Skip the pci_bus list entry */
-	if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
-
+    list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
 	vendor = this_dev->vendor;
 	device = this_dev->device << 8;
 	if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@
     struct de4x5_private *lp = netdev_priv(dev);
     char *p, *q, t;
 
-    lp->params.fdx = 0;
+    lp->params.fdx = false;
     lp->params.autosense = AUTO;
 
     if (args == NULL) return;
@@ -5206,7 +5201,7 @@
 	t = *q;
 	*q = '\0';
 
-	if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+	if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
 
 	if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
 	    if (strstr(p, "TP")) {
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 17b11ee..51f7542 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1085,10 +1085,11 @@
 {
 	struct dmfe_board_info *np = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	if (np->pdev)
-		strcpy(info->bus_info, pci_name(np->pdev));
+		strlcpy(info->bus_info, pci_name(np->pdev),
+			sizeof(info->bus_info));
 	else
 		sprintf(info->bus_info, "EISA 0x%lx %d",
 			dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9656dd0..4eb0d761 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -871,9 +871,9 @@
 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct tulip_private *np = netdev_priv(dev);
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(np->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 7a44a7a..48b0b65 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -960,10 +960,11 @@
 {
 	struct uli526x_board_info *np = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	if (np->pdev)
-		strcpy(info->bus_info, pci_name(np->pdev));
+		strlcpy(info->bus_info, pci_name(np->pdev),
+			sizeof(info->bus_info));
 	else
 		sprintf(info->bus_info, "EISA 0x%lx %d",
 			dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d01219..52da7b2 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1390,9 +1390,9 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 
-	strcpy (info->driver, DRV_NAME);
-	strcpy (info->version, DRV_VERSION);
-	strcpy (info->bus_info, pci_name(np->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
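  Running through most of the get_drvinfo hunks above is the same conversion:
  strcpy()/strncpy() into the fixed-size ethtool_drvinfo fields becomes
  strlcpy(), which never writes past the destination and always NUL-terminates,
  silently truncating over-long driver, version or bus-info strings. A hedged
  sketch of the resulting helper (the foo names and strings are placeholders):

	#include <linux/string.h>
	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static void foo_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
	{
		/* bounded copies: truncation beats overflowing a 32-byte field */
		strlcpy(info->driver, "foo", sizeof(info->driver));
		strlcpy(info->version, "1.0", sizeof(info->version));
		strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	}
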
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index 23a6539..c24fab1 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -59,7 +59,7 @@
 
 #include "de600.h"
 
-static unsigned int check_lost = 1;
+static bool check_lost = true;
 module_param(check_lost, bool, 0);
 MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
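  check_lost here (like vf_acls in cxgb4 earlier) changes from int to bool
  because the bool module-parameter type is being restricted to genuine bool
  backing variables. A minimal hedged example with a made-up parameter name:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static bool foo_check = true;	/* must be bool for the bool param type */
	module_param(foo_check, bool, 0444);
	MODULE_PARM_DESC(foo_check, "Enable the hypothetical sanity check");
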
 
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index dcd7f7a..28a3a9b 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1634,9 +1634,9 @@
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(np->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index c1063d1..ce88c0f 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -804,9 +804,9 @@
 static void dnet_get_drvinfo(struct net_device *dev,
 			     struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, "0");
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, "0", sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +977,7 @@
 	},
 };
 
-static int __init dnet_init(void)
-{
-	return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
-	platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Dave DNET Ethernet driver");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 644e8fe..cbdec25 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -40,6 +40,7 @@
 #define OC_NAME			"Emulex OneConnect 10Gbps NIC"
 #define OC_NAME_BE		OC_NAME	"(be3)"
 #define OC_NAME_LANCER		OC_NAME "(Lancer)"
+#define OC_NAME_SH		OC_NAME "(Skyhawk)"
 #define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID 		0x19a2
@@ -50,6 +51,7 @@
 #define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
 #define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
 #define OC_DEVICE_ID4           0xe228   /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -63,6 +65,8 @@
 		return OC_NAME_LANCER;
 	case BE_DEVICE_ID2:
 		return BE3_NAME;
+	case OC_DEVICE_ID5:
+		return OC_NAME_SH;
 	default:
 		return BE_NAME;
 	}
@@ -288,14 +292,14 @@
 };
 
 struct be_vf_cfg {
-	unsigned char vf_mac_addr[ETH_ALEN];
-	u32 vf_if_handle;
-	u32 vf_pmac_id;
-	u16 vf_vlan_tag;
-	u32 vf_tx_rate;
+	unsigned char mac_addr[ETH_ALEN];
+	int if_handle;
+	int pmac_id;
+	u16 vlan_tag;
+	u32 tx_rate;
 };
 
-#define BE_INVALID_PMAC_ID		0xffffffff
+#define BE_FLAGS_LINK_STATUS_INIT		1
 
 struct be_adapter {
 	struct pci_dev *pdev;
@@ -345,13 +349,16 @@
 	struct delayed_work work;
 	u16 work_counter;
 
+	u32 flags;
 	/* Ethtool knobs and info */
 	char fw_ver[FW_VER_LEN];
-	u32 if_handle;		/* Used to configure filtering */
+	int if_handle;		/* Used to configure filtering */
 	u32 pmac_id;		/* MAC addr handle used by BE card */
 	u32 beacon_state;	/* for set_phys_id */
 
 	bool eeh_err;
+	bool ue_detected;
+	bool fw_timeout;
 	u32 port_num;
 	bool promiscuous;
 	bool wol;
@@ -359,7 +366,6 @@
 	u32 function_caps;
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
-	bool ue_detected;
 	bool stats_cmd_sent;
 	int link_speed;
 	u8 port_type;
@@ -369,16 +375,20 @@
 	u32 flash_status;
 	struct completion flash_compl;
 
-	bool be3_native;
-	bool sriov_enabled;
-	struct be_vf_cfg *vf_cfg;
+	u32 num_vfs;
 	u8 is_virtfn;
+	struct be_vf_cfg *vf_cfg;
+	bool be3_native;
 	u32 sli_family;
 	u8 hba_port_num;
 	u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
+#define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i)					\
+	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
+		i++, vf_cfg++)
 
 /* BladeEngine Generation numbers */
 #define BE_GEN2 2
@@ -524,9 +534,14 @@
 	return adapter->num_rx_qs > 1;
 }
 
+static inline bool be_error(struct be_adapter *adapter)
+{
+	return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
 		u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
 extern void be_parse_stats(struct be_adapter *adapter);
 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
 #endif				/* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2c7b366..0fcb456 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -31,11 +31,8 @@
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	u32 val = 0;
 
-	if (adapter->eeh_err) {
-		dev_info(&adapter->pdev->dev,
-			"Error in Card Detected! Cannot issue commands\n");
+	if (be_error(adapter))
 		return;
-	}
 
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -128,7 +125,14 @@
 static void be_async_link_state_process(struct be_adapter *adapter,
 		struct be_async_event_link_state *evt)
 {
-	be_link_status_update(adapter, evt->port_link_status);
+	/* When link status changes, link speed must be re-queried from FW */
+	adapter->link_speed = -1;
+
+	/* For the initial link status do not rely on the ASYNC event as
+	 * it may not be received in some cases.
+	 */
+	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+		be_link_status_update(adapter, evt->port_link_status);
 }
 
 /* Grp5 CoS Priority evt */
@@ -266,10 +270,10 @@
 	int i, num, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-	if (adapter->eeh_err)
-		return -EIO;
-
 	for (i = 0; i < mcc_timeout; i++) {
+		if (be_error(adapter))
+			return -EIO;
+
 		num = be_process_mcc(adapter, &status);
 		if (num)
 			be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +284,8 @@
 		udelay(100);
 	}
 	if (i == mcc_timeout) {
-		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+		dev_err(&adapter->pdev->dev, "FW not responding\n");
+		adapter->fw_timeout = true;
 		return -1;
 	}
 	return status;
@@ -298,26 +303,21 @@
 	int msecs = 0;
 	u32 ready;
 
-	if (adapter->eeh_err) {
-		dev_err(&adapter->pdev->dev,
-			"Error detected in card.Cannot issue commands\n");
-		return -EIO;
-	}
-
 	do {
+		if (be_error(adapter))
+			return -EIO;
+
 		ready = ioread32(db);
-		if (ready == 0xffffffff) {
-			dev_err(&adapter->pdev->dev,
-				"pci slot disconnected\n");
+		if (ready == 0xffffffff)
 			return -1;
-		}
 
 		ready &= MPU_MAILBOX_DB_RDY_MASK;
 		if (ready)
 			break;
 
 		if (msecs > 4000) {
-			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+			dev_err(&adapter->pdev->dev, "FW not responding\n");
+			adapter->fw_timeout = true;
 			be_detect_dump_ue(adapter);
 			return -1;
 		}
@@ -555,9 +555,6 @@
 	u8 *wrb;
 	int status;
 
-	if (adapter->eeh_err)
-		return -EIO;
-
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
 		return -1;
 
@@ -619,7 +616,7 @@
 
 /* Use MCC */
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-			u8 type, bool permanent, u32 if_handle)
+			u8 type, bool permanent, u32 if_handle, u32 pmac_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_mac_query *req;
@@ -641,6 +638,7 @@
 		req->permanent = 1;
 	} else {
 		req->if_id = cpu_to_le16((u16) if_handle);
+		req->pmac_id = cpu_to_le32(pmac_id);
 		req->permanent = 0;
 	}
 
@@ -695,12 +693,15 @@
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_pmac_del *req;
 	int status;
 
+	if (pmac_id == -1)
+		return 0;
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -923,10 +924,14 @@
 	void *ctxt;
 	int status;
 
-	if (mutex_lock_interruptible(&adapter->mbox_lock))
-		return -1;
+	spin_lock_bh(&adapter->mcc_lock);
 
-	wrb = wrb_from_mbox(adapter);
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
 	req = embedded_payload(wrb);
 	ctxt = &req->context;
 
@@ -952,14 +957,15 @@
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-	status = be_mbox_notify_wait(adapter);
+	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
 		txq->id = le16_to_cpu(resp->cid);
 		txq->created = true;
 	}
 
-	mutex_unlock(&adapter->mbox_lock);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
 
 	return status;
 }
@@ -1018,9 +1024,6 @@
 	u8 subsys = 0, opcode = 0;
 	int status;
 
-	if (adapter->eeh_err)
-		return -EIO;
-
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
 		return -1;
 
@@ -1136,16 +1139,13 @@
 }
 
 /* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_destroy *req;
 	int status;
 
-	if (adapter->eeh_err)
-		return -EIO;
-
-	if (!interface_id)
+	if (interface_id == -1)
 		return 0;
 
 	spin_lock_bh(&adapter->mcc_lock);
@@ -1239,7 +1239,7 @@
 
 /* Uses synchronous mcc */
 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
-			u16 *link_speed, u32 dom)
+			     u16 *link_speed, u8 *link_status, u32 dom)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_link_status *req;
@@ -1247,6 +1247,9 @@
 
 	spin_lock_bh(&adapter->mcc_lock);
 
+	if (link_status)
+		*link_status = LINK_DOWN;
+
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
 		status = -EBUSY;
@@ -1254,6 +1257,9 @@
 	}
 	req = embedded_payload(wrb);
 
+	if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+		req->hdr.version = 1;
+
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
 
@@ -1261,10 +1267,13 @@
 	if (!status) {
 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
 		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
-			*link_speed = le16_to_cpu(resp->link_speed);
+			if (link_speed)
+				*link_speed = le16_to_cpu(resp->link_speed);
 			if (mac_speed)
 				*mac_speed = resp->mac_speed;
 		}
+		if (link_status)
+			*link_status = resp->logical_link_status;
 	}
 
 err:
@@ -1673,8 +1682,9 @@
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_rss_config *req;
-	u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
-			0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+			0x3ea83c02, 0x4a110304};
 	int status;
 
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1846,53 @@
 	return status;
 }
 
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+		u32 data_size, u32 data_offset, const char *obj_name,
+		u32 *data_read, u32 *eof, u8 *addn_status)
+{
+	struct be_mcc_wrb *wrb;
+	struct lancer_cmd_req_read_object *req;
+	struct lancer_cmd_resp_read_object *resp;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err_unlock;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_READ_OBJECT,
+			sizeof(struct lancer_cmd_req_read_object), wrb,
+			NULL);
+
+	req->desired_read_len = cpu_to_le32(data_size);
+	req->read_offset = cpu_to_le32(data_offset);
+	strcpy(req->object_name, obj_name);
+	req->descriptor_count = cpu_to_le32(1);
+	req->buf_len = cpu_to_le32(data_size);
+	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+	status = be_mcc_notify_wait(adapter);
+
+	resp = embedded_payload(wrb);
+	if (!status) {
+		*data_read = le32_to_cpu(resp->actual_read_len);
+		*eof = le32_to_cpu(resp->eof);
+	} else {
+		*addn_status = resp->additional_status;
+	}
+
+err_unlock:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
@@ -2238,3 +2295,99 @@
 	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+							u32 *pmac_id)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_mac_list *req;
+	int status;
+	int mac_count;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+				wrb, NULL);
+
+	req->hdr.domain = domain;
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_mac_list *resp =
+						embedded_payload(wrb);
+		int i;
+		u8 *ctxt = &resp->context[0][0];
+		status = -EIO;
+		mac_count = resp->mac_count;
+		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+		for (i = 0; i < mac_count; i++) {
+			if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+					   act, ctxt)) {
+				*pmac_id = AMAP_GET_BITS
+					(struct amap_get_mac_list_context,
+					 macid, ctxt);
+				status = 0;
+				break;
+			}
+			ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+		}
+	}
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+			u8 mac_count, u32 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_set_mac_list *req;
+	int status;
+	struct be_dma_mem cmd;
+
+	memset(&cmd, 0, sizeof(struct be_dma_mem));
+	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+			&cmd.dma, GFP_KERNEL);
+	if (!cmd.va) {
+		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = cmd.va;
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+				wrb, &cmd);
+
+	req->hdr.domain = domain;
+	req->mac_count = mac_count;
+	if (mac_count)
+		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+	status = be_mcc_notify_wait(adapter);
+
+err:
+	dma_free_coherent(&adapter->pdev->dev, cmd.size,
+				cmd.va, cmd.dma);
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a35cd03..dca8924 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,9 @@
 #define OPCODE_COMMON_GET_PHY_DETAILS			102
 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP		103
 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
+#define OPCODE_COMMON_GET_MAC_LIST			147
+#define OPCODE_COMMON_SET_MAC_LIST			148
+#define OPCODE_COMMON_READ_OBJECT			171
 #define OPCODE_COMMON_WRITE_OBJECT			172
 
 #define OPCODE_ETH_RSS_CONFIG				1
@@ -294,6 +297,7 @@
 	u8 type;
 	u8 permanent;
 	u16 if_id;
+	u32 pmac_id;
 } __packed;
 
 struct be_cmd_resp_mac_query {
@@ -956,7 +960,8 @@
 	u8 mgmt_mac_duplex;
 	u8 mgmt_mac_speed;
 	u16 link_speed;
-	u32 rsvd0;
+	u8 logical_link_status;
+	u8 rsvd1[3];
 } __packed;
 
 /******************** Port Identification ***************************/
@@ -1161,6 +1166,38 @@
 	u32 actual_write_len;
 };
 
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK			(32*1024)
+#define LANCER_READ_FILE_EOF_MASK		0x80000000
+
+#define LANCER_FW_DUMP_FILE			"/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE			"/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE			"/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+	struct be_cmd_req_hdr hdr;
+	u32 desired_read_len;
+	u32 read_offset;
+	u8 object_name[104];
+	u32 descriptor_count;
+	u32 buf_len;
+	u32 addr_low;
+	u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+	u8 opcode;
+	u8 subsystem;
+	u8 rsvd1[2];
+	u8 status;
+	u8 additional_status;
+	u8 rsvd2[2];
+	u32 resp_len;
+	u32 actual_resp_len;
+	u32 actual_read_len;
+	u32 eof;
+};
+
 /************************ WOL *******************************/
 struct be_cmd_req_acpi_wol_magic_config{
 	struct be_cmd_req_hdr hdr;
@@ -1307,6 +1344,34 @@
 	u8 rsvd[212];
 };
 
+/******************** GET/SET_MACLIST  **************************/
+#define BE_MAX_MAC			64
+struct amap_get_mac_list_context {
+	u8 macid[31];
+	u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+	struct be_cmd_resp_hdr hdr;
+	u8 mac_count;
+	u8 rsvd1;
+	u16 rsvd2;
+	u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+	struct be_cmd_req_hdr hdr;
+	u8 mac_count;
+	u8 rsvd1;
+	u16 rsvd2;
+	struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
 /*************** HW Stats Get v1 **********************************/
 #define BE_TXP_SW_SZ			48
 struct be_port_rxf_stats_v1 {
@@ -1413,15 +1478,15 @@
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-			u8 type, bool permanent, u32 if_handle);
+			u8 type, bool permanent, u32 if_handle, u32 pmac_id);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 			u32 if_id, u32 *pmac_id, u32 domain);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
-			u32 pmac_id, u32 domain);
+			int pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
 			u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
 			u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
 			u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 			struct be_queue_info *eq, int eq_delay);
@@ -1443,8 +1508,8 @@
 			int type);
 extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
 			struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
-			u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+				    u16 *link_speed, u8 *link_status, u32 dom);
 extern int be_cmd_reset(struct be_adapter *adapter);
 extern int be_cmd_get_stats(struct be_adapter *adapter,
 			struct be_dma_mem *nonemb_cmd);
@@ -1480,6 +1545,9 @@
 				u32 data_size, u32 data_offset,
 				const char *obj_name,
 				u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+		u32 data_size, u32 data_offset, const char *obj_name,
+		u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
 				int offset);
 extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1574,8 @@
 extern int be_cmd_req_native_mode(struct be_adapter *adapter);
 extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
 extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+							u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+						u8 mac_count, u32 domain);
 
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bf8153e..6db6b6a 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -127,8 +127,8 @@
 	memset(fw_on_flash, 0 , sizeof(fw_on_flash));
 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
 
-	strcpy(drvinfo->driver, DRV_NAME);
-	strcpy(drvinfo->version, DRV_VER);
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
 	strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
 	if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
 		strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@
 		strcat(drvinfo->fw_version, "]");
 	}
 
-	strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->testinfo_len = 0;
 	drvinfo->regdump_len = 0;
 	drvinfo->eedump_len = 0;
 }
 
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+	u32 data_read = 0, eof;
+	u8 addn_status;
+	struct be_dma_mem data_len_cmd;
+	int status;
+
+	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+	/* data_offset and data_size should be 0 to get reg len */
+	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+				file_name, &data_read, &eof, &addn_status);
+
+	return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+		u32 buf_len, void *buf)
+{
+	struct be_dma_mem read_cmd;
+	u32 read_len = 0, total_read_len = 0, chunk_size;
+	u32 eof = 0;
+	u8 addn_status;
+	int status = 0;
+
+	read_cmd.size = LANCER_READ_FILE_CHUNK;
+	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+			&read_cmd.dma);
+
+	if (!read_cmd.va) {
+		dev_err(&adapter->pdev->dev,
+				"Memory allocation failure while reading dump\n");
+		return -ENOMEM;
+	}
+
+	while ((total_read_len < buf_len) && !eof) {
+		chunk_size = min_t(u32, (buf_len - total_read_len),
+				LANCER_READ_FILE_CHUNK);
+		chunk_size = ALIGN(chunk_size, 4);
+		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+				total_read_len, file_name, &read_len,
+				&eof, &addn_status);
+		if (!status) {
+			memcpy(buf + total_read_len, read_cmd.va, read_len);
+			total_read_len += read_len;
+			eof &= LANCER_READ_FILE_EOF_MASK;
+		} else {
+			status = -EIO;
+			break;
+		}
+	}
+	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+			read_cmd.dma);
+
+	return status;
+}
+
 static int
 be_get_reg_len(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	u32 log_size = 0;
 
-	if (be_physfn(adapter))
-		be_cmd_get_reg_len(adapter, &log_size);
-
+	if (be_physfn(adapter)) {
+		if (lancer_chip(adapter))
+			log_size = lancer_cmd_get_file_len(adapter,
+					LANCER_FW_DUMP_FILE);
+		else
+			be_cmd_get_reg_len(adapter, &log_size);
+	}
 	return log_size;
 }
 
@@ -161,7 +224,11 @@
 
 	if (be_physfn(adapter)) {
 		memset(buf, 0, regs->len);
-		be_cmd_get_regs(adapter, regs->len, buf);
+		if (lancer_chip(adapter))
+			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+					regs->len, buf);
+		else
+			be_cmd_get_regs(adapter, regs->len, buf);
 	}
 }
 
@@ -362,11 +429,14 @@
 	struct be_phy_info phy_info;
 	u8 mac_speed = 0;
 	u16 link_speed = 0;
+	u8 link_status;
 	int status;
 
 	if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
 		status = be_cmd_link_status_query(adapter, &mac_speed,
-						&link_speed, 0);
+						  &link_speed, &link_status, 0);
+		if (!status)
+			be_link_status_update(adapter, link_status);
 
 		/* link_speed is in units of 10 Mbps */
 		if (link_speed) {
@@ -453,16 +523,13 @@
 	return 0;
 }
 
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+			     struct ethtool_ringparam *ring)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	ring->rx_max_pending = adapter->rx_obj[0].q.len;
-	ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
-	ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
-	ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+	ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+	ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
 }
 
 static void
@@ -636,7 +703,7 @@
 	}
 
 	if (be_cmd_link_status_query(adapter, &mac_speed,
-				&qos_link_speed, 0) != 0) {
+				     &qos_link_speed, NULL, 0) != 0) {
 		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = -1;
 	} else if (!mac_speed) {
@@ -660,7 +727,17 @@
 static int
 be_get_eeprom_len(struct net_device *netdev)
 {
-	return BE_READ_SEEPROM_LEN;
+	struct be_adapter *adapter = netdev_priv(netdev);
+	if (lancer_chip(adapter)) {
+		if (be_physfn(adapter))
+			return lancer_cmd_get_file_len(adapter,
+					LANCER_VPD_PF_FILE);
+		else
+			return lancer_cmd_get_file_len(adapter,
+					LANCER_VPD_VF_FILE);
+	} else {
+		return BE_READ_SEEPROM_LEN;
+	}
 }
 
 static int
@@ -675,6 +752,15 @@
 	if (!eeprom->len)
 		return -EINVAL;
 
+	if (lancer_chip(adapter)) {
+		if (be_physfn(adapter))
+			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+					eeprom->len, data);
+		else
+			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+					eeprom->len, data);
+	}
+
 	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
 
 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bf266a0..6c46753 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -27,13 +27,14 @@
 MODULE_AUTHOR("ServerEngines Corporation");
 MODULE_LICENSE("GPL");
 
-static ushort rx_frag_size = 2048;
 static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
 module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
 
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,7 +239,8 @@
 		return -EADDRNOTAVAIL;
 
 	status = be_cmd_mac_addr_query(adapter, current_mac,
-			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+				MAC_ADDRESS_TYPE_NETWORK, false,
+				adapter->if_handle, 0);
 	if (status)
 		goto err;
 
@@ -315,6 +318,8 @@
 	struct be_drv_stats *drvs = &adapter->drv_stats;
 
 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
 	drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -491,19 +496,19 @@
 	return stats;
 }
 
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 {
 	struct net_device *netdev = adapter->netdev;
 
-	/* when link status changes, link speed must be re-queried from card */
-	adapter->link_speed = -1;
-	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
-		netif_carrier_on(netdev);
-		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
-	} else {
+	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
 		netif_carrier_off(netdev);
-		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
 	}
+
+	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+		netif_carrier_on(netdev);
+	else
+		netif_carrier_off(netdev);
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo,
@@ -549,11 +554,26 @@
 	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
 }
 
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+					struct sk_buff *skb)
+{
+	u8 vlan_prio;
+	u16 vlan_tag;
+
+	vlan_tag = vlan_tx_tag_get(skb);
+	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+	/* If vlan priority provided by OS is NOT in available bmap */
+	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+				adapter->recommended_prio;
+
+	return vlan_tag;
+}
+
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 		struct sk_buff *skb, u32 wrb_cnt, u32 len)
 {
-	u8 vlan_prio = 0;
-	u16 vlan_tag = 0;
+	u16 vlan_tag;
 
 	memset(hdr, 0, sizeof(*hdr));
 
@@ -584,12 +604,7 @@
 
 	if (vlan_tx_tag_present(skb)) {
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
-		vlan_tag = vlan_tx_tag_get(skb);
-		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-		/* If vlan priority provided by OS is NOT in available bmap */
-		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
-			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
-					adapter->recommended_prio;
+		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
 	}
 
@@ -692,6 +707,25 @@
 	u32 start = txq->head;
 	bool dummy_wrb, stopped = false;
 
+	/* For vlan tagged pkts, BE
+	 * 1) calculates checksum even when CSO is not requested
+	 * 2) calculates checksum wrongly for padded pkt less than
+	 * 60 bytes long.
+	 * As a workaround disable TX vlan offloading in such cases.
+	 */
+	if (unlikely(vlan_tx_tag_present(skb) &&
+		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+		skb = skb_share_check(skb, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto tx_drop;
+
+		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+		if (unlikely(!skb))
+			goto tx_drop;
+
+		skb->vlan_tci = 0;
+	}
+
 	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +753,7 @@
 		txq->head = start;
 		dev_kfree_skb_any(skb);
 	}
+tx_drop:
 	return NETDEV_TX_OK;
 }
 
@@ -746,15 +781,15 @@
  */
 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
+	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
 	u16 vtag[BE_NUM_VLANS_SUPPORTED];
 	u16 ntags = 0, i;
 	int status = 0;
-	u32 if_handle;
 
 	if (vf) {
-		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
-		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
-		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+					    1, 1, 0);
 	}
 
 	/* No need to further configure vids if in promiscuous mode */
@@ -779,31 +814,48 @@
 	return status;
 }
 
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
 
-	adapter->vlans_added++;
-	if (!be_physfn(adapter))
-		return;
+	if (!be_physfn(adapter)) {
+		status = -EINVAL;
+		goto ret;
+	}
 
 	adapter->vlan_tag[vid] = 1;
 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
-		be_vid_config(adapter, false, 0);
+		status = be_vid_config(adapter, false, 0);
+
+	if (!status)
+		adapter->vlans_added++;
+	else
+		adapter->vlan_tag[vid] = 0;
+ret:
+	return status;
 }
 
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
 
-	adapter->vlans_added--;
-
-	if (!be_physfn(adapter))
-		return;
+	if (!be_physfn(adapter)) {
+		status = -EINVAL;
+		goto ret;
+	}
 
 	adapter->vlan_tag[vid] = 0;
 	if (adapter->vlans_added <= adapter->max_vlans)
-		be_vid_config(adapter, false, 0);
+		status = be_vid_config(adapter, false, 0);
+
+	if (!status)
+		adapter->vlans_added--;
+	else
+		adapter->vlan_tag[vid] = 1;
+ret:
+	return status;
 }
 
 static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +892,30 @@
 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 	int status;
 
-	if (!adapter->sriov_enabled)
+	if (!sriov_enabled(adapter))
 		return -EPERM;
 
-	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
 		return -EINVAL;
 
-	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
-		status = be_cmd_pmac_del(adapter,
-					adapter->vf_cfg[vf].vf_if_handle,
-					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+	if (lancer_chip(adapter)) {
+		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
+	} else {
+		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+					 vf_cfg->pmac_id, vf + 1);
 
-	status = be_cmd_pmac_add(adapter, mac,
-				adapter->vf_cfg[vf].vf_if_handle,
-				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+					 &vf_cfg->pmac_id, vf + 1);
+	}
 
 	if (status)
 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
 				mac, vf);
 	else
-		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
 	return status;
 }
@@ -870,18 +924,19 @@
 			struct ifla_vf_info *vi)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 
-	if (!adapter->sriov_enabled)
+	if (!sriov_enabled(adapter))
 		return -EPERM;
 
-	if (vf >= num_vfs)
+	if (vf >= adapter->num_vfs)
 		return -EINVAL;
 
 	vi->vf = vf;
-	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
-	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+	vi->tx_rate = vf_cfg->tx_rate;
+	vi->vlan = vf_cfg->vlan_tag;
 	vi->qos = 0;
-	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 
 	return 0;
 }
@@ -892,17 +947,17 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status = 0;
 
-	if (!adapter->sriov_enabled)
+	if (!sriov_enabled(adapter))
 		return -EPERM;
 
-	if ((vf >= num_vfs) || (vlan > 4095))
+	if (vf >= adapter->num_vfs || vlan > 4095)
 		return -EINVAL;
 
 	if (vlan) {
-		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+		adapter->vf_cfg[vf].vlan_tag = vlan;
 		adapter->vlans_added++;
 	} else {
-		adapter->vf_cfg[vf].vf_vlan_tag = 0;
+		adapter->vf_cfg[vf].vlan_tag = 0;
 		adapter->vlans_added--;
 	}
 
@@ -920,21 +975,25 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status = 0;
 
-	if (!adapter->sriov_enabled)
+	if (!sriov_enabled(adapter))
 		return -EPERM;
 
-	if ((vf >= num_vfs) || (rate < 0))
+	if (vf >= adapter->num_vfs)
 		return -EINVAL;
 
-	if (rate > 10000)
-		rate = 10000;
+	if (rate < 100 || rate > 10000) {
+		dev_err(&adapter->pdev->dev,
+			"tx rate must be between 100 and 10000 Mbps\n");
+		return -EINVAL;
+	}
 
-	adapter->vf_cfg[vf].vf_tx_rate = rate;
 	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
 	if (status)
-		dev_info(&adapter->pdev->dev,
+		dev_err(&adapter->pdev->dev,
 				"tx rate %d on VF %d failed\n", rate, vf);
+	else
+		adapter->vf_cfg[vf].tx_rate = rate;
 	return status;
 }
 
@@ -1645,8 +1704,7 @@
 
 static int be_num_txqs_want(struct be_adapter *adapter)
 {
-	if ((num_vfs && adapter->sriov_enabled) ||
-		be_is_mc(adapter) ||
+	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
 		lancer_chip(adapter) || !be_physfn(adapter) ||
 		adapter->generation == BE_GEN2)
 		return 1;
@@ -1662,9 +1720,12 @@
 	u8 i;
 
 	adapter->num_tx_qs = be_num_txqs_want(adapter);
-	if (adapter->num_tx_qs != MAX_TX_QS)
+	if (adapter->num_tx_qs != MAX_TX_QS) {
+		rtnl_lock();
 		netif_set_real_num_tx_queues(adapter->netdev,
 			adapter->num_tx_qs);
+		rtnl_unlock();
+	}
 
 	adapter->tx_eq.max_eqd = 0;
 	adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1754,6 @@
 		if (be_queue_alloc(adapter, q, TX_Q_LEN,
 			sizeof(struct be_eth_wrb)))
 			goto err;
-
-		if (be_cmd_txq_create(adapter, q, cq))
-			goto err;
 	}
 	return 0;
 
@@ -1728,8 +1786,8 @@
 static u32 be_num_rxqs_want(struct be_adapter *adapter)
 {
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-		!adapter->sriov_enabled && be_physfn(adapter) &&
-		!be_is_mc(adapter)) {
+	     !sriov_enabled(adapter) && be_physfn(adapter) &&
+	     !be_is_mc(adapter)) {
 		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
 	} else {
 		dev_warn(&adapter->pdev->dev,
@@ -1929,6 +1987,7 @@
 	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
 	struct be_adapter *adapter =
 		container_of(tx_eq, struct be_adapter, tx_eq);
+	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 	struct be_tx_obj *txo;
 	struct be_eth_tx_compl *txcp;
 	int tx_compl, mcc_compl, status = 0;
@@ -1965,12 +2024,19 @@
 	mcc_compl = be_process_mcc(adapter, &status);
 
 	if (mcc_compl) {
-		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
 	}
 
 	napi_complete(napi);
 
+	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+		for_all_tx_queues(adapter, txo, i)
+			be_cq_notify(adapter, txo->cq.id, true, 0);
+
+		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+	}
+
 	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
 	adapter->drv_stats.tx_events++;
 	return 1;
@@ -1982,6 +2048,9 @@
 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
 	u32 i;
 
+	if (adapter->eeh_err || adapter->ue_detected)
+		return;
+
 	if (lancer_chip(adapter)) {
 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2077,8 @@
 		sliport_status & SLIPORT_STATUS_ERR_MASK) {
 		adapter->ue_detected = true;
 		adapter->eeh_err = true;
-		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+		dev_err(&adapter->pdev->dev,
+			"Unrecoverable error in the card\n");
 	}
 
 	if (ue_lo) {
@@ -2036,53 +2106,6 @@
 	}
 }
 
-static void be_worker(struct work_struct *work)
-{
-	struct be_adapter *adapter =
-		container_of(work, struct be_adapter, work.work);
-	struct be_rx_obj *rxo;
-	int i;
-
-	if (!adapter->ue_detected)
-		be_detect_dump_ue(adapter);
-
-	/* when interrupts are not yet enabled, just reap any pending
-	* mcc completions */
-	if (!netif_running(adapter->netdev)) {
-		int mcc_compl, status = 0;
-
-		mcc_compl = be_process_mcc(adapter, &status);
-
-		if (mcc_compl) {
-			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
-			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
-		}
-
-		goto reschedule;
-	}
-
-	if (!adapter->stats_cmd_sent) {
-		if (lancer_chip(adapter))
-			lancer_cmd_get_pport_stats(adapter,
-						&adapter->stats_cmd);
-		else
-			be_cmd_get_stats(adapter, &adapter->stats_cmd);
-	}
-
-	for_all_rx_queues(adapter, rxo, i) {
-		be_rx_eqd_update(adapter, rxo);
-
-		if (rxo->rx_post_starved) {
-			rxo->rx_post_starved = false;
-			be_post_rx_frags(rxo, GFP_KERNEL);
-		}
-	}
-
-reschedule:
-	adapter->work_counter++;
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
 static void be_msix_disable(struct be_adapter *adapter)
 {
 	if (msix_enabled(adapter)) {
@@ -2119,27 +2142,28 @@
 static int be_sriov_enable(struct be_adapter *adapter)
 {
 	be_check_sriov_fn_type(adapter);
+
 #ifdef CONFIG_PCI_IOV
 	if (be_physfn(adapter) && num_vfs) {
 		int status, pos;
-		u16 nvfs;
+		u16 dev_vfs;
 
 		pos = pci_find_ext_capability(adapter->pdev,
 						PCI_EXT_CAP_ID_SRIOV);
 		pci_read_config_word(adapter->pdev,
-					pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
 
-		if (num_vfs > nvfs) {
+		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+		if (adapter->num_vfs != num_vfs)
 			dev_info(&adapter->pdev->dev,
-					"Device supports %d VFs and not %d\n",
-					nvfs, num_vfs);
-			num_vfs = nvfs;
-		}
+				 "Device supports %d VFs and not %d\n",
+				 adapter->num_vfs, num_vfs);
 
-		status = pci_enable_sriov(adapter->pdev, num_vfs);
-		adapter->sriov_enabled = status ? false : true;
+		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+		if (status)
+			adapter->num_vfs = 0;
 
-		if (adapter->sriov_enabled) {
+		if (adapter->num_vfs) {
 			adapter->vf_cfg = kcalloc(num_vfs,
 						sizeof(struct be_vf_cfg),
 						GFP_KERNEL);
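This hunk stops treating the module parameter as authoritative: the requested VF count is clamped to what the device advertises in its SR-IOV capability, and adapter->num_vfs (rather than a bool) records what was actually enabled. A rough sketch of that sequence, assuming CONFIG_PCI_IOV and hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/pci.h>

static int demo_enable_vfs(struct pci_dev *pdev, u16 requested)
{
	u16 dev_vfs, num_vfs;
	int pos, status;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return -ENODEV;

	/* how many VFs the device can actually expose */
	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

	num_vfs = min_t(u16, requested, dev_vfs);
	if (num_vfs != requested)
		dev_info(&pdev->dev, "Device supports %d VFs and not %d\n",
			 num_vfs, requested);

	status = pci_enable_sriov(pdev, num_vfs);
	return status ? status : num_vfs;	/* VFs enabled, or -errno */
}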
@@ -2154,10 +2178,10 @@
 static void be_sriov_disable(struct be_adapter *adapter)
 {
 #ifdef CONFIG_PCI_IOV
-	if (adapter->sriov_enabled) {
+	if (sriov_enabled(adapter)) {
 		pci_disable_sriov(adapter->pdev);
 		kfree(adapter->vf_cfg);
-		adapter->sriov_enabled = false;
+		adapter->num_vfs = 0;
 	}
 #endif
 }
@@ -2351,8 +2375,8 @@
 static int be_rx_queues_setup(struct be_adapter *adapter)
 {
 	struct be_rx_obj *rxo;
-	int rc, i;
-	u8 rsstable[MAX_RSS_QS];
+	int rc, i, j;
+	u8 rsstable[128];
 
 	for_all_rx_queues(adapter, rxo, i) {
 		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2388,15 @@
 	}
 
 	if (be_multi_rxq(adapter)) {
-		for_all_rss_queues(adapter, rxo, i)
-			rsstable[i] = rxo->rss_id;
+		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+			for_all_rss_queues(adapter, rxo, i) {
+				if ((j + i) >= 128)
+					break;
+				rsstable[j + i] = rxo->rss_id;
+			}
+		}
+		rc = be_cmd_rss_config(adapter, rsstable, 128);
 
-		rc = be_cmd_rss_config(adapter, rsstable,
-			adapter->num_rx_qs - 1);
 		if (rc)
 			return rc;
 	}
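The RSS hunk above fills the full 128-entry indirection table the firmware expects, striding the RSS queue ids across it round-robin instead of programming only num_rx_qs - 1 entries. A standalone sketch of the fill logic (plain C, hypothetical queue ids):

#include <stdio.h>

#define RSS_TABLE_SIZE 128

int main(void)
{
	unsigned char rsstable[RSS_TABLE_SIZE];
	unsigned char rss_ids[] = { 3, 4, 5, 6 };	/* hypothetical queue ids */
	int num_rss = sizeof(rss_ids) / sizeof(rss_ids[0]);
	int i, j;

	/* walk the table in strides of num_rss so every slot gets a
	 * queue id and the ids repeat evenly across all 128 entries */
	for (j = 0; j < RSS_TABLE_SIZE; j += num_rss) {
		for (i = 0; i < num_rss; i++) {
			if (j + i >= RSS_TABLE_SIZE)
				break;
			rsstable[j + i] = rss_ids[i];
		}
	}

	for (i = 0; i < RSS_TABLE_SIZE; i++)
		printf("%u%c", rsstable[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}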
@@ -2386,6 +2414,7 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
 	struct be_rx_obj *rxo;
+	u8 link_status;
 	int status, i;
 
 	status = be_rx_queues_setup(adapter);
@@ -2409,6 +2438,11 @@
 	/* Now that interrupts are on we can process async mcc */
 	be_async_mcc_enable(adapter);
 
+	status = be_cmd_link_status_query(adapter, NULL, NULL,
+					  &link_status, 0);
+	if (!status)
+		be_link_status_update(adapter, link_status);
+
 	return 0;
 err:
 	be_close(adapter->netdev);
@@ -2465,19 +2499,24 @@
 	u32 vf;
 	int status = 0;
 	u8 mac[ETH_ALEN];
+	struct be_vf_cfg *vf_cfg;
 
 	be_vf_eth_addr_generate(adapter, mac);
 
-	for (vf = 0; vf < num_vfs; vf++) {
-		status = be_cmd_pmac_add(adapter, mac,
-					adapter->vf_cfg[vf].vf_if_handle,
-					&adapter->vf_cfg[vf].vf_pmac_id,
-					vf + 1);
+	for_all_vfs(adapter, vf_cfg, vf) {
+		if (lancer_chip(adapter)) {
+			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
+		} else {
+			status = be_cmd_pmac_add(adapter, mac,
+						 vf_cfg->if_handle,
+						 &vf_cfg->pmac_id, vf + 1);
+		}
+
 		if (status)
 			dev_err(&adapter->pdev->dev,
-				"Mac address add failed for VF %d\n", vf);
+			"Mac address assignment failed for VF %d\n", vf);
 		else
-			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
 		mac[5] += 1;
 	}
@@ -2486,24 +2525,23 @@
 
 static void be_vf_clear(struct be_adapter *adapter)
 {
+	struct be_vf_cfg *vf_cfg;
 	u32 vf;
 
-	for (vf = 0; vf < num_vfs; vf++) {
-		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
-			be_cmd_pmac_del(adapter,
-					adapter->vf_cfg[vf].vf_if_handle,
-					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
-	}
+	for_all_vfs(adapter, vf_cfg, vf) {
+		if (lancer_chip(adapter))
+			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+		else
+			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+					vf_cfg->pmac_id, vf + 1);
 
-	for (vf = 0; vf < num_vfs; vf++)
-		if (adapter->vf_cfg[vf].vf_if_handle)
-			be_cmd_if_destroy(adapter,
-				adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+	}
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-	if (be_physfn(adapter) && adapter->sriov_enabled)
+	if (sriov_enabled(adapter))
 		be_vf_clear(adapter);
 
 	be_cmd_if_destroy(adapter, adapter->if_handle,  0);
@@ -2511,61 +2549,94 @@
 	be_mcc_queues_destroy(adapter);
 	be_rx_queues_destroy(adapter);
 	be_tx_queues_destroy(adapter);
-	adapter->eq_next_idx = 0;
-
-	adapter->be3_native = false;
-	adapter->promiscuous = false;
 
 	/* tell fw we're done with firing cmds */
 	be_cmd_fw_clean(adapter);
 	return 0;
 }
 
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+	struct be_vf_cfg *vf_cfg;
+	int vf;
+
+	for_all_vfs(adapter, vf_cfg, vf) {
+		vf_cfg->if_handle = -1;
+		vf_cfg->pmac_id = -1;
+	}
+}
+
 static int be_vf_setup(struct be_adapter *adapter)
 {
+	struct be_vf_cfg *vf_cfg;
 	u32 cap_flags, en_flags, vf;
 	u16 lnk_speed;
 	int status;
 
-	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
-	for (vf = 0; vf < num_vfs; vf++) {
+	be_vf_setup_init(adapter);
+
+	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+				BE_IF_FLAGS_MULTICAST;
+	for_all_vfs(adapter, vf_cfg, vf) {
 		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
-					&adapter->vf_cfg[vf].vf_if_handle,
-					NULL, vf+1);
-		if (status)
-			goto err;
-		adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
-	}
-
-	if (!lancer_chip(adapter)) {
-		status = be_vf_eth_addr_config(adapter);
+					  &vf_cfg->if_handle, NULL, vf + 1);
 		if (status)
 			goto err;
 	}
 
-	for (vf = 0; vf < num_vfs; vf++) {
+	status = be_vf_eth_addr_config(adapter);
+	if (status)
+		goto err;
+
+	for_all_vfs(adapter, vf_cfg, vf) {
 		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-				vf + 1);
+						  NULL, vf + 1);
 		if (status)
 			goto err;
-		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+		vf_cfg->tx_rate = lnk_speed * 10;
 	}
 	return 0;
 err:
 	return status;
 }
 
+static void be_setup_init(struct be_adapter *adapter)
+{
+	adapter->vlan_prio_bmap = 0xff;
+	adapter->link_speed = -1;
+	adapter->if_handle = -1;
+	adapter->be3_native = false;
+	adapter->promiscuous = false;
+	adapter->eq_next_idx = 0;
+}
+
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
+	u32 pmac_id;
+	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+	if (status != 0)
+		goto do_none;
+	status = be_cmd_mac_addr_query(adapter, mac,
+			MAC_ADDRESS_TYPE_NETWORK,
+			false, adapter->if_handle, pmac_id);
+	if (status != 0)
+		goto do_none;
+	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+			&adapter->pmac_id, 0);
+do_none:
+	return status;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	u32 cap_flags, en_flags;
 	u32 tx_fc, rx_fc;
-	int status;
+	int status, i;
 	u8 mac[ETH_ALEN];
+	struct be_tx_obj *txo;
 
-	/* Allow all priorities by default. A GRP5 evt may modify this */
-	adapter->vlan_prio_bmap = 0xff;
-	adapter->link_speed = -1;
+	be_setup_init(adapter);
 
 	be_cmd_req_native_mode(adapter);
 
@@ -2583,7 +2654,7 @@
 
 	memset(mac, 0, ETH_ALEN);
 	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
-			true /*permanent */, 0);
+			true /*permanent */, 0, 0);
 	if (status)
 		return status;
 	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2663,8 @@
 	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
 	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
-			BE_IF_FLAGS_PROMISCUOUS;
+			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
 	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
 		cap_flags |= BE_IF_FLAGS_RSS;
 		en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2675,23 @@
 	if (status != 0)
 		goto err;
 
-	/* For BEx, the VF's permanent mac queried from card is incorrect.
-	 * Query the mac configued by the PF using if_handle
-	 */
-	if (!be_physfn(adapter) && !lancer_chip(adapter)) {
-		status = be_cmd_mac_addr_query(adapter, mac,
-			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+	for_all_tx_queues(adapter, txo, i) {
+		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+		if (status)
+			goto err;
+	}
+
+	/* The VF's permanent mac queried from card is incorrect.
+	 * For BEx: Query the mac configured by the PF using if_handle
+	 * For Lancer: Get and use mac_list to obtain mac address.
+	 */
+	if (!be_physfn(adapter)) {
+		if (lancer_chip(adapter))
+			status = be_configure_mac_from_list(adapter, mac);
+		else
+			status = be_cmd_mac_addr_query(adapter, mac,
+					MAC_ADDRESS_TYPE_NETWORK, false,
+					adapter->if_handle, 0);
 		if (!status) {
 			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2707,21 @@
 	be_set_rx_mode(adapter->netdev);
 
 	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
-	if (status)
+	/* For Lancer: It is legal for this cmd to fail on VF */
+	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
 		goto err;
+
 	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
 		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
 					adapter->rx_fc);
-		if (status)
+		/* For Lancer: It is legal for this cmd to fail on VF */
+		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
 			goto err;
 	}
 
 	pcie_set_readrq(adapter->pdev, 4096);
 
-	if (be_physfn(adapter) && adapter->sriov_enabled) {
+	if (sriov_enabled(adapter)) {
 		status = be_vf_setup(adapter);
 		if (status)
 			goto err;
@@ -2647,6 +2733,19 @@
 	return status;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_rx_obj *rxo;
+	int i;
+
+	event_handle(adapter, &adapter->tx_eq, false);
+	for_all_rx_queues(adapter, rxo, i)
+		event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
 static bool be_flash_redboot(struct be_adapter *adapter,
 			const u8 *p, u32 img_start, int image_size,
@@ -2995,7 +3094,10 @@
 	.ndo_set_vf_mac		= be_set_vf_mac,
 	.ndo_set_vf_vlan	= be_set_vf_vlan,
 	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
-	.ndo_get_vf_config	= be_get_vf_config
+	.ndo_get_vf_config	= be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= be_netpoll,
+#endif
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -3242,6 +3344,7 @@
 		break;
 	case BE_DEVICE_ID2:
 	case OC_DEVICE_ID2:
+	case OC_DEVICE_ID5:
 		adapter->generation = BE_GEN3;
 		break;
 	case OC_DEVICE_ID3:
@@ -3267,7 +3370,7 @@
 
 static int lancer_wait_ready(struct be_adapter *adapter)
 {
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
 	u32 sliport_status;
 	int status = 0, i;
 
@@ -3276,7 +3379,7 @@
 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
 			break;
 
-		msleep(20);
+		msleep(1000);
 	}
 
 	if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3416,104 @@
 	return status;
 }
 
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+	int status;
+	u32 sliport_status;
+
+	if (adapter->eeh_err || adapter->ue_detected)
+		return;
+
+	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
+	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+		dev_err(&adapter->pdev->dev,
+				"Adapter in error state."
+				"Trying to recover.\n");
+
+		status = lancer_test_and_set_rdy_state(adapter);
+		if (status)
+			goto err;
+
+		netif_device_detach(adapter->netdev);
+
+		if (netif_running(adapter->netdev))
+			be_close(adapter->netdev);
+
+		be_clear(adapter);
+
+		adapter->fw_timeout = false;
+
+		status = be_setup(adapter);
+		if (status)
+			goto err;
+
+		if (netif_running(adapter->netdev)) {
+			status = be_open(adapter->netdev);
+			if (status)
+				goto err;
+		}
+
+		netif_device_attach(adapter->netdev);
+
+		dev_err(&adapter->pdev->dev,
+				"Adapter error recovery succeeded\n");
+	}
+	return;
+err:
+	dev_err(&adapter->pdev->dev,
+			"Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+	struct be_adapter *adapter =
+		container_of(work, struct be_adapter, work.work);
+	struct be_rx_obj *rxo;
+	int i;
+
+	if (lancer_chip(adapter))
+		lancer_test_and_recover_fn_err(adapter);
+
+	be_detect_dump_ue(adapter);
+
+	/* when interrupts are not yet enabled, just reap any pending
+	* mcc completions */
+	if (!netif_running(adapter->netdev)) {
+		int mcc_compl, status = 0;
+
+		mcc_compl = be_process_mcc(adapter, &status);
+
+		if (mcc_compl) {
+			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+		}
+
+		goto reschedule;
+	}
+
+	if (!adapter->stats_cmd_sent) {
+		if (lancer_chip(adapter))
+			lancer_cmd_get_pport_stats(adapter,
+						&adapter->stats_cmd);
+		else
+			be_cmd_get_stats(adapter, &adapter->stats_cmd);
+	}
+
+	for_all_rx_queues(adapter, rxo, i) {
+		be_rx_eqd_update(adapter, rxo);
+
+		if (rxo->rx_post_starved) {
+			rxo->rx_post_starved = false;
+			be_post_rx_frags(rxo, GFP_KERNEL);
+		}
+	}
+
+reschedule:
+	adapter->work_counter++;
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
 			const struct pci_device_id *pdev_id)
 {
@@ -3365,7 +3566,12 @@
 		goto disable_sriov;
 
 	if (lancer_chip(adapter)) {
-		status = lancer_test_and_set_rdy_state(adapter);
+		status = lancer_wait_ready(adapter);
+		if (!status) {
+			iowrite32(SLI_PORT_CONTROL_IP_MASK,
+					adapter->db + SLIPORT_CONTROL_OFFSET);
+			status = lancer_test_and_set_rdy_state(adapter);
+		}
 		if (status) {
 			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
 			goto ctrl_clean;
@@ -3559,6 +3765,8 @@
 
 	dev_info(&adapter->pdev->dev, "EEH reset\n");
 	adapter->eeh_err = false;
+	adapter->ue_detected = false;
+	adapter->fw_timeout = false;
 
 	status = pci_enable_device(pdev);
 	if (status)
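be_worker() is only moved further down the file (so it can call the new Lancer recovery helper first), but it keeps the usual self-rescheduling delayed-work shape. A generic sketch of that pattern with a hypothetical my_adapter, not the driver's exact housekeeping:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_adapter {
	struct delayed_work work;
	unsigned int work_counter;
};

static void my_worker(struct work_struct *work)
{
	struct my_adapter *adapter =
		container_of(work, struct my_adapter, work.work);

	/* periodic housekeeping: error detection, stats, buffer refill */

	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

At probe time the driver does INIT_DELAYED_WORK(&adapter->work, my_worker) plus an initial schedule_delayed_work(), and cancel_delayed_work_sync() on teardown.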
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 251b635..60f0e78 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1185,18 +1185,7 @@
 	},
 };
 
-static int __init ethoc_init(void)
-{
-	return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
-	platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
 
 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
 MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
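The ethoc conversion (and several identical ones below) replaces hand-rolled module init/exit functions with module_platform_driver(), which expands to exactly that register/unregister boilerplate. A minimal sketch of a driver using it; the demo_* names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* claim the device, allocate resources */
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;	/* release everything acquired in probe */
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name = "demo",
	},
};

/* expands to module_init()/module_exit() stubs that simply call
 * platform_driver_register()/platform_driver_unregister() */
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");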
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 61d2bdd..c82d444 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1818,9 +1818,9 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(np->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
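The fealnx change is one of many get_drvinfo conversions in this series: the fixed-size fields of struct ethtool_drvinfo are filled with strlcpy()/snprintf() so an over-long driver name or bus string can never overflow them. A hedged sketch of the pattern (demo names, assuming the netdev has a parent device):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void demo_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	/* strlcpy always NUL-terminates and never writes past the
	 * destination size, unlike strcpy()/strncpy() */
	strlcpy(info->driver, "demo", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "platform %s", dev_name(dev->dev.parent));
}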
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 5272f9d..3574e14 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -21,10 +21,10 @@
 if NET_VENDOR_FREESCALE
 
 config FEC
-	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+	tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
-		   ARCH_MXC || ARCH_MXS)
-	default ARCH_MXC || ARCH_MXS if ARM
+		   ARCH_MXC || SOC_IMX28)
+	default ARCH_MXC || SOC_IMX28 if ARM
 	select PHYLIB
 	---help---
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0..20c2e3f 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -99,7 +99,7 @@
 MODULE_DEVICE_TABLE(platform, fec_devtype);
 
 enum imx_fec_type {
-	IMX25_FEC = 1, 	/* runs on i.mx25/50/53 */
+	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
 	IMX27_FEC,	/* runs on i.mx27/35/51 */
 	IMX28_FEC,
 	IMX6Q_FEC,
@@ -132,7 +132,7 @@
 #elif defined (CONFIG_M5272C3)
 #define	FEC_FLASHMAC	(0xffe04000 + 4)
 #elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC 	0xffc0406b
+#define FEC_FLASHMAC	0xffc0406b
 #else
 #define	FEC_FLASHMAC	0
 #endif
@@ -232,6 +232,7 @@
 	struct	platform_device *pdev;
 
 	int	opened;
+	int	dev_id;
 
 	/* Phylib and MDIO interface */
 	struct	mii_bus *mii_bus;
@@ -254,11 +255,13 @@
 #define FEC_MMFR_TA		(2 << 16)
 #define FEC_MMFR_DATA(v)	(v & 0xffff)
 
-#define FEC_MII_TIMEOUT		1000 /* us */
+#define FEC_MII_TIMEOUT		30000 /* us */
 
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static int mii_cnt;
+
 static void *swap_buffer(void *bufaddr, int len)
 {
 	int i;
@@ -515,6 +518,7 @@
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
+	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
 
 	/* We cannot expect a graceful transmit stop without link !!! */
 	if (fep->link) {
@@ -531,8 +535,10 @@
 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
 	/* We have to keep ENET enabled to have MII interrupt stay working */
-	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
 		writel(2, fep->hwp + FEC_ECNTRL);
+		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+	}
 }
 
 
@@ -818,7 +824,7 @@
 			iap = (unsigned char *)FEC_FLASHMAC;
 #else
 		if (pdata)
-			memcpy(iap, pdata->mac, ETH_ALEN);
+			iap = (unsigned char *)&pdata->mac;
 #endif
 	}
 
@@ -837,7 +843,7 @@
 
 	/* Adjust MAC if using macaddr */
 	if (iap == macaddr)
-		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -865,6 +871,8 @@
 	if (phy_dev->link) {
 		if (fep->full_duplex != phy_dev->duplex) {
 			fec_restart(ndev, phy_dev->duplex);
+			/* prevent unnecessary second fec_restart() below */
+			fep->link = phy_dev->link;
 			status_change = 1;
 		}
 	}
@@ -953,7 +961,7 @@
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
-	int dev_id = fep->pdev->id;
+	int dev_id = fep->dev_id;
 
 	fep->phy_dev = NULL;
 
@@ -972,8 +980,9 @@
 	}
 
 	if (phy_id >= PHY_MAX_ADDR) {
-		printk(KERN_INFO "%s: no PHY, assuming direct connection "
-			"to switch\n", ndev->name);
+		printk(KERN_INFO
+			"%s: no PHY, assuming direct connection to switch\n",
+			ndev->name);
 		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
 		phy_id = 0;
 	}
@@ -998,8 +1007,9 @@
 	fep->link = 0;
 	fep->full_duplex = 0;
 
-	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-		"(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+	printk(KERN_INFO
+		"%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		ndev->name,
 		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
 		fep->phy_dev->irq);
 
@@ -1031,10 +1041,14 @@
 	 * mdio interface in board design, and need to be configured by
 	 * fec0 mii_bus.
 	 */
-	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
 		/* fec1 uses fec0 mii_bus */
-		fep->mii_bus = fec0_mii_bus;
-		return 0;
+		if (mii_cnt && fec0_mii_bus) {
+			fep->mii_bus = fec0_mii_bus;
+			mii_cnt++;
+			return 0;
+		}
+		return -ENOENT;
 	}
 
 	fep->mii_timeout = 0;
@@ -1063,7 +1077,7 @@
 	fep->mii_bus->read = fec_enet_mdio_read;
 	fep->mii_bus->write = fec_enet_mdio_write;
 	fep->mii_bus->reset = fec_enet_mdio_reset;
-	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
 	fep->mii_bus->priv = fep;
 	fep->mii_bus->parent = &pdev->dev;
 
@@ -1079,6 +1093,8 @@
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
+	mii_cnt++;
+
 	/* save fec0 mii_bus */
 	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
 		fec0_mii_bus = fep->mii_bus;
@@ -1095,11 +1111,11 @@
 
 static void fec_enet_mii_remove(struct fec_enet_private *fep)
 {
-	if (fep->phy_dev)
-		phy_disconnect(fep->phy_dev);
-	mdiobus_unregister(fep->mii_bus);
-	kfree(fep->mii_bus->irq);
-	mdiobus_free(fep->mii_bus);
+	if (--mii_cnt == 0) {
+		mdiobus_unregister(fep->mii_bus);
+		kfree(fep->mii_bus->irq);
+		mdiobus_free(fep->mii_bus);
+	}
 }
 
 static int fec_enet_get_settings(struct net_device *ndev,
@@ -1136,7 +1152,7 @@
 	strcpy(info->bus_info, dev_name(&ndev->dev));
 }
 
-static struct ethtool_ops fec_enet_ethtool_ops = {
+static const struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_settings		= fec_enet_get_settings,
 	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
@@ -1521,6 +1537,7 @@
 	int i, irq, ret = 0;
 	struct resource *r;
 	const struct of_device_id *of_id;
+	static int dev_id;
 
 	of_id = of_match_device(fec_dt_ids, &pdev->dev);
 	if (of_id)
@@ -1548,6 +1565,7 @@
 
 	fep->hwp = ioremap(r->start, resource_size(r));
 	fep->pdev = pdev;
+	fep->dev_id = dev_id++;
 
 	if (!fep->hwp) {
 		ret = -ENOMEM;
@@ -1571,8 +1589,12 @@
 
 	for (i = 0; i < FEC_IRQ_NUM; i++) {
 		irq = platform_get_irq(pdev, i);
-		if (i && irq < 0)
-			break;
+		if (irq < 0) {
+			if (i)
+				break;
+			ret = irq;
+			goto failed_irq;
+		}
 		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
 		if (ret) {
 			while (--i >= 0) {
@@ -1583,7 +1605,7 @@
 		}
 	}
 
-	fep->clk = clk_get(&pdev->dev, "fec_clk");
+	fep->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(fep->clk)) {
 		ret = PTR_ERR(fep->clk);
 		goto failed_clk;
@@ -1635,13 +1657,18 @@
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct resource *r;
+	int i;
 
-	fec_stop(ndev);
+	unregister_netdev(ndev);
 	fec_enet_mii_remove(fep);
+	for (i = 0; i < FEC_IRQ_NUM; i++) {
+		int irq = platform_get_irq(pdev, i);
+		if (irq > 0)
+			free_irq(irq, ndev);
+	}
 	clk_disable(fep->clk);
 	clk_put(fep->clk);
 	iounmap(fep->hwp);
-	unregister_netdev(ndev);
 	free_netdev(ndev);
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
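The fec remove() fix above calls unregister_netdev() first and only then releases MII, IRQs, clock and mappings, i.e. it undoes probe in reverse order so the data path can never run against freed resources. A generic sketch of that ordering with hypothetical demo_* helpers and fields:

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

#define DEMO_IRQ_NUM 3

struct demo_priv {
	struct clk *clk;
	void __iomem *hwp;
};

static void demo_mii_remove(struct demo_priv *priv) { /* hypothetical */ }

static int demo_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct demo_priv *priv = netdev_priv(ndev);
	int i;

	unregister_netdev(ndev);	/* stop new traffic first */

	demo_mii_remove(priv);
	for (i = 0; i < DEMO_IRQ_NUM; i++) {
		int irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
	clk_disable(priv->clk);
	clk_put(priv->clk);
	iounmap(priv->hwp);

	free_netdev(ndev);		/* finally drop the net_device */
	return 0;
}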
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5bf5471..910a8e1 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1171,16 +1171,6 @@
 	.remove = fs_enet_remove,
 };
 
-static int __init fs_init(void)
-{
-	return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
-	platform_driver_unregister(&fs_enet_driver);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void fs_enet_netpoll(struct net_device *dev)
 {
@@ -1190,7 +1180,4 @@
 }
 #endif
 
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index b09270b..0f2d1a7 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -232,15 +232,4 @@
 	.remove = fs_enet_mdio_remove,
 };
 
-static int fs_enet_mdio_bb_init(void)
-{
-	return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
-	platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index e0e9d6c..55bb8672 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -237,15 +237,4 @@
 	.remove = fs_enet_mdio_remove,
 };
 
-static int fs_enet_mdio_fec_init(void)
-{
-	return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
-	platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8a..9eb8159 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@
 }
 EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
-	int i;
 
-	for (i = PHY_MAX_ADDR; i > 0; i--) {
-		u32 phy_id;
-
-		if (get_phy_id(new_bus, i, &phy_id))
-			return -1;
-
-		if (phy_id == 0xffffffff)
-			break;
-	}
-
-	return i;
-}
-
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 	struct gfar __iomem *enet_regs;
 
 	/*
@@ -220,15 +202,15 @@
 	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
 		return of_iomap(np, 1);
-	} else
-		return NULL;
-}
+	}
 #endif
+	return NULL;
+}
 
 
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 {
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 	struct device_node *np = NULL;
 	int err = 0;
 
@@ -261,9 +243,10 @@
 		return err;
 	else
 		return -EINVAL;
-}
+#else
+	return -ENODEV;
 #endif
-
+}
 
 static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 {
@@ -339,19 +322,13 @@
 			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
 			of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 		tbipa = get_gfar_tbipa(regs, np);
 		if (!tbipa) {
 			err = -EINVAL;
 			goto err_free_irqs;
 		}
-#else
-		err = -ENODEV;
-		goto err_free_irqs;
-#endif
 	} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
 			of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 		u32 id;
 		static u32 mii_mng_master;
 
@@ -364,10 +341,6 @@
 			mii_mng_master = id;
 			ucc_set_qe_mux_mii_mng(id - 1);
 		}
-#else
-		err = -ENODEV;
-		goto err_free_irqs;
-#endif
 	} else {
 		err = -ENODEV;
 		goto err_free_irqs;
@@ -386,23 +359,12 @@
 	}
 
 	if (tbiaddr == -1) {
-		out_be32(tbipa, 0);
-
-		tbiaddr = fsl_pq_mdio_find_free(new_bus);
-	}
-
-	/*
-	 * We define TBIPA at 0 to be illegal, opting to fail for boards that
-	 * have PHYs at 1-31, rather than change tbipa and rescan.
-	 */
-	if (tbiaddr == 0) {
 		err = -EBUSY;
-
 		goto err_free_irqs;
+	} else {
+		out_be32(tbipa, tbiaddr);
 	}
 
-	out_be32(tbipa, tbiaddr);
-
 	err = of_mdiobus_register(new_bus, np);
 	if (err) {
 		printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -480,15 +442,6 @@
 	.remove = fsl_pq_mdio_remove,
 };
 
-int __init fsl_pq_mdio_init(void)
-{
-	return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
 
-void fsl_pq_mdio_exit(void)
-{
-	platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
 MODULE_LICENSE("GPL");
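The fsl_pq_mdio rework pushes the CONFIG_GIANFAR/CONFIG_UCC_GETH #ifdefs inside the helper functions; the helpers now compile in every configuration and simply return NULL or an error when the support is absent, so the call sites need no preprocessor blocks at all. A small sketch of the idiom (demo names, not the file's exact helpers):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Helper is built unconditionally; only its body is conditional. */
static void __iomem *demo_get_tbipa(struct device_node *np)
{
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
	if (of_device_is_compatible(np, "fsl,etsec2-mdio"))
		return of_iomap(np, 1);
#endif
	return NULL;	/* support not built in, or node not recognised */
}

/* Caller stays free of #ifdefs: */
static int demo_use_tbipa(struct device_node *np)
{
	void __iomem *tbipa = demo_get_tbipa(np);

	if (!tbipa)
		return -ENODEV;
	/* ... program the TBI PHY address ... */
	return 0;
}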
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 83199fd..e01cdaa 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -734,7 +734,7 @@
 
 	mac_addr = of_get_mac_address(np);
 	if (mac_addr)
-		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
 	if (model && !strcasecmp(model, "TSEC"))
 		priv->device_flags =
@@ -2306,7 +2306,7 @@
 }
 
 /* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar __iomem *regs = NULL;
@@ -3114,7 +3114,7 @@
 static void gfar_clear_exact_match(struct net_device *dev)
 {
 	int idx;
-	static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
 	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
 		gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3137,7 @@
 {
 	u32 tempval;
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 result = ether_crc(MAC_ADDR_LEN, addr);
+	u32 result = ether_crc(ETH_ALEN, addr);
 	int width = priv->hash_width;
 	u8 whichbit = (result >> (32 - width)) & 0x1f;
 	u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3158,7 @@
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int idx;
-	char tmpbuf[MAC_ADDR_LEN];
+	char tmpbuf[ETH_ALEN];
 	u32 tempval;
 	u32 __iomem *macptr = &regs->macstnaddr1;
 
@@ -3166,8 +3166,8 @@
 
 	/* Now copy it into the mac registers backwards, cuz */
 	/* little endian is silly */
-	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
-		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+	for (idx = 0; idx < ETH_ALEN; idx++)
+		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
 
 	gfar_write(macptr, *((u32 *) (tmpbuf)));
 
@@ -3281,16 +3281,4 @@
 	.remove = gfar_remove,
 };
 
-static int __init gfar_init(void)
-{
-	return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
-	platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9aa4377..fe7ac3a 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -74,9 +74,6 @@
  * will be the next highest multiple of 512 bytes. */
 #define INCREMENTAL_BUFFER_SIZE 512
 
-
-#define MAC_ADDR_LEN 6
-
 #define PHY_INIT_TIMEOUT 100000
 #define GFAR_PHY_CHANGE_TIME 2
 
@@ -1179,9 +1176,9 @@
 extern void gfar_configure_coalescing(struct gfar_private *priv,
 		unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
 extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 212736b..5a3b2e5 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -519,12 +519,12 @@
 	return err;
 }
 
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
 	int err = 0, i = 0;
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
 		gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@
 
 	/* We need a copy of the filer table because
 	 * we want to change its order */
-	temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
 	if (temp_table == NULL)
 		return -ENOMEM;
-	memcpy(temp_table, tab, sizeof(*temp_table));
 
 	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
 			sizeof(struct gfar_mask_entry), GFP_KERNEL);
@@ -1693,8 +1692,9 @@
 		ret = gfar_set_hash_opts(priv, cmd);
 		break;
 	case ETHTOOL_SRXCLSRLINS:
-		if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
-			cmd->fs.ring_cookie >= priv->num_rx_queues) {
+		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+		    cmd->fs.location >= MAX_FILER_IDX) {
 			ret = -EINVAL;
 			break;
 		}
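The gianfar_ethtool hunk above collapses a kmalloc()+memcpy() pair into kmemdup(), which allocates and copies in one call and fails the same way (NULL on allocation failure). A minimal sketch; struct filer_table here is only a stand-in for the real structure:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct filer_table {
	u32 index[256];
};

static struct filer_table *clone_table(const struct filer_table *tab)
{
	/* previously:
	 *	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
	 *	if (!copy)
	 *		return NULL;
	 *	memcpy(copy, tab, sizeof(*copy));
	 */
	return kmemdup(tab, sizeof(*tab), GFP_KERNEL);
}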
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index f67b8ae..83e0ed75 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -562,21 +562,7 @@
 	.remove      = gianfar_ptp_remove,
 };
 
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
-	return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
-	platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
 
 MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
 MODULE_DESCRIPTION("PTP clock using the eTSEC");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index b5dc027..ba2dc08 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -443,7 +443,7 @@
 
 static inline int compare_addr(u8 **addr1, u8 **addr2)
 {
-	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+	return memcmp(addr1, addr2, ETH_ALEN);
 }
 
 #ifdef DEBUG
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index d12fcad..2e395a2 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -20,6 +20,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/if_ether.h>
 
 #include <asm/immap_qe.h>
 #include <asm/qe.h>
@@ -881,7 +882,6 @@
 #define TX_RING_MOD_MASK(size)                  (size-1)
 #define RX_RING_MOD_MASK(size)                  (size-1)
 
-#define ENET_NUM_OCTETS_PER_ADDRESS             6
 #define ENET_GROUP_ADDR                         0x01	/* Group address mask
 							   for ethernet
 							   addresses */
@@ -1051,7 +1051,7 @@
 
 /* UCC GETH 82xx Ethernet Address Container */
 struct enet_addr_container {
-	u8 address[ENET_NUM_OCTETS_PER_ADDRESS];	/* ethernet address */
+	u8 address[ETH_ALEN];	/* ethernet address */
 	enum ucc_geth_enet_address_recognition_location location;	/* location in
 								   82xx address
 								   recognition
@@ -1194,7 +1194,7 @@
 	u16 cpucount[NUM_TX_QUEUES];
 	u16 __iomem *p_cpucount[NUM_TX_QUEUES];
 	int indAddrRegUsed[NUM_OF_PADDRS];
-	u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS];	/* ethernet address */
+	u8 paddr[NUM_OF_PADDRS][ETH_ALEN];	/* ethernet address */
 	u8 numGroupAddrInHash;
 	u8 numIndAddrInHash;
 	u8 numIndAddrInReg;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 1541675..ee84b47 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1058,9 +1058,10 @@
 static void netdev_get_drvinfo(struct net_device *dev,
 			       struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	snprintf(info->bus_info, sizeof(info->bus_info),
+		"PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index 067c460..114cda77 100644
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -1726,9 +1726,10 @@
 static void eepro_ethtool_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *drvinfo)
 {
-	strcpy(drvinfo->driver, DRV_NAME);
-	strcpy(drvinfo->version, DRV_VERSION);
-	sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+		"ISA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops eepro_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bfeccbf..3554414 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2114,17 +2114,19 @@
 	return NETDEV_TX_OK;
 }
 
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct ehea_port *port = netdev_priv(dev);
 	struct ehea_adapter *adapter = port->adapter;
 	struct hcp_ehea_port_cb1 *cb1;
 	int index;
 	u64 hret;
+	int err = 0;
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
 		pr_err("no mem for cb1\n");
+		err = -ENOMEM;
 		goto out;
 	}
 
@@ -2132,6 +2134,7 @@
 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS) {
 		pr_err("query_ehea_port failed\n");
+		err = -EINVAL;
 		goto out;
 	}
 
@@ -2140,24 +2143,28 @@
 
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-	if (hret != H_SUCCESS)
+	if (hret != H_SUCCESS) {
 		pr_err("modify_ehea_port failed\n");
+		err = -EINVAL;
+	}
 out:
 	free_page((unsigned long)cb1);
-	return;
+	return err;
 }
 
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct ehea_port *port = netdev_priv(dev);
 	struct ehea_adapter *adapter = port->adapter;
 	struct hcp_ehea_port_cb1 *cb1;
 	int index;
 	u64 hret;
+	int err = 0;
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
 		pr_err("no mem for cb1\n");
+		err = -ENOMEM;
 		goto out;
 	}
 
@@ -2165,6 +2172,7 @@
 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS) {
 		pr_err("query_ehea_port failed\n");
+		err = -EINVAL;
 		goto out;
 	}
 
@@ -2173,10 +2181,13 @@
 
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-	if (hret != H_SUCCESS)
+	if (hret != H_SUCCESS) {
 		pr_err("modify_ehea_port failed\n");
+		err = -EINVAL;
+	}
 out:
 	free_page((unsigned long)cb1);
+	return err;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ed79b2d..2abce96 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2924,6 +2924,9 @@
 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
 		zmii_detach(dev->zmii_dev, dev->zmii_port);
 
+	busy_phy_map &= ~(1 << dev->phy.address);
+	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
 	mal_unregister_commac(dev->mal, &dev->commac);
 	emac_put_deps(dev);
 
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b1cd41b..e877371 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -735,7 +735,8 @@
 		sizeof(info->version) - 1);
 }
 
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/*
 	 * Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@
 	return rc1 ? rc1 : rc2;
 }
 
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	int rx_csum = !!(features & NETIF_F_RXCSUM);
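Several drivers in this series (gianfar, ibmveth, e1000, e1000e) switch their feature masks from u32 to netdev_features_t, the wider type introduced so the stack can grow past 32 feature bits; ndo_fix_features/ndo_set_features change signature with it. A sketch of the new signatures with a made-up hardware constraint:

#include <linux/netdevice.h>

static netdev_features_t demo_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* hypothetical hardware: TX checksumming only works when RX
	 * checksumming is also enabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_IP_CSUM;
	return features;
}

static int demo_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;	/* nothing we care about changed */

	/* reprogram the (hypothetical) checksum engine here */
	return 0;
}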
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 8fd80a0..075451d 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -371,16 +371,9 @@
 	}
 
 	/* The last cycle is a tri-state, so read from the PHY. */
-	for (j = 7; j < 8; j++) {
-		for (i = 0; i < p[j].len; i++) {
-			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
-			p[j].field |= ((ipg_r8(PHY_CTRL) &
-				IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
-			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
-		}
-	}
+	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+	ipg_r8(PHY_CTRL);
+	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
 }
 
 static void ipg_set_led_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5a2fdf7..9436397 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2376,10 +2376,10 @@
 	struct ethtool_drvinfo *info)
 {
 	struct nic *nic = netdev_priv(netdev);
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->fw_version, "N/A");
-	strcpy(info->bus_info, pci_name(nic->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(nic->pdev),
+		sizeof(info->bus_info));
 }
 
 #define E100_PHY_REGS 0x1C
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2b223ac..3103f0b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -515,14 +515,14 @@
 			      struct ethtool_drvinfo *drvinfo)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	char firmware_version[32];
 
-	strncpy(drvinfo->driver,  e1000_driver_name, 32);
-	strncpy(drvinfo->version, e1000_driver_version, 32);
+	strlcpy(drvinfo->driver,  e1000_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, e1000_driver_version,
+		sizeof(drvinfo->version));
 
-	sprintf(firmware_version, "N/A");
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 5c9a840..f6c4d7e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -448,7 +448,6 @@
 #define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
 
 /* MAC decode size is 128K - This is the size of BAR0 */
 #define MAC_DECODE_SIZE (128 * 1024)
@@ -813,8 +812,7 @@
 #define E1000_FLA      0x0001C	/* Flash Access - RW */
 #define E1000_MDIC     0x00020	/* MDI Control - RW */
 
-extern void __iomem *ce4100_gbe_mdio_base_virt;
-#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (ce4100_gbe_mdio_base_virt)
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (hw->ce4100_gbe_mdio_base_virt)
 #define E1000_MDIO_STS  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
 #define E1000_MDIO_CMD  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
 #define E1000_MDIO_DRV  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
@@ -1344,6 +1342,7 @@
 struct e1000_hw {
 	u8 __iomem *hw_addr;
 	u8 __iomem *flash_address;
+	void __iomem *ce4100_gbe_mdio_base_virt;
 	e1000_mac_type mac_type;
 	e1000_phy_type phy_type;
 	u32 phy_init_script;
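The e1000 CE4100 change drops the file-scope ce4100_gbe_mdio_base_virt pointer in favour of a field in struct e1000_hw, so the mapping is per adapter and can be unmapped again in the remove path. A generic sketch of the global-to-per-device move, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

struct demo_hw {
	void __iomem *mdio_base_virt;	/* was: a single static global */
};

static int demo_map_mdio(struct demo_hw *hw, struct pci_dev *pdev)
{
	hw->mdio_base_virt = ioremap(pci_resource_start(pdev, 1),
				     pci_resource_len(pdev, 1));
	return hw->mdio_base_virt ? 0 : -ENOMEM;
}

static void demo_unmap_mdio(struct demo_hw *hw)
{
	if (hw->mdio_base_virt)
		iounmap(hw->mdio_base_virt);
}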
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cf480b5..669ca38 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -33,11 +33,6 @@
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
-/* Intel Media SOC GbE MDIO physical base address */
-static unsigned long ce4100_gbe_mdio_base_phy;
-/* Intel Media SOC GbE MDIO virtual base address */
-void __iomem *ce4100_gbe_mdio_base_virt;
-
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #define DRV_VERSION "7.3.21-k8-NAPI"
@@ -167,9 +162,10 @@
                                        struct sk_buff *skb);
 
 static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+			    netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
 #ifdef CONFIG_PM
@@ -806,7 +802,8 @@
 	}
 }
 
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -820,10 +817,11 @@
 	return features;
 }
 
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	u32 changed = features ^ netdev->features;
+	netdev_features_t changed = features ^ netdev->features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		e1000_vlan_mode(netdev, features);
@@ -1051,11 +1049,11 @@
 
 	err = -EIO;
 	if (hw->mac_type == e1000_ce4100) {
-		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
-		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+		hw->ce4100_gbe_mdio_base_virt =
+					ioremap(pci_resource_start(pdev, BAR_1),
 		                                pci_resource_len(pdev, BAR_1));
 
-		if (!ce4100_gbe_mdio_base_virt)
+		if (!hw->ce4100_gbe_mdio_base_virt)
 			goto err_mdio_ioremap;
 	}
 
@@ -1182,7 +1180,7 @@
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
 		else
-			adapter->quad_port_a = 1;
+			adapter->quad_port_a = true;
 		/* Reset for multiple quad port adapters */
 		if (++global_quad_port_a == 4)
 			global_quad_port_a = 0;
@@ -1246,7 +1244,7 @@
 err_dma:
 err_sw_init:
 err_mdio_ioremap:
-	iounmap(ce4100_gbe_mdio_base_virt);
+	iounmap(hw->ce4100_gbe_mdio_base_virt);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -1283,6 +1281,8 @@
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 
+	if (hw->mac_type == e1000_ce4100)
+		iounmap(hw->ce4100_gbe_mdio_base_virt);
 	iounmap(hw->hw_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
@@ -1676,7 +1676,7 @@
 	 * need this to apply a workaround later in the send path. */
 	if (hw->mac_type == e1000_82544 &&
 	    hw->bus_type == e1000_bus_type_pcix)
-		adapter->pcix_82544 = 1;
+		adapter->pcix_82544 = true;
 
 	ew32(TCTL, tctl);
 
@@ -1999,7 +1999,7 @@
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-	tx_ring->last_tx_tso = 0;
+	tx_ring->last_tx_tso = false;
 
 	writel(0, hw->hw_addr + tx_ring->tdh);
 	writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2848,7 +2848,7 @@
 		 * DMA'd to the controller */
 		if (!skb->data_len && tx_ring->last_tx_tso &&
 		    !skb_is_gso(skb)) {
-			tx_ring->last_tx_tso = 0;
+			tx_ring->last_tx_tso = false;
 			size -= 4;
 		}
 
@@ -3216,7 +3216,7 @@
 
 	if (likely(tso)) {
 		if (likely(hw->mac_type != e1000_82544))
-			tx_ring->last_tx_tso = 1;
+			tx_ring->last_tx_tso = true;
 		tx_flags |= E1000_TX_FLAGS_TSO;
 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4577,7 +4577,8 @@
 		e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4601,7 @@
 		e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4610,7 @@
 	if ((hw->mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
 	    (vid == adapter->mng_vlan_id))
-		return;
+		return 0;
 
 	if (!e1000_vlan_used(adapter))
 		e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4622,11 @@
 	e1000_write_vfta(hw, index, vfta);
 
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4647,8 @@
 
 	if (!e1000_vlan_used(adapter))
 		e1000_vlan_filter_on_off(adapter, false);
+
+	return 0;
 }
 
 static void e1000_restore_vlan(struct e1000_adapter *adapter)
@@ -4716,8 +4721,6 @@
 
 	netif_device_detach(netdev);
 
-	mutex_lock(&adapter->mutex);
-
 	if (netif_running(netdev)) {
 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 		e1000_down(adapter);
@@ -4725,10 +4728,8 @@
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
-	if (retval) {
-		mutex_unlock(&adapter->mutex);
+	if (retval)
 		return retval;
-	}
 #endif
 
 	status = er32(STATUS);
@@ -4783,8 +4784,6 @@
 	if (netif_running(netdev))
 		e1000_free_irq(adapter);
 
-	mutex_unlock(&adapter->mutex);
-
 	pci_disable_device(pdev);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9fe18d1..f478a22 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,7 @@
 	u32 txd_cmd;
 
 	bool detect_tx_hung;
+	bool tx_hang_recheck;
 	u8 tx_timeout_factor;
 
 	u32 tx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 69c9d21..fb2c28e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -579,26 +579,24 @@
 			      struct ethtool_drvinfo *drvinfo)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	char firmware_version[32];
 
-	strncpy(drvinfo->driver,  e1000e_driver_name,
-		sizeof(drvinfo->driver) - 1);
-	strncpy(drvinfo->version, e1000e_driver_version,
-		sizeof(drvinfo->version) - 1);
+	strlcpy(drvinfo->driver,  e1000e_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version));
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d-%d",
 		(adapter->eeprom_vers & 0xF000) >> 12,
 		(adapter->eeprom_vers & 0x0FF0) >> 4,
 		(adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version,
-		sizeof(drvinfo->fw_version) - 1);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-		sizeof(drvinfo->bus_info) - 1);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a855db1..3911401 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -163,16 +163,13 @@
 			regs[n] = __er32(hw, E1000_TARC(n));
 		break;
 	default:
-		printk(KERN_INFO "%-15s %08x\n",
-		       reginfo->name, __er32(hw, reginfo->ofs));
+		pr_info("%-15s %08x\n",
+			reginfo->name, __er32(hw, reginfo->ofs));
 		return;
 	}
 
 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
-	printk(KERN_INFO "%-15s ", rname);
-	for (n = 0; n < 2; n++)
-		printk(KERN_CONT "%08x ", regs[n]);
-	printk(KERN_CONT "\n");
+	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
 /*
@@ -208,16 +205,15 @@
 	/* Print netdevice Info */
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
-		printk(KERN_INFO "Device Name     state            "
-		       "trans_start      last_rx\n");
-		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-		       netdev->name, netdev->state, netdev->trans_start,
-		       netdev->last_rx);
+		pr_info("Device Name     state            trans_start      last_rx\n");
+		pr_info("%-15s %016lX %016lX %016lX\n",
+			netdev->name, netdev->state, netdev->trans_start,
+			netdev->last_rx);
 	}
 
 	/* Print Registers */
 	dev_info(&adapter->pdev->dev, "Register Dump\n");
-	printk(KERN_INFO " Register Name   Value\n");
+	pr_info(" Register Name   Value\n");
 	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
 	     reginfo->name; reginfo++) {
 		e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@
 		goto exit;
 
 	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-	       " leng ntw timestamp\n");
+	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
-	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
-	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
-	       (unsigned long long)buffer_info->dma,
-	       buffer_info->length,
-	       buffer_info->next_to_watch,
-	       (unsigned long long)buffer_info->time_stamp);
+	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+		0, tx_ring->next_to_use, tx_ring->next_to_clean,
+		(unsigned long long)buffer_info->dma,
+		buffer_info->length,
+		buffer_info->next_to_watch,
+		(unsigned long long)buffer_info->time_stamp);
 
 	/* Print Tx Ring */
 	if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@
 	 *   +----------------------------------------------------------------+
 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 	 */
-	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
-	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
-	       "<-- Legacy format\n");
-	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
-	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
-	       "<-- Ext Context format\n");
-	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
-	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
-	       "<-- Ext Data format\n");
+	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
+	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
+	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+		const char *next_desc;
 		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		buffer_info = &tx_ring->buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
-		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
-		       "%04X  %3X %016llX %p",
-		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
-			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
-		       (unsigned long long)le64_to_cpu(u0->a),
-		       (unsigned long long)le64_to_cpu(u0->b),
-		       (unsigned long long)buffer_info->dma,
-		       buffer_info->length, buffer_info->next_to_watch,
-		       (unsigned long long)buffer_info->time_stamp,
-		       buffer_info->skb);
 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
-			printk(KERN_CONT " NTC/U\n");
+			next_desc = " NTC/U";
 		else if (i == tx_ring->next_to_use)
-			printk(KERN_CONT " NTU\n");
+			next_desc = " NTU";
 		else if (i == tx_ring->next_to_clean)
-			printk(KERN_CONT " NTC\n");
+			next_desc = " NTC";
 		else
-			printk(KERN_CONT "\n");
+			next_desc = "";
+		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
+			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+			i,
+			(unsigned long long)le64_to_cpu(u0->a),
+			(unsigned long long)le64_to_cpu(u0->b),
+			(unsigned long long)buffer_info->dma,
+			buffer_info->length, buffer_info->next_to_watch,
+			(unsigned long long)buffer_info->time_stamp,
+			buffer_info->skb, next_desc);
 
 		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@
 	/* Print Rx Ring Summary */
 rx_ring_summary:
 	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC]\n");
-	printk(KERN_INFO " %5d %5X %5X\n", 0,
-	       rx_ring->next_to_use, rx_ring->next_to_clean);
+	pr_info("Queue [NTU] [NTC]\n");
+	pr_info(" %5d %5X %5X\n",
+		0, rx_ring->next_to_use, rx_ring->next_to_clean);
 
 	/* Print Rx Ring */
 	if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@
 		 * 24 |                Buffer Address 3 [63:0]              |
 		 *    +-----------------------------------------------------+
 		 */
-		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
-		       "[buffer 1 63:0 ] "
-		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
-		       "[bi->skb] <-- Ext Pkt Split format\n");
+		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
 		/* [Extended] Receive Descriptor (Write-Back) Format
 		 *
 		 *   63       48 47    32 31     13 12    8 7    4 3        0
@@ -352,35 +339,40 @@
 		 *   +------------------------------------------------------+
 		 *   63       48 47    32 31            20 19               0
 		 */
-		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
-		       "[vl   l0 ee  es] "
-		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
-		       "[bi->skb] <-- Ext Rx Write-Back format\n");
+		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
 		for (i = 0; i < rx_ring->count; i++) {
+			const char *next_desc;
 			buffer_info = &rx_ring->buffer_info[i];
 			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 			u1 = (struct my_u1 *)rx_desc_ps;
 			staterr =
 			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+			if (i == rx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == rx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
-				printk(KERN_INFO "RWB[0x%03X]     %016llX "
-				       "%016llX %016llX %016llX "
-				       "---------------- %p", i,
-				       (unsigned long long)le64_to_cpu(u1->a),
-				       (unsigned long long)le64_to_cpu(u1->b),
-				       (unsigned long long)le64_to_cpu(u1->c),
-				       (unsigned long long)le64_to_cpu(u1->d),
-				       buffer_info->skb);
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+					"RWB", i,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					(unsigned long long)le64_to_cpu(u1->c),
+					(unsigned long long)le64_to_cpu(u1->d),
+					buffer_info->skb, next_desc);
 			} else {
-				printk(KERN_INFO "R  [0x%03X]     %016llX "
-				       "%016llX %016llX %016llX %016llX %p", i,
-				       (unsigned long long)le64_to_cpu(u1->a),
-				       (unsigned long long)le64_to_cpu(u1->b),
-				       (unsigned long long)le64_to_cpu(u1->c),
-				       (unsigned long long)le64_to_cpu(u1->d),
-				       (unsigned long long)buffer_info->dma,
-				       buffer_info->skb);
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
+					"R  ", i,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					(unsigned long long)le64_to_cpu(u1->c),
+					(unsigned long long)le64_to_cpu(u1->d),
+					(unsigned long long)buffer_info->dma,
+					buffer_info->skb, next_desc);
 
 				if (netif_msg_pktdata(adapter))
 					print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@
 						phys_to_virt(buffer_info->dma),
 						adapter->rx_ps_bsize0, true);
 			}
-
-			if (i == rx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
-			else if (i == rx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
-			else
-				printk(KERN_CONT "\n");
 		}
 		break;
 	default:
@@ -407,9 +392,7 @@
 		 * 8 |                      Reserved                       |
 		 *   +-----------------------------------------------------+
 		 */
-		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
-		       "[reserved 63:0 ] [bi->dma       ] "
-		       "[bi->skb] <-- Ext (Read) format\n");
+		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
 		/* Extended Receive Descriptor (Write-Back) Format
 		 *
 		 *   63       48 47    32 31    24 23            4 3        0
@@ -423,29 +406,37 @@
 		 *   +------------------------------------------------------+
 		 *   63       48 47    32 31            20 19               0
 		 */
-		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
-		       "[vt   ln xe  xs] "
-		       "[bi->skb] <-- Ext (Write-Back) format\n");
+		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
 
 		for (i = 0; i < rx_ring->count; i++) {
+			const char *next_desc;
+
 			buffer_info = &rx_ring->buffer_info[i];
 			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 			u1 = (struct my_u1 *)rx_desc;
 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+			if (i == rx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == rx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
-				printk(KERN_INFO "RWB[0x%03X]     %016llX "
-				       "%016llX ---------------- %p", i,
-				       (unsigned long long)le64_to_cpu(u1->a),
-				       (unsigned long long)le64_to_cpu(u1->b),
-				       buffer_info->skb);
+				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
+					"RWB", i,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					buffer_info->skb, next_desc);
 			} else {
-				printk(KERN_INFO "R  [0x%03X]     %016llX "
-				       "%016llX %016llX %p", i,
-				       (unsigned long long)le64_to_cpu(u1->a),
-				       (unsigned long long)le64_to_cpu(u1->b),
-				       (unsigned long long)buffer_info->dma,
-				       buffer_info->skb);
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
+					"R  ", i,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					(unsigned long long)buffer_info->dma,
+					buffer_info->skb, next_desc);
 
 				if (netif_msg_pktdata(adapter))
 					print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@
 						       adapter->rx_buffer_len,
 						       true);
 			}
-
-			if (i == rx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
-			else if (i == rx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
-			else
-				printk(KERN_CONT "\n");
 		}
 	}
 
@@ -875,7 +859,7 @@
 	u32 length, staterr;
 	unsigned int i;
 	int cleaned_count = 0;
-	bool cleaned = 0;
+	bool cleaned = false;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
@@ -904,7 +888,7 @@
 
 		next_buffer = &rx_ring->buffer_info[i];
 
-		cleaned = 1;
+		cleaned = true;
 		cleaned_count++;
 		dma_unmap_single(&pdev->dev,
 				 buffer_info->dma,
@@ -1030,6 +1014,7 @@
 	struct e1000_adapter *adapter = container_of(work,
 	                                             struct e1000_adapter,
 	                                             print_hang_task);
+	struct net_device *netdev = adapter->netdev;
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int i = tx_ring->next_to_clean;
 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@
 	if (test_bit(__E1000_DOWN, &adapter->state))
 		return;
 
+	if (!adapter->tx_hang_recheck &&
+	    (adapter->flags2 & FLAG2_DMA_BURST)) {
+		/* Possibly blocked on write-back; flush pending
+		 * descriptor writebacks to memory and detect again
+		 */
+		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+		/* execute the writes immediately */
+		e1e_flush();
+		adapter->tx_hang_recheck = true;
+		return;
+	}
+	/* Real hang detected */
+	adapter->tx_hang_recheck = false;
+	netif_stop_queue(netdev);
+
 	e1e_rphy(hw, PHY_STATUS, &phy_status);
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1095,6 +1095,7 @@
 	unsigned int i, eop;
 	unsigned int count = 0;
 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@
 			if (cleaned) {
 				total_tx_packets += buffer_info->segs;
 				total_tx_bytes += buffer_info->bytecount;
+				if (buffer_info->skb) {
+					bytes_compl += buffer_info->skb->len;
+					pkts_compl++;
+				}
 			}
 
 			e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@
 
 	tx_ring->next_to_clean = i;
 
+	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
 #define TX_WAKE_THRESHOLD 32
 	if (count && netif_carrier_ok(netdev) &&
 	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
@@ -1150,14 +1157,14 @@
 		 * Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i
 		 */
-		adapter->detect_tx_hung = 0;
+		adapter->detect_tx_hung = false;
 		if (tx_ring->buffer_info[i].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
 			       + (adapter->tx_timeout_factor * HZ)) &&
-		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+		    !(er32(STATUS) & E1000_STATUS_TXOFF))
 			schedule_work(&adapter->print_hang_task);
-			netif_stop_queue(netdev);
-		}
+		else
+			adapter->tx_hang_recheck = false;
 	}
 	adapter->total_tx_bytes += total_tx_bytes;
 	adapter->total_tx_packets += total_tx_packets;
@@ -1185,7 +1192,7 @@
 	unsigned int i, j;
 	u32 length, staterr;
 	int cleaned_count = 0;
-	bool cleaned = 0;
+	bool cleaned = false;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
@@ -1211,7 +1218,7 @@
 
 		next_buffer = &rx_ring->buffer_info[i];
 
-		cleaned = 1;
+		cleaned = true;
 		cleaned_count++;
 		dma_unmap_single(&pdev->dev, buffer_info->dma,
 				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -1222,8 +1229,7 @@
 			adapter->flags2 |= FLAG2_IS_DISCARDING;
 
 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
-			e_dbg("Packet Split buffers didn't pick up the full "
-			      "packet\n");
+			e_dbg("Packet Split buffers didn't pick up the full packet\n");
 			dev_kfree_skb_irq(skb);
 			if (staterr & E1000_RXD_STAT_EOP)
 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@
 		length = le16_to_cpu(rx_desc->wb.middle.length0);
 
 		if (!length) {
-			e_dbg("Last part of the packet spanning multiple "
-			      "descriptors\n");
+			e_dbg("Last part of the packet spanning multiple descriptors\n");
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
@@ -1917,8 +1922,7 @@
 					return;
 			}
 			/* MSI-X failed, so fall through and try MSI */
-			e_err("Failed to initialize MSI-X interrupts.  "
-			      "Falling back to MSI interrupts.\n");
+			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
 			e1000e_reset_interrupt_capability(adapter);
 		}
 		adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@
 			adapter->flags |= FLAG_MSI_ENABLED;
 		} else {
 			adapter->int_mode = E1000E_INT_MODE_LEGACY;
-			e_err("Failed to initialize MSI interrupts.  Falling "
-			      "back to legacy interrupts.\n");
+			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
 		}
 		/* Fall through */
 	case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@
 		e1000_put_txbuf(adapter, buffer_info);
 	}
 
+	netdev_reset_queue(adapter->netdev);
 	size = sizeof(struct e1000_buffer) * tx_ring->count;
 	memset(tx_ring->buffer_info, 0, size);
 
@@ -2518,7 +2522,7 @@
 	return work_done;
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@
 	if ((adapter->hw.mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id))
-		return;
+		return 0;
 
 	/* add VID to filter table */
 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@
 	}
 
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
 		e1000e_release_hw_control(adapter);
-		return;
+		return 0;
 	}
 
 	/* remove VID from filter table */
@@ -2564,6 +2570,8 @@
 	}
 
 	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 /**
@@ -3113,79 +3121,147 @@
 }
 
 /**
- *  e1000_update_mc_addr_list - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
- *
- *  Updates the Multicast Table Array.
- *  The caller must have a packed mc_addr_list of multicast addresses.
- **/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-				      u32 mc_addr_count)
-{
-	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
-}
-
-/**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- **/
-static void e1000_set_multi(struct net_device *netdev)
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct netdev_hw_addr *ha;
-	u8  *mta_list;
+	u8 *mta_list;
+	int i;
+
+	if (netdev_mc_empty(netdev)) {
+		/* nothing to program, so clear mc list */
+		hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+		return 0;
+	}
+
+	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+	if (!mta_list)
+		return -ENOMEM;
+
+	/* update_mc_addr_list expects a packed array of only addresses. */
+	i = 0;
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+	hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+	kfree(mta_list);
+
+	return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
+ **/
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int rar_entries = hw->mac.rar_entry_count;
+	int count = 0;
+
+	/* save a rar entry for our hardware address */
+	rar_entries--;
+
+	/* save a rar entry for the LAA workaround */
+	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+		rar_entries--;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev_uc_count(netdev) > rar_entries)
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev) && rar_entries) {
+		struct netdev_hw_addr *ha;
+
+		/*
+		 * write the addresses in reverse order to avoid write
+		 * combining
+		 */
+		netdev_for_each_uc_addr(ha, netdev) {
+			if (!rar_entries)
+				break;
+			e1000e_rar_set(hw, ha->addr, rar_entries--);
+			count++;
+		}
+	}
+
+	/* zero out the remaining RAR entries not used above */
+	for (; rar_entries > 0; rar_entries--) {
+		ew32(RAH(rar_entries), 0);
+		ew32(RAL(rar_entries), 0);
+	}
+	e1e_flush();
+
+	return count;
+}
+
+/**
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000e_set_rx_mode(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
 
 	/* Check for Promiscuous and All Multicast modes */
-
 	rctl = er32(RCTL);
 
+	/* clear the affected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
 	if (netdev->flags & IFF_PROMISC) {
 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-		rctl &= ~E1000_RCTL_VFE;
 		/* Do not hardware filter VLANs in promisc mode */
 		e1000e_vlan_filter_disable(adapter);
 	} else {
+		int count;
 		if (netdev->flags & IFF_ALLMULTI) {
 			rctl |= E1000_RCTL_MPE;
-			rctl &= ~E1000_RCTL_UPE;
 		} else {
-			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+			/*
+			 * Write addresses to the MTA; if the attempt fails,
+			 * turn on promiscuous mode so that we can at least
+			 * receive multicast traffic
+			 */
+			count = e1000e_write_mc_addr_list(netdev);
+			if (count < 0)
+				rctl |= E1000_RCTL_MPE;
 		}
 		e1000e_vlan_filter_enable(adapter);
+		/*
+		 * Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		count = e1000e_write_uc_addr_list(netdev);
+		if (count < 0)
+			rctl |= E1000_RCTL_UPE;
 	}
 
 	ew32(RCTL, rctl);
 
-	if (!netdev_mc_empty(netdev)) {
-		int i = 0;
-
-		mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
-		if (!mta_list)
-			return;
-
-		/* prepare a packed array of only addresses. */
-		netdev_for_each_mc_addr(ha, netdev)
-			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
-		e1000_update_mc_addr_list(hw, mta_list, i);
-		kfree(mta_list);
-	} else {
-		/*
-		 * if we're called from probe, we might not have
-		 * anything to do here, so clear out the list
-		 */
-		e1000_update_mc_addr_list(hw, NULL, 0);
-	}
-
 	if (netdev->features & NETIF_F_HW_VLAN_RX)
 		e1000e_vlan_strip_enable(adapter);
 	else
@@ -3198,7 +3274,7 @@
  **/
 static void e1000_configure(struct e1000_adapter *adapter)
 {
-	e1000_set_multi(adapter->netdev);
+	e1000e_set_rx_mode(adapter->netdev);
 
 	e1000_restore_vlan(adapter);
 	e1000_init_manageability_pt(adapter);
@@ -3444,7 +3520,6 @@
 
 	clear_bit(__E1000_DOWN, &adapter->state);
 
-	napi_enable(&adapter->napi);
 	if (adapter->msix_entries)
 		e1000_configure_msix(adapter);
 	e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@
 	e1e_flush();
 	usleep_range(10000, 20000);
 
-	napi_disable(&adapter->napi);
 	e1000_irq_disable(adapter);
 
 	del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@
 
 	e1000_irq_enable(adapter);
 
+	adapter->tx_hang_recheck = false;
 	netif_start_queue(netdev);
 
 	adapter->idle_check = true;
@@ -3828,6 +3903,8 @@
 
 	pm_runtime_get_sync(&pdev->dev);
 
+	napi_disable(&adapter->napi);
+
 	if (!test_bit(__E1000_DOWN, &adapter->state)) {
 		e1000e_down(adapter);
 		e1000_free_irq(adapter);
@@ -4168,22 +4245,19 @@
 	u32 ctrl = er32(CTRL);
 
 	/* Link status message must follow this format for user tools */
-	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
-	       "Flow Control: %s\n",
-	       adapter->netdev->name,
-	       adapter->link_speed,
-	       (adapter->link_duplex == FULL_DUPLEX) ?
-	       "Full Duplex" : "Half Duplex",
-	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-	       "Rx/Tx" :
-	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
-		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+		adapter->netdev->name,
+		adapter->link_speed,
+		adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+		(ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+		(ctrl & E1000_CTRL_RFCE) ? "Rx" :
+		(ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	bool link_active = 0;
+	bool link_active = false;
 	s32 ret_val = 0;
 
 	/*
@@ -4198,7 +4272,7 @@
 			ret_val = hw->mac.ops.check_for_link(hw);
 			link_active = !hw->mac.get_link_status;
 		} else {
-			link_active = 1;
+			link_active = true;
 		}
 		break;
 	case e1000_media_type_fiber:
@@ -4297,7 +4371,7 @@
 
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
-			bool txb2b = 1;
+			bool txb2b = true;
 
 			/* Cancel scheduled suspend requests. */
 			pm_runtime_resume(netdev->dev.parent);
@@ -4323,21 +4397,18 @@
 				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
 
 				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
-					e_info("Autonegotiated half duplex but"
-					       " link partner cannot autoneg. "
-					       " Try forcing full duplex if "
-					       "link gets many collisions.\n");
+					e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
 			}
 
 			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				txb2b = 0;
+				txb2b = false;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
-				txb2b = 0;
+				txb2b = false;
 				adapter->tx_timeout_factor = 10;
 				break;
 			}
@@ -4473,7 +4544,7 @@
 	e1000e_flush_descriptors(adapter);
 
 	/* Force detection of hung controller every watchdog period */
-	adapter->detect_tx_hung = 1;
+	adapter->detect_tx_hung = true;
 
 	/*
 	 * With 82571 controllers, LAA may be overwritten due to controller
@@ -4985,6 +5056,7 @@
 	/* if count is 0 then mapping error has occurred */
 	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
 	if (count) {
+		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(adapter, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -5110,8 +5182,7 @@
 	if ((adapter->hw.mac.type == e1000_pch2lan) &&
 	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
 	    (new_mtu > ETH_DATA_LEN)) {
-		e_err("Jumbo Frames not supported on 82579 when CRC "
-		      "stripping is disabled.\n");
+		e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
 		return -EINVAL;
 	}
 
@@ -5331,7 +5402,7 @@
 
 	if (wufc) {
 		e1000_setup_rctl(adapter);
-		e1000_set_multi(netdev);
+		e1000e_set_rx_mode(netdev);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
 		if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@
 				phy_data & E1000_WUS_MC ? "Multicast Packet" :
 				phy_data & E1000_WUS_BC ? "Broadcast Packet" :
 				phy_data & E1000_WUS_MAG ? "Magic Packet" :
-				phy_data & E1000_WUS_LNKC ? "Link Status "
-				" Change" : "other");
+				phy_data & E1000_WUS_LNKC ?
+				"Link Status Change" : "other");
 		}
 		e1e_wphy(&adapter->hw, BM_WUS, ~0);
 	} else {
@@ -5859,10 +5930,11 @@
 	}
 }
 
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	u32 changed = features ^ netdev->features;
+	netdev_features_t changed = features ^ netdev->features;
 
 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
 		adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@
 	.ndo_stop		= e1000_close,
 	.ndo_start_xmit		= e1000_xmit_frame,
 	.ndo_get_stats64	= e1000e_get_stats64,
-	.ndo_set_rx_mode	= e1000_set_multi,
+	.ndo_set_rx_mode	= e1000e_set_rx_mode,
 	.ndo_set_mac_address	= e1000_set_mac,
 	.ndo_change_mtu		= e1000_change_mtu,
 	.ndo_do_ioctl		= e1000_ioctl,
@@ -5949,8 +6021,7 @@
 			err = dma_set_coherent_mask(&pdev->dev,
 						    DMA_BIT_MASK(32));
 			if (err) {
-				dev_err(&pdev->dev, "No usable DMA "
-					"configuration, aborting\n");
+				dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
 				goto err_dma;
 			}
 		}
@@ -6076,6 +6147,8 @@
 				  NETIF_F_TSO6 |
 				  NETIF_F_HW_CSUM);
 
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
 	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
 		netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -6135,7 +6208,7 @@
 
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
-	adapter->fc_autoneg = 1;
+	adapter->fc_autoneg = true;
 	adapter->hw.fc.requested_mode = e1000_fc_default;
 	adapter->hw.fc.current_mode = e1000_fc_default;
 	adapter->hw.phy.autoneg_advertised = 0x2f;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7881fb9..b8e20f0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -29,6 +29,8 @@
  * e1000_82576
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
@@ -244,8 +246,7 @@
 	 * Check for invalid size
 	 */
 	if ((hw->mac.type == e1000_82576) && (size > 15)) {
-		printk("igb: The NVM size is not valid, "
-			"defaulting to 32K.\n");
+		pr_notice("The NVM size is not valid, defaulting to 32K\n");
 		size = 15;
 	}
 	nvm->word_size = 1 << size;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c69feeb..3d12e67 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,4 +447,9 @@
 	return 0;
 }
 
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
 #endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 43873eb..7998bf4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -36,6 +36,7 @@
 #include <linux/ethtool.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include "igb.h"
 
@@ -148,7 +149,8 @@
 				   SUPPORTED_1000baseT_Full|
 				   SUPPORTED_Autoneg |
 				   SUPPORTED_TP);
-		ecmd->advertising = ADVERTISED_TP;
+		ecmd->advertising = (ADVERTISED_TP |
+				     ADVERTISED_Pause);
 
 		if (hw->mac.autoneg == 1) {
 			ecmd->advertising |= ADVERTISED_Autoneg;
@@ -165,7 +167,8 @@
 
 		ecmd->advertising = (ADVERTISED_1000baseT_Full |
 				     ADVERTISED_FIBRE |
-				     ADVERTISED_Autoneg);
+				     ADVERTISED_Autoneg |
+				     ADVERTISED_Pause);
 
 		ecmd->port = PORT_FIBRE;
 	}
@@ -673,25 +676,22 @@
 			    struct ethtool_drvinfo *drvinfo)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	char firmware_version[32];
 	u16 eeprom_data;
 
-	strncpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver) - 1);
-	strncpy(drvinfo->version, igb_driver_version,
-		sizeof(drvinfo->version) - 1);
+	strlcpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
 
 	/* EEPROM image version # is reported as firmware version # for
 	 * 82575 controllers */
 	adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d-%d",
 		(eeprom_data & 0xF000) >> 12,
 		(eeprom_data & 0x0FF0) >> 4,
 		eeprom_data & 0x000F);
 
-	strncpy(drvinfo->fw_version, firmware_version,
-		sizeof(drvinfo->fw_version) - 1);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-		sizeof(drvinfo->bus_info) - 1);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = IGB_STATS_LEN;
 	drvinfo->testinfo_len = IGB_TEST_LEN;
 	drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -2162,6 +2162,19 @@
 	}
 }
 
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	pm_runtime_get_sync(&adapter->pdev->dev);
+	return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	pm_runtime_put(&adapter->pdev->dev);
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
 	.get_settings           = igb_get_settings,
 	.set_settings           = igb_set_settings,
@@ -2188,6 +2201,8 @@
 	.get_ethtool_stats      = igb_get_ethtool_stats,
 	.get_coalesce           = igb_get_coalesce,
 	.set_coalesce           = igb_set_coalesce,
+	.begin			= igb_ethtool_begin,
+	.complete		= igb_ethtool_complete,
 };
 
 void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ced5444..01e5e89 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -25,6 +25,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -51,6 +53,7 @@
 #include <linux/if_ether.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
@@ -145,9 +148,9 @@
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
@@ -170,8 +173,18 @@
 #endif
 
 #ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *, pm_message_t);
-static int igb_resume(struct pci_dev *);
+static int igb_suspend(struct device *);
+static int igb_resume(struct device *);
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+#endif
+static const struct dev_pm_ops igb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+			igb_runtime_idle)
+};
 #endif
 static void igb_shutdown(struct pci_dev *);
 #ifdef CONFIG_IGB_DCA
@@ -212,9 +225,7 @@
 	.probe    = igb_probe,
 	.remove   = __devexit_p(igb_remove),
 #ifdef CONFIG_PM
-	/* Power Management Hooks */
-	.suspend  = igb_suspend,
-	.resume   = igb_resume,
+	.driver.pm = &igb_pm_ops,
 #endif
 	.shutdown = igb_shutdown,
 	.err_handler = &igb_err_handler
@@ -325,16 +336,13 @@
 			regs[n] = rd32(E1000_TXDCTL(n));
 		break;
 	default:
-		printk(KERN_INFO "%-15s %08x\n",
-			reginfo->name, rd32(reginfo->ofs));
+		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
 		return;
 	}
 
 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
-	printk(KERN_INFO "%-15s ", rname);
-	for (n = 0; n < 4; n++)
-		printk(KERN_CONT "%08x ", regs[n]);
-	printk(KERN_CONT "\n");
+	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+		regs[2], regs[3]);
 }
 
 /*
@@ -359,18 +367,15 @@
 	/* Print netdevice Info */
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
-		printk(KERN_INFO "Device Name     state            "
-			"trans_start      last_rx\n");
-		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-		netdev->name,
-		netdev->state,
-		netdev->trans_start,
-		netdev->last_rx);
+		pr_info("Device Name     state            trans_start      "
+			"last_rx\n");
+		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+			netdev->state, netdev->trans_start, netdev->last_rx);
 	}
 
 	/* Print Registers */
 	dev_info(&adapter->pdev->dev, "Register Dump\n");
-	printk(KERN_INFO " Register Name   Value\n");
+	pr_info(" Register Name   Value\n");
 	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
 	     reginfo->name; reginfo++) {
 		igb_regdump(hw, reginfo);
@@ -381,18 +386,17 @@
 		goto exit;
 
 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-		" leng ntw timestamp\n");
+	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		struct igb_tx_buffer *buffer_info;
 		tx_ring = adapter->tx_ring[n];
 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
-			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
-			   (u64)buffer_info->dma,
-			   buffer_info->length,
-			   buffer_info->next_to_watch,
-			   (u64)buffer_info->time_stamp);
+		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+			n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			(u64)buffer_info->dma,
+			buffer_info->length,
+			buffer_info->next_to_watch,
+			(u64)buffer_info->time_stamp);
 	}
 
 	/* Print TX Rings */
@@ -414,36 +418,38 @@
 
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "T [desc]     [address 63:0  ] "
-			"[PlPOCIStDDM Ln] [bi->dma       ] "
-			"leng  ntw timestamp        bi->skb\n");
+		pr_info("------------------------------------\n");
+		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
+			"[bi->dma       ] leng  ntw timestamp        "
+			"bi->skb\n");
 
 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			const char *next_desc;
 			struct igb_tx_buffer *buffer_info;
 			tx_desc = IGB_TX_DESC(tx_ring, i);
 			buffer_info = &tx_ring->tx_buffer_info[i];
 			u0 = (struct my_u0 *)tx_desc;
-			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
-				" %04X  %p %016llX %p", i,
+			if (i == tx_ring->next_to_use &&
+			    i == tx_ring->next_to_clean)
+				next_desc = " NTC/U";
+			else if (i == tx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == tx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
+			pr_info("T [0x%03X]    %016llX %016llX %016llX"
+				" %04X  %p %016llX %p%s\n", i,
 				le64_to_cpu(u0->a),
 				le64_to_cpu(u0->b),
 				(u64)buffer_info->dma,
 				buffer_info->length,
 				buffer_info->next_to_watch,
 				(u64)buffer_info->time_stamp,
-				buffer_info->skb);
-			if (i == tx_ring->next_to_use &&
-				i == tx_ring->next_to_clean)
-				printk(KERN_CONT " NTC/U\n");
-			else if (i == tx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
-			else if (i == tx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
-			else
-				printk(KERN_CONT "\n");
+				buffer_info->skb, next_desc);
 
 			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 				print_hex_dump(KERN_INFO, "",
@@ -456,11 +462,11 @@
 	/* Print RX Rings Summary */
 rx_ring_summary:
 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	pr_info("Queue [NTU] [NTC]\n");
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
-		printk(KERN_INFO " %5d %5X %5X\n", n,
-			   rx_ring->next_to_use, rx_ring->next_to_clean);
+		pr_info(" %5d %5X %5X\n",
+			n, rx_ring->next_to_use, rx_ring->next_to_clean);
 	}
 
 	/* Print RX Rings */
@@ -492,36 +498,43 @@
 
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
-			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
-			"<-- Adv Rx Read format\n");
-		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
-			"[vl er S cks ln] ---------------- [bi->skb] "
-			"<-- Adv Rx Write-Back format\n");
+		pr_info("------------------------------------\n");
+		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
+			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");
 
 		for (i = 0; i < rx_ring->count; i++) {
+			const char *next_desc;
 			struct igb_rx_buffer *buffer_info;
 			buffer_info = &rx_ring->rx_buffer_info[i];
 			rx_desc = IGB_RX_DESC(rx_ring, i);
 			u0 = (struct my_u0 *)rx_desc;
 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+			if (i == rx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == rx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
-				printk(KERN_INFO "RWB[0x%03X]     %016llX "
-					"%016llX ---------------- %p", i,
+				pr_info("%s[0x%03X]     %016llX %016llX -------"
+					"--------- %p%s\n", "RWB", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
-					buffer_info->skb);
+					buffer_info->skb, next_desc);
 			} else {
-				printk(KERN_INFO "R  [0x%03X]     %016llX "
-					"%016llX %016llX %p", i,
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
+					" %p%s\n", "R  ", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
 					(u64)buffer_info->dma,
-					buffer_info->skb);
+					buffer_info->skb, next_desc);
 
 				if (netif_msg_pktdata(adapter)) {
 					print_hex_dump(KERN_INFO, "",
@@ -538,14 +551,6 @@
 					  PAGE_SIZE/2, true);
 				}
 			}
-
-			if (i == rx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
-			else if (i == rx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
-			else
-				printk(KERN_CONT "\n");
-
 		}
 	}
 
@@ -599,10 +604,10 @@
 static int __init igb_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s - version %s\n",
+	pr_info("%s - version %s\n",
 	       igb_driver_string, igb_driver_version);
 
-	printk(KERN_INFO "%s\n", igb_copyright);
+	pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
 	dca_register_notify(&dca_notifier);
@@ -1502,6 +1507,7 @@
 		igb_power_up_phy_copper(&adapter->hw);
 	else
 		igb_power_up_serdes_link_82575(&adapter->hw);
+	igb_reset_phy(&adapter->hw);
 }
 
 /**
@@ -1742,7 +1748,8 @@
 	igb_get_phy_info(hw);
 }
 
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1763,10 @@
 	return features;
 }
 
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
-	u32 changed = netdev->features ^ features;
+	netdev_features_t changed = netdev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		igb_vlan_mode(netdev, features);
@@ -2113,6 +2121,8 @@
 	default:
 		break;
 	}
+
+	pm_runtime_put_noidle(&pdev->dev);
 	return 0;
 
 err_register:
@@ -2152,6 +2162,8 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
+	pm_runtime_get_noresume(&pdev->dev);
+
 	/*
 	 * The watchdog timer may be rescheduled, so explicitly
 	 * disable watchdog from being rescheduled.
@@ -2474,16 +2486,22 @@
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int igb_open(struct net_device *netdev)
+static int __igb_open(struct net_device *netdev, bool resuming)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
 	int err;
 	int i;
 
 	/* disallow open during test */
-	if (test_bit(__IGB_TESTING, &adapter->state))
+	if (test_bit(__IGB_TESTING, &adapter->state)) {
+		WARN_ON(resuming);
 		return -EBUSY;
+	}
+
+	if (!resuming)
+		pm_runtime_get_sync(&pdev->dev);
 
 	netif_carrier_off(netdev);
 
@@ -2529,6 +2547,9 @@
 
 	netif_tx_start_all_queues(netdev);
 
+	if (!resuming)
+		pm_runtime_put(&pdev->dev);
+
 	/* start the watchdog. */
 	hw->mac.get_link_status = 1;
 	schedule_work(&adapter->watchdog_task);
@@ -2543,10 +2564,17 @@
 	igb_free_all_tx_resources(adapter);
 err_setup_tx:
 	igb_reset(adapter);
+	if (!resuming)
+		pm_runtime_put(&pdev->dev);
 
 	return err;
 }
 
+static int igb_open(struct net_device *netdev)
+{
+	return __igb_open(netdev, false);
+}
+
 /**
  * igb_close - Disables a network interface
  * @netdev: network interface device structure
@@ -2558,21 +2586,32 @@
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int igb_close(struct net_device *netdev)
+static int __igb_close(struct net_device *netdev, bool suspending)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 
 	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
-	igb_down(adapter);
 
+	if (!suspending)
+		pm_runtime_get_sync(&pdev->dev);
+
+	igb_down(adapter);
 	igb_free_irq(adapter);
 
 	igb_free_all_tx_resources(adapter);
 	igb_free_all_rx_resources(adapter);
 
+	if (!suspending)
+		pm_runtime_put_sync(&pdev->dev);
 	return 0;
 }
 
+static int igb_close(struct net_device *netdev)
+{
+	return __igb_close(netdev, false);
+}
+
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
@@ -3203,6 +3242,7 @@
 		buffer_info = &tx_ring->tx_buffer_info[i];
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 
 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
@@ -3632,6 +3672,9 @@
 
 	link = igb_has_link(adapter);
 	if (link) {
+		/* Cancel scheduled suspend requests. */
+		pm_runtime_resume(netdev->dev.parent);
+
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
 			hw->mac.ops.get_speed_and_duplex(hw,
@@ -3640,23 +3683,23 @@
 
 			ctrl = rd32(E1000_CTRL);
 			/* Links status message must follow this format */
-			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
-				 "Flow Control: %s\n",
+			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+			       "Duplex, Flow Control: %s\n",
 			       netdev->name,
 			       adapter->link_speed,
 			       adapter->link_duplex == FULL_DUPLEX ?
-				 "Full Duplex" : "Half Duplex",
-			       ((ctrl & E1000_CTRL_TFCE) &&
-			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
-			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
-			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
+			       "Full" : "Half",
+			       (ctrl & E1000_CTRL_TFCE) &&
+			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
+			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
 
 			/* check for thermal sensor event */
-			if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
-				printk(KERN_INFO "igb: %s The network adapter "
-						 "link speed was downshifted "
-						 "because it overheated.\n",
-						 netdev->name);
+			if (igb_thermal_sensor_event(hw,
+			    E1000_THSTAT_LINK_THROTTLE)) {
+				netdev_info(netdev, "The network adapter link "
+					    "speed was downshifted because it "
+					    "overheated\n");
 			}
 
 			/* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3729,10 @@
 			adapter->link_duplex = 0;
 
 			/* check for thermal sensor event */
-			if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
-				printk(KERN_ERR "igb: %s The network adapter "
-						"was stopped because it "
-						"overheated.\n",
-						netdev->name);
+			if (igb_thermal_sensor_event(hw,
+			    E1000_THSTAT_PWR_DOWN)) {
+				netdev_err(netdev, "The network adapter was "
+					   "stopped because it overheated\n");
 			}
 
 			/* Links status message must follow this format */
@@ -3704,6 +3746,9 @@
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
+
+			pm_schedule_suspend(netdev->dev.parent,
+					    MSEC_PER_SEC * 5);
 		}
 	}
 
@@ -4241,6 +4286,8 @@
 		frag++;
 	}
 
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
 	/* write last descriptor with RS and EOP bits */
 	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
 	tx_desc->read.cmd_type_len = cmd_type;
@@ -5780,6 +5827,8 @@
 		}
 	}
 
+	netdev_tx_completed_queue(txring_txq(tx_ring),
+				  total_packets, total_bytes);
 	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
 	u64_stats_update_begin(&tx_ring->tx_syncp);
@@ -6138,7 +6187,7 @@
 		return true;
 
 	if (!page) {
-		page = netdev_alloc_page(rx_ring->netdev);
+		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 		bi->page = page;
 		if (unlikely(!page)) {
 			rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6516,7 @@
 	return 0;
 }
 
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6543,7 @@
 	igb_rlpml_set(adapter);
 }
 
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6556,11 @@
 	igb_vfta_set(hw, vid, true);
 
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6575,8 @@
 		igb_vfta_set(hw, vid, false);
 
 	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -6582,13 +6635,14 @@
 	return -EINVAL;
 }
 
-static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			  bool runtime)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl, status;
-	u32 wufc = adapter->wol;
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
 #endif
@@ -6596,7 +6650,7 @@
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev))
-		igb_close(netdev);
+		__igb_close(netdev, true);
 
 	igb_clear_interrupt_scheme(adapter);
 
@@ -6655,12 +6709,13 @@
 }
 
 #ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int igb_suspend(struct device *dev)
 {
 	int retval;
 	bool wake;
+	struct pci_dev *pdev = to_pci_dev(dev);
 
-	retval = __igb_shutdown(pdev, &wake);
+	retval = __igb_shutdown(pdev, &wake, 0);
 	if (retval)
 		return retval;
 
@@ -6674,8 +6729,9 @@
 	return 0;
 }
 
-static int igb_resume(struct pci_dev *pdev)
+static int igb_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -6696,7 +6752,18 @@
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	if (igb_init_interrupt_scheme(adapter)) {
+	if (!rtnl_is_locked()) {
+		/*
+		 * shut up ASSERT_RTNL() warning in
+		 * netif_set_real_num_tx/rx_queues.
+		 */
+		rtnl_lock();
+		err = igb_init_interrupt_scheme(adapter);
+		rtnl_unlock();
+	} else {
+		err = igb_init_interrupt_scheme(adapter);
+	}
+	if (err) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -6709,23 +6776,61 @@
 
 	wr32(E1000_WUS, ~0);
 
-	if (netif_running(netdev)) {
-		err = igb_open(netdev);
+	if (netdev->flags & IFF_UP) {
+		err = __igb_open(netdev, true);
 		if (err)
 			return err;
 	}
 
 	netif_device_attach(netdev);
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	if (!igb_has_link(adapter))
+		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+	return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __igb_shutdown(pdev, &wake, 1);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 
 	return 0;
 }
+
+static int igb_runtime_resume(struct device *dev)
+{
+	return igb_resume(dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
 #endif
 
 static void igb_shutdown(struct pci_dev *pdev)
 {
 	bool wake;
 
-	__igb_shutdown(pdev, &wake);
+	__igb_shutdown(pdev, &wake, 0);
 
 	if (system_state == SYSTEM_POWER_OFF) {
 		pci_wake_from_d3(pdev, wake);
@@ -7064,15 +7169,28 @@
 			wr32(E1000_DMCTXTH, 0);
 
 			/*
-			 * DMA Coalescing high water mark needs to be higher
-			 * than the RX threshold. set hwm to PBA -  2 * max
-			 * frame size
+			 * DMA Coalescing high water mark needs to be greater
+			 * than the Rx threshold. Set hwm to PBA - max frame
+			 * size in 16B units, capping it at PBA - 6KB.
 			 */
-			hwm = pba - (2 * adapter->max_frame_size);
+			hwm = 64 * pba - adapter->max_frame_size / 16;
+			if (hwm < 64 * (pba - 6))
+				hwm = 64 * (pba - 6);
+			reg = rd32(E1000_FCRTC);
+			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+				& E1000_FCRTC_RTH_COAL_MASK);
+			wr32(E1000_FCRTC, reg);
+
+			/*
+			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+			 * frame size, capping it at PBA - 10KB.
+			 */
+			dmac_thr = pba - adapter->max_frame_size / 512;
+			if (dmac_thr < pba - 10)
+				dmac_thr = pba - 10;
 			reg = rd32(E1000_DMACR);
 			reg &= ~E1000_DMACR_DMACTHR_MASK;
-			dmac_thr = pba - 4;
-
 			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
 				& E1000_DMACR_DMACTHR_MASK);
 
@@ -7088,7 +7206,6 @@
 			 * coalescing(smart fifb)-UTRESH=0
 			 */
 			wr32(E1000_DMCRTRH, 0);
-			wr32(E1000_FCRTC, hwm);
 
 			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
 
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2c25858..7b600a1 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -191,12 +191,12 @@
                               struct ethtool_drvinfo *drvinfo)
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
-	char firmware_version[32] = "N/A";
 
-	strncpy(drvinfo->driver,  igbvf_driver_name, 32);
-	strncpy(drvinfo->version, igbvf_driver_version, 32);
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->driver,  igbvf_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, igbvf_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->regdump_len = igbvf_get_regs_len(netdev);
 	drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
 }
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index cca7812..fd3da30 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -25,6 +25,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -1174,18 +1176,20 @@
 	e1000_rlpml_set_vf(hw, max_frame_size);
 }
 
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (hw->mac.ops.set_vfta(hw, vid, true))
+	if (hw->mac.ops.set_vfta(hw, vid, true)) {
 		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
-	else
-		set_bit(vid, adapter->active_vlans);
+		return -EINVAL;
+	}
+	set_bit(vid, adapter->active_vlans);
+	return 0;
 }
 
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -1195,11 +1199,13 @@
 	if (!test_bit(__IGBVF_DOWN, &adapter->state))
 		igbvf_irq_enable(adapter);
 
-	if (hw->mac.ops.set_vfta(hw, vid, false))
+	if (hw->mac.ops.set_vfta(hw, vid, false)) {
 		dev_err(&adapter->pdev->dev,
 		        "Failed to remove vlan id %d\n", vid);
-	else
-		clear_bit(vid, adapter->active_vlans);
+		return -EINVAL;
+	}
+	clear_bit(vid, adapter->active_vlans);
+	return 0;
 }
 
 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1752,9 @@
 
 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
 {
-	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
-	         adapter->link_speed,
-	         ((adapter->link_duplex == FULL_DUPLEX) ?
-	          "Full Duplex" : "Half Duplex"));
+	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+		 adapter->link_speed,
+		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
 }
 
 static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2537,8 @@
 	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
 }
 
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -2842,9 +2848,8 @@
 static int __init igbvf_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s - version %s\n",
-	       igbvf_driver_string, igbvf_driver_version);
-	printk(KERN_INFO "%s\n", igbvf_copyright);
+	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+	pr_info("%s\n", igbvf_copyright);
 
 	ret = pci_register_driver(&igbvf_driver);
 
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 9dfce7d..dbb7dd2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -473,10 +473,12 @@
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-	strncpy(drvinfo->driver,  ixgb_driver_name, 32);
-	strncpy(drvinfo->version, ixgb_driver_version, 32);
-	strncpy(drvinfo->fw_version, "N/A", 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->driver,  ixgb_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, ixgb_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = IXGB_STATS_LEN;
 	drvinfo->regdump_len = ixgb_get_regs_len(netdev);
 	drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e21148f..9bd5faf 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,8 @@
 
 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@
 	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
 		err = pci_enable_msi(adapter->pdev);
 		if (!err) {
-			adapter->have_msi = 1;
+			adapter->have_msi = true;
 			irq_flags = 0;
 		}
 		/* proceed to try to request regular interrupt */
@@ -325,8 +325,8 @@
 	}
 }
 
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
 {
 	/*
 	 * Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@
 }
 
 static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
-	u32 changed = features ^ netdev->features;
+	netdev_features_t changed = features ^ netdev->features;
 
 	if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
 		return 0;
@@ -2217,7 +2217,7 @@
 	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
 }
 
-static void
+static int
 ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@
 	vfta |= (1 << (vid & 0x1F));
 	ixgb_write_vfta(&adapter->hw, index, vfta);
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
-static void
+static int
 ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@
 	vfta &= ~(1 << (vid & 0x1F));
 	ixgb_write_vfta(&adapter->hw, index, vfta);
 	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8368d5..258164d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -560,6 +560,7 @@
 
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
+extern char ixgbe_default_device_descr[];
 
 extern void ixgbe_up(struct ixgbe_adapter *adapter);
 extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -627,6 +628,8 @@
 extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
 extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+				  struct netdev_fcoe_hbainfo *info);
 #endif /* IXGBE_FCOE */
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4ae26a7..7720721 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -356,6 +356,7 @@
 	case IXGBE_DEV_ID_82599_SFP_FCOE:
 	case IXGBE_DEV_ID_82599_SFP_EM:
 	case IXGBE_DEV_ID_82599_SFP_SF2:
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
 	case IXGBE_DEV_ID_82599EN_SFP:
 		media_type = ixgbe_media_type_fiber;
 		break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index f1365fe..a3aa633 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -266,10 +266,10 @@
 	if (hw->mac.type == ixgbe_mac_X540) {
 		if (hw->phy.id == 0)
 			hw->phy.ops.identify(hw);
-		hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
-		hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
-		hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
-		hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
 	}
 
 	return 0;
@@ -2599,7 +2599,7 @@
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
 {
 	ixgbe_link_speed speed = 0;
-	bool link_up = 0;
+	bool link_up = false;
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 33b93ff..da31735 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -158,10 +158,6 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	/* Abort a bad configuration */
-	if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-		return;
-
 	if (prio != DCB_ATTR_VALUE_UNDEFINED)
 		adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
 	if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@
 
 	if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
 	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-		adapter->dcb_set_bitmap |= BIT_PFC;
+		adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	/* Abort bad configurations */
-	if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-		return;
-
 	if (prio != DCB_ATTR_VALUE_UNDEFINED)
 		adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
 	if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@
 	*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_stop(dev);
+
+	ixgbe_clear_interrupt_scheme(adapter);
+	ixgbe_init_interrupt_scheme(adapter);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_open(dev);
+
+	clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@
 	if (ret)
 		return DCB_NO_HW_CHG;
 
-#ifdef IXGBE_FCOE
-	if (up && !(up & (1 << adapter->fcoe.up)))
-		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
-	/*
-	 * Only take down the adapter if an app change occurred. FCoE
-	 * may shuffle tx rings in this case and this can not be done
-	 * without a reset currently.
-	 */
-	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
-			usleep_range(1000, 2000);
-
-		adapter->fcoe.up = ffs(up) - 1;
-
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_clear_interrupt_scheme(adapter);
-	}
-#endif
-
 	if (adapter->dcb_cfg.pfc_mode_enable) {
 		switch (adapter->hw.mac.type) {
 		case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@
 		}
 	}
 
-#ifdef IXGBE_FCOE
-	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-		ixgbe_init_interrupt_scheme(adapter);
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_open(netdev);
-		ret = DCB_HW_CHG_RST;
-	}
-#endif
-
 	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
 		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
 		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@
 	if (adapter->dcb_cfg.pfc_mode_enable)
 		adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
-		clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+	/* Reprogram FCoE hardware offloads when the traffic class
+	 * FCoE is using changes. This happens if the APP info
+	 * changes or the up2tc mapping is updated.
+	 */
+	if ((up && !(up & (1 << adapter->fcoe.up))) ||
+	    (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+		adapter->fcoe.up = ffs(up) - 1;
+		ixgbe_dcbnl_devreset(netdev);
+		ret = DCB_HW_CHG_RST;
+	}
+#endif
+
 	adapter->dcb_set_bitmap = 0x00;
 	return ret;
 }
@@ -661,22 +655,6 @@
 	return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
 }
 
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(dev);
-
-	if (netif_running(dev))
-		dev->netdev_ops->ndo_stop(dev);
-
-	ixgbe_clear_interrupt_scheme(adapter);
-	ixgbe_init_interrupt_scheme(adapter);
-
-	if (netif_running(dev))
-		dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
 				   struct dcb_app *app)
 {
@@ -761,7 +739,9 @@
 		ixgbe_dcbnl_ieee_setets(dev, &ets);
 		ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
 	} else if (mode & DCB_CAP_DCBX_VER_CEE) {
-		adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+		u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+		adapter->dcb_set_bitmap |= mask;
 		ixgbe_dcbnl_set_all(dev);
 	} else {
 		/* Drop into single TC mode strict priority as this
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 70d58c3..da7e580 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -888,23 +888,19 @@
                               struct ethtool_drvinfo *drvinfo)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	char firmware_version[32];
 	u32 nvm_track_id;
 
-	strncpy(drvinfo->driver, ixgbe_driver_name,
-	        sizeof(drvinfo->driver) - 1);
-	strncpy(drvinfo->version, ixgbe_driver_version,
-	        sizeof(drvinfo->version) - 1);
+	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, ixgbe_driver_version,
+		sizeof(drvinfo->version));
 
 	nvm_track_id = (adapter->eeprom_verh << 16) |
 			adapter->eeprom_verl;
-	snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
 		 nvm_track_id);
 
-	strncpy(drvinfo->fw_version, firmware_version,
-		sizeof(drvinfo->fw_version) - 1);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-		sizeof(drvinfo->bus_info) - 1);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = IXGBE_STATS_LEN;
 	drvinfo->testinfo_len = IXGBE_TEST_LEN;
 	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1959,12 +1955,21 @@
 	/* WOL not supported except for the following */
 	switch(hw->device_id) {
 	case IXGBE_DEV_ID_82599_SFP:
-		/* Only this subdevice supports WOL */
-		if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+		/* Only these subdevices support WOL */
+		switch (hw->subsystem_device_id) {
+		case IXGBE_SUBDEV_ID_82599_560FLR:
+			/* only support first port */
+			if (hw->bus.func != 0) {
+				wol->supported = 0;
+				break;
+			}
+		case IXGBE_SUBDEV_ID_82599_SFP:
+			retval = 0;
+			break;
+		default:
 			wol->supported = 0;
 			break;
 		}
-		retval = 0;
 		break;
 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 		/* All except this subdevice support WOL */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index df3b1be..d18d615 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -855,3 +855,86 @@
 	}
 	return rc;
 }
+
+/**
+ * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
+ * @netdev: ixgbe adapter
+ * @info: HBA information
+ *
+ * Returns ixgbe HBA information
+ *
+ * Returns: 0 on success
+ */
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+			   struct netdev_fcoe_hbainfo *info)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, pos;
+	u8 buf[8];
+
+	if (!info)
+		return -EINVAL;
+
+	/* Don't return information on unsupported devices */
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+	    hw->mac.type != ixgbe_mac_X540)
+		return -EINVAL;
+
+	/* Manufacturer */
+	snprintf(info->manufacturer, sizeof(info->manufacturer),
+		 "Intel Corporation");
+
+	/* Serial Number */
+
+	/* Get the PCI-e Device Serial Number Capability */
+	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
+	if (pos) {
+		pos += 4;
+		for (i = 0; i < 8; i++)
+			pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
+
+		snprintf(info->serial_number, sizeof(info->serial_number),
+			 "%02X%02X%02X%02X%02X%02X%02X%02X",
+			 buf[7], buf[6], buf[5], buf[4],
+			 buf[3], buf[2], buf[1], buf[0]);
+	} else
+		snprintf(info->serial_number, sizeof(info->serial_number),
+			 "Unknown");
+
+	/* Hardware Version */
+	snprintf(info->hardware_version,
+		 sizeof(info->hardware_version),
+		 "Rev %d", hw->revision_id);
+	/* Driver Name/Version */
+	snprintf(info->driver_version,
+		 sizeof(info->driver_version),
+		 "%s v%s",
+		 ixgbe_driver_name,
+		 ixgbe_driver_version);
+	/* Firmware Version */
+	snprintf(info->firmware_version,
+		 sizeof(info->firmware_version),
+		 "0x%08x",
+		 (adapter->eeprom_verh << 16) |
+		  adapter->eeprom_verl);
+
+	/* Model */
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		snprintf(info->model,
+			 sizeof(info->model),
+			 "Intel 82599");
+	} else {
+		snprintf(info->model,
+			 sizeof(info->model),
+			 "Intel X540");
+	}
+
+	/* Model Description */
+	snprintf(info->model_description,
+		 sizeof(info->model_description),
+		 "%s",
+		 ixgbe_default_device_descr);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8ef92d1..1ee5d0f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -55,6 +55,8 @@
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
+char ixgbe_default_device_descr[] =
+			      "Intel(R) 10 Gigabit Network Connection";
 #define MAJ 3
 #define MIN 6
 #define BUILD 7
@@ -106,6 +108,7 @@
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
 	/* required last entry */
 	{0, }
 };
@@ -146,7 +149,7 @@
 {
 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 
-	/* flush memory to make sure state is correct before next watchog */
+	/* flush memory to make sure state is correct before next watchdog */
 	smp_mb__before_clear_bit();
 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 }
@@ -1140,7 +1143,7 @@
 
 		if (ring_is_ps_enabled(rx_ring)) {
 			if (!bi->page) {
-				bi->page = netdev_alloc_page(rx_ring->netdev);
+				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 				if (!bi->page) {
 					rx_ring->rx_stats.alloc_rx_page_failed++;
 					goto no_buffers;
@@ -2156,7 +2159,7 @@
 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
 
 	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
-	 * therefore no explict interrupt disable is necessary */
+	 * therefore no explicit interrupt disable is necessary */
 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 	if (!eicr) {
 		/*
@@ -3044,7 +3047,7 @@
 	hw->mac.ops.enable_rx_dma(hw, rxctrl);
 }
 
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3056,11 @@
 	/* add VID to filter table */
 	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3069,8 @@
 	/* remove VID from filter table */
 	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 /**
@@ -3602,7 +3609,7 @@
 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 {
 	/*
-	 * We are assuming the worst case scenerio here, and that
+	 * We are assuming the worst case scenario here, and that
 	 * is that an SFP was inserted/removed after the reset
 	 * but before SFP detection was enabled.  As such the best
 	 * solution is to just start searching as soon as we start
@@ -3824,7 +3831,7 @@
 	case IXGBE_ERR_EEPROM_VERSION:
 		/* We are running on a pre-production device, log a warning */
 		e_dev_warn("This device is a pre-production adapter/LOM. "
-			   "Please be aware there may be issuesassociated with "
+			   "Please be aware there may be issues associated with "
 			   "your hardware.  If you are experiencing problems "
 			   "please contact your Intel or hardware "
 			   "representative who provided you with this "
@@ -4019,7 +4026,7 @@
 
 		/* Mark all the VFs as inactive */
 		for (i = 0 ; i < adapter->num_vfs; i++)
-			adapter->vfinfo[i].clear_to_send = 0;
+			adapter->vfinfo[i].clear_to_send = false;
 
 		/* ping all the active vfs to let them know we are going down */
 		ixgbe_ping_all_vfs(adapter);
@@ -5788,9 +5795,9 @@
  * @adapter - pointer to the device adapter structure
  *
  * This function serves two purposes.  First it strobes the interrupt lines
- * in order to make certain interrupts are occuring.  Secondly it sets the
+ * in order to make certain interrupts are occurring.  Secondly it sets the
  * bits needed to check for TX hangs.  As a result we should immediately
- * determine if a hang has occured.
+ * determine if a hang has occurred.
  */
 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 {
@@ -7128,7 +7135,7 @@
 		return -EINVAL;
 
 	/* Hardware has to reinitialize queues and interrupts to
-	 * match packet buffer alignment. Unfortunantly, the
+	 * match packet buffer alignment. Unfortunately, the
 	 * hardware is not flexible enough to do this dynamically.
 	 */
 	if (netif_running(dev))
@@ -7174,7 +7181,8 @@
 		ixgbe_reset(adapter);
 }
 
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+	netdev_features_t data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -7204,7 +7212,8 @@
 	return data;
 }
 
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+	netdev_features_t data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	bool need_reset = false;
@@ -7286,6 +7295,7 @@
 	.ndo_fcoe_enable = ixgbe_fcoe_enable,
 	.ndo_fcoe_disable = ixgbe_fcoe_disable,
 	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+	.ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
 #endif /* IXGBE_FCOE */
 	.ndo_set_features = ixgbe_set_features,
 	.ndo_fix_features = ixgbe_fix_features,
@@ -7598,9 +7608,16 @@
 	adapter->wol = 0;
 	switch (pdev->device) {
 	case IXGBE_DEV_ID_82599_SFP:
-		/* Only this subdevice supports WOL */
-		if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+		/* Only these subdevices support WOL */
+		switch (pdev->subsystem_device) {
+		case IXGBE_SUBDEV_ID_82599_560FLR:
+			/* only support first port */
+			if (hw->bus.func != 0)
+				break;
+		case IXGBE_SUBDEV_ID_82599_SFP:
 			adapter->wol = IXGBE_WUFC_MAG;
+			break;
+		}
 		break;
 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 		/* All except this subdevice support WOL */
@@ -7708,7 +7725,7 @@
 	/* add san mac addr to netdev */
 	ixgbe_add_sanmac_netdev(netdev);
 
-	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+	e_dev_info("%s\n", ixgbe_default_device_descr);
 	cards_found++;
 	return 0;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 9a56fd7..7cf1e1f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1214,7 +1214,7 @@
 	u32 max_retry = 10;
 	u32 retry = 0;
 	u16 swfw_mask = 0;
-	bool nack = 1;
+	bool nack = true;
 	*data = 0;
 
 	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@
 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
 {
 	s32 i;
-	bool bit = 0;
+	bool bit = false;
 
 	for (i = 7; i >= 0; i--) {
 		ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@
 	s32 status = 0;
 	s32 i;
 	u32 i2cctl;
-	bool bit = 0;
+	bool bit = false;
 
 	for (i = 7; i >= 0; i--) {
 		bit = (data >> i) & 0x1;
@@ -1457,6 +1457,7 @@
 	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
 	i2cctl |= IXGBE_I2C_DATA_OUT;
 	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+	IXGBE_WRITE_FLUSH(hw);
 
 	return status;
 }
@@ -1473,7 +1474,7 @@
 	u32 i = 0;
 	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
 	u32 timeout = 10;
-	bool ack = 1;
+	bool ack = true;
 
 	ixgbe_raise_i2c_clk(hw, &i2cctl);
 
@@ -1646,9 +1647,9 @@
 	bool data;
 
 	if (*i2cctl & IXGBE_I2C_DATA_IN)
-		data = 1;
+		data = true;
 	else
-		data = 0;
+		data = false;
 
 	return data;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 00fcd39..cf6812d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -572,7 +572,7 @@
 
 		/* reply to reset with ack and vf mac address */
 		msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-		memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+		memcpy(new_mac, vf_mac, ETH_ALEN);
 		/*
 		 * Piggyback the multicast filter type so VF can compute the
 		 * correct vectors
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index df04f1a..e8badab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -33,7 +33,6 @@
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
 			   u8 qos);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6c5cca8..802bfa0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -57,6 +57,7 @@
 #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a
 #define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
 #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
 #define IXGBE_DEV_ID_82599EN_SFP         0x1557
@@ -65,6 +66,7 @@
 #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ  0x000C
 #define IXGBE_DEV_ID_82599_LS            0x154F
 #define IXGBE_DEV_ID_X540T               0x1528
+#define IXGBE_DEV_ID_82599_SFP_SF_QP     0x154A
 
 /* VF Device IDs */
 #define IXGBE_DEV_ID_82599_VF           0x10ED
@@ -1710,8 +1712,6 @@
 #define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
 #define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
 
-#define IXGBE_ETH_LENGTH_OF_ADDRESS   6
-
 #define IXGBE_EEPROM_PAGE_SIZE_MAX       128
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2802,9 +2802,9 @@
 struct ixgbe_mac_info {
 	struct ixgbe_mac_operations     ops;
 	enum ixgbe_mac_type             type;
-	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8                              addr[ETH_ALEN];
+	u8                              perm_addr[ETH_ALEN];
+	u8                              san_addr[ETH_ALEN];
 	/* prefix for World Wide Node Name (WWNN) */
 	u16                             wwnn_prefix;
 	/* prefix for World Wide Port Name (WWPN) */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e5101e9..8cc5ecc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -751,16 +751,20 @@
 {
 	u32 macc_reg;
 	u32 ledctl_reg;
+	ixgbe_link_speed speed;
+	bool link_up;
 
 	/*
-	 * In order for the blink bit in the LED control register
-	 * to work, link and speed must be forced in the MAC. We
-	 * will reverse this when we stop the blinking.
+	 * Link should be up in order for the blink bit in the LED control
+	 * register to work. Force link and speed in the MAC if link is down.
+	 * This will be reversed when we stop the blinking.
 	 */
-	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
-	macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
-	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+	if (!link_up) {
+		macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+		macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+		IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+	}
 	/* Set the LED to LINK_UP + BLINK. */
 	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 78abb6f..2eb89cb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -35,7 +35,6 @@
 #define IXGBE_VF_IRQ_CLEAR_MASK         7
 #define IXGBE_VF_MAX_TX_QUEUES          1
 #define IXGBE_VF_MAX_RX_QUEUES          1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS     6
 
 /* Link speed */
 typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e29ba45..dc8e651 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -27,6 +27,8 @@
 
 /* ethtool support for ixgbevf */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -265,11 +267,11 @@
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-	strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
-	strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
-	strlcpy(drvinfo->fw_version, "N/A", 4);
-	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, ixgbevf_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 }
 
 static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +551,8 @@
 	writel((W & M), (adapter->hw.hw_addr + R));                           \
 	val = readl(adapter->hw.hw_addr + R);                                 \
 	if ((W & M) != (val & M)) {                                           \
-		printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
-				 "expected 0x%08X\n", R, (val & M), (W & M)); \
+		pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+		       "0x%08X\n", R, (val & M), (W & M));                    \
 		*data = R;                                                    \
 		writel(before, (adapter->hw.hw_addr + R));                    \
 		return 1;                                                     \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4c8e199..891162d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -29,6 +29,9 @@
 /******************************************************************************
  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
 ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -363,7 +366,7 @@
 		if (!bi->page_dma &&
 		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
 			if (!bi->page) {
-				bi->page = netdev_alloc_page(adapter->netdev);
+				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 				if (!bi->page) {
 					adapter->alloc_rx_page_failed++;
 					goto no_buffers;
@@ -1400,7 +1403,7 @@
 	}
 }
 
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1412,11 @@
 	if (hw->mac.ops.set_vfta)
 		hw->mac.ops.set_vfta(hw, vid, 0, true);
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1425,8 @@
 	if (hw->mac.ops.set_vfta)
 		hw->mac.ops.set_vfta(hw, vid, 0, false);
 	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1444,7 @@
 	int count = 0;
 
 	if ((netdev_uc_count(netdev)) > 10) {
-		printk(KERN_ERR "Too many unicast filters - No Space\n");
+		pr_err("Too many unicast filters - No Space\n");
 		return -ENOSPC;
 	}
 
@@ -2135,7 +2142,7 @@
 
 	err = ixgbevf_alloc_queues(adapter);
 	if (err) {
-		printk(KERN_ERR "Unable to allocate memory for queues\n");
+		pr_err("Unable to allocate memory for queues\n");
 		goto err_alloc_queues;
 	}
 
@@ -2189,7 +2196,7 @@
 	} else {
 		err = hw->mac.ops.init_hw(hw);
 		if (err) {
-			printk(KERN_ERR "init_shared_code failed: %d\n", err);
+			pr_err("init_shared_code failed: %d\n", err);
 			goto out;
 		}
 	}
@@ -2630,8 +2637,8 @@
 		 * the vf can't start. */
 		if (hw->adapter_stopped) {
 			err = IXGBE_ERR_MBX;
-			printk(KERN_ERR "Unable to start - perhaps the PF"
-			       " Driver isn't up yet\n");
+			pr_err("Unable to start - perhaps the PF Driver isn't "
+			       "up yet\n");
 			goto err_setup_reset;
 		}
 	}
@@ -2842,10 +2849,8 @@
 				break;
 			default:
 				if (unlikely(net_ratelimit())) {
-					printk(KERN_WARNING
-					       "partial checksum but "
-					       "proto=%x!\n",
-					       skb->protocol);
+					pr_warn("partial checksum but "
+						"proto=%x!\n", skb->protocol);
 				}
 				break;
 			}
@@ -3249,7 +3254,8 @@
 	return stats;
 }
 
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
@@ -3414,7 +3420,7 @@
 	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
 
 	if (!is_valid_ether_addr(netdev->dev_addr)) {
-		printk(KERN_ERR "invalid MAC address\n");
+		pr_err("invalid MAC address\n");
 		err = -EIO;
 		goto err_sw_init;
 	}
@@ -3535,10 +3541,10 @@
 static int __init ixgbevf_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
-	       ixgbevf_driver_version);
+	pr_info("%s - version %s\n", ixgbevf_driver_string,
+		ixgbevf_driver_version);
 
-	printk(KERN_INFO "%s\n", ixgbevf_copyright);
+	pr_info("%s\n", ixgbevf_copyright);
 
 	ret = pci_register_driver(&ixgbevf_driver);
 	return ret;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index ea393eb..9d38a94 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -47,8 +47,8 @@
 #define IXGBE_VFMAILBOX_RSTD     0x00000080 /* PF has indicated reset done */
 #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
 
-#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * vfn))
+#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * (vfn)))
 
 #define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
 #define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 189200e..5e4d5e5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -39,29 +39,29 @@
 #define IXGBE_VTEIMC           0x0010C
 #define IXGBE_VTEIAC           0x00110
 #define IXGBE_VTEIAM           0x00114
-#define IXGBE_VTEITR(x)        (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x)        (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x)        (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x)        (0x00120 + (4 * (x)))
 #define IXGBE_VTIVAR_MISC      0x00140
-#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * x))
-#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * x))
+#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * (x)))
 #define IXGBE_VFPSRTYPE        0x00300
-#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * (x)))
 #define IXGBE_VFGPRC           0x0101C
 #define IXGBE_VFGPTC           0x0201C
 #define IXGBE_VFGORC_LSB       0x01020
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index aa3682e..21533e3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -108,7 +108,7 @@
 	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
 		return IXGBE_ERR_INVALID_MAC_ADDR;
 
-	memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
 	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 
 	return 0;
@@ -211,7 +211,7 @@
  **/
 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 {
-	memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 76b8457..27d651a 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1990,7 +1990,7 @@
 		struct page *page,
 		u32 page_offset,
 		u32 len,
-		u8 hidma)
+		bool hidma)
 {
 	dma_addr_t dmaaddr;
 
@@ -2024,7 +2024,7 @@
 	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc = txring->desc, *ctxdesc;
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
-	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+	bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 	int mask = jme->tx_ring_mask;
 	const struct skb_frag_struct *frag;
@@ -2399,9 +2399,9 @@
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(jme->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
 }
 
 static int
@@ -2727,8 +2727,8 @@
 	jme->msg_enable = value;
 }
 
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
 {
 	if (netdev->mtu > 1900)
 		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2736,7 +2736,7 @@
 }
 
 static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d8430f4..6ad094f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1230,18 +1230,7 @@
 	.remove = korina_remove,
 };
 
-static int __init korina_init_module(void)
-{
-	return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
-	return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
 
 MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
 MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 194a031..e87847e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1502,10 +1502,12 @@
 static void mv643xx_eth_get_drvinfo(struct net_device *dev,
 				    struct ethtool_drvinfo *drvinfo)
 {
-	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
-	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
-	strncpy(drvinfo->fw_version, "N/A", 32);
-	strncpy(drvinfo->bus_info, "platform", 32);
+	strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
@@ -1578,10 +1580,10 @@
 
 
 static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	u32 rx_csum = features & NETIF_F_RXCSUM;
+	bool rx_csum = features & NETIF_F_RXCSUM;
 
 	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d17d062..5ec409e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1645,18 +1645,7 @@
 		   },
 };
 
-static int __init pxa168_init_module(void)
-{
-	return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
-	platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c7b6083..18a87a5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,10 +394,10 @@
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->fw_version, "N/A");
-	strcpy(info->bus_info, pci_name(skge->hw->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+		sizeof(info->bus_info));
 }
 
 static const struct skge_stat {
@@ -2606,6 +2606,9 @@
 	spin_unlock_irq(&hw->hw_lock);
 
 	napi_enable(&skge->napi);
+
+	skge_set_multicast(dev);
+
 	return 0;
 
  free_tx_ring:
@@ -4039,7 +4042,7 @@
 	pci_set_drvdata(pdev, NULL);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int skge_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -4101,7 +4104,7 @@
 #else
 
 #define SKGE_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
 
 static void skge_shutdown(struct pci_dev *pdev)
 {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 7803efa..760c2b1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1110,6 +1110,7 @@
 	sky2->tx_prod = sky2->tx_cons = 0;
 	sky2->tx_tcpsum = 0;
 	sky2->tx_last_mss = 0;
+	netdev_reset_queue(sky2->netdev);
 
 	le = get_tx_le(sky2, &sky2->tx_prod);
 	le->addr = 0;
@@ -1284,7 +1285,7 @@
 };
 
 /* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
@@ -1402,7 +1403,7 @@
 
 #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
 
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
@@ -1971,6 +1972,7 @@
 	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
 		netif_stop_queue(dev);
 
+	netdev_sent_queue(dev, skb->len);
 	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
 
 	return NETDEV_TX_OK;
@@ -2002,7 +2004,8 @@
 static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 {
 	struct net_device *dev = sky2->netdev;
-	unsigned idx;
+	u16 idx;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	BUG_ON(done >= sky2->tx_ring_size);
 
@@ -2017,10 +2020,8 @@
 			netif_printk(sky2, tx_done, KERN_DEBUG, dev,
 				     "tx done %u\n", idx);
 
-			u64_stats_update_begin(&sky2->tx_stats.syncp);
-			++sky2->tx_stats.packets;
-			sky2->tx_stats.bytes += skb->len;
-			u64_stats_update_end(&sky2->tx_stats.syncp);
+			pkts_compl++;
+			bytes_compl += skb->len;
 
 			re->skb = NULL;
 			dev_kfree_skb_any(skb);
@@ -2031,6 +2032,13 @@
 
 	sky2->tx_cons = idx;
 	smp_mb();
+
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+	u64_stats_update_begin(&sky2->tx_stats.syncp);
+	sky2->tx_stats.packets += pkts_compl;
+	sky2->tx_stats.bytes += bytes_compl;
+	u64_stats_update_end(&sky2->tx_stats.syncp);
 }
 
 static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -3643,10 +3651,10 @@
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->fw_version, "N/A");
-	strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+		sizeof(info->bus_info));
 }
 
 static const struct sky2_stat {
@@ -4311,7 +4319,8 @@
 	return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
 }
 
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	const struct sky2_port *sky2 = netdev_priv(dev);
 	const struct sky2_hw *hw = sky2->hw;
@@ -4335,13 +4344,13 @@
 	return features;
 }
 
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (changed & NETIF_F_RXCSUM) {
-		u32 on = features & NETIF_F_RXCSUM;
+		bool on = features & NETIF_F_RXCSUM;
 		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
 			     on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a..4a40ab9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)		+= mlx4_core.o
 
 mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-		mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+		mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 45aea9c..915e947 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -48,7 +48,8 @@
 static int internal_err_reset = 1;
 module_param(internal_err_reset, int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
-		 "Reset device on internal errors if non-zero (default 1)");
+		 "Reset device on internal errors if non-zero"
+		 " (default 1, in SRIOV mode default is 0)");
 
 static void dump_err_buf(struct mlx4_dev *dev)
 {
@@ -116,6 +117,10 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	phys_addr_t addr;
 
+	/* If we are in SRIOV, the default of the module param must be 0 */
+	if (mlx4_is_mfunc(dev))
+		internal_err_reset = 0;
+
 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
 	priv->catas_err.map = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a..978f593 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,12 +39,18 @@
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
 
 #include <asm/io.h>
 
 #include "mlx4.h"
+#include "fw.h"
 
 #define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK	0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
 
 enum {
 	/* command completed successfully: */
@@ -110,8 +116,12 @@
 	int			next;
 	u64			out_param;
 	u16			token;
+	u8			fw_status;
 };
 
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+				    struct mlx4_vhcr_cmd *in_vhcr);
+
 static int mlx4_status_to_errno(u8 status)
 {
 	static const int trans_table[] = {
@@ -142,6 +152,139 @@
 	return trans_table[status];
 }
 
+static u8 mlx4_errno_to_status(int errno)
+{
+	switch (errno) {
+	case -EPERM:
+		return CMD_STAT_BAD_OP;
+	case -EINVAL:
+		return CMD_STAT_BAD_PARAM;
+	case -ENXIO:
+		return CMD_STAT_BAD_SYS_STATE;
+	case -EBUSY:
+		return CMD_STAT_RESOURCE_BUSY;
+	case -ENOMEM:
+		return CMD_STAT_EXCEED_LIM;
+	case -ENFILE:
+		return CMD_STAT_ICM_ERROR;
+	default:
+		return CMD_STAT_INTERNAL_ERR;
+	}
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u32 status = readl(&priv->mfunc.comm->slave_read);
+
+	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u32 val;
+
+	priv->cmd.comm_toggle ^= 1;
+	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+	__raw_writel((__force u32) cpu_to_be32(val),
+		     &priv->mfunc.comm->slave_write);
+	mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+		       unsigned long timeout)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	unsigned long end;
+	int err = 0;
+	int ret_from_pending = 0;
+
+	/* First, verify that the master reports correct status */
+	if (comm_pending(dev)) {
+		mlx4_warn(dev, "Communication channel is not idle. "
+			  "my toggle is %d (cmd:0x%x)\n",
+			  priv->cmd.comm_toggle, cmd);
+		return -EAGAIN;
+	}
+
+	/* Write command */
+	down(&priv->cmd.poll_sem);
+	mlx4_comm_cmd_post(dev, cmd, param);
+
+	end = msecs_to_jiffies(timeout) + jiffies;
+	while (comm_pending(dev) && time_before(jiffies, end))
+		cond_resched();
+	ret_from_pending = comm_pending(dev);
+	if (ret_from_pending) {
+		/* check if the slave is trying to boot in the middle of
+		 * FLR process. The only non-zero result in the RESET command
+		 * is MLX4_DELAY_RESET_SLAVE */
+		if (cmd == MLX4_COMM_CMD_RESET) {
+			mlx4_warn(dev, "Got slave FLRed from Communication"
+				  " channel (ret:0x%x)\n", ret_from_pending);
+			err = MLX4_DELAY_RESET_SLAVE;
+		} else {
+			mlx4_warn(dev, "Communication channel timed out\n");
+			err = -ETIMEDOUT;
+		}
+	}
+
+	up(&priv->cmd.poll_sem);
+	return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+			      u16 param, unsigned long timeout)
+{
+	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+	struct mlx4_cmd_context *context;
+	int err = 0;
+
+	down(&cmd->event_sem);
+
+	spin_lock(&cmd->context_lock);
+	BUG_ON(cmd->free_head < 0);
+	context = &cmd->context[cmd->free_head];
+	context->token += cmd->token_mask + 1;
+	cmd->free_head = context->next;
+	spin_unlock(&cmd->context_lock);
+
+	init_completion(&context->done);
+
+	mlx4_comm_cmd_post(dev, op, param);
+
+	if (!wait_for_completion_timeout(&context->done,
+					 msecs_to_jiffies(timeout))) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = context->result;
+	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+			 op, context->fw_status);
+		goto out;
+	}
+
+out:
+	spin_lock(&cmd->context_lock);
+	context->next = cmd->free_head;
+	cmd->free_head = context - cmd->context;
+	spin_unlock(&cmd->context_lock);
+
+	up(&cmd->event_sem);
+	return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+		  unsigned long timeout)
+{
+	if (mlx4_priv(dev)->cmd.use_events)
+		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
 static int cmd_pending(struct mlx4_dev *dev)
 {
 	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@
 		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 
 	while (cmd_pending(dev)) {
-		if (time_after_eq(jiffies, end))
+		if (time_after_eq(jiffies, end)) {
+			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
 			goto out;
+		}
 		cond_resched();
 	}
 
@@ -192,7 +337,7 @@
 					       (cmd->toggle << HCR_T_BIT)	|
 					       (event ? (1 << HCR_E_BIT) : 0)	|
 					       (op_modifier << HCR_OPMOD_SHIFT) |
-					       op),			  hcr + 6);
+					       op), hcr + 6);
 
 	/*
 	 * Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@
 	return ret;
 }
 
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+			  int out_is_imm, u32 in_modifier, u8 op_modifier,
+			  u16 op, unsigned long timeout)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+	int ret;
+
+	down(&priv->cmd.slave_sem);
+	vhcr->in_param = cpu_to_be64(in_param);
+	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+	vhcr->in_modifier = cpu_to_be32(in_modifier);
+	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+	vhcr->status = 0;
+	vhcr->flags = !!(priv->cmd.use_events) << 6;
+	if (mlx4_is_master(dev)) {
+		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+		if (!ret) {
+			if (out_is_imm) {
+				if (out_param)
+					*out_param =
+						be64_to_cpu(vhcr->out_param);
+				else {
+					mlx4_err(dev, "response expected while"
+						 "output mailbox is NULL for "
+						 "command 0x%x\n", op);
+					vhcr->status = CMD_STAT_BAD_PARAM;
+				}
+			}
+			ret = mlx4_status_to_errno(vhcr->status);
+		}
+	} else {
+		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+				    MLX4_COMM_TIME + timeout);
+		if (!ret) {
+			if (out_is_imm) {
+				if (out_param)
+					*out_param =
+						be64_to_cpu(vhcr->out_param);
+				else {
+					mlx4_err(dev, "response expected while"
+						 "output mailbox is NULL for "
+						 "command 0x%x\n", op);
+					vhcr->status = CMD_STAT_BAD_PARAM;
+				}
+			}
+			ret = mlx4_status_to_errno(vhcr->status);
+		} else
+			mlx4_err(dev, "failed execution of VHCR_POST command"
+				 "opcode 0x%x\n", op);
+	}
+	up(&priv->cmd.slave_sem);
+	return ret;
+}
+
 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 			 int out_is_imm, u32 in_modifier, u8 op_modifier,
 			 u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@
 	void __iomem *hcr = priv->cmd.hcr;
 	int err = 0;
 	unsigned long end;
+	u32 stat;
 
 	down(&priv->cmd.poll_sem);
 
@@ -240,9 +442,12 @@
 					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
 			(u64) be32_to_cpu((__force __be32)
 					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
-	err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
-					       __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+	stat = be32_to_cpu((__force __be32)
+			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+	err = mlx4_status_to_errno(stat);
+	if (err)
+		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+			 op, stat);
 
 out:
 	up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@
 	if (token != context->token)
 		return;
 
+	context->fw_status = status;
 	context->result    = mlx4_status_to_errno(status);
 	context->out_param = out_param;
 
@@ -287,14 +493,18 @@
 	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 		      in_modifier, op_modifier, op, context->token, 1);
 
-	if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+	if (!wait_for_completion_timeout(&context->done,
+					 msecs_to_jiffies(timeout))) {
 		err = -EBUSY;
 		goto out;
 	}
 
 	err = context->result;
-	if (err)
+	if (err) {
+		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+			 op, context->fw_status);
 		goto out;
+	}
 
 	if (out_is_imm)
 		*out_param = context->out_param;
@@ -311,17 +521,1046 @@
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	       int out_is_imm, u32 in_modifier, u8 op_modifier,
-	       u16 op, unsigned long timeout)
+	       u16 op, unsigned long timeout, int native)
 {
-	if (mlx4_priv(dev)->cmd.use_events)
-		return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
-				     in_modifier, op_modifier, op, timeout);
-	else
-		return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
-				     in_modifier, op_modifier, op, timeout);
+	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+		if (mlx4_priv(dev)->cmd.use_events)
+			return mlx4_cmd_wait(dev, in_param, out_param,
+					     out_is_imm, in_modifier,
+					     op_modifier, op, timeout);
+		else
+			return mlx4_cmd_poll(dev, in_param, out_param,
+					     out_is_imm, in_modifier,
+					     op_modifier, op, timeout);
+	}
+	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+			      in_modifier, op_modifier, op, timeout);
 }
 EXPORT_SYMBOL_GPL(__mlx4_cmd);
 
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
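+/*
+ * DMA a buffer between master memory and a slave's memory (used to move
+ * the vHCR and command mailboxes).  Addresses must be 4K aligned and the
+ * transfer size a multiple of 256 bytes.
+ */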
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+			   int slave, u64 slave_addr,
+			   int size, int is_read)
+{
+	u64 in_param;
+	u64 out_param;
+
+	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+	    (slave & ~0x7f) | (size & 0xff)) {
+		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+			      "master_addr:0x%llx slave_id:%d size:%d\n",
+			      slave_addr, master_addr, slave, size);
+		return -EINVAL;
+	}
+
+	if (is_read) {
+		in_param = (u64) slave | slave_addr;
+		out_param = (u64) dev->caps.function | master_addr;
+	} else {
+		in_param = (u64) dev->caps.function | master_addr;
+		out_param = (u64) slave | slave_addr;
+	}
+
+	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+			    MLX4_CMD_ACCESS_MEM,
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+		     struct mlx4_vhcr *vhcr,
+		     struct mlx4_cmd_mailbox *inbox,
+		     struct mlx4_cmd_mailbox *outbox,
+		     struct mlx4_cmd_info *cmd)
+{
+	u64 in_param;
+	u64 out_param;
+	int err;
+
+	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+	if (cmd->encode_slave_id) {
+		in_param &= 0xffffffffffffff00ll;
+		in_param |= slave;
+	}
+
+	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+	if (cmd->out_is_imm)
+		vhcr->out_param = out_param;
+
+	return err;
+}
+
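+/*
+ * Dispatch table for commands arriving from slaves via the vHCR.  Each
+ * entry states whether the command carries an inbox/outbox, whether the
+ * result is returned as an immediate value, whether the slave id must be
+ * encoded into the input, and an optional verify/wrapper handler.  A NULL
+ * wrapper means the command is passed to the HCA unchanged.
+ */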
+static struct mlx4_cmd_info cmd_info[] = {
+	{
+		.opcode = MLX4_CMD_QUERY_FW,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_HCA,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_DEV_CAP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_ADAPTER,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_INIT_PORT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_INIT_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_CLOSE_PORT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm  = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_CLOSE_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_PORT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SET_PORT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_SET_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_MAP_EQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_MAP_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_EQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_NOP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_ALLOC_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_ALLOC_RES_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_FREE_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_FREE_RES_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_MPT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_MPT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_MPT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_MPT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_MPT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_MPT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_READ_MTT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_WRITE_MTT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_WRITE_MTT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SYNC_TPT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_EQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_EQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_EQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_CQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_CQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_CQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_MODIFY_CQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_MODIFY_CQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_SRQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_SW2HW_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_SRQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_HW2SW_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_SRQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_ARM_SRQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_ARM_SRQ_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RST2INIT_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = true,
+		.verify = NULL,
+		.wrapper = mlx4_RST2INIT_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_INIT2INIT_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_INIT2RTR_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_INIT2RTR_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RTR2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RTS2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SQERR2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_2ERR_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_RTS2SQD_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SQD2SQD_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SQD2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_2RST_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_2RST_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_QP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SUSPEND_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_UNSUSPEND_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_GEN_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_IF_STAT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_IF_STAT_wrapper
+	},
+	/* Native multicast commands are not available for guests */
+	{
+		.opcode = MLX4_CMD_QP_ATTACH,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_QP_ATTACH_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_PROMISC,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_PROMISC_wrapper
+	},
+	/* Ethernet specific commands */
+	{
+		.opcode = MLX4_CMD_SET_VLAN_FLTR,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SET_MCAST_FLTR,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_DUMP_ETH_STATS,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_INFORM_FLR_DONE,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+};
+
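+/*
+ * Process one vHCR command on behalf of a slave: DMA in the slave's vHCR
+ * (unless one is passed in directly), look the opcode up in cmd_info[],
+ * DMA in the inbox if needed, run the verify and wrapper handlers (or the
+ * command itself), then DMA the outbox and completion status back and,
+ * if requested, generate a command-completion EQE for the slave.
+ */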
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+				    struct mlx4_vhcr_cmd *in_vhcr)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cmd_info *cmd = NULL;
+	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+	struct mlx4_vhcr *vhcr;
+	struct mlx4_cmd_mailbox *inbox = NULL;
+	struct mlx4_cmd_mailbox *outbox = NULL;
+	u64 in_param;
+	u64 out_param;
+	int ret = 0;
+	int i;
+	int err = 0;
+
+	/* Create sw representation of Virtual HCR */
+	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+	if (!vhcr)
+		return -ENOMEM;
+
+	/* DMA in the vHCR */
+	if (!in_vhcr) {
+		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+				      priv->mfunc.master.slave_state[slave].vhcr_dma,
+				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
+					    MLX4_ACCESS_MEM_ALIGN), 1);
+		if (ret) {
+			mlx4_err(dev, "%s:Failed reading vhcr"
+				 "ret: 0x%x\n", __func__, ret);
+			kfree(vhcr);
+			return ret;
+		}
+	}
+
+	/* Fill SW VHCR fields */
+	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+	vhcr->token = be16_to_cpu(vhcr_cmd->token);
+	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+	/* Lookup command */
+	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+		if (vhcr->op == cmd_info[i].opcode) {
+			cmd = &cmd_info[i];
+			break;
+		}
+	}
+	if (!cmd) {
+		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+			 vhcr->op, slave);
+		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+		goto out_status;
+	}
+
+	/* Read inbox */
+	if (cmd->has_inbox) {
+		vhcr->in_param &= INBOX_MASK;
+		inbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(inbox)) {
+			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+			inbox = NULL;
+			goto out_status;
+		}
+
+		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+				    vhcr->in_param,
+				    MLX4_MAILBOX_SIZE, 1)) {
+			mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+				 __func__, cmd->opcode);
+			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+			goto out_status;
+		}
+	}
+
+	/* Apply permission and bound checks if applicable */
+	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+			  "checks for resource_id:%d\n", vhcr->op, slave,
+			  vhcr->in_modifier);
+		vhcr_cmd->status = CMD_STAT_BAD_OP;
+		goto out_status;
+	}
+
+	/* Allocate outbox */
+	if (cmd->has_outbox) {
+		outbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(outbox)) {
+			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+			outbox = NULL;
+			goto out_status;
+		}
+	}
+
+	/* Execute the command! */
+	if (cmd->wrapper) {
+		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+				   cmd);
+		if (cmd->out_is_imm)
+			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+	} else {
+		in_param = cmd->has_inbox ? (u64) inbox->dma :
+			vhcr->in_param;
+		out_param = cmd->has_outbox ? (u64) outbox->dma :
+			vhcr->out_param;
+		err = __mlx4_cmd(dev, in_param, &out_param,
+				 cmd->out_is_imm, vhcr->in_modifier,
+				 vhcr->op_modifier, vhcr->op,
+				 MLX4_CMD_TIME_CLASS_A,
+				 MLX4_CMD_NATIVE);
+
+		if (cmd->out_is_imm) {
+			vhcr->out_param = out_param;
+			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+		}
+	}
+
+	if (err) {
+		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+			  " error:%d, status %d\n",
+			  vhcr->op, slave, vhcr->errno, err);
+		vhcr_cmd->status = mlx4_errno_to_status(err);
+		goto out_status;
+	}
+
+
+	/* Write outbox if command completed successfully */
+	if (cmd->has_outbox && !vhcr_cmd->status) {
+		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+				      vhcr->out_param,
+				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+		if (ret) {
+			/* If we failed to write back the outbox after the
+			 * command was successfully executed, we must fail this
+			 * slave, as it is now in undefined state */
+			mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+			goto out;
+		}
+	}
+
+out_status:
+	/* DMA back vhcr result */
+	if (!in_vhcr) {
+		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+				      priv->mfunc.master.slave_state[slave].vhcr_dma,
+				      ALIGN(sizeof(struct mlx4_vhcr),
+					    MLX4_ACCESS_MEM_ALIGN),
+				      MLX4_CMD_WRAPPED);
+		if (ret)
+			mlx4_err(dev, "%s:Failed writing vhcr result\n",
+				 __func__);
+		else if (vhcr->e_bit &&
+			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+				mlx4_warn(dev, "Failed to generate command completion "
+					  "eqe for slave %d\n", slave);
+	}
+
+out:
+	kfree(vhcr);
+	mlx4_free_cmd_mailbox(dev, inbox);
+	mlx4_free_cmd_mailbox(dev, outbox);
+	return ret;
+}
+
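+/*
+ * Handle a single comm channel command from a slave: RESET, the
+ * VHCR0..VHCR_EN sequence that delivers the slave's vHCR DMA address
+ * 16 bits at a time, and VHCR_POST which triggers vHCR processing.
+ * Any protocol violation resets the slave's state.
+ */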
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+			       u16 param, u8 toggle)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+	u32 reply;
+	u32 slave_status = 0;
+	u8 is_going_down = 0;
+
+	slave_state[slave].comm_toggle ^= 1;
+	reply = (u32) slave_state[slave].comm_toggle << 31;
+	if (toggle != slave_state[slave].comm_toggle) {
+		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
+			  "STATE COMPROMISIED ***\n", toggle, slave);
+		goto reset_slave;
+	}
+	if (cmd == MLX4_COMM_CMD_RESET) {
+		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+		slave_state[slave].active = false;
+		/* check if we are in the middle of FLR process,
+		 * if so return "retry" status to the slave */
+		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+			slave_status = MLX4_DELAY_RESET_SLAVE;
+			goto inform_slave_state;
+		}
+
+		/* write the version in the event field */
+		reply |= mlx4_comm_get_version();
+
+		goto reset_slave;
+	}
+	/* command from slave in the middle of FLR */
+	if (cmd != MLX4_COMM_CMD_RESET &&
+	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
+			  "in the middle of FLR\n", slave, cmd);
+		return;
+	}
+
+	switch (cmd) {
+	case MLX4_COMM_CMD_VHCR0:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma = ((u64) param) << 48;
+		priv->mfunc.master.slave_state[slave].cookie = 0;
+		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+		break;
+	case MLX4_COMM_CMD_VHCR1:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+		break;
+	case MLX4_COMM_CMD_VHCR2:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+		break;
+	case MLX4_COMM_CMD_VHCR_EN:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma |= param;
+		slave_state[slave].active = true;
+		break;
+	case MLX4_COMM_CMD_VHCR_POST:
+		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+			goto reset_slave;
+		down(&priv->cmd.slave_sem);
+		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+			mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+				 " reseting slave.\n", slave);
+			up(&priv->cmd.slave_sem);
+			goto reset_slave;
+		}
+		up(&priv->cmd.slave_sem);
+		break;
+	default:
+		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+		goto reset_slave;
+	}
+	spin_lock(&priv->mfunc.master.slave_state_lock);
+	if (!slave_state[slave].is_slave_going_down)
+		slave_state[slave].last_cmd = cmd;
+	else
+		is_going_down = 1;
+	spin_unlock(&priv->mfunc.master.slave_state_lock);
+	if (is_going_down) {
+		mlx4_warn(dev, "Slave is going down aborting command(%d)"
+			  " executing from slave:%d\n",
+			  cmd, slave);
+		return;
+	}
+	__raw_writel((__force u32) cpu_to_be32(reply),
+		     &priv->mfunc.comm[slave].slave_read);
+	mmiowb();
+
+	return;
+
+reset_slave:
+	/* cleanup any slave resources */
+	mlx4_delete_all_resources_for_slave(dev, slave);
+	spin_lock(&priv->mfunc.master.slave_state_lock);
+	if (!slave_state[slave].is_slave_going_down)
+		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+	spin_unlock(&priv->mfunc.master.slave_state_lock);
+	/* with slave in the middle of FLR, no need to clean resources again */
+inform_slave_state:
+	memset(&slave_state[slave].event_eq, 0,
+	       sizeof(struct mlx4_slave_event_eq_info));
+	__raw_writel((__force u32) cpu_to_be32(reply),
+		     &priv->mfunc.comm[slave].slave_read);
+	wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+	struct mlx4_mfunc_master_ctx *master =
+		container_of(work,
+			     struct mlx4_mfunc_master_ctx,
+			     comm_work);
+	struct mlx4_mfunc *mfunc =
+		container_of(master, struct mlx4_mfunc, master);
+	struct mlx4_priv *priv =
+		container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	__be32 *bit_vec;
+	u32 comm_cmd;
+	u32 vec;
+	int i, j, slave;
+	int toggle;
+	int served = 0;
+	int reported = 0;
+	u32 slt;
+
+	bit_vec = master->comm_arm_bit_vector;
+	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+		vec = be32_to_cpu(bit_vec[i]);
+		for (j = 0; j < 32; j++) {
+			if (!(vec & (1 << j)))
+				continue;
+			++reported;
+			slave = (i * 32) + j;
+			comm_cmd = swab32(readl(
+					  &mfunc->comm[slave].slave_write));
+			slt = swab32(readl(&mfunc->comm[slave].slave_read))
+				     >> 31;
+			toggle = comm_cmd >> 31;
+			if (toggle != slt) {
+				if (master->slave_state[slave].comm_toggle
+				    != slt) {
+					printk(KERN_INFO "slave %d out of sync."
+					       " read toggle %d, state toggle %d. "
+					       "Resynching.\n", slave, slt,
+					       master->slave_state[slave].comm_toggle);
+					master->slave_state[slave].comm_toggle =
+						slt;
+				}
+				mlx4_master_do_cmd(dev, slave,
+						   comm_cmd >> 16 & 0xff,
+						   comm_cmd & 0xffff, toggle);
+				++served;
+			}
+		}
+	}
+
+	if (reported && reported != served)
+		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+			  " but %d were served\n",
+			  reported, served);
+
+	if (mlx4_ARM_COMM_CHANNEL(dev))
+		mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
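+/*
+ * Bring the slave's toggle bit in sync with the value currently seen on
+ * the comm channel before issuing any commands.
+ */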
+static int sync_toggles(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int wr_toggle;
+	int rd_toggle;
+	unsigned long end;
+
+	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+	end = jiffies + msecs_to_jiffies(5000);
+
+	while (time_before(jiffies, end)) {
+		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+		if (rd_toggle == wr_toggle) {
+			priv->cmd.comm_toggle = rd_toggle;
+			return 0;
+		}
+
+		cond_resched();
+	}
+
+	/*
+	 * We could reach here if, for example, the previous VM using this
+	 * function misbehaved and left the channel in an unsynced state.
+	 * Fix this here and give this VM a chance to use a properly synced
+	 * channel.
+	 */
+	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+	priv->cmd.comm_toggle = 0;
+
+	return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_state;
+	int i, err, port;
+
+	priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+					    &priv->mfunc.vhcr_dma,
+					    GFP_KERNEL);
+	if (!priv->mfunc.vhcr) {
+		mlx4_err(dev, "Couldn't allocate vhcr.\n");
+		return -ENOMEM;
+	}
+
+	if (mlx4_is_master(dev))
+		priv->mfunc.comm =
+		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+	else
+		priv->mfunc.comm =
+		ioremap(pci_resource_start(dev->pdev, 2) +
+			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+	if (!priv->mfunc.comm) {
+		mlx4_err(dev, "Couldn't map communication vector.\n");
+		goto err_vhcr;
+	}
+
+	if (mlx4_is_master(dev)) {
+		priv->mfunc.master.slave_state =
+			kzalloc(dev->num_slaves *
+				sizeof(struct mlx4_slave_state), GFP_KERNEL);
+		if (!priv->mfunc.master.slave_state)
+			goto err_comm;
+
+		for (i = 0; i < dev->num_slaves; ++i) {
+			s_state = &priv->mfunc.master.slave_state[i];
+			s_state->last_cmd = MLX4_COMM_CMD_RESET;
+			__raw_writel((__force u32) 0,
+				     &priv->mfunc.comm[i].slave_write);
+			__raw_writel((__force u32) 0,
+				     &priv->mfunc.comm[i].slave_read);
+			mmiowb();
+			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+				s_state->vlan_filter[port] =
+					kzalloc(sizeof(struct mlx4_vlan_fltr),
+						GFP_KERNEL);
+				if (!s_state->vlan_filter[port]) {
+					if (--port)
+						kfree(s_state->vlan_filter[port]);
+					goto err_slaves;
+				}
+				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+			}
+			spin_lock_init(&s_state->lock);
+		}
+
+		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+		INIT_WORK(&priv->mfunc.master.comm_work,
+			  mlx4_master_comm_channel);
+		INIT_WORK(&priv->mfunc.master.slave_event_work,
+			  mlx4_gen_slave_eqe);
+		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+			  mlx4_master_handle_slave_flr);
+		spin_lock_init(&priv->mfunc.master.slave_state_lock);
+		priv->mfunc.master.comm_wq =
+			create_singlethread_workqueue("mlx4_comm");
+		if (!priv->mfunc.master.comm_wq)
+			goto err_slaves;
+
+		if (mlx4_init_resource_tracker(dev))
+			goto err_thread;
+
+		sema_init(&priv->cmd.slave_sem, 1);
+		err = mlx4_ARM_COMM_CHANNEL(dev);
+		if (err) {
+			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+				 err);
+			goto err_resource;
+		}
+
+	} else {
+		err = sync_toggles(dev);
+		if (err) {
+			mlx4_err(dev, "Couldn't sync toggles\n");
+			goto err_comm;
+		}
+
+		sema_init(&priv->cmd.slave_sem, 1);
+	}
+	return 0;
+
+err_resource:
+	mlx4_free_resource_tracker(dev);
+err_thread:
+	flush_workqueue(priv->mfunc.master.comm_wq);
+	destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+	while (--i) {
+		for (port = 1; port <= MLX4_MAX_PORTS; port++)
+			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+	}
+	kfree(priv->mfunc.master.slave_state);
+err_comm:
+	iounmap(priv->mfunc.comm);
+err_vhcr:
+	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+					     priv->mfunc.vhcr,
+					     priv->mfunc.vhcr_dma);
+	priv->mfunc.vhcr = NULL;
+	return -ENOMEM;
+}
+
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1570,51 @@
 	priv->cmd.use_events = 0;
 	priv->cmd.toggle     = 1;
 
-	priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
-				MLX4_HCR_SIZE);
-	if (!priv->cmd.hcr) {
-		mlx4_err(dev, "Couldn't map command register.");
-		return -ENOMEM;
+	priv->cmd.hcr = NULL;
+	priv->mfunc.vhcr = NULL;
+
+	if (!mlx4_is_slave(dev)) {
+		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+					MLX4_HCR_BASE, MLX4_HCR_SIZE);
+		if (!priv->cmd.hcr) {
+			mlx4_err(dev, "Couldn't map command register.\n");
+			return -ENOMEM;
+		}
 	}
 
 	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
 					 MLX4_MAILBOX_SIZE,
 					 MLX4_MAILBOX_SIZE, 0);
-	if (!priv->cmd.pool) {
-		iounmap(priv->cmd.hcr);
-		return -ENOMEM;
-	}
+	if (!priv->cmd.pool)
+		goto err_hcr;
 
 	return 0;
+
+err_hcr:
+	if (!mlx4_is_slave(dev))
+		iounmap(priv->cmd.hcr);
+	return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i, port;
+
+	if (mlx4_is_master(dev)) {
+		flush_workqueue(priv->mfunc.master.comm_wq);
+		destroy_workqueue(priv->mfunc.master.comm_wq);
+		for (i = 0; i < dev->num_slaves; i++) {
+			for (port = 1; port <= MLX4_MAX_PORTS; port++)
+				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+		}
+		kfree(priv->mfunc.master.slave_state);
+		iounmap(priv->mfunc.comm);
+		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+						     priv->mfunc.vhcr,
+						     priv->mfunc.vhcr_dma);
+		priv->mfunc.vhcr = NULL;
+	}
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1622,9 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	pci_pool_destroy(priv->cmd.pool);
-	iounmap(priv->cmd.hcr);
+
+	if (!mlx4_is_slave(dev))
+		iounmap(priv->cmd.hcr);
 }
 
 /*
@@ -365,6 +1635,7 @@
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
+	int err = 0;
 
 	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
 				   sizeof (struct mlx4_cmd_context),
@@ -389,11 +1660,10 @@
 		; /* nothing */
 	--priv->cmd.token_mask;
 
+	down(&priv->cmd.poll_sem);
 	priv->cmd.use_events = 1;
 
-	down(&priv->cmd.poll_sem);
-
-	return 0;
+	return err;
 }
 
 /*
@@ -433,7 +1703,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
 
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+			   struct mlx4_cmd_mailbox *mailbox)
 {
 	if (!mailbox)
 		return;
@@ -442,3 +1713,8 @@
 	kfree(mailbox);
 }
 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 499a516..475f9d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
-#include <linux/gfp.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/cq.h>
@@ -44,27 +44,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_cq_context {
-	__be32			flags;
-	u16			reserved1[3];
-	__be16			page_offset;
-	__be32			logsize_usrpage;
-	__be16			cq_period;
-	__be16			cq_max_count;
-	u8			reserved2[3];
-	u8			comp_eqn;
-	u8			log_page_size;
-	u8			reserved3[2];
-	u8			mtt_base_addr_h;
-	__be32			mtt_base_addr_l;
-	__be32			last_notified_index;
-	__be32			solicit_producer_index;
-	__be32			consumer_index;
-	__be32			producer_index;
-	u32			reserved4[2];
-	__be64			db_rec_addr;
-};
-
 #define MLX4_CQ_STATUS_OK		( 0 << 28)
 #define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
 #define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
@@ -81,7 +60,7 @@
 	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
 			       cqn & (dev->caps.num_cqs - 1));
 	if (!cq) {
-		mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
 		return;
 	}
 
@@ -117,23 +96,24 @@
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int cq_num)
 {
-	return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
-			MLX4_CMD_TIME_CLASS_A);
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int cq_num, u32 opmod)
 {
 	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
-			MLX4_CMD_TIME_CLASS_A);
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int cq_num)
 {
-	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
-			    MLX4_CMD_TIME_CLASS_A);
+	return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cq_table *cq_table = &priv->cq_table;
+	int err;
+
+	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+	if (*cqn == -1)
+		return -ENOMEM;
+
+	err = mlx4_table_get(dev, &cq_table->table, *cqn);
+	if (err)
+		goto err_out;
+
+	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+	if (err)
+		goto err_put;
+	return 0;
+
+err_put:
+	mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+	return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			return err;
+		else {
+			*cqn = get_param_l(&out_param);
+			return 0;
+		}
+	}
+	return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+	mlx4_table_put(dev, &cq_table->table, cqn);
+	mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, cqn);
+		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+			       MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+	} else
+		__mlx4_cq_free_icm(dev, cqn);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
 		  unsigned vector, int collapsed)
@@ -204,23 +256,15 @@
 
 	cq->vector = vector;
 
-	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-	if (cq->cqn == -1)
-		return -ENOMEM;
-
-	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
+	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
 	if (err)
-		goto err_out;
-
-	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
-	if (err)
-		goto err_put;
+		return err;
 
 	spin_lock_irq(&cq_table->lock);
 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
 	spin_unlock_irq(&cq_table->lock);
 	if (err)
-		goto err_cmpt_put;
+		goto err_icm;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@
 	radix_tree_delete(&cq_table->tree, cq->cqn);
 	spin_unlock_irq(&cq_table->lock);
 
-err_cmpt_put:
-	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-	mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+	mlx4_cq_free_icm(dev, cq->cqn);
 
 	return err;
 }
@@ -292,8 +330,7 @@
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
 
-	mlx4_table_put(dev, &cq_table->table, cq->cqn);
-	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+	mlx4_cq_free_icm(dev, cq->cqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_free);
 
@@ -304,6 +341,8 @@
 
 	spin_lock_init(&cq_table->lock);
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
 			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
 	/* Nothing to do to clean up radix_tree */
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 227997d..00b8127 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,10 +51,7 @@
 	int err;
 
 	cq->size = entries;
-	if (mode == RX)
-		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-	else
-		cq->buf_size = sizeof(struct mlx4_cqe);
+	cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
 
 	cq->ring = ring;
 	cq->is_tx = mode;
@@ -120,7 +117,7 @@
 		cq->size = priv->rx_ring[cq->ring].actual_size;
 
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
-			    cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+			    cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
 	if (err)
 		return err;
 
@@ -147,6 +144,7 @@
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 	if (priv->mdev->dev->caps.comp_pool && cq->vector)
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
+	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 74e2a2a..7dbc6a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -45,13 +45,16 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	strncpy(drvinfo->driver, DRV_NAME, 32);
-	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
-	sprintf(drvinfo->fw_version, "%d.%d.%d",
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d.%d",
 		(u16) (mdev->dev->caps.fw_ver >> 32),
 		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
 		(u16) (mdev->dev->caps.fw_ver & 0xffff));
-	strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = 0;
 	drvinfo->regdump_len = 0;
 	drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 	int err = 0;
 	u64 config = 0;
+	u64 mask;
 
-	if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+	if ((priv->port < 1) || (priv->port > 2)) {
+		en_err(priv, "Failed to get WoL information\n");
+		return;
+	}
+
+	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+		MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+	if (!(priv->mdev->dev->caps.flags & mask)) {
 		wol->supported = 0;
 		wol->wolopts = 0;
 		return;
@@ -133,8 +145,15 @@
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 	u64 config = 0;
 	int err = 0;
+	u64 mask;
 
-	if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+	if ((priv->port < 1) || (priv->port > 2))
+		return -EOPNOTSUPP;
+
+	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+		MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+	if (!(priv->mdev->dev->caps.flags & mask))
 		return -EOPNOTSUPP;
 
 	if (wol->supported & ~WAKE_MAGIC)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 78d776b..72fa807 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,7 +45,7 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@
 		en_err(priv, "failed adding vlan %d\n", vid);
 	mutex_unlock(&mdev->state_lock);
 
+	return 0;
 }
 
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@
 			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
+
+	return 0;
 }
 
 u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@
 	if (priv->port_up) {
 		/* Remove old MAC and insert the new one */
 		err = mlx4_replace_mac(mdev->dev, priv->port,
-				       priv->base_qpn, priv->mac, 0);
+				       priv->base_qpn, priv->mac);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
 	} else
@@ -148,6 +151,7 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
 	kfree(priv->mc_addrs);
+	priv->mc_addrs = NULL;
 	priv->mc_addrs_cnt = 0;
 }
 
@@ -167,6 +171,7 @@
 	i = 0;
 	netdev_for_each_mc_addr(ha, dev)
 		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+	mlx4_en_clear_list(dev);
 	priv->mc_addrs = mc_addrs;
 	priv->mc_addrs_cnt = mc_addrs_cnt;
 }
@@ -204,6 +209,16 @@
 		goto out;
 	}
 
+	if (!netif_carrier_ok(dev)) {
+		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+			if (priv->port_state.link_state) {
+				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+				netif_carrier_on(dev);
+				en_dbg(LINK, priv, "Link Up\n");
+			}
+		}
+	}
+
 	/*
 	 * Promsicuous mode: disable all filters
 	 */
@@ -599,12 +614,12 @@
 		++rx_index;
 	}
 
-	/* Set port mac number */
-	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-	err = mlx4_register_mac(mdev->dev, priv->port,
-				priv->mac, &priv->base_qpn, 0);
+	/* Set qp number */
+	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+	err = mlx4_get_eth_qp(mdev->dev, priv->port,
+				priv->mac, &priv->base_qpn);
 	if (err) {
-		en_err(priv, "Failed setting port mac\n");
+		en_err(priv, "Failed getting eth qp\n");
 		goto cq_err;
 	}
 	mdev->mac_removed[priv->port] = 0;
@@ -699,7 +714,7 @@
 
 	mlx4_en_release_rss_steer(priv);
 mac_err:
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +760,6 @@
 	/* Flush multicast filter */
 	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
-	/* Unregister Mac address for the port */
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
-	mdev->mac_removed[priv->port] = 1;
-
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +773,10 @@
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
 
+	/* Unregister Mac address for the port */
+	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+	mdev->mac_removed[priv->port] = 1;
+
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -974,6 +989,21 @@
 	return 0;
 }
 
+static int mlx4_en_set_features(struct net_device *netdev,
+		netdev_features_t features)
+{
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+	if (features & NETIF_F_LOOPBACK)
+		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+	else
+		priv->ctrl_flags &=
+			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+	return 0;
+
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
@@ -990,6 +1020,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
+	.ndo_set_features	= mlx4_en_set_features,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1053,8 @@
 	priv->port = port;
 	priv->port_up = false;
 	priv->flags = prof->flags;
+	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+			MLX4_WQE_CTRL_SOLICITED);
 	priv->tx_ring_num = prof->tx_ring_num;
 	priv->rx_ring_num = prof->rx_ring_num;
 	priv->mac_index = -1;
@@ -1088,6 +1121,7 @@
 	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
 			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
 			NETIF_F_HW_VLAN_FILTER;
+	dev->hw_features |= NETIF_F_LOOPBACK;
 
 	mdev->pndev[port] = dev;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 03c84cd..3317914 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -41,13 +41,6 @@
 #include "mlx4_en.h"
 
 
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
-			u64 mac, u64 clear, u8 mode)
-{
-	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
-			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@
 		filter->entry[i] = cpu_to_be32(entry);
 	}
 	err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
-		       MLX4_CMD_TIME_CLASS_B);
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
-			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
-	struct mlx4_cmd_mailbox *mailbox;
-	struct mlx4_set_port_general_context *context;
-	int err;
-	u32 in_mod;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
-	context->flags = SET_PORT_GEN_ALL_VALID;
-	context->mtu = cpu_to_be16(mtu);
-	context->pptx = (pptx * (!pfctx)) << 7;
-	context->pfctx = pfctx;
-	context->pprx = (pprx * (!pfcrx)) << 7;
-	context->pfcrx = pfcrx;
-
-	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B);
-
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
-			   u8 promisc)
-{
-	struct mlx4_cmd_mailbox *mailbox;
-	struct mlx4_set_port_rqp_calc_context *context;
-	int err;
-	u32 in_mod;
-	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
-						MCAST_DIRECT : MCAST_DEFAULT;
-
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
-			dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
-		return 0;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
-
-	context->base_qpn = cpu_to_be32(base_qpn);
-	context->n_mac = dev->caps.log_num_macs;
-	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
-				       base_qpn);
-	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
-				     base_qpn);
-	context->intra_no_vlan = 0;
-	context->no_vlan = MLX4_NO_VLAN_IDX;
-	context->intra_vlan_miss = 0;
-	context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
-	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B);
-
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
@@ -159,7 +83,8 @@
 		return PTR_ERR(mailbox);
 	memset(mailbox->buf, 0, sizeof(*qport_context));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
-			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+			   MLX4_CMD_WRAPPED);
 	if (err)
 		goto out;
 	qport_context = mailbox->buf;
@@ -204,7 +129,8 @@
 		return PTR_ERR(mailbox);
 	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
-			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+			   MLX4_CMD_WRAPPED);
 	if (err)
 		goto out;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 19eb244..6934fd7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,49 +39,6 @@
 #define SET_PORT_PROMISC_SHIFT	31
 #define SET_PORT_MC_PROMISC_SHIFT	30
 
-enum {
-	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
-	MLX4_CMD_SET_MCAST_FLTR = 0x48,
-	MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
-	MCAST_DIRECT_ONLY       = 0,
-	MCAST_DIRECT            = 1,
-	MCAST_DEFAULT           = 2
-};
-
-struct mlx4_set_port_general_context {
-	u8 reserved[3];
-	u8 flags;
-	u16 reserved2;
-	__be16 mtu;
-	u8 pptx;
-	u8 pfctx;
-	u16 reserved3;
-	u8 pprx;
-	u8 pfcrx;
-	u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
-	__be32 base_qpn;
-	u8 rererved;
-	u8 n_mac;
-	u8 n_vlan;
-	u8 n_prio;
-	u8 reserved2[3];
-	u8 mac_miss;
-	u8 intra_no_vlan;
-	u8 no_vlan;
-	u8 intra_vlan_miss;
-	u8 vlan_miss;
-	u8 reserved3[3];
-	u8 no_vlan_prio;
-	__be32 promisc;
-	__be32 mcast;
-};
-
 #define VLAN_FLTR_SIZE	128
 struct mlx4_set_vlan_fltr_mbox {
 	__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec..bcbc54c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -44,7 +44,7 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	memset(context, 0, sizeof *context);
-	context->flags = cpu_to_be32(7 << 16 | rss << 13);
+	context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
 	context->pd = cpu_to_be32(mdev->priv_pdn);
 	context->mtu_msgmax = 0xff;
 	if (!is_tx && !rss)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c2df6c3..e8d6ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -541,6 +541,8 @@
 	unsigned int length;
 	int polled = 0;
 	int ip_summed;
+	struct ethhdr *ethh;
+	u64 s_mac;
 
 	if (!priv->port_up)
 		return 0;
@@ -577,6 +579,19 @@
 			goto next;
 		}
 
+		/* Get a pointer to the first fragment since we don't have an
+		 * skb yet, and cast it to an ethhdr struct */
+		ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+					 skb_frags[0].offset);
+		s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+		/* Drop packets with our own source MAC unless loopback is
+		 * enabled and the loopback selftest is running */
+		if (s_mac == priv->mac &&
+			(!(dev->features & NETIF_F_LOOPBACK) ||
+			 !priv->validate_loopback))
+			goto next;
+
 		/*
 		 * Packet is OK - process it.
 		 */
@@ -837,9 +852,10 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
 	struct mlx4_qp_context context;
-	struct mlx4_en_rss_context *rss_context;
+	struct mlx4_rss_context *rss_context;
 	void *ptr;
-	u8 rss_mask = 0x3f;
+	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+			MLX4_RSS_TCP_IPV6);
 	int i, qpn;
 	int err = 0;
 	int good_qps = 0;
@@ -877,18 +893,21 @@
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
 				priv->rx_ring[0].cqn, &context);
 
-	ptr = ((void *) &context) + 0x3c;
+	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
 	rss_context = ptr;
 	rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
 					    (rss_map->base_qpn));
 	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+	if (priv->mdev->profile.udp_rss) {
+		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+		rss_context->base_qpn_udp = rss_context->default_qpn;
+	}
 	rss_context->flags = rss_mask;
-	rss_context->hash_fn = 1;
+	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
 	for (i = 0; i < 10; i++)
 		rss_context->rss_key[i] = rsskey[i];
 
-	if (priv->mdev->profile.udp_rss)
-		rss_context->base_qpn_udp = rss_context->default_qpn;
 	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
 			       &rss_map->indir_qp, &rss_map->indir_state);
 	if (err)
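
The RX path above filters out frames the adapter looped back to itself by packing both the frame's source MAC and the port's own MAC into a u64 and comparing them. The driver uses its own helper, mlx4_en_mac_to_u64(); the stand-alone sketch below only illustrates the packing and is not the driver's implementation.

#include <stdint.h>

/* Pack a 6-byte Ethernet address into the low 48 bits of a u64 so that
 * two addresses can be compared with a single integer comparison. */
static uint64_t mac_to_u64(const uint8_t addr[6])
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

With both addresses in u64 form, the drop decision above reduces to one equality test plus the loopback-validation check.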
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcec..bf2e5d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -43,7 +43,7 @@
 static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
 {
 	return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
-			MLX4_CMD_TIME_CLASS_A);
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d901b42..9ef9038 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -307,59 +307,60 @@
 	return cnt;
 }
 
-
 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-	struct mlx4_cqe *cqe = cq->buf;
+	struct mlx4_cqe *cqe;
 	u16 index;
-	u16 new_index;
+	u16 new_index, ring_index;
 	u32 txbbs_skipped = 0;
-	u32 cq_last_sav;
-
-	/* index always points to the first TXBB of the last polled descriptor */
-	index = ring->cons & ring->size_mask;
-	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-	if (index == new_index)
-		return;
+	u32 cons_index = mcq->cons_index;
+	int size = cq->size;
+	u32 size_mask = ring->size_mask;
+	struct mlx4_cqe *buf = cq->buf;
 
 	if (!priv->port_up)
 		return;
 
-	/*
-	 * We use a two-stage loop:
-	 * - the first samples the HW-updated CQE
-	 * - the second frees TXBBs until the last sample
-	 * This lets us amortize CQE cache misses, while still polling the CQ
-	 * until is quiescent.
-	 */
-	cq_last_sav = mcq->cons_index;
-	do {
+	index = cons_index & size_mask;
+	cqe = &buf[index];
+	ring_index = ring->cons & size_mask;
+
+	/* Process all completed CQEs */
+	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+			cons_index & size)) {
+		/*
+		 * make sure we read the CQE after we read the
+		 * ownership bit
+		 */
+		rmb();
+
+		/* Skip over last polled CQE */
+		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
 		do {
-			/* Skip over last polled CQE */
-			index = (index + ring->last_nr_txbb) & ring->size_mask;
 			txbbs_skipped += ring->last_nr_txbb;
-
-			/* Poll next CQE */
+			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+			/* free next descriptor */
 			ring->last_nr_txbb = mlx4_en_free_tx_desc(
-						priv, ring, index,
-						!!((ring->cons + txbbs_skipped) &
-						   ring->size));
-			++mcq->cons_index;
+					priv, ring, ring_index,
+					!!((ring->cons + txbbs_skipped) &
+							ring->size));
+		} while (ring_index != new_index);
 
-		} while (index != new_index);
+		++cons_index;
+		index = cons_index & size_mask;
+		cqe = &buf[index];
+	}
 
-		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-	} while (index != new_index);
-	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
-			 (u32) (mcq->cons_index - cq_last_sav));
 
 	/*
 	 * To prevent CQ overflow we first update CQ consumer and only then
 	 * the ring consumer.
 	 */
+	mcq->cons_index = cons_index;
 	mlx4_cq_set_ci(mcq);
 	wmb();
 	ring->cons += txbbs_skipped;
@@ -565,7 +566,8 @@
 		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
 	}
 	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
-	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+		(!!vlan_tx_tag_present(skb));
 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
@@ -676,27 +678,25 @@
 	/* Prepare ctrl segement apart opcode+ownership, which depends on
 	 * whether LSO is used */
 	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+		!!vlan_tx_tag_present(skb);
 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
-	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
-						MLX4_WQE_CTRL_SOLICITED);
+	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
 							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
 		ring->tx_csum++;
 	}
 
-	if (unlikely(priv->validate_loopback)) {
-		/* Copy dst mac address to wqe */
-		skb_reset_mac_header(skb);
-		ethh = eth_hdr(skb);
-		if (ethh && ethh->h_dest) {
-			mac = mlx4_en_mac_to_u64(ethh->h_dest);
-			mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
-			mac_l = (u32) (mac & 0xffffffff);
-			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
-			tx_desc->ctrl.imm = cpu_to_be32(mac_l);
-		}
+	/* Copy dst mac address to wqe */
+	skb_reset_mac_header(skb);
+	ethh = eth_hdr(skb);
+	if (ethh && ethh->h_dest) {
+		mac = mlx4_en_mac_to_u64(ethh->h_dest);
+		mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+		mac_l = (u32) (mac & 0xffffffff);
+		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+		tx_desc->ctrl.imm = cpu_to_be32(mac_l);
 	}
 
 	/* Handle LSO (TSO) packets */
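
The reworked TX completion handler above no longer compares WQE indices to decide when to stop; it walks the CQ and treats a CQE as completed when the parity of its owner bit matches the parity of the consumer index's wrap bit, which is what the XNOR() test expresses. A minimal sketch of that ownership test, assuming the owner bit is the top bit of owner_sr_opcode (MLX4_CQE_OWNER_MASK in the driver) and a power-of-two ring size; illustrative only.

#include <stdbool.h>
#include <stdint.h>

#define XNOR(x, y)	(!(x) == !(y))
#define CQE_OWNER_BIT	0x80	/* assumed position of the owner bit */

/* 'ring_size' must be a power of two, so (cons_index & ring_size) is the
 * wrap-parity bit of the consumer index. */
static bool cqe_completed(uint8_t owner_sr_opcode, uint32_t cons_index,
			  uint32_t ring_size)
{
	return XNOR(owner_sr_opcode & CQE_OWNER_BIT, cons_index & ring_size);
}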
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 24ee967..1e9b55e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -52,30 +53,6 @@
 	MLX4_EQ_ENTRY_SIZE	= 0x20
 };
 
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
-	__be32			flags;
-	u16			reserved1[3];
-	__be16			page_offset;
-	u8			log_eq_size;
-	u8			reserved2[4];
-	u8			eq_period;
-	u8			reserved3;
-	u8			eq_max_count;
-	u8			reserved4[3];
-	u8			intr;
-	u8			log_page_size;
-	u8			reserved5[2];
-	u8			mtt_base_addr_h;
-	__be32			mtt_base_addr_l;
-	u32			reserved6[2];
-	__be32			consumer_index;
-	__be32			producer_index;
-	u32			reserved7[4];
-};
-
 #define MLX4_EQ_STATUS_OK	   ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW	   ( 0 << 24)
@@ -100,46 +77,9 @@
 			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
-			       (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
-	u8			reserved1;
-	u8			type;
-	u8			reserved2;
-	u8			subtype;
-	union {
-		u32		raw[6];
-		struct {
-			__be32	cqn;
-		} __packed comp;
-		struct {
-			u16	reserved1;
-			__be16	token;
-			u32	reserved2;
-			u8	reserved3[3];
-			u8	status;
-			__be64	out_param;
-		} __packed cmd;
-		struct {
-			__be32	qpn;
-		} __packed qp;
-		struct {
-			__be32	srqn;
-		} __packed srq;
-		struct {
-			__be32	cqn;
-			u32	reserved1;
-			u8	reserved2[3];
-			u8	syndrome;
-		} __packed cq_err;
-		struct {
-			u32	reserved1[2];
-			__be32	port;
-		} __packed port_change;
-	}			event;
-	u8			reserved3[3];
-	u8			owner;
-} __packed;
+			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
+			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
+			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
@@ -162,13 +102,144 @@
 	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+	struct mlx4_eqe *eqe =
+		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+	return (!!(eqe->owner & 0x80) ^
+		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+		eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+	struct mlx4_mfunc_master_ctx *master =
+		container_of(work, struct mlx4_mfunc_master_ctx,
+			     slave_event_work);
+	struct mlx4_mfunc *mfunc =
+		container_of(master, struct mlx4_mfunc, master);
+	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+	struct mlx4_eqe *eqe;
+	u8 slave;
+	int i;
+
+	for (eqe = next_slave_event_eqe(slave_eq); eqe;
+	      eqe = next_slave_event_eqe(slave_eq)) {
+		slave = eqe->slave_id;
+
+		/* All active slaves need to receive the event */
+		if (slave == ALL_SLAVES) {
+			for (i = 0; i < dev->num_slaves; i++) {
+				if (i != dev->caps.function &&
+				    master->slave_state[i].active)
+					if (mlx4_GEN_EQE(dev, i, eqe))
+						mlx4_warn(dev, "Failed to "
+							  " generate event "
+							  "for slave %d\n", i);
+			}
+		} else {
+			if (mlx4_GEN_EQE(dev, slave, eqe))
+				mlx4_warn(dev, "Failed to generate event "
+					       "for slave %d\n", slave);
+		}
+		++slave_eq->cons;
+	}
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+	struct mlx4_eqe *s_eqe =
+		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+	if ((!!(s_eqe->owner & 0x80)) ^
+	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+			  "No free EQE on slave events queue\n", slave);
+		return;
+	}
+
+	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+	s_eqe->slave_id = slave;
+	/* ensure all information is written before setting the ownership bit */
+	wmb();
+	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+	++slave_eq->prod;
+
+	queue_work(priv->mfunc.master.comm_wq,
+		   &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+			     struct mlx4_eqe *eqe)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_slave =
+		&priv->mfunc.master.slave_state[slave];
+
+	if (!s_slave->active) {
+		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+		return;
+	}
+
+	slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+	struct mlx4_mfunc_master_ctx *master =
+		container_of(work, struct mlx4_mfunc_master_ctx,
+			     slave_flr_event_work);
+	struct mlx4_mfunc *mfunc =
+		container_of(master, struct mlx4_mfunc, master);
+	struct mlx4_priv *priv =
+		container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+	int i;
+	int err;
+
+	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+	for (i = 0 ; i < dev->num_slaves; i++) {
+
+		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+				 "clean slave: %d\n", i);
+
+			mlx4_delete_all_resources_for_slave(dev, i);
+			/* return the slave to running mode */
+			spin_lock(&priv->mfunc.master.slave_state_lock);
+			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+			slave_state[i].is_slave_going_down = 0;
+			spin_unlock(&priv->mfunc.master.slave_state_lock);
+			/* notify the FW: */
+			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+			if (err)
+				mlx4_warn(dev, "Failed to notify FW on "
+					  "FLR done (slave:%d)\n", i);
+		}
+	}
+}
+
 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
+	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_eqe *eqe;
 	int cqn;
 	int eqes_found = 0;
 	int set_ci = 0;
 	int port;
+	int slave = 0;
+	int ret;
+	u32 flr_slave;
+	u8 update_slave_state;
+	int i;
 
 	while ((eqe = next_eqe_sw(eq))) {
 		/*
@@ -191,14 +262,68 @@
 		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
 		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-				      eqe->type);
+			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* forward only to slave owning the QP */
+				ret = mlx4_get_slave_from_resource_id(dev,
+						RES_QP,
+						be32_to_cpu(eqe->event.qp.qpn)
+						& 0xffffff, &slave);
+				if (ret && ret != -ENOENT) {
+					mlx4_dbg(dev, "QP event %02x(%02x) on "
+						 "EQ %d at index %u: could "
+						 "not get slave id (%d)\n",
+						 eqe->type, eqe->subtype,
+						 eq->eqn, eq->cons_index, ret);
+					break;
+				}
+
+				if (!ret && slave != dev->caps.function) {
+					mlx4_slave_event(dev, slave, eqe);
+					break;
+				}
+
+			}
+			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+				      0xffffff, eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
+			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+				  __func__);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-				      eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* forward only to slave owning the SRQ */
+				ret = mlx4_get_slave_from_resource_id(dev,
+						RES_SRQ,
+						be32_to_cpu(eqe->event.srq.srqn)
+						& 0xffffff,
+						&slave);
+				if (ret && ret != -ENOENT) {
+					mlx4_warn(dev, "SRQ event %02x(%02x) "
+						  "on EQ %d at index %u: could"
+						  " not get slave id (%d)\n",
+						  eqe->type, eqe->subtype,
+						  eq->eqn, eq->cons_index, ret);
+					break;
+				}
+				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+					  " event: %02x(%02x)\n", __func__,
+					  slave,
+					  be32_to_cpu(eqe->event.srq.srqn),
+					  eqe->type, eqe->subtype);
+
+				if (!ret && slave != dev->caps.function) {
+					mlx4_warn(dev, "%s: sending event "
+						  "%02x(%02x) to slave:%d\n",
+						   __func__, eqe->type,
+						  eqe->subtype, slave);
+					mlx4_slave_event(dev, slave, eqe);
+					break;
+				}
+			}
+			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+				       0xffffff, eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@
 		case MLX4_EVENT_TYPE_PORT_CHANGE:
 			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
 			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+				mlx4_dispatch_event(dev,
+						    MLX4_DEV_EVENT_PORT_DOWN,
 						    port);
 				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+				if (mlx4_is_master(dev))
+					/* change the state of all slaves'
+					 * ports to down: */
+					for (i = 0; i < dev->num_slaves; i++) {
+						mlx4_dbg(dev, "%s: Sending "
+							 "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+							 " to slave: %d, port:%d\n",
+							 __func__, i, port);
+						if (i == dev->caps.function)
+							continue;
+						mlx4_slave_event(dev, i, eqe);
+					}
 			} else {
-				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+				mlx4_dispatch_event(dev,
+						    MLX4_DEV_EVENT_PORT_UP,
 						    port);
 				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+				if (mlx4_is_master(dev)) {
+					for (i = 0; i < dev->num_slaves; i++) {
+						if (i == dev->caps.function)
+							continue;
+						mlx4_slave_event(dev, i, eqe);
+					}
+				}
 			}
 			break;
 
@@ -226,7 +373,28 @@
 				  eqe->event.cq_err.syndrome == 1 ?
 				  "overrun" : "access violation",
 				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+			if (mlx4_is_master(dev)) {
+				ret = mlx4_get_slave_from_resource_id(dev,
+					RES_CQ,
+					be32_to_cpu(eqe->event.cq_err.cqn)
+					& 0xffffff, &slave);
+				if (ret && ret != -ENOENT) {
+					mlx4_dbg(dev, "CQ event %02x(%02x) on "
+						 "EQ %d at index %u: could "
+						  "not get slave id (%d)\n",
+						  eqe->type, eqe->subtype,
+						  eq->eqn, eq->cons_index, ret);
+					break;
+				}
+
+				if (!ret && slave != dev->caps.function) {
+					mlx4_slave_event(dev, slave, eqe);
+					break;
+				}
+			}
+			mlx4_cq_event(dev,
+				      be32_to_cpu(eqe->event.cq_err.cqn)
+				      & 0xffffff,
 				      eqe->type);
 			break;
 
@@ -234,13 +402,60 @@
 			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
 			break;
 
+		case MLX4_EVENT_TYPE_COMM_CHANNEL:
+			if (!mlx4_is_master(dev)) {
+				mlx4_warn(dev, "Received comm channel event "
+					       "for non master device\n");
+				break;
+			}
+			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+			       eqe->event.comm_channel_arm.bit_vec,
+			       sizeof eqe->event.comm_channel_arm.bit_vec);
+			queue_work(priv->mfunc.master.comm_wq,
+				   &priv->mfunc.master.comm_work);
+			break;
+
+		case MLX4_EVENT_TYPE_FLR_EVENT:
+			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+			if (!mlx4_is_master(dev)) {
+				mlx4_warn(dev, "Non-master function received "
+					       "FLR event\n");
+				break;
+			}
+
+			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+			if (flr_slave > dev->num_slaves) {
+				mlx4_warn(dev,
+					  "Got FLR for unknown function: %d\n",
+					  flr_slave);
+				update_slave_state = 0;
+			} else
+				update_slave_state = 1;
+
+			spin_lock(&priv->mfunc.master.slave_state_lock);
+			if (update_slave_state) {
+				priv->mfunc.master.slave_state[flr_slave].active = false;
+				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+			}
+			spin_unlock(&priv->mfunc.master.slave_state_lock);
+			queue_work(priv->mfunc.master.comm_wq,
+				   &priv->mfunc.master.slave_flr_event_work);
+			break;
 		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
 		case MLX4_EVENT_TYPE_ECC_DETECT:
 		default:
-			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
-				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+				  "index %u. owner=%x, nent=0x%x, slave=%x, "
+				  "ownership=%s\n",
+				  eqe->type, eqe->subtype, eq->eqn,
+				  eq->cons_index, eqe->owner, eq->nent,
+				  eqe->slave_id,
+				  !!(eqe->owner & 0x80) ^
+				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
 			break;
-		}
+		};
 
 		++eq->cons_index;
 		eqes_found = 1;
@@ -290,25 +505,58 @@
 	return IRQ_HANDLED;
 }
 
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			struct mlx4_vhcr *vhcr,
+			struct mlx4_cmd_mailbox *inbox,
+			struct mlx4_cmd_mailbox *outbox,
+			struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_event_eq_info *event_eq =
+		&priv->mfunc.master.slave_state[slave].event_eq;
+	u32 in_modifier = vhcr->in_modifier;
+	u32 eqn = in_modifier & 0x1FF;
+	u64 in_param =  vhcr->in_param;
+	int err = 0;
+
+	if (slave == dev->caps.function)
+		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_NATIVE);
+	if (!err) {
+		if (in_modifier >> 31) {
+			/* unmap */
+			event_eq->event_type &= ~in_param;
+		} else {
+			event_eq->eqn = eqn;
+			event_eq->event_type = in_param;
+		}
+	}
+	return err;
+}
+
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
 			int eq_num)
 {
 	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
-			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int eq_num)
 {
-	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
-			MLX4_CMD_TIME_CLASS_A);
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			 int eq_num)
 {
-	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
-			    MLX4_CMD_TIME_CLASS_A);
+	return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+			    MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@
 	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
 		priv->eq_table.uar_map[i] = NULL;
 
-	err = mlx4_map_clr_int(dev);
-	if (err)
-		goto err_out_bitmap;
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_map_clr_int(dev);
+		if (err)
+			goto err_out_bitmap;
 
-	priv->eq_table.clr_mask =
-		swab32(1 << (priv->eq_table.inta_pin & 31));
-	priv->eq_table.clr_int  = priv->clr_base +
-		(priv->eq_table.inta_pin < 32 ? 4 : 0);
+		priv->eq_table.clr_mask =
+			swab32(1 << (priv->eq_table.inta_pin & 31));
+		priv->eq_table.clr_int  = priv->clr_base +
+			(priv->eq_table.inta_pin < 32 ? 4 : 0);
+	}
 
 	priv->eq_table.irq_names =
 		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 		--i;
 	}
-	mlx4_unmap_clr_int(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_unmap_clr_int(dev);
 	mlx4_free_irqs(dev);
 
 err_out_bitmap:
@@ -725,7 +976,8 @@
 	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
-	mlx4_unmap_clr_int(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_unmap_clr_int(dev);
 
 	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
 		if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@
 
 	err = mlx4_NOP(dev);
 	/* When not in MSI_X, there is only one irq to check */
-	if (!(dev->flags & MLX4_FLAG_MSI_X))
+	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
 		return err;
 
 	/* A loop over all completion vectors, for each vector we will check
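
The slave event queue introduced above reuses the owner-bit convention in pure software: the producer may only fill a slot whose owner parity equals the producer index's wrap parity, and after filling it writes the opposite parity so the consumer recognizes the entry as valid. A rough sketch of the two checks, assuming a power-of-two queue depth (SLAVE_EVENT_EQ_SIZE in the patch); it mirrors the expressions in slave_event() and next_slave_event_eqe() but is not the driver code.

#include <stdbool.h>
#include <stdint.h>

#define EVENT_EQ_SIZE	128	/* assumed power-of-two queue depth */
#define OWNER_BIT	0x80

/* Producer: a slot is free when its owner parity equals the wrap parity
 * of the producer index. */
static bool slot_is_free(uint8_t owner, uint32_t prod)
{
	return !((!!(owner & OWNER_BIT)) ^ (!!(prod & EVENT_EQ_SIZE)));
}

/* Owner value to write after filling the slot: the opposite parity, so
 * the consumer will see the entry as valid. */
static uint8_t new_owner(uint32_t prod)
{
	return (prod & EVENT_EQ_SIZE) ? 0x00 : OWNER_BIT;
}

/* Consumer: an entry is valid when its owner parity differs from the
 * wrap parity of the consumer index. */
static bool entry_is_valid(uint8_t owner, uint32_t cons)
{
	return (!!(owner & OWNER_BIT)) ^ (!!(cons & EVENT_EQ_SIZE));
}

In the patch the producer also issues a wmb() between filling the entry and flipping the owner byte, so the consumer never observes a half-written event.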
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 435ca6e..a424a19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -32,6 +32,7 @@
  * SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include <linux/mlx4/cmd.h>
 #include <linux/module.h>
 #include <linux/cache.h>
@@ -48,7 +49,7 @@
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
-static int enable_qos;
+static bool enable_qos;
 module_param(enable_qos, bool, 0444);
 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
 
@@ -139,12 +140,185 @@
 	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 
 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
-			MLX4_CMD_TIME_CLASS_A);
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
 
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd)
+{
+	u8	field;
+	u32	size;
+	int	err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET		0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET		0xc
+
+	if (vhcr->op_modifier == 1) {
+		field = vhcr->in_modifier;
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+		field = 0; /* ensure fvl bit is not set */
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+	} else if (vhcr->op_modifier == 0) {
+		field = 1 << 7; /* enable only ethernet interface */
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+		field = slave;
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+		field = dev->caps.num_ports;
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+		size = 0; /* no PF behaviour is set for now */
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+		size = dev->caps.num_qps;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+		size = dev->caps.num_srqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+		size = dev->caps.num_cqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+		size = dev->caps.num_eqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+		size = dev->caps.reserved_eqs;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+		size = dev->caps.num_mpts;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+		size = dev->caps.num_mtts;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+		size = dev->caps.num_mgms + dev->caps.num_amgms;
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+	} else
+		err = -EINVAL;
+
+	return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	u32			*outbox;
+	u8			field;
+	u32			size;
+	int			i;
+	int			err = 0;
+
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+	if (err)
+		goto out;
+
+	outbox = mailbox->buf;
+
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+	if (!(field & (1 << 7))) {
+		mlx4_err(dev, "The host doesn't support eth interface\n");
+		err = -EPROTONOSUPPORT;
+		goto out;
+	}
+
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+	func_cap->function = field;
+
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+	func_cap->num_ports = field;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+	func_cap->pf_context_behaviour = size;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+	func_cap->qp_quota = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+	func_cap->srq_quota = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+	func_cap->cq_quota = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+	func_cap->max_eq = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+	func_cap->reserved_eq = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+	func_cap->mpt_quota = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+	func_cap->mtt_quota = size & 0xFFFFFF;
+
+	MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+	func_cap->mcg_quota = size & 0xFFFFFF;
+
+	for (i = 1; i <= func_cap->num_ports; ++i) {
+		err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+				   MLX4_CMD_QUERY_FUNC_CAP,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			goto out;
+
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+		if (field & (1 << 7)) {
+			mlx4_err(dev, "VLAN is enforced on this port\n");
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
+
+		if (field & (1 << 6)) {
+			mlx4_err(dev, "Force mac is enabled on this port\n");
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
+
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+		func_cap->physical_port[i] = field;
+	}
+
+	/* All other resources are allocated by the master, but we still report
+	 * 'num' and 'reserved' capabilities as follows:
+	 * - num remains the maximum resource index
+	 * - 'num - reserved' is the total number of available objects of a
+	 *   resource, but resource indices may be less than 'reserved'
+	 * TODO: set per-resource quotas */
+
+out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return err;
+}
+
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +403,7 @@
 	outbox = mailbox->buf;
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-			   MLX4_CMD_TIME_CLASS_A);
+			   MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
 	if (err)
 		goto out;
 
@@ -396,12 +570,15 @@
 
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
 			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-					   MLX4_CMD_TIME_CLASS_B);
+					   MLX4_CMD_TIME_CLASS_B,
+					   !mlx4_is_slave(dev));
 			if (err)
 				goto out;
 
 			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
 			dev_cap->supported_port_types[i] = field & 3;
+			dev_cap->suggested_type[i] = (field >> 3) & 1;
+			dev_cap->default_sense[i] = (field >> 4) & 1;
 			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
 			dev_cap->ib_mtu[i]	   = field & 0xf;
 			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +647,61 @@
 	return err;
 }
 
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd)
+{
+	u64 def_mac;
+	u8 port_type;
+	int err;
+
+#define MLX4_PORT_SUPPORT_IB		(1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE		(1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE		(1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK	(0xff & ~MLX4_PORT_SUPPORT_IB & \
+					 ~MLX4_PORT_SUGGEST_TYPE & \
+					 ~MLX4_PORT_DEFAULT_SENSE)
+
+	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+			   MLX4_CMD_NATIVE);
+
+	if (!err && dev->caps.function != slave) {
+		/* set slave default_mac address */
+		MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+		def_mac += slave << 8;
+		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+		/* get port type - currently only eth is enabled */
+		MLX4_GET(port_type, outbox->buf,
+			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+		/* Allow only Eth port, no link sensing allowed */
+		port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+		/* check eth is enabled for this port */
+		if (!(port_type & 2))
+			mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+		MLX4_PUT(outbox->buf, port_type,
+			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+	}
+
+	return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+	struct mlx4_cmd_mailbox *outbox = ptr;
+
+	return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+			    MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+			    MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
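
mlx4_QUERY_PORT_wrapper() above hands each slave a distinct default MAC by adding (slave << 8) to the MAC read back from the mailbox, so the functions differ in the second-lowest address byte. A tiny illustration of the resulting addresses; the base MAC below is made up for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t port_mac = 0x0002c900ab00ULL;	/* example port MAC 00:02:c9:00:ab:00 */
	int slave;

	for (slave = 1; slave <= 3; slave++)
		printf("slave %d default MAC: %012llx\n", slave,
		       (unsigned long long)(port_mac + ((uint64_t)slave << 8)));
	return 0;
}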
+
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +751,8 @@
 
 			if (++nent == MLX4_MAILBOX_SIZE / 16) {
 				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
-						MLX4_CMD_TIME_CLASS_B);
+						MLX4_CMD_TIME_CLASS_B,
+						MLX4_CMD_NATIVE);
 				if (err)
 					goto out;
 				nent = 0;
@@ -528,7 +761,8 @@
 	}
 
 	if (nent)
-		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
 
@@ -557,13 +791,15 @@
 
 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
 {
-	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 
 int mlx4_RUN_FW(struct mlx4_dev *dev)
 {
-	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +815,7 @@
 
 #define QUERY_FW_OUT_SIZE             0x100
 #define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_PPF_ID		       0x09
 #define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
 #define QUERY_FW_MAX_CMD_OFFSET        0x0f
 #define QUERY_FW_ERR_START_OFFSET      0x30
@@ -589,13 +826,16 @@
 #define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
 #define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
 
+#define QUERY_FW_COMM_BASE_OFFSET      0x40
+#define QUERY_FW_COMM_BAR_OFFSET       0x48
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	outbox = mailbox->buf;
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
-			    MLX4_CMD_TIME_CLASS_A);
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
 
@@ -608,6 +848,9 @@
 		((fw_ver & 0xffff0000ull) >> 16) |
 		((fw_ver & 0x0000ffffull) << 16);
 
+	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+	dev->caps.function = lg;
+
 	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
 	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
 	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +892,11 @@
 	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
 	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
 
+	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
+	fw->comm_bar = (fw->comm_bar >> 6) * 2;
+	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+		 fw->comm_bar, fw->comm_base);
 	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
 
 	/*
@@ -711,7 +959,7 @@
 	outbox = mailbox->buf;
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
-			   MLX4_CMD_TIME_CLASS_A);
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
 
@@ -743,6 +991,7 @@
 #define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
 #define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
 #define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
+#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
 #define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
 #define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
 #define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1080,11 @@
 
 	/* UAR attributes */
 
-	MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
 
-	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+		       MLX4_CMD_NATIVE);
 
 	if (err)
 		mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1093,101 @@
 	return err;
 }
 
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+		   struct mlx4_init_hca_param *param)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	__be32 *outbox;
+	int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	outbox = mailbox->buf;
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+			   MLX4_CMD_QUERY_HCA,
+			   MLX4_CMD_TIME_CLASS_B,
+			   !mlx4_is_slave(dev));
+	if (err)
+		goto out;
+
+	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+	/* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
+	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
+	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
+	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
+	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
+	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
+	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
+	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
+	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
+	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
+	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+	/* multicast attributes */
+
+	MLX4_GET(param->mc_base,         outbox, INIT_HCA_MC_BASE_OFFSET);
+	MLX4_GET(param->log_mc_entry_sz, outbox,
+		 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+	MLX4_GET(param->log_mc_hash_sz,  outbox,
+		 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+	MLX4_GET(param->log_mc_table_sz, outbox,
+		 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+	/* TPT attributes */
+
+	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
+	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+	/* UAR attributes */
+
+	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port = vhcr->in_modifier;
+	int err;
+
+	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+		return 0;
+
+	if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+		return -ENODEV;
+
+	/* Enable port only if it was previously disabled */
+	if (!priv->mfunc.master.init_port_ref[port]) {
+		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+		if (err)
+			return err;
+		priv->mfunc.master.slave_state[slave].init_port_mask |=
+			(1 << port);
+	}
+	++priv->mfunc.master.init_port_ref[port];
+	return 0;
+}
+
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1231,62 @@
 		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
 
 		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-			       MLX4_CMD_TIME_CLASS_A);
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
 		mlx4_free_cmd_mailbox(dev, mailbox);
 	} else
 		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-			       MLX4_CMD_TIME_CLASS_A);
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
 
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port = vhcr->in_modifier;
+	int err;
+
+	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+	    (1 << port)))
+		return 0;
+
+	if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+		return -ENODEV;
+	if (priv->mfunc.master.init_port_ref[port] == 1) {
+		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+			       MLX4_CMD_NATIVE);
+		if (err)
+			return err;
+	}
+	priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+	--priv->mfunc.master.init_port_ref[port];
+	return 0;
+}
+
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+			MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
 
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+			MLX4_CMD_NATIVE);
 }
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
 	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
 			       MLX4_CMD_SET_ICM_SIZE,
-			       MLX4_CMD_TIME_CLASS_A);
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (ret)
 		return ret;
 
@@ -929,7 +1303,7 @@
 int mlx4_NOP(struct mlx4_dev *dev)
 {
 	/* Input modifier of 0x1f means "finish as soon as possible." */
-	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }
 
 #define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1312,8 @@
 	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
 	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
-			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+			    MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_read);
 
@@ -947,6 +1322,6 @@
 	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
 	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
-					MLX4_CMD_TIME_CLASS_A);
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_write);
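
mlx4_QUERY_FUNC_CAP() above reads each per-function quota out of the firmware mailbox at a fixed byte offset and keeps only the low 24 bits. The driver does this with its MLX4_GET() macro (a big-endian load from the mailbox buffer); the helper below only sketches the offset-plus-mask pattern and is not that macro.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl(); the kernel uses be32_to_cpu() instead */

/* Read a 32-bit big-endian field 'offset' bytes into the mailbox and
 * keep the low 24 bits, as the QUERY_FUNC_CAP quota fields do. */
static uint32_t get_quota(const void *outbox, size_t offset)
{
	uint32_t be;

	memcpy(&be, (const uint8_t *)outbox + offset, sizeof(be));
	return ntohl(be) & 0xFFFFFF;
}

For example, the QP quota sits at offset 0x10 (QUERY_FUNC_CAP_QP_QUOTA_OFFSET), so it would be read as get_quota(outbox, 0x10).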
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index bf5ec22..119e0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -111,11 +111,30 @@
 	u64 max_icm_sz;
 	int max_gso_sz;
 	u8  supported_port_types[MLX4_MAX_PORTS + 1];
+	u8  suggested_type[MLX4_MAX_PORTS + 1];
+	u8  default_sense[MLX4_MAX_PORTS + 1];
 	u8  log_max_macs[MLX4_MAX_PORTS + 1];
 	u8  log_max_vlans[MLX4_MAX_PORTS + 1];
 	u32 max_counters;
 };
 
+struct mlx4_func_cap {
+	u8	function;
+	u8	num_ports;
+	u8	flags;
+	u32	pf_context_behaviour;
+	int	qp_quota;
+	int	cq_quota;
+	int	srq_quota;
+	int	mpt_quota;
+	int	mtt_quota;
+	int	max_eq;
+	int	reserved_eq;
+	int	mcg_quota;
+	u8	physical_port[MLX4_MAX_PORTS + 1];
+	u8	port_flags[MLX4_MAX_PORTS + 1];
+};
+
 struct mlx4_adapter {
 	char board_id[MLX4_BOARD_ID_LEN];
 	u8   inta_pin;
@@ -133,6 +152,7 @@
 	u64 dmpt_base;
 	u64 cmpt_base;
 	u64 mtt_base;
+	u64 global_caps;
 	u16 log_mc_entry_sz;
 	u16 log_mc_hash_sz;
 	u8  log_num_qps;
@@ -143,6 +163,7 @@
 	u8  log_mc_table_sz;
 	u8  log_mpt_sz;
 	u8  log_uar_sz;
+	u8  uar_page_sz; /* log pg sz in 4k chunks */
 };
 
 struct mlx4_init_ib_param {
@@ -167,12 +188,19 @@
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd);
 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_FA(struct mlx4_dev *dev);
 int mlx4_RUN_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fd..a9ade1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -213,7 +213,7 @@
 static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
 	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
-			MLX4_CMD_TIME_CLASS_B);
+			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@
 
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 {
-	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index ca6feb5..b4e9f6f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -142,7 +142,8 @@
 		mlx4_add_device(intf, priv);
 
 	mutex_unlock(&intf_mutex);
-	mlx4_start_catas_poll(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_start_catas_poll(dev);
 
 	return 0;
 }
@@ -152,7 +153,8 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_interface *intf;
 
-	mlx4_stop_catas_poll(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_stop_catas_poll(dev);
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 94bbc85..6bb62c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -40,6 +40,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/delay.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@
 
 #endif /* CONFIG_PCI_MSI */
 
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+			mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
+					 " of qp per mcg, for example:"
+					 " 10 gives 248. range: 9 <="
+					 " log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF                                        (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK            0
+#define PF_CONTEXT_BEHAVIOUR_MASK      0
+
 static char mlx4_version[] __devinitdata =
 	DRV_NAME ": Mellanox ConnectX core driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
 static struct mlx4_profile default_profile = {
-	.num_qp		= 1 << 17,
+	.num_qp		= 1 << 18,
 	.num_srq	= 1 << 16,
 	.rdmarc_per_qp	= 1 << 4,
 	.num_cq		= 1 << 16,
 	.num_mcg	= 1 << 13,
-	.num_mpt	= 1 << 17,
+	.num_mpt	= 1 << 19,
 	.num_mtt	= 1 << 20,
 };
 
-static int log_num_mac = 2;
+static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
 
@@ -99,15 +121,33 @@
 /* Log2 max number of VLANs per ETH port (0-7) */
 #define MLX4_LOG_NUM_VLANS 7
 
-static int use_prio;
+static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 		  "(0/1, default 0)");
 
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
 
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
+				"1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+	struct list_head list;
+	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+	struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+	return dev->caps.reserved_eqs +
+		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -140,10 +180,8 @@
 {
 	int i;
 
-	dev->caps.port_mask = 0;
 	for (i = 1; i <= dev->caps.num_ports; ++i)
-		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
-			dev->caps.port_mask |= 1 << (i - 1);
+		dev->caps.port_mask[i] = dev->caps.port_type[i];
 }
 
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@
 		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
 		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
 		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+		dev->caps.default_sense[i] = dev_cap->default_sense[i];
 		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
 		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
 		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
 		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
 	}
 
+	dev->caps.uar_page_size	     = PAGE_SIZE;
 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@
 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
 	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
 	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
-	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
+	dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
 	/*
 	 * Subtract 1 from the limit because we need to allocate a
 	 * spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@
 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
-	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
-	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
-						    dev->caps.mtts_per_seg);
+	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
-	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
+
+	/* The first 128 UARs are used for EQ doorbells */
+	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
 	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
 					dev_cap->reserved_xrcds : 0;
 	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
 					dev_cap->max_xrcds : 0;
-	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
+
 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
@@ -235,18 +277,70 @@
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
+	/* Sense port always allowed on supported devices for ConnectX1 and 2 */
+	if (dev->pdev->device != 0x1003)
+		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
 	dev->caps.log_num_macs  = log_num_mac;
 	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
 	dev->caps.log_num_prios = use_prio ? 3 : 0;
 
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
-		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
-			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
-		else
-			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
-		dev->caps.possible_type[i] = dev->caps.port_type[i];
+		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+		if (dev->caps.supported_type[i]) {
+			/* if only ETH is supported - assign ETH */
+			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+			/* if only IB is supported,
+			 * assign IB only if SRIOV is off*/
+			else if (dev->caps.supported_type[i] ==
+				 MLX4_PORT_TYPE_IB) {
+				if (dev->flags & MLX4_FLAG_SRIOV)
+					dev->caps.port_type[i] =
+						MLX4_PORT_TYPE_NONE;
+				else
+					dev->caps.port_type[i] =
+						MLX4_PORT_TYPE_IB;
+			/* if IB and ETH are supported,
+			 * first of all check if SRIOV is on */
+			} else if (dev->flags & MLX4_FLAG_SRIOV)
+				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+			else {
+				/* In non-SRIOV mode, we set the port type
+				 * according to user selection of port type,
+				 * if user selected none, take the FW hint */
+				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+				else
+					dev->caps.port_type[i] = port_type_array[i-1];
+			}
+		}
+		/*
+		 * Link sensing is allowed on the port if 3 conditions are true:
+		 * 1. Both protocols are supported on the port.
+		 * 2. Different types are supported on the port
+		 * 3. FW declared that it supports link sensing
+		 */
 		mlx4_priv(dev)->sense.sense_allowed[i] =
-			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+		/*
+		 * If "default_sense" bit is set, we move the port to "AUTO" mode
+		 * and perform sense_port FW command to try and set the correct
+		 * port type from beginning
+		 */
+		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+			mlx4_SENSE_PORT(dev, i, &sensed_port);
+			if (sensed_port != MLX4_PORT_TYPE_NONE)
+				dev->caps.port_type[i] = sensed_port;
+		} else {
+			dev->caps.possible_type[i] = dev->caps.port_type[i];
+		}
 
 		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
 			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@
 		}
 	}
 
-	mlx4_set_port_mask(dev);
-
 	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
 
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,149 @@
 
 	return 0;
 }
+/* The function checks whether there are live VFs and returns how many */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_state;
+	int i;
+	int ret = 0;
+
+	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+		s_state = &priv->mfunc.master.slave_state[i];
+		if (s_state->active && s_state->last_cmd !=
+		    MLX4_COMM_CMD_RESET) {
+			mlx4_warn(dev, "%s: slave: %d is still active\n",
+				  __func__, i);
+			ret++;
+		}
+	}
+	return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_slave;
+
+	if (!mlx4_is_master(dev))
+		return 0;
+
+	s_slave = &priv->mfunc.master.slave_state[slave];
+	return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+	int			   err;
+	u32			   page_size;
+	struct mlx4_dev_cap	   dev_cap;
+	struct mlx4_func_cap	   func_cap;
+	struct mlx4_init_hca_param hca_param;
+	int			   i;
+
+	memset(&hca_param, 0, sizeof(hca_param));
+	err = mlx4_QUERY_HCA(dev, &hca_param);
+	if (err) {
+		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+		return err;
+	}
+
+	/*fail if the hca has an unknown capability */
+	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+	    HCA_GLOBAL_CAP_MASK) {
+		mlx4_err(dev, "Unknown hca global capabilities\n");
+		return -ENOSYS;
+	}
+
+	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+	memset(&dev_cap, 0, sizeof(dev_cap));
+	err = mlx4_dev_cap(dev, &dev_cap);
+	if (err) {
+		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+		return err;
+	}
+
+	page_size = ~dev->caps.page_size_cap + 1;
+	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+	if (page_size > PAGE_SIZE) {
+		mlx4_err(dev, "HCA minimum page size of %d bigger than "
+			 "kernel PAGE_SIZE of %ld, aborting.\n",
+			 page_size, PAGE_SIZE);
+		return -ENODEV;
+	}
+
+	/* slave gets uar page size from QUERY_HCA fw command */
+	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+	/* TODO: relax this assumption */
+	if (dev->caps.uar_page_size != PAGE_SIZE) {
+		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+			 dev->caps.uar_page_size, PAGE_SIZE);
+		return -ENODEV;
+	}
+
+	memset(&func_cap, 0, sizeof(func_cap));
+	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+	if (err) {
+		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+		return err;
+	}
+
+	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+	    PF_CONTEXT_BEHAVIOUR_MASK) {
+		mlx4_err(dev, "Unknown pf context behaviour\n");
+		return -ENOSYS;
+	}
+
+	dev->caps.function		= func_cap.function;
+	dev->caps.num_ports		= func_cap.num_ports;
+	dev->caps.num_qps		= func_cap.qp_quota;
+	dev->caps.num_srqs		= func_cap.srq_quota;
+	dev->caps.num_cqs		= func_cap.cq_quota;
+	dev->caps.num_eqs               = func_cap.max_eq;
+	dev->caps.reserved_eqs          = func_cap.reserved_eq;
+	dev->caps.num_mpts		= func_cap.mpt_quota;
+	dev->caps.num_mtts		= func_cap.mtt_quota;
+	dev->caps.num_pds               = MLX4_NUM_PDS;
+	dev->caps.num_mgms              = 0;
+	dev->caps.num_amgms             = 0;
+
+	for (i = 1; i <= dev->caps.num_ports; ++i)
+		dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+		return -ENODEV;
+	}
+
+	if (dev->caps.uar_page_size * (dev->caps.num_uars -
+				       dev->caps.reserved_uars) >
+				       pci_resource_len(dev->pdev, 2)) {
+		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+			 "PCI resource 2 size of 0x%llx, aborting.\n",
+			 dev->caps.uar_page_size * dev->caps.num_uars,
+			 (unsigned long long) pci_resource_len(dev->pdev, 2));
+		return -ENODEV;
+	}
+
+#if 0
+	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+		  dev->caps.num_uars, dev->caps.reserved_uars,
+		  dev->caps.uar_page_size * dev->caps.num_uars,
+		  pci_resource_len(dev->pdev, 2));
+	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+		  dev->caps.reserved_eqs);
+	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+		  dev->caps.num_pds, dev->caps.reserved_pds,
+		  dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+	return 0;
+}
 
 /*
  * Change the port configuration of the device.
@@ -377,7 +612,8 @@
 			types[i] = mdev->caps.port_type[i+1];
 	}
 
-	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
 		for (i = 1; i <= mdev->caps.num_ports; i++) {
 			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
 				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +687,7 @@
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
+	int num_eqs;
 
 	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
 				  cmpt_base +
@@ -480,12 +717,14 @@
 	if (err)
 		goto err_srq;
 
+	num_eqs = (mlx4_is_master(dev)) ?
+		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+		dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
 				  cmpt_base +
 				  ((u64) (MLX4_CMPT_TYPE_EQ *
 					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-				  cmpt_entry_sz,
-				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
 	if (err)
 		goto err_cq;
 
@@ -509,6 +748,7 @@
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	u64 aux_pages;
+	int num_eqs;
 	int err;
 
 	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +780,13 @@
 		goto err_unmap_aux;
 	}
 
+
+	num_eqs = (mlx4_is_master(dev)) ?
+		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+		dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
-				  dev->caps.num_eqs, dev->caps.num_eqs,
-				  0, 0);
+				  num_eqs, num_eqs, 0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
 		goto err_unmap_cmpt;
@@ -563,7 +806,7 @@
 	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
 				  init_hca->mtt_base,
 				  dev->caps.mtt_entry_sz,
-				  dev->caps.num_mtt_segs,
+				  dev->caps.num_mtts,
 				  dev->caps.reserved_mtts, 1, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +893,8 @@
 	 * and it's a lot easier than trying to track ref counts.
 	 */
 	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
-				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+				  init_hca->mc_base,
+				  mlx4_get_mgm_entry_size(dev),
 				  dev->caps.num_mgms + dev->caps.num_amgms,
 				  dev->caps.num_mgms + dev->caps.num_amgms,
 				  0, 0);
@@ -726,6 +970,16 @@
 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	down(&priv->cmd.slave_sem);
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+		mlx4_warn(dev, "Failed to close slave function.\n");
+	up(&priv->cmd.slave_sem);
+}
+
 static int map_bf_area(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +987,10 @@
 	resource_size_t bf_len;
 	int err = 0;
 
-	bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
-	bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+	bf_start = pci_resource_start(dev->pdev, 2) +
+			(dev->caps.num_uars << PAGE_SHIFT);
+	bf_len = pci_resource_len(dev->pdev, 2) -
+			(dev->caps.num_uars << PAGE_SHIFT);
 	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
 	if (!priv->bf_mapping)
 		err = -ENOMEM;
@@ -751,10 +1007,81 @@
 static void mlx4_close_hca(struct mlx4_dev *dev)
 {
 	unmap_bf_area(dev);
-	mlx4_CLOSE_HCA(dev, 0);
-	mlx4_free_icms(dev);
-	mlx4_UNMAP_FA(dev);
-	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+	if (mlx4_is_slave(dev))
+		mlx4_slave_exit(dev);
+	else {
+		mlx4_CLOSE_HCA(dev, 0);
+		mlx4_free_icms(dev);
+		mlx4_UNMAP_FA(dev);
+		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+	}
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u64 dma = (u64) priv->mfunc.vhcr_dma;
+	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+	int ret_from_reset = 0;
+	u32 slave_read;
+	u32 cmd_channel_ver;
+
+	down(&priv->cmd.slave_sem);
+	priv->cmd.max_cmds = 1;
+	mlx4_warn(dev, "Sending reset\n");
+	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+				       MLX4_COMM_TIME);
+	/* If we are in the middle of FLR, the slave will try
+	 * NUM_OF_RESET_RETRIES times before giving up. */
+	if (ret_from_reset) {
+		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+			msleep(SLEEP_TIME_IN_RESET);
+			while (ret_from_reset && num_of_reset_retries) {
+				mlx4_warn(dev, "slave is currently in the"
+					  "middle of FLR. retrying..."
+					  "(try num:%d)\n",
+					  (NUM_OF_RESET_RETRIES -
+					   num_of_reset_retries  + 1));
+				ret_from_reset =
+					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+						      0, MLX4_COMM_TIME);
+				num_of_reset_retries = num_of_reset_retries - 1;
+			}
+		} else
+			goto err;
+	}
+
+	/* check the driver version - the slave I/F revision
+	 * must match the master's */
+	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+	cmd_channel_ver = mlx4_comm_get_version();
+
+	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+		MLX4_COMM_GET_IF_REV(slave_read)) {
+		mlx4_err(dev, "slave driver version is not supported"
+			 " by the master\n");
+		goto err;
+	}
+
+	mlx4_warn(dev, "Sending vhcr0\n");
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+						    MLX4_COMM_TIME))
+		goto err;
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+						    MLX4_COMM_TIME))
+		goto err;
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+						    MLX4_COMM_TIME))
+		goto err;
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+		goto err;
+	up(&priv->cmd.slave_sem);
+	return 0;
+
+err:
+	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+	up(&priv->cmd.slave_sem);
+	return -EIO;
 }
 
 static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,55 +1095,75 @@
 	u64 icm_size;
 	int err;
 
-	err = mlx4_QUERY_FW(dev);
-	if (err) {
-		if (err == -EACCES)
-			mlx4_info(dev, "non-primary physical function, skipping.\n");
-		else
-			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-		return err;
-	}
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_QUERY_FW(dev);
+		if (err) {
+			if (err == -EACCES)
+				mlx4_info(dev, "non-primary physical function, skipping.\n");
+			else
+				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+			goto unmap_bf;
+		}
 
-	err = mlx4_load_fw(dev);
-	if (err) {
-		mlx4_err(dev, "Failed to start FW, aborting.\n");
-		return err;
-	}
+		err = mlx4_load_fw(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to start FW, aborting.\n");
+			goto unmap_bf;
+		}
 
-	mlx4_cfg.log_pg_sz_m = 1;
-	mlx4_cfg.log_pg_sz = 0;
-	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
-	if (err)
-		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+		mlx4_cfg.log_pg_sz_m = 1;
+		mlx4_cfg.log_pg_sz = 0;
+		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+		if (err)
+			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
 
-	err = mlx4_dev_cap(dev, &dev_cap);
-	if (err) {
-		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-		goto err_stop_fw;
-	}
+		err = mlx4_dev_cap(dev, &dev_cap);
+		if (err) {
+			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+			goto err_stop_fw;
+		}
 
-	profile = default_profile;
+		profile = default_profile;
 
-	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
-	if ((long long) icm_size < 0) {
-		err = icm_size;
-		goto err_stop_fw;
+		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+					     &init_hca);
+		if ((long long) icm_size < 0) {
+			err = icm_size;
+			goto err_stop_fw;
+		}
+
+		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+		init_hca.uar_page_sz = PAGE_SHIFT - 12;
+
+		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+		if (err)
+			goto err_stop_fw;
+
+		err = mlx4_INIT_HCA(dev, &init_hca);
+		if (err) {
+			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+			goto err_free_icm;
+		}
+	} else {
+		err = mlx4_init_slave(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to initialize slave\n");
+			goto unmap_bf;
+		}
+
+		err = mlx4_slave_cap(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to obtain slave caps\n");
+			goto err_close;
+		}
 	}
 
 	if (map_bf_area(dev))
 		mlx4_dbg(dev, "Failed to map blue flame area\n");
 
-	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
-
-	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
-	if (err)
-		goto err_stop_fw;
-
-	err = mlx4_INIT_HCA(dev, &init_hca);
-	if (err) {
-		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
-		goto err_free_icm;
-	}
+	/* Only the master sets the ports; all the rest get them from it. */
+	if (!mlx4_is_slave(dev))
+		mlx4_set_port_mask(dev);
 
 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
 	if (err) {
@@ -830,16 +1177,19 @@
 	return 0;
 
 err_close:
-	mlx4_CLOSE_HCA(dev, 0);
+	mlx4_close_hca(dev);
 
 err_free_icm:
-	mlx4_free_icms(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_free_icms(dev);
 
 err_stop_fw:
+	if (!mlx4_is_slave(dev)) {
+		mlx4_UNMAP_FA(dev);
+		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+	}
+unmap_bf:
 	unmap_bf_area(dev);
-	mlx4_UNMAP_FA(dev);
-	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
 	return err;
 }
 
@@ -986,55 +1336,56 @@
 		goto err_srq_table_free;
 	}
 
-	err = mlx4_init_mcg_table(dev);
-	if (err) {
-		mlx4_err(dev, "Failed to initialize "
-			 "multicast group table, aborting.\n");
-		goto err_qp_table_free;
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_init_mcg_table(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to initialize "
+				 "multicast group table, aborting.\n");
+			goto err_qp_table_free;
+		}
 	}
 
 	err = mlx4_init_counters_table(dev);
 	if (err && err != -ENOENT) {
 		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
-		goto err_counters_table_free;
+		goto err_mcg_table_free;
 	}
 
-	for (port = 1; port <= dev->caps.num_ports; port++) {
-		enum mlx4_port_type port_type = 0;
-		mlx4_SENSE_PORT(dev, port, &port_type);
-		if (port_type)
-			dev->caps.port_type[port] = port_type;
-		ib_port_default_caps = 0;
-		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
-		if (err)
-			mlx4_warn(dev, "failed to get port %d default "
-				  "ib capabilities (%d). Continuing with "
-				  "caps = 0\n", port, err);
-		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+	if (!mlx4_is_slave(dev)) {
+		for (port = 1; port <= dev->caps.num_ports; port++) {
+			ib_port_default_caps = 0;
+			err = mlx4_get_port_ib_caps(dev, port,
+						    &ib_port_default_caps);
+			if (err)
+				mlx4_warn(dev, "failed to get port %d default "
+					  "ib capabilities (%d). Continuing "
+					  "with caps = 0\n", port, err);
+			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
 
-		err = mlx4_check_ext_port_caps(dev, port);
-		if (err)
-			mlx4_warn(dev, "failed to get port %d extended "
-				  "port capabilities support info (%d)."
-				  " Assuming not supported\n", port, err);
+			err = mlx4_check_ext_port_caps(dev, port);
+			if (err)
+				mlx4_warn(dev, "failed to get port %d extended "
+					  "port capabilities support info (%d)."
+					  " Assuming not supported\n",
+					  port, err);
 
-		err = mlx4_SET_PORT(dev, port);
-		if (err) {
-			mlx4_err(dev, "Failed to set port %d, aborting\n",
-				port);
-			goto err_mcg_table_free;
+			err = mlx4_SET_PORT(dev, port);
+			if (err) {
+				mlx4_err(dev, "Failed to set port %d, aborting\n",
+					port);
+				goto err_counters_table_free;
+			}
 		}
 	}
-	mlx4_set_port_mask(dev);
 
 	return 0;
 
-err_mcg_table_free:
-	mlx4_cleanup_mcg_table(dev);
-
 err_counters_table_free:
 	mlx4_cleanup_counters_table(dev);
 
+err_mcg_table_free:
+	mlx4_cleanup_mcg_table(dev);
+
 err_qp_table_free:
 	mlx4_cleanup_qp_table(dev);
 
@@ -1081,8 +1432,16 @@
 	int i;
 
 	if (msi_x) {
-		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
-			     nreq);
+		/* In multifunction mode each function gets 2 MSI-X vectors:
+		 * one for data path completions and the other for async events
+		 * or command completions */
+		if (mlx4_is_mfunc(dev)) {
+			nreq = 2;
+		} else {
+			nreq = min_t(int, dev->caps.num_eqs -
+				     dev->caps.reserved_eqs, nreq);
+		}
+
 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 		if (!entries)
 			goto no_msi;
@@ -1138,16 +1497,24 @@
 
 	info->dev = dev;
 	info->port = port;
-	mlx4_init_mac_table(dev, &info->mac_table);
-	mlx4_init_vlan_table(dev, &info->vlan_table);
-	info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+	if (!mlx4_is_slave(dev)) {
+		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+		mlx4_init_mac_table(dev, &info->mac_table);
+		mlx4_init_vlan_table(dev, &info->vlan_table);
+		info->base_qpn =
+			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
 			(port - 1) * (1 << log_num_mac);
+	}
 
 	sprintf(info->dev_name, "mlx4_port%d", port);
 	info->port_attr.attr.name = info->dev_name;
-	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (mlx4_is_mfunc(dev))
+		info->port_attr.attr.mode = S_IRUGO;
+	else {
+		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+		info->port_attr.store     = set_port_type;
+	}
 	info->port_attr.show      = show_port_type;
-	info->port_attr.store     = set_port_type;
 	sysfs_attr_init(&info->port_attr.attr);
 
 	err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1587,46 @@
 	kfree(priv->steer);
 }
 
+static int extended_func_num(struct pci_dev *pdev)
+{
+	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
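+/* Illustrative: devfn 0x11 (slot 2, function 1) maps to 2 * 8 + 1 = 17. */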
+
+#define MLX4_OWNER_BASE	0x8069c
+#define MLX4_OWNER_SIZE	4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+	void __iomem *owner;
+	u32 ret;
+
+	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+			MLX4_OWNER_SIZE);
+	if (!owner) {
+		mlx4_err(dev, "Failed to obtain ownership bit\n");
+		return -ENOMEM;
+	}
+
+	ret = readl(owner);
+	iounmap(owner);
+	return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+	void __iomem *owner;
+
+	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+			MLX4_OWNER_SIZE);
+	if (!owner) {
+		mlx4_err(dev, "Failed to obtain ownership bit\n");
+		return;
+	}
+	writel(0, owner);
+	msleep(1000);
+	iounmap(owner);
+}
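+/* As used in __mlx4_init_one() below, a non-zero value read back from the
+ * ownership word means another PF has already claimed the device; writing
+ * zero releases the claim.
+ */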
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct mlx4_priv *priv;
@@ -1235,13 +1642,20 @@
 			"aborting.\n");
 		return err;
 	}
-
+	if (num_vfs > MLX4_MAX_NUM_VF) {
+		printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
+		       num_vfs, MLX4_MAX_NUM_VF);
+		return -EINVAL;
+	}
 	/*
-	 * Check for BARs.  We expect 0: 1MB
+	 * Check for BARs.
 	 */
-	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-	    pci_resource_len(pdev, 0) != 1 << 20) {
-		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+	if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Missing DCS, aborting."
+			"(id == 0X%p, id->driver_data: 0x%lx,"
+			" pci_resource_flags(pdev, 0):0x%lx)\n", id,
+			id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
 		err = -ENODEV;
 		goto err_disable_pdev;
 	}
@@ -1305,42 +1719,132 @@
 	mutex_init(&priv->bf_mutex);
 
 	dev->rev_id = pdev->revision;
+	/* Detect if this device is a virtual function */
+	if (id && id->driver_data & MLX4_VF) {
+		/* When acting as pf, we normally skip vfs unless explicitly
+		 * requested to probe them. */
+		if (num_vfs && extended_func_num(pdev) > probe_vf) {
+			mlx4_warn(dev, "Skipping virtual function:%d\n",
+						extended_func_num(pdev));
+			err = -ENODEV;
+			goto err_free_dev;
+		}
+		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+		dev->flags |= MLX4_FLAG_SLAVE;
+	} else {
+		/* We reset the device and enable SRIOV only for physical
+		 * devices.  Try to claim ownership on the device;
+		 * if already taken, skip -- do not allow multiple PFs */
+		err = mlx4_get_ownership(dev);
+		if (err) {
+			if (err < 0)
+				goto err_free_dev;
+			else {
+				mlx4_warn(dev, "Multiple PFs not yet supported."
+					  " Skipping PF.\n");
+				err = -EINVAL;
+				goto err_free_dev;
+			}
+		}
 
-	/*
-	 * Now reset the HCA before we touch the PCI capabilities or
-	 * attempt a firmware command, since a boot ROM may have left
-	 * the HCA in an undefined state.
-	 */
-	err = mlx4_reset(dev);
-	if (err) {
-		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
-		goto err_free_dev;
+		if (num_vfs) {
+			mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+			err = pci_enable_sriov(pdev, num_vfs);
+			if (err) {
+				mlx4_err(dev, "Failed to enable sriov,"
+					 "continuing without sriov enabled"
+					 " (err = %d).\n", err);
+				num_vfs = 0;
+				err = 0;
+			} else {
+				mlx4_warn(dev, "Running in master mode\n");
+				dev->flags |= MLX4_FLAG_SRIOV |
+					      MLX4_FLAG_MASTER;
+				dev->num_vfs = num_vfs;
+			}
+		}
+
+		/*
+		 * Now reset the HCA before we touch the PCI capabilities or
+		 * attempt a firmware command, since a boot ROM may have left
+		 * the HCA in an undefined state.
+		 */
+		err = mlx4_reset(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+			goto err_rel_own;
+		}
 	}
 
+slave_start:
 	if (mlx4_cmd_init(dev)) {
 		mlx4_err(dev, "Failed to init command interface, aborting.\n");
-		goto err_free_dev;
+		goto err_sriov;
+	}
+
+	/* In slave functions, the communication channel must be initialized
+	 * before posting commands. Also, init num_slaves before calling
+	 * mlx4_init_hca */
+	if (mlx4_is_mfunc(dev)) {
+		if (mlx4_is_master(dev))
+			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+		else {
+			dev->num_slaves = 0;
+			if (mlx4_multi_func_init(dev)) {
+				mlx4_err(dev, "Failed to init slave mfunc"
+					 " interface, aborting.\n");
+				goto err_cmd;
+			}
+		}
 	}
 
 	err = mlx4_init_hca(dev);
-	if (err)
-		goto err_cmd;
+	if (err) {
+		if (err == -EACCES) {
+			/* Not primary physical function;
+			 * running in slave mode */
+			mlx4_cmd_cleanup(dev);
+			dev->flags |= MLX4_FLAG_SLAVE;
+			dev->flags &= ~MLX4_FLAG_MASTER;
+			goto slave_start;
+		} else
+			goto err_mfunc;
+	}
+
+	/* In master functions, the communication channel must be initialized
+	 * after obtaining its address from fw */
+	if (mlx4_is_master(dev)) {
+		if (mlx4_multi_func_init(dev)) {
+			mlx4_err(dev, "Failed to init master mfunc"
+				 "interface, aborting.\n");
+			goto err_close;
+		}
+	}
 
 	err = mlx4_alloc_eq_table(dev);
 	if (err)
-		goto err_close;
+		goto err_master_mfunc;
 
 	priv->msix_ctl.pool_bm = 0;
 	spin_lock_init(&priv->msix_ctl.pool_lock);
 
 	mlx4_enable_msi_x(dev);
-
-	err = mlx4_init_steering(dev);
-	if (err)
+	if ((mlx4_is_mfunc(dev)) &&
+	    !(dev->flags & MLX4_FLAG_MSI_X)) {
+		mlx4_err(dev, "INTx is not supported in multi-function mode."
+			 " aborting.\n");
 		goto err_free_eq;
+	}
+
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_init_steering(dev);
+		if (err)
+			goto err_free_eq;
+	}
 
 	err = mlx4_setup_hca(dev);
-	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+	    !mlx4_is_mfunc(dev)) {
 		dev->flags &= ~MLX4_FLAG_MSI_X;
 		pci_disable_msix(pdev);
 		err = mlx4_setup_hca(dev);
@@ -1383,20 +1887,37 @@
 	mlx4_cleanup_uar_table(dev);
 
 err_steer:
-	mlx4_clear_steering(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_clear_steering(dev);
 
 err_free_eq:
 	mlx4_free_eq_table(dev);
 
+err_master_mfunc:
+	if (mlx4_is_master(dev))
+		mlx4_multi_func_cleanup(dev);
+
 err_close:
 	if (dev->flags & MLX4_FLAG_MSI_X)
 		pci_disable_msix(pdev);
 
 	mlx4_close_hca(dev);
 
+err_mfunc:
+	if (mlx4_is_slave(dev))
+		mlx4_multi_func_cleanup(dev);
+
 err_cmd:
 	mlx4_cmd_cleanup(dev);
 
+err_sriov:
+	if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+		pci_disable_sriov(pdev);
+
+err_rel_own:
+	if (!mlx4_is_slave(dev))
+		mlx4_free_ownership(dev);
+
 err_free_dev:
 	kfree(priv);
 
@@ -1424,6 +1945,12 @@
 	int p;
 
 	if (dev) {
+		/* In SRIOV it is not allowed to unload the PF's
+		 * driver while there are live VFs */
+		if (mlx4_is_master(dev)) {
+			if (mlx4_how_many_lives_vf(dev))
+				printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+		}
 		mlx4_stop_sense(dev);
 		mlx4_unregister_device(dev);
 
@@ -1443,17 +1970,31 @@
 		mlx4_cleanup_xrcd_table(dev);
 		mlx4_cleanup_pd_table(dev);
 
+		if (mlx4_is_master(dev))
+			mlx4_free_resource_tracker(dev);
+
 		iounmap(priv->kar);
 		mlx4_uar_free(dev, &priv->driver_uar);
 		mlx4_cleanup_uar_table(dev);
-		mlx4_clear_steering(dev);
+		if (!mlx4_is_slave(dev))
+			mlx4_clear_steering(dev);
 		mlx4_free_eq_table(dev);
+		if (mlx4_is_master(dev))
+			mlx4_multi_func_cleanup(dev);
 		mlx4_close_hca(dev);
+		if (mlx4_is_slave(dev))
+			mlx4_multi_func_cleanup(dev);
 		mlx4_cmd_cleanup(dev);
 
 		if (dev->flags & MLX4_FLAG_MSI_X)
 			pci_disable_msix(pdev);
+		if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+			mlx4_warn(dev, "Disabling sriov\n");
+			pci_disable_sriov(pdev);
+		}
 
+		if (!mlx4_is_slave(dev))
+			mlx4_free_ownership(dev);
 		kfree(priv);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
@@ -1468,33 +2009,48 @@
 }
 
 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
-	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
-	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
-	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
-	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
-	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
-	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
-	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
-	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+	/* MT25408 "Hermon" SDR */
+	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+	/* MT25408 "Hermon" DDR */
+	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+	/* MT25408 "Hermon" QDR */
+	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+	/* MT25408 "Hermon" DDR PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+	/* MT25408 "Hermon" QDR PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+	/* MT25408 "Hermon" EN 10GigE */
+	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
+	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
+	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+	/* MT26478 ConnectX2 40GigE PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+	/* MT25400 Family [ConnectX-2 Virtual Function] */
+	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+	/* MT27500 Family [ConnectX-3] */
+	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+	/* MT27500 Family [ConnectX-3 Virtual Function] */
+	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
 	{ 0, }
 };
 
@@ -1523,6 +2079,12 @@
 		return -1;
 	}
 
+	/* Check if module param for ports type has legal combination */
+	if (port_type_array[0] == false && port_type_array[1] == true) {
+		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+		port_type_array[0] = true;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 978688c..0785d9b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -44,28 +44,47 @@
 
 static const u8 zero_gid[16];	/* automatically initialized to 0 */
 
+struct mlx4_mgm {
+	__be32			next_gid_index;
+	__be32			members_count;
+	u32			reserved[2];
+	u8			gid[16];
+	__be32			qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+	return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
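+/* Worked example: with the former fixed 256-byte MGM entry size this gives
+ * 4 * (256 / 16 - 2) = 56 QPs per group; MLX4_MAX_MGM_ENTRY_SIZE (0x1000)
+ * caps the configurable entry size above.
+ */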
+
 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
 			   struct mlx4_cmd_mailbox *mailbox)
 {
 	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
-			    MLX4_CMD_TIME_CLASS_A);
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
 			    struct mlx4_cmd_mailbox *mailbox)
 {
 	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
-			MLX4_CMD_TIME_CLASS_A);
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
 			      struct mlx4_cmd_mailbox *mailbox)
 {
 	u32 in_mod;
 
-	in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+	in_mod = (u32) port << 16 | steer << 1;
 	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
-			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_NATIVE);
 }
 
 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@
 	int err;
 
 	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
-			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+			   MLX4_CMD_NATIVE);
 
 	if (!err)
 		*hash = imm;
@@ -102,7 +122,7 @@
  * Add new entry to steering data structure.
  * All promisc QPs should be added as well
  */
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
 			      enum mlx4_steer_type steer,
 			      unsigned int index, u32 qpn)
 {
@@ -115,10 +135,8 @@
 	struct mlx4_promisc_qp *dqp = NULL;
 	u32 prot;
 	int err;
-	u8 pf_num;
 
-	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-	s_steer = &mlx4_priv(dev)->steer[pf_num];
+	s_steer = &mlx4_priv(dev)->steer[0];
 	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
 	if (!new_entry)
 		return -ENOMEM;
@@ -130,7 +148,7 @@
 	/* If the given qpn is also a promisc qp,
 	 * it should be inserted to duplicates list
 	 */
-	pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+	pqp = get_promisc_qp(dev, 0, steer, qpn);
 	if (pqp) {
 		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
 		if (!dqp) {
@@ -165,7 +183,7 @@
 		/* don't add already existing qpn */
 		if (pqp->qpn == qpn)
 			continue;
-		if (members_count == MLX4_QP_PER_MGM) {
+		if (members_count == dev->caps.num_qp_per_mgm) {
 			/* out of space */
 			err = -ENOMEM;
 			goto out_mailbox;
@@ -193,7 +211,7 @@
 }
 
 /* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
 				   enum mlx4_steer_type steer,
 				   unsigned int index, u32 qpn)
 {
@@ -201,12 +219,10 @@
 	struct mlx4_steer_index *tmp_entry, *entry = NULL;
 	struct mlx4_promisc_qp *pqp;
 	struct mlx4_promisc_qp *dqp;
-	u8 pf_num;
 
-	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-	s_steer = &mlx4_priv(dev)->steer[pf_num];
+	s_steer = &mlx4_priv(dev)->steer[0];
 
-	pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+	pqp = get_promisc_qp(dev, 0, steer, qpn);
 	if (!pqp)
 		return 0; /* nothing to do */
 
@@ -225,7 +241,7 @@
 	 * we need to add it as a duplicate to this entry
 	 * for future references */
 	list_for_each_entry(dqp, &entry->duplicates, list) {
-		if (qpn == dqp->qpn)
+		if (qpn == pqp->qpn)
 			return 0; /* qp is already duplicated */
 	}
 
@@ -241,20 +257,18 @@
 
 /* Check whether a qpn is a duplicate on steering entry
  * If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
 				  enum mlx4_steer_type steer,
 				  unsigned int index, u32 qpn)
 {
 	struct mlx4_steer *s_steer;
 	struct mlx4_steer_index *tmp_entry, *entry = NULL;
 	struct mlx4_promisc_qp *dqp, *tmp_dqp;
-	u8 pf_num;
 
-	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-	s_steer = &mlx4_priv(dev)->steer[pf_num];
+	s_steer = &mlx4_priv(dev)->steer[0];
 
 	/* if qp is not promisc, it cannot be duplicated */
-	if (!get_promisc_qp(dev, pf_num, steer, qpn))
+	if (!get_promisc_qp(dev, 0, steer, qpn))
 		return false;
 
 	/* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@
 }
 
 /* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
 				      enum mlx4_steer_type steer,
 				      unsigned int index, u32 tqpn)
 {
@@ -291,10 +305,8 @@
 	u32 members_count;
 	bool ret = false;
 	int i;
-	u8 pf_num;
 
-	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-	s_steer = &mlx4_priv(dev)->steer[pf_num];
+	s_steer = &mlx4_priv(dev)->steer[0];
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@
 	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	for (i = 0;  i < members_count; i++) {
 		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
-		if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
 			/* the qp is not promisc, the entry can't be removed */
 			goto out;
 		}
@@ -332,7 +344,7 @@
 	return ret;
 }
 
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
 			  enum mlx4_steer_type steer, u32 qpn)
 {
 	struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@
 	bool found;
 	int last_index;
 	int err;
-	u8 pf_num;
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-	s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+	s_steer = &mlx4_priv(dev)->steer[0];
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+	if (get_promisc_qp(dev, 0, steer, qpn)) {
 		err = 0;  /* Nothing to do, already exists */
 		goto out_mutex;
 	}
@@ -397,7 +408,7 @@
 		}
 		if (!found) {
 			/* Need to add the qpn to mgm */
-			if (members_count == MLX4_QP_PER_MGM) {
+			if (members_count == dev->caps.num_qp_per_mgm) {
 				/* entry is full */
 				err = -ENOMEM;
 				goto out_mailbox;
@@ -420,7 +431,7 @@
 		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
 	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-	err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
 	if (err)
 		goto out_list;
 
@@ -439,7 +450,7 @@
 	return err;
 }
 
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
 			     enum mlx4_steer_type steer, u32 qpn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@
 	bool back_to_list = false;
 	int loc, i;
 	int err;
-	u8 pf_num;
 
-	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-	s_steer = &mlx4_priv(dev)->steer[pf_num];
+	s_steer = &mlx4_priv(dev)->steer[0];
 	mutex_lock(&priv->mcg_table.mutex);
 
-	pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+	pqp = get_promisc_qp(dev, 0, steer, qpn);
 	if (unlikely(!pqp)) {
 		mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
 		/* nothing to do */
@@ -479,12 +488,13 @@
 		goto out_list;
 	}
 	mgm = mailbox->buf;
+	memset(mgm, 0, sizeof *mgm);
 	members_count = 0;
 	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
 		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
 	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-	err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
 	if (err)
 		goto out_mailbox;
 
@@ -649,12 +659,13 @@
 		}
 		index += dev->caps.num_mgms;
 
+		new_entry = 1;
 		memset(mgm, 0, sizeof *mgm);
 		memcpy(mgm->gid, gid, 16);
 	}
 
 	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-	if (members_count == MLX4_QP_PER_MGM) {
+	if (members_count == dev->caps.num_qp_per_mgm) {
 		mlx4_err(dev, "MGM at index %x is full.\n", index);
 		err = -ENOMEM;
 		goto out;
@@ -696,9 +707,9 @@
 	if (prot == MLX4_PROT_ETH) {
 		/* manage the steering entry for promisc mode */
 		if (new_entry)
-			new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+			new_steering_entry(dev, port, steer, index, qp->qpn);
 		else
-			existing_steering_entry(dev, 0, port, steer,
+			existing_steering_entry(dev, port, steer,
 						index, qp->qpn);
 	}
 	if (err && link && index != -1) {
@@ -749,7 +760,7 @@
 
 	/* if this qp is also a promisc qp, it shouldn't be removed */
 	if (prot == MLX4_PROT_ETH &&
-	    check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
 		goto out;
 
 	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@
 	mgm->qp[i - 1]     = 0;
 
 	if (prot == MLX4_PROT_ETH)
-		removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+		removed_entry = can_remove_steering_entry(dev, port, steer,
+								index, qp->qpn);
 	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
 		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
 		goto out;
@@ -828,6 +840,34 @@
 	return err;
 }
 
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+			  u8 gid[16], u8 attach, u8 block_loopback,
+			  enum mlx4_protocol prot)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err = 0;
+	int qpn;
+
+	if (!mlx4_is_mfunc(dev))
+		return -EBADF;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memcpy(mailbox->buf, gid, 16);
+	qpn = qp->qpn;
+	qpn |= (prot << 28);
+	if (attach && block_loopback)
+		qpn |= (1 << 31);
+
+	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_WRAPPED);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
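+/* Note: the in_modifier passed above packs the QPN in the low bits, the
+ * protocol starting at bit 28 and, on attach, the block-loopback flag in
+ * bit 31; the wrapped command is expected to be decoded accordingly on the
+ * master side.
+ */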
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@
 	if (prot == MLX4_PROT_ETH)
 		gid[7] |= (steer << 1);
 
-	return mlx4_qp_attach_common(dev, qp, gid,
-				     block_mcast_loopback, prot,
-				     steer);
+	if (mlx4_is_mfunc(dev))
+		return mlx4_QP_ATTACH(dev, qp, gid, 1,
+					block_mcast_loopback, prot);
+
+	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+					prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
@@ -860,22 +903,90 @@
 			!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
-	if (prot == MLX4_PROT_ETH) {
+	if (prot == MLX4_PROT_ETH)
 		gid[7] |= (steer << 1);
-	}
+
+	if (mlx4_is_mfunc(dev))
+		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
 
 	return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+			struct mlx4_qp *qp, u8 gid[16],
+			int block_mcast_loopback, enum mlx4_protocol prot)
+{
+	if (prot == MLX4_PROT_ETH &&
+			!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+		return 0;
+
+	if (prot == MLX4_PROT_ETH)
+		gid[7] |= (MLX4_UC_STEER << 1);
+
+	if (mlx4_is_mfunc(dev))
+		return mlx4_QP_ATTACH(dev, qp, gid, 1,
+					block_mcast_loopback, prot);
+
+	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+					prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+			       u8 gid[16], enum mlx4_protocol prot)
+{
+	if (prot == MLX4_PROT_ETH &&
+			!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+		return 0;
+
+	if (prot == MLX4_PROT_ETH)
+		gid[7] |= (MLX4_UC_STEER << 1);
+
+	if (mlx4_is_mfunc(dev))
+		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd)
+{
+	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+	u8 port = vhcr->in_param >> 62;
+	enum mlx4_steer_type steer = vhcr->in_modifier;
+
+	/* Promiscuous unicast is not allowed in mfunc */
+	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+		return 0;
+
+	if (vhcr->op_modifier)
+		return add_promisc_qp(dev, port, steer, qpn);
+	else
+		return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+			enum mlx4_steer_type steer, u8 add, u8 port)
+{
+	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_WRAPPED);
+}
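+/* This encoding mirrors mlx4_PROMISC_wrapper() above: the QPN travels in the
+ * low 32 bits of in_param, the port in its top two bits, the steer type in
+ * the in_modifier and add/remove in the op_modifier.
+ */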
 
 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
+	if (mlx4_is_mfunc(dev))
+		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
 
-	return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
 
@@ -884,8 +995,10 @@
 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
+	if (mlx4_is_mfunc(dev))
+		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
 
-	return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
 
@@ -894,8 +1007,10 @@
 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
+	if (mlx4_is_mfunc(dev))
+		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
 
-	return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
 
@@ -904,7 +1019,10 @@
 	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
-	return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+	if (mlx4_is_mfunc(dev))
+		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5dfa68f..a80121a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,21 +46,25 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
 
 #define DRV_NAME	"mlx4_core"
-#define DRV_VERSION	"1.0"
-#define DRV_RELDATE	"July 14, 2011"
+#define PFX		DRV_NAME ": "
+#define DRV_VERSION	"1.1"
+#define DRV_RELDATE	"Dec, 2011"
 
 enum {
 	MLX4_HCR_BASE		= 0x80680,
 	MLX4_HCR_SIZE		= 0x0001c,
-	MLX4_CLR_INT_SIZE	= 0x00008
+	MLX4_CLR_INT_SIZE	= 0x00008,
+	MLX4_SLAVE_COMM_BASE	= 0x0,
+	MLX4_COMM_PAGESIZE	= 0x1000
 };
 
 enum {
-	MLX4_MGM_ENTRY_SIZE	=  0x100,
-	MLX4_QP_PER_MGM		= 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
-	MLX4_MTT_ENTRY_PER_SEG	= 8
+	MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+	MLX4_MAX_QP_PER_MGM	= 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+	MLX4_MTT_ENTRY_PER_SEG	= 8,
 };
 
 enum {
@@ -80,6 +84,94 @@
 	MLX4_NUM_CMPTS		= MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
+enum mlx4_mr_state {
+	MLX4_MR_DISABLED = 0,
+	MLX4_MR_EN_HW,
+	MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME		10000
+enum {
+	MLX4_COMM_CMD_RESET,
+	MLX4_COMM_CMD_VHCR0,
+	MLX4_COMM_CMD_VHCR1,
+	MLX4_COMM_CMD_VHCR2,
+	MLX4_COMM_CMD_VHCR_EN,
+	MLX4_COMM_CMD_VHCR_POST,
+	MLX4_COMM_CMD_FLR = 254
+};
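+/* The VHCR0..VHCR_EN commands hand the 64-bit VHCR DMA address to the master
+ * 16 bits at a time, highest word first (see mlx4_init_slave());
+ * MLX4_COMM_CMD_VHCR_POST, not shown here, presumably triggers execution of a
+ * command posted in the VHCR.
+ */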
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES	10
+#define SLEEP_TIME_IN_RESET	(2 * 1000)
+enum mlx4_resource {
+	RES_QP,
+	RES_CQ,
+	RES_SRQ,
+	RES_XRCD,
+	RES_MPT,
+	RES_MTT,
+	RES_MAC,
+	RES_VLAN,
+	RES_EQ,
+	RES_COUNTER,
+	MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+	RES_OP_RESERVE,
+	RES_OP_RESERVE_AND_MAP,
+	RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW through the communication channel.
+ * It is big endian, and has the same structure as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+	u64	in_param;
+	u64	out_param;
+	u32	in_modifier;
+	u32	errno;
+	u16	op;
+	u16	token;
+	u8	op_modifier;
+	u8	e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+	__be64 in_param;
+	__be32 in_modifier;
+	__be64 out_param;
+	__be16 token;
+	u16 reserved;
+	u8 status;
+	u8 flags;
+	__be16 opcode;
+};
+
+struct mlx4_cmd_info {
+	u16 opcode;
+	bool has_inbox;
+	bool has_outbox;
+	bool out_is_imm;
+	bool encode_slave_id;
+	int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+		      struct mlx4_cmd_mailbox *inbox);
+	int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+		       struct mlx4_cmd_mailbox *inbox,
+		       struct mlx4_cmd_mailbox *outbox,
+		       struct mlx4_cmd_info *cmd);
+};
+
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
 #else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@
 #define mlx4_warn(mdev, format, arg...) \
 	dev_warn(&mdev->pdev->dev, format, ##arg)
 
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES	(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
 struct mlx4_bitmap {
 	u32			last;
 	u32			top;
@@ -130,6 +228,147 @@
 	struct mlx4_icm	      **icm;
 };
 
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+	__be32 flags;
+	__be32 qpn;
+	__be32 key;
+	__be32 pd_flags;
+	__be64 start;
+	__be64 length;
+	__be32 lkey;
+	__be32 win_cnt;
+	u8	reserved1[3];
+	u8	mtt_rep;
+	__be64 mtt_addr;
+	__be32 mtt_sz;
+	__be32 entity_size;
+	__be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+	__be32			flags;
+	u16			reserved1[3];
+	__be16			page_offset;
+	u8			log_eq_size;
+	u8			reserved2[4];
+	u8			eq_period;
+	u8			reserved3;
+	u8			eq_max_count;
+	u8			reserved4[3];
+	u8			intr;
+	u8			log_page_size;
+	u8			reserved5[2];
+	u8			mtt_base_addr_h;
+	__be32			mtt_base_addr_l;
+	u32			reserved6[2];
+	__be32			consumer_index;
+	__be32			producer_index;
+	u32			reserved7[4];
+};
+
+struct mlx4_cq_context {
+	__be32			flags;
+	u16			reserved1[3];
+	__be16			page_offset;
+	__be32			logsize_usrpage;
+	__be16			cq_period;
+	__be16			cq_max_count;
+	u8			reserved2[3];
+	u8			comp_eqn;
+	u8			log_page_size;
+	u8			reserved3[2];
+	u8			mtt_base_addr_h;
+	__be32			mtt_base_addr_l;
+	__be32			last_notified_index;
+	__be32			solicit_producer_index;
+	__be32			consumer_index;
+	__be32			producer_index;
+	u32			reserved4[2];
+	__be64			db_rec_addr;
+};
+
+struct mlx4_srq_context {
+	__be32			state_logsize_srqn;
+	u8			logstride;
+	u8			reserved1;
+	__be16			xrcd;
+	__be32			pg_offset_cqn;
+	u32			reserved2;
+	u8			log_page_size;
+	u8			reserved3[2];
+	u8			mtt_base_addr_h;
+	__be32			mtt_base_addr_l;
+	__be32			pd;
+	__be16			limit_watermark;
+	__be16			wqe_cnt;
+	u16			reserved4;
+	__be16			wqe_counter;
+	u32			reserved5;
+	__be64			db_rec_addr;
+};
+
+struct mlx4_eqe {
+	u8			reserved1;
+	u8			type;
+	u8			reserved2;
+	u8			subtype;
+	union {
+		u32		raw[6];
+		struct {
+			__be32	cqn;
+		} __packed comp;
+		struct {
+			u16	reserved1;
+			__be16	token;
+			u32	reserved2;
+			u8	reserved3[3];
+			u8	status;
+			__be64	out_param;
+		} __packed cmd;
+		struct {
+			__be32	qpn;
+		} __packed qp;
+		struct {
+			__be32	srqn;
+		} __packed srq;
+		struct {
+			__be32	cqn;
+			u32	reserved1;
+			u8	reserved2[3];
+			u8	syndrome;
+		} __packed cq_err;
+		struct {
+			u32	reserved1[2];
+			__be32	port;
+		} __packed port_change;
+		struct {
+			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
+			u32 reserved;
+			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+		} __packed comm_channel_arm;
+		struct {
+			u8	port;
+			u8	reserved[3];
+			__be64	mac;
+		} __packed mac_update;
+		struct {
+			u8	port;
+		} __packed sw_event;
+		struct {
+			__be32	slave_id;
+		} __packed flr_event;
+	}			event;
+	u8			slave_id;
+	u8			reserved3[2];
+	u8			owner;
+} __packed;
+
 struct mlx4_eq {
 	struct mlx4_dev	       *dev;
 	void __iomem	       *doorbell;
@@ -142,6 +381,18 @@
 	struct mlx4_mtt		mtt;
 };
 
+struct mlx4_slave_eqe {
+	u8 type;
+	u8 port;
+	u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+	u32 eqn;
+	u16 token;
+	u64 event_type;
+};
+
 struct mlx4_profile {
 	int			num_qp;
 	int			rdmarc_per_qp;
@@ -155,16 +406,37 @@
 struct mlx4_fw {
 	u64			clr_int_base;
 	u64			catas_offset;
+	u64			comm_base;
 	struct mlx4_icm	       *fw_icm;
 	struct mlx4_icm	       *aux_icm;
 	u32			catas_size;
 	u16			fw_pages;
 	u8			clr_int_bar;
 	u8			catas_bar;
+	u8			comm_bar;
 };
 
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
+struct mlx4_comm {
+	u32			slave_write;
+	u32			slave_read;
+};
+
+enum {
+	MLX4_MCAST_CONFIG       = 0,
+	MLX4_MCAST_DISABLE      = 1,
+	MLX4_MCAST_ENABLE       = 2,
+};
+
+#define VLAN_FLTR_SIZE	128
+
+struct mlx4_vlan_fltr {
+	__be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+	struct list_head list;
+	u64 addr;
+};
 
 struct mlx4_promisc_qp {
 	struct list_head list;
@@ -177,19 +449,87 @@
 	struct list_head duplicates;
 };
 
-struct mlx4_mgm {
-	__be32			next_gid_index;
-	__be32			members_count;
-	u32			reserved[2];
-	u8			gid[16];
-	__be32			qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+	u8 comm_toggle;
+	u8 last_cmd;
+	u8 init_port_mask;
+	bool active;
+	u8 function;
+	dma_addr_t vhcr_dma;
+	u16 mtu[MLX4_MAX_PORTS + 1];
+	__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+	struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+	struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+	struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+	struct mlx4_slave_event_eq_info event_eq;
+	u16 eq_pi;
+	u16 eq_ci;
+	spinlock_t lock;
+	/* initialized via the kzalloc */
+	u8 is_slave_going_down;
+	u32 cookie;
 };
+
+struct slave_list {
+	struct mutex mutex;
+	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+	spinlock_t lock;
+	/* one tree per resource type */
+	struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+	/* array of slave lists, one per slave */
+	struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE	128
+struct mlx4_slave_event_eq {
+	u32 eqn;
+	u32 cons;
+	u32 prod;
+	struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+	int proxy_qp0_active;
+	int qp0_active;
+	int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+	struct mlx4_slave_state *slave_state;
+	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+	int			init_port_ref[MLX4_MAX_PORTS + 1];
+	u16			max_mtu[MLX4_MAX_PORTS + 1];
+	int			disable_mcast_ref[MLX4_MAX_PORTS + 1];
+	struct mlx4_resource_tracker res_tracker;
+	struct workqueue_struct *comm_wq;
+	struct work_struct	comm_work;
+	struct work_struct	slave_event_work;
+	struct work_struct	slave_flr_event_work;
+	spinlock_t		slave_state_lock;
+	__be32			comm_arm_bit_vector[4];
+	struct mlx4_eqe		cmd_eqe;
+	struct mlx4_slave_event_eq slave_eq;
+	struct mutex		gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+	struct mlx4_comm __iomem       *comm;
+	struct mlx4_vhcr_cmd	       *vhcr;
+	dma_addr_t			vhcr_dma;
+
+	struct mlx4_mfunc_master_ctx	master;
+};
+
 struct mlx4_cmd {
 	struct pci_pool	       *pool;
 	void __iomem	       *hcr;
 	struct mutex		hcr_mutex;
 	struct semaphore	poll_sem;
 	struct semaphore	event_sem;
+	struct semaphore	slave_sem;
 	int			max_cmds;
 	spinlock_t		context_lock;
 	int			free_head;
@@ -197,6 +537,7 @@
 	u16			token_mask;
 	u8			use_events;
 	u8			toggle;
+	u8			comm_toggle;
 };
 
 struct mlx4_uar_table {
@@ -287,6 +628,48 @@
 	int			max;
 };
 
+#define SET_PORT_GEN_ALL_VALID		0x7
+#define SET_PORT_PROMISC_SHIFT		31
+#define SET_PORT_MC_PROMISC_SHIFT	30
+
+enum {
+	MCAST_DIRECT_ONLY	= 0,
+	MCAST_DIRECT		= 1,
+	MCAST_DEFAULT		= 2
+};
+
+
+struct mlx4_set_port_general_context {
+	u8 reserved[3];
+	u8 flags;
+	u16 reserved2;
+	__be16 mtu;
+	u8 pptx;
+	u8 pfctx;
+	u16 reserved3;
+	u8 pprx;
+	u8 pfcrx;
+	u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+	__be32 base_qpn;
+	u8 rererved;
+	u8 n_mac;
+	u8 n_vlan;
+	u8 n_prio;
+	u8 reserved2[3];
+	u8 mac_miss;
+	u8 intra_no_vlan;
+	u8 no_vlan;
+	u8 intra_vlan_miss;
+	u8 vlan_miss;
+	u8 reserved3[3];
+	u8 no_vlan_prio;
+	__be32 promisc;
+	__be32 mcast;
+};
+
 struct mlx4_mac_entry {
 	u64 mac;
 };
@@ -333,6 +716,7 @@
 
 	struct mlx4_fw		fw;
 	struct mlx4_cmd		cmd;
+	struct mlx4_mfunc	mfunc;
 
 	struct mlx4_bitmap	pd_bitmap;
 	struct mlx4_bitmap	xrcd_bitmap;
@@ -359,6 +743,7 @@
 	struct list_head	bf_list;
 	struct mutex		bf_mutex;
 	struct io_mapping	*bf_mapping;
+	int			reserved_mtts;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+		     struct mlx4_vhcr *vhcr,
+		     struct mlx4_cmd_mailbox *inbox,
+		     struct mlx4_cmd_mailbox *outbox,
+		     struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+			    int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		     int start_index, int npages, u64 *page_list);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@
 		      struct mlx4_profile *request,
 		      struct mlx4_dev_cap *dev_cap,
 		      struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+			struct mlx4_cmd_mailbox *outbox,
+			struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+			struct mlx4_vhcr *vhcr,
+			struct mlx4_cmd_mailbox *inbox,
+			struct mlx4_cmd_mailbox *outbox,
+			struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+			     struct mlx4_vhcr *vhcr,
+			     struct mlx4_cmd_mailbox *inbox,
+			     struct mlx4_cmd_mailbox *outbox,
+			     struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+			     struct mlx4_vhcr *vhcr,
+			     struct mlx4_cmd_mailbox *inbox,
+			     struct mlx4_cmd_mailbox *outbox,
+			     struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
 int mlx4_cmd_use_events(struct mlx4_dev *dev);
 void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+		  unsigned long timeout);
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -452,12 +993,113 @@
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions */
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+				    enum mlx4_resource resource_type,
+				    int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
 int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
 
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd);
 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  enum mlx4_protocol prot, enum mlx4_steer_type steer);
 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  int block_mcast_loopback, enum mlx4_protocol prot,
 			  enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+				     int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+				struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+	*((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+	*arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+	return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+	return (u32)(*arg >> 32);
+}
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+	return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
+#define NOT_MASKED_PD_BITS 17
+
 #endif /* MLX4_H */
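
The set_param_l/set_param_h and get_param_l/get_param_h inlines added above pack two 32-bit values into the single 64-bit immediate parameter carried by mlx4_cmd()/mlx4_cmd_imm(). The following is a stand-alone sketch of the same packing, illustrative only: it uses explicit masking rather than the kernel's pointer cast (which makes set_param_l little-endian specific), and the example values are made up.

#include <stdint.h>
#include <stdio.h>

/* Pack/unpack two 32-bit halves of a 64-bit command parameter,
 * mirroring the intended behaviour of set_param_l/set_param_h and
 * get_param_l/get_param_h on a little-endian host. */
static void param_set_low(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | val;		/* low 32 bits */
}

static void param_set_high(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0x00000000ffffffffULL) | ((uint64_t)val << 32);
}

static uint32_t param_get_low(const uint64_t *arg)
{
	return (uint32_t)(*arg & 0xffffffff);
}

static uint32_t param_get_high(const uint64_t *arg)
{
	return (uint32_t)(*arg >> 32);
}

int main(void)
{
	uint64_t in_param = 0;

	param_set_low(&in_param, 64);	/* e.g. a count in the low half */
	param_set_high(&in_param, 8);	/* e.g. an alignment in the high half */
	printf("in_param=0x%016llx low=%u high=%u\n",
	       (unsigned long long)in_param,
	       param_get_low(&in_param), param_get_high(&in_param));
	return 0;
}
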
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 207b5ad..f2a8e65 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
 #include "en_port.h"
 
 #define DRV_NAME	"mlx4_en"
-#define DRV_VERSION	"1.5.4.2"
-#define DRV_RELDATE	"October 2011"
+#define DRV_VERSION	"2.0"
+#define DRV_RELDATE	"Dec 2011"
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
@@ -366,16 +366,6 @@
 	enum mlx4_qp_state indir_state;
 };
 
-struct mlx4_en_rss_context {
-	__be32 base_qpn;
-	__be32 default_qpn;
-	u16 reserved;
-	u8 hash_fn;
-	u8 flags;
-	__be32 rss_key[10];
-	__be32 base_qpn_udp;
-};
-
 struct mlx4_en_port_state {
 	int link_state;
 	int link_speed;
@@ -463,6 +453,7 @@
 	int base_qpn;
 
 	struct mlx4_en_rss_map rss_map;
+	u32 ctrl_flags;
 	u32 flags;
 #define MLX4_EN_FLAG_PROMISC	0x1
 #define MLX4_EN_FLAG_MC_PROMISC	0x2
@@ -495,9 +486,9 @@
 enum mlx4_en_wol {
 	MLX4_EN_WOL_MAGIC = (1ULL << 61),
 	MLX4_EN_WOL_ENABLED = (1ULL << 62),
-	MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
 };
 
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
 void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index efa3e77..01df556 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,35 +32,17 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
 #include "icm.h"
 
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
-	__be32 flags;
-	__be32 qpn;
-	__be32 key;
-	__be32 pd_flags;
-	__be64 start;
-	__be64 length;
-	__be32 lkey;
-	__be32 win_cnt;
-	u8	reserved1[3];
-	u8	mtt_rep;
-	__be64 mtt_seg;
-	__be32 mtt_sz;
-	__be32 entity_size;
-	__be32 first_byte_offset;
-} __packed;
-
 #define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO	    (1 << 17)
@@ -180,22 +162,48 @@
 	kfree(buddy->num_free);
 }
 
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 	u32 seg;
+	int seg_order;
+	u32 offset;
 
-	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+	seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
 	if (seg == -1)
 		return -1;
 
-	if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
-				 seg + (1 << order) - 1)) {
-		mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+	offset = seg * (1 << log_mtts_per_seg);
+
+	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+				 offset + (1 << order) - 1)) {
+		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
 		return -1;
 	}
 
-	return seg;
+	return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+	u64 in_param;
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, order);
+		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+						       RES_OP_RESERVE_AND_MAP,
+						       MLX4_CMD_ALLOC_RES,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_WRAPPED);
+		if (err)
+			return -1;
+		return get_param_l(&out_param);
+	}
+	return __mlx4_alloc_mtt_range(dev, order);
 }
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
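
The reworked __mlx4_alloc_mtt_range() above now works in MTT-entry units: the requested order is first reduced to a buddy order expressed in segments (a segment holds 1 << log_mtts_per_seg entries), and the segment number returned by the buddy allocator is scaled back to an MTT-entry offset. A rough stand-alone sketch of that arithmetic follows, assuming log_mtts_per_seg is 3 purely for illustration.

#include <stdio.h>

#define LOG_MTTS_PER_SEG 3	/* assumed: 8 MTT entries per segment */

/* Map an MTT order to a buddy (segment) order: max(order - log, 0). */
static int mtt_order_to_seg_order(int order)
{
	int seg_order = order - LOG_MTTS_PER_SEG;

	return seg_order > 0 ? seg_order : 0;
}

int main(void)
{
	int order = 5;			/* request 2^5 = 32 MTT entries */
	int seg_order = mtt_order_to_seg_order(order);
	unsigned int seg = 7;		/* pretend the buddy allocator returned segment 7 */
	unsigned int offset = seg * (1u << LOG_MTTS_PER_SEG);

	printf("order %d -> seg_order %d, seg %u -> mtt offset %u\n",
	       order, seg_order, seg, offset);
	return 0;
}
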
@@ -210,33 +218,63 @@
 	} else
 		mtt->page_shift = page_shift;
 
-	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
 		++mtt->order;
 
-	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
-	if (mtt->first_seg == -1)
+	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+	if (mtt->offset == -1)
 		return -ENOMEM;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
 {
+	u32 first_seg;
+	int seg_order;
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
+	seg_order = max_t(int, order - log_mtts_per_seg, 0);
+	first_seg = offset / (1 << log_mtts_per_seg);
+
+	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+			     offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, offset);
+		set_param_h(&in_param, order);
+		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+						       MLX4_CMD_FREE_RES,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed to free mtt range at:"
+				  "%d order:%d\n", offset, order);
+		return;
+	}
+	__mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
 	if (mtt->order < 0)
 		return;
 
-	mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-			     mtt->first_seg + (1 << mtt->order) - 1);
+	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
 
 u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
 {
-	return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
 
@@ -253,69 +291,205 @@
 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			  int mpt_index)
 {
-	return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
-			MLX4_CMD_TIME_CLASS_B);
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, mpt_index,
+			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			  int mpt_index)
 {
 	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
-			    !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+			    !mailbox, MLX4_CMD_HW2SW_MPT,
+			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+}
+
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+			  u32 *base_mridx)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u32 mridx;
+
+	mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+	if (mridx == -1)
+		return -ENOMEM;
+
+	*base_mridx = mridx;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+			   u64 iova, u64 size, u32 access, int npages,
+			   int page_shift, struct mlx4_mr *mr)
+{
+	mr->iova       = iova;
+	mr->size       = size;
+	mr->pd	       = pd;
+	mr->access     = access;
+	mr->enabled    = MLX4_MR_DISABLED;
+	mr->key	       = hw_index_to_key(mridx);
+
+	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+			  struct mlx4_cmd_mailbox *mailbox,
+			  int num_entries)
+{
+	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+			MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
+}
+
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+	u64 out_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+				   MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			return -1;
+		return get_param_l(&out_param);
+	}
+	return  __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, index);
+		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+			       MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed to release mr index:%d\n",
+				  index);
+		return;
+	}
+	__mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+	u64 param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&param, index);
+		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+							MLX4_CMD_ALLOC_RES,
+							MLX4_CMD_TIME_CLASS_A,
+							MLX4_CMD_WRAPPED);
+	}
+	return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+	mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, index);
+		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+			     MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+				  index);
+		return;
+	}
+	return __mlx4_mr_free_icm(dev, index);
 }
 
 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
 		  int npages, int page_shift, struct mlx4_mr *mr)
 {
-	struct mlx4_priv *priv = mlx4_priv(dev);
 	u32 index;
 	int err;
 
-	index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+	index = mlx4_mr_reserve(dev);
 	if (index == -1)
 		return -ENOMEM;
 
-	mr->iova       = iova;
-	mr->size       = size;
-	mr->pd	       = pd;
-	mr->access     = access;
-	mr->enabled    = 0;
-	mr->key	       = hw_index_to_key(index);
-
-	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+				     access, npages, page_shift, mr);
 	if (err)
-		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+		mlx4_mr_release(dev, index);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
-	if (mr->enabled) {
+	if (mr->enabled == MLX4_MR_EN_HW) {
 		err = mlx4_HW2SW_MPT(dev, NULL,
 				     key_to_hw_index(mr->key) &
 				     (dev->caps.num_mpts - 1));
 		if (err)
-			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
-	}
+			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
 
+		mr->enabled = MLX4_MR_EN_SW;
+	}
 	mlx4_mtt_cleanup(dev, &mr->mtt);
-	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+	mlx4_mr_free_reserved(dev, mr);
+	if (mr->enabled)
+		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+	mlx4_mr_release(dev, key_to_hw_index(mr->key));
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_mpt_entry *mpt_entry;
 	int err;
 
-	err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
 	if (err)
 		return err;
 
@@ -340,9 +514,10 @@
 
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
-		mpt_entry->mtt_seg = 0;
+		mpt_entry->mtt_addr = 0;
 	} else {
-		mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+						  &mr->mtt));
 	}
 
 	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@
 		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
 		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
 						   MLX4_MPT_PD_FLAG_RAE);
-		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-						   dev->caps.mtts_per_seg);
+		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
 	} else {
 		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
 	}
@@ -362,8 +536,7 @@
 		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
 		goto err_cmd;
 	}
-
-	mr->enabled = 1;
+	mr->enabled = MLX4_MR_EN_HW;
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -373,7 +546,7 @@
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-	mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@
 	__be64 *mtts;
 	dma_addr_t dma_handle;
 	int i;
-	int s = start_index * sizeof (u64);
 
-	/* All MTTs must fit in the same page */
-	if (start_index / (PAGE_SIZE / sizeof (u64)) !=
-	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
-		return -EINVAL;
+	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+			       start_index, &dma_handle);
 
-	if (start_index & (dev->caps.mtts_per_seg - 1))
-		return -EINVAL;
-
-	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
-				s / dev->caps.mtt_entry_sz, &dma_handle);
 	if (!mtts)
 		return -ENOMEM;
 
@@ -412,27 +577,75 @@
 	return 0;
 }
 
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		     int start_index, int npages, u64 *page_list)
+{
+	int err = 0;
+	int chunk;
+	int mtts_per_page;
+	int max_mtts_first_page;
+
+	/* compute how many mtts fit in the first page */
+	mtts_per_page = PAGE_SIZE / sizeof(u64);
+	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+			      % mtts_per_page;
+
+	chunk = min_t(int, max_mtts_first_page, npages);
+
+	while (npages > 0) {
+		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
+		if (err)
+			return err;
+		npages      -= chunk;
+		start_index += chunk;
+		page_list   += chunk;
+
+		chunk = min_t(int, mtts_per_page, npages);
+	}
+	return err;
+}
+
 int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		   int start_index, int npages, u64 *page_list)
 {
+	struct mlx4_cmd_mailbox *mailbox = NULL;
+	__be64 *inbox = NULL;
 	int chunk;
-	int err;
+	int err = 0;
+	int i;
 
 	if (mtt->order < 0)
 		return -EINVAL;
 
-	while (npages > 0) {
-		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
-		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
-		if (err)
-			return err;
+	if (mlx4_is_mfunc(dev)) {
+		mailbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(mailbox))
+			return PTR_ERR(mailbox);
+		inbox = mailbox->buf;
 
-		npages      -= chunk;
-		start_index += chunk;
-		page_list   += chunk;
+		while (npages > 0) {
+			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+				      npages);
+			inbox[0] = cpu_to_be64(mtt->offset + start_index);
+			inbox[1] = 0;
+			for (i = 0; i < chunk; ++i)
+				inbox[i + 2] = cpu_to_be64(page_list[i] |
+					       MLX4_MTT_FLAG_PRESENT);
+			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+			if (err) {
+				mlx4_free_cmd_mailbox(dev, mailbox);
+				return err;
+			}
+
+			npages      -= chunk;
+			start_index += chunk;
+			page_list   += chunk;
+		}
+		mlx4_free_cmd_mailbox(dev, mailbox);
+		return err;
 	}
 
-	return 0;
+	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
 }
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
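
In the native path, __mlx4_write_mtt() above splits the write so that no call to mlx4_write_mtt_chunk() crosses a page of the MTT table: the first chunk is trimmed to the end of the page that (mtt->offset + start_index) falls in, and later chunks are at most PAGE_SIZE / sizeof(u64) entries. A stand-alone sketch of that chunking is below; the page size and offsets are example values only.

#include <stdio.h>

#define PAGE_MTTS 512	/* PAGE_SIZE / sizeof(u64) for a 4K page */

/* Print the chunk boundaries the write would use. */
static void show_chunks(int mtt_offset, int start_index, int npages)
{
	int mtts_per_page = PAGE_MTTS;
	int max_first = mtts_per_page - (mtt_offset + start_index) % mtts_per_page;
	int chunk = npages < max_first ? npages : max_first;

	while (npages > 0) {
		printf("write %d entries at index %d\n", chunk, start_index);
		npages      -= chunk;
		start_index += chunk;
		chunk = npages < mtts_per_page ? npages : mtts_per_page;
	}
}

int main(void)
{
	show_chunks(1000, 0, 600);	/* first chunk stops at the page boundary */
	return 0;
}
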
 
@@ -462,21 +675,34 @@
 
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_mr_table *mr_table = &priv->mr_table;
 	int err;
 
+	if (!is_power_of_2(dev->caps.num_mpts))
+		return -EINVAL;
+
+	/* Nothing to do for slaves - all MR handling is forwarded
+	 * to the master */
+	if (mlx4_is_slave(dev))
+		return 0;
+
 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
 			       ~0, dev->caps.reserved_mrws, 0);
 	if (err)
 		return err;
 
 	err = mlx4_buddy_init(&mr_table->mtt_buddy,
-			      ilog2(dev->caps.num_mtt_segs));
+			      ilog2(dev->caps.num_mtts /
+			      (1 << log_mtts_per_seg)));
 	if (err)
 		goto err_buddy;
 
 	if (dev->caps.reserved_mtts) {
-		if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+		priv->reserved_mtts =
+			mlx4_alloc_mtt_range(dev,
+					     fls(dev->caps.reserved_mtts - 1));
+		if (priv->reserved_mtts < 0) {
 			mlx4_warn(dev, "MTT table of order %d is too small.\n",
 				  mr_table->mtt_buddy.max_order);
 			err = -ENOMEM;
@@ -497,8 +723,14 @@
 
 void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_mr_table *mr_table = &priv->mr_table;
 
+	if (mlx4_is_slave(dev))
+		return;
+	if (priv->reserved_mtts >= 0)
+		mlx4_free_mtt_range(dev, priv->reserved_mtts,
+				    fls(dev->caps.reserved_mtts - 1));
 	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
 	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
@@ -581,7 +813,7 @@
 		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	u64 mtt_seg;
+	u64 mtt_offset;
 	int err = -ENOMEM;
 
 	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@
 	if (err)
 		return err;
 
-	mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+	mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
 
 	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
-				    fmr->mr.mtt.first_seg,
+				    fmr->mr.mtt.offset,
 				    &fmr->dma_handle);
+
 	if (!fmr->mtts) {
 		err = -ENOMEM;
 		goto err_free;
@@ -619,6 +852,46 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
 
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+			    u32 pd, u32 access, int max_pages,
+			    int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int err = -ENOMEM;
+
+	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+		return -EINVAL;
+
+	/* All MTTs must fit in the same page */
+	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+		return -EINVAL;
+
+	fmr->page_shift = page_shift;
+	fmr->max_pages  = max_pages;
+	fmr->max_maps   = max_maps;
+	fmr->maps = 0;
+
+	err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+				     page_shift, &fmr->mr);
+	if (err)
+		return err;
+
+	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+				    fmr->mr.mtt.offset,
+				    &fmr->dma_handle);
+	if (!fmr->mtts) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	return 0;
+
+err_free:
+	mlx4_mr_free_reserved(dev, &fmr->mr);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey)
 {
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+
 	if (!fmr->maps)
 		return;
 
 	fmr->maps = 0;
 
-	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		printk(KERN_WARNING "mlx4_core: mlx4_alloc_cmd_mailbox"
+		       " failed (%d)\n", err);
+		return;
+	}
+
+	err = mlx4_HW2SW_MPT(dev, NULL,
+			     key_to_hw_index(fmr->mr.key) &
+			     (dev->caps.num_mpts - 1));
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	if (err) {
+		printk(KERN_WARNING "mlx4_core: mlx4_HW2SW_MPT failed (%d)\n",
+		       err);
+		return;
+	}
+	fmr->mr.enabled = MLX4_MR_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -654,15 +947,28 @@
 	if (fmr->maps)
 		return -EBUSY;
 
-	fmr->mr.enabled = 0;
 	mlx4_mr_free(dev, &fmr->mr);
+	fmr->mr.enabled = MLX4_MR_DISABLED;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+	if (fmr->maps)
+		return -EBUSY;
+
+	mlx4_mr_free_reserved(dev, &fmr->mr);
+	fmr->mr.enabled = MLX4_MR_DISABLED;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+			MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 260ed25..5c9a54d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io-mapping.h>
@@ -51,7 +52,8 @@
 	*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
 	if (*pdn == -1)
 		return -ENOMEM;
-
+	if (mlx4_is_mfunc(dev))
+		*pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
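
With multiple functions active, mlx4_pd_alloc() above embeds the function number (plus one) above bit NOT_MASKED_PD_BITS of the PD number, so PDs handed out by different functions occupy disjoint ranges even though each function draws from its own bitmap. A tiny sketch of that composition; the bitmap value and function numbers are invented.

#include <stdio.h>
#include <stdint.h>

#define NOT_MASKED_PD_BITS 17	/* mirrors the constant added to mlx4.h above */

/* Combine a per-function bitmap PD with the function id. */
static uint32_t make_pdn(uint32_t bitmap_pdn, uint32_t function)
{
	return bitmap_pdn | ((function + 1) << NOT_MASKED_PD_BITS);
}

int main(void)
{
	printf("PF  pdn: 0x%x\n", make_pdn(5, 0));	/* function 0 */
	printf("VF1 pdn: 0x%x\n", make_pdn(5, 1));	/* function 1 */
	return 0;
}
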
@@ -85,7 +87,8 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-				(1 << 24) - 1, dev->caps.reserved_pds, 0);
+				(1 << NOT_MASKED_PD_BITS) - 1,
+				 dev->caps.reserved_pds, 0);
 }
 
 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
 {
+	int offset;
+
 	uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
 	if (uar->index == -1)
 		return -ENOMEM;
 
-	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+	if (mlx4_is_slave(dev))
+		offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+				       dev->caps.uar_page_size);
+	else
+		offset = uar->index;
+	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
 	uar->map = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
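
For a slave, mlx4_uar_alloc() above wraps the global UAR index modulo the number of UAR pages that fit in its BAR 2, since a virtual function only maps its own slice of the UAR space; the master keeps using the index directly. A quick sketch of the resulting pfn computation; the BAR address, BAR length and page sizes are example values.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bar2_start = 0xd0000000;	/* pci_resource_start(pdev, 2) */
	uint64_t bar2_len   = 1 << 20;		/* pci_resource_len(pdev, 2): 1 MB */
	unsigned int uar_page_size = 4096;
	unsigned int page_shift = 12;		/* 4K pages */
	int index = 300;			/* index handed out by the bitmap */

	int offset = index % (int)(bar2_len / uar_page_size);	/* slave case */
	uint64_t pfn = (bar2_start >> page_shift) + offset;

	printf("uar index %d -> offset %d -> pfn 0x%llx\n",
	       index, offset, (unsigned long long)pfn);
	return 0;
}
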
@@ -232,7 +241,7 @@
 
 	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
 				dev->caps.num_uars, dev->caps.num_uars - 1,
-				max(128, dev->caps.reserved_uars), 0);
+				dev->caps.reserved_uars, 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index d942aea..88b52e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -70,41 +70,12 @@
 	table->total = 0;
 }
 
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
-				   __be64 *entries)
-{
-	struct mlx4_cmd_mailbox *mailbox;
-	u32 in_mod;
-	int err;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-
-	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
-	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
-	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B);
-
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
-			     u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
 	struct mlx4_qp qp;
 	u8 gid[16] = {0};
 	int err;
 
-	if (reserve) {
-		err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
-		if (err) {
-			mlx4_err(dev, "Failed to reserve qp for mac registration\n");
-			return err;
-		}
-	}
 	qp.qpn = *qpn;
 
 	mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@
 	gid[5] = port;
 	gid[7] = MLX4_UC_STEER << 1;
 
-	err = mlx4_qp_attach_common(dev, &qp, gid, 0,
-				    MLX4_PROT_ETH, MLX4_UC_STEER);
-	if (err && reserve)
-		mlx4_qp_release_range(dev, *qpn, 1);
+	err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+	if (err)
+		mlx4_warn(dev, "Failed Attaching Unicast\n");
 
 	return err;
 }
 
 static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-				  u64 mac, int qpn, u8 free)
+				  u64 mac, int qpn)
 {
 	struct mlx4_qp qp;
 	u8 gid[16] = {0};
@@ -134,89 +104,9 @@
 	gid[5] = port;
 	gid[7] = MLX4_UC_STEER << 1;
 
-	mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
-	if (free)
-		mlx4_qp_release_range(dev, qpn, 1);
+	mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
 }
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
-{
-	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-	struct mlx4_mac_table *table = &info->mac_table;
-	struct mlx4_mac_entry *entry;
-	int i, err = 0;
-	int free = -1;
-
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-		err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
-		if (err)
-			return err;
-
-		entry = kmalloc(sizeof *entry, GFP_KERNEL);
-		if (!entry) {
-			mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-			return -ENOMEM;
-		}
-
-		entry->mac = mac;
-		err = radix_tree_insert(&info->mac_tree, *qpn, entry);
-		if (err) {
-			kfree(entry);
-			mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-			return err;
-		}
-	}
-
-	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
-
-	mutex_lock(&table->mutex);
-	for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
-		if (free < 0 && !table->refs[i]) {
-			free = i;
-			continue;
-		}
-
-		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-			/* MAC already registered, increase references count */
-			++table->refs[i];
-			goto out;
-		}
-	}
-
-	if (free < 0) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	mlx4_dbg(dev, "Free MAC index is %d\n", free);
-
-	if (table->total == table->max) {
-		/* No free mac entries */
-		err = -ENOSPC;
-		goto out;
-	}
-
-	/* Register new MAC */
-	table->refs[free] = 1;
-	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
-
-	err = mlx4_set_port_mac_table(dev, port, table->entries);
-	if (unlikely(err)) {
-		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
-		table->refs[free] = 0;
-		table->entries[free] = 0;
-		goto out;
-	}
-
-	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-		*qpn = info->base_qpn + free;
-	++table->total;
-out:
-	mutex_unlock(&table->mutex);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
-
 static int validate_index(struct mlx4_dev *dev,
 			  struct mlx4_mac_table *table, int index)
 {
@@ -233,67 +123,251 @@
 		      struct mlx4_mac_table *table, u64 mac)
 {
 	int i;
+
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+		if ((mac & MLX4_MAC_MASK) ==
+		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
 			return i;
 	}
 	/* Mac not found */
 	return -EINVAL;
 }
 
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-	struct mlx4_mac_table *table = &info->mac_table;
-	int index = qpn - info->base_qpn;
 	struct mlx4_mac_entry *entry;
+	int index = 0;
+	int err = 0;
+
+	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+			(unsigned long long) mac);
+	index = mlx4_register_mac(dev, port, mac);
+	if (index < 0) {
+		err = index;
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+			 (unsigned long long) mac);
+		return err;
+	}
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+		*qpn = info->base_qpn + index;
+		return 0;
+	}
+
+	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+	mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+	if (err) {
+		mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+		goto qp_err;
+	}
+
+	err = mlx4_uc_steer_add(dev, port, mac, qpn);
+	if (err)
+		goto steer_err;
+
+	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	if (!entry) {
+		err = -ENOMEM;
+		goto alloc_err;
+	}
+	entry->mac = mac;
+	err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+	if (err)
+		goto insert_err;
+	return 0;
+
+insert_err:
+	kfree(entry);
+
+alloc_err:
+	mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+	mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+	mlx4_unregister_mac(dev, port, mac);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_entry *entry;
+
+	mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
+		 (unsigned long long) mac);
+	mlx4_unregister_mac(dev, port, mac);
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
 		entry = radix_tree_lookup(&info->mac_tree, qpn);
 		if (entry) {
-			mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
+			mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+				 " qpn %d\n", port,
+				 (unsigned long long) mac, qpn);
+			mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+			mlx4_qp_release_range(dev, qpn, 1);
 			radix_tree_delete(&info->mac_tree, qpn);
-			index = find_index(dev, table, entry->mac);
 			kfree(entry);
 		}
 	}
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+				   __be64 *entries)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int i, err = 0;
+	int free = -1;
+
+	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+		 (unsigned long long) mac, port);
+
+	mutex_lock(&table->mutex);
+	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+		if (free < 0 && !table->entries[i]) {
+			free = i;
+			continue;
+		}
+
+		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+			/* MAC already registered, must not have duplicates */
+			err = -EEXIST;
+			goto out;
+		}
+	}
+
+	mlx4_dbg(dev, "Free MAC index is %d\n", free);
+
+	if (table->total == table->max) {
+		/* No free mac entries */
+		err = -ENOSPC;
+		goto out;
+	}
+
+	/* Register new MAC */
+	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+	err = mlx4_set_port_mac_table(dev, port, table->entries);
+	if (unlikely(err)) {
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+			 (unsigned long long) mac);
+		table->entries[free] = 0;
+		goto out;
+	}
+
+	err = free;
+	++table->total;
+out:
+	mutex_unlock(&table->mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&out_param, port);
+		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			return err;
+
+		return get_param_l(&out_param);
+	}
+	return __mlx4_register_mac(dev, port, mac);
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int index;
+
+	index = find_index(dev, table, mac);
 
 	mutex_lock(&table->mutex);
 
 	if (validate_index(dev, table, index))
 		goto out;
 
-	/* Check whether this address has reference count */
-	if (!(--table->refs[index])) {
-		table->entries[index] = 0;
-		mlx4_set_port_mac_table(dev, port, table->entries);
-		--table->total;
-	}
+	table->entries[index] = 0;
+	mlx4_set_port_mac_table(dev, port, table->entries);
+	--table->total;
 out:
 	mutex_unlock(&table->mutex);
 }
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&out_param, port);
+		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		return;
+	}
+	__mlx4_unregister_mac(dev, port, mac);
+	return;
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
 
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
 {
 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
 	struct mlx4_mac_table *table = &info->mac_table;
-	int index = qpn - info->base_qpn;
 	struct mlx4_mac_entry *entry;
-	int err;
+	int index = qpn - info->base_qpn;
+	int err = 0;
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
 		entry = radix_tree_lookup(&info->mac_tree, qpn);
 		if (!entry)
 			return -EINVAL;
-		index = find_index(dev, table, entry->mac);
-		mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+		mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+		mlx4_unregister_mac(dev, port, entry->mac);
 		entry->mac = new_mac;
-		err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
-		if (err || index < 0)
-			return err;
+		mlx4_register_mac(dev, port, new_mac);
+		err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+		return err;
 	}
 
+	/* CX1 doesn't support multi-functions */
 	mutex_lock(&table->mutex);
 
 	err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@
 
 	err = mlx4_set_port_mac_table(dev, port, table->entries);
 	if (unlikely(err)) {
-		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+			 (unsigned long long) new_mac);
 		table->entries[index] = 0;
 	}
 out:
@@ -312,6 +387,7 @@
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
 				    __be32 *entries)
 {
@@ -326,7 +402,7 @@
 	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
 	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B);
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -352,7 +428,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
 
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+				int *index)
 {
 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
 	int i, err = 0;
@@ -387,7 +464,7 @@
 		goto out;
 	}
 
-	/* Register new MAC */
+	/* Register new VLAN */
 	table->refs[free] = 1;
 	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
 
@@ -405,9 +482,27 @@
 	mutex_unlock(&table->mutex);
 	return err;
 }
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&out_param, port);
+		err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (!err)
+			*index = get_param_l(&out_param);
+
+		return err;
+	}
+	return __mlx4_register_vlan(dev, port, vlan, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
 
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 {
 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
 
@@ -432,6 +527,25 @@
 out:
 	mutex_unlock(&table->mutex);
 }
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, port);
+		err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+			       MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+					index);
+
+		return;
+	}
+	__mlx4_unregister_vlan(dev, port, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
 
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@
 	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
 	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
 	if (!err)
 		*caps = *(__be32 *) (outbuf + 84);
 	mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@
 	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
 	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+			   MLX4_CMD_NATIVE);
 
 	packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
 
@@ -512,6 +628,139 @@
 	return err;
 }
 
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_port_info *port_info;
+	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+	struct mlx4_set_port_rqp_calc_context *qpn_context;
+	struct mlx4_set_port_general_context *gen_context;
+	int reset_qkey_viols;
+	int port;
+	int is_eth;
+	u32 in_modifier;
+	u32 promisc;
+	u16 mtu, prev_mtu;
+	int err;
+	int i;
+	__be32 agg_cap_mask;
+	__be32 slave_cap_mask;
+	__be32 new_cap_mask;
+
+	port = in_mod & 0xff;
+	in_modifier = in_mod >> 8;
+	is_eth = op_mod;
+	port_info = &priv->port[port];
+
+	/* Slaves cannot perform SET_PORT operations except changing MTU */
+	if (is_eth) {
+		if (slave != dev->caps.function &&
+		    in_modifier != MLX4_SET_PORT_GENERAL) {
+			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+					slave);
+			return -EINVAL;
+		}
+		switch (in_modifier) {
+		case MLX4_SET_PORT_RQP_CALC:
+			qpn_context = inbox->buf;
+			qpn_context->base_qpn =
+				cpu_to_be32(port_info->base_qpn);
+			qpn_context->n_mac = 0x7;
+			promisc = be32_to_cpu(qpn_context->promisc) >>
+				SET_PORT_PROMISC_SHIFT;
+			qpn_context->promisc = cpu_to_be32(
+				promisc << SET_PORT_PROMISC_SHIFT |
+				port_info->base_qpn);
+			promisc = be32_to_cpu(qpn_context->mcast) >>
+				SET_PORT_MC_PROMISC_SHIFT;
+			qpn_context->mcast = cpu_to_be32(
+				promisc << SET_PORT_MC_PROMISC_SHIFT |
+				port_info->base_qpn);
+			break;
+		case MLX4_SET_PORT_GENERAL:
+			gen_context = inbox->buf;
+			/* MTU is configured as the max MTU among all
+			 * the functions on the port. */
+			mtu = be16_to_cpu(gen_context->mtu);
+			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+			prev_mtu = slave_st->mtu[port];
+			slave_st->mtu[port] = mtu;
+			if (mtu > master->max_mtu[port])
+				master->max_mtu[port] = mtu;
+			if (mtu < prev_mtu && prev_mtu ==
+						master->max_mtu[port]) {
+				slave_st->mtu[port] = mtu;
+				master->max_mtu[port] = mtu;
+				for (i = 0; i < dev->num_slaves; i++) {
+					master->max_mtu[port] =
+					max(master->max_mtu[port],
+					    master->slave_state[i].mtu[port]);
+				}
+			}
+
+			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+			break;
+		}
+		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+				MLX4_CMD_NATIVE);
+	}
+
+	/* For IB, we only consider:
+	 * - The capability mask, which is set to the aggregate of all
+	 *   slave function capabilities
+	 * - The QKey violation counter - reset according to each request.
+	 */
+
+	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+		new_cap_mask = ((__be32 *) inbox->buf)[2];
+	} else {
+		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+		new_cap_mask = ((__be32 *) inbox->buf)[1];
+	}
+
+	agg_cap_mask = 0;
+	slave_cap_mask =
+		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+	for (i = 0; i < dev->num_slaves; i++)
+		agg_cap_mask |=
+			priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+	/* only clear mailbox for guests.  Master may be setting
+	 * MTU or PKEY table size
+	 */
+	if (slave != dev->caps.function)
+		memset(inbox->buf, 0, 256);
+	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+		*(u8 *) inbox->buf	   = !!reset_qkey_viols << 6;
+		((__be32 *) inbox->buf)[2] = agg_cap_mask;
+	} else {
+		((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+		((__be32 *) inbox->buf)[1] = agg_cap_mask;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+	if (err)
+		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+			slave_cap_mask;
+	return err;
+}
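
For MLX4_SET_PORT_GENERAL, the code above keeps one MTU per slave and programs the port with the maximum over all functions: a request is clamped to the port capability, stored in the slave's state, and the port maximum is recomputed whenever the slave that held the previous maximum lowers it. A compact stand-alone sketch of that bookkeeping, with an invented slave count and MTU values:

#include <stdio.h>

#define NUM_SLAVES 4

static unsigned short slave_mtu[NUM_SLAVES];
static unsigned short port_max_mtu;

/* Record one slave's MTU request and return the MTU to program. */
static unsigned short set_slave_mtu(int slave, unsigned short mtu,
				    unsigned short port_cap)
{
	unsigned short prev = slave_mtu[slave];
	int i;

	if (mtu > port_cap)		/* clamp to the port capability */
		mtu = port_cap;
	slave_mtu[slave] = mtu;

	if (mtu > port_max_mtu) {
		port_max_mtu = mtu;
	} else if (prev == port_max_mtu && mtu < prev) {
		/* the previous maximum shrank: recompute over all slaves */
		port_max_mtu = 0;
		for (i = 0; i < NUM_SLAVES; i++)
			if (slave_mtu[i] > port_max_mtu)
				port_max_mtu = slave_mtu[i];
	}
	return port_max_mtu;
}

int main(void)
{
	printf("port mtu %u\n", set_slave_mtu(0, 9000, 9600));
	printf("port mtu %u\n", set_slave_mtu(1, 1500, 9600));
	printf("port mtu %u\n", set_slave_mtu(0, 1500, 9600));	/* max drops back */
	return 0;
}
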
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+				    vhcr->op_modifier, inbox);
+}
+
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@
 
 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
 	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B);
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_general_context *context;
+	int err;
+	u32 in_mod;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	context->flags = SET_PORT_GEN_ALL_VALID;
+	context->mtu = cpu_to_be16(mtu);
+	context->pptx = (pptx * (!pfctx)) << 7;
+	context->pfctx = pfctx;
+	context->pprx = (pprx * (!pfcrx)) << 7;
+	context->pfcrx = pfcrx;
+
+	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+			   u8 promisc)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_rqp_calc_context *context;
+	int err;
+	u32 in_mod;
+	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+		MCAST_DIRECT : MCAST_DEFAULT;
+
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
+	    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+		return 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	context->base_qpn = cpu_to_be32(base_qpn);
+	context->n_mac = dev->caps.log_num_macs;
+	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+				       base_qpn);
+	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+				     base_qpn);
+	context->intra_no_vlan = 0;
+	context->no_vlan = MLX4_NO_VLAN_IDX;
+	context->intra_vlan_miss = 0;
+	context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd)
+{
+	int err = 0;
+
+	return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+			u64 mac, u64 clear, u8 mode)
+{
+	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+			MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd)
+{
+	int err = 0;
+
+	return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+			    MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+				struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox,
+				struct mlx4_cmd_info *cmd)
+{
+	return mlx4_common_dump_eth_stats(dev, slave,
+					  vhcr->in_modifier, outbox);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647..66f91ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -98,8 +98,8 @@
 	profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
 	profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
 	profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-	profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
-	profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
+	profile[MLX4_RES_MTT].size    = dev_cap->mtt_entry_sz;
+	profile[MLX4_RES_MCG].size    = mlx4_get_mgm_entry_size(dev);
 
 	profile[MLX4_RES_QP].num      = request->num_qp;
 	profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@
 			init_hca->cmpt_base	 = profile[i].start;
 			break;
 		case MLX4_RES_MTT:
-			dev->caps.num_mtt_segs	 = profile[i].num;
+			dev->caps.num_mtts	 = profile[i].num;
 			priv->mr_table.mtt_base	 = profile[i].start;
 			init_hca->mtt_base	 = profile[i].start;
 			break;
@@ -218,7 +218,8 @@
 			dev->caps.num_mgms	  = profile[i].num >> 1;
 			dev->caps.num_amgms	  = profile[i].num >> 1;
 			init_hca->mc_base	  = profile[i].start;
-			init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+			init_hca->log_mc_entry_sz =
+					ilog2(mlx4_get_mgm_entry_size(dev));
 			init_hca->log_mc_table_sz = profile[i].log_num;
 			init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
 			break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 15f870c..6b03ac8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
 
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
@@ -55,7 +57,7 @@
 	spin_unlock(&qp_table->lock);
 
 	if (!qp) {
-		mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
 		return;
 	}
 
@@ -65,10 +67,17 @@
 		complete(&qp->free);
 }
 
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
-		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
-		   int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+	return qp->qpn >= dev->caps.sqp_start &&
+		qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+		     struct mlx4_qp_context *context,
+		     enum mlx4_qp_optpar optpar,
+		     int sqd_event, struct mlx4_qp *qp, int native)
 {
 	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
 		[MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@
 		}
 	};
 
+	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
 	int ret = 0;
+	u8 port;
 
 	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
 	    !op[cur_state][new_state])
 		return -EINVAL;
 
-	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
-		return mlx4_cmd(dev, 0, qp->qpn, 2,
-				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+			MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+		    cur_state != MLX4_QP_STATE_RST &&
+		    is_qp0(dev, qp)) {
+			port = (qp->qpn & 1) + 1;
+			priv->mfunc.master.qp0_state[port].qp0_active = 0;
+		}
+		return ret;
+	}
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -132,47 +151,186 @@
 		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 	}
 
+	port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+		context->pri_path.sched_queue = (context->pri_path.sched_queue &
+						0xc3);
+
 	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
 	memcpy(mailbox->buf + 8, context, sizeof *context);
 
 	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
 		cpu_to_be32(qp->qpn);
 
-	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+	ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+		       qp->qpn | (!!sqd_event << 31),
 		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
-		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return ret;
 }
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+		   struct mlx4_qp_context *context,
+		   enum mlx4_qp_optpar optpar,
+		   int sqd_event, struct mlx4_qp *qp)
+{
+	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+				optpar, sqd_event, qp, 0);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+				   int *base)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+	*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+	if (*base == -1)
+		return -ENOMEM;
+
+	return 0;
+}
+
 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
 {
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_qp_table *qp_table = &priv->qp_table;
-	int qpn;
+	u64 in_param;
+	u64 out_param;
+	int err;
 
-	qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-	if (qpn == -1)
-		return -ENOMEM;
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, cnt);
+		set_param_h(&in_param, align);
+		err = mlx4_cmd_imm(dev, in_param, &out_param,
+				   RES_QP, RES_OP_RESERVE,
+				   MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			return err;
 
-	*base = qpn;
-	return 0;
+		*base = get_param_l(&out_param);
+		return 0;
+	}
+	return __mlx4_qp_reserve_range(dev, cnt, align, base);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
 
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
-	if (base_qpn < dev->caps.sqp_start + 8)
-		return;
 
+	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+		return;
 	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
 }
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, base_qpn);
+		set_param_h(&in_param, cnt);
+		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+			       MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err) {
+			mlx4_warn(dev, "Failed to release qp range"
+				  " base:%d cnt:%d\n", base_qpn, cnt);
+		}
+	} else
+		 __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	int err;
+
+	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
+	if (err)
+		goto err_out;
+
+	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
+	if (err)
+		goto err_put_qp;
+
+	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
+	if (err)
+		goto err_put_auxc;
+
+	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
+	if (err)
+		goto err_put_altc;
+
+	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
+	if (err)
+		goto err_put_rdmarc;
+
+	return 0;
+
+err_put_rdmarc:
+	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+
+err_put_altc:
+	mlx4_table_put(dev, &qp_table->altc_table, qpn);
+
+err_put_auxc:
+	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+
+err_put_qp:
+	mlx4_table_put(dev, &qp_table->qp_table, qpn);
+
+err_out:
+	return err;
+}
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+	u64 param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&param, qpn);
+		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+				    MLX4_CMD_WRAPPED);
+	}
+	return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+	mlx4_table_put(dev, &qp_table->altc_table, qpn);
+	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+	mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, qpn);
+		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+			     MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+	} else
+		__mlx4_qp_free_icm(dev, qpn);
+}
+
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -184,55 +342,27 @@
 
 	qp->qpn = qpn;
 
-	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+	err = mlx4_qp_alloc_icm(dev, qpn);
 	if (err)
-		goto err_out;
-
-	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
-	if (err)
-		goto err_put_qp;
-
-	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
-	if (err)
-		goto err_put_auxc;
-
-	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
-	if (err)
-		goto err_put_altc;
-
-	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
-	if (err)
-		goto err_put_rdmarc;
+		return err;
 
 	spin_lock_irq(&qp_table->lock);
-	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
+	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+				(dev->caps.num_qps - 1), qp);
 	spin_unlock_irq(&qp_table->lock);
 	if (err)
-		goto err_put_cmpt;
+		goto err_icm;
 
 	atomic_set(&qp->refcount, 1);
 	init_completion(&qp->free);
 
 	return 0;
 
-err_put_cmpt:
-	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
-err_put_rdmarc:
-	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-
-err_put_altc:
-	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-
-err_put_auxc:
-	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-
-err_put_qp:
-	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
-err_out:
+err_icm:
+	mlx4_qp_free_icm(dev, qpn);
 	return err;
 }
+
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
 	if (atomic_dec_and_test(&qp->refcount))
 		complete(&qp->free);
 	wait_for_completion(&qp->free);
 
-	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+	mlx4_qp_free_icm(dev, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
 {
 	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
-			MLX4_CMD_TIME_CLASS_B);
+			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@
 
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	/*
 	 * We reserve 2 extra QPs per port for the special QPs.  The
@@ -327,6 +453,9 @@
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
+
 	mlx4_CONF_SPECIAL_QP(dev, 0);
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }
@@ -342,7 +471,8 @@
 		return PTR_ERR(mailbox);
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
-			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+			   MLX4_CMD_WRAPPED);
 	if (!err)
 		memcpy(context, mailbox->buf + 8, sizeof *context);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 0000000..ed20751
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3104 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID		(1ull << 63)
+#define MLX4_MAC_MASK		0x7fffffffffffffffULL
+#define ETH_ALEN		6
+
+struct mac_res {
+	struct list_head list;
+	u64 mac;
+	u8 port;
+};
+
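+/*
+ * Common header embedded in every tracked resource: the resource id, the
+ * owning slave and the state fields (state/from_state/to_state) used by
+ * the busy/move protocol implemented below.
+ */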
+struct res_common {
+	struct list_head	list;
+	u32		        res_id;
+	int			owner;
+	int			state;
+	int			from_state;
+	int			to_state;
+	int			removing;
+};
+
+enum {
+	RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+	struct list_head	list;
+	u8			gid[16];
+	enum mlx4_protocol	prot;
+};
+
+enum res_qp_states {
+	RES_QP_BUSY = RES_ANY_BUSY,
+
+	/* QP number was allocated */
+	RES_QP_RESERVED,
+
+	/* ICM memory for QP context was mapped */
+	RES_QP_MAPPED,
+
+	/* QP is in hw ownership */
+	RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+	switch (state) {
+	case RES_QP_BUSY: return "RES_QP_BUSY";
+	case RES_QP_RESERVED: return "RES_QP_RESERVED";
+	case RES_QP_MAPPED: return "RES_QP_MAPPED";
+	case RES_QP_HW: return "RES_QP_HW";
+	default: return "Unknown";
+	}
+}
+
+struct res_qp {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	struct res_cq	       *rcq;
+	struct res_cq	       *scq;
+	struct res_srq	       *srq;
+	struct list_head	mcg_list;
+	spinlock_t		mcg_spl;
+	int			local_qpn;
+};
+
+enum res_mtt_states {
+	RES_MTT_BUSY = RES_ANY_BUSY,
+	RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+	switch (state) {
+	case RES_MTT_BUSY: return "RES_MTT_BUSY";
+	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+	default: return "Unknown";
+	}
+}
+
+struct res_mtt {
+	struct res_common	com;
+	int			order;
+	atomic_t		ref_count;
+};
+
+enum res_mpt_states {
+	RES_MPT_BUSY = RES_ANY_BUSY,
+	RES_MPT_RESERVED,
+	RES_MPT_MAPPED,
+	RES_MPT_HW,
+};
+
+struct res_mpt {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	int			key;
+};
+
+enum res_eq_states {
+	RES_EQ_BUSY = RES_ANY_BUSY,
+	RES_EQ_RESERVED,
+	RES_EQ_HW,
+};
+
+struct res_eq {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+};
+
+enum res_cq_states {
+	RES_CQ_BUSY = RES_ANY_BUSY,
+	RES_CQ_ALLOCATED,
+	RES_CQ_HW,
+};
+
+struct res_cq {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	atomic_t		ref_count;
+};
+
+enum res_srq_states {
+	RES_SRQ_BUSY = RES_ANY_BUSY,
+	RES_SRQ_ALLOCATED,
+	RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+	switch (state) {
+	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+	case RES_SRQ_HW: return "RES_SRQ_HW";
+	default: return "Unknown";
+	}
+}
+
+struct res_srq {
+	struct res_common	com;
+	struct res_mtt	       *mtt;
+	struct res_cq	       *cq;
+	atomic_t		ref_count;
+};
+
+enum res_counter_states {
+	RES_COUNTER_BUSY = RES_ANY_BUSY,
+	RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+	switch (state) {
+	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+	default: return "Unknown";
+	}
+}
+
+struct res_counter {
+	struct res_common	com;
+	int			port;
+};
+
+/* For Debug uses */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+	switch (rt) {
+	case RES_QP: return "RES_QP";
+	case RES_CQ: return "RES_CQ";
+	case RES_SRQ: return "RES_SRQ";
+	case RES_MPT: return "RES_MPT";
+	case RES_MTT: return "RES_MTT";
+	case RES_MAC: return  "RES_MAC";
+	case RES_EQ: return "RES_EQ";
+	case RES_COUNTER: return "RES_COUNTER";
+	default: return "Unknown resource type !!!";
+	};
+}
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+	int t;
+
+	priv->mfunc.master.res_tracker.slave_list =
+		kzalloc(dev->num_slaves * sizeof(struct slave_list),
+			GFP_KERNEL);
+	if (!priv->mfunc.master.res_tracker.slave_list)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->num_slaves; i++) {
+		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+				       slave_list[i].res_list[t]);
+		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+	}
+
+	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+		 dev->num_slaves);
+	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+				GFP_ATOMIC|__GFP_NOWARN);
+
+	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+	return 0;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+
+	if (priv->mfunc.master.res_tracker.slave_list) {
+		for (i = 0; i < dev->num_slaves; i++)
+			mlx4_delete_all_resources_for_slave(dev, i);
+
+		kfree(priv->mfunc.master.res_tracker.slave_list);
+	}
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+			  struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+	if (MLX4_QP_ST_UD == ts)
+		qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
+		slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+	return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+		      enum mlx4_resource type)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+				 res_id);
+}
+
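+/*
+ * Look a resource up under the tracker lock, check that it is owned by
+ * 'slave' and mark it RES_ANY_BUSY so its state cannot change until a
+ * matching put_res() restores the saved from_state.
+ */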
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+		   enum mlx4_resource type,
+		   void *res)
+{
+	struct res_common *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = find_res(dev, res_id, type);
+	if (!r) {
+		err = -ENONET;
+		goto exit;
+	}
+
+	if (r->state == RES_ANY_BUSY) {
+		err = -EBUSY;
+		goto exit;
+	}
+
+	if (r->owner != slave) {
+		err = -EPERM;
+		goto exit;
+	}
+
+	r->from_state = r->state;
+	r->state = RES_ANY_BUSY;
+	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+		 ResourceType(type), r->res_id);
+
+	if (res)
+		*((struct res_common **)res) = r;
+
+exit:
+	spin_unlock_irq(mlx4_tlock(dev));
+	return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+				    enum mlx4_resource type,
+				    int res_id, int *slave)
+{
+
+	struct res_common *r;
+	int err = -ENOENT;
+	int id = res_id;
+
+	if (type == RES_QP)
+		id &= 0x7fffff;
+	spin_lock(mlx4_tlock(dev));
+
+	r = find_res(dev, id, type);
+	if (r) {
+		*slave = r->owner;
+		err = 0;
+	}
+	spin_unlock(mlx4_tlock(dev));
+
+	return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+		    enum mlx4_resource type)
+{
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = find_res(dev, res_id, type);
+	if (r)
+		r->state = r->from_state;
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+	struct res_qp *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_QP_RESERVED;
+	INIT_LIST_HEAD(&ret->mcg_list);
+	spin_lock_init(&ret->mcg_spl);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+	struct res_mtt *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->order = order;
+	ret->com.state = RES_MTT_ALLOCATED;
+	atomic_set(&ret->ref_count, 0);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+	struct res_mpt *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_MPT_RESERVED;
+	ret->key = key;
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+	struct res_eq *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_EQ_RESERVED;
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+	struct res_cq *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_CQ_ALLOCATED;
+	atomic_set(&ret->ref_count, 0);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+	struct res_srq *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_SRQ_ALLOCATED;
+	atomic_set(&ret->ref_count, 0);
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+	struct res_counter *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_COUNTER_ALLOCATED;
+
+	return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+				   int extra)
+{
+	struct res_common *ret;
+
+	switch (type) {
+	case RES_QP:
+		ret = alloc_qp_tr(id);
+		break;
+	case RES_MPT:
+		ret = alloc_mpt_tr(id, extra);
+		break;
+	case RES_MTT:
+		ret = alloc_mtt_tr(id, extra);
+		break;
+	case RES_EQ:
+		ret = alloc_eq_tr(id);
+		break;
+	case RES_CQ:
+		ret = alloc_cq_tr(id);
+		break;
+	case RES_SRQ:
+		ret = alloc_srq_tr(id);
+		break;
+	case RES_MAC:
+		printk(KERN_ERR "implementation missing\n");
+		return NULL;
+	case RES_COUNTER:
+		ret = alloc_counter_tr(id);
+		break;
+
+	default:
+		return NULL;
+	}
+	if (ret)
+		ret->owner = slave;
+
+	return ret;
+}
+
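+/*
+ * Allocate tracker entries for the range [base, base + count), insert
+ * them in the per-type radix tree and link them on the owning slave's
+ * resource list.  On any failure the range is rolled back.
+ */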
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+			 enum mlx4_resource type, int extra)
+{
+	int i;
+	int err;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct res_common **res_arr;
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct radix_tree_root *root = &tracker->res_tree[type];
+
+	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+	if (!res_arr)
+		return -ENOMEM;
+
+	for (i = 0; i < count; ++i) {
+		res_arr[i] = alloc_tr(base + i, type, slave, extra);
+		if (!res_arr[i]) {
+			for (--i; i >= 0; --i)
+				kfree(res_arr[i]);
+
+			kfree(res_arr);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_irq(mlx4_tlock(dev));
+	for (i = 0; i < count; ++i) {
+		if (find_res(dev, base + i, type)) {
+			err = -EEXIST;
+			goto undo;
+		}
+		err = radix_tree_insert(root, base + i, res_arr[i]);
+		if (err)
+			goto undo;
+		list_add_tail(&res_arr[i]->list,
+			      &tracker->slave_list[slave].res_list[type]);
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+	kfree(res_arr);
+
+	return 0;
+
+undo:
+	for (--i; i >= 0; --i)
+		radix_tree_delete(&tracker->res_tree[type], base + i);
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	for (i = 0; i < count; ++i)
+		kfree(res_arr[i]);
+
+	kfree(res_arr);
+
+	return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+	if (res->com.state == RES_QP_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_QP_RESERVED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+	if (res->com.state == RES_MTT_BUSY ||
+	    atomic_read(&res->ref_count)) {
+		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+		       __func__, __LINE__,
+		       mtt_states_str(res->com.state),
+		       atomic_read(&res->ref_count));
+		return -EBUSY;
+	} else if (res->com.state != RES_MTT_ALLOCATED)
+		return -EPERM;
+	else if (res->order != order)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+	if (res->com.state == RES_MPT_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_MPT_RESERVED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+	if (res->com.state == RES_EQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_EQ_RESERVED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+	if (res->com.state == RES_COUNTER_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_COUNTER_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+	if (res->com.state == RES_CQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_CQ_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+	if (res->com.state == RES_SRQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_SRQ_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+	switch (type) {
+	case RES_QP:
+		return remove_qp_ok((struct res_qp *)res);
+	case RES_CQ:
+		return remove_cq_ok((struct res_cq *)res);
+	case RES_SRQ:
+		return remove_srq_ok((struct res_srq *)res);
+	case RES_MPT:
+		return remove_mpt_ok((struct res_mpt *)res);
+	case RES_MTT:
+		return remove_mtt_ok((struct res_mtt *)res, extra);
+	case RES_MAC:
+		return -ENOSYS;
+	case RES_EQ:
+		return remove_eq_ok((struct res_eq *)res);
+	case RES_COUNTER:
+		return remove_counter_ok((struct res_counter *)res);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+			 enum mlx4_resource type, int extra)
+{
+	int i;
+	int err;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	for (i = base; i < base + count; ++i) {
+		r = radix_tree_lookup(&tracker->res_tree[type], i);
+		if (!r) {
+			err = -ENOENT;
+			goto out;
+		}
+		if (r->owner != slave) {
+			err = -EPERM;
+			goto out;
+		}
+		err = remove_ok(r, type, extra);
+		if (err)
+			goto out;
+	}
+
+	for (i = base; i < base + count; ++i) {
+		r = radix_tree_lookup(&tracker->res_tree[type], i);
+		radix_tree_delete(&tracker->res_tree[type], i);
+		list_del(&r->list);
+		kfree(r);
+	}
+	err = 0;
+
+out:
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
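+/*
+ * State-move helpers: validate the requested transition against the
+ * resource's current state, then park it in the BUSY state with
+ * from_state/to_state recorded.  res_end_move() commits the transition
+ * and res_abort_move() restores the previous state.
+ */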
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+				enum res_qp_states state, struct res_qp **qp,
+				int alloc)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_qp *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_QP_BUSY:
+			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+				 __func__, r->com.res_id);
+			err = -EBUSY;
+			break;
+
+		case RES_QP_RESERVED:
+			if (r->com.state == RES_QP_MAPPED && !alloc)
+				break;
+
+			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+			err = -EINVAL;
+			break;
+
+		case RES_QP_MAPPED:
+			if ((r->com.state == RES_QP_RESERVED && alloc) ||
+			    r->com.state == RES_QP_HW)
+				break;
+			else {
+				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+					  r->com.res_id);
+				err = -EINVAL;
+			}
+
+			break;
+
+		case RES_QP_HW:
+			if (r->com.state != RES_QP_MAPPED)
+				err = -EINVAL;
+			break;
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_QP_BUSY;
+			if (qp)
+				*qp = (struct res_qp *)r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+				enum res_mpt_states state, struct res_mpt **mpt)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_mpt *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_MPT_BUSY:
+			err = -EINVAL;
+			break;
+
+		case RES_MPT_RESERVED:
+			if (r->com.state != RES_MPT_MAPPED)
+				err = -EINVAL;
+			break;
+
+		case RES_MPT_MAPPED:
+			if (r->com.state != RES_MPT_RESERVED &&
+			    r->com.state != RES_MPT_HW)
+				err = -EINVAL;
+			break;
+
+		case RES_MPT_HW:
+			if (r->com.state != RES_MPT_MAPPED)
+				err = -EINVAL;
+			break;
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_MPT_BUSY;
+			if (mpt)
+				*mpt = (struct res_mpt *)r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+				enum res_eq_states state, struct res_eq **eq)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_eq *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_EQ_BUSY:
+			err = -EINVAL;
+			break;
+
+		case RES_EQ_RESERVED:
+			if (r->com.state != RES_EQ_HW)
+				err = -EINVAL;
+			break;
+
+		case RES_EQ_HW:
+			if (r->com.state != RES_EQ_RESERVED)
+				err = -EINVAL;
+			break;
+
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_EQ_BUSY;
+			if (eq)
+				*eq = r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+				enum res_cq_states state, struct res_cq **cq)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_cq *r;
+	int err;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_CQ_BUSY:
+			err = -EBUSY;
+			break;
+
+		case RES_CQ_ALLOCATED:
+			if (r->com.state != RES_CQ_HW)
+				err = -EINVAL;
+			else if (atomic_read(&r->ref_count))
+				err = -EBUSY;
+			else
+				err = 0;
+			break;
+
+		case RES_CQ_HW:
+			if (r->com.state != RES_CQ_ALLOCATED)
+				err = -EINVAL;
+			else
+				err = 0;
+			break;
+
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_CQ_BUSY;
+			if (cq)
+				*cq = r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+				 enum res_cq_states state, struct res_srq **srq)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_srq *r;
+	int err = 0;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+	if (!r)
+		err = -ENOENT;
+	else if (r->com.owner != slave)
+		err = -EPERM;
+	else {
+		switch (state) {
+		case RES_SRQ_BUSY:
+			err = -EINVAL;
+			break;
+
+		case RES_SRQ_ALLOCATED:
+			if (r->com.state != RES_SRQ_HW)
+				err = -EINVAL;
+			else if (atomic_read(&r->ref_count))
+				err = -EBUSY;
+			break;
+
+		case RES_SRQ_HW:
+			if (r->com.state != RES_SRQ_ALLOCATED)
+				err = -EINVAL;
+			break;
+
+		default:
+			err = -EINVAL;
+		}
+
+		if (!err) {
+			r->com.from_state = r->com.state;
+			r->com.to_state = state;
+			r->com.state = RES_SRQ_BUSY;
+			if (srq)
+				*srq = r;
+		}
+	}
+
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+			   enum mlx4_resource type, int id)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[type], id);
+	if (r && (r->owner == slave))
+		r->state = r->from_state;
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+			 enum mlx4_resource type, int id)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_common *r;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	r = radix_tree_lookup(&tracker->res_tree[type], id);
+	if (r && (r->owner == slave))
+		r->state = r->to_state;
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+	return mlx4_is_qp_reserved(dev, qpn);
+}
+
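+/*
+ * Slave QP allocation: RES_OP_RESERVE reserves a QP number range and
+ * records it in the tracker; RES_OP_MAP_ICM maps ICM for one QP and
+ * moves it from RESERVED to MAPPED (reserved special QPs skip the ICM
+ * allocation).
+ */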
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			u64 in_param, u64 *out_param)
+{
+	int err;
+	int count;
+	int align;
+	int base;
+	int qpn;
+
+	switch (op) {
+	case RES_OP_RESERVE:
+		count = get_param_l(&in_param);
+		align = get_param_h(&in_param);
+		err = __mlx4_qp_reserve_range(dev, count, align, &base);
+		if (err)
+			return err;
+
+		err = add_res_range(dev, slave, base, count, RES_QP, 0);
+		if (err) {
+			__mlx4_qp_release_range(dev, base, count);
+			return err;
+		}
+		set_param_l(out_param, base);
+		break;
+	case RES_OP_MAP_ICM:
+		qpn = get_param_l(&in_param) & 0x7fffff;
+		if (valid_reserved(dev, slave, qpn)) {
+			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+			if (err)
+				return err;
+		}
+
+		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+					   NULL, 1);
+		if (err)
+			return err;
+
+		if (!valid_reserved(dev, slave, qpn)) {
+			err = __mlx4_qp_alloc_icm(dev, qpn);
+			if (err) {
+				res_abort_move(dev, slave, RES_QP, qpn);
+				return err;
+			}
+		}
+
+		res_end_move(dev, slave, RES_QP, qpn);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+	return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			 u64 in_param, u64 *out_param)
+{
+	int err = -EINVAL;
+	int base;
+	int order;
+
+	if (op != RES_OP_RESERVE_AND_MAP)
+		return err;
+
+	order = get_param_l(&in_param);
+	base = __mlx4_alloc_mtt_range(dev, order);
+	if (base == -1)
+		return -ENOMEM;
+
+	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+	if (err)
+		__mlx4_free_mtt_range(dev, base, order);
+	else
+		set_param_l(out_param, base);
+
+	return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			 u64 in_param, u64 *out_param)
+{
+	int err = -EINVAL;
+	int index;
+	int id;
+	struct res_mpt *mpt;
+
+	switch (op) {
+	case RES_OP_RESERVE:
+		index = __mlx4_mr_reserve(dev);
+		if (index == -1)
+			break;
+		id = index & mpt_mask(dev);
+
+		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+		if (err) {
+			__mlx4_mr_release(dev, index);
+			break;
+		}
+		set_param_l(out_param, index);
+		break;
+	case RES_OP_MAP_ICM:
+		index = get_param_l(&in_param);
+		id = index & mpt_mask(dev);
+		err = mr_res_start_move_to(dev, slave, id,
+					   RES_MPT_MAPPED, &mpt);
+		if (err)
+			return err;
+
+		err = __mlx4_mr_alloc_icm(dev, mpt->key);
+		if (err) {
+			res_abort_move(dev, slave, RES_MPT, id);
+			return err;
+		}
+
+		res_end_move(dev, slave, RES_MPT, id);
+		break;
+	}
+	return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			u64 in_param, u64 *out_param)
+{
+	int cqn;
+	int err;
+
+	switch (op) {
+	case RES_OP_RESERVE_AND_MAP:
+		err = __mlx4_cq_alloc_icm(dev, &cqn);
+		if (err)
+			break;
+
+		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+		if (err) {
+			__mlx4_cq_free_icm(dev, cqn);
+			break;
+		}
+
+		set_param_l(out_param, cqn);
+		break;
+
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			 u64 in_param, u64 *out_param)
+{
+	int srqn;
+	int err;
+
+	switch (op) {
+	case RES_OP_RESERVE_AND_MAP:
+		err = __mlx4_srq_alloc_icm(dev, &srqn);
+		if (err)
+			break;
+
+		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+		if (err) {
+			__mlx4_srq_free_icm(dev, srqn);
+			break;
+		}
+
+		set_param_l(out_param, srqn);
+		break;
+
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct mac_res *res;
+
+	res = kzalloc(sizeof *res, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+	res->mac = mac;
+	res->port = (u8) port;
+	list_add_tail(&res->list,
+		      &tracker->slave_list[slave].res_list[RES_MAC]);
+	return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+			       int port)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *mac_list =
+		&tracker->slave_list[slave].res_list[RES_MAC];
+	struct mac_res *res, *tmp;
+
+	list_for_each_entry_safe(res, tmp, mac_list, list) {
+		if (res->mac == mac && res->port == (u8) port) {
+			list_del(&res->list);
+			kfree(res);
+			break;
+		}
+	}
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *mac_list =
+		&tracker->slave_list[slave].res_list[RES_MAC];
+	struct mac_res *res, *tmp;
+
+	list_for_each_entry_safe(res, tmp, mac_list, list) {
+		list_del(&res->list);
+		__mlx4_unregister_mac(dev, res->port, res->mac);
+		kfree(res);
+	}
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			 u64 in_param, u64 *out_param)
+{
+	int err = -EINVAL;
+	int port;
+	u64 mac;
+
+	if (op != RES_OP_RESERVE_AND_MAP)
+		return err;
+
+	port = get_param_l(out_param);
+	mac = in_param;
+
+	err = __mlx4_register_mac(dev, port, mac);
+	if (err >= 0) {
+		set_param_l(out_param, err);
+		err = 0;
+	}
+
+	if (!err) {
+		err = mac_add_to_slave(dev, slave, mac, port);
+		if (err)
+			__mlx4_unregister_mac(dev, port, mac);
+	}
+	return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			 u64 in_param, u64 *out_param)
+{
+	return 0;
+}
+
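+/*
+ * Dispatcher for the ALLOC_RES command issued by slaves:
+ * vhcr->in_modifier selects the resource type and vhcr->op_modifier the
+ * operation to perform on it.
+ */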
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int alop = vhcr->op_modifier;
+
+	switch (vhcr->in_modifier) {
+	case RES_QP:
+		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_MTT:
+		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				    vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_MPT:
+		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				    vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_CQ:
+		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_SRQ:
+		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				    vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_MAC:
+		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				    vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_VLAN:
+		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				    vhcr->in_param, &vhcr->out_param);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+		       u64 in_param)
+{
+	int err;
+	int count;
+	int base;
+	int qpn;
+
+	switch (op) {
+	case RES_OP_RESERVE:
+		base = get_param_l(&in_param) & 0x7fffff;
+		count = get_param_h(&in_param);
+		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+		if (err)
+			break;
+		__mlx4_qp_release_range(dev, base, count);
+		break;
+	case RES_OP_MAP_ICM:
+		qpn = get_param_l(&in_param) & 0x7fffff;
+		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+					   NULL, 0);
+		if (err)
+			return err;
+
+		if (!valid_reserved(dev, slave, qpn))
+			__mlx4_qp_free_icm(dev, qpn);
+
+		res_end_move(dev, slave, RES_QP, qpn);
+
+		if (valid_reserved(dev, slave, qpn))
+			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+	return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			u64 in_param, u64 *out_param)
+{
+	int err = -EINVAL;
+	int base;
+	int order;
+
+	if (op != RES_OP_RESERVE_AND_MAP)
+		return err;
+
+	base = get_param_l(&in_param);
+	order = get_param_h(&in_param);
+	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+	if (!err)
+		__mlx4_free_mtt_range(dev, base, order);
+	return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			u64 in_param)
+{
+	int err = -EINVAL;
+	int index;
+	int id;
+	struct res_mpt *mpt;
+
+	switch (op) {
+	case RES_OP_RESERVE:
+		index = get_param_l(&in_param);
+		id = index & mpt_mask(dev);
+		err = get_res(dev, slave, id, RES_MPT, &mpt);
+		if (err)
+			break;
+		index = mpt->key;
+		put_res(dev, slave, id, RES_MPT);
+
+		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+		if (err)
+			break;
+		__mlx4_mr_release(dev, index);
+		break;
+	case RES_OP_MAP_ICM:
+		index = get_param_l(&in_param);
+		id = index & mpt_mask(dev);
+		err = mr_res_start_move_to(dev, slave, id,
+					   RES_MPT_RESERVED, &mpt);
+		if (err)
+			return err;
+
+		__mlx4_mr_free_icm(dev, mpt->key);
+		res_end_move(dev, slave, RES_MPT, id);
+		return err;
+	default:
+		err = -EINVAL;
+		break;
+	}
+	return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+		       u64 in_param, u64 *out_param)
+{
+	int cqn;
+	int err;
+
+	switch (op) {
+	case RES_OP_RESERVE_AND_MAP:
+		cqn = get_param_l(&in_param);
+		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+		if (err)
+			break;
+
+		__mlx4_cq_free_icm(dev, cqn);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			u64 in_param, u64 *out_param)
+{
+	int srqn;
+	int err;
+
+	switch (op) {
+	case RES_OP_RESERVE_AND_MAP:
+		srqn = get_param_l(&in_param);
+		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+		if (err)
+			break;
+
+		__mlx4_srq_free_icm(dev, srqn);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			    u64 in_param, u64 *out_param)
+{
+	int port;
+	int err = 0;
+
+	switch (op) {
+	case RES_OP_RESERVE_AND_MAP:
+		port = get_param_l(out_param);
+		mac_del_from_slave(dev, slave, in_param, port);
+		__mlx4_unregister_mac(dev, port, in_param);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			    u64 in_param, u64 *out_param)
+{
+	return 0;
+}
+
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int err = -EINVAL;
+	int alop = vhcr->op_modifier;
+
+	switch (vhcr->in_modifier) {
+	case RES_QP:
+		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+				  vhcr->in_param);
+		break;
+
+	case RES_MTT:
+		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_MPT:
+		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param);
+		break;
+
+	case RES_CQ:
+		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+				  vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_SRQ:
+		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_MAC:
+		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_VLAN:
+		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+				   vhcr->in_param, &vhcr->out_param);
+		break;
+
+	default:
+		break;
+	}
+	return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+	return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+	return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+	return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
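+/*
+ * Compute how many MTT pages the QP's work queue buffer spans, from the
+ * log sizes and strides encoded in the context.  The receive queue is
+ * counted as zero for SRQ-attached, RSS and XRC QPs.
+ */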
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+	int page_shift = (qpc->log_page_size & 0x3f) + 12;
+	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+	int log_sq_stride = qpc->sq_size_stride & 7;
+	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+	int log_rq_stride = qpc->rq_size_stride & 7;
+	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+	int sq_size;
+	int rq_size;
+	int total_pages;
+	int total_mem;
+	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+	total_mem = sq_size + rq_size;
+	total_pages =
+		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+				   page_shift);
+
+	return total_pages;
+}
+
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+	return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
+static int pdn2slave(int pdn)
+{
+	return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+			   int size, struct res_mtt *mtt)
+{
+	int res_start = mtt->com.res_id;
+	int res_size = (1 << mtt->order);
+
+	if (start < res_start || start + size > res_start + res_size)
+		return -EPERM;
+	return 0;
+}
+
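+/*
+ * SW2HW_MPT on behalf of a slave: move the MPT to the HW state, validate
+ * the referenced MTT range and the PD ownership, then forward the
+ * command via mlx4_DMA_wrapper().
+ */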
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int index = vhcr->in_modifier;
+	struct res_mtt *mtt;
+	struct res_mpt *mpt;
+	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+	int phys;
+	int id;
+
+	id = index & mpt_mask(dev);
+	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+	if (err)
+		return err;
+
+	phys = mr_phys_mpt(inbox->buf);
+	if (!phys) {
+		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+		if (err)
+			goto ex_abort;
+
+		err = check_mtt_range(dev, slave, mtt_base,
+				      mr_get_mtt_size(inbox->buf), mtt);
+		if (err)
+			goto ex_put;
+
+		mpt->mtt = mtt;
+	}
+
+	if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+		err = -EPERM;
+		goto ex_put;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_put;
+
+	if (!phys) {
+		atomic_inc(&mtt->ref_count);
+		put_res(dev, slave, mtt->com.res_id, RES_MTT);
+	}
+
+	res_end_move(dev, slave, RES_MPT, id);
+	return 0;
+
+ex_put:
+	if (!phys)
+		put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+	res_abort_move(dev, slave, RES_MPT, id);
+
+	return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int index = vhcr->in_modifier;
+	struct res_mpt *mpt;
+	int id;
+
+	id = index & mpt_mask(dev);
+	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+	if (err)
+		return err;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_abort;
+
+	if (mpt->mtt)
+		atomic_dec(&mpt->mtt->ref_count);
+
+	res_end_move(dev, slave, RES_MPT, id);
+	return 0;
+
+ex_abort:
+	res_abort_move(dev, slave, RES_MPT, id);
+
+	return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int index = vhcr->in_modifier;
+	struct res_mpt *mpt;
+	int id;
+
+	id = index & mpt_mask(dev);
+	err = get_res(dev, slave, id, RES_MPT, &mpt);
+	if (err)
+		return err;
+
+	if (mpt->com.from_state != RES_MPT_HW) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+	put_res(dev, slave, id, RES_MPT);
+	return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
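+/*
+ * RST2INIT on behalf of a slave: take references on every resource the
+ * QP context points at (MTT range, send and receive CQs, and the SRQ
+ * when one is used) before forwarding the command, so that they stay
+ * pinned while the QP is in hardware.
+ */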
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+			     struct mlx4_vhcr *vhcr,
+			     struct mlx4_cmd_mailbox *inbox,
+			     struct mlx4_cmd_mailbox *outbox,
+			     struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int qpn = vhcr->in_modifier & 0x7fffff;
+	struct res_mtt *mtt;
+	struct res_qp *qp;
+	struct mlx4_qp_context *qpc = inbox->buf + 8;
+	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+	int mtt_size = qp_get_mtt_size(qpc);
+	struct res_cq *rcq;
+	struct res_cq *scq;
+	int rcqn = qp_get_rcqn(qpc);
+	int scqn = qp_get_scqn(qpc);
+	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+	struct res_srq *srq;
+	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+	if (err)
+		return err;
+	qp->local_qpn = local_qpn;
+
+	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+	if (err)
+		goto ex_abort;
+
+	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+	if (err)
+		goto ex_put_mtt;
+
+	if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+		err = -EPERM;
+		goto ex_put_mtt;
+	}
+
+	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+	if (err)
+		goto ex_put_mtt;
+
+	if (scqn != rcqn) {
+		err = get_res(dev, slave, scqn, RES_CQ, &scq);
+		if (err)
+			goto ex_put_rcq;
+	} else
+		scq = rcq;
+
+	if (use_srq) {
+		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+		if (err)
+			goto ex_put_scq;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_put_srq;
+	atomic_inc(&mtt->ref_count);
+	qp->mtt = mtt;
+	atomic_inc(&rcq->ref_count);
+	qp->rcq = rcq;
+	atomic_inc(&scq->ref_count);
+	qp->scq = scq;
+
+	if (scqn != rcqn)
+		put_res(dev, slave, scqn, RES_CQ);
+
+	if (use_srq) {
+		atomic_inc(&srq->ref_count);
+		put_res(dev, slave, srqn, RES_SRQ);
+		qp->srq = srq;
+	}
+	put_res(dev, slave, rcqn, RES_CQ);
+	put_res(dev, slave, mtt_base, RES_MTT);
+	res_end_move(dev, slave, RES_QP, qpn);
+
+	return 0;
+
+ex_put_srq:
+	if (use_srq)
+		put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+	if (scqn != rcqn)
+		put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+	put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+	put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+	res_abort_move(dev, slave, RES_QP, qpn);
+
+	return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+	int log_eq_size = eqc->log_eq_size & 0x1f;
+	int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+	if (log_eq_size + 5 < page_shift)
+		return 1;
+
+	return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+	int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+	if (log_cq_size + 5 < page_shift)
+		return 1;
+
+	return 1 << (log_cq_size + 5 - page_shift);
+}
+
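+/*
+ * EQ tracker ids embed the owning slave: res_id = (slave << 8) | eqn.
+ * SW2HW_EQ creates the tracker entry, moves it to the HW state and pins
+ * the MTT range referenced by the EQ context.
+ */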
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int eqn = vhcr->in_modifier;
+	int res_id = (slave << 8) | eqn;
+	struct mlx4_eq_context *eqc = inbox->buf;
+	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+	int mtt_size = eq_get_mtt_size(eqc);
+	struct res_eq *eq;
+	struct res_mtt *mtt;
+
+	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+	if (err)
+		return err;
+	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+	if (err)
+		goto out_add;
+
+	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+	if (err)
+		goto out_move;
+
+	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+	if (err)
+		goto out_put;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto out_put;
+
+	atomic_inc(&mtt->ref_count);
+	eq->mtt = mtt;
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+	res_end_move(dev, slave, RES_EQ, res_id);
+	return 0;
+
+out_put:
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+	res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+	return err;
+}
+
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+			      int len, struct res_mtt **res)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct res_mtt *mtt;
+	int err = -EINVAL;
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+			    com.list) {
+		if (!check_mtt_range(dev, slave, start, len, mtt)) {
+			*res = mtt;
+			mtt->com.from_state = mtt->com.state;
+			mtt->com.state = RES_MTT_BUSY;
+			err = 0;
+			break;
+		}
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return err;
+}
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_mtt mtt;
+	__be64 *page_list = inbox->buf;
+	u64 *pg_list = (u64 *)page_list;
+	int i;
+	struct res_mtt *rmtt = NULL;
+	int start = be64_to_cpu(page_list[0]);
+	int npages = vhcr->in_modifier;
+	int err;
+
+	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+	if (err)
+		return err;
+
+	/* Call the SW implementation of write_mtt:
+	 * - Prepare a dummy mtt struct
+	 * - Translate inbox contents to simple addresses in host endianness */
+	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
+			    we don't really use it */
+	mtt.order = 0;
+	mtt.page_shift = 0;
+	for (i = 0; i < npages; ++i)
+		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+			       ((u64 *)page_list + 2));
+
+	if (rmtt)
+		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+	return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int eqn = vhcr->in_modifier;
+	int res_id = eqn | (slave << 8);
+	struct res_eq *eq;
+	int err;
+
+	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+	if (err)
+		return err;
+
+	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+	if (err)
+		goto ex_abort;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_put;
+
+	atomic_dec(&eq->mtt->ref_count);
+	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+	res_end_move(dev, slave, RES_EQ, res_id);
+	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+	return 0;
+
+ex_put:
+	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+	res_abort_move(dev, slave, RES_EQ, res_id);
+
+	return err;
+}
+
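+/*
+ * Post a software-generated EQE to the slave's event EQ via the GEN_EQE
+ * command, but only if the slave has registered for this event type.
+ */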
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_event_eq_info *event_eq;
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 in_modifier = 0;
+	int err;
+	int res_id;
+	struct res_eq *req;
+
+	if (!priv->mfunc.master.slave_state)
+		return -EINVAL;
+
+	event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+	/* Create the event only if the slave is registered */
+	if ((event_eq->event_type & (1 << eqe->type)) == 0)
+		return 0;
+
+	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+	res_id = (slave << 8) | event_eq->eqn;
+	err = get_res(dev, slave, res_id, RES_EQ, &req);
+	if (err)
+		goto unlock;
+
+	if (req->com.from_state != RES_EQ_HW) {
+		err = -EINVAL;
+		goto put;
+	}
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto put;
+	}
+
+	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+		++event_eq->token;
+		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+	}
+
+	memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+
+	put_res(dev, slave, res_id, RES_EQ);
+	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+
+put:
+	put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+	return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int eqn = vhcr->in_modifier;
+	int res_id = eqn | (slave << 8);
+	struct res_eq *eq;
+	int err;
+
+	err = get_res(dev, slave, res_id, RES_EQ, &eq);
+	if (err)
+		return err;
+
+	if (eq->com.from_state != RES_EQ_HW) {
+		err = -EINVAL;
+		goto ex_put;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+	put_res(dev, slave, res_id, RES_EQ);
+	return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int cqn = vhcr->in_modifier;
+	struct mlx4_cq_context *cqc = inbox->buf;
+	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+	struct res_cq *cq;
+	struct res_mtt *mtt;
+
+	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+	if (err)
+		return err;
+	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+	if (err)
+		goto out_move;
+	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+	if (err)
+		goto out_put;
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto out_put;
+	atomic_inc(&mtt->ref_count);
+	cq->mtt = mtt;
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+	res_end_move(dev, slave, RES_CQ, cqn);
+	return 0;
+
+out_put:
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+	res_abort_move(dev, slave, RES_CQ, cqn);
+	return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int cqn = vhcr->in_modifier;
+	struct res_cq *cq;
+
+	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+	if (err)
+		return err;
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto out_move;
+	atomic_dec(&cq->mtt->ref_count);
+	res_end_move(dev, slave, RES_CQ, cqn);
+	return 0;
+
+out_move:
+	res_abort_move(dev, slave, RES_CQ, cqn);
+	return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	int cqn = vhcr->in_modifier;
+	struct res_cq *cq;
+	int err;
+
+	err = get_res(dev, slave, cqn, RES_CQ, &cq);
+	if (err)
+		return err;
+
+	if (cq->com.from_state != RES_CQ_HW)
+		goto ex_put;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+	put_res(dev, slave, cqn, RES_CQ);
+
+	return err;
+}
+
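+/* MODIFY_CQ with op_modifier 0 is a CQ resize: validate the new MTT range
+ * described in the inbox, execute the command, then move the CQ's MTT
+ * reference from the old buffer to the new one.
+ */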
+static int handle_resize(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd,
+			 struct res_cq *cq)
+{
+	int err;
+	struct res_mtt *orig_mtt;
+	struct res_mtt *mtt;
+	struct mlx4_cq_context *cqc = inbox->buf;
+	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+	if (err)
+		return err;
+
+	if (orig_mtt != cq->mtt) {
+		err = -EINVAL;
+		goto ex_put;
+	}
+
+	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+	if (err)
+		goto ex_put;
+
+	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+	if (err)
+		goto ex_put1;
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_put1;
+	atomic_dec(&orig_mtt->ref_count);
+	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+	atomic_inc(&mtt->ref_count);
+	cq->mtt = mtt;
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+	return 0;
+
+ex_put1:
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+	return err;
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int cqn = vhcr->in_modifier;
+	struct res_cq *cq;
+	int err;
+
+	err = get_res(dev, slave, cqn, RES_CQ, &cq);
+	if (err)
+		return err;
+
+	if (cq->com.from_state != RES_CQ_HW)
+		goto ex_put;
+
+	if (vhcr->op_modifier == 0) {
+		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+		goto ex_put;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+	put_res(dev, slave, cqn, RES_CQ);
+
+	return err;
+}
+
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+	return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
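+/* number of MTT pages spanned by the SRQ buffer, derived from its log
+ * size, log stride and page shift
+ */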
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+	int log_rq_stride = srqc->logstride & 7;
+	int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+	if (log_srq_size + log_rq_stride + 4 < page_shift)
+		return 1;
+
+	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int srqn = vhcr->in_modifier;
+	struct res_mtt *mtt;
+	struct res_srq *srq;
+	struct mlx4_srq_context *srqc = inbox->buf;
+	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+		return -EINVAL;
+
+	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+	if (err)
+		return err;
+	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+	if (err)
+		goto ex_abort;
+	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+			      mtt);
+	if (err)
+		goto ex_put_mtt;
+
+	if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+		err = -EPERM;
+		goto ex_put_mtt;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_put_mtt;
+
+	atomic_inc(&mtt->ref_count);
+	srq->mtt = mtt;
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+	res_end_move(dev, slave, RES_SRQ, srqn);
+	return 0;
+
+ex_put_mtt:
+	put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+	res_abort_move(dev, slave, RES_SRQ, srqn);
+
+	return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int srqn = vhcr->in_modifier;
+	struct res_srq *srq;
+
+	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+	if (err)
+		return err;
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_abort;
+	atomic_dec(&srq->mtt->ref_count);
+	if (srq->cq)
+		atomic_dec(&srq->cq->ref_count);
+	res_end_move(dev, slave, RES_SRQ, srqn);
+
+	return 0;
+
+ex_abort:
+	res_abort_move(dev, slave, RES_SRQ, srqn);
+
+	return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int srqn = vhcr->in_modifier;
+	struct res_srq *srq;
+
+	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+	if (err)
+		return err;
+	if (srq->com.from_state != RES_SRQ_HW) {
+		err = -EBUSY;
+		goto out;
+	}
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+	put_res(dev, slave, srqn, RES_SRQ);
+	return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int srqn = vhcr->in_modifier;
+	struct res_srq *srq;
+
+	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+	if (err)
+		return err;
+
+	if (srq->com.from_state != RES_SRQ_HW) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+	put_res(dev, slave, srqn, RES_SRQ);
+	return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+			struct mlx4_vhcr *vhcr,
+			struct mlx4_cmd_mailbox *inbox,
+			struct mlx4_cmd_mailbox *outbox,
+			struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int qpn = vhcr->in_modifier & 0x7fffff;
+	struct res_qp *qp;
+
+	err = get_res(dev, slave, qpn, RES_QP, &qp);
+	if (err)
+		return err;
+	if (qp->com.from_state != RES_QP_HW) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+	put_res(dev, slave, qpn, RES_QP);
+	return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+			     struct mlx4_vhcr *vhcr,
+			     struct mlx4_cmd_mailbox *inbox,
+			     struct mlx4_cmd_mailbox *outbox,
+			     struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+	update_ud_gid(dev, qpc, (u8)slave);
+
+	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+			 struct mlx4_vhcr *vhcr,
+			 struct mlx4_cmd_mailbox *inbox,
+			 struct mlx4_cmd_mailbox *outbox,
+			 struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int qpn = vhcr->in_modifier & 0x7fffff;
+	struct res_qp *qp;
+
+	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+	if (err)
+		return err;
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	if (err)
+		goto ex_abort;
+
+	atomic_dec(&qp->mtt->ref_count);
+	atomic_dec(&qp->rcq->ref_count);
+	atomic_dec(&qp->scq->ref_count);
+	if (qp->srq)
+		atomic_dec(&qp->srq->ref_count);
+	res_end_move(dev, slave, RES_QP, qpn);
+	return 0;
+
+ex_abort:
+	res_abort_move(dev, slave, RES_QP, qpn);
+
+	return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+				struct res_qp *rqp, u8 *gid)
+{
+	struct res_gid *res;
+
+	list_for_each_entry(res, &rqp->mcg_list, list) {
+		if (!memcmp(res->gid, gid, 16))
+			return res;
+	}
+	return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+		       u8 *gid, enum mlx4_protocol prot)
+{
+	struct res_gid *res;
+	int err;
+
+	res = kzalloc(sizeof *res, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	spin_lock_irq(&rqp->mcg_spl);
+	if (find_gid(dev, slave, rqp, gid)) {
+		kfree(res);
+		err = -EEXIST;
+	} else {
+		memcpy(res->gid, gid, 16);
+		res->prot = prot;
+		list_add_tail(&res->list, &rqp->mcg_list);
+		err = 0;
+	}
+	spin_unlock_irq(&rqp->mcg_spl);
+
+	return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+		       u8 *gid, enum mlx4_protocol prot)
+{
+	struct res_gid *res;
+	int err;
+
+	spin_lock_irq(&rqp->mcg_spl);
+	res = find_gid(dev, slave, rqp, gid);
+	if (!res || res->prot != prot)
+		err = -EINVAL;
+	else {
+		list_del(&res->list);
+		kfree(res);
+		err = 0;
+	}
+	spin_unlock_irq(&rqp->mcg_spl);
+
+	return err;
+}
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_qp qp; /* dummy for calling attach/detach */
+	u8 *gid = inbox->buf;
+	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+	int err, err1;
+	int qpn;
+	struct res_qp *rqp;
+	int attach = vhcr->op_modifier;
+	int block_loopback = vhcr->in_modifier >> 31;
+	u8 steer_type_mask = 2;
+	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
+
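+	/* in_modifier layout: QPN in bits 0-23, protocol in bits 28-30,
+	 * block-loopback flag in bit 31
+	 */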
+	qpn = vhcr->in_modifier & 0xffffff;
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
+	if (err)
+		return err;
+
+	qp.qpn = qpn;
+	if (attach) {
+		err = add_mcg_res(dev, slave, rqp, gid, prot);
+		if (err)
+			goto ex_put;
+
+		err = mlx4_qp_attach_common(dev, &qp, gid,
+					    block_loopback, prot, type);
+		if (err)
+			goto ex_rem;
+	} else {
+		err = rem_mcg_res(dev, slave, rqp, gid, prot);
+		if (err)
+			goto ex_put;
+		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+	}
+
+	put_res(dev, slave, qpn, RES_QP);
+	return 0;
+
+ex_rem:
+	/* ignore error return below, already in error */
+	err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+	put_res(dev, slave, qpn, RES_QP);
+
+	return err;
+}
+
+enum {
+	BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd)
+{
+	int err;
+	int index = vhcr->in_modifier & 0xffff;
+
+	err = get_res(dev, slave, index, RES_COUNTER, NULL);
+	if (err)
+		return err;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+	put_res(dev, slave, index, RES_COUNTER);
+	return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+	struct res_gid *rgid;
+	struct res_gid *tmp;
+	struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+		qp.qpn = rqp->local_qpn;
+		/* detach errors are ignored; the QP is being torn down */
+		mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+				      MLX4_MC_STEER);
+		list_del(&rgid->list);
+		kfree(rgid);
+	}
+}
+
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+			  enum mlx4_resource type, int print)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker =
+		&priv->mfunc.master.res_tracker;
+	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+	struct res_common *r;
+	struct res_common *tmp;
+	int busy;
+
+	busy = 0;
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(r, tmp, rlist, list) {
+		if (r->owner == slave) {
+			if (!r->removing) {
+				if (r->state == RES_ANY_BUSY) {
+					if (print)
+						mlx4_dbg(dev,
+							 "%s id 0x%x is busy\n",
+							  ResourceType(type),
+							  r->res_id);
+					++busy;
+				} else {
+					r->from_state = r->state;
+					r->state = RES_ANY_BUSY;
+					r->removing = 1;
+				}
+			}
+		}
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+
+	return busy;
+}
+
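+/* Mark every resource of @type owned by @slave busy so it can be torn down,
+ * retrying for up to five seconds while other users still hold it; a final
+ * pass then logs whatever is still busy.
+ */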
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+			 enum mlx4_resource type)
+{
+	unsigned long begin;
+	int busy;
+
+	begin = jiffies;
+	do {
+		busy = _move_all_busy(dev, slave, type, 0);
+		if (time_after(jiffies, begin + 5 * HZ))
+			break;
+		if (busy)
+			cond_resched();
+	} while (busy);
+
+	if (busy)
+		busy = _move_all_busy(dev, slave, type, 1);
+
+	return busy;
+}
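+
+/* Tear down all QPs still owned by @slave: detach any multicast attachments,
+ * then walk each QP back from RES_QP_HW through RES_QP_MAPPED to
+ * RES_QP_RESERVED, releasing firmware, ICM and tracker state at each step.
+ */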
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *qp_list =
+		&tracker->slave_list[slave].res_list[RES_QP];
+	struct res_qp *qp;
+	struct res_qp *tmp;
+	int state;
+	u64 in_param;
+	int qpn;
+	int err;
+
+	err = move_all_busy(dev, slave, RES_QP);
+	if (err)
+		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+		spin_unlock_irq(mlx4_tlock(dev));
+		if (qp->com.owner == slave) {
+			qpn = qp->com.res_id;
+			detach_qp(dev, slave, qp);
+			state = qp->com.from_state;
+			while (state != 0) {
+				switch (state) {
+				case RES_QP_RESERVED:
+					spin_lock_irq(mlx4_tlock(dev));
+					radix_tree_delete(&tracker->res_tree[RES_QP],
+							  qp->com.res_id);
+					list_del(&qp->com.list);
+					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(qp);
+					state = 0;
+					break;
+				case RES_QP_MAPPED:
+					if (!valid_reserved(dev, slave, qpn))
+						__mlx4_qp_free_icm(dev, qpn);
+					state = RES_QP_RESERVED;
+					break;
+				case RES_QP_HW:
+					in_param = slave;
+					err = mlx4_cmd(dev, in_param,
+						       qp->local_qpn, 2,
+						       MLX4_CMD_2RST_QP,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_NATIVE);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_qps: failed"
+							 " to move slave %d qpn %d to"
+							 " reset\n", slave,
+							 qp->local_qpn);
+					atomic_dec(&qp->rcq->ref_count);
+					atomic_dec(&qp->scq->ref_count);
+					atomic_dec(&qp->mtt->ref_count);
+					if (qp->srq)
+						atomic_dec(&qp->srq->ref_count);
+					state = RES_QP_MAPPED;
+					break;
+				default:
+					state = 0;
+				}
+			}
+		}
+		spin_lock_irq(mlx4_tlock(dev));
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *srq_list =
+		&tracker->slave_list[slave].res_list[RES_SRQ];
+	struct res_srq *srq;
+	struct res_srq *tmp;
+	int state;
+	u64 in_param;
+	LIST_HEAD(tlist);
+	int srqn;
+	int err;
+
+	err = move_all_busy(dev, slave, RES_SRQ);
+	if (err)
+		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+		spin_unlock_irq(mlx4_tlock(dev));
+		if (srq->com.owner == slave) {
+			srqn = srq->com.res_id;
+			state = srq->com.from_state;
+			while (state != 0) {
+				switch (state) {
+				case RES_SRQ_ALLOCATED:
+					__mlx4_srq_free_icm(dev, srqn);
+					spin_lock_irq(mlx4_tlock(dev));
+					radix_tree_delete(&tracker->res_tree[RES_SRQ],
+							  srqn);
+					list_del(&srq->com.list);
+					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(srq);
+					state = 0;
+					break;
+
+				case RES_SRQ_HW:
+					in_param = slave;
+					err = mlx4_cmd(dev, in_param, srqn, 1,
+						       MLX4_CMD_HW2SW_SRQ,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_NATIVE);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_srqs: failed"
+							 " to move slave %d srq %d to"
+							 " SW ownership\n",
+							 slave, srqn);
+
+					atomic_dec(&srq->mtt->ref_count);
+					if (srq->cq)
+						atomic_dec(&srq->cq->ref_count);
+					state = RES_SRQ_ALLOCATED;
+					break;
+
+				default:
+					state = 0;
+				}
+			}
+		}
+		spin_lock_irq(mlx4_tlock(dev));
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *cq_list =
+		&tracker->slave_list[slave].res_list[RES_CQ];
+	struct res_cq *cq;
+	struct res_cq *tmp;
+	int state;
+	u64 in_param;
+	LIST_HEAD(tlist);
+	int cqn;
+	int err;
+
+	err = move_all_busy(dev, slave, RES_CQ);
+	if (err)
+		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+		spin_unlock_irq(mlx4_tlock(dev));
+		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+			cqn = cq->com.res_id;
+			state = cq->com.from_state;
+			while (state != 0) {
+				switch (state) {
+				case RES_CQ_ALLOCATED:
+					__mlx4_cq_free_icm(dev, cqn);
+					spin_lock_irq(mlx4_tlock(dev));
+					radix_tree_delete(&tracker->res_tree[RES_CQ],
+							  cqn);
+					list_del(&cq->com.list);
+					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(cq);
+					state = 0;
+					break;
+
+				case RES_CQ_HW:
+					in_param = slave;
+					err = mlx4_cmd(dev, in_param, cqn, 1,
+						       MLX4_CMD_HW2SW_CQ,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_NATIVE);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_cqs: failed"
+							 " to move slave %d cq %d to"
+							 " SW ownership\n",
+							 slave, cqn);
+					atomic_dec(&cq->mtt->ref_count);
+					state = RES_CQ_ALLOCATED;
+					break;
+
+				default:
+					state = 0;
+				}
+			}
+		}
+		spin_lock_irq(mlx4_tlock(dev));
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *mpt_list =
+		&tracker->slave_list[slave].res_list[RES_MPT];
+	struct res_mpt *mpt;
+	struct res_mpt *tmp;
+	int state;
+	u64 in_param;
+	LIST_HEAD(tlist);
+	int mptn;
+	int err;
+
+	err = move_all_busy(dev, slave, RES_MPT);
+	if (err)
+		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+		spin_unlock_irq(mlx4_tlock(dev));
+		if (mpt->com.owner == slave) {
+			mptn = mpt->com.res_id;
+			state = mpt->com.from_state;
+			while (state != 0) {
+				switch (state) {
+				case RES_MPT_RESERVED:
+					__mlx4_mr_release(dev, mpt->key);
+					spin_lock_irq(mlx4_tlock(dev));
+					radix_tree_delete(&tracker->res_tree[RES_MPT],
+							  mptn);
+					list_del(&mpt->com.list);
+					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(mpt);
+					state = 0;
+					break;
+
+				case RES_MPT_MAPPED:
+					__mlx4_mr_free_icm(dev, mpt->key);
+					state = RES_MPT_RESERVED;
+					break;
+
+				case RES_MPT_HW:
+					in_param = slave;
+					err = mlx4_cmd(dev, in_param, mptn, 0,
+						     MLX4_CMD_HW2SW_MPT,
+						     MLX4_CMD_TIME_CLASS_A,
+						     MLX4_CMD_NATIVE);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_mrs: failed"
+							 " to move slave %d mpt %d to"
+							 " SW ownership\n",
+							 slave, mptn);
+					if (mpt->mtt)
+						atomic_dec(&mpt->mtt->ref_count);
+					state = RES_MPT_MAPPED;
+					break;
+				default:
+					state = 0;
+				}
+			}
+		}
+		spin_lock_irq(mlx4_tlock(dev));
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker =
+		&priv->mfunc.master.res_tracker;
+	struct list_head *mtt_list =
+		&tracker->slave_list[slave].res_list[RES_MTT];
+	struct res_mtt *mtt;
+	struct res_mtt *tmp;
+	int state;
+	LIST_HEAD(tlist);
+	int base;
+	int err;
+
+	err = move_all_busy(dev, slave, RES_MTT);
+	if (err)
+		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+		spin_unlock_irq(mlx4_tlock(dev));
+		if (mtt->com.owner == slave) {
+			base = mtt->com.res_id;
+			state = mtt->com.from_state;
+			while (state != 0) {
+				switch (state) {
+				case RES_MTT_ALLOCATED:
+					__mlx4_free_mtt_range(dev, base,
+							      mtt->order);
+					spin_lock_irq(mlx4_tlock(dev));
+					radix_tree_delete(&tracker->res_tree[RES_MTT],
+							  base);
+					list_del(&mtt->com.list);
+					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(mtt);
+					state = 0;
+					break;
+
+				default:
+					state = 0;
+				}
+			}
+		}
+		spin_lock_irq(mlx4_tlock(dev));
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *eq_list =
+		&tracker->slave_list[slave].res_list[RES_EQ];
+	struct res_eq *eq;
+	struct res_eq *tmp;
+	int err;
+	int state;
+	LIST_HEAD(tlist);
+	int eqn;
+	struct mlx4_cmd_mailbox *mailbox;
+
+	err = move_all_busy(dev, slave, RES_EQ);
+	if (err)
+		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+		spin_unlock_irq(mlx4_tlock(dev));
+		if (eq->com.owner == slave) {
+			eqn = eq->com.res_id;
+			state = eq->com.from_state;
+			while (state != 0) {
+				switch (state) {
+				case RES_EQ_RESERVED:
+					spin_lock_irq(mlx4_tlock(dev));
+					radix_tree_delete(&tracker->res_tree[RES_EQ],
+							  eqn);
+					list_del(&eq->com.list);
+					spin_unlock_irq(mlx4_tlock(dev));
+					kfree(eq);
+					state = 0;
+					break;
+
+				case RES_EQ_HW:
+					mailbox = mlx4_alloc_cmd_mailbox(dev);
+					if (IS_ERR(mailbox)) {
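+						/* no mailbox available; yield and retry this EQ */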
+						cond_resched();
+						continue;
+					}
+					err = mlx4_cmd_box(dev, slave, 0,
+							   eqn & 0xff, 0,
+							   MLX4_CMD_HW2SW_EQ,
+							   MLX4_CMD_TIME_CLASS_A,
+							   MLX4_CMD_NATIVE);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_eqs: failed"
+							 " to move slave %d eq %d to"
+							 " SW ownership\n",
+							 slave, eqn);
+					mlx4_free_cmd_mailbox(dev, mailbox);
+					if (!err) {
+						atomic_dec(&eq->mtt->ref_count);
+						state = RES_EQ_RESERVED;
+					}
+					break;
+
+				default:
+					state = 0;
+				}
+			}
+		}
+		spin_lock_irq(mlx4_tlock(dev));
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+	/* VLAN */
+	rem_slave_macs(dev, slave);
+	rem_slave_qps(dev, slave);
+	rem_slave_srqs(dev, slave);
+	rem_slave_cqs(dev, slave);
+	rem_slave_mrs(dev, slave);
+	rem_slave_eqs(dev, slave);
+	rem_slave_mtts(dev, slave);
+	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7..8024982 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -45,7 +45,8 @@
 	int err = 0;
 
 	err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
-			   MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+			   MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+			   MLX4_CMD_WRAPPED);
 	if (err) {
 		mlx4_err(dev, "Sense command failed for port: %d\n", port);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9cbf3fc..2823fff 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
@@ -38,26 +40,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_srq_context {
-	__be32			state_logsize_srqn;
-	u8			logstride;
-	u8			reserved1;
-	__be16			xrcd;
-	__be32			pg_offset_cqn;
-	u32			reserved2;
-	u8			log_page_size;
-	u8			reserved3[2];
-	u8			mtt_base_addr_h;
-	__be32			mtt_base_addr_l;
-	__be32			pd;
-	__be16			limit_watermark;
-	__be16			wqe_cnt;
-	u16			reserved4;
-	__be16			wqe_counter;
-	u32			reserved5;
-	__be64			db_rec_addr;
-};
-
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 {
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@
 static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			  int srq_num)
 {
-	return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
-			MLX4_CMD_TIME_CLASS_A);
+	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+			MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,20 +77,89 @@
 {
 	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
 			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
-			    MLX4_CMD_TIME_CLASS_A);
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
 {
 	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
-			MLX4_CMD_TIME_CLASS_B);
+			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			  int srq_num)
 {
 	return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
-			    MLX4_CMD_TIME_CLASS_A);
+			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
+
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	int err;
+
+	*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+	if (*srqn == -1)
+		return -ENOMEM;
+
+	err = mlx4_table_get(dev, &srq_table->table, *srqn);
+	if (err)
+		goto err_out;
+
+	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
+	if (err)
+		goto err_put;
+	return 0;
+
+err_put:
+	mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+	mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+	return err;
+}
+
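+/* In multi-function mode SRQ allocation is proxied through the ALLOC_RES
+ * firmware command so the resource tracker can account for it; otherwise
+ * allocate directly from the local bitmap and ICM tables.
+ */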
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+				   RES_OP_RESERVE_AND_MAP,
+				   MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (!err)
+			*srqn = get_param_l(&out_param);
+
+		return err;
+	}
+	return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+	mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+	mlx4_table_put(dev, &srq_table->table, srqn);
+	mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, srqn);
+		if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+			     MLX4_CMD_FREE_RES,
+			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
+		return;
+	}
+	__mlx4_srq_free_icm(dev, srqn);
 }
 
 int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
@@ -119,23 +171,15 @@
 	u64 mtt_addr;
 	int err;
 
-	srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
-	if (srq->srqn == -1)
-		return -ENOMEM;
-
-	err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
 	if (err)
-		goto err_out;
-
-	err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
-	if (err)
-		goto err_put;
+		return err;
 
 	spin_lock_irq(&srq_table->lock);
 	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
 	spin_unlock_irq(&srq_table->lock);
 	if (err)
-		goto err_cmpt_put;
+		goto err_icm;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@
 	radix_tree_delete(&srq_table->tree, srq->srqn);
 	spin_unlock_irq(&srq_table->lock);
 
-err_cmpt_put:
-	mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
-	mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
-	mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+	mlx4_srq_free_icm(dev, srq->srqn);
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@
 		complete(&srq->free);
 	wait_for_completion(&srq->free);
 
-	mlx4_table_put(dev, &srq_table->table, srq->srqn);
-	mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+	mlx4_srq_free_icm(dev, srq->srqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_free);
 
@@ -245,6 +281,8 @@
 
 	spin_lock_init(&srq_table->lock);
 	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
 			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@
 
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d10c2e1..1ea811c 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,6 +42,8 @@
 	select NET_CORE
 	select MII
 	select CRC32
+	select MISC_DEVICES
+	select EEPROM_93CX6
 	---help---
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 4a6ae05..75ec87a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1264,18 +1264,7 @@
 	.remove		= ks8842_remove,
 };
 
-static int __init ks8842_init(void)
-{
-	return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
-	platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
 
 MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f56743a..6b35e7d 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -22,6 +22,7 @@
 #include <linux/cache.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
 
 #include <linux/spi/spi.h>
 
@@ -82,6 +83,7 @@
  * @rc_ccr: Cached copy of KS_CCR.
  * @rc_rxqcr: Cached copy of KS_RXQCR.
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
  *
  * The @lock ensures that the chip is protected when certain operations are
  * in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@
 	struct spi_message	spi_msg2;
 	struct spi_transfer	spi_xfer1;
 	struct spi_transfer	spi_xfer2[2];
+
+	struct eeprom_93cx6	eeprom;
 };
 
 static int msg_enable;
@@ -343,6 +347,26 @@
 }
 
 /**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+	unsigned pmecr;
+
+	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+	pmecr = ks8851_rdreg16(ks, KS_PMECR);
+	pmecr &= ~PMECR_PM_MASK;
+	pmecr |= pwrmode;
+
+	ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
  * ks8851_write_mac_addr - write mac address to device registers
  * @dev: The network device
  *
@@ -358,8 +382,15 @@
 
 	mutex_lock(&ks->lock);
 
+	/*
+	 * Wake up chip in case it was powered off when stopped; otherwise,
+	 * the first write to the MAC address does not take effect.
+	 */
+	ks8851_set_powermode(ks, PMECR_PM_NORMAL);
 	for (i = 0; i < ETH_ALEN; i++)
 		ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+	if (!netif_running(dev))
+		ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
 
 	mutex_unlock(&ks->lock);
 
@@ -367,21 +398,47 @@
 }
 
 /**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+ */
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	int i;
+
+	mutex_lock(&ks->lock);
+
+	for (i = 0; i < ETH_ALEN; i++)
+		dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+	mutex_unlock(&ks->lock);
+}
+
+/**
  * ks8851_init_mac - initialise the mac address
  * @ks: The device structure
  *
  * Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
  * to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
  */
 static void ks8851_init_mac(struct ks8851_net *ks)
 {
 	struct net_device *dev = ks->netdev;
 
+	/* first, try reading what we've got already */
+	if (ks->rc_ccr & CCR_EEPROM) {
+		ks8851_read_mac_addr(dev);
+		if (is_valid_ether_addr(dev->dev_addr))
+			return;
+
+		netdev_err(ks->netdev, "invalid mac address read %pM\n",
+				dev->dev_addr);
+	}
+
 	random_ether_addr(dev->dev_addr);
 	ks8851_write_mac_addr(dev);
 }
@@ -739,26 +796,6 @@
 }
 
 /**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
-	unsigned pmecr;
-
-	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
-	pmecr = ks8851_rdreg16(ks, KS_PMECR);
-	pmecr &= ~PMECR_PM_MASK;
-	pmecr |= pwrmode;
-
-	ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
  * ks8851_net_open - open network device
  * @dev: The network device being opened.
  *
@@ -1038,234 +1075,6 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-/* Companion eeprom access */
-
-enum {	/* EEPROM programming states */
-	EEPROM_CONTROL,
-	EEPROM_ADDRESS,
-	EEPROM_DATA,
-	EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- *  - on period start: set clock high and read value on bus
- *  - on period / 2: set clock low and program value on bus
- *  - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
-	struct ks8851_net *ks = netdev_priv(dev);
-	int eepcr;
-	int ctrl = EEPROM_OP_READ;
-	int state = EEPROM_CONTROL;
-	int bit_count = EEPROM_OP_LEN - 1;
-	unsigned int data = 0;
-	int dummy;
-	unsigned int addr_len;
-
-	addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
-	/* start transaction: chip select high, authorize write */
-	mutex_lock(&ks->lock);
-	eepcr = EEPCR_EESA | EEPCR_EESRWA;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	eepcr |= EEPCR_EECS;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	mutex_unlock(&ks->lock);
-
-	while (state != EEPROM_COMPLETE) {
-		/* falling clock period starts... */
-		/* set EED_IO pin for control and address */
-		eepcr &= ~EEPCR_EEDO;
-		switch (state) {
-		case EEPROM_CONTROL:
-			eepcr |= ((ctrl >> bit_count) & 1) << 2;
-			if (bit_count-- <= 0) {
-				bit_count = addr_len - 1;
-				state = EEPROM_ADDRESS;
-			}
-			break;
-		case EEPROM_ADDRESS:
-			eepcr |= ((addr >> bit_count) & 1) << 2;
-			bit_count--;
-			break;
-		case EEPROM_DATA:
-			/* Change to receive mode */
-			eepcr &= ~EEPCR_EESRWA;
-			break;
-		}
-
-		/* lower clock  */
-		eepcr &= ~EEPCR_EESCK;
-
-		mutex_lock(&ks->lock);
-		ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-		mutex_unlock(&ks->lock);
-
-		/* waitread period / 2 */
-		udelay(EEPROM_SK_PERIOD / 2);
-
-		/* rising clock period starts... */
-
-		/* raise clock */
-		mutex_lock(&ks->lock);
-		eepcr |= EEPCR_EESCK;
-		ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-		mutex_unlock(&ks->lock);
-
-		/* Manage read */
-		switch (state) {
-		case EEPROM_ADDRESS:
-			if (bit_count < 0) {
-				bit_count = EEPROM_DATA_LEN - 1;
-				state = EEPROM_DATA;
-			}
-			break;
-		case EEPROM_DATA:
-			mutex_lock(&ks->lock);
-			dummy = ks8851_rdreg16(ks, KS_EEPCR);
-			mutex_unlock(&ks->lock);
-			data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
-			if (bit_count-- <= 0)
-				state = EEPROM_COMPLETE;
-			break;
-		}
-
-		/* wait period / 2 */
-		udelay(EEPROM_SK_PERIOD / 2);
-	}
-
-	/* close transaction */
-	mutex_lock(&ks->lock);
-	eepcr &= ~EEPCR_EECS;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	eepcr = 0;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	mutex_unlock(&ks->lock);
-
-	return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- *  - on period start: set clock high
- *  - on period / 2: set clock low and program value on bus
- *  - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
-					unsigned int addr, unsigned int data)
-{
-	struct ks8851_net *ks = netdev_priv(dev);
-	int eepcr;
-	int state = EEPROM_CONTROL;
-	int bit_count = EEPROM_OP_LEN - 1;
-	unsigned int addr_len;
-
-	addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
-	switch (op) {
-	case EEPROM_OP_EWEN:
-		addr = 0x30;
-	break;
-	case EEPROM_OP_EWDS:
-		addr = 0;
-		break;
-	}
-
-	/* start transaction: chip select high, authorize write */
-	mutex_lock(&ks->lock);
-	eepcr = EEPCR_EESA | EEPCR_EESRWA;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	eepcr |= EEPCR_EECS;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	mutex_unlock(&ks->lock);
-
-	while (state != EEPROM_COMPLETE) {
-		/* falling clock period starts... */
-		/* set EED_IO pin for control and address */
-		eepcr &= ~EEPCR_EEDO;
-		switch (state) {
-		case EEPROM_CONTROL:
-			eepcr |= ((op >> bit_count) & 1) << 2;
-			if (bit_count-- <= 0) {
-				bit_count = addr_len - 1;
-				state = EEPROM_ADDRESS;
-			}
-			break;
-		case EEPROM_ADDRESS:
-			eepcr |= ((addr >> bit_count) & 1) << 2;
-			if (bit_count-- <= 0) {
-				if (op == EEPROM_OP_WRITE) {
-					bit_count = EEPROM_DATA_LEN - 1;
-					state = EEPROM_DATA;
-				} else {
-					state = EEPROM_COMPLETE;
-				}
-			}
-			break;
-		case EEPROM_DATA:
-			eepcr |= ((data >> bit_count) & 1) << 2;
-			if (bit_count-- <= 0)
-				state = EEPROM_COMPLETE;
-			break;
-		}
-
-		/* lower clock  */
-		eepcr &= ~EEPCR_EESCK;
-
-		mutex_lock(&ks->lock);
-		ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-		mutex_unlock(&ks->lock);
-
-		/* wait period / 2 */
-		udelay(EEPROM_SK_PERIOD / 2);
-
-		/* rising clock period starts... */
-
-		/* raise clock */
-		eepcr |= EEPCR_EESCK;
-		mutex_lock(&ks->lock);
-		ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-		mutex_unlock(&ks->lock);
-
-		/* wait period / 2 */
-		udelay(EEPROM_SK_PERIOD / 2);
-	}
-
-	/* close transaction */
-	mutex_lock(&ks->lock);
-	eepcr &= ~EEPCR_EECS;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	eepcr = 0;
-	ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-	mutex_unlock(&ks->lock);
-
-}
-
 /* ethtool support */
 
 static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@
 	return mii_nway_restart(&ks->mii);
 }
 
-static int ks8851_get_eeprom_len(struct net_device *dev)
+/* EEPROM support */
+
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
+{
+	struct ks8851_net *ks = ee->data;
+	unsigned val;
+
+	val = ks8851_rdreg16(ks, KS_EEPCR);
+
+	ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+	ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+	ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
+
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+	struct ks8851_net *ks = ee->data;
+	unsigned val = EEPCR_EESA;	/* default - eeprom access on */
+
+	if (ee->drive_data)
+		val |= EEPCR_EESRWA;
+	if (ee->reg_data_in)
+		val |= EEPCR_EEDO;
+	if (ee->reg_data_clock)
+		val |= EEPCR_EESCK;
+	if (ee->reg_chip_select)
+		val |= EEPCR_EECS;
+
+	ks8851_wrreg16(ks, KS_EEPCR, val);
+}
+
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device. On success the driver lock is held until
+ * ks8851_eeprom_release() is called.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+	if (!(ks->rc_ccr & CCR_EEPROM))
+		return -ENOENT;
+
+	mutex_lock(&ks->lock);
+
+	/* start with clock low, cs high */
+	ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+	return 0;
+}
+
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+	unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
+
+	ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+	mutex_unlock(&ks->lock);
+}
+
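+/* magic value checked on ethtool EEPROM writes and reported on reads */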
+#define KS_EEPROM_MAGIC (0x00008851)
+
+static int ks8851_set_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *ee, u8 *data)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
-	return ks->eeprom_size;
+	int offset = ee->offset;
+	int len = ee->len;
+	u16 tmp;
+
+	/* we currently only support writing a single byte */
+	if (len != 1)
+		return -EINVAL;
+
+	if (ee->magic != KS_EEPROM_MAGIC)
+		return -EINVAL;
+
+	if (ks8851_eeprom_claim(ks))
+		return -ENOENT;
+
+	eeprom_93cx6_wren(&ks->eeprom, true);
+
+	/* ethtool currently only supports writing bytes, which means
+	 * we have to read/modify/write our 16bit EEPROMs */
+
+	eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+	if (offset & 1) {
+		tmp &= 0xff;
+		tmp |= *data << 8;
+	} else {
+		tmp &= 0xff00;
+		tmp |= *data;
+	}
+
+	eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+	eeprom_93cx6_wren(&ks->eeprom, false);
+
+	ks8851_eeprom_release(ks);
+
+	return 0;
 }
 
 static int ks8851_get_eeprom(struct net_device *dev,
-			    struct ethtool_eeprom *eeprom, u8 *bytes)
+			     struct ethtool_eeprom *ee, u8 *data)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
-	u16 *eeprom_buff;
-	int first_word;
-	int last_word;
-	int ret_val = 0;
-	u16 i;
+	int offset = ee->offset;
+	int len = ee->len;
 
-	if (eeprom->len == 0)
+	/* must be 2 byte aligned */
+	if (len & 1 || offset & 1)
 		return -EINVAL;
 
-	if (eeprom->len > ks->eeprom_size)
-		return -EINVAL;
+	if (ks8851_eeprom_claim(ks))
+		return -ENOENT;
 
-	eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+	ee->magic = KS_EEPROM_MAGIC;
 
-	first_word = eeprom->offset >> 1;
-	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+	ks8851_eeprom_release(ks);
 
-	eeprom_buff = kmalloc(sizeof(u16) *
-			(last_word - first_word + 1), GFP_KERNEL);
-	if (!eeprom_buff)
-		return -ENOMEM;
-
-	for (i = 0; i < last_word - first_word + 1; i++)
-		eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
-
-	/* Device's eeprom is little-endian, word addressable */
-	for (i = 0; i < last_word - first_word + 1; i++)
-		le16_to_cpus(&eeprom_buff[i]);
-
-	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
-	kfree(eeprom_buff);
-
-	return ret_val;
+	return 0;
 }
 
-static int ks8851_set_eeprom(struct net_device *dev,
-			    struct ethtool_eeprom *eeprom, u8 *bytes)
+static int ks8851_get_eeprom_len(struct net_device *dev)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
-	u16 *eeprom_buff;
-	void *ptr;
-	int max_len;
-	int first_word;
-	int last_word;
-	int ret_val = 0;
-	u16 i;
 
-	if (eeprom->len == 0)
-		return -EOPNOTSUPP;
-
-	if (eeprom->len > ks->eeprom_size)
-		return -EINVAL;
-
-	if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
-		return -EFAULT;
-
-	first_word = eeprom->offset >> 1;
-	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-	max_len = (last_word - first_word + 1) * 2;
-	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-	if (!eeprom_buff)
-		return -ENOMEM;
-
-	ptr = (void *)eeprom_buff;
-
-	if (eeprom->offset & 1) {
-		/* need read/modify/write of first changed EEPROM word */
-		/* only the second byte of the word is being modified */
-		eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
-		ptr++;
-	}
-	if ((eeprom->offset + eeprom->len) & 1)
-		/* need read/modify/write of last changed EEPROM word */
-		/* only the first byte of the word is being modified */
-		eeprom_buff[last_word - first_word] =
-					ks8851_eeprom_read(dev, last_word);
-
-
-	/* Device's eeprom is little-endian, word addressable */
-	le16_to_cpus(&eeprom_buff[0]);
-	le16_to_cpus(&eeprom_buff[last_word - first_word]);
-
-	memcpy(ptr, bytes, eeprom->len);
-
-	for (i = 0; i < last_word - first_word + 1; i++)
-		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
-
-	ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
-
-	for (i = 0; i < last_word - first_word + 1; i++) {
-		ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
-							eeprom_buff[i]);
-		mdelay(EEPROM_WRITE_TIME);
-	}
-
-	ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
-
-	kfree(eeprom_buff);
-	return ret_val;
+	/* currently we assume a 93C46 is attached, so return 128 */
+	return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
 }
 
 static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@
 	spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
 	spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
 
+	/* setup EEPROM state */
+
+	ks->eeprom.data = ks;
+	ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+	ks->eeprom.register_read = ks8851_eeprom_regread;
+	ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
 	/* setup mii state */
 	ks->mii.dev		= ndev;
 	ks->mii.phy_id		= 1,
@@ -1674,9 +1516,10 @@
 		goto err_netdev;
 	}
 
-	netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+	netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
 		    CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
-		    ndev->dev_addr, ndev->irq);
+		    ndev->dev_addr, ndev->irq,
+		    ks->rc_ccr & CCR_EEPROM ? "has" : "no");
 
 	return 0;
 
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 537fb06e..b0fae86 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -16,7 +16,7 @@
 #define CCR_32PIN				(1 << 0)
 
 /* MAC address registers */
-#define KS_MAR(_m)				0x15 - (_m)
+#define KS_MAR(_m)				(0x15 - (_m))
 #define KS_MARL					0x10
 #define KS_MARM					0x12
 #define KS_MARH					0x14
@@ -27,22 +27,11 @@
 #define KS_EEPCR				0x22
 #define EEPCR_EESRWA				(1 << 5)
 #define EEPCR_EESA				(1 << 4)
-#define EEPCR_EESB_OFFSET			3
-#define EEPCR_EESB				(1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB				(1 << 3)
 #define EEPCR_EEDO				(1 << 2)
 #define EEPCR_EESCK				(1 << 1)
 #define EEPCR_EECS				(1 << 0)
 
-#define EEPROM_OP_LEN				3	/* bits:*/
-#define EEPROM_OP_READ				0x06
-#define EEPROM_OP_EWEN				0x04
-#define EEPROM_OP_WRITE				0x05
-#define EEPROM_OP_EWDS				0x14
-
-#define EEPROM_DATA_LEN				16	/* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME			4	/* wrt ack time in ms */
-#define EEPROM_SK_PERIOD			400	/* in us */
-
 #define KS_MBIR					0x24
 #define MBIR_TXMBF				(1 << 12)
 #define MBIR_TXMBFA				(1 << 11)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index d19c849..e58e78e 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1500,8 +1500,7 @@
 	ks->all_mcast = 0;
 	ks->mcast_lst_size = 0;
 
-	ks->frame_head_info = (struct type_frame_head *) \
-		kmalloc(MHEADER_SIZE, GFP_KERNEL);
+	ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
 	if (!ks->frame_head_info) {
 		pr_err("Error: Fail to allocate frame memory\n");
 		return false;
@@ -1659,18 +1658,7 @@
 	.remove = __devexit_p(ks8851_remove),
 };
 
-static int __init ks8851_init(void)
-{
-	return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
-	platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
 
 MODULE_DESCRIPTION("KS8851 MLL Network driver");
 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7ece990..6ed09a8 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -743,8 +743,7 @@
 /* Change default LED mode. */
 #define SET_DEFAULT_LED			LED_SPEED_DUPLEX_ACT
 
-#define MAC_ADDR_LEN			6
-#define MAC_ADDR_ORDER(i)		(MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i)		(ETH_ALEN - 1 - (i))
 
 #define MAX_ETHERNET_BODY_SIZE		1500
 #define ETHERNET_HEADER_SIZE		14
@@ -1043,7 +1042,7 @@
  * @valid:	Valid setting indicating the entry is being used.
  */
 struct ksz_mac_table {
-	u8 mac_addr[MAC_ADDR_LEN];
+	u8 mac_addr[ETH_ALEN];
 	u16 vid;
 	u8 fid;
 	u8 ports;
@@ -1187,8 +1186,8 @@
 	u8 diffserv[DIFFSERV_ENTRIES];
 	u8 p_802_1p[PRIO_802_1P_ENTRIES];
 
-	u8 br_addr[MAC_ADDR_LEN];
-	u8 other_addr[MAC_ADDR_LEN];
+	u8 br_addr[ETH_ALEN];
+	u8 other_addr[ETH_ALEN];
 
 	u8 broad_per;
 	u8 member;
@@ -1292,14 +1291,14 @@
 	int tx_int_mask;
 	int tx_size;
 
-	u8 perm_addr[MAC_ADDR_LEN];
-	u8 override_addr[MAC_ADDR_LEN];
-	u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+	u8 perm_addr[ETH_ALEN];
+	u8 override_addr[ETH_ALEN];
+	u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
 	u8 addr_list_size;
 	u8 mac_override;
 	u8 promiscuous;
 	u8 all_multi;
-	u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+	u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
 	u8 multi_bits[HW_MULTICAST_SIZE];
 	u8 multi_list_size;
 
@@ -3654,7 +3653,7 @@
 	static const u8 mask[] = { 0x3F };
 	static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 
-	hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+	hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
 }
 
 /**
@@ -3689,7 +3688,7 @@
 {
 	static const u8 mask[] = { 0x3F };
 
-	hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+	hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
 }
 
 /**
@@ -4055,7 +4054,7 @@
 {
 	int i;
 
-	for (i = 0; i < MAC_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
 			hw->io + KS884X_ADDR_0_OFFSET + i);
 
@@ -4072,17 +4071,16 @@
 {
 	int i;
 
-	for (i = 0; i < MAC_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
 			KS884X_ADDR_0_OFFSET + i);
 
 	if (!hw->mac_override) {
-		memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+		memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
 		if (empty_addr(hw->override_addr)) {
-			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
-				MAC_ADDR_LEN);
+			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
 			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
-				MAC_ADDR_LEN);
+			       ETH_ALEN);
 			hw->override_addr[5] += hw->id;
 			hw_set_addr(hw);
 		}
@@ -4130,16 +4128,16 @@
 	int i;
 	int j = ADDITIONAL_ENTRIES;
 
-	if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+	if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
 		return 0;
 	for (i = 0; i < hw->addr_list_size; i++) {
-		if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+		if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
 			return 0;
 		if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
 			j = i;
 	}
 	if (j < ADDITIONAL_ENTRIES) {
-		memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+		memcpy(hw->address[j], mac_addr, ETH_ALEN);
 		hw_ena_add_addr(hw, j, hw->address[j]);
 		return 0;
 	}
@@ -4151,8 +4149,8 @@
 	int i;
 
 	for (i = 0; i < hw->addr_list_size; i++) {
-		if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
-			memset(hw->address[i], 0, MAC_ADDR_LEN);
+		if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+			memset(hw->address[i], 0, ETH_ALEN);
 			writel(0, hw->io + ADD_ADDR_INCR * i +
 				KS_ADD_ADDR_0_HI);
 			return 0;
@@ -4382,12 +4380,10 @@
  */
 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
 {
-	desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
-		GFP_KERNEL);
+	desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+				  GFP_KERNEL);
 	if (!desc_info->ring)
 		return 1;
-	memset((void *) desc_info->ring, 0,
-		sizeof(struct ksz_desc) * desc_info->alloc);
 	hw_init_desc(desc_info, transmit);
 	return 0;
 }
@@ -5676,7 +5672,7 @@
 		hw_del_addr(hw, dev->dev_addr);
 	else {
 		hw->mac_override = 1;
-		memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+		memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
 	}
 
 	memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@
 		netdev_for_each_mc_addr(ha, dev) {
 			if (i >= MAX_MULTICAST_LIST)
 				break;
-			memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
 		}
 		hw->multi_list_size = (u8) i;
 		hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@
 	struct dev_priv *priv = netdev_priv(dev);
 	struct dev_info *hw_priv = priv->adapter;
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(hw_priv->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+		sizeof(info->bus_info));
 }
 
 /**
@@ -6587,7 +6584,8 @@
  *
  * Return 0 if successful; otherwise an error code.
  */
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct dev_priv *priv = netdev_priv(dev);
 	struct dev_info *hw_priv = priv->adapter;
@@ -6609,7 +6607,7 @@
 	return 0;
 }
 
-static struct ethtool_ops netdev_ethtool_ops = {
+static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_settings		= netdev_get_settings,
 	.set_settings		= netdev_set_settings,
 	.nway_reset		= netdev_nway_reset,
@@ -6860,7 +6858,7 @@
 	int num;
 
 	i = j = num = got_num = 0;
-	while (j < MAC_ADDR_LEN) {
+	while (j < ETH_ALEN) {
 		if (macaddr[i]) {
 			int digit;
 
@@ -6891,7 +6889,7 @@
 		}
 		i++;
 	}
-	if (MAC_ADDR_LEN == j) {
+	if (ETH_ALEN == j) {
 		if (MAIN_PORT == port)
 			hw_priv->hw.mac_override = 1;
 	}
@@ -7058,7 +7056,7 @@
 
 	/* Multiple device interfaces mode requires a second MAC address. */
 	if (hw->dev_count > 1) {
-		memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
 		read_other_addr(hw);
 		if (mac1addr[0] != ':')
 			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@
 		dev->irq = pdev->irq;
 		if (MAIN_PORT == i)
 			memcpy(dev->dev_addr, hw_priv->hw.override_addr,
-				MAC_ADDR_LEN);
+			       ETH_ALEN);
 		else {
-			memcpy(dev->dev_addr, sw->other_addr,
-				MAC_ADDR_LEN);
+			memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
 			if (!memcmp(sw->other_addr, hw->override_addr,
-					MAC_ADDR_LEN))
+				    ETH_ALEN))
 				dev->dev_addr[5] += port->first_port;
 		}
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 0778edc..20b72ec 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1491,7 +1491,7 @@
 	 * access to avoid theoretical race condition with functions that
 	 * change NETIF_F_LRO flag at runtime.
 	 */
-	bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+	bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
 
 	while (rx_done->entry[idx].length != 0 && work_done < budget) {
 		length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@
 	return 0;
 }
 
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	if (!(features & NETIF_F_RXCSUM))
 		features &= ~NETIF_F_LRO;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index 11be150..b7fc26c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -356,7 +356,7 @@
 	MXGEFW_CMD_GET_DCA_OFFSET = 56,
 	/* offset of dca control for WDMAs */
 
-	/* VMWare NetQueue commands */
+	/* VMware NetQueue commands */
 	MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
 	MXGEFW_CMD_NETQ_ADD_FILTER = 58,
 	/* data0 = filter_id << 16 | queue << 8 | type */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index fc7c6a9..5b89fd3 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -294,15 +294,4 @@
 	},
 };
 
-static int __init jazz_sonic_init_module(void)
-{
-	return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
-	platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a2eacbf..f1b8556 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -142,8 +142,7 @@
 {
 	int retval;
 
-	retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST,
-				"sonic", dev);
+	retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
 	if (retval) {
 		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
 				dev->name, dev->irq);
@@ -154,8 +153,8 @@
 	 * rupt as well, which must prevent re-entrance of the sonic handler.
 	 */
 	if (dev->irq == IRQ_AUTO_3) {
-		retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt,
-					IRQ_FLG_FAST, "sonic", dev);
+		retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+				     "sonic", dev);
 		if (retval) {
 			printk(KERN_ERR "%s: unable to get IRQ %d.\n",
 					dev->name, IRQ_NUBUS_9);
@@ -643,15 +642,4 @@
 	},
 };
 
-static int __init mac_sonic_init_module(void)
-{
-	return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
-	platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 6ca047a..ac7b16b 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2555,9 +2555,9 @@
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
-	strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
-	strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2b8f64d..c24b46c 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1364,9 +1364,9 @@
 static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
 {
 	struct ns83820 *dev = PRIV(ndev);
-	strcpy(info->driver, "ns83820");
-	strcpy(info->version, VERSION);
-	strcpy(info->bus_info, pci_name(dev->pci_dev));
+	strlcpy(info->driver, "ns83820", sizeof(info->driver));
+	strlcpy(info->version, VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
 }
 
 static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ccf61b9..e01c0a0 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -319,15 +319,4 @@
 	},
 };
 
-static int __init xtsonic_init(void)
-{
-	return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
-	platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c27fb3d..97f63e1 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5391,10 +5391,9 @@
 {
 	struct s2io_nic *sp = netdev_priv(dev);
 
-	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
-	strncpy(info->version, s2io_driver_version, sizeof(info->version));
-	strncpy(info->fw_version, "", sizeof(info->fw_version));
-	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
 	info->regdump_len = XENA_REG_SPACE;
 	info->eedump_len = XENA_EEPROM_SPACE;
 }
@@ -6616,10 +6615,10 @@
 	}
 }
 
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct s2io_nic *sp = netdev_priv(dev);
-	u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
 
 	if (changed && netif_running(dev)) {
 		int rc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a83197d..ef76725 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2662,9 +2662,10 @@
 	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
 }
 
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	/* Enabling RTH requires some of the logic in vxge_device_register and a
 	 * vpath reset.  Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@
 	return features;
 }
 
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct vxgedev *vdev = netdev_priv(dev);
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if (!(changed & NETIF_F_RXHASH))
 		return 0;
@@ -3304,7 +3305,7 @@
  *
  * Add the vlan id to the devices vlan id table
  */
-static void
+static int
 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@
 		vxge_hw_vpath_vid_add(vpath->handle, vid);
 	}
 	set_bit(vid, vdev->active_vlans);
+	return 0;
 }
 
 /**
@@ -3328,7 +3330,7 @@
  *
  * Remove the vlan id from the device's vlan id table
  */
-static void
+static int
 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d  Exiting...", __func__, __LINE__);
 	clear_bit(vid, vdev->active_vlans);
+	return 0;
 }
 
 static const struct net_device_ops vxge_netdev_ops = {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index f1bfb8f..b75a049 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1103,18 +1103,7 @@
 	},
 };
 
-static int __init w90p910_ether_init(void)
-{
-	return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
-	platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1c61d36..4c4e7f4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -65,7 +65,8 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/prefetch.h>
-#include  <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
 #include <asm/system.h>
@@ -736,6 +737,16 @@
  * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *	needs netdev_priv(dev)->lock :-(
  * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ *   integer wraparound in the NIC stats registers, at low frequency
+ *   (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are accessed only through 64b synchronization points
+ * and are not subject to other synchronization techniques (single
+ * update thread on the TX or RX paths).
  */
 
 /* in dev: base, irq */
@@ -745,9 +756,10 @@
 	struct net_device *dev;
 	struct napi_struct napi;
 
-	/* General data:
-	 * Locking: spin_lock(&np->lock); */
+	/* hardware stats are updated in syscall and timer */
+	spinlock_t hwstats_lock;
 	struct nv_ethtool_stats estats;
+
 	int in_shutdown;
 	u32 linkspeed;
 	int duplex;
@@ -798,6 +810,13 @@
 	u32 nic_poll_irq;
 	int rx_ring_size;
 
+	/* RX software stats */
+	struct u64_stats_sync swstats_rx_syncp;
+	u64 stat_rx_packets;
+	u64 stat_rx_bytes; /* not always available in HW */
+	u64 stat_rx_missed_errors;
+	u64 stat_rx_dropped;
+
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
@@ -820,6 +839,12 @@
 	struct nv_skb_map *tx_end_flip;
 	int tx_stop;
 
+	/* TX software stats */
+	struct u64_stats_sync swstats_tx_syncp;
+	u64 stat_tx_packets; /* not always available in HW */
+	u64 stat_tx_bytes;
+	u64 stat_tx_dropped;
+
 	/* msi/msi-x fields */
 	u32 msi_flags;
 	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -892,6 +917,11 @@
 static int dma_64bit = NV_DMA_64BIT_ENABLED;
 
 /*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
+/*
  * Crossover Detection
  * Realtek 8201 phy + some OEM boards do not work properly.
  */
@@ -1630,11 +1660,19 @@
 	pci_push(base);
 }
 
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
+	/* If it happens that this is run in top-half context, then
+	 * replace the spin_lock of hwstats_lock with
+	 * spin_lock_irqsave() in calling functions. */
+	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+	assert_spin_locked(&np->hwstats_lock);
+
+	/* query hardware */
 	np->estats.tx_bytes += readl(base + NvRegTxCnt);
 	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
 	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,40 +1731,73 @@
 }
 
 /*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
  * Called with read_lock(&dev_base_lock) held for read -
  * only synchronized against unregister_netdevice.
  */
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+	__acquires(&netdev_priv(dev)->hwstats_lock)
+	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
+	unsigned int syncp_start;
+
+	/*
+	 * Note: because HW stats are not always available and for
+	 * consistency reasons, the following ifconfig stats are
+	 * managed by software: rx_bytes, tx_bytes, rx_packets and
+	 * tx_packets. The related hardware stats reported by ethtool
+	 * should be equivalent to these ifconfig stats, with 4
+	 * additional bytes per packet (Ethernet FCS CRC), except for
+	 * tx_packets when TSO kicks in.
+	 */
+
+	/* software stats */
+	do {
+		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+		storage->rx_packets       = np->stat_rx_packets;
+		storage->rx_bytes         = np->stat_rx_bytes;
+		storage->rx_dropped       = np->stat_rx_dropped;
+		storage->rx_missed_errors = np->stat_rx_missed_errors;
+	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+	do {
+		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+		storage->tx_packets = np->stat_tx_packets;
+		storage->tx_bytes   = np->stat_tx_bytes;
+		storage->tx_dropped = np->stat_tx_dropped;
+	} while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
 
 	/* If the nic supports hw counters then retrieve latest values */
-	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
-		nv_get_hw_stats(dev);
+	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+		spin_lock_bh(&np->hwstats_lock);
 
-		/*
-		 * Note: because HW stats are not always available and
-		 * for consistency reasons, the following ifconfig
-		 * stats are managed by software: rx_bytes, tx_bytes,
-		 * rx_packets and tx_packets. The related hardware
-		 * stats reported by ethtool should be equivalent to
-		 * these ifconfig stats, with 4 additional bytes per
-		 * packet (Ethernet FCS CRC).
-		 */
+		nv_update_stats(dev);
 
-		/* copy to net_device stats */
-		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
-		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
-		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
-		dev->stats.rx_over_errors = np->estats.rx_over_errors;
-		dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
-		dev->stats.rx_errors = np->estats.rx_errors_total;
-		dev->stats.tx_errors = np->estats.tx_errors_total;
+		/* generic stats */
+		storage->rx_errors = np->estats.rx_errors_total;
+		storage->tx_errors = np->estats.tx_errors_total;
+
+		/* meaningful only when NIC supports stats v3 */
+		storage->multicast = np->estats.rx_multicast;
+
+		/* detailed rx_errors */
+		storage->rx_length_errors = np->estats.rx_length_error;
+		storage->rx_over_errors   = np->estats.rx_over_errors;
+		storage->rx_crc_errors    = np->estats.rx_crc_errors;
+		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
+		storage->rx_fifo_errors   = np->estats.rx_drop_frame;
+
+		/* detailed tx_errors */
+		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
+
+		spin_unlock_bh(&np->hwstats_lock);
 	}
 
-	return &dev->stats;
+	return storage;
 }
 
 /*
@@ -1759,8 +1830,12 @@
 				np->put_rx.orig = np->first_rx.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
-		} else
+		} else {
+			u64_stats_update_begin(&np->swstats_rx_syncp);
+			np->stat_rx_dropped++;
+			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
+		}
 	}
 	return 0;
 }
@@ -1791,8 +1866,12 @@
 				np->put_rx.ex = np->first_rx.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
-		} else
+		} else {
+			u64_stats_update_begin(&np->swstats_rx_syncp);
+			np->stat_rx_dropped++;
+			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
+		}
 	}
 	return 0;
 }
@@ -1849,6 +1928,7 @@
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
 	np->tx_change_owner = NULL;
 	np->tx_end_flip = NULL;
@@ -1927,8 +2007,11 @@
 			np->tx_ring.ex[i].bufhigh = 0;
 			np->tx_ring.ex[i].buflow = 0;
 		}
-		if (nv_release_txskb(np, &np->tx_skb[i]))
-			dev->stats.tx_dropped++;
+		if (nv_release_txskb(np, &np->tx_skb[i])) {
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+		}
 		np->tx_skb[i].dma = 0;
 		np->tx_skb[i].dma_len = 0;
 		np->tx_skb[i].dma_single = 0;
@@ -2194,6 +2277,9 @@
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+	netdev_sent_queue(np->dev, skb->len);
+
 	np->put_tx.orig = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2338,6 +2424,9 @@
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+	netdev_sent_queue(np->dev, skb->len);
+
 	np->put_tx.ex = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2375,6 +2464,7 @@
 	u32 flags;
 	int tx_work = 0;
 	struct ring_desc *orig_get_tx = np->get_tx.orig;
+	unsigned int bytes_compl = 0;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2385,12 +2475,16 @@
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
 				if (flags & NV_TX_ERROR) {
-					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+					if ((flags & NV_TX_RETRYERROR)
+					    && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
 				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+					u64_stats_update_begin(&np->swstats_tx_syncp);
+					np->stat_tx_packets++;
+					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+					u64_stats_update_end(&np->swstats_tx_syncp);
 				}
+				bytes_compl += np->get_tx_ctx->skb->len;
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
 				tx_work++;
@@ -2398,12 +2492,16 @@
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
 				if (flags & NV_TX2_ERROR) {
-					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+					if ((flags & NV_TX2_RETRYERROR)
+					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
 				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+					u64_stats_update_begin(&np->swstats_tx_syncp);
+					np->stat_tx_packets++;
+					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+					u64_stats_update_end(&np->swstats_tx_syncp);
 				}
+				bytes_compl += np->get_tx_ctx->skb->len;
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
 				tx_work++;
@@ -2414,6 +2512,9 @@
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
+
+	netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
@@ -2427,6 +2528,7 @@
 	u32 flags;
 	int tx_work = 0;
 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+	unsigned long bytes_cleaned = 0;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2436,17 +2538,21 @@
 
 		if (flags & NV_TX2_LASTPACKET) {
 			if (flags & NV_TX2_ERROR) {
-				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+				if ((flags & NV_TX2_RETRYERROR)
+				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
 						nv_gear_backoff_reseed(dev);
 					else
 						nv_legacybackoff_reseed(dev);
 				}
 			} else {
-				dev->stats.tx_packets++;
-				dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_packets++;
+				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 
+			bytes_cleaned += np->get_tx_ctx->skb->len;
 			dev_kfree_skb_any(np->get_tx_ctx->skb);
 			np->get_tx_ctx->skb = NULL;
 			tx_work++;
@@ -2454,11 +2560,15 @@
 			if (np->tx_limit)
 				nv_tx_flip_ownership(dev);
 		}
+
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->first_tx.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
+
+	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
 	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
@@ -2477,56 +2587,64 @@
 	u32 status;
 	union ring_type put_tx;
 	int saved_tx_limit;
-	int i;
 
 	if (np->msi_flags & NV_MSI_X_ENABLED)
 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
 	else
 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-	netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
 
-	netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
-	netdev_info(dev, "Dumping tx registers\n");
-	for (i = 0; i <= np->register_size; i += 32) {
-		netdev_info(dev,
-			    "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
-			    i,
-			    readl(base + i + 0), readl(base + i + 4),
-			    readl(base + i + 8), readl(base + i + 12),
-			    readl(base + i + 16), readl(base + i + 20),
-			    readl(base + i + 24), readl(base + i + 28));
-	}
-	netdev_info(dev, "Dumping tx ring\n");
-	for (i = 0; i < np->tx_ring_size; i += 4) {
-		if (!nv_optimized(np)) {
+	if (unlikely(debug_tx_timeout)) {
+		int i;
+
+		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+		netdev_info(dev, "Dumping tx registers\n");
+		for (i = 0; i <= np->register_size; i += 32) {
 			netdev_info(dev,
-				    "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+				    "%3x: %08x %08x %08x %08x "
+				    "%08x %08x %08x %08x\n",
 				    i,
-				    le32_to_cpu(np->tx_ring.orig[i].buf),
-				    le32_to_cpu(np->tx_ring.orig[i].flaglen),
-				    le32_to_cpu(np->tx_ring.orig[i+1].buf),
-				    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
-				    le32_to_cpu(np->tx_ring.orig[i+2].buf),
-				    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
-				    le32_to_cpu(np->tx_ring.orig[i+3].buf),
-				    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
-		} else {
-			netdev_info(dev,
-				    "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
-				    i,
-				    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
-				    le32_to_cpu(np->tx_ring.ex[i].buflow),
-				    le32_to_cpu(np->tx_ring.ex[i].flaglen),
-				    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
-				    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
-				    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
-				    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
-				    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
-				    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
-				    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
-				    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
-				    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+				    readl(base + i + 0), readl(base + i + 4),
+				    readl(base + i + 8), readl(base + i + 12),
+				    readl(base + i + 16), readl(base + i + 20),
+				    readl(base + i + 24), readl(base + i + 28));
+		}
+		netdev_info(dev, "Dumping tx ring\n");
+		for (i = 0; i < np->tx_ring_size; i += 4) {
+			if (!nv_optimized(np)) {
+				netdev_info(dev,
+					    "%03x: %08x %08x // %08x %08x "
+					    "// %08x %08x // %08x %08x\n",
+					    i,
+					    le32_to_cpu(np->tx_ring.orig[i].buf),
+					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
+					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
+					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
+					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
+					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+			} else {
+				netdev_info(dev,
+					    "%03x: %08x %08x %08x "
+					    "// %08x %08x %08x "
+					    "// %08x %08x %08x "
+					    "// %08x %08x %08x\n",
+					    i,
+					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+					    le32_to_cpu(np->tx_ring.ex[i].buflow),
+					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
+					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+			}
 		}
 	}
 
@@ -2649,8 +2767,11 @@
 					}
 					/* the rest are hard errors */
 					else {
-						if (flags & NV_RX_MISSEDFRAME)
-							dev->stats.rx_missed_errors++;
+						if (flags & NV_RX_MISSEDFRAME) {
+							u64_stats_update_begin(&np->swstats_rx_syncp);
+							np->stat_rx_missed_errors++;
+							u64_stats_update_end(&np->swstats_rx_syncp);
+						}
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2693,8 +2814,10 @@
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += len;
+		u64_stats_update_begin(&np->swstats_rx_syncp);
+		np->stat_rx_packets++;
+		np->stat_rx_bytes += len;
+		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
 			np->get_rx.orig = np->first_rx.orig;
@@ -2777,8 +2900,10 @@
 				__vlan_hwaccel_put_tag(skb, vid);
 			}
 			napi_gro_receive(&np->napi, skb);
-			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += len;
+			u64_stats_update_begin(&np->swstats_rx_syncp);
+			np->stat_rx_packets++;
+			np->stat_rx_bytes += len;
+			u64_stats_update_end(&np->swstats_rx_syncp);
 		} else {
 			dev_kfree_skb(skb);
 		}
@@ -3021,6 +3146,73 @@
 	}
 }
 
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 phyreg, txreg;
+	int mii_status;
+
+	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+	np->duplex = duplex;
+
+	/* see if gigabit phy */
+	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+	if (mii_status & PHY_GIGABIT) {
+		np->gigabit = PHY_GIGABIT;
+		phyreg = readl(base + NvRegSlotTime);
+		phyreg &= ~(0x3FF00);
+		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+			phyreg |= NVREG_SLOTTIME_10_100_FULL;
+		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+			phyreg |= NVREG_SLOTTIME_10_100_FULL;
+		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+			phyreg |= NVREG_SLOTTIME_1000_FULL;
+		writel(phyreg, base + NvRegSlotTime);
+	}
+
+	phyreg = readl(base + NvRegPhyInterface);
+	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+	if (np->duplex == 0)
+		phyreg |= PHY_HALF;
+	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+		phyreg |= PHY_100;
+	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+							NVREG_LINKSPEED_1000)
+		phyreg |= PHY_1000;
+	writel(phyreg, base + NvRegPhyInterface);
+
+	if (phyreg & PHY_RGMII) {
+		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+							NVREG_LINKSPEED_1000)
+			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+		else
+			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+	} else {
+		txreg = NVREG_TX_DEFERRAL_DEFAULT;
+	}
+	writel(txreg, base + NvRegTxDeferral);
+
+	if (np->desc_ver == DESC_VER_1) {
+		txreg = NVREG_TX_WM_DESC1_DEFAULT;
+	} else {
+		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+					 NVREG_LINKSPEED_1000)
+			txreg = NVREG_TX_WM_DESC2_3_1000;
+		else
+			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+	}
+	writel(txreg, base + NvRegTxWatermark);
+
+	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+			base + NvRegMisc1);
+	pci_push(base);
+	writel(np->linkspeed, base + NvRegLinkSpeed);
+	pci_push(base);
+
+	return;
+}
+
 /**
  * nv_update_linkspeed: Setup the MAC according to the link partner
  * @dev: Network device to be configured
@@ -3042,11 +3234,25 @@
 	int newls = np->linkspeed;
 	int newdup = np->duplex;
 	int mii_status;
+	u32 bmcr;
 	int retval = 0;
 	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
 	u32 txrxFlags = 0;
 	u32 phy_exp;
 
+	/* If device loopback is enabled, set carrier on and enable max link
+	 * speed.
+	 */
+	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+	if (bmcr & BMCR_LOOPBACK) {
+		if (netif_running(dev)) {
+			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+			if (!netif_carrier_ok(dev))
+				netif_carrier_on(dev);
+		}
+		return 1;
+	}
+
 	/* BMSR_LSTATUS is latched, read it twice:
 	 * we want the current value.
 	 */
@@ -3729,6 +3935,7 @@
 				writel(0, base + NvRegMSIXMap0);
 				writel(0, base + NvRegMSIXMap1);
 			}
+			netdev_info(dev, "MSI-X enabled\n");
 		}
 	}
 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3750,6 +3957,7 @@
 			writel(0, base + NvRegMSIMap1);
 			/* enable msi vector 0 */
 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+			netdev_info(dev, "MSI enabled\n");
 		}
 	}
 	if (ret != 0) {
@@ -3904,11 +4112,18 @@
 #endif
 
 static void nv_do_stats_poll(unsigned long data)
+	__acquires(&netdev_priv(dev)->hwstats_lock)
+	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	nv_get_hw_stats(dev);
+	/* If the lock is currently taken, the stats are being refreshed
+	 * and hence are fresh enough */
+	if (spin_trylock(&np->hwstats_lock)) {
+		nv_update_stats(dev);
+		spin_unlock(&np->hwstats_lock);
+	}
 
 	if (!np->in_shutdown)
 		mod_timer(&np->stats_poll,
@@ -3918,9 +4133,9 @@
 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, FORCEDETH_VERSION);
-	strcpy(info->bus_info, pci_name(np->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4473,7 +4688,63 @@
 	return 0;
 }
 
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	unsigned long flags;
+	u32 miicontrol;
+	int err, retval = 0;
+
+	spin_lock_irqsave(&np->lock, flags);
+	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+	if (features & NETIF_F_LOOPBACK) {
+		if (miicontrol & BMCR_LOOPBACK) {
+			spin_unlock_irqrestore(&np->lock, flags);
+			netdev_info(dev, "Loopback already enabled\n");
+			return 0;
+		}
+		nv_disable_irq(dev);
+		/* Turn on loopback mode */
+		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+		if (err) {
+			retval = PHY_ERROR;
+			spin_unlock_irqrestore(&np->lock, flags);
+			phy_init(dev);
+		} else {
+			if (netif_running(dev)) {
+				/* Force 1000 Mbps full-duplex */
+				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+									 1);
+				/* Force link up */
+				netif_carrier_on(dev);
+			}
+			spin_unlock_irqrestore(&np->lock, flags);
+			netdev_info(dev,
+				"Internal PHY loopback mode enabled.\n");
+		}
+	} else {
+		if (!(miicontrol & BMCR_LOOPBACK)) {
+			spin_unlock_irqrestore(&np->lock, flags);
+			netdev_info(dev, "Loopback already disabled\n");
+			return 0;
+		}
+		nv_disable_irq(dev);
+		/* Turn off loopback */
+		spin_unlock_irqrestore(&np->lock, flags);
+		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+		phy_init(dev);
+	}
+	msleep(500);
+	spin_lock_irqsave(&np->lock, flags);
+	nv_enable_irq(dev);
+	spin_unlock_irqrestore(&np->lock, flags);
+
+	return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/* vlan is dependent on rx checksum offload */
 	if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4482,7 +4753,7 @@
 	return features;
 }
 
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 
@@ -4503,11 +4774,18 @@
 	spin_unlock_irq(&np->lock);
 }
 
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
+	int retval;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+		retval = nv_set_loopback(dev, features);
+		if (retval != 0)
+			return retval;
+	}
 
 	if (changed & NETIF_F_RXCSUM) {
 		spin_lock_irq(&np->lock);
@@ -4553,14 +4831,18 @@
 	}
 }
 
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+				 struct ethtool_stats *estats, u64 *buffer)
+	__acquires(&netdev_priv(dev)->hwstats_lock)
+	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
 
-	/* update stats */
-	nv_get_hw_stats(dev);
-
-	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+	spin_lock_bh(&np->hwstats_lock);
+	nv_update_stats(dev);
+	memcpy(buffer, &np->estats,
+	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+	spin_unlock_bh(&np->hwstats_lock);
 }
 
 static int nv_link_test(struct net_device *dev)
@@ -5142,6 +5424,12 @@
 
 	spin_unlock_irq(&np->lock);
 
+	/* If the loopback feature was set while the device was down, make sure
+	 * that it's set correctly now.
+	 */
+	if (dev->features & NETIF_F_LOOPBACK)
+		nv_set_loopback(dev, dev->features);
+
 	return 0;
 out_drain:
 	nv_drain_rxtx(dev);
@@ -5198,7 +5486,7 @@
 static const struct net_device_ops nv_netdev_ops = {
 	.ndo_open		= nv_open,
 	.ndo_stop		= nv_close,
-	.ndo_get_stats		= nv_get_stats,
+	.ndo_get_stats64	= nv_get_stats64,
 	.ndo_start_xmit		= nv_start_xmit,
 	.ndo_tx_timeout		= nv_tx_timeout,
 	.ndo_change_mtu		= nv_change_mtu,
@@ -5215,7 +5503,7 @@
 static const struct net_device_ops nv_netdev_ops_optimized = {
 	.ndo_open		= nv_open,
 	.ndo_stop		= nv_close,
-	.ndo_get_stats		= nv_get_stats,
+	.ndo_get_stats64	= nv_get_stats64,
 	.ndo_start_xmit		= nv_start_xmit_optimized,
 	.ndo_tx_timeout		= nv_tx_timeout,
 	.ndo_change_mtu		= nv_change_mtu,
@@ -5254,6 +5542,7 @@
 	np->dev = dev;
 	np->pci_dev = pci_dev;
 	spin_lock_init(&np->lock);
+	spin_lock_init(&np->hwstats_lock);
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
 
 	init_timer(&np->oom_kick);
@@ -5262,7 +5551,7 @@
 	init_timer(&np->nic_poll);
 	np->nic_poll.data = (unsigned long) dev;
 	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
-	init_timer(&np->stats_poll);
+	init_timer_deferrable(&np->stats_poll);
 	np->stats_poll.data = (unsigned long) dev;
 	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */
 
@@ -5346,6 +5635,9 @@
 
 	dev->features |= dev->hw_features;
 
+	/* Add loopback capability to the device. */
+	dev->hw_features |= NETIF_F_LOOPBACK;
+
 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5621,12 +5913,14 @@
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
-	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
 			"csum " : "",
 		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
 			"vlan " : "",
+		 dev->features & (NETIF_F_LOOPBACK) ?
+			"loopback " : "",
 		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
 		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
 		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -6000,6 +6294,9 @@
 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
 module_param(phy_power_down, int, 0);
 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+		 "Dump tx related registers and ring when tx_timeout happens");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 8c80271..ac4e72d 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -161,10 +161,10 @@
 {
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-	strcpy(drvinfo->driver, KBUILD_MODNAME);
-	strcpy(drvinfo->version, pch_driver_version);
-	strcpy(drvinfo->fw_version, "N/A");
-	strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
 }
 
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 48406ca..964e9c0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2109,10 +2109,11 @@
  * Returns
  *	0:		HW state updated successfully
  */
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
-	u32 changed = features ^ netdev->features;
+	netdev_features_t changed = features ^ netdev->features;
 
 	if (!(changed & NETIF_F_RXCSUM))
 		return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index e09ea83..8a37198 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -83,14 +83,18 @@
 	u32 fw_minor = 0;
 	u32 fw_build = 0;
 
-	strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
-	strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+	strlcpy(drvinfo->driver, netxen_nic_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+		sizeof(drvinfo->version));
 	fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
 	fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
 	fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-	sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d.%d", fw_major, fw_minor, fw_build);
 
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
 	drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
 }
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8cf3173..7dd9a4b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -544,7 +544,8 @@
 	adapter->set_multi(dev);
 }
 
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	if (!(features & NETIF_F_RXCSUM)) {
 		netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@
 	return features;
 }
 
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct netxen_adapter *adapter = netdev_priv(dev);
 	int hw_lro;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index a4bdff4..7931531 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1735,10 +1735,11 @@
 			   struct ethtool_drvinfo *drvinfo)
 {
 	struct ql3_adapter *qdev = netdev_priv(ndev);
-	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
-	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
-	strncpy(drvinfo->fw_version, "N/A", 32);
-	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, ql3xxx_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->regdump_len = 0;
 	drvinfo->eedump_len = 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7ed53db..60976fc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1466,8 +1466,9 @@
 
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
 int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+	netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 8aa1c6e..cc228cf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -140,11 +140,14 @@
 	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
 	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
 	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
-	sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d.%d", fw_major, fw_minor, fw_build);
 
-	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-	strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
-	strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+		sizeof(drvinfo->version));
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index bcb81e4..b528e52 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -817,12 +817,13 @@
 }
 
 
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
 	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-		u32 changed = features ^ netdev->features;
+		netdev_features_t changed = features ^ netdev->features;
 		features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
 	}
 
@@ -833,10 +834,10 @@
 }
 
 
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	u32 changed = netdev->features ^ features;
+	netdev_features_t changed = netdev->features ^ features;
 	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
 
 	if (!(changed & NETIF_F_LRO))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bd1638..69b8e4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -97,8 +97,8 @@
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
 				struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
 
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
@@ -735,20 +735,22 @@
 		adapter->pvid = 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	set_bit(vid, adapter->vlans);
+	return 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
 	qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
 	clear_bit(vid, adapter->vlans);
+	return 0;
 }
 
 static void
@@ -792,7 +794,7 @@
 		struct qlcnic_esw_func_cfg *esw_cfg)
 {
 	struct net_device *netdev = adapter->netdev;
-	unsigned long features, vlan_features;
+	netdev_features_t features, vlan_features;
 
 	features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 9b67bfe..8e2c2a7 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -366,13 +366,16 @@
 			   struct ethtool_drvinfo *drvinfo)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
-	strncpy(drvinfo->driver, qlge_driver_name, 32);
-	strncpy(drvinfo->version, qlge_driver_version, 32);
-	snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+	strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, qlge_driver_version,
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "v%d.%d.%d",
 		 (qdev->fw_rev_id & 0x00ff0000) >> 16,
 		 (qdev->fw_rev_id & 0x0000ff00) >> 8,
 		 (qdev->fw_rev_id & 0x000000ff));
-	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = 0;
 	drvinfo->testinfo_len = 0;
 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index c92afcd..b548987 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2307,7 +2307,7 @@
 	return work_done;
 }
 
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
@@ -2323,7 +2323,8 @@
 	}
 }
 
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+	netdev_features_t features)
 {
 	/*
 	 * Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@
 	return features;
 }
 
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+	netdev_features_t features)
 {
-	u32 changed = ndev->features ^ features;
+	netdev_features_t changed = ndev->features ^ features;
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@
 	return 0;
 }
 
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
 {
 	u32 enable_bit = MAC_ADDR_E;
+	int err;
 
-	if (ql_set_mac_addr_reg
-	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+				  MAC_ADDR_TYPE_VLAN, vid);
+	if (err)
 		netif_err(qdev, ifup, qdev->ndev,
 			  "Failed to init vlan address.\n");
-	}
+	return err;
 }
 
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	int status;
+	int err;
 
 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
-		return;
+		return status;
 
-	__qlge_vlan_rx_add_vid(qdev, vid);
+	err = __qlge_vlan_rx_add_vid(qdev, vid);
 	set_bit(vid, qdev->active_vlans);
 
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+	return err;
 }
 
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
 {
 	u32 enable_bit = 0;
+	int err;
 
-	if (ql_set_mac_addr_reg
-	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+				  MAC_ADDR_TYPE_VLAN, vid);
+	if (err)
 		netif_err(qdev, ifup, qdev->ndev,
 			  "Failed to clear vlan address.\n");
-	}
+	return err;
 }
 
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	int status;
+	int err;
 
 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
-		return;
+		return status;
 
-	__qlge_vlan_rx_kill_vid(qdev, vid);
+	err = __qlge_vlan_rx_kill_vid(qdev, vid);
 	clear_bit(vid, qdev->active_vlans);
 
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+	return err;
 }
 
 static void qlge_restore_vlan(struct ql_adapter *qdev)
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4bf68cf..87aa439 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -52,12 +52,6 @@
 #define DRV_VERSION	"0.28"
 #define DRV_RELDATE	"07Oct2011"
 
-/* PHY CHIP Address */
-#define PHY1_ADDR	1	/* For MAC1 */
-#define PHY2_ADDR	3	/* For MAC2 */
-#define PHY_MODE	0x3100	/* PHY CHIP Register 0 */
-#define PHY_CAP		0x01E1	/* PHY CHIP Register 4 */
-
 /* Time in jiffies before concluding the transmitter is hung. */
 #define TX_TIMEOUT	(6000 * HZ / 1000)
 
@@ -69,8 +63,11 @@
 
 /* MAC registers */
 #define MCR0		0x00	/* Control register 0 */
+#define  MCR0_RCVEN	0x0002	/* Receive enable */
 #define  MCR0_PROMISC	0x0020	/* Promiscuous mode */
 #define  MCR0_HASH_EN	0x0100	/* Enable multicast hash table function */
+#define  MCR0_XMTEN	0x1000	/* Transmission enable */
+#define  MCR0_FD	0x8000	/* Full/Half duplex */
 #define MCR1		0x04	/* Control register 1 */
 #define  MAC_RST	0x0001	/* Reset the MAC */
 #define MBCR		0x08	/* Bus control */
@@ -129,6 +126,7 @@
 #define PHY_CC		0x88	/* PHY status change configuration register */
 #define PHY_ST		0x8A	/* PHY status register */
 #define MAC_SM		0xAC	/* MAC status machine */
+#define  MAC_SM_RST	0x0002	/* MAC status machine reset */
 #define MAC_ID		0xBE	/* Identifier register */
 
 #define TX_DCNT		0x80	/* TX descriptor count */
@@ -154,9 +152,6 @@
 #define DSC_RX_MIDH_HIT	0x0004	/* RX MID table hit (no error) */
 #define DSC_RX_IDX_MID_MASK 3	/* RX mask for the index of matched MIDx */
 
-/* PHY settings */
-#define ICPLUS_PHY_ID	0x0243
-
 MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
 	"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
 	"Florian Fainelli <florian@openwrt.org>");
@@ -178,7 +173,7 @@
 	struct r6040_descriptor *vndescp;	/* 14-17 */
 	struct sk_buff *skb_ptr;	/* 18-1B */
 	u32	rev2;			/* 1C-1F */
-} __attribute__((aligned(32)));
+} __aligned(32);
 
 struct r6040_private {
 	spinlock_t lock;		/* driver lock */
@@ -191,7 +186,7 @@
 	struct r6040_descriptor *tx_ring;
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
-	u16	tx_free_desc, phy_addr;
+	u16	tx_free_desc;
 	u16	mcr0, mcr1;
 	struct net_device *dev;
 	struct mii_bus *mii_bus;
@@ -206,8 +201,6 @@
 	": RDC R6040 NAPI net driver,"
 	"version "DRV_VERSION " (" DRV_RELDATE ")";
 
-static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
-
 /* Read a word data from PHY Chip */
 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 {
@@ -379,11 +372,11 @@
 	iowrite16(MAC_RST, ioaddr + MCR1);
 	while (limit--) {
 		cmd = ioread16(ioaddr + MCR1);
-		if (cmd & 0x1)
+		if (cmd & MAC_RST)
 			break;
 	}
 	/* Reset internal state machine */
-	iowrite16(2, ioaddr + MAC_SM);
+	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
 	iowrite16(0, ioaddr + MAC_SM);
 	mdelay(5);
 
@@ -409,7 +402,7 @@
 	iowrite16(INT_MASK, ioaddr + MIER);
 
 	/* Enable TX and RX */
-	iowrite16(lp->mcr0 | 0x0002, ioaddr);
+	iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
 
 	/* Let TX poll the descriptors
 	 * we may got called by r6040_tx_timeout which has left
@@ -461,7 +454,7 @@
 	iowrite16(MAC_RST, ioaddr + MCR1);	/* Reset RDC MAC */
 	while (limit--) {
 		cmd = ioread16(ioaddr + MCR1);
-		if (cmd & 0x1)
+		if (cmd & MAC_RST)
 			break;
 	}
 
@@ -742,9 +735,10 @@
 	void __iomem *ioaddr = lp->base;
 	u16 *adrp;
 
-	/* MAC operation register */
-	iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
-	iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
+	/* Reset MAC */
+	iowrite16(MAC_RST, ioaddr + MCR1);
+	/* Reset internal state machine */
+	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
 	iowrite16(0, ioaddr + MAC_SM);
 	mdelay(5);
 
@@ -1013,7 +1007,7 @@
 
 	/* reflect duplex change */
 	if (phydev->link && (lp->old_duplex != phydev->duplex)) {
-		lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+		lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
 		iowrite16(lp->mcr0, ioaddr);
 
 		status_changed = 1;
@@ -1166,8 +1160,7 @@
 	lp->dev = dev;
 
 	/* Init RDC private data */
-	lp->mcr0 = 0x1002;
-	lp->phy_addr = phy_table[card_idx];
+	lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;
 
 	/* The RDC-specific entries in the device structure. */
 	dev->netdev_ops = &r6040_netdev_ops;
@@ -1188,7 +1181,8 @@
 	lp->mii_bus->write = r6040_mdiobus_write;
 	lp->mii_bus->reset = r6040_mdiobus_reset;
 	lp->mii_bus->name = "r6040_eth_mii";
-	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		dev_name(&pdev->dev), card_idx);
 	lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	if (!lp->mii_bus->irq) {
 		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index ee5da92..cc6b391 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -859,7 +859,6 @@
 	struct cp_private *cp = netdev_priv(dev);
 	u32 mc_filter[2];	/* Multicast hash filter */
 	int rx_mode;
-	u32 tmp;
 
 	/* Note: do not reorder, GCC is clever about common statements. */
 	if (dev->flags & IFF_PROMISC) {
@@ -886,11 +885,9 @@
 	}
 
 	/* We can safely update without stopping the chip. */
-	tmp = cp_rx_config | rx_mode;
-	if (cp->rx_config != tmp) {
-		cpw32_f (RxConfig, tmp);
-		cp->rx_config = tmp;
-	}
+	cp->rx_config = cp_rx_config | rx_mode;
+	cpw32_f(RxConfig, cp->rx_config);
+
 	cpw32_f (MAR0 + 0, mc_filter[0]);
 	cpw32_f (MAR0 + 4, mc_filter[1]);
 }
@@ -1319,9 +1316,9 @@
 {
 	struct cp_private *cp = netdev_priv(dev);
 
-	strcpy (info->driver, DRV_NAME);
-	strcpy (info->version, DRV_VERSION);
-	strcpy (info->bus_info, pci_name(cp->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
 }
 
 static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1389,7 @@
 	cp->msg_enable = value;
 }
 
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
@@ -1589,7 +1586,7 @@
    No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
  */
 
-#define eeprom_delay()	readl(ee_addr)
+#define eeprom_delay()	readb(ee_addr)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_EXTEND_CMD	(4)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 4d6b254..a8779be 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1122,7 +1122,7 @@
    No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
  */
 
-#define eeprom_delay()	(void)RTL_R32(Cfg9346)
+#define eeprom_delay()	(void)RTL_R8(Cfg9346)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_WRITE_CMD	(5)
@@ -2330,9 +2330,9 @@
 static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct rtl8139_private *tp = netdev_priv(dev);
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(tp->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
 	info->regdump_len = tp->regs_len;
 }
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 67bf078..7a0c800 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -69,9 +69,6 @@
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
 
-/* MAC address length */
-#define MAC_ADDR_LEN	6
-
 #define MAX_READ_REQUEST_SHIFT	12
 #define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
 #define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
@@ -477,7 +474,6 @@
 	/* Config1 register p.24 */
 	LEDS1		= (1 << 7),
 	LEDS0		= (1 << 6),
-	MSIEnable	= (1 << 5),	/* Enable Message Signaled Interrupt */
 	Speed_down	= (1 << 4),
 	MEMMAP		= (1 << 3),
 	IOMAP		= (1 << 2),
@@ -485,6 +481,7 @@
 	PMEnable	= (1 << 0),	/* Power Management Enable */
 
 	/* Config2 register p. 25 */
+	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
 	PCI_Clock_66MHz = 0x01,
 	PCI_Clock_33MHz = 0x00,
 
@@ -1406,12 +1403,13 @@
 	struct rtl8169_private *tp = netdev_priv(dev);
 	struct rtl_fw *rtl_fw = tp->rtl_fw;
 
-	strcpy(info->driver, MODULENAME);
-	strcpy(info->version, RTL8169_VERSION);
-	strcpy(info->bus_info, pci_name(tp->pci_dev));
+	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
 	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
-	strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
-	       rtl_fw->version);
+	if (!IS_ERR_OR_NULL(rtl_fw))
+		strlcpy(info->fw_version, rtl_fw->version,
+			sizeof(info->fw_version));
 }
 
 static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1555,7 +1553,8 @@
 	return ret;
 }
 
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
@@ -1569,7 +1568,8 @@
 	return features;
 }
 
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -3426,22 +3426,24 @@
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
 			    const struct rtl_cfg_info *cfg)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned msi = 0;
 	u8 cfg2;
 
 	cfg2 = RTL_R8(Config2) & ~MSIEnable;
 	if (cfg->features & RTL_FEATURE_MSI) {
-		if (pci_enable_msi(pdev)) {
-			dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+		if (pci_enable_msi(tp->pci_dev)) {
+			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
 		} else {
 			cfg2 |= MSIEnable;
 			msi = RTL_FEATURE_MSI;
 		}
 	}
-	RTL_W8(Config2, cfg2);
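+	/* MSIEnable is 8169-only (reserved in the 8168), so only write
+	 * Config2 back on those chips.
+	 */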
+	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+		RTL_W8(Config2, cfg2);
 	return msi;
 }
 
@@ -4077,7 +4079,7 @@
 		tp->features |= RTL_FEATURE_WOL;
 	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
 		tp->features |= RTL_FEATURE_WOL;
-	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+	tp->features |= rtl_try_msi(tp, cfg);
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	if (rtl_tbi_enabled(tp)) {
@@ -4099,7 +4101,7 @@
 	spin_lock_init(&tp->lock);
 
 	/* Get MAC address */
-	for (i = 0; i < MAC_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = RTL_R8(MAC0 + i);
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 9b23074..fc9bda9 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1369,13 +1369,13 @@
 	}
 }
 
-static struct ethtool_ops sh_eth_ethtool_ops = {
+static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_settings	= sh_eth_get_settings,
 	.set_settings	= sh_eth_set_settings,
-	.nway_reset		= sh_eth_nway_reset,
+	.nway_reset	= sh_eth_nway_reset,
 	.get_msglevel	= sh_eth_get_msglevel,
 	.set_msglevel	= sh_eth_set_msglevel,
-	.get_link		= ethtool_op_get_link,
+	.get_link	= ethtool_op_get_link,
 	.get_strings	= sh_eth_get_strings,
 	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
 	.get_sset_count     = sh_eth_get_sset_count,
@@ -1957,18 +1957,7 @@
 	},
 };
 
-static int __init sh_eth_init(void)
-{
-	return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
-	platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
 
 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c3673f1..f955a19 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -834,23 +834,7 @@
 	}
 };
 
-static int __init sgiseeq_module_init(void)
-{
-	if (platform_driver_register(&sgiseeq_driver)) {
-		printk(KERN_ERR "Driver registration failed\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
-	platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
 
 MODULE_DESCRIPTION("SGI Seeq 8003 driver");
 MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d5731f1..e43702f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1336,7 +1336,8 @@
 	if (efx->n_channels > 1)
 		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
 	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-		efx->rx_indir_table[i] = i % efx->n_rx_channels;
+		efx->rx_indir_table[i] =
+			ethtool_rxfh_indir_default(i, efx->n_rx_channels);
 
 	efx_set_channels(efx);
 	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1900,7 +1901,7 @@
 	/* Otherwise efx_start_port() will do this */
 }
 
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
@@ -2235,9 +2236,9 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
 		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
 	 .driver_data = (unsigned long) &falcon_b0_nic_type},
-	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
 	 .driver_data = (unsigned long) &siena_a0_nic_type},
-	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
 	 .driver_data = (unsigned long) &siena_a0_nic_type},
 	{0}			/* end of list */
 };
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4764793..a3541ac 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,10 +14,6 @@
 #include "net_driver.h"
 #include "filter.h"
 
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID      0x0803
-#define SIENA_A_P_DEVID         0x0813
-
 /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
 #define EFX_MEM_BAR 2
 
@@ -65,13 +61,23 @@
 extern int efx_probe_filters(struct efx_nic *efx);
 extern void efx_restore_filters(struct efx_nic *efx);
 extern void efx_remove_filters(struct efx_nic *efx);
-extern int efx_filter_insert_filter(struct efx_nic *efx,
+extern s32 efx_filter_insert_filter(struct efx_nic *efx,
 				    struct efx_filter_spec *spec,
 				    bool replace);
-extern int efx_filter_remove_filter(struct efx_nic *efx,
-				    struct efx_filter_spec *spec);
+extern int efx_filter_remove_id_safe(struct efx_nic *efx,
+				     enum efx_filter_priority priority,
+				     u32 filter_id);
+extern int efx_filter_get_filter_safe(struct efx_nic *efx,
+				      enum efx_filter_priority priority,
+				      u32 filter_id, struct efx_filter_spec *);
 extern void efx_filter_clear_rx(struct efx_nic *efx,
 				enum efx_filter_priority priority);
+extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
+				    enum efx_filter_priority priority);
+extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+				 enum efx_filter_priority priority,
+				 u32 *buf, u32 size);
 #ifdef CONFIG_RFS_ACCEL
 extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 			  u16 rxq_index, u32 flow_id);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f3cd96d..29b2ebf 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -818,9 +818,58 @@
 	return efx_reset(efx, rc);
 }
 
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+				      struct ethtool_rx_flow_spec *rule)
+{
+	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+	struct efx_filter_spec spec;
+	u16 vid;
+	u8 proto;
+	int rc;
+
+	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+					rule->location, &spec);
+	if (rc)
+		return rc;
+
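+	/* A dmaq_id of 0xfff marks a filter that discards matching packets */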
+	if (spec.dmaq_id == 0xfff)
+		rule->ring_cookie = RX_CLS_FLOW_DISC;
+	else
+		rule->ring_cookie = spec.dmaq_id;
+
+	rc = efx_filter_get_eth_local(&spec, &vid,
+				      rule->h_u.ether_spec.h_dest);
+	if (rc == 0) {
+		rule->flow_type = ETHER_FLOW;
+		memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+		if (vid != EFX_FILTER_VID_UNSPEC) {
+			rule->flow_type |= FLOW_EXT;
+			rule->h_ext.vlan_tci = htons(vid);
+			rule->m_ext.vlan_tci = htons(0xfff);
+		}
+		return 0;
+	}
+
+	rc = efx_filter_get_ipv4_local(&spec, &proto,
+				       &ip_entry->ip4dst, &ip_entry->pdst);
+	if (rc != 0) {
+		rc = efx_filter_get_ipv4_full(
+			&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
+			&ip_entry->ip4dst, &ip_entry->pdst);
+		EFX_WARN_ON_PARANOID(rc);
+		ip_mask->ip4src = ~0;
+		ip_mask->psrc = ~0;
+	}
+	rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
+	ip_mask->ip4dst = ~0;
+	ip_mask->pdst = ~0;
+	return rc;
+}
+
 static int
 efx_ethtool_get_rxnfc(struct net_device *net_dev,
-		      struct ethtool_rxnfc *info, u32 *rules __always_unused)
+		      struct ethtool_rxnfc *info, u32 *rule_locs)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
@@ -862,42 +911,80 @@
 		return 0;
 	}
 
+	case ETHTOOL_GRXCLSRLCNT:
+		info->data = efx_filter_get_rx_id_limit(efx);
+		if (info->data == 0)
+			return -EOPNOTSUPP;
+		info->data |= RX_CLS_LOC_SPECIAL;
+		info->rule_cnt =
+			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+		return 0;
+
+	case ETHTOOL_GRXCLSRULE:
+		if (efx_filter_get_rx_id_limit(efx) == 0)
+			return -EOPNOTSUPP;
+		return efx_ethtool_get_class_rule(efx, &info->fs);
+
+	case ETHTOOL_GRXCLSRLALL: {
+		s32 rc;
+		info->data = efx_filter_get_rx_id_limit(efx);
+		if (info->data == 0)
+			return -EOPNOTSUPP;
+		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+					   rule_locs, info->rule_cnt);
+		if (rc < 0)
+			return rc;
+		info->rule_cnt = rc;
+		return 0;
+	}
+
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
-static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
-				     struct ethtool_rx_ntuple *ntuple)
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+				      struct ethtool_rx_flow_spec *rule)
 {
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
-	struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
-	struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
-	struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
-	struct efx_filter_spec filter;
+	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+	struct efx_filter_spec spec;
 	int rc;
 
-	/* Range-check action */
-	if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
-	    ntuple->fs.action >= (s32)efx->n_rx_channels)
+	/* Check that user wants us to choose the location */
+	if (rule->location != RX_CLS_LOC_ANY &&
+	    rule->location != RX_CLS_LOC_FIRST &&
+	    rule->location != RX_CLS_LOC_LAST)
 		return -EINVAL;
 
-	if (~ntuple->fs.data_mask)
+	/* Range-check ring_cookie */
+	if (rule->ring_cookie >= efx->n_rx_channels &&
+	    rule->ring_cookie != RX_CLS_FLOW_DISC)
 		return -EINVAL;
 
-	efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
-			   (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
-			   0xfff : ntuple->fs.action);
+	/* Check for unsupported extensions */
+	if ((rule->flow_type & FLOW_EXT) &&
+	    (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+	     rule->m_ext.data[1]))
+		return -EINVAL;
 
-	switch (ntuple->fs.flow_type) {
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+			   (rule->location == RX_CLS_LOC_FIRST) ?
+			   EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+			   0xfff : rule->ring_cookie);
+
+	switch (rule->flow_type) {
 	case TCP_V4_FLOW:
 	case UDP_V4_FLOW: {
-		u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+		u8 proto = (rule->flow_type == TCP_V4_FLOW ?
 			    IPPROTO_TCP : IPPROTO_UDP);
 
 		/* Must match all of destination, */
-		if (ip_mask->ip4dst | ip_mask->pdst)
+		if ((__force u32)~ip_mask->ip4dst |
+		    (__force u16)~ip_mask->pdst)
 			return -EINVAL;
 		/* all or none of source, */
 		if ((ip_mask->ip4src | ip_mask->psrc) &&
@@ -905,17 +992,17 @@
 		     (__force u16)~ip_mask->psrc))
 			return -EINVAL;
 		/* and nothing else */
-		if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+		if (ip_mask->tos | rule->m_ext.vlan_tci)
 			return -EINVAL;
 
-		if (!ip_mask->ip4src)
-			rc = efx_filter_set_ipv4_full(&filter, proto,
+		if (ip_mask->ip4src)
+			rc = efx_filter_set_ipv4_full(&spec, proto,
 						      ip_entry->ip4dst,
 						      ip_entry->pdst,
 						      ip_entry->ip4src,
 						      ip_entry->psrc);
 		else
-			rc = efx_filter_set_ipv4_local(&filter, proto,
+			rc = efx_filter_set_ipv4_local(&spec, proto,
 						       ip_entry->ip4dst,
 						       ip_entry->pdst);
 		if (rc)
@@ -923,23 +1010,24 @@
 		break;
 	}
 
-	case ETHER_FLOW:
-		/* Must match all of destination, */
-		if (!is_zero_ether_addr(mac_mask->h_dest))
+	case ETHER_FLOW | FLOW_EXT:
+		/* Must match all or none of VID */
+		if (rule->m_ext.vlan_tci != htons(0xfff) &&
+		    rule->m_ext.vlan_tci != 0)
 			return -EINVAL;
-		/* all or none of VID, */
-		if (ntuple->fs.vlan_tag_mask != 0xf000 &&
-		    ntuple->fs.vlan_tag_mask != 0xffff)
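+		/* Fall through - the ETHER_FLOW checks below apply here too */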
+	case ETHER_FLOW:
+		/* Must match all of destination */
+		if (!is_broadcast_ether_addr(mac_mask->h_dest))
 			return -EINVAL;
 		/* and nothing else */
-		if (!is_broadcast_ether_addr(mac_mask->h_source) ||
-		    mac_mask->h_proto != htons(0xffff))
+		if (!is_zero_ether_addr(mac_mask->h_source) ||
+		    mac_mask->h_proto)
 			return -EINVAL;
 
 		rc = efx_filter_set_eth_local(
-			&filter,
-			(ntuple->fs.vlan_tag_mask == 0xf000) ?
-			ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+			&spec,
+			(rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
+			ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
 			mac_entry->h_dest);
 		if (rc)
 			return rc;
@@ -949,47 +1037,57 @@
 		return -EINVAL;
 	}
 
-	if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
-		return efx_filter_remove_filter(efx, &filter);
+	rc = efx_filter_insert_filter(efx, &spec, true);
+	if (rc < 0)
+		return rc;
 
-	rc = efx_filter_insert_filter(efx, &filter, true);
-	return rc < 0 ? rc : 0;
+	rule->location = rc;
+	return 0;
 }
 
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
-				      struct ethtool_rxfh_indir *indir)
+static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+				 struct ethtool_rxnfc *info)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	size_t copy_size =
-		min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
 
-	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+	if (efx_filter_get_rx_id_limit(efx) == 0)
 		return -EOPNOTSUPP;
 
-	indir->size = ARRAY_SIZE(efx->rx_indir_table);
-	memcpy(indir->ring_index, efx->rx_indir_table,
-	       copy_size * sizeof(indir->ring_index[0]));
+	switch (info->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		return efx_ethtool_set_class_rule(efx, &info->fs);
+
+	case ETHTOOL_SRXCLSRLDEL:
+		return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+						 info->fs.location);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+		0 : ARRAY_SIZE(efx->rx_indir_table));
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
 	return 0;
 }
 
 static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
-				      const struct ethtool_rxfh_indir *indir)
+				      const u32 *indir)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	size_t i;
 
-	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
-		return -EOPNOTSUPP;
-
-	/* Validate size and indices */
-	if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
-		return -EINVAL;
-	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-		if (indir->ring_index[i] >= efx->n_rx_channels)
-			return -EINVAL;
-
-	memcpy(efx->rx_indir_table, indir->ring_index,
-	       sizeof(efx->rx_indir_table));
+	memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
 	efx_nic_push_rx_indir_table(efx);
 	return 0;
 }
@@ -1019,7 +1117,8 @@
 	.set_wol                = efx_ethtool_set_wol,
 	.reset			= efx_ethtool_reset,
 	.get_rxnfc		= efx_ethtool_get_rxnfc,
-	.set_rx_ntuple		= efx_ethtool_set_rx_ntuple,
+	.set_rxnfc		= efx_ethtool_set_rxnfc,
+	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
 	.get_rxfh_indir		= efx_ethtool_get_rxfh_indir,
 	.set_rxfh_indir		= efx_ethtool_set_rxfh_indir,
 };
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 97b606b..8ae1ebd 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -610,7 +610,7 @@
 	if (!nic_data->stats_pending)
 		return;
 
-	nic_data->stats_pending = 0;
+	nic_data->stats_pending = false;
 	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
 		rmb(); /* read the done flag before the stats */
 		efx->mac_op->update_stats(efx);
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2b9636f..1fbbbee 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -155,6 +155,16 @@
 	spec->data[2] = ntohl(host2);
 }
 
+static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
+					 __be32 *host1, __be16 *port1,
+					 __be32 *host2, __be16 *port2)
+{
+	*host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+	*port1 = htons(spec->data[0]);
+	*host2 = htonl(spec->data[2]);
+	*port2 = htons(spec->data[1] >> 16);
+}
+
 /**
  * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
  * @spec: Specification to initialise
@@ -205,6 +215,26 @@
 	return 0;
 }
 
+int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+			      u8 *proto, __be32 *host, __be16 *port)
+{
+	__be32 host1;
+	__be16 port1;
+
+	switch (spec->type) {
+	case EFX_FILTER_TCP_WILD:
+		*proto = IPPROTO_TCP;
+		__efx_filter_get_ipv4(spec, &host1, &port1, host, port);
+		return 0;
+	case EFX_FILTER_UDP_WILD:
+		*proto = IPPROTO_UDP;
+		__efx_filter_get_ipv4(spec, &host1, port, host, &port1);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 /**
  * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
  * @spec: Specification to initialise
@@ -242,6 +272,25 @@
 	return 0;
 }
 
+int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+			     u8 *proto, __be32 *host, __be16 *port,
+			     __be32 *rhost, __be16 *rport)
+{
+	switch (spec->type) {
+	case EFX_FILTER_TCP_FULL:
+		*proto = IPPROTO_TCP;
+		break;
+	case EFX_FILTER_UDP_FULL:
+		*proto = IPPROTO_UDP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	__efx_filter_get_ipv4(spec, rhost, rport, host, port);
+	return 0;
+}
+
 /**
  * efx_filter_set_eth_local - specify local Ethernet address and optional VID
  * @spec: Specification to initialise
@@ -270,6 +319,29 @@
 	return 0;
 }
 
+int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+			     u16 *vid, u8 *addr)
+{
+	switch (spec->type) {
+	case EFX_FILTER_MAC_WILD:
+		*vid = EFX_FILTER_VID_UNSPEC;
+		break;
+	case EFX_FILTER_MAC_FULL:
+		*vid = spec->data[0];
+		break;
+	default:
+		return -EINVAL;
+	}
+
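+	/* Address bytes are packed into data[2] (top two) and data[1] (low four) */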
+	addr[0] = spec->data[2] >> 8;
+	addr[1] = spec->data[2];
+	addr[2] = spec->data[1] >> 24;
+	addr[3] = spec->data[1] >> 16;
+	addr[4] = spec->data[1] >> 8;
+	addr[5] = spec->data[1];
+	return 0;
+}
+
 /* Build a filter entry and return its n-tuple key. */
 static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
 {
@@ -332,7 +404,7 @@
 
 static int efx_filter_search(struct efx_filter_table *table,
 			     struct efx_filter_spec *spec, u32 key,
-			     bool for_insert, int *depth_required)
+			     bool for_insert, unsigned int *depth_required)
 {
 	unsigned hash, incr, filter_idx, depth, depth_max;
 
@@ -366,12 +438,59 @@
 	}
 }
 
-/* Construct/deconstruct external filter IDs */
+/*
+ * Construct/deconstruct external filter IDs.  These must be ordered
+ * by matching priority, for RX NFC semantics.
+ *
+ * Each RX MAC filter entry has a flag for whether it can override an
+ * RX IP filter that also matches.  So we assign locations for MAC
+ * filters with overriding behaviour, then for IP filters, then for
+ * MAC filters without overriding behaviour.
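+ *
+ * With EFX_FILTER_INDEX_WIDTH of 13 this gives overriding MAC filters
+ * IDs 0..8191, while table T otherwise contributes IDs
+ * ((T + 1) << 13) + index.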
+ */
 
-static inline int
-efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+#define EFX_FILTER_INDEX_WIDTH	13
+#define EFX_FILTER_INDEX_MASK	((1 << EFX_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
+				     unsigned int index, u8 flags)
 {
-	return table_id << 16 | index;
+	return (table_id == EFX_FILTER_TABLE_RX_MAC &&
+		flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
+		index :
+		(table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
+{
+	return (id <= EFX_FILTER_INDEX_MASK) ?
+		EFX_FILTER_TABLE_RX_MAC :
+		(id >> EFX_FILTER_INDEX_WIDTH) - 1;
+}
+
+static inline unsigned int efx_filter_id_index(u32 id)
+{
+	return id & EFX_FILTER_INDEX_MASK;
+}
+
+static inline u8 efx_filter_id_flags(u32 id)
+{
+	return (id <= EFX_FILTER_INDEX_MASK) ?
+		EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
+		EFX_FILTER_FLAG_RX;
+}
+
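+/* One greater than the largest RX filter ID that can currently be in use */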
+u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+	struct efx_filter_state *state = efx->filter_state;
+
+	if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
+		return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
+			+ state->table[EFX_FILTER_TABLE_RX_MAC].size;
+	else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
+		return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
+			+ state->table[EFX_FILTER_TABLE_RX_IP].size;
+	else
+		return 0;
 }
 
 /**
@@ -384,14 +503,14 @@
  * On success, return the filter ID.
  * On failure, return a negative error code.
  */
-int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
 			     bool replace)
 {
 	struct efx_filter_state *state = efx->filter_state;
 	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
 	struct efx_filter_spec *saved_spec;
 	efx_oword_t filter;
-	int filter_idx, depth;
+	unsigned int filter_idx, depth;
 	u32 key;
 	int rc;
 
@@ -439,7 +558,7 @@
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "%s: filter type %d index %d rxq %u set",
 		   __func__, spec->type, filter_idx, spec->dmaq_id);
-	rc = efx_filter_make_id(table->id, filter_idx);
+	rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
 
 out:
 	spin_unlock_bh(&state->lock);
@@ -448,7 +567,7 @@
 
 static void efx_filter_table_clear_entry(struct efx_nic *efx,
 					 struct efx_filter_table *table,
-					 int filter_idx)
+					 unsigned int filter_idx)
 {
 	static efx_oword_t filter;
 
@@ -463,48 +582,101 @@
 }
 
 /**
- * efx_filter_remove_filter - remove a filter by specification
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
  * @efx: NIC from which to remove the filter
- * @spec: Specification for the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
  *
- * On success, return zero.
- * On failure, return a negative error code.
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
  */
-int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+int efx_filter_remove_id_safe(struct efx_nic *efx,
+			      enum efx_filter_priority priority,
+			      u32 filter_id)
 {
 	struct efx_filter_state *state = efx->filter_state;
-	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
-	struct efx_filter_spec *saved_spec;
-	efx_oword_t filter;
-	int filter_idx, depth;
-	u32 key;
+	enum efx_filter_table_id table_id;
+	struct efx_filter_table *table;
+	unsigned int filter_idx;
+	struct efx_filter_spec *spec;
+	u8 filter_flags;
 	int rc;
 
-	if (!table)
-		return -EINVAL;
+	table_id = efx_filter_id_table_id(filter_id);
+	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+		return -ENOENT;
+	table = &state->table[table_id];
 
-	key = efx_filter_build(&filter, spec);
+	filter_idx = efx_filter_id_index(filter_id);
+	if (filter_idx >= table->size)
+		return -ENOENT;
+	spec = &table->spec[filter_idx];
+
+	filter_flags = efx_filter_id_flags(filter_id);
 
 	spin_lock_bh(&state->lock);
 
-	rc = efx_filter_search(table, spec, key, false, &depth);
-	if (rc < 0)
-		goto out;
-	filter_idx = rc;
-	saved_spec = &table->spec[filter_idx];
-
-	if (spec->priority < saved_spec->priority) {
-		rc = -EPERM;
-		goto out;
+	if (test_bit(filter_idx, table->used_bitmap) &&
+	    spec->priority == priority && spec->flags == filter_flags) {
+		efx_filter_table_clear_entry(efx, table, filter_idx);
+		if (table->used == 0)
+			efx_filter_table_reset_search_depth(table);
+		rc = 0;
+	} else {
+		rc = -ENOENT;
 	}
 
-	efx_filter_table_clear_entry(efx, table, filter_idx);
-	if (table->used == 0)
-		efx_filter_table_reset_search_depth(table);
-	rc = 0;
-
-out:
 	spin_unlock_bh(&state->lock);
+
+	return rc;
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+int efx_filter_get_filter_safe(struct efx_nic *efx,
+			       enum efx_filter_priority priority,
+			       u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+	struct efx_filter_state *state = efx->filter_state;
+	enum efx_filter_table_id table_id;
+	struct efx_filter_table *table;
+	struct efx_filter_spec *spec;
+	unsigned int filter_idx;
+	u8 filter_flags;
+	int rc;
+
+	table_id = efx_filter_id_table_id(filter_id);
+	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+		return -ENOENT;
+	table = &state->table[table_id];
+
+	filter_idx = efx_filter_id_index(filter_id);
+	if (filter_idx >= table->size)
+		return -ENOENT;
+	spec = &table->spec[filter_idx];
+
+	filter_flags = efx_filter_id_flags(filter_id);
+
+	spin_lock_bh(&state->lock);
+
+	if (test_bit(filter_idx, table->used_bitmap) &&
+	    spec->priority == priority && spec->flags == filter_flags) {
+		*spec_buf = *spec;
+		rc = 0;
+	} else {
+		rc = -ENOENT;
+	}
+
+	spin_unlock_bh(&state->lock);
+
 	return rc;
 }
 
@@ -514,7 +686,7 @@
 {
 	struct efx_filter_state *state = efx->filter_state;
 	struct efx_filter_table *table = &state->table[table_id];
-	int filter_idx;
+	unsigned int filter_idx;
 
 	spin_lock_bh(&state->lock);
 
@@ -538,6 +710,68 @@
 	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
 }
 
+u32 efx_filter_count_rx_used(struct efx_nic *efx,
+			     enum efx_filter_priority priority)
+{
+	struct efx_filter_state *state = efx->filter_state;
+	enum efx_filter_table_id table_id;
+	struct efx_filter_table *table;
+	unsigned int filter_idx;
+	u32 count = 0;
+
+	spin_lock_bh(&state->lock);
+
+	for (table_id = EFX_FILTER_TABLE_RX_IP;
+	     table_id <= EFX_FILTER_TABLE_RX_MAC;
+	     table_id++) {
+		table = &state->table[table_id];
+		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+			if (test_bit(filter_idx, table->used_bitmap) &&
+			    table->spec[filter_idx].priority == priority)
+				++count;
+		}
+	}
+
+	spin_unlock_bh(&state->lock);
+
+	return count;
+}
+
+s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+			  enum efx_filter_priority priority,
+			  u32 *buf, u32 size)
+{
+	struct efx_filter_state *state = efx->filter_state;
+	enum efx_filter_table_id table_id;
+	struct efx_filter_table *table;
+	unsigned int filter_idx;
+	s32 count = 0;
+
+	spin_lock_bh(&state->lock);
+
+	for (table_id = EFX_FILTER_TABLE_RX_IP;
+	     table_id <= EFX_FILTER_TABLE_RX_MAC;
+	     table_id++) {
+		table = &state->table[table_id];
+		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+			if (test_bit(filter_idx, table->used_bitmap) &&
+			    table->spec[filter_idx].priority == priority) {
+				if (count == size) {
+					count = -EMSGSIZE;
+					goto out;
+				}
+				buf[count++] = efx_filter_make_id(
+					table_id, filter_idx,
+					table->spec[filter_idx].flags);
+			}
+		}
+	}
+out:
+	spin_unlock_bh(&state->lock);
+
+	return count;
+}
+
 /* Restore filter state after reset */
 void efx_restore_filters(struct efx_nic *efx)
 {
@@ -545,7 +779,7 @@
 	enum efx_filter_table_id table_id;
 	struct efx_filter_table *table;
 	efx_oword_t filter;
-	int filter_idx;
+	unsigned int filter_idx;
 
 	spin_lock_bh(&state->lock);
 
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 872f213..3d4108c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -78,6 +78,11 @@
  *
  * Use the efx_filter_set_*() functions to initialise the @type and
  * @data fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one.  The hardware priority of a filter
+ * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
+ * flag.
  */
 struct efx_filter_spec {
 	u8	type:4;
@@ -100,11 +105,18 @@
 
 extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
 				     __be32 host, __be16 port);
+extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+				     u8 *proto, __be32 *host, __be16 *port);
 extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
 				    __be32 host, __be16 port,
 				    __be32 rhost, __be16 rport);
+extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+				    u8 *proto, __be32 *host, __be16 *port,
+				    __be32 *rhost, __be16 *rport);
 extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
 				    u16 vid, const u8 *addr);
+extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+				    u16 *vid, u8 *addr);
 enum {
 	EFX_FILTER_VID_UNSPEC = 0xffff,
 };
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index b630448..bc9dcd6 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -496,7 +496,7 @@
 		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
 		if (rc)
 			goto out;
-		part->mcdi.updating = 1;
+		part->mcdi.updating = true;
 	}
 
 	/* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@
 		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
 		if (rc)
 			goto out;
-		part->mcdi.updating = 1;
+		part->mcdi.updating = true;
 	}
 
 	while (offset < end) {
@@ -553,7 +553,7 @@
 	int rc = 0;
 
 	if (part->mcdi.updating) {
-		part->mcdi.updating = 0;
+		part->mcdi.updating = false;
 		rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
 	}
 
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b8e251a..c49502b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -908,7 +908,7 @@
 	unsigned int phys_addr_channels;
 	unsigned int tx_dc_base;
 	unsigned int rx_dc_base;
-	u32 offload_features;
+	netdev_features_t offload_features;
 };
 
 /**************************************************************************
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 752d521..aca3498 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,11 +479,8 @@
 		if (efx->net_dev->features & NETIF_F_RXHASH)
 			skb->rxhash = efx_rx_buf_hash(eh);
 
-		skb_frag_set_page(skb, 0, page);
-		skb_shinfo(skb)->frags[0].page_offset =
-			efx_rx_buf_offset(efx, rx_buf);
-		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
-		skb_shinfo(skb)->nr_frags = 1;
+		skb_fill_page_desc(skb, 0, page,
+				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
 
 		skb->len = rx_buf->len;
 		skb->data_len = rx_buf->len;
@@ -669,7 +666,7 @@
 		  rx_queue->ptr_mask);
 
 	/* Allocate RX buffers */
-	rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
 				   GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 822f6c2..52edd24 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -503,8 +503,8 @@
 		/* Determine how many packets to send */
 		state->packet_count = efx->txq_entries / 3;
 		state->packet_count = min(1 << (i << 2), state->packet_count);
-		state->skbs = kzalloc(sizeof(state->skbs[0]) *
-				      state->packet_count, GFP_KERNEL);
+		state->skbs = kcalloc(state->packet_count,
+				      sizeof(state->skbs[0]), GFP_KERNEL);
 		if (!state->skbs)
 			return -ENOMEM;
 		state->flush = false;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index cc2549c..4d5d619 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -232,7 +232,7 @@
 static int siena_probe_nic(struct efx_nic *efx)
 {
 	struct siena_nic_data *nic_data;
-	bool already_attached = 0;
+	bool already_attached = false;
 	efx_oword_t reg;
 	int rc;
 
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c543..72f0fbc 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-			       struct efx_tx_buffer *buffer)
+			       struct efx_tx_buffer *buffer,
+			       unsigned int *pkts_compl,
+			       unsigned int *bytes_compl)
 {
 	if (buffer->unmap_len) {
 		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@
 	}
 
 	if (buffer->skb) {
+		(*pkts_compl)++;
+		(*bytes_compl) += buffer->skb->len;
 		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
 		buffer->skb = NULL;
 		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@
 	buffer->skb = skb;
 	buffer->continuation = false;
 
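+	/* Account the queued bytes for BQL (byte queue limits) */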
+	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
@@ -267,10 +273,11 @@
  unwind:
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
+		unsigned int pkts_compl = 0, bytes_compl = 0;
 		--tx_queue->insert_count;
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
-		efx_dequeue_buffer(tx_queue, buffer);
+		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
 		buffer->len = 0;
 	}
 
@@ -293,7 +300,9 @@
  * specified index.
  */
 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-				unsigned int index)
+				unsigned int index,
+				unsigned int *pkts_compl,
+				unsigned int *bytes_compl)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@
 			return;
 		}
 
-		efx_dequeue_buffer(tx_queue, buffer);
+		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
 		buffer->continuation = true;
 		buffer->len = 0;
 
@@ -422,10 +431,12 @@
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
-	efx_dequeue_buffers(tx_queue, index);
+	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
 	/* See if we need to restart the netif queue.  This barrier
 	 * separates the update of read_count from the test of the
@@ -468,7 +479,7 @@
 		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
 
 	/* Allocate software ring */
-	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
 				   GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
@@ -515,13 +526,15 @@
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
+		unsigned int pkts_compl = 0, bytes_compl = 0;
 		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-		efx_dequeue_buffer(tx_queue, buffer);
+		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
 		buffer->continuation = true;
 		buffer->len = 0;
 
 		++tx_queue->read_count;
 	}
+	netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@
 			goto mem_err;
 	}
 
+	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 60135aa..53efe7c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -28,6 +28,7 @@
 #include <linux/tcp.h>         /* struct tcphdr */
 #include <linux/skbuff.h>
 #include <linux/mii.h>         /* MII definitions */
+#include <linux/crc32.h>
 
 #include <asm/ip32/mace.h>
 #include <asm/ip32/ip32_ints.h>
@@ -58,12 +59,19 @@
 module_param(timeout, int, 0);
 
 /*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
+ */
+#define METH_MCF_LIMIT 32
+
+/*
  * This structure is private to each device. It is used to pass
  * packets in and out, so there is place for a packet
  */
 struct meth_private {
 	/* in-memory copy of MAC Control register */
-	unsigned long mac_ctrl;
+	u64 mac_ctrl;
+
 	/* in-memory copy of DMA Control register */
 	unsigned long dma_ctrl;
 	/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
@@ -79,6 +87,9 @@
 	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
 	unsigned long rx_write;
 
+	/* Multicast filter. */
+	u64 mcast_filter;
+
 	spinlock_t meth_lock;
 };
 
@@ -765,6 +776,40 @@
 	}
 }
 
+static void meth_set_rx_mode(struct net_device *dev)
+{
+	struct meth_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+	spin_lock_irqsave(&priv->meth_lock, flags);
+	priv->mac_ctrl &= ~METH_PROMISC;
+
+	if (dev->flags & IFF_PROMISC) {
+		priv->mac_ctrl |= METH_PROMISC;
+		priv->mcast_filter = 0xffffffffffffffffUL;
+	} else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+		   (dev->flags & IFF_ALLMULTI)) {
+		priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+		priv->mcast_filter = 0xffffffffffffffffUL;
+	} else {
+		struct netdev_hw_addr *ha;
+		priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
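+		/* Hash on the top six CRC bits to select one of 64 filter bits */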
+		netdev_for_each_mc_addr(ha, dev)
+			set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+			        (volatile unsigned long *)&priv->mcast_filter);
+	}
+
+	/* Write the changes to the chip registers. */
+	mace->eth.mac_ctrl = priv->mac_ctrl;
+	mace->eth.mcast_filter = priv->mcast_filter;
+
+	/* Done! */
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
+	netif_wake_queue(dev);
+}
+
 static const struct net_device_ops meth_netdev_ops = {
 	.ndo_open		= meth_open,
 	.ndo_stop		= meth_release,
@@ -774,6 +819,7 @@
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_set_rx_mode	= meth_set_rx_mode,
 };
 
 /*
@@ -830,24 +876,7 @@
 	}
 };
 
-static int __init meth_init_module(void)
-{
-	int err;
-
-	err = platform_driver_register(&meth_driver);
-	if (err)
-		printk(KERN_ERR "Driver registration failed\n");
-
-	return err;
-}
-
-static void __exit meth_exit_module(void)
-{
-	platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
 
 MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
 MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 1b4658c..5b118cd 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -47,8 +47,6 @@
 #define sis190_rx_skb			netif_rx
 #define sis190_rx_quota(count, quota)	count
 
-#define MAC_ADDR_LEN		6
-
 #define NUM_TX_DESC		64	/* [8..1024] */
 #define NUM_RX_DESC		64	/* [8..8192] */
 #define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@
 	}
 
 	/* Get MAC address from EEPROM */
-	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+	for (i = 0; i < ETH_ALEN / 2; i++) {
 		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
 
 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@
 	udelay(50);
 	pci_read_config_byte(isa_bridge, 0x48, &reg);
 
-        for (i = 0; i < MAC_ADDR_LEN; i++) {
+        for (i = 0; i < ETH_ALEN; i++) {
                 outb(0x9 + i, 0x78);
                 dev->dev_addr[i] = inb(0x79);
         }
@@ -1692,7 +1690,7 @@
 	 */
 	SIS_W16(RxMacControl, ctl & ~0x0f00);
 
-	for (i = 0; i < MAC_ADDR_LEN; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
 
 	SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@
 {
 	struct sis190_private *tp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(tp->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(tp->pci_dev),
+		sizeof(info->bus_info));
 }
 
 static int sis190_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index a184abc..c8efc70 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1991,9 +1991,10 @@
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
 
-	strcpy (info->driver, SIS900_MODULE_NAME);
-	strcpy (info->version, SIS900_DRV_VERSION);
-	strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+	strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+		sizeof(info->bus_info));
 }
 
 static u32 sis900_get_msglevel(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 150511a..1341f33 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -205,7 +205,7 @@
 	EXCCOLL = 0x00100000, COLCNT   = 0x000F0000
 };
 
-enum sis900_rx_bufer_status {
+enum sis900_rx_buffer_status {
 	OVERRUN = 0x02000000, DEST = 0x00800000,     BCAST = 0x01800000,
 	MCAST   = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
 	RUNT    = 0x00200000, RXISERR  = 0x00100000, CRCERR  = 0x00080000,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0a5dfb8..2c077ce 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1414,9 +1414,9 @@
 {
 	struct epic_private *np = netdev_priv(dev);
 
-	strcpy (info->driver, DRV_NAME);
-	strcpy (info->version, DRV_VERSION);
-	strcpy (info->bus_info, pci_name(np->pci_dev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8f61fe9..313ba3b 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2196,15 +2196,4 @@
 	},
 };
 
-static int __init smc911x_init(void)
-{
-	return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
-	platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index cbfa981..ada927a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@
 
 static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f47f81e..64ad3ed 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2417,15 +2417,4 @@
 	},
 };
 
-static int __init smc_init(void)
-{
-	return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
-	platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8843071..9d0b8ce 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -44,6 +44,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -88,6 +89,8 @@
 				unsigned int *buf, unsigned int wordcount);
 };
 
+#define SMSC911X_NUM_SUPPLIES 2
+
 struct smsc911x_data {
 	void __iomem *ioaddr;
 
@@ -138,6 +141,9 @@
 
 	/* register access functions */
 	const struct smsc911x_ops *ops;
+
+	/* regulators */
+	struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
 };
 
 /* Easy access to information */
@@ -362,6 +368,76 @@
 	spin_unlock_irqrestore(&pdata->dev_lock, flags);
 }
 
+/*
+ * enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct smsc911x_data *pdata = netdev_priv(ndev);
+	int ret = 0;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+			pdata->supplies);
+	if (ret)
+		netdev_err(ndev, "failed to enable regulators %d\n",
+				ret);
+	return ret;
+}
+
+/*
+ * disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct smsc911x_data *pdata = netdev_priv(ndev);
+	int ret = 0;
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+			pdata->supplies);
+	return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a. In designs where
+ * these are not always-on, we need to request regulators to be turned on
+ * before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct smsc911x_data *pdata = netdev_priv(ndev);
+	int ret = 0;
+
+	/* Request regulators */
+	pdata->supplies[0].supply = "vdd33a";
+	pdata->supplies[1].supply = "vddvario";
+	ret = regulator_bulk_get(&pdev->dev,
+			ARRAY_SIZE(pdata->supplies),
+			pdata->supplies);
+	if (ret)
+		netdev_err(ndev, "couldn't get regulators %d\n",
+				ret);
+	return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct smsc911x_data *pdata = netdev_priv(ndev);
+
+	/* Free regulators */
+	regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+			pdata->supplies);
+}
+
 /* waits for MAC not busy, with timeout.  Only called by smsc911x_mac_read
  * and smsc911x_mac_write, so assumes mac_lock is held */
 static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -1243,10 +1319,92 @@
 	spin_unlock(&pdata->mac_lock);
 }
 
+static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
+{
+	int rc = 0;
+
+	if (!pdata->phy_dev)
+		return rc;
+
+	rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+	if (rc < 0) {
+		SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+		return rc;
+	}
+
+	/*
+	 * If energy is detected, the PHY is already awake, so it is not
+	 * necessary to disable the energy detect power-down mode.
+	 */
+	if ((rc & MII_LAN83C185_EDPWRDOWN) &&
+	    !(rc & MII_LAN83C185_ENERGYON)) {
+		/* Disable energy detect mode for this SMSC transceiver */
+		rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+			       rc & (~MII_LAN83C185_EDPWRDOWN));
+
+		if (rc < 0) {
+			SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+			return rc;
+		}
+
+		mdelay(1);
+	}
+
+	return 0;
+}
+
+static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
+{
+	int rc = 0;
+
+	if (!pdata->phy_dev)
+		return rc;
+
+	rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+	if (rc < 0) {
+		SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+		return rc;
+	}
+
+	/* Only enable if energy detect mode is already disabled */
+	if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
+		mdelay(100);
+		/* Enable energy detect mode for this SMSC transceiver */
+		rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+			       rc | MII_LAN83C185_EDPWRDOWN);
+
+		if (rc < 0) {
+			SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+			return rc;
+		}
+
+		mdelay(1);
+	}
+	return 0;
+}
+
 static int smsc911x_soft_reset(struct smsc911x_data *pdata)
 {
 	unsigned int timeout;
 	unsigned int temp;
+	int ret;
+
+	/*
+	 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
+	 * is initialized in an Energy Detect Power-Down mode which prevents
+	 * the MAC chip from being software reset. So we have to wake up the
+	 * PHY first.
+	 */
+	if (pdata->generation == 4) {
+		ret = smsc911x_phy_disable_energy_detect(pdata);
+
+		if (ret) {
+			SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+			return ret;
+		}
+	}
 
 	/* Reset the LAN911x */
 	smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
@@ -1260,6 +1418,16 @@
 		SMSC_WARN(pdata, drv, "Failed to complete reset");
 		return -EIO;
 	}
+
+	if (pdata->generation == 4) {
+		ret = smsc911x_phy_enable_energy_detect(pdata);
+
+		if (ret) {
+			SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -2092,6 +2260,9 @@
 
 	iounmap(pdata->ioaddr);
 
+	(void)smsc911x_disable_resources(pdev);
+	smsc911x_free_resources(pdev);
+
 	free_netdev(dev);
 
 	return 0;
@@ -2218,10 +2389,20 @@
 	pdata->dev = dev;
 	pdata->msg_enable = ((1 << debug) - 1);
 
+	platform_set_drvdata(pdev, dev);
+
+	retval = smsc911x_request_resources(pdev);
+	if (retval)
+		goto out_return_resources;
+
+	retval = smsc911x_enable_resources(pdev);
+	if (retval)
+		goto out_disable_resources;
+
 	if (pdata->ioaddr == NULL) {
 		SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
 		retval = -ENOMEM;
-		goto out_free_netdev_2;
+		goto out_disable_resources;
 	}
 
 	retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2233,7 +2414,7 @@
 
 	if (retval) {
 		SMSC_WARN(pdata, probe, "Error smsc911x config not found");
-		goto out_unmap_io_3;
+		goto out_disable_resources;
 	}
 
 	/* assume standard, non-shifted, access to HW registers */
@@ -2244,7 +2425,7 @@
 
 	retval = smsc911x_init(dev);
 	if (retval < 0)
-		goto out_unmap_io_3;
+		goto out_disable_resources;
 
 	/* configure irq polarity and type before connecting isr */
 	if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2264,15 +2445,13 @@
 	if (retval) {
 		SMSC_WARN(pdata, probe,
 			  "Unable to claim requested irq: %d", dev->irq);
-		goto out_unmap_io_3;
+		goto out_free_irq;
 	}
 
-	platform_set_drvdata(pdev, dev);
-
 	retval = register_netdev(dev);
 	if (retval) {
 		SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-		goto out_unset_drvdata_4;
+		goto out_free_irq;
 	} else {
 		SMSC_TRACE(pdata, probe,
 			   "Network interface: \"%s\"", dev->name);
@@ -2321,12 +2500,14 @@
 
 out_unregister_netdev_5:
 	unregister_netdev(dev);
-out_unset_drvdata_4:
-	platform_set_drvdata(pdev, NULL);
+out_free_irq:
 	free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+	(void)smsc911x_disable_resources(pdev);
+out_return_resources:
+	smsc911x_free_resources(pdev);
+	platform_set_drvdata(pdev, NULL);
 	iounmap(pdata->ioaddr);
-out_free_netdev_2:
 	free_netdev(dev);
 out_release_io_1:
 	release_mem_region(res->start, resource_size(res));
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d67aac..938ecf2 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -401,4 +401,8 @@
 #include <asm/smsc911x.h>
 #endif
 
+#ifdef CONFIG_SMSC_PHY
+#include <linux/smscphy.h>
+#endif
+
 #endif				/* __SMSC911X_H__ */
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index edb24b0..a9efbdf 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -279,9 +279,10 @@
 {
 	struct smsc9420_pdata *pd = netdev_priv(netdev);
 
-	strcpy(drvinfo->driver, DRV_NAME);
-	strcpy(drvinfo->bus_info, pci_name(pd->pdev));
-	strcpy(drvinfo->version, DRV_VERSION);
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+		sizeof(drvinfo->bus_info));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
 }
 
 static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
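The strcpy-to-strlcpy conversions above (and the matching changes to cassini, niu, sungem, sunhme, via-rhine, velocity and xirc2ps later in this series) all follow the same pattern: the ethtool drvinfo fields are fixed-size arrays, so the copy must be bounded by the destination size and always NUL-terminated. A minimal sketch of the pattern, with a hypothetical driver name and version string:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static void example_get_drvinfo(struct net_device *netdev,
					struct ethtool_drvinfo *drvinfo)
	{
		/* strlcpy() truncates to sizeof(dst) - 1 and always NUL-terminates,
		 * unlike strcpy(), which can overrun these fixed-size fields. */
		strlcpy(drvinfo->driver, "example", sizeof(drvinfo->driver));
		strlcpy(drvinfo->version, "1.0", sizeof(drvinfo->version));
	}
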
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 22745d7..0364283 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,11 +12,36 @@
 
 if STMMAC_ETH
 
+config STMMAC_PLATFORM
+	tristate "STMMAC platform bus support"
+	depends on STMMAC_ETH
+	default y
+	---help---
+	  This selects the platform specific bus support for
+	  the stmmac device driver. This is the driver used
+	  on many embedded STM platforms based on ARM and SuperH
+	  processors.
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
+config STMMAC_PCI
+	tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+	depends on STMMAC_ETH && PCI && EXPERIMENTAL
+	---help---
+	  This selects the Synopsys DWMAC available on PCI devices;
+	  if you have a controller with this interface, say Y or M here.
+
+	  This PCI support is tested on the XILINX XC2V3000 FF1152AMT0221
+	  D1215994A VIRTEX FPGA board.
+
+	  If unsure, say N.
+
 config STMMAC_DEBUG_FS
 	bool "Enable monitoring via sysFS "
 	default n
 	depends on STMMAC_ETH && DEBUG_FS
-	-- help
+	---help---
 	  The stmmac entry in /sys reports DMA TX/RX rings
 	  or (if supported) the HW cap register.
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index d7c4516..bc965ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,8 @@
 stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
 stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
 stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o	\
 	      dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o	\
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2cc1192..d0b814e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,7 +22,11 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define STMMAC_VLAN_TAG_USED
 #include <linux/if_vlan.h>
@@ -315,5 +319,8 @@
 				unsigned int high, unsigned int low);
 extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 				unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e250935..f20aa12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -238,6 +238,19 @@
 	writel(data, ioaddr + low);
 }
 
+/* Enable/disable the MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+	if (enable)
+		value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+	else
+		value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
 void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 			 unsigned int high, unsigned int low)
 {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index a140a8f..1207400 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,7 +20,8 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
-#define DRV_MODULE_VERSION	"Oct_2011"
+#define STMMAC_RESOURCE_NAME   "stmmaceth"
+#define DRV_MODULE_VERSION	"Dec_2011"
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include "common.h"
@@ -82,8 +83,18 @@
 	int hw_cap_support;
 };
 
+extern int phyaddr;
+
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+				struct plat_stmmacenet_data *plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 0395f9e..9573303 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -185,9 +185,10 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 
 	if (priv->plat->has_gmac)
-		strcpy(info->driver, GMAC_ETHTOOL_NAME);
+		strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
 	else
-		strcpy(info->driver, MAC100_ETHTOOL_NAME);
+		strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+			sizeof(info->driver));
 
 	strcpy(info->version, DRV_MODULE_VERSION);
 	info->fw_version[0] = '\0';
@@ -458,7 +459,7 @@
 	return 0;
 }
 
-static struct ethtool_ops stmmac_ethtool_ops = {
+static const struct ethtool_ops stmmac_ethtool_ops = {
 	.begin = stmmac_check_if_running,
 	.get_drvinfo = stmmac_ethtool_getdrvinfo,
 	.get_settings = stmmac_ethtool_getsettings,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 72cd190..3738b47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,12 +28,8 @@
 	https://bugzilla.stlinux.com/
 *******************************************************************************/
 
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/skbuff.h>
@@ -52,8 +48,6 @@
 #endif
 #include "stmmac.h"
 
-#define STMMAC_RESOURCE_NAME	"stmmaceth"
-
 #undef STMMAC_DEBUG
 /*#define STMMAC_DEBUG*/
 #ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@
 module_param(debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
 
-static int phyaddr = -1;
+int phyaddr = -1;
 module_param(phyaddr, int, S_IRUGO);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
@@ -141,6 +135,11 @@
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
 /**
  * stmmac_verify_args - verify the driver parameters.
  * Description: it verifies if some wrong parameter is passed to the driver.
@@ -345,22 +344,6 @@
 	return 0;
 }
 
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-
-	value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-
-	value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
-
 /**
  * display_ring
  * @p: pointer to the ring.
@@ -887,6 +870,53 @@
 }
 
 /**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: this attaches the GMAC or MAC 10/100
+ * main core structures, which are completed during the
+ * open step.
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	struct mac_device_info *device;
+
+	if (priv->plat->has_gmac)
+		device = dwmac1000_setup(priv->ioaddr);
+	else
+		device = dwmac100_setup(priv->ioaddr);
+
+	if (!device)
+		return -ENOMEM;
+
+	priv->hw = device;
+	priv->hw->ring = &ring_mode_ops;
+
+	if (device_can_wakeup(priv->device)) {
+		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(priv->wol_irq);
+	}
+
+	return 0;
+}
+
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+	/* verify if the MAC address is valid, in case of failures it
+	 * generates a random MAC address */
+	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+		priv->hw->mac->get_umac_addr((void __iomem *)
+					     priv->dev->base_addr,
+					     priv->dev->dev_addr, 0);
+		if  (!is_valid_ether_addr(priv->dev->dev_addr))
+			random_ether_addr(priv->dev->dev_addr);
+	}
+	pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+						   priv->dev->dev_addr);
+}
+
+/**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
  *  Description:
@@ -900,18 +930,28 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	/* Check that the MAC address is valid.  If its not, refuse
-	 * to bring the device up. The user must specify an
-	 * address using the following linux command:
-	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		random_ether_addr(dev->dev_addr);
-		pr_warning("%s: generated random MAC address %pM\n", dev->name,
-			dev->dev_addr);
-	}
+	/* MAC HW device setup */
+	ret = stmmac_mac_device_setup(dev);
+	if (ret < 0)
+		return ret;
+
+	stmmac_check_ether_addr(priv);
 
 	stmmac_verify_args();
 
+	/* Override with kernel parameters if supplied XXX CRS XXX
+	 * this needs to have multiple instances */
+	if ((phyaddr >= 0) && (phyaddr <= 31))
+		priv->plat->phy_addr = phyaddr;
+
+	/* MDIO bus Registration */
+	ret = stmmac_mdio_register(dev);
+	if (ret < 0) {
+		pr_debug("%s: MDIO bus (id: %d) registration failed",
+			 __func__, priv->plat->bus_id);
+		return ret;
+	}
+
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
 	if (unlikely(priv->tm == NULL)) {
@@ -1008,7 +1048,7 @@
 	}
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_enable_mac(priv->ioaddr);
+	stmmac_set_mac(priv->ioaddr, true);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -1019,6 +1059,11 @@
 
 	stmmac_mmc_setup(priv);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+	ret = stmmac_init_fs(dev);
+	if (ret < 0)
+		pr_warning("\tFailed debugFS registration");
+#endif
 	/* Start the ball rolling... */
 	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
 	priv->hw->dma->start_tx(priv->ioaddr);
@@ -1091,10 +1136,15 @@
 	free_dma_desc_resources(priv);
 
 	/* Disable the MAC Rx/Tx */
-	stmmac_disable_mac(priv->ioaddr);
+	stmmac_set_mac(priv->ioaddr, false);
 
 	netif_carrier_off(dev);
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+	stmmac_exit_fs();
+#endif
+	stmmac_mdio_unregister(dev);
+
 	return 0;
 }
 
@@ -1470,7 +1520,8 @@
 	return 0;
 }
 
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
@@ -1738,28 +1789,41 @@
 };
 
 /**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * Description: this is the main probe function, used to
+ * allocate the net device via alloc_etherdev and set up the priv structure.
  */
-static int stmmac_probe(struct net_device *dev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+					struct plat_stmmacenet_data *plat_dat)
 {
 	int ret = 0;
-	struct stmmac_priv *priv = netdev_priv(dev);
+	struct net_device *ndev = NULL;
+	struct stmmac_priv *priv;
 
-	ether_setup(dev);
+	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+	if (!ndev) {
+		pr_err("%s: ERROR: allocating the device\n", __func__);
+		return NULL;
+	}
 
-	dev->netdev_ops = &stmmac_netdev_ops;
-	stmmac_set_ethtool_ops(dev);
+	SET_NETDEV_DEV(ndev, device);
 
-	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
-	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+	priv = netdev_priv(ndev);
+	priv->device = device;
+	priv->dev = ndev;
+
+	ether_setup(ndev);
+
+	ndev->netdev_ops = &stmmac_netdev_ops;
+	stmmac_set_ethtool_ops(ndev);
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
 	/* Both mac100 and gmac support receive VLAN tag detection */
-	dev->features |= NETIF_F_HW_VLAN_RX;
+	ndev->features |= NETIF_F_HW_VLAN_RX;
 #endif
 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
 
@@ -1767,248 +1831,60 @@
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
 	priv->pause = pause;
-	netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
-
-	/* Get the MAC address */
-	priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
-				     dev->dev_addr, 0);
-
-	if (!is_valid_ether_addr(dev->dev_addr))
-		pr_warning("\tno valid MAC address;"
-			"please, use ifconfig or nwhwconfig!\n");
+	priv->plat = plat_dat;
+	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
 
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);
 
-	ret = register_netdev(dev);
+	ret = register_netdev(ndev);
 	if (ret) {
 		pr_err("%s: ERROR %i registering the device\n",
 		       __func__, ret);
-		return -ENODEV;
+		goto error;
 	}
 
 	DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
-	    dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
-	    (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
+	    ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
+	    (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
 
-	return ret;
-}
+	return priv;
 
-/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
-	struct stmmac_priv *priv = netdev_priv(dev);
+error:
+	netif_napi_del(&priv->napi);
 
-	struct mac_device_info *device;
-
-	if (priv->plat->has_gmac) {
-		dev->priv_flags |= IFF_UNICAST_FLT;
-		device = dwmac1000_setup(priv->ioaddr);
-	} else {
-		device = dwmac100_setup(priv->ioaddr);
-	}
-
-	if (!device)
-		return -ENOMEM;
-
-	priv->hw = device;
-	priv->hw->ring = &ring_mode_ops;
-
-	if (device_can_wakeup(priv->device)) {
-		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
-		enable_irq_wake(priv->wol_irq);
-	}
-
-	return 0;
-}
-
-/**
- * stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
- */
-static int stmmac_dvr_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct resource *res;
-	void __iomem *addr = NULL;
-	struct net_device *ndev = NULL;
-	struct stmmac_priv *priv = NULL;
-	struct plat_stmmacenet_data *plat_dat;
-
-	pr_info("STMMAC driver:\n\tplatform registration... ");
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-	pr_info("\tdone!\n");
-
-	if (!request_mem_region(res->start, resource_size(res),
-				pdev->name)) {
-		pr_err("%s: ERROR: memory allocation failed"
-		       "cannot get the I/O addr 0x%x\n",
-		       __func__, (unsigned int)res->start);
-		return -EBUSY;
-	}
-
-	addr = ioremap(res->start, resource_size(res));
-	if (!addr) {
-		pr_err("%s: ERROR: memory mapping failed\n", __func__);
-		ret = -ENOMEM;
-		goto out_release_region;
-	}
-
-	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
-	if (!ndev) {
-		pr_err("%s: ERROR: allocating the device\n", __func__);
-		ret = -ENOMEM;
-		goto out_unmap;
-	}
-
-	SET_NETDEV_DEV(ndev, &pdev->dev);
-
-	/* Get the MAC information */
-	ndev->irq = platform_get_irq_byname(pdev, "macirq");
-	if (ndev->irq == -ENXIO) {
-		pr_err("%s: ERROR: MAC IRQ configuration "
-		       "information not found\n", __func__);
-		ret = -ENXIO;
-		goto out_free_ndev;
-	}
-
-	priv = netdev_priv(ndev);
-	priv->device = &(pdev->dev);
-	priv->dev = ndev;
-	plat_dat = pdev->dev.platform_data;
-
-	priv->plat = plat_dat;
-
-	priv->ioaddr = addr;
-
-	/*
-	 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
-	 * The external wake up irq can be passed through the platform code
-	 * named as "eth_wake_irq"
-	 *
-	 * In case the wake up interrupt is not passed from the platform
-	 * so the driver will continue to use the mac irq (ndev->irq)
-	 */
-	priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-	if (priv->wol_irq == -ENXIO)
-		priv->wol_irq = ndev->irq;
-
-	platform_set_drvdata(pdev, ndev);
-
-	/* Set the I/O base addr */
-	ndev->base_addr = (unsigned long)addr;
-
-	/* Custom initialisation */
-	if (priv->plat->init) {
-		ret = priv->plat->init(pdev);
-		if (unlikely(ret))
-			goto out_free_ndev;
-	}
-
-	/* MAC HW device detection */
-	ret = stmmac_mac_device_setup(ndev);
-	if (ret < 0)
-		goto out_plat_exit;
-
-	/* Network Device Registration */
-	ret = stmmac_probe(ndev);
-	if (ret < 0)
-		goto out_plat_exit;
-
-	/* Override with kernel parameters if supplied XXX CRS XXX
-	 * this needs to have multiple instances */
-	if ((phyaddr >= 0) && (phyaddr <= 31))
-		priv->plat->phy_addr = phyaddr;
-
-	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
-	       "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
-	       pdev->id, ndev->irq, addr);
-
-	/* MDIO bus Registration */
-	pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
-	ret = stmmac_mdio_register(ndev);
-	if (ret < 0)
-		goto out_unregister;
-	pr_debug("registered!\n");
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
-	ret = stmmac_init_fs(ndev);
-	if (ret < 0)
-		pr_warning("\tFailed debugFS registration");
-#endif
-
-	return 0;
-
-out_unregister:
 	unregister_netdev(ndev);
-out_plat_exit:
-	if (priv->plat->exit)
-		priv->plat->exit(pdev);
-out_free_ndev:
 	free_netdev(ndev);
-	platform_set_drvdata(pdev, NULL);
-out_unmap:
-	iounmap(addr);
-out_release_region:
-	release_mem_region(res->start, resource_size(res));
 
-	return ret;
+	return NULL;
 }
 
 /**
  * stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
  */
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
 {
-	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	struct resource *res;
 
 	pr_info("%s:\n\tremoving driver", __func__);
 
 	priv->hw->dma->stop_rx(priv->ioaddr);
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
-	stmmac_disable_mac(priv->ioaddr);
-
+	stmmac_set_mac(priv->ioaddr, false);
 	netif_carrier_off(ndev);
-
-	stmmac_mdio_unregister(ndev);
-
-	if (priv->plat->exit)
-		priv->plat->exit(pdev);
-
-	platform_set_drvdata(pdev, NULL);
 	unregister_netdev(ndev);
-
-	iounmap((void *)priv->ioaddr);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
-	stmmac_exit_fs();
-#endif
-
 	free_netdev(ndev);
 
 	return 0;
 }
 
 #ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
 {
-	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	int dis_ic = 0;
 
@@ -2042,15 +1918,14 @@
 	if (device_may_wakeup(priv->device))
 		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 	else
-		stmmac_disable_mac(priv->ioaddr);
+		stmmac_set_mac(priv->ioaddr, false);
 
 	spin_unlock(&priv->lock);
 	return 0;
 }
 
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
 {
-	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
 
 	if (!netif_running(ndev))
@@ -2069,7 +1944,7 @@
 	netif_device_attach(ndev);
 
 	/* Enable the MAC and DMA */
-	stmmac_enable_mac(priv->ioaddr);
+	stmmac_set_mac(priv->ioaddr, true);
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
@@ -2089,68 +1964,23 @@
 	return 0;
 }
 
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
 {
-	struct net_device *ndev = dev_get_drvdata(dev);
-
 	if (!ndev || !netif_running(ndev))
 		return 0;
 
 	return stmmac_release(ndev);
 }
 
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
 {
-	struct net_device *ndev = dev_get_drvdata(dev);
-
 	if (!ndev || !netif_running(ndev))
 		return 0;
 
 	return stmmac_open(ndev);
 }
-
-static const struct dev_pm_ops stmmac_pm_ops = {
-	.suspend = stmmac_suspend,
-	.resume = stmmac_resume,
-	.freeze = stmmac_freeze,
-	.thaw = stmmac_restore,
-	.restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
 #endif /* CONFIG_PM */
 
-static struct platform_driver stmmac_driver = {
-	.probe = stmmac_dvr_probe,
-	.remove = stmmac_dvr_remove,
-	.driver = {
-		.name = STMMAC_RESOURCE_NAME,
-		.owner = THIS_MODULE,
-		.pm = &stmmac_pm_ops,
-	},
-};
-
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&stmmac_driver);
-	return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-	platform_driver_unregister(&stmmac_driver);
-}
-
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
@@ -2210,9 +2040,6 @@
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif
 
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9c3b9d5..51f4412 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -109,6 +109,7 @@
  */
 static int stmmac_mdio_reset(struct mii_bus *bus)
 {
+#if defined(CONFIG_STMMAC_PLATFORM)
 	struct net_device *ndev = bus->priv;
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@
 	 * on MDC, so perform a dummy mdio read.
 	 */
 	writel(0, priv->ioaddr + mii_address);
-
+#endif
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644
index 0000000..54a819a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -0,0 +1,221 @@
+/*******************************************************************************
+  This contains the functions to handle the pci driver.
+
+  Copyright (C) 2011-2012  Vayavya Labs Pvt Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+	memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+	plat_dat.bus_id = 1;
+	plat_dat.phy_addr = 0;
+	plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+	plat_dat.pbl = 32;
+	plat_dat.clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+	plat_dat.has_gmac = 1;
+	plat_dat.force_sf_dma_mode = 1;
+
+	mdio_data.bus_id = 1;
+	mdio_data.phy_reset = NULL;
+	mdio_data.phy_mask = 0;
+	plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by other driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device, or a negative error code otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+				      const struct pci_device_id *id)
+{
+	int ret = 0;
+	void __iomem *addr = NULL;
+	struct stmmac_priv *priv = NULL;
+	int i;
+
+	/* Enable pci device */
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+		       pci_name(pdev));
+		return ret;
+	}
+	if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+		pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+		ret = -ENODEV;
+		goto err_out_req_reg_failed;
+	}
+
+	/* Get the base address of device */
+	for (i = 0; i <= 5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		addr = pci_iomap(pdev, i, 0);
+		if (addr == NULL) {
+			pr_err("%s: ERROR: cannot map register memory, aborting",
+			       __func__);
+			ret = -EIO;
+			goto err_out_map_failed;
+		}
+		break;
+	}
+	pci_set_master(pdev);
+
+	stmmac_default_data();
+
+	priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat);
+	if (!priv) {
+		pr_err("%s: main driver probe failed", __func__);
+		goto err_out;
+	}
+	priv->ioaddr = addr;
+	priv->dev->base_addr = (unsigned long)addr;
+	priv->dev->irq = pdev->irq;
+	priv->wol_irq = pdev->irq;
+
+	pci_set_drvdata(pdev, priv->dev);
+
+	pr_debug("STMMAC PCI driver registration completed");
+
+	return 0;
+
+err_out:
+	pci_clear_master(pdev);
+err_out_map_failed:
+	pci_release_regions(pdev);
+err_out_req_reg_failed:
+	pci_disable_device(pdev);
+
+	return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver remove to free the net
+ * resources and then releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
+
+	stmmac_dvr_remove(ndev);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_iounmap(pdev, priv->ioaddr);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = stmmac_suspend(ndev);
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+	{ PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+	.name = STMMAC_RESOURCE_NAME,
+	.id_table = stmmac_id_table,
+	.probe = stmmac_pci_probe,
+	.remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+	.suspend = stmmac_pci_suspend,
+	.resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&stmmac_driver);
+	if (ret < 0)
+		pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+	return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+	pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644
index 0000000..7b1594f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -0,0 +1,198 @@
+/*******************************************************************************
+  This contains the functions to handle the platform driver.
+
+  Copyright (C) 2007-2011  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to init
+ * the net device, register the MDIO bus, etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	void __iomem *addr = NULL;
+	struct stmmac_priv *priv = NULL;
+	struct plat_stmmacenet_data *plat_dat;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)res->start);
+		return -EBUSY;
+	}
+
+	addr = ioremap(res->start, resource_size(res));
+	if (!addr) {
+		pr_err("%s: ERROR: memory mapping failed", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+	plat_dat = pdev->dev.platform_data;
+	priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
+	if (!priv) {
+		pr_err("%s: main driver probe failed", __func__);
+		goto out_release_region;
+	}
+
+	priv->ioaddr = addr;
+	/* Set the I/O base addr */
+	priv->dev->base_addr = (unsigned long)addr;
+
+	/* Get the MAC information */
+	priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+	if (priv->dev->irq == -ENXIO) {
+		pr_err("%s: ERROR: MAC IRQ configuration "
+		       "information not found\n", __func__);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	/*
+	 * On some platforms, e.g. SPEAr, the wake-up IRQ differs from the
+	 * MAC IRQ. The external wake-up IRQ can be passed through the
+	 * platform code, named "eth_wake_irq".
+	 *
+	 * If the wake-up interrupt is not passed from the platform code,
+	 * the driver will continue to use the MAC IRQ (ndev->irq).
+	 */
+	priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+	if (priv->wol_irq == -ENXIO)
+		priv->wol_irq = priv->dev->irq;
+
+	platform_set_drvdata(pdev, priv->dev);
+
+	/* Custom initialisation */
+	if (priv->plat->init) {
+		ret = priv->plat->init(pdev);
+		if (unlikely(ret))
+			goto out_unmap;
+	}
+
+	pr_debug("STMMAC platform driver registration completed");
+
+	return 0;
+
+out_unmap:
+	iounmap(addr);
+	platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+	release_mem_region(res->start, resource_size(res));
+
+	return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver remove to free the net
+ * resources, calls the platform's exit hook and releases the resources (e.g. mem).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct resource *res;
+	int ret = stmmac_dvr_remove(ndev);
+
+	if (priv->plat->exit)
+		priv->plat->exit(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	iounmap((void *)priv->ioaddr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	return stmmac_resume(ndev);
+}
+
+int stmmac_pltfr_freeze(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	return stmmac_freeze(ndev);
+}
+
+int stmmac_pltfr_restore(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+	.suspend = stmmac_pltfr_suspend,
+	.resume = stmmac_pltfr_resume,
+	.freeze = stmmac_pltfr_freeze,
+	.thaw = stmmac_pltfr_restore,
+	.restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+	.probe = stmmac_pltfr_probe,
+	.remove = stmmac_pltfr_remove,
+	.driver = {
+		   .name = STMMAC_RESOURCE_NAME,
+		   .owner = THIS_MODULE,
+		   .pm = &stmmac_pltfr_pm_ops,
+		   },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index fd40988..f10665f 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4532,10 +4532,9 @@
 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct cas *cp = netdev_priv(dev);
-	strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
-	strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
-	info->fw_version[0] = '\0';
-	strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
 	info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
 		cp->casreg_len : CAS_MAX_REGS;
 	info->n_stats = CAS_NUM_STAT_KEYS;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 73c7081..cf43393 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1151,19 +1151,8 @@
 		supported |= SUPPORTED_1000baseT_Full;
 	lp->supported = supported;
 
-	advertising = 0;
-	if (advert & ADVERTISE_10HALF)
-		advertising |= ADVERTISED_10baseT_Half;
-	if (advert & ADVERTISE_10FULL)
-		advertising |= ADVERTISED_10baseT_Full;
-	if (advert & ADVERTISE_100HALF)
-		advertising |= ADVERTISED_100baseT_Half;
-	if (advert & ADVERTISE_100FULL)
-		advertising |= ADVERTISED_100baseT_Full;
-	if (ctrl1000 & ADVERTISE_1000HALF)
-		advertising |= ADVERTISED_1000baseT_Half;
-	if (ctrl1000 & ADVERTISE_1000FULL)
-		advertising |= ADVERTISED_1000baseT_Full;
+	advertising = mii_adv_to_ethtool_adv_t(advert);
+	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 
 	if (bmcr & BMCR_ANENABLE) {
 		int neg, neg1000;
@@ -3609,6 +3598,7 @@
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
 	struct netdev_queue *txq;
+	unsigned int tx_bytes;
 	u16 pkt_cnt, tmp;
 	int cons, index;
 	u64 cs;
@@ -3631,12 +3621,18 @@
 	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-	while (pkt_cnt--)
+	tx_bytes = 0;
+	tmp = pkt_cnt;
+	while (tmp--) {
+		tx_bytes += rp->tx_buffs[cons].skb->len;
 		cons = release_tx_packet(np, rp, cons);
+	}
 
 	rp->cons = cons;
 	smp_mb();
 
+	netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@
 			struct tx_ring_info *rp = &np->tx_rings[i];
 
 			niu_free_tx_ring_info(np, rp);
+			netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
 		}
 		kfree(np->tx_rings);
 		np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@
 		prod = NEXT_TX(rp, prod);
 	}
 
+	netdev_tx_sent_queue(txq, skb->len);
+
 	if (prod < rp->prod)
 		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 	rp->prod = prod;
@@ -6823,12 +6822,13 @@
 	struct niu *np = netdev_priv(dev);
 	struct niu_vpd *vpd = &np->vpd;
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
-	sprintf(info->fw_version, "%d.%d",
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
 		vpd->fcode_major, vpd->fcode_minor);
 	if (np->parent->plat_type != PLAT_TYPE_NIU)
-		strcpy(info->bus_info, pci_name(np->pdev));
+		strlcpy(info->bus_info, pci_name(np->pdev),
+			sizeof(info->bus_info));
 }
 
 static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@
 	if (dev_id_1 < 0 || dev_id_2 < 0)
 		return 0;
 	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
+		 * test covers the 8706 as well.
+		 */
 		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
-		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
-		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
 			return 0;
 	} else {
 		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d8cfd9..220f724 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1293,15 +1293,4 @@
 	.remove		= __devexit_p(bigmac_sbus_remove),
 };
 
-static int __init bigmac_init(void)
-{
-	return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
-	platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index ceab215..31441a8 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2517,9 +2517,9 @@
 {
 	struct gem *gp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(gp->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
 }
 
 static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index cf14ab9..09c5186 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2457,11 +2457,11 @@
 {
 	struct happy_meal *hp = netdev_priv(dev);
 
-	strcpy(info->driver, "sunhme");
-	strcpy(info->version, "2.02");
+	strlcpy(info->driver, "sunhme", sizeof(info->driver));
+	strlcpy(info->version, "2.02", sizeof(info->version));
 	if (hp->happy_flags & HFLAG_PCI) {
 		struct pci_dev *pdev = hp->happy_dev;
-		strcpy(info->bus_info, pci_name(pdev));
+		strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
 	}
 #ifdef CONFIG_SBUS
 	else {
@@ -2469,7 +2469,8 @@
 		struct platform_device *op = hp->happy_dev;
 		regs = of_get_property(op->dev.of_node, "regs", NULL);
 		if (regs)
-			sprintf(info->bus_info, "SBUS:%d",
+			snprintf(info->bus_info, sizeof(info->bus_info),
+				"SBUS:%d",
 				regs->which_io);
 	}
 #endif
@@ -2849,7 +2850,7 @@
 static int is_quattro_p(struct pci_dev *pdev)
 {
 	struct pci_dev *busdev = pdev->bus->self;
-	struct list_head *tmp;
+	struct pci_dev *this_pdev;
 	int n_hmes;
 
 	if (busdev == NULL ||
@@ -2858,15 +2859,10 @@
 		return 0;
 
 	n_hmes = 0;
-	tmp = pdev->bus->devices.next;
-	while (tmp != &pdev->bus->devices) {
-		struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+	list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
 		if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
 		    this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
 			n_hmes++;
-
-		tmp = tmp->next;
 	}
 
 	if (n_hmes != 4)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 3a90af6..4b19e9b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -727,9 +727,10 @@
  * @ndev network device
  * @vid  VLAN vid to add
  */
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
 {
 	__bdx_vlan_rx_vid(ndev, vid, 1);
+	return 0;
 }
 
 /*
@@ -737,9 +738,10 @@
  * @ndev network device
  * @vid  VLAN vid to kill
  */
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
 {
 	__bdx_vlan_rx_vid(ndev, vid, 0);
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d33..c97d2f5 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@
 	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
 	/* handle completed packets */
+	spin_unlock_irqrestore(&chan->lock, flags);
 	do {
 		ret = __cpdma_chan_process(chan);
 		if (ret < 0)
 			break;
 	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	/* remaining packets haven't been tx/rx'ed, clean them up */
 	while (chan->head) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 815c797..794ac30 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -115,6 +115,7 @@
 #define EMAC_DEF_TX_CH			(0) /* Default 0th channel */
 #define EMAC_DEF_RX_CH			(0) /* Default 0th channel */
 #define EMAC_DEF_RX_NUM_DESC		(128)
+#define EMAC_DEF_TX_NUM_DESC		(128)
 #define EMAC_DEF_MAX_TX_CH		(1) /* Max TX channels configured */
 #define EMAC_DEF_MAX_RX_CH		(1) /* Max RX channels configured */
 #define EMAC_POLL_WEIGHT		(64) /* Default NAPI poll weight */
@@ -336,6 +337,7 @@
 	u32 mac_hash2;
 	u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
 	u32 rx_addr_type;
+	atomic_t cur_tx;
 	const char *phy_id;
 	struct phy_device *phydev;
 	spinlock_t lock;
@@ -1044,6 +1046,9 @@
 {
 	struct sk_buff		*skb = token;
 	struct net_device	*ndev = skb->dev;
+	struct emac_priv	*priv = netdev_priv(ndev);
+
+	atomic_dec(&priv->cur_tx);
 
 	if (unlikely(netif_queue_stopped(ndev)))
 		netif_start_queue(ndev);
@@ -1092,6 +1097,9 @@
 		goto fail_tx;
 	}
 
+	if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+		netif_stop_queue(ndev);
+
 	return NETDEV_TX_OK;
 
 fail_tx:
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 1187a11..6b75063 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1256,7 +1256,7 @@
 			  sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
 		panic("Failed to stop LIPP/LEPP!\n");
 
-	priv->partly_opened = 0;
+	priv->partly_opened = false;
 }
 
 
@@ -1507,7 +1507,7 @@
 		       priv->network_cpus_count, priv->network_cpus_credits);
 #endif
 
-		priv->partly_opened = 1;
+		priv->partly_opened = true;
 
 	} else {
 		/* FIXME: Is this possible? */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 7bf1e20..5ee82a7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -640,7 +640,7 @@
 	int status;
 
 	/* this hvc blocks until the DMA in progress really stopped */
-	status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0);
+	status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
 	if (status)
 		dev_err(ctodev(card),
 			"lv1_net_stop_rx_dma failed, %d\n", status);
@@ -658,7 +658,7 @@
 	int status;
 
 	/* this hvc blocks until the DMA in progress really stopped */
-	status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
+	status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
 	if (status)
 		dev_err(ctodev(card),
 			"lv1_net_stop_tx_dma failed, status=%d\n", status);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a8df7ec..a9ce01ba 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1688,18 +1688,6 @@
 	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
 }
 
-static int tsi108_ether_init(void)
-{
-	int ret;
-	ret = platform_driver_register (&tsi_eth_driver);
-	if (ret < 0){
-		printk("tsi108_ether_init: error initializing ethernet "
-		       "device\n");
-		return ret;
-	}
-	return 0;
-}
-
 static int tsi108_ether_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1702,7 @@
 
 	return 0;
 }
-static void tsi108_ether_exit(void)
-{
-	platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
 
 MODULE_AUTHOR("Tundra Semiconductor Corporation");
 MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f34dd99..5c4983b 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -35,6 +35,7 @@
 #define DRV_VERSION	"1.5.0"
 #define DRV_RELDATE	"2010-10-09"
 
+#include <linux/types.h>
 
 /* A few user-configurable values.
    These may be modified when a driver module is loaded. */
@@ -55,7 +56,7 @@
 
 /* Work-around for broken BIOSes: they are unable to get the chip back out of
    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
 
 /*
  * In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -488,8 +489,8 @@
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
@@ -1261,7 +1262,7 @@
 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
 }
 
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
@@ -1269,9 +1270,10 @@
 	set_bit(vid, rp->active_vlans);
 	rhine_update_vcam(dev);
 	spin_unlock_irq(&rp->lock);
+	return 0;
 }
 
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
@@ -1279,6 +1281,7 @@
 	clear_bit(vid, rp->active_vlans);
 	rhine_update_vcam(dev);
 	spin_unlock_irq(&rp->lock);
+	return 0;
 }
 
 static void init_registers(struct net_device *dev)
@@ -2009,9 +2012,9 @@
 {
 	struct rhine_private *rp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(rp->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2320,7 +2323,7 @@
 #endif
 	if (dmi_check_system(rhine_dmi_table)) {
 		/* these BIOSes fail at PXE boot if chip is in D3 */
-		avoid_D3 = 1;
+		avoid_D3 = true;
 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
 	}
 	else if (avoid_D3)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4535d7c..4128d6b 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -522,7 +522,7 @@
 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 }
 
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 
@@ -530,9 +530,10 @@
 	set_bit(vid, vptr->active_vlans);
 	velocity_init_cam_filter(vptr);
 	spin_unlock_irq(&vptr->lock);
+	return 0;
 }
 
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 
@@ -540,6 +541,7 @@
 	clear_bit(vid, vptr->active_vlans);
 	velocity_init_cam_filter(vptr);
 	spin_unlock_irq(&vptr->lock);
+	return 0;
 }
 
 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -3270,9 +3272,9 @@
 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
-	strcpy(info->driver, VELOCITY_NAME);
-	strcpy(info->version, VELOCITY_VERSION);
-	strcpy(info->bus_info, pci_name(vptr->pdev));
+	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
 }
 
 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2681b53..f21addb 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -237,7 +237,7 @@
 	struct sk_buff *skb;
 	int i;
 
-	lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
 	if (!lp->rx_skb) {
 		dev_err(&ndev->dev,
 				"can't allocate memory for DMA RX buffer\n");
@@ -920,12 +920,26 @@
 }
 #endif
 
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!lp->phy_dev)
+		return -EINVAL;
+
+	return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
 static const struct net_device_ops temac_netdev_ops = {
 	.ndo_open = temac_open,
 	.ndo_stop = temac_stop,
 	.ndo_start_xmit = temac_start_xmit,
 	.ndo_set_mac_address = netdev_set_mac_address,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_do_ioctl = temac_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = temac_poll_controller,
 #endif
@@ -1077,7 +1091,7 @@
 
 	of_node_put(np); /* Finished with the DMA node; drop the reference */
 
-	if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+	if (!lp->rx_irq || !lp->tx_irq) {
 		dev_err(&op->dev, "could not determine irqs\n");
 		rc = -ENOMEM;
 		goto err_iounmap_2;
@@ -1167,17 +1181,7 @@
 	},
 };
 
-static int __init temac_init(void)
-{
-	return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
-	platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
 
 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
 MODULE_AUTHOR("Yoshio Kashiwagi");
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 8018d7d..79013e5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -662,7 +662,7 @@
  */
 static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 {
-	bool tx_complete = 0;
+	bool tx_complete = false;
 	struct net_device *dev = dev_id;
 	struct net_local *lp = netdev_priv(dev);
 	void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@
 		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
 		out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
 
-		tx_complete = 1;
+		tx_complete = true;
 	}
 
 	/* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@
 		out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
 			 tx_status);
 
-		tx_complete = 1;
+		tx_complete = true;
 	}
 
 	/* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@
 
 	/* Get IRQ for the device */
 	rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
-	if (rc == NO_IRQ) {
+	if (!rc) {
 		dev_err(dev, "no IRQ found\n");
 		return rc;
 	}
@@ -1303,27 +1303,7 @@
 	.remove		= __devexit_p(xemaclite_of_remove),
 };
 
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return:	0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
-	/* No kernel boot options used, we just need to register the driver */
-	return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
-	platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
 
 MODULE_AUTHOR("Xilinx, Inc.");
 MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bbe8b7d..33979c3 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1411,7 +1411,7 @@
 static void netdev_get_drvinfo(struct net_device *dev,
 			       struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, "xirc2ps_cs");
+	strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
 	sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
 }
 
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 46b5f5f..e05b645 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -164,7 +164,7 @@
 	.ndo_validate_addr = eth_validate_addr,
 };
 
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
 		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6	| \
 		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
 
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 9d4ce1a..a561ae4 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -806,18 +806,7 @@
 	},
 };
 
-static int __init bfin_sir_init(void)
-{
-	return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
-	platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
 
 module_param(max_rate, int, 0);
 MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b45b2cc..64f403d 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -197,7 +197,7 @@
 
 static int max_baud = 4000000;
 #ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
 #endif
 
 
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index d9267cb..72f687b 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1914,41 +1914,8 @@
 #endif
 };
 
-/************************* MODULE CALLBACKS *************************/
-/*
- * Deal with module insertion/removal
- * Mostly tell USB about our existence
- */
+module_usb_driver(irda_driver);
 
-/*------------------------------------------------------------------*/
-/*
- * Module insertion
- */
-static int __init usb_irda_init(void)
-{
-	int	ret;
-
-	ret = usb_register(&irda_driver);
-	if (ret < 0)
-		return ret;
-
-	IRDA_MESSAGE("USB IrDA support registered\n");
-	return 0;
-}
-module_init(usb_irda_init);
-
-/*------------------------------------------------------------------*/
-/*
- * Module removal
- */
-static void __exit usb_irda_cleanup(void)
-{
-	/* Deregister the driver and remove all pending instances */
-	usb_deregister(&irda_driver);
-}
-module_exit(usb_irda_cleanup);
-
-/*------------------------------------------------------------------*/
 /*
  * Module parameters
  */
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index cb90d64..79aebee 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -621,24 +621,7 @@
 #endif
 };
 
-/*
- * Module insertion
- */
-static int __init kingsun_init(void)
-{
-	return usb_register(&irda_driver);
-}
-module_init(kingsun_init);
-
-/*
- * Module removal
- */
-static void __exit kingsun_cleanup(void)
-{
-	/* Deregister the driver and remove all pending instances */
-	usb_deregister(&irda_driver);
-}
-module_exit(kingsun_cleanup);
+module_usb_driver(irda_driver);
 
 MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
 MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 1046014..abe689d 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -901,26 +901,7 @@
 #endif
 };
 
-/*
- * Module insertion
- */
-static int __init ks959_init(void)
-{
-	return usb_register(&irda_driver);
-}
-
-module_init(ks959_init);
-
-/*
- * Module removal
- */
-static void __exit ks959_cleanup(void)
-{
-	/* Deregister the driver and remove all pending instances */
-	usb_deregister(&irda_driver);
-}
-
-module_exit(ks959_cleanup);
+module_usb_driver(irda_driver);
 
 MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
 MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959");
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 9cc142f..f8c0108 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -796,26 +796,7 @@
 #endif
 };
 
-/*
- * Module insertion
- */
-static int __init ksdazzle_init(void)
-{
-	return usb_register(&irda_driver);
-}
-
-module_init(ksdazzle_init);
-
-/*
- * Module removal
- */
-static void __exit ksdazzle_cleanup(void)
-{
-	/* Deregister the driver and remove all pending instances */
-	usb_deregister(&irda_driver);
-}
-
-module_exit(ksdazzle_cleanup);
+module_usb_driver(irda_driver);
 
 MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
 MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index be52bfe..1a00b59 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -968,25 +968,4 @@
 	IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
 }
 
-/* Module insertion */
-static int __init mcs_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&mcs_driver);
-	if (result)
-		IRDA_ERROR("usb_register failed. Error number %d\n", result);
-
-	return result;
-}
-module_init(mcs_init);
-
-/* Module removal */
-static void __exit mcs_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&mcs_driver);
-}
-module_exit(mcs_exit);
-
+module_usb_driver(mcs_driver);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index b56636d..2a4f2f1 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1664,7 +1664,7 @@
 	switch_bank(iobase, BANK0);
         outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
 	
-	/* Check for underrrun! */
+	/* Check for underrun! */
 	if (inb(iobase+ASCR) & ASCR_TXUR) {
 		self->netdev->stats.tx_errors++;
 		self->netdev->stats.tx_fifo_errors++;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index d0851df..81d5275 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -966,18 +966,7 @@
 	.resume		= pxa_irda_resume,
 };
 
-static int __init pxa_irda_init(void)
-{
-	return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
-	platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index d275e27..725d6b3 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -873,18 +873,7 @@
 	},
 };
 
-static int __init sh_irda_init(void)
-{
-	return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
-	platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
 
 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index ed7d7d6..e6661b5 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -808,18 +808,7 @@
 	},
 };
 
-static int __init sh_sir_init(void)
-{
-	return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
-	platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
 
 MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8b1c348..6c95d40 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -79,7 +79,7 @@
 MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
 MODULE_LICENSE("GPL");
 
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
 module_param_named(nopnp, smsc_nopnp, bool, 0);
 MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
 
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 41c96b3..e6e59a0 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@
 
 			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator();
+			try_to_freeze();
 
 			if (change_speed(stir, stir->speed))
 				break;
@@ -1133,21 +1133,4 @@
 #endif
 };
 
-/*
- * Module insertion
- */
-static int __init stir_init(void)
-{
-	return usb_register(&irda_driver);
-}
-module_init(stir_init);
-
-/*
- * Module removal
- */
-static void __exit stir_cleanup(void)
-{
-	/* Deregister the driver and remove all pending instances */
-	usb_deregister(&irda_driver);
-}
-module_exit(stir_cleanup);
+module_usb_driver(irda_driver);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6d64790..2d456dd 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -942,14 +942,14 @@
 	iobase = self->io.fir_base;
 	/* Disable DMA */
 //      DisableDmaChannel(self->io.dma);
-	/* Check for underrrun! */
+	/* Check for underrun! */
 	/* Clear bit, by writing 1 into it */
 	Tx_status = GetTXStatus(iobase);
 	if (Tx_status & 0x08) {
 		self->netdev->stats.tx_errors++;
 		self->netdev->stats.tx_fifo_errors++;
 		hwreset(self);
-// how to clear underrrun ?
+	/* how to clear underrun? */
 	} else {
 		self->netdev->stats.tx_packets++;
 		ResetChip(iobase, 3);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index c436660..7d43506 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -677,7 +677,7 @@
 	switch_bank(iobase, SET0);
 	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
 	
-	/* Check for underrrun! */
+	/* Check for underrun! */
 	if (inb(iobase+AUDR) & AUDR_UNDR) {
 		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
 		
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4ce9e5f..b71998d 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,7 +169,7 @@
 	dev->features 		= NETIF_F_SG | NETIF_F_FRAGLIST
 		| NETIF_F_ALL_TSO
 		| NETIF_F_UFO
-		| NETIF_F_NO_CSUM
+		| NETIF_F_HW_CSUM
 		| NETIF_F_RXCSUM
 		| NETIF_F_HIGHDMA
 		| NETIF_F_LLTX
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 7413497..f2f820c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -26,6 +26,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <linux/if_link.h>
 #include <linux/if_macvlan.h>
 #include <net/rtnetlink.h>
@@ -520,26 +521,23 @@
 	return stats;
 }
 
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
 				    unsigned short vid)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
-	const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-	if (ops->ndo_vlan_rx_add_vid)
-		ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+	return vlan_vid_add(lowerdev, vid);
 }
 
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
 				     unsigned short vid)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
-	const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-	if (ops->ndo_vlan_rx_kill_vid)
-		ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+	vlan_vid_del(lowerdev, vid);
+	return 0;
 }
 
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1b7082d..58dc117 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -145,8 +145,8 @@
 	if (vlan) {
 		int index = get_slot(vlan, q);
 
-		rcu_assign_pointer(vlan->taps[index], NULL);
-		rcu_assign_pointer(q->vlan, NULL);
+		RCU_INIT_POINTER(vlan->taps[index], NULL);
+		RCU_INIT_POINTER(q->vlan, NULL);
 		sock_put(&q->sk);
 		--vlan->numvtaps;
 	}
@@ -175,6 +175,14 @@
 	if (!numvtaps)
 		goto out;
 
+	/* Check if we can use flow to select a queue */
+	rxq = skb_get_rxhash(skb);
+	if (rxq) {
+		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+		if (tap)
+			goto out;
+	}
+
 	if (likely(skb_rx_queue_recorded(skb))) {
 		rxq = skb_get_rx_queue(skb);
 
@@ -186,14 +194,6 @@
 			goto out;
 	}
 
-	/* Check if we can use flow to select a queue */
-	rxq = skb_get_rxhash(skb);
-	if (rxq) {
-		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
-		if (tap)
-			goto out;
-	}
-
 	/* Everything failed - find first available queue */
 	for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
 		tap = rcu_dereference(vlan->taps[rxq]);
@@ -223,8 +223,8 @@
 					      lockdep_is_held(&macvtap_lock));
 		if (q) {
 			qlist[j++] = q;
-			rcu_assign_pointer(vlan->taps[i], NULL);
-			rcu_assign_pointer(q->vlan, NULL);
+			RCU_INIT_POINTER(vlan->taps[i], NULL);
+			RCU_INIT_POINTER(q->vlan, NULL);
 			vlan->numvtaps--;
 		}
 	}
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c62e781..c70c233 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -35,26 +35,11 @@
 
 static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
 {
-	u32 result = 0;
 	int advert;
 
 	advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
-	if (advert & LPA_LPACK)
-		result |= ADVERTISED_Autoneg;
-	if (advert & ADVERTISE_10HALF)
-		result |= ADVERTISED_10baseT_Half;
-	if (advert & ADVERTISE_10FULL)
-		result |= ADVERTISED_10baseT_Full;
-	if (advert & ADVERTISE_100HALF)
-		result |= ADVERTISED_100baseT_Half;
-	if (advert & ADVERTISE_100FULL)
-		result |= ADVERTISED_100baseT_Full;
-	if (advert & ADVERTISE_PAUSE_CAP)
-		result |= ADVERTISED_Pause;
-	if (advert & ADVERTISE_PAUSE_ASYM)
-		result |= ADVERTISED_Asym_Pause;
 
-	return result;
+	return mii_lpa_to_ethtool_lpa_t(advert);
 }
 
 /**
@@ -104,19 +89,14 @@
 		ecmd->autoneg = AUTONEG_ENABLE;
 
 		ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
-		if (ctrl1000 & ADVERTISE_1000HALF)
-			ecmd->advertising |= ADVERTISED_1000baseT_Half;
-		if (ctrl1000 & ADVERTISE_1000FULL)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		if (mii->supports_gmii)
+			ecmd->advertising |=
+					mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 
 		if (bmsr & BMSR_ANEGCOMPLETE) {
 			ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
-			if (stat1000 & LPA_1000HALF)
-				ecmd->lp_advertising |=
-					ADVERTISED_1000baseT_Half;
-			if (stat1000 & LPA_1000FULL)
-				ecmd->lp_advertising |=
-					ADVERTISED_1000baseT_Full;
+			ecmd->lp_advertising |=
+					mii_stat1000_to_ethtool_lpa_t(stat1000);
 		} else {
 			ecmd->lp_advertising = 0;
 		}
@@ -204,20 +184,11 @@
 			advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
 			tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 		}
-		if (ecmd->advertising & ADVERTISED_10baseT_Half)
-			tmp |= ADVERTISE_10HALF;
-		if (ecmd->advertising & ADVERTISED_10baseT_Full)
-			tmp |= ADVERTISE_10FULL;
-		if (ecmd->advertising & ADVERTISED_100baseT_Half)
-			tmp |= ADVERTISE_100HALF;
-		if (ecmd->advertising & ADVERTISED_100baseT_Full)
-			tmp |= ADVERTISE_100FULL;
-		if (mii->supports_gmii) {
-			if (ecmd->advertising & ADVERTISED_1000baseT_Half)
-				tmp2 |= ADVERTISE_1000HALF;
-			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
-				tmp2 |= ADVERTISE_1000FULL;
-		}
+		tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+		if (mii->supports_gmii)
+			tmp2 |=
+			      ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
 		if (advert != tmp) {
 			mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
 			mii->advertising = tmp;
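Note: the mii.c hunks above (and the phy_device.c hunk further down) replace
open-coded bit translation with the new mii/ethtool conversion helpers such as
mii_lpa_to_ethtool_lpa_t(). As a minimal sketch of the LPA-to-ethtool direction,
mirroring only the mapping that the removed lines in mii_get_an() performed
(the real helpers are expected to live in <linux/mii.h>):

	/* Illustrative only: equivalent of the translation removed from mii_get_an(). */
	#include <linux/mii.h>
	#include <linux/ethtool.h>

	static u32 sketch_lpa_to_ethtool(u32 lpa)
	{
		u32 result = 0;

		if (lpa & LPA_LPACK)
			result |= ADVERTISED_Autoneg;
		if (lpa & ADVERTISE_10HALF)
			result |= ADVERTISED_10baseT_Half;
		if (lpa & ADVERTISE_10FULL)
			result |= ADVERTISED_10baseT_Full;
		if (lpa & ADVERTISE_100HALF)
			result |= ADVERTISED_100baseT_Half;
		if (lpa & ADVERTISE_100FULL)
			result |= ADVERTISED_100baseT_Full;
		if (lpa & ADVERTISE_PAUSE_CAP)
			result |= ADVERTISED_Pause;
		if (lpa & ADVERTISE_PAUSE_ASYM)
			result |= ADVERTISED_Asym_Pause;

		return result;
	}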
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a702443..fbdcdf8 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -131,3 +131,7 @@
 	  If in doubt, say Y.
 
 endif # PHYLIB
+
+config MICREL_KS8995MA
+	tristate "Micrel KS8995MA 5-port 10/100 managed Ethernet switch"
+	depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2333215..e15c83f 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -23,3 +23,4 @@
 obj-$(CONFIG_STE10XP)		+= ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)	+= micrel.o
 obj-$(CONFIG_MDIO_OCTEON)	+= mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA)	+= spi_ks8995.o
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 6539189..daec9b0 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -202,6 +202,14 @@
 	return 0;
 }
 
+static int mdiobb_reset(struct mii_bus *bus)
+{
+	struct mdiobb_ctrl *ctrl = bus->priv;
+	if (ctrl->reset)
+		ctrl->reset(bus);
+	return 0;
+}
+
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
 	struct mii_bus *bus;
@@ -214,6 +222,7 @@
 
 	bus->read = mdiobb_read;
 	bus->write = mdiobb_write;
+	bus->reset = mdiobb_reset;
 	bus->priv = ctrl;
 
 	return bus;
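Note: the hunk above gives bit-banged MDIO buses an optional reset callback, and
the mdio-gpio hunk below simply forwards pdata->reset into it. A hedged sketch of
how board code might supply such a hook follows; the GPIO numbers and the int
return type of the callback are assumptions for illustration — only the existence
of the reset member is implied by these hunks.

	/* Hypothetical board-level MDIO reset hook; GPIOs are assumed to be
	 * requested elsewhere, and the callback signature is an assumption
	 * based on the ctrl->reset(bus) call above. */
	#include <linux/delay.h>
	#include <linux/gpio.h>
	#include <linux/phy.h>
	#include <linux/mdio-gpio.h>

	#define BOARD_MDC_GPIO		10	/* assumed */
	#define BOARD_MDIO_GPIO		11	/* assumed */
	#define BOARD_PHY_RESET_GPIO	12	/* assumed */

	static int board_mdio_reset(struct mii_bus *bus)
	{
		gpio_set_value(BOARD_PHY_RESET_GPIO, 0);	/* assert reset */
		udelay(100);
		gpio_set_value(BOARD_PHY_RESET_GPIO, 1);	/* release reset */
		return 0;
	}

	static struct mdio_gpio_platform_data board_mdio_pdata = {
		.mdc	= BOARD_MDC_GPIO,
		.mdio	= BOARD_MDIO_GPIO,
		.reset	= board_mdio_reset,
	};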
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2843c90..89c5a3e 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -95,6 +95,7 @@
 		goto out;
 
 	bitbang->ctrl.ops = &mdio_gpio_ops;
+	bitbang->ctrl.reset = pdata->reset;
 	bitbang->mdc = pdata->mdc;
 	bitbang->mdio = pdata->mdio;
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5a..f320f46 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -563,20 +563,9 @@
 	if (adv < 0)
 		return adv;
 
-	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | 
+	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
 		 ADVERTISE_PAUSE_ASYM);
-	if (advertise & ADVERTISED_10baseT_Half)
-		adv |= ADVERTISE_10HALF;
-	if (advertise & ADVERTISED_10baseT_Full)
-		adv |= ADVERTISE_10FULL;
-	if (advertise & ADVERTISED_100baseT_Half)
-		adv |= ADVERTISE_100HALF;
-	if (advertise & ADVERTISED_100baseT_Full)
-		adv |= ADVERTISE_100FULL;
-	if (advertise & ADVERTISED_Pause)
-		adv |= ADVERTISE_PAUSE_CAP;
-	if (advertise & ADVERTISED_Asym_Pause)
-		adv |= ADVERTISE_PAUSE_ASYM;
+	adv |= ethtool_adv_to_mii_adv_t(advertise);
 
 	if (adv != oldadv) {
 		err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@
 			return adv;
 
 		adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-		if (advertise & SUPPORTED_1000baseT_Half)
-			adv |= ADVERTISE_1000HALF;
-		if (advertise & SUPPORTED_1000baseT_Full)
-			adv |= ADVERTISE_1000FULL;
+		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
 
 		if (adv != oldadv) {
 			err = phy_write(phydev, MII_CTRL1000, adv);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 342505c..fc3e7e9 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -22,26 +22,7 @@
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
-
-#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
-#define MII_LAN83C185_IM  30 /* Interrupt Mask */
-#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
-
-#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
-#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
-#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
-#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
-#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
-#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
-#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
-
-#define MII_LAN83C185_ISF_INT_ALL (0x0e)
-
-#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
-	(MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
-	 MII_LAN83C185_ISF_INT7)
-
-#define MII_LAN83C185_EDPWRDOWN	(1 << 13) /* EDPWRDOWN */
+#include <linux/smscphy.h>
 
 static int smsc_phy_config_intr(struct phy_device *phydev)
 {
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644
index 0000000..116a2dd
--- /dev/null
+++ b/drivers/net/phy/spi_ks8995.c
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ *     Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION		"0.1.1"
+#define DRV_DESC		"Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0		0x00    /* Chip ID0 */
+#define KS8995_REG_ID1		0x01    /* Chip ID1 */
+
+#define KS8995_REG_GC0		0x02    /* Global Control 0 */
+#define KS8995_REG_GC1		0x03    /* Global Control 1 */
+#define KS8995_REG_GC2		0x04    /* Global Control 2 */
+#define KS8995_REG_GC3		0x05    /* Global Control 3 */
+#define KS8995_REG_GC4		0x06    /* Global Control 4 */
+#define KS8995_REG_GC5		0x07    /* Global Control 5 */
+#define KS8995_REG_GC6		0x08    /* Global Control 6 */
+#define KS8995_REG_GC7		0x09    /* Global Control 7 */
+#define KS8995_REG_GC8		0x0a    /* Global Control 8 */
+#define KS8995_REG_GC9		0x0b    /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r)	((0x10 * p) + r)	 /* Port Control */
+#define KS8995_REG_PS(p, r)	((0x10 * p) + r + 0xe)  /* Port Status */
+
+#define KS8995_REG_TPC0		0x60    /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1		0x61    /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2		0x62    /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3		0x63    /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4		0x64    /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5		0x65    /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6		0x66    /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7		0x67    /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0		0x68    /* MAC address 0 */
+#define KS8995_REG_MAC1		0x69    /* MAC address 1 */
+#define KS8995_REG_MAC2		0x6a    /* MAC address 2 */
+#define KS8995_REG_MAC3		0x6b    /* MAC address 3 */
+#define KS8995_REG_MAC4		0x6c    /* MAC address 4 */
+#define KS8995_REG_MAC5		0x6d    /* MAC address 5 */
+
+#define KS8995_REG_IAC0		0x6e    /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1		0x6f    /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7		0x70    /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6		0x71    /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5		0x72    /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4		0x73    /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3		0x74    /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2		0x75    /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1		0x76    /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0		0x77    /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE	0x80
+
+#define ID1_CHIPID_M		0xf
+#define ID1_CHIPID_S		4
+#define ID1_REVISION_M		0x7
+#define ID1_REVISION_S		1
+#define ID1_START_SW		1	/* start the switch */
+
+#define FAMILY_KS8995		0x95
+#define CHIPID_M		0
+
+#define KS8995_CMD_WRITE	0x02U
+#define KS8995_CMD_READ		0x03U
+
+#define KS8995_RESET_DELAY	10 /* usec */
+
+struct ks8995_pdata {
+	/* not yet implemented */
+};
+
+struct ks8995_switch {
+	struct spi_device	*spi;
+	struct mutex		lock;
+	struct ks8995_pdata	*pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+	return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+	return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+		 unsigned offset, size_t count)
+{
+	u8 cmd[2];
+	struct spi_transfer t[2];
+	struct spi_message m;
+	int err;
+
+	spi_message_init(&m);
+
+	memset(&t, 0, sizeof(t));
+
+	t[0].tx_buf = cmd;
+	t[0].len = sizeof(cmd);
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].rx_buf = buf;
+	t[1].len = count;
+	spi_message_add_tail(&t[1], &m);
+
+	cmd[0] = KS8995_CMD_READ;
+	cmd[1] = offset;
+
+	mutex_lock(&ks->lock);
+	err = spi_sync(ks->spi, &m);
+	mutex_unlock(&ks->lock);
+
+	return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+		 unsigned offset, size_t count)
+{
+	u8 cmd[2];
+	struct spi_transfer t[2];
+	struct spi_message m;
+	int err;
+
+	spi_message_init(&m);
+
+	memset(&t, 0, sizeof(t));
+
+	t[0].tx_buf = cmd;
+	t[0].len = sizeof(cmd);
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].tx_buf = buf;
+	t[1].len = count;
+	spi_message_add_tail(&t[1], &m);
+
+	cmd[0] = KS8995_CMD_WRITE;
+	cmd[1] = offset;
+
+	mutex_lock(&ks->lock);
+	err = spi_sync(ks->spi, &m);
+	mutex_unlock(&ks->lock);
+
+	return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+	return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+	char buf = val;
+
+	return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+	return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+	return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+	int err;
+
+	err = ks8995_stop(ks);
+	if (err)
+		return err;
+
+	udelay(KS8995_RESET_DELAY);
+
+	return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+	struct device *dev;
+	struct ks8995_switch *ks8995;
+
+	dev = container_of(kobj, struct device, kobj);
+	ks8995 = dev_get_drvdata(dev);
+
+	if (unlikely(off > KS8995_REGS_SIZE))
+		return 0;
+
+	if ((off + count) > KS8995_REGS_SIZE)
+		count = KS8995_REGS_SIZE - off;
+
+	if (unlikely(!count))
+		return count;
+
+	return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+	struct device *dev;
+	struct ks8995_switch *ks8995;
+
+	dev = container_of(kobj, struct device, kobj);
+	ks8995 = dev_get_drvdata(dev);
+
+	if (unlikely(off >= KS8995_REGS_SIZE))
+		return -EFBIG;
+
+	if ((off + count) > KS8995_REGS_SIZE)
+		count = KS8995_REGS_SIZE - off;
+
+	if (unlikely(!count))
+		return count;
+
+	return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+	.attr = {
+		.name   = "registers",
+		.mode   = S_IRUSR | S_IWUSR,
+	},
+	.size   = KS8995_REGS_SIZE,
+	.read   = ks8995_registers_read,
+	.write  = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+	struct ks8995_switch    *ks;
+	struct ks8995_pdata     *pdata;
+	u8      ids[2];
+	int     err;
+
+	/* Chip description */
+	pdata = spi->dev.platform_data;
+
+	ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+	if (!ks) {
+		dev_err(&spi->dev, "no memory for private data\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&ks->lock);
+	ks->pdata = pdata;
+	ks->spi = spi_dev_get(spi);
+	dev_set_drvdata(&spi->dev, ks);
+
+	spi->mode = SPI_MODE_0;
+	spi->bits_per_word = 8;
+	err = spi_setup(spi);
+	if (err) {
+		dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+		goto err_drvdata;
+	}
+
+	err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+	if (err < 0) {
+		dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+				err);
+		goto err_drvdata;
+	}
+
+	switch (ids[0]) {
+	case FAMILY_KS8995:
+		break;
+	default:
+		dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+		err = -ENODEV;
+		goto err_drvdata;
+	}
+
+	err = ks8995_reset(ks);
+	if (err)
+		goto err_drvdata;
+
+	err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+	if (err) {
+		dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+				    err);
+		goto err_drvdata;
+	}
+
+	dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+			"Revision:%01x\n", ids[0],
+			get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+	return 0;
+
+err_drvdata:
+	dev_set_drvdata(&spi->dev, NULL);
+	kfree(ks);
+	return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+	struct ks8995_switch    *ks8995;
+
+	ks8995 = dev_get_drvdata(&spi->dev);
+	sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+	dev_set_drvdata(&spi->dev, NULL);
+	kfree(ks8995);
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+	.driver = {
+		.name	    = "spi-ks8995",
+		.bus	     = &spi_bus_type,
+		.owner	   = THIS_MODULE,
+	},
+	.probe	  = ks8995_probe,
+	.remove	  = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+	printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+
+	return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+	spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f..c1c9293 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -162,7 +162,7 @@
 {
 	spin_lock(&chan_lock);
 	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-	rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
 	spin_unlock(&chan_lock);
 	synchronize_rcu();
 }
@@ -423,10 +423,8 @@
 	lock_sock(sk);
 
 	opt->src_addr = sp->sa_addr.pptp;
-	if (add_chan(po)) {
-		release_sock(sk);
+	if (add_chan(po))
 		error = -EBUSY;
-	}
 
 	release_sock(sk);
 	return error;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644
index 0000000..248a144
--- /dev/null
+++ b/drivers/net/team/Kconfig
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+	tristate "Ethernet team driver support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	---help---
+	  This allows one to create virtual interfaces that team together
+	  multiple ethernet devices.
+
+	  Team devices can be added using the "ip" command from the
+	  iproute2 package:
+
+	  "ip link add link [ address MAC ] [ NAME ] type team"
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+	tristate "Round-robin mode support"
+	depends on NET_TEAM
+	---help---
+	  Basic mode where the port used for transmitting packets is selected
+	  in round-robin fashion using a packet counter.
+
+	  All added ports are set up to use the team device's mac address.
+
+	  To compile this team mode as a module, choose M here: the module
+	  will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+	tristate "Active-backup mode support"
+	depends on NET_TEAM
+	---help---
+	  Only one port is active at a time, and the rest of the ports are
+	  used for backup.
+
+	  MAC addresses of the ports are not modified; userspace is
+	  responsible for doing so.
+
+	  To compile this team mode as a module, choose M here: the module
+	  will be called team_mode_activebackup.
+
+endif # NET_TEAM
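Note: the two modes above are loadable modules built against the registration API
added in team.c below (team_mode_register()/team_mode_unregister(), struct
team_mode and struct team_mode_ops). A minimal, hypothetical "noop" mode is
sketched here purely from what is visible in team.c — it is not the actual
round-robin or active-backup implementation, and any field not referenced in
team.c is an assumption.

	/* Hypothetical minimal team mode, sketched from the API in team.c below. */
	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/if_team.h>

	static bool noop_transmit(struct team *team, struct sk_buff *skb)
	{
		dev_kfree_skb_any(skb);	/* drop everything, like team_dummy_transmit() */
		return false;		/* false means the skb was not transmitted */
	}

	static struct team_mode_ops noop_mode_ops = {
		.transmit	= noop_transmit,
	};

	static struct team_mode noop_mode = {
		.kind	= "noop",
		.owner	= THIS_MODULE,
		.ops	= &noop_mode_ops,
	};

	static int __init noop_mode_init(void)
	{
		return team_mode_register(&noop_mode);
	}
	module_init(noop_mode_init);

	static void __exit noop_mode_exit(void)
	{
		team_mode_unregister(&noop_mode);
	}
	module_exit(noop_mode_exit);

	MODULE_LICENSE("GPL v2");
	MODULE_ALIAS("team-mode-noop");	/* matches request_module("team-mode-%s") */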
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644
index 0000000..85f2028
--- /dev/null
+++ b/drivers/net/team/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644
index 0000000..ed2a862
--- /dev/null
+++ b/drivers/net/team/team.c
@@ -0,0 +1,1684 @@
+/*
+ * net/drivers/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+	struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+	return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+	struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+	return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change mac address for open port device is tested in
+ * team_port_add, this function can be called without control of return value
+ */
+static int __set_port_mac(struct net_device *port_dev,
+			  const unsigned char *dev_addr)
+{
+	struct sockaddr addr;
+
+	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+	addr.sa_family = ARPHRD_ETHER;
+	return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+	return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+	return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+	struct team_option *option;
+
+	list_for_each_entry(option, &team->option_list, list) {
+		if (strcmp(option->name, opt_name) == 0)
+			return option;
+	}
+	return NULL;
+}
+
+int team_options_register(struct team *team,
+			  const struct team_option *option,
+			  size_t option_count)
+{
+	int i;
+	struct team_option **dst_opts;
+	int err;
+
+	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+			   GFP_KERNEL);
+	if (!dst_opts)
+		return -ENOMEM;
+	for (i = 0; i < option_count; i++, option++) {
+		if (__team_find_option(team, option->name)) {
+			err = -EEXIST;
+			goto rollback;
+		}
+		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+		if (!dst_opts[i]) {
+			err = -ENOMEM;
+			goto rollback;
+		}
+	}
+
+	for (i = 0; i < option_count; i++)
+		list_add_tail(&dst_opts[i]->list, &team->option_list);
+
+	kfree(dst_opts);
+	return 0;
+
+rollback:
+	for (i = 0; i < option_count; i++)
+		kfree(dst_opts[i]);
+
+	kfree(dst_opts);
+	return err;
+}
+EXPORT_SYMBOL(team_options_register);
+
+static void __team_options_change_check(struct team *team,
+					struct team_option *changed_option);
+
+static void __team_options_unregister(struct team *team,
+				      const struct team_option *option,
+				      size_t option_count)
+{
+	int i;
+
+	for (i = 0; i < option_count; i++, option++) {
+		struct team_option *del_opt;
+
+		del_opt = __team_find_option(team, option->name);
+		if (del_opt) {
+			list_del(&del_opt->list);
+			kfree(del_opt);
+		}
+	}
+}
+
+void team_options_unregister(struct team *team,
+			     const struct team_option *option,
+			     size_t option_count)
+{
+	__team_options_unregister(team, option, option_count);
+	__team_options_change_check(team, NULL);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+			   void *arg)
+{
+	return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+			   void *arg)
+{
+	int err;
+
+	err = option->setter(team, arg);
+	if (err)
+		return err;
+
+	__team_options_change_check(team, option);
+	return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+	struct team_mode *mode;
+
+	list_for_each_entry(mode, &mode_list, list) {
+		if (strcmp(mode->kind, kind) == 0)
+			return mode;
+	}
+	return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+	while (*name != '\0') {
+		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+			return false;
+		name++;
+	}
+	return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+	int err = 0;
+
+	if (!is_good_mode_name(mode->kind) ||
+	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
+		return -EINVAL;
+	spin_lock(&mode_list_lock);
+	if (__find_mode(mode->kind)) {
+		err = -EEXIST;
+		goto unlock;
+	}
+	list_add_tail(&mode->list, &mode_list);
+unlock:
+	spin_unlock(&mode_list_lock);
+	return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+	spin_lock(&mode_list_lock);
+	list_del_init(&mode->list);
+	spin_unlock(&mode_list_lock);
+	return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
+static struct team_mode *team_mode_get(const char *kind)
+{
+	struct team_mode *mode;
+
+	spin_lock(&mode_list_lock);
+	mode = __find_mode(kind);
+	if (!mode) {
+		spin_unlock(&mode_list_lock);
+		request_module("team-mode-%s", kind);
+		spin_lock(&mode_list_lock);
+		mode = __find_mode(kind);
+	}
+	if (mode)
+		if (!try_module_get(mode->owner))
+			mode = NULL;
+
+	spin_unlock(&mode_list_lock);
+	return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+	module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+rx_handler_result_t team_dummy_receive(struct team *team,
+				       struct team_port *port,
+				       struct sk_buff *skb)
+{
+	return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+	/*
+	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
+	 * correct ops are always set.
+	 */
+
+	if (list_empty(&team->port_list) ||
+	    !team->mode || !team->mode->ops->transmit)
+		team->ops.transmit = team_dummy_transmit;
+	else
+		team->ops.transmit = team->mode->ops->transmit;
+
+	if (list_empty(&team->port_list) ||
+	    !team->mode || !team->mode->ops->receive)
+		team->ops.receive = team_dummy_receive;
+	else
+		team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can benefit from the fact that no port is present at the time of mode
+ * change. Therefore no packets are in flight, so there's no need to set mode
+ * operations in any special way.
+ */
+static int __team_change_mode(struct team *team,
+			      const struct team_mode *new_mode)
+{
+	/* Check if mode was previously set and do cleanup if so */
+	if (team->mode) {
+		void (*exit_op)(struct team *team) = team->ops.exit;
+
+		/* Clear ops area so no callback is called any longer */
+		memset(&team->ops, 0, sizeof(struct team_mode_ops));
+		team_adjust_ops(team);
+
+		if (exit_op)
+			exit_op(team);
+		team_mode_put(team->mode);
+		team->mode = NULL;
+		/* zero private data area */
+		memset(&team->mode_priv, 0,
+		       sizeof(struct team) - offsetof(struct team, mode_priv));
+	}
+
+	if (!new_mode)
+		return 0;
+
+	if (new_mode->ops->init) {
+		int err;
+
+		err = new_mode->ops->init(team);
+		if (err)
+			return err;
+	}
+
+	team->mode = new_mode;
+	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+	team_adjust_ops(team);
+
+	return 0;
+}
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+	struct team_mode *new_mode;
+	struct net_device *dev = team->dev;
+	int err;
+
+	if (!list_empty(&team->port_list)) {
+		netdev_err(dev, "No ports can be present during mode change\n");
+		return -EBUSY;
+	}
+
+	if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+		netdev_err(dev, "Unable to change to the same mode the team is in\n");
+		return -EINVAL;
+	}
+
+	new_mode = team_mode_get(kind);
+	if (!new_mode) {
+		netdev_err(dev, "Mode \"%s\" not found\n", kind);
+		return -EINVAL;
+	}
+
+	err = __team_change_mode(team, new_mode);
+	if (err) {
+		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+		team_mode_put(new_mode);
+		return err;
+	}
+
+	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+	return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct team_port *port;
+	struct team *team;
+	rx_handler_result_t res;
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return RX_HANDLER_CONSUMED;
+
+	*pskb = skb;
+
+	port = team_port_get_rcu(skb->dev);
+	team = port->team;
+
+	res = team->ops.receive(team, port, skb);
+	if (res == RX_HANDLER_ANOTHER) {
+		struct team_pcpu_stats *pcpu_stats;
+
+		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->rx_packets++;
+		pcpu_stats->rx_bytes += skb->len;
+		if (skb->pkt_type == PACKET_MULTICAST)
+			pcpu_stats->rx_multicast++;
+		u64_stats_update_end(&pcpu_stats->syncp);
+
+		skb->dev = team->dev;
+	} else {
+		this_cpu_inc(team->pcpu_stats->rx_dropped);
+	}
+
+	return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+			   const struct team_port *port)
+{
+	struct team_port *cur;
+
+	list_for_each_entry(cur, &team->port_list, list)
+		if (cur == port)
+			return true;
+	return false;
+}
+
+/*
+ * Add/delete port to the team port list. Write guarded by rtnl_lock.
+ * Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+				    struct team_port *port)
+{
+	port->index = team->port_count++;
+	hlist_add_head_rcu(&port->hlist,
+			   team_port_index_hash(team, port->index));
+	list_add_tail_rcu(&port->list, &team->port_list);
+}
+
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+	int i;
+	struct team_port *port;
+
+	for (i = rm_index + 1; i < team->port_count; i++) {
+		port = team_get_port_by_index(team, i);
+		hlist_del_rcu(&port->hlist);
+		port->index--;
+		hlist_add_head_rcu(&port->hlist,
+				   team_port_index_hash(team, port->index));
+	}
+}
+
+static void team_port_list_del_port(struct team *team,
+				   struct team_port *port)
+{
+	int rm_index = port->index;
+
+	hlist_del_rcu(&port->hlist);
+	list_del_rcu(&port->list);
+	__reconstruct_port_hlist(team, rm_index);
+	team->port_count--;
+}
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+			    NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+static void __team_compute_features(struct team *team)
+{
+	struct team_port *port;
+	u32 vlan_features = TEAM_VLAN_FEATURES;
+	unsigned short max_hard_header_len = ETH_HLEN;
+
+	list_for_each_entry(port, &team->port_list, list) {
+		vlan_features = netdev_increment_features(vlan_features,
+					port->dev->vlan_features,
+					TEAM_VLAN_FEATURES);
+
+		if (port->dev->hard_header_len > max_hard_header_len)
+			max_hard_header_len = port->dev->hard_header_len;
+	}
+
+	team->dev->vlan_features = vlan_features;
+	team->dev->hard_header_len = max_hard_header_len;
+
+	netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+	mutex_lock(&team->lock);
+	__team_compute_features(team);
+	mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+	int err = 0;
+
+	dev_hold(team->dev);
+	port->dev->priv_flags |= IFF_TEAM_PORT;
+	if (team->ops.port_enter) {
+		err = team->ops.port_enter(team, port);
+		if (err) {
+			netdev_err(team->dev, "Device %s failed to enter team mode\n",
+				   port->dev->name);
+			goto err_port_enter;
+		}
+	}
+
+	return 0;
+
+err_port_enter:
+	port->dev->priv_flags &= ~IFF_TEAM_PORT;
+	dev_put(team->dev);
+
+	return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+	if (team->ops.port_leave)
+		team->ops.port_leave(team, port);
+	port->dev->priv_flags &= ~IFF_TEAM_PORT;
+	dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
+static int team_port_add(struct team *team, struct net_device *port_dev)
+{
+	struct net_device *dev = team->dev;
+	struct team_port *port;
+	char *portname = port_dev->name;
+	int err;
+
+	if (port_dev->flags & IFF_LOOPBACK ||
+	    port_dev->type != ARPHRD_ETHER) {
+		netdev_err(dev, "Device %s is of an unsupported type\n",
+			   portname);
+		return -EINVAL;
+	}
+
+	if (team_port_exists(port_dev)) {
+		netdev_err(dev, "Device %s is already a port "
+				"of a team device\n", portname);
+		return -EBUSY;
+	}
+
+	if (port_dev->flags & IFF_UP) {
+		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+			   portname);
+		return -EBUSY;
+	}
+
+	port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->dev = port_dev;
+	port->team = team;
+
+	port->orig.mtu = port_dev->mtu;
+	err = dev_set_mtu(port_dev, dev->mtu);
+	if (err) {
+		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+		goto err_set_mtu;
+	}
+
+	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+
+	err = team_port_enter(team, port);
+	if (err) {
+		netdev_err(dev, "Device %s failed to enter team mode\n",
+			   portname);
+		goto err_port_enter;
+	}
+
+	err = dev_open(port_dev);
+	if (err) {
+		netdev_dbg(dev, "Device %s opening failed\n",
+			   portname);
+		goto err_dev_open;
+	}
+
+	err = vlan_vids_add_by_dev(port_dev, dev);
+	if (err) {
+		netdev_err(dev, "Failed to add vlan ids to device %s\n",
+				portname);
+		goto err_vids_add;
+	}
+
+	err = netdev_set_master(port_dev, dev);
+	if (err) {
+		netdev_err(dev, "Device %s failed to set master\n", portname);
+		goto err_set_master;
+	}
+
+	err = netdev_rx_handler_register(port_dev, team_handle_frame,
+					 port);
+	if (err) {
+		netdev_err(dev, "Device %s failed to register rx_handler\n",
+			   portname);
+		goto err_handler_register;
+	}
+
+	team_port_list_add_port(team, port);
+	team_adjust_ops(team);
+	__team_compute_features(team);
+	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
+
+	netdev_info(dev, "Port device %s added\n", portname);
+
+	return 0;
+
+err_handler_register:
+	netdev_set_master(port_dev, NULL);
+
+err_set_master:
+	vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
+	dev_close(port_dev);
+
+err_dev_open:
+	team_port_leave(team, port);
+	team_port_set_orig_mac(port);
+
+err_port_enter:
+	dev_set_mtu(port_dev, port->orig.mtu);
+
+err_set_mtu:
+	kfree(port);
+
+	return err;
+}
+
+static int team_port_del(struct team *team, struct net_device *port_dev)
+{
+	struct net_device *dev = team->dev;
+	struct team_port *port;
+	char *portname = port_dev->name;
+
+	port = team_port_get_rtnl(port_dev);
+	if (!port || !team_port_find(team, port)) {
+		netdev_err(dev, "Device %s does not act as a port of this team\n",
+			   portname);
+		return -ENOENT;
+	}
+
+	__team_port_change_check(port, false);
+	team_port_list_del_port(team, port);
+	team_adjust_ops(team);
+	netdev_rx_handler_unregister(port_dev);
+	netdev_set_master(port_dev, NULL);
+	vlan_vids_del_by_dev(port_dev, dev);
+	dev_close(port_dev);
+	team_port_leave(team, port);
+	team_port_set_orig_mac(port);
+	dev_set_mtu(port_dev, port->orig.mtu);
+	synchronize_rcu();
+	kfree(port);
+	netdev_info(dev, "Port device %s removed\n", portname);
+	__team_compute_features(team);
+
+	return 0;
+}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+	const char **str = arg;
+
+	*str = team->mode ? team->mode->kind : team_no_mode_kind;
+	return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+	const char **str = arg;
+
+	return team_change_mode(team, *str);
+}
+
+static const struct team_option team_options[] = {
+	{
+		.name = "mode",
+		.type = TEAM_OPTION_TYPE_STRING,
+		.getter = team_mode_option_get,
+		.setter = team_mode_option_set,
+	},
+};
+
+static int team_init(struct net_device *dev)
+{
+	struct team *team = netdev_priv(dev);
+	int i;
+	int err;
+
+	team->dev = dev;
+	mutex_init(&team->lock);
+
+	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+	if (!team->pcpu_stats)
+		return -ENOMEM;
+
+	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
+		INIT_HLIST_HEAD(&team->port_hlist[i]);
+	INIT_LIST_HEAD(&team->port_list);
+
+	team_adjust_ops(team);
+
+	INIT_LIST_HEAD(&team->option_list);
+	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
+	if (err)
+		goto err_options_register;
+	netif_carrier_off(dev);
+
+	return 0;
+
+err_options_register:
+	free_percpu(team->pcpu_stats);
+
+	return err;
+}
+
+static void team_uninit(struct net_device *dev)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+	struct team_port *tmp;
+
+	mutex_lock(&team->lock);
+	list_for_each_entry_safe(port, tmp, &team->port_list, list)
+		team_port_del(team, port->dev);
+
+	__team_change_mode(team, NULL); /* cleanup */
+	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	mutex_unlock(&team->lock);
+}
+
+static void team_destructor(struct net_device *dev)
+{
+	struct team *team = netdev_priv(dev);
+
+	free_percpu(team->pcpu_stats);
+	free_netdev(dev);
+}
+
+static int team_open(struct net_device *dev)
+{
+	netif_carrier_on(dev);
+	return 0;
+}
+
+static int team_close(struct net_device *dev)
+{
+	netif_carrier_off(dev);
+	return 0;
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct team *team = netdev_priv(dev);
+	bool tx_success = false;
+	unsigned int len = skb->len;
+
+	tx_success = team->ops.transmit(team, skb);
+	if (tx_success) {
+		struct team_pcpu_stats *pcpu_stats;
+
+		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->tx_packets++;
+		pcpu_stats->tx_bytes += len;
+		u64_stats_update_end(&pcpu_stats->syncp);
+	} else {
+		this_cpu_inc(team->pcpu_stats->tx_dropped);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static void team_change_rx_flags(struct net_device *dev, int change)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+	int inc;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &team->port_list, list) {
+		if (change & IFF_PROMISC) {
+			inc = dev->flags & IFF_PROMISC ? 1 : -1;
+			dev_set_promiscuity(port->dev, inc);
+		}
+		if (change & IFF_ALLMULTI) {
+			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
+			dev_set_allmulti(port->dev, inc);
+		}
+	}
+	rcu_read_unlock();
+}
+
+static void team_set_rx_mode(struct net_device *dev)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &team->port_list, list) {
+		dev_uc_sync(port->dev, dev);
+		dev_mc_sync(port->dev, dev);
+	}
+	rcu_read_unlock();
+}
+
+static int team_set_mac_address(struct net_device *dev, void *p)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+	struct sockaddr *addr = p;
+
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &team->port_list, list)
+		if (team->ops.port_change_mac)
+			team->ops.port_change_mac(team, port);
+	rcu_read_unlock();
+	return 0;
+}
+
+static int team_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+	int err;
+
+	/*
+	 * Although this is a reader, it's guarded by the team lock. It's not
+	 * possible to traverse the list in reverse under rcu_read_lock.
+	 */
+	mutex_lock(&team->lock);
+	list_for_each_entry(port, &team->port_list, list) {
+		err = dev_set_mtu(port->dev, new_mtu);
+		if (err) {
+			netdev_err(dev, "Device %s failed to change mtu\n",
+				   port->dev->name);
+			goto unwind;
+		}
+	}
+	mutex_unlock(&team->lock);
+
+	dev->mtu = new_mtu;
+
+	return 0;
+
+unwind:
+	list_for_each_entry_continue_reverse(port, &team->port_list, list)
+		dev_set_mtu(port->dev, dev->mtu);
+	mutex_unlock(&team->lock);
+
+	return err;
+}
+
+static struct rtnl_link_stats64 *
+team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_pcpu_stats *p;
+	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+	u32 rx_dropped = 0, tx_dropped = 0;
+	unsigned int start;
+	int i;
+
+	for_each_possible_cpu(i) {
+		p = per_cpu_ptr(team->pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_bh(&p->syncp);
+			rx_packets	= p->rx_packets;
+			rx_bytes	= p->rx_bytes;
+			rx_multicast	= p->rx_multicast;
+			tx_packets	= p->tx_packets;
+			tx_bytes	= p->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+		stats->rx_packets	+= rx_packets;
+		stats->rx_bytes		+= rx_bytes;
+		stats->multicast	+= rx_multicast;
+		stats->tx_packets	+= tx_packets;
+		stats->tx_bytes		+= tx_bytes;
+		/*
+		 * rx_dropped & tx_dropped are u32, updated
+		 * without syncp protection.
+		 */
+		rx_dropped	+= p->rx_dropped;
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->rx_dropped	= rx_dropped;
+	stats->tx_dropped	= tx_dropped;
+	return stats;
+}
+
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+	int err;
+
+	/*
+	 * Although this is a reader, it's guarded by the team lock. It's not
+	 * possible to traverse the list in reverse under rcu_read_lock.
+	 */
+	mutex_lock(&team->lock);
+	list_for_each_entry(port, &team->port_list, list) {
+		err = vlan_vid_add(port->dev, vid);
+		if (err)
+			goto unwind;
+	}
+	mutex_unlock(&team->lock);
+
+	return 0;
+
+unwind:
+	list_for_each_entry_continue_reverse(port, &team->port_list, list)
+		vlan_vid_del(port->dev, vid);
+	mutex_unlock(&team->lock);
+
+	return err;
+}
+
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+	struct team *team = netdev_priv(dev);
+	struct team_port *port;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &team->port_list, list)
+		vlan_vid_del(port->dev, vid);
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	struct team *team = netdev_priv(dev);
+	int err;
+
+	mutex_lock(&team->lock);
+	err = team_port_add(team, port_dev);
+	mutex_unlock(&team->lock);
+	return err;
+}
+
+static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	struct team *team = netdev_priv(dev);
+	int err;
+
+	mutex_lock(&team->lock);
+	err = team_port_del(team, port_dev);
+	mutex_unlock(&team->lock);
+	return err;
+}
+
+static netdev_features_t team_fix_features(struct net_device *dev,
+					   netdev_features_t features)
+{
+	struct team_port *port;
+	struct team *team = netdev_priv(dev);
+	netdev_features_t mask;
+
+	mask = features;
+	features &= ~NETIF_F_ONE_FOR_ALL;
+	features |= NETIF_F_ALL_FOR_ALL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &team->port_list, list) {
+		features = netdev_increment_features(features,
+						     port->dev->features,
+						     mask);
+	}
+	rcu_read_unlock();
+	return features;
+}
+
+static const struct net_device_ops team_netdev_ops = {
+	.ndo_init		= team_init,
+	.ndo_uninit		= team_uninit,
+	.ndo_open		= team_open,
+	.ndo_stop		= team_close,
+	.ndo_start_xmit		= team_xmit,
+	.ndo_change_rx_flags	= team_change_rx_flags,
+	.ndo_set_rx_mode	= team_set_rx_mode,
+	.ndo_set_mac_address	= team_set_mac_address,
+	.ndo_change_mtu		= team_change_mtu,
+	.ndo_get_stats64	= team_get_stats64,
+	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
+	.ndo_add_slave		= team_add_slave,
+	.ndo_del_slave		= team_del_slave,
+	.ndo_fix_features	= team_fix_features,
+};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
+static void team_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &team_netdev_ops;
+	dev->destructor	= team_destructor;
+	dev->tx_queue_len = 0;
+	dev->flags |= IFF_MULTICAST;
+	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+
+	/*
+	 * Indicate we support unicast address filtering. That way the core
+	 * won't put us into promiscuous mode when a unicast address is added.
+	 * Leave this up to the underlying drivers.
+	 */
+	dev->priv_flags |= IFF_UNICAST_FLT;
+
+	dev->features |= NETIF_F_LLTX;
+	dev->features |= NETIF_F_GRO;
+	dev->hw_features = NETIF_F_HW_VLAN_TX |
+			   NETIF_F_HW_VLAN_RX |
+			   NETIF_F_HW_VLAN_FILTER;
+
+	dev->features |= dev->hw_features;
+}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+			struct nlattr *tb[], struct nlattr *data[])
+{
+	int err;
+
+	if (tb[IFLA_ADDRESS] == NULL)
+		random_ether_addr(dev->dev_addr);
+
+	err = register_netdevice(dev);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static struct rtnl_link_ops team_link_ops __read_mostly = {
+	.kind		= DRV_NAME,
+	.priv_size	= sizeof(struct team),
+	.setup		= team_setup,
+	.newlink	= team_newlink,
+	.validate	= team_validate,
+};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
+static struct genl_family team_nl_family = {
+	.id		= GENL_ID_GENERATE,
+	.name		= TEAM_GENL_NAME,
+	.version	= TEAM_GENL_VERSION,
+	.maxattr	= TEAM_ATTR_MAX,
+	.netnsok	= true,
+};
+
+static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
+	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
+	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
+	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
+	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
+	[TEAM_ATTR_OPTION_NAME] = {
+		.type = NLA_STRING,
+		.len = TEAM_STRING_MAX_LEN,
+	},
+	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
+	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
+	[TEAM_ATTR_OPTION_DATA] = {
+		.type = NLA_BINARY,
+		.len = TEAM_STRING_MAX_LEN,
+	},
+};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+			  &team_nl_family, 0, TEAM_CMD_NOOP);
+	if (IS_ERR(hdr)) {
+		err = PTR_ERR(hdr);
+		goto err_msg_put;
+	}
+
+	genlmsg_end(msg, hdr);
+
+	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+	nlmsg_free(msg);
+
+	return err;
+}
+
+/*
+ * Netlink cmd functions should be locked by the following two functions.
+ * Since dev gets held here, that ensures dev won't disappear in between.
+ */
+static struct team *team_nl_team_get(struct genl_info *info)
+{
+	struct net *net = genl_info_net(info);
+	int ifindex;
+	struct net_device *dev;
+	struct team *team;
+
+	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
+		return NULL;
+
+	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
+	dev = dev_get_by_index(net, ifindex);
+	if (!dev || dev->netdev_ops != &team_netdev_ops) {
+		if (dev)
+			dev_put(dev);
+		return NULL;
+	}
+
+	team = netdev_priv(dev);
+	mutex_lock(&team->lock);
+	return team;
+}
+
+static void team_nl_team_put(struct team *team)
+{
+	mutex_unlock(&team->lock);
+	dev_put(team->dev);
+}
+
+static int team_nl_send_generic(struct genl_info *info, struct team *team,
+				int (*fill_func)(struct sk_buff *skb,
+						 struct genl_info *info,
+						 int flags, struct team *team))
+{
+	struct sk_buff *skb;
+	int err;
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	err = fill_func(skb, info, NLM_F_ACK, team);
+	if (err < 0)
+		goto err_fill;
+
+	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+	return err;
+
+err_fill:
+	nlmsg_free(skb);
+	return err;
+}
+
+static int team_nl_fill_options_get_changed(struct sk_buff *skb,
+					    u32 pid, u32 seq, int flags,
+					    struct team *team,
+					    struct team_option *changed_option)
+{
+	struct nlattr *option_list;
+	void *hdr;
+	struct team_option *option;
+
+	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+			  TEAM_CMD_OPTIONS_GET);
+	if (IS_ERR(hdr))
+		return PTR_ERR(hdr);
+
+	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+	if (!option_list)
+		return -EMSGSIZE;
+
+	list_for_each_entry(option, &team->option_list, list) {
+		struct nlattr *option_item;
+		long arg;
+
+		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+		if (!option_item)
+			goto nla_put_failure;
+		NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+		if (option == changed_option)
+			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+		switch (option->type) {
+		case TEAM_OPTION_TYPE_U32:
+			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+			team_option_get(team, option, &arg);
+			NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+			break;
+		case TEAM_OPTION_TYPE_STRING:
+			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+			team_option_get(team, option, &arg);
+			NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+				       (char *) arg);
+			break;
+		default:
+			BUG();
+		}
+		nla_nest_end(skb, option_item);
+	}
+
+	nla_nest_end(skb, option_list);
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+				    struct genl_info *info, int flags,
+				    struct team *team)
+{
+	return team_nl_fill_options_get_changed(skb, info->snd_pid,
+						info->snd_seq, NLM_F_ACK,
+						team, NULL);
+}
+
+static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct team *team;
+	int err;
+
+	team = team_nl_team_get(info);
+	if (!team)
+		return -EINVAL;
+
+	err = team_nl_send_generic(info, team, team_nl_fill_options_get);
+
+	team_nl_team_put(team);
+
+	return err;
+}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct team *team;
+	int err = 0;
+	int i;
+	struct nlattr *nl_option;
+
+	team = team_nl_team_get(info);
+	if (!team)
+		return -EINVAL;
+
+	err = -EINVAL;
+	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+		err = -EINVAL;
+		goto team_put;
+	}
+
+	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+		struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+		enum team_option_type opt_type;
+		struct team_option *option;
+		char *opt_name;
+		bool opt_found = false;
+
+		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+			err = -EINVAL;
+			goto team_put;
+		}
+		err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+				       nl_option, team_nl_option_policy);
+		if (err)
+			goto team_put;
+		if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+		    !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+		    !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+			err = -EINVAL;
+			goto team_put;
+		}
+		switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+		case NLA_U32:
+			opt_type = TEAM_OPTION_TYPE_U32;
+			break;
+		case NLA_STRING:
+			opt_type = TEAM_OPTION_TYPE_STRING;
+			break;
+		default:
+			goto team_put;
+		}
+
+		opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+		list_for_each_entry(option, &team->option_list, list) {
+			long arg;
+			struct nlattr *opt_data_attr;
+
+			if (option->type != opt_type ||
+			    strcmp(option->name, opt_name))
+				continue;
+			opt_found = true;
+			opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+			switch (opt_type) {
+			case TEAM_OPTION_TYPE_U32:
+				arg = nla_get_u32(opt_data_attr);
+				break;
+			case TEAM_OPTION_TYPE_STRING:
+				arg = (long) nla_data(opt_data_attr);
+				break;
+			default:
+				BUG();
+			}
+			err = team_option_set(team, option, &arg);
+			if (err)
+				goto team_put;
+		}
+		if (!opt_found) {
+			err = -ENOENT;
+			goto team_put;
+		}
+	}
+
+team_put:
+	team_nl_team_put(team);
+
+	return err;
+}
+
+static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
+					      u32 pid, u32 seq, int flags,
+					      struct team *team,
+					      struct team_port *changed_port)
+{
+	struct nlattr *port_list;
+	void *hdr;
+	struct team_port *port;
+
+	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+			  TEAM_CMD_PORT_LIST_GET);
+	if (IS_ERR(hdr))
+		return PTR_ERR(hdr);
+
+	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+	if (!port_list)
+		return -EMSGSIZE;
+
+	list_for_each_entry(port, &team->port_list, list) {
+		struct nlattr *port_item;
+
+		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+		if (!port_item)
+			goto nla_put_failure;
+		NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+		if (port == changed_port)
+			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+		if (port->linkup)
+			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+		NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+		NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+		nla_nest_end(skb, port_item);
+	}
+
+	nla_nest_end(skb, port_list);
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+				      struct genl_info *info, int flags,
+				      struct team *team)
+{
+	return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
+						  info->snd_seq, NLM_F_ACK,
+						  team, NULL);
+}
+
+static int team_nl_cmd_port_list_get(struct sk_buff *skb,
+				     struct genl_info *info)
+{
+	struct team *team;
+	int err;
+
+	team = team_nl_team_get(info);
+	if (!team)
+		return -EINVAL;
+
+	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
+
+	team_nl_team_put(team);
+
+	return err;
+}
+
+static struct genl_ops team_nl_ops[] = {
+	{
+		.cmd = TEAM_CMD_NOOP,
+		.doit = team_nl_cmd_noop,
+		.policy = team_nl_policy,
+	},
+	{
+		.cmd = TEAM_CMD_OPTIONS_SET,
+		.doit = team_nl_cmd_options_set,
+		.policy = team_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = TEAM_CMD_OPTIONS_GET,
+		.doit = team_nl_cmd_options_get,
+		.policy = team_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = TEAM_CMD_PORT_LIST_GET,
+		.doit = team_nl_cmd_port_list_get,
+		.policy = team_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+};
+
+static struct genl_multicast_group team_change_event_mcgrp = {
+	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+};
+
+static int team_nl_send_event_options_get(struct team *team,
+					  struct team_option *changed_option)
+{
+	struct sk_buff *skb;
+	int err;
+	struct net *net = dev_net(team->dev);
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
+					       changed_option);
+	if (err < 0)
+		goto err_fill;
+
+	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+				      GFP_KERNEL);
+	return err;
+
+err_fill:
+	nlmsg_free(skb);
+	return err;
+}
+
+static int team_nl_send_event_port_list_get(struct team_port *port)
+{
+	struct sk_buff *skb;
+	int err;
+	struct net *net = dev_net(port->team->dev);
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
+						 port->team, port);
+	if (err < 0)
+		goto err_fill;
+
+	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+				      GFP_KERNEL);
+	return err;
+
+err_fill:
+	nlmsg_free(skb);
+	return err;
+}
+
+static int team_nl_init(void)
+{
+	int err;
+
+	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
+					    ARRAY_SIZE(team_nl_ops));
+	if (err)
+		return err;
+
+	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
+	if (err)
+		goto err_change_event_grp_reg;
+
+	return 0;
+
+err_change_event_grp_reg:
+	genl_unregister_family(&team_nl_family);
+
+	return err;
+}
+
+static void team_nl_fini(void)
+{
+	genl_unregister_family(&team_nl_family);
+}
+
+
+/******************
+ * Change checkers
+ ******************/
+
+static void __team_options_change_check(struct team *team,
+					struct team_option *changed_option)
+{
+	int err;
+
+	err = team_nl_send_event_options_get(team, changed_option);
+	if (err)
+		netdev_warn(team->dev, "Failed to send options change via netlink\n");
+}
+
+/* rtnl lock is held */
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+	int err;
+
+	if (port->linkup == linkup)
+		return;
+
+	port->linkup = linkup;
+	if (linkup) {
+		struct ethtool_cmd ecmd;
+
+		err = __ethtool_get_settings(port->dev, &ecmd);
+		if (!err) {
+			port->speed = ethtool_cmd_speed(&ecmd);
+			port->duplex = ecmd.duplex;
+			goto send_event;
+		}
+	}
+	port->speed = 0;
+	port->duplex = 0;
+
+send_event:
+	err = team_nl_send_event_port_list_get(port);
+	if (err)
+		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
+			    port->dev->name);
+
+}
+
+static void team_port_change_check(struct team_port *port, bool linkup)
+{
+	struct team *team = port->team;
+
+	mutex_lock(&team->lock);
+	__team_port_change_check(port, linkup);
+	mutex_unlock(&team->lock);
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = (struct net_device *) ptr;
+	struct team_port *port;
+
+	port = team_port_get_rtnl(dev);
+	if (!port)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_UP:
+		if (netif_carrier_ok(dev))
+			team_port_change_check(port, true);
+	case NETDEV_DOWN:
+		team_port_change_check(port, false);
+	case NETDEV_CHANGE:
+		if (netif_running(port->dev))
+			team_port_change_check(port,
+					       !!netif_carrier_ok(port->dev));
+		break;
+	case NETDEV_UNREGISTER:
+		team_del_slave(port->team->dev, dev);
+		break;
+	case NETDEV_FEAT_CHANGE:
+		team_compute_features(port->team);
+		break;
+	case NETDEV_CHANGEMTU:
+		/* Forbid changing the MTU of an underlying device */
+		return NOTIFY_BAD;
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid changing the type of an underlying device */
+		return NOTIFY_BAD;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block team_notifier_block __read_mostly = {
+	.notifier_call = team_device_event,
+};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init team_module_init(void)
+{
+	int err;
+
+	register_netdevice_notifier(&team_notifier_block);
+
+	err = rtnl_link_register(&team_link_ops);
+	if (err)
+		goto err_rtnl_reg;
+
+	err = team_nl_init();
+	if (err)
+		goto err_nl_init;
+
+	return 0;
+
+err_nl_init:
+	rtnl_link_unregister(&team_link_ops);
+
+err_rtnl_reg:
+	unregister_netdevice_notifier(&team_notifier_block);
+
+	return err;
+}
+
+static void __exit team_module_exit(void)
+{
+	team_nl_fini();
+	rtnl_link_unregister(&team_link_ops);
+	unregister_netdevice_notifier(&team_notifier_block);
+}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644
index 0000000..f4d960e
--- /dev/null
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -0,0 +1,136 @@
+/*
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+	struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+	return (struct ab_priv *) &team->mode_priv;
+}
+
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+				      struct sk_buff *skb) {
+	struct team_port *active_port;
+
+	active_port = rcu_dereference(ab_priv(team)->active_port);
+	if (active_port != port)
+		return RX_HANDLER_EXACT;
+	return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+	struct team_port *active_port;
+
+	active_port = rcu_dereference(ab_priv(team)->active_port);
+	if (unlikely(!active_port))
+		goto drop;
+	skb->dev = active_port->dev;
+	if (dev_queue_xmit(skb))
+		return false;
+	return true;
+
+drop:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+	if (ab_priv(team)->active_port == port)
+		RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+	u32 *ifindex = arg;
+
+	*ifindex = 0;
+	if (ab_priv(team)->active_port)
+		*ifindex = ab_priv(team)->active_port->dev->ifindex;
+	return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+	u32 *ifindex = arg;
+	struct team_port *port;
+
+	list_for_each_entry_rcu(port, &team->port_list, list) {
+		if (port->dev->ifindex == *ifindex) {
+			rcu_assign_pointer(ab_priv(team)->active_port, port);
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+	{
+		.name = "activeport",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = ab_active_port_get,
+		.setter = ab_active_port_set,
+	},
+};
+
+static int ab_init(struct team *team)
+{
+	return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static void ab_exit(struct team *team)
+{
+	team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+	.init			= ab_init,
+	.exit			= ab_exit,
+	.receive		= ab_receive,
+	.transmit		= ab_transmit,
+	.port_leave		= ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+	.kind		= "activebackup",
+	.owner		= THIS_MODULE,
+	.priv_size	= sizeof(struct ab_priv),
+	.ops		= &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+	return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+	team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644
index 0000000..a0e8f80
--- /dev/null
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+	unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+	return (struct rr_priv *) &team->mode_priv;
+}
+
+static struct team_port *__get_first_port_up(struct team *team,
+					     struct team_port *port)
+{
+	struct team_port *cur;
+
+	if (port->linkup)
+		return port;
+	cur = port;
+	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+		if (cur->linkup)
+			return cur;
+	list_for_each_entry_rcu(cur, &team->port_list, list) {
+		if (cur == port)
+			break;
+		if (cur->linkup)
+			return cur;
+	}
+	return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+	struct team_port *port;
+	int port_index;
+
+	port_index = rr_priv(team)->sent_packets++ % team->port_count;
+	port = team_get_port_by_index_rcu(team, port_index);
+	port = __get_first_port_up(team, port);
+	if (unlikely(!port))
+		goto drop;
+	skb->dev = port->dev;
+	if (dev_queue_xmit(skb))
+		return false;
+	return true;
+
+drop:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+	return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+	team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+	.transmit		= rr_transmit,
+	.port_enter		= rr_port_enter,
+	.port_change_mac	= rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+	.kind		= "roundrobin",
+	.owner		= THIS_MODULE,
+	.priv_size	= sizeof(struct rr_priv),
+	.ops		= &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+	return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+	team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7bea9c6..93c5d72 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,7 +123,7 @@
 	gid_t			group;
 
 	struct net_device	*dev;
-	u32			set_features;
+	netdev_features_t	set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 			  NETIF_F_TSO6|NETIF_F_UFO)
 	struct fasync_struct	*fasync;
@@ -454,7 +454,8 @@
 	return 0;
 }
 
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 
@@ -1196,7 +1197,7 @@
  * privs required. */
 static int set_offload(struct tun_struct *tun, unsigned long arg)
 {
-	u32 features = 0;
+	netdev_features_t features = 0;
 
 	if (arg & TUN_F_CSUM) {
 		features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1590,15 @@
 {
 	struct tun_struct *tun = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->fw_version, "N/A");
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
 	switch (tun->flags & TUN_TYPE_MASK) {
 	case TUN_TUN_DEV:
-		strcpy(info->bus_info, "tun");
+		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
 		break;
 	case TUN_TAP_DEV:
-		strcpy(info->bus_info, "tap");
+		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
 		break;
 	}
 }
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e6fed4d..1ff7163 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 
-#define DRIVER_VERSION "08-Nov-2011"
+#define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
 
 /* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -689,6 +689,10 @@
 	}
 	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
 	wolinfo->wolopts = 0;
+	if (opt & AX_MONITOR_LINK)
+		wolinfo->wolopts |= WAKE_PHY;
+	if (opt & AX_MONITOR_MAGIC)
+		wolinfo->wolopts |= WAKE_MAGIC;
 }
 
 static int
@@ -1655,6 +1659,10 @@
 	// ASIX 88772a
 	USB_DEVICE(0x0db0, 0xa877),
 	.driver_info = (unsigned long) &ax88772_info,
+}, {
+	// Asus USB Ethernet Adapter
+	USB_DEVICE(0x0b95, 0x7e2b),
+	.driver_info = (unsigned long) &ax88772_info,
 },
 	{ },		// END
 };
@@ -1670,17 +1678,7 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init asix_init(void)
-{
- 	return usb_register(&asix_driver);
-}
-module_init(asix_init);
-
-static void __exit asix_exit(void)
-{
- 	usb_deregister(&asix_driver);
-}
-module_exit(asix_exit);
+module_usb_driver(asix_driver);
 
 MODULE_AUTHOR("David Hollis");
 MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index a68272c..182cfb4 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -949,19 +949,4 @@
 	.id_table =	catc_id_table,
 };
 
-static int __init catc_init(void)
-{
-	int result = usb_register(&catc_driver);
-	if (result == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-		       DRIVER_DESC "\n");
-	return result;
-}
-
-static void __exit catc_exit(void)
-{
-	usb_deregister(&catc_driver);
-}
-
-module_init(catc_init);
-module_exit(catc_exit);
+module_usb_driver(catc_driver);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index a60d006..790cbde 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@
 	struct page *page;
 	int err;
 
-	page = __netdev_alloc_page(dev, gfp_flags);
+	page = alloc_page(gfp_flags);
 	if (!page)
 		return -ENOMEM;
 
@@ -140,7 +140,7 @@
 	err = usb_submit_urb(req, gfp_flags);
 	if (unlikely(err)) {
 		dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
-		netdev_free_page(dev, page);
+		put_page(page);
 	}
 	return err;
 }
@@ -208,9 +208,9 @@
 	dev->stats.rx_errors++;
 resubmit:
 	if (page)
-		netdev_free_page(dev, page);
+		put_page(page);
 	if (req)
-		rx_submit(pnd, req, GFP_ATOMIC);
+		rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@
 	for (i = 0; i < rxq_size; i++) {
 		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
-		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+		if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
 			usbpn_close(dev);
 			return -ENOMEM;
 		}
@@ -457,18 +457,7 @@
 	.id_table =	usbpn_ids,
 };
 
-static int __init usbpn_init(void)
-{
-	return usb_register(&usbpn_driver);
-}
-
-static void __exit usbpn_exit(void)
-{
-	usb_deregister(&usbpn_driver);
-}
-
-module_init(usbpn_init);
-module_exit(usbpn_exit);
+module_usb_driver(usbpn_driver);
 
 MODULE_AUTHOR("Remi Denis-Courmont");
 MODULE_DESCRIPTION("USB CDC Phonet host interface");
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 882f53f..439690b 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -369,18 +369,7 @@
 	.resume =	usbnet_resume,
 };
 
-
-static int __init eem_init(void)
-{
-	return usb_register(&eem_driver);
-}
-module_init(eem_init);
-
-static void __exit eem_exit(void)
-{
-	usb_deregister(&eem_driver);
-}
-module_exit(eem_exit);
+module_usb_driver(eem_driver);
 
 MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>");
 MODULE_DESCRIPTION("USB CDC EEM");
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 99ed6eb..41a61ef 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -425,6 +425,9 @@
 	int				status;
 	struct cdc_state		*info = (void *) &dev->data;
 
+	BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
+			< sizeof(struct cdc_state)));
+
 	status = usbnet_generic_cdc_bind(dev, intf);
 	if (status < 0)
 		return status;
@@ -615,21 +618,7 @@
 	.supports_autosuspend = 1,
 };
 
-
-static int __init cdc_init(void)
-{
-	BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
-			< sizeof(struct cdc_state)));
-
- 	return usb_register(&cdc_driver);
-}
-module_init(cdc_init);
-
-static void __exit cdc_exit(void)
-{
- 	usb_deregister(&cdc_driver);
-}
-module_exit(cdc_exit);
+module_usb_driver(cdc_driver);
 
 MODULE_AUTHOR("David Brownell");
 MODULE_DESCRIPTION("USB CDC Ethernet devices");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f06fb78..cb8e595 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -465,12 +465,10 @@
 	int temp;
 	u8 iface_no;
 
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
 		return -ENODEV;
 
-	memset(ctx, 0, sizeof(*ctx));
-
 	init_timer(&ctx->tx_timer);
 	spin_lock_init(&ctx->mtx);
 	ctx->netdev = dev->net;
@@ -1232,20 +1230,7 @@
 	.nway_reset = usbnet_nway_reset,
 };
 
-static int __init cdc_ncm_init(void)
-{
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
-	return usb_register(&cdc_ncm_driver);
-}
-
-module_init(cdc_ncm_init);
-
-static void __exit cdc_ncm_exit(void)
-{
-	usb_deregister(&cdc_ncm_driver);
-}
-
-module_exit(cdc_ncm_exit);
+module_usb_driver(cdc_ncm_driver);
 
 MODULE_AUTHOR("Hans Petter Selasky");
 MODULE_DESCRIPTION("USB CDC NCM host driver");
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index fc5f13d..b403d93 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -338,17 +338,7 @@
 	.id_table =	products,
 };
 
-static int __init cdc_subset_init(void)
-{
-	return usb_register(&cdc_subset_driver);
-}
-module_init(cdc_subset_init);
-
-static void __exit cdc_subset_exit(void)
-{
-	usb_deregister(&cdc_subset_driver);
-}
-module_exit(cdc_subset_exit);
+module_usb_driver(cdc_subset_driver);
 
 MODULE_AUTHOR("David Brownell");
 MODULE_DESCRIPTION("Simple 'CDC Subset' USB networking links");
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 8969f12..0e05313 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -329,17 +329,7 @@
 	.resume		= usbnet_resume,
 };
 
-static int __init cx82310_init(void)
-{
-	return usb_register(&cx82310_driver);
-}
-module_init(cx82310_init);
-
-static void __exit cx82310_exit(void)
-{
-	usb_deregister(&cx82310_driver);
-}
-module_exit(cx82310_exit);
+module_usb_driver(cx82310_driver);
 
 MODULE_AUTHOR("Ondrej Zary");
 MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index fbc0e4d..b972263 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -672,18 +672,7 @@
 	.resume = usbnet_resume,
 };
 
-static int __init dm9601_init(void)
-{
-	return usb_register(&dm9601_driver);
-}
-
-static void __exit dm9601_exit(void)
-{
-	usb_deregister(&dm9601_driver);
-}
-
-module_init(dm9601_init);
-module_exit(dm9601_exit);
+module_usb_driver(dm9601_driver);
 
 MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
 MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index c4cfd1d..38266bd 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -227,17 +227,7 @@
 	.resume =	usbnet_resume,
 };
 
-static int __init usbnet_init(void)
-{
- 	return usb_register(&gl620a_driver);
-}
-module_init(usbnet_init);
-
-static void __exit usbnet_exit(void)
-{
- 	usb_deregister(&gl620a_driver);
-}
-module_exit(usbnet_exit);
+module_usb_driver(gl620a_driver);
 
 MODULE_AUTHOR("Jiun-Jie Huang");
 MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables");
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 131ac6c..12a22a4 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -238,17 +238,7 @@
 	.resume     = usbnet_resume,
 };
 
-static int __init int51x1_init(void)
-{
-	return usb_register(&int51x1_driver);
-}
-module_init(int51x1_init);
-
-static void __exit int51x1_exit(void)
-{
-	usb_deregister(&int51x1_driver);
-}
-module_exit(int51x1_exit);
+module_usb_driver(int51x1_driver);
 
 MODULE_AUTHOR("Peter Holik");
 MODULE_DESCRIPTION("Intellon usb powerline adapter");
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 13c1f04..08a4df2 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -543,25 +543,7 @@
 	.id_table =	ipheth_table,
 };
 
-static int __init ipheth_init(void)
-{
-	int retval;
-
-	retval = usb_register(&ipheth_driver);
-	if (retval) {
-		err("usb_register failed: %d", retval);
-		return retval;
-	}
-	return 0;
-}
-
-static void __exit ipheth_exit(void)
-{
-	usb_deregister(&ipheth_driver);
-}
-
-module_init(ipheth_init);
-module_exit(ipheth_exit);
+module_usb_driver(ipheth_driver);
 
 MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
 MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5a6d0f8..7562649 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -375,17 +375,7 @@
 	.resume = usbnet_resume
 };
 
-static int __init kalmia_init(void)
-{
-	return usb_register(&kalmia_driver);
-}
-module_init( kalmia_init);
-
-static void __exit kalmia_exit(void)
-{
-	usb_deregister(&kalmia_driver);
-}
-module_exit( kalmia_exit);
+module_usb_driver(kalmia_driver);
 
 MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
 MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 582ca2d..d034d9c 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1324,32 +1324,4 @@
 	}
 }
 
-
-/****************************************************************
- *     kaweth_init
- ****************************************************************/
-static int __init kaweth_init(void)
-{
-	dbg("Driver loading");
-	return usb_register(&kaweth_driver);
-}
-
-/****************************************************************
- *     kaweth_exit
- ****************************************************************/
-static void __exit kaweth_exit(void)
-{
-	usb_deregister(&kaweth_driver);
-}
-
-module_init(kaweth_init);
-module_exit(kaweth_exit);
-
-
-
-
-
-
-
-
-
+module_usb_driver(kaweth_driver);
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 9c26c63..45a981f 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -346,17 +346,7 @@
 	.resume		= usbnet_resume,
 };
 
-static int __init vl600_init(void)
-{
-	return usb_register(&lg_vl600_driver);
-}
-module_init(vl600_init);
-
-static void __exit vl600_exit(void)
-{
-	usb_deregister(&lg_vl600_driver);
-}
-module_exit(vl600_exit);
+module_usb_driver(lg_vl600_driver);
 
 MODULE_AUTHOR("Anrzej Zaborowski");
 MODULE_DESCRIPTION("LG-VL600 modem's ethernet link");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index db2cb74..a29aa9c 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -692,17 +692,7 @@
 	.reset_resume = mcs7830_reset_resume,
 };
 
-static int __init mcs7830_init(void)
-{
-	return usb_register(&mcs7830_driver);
-}
-module_init(mcs7830_init);
-
-static void __exit mcs7830_exit(void)
-{
-	usb_deregister(&mcs7830_driver);
-}
-module_exit(mcs7830_exit);
+module_usb_driver(mcs7830_driver);
 
 MODULE_DESCRIPTION("USB to network adapter MCS7830)");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 01db460..83f965c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -589,17 +589,7 @@
 	.resume =	usbnet_resume,
 };
 
-static int __init net1080_init(void)
-{
- 	return usb_register(&net1080_driver);
-}
-module_init(net1080_init);
-
-static void __exit net1080_exit(void)
-{
- 	usb_deregister(&net1080_driver);
-}
-module_exit(net1080_exit);
+module_usb_driver(net1080_driver);
 
 MODULE_AUTHOR("David Brownell");
 MODULE_DESCRIPTION("NetChip 1080 based USB Host-to-Host Links");
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 769f509..5d99b8c 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -55,8 +55,8 @@
 #define	BMSR_MEDIA	(BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
 			BMSR_100FULL | BMSR_ANEGCAPABLE)
 
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
 static char *devid;
 
 static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@
 	for (i = 0; i < REG_TIMEOUT; i++) {
 		get_registers(pegasus, EthCtrl1, 1, &data);
 		if (~data & 0x08) {
-			if (loopback & 1)
+			if (loopback)
 				break;
 			if (mii_mode && (pegasus->features & HAS_HOME_PNA))
 				set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@
 		data[1] |= 0x10;	/* set 100 Mbps */
 	if (mii_mode)
 		data[1] = 0;
-	data[2] = (loopback & 1) ? 0x09 : 0x01;
+	data[2] = loopback ? 0x09 : 0x01;
 
 	memcpy(pegasus->eth_regs, data, sizeof(data));
 	ret = set_registers(pegasus, EthCtrl0, 3, data);
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 217aec8..b2b035e 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -154,17 +154,7 @@
 	.resume =	usbnet_resume,
 };
 
-static int __init plusb_init(void)
-{
-	return usb_register(&plusb_driver);
-}
-module_init(plusb_init);
-
-static void __exit plusb_exit(void)
-{
-	usb_deregister(&plusb_driver);
-}
-module_exit(plusb_exit);
+module_usb_driver(plusb_driver);
 
 MODULE_AUTHOR("David Brownell");
 MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 255d6a4..c8f1b5b 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -635,17 +635,7 @@
 	.resume =	usbnet_resume,
 };
 
-static int __init rndis_init(void)
-{
-	return usb_register(&rndis_driver);
-}
-module_init(rndis_init);
-
-static void __exit rndis_exit(void)
-{
-	usb_deregister(&rndis_driver);
-}
-module_exit(rndis_exit);
+module_usb_driver(rndis_driver);
 
 MODULE_AUTHOR("David Brownell");
 MODULE_DESCRIPTION("USB Host side RNDIS driver");
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index bf8c84d..0710b4c 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -978,20 +978,7 @@
 	.resume		= rtl8150_resume
 };
 
-static int __init usb_rtl8150_init(void)
-{
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-	return usb_register(&rtl8150_driver);
-}
-
-static void __exit usb_rtl8150_exit(void)
-{
-	usb_deregister(&rtl8150_driver);
-}
-
-module_init(usb_rtl8150_init);
-module_exit(usb_rtl8150_exit);
+module_usb_driver(rtl8150_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ed1b432..e45dfdc 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -900,6 +900,9 @@
 	u16 len;
 	bool need_tail;
 
+	BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+				< sizeof(struct cdc_state));
+
 	dev_dbg(&dev->udev->dev, "%s", __func__);
 	if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
 		/* enough head room as is? */
@@ -981,21 +984,7 @@
 	.no_dynamic_id = 1,
 };
 
-static int __init sierra_net_init(void)
-{
-	BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
-				< sizeof(struct cdc_state));
-
-	return usb_register(&sierra_net_driver);
-}
-
-static void __exit sierra_net_exit(void)
-{
-	usb_deregister(&sierra_net_driver);
-}
-
-module_exit(sierra_net_exit);
-module_init(sierra_net_init);
+module_usb_driver(sierra_net_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index a5b9b12..3b017bb 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -76,7 +76,7 @@
 	struct usbnet *dev;
 };
 
-static int turbo_mode = true;
+static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
@@ -728,7 +728,8 @@
 }
 
 /* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -1237,17 +1238,7 @@
 	.disconnect	= usbnet_disconnect,
 };
 
-static int __init smsc75xx_init(void)
-{
-	return usb_register(&smsc75xx_driver);
-}
-module_init(smsc75xx_init);
-
-static void __exit smsc75xx_exit(void)
-{
-	usb_deregister(&smsc75xx_driver);
-}
-module_exit(smsc75xx_exit);
+module_usb_driver(smsc75xx_driver);
 
 MODULE_AUTHOR("Nancy Lin");
 MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index eff6767..d45520e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -59,7 +59,7 @@
 	struct usbnet *dev;
 };
 
-static int turbo_mode = true;
+static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
@@ -516,7 +516,8 @@
 }
 
 /* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+	netdev_features_t features)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	u32 read_buf;
@@ -1297,17 +1298,7 @@
 	.disconnect	= usbnet_disconnect,
 };
 
-static int __init smsc95xx_init(void)
-{
-	return usb_register(&smsc95xx_driver);
-}
-module_init(smsc95xx_init);
-
-static void __exit smsc95xx_exit(void)
-{
-	usb_deregister(&smsc95xx_driver);
-}
-module_exit(smsc95xx_exit);
+module_usb_driver(smsc95xx_driver);
 
 MODULE_AUTHOR("Nancy Lin");
 MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 1a2234c..f701d41 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -362,17 +362,7 @@
 	.resume =	usbnet_resume,
 };
 
-static int __init zaurus_init(void)
-{
-	return usb_register(&zaurus_driver);
-}
-module_init(zaurus_init);
-
-static void __exit zaurus_exit(void)
-{
-	usb_deregister(&zaurus_driver);
-}
-module_exit(zaurus_exit);
+module_usb_driver(zaurus_driver);
 
 MODULE_AUTHOR("Pavel Machek, David Brownell");
 MODULE_DESCRIPTION("Sharp Zaurus PDA, and compatible products");
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ef883e9..49f4667 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,8 +27,8 @@
 
 struct veth_net_stats {
 	u64			rx_packets;
-	u64			tx_packets;
 	u64			rx_bytes;
+	u64			tx_packets;
 	u64			tx_bytes;
 	u64			rx_dropped;
 	struct u64_stats_sync	syncp;
@@ -66,9 +66,8 @@
 
 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->fw_version, "N/A");
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@
 	dev->features |= NETIF_F_LLTX;
 	dev->destructor = veth_dev_free;
 
-	dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
 }
 
 /*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6ee8410..76fe14e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
 static int napi_weight = 128;
 module_param(napi_weight, int, 0444);
 
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
 
@@ -39,6 +39,7 @@
 #define GOOD_COPY_LEN	128
 
 #define VIRTNET_SEND_COMMAND_SG_MAX    2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
 	struct u64_stats_sync syncp;
@@ -155,6 +156,7 @@
 	*len -= size;
 }
 
+/* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   struct page *page, unsigned int len)
 {
@@ -357,7 +359,7 @@
 	struct skb_vnet_hdr *hdr;
 	int err;
 
-	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
 	if (unlikely(!skb))
 		return -ENOMEM;
 
@@ -439,7 +441,13 @@
 	return err;
 }
 
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
 static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	int err;
@@ -501,7 +509,7 @@
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
 	if (still_empty)
-		schedule_delayed_work(&vi->refill, HZ/2);
+		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -520,7 +528,7 @@
 
 	if (vi->num < vi->max / 2) {
 		if (!try_fill_recv(vi, GFP_ATOMIC))
-			schedule_delayed_work(&vi->refill, 0);
+			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
 	}
 
 	/* Out of packets? */
@@ -699,6 +707,7 @@
 	}
 
 	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
 	tot->rx_dropped = dev->stats.rx_dropped;
 	tot->rx_length_errors = dev->stats.rx_length_errors;
 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -719,6 +728,10 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
+	/* Make sure we have some buffers: if OOM, retry via the workqueue. */
+	if (!try_fill_recv(vi, GFP_KERNEL))
+		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
 	virtnet_napi_enable(vi);
 	return 0;
 }
@@ -772,6 +785,8 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
+	/* Make sure refill_work doesn't re-enable napi! */
+	cancel_delayed_work_sync(&vi->refill);
 	napi_disable(&vi->napi);
 
 	return 0;
@@ -853,7 +868,7 @@
 	kfree(buf);
 }
 
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;
@@ -863,9 +878,10 @@
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+	return 0;
 }
 
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;
@@ -875,6 +891,7 @@
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+	return 0;
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -889,7 +906,21 @@
 
 }
 
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtio_device *vdev = vi->vdev;
+
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
+	.get_drvinfo = virtnet_get_drvinfo,
 	.get_link = ethtool_op_get_link,
 	.get_ringparam = virtnet_get_ringparam,
 };
@@ -1082,7 +1113,6 @@
 
 unregister:
 	unregister_netdev(dev);
-	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free_stats:
@@ -1121,9 +1151,7 @@
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-
 	unregister_netdev(vi->dev);
-	cancel_delayed_work_sync(&vi->refill);
 
 	/* Free unused buffers in both send and recv, if any. */
 	free_unused_bufs(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d96bfb1..de7fc34 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1926,7 +1926,7 @@
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@
 	}
 
 	set_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@
 	}
 
 	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
 }
 
 
@@ -2163,7 +2167,8 @@
 		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
 		get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
 		for (i = 0; i < rssConf->indTableSize; i++)
-			rssConf->indTable[i] = i % adapter->num_rx_queues;
+			rssConf->indTable[i] = ethtool_rxfh_indir_default(
+				i, adapter->num_rx_queues);
 
 		devRead->rssConfDesc.confVer = 1;
 		devRead->rssConfDesc.confLen = sizeof(*rssConf);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e662cbc..587a218 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -202,14 +202,9 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
 	strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
-	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
 
 	strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
 		sizeof(drvinfo->version));
-	drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
-	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-	drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@
 	}
 }
 
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	unsigned long flags;
-	u32 changed = features ^ netdev->features;
+	netdev_features_t changed = features ^ netdev->features;
 
 	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
 		if (features & NETIF_F_RXCSUM)
@@ -570,44 +565,38 @@
 }
 
 #ifdef VMXNET3_RSS
-static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
-		      struct ethtool_rxfh_indir *p)
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
-	unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
 
-	p->size = rssConf->indTableSize;
+	return rssConf->indTableSize;
+}
+
+static int
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+	unsigned int n = rssConf->indTableSize;
+
 	while (n--)
-		p->ring_index[n] = rssConf->indTable[n];
+		p[n] = rssConf->indTable[n];
 	return 0;
 
 }
 
 static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
-		      const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
 {
 	unsigned int i;
 	unsigned long flags;
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
-	if (p->size != rssConf->indTableSize)
-		return -EINVAL;
-	for (i = 0; i < rssConf->indTableSize; i++) {
-		/*
-		 * Return with error code if any of the queue indices
-		 * is out of range
-		 */
-		if (p->ring_index[i] < 0 ||
-		    p->ring_index[i] >= adapter->num_rx_queues)
-			return -EINVAL;
-	}
-
 	for (i = 0; i < rssConf->indTableSize; i++)
-		rssConf->indTable[i] = p->ring_index[i];
+		rssConf->indTable[i] = p[i];
 
 	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -619,7 +608,7 @@
 }
 #endif
 
-static struct ethtool_ops vmxnet3_ethtool_ops = {
+static const struct ethtool_ops vmxnet3_ethtool_ops = {
 	.get_settings      = vmxnet3_get_settings,
 	.get_drvinfo       = vmxnet3_get_drvinfo,
 	.get_regs_len      = vmxnet3_get_regs_len,
@@ -634,6 +623,7 @@
 	.set_ringparam     = vmxnet3_set_ringparam,
 	.get_rxnfc         = vmxnet3_get_rxnfc,
 #ifdef VMXNET3_RSS
+	.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
 	.get_rxfh_indir    = vmxnet3_get_rss_indir,
 	.set_rxfh_indir    = vmxnet3_set_rss_indir,
 #endif
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index b18eac1..ed54797 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -401,7 +401,7 @@
 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
 
 int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
 
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 783168c..d43f4ef 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -155,7 +155,7 @@
 static const char  version[] =
 	"Granch SBNI12 driver ver 5.0.1  Jun 22 2001  Denis I.Timofeev.\n";
 
-static int  skip_pci_probe	__initdata = 0;
+static bool skip_pci_probe	__initdata = false;
 static int  scandone	__initdata = 0;
 static int  num		__initdata = 0;
 
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0b4fd05..4f774847 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -362,7 +362,7 @@
 static int txdma=1;
 static int rxdma=3;
 static int irq=5;
-static int slow=0;
+static bool slow=false;
 
 module_param(io, int, 0);
 MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c421a61..c806d45 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -75,7 +75,7 @@
  *        device is up and running or shutdown (through ifconfig up /
  *        down). Bus-generic only.
  *
- *  - control ops: control.c - implements various commmands for
+ *  - control ops: control.c - implements various commands for
  *        controlling the device. bus-generic only.
  *
  *  - device model glue: driver.c - implements helpers for the
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b9ecb2..f20886a 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -562,7 +562,7 @@
 {
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_msg_hdr *tx_msg;
-	bool try_head = 0;
+	bool try_head = false;
 	BUG_ON(i2400m->tx_msg != NULL);
 	/*
 	 * In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@
 	else if (tx_msg == TAIL_FULL) {
 		i2400m_tx_skip_tail(i2400m);
 		d_printf(2, dev, "new TX message: tail full, trying head\n");
-		try_head = 1;
+		try_head = true;
 		goto try_head;
 	}
 	memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@
 	unsigned long flags;
 	size_t padded_len;
 	void *ptr;
-	bool try_head = 0;
+	bool try_head = false;
 	unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
 		|| pl_type == I2400M_PT_RESET_COLD;
 
@@ -771,7 +771,7 @@
 		d_printf(2, dev, "pl append: tail full\n");
 		i2400m_tx_close(i2400m);
 		i2400m_tx_skip_tail(i2400m);
-		try_head = 1;
+		try_head = true;
 		goto try_new;
 	} else if (ptr == NULL) {	/* All full */
 		result = -ENOSPC;
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index ac357ac..99ef81b 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -177,7 +177,6 @@
 static
 int i2400mu_txd(void *_i2400mu)
 {
-	int result = 0;
 	struct i2400mu *i2400mu = _i2400mu;
 	struct i2400m *i2400m = &i2400mu->i2400m;
 	struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@
 		/* Yeah, we ignore errors ... not much we can do */
 		i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
 		i2400m_tx_msg_sent(i2400m);	/* ack it, advance the FIFO */
-		if (result < 0)
-			break;
 	}
 
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
 	i2400mu->tx_kthread = NULL;
 	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 
-	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
-	return result;
+	d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+	return 0;
 }
 
 
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0a304b0..98db7619 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@
 obj-$(CONFIG_MWL8K)	+= mwl8k.o
 
 obj-$(CONFIG_IWLWIFI)	+= iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY)	+= iwlegacy/
+obj-$(CONFIG_IWLEGACY)	+= iwlegacy/
 obj-$(CONFIG_RT2X00)	+= rt2x00/
 
 obj-$(CONFIG_P54_COMMON)	+= p54/
@@ -58,6 +58,6 @@
 obj-$(CONFIG_IWM)	+= iwmc3200wifi/
 
 obj-$(CONFIG_MWIFIEX)	+= mwifiex/
-obj-$(CONFIG_BRCMFMAC) += brcm80211/
-obj-$(CONFIG_BRCMUMAC) += brcm80211/
-obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_BRCMFMAC)	+= brcm80211/
+obj-$(CONFIG_BRCMSMAC)	+= brcm80211/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ac1176a..1c008c6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1418,7 +1418,7 @@
 	emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
 	emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
 	emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
-	emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+	emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
 	emmh32_final(&context->seed, (u8*)&mic->mic);
 
 	/*    New Type/length ?????????? */
@@ -1506,7 +1506,7 @@
 		emmh32_update(&context->seed, eth->da, ETH_ALEN*2); 
 		emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap)); 
 		emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));	
-		emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);	
+		emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);	
 		//Calculate MIC
 		emmh32_final(&context->seed, digest);
 	
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d121469..d716b74 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -11,3 +11,4 @@
 		key.o
 
 ath-$(CONFIG_ATH_DEBUG) += debug.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 0f9ee46..efc0111 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -152,6 +152,7 @@
 	struct ath_cycle_counters cc_survey;
 
 	struct ath_regulatory regulatory;
+	struct ath_regulatory reg_world_copy;
 	const struct ath_ops *ops;
 	const struct ath_bus_ops *bus_ops;
 
@@ -214,6 +215,10 @@
  * @ATH_DBG_HWTIMER: hardware timer handling
 * @ATH_DBG_BTCOEX: bluetooth coexistence
  * @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ *	used exclusively for WLAN-BT coexistence starting from
+ *	AR9462.
+ * @ATH_DBG_DFS: radar detection
  * @ATH_DBG_ANY: enable all debugging
  *
  * The debug level is used to control the amount and type of debugging output
@@ -239,6 +244,8 @@
 	ATH_DBG_BTCOEX		= 0x00002000,
 	ATH_DBG_WMI		= 0x00004000,
 	ATH_DBG_BSTUCK		= 0x00008000,
+	ATH_DBG_MCI		= 0x00010000,
+	ATH_DBG_DFS		= 0x00020000,
 	ATH_DBG_ANY		= 0xffffffff
 };
 
@@ -248,7 +255,7 @@
 
 #define ath_dbg(common, dbg_mask, fmt, ...)				\
 do {									\
-	if ((common)->debug_mask & dbg_mask)				\
+	if ((common)->debug_mask & ATH_DBG_##dbg_mask)			\
 		_ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__);	\
 } while (0)
 
@@ -258,10 +265,13 @@
 #else
 
 static inline  __attribute__ ((format (printf, 3, 4)))
-void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
 	     const char *fmt, ...)
 {
 }
+#define ath_dbg(common, dbg_mask, fmt, ...)				\
+	_ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
+
 #define ATH_DBG_WARN(foo, arg...) do {} while (0)
 #define ATH_DBG_WARN_ON_ONCE(foo) ({				\
 	int __ret_warn_once = !!(foo);				\
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e5be7e70..ee7ea57 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -166,7 +166,9 @@
 		if (to_platform_device(ah->dev)->id == 0 &&
 		    (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
 		     (BD_WLAN1 | BD_WLAN0))
-			__set_bit(ATH_STAT_2G_DISABLED, ah->status);
+			ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+		else
+			ah->ah_capabilities.cap_needs_2GHz_ovr = false;
 	}
 
 	ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bea90e6..bf67416 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -27,15 +27,21 @@
  * or reducing sensitivity as necessary.
  *
  * The parameters are:
+ *
  *   - "noise immunity"
+ *
  *   - "spur immunity"
+ *
  *   - "firstep level"
+ *
  *   - "OFDM weak signal detection"
+ *
  *   - "CCK weak signal detection"
  *
  * Basically we look at the amount of ODFM and CCK timing errors we get and then
  * raise or lower immunity accordingly by setting one or more of these
  * parameters.
+ *
  * Newer chipsets have PHY error counters in hardware which will generate a MIB
 * interrupt when they overflow. Older hardware has to enable PHY error frames
  * by setting a RX flag and then count every single PHY error. When a specified
@@ -45,11 +51,13 @@
  */
 
 
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
 
 /**
  * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
  */
 void
@@ -91,12 +99,11 @@
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
  * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @max_spur_level (the maximum level is dependent
- *	on the chip revision).
+ * on the chip revision).
  */
 void
 ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
  * ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
  */
 void
@@ -140,11 +146,9 @@
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- *						detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
  * @on: turn on or off
  */
 void
@@ -182,10 +186,9 @@
 			  on ? "on" : "off");
 }
 
-
 /**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
  * @on: turn on or off
  */
 void
@@ -200,13 +203,16 @@
 }
 
 
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
 
 /**
  * ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- *	the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters then.
  *
  * Try to raise noise immunity (=decrease sensitivity) in several steps
  * depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@
 	*/
 }
 
-
 /**
  * ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Try to lower noise immunity (=increase sensitivity) in several steps
  * depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@
 	}
 }
 
-
 /**
  * ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Return an approximation of the time spent "listening" in milliseconds (ms)
  * since the last call of this function.
@@ -379,9 +387,10 @@
 	return listen;
 }
 
-
 /**
  * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Clear the PHY error counters as soon as possible, since this might be called
  * from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@
 	return 1;
 }
 
-
 /**
  * ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
  *
  * Just reset counters, so they are clear for the next "ani period".
  */
 static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
 {
 	/* keep last values for debugging */
 	as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@
 	as->listen_time = 0;
 }
 
-
 /**
  * ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
  *
  * We count OFDM and CCK errors relative to the time where we did not send or
  * receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@
 		/* too many PHY errors - we have to raise immunity */
 		bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
 		ath5k_ani_raise_immunity(ah, as, ofdm_flag);
-		ath5k_ani_period_restart(ah, as);
+		ath5k_ani_period_restart(as);
 
 	} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
 		/* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@
 		if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
 			ath5k_ani_lower_immunity(ah, as);
 
-		ath5k_ani_period_restart(ah, as);
+		ath5k_ani_period_restart(as);
 	}
 }
 
 
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
 
 /**
  * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
  *
  * Just read & reset the registers quickly, so they don't generate more
  * interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@
 		tasklet_schedule(&ah->ani_tasklet);
 }
 
-
 /**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
  *
  * This is used by hardware without PHY error counters to report PHY errors
  * on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@
 }
 
 
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
 
 /**
  * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
  *
  * Enable PHY error counters for OFDM and CCK timing errors.
  */
@@ -596,9 +613,9 @@
 	ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
 }
 
-
 /**
  * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
  *
  * Disable PHY error counters for OFDM and CCK timing errors.
  */
@@ -615,10 +632,10 @@
 	ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
 }
 
-
 /**
  * ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
  *
  * Initialize ANI according to mode.
  */
@@ -695,10 +712,18 @@
 }
 
 
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
 
 #ifdef CONFIG_ATH5K_DEBUG
 
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
 void
 ath5k_ani_print_counters(struct ath5k_hw *ah)
 {
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 7358b6c..21aa355 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -40,13 +40,13 @@
  * enum ath5k_ani_mode - mode for ANI / noise sensitivity
  *
  * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- *	algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- *	maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- *	minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- *	amount of OFDM and CCK frame errors (default).
+ *			algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ *			maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ *			minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ *			amount of OFDM and CCK frame errors (default).
  */
 enum ath5k_ani_mode {
 	ATH5K_ANI_MODE_OFF		= 0,
@@ -58,8 +58,22 @@
 
 /**
  * struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
  */
 struct ath5k_ani_state {
 	enum ath5k_ani_mode	ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index fecbcd9..c2b2518 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -187,10 +187,9 @@
 #define AR5K_TUNE_MAX_TXPOWER			63
 #define AR5K_TUNE_DEFAULT_TXPOWER		25
 #define AR5K_TUNE_TPC_TXPOWER			false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    10000   /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    60000   /* 60 sec */
+#define	ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT	10000	/* 10 sec */
 #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI	1000	/* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF	60000	/* 60 sec */
-
 #define ATH5K_TX_COMPLETE_POLL_INT		3000	/* 3 sec */
 
 #define AR5K_INIT_CARR_SENSE_EN			1
@@ -262,16 +261,34 @@
 #define	AR5K_AGC_SETTLING_TURBO			37
 
 
-/* GENERIC CHIPSET DEFINITIONS */
 
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
 enum ath5k_version {
 	AR5K_AR5210	= 0,
 	AR5K_AR5211	= 1,
 	AR5K_AR5212	= 2,
 };
 
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
 enum ath5k_radio {
 	AR5K_RF5110	= 0,
 	AR5K_RF5111	= 1,
@@ -303,11 +320,11 @@
 #define AR5K_SREV_AR5213A	0x59 /* Hainan */
 #define AR5K_SREV_AR2413	0x78 /* Griffin lite */
 #define AR5K_SREV_AR2414	0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6	0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7	0x87 /* AP51-Full */
 #define AR5K_SREV_AR5424	0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1	0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2	0x91 /* AP61-Full */
 #define AR5K_SREV_AR5413	0xa4 /* Eagle lite */
 #define AR5K_SREV_AR5414	0xa0 /* Eagle */
 #define AR5K_SREV_AR2415	0xb0 /* Talon */
@@ -344,32 +361,40 @@
 
 /* TODO add support to mac80211 for vendor-specific rates and modes */
 
-/*
+/**
+ * DOC: Atheros XR
+ *
  * Some of this information is based on Documentation from:
  *
  * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
  *
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
  *
 * Please note that you can either use XR or TURBO but you cannot use both,
  * they are exclusive.
  *
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
  */
-#define MODULATION_XR		0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput transmission speed up to 40Mbit/s
+ * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
  * There is also a distinction between "static" and "dynamic" turbo modes:
  *
  * - Static: is the dumb version: devices set to this mode stick to it until
  *     the mode is turned off.
+ *
  * - Dynamic: is the intelligent version, the network decides itself if it
  *     is ok to use turbo. As soon as traffic is detected on adjacent channels
  *     (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@
  *
  * http://www.pcworld.com/article/id,113428-page,1/article.html
  *
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
  *
  * - Bursting: allows multiple frames to be sent at once, rather than pausing
  *     after each frame. Bursting is a standards-compliant feature that can be
  *     used with any Access Point.
+ *
  * - Fast frames: increases the amount of information that can be sent per
  *     frame, also resulting in a reduction of transmission overhead. It is a
  *     proprietary feature that needs to be supported by the Access Point.
+ *
  * - Compression: data frames are compressed in real time using a Lempel Ziv
  *     algorithm. This is done transparently. Once this feature is enabled,
  *     compression and decompression takes place inside the chipset, without
  *     putting additional load on the host CPU.
  *
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
  */
-#define MODULATION_TURBO	0x00000080
 
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and it also maps EEPROM structures.
+ */
 enum ath5k_driver_mode {
 	AR5K_MODE_11A		=	0,
 	AR5K_MODE_11B		=	1,
@@ -408,30 +448,64 @@
 	AR5K_MODE_MAX		=	3
 };
 
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx-
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control check out phy.c
+ */
 enum ath5k_ant_mode {
-	AR5K_ANTMODE_DEFAULT	= 0,	/* default antenna setup */
-	AR5K_ANTMODE_FIXED_A	= 1,	/* only antenna A is present */
-	AR5K_ANTMODE_FIXED_B	= 2,	/* only antenna B is present */
-	AR5K_ANTMODE_SINGLE_AP	= 3,	/* sta locked on a single ap */
-	AR5K_ANTMODE_SECTOR_AP	= 4,	/* AP with tx antenna set on tx desc */
-	AR5K_ANTMODE_SECTOR_STA	= 5,	/* STA with tx antenna set on tx desc */
-	AR5K_ANTMODE_DEBUG	= 6,	/* Debug mode -A -> Rx, B-> Tx- */
+	AR5K_ANTMODE_DEFAULT	= 0,
+	AR5K_ANTMODE_FIXED_A	= 1,
+	AR5K_ANTMODE_FIXED_B	= 2,
+	AR5K_ANTMODE_SINGLE_AP	= 3,
+	AR5K_ANTMODE_SECTOR_AP	= 4,
+	AR5K_ANTMODE_SECTOR_STA	= 5,
+	AR5K_ANTMODE_DEBUG	= 6,
 	AR5K_ANTMODE_MAX,
 };
 
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
 enum ath5k_bw_mode {
-	AR5K_BWMODE_DEFAULT	= 0,	/* 20MHz, default operation */
-	AR5K_BWMODE_5MHZ	= 1,	/* Quarter rate */
-	AR5K_BWMODE_10MHZ	= 2,	/* Half rate */
-	AR5K_BWMODE_40MHZ	= 3	/* Turbo */
+	AR5K_BWMODE_DEFAULT	= 0,
+	AR5K_BWMODE_5MHZ	= 1,
+	AR5K_BWMODE_10MHZ	= 2,
+	AR5K_BWMODE_40MHZ	= 3
 };
 
+
+
 /****************\
   TX DEFINITIONS
 \****************/
 
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
  */
 struct ath5k_tx_status {
 	u16	ts_seqnum;
@@ -454,7 +528,6 @@
  * enum ath5k_tx_queue - Queue types used to classify tx queues.
  * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
  * @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
  * @AR5K_TX_QUEUE_BEACON: The beacon queue
  * @AR5K_TX_QUEUE_CAB: The after-beacon queue
  * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@
 enum ath5k_tx_queue {
 	AR5K_TX_QUEUE_INACTIVE = 0,
 	AR5K_TX_QUEUE_DATA,
-	AR5K_TX_QUEUE_XR_DATA,
 	AR5K_TX_QUEUE_BEACON,
 	AR5K_TX_QUEUE_CAB,
 	AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@
 #define	AR5K_NUM_TX_QUEUES		10
 #define	AR5K_NUM_TX_QUEUES_NOQCU	2
 
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
  * These are the 4 Access Categories as defined in
  * WME spec. 0 is the lowest priority and 4 is the
  * highest. Normal data that hasn't been classified
  * goes to the Best Effort AC.
  */
 enum ath5k_tx_queue_subtype {
-	AR5K_WME_AC_BK = 0,	/*Background traffic*/
-	AR5K_WME_AC_BE,		/*Best-effort (normal) traffic*/
-	AR5K_WME_AC_VI,		/*Video traffic*/
-	AR5K_WME_AC_VO,		/*Voice traffic*/
+	AR5K_WME_AC_BK = 0,
+	AR5K_WME_AC_BE,
+	AR5K_WME_AC_VI,
+	AR5K_WME_AC_VO,
 };
 
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
  */
 enum ath5k_tx_queue_id {
 	AR5K_TX_QUEUE_ID_NOQCU_DATA	= 0,
 	AR5K_TX_QUEUE_ID_NOQCU_BEACON	= 1,
-	AR5K_TX_QUEUE_ID_DATA_MIN	= 0, /*IEEE80211_TX_QUEUE_DATA0*/
-	AR5K_TX_QUEUE_ID_DATA_MAX	= 3, /*IEEE80211_TX_QUEUE_DATA3*/
-	AR5K_TX_QUEUE_ID_DATA_SVP	= 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
-	AR5K_TX_QUEUE_ID_CAB		= 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
-	AR5K_TX_QUEUE_ID_BEACON		= 7, /*IEEE80211_TX_QUEUE_BEACON*/
-	AR5K_TX_QUEUE_ID_UAPSD		= 8,
-	AR5K_TX_QUEUE_ID_XR_DATA	= 9,
+	AR5K_TX_QUEUE_ID_DATA_MIN	= 0,
+	AR5K_TX_QUEUE_ID_DATA_MAX	= 3,
+	AR5K_TX_QUEUE_ID_UAPSD		= 7,
+	AR5K_TX_QUEUE_ID_CAB		= 8,
+	AR5K_TX_QUEUE_ID_BEACON		= 9,
 };
 
 /*
@@ -521,46 +603,70 @@
 #define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS		0x1000	/* Disable backoff while bursting */
 #define AR5K_TXQ_FLAG_COMPRESSION_ENABLE	0x2000	/* Enable hw compression -not implemented-*/
 
-/*
- * Data transmit queue state.  One of these exists for each
- * hardware transmit queue.  Packets sent to us from above
- * are assigned to queues based on their priority.  Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority.  Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
  */
 struct ath5k_txq {
-	unsigned int		qnum;	/* hardware q number */
-	u32			*link;	/* link ptr in last TX desc */
-	struct list_head	q;	/* transmit queue */
-	spinlock_t		lock;	/* lock on q and link */
+	unsigned int		qnum;
+	u32			*link;
+	struct list_head	q;
+	spinlock_t		lock;
 	bool			setup;
-	int			txq_len; /* number of queued buffers */
-	int			txq_max; /* max allowed num of queued buffers */
+	int			txq_len;
+	int			txq_max;
 	bool			txq_poll_mark;
-	unsigned int		txq_stuck;	/* informational counter */
+	unsigned int		txq_stuck;
 };
 
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
  */
 struct ath5k_txq_info {
 	enum ath5k_tx_queue tqi_type;
 	enum ath5k_tx_queue_subtype tqi_subtype;
-	u16	tqi_flags;	/* Tx queue flags (see above) */
-	u8	tqi_aifs;	/* Arbitrated Interframe Space */
-	u16	tqi_cw_min;	/* Minimum Contention Window */
-	u16	tqi_cw_max;	/* Maximum Contention Window */
-	u32	tqi_cbr_period; /* Constant bit rate period */
+	u16	tqi_flags;
+	u8	tqi_aifs;
+	u16	tqi_cw_min;
+	u16	tqi_cw_max;
+	u32	tqi_cbr_period;
 	u32	tqi_cbr_overflow_limit;
 	u32	tqi_burst_time;
-	u32	tqi_ready_time; /* Time queue waits after an event */
+	u32	tqi_ready_time;
 };
 
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
  */
 enum ath5k_pkt_type {
 	AR5K_PKT_TYPE_NORMAL		= 0,
@@ -583,27 +689,23 @@
 	(ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v)	\
 )
 
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
-	AR5K_DMASIZE_4B	= 0,
-	AR5K_DMASIZE_8B,
-	AR5K_DMASIZE_16B,
-	AR5K_DMASIZE_32B,
-	AR5K_DMASIZE_64B,
-	AR5K_DMASIZE_128B,
-	AR5K_DMASIZE_256B,
-	AR5K_DMASIZE_512B
-};
 
 
 /****************\
   RX DEFINITIONS
 \****************/
 
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dbm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
  */
 struct ath5k_rx_status {
 	u16	rs_datalen;
@@ -645,10 +747,18 @@
 #define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
 
 
+
 /*******************************\
   GAIN OPTIMIZATION DEFINITIONS
 \*******************************/
 
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
 enum ath5k_rfgain {
 	AR5K_RFGAIN_INACTIVE = 0,
 	AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@
 	AR5K_RFGAIN_NEED_CHANGE,
 };
 
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
 struct ath5k_gain {
 	u8			g_step_idx;
 	u8			g_current;
@@ -666,6 +786,8 @@
 	u8			g_state;
 };
 
+
+
 /********************\
   COMMON DEFINITIONS
 \********************/
@@ -674,9 +796,14 @@
 #define AR5K_SLOT_TIME_20	880
 #define AR5K_SLOT_TIME_MAX	0xffff
 
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on 2111 frequency converter
+ * that comes together with RF5111
  * TODO: Clean up
  */
 struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@
 	u16	a2_athchan;
 };
 
+/**
+ * enum ath5k_dmasize -  DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4Bytes,
+ * so be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+	AR5K_DMASIZE_4B	= 0,
+	AR5K_DMASIZE_8B,
+	AR5K_DMASIZE_16B,
+	AR5K_DMASIZE_32B,
+	AR5K_DMASIZE_64B,
+	AR5K_DMASIZE_128B,
+	AR5K_DMASIZE_256B,
+	AR5K_DMASIZE_512B
+};
+
+
 
 /******************\
   RATE DEFINITIONS
 \******************/
 
 /**
+ * DOC: Rate codes
+ *
  * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
  *
  * The rate code is used to get the RX rate or set the TX rate on the
  * hardware descriptors. It is also used for internal modulation control
  * and settings.
  *
- * This is the hardware rate map we are aware of:
+ * This is the hardware rate map we are aware of (html unfriendly):
  *
- * rate_code   0x01    0x02    0x03    0x04    0x05    0x06    0x07    0x08
- * rate_kbps   3000    1000    ?       ?       ?       2000    500     48000
+ * Rate code	Rate (Kbps)
+ * ---------	-----------
+ * 0x01		 3000 (XR)
+ * 0x02		 1000 (XR)
+ * 0x03		  250 (XR)
+ * 0x04 - 05	-Reserved-
+ * 0x06		 2000 (XR)
+ * 0x07		  500 (XR)
+ * 0x08		48000 (OFDM)
+ * 0x09		24000 (OFDM)
+ * 0x0A		12000 (OFDM)
+ * 0x0B		 6000 (OFDM)
+ * 0x0C		54000 (OFDM)
+ * 0x0D		36000 (OFDM)
+ * 0x0E		18000 (OFDM)
+ * 0x0F		 9000 (OFDM)
+ * 0x10 - 17	-Reserved-
+ * 0x18		11000L (CCK)
+ * 0x19		 5500L (CCK)
+ * 0x1A		 2000L (CCK)
+ * 0x1B		 1000L (CCK)
+ * 0x1C		11000S (CCK)
+ * 0x1D		 5500S (CCK)
+ * 0x1E		 2000S (CCK)
+ * 0x1F		-Reserved-
  *
- * rate_code   0x09    0x0A    0x0B    0x0C    0x0D    0x0E    0x0F    0x10
- * rate_kbps   24000   12000   6000    54000   36000   18000   9000    ?
- *
- * rate_code   17      18      19      20      21      22      23      24
- * rate_kbps   ?       ?       ?       ?       ?       ?       ?       11000
- *
- * rate_code   25      26      27      28      29      30      31      32
- * rate_kbps   5500    2000    1000    11000S  5500S   2000S   ?       ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
  *
  * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
  * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
  * We handle this in ath5k_setup_bands().
  */
@@ -733,13 +904,9 @@
 #define ATH5K_RATE_CODE_36M	0x0D
 #define ATH5K_RATE_CODE_48M	0x08
 #define ATH5K_RATE_CODE_54M	0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K	0x07
-#define ATH5K_RATE_CODE_XR_1M	0x02
-#define ATH5K_RATE_CODE_XR_2M	0x06
-#define ATH5K_RATE_CODE_XR_3M	0x01
 
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
 #define AR5K_SET_SHORT_PREAMBLE 0x04
 
 /*
@@ -747,7 +914,7 @@
  */
 
 #define AR5K_KEYCACHE_SIZE	8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
 
 /***********************\
  HW RELATED DEFINITIONS
@@ -769,49 +936,65 @@
 
 /**
  * enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ *		not always fatal, on some chips we can continue operation
+ *		without resetting the card, that's why %AR5K_INT_FATAL is not
+ *		common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
  *
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- *	AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- *	Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- *	LinkPtr is NULL. For more details, refer to:
- *	http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- *	Note that Rx overrun is not always fatal, on some chips we can continue
- *	operation without resetting the card, that's why int_fatal is not
- *	common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- *	AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- *	We currently do increments on interrupt by
- *	(AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ *		Queue Control Unit (QCU) signals an EOL interrupt only if a
+ *		descriptor's LinkPtr is NULL. For more details, refer to:
+ *		"http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such a case we should
+ *		increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
+ *
 * @AR5K_INT_MIB: Indicates that either Management Information Base counters or
- *	one of the PHY error counters reached the maximum value and should be
- *	read and cleared.
+ *		one of the PHY error counters reached the maximum value and
+ *		should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
  * @AR5K_INT_RXPHY: RX PHY Error
  * @AR5K_INT_RXKCM: RX Key cache miss
  * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- *	beacon that must be handled in software. The alternative is if you
- *	have VEOL support, in that case you let the hardware deal with things.
+ *		beacon that must be handled in software. The alternative is if
+ *		you have VEOL support, in that case you let the hardware deal
+ *		with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
  * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- *	beacons from the AP have associated with, we should probably try to
- *	reassociate. When in IBSS mode this might mean we have not received
- *	any beacons from any local stations. Note that every station in an
- *	IBSS schedules to send beacons at the Target Beacon Transmission Time
- *	(TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- *	until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- *	errors. These types of errors we can enable seem to be of type
- *	AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ *		beacons from the AP we have associated with, we should probably
+ *		try to reassociate. When in IBSS mode this might mean we have
+ *		not received any beacons from any local stations. Note that
+ *		every station in an IBSS schedules to send beacons at the
+ *		Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ *		our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ *		nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue got its CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ *		errors. Indicates we need to reset the card.
  * @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- *	bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ *		bit value
  *
  * These are mapped to take advantage of some common bits
  * between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@
 	AR5K_INT_GPIO	=	0x01000000,
 	AR5K_INT_BCN_TIMEOUT =	0x02000000, /* Non common */
 	AR5K_INT_CAB_TIMEOUT =	0x04000000, /* Non common */
-	AR5K_INT_RX_DOPPLER =	0x08000000, /* Non common */
-	AR5K_INT_QCBRORN =	0x10000000, /* Non common */
-	AR5K_INT_QCBRURN =	0x20000000, /* Non common */
-	AR5K_INT_QTRIG	=	0x40000000, /* Non common */
+	AR5K_INT_QCBRORN =	0x08000000, /* Non common */
+	AR5K_INT_QCBRURN =	0x10000000, /* Non common */
+	AR5K_INT_QTRIG	=	0x20000000, /* Non common */
 	AR5K_INT_GLOBAL =	0x80000000,
 
 	AR5K_INT_TX_ALL = AR5K_INT_TXOK
 		| AR5K_INT_TXDESC
 		| AR5K_INT_TXERR
+		| AR5K_INT_TXNOFRM
 		| AR5K_INT_TXEOL
 		| AR5K_INT_TXURN,
 
@@ -891,15 +1074,32 @@
 	AR5K_INT_NOCARD	= 0xffffffff
 };
 
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
 enum ath5k_calibration_mask {
 	AR5K_CALIBRATION_FULL = 0x01,
 	AR5K_CALIBRATION_SHORT = 0x02,
-	AR5K_CALIBRATION_ANI = 0x04,
+	AR5K_CALIBRATION_NF = 0x04,
+	AR5K_CALIBRATION_ANI = 0x08,
 };
 
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow card to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used, FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are also known to have problems on some cards. This is not a big
+ * problem though because we can have almost the same effect as
+ * FULL_SLEEP by putting card on warm reset (it's almost powered down).
  */
 enum ath5k_power_mode {
 	AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@
 	} cap_queues;
 
 	bool cap_has_phyerr_counters;
+	bool cap_has_mrr_support;
+	bool cap_needs_2GHz_ovr;
 };
 
 /* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@
 	dma_addr_t		desc_daddr;	/* DMA (physical) address */
 	size_t			desc_len;	/* size of TX/RX descriptors */
 
-	DECLARE_BITMAP(status, 6);
+	DECLARE_BITMAP(status, 4);
 #define ATH_STAT_INVALID	0		/* disable hardware accesses */
-#define ATH_STAT_MRRETRY	1		/* multi-rate retry support */
-#define ATH_STAT_PROMISC	2
-#define ATH_STAT_LEDSOFT	3		/* enable LED gpio status */
-#define ATH_STAT_STARTED	4		/* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED	5		/* multiband radio without 2G */
+#define ATH_STAT_PROMISC	1
+#define ATH_STAT_LEDSOFT	2		/* enable LED gpio status */
+#define ATH_STAT_STARTED	3		/* opened & irqs enabled */
 
 	unsigned int		filter_flags;	/* HW flags, AR5K_RX_FILTER_* */
 	struct ieee80211_channel *curchan;	/* current h/w channel */
@@ -1097,6 +1297,7 @@
 				led_on;		/* pin setting for LED on */
 
 	struct work_struct	reset_work;	/* deferred chip reset */
+	struct work_struct	calib_work;	/* deferred phy calibration */
 
 	struct list_head	rxbuf;		/* receive buffer */
 	spinlock_t		rxbuflock;
@@ -1113,8 +1314,6 @@
 
 	struct ath5k_rfkill	rf_kill;
 
-	struct tasklet_struct	calib;		/* calibration tasklet */
-
 	spinlock_t		block;		/* protects beacon */
 	struct tasklet_struct	beacontq;	/* beacon intr tasklet */
 	struct list_head	bcbuf;		/* beacon buffer */
@@ -1144,7 +1343,7 @@
 	enum ath5k_int		ah_imr;
 
 	struct ieee80211_channel *ah_current_channel;
-	bool			ah_calibration;
+	bool			ah_iq_cal_needed;
 	bool			ah_single_chip;
 
 	enum ath5k_version	ah_version;
@@ -1187,7 +1386,13 @@
 	u32			ah_txq_imr_cbrurn;
 	u32			ah_txq_imr_qtrig;
 	u32			ah_txq_imr_nofrm;
-	u32			ah_txq_isr;
+
+	u32			ah_txq_isr_txok_all;
+	u32			ah_txq_isr_txurn;
+	u32			ah_txq_isr_qcborn;
+	u32			ah_txq_isr_qcburn;
+	u32			ah_txq_isr_qtrig;
+
 	u32			*ah_rf_banks;
 	size_t			ah_rf_banks_size;
 	size_t			ah_rf_regs_count;
@@ -1228,8 +1433,8 @@
 
 	/* Calibration timestamp */
 	unsigned long		ah_cal_next_full;
+	unsigned long		ah_cal_next_short;
 	unsigned long		ah_cal_next_ani;
-	unsigned long		ah_cal_next_nf;
 
 	/* Calibration mask */
 	u8			ah_cal_mask;
@@ -1338,11 +1543,11 @@
 u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
 void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
 void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+							u32 interval);
 bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
 /* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
-								u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
 
 /* Queue Control Unit, DFS Control Unit Functions */
 int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 91627dd..d7114c7 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -27,8 +27,7 @@
 #include "debug.h"
 
 /**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
  * @ah: The &struct ath5k_hw
  */
 static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@
 }
 
 /**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
  * @ah: The &struct ath5k_hw associated with the device
  *
  * Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@
 
 		/* Reset SERDES to load new settings */
 		ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
-		mdelay(1);
+		usleep_range(1000, 1500);
 	}
 
 	/* Get misc capabilities */
@@ -308,11 +306,6 @@
 		goto err;
 	}
 
-	if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
-		__clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
-		__clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
-	}
-
 	/* Crypto settings */
 	common->keymax = (ah->ah_version == AR5K_AR5210 ?
 			  AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@
 }
 
 /**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
  * @ah: The &struct ath5k_hw
  */
 void ath5k_hw_deinit(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b346d04..d366dad 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -68,18 +68,23 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
 module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
-static int modparam_all_channels;
+static bool modparam_all_channels;
 module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
 MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
 
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
 module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
 MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
 
+static int ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+								bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
 
 /* Module info */
 MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@
 	{ .bitrate = 540,
 	  .hw_value = ATH5K_RATE_CODE_54M,
 	  .flags = 0 },
-	/* XR missing */
 };
 
 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,22 +725,25 @@
 	if (ret)
 		goto err_unmap;
 
-	memset(mrr_rate, 0, sizeof(mrr_rate));
-	memset(mrr_tries, 0, sizeof(mrr_tries));
-	for (i = 0; i < 3; i++) {
-		rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
-		if (!rate)
-			break;
+	/* Set up MRR descriptor */
+	if (ah->ah_capabilities.cap_has_mrr_support) {
+		memset(mrr_rate, 0, sizeof(mrr_rate));
+		memset(mrr_tries, 0, sizeof(mrr_tries));
+		for (i = 0; i < 3; i++) {
+			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+			if (!rate)
+				break;
 
-		mrr_rate[i] = rate->hw_value;
-		mrr_tries[i] = info->control.rates[i + 1].count;
+			mrr_rate[i] = rate->hw_value;
+			mrr_tries[i] = info->control.rates[i + 1].count;
+		}
+
+		ath5k_hw_setup_mrr_tx_desc(ah, ds,
+			mrr_rate[0], mrr_tries[0],
+			mrr_rate[1], mrr_tries[1],
+			mrr_rate[2], mrr_tries[2]);
 	}
 
-	ath5k_hw_setup_mrr_tx_desc(ah, ds,
-		mrr_rate[0], mrr_tries[0],
-		mrr_rate[1], mrr_tries[1],
-		mrr_rate[2], mrr_tries[2]);
-
 	ds->ds_link = 0;
 	ds->ds_data = bf->skbaddr;
 
@@ -1689,7 +1696,7 @@
 	struct ath5k_hw *ah = (void *)data;
 
 	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
-		if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
 			ath5k_tx_processq(ah, &ah->txqs[i]);
 
 	ah->tx_pending = false;
@@ -2005,7 +2012,7 @@
 	ah->nexttbtt = nexttbtt;
 
 	intval |= AR5K_BEACON_ENA;
-	ath5k_hw_init_beacon(ah, nexttbtt, intval);
+	ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
 
 	/*
 	 * debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@
 ath5k_intr_calibration_poll(struct ath5k_hw *ah)
 {
 	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
-	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
-		/* run ANI only when full calibration is not active */
+	   !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+	   !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+		/* Run ANI only when calibration is not active */
+
 		ah->ah_cal_next_ani = jiffies +
 			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
 		tasklet_schedule(&ah->ani_tasklet);
 
-	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
-		ah->ah_cal_next_full = jiffies +
-			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
-		tasklet_schedule(&ah->calib);
+	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+		!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+		!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+		/* Run calibration only when another calibration
+		 * is not running.
+		 *
+		 * Note: This is for both full/short calibration,
+		 * if it's time for a full one, ath5k_calibrate_work will deal
+		 * with it. */
+
+		ah->ah_cal_next_short = jiffies +
+			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+		ieee80211_queue_work(ah->hw, &ah->calib_work);
 	}
 	/* we could use SWI to generate enough interrupts to meet our
 	 * calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@
 	enum ath5k_int status;
 	unsigned int counter = 1000;
 
+
+	/*
+	 * If hw is not ready (or detached) and we get an
+	 * interrupt, or if we have no interrupts pending
+	 * (that means it's not for us) skip it.
+	 *
+	 * NOTE: Group 0/1 PCI interface registers are not
+	 * supported on WiSOCs, so we can't check for pending
+	 * interrupts (ISR belongs to another register group
+	 * so we are ok).
+	 */
 	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
-		((ath5k_get_bus_type(ah) != ATH_AHB) &&
-				!ath5k_hw_is_intr_pending(ah))))
+			((ath5k_get_bus_type(ah) != ATH_AHB) &&
+			!ath5k_hw_is_intr_pending(ah))))
 		return IRQ_NONE;
 
+	/** Main loop **/
 	do {
-		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
+		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */
+
 		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
 				status, ah->imask);
+
+		/*
+		 * Fatal hw error -> Log and reset
+		 *
+		 * Fatal errors are unrecoverable so we have to
+		 * reset the card. These errors include bus and
+		 * dma errors.
+		 */
 		if (unlikely(status & AR5K_INT_FATAL)) {
-			/*
-			 * Fatal errors are unrecoverable.
-			 * Typically these are caused by DMA errors.
-			 */
+
 			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
 				  "fatal int, resetting\n");
 			ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+		/*
+		 * RX Overrun -> Count and reset if needed
+		 *
+		 * Receive buffers are full. Either the bus is busy or
+		 * the CPU is not fast enough to process all received
+		 * frames.
+		 */
 		} else if (unlikely(status & AR5K_INT_RXORN)) {
+
 			/*
-			 * Receive buffers are full. Either the bus is busy or
-			 * the CPU is not fast enough to process all received
-			 * frames.
 			 * Older chipsets need a reset to come out of this
 			 * condition, but we treat it as RX for newer chips.
-			 * We don't know exactly which versions need a reset -
+			 * We don't know exactly which versions need a reset
 			 * this guess is copied from the HAL.
 			 */
 			ah->stats.rxorn_intr++;
+
 			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
 				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
 					  "rx overrun, resetting\n");
 				ieee80211_queue_work(ah->hw, &ah->reset_work);
 			} else
 				ath5k_schedule_rx(ah);
+
 		} else {
+
+			/* Software Beacon Alert -> Schedule beacon tasklet */
 			if (status & AR5K_INT_SWBA)
 				tasklet_hi_schedule(&ah->beacontq);
 
-			if (status & AR5K_INT_RXEOL) {
-				/*
-				* NB: the hardware should re-read the link when
-				*     RXE bit is written, but it doesn't work at
-				*     least on older hardware revs.
-				*/
+			/*
+			 * No more RX descriptors -> Just count
+			 *
+			 * NB: the hardware should re-read the link when
+			 *     RXE bit is written, but it doesn't work at
+			 *     least on older hardware revs.
+			 */
+			if (status & AR5K_INT_RXEOL)
 				ah->stats.rxeol_intr++;
-			}
-			if (status & AR5K_INT_TXURN) {
-				/* bump tx trigger level */
+
+
+			/* TX Underrun -> Bump tx trigger level */
+			if (status & AR5K_INT_TXURN)
 				ath5k_hw_update_tx_triglevel(ah, true);
-			}
+
+			/* RX -> Schedule rx tasklet */
 			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
 				ath5k_schedule_rx(ah);
-			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
-					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+			/* TX -> Schedule tx tasklet */
+			if (status & (AR5K_INT_TXOK
+					| AR5K_INT_TXDESC
+					| AR5K_INT_TXERR
+					| AR5K_INT_TXEOL))
 				ath5k_schedule_tx(ah);
-			if (status & AR5K_INT_BMISS) {
-				/* TODO */
-			}
+
+			/* Missed beacon -> TODO
+			if (status & AR5K_INT_BMISS)
+			*/
+
+			/* MIB event -> Update counters and notify ANI */
 			if (status & AR5K_INT_MIB) {
 				ah->stats.mib_intr++;
 				ath5k_hw_update_mib_counters(ah);
 				ath5k_ani_mib_intr(ah);
 			}
+
+			/* GPIO -> Notify RFKill layer */
 			if (status & AR5K_INT_GPIO)
 				tasklet_schedule(&ah->rf_kill.toggleq);
 
@@ -2222,12 +2283,19 @@
 
 	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
 
+	/*
+	 * Until we handle the rx/tx interrupts, mask them on IMR
+	 *
+	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+	 * and unset after we've handled the interrupts.
+	 */
 	if (ah->rx_pending || ah->tx_pending)
 		ath5k_set_current_imask(ah);
 
 	if (unlikely(!counter))
 		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
 
+	/* Fire up calibration poll */
 	ath5k_intr_calibration_poll(ah);
 
 	return IRQ_HANDLED;
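
For reference, a minimal sketch of the rx/tx masking mentioned in the comment above. Only ath5k_set_current_imask() and the rx/tx_pending flags appear in this patch; the helper body, the irqlock name and the AR5K_INT_RX_ALL/AR5K_INT_TX_ALL mask names are assumptions used for illustration.

static void ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);		/* lock name assumed */
	imask = ah->imask;
	/* Keep rx/tx sources masked while their tasklets are still pending */
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;		/* mask name assumed */
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;		/* mask name assumed */
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}
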
@@ -2238,41 +2306,58 @@
  * for temperature/environment changes.
  */
 static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
 {
-	struct ath5k_hw *ah = (void *)data;
+	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+		calib_work);
 
-	/* Only full calibration for now */
-	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+	/* Should we run a full calibration ? */
+	if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+		ah->ah_cal_next_full = jiffies +
+			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+		ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+				"running full calibration\n");
+
+		if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+			/*
+			 * Rfgain is out of bounds, reset the chip
+			 * to load new gain values.
+			 */
+			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+					"got new rfgain, resetting\n");
+			ieee80211_queue_work(ah->hw, &ah->reset_work);
+		}
+
+		/* TODO: On full calibration we should stop TX here,
+		 * so that it doesn't interfere (mostly due to gain_f
+		 * calibration that messes with tx packets, see phy.c).
+		 *
+		 * NOTE: Stopping the queues from above is not enough
+		 * to stop TX but saves us from disconnecting (at least
+		 * we don't lose packets). */
+		ieee80211_stop_queues(ah->hw);
+	} else
+		ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
+
 
 	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
 		ieee80211_frequency_to_channel(ah->curchan->center_freq),
 		ah->curchan->hw_value);
 
-	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
-		/*
-		 * Rfgain is out of bounds, reset the chip
-		 * to load new gain values.
-		 */
-		ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
-		ieee80211_queue_work(ah->hw, &ah->reset_work);
-	}
 	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
 		ATH5K_ERR(ah, "calibration of channel %u failed\n",
 			ieee80211_frequency_to_channel(
 				ah->curchan->center_freq));
 
-	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
-	 * doesn't.
-	 * TODO: We should stop TX here, so that it doesn't interfere.
-	 * Note that stopping the queues is not enough to stop TX! */
-	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
-		ah->ah_cal_next_nf = jiffies +
-			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
-		ath5k_hw_update_noise_floor(ah);
-	}
-
-	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+	/* Clear calibration flags */
+	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+		ieee80211_wake_queues(ah->hw);
+		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+	} else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+		ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
 }
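
The conversion above replaces the calibration tasklet with a work item so the handler may sleep. Collected in one place as a sketch, the wiring looks as follows; the pieces appear in this patch, while the exact place it gets queued from (assumed here to be the calibration poll) is not shown:

/* At attach time (see the INIT_WORK hunk further down): */
INIT_WORK(&ah->calib_work, ath5k_calibrate_work);

/* In the handler, the ath5k_hw is recovered from the work pointer: */
struct ath5k_hw *ah = container_of(work, struct ath5k_hw, calib_work);

/* And it is queued on mac80211's workqueue, e.g. from the calibration poll: */
ieee80211_queue_work(ah->hw, &ah->calib_work);
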
 
 
@@ -2407,8 +2492,8 @@
 	if (ret)
 		goto err_irq;
 
-	/* set up multi-rate retry capabilities */
-	if (ah->ah_version == AR5K_AR5212) {
+	/* Set up multi-rate retry capabilities */
+	if (ah->ah_capabilities.cap_has_mrr_support) {
 		hw->max_rates = 4;
 		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
 					 AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@
 	 * and then setup of the interrupt mask.
 	 */
 	ah->curchan = ah->hw->conf.channel;
-	ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
-		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
-		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+	ah->imask = AR5K_INT_RXOK
+		| AR5K_INT_RXERR
+		| AR5K_INT_RXEOL
+		| AR5K_INT_RXORN
+		| AR5K_INT_TXDESC
+		| AR5K_INT_TXEOL
+		| AR5K_INT_FATAL
+		| AR5K_INT_GLOBAL
+		| AR5K_INT_MIB;
 
 	ret = ath5k_reset(ah, NULL, false);
 	if (ret)
 		goto done;
 
-	ath5k_rfkill_hw_start(ah);
+	if (!ath5k_modparam_no_hw_rfkill_switch)
+		ath5k_rfkill_hw_start(ah);
 
 	/*
 	 * Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@
 	ah->tx_pending = false;
 	tasklet_kill(&ah->rxtq);
 	tasklet_kill(&ah->txtq);
-	tasklet_kill(&ah->calib);
 	tasklet_kill(&ah->beacontq);
 	tasklet_kill(&ah->ani_tasklet);
 }
@@ -2637,7 +2728,8 @@
 
 	cancel_delayed_work_sync(&ah->tx_complete_work);
 
-	ath5k_rfkill_hw_stop(ah);
+	if (!ath5k_modparam_no_hw_rfkill_switch)
+		ath5k_rfkill_hw_stop(ah);
 }
 
 /*
@@ -2689,9 +2781,24 @@
 
 	ath5k_ani_init(ah, ani_mode);
 
-	ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
-	ah->ah_cal_next_ani = jiffies;
-	ah->ah_cal_next_nf = jiffies;
+	/*
+	 * Set calibration intervals
+	 *
+	 * Note: We don't need to run calibration immediately
+	 * since some initial calibration is done on reset
+	 * even for fast channel switching. Also while scanning
+	 * these deadlines get pushed back again and again, so
+	 * calibration won't run unless we connect somewhere and
+	 * spend some time on the channel (that's what calibration
+	 * needs anyway to be accurate).
+	 */
+	ah->ah_cal_next_full = jiffies +
+		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+	ah->ah_cal_next_ani = jiffies +
+		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+	ah->ah_cal_next_short = jiffies +
+		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
 	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
 
 	/* clear survey data and cycle counters */
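
An illustrative sketch (not from this patch) of how one of the deadlines set above is typically consumed: time_is_before_eq_jiffies(t) becomes true once jiffies has passed t, so the check stays cheap until the interval expires and the deadline is re-armed. The trigger point shown is an assumption.

if (time_is_before_eq_jiffies(ah->ah_cal_next_short)) {
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
	ieee80211_queue_work(ah->hw, &ah->calib_work);	/* assumed trigger */
}
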
@@ -2745,20 +2852,6 @@
 
 
 	/*
-	 * Check if the MAC has multi-rate retry support.
-	 * We do this by trying to setup a fake extended
-	 * descriptor.  MACs that don't have support will
-	 * return false w/o doing anything.  MACs that do
-	 * support it will return true w/o doing anything.
-	 */
-	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
-	if (ret < 0)
-		goto err;
-	if (ret > 0)
-		__set_bit(ATH_STAT_MRRETRY, ah->status);
-
-	/*
 	 * Collect the channel list.  The 802.11 layer
 	 * is responsible for filtering this list based
 	 * on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@
 
 	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
 	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
-	tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
 	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
 	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
 
 	INIT_WORK(&ah->reset_work, ath5k_reset_work);
+	INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
 	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
 
 	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 810fba9..994169a 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -85,12 +85,19 @@
 			caps->cap_range.range_2ghz_min = 2412;
 			caps->cap_range.range_2ghz_max = 2732;
 
-			if (AR5K_EEPROM_HDR_11B(ee_header))
-				__set_bit(AR5K_MODE_11B, caps->cap_mode);
+			/* Override 2GHz modes on SoCs that need it
+			 * NOTE: cap_needs_2GHz_ovr gets set from
+			 * ath_ahb_probe */
+			if (!caps->cap_needs_2GHz_ovr) {
+				if (AR5K_EEPROM_HDR_11B(ee_header))
+					__set_bit(AR5K_MODE_11B,
+							caps->cap_mode);
 
-			if (AR5K_EEPROM_HDR_11G(ee_header) &&
-			    ah->ah_version != AR5K_AR5211)
-				__set_bit(AR5K_MODE_11G, caps->cap_mode);
+				if (AR5K_EEPROM_HDR_11G(ee_header) &&
+				    ah->ah_version != AR5K_AR5211)
+					__set_bit(AR5K_MODE_11G,
+							caps->cap_mode);
+			}
 		}
 	}
 
@@ -103,12 +110,18 @@
 	else
 		caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
 
-	/* newer hardware has PHY error counters */
+	/* Newer hardware has PHY error counters */
 	if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
 		caps->cap_has_phyerr_counters = true;
 	else
 		caps->cap_has_phyerr_counters = false;
 
+	/* MACs since AR5212 have MRR support */
+	if (ah->ah_version == AR5K_AR5212)
+		caps->cap_has_mrr_support = true;
+	else
+		caps->cap_has_mrr_support = false;
+
 	return 0;
 }
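
Per the comment above, cap_needs_2GHz_ovr is set from ath_ahb_probe() before capabilities are parsed. A hypothetical sketch of that wiring; the platform-data field name is invented for illustration, only cap_needs_2GHz_ovr comes from this patch.

static void ath5k_ahb_apply_board_quirks(struct ath5k_hw *ah,
					 struct ath5k_platform_data *pdata)
{
	/* force_2ghz_modes is a hypothetical field name */
	if (pdata && pdata->force_2ghz_modes)
		ah->ah_capabilities.cap_needs_2GHz_ovr = true;
}
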
 
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7e88dda..f8bfa3a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,20 +26,61 @@
 #include "debug.h"
 
 
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kinds of
+ * descriptors for RX and TX: control descriptors, which tell the hw how to
+ * send or receive a packet and where to read/write it from/to, and status
+ * descriptors, which contain information about how the packet was sent or
+ * received (errors included).
+ *
+ * The descriptor format is not exactly the same for each MAC chip version, so
+ * we have function pointers on &struct ath5k_hw that we initialize at runtime
+ * based on the chip in use.
+ */
+
+
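
A short sketch of how callers use the abstraction described above: they never pick a chip-specific helper directly but go through the pointers that ath5k_hw_init_desc_functions() fills in (see the end of this file). Variable names below are illustrative and the ah_proc_tx_desc pointer name is an assumption.

ret = ah->ah_setup_tx_desc(ah, ds, pktlen, hdrlen, padsize, type,
			   txpower, rate, tries, keyidx, antenna,
			   flags, cts_rate, cts_duration);

/* later, on tx completion, the matching status helper is called */
ret = ah->ah_proc_tx_desc(ah, ds, &ts);
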
 /************************\
 * TX Control descriptors *
 \************************/
 
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
 static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-	unsigned int pkt_len, unsigned int hdr_len, int padsize,
-	enum ath5k_pkt_type type,
-	unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
-	unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
-	unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+			struct ath5k_desc *desc,
+			unsigned int pkt_len, unsigned int hdr_len,
+			int padsize,
+			enum ath5k_pkt_type type,
+			unsigned int tx_power,
+			unsigned int tx_rate0, unsigned int tx_tries0,
+			unsigned int key_index,
+			unsigned int antenna_mode,
+			unsigned int flags,
+			unsigned int rtscts_rate, unsigned int rtscts_duration)
 {
 	u32 frame_type;
 	struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@
 	return 0;
 }
 
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
-	struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
-	int padsize,
-	enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
-	unsigned int tx_tries0, unsigned int key_index,
-	unsigned int antenna_mode, unsigned int flags,
-	unsigned int rtscts_rate,
-	unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+			struct ath5k_desc *desc,
+			unsigned int pkt_len, unsigned int hdr_len,
+			int padsize,
+			enum ath5k_pkt_type type,
+			unsigned int tx_power,
+			unsigned int tx_rate0, unsigned int tx_tries0,
+			unsigned int key_index,
+			unsigned int antenna_mode,
+			unsigned int flags,
+			unsigned int rtscts_rate, unsigned int rtscts_duration)
 {
 	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 	unsigned int frame_len;
@@ -292,13 +356,29 @@
 	return 0;
 }
 
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs; they are part of the normal 4-word tx control descriptor (see above)
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
 int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
-	u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+			struct ath5k_desc *desc,
+			u_int tx_rate1, u_int tx_tries1,
+			u_int tx_rate2, u_int tx_tries2,
+			u_int tx_rate3, u_int tx_tries3)
 {
 	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 
@@ -350,11 +430,16 @@
 * TX Status descriptors *
 \***********************/
 
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
  */
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+				struct ath5k_desc *desc,
+				struct ath5k_tx_status *ts)
 {
 	struct ath5k_hw_2w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@
 	return 0;
 }
 
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
  */
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+				struct ath5k_desc *desc,
+				struct ath5k_tx_status *ts)
 {
 	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@
 * RX Descriptors *
 \****************/
 
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
  */
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-			   u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+			struct ath5k_desc *desc,
+			u32 size, unsigned int flags)
 {
 	struct ath5k_hw_rx_ctl *rx_ctl;
 
@@ -491,11 +587,22 @@
 	return 0;
 }
 
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
  */
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
-		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+				struct ath5k_desc *desc,
+				struct ath5k_rx_status *rs)
 {
 	struct ath5k_hw_rx_status *rx_status;
 
@@ -574,12 +681,22 @@
 	return 0;
 }
 
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
  */
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
-					struct ath5k_desc *desc,
-					struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+				struct ath5k_desc *desc,
+				struct ath5k_rx_status *rs)
 {
 	struct ath5k_hw_rx_status *rx_status;
 	u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@
 * Attach *
 \********/
 
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, so
+ * they can be used from the driver above. This acts as an abstraction layer to
+ * handle the various chips the same way.
  */
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
 {
 	if (ah->ah_version == AR5K_AR5212) {
 		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index cfd529b5..8d6c01a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -20,25 +20,30 @@
  * RX/TX descriptor structures
  */
 
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
  */
 struct ath5k_hw_rx_ctl {
-	u32	rx_control_0; /* RX control word 0 */
-	u32	rx_control_1; /* RX control word 1 */
+	u32	rx_control_0;
+	u32	rx_control_1;
 } __packed __aligned(4);
 
 /* RX control word 1 fields/flags */
 #define AR5K_DESC_RX_CTL1_BUF_LEN		0x00000fff /* data buffer length */
 #define AR5K_DESC_RX_CTL1_INTREQ		0x00002000 /* RX interrupt request */
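
The control/status words documented in this file are plain bitfields, accessed with the masks above (shifted fields elsewhere in the driver go through AR5K_REG_MS()). A small illustration, roughly what ath5k_hw_setup_rx_desc() does; variable names are illustrative and the AR5K_RXDESC_INTREQ flag name is assumed from the AR5K_RXDESC_* family.

rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
if (flags & AR5K_RXDESC_INTREQ)			/* flag name assumed */
	rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
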
 
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
  * 5210, 5211 and 5212 differ only in the fields and flags defined below
  */
 struct ath5k_hw_rx_status {
-	u32	rx_status_0; /* RX status word 0 */
-	u32	rx_status_1; /* RX status word 1 */
+	u32	rx_status_0;
+	u32	rx_status_1;
 } __packed __aligned(4);
 
 /* 5210/5211 */
@@ -98,17 +103,36 @@
 
 /**
  * enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
  */
 enum ath5k_phy_error_code {
-	AR5K_RX_PHY_ERROR_UNDERRUN		= 0,	/* Transmit underrun, [5210] No error */
-	AR5K_RX_PHY_ERROR_TIMING		= 1,	/* Timing error */
-	AR5K_RX_PHY_ERROR_PARITY		= 2,	/* Illegal parity */
-	AR5K_RX_PHY_ERROR_RATE			= 3,	/* Illegal rate */
-	AR5K_RX_PHY_ERROR_LENGTH		= 4,	/* Illegal length */
-	AR5K_RX_PHY_ERROR_RADAR			= 5,	/* Radar detect, [5210] 64 QAM rate */
-	AR5K_RX_PHY_ERROR_SERVICE		= 6,	/* Illegal service */
-	AR5K_RX_PHY_ERROR_TOR			= 7,	/* Transmit override receive */
-	/* these are specific to the 5212 */
+	AR5K_RX_PHY_ERROR_UNDERRUN		= 0,
+	AR5K_RX_PHY_ERROR_TIMING		= 1,
+	AR5K_RX_PHY_ERROR_PARITY		= 2,
+	AR5K_RX_PHY_ERROR_RATE			= 3,
+	AR5K_RX_PHY_ERROR_LENGTH		= 4,
+	AR5K_RX_PHY_ERROR_RADAR			= 5,
+	AR5K_RX_PHY_ERROR_SERVICE		= 6,
+	AR5K_RX_PHY_ERROR_TOR			= 7,
 	AR5K_RX_PHY_ERROR_OFDM_TIMING		= 17,
 	AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY	= 18,
 	AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL	= 19,
@@ -123,12 +147,14 @@
 	AR5K_RX_PHY_ERROR_CCK_RESTART		= 31,
 };
 
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl  - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
  */
 struct ath5k_hw_2w_tx_ctl {
-	u32	tx_control_0; /* TX control word 0 */
-	u32	tx_control_1; /* TX control word 1 */
+	u32	tx_control_0;
+	u32	tx_control_1;
 } __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@
 #define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS	4
 #define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP	4
 
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
  */
 struct ath5k_hw_4w_tx_ctl {
-	u32	tx_control_0; /* TX control word 0 */
-	u32	tx_control_1; /* TX control word 1 */
-	u32	tx_control_2; /* TX control word 2 */
-	u32	tx_control_3; /* TX control word 3 */
+	u32	tx_control_0;
+	u32	tx_control_1;
+	u32	tx_control_2;
+	u32	tx_control_3;
 } __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE	0x01f00000 /* RTS or CTS rate */
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S	20
 
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
  */
 struct ath5k_hw_tx_status {
-	u32	tx_status_0; /* TX status word 0 */
-	u32	tx_status_1; /* TX status word 1 */
+	u32	tx_status_0;
+	u32	tx_status_1;
 } __packed __aligned(4);
 
 /* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@
 #define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212	0x00800000 /* [5212] compression status */
 #define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212	0x01000000 /* [5212] transmit antenna */
 
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
  */
 struct ath5k_hw_5210_tx_desc {
 	struct ath5k_hw_2w_tx_ctl	tx_ctl;
 	struct ath5k_hw_tx_status	tx_stat;
 } __packed __aligned(4);
 
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
  */
 struct ath5k_hw_5212_tx_desc {
 	struct ath5k_hw_4w_tx_ctl	tx_ctl;
 	struct ath5k_hw_tx_status	tx_stat;
 } __packed __aligned(4);
 
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
  */
 struct ath5k_hw_all_rx_desc {
 	struct ath5k_hw_rx_ctl		rx_ctl;
 	struct ath5k_hw_rx_status	rx_stat;
 } __packed __aligned(4);
 
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
  * This is read and written to by the hardware
  */
 struct ath5k_desc {
-	u32	ds_link;	/* physical address of the next descriptor */
-	u32	ds_data;	/* physical address of data buffer (skb) */
+	u32	ds_link;
+	u32	ds_data;
 
 	union {
 		struct ath5k_hw_5210_tx_desc	ds_tx5210;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 2481f9c..5cc9aa8 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -20,16 +20,13 @@
 * DMA and interrupt masking functions *
 \*************************************/
 
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
  *
  * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
  * handle queue setup for 5210 chipset (rest are handled on qcu.c).
  * Also we setup interrupt mask register (IMR) and read the various interrupt
  * status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
  */
 
 #include "ath5k.h"
@@ -42,22 +39,22 @@
 \*********/
 
 /**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
  * @ah:	The &struct ath5k_hw
  */
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
 {
 	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
 	ath5k_hw_reg_read(ah, AR5K_CR);
 }
 
 /**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
  * @ah:	The &struct ath5k_hw
  */
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
 {
 	unsigned int i;
 
@@ -79,24 +76,24 @@
 }
 
 /**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
  * @ah: The &struct ath5k_hw
  */
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
 {
 	return ath5k_hw_reg_read(ah, AR5K_RXDP);
 }
 
 /**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
  * @ah: The &struct ath5k_hw
  * @phys_addr: RX descriptor address
  *
  * Returns -EIO if rx is active
  */
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
 {
 	if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
 		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@
 \**********/
 
 /**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
@@ -128,7 +124,8 @@
  * NOTE: Must be called after setting up tx control descriptor for that
  * queue (see below).
  */
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 {
 	u32 tx_queue;
 
@@ -177,17 +174,16 @@
 }
 
 /**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
  * Stop DMA transmit on a specific hw queue and drain queue so we don't
  * have any pending frames. Returns -EBUSY if we still have pending frames,
  * -EINVAL if queue number is out of range or inactive.
- *
  */
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 {
 	unsigned int i = 40;
 	u32 tx_queue, pending;
@@ -320,14 +316,14 @@
 }
 
 /**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
  *
  * Returns -EIO if queue didn't stop
  */
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
 {
 	int ret;
 	ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@
 }
 
 /**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
@@ -352,7 +347,8 @@
  *
  * XXX: Is TXDP read and clear ?
  */
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
 {
 	u16 tx_reg;
 
@@ -382,10 +378,10 @@
 }
 
 /**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
+ * @phys_addr: The physical address
  *
  * Set TX descriptor's address for a specific queue. For 5210 we ignore
  * the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@
  * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
  * active.
  */
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
 {
 	u16 tx_reg;
 
@@ -435,8 +432,7 @@
 }
 
 /**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
  * @ah: The &struct ath5k_hw
  * @increase: Flag to force increase of trigger level
  *
@@ -444,15 +440,15 @@
  * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
  * the buffer and transmits its data. Lowering this results sending small
  * frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * (64 bytes) and if we get a tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached maximum/minimum.
  *
  * XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
  */
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
 {
 	u32 trigger_level, imr;
 	int ret = -EIO;
@@ -498,21 +494,20 @@
 \*******************/
 
 /**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
  * @ah: The &struct ath5k_hw
  *
  * Check if we have pending interrupts to process. Returns 1 if we
  * have pending interrupts and 0 if we haven't.
  */
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
 {
 	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
 }
 
 /**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
  * @ah: The @struct ath5k_hw
  * @interrupt_mask: Driver's interrupt mask used to filter out
  * interrupts in sw.
@@ -523,62 +518,162 @@
  * being mapped on some standard non hw-specific positions
  * (check out &ath5k_int).
  *
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
  */
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
 {
-	u32 data;
+	u32 data = 0;
 
 	/*
-	 * Read interrupt status from the Interrupt Status register
-	 * on 5210
+	 * Read interrupt status from Primary Interrupt
+	 * Register.
+	 *
+	 * Note: PISR/SISR Not available on 5210
 	 */
 	if (ah->ah_version == AR5K_AR5210) {
-		data = ath5k_hw_reg_read(ah, AR5K_ISR);
-		if (unlikely(data == AR5K_INT_NOCARD)) {
-			*interrupt_mask = data;
+		u32 isr = 0;
+		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+		if (unlikely(isr == AR5K_INT_NOCARD)) {
+			*interrupt_mask = isr;
 			return -ENODEV;
 		}
-	} else {
+
 		/*
-		 * Read interrupt status from Interrupt
-		 * Status Register shadow copy (Read And Clear)
-		 *
-		 * Note: PISR/SISR Not available on 5210
+		 * Filter out the non-common bits from the interrupt
+		 * status.
 		 */
-		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
-		if (unlikely(data == AR5K_INT_NOCARD)) {
-			*interrupt_mask = data;
+		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+		/* Handle INT_FATAL */
+		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+						| AR5K_ISR_DPERR)))
+			*interrupt_mask |= AR5K_INT_FATAL;
+
+		/*
+		 * XXX: BMISS interrupts may occur after association.
+		 * I found this on 5210 code but it needs testing. If this is
+		 * true we should disable them before assoc and re-enable them
+		 * after a successful assoc + some jiffies.
+			interrupt_mask &= ~AR5K_INT_BMISS;
+		 */
+
+		data = isr;
+	} else {
+		u32 pisr = 0;
+		u32 pisr_clear = 0;
+		u32 sisr0 = 0;
+		u32 sisr1 = 0;
+		u32 sisr2 = 0;
+		u32 sisr3 = 0;
+		u32 sisr4 = 0;
+
+		/* Read PISR and SISRs... */
+		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+		if (unlikely(pisr == AR5K_INT_NOCARD)) {
+			*interrupt_mask = pisr;
 			return -ENODEV;
 		}
-	}
 
-	/*
-	 * Get abstract interrupt mask (driver-compatible)
-	 */
-	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
 
-	if (ah->ah_version != AR5K_AR5210) {
-		u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+		/*
+		 * PISR holds the logical OR of interrupt bits
+		 * from SISR registers:
+		 *
+		 * TXOK and TXDESC  -> Logical OR of TXOK and TXDESC
+		 *			per-queue bits on SISR0
+		 *
+		 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+		 *			per-queue bits on SISR1
+		 *
+		 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+		 *
+		 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
+		 *
+		 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+		 *		BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+		 *		(and TSFOOR ?) bits on SISR2
+		 *
+		 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+		 *			QCBRURN per-queue bits on SISR3
+		 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+		 *
+		 * If we clear these bits on PISR we'll also clear all
+		 * related bits from the SISRs, e.g. writing the TXOK bit on
+		 * PISR clears all TXOK bits from SISR0; so if a new TXOK
+		 * interrupt fired for another queue while we were reading
+		 * the interrupt registers and we write back the TXOK bit on
+		 * PISR we'll lose it. So make sure that we don't write back
+		 * on PISR any bits that come from the SISRs. Clearing them
+		 * from the SISRs will also clear PISR, so no need to worry here.
+		 */
 
-		/*HIU = Host Interface Unit (PCI etc)*/
-		if (unlikely(data & (AR5K_ISR_HIUERR)))
-			*interrupt_mask |= AR5K_INT_FATAL;
+		pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
 
-		/*Beacon Not Ready*/
-		if (unlikely(data & (AR5K_ISR_BNR)))
-			*interrupt_mask |= AR5K_INT_BNR;
+		/*
+		 * Write to clear them...
+		 * Note: This means that each bit we write back
+		 * to the registers will get cleared, leaving the
+		 * rest unaffected. So this won't affect new interrupts
+		 * we didn't catch while reading/processing; we'll get
+		 * them the next time get_isr gets called.
+		 */
+		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+		/* Flush previous write */
+		ath5k_hw_reg_read(ah, AR5K_PISR);
 
-		if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
-					AR5K_SISR2_DPERR |
-					AR5K_SISR2_MCABT)))
-			*interrupt_mask |= AR5K_INT_FATAL;
+		/*
+		 * Filter out the non-common bits from the interrupt
+		 * status.
+		 */
+		*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
 
-		if (data & AR5K_ISR_TIM)
+
+		/* We treat TXOK, TXDESC, TXERR and TXEOL
+		 * the same way (schedule the tx tasklet)
+		 * so we track them all together per queue */
+		if (pisr & AR5K_ISR_TXOK)
+			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+						AR5K_SISR0_QCU_TXOK);
+
+		if (pisr & AR5K_ISR_TXDESC)
+			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+						AR5K_SISR0_QCU_TXDESC);
+
+		if (pisr & AR5K_ISR_TXERR)
+			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+						AR5K_SISR1_QCU_TXERR);
+
+		if (pisr & AR5K_ISR_TXEOL)
+			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+						AR5K_SISR1_QCU_TXEOL);
+
+		/* Currently this is not very useful since we treat
+		 * all queues the same way if we get a TXURN (update
+		 * tx trigger level) but we might need it later on */
+		if (pisr & AR5K_ISR_TXURN)
+			ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+						AR5K_SISR2_QCU_TXURN);
+
+		/* Misc Beacon related interrupts */
+
+		/* For AR5211 */
+		if (pisr & AR5K_ISR_TIM)
 			*interrupt_mask |= AR5K_INT_TIM;
 
-		if (data & AR5K_ISR_BCNMISC) {
+		/* For AR5212+ */
+		if (pisr & AR5K_ISR_BCNMISC) {
 			if (sisr2 & AR5K_SISR2_TIM)
 				*interrupt_mask |= AR5K_INT_TIM;
 			if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@
 				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
 		}
 
-		if (data & AR5K_ISR_RXDOPPLER)
-			*interrupt_mask |= AR5K_INT_RX_DOPPLER;
-		if (data & AR5K_ISR_QCBRORN) {
-			*interrupt_mask |= AR5K_INT_QCBRORN;
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
-					AR5K_SISR3_QCBRORN);
-		}
-		if (data & AR5K_ISR_QCBRURN) {
-			*interrupt_mask |= AR5K_INT_QCBRURN;
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
-					AR5K_SISR3_QCBRURN);
-		}
-		if (data & AR5K_ISR_QTRIG) {
-			*interrupt_mask |= AR5K_INT_QTRIG;
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
-					AR5K_SISR4_QTRIG);
-		}
+		/* Below interrupts are unlikely to happen */
 
-		if (data & AR5K_ISR_TXOK)
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
-					AR5K_SISR0_QCU_TXOK);
-
-		if (data & AR5K_ISR_TXDESC)
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
-					AR5K_SISR0_QCU_TXDESC);
-
-		if (data & AR5K_ISR_TXERR)
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
-					AR5K_SISR1_QCU_TXERR);
-
-		if (data & AR5K_ISR_TXEOL)
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
-					AR5K_SISR1_QCU_TXEOL);
-
-		if (data & AR5K_ISR_TXURN)
-			ah->ah_txq_isr |= AR5K_REG_MS(
-					ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
-					AR5K_SISR2_QCU_TXURN);
-	} else {
-		if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
-				| AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
+		/* HIU = Host Interface Unit (PCI etc)
+		 * Can be one of MCABT, SSERR, DPERR from SISR2 */
+		if (unlikely(pisr & (AR5K_ISR_HIUERR)))
 			*interrupt_mask |= AR5K_INT_FATAL;
 
-		/*
-		 * XXX: BMISS interrupts may occur after association.
-		 * I found this on 5210 code but it needs testing. If this is
-		 * true we should disable them before assoc and re-enable them
-		 * after a successful assoc + some jiffies.
-			interrupt_mask &= ~AR5K_INT_BMISS;
-		 */
+		/*Beacon Not Ready*/
+		if (unlikely(pisr & (AR5K_ISR_BNR)))
+			*interrupt_mask |= AR5K_INT_BNR;
+
+		/* A queue got CBR overrun */
+		if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
+			*interrupt_mask |= AR5K_INT_QCBRORN;
+			ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+						AR5K_SISR3_QCBRORN);
+		}
+
+		/* A queue got CBR underrun */
+		if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
+			*interrupt_mask |= AR5K_INT_QCBRURN;
+			ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+						AR5K_SISR3_QCBRURN);
+		}
+
+		/* A queue got triggered */
+		if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
+			*interrupt_mask |= AR5K_INT_QTRIG;
+			ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+						AR5K_SISR4_QTRIG);
+		}
+
+		data = pisr;
 	}
 
 	/*
@@ -661,8 +732,7 @@
 }
 
 /**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
  * @ah: The &struct ath5k_hw
  * @new_mask: The new interrupt mask to be set
  *
@@ -670,7 +740,8 @@
  * ath5k_int bits to hw-specific bits to remove abstraction and writing
  * Interrupt Mask Register.
  */
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
 {
 	enum ath5k_int old_mask, int_mask;
 
@@ -697,16 +768,14 @@
 		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
 				& AR5K_SIMR2_QCU_TXURN;
 
+		/* Fatal interrupt abstraction for 5211+ */
 		if (new_mask & AR5K_INT_FATAL) {
 			int_mask |= AR5K_IMR_HIUERR;
 			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
 				| AR5K_SIMR2_DPERR);
 		}
 
-		/*Beacon Not Ready*/
-		if (new_mask & AR5K_INT_BNR)
-			int_mask |= AR5K_INT_BNR;
-
+		/* Misc beacon related interrupts */
 		if (new_mask & AR5K_INT_TIM)
 			int_mask |= AR5K_IMR_TIM;
 
@@ -721,8 +790,9 @@
 		if (new_mask & AR5K_INT_CAB_TIMEOUT)
 			simr2 |= AR5K_SISR2_CAB_TIMEOUT;
 
-		if (new_mask & AR5K_INT_RX_DOPPLER)
-			int_mask |= AR5K_IMR_RXDOPPLER;
+		/*Beacon Not Ready*/
+		if (new_mask & AR5K_INT_BNR)
+			int_mask |= AR5K_INT_BNR;
 
 		/* Note: Per queue interrupt masks
 		 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@
 		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
 
 	} else {
+		/* Fatal interrupt abstraction for 5210 */
 		if (new_mask & AR5K_INT_FATAL)
 			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
 				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
 
+		/* Only common interrupts left for 5210 (no SIMRs) */
 		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
 	}
 
@@ -760,8 +832,7 @@
 \********************/
 
 /**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
  * @ah: The &struct ath5k_hw
  *
  * Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@
  *
  * XXX: Save/restore RXDP/TXDP registers ?
  */
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
 {
 	/*
 	 * Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@
 }
 
 /**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
  * @ah: The &struct ath5k_hw
  *
  * Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@
  * stuck frames on tx queues, only a reset
  * can fix that.
  */
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
 {
 	int i, qmax, err;
 	err = 0;
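
A hypothetical consumer sketch for the per-queue bitmaps latched in ath5k_hw_get_isr() above (not part of this patch): the tx tasklet can walk only the queues whose bits are set instead of all of them. Helper and field names below are assumptions.

static void ath5k_tx_process_pending(struct ath5k_hw *ah)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {	/* field name assumed */
		if (!(ah->ah_txq_isr_txok_all & BIT(i)))
			continue;
		ah->ah_txq_isr_txok_all &= ~BIT(i);
		ath5k_tx_processq(ah, &ah->txqs[i]);	/* helper name assumed */
	}
}
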
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 8592978..73d3dd8 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -24,10 +24,33 @@
 #include "reg.h"
 #include "debug.h"
 
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output pin on GPIO control
+ * register and then read or set its status from GPIO data input/output
+ * registers.
+ *
+ * We also control the two LED pins provided by the hw, LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED but many scenarios
+ * are available from hw. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle them through the LED subsystem in led.c
  */
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LED connected to the LED_0, LED_1 pins,
+ * not the GPIO-based ones.
+ */
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
 {
 	u32 led;
 	/*5210 has different led mode handling*/
@@ -74,10 +97,13 @@
 		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
 }
 
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
  */
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
 {
 	if (gpio >= AR5K_NUM_GPIO)
 		return -EINVAL;
@@ -89,10 +115,13 @@
 	return 0;
 }
 
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
  */
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
 {
 	if (gpio >= AR5K_NUM_GPIO)
 		return -EINVAL;
@@ -104,10 +133,13 @@
 	return 0;
 }
 
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
  */
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
 {
 	if (gpio >= AR5K_NUM_GPIO)
 		return 0xffffffff;
@@ -117,10 +149,14 @@
 		0x1;
 }
 
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
  */
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
 {
 	u32 data;
 
@@ -138,10 +174,19 @@
 	return 0;
 }
 
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/wireless to save power (the active polarity also comes from EEPROM).
  */
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
 		u32 interrupt_level)
 {
 	u32 data;
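
For context, the rfkill GPIO described in the kernel-doc above is usually read back by comparing the pin level against the active polarity, both of which come from EEPROM. A sketch along the lines of what rfkill.c does (treat the rf_kill field names as assumptions):

static bool ath5k_is_rfkill_set(struct ath5k_hw *ah)
{
	/* gpio/polarity come from EEPROM via ah->rf_kill */
	return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) == ah->rf_kill.polarity;
}
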
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 1ffecc0..a1ea78e 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -23,24 +23,27 @@
 #include "reg.h"
 #include "debug.h"
 
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
  */
-
 struct ath5k_ini {
 	u16	ini_register;
 	u32	ini_value;
 
 	enum {
 		AR5K_INI_WRITE = 0,	/* Default */
-		AR5K_INI_READ = 1,	/* Cleared on read */
+		AR5K_INI_READ = 1,
 	} ini_mode;
 };
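
A simplified sketch of how an ath5k_ini entry is applied inside ath5k_hw_ini_registers(); skip_pcu handling is omitted, so treat this as an outline rather than the exact loop body:

for (i = 0; i < size; i++) {
	if (ini_regs[i].ini_mode == AR5K_INI_READ)
		/* read-and-clear registers are just read */
		ath5k_hw_reg_read(ah, ini_regs[i].ini_register);
	else
		ath5k_hw_reg_write(ah, ini_regs[i].ini_value,
				   ini_regs[i].ini_register);
}
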
 
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
  */
-
 struct ath5k_ini_mode {
 	u16	mode_register;
 	u32	mode_value[3];
@@ -386,11 +389,10 @@
 
 /* Initial mode-specific settings for AR5211
  * 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it! */
 static const struct ath5k_ini_mode ar5211_ini_mode[] = {
 	{ AR5K_TXCFG,
-	/*	A/XR          B           G       */
+	/*	A          B           G       */
 	   { 0x00000015, 0x0000001d, 0x00000015 } },
 	{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
 	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@
 	   { 0x00000010, 0x00000010, 0x00000010 } },
 };
 
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
 static const struct ath5k_ini ar5212_ini_common_start[] = {
 	{ AR5K_RXDP,		0x00000000 },
 	{ AR5K_RXCFG,		0x00000005 },
@@ -724,7 +726,8 @@
 	   { 0x00000000, 0x00000000, 0x00000108 } },
 };
 
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
 	{ AR5K_TXCFG,
 	/*	A/XR          B           G       */
@@ -757,6 +760,7 @@
 	   { 0x1883800a, 0x1873800a, 0x1883800a } },
 };
 
+/* Common for all modes */
 static const struct ath5k_ini rf5111_ini_common_end[] = {
 	{ AR5K_DCU_FP,		0x00000000 },
 	{ AR5K_PHY_AGC,		0x00000000 },
@@ -774,7 +778,9 @@
 	{ 0xa23c,		0x13c889af },
 };
 
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
 	{ AR5K_TXCFG,
 	/*	A/XR          B           G       */
@@ -825,7 +831,9 @@
 	{ 0xa23c,		0x13c889af },
 };
 
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
 	{ AR5K_TXCFG,
 	/*	A/XR          B           G       */
@@ -963,7 +971,8 @@
 	{ 0xa384, 0xf3307ff0 },
 };
 
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
 /* XXX: a mode ? */
 static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
 	{ AR5K_TXCFG,
@@ -1085,7 +1094,8 @@
 	{ 0xa384, 0xf3307ff0 },
 };
 
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
 /* XXX: a mode ? */
 static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
 	{ AR5K_TXCFG,
@@ -1357,10 +1367,15 @@
 };
 
 
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
  */
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
 		const struct ath5k_ini *ini_regs, bool skip_pcu)
 {
 	unsigned int i;
@@ -1388,7 +1403,15 @@
 	}
 }
 
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
 		unsigned int size, const struct ath5k_ini_mode *ini_mode,
 		u8 mode)
 {
@@ -1402,7 +1425,17 @@
 
 }
 
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset on a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
 {
 	/*
 	 * Write initial register settings
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index dfa48eb..849fa06 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -98,7 +98,7 @@
 					0xffff);
 			return true;
 		}
-		udelay(15);
+		usleep_range(15, 20);
 	}
 
 	return false;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index a7eafa3..cebfd6f 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -30,11 +30,47 @@
 #include "reg.h"
 #include "debug.h"
 
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The protocol control unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from the
+ * baseband. To be more specific, PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in sw above for debugging or research. For more info check out the
+ * PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
  * AR5212+ can use higher rates for ack transmission
  * based on current tx rate instead of the base rate.
  * It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
  * CCK and OFDM) and ack rates that we use when setting
  * rate -> duration table. This mapping is hw-based so
  * don't change anything.
@@ -63,17 +99,18 @@
 \*******************/
 
 /**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
  * @ah: The &struct ath5k_hw
  * @len: Frame's length in bytes
  * @rate: The @struct ieee80211_rate
+ * @shortpre: Indicates a short preamble
  *
  * Calculate tx duration of a frame given it's rate and length
  * It extends ieee80211_generic_frame_duration for non standard
  * bwmodes.
  */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
 		int len, struct ieee80211_rate *rate, bool shortpre)
 {
 	int sifs, preamble, plcp_bits, sym_time;
@@ -129,11 +166,11 @@
 }
 
 /**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
  * @ah: The &struct ath5k_hw
  */
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
 {
 	struct ieee80211_channel *channel = ah->ah_current_channel;
 	unsigned int slot_time;
@@ -160,11 +197,11 @@
 }
 
 /**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
  * @ah: The &struct ath5k_hw
  */
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
 {
 	struct ieee80211_channel *channel = ah->ah_current_channel;
 	unsigned int sifs;
@@ -191,17 +228,17 @@
 }
 
 /**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
  * @ah: The &struct ath5k_hw
  *
  * Reads MIB counters from PCU and updates sw statistics. Is called after a
  * MIB interrupt, because one of these counters might have reached their maximum
  * and triggered the MIB interrupt, to let us read and clear the counter.
  *
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
  */
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
 {
 	struct ath5k_statistics *stats = &ah->stats;
 
@@ -219,10 +256,8 @@
 \******************/
 
 /**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
  *
  * Write the rate code to duration table upon hw reset. This is a helper for
  * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@
  * that include all OFDM and CCK rates.
  *
  */
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
 {
 	struct ieee80211_rate *rate;
 	unsigned int i;
@@ -280,12 +316,12 @@
 }
 
 /**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
  * @ah: The &struct ath5k_hw
  * @timeout: Timeout in usec
  */
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
 	if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
 			<= timeout)
@@ -298,12 +334,12 @@
 }
 
 /**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
  * @ah: The &struct ath5k_hw
  * @timeout: Timeout in usec
  */
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
 	if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
 			<= timeout)
@@ -321,14 +357,14 @@
 \*******************/
 
 /**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
  * @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
  *
  * Set station id on hw using the provided mac address
  */
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 	u32 low_id, high_id;
@@ -349,14 +385,14 @@
 }
 
 /**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
  * @ah: The &struct ath5k_hw
  *
  * Sets the current BSSID and BSSID mask we have from the
  * common struct into the hardware
  */
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 	u16 tim_offset = 0;
@@ -389,7 +425,23 @@
 	ath5k_hw_enable_pspoll(ah, NULL, 0);
 }
 
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Filter out BSSIDs we don't listen to
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 
@@ -400,18 +452,21 @@
 		ath_hw_setbssidmask(common);
 }
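The masking idea described in the kernel-doc above (handled by ath_hw_setbssidmask() in the common ath module) can be illustrated with the following conceptual sketch; it is not the common module's actual implementation:

/* Illustrative only: a mask bit stays set only where all addresses we
 * want to match agree, so the PCU ignores the address bits that differ. */
static void example_calc_bssid_mask(const u8 addr_a[ETH_ALEN],
				    const u8 addr_b[ETH_ALEN],
				    u8 mask[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] = ~(addr_a[i] ^ addr_b[i]);
}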
 
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32 bits of the multicast filter
+ * @filter1: Higher 16 bits of the multicast filter
  */
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
 {
 	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
 	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
 }
 
 /**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
  * @ah: The &struct ath5k_hw
  *
  * Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@
  * and pass to the driver. For a list of frame types
  * check out reg.h.
  */
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
 {
 	u32 data, filter = 0;
 
@@ -440,8 +496,7 @@
 }
 
 /**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
  * @ah: The &struct ath5k_hw
  * @filter: RX filter mask (see reg.h)
  *
@@ -449,7 +504,8 @@
  * register on 5212 and newer chips so that we have proper PHY
  * error reporting.
  */
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
 {
 	u32 data = 0;
 
@@ -493,13 +549,13 @@
 #define ATH5K_MAX_TSF_READ 10
 
 /**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
  * @ah: The &struct ath5k_hw
  *
  * Returns the current TSF
  */
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
 {
 	u32 tsf_lower, tsf_upper1, tsf_upper2;
 	int i;
@@ -536,28 +592,30 @@
 	return ((u64)tsf_upper1 << 32) | tsf_lower;
 }
 
+#undef ATH5K_MAX_TSF_READ
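For reference, the retry loop inside ath5k_hw_get_tsf64() (its body falls outside this diff's context) follows the usual upper/lower/upper pattern for reading a 64bit counter through two 32bit registers. A sketch, reusing the register names visible in ath5k_hw_set_tsf64() below; details of the real loop may differ:

/* Illustrative only: re-read until the upper half is stable, so the
 * combined 64bit value is consistent even if the lower half wrapped. */
for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
	tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
	tsf_lower  = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
	tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
	if (tsf_upper2 == tsf_upper1)
		break;
}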
+
 /**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
  * @ah: The &struct ath5k_hw
  * @tsf64: The new 64bit TSF
  *
  * Sets the new TSF
  */
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
 {
 	ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
 	ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
 }
 
 /**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
  * @ah: The &struct ath5k_hw
  *
  * Forces a TSF reset on PCU
  */
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
 {
 	u32 val;
 
@@ -573,10 +631,17 @@
 	ath5k_hw_reg_write(ah, val, AR5K_BEACON);
 }
 
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
  */
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
 {
 	u32 timer1, timer2, timer3;
 
@@ -655,8 +720,7 @@
 }
 
 /**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
  * @a: timer a (before b)
  * @b: timer b (after a)
  * @window: difference between a and b
@@ -686,12 +750,11 @@
 }
 
 /**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
  * @ah: The &struct ath5k_hw
  * @intval: beacon interval
  *
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
  *
  * The need for this function arises from the fact that we have 4 separate
  * HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@
 }
 
 /**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
  * @ah: The &struct ath5k_hw
  * @coverage_class: IEEE 802.11 coverage class number
  *
  * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
  */
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
 {
 	/* As defined by IEEE 802.11-2007 17.3.8.6 */
 	int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
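To make the arithmetic concrete (my own example, not part of the patch): with the nominal 9usec OFDM slot time, coverage class 2 gives 9 + 3 * 2 = 15usec, and the ACK/CTS timeouts set further down in this function grow accordingly.

/* Illustrative only: slot time growth per IEEE 802.11-2007 17.3.8.6,
 * e.g. 802.11a (9 usec base) with coverage_class = 2 -> 15 usec. */
static unsigned int example_slot_time(unsigned int base_slot, u8 coverage_class)
{
	return base_slot + 3 * coverage_class;
}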
@@ -772,8 +835,7 @@
 \***************************/
 
 /**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
  * @ah: The &struct ath5k_hw
  *
  * Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@
  *
  * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
  */
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
 {
 	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
 /**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
  * @ah: The &struct ath5k_hw
  *
  * Stops RX engine on PCU
  */
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
 {
 	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
 /**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
  * @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
  *
  * Configure PCU for the various operating modes (AP/STA etc)
  */
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 	u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@
 	return 0;
 }
 
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
-								u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
 {
 	/* Set bssid and bssid mask */
 	ath5k_hw_set_bssid(ah);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 01cb72d..e1f8613 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1,6 +1,4 @@
 /*
- * PHY functions
- *
  * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
  * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
  * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
@@ -20,6 +18,10 @@
  *
  */
 
+/***********************\
+* PHY related functions *
+\***********************/
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
@@ -31,14 +33,53 @@
 #include "../regd.h"
 
 
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also bear in mind that we never got documentation for most of these
+ * functions; what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
  */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
 {
 	unsigned int i;
 	u32 srev;
@@ -58,7 +99,7 @@
 		return 0;
 	}
 
-	mdelay(2);
+	usleep_range(2000, 2500);
 
 	/* ...wait until PHY is ready and read the selected radio revision */
 	ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
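A side note on a pattern that repeats throughout this patch: mdelay()/udelay() busy-waits are replaced with usleep_range(min, max). This is valid here on the assumption that these reset/tuning paths run in process context and may sleep, and it lets the timer subsystem coalesce wakeups instead of spinning. For example:

/* Illustrative only: sleep with some slack instead of busy-waiting. */
usleep_range(2000, 2500);	/* was: mdelay(2)   */
usleep_range(100, 150);		/* was: udelay(100) */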
@@ -81,10 +122,16 @@
 	return ret;
 }
 
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
  */
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 {
 	u16 freq = channel->center_freq;
 
@@ -101,7 +148,13 @@
 	return false;
 }
 
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
 				struct ieee80211_channel *channel)
 {
 	u8 refclk_freq;
@@ -122,11 +175,20 @@
 		return false;
 }
 
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: Indicate we need to swap data
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * info.
  */
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
-					const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
 					u32 val, u8 reg_id, bool set)
 {
 	const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@
 }
 
 /**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
  * @ah: the &struct ath5k_hw
  * @channel: the currently set channel upon reset
  *
@@ -216,10 +277,11 @@
  * mantissa and provide these values on hw.
  *
  * For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
  */
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
-	struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+				struct ieee80211_channel *channel)
 {
 	/* Get exponent and mantissa and set it */
 	u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@
 	return 0;
 }
 
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
 int ath5k_hw_phy_disable(struct ath5k_hw *ah)
 {
 	/*Just a try M.F.*/
@@ -286,10 +352,13 @@
 	return 0;
 }
 
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
 			struct ieee80211_channel *channel)
 {
 	/*
@@ -308,9 +377,9 @@
 			delay = delay << 2;
 		/* XXX: /2 on turbo ? Let's be safe
 		 * for now */
-		udelay(100 + delay);
+		usleep_range(100 + delay, 100 + (2 * delay));
 	} else {
-		mdelay(1);
+		usleep_range(1000, 1500);
 	}
 }
 
@@ -319,7 +388,9 @@
 * RF Gain optimization *
 \**********************/
 
-/*
+/**
+ * DOC: RF Gain optimization
+ *
  * This code is used to optimize RF gain on different environments
  * (temperature mostly) based on feedback from a power detector.
  *
@@ -328,22 +399,22 @@
  * no gain optimization ladder-.
  *
  * For more infos check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
  *
  * This paper describes power drops as seen on the receiver due to
  * probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
  *
  * And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
  * with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
  */
 
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
 int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
 {
 	/* Initialize the gain optimization values */
@@ -367,17 +438,21 @@
 	return 0;
 }
 
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
  * That means our next packet is going to be sent with lower
  * tx power and a Peak to Average Power Detector (PAPD) will try
  * to measure the gain.
  *
- * XXX:  How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
  * just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
  */
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
 {
 
 	/* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@
 
 }
 
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
 {
 	u32 mix, step;
 	u32 *rf;
@@ -450,11 +531,19 @@
 	return ah->ah_gain.g_f_corr;
 }
 
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
  * power detector windows. If we get a measurement outside range
  * we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
 {
 	const struct ath5k_rf_reg *rf_regs;
 	u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@
 			ah->ah_gain.g_current <= level[3]);
 }
 
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
 {
 	const struct ath5k_gain_opt *go;
 	const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@
 	return ret;
 }
 
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
  * Check for a new gain reading and schedule an adjustment
  * if needed.
  *
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
 {
 	u32 data, type;
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@
 	return ah->ah_gain.g_state;
 }
 
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
 {
 	const struct ath5k_ini_rfgain *ath5k_rfg;
 	unsigned int i, size, index;
@@ -688,16 +796,23 @@
 }
 
 
-
 /********************\
 * RF Registers setup *
 \********************/
 
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more info on this, check out rfbuffer.h
  */
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
-	struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+			struct ieee80211_channel *channel,
+			unsigned int mode)
 {
 	const struct ath5k_rf_reg *rf_regs;
 	const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@
   PHY/RF channel functions
 \**************************/
 
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
  */
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
 {
 	u32 athchan;
 
-	/*
-	 * Convert IEEE channel/MHz to an internal channel value used
-	 * by the AR5210 chipset. This has not been verified with
-	 * newer chipsets like the AR5212A who have a completely
-	 * different RF/PHY part.
-	 */
 	athchan = (ath5k_hw_bitswap(
 			(ieee80211_frequency_to_channel(
 				channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@
 	return athchan;
 }
 
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	u32 data;
@@ -1089,15 +1206,23 @@
 	data = ath5k_hw_rf5110_chan2athchan(channel);
 	ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
 	ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
-	mdelay(1);
+	usleep_range(1000, 1500);
 
 	return 0;
 }
 
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
  */
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
 		struct ath5k_athchan_2ghz *athchan)
 {
 	int channel;
@@ -1123,10 +1248,13 @@
 	return 0;
 }
 
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@
 	return 0;
 }
 
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value after a few modifications to the
+ * chip directly.
+ *
+ * NOTE: Make sure the channel frequency given is within our range or else
+ * we might damage the chip! Use ath5k_channel_ok before calling this one.
  */
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@
 	data = data0 = data1 = data2 = 0;
 	c = channel->center_freq;
 
+	/* My guess based on code:
+	 * 2GHz RF has 2 synth modes, one with a Local Oscillator
+	 * at 2224MHz and one with a LO at 2192MHz. IF is 1520MHz
+	 * (3040/2). data0 is used to set the PLL divider and data1
+	 * selects synth mode. */
 	if (c < 4800) {
+		/* Channel 14 and all frequencies with 2MHz spacing
+		 * below/above (non-standard channels) */
 		if (!((c - 2224) % 5)) {
+			/* Same as (c - 2224) / 5 */
 			data0 = ((2 * (c - 704)) - 3040) / 10;
 			data1 = 1;
+		/* Channel 1 and all frequencies with 5MHz spacing
+		 * below/above (standard channels without channel 14) */
 		} else if (!((c - 2192) % 5)) {
+			/* Same as (c - 2192) / 5 */
 			data0 = ((2 * (c - 672)) - 3040) / 10;
 			data1 = 0;
 		} else
 			return -EINVAL;
 
 		data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+	/* This is more complex, we have a single synthesizer with
+	 * 4 reference clock settings (?) based on frequency spacing
+	 * and set using data2. LO is at 4800MHz and data0 is again used
+	 * to set some divider.
+	 *
+	 * NOTE: There is an old Atheros presentation at Stanford
+	 * that mentions a method called dual direct conversion
+	 * with 1GHz sliding IF for RF5110. Maybe that's what we
+	 * have here, or an updated version. */
 	} else if ((c % 5) != 2 || c > 5435) {
 		if (!(c % 20) && c >= 5120) {
 			data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@
 	return 0;
 }
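A worked instance of the 2GHz branch above, to make the constants a little less opaque (my own arithmetic, simply following the code as written):

/* Illustrative only: channel 1, c = 2412 MHz.
 * (2412 - 2192) % 5 == 0, so the second branch is taken:
 *	data0 = ((2 * (2412 - 672)) - 3040) / 10 = (3480 - 3040) / 10 = 44
 *	data1 = 0
 * data0 is then bit-reversed over 8 bits:
 *	ath5k_hw_bitswap((44 << 2) & 0xff, 8) = ath5k_hw_bitswap(0xb0, 8) = 0x0d
 * before the result is combined with data1/data2 and written out to the
 * RF buffer registers. */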
 
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF so the code differs
+ * a little from RF5112.
  */
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	u32 data, data0, data2;
@@ -1258,10 +1422,16 @@
 	return 0;
 }
 
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
  */
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	int ret;
@@ -1313,11 +1483,46 @@
 	return 0;
 }
 
+
 /*****************\
   PHY calibration
 \*****************/
 
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL, hw disconnects the antennas and does
+ * a calibration on the DC offsets of ADCs. During this period
+ * rx/tx gets disabled so we have to deal with it on the driver
+ * side.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more info on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
 {
 	s32 val;
 
@@ -1325,7 +1530,12 @@
 	return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
 }
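The history-buffer/median scheme from the DOC block above boils down to something like the sketch below; the driver's real versions are ath5k_hw_update_nfcal_hist() and ath5k_hw_get_median_noise_floor() further down, and the names and sizes here are made up for the example:

/* Illustrative only: keep the last N readings in a ring buffer and feed
 * the median (a robust "typical" value) back to the hardware. */
#define EXAMPLE_NF_HIST 8

struct example_nf_hist {
	s16 nfval[EXAMPLE_NF_HIST];
	u8 index;
};

static void example_nf_update(struct example_nf_hist *h, s16 nf)
{
	h->index = (h->index + 1) & (EXAMPLE_NF_HIST - 1);
	h->nfval[h->index] = nf;
}

static s16 example_nf_median(const struct example_nf_hist *h)
{
	s16 sort[EXAMPLE_NF_HIST];
	int i, j;

	memcpy(sort, h->nfval, sizeof(sort));
	for (i = 0; i < EXAMPLE_NF_HIST - 1; i++)	/* simple insertion sort */
		for (j = i + 1; j > 0 && sort[j] < sort[j - 1]; j--)
			swap(sort[j], sort[j - 1]);

	return sort[(EXAMPLE_NF_HIST - 1) / 2];
}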
 
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
 {
 	int i;
 
@@ -1334,6 +1544,11 @@
 		ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
 }
 
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
 static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
 {
 	struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@
 	hist->nfval[hist->index] = noise_floor;
 }
 
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
 {
 	s16 sort[ATH5K_NF_CAL_HIST_MAX];
 	s16 tmp;
@@ -1364,18 +1584,16 @@
 	return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
 }
 
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
  *
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration;
+ * it reads the NF from hardware, calculates the median and updates
+ * NF on hw.
  */
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 {
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
 	u32 val;
@@ -1390,6 +1608,8 @@
 		return;
 	}
 
+	ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
 	ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
 
 	/* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@
 
 	ah->ah_noise_floor = nf;
 
+	ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
 	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
 		"noise floor calibrated: %d\n", nf);
 }
 
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
  */
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	u32 phy_sig, phy_agc, phy_sat, beacon;
 	int ret;
 
+	if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+		return 0;
+
 	/*
 	 * Disable beacons and RX/TX queues, wait
 	 */
@@ -1456,7 +1685,7 @@
 	beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
 	ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
 
-	mdelay(2);
+	usleep_range(2000, 2500);
 
 	/*
 	 * Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@
 	 * Activate PHY and wait
 	 */
 	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
-	mdelay(1);
+	usleep_range(1000, 1500);
 
 	AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
 
@@ -1506,7 +1735,7 @@
 	ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
 	AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
 
-	mdelay(1);
+	usleep_range(1000, 1500);
 
 	/*
 	 * Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@
 	return 0;
 }
 
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
  */
 static int
 ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@
 	s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
 	int i;
 
-	if (!ah->ah_calibration ||
-		ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
-		return 0;
+	/* Skip if I/Q calibration is not needed or if it's still running */
+	if (!ah->ah_iq_cal_needed)
+		return -EINVAL;
+	else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+				"I/Q calibration still running");
+		return -EBUSY;
+	}
 
 	/* Calibration has finished, get the results and re-run */
-	/* work around empty results which can apparently happen on 5212 */
+
+	/* Workaround for empty results which can apparently happen on 5212:
+	 * Read registers up to 10 times until we get both i_pwr and q_pwr */
 	for (i = 0; i <= 10; i++) {
 		iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
 		i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@
 	else
 		q_coffd = q_pwr >> 7;
 
-	/* protect against divide by 0 and loss of sign bits */
+	/* In case i_coffd became zero, cancel calibration:
+	 * not only is it too small, it would also result in a
+	 * divide by zero later on. */
 	if (i_coffd == 0 || q_coffd < 2)
-		return 0;
+		return -ECANCELED;
+
+	/* Protect against loss of sign bits */
 
 	i_coff = (-iq_corr) / i_coffd;
 	i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
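A short aside on the clamp above (illustrative only): the I coefficient ends up in a signed 6bit hardware field, and a signed n-bit field holds -2^(n-1) .. 2^(n-1)-1, hence the [-32, 31] range. In general:

/* Illustrative only: clamp a value into the range of a signed n-bit field,
 * e.g. bits = 6 gives -32 .. 31 as used for i_coff above. */
static s32 example_clamp_signed_bits(s32 val, unsigned int bits)
{
	s32 max = (1 << (bits - 1)) - 1;
	s32 min = -max - 1;

	return clamp(val, min, max);
}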
@@ -1601,10 +1842,17 @@
 	return 0;
 }
 
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
  */
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	int ret;
@@ -1613,10 +1861,43 @@
 		return ath5k_hw_rf5110_calibrate(ah, channel);
 
 	ret = ath5k_hw_rf511x_iq_calibrate(ah);
+	if (ret) {
+		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+			"No I/Q correction performed (%uMHz)\n",
+			channel->center_freq);
 
-	if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
-	    (channel->hw_value != AR5K_MODE_11B))
-		ath5k_hw_request_rfgain_probe(ah);
+		/* Happens all the time if there is not much
+		 * traffic, consider it normal behaviour. */
+		ret = 0;
+	}
+
+	/* On full calibration do an AGC calibration and
+	 * request a PAPD probe for gainf calibration if
+	 * needed */
+	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+					AR5K_PHY_AGCCTL_CAL);
+
+		ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+			AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+			0, false);
+		if (ret) {
+			ATH5K_ERR(ah,
+				"gain calibration timeout (%uMHz)\n",
+				channel->center_freq);
+		}
+
+		if ((ah->ah_radio == AR5K_RF5111 ||
+			ah->ah_radio == AR5K_RF5112)
+			&& (channel->hw_value != AR5K_MODE_11B))
+			ath5k_hw_request_rfgain_probe(ah);
+	}
+
+	/* Update noise floor
+	 * XXX: Only do this after AGC calibration */
+	if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+		ath5k_hw_update_noise_floor(ah);
 
 	return ret;
 }
@@ -1626,6 +1907,16 @@
 * Spur mitigation functions *
 \***************************/
 
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects; for more information on this
+ * method check out patent US7643810
+ */
 static void
 ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
 				struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@
 * Antenna control *
 \*****************/
 
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas! I haven't found any card that implements
+ * that. The maximum number of antennas I've seen is up to 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A	-> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B	-> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode: A -> Rx, B -> Tx
+ *
+ * Also note that when setting antenna to F on the tx descriptor the card
+ * inverts the current tx antenna.
+ */
+
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
 ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
 {
 	if (ah->ah_version != AR5K_AR5210)
 		ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
 }
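To illustrate how the STA_ID1 knobs listed in the DOC block above are meant to be combined, here is a minimal sketch; it assumes the bits live on a STA_ID1 register macro (AR5K_STA_ID1) which is not shown in this diff, and the real policy lives in ath5k_hw_set_antenna_mode() below:

/* Illustrative only: always use the default antenna for self-generated
 * frames (ACKs etc) and for RTS, regardless of the per-descriptor choice. */
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
		     AR5K_STA_ID1_SELFGEN_DEF_ANT |
		     AR5K_STA_ID1_RTS_DEF_ANTENNA);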
 
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
  */
 static void
 ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@
 	}
 }
 
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
 void
 ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
 {
@@ -1944,8 +2301,10 @@
 		AR5K_PHY_ANT_SWITCH_TABLE_1);
 }
 
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
  */
 void
 ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@
  * Helper functions
  */
 
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
  */
 static s16
 ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@
 	return result;
 }
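The interpolation itself is the usual two-point linear formula; written out as a sketch (the driver's actual body sits outside the diff context, so treat the names below as illustrative):

/* Illustrative only: linear interpolation between (x_left, y_left) and
 * (x_right, y_right), evaluated at target. */
static s16 example_interpolate(s16 target, s16 x_left, s16 x_right,
				s16 y_left, s16 y_right)
{
	if (x_right == x_left)		/* degenerate interval, avoid div by 0 */
		return y_left;

	return y_left + ((target - x_left) * (y_right - y_left)) /
			(x_right - x_left);
}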
 
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
  *
  * Since we have the top of the curve and we draw the line below
  * until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
  */
 static s16
 ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@
 	return max(min_pwrL, min_pwrR);
 }
 
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
  * Interpolate (pwr,vpd) points to create a Power to PDADC or a
  * Power to PCDAC curve.
  *
@@ -2206,7 +2584,14 @@
 	}
 }
 
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
  * Get the surrounding per-channel power calibration piers
  * for a given frequency so that we can interpolate between
  * them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@
 	*pcinfo_r = &pcinfo[idx_r];
 }
 
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
  * Get the surrounding per-rate power calibration data
  * for a given frequency and interpolate between power
  * values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
  */
 static void
 ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@
 					rpinfo[idx_r].target_power_54);
 }
 
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
  * Get the max edge power for this channel if
  * we have such data from EEPROM's Conformance Test
  * Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@
  * Power to PCDAC table functions
  */
 
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we either get its calibrated points (piers) directly
+ * -if we have calibration data for this specific channel- or we interpolate
+ * linearly between the points of the surrounding channels we do have
+ * calibration data for. Then, with the calibrated points for this channel
+ * in hand, we do another linear interpolation between them to get the
+ * whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
  *
  * No further processing is needed for RF5111, the only thing we have to
  * do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@
 
 }
 
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
  *
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
  * RFX112 can have up to 2 curves (one for low txpower range and one for
  * higher txpower range). We need to put them both on pcdac_out and place
  * them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@
 	}
 }
 
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
 static void
 ath5k_write_pcdac_table(struct ath5k_hw *ah)
 {
@@ -2631,9 +3064,32 @@
  * Power to PDADC table functions
  */
 
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
  *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5dB steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
+ *
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
  * We can have up to 4 pd curves, we need to do a similar process
  * as we do for RF5112. This time we don't have an edge_flag but we
  * set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@
 
 }
 
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
 static void
 ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
 {
@@ -2814,7 +3274,13 @@
  * Common code for PCDAC/PDADC tables
  */
 
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
  * This is the main function that uses all of the above
  * to set PCDAC/PDADC table on hw for the current channel.
  * This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@
 	return 0;
 }
 
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
 static void
 ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
 {
@@ -3022,28 +3493,36 @@
 		ath5k_write_pcdac_table(ah);
 }
 
-/*
- * Per-rate tx power setting
- *
- * This is the code that sets the desired tx power (below
- * maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
 
-/*
- * Set rate power table
+/**
+ * DOC: Per-rate tx power setting
+ *
+ * This is the code that sets the desired tx power limit (below
+ * maximum) on hw for each rate (we also have TPC that sets
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
  *
  * For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
  *
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
  * rates[0] - rates[7] -> OFDM rates
  * rates[8] - rates[14] -> CCK rates
  * rates[15] -> XR rates (they all have the same power)
  */
+
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
 static void
 ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 			struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@
 }
 
 
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
  */
 static int
 ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@
 	return 0;
 }
 
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case the user or an application changes it while the PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
 {
 	ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
 		"changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@
 	return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
 }
 
+
 /*************\
  Init function
 \*************/
 
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver; it assumes the PHY is in a
+ * warm reset state!
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		      u8 mode, bool fast)
 {
 	struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@
 		if (ret)
 			return ret;
 
-		mdelay(1);
+		usleep_range(1000, 1500);
 
 		/*
 		 * Write RF buffer
@@ -3376,10 +3885,10 @@
 		}
 
 	} else if (ah->ah_version == AR5K_AR5210) {
-		mdelay(1);
+		usleep_range(1000, 1500);
 		/* Disable phy and wait */
 		ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
-		mdelay(1);
+		usleep_range(1000, 1500);
 	}
 
 	/* Set channel on PHY */
@@ -3405,7 +3914,7 @@
 	for (i = 0; i <= 20; i++) {
 		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
 			break;
-		udelay(200);
+		usleep_range(200, 250);
 	}
 	ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
@@ -3433,9 +3942,9 @@
 
 	/* At the same time start I/Q calibration for QAM constellation
 	 * -no need for CCK- */
-	ah->ah_calibration = false;
+	ah->ah_iq_cal_needed = false;
 	if (!(mode == AR5K_MODE_11B)) {
-		ah->ah_calibration = true;
+		ah->ah_iq_cal_needed = true;
 		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
 				AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
 		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 7766542..30b50f9 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,23 +17,48 @@
  */
 
 /********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
 \********************************************/
 
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we setup parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DFS parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs allowing us to have different DFS settings for each queue.
+ *
+ * When a frame goes into a TX queue, the QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or -if it's a beacon queue- if it's time to fire up the queue
+ * based on TSF etc), the DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
 
 
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  */
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
 {
 	u32 pending;
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@
 	return pending;
 }
 
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  */
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
 	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
 		return;
@@ -72,34 +100,56 @@
 	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
 }
 
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
  * Make sure cw is a power of 2 minus 1 and smaller than 1024
  */
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
 {
-	u32 cw = 1;
 	cw_req = min(cw_req, (u16)1023);
 
-	while (cw < cw_req)
-		cw = (cw << 1) | 1;
+	/* Check if cw_req + 1 is a power of 2 */
+	if (is_power_of_2(cw_req + 1))
+		return cw_req;
 
-	return cw;
+	/* Check if cw_req is a power of 2 */
+	if (is_power_of_2(cw_req))
+		return cw_req - 1;
+
+	/* If none of the above holds, round up
+	 * to the closest power of 2 minus 1 */
+	cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+	return cw_req;
 }
 
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
  */
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
 		struct ath5k_txq_info *queue_info)
 {
 	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
 	return 0;
 }
 
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
  */
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
 				const struct ath5k_txq_info *qinfo)
 {
 	struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@
 	return 0;
 }
 
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
  */
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
 		struct ath5k_txq_info *queue_info)
 {
 	unsigned int queue;
@@ -217,10 +273,16 @@
 * Single QCU/DCU initialization *
 \*******************************/
 
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
  */
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
 				  unsigned int queue)
 {
 	/* Single data queue on AR5210 */
@@ -255,15 +317,15 @@
 }
 
 /**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  *
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
  * and configures all queue-specific parameters.
  */
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
 	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
 
@@ -491,10 +553,9 @@
 \**************************/
 
 /**
- * ath5k_hw_set_ifs_intervals  - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
  *
  * Sets the global IFS intervals on DCU (also works on AR5210) for
  * the given slot time and the current bwmode.
@@ -597,7 +658,15 @@
 }
 
 
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on the information in
+ * ah->ah_txq* set by the driver.
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
 {
 	int i, ret;
 
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index f5c1000..0ea1608 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -280,6 +280,10 @@
  * 5211/5212 we have one primary and 4 secondary registers.
  * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
  * Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR of the per-queue interrupt bits found in the SISR
+ * registers (see below).
  */
 #define AR5K_ISR		0x001c			/* Register Address [5210] */
 #define AR5K_PISR		0x0080			/* Register Address [5211+] */
@@ -292,7 +296,10 @@
 #define AR5K_ISR_TXOK		0x00000040	/* Frame successfully transmitted */
 #define AR5K_ISR_TXDESC		0x00000080	/* TX descriptor request */
 #define AR5K_ISR_TXERR		0x00000100	/* Transmit error */
-#define AR5K_ISR_TXNOFRM	0x00000200	/* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM	0x00000200	/* No frame transmitted (transmit timeout)
+						 * NOTE: We don't have per-queue info for this
+						 * one, but we can enable it per-queue through
+						 * TXNOFRM_QCU field on TXNOFRM register */
 #define AR5K_ISR_TXEOL		0x00000400	/* Empty TX descriptor */
 #define AR5K_ISR_TXURN		0x00000800	/* Transmit FIFO underrun */
 #define AR5K_ISR_MIB		0x00001000	/* Update MIB counters */
@@ -302,21 +309,29 @@
 #define AR5K_ISR_SWBA		0x00010000	/* Software beacon alert */
 #define AR5K_ISR_BRSSI		0x00020000	/* Beacon rssi below threshold (?) */
 #define AR5K_ISR_BMISS		0x00040000	/* Beacon missed */
-#define AR5K_ISR_HIUERR		0x00080000	/* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR		0x00080000	/* Host Interface Unit error [5211+]
+						 * 'or' of MCABT, SSERR, DPERR from SISR2 */
 #define AR5K_ISR_BNR		0x00100000	/* Beacon not ready [5211+] */
 #define AR5K_ISR_MCABT		0x00100000	/* Master Cycle Abort [5210] */
 #define AR5K_ISR_RXCHIRP	0x00200000	/* CHIRP Received [5212+] */
 #define AR5K_ISR_SSERR		0x00200000	/* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR		0x00400000	/* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR		0x00400000	/* Bus parity error [5210] */
 #define AR5K_ISR_RXDOPPLER	0x00400000	/* Doppler chirp received [5212+] */
 #define AR5K_ISR_TIM		0x00800000	/* [5211+] */
-#define AR5K_ISR_BCNMISC	0x00800000	/* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
-						CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC	0x00800000	/* Misc beacon related interrupt
+						 * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+						 * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
 #define AR5K_ISR_GPIO		0x01000000	/* GPIO (rf kill) */
 #define AR5K_ISR_QCBRORN	0x02000000	/* QCU CBR overrun [5211+] */
 #define AR5K_ISR_QCBRURN	0x04000000	/* QCU CBR underrun [5211+] */
 #define AR5K_ISR_QTRIG		0x08000000	/* QCU scheduling trigger [5211+] */
 
+#define	AR5K_ISR_BITS_FROM_SISRS	(AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+					AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+					AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+					AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+					AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
 /*
  * Secondary status registers [5211+] (0 - 4)
  *
@@ -347,7 +362,7 @@
 #define	AR5K_SISR2_BCN_TIMEOUT	0x08000000	/* Beacon Timeout [5212+] */
 #define	AR5K_SISR2_CAB_TIMEOUT	0x10000000	/* CAB Timeout [5212+] */
 #define	AR5K_SISR2_DTIM		0x20000000	/* [5212+] */
-#define	AR5K_SISR2_TSFOOR	0x80000000	/* TSF OOR (?) */
+#define	AR5K_SISR2_TSFOOR	0x80000000	/* TSF Out of range */
 
 #define AR5K_SISR3		0x0090			/* Register Address [5211+] */
 #define AR5K_SISR3_QCBRORN	0x000003ff	/* Mask for QCBRORN */
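
The new AR5K_ISR_BITS_FROM_SISRS mask above collects the PISR bits whose per-queue detail lives in the secondary (SISR) registers. A hedged sketch of how an interrupt path could use it to decide whether the SISRs need to be read at all; the bit values are copied from the defines above, while need_secondary_status() is a made-up helper, not driver code:

#include <stdbool.h>
#include <stdint.h>

#define AR5K_ISR_TXOK		0x00000040
#define AR5K_ISR_TXDESC		0x00000080
#define AR5K_ISR_TXERR		0x00000100
#define AR5K_ISR_TXEOL		0x00000400
#define AR5K_ISR_TXURN		0x00000800
#define AR5K_ISR_HIUERR		0x00080000
#define AR5K_ISR_BCNMISC	0x00800000
#define AR5K_ISR_QCBRORN	0x02000000
#define AR5K_ISR_QCBRURN	0x04000000
#define AR5K_ISR_QTRIG		0x08000000

#define AR5K_ISR_BITS_FROM_SISRS	(AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
					AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
					AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
					AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
					AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)

/* If any of these bits is set in PISR, the per-queue detail is in SISR0..4. */
static bool need_secondary_status(uint32_t pisr)
{
	return (pisr & AR5K_ISR_BITS_FROM_SISRS) != 0;
}
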
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 2abac25..250db40 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,9 +19,9 @@
  *
  */
 
-/*****************************\
-  Reset functions and helpers
-\*****************************/
+/****************************\
+  Reset function and helpers
+\****************************/
 
 #include <asm/unaligned.h>
 
@@ -33,14 +33,36 @@
 #include "debug.h"
 
 
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state, ready to receive. We also handle routines
+ * that don't fit in other places, such as clock, sleep and power control.
+ */
+
+
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns 0 on success or -EAGAIN if we exceed AR5K_TUNE_REGISTER_TIMEOUT * 15us
  */
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
 			      bool is_set)
 {
 	int i;
@@ -64,35 +86,48 @@
 \*************************/
 
 /**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
  * @ah: The &struct ath5k_hw
  * @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
  */
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 	return usec * common->clockrate;
 }
 
 /**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
  * @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
  */
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 	return clock / common->clockrate;
 }
 
 /**
- * ath5k_hw_init_core_clock - Initialize core clock
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
  *
- * @ah The &struct ath5k_hw
- *
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
  */
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
 {
 	struct ieee80211_channel *channel = ah->ah_current_channel;
 	struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@
 	}
 }
 
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
  * If there is an external 32KHz crystal available, use it
  * as ref. clock instead of 32/40MHz clock and baseband clocks
  * to save power during sleep or restore normal 32/40MHz
  * operation.
  *
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- *	123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
  */
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
 {
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
 	u32 scal, spending, sclock;
@@ -340,10 +380,19 @@
 * Reset/Sleep control *
 \*********************/
 
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 on success or -EAGAIN (from ath5k_hw_register_timeout)
  */
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
 {
 	int ret;
 	u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@
 	ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
 
 	/* Wait at least 128 PCI clocks */
-	udelay(15);
+	usleep_range(15, 20);
 
 	if (ah->ah_version == AR5K_AR5210) {
 		val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@
 	return ret;
 }
 
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() -  Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB-based devices
+ *
+ * Returns 0 on success or -EAGAIN (from ath5k_hw_register_timeout)
  */
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
 {
 	u32 mask = flags ? flags : ~0U;
 	u32 __iomem *reg;
@@ -422,7 +476,7 @@
 	regval = __raw_readl(reg);
 	__raw_writel(regval | val, reg);
 	regval = __raw_readl(reg);
-	udelay(100);
+	usleep_range(100, 150);
 
 	/* Bring BB/MAC out of reset */
 	__raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@
 	return 0;
 }
 
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check the sleep control
+ * register in reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
  */
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
 			      bool set_chip, u16 sleep_duration)
 {
 	unsigned int i;
@@ -493,7 +559,7 @@
 
 		ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
 							AR5K_SLEEP_CTL);
-		udelay(15);
+		usleep_range(15, 20);
 
 		for (i = 200; i > 0; i--) {
 			/* Check if the chip did wake up */
@@ -502,7 +568,7 @@
 				break;
 
 			/* Wait a bit and retry */
-			udelay(50);
+			usleep_range(50, 75);
 			ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
 							AR5K_SLEEP_CTL);
 		}
@@ -523,17 +589,20 @@
 	return 0;
 }
 
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
  *
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clean sleep control register). After this MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
  * without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
  */
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
 {
 	struct pci_dev *pdev = ah->pdev;
 	u32 bus_flags;
@@ -543,7 +612,7 @@
 		return 0;
 
 	/* Make sure device is awake */
-	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+	ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
 	if (ret) {
 		ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
 		return ret;
@@ -563,7 +632,7 @@
 		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
 			AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
 			AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-			mdelay(2);
+			usleep_range(2000, 2500);
 	} else {
 		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
 			AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@
 	}
 
 	/* ...wakeup again!*/
-	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+	ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
 	if (ret) {
 		ATH5K_ERR(ah, "failed to put device on hold\n");
 		return ret;
@@ -584,11 +653,18 @@
 	return ret;
 }
 
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
  * Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL on invalid channel info
  */
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 {
 	struct pci_dev *pdev = ah->pdev;
 	u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@
 
 	if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
 		/* Wakeup the device */
-		ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+		ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
 		if (ret) {
 			ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
 			return ret;
@@ -621,7 +697,7 @@
 		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
 			AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
 			AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-			mdelay(2);
+			usleep_range(2000, 2500);
 	} else {
 		if (ath5k_get_bus_type(ah) == ATH_AHB)
 			ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@
 	}
 
 	/* ...wakeup again!...*/
-	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+	ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
 	if (ret) {
 		ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
 		return ret;
@@ -739,7 +815,7 @@
 		/* ...update PLL if needed */
 		if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
 			ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
-			udelay(300);
+			usleep_range(300, 350);
 		}
 
 		/* ...set the PHY operating mode */
@@ -755,8 +831,19 @@
 * Post-initvals register modifications *
 \**************************************/
 
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled by the initvals, e.g. bwmode
+ * settings, some PHY settings, workarounds etc. that in general
+ * don't fit anywhere else or are too small to justify a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
 				struct ieee80211_channel *channel)
 {
 	if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@
 	}
 }
 
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored in the EEPROM to properly initialize the card,
+ * based on various info and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
 		struct ieee80211_channel *channel)
 {
 	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@
 * Main reset function *
 \*********************/
 
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 		struct ieee80211_channel *channel, bool fast, bool skip_pcu)
 {
 	u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1047,7 +1159,7 @@
 	 */
 	if (fast && (ah->ah_radio != AR5K_RF2413) &&
 	(ah->ah_radio != AR5K_RF5413))
-		fast = 0;
+		fast = false;
 
 	/* Disable sleep clock operation
 	 * to avoid register access delay on certain
@@ -1073,7 +1185,7 @@
 	if (ret && fast) {
 		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
 			"DMA didn't stop, falling back to normal reset\n");
-		fast = 0;
+		fast = false;
 		/* Non fatal, just continue with
 		 * normal reset */
 		ret = 0;
@@ -1242,7 +1354,7 @@
 	/*
 	 * Initialize PCU
 	 */
-	ath5k_hw_pcu_init(ah, op_mode, mode);
+	ath5k_hw_pcu_init(ah, op_mode);
 
 	/*
 	 * Initialize PHY
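
ath5k_hw_register_timeout(), documented in the hunks above, is the generic poll-until-flag-changes helper behind many of these reset steps. A rough sketch of the pattern, with an assumed read_reg() accessor and iteration count standing in for the real AR5K_TUNE_REGISTER_TIMEOUT loop:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll 'reg' until 'flag' is set (is_set) or cleared, pausing 15us per try. */
static int example_register_timeout(u32 (*read_reg)(u32 reg), u32 reg,
				    u32 flag, bool is_set, unsigned int tries)
{
	unsigned int i;

	for (i = tries; i > 0; i--) {
		u32 data = read_reg(reg);

		if (is_set && (data & flag))
			break;
		if (!is_set && !(data & flag))
			break;
		udelay(15);
	}

	return i > 0 ? 0 : -EAGAIN;
}
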
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 5d11c23..aed34d9 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -18,7 +18,9 @@
  */
 
 
-/*
+/**
+ * DOC: RF Buffer registers
+ *
  * There are some special registers on the RF chip
  * that control various operation settings related mostly to
  * the analog parts (channel, gain adjustment etc).
@@ -44,40 +46,63 @@
  */
 
 
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
  * Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
  */
 struct ath5k_ini_rfbuffer {
-	u8	rfb_bank;		/* RF Bank number */
-	u16	rfb_ctrl_register;	/* RF Buffer control register */
-	u32	rfb_mode_data[3];	/* RF Buffer data for each mode */
+	u8	rfb_bank;
+	u16	rfb_ctrl_register;
+	u32	rfb_mode_data[3];
 };
 
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
  * Struct to hold RF Buffer field
  * infos used to access certain RF
  * analog registers
  */
 struct ath5k_rfb_field {
-	u8	len;	/* Field length */
-	u16	pos;	/* Offset on the raw packet */
-	u8	col;	/* Column -used for shifting */
+	u8	len;
+	u16	pos;
+	u8	col;
 };
 
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index in ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an rf
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
  */
 struct ath5k_rf_reg {
-	u8			bank;	/* RF Buffer Bank number */
-	u8			index;	/* Register's index on rf_regs_idx */
-	struct ath5k_rfb_field	field;	/* RF Buffer field for this register */
+	u8			bank;
+	u8			index;
+	struct ath5k_rfb_field	field;
 };
 
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
  * We do this to handle common bits and make our
  * life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
 enum ath5k_rf_regs_idx {
 	/* BANK 2 */
 	AR5K_RF_TURBO = 0,
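
The ath5k_rfb_field kernel-doc above describes a small bit-field (length, position, column shift) inside an RF bank. A deliberately simplified sketch of writing such a field into a bank of 32-bit words; the real driver also honors the column shift and per-chip word ordering, which are left out here:

#include <stdint.h>

struct rfb_field_example {
	uint8_t	 len;	/* field length in bits */
	uint16_t pos;	/* bit offset inside the bank */
};

/* Write 'val' into the field, one bit at a time, LSB first. */
static void rfb_set_field(uint32_t *bank, const struct rfb_field_example *f,
			  uint32_t val)
{
	uint16_t bit;

	for (bit = 0; bit < f->len; bit++) {
		uint16_t abs_bit = f->pos + bit;
		uint32_t mask = 1u << (abs_bit & 31);

		if (val & (1u << bit))
			bank[abs_bit >> 5] |= mask;
		else
			bank[abs_bit >> 5] &= ~mask;
	}
}
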
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index ebfae05..4d21df0 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -18,13 +18,17 @@
  *
  */
 
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
  * Mode-specific RF Gain table (64bytes) for RF5111/5112
  * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
  * RF Gain values are included in AR5K_AR5210_INI)
  */
 struct ath5k_ini_rfgain {
-	u16	rfg_register;	/* RF Gain register address */
+	u16	rfg_register;
 	u32	rfg_value[2];	/* [freq (see below)] */
 };
 
@@ -455,18 +459,31 @@
 #define AR5K_GAIN_CHECK_ADJUST(_g)		\
 	((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
 
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
 struct ath5k_gain_opt_step {
 	s8				gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
 	s8				gos_gain;
 };
 
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
 struct ath5k_gain_opt {
 	u8				go_default;
 	u8				go_steps_count;
 	const struct ath5k_gain_opt_step	go_step[AR5K_GAIN_STEP_COUNT];
 };
 
+
 /*
+ * RF5111
  * Parameters on gos_param:
  * 1) Tx clip PHY register
  * 2) PWD 90 RF register
@@ -490,6 +507,7 @@
 };
 
 /*
+ * RF5112
  * Parameters on gos_param:
  * 1) Mixgain ovr RF register
  * 2) PWD 138 RF register
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index 39f002e..00f0158 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -3,7 +3,8 @@
 
 #include <linux/tracepoint.h>
 
-#ifndef CONFIG_ATH5K_TRACER
+
+#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__)
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, ...) \
 static inline void trace_ ## name(proto) {}
@@ -93,7 +94,7 @@
 
 #endif /* __TRACE_ATH5K_H */
 
-#ifdef CONFIG_ATH5K_TRACER
+#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
 
 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
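
The trace.h hunks above make the tracepoints compile away when CONFIG_ATH5K_TRACER is off or when sparse (__CHECKER__) parses the file. A small userspace sketch of the same stub technique; EXAMPLE_TRACING is a stand-in switch, and the real TRACE_EVENT() macro takes additional TP_PROTO()/TP_ARGS() arguments that are omitted here:

#include <stdio.h>

#if !defined(EXAMPLE_TRACING)
/* Tracing compiled out: each tracepoint becomes an empty static inline. */
#define TRACE_EVENT(name, proto) \
static inline void trace_ ## name(proto) {}
#else
/* Tracing enabled: a trivial printf-based stand-in for a real tracepoint. */
#define TRACE_EVENT(name, proto) \
static void trace_ ## name(proto) { printf("trace: " #name "\n"); }
#endif

TRACE_EVENT(example_event, int value)

int main(void)
{
	trace_example_event(42);	/* no-op unless EXAMPLE_TRACING is defined */
	return 0;
}
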
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8f7a0d1..7070693 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -23,7 +23,7 @@
 
 obj-$(CONFIG_ATH6KL) := ath6kl.o
 ath6kl-y += debug.o
-ath6kl-y += htc_hif.o
+ath6kl-y += hif.o
 ath6kl-y += htc.o
 ath6kl-y += bmi.o
 ath6kl-y += cfg80211.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index c5d11cc..bce3575 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -19,165 +19,6 @@
 #include "target.h"
 #include "debug.h"
 
-static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
-{
-	u32 addr;
-	unsigned long timeout;
-	int ret;
-
-	ar->bmi.cmd_credits = 0;
-
-	/* Read the counter register to get the command credits */
-	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
-
-	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
-	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
-		/*
-		 * Hit the credit counter with a 4-byte access, the first byte
-		 * read will hit the counter and cause a decrement, while the
-		 * remaining 3 bytes has no effect. The rationale behind this
-		 * is to make all HIF accesses 4-byte aligned.
-		 */
-		ret = hif_read_write_sync(ar, addr,
-					 (u8 *)&ar->bmi.cmd_credits, 4,
-					 HIF_RD_SYNC_BYTE_INC);
-		if (ret) {
-			ath6kl_err("Unable to decrement the command credit count register: %d\n",
-				   ret);
-			return ret;
-		}
-
-		/* The counter is only 8 bits.
-		 * Ignore anything in the upper 3 bytes
-		 */
-		ar->bmi.cmd_credits &= 0xFF;
-	}
-
-	if (!ar->bmi.cmd_credits) {
-		ath6kl_err("bmi communication timeout\n");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
-{
-	unsigned long timeout;
-	u32 rx_word = 0;
-	int ret = 0;
-
-	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
-	while (time_before(jiffies, timeout) && !rx_word) {
-		ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
-					  (u8 *)&rx_word, sizeof(rx_word),
-					  HIF_RD_SYNC_BYTE_INC);
-		if (ret) {
-			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
-			return ret;
-		}
-
-		 /* all we really want is one bit */
-		rx_word &= (1 << ENDPOINT1);
-	}
-
-	if (!rx_word) {
-		ath6kl_err("bmi_recv_buf FIFO empty\n");
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
-	int ret;
-	u32 addr;
-
-	ret = ath6kl_get_bmi_cmd_credits(ar);
-	if (ret)
-		return ret;
-
-	addr = ar->mbox_info.htc_addr;
-
-	ret = hif_read_write_sync(ar, addr, buf, len,
-				  HIF_WR_SYNC_BYTE_INC);
-	if (ret)
-		ath6kl_err("unable to send the bmi data to the device\n");
-
-	return ret;
-}
-
-static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
-	int ret;
-	u32 addr;
-
-	/*
-	 * During normal bootup, small reads may be required.
-	 * Rather than issue an HIF Read and then wait as the Target
-	 * adds successive bytes to the FIFO, we wait here until
-	 * we know that response data is available.
-	 *
-	 * This allows us to cleanly timeout on an unexpected
-	 * Target failure rather than risk problems at the HIF level.
-	 * In particular, this avoids SDIO timeouts and possibly garbage
-	 * data on some host controllers.  And on an interconnect
-	 * such as Compact Flash (as well as some SDIO masters) which
-	 * does not provide any indication on data timeout, it avoids
-	 * a potential hang or garbage response.
-	 *
-	 * Synchronization is more difficult for reads larger than the
-	 * size of the MBOX FIFO (128B), because the Target is unable
-	 * to push the 129th byte of data until AFTER the Host posts an
-	 * HIF Read and removes some FIFO data.  So for large reads the
-	 * Host proceeds to post an HIF Read BEFORE all the data is
-	 * actually available to read.  Fortunately, large BMI reads do
-	 * not occur in practice -- they're supported for debug/development.
-	 *
-	 * So Host/Target BMI synchronization is divided into these cases:
-	 *  CASE 1: length < 4
-	 *        Should not happen
-	 *
-	 *  CASE 2: 4 <= length <= 128
-	 *        Wait for first 4 bytes to be in FIFO
-	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
-	 *        a BMI command credit, which indicates that the ENTIRE
-	 *        response is available in the the FIFO
-	 *
-	 *  CASE 3: length > 128
-	 *        Wait for the first 4 bytes to be in FIFO
-	 *
-	 * For most uses, a small timeout should be sufficient and we will
-	 * usually see a response quickly; but there may be some unusual
-	 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
-	 * For now, we use an unbounded busy loop while waiting for
-	 * BMI_EXECUTE.
-	 *
-	 * If BMI_EXECUTE ever needs to support longer-latency execution,
-	 * especially in production, this code needs to be enhanced to sleep
-	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
-	 * a function of Host processor speed.
-	 */
-	if (len >= 4) { /* NB: Currently, always true */
-		ret = ath6kl_bmi_get_rx_lkahd(ar);
-		if (ret)
-			return ret;
-	}
-
-	addr = ar->mbox_info.htc_addr;
-	ret = hif_read_write_sync(ar, addr, buf, len,
-				  HIF_RD_SYNC_BYTE_INC);
-	if (ret) {
-		ath6kl_err("Unable to read the bmi data from the device: %d\n",
-			   ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 int ath6kl_bmi_done(struct ath6kl *ar)
 {
 	int ret;
@@ -190,14 +31,12 @@
 
 	ar->bmi.done_sent = true;
 
-	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+	ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
 	if (ret) {
 		ath6kl_err("Unable to send bmi done: %d\n", ret);
 		return ret;
 	}
 
-	ath6kl_bmi_cleanup(ar);
-
 	return 0;
 }
 
@@ -212,13 +51,13 @@
 		return -EACCES;
 	}
 
-	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+	ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
 	if (ret) {
 		ath6kl_err("Unable to send get target info: %d\n", ret);
 		return ret;
 	}
 
-	ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+	ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
 				  sizeof(targ_info->version));
 	if (ret) {
 		ath6kl_err("Unable to recv target info: %d\n", ret);
@@ -227,7 +66,7 @@
 
 	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
 		/* Determine how many bytes are in the Target's targ_info */
-		ret = ath6kl_bmi_recv_buf(ar,
+		ret = ath6kl_hif_bmi_read(ar,
 				   (u8 *)&targ_info->byte_count,
 				   sizeof(targ_info->byte_count));
 		if (ret) {
@@ -246,7 +85,7 @@
 		}
 
 		/* Read the remainder of the targ_info */
-		ret = ath6kl_bmi_recv_buf(ar,
+		ret = ath6kl_hif_bmi_read(ar,
 				   ((u8 *)targ_info) +
 				   sizeof(targ_info->byte_count),
 				   sizeof(*targ_info) -
@@ -278,8 +117,8 @@
 		return -EACCES;
 	}
 
-	size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -292,8 +131,8 @@
 	len_remain = len;
 
 	while (len_remain) {
-		rx_len = (len_remain < BMI_DATASZ_MAX) ?
-					len_remain : BMI_DATASZ_MAX;
+		rx_len = (len_remain < ar->bmi.max_data_size) ?
+					len_remain : ar->bmi.max_data_size;
 		offset = 0;
 		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
 		offset += sizeof(cid);
@@ -302,13 +141,13 @@
 		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
 		offset += sizeof(len);
 
-		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+		ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 		if (ret) {
 			ath6kl_err("Unable to write to the device: %d\n",
 				   ret);
 			return ret;
 		}
-		ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
+		ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
 		if (ret) {
 			ath6kl_err("Unable to read from the device: %d\n",
 				   ret);
@@ -328,7 +167,7 @@
 	u32 offset;
 	u32 len_remain, tx_len;
 	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
-	u8 aligned_buf[BMI_DATASZ_MAX];
+	u8 aligned_buf[400];
 	u8 *src;
 
 	if (ar->bmi.done_sent) {
@@ -336,12 +175,15 @@
 		return -EACCES;
 	}
 
-	if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+	if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
-	memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+	if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
+		return -E2BIG;
+
+	memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
 
 	ath6kl_dbg(ATH6KL_DBG_BMI,
 		  "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
@@ -350,7 +192,7 @@
 	while (len_remain) {
 		src = &buf[len - len_remain];
 
-		if (len_remain < (BMI_DATASZ_MAX - header)) {
+		if (len_remain < (ar->bmi.max_data_size - header)) {
 			if (len_remain & 3) {
 				/* align it with 4 bytes */
 				len_remain = len_remain +
@@ -360,7 +202,7 @@
 			}
 			tx_len = len_remain;
 		} else {
-			tx_len = (BMI_DATASZ_MAX - header);
+			tx_len = (ar->bmi.max_data_size - header);
 		}
 
 		offset = 0;
@@ -373,7 +215,7 @@
 		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
 		offset += tx_len;
 
-		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+		ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 		if (ret) {
 			ath6kl_err("Unable to write to the device: %d\n",
 				   ret);
@@ -398,7 +240,7 @@
 	}
 
 	size = sizeof(cid) + sizeof(addr) + sizeof(param);
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -415,13 +257,13 @@
 	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
 	offset += sizeof(*param);
 
-	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 	if (ret) {
 		ath6kl_err("Unable to write to the device: %d\n", ret);
 		return ret;
 	}
 
-	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+	ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
 	if (ret) {
 		ath6kl_err("Unable to read from the device: %d\n", ret);
 		return ret;
@@ -445,7 +287,7 @@
 	}
 
 	size = sizeof(cid) + sizeof(addr);
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -459,7 +301,7 @@
 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
 	offset += sizeof(addr);
 
-	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 	if (ret) {
 		ath6kl_err("Unable to write to the device: %d\n", ret);
 		return ret;
@@ -481,7 +323,7 @@
 	}
 
 	size = sizeof(cid) + sizeof(addr);
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -495,13 +337,13 @@
 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
 	offset += sizeof(addr);
 
-	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 	if (ret) {
 		ath6kl_err("Unable to write to the device: %d\n", ret);
 		return ret;
 	}
 
-	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+	ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
 	if (ret) {
 		ath6kl_err("Unable to read from the device: %d\n", ret);
 		return ret;
@@ -524,7 +366,7 @@
 	}
 
 	size = sizeof(cid) + sizeof(addr) + sizeof(param);
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -542,7 +384,7 @@
 	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
 	offset += sizeof(param);
 
-	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 	if (ret) {
 		ath6kl_err("Unable to write to the device: %d\n", ret);
 		return ret;
@@ -565,8 +407,8 @@
 		return -EACCES;
 	}
 
-	size = BMI_DATASZ_MAX + header;
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	size = ar->bmi.max_data_size + header;
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -577,8 +419,8 @@
 
 	len_remain = len;
 	while (len_remain) {
-		tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
-			  len_remain : (BMI_DATASZ_MAX - header);
+		tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
+			  len_remain : (ar->bmi.max_data_size - header);
 
 		offset = 0;
 		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
@@ -589,7 +431,7 @@
 			tx_len);
 		offset += tx_len;
 
-		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+		ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 		if (ret) {
 			ath6kl_err("Unable to write to the device: %d\n",
 				   ret);
@@ -615,7 +457,7 @@
 	}
 
 	size = sizeof(cid) + sizeof(addr);
-	if (size > MAX_BMI_CMDBUF_SZ) {
+	if (size > ar->bmi.max_cmd_size) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -631,7 +473,7 @@
 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
 	offset += sizeof(addr);
 
-	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
 	if (ret) {
 		ath6kl_err("Unable to start LZ stream to the device: %d\n",
 			   ret);
@@ -672,10 +514,20 @@
 	return ret;
 }
 
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+	ar->bmi.done_sent = false;
+}
+
 int ath6kl_bmi_init(struct ath6kl *ar)
 {
-	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+	if (WARN_ON(ar->bmi.max_data_size == 0))
+		return -EINVAL;
 
+	/* cmd + addr + len + data_size */
+	ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
+
+	ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
 	if (!ar->bmi.cmd_buf)
 		return -ENOMEM;
 
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index 96851d5..f1ca681 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -44,12 +44,6 @@
  * BMI handles all required Target-side cache flushing.
  */
 
-#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
-			   (sizeof(u32) * 3 /* cmd + addr + len */))
-
-/* Maximum data size used for BMI transfers */
-#define BMI_DATASZ_MAX                      256
-
 /* BMI Commands */
 
 #define BMI_NO_COMMAND                      0
@@ -230,6 +224,8 @@
 
 int ath6kl_bmi_init(struct ath6kl *ar);
 void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
 int ath6kl_bmi_done(struct ath6kl *ar);
 int ath6kl_bmi_get_target_info(struct ath6kl *ar,
 			       struct ath6kl_bmi_target_info *targ_info);
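
The bmi.c/bmi.h changes above replace the fixed BMI_DATASZ_MAX with a per-target ar->bmi.max_data_size and size the command buffer as max_data_size plus a three-word header (cmd + addr + len). A small userspace sketch of the resulting chunking math (names are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define HDR (3 * sizeof(uint32_t))	/* cmd + addr + len */

/* How many BMI round trips a transfer of 'total' bytes needs. */
static unsigned int bmi_num_chunks(uint32_t total, uint32_t max_data_size)
{
	uint32_t per_chunk = max_data_size - HDR;

	return (total + per_chunk - 1) / per_chunk;
}

int main(void)
{
	/* e.g. a 1500 byte write with a 256 byte BMI data size -> 7 chunks */
	printf("%u\n", bmi_num_chunks(1500, 256));
	return 0;
}
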
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index f517eb8..6c59a21 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -123,17 +123,50 @@
 	.bitrates = ath6kl_a_rates,
 };
 
-static int ath6kl_set_wpa_version(struct ath6kl *ar,
+#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
+
+/* returns true if scheduled scan was stopped */
+static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
+{
+	struct ath6kl *ar = vif->ar;
+
+	if (ar->state != ATH6KL_STATE_SCHED_SCAN)
+		return false;
+
+	del_timer_sync(&vif->sched_scan_timer);
+
+	ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+					   ATH6KL_HOST_MODE_AWAKE);
+
+	ar->state = ATH6KL_STATE_ON;
+
+	return true;
+}
+
+static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
+{
+	struct ath6kl *ar = vif->ar;
+	bool stopped;
+
+	stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+	if (!stopped)
+		return;
+
+	cfg80211_sched_scan_stopped(ar->wiphy);
+}
+
+static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
 				  enum nl80211_wpa_versions wpa_version)
 {
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
 
 	if (!wpa_version) {
-		ar->auth_mode = NONE_AUTH;
+		vif->auth_mode = NONE_AUTH;
 	} else if (wpa_version & NL80211_WPA_VERSION_2) {
-		ar->auth_mode = WPA2_AUTH;
+		vif->auth_mode = WPA2_AUTH;
 	} else if (wpa_version & NL80211_WPA_VERSION_1) {
-		ar->auth_mode = WPA_AUTH;
+		vif->auth_mode = WPA_AUTH;
 	} else {
 		ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
 		return -ENOTSUPP;
@@ -142,25 +175,24 @@
 	return 0;
 }
 
-static int ath6kl_set_auth_type(struct ath6kl *ar,
+static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
 				enum nl80211_auth_type auth_type)
 {
-
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
 
 	switch (auth_type) {
 	case NL80211_AUTHTYPE_OPEN_SYSTEM:
-		ar->dot11_auth_mode = OPEN_AUTH;
+		vif->dot11_auth_mode = OPEN_AUTH;
 		break;
 	case NL80211_AUTHTYPE_SHARED_KEY:
-		ar->dot11_auth_mode = SHARED_AUTH;
+		vif->dot11_auth_mode = SHARED_AUTH;
 		break;
 	case NL80211_AUTHTYPE_NETWORK_EAP:
-		ar->dot11_auth_mode = LEAP_AUTH;
+		vif->dot11_auth_mode = LEAP_AUTH;
 		break;
 
 	case NL80211_AUTHTYPE_AUTOMATIC:
-		ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+		vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
 		break;
 
 	default:
@@ -171,11 +203,11 @@
 	return 0;
 }
 
-static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
 {
-	u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
-	u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
-		&ar->grp_crypto_len;
+	u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
+	u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
+		&vif->grp_crypto_len;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
 		   __func__, cipher, ucast);
@@ -202,6 +234,10 @@
 		*ar_cipher = AES_CRYPT;
 		*ar_cipher_len = 0;
 		break;
+	case WLAN_CIPHER_SUITE_SMS4:
+		*ar_cipher = WAPI_CRYPT;
+		*ar_cipher_len = 0;
+		break;
 	default:
 		ath6kl_err("cipher 0x%x not supported\n", cipher);
 		return -ENOTSUPP;
@@ -210,28 +246,35 @@
 	return 0;
 }
 
-static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
 {
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
 
 	if (key_mgmt == WLAN_AKM_SUITE_PSK) {
-		if (ar->auth_mode == WPA_AUTH)
-			ar->auth_mode = WPA_PSK_AUTH;
-		else if (ar->auth_mode == WPA2_AUTH)
-			ar->auth_mode = WPA2_PSK_AUTH;
+		if (vif->auth_mode == WPA_AUTH)
+			vif->auth_mode = WPA_PSK_AUTH;
+		else if (vif->auth_mode == WPA2_AUTH)
+			vif->auth_mode = WPA2_PSK_AUTH;
+	} else if (key_mgmt == 0x00409600) {
+		if (vif->auth_mode == WPA_AUTH)
+			vif->auth_mode = WPA_AUTH_CCKM;
+		else if (vif->auth_mode == WPA2_AUTH)
+			vif->auth_mode = WPA2_AUTH_CCKM;
 	} else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
-		ar->auth_mode = NONE_AUTH;
+		vif->auth_mode = NONE_AUTH;
 	}
 }
 
-static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
 {
+	struct ath6kl *ar = vif->ar;
+
 	if (!test_bit(WMI_READY, &ar->flag)) {
 		ath6kl_err("wmi is not ready\n");
 		return false;
 	}
 
-	if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+	if (!test_bit(WLAN_ENABLED, &vif->flags)) {
 		ath6kl_err("wlan disabled\n");
 		return false;
 	}
@@ -239,15 +282,146 @@
 	return true;
 }
 
+static bool ath6kl_is_wpa_ie(const u8 *pos)
+{
+	return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+		pos[2] == 0x00 && pos[3] == 0x50 &&
+		pos[4] == 0xf2 && pos[5] == 0x01;
+}
+
+static bool ath6kl_is_rsn_ie(const u8 *pos)
+{
+	return pos[0] == WLAN_EID_RSN;
+}
+
+static bool ath6kl_is_wps_ie(const u8 *pos)
+{
+	return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+		pos[1] >= 4 &&
+		pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
+		pos[5] == 0x04);
+}
+
+static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
+				    size_t ies_len)
+{
+	struct ath6kl *ar = vif->ar;
+	const u8 *pos;
+	u8 *buf = NULL;
+	size_t len = 0;
+	int ret;
+
+	/*
+	 * Clear previously set flag
+	 */
+
+	ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+	/*
+	 * Filter out RSN/WPA IE(s)
+	 */
+
+	if (ies && ies_len) {
+		buf = kmalloc(ies_len, GFP_KERNEL);
+		if (buf == NULL)
+			return -ENOMEM;
+		pos = ies;
+
+		while (pos + 1 < ies + ies_len) {
+			if (pos + 2 + pos[1] > ies + ies_len)
+				break;
+			if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
+				memcpy(buf + len, pos, 2 + pos[1]);
+				len += 2 + pos[1];
+			}
+
+			if (ath6kl_is_wps_ie(pos))
+				ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
+
+			pos += 2 + pos[1];
+		}
+	}
+
+	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+				       WMI_FRAME_ASSOC_REQ, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
+{
+	switch (type) {
+	case NL80211_IFTYPE_STATION:
+		*nw_type = INFRA_NETWORK;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		*nw_type = ADHOC_NETWORK;
+		break;
+	case NL80211_IFTYPE_AP:
+		*nw_type = AP_NETWORK;
+		break;
+	case NL80211_IFTYPE_P2P_CLIENT:
+		*nw_type = INFRA_NETWORK;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+		*nw_type = AP_NETWORK;
+		break;
+	default:
+		ath6kl_err("invalid interface type %u\n", type);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
+				   u8 *if_idx, u8 *nw_type)
+{
+	int i;
+
+	if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
+		return false;
+
+	if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
+	    ar->num_vif))
+		return false;
+
+	if (type == NL80211_IFTYPE_STATION ||
+	    type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
+		for (i = 0; i < ar->vif_max; i++) {
+			if ((ar->avail_idx_map >> i) & BIT(0)) {
+				*if_idx = i;
+				return true;
+			}
+		}
+	}
+
+	if (type == NL80211_IFTYPE_P2P_CLIENT ||
+	    type == NL80211_IFTYPE_P2P_GO) {
+		for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
+			if ((ar->avail_idx_map >> i) & BIT(0)) {
+				*if_idx = i;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
 static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
 				   struct cfg80211_connect_params *sme)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 	int status;
+	u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
 
-	ar->sme_state = SME_CONNECTING;
+	ath6kl_cfg80211_sscan_disable(vif);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	vif->sme_state = SME_CONNECTING;
+
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -287,12 +461,22 @@
 		}
 	}
 
-	if (test_bit(CONNECTED, &ar->flag) &&
-	    ar->ssid_len == sme->ssid_len &&
-	    !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
-		ar->reconnect_flag = true;
-		status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
-						  ar->ch_hint);
+	if (sme->ie && (sme->ie_len > 0)) {
+		status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+		if (status) {
+			up(&ar->sem);
+			return status;
+		}
+	} else
+		ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+	if (test_bit(CONNECTED, &vif->flags) &&
+	    vif->ssid_len == sme->ssid_len &&
+	    !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+		vif->reconnect_flag = true;
+		status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
+						  vif->req_bssid,
+						  vif->ch_hint);
 
 		up(&ar->sem);
 		if (status) {
@@ -300,42 +484,43 @@
 			return -EIO;
 		}
 		return 0;
-	} else if (ar->ssid_len == sme->ssid_len &&
-		   !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
-		ath6kl_disconnect(ar);
+	} else if (vif->ssid_len == sme->ssid_len &&
+		   !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+		ath6kl_disconnect(vif);
 	}
 
-	memset(ar->ssid, 0, sizeof(ar->ssid));
-	ar->ssid_len = sme->ssid_len;
-	memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+	memset(vif->ssid, 0, sizeof(vif->ssid));
+	vif->ssid_len = sme->ssid_len;
+	memcpy(vif->ssid, sme->ssid, sme->ssid_len);
 
 	if (sme->channel)
-		ar->ch_hint = sme->channel->center_freq;
+		vif->ch_hint = sme->channel->center_freq;
 
-	memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+	memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
 	if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
-		memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+		memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
 
-	ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+	ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
 
-	status = ath6kl_set_auth_type(ar, sme->auth_type);
+	status = ath6kl_set_auth_type(vif, sme->auth_type);
 	if (status) {
 		up(&ar->sem);
 		return status;
 	}
 
 	if (sme->crypto.n_ciphers_pairwise)
-		ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+		ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
 	else
-		ath6kl_set_cipher(ar, 0, true);
+		ath6kl_set_cipher(vif, 0, true);
 
-	ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+	ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
 
 	if (sme->crypto.n_akm_suites)
-		ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+		ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
 
 	if ((sme->key_len) &&
-	    (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+	    (vif->auth_mode == NONE_AUTH) &&
+	    (vif->prwise_crypto == WEP_CRYPT)) {
 		struct ath6kl_key *key = NULL;
 
 		if (sme->key_idx < WMI_MIN_KEY_INDEX ||
@@ -346,56 +531,60 @@
 			return -ENOENT;
 		}
 
-		key = &ar->keys[sme->key_idx];
+		key = &vif->keys[sme->key_idx];
 		key->key_len = sme->key_len;
 		memcpy(key->key, sme->key, key->key_len);
-		key->cipher = ar->prwise_crypto;
-		ar->def_txkey_index = sme->key_idx;
+		key->cipher = vif->prwise_crypto;
+		vif->def_txkey_index = sme->key_idx;
 
-		ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
-				      ar->prwise_crypto,
+		ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
+				      vif->prwise_crypto,
 				      GROUP_USAGE | TX_USAGE,
 				      key->key_len,
-				      NULL,
+				      NULL, 0,
 				      key->key, KEY_OP_INIT_VAL, NULL,
 				      NO_SYNC_WMIFLAG);
 	}
 
 	if (!ar->usr_bss_filter) {
-		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-		if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+		if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+		    ALL_BSS_FILTER, 0) != 0) {
 			ath6kl_err("couldn't set bss filtering\n");
 			up(&ar->sem);
 			return -EIO;
 		}
 	}
 
-	ar->nw_type = ar->next_mode;
+	vif->nw_type = vif->next_mode;
+
+	if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
+		nw_subtype = SUBTYPE_P2PCLIENT;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 		   "%s: connect called with authmode %d dot11 auth %d"
 		   " PW crypto %d PW crypto len %d GRP crypto %d"
 		   " GRP crypto len %d channel hint %u\n",
 		   __func__,
-		   ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
-		   ar->prwise_crypto_len, ar->grp_crypto,
-		   ar->grp_crypto_len, ar->ch_hint);
+		   vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+		   vif->prwise_crypto_len, vif->grp_crypto,
+		   vif->grp_crypto_len, vif->ch_hint);
 
-	ar->reconnect_flag = 0;
-	status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
-					ar->dot11_auth_mode, ar->auth_mode,
-					ar->prwise_crypto,
-					ar->prwise_crypto_len,
-					ar->grp_crypto, ar->grp_crypto_len,
-					ar->ssid_len, ar->ssid,
-					ar->req_bssid, ar->ch_hint,
-					ar->connect_ctrl_flags);
+	vif->reconnect_flag = 0;
+	status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+					vif->dot11_auth_mode, vif->auth_mode,
+					vif->prwise_crypto,
+					vif->prwise_crypto_len,
+					vif->grp_crypto, vif->grp_crypto_len,
+					vif->ssid_len, vif->ssid,
+					vif->req_bssid, vif->ch_hint,
+					ar->connect_ctrl_flags, nw_subtype);
 
 	up(&ar->sem);
 
 	if (status == -EINVAL) {
-		memset(ar->ssid, 0, sizeof(ar->ssid));
-		ar->ssid_len = 0;
+		memset(vif->ssid, 0, sizeof(vif->ssid));
+		vif->ssid_len = 0;
 		ath6kl_err("invalid request\n");
 		return -ENOENT;
 	} else if (status) {
@@ -404,28 +593,40 @@
 	}
 
 	if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
-	    ((ar->auth_mode == WPA_PSK_AUTH)
-	     || (ar->auth_mode == WPA2_PSK_AUTH))) {
-		mod_timer(&ar->disconnect_timer,
+	    ((vif->auth_mode == WPA_PSK_AUTH)
+	     || (vif->auth_mode == WPA2_PSK_AUTH))) {
+		mod_timer(&vif->disconnect_timer,
 			  jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
 	}
 
 	ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
-	set_bit(CONNECT_PEND, &ar->flag);
+	set_bit(CONNECT_PEND, &vif->flags);
 
 	return 0;
 }
 
-static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
+				    enum network_type nw_type,
+				    const u8 *bssid,
 				    struct ieee80211_channel *chan,
 				    const u8 *beacon_ie, size_t beacon_ie_len)
 {
+	struct ath6kl *ar = vif->ar;
 	struct cfg80211_bss *bss;
+	u16 cap_mask, cap_val;
 	u8 *ie;
 
-	bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
-			       ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
-			       WLAN_CAPABILITY_ESS);
+	if (nw_type & ADHOC_NETWORK) {
+		cap_mask = WLAN_CAPABILITY_IBSS;
+		cap_val = WLAN_CAPABILITY_IBSS;
+	} else {
+		cap_mask = WLAN_CAPABILITY_ESS;
+		cap_val = WLAN_CAPABILITY_ESS;
+	}
+
+	bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
+			       vif->ssid, vif->ssid_len,
+			       cap_mask, cap_val);
 	if (bss == NULL) {
 		/*
 		 * Since cfg80211 may not yet know about the BSS,
@@ -435,21 +636,20 @@
 		 * Prepend SSID element since it is not included in the Beacon
 		 * IEs from the target.
 		 */
-		ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+		ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
 		if (ie == NULL)
 			return -ENOMEM;
 		ie[0] = WLAN_EID_SSID;
-		ie[1] = ar->ssid_len;
-		memcpy(ie + 2, ar->ssid, ar->ssid_len);
-		memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
-		bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
-					  bssid, 0, WLAN_CAPABILITY_ESS, 100,
-					  ie, 2 + ar->ssid_len + beacon_ie_len,
+		ie[1] = vif->ssid_len;
+		memcpy(ie + 2, vif->ssid, vif->ssid_len);
+		memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
+		bss = cfg80211_inform_bss(ar->wiphy, chan,
+					  bssid, 0, cap_val, 100,
+					  ie, 2 + vif->ssid_len + beacon_ie_len,
 					  0, GFP_KERNEL);
 		if (bss)
-			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
-				   "%pM prior to indicating connect/roamed "
-				   "event\n", bssid);
+			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to "
+				   "cfg80211\n", bssid);
 		kfree(ie);
 	} else
 		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
@@ -463,7 +663,7 @@
 	return 0;
 }
 
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
 				   u8 *bssid, u16 listen_intvl,
 				   u16 beacon_intvl,
 				   enum network_type nw_type,
@@ -471,6 +671,7 @@
 				   u8 assoc_resp_len, u8 *assoc_info)
 {
 	struct ieee80211_channel *chan;
+	struct ath6kl *ar = vif->ar;
 
 	/* capinfo + listen interval */
 	u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -489,11 +690,11 @@
 	 * Store Beacon interval here; DTIM period will be available only once
 	 * a Beacon frame from the AP is seen.
 	 */
-	ar->assoc_bss_beacon_int = beacon_intvl;
-	clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+	vif->assoc_bss_beacon_int = beacon_intvl;
+	clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
 
 	if (nw_type & ADHOC_NETWORK) {
-		if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+		if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
 			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 				   "%s: ath6k not in ibss mode\n", __func__);
 			return;
@@ -501,39 +702,39 @@
 	}
 
 	if (nw_type & INFRA_NETWORK) {
-		if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
-		    ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+		if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+		    vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
 			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 				   "%s: ath6k not in station mode\n", __func__);
 			return;
 		}
 	}
 
-	chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
+	chan = ieee80211_get_channel(ar->wiphy, (int) channel);
 
+	if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info,
+				     beacon_ie_len) < 0) {
+		ath6kl_err("could not add cfg80211 bss entry\n");
+		return;
+	}
 
 	if (nw_type & ADHOC_NETWORK) {
-		cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+			   nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+		cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
 		return;
 	}
 
-	if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
-				     beacon_ie_len) < 0) {
-		ath6kl_err("could not add cfg80211 bss entry for "
-			   "connect/roamed notification\n");
-		return;
-	}
-
-	if (ar->sme_state == SME_CONNECTING) {
+	if (vif->sme_state == SME_CONNECTING) {
 		/* inform connect result to cfg80211 */
-		ar->sme_state = SME_CONNECTED;
-		cfg80211_connect_result(ar->net_dev, bssid,
+		vif->sme_state = SME_CONNECTED;
+		cfg80211_connect_result(vif->ndev, bssid,
 					assoc_req_ie, assoc_req_len,
 					assoc_resp_ie, assoc_resp_len,
 					WLAN_STATUS_SUCCESS, GFP_KERNEL);
-	} else if (ar->sme_state == SME_CONNECTED) {
+	} else if (vif->sme_state == SME_CONNECTED) {
 		/* inform roam event to cfg80211 */
-		cfg80211_roamed(ar->net_dev, chan, bssid,
+		cfg80211_roamed(vif->ndev, chan, bssid,
 				assoc_req_ie, assoc_req_len,
 				assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
 	}
@@ -542,12 +743,15 @@
 static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
 				      struct net_device *dev, u16 reason_code)
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
 		   reason_code);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	ath6kl_cfg80211_sscan_disable(vif);
+
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -560,44 +764,46 @@
 		return -ERESTARTSYS;
 	}
 
-	ar->reconnect_flag = 0;
-	ath6kl_disconnect(ar);
-	memset(ar->ssid, 0, sizeof(ar->ssid));
-	ar->ssid_len = 0;
+	vif->reconnect_flag = 0;
+	ath6kl_disconnect(vif);
+	memset(vif->ssid, 0, sizeof(vif->ssid));
+	vif->ssid_len = 0;
 
 	if (!test_bit(SKIP_SCAN, &ar->flag))
-		memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+		memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
 
 	up(&ar->sem);
 
-	ar->sme_state = SME_DISCONNECTED;
+	vif->sme_state = SME_DISCONNECTED;
 
 	return 0;
 }
 
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
 				      u8 *bssid, u8 assoc_resp_len,
 				      u8 *assoc_info, u16 proto_reason)
 {
-	if (ar->scan_req) {
-		cfg80211_scan_done(ar->scan_req, true);
-		ar->scan_req = NULL;
+	struct ath6kl *ar = vif->ar;
+
+	if (vif->scan_req) {
+		cfg80211_scan_done(vif->scan_req, true);
+		vif->scan_req = NULL;
 	}
 
-	if (ar->nw_type & ADHOC_NETWORK) {
-		if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+	if (vif->nw_type & ADHOC_NETWORK) {
+		if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
 			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 				   "%s: ath6k not in ibss mode\n", __func__);
 			return;
 		}
 		memset(bssid, 0, ETH_ALEN);
-		cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+		cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
 		return;
 	}
 
-	if (ar->nw_type & INFRA_NETWORK) {
-		if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
-		    ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+	if (vif->nw_type & INFRA_NETWORK) {
+		if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+		    vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
 			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 				   "%s: ath6k not in station mode\n", __func__);
 			return;
@@ -614,42 +820,46 @@
 	 */
 
 	if (reason != DISCONNECT_CMD) {
-		ath6kl_wmi_disconnect_cmd(ar->wmi);
+		ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
 		return;
 	}
 
-	clear_bit(CONNECT_PEND, &ar->flag);
+	clear_bit(CONNECT_PEND, &vif->flags);
 
-	if (ar->sme_state == SME_CONNECTING) {
-		cfg80211_connect_result(ar->net_dev,
+	if (vif->sme_state == SME_CONNECTING) {
+		cfg80211_connect_result(vif->ndev,
 				bssid, NULL, 0,
 				NULL, 0,
 				WLAN_STATUS_UNSPECIFIED_FAILURE,
 				GFP_KERNEL);
-	} else if (ar->sme_state == SME_CONNECTED) {
-		cfg80211_disconnected(ar->net_dev, reason,
+	} else if (vif->sme_state == SME_CONNECTED) {
+		cfg80211_disconnected(vif->ndev, reason,
 				NULL, 0, GFP_KERNEL);
 	}
 
-	ar->sme_state = SME_DISCONNECTED;
+	vif->sme_state = SME_DISCONNECTED;
 }
 
 static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
 				struct cfg80211_scan_request *request)
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	struct ath6kl *ar = ath6kl_priv(ndev);
+	struct ath6kl_vif *vif = netdev_priv(ndev);
 	s8 n_channels = 0;
 	u16 *channels = NULL;
 	int ret = 0;
+	u32 force_fg_scan = 0;
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
+	ath6kl_cfg80211_sscan_disable(vif);
+
 	if (!ar->usr_bss_filter) {
-		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
 		ret = ath6kl_wmi_bssfilter_cmd(
-			ar->wmi,
-			(test_bit(CONNECTED, &ar->flag) ?
+			ar->wmi, vif->fw_vif_idx,
+			(test_bit(CONNECTED, &vif->flags) ?
 			 ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
 		if (ret) {
 			ath6kl_err("couldn't set bss filtering\n");
@@ -664,14 +874,19 @@
 			request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
 
 		for (i = 0; i < request->n_ssids; i++)
-			ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
-						  SPECIFIC_SSID_FLAG,
+			ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+						  i + 1, SPECIFIC_SSID_FLAG,
 						  request->ssids[i].ssid_len,
 						  request->ssids[i].ssid);
 	}
 
+	/*
+	 * FIXME: we should clear the IE in the firmware when it is not set,
+	 * so this check could be removed altogether
+	 */
 	if (request->ie) {
-		ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+		ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+					       WMI_FRAME_PROBE_REQ,
 					       request->ie, request->ie_len);
 		if (ret) {
 			ath6kl_err("failed to set Probe Request appie for "
@@ -702,44 +917,63 @@
 			channels[i] = request->channels[i]->center_freq;
 	}
 
-	ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
-				       false, 0, 0, n_channels, channels);
+	if (test_bit(CONNECTED, &vif->flags))
+		force_fg_scan = 1;
+
+	if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+		    ar->fw_capabilities)) {
+		/*
+		 * If the firmware is capable of doing P2P mgmt operations on
+		 * the station interface, send additional information such as
+		 * the supported rates to advertise and the xmit rates for
+		 * probe requests.
+		 */
+		ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+						WMI_LONG_SCAN, force_fg_scan,
+						false, 0, 0, n_channels,
+						channels, request->no_cck,
+						request->rates);
+	} else {
+		ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
+						WMI_LONG_SCAN, force_fg_scan,
+						false, 0, 0, n_channels,
+						channels);
+	}
 	if (ret)
 		ath6kl_err("wmi_startscan_cmd failed\n");
 	else
-		ar->scan_req = request;
+		vif->scan_req = request;
 
 	kfree(channels);
 
 	return ret;
 }
 
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
 {
+	struct ath6kl *ar = vif->ar;
 	int i;
 
-	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
+		   aborted ? " aborted" : "");
 
-	if (!ar->scan_req)
+	if (!vif->scan_req)
 		return;
 
-	if ((status == -ECANCELED) || (status == -EBUSY)) {
-		cfg80211_scan_done(ar->scan_req, true);
+	if (aborted)
 		goto out;
-	}
 
-	cfg80211_scan_done(ar->scan_req, false);
-
-	if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
-		for (i = 0; i < ar->scan_req->n_ssids; i++) {
-			ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
-						  DISABLE_SSID_FLAG,
+	if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
+		for (i = 0; i < vif->scan_req->n_ssids; i++) {
+			ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+						  i + 1, DISABLE_SSID_FLAG,
 						  0, NULL);
 		}
 	}
 
 out:
-	ar->scan_req = NULL;
+	cfg80211_scan_done(vif->scan_req, aborted);
+	vif->scan_req = NULL;
 }
 
 static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -747,15 +981,22 @@
 				   const u8 *mac_addr,
 				   struct key_params *params)
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	struct ath6kl *ar = ath6kl_priv(ndev);
+	struct ath6kl_vif *vif = netdev_priv(ndev);
 	struct ath6kl_key *key = NULL;
 	u8 key_usage;
 	u8 key_type;
-	int status = 0;
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
+	if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
+		if (params->key_len != WMI_KRK_LEN)
+			return -EINVAL;
+		return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
+					      params->key);
+	}
+
 	if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
 		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 			   "%s: key index %d out of bounds\n", __func__,
@@ -763,7 +1004,7 @@
 		return -ENOENT;
 	}
 
-	key = &ar->keys[key_index];
+	key = &vif->keys[key_index];
 	memset(key, 0, sizeof(struct ath6kl_key));
 
 	if (pairwise)
@@ -772,13 +1013,19 @@
 		key_usage = GROUP_USAGE;
 
 	if (params) {
+		int seq_len = params->seq_len;
+		if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
+		    seq_len > ATH6KL_KEY_SEQ_LEN) {
+			/* Only first half of the WPI PN is configured */
+			seq_len = ATH6KL_KEY_SEQ_LEN;
+		}
 		if (params->key_len > WLAN_MAX_KEY_LEN ||
-		    params->seq_len > sizeof(key->seq))
+		    seq_len > sizeof(key->seq))
 			return -EINVAL;
 
 		key->key_len = params->key_len;
 		memcpy(key->key, params->key, key->key_len);
-		key->seq_len = params->seq_len;
+		key->seq_len = seq_len;
 		memcpy(key->seq, params->seq, key->seq_len);
 		key->cipher = params->cipher;
 	}
@@ -796,31 +1043,33 @@
 	case WLAN_CIPHER_SUITE_CCMP:
 		key_type = AES_CRYPT;
 		break;
+	case WLAN_CIPHER_SUITE_SMS4:
+		key_type = WAPI_CRYPT;
+		break;
 
 	default:
 		return -ENOTSUPP;
 	}
 
-	if (((ar->auth_mode == WPA_PSK_AUTH)
-	     || (ar->auth_mode == WPA2_PSK_AUTH))
+	if (((vif->auth_mode == WPA_PSK_AUTH)
+	     || (vif->auth_mode == WPA2_PSK_AUTH))
 	    && (key_usage & GROUP_USAGE))
-		del_timer(&ar->disconnect_timer);
+		del_timer(&vif->disconnect_timer);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 		   "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
 		   __func__, key_index, key->key_len, key_type,
 		   key_usage, key->seq_len);
 
-	ar->def_txkey_index = key_index;
-
-	if (ar->nw_type == AP_NETWORK && !pairwise &&
-	    (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+	if (vif->nw_type == AP_NETWORK && !pairwise &&
+	    (key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
+	     key_type == WAPI_CRYPT) && params) {
 		ar->ap_mode_bkey.valid = true;
 		ar->ap_mode_bkey.key_index = key_index;
 		ar->ap_mode_bkey.key_type = key_type;
 		ar->ap_mode_bkey.key_len = key->key_len;
 		memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
-		if (!test_bit(CONNECTED, &ar->flag)) {
+		if (!test_bit(CONNECTED, &vif->flags)) {
 			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
 				   "key configuration until AP mode has been "
 				   "started\n");
@@ -832,8 +1081,8 @@
 		}
 	}
 
-	if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
-	    !test_bit(CONNECTED, &ar->flag)) {
+	if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+	    !test_bit(CONNECTED, &vif->flags)) {
 		/*
 		 * Store the key locally so that it can be re-configured after
 		 * the AP mode has properly started
@@ -841,31 +1090,29 @@
 		 */
 		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
 			   "until AP mode has been started\n");
-		ar->wep_key_list[key_index].key_len = key->key_len;
-		memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+		vif->wep_key_list[key_index].key_len = key->key_len;
+		memcpy(vif->wep_key_list[key_index].key, key->key,
+		       key->key_len);
 		return 0;
 	}
 
-	status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
-				       key_type, key_usage, key->key_len,
-				       key->seq, key->key, KEY_OP_INIT_VAL,
-				       (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
-
-	if (status)
-		return -EIO;
-
-	return 0;
+	return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index,
+				     key_type, key_usage, key->key_len,
+				     key->seq, key->seq_len, key->key,
+				     KEY_OP_INIT_VAL,
+				     (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
 }
 
 static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
 				   u8 key_index, bool pairwise,
 				   const u8 *mac_addr)
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	struct ath6kl *ar = ath6kl_priv(ndev);
+	struct ath6kl_vif *vif = netdev_priv(ndev);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -875,15 +1122,15 @@
 		return -ENOENT;
 	}
 
-	if (!ar->keys[key_index].key_len) {
+	if (!vif->keys[key_index].key_len) {
 		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 			   "%s: index %d is empty\n", __func__, key_index);
 		return 0;
 	}
 
-	ar->keys[key_index].key_len = 0;
+	vif->keys[key_index].key_len = 0;
 
-	return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+	return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
 }
 
 static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -892,13 +1139,13 @@
 				   void (*callback) (void *cookie,
 						     struct key_params *))
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	struct ath6kl_vif *vif = netdev_priv(ndev);
 	struct ath6kl_key *key = NULL;
 	struct key_params params;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -908,7 +1155,7 @@
 		return -ENOENT;
 	}
 
-	key = &ar->keys[key_index];
+	key = &vif->keys[key_index];
 	memset(&params, 0, sizeof(params));
 	params.cipher = key->cipher;
 	params.key_len = key->key_len;
@@ -926,15 +1173,15 @@
 					   u8 key_index, bool unicast,
 					   bool multicast)
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	struct ath6kl *ar = ath6kl_priv(ndev);
+	struct ath6kl_vif *vif = netdev_priv(ndev);
 	struct ath6kl_key *key = NULL;
-	int status = 0;
 	u8 key_usage;
 	enum crypto_type key_type = NONE_CRYPT;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -944,43 +1191,41 @@
 		return -ENOENT;
 	}
 
-	if (!ar->keys[key_index].key_len) {
+	if (!vif->keys[key_index].key_len) {
 		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
 			   __func__, key_index);
 		return -EINVAL;
 	}
 
-	ar->def_txkey_index = key_index;
-	key = &ar->keys[ar->def_txkey_index];
+	vif->def_txkey_index = key_index;
+	key = &vif->keys[vif->def_txkey_index];
 	key_usage = GROUP_USAGE;
-	if (ar->prwise_crypto == WEP_CRYPT)
+	if (vif->prwise_crypto == WEP_CRYPT)
 		key_usage |= TX_USAGE;
 	if (unicast)
-		key_type = ar->prwise_crypto;
+		key_type = vif->prwise_crypto;
 	if (multicast)
-		key_type = ar->grp_crypto;
+		key_type = vif->grp_crypto;
 
-	if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+	if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
 		return 0; /* Delay until AP mode has been started */
 
-	status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
-				       key_type, key_usage,
-				       key->key_len, key->seq, key->key,
-				       KEY_OP_INIT_VAL, NULL,
-				       SYNC_BOTH_WMIFLAG);
-	if (status)
-		return -EIO;
-
-	return 0;
+	return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+				     vif->def_txkey_index,
+				     key_type, key_usage,
+				     key->key_len, key->seq, key->seq_len,
+				     key->key,
+				     KEY_OP_INIT_VAL, NULL,
+				     SYNC_BOTH_WMIFLAG);
 }
 
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
 				       bool ismcast)
 {
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 		   "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
 
-	cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+	cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
 				     (ismcast ? NL80211_KEYTYPE_GROUP :
 				      NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
 				     GFP_KERNEL);
@@ -989,12 +1234,17 @@
 static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 {
 	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+	struct ath6kl_vif *vif;
 	int ret;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
 		   changed);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -1014,15 +1264,21 @@
 */
 static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
 				       enum nl80211_tx_power_setting type,
-				       int dbm)
+				       int mbm)
 {
 	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+	struct ath6kl_vif *vif;
 	u8 ath6kl_dbm;
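+	/* cfg80211 passes the tx power in mBm (1/100 dBm) */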
+	int dbm = MBM_TO_DBM(mbm);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
 		   type, dbm);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	switch (type) {
@@ -1037,7 +1293,7 @@
 		return -EOPNOTSUPP;
 	}
 
-	ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+	ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
 
 	return 0;
 }
@@ -1045,14 +1301,19 @@
 static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
 {
 	struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+	struct ath6kl_vif *vif;
 
-	if (!ath6kl_cfg80211_ready(ar))
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
 		return -EIO;
 
-	if (test_bit(CONNECTED, &ar->flag)) {
+	if (!ath6kl_cfg80211_ready(vif))
+		return -EIO;
+
+	if (test_bit(CONNECTED, &vif->flags)) {
 		ar->tx_pwr = 0;
 
-		if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+		if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
 			ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
 			return -EIO;
 		}
@@ -1076,11 +1337,12 @@
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
 	struct wmi_power_mode_cmd mode;
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
 		   __func__, pmgmt, timeout);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	if (pmgmt) {
@@ -1091,7 +1353,8 @@
 		mode.pwr_mode = MAX_PERF_POWER;
 	}
 
-	if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+	if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
+	     mode.pwr_mode) != 0) {
 		ath6kl_err("wmi_powermode_cmd failed\n");
 		return -EIO;
 	}
@@ -1099,41 +1362,83 @@
 	return 0;
 }
 
+static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+						    char *name,
+						    enum nl80211_iftype type,
+						    u32 *flags,
+						    struct vif_params *params)
+{
+	struct ath6kl *ar = wiphy_priv(wiphy);
+	struct net_device *ndev;
+	u8 if_idx, nw_type;
+
+	if (ar->num_vif == ar->vif_max) {
+		ath6kl_err("Reached maximum number of supported vif\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
+		ath6kl_err("Not a supported interface type\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+	if (!ndev)
+		return ERR_PTR(-ENOMEM);
+
+	ar->num_vif++;
+
+	return ndev;
+}
+
+static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
+				     struct net_device *ndev)
+{
+	struct ath6kl *ar = wiphy_priv(wiphy);
+	struct ath6kl_vif *vif = netdev_priv(ndev);
+
+	spin_lock_bh(&ar->list_lock);
+	list_del(&vif->list);
+	spin_unlock_bh(&ar->list_lock);
+
+	ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+
+	ath6kl_deinit_if_data(vif);
+
+	return 0;
+}
+
 static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
 					struct net_device *ndev,
 					enum nl80211_iftype type, u32 *flags,
 					struct vif_params *params)
 {
-	struct ath6kl *ar = ath6kl_priv(ndev);
-	struct wireless_dev *wdev = ar->wdev;
+	struct ath6kl_vif *vif = netdev_priv(ndev);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
 
-	if (!ath6kl_cfg80211_ready(ar))
-		return -EIO;
-
 	switch (type) {
 	case NL80211_IFTYPE_STATION:
-		ar->next_mode = INFRA_NETWORK;
+		vif->next_mode = INFRA_NETWORK;
 		break;
 	case NL80211_IFTYPE_ADHOC:
-		ar->next_mode = ADHOC_NETWORK;
+		vif->next_mode = ADHOC_NETWORK;
 		break;
 	case NL80211_IFTYPE_AP:
-		ar->next_mode = AP_NETWORK;
+		vif->next_mode = AP_NETWORK;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
-		ar->next_mode = INFRA_NETWORK;
+		vif->next_mode = INFRA_NETWORK;
 		break;
 	case NL80211_IFTYPE_P2P_GO:
-		ar->next_mode = AP_NETWORK;
+		vif->next_mode = AP_NETWORK;
 		break;
 	default:
 		ath6kl_err("invalid interface type %u\n", type);
 		return -EOPNOTSUPP;
 	}
 
-	wdev->iftype = type;
+	vif->wdev.iftype = type;
 
 	return 0;
 }
@@ -1143,16 +1448,17 @@
 				     struct cfg80211_ibss_params *ibss_param)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 	int status;
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
-	ar->ssid_len = ibss_param->ssid_len;
-	memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+	vif->ssid_len = ibss_param->ssid_len;
+	memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
 
 	if (ibss_param->channel)
-		ar->ch_hint = ibss_param->channel->center_freq;
+		vif->ch_hint = ibss_param->channel->center_freq;
 
 	if (ibss_param->channel_fixed) {
 		/*
@@ -1164,44 +1470,45 @@
 		return -EOPNOTSUPP;
 	}
 
-	memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+	memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
 	if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
-		memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+		memcpy(vif->req_bssid, ibss_param->bssid,
+		       sizeof(vif->req_bssid));
 
-	ath6kl_set_wpa_version(ar, 0);
+	ath6kl_set_wpa_version(vif, 0);
 
-	status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+	status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
 	if (status)
 		return status;
 
 	if (ibss_param->privacy) {
-		ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
-		ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+		ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
+		ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
 	} else {
-		ath6kl_set_cipher(ar, 0, true);
-		ath6kl_set_cipher(ar, 0, false);
+		ath6kl_set_cipher(vif, 0, true);
+		ath6kl_set_cipher(vif, 0, false);
 	}
 
-	ar->nw_type = ar->next_mode;
+	vif->nw_type = vif->next_mode;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
 		   "%s: connect called with authmode %d dot11 auth %d"
 		   " PW crypto %d PW crypto len %d GRP crypto %d"
 		   " GRP crypto len %d channel hint %u\n",
 		   __func__,
-		   ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
-		   ar->prwise_crypto_len, ar->grp_crypto,
-		   ar->grp_crypto_len, ar->ch_hint);
+		   vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+		   vif->prwise_crypto_len, vif->grp_crypto,
+		   vif->grp_crypto_len, vif->ch_hint);
 
-	status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
-					ar->dot11_auth_mode, ar->auth_mode,
-					ar->prwise_crypto,
-					ar->prwise_crypto_len,
-					ar->grp_crypto, ar->grp_crypto_len,
-					ar->ssid_len, ar->ssid,
-					ar->req_bssid, ar->ch_hint,
-					ar->connect_ctrl_flags);
-	set_bit(CONNECT_PEND, &ar->flag);
+	status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+					vif->dot11_auth_mode, vif->auth_mode,
+					vif->prwise_crypto,
+					vif->prwise_crypto_len,
+					vif->grp_crypto, vif->grp_crypto_len,
+					vif->ssid_len, vif->ssid,
+					vif->req_bssid, vif->ch_hint,
+					ar->connect_ctrl_flags, SUBTYPE_NONE);
+	set_bit(CONNECT_PEND, &vif->flags);
 
 	return 0;
 }
@@ -1209,14 +1516,14 @@
 static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
 				      struct net_device *dev)
 {
-	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
-	ath6kl_disconnect(ar);
-	memset(ar->ssid, 0, sizeof(ar->ssid));
-	ar->ssid_len = 0;
+	ath6kl_disconnect(vif);
+	memset(vif->ssid, 0, sizeof(vif->ssid));
+	vif->ssid_len = 0;
 
 	return 0;
 }
@@ -1226,6 +1533,8 @@
 	WLAN_CIPHER_SUITE_WEP104,
 	WLAN_CIPHER_SUITE_TKIP,
 	WLAN_CIPHER_SUITE_CCMP,
+	CCKM_KRK_CIPHER_SUITE,
+	WLAN_CIPHER_SUITE_SMS4,
 };
 
 static bool is_rate_legacy(s32 rate)
@@ -1293,21 +1602,22 @@
 			      u8 *mac, struct station_info *sinfo)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 	long left;
 	bool sgi;
 	s32 rate;
 	int ret;
 	u8 mcs;
 
-	if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+	if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
 		return -ENOENT;
 
 	if (down_interruptible(&ar->sem))
 		return -EBUSY;
 
-	set_bit(STATS_UPDATE_PEND, &ar->flag);
+	set_bit(STATS_UPDATE_PEND, &vif->flags);
 
-	ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+	ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
 
 	if (ret != 0) {
 		up(&ar->sem);
@@ -1316,7 +1626,7 @@
 
 	left = wait_event_interruptible_timeout(ar->event_wq,
 						!test_bit(STATS_UPDATE_PEND,
-							  &ar->flag),
+							  &vif->flags),
 						WMI_TIMEOUT);
 
 	up(&ar->sem);
@@ -1326,24 +1636,24 @@
 	else if (left < 0)
 		return left;
 
-	if (ar->target_stats.rx_byte) {
-		sinfo->rx_bytes = ar->target_stats.rx_byte;
+	if (vif->target_stats.rx_byte) {
+		sinfo->rx_bytes = vif->target_stats.rx_byte;
 		sinfo->filled |= STATION_INFO_RX_BYTES;
-		sinfo->rx_packets = ar->target_stats.rx_pkt;
+		sinfo->rx_packets = vif->target_stats.rx_pkt;
 		sinfo->filled |= STATION_INFO_RX_PACKETS;
 	}
 
-	if (ar->target_stats.tx_byte) {
-		sinfo->tx_bytes = ar->target_stats.tx_byte;
+	if (vif->target_stats.tx_byte) {
+		sinfo->tx_bytes = vif->target_stats.tx_byte;
 		sinfo->filled |= STATION_INFO_TX_BYTES;
-		sinfo->tx_packets = ar->target_stats.tx_pkt;
+		sinfo->tx_packets = vif->target_stats.tx_pkt;
 		sinfo->filled |= STATION_INFO_TX_PACKETS;
 	}
 
-	sinfo->signal = ar->target_stats.cs_rssi;
+	sinfo->signal = vif->target_stats.cs_rssi;
 	sinfo->filled |= STATION_INFO_SIGNAL;
 
-	rate = ar->target_stats.tx_ucast_rate;
+	rate = vif->target_stats.tx_ucast_rate;
 
 	if (is_rate_legacy(rate)) {
 		sinfo->txrate.legacy = rate / 100;
@@ -1375,13 +1685,13 @@
 
 	sinfo->filled |= STATION_INFO_TX_BITRATE;
 
-	if (test_bit(CONNECTED, &ar->flag) &&
-	    test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
-	    ar->nw_type == INFRA_NETWORK) {
+	if (test_bit(CONNECTED, &vif->flags) &&
+	    test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
+	    vif->nw_type == INFRA_NETWORK) {
 		sinfo->filled |= STATION_INFO_BSS_PARAM;
 		sinfo->bss_param.flags = 0;
-		sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
-		sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+		sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
+		sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
 	}
 
 	return 0;
@@ -1391,7 +1701,9 @@
 			    struct cfg80211_pmksa *pmksa)
 {
 	struct ath6kl *ar = ath6kl_priv(netdev);
-	return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+	struct ath6kl_vif *vif = netdev_priv(netdev);
+
+	return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
 				       pmksa->pmkid, true);
 }
 
@@ -1399,25 +1711,302 @@
 			    struct cfg80211_pmksa *pmksa)
 {
 	struct ath6kl *ar = ath6kl_priv(netdev);
-	return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+	struct ath6kl_vif *vif = netdev_priv(netdev);
+
+	return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
 				       pmksa->pmkid, false);
 }
 
 static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
 {
 	struct ath6kl *ar = ath6kl_priv(netdev);
-	if (test_bit(CONNECTED, &ar->flag))
-		return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+	struct ath6kl_vif *vif = netdev_priv(netdev);
+
+	if (test_bit(CONNECTED, &vif->flags))
+		return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
+					       vif->bssid, NULL, false);
+	return 0;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+	struct ath6kl_vif *vif;
+	int ret, pos, left;
+	u32 filter = 0;
+	u16 i;
+	u8 mask[WOW_MASK_SIZE];
+
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	if (!ath6kl_cfg80211_ready(vif))
+		return -EIO;
+
+	if (!test_bit(CONNECTED, &vif->flags))
+		return -EINVAL;
+
+	/* Clear existing WOW patterns */
+	for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+		ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+					       WOW_LIST_ID, i);
+	/* Configure new WOW patterns */
+	for (i = 0; i < wow->n_patterns; i++) {
+
+		/*
+		 * Convert the given nl80211-specific mask value to the
+		 * equivalent driver-specific mask value and send it to the
+		 * chip along with the pattern. Bit i of the nl80211 mask
+		 * selects byte i of the pattern, so if the mask value defined
+		 * in struct cfg80211_wowlan is 0xA (binary 1010), the
+		 * equivalent driver-specific mask value is
+		 * "0x00 0xFF 0x00 0xFF".
+		 */
+		memset(&mask, 0, sizeof(mask));
+		for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
+			if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
+				mask[pos] = 0xFF;
+		}
+		/*
+		 * Note: the pattern offset is not passed as part of the wowlan
+		 * parameters from the cfg80211 layer, so it is always sent to
+		 * the firmware as zero. This means the configured WOW patterns
+		 * are always matched starting from the first byte of the
+		 * received packet in the firmware.
+		 */
+		ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+					vif->fw_vif_idx, WOW_LIST_ID,
+					wow->patterns[i].pattern_len,
+					0 /* pattern offset */,
+					wow->patterns[i].pattern, mask);
+		if (ret)
+			return ret;
+	}
+
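+	/* Map cfg80211 wowlan triggers to firmware WOW filter flags */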
+	if (wow->disconnect)
+		filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+
+	if (wow->magic_pkt)
+		filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+
+	if (wow->gtk_rekey_failure)
+		filter |= WOW_FILTER_OPTION_GTK_ERROR;
+
+	if (wow->eap_identity_req)
+		filter |= WOW_FILTER_OPTION_EAP_REQ;
+
+	if (wow->four_way_handshake)
+		filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+					  ATH6KL_WOW_MODE_ENABLE,
+					  filter,
+					  WOW_HOST_REQ_DELAY);
+	if (ret)
+		return ret;
+
+	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+						 ATH6KL_HOST_MODE_ASLEEP);
+	if (ret)
+		return ret;
+
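+	/* Let pending WMI control traffic drain before the host sleeps */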
+	if (ar->tx_pending[ar->ctrl_ep]) {
+		left = wait_event_interruptible_timeout(ar->event_wq,
+				ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
+		if (left == 0) {
+			ath6kl_warn("clear wmi ctrl data timeout\n");
+			ret = -ETIMEDOUT;
+		} else if (left < 0) {
+			ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+			ret = left;
+		}
+	}
+
+	return ret;
+}
+
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+	struct ath6kl_vif *vif;
+	int ret;
+
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+						 ATH6KL_HOST_MODE_AWAKE);
+	return ret;
+}
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+			    enum ath6kl_cfg_suspend_mode mode,
+			    struct cfg80211_wowlan *wow)
+{
+	int ret;
+
+	switch (mode) {
+	case ATH6KL_CFG_SUSPEND_WOW:
+
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
+
+		/* Flush all non-control packets in the TX path */
+		ath6kl_tx_data_cleanup(ar);
+
+		ret = ath6kl_wow_suspend(ar, wow);
+		if (ret) {
+			ath6kl_err("wow suspend failed: %d\n", ret);
+			return ret;
+		}
+		ar->state = ATH6KL_STATE_WOW;
+		break;
+
+	case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
+
+		ath6kl_cfg80211_stop_all(ar);
+
+		/* save the current power mode before enabling power save */
+		ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+		ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+		if (ret) {
+			ath6kl_warn("wmi powermode command failed during suspend: %d\n",
+				    ret);
+		}
+
+		ar->state = ATH6KL_STATE_DEEPSLEEP;
+
+		break;
+
+	case ATH6KL_CFG_SUSPEND_CUTPOWER:
+
+		ath6kl_cfg80211_stop_all(ar);
+
+		if (ar->state == ATH6KL_STATE_OFF) {
+			ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+				   "suspend hw off, no action for cutpower\n");
+			break;
+		}
+
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
+
+		ret = ath6kl_init_hw_stop(ar);
+		if (ret) {
+			ath6kl_warn("failed to stop hw during suspend: %d\n",
+				    ret);
+		}
+
+		ar->state = ATH6KL_STATE_CUTPOWER;
+
+		break;
+
+	case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
+		/*
+		 * Nothing needed for scheduled scan; the firmware is already
+		 * in WoW mode and sleeping most of the time.
+		 */
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar)
+{
+	int ret;
+
+	switch (ar->state) {
+	case  ATH6KL_STATE_WOW:
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
+
+		ret = ath6kl_wow_resume(ar);
+		if (ret) {
+			ath6kl_warn("wow mode resume failed: %d\n", ret);
+			return ret;
+		}
+
+		ar->state = ATH6KL_STATE_ON;
+		break;
+
+	case ATH6KL_STATE_DEEPSLEEP:
+		if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+			ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+						       ar->wmi->saved_pwr_mode);
+			if (ret) {
+				ath6kl_warn("wmi powermode command failed during resume: %d\n",
+					    ret);
+			}
+		}
+
+		ar->state = ATH6KL_STATE_ON;
+
+		break;
+
+	case ATH6KL_STATE_CUTPOWER:
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
+
+		ret = ath6kl_init_hw_start(ar);
+		if (ret) {
+			ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
+			return ret;
+		}
+		break;
+
+	case ATH6KL_STATE_SCHED_SCAN:
+		break;
+
+	default:
+		break;
+	}
+
 	return 0;
 }
 
 #ifdef CONFIG_PM
-static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+
+/* hif layer decides what suspend mode to use */
+static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
 				 struct cfg80211_wowlan *wow)
 {
 	struct ath6kl *ar = wiphy_priv(wiphy);
 
-	return ath6kl_hif_suspend(ar);
+	return ath6kl_hif_suspend(ar, wow);
+}
+
+static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct ath6kl *ar = wiphy_priv(wiphy);
+
+	return ath6kl_hif_resume(ar);
+}
+
+/*
+ * FIXME: WOW suspend mode is selected if the host sdio controller supports
+ * both sdio irq wake up and keep power. The target pulls the sdio data line
+ * to wake up the host when a WOW pattern matches. This causes the sdio irq
+ * handler to be called on the host side, which internally hits ath6kl's RX
+ * path.
+ *
+ * Since the sdio interrupt is not disabled, the RX path executes even before
+ * the host performs the actual resume operation from the PM module.
+ *
+ * In the current scenario, WOW resume should happen before we start
+ * processing any data from the target, so WOW resume has to be performed in
+ * the RX path. Ideally WOW resume should only happen in the actual platform
+ * resume path. This area needs some rework to avoid WOW resume in the RX
+ * path.
+ *
+ * ath6kl_check_wow_status() is called from ath6kl_rx().
+ */
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+	if (ar->state == ATH6KL_STATE_WOW)
+		ath6kl_cfg80211_resume(ar);
+}
+
+#else
+
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
 }
 #endif
 
@@ -1425,14 +2014,14 @@
 			      struct ieee80211_channel *chan,
 			      enum nl80211_channel_type channel_type)
 {
-	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
 		   __func__, chan->center_freq, chan->hw_value);
-	ar->next_chan = chan->center_freq;
+	vif->next_chan = chan->center_freq;
 
 	return 0;
 }
@@ -1444,9 +2033,10 @@
 		pos[4] == 0x9a && pos[5] == 0x09;
 }
 
-static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
-					size_t ies_len)
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
+					const u8 *ies, size_t ies_len)
 {
+	struct ath6kl *ar = vif->ar;
 	const u8 *pos;
 	u8 *buf = NULL;
 	size_t len = 0;
@@ -1473,8 +2063,8 @@
 		}
 	}
 
-	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
-				       buf, len);
+	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+				       WMI_FRAME_PROBE_RESP, buf, len);
 	kfree(buf);
 	return ret;
 }
@@ -1483,36 +2073,39 @@
 			    struct beacon_parameters *info, bool add)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 	struct ieee80211_mgmt *mgmt;
 	u8 *ies;
 	int ies_len;
 	struct wmi_connect_cmd p;
 	int res;
-	int i;
+	int i, ret;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
 
-	if (!ath6kl_cfg80211_ready(ar))
+	if (!ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
-	if (ar->next_mode != AP_NETWORK)
+	if (vif->next_mode != AP_NETWORK)
 		return -EOPNOTSUPP;
 
 	if (info->beacon_ies) {
-		res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+		res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+					       WMI_FRAME_BEACON,
 					       info->beacon_ies,
 					       info->beacon_ies_len);
 		if (res)
 			return res;
 	}
 	if (info->proberesp_ies) {
-		res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+		res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
 						   info->proberesp_ies_len);
 		if (res)
 			return res;
 	}
 	if (info->assocresp_ies) {
-		res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+		res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+					       WMI_FRAME_ASSOC_RESP,
 					       info->assocresp_ies,
 					       info->assocresp_ies_len);
 		if (res)
@@ -1539,12 +2132,14 @@
 
 	if (info->ssid == NULL)
 		return -EINVAL;
-	memcpy(ar->ssid, info->ssid, info->ssid_len);
-	ar->ssid_len = info->ssid_len;
+	memcpy(vif->ssid, info->ssid, info->ssid_len);
+	vif->ssid_len = info->ssid_len;
 	if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
 		return -EOPNOTSUPP; /* TODO */
 
-	ar->dot11_auth_mode = OPEN_AUTH;
+	ret = ath6kl_set_auth_type(vif, info->auth_type);
+	if (ret)
+		return ret;
 
 	memset(&p, 0, sizeof(p));
 
@@ -1566,7 +2161,7 @@
 	}
 	if (p.auth_mode == 0)
 		p.auth_mode = NONE_AUTH;
-	ar->auth_mode = p.auth_mode;
+	vif->auth_mode = p.auth_mode;
 
 	for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
 		switch (info->crypto.ciphers_pairwise[i]) {
@@ -1580,13 +2175,16 @@
 		case WLAN_CIPHER_SUITE_CCMP:
 			p.prwise_crypto_type |= AES_CRYPT;
 			break;
+		case WLAN_CIPHER_SUITE_SMS4:
+			p.prwise_crypto_type |= WAPI_CRYPT;
+			break;
 		}
 	}
 	if (p.prwise_crypto_type == 0) {
 		p.prwise_crypto_type = NONE_CRYPT;
-		ath6kl_set_cipher(ar, 0, true);
+		ath6kl_set_cipher(vif, 0, true);
 	} else if (info->crypto.n_ciphers_pairwise == 1)
-		ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+		ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
 
 	switch (info->crypto.cipher_group) {
 	case WLAN_CIPHER_SUITE_WEP40:
@@ -1599,21 +2197,34 @@
 	case WLAN_CIPHER_SUITE_CCMP:
 		p.grp_crypto_type = AES_CRYPT;
 		break;
+	case WLAN_CIPHER_SUITE_SMS4:
+		p.grp_crypto_type = WAPI_CRYPT;
+		break;
 	default:
 		p.grp_crypto_type = NONE_CRYPT;
 		break;
 	}
-	ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+	ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
 
 	p.nw_type = AP_NETWORK;
-	ar->nw_type = ar->next_mode;
+	vif->nw_type = vif->next_mode;
 
-	p.ssid_len = ar->ssid_len;
-	memcpy(p.ssid, ar->ssid, ar->ssid_len);
-	p.dot11_auth_mode = ar->dot11_auth_mode;
-	p.ch = cpu_to_le16(ar->next_chan);
+	p.ssid_len = vif->ssid_len;
+	memcpy(p.ssid, vif->ssid, vif->ssid_len);
+	p.dot11_auth_mode = vif->dot11_auth_mode;
+	p.ch = cpu_to_le16(vif->next_chan);
 
-	res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+	if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+		p.nw_subtype = SUBTYPE_P2PGO;
+	} else {
+		/*
+		 * Due to a firmware limitation, it is not possible to
+		 * do P2P mgmt operations in AP mode.
+		 */
+		p.nw_subtype = SUBTYPE_NONE;
+	}
+
+	res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
 	if (res < 0)
 		return res;
 
@@ -1635,14 +2246,15 @@
 static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	if (ar->nw_type != AP_NETWORK)
+	if (vif->nw_type != AP_NETWORK)
 		return -EOPNOTSUPP;
-	if (!test_bit(CONNECTED, &ar->flag))
+	if (!test_bit(CONNECTED, &vif->flags))
 		return -ENOTCONN;
 
-	ath6kl_wmi_disconnect_cmd(ar->wmi);
-	clear_bit(CONNECTED, &ar->flag);
+	ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+	clear_bit(CONNECTED, &vif->flags);
 
 	return 0;
 }
@@ -1651,8 +2263,9 @@
 				 u8 *mac, struct station_parameters *params)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	if (ar->nw_type != AP_NETWORK)
+	if (vif->nw_type != AP_NETWORK)
 		return -EOPNOTSUPP;
 
 	/* Use this only for authorizing/unauthorizing a station */
@@ -1660,10 +2273,10 @@
 		return -EOPNOTSUPP;
 
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
-		return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
-					      mac, 0);
-	return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
-				      0);
+		return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+					      WMI_AP_MLME_AUTHORIZE, mac, 0);
+	return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+				      WMI_AP_MLME_UNAUTHORIZE, mac, 0);
 }
 
 static int ath6kl_remain_on_channel(struct wiphy *wiphy,
@@ -1674,13 +2287,20 @@
 				    u64 *cookie)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
+	u32 id;
 
 	/* TODO: if already pending or ongoing remain-on-channel,
 	 * return -EBUSY */
-	*cookie = 1; /* only a single pending request is supported */
+	id = ++vif->last_roc_id;
+	if (id == 0) {
+		/* Do not use 0 as the cookie value */
+		id = ++vif->last_roc_id;
+	}
+	*cookie = id;
 
-	return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
-					     duration);
+	return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
+					     chan->center_freq, duration);
 }
 
 static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1688,16 +2308,20 @@
 					   u64 cookie)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	if (cookie != 1)
+	if (cookie != vif->last_roc_id)
 		return -ENOENT;
+	vif->last_cancel_roc_id = cookie;
 
-	return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+	return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
 }
 
-static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
-				     size_t len, unsigned int freq)
+static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
+				     const u8 *buf, size_t len,
+				     unsigned int freq)
 {
+	struct ath6kl *ar = vif->ar;
 	const u8 *pos;
 	u8 *p2p;
 	int p2p_len;
@@ -1724,8 +2348,8 @@
 		pos += 2 + pos[1];
 	}
 
-	ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
-						 p2p, p2p_len);
+	ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
+						 mgmt->da, p2p, p2p_len);
 	kfree(p2p);
 	return ret;
 }
@@ -1734,44 +2358,61 @@
 			  struct ieee80211_channel *chan, bool offchan,
 			  enum nl80211_channel_type channel_type,
 			  bool channel_type_valid, unsigned int wait,
-			  const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+			  const u8 *buf, size_t len, bool no_cck,
+			  bool dont_wait_for_ack, u64 *cookie)
 {
 	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 	u32 id;
 	const struct ieee80211_mgmt *mgmt;
 
 	mgmt = (const struct ieee80211_mgmt *) buf;
 	if (buf + len >= mgmt->u.probe_resp.variable &&
-	    ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+	    vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
 	    ieee80211_is_probe_resp(mgmt->frame_control)) {
 		/*
 		 * Send Probe Response frame in AP mode using a separate WMI
 		 * command to allow the target to fill in the generic IEs.
 		 */
 		*cookie = 0; /* TX status not supported */
-		return ath6kl_send_go_probe_resp(ar, buf, len,
+		return ath6kl_send_go_probe_resp(vif, buf, len,
 						 chan->center_freq);
 	}
 
-	id = ar->send_action_id++;
+	id = vif->send_action_id++;
 	if (id == 0) {
 		/*
 		 * 0 is a reserved value in the WMI command and shall not be
 		 * used for the command.
 		 */
-		id = ar->send_action_id++;
+		id = vif->send_action_id++;
 	}
 
 	*cookie = id;
-	return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
-					  buf, len);
+
+	if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+		    ar->fw_capabilities)) {
+		/*
+		 * If the firmware is capable of doing P2P mgmt operations on
+		 * the station interface, send additional information such as
+		 * the supported rates to advertise and the xmit rates for
+		 * probe requests.
+		 */
+		return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
+						chan->center_freq, wait,
+						buf, len, no_cck);
+	} else {
+		return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
+						  chan->center_freq, wait,
+						  buf, len);
+	}
 }
 
 static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
 				       struct net_device *dev,
 				       u16 frame_type, bool reg)
 {
-	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
 		   __func__, frame_type, reg);
@@ -1781,10 +2422,94 @@
 		 * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
 		 * hardcode target to report Probe Request frames all the time.
 		 */
-		ar->probe_req_report = reg;
+		vif->probe_req_report = reg;
 	}
 }
 
+static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
+			struct net_device *dev,
+			struct cfg80211_sched_scan_request *request)
+{
+	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
+	u16 interval;
+	int ret;
+	u8 i;
+
+	if (ar->state != ATH6KL_STATE_ON)
+		return -EIO;
+
+	if (vif->sme_state != SME_DISCONNECTED)
+		return -EBUSY;
+
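+	/* Clear any previously configured probed-SSID entries */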
+	for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
+		ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+					  i, DISABLE_SSID_FLAG,
+					  0, NULL);
+	}
+
+	/* fw uses seconds, also make sure that it's >0 */
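+	/* e.g. 10000 ms -> 10 s; less than 1000 ms is clamped to 1 s */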
+	interval = max_t(u16, 1, request->interval / 1000);
+
+	ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+				  interval, interval,
+				  10, 0, 0, 0, 3, 0, 0, 0);
+
+	if (request->n_ssids && request->ssids[0].ssid_len) {
+		for (i = 0; i < request->n_ssids; i++) {
+			ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+						  i, SPECIFIC_SSID_FLAG,
+						  request->ssids[i].ssid_len,
+						  request->ssids[i].ssid);
+		}
+	}
+
+	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+					  ATH6KL_WOW_MODE_ENABLE,
+					  WOW_FILTER_SSID,
+					  WOW_HOST_REQ_DELAY);
+	if (ret) {
+		ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
+		return ret;
+	}
+
+	/* this also clears IE in fw if it's not set */
+	ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+				       WMI_FRAME_PROBE_REQ,
+				       request->ie, request->ie_len);
+	if (ret) {
+		ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+						 ATH6KL_HOST_MODE_ASLEEP);
+	if (ret) {
+		ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ar->state = ATH6KL_STATE_SCHED_SCAN;
+
+	return ret;
+}
+
+static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
+				      struct net_device *dev)
+{
+	struct ath6kl_vif *vif = netdev_priv(dev);
+	bool stopped;
+
+	stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+	if (!stopped)
+		return -EIO;
+
+	return 0;
+}
+
 static const struct ieee80211_txrx_stypes
 ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 	[NL80211_IFTYPE_STATION] = {
@@ -1808,6 +2533,8 @@
 };
 
 static struct cfg80211_ops ath6kl_cfg80211_ops = {
+	.add_virtual_intf = ath6kl_cfg80211_add_iface,
+	.del_virtual_intf = ath6kl_cfg80211_del_iface,
 	.change_virtual_intf = ath6kl_cfg80211_change_iface,
 	.scan = ath6kl_cfg80211_scan,
 	.connect = ath6kl_cfg80211_connect,
@@ -1828,7 +2555,8 @@
 	.flush_pmksa = ath6kl_flush_pmksa,
 	CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
 #ifdef CONFIG_PM
-	.suspend = ar6k_cfg80211_suspend,
+	.suspend = __ath6kl_cfg80211_suspend,
+	.resume = __ath6kl_cfg80211_resume,
 #endif
 	.set_channel = ath6kl_set_channel,
 	.add_beacon = ath6kl_add_beacon,
@@ -1839,78 +2567,277 @@
 	.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
 	.mgmt_tx = ath6kl_mgmt_tx,
 	.mgmt_frame_register = ath6kl_mgmt_frame_register,
+	.sched_scan_start = ath6kl_cfg80211_sscan_start,
+	.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
 };
 
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
 {
-	int ret = 0;
-	struct wireless_dev *wdev;
-	struct ath6kl *ar;
+	ath6kl_cfg80211_sscan_disable(vif);
 
-	wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
-	if (!wdev) {
-		ath6kl_err("couldn't allocate wireless device\n");
-		return NULL;
+	switch (vif->sme_state) {
+	case SME_DISCONNECTED:
+		break;
+	case SME_CONNECTING:
+		cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
+					NULL, 0,
+					WLAN_STATUS_UNSPECIFIED_FAILURE,
+					GFP_KERNEL);
+		break;
+	case SME_CONNECTED:
+		cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+		break;
 	}
 
-	/* create a new wiphy for use with cfg80211 */
-	wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
-	if (!wdev->wiphy) {
-		ath6kl_err("couldn't allocate wiphy device\n");
-		kfree(wdev);
-		return NULL;
-	}
+	if (test_bit(CONNECTED, &vif->flags) ||
+	    test_bit(CONNECT_PEND, &vif->flags))
+		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
 
-	ar = wiphy_priv(wdev->wiphy);
-	ar->p2p = !!ath6kl_p2p;
+	vif->sme_state = SME_DISCONNECTED;
+	clear_bit(CONNECTED, &vif->flags);
+	clear_bit(CONNECT_PEND, &vif->flags);
 
-	wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+	/* disable scanning */
+	if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+				      0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
+		ath6kl_warn("failed to disable scan during stop\n");
 
-	wdev->wiphy->max_remain_on_channel_duration = 5000;
-
-	/* set device pointer for wiphy */
-	set_wiphy_dev(wdev->wiphy, dev);
-
-	wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
-	if (ar->p2p) {
-		wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
-			BIT(NL80211_IFTYPE_P2P_CLIENT);
-	}
-	/* max num of ssids that can be probed during scanning */
-	wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
-	wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
-	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
-	wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
-	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
-	wdev->wiphy->cipher_suites = cipher_suites;
-	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
-
-	ret = wiphy_register(wdev->wiphy);
-	if (ret < 0) {
-		ath6kl_err("couldn't register wiphy device\n");
-		wiphy_free(wdev->wiphy);
-		kfree(wdev);
-		return NULL;
-	}
-
-	return wdev;
+	ath6kl_cfg80211_scan_complete_event(vif, true);
 }
 
-void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
 {
-	struct wireless_dev *wdev = ar->wdev;
+	struct ath6kl_vif *vif;
 
-	if (ar->scan_req) {
-		cfg80211_scan_done(ar->scan_req, true);
-		ar->scan_req = NULL;
+	vif = ath6kl_vif_first(ar);
+	if (!vif) {
+		/* save the current power mode before enabling power save */
+		ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+		if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
+			ath6kl_warn("ath6kl_deep_sleep_enable: "
+				    "wmi_powermode_cmd failed\n");
+		return;
 	}
 
-	if (!wdev)
-		return;
+	/*
+	 * FIXME: we should take ar->list_lock to protect changes in the
+	 * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop()
+	 * sleeps.
+	 */
+	list_for_each_entry(vif, &ar->vif_list, list)
+		ath6kl_cfg80211_stop(vif);
+}
 
-	wiphy_unregister(wdev->wiphy);
-	wiphy_free(wdev->wiphy);
-	kfree(wdev);
+struct ath6kl *ath6kl_core_alloc(struct device *dev)
+{
+	struct ath6kl *ar;
+	struct wiphy *wiphy;
+	u8 ctr;
+
+	/* create a new wiphy for use with cfg80211 */
+	wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+	if (!wiphy) {
+		ath6kl_err("couldn't allocate wiphy device\n");
+		return NULL;
+	}
+
+	ar = wiphy_priv(wiphy);
+	ar->p2p = !!ath6kl_p2p;
+	ar->wiphy = wiphy;
+	ar->dev = dev;
+
+	ar->vif_max = 1;
+
+	ar->max_norm_iface = 1;
+
+	spin_lock_init(&ar->lock);
+	spin_lock_init(&ar->mcastpsq_lock);
+	spin_lock_init(&ar->list_lock);
+
+	init_waitqueue_head(&ar->event_wq);
+	sema_init(&ar->sem, 1);
+
+	INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+	INIT_LIST_HEAD(&ar->vif_list);
+
+	clear_bit(WMI_ENABLED, &ar->flag);
+	clear_bit(SKIP_SCAN, &ar->flag);
+	clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+	ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+	ar->listen_intvl_b = 0;
+	ar->tx_pwr = 0;
+
+	ar->intra_bss = 1;
+	ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+	ar->state = ATH6KL_STATE_OFF;
+
+	memset((u8 *)ar->sta_list, 0,
+	       AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+	/* Init the PS queues */
+	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+		spin_lock_init(&ar->sta_list[ctr].psq_lock);
+		skb_queue_head_init(&ar->sta_list[ctr].psq);
+	}
+
+	skb_queue_head_init(&ar->mcastpsq);
+
+	memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+	return ar;
+}
+
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
+{
+	struct wiphy *wiphy = ar->wiphy;
+	int ret;
+
+	wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+	wiphy->max_remain_on_channel_duration = 5000;
+
+	/* set device pointer for wiphy */
+	set_wiphy_dev(wiphy, ar->dev);
+
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_ADHOC) |
+				 BIT(NL80211_IFTYPE_AP);
+	if (ar->p2p) {
+		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+					  BIT(NL80211_IFTYPE_P2P_CLIENT);
+	}
+
+	/* max num of ssids that can be probed during scanning */
+	wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+	wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+	wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+	wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+	wiphy->cipher_suites = cipher_suites;
+	wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+	wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+			      WIPHY_WOWLAN_DISCONNECT |
+			      WIPHY_WOWLAN_GTK_REKEY_FAILURE  |
+			      WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+			      WIPHY_WOWLAN_EAP_IDENTITY_REQ   |
+			      WIPHY_WOWLAN_4WAY_HANDSHAKE;
+	wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
+	wiphy->wowlan.pattern_min_len = 1;
+	wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+
+	wiphy->max_sched_scan_ssids = 10;
+
+	ret = wiphy_register(wiphy);
+	if (ret < 0) {
+		ath6kl_err("couldn't register wiphy device\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath6kl_init_if_data(struct ath6kl_vif *vif)
+{
+	vif->aggr_cntxt = aggr_init(vif->ndev);
+	if (!vif->aggr_cntxt) {
+		ath6kl_err("failed to initialize aggr\n");
+		return -ENOMEM;
+	}
+
+	setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
+		    (unsigned long) vif->ndev);
+	setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
+		    (unsigned long) vif);
+
+	set_bit(WMM_ENABLED, &vif->flags);
+	spin_lock_init(&vif->if_lock);
+
+	return 0;
+}
+
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
+{
+	struct ath6kl *ar = vif->ar;
+
+	aggr_module_destroy(vif->aggr_cntxt);
+
+	ar->avail_idx_map |= BIT(vif->fw_vif_idx);
+
+	if (vif->nw_type == ADHOC_NETWORK)
+		ar->ibss_if_active = false;
+
+	unregister_netdevice(vif->ndev);
+
+	ar->num_vif--;
+}
+
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+					enum nl80211_iftype type, u8 fw_vif_idx,
+					u8 nw_type)
+{
+	struct net_device *ndev;
+	struct ath6kl_vif *vif;
+
+	ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+	if (!ndev)
+		return NULL;
+
+	vif = netdev_priv(ndev);
+	ndev->ieee80211_ptr = &vif->wdev;
+	vif->wdev.wiphy = ar->wiphy;
+	vif->ar = ar;
+	vif->ndev = ndev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
+	vif->wdev.netdev = ndev;
+	vif->wdev.iftype = type;
+	vif->fw_vif_idx = fw_vif_idx;
+	vif->nw_type = vif->next_mode = nw_type;
+
+	memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+	if (fw_vif_idx != 0)
+		ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
+				     0x2;
+
+	init_netdev(ndev);
+
+	ath6kl_init_control_info(vif);
+
+	/* TODO: Pass interface specific pointer instead of ar */
+	if (ath6kl_init_if_data(vif))
+		goto err;
+
+	if (register_netdevice(ndev))
+		goto err;
+
+	ar->avail_idx_map &= ~BIT(fw_vif_idx);
+	vif->sme_state = SME_DISCONNECTED;
+	set_bit(WLAN_ENABLED, &vif->flags);
+	ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+	set_bit(NETDEV_REGISTERED, &vif->flags);
+
+	if (type == NL80211_IFTYPE_ADHOC)
+		ar->ibss_if_active = true;
+
+	spin_lock_bh(&ar->list_lock);
+	list_add_tail(&vif->list, &ar->vif_list);
+	spin_unlock_bh(&ar->list_lock);
+
+	return ndev;
+
+err:
+	aggr_module_destroy(vif->aggr_cntxt);
+	free_netdev(ndev);
+	return NULL;
+}
+
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
+{
+	wiphy_unregister(ar->wiphy);
+	wiphy_free(ar->wiphy);
 }
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index a84adc2..81f20a5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -17,23 +17,43 @@
 #ifndef ATH6KL_CFG80211_H
 #define ATH6KL_CFG80211_H
 
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
-void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+enum ath6kl_cfg_suspend_mode {
+	ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+	ATH6KL_CFG_SUSPEND_CUTPOWER,
+	ATH6KL_CFG_SUSPEND_WOW,
+	ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+};
 
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+					enum nl80211_iftype type,
+					u8 fw_vif_idx, u8 nw_type);
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_alloc(struct device *dev);
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
 
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
 				   u8 *bssid, u16 listen_intvl,
 				   u16 beacon_intvl,
 				   enum network_type nw_type,
 				   u8 beacon_ie_len, u8 assoc_req_len,
 				   u8 assoc_resp_len, u8 *assoc_info);
 
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
 				      u8 *bssid, u8 assoc_resp_len,
 				      u8 *assoc_info, u16 proto_reason);
 
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
 				     bool ismcast);
 
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+			    enum ath6kl_cfg_suspend_mode mode,
+			    struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
+
 #endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index b92f0e5..bfd6597 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -23,8 +23,6 @@
 
 extern int ath6kl_printk(const char *level, const char *fmt, ...);
 
-#define A_CACHE_LINE_PAD            128
-
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
  * firmware. Needs to be incremented by 1 for any change in the firmware
@@ -73,25 +71,16 @@
 	WEP_CRYPT           = 0x02,
 	TKIP_CRYPT          = 0x04,
 	AES_CRYPT           = 0x08,
+	WAPI_CRYPT          = 0x10,
 };
 
 struct htc_endpoint_credit_dist;
 struct ath6kl;
 enum htc_credit_dist_reason;
-struct htc_credit_state_info;
+struct ath6kl_htc_credit_info;
 
-int ath6k_setup_credit_dist(void *htc_handle,
-			    struct htc_credit_state_info *cred_info);
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
-			     struct list_head *epdist_list,
-			     enum htc_credit_dist_reason reason);
-void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
-		       struct list_head *ep_list,
-		       int tot_credits);
-void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
-			struct htc_endpoint_credit_dist *ep_dist);
 struct ath6kl *ath6kl_core_alloc(struct device *sdev);
 int ath6kl_core_init(struct ath6kl *ar);
-int ath6kl_unavail_ev(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
 struct sk_buff *ath6kl_buf_alloc(int size);
 #endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6d8a484..c863a28 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -70,10 +70,20 @@
 	ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
 	ATH6KL_FW_IE_CAPABILITIES = 6,
 	ATH6KL_FW_IE_PATCH_ADDR = 7,
+	ATH6KL_FW_IE_BOARD_ADDR = 8,
+	ATH6KL_FW_IE_VIF_MAX = 9,
 };
 
 enum ath6kl_fw_capability {
 	ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
+	ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1,
+
+	/*
+	 * Firmware is capable of supporting P2P mgmt operations on a
+	 * station interface. After group formation, the station
+	 * interface will become a P2P client/GO interface, as the case may be.
+	 */
+	ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
 
 	/* this needs to be last */
 	ATH6KL_FW_CAPABILITY_MAX,
@@ -88,37 +98,47 @@
 };
 
 /* AR6003 1.0 definitions */
-#define AR6003_REV1_VERSION                 0x300002ba
+#define AR6003_HW_1_0_VERSION                 0x300002ba
 
 /* AR6003 2.0 definitions */
-#define AR6003_REV2_VERSION                 0x30000384
-#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS  0x57e910
-#define AR6003_REV2_OTP_FILE                "ath6k/AR6003/hw2.0/otp.bin.z77"
-#define AR6003_REV2_FIRMWARE_FILE           "ath6k/AR6003/hw2.0/athwlan.bin.z77"
-#define AR6003_REV2_TCMD_FIRMWARE_FILE      "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
-#define AR6003_REV2_PATCH_FILE              "ath6k/AR6003/hw2.0/data.patch.bin"
-#define AR6003_REV2_FIRMWARE_2_FILE         "ath6k/AR6003/hw2.0/fw-2.bin"
-#define AR6003_REV2_BOARD_DATA_FILE         "ath6k/AR6003/hw2.0/bdata.bin"
-#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+#define AR6003_HW_2_0_VERSION                 0x30000384
+#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS  0x57e910
+#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
+#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
+#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
+			"ath6k/AR6003/hw2.0/bdata.SD31.bin"
 
 /* AR6003 3.0 definitions */
-#define AR6003_REV3_VERSION                 0x30000582
-#define AR6003_REV3_OTP_FILE                "ath6k/AR6003/hw2.1.1/otp.bin"
-#define AR6003_REV3_FIRMWARE_FILE           "ath6k/AR6003/hw2.1.1/athwlan.bin"
-#define AR6003_REV3_TCMD_FIRMWARE_FILE    "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
-#define AR6003_REV3_PATCH_FILE            "ath6k/AR6003/hw2.1.1/data.patch.bin"
-#define AR6003_REV3_FIRMWARE_2_FILE           "ath6k/AR6003/hw2.1.1/fw-2.bin"
-#define AR6003_REV3_BOARD_DATA_FILE       "ath6k/AR6003/hw2.1.1/bdata.bin"
-#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE	\
-	"ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+#define AR6003_HW_2_1_1_VERSION                 0x30000582
+#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
+#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \
+			"ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
+#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE	\
+			"ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
 
 /* AR6004 1.0 definitions */
-#define AR6004_REV1_VERSION                 0x30000623
-#define AR6004_REV1_FIRMWARE_FILE           "ath6k/AR6004/hw6.1/fw.ram.bin"
-#define AR6004_REV1_FIRMWARE_2_FILE         "ath6k/AR6004/hw6.1/fw-2.bin"
-#define AR6004_REV1_BOARD_DATA_FILE         "ath6k/AR6004/hw6.1/bdata.bin"
-#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin"
-#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin"
+#define AR6004_HW_1_0_VERSION                 0x30000623
+#define AR6004_HW_1_0_FIRMWARE_2_FILE         "ath6k/AR6004/hw1.0/fw-2.bin"
+#define AR6004_HW_1_0_FIRMWARE_FILE           "ath6k/AR6004/hw1.0/fw.ram.bin"
+#define AR6004_HW_1_0_BOARD_DATA_FILE         "ath6k/AR6004/hw1.0/bdata.bin"
+#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
+	"ath6k/AR6004/hw1.0/bdata.DB132.bin"
+
+/* AR6004 1.1 definitions */
+#define AR6004_HW_1_1_VERSION                 0x30000001
+#define AR6004_HW_1_1_FIRMWARE_2_FILE         "ath6k/AR6004/hw1.1/fw-2.bin"
+#define AR6004_HW_1_1_FIRMWARE_FILE           "ath6k/AR6004/hw1.1/fw.ram.bin"
+#define AR6004_HW_1_1_BOARD_DATA_FILE         "ath6k/AR6004/hw1.1/bdata.bin"
+#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
+	"ath6k/AR6004/hw1.1/bdata.DB132.bin"
 
 /* Per STA data, used in AP mode */
 #define STA_PS_AWAKE		BIT(0)
@@ -166,6 +186,7 @@
 #define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN  BIT(1)
 #define ATH6KL_CONF_ENABLE_11N			BIT(2)
 #define ATH6KL_CONF_ENABLE_TX_BURST		BIT(3)
+#define ATH6KL_CONF_SUSPEND_CUTPOWER		BIT(4)
 
 enum wlan_low_pwr_state {
 	WLAN_POWER_STATE_ON,
@@ -271,6 +292,8 @@
 	u32 cmd_credits;
 	bool done_sent;
 	u8 *cmd_buf;
+	u32 max_data_size;
+	u32 max_cmd_size;
 };
 
 struct target_stats {
@@ -380,40 +403,42 @@
 	u8 key_len;
 };
 
-/* Flag info */
-#define WMI_ENABLED	0
-#define WMI_READY	1
-#define CONNECTED	2
-#define STATS_UPDATE_PEND 3
-#define CONNECT_PEND	  4
-#define WMM_ENABLED	  5
-#define NETQ_STOPPED	  6
-#define WMI_CTRL_EP_FULL  7
-#define DTIM_EXPIRED	  8
-#define DESTROY_IN_PROGRESS  9
-#define NETDEV_REGISTERED    10
-#define SKIP_SCAN	     11
-#define WLAN_ENABLED	     12
-#define TESTMODE	     13
-#define CLEAR_BSSFILTER_ON_BEACON 14
-#define DTIM_PERIOD_AVAIL    15
+enum ath6kl_hif_type {
+	ATH6KL_HIF_TYPE_SDIO,
+	ATH6KL_HIF_TYPE_USB,
+};
 
-struct ath6kl {
-	struct device *dev;
-	struct net_device *net_dev;
-	struct ath6kl_bmi bmi;
-	const struct ath6kl_hif_ops *hif_ops;
-	struct wmi *wmi;
-	int tx_pending[ENDPOINT_MAX];
-	int total_tx_data_pend;
-	struct htc_target *htc_target;
-	void *hif_priv;
-	spinlock_t lock;
-	struct semaphore sem;
+/*
+ * Driver's maximum limit. Note that some firmware versions support only one
+ * vif; the runtime (current) limit must be checked from ar->vif_max.
+ */
+#define ATH6KL_VIF_MAX	3
+
+/* vif flags info */
+enum ath6kl_vif_state {
+	CONNECTED,
+	CONNECT_PEND,
+	WMM_ENABLED,
+	NETQ_STOPPED,
+	DTIM_EXPIRED,
+	NETDEV_REGISTERED,
+	CLEAR_BSSFILTER_ON_BEACON,
+	DTIM_PERIOD_AVAIL,
+	WLAN_ENABLED,
+	STATS_UPDATE_PEND,
+};
+
+struct ath6kl_vif {
+	struct list_head list;
+	struct wireless_dev wdev;
+	struct net_device *ndev;
+	struct ath6kl *ar;
+	/* Lock to protect vif specific net_stats and flags */
+	spinlock_t if_lock;
+	u8 fw_vif_idx;
+	unsigned long flags;
 	int ssid_len;
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
-	u8 next_mode;
-	u8 nw_type;
 	u8 dot11_auth_mode;
 	u8 auth_mode;
 	u8 prwise_crypto;
@@ -421,21 +446,91 @@
 	u8 grp_crypto;
 	u8 grp_crypto_len;
 	u8 def_txkey_index;
-	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+	u8 next_mode;
+	u8 nw_type;
 	u8 bssid[ETH_ALEN];
 	u8 req_bssid[ETH_ALEN];
 	u16 ch_hint;
 	u16 bss_ch;
+	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+	struct aggr_info *aggr_cntxt;
+
+	struct timer_list disconnect_timer;
+	struct timer_list sched_scan_timer;
+
+	struct cfg80211_scan_request *scan_req;
+	enum sme_state sme_state;
+	int reconnect_flag;
+	u32 last_roc_id;
+	u32 last_cancel_roc_id;
+	u32 send_action_id;
+	bool probe_req_report;
+	u16 next_chan;
+	u16 assoc_bss_beacon_int;
+	u8 assoc_bss_dtim_period;
+	struct net_device_stats net_stats;
+	struct target_stats target_stats;
+};
+
+#define WOW_LIST_ID		0
+#define WOW_HOST_REQ_DELAY	500 /* ms */
+
+#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+	WMI_ENABLED,
+	WMI_READY,
+	WMI_CTRL_EP_FULL,
+	TESTMODE,
+	DESTROY_IN_PROGRESS,
+	SKIP_SCAN,
+	ROAM_TBL_PEND,
+	FIRST_BOOT,
+};
+
+enum ath6kl_state {
+	ATH6KL_STATE_OFF,
+	ATH6KL_STATE_ON,
+	ATH6KL_STATE_DEEPSLEEP,
+	ATH6KL_STATE_CUTPOWER,
+	ATH6KL_STATE_WOW,
+	ATH6KL_STATE_SCHED_SCAN,
+};
+
+struct ath6kl {
+	struct device *dev;
+	struct wiphy *wiphy;
+
+	enum ath6kl_state state;
+
+	struct ath6kl_bmi bmi;
+	const struct ath6kl_hif_ops *hif_ops;
+	struct wmi *wmi;
+	int tx_pending[ENDPOINT_MAX];
+	int total_tx_data_pend;
+	struct htc_target *htc_target;
+	enum ath6kl_hif_type hif_type;
+	void *hif_priv;
+	struct list_head vif_list;
+	/* Lock to avoid race in vif_list entries among add/del/traverse */
+	spinlock_t list_lock;
+	u8 num_vif;
+	unsigned int vif_max;
+	u8 max_norm_iface;
+	u8 avail_idx_map;
+	spinlock_t lock;
+	struct semaphore sem;
 	u16 listen_intvl_b;
 	u16 listen_intvl_t;
 	u8 lrssi_roam_threshold;
 	struct ath6kl_version version;
 	u32 target_type;
 	u8 tx_pwr;
-	struct net_device_stats net_stats;
-	struct target_stats target_stats;
 	struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
 	u8 ibss_ps_enable;
+	bool ibss_if_active;
 	u8 node_num;
 	u8 next_ep_id;
 	struct ath6kl_cookie *cookie_list;
@@ -446,7 +541,7 @@
 	u8 hiac_stream_active_pri;
 	u8 ep2ac_map[ENDPOINT_MAX];
 	enum htc_endpoint_id ctrl_ep;
-	struct htc_credit_state_info credit_state_info;
+	struct ath6kl_htc_credit_info credit_state_info;
 	u32 connect_ctrl_flags;
 	u32 user_key_ctrl;
 	u8 usr_bss_filter;
@@ -456,30 +551,37 @@
 	struct sk_buff_head mcastpsq;
 	spinlock_t mcastpsq_lock;
 	u8 intra_bss;
-	struct aggr_info *aggr_cntxt;
 	struct wmi_ap_mode_stat ap_stats;
 	u8 ap_country_code[3];
 	struct list_head amsdu_rx_buffer_queue;
-	struct timer_list disconnect_timer;
 	u8 rx_meta_ver;
-	struct wireless_dev *wdev;
-	struct cfg80211_scan_request *scan_req;
-	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
-	enum sme_state sme_state;
 	enum wlan_low_pwr_state wlan_pwr_state;
-	struct wmi_scan_params_cmd sc_params;
+	u8 mac_addr[ETH_ALEN];
 #define AR_MCAST_FILTER_MAC_ADDR_SIZE  4
 	struct {
 		void *rx_report;
 		size_t rx_report_len;
 	} tm;
 
-	struct {
+	struct ath6kl_hw {
+		u32 id;
+		const char *name;
 		u32 dataset_patch_addr;
 		u32 app_load_addr;
 		u32 app_start_override_addr;
 		u32 board_ext_data_addr;
 		u32 reserved_ram_size;
+		u32 board_addr;
+		u32 refclk_hz;
+		u32 uarttx_pin;
+
+		const char *fw_otp;
+		const char *fw;
+		const char *fw_tcmd;
+		const char *fw_patch;
+		const char *fw_api2;
+		const char *fw_board;
+		const char *fw_default_board;
 	} hw;
 
 	u16 conf_flags;
@@ -487,7 +589,6 @@
 	struct ath6kl_mbox_info mbox_info;
 
 	struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
-	int reconnect_flag;
 	unsigned long flag;
 
 	u8 *fw_board;
@@ -508,13 +609,7 @@
 
 	struct dentry *debugfs_phy;
 
-	u32 send_action_id;
-	bool probe_req_report;
-	u16 next_chan;
-
 	bool p2p;
-	u16 assoc_bss_beacon_int;
-	u8 assoc_bss_dtim_period;
 
 #ifdef CONFIG_ATH6KL_DEBUG
 	struct {
@@ -529,23 +624,19 @@
 		struct {
 			unsigned int invalid_rate;
 		} war_stats;
+
+		u8 *roam_tbl;
+		unsigned int roam_tbl_len;
+
+		u8 keepalive;
+		u8 disc_timeout;
 	} debug;
 #endif /* CONFIG_ATH6KL_DEBUG */
 };
 
-static inline void *ath6kl_priv(struct net_device *dev)
+static inline struct ath6kl *ath6kl_priv(struct net_device *dev)
 {
-	return wdev_priv(dev->ieee80211_ptr);
-}
-
-static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
-					       *cred_info,
-					       struct htc_endpoint_credit_dist
-					       *ep_dist, int credits)
-{
-	ep_dist->credits += credits;
-	ep_dist->cred_assngd += credits;
-	cred_info->cur_free_credits -= credits;
+	return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
 }
 
 static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +652,6 @@
 	return addr;
 }
 
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
 int ath6kl_configure_target(struct ath6kl *ar);
 void ath6kl_detect_error(unsigned long ptr);
 void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +669,8 @@
 int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
 int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
 int ath6kl_read_fwlogs(struct ath6kl *ar);
-void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
 void ath6kl_tx_data_cleanup(struct ath6kl *ar);
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
-			  bool get_dbglogs);
 
 struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
 void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +686,49 @@
 void aggr_module_destroy(struct aggr_info *aggr_info);
 void aggr_reset_state(struct aggr_info *aggr_info);
 
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
 struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
 
 void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
 int ath6kl_control_tx(void *devt, struct sk_buff *skb,
 		      enum htc_endpoint_id eid);
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
 			  u8 *bssid, u16 listen_int,
 			  u16 beacon_int, enum network_type net_type,
 			  u8 beacon_ie_len, u8 assoc_req_len,
 			  u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
 				u8 keymgmt, u8 ucipher, u8 auth,
 				u8 assoc_req_len, u8 *assoc_info);
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
 			     u8 *bssid, u8 assoc_resp_len,
 			     u8 *assoc_info, u16 prot_reason_status);
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
 void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
 void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
 enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
 
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
 
-void ath6kl_dtimexpiry_event(struct ath6kl *ar);
-void ath6kl_disconnect(struct ath6kl *ar);
-void ath6kl_deep_sleep_enable(struct ath6kl *ar);
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
 			     u8 win_sz);
 void ath6kl_wakeup_event(void *dev);
-void ath6kl_target_failure(struct ath6kl *ar);
+
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+			 bool wait_fot_compltn, bool cold_reset);
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
+void ath6kl_core_free(struct ath6kl *ar);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+void ath6kl_check_wow_status(struct ath6kl *ar);
 
 #endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7879b53..eb808b4 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -143,49 +143,48 @@
 
 static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
 {
-	ath6kl_dbg(ATH6KL_DBG_ANY,
+	ath6kl_dbg(ATH6KL_DBG_CREDIT,
 		   "--- endpoint: %d  svc_id: 0x%X ---\n",
 		   ep_dist->endpoint, ep_dist->svc_id);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags     : 0x%X\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags     : 0x%X\n",
 		   ep_dist->dist_flags);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm      : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm      : %d\n",
 		   ep_dist->cred_norm);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min       : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min       : %d\n",
 		   ep_dist->cred_min);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " credits        : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits        : %d\n",
 		   ep_dist->credits);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd    : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd    : %d\n",
 		   ep_dist->cred_assngd);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred      : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred      : %d\n",
 		   ep_dist->seek_cred);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz        : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz        : %d\n",
 		   ep_dist->cred_sz);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg   : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg   : %d\n",
 		   ep_dist->cred_per_msg);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist   : %d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist   : %d\n",
 		   ep_dist->cred_to_dist);
-	ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth      : %d\n",
-		   get_queue_depth(&((struct htc_endpoint *)
-				     ep_dist->htc_rsvd)->txq));
-	ath6kl_dbg(ATH6KL_DBG_ANY,
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth      : %d\n",
+		   get_queue_depth(&ep_dist->htc_ep->txq));
+	ath6kl_dbg(ATH6KL_DBG_CREDIT,
 		   "----------------------------------\n");
 }
 
+/* FIXME: move to htc.c */
 void dump_cred_dist_stats(struct htc_target *target)
 {
 	struct htc_endpoint_credit_dist *ep_list;
 
-	if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+	if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
 		return;
 
 	list_for_each_entry(ep_list, &target->cred_dist_list, list)
 		dump_cred_dist(ep_list);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
-		   target->cred_dist_cntxt, NULL);
-	ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
-		   target->cred_dist_cntxt->total_avail_credits,
-		   target->cred_dist_cntxt->cur_free_credits);
+	ath6kl_dbg(ATH6KL_DBG_CREDIT,
+		   "credit distribution total %d free %d\n",
+		   target->credit_info->total_avail_credits,
+		   target->credit_info->cur_free_credits);
 }
 
 static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@
 				   size_t count, loff_t *ppos)
 {
 	struct ath6kl *ar = file->private_data;
-	struct target_stats *tgt_stats = &ar->target_stats;
+	struct ath6kl_vif *vif;
+	struct target_stats *tgt_stats;
 	char *buf;
 	unsigned int len = 0, buf_len = 1500;
 	int i;
 	long left;
 	ssize_t ret_cnt;
 
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	tgt_stats = &vif->target_stats;
+
 	buf = kzalloc(buf_len, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -413,9 +419,9 @@
 		return -EBUSY;
 	}
 
-	set_bit(STATS_UPDATE_PEND, &ar->flag);
+	set_bit(STATS_UPDATE_PEND, &vif->flags);
 
-	if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+	if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
 		up(&ar->sem);
 		kfree(buf);
 		return -EIO;
@@ -423,7 +429,7 @@
 
 	left = wait_event_interruptible_timeout(ar->event_wq,
 						!test_bit(STATS_UPDATE_PEND,
-						&ar->flag), WMI_TIMEOUT);
+						&vif->flags), WMI_TIMEOUT);
 
 	up(&ar->sem);
 
@@ -555,10 +561,10 @@
 
 	len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
 			 "Total Avail Credits: ",
-			 target->cred_dist_cntxt->total_avail_credits);
+			 target->credit_info->total_avail_credits);
 	len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
 			 "Free credits :",
-			 target->cred_dist_cntxt->cur_free_credits);
+			 target->credit_info->cur_free_credits);
 
 	len += scnprintf(buf + len, buf_len - len,
 			 " Epid  Flags    Cred_norm  Cred_min  Credits  Cred_assngd"
@@ -577,8 +583,7 @@
 		print_credit_info("%9d", cred_per_msg);
 		print_credit_info("%14d", cred_to_dist);
 		len += scnprintf(buf + len, buf_len - len, "%12d\n",
-				 get_queue_depth(&((struct htc_endpoint *)
-						 ep_list->htc_rsvd)->txq));
+				 get_queue_depth(&ep_list->htc_ep->txq));
 	}
 
 	if (len > buf_len)
@@ -596,6 +601,107 @@
 	.llseek = default_llseek,
 };
 
+static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
+					unsigned int buf_len, unsigned int len,
+					int offset, const char *name)
+{
+	int i;
+	struct htc_endpoint_stats *ep_st;
+	u32 *counter;
+
+	len += scnprintf(buf + len, buf_len - len, "%s:", name);
+	for (i = 0; i < ENDPOINT_MAX; i++) {
+		ep_st = &target->endpoint[i].ep_st;
+		counter = ((u32 *) ep_st) + (offset / 4);
+		len += scnprintf(buf + len, buf_len - len, " %u", *counter);
+	}
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	return len;
+}
+
+static ssize_t ath6kl_endpoint_stats_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	struct htc_target *target = ar->htc_target;
+	char *buf;
+	unsigned int buf_len, len = 0;
+	ssize_t ret_cnt;
+
+	buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
+		(25 + ENDPOINT_MAX * 11);
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+#define EPSTAT(name)							\
+	len = print_endpoint_stat(target, buf, buf_len, len,		\
+				  offsetof(struct htc_endpoint_stats, name), \
+				  #name)
+	EPSTAT(cred_low_indicate);
+	EPSTAT(tx_issued);
+	EPSTAT(tx_pkt_bundled);
+	EPSTAT(tx_bundles);
+	EPSTAT(tx_dropped);
+	EPSTAT(tx_cred_rpt);
+	EPSTAT(cred_rpt_from_rx);
+	EPSTAT(cred_rpt_from_other);
+	EPSTAT(cred_rpt_ep0);
+	EPSTAT(cred_from_rx);
+	EPSTAT(cred_from_other);
+	EPSTAT(cred_from_ep0);
+	EPSTAT(cred_cosumd);
+	EPSTAT(cred_retnd);
+	EPSTAT(rx_pkts);
+	EPSTAT(rx_lkahds);
+	EPSTAT(rx_bundl);
+	EPSTAT(rx_bundle_lkahd);
+	EPSTAT(rx_bundle_from_hdr);
+	EPSTAT(rx_alloc_thresh_hit);
+	EPSTAT(rxalloc_thresh_byte);
+#undef EPSTAT
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret_cnt;
+}
+
+static ssize_t ath6kl_endpoint_stats_write(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	struct htc_target *target = ar->htc_target;
+	int ret, i;
+	u32 val;
+	struct htc_endpoint_stats *ep_st;
+
+	ret = kstrtou32_from_user(user_buf, count, 0, &val);
+	if (ret)
+		return ret;
+	if (val == 0) {
+		for (i = 0; i < ENDPOINT_MAX; i++) {
+			ep_st = &target->endpoint[i].ep_st;
+			memset(ep_st, 0, sizeof(*ep_st));
+		}
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_endpoint_stats = {
+	.open = ath6kl_debugfs_open,
+	.read = ath6kl_endpoint_stats_read,
+	.write = ath6kl_endpoint_stats_write,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static unsigned long ath6kl_get_num_reg(void)
 {
 	int i;
@@ -868,6 +974,660 @@
 	.llseek = default_llseek,
 };
 
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+				size_t len)
+{
+	const struct wmi_target_roam_tbl *tbl;
+	u16 num_entries;
+
+	if (len < sizeof(*tbl))
+		return -EINVAL;
+
+	tbl = (const struct wmi_target_roam_tbl *) buf;
+	num_entries = le16_to_cpu(tbl->num_entries);
+	if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
+	    len)
+		return -EINVAL;
+
+	if (ar->debug.roam_tbl == NULL ||
+	    ar->debug.roam_tbl_len < (unsigned int) len) {
+		kfree(ar->debug.roam_tbl);
+		ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
+		if (ar->debug.roam_tbl == NULL)
+			return -ENOMEM;
+	}
+
+	memcpy(ar->debug.roam_tbl, buf, len);
+	ar->debug.roam_tbl_len = len;
+
+	if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
+		clear_bit(ROAM_TBL_PEND, &ar->flag);
+		wake_up(&ar->event_wq);
+	}
+
+	return 0;
+}
+
+static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	int ret;
+	long left;
+	struct wmi_target_roam_tbl *tbl;
+	u16 num_entries, i;
+	char *buf;
+	unsigned int len, buf_len;
+	ssize_t ret_cnt;
+
+	if (down_interruptible(&ar->sem))
+		return -EBUSY;
+
+	set_bit(ROAM_TBL_PEND, &ar->flag);
+
+	ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
+	if (ret) {
+		up(&ar->sem);
+		return ret;
+	}
+
+	left = wait_event_interruptible_timeout(
+		ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
+	up(&ar->sem);
+
+	if (left <= 0)
+		return -ETIMEDOUT;
+
+	if (ar->debug.roam_tbl == NULL)
+		return -ENOMEM;
+
+	tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
+	num_entries = le16_to_cpu(tbl->num_entries);
+
+	buf_len = 100 + num_entries * 100;
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+	len = 0;
+	len += scnprintf(buf + len, buf_len - len,
+			 "roam_mode=%u\n\n"
+			 "# roam_util bssid rssi rssidt last_rssi util bias\n",
+			 le16_to_cpu(tbl->roam_mode));
+
+	for (i = 0; i < num_entries; i++) {
+		struct wmi_bss_roam_info *info = &tbl->info[i];
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d %pM %d %d %d %d %d\n",
+				 a_sle32_to_cpu(info->roam_util), info->bssid,
+				 info->rssi, info->rssidt, info->last_rssi,
+				 info->util, info->bias);
+	}
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+static const struct file_operations fops_roam_table = {
+	.read = ath6kl_roam_table_read,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_force_roam_write(struct file *file,
+				       const char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	int ret;
+	char buf[20];
+	size_t len;
+	u8 bssid[ETH_ALEN];
+	int i;
+	int addr[ETH_ALEN];
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+	buf[len] = '\0';
+
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+		   &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+	    != ETH_ALEN)
+		return -EINVAL;
+	for (i = 0; i < ETH_ALEN; i++)
+		bssid[i] = addr[i];
+
+	ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static const struct file_operations fops_force_roam = {
+	.write = ath6kl_force_roam_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_roam_mode_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	int ret;
+	char buf[20];
+	size_t len;
+	enum wmi_roam_mode mode;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+	buf[len] = '\0';
+	if (len > 0 && buf[len - 1] == '\n')
+		buf[len - 1] = '\0';
+
+	if (strcasecmp(buf, "default") == 0)
+		mode = WMI_DEFAULT_ROAM_MODE;
+	else if (strcasecmp(buf, "bssbias") == 0)
+		mode = WMI_HOST_BIAS_ROAM_MODE;
+	else if (strcasecmp(buf, "lock") == 0)
+		mode = WMI_LOCK_BSS_MODE;
+	else
+		return -EINVAL;
+
+	ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static const struct file_operations fops_roam_mode = {
+	.write = ath6kl_roam_mode_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+	ar->debug.keepalive = keepalive;
+}
+
+static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	char buf[16];
+	int len;
+
+	len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_keepalive_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	int ret;
+	u8 val;
+
+	ret = kstrtou8_from_user(user_buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static const struct file_operations fops_keepalive = {
+	.open = ath6kl_debugfs_open,
+	.read = ath6kl_keepalive_read,
+	.write = ath6kl_keepalive_write,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
+{
+	ar->debug.disc_timeout = timeout;
+}
+
+static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	char buf[16];
+	int len;
+
+	len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
+					       const char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	int ret;
+	u8 val;
+
+	ret = kstrtou8_from_user(user_buf, count, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static const struct file_operations fops_disconnect_timeout = {
+	.open = ath6kl_debugfs_open,
+	.read = ath6kl_disconnect_timeout_read,
+	.write = ath6kl_disconnect_timeout_write,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_create_qos_write(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+
+	struct ath6kl *ar = file->private_data;
+	struct ath6kl_vif *vif;
+	char buf[200];
+	ssize_t len;
+	char *sptr, *token;
+	struct wmi_create_pstream_cmd pstream;
+	u32 val32;
+	u16 val16;
+
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &pstream.user_pri))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &pstream.traffic_direc))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &pstream.traffic_class))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &pstream.traffic_type))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &pstream.voice_psc_cap))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.min_service_int = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.max_service_int = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.inactivity_int = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.suspension_int = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.service_start_time = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &pstream.tsid))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &val16))
+		return -EINVAL;
+	pstream.nominal_msdu = cpu_to_le16(val16);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &val16))
+		return -EINVAL;
+	pstream.max_msdu = cpu_to_le16(val16);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.min_data_rate = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.mean_data_rate = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.peak_data_rate = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.max_burst_size = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.delay_bound = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.min_phy_rate = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.sba = cpu_to_le32(val32);
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &val32))
+		return -EINVAL;
+	pstream.medium_time = cpu_to_le32(val32);
+
+	ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
+
+	return count;
+}
+
+static const struct file_operations fops_create_qos = {
+	.write = ath6kl_create_qos_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_delete_qos_write(struct file *file,
+				const char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+
+	struct ath6kl *ar = file->private_data;
+	struct ath6kl_vif *vif;
+	char buf[100];
+	ssize_t len;
+	char *sptr, *token;
+	u8 traffic_class;
+	u8 tsid;
+
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &traffic_class))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou8(token, 0, &tsid))
+		return -EINVAL;
+
+	ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
+				      traffic_class, tsid);
+
+	return count;
+}
+
+static const struct file_operations fops_delete_qos = {
+	.write = ath6kl_delete_qos_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_bgscan_int_write(struct file *file,
+				const char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	u16 bgscan_int;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtou16(buf, 0, &bgscan_int))
+		return -EINVAL;
+
+	if (bgscan_int == 0)
+		bgscan_int = 0xffff;
+
+	ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
+				  0, 0, 0);
+
+	return count;
+}
+
+static const struct file_operations fops_bgscan_int = {
+	.write = ath6kl_bgscan_int_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_listen_int_write(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	u16 listen_int_t, listen_int_b;
+	char buf[32];
+	char *sptr, *token;
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou16(token, 0, &listen_int_t))
+		return -EINVAL;
+
+	if (kstrtou16(sptr, 0, &listen_int_b))
+		return -EINVAL;
+
+	if ((listen_int_t < 15) || (listen_int_t > 5000))
+		return -EINVAL;
+
+	if ((listen_int_b < 1) || (listen_int_b > 50))
+		return -EINVAL;
+
+	ar->listen_intvl_t = listen_int_t;
+	ar->listen_intvl_b = listen_int_b;
+
+	ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
+				      ar->listen_intvl_b);
+
+	return count;
+}
+
+static ssize_t ath6kl_listen_int_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	char buf[32];
+	int len;
+
+	len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
+					ar->listen_intvl_b);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_listen_int = {
+	.read = ath6kl_listen_int_read,
+	.write = ath6kl_listen_int_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath6kl_power_params_write(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath6kl *ar = file->private_data;
+	char buf[100];
+	unsigned int len = 0;
+	char *sptr, *token;
+	u16 idle_period, ps_poll_num, dtim,
+		tx_wakeup, num_tx;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+	buf[len] = '\0';
+	sptr = buf;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &idle_period))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &ps_poll_num))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &dtim))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &tx_wakeup))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou16(token, 0, &num_tx))
+		return -EINVAL;
+
+	ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
+				dtim, tx_wakeup, num_tx, 0);
+
+	return count;
+}
+
+static const struct file_operations fops_power_params = {
+	.write = ath6kl_power_params_write,
+	.open = ath6kl_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath6kl_debug_init(struct ath6kl *ar)
 {
 	ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@
 	ar->debug.fwlog_mask = 0;
 
 	ar->debugfs_phy = debugfs_create_dir("ath6kl",
-					     ar->wdev->wiphy->debugfsdir);
+					     ar->wiphy->debugfsdir);
 	if (!ar->debugfs_phy) {
 		vfree(ar->debug.fwlog_buf.buf);
 		kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@
 	debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
 			    &fops_credit_dist_stats);
 
+	debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+			    ar->debugfs_phy, ar, &fops_endpoint_stats);
+
 	debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
 			    &fops_fwlog);
 
@@ -923,6 +1686,33 @@
 	debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
 			    &fops_war_stats);
 
+	debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+			    &fops_roam_table);
+
+	debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_force_roam);
+
+	debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_roam_mode);
+
+	debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+			    &fops_keepalive);
+
+	debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+			    ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+	debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+				&fops_create_qos);
+
+	debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+				&fops_delete_qos);
+
+	debugfs_create_file("bgscan_interval", S_IWUSR,
+				ar->debugfs_phy, ar, &fops_bgscan_int);
+
+	debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+						&fops_power_params);
+
 	return 0;
 }
 
@@ -930,6 +1720,7 @@
 {
 	vfree(ar->debug.fwlog_buf.buf);
 	kfree(ar->debug.fwlog_tmp);
+	kfree(ar->debug.roam_tbl);
 }
 
 #endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 7b7675f..9853c9c 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -17,19 +17,19 @@
 #ifndef DEBUG_H
 #define DEBUG_H
 
-#include "htc_hif.h"
+#include "hif.h"
 
 enum ATH6K_DEBUG_MASK {
-	ATH6KL_DBG_WLAN_CONNECT = BIT(0),     /* wlan connect */
-	ATH6KL_DBG_WLAN_SCAN    = BIT(1),     /* wlan scan */
+	ATH6KL_DBG_CREDIT	= BIT(0),
+	/* hole */
 	ATH6KL_DBG_WLAN_TX      = BIT(2),     /* wlan tx */
 	ATH6KL_DBG_WLAN_RX      = BIT(3),     /* wlan rx */
 	ATH6KL_DBG_BMI		= BIT(4),     /* bmi tracing */
-	ATH6KL_DBG_HTC_SEND     = BIT(5),     /* htc send */
-	ATH6KL_DBG_HTC_RECV     = BIT(6),     /* htc recv */
+	ATH6KL_DBG_HTC		= BIT(5),
+	ATH6KL_DBG_HIF		= BIT(6),
 	ATH6KL_DBG_IRQ		= BIT(7),     /* interrupt processing */
-	ATH6KL_DBG_PM           = BIT(8),     /* power management */
-	ATH6KL_DBG_WLAN_NODE    = BIT(9),     /* general wlan node tracing */
+	/* hole */
+	/* hole */
 	ATH6KL_DBG_WMI          = BIT(10),    /* wmi tracing */
 	ATH6KL_DBG_TRC	        = BIT(11),    /* generic func tracing */
 	ATH6KL_DBG_SCATTER	= BIT(12),    /* hif scatter tracing */
@@ -40,6 +40,7 @@
 	ATH6KL_DBG_SDIO_DUMP	= BIT(17),
 	ATH6KL_DBG_BOOT		= BIT(18),    /* driver init and fw boot */
 	ATH6KL_DBG_WMI_DUMP	= BIT(19),
+	ATH6KL_DBG_SUSPEND	= BIT(20),
 	ATH6KL_DBG_ANY	        = 0xffffffff  /* enable all logs */
 };
 
@@ -90,6 +91,10 @@
 void dump_cred_dist_stats(struct htc_target *target);
 void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
 void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+				size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
 int ath6kl_debug_init(struct ath6kl *ar);
 void ath6kl_debug_cleanup(struct ath6kl *ar);
 
@@ -125,6 +130,21 @@
 {
 }
 
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+					      const void *buf, size_t len)
+{
+	return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+						       u8 timeout)
+{
+}
+
 static inline int ath6kl_debug_init(struct ath6kl *ar)
 {
 	return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index d6c898f..2fe1dad 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -18,10 +18,16 @@
 #define HIF_OPS_H
 
 #include "hif.h"
+#include "debug.h"
 
 static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
 				      u32 len, u32 request)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF,
+		   "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+		   (request & HIF_WRITE) ? "write" : "read",
+		   addr, buf, len, request);
+
 	return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
 }
 
@@ -29,16 +35,24 @@
 				  u32 length, u32 request,
 				  struct htc_packet *packet)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF,
+		   "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+		   address, buffer, length, request);
+
 	return ar->hif_ops->write_async(ar, address, buffer, length,
 					request, packet);
 }
 static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
 	return ar->hif_ops->irq_enable(ar);
 }
 
 static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
 {
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
 	return ar->hif_ops->irq_disable(ar);
 }
 
@@ -69,9 +83,70 @@
 	return ar->hif_ops->cleanup_scatter(ar);
 }
 
-static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+				     struct cfg80211_wowlan *wow)
 {
-	return ar->hif_ops->suspend(ar);
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+	return ar->hif_ops->suspend(ar, wow);
+}
+
+/*
+ * Read from the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address,
+					 u32 *value)
+{
+	return ar->hif_ops->diag_read32(ar, address, value);
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address,
+					  __le32 value)
+{
+	return ar->hif_ops->diag_write32(ar, address, value);
+}
+
+static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	return ar->hif_ops->bmi_read(ar, buf, len);
+}
+
+static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	return ar->hif_ops->bmi_write(ar, buf, len);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+	return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+	return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+	return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+	ar->hif_ops->stop(ar);
 }
 
 #endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
similarity index 80%
rename from drivers/net/wireless/ath/ath6kl/htc_hif.c
rename to drivers/net/wireless/ath/ath6kl/hif.c
index 86b1cc7..e57da35 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -13,18 +13,19 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+#include "hif.h"
 
 #include "core.h"
 #include "target.h"
 #include "hif-ops.h"
-#include "htc_hif.h"
 #include "debug.h"
 
 #define MAILBOX_FOR_BLOCK_SIZE          1
 
 #define ATH6KL_TIME_QUANTUM	10  /* in ms */
 
-static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
+				      bool from_dma)
 {
 	u8 *buf;
 	int i;
@@ -46,12 +47,11 @@
 	return 0;
 }
 
-int ath6kldev_rw_comp_handler(void *context, int status)
+int ath6kl_hif_rw_comp_handler(void *context, int status)
 {
 	struct htc_packet *packet = context;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-		   "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
 		   packet, status);
 
 	packet->status = status;
@@ -59,30 +59,83 @@
 
 	return 0;
 }
+#define REG_DUMP_COUNT_AR6003   60
+#define REGISTER_DUMP_LEN_MAX   60
 
-static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
+{
+	__le32 regdump_val[REGISTER_DUMP_LEN_MAX];
+	u32 i, address, regdump_addr = 0;
+	int ret;
+
+	if (ar->target_type != TARGET_TYPE_AR6003)
+		return;
+
+	/* the reg dump pointer is copied to the host interest area */
+	address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+	address = TARG_VTOP(ar->target_type, address);
+
+	/* read RAM location through diagnostic window */
+	ret = ath6kl_diag_read32(ar, address, &regdump_addr);
+
+	if (ret || !regdump_addr) {
+		ath6kl_warn("failed to get ptr to register dump area: %d\n",
+			    ret);
+		return;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
+		regdump_addr);
+	regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
+
+	/* fetch register dump data */
+	ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
+				  REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+	if (ret) {
+		ath6kl_warn("failed to get register dump: %d\n", ret);
+		return;
+	}
+
+	ath6kl_info("crash dump:\n");
+	ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
+		    ar->wiphy->fw_version);
+
+	BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
+
+	for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
+		ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
+			    i,
+			    le32_to_cpu(regdump_val[i]),
+			    le32_to_cpu(regdump_val[i + 1]),
+			    le32_to_cpu(regdump_val[i + 2]),
+			    le32_to_cpu(regdump_val[i + 3]));
+	}
+
+}
+
+static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
 {
 	u32 dummy;
-	int status;
+	int ret;
 
-	ath6kl_err("target debug interrupt\n");
-
-	ath6kl_target_failure(dev->ar);
+	ath6kl_warn("firmware crashed\n");
 
 	/*
 	 * read counter to clear the interrupt, the debug error interrupt is
 	 * counter 0.
 	 */
-	status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+	ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
 				     (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
-	if (status)
-		WARN_ON(1);
+	if (ret)
+		ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
 
-	return status;
+	ath6kl_hif_dump_fw_crash(dev->ar);
+
+	return ret;
 }
 
 /* mailbox recv message polling */
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
 			      int timeout)
 {
 	struct ath6kl_irq_proc_registers *rg;
@@ -118,7 +171,7 @@
 
 		/* delay a little  */
 		mdelay(ATH6KL_TIME_QUANTUM);
-		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+		ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
 	}
 
 	if (i == 0) {
@@ -131,7 +184,7 @@
 			 * Target failure handler will be called in case of
 			 * an assert.
 			 */
-			ath6kldev_proc_dbg_intr(dev);
+			ath6kl_hif_proc_dbg_intr(dev);
 	}
 
 	return status;
@@ -141,11 +194,14 @@
  * Disable packet reception (used in case the host runs out of buffers)
  * using the interrupt enable registers through the host I/F
  */
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
 {
 	struct ath6kl_irq_enable_reg regs;
 	int status = 0;
 
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+		   enable_rx ? "enable" : "disable");
+
 	/* take the lock to protect interrupt enable shadows */
 	spin_lock_bh(&dev->lock);
 
@@ -168,7 +224,7 @@
 	return status;
 }
 
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
 			      struct hif_scatter_req *scat_req, bool read)
 {
 	int status = 0;
@@ -185,14 +241,14 @@
 			dev->ar->mbox_info.htc_addr;
 	}
 
-	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
-		   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+	ath6kl_dbg(ATH6KL_DBG_HIF,
+		   "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
 		   scat_req->scat_entries, scat_req->len,
 		   scat_req->addr, !read ? "async" : "sync",
 		   (read) ? "rd" : "wr");
 
 	if (!read && scat_req->virt_scat) {
-		status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+		status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
 		if (status) {
 			scat_req->status = status;
 			scat_req->complete(dev->ar->htc_target, scat_req);
@@ -207,13 +263,13 @@
 		scat_req->status = status;
 		if (!status && scat_req->virt_scat)
 			scat_req->status =
-				ath6kldev_cp_scat_dma_buf(scat_req, true);
+				ath6kl_hif_cp_scat_dma_buf(scat_req, true);
 	}
 
 	return status;
 }
 
-static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
 {
 	u8 counter_int_status;
 
@@ -232,12 +288,12 @@
 	 * the debug assertion counter interrupt.
 	 */
 	if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
-		return ath6kldev_proc_dbg_intr(dev);
+		return ath6kl_hif_proc_dbg_intr(dev);
 
 	return 0;
 }
 
-static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
 {
 	int status;
 	u8 error_int_status;
@@ -282,7 +338,7 @@
 	return status;
 }
 
-static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
 {
 	int status;
 	u8 cpu_int_status;
@@ -417,7 +473,7 @@
 		 * we rapidly pull packets.
 		 */
 		status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
-							  &lk_ahd, &fetched);
+							  lk_ahd, &fetched);
 		if (status)
 			goto out;
 
@@ -436,21 +492,21 @@
 
 	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
 		/* CPU Interrupt */
-		status = ath6kldev_proc_cpu_intr(dev);
+		status = ath6kl_hif_proc_cpu_intr(dev);
 		if (status)
 			goto out;
 	}
 
 	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
 		/* Error Interrupt */
-		status = ath6kldev_proc_err_intr(dev);
+		status = ath6kl_hif_proc_err_intr(dev);
 		if (status)
 			goto out;
 	}
 
 	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
 		/* Counter Interrupt */
-		status = ath6kldev_proc_counter_intr(dev);
+		status = ath6kl_hif_proc_counter_intr(dev);
 
 out:
 	/*
@@ -479,9 +535,10 @@
 }
 
 /* interrupt handler, kicks off all interrupt processing */
-int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
 {
 	struct ath6kl_device *dev = ar->htc_target->dev;
+	unsigned long timeout;
 	int status = 0;
 	bool done = false;
 
@@ -495,7 +552,8 @@
 	 * IRQ processing is synchronous, interrupt status registers can be
 	 * re-read.
 	 */
-	while (!done) {
+	timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+	while (time_before(jiffies, timeout) && !done) {
 		status = proc_pending_irqs(dev, &done);
 		if (status)
 			break;
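
The bounded loop added above caps interrupt processing with a jiffies deadline instead of spinning until done. A user-space analogue of the same deadline idiom, using a monotonic clock in place of jiffies (hypothetical names, not part of this patch):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DEMO_TIMEOUT_MS 1000

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* stand-in for proc_pending_irqs(): pretends work finishes on the 3rd pass */
static int process_pending(bool *done)
{
	static int calls;

	*done = (++calls >= 3);
	return 0;
}

int main(void)
{
	long long deadline = now_ms() + DEMO_TIMEOUT_MS;
	bool done = false;
	int status = 0;

	/* same shape as: while (time_before(jiffies, timeout) && !done) */
	while (now_ms() < deadline && !done) {
		status = process_pending(&done);
		if (status)
			break;
	}

	printf("done=%d status=%d\n", done, status);
	return status;
}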
@@ -504,7 +562,7 @@
 	return status;
 }
 
-static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
 {
 	struct ath6kl_irq_enable_reg regs;
 	int status;
@@ -552,7 +610,7 @@
 	return status;
 }
 
-int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
 {
 	struct ath6kl_irq_enable_reg regs;
 
@@ -571,7 +629,7 @@
 }
 
 /* enable device interrupts */
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
 {
 	int status = 0;
 
@@ -583,29 +641,29 @@
 	 * target "soft" resets. The ATH6KL interrupt enables reset back to an
 	 * "enabled" state when this happens.
 	 */
-	ath6kldev_disable_intrs(dev);
+	ath6kl_hif_disable_intrs(dev);
 
 	/* unmask the host controller interrupts */
 	ath6kl_hif_irq_enable(dev->ar);
-	status = ath6kldev_enable_intrs(dev);
+	status = ath6kl_hif_enable_intrs(dev);
 
 	return status;
 }
 
 /* disable all device interrupts */
-int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
 {
 	/*
 	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
 	 * taken while we zero out our shadow registers in
-	 * ath6kldev_disable_intrs().
+	 * ath6kl_hif_disable_intrs().
 	 */
 	ath6kl_hif_irq_disable(dev->ar);
 
-	return ath6kldev_disable_intrs(dev);
+	return ath6kl_hif_disable_intrs(dev);
 }
 
-int ath6kldev_setup(struct ath6kl_device *dev)
+int ath6kl_hif_setup(struct ath6kl_device *dev)
 {
 	int status = 0;
 
@@ -621,19 +679,17 @@
 	/* must be a power of 2 */
 	if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
 		WARN_ON(1);
+		status = -EINVAL;
 		goto fail_setup;
 	}
 
 	/* assemble mask, used for padding to a block */
 	dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
 
-	ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
 		   dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
 
-	ath6kl_dbg(ATH6KL_DBG_TRC,
-		   "hif interrupt processing is sync only\n");
-
-	status = ath6kldev_disable_intrs(dev);
+	status = ath6kl_hif_disable_intrs(dev);
 
 fail_setup:
 	return status;
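
The block_mask set up in ath6kl_hif_setup() only works because block_sz is required to be a power of 2: padding a length to a block boundary then becomes a mask operation, which is what CALC_TXRX_PADDED_LEN() in htc.c builds on. A small stand-alone sketch of that arithmetic (hypothetical names, not part of this patch):

#include <stdio.h>

/* returns len padded up to a multiple of block_sz (block_sz must be 2^n) */
static unsigned int pad_to_block(unsigned int len, unsigned int block_sz)
{
	unsigned int mask = block_sz - 1;

	if (block_sz & mask)		/* not a power of 2: reject, as above */
		return 0;

	return (len + mask) & ~mask;	/* same math as __ALIGN_MASK() */
}

int main(void)
{
	printf("%u\n", pad_to_block(27, 128));	/* -> 128 */
	printf("%u\n", pad_to_block(130, 128));	/* -> 256 */
	return 0;
}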
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 797e2d1..699a036 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -35,6 +35,7 @@
 #define MAX_SCATTER_REQ_TRANSFER_SIZE    (32 * 1024)
 
 #define MANUFACTURER_ID_AR6003_BASE        0x300
+#define MANUFACTURER_ID_AR6004_BASE        0x400
     /* SDIO manufacturer ID and Codes */
 #define MANUFACTURER_ID_ATH6KL_BASE_MASK     0xFF00
 #define MANUFACTURER_CODE                  0x271	/* Atheros */
@@ -59,6 +60,18 @@
 /* mode to enable special 4-bit interrupt assertion without clock */
 #define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ   (1 << 0)
 
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX	0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK     0x01
+
+/* FIXME: are these duplicates of the MAX_SCATTER_* values in hif.h? */
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ            16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER      (16 * 1024)
+#define ATH6KL_SCATTER_REQS                       4
+
+#define ATH6KL_HIF_COMMUNICATION_TIMEOUT	1000
+
 struct bus_request {
 	struct list_head list;
 
@@ -186,6 +199,34 @@
 	struct hif_scatter_item scat_list[1];
 };
 
+struct ath6kl_irq_proc_registers {
+	u8 host_int_status;
+	u8 cpu_int_status;
+	u8 error_int_status;
+	u8 counter_int_status;
+	u8 mbox_frame;
+	u8 rx_lkahd_valid;
+	u8 host_int_status2;
+	u8 gmbox_rx_avail;
+	__le32 rx_lkahd[2];
+	__le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+	u8 int_status_en;
+	u8 cpu_int_status_en;
+	u8 err_int_status_en;
+	u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+	spinlock_t lock;
+	struct ath6kl_irq_proc_registers irq_proc_reg;
+	struct ath6kl_irq_enable_reg irq_en_reg;
+	struct htc_target *htc_cnxt;
+	struct ath6kl *ar;
+};
+
 struct ath6kl_hif_ops {
 	int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
 			       u32 len, u32 request);
@@ -202,7 +243,30 @@
 	int (*scat_req_rw) (struct ath6kl *ar,
 			    struct hif_scatter_req *scat_req);
 	void (*cleanup_scatter)(struct ath6kl *ar);
-	int (*suspend)(struct ath6kl *ar);
+	int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+	int (*resume)(struct ath6kl *ar);
+	int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
+	int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
+	int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
+	int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
+	int (*power_on)(struct ath6kl *ar);
+	int (*power_off)(struct ath6kl *ar);
+	void (*stop)(struct ath6kl *ar);
 };
 
+int ath6kl_hif_setup(struct ath6kl_device *dev);
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
+			       u32 *lk_ahd, int timeout);
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kl_hif_rw_comp_handler(void *context, int status);
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+			       struct hif_scatter_req *scat_req, bool read);
+
 #endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f88a7c9..f3b63ca25 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -15,13 +15,321 @@
  */
 
 #include "core.h"
-#include "htc_hif.h"
+#include "hif.h"
 #include "debug.h"
 #include "hif-ops.h"
 #include <asm/unaligned.h>
 
 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
 
+/* Functions for Tx credit handling */
+static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
+				  struct htc_endpoint_credit_dist *ep_dist,
+				  int credits)
+{
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
+		   ep_dist->endpoint, credits);
+
+	ep_dist->credits += credits;
+	ep_dist->cred_assngd += credits;
+	cred_info->cur_free_credits -= credits;
+}
+
+static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
+			       struct list_head *ep_list,
+			       int tot_credits)
+{
+	struct htc_endpoint_credit_dist *cur_ep_dist;
+	int count;
+
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
+
+	cred_info->cur_free_credits = tot_credits;
+	cred_info->total_avail_credits = tot_credits;
+
+	list_for_each_entry(cur_ep_dist, ep_list, list) {
+		if (cur_ep_dist->endpoint == ENDPOINT_0)
+			continue;
+
+		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+		if (tot_credits > 4) {
+			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+				ath6kl_credit_deposit(cred_info,
+						      cur_ep_dist,
+						      cur_ep_dist->cred_min);
+				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+			}
+		}
+
+		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+			ath6kl_credit_deposit(cred_info, cur_ep_dist,
+					      cur_ep_dist->cred_min);
+			/*
+			 * Control service is always marked active, it
+			 * never goes inactive EVER.
+			 */
+			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+			/* this is the lowest priority data endpoint */
+			/* FIXME: this looks fishy, check */
+			cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+		/*
+		 * Streams have to be created (explicit | implicit) for all
+		 * kinds of traffic. BE endpoints are also inactive in the
+		 * beginning. When BE traffic starts it creates implicit
+		 * streams that redistribute credits.
+		 *
+		 * Note: all other endpoints have minimums set but are
+		 * initially given NO credits. credits will be distributed
+		 * as traffic activity demands
+		 */
+	}
+
+	WARN_ON(cred_info->cur_free_credits <= 0);
+
+	list_for_each_entry(cur_ep_dist, ep_list, list) {
+		if (cur_ep_dist->endpoint == ENDPOINT_0)
+			continue;
+
+		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+		else {
+			/*
+			 * For the remaining data endpoints, we assume that
+			 * each cred_per_msg are the same. We use a simple
+			 * calculation here, we take the remaining credits
+			 * and determine how many max messages this can
+			 * cover and then set each endpoint's normal value
+			 * equal to 3/4 this amount.
+			 */
+			count = (cred_info->cur_free_credits /
+				 cur_ep_dist->cred_per_msg)
+				* cur_ep_dist->cred_per_msg;
+			count = (count * 3) >> 2;
+			count = max(count, cur_ep_dist->cred_per_msg);
+			cur_ep_dist->cred_norm = count;
+
+		}
+
+		ath6kl_dbg(ATH6KL_DBG_CREDIT,
+			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
+			   cur_ep_dist->endpoint,
+			   cur_ep_dist->svc_id,
+			   cur_ep_dist->credits,
+			   cur_ep_dist->cred_per_msg,
+			   cur_ep_dist->cred_norm,
+			   cur_ep_dist->cred_min);
+	}
+}
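
As a worked example of the cred_norm calculation above (illustrative numbers, not taken from the driver): with cur_free_credits = 20 and cred_per_msg = 6, count starts as (20 / 6) * 6 = 18, is scaled to (18 * 3) >> 2 = 13, and max(13, 6) leaves the endpoint with cred_norm = 13.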
+
+/* initialize and setup credit distribution */
+int ath6kl_credit_setup(void *htc_handle,
+			struct ath6kl_htc_credit_info *cred_info)
+{
+	u16 servicepriority[5];
+
+	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
+
+	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
+	servicepriority[1] = WMI_DATA_VO_SVC;
+	servicepriority[2] = WMI_DATA_VI_SVC;
+	servicepriority[3] = WMI_DATA_BE_SVC;
+	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+	/* set priority list */
+	ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+	return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
+				 struct htc_endpoint_credit_dist *ep_dist,
+				 int limit)
+{
+	int credits;
+
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
+		   ep_dist->endpoint, limit);
+
+	ep_dist->cred_assngd = limit;
+
+	if (ep_dist->credits <= limit)
+		return;
+
+	credits = ep_dist->credits - limit;
+	ep_dist->credits -= credits;
+	cred_info->cur_free_credits += credits;
+}
+
+static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
+				 struct list_head *epdist_list)
+{
+	struct htc_endpoint_credit_dist *cur_dist_list;
+
+	list_for_each_entry(cur_dist_list, epdist_list, list) {
+		if (cur_dist_list->endpoint == ENDPOINT_0)
+			continue;
+
+		if (cur_dist_list->cred_to_dist > 0) {
+			cur_dist_list->credits +=
+					cur_dist_list->cred_to_dist;
+			cur_dist_list->cred_to_dist = 0;
+			if (cur_dist_list->credits >
+			    cur_dist_list->cred_assngd)
+				ath6kl_credit_reduce(cred_info,
+						cur_dist_list,
+						cur_dist_list->cred_assngd);
+
+			if (cur_dist_list->credits >
+			    cur_dist_list->cred_norm)
+				ath6kl_credit_reduce(cred_info, cur_dist_list,
+						     cur_dist_list->cred_norm);
+
+			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+				if (cur_dist_list->txq_depth == 0)
+					ath6kl_credit_reduce(cred_info,
+							     cur_dist_list, 0);
+			}
+		}
+	}
+}
+
+/*
+ * HTC has an endpoint that needs credits, ep_dist is the endpoint in
+ * question.
+ */
+static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
+				struct htc_endpoint_credit_dist *ep_dist)
+{
+	struct htc_endpoint_credit_dist *curdist_list;
+	int credits = 0;
+	int need;
+
+	if (ep_dist->svc_id == WMI_CONTROL_SVC)
+		goto out;
+
+	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
+		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+			goto out;
+
+	/*
+	 * For all other services, we follow a simple algorithm of:
+	 *
+	 * 1. checking the free pool for credits
+	 * 2. checking lower priority endpoints for credits to take
+	 */
+
+	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+	if (credits >= ep_dist->seek_cred)
+		goto out;
+
+	/*
+	 * We don't have enough in the free pool, so try taking away from
+	 * lower priority services. The rules for taking away credits:
+	 *
+	 *   1. Only take from lower priority endpoints
+	 *   2. Only take what is allocated above the minimum (never
+	 *      starve an endpoint completely)
+	 *   3. Only take what you need.
+	 */
+
+	list_for_each_entry_reverse(curdist_list,
+				    &cred_info->lowestpri_ep_dist,
+				    list) {
+		if (curdist_list == ep_dist)
+			break;
+
+		need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+		if ((curdist_list->cred_assngd - need) >=
+		     curdist_list->cred_min) {
+			/*
+			 * The current one has been allocated more than
+			 * its minimum and has enough credits assigned
+			 * above its minimum to fulfill our need, so take
+			 * away just enough to cover that need.
+			 */
+			ath6kl_credit_reduce(cred_info, curdist_list,
+					     curdist_list->cred_assngd - need);
+
+			if (cred_info->cur_free_credits >=
+			    ep_dist->seek_cred)
+				break;
+		}
+
+		if (curdist_list->endpoint == ENDPOINT_0)
+			break;
+	}
+
+	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+	/* did we find some credits? */
+	if (credits)
+		ath6kl_credit_deposit(cred_info, ep_dist, credits);
+
+	ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
+				       struct list_head *ep_dist_list)
+{
+	struct htc_endpoint_credit_dist *curdist_list;
+
+	list_for_each_entry(curdist_list, ep_dist_list, list) {
+		if (curdist_list->endpoint == ENDPOINT_0)
+			continue;
+
+		if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
+		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
+			curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+			if (curdist_list->txq_depth == 0)
+				ath6kl_credit_reduce(info, curdist_list, 0);
+			else
+				ath6kl_credit_reduce(info,
+						     curdist_list,
+						     curdist_list->cred_min);
+		}
+	}
+}
+
+/*
+ *
+ * This function is invoked whenever endpoints require credit
+ * distribution. A lock is held while this function is invoked, so it
+ * must NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
+				     struct list_head *ep_dist_list,
+			      enum htc_credit_dist_reason reason)
+{
+	switch (reason) {
+	case HTC_CREDIT_DIST_SEND_COMPLETE:
+		ath6kl_credit_update(cred_info, ep_dist_list);
+		break;
+	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+		ath6kl_credit_redistribute(cred_info, ep_dist_list);
+		break;
+	default:
+		break;
+	}
+
+	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+	WARN_ON(cred_info->cur_free_credits < 0);
+}
+
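
Taken together, ath6kl_credit_seek() satisfies a request from the free pool first and only then borrows from lower-priority endpoints, never dipping below an endpoint's minimum. A simplified stand-alone model of those rules (hypothetical names and values, not part of this patch):

#include <stdio.h>

struct demo_ep {
	int credits;
	int cred_min;
};

static int demo_seek(int *free_pool, struct demo_ep *lower, int want)
{
	int got = want < *free_pool ? want : *free_pool;

	*free_pool -= got;

	/* borrow from the lower-priority endpoint, respecting its minimum */
	while (got < want && lower->credits > lower->cred_min) {
		lower->credits--;
		got++;
	}

	return got;
}

int main(void)
{
	int free_pool = 2;
	struct demo_ep best_effort = { .credits = 6, .cred_min = 3 };

	/* a higher-priority endpoint asks for 4 credits */
	int got = demo_seek(&free_pool, &best_effort, 4);

	printf("got %d, free %d, lower-pri left %d (min %d)\n",
	       got, free_pool, best_effort.credits, best_effort.cred_min);
	return 0;
}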
 static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
 {
 	u8 *align_addr;
@@ -102,12 +410,12 @@
 				packet->info.tx.cred_used;
 	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-		   target->cred_dist_cntxt, &target->cred_dist_list);
+	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
+		   target->credit_info, &target->cred_dist_list);
 
-	ath6k_credit_distribute(target->cred_dist_cntxt,
-				&target->cred_dist_list,
-				HTC_CREDIT_DIST_SEND_COMPLETE);
+	ath6kl_credit_distribute(target->credit_info,
+				 &target->cred_dist_list,
+				 HTC_CREDIT_DIST_SEND_COMPLETE);
 
 	spin_unlock_bh(&target->tx_lock);
 }
@@ -118,8 +426,8 @@
 	if (list_empty(txq))
 		return;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-		   "send complete ep %d, (%d pkts)\n",
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "htc tx complete ep %d pkts %d\n",
 		   endpoint->eid, get_queue_depth(txq));
 
 	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
@@ -131,6 +439,9 @@
 	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
 	struct list_head container;
 
+	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
+		   packet->info.tx.seqno);
+
 	htc_tx_comp_update(target, endpoint, packet);
 	INIT_LIST_HEAD(&container);
 	list_add_tail(&packet->list, &container);
@@ -148,8 +459,8 @@
 
 	INIT_LIST_HEAD(&tx_compq);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-		"htc_async_tx_scat_complete  total len: %d  entries: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		"htc tx scat complete len %d entries %d\n",
 		scat_req->len, scat_req->scat_entries);
 
 	if (scat_req->status)
@@ -190,16 +501,13 @@
 
 	send_len = packet->act_len + HTC_HDR_LENGTH;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
-		   __func__, send_len, sync ? "sync" : "async");
-
 	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-		"DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
-		padded_len,
-		target->dev->ar->mbox_info.htc_addr,
-		sync ? "sync" : "async");
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
+		   send_len, packet->info.tx.seqno, padded_len,
+		   target->dev->ar->mbox_info.htc_addr,
+		   sync ? "sync" : "async");
 
 	if (sync) {
 		status = hif_read_write_sync(target->dev->ar,
@@ -227,7 +535,7 @@
 	*req_cred = (len > target->tgt_cred_sz) ?
 		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
 		   *req_cred, ep->cred_dist.credits);
 
 	if (ep->cred_dist.credits < *req_cred) {
@@ -237,16 +545,13 @@
 		/* Seek more credits */
 		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-			   target->cred_dist_cntxt, &ep->cred_dist);
-
-		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
 
 		ep->cred_dist.seek_cred = 0;
 
 		if (ep->cred_dist.credits < *req_cred) {
-			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-				   "not enough credits for ep %d - leaving packet in queue\n",
+			ath6kl_dbg(ATH6KL_DBG_CREDIT,
+				   "credit not found for ep %d\n",
 				   eid);
 			return -EINVAL;
 		}
@@ -260,17 +565,15 @@
 		ep->cred_dist.seek_cred =
 		ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-			   target->cred_dist_cntxt, &ep->cred_dist);
-
-		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
 
 		/* see if we were successful in getting more */
 		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
 			/* tell the target we need credits ASAP! */
 			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
 			ep->ep_st.cred_low_indicate += 1;
-			ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+			ath6kl_dbg(ATH6KL_DBG_CREDIT,
+				   "credit we need credits asap\n");
 		}
 	}
 
@@ -295,8 +598,8 @@
 		packet = list_first_entry(&endpoint->txq, struct htc_packet,
 					  list);
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-			"got head pkt:0x%p , queue depth: %d\n",
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			"htc tx got packet 0x%p queue depth %d\n",
 			packet, get_queue_depth(&endpoint->txq));
 
 		len = CALC_TXRX_PADDED_LEN(target,
@@ -404,9 +707,9 @@
 
 		scat_req->len += len;
 		scat_req->scat_entries++;
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-			   "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
-			   i, packet, len, rem_scat);
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+			   i, packet, packet->info.tx.seqno, len, rem_scat);
 	}
 
 	/* Roll back scatter setup in case of any failure */
@@ -455,12 +758,12 @@
 
 		if (!scat_req) {
 			/* no scatter resources  */
-			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-				"no more scatter resources\n");
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				"htc tx no more scatter resources\n");
 			break;
 		}
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
 			   n_scat);
 
 		scat_req->len = 0;
@@ -479,10 +782,10 @@
 		n_sent_bundle++;
 		tot_pkts_bundle += scat_req->scat_entries;
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-			   "send scatter total bytes: %d , entries: %d\n",
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "htc tx scatter bytes %d entries %d\n",
 			   scat_req->len, scat_req->scat_entries);
-		ath6kldev_submit_scat_req(target->dev, scat_req, false);
+		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
 
 		if (status)
 			break;
@@ -490,8 +793,8 @@
 
 	*sent_bundle = n_sent_bundle;
 	*n_bundle_pkts = tot_pkts_bundle;
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
-		   __func__, n_sent_bundle);
+	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
+		   n_sent_bundle);
 
 	return;
 }
@@ -510,7 +813,7 @@
 	if (endpoint->tx_proc_cnt > 1) {
 		endpoint->tx_proc_cnt--;
 		spin_unlock_bh(&target->tx_lock);
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
 		return;
 	}
 
@@ -588,15 +891,12 @@
 		overflow = true;
 
 	if (overflow)
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-			   "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
-			   endpoint->eid, overflow, txq_depth,
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "htc tx overflow ep %d depth %d max %d\n",
+			   endpoint->eid, txq_depth,
 			   endpoint->max_txq_depth);
 
 	if (overflow && ep_cb.tx_full) {
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-			   "indicating overflowed tx packet: 0x%p\n", tx_pkt);
-
 		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
 		    HTC_SEND_FULL_DROP) {
 			endpoint->ep_st.tx_dropped += 1;
@@ -625,12 +925,12 @@
 	 * are not modifying any state.
 	 */
 	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
-		endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+		endpoint = cred_dist->htc_ep;
 
 		spin_lock_bh(&target->tx_lock);
 		if (!list_empty(&endpoint->txq)) {
-			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-				   "ep %d has %d credits and %d packets in tx queue\n",
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "htc creds ep %d credits %d pkts %d\n",
 				   cred_dist->endpoint,
 				   endpoint->cred_dist.credits,
 				   get_queue_depth(&endpoint->txq));
@@ -704,13 +1004,13 @@
 }
 
 void ath6kl_htc_set_credit_dist(struct htc_target *target,
-				struct htc_credit_state_info *cred_dist_cntxt,
+				struct ath6kl_htc_credit_info *credit_info,
 				u16 srvc_pri_order[], int list_len)
 {
 	struct htc_endpoint *endpoint;
 	int i, ep;
 
-	target->cred_dist_cntxt = cred_dist_cntxt;
+	target->credit_info = credit_info;
 
 	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
 		      &target->cred_dist_list);
@@ -736,8 +1036,8 @@
 	struct htc_endpoint *endpoint;
 	struct list_head queue;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-		   "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "htc tx ep id %d buf 0x%p len %d\n",
 		   packet->endpoint, packet->buf, packet->act_len);
 
 	if (packet->endpoint >= ENDPOINT_MAX) {
@@ -787,8 +1087,8 @@
 	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
 		packet->status = -ECANCELED;
 		list_del(&packet->list);
-		ath6kl_dbg(ATH6KL_DBG_TRC,
-			"flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			"htc tx flushing pkt 0x%p len %d  ep %d tag 0x%x\n",
 			packet, packet->act_len,
 			packet->endpoint, packet->info.tx.tag);
 
@@ -844,12 +1144,13 @@
 		endpoint->cred_dist.txq_depth =
 			get_queue_depth(&endpoint->txq);
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-			   target->cred_dist_cntxt, &target->cred_dist_list);
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "htc tx activity ctxt 0x%p dist 0x%p\n",
+			   target->credit_info, &target->cred_dist_list);
 
-		ath6k_credit_distribute(target->cred_dist_cntxt,
-					&target->cred_dist_list,
-					HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+		ath6kl_credit_distribute(target->credit_info,
+					 &target->cred_dist_list,
+					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
 	}
 
 	spin_unlock_bh(&target->tx_lock);
@@ -919,15 +1220,15 @@
 	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
 
 	if (padded_len > packet->buf_len) {
-		ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
 			   padded_len, rx_len, packet->buf_len);
 		return -ENOMEM;
 	}
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-		   "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
 		   packet, packet->info.rx.exp_hdr,
-		   padded_len, dev->ar->mbox_info.htc_addr, "sync");
+		   padded_len, dev->ar->mbox_info.htc_addr);
 
 	status = hif_read_write_sync(dev->ar,
 				     dev->ar->mbox_info.htc_addr,
@@ -1137,8 +1438,8 @@
 			}
 
 			endpoint->ep_st.rx_bundle_from_hdr += 1;
-			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "htc rx bundle pkts %d\n",
 				   n_msg);
 		} else
 			/* HTC header only indicates 1 message to fetch */
@@ -1191,8 +1492,8 @@
 		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
 			packets->act_len + HTC_HDR_LENGTH);
 
-		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
-				"Unexpected ENDPOINT 0 Message", "",
+		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+				"htc rx unexpected endpoint 0 message", "",
 				packets->buf - HTC_HDR_LENGTH,
 				packets->act_len + HTC_HDR_LENGTH);
 	}
@@ -1209,9 +1510,6 @@
 	int tot_credits = 0, i;
 	bool dist = false;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
-
 	spin_lock_bh(&target->tx_lock);
 
 	for (i = 0; i < n_entries; i++, rpt++) {
@@ -1223,8 +1521,9 @@
 
 		endpoint = &target->endpoint[rpt->eid];
 
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
-			rpt->eid, rpt->credits);
+		ath6kl_dbg(ATH6KL_DBG_CREDIT,
+			   "credit report ep %d credits %d\n",
+			   rpt->eid, rpt->credits);
 
 		endpoint->ep_st.tx_cred_rpt += 1;
 		endpoint->ep_st.cred_retnd += rpt->credits;
@@ -1264,21 +1563,14 @@
 		tot_credits += rpt->credits;
 	}
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-		   "report indicated %d credits to distribute\n",
-		   tot_credits);
-
 	if (dist) {
 		/*
 		 * This was a credit return based on a completed send
 		 * operations note, this is done with the lock held
 		 */
-		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-			   target->cred_dist_cntxt, &target->cred_dist_list);
-
-		ath6k_credit_distribute(target->cred_dist_cntxt,
-					&target->cred_dist_list,
-					HTC_CREDIT_DIST_SEND_COMPLETE);
+		ath6kl_credit_distribute(target->credit_info,
+					 &target->cred_dist_list,
+					 HTC_CREDIT_DIST_SEND_COMPLETE);
 	}
 
 	spin_unlock_bh(&target->tx_lock);
@@ -1320,14 +1612,15 @@
 		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
 		    && next_lk_ahds) {
 
-			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
 				   lk_ahd->pre_valid, lk_ahd->post_valid);
 
 			/* look ahead bytes are valid, copy them over */
 			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
 
-			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+					"htc rx next look ahead",
 					"", next_lk_ahds, 4);
 
 			*n_lk_ahds = 1;
@@ -1346,7 +1639,7 @@
 			bundle_lkahd_rpt =
 				(struct htc_bundle_lkahd_rpt *) record_buf;
 
-			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
 					"", record_buf, record->len);
 
 			for (i = 0; i < len; i++) {
@@ -1378,10 +1671,8 @@
 	u8 *record_buf;
 	u8 *orig_buf;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
-
-	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
-			buf, len);
+	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
+	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
 
 	orig_buf = buf;
 	orig_len = len;
@@ -1418,7 +1709,7 @@
 	}
 
 	if (status)
-		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
 				"", orig_buf, orig_len);
 
 	return status;
@@ -1436,9 +1727,6 @@
 	if (n_lkahds != NULL)
 		*n_lkahds = 0;
 
-	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
-			packet->buf, packet->act_len);
-
 	/*
 	 * NOTE: we cannot assume the alignment of buf, so we use the safe
 	 * macros to retrieve 16 bit fields.
@@ -1480,9 +1768,9 @@
 	if (lk_ahd != packet->info.rx.exp_hdr) {
 		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
 			   __func__, packet, packet->info.rx.rx_flags);
-		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
 				"", &packet->info.rx.exp_hdr, 4);
-		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
 				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
 		status = -ENOMEM;
 		goto fail_rx;
@@ -1518,15 +1806,8 @@
 
 fail_rx:
 	if (status)
-		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
-				"", packet->buf,
-				packet->act_len < 256 ? packet->act_len : 256);
-	else {
-		if (packet->act_len > 0)
-			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
-					"HTC - Application Msg", "",
-					packet->buf, packet->act_len);
-	}
+		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
+				"", packet->buf, packet->act_len);
 
 	return status;
 }
@@ -1534,8 +1815,8 @@
 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
 				   struct htc_packet *packet)
 {
-		ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-			   "htc calling ep %d recv callback on packet 0x%p\n",
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "htc rx complete ep %d packet 0x%p\n",
 			   endpoint->eid, packet);
 		endpoint->ep_cb.rx(endpoint->target, packet);
 }
@@ -1571,9 +1852,9 @@
 
 	len = 0;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-		   "%s(): (numpackets: %d , actual : %d)\n",
-		   __func__, get_queue_depth(rxq), n_scat_pkt);
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "htc rx bundle depth %d pkts %d\n",
+		   get_queue_depth(rxq), n_scat_pkt);
 
 	scat_req = hif_scatter_req_get(target->dev->ar);
 
@@ -1620,7 +1901,7 @@
 	scat_req->len = len;
 	scat_req->scat_entries = i;
 
-	status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
 
 	if (!status)
 		*n_pkt_fetched = i;
@@ -1643,7 +1924,6 @@
 	int status = 0;
 
 	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
-		list_del(&packet->list);
 		ep = &target->endpoint[packet->endpoint];
 
 		/* process header for each of the recv packet */
@@ -1652,6 +1932,8 @@
 		if (status)
 			return status;
 
+		list_del(&packet->list);
+
 		if (list_empty(comp_pktq)) {
 			/*
 			 * Last packet's more packet flag is set
@@ -1686,11 +1968,15 @@
 	int fetched_pkts;
 	bool part_bundle = false;
 	int status = 0;
+	struct list_head tmp_rxq;
+	struct htc_packet *packet, *tmp_pkt;
 
 	/* now go fetch the list of HTC packets */
 	while (!list_empty(rx_pktq)) {
 		fetched_pkts = 0;
 
+		INIT_LIST_HEAD(&tmp_rxq);
+
 		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
 			/*
 			 * There are enough packets to attempt a
@@ -1698,28 +1984,27 @@
 			 * allowed.
 			 */
 			status = ath6kl_htc_rx_bundle(target, rx_pktq,
-						      comp_pktq,
+						      &tmp_rxq,
 						      &fetched_pkts,
 						      part_bundle);
 			if (status)
-				return status;
+				goto fail_rx;
 
 			if (!list_empty(rx_pktq))
 				part_bundle = true;
+
+			list_splice_tail_init(&tmp_rxq, comp_pktq);
 		}
 
 		if (!fetched_pkts) {
-			struct htc_packet *packet;
 
 			packet = list_first_entry(rx_pktq, struct htc_packet,
 						   list);
 
-			list_del(&packet->list);
-
 			/* fully synchronous */
 			packet->completion = NULL;
 
-			if (!list_empty(rx_pktq))
+			if (!list_is_singular(rx_pktq))
 				/*
 				 * look_aheads in all packet
 				 * except the last one in the
@@ -1731,18 +2016,42 @@
 			/* go fetch the packet */
 			status = ath6kl_htc_rx_packet(target, packet,
 						      packet->act_len);
-			if (status)
-				return status;
 
-			list_add_tail(&packet->list, comp_pktq);
+			list_move_tail(&packet->list, &tmp_rxq);
+
+			if (status)
+				goto fail_rx;
+
+			list_splice_tail_init(&tmp_rxq, comp_pktq);
 		}
 	}
 
+	return 0;
+
+fail_rx:
+
+	/*
+	 * Cleanup any packets we allocated but didn't use to
+	 * actually fetch any packets.
+	 */
+
+	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
+		list_del(&packet->list);
+		htc_reclaim_rxbuf(target, packet,
+				&target->endpoint[packet->endpoint]);
+	}
+
+	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
+		list_del(&packet->list);
+		htc_reclaim_rxbuf(target, packet,
+				&target->endpoint[packet->endpoint]);
+	}
+
 	return status;
 }
 
 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
-				     u32 msg_look_ahead[], int *num_pkts)
+				     u32 msg_look_ahead, int *num_pkts)
 {
 	struct htc_packet *packets, *tmp_pkt;
 	struct htc_endpoint *endpoint;
@@ -1759,7 +2068,7 @@
 	 * On first entry copy the look_aheads into our temp array for
 	 * processing
 	 */
-	memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+	look_aheads[0] = msg_look_ahead;
 
 	while (true) {
 
@@ -1827,15 +2136,6 @@
 	if (status) {
 		ath6kl_err("failed to get pending recv messages: %d\n",
 			   status);
-		/*
-		 * Cleanup any packets we allocated but didn't use to
-		 * actually fetch any packets.
-		 */
-		list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
-			list_del(&packets->list);
-			htc_reclaim_rxbuf(target, packets,
-					&target->endpoint[packets->endpoint]);
-		}
 
 		/* cleanup any packets in sync completion queue */
 		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
@@ -1846,7 +2146,7 @@
 
 		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
 			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
-			ath6kldev_rx_control(target->dev, false);
+			ath6kl_hif_rx_control(target->dev, false);
 		}
 	}
 
@@ -1856,7 +2156,7 @@
 	 */
 	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
 		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
-		ath6kldev_rx_control(target->dev, false);
+		ath6kl_hif_rx_control(target->dev, false);
 	}
 	*num_pkts = n_fetched;
 
@@ -1874,12 +2174,12 @@
 	struct htc_frame_hdr *htc_hdr;
 	u32 look_ahead;
 
-	if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
 			       HTC_TARGET_RESPONSE_TIMEOUT))
 		return NULL;
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-		"htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		"htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
 
 	htc_hdr = (struct htc_frame_hdr *)&look_ahead;
 
@@ -1943,8 +2243,8 @@
 
 	depth = get_queue_depth(pkt_queue);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-		"htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		"htc rx add multiple ep id %d cnt %d len %d\n",
 		first_pkt->endpoint, depth, first_pkt->buf_len);
 
 	endpoint = &target->endpoint[first_pkt->endpoint];
@@ -1969,8 +2269,8 @@
 	/* check if we are blocked waiting for a new buffer */
 	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
 		if (target->ep_waiting == first_pkt->endpoint) {
-			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-				"receiver was blocked on ep:%d, unblocking.\n",
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				"htc rx blocked on ep %d, unblocking\n",
 				target->ep_waiting);
 			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
 			target->ep_waiting = ENDPOINT_MAX;
@@ -1982,7 +2282,7 @@
 
 	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
 		/* TODO : implement a buffer threshold count? */
-		ath6kldev_rx_control(target->dev, true);
+		ath6kl_hif_rx_control(target->dev, true);
 
 	return status;
 }
@@ -2004,8 +2304,8 @@
 					 &endpoint->rx_bufq, list) {
 			list_del(&packet->list);
 			spin_unlock_bh(&target->rx_lock);
-			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-				   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "htc rx flush pkt 0x%p  len %d  ep %d\n",
 				   packet, packet->buf_len,
 				   packet->endpoint);
 			dev_kfree_skb(packet->pkt_cntxt);
@@ -2028,8 +2328,8 @@
 	unsigned int max_msg_sz = 0;
 	int status = 0;
 
-	ath6kl_dbg(ATH6KL_DBG_TRC,
-		   "htc_conn_service, target:0x%p service id:0x%X\n",
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "htc connect service target 0x%p service id 0x%x\n",
 		   target, conn_req->svc_id);
 
 	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
@@ -2115,7 +2415,7 @@
 	endpoint->len_max = max_msg_sz;
 	endpoint->ep_cb = conn_req->ep_cb;
 	endpoint->cred_dist.svc_id = conn_req->svc_id;
-	endpoint->cred_dist.htc_rsvd = endpoint;
+	endpoint->cred_dist.htc_ep = endpoint;
 	endpoint->cred_dist.endpoint = assigned_ep;
 	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
 
@@ -2172,6 +2472,7 @@
 	}
 
 	/* reset distribution list */
+	/* FIXME: free existing entries */
 	INIT_LIST_HEAD(&target->cred_dist_list);
 }
 
@@ -2201,8 +2502,8 @@
 	target->msg_per_bndl_max = min(target->max_scat_entries,
 				       target->msg_per_bndl_max);
 
-	ath6kl_dbg(ATH6KL_DBG_TRC,
-		   "htc bundling allowed. max msg per htc bundle: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT,
+		   "htc bundling allowed msg_per_bndl_max %d\n",
 		   target->msg_per_bndl_max);
 
 	/* Max rx bundle size is limited by the max tx bundle size */
@@ -2211,7 +2512,7 @@
 	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
 				     target->max_xfer_szper_scatreq);
 
-	ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
 		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);
 
 	if (target->max_tx_bndl_sz)
@@ -2265,8 +2566,8 @@
 	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
 	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
 
-	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-		   "target ready: credits: %d credit size: %d\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT,
+		   "htc target ready credits %d size %d\n",
 		   target->tgt_creds, target->tgt_cred_sz);
 
 	/* check if this is an extended ready message */
@@ -2280,7 +2581,7 @@
 		target->msg_per_bndl_max = 0;
 	}
 
-	ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
 		  (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
 		  target->htc_tgt_ver);
 
@@ -2300,6 +2601,10 @@
 	status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
 
 	if (status)
+		/*
+		 * FIXME: this call doesn't make sense; the caller should
+		 * call ath6kl_htc_cleanup() when it wants to remove HTC
+		 */
 		ath6kl_hif_cleanup_scatter(target->dev->ar);
 
 fail_wait_target:
@@ -2320,8 +2625,11 @@
 	struct htc_packet *packet;
 	int status;
 
+	memset(&target->dev->irq_proc_reg, 0,
+	       sizeof(target->dev->irq_proc_reg));
+
 	/* Disable interrupts at the chip level */
-	ath6kldev_disable_intrs(target->dev);
+	ath6kl_hif_disable_intrs(target->dev);
 
 	target->htc_flags = 0;
 	target->rx_st_flags = 0;
@@ -2334,8 +2642,8 @@
 	}
 
 	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
-	ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
-			  target->tgt_creds);
+	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
+			   target->tgt_creds);
 
 	dump_cred_dist_stats(target);
 
@@ -2346,7 +2654,7 @@
 		return status;
 
 	/* unmask interrupts */
-	status = ath6kldev_unmask_intrs(target->dev);
+	status = ath6kl_hif_unmask_intrs(target->dev);
 
 	if (status)
 		ath6kl_htc_stop(target);
@@ -2354,6 +2662,44 @@
 	return status;
 }
 
+static int ath6kl_htc_reset(struct htc_target *target)
+{
+	u32 block_size, ctrl_bufsz;
+	struct htc_packet *packet;
+	int i;
+
+	reset_ep_state(target);
+
+	block_size = target->dev->ar->mbox_info.block_size;
+
+	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+		      (block_size + HTC_HDR_LENGTH) :
+		      (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+		if (!packet)
+			return -ENOMEM;
+
+		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+		if (!packet->buf_start) {
+			kfree(packet);
+			return -ENOMEM;
+		}
+
+		packet->buf_len = ctrl_bufsz;
+		if (i < NUM_CONTROL_RX_BUFFERS) {
+			packet->act_len = 0;
+			packet->buf = packet->buf_start;
+			packet->endpoint = ENDPOINT_0;
+			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+		} else
+			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+	}
+
+	return 0;
+}
+
 /* htc_stop: stop interrupt reception, and flush all queued buffers */
 void ath6kl_htc_stop(struct htc_target *target)
 {
@@ -2366,21 +2712,19 @@
 	 * function returns all pending HIF I/O has completed, we can
 	 * safely flush the queues.
 	 */
-	ath6kldev_mask_intrs(target->dev);
+	ath6kl_hif_mask_intrs(target->dev);
 
 	ath6kl_htc_flush_txep_all(target);
 
 	ath6kl_htc_flush_rx_buf(target);
 
-	reset_ep_state(target);
+	ath6kl_htc_reset(target);
 }
 
 void *ath6kl_htc_create(struct ath6kl *ar)
 {
 	struct htc_target *target = NULL;
-	struct htc_packet *packet;
-	int status = 0, i = 0;
-	u32 block_size, ctrl_bufsz;
+	int status = 0;
 
 	target = kzalloc(sizeof(*target), GFP_KERNEL);
 	if (!target) {
@@ -2392,7 +2736,7 @@
 	if (!target->dev) {
 		ath6kl_err("unable to allocate memory\n");
 		status = -ENOMEM;
-		goto fail_create_htc;
+		goto err_htc_cleanup;
 	}
 
 	spin_lock_init(&target->htc_lock);
@@ -2407,49 +2751,20 @@
 	target->dev->htc_cnxt = target;
 	target->ep_waiting = ENDPOINT_MAX;
 
-	reset_ep_state(target);
-
-	status = ath6kldev_setup(target->dev);
-
+	status = ath6kl_hif_setup(target->dev);
 	if (status)
-		goto fail_create_htc;
+		goto err_htc_cleanup;
 
-	block_size = ar->mbox_info.block_size;
-
-	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
-		      (block_size + HTC_HDR_LENGTH) :
-		      (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
-
-	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
-		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
-		if (!packet)
-			break;
-
-		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
-		if (!packet->buf_start) {
-			kfree(packet);
-			break;
-		}
-
-		packet->buf_len = ctrl_bufsz;
-		if (i < NUM_CONTROL_RX_BUFFERS) {
-			packet->act_len = 0;
-			packet->buf = packet->buf_start;
-			packet->endpoint = ENDPOINT_0;
-			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
-		} else
-			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
-	}
-
-fail_create_htc:
-	if (i != NUM_CONTROL_BUFFERS || status) {
-		if (target) {
-			ath6kl_htc_cleanup(target);
-			target = NULL;
-		}
-	}
+	status = ath6kl_htc_reset(target);
+	if (status)
+		goto err_htc_cleanup;
 
 	return target;
+
+err_htc_cleanup:
+	ath6kl_htc_cleanup(target);
+
+	return NULL;
 }
 
 /* cleanup the HTC instance */
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 8ce0c2c..57672e1 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -393,7 +393,7 @@
 	int cred_per_msg;
 
 	/* reserved for HTC use */
-	void *htc_rsvd;
+	struct htc_endpoint *htc_ep;
 
 	/*
 	 * current depth of TX queue , i.e. messages waiting for credits
@@ -414,9 +414,11 @@
 	HTC_CREDIT_DIST_SEEK_CREDITS,
 };
 
-struct htc_credit_state_info {
+struct ath6kl_htc_credit_info {
 	int total_avail_credits;
 	int cur_free_credits;
+
+	/* list of lowest priority endpoints */
 	struct list_head lowestpri_ep_dist;
 };
 
@@ -508,10 +510,13 @@
 /* our HTC target state */
 struct htc_target {
 	struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+	/* contains struct htc_endpoint_credit_dist */
 	struct list_head cred_dist_list;
+
 	struct list_head free_ctrl_txbuf;
 	struct list_head free_ctrl_rxbuf;
-	struct htc_credit_state_info *cred_dist_cntxt;
+	struct ath6kl_htc_credit_info *credit_info;
 	int tgt_creds;
 	unsigned int tgt_cred_sz;
 	spinlock_t htc_lock;
@@ -542,7 +547,7 @@
 
 void *ath6kl_htc_create(struct ath6kl *ar);
 void ath6kl_htc_set_credit_dist(struct htc_target *target,
-				struct htc_credit_state_info *cred_info,
+				struct ath6kl_htc_credit_info *cred_info,
 				u16 svc_pri_order[], int len);
 int ath6kl_htc_wait_target(struct htc_target *target);
 int ath6kl_htc_start(struct htc_target *target);
@@ -563,7 +568,10 @@
 int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
 				  struct list_head *pktq);
 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
-				     u32 msg_look_ahead[], int *n_pkts);
+				     u32 msg_look_ahead, int *n_pkts);
+
+int ath6kl_credit_setup(void *htc_handle,
+			struct ath6kl_htc_credit_info *cred_info);
 
 static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
 				    u8 *buf, unsigned int len,
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
deleted file mode 100644
index 171ad63..0000000
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2007-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HTC_HIF_H
-#define HTC_HIF_H
-
-#include "htc.h"
-#include "hif.h"
-
-#define ATH6KL_MAILBOXES	4
-
-/* HTC runs over mailbox 0 */
-#define HTC_MAILBOX	0
-
-#define ATH6KL_TARGET_DEBUG_INTR_MASK     0x01
-
-#define OTHER_INTS_ENABLED		(INT_STATUS_ENABLE_ERROR_MASK |	\
-					INT_STATUS_ENABLE_CPU_MASK   |	\
-					INT_STATUS_ENABLE_COUNTER_MASK)
-
-#define ATH6KL_REG_IO_BUFFER_SIZE			32
-#define ATH6KL_MAX_REG_IO_BUFFERS			8
-#define ATH6KL_SCATTER_ENTRIES_PER_REQ            16
-#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER      (16 * 1024)
-#define ATH6KL_SCATTER_REQS                       4
-
-#ifndef A_CACHE_LINE_PAD
-#define A_CACHE_LINE_PAD                        128
-#endif
-#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ        2
-#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER      (4 * 1024)
-
-struct ath6kl_irq_proc_registers {
-	u8 host_int_status;
-	u8 cpu_int_status;
-	u8 error_int_status;
-	u8 counter_int_status;
-	u8 mbox_frame;
-	u8 rx_lkahd_valid;
-	u8 host_int_status2;
-	u8 gmbox_rx_avail;
-	__le32 rx_lkahd[2];
-	__le32 rx_gmbox_lkahd_alias[2];
-} __packed;
-
-struct ath6kl_irq_enable_reg {
-	u8 int_status_en;
-	u8 cpu_int_status_en;
-	u8 err_int_status_en;
-	u8 cntr_int_status_en;
-} __packed;
-
-struct ath6kl_device {
-	spinlock_t lock;
-	u8 pad1[A_CACHE_LINE_PAD];
-	struct ath6kl_irq_proc_registers irq_proc_reg;
-	u8 pad2[A_CACHE_LINE_PAD];
-	struct ath6kl_irq_enable_reg irq_en_reg;
-	u8 pad3[A_CACHE_LINE_PAD];
-	struct htc_target *htc_cnxt;
-	struct ath6kl *ar;
-};
-
-int ath6kldev_setup(struct ath6kl_device *dev);
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
-int ath6kldev_mask_intrs(struct ath6kl_device *dev);
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
-			      u32 *lk_ahd, int timeout);
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
-int ath6kldev_disable_intrs(struct ath6kl_device *dev);
-
-int ath6kldev_rw_comp_handler(void *context, int status);
-int ath6kldev_intr_bh_handler(struct ath6kl *ar);
-
-/* Scatter Function and Definitions */
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
-			    struct hif_scatter_req *scat_req, bool read);
-
-#endif /*ATH6KL_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index c1d2366..7f55be3 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/moduleparam.h>
+#include <linux/errno.h>
 #include <linux/of.h>
 #include <linux/mmc/sdio_func.h>
 #include "core.h"
@@ -26,9 +27,85 @@
 
 unsigned int debug_mask;
 static unsigned int testmode;
+static bool suspend_cutpower;
 
 module_param(debug_mask, uint, 0644);
 module_param(testmode, uint, 0644);
+module_param(suspend_cutpower, bool, 0444);
+
+static const struct ath6kl_hw hw_list[] = {
+	{
+		.id				= AR6003_HW_2_0_VERSION,
+		.name				= "ar6003 hw 2.0",
+		.dataset_patch_addr		= 0x57e884,
+		.app_load_addr			= 0x543180,
+		.board_ext_data_addr		= 0x57e500,
+		.reserved_ram_size		= 6912,
+		.refclk_hz			= 26000000,
+		.uarttx_pin			= 8,
+
+		/* hw2.0 needs override address hardcoded */
+		.app_start_override_addr	= 0x944C00,
+
+		.fw_otp			= AR6003_HW_2_0_OTP_FILE,
+		.fw			= AR6003_HW_2_0_FIRMWARE_FILE,
+		.fw_tcmd		= AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
+		.fw_patch		= AR6003_HW_2_0_PATCH_FILE,
+		.fw_api2		= AR6003_HW_2_0_FIRMWARE_2_FILE,
+		.fw_board		= AR6003_HW_2_0_BOARD_DATA_FILE,
+		.fw_default_board	= AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
+	},
+	{
+		.id				= AR6003_HW_2_1_1_VERSION,
+		.name				= "ar6003 hw 2.1.1",
+		.dataset_patch_addr		= 0x57ff74,
+		.app_load_addr			= 0x1234,
+		.board_ext_data_addr		= 0x542330,
+		.reserved_ram_size		= 512,
+		.refclk_hz			= 26000000,
+		.uarttx_pin			= 8,
+
+		.fw_otp			= AR6003_HW_2_1_1_OTP_FILE,
+		.fw			= AR6003_HW_2_1_1_FIRMWARE_FILE,
+		.fw_tcmd		= AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
+		.fw_patch		= AR6003_HW_2_1_1_PATCH_FILE,
+		.fw_api2		= AR6003_HW_2_1_1_FIRMWARE_2_FILE,
+		.fw_board		= AR6003_HW_2_1_1_BOARD_DATA_FILE,
+		.fw_default_board	= AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
+	},
+	{
+		.id				= AR6004_HW_1_0_VERSION,
+		.name				= "ar6004 hw 1.0",
+		.dataset_patch_addr		= 0x57e884,
+		.app_load_addr			= 0x1234,
+		.board_ext_data_addr		= 0x437000,
+		.reserved_ram_size		= 19456,
+		.board_addr			= 0x433900,
+		.refclk_hz			= 26000000,
+		.uarttx_pin			= 11,
+
+		.fw			= AR6004_HW_1_0_FIRMWARE_FILE,
+		.fw_api2		= AR6004_HW_1_0_FIRMWARE_2_FILE,
+		.fw_board		= AR6004_HW_1_0_BOARD_DATA_FILE,
+		.fw_default_board	= AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
+	},
+	{
+		.id				= AR6004_HW_1_1_VERSION,
+		.name				= "ar6004 hw 1.1",
+		.dataset_patch_addr		= 0x57e884,
+		.app_load_addr			= 0x1234,
+		.board_ext_data_addr		= 0x437000,
+		.reserved_ram_size		= 11264,
+		.board_addr			= 0x43d400,
+		.refclk_hz			= 40000000,
+		.uarttx_pin			= 11,
+
+		.fw			= AR6004_HW_1_1_FIRMWARE_FILE,
+		.fw_api2		= AR6004_HW_1_1_FIRMWARE_2_FILE,
+		.fw_board		= AR6004_HW_1_1_BOARD_DATA_FILE,
+		.fw_default_board	= AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
+	},
+};
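
A table like hw_list is normally consumed by scanning for the entry whose .id matches the detected target version and keeping a pointer to it. A stand-alone sketch of that lookup with placeholder ids (hypothetical names, not the driver's actual helper):

#include <stddef.h>
#include <stdio.h>

struct demo_hw {
	unsigned int id;
	const char *name;
};

/* placeholder ids, not the real AR600x version constants */
static const struct demo_hw demo_hw_list[] = {
	{ .id = 0x100, .name = "demo hw 2.0" },
	{ .id = 0x200, .name = "demo hw 2.1.1" },
};

static const struct demo_hw *demo_find_hw(unsigned int target_ver)
{
	size_t i;

	for (i = 0; i < sizeof(demo_hw_list) / sizeof(demo_hw_list[0]); i++)
		if (demo_hw_list[i].id == target_ver)
			return &demo_hw_list[i];

	return NULL;	/* unsupported hardware */
}

int main(void)
{
	const struct demo_hw *hw = demo_find_hw(0x200);

	printf("%s\n", hw ? hw->name : "unknown");
	return 0;
}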
 
 /*
  * Include definitions here that can be used to tune the WLAN module
@@ -55,7 +132,6 @@
  */
 #define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
 
-#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
 
 #define ATH6KL_DATA_OFFSET    64
 struct sk_buff *ath6kl_buf_alloc(int size)
@@ -73,37 +149,21 @@
 	return skb;
 }
 
-void ath6kl_init_profile_info(struct ath6kl *ar)
+void ath6kl_init_profile_info(struct ath6kl_vif *vif)
 {
-	ar->ssid_len = 0;
-	memset(ar->ssid, 0, sizeof(ar->ssid));
+	vif->ssid_len = 0;
+	memset(vif->ssid, 0, sizeof(vif->ssid));
 
-	ar->dot11_auth_mode = OPEN_AUTH;
-	ar->auth_mode = NONE_AUTH;
-	ar->prwise_crypto = NONE_CRYPT;
-	ar->prwise_crypto_len = 0;
-	ar->grp_crypto = NONE_CRYPT;
-	ar->grp_crypto_len = 0;
-	memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
-	memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
-	memset(ar->bssid, 0, sizeof(ar->bssid));
-	ar->bss_ch = 0;
-	ar->nw_type = ar->next_mode = INFRA_NETWORK;
-}
-
-static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
-{
-	switch (ar->nw_type) {
-	case INFRA_NETWORK:
-		return HI_OPTION_FW_MODE_BSS_STA;
-	case ADHOC_NETWORK:
-		return HI_OPTION_FW_MODE_IBSS;
-	case AP_NETWORK:
-		return HI_OPTION_FW_MODE_AP;
-	default:
-		ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
-		return 0xff;
-	}
+	vif->dot11_auth_mode = OPEN_AUTH;
+	vif->auth_mode = NONE_AUTH;
+	vif->prwise_crypto = NONE_CRYPT;
+	vif->prwise_crypto_len = 0;
+	vif->grp_crypto = NONE_CRYPT;
+	vif->grp_crypto_len = 0;
+	memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+	memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+	memset(vif->bssid, 0, sizeof(vif->bssid));
+	vif->bss_ch = 0;
 }
 
 static int ath6kl_set_host_app_area(struct ath6kl *ar)
@@ -120,7 +180,7 @@
 		return -EIO;
 
 	address = TARG_VTOP(ar->target_type, data);
-	host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+	host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
 	if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
 			      sizeof(struct host_app_area)))
 		return -EIO;
@@ -258,40 +318,12 @@
 	return 0;
 }
 
-static void ath6kl_init_control_info(struct ath6kl *ar)
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
 {
-	u8 ctr;
-
-	clear_bit(WMI_ENABLED, &ar->flag);
-	ath6kl_init_profile_info(ar);
-	ar->def_txkey_index = 0;
-	memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
-	ar->ch_hint = 0;
-	ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
-	ar->listen_intvl_b = 0;
-	ar->tx_pwr = 0;
-	clear_bit(SKIP_SCAN, &ar->flag);
-	set_bit(WMM_ENABLED, &ar->flag);
-	ar->intra_bss = 1;
-	memset(&ar->sc_params, 0, sizeof(ar->sc_params));
-	ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
-	ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
-	ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
-
-	memset((u8 *)ar->sta_list, 0,
-	       AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
-
-	spin_lock_init(&ar->mcastpsq_lock);
-
-	/* Init the PS queues */
-	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
-		spin_lock_init(&ar->sta_list[ctr].psq_lock);
-		skb_queue_head_init(&ar->sta_list[ctr].psq);
-	}
-
-	skb_queue_head_init(&ar->mcastpsq);
-
-	memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+	ath6kl_init_profile_info(vif);
+	vif->def_txkey_index = 0;
+	memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+	vif->ch_hint = 0;
 }
 
 /*
@@ -341,62 +373,7 @@
 	return status;
 }
 
-#define REG_DUMP_COUNT_AR6003   60
-#define REGISTER_DUMP_LEN_MAX   60
-
-static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
-{
-	u32 address;
-	u32 regdump_loc = 0;
-	int status;
-	u32 regdump_val[REGISTER_DUMP_LEN_MAX];
-	u32 i;
-
-	if (ar->target_type != TARGET_TYPE_AR6003)
-		return;
-
-	/* the reg dump pointer is copied to the host interest area */
-	address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
-	address = TARG_VTOP(ar->target_type, address);
-
-	/* read RAM location through diagnostic window */
-	status = ath6kl_diag_read32(ar, address, &regdump_loc);
-
-	if (status || !regdump_loc) {
-		ath6kl_err("failed to get ptr to register dump area\n");
-		return;
-	}
-
-	ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
-		regdump_loc);
-	regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
-
-	/* fetch register dump data */
-	status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
-				  REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
-
-	if (status) {
-		ath6kl_err("failed to get register dump\n");
-		return;
-	}
-	ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
-
-	for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
-		ath6kl_dbg(ATH6KL_DBG_TRC, " %d :  0x%8.8X\n",
-			   i, regdump_val[i]);
-
-}
-
-void ath6kl_target_failure(struct ath6kl *ar)
-{
-	ath6kl_err("target asserted\n");
-
-	/* try dumping target assertion information (if any) */
-	ath6kl_dump_target_assert_info(ar);
-
-}
-
-static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
 {
 	int status = 0;
 	int ret;
@@ -406,46 +383,46 @@
 	 * default values. Required if checksum offload is needed. Set
 	 * RxMetaVersion to 2.
 	 */
-	if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+	if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
 					       ar->rx_meta_ver, 0, 0)) {
 		ath6kl_err("unable to set the rx frame format\n");
 		status = -EIO;
 	}
 
 	if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
-		if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+		if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
 		     IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
 			ath6kl_err("unable to set power save fail event policy\n");
 			status = -EIO;
 		}
 
 	if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
-		if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+		if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
 		     WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
 			ath6kl_err("unable to set barker preamble policy\n");
 			status = -EIO;
 		}
 
-	if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+	if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
 			WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
 		ath6kl_err("unable to set keep alive interval\n");
 		status = -EIO;
 	}
 
-	if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+	if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
 			WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
 		ath6kl_err("unable to set disconnect timeout\n");
 		status = -EIO;
 	}
 
 	if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
-		if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+		if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
 			ath6kl_err("unable to set txop bursting\n");
 			status = -EIO;
 		}
 
-	if (ar->p2p) {
-		ret = ath6kl_wmi_info_req_cmd(ar->wmi,
+	if (ar->p2p && (ar->vif_max == 1 || idx)) {
+		ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
 					      P2P_FLAG_CAPABILITIES_REQ |
 					      P2P_FLAG_MACADDR_REQ |
 					      P2P_FLAG_HMODEL_REQ);
@@ -453,13 +430,13 @@
 			ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
 				   "capabilities (%d) - assuming P2P not "
 				   "supported\n", ret);
-			ar->p2p = 0;
+			ar->p2p = false;
 		}
 	}
 
-	if (ar->p2p) {
+	if (ar->p2p && (ar->vif_max == 1 || idx)) {
 		/* Enable Probe Request reporting for P2P */
-		ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true);
+		ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
 		if (ret) {
 			ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
 				   "Request reporting (%d)\n", ret);
@@ -472,13 +449,40 @@
 int ath6kl_configure_target(struct ath6kl *ar)
 {
 	u32 param, ram_reserved_size;
-	u8 fw_iftype;
+	u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+	int i, status;
 
-	fw_iftype = ath6kl_get_fw_iftype(ar);
-	if (fw_iftype == 0xff)
-		return -EINVAL;
+	/*
+	 * Note: Even though the firmware interface type is
+	 * chosen as BSS_STA for all three interfaces, they can
+	 * be configured as IBSS/AP as long as the fw submode
+	 * remains normal mode (0 - AP, STA and IBSS). But
+	 * due to a target assert in firmware, only one interface is
+	 * configured for now.
+	 */
+	fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
 
-	/* Tell target which HTC version it is used*/
+	for (i = 0; i < ar->vif_max; i++)
+		fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+	/*
+	 * By default, submodes:
+	 *		vif[0] - AP/STA/IBSS
+	 *		vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+	 *		vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+	 */
+
+	for (i = 0; i < ar->max_norm_iface; i++)
+		fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+			      (i * HI_OPTION_FW_SUBMODE_BITS);
+
+	for (i = ar->max_norm_iface; i < ar->vif_max; i++)
+		fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+			      (i * HI_OPTION_FW_SUBMODE_BITS);
+
+	if (ar->p2p && ar->vif_max == 1)
+		fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
+
 	param = HTC_PROTOCOL_VERSION;
 	if (ath6kl_bmi_write(ar,
 			     ath6kl_get_hi_item_addr(ar,
@@ -499,12 +503,10 @@
 		return -EIO;
 	}
 
-	param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
-	param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
-	if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) {
-		param |= HI_OPTION_FW_SUBMODE_P2PDEV <<
-			HI_OPTION_FW_SUBMODE_SHIFT;
-	}
+	param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT);
+	param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
+	param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
+
 	param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
 	param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
 
@@ -550,71 +552,55 @@
 		/* use default number of control buffers */
 		return -EIO;
 
+	/* Configure GPIO AR600x UART */
+	param = ar->hw.uarttx_pin;
+	status = ath6kl_bmi_write(ar,
+				ath6kl_get_hi_item_addr(ar,
+				HI_ITEM(hi_dbg_uart_txpin)),
+				(u8 *)&param, 4);
+	if (status)
+		return status;
+
+	/* Configure target refclk_hz */
+	param = ar->hw.refclk_hz;
+	status = ath6kl_bmi_write(ar,
+				ath6kl_get_hi_item_addr(ar,
+				HI_ITEM(hi_refclk_hz)),
+				(u8 *)&param, 4);
+	if (status)
+		return status;
+
 	return 0;
 }
 
-struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+void ath6kl_core_free(struct ath6kl *ar)
 {
-	struct net_device *dev;
-	struct ath6kl *ar;
-	struct wireless_dev *wdev;
-
-	wdev = ath6kl_cfg80211_init(sdev);
-	if (!wdev) {
-		ath6kl_err("ath6kl_cfg80211_init failed\n");
-		return NULL;
-	}
-
-	ar = wdev_priv(wdev);
-	ar->dev = sdev;
-	ar->wdev = wdev;
-	wdev->iftype = NL80211_IFTYPE_STATION;
-
-	if (ath6kl_debug_init(ar)) {
-		ath6kl_err("Failed to initialize debugfs\n");
-		ath6kl_cfg80211_deinit(ar);
-		return NULL;
-	}
-
-	dev = alloc_netdev(0, "wlan%d", ether_setup);
-	if (!dev) {
-		ath6kl_err("no memory for network device instance\n");
-		ath6kl_cfg80211_deinit(ar);
-		return NULL;
-	}
-
-	dev->ieee80211_ptr = wdev;
-	SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
-	wdev->netdev = dev;
-	ar->sme_state = SME_DISCONNECTED;
-
-	init_netdev(dev);
-
-	ar->net_dev = dev;
-	set_bit(WLAN_ENABLED, &ar->flag);
-
-	ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
-
-	spin_lock_init(&ar->lock);
-
-	ath6kl_init_control_info(ar);
-	init_waitqueue_head(&ar->event_wq);
-	sema_init(&ar->sem, 1);
-	clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
-
-	INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
-
-	setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
-		    (unsigned long) dev);
-
-	return ar;
+	wiphy_free(ar->wiphy);
 }
 
-int ath6kl_unavail_ev(struct ath6kl *ar)
+void ath6kl_core_cleanup(struct ath6kl *ar)
 {
-	ath6kl_destroy(ar->net_dev, 1);
+	ath6kl_hif_power_off(ar);
 
-	return 0;
+	destroy_workqueue(ar->ath6kl_wq);
+
+	if (ar->htc_target)
+		ath6kl_htc_cleanup(ar->htc_target);
+
+	ath6kl_cookie_cleanup(ar);
+
+	ath6kl_cleanup_amsdu_rxbufs(ar);
+
+	ath6kl_bmi_cleanup(ar);
+
+	ath6kl_debug_cleanup(ar);
+
+	kfree(ar->fw_board);
+	kfree(ar->fw_otp);
+	kfree(ar->fw);
+	kfree(ar->fw_patch);
+
+	ath6kl_deinit_ieee80211_hw(ar);
 }
 
 /* firmware upload */
@@ -643,11 +629,11 @@
 static const char *get_target_ver_dir(const struct ath6kl *ar)
 {
 	switch (ar->version.target_ver) {
-	case AR6003_REV1_VERSION:
+	case AR6003_HW_1_0_VERSION:
 		return "ath6k/AR6003/hw1.0";
-	case AR6003_REV2_VERSION:
+	case AR6003_HW_2_0_VERSION:
 		return "ath6k/AR6003/hw2.0";
-	case AR6003_REV3_VERSION:
+	case AR6003_HW_2_1_1_VERSION:
 		return "ath6k/AR6003/hw2.1.1";
 	}
 	ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
@@ -705,17 +691,10 @@
 	if (ar->fw_board != NULL)
 		return 0;
 
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		filename = AR6003_REV2_BOARD_DATA_FILE;
-		break;
-	case AR6004_REV1_VERSION:
-		filename = AR6004_REV1_BOARD_DATA_FILE;
-		break;
-	default:
-		filename = AR6003_REV3_BOARD_DATA_FILE;
-		break;
-	}
+	if (WARN_ON(ar->hw.fw_board == NULL))
+		return -EINVAL;
+
+	filename = ar->hw.fw_board;
 
 	ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
 			    &ar->fw_board_len);
@@ -733,17 +712,7 @@
 	ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
 		    filename, ret);
 
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
-		break;
-	case AR6004_REV1_VERSION:
-		filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE;
-		break;
-	default:
-		filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
-		break;
-	}
+	filename = ar->hw.fw_default_board;
 
 	ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
 			    &ar->fw_board_len);
@@ -767,19 +736,14 @@
 	if (ar->fw_otp != NULL)
 		return 0;
 
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		filename = AR6003_REV2_OTP_FILE;
-		break;
-	case AR6004_REV1_VERSION:
-		ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n");
+	if (ar->hw.fw_otp == NULL) {
+		ath6kl_dbg(ATH6KL_DBG_BOOT,
+			   "no OTP file configured for this hw\n");
 		return 0;
-		break;
-	default:
-		filename = AR6003_REV3_OTP_FILE;
-		break;
 	}
 
+	filename = ar->hw.fw_otp;
+
 	ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
 			    &ar->fw_otp_len);
 	if (ret) {
@@ -800,38 +764,22 @@
 		return 0;
 
 	if (testmode) {
-		switch (ar->version.target_ver) {
-		case AR6003_REV2_VERSION:
-			filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
-			break;
-		case AR6003_REV3_VERSION:
-			filename = AR6003_REV3_TCMD_FIRMWARE_FILE;
-			break;
-		case AR6004_REV1_VERSION:
-			ath6kl_warn("testmode not supported with ar6004\n");
+		if (ar->hw.fw_tcmd == NULL) {
+			ath6kl_warn("testmode not supported\n");
 			return -EOPNOTSUPP;
-		default:
-			ath6kl_warn("unknown target version: 0x%x\n",
-				       ar->version.target_ver);
-			return -EINVAL;
 		}
 
+		filename = ar->hw.fw_tcmd;
+
 		set_bit(TESTMODE, &ar->flag);
 
 		goto get_fw;
 	}
 
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		filename = AR6003_REV2_FIRMWARE_FILE;
-		break;
-	case AR6004_REV1_VERSION:
-		filename = AR6004_REV1_FIRMWARE_FILE;
-		break;
-	default:
-		filename = AR6003_REV3_FIRMWARE_FILE;
-		break;
-	}
+	if (WARN_ON(ar->hw.fw == NULL))
+		return -EINVAL;
+
+	filename = ar->hw.fw;
 
 get_fw:
 	ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
@@ -849,27 +797,20 @@
 	const char *filename;
 	int ret;
 
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		filename = AR6003_REV2_PATCH_FILE;
-		break;
-	case AR6004_REV1_VERSION:
-		/* FIXME: implement for AR6004 */
+	if (ar->fw_patch != NULL)
 		return 0;
-		break;
-	default:
-		filename = AR6003_REV3_PATCH_FILE;
-		break;
-	}
 
-	if (ar->fw_patch == NULL) {
-		ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
-				    &ar->fw_patch_len);
-		if (ret) {
-			ath6kl_err("Failed to get patch file %s: %d\n",
-				   filename, ret);
-			return ret;
-		}
+	if (ar->hw.fw_patch == NULL)
+		return 0;
+
+	filename = ar->hw.fw_patch;
+
+	ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+			    &ar->fw_patch_len);
+	if (ret) {
+		ath6kl_err("Failed to get patch file %s: %d\n",
+			   filename, ret);
+		return ret;
 	}
 
 	return 0;
@@ -904,19 +845,10 @@
 	int ret, ie_id, i, index, bit;
 	__le32 *val;
 
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		filename = AR6003_REV2_FIRMWARE_2_FILE;
-		break;
-	case AR6003_REV3_VERSION:
-		filename = AR6003_REV3_FIRMWARE_2_FILE;
-		break;
-	case AR6004_REV1_VERSION:
-		filename = AR6004_REV1_FIRMWARE_2_FILE;
-		break;
-	default:
+	if (ar->hw.fw_api2 == NULL)
 		return -EOPNOTSUPP;
-	}
+
+	filename = ar->hw.fw_api2;
 
 	ret = request_firmware(&fw, filename, ar->dev);
 	if (ret)
@@ -1006,12 +938,15 @@
 				   ar->hw.reserved_ram_size);
 			break;
 		case ATH6KL_FW_IE_CAPABILITIES:
+			if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
+				break;
+
 			ath6kl_dbg(ATH6KL_DBG_BOOT,
 				   "found firmware capabilities ie (%zd B)\n",
 				   ie_len);
 
 			for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
-				index = ALIGN(i, 8) / 8;
+				index = i / 8;
 				bit = i % 8;
 
 				if (data[index] & (1 << bit))
@@ -1030,9 +965,34 @@
 			ar->hw.dataset_patch_addr = le32_to_cpup(val);
 
 			ath6kl_dbg(ATH6KL_DBG_BOOT,
-				   "found patch address ie 0x%d\n",
+				   "found patch address ie 0x%x\n",
 				   ar->hw.dataset_patch_addr);
 			break;
+		case ATH6KL_FW_IE_BOARD_ADDR:
+			if (ie_len != sizeof(*val))
+				break;
+
+			val = (__le32 *) data;
+			ar->hw.board_addr = le32_to_cpup(val);
+
+			ath6kl_dbg(ATH6KL_DBG_BOOT,
+				   "found board address ie 0x%x\n",
+				   ar->hw.board_addr);
+			break;
+		case ATH6KL_FW_IE_VIF_MAX:
+			if (ie_len != sizeof(*val))
+				break;
+
+			val = (__le32 *) data;
+			ar->vif_max = min_t(unsigned int, le32_to_cpup(val),
+					    ATH6KL_VIF_MAX);
+
+			if (ar->vif_max > 1 && !ar->p2p)
+				ar->max_norm_iface = 2;
+
+			ath6kl_dbg(ATH6KL_DBG_BOOT,
+				   "found vif max ie %d\n", ar->vif_max);
+			break;
 		default:
 			ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
 				   le32_to_cpup(&hdr->id));
@@ -1087,8 +1047,8 @@
 	 * For AR6004, host determine Target RAM address for
 	 * writing board data.
 	 */
-	if (ar->target_type == TARGET_TYPE_AR6004) {
-		board_address = AR6004_REV1_BOARD_DATA_ADDRESS;
+	if (ar->hw.board_addr != 0) {
+		board_address = ar->hw.board_addr;
 		ath6kl_bmi_write(ar,
 				ath6kl_get_hi_item_addr(ar,
 				HI_ITEM(hi_board_data)),
@@ -1106,7 +1066,8 @@
 			HI_ITEM(hi_board_ext_data)),
 			(u8 *) &board_ext_address, 4);
 
-	if (board_ext_address == 0) {
+	if (ar->target_type == TARGET_TYPE_AR6003 &&
+	    board_ext_address == 0) {
 		ath6kl_err("Failed to get board file target address.\n");
 		return -EINVAL;
 	}
@@ -1126,8 +1087,8 @@
 		break;
 	}
 
-	if (ar->fw_board_len == (board_data_size +
-				 board_ext_data_size)) {
+	if (board_ext_address &&
+	    ar->fw_board_len == (board_data_size + board_ext_data_size)) {
 
 		/* write extended board data */
 		ath6kl_dbg(ATH6KL_DBG_BOOT,
@@ -1182,10 +1143,11 @@
 static int ath6kl_upload_otp(struct ath6kl *ar)
 {
 	u32 address, param;
+	bool from_hw = false;
 	int ret;
 
-	if (WARN_ON(ar->fw_otp == NULL))
-		return -ENOENT;
+	if (ar->fw_otp == NULL)
+		return 0;
 
 	address = ar->hw.app_load_addr;
 
@@ -1210,15 +1172,20 @@
 		return ret;
 	}
 
-	ar->hw.app_start_override_addr = address;
+	if (ar->hw.app_start_override_addr == 0) {
+		ar->hw.app_start_override_addr = address;
+		from_hw = true;
+	}
 
-	ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
+		   from_hw ? " (from hw)" : "",
 		   ar->hw.app_start_override_addr);
 
 	/* execute the OTP code */
-	ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address);
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
+		   ar->hw.app_start_override_addr);
 	param = 0;
-	ath6kl_bmi_execute(ar, address, &param);
+	ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
 
 	return ret;
 }
@@ -1229,7 +1196,7 @@
 	int ret;
 
 	if (WARN_ON(ar->fw == NULL))
-		return -ENOENT;
+		return 0;
 
 	address = ar->hw.app_load_addr;
 
@@ -1259,8 +1226,8 @@
 	u32 address, param;
 	int ret;
 
-	if (WARN_ON(ar->fw_patch == NULL))
-		return -ENOENT;
+	if (ar->fw_patch == NULL)
+		return 0;
 
 	address = ar->hw.dataset_patch_addr;
 
@@ -1345,7 +1312,7 @@
 		return status;
 
 	/* WAR to avoid SDIO CRC err */
-	if (ar->version.target_ver == AR6003_REV2_VERSION) {
+	if (ar->version.target_ver == AR6003_HW_2_0_VERSION) {
 		ath6kl_err("temporary war to avoid sdio crc error\n");
 
 		param = 0x20;
@@ -1402,43 +1369,29 @@
 	if (status)
 		return status;
 
-	/* Configure GPIO AR6003 UART */
-	param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
-	status = ath6kl_bmi_write(ar,
-				  ath6kl_get_hi_item_addr(ar,
-				  HI_ITEM(hi_dbg_uart_txpin)),
-				  (u8 *)&param, 4);
-
 	return status;
 }
 
 static int ath6kl_init_hw_params(struct ath6kl *ar)
 {
-	switch (ar->version.target_ver) {
-	case AR6003_REV2_VERSION:
-		ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
-		ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
-		ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
-		ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
-		break;
-	case AR6003_REV3_VERSION:
-		ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
-		ar->hw.app_load_addr = 0x1234;
-		ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
-		ar->hw.reserved_ram_size = AR6003_REV3_RAM_RESERVE_SIZE;
-		break;
-	case AR6004_REV1_VERSION:
-		ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
-		ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS;
-		ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS;
-		ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE;
-		break;
-	default:
+	const struct ath6kl_hw *hw;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+		hw = &hw_list[i];
+
+		if (hw->id == ar->version.target_ver)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(hw_list)) {
 		ath6kl_err("Unsupported hardware version: 0x%x\n",
 			   ar->version.target_ver);
 		return -EINVAL;
 	}
 
+	ar->hw = *hw;
+
 	ath6kl_dbg(ATH6KL_DBG_BOOT,
 		   "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
 		   ar->version.target_ver, ar->target_type,
@@ -1447,51 +1400,246 @@
 		   "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
 		   ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
 		   ar->hw.reserved_ram_size);
+	ath6kl_dbg(ATH6KL_DBG_BOOT,
+		   "refclk_hz %d uarttx_pin %d",
+		   ar->hw.refclk_hz, ar->hw.uarttx_pin);
 
 	return 0;
 }
 
-static int ath6kl_init(struct net_device *dev)
+static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
 {
-	struct ath6kl *ar = ath6kl_priv(dev);
-	int status = 0;
-	s32 timeleft;
+	switch (type) {
+	case ATH6KL_HIF_TYPE_SDIO:
+		return "sdio";
+	case ATH6KL_HIF_TYPE_USB:
+		return "usb";
+	}
 
-	if (!ar)
-		return -EIO;
+	return NULL;
+}
+
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+	long timeleft;
+	int ret, i;
+
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
+
+	ret = ath6kl_hif_power_on(ar);
+	if (ret)
+		return ret;
+
+	ret = ath6kl_configure_target(ar);
+	if (ret)
+		goto err_power_off;
+
+	ret = ath6kl_init_upload(ar);
+	if (ret)
+		goto err_power_off;
 
 	/* Do we need to finish the BMI phase */
+	/* FIXME: return error from ath6kl_bmi_done() */
 	if (ath6kl_bmi_done(ar)) {
-		status = -EIO;
-		goto ath6kl_init_done;
+		ret = -EIO;
+		goto err_power_off;
 	}
 
-	/* Indicate that WMI is enabled (although not ready yet) */
-	set_bit(WMI_ENABLED, &ar->flag);
-	ar->wmi = ath6kl_wmi_init(ar);
-	if (!ar->wmi) {
-		ath6kl_err("failed to initialize wmi\n");
-		status = -EIO;
-		goto ath6kl_init_done;
-	}
-
-	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
-
 	/*
 	 * The reason we have to wait for the target here is that the
 	 * driver layer has to init BMI in order to set the host block
 	 * size.
 	 */
 	if (ath6kl_htc_wait_target(ar->htc_target)) {
-		status = -EIO;
-		goto err_node_cleanup;
+		ret = -EIO;
+		goto err_power_off;
 	}
 
 	if (ath6kl_init_service_ep(ar)) {
-		status = -EIO;
+		ret = -EIO;
 		goto err_cleanup_scatter;
 	}
 
+	/* setup credit distribution */
+	ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
+
+	/* start HTC */
+	ret = ath6kl_htc_start(ar->htc_target);
+	if (ret) {
+		/* FIXME: call this */
+		ath6kl_cookie_cleanup(ar);
+		goto err_cleanup_scatter;
+	}
+
+	/* Wait for the WMI ready event */
+	timeleft = wait_event_interruptible_timeout(ar->event_wq,
+						    test_bit(WMI_READY,
+							     &ar->flag),
+						    WMI_TIMEOUT);
+
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
+
+
+	if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
+		ath6kl_info("%s %s fw %s%s\n",
+			    ar->hw.name,
+			    ath6kl_init_get_hif_name(ar->hif_type),
+			    ar->wiphy->fw_version,
+			    test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+	}
+
+	if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
+		ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
+			   ATH6KL_ABI_VERSION, ar->version.abi_ver);
+		ret = -EIO;
+		goto err_htc_stop;
+	}
+
+	if (!timeleft || signal_pending(current)) {
+		ath6kl_err("wmi is not ready or wait was interrupted\n");
+		ret = -EIO;
+		goto err_htc_stop;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
+
+	/* communicate the wmi protocol version to the target */
+	/* FIXME: return error */
+	if ((ath6kl_set_host_app_area(ar)) != 0)
+		ath6kl_err("unable to set the host app area\n");
+
+	for (i = 0; i < ar->vif_max; i++) {
+		ret = ath6kl_target_config_wlan_params(ar, i);
+		if (ret)
+			goto err_htc_stop;
+	}
+
+	ar->state = ATH6KL_STATE_ON;
+
+	return 0;
+
+err_htc_stop:
+	ath6kl_htc_stop(ar->htc_target);
+err_cleanup_scatter:
+	ath6kl_hif_cleanup_scatter(ar);
+err_power_off:
+	ath6kl_hif_power_off(ar);
+
+	return ret;
+}
+
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+	int ret;
+
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
+
+	ath6kl_htc_stop(ar->htc_target);
+
+	ath6kl_hif_stop(ar);
+
+	ath6kl_bmi_reset(ar);
+
+	ret = ath6kl_hif_power_off(ar);
+	if (ret)
+		ath6kl_warn("failed to power off hif: %d\n", ret);
+
+	ar->state = ATH6KL_STATE_OFF;
+
+	return 0;
+}
+
+int ath6kl_core_init(struct ath6kl *ar)
+{
+	struct ath6kl_bmi_target_info targ_info;
+	struct net_device *ndev;
+	int ret = 0, i;
+
+	ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
+	if (!ar->ath6kl_wq)
+		return -ENOMEM;
+
+	ret = ath6kl_bmi_init(ar);
+	if (ret)
+		goto err_wq;
+
+	/*
+	 * Turn on power to get hardware (target) version and leave power
+	 * on deliberately as we will boot the hardware anyway within a few
+	 * seconds.
+	 */
+	ret = ath6kl_hif_power_on(ar);
+	if (ret)
+		goto err_bmi_cleanup;
+
+	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+	if (ret)
+		goto err_power_off;
+
+	ar->version.target_ver = le32_to_cpu(targ_info.version);
+	ar->target_type = le32_to_cpu(targ_info.type);
+	ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
+
+	ret = ath6kl_init_hw_params(ar);
+	if (ret)
+		goto err_power_off;
+
+	ar->htc_target = ath6kl_htc_create(ar);
+
+	if (!ar->htc_target) {
+		ret = -ENOMEM;
+		goto err_power_off;
+	}
+
+	ret = ath6kl_fetch_firmwares(ar);
+	if (ret)
+		goto err_htc_cleanup;
+
+	/* FIXME: we should free all firmwares in the error cases below */
+
+	/* Indicate that WMI is enabled (although not ready yet) */
+	set_bit(WMI_ENABLED, &ar->flag);
+	ar->wmi = ath6kl_wmi_init(ar);
+	if (!ar->wmi) {
+		ath6kl_err("failed to initialize wmi\n");
+		ret = -EIO;
+		goto err_htc_cleanup;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
+
+	ret = ath6kl_register_ieee80211_hw(ar);
+	if (ret)
+		goto err_node_cleanup;
+
+	ret = ath6kl_debug_init(ar);
+	if (ret) {
+		wiphy_unregister(ar->wiphy);
+		goto err_node_cleanup;
+	}
+
+	for (i = 0; i < ar->vif_max; i++)
+		ar->avail_idx_map |= BIT(i);
+
+	rtnl_lock();
+
+	/* Add an initial station interface */
+	ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+				    INFRA_NETWORK);
+
+	rtnl_unlock();
+
+	if (!ndev) {
+		ath6kl_err("Failed to instantiate a network device\n");
+		ret = -ENOMEM;
+		wiphy_unregister(ar->wiphy);
+		goto err_debug_init;
+	}
+
+
+	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
+			__func__, ndev->name, ndev, ar);
+
 	/* setup access class priority mappings */
 	ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest  */
 	ar->ac_stream_pri_map[WMM_AC_BE] = 1;
@@ -1505,156 +1653,100 @@
 	/* allocate some buffers that handle larger AMSDU frames */
 	ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
 
-	/* setup credit distribution */
-	ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
-
 	ath6kl_cookie_init(ar);
 
-	/* start HTC */
-	status = ath6kl_htc_start(ar->htc_target);
-
-	if (status) {
-		ath6kl_cookie_cleanup(ar);
-		goto err_rxbuf_cleanup;
-	}
-
-	/* Wait for Wmi event to be ready */
-	timeleft = wait_event_interruptible_timeout(ar->event_wq,
-						    test_bit(WMI_READY,
-							     &ar->flag),
-						    WMI_TIMEOUT);
-
-	ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
-
-	if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
-		ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
-			   ATH6KL_ABI_VERSION, ar->version.abi_ver);
-		status = -EIO;
-		goto err_htc_stop;
-	}
-
-	if (!timeleft || signal_pending(current)) {
-		ath6kl_err("wmi is not ready or wait was interrupted\n");
-		status = -EIO;
-		goto err_htc_stop;
-	}
-
-	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
-
-	/* communicate the wmi protocol verision to the target */
-	if ((ath6kl_set_host_app_area(ar)) != 0)
-		ath6kl_err("unable to set the host app area\n");
-
 	ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
 			 ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
 
-	ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+	if (suspend_cutpower)
+		ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
 
-	status = ath6kl_target_config_wlan_params(ar);
-	if (!status)
-		goto ath6kl_init_done;
+	ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+			    WIPHY_FLAG_HAVE_AP_SME |
+			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+			    WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
 
-err_htc_stop:
-	ath6kl_htc_stop(ar->htc_target);
+	if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+		ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
+	ar->wiphy->probe_resp_offload =
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+
+	set_bit(FIRST_BOOT, &ar->flag);
+
+	ret = ath6kl_init_hw_start(ar);
+	if (ret) {
+		ath6kl_err("Failed to start hardware: %d\n", ret);
+		goto err_rxbuf_cleanup;
+	}
+
+	/*
+	 * Set the MAC address which is received in the ready event.
+	 * FIXME: Move to ath6kl_interface_add()
+	 */
+	memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+
+	return ret;
+
 err_rxbuf_cleanup:
 	ath6kl_htc_flush_rx_buf(ar->htc_target);
 	ath6kl_cleanup_amsdu_rxbufs(ar);
-err_cleanup_scatter:
-	ath6kl_hif_cleanup_scatter(ar);
+	rtnl_lock();
+	ath6kl_deinit_if_data(netdev_priv(ndev));
+	rtnl_unlock();
+	wiphy_unregister(ar->wiphy);
+err_debug_init:
+	ath6kl_debug_cleanup(ar);
 err_node_cleanup:
 	ath6kl_wmi_shutdown(ar->wmi);
 	clear_bit(WMI_ENABLED, &ar->flag);
 	ar->wmi = NULL;
-
-ath6kl_init_done:
-	return status;
-}
-
-int ath6kl_core_init(struct ath6kl *ar)
-{
-	int ret = 0;
-	struct ath6kl_bmi_target_info targ_info;
-
-	ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
-	if (!ar->ath6kl_wq)
-		return -ENOMEM;
-
-	ret = ath6kl_bmi_init(ar);
-	if (ret)
-		goto err_wq;
-
-	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
-	if (ret)
-		goto err_bmi_cleanup;
-
-	ar->version.target_ver = le32_to_cpu(targ_info.version);
-	ar->target_type = le32_to_cpu(targ_info.type);
-	ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
-
-	ret = ath6kl_init_hw_params(ar);
-	if (ret)
-		goto err_bmi_cleanup;
-
-	ret = ath6kl_configure_target(ar);
-	if (ret)
-		goto err_bmi_cleanup;
-
-	ar->htc_target = ath6kl_htc_create(ar);
-
-	if (!ar->htc_target) {
-		ret = -ENOMEM;
-		goto err_bmi_cleanup;
-	}
-
-	ar->aggr_cntxt = aggr_init(ar->net_dev);
-	if (!ar->aggr_cntxt) {
-		ath6kl_err("failed to initialize aggr\n");
-		ret = -ENOMEM;
-		goto err_htc_cleanup;
-	}
-
-	ret = ath6kl_fetch_firmwares(ar);
-	if (ret)
-		goto err_htc_cleanup;
-
-	ret = ath6kl_init_upload(ar);
-	if (ret)
-		goto err_htc_cleanup;
-
-	ret = ath6kl_init(ar->net_dev);
-	if (ret)
-		goto err_htc_cleanup;
-
-	/* This runs the init function if registered */
-	ret = register_netdev(ar->net_dev);
-	if (ret) {
-		ath6kl_err("register_netdev failed\n");
-		ath6kl_destroy(ar->net_dev, 0);
-		return ret;
-	}
-
-	set_bit(NETDEV_REGISTERED, &ar->flag);
-
-	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
-			__func__, ar->net_dev->name, ar->net_dev, ar);
-
-	return ret;
-
 err_htc_cleanup:
 	ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+	ath6kl_hif_power_off(ar);
 err_bmi_cleanup:
 	ath6kl_bmi_cleanup(ar);
 err_wq:
 	destroy_workqueue(ar->ath6kl_wq);
+
 	return ret;
 }
 
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
+{
+	static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	bool discon_issued;
+
+	netif_stop_queue(vif->ndev);
+
+	clear_bit(WLAN_ENABLED, &vif->flags);
+
+	if (wmi_ready) {
+		discon_issued = test_bit(CONNECTED, &vif->flags) ||
+				test_bit(CONNECT_PEND, &vif->flags);
+		ath6kl_disconnect(vif);
+		del_timer(&vif->disconnect_timer);
+
+		if (discon_issued)
+			ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+						(vif->nw_type & AP_NETWORK) ?
+						bcast_mac : vif->bssid,
+						0, NULL, 0);
+	}
+
+	if (vif->scan_req) {
+		cfg80211_scan_done(vif->scan_req, true);
+		vif->scan_req = NULL;
+	}
+}
+
 void ath6kl_stop_txrx(struct ath6kl *ar)
 {
-	struct net_device *ndev = ar->net_dev;
-
-	if (!ndev)
-		return;
+	struct ath6kl_vif *vif, *tmp_vif;
 
 	set_bit(DESTROY_IN_PROGRESS, &ar->flag);
 
@@ -1663,65 +1755,44 @@
 		return;
 	}
 
-	if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
-		ath6kl_stop_endpoint(ndev, false, true);
+	spin_lock_bh(&ar->list_lock);
+	list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
+		list_del(&vif->list);
+		spin_unlock_bh(&ar->list_lock);
+		ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+		rtnl_lock();
+		ath6kl_deinit_if_data(vif);
+		rtnl_unlock();
+		spin_lock_bh(&ar->list_lock);
+	}
+	spin_unlock_bh(&ar->list_lock);
+
+	clear_bit(WMI_READY, &ar->flag);
+
+	/*
+	 * After wmi_shutdown all WMI events will be dropped. We
+	 * need to clean up the buffers allocated in AP mode and
+	 * give a disconnect notification to the stack, which usually
+	 * happens in the disconnect_event. Simulate the disconnect
+	 * event by calling the function directly. Sometimes
+	 * disconnect_event will be received when the debug logs
+	 * are collected.
+	 */
+	ath6kl_wmi_shutdown(ar->wmi);
+
+	clear_bit(WMI_ENABLED, &ar->flag);
+	if (ar->htc_target) {
+		ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+		ath6kl_htc_stop(ar->htc_target);
+	}
+
+	/*
+	 * Try to reset the device if we can. The driver may have been
+	 * configured NOT to reset the target during a debug session.
+	 */
+	ath6kl_dbg(ATH6KL_DBG_TRC,
+			"attempting to reset target on instance destroy\n");
+	ath6kl_reset_device(ar, ar->target_type, true, true);
 
 	clear_bit(WLAN_ENABLED, &ar->flag);
 }
-
-/*
- * We need to differentiate between the surprise and planned removal of the
- * device because of the following consideration:
- *
- * - In case of surprise removal, the hcd already frees up the pending
- *   for the device and hence there is no need to unregister the function
- *   driver inorder to get these requests. For planned removal, the function
- *   driver has to explicitly unregister itself to have the hcd return all the
- *   pending requests before the data structures for the devices are freed up.
- *   Note that as per the current implementation, the function driver will
- *   end up releasing all the devices since there is no API to selectively
- *   release a particular device.
- *
- * - Certain commands issued to the target can be skipped for surprise
- *   removal since they will anyway not go through.
- */
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
-{
-	struct ath6kl *ar;
-
-	if (!dev || !ath6kl_priv(dev)) {
-		ath6kl_err("failed to get device structure\n");
-		return;
-	}
-
-	ar = ath6kl_priv(dev);
-
-	destroy_workqueue(ar->ath6kl_wq);
-
-	if (ar->htc_target)
-		ath6kl_htc_cleanup(ar->htc_target);
-
-	aggr_module_destroy(ar->aggr_cntxt);
-
-	ath6kl_cookie_cleanup(ar);
-
-	ath6kl_cleanup_amsdu_rxbufs(ar);
-
-	ath6kl_bmi_cleanup(ar);
-
-	ath6kl_debug_cleanup(ar);
-
-	if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
-		unregister_netdev(dev);
-		clear_bit(NETDEV_REGISTERED, &ar->flag);
-	}
-
-	free_netdev(dev);
-
-	kfree(ar->fw_board);
-	kfree(ar->fw_otp);
-	kfree(ar->fw);
-	kfree(ar->fw_patch);
-
-	ath6kl_cfg80211_deinit(ar);
-}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 30b5a53..eea3c74 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -20,12 +20,13 @@
 #include "target.h"
 #include "debug.h"
 
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
 {
+	struct ath6kl *ar = vif->ar;
 	struct ath6kl_sta *conn = NULL;
 	u8 i, max_conn;
 
-	max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+	max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
 
 	for (i = 0; i < max_conn; i++) {
 		if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@@ -174,64 +175,6 @@
 	ar->cookie_count++;
 }
 
-/* set the window address register (using 4-byte register access ). */
-static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
-{
-	int status;
-	s32 i;
-	__le32 addr_val;
-
-	/*
-	 * Write bytes 1,2,3 of the register to set the upper address bytes,
-	 * the LSB is written last to initiate the access cycle
-	 */
-
-	for (i = 1; i <= 3; i++) {
-		/*
-		 * Fill the buffer with the address byte value we want to
-		 * hit 4 times. No need to worry about endianness as the
-		 * same byte is copied to all four bytes of addr_val at
-		 * any time.
-		 */
-		memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4);
-
-		/*
-		 * Hit each byte of the register address with a 4-byte
-		 * write operation to the same address, this is a harmless
-		 * operation.
-		 */
-		status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val,
-					     4, HIF_WR_SYNC_BYTE_FIX);
-		if (status)
-			break;
-	}
-
-	if (status) {
-		ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
-			   addr, reg_addr);
-		return status;
-	}
-
-	/*
-	 * Write the address register again, this time write the whole
-	 * 4-byte value. The effect here is that the LSB write causes the
-	 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
-	 * effect since we are writing the same values again
-	 */
-	addr_val = cpu_to_le32(addr);
-	status = hif_read_write_sync(ar, reg_addr,
-				     (u8 *)&(addr_val),
-				     4, HIF_WR_SYNC_BYTE_INC);
-
-	if (status) {
-		ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
-			   addr, reg_addr);
-		return status;
-	}
-
-	return 0;
-}
-
 /*
  * Read from the hardware through its diagnostic window. No cooperation
  * from the firmware is required for this.
@@ -240,14 +183,7 @@
 {
 	int ret;
 
-	/* set window register to start read cycle */
-	ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address);
-	if (ret)
-		return ret;
-
-	/* read the data */
-	ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value,
-				  sizeof(*value), HIF_RD_SYNC_BYTE_INC);
+	ret = ath6kl_hif_diag_read32(ar, address, value);
 	if (ret) {
 		ath6kl_warn("failed to read32 through diagnose window: %d\n",
 			    ret);
@@ -265,18 +201,15 @@
 {
 	int ret;
 
-	/* set write data */
-	ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value,
-				  sizeof(value), HIF_WR_SYNC_BYTE_INC);
+	ret = ath6kl_hif_diag_write32(ar, address, value);
+
 	if (ret) {
 		ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n",
 			   address, value);
 		return ret;
 	}
 
-	/* set window register, which starts the write cycle */
-	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
-				      address);
+	return 0;
 }
 
 int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
@@ -393,8 +326,8 @@
 #define AR6003_RESET_CONTROL_ADDRESS 0x00004000
 #define AR6004_RESET_CONTROL_ADDRESS 0x00004000
 
-static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
-				bool wait_fot_compltn, bool cold_reset)
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+			 bool wait_fot_compltn, bool cold_reset)
 {
 	int status = 0;
 	u32 address;
@@ -425,102 +358,33 @@
 		ath6kl_err("failed to reset target\n");
 }
 
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
-			  bool get_dbglogs)
-{
-	struct ath6kl *ar = ath6kl_priv(dev);
-	static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-	bool discon_issued;
-
-	netif_stop_queue(dev);
-
-	/* disable the target and the interrupts associated with it */
-	if (test_bit(WMI_READY, &ar->flag)) {
-		discon_issued = (test_bit(CONNECTED, &ar->flag) ||
-				 test_bit(CONNECT_PEND, &ar->flag));
-		ath6kl_disconnect(ar);
-		if (!keep_profile)
-			ath6kl_init_profile_info(ar);
-
-		del_timer(&ar->disconnect_timer);
-
-		clear_bit(WMI_READY, &ar->flag);
-		ath6kl_wmi_shutdown(ar->wmi);
-		clear_bit(WMI_ENABLED, &ar->flag);
-		ar->wmi = NULL;
-
-		/*
-		 * After wmi_shudown all WMI events will be dropped. We
-		 * need to cleanup the buffers allocated in AP mode and
-		 * give disconnect notification to stack, which usually
-		 * happens in the disconnect_event. Simulate the disconnect
-		 * event by calling the function directly. Sometimes
-		 * disconnect_event will be received when the debug logs
-		 * are collected.
-		 */
-		if (discon_issued)
-			ath6kl_disconnect_event(ar, DISCONNECT_CMD,
-						(ar->nw_type & AP_NETWORK) ?
-						bcast_mac : ar->bssid,
-						0, NULL, 0);
-
-		ar->user_key_ctrl = 0;
-
-	} else {
-		ath6kl_dbg(ATH6KL_DBG_TRC,
-			   "%s: wmi is not ready 0x%p 0x%p\n",
-			   __func__, ar, ar->wmi);
-
-		/* Shut down WMI if we have started it */
-		if (test_bit(WMI_ENABLED, &ar->flag)) {
-			ath6kl_dbg(ATH6KL_DBG_TRC,
-				   "%s: shut down wmi\n", __func__);
-			ath6kl_wmi_shutdown(ar->wmi);
-			clear_bit(WMI_ENABLED, &ar->flag);
-			ar->wmi = NULL;
-		}
-	}
-
-	if (ar->htc_target) {
-		ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
-		ath6kl_htc_stop(ar->htc_target);
-	}
-
-	/*
-	 * Try to reset the device if we can. The driver may have been
-	 * configure NOT to reset the target during a debug session.
-	 */
-	ath6kl_dbg(ATH6KL_DBG_TRC,
-		   "attempting to reset target on instance destroy\n");
-	ath6kl_reset_device(ar, ar->target_type, true, true);
-}
-
-static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
 {
 	u8 index;
 	u8 keyusage;
 
 	for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
-		if (ar->wep_key_list[index].key_len) {
+		if (vif->wep_key_list[index].key_len) {
 			keyusage = GROUP_USAGE;
-			if (index == ar->def_txkey_index)
+			if (index == vif->def_txkey_index)
 				keyusage |= TX_USAGE;
 
-			ath6kl_wmi_addkey_cmd(ar->wmi,
+			ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
 					      index,
 					      WEP_CRYPT,
 					      keyusage,
-					      ar->wep_key_list[index].key_len,
-					      NULL,
-					      ar->wep_key_list[index].key,
+					      vif->wep_key_list[index].key_len,
+					      NULL, 0,
+					      vif->wep_key_list[index].key,
 					      KEY_OP_INIT_VAL, NULL,
 					      NO_SYNC_WMIFLAG);
 		}
 	}
 }
 
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
 {
+	struct ath6kl *ar = vif->ar;
 	struct ath6kl_req_key *ik;
 	int res;
 	u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@@ -529,11 +393,13 @@
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
 
-	switch (ar->auth_mode) {
+	switch (vif->auth_mode) {
 	case NONE_AUTH:
-		if (ar->prwise_crypto == WEP_CRYPT)
-			ath6kl_install_static_wep_keys(ar);
-		break;
+		if (vif->prwise_crypto == WEP_CRYPT)
+			ath6kl_install_static_wep_keys(vif);
+		if (!ik->valid || ik->key_type != WAPI_CRYPT)
+			break;
+		/* for WAPI, we need to set the delayed group key, continue: */
 	case WPA_PSK_AUTH:
 	case WPA2_PSK_AUTH:
 	case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
@@ -544,8 +410,9 @@
 			   "the initial group key for AP mode\n");
 		memset(key_rsc, 0, sizeof(key_rsc));
 		res = ath6kl_wmi_addkey_cmd(
-			ar->wmi, ik->key_index, ik->key_type,
-			GROUP_USAGE, ik->key_len, key_rsc, ik->key,
+			ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
+			GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
+			ik->key,
 			KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
 		if (res) {
 			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@@ -554,15 +421,16 @@
 		break;
 	}
 
-	ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
-	set_bit(CONNECTED, &ar->flag);
-	netif_carrier_on(ar->net_dev);
+	ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
+	set_bit(CONNECTED, &vif->flags);
+	netif_carrier_on(vif->ndev);
 }
 
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
 				u8 keymgmt, u8 ucipher, u8 auth,
 				u8 assoc_req_len, u8 *assoc_info)
 {
+	struct ath6kl *ar = vif->ar;
 	u8 *ies = NULL, *wpa_ie = NULL, *pos;
 	size_t ies_len = 0;
 	struct station_info sinfo;
@@ -600,6 +468,18 @@
 				wpa_ie = pos; /* WPS IE */
 				break; /* overrides WPA/RSN IE */
 			}
+		} else if (pos[0] == 0x44 && wpa_ie == NULL) {
+			/*
+			 * Note: WAPI Parameter Set IE re-uses Element ID that
+			 * was officially allocated for BSS AC Access Delay. As
+			 * such, we need to be a bit more careful when
+			 * parsing the frame. However, BSS AC Access Delay
+			 * element is not supposed to be included in
+			 * (Re)Association Request frames, so this should not
+			 * cause problems.
+			 */
+			wpa_ie = pos; /* WAPI IE */
+			break;
 		}
 		pos += 2 + pos[1];
 	}
@@ -617,380 +497,49 @@
 	sinfo.assoc_req_ies_len = ies_len;
 	sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
 
-	cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL);
+	cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
 
-	netif_wake_queue(ar->net_dev);
-}
-
-/* Functions for Tx credit handling */
-void ath6k_credit_init(struct htc_credit_state_info *cred_info,
-		       struct list_head *ep_list,
-		       int tot_credits)
-{
-	struct htc_endpoint_credit_dist *cur_ep_dist;
-	int count;
-
-	cred_info->cur_free_credits = tot_credits;
-	cred_info->total_avail_credits = tot_credits;
-
-	list_for_each_entry(cur_ep_dist, ep_list, list) {
-		if (cur_ep_dist->endpoint == ENDPOINT_0)
-			continue;
-
-		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
-
-		if (tot_credits > 4)
-			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
-			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
-				ath6kl_deposit_credit_to_ep(cred_info,
-						cur_ep_dist,
-						cur_ep_dist->cred_min);
-				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
-			}
-
-		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
-			ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
-						    cur_ep_dist->cred_min);
-			/*
-			 * Control service is always marked active, it
-			 * never goes inactive EVER.
-			 */
-			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
-		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
-			/* this is the lowest priority data endpoint */
-			cred_info->lowestpri_ep_dist = cur_ep_dist->list;
-
-		/*
-		 * Streams have to be created (explicit | implicit) for all
-		 * kinds of traffic. BE endpoints are also inactive in the
-		 * beginning. When BE traffic starts it creates implicit
-		 * streams that redistributes credits.
-		 *
-		 * Note: all other endpoints have minimums set but are
-		 * initially given NO credits. credits will be distributed
-		 * as traffic activity demands
-		 */
-	}
-
-	WARN_ON(cred_info->cur_free_credits <= 0);
-
-	list_for_each_entry(cur_ep_dist, ep_list, list) {
-		if (cur_ep_dist->endpoint == ENDPOINT_0)
-			continue;
-
-		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
-			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
-		else {
-			/*
-			 * For the remaining data endpoints, we assume that
-			 * each cred_per_msg are the same. We use a simple
-			 * calculation here, we take the remaining credits
-			 * and determine how many max messages this can
-			 * cover and then set each endpoint's normal value
-			 * equal to 3/4 this amount.
-			 */
-			count = (cred_info->cur_free_credits /
-				 cur_ep_dist->cred_per_msg)
-				* cur_ep_dist->cred_per_msg;
-			count = (count * 3) >> 2;
-			count = max(count, cur_ep_dist->cred_per_msg);
-			cur_ep_dist->cred_norm = count;
-
-		}
-	}
-}
-
-/* initialize and setup credit distribution */
-int ath6k_setup_credit_dist(void *htc_handle,
-			    struct htc_credit_state_info *cred_info)
-{
-	u16 servicepriority[5];
-
-	memset(cred_info, 0, sizeof(struct htc_credit_state_info));
-
-	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
-	servicepriority[1] = WMI_DATA_VO_SVC;
-	servicepriority[2] = WMI_DATA_VI_SVC;
-	servicepriority[3] = WMI_DATA_BE_SVC;
-	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
-
-	/* set priority list */
-	ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
-
-	return 0;
-}
-
-/* reduce an ep's credits back to a set limit */
-static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
-				 struct htc_endpoint_credit_dist  *ep_dist,
-				 int limit)
-{
-	int credits;
-
-	ep_dist->cred_assngd = limit;
-
-	if (ep_dist->credits <= limit)
-		return;
-
-	credits = ep_dist->credits - limit;
-	ep_dist->credits -= credits;
-	cred_info->cur_free_credits += credits;
-}
-
-static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
-				struct list_head *epdist_list)
-{
-	struct htc_endpoint_credit_dist *cur_dist_list;
-
-	list_for_each_entry(cur_dist_list, epdist_list, list) {
-		if (cur_dist_list->endpoint == ENDPOINT_0)
-			continue;
-
-		if (cur_dist_list->cred_to_dist > 0) {
-			cur_dist_list->credits +=
-					cur_dist_list->cred_to_dist;
-			cur_dist_list->cred_to_dist = 0;
-			if (cur_dist_list->credits >
-			    cur_dist_list->cred_assngd)
-				ath6k_reduce_credits(cred_info,
-						cur_dist_list,
-						cur_dist_list->cred_assngd);
-
-			if (cur_dist_list->credits >
-			    cur_dist_list->cred_norm)
-				ath6k_reduce_credits(cred_info, cur_dist_list,
-						     cur_dist_list->cred_norm);
-
-			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
-				if (cur_dist_list->txq_depth == 0)
-					ath6k_reduce_credits(cred_info,
-							     cur_dist_list, 0);
-			}
-		}
-	}
-}
-
-/*
- * HTC has an endpoint that needs credits, ep_dist is the endpoint in
- * question.
- */
-void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
-			struct htc_endpoint_credit_dist *ep_dist)
-{
-	struct htc_endpoint_credit_dist *curdist_list;
-	int credits = 0;
-	int need;
-
-	if (ep_dist->svc_id == WMI_CONTROL_SVC)
-		goto out;
-
-	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
-	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
-		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
-			goto out;
-
-	/*
-	 * For all other services, we follow a simple algorithm of:
-	 *
-	 * 1. checking the free pool for credits
-	 * 2. checking lower priority endpoints for credits to take
-	 */
-
-	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-	if (credits >= ep_dist->seek_cred)
-		goto out;
-
-	/*
-	 * We don't have enough in the free pool, try taking away from
-	 * lower priority services The rule for taking away credits:
-	 *
-	 *   1. Only take from lower priority endpoints
-	 *   2. Only take what is allocated above the minimum (never
-	 *      starve an endpoint completely)
-	 *   3. Only take what you need.
-	 */
-
-	list_for_each_entry_reverse(curdist_list,
-				    &cred_info->lowestpri_ep_dist,
-				    list) {
-		if (curdist_list == ep_dist)
-			break;
-
-		need = ep_dist->seek_cred - cred_info->cur_free_credits;
-
-		if ((curdist_list->cred_assngd - need) >=
-		     curdist_list->cred_min) {
-			/*
-			 * The current one has been allocated more than
-			 * it's minimum and it has enough credits assigned
-			 * above it's minimum to fulfill our need try to
-			 * take away just enough to fulfill our need.
-			 */
-			ath6k_reduce_credits(cred_info, curdist_list,
-					curdist_list->cred_assngd - need);
-
-			if (cred_info->cur_free_credits >=
-			    ep_dist->seek_cred)
-				break;
-		}
-
-		if (curdist_list->endpoint == ENDPOINT_0)
-			break;
-	}
-
-	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-out:
-	/* did we find some credits? */
-	if (credits)
-		ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
-
-	ep_dist->seek_cred = 0;
-}
-
-/* redistribute credits based on activity change */
-static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
-				       struct list_head *ep_dist_list)
-{
-	struct htc_endpoint_credit_dist *curdist_list;
-
-	list_for_each_entry(curdist_list, ep_dist_list, list) {
-		if (curdist_list->endpoint == ENDPOINT_0)
-			continue;
-
-		if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
-		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
-			curdist_list->dist_flags |= HTC_EP_ACTIVE;
-
-		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
-		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
-			if (curdist_list->txq_depth == 0)
-				ath6k_reduce_credits(info,
-						curdist_list, 0);
-			else
-				ath6k_reduce_credits(info,
-						curdist_list,
-						curdist_list->cred_min);
-		}
-	}
-}
-
-/*
- *
- * This function is invoked whenever endpoints require credit
- * distributions. A lock is held while this function is invoked, this
- * function shall NOT block. The ep_dist_list is a list of distribution
- * structures in prioritized order as defined by the call to the
- * htc_set_credit_dist() api.
- */
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
-			     struct list_head *ep_dist_list,
-			     enum htc_credit_dist_reason reason)
-{
-	switch (reason) {
-	case HTC_CREDIT_DIST_SEND_COMPLETE:
-		ath6k_credit_update(cred_info, ep_dist_list);
-		break;
-	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
-		ath6k_redistribute_credits(cred_info, ep_dist_list);
-		break;
-	default:
-		break;
-	}
-
-	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
-	WARN_ON(cred_info->cur_free_credits < 0);
+	netif_wake_queue(vif->ndev);
 }
 
 void disconnect_timer_handler(unsigned long ptr)
 {
 	struct net_device *dev = (struct net_device *)ptr;
-	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	ath6kl_init_profile_info(ar);
-	ath6kl_disconnect(ar);
+	ath6kl_init_profile_info(vif);
+	ath6kl_disconnect(vif);
 }
 
-void ath6kl_disconnect(struct ath6kl *ar)
+void ath6kl_disconnect(struct ath6kl_vif *vif)
 {
-	if (test_bit(CONNECTED, &ar->flag) ||
-	    test_bit(CONNECT_PEND, &ar->flag)) {
-		ath6kl_wmi_disconnect_cmd(ar->wmi);
+	if (test_bit(CONNECTED, &vif->flags) ||
+	    test_bit(CONNECT_PEND, &vif->flags)) {
+		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
 		/*
 		 * Disconnect command is issued, clear the connect pending
 		 * flag. The connected flag will be cleared in
 		 * disconnect event notification.
 		 */
-		clear_bit(CONNECT_PEND, &ar->flag);
+		clear_bit(CONNECT_PEND, &vif->flags);
 	}
 }
 
-void ath6kl_deep_sleep_enable(struct ath6kl *ar)
-{
-	switch (ar->sme_state) {
-	case SME_CONNECTING:
-		cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
-					NULL, 0,
-					WLAN_STATUS_UNSPECIFIED_FAILURE,
-					GFP_KERNEL);
-		break;
-	case SME_CONNECTED:
-	default:
-		/*
-		 * FIXME: oddly enough smeState is in DISCONNECTED during
-		 * suspend, why? Need to send disconnected event in that
-		 * state.
-		 */
-		cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
-		break;
-	}
-
-	if (test_bit(CONNECTED, &ar->flag) ||
-	    test_bit(CONNECT_PEND, &ar->flag))
-		ath6kl_wmi_disconnect_cmd(ar->wmi);
-
-	ar->sme_state = SME_DISCONNECTED;
-
-	/* disable scanning */
-	if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
-				      0, 0) != 0)
-		printk(KERN_WARNING "ath6kl: failed to disable scan "
-		       "during suspend\n");
-
-	ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
-}
-
 /* WMI Event handlers */
 
-static const char *get_hw_id_string(u32 id)
-{
-	switch (id) {
-	case AR6003_REV1_VERSION:
-		return "1.0";
-	case AR6003_REV2_VERSION:
-		return "2.0";
-	case AR6003_REV3_VERSION:
-		return "2.1.1";
-	default:
-		return "unknown";
-	}
-}
-
 void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
 {
 	struct ath6kl *ar = devt;
-	struct net_device *dev = ar->net_dev;
 
-	memcpy(dev->dev_addr, datap, ETH_ALEN);
+	memcpy(ar->mac_addr, datap, ETH_ALEN);
 	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
-		   __func__, dev->dev_addr);
+		   __func__, ar->mac_addr);
 
 	ar->version.wlan_ver = sw_ver;
 	ar->version.abi_ver = abi_ver;
 
-	snprintf(ar->wdev->wiphy->fw_version,
-		 sizeof(ar->wdev->wiphy->fw_version),
+	snprintf(ar->wiphy->fw_version,
+		 sizeof(ar->wiphy->fw_version),
 		 "%u.%u.%u.%u",
 		 (ar->version.wlan_ver & 0xf0000000) >> 28,
 		 (ar->version.wlan_ver & 0x0f000000) >> 24,
@@ -1000,79 +549,85 @@
 	/* indicate to the waiting thread that the ready event was received */
 	set_bit(WMI_READY, &ar->flag);
 	wake_up(&ar->event_wq);
-
-	ath6kl_info("hw %s fw %s%s\n",
-		    get_hw_id_string(ar->wdev->wiphy->hw_version),
-		    ar->wdev->wiphy->fw_version,
-		    test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
 }
 
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
 {
-	ath6kl_cfg80211_scan_complete_event(ar, status);
+	struct ath6kl *ar = vif->ar;
+	bool aborted = false;
+
+	if (status != WMI_SCAN_STATUS_SUCCESS)
+		aborted = true;
+
+	ath6kl_cfg80211_scan_complete_event(vif, aborted);
 
 	if (!ar->usr_bss_filter) {
-		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-		ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+					 NONE_BSS_FILTER, 0);
 	}
 
-	ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
 }
 
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
 			  u16 listen_int, u16 beacon_int,
 			  enum network_type net_type, u8 beacon_ie_len,
 			  u8 assoc_req_len, u8 assoc_resp_len,
 			  u8 *assoc_info)
 {
-	unsigned long flags;
+	struct ath6kl *ar = vif->ar;
 
-	ath6kl_cfg80211_connect_event(ar, channel, bssid,
+	ath6kl_cfg80211_connect_event(vif, channel, bssid,
 				      listen_int, beacon_int,
 				      net_type, beacon_ie_len,
 				      assoc_req_len, assoc_resp_len,
 				      assoc_info);
 
-	memcpy(ar->bssid, bssid, sizeof(ar->bssid));
-	ar->bss_ch = channel;
+	memcpy(vif->bssid, bssid, sizeof(vif->bssid));
+	vif->bss_ch = channel;
 
-	if ((ar->nw_type == INFRA_NETWORK))
-		ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+	if ((vif->nw_type == INFRA_NETWORK))
+		ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+					      ar->listen_intvl_t,
 					      ar->listen_intvl_b);
 
-	netif_wake_queue(ar->net_dev);
+	netif_wake_queue(vif->ndev);
 
 	/* Update connect & link status atomically */
-	spin_lock_irqsave(&ar->lock, flags);
-	set_bit(CONNECTED, &ar->flag);
-	clear_bit(CONNECT_PEND, &ar->flag);
-	netif_carrier_on(ar->net_dev);
-	spin_unlock_irqrestore(&ar->lock, flags);
+	spin_lock_bh(&vif->if_lock);
+	set_bit(CONNECTED, &vif->flags);
+	clear_bit(CONNECT_PEND, &vif->flags);
+	netif_carrier_on(vif->ndev);
+	spin_unlock_bh(&vif->if_lock);
 
-	aggr_reset_state(ar->aggr_cntxt);
-	ar->reconnect_flag = 0;
+	aggr_reset_state(vif->aggr_cntxt);
+	vif->reconnect_flag = 0;
 
-	if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+	if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
 		memset(ar->node_map, 0, sizeof(ar->node_map));
 		ar->node_num = 0;
 		ar->next_ep_id = ENDPOINT_2;
 	}
 
 	if (!ar->usr_bss_filter) {
-		set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-		ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0);
+		set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+					 CURRENT_BSS_FILTER, 0);
 	}
 }
 
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
 {
 	struct ath6kl_sta *sta;
+	struct ath6kl *ar = vif->ar;
 	u8 tsc[6];
+
 	/*
 	 * For AP case, keyid will have aid of STA which sent pkt with
 	 * MIC error. Use this aid to get MAC & send it to hostapd.
 	 */
-	if (ar->nw_type == AP_NETWORK) {
+	if (vif->nw_type == AP_NETWORK) {
 		sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
 		if (!sta)
 			return;
@@ -1081,19 +636,20 @@
 			   "ap tkip mic error received from aid=%d\n", keyid);
 
 		memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
-		cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+		cfg80211_michael_mic_failure(vif->ndev, sta->mac,
 					     NL80211_KEYTYPE_PAIRWISE, keyid,
 					     tsc, GFP_KERNEL);
 	} else
-		ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+		ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
 
 }
 
-static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
 {
 	struct wmi_target_stats *tgt_stats =
 		(struct wmi_target_stats *) ptr;
-	struct target_stats *stats = &ar->target_stats;
+	struct ath6kl *ar = vif->ar;
+	struct target_stats *stats = &vif->target_stats;
 	struct tkip_ccmp_stats *ccmp_stats;
 	u8 ac;
 
@@ -1189,8 +745,8 @@
 	stats->wow_evt_discarded +=
 		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
 
-	if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
-		clear_bit(STATS_UPDATE_PEND, &ar->flag);
+	if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
+		clear_bit(STATS_UPDATE_PEND, &vif->flags);
 		wake_up(&ar->event_wq);
 	}
 }
@@ -1200,14 +756,15 @@
 	*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
 }
 
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
 {
 	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+	struct ath6kl *ar = vif->ar;
 	struct wmi_ap_mode_stat *ap = &ar->ap_stats;
 	struct wmi_per_sta_stat *st_ap, *st_p;
 	u8 ac;
 
-	if (ar->nw_type == AP_NETWORK) {
+	if (vif->nw_type == AP_NETWORK) {
 		if (len < sizeof(*p))
 			return;
 
@@ -1226,7 +783,7 @@
 		}
 
 	} else {
-		ath6kl_update_target_stats(ar, ptr, len);
+		ath6kl_update_target_stats(vif, ptr, len);
 	}
 }
 
@@ -1245,11 +802,12 @@
 	wake_up(&ar->event_wq);
 }
 
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
 {
 	struct ath6kl_sta *conn;
 	struct sk_buff *skb;
 	bool psq_empty = false;
+	struct ath6kl *ar = vif->ar;
 
 	conn = ath6kl_find_sta_by_aid(ar, aid);
 
@@ -1272,7 +830,7 @@
 	spin_unlock_bh(&conn->psq_lock);
 
 	conn->sta_flags |= STA_PS_POLLED;
-	ath6kl_data_tx(skb, ar->net_dev);
+	ath6kl_data_tx(skb, vif->ndev);
 	conn->sta_flags &= ~STA_PS_POLLED;
 
 	spin_lock_bh(&conn->psq_lock);
@@ -1280,13 +838,14 @@
 	spin_unlock_bh(&conn->psq_lock);
 
 	if (psq_empty)
-		ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+		ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
 }
 
-void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
 {
 	bool mcastq_empty = false;
 	struct sk_buff *skb;
+	struct ath6kl *ar = vif->ar;
 
 	/*
 	 * If there are no associated STAs, ignore the DTIM expiry event.
@@ -1308,31 +867,31 @@
 		return;
 
 	/* set the STA flag to dtim_expired for the frame to go out */
-	set_bit(DTIM_EXPIRED, &ar->flag);
+	set_bit(DTIM_EXPIRED, &vif->flags);
 
 	spin_lock_bh(&ar->mcastpsq_lock);
 	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
 		spin_unlock_bh(&ar->mcastpsq_lock);
 
-		ath6kl_data_tx(skb, ar->net_dev);
+		ath6kl_data_tx(skb, vif->ndev);
 
 		spin_lock_bh(&ar->mcastpsq_lock);
 	}
 	spin_unlock_bh(&ar->mcastpsq_lock);
 
-	clear_bit(DTIM_EXPIRED, &ar->flag);
+	clear_bit(DTIM_EXPIRED, &vif->flags);
 
 	/* clear the LSB of the BitMapCtl field of the TIM IE */
-	ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+	ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
 }
 
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
 			     u8 assoc_resp_len, u8 *assoc_info,
 			     u16 prot_reason_status)
 {
-	unsigned long flags;
+	struct ath6kl *ar = vif->ar;
 
-	if (ar->nw_type == AP_NETWORK) {
+	if (vif->nw_type == AP_NETWORK) {
 		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
 			return;
 
@@ -1344,31 +903,31 @@
 
 			/* clear the LSB of the TIM IE's BitMapCtl field */
 			if (test_bit(WMI_READY, &ar->flag))
-				ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+						       MCAST_AID, 0);
 		}
 
 		if (!is_broadcast_ether_addr(bssid)) {
 			/* send event to application */
-			cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+			cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
 		}
 
-		if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) {
-			memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
-			clear_bit(CONNECTED, &ar->flag);
+		if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
+			memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+			clear_bit(CONNECTED, &vif->flags);
 		}
 		return;
 	}
 
-	ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+	ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
 				       assoc_resp_len, assoc_info,
 				       prot_reason_status);
 
-	aggr_reset_state(ar->aggr_cntxt);
+	aggr_reset_state(vif->aggr_cntxt);
 
-	del_timer(&ar->disconnect_timer);
+	del_timer(&vif->disconnect_timer);
 
-	ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
-		   "disconnect reason is %d\n", reason);
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
 
 	/*
 	 * If the event is due to disconnect cmd from the host, only they
@@ -1377,83 +936,88 @@
 	 */
 	if (reason == DISCONNECT_CMD) {
 		if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
-			ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+			ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+						 NONE_BSS_FILTER, 0);
 	} else {
-		set_bit(CONNECT_PEND, &ar->flag);
+		set_bit(CONNECT_PEND, &vif->flags);
 		if (((reason == ASSOC_FAILED) &&
 		    (prot_reason_status == 0x11)) ||
 		    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
-		     && (ar->reconnect_flag == 1))) {
-			set_bit(CONNECTED, &ar->flag);
+		     && (vif->reconnect_flag == 1))) {
+			set_bit(CONNECTED, &vif->flags);
 			return;
 		}
 	}
 
 	/* update connect & link status atomically */
-	spin_lock_irqsave(&ar->lock, flags);
-	clear_bit(CONNECTED, &ar->flag);
-	netif_carrier_off(ar->net_dev);
-	spin_unlock_irqrestore(&ar->lock, flags);
+	spin_lock_bh(&vif->if_lock);
+	clear_bit(CONNECTED, &vif->flags);
+	netif_carrier_off(vif->ndev);
+	spin_unlock_bh(&vif->if_lock);
 
-	if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
-		ar->reconnect_flag = 0;
+	if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
+		vif->reconnect_flag = 0;
 
 	if (reason != CSERV_DISCONNECT)
 		ar->user_key_ctrl = 0;
 
-	netif_stop_queue(ar->net_dev);
-	memset(ar->bssid, 0, sizeof(ar->bssid));
-	ar->bss_ch = 0;
+	netif_stop_queue(vif->ndev);
+	memset(vif->bssid, 0, sizeof(vif->bssid));
+	vif->bss_ch = 0;
 
 	ath6kl_tx_data_cleanup(ar);
 }
 
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
+{
+	struct ath6kl_vif *vif;
+
+	spin_lock_bh(&ar->list_lock);
+	if (list_empty(&ar->vif_list)) {
+		spin_unlock_bh(&ar->list_lock);
+		return NULL;
+	}
+
+	vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
+
+	spin_unlock_bh(&ar->list_lock);
+
+	return vif;
+}
+
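ath6kl_vif_first() gives code that handles device-wide events a safe way to pick up an interface now that state is kept per vif. A minimal sketch of a caller (hypothetical, not part of this patch; it only uses fields visible in this diff):

	/* Hypothetical device-wide event handler: route to the first vif. */
	static void ath6kl_example_wake_first_vif(struct ath6kl *ar)
	{
		struct ath6kl_vif *vif = ath6kl_vif_first(ar);

		if (!vif)
			return;		/* no interface registered yet */

		if (test_bit(CONNECTED, &vif->flags))
			netif_wake_queue(vif->ndev);
	}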
 static int ath6kl_open(struct net_device *dev)
 {
-	struct ath6kl *ar = ath6kl_priv(dev);
-	unsigned long flags;
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	spin_lock_irqsave(&ar->lock, flags);
+	set_bit(WLAN_ENABLED, &vif->flags);
 
-	set_bit(WLAN_ENABLED, &ar->flag);
-
-	if (test_bit(CONNECTED, &ar->flag)) {
+	if (test_bit(CONNECTED, &vif->flags)) {
 		netif_carrier_on(dev);
 		netif_wake_queue(dev);
 	} else
 		netif_carrier_off(dev);
 
-	spin_unlock_irqrestore(&ar->lock, flags);
-
 	return 0;
 }
 
 static int ath6kl_close(struct net_device *dev)
 {
-	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
 	netif_stop_queue(dev);
 
-	ath6kl_disconnect(ar);
+	ath6kl_cfg80211_stop(vif);
 
-	if (test_bit(WMI_READY, &ar->flag)) {
-		if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
-					      0, 0, 0))
-			return -EIO;
-
-		clear_bit(WLAN_ENABLED, &ar->flag);
-	}
-
-	ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+	clear_bit(WLAN_ENABLED, &vif->flags);
 
 	return 0;
 }
 
 static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
 {
-	struct ath6kl *ar = ath6kl_priv(dev);
+	struct ath6kl_vif *vif = netdev_priv(dev);
 
-	return &ar->net_stats;
+	return &vif->net_stats;
 }
 
 static struct net_device_ops ath6kl_netdev_ops = {
@@ -1466,6 +1030,7 @@
 void init_netdev(struct net_device *dev)
 {
 	dev->netdev_ops = &ath6kl_netdev_ops;
+	dev->destructor = free_netdev;
 	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
 
 	dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 066d4f8..9475e2d 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -22,7 +22,7 @@
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sd.h>
-#include "htc_hif.h"
+#include "hif.h"
 #include "hif-ops.h"
 #include "target.h"
 #include "debug.h"
@@ -40,12 +40,18 @@
 	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
 
 	struct ath6kl *ar;
+
 	u8 *dma_buffer;
 
+	/* protects access to dma_buffer */
+	struct mutex dma_buffer_mutex;
+
 	/* scatter request list head */
 	struct list_head scat_req;
 
 	spinlock_t scat_lock;
+	bool scatter_enabled;
+
 	bool is_disabled;
 	atomic_t irq_handling;
 	const struct sdio_device_id *id;
@@ -135,6 +141,8 @@
 {
 	int ret = 0;
 
+	sdio_claim_host(func);
+
 	if (request & HIF_WRITE) {
 		/* FIXME: looks like ugly workaround for something */
 		if (addr >= HIF_MBOX_BASE_ADDR &&
@@ -156,6 +164,8 @@
 			ret = sdio_memcpy_fromio(func, buf, addr, len);
 	}
 
+	sdio_release_host(func);
+
 	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
 		   request & HIF_WRITE ? "wr" : "rd", addr,
 		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
@@ -167,12 +177,11 @@
 static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
 {
 	struct bus_request *bus_req;
-	unsigned long flag;
 
-	spin_lock_irqsave(&ar_sdio->lock, flag);
+	spin_lock_bh(&ar_sdio->lock);
 
 	if (list_empty(&ar_sdio->bus_req_freeq)) {
-		spin_unlock_irqrestore(&ar_sdio->lock, flag);
+		spin_unlock_bh(&ar_sdio->lock);
 		return NULL;
 	}
 
@@ -180,7 +189,7 @@
 				   struct bus_request, list);
 	list_del(&bus_req->list);
 
-	spin_unlock_irqrestore(&ar_sdio->lock, flag);
+	spin_unlock_bh(&ar_sdio->lock);
 	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
 		   __func__, bus_req);
 
@@ -190,14 +199,12 @@
 static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
 				     struct bus_request *bus_req)
 {
-	unsigned long flag;
-
 	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
 		   __func__, bus_req);
 
-	spin_lock_irqsave(&ar_sdio->lock, flag);
+	spin_lock_bh(&ar_sdio->lock);
 	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
-	spin_unlock_irqrestore(&ar_sdio->lock, flag);
+	spin_unlock_bh(&ar_sdio->lock);
 }
 
 static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -291,10 +298,14 @@
 	mmc_req.cmd = &cmd;
 	mmc_req.data = &data;
 
+	sdio_claim_host(ar_sdio->func);
+
 	mmc_set_data_timeout(&data, ar_sdio->func->card);
 	/* synchronous call to process request */
 	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
 
+	sdio_release_host(ar_sdio->func);
+
 	status = cmd.error ? cmd.error : data.error;
 
 scat_complete:
@@ -389,17 +400,19 @@
 	if (buf_needs_bounce(buf)) {
 		if (!ar_sdio->dma_buffer)
 			return -ENOMEM;
+		mutex_lock(&ar_sdio->dma_buffer_mutex);
 		tbuf = ar_sdio->dma_buffer;
 		memcpy(tbuf, buf, len);
 		bounced = true;
 	} else
 		tbuf = buf;
 
-	sdio_claim_host(ar_sdio->func);
 	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
 	if ((request & HIF_READ) && bounced)
 		memcpy(buf, tbuf, len);
-	sdio_release_host(ar_sdio->func);
+
+	if (bounced)
+		mutex_unlock(&ar_sdio->dma_buffer_mutex);
 
 	return ret;
 }
@@ -418,29 +431,25 @@
 						     req->request);
 		context = req->packet;
 		ath6kl_sdio_free_bus_req(ar_sdio, req);
-		ath6kldev_rw_comp_handler(context, status);
+		ath6kl_hif_rw_comp_handler(context, status);
 	}
 }
 
 static void ath6kl_sdio_write_async_work(struct work_struct *work)
 {
 	struct ath6kl_sdio *ar_sdio;
-	unsigned long flags;
 	struct bus_request *req, *tmp_req;
 
 	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
-	sdio_claim_host(ar_sdio->func);
 
-	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+	spin_lock_bh(&ar_sdio->wr_async_lock);
 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
 		list_del(&req->list);
-		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+		spin_unlock_bh(&ar_sdio->wr_async_lock);
 		__ath6kl_sdio_write_async(ar_sdio, req);
-		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+		spin_lock_bh(&ar_sdio->wr_async_lock);
 	}
-	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
-
-	sdio_release_host(ar_sdio->func);
+	spin_unlock_bh(&ar_sdio->wr_async_lock);
 }
 
 static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@@ -459,20 +468,23 @@
 	 */
 	sdio_release_host(ar_sdio->func);
 
-	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
 	sdio_claim_host(ar_sdio->func);
 	atomic_set(&ar_sdio->irq_handling, 0);
 	WARN_ON(status && status != -ECANCELED);
 }
 
-static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_on(struct ath6kl *ar)
 {
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	struct sdio_func *func = ar_sdio->func;
 	int ret = 0;
 
 	if (!ar_sdio->is_disabled)
 		return 0;
 
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
+
 	sdio_claim_host(func);
 
 	ret = sdio_enable_func(func);
@@ -495,13 +507,16 @@
 	return ret;
 }
 
-static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_off(struct ath6kl *ar)
 {
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	int ret;
 
 	if (ar_sdio->is_disabled)
 		return 0;
 
+	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
+
 	/* Disable the card */
 	sdio_claim_host(ar_sdio->func);
 	ret = sdio_disable_func(ar_sdio->func);
@@ -521,7 +536,6 @@
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	struct bus_request *bus_req;
-	unsigned long flags;
 
 	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
 
@@ -534,9 +548,9 @@
 	bus_req->request = request;
 	bus_req->packet = packet;
 
-	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+	spin_lock_bh(&ar_sdio->wr_async_lock);
 	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
-	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+	spin_unlock_bh(&ar_sdio->wr_async_lock);
 	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
 
 	return 0;
@@ -582,9 +596,8 @@
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	struct hif_scatter_req *node = NULL;
-	unsigned long flag;
 
-	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+	spin_lock_bh(&ar_sdio->scat_lock);
 
 	if (!list_empty(&ar_sdio->scat_req)) {
 		node = list_first_entry(&ar_sdio->scat_req,
@@ -592,7 +605,7 @@
 		list_del(&node->list);
 	}
 
-	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+	spin_unlock_bh(&ar_sdio->scat_lock);
 
 	return node;
 }
@@ -601,13 +614,12 @@
 					struct hif_scatter_req *s_req)
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
-	unsigned long flag;
 
-	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+	spin_lock_bh(&ar_sdio->scat_lock);
 
 	list_add_tail(&s_req->list, &ar_sdio->scat_req);
 
-	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+	spin_unlock_bh(&ar_sdio->scat_lock);
 
 }
 
@@ -618,7 +630,6 @@
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	u32 request = scat_req->req;
 	int status = 0;
-	unsigned long flags;
 
 	if (!scat_req->len)
 		return -EINVAL;
@@ -627,14 +638,12 @@
 		"hif-scatter: total len: %d scatter entries: %d\n",
 		scat_req->len, scat_req->scat_entries);
 
-	if (request & HIF_SYNCHRONOUS) {
-		sdio_claim_host(ar_sdio->func);
+	if (request & HIF_SYNCHRONOUS)
 		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
-		sdio_release_host(ar_sdio->func);
-	} else {
-		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+	else {
+		spin_lock_bh(&ar_sdio->wr_async_lock);
 		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
-		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+		spin_unlock_bh(&ar_sdio->wr_async_lock);
 		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
 	}
 
@@ -646,23 +655,27 @@
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	struct hif_scatter_req *s_req, *tmp_req;
-	unsigned long flag;
 
 	/* empty the free list */
-	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+	spin_lock_bh(&ar_sdio->scat_lock);
 	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
 		list_del(&s_req->list);
-		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+		spin_unlock_bh(&ar_sdio->scat_lock);
 
+		/*
+		 * FIXME: should we also call the completion handler,
+		 * ath6kl_hif_rw_comp_handler(), with status -ECANCELED so
+		 * that the packet is properly freed?
+		 */
 		if (s_req->busrequest)
 			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
 		kfree(s_req->virt_dma_buf);
 		kfree(s_req->sgentries);
 		kfree(s_req);
 
-		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+		spin_lock_bh(&ar_sdio->scat_lock);
 	}
-	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+	spin_unlock_bh(&ar_sdio->scat_lock);
 }
 
 /* setup of HIF scatter resources */
@@ -673,6 +686,11 @@
 	int ret;
 	bool virt_scat = false;
 
+	if (ar_sdio->scatter_enabled)
+		return 0;
+
+	ar_sdio->scatter_enabled = true;
+
 	/* check if host supports scatter and it meets our requirements */
 	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
 		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -687,8 +705,8 @@
 				MAX_SCATTER_REQUESTS, virt_scat);
 
 		if (!ret) {
-			ath6kl_dbg(ATH6KL_DBG_SCATTER,
-				   "hif-scatter enabled: max scatter req : %d entries: %d\n",
+			ath6kl_dbg(ATH6KL_DBG_BOOT,
+				   "hif-scatter enabled requests %d entries %d\n",
 				   MAX_SCATTER_REQUESTS,
 				   MAX_SCATTER_ENTRIES_PER_REQ);
 
@@ -712,8 +730,8 @@
 			return ret;
 		}
 
-		ath6kl_dbg(ATH6KL_DBG_SCATTER,
-			   "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+		ath6kl_dbg(ATH6KL_DBG_BOOT,
+			   "virtual scatter enabled requests %d entries %d\n",
 			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
 
 		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@@ -724,7 +742,47 @@
 	return 0;
 }
 
-static int ath6kl_sdio_suspend(struct ath6kl *ar)
+static int ath6kl_sdio_config(struct ath6kl *ar)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+	struct sdio_func *func = ar_sdio->func;
+	int ret;
+
+	sdio_claim_host(func);
+
+	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+	    MANUFACTURER_ID_AR6003_BASE) {
+		/* enable 4-bit ASYNC interrupt on AR6003 or later */
+		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+						CCCR_SDIO_IRQ_MODE_REG,
+						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+		if (ret) {
+			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+				   ret);
+			goto out;
+		}
+
+		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
+	}
+
+	/* give us some time to enable, in ms */
+	func->enable_timeout = 100;
+
+	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+	if (ret) {
+		ath6kl_err("Set sdio block size %d failed: %d\n",
+			   HIF_MBOX_BLOCK_SIZE, ret);
+		goto out;
+	}
+
+out:
+	sdio_release_host(func);
+
+	return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 {
 	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
 	struct sdio_func *func = ar_sdio->func;
@@ -733,12 +791,14 @@
 
 	flags = sdio_get_host_pm_caps(func);
 
-	if (!(flags & MMC_PM_KEEP_POWER))
-		/* as host doesn't support keep power we need to bail out */
-		ath6kl_dbg(ATH6KL_DBG_SDIO,
-			   "func %d doesn't support MMC_PM_KEEP_POWER\n",
-			   func->num);
-		return -EINVAL;
+	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
+
+	if (!(flags & MMC_PM_KEEP_POWER) ||
+	    (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
+		/* as host doesn't support keep power we need to cut power */
+		return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
+					       NULL);
+	}
 
 	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
 	if (ret) {
@@ -747,11 +807,367 @@
 		return ret;
 	}
 
-	ath6kl_deep_sleep_enable(ar);
+	if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
+		goto deepsleep;
+
+	/* sdio irq wakes up host */
+
+	if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
+		ret =  ath6kl_cfg80211_suspend(ar,
+					       ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+					       NULL);
+		if (ret) {
+			ath6kl_warn("Schedule scan suspend failed: %d", ret);
+			return ret;
+		}
+
+		ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+		if (ret)
+			ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);
+
+		return ret;
+	}
+
+	if (wow) {
+		/*
+		 * At this point the host sdio controller is capable of
+		 * keeping power and waking the host via sdio irq, so it
+		 * is fine to continue with the wow suspend operation.
+		 */
+		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+		if (ret)
+			return ret;
+
+		ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+		if (ret)
+			ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+		return ret;
+	}
+
+deepsleep:
+	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
+}
+
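The suspend handler above picks one of four cfg80211 suspend modes based on the host controller's PM capabilities, the driver configuration and the current state. A condensed, illustration-only helper that mirrors that decision tree (the enum type name ath6kl_cfg_suspend_mode is assumed from the ATH6KL_CFG_SUSPEND_* constants used above):

	/* Illustration only: mirrors the branching in ath6kl_sdio_suspend(). */
	static enum ath6kl_cfg_suspend_mode
	ath6kl_example_pick_suspend_mode(struct ath6kl *ar, mmc_pm_flag_t flags,
					 struct cfg80211_wowlan *wow)
	{
		if (!(flags & MMC_PM_KEEP_POWER) ||
		    (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER))
			return ATH6KL_CFG_SUSPEND_CUTPOWER;

		if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
			return ATH6KL_CFG_SUSPEND_DEEPSLEEP;

		if (ar->state == ATH6KL_STATE_SCHED_SCAN)
			return ATH6KL_CFG_SUSPEND_SCHED_SCAN;

		if (wow)
			return ATH6KL_CFG_SUSPEND_WOW;

		return ATH6KL_CFG_SUSPEND_DEEPSLEEP;
	}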
+static int ath6kl_sdio_resume(struct ath6kl *ar)
+{
+	switch (ar->state) {
+	case ATH6KL_STATE_OFF:
+	case ATH6KL_STATE_CUTPOWER:
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+			   "sdio resume configuring sdio\n");
+
+		/* need to set sdio settings after power is cut from sdio */
+		ath6kl_sdio_config(ar);
+		break;
+
+	case ATH6KL_STATE_ON:
+		break;
+
+	case ATH6KL_STATE_DEEPSLEEP:
+		break;
+
+	case ATH6KL_STATE_WOW:
+		break;
+	case ATH6KL_STATE_SCHED_SCAN:
+		break;
+	}
+
+	ath6kl_cfg80211_resume(ar);
 
 	return 0;
 }
 
+/* set the window address register (using 4-byte register access ). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+	int status;
+	u8 addr_val[4];
+	s32 i;
+
+	/*
+	 * Write bytes 1, 2 and 3 of the register to set the upper address
+	 * bytes; the LSB is written last to initiate the access cycle.
+	 */
+
+	for (i = 1; i <= 3; i++) {
+		/*
+		 * Fill the buffer with the address byte value we want to
+		 * hit 4 times.
+		 */
+		memset(addr_val, ((u8 *)&addr)[i], 4);
+
+		/*
+		 * Hit each byte of the register address with a 4-byte
+		 * write operation to the same address, this is a harmless
+		 * operation.
+		 */
+		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
+					     4, HIF_WR_SYNC_BYTE_FIX);
+		if (status)
+			break;
+	}
+
+	if (status) {
+		ath6kl_err("%s: failed to write initial bytes of 0x%x "
+			   "to window reg: 0x%X\n", __func__,
+			   addr, reg_addr);
+		return status;
+	}
+
+	/*
+	 * Write the address register again, this time writing the whole
+	 * 4-byte value. The LSB write starts the access cycle; the extra
+	 * writes to bytes 1, 2 and 3 have no effect since they carry the
+	 * same values as before.
+	 */
+	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
+				     4, HIF_WR_SYNC_BYTE_INC);
+
+	if (status) {
+		ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
+			   __func__, addr, reg_addr);
+		return status;
+	}
+
+	return 0;
+}
+
+static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+	int status;
+
+	/* set window register to start read cycle */
+	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
+					address);
+
+	if (status)
+		return status;
+
+	/* read the data */
+	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+				(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
+	if (status) {
+		ath6kl_err("%s: failed to read from window data addr\n",
+			__func__);
+		return status;
+	}
+
+	return status;
+}
+
+static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
+				    __le32 data)
+{
+	int status;
+	u32 val = (__force u32) data;
+
+	/* set write data */
+	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+				(u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
+	if (status) {
+		ath6kl_err("%s: failed to write 0x%x to window data addr\n",
+			   __func__, data);
+		return status;
+	}
+
+	/* set window register, which starts the write cycle */
+	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+				      address);
+}
+
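Both diag helpers go through the 32-bit window registers: the address is latched byte by byte and the access cycle starts when the LSB (read) or the window register write (write) completes. They are exported through the new hif ops table below, so core code can read a target register without knowing it sits behind SDIO. A small sketch of such a caller (hypothetical wrapper; the in-tree equivalent would sit behind the hif-ops layer):

	/* Illustration only: read one target register via the hif op. */
	static int ath6kl_example_read_target_reg(struct ath6kl *ar, u32 addr,
						  u32 *val)
	{
		/* dispatches to ath6kl_sdio_diag_read32() for SDIO devices */
		return ar->hif_ops->diag_read32(ar, addr, val);
	}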
+static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
+{
+	u32 addr;
+	unsigned long timeout;
+	int ret;
+
+	ar->bmi.cmd_credits = 0;
+
+	/* Read the counter register to get the command credits */
+	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+		/*
+		 * Hit the credit counter with a 4-byte access, the first byte
+		 * read will hit the counter and cause a decrement, while the
+		 * remaining 3 bytes have no effect. The rationale behind this
+		 * is to make all HIF accesses 4-byte aligned.
+		 */
+		ret = ath6kl_sdio_read_write_sync(ar, addr,
+					 (u8 *)&ar->bmi.cmd_credits, 4,
+					 HIF_RD_SYNC_BYTE_INC);
+		if (ret) {
+			ath6kl_err("Unable to decrement the command credit "
+						"count register: %d\n", ret);
+			return ret;
+		}
+
+		/* The counter is only 8 bits.
+		 * Ignore anything in the upper 3 bytes
+		 */
+		ar->bmi.cmd_credits &= 0xFF;
+	}
+
+	if (!ar->bmi.cmd_credits) {
+		ath6kl_err("bmi communication timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
+{
+	unsigned long timeout;
+	u32 rx_word = 0;
+	int ret = 0;
+
+	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+	while ((time_before(jiffies, timeout)) && !rx_word) {
+		ret = ath6kl_sdio_read_write_sync(ar,
+					RX_LOOKAHEAD_VALID_ADDRESS,
+					(u8 *)&rx_word, sizeof(rx_word),
+					HIF_RD_SYNC_BYTE_INC);
+		if (ret) {
+			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+			return ret;
+		}
+
+		 /* all we really want is one bit */
+		rx_word &= (1 << ENDPOINT1);
+	}
+
+	if (!rx_word) {
+		ath6kl_err("bmi_recv_buf FIFO empty\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	int ret;
+	u32 addr;
+
+	ret = ath6kl_sdio_bmi_credits(ar);
+	if (ret)
+		return ret;
+
+	addr = ar->mbox_info.htc_addr;
+
+	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+					  HIF_WR_SYNC_BYTE_INC);
+	if (ret)
+		ath6kl_err("unable to send the bmi data to the device\n");
+
+	return ret;
+}
+
+static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	int ret;
+	u32 addr;
+
+	/*
+	 * During normal bootup, small reads may be required.
+	 * Rather than issue an HIF Read and then wait as the Target
+	 * adds successive bytes to the FIFO, we wait here until
+	 * we know that response data is available.
+	 *
+	 * This allows us to cleanly timeout on an unexpected
+	 * Target failure rather than risk problems at the HIF level.
+	 * In particular, this avoids SDIO timeouts and possibly garbage
+	 * data on some host controllers.  And on an interconnect
+	 * such as Compact Flash (as well as some SDIO masters) which
+	 * does not provide any indication on data timeout, it avoids
+	 * a potential hang or garbage response.
+	 *
+	 * Synchronization is more difficult for reads larger than the
+	 * size of the MBOX FIFO (128B), because the Target is unable
+	 * to push the 129th byte of data until AFTER the Host posts an
+	 * HIF Read and removes some FIFO data.  So for large reads the
+	 * Host proceeds to post an HIF Read BEFORE all the data is
+	 * actually available to read.  Fortunately, large BMI reads do
+	 * not occur in practice -- they're supported for debug/development.
+	 *
+	 * So Host/Target BMI synchronization is divided into these cases:
+	 *  CASE 1: length < 4
+	 *        Should not happen
+	 *
+	 *  CASE 2: 4 <= length <= 128
+	 *        Wait for first 4 bytes to be in FIFO
+	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
+	 *        a BMI command credit, which indicates that the ENTIRE
+	 *        response is available in the the FIFO
+	 *
+	 *  CASE 3: length > 128
+	 *        Wait for the first 4 bytes to be in FIFO
+	 *
+	 * For most uses, a small timeout should be sufficient and we will
+	 * usually see a response quickly; but there may be some unusual
+	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+	 * For now, we use an unbounded busy loop while waiting for
+	 * BMI_EXECUTE.
+	 *
+	 * If BMI_EXECUTE ever needs to support longer-latency execution,
+	 * especially in production, this code needs to be enhanced to sleep
+	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
+	 * a function of Host processor speed.
+	 */
+	if (len >= 4) { /* NB: Currently, always true */
+		ret = ath6kl_bmi_get_rx_lkahd(ar);
+		if (ret)
+			return ret;
+	}
+
+	addr = ar->mbox_info.htc_addr;
+	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+				  HIF_RD_SYNC_BYTE_INC);
+	if (ret) {
+		ath6kl_err("Unable to read the bmi data from the device: %d\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath6kl_sdio_stop(struct ath6kl *ar)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+	struct bus_request *req, *tmp_req;
+	void *context;
+
+	/* FIXME: make sure that wq is not queued again */
+
+	cancel_work_sync(&ar_sdio->wr_async_work);
+
+	spin_lock_bh(&ar_sdio->wr_async_lock);
+
+	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+		list_del(&req->list);
+
+		if (req->scat_req) {
+			/* this is a scatter gather request */
+			req->scat_req->status = -ECANCELED;
+			req->scat_req->complete(ar_sdio->ar->htc_target,
+						req->scat_req);
+		} else {
+			context = req->packet;
+			ath6kl_sdio_free_bus_req(ar_sdio, req);
+			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
+		}
+	}
+
+	spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
+}
+
 static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
 	.read_write_sync = ath6kl_sdio_read_write_sync,
 	.write_async = ath6kl_sdio_write_async,
@@ -763,8 +1179,47 @@
 	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
 	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
 	.suspend = ath6kl_sdio_suspend,
+	.resume = ath6kl_sdio_resume,
+	.diag_read32 = ath6kl_sdio_diag_read32,
+	.diag_write32 = ath6kl_sdio_diag_write32,
+	.bmi_read = ath6kl_sdio_bmi_read,
+	.bmi_write = ath6kl_sdio_bmi_write,
+	.power_on = ath6kl_sdio_power_on,
+	.power_off = ath6kl_sdio_power_off,
+	.stop = ath6kl_sdio_stop,
 };
 
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Empty handlers so that the mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow the cfg80211 suspend/resume handlers.
+ */
+static int ath6kl_sdio_pm_suspend(struct device *device)
+{
+	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
+
+	return 0;
+}
+
+static int ath6kl_sdio_pm_resume(struct device *device)
+{
+	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
+			 ath6kl_sdio_pm_resume);
+
+#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
+
+#else
+
+#define ATH6KL_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
 static int ath6kl_sdio_probe(struct sdio_func *func,
 			     const struct sdio_device_id *id)
 {
@@ -773,8 +1228,8 @@
 	struct ath6kl *ar;
 	int count;
 
-	ath6kl_dbg(ATH6KL_DBG_SDIO,
-		   "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT,
+		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
 		   func->num, func->vendor, func->device,
 		   func->max_blksize, func->cur_blksize);
 
@@ -797,6 +1252,7 @@
 	spin_lock_init(&ar_sdio->lock);
 	spin_lock_init(&ar_sdio->scat_lock);
 	spin_lock_init(&ar_sdio->wr_async_lock);
+	mutex_init(&ar_sdio->dma_buffer_mutex);
 
 	INIT_LIST_HEAD(&ar_sdio->scat_req);
 	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@@ -815,62 +1271,29 @@
 	}
 
 	ar_sdio->ar = ar;
+	ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
 	ar->hif_priv = ar_sdio;
 	ar->hif_ops = &ath6kl_sdio_ops;
+	ar->bmi.max_data_size = 256;
 
 	ath6kl_sdio_set_mbox_info(ar);
 
-	sdio_claim_host(func);
-
-	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
-	    MANUFACTURER_ID_AR6003_BASE) {
-		/* enable 4-bit ASYNC interrupt on AR6003 or later */
-		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
-						CCCR_SDIO_IRQ_MODE_REG,
-						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
-		if (ret) {
-			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
-				   ret);
-			sdio_release_host(func);
-			goto err_cfg80211;
-		}
-
-		ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
-	}
-
-	/* give us some time to enable, in ms */
-	func->enable_timeout = 100;
-
-	sdio_release_host(func);
-
-	ret = ath6kl_sdio_power_on(ar_sdio);
-	if (ret)
-		goto err_cfg80211;
-
-	sdio_claim_host(func);
-
-	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+	ret = ath6kl_sdio_config(ar);
 	if (ret) {
-		ath6kl_err("Set sdio block size %d failed: %d)\n",
-			   HIF_MBOX_BLOCK_SIZE, ret);
-		sdio_release_host(func);
-		goto err_off;
+		ath6kl_err("Failed to config sdio: %d\n", ret);
+		goto err_core_alloc;
 	}
 
-	sdio_release_host(func);
-
 	ret = ath6kl_core_init(ar);
 	if (ret) {
 		ath6kl_err("Failed to init ath6kl core\n");
-		goto err_off;
+		goto err_core_alloc;
 	}
 
 	return ret;
 
-err_off:
-	ath6kl_sdio_power_off(ar_sdio);
-err_cfg80211:
-	ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_core_alloc:
+	ath6kl_core_free(ar_sdio->ar);
 err_dma:
 	kfree(ar_sdio->dma_buffer);
 err_hif:
@@ -883,8 +1306,8 @@
 {
 	struct ath6kl_sdio *ar_sdio;
 
-	ath6kl_dbg(ATH6KL_DBG_SDIO,
-		   "removed func %d vendor 0x%x device 0x%x\n",
+	ath6kl_dbg(ATH6KL_DBG_BOOT,
+		   "sdio removed func %d vendor 0x%x device 0x%x\n",
 		   func->num, func->vendor, func->device);
 
 	ar_sdio = sdio_get_drvdata(func);
@@ -892,9 +1315,7 @@
 	ath6kl_stop_txrx(ar_sdio->ar);
 	cancel_work_sync(&ar_sdio->wr_async_work);
 
-	ath6kl_unavail_ev(ar_sdio->ar);
-
-	ath6kl_sdio_power_off(ar_sdio);
+	ath6kl_core_cleanup(ar_sdio->ar);
 
 	kfree(ar_sdio->dma_buffer);
 	kfree(ar_sdio);
@@ -903,16 +1324,19 @@
 static const struct sdio_device_id ath6kl_sdio_devices[] = {
 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
 	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
 	{},
 };
 
 MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
 
 static struct sdio_driver ath6kl_sdio_driver = {
-	.name = "ath6kl_sdio",
+	.name = "ath6kl",
 	.id_table = ath6kl_sdio_devices,
 	.probe = ath6kl_sdio_probe,
 	.remove = ath6kl_sdio_remove,
+	.drv.pm = ATH6KL_SDIO_PM_OPS,
 };
 
 static int __init ath6kl_sdio_init(void)
@@ -938,13 +1362,19 @@
 MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
 MODULE_LICENSE("Dual BSD/GPL");
 
-MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index c9a7605..108a723 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -20,7 +20,7 @@
 #define AR6003_BOARD_DATA_SZ		1024
 #define AR6003_BOARD_EXT_DATA_SZ	768
 
-#define AR6004_BOARD_DATA_SZ     7168
+#define AR6004_BOARD_DATA_SZ     6144
 #define AR6004_BOARD_EXT_DATA_SZ 0
 
 #define RESET_CONTROL_ADDRESS		0x00000000
@@ -320,7 +320,10 @@
 |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)
 |------------------------------------------------------------------------------|
 */
+#define HI_OPTION_FW_MODE_BITS	       0x2
 #define HI_OPTION_FW_MODE_SHIFT        0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS      0x2
 #define HI_OPTION_FW_SUBMODE_SHIFT     0x14
 
 /* Convert a Target virtual address into a Target physical address */
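The new *_BITS macros record how wide the per-interface firmware mode and submode fields are (two bits each) next to the existing shifts. A hedged sketch of how several interfaces' modes could be packed into the option word with these macros (the packing helper itself is hypothetical; the authoritative layout is defined by the target firmware):

	/* Illustration only: pack per-vif fw modes/submodes into one word. */
	static inline u32 example_pack_fw_modes(const u8 *mode,
						const u8 *submode, int nvif)
	{
		u32 opt = 0;
		int i;

		for (i = 0; i < nvif; i++) {
			opt |= (mode[i] & ((1 << HI_OPTION_FW_MODE_BITS) - 1)) <<
			       (HI_OPTION_FW_MODE_SHIFT +
				i * HI_OPTION_FW_MODE_BITS);
			opt |= (submode[i] &
				((1 << HI_OPTION_FW_SUBMODE_BITS) - 1)) <<
			       (HI_OPTION_FW_SUBMODE_SHIFT +
				i * HI_OPTION_FW_SUBMODE_BITS);
		}

		return opt;
	}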
@@ -331,20 +334,6 @@
 	(((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
 	(((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
 
-#define AR6003_REV2_APP_LOAD_ADDRESS            0x543180
-#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS      0x57E500
-#define AR6003_REV2_DATASET_PATCH_ADDRESS       0x57e884
-#define AR6003_REV2_RAM_RESERVE_SIZE            6912
-
-#define AR6003_REV3_APP_LOAD_ADDRESS            0x545000
-#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS      0x542330
-#define AR6003_REV3_DATASET_PATCH_ADDRESS       0x57FF74
-#define AR6003_REV3_RAM_RESERVE_SIZE            512
-
-#define AR6004_REV1_BOARD_DATA_ADDRESS          0x435400
-#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS      0x437000
-#define AR6004_REV1_RAM_RESERVE_SIZE            11264
-
 #define ATH6KL_FWLOG_PAYLOAD_SIZE		1500
 
 struct ath6kl_dbglog_buf {
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index a711707..506a303 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -77,12 +77,13 @@
 	return ar->node_map[ep_map].ep_id;
 }
 
-static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
 				bool *more_data)
 {
 	struct ethhdr *datap = (struct ethhdr *) skb->data;
 	struct ath6kl_sta *conn = NULL;
 	bool ps_queued = false, is_psq_empty = false;
+	struct ath6kl *ar = vif->ar;
 
 	if (is_multicast_ether_addr(datap->h_dest)) {
 		u8 ctr = 0;
@@ -100,7 +101,7 @@
 			 * If this transmit is not because of a Dtim Expiry
 			 * q it.
 			 */
-			if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
 				bool is_mcastq_empty = false;
 
 				spin_lock_bh(&ar->mcastpsq_lock);
@@ -116,6 +117,7 @@
 				 */
 				if (is_mcastq_empty)
 					ath6kl_wmi_set_pvb_cmd(ar->wmi,
+							       vif->fw_vif_idx,
 							       MCAST_AID, 1);
 
 				ps_queued = true;
@@ -131,7 +133,7 @@
 			}
 		}
 	} else {
-		conn = ath6kl_find_sta(ar, datap->h_dest);
+		conn = ath6kl_find_sta(vif, datap->h_dest);
 		if (!conn) {
 			dev_kfree_skb(skb);
 
@@ -154,6 +156,7 @@
 				 */
 				if (is_psq_empty)
 					ath6kl_wmi_set_pvb_cmd(ar->wmi,
+							       vif->fw_vif_idx,
 							       conn->aid, 1);
 
 				ps_queued = true;
@@ -235,6 +238,7 @@
 	struct ath6kl *ar = ath6kl_priv(dev);
 	struct ath6kl_cookie *cookie = NULL;
 	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+	struct ath6kl_vif *vif = netdev_priv(dev);
 	u32 map_no = 0;
 	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
 	u8 ac = 99 ; /* initialize to unmapped ac */
@@ -246,7 +250,7 @@
 		   skb, skb->data, skb->len);
 
 	/* If target is not associated */
-	if (!test_bit(CONNECTED, &ar->flag)) {
+	if (!test_bit(CONNECTED, &vif->flags)) {
 		dev_kfree_skb(skb);
 		return 0;
 	}
@@ -255,15 +259,21 @@
 		goto fail_tx;
 
 	/* AP mode Power saving processing */
-	if (ar->nw_type == AP_NETWORK) {
-		if (ath6kl_powersave_ap(ar, skb, &more_data))
+	if (vif->nw_type == AP_NETWORK) {
+		if (ath6kl_powersave_ap(vif, skb, &more_data))
 			return 0;
 	}
 
 	if (test_bit(WMI_ENABLED, &ar->flag)) {
 		if (skb_headroom(skb) < dev->needed_headroom) {
-			WARN_ON(1);
-			goto fail_tx;
+			struct sk_buff *tmp_skb = skb;
+
+			skb = skb_realloc_headroom(skb, dev->needed_headroom);
+			kfree_skb(tmp_skb);
+			if (skb == NULL) {
+				vif->net_stats.tx_dropped++;
+				return 0;
+			}
 		}
 
 		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@@ -272,18 +282,20 @@
 		}
 
 		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
-					    more_data, 0, 0, NULL)) {
+					    more_data, 0, 0, NULL,
+					    vif->fw_vif_idx)) {
 			ath6kl_err("wmi_data_hdr_add failed\n");
 			goto fail_tx;
 		}
 
-		if ((ar->nw_type == ADHOC_NETWORK) &&
-		     ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+		if ((vif->nw_type == ADHOC_NETWORK) &&
+		     ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
 			chk_adhoc_ps_mapping = true;
 		else {
 			/* get the stream mapping */
-			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
-				    0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+				    vif->fw_vif_idx, skb,
+				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
 			if (ret)
 				goto fail_tx;
 		}
@@ -354,8 +366,8 @@
 fail_tx:
 	dev_kfree_skb(skb);
 
-	ar->net_stats.tx_dropped++;
-	ar->net_stats.tx_aborted_errors++;
+	vif->net_stats.tx_dropped++;
+	vif->net_stats.tx_aborted_errors++;
 
 	return 0;
 }
@@ -426,7 +438,9 @@
 					       struct htc_packet *packet)
 {
 	struct ath6kl *ar = target->dev->ar;
+	struct ath6kl_vif *vif;
 	enum htc_endpoint_id endpoint = packet->endpoint;
+	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
 
 	if (endpoint == ar->ctrl_ep) {
 		/*
@@ -439,19 +453,11 @@
 		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
 		spin_unlock_bh(&ar->lock);
 		ath6kl_err("wmi ctrl ep is full\n");
-		return HTC_SEND_FULL_KEEP;
+		return action;
 	}
 
 	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
-		return HTC_SEND_FULL_KEEP;
-
-	if (ar->nw_type == ADHOC_NETWORK)
-		/*
-		 * In adhoc mode, we cannot differentiate traffic
-		 * priorities so there is no need to continue, however we
-		 * should stop the network.
-		 */
-		goto stop_net_queues;
+		return action;
 
 	/*
 	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@@ -464,24 +470,36 @@
 		 * Give preference to the highest priority stream by
 		 * dropping the packets which overflowed.
 		 */
-		return HTC_SEND_FULL_DROP;
+		action = HTC_SEND_FULL_DROP;
 
-stop_net_queues:
-	spin_lock_bh(&ar->lock);
-	set_bit(NETQ_STOPPED, &ar->flag);
-	spin_unlock_bh(&ar->lock);
-	netif_stop_queue(ar->net_dev);
+	/* FIXME: Locking */
+	spin_lock_bh(&ar->list_lock);
+	list_for_each_entry(vif, &ar->vif_list, list) {
+		if (vif->nw_type == ADHOC_NETWORK ||
+		    action != HTC_SEND_FULL_DROP) {
+			spin_unlock_bh(&ar->list_lock);
 
-	return HTC_SEND_FULL_KEEP;
+			spin_lock_bh(&vif->if_lock);
+			set_bit(NETQ_STOPPED, &vif->flags);
+			spin_unlock_bh(&vif->if_lock);
+			netif_stop_queue(vif->ndev);
+
+			return action;
+		}
+	}
+	spin_unlock_bh(&ar->list_lock);
+
+	return action;
 }
 
 /* TODO this needs to be looked at */
-static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
 				     enum htc_endpoint_id eid, u32 map_no)
 {
+	struct ath6kl *ar = vif->ar;
 	u32 i;
 
-	if (ar->nw_type != ADHOC_NETWORK)
+	if (vif->nw_type != ADHOC_NETWORK)
 		return;
 
 	if (!ar->ibss_ps_enable)
@@ -523,7 +541,9 @@
 	int status;
 	enum htc_endpoint_id eid;
 	bool wake_event = false;
-	bool flushing = false;
+	bool flushing[ATH6KL_VIF_MAX] = {false};
+	u8 if_idx;
+	struct ath6kl_vif *vif;
 
 	skb_queue_head_init(&skb_queue);
 
@@ -549,8 +569,6 @@
 		if (!skb || !skb->data)
 			goto fatal;
 
-		packet->buf = skb->data;
-
 		__skb_queue_tail(&skb_queue, skb);
 
 		if (!status && (packet->act_len != skb->len))
@@ -569,15 +587,30 @@
 				wake_event = true;
 		}
 
+		if (eid == ar->ctrl_ep) {
+			if_idx = wmi_cmd_hdr_get_if_idx(
+				(struct wmi_cmd_hdr *) packet->buf);
+		} else {
+			if_idx = wmi_data_hdr_get_if_idx(
+				(struct wmi_data_hdr *) packet->buf);
+		}
+
+		vif = ath6kl_get_vif_by_index(ar, if_idx);
+		if (!vif) {
+			ath6kl_free_cookie(ar, ath6kl_cookie);
+			continue;
+		}
+
 		if (status) {
 			if (status == -ECANCELED)
 				/* a packet was flushed  */
-				flushing = true;
+				flushing[if_idx] = true;
 
-			ar->net_stats.tx_errors++;
+			vif->net_stats.tx_errors++;
 
-			if (status != -ENOSPC)
-				ath6kl_err("tx error, status: 0x%x\n", status);
+			if (status != -ENOSPC && status != -ECANCELED)
+				ath6kl_warn("tx complete error: %d\n", status);
+
 			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
 				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
 				   __func__, skb, packet->buf, packet->act_len,
@@ -588,27 +621,34 @@
 				   __func__, skb, packet->buf, packet->act_len,
 				   eid, "OK");
 
-			flushing = false;
-			ar->net_stats.tx_packets++;
-			ar->net_stats.tx_bytes += skb->len;
+			flushing[if_idx] = false;
+			vif->net_stats.tx_packets++;
+			vif->net_stats.tx_bytes += skb->len;
 		}
 
-		ath6kl_tx_clear_node_map(ar, eid, map_no);
+		ath6kl_tx_clear_node_map(vif, eid, map_no);
 
 		ath6kl_free_cookie(ar, ath6kl_cookie);
 
-		if (test_bit(NETQ_STOPPED, &ar->flag))
-			clear_bit(NETQ_STOPPED, &ar->flag);
+		if (test_bit(NETQ_STOPPED, &vif->flags))
+			clear_bit(NETQ_STOPPED, &vif->flags);
 	}
 
 	spin_unlock_bh(&ar->lock);
 
 	__skb_queue_purge(&skb_queue);
 
-	if (test_bit(CONNECTED, &ar->flag)) {
-		if (!flushing)
-			netif_wake_queue(ar->net_dev);
+	/* FIXME: Locking */
+	spin_lock_bh(&ar->list_lock);
+	list_for_each_entry(vif, &ar->vif_list, list) {
+		if (test_bit(CONNECTED, &vif->flags) &&
+		    !flushing[vif->fw_vif_idx]) {
+			spin_unlock_bh(&ar->list_lock);
+			netif_wake_queue(vif->ndev);
+			spin_lock_bh(&ar->list_lock);
+		}
 	}
+	spin_unlock_bh(&ar->list_lock);
 
 	if (wake_event)
 		wake_up(&ar->event_wq);
@@ -1041,8 +1081,9 @@
 	struct ath6kl_sta *conn = NULL;
 	struct sk_buff *skb1 = NULL;
 	struct ethhdr *datap = NULL;
+	struct ath6kl_vif *vif;
 	u16 seq_no, offset;
-	u8 tid;
+	u8 tid, if_idx;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
 		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@@ -1050,7 +1091,23 @@
 		   packet->act_len, status);
 
 	if (status || !(skb->data + HTC_HDR_LENGTH)) {
-		ar->net_stats.rx_errors++;
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+	skb_pull(skb, HTC_HDR_LENGTH);
+
+	if (ept == ar->ctrl_ep) {
+		if_idx =
+		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+	} else {
+		if_idx =
+		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+	}
+
+	vif = ath6kl_get_vif_by_index(ar, if_idx);
+	if (!vif) {
 		dev_kfree_skb(skb);
 		return;
 	}
@@ -1059,28 +1116,28 @@
 	 * Take lock to protect buffer counts and adaptive power throughput
 	 * state.
 	 */
-	spin_lock_bh(&ar->lock);
+	spin_lock_bh(&vif->if_lock);
 
-	ar->net_stats.rx_packets++;
-	ar->net_stats.rx_bytes += packet->act_len;
+	vif->net_stats.rx_packets++;
+	vif->net_stats.rx_bytes += packet->act_len;
 
-	spin_unlock_bh(&ar->lock);
+	spin_unlock_bh(&vif->if_lock);
 
-	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
-	skb_pull(skb, HTC_HDR_LENGTH);
 
 	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
 			skb->data, skb->len);
 
-	skb->dev = ar->net_dev;
+	skb->dev = vif->ndev;
 
 	if (!test_bit(WMI_ENABLED, &ar->flag)) {
 		if (EPPING_ALIGNMENT_PAD > 0)
 			skb_pull(skb, EPPING_ALIGNMENT_PAD);
-		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
 		return;
 	}
 
+	ath6kl_check_wow_status(ar);
+
 	if (ept == ar->ctrl_ep) {
 		ath6kl_wmi_control_rx(ar->wmi, skb);
 		return;
@@ -1096,18 +1153,18 @@
 	 * that do not have LLC hdr. They are 16 bytes in size.
 	 * Allow these frames in the AP mode.
 	 */
-	if (ar->nw_type != AP_NETWORK &&
+	if (vif->nw_type != AP_NETWORK &&
 	    ((packet->act_len < min_hdr_len) ||
 	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
 		ath6kl_info("frame len is too short or too long\n");
-		ar->net_stats.rx_errors++;
-		ar->net_stats.rx_length_errors++;
+		vif->net_stats.rx_errors++;
+		vif->net_stats.rx_length_errors++;
 		dev_kfree_skb(skb);
 		return;
 	}
 
 	/* Get the Power save state of the STA */
-	if (ar->nw_type == AP_NETWORK) {
+	if (vif->nw_type == AP_NETWORK) {
 		meta_type = wmi_data_hdr_get_meta(dhdr);
 
 		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@@ -1129,7 +1186,7 @@
 		}
 
 		datap = (struct ethhdr *) (skb->data + offset);
-		conn = ath6kl_find_sta(ar, datap->h_source);
+		conn = ath6kl_find_sta(vif, datap->h_source);
 
 		if (!conn) {
 			dev_kfree_skb(skb);
@@ -1160,12 +1217,13 @@
 				while ((skbuff = skb_dequeue(&conn->psq))
 				       != NULL) {
 					spin_unlock_bh(&conn->psq_lock);
-					ath6kl_data_tx(skbuff, ar->net_dev);
+					ath6kl_data_tx(skbuff, vif->ndev);
 					spin_lock_bh(&conn->psq_lock);
 				}
 				spin_unlock_bh(&conn->psq_lock);
 				/* Clear the PVB for this STA */
-				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+						       conn->aid, 0);
 			}
 		}
 
@@ -1215,12 +1273,12 @@
 		return;
 	}
 
-	if (!(ar->net_dev->flags & IFF_UP)) {
+	if (!(vif->ndev->flags & IFF_UP)) {
 		dev_kfree_skb(skb);
 		return;
 	}
 
-	if (ar->nw_type == AP_NETWORK) {
+	if (vif->nw_type == AP_NETWORK) {
 		datap = (struct ethhdr *) skb->data;
 		if (is_multicast_ether_addr(datap->h_dest))
 			/*
@@ -1235,8 +1293,7 @@
 			 * frame to it on the air else send the
 			 * frame up the stack.
 			 */
-			struct ath6kl_sta *conn = NULL;
-			conn = ath6kl_find_sta(ar, datap->h_dest);
+			conn = ath6kl_find_sta(vif, datap->h_dest);
 
 			if (conn && ar->intra_bss) {
 				skb1 = skb;
@@ -1247,18 +1304,23 @@
 			}
 		}
 		if (skb1)
-			ath6kl_data_tx(skb1, ar->net_dev);
+			ath6kl_data_tx(skb1, vif->ndev);
+
+		if (skb == NULL) {
+			/* nothing to deliver up the stack */
+			return;
+		}
 	}
 
 	datap = (struct ethhdr *) skb->data;
 
 	if (is_unicast_ether_addr(datap->h_dest) &&
-	    aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+	    aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
 				  is_amsdu, skb))
 		/* aggregation code will handle the skb */
 		return;
 
-	ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
 }
 
 static void aggr_timeout(unsigned long arg)
@@ -1336,9 +1398,10 @@
 	memset(stats, 0, sizeof(struct rxtid_stats));
 }
 
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+			     u8 win_sz)
 {
-	struct aggr_info *p_aggr = ar->aggr_cntxt;
+	struct aggr_info *p_aggr = vif->aggr_cntxt;
 	struct rxtid *rxtid;
 	struct rxtid_stats *stats;
 	u16 hold_q_size;
@@ -1405,9 +1468,9 @@
 	return p_aggr;
 }
 
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
 {
-	struct aggr_info *p_aggr = ar->aggr_cntxt;
+	struct aggr_info *p_aggr = vif->aggr_cntxt;
 	struct rxtid *rxtid;
 
 	if (!p_aggr)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a7de23c..f6f2aa2 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -21,7 +21,7 @@
 #include "../regd.h"
 #include "../regd_common.h"
 
-static int ath6kl_wmi_sync_point(struct wmi *wmi);
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
 
 static const s32 wmi_rate_tbl[][2] = {
 	/* {W/O SGI, with SGI} */
@@ -81,6 +81,26 @@
 	return wmi->ep_id;
 }
 
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
+{
+	struct ath6kl_vif *vif, *found = NULL;
+
+	if (WARN_ON(if_idx > (ar->vif_max - 1)))
+		return NULL;
+
+	/* FIXME: Locking */
+	spin_lock_bh(&ar->list_lock);
+	list_for_each_entry(vif, &ar->vif_list, list) {
+		if (vif->fw_vif_idx == if_idx) {
+			found = vif;
+			break;
+		}
+	}
+	spin_unlock_bh(&ar->list_lock);
+
+	return found;
+}
+
 /*  Performs DIX to 802.3 encapsulation for transmit packets.
 *  Assumes the entire DIX header is contiguous and that there is
  *  enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
@@ -162,12 +182,12 @@
 int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
 			    u8 msg_type, bool more_data,
 			    enum wmi_data_hdr_data_type data_type,
-			    u8 meta_ver, void *tx_meta_info)
+			    u8 meta_ver, void *tx_meta_info, u8 if_idx)
 {
 	struct wmi_data_hdr *data_hdr;
 	int ret;
 
-	if (WARN_ON(skb == NULL))
+	if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
 		return -EINVAL;
 
 	if (tx_meta_info) {
@@ -189,7 +209,7 @@
 		    WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
 
 	data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
-	data_hdr->info3 = 0;
+	data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
 
 	return 0;
 }
@@ -216,7 +236,8 @@
 		return ip_pri;
 }
 
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+				       struct sk_buff *skb,
 				       u32 layer2_priority, bool wmm_enabled,
 				       u8 *ac)
 {
@@ -262,7 +283,12 @@
 			usr_pri = layer2_priority & 0x7;
 	}
 
-	/* workaround for WMM S5 */
+	/*
+	 * workaround for WMM S5
+	 *
+	 * FIXME: wmi->traffic_class is always 100 so this test doesn't
+	 * make sense
+	 */
 	if ((wmi->traffic_class == WMM_AC_VI) &&
 	    ((usr_pri == 5) || (usr_pri == 4)))
 		usr_pri = 1;
@@ -284,7 +310,7 @@
 			cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
 		/* Implicit streams are created with TSID 0xFF */
 		cmd.tsid = WMI_IMPLICIT_PSTREAM;
-		ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+		ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
 	}
 
 	*ac = traffic_class;
@@ -410,13 +436,14 @@
 }
 
 static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
-					      int len)
+					      int len, struct ath6kl_vif *vif)
 {
 	struct wmi_remain_on_chnl_event *ev;
 	u32 freq;
 	u32 dur;
 	struct ieee80211_channel *chan;
 	struct ath6kl *ar = wmi->parent_dev;
+	u32 id;
 
 	if (len < sizeof(*ev))
 		return -EINVAL;
@@ -426,26 +453,29 @@
 	dur = le32_to_cpu(ev->duration);
 	ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
 		   freq, dur);
-	chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+	chan = ieee80211_get_channel(ar->wiphy, freq);
 	if (!chan) {
 		ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
 			   "(freq=%u)\n", freq);
 		return -EINVAL;
 	}
-	cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT,
+	id = vif->last_roc_id;
+	cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
 				  dur, GFP_ATOMIC);
 
 	return 0;
 }
 
 static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
-						     u8 *datap, int len)
+						     u8 *datap, int len,
+						     struct ath6kl_vif *vif)
 {
 	struct wmi_cancel_remain_on_chnl_event *ev;
 	u32 freq;
 	u32 dur;
 	struct ieee80211_channel *chan;
 	struct ath6kl *ar = wmi->parent_dev;
+	u32 id;
 
 	if (len < sizeof(*ev))
 		return -EINVAL;
@@ -455,23 +485,29 @@
 	dur = le32_to_cpu(ev->duration);
 	ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
 		   "status=%u\n", freq, dur, ev->status);
-	chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+	chan = ieee80211_get_channel(ar->wiphy, freq);
 	if (!chan) {
 		ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
 			   "channel (freq=%u)\n", freq);
 		return -EINVAL;
 	}
-	cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan,
+	if (vif->last_cancel_roc_id &&
+	    vif->last_cancel_roc_id + 1 == vif->last_roc_id)
+		id = vif->last_cancel_roc_id; /* event for cancel command */
+	else
+		id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
+	vif->last_cancel_roc_id = 0;
+	cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
 					   NL80211_CHAN_NO_HT, GFP_ATOMIC);
 
 	return 0;
 }
 
-static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
+					 struct ath6kl_vif *vif)
 {
 	struct wmi_tx_status_event *ev;
 	u32 id;
-	struct ath6kl *ar = wmi->parent_dev;
 
 	if (len < sizeof(*ev))
 		return -EINVAL;
@@ -481,7 +517,7 @@
 	ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
 		   id, ev->ack_status);
 	if (wmi->last_mgmt_tx_frame) {
-		cfg80211_mgmt_tx_status(ar->net_dev, id,
+		cfg80211_mgmt_tx_status(vif->ndev, id,
 					wmi->last_mgmt_tx_frame,
 					wmi->last_mgmt_tx_frame_len,
 					!!ev->ack_status, GFP_ATOMIC);
@@ -493,12 +529,12 @@
 	return 0;
 }
 
-static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+					    struct ath6kl_vif *vif)
 {
 	struct wmi_p2p_rx_probe_req_event *ev;
 	u32 freq;
 	u16 dlen;
-	struct ath6kl *ar = wmi->parent_dev;
 
 	if (len < sizeof(*ev))
 		return -EINVAL;
@@ -513,10 +549,10 @@
 	}
 	ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
 		   "probe_req_report=%d\n",
-		   dlen, freq, ar->probe_req_report);
+		   dlen, freq, vif->probe_req_report);
 
-	if (ar->probe_req_report || ar->nw_type == AP_NETWORK)
-		cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+	if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
+		cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
 
 	return 0;
 }
@@ -536,12 +572,12 @@
 	return 0;
 }
 
-static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
+					 struct ath6kl_vif *vif)
 {
 	struct wmi_rx_action_event *ev;
 	u32 freq;
 	u16 dlen;
-	struct ath6kl *ar = wmi->parent_dev;
 
 	if (len < sizeof(*ev))
 		return -EINVAL;
@@ -555,7 +591,7 @@
 		return -EINVAL;
 	}
 	ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
-	cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+	cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
 
 	return 0;
 }
@@ -620,7 +656,8 @@
 }
 
 /* Send a "simple" wmi command -- one with no arguments */
-static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
+				 enum wmi_cmd_id cmd_id)
 {
 	struct sk_buff *skb;
 	int ret;
@@ -629,7 +666,7 @@
 	if (!skb)
 		return -ENOMEM;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
 
 	return ret;
 }
@@ -641,7 +678,6 @@
 	if (len < sizeof(struct wmi_ready_event_2))
 		return -EINVAL;
 
-	wmi->ready = true;
 	ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
 			   le32_to_cpu(ev->sw_version),
 			   le32_to_cpu(ev->abi_version));
@@ -673,32 +709,73 @@
 	cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
 	cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
 
-	ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG);
+	ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+			    NO_SYNC_WMIFLAG);
 
 	return 0;
 }
 
-static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
+{
+	struct sk_buff *skb;
+	struct roam_ctrl_cmd *cmd;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct roam_ctrl_cmd *) skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+
+	memcpy(cmd->info.bssid, bssid, ETH_ALEN);
+	cmd->roam_ctrl = WMI_FORCE_ROAM;
+
+	ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
+	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
+{
+	struct sk_buff *skb;
+	struct roam_ctrl_cmd *cmd;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct roam_ctrl_cmd *) skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->info.roam_mode = mode;
+	cmd->roam_ctrl = WMI_SET_ROAM_MODE;
+
+	ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+				       struct ath6kl_vif *vif)
 {
 	struct wmi_connect_event *ev;
 	u8 *pie, *peie;
-	struct ath6kl *ar = wmi->parent_dev;
 
 	if (len < sizeof(struct wmi_connect_event))
 		return -EINVAL;
 
 	ev = (struct wmi_connect_event *) datap;
 
-	if (ar->nw_type == AP_NETWORK) {
+	if (vif->nw_type == AP_NETWORK) {
 		/* AP mode start/STA connected event */
-		struct net_device *dev = ar->net_dev;
+		struct net_device *dev = vif->ndev;
 		if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
 			ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
 				   "(AP started)\n",
 				   __func__, le16_to_cpu(ev->u.ap_bss.ch),
 				   ev->u.ap_bss.bssid);
 			ath6kl_connect_ap_mode_bss(
-				ar, le16_to_cpu(ev->u.ap_bss.ch));
+				vif, le16_to_cpu(ev->u.ap_bss.ch));
 		} else {
 			ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
 				   "auth=%u keymgmt=%u cipher=%u apsd_info=%u "
@@ -710,7 +787,7 @@
 				   le16_to_cpu(ev->u.ap_sta.cipher),
 				   ev->u.ap_sta.apsd_info);
 			ath6kl_connect_ap_mode_sta(
-				ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+				vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
 				ev->u.ap_sta.keymgmt,
 				le16_to_cpu(ev->u.ap_sta.cipher),
 				ev->u.ap_sta.auth, ev->assoc_req_len,
@@ -755,7 +832,7 @@
 		pie += pie[1] + 2;
 	}
 
-	ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch),
+	ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
 			     ev->u.sta.bssid,
 			     le16_to_cpu(ev->u.sta.listen_intvl),
 			     le16_to_cpu(ev->u.sta.beacon_intvl),
@@ -834,14 +911,15 @@
 		alpha2[0] = country->isoName[0];
 		alpha2[1] = country->isoName[1];
 
-		regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2);
+		regulatory_hint(wmi->parent_dev->wiphy, alpha2);
 
 		ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
 				alpha2[0], alpha2[1]);
 	}
 }
 
-static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
+					  struct ath6kl_vif *vif)
 {
 	struct wmi_disconnect_event *ev;
 	wmi->traffic_class = 100;
@@ -857,10 +935,8 @@
 		   ev->disconn_reason, ev->assoc_resp_len);
 
 	wmi->is_wmm_enabled = false;
-	wmi->pair_crypto_type = NONE_CRYPT;
-	wmi->grp_crypto_type = NONE_CRYPT;
 
-	ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+	ath6kl_disconnect_event(vif, ev->disconn_reason,
 				ev->bssid, ev->assoc_resp_len, ev->assoc_info,
 				le16_to_cpu(ev->proto_reason_status));
 
@@ -886,7 +962,8 @@
 	return 0;
 }
 
-static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
+					   struct ath6kl_vif *vif)
 {
 	struct wmi_tkip_micerr_event *ev;
 
@@ -895,12 +972,20 @@
 
 	ev = (struct wmi_tkip_micerr_event *) datap;
 
-	ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+	ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
 
 	return 0;
 }
 
-static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+void ath6kl_wmi_sscan_timer(unsigned long ptr)
+{
+	struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
+
+	cfg80211_sched_scan_results(vif->ar->wiphy);
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
+				       struct ath6kl_vif *vif)
 {
 	struct wmi_bss_info_hdr2 *bih;
 	u8 *buf;
@@ -927,26 +1012,27 @@
 		return 0; /* Only update BSS table for now */
 
 	if (bih->frame_type == BEACON_FTYPE &&
-	    test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) {
-		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-		ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+	    test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
+		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+					 NONE_BSS_FILTER, 0);
 	}
 
-	channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch));
+	channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
 	if (channel == NULL)
 		return -EINVAL;
 
 	if (len < 8 + 2 + 2)
 		return -EINVAL;
 
-	if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) &&
-	    memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) {
+	if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
+	    && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
 		const u8 *tim;
 		tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
 				       len - 8 - 2 - 2);
 		if (tim && tim[1] >= 2) {
-			ar->assoc_bss_dtim_period = tim[3];
-			set_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+			vif->assoc_bss_dtim_period = tim[3];
+			set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
 		}
 	}
 
@@ -966,7 +1052,7 @@
 						  IEEE80211_STYPE_BEACON);
 		memset(mgmt->da, 0xff, ETH_ALEN);
 	} else {
-		struct net_device *dev = ar->net_dev;
+		struct net_device *dev = vif->ndev;
 
 		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 						  IEEE80211_STYPE_PROBE_RESP);
@@ -979,7 +1065,7 @@
 
 	memcpy(&mgmt->u.beacon, buf, len);
 
-	bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt,
+	bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt,
 					24 + len, (bih->snr - 95) * 100,
 					GFP_ATOMIC);
 	kfree(mgmt);
@@ -987,6 +1073,21 @@
 		return -ENOMEM;
 	cfg80211_put_bss(bss);
 
+	/*
+	 * Firmware doesn't return any event when scheduled scan has
+	 * finished, so we need to use a timer to find out when there are
+	 * no more results.
+	 *
+	 * The timer is started from the first bss info received, otherwise
+	 * the timer would not ever fire if the scan interval is short
+	 * enough.
+	 */
+	if (ar->state == ATH6KL_STATE_SCHED_SCAN &&
+	    !timer_pending(&vif->sched_scan_timer)) {
+		mod_timer(&vif->sched_scan_timer, jiffies +
+			  msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
+	}
+
 	return 0;
 }
 
@@ -1094,20 +1195,21 @@
 	return 0;
 }
 
-static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
+				       struct ath6kl_vif *vif)
 {
 	struct wmi_scan_complete_event *ev;
 
 	ev = (struct wmi_scan_complete_event *) datap;
 
-	ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+	ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
 	wmi->is_probe_ssid = false;
 
 	return 0;
 }
 
 static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
-					       int len)
+					       int len, struct ath6kl_vif *vif)
 {
 	struct wmi_neighbor_report_event *ev;
 	u8 i;
@@ -1125,7 +1227,7 @@
 		ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
 			   i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
 			   ev->neighbor[i].bss_flags);
-		cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i,
+		cfg80211_pmksa_candidate_notify(vif->ndev, i,
 						ev->neighbor[i].bssid,
 						!!(ev->neighbor[i].bss_flags &
 						   WMI_PREAUTH_CAPABLE_BSS),
@@ -1166,9 +1268,10 @@
 	return 0;
 }
 
-static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
+				     struct ath6kl_vif *vif)
 {
-	ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+	ath6kl_tgt_stats_event(vif, datap, len);
 
 	return 0;
 }
@@ -1222,7 +1325,7 @@
 	cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
 	memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
 
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
@@ -1322,7 +1425,8 @@
 	return 0;
 }
 
-static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+				   struct ath6kl_vif *vif)
 {
 	struct wmi_cac_event *reply;
 	struct ieee80211_tspec_ie *ts;
@@ -1343,7 +1447,8 @@
 		tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
 			IEEE80211_WMM_IE_TSPEC_TID_MASK;
 
-		ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+		ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+					      reply->ac, tsid);
 	} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
 		/*
 		 * Following assumes that there is only one outstanding
@@ -1358,7 +1463,8 @@
 				break;
 		}
 		if (index < (sizeof(active_tsids) * 8))
-			ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+			ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+						      reply->ac, index);
 	}
 
 	/*
@@ -1403,7 +1509,7 @@
 	cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
 	memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
 
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
@@ -1528,14 +1634,15 @@
 	return 0;
 }
 
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
 			enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum htc_endpoint_id ep_id = wmi->ep_id;
 	int ret;
+	u16 info1;
 
-	if (WARN_ON(skb == NULL))
+	if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1))))
 		return -EINVAL;
 
 	ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
@@ -1554,19 +1661,20 @@
 		 * Make sure all data currently queued is transmitted before
 		 * the cmd execution.  Establish a new sync point.
 		 */
-		ath6kl_wmi_sync_point(wmi);
+		ath6kl_wmi_sync_point(wmi, if_idx);
 	}
 
 	skb_push(skb, sizeof(struct wmi_cmd_hdr));
 
 	cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
 	cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
-	cmd_hdr->info1 = 0;	/* added for virtual interface */
+	info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
+	cmd_hdr->info1 = cpu_to_le16(info1);
 
 	/* Only for OPT_TX_CMD, use BE endpoint. */
 	if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
 		ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
-					      false, false, 0, NULL);
+					      false, false, 0, NULL, if_idx);
 		if (ret) {
 			dev_kfree_skb(skb);
 			return ret;
@@ -1582,20 +1690,22 @@
 		 * Make sure all new data queued waits for the command to
 		 * execute. Establish a new sync point.
 		 */
-		ath6kl_wmi_sync_point(wmi);
+		ath6kl_wmi_sync_point(wmi, if_idx);
 	}
 
 	return 0;
 }
 
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+			   enum network_type nw_type,
 			   enum dot11_auth_mode dot11_auth_mode,
 			   enum auth_mode auth_mode,
 			   enum crypto_type pairwise_crypto,
 			   u8 pairwise_crypto_len,
 			   enum crypto_type group_crypto,
 			   u8 group_crypto_len, int ssid_len, u8 *ssid,
-			   u8 *bssid, u16 channel, u32 ctrl_flags)
+			   u8 *bssid, u16 channel, u32 ctrl_flags,
+			   u8 nw_subtype)
 {
 	struct sk_buff *skb;
 	struct wmi_connect_cmd *cc;
@@ -1635,19 +1745,19 @@
 	cc->grp_crypto_len = group_crypto_len;
 	cc->ch = cpu_to_le16(channel);
 	cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+	cc->nw_subtype = nw_subtype;
 
 	if (bssid != NULL)
 		memcpy(cc->bssid, bssid, ETH_ALEN);
 
-	wmi->pair_crypto_type = pairwise_crypto;
-	wmi->grp_crypto_type = group_crypto;
-
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
+				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+			     u16 channel)
 {
 	struct sk_buff *skb;
 	struct wmi_reconnect_cmd *cc;
@@ -1668,13 +1778,13 @@
 	if (bssid != NULL)
 		memcpy(cc->bssid, bssid, ETH_ALEN);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
 {
 	int ret;
 
@@ -1683,12 +1793,79 @@
 	wmi->traffic_class = 100;
 
 	/* Disconnect command does not need to do a SYNC before. */
-	ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+	ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
 
 	return ret;
 }
 
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+			     enum wmi_scan_type scan_type,
+			     u32 force_fgscan, u32 is_legacy,
+			     u32 home_dwell_time, u32 force_scan_interval,
+			     s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
+{
+	struct sk_buff *skb;
+	struct wmi_begin_scan_cmd *sc;
+	s8 size;
+	int i, band, ret;
+	struct ath6kl *ar = wmi->parent_dev;
+	int num_rates;
+
+	size = sizeof(struct wmi_begin_scan_cmd);
+
+	if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+		return -EINVAL;
+
+	if (num_chan > WMI_MAX_CHANNELS)
+		return -EINVAL;
+
+	if (num_chan)
+		size += sizeof(u16) * (num_chan - 1);
+
+	skb = ath6kl_wmi_get_new_buf(size);
+	if (!skb)
+		return -ENOMEM;
+
+	sc = (struct wmi_begin_scan_cmd *) skb->data;
+	sc->scan_type = scan_type;
+	sc->force_fg_scan = cpu_to_le32(force_fgscan);
+	sc->is_legacy = cpu_to_le32(is_legacy);
+	sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+	sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+	sc->no_cck = cpu_to_le32(no_cck);
+	sc->num_ch = num_chan;
+
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		struct ieee80211_supported_band *sband =
+		    ar->wiphy->bands[band];
+		u32 ratemask = rates[band];
+		u8 *supp_rates = sc->supp_rates[band].rates;
+		num_rates = 0;
+
+		for (i = 0; i < sband->n_bitrates; i++) {
+			if ((BIT(i) & ratemask) == 0)
+				continue; /* skip rate */
+			supp_rates[num_rates++] =
+			    (u8) (sband->bitrates[i].bitrate / 5);
+		}
+		sc->supp_rates[band].nrates = num_rates;
+	}
+
+	for (i = 0; i < num_chan; i++)
+		sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
+				  NO_SYNC_WMIFLAG);
+
+	return ret;
+}
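
For reference, the per-band rate loop above converts between two unit conventions: cfg80211 reports bitrates in units of 100 kbps, while the supported-rates entries handed to the firmware use the usual 802.11 encoding of 500 kbps units, hence the division by 5. A small worked example (illustrative only; the variable names are not part of the patch):

	/* 54 Mbit/s: cfg80211 bitrate = 540 (units of 100 kbps),
	 * 540 / 5 = 108, the value stored in sc->supp_rates[band].rates[].
	 */
	u16 cfg80211_bitrate = 540;
	u8 wmi_rate = (u8) (cfg80211_bitrate / 5);	/* == 108 */
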
+
+/* ath6kl_wmi_startscan_cmd is to be deprecated. Use
+ * ath6kl_wmi_beginscan_cmd instead. The new function supports P2P
+ * mgmt operations using station interface.
+ */
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+			     enum wmi_scan_type scan_type,
 			     u32 force_fgscan, u32 is_legacy,
 			     u32 home_dwell_time, u32 force_scan_interval,
 			     s8 num_chan, u16 *ch_list)
@@ -1724,13 +1901,14 @@
 	for (i = 0; i < num_chan; i++)
 		sc->ch_list[i] = cpu_to_le16(ch_list[i]);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
+			      u16 fg_start_sec,
 			      u16 fg_end_sec, u16 bg_sec,
 			      u16 minact_chdw_msec, u16 maxact_chdw_msec,
 			      u16 pas_chdw_msec, u8 short_scan_ratio,
@@ -1757,12 +1935,12 @@
 	sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
 	sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
 {
 	struct sk_buff *skb;
 	struct wmi_bss_filter_cmd *cmd;
@@ -1779,12 +1957,12 @@
 	cmd->bss_filter = filter;
 	cmd->ie_mask = cpu_to_le32(ie_mask);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
 			      u8 ssid_len, u8 *ssid)
 {
 	struct sk_buff *skb;
@@ -1816,12 +1994,13 @@
 	cmd->ssid_len = ssid_len;
 	memcpy(cmd->ssid, ssid, ssid_len);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+				  u16 listen_interval,
 				  u16 listen_beacons)
 {
 	struct sk_buff *skb;
@@ -1836,12 +2015,12 @@
 	cmd->listen_intvl = cpu_to_le16(listen_interval);
 	cmd->num_beacons = cpu_to_le16(listen_beacons);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
 {
 	struct sk_buff *skb;
 	struct wmi_power_mode_cmd *cmd;
@@ -1855,12 +2034,12 @@
 	cmd->pwr_mode = pwr_mode;
 	wmi->pwr_mode = pwr_mode;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
 			    u16 ps_poll_num, u16 dtim_policy,
 			    u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
 			    u16 ps_fail_event_policy)
@@ -1881,12 +2060,12 @@
 	pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
 	pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
 {
 	struct sk_buff *skb;
 	struct wmi_disc_timeout_cmd *cmd;
@@ -1899,15 +2078,20 @@
 	cmd = (struct wmi_disc_timeout_cmd *) skb->data;
 	cmd->discon_timeout = timeout;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
 				  NO_SYNC_WMIFLAG);
+
+	if (ret == 0)
+		ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
+
 	return ret;
 }
 
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
 			  enum crypto_type key_type,
 			  u8 key_usage, u8 key_len,
-			  u8 *key_rsc, u8 *key_material,
+			  u8 *key_rsc, unsigned int key_rsc_len,
+			  u8 *key_material,
 			  u8 key_op_ctrl, u8 *mac_addr,
 			  enum wmi_sync_flag sync_flag)
 {
@@ -1920,7 +2104,7 @@
 		   key_index, key_type, key_usage, key_len, key_op_ctrl);
 
 	if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
-	    (key_material == NULL))
+	    (key_material == NULL) || key_rsc_len > 8)
 		return -EINVAL;
 
 	if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
@@ -1938,20 +2122,20 @@
 	memcpy(cmd->key, key_material, key_len);
 
 	if (key_rsc != NULL)
-		memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+		memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
 
 	cmd->key_op_ctrl = key_op_ctrl;
 
 	if (mac_addr)
 		memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
 				  sync_flag);
 
 	return ret;
 }
 
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
 {
 	struct sk_buff *skb;
 	struct wmi_add_krk_cmd *cmd;
@@ -1964,12 +2148,13 @@
 	cmd = (struct wmi_add_krk_cmd *) skb->data;
 	memcpy(cmd->krk, krk, WMI_KRK_LEN);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
+				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
 {
 	struct sk_buff *skb;
 	struct wmi_delete_cipher_key_cmd *cmd;
@@ -1985,13 +2170,13 @@
 	cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
 	cmd->key_index = key_index;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
 			    const u8 *pmkid, bool set)
 {
 	struct sk_buff *skb;
@@ -2018,14 +2203,14 @@
 		cmd->enable = PMKID_DISABLE;
 	}
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
 static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
-			      enum htc_endpoint_id ep_id)
+			      enum htc_endpoint_id ep_id, u8 if_idx)
 {
 	struct wmi_data_hdr *data_hdr;
 	int ret;
@@ -2037,14 +2222,14 @@
 
 	data_hdr = (struct wmi_data_hdr *) skb->data;
 	data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
-	data_hdr->info3 = 0;
+	data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
 
 	ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
 
 	return ret;
 }
 
-static int ath6kl_wmi_sync_point(struct wmi *wmi)
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
 {
 	struct sk_buff *skb;
 	struct wmi_sync_cmd *cmd;
@@ -2100,7 +2285,7 @@
 	 * Send sync cmd followed by sync data messages on all
 	 * endpoints being used
 	 */
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	if (ret)
@@ -2119,7 +2304,7 @@
 					       traffic_class);
 		ret =
 		    ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
-					      ep_id);
+					      ep_id, if_idx);
 
 		if (ret)
 			break;
@@ -2142,7 +2327,7 @@
 	return ret;
 }
 
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
 				  struct wmi_create_pstream_cmd *params)
 {
 	struct sk_buff *skb;
@@ -2231,12 +2416,13 @@
 		ath6kl_indicate_tx_activity(wmi->parent_dev,
 					    params->traffic_class, true);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+				  u8 tsid)
 {
 	struct sk_buff *skb;
 	struct wmi_delete_pstream_cmd *cmd;
@@ -2272,7 +2458,7 @@
 		   "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
 		   traffic_class, tsid);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
 				  SYNC_BEFORE_WMIFLAG);
 
 	spin_lock_bh(&wmi->lock);
@@ -2311,17 +2497,173 @@
 	cmd = (struct wmi_set_ip_cmd *) skb->data;
 	memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
+				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
-					    int len)
+static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
 {
-	if (len < sizeof(struct wmi_get_wow_list_reply))
-		return -EINVAL;
+	u16 active_tsids;
+	u8 stream_exist;
+	int i;
 
-	return 0;
+	/*
+	 * Relinquish credits from all implicitly created pstreams
+	 * when we go to sleep. If user-created explicit thinstreams
+	 * exist within a fatpipe, leave them intact for the user
+	 * to delete.
+	 */
+	spin_lock_bh(&wmi->lock);
+	stream_exist = wmi->fat_pipe_exist;
+	spin_unlock_bh(&wmi->lock);
+
+	for (i = 0; i < WMM_NUM_AC; i++) {
+		if (stream_exist & (1 << i)) {
+
+			/*
+			 * FIXME: Is this lock & unlock inside
+			 * for loop correct? may need rework.
+			 */
+			spin_lock_bh(&wmi->lock);
+			active_tsids = wmi->stream_exist_for_ac[i];
+			spin_unlock_bh(&wmi->lock);
+
+			/*
+			 * If there are no user-created thin streams,
+			 * delete the fatpipe
+			 */
+			if (!active_tsids) {
+				stream_exist &= ~(1 << i);
+				/*
+				 * Indicate inactivity to driver layer for
+				 * this fatpipe (pstream)
+				 */
+				ath6kl_indicate_tx_activity(wmi->parent_dev,
+							    i, false);
+			}
+		}
+	}
+
+	/* FIXME: Can we do this assignment without locking ? */
+	spin_lock_bh(&wmi->lock);
+	wmi->fat_pipe_exist = stream_exist;
+	spin_unlock_bh(&wmi->lock);
+}
+
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+				       enum ath6kl_host_mode host_mode)
+{
+	struct sk_buff *skb;
+	struct wmi_set_host_sleep_mode_cmd *cmd;
+	int ret;
+
+	if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+	    (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+		ath6kl_err("invalid host sleep mode: %d\n", host_mode);
+		return -EINVAL;
+	}
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+	if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+		ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+		cmd->asleep = cpu_to_le32(1);
+	} else
+		cmd->awake = cpu_to_le32(1);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+				  WMI_SET_HOST_SLEEP_MODE_CMDID,
+				  NO_SYNC_WMIFLAG);
+	return ret;
+}
+
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+				enum ath6kl_wow_mode wow_mode,
+				u32 filter, u16 host_req_delay)
+{
+	struct sk_buff *skb;
+	struct wmi_set_wow_mode_cmd *cmd;
+	int ret;
+
+	if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
+	     wow_mode != ATH6KL_WOW_MODE_DISABLE) {
+		ath6kl_err("invalid wow mode: %d\n", wow_mode);
+		return -EINVAL;
+	}
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+	cmd->enable_wow = cpu_to_le32(wow_mode);
+	cmd->filter = cpu_to_le32(filter);
+	cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+				  NO_SYNC_WMIFLAG);
+	return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+				   u8 list_id, u8 filter_size,
+				   u8 filter_offset, u8 *filter, u8 *mask)
+{
+	struct sk_buff *skb;
+	struct wmi_add_wow_pattern_cmd *cmd;
+	u16 size;
+	u8 *filter_mask;
+	int ret;
+
+	/*
+	 * Allocate additional memory in the buffer to hold the
+	 * filter and mask values, which together take twice filter_size.
+	 */
+	size = sizeof(*cmd) + (2 * filter_size);
+
+	skb = ath6kl_wmi_get_new_buf(size);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
+	cmd->filter_list_id = list_id;
+	cmd->filter_size = filter_size;
+	cmd->filter_offset = filter_offset;
+
+	memcpy(cmd->filter, filter, filter_size);
+
+	filter_mask = (u8 *) (cmd->filter + filter_size);
+	memcpy(filter_mask, mask, filter_size);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
+				  NO_SYNC_WMIFLAG);
+
+	return ret;
+}
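
The 2 * filter_size allocation above reflects the command layout: the pattern bytes are copied immediately after the fixed header and the mask bytes immediately after the pattern. A minimal sketch of that layout (the helper name below is hypothetical, added only for illustration):

/*
 * | wmi_add_wow_pattern_cmd | filter[filter_size] | mask[filter_size] |
 */
static u8 *wow_pattern_mask(struct wmi_add_wow_pattern_cmd *cmd)
{
	/* The mask starts right after the filter_size pattern bytes. */
	return (u8 *) (cmd->filter + cmd->filter_size);
}
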
+
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+				   u16 list_id, u16 filter_id)
+{
+	struct sk_buff *skb;
+	struct wmi_del_wow_pattern_cmd *cmd;
+	int ret;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
+	cmd->filter_list_id = cpu_to_le16(list_id);
+	cmd->filter_id = cpu_to_le16(filter_id);
+
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
+				  NO_SYNC_WMIFLAG);
+	return ret;
 }
 
 static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
@@ -2336,7 +2678,7 @@
 	cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
 	cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
 
 	return ret;
 }
@@ -2379,12 +2721,12 @@
 	return ret;
 }
 
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
 {
-	return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+	return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
 }
 
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
 {
 	struct sk_buff *skb;
 	struct wmi_set_tx_pwr_cmd *cmd;
@@ -2397,18 +2739,24 @@
 	cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
 	cmd->dbM = dbM;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
 {
-	return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+	return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
 }
 
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
+{
+	return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
+}
+
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+				 u8 preamble_policy)
 {
 	struct sk_buff *skb;
 	struct wmi_set_lpreamble_cmd *cmd;
@@ -2422,7 +2770,7 @@
 	cmd->status = status;
 	cmd->preamble_policy = preamble_policy;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
@@ -2440,11 +2788,12 @@
 	cmd = (struct wmi_set_rts_cmd *) skb->data;
 	cmd->threshold = cpu_to_le16(threshold);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
+				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
 {
 	struct sk_buff *skb;
 	struct wmi_set_wmm_txop_cmd *cmd;
@@ -2460,12 +2809,13 @@
 	cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
 	cmd->txop_enable = cfg;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
 				  NO_SYNC_WMIFLAG);
 	return ret;
 }
 
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+				 u8 keep_alive_intvl)
 {
 	struct sk_buff *skb;
 	struct wmi_set_keepalive_cmd *cmd;
@@ -2477,10 +2827,13 @@
 
 	cmd = (struct wmi_set_keepalive_cmd *) skb->data;
 	cmd->keep_alive_intvl = keep_alive_intvl;
-	wmi->keep_alive_intvl = keep_alive_intvl;
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
 				  NO_SYNC_WMIFLAG);
+
+	if (ret == 0)
+		ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
+
 	return ret;
 }
 
@@ -2495,7 +2848,7 @@
 
 	memcpy(skb->data, buf, len);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+	ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
 
 	return ret;
 }
@@ -2528,28 +2881,31 @@
 	return 0;
 }
 
-static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+					 struct ath6kl_vif *vif)
 {
 	struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
 
-	aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+	aggr_recv_addba_req_evt(vif, cmd->tid,
 				le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
 
 	return 0;
 }
 
-static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+					 struct ath6kl_vif *vif)
 {
 	struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
 
-	aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+	aggr_recv_delba_req_evt(vif, cmd->tid);
 
 	return 0;
 }
 
 /*  AP mode functions */
 
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+				 struct wmi_connect_cmd *p)
 {
 	struct sk_buff *skb;
 	struct wmi_connect_cmd *cm;
@@ -2562,7 +2918,7 @@
 	cm = (struct wmi_connect_cmd *) skb->data;
 	memcpy(cm, p, sizeof(*cm));
 
-	res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+	res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
 				  NO_SYNC_WMIFLAG);
 	ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
 		   "ctrl_flags=0x%x-> res=%d\n",
@@ -2571,7 +2927,8 @@
 	return res;
 }
 
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
+			   u16 reason)
 {
 	struct sk_buff *skb;
 	struct wmi_ap_set_mlme_cmd *cm;
@@ -2585,11 +2942,12 @@
 	cm->reason = cpu_to_le16(reason);
 	cm->cmd = cmd;
 
-	return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID,
+	return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
+				      struct ath6kl_vif *vif)
 {
 	struct wmi_pspoll_event *ev;
 
@@ -2598,19 +2956,21 @@
 
 	ev = (struct wmi_pspoll_event *) datap;
 
-	ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+	ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
 
 	return 0;
 }
 
-static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
+					  struct ath6kl_vif *vif)
 {
-	ath6kl_dtimexpiry_event(wmi->parent_dev);
+	ath6kl_dtimexpiry_event(vif);
 
 	return 0;
 }
 
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
+			   bool flag)
 {
 	struct sk_buff *skb;
 	struct wmi_ap_set_pvb_cmd *cmd;
@@ -2625,13 +2985,14 @@
 	cmd->rsvd = cpu_to_le16(0);
 	cmd->flag = cpu_to_le32(flag);
 
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return 0;
 }
 
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+				       u8 rx_meta_ver,
 				       bool rx_dot11_hdr, bool defrag_on_host)
 {
 	struct sk_buff *skb;
@@ -2648,14 +3009,14 @@
 	cmd->meta_ver = rx_meta_ver;
 
 	/* Delete the local aggr state, on host */
-	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+	ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
 				  NO_SYNC_WMIFLAG);
 
 	return ret;
 }
 
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
-			     u8 ie_len)
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+			     const u8 *ie, u8 ie_len)
 {
 	struct sk_buff *skb;
 	struct wmi_set_appie_cmd *p;
@@ -2669,8 +3030,11 @@
 	p = (struct wmi_set_appie_cmd *) skb->data;
 	p->mgmt_frm_type = mgmt_frm_type;
 	p->ie_len = ie_len;
-	memcpy(p->ie_info, ie, ie_len);
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID,
+
+	if (ie != NULL && ie_len > 0)
+		memcpy(p->ie_info, ie, ie_len);
+
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
@@ -2688,11 +3052,11 @@
 	cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
 	cmd->disable = disable ? 1 : 0;
 
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
 {
 	struct sk_buff *skb;
 	struct wmi_remain_on_chnl_cmd *p;
@@ -2706,12 +3070,16 @@
 	p = (struct wmi_remain_on_chnl_cmd *) skb->data;
 	p->freq = cpu_to_le32(freq);
 	p->duration = cpu_to_le32(dur);
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
-			       const u8 *data, u16 data_len)
+/* ath6kl_wmi_send_action_cmd is to be deprecated. Use
+ * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
+ * mgmt operations using station interface.
+ */
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+			       u32 wait, const u8 *data, u16 data_len)
 {
 	struct sk_buff *skb;
 	struct wmi_send_action_cmd *p;
@@ -2731,6 +3099,7 @@
 	}
 
 	kfree(wmi->last_mgmt_tx_frame);
+	memcpy(buf, data, data_len);
 	wmi->last_mgmt_tx_frame = buf;
 	wmi->last_mgmt_tx_frame_len = data_len;
 
@@ -2742,18 +3111,61 @@
 	p->wait = cpu_to_le32(wait);
 	p->len = cpu_to_le16(data_len);
 	memcpy(p->data, data, data_len);
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
-				       const u8 *dst,
-				       const u8 *data, u16 data_len)
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+			       u32 wait, const u8 *data, u16 data_len,
+			       u32 no_cck)
+{
+	struct sk_buff *skb;
+	struct wmi_send_mgmt_cmd *p;
+	u8 *buf;
+
+	if (wait)
+		return -EINVAL; /* Offload for wait not supported */
+
+	buf = kmalloc(data_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+	if (!skb) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	kfree(wmi->last_mgmt_tx_frame);
+	memcpy(buf, data, data_len);
+	wmi->last_mgmt_tx_frame = buf;
+	wmi->last_mgmt_tx_frame_len = data_len;
+
+	ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
+		   "len=%u\n", id, freq, wait, data_len);
+	p = (struct wmi_send_mgmt_cmd *) skb->data;
+	p->id = cpu_to_le32(id);
+	p->freq = cpu_to_le32(freq);
+	p->wait = cpu_to_le32(wait);
+	p->no_cck = cpu_to_le32(no_cck);
+	p->len = cpu_to_le16(data_len);
+	memcpy(p->data, data, data_len);
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+				       const u8 *dst, const u8 *data,
+				       u16 data_len)
 {
 	struct sk_buff *skb;
 	struct wmi_p2p_probe_response_cmd *p;
+	size_t cmd_len = sizeof(*p) + data_len;
 
-	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+	if (data_len == 0)
+		cmd_len++; /* work around target minimum length requirement */
+
+	skb = ath6kl_wmi_get_new_buf(cmd_len);
 	if (!skb)
 		return -ENOMEM;
 
@@ -2764,11 +3176,12 @@
 	memcpy(p->destination_addr, dst, ETH_ALEN);
 	p->len = cpu_to_le16(data_len);
 	memcpy(p->data, data, data_len);
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+				   WMI_SEND_PROBE_RESPONSE_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
 {
 	struct sk_buff *skb;
 	struct wmi_probe_req_report_cmd *p;
@@ -2781,11 +3194,11 @@
 		   enable);
 	p = (struct wmi_probe_req_report_cmd *) skb->data;
 	p->enable = enable ? 1 : 0;
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
 {
 	struct sk_buff *skb;
 	struct wmi_get_p2p_info *p;
@@ -2798,14 +3211,15 @@
 		   info_req_flags);
 	p = (struct wmi_get_p2p_info *) skb->data;
 	p->info_req_flags = cpu_to_le32(info_req_flags);
-	return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID,
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi)
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
 {
 	ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
-	return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+	return ath6kl_wmi_simple_cmd(wmi, if_idx,
+				     WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
 }
 
 static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
@@ -2818,7 +3232,6 @@
 
 	if (skb->len < sizeof(struct wmix_cmd_hdr)) {
 		ath6kl_err("bad packet 1\n");
-		wmi->stat.cmd_len_err++;
 		return -EINVAL;
 	}
 
@@ -2840,7 +3253,6 @@
 		break;
 	default:
 		ath6kl_warn("unknown cmd id 0x%x\n", id);
-		wmi->stat.cmd_id_err++;
 		ret = -EINVAL;
 		break;
 	}
@@ -2848,12 +3260,19 @@
 	return ret;
 }
 
+static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+	return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
+}
+
 /* Control Path */
 int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd;
+	struct ath6kl_vif *vif;
 	u32 len;
 	u16 id;
+	u8 if_idx;
 	u8 *datap;
 	int ret = 0;
 
@@ -2863,12 +3282,12 @@
 	if (skb->len < sizeof(struct wmi_cmd_hdr)) {
 		ath6kl_err("bad packet 1\n");
 		dev_kfree_skb(skb);
-		wmi->stat.cmd_len_err++;
 		return -EINVAL;
 	}
 
 	cmd = (struct wmi_cmd_hdr *) skb->data;
 	id = le16_to_cpu(cmd->cmd_id);
+	if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
 
 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
 
@@ -2879,6 +3298,15 @@
 	ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
 			datap, len);
 
+	vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+	if (!vif) {
+		ath6kl_dbg(ATH6KL_DBG_WMI,
+			   "Wmi event for unavailable vif, vif_index:%d\n",
+			    if_idx);
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
 	switch (id) {
 	case WMI_GET_BITRATE_CMDID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -2898,11 +3326,11 @@
 		break;
 	case WMI_CONNECT_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
-		ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_DISCONNECT_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
-		ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_PEER_NODE_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
@@ -2910,11 +3338,11 @@
 		break;
 	case WMI_TKIP_MICERR_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
-		ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_BSSINFO_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
-		ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_REGDOMAIN_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
@@ -2926,11 +3354,12 @@
 		break;
 	case WMI_NEIGHBOR_REPORT_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
-		ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+							  vif);
 		break;
 	case WMI_SCAN_COMPLETE_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
-		ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+		ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
 		break;
 	case WMI_CMDERROR_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
@@ -2938,7 +3367,7 @@
 		break;
 	case WMI_REPORT_STATISTICS_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
-		ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_RSSI_THRESHOLD_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
@@ -2953,6 +3382,7 @@
 		break;
 	case WMI_REPORT_ROAM_TBL_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+		ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
 		break;
 	case WMI_EXTENSION_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
@@ -2960,7 +3390,7 @@
 		break;
 	case WMI_CAC_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
-		ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_CHANNEL_CHANGE_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
@@ -2996,7 +3426,6 @@
 		break;
 	case WMI_GET_WOW_LIST_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
-		ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
 		break;
 	case WMI_GET_PMKID_LIST_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
@@ -3004,25 +3433,25 @@
 		break;
 	case WMI_PSPOLL_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
-		ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_DTIMEXPIRY_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
-		ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_SET_PARAMS_REPLY_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
 		break;
 	case WMI_ADDBA_REQ_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
-		ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_ADDBA_RESP_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
 		break;
 	case WMI_DELBA_REQ_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
-		ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI,
@@ -3038,21 +3467,21 @@
 		break;
 	case WMI_REMAIN_ON_CHNL_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
-		ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI,
 			   "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
 		ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
-								len);
+								len, vif);
 		break;
 	case WMI_TX_STATUS_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
-		ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_RX_PROBE_REQ_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
-		ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_P2P_CAPABILITIES_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
@@ -3060,7 +3489,7 @@
 		break;
 	case WMI_RX_ACTION_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
-		ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len);
+		ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
 		break;
 	case WMI_P2P_INFO_EVENTID:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
@@ -3068,7 +3497,6 @@
 		break;
 	default:
 		ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
-		wmi->stat.cmd_id_err++;
 		ret = -EINVAL;
 		break;
 	}
@@ -3078,11 +3506,8 @@
 	return ret;
 }
 
-static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+void ath6kl_wmi_reset(struct wmi *wmi)
 {
-	if (!wmi)
-		return;
-
 	spin_lock_bh(&wmi->lock);
 
 	wmi->fat_pipe_exist = 0;
@@ -3103,16 +3528,9 @@
 
 	wmi->parent_dev = dev;
 
-	ath6kl_wmi_qos_state_init(wmi);
-
 	wmi->pwr_mode = REC_POWER;
-	wmi->phy_mode = WMI_11G_MODE;
 
-	wmi->pair_crypto_type = NONE_CRYPT;
-	wmi->grp_crypto_type = NONE_CRYPT;
-
-	wmi->ht_allowed[A_BAND_24GHZ] = 1;
-	wmi->ht_allowed[A_BAND_5GHZ] = 1;
+	ath6kl_wmi_reset(wmi);
 
 	return wmi;
 }
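
Note: the hunks above thread a vif argument through the per-event WMI handlers and fold the old QoS-state init into ath6kl_wmi_reset(). The sketch below only illustrates how the control-path dispatcher can pick the target interface once the interface-index bits land in the command header; it relies on wmi_cmd_hdr_get_if_idx() and ath6kl_get_vif_by_index() introduced in wmi.h further down, and dispatch_event() is a hypothetical stand-in for the big switch on the event id shown above.

	static int ath6kl_wmi_proc_event(struct wmi *wmi, struct sk_buff *skb)
	{
		struct wmi_cmd_hdr *cmd = (struct wmi_cmd_hdr *) skb->data;
		struct ath6kl_vif *vif;

		/* bits 0-3 of info1 carry the interface index */
		vif = ath6kl_get_vif_by_index(wmi->parent_dev,
					      wmi_cmd_hdr_get_if_idx(cmd));
		if (!vif)
			return -EINVAL;	/* event for an interface we do not have */

		skb_pull(skb, sizeof(struct wmi_cmd_hdr));
		return dispatch_event(wmi, vif, skb->data, skb->len);
	}
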
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index f8e644d..42ac311 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -93,11 +93,6 @@
 	u8 last_rssi_poll_event;
 };
 
-struct wmi_stats {
-	u32 cmd_len_err;
-	u32 cmd_id_err;
-};
-
 struct wmi_data_sync_bufs {
 	u8 traffic_class;
 	struct sk_buff *skb;
@@ -111,32 +106,26 @@
 #define WMM_AC_VO   3		/* voice */
 
 struct wmi {
-	bool ready;
 	u16 stream_exist_for_ac[WMM_NUM_AC];
 	u8 fat_pipe_exist;
 	struct ath6kl *parent_dev;
-	struct wmi_stats stat;
 	u8 pwr_mode;
-	u8 phy_mode;
-	u8 keep_alive_intvl;
 	spinlock_t lock;
 	enum htc_endpoint_id ep_id;
 	struct sq_threshold_params
 	    sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
-	enum crypto_type pair_crypto_type;
-	enum crypto_type grp_crypto_type;
 	bool is_wmm_enabled;
-	u8 ht_allowed[A_NUM_BANDS];
 	u8 traffic_class;
 	bool is_probe_ssid;
 
 	u8 *last_mgmt_tx_frame;
 	size_t last_mgmt_tx_frame_len;
+	u8 saved_pwr_mode;
 };
 
 struct host_app_area {
-	u32 wmi_protocol_ver;
-};
+	__le32 wmi_protocol_ver;
+} __packed;
 
 enum wmi_msg_type {
 	DATA_MSGTYPE = 0x0,
@@ -184,6 +173,8 @@
 #define WMI_DATA_HDR_META_MASK      0x7
 #define WMI_DATA_HDR_META_SHIFT     13
 
+#define WMI_DATA_HDR_IF_IDX_MASK    0xF
+
 struct wmi_data_hdr {
 	s8 rssi;
 
@@ -208,6 +199,12 @@
 	 * b15:b13      - META_DATA_VERSION 0 - 7
 	 */
 	__le16 info2;
+
+	/*
+	 * usage of info3, 16-bit:
+	 * b3:b0	- Interface index
+	 * b15:b4	- Reserved
+	 */
 	__le16 info3;
 } __packed;
 
@@ -250,6 +247,11 @@
 			       WMI_DATA_HDR_META_MASK;
 }
 
+static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
+{
+	return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
+}
+
 /* Tx meta version definitions */
 #define WMI_MAX_TX_META_SZ	12
 #define WMI_META_VERSION_1	0x01
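
Note: the new info3 nibble mirrors the control path on the data path, so received data frames now carry the destination interface index in the data header. A minimal, hedged sketch of routing an rx frame to the owning vif before the header is stripped (ath6kl_get_vif_by_index() is the lookup declared near the end of this header):

	static struct ath6kl_vif *ath6kl_rx_target_vif(struct ath6kl *ar,
						       struct sk_buff *skb)
	{
		struct wmi_data_hdr *dhdr = (struct wmi_data_hdr *) skb->data;

		/* bits 0-3 of info3; b4-b15 are reserved per the comment above */
		return ath6kl_get_vif_by_index(ar, wmi_data_hdr_get_if_idx(dhdr));
	}
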
@@ -299,6 +301,8 @@
 	u8 csum_flags;
 } __packed;
 
+#define WMI_CMD_HDR_IF_ID_MASK 0xF
+
 /* Control Path */
 struct wmi_cmd_hdr {
 	__le16 cmd_id;
@@ -312,6 +316,11 @@
 	__le16 reserved;
 } __packed;
 
+static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
+{
+	return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
+}
+
 /* List of WMI commands */
 enum wmi_cmd_id {
 	WMI_CONNECT_CMDID = 0x0001,
@@ -320,6 +329,10 @@
 	WMI_SYNCHRONIZE_CMDID,
 	WMI_CREATE_PSTREAM_CMDID,
 	WMI_DELETE_PSTREAM_CMDID,
+	/* WMI_START_SCAN_CMDID is to be deprecated. Use
+	 * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt
+	 * operations using station interface.
+	 */
 	WMI_START_SCAN_CMDID,
 	WMI_SET_SCAN_PARAMS_CMDID,
 	WMI_SET_BSS_FILTER_CMDID,
@@ -533,12 +546,61 @@
 	WMI_GTK_OFFLOAD_OP_CMDID,
 	WMI_REMAIN_ON_CHNL_CMDID,
 	WMI_CANCEL_REMAIN_ON_CHNL_CMDID,
+	/* WMI_SEND_ACTION_CMDID is to be deprecated. Use
+	 * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt
+	 * operations using station interface.
+	 */
 	WMI_SEND_ACTION_CMDID,
 	WMI_PROBE_REQ_REPORT_CMDID,
 	WMI_DISABLE_11B_RATES_CMDID,
 	WMI_SEND_PROBE_RESPONSE_CMDID,
 	WMI_GET_P2P_INFO_CMDID,
 	WMI_AP_JOIN_BSS_CMDID,
+
+	WMI_SMPS_ENABLE_CMDID,
+	WMI_SMPS_CONFIG_CMDID,
+	WMI_SET_RATECTRL_PARM_CMDID,
+	/*  LPL specific commands*/
+	WMI_LPL_FORCE_ENABLE_CMDID,
+	WMI_LPL_SET_POLICY_CMDID,
+	WMI_LPL_GET_POLICY_CMDID,
+	WMI_LPL_GET_HWSTATE_CMDID,
+	WMI_LPL_SET_PARAMS_CMDID,
+	WMI_LPL_GET_PARAMS_CMDID,
+
+	WMI_SET_BUNDLE_PARAM_CMDID,
+
+	/*GreenTx specific commands*/
+
+	WMI_GREENTX_PARAMS_CMDID,
+
+	WMI_RTT_MEASREQ_CMDID,
+	WMI_RTT_CAPREQ_CMDID,
+	WMI_RTT_STATUSREQ_CMDID,
+
+	/* WPS Commands */
+	WMI_WPS_START_CMDID,
+	WMI_GET_WPS_STATUS_CMDID,
+
+	/* More P2P commands */
+	WMI_SET_NOA_CMDID,
+	WMI_GET_NOA_CMDID,
+	WMI_SET_OPPPS_CMDID,
+	WMI_GET_OPPPS_CMDID,
+	WMI_ADD_PORT_CMDID,
+	WMI_DEL_PORT_CMDID,
+
+	/* 802.11w cmd */
+	WMI_SET_RSN_CAP_CMDID,
+	WMI_GET_RSN_CAP_CMDID,
+	WMI_SET_IGTK_CMDID,
+
+	WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID,
+	WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID,
+
+	WMI_SEND_MGMT_CMDID,
+	WMI_BEGIN_SCAN_CMDID,
+
 };
 
 enum wmi_mgmt_frame_type {
@@ -558,6 +620,14 @@
 	AP_NETWORK = 0x10,
 };
 
+enum network_subtype {
+	SUBTYPE_NONE,
+	SUBTYPE_BT,
+	SUBTYPE_P2PDEV,
+	SUBTYPE_P2PCLIENT,
+	SUBTYPE_P2PGO,
+};
+
 enum dot11_auth_mode {
 	OPEN_AUTH = 0x01,
 	SHARED_AUTH = 0x02,
@@ -576,9 +646,6 @@
 	WPA2_AUTH_CCKM = 0x40,
 };
 
-#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
-#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
-
 #define WMI_MIN_KEY_INDEX   0
 #define WMI_MAX_KEY_INDEX   3
 
@@ -617,6 +684,7 @@
 	CONNECT_CSA_FOLLOW_BSS = 0x0020,
 	CONNECT_DO_WPA_OFFLOAD = 0x0040,
 	CONNECT_DO_NOT_DEAUTH = 0x0080,
+	CONNECT_WPS_FLAG = 0x0100,
 };
 
 struct wmi_connect_cmd {
@@ -632,6 +700,7 @@
 	__le16 ch;
 	u8 bssid[ETH_ALEN];
 	__le32 ctrl_flags;
+	u8 nw_subtype;
 } __packed;
 
 /* WMI_RECONNECT_CMDID */
@@ -719,6 +788,43 @@
 	WMI_SHORT_SCAN = 1,
 };
 
+struct wmi_supp_rates {
+	u8 nrates;
+	u8 rates[ATH6KL_RATE_MAXSIZE];
+};
+
+struct wmi_begin_scan_cmd {
+	__le32 force_fg_scan;
+
+	/* for legacy cisco AP compatibility */
+	__le32 is_legacy;
+
+	/* max duration in the home channel(msec) */
+	__le32 home_dwell_time;
+
+	/* time interval between scans (msec) */
+	__le32 force_scan_intvl;
+
+	/* no CCK rates */
+	__le32 no_cck;
+
+	/* enum wmi_scan_type */
+	u8 scan_type;
+
+	/* Supported rates to advertise in the probe request frames */
+	struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS];
+
+	/* how many channels follow */
+	u8 num_ch;
+
+	/* channels in Mhz */
+	__le16 ch_list[1];
+} __packed;
+
+/* wmi_start_scan_cmd is to be deprecated. Use
+ * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
 struct wmi_start_scan_cmd {
 	__le32 force_fg_scan;
 
@@ -741,9 +847,6 @@
 	__le16 ch_list[1];
 } __packed;
 
-/* WMI_SET_SCAN_PARAMS_CMDID */
-#define WMI_SHORTSCANRATIO_DEFAULT      3
-
 /*
  *  Warning: scan control flag value of 0xFF is used to disable
  *  all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
@@ -776,13 +879,6 @@
 	ENABLE_SCAN_ABORT_EVENT = 0x40
 };
 
-#define DEFAULT_SCAN_CTRL_FLAGS			\
-	(CONNECT_SCAN_CTRL_FLAGS |		\
-	 SCAN_CONNECTED_CTRL_FLAGS |		\
-	 ACTIVE_SCAN_CTRL_FLAGS |		\
-	 ROAM_SCAN_CTRL_FLAGS |			\
-	 ENABLE_AUTO_CTRL_FLAGS)
-
 struct wmi_scan_params_cmd {
 	  /* sec */
 	__le16 fg_start_period;
@@ -1365,14 +1461,20 @@
 	WMI_SET_LRSSI_SCAN_PARAMS,
 };
 
+enum wmi_roam_mode {
+	WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+	WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+	WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
 struct bss_bias {
 	u8 bssid[ETH_ALEN];
-	u8  bias;
+	s8 bias;
 } __packed;
 
 struct bss_bias_info {
 	u8 num_bss;
-	struct bss_bias bss_bias[1];
+	struct bss_bias bss_bias[0];
 } __packed;
 
 struct low_rssi_scan_params {
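
Note: turning bss_bias[1] into a zero-length trailing array means sizeof(struct bss_bias_info) now covers only the num_bss byte, so callers must add the entries to the buffer size themselves. A small illustration of the arithmetic (num_bss here is a hypothetical local, not taken from this diff):

	/* zero-length array: entries are appended after the one-byte header */
	size_t payload_len = sizeof(struct bss_bias_info) +
			     num_bss * sizeof(struct bss_bias);
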
@@ -1385,10 +1487,11 @@
 
 struct roam_ctrl_cmd {
 	union {
-		u8 bssid[ETH_ALEN];
-		u8 roam_mode;
-		struct bss_bias_info bss;
-		struct low_rssi_scan_params params;
+		u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+		u8 roam_mode; /* WMI_SET_ROAM_MODE */
+		struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+		struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
+						     */
 	} __packed info;
 	u8 roam_ctrl;
 } __packed;
@@ -1455,6 +1558,10 @@
 	u8 is_mcast;
 } __packed;
 
+enum wmi_scan_status {
+	WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
 /* WMI_SCAN_COMPLETE_EVENTID */
 struct wmi_scan_complete_event {
 	a_sle32 status;
@@ -1635,6 +1742,12 @@
 	u8 reserved;
 } __packed;
 
+struct wmi_target_roam_tbl {
+	__le16 roam_mode;
+	__le16 num_entries;
+	struct wmi_bss_roam_info info[];
+} __packed;
+
 /* WMI_CAC_EVENTID */
 enum cac_indication {
 	CAC_INDICATION_ADMISSION = 0x00,
@@ -1771,7 +1884,6 @@
 #define WSC_REG_ACTIVE     1
 #define WSC_REG_INACTIVE   0
 
-#define WOW_MAX_FILTER_LISTS	 1
 #define WOW_MAX_FILTERS_PER_LIST 4
 #define WOW_PATTERN_SIZE	 64
 #define WOW_MASK_SIZE		 64
@@ -1794,17 +1906,52 @@
 	__le32 ips[MAX_IP_ADDRS];
 } __packed;
 
-/* WMI_GET_WOW_LIST_CMD reply  */
-struct wmi_get_wow_list_reply {
-	/* number of patterns in reply */
-	u8 num_filters;
+enum ath6kl_wow_filters {
+	WOW_FILTER_SSID			= BIT(1),
+	WOW_FILTER_OPTION_MAGIC_PACKET  = BIT(2),
+	WOW_FILTER_OPTION_EAP_REQ	= BIT(3),
+	WOW_FILTER_OPTION_PATTERNS	= BIT(4),
+	WOW_FILTER_OPTION_OFFLOAD_ARP	= BIT(5),
+	WOW_FILTER_OPTION_OFFLOAD_NS	= BIT(6),
+	WOW_FILTER_OPTION_OFFLOAD_GTK	= BIT(7),
+	WOW_FILTER_OPTION_8021X_4WAYHS	= BIT(8),
+	WOW_FILTER_OPTION_NLO_DISCVRY	= BIT(9),
+	WOW_FILTER_OPTION_NWK_DISASSOC	= BIT(10),
+	WOW_FILTER_OPTION_GTK_ERROR	= BIT(11),
+	WOW_FILTER_OPTION_TEST_MODE	= BIT(15),
+};
 
-	/* this is filter # x of total num_filters */
-	u8 this_filter_num;
+enum ath6kl_host_mode {
+	ATH6KL_HOST_MODE_AWAKE,
+	ATH6KL_HOST_MODE_ASLEEP,
+};
 
-	u8 wow_mode;
-	u8 host_mode;
-	struct wow_filter wow_filters[1];
+struct wmi_set_host_sleep_mode_cmd {
+	__le32 awake;
+	__le32 asleep;
+} __packed;
+
+enum ath6kl_wow_mode {
+	ATH6KL_WOW_MODE_DISABLE,
+	ATH6KL_WOW_MODE_ENABLE,
+};
+
+struct wmi_set_wow_mode_cmd {
+	__le32 enable_wow;
+	__le32 filter;
+	__le16 host_req_delay;
+} __packed;
+
+struct wmi_add_wow_pattern_cmd {
+	u8 filter_list_id;
+	u8 filter_size;
+	u8 filter_offset;
+	u8 filter[0];
+} __packed;
+
+struct wmi_del_wow_pattern_cmd {
+	__le16 filter_list_id;
+	__le16 filter_id;
 } __packed;
 
 /* WMI_SET_AKMP_PARAMS_CMD */
@@ -1905,7 +2052,7 @@
  * !!! Warning !!!
  * -Changing the following values needs compilation of both driver and firmware
  */
-#define AP_MAX_NUM_STA          8
+#define AP_MAX_NUM_STA          10
 
 /* Spl. AID used to set DTIM flag in the beacons */
 #define MCAST_AID               0xFF
@@ -1988,6 +2135,10 @@
 	__le32 duration;
 } __packed;
 
+/* wmi_send_action_cmd is to be deprecated. Use
+ * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
 struct wmi_send_action_cmd {
 	__le32 id;
 	__le32 freq;
@@ -1996,6 +2147,15 @@
 	u8 data[0];
 } __packed;
 
+struct wmi_send_mgmt_cmd {
+	__le32 id;
+	__le32 freq;
+	__le32 wait;
+	__le32 no_cck;
+	__le16 len;
+	u8 data[0];
+} __packed;
+
 struct wmi_tx_status_event {
 	__le32 id;
 	u8 ack_status;
@@ -2163,120 +2323,162 @@
 int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
 			    u8 msg_type, bool more_data,
 			    enum wmi_data_hdr_data_type data_type,
-			    u8 meta_ver, void *tx_meta_info);
+			    u8 meta_ver, void *tx_meta_info, u8 if_idx);
 
 int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
 int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
-				       u32 layer2_priority, bool wmm_enabled,
-				       u8 *ac);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+				       struct sk_buff *skb, u32 layer2_priority,
+				       bool wmm_enabled, u8 *ac);
 
 int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
 
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
 			enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
 
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+			   enum network_type nw_type,
 			   enum dot11_auth_mode dot11_auth_mode,
 			   enum auth_mode auth_mode,
 			   enum crypto_type pairwise_crypto,
 			   u8 pairwise_crypto_len,
 			   enum crypto_type group_crypto,
 			   u8 group_crypto_len, int ssid_len, u8 *ssid,
-			   u8 *bssid, u16 channel, u32 ctrl_flags);
+			   u8 *bssid, u16 channel, u32 ctrl_flags,
+			   u8 nw_subtype);
 
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+			     u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+			     enum wmi_scan_type scan_type,
 			     u32 force_fgscan, u32 is_legacy,
 			     u32 home_dwell_time, u32 force_scan_interval,
 			     s8 num_chan, u16 *ch_list);
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+			     enum wmi_scan_type scan_type,
+			     u32 force_fgscan, u32 is_legacy,
+			     u32 home_dwell_time, u32 force_scan_interval,
+			     s8 num_chan, u16 *ch_list, u32 no_cck,
+			     u32 *rates);
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
 			      u16 fg_end_sec, u16 bg_sec,
 			      u16 minact_chdw_msec, u16 maxact_chdw_msec,
 			      u16 pas_chdw_msec, u8 short_scan_ratio,
 			      u8 scan_ctrl_flag, u32 max_dfsch_act_time,
 			      u16 maxact_scan_per_ssid);
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
+			     u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
 			      u8 ssid_len, u8 *ssid);
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+				  u16 listen_interval,
 				  u16 listen_beacons);
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
 			    u16 ps_poll_num, u16 dtim_policy,
 			    u16 tx_wakup_policy, u16 num_tx_to_wakeup,
 			    u16 ps_fail_event_policy);
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
 				  struct wmi_create_pstream_cmd *pstream);
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+				  u8 tsid);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
 
 int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
 				 u8 preamble_policy);
 
 int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
 int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
 
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
 			  enum crypto_type key_type,
 			  u8 key_usage, u8 key_len,
-			  u8 *key_rsc, u8 *key_material,
+			  u8 *key_rsc, unsigned int key_rsc_len,
+			  u8 *key_material,
 			  u8 key_op_ctrl, u8 *mac_addr,
 			  enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
 			    const u8 *pmkid, bool set);
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
 
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+				 u8 keep_alive_intvl);
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
 
 s32 ath6kl_wmi_get_rate(s8 rate_index);
 
 int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+				       enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+				enum ath6kl_wow_mode wow_mode,
+				u32 filter, u16 host_req_delay);
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+				   u8 list_id, u8 filter_size,
+				   u8 filter_offset, u8 *filter, u8 *mask);
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+				   u16 list_id, u16 filter_id);
 int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
 
 /* AP mode */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+				 struct wmi_connect_cmd *p);
 
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason);
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+			   const u8 *mac, u16 reason);
 
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
 
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+				       u8 rx_meta_version,
 				       bool rx_dot11_hdr, bool defrag_on_host);
 
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
-			     u8 ie_len);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+			     const u8 *ie, u8 ie_len);
 
 /* P2P */
 int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
 
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur);
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+				  u32 dur);
 
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
-			       const u8 *data, u16 data_len);
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+			       u32 wait, const u8 *data, u16 data_len);
 
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
-				       const u8 *dst,
-				       const u8 *data, u16 data_len);
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+			       u32 wait, const u8 *data, u16 data_len,
+			       u32 no_cck);
 
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable);
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+				       const u8 *dst, const u8 *data,
+				       u16 data_len);
 
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags);
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
 
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi);
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
 
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
-			     u8 ie_len);
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
 
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+			     const u8 *ie, u8 ie_len);
+
+void ath6kl_wmi_sscan_timer(unsigned long ptr);
+
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
 void *ath6kl_wmi_init(struct ath6kl *devt);
 void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
 
 #endif /* WMI_H */
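
Note: taken together, the new wmi_begin_scan_cmd layout and the if_idx-aware ath6kl_wmi_cmd_send() suggest a command-construction pattern along the following lines. This is only a sketch: ath6kl_wmi_get_new_buf() and NO_SYNC_WMIFLAG are assumed from the driver's usual conventions and are not shown in this diff, and the buffer is assumed to come back zeroed.

	static int send_begin_scan(struct wmi *wmi, u8 if_idx,
				   enum wmi_scan_type scan_type,
				   u8 num_ch, const u16 *channels)
	{
		struct wmi_begin_scan_cmd *sc;
		struct sk_buff *skb;
		size_t len;
		int i;

		if (!num_ch)
			return -EINVAL;

		/* ch_list[1] already accounts for one entry in sizeof() */
		len = sizeof(*sc) + (num_ch - 1) * sizeof(__le16);

		skb = ath6kl_wmi_get_new_buf(len);	/* assumed helper */
		if (!skb)
			return -ENOMEM;

		sc = (struct wmi_begin_scan_cmd *) skb->data;
		sc->scan_type = scan_type;
		sc->no_cck = cpu_to_le32(0);		/* multi-byte fields are __le32 */
		sc->num_ch = num_ch;
		for (i = 0; i < num_ch; i++)
			sc->ch_list[i] = cpu_to_le16(channels[i]);

		return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
					   NO_SYNC_WMIFLAG);
	}
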
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9c08c6..dc6be4a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,9 @@
 	tristate
 config ATH9K_COMMON
 	tristate
+config ATH9K_DFS_DEBUGFS
+	def_bool y
+	depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
 
 config ATH9K
 	tristate "Atheros 802.11n wireless cards support"
@@ -25,6 +28,7 @@
 
 config ATH9K_PCI
 	bool "Atheros ath9k PCI/PCIe bus support"
+	default y
 	depends on ATH9K && PCI
 	---help---
 	  This option enables the PCI bus support in ath9k.
@@ -50,6 +54,25 @@
 
 	  Also required for changing debug message flags at run time.
 
+config ATH9K_DFS_CERTIFIED
+	bool "Atheros DFS support for certified platforms"
+	depends on ATH9K && EXPERT
+	default n
+	---help---
+	  This option enables DFS support for initiating radiation on
+	  ath9k. There is no way to dynamically detect if a card was DFS
+	  certified and as such this is left as a build time option. This
+	  option should only be enabled by system integrators that can
+	  guarantee that all the platforms that their kernel will run on
+	  have obtained appropriate regulatory body certification for a
+	  respective Atheros card by using ath9k on the target shipping
+	  platforms.
+
+	  This is currently only a placeholder for future DFS support,
+	  as DFS support requires more components that still need to be
+	  developed. At this point enabling this option won't do anything
+	  except increase code size.
+
 config ATH9K_RATE_CONTROL
 	bool "Atheros ath9k rate control"
 	depends on ATH9K
@@ -58,6 +81,14 @@
 	  Say Y, if you want to use the ath9k specific rate control
 	  module instead of minstrel_ht.
 
+config ATH9K_BTCOEX_SUPPORT
+	bool "Atheros ath9k bluetooth coexistence support"
+	depends on ATH9K
+	default y
+	---help---
+	  Say Y, if you want to use the ath9k radios together with
+	  Bluetooth modules in the same system.
+
 config ATH9K_HTC
        tristate "Atheros HTC based wireless cards support"
        depends on USB && MAC80211
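
Note: ATH9K_DFS_DEBUGFS is a derived symbol; def_bool y plus the depends line makes it y exactly when both ATH9K_DEBUGFS and ATH9K_DFS_CERTIFIED are set, and the Makefile change below only builds dfs_debug.o in that case. On the C side such a symbol is normally consumed through the generated CONFIG_ macro; the stub pattern below is a hedged illustration, with hypothetical function names rather than anything taken from this series.

	#ifdef CONFIG_ATH9K_DFS_DEBUGFS
	void ath9k_dfs_init_debug(struct ath_softc *sc);	/* provided by dfs_debug.o */
	#else
	static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
	#endif
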
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 36ed3c4..da02242 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,11 +4,14 @@
 		main.o \
 		recv.o \
 		xmit.o \
+		mci.o \
 
 ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_ATH9K_PCI) += pci.o
 ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
 ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
 
@@ -33,7 +36,8 @@
 		ar9002_mac.o \
 		ar9003_mac.o \
 		ar9003_eeprom.o \
-		ar9003_paprd.o
+		ar9003_paprd.o \
+		ar9003_mci.o
 
 obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
 
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index a639b94..bc56f57 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -136,8 +136,8 @@
 		cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
 	}
 
-	ath_dbg(common, ATH_DBG_ANI,
-		"Writing ofdmbase=%u   cckbase=%u\n", ofdm_base, cck_base);
+	ath_dbg(common, ANI, "Writing ofdmbase=%u   cckbase=%u\n",
+		ofdm_base, cck_base);
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
@@ -268,8 +268,7 @@
 
 	aniState->noiseFloor = BEACON_RSSI(ah);
 
-	ath_dbg(common, ATH_DBG_ANI,
-		"**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+	ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
 		aniState->ofdmNoiseImmunityLevel,
 		immunityLevel, aniState->noiseFloor,
 		aniState->rssiThrLow, aniState->rssiThrHigh);
@@ -336,8 +335,7 @@
 	const struct ani_cck_level_entry *entry_cck;
 
 	aniState->noiseFloor = BEACON_RSSI(ah);
-	ath_dbg(common, ATH_DBG_ANI,
-		"**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+	ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
 		aniState->cckNoiseImmunityLevel, immunityLevel,
 		aniState->noiseFloor, aniState->rssiThrLow,
 		aniState->rssiThrHigh);
@@ -481,8 +479,7 @@
 
 	if (ah->opmode != NL80211_IFTYPE_STATION
 	    && ah->opmode != NL80211_IFTYPE_ADHOC) {
-		ath_dbg(common, ATH_DBG_ANI,
-			"Reset ANI state opmode %u\n", ah->opmode);
+		ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
 		ah->stats.ast_ani_reset++;
 
 		if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -582,7 +579,7 @@
 		    ATH9K_ANI_OFDM_DEF_LEVEL ||
 		    aniState->cckNoiseImmunityLevel !=
 		    ATH9K_ANI_CCK_DEF_LEVEL) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
 				ah->opmode,
 				chan->channel,
@@ -599,7 +596,7 @@
 		/*
 		 * restore historical levels for this channel
 		 */
-		ath_dbg(common, ATH_DBG_ANI,
+		ath_dbg(common, ANI,
 			"Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
 			ah->opmode,
 			chan->channel,
@@ -662,7 +659,7 @@
 
 	if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
 		if (phyCnt1 < ofdm_base) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"phyCnt1 0x%x, resetting counter value to 0x%x\n",
 				phyCnt1, ofdm_base);
 			REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
@@ -670,7 +667,7 @@
 				  AR_PHY_ERR_OFDM_TIMING);
 		}
 		if (phyCnt2 < cck_base) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"phyCnt2 0x%x, resetting counter value to 0x%x\n",
 				phyCnt2, cck_base);
 			REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
@@ -713,7 +710,7 @@
 	cckPhyErrRate =  aniState->cckPhyErrCount * 1000 /
 			 aniState->listenTime;
 
-	ath_dbg(common, ATH_DBG_ANI,
+	ath_dbg(common, ANI,
 		"listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
 		aniState->listenTime,
 		aniState->ofdmNoiseImmunityLevel,
@@ -748,7 +745,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n");
+	ath_dbg(common, ANI, "Enable MIB counters\n");
 
 	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
 
@@ -770,7 +767,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n");
+	ath_dbg(common, ANI, "Disable MIB counters\n");
 
 	REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
 	ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -845,7 +842,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	int i;
 
-	ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n");
+	ath_dbg(common, ANI, "Initialize ANI\n");
 
 	if (use_new_ani(ah)) {
 		ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
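
Note: every ath_dbg() call in this file (and in the ath9k files that follow) drops the ATH_DBG_ prefix from the mask argument, which only works if the helper itself now restores it. ath.h is not part of this excerpt, so the following is only an assumed shape of the updated macro, shown to explain why the call sites shrink:

	/* assumed definition in ath.h, not shown in this diff */
	#define ath_dbg(common, dbg_mask, fmt, ...)			\
		_ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
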
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index f199e9e..f901a17 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -158,7 +158,7 @@
 	/* pre-reverse this field */
 	tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
 
-	ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
+	ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
 		new_bias, synth_freq);
 
 	/* swizzle rf_pwd_icsyndiv */
@@ -1053,8 +1053,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
-			ath_dbg(common, ATH_DBG_ANI,
-				"level out of range (%u > %zu)\n",
+			ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(ah->totalSizeDesired));
 			return false;
 		}
@@ -1157,8 +1156,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(firstep)) {
-			ath_dbg(common, ATH_DBG_ANI,
-				"level out of range (%u > %zu)\n",
+			ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(firstep));
 			return false;
 		}
@@ -1177,8 +1175,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(cycpwrThr1)) {
-			ath_dbg(common, ATH_DBG_ANI,
-				"level out of range (%u > %zu)\n",
+			ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(cycpwrThr1));
 			return false;
 		}
@@ -1195,23 +1192,22 @@
 	case ATH9K_ANI_PRESENT:
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+		ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
 		return false;
 	}
 
-	ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n");
-	ath_dbg(common, ATH_DBG_ANI,
+	ath_dbg(common, ANI, "ANI parameters:\n");
+	ath_dbg(common, ANI,
 		"noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
 		aniState->noiseImmunityLevel,
 		aniState->spurImmunityLevel,
 		!aniState->ofdmWeakSigDetectOff);
-	ath_dbg(common, ATH_DBG_ANI,
+	ath_dbg(common, ANI,
 		"cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
 		aniState->cckWeakSigThreshold,
 		aniState->firstepLevel,
 		aniState->listenTime);
-	ath_dbg(common, ATH_DBG_ANI,
-		"ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+	ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
 		aniState->ofdmPhyErrCount,
 		aniState->cckPhyErrCount);
 
@@ -1295,7 +1291,7 @@
 				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
 
 		if (!on != aniState->ofdmWeakSigDetectOff) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: ofdm weak signal: %s=>%s\n",
 				chan->channel,
 				!aniState->ofdmWeakSigDetectOff ?
@@ -1313,7 +1309,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(firstep_table)) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(firstep_table));
 			return false;
@@ -1350,7 +1346,7 @@
 			      AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
 
 		if (level != aniState->firstepLevel) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->firstepLevel,
@@ -1358,7 +1354,7 @@
 				ATH9K_ANI_FIRSTEP_LVL_NEW,
 				value,
 				aniState->iniDef.firstep);
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->firstepLevel,
@@ -1378,7 +1374,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(cycpwrThr1_table));
 			return false;
@@ -1414,7 +1410,7 @@
 			      AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
 
 		if (level != aniState->spurImmunityLevel) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->spurImmunityLevel,
@@ -1422,7 +1418,7 @@
 				ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
 				value,
 				aniState->iniDef.cycpwrThr1);
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->spurImmunityLevel,
@@ -1448,11 +1444,11 @@
 	case ATH9K_ANI_PRESENT:
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+		ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
 		return false;
 	}
 
-	ath_dbg(common, ATH_DBG_ANI,
+	ath_dbg(common, ANI,
 		"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
 		aniState->spurImmunityLevel,
 		!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1506,7 +1502,7 @@
 
 	iniDef = &aniState->iniDef;
 
-	ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
 		ah->hw_version.macVersion,
 		ah->hw_version.macRev,
 		ah->opmode,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 88279e3..c55e5bb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -61,18 +61,16 @@
 	switch (currCal->calData->calType) {
 	case IQ_MISMATCH_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"starting IQ Mismatch Calibration\n");
 		break;
 	case ADC_GAIN_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"starting ADC Gain Calibration\n");
+		ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n");
 		break;
 	case ADC_DC_CAL:
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"starting ADC DC Calibration\n");
+		ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n");
 		break;
 	}
 
@@ -129,7 +127,7 @@
 			REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
 		ah->totalIqCorrMeas[i] +=
 			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE,
 			"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
 			ah->cal_samples, i, ah->totalPowerMeasI[i],
 			ah->totalPowerMeasQ[i],
@@ -151,7 +149,7 @@
 		ah->totalAdcQEvenPhase[i] +=
 			REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
 
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE,
 			"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
 			ah->cal_samples, i,
 			ah->totalAdcIOddPhase[i],
@@ -175,7 +173,7 @@
 		ah->totalAdcDcOffsetQEvenPhase[i] +=
 			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
 
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE,
 			"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
 			ah->cal_samples, i,
 			ah->totalAdcDcOffsetIOddPhase[i],
@@ -198,12 +196,12 @@
 		powerMeasQ = ah->totalPowerMeasQ[i];
 		iqCorrMeas = ah->totalIqCorrMeas[i];
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Starting IQ Cal and Correction for Chain %d\n",
 			i);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Orignal: Chn %diq_corr_meas = 0x%08x\n",
+		ath_dbg(common, CALIBRATE,
+			"Original: Chn %d iq_corr_meas = 0x%08x\n",
 			i, ah->totalIqCorrMeas[i]);
 
 		iqCorrNeg = 0;
@@ -213,12 +211,11 @@
 			iqCorrNeg = 1;
 		}
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
-		ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
-			iqCorrNeg);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+			i, powerMeasI);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+			i, powerMeasQ);
+		ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
 
 		iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
 		qCoffDenom = powerMeasQ / 64;
@@ -227,13 +224,13 @@
 		    (qCoffDenom != 0)) {
 			iCoff = iqCorrMeas / iCoffDenom;
 			qCoff = powerMeasI / qCoffDenom - 64;
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Chn %d iCoff = 0x%08x\n", i, iCoff);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Chn %d qCoff = 0x%08x\n", i, qCoff);
+			ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+				i, iCoff);
+			ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+				i, qCoff);
 
 			iCoff = iCoff & 0x3f;
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"New: Chn %d iCoff = 0x%08x\n", i, iCoff);
 			if (iqCorrNeg == 0x0)
 				iCoff = 0x40 - iCoff;
@@ -243,7 +240,7 @@
 			else if (qCoff <= -16)
 				qCoff = -16;
 
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"Chn %d : iCoff = 0x%x  qCoff = 0x%x\n",
 				i, iCoff, qCoff);
 
@@ -253,7 +250,7 @@
 			REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
 				      AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
 				      qCoff);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"IQ Cal and Correction done for Chain %d\n",
 				i);
 		}
@@ -275,21 +272,17 @@
 		qOddMeasOffset = ah->totalAdcQOddPhase[i];
 		qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Starting ADC Gain Cal for Chain %d\n", i);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_i = 0x%08x\n", i,
-			iOddMeasOffset);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_i = 0x%08x\n", i,
-			iEvenMeasOffset);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_q = 0x%08x\n", i,
-			qOddMeasOffset);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_q = 0x%08x\n", i,
-			qEvenMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n",
+			i, iOddMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n",
+			i, iEvenMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n",
+			i, qOddMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n",
+			i, qEvenMeasOffset);
 
 		if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
 			iGainMismatch =
@@ -299,19 +292,19 @@
 				((qOddMeasOffset * 32) /
 				 qEvenMeasOffset) & 0x3f;
 
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Chn %d gain_mismatch_i = 0x%08x\n", i,
-				iGainMismatch);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Chn %d gain_mismatch_q = 0x%08x\n", i,
-				qGainMismatch);
+			ath_dbg(common, CALIBRATE,
+				"Chn %d gain_mismatch_i = 0x%08x\n",
+				i, iGainMismatch);
+			ath_dbg(common, CALIBRATE,
+				"Chn %d gain_mismatch_q = 0x%08x\n",
+				i, qGainMismatch);
 
 			val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
 			val &= 0xfffff000;
 			val |= (qGainMismatch) | (iGainMismatch << 6);
 			REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
 
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"ADC Gain Cal done for Chain %d\n", i);
 		}
 	}
@@ -337,40 +330,36 @@
 		qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
 		qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Starting ADC DC Offset Cal for Chain %d\n", i);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_i = %d\n", i,
-			iOddMeasOffset);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_i = %d\n", i,
-			iEvenMeasOffset);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_odd_q = %d\n", i,
-			qOddMeasOffset);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_even_q = %d\n", i,
-			qEvenMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n",
+			i, iOddMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n",
+			i, iEvenMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n",
+			i, qOddMeasOffset);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n",
+			i, qEvenMeasOffset);
 
 		iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
 			       numSamples) & 0x1ff;
 		qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
 			       numSamples) & 0x1ff;
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
-			iDcMismatch);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
-			qDcMismatch);
+		ath_dbg(common, CALIBRATE,
+			"Chn %d dc_offset_mismatch_i = 0x%08x\n",
+			i, iDcMismatch);
+		ath_dbg(common, CALIBRATE,
+			"Chn %d dc_offset_mismatch_q = 0x%08x\n",
+			i, qDcMismatch);
 
 		val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
 		val &= 0xc0000fff;
 		val |= (qDcMismatch << 12) | (iDcMismatch << 21);
 		REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"ADC DC Offset Cal done for Chain %d\n", i);
 	}
 
@@ -560,7 +549,7 @@
 		{ 0x7838, 0 },
 	};
 
-	ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+	ath_dbg(common, CALIBRATE, "Running PA Calibration\n");
 
 	/* PA CAL is not needed for high power solution */
 	if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -741,7 +730,7 @@
 		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
 		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
 				  AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"offset calibration failed to complete in 1ms; noisy environment?\n");
 			return false;
 		}
@@ -755,7 +744,7 @@
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
 	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
 			  0, AH_WAIT_TIMEOUT)) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"offset calibration failed to complete in 1ms; noisy environment?\n");
 		return false;
 	}
@@ -851,7 +840,7 @@
 		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
 				   AR_PHY_AGC_CONTROL_CAL,
 				   0, AH_WAIT_TIMEOUT)) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"offset calibration failed to complete in 1ms; noisy environment?\n");
 			return false;
 		}
@@ -886,22 +875,21 @@
 		if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
 			INIT_CAL(&ah->adcgain_caldata);
 			INSERT_CAL(ah, &ah->adcgain_caldata);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-					"enabling ADC Gain Calibration.\n");
+			ath_dbg(common, CALIBRATE,
+					"enabling ADC Gain Calibration\n");
 		}
 
 		if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
 			INIT_CAL(&ah->adcdc_caldata);
 			INSERT_CAL(ah, &ah->adcdc_caldata);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-					"enabling ADC DC Calibration.\n");
+			ath_dbg(common, CALIBRATE,
+					"enabling ADC DC Calibration\n");
 		}
 
 		if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
 			INIT_CAL(&ah->iq_caldata);
 			INSERT_CAL(ah, &ah->iq_caldata);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-					"enabling IQ Calibration.\n");
+			ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
 		}
 
 		ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index b592016..7b6417b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -107,7 +107,7 @@
 		}
 
 		if (isr & AR_ISR_RXORN) {
-			ath_dbg(common, ATH_DBG_INTERRUPT,
+			ath_dbg(common, INTERRUPT,
 				"receive FIFO overrun interrupt\n");
 		}
 
@@ -143,24 +143,24 @@
 
 		if (fatal_int) {
 			if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
-				ath_dbg(common, ATH_DBG_ANY,
+				ath_dbg(common, ANY,
 					"received PCI FATAL interrupt\n");
 			}
 			if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
-				ath_dbg(common, ATH_DBG_ANY,
+				ath_dbg(common, ANY,
 					"received PCI PERR interrupt\n");
 			}
 			*masked |= ATH9K_INT_FATAL;
 		}
 		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
-			ath_dbg(common, ATH_DBG_INTERRUPT,
+			ath_dbg(common, INTERRUPT,
 				"AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
 			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
 			REG_WRITE(ah, AR_RC, 0);
 			*masked |= ATH9K_INT_FATAL;
 		}
 		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
-			ath_dbg(common, ATH_DBG_INTERRUPT,
+			ath_dbg(common, INTERRUPT,
 				"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
 		}
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 12a730d..8e70f0b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,7 @@
 #include "hw-ops.h"
 #include "ar9003_phy.h"
 #include "ar9003_rtt.h"
+#include "ar9003_mci.h"
 
 #define MAX_MEASUREMENT	MAX_IQCAL_MEASUREMENT
 #define MAX_MAG_DELTA	11
@@ -51,7 +52,7 @@
 		currCal->calData->calCountMax);
 		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"starting IQ Mismatch Calibration\n");
 
 		/* Kick-off cal */
@@ -63,7 +64,7 @@
 		REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
 			      AR_PHY_65NM_CH0_THERM_START, 1);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"starting Temperature Compensation Calibration\n");
 		break;
 	}
@@ -193,7 +194,7 @@
 				REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
 			ah->totalIqCorrMeas[i] +=
 				(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
-			ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+			ath_dbg(ath9k_hw_common(ah), CALIBRATE,
 				"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
 				ah->cal_samples, i, ah->totalPowerMeasI[i],
 				ah->totalPowerMeasQ[i],
@@ -220,12 +221,11 @@
 		powerMeasQ = ah->totalPowerMeasQ[i];
 		iqCorrMeas = ah->totalIqCorrMeas[i];
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Starting IQ Cal and Correction for Chain %d\n",
-			i);
+		ath_dbg(common, CALIBRATE,
+			"Starting IQ Cal and Correction for Chain %d\n", i);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Orignal: Chn %diq_corr_meas = 0x%08x\n",
+		ath_dbg(common, CALIBRATE,
+			"Original: Chn %d iq_corr_meas = 0x%08x\n",
 			i, ah->totalIqCorrMeas[i]);
 
 		iqCorrNeg = 0;
@@ -235,12 +235,11 @@
 			iqCorrNeg = 1;
 		}
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
-		ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
-			iqCorrNeg);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+			i, powerMeasI);
+		ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+			i, powerMeasQ);
+		ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
 
 		iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
 		qCoffDenom = powerMeasQ / 64;
@@ -248,10 +247,10 @@
 		if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
 			iCoff = iqCorrMeas / iCoffDenom;
 			qCoff = powerMeasI / qCoffDenom - 64;
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Chn %d iCoff = 0x%08x\n", i, iCoff);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Chn %d qCoff = 0x%08x\n", i, qCoff);
+			ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+				i, iCoff);
+			ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+				i, qCoff);
 
 			/* Force bounds on iCoff */
 			if (iCoff >= 63)
@@ -272,10 +271,10 @@
 			iCoff = iCoff & 0x7f;
 			qCoff = qCoff & 0x7f;
 
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"Chn %d : iCoff = 0x%x  qCoff = 0x%x\n",
 				i, iCoff, qCoff);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"Register offset (0x%04x) before update = 0x%x\n",
 				offset_array[i],
 				REG_READ(ah, offset_array[i]));
@@ -286,25 +285,25 @@
 			REG_RMW_FIELD(ah, offset_array[i],
 				      AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
 				      qCoff);
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
 				offset_array[i],
 				AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
 				REG_READ(ah, offset_array[i]));
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
 				offset_array[i],
 				AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
 				REG_READ(ah, offset_array[i]));
 
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"IQ Cal and Correction done for Chain %d\n", i);
 		}
 	}
 
 	REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
 		    AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
-	ath_dbg(common, ATH_DBG_CALIBRATE,
+	ath_dbg(common, CALIBRATE,
 		"IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
 		(unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
 		AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
@@ -348,7 +347,7 @@
 	f2 = (f1 * f1 + f3 * f3) / result_shift;
 
 	if (!f2) {
-		ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+		ath_dbg(common, CALIBRATE, "Divide by 0\n");
 		return false;
 	}
 
@@ -469,7 +468,7 @@
 
 	if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
 	    (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Divide by 0:\n"
 			"a0_d0=%d\n"
 			"a0_d1=%d\n"
@@ -509,8 +508,7 @@
 	mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
 
 	if ((mag1 == 0) || (mag2 == 0)) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Divide by 0: mag1=%d, mag2=%d\n",
+		ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n",
 			mag1, mag2);
 		return false;
 	}
@@ -528,8 +526,8 @@
 			     mag_a0_d0, phs_a0_d0,
 			     mag_a1_d0,
 			     phs_a1_d0, solved_eq)) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Call to ar9003_hw_solve_iq_cal() failed.\n");
+		ath_dbg(common, CALIBRATE,
+			"Call to ar9003_hw_solve_iq_cal() failed\n");
 		return false;
 	}
 
@@ -538,12 +536,12 @@
 	mag_rx = solved_eq[2];
 	phs_rx = solved_eq[3];
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
+	ath_dbg(common, CALIBRATE,
 		"chain %d: mag mismatch=%d phase mismatch=%d\n",
 		chain_idx, mag_tx/res_scale, phs_tx/res_scale);
 
 	if (res_scale == mag_tx) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Divide by 0: mag_tx=%d, res_scale=%d\n",
 			mag_tx, res_scale);
 		return false;
@@ -556,8 +554,7 @@
 	q_q_coff = (mag_corr_tx * 128 / res_scale);
 	q_i_coff = (phs_corr_tx * 256 / res_scale);
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"tx chain %d: mag corr=%d  phase corr=%d\n",
+	ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d  phase corr=%d\n",
 		chain_idx, q_q_coff, q_i_coff);
 
 	if (q_i_coff < -63)
@@ -571,12 +568,11 @@
 
 	iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"tx chain %d: iq corr coeff=%x\n",
+	ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
 		chain_idx, iqc_coeff[0]);
 
 	if (-mag_rx == res_scale) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Divide by 0: mag_rx=%d, res_scale=%d\n",
 			mag_rx, res_scale);
 		return false;
@@ -589,8 +585,7 @@
 	q_q_coff = (mag_corr_rx * 128 / res_scale);
 	q_i_coff = (phs_corr_rx * 256 / res_scale);
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"rx chain %d: mag corr=%d  phase corr=%d\n",
+	ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d  phase corr=%d\n",
 		chain_idx, q_q_coff, q_i_coff);
 
 	if (q_i_coff < -63)
@@ -604,8 +599,7 @@
 
 	iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"rx chain %d: iq corr coeff=%x\n",
+	ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
 		chain_idx, iqc_coeff[1]);
 
 	return true;
@@ -752,8 +746,7 @@
 	if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
 			AR_PHY_TX_IQCAL_START_DO_CAL, 0,
 			AH_WAIT_TIMEOUT)) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Tx IQ Cal is not completed.\n");
+		ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
 		return false;
 	}
 	return true;
@@ -791,13 +784,13 @@
 			nmeasurement = MAX_MEASUREMENT;
 
 		for (im = 0; im < nmeasurement; im++) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
-				"Doing Tx IQ Cal for chain %d.\n", i);
+			ath_dbg(common, CALIBRATE,
+				"Doing Tx IQ Cal for chain %d\n", i);
 
 			if (REG_READ(ah, txiqcal_status[i]) &
 					AR_PHY_TX_IQCAL_STATUS_FAILED) {
-				ath_dbg(common, ATH_DBG_CALIBRATE,
-					"Tx IQ Cal failed for chain %d.\n", i);
+				ath_dbg(common, CALIBRATE,
+					"Tx IQ Cal failed for chain %d\n", i);
 				goto tx_iqcal_fail;
 			}
 
@@ -823,18 +816,16 @@
 				iq_res[idx + 1] = 0xffff & REG_READ(ah,
 						chan_info_tab[i] + offset);
 
-				ath_dbg(common, ATH_DBG_CALIBRATE,
-					"IQ RES[%d]=0x%x"
-					"IQ_RES[%d]=0x%x\n",
+				ath_dbg(common, CALIBRATE,
+					"IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
 					idx, iq_res[idx], idx + 1,
 					iq_res[idx + 1]);
 			}
 
 			if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
 						coeff.iqc_coeff)) {
-				ath_dbg(common, ATH_DBG_CALIBRATE,
-					"Failed in calculation of \
-					IQ correction.\n");
+				ath_dbg(common, CALIBRATE,
+					"Failed in calculation of IQ correction\n");
 				goto tx_iqcal_fail;
 			}
 
@@ -854,7 +845,7 @@
 	return;
 
 tx_iqcal_fail:
-	ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+	ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n");
 	return;
 }
 
@@ -934,10 +925,12 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_cal_data *caldata = ah->caldata;
+	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
 	bool txiqcal_done = false, txclcal_done = false;
 	bool is_reusable = true, status = true;
 	bool run_rtt_cal = false, run_agc_cal;
 	bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+	bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
 	u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
 					  AR_PHY_AGC_CONTROL_FLTR_CAL   |
 					  AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -950,7 +943,7 @@
 		if (!ar9003_hw_rtt_restore(ah, chan))
 			run_rtt_cal = true;
 
-		ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n",
+		ath_dbg(common, CALIBRATE, "RTT restore %s\n",
 			run_rtt_cal ? "failed" : "succeed");
 	}
 	run_agc_cal = run_rtt_cal;
@@ -1005,6 +998,31 @@
 	} else if (caldata && !caldata->done_txiqcal_once)
 		run_agc_cal = true;
 
+	if (mci && IS_CHAN_2GHZ(chan) &&
+	    (mci_hw->bt_state  == MCI_BT_AWAKE) &&
+	    run_agc_cal &&
+	    !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+		u32 pld[4] = {0, 0, 0, 0};
+
+		/* send CAL_REQ only when BT is AWAKE. */
+		ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+			mci_hw->wlan_cal_seq);
+		MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+		pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+		ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+		/* Wait BT_CAL_GRANT for 50ms */
+		ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n");
+
+		if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+			ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n");
+		else {
+			is_reusable = false;
+			ath_dbg(common, MCI, "\nMCI BT is not responding\n");
+		}
+	}
+
 	txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
 	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
 	udelay(5);
@@ -1022,6 +1040,21 @@
 				       AR_PHY_AGC_CONTROL_CAL,
 				       0, AH_WAIT_TIMEOUT);
 	}
+
+	if (mci && IS_CHAN_2GHZ(chan) &&
+	    (mci_hw->bt_state  == MCI_BT_AWAKE)	&&
+	    run_agc_cal	&&
+	    !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+		u32 pld[4] = {0, 0, 0, 0};
+
+		ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+			mci_hw->wlan_cal_done);
+		MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+		pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+		ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+	}
+
 	if (rtt && !run_rtt_cal) {
 		agc_ctrl |= agc_supp_cals;
 		REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
@@ -1031,9 +1064,8 @@
 		if (run_rtt_cal)
 			ar9003_hw_rtt_disable(ah);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"offset calibration failed to complete in 1ms;"
-			"noisy environment?\n");
+		ath_dbg(common, CALIBRATE,
+			"offset calibration failed to complete in 1ms; noisy environment?\n");
 		return false;
 	}
 
@@ -1092,15 +1124,14 @@
 	if (ah->supp_cals & IQ_MISMATCH_CAL) {
 		INIT_CAL(&ah->iq_caldata);
 		INSERT_CAL(ah, &ah->iq_caldata);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"enabling IQ Calibration.\n");
+		ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
 	}
 
 	if (ah->supp_cals & TEMP_COMP_CAL) {
 		INIT_CAL(&ah->tempCompCalData);
 		INSERT_CAL(ah, &ah->tempCompCalData);
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"enabling Temperature Compensation Calibration.\n");
+		ath_dbg(common, CALIBRATE,
+			"enabling Temperature Compensation Calibration\n");
 	}
 
 	/* Initialize current pointer to first element in list */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 3b262ba6..9fbcbdd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -121,10 +121,8 @@
 		 * if the register is per chain
 		 */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {1, 1, 1},/* 3 chain */
-		.db_stage2 = {1, 1, 1}, /* 3 chain  */
-		.db_stage3 = {0, 0, 0},
-		.db_stage4 = {0, 0, 0},
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -144,7 +142,7 @@
 	 },
 	.base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	},
 	.calFreqPier2G = {
 		FREQ2FBIN(2412, 1),
@@ -323,10 +321,8 @@
 		.spurChans = {0, 0, 0, 0, 0},
 		/* noiseFloorThreshCh Check if the register is per chain */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {3, 3, 3}, /* 3 chain */
-		.db_stage2 = {3, 3, 3}, /* 3 chain */
-		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-		.db_stage4 = {3, 3, 3},	 /* don't exist for 2G */
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -698,10 +694,8 @@
 		 * if the register is per chain
 		 */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {1, 1, 1},/* 3 chain */
-		.db_stage2 = {1, 1, 1}, /* 3 chain  */
-		.db_stage3 = {0, 0, 0},
-		.db_stage4 = {0, 0, 0},
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -721,7 +715,7 @@
 	 },
 	 .base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	 },
 	.calFreqPier2G = {
 		FREQ2FBIN(2412, 1),
@@ -900,10 +894,8 @@
 		.spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
 		/* noiseFloorThreshCh Check if the register is per chain */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {3, 3, 3}, /* 3 chain */
-		.db_stage2 = {3, 3, 3}, /* 3 chain */
-		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-		.db_stage4 = {3, 3, 3},	 /* don't exist for 2G */
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0xf,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -1276,10 +1268,8 @@
 		 * if the register is per chain
 		 */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {1, 1, 1},/* 3 chain */
-		.db_stage2 = {1, 1, 1}, /* 3 chain  */
-		.db_stage3 = {0, 0, 0},
-		.db_stage4 = {0, 0, 0},
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -1291,20 +1281,20 @@
 		.txEndToRxOn = 0x2,
 		.txFrameToXpaOn = 0xe,
 		.thresh62 = 28,
-		.papdRateMaskHt20 = LE32(0x80c080),
-		.papdRateMaskHt40 = LE32(0x80c080),
+		.papdRateMaskHt20 = LE32(0x0c80c080),
+		.papdRateMaskHt40 = LE32(0x0080c080),
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0, 0,
 		},
 	},
 	.base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	},
 	.calFreqPier2G = {
 		FREQ2FBIN(2412, 1),
 		FREQ2FBIN(2437, 1),
-		FREQ2FBIN(2472, 1),
+		FREQ2FBIN(2462, 1),
 	},
 	/* ar9300_cal_data_per_freq_op_loop 2g */
 	.calPierData2G = {
@@ -1314,7 +1304,7 @@
 	},
 	.calTarget_freqbin_Cck = {
 		FREQ2FBIN(2412, 1),
-		FREQ2FBIN(2484, 1),
+		FREQ2FBIN(2472, 1),
 	},
 	.calTarget_freqbin_2G = {
 		FREQ2FBIN(2412, 1),
@@ -1478,10 +1468,8 @@
 		.spurChans = {0, 0, 0, 0, 0},
 		/* noiseFloorThreshCh Check if the register is per chain */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {3, 3, 3}, /* 3 chain */
-		.db_stage2 = {3, 3, 3}, /* 3 chain */
-		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-		.db_stage4 = {3, 3, 3},	 /* don't exist for 2G */
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -1515,7 +1503,7 @@
 		FREQ2FBIN(5500, 0),
 		FREQ2FBIN(5600, 0),
 		FREQ2FBIN(5700, 0),
-		FREQ2FBIN(5825, 0)
+		FREQ2FBIN(5785, 0)
 	},
 	.calPierData5G = {
 		{
@@ -1854,10 +1842,8 @@
 		 * if the register is per chain
 		 */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {1, 1, 1},/* 3 chain */
-		.db_stage2 = {1, 1, 1}, /* 3 chain  */
-		.db_stage3 = {0, 0, 0},
-		.db_stage4 = {0, 0, 0},
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -1877,7 +1863,7 @@
 	},
 	.base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	},
 	.calFreqPier2G = {
 		FREQ2FBIN(2412, 1),
@@ -2056,10 +2042,8 @@
 		.spurChans = {0, 0, 0, 0, 0},
 		/* noiseFloorThreshch check if the register is per chain */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {3, 3, 3}, /* 3 chain */
-		.db_stage2 = {3, 3, 3}, /* 3 chain */
-		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-		.db_stage4 = {3, 3, 3},	 /* don't exist for 2G */
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -2431,10 +2415,8 @@
 		 * if the register is per chain
 		 */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {1, 1, 1},/* 3 chain */
-		.db_stage2 = {1, 1, 1}, /* 3 chain  */
-		.db_stage3 = {0, 0, 0},
-		.db_stage4 = {0, 0, 0},
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -2454,12 +2436,12 @@
 	 },
 	 .base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	 },
 	.calFreqPier2G = {
 		FREQ2FBIN(2412, 1),
 		FREQ2FBIN(2437, 1),
-		FREQ2FBIN(2472, 1),
+		FREQ2FBIN(2462, 1),
 	 },
 	/* ar9300_cal_data_per_freq_op_loop 2g */
 	.calPierData2G = {
@@ -2633,10 +2615,8 @@
 		.spurChans = {0, 0, 0, 0, 0},
 		/* noiseFloorThreshCh Check if the register is per chain */
 		.noiseFloorThreshCh = {-1, 0, 0},
-		.ob = {3, 3, 3}, /* 3 chain */
-		.db_stage2 = {3, 3, 3}, /* 3 chain */
-		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-		.db_stage4 = {3, 3, 3},	 /* don't exist for 2G */
+		.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		.quick_drop = 0,
 		.xpaBiasLvl = 0,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
@@ -2663,7 +2643,7 @@
 		.xatten1MarginHigh = {0, 0, 0}
 	 },
 	.calFreqPier5G = {
-		FREQ2FBIN(5180, 0),
+		FREQ2FBIN(5160, 0),
 		FREQ2FBIN(5220, 0),
 		FREQ2FBIN(5320, 0),
 		FREQ2FBIN(5400, 0),
@@ -3023,6 +3003,8 @@
 		return eep->modalHeader5G.antennaGain;
 	case EEP_ANTENNA_GAIN_2G:
 		return eep->modalHeader2G.antennaGain;
+	case EEP_QUICK_DROP:
+		return pBase->miscConfiguration & BIT(1);
 	default:
 		return 0;
 	}
@@ -3061,8 +3043,7 @@
 	int i;
 
 	if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"eeprom address not in range\n");
+		ath_dbg(common, EEPROM, "eeprom address not in range\n");
 		return false;
 	}
 
@@ -3093,8 +3074,8 @@
 	return true;
 
 error:
-	ath_dbg(common, ATH_DBG_EEPROM,
-		"unable to read eeprom region at offset %d\n", address);
+	ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n",
+		address);
 	return false;
 }
 
@@ -3178,13 +3159,13 @@
 		length &= 0xff;
 
 		if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"Restore at %d: spot=%d offset=%d length=%d\n",
 				it, spot, offset, length);
 			memcpy(&mptr[spot], &block[it+2], length);
 			spot += length;
 		} else if (length > 0) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"Bad restore at %d: spot=%d offset=%d length=%d\n",
 				it, spot, offset, length);
 			return false;
@@ -3206,13 +3187,13 @@
 	switch (code) {
 	case _CompressNone:
 		if (length != mdata_size) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"EEPROM structure size mismatch memory=%d eeprom=%d\n",
 				mdata_size, length);
 			return -1;
 		}
 		memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
-		ath_dbg(common, ATH_DBG_EEPROM,
+		ath_dbg(common, EEPROM,
 			"restored eeprom %d: uncompressed, length %d\n",
 			it, length);
 		break;
@@ -3221,22 +3202,21 @@
 		} else {
 			eep = ar9003_eeprom_struct_find_by_id(reference);
 			if (eep == NULL) {
-				ath_dbg(common, ATH_DBG_EEPROM,
+				ath_dbg(common, EEPROM,
 					"can't find reference eeprom struct %d\n",
 					reference);
 				return -1;
 			}
 			memcpy(mptr, eep, mdata_size);
 		}
-		ath_dbg(common, ATH_DBG_EEPROM,
+		ath_dbg(common, EEPROM,
 			"restore eeprom %d: block, reference %d, length %d\n",
 			it, reference, length);
 		ar9300_uncompress_block(ah, mptr, mdata_size,
 					(u8 *) (word + COMP_HDR_LEN), length);
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"unknown compression code %d\n", code);
+		ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
 		return -1;
 	}
 	return 0;
@@ -3312,34 +3292,32 @@
 		cptr = AR9300_BASE_ADDR_512;
 	else
 		cptr = AR9300_BASE_ADDR;
-	ath_dbg(common, ATH_DBG_EEPROM,
-		"Trying EEPROM access at Address 0x%04x\n", cptr);
+	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+		cptr);
 	if (ar9300_check_eeprom_header(ah, read, cptr))
 		goto found;
 
 	cptr = AR9300_BASE_ADDR_512;
-	ath_dbg(common, ATH_DBG_EEPROM,
-		"Trying EEPROM access at Address 0x%04x\n", cptr);
+	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+		cptr);
 	if (ar9300_check_eeprom_header(ah, read, cptr))
 		goto found;
 
 	read = ar9300_read_otp;
 	cptr = AR9300_BASE_ADDR;
-	ath_dbg(common, ATH_DBG_EEPROM,
-		"Trying OTP access at Address 0x%04x\n", cptr);
+	ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
 	if (ar9300_check_eeprom_header(ah, read, cptr))
 		goto found;
 
 	cptr = AR9300_BASE_ADDR_512;
-	ath_dbg(common, ATH_DBG_EEPROM,
-		"Trying OTP access at Address 0x%04x\n", cptr);
+	ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
 	if (ar9300_check_eeprom_header(ah, read, cptr))
 		goto found;
 
 	goto fail;
 
 found:
-	ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n");
+	ath_dbg(common, EEPROM, "Found valid EEPROM data\n");
 
 	for (it = 0; it < MSTATE; it++) {
 		if (!read(ah, cptr, word, COMP_HDR_LEN))
@@ -3350,13 +3328,12 @@
 
 		ar9300_comp_hdr_unpack(word, &code, &reference,
 				       &length, &major, &minor);
-		ath_dbg(common, ATH_DBG_EEPROM,
+		ath_dbg(common, EEPROM,
 			"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
 			cptr, code, reference, length, major, minor);
 		if ((!AR_SREV_9485(ah) && length >= 1024) ||
 		    (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
-			ath_dbg(common, ATH_DBG_EEPROM,
-				"Skipping bad header\n");
+			ath_dbg(common, EEPROM, "Skipping bad header\n");
 			cptr -= COMP_HDR_LEN;
 			continue;
 		}
@@ -3365,13 +3342,13 @@
 		read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
 		checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
 		mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"checksum %x %x\n", checksum, mchecksum);
+		ath_dbg(common, EEPROM, "checksum %x %x\n",
+			checksum, mchecksum);
 		if (checksum == mchecksum) {
 			ar9300_compress_decision(ah, it, code, reference, mptr,
 						 word, length, mdata_size);
 		} else {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"skipping block with bad checksum\n");
 		}
 		cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
@@ -3428,25 +3405,14 @@
 	PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
 	PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
 	PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+	PR_EEP("Quick Drop", modal_hdr->quick_drop);
+	PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
 	PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
 	PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
 	PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
 	PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
 	PR_EEP("txClip", modal_hdr->txClip);
 	PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
-	PR_EEP("Chain0 ob", modal_hdr->ob[0]);
-	PR_EEP("Chain1 ob", modal_hdr->ob[1]);
-	PR_EEP("Chain2 ob", modal_hdr->ob[2]);
-
-	PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
-	PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
-	PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
-	PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
-	PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
-	PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
-	PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
-	PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
-	PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
 
 	return len;
 }
@@ -3503,6 +3469,7 @@
 	PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
 	PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
 	PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+	PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
 	PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
 	PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
 	PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
@@ -3571,13 +3538,13 @@
 static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
 {
 	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-	__le32 val;
+	__le16 val;
 
 	if (is_2ghz)
 		val = eep->modalHeader2G.switchcomspdt;
 	else
 		val = eep->modalHeader5G.switchcomspdt;
-	return le32_to_cpu(val);
+	return le16_to_cpu(val);
 }
 
 
@@ -3965,6 +3932,40 @@
 	}
 }
 
+static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+{
+	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+	int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+	s32 t[3], f[3] = {5180, 5500, 5785};
+
+	if (!quick_drop)
+		return;
+
+	if (freq < 4000)
+		quick_drop = eep->modalHeader2G.quick_drop;
+	else {
+		t[0] = eep->base_ext1.quick_drop_low;
+		t[1] = eep->modalHeader5G.quick_drop;
+		t[2] = eep->base_ext1.quick_drop_high;
+		quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+	}
+	REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+}
+
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+{
+	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+	u32 value;
+
+	value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
+				eep->modalHeader5G.txEndToXpaOff;
+
+	REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+		      AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
+	REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+		      AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
+}
+
 static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
 					     struct ath9k_channel *chan)
 {
@@ -3972,10 +3973,12 @@
 	ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
 	ar9003_hw_drive_strength_apply(ah);
 	ar9003_hw_atten_apply(ah, chan);
+	ar9003_hw_quick_drop_apply(ah, chan->channel);
 	if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
 		ar9003_hw_internal_regulator_apply(ah);
 	if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
 		ar9003_hw_apply_tuning_caps(ah);
+	ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
 }
 
 static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4416,8 +4419,8 @@
 					      is2GHz) + ht40PowerIncForPdadc;
 
 	for (i = 0; i < ar9300RateSize; i++) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+		ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+			i, targetPowerValT2[i]);
 	}
 }
 
@@ -4436,7 +4439,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (ichain >= AR9300_MAX_CHAINS) {
-		ath_dbg(common, ATH_DBG_EEPROM,
+		ath_dbg(common, EEPROM,
 			"Invalid chain index, must be less than %d\n",
 			AR9300_MAX_CHAINS);
 		return -1;
@@ -4444,7 +4447,7 @@
 
 	if (mode) {		/* 5GHz */
 		if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"Invalid 5GHz cal pier index, must be less than %d\n",
 				AR9300_NUM_5G_CAL_PIERS);
 			return -1;
@@ -4454,7 +4457,7 @@
 		is2GHz = 0;
 	} else {
 		if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"Invalid 2GHz cal pier index, must be less than %d\n",
 				AR9300_NUM_2G_CAL_PIERS);
 			return -1;
@@ -4616,8 +4619,7 @@
 
 	/* interpolate  */
 	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"ch=%d f=%d low=%d %d h=%d %d\n",
+		ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
 			ichain, frequency, lfrequency[ichain],
 			lcorrection[ichain], hfrequency[ichain],
 			hcorrection[ichain]);
@@ -4672,7 +4674,7 @@
 	ar9003_hw_power_control_override(ah, frequency, correction, voltage,
 					 temperature);
 
-	ath_dbg(common, ATH_DBG_EEPROM,
+	ath_dbg(common, EEPROM,
 		"for frequency=%d, calibration correction = %d %d %d\n",
 		frequency, correction[0], correction[1], correction[2]);
 
@@ -4771,7 +4773,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
-	u16 twiceMaxEdgePower = MAX_RATE_POWER;
+	u16 twiceMaxEdgePower;
 	int i;
 	u16 scaledPower = 0, minCtlPower;
 	static const u16 ctlModesFor11a[] = {
@@ -4858,7 +4860,7 @@
 		else
 			freq = centers.ctl_center;
 
-		ath_dbg(common, ATH_DBG_REGULATORY,
+		ath_dbg(common, REGULATORY,
 			"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
 			ctlMode, numCtlModes, isHt40CtlMode,
 			(pCtlMode[ctlMode] & EXT_ADDITIVE));
@@ -4872,8 +4874,9 @@
 			ctlNum = AR9300_NUM_CTLS_5G;
 		}
 
+		twiceMaxEdgePower = MAX_RATE_POWER;
 		for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
-			ath_dbg(common, ATH_DBG_REGULATORY,
+			ath_dbg(common, REGULATORY,
 				"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
 				i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
 				chan->channel);
@@ -4915,7 +4918,7 @@
 
 			minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
 
-			ath_dbg(common, ATH_DBG_REGULATORY,
+			ath_dbg(common, REGULATORY,
 				"SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
 				ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
 				scaledPower, minCtlPower);
@@ -5039,7 +5042,7 @@
 				target_power_val_t2_eep[i]) >
 			    paprd_scale_factor)) {
 				ah->paprd_ratemask &= ~(1 << i);
-				ath_dbg(common, ATH_DBG_EEPROM,
+				ath_dbg(common, EEPROM,
 					"paprd disabled for mcs %d\n", i);
 			}
 		}
@@ -5051,12 +5054,14 @@
 			regulatory->max_power_level = targetPowerValT2[i];
 	}
 
+	ath9k_hw_update_regulatory_maxpower(ah);
+
 	if (test)
 		return;
 
 	for (i = 0; i < ar9300RateSize; i++) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+		ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+			i, targetPowerValT2[i]);
 	}
 
 	ah->txpower_limit = regulatory->max_power_level;
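In ar9003_hw_quick_drop_apply() above, the 2 GHz value is taken directly from the modal header while the 5 GHz value is interpolated between three EEPROM anchors at 5180, 5500 and 5785 MHz (quick_drop_low, modalHeader5G.quick_drop, quick_drop_high) via ar9003_hw_power_interpolate(). Below is a self-contained sketch of that per-frequency lookup, assuming plain linear interpolation between neighbouring anchors with clamping at the ends; the helper and the example numbers are the editor's, not values from the patch.

/* Editor's sketch: piecewise-linear interpolation across the three
 * 5 GHz quick_drop anchors. */
static int quick_drop_interpolate(int freq, const int f[3], const int t[3])
{
	int i;

	if (freq <= f[0])
		return t[0];
	if (freq >= f[2])
		return t[2];

	for (i = 0; i < 2; i++)
		if (freq <= f[i + 1])
			return t[i] + (freq - f[i]) * (t[i + 1] - t[i]) /
				      (f[i + 1] - f[i]);

	return t[2];	/* not reached */
}

/*
 * Example (illustrative values only): anchors {5180, 5500, 5785} MHz with
 * quick_drop {-6, -4, -2} yield -5 at 5340 MHz and -3 at 5700 MHz.
 */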
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 6335a86..bb223fe 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -216,10 +216,8 @@
 	u8 spurChans[AR_EEPROM_MODAL_SPURS];
 	/* 3  Check if the register is per chain */
 	int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
-	u8 ob[AR9300_MAX_CHAINS];
-	u8 db_stage2[AR9300_MAX_CHAINS];
-	u8 db_stage3[AR9300_MAX_CHAINS];
-	u8 db_stage4[AR9300_MAX_CHAINS];
+	u8 reserved[11];
+	int8_t quick_drop;
 	u8 xpaBiasLvl;
 	u8 txFrameToDataStart;
 	u8 txFrameToPaOn;
@@ -269,7 +267,9 @@
 
 struct ar9300_BaseExtension_1 {
 	u8 ant_div_control;
-	u8 future[13];
+	u8 future[11];
+	int8_t quick_drop_low;
+	int8_t quick_drop_high;
 } __packed;
 
 struct ar9300_BaseExtension_2 {
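The header change above is layout-preserving: in the modal EEPROM header the four 3-byte arrays ob/db_stage2/db_stage3/db_stage4 (12 bytes) become reserved[11] plus an int8_t quick_drop, and in ar9300_BaseExtension_1 the 13 spare bytes shrink to future[11] plus quick_drop_low/quick_drop_high, so the __packed structure sizes, and therefore the EEPROM image format, do not change. A throwaway compile-time check of that arithmetic is sketched below (editor's sketch; BUILD_BUG_ON is the kernel's static assertion, the numbers are simply restated from the hunks).

/* Editor's sketch, not part of the patch: both replacements are size-neutral. */
#include <linux/bug.h>

static inline void ar9300_eeprom_layout_check(void)
{
	/* ob[3] + db_stage2[3] + db_stage3[3] + db_stage4[3] == reserved[11] + quick_drop */
	BUILD_BUG_ON(4 * 3 != 11 + sizeof(int8_t));

	/* future[13] == future[11] + quick_drop_low + quick_drop_high */
	BUILD_BUG_ON(13 != 11 + 2 * sizeof(int8_t));
}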
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index ccde784..88c81c5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,20 +175,24 @@
 	u32 isr = 0;
 	u32 mask2 = 0;
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
-	u32 sync_cause = 0;
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 sync_cause = 0, async_cause;
 
-	if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+	async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+	if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
 		if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
 				== AR_RTC_STATUS_ON)
 			isr = REG_READ(ah, AR_ISR);
 	}
 
+
 	sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
 
 	*masked = 0;
 
-	if (!isr && !sync_cause)
+	if (!isr && !sync_cause && !async_cause)
 		return false;
 
 	if (isr) {
@@ -294,6 +298,33 @@
 			ar9003_hw_bb_watchdog_read(ah);
 	}
 
+	if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+		u32 raw_intr, rx_msg_intr;
+
+		rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+		raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+		if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+			ath_dbg(common, MCI,
+				"MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+				raw_intr, rx_msg_intr, mci->raw_intr,
+				mci->rx_msg_intr);
+		else {
+			mci->rx_msg_intr |= rx_msg_intr;
+			mci->raw_intr |= raw_intr;
+			*masked |= ATH9K_INT_MCI;
+
+			if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+				mci->cont_status =
+					REG_READ(ah, AR_MCI_CONT_STATUS);
+
+			REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+			REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+			ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n");
+
+		}
+	}
+
 	if (sync_cause) {
 		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
 			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -302,7 +333,7 @@
 		}
 
 		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
-			ath_dbg(common, ATH_DBG_INTERRUPT,
+			ath_dbg(common, INTERRUPT,
 				"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
 
 		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -333,7 +364,7 @@
 
 	if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
 	    (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+		ath_dbg(ath9k_hw_common(ah), XMIT,
 			"Tx Descriptor error %x\n", ads->ds_info);
 		memset(ads, 0, sizeof(*ads));
 		return -EIO;
@@ -541,7 +572,7 @@
 	memset((void *) ah->ts_ring, 0,
 		ah->ts_size * sizeof(struct ar9003_txs));
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+	ath_dbg(ath9k_hw_common(ah), XMIT,
 		"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
 		ah->ts_paddr_start, ah->ts_paddr_end,
 		ah->ts_ring, ah->ts_size);
@@ -552,7 +583,7 @@
 
 void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
 			       u32 ts_paddr_start,
-			       u8 size)
+			       u16 size)
 {
 
 	ah->ts_paddr_start = ts_paddr_start;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index c504493..e203b51 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -118,5 +118,5 @@
 void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
 void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
 			       u32 ts_paddr_start,
-			       u8 size);
+			       u16 size);
 #endif
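Two related changes sit in the MAC files above: ath9k_hw_setup_statusring() now takes a u16 size, since a u8 would wrap for a TX-status ring of 256 or more entries, and ar9003_hw_get_isr() reads AR_INTR_ASYNC_CAUSE once so MCI interrupts are recognised even when no plain MAC IRQ is asserted, latching the raw and RX-message causes into ah->btcoex_hw.mci for later processing. Below is a hedged sketch of how a driver-side handler might drain that latched state through ar9003_mci_get_interrupt(), which is exported from the new ar9003_mci.c that follows; the handler name is the editor's placeholder, not a function from the patch.

/*
 * Editor's sketch, not patch text: consuming the interrupt causes that
 * ar9003_hw_get_isr() stored in mci->raw_intr / mci->rx_msg_intr.
 */
static void ath_mci_drain_intr(struct ath_hw *ah)	/* hypothetical name */
{
	u32 raw_intr, rx_msg_intr;

	/* Copies and then clears the latched MCI interrupt state. */
	ar9003_mci_get_interrupt(ah, &raw_intr, &rx_msg_intr);

	if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_GPM) {
		/* New GPM messages: walk them via MCI_STATE_NEXT_GPM_OFFSET. */
	}

	if (rx_msg_intr & (AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING |
			   AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING)) {
		/* BT changed sleep state: resync via ar9003_mci_sync_bt_state(). */
	}
}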
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 0000000..709520c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+	if (!AR_SREV_9462_20(ah))
+		return;
+
+	REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+		      AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+	udelay(1);
+	REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+		      AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+					u32 bit_position, int time_out)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	while (time_out) {
+
+		if (REG_READ(ah, address) & bit_position) {
+
+			REG_WRITE(ah, address, bit_position);
+
+			if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+				if (bit_position &
+				    AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+					ar9003_mci_reset_req_wakeup(ah);
+
+				if (bit_position &
+				    (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+				     AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+					REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+					AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+				REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+					  AR_MCI_INTERRUPT_RX_MSG);
+			}
+			break;
+		}
+
+		udelay(10);
+		time_out -= 10;
+
+		if (time_out < 0)
+			break;
+	}
+
+	if (time_out <= 0) {
+		ath_dbg(common, MCI,
+			"MCI Wait for Reg 0x%08x = 0x%08x timeout\n",
+			address, bit_position);
+		ath_dbg(common, MCI,
+			"MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n",
+			REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+			REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+		time_out = 0;
+	}
+
+	return time_out;
+}
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+	u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+				wait_done, false);
+	udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+	u32 payload = 0x00000000;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+				wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+	ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+				NULL, 0, wait_done, false);
+	udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+				NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+	u32 payload = 0x70000000;
+
+	ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+				wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+	ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+				MCI_FLAG_DISABLE_TIMESTAMP,
+				NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+					       bool wait_done)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 payload[4] = {0, 0, 0, 0};
+
+	if (!mci->bt_version_known &&
+			(mci->bt_state != MCI_BT_SLEEP)) {
+		ath_dbg(common, MCI, "MCI Send Coex version query\n");
+		MCI_GPM_SET_TYPE_OPCODE(payload,
+				MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+		ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+				wait_done, true);
+	}
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+						     bool wait_done)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 payload[4] = {0, 0, 0, 0};
+
+	ath_dbg(common, MCI, "MCI Send Coex version response\n");
+	MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+			MCI_GPM_COEX_VERSION_RESPONSE);
+	*(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+		mci->wlan_ver_major;
+	*(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+		mci->wlan_ver_minor;
+	ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+						  bool wait_done)
+{
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 *payload = &mci->wlan_channels[0];
+
+	if ((mci->wlan_channels_update == true) &&
+			(mci->bt_state != MCI_BT_SLEEP)) {
+		MCI_GPM_SET_TYPE_OPCODE(payload,
+		MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+		ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+					wait_done, true);
+		MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+	}
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+						bool wait_done, u8 query_type)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 payload[4] = {0, 0, 0, 0};
+	bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+					     MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+	if (mci->bt_state != MCI_BT_SLEEP) {
+
+		ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n",
+			query_type);
+
+		MCI_GPM_SET_TYPE_OPCODE(payload,
+				MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+		*(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+		/*
+		 * If the bt_status_query message is not sent successfully,
+		 * then need_flush_btinfo should be set again.
+		 */
+		if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+					     wait_done, true)) {
+			if (query_btinfo) {
+				mci->need_flush_btinfo = true;
+
+				ath_dbg(common, MCI,
+					"MCI send bt_status_query fail, set flush flag again\n");
+			}
+		}
+
+		if (query_btinfo)
+			mci->query_bt = false;
+	}
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+				      bool wait_done)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 payload[4] = {0, 0, 0, 0};
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n",
+		(halt) ? "halt" : "unhalt");
+
+	MCI_GPM_SET_TYPE_OPCODE(payload,
+				MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+	if (halt) {
+		mci->query_bt = true;
+		/* Send the next unhalt regardless of whether the halt was sent */
+		mci->unhalt_bt_gpm = true;
+		mci->need_flush_btinfo = true;
+		*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+			MCI_GPM_COEX_BT_GPM_HALT;
+	} else
+		*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+			MCI_GPM_COEX_BT_GPM_UNHALT;
+
+	ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 saved_mci_int_en;
+	u32 mci_timeout = 150;
+
+	mci->bt_state = MCI_BT_SLEEP;
+	saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+		  REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+		  REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+	/* Remote Reset */
+	ath_dbg(common, MCI, "MCI Reset sequence start\n");
+	ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+	ar9003_mci_remote_reset(ah, true);
+
+	/*
+	 * This delay is required for the worst-case reset delay value of 255
+	 * in the MCI_COMMAND2 register.
+	 */
+
+	if (AR_SREV_9462_10(ah))
+		udelay(252);
+
+	ath_dbg(common, MCI, "MCI Send REQ_WAKE to remote (BT)\n");
+	ar9003_mci_send_req_wake(ah, true);
+
+	if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+				AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+		ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n");
+		mci->bt_state = MCI_BT_AWAKE;
+
+		if (AR_SREV_9462_10(ah))
+			udelay(10);
+		/*
+		 * We don't need to send another remote_reset at this point.
+		 * If BT received the first remote_reset, its HW has been
+		 * cleaned up, it is able to receive the req_wake, and BT HW
+		 * will respond with sys_waking; WLAN then receives BT's HW
+		 * sys_waking.
+		 * Otherwise, if BT SW missed the initial remote_reset, that
+		 * remote_reset still cleans up BT MCI RX, the req_wake wakes
+		 * BT up, and BT SW responds to the req_wake with a
+		 * remote_reset and sys_waking; WLAN then receives BT's SW
+		 * sys_waking.
+		 * In either case BT's RX is cleaned up, so we don't need to
+		 * reply to BT's remote_reset now, if any. Similarly, whenever
+		 * WLAN can receive BT's sys_waking, that means WLAN's RX is
+		 * also fine.
+		 */
+
+		/* Send SYS_WAKING to BT */
+
+		ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n");
+
+		ar9003_mci_send_sys_waking(ah, true);
+		udelay(10);
+
+		/*
+		 * Set BT priority interrupt value to be 0xff to
+		 * avoid having too many BT PRIORITY interrupts.
+		 */
+
+		REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+		REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+		REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+		REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+		REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+		/*
+		 * A contention reset will be received after sending out
+		 * sys_waking. Also BT priority interrupt bits will be set.
+		 * Clear those bits before the next step.
+		 */
+
+		REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+			  AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+		REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+			  AR_MCI_INTERRUPT_BT_PRI);
+
+		if (AR_SREV_9462_10(ah) || mci->is_2g) {
+			/* Send LNA_TRANS */
+			ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n");
+			ar9003_mci_send_lna_transfer(ah, true);
+			udelay(5);
+		}
+
+		if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+					    !mci->update_2g5g)) {
+			if (ar9003_mci_wait_for_interrupt(ah,
+				AR_MCI_INTERRUPT_RX_MSG_RAW,
+				AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+				mci_timeout))
+				ath_dbg(common, MCI,
+					"MCI WLAN has control over the LNA & BT obeys it\n");
+			else
+				ath_dbg(common, MCI,
+					"MCI BT didn't respond to LNA_TRANS\n");
+		}
+
+		if (AR_SREV_9462_10(ah)) {
+			/* Send another remote_reset to deassert BT clk_req. */
+			ath_dbg(common, MCI,
+				"MCI another remote_reset to deassert clk_req\n");
+			ar9003_mci_remote_reset(ah, true);
+			udelay(252);
+		}
+	}
+
+	/* Clear the extra redundant SYS_WAKING from BT */
+	if ((mci->bt_state == MCI_BT_AWAKE) &&
+		(REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+				AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+		(REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+				AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+			REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+				  AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+			REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+				  AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+	}
+
+	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+		  AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+	u32 intr;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return false;
+
+	intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+	return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+			      u32 *rx_msg_intr)
+{
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	*raw_intr = mci->raw_intr;
+	*rx_msg_intr = mci->rx_msg_intr;
+
+	/* Clean int bits after the values are read. */
+	mci->raw_intr = 0;
+	mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	if (!mci->update_2g5g &&
+	    (mci->is_2g != is_2g))
+		mci->update_2g5g = true;
+
+	mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 *payload;
+	u32 recv_type, offset;
+
+	if (msg_index == MCI_GPM_INVALID)
+		return false;
+
+	offset = msg_index << 4;
+
+	payload = (u32 *)(mci->gpm_buf + offset);
+	recv_type = MCI_GPM_TYPE(payload);
+
+	if (recv_type == MCI_GPM_RSVD_PATTERN) {
+		ath_dbg(common, MCI, "MCI Skip RSVD GPM\n");
+		return false;
+	}
+
+	return true;
+}
+
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+		ath9k_hw_cfg_output(ah, 3,
+					AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+	} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+		ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+	} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+	} else
+		return;
+
+	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+	if (AR_SREV_9462_20_OR_LATER(ah)) {
+		REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+			      AR_GLB_DS_JTAG_DISABLE, 1);
+		REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+			      AR_GLB_WLAN_UART_INTF_EN, 0);
+		REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+			    ATH_MCI_CONFIG_MCI_OBS_GPIO);
+	}
+
+	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+	REG_WRITE(ah, AR_OBS, 0x4b);
+	REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+	REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+	REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+	REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+	REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+		      AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+						u8 opcode, u32 bt_flags)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 pld[4] = {0, 0, 0, 0};
+
+	MCI_GPM_SET_TYPE_OPCODE(pld,
+			MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+	*(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP)  = opcode;
+	*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+	*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+	*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+	*(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+	ath_dbg(common, MCI,
+		"MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+		opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" :
+		opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR",
+		bt_flags);
+
+	return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+							wait_done, true);
+}
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+		      bool is_full_sleep)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 regval, thresh;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n",
+		is_full_sleep, is_2g);
+
+	/*
+	 * GPM buffer and scheduling message buffer are not allocated
+	 */
+
+	if (!mci->gpm_addr && !mci->sched_addr) {
+		ath_dbg(common, MCI,
+			"MCI GPM and schedule buffers are not allocated\n");
+		return;
+	}
+
+	if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+		ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n");
+		return;
+	}
+
+	/* Program MCI DMA related registers */
+	REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+	REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+	REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+	/*
+	 * To avoid the MCI state machine being affected by incoming remote MCI
+	 * msgs, MCI mode is enabled later, right before resetting MCI TX and RX.
+	 */
+
+	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+		 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+		 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+	if (is_2g && (AR_SREV_9462_20(ah)) &&
+		!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+		regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+		ath_dbg(common, MCI, "MCI sched one step look ahead\n");
+
+		if (!(mci->config &
+		      ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
+			thresh = MS(mci->config,
+				    ATH_MCI_CONFIG_AGGR_THRESH);
+			thresh &= 7;
+			regval |= SM(1,
+				     AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+			regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+			REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+				      AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+			REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+				      AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+		} else
+			ath_dbg(common, MCI, "MCI sched aggr thresh: off\n");
+	} else
+		ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n");
+
+	if (AR_SREV_9462_10(ah))
+		regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+	if (AR_SREV_9462_20(ah)) {
+		REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+			    AR_BTCOEX_CTRL_SPDT_ENABLE);
+		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+			      AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+	}
+
+	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+	thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+	REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+	REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+	/* Resetting the Rx and Tx paths of MCI */
+	regval = REG_READ(ah, AR_MCI_COMMAND2);
+	regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+	REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+	udelay(1);
+
+	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+	REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+	if (is_full_sleep) {
+		ar9003_mci_mute_bt(ah);
+		udelay(100);
+	}
+
+	regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+	REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+	udelay(1);
+	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+	REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+	ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+		  (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+		   SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+	REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+			AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+	if (AR_SREV_9462_20_OR_LATER(ah))
+		ar9003_mci_observation_set_up(ah);
+
+	mci->ready = true;
+	ar9003_mci_prep_interface(ah);
+
+	if (en_int)
+		ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	/* disable all MCI messages */
+	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+	REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+	/* wait for pending HW messages to flush out */
+	udelay(10);
+
+	/*
+	 * Send LNA_TAKE and SYS_SLEEPING when
+	 * 1. the reset is not one after resuming from full sleep, and
+	 * 2. before resetting MCI RX, to quiet BT and avoid MCI RX misalignment
+	 */
+
+	ath_dbg(common, MCI, "MCI Send LNA take\n");
+	ar9003_mci_send_lna_take(ah, true);
+
+	udelay(5);
+
+	ath_dbg(common, MCI, "MCI Send sys sleeping\n");
+	ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 cur_bt_state;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+	if (mci->bt_state != cur_bt_state) {
+		ath_dbg(common, MCI,
+			"MCI BT state mismatches. old: %d, new: %d\n",
+			mci->bt_state, cur_bt_state);
+		mci->bt_state = cur_bt_state;
+	}
+
+	if (mci->bt_state != MCI_BT_SLEEP) {
+
+		ar9003_mci_send_coex_version_query(ah, true);
+		ar9003_mci_send_coex_wlan_channels(ah, true);
+
+		if (mci->unhalt_bt_gpm == true) {
+			ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+			ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+		}
+	}
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 new_flags, to_set, to_clear;
+
+	if (AR_SREV_9462_20(ah) &&
+	    mci->update_2g5g &&
+	    (mci->bt_state != MCI_BT_SLEEP)) {
+
+		if (mci->is_2g) {
+			new_flags = MCI_2G_FLAGS;
+			to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+			to_set = MCI_2G_FLAGS_SET_MASK;
+		} else {
+			new_flags = MCI_5G_FLAGS;
+			to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+			to_set = MCI_5G_FLAGS_SET_MASK;
+		}
+
+		ath_dbg(common, MCI,
+			"MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+		mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+		if (to_clear)
+			ar9003_mci_send_coex_bt_flags(ah, wait_done,
+					MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+		if (to_set)
+			ar9003_mci_send_coex_bt_flags(ah, wait_done,
+					MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+	}
+
+	if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+		mci->update_2g5g = false;
+}
+
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+					u32 *payload, bool queue)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u8 type, opcode;
+
+	if (queue) {
+
+		if (payload)
+			ath_dbg(common, MCI,
+				"MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+				header,
+				*(((u8 *)payload) + 4),
+				*(((u8 *)payload) + 5),
+				*(((u8 *)payload) + 6));
+		else
+			ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n",
+				header);
+	}
+
+	/* check if the message is to be queued */
+	if (header != MCI_GPM)
+		return;
+
+	type = MCI_GPM_TYPE(payload);
+	opcode = MCI_GPM_OPCODE(payload);
+
+	if (type != MCI_GPM_COEX_AGENT)
+		return;
+
+	switch (opcode) {
+	case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+		if (AR_SREV_9462_10(ah))
+			break;
+
+		if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+				MCI_GPM_COEX_BT_FLAGS_READ)
+			break;
+
+		mci->update_2g5g = queue;
+
+		if (queue)
+			ath_dbg(common, MCI,
+				"MCI BT_MCI_FLAGS: 2G5G status <queued> %s\n",
+				mci->is_2g ? "2G" : "5G");
+		else
+			ath_dbg(common, MCI,
+				"MCI BT_MCI_FLAGS: 2G5G status <sent> %s\n",
+				mci->is_2g ? "2G" : "5G");
+
+		break;
+
+	case MCI_GPM_COEX_WLAN_CHANNELS:
+
+		mci->wlan_channels_update = queue;
+		if (queue)
+			ath_dbg(common, MCI, "MCI WLAN channel map <queued>\n");
+		else
+			ath_dbg(common, MCI, "MCI WLAN channel map <sent>\n");
+		break;
+
+	case MCI_GPM_COEX_HALT_BT_GPM:
+
+		if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+				MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+			mci->unhalt_bt_gpm = queue;
+
+			if (queue)
+				ath_dbg(common, MCI,
+					"MCI UNHALT BT GPM <queued>\n");
+			else {
+				mci->halted_bt_gpm = false;
+				ath_dbg(common, MCI,
+					"MCI UNHALT BT GPM <sent>\n");
+			}
+		}
+
+		if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+				MCI_GPM_COEX_BT_GPM_HALT) {
+
+			mci->halted_bt_gpm = !queue;
+
+			if (queue)
+				ath_dbg(common, MCI,
+					"MCI HALT BT GPM <not sent>\n");
+			else
+				ath_dbg(common, MCI,
+					"MCI HALT BT GPM <sent>\n");
+		}
+
+		break;
+	default:
+		break;
+	}
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	if (mci->update_2g5g) {
+		if (mci->is_2g) {
+
+			ar9003_mci_send_2g5g_status(ah, true);
+			ath_dbg(common, MCI, "MCI Send LNA trans\n");
+			ar9003_mci_send_lna_transfer(ah, true);
+			udelay(5);
+
+			REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+				    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+			if (AR_SREV_9462_20(ah)) {
+				REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+					    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+				if (!(mci->config &
+				      ATH_MCI_CONFIG_DISABLE_OSLA)) {
+					REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+					AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+				}
+			}
+		} else {
+			ath_dbg(common, MCI, "MCI Send LNA take\n");
+			ar9003_mci_send_lna_take(ah, true);
+			udelay(5);
+
+			REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+				    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+			if (AR_SREV_9462_20(ah)) {
+				REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+					    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+				REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+					AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+			}
+
+			ar9003_mci_send_2g5g_status(ah, true);
+		}
+	}
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+			     u32 *payload, u8 len, bool wait_done,
+			     bool check_bt)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	bool msg_sent = false;
+	u32 regval;
+	u32 saved_mci_int_en;
+	int i;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return false;
+
+	saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+	regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+	if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+		ath_dbg(common, MCI,
+			"MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
+			header,
+			(ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+		return false;
+
+	} else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+		ath_dbg(common, MCI,
+			"MCI Don't send message 0x%x. BT is in sleep state\n",
+			header);
+
+		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+		return false;
+	}
+
+	if (wait_done)
+		REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+	/* Need to clear SW_MSG_DONE raw bit before wait */
+
+	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+		  (AR_MCI_INTERRUPT_SW_MSG_DONE |
+		   AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+	if (payload) {
+		for (i = 0; (i * 4) < len; i++)
+			REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+				  *(payload + i));
+	}
+
+	REG_WRITE(ah, AR_MCI_COMMAND0,
+		  (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+		      AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+		   SM(len, AR_MCI_COMMAND0_LEN) |
+		   SM(header, AR_MCI_COMMAND0_HEADER)));
+
+	if (wait_done &&
+	    !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+					AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+	else {
+		ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+		msg_sent = true;
+	}
+
+	if (wait_done)
+		REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+	return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
+
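/*
 * Editor's note (not part of the patch): ar9003_mci_send_message() above
 * writes the 16-byte payload into AR_MCI_TX_PAYLOAD0.., kicks
 * AR_MCI_COMMAND0 and, when wait_done is set, masks MCI interrupts while
 * polling for SW_MSG_DONE; on failure the message is recorded by
 * ar9003_mci_queue_unsent_gpm() so the 2G/5G flags, WLAN channel map or
 * unhalt request can be replayed later.  GPM payloads are plain byte
 * arrays addressed through *((u8 *)payload + offset); the sketch below
 * shows that packing for one message, the coex version response, as an
 * example (the wrapper name is hypothetical).
 */
static bool mci_pack_and_send_version_response(struct ath_hw *ah,
					       bool wait_done)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 payload[4] = {0, 0, 0, 0};
	u8 *p = (u8 *)payload;

	MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_VERSION_RESPONSE);
	p[MCI_GPM_COEX_B_MAJOR_VERSION] = mci->wlan_ver_major;
	p[MCI_GPM_COEX_B_MINOR_VERSION] = mci->wlan_ver_minor;

	/* 16 bytes, wait for SW_MSG_DONE, and skip if BT is asleep. */
	return ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
				       wait_done, true);
}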
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+		      u16 len, u32 sched_addr)
+{
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	mci->gpm_addr = gpm_addr;
+	mci->gpm_buf = gpm_buf;
+	mci->gpm_len = len;
+	mci->sched_addr = sched_addr;
+	mci->sched_buf = sched_buf;
+
+	ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (!ATH9K_HW_CAP_MCI)
+		return;
+
+	/* Turn off MCI and Jupiter mode. */
+	REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+	ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n");
+	ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+					 u8 gpm_opcode, u32 *p_gpm)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u8 *p_data = (u8 *) p_gpm;
+
+	if (gpm_type != MCI_GPM_COEX_AGENT)
+		return;
+
+	switch (gpm_opcode) {
+	case MCI_GPM_COEX_VERSION_QUERY:
+		ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+		ar9003_mci_send_coex_version_response(ah, true);
+		break;
+	case MCI_GPM_COEX_VERSION_RESPONSE:
+		ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+		mci->bt_ver_major =
+			*(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+		mci->bt_ver_minor =
+			*(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+		mci->bt_version_known = true;
+		ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+			mci->bt_ver_major, mci->bt_ver_minor);
+		break;
+	case MCI_GPM_COEX_STATUS_QUERY:
+		ath_dbg(common, MCI,
+			"MCI Recv GPM COEX Status Query = 0x%02X\n",
+			*(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+		mci->wlan_channels_update = true;
+		ar9003_mci_send_coex_wlan_channels(ah, true);
+		break;
+	case MCI_GPM_COEX_BT_PROFILE_INFO:
+		mci->query_bt = true;
+		ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
+		break;
+	case MCI_GPM_COEX_BT_STATUS_UPDATE:
+		mci->query_bt = true;
+		ath_dbg(common, MCI,
+			"MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
+			*(p_gpm + 3));
+		break;
+	default:
+		break;
+	}
+}
+
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+			    u8 gpm_opcode, int time_out)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 *p_gpm = NULL, mismatch = 0, more_data;
+	u32 offset;
+	u8 recv_type = 0, recv_opcode = 0;
+	bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+	if (!ATH9K_HW_CAP_MCI)
+		return 0;
+
+	more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+	while (time_out > 0) {
+		if (p_gpm) {
+			MCI_GPM_RECYCLE(p_gpm);
+			p_gpm = NULL;
+		}
+
+		if (more_data != MCI_GPM_MORE)
+			time_out = ar9003_mci_wait_for_interrupt(ah,
+					AR_MCI_INTERRUPT_RX_MSG_RAW,
+					AR_MCI_INTERRUPT_RX_MSG_GPM,
+					time_out);
+
+		if (!time_out)
+			break;
+
+		offset = ar9003_mci_state(ah,
+				MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+		if (offset == MCI_GPM_INVALID)
+			continue;
+
+		p_gpm = (u32 *) (mci->gpm_buf + offset);
+		recv_type = MCI_GPM_TYPE(p_gpm);
+		recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+		if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+			if (recv_type == gpm_type) {
+
+				if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+				    !b_is_bt_cal_done) {
+					gpm_type = MCI_GPM_BT_CAL_GRANT;
+					ath_dbg(common, MCI,
+						"MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n");
+					continue;
+				}
+
+				break;
+			}
+		} else if ((recv_type == gpm_type) &&
+			   (recv_opcode == gpm_opcode))
+			break;
+
+		/* not expected message */
+
+		/*
+		 * check if it's cal_grant
+		 *
+		 * When we're waiting for cal_grant in the reset routine,
+		 * it's possible that BT sends out cal_request at the
+		 * same time. Since BT's calibration doesn't happen
+		 * that often, we let BT complete its calibration and then
+		 * continue to wait for cal_grant from BT.
+		 * Original: wait for BT_CAL_GRANT.
+		 * New: receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait for
+		 * BT_CAL_DONE -> wait for BT_CAL_GRANT.
+		 */
+
+		if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+		    (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+			u32 payload[4] = {0, 0, 0, 0};
+
+			gpm_type = MCI_GPM_BT_CAL_DONE;
+			ath_dbg(common, MCI,
+				"MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+			MCI_GPM_SET_CAL_TYPE(payload,
+					MCI_GPM_WLAN_CAL_GRANT);
+
+			ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+						false, false);
+
+			ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n");
+
+			continue;
+		} else {
+			ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
+				*(p_gpm + 1));
+			mismatch++;
+			ar9003_mci_process_gpm_extra(ah, recv_type,
+					recv_opcode, p_gpm);
+		}
+	}
+	if (p_gpm) {
+		MCI_GPM_RECYCLE(p_gpm);
+		p_gpm = NULL;
+	}
+
+	if (time_out <= 0) {
+		time_out = 0;
+		ath_dbg(common, MCI,
+			"MCI GPM received timeout, mismatch = %d\n", mismatch);
+	} else
+		ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n",
+			gpm_type, gpm_opcode);
+
+	while (more_data == MCI_GPM_MORE) {
+
+		ath_dbg(common, MCI, "MCI discard remaining GPM\n");
+		offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+					  &more_data);
+
+		if (offset == MCI_GPM_INVALID)
+			break;
+
+		p_gpm = (u32 *) (mci->gpm_buf + offset);
+		recv_type = MCI_GPM_TYPE(p_gpm);
+		recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+		if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+			ar9003_mci_process_gpm_extra(ah, recv_type,
+						     recv_opcode, p_gpm);
+
+		MCI_GPM_RECYCLE(p_gpm);
+	}
+
+	return time_out;
+}
+
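/*
 * Editor's note (not part of the patch): the MCI_STATE_NEXT_GPM_OFFSET
 * case in ar9003_mci_state() below walks a circular GPM buffer.  The
 * hardware write pointer comes from AR_MCI_GPM_1, the software read index
 * is mci->gpm_idx, each message occupies 16 bytes (hence the final
 * "value <<= 4"), and reserved-pattern entries are skipped.  What follows
 * is a stripped-down sketch of just the index arithmetic, ignoring the
 * interrupt acknowledgement and the 0xFFFF "pointer invalid" case.
 */
static u32 mci_next_gpm_offset(struct ath9k_hw_mci *mci, u32 write_ptr,
			       u32 *more)
{
	u32 idx;

	if (write_ptr == mci->gpm_idx) {
		/* Nothing new has been written since the last read. */
		*more = MCI_GPM_NOMORE;
		return MCI_GPM_INVALID;
	}

	idx = mci->gpm_idx;
	if (++mci->gpm_idx >= mci->gpm_len)
		mci->gpm_idx = 0;		/* wrap around the ring */

	*more = (mci->gpm_idx == write_ptr) ? MCI_GPM_NOMORE : MCI_GPM_MORE;
	return idx << 4;			/* byte offset into gpm_buf */
}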
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+	u32 value = 0, more_gpm = 0, gpm_ptr;
+	u8 query_type;
+
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+		return 0;
+
+	switch (state_type) {
+	case MCI_STATE_ENABLE:
+		if (mci->ready) {
+
+			value = REG_READ(ah, AR_BTCOEX_CTRL);
+
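+			/* 0xdeadbeef / all-ones indicate a bogus register read */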
+			if ((value == 0xdeadbeef) || (value == 0xffffffff))
+				value = 0;
+		}
+		value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+		break;
+	case MCI_STATE_INIT_GPM_OFFSET:
+		value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+		ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value);
+		mci->gpm_idx = value;
+		break;
+	case MCI_STATE_NEXT_GPM_OFFSET:
+	case MCI_STATE_LAST_GPM_OFFSET:
+		/*
+		 * This could be useful to avoid a new GPM message interrupt,
+		 * which may lead to a spurious interrupt after power sleep or
+		 * to multiple entries of ath_mci_intr().
+		 * Adding an empty-GPM check by returning HAL_MCI_GPM_INVALID
+		 * can alleviate this effect, but clearing the GPM RX interrupt
+		 * bit is safe, because whether this is called from hw or
+		 * driver code there must be an interrupt bit set/triggered
+		 * initially.
+		 */
+		REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+			  AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+		gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+		value = gpm_ptr;
+
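+		/*
+		 * The write pointer points one past the most recently written
+		 * GPM entry; step back one slot, wrapping around the buffer.
+		 */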
+		if (value == 0)
+			value = mci->gpm_len - 1;
+		else if (value >= mci->gpm_len) {
+			if (value != 0xFFFF) {
+				value = 0;
+				ath_dbg(common, MCI,
+					"MCI GPM offset out of range\n");
+			}
+		} else
+			value--;
+
+		if (value == 0xFFFF) {
+			value = MCI_GPM_INVALID;
+			more_gpm = MCI_GPM_NOMORE;
+			ath_dbg(common, MCI,
+				"MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n",
+				gpm_ptr, value);
+		} else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+			if (gpm_ptr == mci->gpm_idx) {
+				value = MCI_GPM_INVALID;
+				more_gpm = MCI_GPM_NOMORE;
+
+				ath_dbg(common, MCI,
+					"MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n",
+					gpm_ptr, value);
+			} else {
+				for (;;) {
+
+					u32 temp_index;
+
+					/* skip reserved GPM if any */
+
+					if (value != mci->gpm_idx)
+						more_gpm = MCI_GPM_MORE;
+					else
+						more_gpm = MCI_GPM_NOMORE;
+
+					temp_index = mci->gpm_idx;
+					mci->gpm_idx++;
+
+					if (mci->gpm_idx >=
+					    mci->gpm_len)
+						mci->gpm_idx = 0;
+
+					ath_dbg(common, MCI,
+						"MCI GPM message got ptr=%d, @offset=%d, more=%d\n",
+						gpm_ptr, temp_index,
+						(more_gpm == MCI_GPM_MORE));
+
+					if (ar9003_mci_is_gpm_valid(ah,
+								temp_index)) {
+						value = temp_index;
+						break;
+					}
+
+					if (more_gpm == MCI_GPM_NOMORE) {
+						value = MCI_GPM_INVALID;
+						break;
+					}
+				}
+			}
+			if (p_data)
+				*p_data = more_gpm;
+		}
+
+		if (value != MCI_GPM_INVALID)
+			value <<= 4;
+
+		break;
+	case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+		value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+				    AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+		/* Make it in bytes */
+		value <<= 4;
+		break;
+
+	case MCI_STATE_REMOTE_SLEEP:
+		value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+			   AR_MCI_RX_REMOTE_SLEEP) ?
+			MCI_BT_SLEEP : MCI_BT_AWAKE;
+		break;
+
+	case MCI_STATE_CONT_RSSI_POWER:
+		value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+		break;
+
+	case MCI_STATE_CONT_PRIORITY:
+		value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+		break;
+
+	case MCI_STATE_CONT_TXRX:
+		value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+		break;
+
+	case MCI_STATE_BT:
+		value = mci->bt_state;
+		break;
+
+	case MCI_STATE_SET_BT_SLEEP:
+		mci->bt_state = MCI_BT_SLEEP;
+		break;
+
+	case MCI_STATE_SET_BT_AWAKE:
+		mci->bt_state = MCI_BT_AWAKE;
+		ar9003_mci_send_coex_version_query(ah, true);
+		ar9003_mci_send_coex_wlan_channels(ah, true);
+
+		if (mci->unhalt_bt_gpm) {
+
+			ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+			ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+		}
+
+		ar9003_mci_2g5g_switch(ah, true);
+		break;
+
+	case MCI_STATE_SET_BT_CAL_START:
+		mci->bt_state = MCI_BT_CAL_START;
+		break;
+
+	case MCI_STATE_SET_BT_CAL:
+		mci->bt_state = MCI_BT_CAL;
+		break;
+
+	case MCI_STATE_RESET_REQ_WAKE:
+		ar9003_mci_reset_req_wakeup(ah);
+		mci->update_2g5g = true;
+
+		if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+		    (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+			/* Check if we still have control of the GPIOs */
+			if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+				      ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+					ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+				ath_dbg(common, MCI,
+					"MCI reconfigure observation\n");
+				ar9003_mci_observation_set_up(ah);
+			}
+		}
+		break;
+
+	case MCI_STATE_SEND_WLAN_COEX_VERSION:
+		ar9003_mci_send_coex_version_response(ah, true);
+		break;
+
+	case MCI_STATE_SET_BT_COEX_VERSION:
+
+		if (!p_data)
+			ath_dbg(common, MCI,
+				"MCI Set BT Coex version with NULL data!!\n");
+		else {
+			mci->bt_ver_major = (*p_data >> 8) & 0xff;
+			mci->bt_ver_minor = (*p_data) & 0xff;
+			mci->bt_version_known = true;
+			ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
+				mci->bt_ver_major, mci->bt_ver_minor);
+		}
+		break;
+
+	case MCI_STATE_SEND_WLAN_CHANNELS:
+		if (p_data) {
+			if (((mci->wlan_channels[1] & 0xffff0000) ==
+			     (*(p_data + 1) & 0xffff0000)) &&
+			    (mci->wlan_channels[2] == *(p_data + 2)) &&
+			    (mci->wlan_channels[3] == *(p_data + 3)))
+				break;
+
+			mci->wlan_channels[0] = *p_data++;
+			mci->wlan_channels[1] = *p_data++;
+			mci->wlan_channels[2] = *p_data++;
+			mci->wlan_channels[3] = *p_data++;
+		}
+		mci->wlan_channels_update = true;
+		ar9003_mci_send_coex_wlan_channels(ah, true);
+		break;
+
+	case MCI_STATE_SEND_VERSION_QUERY:
+		ar9003_mci_send_coex_version_query(ah, true);
+		break;
+
+	case MCI_STATE_SEND_STATUS_QUERY:
+		query_type = (AR_SREV_9462_10(ah)) ?
+				MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+				MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+		ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+		break;
+
+	case MCI_STATE_NEED_FLUSH_BT_INFO:
+		/*
+		 * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT
+		 * message needs to be sent. It is set whenever there is a
+		 * request to send a HALT message. mci_halted_bt_gpm indicates
+		 * whether the HALT message was sent out successfully.
+		 *
+		 * Checking (mci_unhalt_bt_gpm == false) instead of
+		 * (ah->mci_halted_bt_gpm == false) makes sure we are currently
+		 * in UNHALT-ed mode and BT can respond to the status query.
+		 */
+		value = (!mci->unhalt_bt_gpm &&
+			 mci->need_flush_btinfo) ? 1 : 0;
+		if (p_data)
+			mci->need_flush_btinfo =
+				(*p_data != 0) ? true : false;
+		break;
+
+	case MCI_STATE_RECOVER_RX:
+
+		ath_dbg(common, MCI, "MCI hw RECOVER_RX\n");
+		ar9003_mci_prep_interface(ah);
+		mci->query_bt = true;
+		mci->need_flush_btinfo = true;
+		ar9003_mci_send_coex_wlan_channels(ah, true);
+		ar9003_mci_2g5g_switch(ah, true);
+		break;
+
+	case MCI_STATE_NEED_FTP_STOMP:
+		value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+		break;
+
+	case MCI_STATE_NEED_TUNING:
+		value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+		break;
+
+	default:
+		break;
+
+	}
+
+	return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 0000000..798da11
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP      0x00000001      /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT  3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT  0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN     3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN     0
+
+enum mci_gpm_coex_query_type {
+	MCI_GPM_COEX_QUERY_BT_ALL_INFO      = BIT(0),
+	MCI_GPM_COEX_QUERY_BT_TOPOLOGY      = BIT(1),
+	MCI_GPM_COEX_QUERY_BT_DEBUG         = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+	MCI_GPM_COEX_BT_GPM_UNHALT,
+	MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+	MCI_GPM_COEX_BT_FLAGS_READ,
+	MCI_GPM_COEX_BT_FLAGS_SET,
+	MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS     79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR          0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR           0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD           0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL             0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG                0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG            0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG             0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM             0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG          0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE             0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE          0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER                0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS              0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS  (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+				  MCI_BT_MCI_FLAGS_UPDATE_HDR  | \
+				  MCI_BT_MCI_FLAGS_UPDATE_PLD  | \
+				  MCI_BT_MCI_FLAGS_MCI_MODE)
+
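+/*
+ * The toggle flags above are set on top of the defaults in 2 GHz mode and
+ * cleared from them in 5 GHz mode.
+ */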
+#define MCI_2G_FLAGS_CLEAR_MASK   0x00000000
+#define MCI_2G_FLAGS_SET_MASK     MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS              MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK   MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK     0x00000000
+#define MCI_5G_FLAGS              (MCI_DEFAULT_BT_MCI_FLAGS & \
+				   ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX            0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI          0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX         0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT           0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL      0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA         0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP    0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH          0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S        8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH  0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV              0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S            12
+#define ATH_MCI_CONFIG_DISABLE_TUNING       0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG       0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI          0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK     (ATH_MCI_CONFIG_MCI_OBS_MCI  | \
+					 ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+					 ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO     0x0000002F
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index a4450cb..59647a3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -119,8 +119,8 @@
 		break;
 	default:
 		delta = 0;
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-		"Invalid tx-chainmask: %u\n", ah->txchainmask);
+		ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n",
+			ah->txchainmask);
 	}
 
 	power += delta;
@@ -148,13 +148,12 @@
 	else
 		training_power = ar9003_get_training_power_5g(ah);
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"Training power: %d, Target power: %d\n",
+	ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n",
 		training_power, ah->paprd_target_power);
 
 	if (training_power < 0) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"PAPRD target power delta out of range");
+		ath_dbg(common, CALIBRATE,
+			"PAPRD target power delta out of range\n");
 		return -ERANGE;
 	}
 	ah->paprd_training_power = training_power;
@@ -311,8 +310,8 @@
 		reg_cl_gain = AR_PHY_CL_TAB_2;
 		break;
 	default:
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
-		"Invalid chainmask: %d\n", chain);
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+			"Invalid chainmask: %d\n", chain);
 		break;
 	}
 
@@ -850,7 +849,7 @@
 		agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
 				AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
 
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE,
 			"AGC2_PWR = 0x%x training done = 0x%x\n",
 			agc2_pwr, paprd_done);
 	/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 2330e7e..2589b38 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -199,12 +199,14 @@
 			synth_freq = chan->channel;
 		}
 	} else {
-		range = 10;
+		range = AR_SREV_9462(ah) ? 5 : 10;
 		max_spur_cnts = 4;
 		synth_freq = chan->channel;
 	}
 
 	for (i = 0; i < max_spur_cnts; i++) {
+		if (AR_SREV_9462(ah) && (i == 0 || i == 3))
+			continue;
 		negative = 0;
 		if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
 			cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
@@ -880,7 +882,7 @@
 				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
 
 		if (!on != aniState->ofdmWeakSigDetectOff) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: ofdm weak signal: %s=>%s\n",
 				chan->channel,
 				!aniState->ofdmWeakSigDetectOff ?
@@ -898,7 +900,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(firstep_table)) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(firstep_table));
 			return false;
@@ -935,7 +937,7 @@
 			      AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
 
 		if (level != aniState->firstepLevel) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->firstepLevel,
@@ -943,7 +945,7 @@
 				ATH9K_ANI_FIRSTEP_LVL_NEW,
 				value,
 				aniState->iniDef.firstep);
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->firstepLevel,
@@ -963,7 +965,7 @@
 		u32 level = param;
 
 		if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
 				level, ARRAY_SIZE(cycpwrThr1_table));
 			return false;
@@ -999,7 +1001,7 @@
 			      AR_PHY_EXT_CYCPWR_THR1, value2);
 
 		if (level != aniState->spurImmunityLevel) {
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->spurImmunityLevel,
@@ -1007,7 +1009,7 @@
 				ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
 				value,
 				aniState->iniDef.cycpwrThr1);
-			ath_dbg(common, ATH_DBG_ANI,
+			ath_dbg(common, ANI,
 				"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
 				chan->channel,
 				aniState->spurImmunityLevel,
@@ -1034,8 +1036,7 @@
 		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
 			      AR_PHY_MRC_CCK_MUX_REG, is_on);
 		if (!is_on != aniState->mrcCCKOff) {
-			ath_dbg(common, ATH_DBG_ANI,
-				"** ch %d: MRC CCK: %s=>%s\n",
+			ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
 				chan->channel,
 				!aniState->mrcCCKOff ? "on" : "off",
 				is_on ? "on" : "off");
@@ -1050,11 +1051,11 @@
 	case ATH9K_ANI_PRESENT:
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+		ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
 		return false;
 	}
 
-	ath_dbg(common, ATH_DBG_ANI,
+	ath_dbg(common, ANI,
 		"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
 		aniState->spurImmunityLevel,
 		!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1123,8 +1124,7 @@
 	aniState = &ah->curchan->ani;
 	iniDef = &aniState->iniDef;
 
-	ath_dbg(common, ATH_DBG_ANI,
-		"ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+	ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
 		ah->hw_version.macVersion,
 		ah->hw_version.macRev,
 		ah->opmode,
@@ -1386,7 +1386,7 @@
 			  ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
 			    AR_PHY_WATCHDOG_IDLE_ENABLE));
 
-		ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+		ath_dbg(common, RESET, "Disabled BB Watchdog\n");
 		return;
 	}
 
@@ -1422,8 +1422,7 @@
 		  AR_PHY_WATCHDOG_IDLE_MASK |
 		  (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
 
-	ath_dbg(common, ATH_DBG_RESET,
-		"Enabled BB Watchdog timeout (%u ms)\n",
+	ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
 		idle_tmo_ms);
 }
 
@@ -1452,9 +1451,9 @@
 		return;
 
 	status = ah->bb_watchdog_last_status;
-	ath_dbg(common, ATH_DBG_RESET,
+	ath_dbg(common, RESET,
 		"\n==== BB update: BB status=0x%08x ====\n", status);
-	ath_dbg(common, ATH_DBG_RESET,
+	ath_dbg(common, RESET,
 		"** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
 		MS(status, AR_PHY_WATCHDOG_INFO),
 		MS(status, AR_PHY_WATCHDOG_DET_HANG),
@@ -1466,22 +1465,19 @@
 		MS(status, AR_PHY_WATCHDOG_AGC_SM),
 		MS(status, AR_PHY_WATCHDOG_SRCH_SM));
 
-	ath_dbg(common, ATH_DBG_RESET,
-		"** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+	ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
 		REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
 		REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
-	ath_dbg(common, ATH_DBG_RESET,
-		"** BB mode: BB_gen_controls=0x%08x **\n",
+	ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
 		REG_READ(ah, AR_PHY_GEN_CTRL));
 
 #define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
 	if (common->cc_survey.cycles)
-		ath_dbg(common, ATH_DBG_RESET,
+		ath_dbg(common, RESET,
 			"** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
 			PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
 
-	ath_dbg(common, ATH_DBG_RESET,
-		"==== BB update: done ====\n\n");
+	ath_dbg(common, RESET, "==== BB update: done ====\n\n");
 }
 EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 4114fe7..ed64114 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -389,6 +389,8 @@
 #define AR_PHY_DAG_CTRLCCK_RSSI_THR_S   10
 
 #define AR_PHY_RIFS_INIT_DELAY         0x3ff0000
+#define AR_PHY_AGC_QUICK_DROP       0x03c00000
+#define AR_PHY_AGC_QUICK_DROP_S     22
 #define AR_PHY_AGC_COARSE_LOW       0x00007F80
 #define AR_PHY_AGC_COARSE_LOW_S     7
 #define AR_PHY_AGC_COARSE_HIGH      0x003F8000
@@ -488,6 +490,8 @@
 #define AR_PHY_TEST_CTL_TSTADC_EN_S       8
 #define AR_PHY_TEST_CTL_RX_OBS_SEL        0x3C00
 #define AR_PHY_TEST_CTL_RX_OBS_SEL_S      10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL	  0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S	  29
 
 
 #define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
@@ -999,6 +1003,7 @@
 
 /* GLB Registers */
 #define AR_GLB_BASE	0x20000
+#define AR_GLB_GPIO_CONTROL	(AR_GLB_BASE)
 #define AR_PHY_GLB_CONTROL	(AR_GLB_BASE + 0x44)
 #define AR_GLB_SCRATCH(_ah)	(AR_GLB_BASE + \
 					(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 48803ee..458bedf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -16,6 +16,7 @@
 
 #include "hw.h"
 #include "ar9003_phy.h"
+#include "ar9003_rtt.h"
 
 #define RTT_RESTORE_TIMEOUT          1000
 #define RTT_ACCESS_TIMEOUT           100
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 9c51b39..dc2054f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -41,24 +41,24 @@
 
 static const u32 ar9462_2p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
-	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
-	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
-	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+	{0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+	{0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
 	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
 	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
 	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
 	{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
-	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
-	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
-	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+	{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
 	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
-	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+	{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
 	{0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@
 	{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+	{0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+	{0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+	{0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+	{0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+	{0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+	{0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+	{0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+	{0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
 	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
 	{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
 	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -688,8 +697,8 @@
 static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
-	{0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
-	{0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+	{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
 static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
@@ -717,8 +726,8 @@
 static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
-	{0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
-	{0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+	{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
 static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
@@ -1059,7 +1068,7 @@
 
 static const u32 ar9462_2p0_soc_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+	{0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
 };
 
 static const u32 ar9462_2p0_baseband_core[][2] = {
@@ -1107,11 +1116,11 @@
 	{0x00009e30, 0x06336f77},
 	{0x00009e34, 0x6af6532f},
 	{0x00009e38, 0x0cc80c00},
-	{0x00009e40, 0x0d261820},
+	{0x00009e40, 0x15262820},
 	{0x00009e4c, 0x00001004},
 	{0x00009e50, 0x00ff03f1},
-	{0x00009e54, 0xe4c355c7},
-	{0x00009e58, 0xfd897735},
+	{0x00009e54, 0xe4c555c2},
+	{0x00009e58, 0xfd857722},
 	{0x00009e5c, 0xe9198724},
 	{0x00009fc0, 0x803e4788},
 	{0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@
 	{0x0000a398, 0x001f0e0f},
 	{0x0000a39c, 0x0075393f},
 	{0x0000a3a0, 0xb79f6427},
-	{0x0000a3a4, 0x00000000},
-	{0x0000a3a8, 0xaaaaaaaa},
-	{0x0000a3ac, 0x3c466478},
 	{0x0000a3c0, 0x20202020},
 	{0x0000a3c4, 0x22222220},
 	{0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@
 	{0x0000a40c, 0x00820820},
 	{0x0000a414, 0x1ce739ce},
 	{0x0000a418, 0x2d001dce},
-	{0x0000a41c, 0x1ce739ce},
-	{0x0000a420, 0x000001ce},
-	{0x0000a424, 0x1ce739ce},
-	{0x0000a428, 0x000001ce},
-	{0x0000a42c, 0x1ce739ce},
-	{0x0000a430, 0x1ce739ce},
 	{0x0000a434, 0x00000000},
 	{0x0000a438, 0x00001801},
 	{0x0000a43c, 0x00100000},
@@ -1257,8 +1257,8 @@
 	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
 	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
 	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
-	{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+	{0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+	{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
 	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
 	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
 	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
@@ -1850,8 +1850,8 @@
 	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
 	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
 	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
-	{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+	{0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+	{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
 	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
 	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
 	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1c269f5..b30e9fc 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -25,6 +25,7 @@
 
 #include "debug.h"
 #include "common.h"
+#include "mci.h"
 
 /*
  * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -96,7 +97,7 @@
 #define bf_isampdu(bf)		(bf->bf_state.bf_type & BUF_AMPDU)
 #define bf_isaggr(bf)		(bf->bf_state.bf_type & BUF_AGGR)
 
-#define ATH_TXSTATUS_RING_SIZE 64
+#define ATH_TXSTATUS_RING_SIZE 512
 
 #define	DS2PHYS(_dd, _ds)						\
 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
@@ -158,6 +159,9 @@
 /* return block-ack bitmap index given sequence and starting sequence */
 #define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
 
+/* return the seqno for _start + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
+
 /* returns delimiter padding required given the packet length */
 #define ATH_AGGR_GET_NDELIM(_len)					\
        (((_len) >= ATH_AGGR_MINPLEN) ? 0 :                             \
@@ -192,6 +196,7 @@
 	u8 txq_headidx;
 	u8 txq_tailidx;
 	int pending_frames;
+	struct sk_buff_head complete_q;
 };
 
 struct ath_atx_ac {
@@ -237,6 +242,7 @@
 	struct ath_node *an;
 	struct ath_atx_ac *ac;
 	unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+	int bar_index;
 	u16 seq_start;
 	u16 seq_next;
 	u16 baw_size;
@@ -251,8 +257,9 @@
 struct ath_node {
 #ifdef CONFIG_ATH9K_DEBUGFS
 	struct list_head list; /* for sc->nodes */
-	struct ieee80211_sta *sta; /* station struct we're part of */
 #endif
+	struct ieee80211_sta *sta; /* station struct we're part of */
+	struct ieee80211_vif *vif; /* interface with which we're associated */
 	struct ath_atx_tid tid[WME_NUM_TID];
 	struct ath_atx_ac ac[WME_NUM_AC];
 	int ps_key;
@@ -274,7 +281,6 @@
 };
 
 #define ATH_TX_ERROR        0x01
-#define ATH_TX_BAR          0x02
 
 /**
  * @txq_map:  Index is mac80211 queue number.  This is
@@ -443,7 +449,9 @@
 	u32 btcoex_no_stomp; /* in usec */
 	u32 btcoex_period; /* in usec */
 	u32 btscan_no_stomp; /* in usec */
+	u32 duty_cycle;
 	struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
+	struct ath_mci_profile mci;
 };
 
 int ath_init_btcoex_timer(struct ath_softc *sc);
@@ -458,7 +466,7 @@
 #define ATH_LED_PIN_9287		8
 #define ATH_LED_PIN_9300		10
 #define ATH_LED_PIN_9485		6
-#define ATH_LED_PIN_9462		0
+#define ATH_LED_PIN_9462		4
 
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
@@ -538,7 +546,7 @@
 #define DEFAULT_CACHELINE       32
 #define ATH_REGCLASSIDS_MAX     10
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES      10
+#define ATH_MAX_SW_RETRIES      30
 #define ATH_CHAN_MAX            255
 
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
@@ -643,6 +651,7 @@
 	struct delayed_work tx_complete_work;
 	struct delayed_work hw_pll_work;
 	struct ath_btcoex btcoex;
+	struct ath_mci_coex mci_coex;
 
 	struct ath_descdma txsdma;
 
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a13cabb..b8967e4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -117,11 +117,10 @@
 	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	txctl.txq = sc->beacon.cabq;
 
-	ath_dbg(common, ATH_DBG_XMIT,
-		"transmitting CABQ packet, skb: %p\n", skb);
+	ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
-		ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n");
+		ath_dbg(common, XMIT, "CABQ TX failed\n");
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -204,7 +203,7 @@
 
 	if (skb && cabq_depth) {
 		if (sc->nvifs > 1) {
-			ath_dbg(common, ATH_DBG_BEACON,
+			ath_dbg(common, BEACON,
 				"Flushing previous cabq traffic\n");
 			ath_draintxq(sc, cabq, false);
 		}
@@ -297,7 +296,7 @@
 		tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
 		avp->tsf_adjust = cpu_to_le64(tsfadjust);
 
-		ath_dbg(common, ATH_DBG_BEACON,
+		ath_dbg(common, BEACON,
 			"stagger beacons, bslot %d intval %u tsfadjust %llu\n",
 			avp->av_bslot, intval, (unsigned long long)tsfadjust);
 
@@ -357,6 +356,7 @@
 	struct ath_buf *bf = NULL;
 	struct ieee80211_vif *vif;
 	struct ath_tx_status ts;
+	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
 	int slot;
 	u32 bfaddr, bc = 0;
 
@@ -371,15 +371,14 @@
 		sc->beacon.bmisscnt++;
 
 		if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
-			ath_dbg(common, ATH_DBG_BSTUCK,
+			ath_dbg(common, BSTUCK,
 				"missed %u consecutive beacons\n",
 				sc->beacon.bmisscnt);
 			ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
 			if (sc->beacon.bmisscnt > 3)
 				ath9k_hw_bstuck_nfcal(ah);
 		} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
-			ath_dbg(common, ATH_DBG_BSTUCK,
-				"beacon is officially stuck\n");
+			ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
 			sc->sc_flags |= SC_OP_TSF_RESET;
 			ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
 		}
@@ -406,7 +405,7 @@
 		slot = (tsftu % (intval * ATH_BCBUF)) / intval;
 		vif = sc->beacon.bslot[slot];
 
-		ath_dbg(common, ATH_DBG_BEACON,
+		ath_dbg(common, BEACON,
 			"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
 			slot, tsf, tsftu / ATH_BCBUF, intval, vif);
 	} else {
@@ -424,7 +423,7 @@
 		}
 
 		if (sc->beacon.bmisscnt != 0) {
-			ath_dbg(common, ATH_DBG_BSTUCK,
+			ath_dbg(common, BSTUCK,
 				"resume beacon xmit after %u misses\n",
 				sc->beacon.bmisscnt);
 			sc->beacon.bmisscnt = 0;
@@ -458,10 +457,12 @@
 	if (bfaddr != 0) {
 		/* NB: cabq traffic should already be queued and primed */
 		ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
-		ath9k_hw_txstart(ah, sc->beacon.beaconq);
+
+		if (!edma)
+			ath9k_hw_txstart(ah, sc->beacon.beaconq);
 
 		sc->beacon.ast_be_xmit += bc;     /* XXX per-vif? */
-		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		if (edma) {
 			spin_lock_bh(&sc->sc_pcu_lock);
 			ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
 			spin_unlock_bh(&sc->sc_pcu_lock);
@@ -541,7 +542,7 @@
 
 	/* No need to configure beacon if we are not associated */
 	if (!common->curaid) {
-		ath_dbg(common, ATH_DBG_BEACON,
+		ath_dbg(common, BEACON,
 			"STA is not yet associated..skipping beacon config\n");
 		return;
 	}
@@ -631,8 +632,8 @@
 	/* TSF out of range threshold fixed at 1 second */
 	bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
 
-	ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
-	ath_dbg(common, ATH_DBG_BEACON,
+	ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+	ath_dbg(common, BEACON,
 		"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
 		bs.bs_bmissthreshold, bs.bs_sleepduration,
 		bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -660,8 +661,7 @@
 	tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
 	nexttbtt = tsf + intval;
 
-	ath_dbg(common, ATH_DBG_BEACON,
-		"IBSS nexttbtt %u intval %u (%u)\n",
+	ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n",
 		nexttbtt, intval, conf->beacon_interval);
 
 	/*
@@ -699,9 +699,8 @@
 	    (sc->nbcnvifs > 1) &&
 	    (vif->type == NL80211_IFTYPE_AP) &&
 	    (cur_conf->beacon_interval != bss_conf->beacon_int)) {
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Changing beacon interval of multiple \
-			AP interfaces !\n");
+		ath_dbg(common, CONFIG,
+			"Changing beacon interval of multiple AP interfaces !\n");
 		return false;
 	}
 	/*
@@ -710,7 +709,7 @@
 	 */
 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
 	    (vif->type != NL80211_IFTYPE_AP)) {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"STA vif's beacon not allowed on AP mode\n");
 		return false;
 	}
@@ -722,7 +721,7 @@
 	    (vif->type == NL80211_IFTYPE_STATION) &&
 	    (sc->sc_flags & SC_OP_BEACONS) &&
 	    !avp->primary_sta_vif) {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Beacon already configured for a station interface\n");
 		return false;
 	}
@@ -802,8 +801,7 @@
 		ath_beacon_config_sta(sc, cur_conf);
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Unsupported beaconing mode\n");
+		ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
 		return;
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 0122639..a6712a9 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -21,7 +21,7 @@
 	ATH_BT_COEX_MODE_LEGACY,        /* legacy rx_clear mode */
 	ATH_BT_COEX_MODE_UNSLOTTED,     /* untimed/unslotted mode */
 	ATH_BT_COEX_MODE_SLOTTED,       /* slotted mode */
-	ATH_BT_COEX_MODE_DISALBED,      /* coexistence disabled */
+	ATH_BT_COEX_MODE_DISABLED,      /* coexistence disabled */
 };
 
 struct ath_btcoex_config {
@@ -36,6 +36,20 @@
 	bool bt_hold_rx_clear;
 };
 
+static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+				    [AR9300_NUM_WLAN_WEIGHTS] = {
+	{ 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
+	{ 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
+	{ 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
+};
+
+static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+				    [AR9300_NUM_WLAN_WEIGHTS] = {
+	{ 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
+	{ 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
+	{ 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
+	{ 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+};
 
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 {
@@ -54,6 +68,9 @@
 	u32 i, idx;
 	bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	if (AR_SREV_9300_20_OR_LATER(ah))
 		rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
 
@@ -85,6 +102,9 @@
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	/* connect bt_active to baseband */
 	REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
 		    (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
@@ -107,6 +127,9 @@
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	/* btcoex 3-wire */
 	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
 			(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
@@ -133,6 +156,9 @@
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	/* Configure the desired GPIO port for TX_FRAME output */
 	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
 			    AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
@@ -144,6 +170,9 @@
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
 				     SM(wlan_weight, AR_BTCOEX_WL_WGHT);
 }
@@ -152,27 +181,26 @@
 
 static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
 {
-	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
 	u32  val;
+	int i;
 
 	/*
 	 * Program coex mode and weight registers to
 	 * enable coex 3-wire
 	 */
-	REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
-	REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
+	REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
+	REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
 
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
-		REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
-		REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
-		REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
-		REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
-		REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
-		REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
-
+		REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
+		REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
+		for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+			REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
+				  btcoex->bt_weight[i]);
 	} else
-		REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
+		REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
 
 
 
@@ -185,23 +213,39 @@
 	REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
 	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
 
-	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+	ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
 			    AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
 }
 
+static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
+{
+	struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+	int i;
+
+	for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+		REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+			  btcoex->wlan_weight[i]);
+
+	REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+	btcoex->enabled = true;
+}
+
 void ath9k_hw_btcoex_enable(struct ath_hw *ah)
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-	switch (btcoex_hw->scheme) {
+	switch (ath9k_hw_get_btcoex_scheme(ah)) {
 	case ATH_BTCOEX_CFG_NONE:
-		break;
+		return;
 	case ATH_BTCOEX_CFG_2WIRE:
 		ath9k_hw_btcoex_enable_2wire(ah);
 		break;
 	case ATH_BTCOEX_CFG_3WIRE:
 		ath9k_hw_btcoex_enable_3wire(ah);
 		break;
+	case ATH_BTCOEX_CFG_MCI:
+		ath9k_hw_btcoex_enable_mci(ah);
+		return;
 	}
 
 	REG_RMW(ah, AR_GPIO_PDPU,
@@ -215,7 +259,18 @@
 void ath9k_hw_btcoex_disable(struct ath_hw *ah)
 {
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	int i;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
+	btcoex_hw->enabled = false;
+	if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
+		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+		for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+			REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+				  btcoex_hw->wlan_weight[i]);
+	}
 	ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
 	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@@ -228,49 +283,27 @@
 		if (AR_SREV_9300_20_OR_LATER(ah)) {
 			REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
 			REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
-			REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
-			REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
-			REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
-			REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
+			for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+				REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
 		} else
 			REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
 
 	}
-
-	ah->btcoex_hw.enabled = false;
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
 
 static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
 			 enum ath_stomp_type stomp_type)
 {
-	ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
-	ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
-	ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
-	ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
+	struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+	const u32 *weight = AR_SREV_9462(ah) ? ar9462_wlan_weights[stomp_type] :
+					       ar9003_wlan_weights[stomp_type];
+	int i;
 
-
-	switch (stomp_type) {
-	case ATH_BTCOEX_STOMP_ALL:
-		ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
-		ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
-		break;
-	case ATH_BTCOEX_STOMP_LOW:
-		ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
-		ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
-		break;
-	case ATH_BTCOEX_STOMP_NONE:
-		ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
-		ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
-		break;
-
-	default:
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-				"Invalid Stomptype\n");
-		break;
+	for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+		btcoex->bt_weight[i] = AR9300_BT_WGHT;
+		btcoex->wlan_weight[i] = weight[i];
 	}
-
-	ath9k_hw_btcoex_enable(ah);
 }
 
 /*
@@ -279,6 +312,9 @@
 void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
 			      enum ath_stomp_type stomp_type)
 {
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ar9003_btcoex_bt_stomp(ah, stomp_type);
 		return;
@@ -298,11 +334,8 @@
 				AR_STOMP_NONE_WLAN_WGHT);
 		break;
 	default:
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-				"Invalid Stomptype\n");
+		ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
 		break;
 	}
-
-	ath9k_hw_btcoex_enable(ah);
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 234f776..278361c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,22 +36,57 @@
 #define ATH_BT_CNT_THRESHOLD	       3
 #define ATH_BT_CNT_SCAN_THRESHOLD      15
 
+#define AR9300_NUM_BT_WEIGHTS   4
+#define AR9300_NUM_WLAN_WEIGHTS 4
 /* Defines the BT AR_BT_COEX_WGHT used */
 enum ath_stomp_type {
-	ATH_BTCOEX_NO_STOMP,
 	ATH_BTCOEX_STOMP_ALL,
 	ATH_BTCOEX_STOMP_LOW,
-	ATH_BTCOEX_STOMP_NONE
+	ATH_BTCOEX_STOMP_NONE,
+	ATH_BTCOEX_STOMP_LOW_FTP,
+	ATH_BTCOEX_STOMP_MAX
 };
 
 enum ath_btcoex_scheme {
 	ATH_BTCOEX_CFG_NONE,
 	ATH_BTCOEX_CFG_2WIRE,
 	ATH_BTCOEX_CFG_3WIRE,
+	ATH_BTCOEX_CFG_MCI,
+};
+
+struct ath9k_hw_mci {
+	u32 raw_intr;
+	u32 rx_msg_intr;
+	u32 cont_status;
+	u32 gpm_addr;
+	u32 gpm_len;
+	u32 gpm_idx;
+	u32 sched_addr;
+	u32 wlan_channels[4];
+	u32 wlan_cal_seq;
+	u32 wlan_cal_done;
+	u32 config;
+	u8 *gpm_buf;
+	u8 *sched_buf;
+	bool ready;
+	bool update_2g5g;
+	bool is_2g;
+	bool query_bt;
+	bool unhalt_bt_gpm; /* need send UNHALT */
+	bool halted_bt_gpm; /* HALT sent */
+	bool need_flush_btinfo;
+	bool bt_version_known;
+	bool wlan_channels_update;
+	u8 wlan_ver_major;
+	u8 wlan_ver_minor;
+	u8 bt_ver_major;
+	u8 bt_ver_minor;
+	u8 bt_state;
 };
 
 struct ath_btcoex_hw {
 	enum ath_btcoex_scheme scheme;
+	struct ath9k_hw_mci mci;
 	bool enabled;
 	u8 wlanactive_gpio;
 	u8 btactive_gpio;
@@ -59,6 +94,8 @@
 	u32 bt_coex_mode; 	/* Register setting for AR_BT_COEX_MODE */
 	u32 bt_coex_weights; 	/* Register setting for AR_BT_COEX_WEIGHT */
 	u32 bt_coex_mode2; 	/* Register setting for AR_BT_COEX_MODE2 */
+	u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
+	u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
 };
 
 void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 9953881..172e33d 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -116,7 +116,7 @@
 		if (h[i].privNF > limit->max) {
 			high_nf_mid = true;
 
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"NFmid[%d] (%d) > MAX (%d), %s\n",
 				i, h[i].privNF, limit->max,
 				(cal->nfcal_interference ?
@@ -199,8 +199,7 @@
 		return true;
 
 	if (currCal->calState != CAL_DONE) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Calibration state incorrect, %d\n",
+		ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
 			currCal->calState);
 		return true;
 	}
@@ -208,8 +207,7 @@
 	if (!(ah->supp_cals & currCal->calData->calType))
 		return true;
 
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"Resetting Cal %d state for channel %u\n",
+	ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
 		currCal->calData->calType, conf->channel->center_freq);
 
 	ah->caldata->CalValid &= ~currCal->calData->calType;
@@ -302,7 +300,7 @@
 	 * noisefloor until the next calibration timer.
 	 */
 	if (j == 10000) {
-		ath_dbg(common, ATH_DBG_ANY,
+		ath_dbg(common, ANY,
 			"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
 			REG_READ(ah, AR_PHY_AGC_CONTROL));
 		return;
@@ -344,17 +342,17 @@
 		if (!nf[i])
 			continue;
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"NF calibrated [%s] [chain %d] is %d\n",
 			(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
 
 		if (nf[i] > ATH9K_NF_TOO_HIGH) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
 				i, nf[i], ATH9K_NF_TOO_HIGH);
 			nf[i] = limit->max;
 		} else if (nf[i] < limit->min) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
 				i, nf[i], limit->min);
 			nf[i] = limit->nominal;
@@ -373,7 +371,7 @@
 
 	chan->channelFlags &= (~CHANNEL_CW_INT);
 	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"NF did not complete in calibration window\n");
 		return false;
 	}
@@ -383,7 +381,7 @@
 	nf = nfarray[0];
 	if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
 	    && nf > nfThresh) {
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"noise floor failed detected; detected %d, threshold %d\n",
 			nf, nfThresh);
 		chan->channelFlags |= CHANNEL_CW_INT;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2741203..68d972b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -709,24 +709,29 @@
 
 	len += snprintf(buf + len, size - len,
 			"Stations:\n"
-			" tid: addr sched paused buf_q-empty an ac\n"
+			" tid: addr sched paused buf_q-empty an ac baw\n"
 			" ac: addr sched tid_q-empty txq\n");
 
 	spin_lock(&sc->nodes_lock);
 	list_for_each_entry(an, &sc->nodes, list) {
+		unsigned short ma = an->maxampdu;
+		if (ma == 0)
+			ma = 65535; /* see ath_lookup_rate */
 		len += snprintf(buf + len, size - len,
-				"%pM\n", an->sta->addr);
+				"iface: %pM  sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
+				an->vif->addr, an->sta->addr, ma,
+				(unsigned int)(an->mpdudensity));
 		if (len >= size)
 			goto done;
 
 		for (q = 0; q < WME_NUM_TID; q++) {
 			struct ath_atx_tid *tid = &(an->tid[q]);
 			len += snprintf(buf + len, size - len,
-					" tid: %p %s %s %i %p %p\n",
+					" tid: %p %s %s %i %p %p %hu\n",
 					tid, tid->sched ? "sched" : "idle",
 					tid->paused ? "paused" : "running",
 					skb_queue_empty(&tid->buf_q),
-					tid->an, tid->ac);
+					tid->an, tid->ac, tid->baw_size);
 			if (len >= size)
 				goto done;
 		}
@@ -851,7 +856,7 @@
 	sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
 
 	if (bf_isampdu(bf)) {
-		if (flags & ATH_TX_BAR)
+		if (flags & ATH_TX_ERROR)
 			TX_STAT_INC(qnum, a_xretries);
 		else
 			TX_STAT_INC(qnum, a_completed);
@@ -1625,6 +1630,9 @@
 	debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
 			    sc, &fops_debug);
 #endif
+
+	ath9k_dfs_init_debug(sc);
+
 	debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_dma);
 	debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 356352a..776a24a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,6 +19,7 @@
 
 #include "hw.h"
 #include "rc.h"
+#include "dfs_debug.h"
 
 struct ath_txq;
 struct ath_buf;
@@ -187,6 +188,7 @@
 	struct ath_interrupt_stats istats;
 	struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
 	struct ath_rx_stats rxstats;
+	struct ath_dfs_stats dfs_stats;
 	u32 reset[__RESET_TYPE_MAX];
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 0000000..f4f56af
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/*
+ * TODO: move into or synchronize this with generic header
+ *	 as soon as IF is defined
+ */
+struct dfs_radar_pulse {
+	u16 freq;
+	u64 ts;
+	u32 width;
+	u8 rssi;
+};
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+	u8 pulse_bw_info;
+	u8 rssi;
+	u8 ext_rssi;
+	u8 pulse_length_ext;
+	u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+	const u32 AR93X_NSECS_PER_DUR = 800;
+	const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+	u32 nsecs;
+
+	if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+		nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+	else
+		nsecs = dur * AR93X_NSECS_PER_DUR;
+
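+	/* round to the nearest microsecond */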
+	return (nsecs + 500) / 1000;
+}
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+			      struct ath_radar_data *are,
+			      struct dfs_radar_pulse *drp)
+{
+	u8 rssi;
+	u16 dur;
+
+	ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
+		"pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
+		are->pulse_bw_info,
+		are->pulse_length_pri, are->rssi,
+		are->pulse_length_ext, are->ext_rssi);
+
+	/*
+	 * Only the last 2 bits of the BW info are relevant, they indicate
+	 * which channel the radar was detected in.
+	 */
+	are->pulse_bw_info &= 0x03;
+
+	switch (are->pulse_bw_info) {
+	case PRI_CH_RADAR_FOUND:
+		/* radar in ctrl channel */
+		dur = are->pulse_length_pri;
+		DFS_STAT_INC(sc, pri_phy_errors);
+		/*
+		 * cannot use ctrl channel RSSI
+		 * if extension channel is stronger
+		 */
+		rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+		break;
+	case EXT_CH_RADAR_FOUND:
+		/* radar in extension channel */
+		dur = are->pulse_length_ext;
+		DFS_STAT_INC(sc, ext_phy_errors);
+		/*
+		 * cannot use extension channel RSSI
+		 * if control channel is stronger
+		 */
+		rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+		break;
+	case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+		/*
+		 * Conducted testing, when pulse is on DC, both pri and ext
+		 * durations are reported to be same
+		 *
+		 * Radiated testing, when pulse is on DC, different pri and
+		 * ext durations are reported, so take the larger of the two
+		 */
+		if (are->pulse_length_ext >= are->pulse_length_pri)
+			dur = are->pulse_length_ext;
+		else
+			dur = are->pulse_length_pri;
+		DFS_STAT_INC(sc, dc_phy_errors);
+
+		/* when both are present use stronger one */
+		rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+		break;
+	default:
+		/*
+		 * Bogus bandwidth info was received in descriptor,
+		 * so ignore this PHY error
+		 */
+		DFS_STAT_INC(sc, bwinfo_discards);
+		return false;
+	}
+
+	if (rssi == 0) {
+		DFS_STAT_INC(sc, rssi_discards);
+		return false;
+	}
+
+	/*
+	 * TODO: check chirping pulses
+	 *	 checks for chirping are dependent on the DFS regulatory domain
+	 *	 used, which is yet TBD
+	 */
+
+	/* convert duration to usecs */
+	drp->width = dur_to_usecs(sc->sc_ah, dur);
+	drp->rssi = rssi;
+
+	DFS_STAT_INC(sc, pulses_detected);
+	return true;
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+			      struct ath_rx_status *rs, u64 mactime)
+{
+	struct ath_radar_data ard;
+	u16 datalen;
+	char *vdata_end;
+	struct dfs_radar_pulse drp;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+	    (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+		ath_dbg(common, DFS,
+			"Error: rs_phyerr=0x%x not a radar error\n",
+			rs->rs_phyerr);
+		return;
+	}
+
+	datalen = rs->rs_datalen;
+	if (datalen == 0) {
+		DFS_STAT_INC(sc, datalen_discards);
+		return;
+	}
+
+	ard.rssi = rs->rs_rssi_ctl0;
+	ard.ext_rssi = rs->rs_rssi_ext0;
+
+	/*
+	 * hardware stores this as 8 bit signed value.
+	 * we will cap it at 0 if it is a negative number
+	 */
+	if (ard.rssi & 0x80)
+		ard.rssi = 0;
+	if (ard.ext_rssi & 0x80)
+		ard.ext_rssi = 0;
+
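+	/*
+	 * The radar pulse summary (bw info and pri/ext pulse lengths) is
+	 * carried in the last three bytes of the RX payload.
+	 */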
+	vdata_end = (char *)data + datalen;
+	ard.pulse_bw_info = vdata_end[-1];
+	ard.pulse_length_ext = vdata_end[-2];
+	ard.pulse_length_pri = vdata_end[-3];
+
+	ath_dbg(common, DFS,
+		"bw_info=%d, length_pri=%d, length_ext=%d, "
+		"rssi_pri=%d, rssi_ext=%d\n",
+		ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
+		ard.rssi, ard.ext_rssi);
+
+	drp.freq = ah->curchan->channel;
+	drp.ts = mactime;
+	if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+		static u64 last_ts;
+		ath_dbg(common, DFS,
+			"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
+			"width=%d, rssi=%d, delta_ts=%llu\n",
+			drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
+		last_ts = drp.ts;
+		/*
+		 * TODO: forward pulse to pattern detector
+		 *
+		 * ieee80211_add_radar_pulse(drp.freq, drp.ts,
+		 *                           drp.width, drp.rssi);
+		 */
+	}
+}
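
Side note on the post-processing rules added above: they boil down to three cases of the two-bit bandwidth info plus the sign-bit capping of the RSSI bytes. The following is a minimal, self-contained user-space sketch of that decision logic, not the driver code itself; the struct and the constant values (PRI_CH_RADAR_FOUND = 0x1, EXT_CH_RADAR_FOUND = 0x2, consistent with the 2-bit mask and the combined case above) are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRI_CH_RADAR_FOUND 0x1	/* assumed value, fits the 2-bit mask above */
#define EXT_CH_RADAR_FOUND 0x2	/* assumed value */

struct pulse { uint8_t bw_info, rssi, ext_rssi, len_pri, len_ext; };

/* Returns true and fills *dur/*rssi when the pulse looks usable. */
static bool postprocess(struct pulse *p, uint8_t *dur, uint8_t *rssi)
{
	/* cap 8-bit signed RSSI readings at 0, as in the driver above */
	if (p->rssi & 0x80)
		p->rssi = 0;
	if (p->ext_rssi & 0x80)
		p->ext_rssi = 0;

	switch (p->bw_info & 0x03) {
	case PRI_CH_RADAR_FOUND:	/* control channel only */
		*dur = p->len_pri;
		*rssi = (p->ext_rssi >= p->rssi + 3) ? 0 : p->rssi;
		break;
	case EXT_CH_RADAR_FOUND:	/* extension channel only */
		*dur = p->len_ext;
		*rssi = (p->rssi >= p->ext_rssi + 12) ? 0 : p->ext_rssi;
		break;
	case PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND:
		/* both channels: take the larger duration and the stronger RSSI */
		*dur = (p->len_ext >= p->len_pri) ? p->len_ext : p->len_pri;
		*rssi = (p->rssi < p->ext_rssi) ? p->ext_rssi : p->rssi;
		break;
	default:			/* bogus bandwidth info: discard */
		return false;
	}
	return *rssi != 0;
}

int main(void)
{
	struct pulse p = { .bw_info = 3, .rssi = 40, .ext_rssi = 35,
			   .len_pri = 10, .len_ext = 12 };
	uint8_t dur, rssi;

	if (postprocess(&p, &dur, &rssi))
		printf("pulse: dur=%u rssi=%u\n", dur, rssi);
	return 0;
}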
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 0000000..c241285
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the upper layer for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+			      struct ath_rx_status *rs, u64 mactime);
+#else
+static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+					    struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 0000000..106d031
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+
+#define ATH9K_DFS_STAT(s, p) \
+	len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+			sc->debug.stats.dfs_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+	char *buf;
+	unsigned int len = 0, size = 8000;
+	ssize_t retval = 0;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, size - len, "DFS support for "
+			"macVersion = 0x%x, macRev = 0x%x: %s\n",
+			hw_ver->macVersion, hw_ver->macRev,
+			(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+					"enabled" : "disabled");
+	ATH9K_DFS_STAT("DFS pulses detected     ", pulses_detected);
+	ATH9K_DFS_STAT("Datalen discards        ", datalen_discards);
+	ATH9K_DFS_STAT("RSSI discards           ", rssi_discards);
+	ATH9K_DFS_STAT("BW info discards        ", bwinfo_discards);
+	ATH9K_DFS_STAT("Primary channel pulses  ", pri_phy_errors);
+	ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+	ATH9K_DFS_STAT("Dual channel pulses     ", dc_phy_errors);
+
+	if (len > size)
+		len = size;
+
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static const struct file_operations fops_dfs_stats = {
+	.read = read_file_dfs,
+	.open = ath9k_dfs_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+	debugfs_create_file("dfs_stats", S_IRUSR,
+			    sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+}
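
The ATH9K_DFS_STAT macro above uses the common "append with snprintf, track len against size" idiom for building a debugfs report in one buffer. A tiny stand-alone illustration of that pattern follows; the names and stat values are generic placeholders, not the driver's structures.

#include <stdio.h>
#include <stdlib.h>

#define REPORT_STAT(name, value) \
	len += snprintf(buf + len, size > len ? size - len : 0, \
			"%28s : %10u\n", name, value)

int main(void)
{
	const size_t size = 256;
	char *buf = calloc(1, size);
	size_t len = 0;

	if (!buf)
		return 1;

	REPORT_STAT("DFS pulses detected", 42u);
	REPORT_STAT("RSSI discards", 7u);

	/* snprintf may report more than fit; clamp as the driver does */
	if (len > size)
		len = size;

	fwrite(buf, 1, len, stdout);
	free(buf);
	return 0;
}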
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 0000000..4911724
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef ATH9K_DFS_DEBUG_H
+#define ATH9K_DFS_DEBUG_H
+
+#include "hw.h"
+
+/**
+ * struct ath_dfs_stats - DFS Statistics
+ *
+ * @pulses_detected:  No. of pulses detected so far
+ * @datalen_discards: No. of pulses discarded due to invalid datalen
+ * @rssi_discards:    No. of pulses discarded due to invalid RSSI
+ * @bwinfo_discards:  No. of pulses discarded due to invalid BW info
+ * @pri_phy_errors:   No. of pulses reported for primary channel
+ * @ext_phy_errors:   No. of pulses reported for extension channel
+ * @dc_phy_errors:    No. of pulses reported for primary + extension channel
+ */
+struct ath_dfs_stats {
+	u32 pulses_detected;
+	u32 datalen_discards;
+	u32 rssi_discards;
+	u32 bwinfo_discards;
+	u32 pri_phy_errors;
+	u32 ext_phy_errors;
+	u32 dc_phy_errors;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* ATH9K_DFS_DEBUG_H */
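
When CONFIG_ATH9K_DFS_DEBUGFS is not set, DFS_STAT_INC collapses to a do { } while (0) and ath9k_dfs_init_debug() to an empty static inline, so the call sites compile unchanged and cost nothing. A minimal stand-alone sketch of this compile-out pattern, using made-up names (toggle the #define to see both sides):

#include <stdio.h>

/* #define WITH_DEBUG_STATS 1 */

struct stats { unsigned int events; };

#ifdef WITH_DEBUG_STATS
#define STAT_INC(s, field) ((s)->field++)
static inline void init_debug(struct stats *s) { s->events = 0; }
#else
/* do { } while (0) keeps the macro usable as a single statement */
#define STAT_INC(s, field) do { } while (0)
static inline void init_debug(struct stats *s) { (void)s; }
#endif

int main(void)
{
	struct stats st = { 0 };

	init_debug(&st);
	STAT_INC(&st, events);	/* no-op unless WITH_DEBUG_STATS is defined */
	printf("events = %u\n", st.events);
	return 0;
}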
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e46f751..c435232 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -305,8 +305,7 @@
 		regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Invalid chainmask configuration\n");
+		ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
 		break;
 	}
 }
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 49abd34..5ff7ab9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -249,7 +249,8 @@
 	EEP_ANT_DIV_CTL1,
 	EEP_CHAIN_MASK_REDUCE,
 	EEP_ANTENNA_GAIN_2G,
-	EEP_ANTENNA_GAIN_5G
+	EEP_ANTENNA_GAIN_5G,
+	EEP_QUICK_DROP
 };
 
 enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9a7520f..4322ac8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -38,7 +38,7 @@
 
 	for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
 		if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"Unable to read eeprom region\n");
 			return false;
 		}
@@ -62,8 +62,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!ath9k_hw_use_flash(ah)) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Reading from EEPROM, not flash\n");
+		ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
 	}
 
 	if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -204,8 +203,7 @@
 			return false;
 		}
 
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Read Magic = 0x%04X\n", magic);
+		ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
 
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
@@ -227,7 +225,7 @@
 		}
 	}
 
-	ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+	ath_dbg(common, EEPROM, "need_swap = %s\n",
 		need_swap ? "True" : "False");
 
 	if (need_swap)
@@ -249,7 +247,7 @@
 		u32 integer;
 		u16 word;
 
-		ath_dbg(common, ATH_DBG_EEPROM,
+		ath_dbg(common, EEPROM,
 			"EEPROM Endianness is not native.. Changing\n");
 
 		word = swab16(eep->baseEepHeader.length);
@@ -435,11 +433,11 @@
 				reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
 				REG_WRITE(ah, regOffset, reg32);
 
-				ath_dbg(common, ATH_DBG_EEPROM,
+				ath_dbg(common, EEPROM,
 					"PDADC (%d,%4x): %4.4x %8.8x\n",
 					i, regChainOffset, regOffset,
 					reg32);
-				ath_dbg(common, ATH_DBG_EEPROM,
+				ath_dbg(common, EEPROM,
 					"PDADC: Chain %d | "
 					"PDADC %3d Value %3d | "
 					"PDADC %3d Value %3d | "
@@ -473,7 +471,7 @@
 
 	int i;
 	u16 twiceMinEdgePower;
-	u16 twiceMaxEdgePower = MAX_RATE_POWER;
+	u16 twiceMaxEdgePower;
 	u16 scaledPower = 0, minCtlPower;
 	u16 numCtlModes;
 	const u16 *pCtlMode;
@@ -542,9 +540,7 @@
 		else
 			freq = centers.ctl_center;
 
-		if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-		    ah->eep_ops->get_eeprom_rev(ah) <= 2)
-			twiceMaxEdgePower = MAX_RATE_POWER;
+		twiceMaxEdgePower = MAX_RATE_POWER;
 
 		for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
 			     pEepData->ctlIndex[i]; i++) {
@@ -1081,8 +1077,7 @@
 
 	u16 spur_val = AR_NO_SPUR;
 
-	ath_dbg(common, ATH_DBG_ANI,
-		"Getting spur idx:%d is2Ghz:%d val:%x\n",
+	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
 		i, is2GHz, ah->config.spurchans[i][is2GHz]);
 
 	switch (ah->config.spurmode) {
@@ -1090,8 +1085,8 @@
 		break;
 	case SPUR_ENABLE_IOCTL:
 		spur_val = ah->config.spurchans[i][is2GHz];
-		ath_dbg(common, ATH_DBG_ANI,
-			"Getting spur val from new loc. %d\n", spur_val);
+		ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+			spur_val);
 		break;
 	case SPUR_ENABLE_EEPROM:
 		spur_val = EEP_MAP4K_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4f5c50a..f272236 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -41,7 +41,7 @@
 	for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
 		if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
 					 eep_data)) {
-			ath_dbg(common, ATH_DBG_EEPROM,
+			ath_dbg(common, EEPROM,
 				"Unable to read eeprom region\n");
 			return false;
 		}
@@ -66,8 +66,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!ath9k_hw_use_flash(ah)) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Reading from EEPROM, not flash\n");
+		ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
 	}
 
 	if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -197,8 +196,7 @@
 			return false;
 		}
 
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Read Magic = 0x%04X\n", magic);
+		ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
 
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
@@ -220,7 +218,7 @@
 		}
 	}
 
-	ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+	ath_dbg(common, EEPROM, "need_swap = %s\n",
 		need_swap ? "True" : "False");
 
 	if (need_swap)
@@ -569,7 +567,7 @@
 #define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   10
 
-	u16 twiceMaxEdgePower = MAX_RATE_POWER;
+	u16 twiceMaxEdgePower;
 	int i;
 	struct cal_ctl_data_ar9287 *rep;
 	struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +667,7 @@
 		else
 			freq = centers.ctl_center;
 
+		twiceMaxEdgePower = MAX_RATE_POWER;
 		/* Walk through the CTL indices stored in EEPROM */
 		for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
 			struct cal_ctl_edges *pRdEdgesPower;
@@ -1040,8 +1039,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	u16 spur_val = AR_NO_SPUR;
 
-	ath_dbg(common, ATH_DBG_ANI,
-		"Getting spur idx:%d is2Ghz:%d val:%x\n",
+	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
 		i, is2GHz, ah->config.spurchans[i][is2GHz]);
 
 	switch (ah->config.spurmode) {
@@ -1049,8 +1047,8 @@
 		break;
 	case SPUR_ENABLE_IOCTL:
 		spur_val = ah->config.spurchans[i][is2GHz];
-		ath_dbg(common, ATH_DBG_ANI,
-			"Getting spur val from new loc. %d\n", spur_val);
+		ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+			spur_val);
 		break;
 	case SPUR_ENABLE_EEPROM:
 		spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 81e6296..619b95d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -121,8 +121,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	if (!ath9k_hw_use_flash(ah)) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Reading from EEPROM, not flash\n");
+		ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
 	}
 
 	if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -279,8 +278,7 @@
 	}
 
 	if (!ath9k_hw_use_flash(ah)) {
-		ath_dbg(common, ATH_DBG_EEPROM,
-			"Read Magic = 0x%04X\n", magic);
+		ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
 
 		if (magic != AR5416_EEPROM_MAGIC) {
 			magic2 = swab16(magic);
@@ -303,7 +301,7 @@
 		}
 	}
 
-	ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+	ath_dbg(common, EEPROM, "need_swap = %s\n",
 		need_swap ? "True" : "False");
 
 	if (need_swap)
@@ -325,7 +323,7 @@
 		u32 integer, j;
 		u16 word;
 
-		ath_dbg(common, ATH_DBG_EEPROM,
+		ath_dbg(common, EEPROM,
 			"EEPROM Endianness is not native.. Changing.\n");
 
 		word = swab16(eep->baseEepHeader.length);
@@ -385,7 +383,7 @@
 	if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
 	    ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
 	    (eep->baseEepHeader.pwdclkind == 0))
-		ah->need_an_top2_fixup = 1;
+		ah->need_an_top2_fixup = true;
 
 	if ((common->bus_ops->ath_bus_type == ATH_USB) &&
 	    (AR_SREV_9280(ah)))
@@ -965,15 +963,12 @@
 				reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
 				REG_WRITE(ah, regOffset, reg32);
 
-				ath_dbg(common, ATH_DBG_EEPROM,
+				ath_dbg(common, EEPROM,
 					"PDADC (%d,%4x): %4.4x %8.8x\n",
 					i, regChainOffset, regOffset,
 					reg32);
-				ath_dbg(common, ATH_DBG_EEPROM,
-					"PDADC: Chain %d | PDADC %3d "
-					"Value %3d | PDADC %3d Value %3d | "
-					"PDADC %3d Value %3d | PDADC %3d "
-					"Value %3d |\n",
+				ath_dbg(common, EEPROM,
+					"PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n",
 					i, 4 * j, pdadcValues[4 * j],
 					4 * j + 1, pdadcValues[4 * j + 1],
 					4 * j + 2, pdadcValues[4 * j + 2],
@@ -1000,7 +995,7 @@
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9 /* 10*log10(3)*2 */
 
 	struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
-	u16 twiceMaxEdgePower = MAX_RATE_POWER;
+	u16 twiceMaxEdgePower;
 	int i;
 	struct cal_ctl_data *rep;
 	struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1116,7 @@
 		else
 			freq = centers.ctl_center;
 
-		if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-		    ah->eep_ops->get_eeprom_rev(ah) <= 2)
-			twiceMaxEdgePower = MAX_RATE_POWER;
+		twiceMaxEdgePower = MAX_RATE_POWER;
 
 		for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
 			if ((((cfgCtl & ~CTL_MODE_M) |
@@ -1280,7 +1273,7 @@
 		regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
 		break;
 	default:
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+		ath_dbg(ath9k_hw_common(ah), EEPROM,
 			"Invalid chainmask configuration\n");
 		break;
 	}
@@ -1398,8 +1391,7 @@
 
 	u16 spur_val = AR_NO_SPUR;
 
-	ath_dbg(common, ATH_DBG_ANI,
-		"Getting spur idx:%d is2Ghz:%d val:%x\n",
+	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
 		i, is2GHz, ah->config.spurchans[i][is2GHz]);
 
 	switch (ah->config.spurmode) {
@@ -1407,8 +1399,8 @@
 		break;
 	case SPUR_ENABLE_IOCTL:
 		spur_val = ah->config.spurchans[i][is2GHz];
-		ath_dbg(common, ATH_DBG_ANI,
-			"Getting spur val from new loc. %d\n", spur_val);
+		ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+			spur_val);
 		break;
 	case SPUR_ENABLE_EEPROM:
 		spur_val = EEP_DEF_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 655576c..597c84e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -130,12 +130,12 @@
 		sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
 		/* Detect if colocated bt started scanning */
 		if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
-			ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+			ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
 				"BT scan detected\n");
 			sc->sc_flags |= (SC_OP_BT_SCAN |
 					 SC_OP_BT_PRIORITY_DETECTED);
 		} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
-			ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+			ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
 				"BT priority traffic detected\n");
 			sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
 		}
@@ -189,8 +189,8 @@
 	bool is_btscan;
 
 	ath9k_ps_wakeup(sc);
-	ath_detect_bt_priority(sc);
-
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+		ath_detect_bt_priority(sc);
 	is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
 
 	spin_lock_bh(&btcoex->btcoex_lock);
@@ -198,6 +198,7 @@
 	ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
 			      btcoex->bt_stomp_type);
 
+	ath9k_hw_btcoex_enable(ah);
 	spin_unlock_bh(&btcoex->btcoex_lock);
 
 	if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@@ -212,8 +213,9 @@
 	}
 
 	ath9k_ps_restore(sc);
+	timer_period = btcoex->btcoex_period / 1000;
 	mod_timer(&btcoex->period_timer, jiffies +
-				  msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+				  msecs_to_jiffies(timer_period));
 }
 
 /*
@@ -228,8 +230,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
 
-	ath_dbg(common, ATH_DBG_BTCOEX,
-		"no stomp timer running\n");
+	ath_dbg(common, BTCOEX, "no stomp timer running\n");
 
 	ath9k_ps_wakeup(sc);
 	spin_lock_bh(&btcoex->btcoex_lock);
@@ -239,6 +240,7 @@
 	 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
 		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
 
+	ath9k_hw_btcoex_enable(ah);
 	spin_unlock_bh(&btcoex->btcoex_lock);
 	ath9k_ps_restore(sc);
 }
@@ -247,6 +249,9 @@
 {
 	struct ath_btcoex *btcoex = &sc->btcoex;
 
+	if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE)
+		return 0;
+
 	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
 	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
 		btcoex->btcoex_period / 100;
@@ -277,8 +282,10 @@
 	struct ath_btcoex *btcoex = &sc->btcoex;
 	struct ath_hw *ah = sc->sc_ah;
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-		"Starting btcoex timers\n");
+	ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
+
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
 
 	/* make sure duty cycle timer is also stopped when resuming */
 	if (btcoex->hw_timer_enabled)
@@ -300,6 +307,9 @@
 	struct ath_btcoex *btcoex = &sc->btcoex;
 	struct ath_hw *ah = sc->sc_ah;
 
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	del_timer_sync(&btcoex->period_timer);
 
 	if (btcoex->hw_timer_enabled)
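
The timer arithmetic changed in the hunks above keeps btcoex_period in microseconds and re-derives the millisecond value for mod_timer(), while the "no stomp" window is the non-duty-cycle fraction of the period. A worked stand-alone example follows; the 45 ms default period and 55% duty cycle are assumed values for illustration, since the corresponding #defines are not part of this diff.

#include <stdio.h>

/* assumed defaults, for illustration only */
#define DEF_BT_PERIOD_MS 45
#define DEF_DUTY_CYCLE   55

int main(void)
{
	unsigned int period_us = DEF_BT_PERIOD_MS * 1000;
	unsigned int no_stomp_us = (100 - DEF_DUTY_CYCLE) * period_us / 100;
	unsigned int timer_period_ms = period_us / 1000;	/* back to ms for the timer */

	printf("period       = %u us\n", period_us);
	printf("no-stomp     = %u us\n", no_stomp_us);
	printf("timer period = %u ms\n", timer_period_ms);
	return 0;
}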
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 57fe22b..2eadffb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -167,9 +167,9 @@
 	/* TSF out of range threshold fixed at 1 second */
 	bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
 
-	ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+	ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
 		intval, tsf, tsftu);
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
 		bs.bs_bmissthreshold, bs.bs_sleepduration,
 		bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -224,9 +224,8 @@
 	if (priv->op_flags & OP_ENABLE_BEACON)
 		imask |= ATH9K_INT_SWBA;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d "
-		"imask: 0x%x\n",
+	ath_dbg(common, CONFIG,
+		"AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
 		bss_conf->beacon_interval, nexttbtt,
 		priv->ah->config.sw_beacon_response_time, imask);
 
@@ -273,9 +272,8 @@
 	if (priv->op_flags & OP_ENABLE_BEACON)
 		imask |= ATH9K_INT_SWBA;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"IBSS Beacon config, intval: %d, nexttbtt: %u, "
-		"resp_time: %d, imask: 0x%x\n",
+	ath_dbg(common, CONFIG,
+		"IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
 		bss_conf->beacon_interval, nexttbtt,
 		priv->ah->config.sw_beacon_response_time, imask);
 
@@ -323,7 +321,7 @@
 
 		tx_slot = ath9k_htc_tx_get_slot(priv);
 		if (tx_slot < 0) {
-			ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n");
+			ath_dbg(common, XMIT, "No free CAB slot\n");
 			dev_kfree_skb_any(skb);
 			goto next;
 		}
@@ -333,8 +331,7 @@
 			ath9k_htc_tx_clear_slot(priv, tx_slot);
 			dev_kfree_skb_any(skb);
 
-			ath_dbg(common, ATH_DBG_XMIT,
-				"Failed to send CAB frame\n");
+			ath_dbg(common, XMIT, "Failed to send CAB frame\n");
 		} else {
 			spin_lock_bh(&priv->tx.tx_lock);
 			priv->tx.queued_cnt++;
@@ -409,7 +406,7 @@
 	ret = htc_send(priv->htc, beacon);
 	if (ret != 0) {
 		if (ret == -ENOMEM) {
-			ath_dbg(common, ATH_DBG_BSTUCK,
+			ath_dbg(common, BSTUCK,
 				"Failed to send beacon, no free TX buffer\n");
 		}
 		dev_kfree_skb_any(beacon);
@@ -434,7 +431,7 @@
 	slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval;
 	slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1;
 
-	ath_dbg(common, ATH_DBG_BEACON,
+	ath_dbg(common, BEACON,
 		"Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n",
 		slot, tsf, tsftu, intval);
 
@@ -450,8 +447,7 @@
 	if (swba->beacon_pending != 0) {
 		priv->cur_beacon_conf.bmiss_cnt++;
 		if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
-			ath_dbg(common, ATH_DBG_BSTUCK,
-				"Beacon stuck, HW reset\n");
+			ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
 			ieee80211_queue_work(priv->hw,
 					     &priv->fatal_work);
 		}
@@ -459,7 +455,7 @@
 	}
 
 	if (priv->cur_beacon_conf.bmiss_cnt) {
-		ath_dbg(common, ATH_DBG_BSTUCK,
+		ath_dbg(common, BSTUCK,
 			"Resuming beacon xmit after %u misses\n",
 			priv->cur_beacon_conf.bmiss_cnt);
 		priv->cur_beacon_conf.bmiss_cnt = 0;
@@ -495,8 +491,8 @@
 	priv->cur_beacon_conf.bslot[avp->bslot] = vif;
 	spin_unlock_bh(&priv->beacon_lock);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"Added interface at beacon slot: %d\n", avp->bslot);
+	ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+		avp->bslot);
 }
 
 void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
@@ -509,8 +505,8 @@
 	priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
 	spin_unlock_bh(&priv->beacon_lock);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"Removed interface at beacon slot: %d\n", avp->bslot);
+	ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
+		avp->bslot);
 }
 
 /*
@@ -536,8 +532,7 @@
 	tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF;
 	avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"tsfadjust is: %llu for bslot: %d\n",
+	ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
 		(unsigned long long)tsfadjust, avp->bslot);
 }
 
@@ -568,7 +563,7 @@
 	    (priv->num_ap_vif > 1) &&
 	    (vif->type == NL80211_IFTYPE_AP) &&
 	    (cur_conf->beacon_interval != bss_conf->beacon_int)) {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Changing beacon interval of multiple AP interfaces !\n");
 		return false;
 	}
@@ -579,7 +574,7 @@
 	 */
 	if (priv->num_ap_vif &&
 	    (vif->type != NL80211_IFTYPE_AP)) {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"HW in AP mode, cannot set STA beacon parameters\n");
 		return false;
 	}
@@ -597,7 +592,7 @@
 							   &beacon_configured);
 
 		if (beacon_configured) {
-			ath_dbg(common, ATH_DBG_CONFIG,
+			ath_dbg(common, CONFIG,
 				"Beacon already configured for a station interface\n");
 			return false;
 		}
@@ -637,8 +632,7 @@
 		ath9k_htc_beacon_config_ap(priv, cur_conf);
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Unsupported beaconing mode\n");
+		ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
 		return;
 	}
 }
@@ -659,8 +653,7 @@
 		ath9k_htc_beacon_config_ap(priv, cur_conf);
 		break;
 	default:
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Unsupported beaconing mode\n");
+		ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
 		return;
 	}
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index e3a02eb..6506e1f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -36,12 +36,12 @@
 		priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
 		/* Detect if colocated bt started scanning */
 		if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
-			ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+			ath_dbg(ath9k_hw_common(ah), BTCOEX,
 				"BT scan detected\n");
 			priv->op_flags |= (OP_BT_SCAN |
 					 OP_BT_PRIORITY_DETECTED);
 		} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
-			ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+			ath_dbg(ath9k_hw_common(ah), BTCOEX,
 				"BT priority traffic detected\n");
 			priv->op_flags |= OP_BT_PRIORITY_DETECTED;
 		}
@@ -80,6 +80,7 @@
 	ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
 			btcoex->bt_stomp_type);
 
+	ath9k_hw_btcoex_enable(priv->ah);
 	timer_period = is_btscan ? btcoex->btscan_no_stomp :
 		btcoex->btcoex_no_stomp;
 	ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@@ -101,19 +102,22 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	bool is_btscan = priv->op_flags & OP_BT_SCAN;
 
-	ath_dbg(common, ATH_DBG_BTCOEX,
-		"time slice work for bt and wlan\n");
+	ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
 
 	if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
 		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
 	else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
 		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+	ath9k_hw_btcoex_enable(priv->ah);
 }
 
 void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
 {
 	struct ath_btcoex *btcoex = &priv->btcoex;
 
+	if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
 	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
 		btcoex->btcoex_period / 100;
@@ -132,7 +136,10 @@
 	struct ath_btcoex *btcoex = &priv->btcoex;
 	struct ath_hw *ah = priv->ah;
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n");
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
+	ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n");
 
 	btcoex->bt_priority_cnt = 0;
 	btcoex->bt_priority_time = jiffies;
@@ -146,6 +153,9 @@
  */
 void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
 {
+	if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+		return;
+
 	cancel_delayed_work_sync(&priv->coex_period_work);
 	cancel_delayed_work_sync(&priv->duty_cycle_work);
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 966661c..9be10a2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -299,8 +299,7 @@
 			  (u8 *) &val, sizeof(val),
 			  100);
 	if (unlikely(r)) {
-		ath_dbg(common, ATH_DBG_WMI,
-			"REGISTER READ FAILED: (0x%04x, %d)\n",
+		ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
 			reg_offset, r);
 		return -EIO;
 	}
@@ -327,7 +326,7 @@
 			   (u8 *)tmpval, sizeof(u32) * count,
 			   100);
 	if (unlikely(ret)) {
-		ath_dbg(common, ATH_DBG_WMI,
+		ath_dbg(common, WMI,
 			"Multiple REGISTER READ FAILED (count: %d)\n", count);
 	}
 
@@ -352,8 +351,7 @@
 			  (u8 *) &val, sizeof(val),
 			  100);
 	if (unlikely(r)) {
-		ath_dbg(common, ATH_DBG_WMI,
-			"REGISTER WRITE FAILED:(0x%04x, %d)\n",
+		ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n",
 			reg_offset, r);
 	}
 }
@@ -384,7 +382,7 @@
 			  (u8 *) &rsp_status, sizeof(rsp_status),
 			  100);
 		if (unlikely(r)) {
-			ath_dbg(common, ATH_DBG_WMI,
+			ath_dbg(common, WMI,
 				"REGISTER WRITE FAILED, multi len: %d\n",
 				priv->wmi->multi_write_idx);
 		}
@@ -434,7 +432,7 @@
 			  (u8 *) &rsp_status, sizeof(rsp_status),
 			  100);
 		if (unlikely(r)) {
-			ath_dbg(common, ATH_DBG_WMI,
+			ath_dbg(common, WMI,
 				"REGISTER WRITE FAILED, multi len: %d\n",
 				priv->wmi->multi_write_idx);
 		}
@@ -512,8 +510,7 @@
 	tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
 	rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"TX streams %d, RX streams: %d\n",
+	ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
 		tx_streams, rx_streams);
 
 	if (tx_streams != rx_streams) {
@@ -610,7 +607,7 @@
 {
 	int qnum;
 
-	switch (priv->ah->btcoex_hw.scheme) {
+	switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
 	case ATH_BTCOEX_CFG_NONE:
 		break;
 	case ATH_BTCOEX_CFG_3WIRE:
@@ -704,7 +701,8 @@
 
 	if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
 		ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
-		ath9k_init_btcoex(priv);
+		if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
+			ath9k_init_btcoex(priv);
 	}
 
 	return 0;
@@ -876,9 +874,8 @@
 		goto err_world;
 	}
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
-		"BE:%d, BK:%d, VI:%d, VO:%d\n",
+	ath_dbg(common, CONFIG,
+		"WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n",
 		priv->wmi_cmd_ep,
 		priv->beacon_ep,
 		priv->cab_ep,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0b9a0e8..ef4c606 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -266,7 +266,7 @@
 
 	ath9k_wmi_event_drain(priv);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
 		priv->ah->curchan->channel,
 		channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
@@ -415,7 +415,7 @@
 	priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
 	priv->ah->is_monitoring = true;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"Attached a monitor interface at idx: %d, sta idx: %d\n",
 		priv->mon_vif_idx, sta_idx);
 
@@ -427,7 +427,7 @@
 	 */
 	__ath9k_htc_remove_monitor_interface(priv);
 err_vif:
-	ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+	ath_dbg(common, FATAL, "Unable to attach a monitor interface\n");
 
 	return ret;
 }
@@ -452,7 +452,7 @@
 	priv->nstations--;
 	priv->ah->is_monitoring = false;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"Removed a monitor interface at idx: %d, sta idx: %d\n",
 		priv->mon_vif_idx, sta_idx);
 
@@ -512,11 +512,11 @@
 	}
 
 	if (sta) {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Added a station entry for: %pM (idx: %d)\n",
 			sta->addr, tsta.sta_index);
 	} else {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Added a station entry for VIF %d (idx: %d)\n",
 			avp->index, tsta.sta_index);
 	}
@@ -556,11 +556,11 @@
 	}
 
 	if (sta) {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Removed a station entry for: %pM (idx: %d)\n",
 			sta->addr, sta_idx);
 	} else {
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Removed a station entry for VIF %d (idx: %d)\n",
 			avp->index, sta_idx);
 	}
@@ -665,7 +665,7 @@
 	ath9k_htc_setup_rate(priv, sta, &trate);
 	ret = ath9k_htc_send_rate_cmd(priv, &trate);
 	if (!ret)
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Updated target sta: %pM, rate caps: 0x%X\n",
 			sta->addr, be32_to_cpu(trate.capflags));
 }
@@ -692,7 +692,7 @@
 
 	ret = ath9k_htc_send_rate_cmd(priv, &trate);
 	if (!ret)
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Updated target sta: %pM, rate caps: 0x%X\n",
 			bss_conf->bssid, be32_to_cpu(trate.capflags));
 }
@@ -721,11 +721,11 @@
 
 	WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
 	if (ret)
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Unable to %s TX aggregation for (%pM, %d)\n",
 			(aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
 	else
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"%s TX aggregation for (%pM, %d)\n",
 			(aggr.aggr_enable) ? "Starting" : "Stopping",
 			sta->addr, tid);
@@ -784,7 +784,7 @@
 	/* Long calibration runs independently of short calibration. */
 	if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
 		longcal = true;
-		ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+		ath_dbg(common, ANI, "longcal @%lu\n", jiffies);
 		common->ani.longcal_timer = timestamp;
 	}
 
@@ -793,8 +793,7 @@
 		if ((timestamp - common->ani.shortcal_timer) >=
 		    short_cal_interval) {
 			shortcal = true;
-			ath_dbg(common, ATH_DBG_ANI,
-				"shortcal @%lu\n", jiffies);
+			ath_dbg(common, ANI, "shortcal @%lu\n", jiffies);
 			common->ani.shortcal_timer = timestamp;
 			common->ani.resetcal_timer = timestamp;
 		}
@@ -808,7 +807,8 @@
 	}
 
 	/* Verify whether we must check ANI */
-	if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+	if (ah->config.enable_ani &&
+	    (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
 		aniflag = true;
 		common->ani.checkani_timer = timestamp;
 	}
@@ -838,7 +838,7 @@
 	* short calibration and long calibration.
 	*/
 	cal_interval = ATH_LONG_CALINTERVAL;
-	if (priv->ah->config.enable_ani)
+	if (ah->config.enable_ani)
 		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
 	if (!common->ani.caldone)
 		cal_interval = min(cal_interval, (u32)short_cal_interval);
@@ -865,7 +865,7 @@
 	padsize = padpos & 3;
 	if (padsize && skb->len > padpos) {
 		if (skb_headroom(skb) < padsize) {
-			ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n");
+			ath_dbg(common, XMIT, "No room for padding\n");
 			goto fail_tx;
 		}
 		skb_push(skb, padsize);
@@ -874,13 +874,13 @@
 
 	slot = ath9k_htc_tx_get_slot(priv);
 	if (slot < 0) {
-		ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n");
+		ath_dbg(common, XMIT, "No free TX slot\n");
 		goto fail_tx;
 	}
 
 	ret = ath9k_htc_tx_start(priv, skb, slot, false);
 	if (ret != 0) {
-		ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n");
+		ath_dbg(common, XMIT, "Tx failed\n");
 		goto clear_slot;
 	}
 
@@ -908,7 +908,7 @@
 
 	mutex_lock(&priv->mutex);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"Starting driver with initial channel: %d MHz\n",
 		curchan->center_freq);
 
@@ -942,7 +942,7 @@
 
 	ret = ath9k_htc_update_cap_target(priv, 0);
 	if (ret)
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Failed to update capability in target\n");
 
 	priv->op_flags &= ~OP_INVALID;
@@ -957,7 +957,7 @@
 	mod_timer(&priv->tx.cleanup_timer,
 		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
 
-	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
 		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
 					   AR_STOMP_LOW_WLAN_WGHT);
 		ath9k_hw_btcoex_enable(ah);
@@ -979,7 +979,7 @@
 	mutex_lock(&priv->mutex);
 
 	if (priv->op_flags & OP_INVALID) {
-		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+		ath_dbg(common, ANY, "Device not present\n");
 		mutex_unlock(&priv->mutex);
 		return;
 	}
@@ -1009,7 +1009,8 @@
 
 	mutex_lock(&priv->mutex);
 
-	if (ah->btcoex_hw.enabled) {
+	if (ah->btcoex_hw.enabled &&
+	    ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
 		ath9k_hw_btcoex_disable(ah);
 		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
 			ath_htc_cancel_btcoex_work(priv);
@@ -1026,7 +1027,7 @@
 
 	priv->op_flags |= OP_INVALID;
 
-	ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+	ath_dbg(common, CONFIG, "Driver halt\n");
 	mutex_unlock(&priv->mutex);
 }
 
@@ -1119,8 +1120,8 @@
 		ath9k_htc_start_ani(priv);
 	}
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+	ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n",
+		vif->type, avp->index);
 
 out:
 	ath9k_htc_ps_restore(priv);
@@ -1176,7 +1177,7 @@
 			ath9k_htc_stop_ani(priv);
 	}
 
-	ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+	ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index);
 
 	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
@@ -1201,8 +1202,7 @@
 		mutex_unlock(&priv->htc_pm_lock);
 
 		if (enable_radio) {
-			ath_dbg(common, ATH_DBG_CONFIG,
-				"not-idle: enabling radio\n");
+			ath_dbg(common, CONFIG, "not-idle: enabling radio\n");
 			ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
 			ath9k_htc_radio_enable(hw);
 		}
@@ -1224,7 +1224,7 @@
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
 
-		ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+		ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
 			curchan->center_freq);
 
 		ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
@@ -1264,8 +1264,7 @@
 		}
 		mutex_unlock(&priv->htc_pm_lock);
 
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"idle: disabling radio\n");
+		ath_dbg(common, CONFIG, "idle: disabling radio\n");
 		ath9k_htc_radio_disable(hw);
 	}
 
@@ -1297,7 +1296,7 @@
 	*total_flags &= SUPPORTED_FILTERS;
 
 	if (priv->op_flags & OP_INVALID) {
-		ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
+		ath_dbg(ath9k_hw_common(priv->ah), ANY,
 			"Unable to configure filter on invalid state\n");
 		mutex_unlock(&priv->mutex);
 		return;
@@ -1308,8 +1307,8 @@
 	rfilt = ath9k_htc_calcrxfilter(priv);
 	ath9k_hw_setrxfilter(priv->ah, rfilt);
 
-	ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
-		"Set HW RX filter: 0x%x\n", rfilt);
+	ath_dbg(ath9k_hw_common(priv->ah), CONFIG, "Set HW RX filter: 0x%x\n",
+		rfilt);
 
 	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
@@ -1376,7 +1375,7 @@
 
 	qnum = get_hw_qnum(queue, priv->hwq_map);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"Configure tx [queue/hwq] [%d/%d],  aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
 		queue, qnum, params->aifs, params->cw_min,
 		params->cw_max, params->txop);
@@ -1411,7 +1410,7 @@
 		return -ENOSPC;
 
 	mutex_lock(&priv->mutex);
-	ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+	ath_dbg(common, CONFIG, "Set HW Key\n");
 	ath9k_htc_ps_wakeup(priv);
 
 	switch (cmd) {
@@ -1447,8 +1446,7 @@
 	struct ath_common *common = ath9k_hw_common(priv->ah);
 
 	ath9k_hw_write_associd(priv->ah);
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"BSSID: %pM aid: 0x%x\n",
+	ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
 		common->curbssid, common->curaid);
 }
 
@@ -1486,7 +1484,7 @@
 	ath9k_htc_ps_wakeup(priv);
 
 	if (changed & BSS_CHANGED_ASSOC) {
-		ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+		ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
 			bss_conf->assoc);
 
 		bss_conf->assoc ?
@@ -1511,8 +1509,8 @@
 	}
 
 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Beacon enabled for BSS: %pM\n", bss_conf->bssid);
+		ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
+			bss_conf->bssid);
 		ath9k_htc_set_tsfadjust(priv, vif);
 		priv->op_flags |= OP_ENABLE_BEACON;
 		ath9k_htc_beacon_config(priv, vif);
@@ -1524,7 +1522,7 @@
 		 * AP/IBSS interfaces.
 		 */
 		if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
-			ath_dbg(common, ATH_DBG_CONFIG,
+			ath_dbg(common, CONFIG,
 				"Beacon disabled for BSS: %pM\n",
 				bss_conf->bssid);
 			priv->op_flags &= ~OP_ENABLE_BEACON;
@@ -1542,7 +1540,7 @@
 		    (vif->type == NL80211_IFTYPE_AP)) {
 			priv->op_flags |= OP_TSF_RESET;
 		}
-		ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, CONFIG,
 			"Beacon interval changed for BSS: %pM\n",
 			bss_conf->bssid);
 		ath9k_htc_beacon_config(priv, vif);
@@ -1732,8 +1730,7 @@
 		goto out;
 	}
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"Set bitrate masks: 0x%x, 0x%x\n",
+	ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
 		mask->control[IEEE80211_BAND_2GHZ].legacy,
 		mask->control[IEEE80211_BAND_5GHZ].legacy);
 out:
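
The ANI work rescheduling touched above picks the shortest interval that is currently relevant: the long-calibration interval always applies, the ANI poll interval only when ANI is enabled, and the short-calibration interval only until the first calibration completes. A small stand-alone sketch of that selection; the interval values are illustrative, not the driver's #defines.

#include <stdbool.h>
#include <stdio.h>

/* illustrative values only */
#define LONG_CAL_MS  30000
#define ANI_POLL_MS    100
#define SHORT_CAL_MS  1000

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int next_cal_interval(bool ani_enabled, bool caldone)
{
	unsigned int interval = LONG_CAL_MS;

	if (ani_enabled)
		interval = min_u32(interval, ANI_POLL_MS);
	if (!caldone)
		interval = min_u32(interval, SHORT_CAL_MS);
	return interval;
}

int main(void)
{
	printf("%u\n", next_cal_interval(true, true));	/* 100   */
	printf("%u\n", next_cal_interval(false, false));	/* 1000  */
	printf("%u\n", next_cal_interval(false, true));	/* 30000 */
	return 0;
}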
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2d81c70..3e40a64 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -355,7 +355,7 @@
 		vif_idx = avp->index;
 	} else {
 		if (!priv->ah->is_monitoring) {
-			ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+			ath_dbg(ath9k_hw_common(priv->ah), XMIT,
 				"VIF is null, but no monitor interface !\n");
 			return -EINVAL;
 		}
@@ -620,8 +620,7 @@
 	}
 	spin_unlock_irqrestore(&epid_queue->lock, flags);
 
-	ath_dbg(common, ATH_DBG_XMIT,
-		"No matching packet for cookie: %d, epid: %d\n",
+	ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n",
 		txs->cookie, epid);
 
 	return NULL;
@@ -705,8 +704,7 @@
 	if (time_after(jiffies,
 		       tx_ctl->timestamp +
 		       msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
-		ath_dbg(common, ATH_DBG_XMIT,
-			"Dropping a packet due to TX timeout\n");
+		ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n");
 		return true;
 	}
 
@@ -753,7 +751,7 @@
 
 		skb = ath9k_htc_tx_get_packet(priv, &event->txs);
 		if (skb) {
-			ath_dbg(common, ATH_DBG_XMIT,
+			ath_dbg(common, XMIT,
 				"Found packet for cookie: %d, epid: %d\n",
 				event->txs.cookie,
 				MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
@@ -1167,8 +1165,7 @@
 	spin_unlock(&priv->rx.rxbuflock);
 
 	if (rxbuf == NULL) {
-		ath_dbg(common, ATH_DBG_ANY,
-			"No free RX buffer\n");
+		ath_dbg(common, ANY, "No free RX buffer\n");
 		goto err;
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index e74c233..c4ad0b0 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -212,4 +212,13 @@
 	return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
 							  ini_reloaded);
 }
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+	if (!ath9k_hw_private_ops(ah)->set_radar_params)
+		return;
+
+	ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
 #endif /* ATH9K_HW_OPS_H */
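
The new ath9k_hw_set_radar_params() wrapper follows the same shape as the other helpers in this header: a private-ops callback, presumably filled in by the hardware-family attach code, is called through only when present, so chips without radar support silently skip it. A generic stand-alone sketch of that optional-callback dispatch, with made-up names rather than the ath9k structures:

#include <stdio.h>

struct radar_conf { int pulse_height; };

struct hw_ops {
	/* optional: may be NULL on hardware without radar support */
	void (*set_radar_params)(struct radar_conf *conf);
};

struct device {
	struct hw_ops ops;
	struct radar_conf radar_conf;
};

static void family_set_radar_params(struct radar_conf *conf)
{
	printf("programming radar params, pulse_height=%d\n", conf->pulse_height);
}

static void device_set_radar_params(struct device *dev)
{
	if (!dev->ops.set_radar_params)	/* quietly skip unsupported hardware */
		return;
	dev->ops.set_radar_params(&dev->radar_conf);
}

int main(void)
{
	struct device with = { .ops = { family_set_radar_params },
			       .radar_conf = { .pulse_height = 10 } };
	struct device without = { 0 };

	device_set_radar_params(&with);		/* prints */
	device_set_radar_params(&without);	/* no-op */
	return 0;
}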
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8873c6e..ee77595 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -133,7 +133,7 @@
 		udelay(AH_TIME_QUANTUM);
 	}
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY,
+	ath_dbg(ath9k_hw_common(ah), ANY,
 		"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
 		timeout, reg, REG_READ(ah, reg), mask, val);
 
@@ -491,8 +491,7 @@
 	if (ecode != 0)
 		return ecode;
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG,
-		"Eeprom VER: %d, REV: %d\n",
+	ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
 		ah->eep_ops->get_eeprom_ver(ah),
 		ah->eep_ops->get_eeprom_rev(ah));
 
@@ -504,7 +503,7 @@
 		return ecode;
 	}
 
-	if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+	if (ah->config.enable_ani) {
 		ath9k_hw_ani_setup(ah);
 		ath9k_hw_ani_init(ah);
 	}
@@ -567,7 +566,7 @@
 		}
 	}
 
-	ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
+	ath_dbg(common, RESET, "serialize_regmode is %d\n",
 		ah->config.serialize_regmode);
 
 	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
@@ -610,6 +609,10 @@
 	if (!AR_SREV_9300_20_OR_LATER(ah))
 		ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
 
+	/* disable ANI for 9340 */
+	if (AR_SREV_9340(ah))
+		ah->config.enable_ani = false;
+
 	ath9k_hw_init_mode_regs(ah);
 
 	if (!ah->is_pciexpress)
@@ -954,8 +957,8 @@
 static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
 {
 	if (tu > 0xFFFF) {
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
-			"bad global tx timeout %u\n", tu);
+		ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
+			tu);
 		ah->globaltxtimeout = (u32) -1;
 		return false;
 	} else {
@@ -976,7 +979,7 @@
 	int rx_lat = 0, tx_lat = 0, eifs = 0;
 	u32 reg;
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
+	ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
 		ah->misc_mode);
 
 	if (!chan)
@@ -1271,7 +1274,7 @@
 		    (npend || type == ATH9K_RESET_COLD)) {
 			int reset_err = 0;
 
-			ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+			ath_dbg(ath9k_hw_common(ah), RESET,
 				"reset MAC via external reset\n");
 
 			reset_err = ah->external_reset();
@@ -1294,8 +1297,7 @@
 
 	REG_WRITE(ah, AR_RTC_RC, 0);
 	if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
-			"RTC stuck in MAC reset\n");
+		ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
 		return false;
 	}
 
@@ -1340,8 +1342,7 @@
 			   AR_RTC_STATUS_M,
 			   AR_RTC_STATUS_ON,
 			   AH_WAIT_TIMEOUT)) {
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
-			"RTC not waking up\n");
+		ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
 		return false;
 	}
 
@@ -1350,6 +1351,7 @@
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 {
+	bool ret = false;
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1361,13 +1363,20 @@
 
 	switch (type) {
 	case ATH9K_RESET_POWER_ON:
-		return ath9k_hw_set_reset_power_on(ah);
+		ret = ath9k_hw_set_reset_power_on(ah);
+		break;
 	case ATH9K_RESET_WARM:
 	case ATH9K_RESET_COLD:
-		return ath9k_hw_set_reset(ah, type);
+		ret = ath9k_hw_set_reset(ah, type);
+		break;
 	default:
-		return false;
+		break;
 	}
+
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+		REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+	return ret;
 }
 
 static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1406,7 +1415,7 @@
 
 	for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
 		if (ath9k_hw_numtxpending(ah, qnum)) {
-			ath_dbg(common, ATH_DBG_QUEUE,
+			ath_dbg(common, QUEUE,
 				"Transmit frames pending on queue %d\n", qnum);
 			return false;
 		}
@@ -1506,6 +1515,7 @@
 		   struct ath9k_hw_cal_data *caldata, bool bChannelChange)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
 	u32 saveLedState;
 	struct ath9k_channel *curchan = ah->curchan;
 	u32 saveDefAntenna;
@@ -1513,6 +1523,52 @@
 	u64 tsf = 0;
 	int i, r;
 	bool allow_fbs = false;
+	bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+	bool save_fullsleep = ah->chip_fullsleep;
+
+	if (mci) {
+
+		ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+		if (mci_hw->bt_state == MCI_BT_CAL_START) {
+			u32 payload[4] = {0, 0, 0, 0};
+
+			ath_dbg(common, MCI, "MCI stop rx for BT CAL\n");
+
+			mci_hw->bt_state = MCI_BT_CAL;
+
+			/*
+			 * MCI FIX: disable the MCI interrupt here to prevent
+			 * the SW_MSG_DONE or RX_MSG bits from triggering
+			 * MCI_INT and causing mci_intr reentry.
+			 */
+
+			ar9003_mci_disable_interrupt(ah);
+
+			ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n");
+			MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+			ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+						16, true, false);
+
+			ath_dbg(common, MCI, "\nMCI BT is calibrating\n");
+
+			/* Wait up to 25 ms for BT calibration to complete */
+
+			if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+								  0, 25000))
+				ath_dbg(common, MCI,
+					"MCI got BT_CAL_DONE\n");
+			else
+				ath_dbg(common, MCI,
+					"MCI ### BT cal takes too long, force bt_state to be bt_awake\n");
+			mci_hw->bt_state = MCI_BT_AWAKE;
+			/* MCI FIX: enable mci interrupt here */
+			ar9003_mci_enable_interrupt(ah);
+
+			return true;
+		}
+	}
+
 
 	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
 		return -EIO;
@@ -1550,12 +1606,29 @@
 		if (ath9k_hw_channel_change(ah, chan)) {
 			ath9k_hw_loadnf(ah, ah->curchan);
 			ath9k_hw_start_nfcal(ah, true);
+			if (mci && mci_hw->ready)
+				ar9003_mci_2g5g_switch(ah, true);
+
 			if (AR_SREV_9271(ah))
 				ar9002_hw_load_ani_reg(ah, chan);
 			return 0;
 		}
 	}
 
+	if (mci) {
+		ar9003_mci_disable_interrupt(ah);
+
+		if (mci_hw->ready && !save_fullsleep) {
+			ar9003_mci_mute_bt(ah);
+			udelay(20);
+			REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+		}
+
+		mci_hw->bt_state = MCI_BT_SLEEP;
+		mci_hw->ready = false;
+	}
+
+
 	saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
 	if (saveDefAntenna == 0)
 		saveDefAntenna = 1;
@@ -1611,6 +1684,9 @@
 	if (r)
 		return r;
 
+	if (mci)
+		ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
 	/*
 	 * Some AR91xx SoC devices frequently fail to accept TSF writes
 	 * right after the chip reset. When that happens, write a new
@@ -1728,6 +1804,54 @@
 	ath9k_hw_loadnf(ah, chan);
 	ath9k_hw_start_nfcal(ah, true);
 
+	if (mci && mci_hw->ready) {
+
+		if (IS_CHAN_2GHZ(chan) &&
+		    (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+			if (ar9003_mci_check_int(ah,
+			    AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+			    ar9003_mci_check_int(ah,
+			    AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+				/*
+				 * BT is sleeping. If BT wakes up during
+				 * WLAN calibration, all message exchanges
+				 * must be repeated and the calibration
+				 * redone.
+				 */
+
+				ath_dbg(common, MCI,
+					"MCI BT wakes up during WLAN calibration\n");
+
+				REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+					  AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+					  AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+				ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+				ar9003_mci_remote_reset(ah, true);
+				ar9003_mci_send_sys_waking(ah, true);
+				udelay(1);
+				if (IS_CHAN_2GHZ(chan))
+					ar9003_mci_send_lna_transfer(ah, true);
+
+				mci_hw->bt_state = MCI_BT_AWAKE;
+
+				ath_dbg(common, MCI, "MCI re-cal\n");
+
+				if (caldata) {
+					caldata->done_txiqcal_once = false;
+					caldata->done_txclcal_once = false;
+					caldata->rtt_hist.num_readings = 0;
+				}
+
+				if (!ath9k_hw_init_cal(ah, chan))
+					return -EIO;
+
+			}
+		}
+		ar9003_mci_enable_interrupt(ah);
+	}
+
 	ENABLE_REGWRITE_BUFFER(ah);
 
 	ath9k_hw_restore_chainmask(ah);
@@ -1742,14 +1866,14 @@
 		u32 mask;
 		mask = REG_READ(ah, AR_CFG);
 		if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
-			ath_dbg(common, ATH_DBG_RESET,
-				"CFG Byte Swap Set 0x%x\n", mask);
+			ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
+				mask);
 		} else {
 			mask =
 				INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
 			REG_WRITE(ah, AR_CFG, mask);
-			ath_dbg(common, ATH_DBG_RESET,
-				"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
+			ath_dbg(common, RESET, "Setting CFG 0x%x\n",
+				REG_READ(ah, AR_CFG));
 		}
 	} else {
 		if (common->bus_ops->ath_bus_type == ATH_USB) {
@@ -1767,9 +1891,25 @@
 #endif
 	}
 
-	if (ah->btcoex_hw.enabled)
+	if (ah->btcoex_hw.enabled &&
+	    ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
 		ath9k_hw_btcoex_enable(ah);
 
+	if (mci && mci_hw->ready) {
+		/*
+		 * check BT state again to make
+		 * Check the BT state again to make
+		 * sure it hasn't changed.
+
+		ar9003_mci_sync_bt_state(ah);
+		ar9003_mci_2g5g_switch(ah, true);
+
+		if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+				(mci_hw->query_bt == true)) {
+			mci_hw->need_flush_btinfo = true;
+		}
+	}
+
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ar9003_hw_bb_watchdog_config(ah);
 
@@ -1934,6 +2074,7 @@
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
 	int status = true, setChip = true;
 	static const char *modes[] = {
 		"AWAKE",
@@ -1945,18 +2086,41 @@
 	if (ah->power_mode == mode)
 		return status;
 
-	ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
+	ath_dbg(common, RESET, "%s -> %s\n",
 		modes[ah->power_mode], modes[mode]);
 
 	switch (mode) {
 	case ATH9K_PM_AWAKE:
 		status = ath9k_hw_set_power_awake(ah, setChip);
+
+		if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+			REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
 		break;
 	case ATH9K_PM_FULL_SLEEP:
+
+		if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+			if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+				(mci->bt_state != MCI_BT_SLEEP) &&
+				!mci->halted_bt_gpm) {
+				ath_dbg(common, MCI,
+					"MCI halt BT GPM (full_sleep)\n");
+				ar9003_mci_send_coex_halt_bt_gpm(ah,
+								 true, true);
+			}
+
+			mci->ready = false;
+			REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+		}
+
 		ath9k_set_power_sleep(ah, setChip);
 		ah->chip_fullsleep = true;
 		break;
 	case ATH9K_PM_NETWORK_SLEEP:
+
+		if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+			REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
 		ath9k_set_power_network_sleep(ah, setChip);
 		break;
 	default:
@@ -2006,9 +2170,8 @@
 			AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
 		break;
 	default:
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
-			"%s: unsupported opmode: %d\n",
-			__func__, ah->opmode);
+		ath_dbg(ath9k_hw_common(ah), BEACON,
+			"%s: unsupported opmode: %d\n", __func__, ah->opmode);
 		return;
 		break;
 	}
@@ -2059,10 +2222,10 @@
 	else
 		nextTbtt = bs->bs_nexttbtt;
 
-	ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
-	ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
-	ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
-	ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+	ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+	ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
+	ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
+	ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
@@ -2109,6 +2272,30 @@
 		return chip_chainmask;
 }
 
+/**
+ * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream only on chipsets which have passed a
+ * series of tests. The desired test requirements are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset gets properly tested an individual commit can be used
+ * to document the testing for DFS for that chipset.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+	switch (ah->hw_version.macVersion) {
+	/* AR9580 will likely be our first target to get testing on */
+	case AR_SREV_VERSION_9580:
+	default:
+		return false;
+	}
+}
+
 int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2130,8 +2317,8 @@
 			regulatory->current_rd += 5;
 		else if (regulatory->current_rd == 0x41)
 			regulatory->current_rd = 0x43;
-		ath_dbg(common, ATH_DBG_REGULATORY,
-			"regdomain mapped to 0x%x\n", regulatory->current_rd);
+		ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
+			regulatory->current_rd);
 	}
 
 	eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
@@ -2149,6 +2336,8 @@
 
 	if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
 		chip_chainmask = 1;
+	else if (AR_SREV_9462(ah))
+		chip_chainmask = 3;
 	else if (!AR_SREV_9280_20_OR_LATER(ah))
 		chip_chainmask = 7;
 	else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2205,12 +2394,10 @@
 	else
 		pCap->num_gpio_pins = AR_NUM_GPIO;
 
-	if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
-		pCap->hw_caps |= ATH9K_HW_CAP_CST;
+	if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
 		pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
-	} else {
+	else
 		pCap->rts_aggr_limit = (8 * 1024);
-	}
 
 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
 	ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2234,7 +2421,9 @@
 		pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
 
 	if (common->btcoex_enabled) {
-		if (AR_SREV_9300_20_OR_LATER(ah)) {
+		if (AR_SREV_9462(ah))
+			btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+		else if (AR_SREV_9300_20_OR_LATER(ah)) {
 			btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
 			btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
 			btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2318,6 +2507,9 @@
 		pCap->pcie_lcr_offset = 0x80;
 	}
 
+	if (ath9k_hw_dfs_tested(ah))
+		pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
 	tx_chainmask = pCap->tx_chainmask;
 	rx_chainmask = pCap->rx_chainmask;
 	while (tx_chainmask || rx_chainmask) {
@@ -2332,11 +2524,11 @@
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ah->enabled_cals |= TX_IQ_CAL;
-		if (!AR_SREV_9330(ah))
+		if (AR_SREV_9485_OR_LATER(ah))
 			ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
 	}
 	if (AR_SREV_9462(ah))
-		pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+		pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
 
 	return 0;
 }
@@ -2584,7 +2776,7 @@
 	struct ath9k_channel *chan = ah->curchan;
 	struct ieee80211_channel *channel = chan->chan;
 
-	reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
+	reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
 	if (test)
 		channel->max_power = MAX_RATE_POWER / 2;
 
@@ -2651,7 +2843,7 @@
 {
 	if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
 			   AH_TSF_WRITE_TIMEOUT))
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+		ath_dbg(ath9k_hw_common(ah), RESET,
 			"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
 
 	REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
@@ -2776,7 +2968,7 @@
 
 	timer_next = tsf + trig_timeout;
 
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
+	ath_dbg(ath9k_hw_common(ah), HWTIMER,
 		"current tsf %x period %x timer_next %x\n",
 		tsf, timer_period, timer_next);
 
@@ -2865,8 +3057,8 @@
 		index = rightmost_index(timer_table, &thresh_mask);
 		timer = timer_table->timers[index];
 		BUG_ON(!timer);
-		ath_dbg(common, ATH_DBG_HWTIMER,
-			"TSF overflow for Gen timer %d\n", index);
+		ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
+			index);
 		timer->overflow(timer->arg);
 	}
 
@@ -2874,7 +3066,7 @@
 		index = rightmost_index(timer_table, &trigger_mask);
 		timer = timer_table->timers[index];
 		BUG_ON(!timer);
-		ath_dbg(common, ATH_DBG_HWTIMER,
+		ath_dbg(common, HWTIMER,
 			"Gen timer[%d] trigger\n", index);
 		timer->trigger(timer->arg);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index f389b3c..6a29004 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -59,9 +59,6 @@
 #define AT9285_COEX3WIRE_SA_SUBSYSID	0x30aa
 #define AT9285_COEX3WIRE_DA_SUBSYSID	0x30ab
 
-#define AR9300_NUM_BT_WEIGHTS   4
-#define AR9300_NUM_WLAN_WEIGHTS 4
-
 #define ATH_AMPDU_LIMIT_MAX        (64 * 1024 - 1)
 
 #define	ATH_DEFAULT_NOISE_FLOOR -95
@@ -129,6 +126,16 @@
 #define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL  4
 #define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED    5
 #define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED      6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA      0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK       0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA        0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK         0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX           0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX           0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX           9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX           8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE      0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA        0x1e
 
 #define AR_GPIOD_MASK               0x00001FFF
 #define AR_GPIO_BIT(_gpio)          (1 << (_gpio))
@@ -189,20 +196,25 @@
 enum ath9k_hw_caps {
 	ATH9K_HW_CAP_HT                         = BIT(0),
 	ATH9K_HW_CAP_RFSILENT                   = BIT(1),
-	ATH9K_HW_CAP_CST                        = BIT(2),
-	ATH9K_HW_CAP_AUTOSLEEP                  = BIT(4),
-	ATH9K_HW_CAP_4KB_SPLITTRANS             = BIT(5),
-	ATH9K_HW_CAP_EDMA			= BIT(6),
-	ATH9K_HW_CAP_RAC_SUPPORTED		= BIT(7),
-	ATH9K_HW_CAP_LDPC			= BIT(8),
-	ATH9K_HW_CAP_FASTCLOCK			= BIT(9),
-	ATH9K_HW_CAP_SGI_20			= BIT(10),
-	ATH9K_HW_CAP_PAPRD			= BIT(11),
-	ATH9K_HW_CAP_ANT_DIV_COMB		= BIT(12),
-	ATH9K_HW_CAP_2GHZ			= BIT(13),
-	ATH9K_HW_CAP_5GHZ			= BIT(14),
-	ATH9K_HW_CAP_APM			= BIT(15),
-	ATH9K_HW_CAP_RTT			= BIT(16),
+	ATH9K_HW_CAP_AUTOSLEEP                  = BIT(2),
+	ATH9K_HW_CAP_4KB_SPLITTRANS             = BIT(3),
+	ATH9K_HW_CAP_EDMA			= BIT(4),
+	ATH9K_HW_CAP_RAC_SUPPORTED		= BIT(5),
+	ATH9K_HW_CAP_LDPC			= BIT(6),
+	ATH9K_HW_CAP_FASTCLOCK			= BIT(7),
+	ATH9K_HW_CAP_SGI_20			= BIT(8),
+	ATH9K_HW_CAP_PAPRD			= BIT(9),
+	ATH9K_HW_CAP_ANT_DIV_COMB		= BIT(10),
+	ATH9K_HW_CAP_2GHZ			= BIT(11),
+	ATH9K_HW_CAP_5GHZ			= BIT(12),
+	ATH9K_HW_CAP_APM			= BIT(13),
+	ATH9K_HW_CAP_RTT			= BIT(14),
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+	ATH9K_HW_CAP_MCI			= BIT(15),
+#else
+	ATH9K_HW_CAP_MCI			= 0,
+#endif
+	ATH9K_HW_CAP_DFS			= BIT(16),
 };
 
 struct ath9k_hw_capabilities {
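
Note how ATH9K_HW_CAP_MCI is handled in the hunk above: with CONFIG_ATH9K_BTCOEX_SUPPORT disabled it is defined as 0 instead of being removed, so callers can test the bit unconditionally and the compiler discards the branch. A rough sketch of the call-site pattern this enables (it mirrors the later main.c hunk):

	/* Constant-false when btcoex support is compiled out, so the block is
	 * dead-code eliminated and no #ifdef is needed at the call site. */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
		ah->imask |= ATH9K_INT_MCI;
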
@@ -268,6 +280,7 @@
 	ATH9K_INT_TX = 0x00000040,
 	ATH9K_INT_TXDESC = 0x00000080,
 	ATH9K_INT_TIM_TIMER = 0x00000100,
+	ATH9K_INT_MCI = 0x00000200,
 	ATH9K_INT_BB_WATCHDOG = 0x00000400,
 	ATH9K_INT_TXURN = 0x00000800,
 	ATH9K_INT_MIB = 0x00001000,
@@ -419,6 +432,161 @@
 	ATH9K_RX_QUEUE_MAX,
 };
 
+enum mci_message_header {		/* length of payload */
+	MCI_LNA_CTRL     = 0x10,        /* len = 0 */
+	MCI_CONT_NACK    = 0x20,        /* len = 0 */
+	MCI_CONT_INFO    = 0x30,        /* len = 4 */
+	MCI_CONT_RST     = 0x40,        /* len = 0 */
+	MCI_SCHD_INFO    = 0x50,        /* len = 16 */
+	MCI_CPU_INT      = 0x60,        /* len = 4 */
+	MCI_SYS_WAKING   = 0x70,        /* len = 0 */
+	MCI_GPM          = 0x80,        /* len = 16 */
+	MCI_LNA_INFO     = 0x90,        /* len = 1 */
+	MCI_LNA_STATE    = 0x94,
+	MCI_LNA_TAKE     = 0x98,
+	MCI_LNA_TRANS    = 0x9c,
+	MCI_SYS_SLEEPING = 0xa0,        /* len = 0 */
+	MCI_REQ_WAKE     = 0xc0,        /* len = 0 */
+	MCI_DEBUG_16     = 0xfe,        /* len = 2 */
+	MCI_REMOTE_RESET = 0xff         /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+	MCI_GPM_COEX_PROFILE_UNKNOWN,
+	MCI_GPM_COEX_PROFILE_RFCOMM,
+	MCI_GPM_COEX_PROFILE_A2DP,
+	MCI_GPM_COEX_PROFILE_HID,
+	MCI_GPM_COEX_PROFILE_BNEP,
+	MCI_GPM_COEX_PROFILE_VOICE,
+	MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+	MCI_GPM_COEX_W_GPM_PAYLOAD      = 1,
+	MCI_GPM_COEX_B_GPM_TYPE         = 4,
+	MCI_GPM_COEX_B_GPM_OPCODE       = 5,
+	/* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+	MCI_GPM_WLAN_CAL_W_SEQUENCE     = 2,
+
+	/* MCI_GPM_COEX_VERSION_QUERY */
+	/* MCI_GPM_COEX_VERSION_RESPONSE */
+	MCI_GPM_COEX_B_MAJOR_VERSION    = 6,
+	MCI_GPM_COEX_B_MINOR_VERSION    = 7,
+	/* MCI_GPM_COEX_STATUS_QUERY */
+	MCI_GPM_COEX_B_BT_BITMAP        = 6,
+	MCI_GPM_COEX_B_WLAN_BITMAP      = 7,
+	/* MCI_GPM_COEX_HALT_BT_GPM */
+	MCI_GPM_COEX_B_HALT_STATE       = 6,
+	/* MCI_GPM_COEX_WLAN_CHANNELS */
+	MCI_GPM_COEX_B_CHANNEL_MAP      = 6,
+	/* MCI_GPM_COEX_BT_PROFILE_INFO */
+	MCI_GPM_COEX_B_PROFILE_TYPE     = 6,
+	MCI_GPM_COEX_B_PROFILE_LINKID   = 7,
+	MCI_GPM_COEX_B_PROFILE_STATE    = 8,
+	MCI_GPM_COEX_B_PROFILE_ROLE     = 9,
+	MCI_GPM_COEX_B_PROFILE_RATE     = 10,
+	MCI_GPM_COEX_B_PROFILE_VOTYPE   = 11,
+	MCI_GPM_COEX_H_PROFILE_T        = 12,
+	MCI_GPM_COEX_B_PROFILE_W        = 14,
+	MCI_GPM_COEX_B_PROFILE_A        = 15,
+	/* MCI_GPM_COEX_BT_STATUS_UPDATE */
+	MCI_GPM_COEX_B_STATUS_TYPE      = 6,
+	MCI_GPM_COEX_B_STATUS_LINKID    = 7,
+	MCI_GPM_COEX_B_STATUS_STATE     = 8,
+	/* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+	MCI_GPM_COEX_W_BT_FLAGS         = 6,
+	MCI_GPM_COEX_B_BT_FLAGS_OP      = 10
+};
+
+enum mci_gpm_subtype {
+	MCI_GPM_BT_CAL_REQ      = 0,
+	MCI_GPM_BT_CAL_GRANT    = 1,
+	MCI_GPM_BT_CAL_DONE     = 2,
+	MCI_GPM_WLAN_CAL_REQ    = 3,
+	MCI_GPM_WLAN_CAL_GRANT  = 4,
+	MCI_GPM_WLAN_CAL_DONE   = 5,
+	MCI_GPM_COEX_AGENT      = 0x0c,
+	MCI_GPM_RSVD_PATTERN    = 0xfe,
+	MCI_GPM_RSVD_PATTERN32  = 0xfefefefe,
+	MCI_GPM_BT_DEBUG        = 0xff
+};
+
+enum mci_bt_state {
+	MCI_BT_SLEEP,
+	MCI_BT_AWAKE,
+	MCI_BT_CAL_START,
+	MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+	MCI_STATE_ENABLE,
+	MCI_STATE_INIT_GPM_OFFSET,
+	MCI_STATE_NEXT_GPM_OFFSET,
+	MCI_STATE_LAST_GPM_OFFSET,
+	MCI_STATE_BT,
+	MCI_STATE_SET_BT_SLEEP,
+	MCI_STATE_SET_BT_AWAKE,
+	MCI_STATE_SET_BT_CAL_START,
+	MCI_STATE_SET_BT_CAL,
+	MCI_STATE_LAST_SCHD_MSG_OFFSET,
+	MCI_STATE_REMOTE_SLEEP,
+	MCI_STATE_CONT_RSSI_POWER,
+	MCI_STATE_CONT_PRIORITY,
+	MCI_STATE_CONT_TXRX,
+	MCI_STATE_RESET_REQ_WAKE,
+	MCI_STATE_SEND_WLAN_COEX_VERSION,
+	MCI_STATE_SET_BT_COEX_VERSION,
+	MCI_STATE_SEND_WLAN_CHANNELS,
+	MCI_STATE_SEND_VERSION_QUERY,
+	MCI_STATE_SEND_STATUS_QUERY,
+	MCI_STATE_NEED_FLUSH_BT_INFO,
+	MCI_STATE_SET_CONCUR_TX_PRI,
+	MCI_STATE_RECOVER_RX,
+	MCI_STATE_NEED_FTP_STOMP,
+	MCI_STATE_NEED_TUNING,
+	MCI_STATE_DEBUG,
+	MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+	MCI_GPM_COEX_VERSION_QUERY,
+	MCI_GPM_COEX_VERSION_RESPONSE,
+	MCI_GPM_COEX_STATUS_QUERY,
+	MCI_GPM_COEX_HALT_BT_GPM,
+	MCI_GPM_COEX_WLAN_CHANNELS,
+	MCI_GPM_COEX_BT_PROFILE_INFO,
+	MCI_GPM_COEX_BT_STATUS_UPDATE,
+	MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE  0
+#define MCI_GPM_MORE    1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm)	do {			  \
+	*(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+				MCI_GPM_RSVD_PATTERN32;   \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm)	\
+	(*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm)	\
+	(*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type)	do {			   \
+	*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do {		   \
+	*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff;	   \
+	*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
 struct ath9k_beacon_state {
 	u32 bs_nexttbtt;
 	u32 bs_nextdtim;
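
For orientation, a short example of how the GPM accessor macros introduced above compose on one 16-byte message slot; the buffer and values here are made up for illustration:

	u32 payload[4] = { 0, 0, 0, 0 };	/* one 16-byte GPM message */

	/* tag the slot as a coex message carrying a version query */
	MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_VERSION_QUERY);

	/* the receive side reads the same bytes back */
	if (MCI_GPM_TYPE(payload) == MCI_GPM_COEX_AGENT &&
	    MCI_GPM_OPCODE(payload) == MCI_GPM_COEX_VERSION_QUERY) {
		/* dispatch to the coex handler here */
	}

	/* once consumed, poison the slot so stale data is never re-read */
	MCI_GPM_RECYCLE(payload);
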
@@ -791,8 +959,6 @@
 
 	/* Bluetooth coexistance */
 	struct ath_btcoex_hw btcoex_hw;
-	u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
-	u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
 
 	u32 intr_txqs;
 	u8 txchainmask;
@@ -850,7 +1016,7 @@
 	u32 ts_paddr_start;
 	u32 ts_paddr_end;
 	u16 ts_tail;
-	u8 ts_size;
+	u16 ts_size;
 
 	u32 bb_watchdog_last_status;
 	u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
@@ -948,7 +1114,6 @@
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
 void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
 void ath9k_hw_write_associd(struct ath_hw *ah);
 u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1041,6 +1206,42 @@
 void ath9k_hw_proc_mib_event(struct ath_hw *ah);
 void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
 
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+			     u32 *payload, u8 len, bool wait_done,
+			     bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+		      u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+				      bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+			    u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+		      bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+			      u32 *rx_msg_intr);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+	return ah->btcoex_hw.scheme;
+}
+#else
+#define ath9k_hw_get_btcoex_scheme(...) ATH_BTCOEX_CFG_NONE
+#endif
+
 #define ATH9K_CLOCK_RATE_CCK		22
 #define ATH9K_CLOCK_RATE_5GHZ_OFDM	40
 #define ATH9K_CLOCK_RATE_2GHZ_OFDM	44
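
The ath9k_hw_get_btcoex_scheme() inline and its ATH_BTCOEX_CFG_NONE stub above let callers branch on the coex scheme without scattering #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT through the driver. Roughly, call sites end up with the shape below (a sketch only; the real handling is in the init.c and main.c hunks that follow):

	switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
	case ATH_BTCOEX_CFG_NONE:
		break;			/* also the compiled-out case */
	case ATH_BTCOEX_CFG_2WIRE:
	case ATH_BTCOEX_CFG_3WIRE:
		/* GPIO/wire based coexistence */
		break;
	case ATH_BTCOEX_CFG_MCI:
		/* message based (MCI) coexistence, handled by mci.c */
		break;
	}
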
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d4c909f..abf9435 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -258,6 +258,8 @@
 
 	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
 		max_streams = 1;
+	else if (AR_SREV_9462(ah))
+		max_streams = 2;
 	else if (AR_SREV_9300_20_OR_LATER(ah))
 		max_streams = 3;
 	else
@@ -274,8 +276,7 @@
 	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
 	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"TX streams %d, RX streams: %d\n",
+	ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
 		tx_streams, rx_streams);
 
 	if (tx_streams != rx_streams) {
@@ -295,9 +296,22 @@
 {
 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 	struct ath_softc *sc = hw->priv;
-	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+	int ret;
 
-	return ath_reg_notifier_apply(wiphy, request, reg);
+	ret = ath_reg_notifier_apply(wiphy, request, reg);
+
+	/* Set tx power */
+	if (ah->curchan) {
+		sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+		ath9k_ps_wakeup(sc);
+		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+		sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
+		ath9k_ps_restore(sc);
+	}
+
+	return ret;
 }
 
 /*
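
The factor of two in the regulatory notifier above follows from the units involved: the regulatory max_power is in dBm while ath9k carries tx power limits in 0.5 dBm steps (my reading of the surrounding code, e.g. the later "2 * conf->power_level" hunk, rather than something stated in this patch). A worked example with an assumed 20 dBm channel limit:

	sc->config.txpowlimit = 2 * ah->curchan->chan->max_power; /* 20 dBm -> 40 */
	ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
	/* the hw layer may clamp further; the effective value is read back
	 * from ath9k_hw_regulatory(ah)->power_limit into sc->curtxpow */
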
@@ -314,7 +328,7 @@
 	struct ath_buf *bf;
 	int i, bsize, error, desc_len;
 
-	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
 		name, nbuf, ndesc);
 
 	INIT_LIST_HEAD(head);
@@ -360,7 +374,7 @@
 		goto fail;
 	}
 	ds = (u8 *) dd->dd_desc;
-	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
 		name, ds, (u32) dd->dd_desc_len,
 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
 
@@ -408,9 +422,10 @@
 static int ath9k_init_btcoex(struct ath_softc *sc)
 {
 	struct ath_txq *txq;
+	struct ath_hw *ah = sc->sc_ah;
 	int r;
 
-	switch (sc->sc_ah->btcoex_hw.scheme) {
+	switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
 	case ATH_BTCOEX_CFG_NONE:
 		break;
 	case ATH_BTCOEX_CFG_2WIRE:
@@ -425,6 +440,37 @@
 		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
 		break;
+	case ATH_BTCOEX_CFG_MCI:
+		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+		sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+		INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+		r = ath_mci_setup(sc);
+		if (r)
+			return r;
+
+		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+			ah->btcoex_hw.mci.ready = false;
+			ah->btcoex_hw.mci.bt_state = 0;
+			ah->btcoex_hw.mci.bt_ver_major = 3;
+			ah->btcoex_hw.mci.bt_ver_minor = 0;
+			ah->btcoex_hw.mci.bt_version_known = false;
+			ah->btcoex_hw.mci.update_2g5g = true;
+			ah->btcoex_hw.mci.is_2g = true;
+			ah->btcoex_hw.mci.wlan_channels_update = false;
+			ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+			ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+			ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+			ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+			ah->btcoex_hw.mci.query_bt = true;
+			ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+			ah->btcoex_hw.mci.halted_bt_gpm = false;
+			ah->btcoex_hw.mci.need_flush_btinfo = false;
+			ah->btcoex_hw.mci.wlan_cal_seq = 0;
+			ah->btcoex_hw.mci.wlan_cal_done = 0;
+			ah->btcoex_hw.mci.config = 0x2201;
+		}
+		break;
 	default:
 		WARN_ON(1);
 		break;
@@ -695,6 +741,7 @@
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
 
 	hw->queues = 4;
 	hw->max_rates = 4;
@@ -833,9 +880,12 @@
 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
 
         if ((sc->btcoex.no_stomp_timer) &&
-	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+	    ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
 
+	if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
+		ath_mci_cleanup(sc);
+
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i))
 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index ecdb6fd..fd3f19c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -21,7 +21,7 @@
 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
 					struct ath9k_tx_queue_info *qi)
 {
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
+	ath_dbg(ath9k_hw_common(ah), INTERRUPT,
 		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
 		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
 		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
@@ -57,8 +57,7 @@
 
 void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
 {
-	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
-		"Enable TXE on queue: %u\n", q);
+	ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
 	REG_WRITE(ah, AR_Q_TXE, 1 << q);
 }
 EXPORT_SYMBOL(ath9k_hw_txstart);
@@ -202,12 +201,12 @@
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		ath_dbg(common, ATH_DBG_QUEUE,
+		ath_dbg(common, QUEUE,
 			"Set TXQ properties, inactive queue: %u\n", q);
 		return false;
 	}
 
-	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
+	ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
 
 	qi->tqi_ver = qinfo->tqi_ver;
 	qi->tqi_subtype = qinfo->tqi_subtype;
@@ -266,7 +265,7 @@
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		ath_dbg(common, ATH_DBG_QUEUE,
+		ath_dbg(common, QUEUE,
 			"Get TXQ properties, inactive queue: %u\n", q);
 		return false;
 	}
@@ -325,7 +324,7 @@
 		return -1;
 	}
 
-	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
+	ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
@@ -348,12 +347,11 @@
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Release TXQ, inactive queue: %u\n", q);
+		ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
 		return false;
 	}
 
-	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
+	ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
 
 	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
 	ah->txok_interrupt_mask &= ~(1 << q);
@@ -376,12 +374,11 @@
 
 	qi = &ah->txq[q];
 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
-		ath_dbg(common, ATH_DBG_QUEUE,
-			"Reset TXQ, inactive queue: %u\n", q);
+		ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
 		return true;
 	}
 
-	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
+	ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
 
 	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
 		if (chan && IS_CHAN_B(chan))
@@ -760,7 +757,10 @@
 		return true;
 
 	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
-	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+	if (((host_isr & AR_INTR_MAC_IRQ) ||
+	     (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+	    (host_isr != AR_INTR_SPURIOUS))
 		return true;
 
 	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -781,7 +781,7 @@
 	else
 		atomic_dec(&ah->intr_ref_cnt);
 
-	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
+	ath_dbg(common, INTERRUPT, "disable IER\n");
 	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
 	(void) REG_READ(ah, AR_IER);
 	if (!AR_SREV_9100(ah)) {
@@ -798,13 +798,13 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	u32 sync_default = AR_INTR_SYNC_DEFAULT;
+	u32 async_mask;
 
 	if (!(ah->imask & ATH9K_INT_GLOBAL))
 		return;
 
 	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
-		ath_dbg(common, ATH_DBG_INTERRUPT,
-			"Do not enable IER ref count %d\n",
+		ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
 			atomic_read(&ah->intr_ref_cnt));
 		return;
 	}
@@ -812,18 +812,21 @@
 	if (AR_SREV_9340(ah))
 		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
 
-	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
+	async_mask = AR_INTR_MAC_IRQ;
+
+	if (ah->imask & ATH9K_INT_MCI)
+		async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
+	ath_dbg(common, INTERRUPT, "enable IER\n");
 	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
 	if (!AR_SREV_9100(ah)) {
-		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
-			  AR_INTR_MAC_IRQ);
-		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+		REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
 
 		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
 		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
 	}
-	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+	ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
 		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
 }
 EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
@@ -838,7 +841,7 @@
 	if (!(ints & ATH9K_INT_GLOBAL))
 		ath9k_hw_disable_interrupts(ah);
 
-	ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);
+	ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
 
 	mask = ints & ATH9K_INT_COMMON;
 	mask2 = 0;
@@ -901,7 +904,7 @@
 			mask2 |= AR_IMR_S2_CST;
 	}
 
-	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+	ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
 	REG_WRITE(ah, AR_IMR, mask);
 	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
 			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f..e267c92 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,7 +118,7 @@
 	if (--sc->ps_usecount != 0)
 		goto unlock;
 
-	if (sc->ps_idle)
+	if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
 		mode = ATH9K_PM_FULL_SLEEP;
 	else if (sc->ps_enabled &&
 		 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -286,7 +286,7 @@
 			ath_start_ani(common);
 	}
 
-	if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+	if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
 		struct ath_hw_antcomb_conf div_ant_conf;
 		u8 lna_conf;
 
@@ -332,14 +332,14 @@
 		hchan = ah->curchan;
 	}
 
-	if (fastcc && !ath9k_hw_check_alive(ah))
+	if (fastcc && (ah->chip_fullsleep ||
+	    !ath9k_hw_check_alive(ah)))
 		fastcc = false;
 
 	if (!ath_prepare_reset(sc, retry_tx, flush))
 		fastcc = false;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"Reset to %u MHz, HT40: %d fastcc: %d\n",
+	ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
 		hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS |
 							  CHANNEL_HT40PLUS)),
 		fastcc);
@@ -428,7 +428,7 @@
 	txctl.paprd = BIT(chain);
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
-		ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n");
+		ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
 		dev_kfree_skb_any(skb);
 		return false;
 	}
@@ -437,7 +437,7 @@
 			msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
 
 	if (!time_left)
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Timeout waiting for paprd training on TX chain %d\n",
 			chain);
 
@@ -486,27 +486,27 @@
 
 		chain_ok = 0;
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
-			"Sending PAPRD frame for thermal measurement "
-			"on chain %d\n", chain);
+		ath_dbg(common, CALIBRATE,
+			"Sending PAPRD frame for thermal measurement on chain %d\n",
+			chain);
 		if (!ath_paprd_send_frame(sc, skb, chain))
 			goto fail_paprd;
 
 		ar9003_paprd_setup_gain_table(ah, chain);
 
-		ath_dbg(common, ATH_DBG_CALIBRATE,
+		ath_dbg(common, CALIBRATE,
 			"Sending PAPRD training frame on chain %d\n", chain);
 		if (!ath_paprd_send_frame(sc, skb, chain))
 			goto fail_paprd;
 
 		if (!ar9003_paprd_is_done(ah)) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"PAPRD not yet done on chain %d\n", chain);
 			break;
 		}
 
 		if (ar9003_paprd_create_curve(ah, caldata, chain)) {
-			ath_dbg(common, ATH_DBG_CALIBRATE,
+			ath_dbg(common, CALIBRATE,
 				"PAPRD create curve failed on chain %d\n",
 								   chain);
 			break;
@@ -561,7 +561,6 @@
 	/* Long calibration runs independently of short calibration. */
 	if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
 		longcal = true;
-		ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
 		common->ani.longcal_timer = timestamp;
 	}
 
@@ -569,8 +568,6 @@
 	if (!common->ani.caldone) {
 		if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
 			shortcal = true;
-			ath_dbg(common, ATH_DBG_ANI,
-				"shortcal @%lu\n", jiffies);
 			common->ani.shortcal_timer = timestamp;
 			common->ani.resetcal_timer = timestamp;
 		}
@@ -584,8 +581,9 @@
 	}
 
 	/* Verify whether we must check ANI */
-	if ((timestamp - common->ani.checkani_timer) >=
-	     ah->config.ani_poll_interval) {
+	if (sc->sc_ah->config.enable_ani
+	    && (timestamp - common->ani.checkani_timer) >=
+	    ah->config.ani_poll_interval) {
 		aniflag = true;
 		common->ani.checkani_timer = timestamp;
 	}
@@ -605,6 +603,12 @@
 						ah->rxchainmask, longcal);
 	}
 
+	ath_dbg(common, ANI,
+		"Calibration @%lu finished: %s %s %s, caldone: %s\n",
+		jiffies,
+		longcal ? "long" : "", shortcal ? "short" : "",
+		aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
 	ath9k_ps_restore(sc);
 
 set_timer:
@@ -630,7 +634,8 @@
 	}
 }
 
-static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
+static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
+			    struct ieee80211_vif *vif)
 {
 	struct ath_node *an;
 	an = (struct ath_node *)sta->drv_priv;
@@ -639,8 +644,9 @@
 	spin_lock(&sc->nodes_lock);
 	list_add(&an->list, &sc->nodes);
 	spin_unlock(&sc->nodes_lock);
-	an->sta = sta;
 #endif
+	an->sta = sta;
+	an->vif = vif;
 	if (sc->sc_flags & SC_OP_TXAGGR) {
 		ath_tx_node_init(sc, an);
 		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -709,8 +715,7 @@
 		 * TSF sync does not look correct; remain awake to sync with
 		 * the next Beacon.
 		 */
-		ath_dbg(common, ATH_DBG_PS,
-			"TSFOOR - Sync with next Beacon\n");
+		ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
 		sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
 	}
 
@@ -736,10 +741,13 @@
 			ath_tx_tasklet(sc);
 	}
 
-	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
 		if (status & ATH9K_INT_GENTIMER)
 			ath_gen_timer_isr(sc->sc_ah);
 
+	if ((status & ATH9K_INT_MCI) && ATH9K_HW_CAP_MCI)
+		ath_mci_intr(sc);
+
 out:
 	/* re-enable hardware interrupt */
 	ath9k_hw_enable_interrupts(ah);
@@ -762,7 +770,8 @@
 		ATH9K_INT_BMISS |		\
 		ATH9K_INT_CST |			\
 		ATH9K_INT_TSFOOR |		\
-		ATH9K_INT_GENTIMER)
+		ATH9K_INT_GENTIMER |		\
+		ATH9K_INT_MCI)
 
 	struct ath_softc *sc = dev;
 	struct ath_hw *ah = sc->sc_ah;
@@ -880,82 +889,6 @@
 #undef SCHED_INTR
 }
 
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ieee80211_channel *channel = hw->conf.channel;
-	int r;
-
-	ath9k_ps_wakeup(sc);
-	spin_lock_bh(&sc->sc_pcu_lock);
-	atomic_set(&ah->intr_ref_cnt, -1);
-
-	ath9k_hw_configpcipowersave(ah, false);
-
-	if (!ah->curchan)
-		ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
-	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
-	if (r) {
-		ath_err(common,
-			"Unable to reset channel (%u MHz), reset status %d\n",
-			channel->center_freq, r);
-	}
-
-	ath_complete_reset(sc, true);
-
-	/* Enable LED */
-	ath9k_hw_cfg_output(ah, ah->led_pin,
-			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-	ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
-	spin_unlock_bh(&sc->sc_pcu_lock);
-
-	ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ieee80211_channel *channel = hw->conf.channel;
-	int r;
-
-	ath9k_ps_wakeup(sc);
-
-	ath_cancel_work(sc);
-
-	spin_lock_bh(&sc->sc_pcu_lock);
-
-	/*
-	 * Keep the LED on when the radio is disabled
-	 * during idle unassociated state.
-	 */
-	if (!sc->ps_idle) {
-		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
-		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-	}
-
-	ath_prepare_reset(sc, false, true);
-
-	if (!ah->curchan)
-		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
-	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
-	if (r) {
-		ath_err(ath9k_hw_common(sc->sc_ah),
-			"Unable to reset channel (%u MHz), reset status %d\n",
-			channel->center_freq, r);
-	}
-
-	ath9k_hw_phy_disable(ah);
-
-	ath9k_hw_configpcipowersave(ah, true);
-
-	spin_unlock_bh(&sc->sc_pcu_lock);
-	ath9k_ps_restore(sc);
-}
-
 static int ath_reset(struct ath_softc *sc, bool retry_tx)
 {
 	int r;
@@ -1002,8 +935,8 @@
 	busy = ath_update_survey_stats(sc);
 	spin_unlock_irqrestore(&common->cc_lock, flags);
 
-	ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
-		"busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+	ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
+		busy, sc->hw_busy_count + 1);
 	if (busy >= 99) {
 		if (++sc->hw_busy_count >= 3) {
 			RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
@@ -1026,8 +959,7 @@
 		count++;
 		if (count == 3) {
 			/* Rx is hung for more than 500ms. Reset it */
-			ath_dbg(common, ATH_DBG_RESET,
-				"Possible RX hang, resetting");
+			ath_dbg(common, RESET, "Possible RX hang, resetting\n");
 			RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
 			ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
 			count = 0;
@@ -1067,7 +999,7 @@
 	struct ath9k_channel *init_channel;
 	int r;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"Starting driver with initial channel: %d MHz\n",
 		curchan->center_freq);
 
@@ -1091,6 +1023,9 @@
 	 * and then setup of the interrupt mask.
 	 */
 	spin_lock_bh(&sc->sc_pcu_lock);
+
+	atomic_set(&ah->intr_ref_cnt, -1);
+
 	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (r) {
 		ath_err(common,
@@ -1117,6 +1052,9 @@
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
 		ah->imask |= ATH9K_INT_CST;
 
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+		ah->imask |= ATH9K_INT_MCI;
+
 	sc->sc_flags &= ~SC_OP_INVALID;
 	sc->sc_ah->is_monitoring = false;
 
@@ -1129,15 +1067,28 @@
 		goto mutex_unlock;
 	}
 
+	if (ah->led_pin >= 0) {
+		ath9k_hw_cfg_output(ah, ah->led_pin,
+				    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+		ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+	}
+
+	/*
+	 * Reset key cache to sane defaults (all entries cleared) instead of
+	 * semi-random values after suspend/resume.
+	 */
+	ath9k_cmn_init_crypto(sc->sc_ah);
+
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
-	if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
+	if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
 	    !ah->btcoex_hw.enabled) {
-		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-					   AR_STOMP_LOW_WLAN_WGHT);
+		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+			ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+						   AR_STOMP_LOW_WLAN_WGHT);
 		ath9k_hw_btcoex_enable(ah);
 
-		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+		if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
 			ath9k_btcoex_timer_resume(sc);
 	}
 
@@ -1167,12 +1118,19 @@
 		if (ieee80211_is_data(hdr->frame_control) &&
 		    !ieee80211_is_nullfunc(hdr->frame_control) &&
 		    !ieee80211_has_pm(hdr->frame_control)) {
-			ath_dbg(common, ATH_DBG_PS,
+			ath_dbg(common, PS,
 				"Add PM=1 for a TX frame while in PS mode\n");
 			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
 		}
 	}
 
+	/*
+	 * Cannot tx while the hardware is in full sleep; it first needs a
+	 * full chip reset to recover from that state
+	 */
+	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+		goto exit;
+
 	if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
 		/*
 		 * We are using PS-Poll and mac80211 can request TX while in
@@ -1183,12 +1141,11 @@
 		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
 			ath9k_hw_setrxabort(sc->sc_ah, 0);
 		if (ieee80211_is_pspoll(hdr->frame_control)) {
-			ath_dbg(common, ATH_DBG_PS,
+			ath_dbg(common, PS,
 				"Sending PS-Poll to pick a buffered frame\n");
 			sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
 		} else {
-			ath_dbg(common, ATH_DBG_PS,
-				"Wake up to complete TX\n");
+			ath_dbg(common, PS, "Wake up to complete TX\n");
 			sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
 		}
 		/*
@@ -1202,10 +1159,10 @@
 	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
 
-	ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
+	ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
-		ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
+		ath_dbg(common, XMIT, "TX failed\n");
 		goto exit;
 	}
 
@@ -1219,13 +1176,14 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
+	bool prev_idle;
 
 	mutex_lock(&sc->mutex);
 
 	ath_cancel_work(sc);
 
 	if (sc->sc_flags & SC_OP_INVALID) {
-		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+		ath_dbg(common, ANY, "Device not present\n");
 		mutex_unlock(&sc->mutex);
 		return;
 	}
@@ -1233,10 +1191,12 @@
 	/* Ensure HW is awake when we try to shut it down. */
 	ath9k_ps_wakeup(sc);
 
-	if (ah->btcoex_hw.enabled) {
+	if (ah->btcoex_hw.enabled &&
+	    ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
 		ath9k_hw_btcoex_disable(ah);
-		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+		if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
 			ath9k_btcoex_timer_pause(sc);
+		ath_mci_flush_profile(&sc->btcoex.mci);
 	}
 
 	spin_lock_bh(&sc->sc_pcu_lock);
@@ -1248,21 +1208,6 @@
 	 * before setting the invalid flag. */
 	ath9k_hw_disable_interrupts(ah);
 
-	if (!(sc->sc_flags & SC_OP_INVALID)) {
-		ath_drain_all_txq(sc, false);
-		ath_stoprecv(sc);
-		ath9k_hw_phy_disable(ah);
-	} else
-		sc->rx.rxlink = NULL;
-
-	if (sc->rx.frag) {
-		dev_kfree_skb_any(sc->rx.frag);
-		sc->rx.frag = NULL;
-	}
-
-	/* disable HAL and put h/w to sleep */
-	ath9k_hw_disable(ah);
-
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
 	/* we can now sync irq and kill any running tasklets, since we already
@@ -1271,16 +1216,41 @@
 	tasklet_kill(&sc->intr_tq);
 	tasklet_kill(&sc->bcon_tasklet);
 
+	prev_idle = sc->ps_idle;
+	sc->ps_idle = true;
+
+	spin_lock_bh(&sc->sc_pcu_lock);
+
+	if (ah->led_pin >= 0) {
+		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+	}
+
+	ath_prepare_reset(sc, false, true);
+
+	if (sc->rx.frag) {
+		dev_kfree_skb_any(sc->rx.frag);
+		sc->rx.frag = NULL;
+	}
+
+	if (!ah->curchan)
+		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+	ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+	ath9k_hw_phy_disable(ah);
+
+	ath9k_hw_configpcipowersave(ah, true);
+
+	spin_unlock_bh(&sc->sc_pcu_lock);
+
 	ath9k_ps_restore(sc);
 
-	sc->ps_idle = true;
-	ath_radio_disable(sc, hw);
-
 	sc->sc_flags |= SC_OP_INVALID;
+	sc->ps_idle = prev_idle;
 
 	mutex_unlock(&sc->mutex);
 
-	ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+	ath_dbg(common, CONFIG, "Driver halt\n");
 }
 
 bool ath9k_uses_beacons(int type)
@@ -1495,8 +1465,7 @@
 		goto out;
 	}
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"Attach a VIF of type: %d\n", vif->type);
+	ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
 
 	sc->nvifs++;
 
@@ -1516,7 +1485,7 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	int ret = 0;
 
-	ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
+	ath_dbg(common, CONFIG, "Change Interface\n");
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
 
@@ -1559,7 +1528,7 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-	ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+	ath_dbg(common, CONFIG, "Detach Interface\n");
 
 	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
@@ -1616,8 +1585,8 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_conf *conf = &hw->conf;
-	bool disable_radio = false;
 
+	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
 
 	/*
@@ -1628,13 +1597,8 @@
 	 */
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
 		sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-		if (!sc->ps_idle) {
-			ath_radio_enable(sc, hw);
-			ath_dbg(common, ATH_DBG_CONFIG,
-				"not-idle: enabling radio\n");
-		} else {
-			disable_radio = true;
-		}
+		if (sc->ps_idle)
+			ath_cancel_work(sc);
 	}
 
 	/*
@@ -1655,12 +1619,10 @@
 
 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
 		if (conf->flags & IEEE80211_CONF_MONITOR) {
-			ath_dbg(common, ATH_DBG_CONFIG,
-				"Monitor mode is enabled\n");
+			ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
 			sc->sc_ah->is_monitoring = true;
 		} else {
-			ath_dbg(common, ATH_DBG_CONFIG,
-				"Monitor mode is disabled\n");
+			ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
 			sc->sc_ah->is_monitoring = false;
 		}
 	}
@@ -1680,8 +1642,7 @@
 		else
 			sc->sc_flags &= ~SC_OP_OFFCHANNEL;
 
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Set channel: %d MHz type: %d\n",
+		ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
 			curchan->center_freq, conf->channel_type);
 
 		/* update survey stats for the old channel before switching */
@@ -1738,21 +1699,14 @@
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Set power: %d\n", conf->power_level);
+		ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
 		sc->config.txpowlimit = 2 * conf->power_level;
-		ath9k_ps_wakeup(sc);
 		ath9k_cmn_update_txpow(ah, sc->curtxpow,
 				       sc->config.txpowlimit, &sc->curtxpow);
-		ath9k_ps_restore(sc);
-	}
-
-	if (disable_radio) {
-		ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
-		ath_radio_disable(sc, hw);
 	}
 
 	mutex_unlock(&sc->mutex);
+	ath9k_ps_restore(sc);
 
 	return 0;
 }
@@ -1785,8 +1739,8 @@
 	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
 	ath9k_ps_restore(sc);
 
-	ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
-		"Set HW RX filter: 0x%x\n", rfilt);
+	ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
+		rfilt);
 }
 
 static int ath9k_sta_add(struct ieee80211_hw *hw,
@@ -1798,7 +1752,7 @@
 	struct ath_node *an = (struct ath_node *) sta->drv_priv;
 	struct ieee80211_key_conf ps_key = { };
 
-	ath_node_attach(sc, sta);
+	ath_node_attach(sc, sta, vif);
 
 	if (vif->type != NL80211_IFTYPE_AP &&
 	    vif->type != NL80211_IFTYPE_AP_VLAN)
@@ -1843,6 +1797,9 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_node *an = (struct ath_node *) sta->drv_priv;
 
+	if (!(sc->sc_flags & SC_OP_TXAGGR))
+		return;
+
 	switch (cmd) {
 	case STA_NOTIFY_SLEEP:
 		an->sleeping = true;
@@ -1880,7 +1837,7 @@
 	qi.tqi_cwmax = params->cw_max;
 	qi.tqi_burstTime = params->txop;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
+	ath_dbg(common, CONFIG,
 		"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
 		queue, txq->axq_qnum, params->aifs, params->cw_min,
 		params->cw_max, params->txop);
@@ -1912,7 +1869,8 @@
 	if (ath9k_modparam_nohwcrypt)
 		return -ENOSPC;
 
-	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
 	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
 	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -1928,7 +1886,7 @@
 
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
-	ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+	ath_dbg(common, CONFIG, "Set HW Key\n");
 
 	switch (cmd) {
 	case SET_KEY:
@@ -1980,9 +1938,8 @@
 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
 		common->curaid = bss_conf->aid;
 		ath9k_hw_write_associd(sc->sc_ah);
-		ath_dbg(common, ATH_DBG_CONFIG,
-				"Bss Info ASSOC %d, bssid: %pM\n",
-				bss_conf->aid, common->curbssid);
+		ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
+			bss_conf->aid, common->curbssid);
 		ath_beacon_config(sc, vif);
 		/*
 		 * Request a re-configuration of Beacon related timers
@@ -2013,8 +1970,7 @@
 
 	/* Reconfigure bss info */
 	if (avp->primary_sta_vif && !bss_conf->assoc) {
-		ath_dbg(common, ATH_DBG_CONFIG,
-			"Bss Info DISASSOC %d, bssid %pM\n",
+		ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
 			common->curaid, common->curbssid);
 		sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
 		avp->primary_sta_vif = false;
@@ -2056,7 +2012,7 @@
 	if (changed & BSS_CHANGED_BSSID) {
 		ath9k_config_bss(sc, vif);
 
-		ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
+		ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
 			common->curbssid, common->curaid);
 	}
 
@@ -2134,7 +2090,7 @@
 	}
 
 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-		ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+		ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n",
 			bss_conf->use_short_preamble);
 		if (bss_conf->use_short_preamble)
 			sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
@@ -2143,7 +2099,7 @@
 	}
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-		ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+		ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n",
 			bss_conf->use_cts_prot);
 		if (bss_conf->use_cts_prot &&
 		    hw->conf.channel->band != IEEE80211_BAND_5GHZ)
@@ -2309,20 +2265,17 @@
 	cancel_delayed_work_sync(&sc->tx_complete_work);
 
 	if (ah->ah_flags & AH_UNPLUGGED) {
-		ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+		ath_dbg(common, ANY, "Device has been unplugged!\n");
 		mutex_unlock(&sc->mutex);
 		return;
 	}
 
 	if (sc->sc_flags & SC_OP_INVALID) {
-		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+		ath_dbg(common, ANY, "Device not present\n");
 		mutex_unlock(&sc->mutex);
 		return;
 	}
 
-	if (drop)
-		timeout = 1;
-
 	for (j = 0; j < timeout; j++) {
 		bool npend = false;
 
@@ -2340,21 +2293,22 @@
 		}
 
 		if (!npend)
-		    goto out;
+		    break;
 	}
 
-	ath9k_ps_wakeup(sc);
-	spin_lock_bh(&sc->sc_pcu_lock);
-	drain_txq = ath_drain_all_txq(sc, false);
-	spin_unlock_bh(&sc->sc_pcu_lock);
+	if (drop) {
+		ath9k_ps_wakeup(sc);
+		spin_lock_bh(&sc->sc_pcu_lock);
+		drain_txq = ath_drain_all_txq(sc, false);
+		spin_unlock_bh(&sc->sc_pcu_lock);
 
-	if (!drain_txq)
-		ath_reset(sc, false);
+		if (!drain_txq)
+			ath_reset(sc, false);
 
-	ath9k_ps_restore(sc);
-	ieee80211_wake_queues(hw);
+		ath9k_ps_restore(sc);
+		ieee80211_wake_queues(hw);
+	}
 
-out:
 	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
 	mutex_unlock(&sc->mutex);
 }
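
The new mci.c below drives the BT/WLAN time split from the profiles BT advertises over MCI. As a hedged illustration of the arithmetic in ath_mci_update_scheme() (the numbers are assumptions for the example, not driver defaults): with a 40 ms coex period and a 55% BT duty cycle,

	btcoex->btcoex_period = 40;	/* ms, assumed for the example */
	btcoex->duty_cycle = 55;	/* percent of the period given to BT */

	btcoex->btcoex_period *= 1000;	/* -> 40000 us */
	btcoex->btcoex_no_stomp = btcoex->btcoex_period *
					(100 - btcoex->duty_cycle) / 100;
					/* 40000 * 45 / 100 = 18000 us for WLAN */
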
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
new file mode 100644
index 0000000..05c23ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "ath9k.h"
+#include "mci.h"
+
+static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+
+static struct ath_mci_profile_info*
+ath_mci_find_profile(struct ath_mci_profile *mci,
+		     struct ath_mci_profile_info *info)
+{
+	struct ath_mci_profile_info *entry;
+
+	list_for_each_entry(entry, &mci->info, list) {
+		if (entry->conn_handle == info->conn_handle)
+			break;
+	}
+	return entry;
+}
+
+static bool ath_mci_add_profile(struct ath_common *common,
+				struct ath_mci_profile *mci,
+				struct ath_mci_profile_info *info)
+{
+	struct ath_mci_profile_info *entry;
+
+	if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
+	    (info->type == MCI_GPM_COEX_PROFILE_VOICE)) {
+		ath_dbg(common, MCI,
+			"Too many SCO profiles, failed to add new profile\n");
+		return false;
+	}
+
+	if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
+	    (info->type != MCI_GPM_COEX_PROFILE_VOICE)) {
+		ath_dbg(common, MCI,
+			"Too many ACL profiles, failed to add new profile\n");
+		return false;
+	}
+
+	entry = ath_mci_find_profile(mci, info);
+
+	if (entry)
+		memcpy(entry, info, 10);
+	else {
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry)
+			return false;
+
+		memcpy(entry, info, 10);
+		INC_PROF(mci, info);
+		list_add_tail(&info->list, &mci->info);
+	}
+	return true;
+}
+
+static void ath_mci_del_profile(struct ath_common *common,
+				struct ath_mci_profile *mci,
+				struct ath_mci_profile_info *info)
+{
+	struct ath_mci_profile_info *entry;
+
+	entry = ath_mci_find_profile(mci, info);
+
+	if (!entry) {
+		ath_dbg(common, MCI, "Profile to be deleted not found\n");
+		return;
+	}
+	DEC_PROF(mci, entry);
+	list_del(&entry->list);
+	kfree(entry);
+}
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci)
+{
+	struct ath_mci_profile_info *info, *tinfo;
+
+	list_for_each_entry_safe(info, tinfo, &mci->info, list) {
+		list_del(&info->list);
+		DEC_PROF(mci, info);
+		kfree(info);
+	}
+	mci->aggr_limit = 0;
+}
+
+static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
+{
+	struct ath_mci_profile *mci = &btcoex->mci;
+	u32 wlan_airtime = btcoex->btcoex_period *
+				(100 - btcoex->duty_cycle) / 100;
+
+	/*
+	 * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
+	 * When wlan_airtime is less than 4 ms, the aggregation limit has to
+	 * be adjusted to half of wlan_airtime to ensure that the aggregation
+	 * can fit without colliding with BT traffic.
+	 */
+	if ((wlan_airtime <= 4) &&
+	    (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
+		mci->aggr_limit = 2 * wlan_airtime;
+}
+
+static void ath_mci_update_scheme(struct ath_softc *sc)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_mci_profile *mci = &btcoex->mci;
+	struct ath_mci_profile_info *info;
+	u32 num_profile = NUM_PROF(mci);
+
+	if (num_profile == 1) {
+		info = list_first_entry(&mci->info,
+					struct ath_mci_profile_info,
+					list);
+		if (mci->num_sco && info->T == 12) {
+			mci->aggr_limit = 8;
+			ath_dbg(common, MCI,
+				"Single SCO, aggregation limit 2 ms\n");
+		} else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
+			   !info->master) {
+			btcoex->btcoex_period = 60;
+			ath_dbg(common, MCI,
+				"Single slave PAN/FTP, bt period 60 ms\n");
+		} else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
+			 (info->T > 0 && info->T < 50) &&
+			 (info->A > 1 || info->W > 1)) {
+			btcoex->duty_cycle = 30;
+			mci->aggr_limit = 8;
+			ath_dbg(common, MCI,
+				"Multiple attempt/timeout single HID "
+				"aggregation limit 2 ms dutycycle 30%%\n");
+		}
+	} else if ((num_profile == 2) && (mci->num_hid == 2)) {
+		btcoex->duty_cycle = 30;
+		mci->aggr_limit = 8;
+		ath_dbg(common, MCI,
+			"Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
+	} else if (num_profile > 3) {
+		mci->aggr_limit = 6;
+		ath_dbg(common, MCI,
+			"Three or more profiles aggregation limit 1.5 ms\n");
+	}
+
+	if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
+		if (IS_CHAN_HT(sc->sc_ah->curchan))
+			ath_mci_adjust_aggr_limit(btcoex);
+		else
+			btcoex->btcoex_period >>= 1;
+	}
+
+	ath9k_hw_btcoex_disable(sc->sc_ah);
+	ath9k_btcoex_timer_pause(sc);
+
+	if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
+		return;
+
+	btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
+	if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
+		btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
+
+	btcoex->btcoex_period *= 1000;
+	btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+					(100 - btcoex->duty_cycle) / 100;
+
+	ath9k_hw_btcoex_enable(sc->sc_ah);
+	ath9k_btcoex_timer_resume(sc);
+}
+
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 payload[4] = {0, 0, 0, 0};
+
+	switch (opcode) {
+	case MCI_GPM_BT_CAL_REQ:
+
+		ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n");
+
+		if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+			ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+			ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+		} else
+			ath_dbg(common, MCI, "MCI State mismatches: %d\n",
+				ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+		break;
+
+	case MCI_GPM_BT_CAL_DONE:
+
+		ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n");
+
+		if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+			ath_dbg(common, MCI, "MCI error illegal!\n");
+		else
+			ath_dbg(common, MCI, "MCI BT not in CAL state\n");
+
+		break;
+
+	case MCI_GPM_BT_CAL_GRANT:
+
+		ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n");
+
+		/* Send WLAN_CAL_DONE for now */
+		ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n");
+		MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+		ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+					16, false, true);
+		break;
+
+	default:
+		ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n");
+		break;
+	}
+}
+
+static void ath_mci_process_profile(struct ath_softc *sc,
+				    struct ath_mci_profile_info *info)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_mci_profile *mci = &btcoex->mci;
+
+	if (info->start) {
+		if (!ath_mci_add_profile(common, mci, info))
+			return;
+	} else
+		ath_mci_del_profile(common, mci, info);
+
+	btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+	mci->aggr_limit = mci->num_sco ? 6 : 0;
+	if (NUM_PROF(mci)) {
+		btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+		btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+	} else {
+		btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+							ATH_BTCOEX_STOMP_LOW;
+		btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+	}
+
+	ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_process_status(struct ath_softc *sc,
+				   struct ath_mci_profile_status *status)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_btcoex *btcoex = &sc->btcoex;
+	struct ath_mci_profile *mci = &btcoex->mci;
+	struct ath_mci_profile_info info;
+	int i = 0, old_num_mgmt = mci->num_mgmt;
+
+	/* Link status types are not handled */
+	if (status->is_link) {
+		ath_dbg(common, MCI, "Skip link type status update\n");
+		return;
+	}
+
+	memset(&info, 0, sizeof(struct ath_mci_profile_info));
+
+	info.conn_handle = status->conn_handle;
+	if (ath_mci_find_profile(mci, &info)) {
+		ath_dbg(common, MCI,
+			"Skip non-link state update for existing profile %d\n",
+			status->conn_handle);
+		return;
+	}
+	if (status->conn_handle >= ATH_MCI_MAX_PROFILE) {
+		ath_dbg(common, MCI, "Ignore too many non-link updates\n");
+		return;
+	}
+	if (status->is_critical)
+		__set_bit(status->conn_handle, mci->status);
+	else
+		__clear_bit(status->conn_handle, mci->status);
+
+	mci->num_mgmt = 0;
+	do {
+		if (test_bit(i, mci->status))
+			mci->num_mgmt++;
+	} while (++i < ATH_MCI_MAX_PROFILE);
+
+	if (old_num_mgmt != mci->num_mgmt)
+		ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_mci_profile_info profile_info;
+	struct ath_mci_profile_status profile_status;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	u32 version;
+	u8 major;
+	u8 minor;
+	u32 seq_num;
+
+	switch (opcode) {
+
+	case MCI_GPM_COEX_VERSION_QUERY:
+		ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+		version = ar9003_mci_state(ah,
+				MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+		break;
+
+	case MCI_GPM_COEX_VERSION_RESPONSE:
+		ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+		major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+		minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+		ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+			major, minor);
+		version = (major << 8) + minor;
+		version = ar9003_mci_state(ah,
+			  MCI_STATE_SET_BT_COEX_VERSION, &version);
+		break;
+
+	case MCI_GPM_COEX_STATUS_QUERY:
+		ath_dbg(common, MCI,
+			"MCI Recv GPM COEX Status Query = 0x%02x\n",
+			*(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+		ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+		break;
+
+	case MCI_GPM_COEX_BT_PROFILE_INFO:
+		ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n");
+		memcpy(&profile_info,
+		       (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+		if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
+		    (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
+
+			ath_dbg(common, MCI,
+				"illegal profile type = %d, state = %d\n",
+				profile_info.type,
+				profile_info.start);
+			break;
+		}
+
+		ath_mci_process_profile(sc, &profile_info);
+		break;
+
+	case MCI_GPM_COEX_BT_STATUS_UPDATE:
+		profile_status.is_link = *(rx_payload +
+					   MCI_GPM_COEX_B_STATUS_TYPE);
+		profile_status.conn_handle = *(rx_payload +
+					       MCI_GPM_COEX_B_STATUS_LINKID);
+		profile_status.is_critical = *(rx_payload +
+					       MCI_GPM_COEX_B_STATUS_STATE);
+
+		seq_num = *((u32 *)(rx_payload + 12));
+		ath_dbg(common, MCI,
+			"MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+			profile_status.is_link, profile_status.conn_handle,
+			profile_status.is_critical, seq_num);
+
+		ath_mci_process_status(sc, &profile_status);
+		break;
+
+	default:
+		ath_dbg(common, MCI, "MCI Unknown GPM COEX message = 0x%02x\n",
+			opcode);
+		break;
+	}
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+	int error = 0;
+
+	buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+					  &buf->bf_paddr, GFP_KERNEL);
+
+	if (buf->bf_addr == NULL) {
+		error = -ENOMEM;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	memset(buf, 0, sizeof(*buf));
+	return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+	if (buf->bf_addr) {
+		dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+							buf->bf_paddr);
+		memset(buf, 0, sizeof(*buf));
+	}
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_mci_coex *mci = &sc->mci_coex;
+	int error = 0;
+
+	if (!ATH9K_HW_CAP_MCI)
+		return 0;
+
+	mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+	if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+		ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
+		error = -ENOMEM;
+		goto fail;
+	}
+
+	mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+	memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+						mci->sched_buf.bf_len);
+
+	mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+	mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+							mci->sched_buf.bf_len;
+	mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+	/* initialize the buffer */
+	memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+	ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+			 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+			 mci->sched_buf.bf_paddr);
+fail:
+	return error;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_mci_coex *mci = &sc->mci_coex;
+
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+		return;
+
+	/*
+	 * The GPM buffer shares the schedule buffer's DMA allocation,
+	 * so freeing the schedule buffer releases both.
+	 */
+	ath_mci_buf_free(sc, &mci->sched_buf);
+	ar9003_mci_cleanup(ah);
+}
+
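+/*
+ * MCI interrupt handler: performs the wake/sleep handshake with the BT
+ * side, recovers the RX path on invalid-header/timeout errors, drains
+ * queued GPM messages and dispatches them to the calibration or
+ * coex-agent handlers.
+ */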
+void ath_mci_intr(struct ath_softc *sc)
+{
+	struct ath_mci_coex *mci = &sc->mci_coex;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 mci_int, mci_int_rxmsg;
+	u32 offset, subtype, opcode;
+	u32 *pgpm;
+	u32 more_data = MCI_GPM_MORE;
+	bool skip_gpm = false;
+
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+		return;
+
+	ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+	if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+		ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+		ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n");
+
+		ath_dbg(common, MCI,
+			"MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+			mci_int, mci_int_rxmsg);
+		return;
+	}
+
+	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+		u32 payload[4] = { 0xffffffff, 0xffffffff,
+				   0xffffffff, 0xffffff00};
+
+		/*
+		 * REMOTE_RESET and SYS_WAKING used to be sent only when
+		 * BT woke up. Now they are always sent, as a recovery
+		 * method to reset the BT MCI's RX alignment.
+		 */
+		ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n");
+
+		ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+					payload, 16, true, false);
+		ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n");
+		ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+					NULL, 0, true, false);
+
+		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+		ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+		/*
+		 * Always do this, for recovery as well as for 2G/5G
+		 * toggling and LNA_TRANS.
+		 */
+		ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n");
+		ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+	}
+
+	/* Processing SYS_WAKING/SYS_SLEEPING */
+	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+		if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+			if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+					== MCI_BT_SLEEP)
+				ath_dbg(common, MCI,
+					"MCI BT stays in sleep mode\n");
+			else {
+				ath_dbg(common, MCI,
+					"MCI Set BT state to AWAKE\n");
+				ar9003_mci_state(ah,
+						 MCI_STATE_SET_BT_AWAKE, NULL);
+			}
+		} else
+			ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n");
+	}
+
+	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+		if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+			if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+					== MCI_BT_AWAKE)
+				ath_dbg(common, MCI,
+					"MCI BT stays in AWAKE mode\n");
+			else {
+				ath_dbg(common, MCI,
+					"MCI Set BT state to SLEEP\n");
+				ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+						 NULL);
+			}
+		} else
+			ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n");
+	}
+
+	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+		ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n");
+		ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+		skip_gpm = true;
+	}
+
+	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+		offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+					  NULL);
+	}
+
+	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+		while (more_data == MCI_GPM_MORE) {
+
+			pgpm = mci->gpm_buf.bf_addr;
+			offset = ar9003_mci_state(ah,
+					MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+			if (offset == MCI_GPM_INVALID)
+				break;
+
+			pgpm += (offset >> 2);
+
+			/*
+			 * The first dword is a timestamp; the real data
+			 * starts from the second dword.
+			 */
+
+			subtype = MCI_GPM_TYPE(pgpm);
+			opcode = MCI_GPM_OPCODE(pgpm);
+
+			if (!skip_gpm) {
+
+				if (MCI_GPM_IS_CAL_TYPE(subtype))
+					ath_mci_cal_msg(sc, subtype,
+							(u8 *) pgpm);
+				else {
+					switch (subtype) {
+					case MCI_GPM_COEX_AGENT:
+						ath_mci_msg(sc, opcode,
+							    (u8 *) pgpm);
+						break;
+					default:
+						break;
+					}
+				}
+			}
+			MCI_GPM_RECYCLE(pgpm);
+		}
+	}
+
+	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+			ath_dbg(common, MCI, "MCI LNA_INFO\n");
+		}
+
+		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+			int value_dbm = ar9003_mci_state(ah,
+					MCI_STATE_CONT_RSSI_POWER, NULL);
+
+			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+			if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+				ath_dbg(common, MCI,
+					"MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
+					ar9003_mci_state(ah,
+						MCI_STATE_CONT_PRIORITY, NULL),
+					value_dbm);
+			else
+				ath_dbg(common, MCI,
+					"MCI CONT_INFO: (rx) pri = %d, pwr = %d dBm\n",
+					ar9003_mci_state(ah,
+						MCI_STATE_CONT_PRIORITY, NULL),
+					value_dbm);
+		}
+
+		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+			ath_dbg(common, MCI, "MCI CONT_NACK\n");
+		}
+
+		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+			ath_dbg(common, MCI, "MCI CONT_RST\n");
+		}
+	}
+
+	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+		mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+			     AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+	if (mci_int_rxmsg & 0xfffffffe)
+		ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n",
+			mci_int_rxmsg);
+}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
new file mode 100644
index 0000000..29e3e51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MCI_H
+#define MCI_H
+
+#define ATH_MCI_SCHED_BUF_SIZE		(16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY		16
+#define ATH_MCI_GPM_BUF_SIZE		(ATH_MCI_GPM_MAX_ENTRY * 16)
+#define ATH_MCI_DEF_BT_PERIOD		40
+#define ATH_MCI_BDR_DUTY_CYCLE		20
+#define ATH_MCI_MAX_DUTY_CYCLE		90
+
+#define ATH_MCI_DEF_AGGR_LIMIT		6 /* in units of 0.24 ms */
+#define ATH_MCI_MAX_ACL_PROFILE		7
+#define ATH_MCI_MAX_SCO_PROFILE		1
+#define ATH_MCI_MAX_PROFILE		(ATH_MCI_MAX_ACL_PROFILE +\
+					 ATH_MCI_MAX_SCO_PROFILE)
+
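+/*
+ * INC_PROF()/DEC_PROF() adjust the per-profile-type counters in
+ * struct ath_mci_profile when a Bluetooth profile starts or ends;
+ * NUM_PROF() returns the total number of active profiles.
+ */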
+#define INC_PROF(_mci, _info) do {		 \
+		switch (_info->type) {		 \
+		case MCI_GPM_COEX_PROFILE_RFCOMM:\
+			_mci->num_other_acl++;	 \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_A2DP:	 \
+			_mci->num_a2dp++;	 \
+			if (!_info->edr)	 \
+				_mci->num_bdr++; \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_HID:	 \
+			_mci->num_hid++;	 \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_BNEP:	 \
+			_mci->num_pan++;	 \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_VOICE: \
+			_mci->num_sco++;	 \
+			break;			 \
+		default:			 \
+			break;			 \
+		}				 \
+	} while (0)
+
+#define DEC_PROF(_mci, _info) do {		 \
+		switch (_info->type) {		 \
+		case MCI_GPM_COEX_PROFILE_RFCOMM:\
+			_mci->num_other_acl--;	 \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_A2DP:	 \
+			_mci->num_a2dp--;	 \
+			if (!_info->edr)	 \
+				_mci->num_bdr--; \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_HID:	 \
+			_mci->num_hid--;	 \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_BNEP:	 \
+			_mci->num_pan--;	 \
+			break;			 \
+		case MCI_GPM_COEX_PROFILE_VOICE: \
+			_mci->num_sco--;	 \
+			break;			 \
+		default:			 \
+			break;			 \
+		}				 \
+	} while (0)
+
+#define NUM_PROF(_mci)	(_mci->num_other_acl + _mci->num_a2dp + \
+			 _mci->num_hid + _mci->num_pan + _mci->num_sco)
+
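+/* One Bluetooth profile, as reported in a GPM BT_PROFILE_INFO message */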
+struct ath_mci_profile_info {
+	u8 type;
+	u8 conn_handle;
+	bool start;
+	bool master;
+	bool edr;
+	u8 voice_type;
+	u16 T;		/* Voice: Tvoice, HID: Tsniff,        in slots */
+	u8 W;		/* Voice: Wvoice, HID: Sniff timeout, in slots */
+	u8 A;		/*		  HID: Sniff attempt, in slots */
+	struct list_head list;
+};
+
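+/* Link status snapshot taken from a GPM BT_STATUS_UPDATE message */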
+struct ath_mci_profile_status {
+	bool is_critical;
+	bool is_link;
+	u8 conn_handle;
+};
+
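+/* Aggregate state of all currently active Bluetooth profiles */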
+struct ath_mci_profile {
+	struct list_head info;
+	DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE);
+	u16 aggr_limit;
+	u8 num_mgmt;
+	u8 num_sco;
+	u8 num_a2dp;
+	u8 num_hid;
+	u8 num_pan;
+	u8 num_other_acl;
+	u8 num_bdr;
+};
+
+struct ath_mci_buf {
+	void *bf_addr;		/* virtual addr of desc */
+	dma_addr_t bf_paddr;    /* physical addr of buffer */
+	u32 bf_len;		/* len of data */
+};
+
+struct ath_mci_coex {
+	atomic_t mci_cal_flag;
+	struct ath_mci_buf sched_buf;
+	struct ath_mci_buf gpm_buf;
+	u32 bt_cal_start;
+};
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 2dcdf63..77dc327 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -121,7 +121,7 @@
 	if (!parent)
 		return;
 
-	if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+	if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
 		/* Bluetooth coexistence requires disabling ASPM. */
 		pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
 		aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
@@ -307,12 +307,11 @@
 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
 	struct ath_softc *sc = hw->priv;
 
-	ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
 	/* The device has to be moved to FULLSLEEP forcibly.
 	 * Otherwise the chip never moves to full sleep
 	 * when no interface is up.
 	 */
+	ath9k_hw_disable(sc->sc_ah);
 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 
 	return 0;
@@ -321,8 +320,6 @@
 static int ath_pci_resume(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
-	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-	struct ath_softc *sc = hw->priv;
 	u32 val;
 
 	/*
@@ -334,22 +331,6 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-	ath9k_ps_wakeup(sc);
-	/* Enable LED */
-	ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
-			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-	ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
-	  /*
-	   * Reset key cache to sane defaults (all entries cleared) instead of
-	   * semi-random values after suspend/resume.
-	   */
-	ath9k_cmn_init_crypto(sc->sc_ah);
-	ath9k_ps_restore(sc);
-
-	sc->ps_idle = true;
-	ath_radio_disable(sc, hw);
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 888abc2..b3c3798 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1199,7 +1199,7 @@
 			return &ar5416_11na_ratetable;
 		return &ar5416_11a_ratetable;
 	default:
-		ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n");
+		ath_dbg(common, CONFIG, "Invalid band\n");
 		return NULL;
 	}
 }
@@ -1271,11 +1271,12 @@
 
 	ath_rc_priv->max_valid_rate = k;
 	ath_rc_sort_validrates(rate_table, ath_rc_priv);
-	ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+	ath_rc_priv->rate_max_phy = (k > 4) ?
+					ath_rc_priv->valid_rate_index[k-4] :
+					ath_rc_priv->valid_rate_index[k-1];
 	ath_rc_priv->rate_table = rate_table;
 
-	ath_dbg(common, ATH_DBG_CONFIG,
-		"RC Initialized with capabilities: 0x%x\n",
+	ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
 		ath_rc_priv->ht_cap);
 }
 
@@ -1472,7 +1473,7 @@
 						   oper_cw40, oper_sgi);
 			ath_rc_init(sc, priv_sta, sband, sta, rate_table);
 
-			ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+			ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
 				"Operating HT Bandwidth changed to: %d\n",
 				sc->hw->conf.channel_type);
 		}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862c..0e666fb 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -172,7 +172,7 @@
 	u32 nbuf = 0;
 
 	if (list_empty(&sc->rx.rxbuf)) {
-		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+		ath_dbg(common, QUEUE, "No free rx buf available\n");
 		return;
 	}
 
@@ -337,7 +337,7 @@
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		return ath_rx_edma_init(sc, nbufs);
 	} else {
-		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
 			common->cachelsz, common->rx_bufsize);
 
 		/* Initialize rx descriptors */
@@ -475,7 +475,6 @@
 
 	return rfilt;
 
-#undef RX_FILTER_PRESERVE
 }
 
 int ath_startrecv(struct ath_softc *sc)
@@ -592,7 +591,7 @@
 
 	if (sc->ps_flags & PS_BEACON_SYNC) {
 		sc->ps_flags &= ~PS_BEACON_SYNC;
-		ath_dbg(common, ATH_DBG_PS,
+		ath_dbg(common, PS,
 			"Reconfigure Beacon timers based on timestamp from the AP\n");
 		ath_set_beacon(sc);
 	}
@@ -605,7 +604,7 @@
 		 * a backup trigger for returning into NETWORK SLEEP state,
 		 * so we are waiting for it as well.
 		 */
-		ath_dbg(common, ATH_DBG_PS,
+		ath_dbg(common, PS,
 			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
 		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
 		return;
@@ -618,8 +617,7 @@
 		 * been delivered.
 		 */
 		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
-		ath_dbg(common, ATH_DBG_PS,
-			"PS wait for CAB frames timed out\n");
+		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
 	}
 }
 
@@ -644,13 +642,13 @@
 		 * point.
 		 */
 		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
-		ath_dbg(common, ATH_DBG_PS,
+		ath_dbg(common, PS,
 			"All PS CAB frames received, back to sleep\n");
 	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
 		   !is_multicast_ether_addr(hdr->addr1) &&
 		   !ieee80211_has_morefrags(hdr->frame_control)) {
 		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
-		ath_dbg(common, ATH_DBG_PS,
+		ath_dbg(common, PS,
 			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
 			sc->ps_flags & (PS_WAIT_FOR_BEACON |
 					PS_WAIT_FOR_CAB |
@@ -933,7 +931,7 @@
 	 * No valid hardware bitrate found -- we should not get here
 	 * because hardware has already validated this frame as OK.
 	 */
-	ath_dbg(common, ATH_DBG_ANY,
+	ath_dbg(common, ANY,
 		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
 		rx_stats->rs_rate);
 
@@ -1824,6 +1822,7 @@
 		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
 		rxs = IEEE80211_SKB_RXCB(hdr_skb);
 		if (ieee80211_is_beacon(hdr->frame_control) &&
+		    !is_zero_ether_addr(common->curbssid) &&
 		    !compare_ether_addr(hdr->addr3, common->curbssid))
 			rs.is_mybeacon = true;
 		else
@@ -1838,11 +1837,6 @@
 		if (sc->sc_flags & SC_OP_RXFLUSH)
 			goto requeue_drop_frag;
 
-		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
-						 rxs, &decrypt_error);
-		if (retval)
-			goto requeue_drop_frag;
-
 		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
 		if (rs.rs_tstamp > tsf_lower &&
 		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1846,11 @@
 		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
 			rxs->mactime += 0x100000000ULL;
 
+		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+						 rxs, &decrypt_error);
+		if (retval)
+			goto requeue_drop_frag;
+
 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
 		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1922,20 @@
 			skb = hdr_skb;
 		}
 
-		/*
-		 * change the default rx antenna if rx diversity chooses the
-		 * other antenna 3 times in a row.
-		 */
-		if (sc->rx.defant != rs.rs_antenna) {
-			if (++sc->rx.rxotherant >= 3)
-				ath_setdefantenna(sc, rs.rs_antenna);
-		} else {
-			sc->rx.rxotherant = 0;
+
+		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+			/*
+			 * change the default rx antenna if rx diversity
+			 * chooses the other antenna 3 times in a row.
+			 */
+			if (sc->rx.defant != rs.rs_antenna) {
+				if (++sc->rx.rxotherant >= 3)
+					ath_setdefantenna(sc, rs.rs_antenna);
+			} else {
+				sc->rx.rxotherant = 0;
+			}
+
 		}
 
 		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fcb7e9..6e2f188 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1006,6 +1006,8 @@
 #define AR_INTR_ASYNC_MASK                       (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
 #define AR_INTR_ASYNC_MASK_GPIO                  0xFFFC0000
 #define AR_INTR_ASYNC_MASK_GPIO_S                18
+#define AR_INTR_ASYNC_MASK_MCI                   0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S                 7
 
 #define AR_INTR_SYNC_MASK                        (AR_SREV_9340(ah) ? 0x401c : 0x4034)
 #define AR_INTR_SYNC_MASK_GPIO                   0xFFFC0000
@@ -1013,6 +1015,14 @@
 
 #define AR_INTR_ASYNC_CAUSE_CLR                  (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
 #define AR_INTR_ASYNC_CAUSE                      (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI			 0x00000080
+#define AR_INTR_ASYNC_USED			 (AR_INTR_MAC_IRQ | \
+						  AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI         0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S       7
+
 
 #define AR_INTR_ASYNC_ENABLE                     (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
 #define AR_INTR_ASYNC_ENABLE_GPIO                0xFFFC0000
@@ -1269,6 +1279,8 @@
 #define AR_RTC_INTR_MASK \
 	((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
 
+#define AR_RTC_KEEP_AWAKE	0x7034
+
 /* RTC_DERIVED_* - only for AR9100 */
 
 #define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@
 #define AR_DIAG_FRAME_NV0           0x00020000
 #define AR_DIAG_OBS_PT_SEL1         0x000C0000
 #define AR_DIAG_OBS_PT_SEL1_S       18
+#define AR_DIAG_OBS_PT_SEL2         0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S       27
 #define AR_DIAG_FORCE_RX_CLEAR      0x00100000 /* force rx_clear high */
 #define AR_DIAG_IGNORE_VIRT_CS      0x00200000
 #define AR_DIAG_FORCE_CH_IDLE_HIGH  0x00400000
@@ -1752,19 +1766,10 @@
 
 #define AR_BT_COEX_WL_WEIGHTS0     0x8174
 #define AR_BT_COEX_WL_WEIGHTS1     0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i)  (0x83ac + (_i << 2))
 
-#define AR_BT_COEX_BT_WEIGHTS0     0x83ac
-#define AR_BT_COEX_BT_WEIGHTS1     0x83b0
-#define AR_BT_COEX_BT_WEIGHTS2     0x83b4
-#define AR_BT_COEX_BT_WEIGHTS3     0x83b8
-
-#define AR9300_BT_WGHT                     0xcccc4444
-#define AR9300_STOMP_ALL_WLAN_WGHT0        0xfffffff0
-#define AR9300_STOMP_ALL_WLAN_WGHT1        0xfffffff0
-#define AR9300_STOMP_LOW_WLAN_WGHT0        0x88888880
-#define AR9300_STOMP_LOW_WLAN_WGHT1        0x88888880
-#define AR9300_STOMP_NONE_WLAN_WGHT0       0x00000000
-#define AR9300_STOMP_NONE_WLAN_WGHT1       0x00000000
+#define AR9300_BT_WGHT             0xcccc4444
 
 #define AR_BT_COEX_MODE2           0x817c
 #define AR_BT_BCN_MISS_THRESH      0x000000ff
@@ -1938,37 +1943,277 @@
 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S		6
 
 /* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN		0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET    0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S  0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL     0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S   1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK       0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S     2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO       0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S     3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST        0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S      4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO       0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S     5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT         0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S       6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM             0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S           8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO        0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S      9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING    0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S  10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING      0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S    11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE        0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S      12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK	(AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
+
+#define AR_MCI_COMMAND0				0x1800
+#define AR_MCI_COMMAND0_HEADER			0xFF
+#define AR_MCI_COMMAND0_HEADER_S		0
+#define AR_MCI_COMMAND0_LEN			0x1f00
+#define AR_MCI_COMMAND0_LEN_S			8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP	0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S	13
+
+#define AR_MCI_COMMAND1				0x1804
+
+#define AR_MCI_COMMAND2				0x1808
+#define AR_MCI_COMMAND2_RESET_TX		0x01
+#define AR_MCI_COMMAND2_RESET_TX_S		0
+#define AR_MCI_COMMAND2_RESET_RX		0x02
+#define AR_MCI_COMMAND2_RESET_RX_S		1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES     0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S   2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP        0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S      10
+
+#define AR_MCI_RX_CTRL				0x180c
+
+#define AR_MCI_TX_CTRL				0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV			0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S		0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE	0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S	2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ		0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S	3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM		0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S	24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE			0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM		0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S		0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR		0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S	16
+
+#define AR_MCI_SCHD_TABLE_0				0x1818
+#define AR_MCI_SCHD_TABLE_1				0x181c
+#define AR_MCI_GPM_0					0x1820
+#define AR_MCI_GPM_1					0x1824
+#define AR_MCI_GPM_WRITE_PTR				0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S				16
+#define AR_MCI_GPM_BUF_LEN				0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S				0
+
+#define AR_MCI_INTERRUPT_RAW				0x1828
+#define AR_MCI_INTERRUPT_EN				0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE			0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S			0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG			0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S			1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL			0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S		2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR			0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S		3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL			0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S		4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL			0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S		5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL			0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S		7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL			0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S		8
+#define AR_MCI_INTERRUPT_RX_MSG				0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S			9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE		0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S		10
+#define AR_MCI_INTERRUPT_BT_PRI				0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S			11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH			0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S		27
+#define AR_MCI_INTERRUPT_BT_FREQ			0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S			28
+#define AR_MCI_INTERRUPT_BT_STOMP			0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S			29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ			0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S			30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT		0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S		31
+
+#define AR_MCI_INTERRUPT_DEFAULT    (AR_MCI_INTERRUPT_SW_MSG_DONE	  | \
+				     AR_MCI_INTERRUPT_RX_INVALID_HDR	  | \
+				     AR_MCI_INTERRUPT_RX_HW_MSG_FAIL	  | \
+				     AR_MCI_INTERRUPT_RX_SW_MSG_FAIL	  | \
+				     AR_MCI_INTERRUPT_TX_HW_MSG_FAIL	  | \
+				     AR_MCI_INTERRUPT_TX_SW_MSG_FAIL	  | \
+				     AR_MCI_INTERRUPT_RX_MSG		  | \
+				     AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+				     AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+					AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+					AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+					AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT				0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN			0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW			0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN			0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET		0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S		0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL		0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S		1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK		0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S		2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO		0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S		3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST		0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S		4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO		0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S		5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT			0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S		6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM			0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S			8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO		0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S		9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING		0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S		10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING		0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S		11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE		0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S		12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK	 (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
 					  AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
 					  AR_MCI_INTERRUPT_RX_MSG_LNA_INFO   | \
 					  AR_MCI_INTERRUPT_RX_MSG_CONT_NACK  | \
 					  AR_MCI_INTERRUPT_RX_MSG_CONT_INFO  | \
 					  AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
 
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM	 | \
+					 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+					 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING  | \
+					 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+					 AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO   | \
+					 AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+					 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO    | \
+					 AR_MCI_INTERRUPT_RX_MSG_CONT_NACK   | \
+					 AR_MCI_INTERRUPT_RX_MSG_CONT_INFO   | \
+					 AR_MCI_INTERRUPT_RX_MSG_CONT_RST    | \
+					 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT					0x1840
+
+#define AR_MCI_RX_STATUS			0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX		0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S		8
+#define AR_MCI_RX_REMOTE_SLEEP			0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S		12
+#define AR_MCI_RX_MCI_CLK_REQ			0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S			13
+
+#define AR_MCI_CONT_STATUS			0x1848
+#define AR_MCI_CONT_RSSI_POWER			0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S		0
+#define AR_MCI_CONT_RRIORITY			0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S			8
+#define AR_MCI_CONT_TXRX			0x00010000
+#define AR_MCI_CONT_TXRX_S			16
+
+#define AR_MCI_BT_PRI0				0x184c
+#define AR_MCI_BT_PRI1				0x1850
+#define AR_MCI_BT_PRI2				0x1854
+#define AR_MCI_BT_PRI3				0x1858
+#define AR_MCI_BT_PRI				0x185c
+#define AR_MCI_WL_FREQ0				0x1860
+#define AR_MCI_WL_FREQ1				0x1864
+#define AR_MCI_WL_FREQ2				0x1868
+#define AR_MCI_GAIN				0x186c
+#define AR_MCI_WBTIMER1				0x1870
+#define AR_MCI_WBTIMER2				0x1874
+#define AR_MCI_WBTIMER3				0x1878
+#define AR_MCI_WBTIMER4				0x187c
+#define AR_MCI_MAXGAIN				0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL			0x1884
+#define AR_MCI_HW_SCHD_TBL_D0			0x1888
+#define AR_MCI_HW_SCHD_TBL_D1			0x188c
+#define AR_MCI_HW_SCHD_TBL_D2			0x1890
+#define AR_MCI_HW_SCHD_TBL_D3			0x1894
+#define AR_MCI_TX_PAYLOAD0			0x1898
+#define AR_MCI_TX_PAYLOAD1			0x189c
+#define AR_MCI_TX_PAYLOAD2			0x18a0
+#define AR_MCI_TX_PAYLOAD3			0x18a4
+#define AR_BTCOEX_WBTIMER			0x18a8
+
+#define AR_BTCOEX_CTRL					0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE			0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S			0
+#define AR_BTCOEX_CTRL_WBTIMER_EN			0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S			1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN			0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S			2
+#define AR_BTCOEX_CTRL_LNA_SHARED			0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S			3
+#define AR_BTCOEX_CTRL_PA_SHARED			0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S			4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN		0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S		5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN	0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S	6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS			0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S			7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK			0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S			9
+#define AR_BTCOEX_CTRL_AGGR_THRESH			0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S			12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN			0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S			19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK			0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S			20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN			0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S			28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR			0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S			29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10			0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S			30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY			0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S			31
+
+#define AR_BTCOEX_WL_WEIGHTS0				0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1				0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2				0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3				0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x)				(0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA				0x1940
+#define AR_BTCOEX_RFGAIN_CTRL				0x1944
+
+#define AR_BTCOEX_CTRL2					0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH			0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S			11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK			0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S			19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT			0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S			22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL			0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S			23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL			0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S		24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE		0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S	25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE          0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S        0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL     0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S   1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT   0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN            0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S          17
+#define AR_GLB_DS_JTAG_DISABLE              0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S            18
+
+#define AR_BTCOEX_RC                    0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x)        (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG                   0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR          0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY          0x1a58
+
+#define AR_MCI_SCHD_TABLE_2             0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED   0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED    0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S  1
+
+#define AR_BTCOEX_CTRL3               0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT	0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S	0
+
 
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 35422fc..65c8894 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -187,7 +187,7 @@
 						   fatal_work);
 	struct ath_common *common = ath9k_hw_common(priv->ah);
 
-	ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n");
+	ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
 	ath9k_htc_reset(priv);
 }
 
@@ -330,8 +330,7 @@
 
 	time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
 	if (!time_left) {
-		ath_dbg(common, ATH_DBG_WMI,
-			"Timeout waiting for WMI command: %s\n",
+		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
 			wmi_cmd_to_name(cmd_id));
 		mutex_unlock(&wmi->op_mutex);
 		return -ETIMEDOUT;
@@ -342,8 +341,7 @@
 	return 0;
 
 out:
-	ath_dbg(common, ATH_DBG_WMI,
-		"WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+	ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
 	mutex_unlock(&wmi->op_mutex);
 	kfree_skb(skb);
 
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 03b0a65..3182408 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@
 			    int tx_flags, struct ath_txq *txq);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 				struct ath_txq *txq, struct list_head *bf_q,
-				struct ath_tx_status *ts, int txok, int sendbar);
+				struct ath_tx_status *ts, int txok);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 			     struct list_head *head, bool internal);
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -104,6 +104,32 @@
 /* Aggregation logic */
 /*********************/
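+/*
+ * ath_txq_lock()/ath_txq_unlock() wrap the per-queue axq_lock.
+ * ath_txq_unlock_complete() additionally hands any frames queued on
+ * txq->complete_q to mac80211 once the lock has been dropped, so
+ * ieee80211_tx_status() is never called with the queue lock held.
+ */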
 
+static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+	__acquires(&txq->axq_lock)
+{
+	spin_lock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+	__releases(&txq->axq_lock)
+{
+	spin_unlock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+	__releases(&txq->axq_lock)
+{
+	struct sk_buff_head q;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&q);
+	skb_queue_splice_init(&txq->complete_q, &q);
+	spin_unlock_bh(&txq->axq_lock);
+
+	while ((skb = __skb_dequeue(&q)))
+		ieee80211_tx_status(sc->hw, skb);
+}
+
 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
@@ -130,7 +156,7 @@
 
 	WARN_ON(!tid->paused);
 
-	spin_lock_bh(&txq->axq_lock);
+	ath_txq_lock(sc, txq);
 	tid->paused = false;
 
 	if (skb_queue_empty(&tid->buf_q))
@@ -139,7 +165,7 @@
 	ath_tx_queue_tid(txq, tid);
 	ath_txq_schedule(sc, txq);
 unlock:
-	spin_unlock_bh(&txq->axq_lock);
+	ath_txq_unlock_complete(sc, txq);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -150,6 +176,12 @@
 	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
 }
 
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
 	struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +190,36 @@
 	struct list_head bf_head;
 	struct ath_tx_status ts;
 	struct ath_frame_info *fi;
+	bool sendbar = false;
 
 	INIT_LIST_HEAD(&bf_head);
 
 	memset(&ts, 0, sizeof(ts));
-	spin_lock_bh(&txq->axq_lock);
 
 	while ((skb = __skb_dequeue(&tid->buf_q))) {
 		fi = get_frame_info(skb);
 		bf = fi->bf;
 
-		spin_unlock_bh(&txq->axq_lock);
 		if (bf && fi->retries) {
 			list_add_tail(&bf->list, &bf_head);
 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
-			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+			sendbar = true;
 		} else {
 			ath_tx_send_normal(sc, txq, NULL, skb);
 		}
-		spin_lock_bh(&txq->axq_lock);
 	}
 
-	spin_unlock_bh(&txq->axq_lock);
+	if (tid->baw_head == tid->baw_tail) {
+		tid->state &= ~AGGR_ADDBA_COMPLETE;
+		tid->state &= ~AGGR_CLEANUP;
+	}
+
+	if (sendbar) {
+		ath_txq_unlock(sc, txq);
+		ath_send_bar(tid, tid->seq_start);
+		ath_txq_lock(sc, txq);
+	}
 }
 
 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +235,8 @@
 	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
 		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+		if (tid->bar_index >= 0)
+			tid->bar_index--;
 	}
 }
 
@@ -238,9 +280,7 @@
 		bf = fi->bf;
 
 		if (!bf) {
-			spin_unlock(&txq->axq_lock);
 			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
-			spin_lock(&txq->axq_lock);
 			continue;
 		}
 
@@ -249,24 +289,26 @@
 		if (fi->retries)
 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
 
-		spin_unlock(&txq->axq_lock);
-		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
-		spin_lock(&txq->axq_lock);
+		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
 	}
 
 	tid->seq_next = tid->seq_start;
 	tid->baw_tail = tid->baw_head;
+	tid->bar_index = -1;
 }
 
 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
-			     struct sk_buff *skb)
+			     struct sk_buff *skb, int count)
 {
 	struct ath_frame_info *fi = get_frame_info(skb);
 	struct ath_buf *bf = fi->bf;
 	struct ieee80211_hdr *hdr;
+	int prev = fi->retries;
 
 	TX_STAT_INC(txq->axq_qnum, a_retries);
-	if (fi->retries++ > 0)
+	fi->retries += count;
+
+	if (prev > 0)
 		return;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +407,7 @@
 	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
 	struct list_head bf_head;
 	struct sk_buff_head bf_pending;
-	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
 	u32 ba[WME_BA_BMP_SIZE >> 5];
 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
 	bool rc_update = true;
@@ -374,6 +416,8 @@
 	int nframes;
 	u8 tidno;
 	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+	int i, retries;
+	int bar_index = -1;
 
 	skb = bf->bf_mpdu;
 	hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +426,10 @@
 
 	memcpy(rates, tx_info->control.rates, sizeof(rates));
 
+	retries = ts->ts_longretry + 1;
+	for (i = 0; i < ts->ts_rateindex; i++)
+		retries += rates[i].count;
+
 	rcu_read_lock();
 
 	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -395,8 +443,7 @@
 			if (!bf->bf_stale || bf_next != NULL)
 				list_move_tail(&bf->list, &bf_head);
 
-			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
-				0, 0);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
 
 			bf = bf_next;
 		}
@@ -406,6 +453,7 @@
 	an = (struct ath_node *)sta->drv_priv;
 	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
 	tid = ATH_AN_2_TID(an, tidno);
+	seq_first = tid->seq_start;
 
 	/*
 	 * The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +503,25 @@
 		} else if (!isaggr && txok) {
 			/* transmit completion */
 			acked_cnt++;
-		} else {
-			if ((tid->state & AGGR_CLEANUP) || !retry) {
-				/*
-				 * cleanup in progress, just fail
-				 * the un-acked sub-frames
-				 */
-				txfail = 1;
-			} else if (flush) {
-				txpending = 1;
-			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
-				if (txok || !an->sleeping)
-					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
+		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
+			/*
+			 * cleanup in progress, just fail
+			 * the un-acked sub-frames
+			 */
+			txfail = 1;
+		} else if (flush) {
+			txpending = 1;
+		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
+			if (txok || !an->sleeping)
+				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+						 retries);
 
-				txpending = 1;
-			} else {
-				txfail = 1;
-				sendbar = 1;
-				txfail_cnt++;
-			}
+			txpending = 1;
+		} else {
+			txfail = 1;
+			txfail_cnt++;
+			bar_index = max_t(int, bar_index,
+				ATH_BA_INDEX(seq_first, seqno));
 		}
 
 		/*
@@ -490,9 +538,7 @@
 			 * complete the acked-ones/xretried ones; update
 			 * block-ack window
 			 */
-			spin_lock_bh(&txq->axq_lock);
 			ath_tx_update_baw(sc, tid, seqno);
-			spin_unlock_bh(&txq->axq_lock);
 
 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
 				memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +547,30 @@
 			}
 
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
-				!txfail, sendbar);
+				!txfail);
 		} else {
 			/* retry the un-acked ones */
-			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
-				if (bf->bf_next == NULL && bf_last->bf_stale) {
-					struct ath_buf *tbf;
+			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+			    bf->bf_next == NULL && bf_last->bf_stale) {
+				struct ath_buf *tbf;
 
-					tbf = ath_clone_txbuf(sc, bf_last);
-					/*
-					 * Update tx baw and complete the
-					 * frame with failed status if we
-					 * run out of tx buf.
-					 */
-					if (!tbf) {
-						spin_lock_bh(&txq->axq_lock);
-						ath_tx_update_baw(sc, tid, seqno);
-						spin_unlock_bh(&txq->axq_lock);
+				tbf = ath_clone_txbuf(sc, bf_last);
+				/*
+				 * Update tx baw and complete the
+				 * frame with failed status if we
+				 * run out of tx buf.
+				 */
+				if (!tbf) {
+					ath_tx_update_baw(sc, tid, seqno);
 
-						ath_tx_complete_buf(sc, bf, txq,
-								    &bf_head,
-								    ts, 0,
-								    !flush);
-						break;
-					}
-
-					fi->bf = tbf;
+					ath_tx_complete_buf(sc, bf, txq,
+							    &bf_head, ts, 0);
+					bar_index = max_t(int, bar_index,
+						ATH_BA_INDEX(seq_first, seqno));
+					break;
 				}
+
+				fi->bf = tbf;
 			}
 
 			/*
@@ -545,7 +588,6 @@
 		if (an->sleeping)
 			ieee80211_sta_set_buffered(sta, tid->tidno, true);
 
-		spin_lock_bh(&txq->axq_lock);
 		skb_queue_splice(&bf_pending, &tid->buf_q);
 		if (!an->sleeping) {
 			ath_tx_queue_tid(txq, tid);
@@ -553,18 +595,22 @@
 			if (ts->ts_status & ATH9K_TXERR_FILT)
 				tid->ac->clear_ps_filter = true;
 		}
-		spin_unlock_bh(&txq->axq_lock);
 	}
 
-	if (tid->state & AGGR_CLEANUP) {
+	if (bar_index >= 0) {
+		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
+
+		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+
+		ath_txq_unlock(sc, txq);
+		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+		ath_txq_lock(sc, txq);
+	}
+
+	if (tid->state & AGGR_CLEANUP)
 		ath_tx_flush_tid(sc, tid);
 
-		if (tid->baw_head == tid->baw_tail) {
-			tid->state &= ~AGGR_ADDBA_COMPLETE;
-			tid->state &= ~AGGR_CLEANUP;
-		}
-	}
-
 	rcu_read_unlock();
 
 	if (needreset) {
@@ -601,6 +647,7 @@
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *tx_info;
 	struct ieee80211_tx_rate *rates;
+	struct ath_mci_profile *mci = &sc->btcoex.mci;
 	u32 max_4ms_framelen, frmlen;
 	u16 aggr_limit, legacy = 0;
 	int i;
@@ -617,24 +664,26 @@
 	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
 
 	for (i = 0; i < 4; i++) {
-		if (rates[i].count) {
-			int modeidx;
-			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
-				legacy = 1;
-				break;
-			}
+		int modeidx;
 
-			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-				modeidx = MCS_HT40;
-			else
-				modeidx = MCS_HT20;
+		if (!rates[i].count)
+			continue;
 
-			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
-				modeidx++;
-
-			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
-			max_4ms_framelen = min(max_4ms_framelen, frmlen);
+		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+			legacy = 1;
+			break;
 		}
+
+		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			modeidx = MCS_HT40;
+		else
+			modeidx = MCS_HT20;
+
+		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+			modeidx++;
+
+		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+		max_4ms_framelen = min(max_4ms_framelen, frmlen);
 	}
 
 	/*
@@ -645,7 +694,9 @@
 	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
 		return 0;
 
-	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
 		aggr_limit = min((max_4ms_framelen * 3) / 8,
 				 (u32)ATH_AMPDU_LIMIT_MAX);
 	else
@@ -768,8 +819,6 @@
 
 		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
 		seqno = bf->bf_state.seqno;
-		if (!bf_first)
-			bf_first = bf;
 
 		/* do not step over block-ack window */
 		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -777,6 +826,21 @@
 			break;
 		}
 
+		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+			struct ath_tx_status ts = {};
+			struct list_head bf_head;
+
+			INIT_LIST_HEAD(&bf_head);
+			list_add(&bf->list, &bf_head);
+			__skb_unlink(skb, &tid->buf_q);
+			ath_tx_update_baw(sc, tid, seqno);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+			continue;
+		}
+
+		if (!bf_first)
+			bf_first = bf;
+
 		if (!rl) {
 			aggr_limit = ath_lookup_rate(sc, bf, tid);
 			rl = 1;
@@ -1119,6 +1183,7 @@
 	txtid->state |= AGGR_ADDBA_PROGRESS;
 	txtid->paused = true;
 	*ssn = txtid->seq_start = txtid->seq_next;
+	txtid->bar_index = -1;
 
 	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
 	txtid->baw_head = txtid->baw_tail = 0;
@@ -1140,7 +1205,7 @@
 		return;
 	}
 
-	spin_lock_bh(&txq->axq_lock);
+	ath_txq_lock(sc, txq);
 	txtid->paused = true;
 
 	/*
@@ -1153,9 +1218,9 @@
 		txtid->state |= AGGR_CLEANUP;
 	else
 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
-	spin_unlock_bh(&txq->axq_lock);
 
 	ath_tx_flush_tid(sc, txtid);
+	ath_txq_unlock_complete(sc, txq);
 }
 
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1176,7 +1241,7 @@
 		ac = tid->ac;
 		txq = ac->txq;
 
-		spin_lock_bh(&txq->axq_lock);
+		ath_txq_lock(sc, txq);
 
 		buffered = !skb_queue_empty(&tid->buf_q);
 
@@ -1188,7 +1253,7 @@
 			list_del(&ac->list);
 		}
 
-		spin_unlock_bh(&txq->axq_lock);
+		ath_txq_unlock(sc, txq);
 
 		ieee80211_sta_set_buffered(sta, tidno, buffered);
 	}
@@ -1207,7 +1272,7 @@
 		ac = tid->ac;
 		txq = ac->txq;
 
-		spin_lock_bh(&txq->axq_lock);
+		ath_txq_lock(sc, txq);
 		ac->clear_ps_filter = true;
 
 		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
@@ -1215,7 +1280,7 @@
 			ath_txq_schedule(sc, txq);
 		}
 
-		spin_unlock_bh(&txq->axq_lock);
+		ath_txq_unlock_complete(sc, txq);
 	}
 }
 
@@ -1315,6 +1380,7 @@
 		txq->axq_qnum = axq_qnum;
 		txq->mac80211_qnum = -1;
 		txq->axq_link = NULL;
+		__skb_queue_head_init(&txq->complete_q);
 		INIT_LIST_HEAD(&txq->axq_q);
 		INIT_LIST_HEAD(&txq->axq_acq);
 		spin_lock_init(&txq->axq_lock);
@@ -1397,8 +1463,6 @@
 
 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
 			       struct list_head *list, bool retry_tx)
-	__releases(txq->axq_lock)
-	__acquires(txq->axq_lock)
 {
 	struct ath_buf *bf, *lastbf;
 	struct list_head bf_head;
@@ -1425,13 +1489,11 @@
 		if (bf_is_ampdu_not_probing(bf))
 			txq->axq_ampdu_depth--;
 
-		spin_unlock_bh(&txq->axq_lock);
 		if (bf_isampdu(bf))
 			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
 					     retry_tx);
 		else
-			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
-		spin_lock_bh(&txq->axq_lock);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
 	}
 }
 
@@ -1443,7 +1505,8 @@
  */
 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 {
-	spin_lock_bh(&txq->axq_lock);
+	ath_txq_lock(sc, txq);
+
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		int idx = txq->txq_tailidx;
 
@@ -1464,7 +1527,7 @@
 	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
 		ath_txq_drain_pending_buffers(sc, txq);
 
-	spin_unlock_bh(&txq->axq_lock);
+	ath_txq_unlock_complete(sc, txq);
 }
 
 bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1558,11 +1621,9 @@
 				break;
 		}
 
-		if (!list_empty(&ac->tid_q)) {
-			if (!ac->sched) {
-				ac->sched = true;
-				list_add_tail(&ac->list, &txq->axq_acq);
-			}
+		if (!list_empty(&ac->tid_q) && !ac->sched) {
+			ac->sched = true;
+			list_add_tail(&ac->list, &txq->axq_acq);
 		}
 
 		if (ac == last_ac ||
@@ -1600,8 +1661,8 @@
 	bf = list_first_entry(head, struct ath_buf, list);
 	bf_last = list_entry(head->prev, struct ath_buf, list);
 
-	ath_dbg(common, ATH_DBG_QUEUE,
-		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
+		txq->axq_qnum, txq->axq_depth);
 
 	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
 		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
@@ -1612,8 +1673,7 @@
 
 		if (txq->axq_link) {
 			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
-			ath_dbg(common, ATH_DBG_XMIT,
-				"link[%u] (%p)=%llx (%p)\n",
+			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
 				txq->axq_qnum, txq->axq_link,
 				ito64(bf->bf_daddr), bf->bf_desc);
 		} else if (!edma)
@@ -1625,7 +1685,7 @@
 	if (puttxbuf) {
 		TX_STAT_INC(txq->axq_qnum, puttxbuf);
 		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
 			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
 	}
 
@@ -1705,10 +1765,6 @@
 	list_add_tail(&bf->list, &bf_head);
 	bf->bf_state.bf_type = 0;
 
-	/* update starting sequence number for subsequent ADDBA request */
-	if (tid)
-		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
 	bf->bf_lastbf = bf;
 	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
 	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1771,7 +1827,7 @@
 
 	bf = ath_tx_get_buffer(sc);
 	if (!bf) {
-		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
+		ath_dbg(common, XMIT, "TX buffers are full\n");
 		goto error;
 	}
 
@@ -1816,7 +1872,6 @@
 	struct ath_buf *bf;
 	u8 tidno;
 
-	spin_lock_bh(&txctl->txq->axq_lock);
 	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
 		ieee80211_is_data_qos(hdr->frame_control)) {
 		tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1835,7 +1890,7 @@
 	} else {
 		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
 		if (!bf)
-			goto out;
+			return;
 
 		bf->bf_state.bfs_paprd = txctl->paprd;
 
@@ -1844,9 +1899,6 @@
 
 		ath_tx_send_normal(sc, txctl->txq, tid, skb);
 	}
-
-out:
-	spin_unlock_bh(&txctl->txq->axq_lock);
 }
 
 /* Upon failure caller should free skb */
@@ -1907,15 +1959,18 @@
 	 */
 
 	q = skb_get_queue_mapping(skb);
-	spin_lock_bh(&txq->axq_lock);
+
+	ath_txq_lock(sc, txq);
 	if (txq == sc->tx.txq_map[q] &&
 	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
 		ieee80211_stop_queue(sc->hw, q);
-		txq->stopped = 1;
+		txq->stopped = true;
 	}
-	spin_unlock_bh(&txq->axq_lock);
 
 	ath_tx_start_dma(sc, skb, txctl);
+
+	ath_txq_unlock(sc, txq);
+
 	return 0;
 }
 
@@ -1926,16 +1981,12 @@
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 			    int tx_flags, struct ath_txq *txq)
 {
-	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
 	int q, padpos, padsize;
 
-	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
-
-	if (tx_flags & ATH_TX_BAR)
-		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
 
 	if (!(tx_flags & ATH_TX_ERROR))
 		/* Frame was ACKed */
@@ -1952,9 +2003,9 @@
 		skb_pull(skb, padsize);
 	}
 
-	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
 		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
-		ath_dbg(common, ATH_DBG_PS,
+		ath_dbg(common, PS,
 			"Going back to sleep after having received TX status (0x%lx)\n",
 			sc->ps_flags & (PS_WAIT_FOR_BEACON |
 					PS_WAIT_FOR_CAB |
@@ -1964,32 +2015,27 @@
 
 	q = skb_get_queue_mapping(skb);
 	if (txq == sc->tx.txq_map[q]) {
-		spin_lock_bh(&txq->axq_lock);
 		if (WARN_ON(--txq->pending_frames < 0))
 			txq->pending_frames = 0;
 
 		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
 			ieee80211_wake_queue(sc->hw, q);
-			txq->stopped = 0;
+			txq->stopped = false;
 		}
-		spin_unlock_bh(&txq->axq_lock);
 	}
 
-	ieee80211_tx_status(hw, skb);
+	__skb_queue_tail(&txq->complete_q, skb);
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 				struct ath_txq *txq, struct list_head *bf_q,
-				struct ath_tx_status *ts, int txok, int sendbar)
+				struct ath_tx_status *ts, int txok)
 {
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	unsigned long flags;
 	int tx_flags = 0;
 
-	if (sendbar)
-		tx_flags = ATH_TX_BAR;
-
 	if (!txok)
 		tx_flags |= ATH_TX_ERROR;
 
@@ -2081,8 +2127,6 @@
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
 				  struct ath_tx_status *ts, struct ath_buf *bf,
 				  struct list_head *bf_head)
-	__releases(txq->axq_lock)
-	__acquires(txq->axq_lock)
 {
 	int txok;
 
@@ -2092,16 +2136,12 @@
 	if (bf_is_ampdu_not_probing(bf))
 		txq->axq_ampdu_depth--;
 
-	spin_unlock_bh(&txq->axq_lock);
-
 	if (!bf_isampdu(bf)) {
 		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
-		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
 	} else
 		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
 
-	spin_lock_bh(&txq->axq_lock);
-
 	if (sc->sc_flags & SC_OP_TXAGGR)
 		ath_txq_schedule(sc, txq);
 }
@@ -2116,11 +2156,11 @@
 	struct ath_tx_status ts;
 	int status;
 
-	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
 		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
 		txq->axq_link);
 
-	spin_lock_bh(&txq->axq_lock);
+	ath_txq_lock(sc, txq);
 	for (;;) {
 		if (work_pending(&sc->hw_reset_work))
 			break;
@@ -2179,7 +2219,7 @@
 
 		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
 	}
-	spin_unlock_bh(&txq->axq_lock);
+	ath_txq_unlock_complete(sc, txq);
 }
 
 static void ath_tx_complete_poll_work(struct work_struct *work)
@@ -2196,21 +2236,21 @@
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i)) {
 			txq = &sc->tx.txq[i];
-			spin_lock_bh(&txq->axq_lock);
+			ath_txq_lock(sc, txq);
 			if (txq->axq_depth) {
 				if (txq->axq_tx_inprogress) {
 					needreset = true;
-					spin_unlock_bh(&txq->axq_lock);
+					ath_txq_unlock(sc, txq);
 					break;
 				} else {
 					txq->axq_tx_inprogress = true;
 				}
 			}
-			spin_unlock_bh(&txq->axq_lock);
+			ath_txq_unlock_complete(sc, txq);
 		}
 
 	if (needreset) {
-		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
+		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
 			"tx hung, resetting the chip\n");
 		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
 		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
@@ -2253,8 +2293,7 @@
 		if (status == -EINPROGRESS)
 			break;
 		if (status == -EIO) {
-			ath_dbg(common, ATH_DBG_XMIT,
-				"Error processing tx status\n");
+			ath_dbg(common, XMIT, "Error processing tx status\n");
 			break;
 		}
 
@@ -2264,10 +2303,10 @@
 
 		txq = &sc->tx.txq[ts.qid];
 
-		spin_lock_bh(&txq->axq_lock);
+		ath_txq_lock(sc, txq);
 
 		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
-			spin_unlock_bh(&txq->axq_lock);
+			ath_txq_unlock(sc, txq);
 			return;
 		}
 
@@ -2293,7 +2332,7 @@
 		}
 
 		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
-		spin_unlock_bh(&txq->axq_lock);
+		ath_txq_unlock_complete(sc, txq);
 	}
 }
 
@@ -2431,7 +2470,7 @@
 		ac = tid->ac;
 		txq = ac->txq;
 
-		spin_lock_bh(&txq->axq_lock);
+		ath_txq_lock(sc, txq);
 
 		if (tid->sched) {
 			list_del(&tid->list);
@@ -2447,6 +2486,6 @@
 		tid->state &= ~AGGR_ADDBA_COMPLETE;
 		tid->state &= ~AGGR_CLEANUP;
 
-		spin_unlock_bh(&txq->axq_lock);
+		ath_txq_unlock(sc, txq);
 	}
 }
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index de57f90..3c16422 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -56,7 +56,7 @@
 
 struct carl9170_debugfs_fops {
 	unsigned int read_bufsize;
-	mode_t attr;
+	umode_t attr;
 	char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
 		      ssize_t *len);
 	ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cba9d04..3de61ad 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -146,13 +146,15 @@
 	return false;
 }
 
-static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data,
+				size_t len)
 {
 	const struct carl9170fw_otus_desc *otus_desc;
-	const struct carl9170fw_chk_desc *chk_desc;
 	const struct carl9170fw_last_desc *last_desc;
-	const struct carl9170fw_txsq_desc *txsq_desc;
-	u16 if_comb_types;
+	const struct carl9170fw_chk_desc *chk_desc;
+	unsigned long fin, diff;
+	unsigned int dsc_len;
+	u32 crc32;
 
 	last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
 		sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -170,36 +172,68 @@
 	chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
 		sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
 
-	if (chk_desc) {
-		unsigned long fin, diff;
-		unsigned int dsc_len;
-		u32 crc32;
+	if (!chk_desc) {
+		dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+		return 0;
+	}
 
-		dsc_len = min_t(unsigned int, len,
+	dsc_len = min_t(unsigned int, len,
 			(unsigned long)chk_desc - (unsigned long)otus_desc);
 
-		fin = (unsigned long) last_desc + sizeof(*last_desc);
-		diff = fin - (unsigned long) otus_desc;
+	fin = (unsigned long) last_desc + sizeof(*last_desc);
+	diff = fin - (unsigned long) otus_desc;
 
-		if (diff < len)
-			len -= diff;
+	if (diff < len)
+		len -= diff;
 
-		if (len < 256)
-			return -EIO;
+	if (len < 256)
+		return -EIO;
 
-		crc32 = crc32_le(~0, data, len);
-		if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
-			dev_err(&ar->udev->dev, "fw checksum test failed.\n");
-			return -ENOEXEC;
-		}
+	crc32 = crc32_le(~0, data, len);
+	if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
+		dev_err(&ar->udev->dev, "fw checksum test failed.\n");
+		return -ENOEXEC;
+	}
 
-		crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
-		if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
-			dev_err(&ar->udev->dev, "descriptor check failed.\n");
+	crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
+	if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
+		dev_err(&ar->udev->dev, "descriptor check failed.\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int carl9170_fw_tx_sequence(struct ar9170 *ar)
+{
+	const struct carl9170fw_txsq_desc *txsq_desc;
+
+	txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc),
+					  CARL9170FW_TXSQ_DESC_CUR_VER);
+	if (txsq_desc) {
+		ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+		if (!valid_cpu_addr(ar->fw.tx_seq_table))
 			return -EINVAL;
-		}
 	} else {
-		dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+		ar->fw.tx_seq_table = 0;
+	}
+
+	return 0;
+}
+
+static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+{
+	const struct carl9170fw_otus_desc *otus_desc;
+	int err;
+	u16 if_comb_types;
+
+	err = carl9170_fw_checksum(ar, data, len);
+	if (err)
+		return err;
+
+	otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
+		sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
+	if (!otus_desc) {
+		return -ENODATA;
 	}
 
 #define SUPP(feat)						\
@@ -321,19 +355,8 @@
 
 	ar->hw->wiphy->interface_modes |= if_comb_types;
 
-	txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
-		sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
-
-	if (txsq_desc) {
-		ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
-		if (!valid_cpu_addr(ar->fw.tx_seq_table))
-			return -EINVAL;
-	} else {
-		ar->fw.tx_seq_table = 0;
-	}
-
 #undef SUPPORTED
-	return 0;
+	return carl9170_fw_tx_sequence(ar);
 }
 
 static struct carl9170fw_desc_head *
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f06e069..db77421 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -48,7 +48,7 @@
 #include "carl9170.h"
 #include "cmd.h"
 
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
 
@@ -446,7 +446,7 @@
 
 	mutex_lock(&ar->mutex);
 	if (IS_ACCEPTING_CMD(ar)) {
-		rcu_assign_pointer(ar->beacon_iter, NULL);
+		RCU_INIT_POINTER(ar->beacon_iter, NULL);
 
 		carl9170_led_set_state(ar, 0);
 
@@ -678,7 +678,7 @@
 		vif_priv->active = false;
 		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
 		ar->vifs--;
-		rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
 		list_del_rcu(&vif_priv->list);
 		mutex_unlock(&ar->mutex);
 		synchronize_rcu();
@@ -716,7 +716,7 @@
 	WARN_ON(vif_priv->enable_beacon);
 	vif_priv->enable_beacon = false;
 	list_del_rcu(&vif_priv->list);
-	rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
 
 	if (vif == main_vif) {
 		rcu_read_unlock();
@@ -1258,7 +1258,7 @@
 		}
 
 		for (i = 0; i < CARL9170_NUM_TID; i++)
-			rcu_assign_pointer(sta_info->agg[i], NULL);
+			RCU_INIT_POINTER(sta_info->agg[i], NULL);
 
 		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
 		sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@
 			struct carl9170_sta_tid *tid_info;
 
 			tid_info = rcu_dereference(sta_info->agg[i]);
-			rcu_assign_pointer(sta_info->agg[i], NULL);
+			RCU_INIT_POINTER(sta_info->agg[i], NULL);
 
 			if (!tid_info)
 				continue;
@@ -1398,7 +1398,7 @@
 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
 		}
 
-		rcu_assign_pointer(sta_info->agg[tid], NULL);
+		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
 		rcu_read_unlock();
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1..d19a9ee 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@
 			 *    feedback either [CTL_REQ_TX_STATUS not set]
 			 */
 
-			dev_kfree_skb_any(skb);
+			ieee80211_free_txskb(ar->hw, skb);
 			return;
 		} else {
 			/*
@@ -1432,7 +1432,7 @@
 
 err_free:
 	ar->tx_dropped++;
-	dev_kfree_skb_any(skb);
+	ieee80211_free_txskb(ar->hw, skb);
 }
 
 void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 333b69e..89821e4 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1161,15 +1161,4 @@
 #endif /* CONFIG_PM */
 };
 
-static int __init carl9170_usb_init(void)
-{
-	return usb_register(&carl9170_driver);
-}
-
-static void __exit carl9170_usb_exit(void)
-{
-	usb_deregister(&carl9170_driver);
-}
-
-module_init(carl9170_usb_init);
-module_exit(carl9170_usb_exit);
+module_usb_driver(carl9170_driver);
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 4cf7c5e..0e81904 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -143,7 +143,7 @@
 		break;
 	case ATH_CIPHER_AES_CCM:
 		if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
-			ath_dbg(common, ATH_DBG_ANY,
+			ath_dbg(common, ANY,
 				"AES-CCM not supported by this mac rev\n");
 			return false;
 		}
@@ -152,15 +152,15 @@
 	case ATH_CIPHER_TKIP:
 		keyType = AR_KEYTABLE_TYPE_TKIP;
 		if (entry + 64 >= common->keymax) {
-			ath_dbg(common, ATH_DBG_ANY,
+			ath_dbg(common, ANY,
 				"entry %u inappropriate for TKIP\n", entry);
 			return false;
 		}
 		break;
 	case ATH_CIPHER_WEP:
 		if (k->kv_len < WLAN_KEY_LEN_WEP40) {
-			ath_dbg(common, ATH_DBG_ANY,
-				"WEP key length %u too small\n", k->kv_len);
+			ath_dbg(common, ANY, "WEP key length %u too small\n",
+				k->kv_len);
 			return false;
 		}
 		if (k->kv_len <= WLAN_KEY_LEN_WEP40)
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 65ecb5b..10dea37 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -21,6 +21,8 @@
 #include "regd.h"
 #include "regd_common.h"
 
+static int __ath_regd_init(struct ath_regulatory *reg);
+
 /*
  * This is a set of common rules used by our world regulatory domains.
  * We have 12 world regulatory domains. To save space we consolidate
@@ -347,10 +349,26 @@
 	}
 }
 
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+		if (!memcmp(allCountries[i].isoName, alpha2, 2))
+			return allCountries[i].countryCode;
+	}
+
+	return -1;
+}
+
 int ath_reg_notifier_apply(struct wiphy *wiphy,
 			   struct regulatory_request *request,
 			   struct ath_regulatory *reg)
 {
+	struct ath_common *common = container_of(reg, struct ath_common,
+						 regulatory);
+	u16 country_code;
+
 	/* We always apply this */
 	ath_reg_apply_radar_flags(wiphy);
 
@@ -363,14 +381,37 @@
 		return 0;
 
 	switch (request->initiator) {
-	case NL80211_REGDOM_SET_BY_DRIVER:
 	case NL80211_REGDOM_SET_BY_CORE:
+		/*
+		 * If common->reg_world_copy is world roaming it means we *were*
+		 * world roaming... so we now have to restore that data.
+		 */
+		if (!ath_is_world_regd(&common->reg_world_copy))
+			break;
+
+		memcpy(reg, &common->reg_world_copy,
+		       sizeof(struct ath_regulatory));
+		break;
+	case NL80211_REGDOM_SET_BY_DRIVER:
 	case NL80211_REGDOM_SET_BY_USER:
 		break;
 	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
-		if (ath_is_world_regd(reg))
-			ath_reg_apply_world_flags(wiphy, request->initiator,
-						  reg);
+		if (!ath_is_world_regd(reg))
+			break;
+
+		country_code = ath_regd_find_country_by_name(request->alpha2);
+		if (country_code == (u16) -1)
+			break;
+
+		reg->current_rd = COUNTRY_ERD_FLAG;
+		reg->current_rd |= country_code;
+
+		printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
+			reg->current_rd);
+		__ath_regd_init(reg);
+
+		ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
 		break;
 	}
 
@@ -508,11 +549,7 @@
 	reg->current_rd = 0x64;
 }
 
-int
-ath_regd_init(struct ath_regulatory *reg,
-	      struct wiphy *wiphy,
-	      int (*reg_notifier)(struct wiphy *wiphy,
-				  struct regulatory_request *request))
+static int __ath_regd_init(struct ath_regulatory *reg)
 {
 	struct country_code_to_enum_rd *country = NULL;
 	u16 regdmn;
@@ -583,7 +620,29 @@
 	printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
 		reg->regpair->regDmnEnum);
 
+	return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+	      struct wiphy *wiphy,
+	      int (*reg_notifier)(struct wiphy *wiphy,
+				  struct regulatory_request *request))
+{
+	struct ath_common *common = container_of(reg, struct ath_common,
+						 regulatory);
+	int r;
+
+	r = __ath_regd_init(reg);
+	if (r)
+		return r;
+
+	if (ath_is_world_regd(reg))
+		memcpy(&common->reg_world_copy, reg,
+		       sizeof(struct ath_regulatory));
+
 	ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
 	return 0;
 }
 EXPORT_SYMBOL(ath_regd_init);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 37110df..16e8f80 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -191,6 +191,9 @@
 #define B43_BFH_BUCKBOOST		0x0020	/* has buck/booster */
 #define B43_BFH_FEM_BT			0x0040	/* has FEM and switch to share antenna
 						 * with bluetooth */
+#define B43_BFH_NOCBUCK			0x0080
+#define B43_BFH_PALDO			0x0200
+#define B43_BFH_EXTLNA_5GHZ		0x1000	/* has an external LNA (5GHz mode) */
 
 /* SPROM boardflags2_lo values */
 #define B43_BFL2_RXBB_INT_REG_DIS	0x0001	/* external RX BB regulator present */
@@ -204,6 +207,14 @@
 #define B43_BFL2_SKWRKFEM_BRD		0x0100	/* 4321mcm93 uses Skyworks FEM */
 #define B43_BFL2_SPUR_WAR		0x0200	/* has a workaround for clock-harmonic spurs */
 #define B43_BFL2_GPLL_WAR		0x0400	/* alternative G-band PLL settings implemented */
+#define B43_BFL2_SINGLEANT_CCK		0x1000
+#define B43_BFL2_2G_SPUR_WAR		0x2000
+
+/* SPROM boardflags2_hi values */
+#define B43_BFH2_GPLL_WAR2		0x0001
+#define B43_BFH2_IPALVLSHIFT_3P3	0x0002
+#define B43_BFH2_INTERNDET_TXIQCAL	0x0004
+#define B43_BFH2_XTALBUFOUTEN		0x0008
 
 /* GPIO register offset, in both ChipCommon and PCI core. */
 #define B43_GPIO_CONTROL		0x6c
@@ -667,6 +678,7 @@
 };
 
 /* SHM offsets to the QOS data structures for the 4 different queues. */
+#define B43_QOS_QUEUE_NUM	4
 #define B43_QOS_PARAMS(queue)	(B43_SHM_SH_EDCFQ + \
 				 (B43_NR_QOSPARAMS * sizeof(u16) * (queue)))
 #define B43_QOS_BACKGROUND	B43_QOS_PARAMS(0)
@@ -904,7 +916,7 @@
 	struct work_struct beacon_update_trigger;
 
 	/* The current QOS parameters for the 4 queues. */
-	struct b43_qos_params qos_params[4];
+	struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM];
 
 	/* Work for adjustment of the transmission power.
 	 * This is scheduled when we determine that the actual TX output
@@ -913,8 +925,12 @@
 
 	/* Packet transmit work */
 	struct work_struct tx_work;
+
 	/* Queue of packets to be transmitted. */
-	struct sk_buff_head tx_queue;
+	struct sk_buff_head tx_queue[B43_QOS_QUEUE_NUM];
+
+	/* Flags that indicate which queues are stopped. */
+	bool tx_queue_stopped[B43_QOS_QUEUE_NUM];
 
 	/* The device LEDs. */
 	struct b43_leds leds;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 5e45604..b5f1b91 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -890,7 +890,7 @@
 	else
 		ring->ops = &dma32_ops;
 	if (for_tx) {
-		ring->tx = 1;
+		ring->tx = true;
 		ring->current_slot = -1;
 	} else {
 		if (ring->index == 0) {
@@ -1061,7 +1061,7 @@
 static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 {
 	u64 orig_mask = mask;
-	bool fallback = 0;
+	bool fallback = false;
 	int err;
 
 	/* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@
 		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
-			fallback = 1;
+			fallback = true;
 			continue;
 		}
 		if (mask == DMA_BIT_MASK(32)) {
 			mask = DMA_BIT_MASK(30);
-			fallback = 1;
+			fallback = true;
 			continue;
 		}
 		b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@
 	memset(meta, 0, sizeof(*meta));
 
 	meta->skb = skb;
-	meta->is_last_fragment = 1;
+	meta->is_last_fragment = true;
 	priv_info->bouncebuffer = NULL;
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1465,8 +1465,10 @@
 	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
 	    should_inject_overflow(ring)) {
 		/* This TX ring is full. */
-		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-		ring->stopped = 1;
+		unsigned int skb_mapping = skb_get_queue_mapping(skb);
+		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+		dev->wl->tx_queue_stopped[skb_mapping] = 1;
+		ring->stopped = true;
 		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
 			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
 		}
@@ -1584,12 +1586,21 @@
 	}
 	if (ring->stopped) {
 		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+		ring->stopped = false;
+	}
+
+	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+	} else {
+		/* If the driver queue is running, wake the corresponding
+		 * mac80211 queue. */
 		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
-		ring->stopped = 0;
 		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
 			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
 		}
 	}
+	/* Add work to the queue. */
+	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
 }
 
 static void dma_rx(struct b43_dmaring *ring, int *slot)
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index a38c1c6..d79ab2a 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -74,7 +74,7 @@
 	if (radio_enabled)
 		turn_on = atomic_read(&led->state) != LED_OFF;
 	else
-		turn_on = 0;
+		turn_on = false;
 	if (turn_on == led->hw_state)
 		return;
 	led->hw_state = turn_on;
@@ -225,11 +225,11 @@
 	if (sprom[led_index] == 0xFF) {
 		/* There is no LED information in the SPROM
 		 * for this LED. Hardcode it here. */
-		*activelow = 0;
+		*activelow = false;
 		switch (led_index) {
 		case 0:
 			*behaviour = B43_LED_ACTIVITY;
-			*activelow = 1;
+			*activelow = true;
 			if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
 				*behaviour = B43_LED_RADIO_ALL;
 			break;
@@ -267,11 +267,11 @@
 	if (led->wl) {
 		if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
 			b43_led_turn_on(dev, led->index, led->activelow);
-			led->hw_state = 1;
+			led->hw_state = true;
 			atomic_set(&led->state, 1);
 		} else {
 			b43_led_turn_off(dev, led->index, led->activelow);
-			led->hw_state = 0;
+			led->hw_state = false;
 			atomic_set(&led->state, 0);
 		}
 	}
@@ -280,19 +280,19 @@
 	led = &dev->wl->leds.led_tx;
 	if (led->wl) {
 		b43_led_turn_off(dev, led->index, led->activelow);
-		led->hw_state = 0;
+		led->hw_state = false;
 		atomic_set(&led->state, 0);
 	}
 	led = &dev->wl->leds.led_rx;
 	if (led->wl) {
 		b43_led_turn_off(dev, led->index, led->activelow);
-		led->hw_state = 0;
+		led->hw_state = false;
 		atomic_set(&led->state, 0);
 	}
 	led = &dev->wl->leds.led_assoc;
 	if (led->wl) {
 		b43_led_turn_off(dev, led->index, led->activelow);
-		led->hw_state = 0;
+		led->hw_state = false;
 		atomic_set(&led->state, 0);
 	}
 
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 4c82d58..916123a 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -826,7 +826,7 @@
 	const struct b43_rfatt *rfatt;
 	const struct b43_bbatt *bbatt;
 	u64 power_vector;
-	bool table_changed = 0;
+	bool table_changed = false;
 
 	BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
 	B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@
 			lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
 					 | (val & 0x00FF);
 		}
-		table_changed = 1;
+		table_changed = true;
 	}
 	if (table_changed) {
 		/* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@
 	unsigned long now;
 	unsigned long expire;
 	struct b43_lo_calib *cal, *tmp;
-	bool current_item_expired = 0;
+	bool current_item_expired = false;
 	bool hwpctl;
 
 	if (!lo)
@@ -968,7 +968,7 @@
 		if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
 		    b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
 			B43_WARN_ON(current_item_expired);
-			current_item_expired = 1;
+			current_item_expired = true;
 		}
 		if (b43_debug(dev, B43_DBG_LO)) {
 			b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5634d9a..1c6f193 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1122,17 +1122,17 @@
 	B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
 
 	if (ps_flags & B43_PS_ENABLED) {
-		hwps = 1;
+		hwps = true;
 	} else if (ps_flags & B43_PS_DISABLED) {
-		hwps = 0;
+		hwps = false;
 	} else {
 		//TODO: If powersave is not off and FIXME is not set and we are not in adhoc
 		//      and thus is not an AP and we are associated, set bit 25
 	}
 	if (ps_flags & B43_PS_AWAKE) {
-		awake = 1;
+		awake = true;
 	} else if (ps_flags & B43_PS_ASLEEP) {
-		awake = 0;
+		awake = false;
 	} else {
 		//TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
 		//      or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1140,8 @@
 	}
 
 /* FIXME: For now we force awake-on and hwps-off */
-	hwps = 0;
-	awake = 1;
+	hwps = false;
+	awake = true;
 
 	macctl = b43_read32(dev, B43_MMIO_MACCTL);
 	if (hwps)
@@ -1339,7 +1339,7 @@
 		return;
 	if (dev->noisecalc.calculation_running)
 		return;
-	dev->noisecalc.calculation_running = 1;
+	dev->noisecalc.calculation_running = true;
 	dev->noisecalc.nr_samples = 0;
 
 	b43_generate_noise_sample(dev);
@@ -1408,7 +1408,7 @@
 			average -= 48;
 
 		dev->stats.link_noise = average;
-		dev->noisecalc.calculation_running = 0;
+		dev->noisecalc.calculation_running = false;
 		return;
 	}
 generate_new:
@@ -1424,7 +1424,7 @@
 			b43_power_saving_ctl_bits(dev, 0);
 	}
 	if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
-		dev->dfq_valid = 1;
+		dev->dfq_valid = true;
 }
 
 static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1433,7 @@
 		b43_write32(dev, B43_MMIO_MACCMD,
 			    b43_read32(dev, B43_MMIO_MACCMD)
 			    | B43_MACCMD_DFQ_VALID);
-		dev->dfq_valid = 0;
+		dev->dfq_valid = false;
 	}
 }
 
@@ -1539,7 +1539,7 @@
 	unsigned int i, len, variable_len;
 	const struct ieee80211_mgmt *bcn;
 	const u8 *ie;
-	bool tim_found = 0;
+	bool tim_found = false;
 	unsigned int rate;
 	u16 ctl;
 	int antenna;
@@ -1588,7 +1588,7 @@
 			/* A valid TIM is at least 4 bytes long. */
 			if (ie_len < 4)
 				break;
-			tim_found = 1;
+			tim_found = true;
 
 			tim_position = sizeof(struct b43_plcp_hdr6);
 			tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1625,7 @@
 	if (wl->beacon0_uploaded)
 		return;
 	b43_write_beacon_template(dev, 0x68, 0x18);
-	wl->beacon0_uploaded = 1;
+	wl->beacon0_uploaded = true;
 }
 
 static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1635,7 @@
 	if (wl->beacon1_uploaded)
 		return;
 	b43_write_beacon_template(dev, 0x468, 0x1A);
-	wl->beacon1_uploaded = 1;
+	wl->beacon1_uploaded = true;
 }
 
 static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1667,7 @@
 	if (unlikely(wl->beacon_templates_virgin)) {
 		/* We never uploaded a beacon before.
 		 * Upload both templates now, but only mark one valid. */
-		wl->beacon_templates_virgin = 0;
+		wl->beacon_templates_virgin = false;
 		b43_upload_beacon0(dev);
 		b43_upload_beacon1(dev);
 		cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1755,8 @@
 	if (wl->current_beacon)
 		dev_kfree_skb_any(wl->current_beacon);
 	wl->current_beacon = beacon;
-	wl->beacon0_uploaded = 0;
-	wl->beacon1_uploaded = 0;
+	wl->beacon0_uploaded = false;
+	wl->beacon1_uploaded = false;
 	ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
 }
 
@@ -1913,7 +1913,7 @@
 			b43err(dev->wl, "This device does not support DMA "
 			       "on your system. It will now be switched to PIO.\n");
 			/* Fall back to PIO transfers if we get fatal DMA errors! */
-			dev->use_pio = 1;
+			dev->use_pio = true;
 			b43_controller_restart(dev, "DMA error");
 			return;
 		}
@@ -2240,12 +2240,12 @@
 		filename = NULL;
 	else
 		goto err_no_pcm;
-	fw->pcm_request_failed = 0;
+	fw->pcm_request_failed = false;
 	err = b43_do_request_fw(ctx, filename, &fw->pcm);
 	if (err == -ENOENT) {
 		/* We did not find a PCM file? Not fatal, but
 		 * core rev <= 10 must do without hwcrypto then. */
-		fw->pcm_request_failed = 1;
+		fw->pcm_request_failed = true;
 	} else if (err)
 		goto err_load;
 
@@ -2535,7 +2535,7 @@
 	dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
 	dev->qos_enabled = !!modparam_qos;
 	/* Default to firmware/hardware crypto acceleration. */
-	dev->hwcrypto_enabled = 1;
+	dev->hwcrypto_enabled = true;
 
 	if (dev->fw.opensource) {
 		u16 fwcapa;
@@ -2549,7 +2549,7 @@
 		if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
 			b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
 			/* Disable hardware crypto and fall back to software crypto. */
-			dev->hwcrypto_enabled = 0;
+			dev->hwcrypto_enabled = false;
 		}
 		if (!(fwcapa & B43_FWCAPA_QOS)) {
 			b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2557,7 @@
 			 * ieee80211_unregister to make sure the networking core can
 			 * properly free possible resources. */
 			dev->wl->hw->queues = 1;
-			dev->qos_enabled = 0;
+			dev->qos_enabled = false;
 		}
 	} else {
 		b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3361,10 @@
 	wl->rng.name = wl->rng_name;
 	wl->rng.data_read = b43_rng_read;
 	wl->rng.priv = (unsigned long)wl;
-	wl->rng_initialized = 1;
+	wl->rng_initialized = true;
 	err = hwrng_register(&wl->rng);
 	if (err) {
-		wl->rng_initialized = 0;
+		wl->rng_initialized = false;
 		b43err(wl, "Failed to register the random "
 		       "number generator (%d)\n", err);
 	}
@@ -3378,6 +3378,7 @@
 	struct b43_wl *wl = container_of(work, struct b43_wl, tx_work);
 	struct b43_wldev *dev;
 	struct sk_buff *skb;
+	int queue_num;
 	int err = 0;
 
 	mutex_lock(&wl->mutex);
@@ -3387,15 +3388,26 @@
 		return;
 	}
 
-	while (skb_queue_len(&wl->tx_queue)) {
-		skb = skb_dequeue(&wl->tx_queue);
+	for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+		while (skb_queue_len(&wl->tx_queue[queue_num])) {
+			skb = skb_dequeue(&wl->tx_queue[queue_num]);
+			if (b43_using_pio_transfers(dev))
+				err = b43_pio_tx(dev, skb);
+			else
+				err = b43_dma_tx(dev, skb);
+			if (err == -ENOSPC) {
+				wl->tx_queue_stopped[queue_num] = 1;
+				ieee80211_stop_queue(wl->hw, queue_num);
+				skb_queue_head(&wl->tx_queue[queue_num], skb);
+				break;
+			}
+			if (unlikely(err))
+				dev_kfree_skb(skb); /* Drop it */
+			err = 0;
+		}
 
-		if (b43_using_pio_transfers(dev))
-			err = b43_pio_tx(dev, skb);
-		else
-			err = b43_dma_tx(dev, skb);
-		if (unlikely(err))
-			dev_kfree_skb(skb); /* Drop it */
+		if (!err)
+			wl->tx_queue_stopped[queue_num] = 0;
 	}
 
 #if B43_DEBUG
@@ -3416,8 +3428,12 @@
 	}
 	B43_WARN_ON(skb_shinfo(skb)->nr_frags);
 
-	skb_queue_tail(&wl->tx_queue, skb);
-	ieee80211_queue_work(wl->hw, &wl->tx_work);
+	skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+	if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+		ieee80211_queue_work(wl->hw, &wl->tx_work);
+	} else {
+		ieee80211_stop_queue(wl->hw, skb->queue_mapping);
+	}
 }
 
 static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -3702,13 +3718,13 @@
 		case IEEE80211_BAND_5GHZ:
 			if (d->phy.supports_5ghz) {
 				up_dev = d;
-				gmode = 0;
+				gmode = false;
 			}
 			break;
 		case IEEE80211_BAND_2GHZ:
 			if (d->phy.supports_2ghz) {
 				up_dev = d;
-				gmode = 1;
+				gmode = true;
 			}
 			break;
 		default:
@@ -4147,6 +4163,7 @@
 	struct b43_wl *wl;
 	struct b43_wldev *orig_dev;
 	u32 mask;
+	int queue_num;
 
 	if (!dev)
 		return NULL;
@@ -4199,9 +4216,11 @@
 	mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
 	B43_WARN_ON(mask != 0xFFFFFFFF && mask);
 
-	/* Drain the TX queue */
-	while (skb_queue_len(&wl->tx_queue))
-		dev_kfree_skb(skb_dequeue(&wl->tx_queue));
+	/* Drain all TX queues. */
+	for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+		while (skb_queue_len(&wl->tx_queue[queue_num]))
+			dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+	}
 
 	b43_mac_suspend(dev);
 	b43_leds_exit(dev);
@@ -4425,18 +4444,18 @@
 	atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
 
 #if B43_DEBUG
-	phy->phy_locked = 0;
-	phy->radio_locked = 0;
+	phy->phy_locked = false;
+	phy->radio_locked = false;
 #endif
 }
 
 static void setup_struct_wldev_for_init(struct b43_wldev *dev)
 {
-	dev->dfq_valid = 0;
+	dev->dfq_valid = false;
 
 	/* Assume the radio is enabled. If it's not enabled, the state will
 	 * immediately get fixed on the first periodic work run. */
-	dev->radio_hw_enable = 1;
+	dev->radio_hw_enable = true;
 
 	/* Stats */
 	memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4670,16 +4689,16 @@
 
 	if (b43_bus_host_is_pcmcia(dev->dev) ||
 	    b43_bus_host_is_sdio(dev->dev)) {
-		dev->__using_pio_transfers = 1;
+		dev->__using_pio_transfers = true;
 		err = b43_pio_init(dev);
 	} else if (dev->use_pio) {
 		b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
 			"This should not be needed and will result in lower "
 			"performance.\n");
-		dev->__using_pio_transfers = 1;
+		dev->__using_pio_transfers = true;
 		err = b43_pio_init(dev);
 	} else {
-		dev->__using_pio_transfers = 0;
+		dev->__using_pio_transfers = false;
 		err = b43_dma_init(dev);
 	}
 	if (err)
@@ -4733,7 +4752,7 @@
 	b43dbg(wl, "Adding Interface type %d\n", vif->type);
 
 	dev = wl->current_dev;
-	wl->operating = 1;
+	wl->operating = true;
 	wl->vif = vif;
 	wl->if_type = vif->type;
 	memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4767,7 +4786,7 @@
 	B43_WARN_ON(wl->vif != vif);
 	wl->vif = NULL;
 
-	wl->operating = 0;
+	wl->operating = false;
 
 	b43_adjust_opmode(dev);
 	memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4789,12 +4808,12 @@
 	memset(wl->bssid, 0, ETH_ALEN);
 	memset(wl->mac_addr, 0, ETH_ALEN);
 	wl->filter_flags = 0;
-	wl->radiotap_enabled = 0;
+	wl->radiotap_enabled = false;
 	b43_qos_clear(wl);
-	wl->beacon0_uploaded = 0;
-	wl->beacon1_uploaded = 0;
-	wl->beacon_templates_virgin = 1;
-	wl->radio_enabled = 1;
+	wl->beacon0_uploaded = false;
+	wl->beacon1_uploaded = false;
+	wl->beacon_templates_virgin = true;
+	wl->radio_enabled = true;
 
 	mutex_lock(&wl->mutex);
 
@@ -4840,7 +4859,7 @@
 			goto out_unlock;
 	}
 	b43_wireless_core_exit(dev);
-	wl->radio_enabled = 0;
+	wl->radio_enabled = false;
 
 out_unlock:
 	mutex_unlock(&wl->mutex);
@@ -5028,7 +5047,7 @@
 	struct pci_dev *pdev = NULL;
 	int err;
 	u32 tmp;
-	bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+	bool have_2ghz_phy = false, have_5ghz_phy = false;
 
 	/* Do NOT do any device initialization here.
 	 * Do it in wireless_core_init() instead.
@@ -5071,7 +5090,7 @@
 	}
 
 	dev->phy.gmode = have_2ghz_phy;
-	dev->phy.radio_on = 1;
+	dev->phy.radio_on = true;
 	b43_wireless_core_reset(dev, dev->phy.gmode);
 
 	err = b43_phy_versioning(dev);
@@ -5082,11 +5101,11 @@
 	    (pdev->device != 0x4312 &&
 	     pdev->device != 0x4319 && pdev->device != 0x4324)) {
 		/* No multiband support. */
-		have_2ghz_phy = 0;
-		have_5ghz_phy = 0;
+		have_2ghz_phy = false;
+		have_5ghz_phy = false;
 		switch (dev->phy.type) {
 		case B43_PHYTYPE_A:
-			have_5ghz_phy = 1;
+			have_5ghz_phy = true;
 			break;
 		case B43_PHYTYPE_LP: //FIXME not always!
 #if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5096,7 +5115,7 @@
 		case B43_PHYTYPE_N:
 		case B43_PHYTYPE_HT:
 		case B43_PHYTYPE_LCN:
-			have_2ghz_phy = 1;
+			have_2ghz_phy = true;
 			break;
 		default:
 			B43_WARN_ON(1);
@@ -5112,8 +5131,8 @@
 		/* FIXME: For now we disable the A-PHY on multi-PHY devices. */
 		if (dev->phy.type != B43_PHYTYPE_N &&
 		    dev->phy.type != B43_PHYTYPE_LP) {
-			have_2ghz_phy = 1;
-			have_5ghz_phy = 0;
+			have_2ghz_phy = true;
+			have_5ghz_phy = false;
 		}
 	}
 
@@ -5245,6 +5264,7 @@
 	struct ieee80211_hw *hw;
 	struct b43_wl *wl;
 	char chip_name[6];
+	int queue_num;
 
 	hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
 	if (!hw) {
@@ -5264,7 +5284,7 @@
 		BIT(NL80211_IFTYPE_WDS) |
 		BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->queues = modparam_qos ? 4 : 1;
+	hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
 	wl->mac80211_initially_registered_queues = hw->queues;
 	hw->max_rates = 2;
 	SET_IEEE80211_DEV(hw, dev->dev);
@@ -5281,7 +5301,12 @@
 	INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
 	INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
 	INIT_WORK(&wl->tx_work, b43_tx_work);
-	skb_queue_head_init(&wl->tx_queue);
+
+	/* Initialize queues and flags. */
+	for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+		skb_queue_head_init(&wl->tx_queue[queue_num]);
+		wl->tx_queue_stopped[queue_num] = 0;
+	}
 
 	snprintf(chip_name, ARRAY_SIZE(chip_name),
 		 (dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3ea44bb..3f8883b 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -145,7 +145,7 @@
 
 #if B43_DEBUG
 	B43_WARN_ON(dev->phy.radio_locked);
-	dev->phy.radio_locked = 1;
+	dev->phy.radio_locked = true;
 #endif
 
 	macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@
 
 #if B43_DEBUG
 	B43_WARN_ON(!dev->phy.radio_locked);
-	dev->phy.radio_locked = 0;
+	dev->phy.radio_locked = false;
 #endif
 
 	/* Commit any write */
@@ -178,7 +178,7 @@
 {
 #if B43_DEBUG
 	B43_WARN_ON(dev->phy.phy_locked);
-	dev->phy.phy_locked = 1;
+	dev->phy.phy_locked = true;
 #endif
 	B43_WARN_ON(dev->dev->core_rev < 3);
 
@@ -190,7 +190,7 @@
 {
 #if B43_DEBUG
 	B43_WARN_ON(!dev->phy.phy_locked);
-	dev->phy.phy_locked = 0;
+	dev->phy.phy_locked = false;
 #endif
 	B43_WARN_ON(dev->dev->core_rev < 3);
 
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 8e157bc..12f467b 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -897,7 +897,7 @@
 		if (b43_phy_read(dev, 0x0033) & 0x0800)
 			break;
 
-		gphy->aci_enable = 1;
+		gphy->aci_enable = true;
 
 		phy_stacksave(B43_PHY_RADIO_BITFIELD);
 		phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@
 		if (!(b43_phy_read(dev, 0x0033) & 0x0800))
 			break;
 
-		gphy->aci_enable = 0;
+		gphy->aci_enable = false;
 
 		phy_stackrestore(B43_PHY_RADIO_BITFIELD);
 		phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@
 			bbatt.att = 11;
 			if (phy->radio_rev == 8) {
 				rfatt.att = 15;
-				rfatt.with_padmix = 1;
+				rfatt.with_padmix = true;
 			} else {
 				rfatt.att = 9;
-				rfatt.with_padmix = 0;
+				rfatt.with_padmix = false;
 			}
 			b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
 		}
@@ -2137,7 +2137,7 @@
 	struct b43_bus_dev *bdev = dev->dev;
 	struct b43_phy *phy = &dev->phy;
 
-	rf->with_padmix = 0;
+	rf->with_padmix = false;
 
 	if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
 	    dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@
 			return;
 		case 8:
 			rf->att = 0xA;
-			rf->with_padmix = 1;
+			rf->with_padmix = true;
 			return;
 		case 9:
 		default:
@@ -2389,7 +2389,7 @@
 	B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
 		    (phy->radio_ver != 0x2050)); /* Not supported anymore */
 
-	gphy->dyn_tssi_tbl = 0;
+	gphy->dyn_tssi_tbl = false;
 
 	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
 	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@
 							       pab1, pab2);
 		if (!gphy->tssi2dbm)
 			return -ENOMEM;
-		gphy->dyn_tssi_tbl = 1;
+		gphy->dyn_tssi_tbl = true;
 	} else {
 		/* pabX values not set in SPROM. */
 		gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@
 
 	if (gphy->dyn_tssi_tbl)
 		kfree(gphy->tssi2dbm);
-	gphy->dyn_tssi_tbl = 0;
+	gphy->dyn_tssi_tbl = false;
 	gphy->tssi2dbm = NULL;
 
 	kfree(gphy);
@@ -2531,10 +2531,10 @@
 	if (phy->rev == 1) {
 		/* Workaround: Temporarily disable gmode through the early init
 		 * phase, as the gmode stuff is not needed for phy rev 1 */
-		phy->gmode = 0;
+		phy->gmode = false;
 		b43_wireless_core_reset(dev, 0);
 		b43_phy_initg(dev);
-		phy->gmode = 1;
+		phy->gmode = true;
 		b43_wireless_core_reset(dev, 1);
 	}
 
@@ -2613,7 +2613,7 @@
 				      gphy->radio_off_context.rfover);
 			b43_phy_write(dev, B43_PHY_RFOVERVAL,
 				      gphy->radio_off_context.rfoverval);
-			gphy->radio_off_context.valid = 0;
+			gphy->radio_off_context.valid = false;
 		}
 		channel = phy->channel;
 		b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@
 		rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
 		gphy->radio_off_context.rfover = rfover;
 		gphy->radio_off_context.rfoverval = rfoverval;
-		gphy->radio_off_context.valid = 1;
+		gphy->radio_off_context.valid = true;
 		b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
 		b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
 	}
@@ -2711,10 +2711,10 @@
 	if ((phy->rev == 0) || (!phy->gmode))
 		return -ENODEV;
 
-	gphy->aci_wlan_automatic = 0;
+	gphy->aci_wlan_automatic = false;
 	switch (mode) {
 	case B43_INTERFMODE_AUTOWLAN:
-		gphy->aci_wlan_automatic = 1;
+		gphy->aci_wlan_automatic = true;
 		if (gphy->aci_enable)
 			mode = B43_INTERFMODE_MANUALWLAN;
 		else
@@ -2735,8 +2735,8 @@
 		b43_radio_interference_mitigation_disable(dev, currentmode);
 
 	if (mode == B43_INTERFMODE_NONE) {
-		gphy->aci_enable = 0;
-		gphy->aci_hw_rssi = 0;
+		gphy->aci_enable = false;
+		gphy->aci_hw_rssi = false;
 	} else
 		b43_radio_interference_mitigation_enable(dev, mode);
 	gphy->interfmode = mode;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index f93d66b..3ae2856 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -736,9 +736,9 @@
 	struct b43_phy_lp *lpphy = dev->phy.lp;
 
 	if (user)
-		lpphy->crs_usr_disable = 1;
+		lpphy->crs_usr_disable = true;
 	else
-		lpphy->crs_sys_disable = 1;
+		lpphy->crs_sys_disable = true;
 	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
 }
 
@@ -747,9 +747,9 @@
 	struct b43_phy_lp *lpphy = dev->phy.lp;
 
 	if (user)
-		lpphy->crs_usr_disable = 0;
+		lpphy->crs_usr_disable = false;
 	else
-		lpphy->crs_sys_disable = 0;
+		lpphy->crs_sys_disable = false;
 
 	if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
 		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b17d9b6..bf5a438 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -78,19 +78,6 @@
 	B43_NPHY_RSSI_TBD,
 };
 
-/* TODO: reorder functions */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
-						bool enable);
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
-					u8 *events, u8 *delays, u8 length);
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
-				       enum b43_nphy_rf_sequence seq);
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
-						u16 value, u8 core, bool off);
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
-						u16 value, u8 core);
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev);
-
 static inline bool b43_nphy_ipa(struct b43_wldev *dev)
 {
 	enum ieee80211_band band = b43_current_band(dev->wl);
@@ -98,57 +85,394 @@
 		(dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
 }
 
-void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
-{//TODO
-}
-
-static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
-{//TODO
-}
-
-static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
-							bool ignore_tssi)
-{//TODO
-	return B43_TXPWR_RES_DONE;
-}
-
-static void b43_chantab_radio_upload(struct b43_wldev *dev,
-				const struct b43_nphy_channeltab_entry_rev2 *e)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
 {
-	b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
-	b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
-	b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
-	b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
-	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
-
-	b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
-	b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
-	b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
-	b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
-	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
-
-	b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
-	b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
-	b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
-	b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
-	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
-
-	b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
-	b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
-	b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
-	b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
-	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
-
-	b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
-	b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
-	b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
-	b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
-	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
-
-	b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
-	b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (dev->phy.rev >= 6) {
+			if (dev->dev->chip_id == 47162)
+				return txpwrctrl_tx_gain_ipa_rev5;
+			return txpwrctrl_tx_gain_ipa_rev6;
+		} else if (dev->phy.rev >= 5) {
+			return txpwrctrl_tx_gain_ipa_rev5;
+		} else {
+			return txpwrctrl_tx_gain_ipa;
+		}
+	} else {
+		return txpwrctrl_tx_gain_ipa_5g;
+	}
 }
 
+/**************************************************
+ * RF (just without b43_nphy_rf_control_intc_override)
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+				       enum b43_nphy_rf_sequence seq)
+{
+	static const u16 trigger[] = {
+		[B43_RFSEQ_RX2TX]		= B43_NPHY_RFSEQTR_RX2TX,
+		[B43_RFSEQ_TX2RX]		= B43_NPHY_RFSEQTR_TX2RX,
+		[B43_RFSEQ_RESET2RX]		= B43_NPHY_RFSEQTR_RST2RX,
+		[B43_RFSEQ_UPDATE_GAINH]	= B43_NPHY_RFSEQTR_UPGH,
+		[B43_RFSEQ_UPDATE_GAINL]	= B43_NPHY_RFSEQTR_UPGL,
+		[B43_RFSEQ_UPDATE_GAINU]	= B43_NPHY_RFSEQTR_UPGU,
+	};
+	int i;
+	u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+	B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
+
+	b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+		    B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
+	b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
+	for (i = 0; i < 200; i++) {
+		if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
+			goto ok;
+		msleep(1);
+	}
+	b43err(dev->wl, "RF sequence status timeout\n");
+ok:
+	b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+						u16 value, u8 core, bool off)
+{
+	int i;
+	u8 index = fls(field);
+	u8 addr, en_addr, val_addr;
+	/* we expect only one bit set */
+	B43_WARN_ON(field & (~(1 << (index - 1))));
+
+	if (dev->phy.rev >= 3) {
+		const struct nphy_rf_control_override_rev3 *rf_ctrl;
+		for (i = 0; i < 2; i++) {
+			if (index == 0 || index == 16) {
+				b43err(dev->wl,
+					"Unsupported RF Ctrl Override call\n");
+				return;
+			}
+
+			rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+			en_addr = B43_PHY_N((i == 0) ?
+				rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+			val_addr = B43_PHY_N((i == 0) ?
+				rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+			if (off) {
+				b43_phy_mask(dev, en_addr, ~(field));
+				b43_phy_mask(dev, val_addr,
+						~(rf_ctrl->val_mask));
+			} else {
+				if (core == 0 || ((1 << i) & core)) {
+					b43_phy_set(dev, en_addr, field);
+					b43_phy_maskset(dev, val_addr,
+						~(rf_ctrl->val_mask),
+						(value << rf_ctrl->val_shift));
+				}
+			}
+		}
+	} else {
+		const struct nphy_rf_control_override_rev2 *rf_ctrl;
+		if (off) {
+			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+			value = 0;
+		} else {
+			b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+		}
+
+		for (i = 0; i < 2; i++) {
+			if (index <= 1 || index == 16) {
+				b43err(dev->wl,
+					"Unsupported RF Ctrl Override call\n");
+				return;
+			}
+
+			if (index == 2 || index == 10 ||
+			    (index >= 13 && index <= 15)) {
+				core = 1;
+			}
+
+			rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+			addr = B43_PHY_N((i == 0) ?
+				rf_ctrl->addr0 : rf_ctrl->addr1);
+
+			if ((1 << i) & core)
+				b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+						(value << rf_ctrl->shift));
+
+			b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+			b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+					B43_NPHY_RFCTL_CMD_START);
+			udelay(1);
+			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+		}
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+						u16 value, u8 core)
+{
+	u8 i, j;
+	u16 reg, tmp, val;
+
+	B43_WARN_ON(dev->phy.rev < 3);
+	B43_WARN_ON(field > 4);
+
+	for (i = 0; i < 2; i++) {
+		if ((core == 1 && i == 1) || (core == 2 && !i))
+			continue;
+
+		reg = (i == 0) ?
+			B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+		b43_phy_mask(dev, reg, 0xFBFF);
+
+		switch (field) {
+		case 0:
+			b43_phy_write(dev, reg, 0);
+			b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+			break;
+		case 1:
+			if (!i) {
+				b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+						0xFC3F, (value << 6));
+				b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+						0xFFFE, 1);
+				b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+						B43_NPHY_RFCTL_CMD_START);
+				for (j = 0; j < 100; j++) {
+					if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+						j = 0;
+						break;
+					}
+					udelay(10);
+				}
+				if (j)
+					b43err(dev->wl,
+						"intc override timeout\n");
+				b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+						0xFFFE);
+			} else {
+				b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+						0xFC3F, (value << 6));
+				b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+						0xFFFE, 1);
+				b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+						B43_NPHY_RFCTL_CMD_RXTX);
+				for (j = 0; j < 100; j++) {
+					if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+						j = 0;
+						break;
+					}
+					udelay(10);
+				}
+				if (j)
+					b43err(dev->wl,
+						"intc override timeout\n");
+				b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+						0xFFFE);
+			}
+			break;
+		case 2:
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+				tmp = 0x0020;
+				val = value << 5;
+			} else {
+				tmp = 0x0010;
+				val = value << 4;
+			}
+			b43_phy_maskset(dev, reg, ~tmp, val);
+			break;
+		case 3:
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+				tmp = 0x0001;
+				val = value;
+			} else {
+				tmp = 0x0004;
+				val = value << 2;
+			}
+			b43_phy_maskset(dev, reg, ~tmp, val);
+			break;
+		case 4:
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+				tmp = 0x0002;
+				val = value << 1;
+			} else {
+				tmp = 0x0008;
+				val = value << 3;
+			}
+			b43_phy_maskset(dev, reg, ~tmp, val);
+			break;
+		}
+	}
+}
+
+/**************************************************
+ * Various PHY ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
+					  const u16 *clip_st)
+{
+	b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+	b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+	clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+	clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+	u16 tmp;
+
+	if (dev->dev->core_rev == 16)
+		b43_mac_suspend(dev);
+
+	tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+	tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+		B43_NPHY_CLASSCTL_WAITEDEN);
+	tmp &= ~mask;
+	tmp |= (val & mask);
+	b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+	if (dev->dev->core_rev == 16)
+		b43_mac_enable(dev);
+
+	return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+	u16 bbcfg;
+
+	b43_phy_force_clock(dev, 1);
+	bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+	b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+	udelay(1);
+	b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+	b43_phy_force_clock(dev, 0);
+	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = phy->n;
+
+	if (enable) {
+		static const u16 clip[] = { 0xFFFF, 0xFFFF };
+		if (nphy->deaf_count++ == 0) {
+			nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+			b43_nphy_classifier(dev, 0x7, 0);
+			b43_nphy_read_clip_detection(dev, nphy->clip_state);
+			b43_nphy_write_clip_detection(dev, clip);
+		}
+		b43_nphy_reset_cca(dev);
+	} else {
+		if (--nphy->deaf_count == 0) {
+			b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+			b43_nphy_write_clip_detection(dev, nphy->clip_state);
+		}
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+
+	u8 i;
+	s16 tmp;
+	u16 data[4];
+	s16 gain[2];
+	u16 minmax[2];
+	static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 1);
+
+	if (nphy->gain_boost) {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			gain[0] = 6;
+			gain[1] = 6;
+		} else {
+			tmp = 40370 - 315 * dev->phy.channel;
+			gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+			tmp = 23242 - 224 * dev->phy.channel;
+			gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+		}
+	} else {
+		gain[0] = 0;
+		gain[1] = 0;
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (nphy->elna_gain_config) {
+			data[0] = 19 + gain[i];
+			data[1] = 25 + gain[i];
+			data[2] = 25 + gain[i];
+			data[3] = 25 + gain[i];
+		} else {
+			data[0] = lna_gain[0] + gain[i];
+			data[1] = lna_gain[1] + gain[i];
+			data[2] = lna_gain[2] + gain[i];
+			data[3] = lna_gain[3] + gain[i];
+		}
+		b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
+
+		minmax[i] = 23 + gain[i];
+	}
+
+	b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+				minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+	b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+				minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+					u8 *events, u8 *delays, u8 length)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	u8 i;
+	u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+	u16 offset1 = cmd << 4;
+	u16 offset2 = offset1 + 0x80;
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, true);
+
+	b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+	b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+	for (i = length; i < 16; i++) {
+		b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+		b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+	}
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/**************************************************
+ * Radio 0x2056
+ **************************************************/
+
 static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
 				const struct b43_nphy_channeltab_entry_rev3 *e)
 {
@@ -228,10 +552,98 @@
 static void b43_radio_2056_setup(struct b43_wldev *dev,
 				const struct b43_nphy_channeltab_entry_rev3 *e)
 {
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+	enum ieee80211_band band = b43_current_band(dev->wl);
+	u16 offset;
+	u8 i;
+	u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+
 	B43_WARN_ON(dev->phy.rev < 3);
 
 	b43_chantab_radio_2056_upload(dev, e);
-	/* TODO */
+	b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+
+	if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+	    b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+		if (dev->dev->chip_id == 0x4716) {
+			b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
+			b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
+		} else {
+			b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
+			b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
+		}
+	}
+	if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
+		b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
+	}
+
+	if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+		for (i = 0; i < 2; i++) {
+			offset = i ? B2056_TX1 : B2056_TX0;
+			if (dev->phy.rev >= 5) {
+				b43_radio_write(dev,
+					offset | B2056_TX_PADG_IDAC, 0xcc);
+
+				if (dev->dev->chip_id == 0x4716) {
+					bias = 0x40;
+					cbias = 0x45;
+					pag_boost = 0x5;
+					pgag_boost = 0x33;
+					mixg_boost = 0x55;
+				} else {
+					bias = 0x25;
+					cbias = 0x20;
+					pag_boost = 0x4;
+					pgag_boost = 0x03;
+					mixg_boost = 0x65;
+				}
+				padg_boost = 0x77;
+
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_IMAIN_STAT,
+					bias);
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_IAUX_STAT,
+					bias);
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_CASCBIAS,
+					cbias);
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_BOOST_TUNE,
+					pag_boost);
+				b43_radio_write(dev,
+					offset | B2056_TX_PGAG_BOOST_TUNE,
+					pgag_boost);
+				b43_radio_write(dev,
+					offset | B2056_TX_PADG_BOOST_TUNE,
+					padg_boost);
+				b43_radio_write(dev,
+					offset | B2056_TX_MIXG_BOOST_TUNE,
+					mixg_boost);
+			} else {
+				bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_IMAIN_STAT,
+					bias);
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_IAUX_STAT,
+					bias);
+				b43_radio_write(dev,
+					offset | B2056_TX_INTPAG_CASCBIAS,
+					0x30);
+			}
+			b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
+		}
+	} else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+		/* TODO */
+	}
+
 	udelay(50);
 	/* VCO calibration */
 	b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -242,285 +654,84 @@
 	udelay(300);
 }
 
-static void b43_chantab_phy_upload(struct b43_wldev *dev,
-				   const struct b43_phy_n_sfo_cfg *e)
+static void b43_radio_init2056_pre(struct b43_wldev *dev)
 {
-	b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
-	b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
-	b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
-	b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
-	b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
-	b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
+	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+		     ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+	/* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+		     B43_NPHY_RFCTL_CMD_OEPORFORCE);
+	b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+		    ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+	b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+		    B43_NPHY_RFCTL_CMD_CHIP0PU);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
-static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+static void b43_radio_init2056_post(struct b43_wldev *dev)
 {
-	struct b43_phy_n *nphy = dev->phy.n;
-	u8 i;
-	u16 bmask, val, tmp;
-	enum ieee80211_band band = b43_current_band(dev->wl);
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 1);
-
-	nphy->txpwrctrl = enable;
-	if (!enable) {
-		if (dev->phy.rev >= 3 &&
-		    (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
-		     (B43_NPHY_TXPCTL_CMD_COEFF |
-		      B43_NPHY_TXPCTL_CMD_HWPCTLEN |
-		      B43_NPHY_TXPCTL_CMD_PCTLEN))) {
-			/* We disable enabled TX pwr ctl, save it's state */
-			nphy->tx_pwr_idx[0] = b43_phy_read(dev,
-						B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
-			nphy->tx_pwr_idx[1] = b43_phy_read(dev,
-						B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
-		}
-
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
-		for (i = 0; i < 84; i++)
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
-		for (i = 0; i < 84; i++)
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
-		tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
-		if (dev->phy.rev >= 3)
-			tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
-		b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
-
-		if (dev->phy.rev >= 3) {
-			b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
-			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
-		} else {
-			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
-		}
-
-		if (dev->phy.rev == 2)
-			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
-				~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
-		else if (dev->phy.rev < 2)
-			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
-				~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
-
-		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
-			b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
-	} else {
-		b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
-				    nphy->adj_pwr_tbl);
-		b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
-				    nphy->adj_pwr_tbl);
-
-		bmask = B43_NPHY_TXPCTL_CMD_COEFF |
-			B43_NPHY_TXPCTL_CMD_HWPCTLEN;
-		/* wl does useless check for "enable" param here */
-		val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
-		if (dev->phy.rev >= 3) {
-			bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
-			if (val)
-				val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
-		}
-		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
-
-		if (band == IEEE80211_BAND_5GHZ) {
-			b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
-					~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
-			if (dev->phy.rev > 1)
-				b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
-						~B43_NPHY_TXPCTL_INIT_PIDXI1,
-						0x64);
-		}
-
-		if (dev->phy.rev >= 3) {
-			if (nphy->tx_pwr_idx[0] != 128 &&
-			    nphy->tx_pwr_idx[1] != 128) {
-				/* Recover TX pwr ctl state */
-				b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
-						~B43_NPHY_TXPCTL_CMD_INIT,
-						nphy->tx_pwr_idx[0]);
-				if (dev->phy.rev > 1)
-					b43_phy_maskset(dev,
-						B43_NPHY_TXPCTL_INIT,
-						~0xff, nphy->tx_pwr_idx[1]);
-			}
-		}
-
-		if (dev->phy.rev >= 3) {
-			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
-			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
-		} else {
-			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
-		}
-
-		if (dev->phy.rev == 2)
-			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
-		else if (dev->phy.rev < 2)
-			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
-
-		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
-			b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
-
-		if (b43_nphy_ipa(dev)) {
-			b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
-			b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
-		}
-	}
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
-static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
-	u8 txpi[2], bbmult, i;
-	u16 tmp, radio_gain, dac_gain;
-	u16 freq = dev->phy.channel_freq;
-	u32 txgain;
-	/* u32 gaintbl; rev3+ */
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 1);
-
-	if (dev->phy.rev >= 3) {
-		txpi[0] = 40;
-		txpi[1] = 40;
-	} else if (sprom->revision < 4) {
-		txpi[0] = 72;
-		txpi[1] = 72;
-	} else {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-			txpi[0] = sprom->txpid2g[0];
-			txpi[1] = sprom->txpid2g[1];
-		} else if (freq >= 4900 && freq < 5100) {
-			txpi[0] = sprom->txpid5gl[0];
-			txpi[1] = sprom->txpid5gl[1];
-		} else if (freq >= 5100 && freq < 5500) {
-			txpi[0] = sprom->txpid5g[0];
-			txpi[1] = sprom->txpid5g[1];
-		} else if (freq >= 5500) {
-			txpi[0] = sprom->txpid5gh[0];
-			txpi[1] = sprom->txpid5gh[1];
-		} else {
-			txpi[0] = 91;
-			txpi[1] = 91;
-		}
-	}
-
+	b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
+	b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
+	b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
+	msleep(1);
+	b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
+	b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
+	b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
 	/*
-	for (i = 0; i < 2; i++) {
-		nphy->txpwrindex[i].index_internal = txpi[i];
-		nphy->txpwrindex[i].index_internal_save = txpi[i];
-	}
+	if (nphy->init_por)
+		Call Radio 2056 Recalibrate
 	*/
-
-	for (i = 0; i < 2; i++) {
-		if (dev->phy.rev >= 3) {
-			/* FIXME: support 5GHz */
-			txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
-			radio_gain = (txgain >> 16) & 0x1FFFF;
-		} else {
-			txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
-			radio_gain = (txgain >> 16) & 0x1FFF;
-		}
-
-		dac_gain = (txgain >> 8) & 0x3F;
-		bbmult = txgain & 0xFF;
-
-		if (dev->phy.rev >= 3) {
-			if (i == 0)
-				b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
-			else
-				b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
-		} else {
-			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
-		}
-
-		if (i == 0)
-			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
-		else
-			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
-
-		b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
-
-		tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
-		if (i == 0)
-			tmp = (tmp & 0x00FF) | (bbmult << 8);
-		else
-			tmp = (tmp & 0xFF00) | bbmult;
-		b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
-
-		if (b43_nphy_ipa(dev)) {
-			u32 tmp32;
-			u16 reg = (i == 0) ?
-				B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
-			tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
-			b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
-			b43_phy_set(dev, reg, 0x4);
-		}
-	}
-
-	b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 0);
 }
 
-static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
 {
-	struct b43_phy *phy = &dev->phy;
+	b43_radio_init2056_pre(dev);
+	b2056_upload_inittabs(dev, 0, 0);
+	b43_radio_init2056_post(dev);
+}
 
-	const u32 *table = NULL;
-#if 0
-	TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
-	u32 rfpwr_offset;
-	u8 pga_gain;
-	int i;
-#endif
+/**************************************************
+ * Radio 0x2055
+ **************************************************/
 
-	if (phy->rev >= 3) {
-		if (b43_nphy_ipa(dev)) {
-			table = b43_nphy_get_ipa_gain_table(dev);
-		} else {
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-				if (phy->rev == 3)
-					table = b43_ntab_tx_gain_rev3_5ghz;
-				if (phy->rev == 4)
-					table = b43_ntab_tx_gain_rev4_5ghz;
-				else
-					table = b43_ntab_tx_gain_rev5plus_5ghz;
-			} else {
-				table = b43_ntab_tx_gain_rev3plus_2ghz;
-			}
-		}
-	} else {
-		table = b43_ntab_tx_gain_rev0_1_2;
-	}
-	b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
-	b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+static void b43_chantab_radio_upload(struct b43_wldev *dev,
+				const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+	b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+	b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+	b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+	b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
 
-	if (phy->rev >= 3) {
-#if 0
-		nphy->gmval = (table[0] >> 16) & 0x7000;
+	b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+	b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+	b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+	b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
 
-		for (i = 0; i < 128; i++) {
-			pga_gain = (table[i] >> 24) & 0xF;
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
-				rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
-			else
-				rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
-			b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
-				       rfpwr_offset);
-			b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
-				       rfpwr_offset);
-		}
-#endif
-	}
+	b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+	b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+	b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+	b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+	b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+	b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+	b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+	b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+	b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+	b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+	b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+	b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+	b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+	b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+	b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
@@ -622,1089 +833,9 @@
 	b43_radio_init2055_post(dev);
 }
 
-static void b43_radio_init2056_pre(struct b43_wldev *dev)
-{
-	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
-		     ~B43_NPHY_RFCTL_CMD_CHIP0PU);
-	/* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
-	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
-		     B43_NPHY_RFCTL_CMD_OEPORFORCE);
-	b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
-		    ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
-	b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
-		    B43_NPHY_RFCTL_CMD_CHIP0PU);
-}
-
-static void b43_radio_init2056_post(struct b43_wldev *dev)
-{
-	b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
-	b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
-	b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
-	msleep(1);
-	b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
-	b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
-	b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
-	/*
-	if (nphy->init_por)
-		Call Radio 2056 Recalibrate
-	*/
-}
-
-/*
- * Initialize a Broadcom 2056 N-radio
- * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
- */
-static void b43_radio_init2056(struct b43_wldev *dev)
-{
-	b43_radio_init2056_pre(dev);
-	b2056_upload_inittabs(dev, 0, 0);
-	b43_radio_init2056_post(dev);
-}
-
-/*
- * Upload the N-PHY tables.
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
- */
-static void b43_nphy_tables_init(struct b43_wldev *dev)
-{
-	if (dev->phy.rev < 3)
-		b43_nphy_rev0_1_2_tables_init(dev);
-	else
-		b43_nphy_rev3plus_tables_init(dev);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
-static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	enum ieee80211_band band;
-	u16 tmp;
-
-	if (!enable) {
-		nphy->rfctrl_intc1_save = b43_phy_read(dev,
-						       B43_NPHY_RFCTL_INTC1);
-		nphy->rfctrl_intc2_save = b43_phy_read(dev,
-						       B43_NPHY_RFCTL_INTC2);
-		band = b43_current_band(dev->wl);
-		if (dev->phy.rev >= 3) {
-			if (band == IEEE80211_BAND_5GHZ)
-				tmp = 0x600;
-			else
-				tmp = 0x480;
-		} else {
-			if (band == IEEE80211_BAND_5GHZ)
-				tmp = 0x180;
-			else
-				tmp = 0x120;
-		}
-		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
-		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
-	} else {
-		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
-				nphy->rfctrl_intc1_save);
-		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
-				nphy->rfctrl_intc2_save);
-	}
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
-static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
-{
-	u16 tmp;
-
-	if (dev->phy.rev >= 3) {
-		if (b43_nphy_ipa(dev)) {
-			tmp = 4;
-			b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
-			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
-		}
-
-		tmp = 1;
-		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
-			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
-	}
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
-{
-	u16 bbcfg;
-
-	b43_phy_force_clock(dev, 1);
-	bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
-	b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
-	udelay(1);
-	b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
-	b43_phy_force_clock(dev, 0);
-	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
-static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
-{
-	u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
-
-	mimocfg |= B43_NPHY_MIMOCFG_AUTO;
-	if (preamble == 1)
-		mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
-	else
-		mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
-
-	b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
-static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-
-	bool override = false;
-	u16 chain = 0x33;
-
-	if (nphy->txrx_chain == 0) {
-		chain = 0x11;
-		override = true;
-	} else if (nphy->txrx_chain == 1) {
-		chain = 0x22;
-		override = true;
-	}
-
-	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
-			~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
-			chain);
-
-	if (override)
-		b43_phy_set(dev, B43_NPHY_RFSEQMODE,
-				B43_NPHY_RFSEQMODE_CAOVER);
-	else
-		b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
-				~B43_NPHY_RFSEQMODE_CAOVER);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
-static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
-				u16 samps, u8 time, bool wait)
-{
-	int i;
-	u16 tmp;
-
-	b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
-	b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
-	if (wait)
-		b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
-	else
-		b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
-
-	b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
-
-	for (i = 1000; i; i--) {
-		tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
-		if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
-			est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
-					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
-			est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
-					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
-			est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
-					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
-
-			est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
-					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
-			est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
-					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
-			est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
-					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
-			return;
-		}
-		udelay(10);
-	}
-	memset(est, 0, sizeof(*est));
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
-static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
-					struct b43_phy_n_iq_comp *pcomp)
-{
-	if (write) {
-		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
-		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
-		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
-		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
-	} else {
-		pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
-		pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
-		pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
-		pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
-	}
-}
-
-#if 0
-/* Ready but not used anywhere */
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
-static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
-{
-	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
-	b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
-	if (core == 0) {
-		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
-		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
-	} else {
-		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
-		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
-	}
-	b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
-	b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
-	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
-	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
-	b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
-	b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
-	b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
-	b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
-static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
-{
-	u8 rxval, txval;
-	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
-	regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
-	if (core == 0) {
-		regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
-		regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
-	} else {
-		regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
-		regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
-	}
-	regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
-	regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
-	regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
-	regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
-	regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
-	regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
-	regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
-	regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
-
-	b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
-	b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
-
-	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
-			~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
-			((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
-	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
-			((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
-	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
-			(core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
-	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
-			(core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
-
-	if (core == 0) {
-		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
-		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
-	} else {
-		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
-		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
-	}
-
-	b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
-	b43_nphy_rf_control_override(dev, 8, 0, 3, false);
-	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
-
-	if (core == 0) {
-		rxval = 1;
-		txval = 8;
-	} else {
-		rxval = 4;
-		txval = 2;
-	}
-	b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
-	b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
-}
-#endif
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
-static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
-{
-	int i;
-	s32 iq;
-	u32 ii;
-	u32 qq;
-	int iq_nbits, qq_nbits;
-	int arsh, brsh;
-	u16 tmp, a, b;
-
-	struct nphy_iq_est est;
-	struct b43_phy_n_iq_comp old;
-	struct b43_phy_n_iq_comp new = { };
-	bool error = false;
-
-	if (mask == 0)
-		return;
-
-	b43_nphy_rx_iq_coeffs(dev, false, &old);
-	b43_nphy_rx_iq_coeffs(dev, true, &new);
-	b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
-	new = old;
-
-	for (i = 0; i < 2; i++) {
-		if (i == 0 && (mask & 1)) {
-			iq = est.iq0_prod;
-			ii = est.i0_pwr;
-			qq = est.q0_pwr;
-		} else if (i == 1 && (mask & 2)) {
-			iq = est.iq1_prod;
-			ii = est.i1_pwr;
-			qq = est.q1_pwr;
-		} else {
-			continue;
-		}
-
-		if (ii + qq < 2) {
-			error = true;
-			break;
-		}
-
-		iq_nbits = fls(abs(iq));
-		qq_nbits = fls(qq);
-
-		arsh = iq_nbits - 20;
-		if (arsh >= 0) {
-			a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
-			tmp = ii >> arsh;
-		} else {
-			a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
-			tmp = ii << -arsh;
-		}
-		if (tmp == 0) {
-			error = true;
-			break;
-		}
-		a /= tmp;
-
-		brsh = qq_nbits - 11;
-		if (brsh >= 0) {
-			b = (qq << (31 - qq_nbits));
-			tmp = ii >> brsh;
-		} else {
-			b = (qq << (31 - qq_nbits));
-			tmp = ii << -brsh;
-		}
-		if (tmp == 0) {
-			error = true;
-			break;
-		}
-		b = int_sqrt(b / tmp - a * a) - (1 << 10);
-
-		if (i == 0 && (mask & 0x1)) {
-			if (dev->phy.rev >= 3) {
-				new.a0 = a & 0x3FF;
-				new.b0 = b & 0x3FF;
-			} else {
-				new.a0 = b & 0x3FF;
-				new.b0 = a & 0x3FF;
-			}
-		} else if (i == 1 && (mask & 0x2)) {
-			if (dev->phy.rev >= 3) {
-				new.a1 = a & 0x3FF;
-				new.b1 = b & 0x3FF;
-			} else {
-				new.a1 = b & 0x3FF;
-				new.b1 = a & 0x3FF;
-			}
-		}
-	}
-
-	if (error)
-		new = old;
-
-	b43_nphy_rx_iq_coeffs(dev, true, &new);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
-static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
-{
-	u16 array[4];
-	b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
-
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
-	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
-					  const u16 *clip_st)
-{
-	b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
-	b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
-{
-	clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
-	clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
-static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
-{
-	if (dev->phy.rev >= 3) {
-		if (!init)
-			return;
-		if (0 /* FIXME */) {
-			b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
-			b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
-			b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
-			b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
-		}
-	} else {
-		b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
-		b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
-
-		switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
-		case B43_BUS_BCMA:
-			bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
-						 0xFC00, 0xFC00);
-			break;
-#endif
-#ifdef CONFIG_B43_SSB
-		case B43_BUS_SSB:
-			ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
-						0xFC00, 0xFC00);
-			break;
-#endif
-		}
-
-		b43_write32(dev, B43_MMIO_MACCTL,
-			b43_read32(dev, B43_MMIO_MACCTL) &
-			~B43_MACCTL_GPOUTSMSK);
-		b43_write16(dev, B43_MMIO_GPIO_MASK,
-			b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
-		b43_write16(dev, B43_MMIO_GPIO_CONTROL,
-			b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
-
-		if (init) {
-			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
-			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
-			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
-			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-		}
-	}
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
-static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
-{
-	u16 tmp;
-
-	if (dev->dev->core_rev == 16)
-		b43_mac_suspend(dev);
-
-	tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
-	tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
-		B43_NPHY_CLASSCTL_WAITEDEN);
-	tmp &= ~mask;
-	tmp |= (val & mask);
-	b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
-
-	if (dev->dev->core_rev == 16)
-		b43_mac_enable(dev);
-
-	return tmp;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
-{
-	struct b43_phy *phy = &dev->phy;
-	struct b43_phy_n *nphy = phy->n;
-
-	if (enable) {
-		static const u16 clip[] = { 0xFFFF, 0xFFFF };
-		if (nphy->deaf_count++ == 0) {
-			nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
-			b43_nphy_classifier(dev, 0x7, 0);
-			b43_nphy_read_clip_detection(dev, nphy->clip_state);
-			b43_nphy_write_clip_detection(dev, clip);
-		}
-		b43_nphy_reset_cca(dev);
-	} else {
-		if (--nphy->deaf_count == 0) {
-			b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
-			b43_nphy_write_clip_detection(dev, nphy->clip_state);
-		}
-	}
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
-static void b43_nphy_stop_playback(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	u16 tmp;
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 1);
-
-	tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
-	if (tmp & 0x1)
-		b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
-	else if (tmp & 0x2)
-		b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
-
-	b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
-
-	if (nphy->bb_mult_save & 0x80000000) {
-		tmp = nphy->bb_mult_save & 0xFFFF;
-		b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
-		nphy->bb_mult_save = 0;
-	}
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
-static void b43_nphy_spur_workaround(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-
-	u8 channel = dev->phy.channel;
-	int tone[2] = { 57, 58 };
-	u32 noise[2] = { 0x3FF, 0x3FF };
-
-	B43_WARN_ON(dev->phy.rev < 3);
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 1);
-
-	if (nphy->gband_spurwar_en) {
-		/* TODO: N PHY Adjust Analog Pfbw (7) */
-		if (channel == 11 && dev->phy.is_40mhz)
-			; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
-		else
-			; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
-		/* TODO: N PHY Adjust CRS Min Power (0x1E) */
-	}
-
-	if (nphy->aband_spurwar_en) {
-		if (channel == 54) {
-			tone[0] = 0x20;
-			noise[0] = 0x25F;
-		} else if (channel == 38 || channel == 102 || channel == 118) {
-			if (0 /* FIXME */) {
-				tone[0] = 0x20;
-				noise[0] = 0x21F;
-			} else {
-				tone[0] = 0;
-				noise[0] = 0;
-			}
-		} else if (channel == 134) {
-			tone[0] = 0x20;
-			noise[0] = 0x21F;
-		} else if (channel == 151) {
-			tone[0] = 0x10;
-			noise[0] = 0x23F;
-		} else if (channel == 153 || channel == 161) {
-			tone[0] = 0x30;
-			noise[0] = 0x23F;
-		} else {
-			tone[0] = 0;
-			noise[0] = 0;
-		}
-
-		if (!tone[0] && !noise[0])
-			; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
-		else
-			; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
-	}
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
-static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-
-	u8 i;
-	s16 tmp;
-	u16 data[4];
-	s16 gain[2];
-	u16 minmax[2];
-	static const u16 lna_gain[4] = { -2, 10, 19, 25 };
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 1);
-
-	if (nphy->gain_boost) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-			gain[0] = 6;
-			gain[1] = 6;
-		} else {
-			tmp = 40370 - 315 * dev->phy.channel;
-			gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
-			tmp = 23242 - 224 * dev->phy.channel;
-			gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
-		}
-	} else {
-		gain[0] = 0;
-		gain[1] = 0;
-	}
-
-	for (i = 0; i < 2; i++) {
-		if (nphy->elna_gain_config) {
-			data[0] = 19 + gain[i];
-			data[1] = 25 + gain[i];
-			data[2] = 25 + gain[i];
-			data[3] = 25 + gain[i];
-		} else {
-			data[0] = lna_gain[0] + gain[i];
-			data[1] = lna_gain[1] + gain[i];
-			data[2] = lna_gain[2] + gain[i];
-			data[3] = lna_gain[3] + gain[i];
-		}
-		b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
-
-		minmax[i] = 23 + gain[i];
-	}
-
-	b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
-				minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
-	b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
-				minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
-static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
-	/* PHY rev 0, 1, 2 */
-	u8 i, j;
-	u8 code;
-	u16 tmp;
-	u8 rfseq_events[3] = { 6, 8, 7 };
-	u8 rfseq_delays[3] = { 10, 30, 1 };
-
-	/* PHY rev >= 3 */
-	bool ghz5;
-	bool ext_lna;
-	u16 rssi_gain;
-	struct nphy_gain_ctl_workaround_entry *e;
-	u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
-	u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
-
-	if (dev->phy.rev >= 3) {
-		/* Prepare values */
-		ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
-			& B43_NPHY_BANDCTL_5GHZ;
-		ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
-		e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
-		if (ghz5 && dev->phy.rev >= 5)
-			rssi_gain = 0x90;
-		else
-			rssi_gain = 0x50;
-
-		b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
-
-		/* Set Clip 2 detect */
-		b43_phy_set(dev, B43_NPHY_C1_CGAINI,
-				B43_NPHY_C1_CGAINI_CL2DETECT);
-		b43_phy_set(dev, B43_NPHY_C2_CGAINI,
-				B43_NPHY_C2_CGAINI_CL2DETECT);
-
-		b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
-				0x17);
-		b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
-				0x17);
-		b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
-		b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
-		b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
-		b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
-		b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
-				rssi_gain);
-		b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
-				rssi_gain);
-		b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
-				0x17);
-		b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
-				0x17);
-		b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
-		b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
-
-		b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
-		b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
-		b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
-		b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
-		b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
-		b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
-
-		b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
-		b43_phy_write(dev, 0x2A7, e->init_gain);
-		b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
-					e->rfseq_init);
-		b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
-
-		/* TODO: check defines. Do not match variables names */
-		b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
-		b43_phy_write(dev, 0x2A9, e->cliphi_gain);
-		b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
-		b43_phy_write(dev, 0x2AB, e->clipmd_gain);
-		b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
-		b43_phy_write(dev, 0x2AD, e->cliplo_gain);
-
-		b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
-		b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
-		b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
-		b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
-		b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
-		b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
-				~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
-		b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
-				~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
-		b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
-	} else {
-		/* Set Clip 2 detect */
-		b43_phy_set(dev, B43_NPHY_C1_CGAINI,
-				B43_NPHY_C1_CGAINI_CL2DETECT);
-		b43_phy_set(dev, B43_NPHY_C2_CGAINI,
-				B43_NPHY_C2_CGAINI_CL2DETECT);
-
-		/* Set narrowband clip threshold */
-		b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
-		b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
-
-		if (!dev->phy.is_40mhz) {
-			/* Set dwell lengths */
-			b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
-			b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
-			b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
-			b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
-		}
-
-		/* Set wideband clip 2 threshold */
-		b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
-				~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
-				21);
-		b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
-				~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
-				21);
-
-		if (!dev->phy.is_40mhz) {
-			b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
-				~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
-			b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
-				~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
-			b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
-				~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
-			b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
-				~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
-		}
-
-		b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
-
-		if (nphy->gain_boost) {
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
-			    dev->phy.is_40mhz)
-				code = 4;
-			else
-				code = 5;
-		} else {
-			code = dev->phy.is_40mhz ? 6 : 7;
-		}
-
-		/* Set HPVGA2 index */
-		b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
-				~B43_NPHY_C1_INITGAIN_HPVGA2,
-				code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
-		b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
-				~B43_NPHY_C2_INITGAIN_HPVGA2,
-				code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
-
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
-		/* specs say about 2 loops, but wl does 4 */
-		for (i = 0; i < 4; i++)
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
-							(code << 8 | 0x7C));
-
-		b43_nphy_adjust_lna_gain_table(dev);
-
-		if (nphy->elna_gain_config) {
-			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
-			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
-			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
-			/* specs say about 2 loops, but wl does 4 */
-			for (i = 0; i < 4; i++)
-				b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
-							(code << 8 | 0x74));
-		}
-
-		if (dev->phy.rev == 2) {
-			for (i = 0; i < 4; i++) {
-				b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
-						(0x0400 * i) + 0x0020);
-				for (j = 0; j < 21; j++) {
-					tmp = j * (i < 2 ? 3 : 1);
-					b43_phy_write(dev,
-						B43_NPHY_TABLE_DATALO, tmp);
-				}
-			}
-		}
-
-		b43_nphy_set_rf_sequence(dev, 5,
-				rfseq_events, rfseq_delays, 3);
-		b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
-			~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
-			0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
-
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
-			b43_phy_maskset(dev, B43_PHY_N(0xC5D),
-					0xFF80, 4);
-	}
-}
-
-static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
-	/* TX to RX */
-	u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
-	u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
-	/* RX to TX */
-	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
-					0x1F };
-	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
-	u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
-	u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
-
-	u16 tmp16;
-	u32 tmp32;
-
-	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
-	tmp32 &= 0xffffff;
-	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
-
-	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
-	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
-	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
-	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
-	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
-	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
-
-	b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
-	b43_phy_write(dev, 0x2AE, 0x000C);
-
-	/* TX to RX */
-	b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
-
-	/* RX to TX */
-	if (b43_nphy_ipa(dev))
-		b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
-					 rx2tx_delays_ipa, 9);
-	if (nphy->hw_phyrxchain != 3 &&
-	    nphy->hw_phyrxchain != nphy->hw_phytxchain) {
-		if (b43_nphy_ipa(dev)) {
-			rx2tx_delays[5] = 59;
-			rx2tx_delays[6] = 1;
-			rx2tx_events[7] = 0x1F;
-		}
-		b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
-	}
-
-	tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
-		0x2 : 0x9C40;
-	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
-
-	b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
-
-	b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
-	b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
-
-	b43_nphy_gain_ctrl_workarounds(dev);
-
-	b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
-	b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
-
-	/* TODO */
-
-	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
-	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
-	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
-	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
-	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
-	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
-	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
-	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
-	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
-	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
-
-	/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
-
-	if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
-	     b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
-	    (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
-	     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
-		tmp32 = 0x00088888;
-	else
-		tmp32 = 0x88888888;
-	b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
-	b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
-	b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
-
-	if (dev->phy.rev == 4 &&
-		b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-		b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
-				0x70);
-		b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
-				0x70);
-	}
-
-	b43_phy_write(dev, 0x224, 0x039C);
-	b43_phy_write(dev, 0x225, 0x0357);
-	b43_phy_write(dev, 0x226, 0x0317);
-	b43_phy_write(dev, 0x227, 0x02D7);
-	b43_phy_write(dev, 0x228, 0x039C);
-	b43_phy_write(dev, 0x229, 0x0357);
-	b43_phy_write(dev, 0x22A, 0x0317);
-	b43_phy_write(dev, 0x22B, 0x02D7);
-	b43_phy_write(dev, 0x22C, 0x039C);
-	b43_phy_write(dev, 0x22D, 0x0357);
-	b43_phy_write(dev, 0x22E, 0x0317);
-	b43_phy_write(dev, 0x22F, 0x02D7);
-}
-
-static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
-{
-	struct ssb_sprom *sprom = dev->dev->bus_sprom;
-	struct b43_phy *phy = &dev->phy;
-	struct b43_phy_n *nphy = phy->n;
-
-	u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
-	u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
-
-	u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
-	u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
-
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
-	    nphy->band5g_pwrgain) {
-		b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
-		b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
-	} else {
-		b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
-		b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
-	}
-
-	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
-	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
-	b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
-	b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
-
-	if (dev->phy.rev < 2) {
-		b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
-	}
-
-	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
-	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
-	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
-	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
-	if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
-	    dev->dev->board_type == 0x8B) {
-		delays1[0] = 0x1;
-		delays1[5] = 0x14;
-	}
-	b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
-	b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
-
-	b43_nphy_gain_ctrl_workarounds(dev);
-
-	if (dev->phy.rev < 2) {
-		if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
-			b43_hf_write(dev, b43_hf_read(dev) |
-					B43_HF_MLADVW);
-	} else if (dev->phy.rev == 2) {
-		b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
-		b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
-	}
-
-	if (dev->phy.rev < 2)
-		b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
-				~B43_NPHY_SCRAM_SIGCTL_SCM);
-
-	/* Set phase track alpha and beta */
-	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
-	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
-	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
-	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
-	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
-	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
-
-	b43_phy_mask(dev, B43_NPHY_PIL_DW1,
-			~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
-	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
-	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
-	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
-
-	if (dev->phy.rev == 2)
-		b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
-				B43_NPHY_FINERX2_CGC_DECGC);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
-static void b43_nphy_workarounds(struct b43_wldev *dev)
-{
-	struct b43_phy *phy = &dev->phy;
-	struct b43_phy_n *nphy = phy->n;
-
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
-		b43_nphy_classifier(dev, 1, 0);
-	else
-		b43_nphy_classifier(dev, 1, 1);
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 1);
-
-	b43_phy_set(dev, B43_NPHY_IQFLIP,
-		    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
-
-	if (dev->phy.rev >= 3)
-		b43_nphy_workarounds_rev3plus(dev);
-	else
-		b43_nphy_workarounds_rev1_2(dev);
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, 0);
-}
+/**************************************************
+ * Samples
+ **************************************************/
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
 static int b43_nphy_load_samples(struct b43_wldev *dev,
@@ -1825,7 +956,7 @@
 			b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
 	}
 	for (i = 0; i < 100; i++) {
-		if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+		if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) {
 			i = 0;
 			break;
 		}
@@ -1837,333 +968,9 @@
 	b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
 }
 
-/*
- * Transmits a known value for LO calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
- */
-static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
-				bool iqmode, bool dac_test)
-{
-	u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
-	if (samp == 0)
-		return -1;
-	b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
-	return 0;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
-static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	int i, j;
-	u32 tmp;
-	u32 cur_real, cur_imag, real_part, imag_part;
-
-	u16 buffer[7];
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, true);
-
-	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
-
-	for (i = 0; i < 2; i++) {
-		tmp = ((buffer[i * 2] & 0x3FF) << 10) |
-			(buffer[i * 2 + 1] & 0x3FF);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
-				(((i + 26) << 10) | 320));
-		for (j = 0; j < 128; j++) {
-			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
-					((tmp >> 16) & 0xFFFF));
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
-					(tmp & 0xFFFF));
-		}
-	}
-
-	for (i = 0; i < 2; i++) {
-		tmp = buffer[5 + i];
-		real_part = (tmp >> 8) & 0xFF;
-		imag_part = (tmp & 0xFF);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
-				(((i + 26) << 10) | 448));
-
-		if (dev->phy.rev >= 3) {
-			cur_real = real_part;
-			cur_imag = imag_part;
-			tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
-		}
-
-		for (j = 0; j < 128; j++) {
-			if (dev->phy.rev < 3) {
-				cur_real = (real_part * loscale[j] + 128) >> 8;
-				cur_imag = (imag_part * loscale[j] + 128) >> 8;
-				tmp = ((cur_real & 0xFF) << 8) |
-					(cur_imag & 0xFF);
-			}
-			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
-					((tmp >> 16) & 0xFFFF));
-			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
-					(tmp & 0xFFFF));
-		}
-	}
-
-	if (dev->phy.rev >= 3) {
-		b43_shm_write16(dev, B43_SHM_SHARED,
-				B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
-		b43_shm_write16(dev, B43_SHM_SHARED,
-				B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
-	}
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
-					u8 *events, u8 *delays, u8 length)
-{
-	struct b43_phy_n *nphy = dev->phy.n;
-	u8 i;
-	u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
-	u16 offset1 = cmd << 4;
-	u16 offset2 = offset1 + 0x80;
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, true);
-
-	b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
-	b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
-
-	for (i = length; i < 16; i++) {
-		b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
-		b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
-	}
-
-	if (nphy->hang_avoid)
-		b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
-				       enum b43_nphy_rf_sequence seq)
-{
-	static const u16 trigger[] = {
-		[B43_RFSEQ_RX2TX]		= B43_NPHY_RFSEQTR_RX2TX,
-		[B43_RFSEQ_TX2RX]		= B43_NPHY_RFSEQTR_TX2RX,
-		[B43_RFSEQ_RESET2RX]		= B43_NPHY_RFSEQTR_RST2RX,
-		[B43_RFSEQ_UPDATE_GAINH]	= B43_NPHY_RFSEQTR_UPGH,
-		[B43_RFSEQ_UPDATE_GAINL]	= B43_NPHY_RFSEQTR_UPGL,
-		[B43_RFSEQ_UPDATE_GAINU]	= B43_NPHY_RFSEQTR_UPGU,
-	};
-	int i;
-	u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
-
-	B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
-
-	b43_phy_set(dev, B43_NPHY_RFSEQMODE,
-		    B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
-	b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
-	for (i = 0; i < 200; i++) {
-		if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
-			goto ok;
-		msleep(1);
-	}
-	b43err(dev->wl, "RF sequence status timeout\n");
-ok:
-	b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
-						u16 value, u8 core, bool off)
-{
-	int i;
-	u8 index = fls(field);
-	u8 addr, en_addr, val_addr;
-	/* we expect only one bit set */
-	B43_WARN_ON(field & (~(1 << (index - 1))));
-
-	if (dev->phy.rev >= 3) {
-		const struct nphy_rf_control_override_rev3 *rf_ctrl;
-		for (i = 0; i < 2; i++) {
-			if (index == 0 || index == 16) {
-				b43err(dev->wl,
-					"Unsupported RF Ctrl Override call\n");
-				return;
-			}
-
-			rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
-			en_addr = B43_PHY_N((i == 0) ?
-				rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
-			val_addr = B43_PHY_N((i == 0) ?
-				rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
-
-			if (off) {
-				b43_phy_mask(dev, en_addr, ~(field));
-				b43_phy_mask(dev, val_addr,
-						~(rf_ctrl->val_mask));
-			} else {
-				if (core == 0 || ((1 << core) & i) != 0) {
-					b43_phy_set(dev, en_addr, field);
-					b43_phy_maskset(dev, val_addr,
-						~(rf_ctrl->val_mask),
-						(value << rf_ctrl->val_shift));
-				}
-			}
-		}
-	} else {
-		const struct nphy_rf_control_override_rev2 *rf_ctrl;
-		if (off) {
-			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
-			value = 0;
-		} else {
-			b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
-		}
-
-		for (i = 0; i < 2; i++) {
-			if (index <= 1 || index == 16) {
-				b43err(dev->wl,
-					"Unsupported RF Ctrl Override call\n");
-				return;
-			}
-
-			if (index == 2 || index == 10 ||
-			    (index >= 13 && index <= 15)) {
-				core = 1;
-			}
-
-			rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
-			addr = B43_PHY_N((i == 0) ?
-				rf_ctrl->addr0 : rf_ctrl->addr1);
-
-			if ((core & (1 << i)) != 0)
-				b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
-						(value << rf_ctrl->shift));
-
-			b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
-			b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
-					B43_NPHY_RFCTL_CMD_START);
-			udelay(1);
-			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
-		}
-	}
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
-						u16 value, u8 core)
-{
-	u8 i, j;
-	u16 reg, tmp, val;
-
-	B43_WARN_ON(dev->phy.rev < 3);
-	B43_WARN_ON(field > 4);
-
-	for (i = 0; i < 2; i++) {
-		if ((core == 1 && i == 1) || (core == 2 && !i))
-			continue;
-
-		reg = (i == 0) ?
-			B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
-		b43_phy_mask(dev, reg, 0xFBFF);
-
-		switch (field) {
-		case 0:
-			b43_phy_write(dev, reg, 0);
-			b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
-			break;
-		case 1:
-			if (!i) {
-				b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
-						0xFC3F, (value << 6));
-				b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
-						0xFFFE, 1);
-				b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
-						B43_NPHY_RFCTL_CMD_START);
-				for (j = 0; j < 100; j++) {
-					if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
-						j = 0;
-						break;
-					}
-					udelay(10);
-				}
-				if (j)
-					b43err(dev->wl,
-						"intc override timeout\n");
-				b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
-						0xFFFE);
-			} else {
-				b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
-						0xFC3F, (value << 6));
-				b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
-						0xFFFE, 1);
-				b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
-						B43_NPHY_RFCTL_CMD_RXTX);
-				for (j = 0; j < 100; j++) {
-					if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
-						j = 0;
-						break;
-					}
-					udelay(10);
-				}
-				if (j)
-					b43err(dev->wl,
-						"intc override timeout\n");
-				b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
-						0xFFFE);
-			}
-			break;
-		case 2:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-				tmp = 0x0020;
-				val = value << 5;
-			} else {
-				tmp = 0x0010;
-				val = value << 4;
-			}
-			b43_phy_maskset(dev, reg, ~tmp, val);
-			break;
-		case 3:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-				tmp = 0x0001;
-				val = value;
-			} else {
-				tmp = 0x0004;
-				val = value << 2;
-			}
-			b43_phy_maskset(dev, reg, ~tmp, val);
-			break;
-		case 4:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-				tmp = 0x0002;
-				val = value << 1;
-			} else {
-				tmp = 0x0008;
-				val = value << 3;
-			}
-			b43_phy_maskset(dev, reg, ~tmp, val);
-			break;
-		}
-	}
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
-static void b43_nphy_bphy_init(struct b43_wldev *dev)
-{
-	unsigned int i;
-	u16 val;
-
-	val = 0x1E1F;
-	for (i = 0; i < 16; i++) {
-		b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
-		val -= 0x202;
-	}
-	val = 0x3E3F;
-	for (i = 0; i < 16; i++) {
-		b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
-		val -= 0x202;
-	}
-	b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
-}
+/**************************************************
+ * RSSI
+ **************************************************/
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
 static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
@@ -2233,67 +1040,6 @@
 		b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
 }
 
-static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
-{
-	u16 val;
-
-	if (type < 3)
-		val = 0;
-	else if (type == 6)
-		val = 1;
-	else if (type == 3)
-		val = 2;
-	else
-		val = 3;
-
-	val = (val << 12) | (val << 14);
-	b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
-	b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
-
-	if (type < 3) {
-		b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
-				(type + 1) << 4);
-		b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
-				(type + 1) << 4);
-	}
-
-	if (code == 0) {
-		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
-		if (type < 3) {
-			b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
-				~(B43_NPHY_RFCTL_CMD_RXEN |
-				  B43_NPHY_RFCTL_CMD_CORESEL));
-			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
-				~(0x1 << 12 |
-				  0x1 << 5 |
-				  0x1 << 1 |
-				  0x1));
-			b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
-				~B43_NPHY_RFCTL_CMD_START);
-			udelay(20);
-			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
-		}
-	} else {
-		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
-		if (type < 3) {
-			b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
-				~(B43_NPHY_RFCTL_CMD_RXEN |
-				  B43_NPHY_RFCTL_CMD_CORESEL),
-				(B43_NPHY_RFCTL_CMD_RXEN |
-				 code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
-			b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
-				(0x1 << 12 |
-				  0x1 << 5 |
-				  0x1 << 1 |
-				  0x1));
-			b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
-				B43_NPHY_RFCTL_CMD_START);
-			udelay(20);
-			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
-		}
-	}
-}
-
 static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
 {
 	u8 i;
@@ -2377,6 +1123,67 @@
 	}
 }
 
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+	u16 val;
+
+	if (type < 3)
+		val = 0;
+	else if (type == 6)
+		val = 1;
+	else if (type == 3)
+		val = 2;
+	else
+		val = 3;
+
+	val = (val << 12) | (val << 14);
+	b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+	b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+	if (type < 3) {
+		b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+				(type + 1) << 4);
+		b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+				(type + 1) << 4);
+	}
+
+	if (code == 0) {
+		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
+		if (type < 3) {
+			b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+				~(B43_NPHY_RFCTL_CMD_RXEN |
+				  B43_NPHY_RFCTL_CMD_CORESEL));
+			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+				~(0x1 << 12 |
+				  0x1 << 5 |
+				  0x1 << 1 |
+				  0x1));
+			b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+				~B43_NPHY_RFCTL_CMD_START);
+			udelay(20);
+			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+		}
+	} else {
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
+		if (type < 3) {
+			b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+				~(B43_NPHY_RFCTL_CMD_RXEN |
+				  B43_NPHY_RFCTL_CMD_CORESEL),
+				(B43_NPHY_RFCTL_CMD_RXEN |
+				 code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
+			b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
+				(0x1 << 12 |
+				  0x1 << 5 |
+				  0x1 << 1 |
+				  0x1));
+			b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+				B43_NPHY_RFCTL_CMD_START);
+			udelay(20);
+			b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+		}
+	}
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
 static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
 {
@@ -2686,6 +1493,1413 @@
 	}
 }
 
+/**************************************************
+ * Workarounds
+ **************************************************/
+
+static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
+{
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+	bool ghz5;
+	bool ext_lna;
+	u16 rssi_gain;
+	struct nphy_gain_ctl_workaround_entry *e;
+	u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+	u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+
+	/* Prepare values */
+	ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
+		& B43_NPHY_BANDCTL_5GHZ;
+	ext_lna = ghz5 ? sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ :
+		sprom->boardflags_lo & B43_BFL_EXTLNA;
+	e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
+	if (ghz5 && dev->phy.rev >= 5)
+		rssi_gain = 0x90;
+	else
+		rssi_gain = 0x50;
+
+	b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
+
+	/* Set Clip 2 detect */
+	b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+			B43_NPHY_C1_CGAINI_CL2DETECT);
+	b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+			B43_NPHY_C2_CGAINI_CL2DETECT);
+
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+			0x17);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+			0x17);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
+			rssi_gain);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
+			rssi_gain);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+			0x17);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+			0x17);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
+
+	b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
+	b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
+	b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
+	b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
+	b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
+	b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
+
+	b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+	b43_phy_write(dev, 0x2A7, e->init_gain);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
+				e->rfseq_init);
+
+	/* TODO: check the defines; they do not match the variable names */
+	b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
+	b43_phy_write(dev, 0x2A9, e->cliphi_gain);
+	b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
+	b43_phy_write(dev, 0x2AB, e->clipmd_gain);
+	b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
+	b43_phy_write(dev, 0x2AD, e->cliplo_gain);
+
+	b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
+	b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
+	b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
+	b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
+	b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
+	b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+			~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
+	b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+			~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
+	b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+}
+
+static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+
+	u8 i, j;
+	u8 code;
+	u16 tmp;
+	u8 rfseq_events[3] = { 6, 8, 7 };
+	u8 rfseq_delays[3] = { 10, 30, 1 };
+
+	/* Set Clip 2 detect */
+	b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
+	b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);
+
+	/* Set narrowband clip threshold */
+	b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+	b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+	if (!dev->phy.is_40mhz) {
+		/* Set dwell lengths */
+		b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+		b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+		b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+		b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+	}
+
+	/* Set wideband clip 2 threshold */
+	b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+			~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21);
+	b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+			~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21);
+
+	if (!dev->phy.is_40mhz) {
+		b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+			~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+		b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+			~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+		b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+			~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+		b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+			~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+	}
+
+	b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+	if (nphy->gain_boost) {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+			dev->phy.is_40mhz)
+			code = 4;
+		else
+			code = 5;
+	} else {
+		code = dev->phy.is_40mhz ? 6 : 7;
+	}
+
+	/* Set HPVGA2 index */
+	b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2,
+			code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+	b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2,
+			code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+	/* the specs say 2 loops, but wl does 4 */
+	for (i = 0; i < 4; i++)
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C));
+
+	b43_nphy_adjust_lna_gain_table(dev);
+
+	if (nphy->elna_gain_config) {
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+		/* the specs say 2 loops, but wl does 4 */
+		for (i = 0; i < 4; i++)
+			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+						(code << 8 | 0x74));
+	}
+
+	if (dev->phy.rev == 2) {
+		for (i = 0; i < 4; i++) {
+			b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+					(0x0400 * i) + 0x0020);
+			for (j = 0; j < 21; j++) {
+				tmp = j * (i < 2 ? 3 : 1);
+				b43_phy_write(dev,
+					B43_NPHY_TABLE_DATALO, tmp);
+			}
+		}
+	}
+
+	b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3);
+	b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+		~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+		0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
+{
+	if (dev->phy.rev >= 3)
+		b43_nphy_gain_ctl_workarounds_rev3plus(dev);
+	else
+		b43_nphy_gain_ctl_workarounds_rev1_2(dev);
+}
+
+static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+	/* TX to RX */
+	u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+	u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+	/* RX to TX */
+	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+					0x1F };
+	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+	u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
+	u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+
+	u16 tmp16;
+	u32 tmp32;
+
+	b43_phy_write(dev, 0x23f, 0x1f8);
+	b43_phy_write(dev, 0x240, 0x1f8);
+
+	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+	tmp32 &= 0xffffff;
+	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+	b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+	b43_phy_write(dev, 0x2AE, 0x000C);
+
+	/* TX to RX */
+	b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+				 ARRAY_SIZE(tx2rx_events));
+
+	/* RX to TX */
+	if (b43_nphy_ipa(dev))
+		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+				rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+	if (nphy->hw_phyrxchain != 3 &&
+	    nphy->hw_phyrxchain != nphy->hw_phytxchain) {
+		if (b43_nphy_ipa(dev)) {
+			rx2tx_delays[5] = 59;
+			rx2tx_delays[6] = 1;
+			rx2tx_events[7] = 0x1F;
+		}
+		b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+					 ARRAY_SIZE(rx2tx_events));
+	}
+
+	tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+		0x2 : 0x9C40;
+	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+
+	b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+
+	b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+	b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+
+	b43_nphy_gain_ctl_workarounds(dev);
+
+	b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
+	b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
+
+	/* TODO */
+
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+	b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+	b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+	/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+	if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+	     b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+	    (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+	     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+		tmp32 = 0x00088888;
+	else
+		tmp32 = 0x88888888;
+	b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+	b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+	b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+	if (dev->phy.rev == 4 &&
+		b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+				0x70);
+		b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+				0x70);
+	}
+
+	b43_phy_write(dev, 0x224, 0x03eb);
+	b43_phy_write(dev, 0x225, 0x03eb);
+	b43_phy_write(dev, 0x226, 0x0341);
+	b43_phy_write(dev, 0x227, 0x0341);
+	b43_phy_write(dev, 0x228, 0x042b);
+	b43_phy_write(dev, 0x229, 0x042b);
+	b43_phy_write(dev, 0x22a, 0x0381);
+	b43_phy_write(dev, 0x22b, 0x0381);
+	b43_phy_write(dev, 0x22c, 0x042b);
+	b43_phy_write(dev, 0x22d, 0x042b);
+	b43_phy_write(dev, 0x22e, 0x0381);
+	b43_phy_write(dev, 0x22f, 0x0381);
+}
+
+static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
+{
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = phy->n;
+
+	u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+	u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+	u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+	u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+	    nphy->band5g_pwrgain) {
+		b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+		b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+	} else {
+		b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+		b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+	}
+
+	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+
+	if (dev->phy.rev < 2) {
+		b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
+	}
+
+	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+	if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
+	    dev->dev->board_type == 0x8B) {
+		delays1[0] = 0x1;
+		delays1[5] = 0x14;
+	}
+	b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+	b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+	b43_nphy_gain_ctl_workarounds(dev);
+
+	if (dev->phy.rev < 2) {
+		if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+			b43_hf_write(dev, b43_hf_read(dev) |
+					B43_HF_MLADVW);
+	} else if (dev->phy.rev == 2) {
+		b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+		b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+	}
+
+	if (dev->phy.rev < 2)
+		b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+				~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+	/* Set phase track alpha and beta */
+	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+	b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+			~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+	b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+	if (dev->phy.rev == 2)
+		b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+				B43_NPHY_FINERX2_CGC_DECGC);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = phy->n;
+
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+		b43_nphy_classifier(dev, 1, 0);
+	else
+		b43_nphy_classifier(dev, 1, 1);
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 1);
+
+	b43_phy_set(dev, B43_NPHY_IQFLIP,
+		    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+	if (dev->phy.rev >= 3)
+		b43_nphy_workarounds_rev3plus(dev);
+	else
+		b43_nphy_workarounds_rev1_2(dev);
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/**************************************************
+ * Tx/Rx common
+ **************************************************/
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+				bool iqmode, bool dac_test)
+{
+	u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+	if (samp == 0)
+		return -1;
+	b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+	return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+
+	bool override = false;
+	u16 chain = 0x33;
+
+	if (nphy->txrx_chain == 0) {
+		chain = 0x11;
+		override = true;
+	} else if (nphy->txrx_chain == 1) {
+		chain = 0x22;
+		override = true;
+	}
+
+	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+			~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+			chain);
+
+	if (override)
+		b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+				B43_NPHY_RFSEQMODE_CAOVER);
+	else
+		b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+				~B43_NPHY_RFSEQMODE_CAOVER);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	u16 tmp;
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 1);
+
+	tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+	if (tmp & 0x1)
+		b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+	else if (tmp & 0x2)
+		b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+
+	b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
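+	/* Bit 31 of bb_mult_save flags a saved value; restore and clear it */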
+	if (nphy->bb_mult_save & 0x80000000) {
+		tmp = nphy->bb_mult_save & 0xFFFF;
+		b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+		nphy->bb_mult_save = 0;
+	}
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+					struct nphy_txgains target,
+					struct nphy_iqcal_params *params)
+{
+	int i, j, indx;
+	u16 gain;
+
+	if (dev->phy.rev >= 3) {
+		params->txgm = target.txgm[core];
+		params->pga = target.pga[core];
+		params->pad = target.pad[core];
+		params->ipa = target.ipa[core];
+		params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+					(params->pad << 4) | (params->ipa);
+		for (j = 0; j < 5; j++)
+			params->ncorr[j] = 0x79;
+	} else {
+		gain = (target.pad[core]) | (target.pga[core] << 4) |
+			(target.txgm[core] << 8);
+
+		indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+			1 : 0;
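+		/* Find the matching gain in the table, falling back to the last entry */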
+		for (i = 0; i < 9; i++)
+			if (tbl_iqcal_gainparams[indx][i][0] == gain)
+				break;
+		i = min(i, 8);
+
+		params->txgm = tbl_iqcal_gainparams[indx][i][1];
+		params->pga = tbl_iqcal_gainparams[indx][i][2];
+		params->pad = tbl_iqcal_gainparams[indx][i][3];
+		params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+					(params->pad << 2);
+		for (j = 0; j < 4; j++)
+			params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+	}
+}
+
+/**************************************************
+ * Tx and Rx
+ **************************************************/
+
+void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
+{
+	/* TODO */
+}
+
+static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
+{
+	/* TODO */
+}
+
+static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
+							bool ignore_tssi)
+{
+	/* TODO */
+	return B43_TXPWR_RES_DONE;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
+static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	u8 i;
+	u16 bmask, val, tmp;
+	enum ieee80211_band band = b43_current_band(dev->wl);
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 1);
+
+	nphy->txpwrctrl = enable;
+	if (!enable) {
+		if (dev->phy.rev >= 3 &&
+		    (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
+		     (B43_NPHY_TXPCTL_CMD_COEFF |
+		      B43_NPHY_TXPCTL_CMD_HWPCTLEN |
+		      B43_NPHY_TXPCTL_CMD_PCTLEN))) {
+			/* We are disabling enabled TX pwr ctl; save its state */
+			nphy->tx_pwr_idx[0] = b43_phy_read(dev,
+						B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
+			nphy->tx_pwr_idx[1] = b43_phy_read(dev,
+						B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
+		}
+
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
+		for (i = 0; i < 84; i++)
+			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
+		for (i = 0; i < 84; i++)
+			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+		tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+		if (dev->phy.rev >= 3)
+			tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+		b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
+
+		if (dev->phy.rev >= 3) {
+			b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+		} else {
+			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+		}
+
+		if (dev->phy.rev == 2)
+			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+				~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
+		else if (dev->phy.rev < 2)
+			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+				~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
+
+		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+			b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
+	} else {
+		b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
+				    nphy->adj_pwr_tbl);
+		b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
+				    nphy->adj_pwr_tbl);
+
+		bmask = B43_NPHY_TXPCTL_CMD_COEFF |
+			B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+		/* wl does a useless check of the "enable" param here */
+		val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+		if (dev->phy.rev >= 3) {
+			bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+			if (val)
+				val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+		}
+		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
+
+		if (band == IEEE80211_BAND_5GHZ) {
+			b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+					~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
+			if (dev->phy.rev > 1)
+				b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+						~B43_NPHY_TXPCTL_INIT_PIDXI1,
+						0x64);
+		}
+
+		if (dev->phy.rev >= 3) {
+			if (nphy->tx_pwr_idx[0] != 128 &&
+			    nphy->tx_pwr_idx[1] != 128) {
+				/* Recover TX pwr ctl state */
+				b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+						~B43_NPHY_TXPCTL_CMD_INIT,
+						nphy->tx_pwr_idx[0]);
+				if (dev->phy.rev > 1)
+					b43_phy_maskset(dev,
+						B43_NPHY_TXPCTL_INIT,
+						~0xff, nphy->tx_pwr_idx[1]);
+			}
+		}
+
+		if (dev->phy.rev >= 3) {
+			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
+			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
+		} else {
+			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
+		}
+
+		if (dev->phy.rev == 2)
+			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
+		else if (dev->phy.rev < 2)
+			b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
+
+		if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+			b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
+
+		if (b43_nphy_ipa(dev)) {
+			b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
+			b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
+		}
+	}
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
+static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+	u8 txpi[2], bbmult, i;
+	u16 tmp, radio_gain, dac_gain;
+	u16 freq = dev->phy.channel_freq;
+	u32 txgain;
+	/* u32 gaintbl; rev3+ */
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 1);
+
+	if (dev->phy.rev >= 7) {
+		txpi[0] = txpi[1] = 30;
+	} else if (dev->phy.rev >= 3) {
+		txpi[0] = 40;
+		txpi[1] = 40;
+	} else if (sprom->revision < 4) {
+		txpi[0] = 72;
+		txpi[1] = 72;
+	} else {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			txpi[0] = sprom->txpid2g[0];
+			txpi[1] = sprom->txpid2g[1];
+		} else if (freq >= 4900 && freq < 5100) {
+			txpi[0] = sprom->txpid5gl[0];
+			txpi[1] = sprom->txpid5gl[1];
+		} else if (freq >= 5100 && freq < 5500) {
+			txpi[0] = sprom->txpid5g[0];
+			txpi[1] = sprom->txpid5g[1];
+		} else if (freq >= 5500) {
+			txpi[0] = sprom->txpid5gh[0];
+			txpi[1] = sprom->txpid5gh[1];
+		} else {
+			txpi[0] = 91;
+			txpi[1] = 91;
+		}
+	}
+	if (dev->phy.rev < 7 &&
+	    (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
+		txpi[0] = txpi[1] = 91;
+
+	/*
+	for (i = 0; i < 2; i++) {
+		nphy->txpwrindex[i].index_internal = txpi[i];
+		nphy->txpwrindex[i].index_internal_save = txpi[i];
+	}
+	*/
+
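+	/* Program the gain for each core from the selected TX power index */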
+	for (i = 0; i < 2; i++) {
+		if (dev->phy.rev >= 3) {
+			if (b43_nphy_ipa(dev)) {
+				txgain = *(b43_nphy_get_ipa_gain_table(dev) +
+						txpi[i]);
+			} else if (b43_current_band(dev->wl) ==
+				   IEEE80211_BAND_5GHZ) {
+				/* FIXME: use 5GHz tables */
+				txgain =
+					b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+			} else {
+				if (dev->phy.rev >= 5 &&
+				    sprom->fem.ghz5.extpa_gain == 3)
+					; /* FIXME: 5GHz_txgain_HiPwrEPA */
+				txgain =
+					b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+			}
+			radio_gain = (txgain >> 16) & 0x1FFFF;
+		} else {
+			txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
+			radio_gain = (txgain >> 16) & 0x1FFF;
+		}
+
+		if (dev->phy.rev >= 7)
+			dac_gain = (txgain >> 8) & 0x7;
+		else
+			dac_gain = (txgain >> 8) & 0x3F;
+		bbmult = txgain & 0xFF;
+
+		if (dev->phy.rev >= 3) {
+			if (i == 0)
+				b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+			else
+				b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+		} else {
+			b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+		}
+
+		if (i == 0)
+			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
+		else
+			b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
+
+		b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
+
+		tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
+		if (i == 0)
+			tmp = (tmp & 0x00FF) | (bbmult << 8);
+		else
+			tmp = (tmp & 0xFF00) | bbmult;
+		b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
+
+		if (b43_nphy_ipa(dev)) {
+			u32 tmp32;
+			u16 reg = (i == 0) ?
+				B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
+			tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
+							      576 + txpi[i]));
+			b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
+			b43_phy_set(dev, reg, 0x4);
+		}
+	}
+
+	b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	u8 core;
+	u16 r; /* routing */
+
+	if (phy->rev >= 7) {
+		for (core = 0; core < 2; core++) {
+			r = core ? 0x190 : 0x170;
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+				b43_radio_write(dev, r + 0x5, 0x5);
+				b43_radio_write(dev, r + 0x9, 0xE);
+				if (phy->rev != 5)
+					b43_radio_write(dev, r + 0xA, 0);
+				if (phy->rev != 7)
+					b43_radio_write(dev, r + 0xB, 1);
+				else
+					b43_radio_write(dev, r + 0xB, 0x31);
+			} else {
+				b43_radio_write(dev, r + 0x5, 0x9);
+				b43_radio_write(dev, r + 0x9, 0xC);
+				b43_radio_write(dev, r + 0xB, 0x0);
+				if (phy->rev != 5)
+					b43_radio_write(dev, r + 0xA, 1);
+				else
+					b43_radio_write(dev, r + 0xA, 0x31);
+			}
+			b43_radio_write(dev, r + 0x6, 0);
+			b43_radio_write(dev, r + 0x7, 0);
+			b43_radio_write(dev, r + 0x8, 3);
+			b43_radio_write(dev, r + 0xC, 0);
+		}
+	} else {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
+		else
+			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
+		b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0);
+		b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29);
+
+		for (core = 0; core < 2; core++) {
+			r = core ? B2056_TX1 : B2056_TX0;
+
+			b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0);
+			b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0);
+			b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3);
+			b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0);
+			b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
+			b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
+			b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+				b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+						0x5);
+				if (phy->rev != 5)
+					b43_radio_write(dev, r | B2056_TX_TSSIA,
+							0x00);
+				if (phy->rev >= 5)
+					b43_radio_write(dev, r | B2056_TX_TSSIG,
+							0x31);
+				else
+					b43_radio_write(dev, r | B2056_TX_TSSIG,
+							0x11);
+				b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+						0xE);
+			} else {
+				b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+						0x9);
+				b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31);
+				b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0);
+				b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+						0xC);
+			}
+		}
+	}
+}
+
+/*
+ * Stop the radio and transmit a known signal, then check the received signal
+ * strength to get the TSSI (Transmit Signal Strength Indicator).
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
+ */
+static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = dev->phy.n;
+
+	u32 tmp;
+	s32 rssi[4] = { };
+
+	/* TODO: check if we can transmit */
+
+	if (b43_nphy_ipa(dev))
+		b43_nphy_ipa_internal_tssi_setup(dev);
+
+	if (phy->rev >= 7)
+		; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+	else if (phy->rev >= 3)
+		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
+
+	b43_nphy_stop_playback(dev);
+	b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
+	udelay(20);
+	tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1);
+	b43_nphy_stop_playback(dev);
+	b43_nphy_rssi_select(dev, 0, 0);
+
+	if (phy->rev >= 7)
+		; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+	else if (phy->rev >= 3)
+		b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
+
+	if (phy->rev >= 3) {
+		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
+		nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
+	} else {
+		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
+		nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
+	}
+	nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
+	nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
+}
+
+static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	const u32 *table = NULL;
+#if 0
+	TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
+	u32 rfpwr_offset;
+	u8 pga_gain;
+	int i;
+#endif
+
+	if (phy->rev >= 3) {
+		if (b43_nphy_ipa(dev)) {
+			table = b43_nphy_get_ipa_gain_table(dev);
+		} else {
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+				if (phy->rev == 3)
+					table = b43_ntab_tx_gain_rev3_5ghz;
+				else if (phy->rev == 4)
+					table = b43_ntab_tx_gain_rev4_5ghz;
+				else
+					table = b43_ntab_tx_gain_rev5plus_5ghz;
+			} else {
+				table = b43_ntab_tx_gain_rev3plus_2ghz;
+			}
+		}
+	} else {
+		table = b43_ntab_tx_gain_rev0_1_2;
+	}
+	b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
+	b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+
+	if (phy->rev >= 3) {
+#if 0
+		nphy->gmval = (table[0] >> 16) & 0x7000;
+
+		for (i = 0; i < 128; i++) {
+			pga_gain = (table[i] >> 24) & 0xF;
+			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+				rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+			else
+				rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
+			b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
+				       rfpwr_offset);
+			b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
+				       rfpwr_offset);
+		}
+#endif
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	enum ieee80211_band band;
+	u16 tmp;
+
+	if (!enable) {
+		nphy->rfctrl_intc1_save = b43_phy_read(dev,
+						       B43_NPHY_RFCTL_INTC1);
+		nphy->rfctrl_intc2_save = b43_phy_read(dev,
+						       B43_NPHY_RFCTL_INTC2);
+		band = b43_current_band(dev->wl);
+		if (dev->phy.rev >= 3) {
+			if (band == IEEE80211_BAND_5GHZ)
+				tmp = 0x600;
+			else
+				tmp = 0x480;
+		} else {
+			if (band == IEEE80211_BAND_5GHZ)
+				tmp = 0x180;
+			else
+				tmp = 0x120;
+		}
+		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+	} else {
+		b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+				nphy->rfctrl_intc1_save);
+		b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+				nphy->rfctrl_intc2_save);
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+	u16 tmp;
+
+	if (dev->phy.rev >= 3) {
+		if (b43_nphy_ipa(dev)) {
+			tmp = 4;
+			b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+		}
+
+		tmp = 1;
+		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+			      (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+				u16 samps, u8 time, bool wait)
+{
+	int i;
+	u16 tmp;
+
+	b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+	b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+	if (wait)
+		b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+	else
+		b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+	b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
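+	/* Poll up to 1000 * 10us for the hardware to clear the START bit */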
+	for (i = 1000; i; i--) {
+		tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+		if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+			est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+			est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+			est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+			est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+					b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+			est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+					b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+			est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+					b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+			return;
+		}
+		udelay(10);
+	}
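+	/* Timed out; report an all-zero estimate */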
+	memset(est, 0, sizeof(*est));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+					struct b43_phy_n_iq_comp *pcomp)
+{
+	if (write) {
+		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+		b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+		b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+	} else {
+		pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+		pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+		pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+		pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+	}
+}
+
+#if 0
+/* Ready but not used anywhere */
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+	b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+	if (core == 0) {
+		b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+		b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+	} else {
+		b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+		b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+	}
+	b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+	b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+	b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+	b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+	b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+	b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+	b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+	u8 rxval, txval;
+	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+	regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+	if (core == 0) {
+		regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+		regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+	} else {
+		regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+		regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+	}
+	regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+	regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+	regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+	regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+	regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+	regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+	regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+	regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+	b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+	b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+	b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+			~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
+			((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+			((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+			(core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+	b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+			(core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+	if (core == 0) {
+		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+	} else {
+		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+	}
+
+	b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+	b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+	if (core == 0) {
+		rxval = 1;
+		txval = 8;
+	} else {
+		rxval = 4;
+		txval = 2;
+	}
+	b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+	b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+#endif
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+	int i;
+	s32 iq;
+	u32 ii;
+	u32 qq;
+	int iq_nbits, qq_nbits;
+	int arsh, brsh;
+	u16 tmp, a, b;
+
+	struct nphy_iq_est est;
+	struct b43_phy_n_iq_comp old;
+	struct b43_phy_n_iq_comp new = { };
+	bool error = false;
+
+	if (mask == 0)
+		return;
+
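+	/* Measure with zeroed compensation, then compute the new coeffs */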
+	b43_nphy_rx_iq_coeffs(dev, false, &old);
+	b43_nphy_rx_iq_coeffs(dev, true, &new);
+	b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+	new = old;
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0 && (mask & 1)) {
+			iq = est.iq0_prod;
+			ii = est.i0_pwr;
+			qq = est.q0_pwr;
+		} else if (i == 1 && (mask & 2)) {
+			iq = est.iq1_prod;
+			ii = est.i1_pwr;
+			qq = est.q1_pwr;
+		} else {
+			continue;
+		}
+
+		if (ii + qq < 2) {
+			error = true;
+			break;
+		}
+
+		iq_nbits = fls(abs(iq));
+		qq_nbits = fls(qq);
+
+		arsh = iq_nbits - 20;
+		if (arsh >= 0) {
+			a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+			tmp = ii >> arsh;
+		} else {
+			a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+			tmp = ii << -arsh;
+		}
+		if (tmp == 0) {
+			error = true;
+			break;
+		}
+		a /= tmp;
+
+		brsh = qq_nbits - 11;
+		if (brsh >= 0) {
+			b = (qq << (31 - qq_nbits));
+			tmp = ii >> brsh;
+		} else {
+			b = (qq << (31 - qq_nbits));
+			tmp = ii << -brsh;
+		}
+		if (tmp == 0) {
+			error = true;
+			break;
+		}
+		b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+		if (i == 0 && (mask & 0x1)) {
+			if (dev->phy.rev >= 3) {
+				new.a0 = a & 0x3FF;
+				new.b0 = b & 0x3FF;
+			} else {
+				new.a0 = b & 0x3FF;
+				new.b0 = a & 0x3FF;
+			}
+		} else if (i == 1 && (mask & 0x2)) {
+			if (dev->phy.rev >= 3) {
+				new.a1 = a & 0x3FF;
+				new.b1 = b & 0x3FF;
+			} else {
+				new.a1 = b & 0x3FF;
+				new.b1 = a & 0x3FF;
+			}
+		}
+	}
+
+	if (error)
+		new = old;
+
+	b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+	u16 array[4];
+	b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
+
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+
+	u8 channel = dev->phy.channel;
+	int tone[2] = { 57, 58 };
+	u32 noise[2] = { 0x3FF, 0x3FF };
+
+	B43_WARN_ON(dev->phy.rev < 3);
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 1);
+
+	if (nphy->gband_spurwar_en) {
+		/* TODO: N PHY Adjust Analog Pfbw (7) */
+		if (channel == 11 && dev->phy.is_40mhz)
+			; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+		else
+			; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+		/* TODO: N PHY Adjust CRS Min Power (0x1E) */
+	}
+
+	if (nphy->aband_spurwar_en) {
+		if (channel == 54) {
+			tone[0] = 0x20;
+			noise[0] = 0x25F;
+		} else if (channel == 38 || channel == 102 || channel == 118) {
+			if (0 /* FIXME */) {
+				tone[0] = 0x20;
+				noise[0] = 0x21F;
+			} else {
+				tone[0] = 0;
+				noise[0] = 0;
+			}
+		} else if (channel == 134) {
+			tone[0] = 0x20;
+			noise[0] = 0x21F;
+		} else if (channel == 151) {
+			tone[0] = 0x10;
+			noise[0] = 0x23F;
+		} else if (channel == 153 || channel == 161) {
+			tone[0] = 0x30;
+			noise[0] = 0x23F;
+		} else {
+			tone[0] = 0;
+			noise[0] = 0;
+		}
+
+		if (!tone[0] && !noise[0])
+			; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+		else
+			; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+	}
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+	struct b43_phy_n *nphy = dev->phy.n;
+	int i, j;
+	u32 tmp;
+	u32 cur_real, cur_imag, real_part, imag_part;
+
+	u16 buffer[7];
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, true);
+
+	b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
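+	/* Replicate each core's IQ coefficients across 128 table entries */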
+	for (i = 0; i < 2; i++) {
+		tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+			(buffer[i * 2 + 1] & 0x3FF);
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+				(((i + 26) << 10) | 320));
+		for (j = 0; j < 128; j++) {
+			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+					((tmp >> 16) & 0xFFFF));
+			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+					(tmp & 0xFFFF));
+		}
+	}
+
+	for (i = 0; i < 2; i++) {
+		tmp = buffer[5 + i];
+		real_part = (tmp >> 8) & 0xFF;
+		imag_part = (tmp & 0xFF);
+		b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+				(((i + 26) << 10) | 448));
+
+		if (dev->phy.rev >= 3) {
+			cur_real = real_part;
+			cur_imag = imag_part;
+			tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+		}
+
+		for (j = 0; j < 128; j++) {
+			if (dev->phy.rev < 3) {
+				cur_real = (real_part * loscale[j] + 128) >> 8;
+				cur_imag = (imag_part * loscale[j] + 128) >> 8;
+				tmp = ((cur_real & 0xFF) << 8) |
+					(cur_imag & 0xFF);
+			}
+			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+					((tmp >> 16) & 0xFFFF));
+			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+					(tmp & 0xFFFF));
+		}
+	}
+
+	if (dev->phy.rev >= 3) {
+		b43_shm_write16(dev, B43_SHM_SHARED,
+				B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+		b43_shm_write16(dev, B43_SHM_SHARED,
+				B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+	}
+
+	if (nphy->hang_avoid)
+		b43_nphy_stay_in_carrier_search(dev, false);
+}
+
 /*
  * Restore RSSI Calibration
  * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
@@ -2729,24 +2943,6 @@
 	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
-{
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-		if (dev->phy.rev >= 6) {
-			if (dev->dev->chip_id == 47162)
-				return txpwrctrl_tx_gain_ipa_rev5;
-			return txpwrctrl_tx_gain_ipa_rev6;
-		} else if (dev->phy.rev >= 5) {
-			return txpwrctrl_tx_gain_ipa_rev5;
-		} else {
-			return txpwrctrl_tx_gain_ipa;
-		}
-	} else {
-		return txpwrctrl_tx_gain_ipa_5g;
-	}
-}
-
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
 static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
 {
@@ -2841,44 +3037,6 @@
 	}
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
-static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
-					struct nphy_txgains target,
-					struct nphy_iqcal_params *params)
-{
-	int i, j, indx;
-	u16 gain;
-
-	if (dev->phy.rev >= 3) {
-		params->txgm = target.txgm[core];
-		params->pga = target.pga[core];
-		params->pad = target.pad[core];
-		params->ipa = target.ipa[core];
-		params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
-					(params->pad << 4) | (params->ipa);
-		for (j = 0; j < 5; j++)
-			params->ncorr[j] = 0x79;
-	} else {
-		gain = (target.pad[core]) | (target.pga[core] << 4) |
-			(target.txgm[core] << 8);
-
-		indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
-			1 : 0;
-		for (i = 0; i < 9; i++)
-			if (tbl_iqcal_gainparams[indx][i][0] == gain)
-				break;
-		i = min(i, 8);
-
-		params->txgm = tbl_iqcal_gainparams[indx][i][1];
-		params->pga = tbl_iqcal_gainparams[indx][i][2];
-		params->pad = tbl_iqcal_gainparams[indx][i][3];
-		params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
-					(params->pad << 2);
-		for (j = 0; j < 4; j++)
-			params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
-	}
-}
-
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
 static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
 {
@@ -3258,7 +3416,7 @@
 
 	if (dev->phy.rev >= 4) {
 		avoid = nphy->hang_avoid;
-		nphy->hang_avoid = 0;
+		nphy->hang_avoid = false;
 	}
 
 	b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3368,7 +3526,7 @@
 
 			if (phy6or5x && updated[core] == 0) {
 				b43_nphy_update_tx_cal_ladder(dev, core);
-				updated[core] = 1;
+				updated[core] = true;
 			}
 
 			tmp = (params[core].ncorr[type] << 8) | 0x66;
@@ -3730,10 +3888,104 @@
 	b43_mac_enable(dev);
 }
 
+/**************************************************
+ * N-PHY init
+ **************************************************/
+
 /*
- * Init N-PHY
- * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
  */
+static void b43_nphy_tables_init(struct b43_wldev *dev)
+{
+	if (dev->phy.rev < 3)
+		b43_nphy_rev0_1_2_tables_init(dev);
+	else
+		b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+	u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+	mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+	if (preamble == 1)
+		mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+	else
+		mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+	b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
+static void b43_nphy_bphy_init(struct b43_wldev *dev)
+{
+	unsigned int i;
+	u16 val;
+
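+	/* Fill two 16-entry B-PHY mode tables with descending values */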
+	val = 0x1E1F;
+	for (i = 0; i < 16; i++) {
+		b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
+		val -= 0x202;
+	}
+	val = 0x3E3F;
+	for (i = 0; i < 16; i++) {
+		b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
+		val -= 0x202;
+	}
+	b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+	if (dev->phy.rev >= 3) {
+		if (!init)
+			return;
+		if (0 /* FIXME */) {
+			b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+			b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+			b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+			b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+		}
+	} else {
+		b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+		b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+		switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+		case B43_BUS_BCMA:
+			bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
+						 0xFC00, 0xFC00);
+			break;
+#endif
+#ifdef CONFIG_B43_SSB
+		case B43_BUS_SSB:
+			ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
+						0xFC00, 0xFC00);
+			break;
+#endif
+		}
+
+		b43_write32(dev, B43_MMIO_MACCTL,
+			b43_read32(dev, B43_MMIO_MACCTL) &
+			~B43_MACCTL_GPOUTSMSK);
+		b43_write16(dev, B43_MMIO_GPIO_MASK,
+			b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+		b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+			b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+		if (init) {
+			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+			b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+		}
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
 int b43_phy_initn(struct b43_wldev *dev)
 {
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -3857,7 +4109,7 @@
 	tx_pwr_state = nphy->txpwrctrl;
 	b43_nphy_tx_power_ctrl(dev, false);
 	b43_nphy_tx_power_fix(dev);
-	/* TODO N PHY TX Power Control Idle TSSI */
+	b43_nphy_tx_power_ctl_idle_tssi(dev);
 	/* TODO N PHY TX Power Control Setup */
 	b43_nphy_tx_gain_table_upload(dev);
 
@@ -3928,6 +4180,91 @@
 	return 0;
 }
 
+/**************************************************
+ * Channel switching ops.
+ **************************************************/
+
+static void b43_chantab_phy_upload(struct b43_wldev *dev,
+				   const struct b43_phy_n_sfo_cfg *e)
+{
+	b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
+	b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
+	b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
+	b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
+	b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
+	b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
+{
+	struct bcma_drv_cc __maybe_unused *cc;
+	u32 __maybe_unused pmu_ctl;
+
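+	/* Rewrite the PMU PLL registers and request a PLL update via PMU_CTL */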
+	switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+	case B43_BUS_BCMA:
+		cc = &dev->dev->bdev->bus->drv_cc;
+		if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+			if (avoid) {
+				bcma_chipco_pll_write(cc, 0x0, 0x11500010);
+				bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
+				bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
+				bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+				bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
+				bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+			} else {
+				bcma_chipco_pll_write(cc, 0x0, 0x11100010);
+				bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
+				bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
+				bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+				bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+				bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+			}
+			pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+		} else if (dev->dev->chip_id == 0x4716) {
+			if (avoid) {
+				bcma_chipco_pll_write(cc, 0x0, 0x11500060);
+				bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
+				bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
+				bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+				bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
+				bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+			} else {
+				bcma_chipco_pll_write(cc, 0x0, 0x11100060);
+				bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
+				bcma_chipco_pll_write(cc, 0x2, 0x03000000);
+				bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+				bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+				bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+			}
+			pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
+				  BCMA_CC_PMU_CTL_NOILPONW;
+		} else if (dev->dev->chip_id == 0x4322 ||
+			   dev->dev->chip_id == 0x4340 ||
+			   dev->dev->chip_id == 0x4341) {
+			bcma_chipco_pll_write(cc, 0x0, 0x11100070);
+			bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
+			bcma_chipco_pll_write(cc, 0x5, 0x88888854);
+			if (avoid)
+				bcma_chipco_pll_write(cc, 0x2, 0x05201828);
+			else
+				bcma_chipco_pll_write(cc, 0x2, 0x05001828);
+			pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+		} else {
+			return;
+		}
+		bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
+		break;
+#endif
+#ifdef CONFIG_B43_SSB
+	case B43_BUS_SSB:
+		/* FIXME */
+		break;
+#endif
+	}
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
 static void b43_nphy_channel_setup(struct b43_wldev *dev,
 				const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4272,7 @@
 {
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
+	int ch = new_channel->hw_value;
 
 	u16 old_band_5ghz;
 	u32 tmp32;
@@ -3974,8 +4312,41 @@
 
 	b43_nphy_tx_lp_fbw(dev);
 
-	if (dev->phy.rev >= 3 && 0) {
-		/* TODO */
+	if (dev->phy.rev >= 3 &&
+	    dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
+		bool avoid = false;
+		if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
+			avoid = true;
+		} else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+			if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+				avoid = true;
+		} else { /* 40MHz */
+			if (nphy->aband_spurwar_en &&
+			    (ch == 38 || ch == 102 || ch == 118))
+				avoid = dev->dev->chip_id == 0x4716;
+		}
+
+		b43_nphy_pmu_spur_avoid(dev, avoid);
+
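+		/* BCM43222/43224/43225 also need the TSF clock fraction adjusted */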
+		if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
+		    dev->dev->chip_id == 43225) {
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
+				    avoid ? 0x5341 : 0x8889);
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+		}
+
+		if (dev->phy.rev == 3 || dev->phy.rev == 4)
+			; /* TODO: reset PLL */
+
+		if (avoid)
+			b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
+		else
+			b43_phy_mask(dev, B43_NPHY_BBCFG,
+				     ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
+
+		b43_nphy_reset_cca(dev);
+
+		/* wl uselessly sets phy_isspuravoid here */
 	}
 
 	b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4039,6 +4410,10 @@
 	return 0;
 }
 
+/**************************************************
+ * Basic PHY ops.
+ **************************************************/
+
 static int b43_nphy_op_allocate(struct b43_wldev *dev)
 {
 	struct b43_phy_n *nphy;
@@ -4055,10 +4430,13 @@
 {
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = phy->n;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	memset(nphy, 0, sizeof(*nphy));
 
 	nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+	nphy->spur_avoid = (phy->rev >= 3) ?
+				B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
 	nphy->gain_boost = true; /* this way we follow wl, assume it is true */
 	nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
 	nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4445,38 @@
 	 * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
 	nphy->tx_pwr_idx[0] = 128;
 	nphy->tx_pwr_idx[1] = 128;
+
+	/* Hardware TX power control and 5GHz power gain */
+	nphy->txpwrctrl = false;
+	nphy->pwg_gain_5ghz = false;
+	if (dev->phy.rev >= 3 ||
+	    (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+	     (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
+		nphy->txpwrctrl = true;
+		nphy->pwg_gain_5ghz = true;
+	} else if (sprom->revision >= 4) {
+		if (dev->phy.rev >= 2 &&
+		    (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
+			nphy->txpwrctrl = true;
+#ifdef CONFIG_B43_SSB
+			if (dev->dev->bus_type == B43_BUS_SSB &&
+			    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
+				struct pci_dev *pdev =
+					dev->dev->sdev->bus->host_pci;
+				if (pdev->device == 0x4328 ||
+				    pdev->device == 0x432a)
+					nphy->pwg_gain_5ghz = true;
+			}
+#endif
+		} else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
+			nphy->pwg_gain_5ghz = true;
+		}
+	}
+
+	if (dev->phy.rev >= 3) {
+		nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
+		nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
+	}
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fbf5202..5de8f74 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -716,6 +716,12 @@
 
 struct b43_wldev;
 
+enum b43_nphy_spur_avoid {
+	B43_SPUR_AVOID_DISABLE,
+	B43_SPUR_AVOID_AUTO,
+	B43_SPUR_AVOID_FORCE,
+};
+
 struct b43_chanspec {
 	u16 center_freq;
 	enum nl80211_channel_type channel_type;
@@ -759,6 +765,11 @@
 	u16 locomp;
 };
 
+struct b43_phy_n_pwr_ctl_info {
+	u8 idle_tssi_2g;
+	u8 idle_tssi_5g;
+};
+
 struct b43_phy_n {
 	u8 antsel_type;
 	u8 cal_orig_pwr_idx[2];
@@ -785,12 +796,14 @@
 	u16 mphase_txcal_bestcoeffs[11];
 
 	bool txpwrctrl;
+	bool pwg_gain_5ghz;
 	u8 tx_pwr_idx[2];
 	u16 adj_pwr_tbl[84];
 	u16 txcal_bbmult;
 	u16 txiqlocal_bestc[11];
 	bool txiqlocal_coeffsvalid;
 	struct b43_phy_n_txpwrindex txpwrindex[2];
+	struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2];
 	struct b43_chanspec txiqlocal_chanspec;
 
 	u8 txrx_chain;
@@ -803,6 +816,7 @@
 	u16 classifier_state;
 	u16 clip_state[2];
 
+	enum b43_nphy_spur_avoid spur_avoid;
 	bool aband_spurwar_en;
 	bool gband_spurwar_en;
 
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcff923..3533ab8 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -539,7 +539,7 @@
 		/* Not enough memory on the queue. */
 		err = -EBUSY;
 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-		q->stopped = 1;
+		q->stopped = true;
 		goto out;
 	}
 
@@ -566,7 +566,7 @@
 	    (q->free_packet_slots == 0)) {
 		/* The queue is full. */
 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
-		q->stopped = 1;
+		q->stopped = true;
 	}
 
 out:
@@ -601,7 +601,7 @@
 
 	if (q->stopped) {
 		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
-		q->stopped = 0;
+		q->stopped = false;
 	}
 }
 
@@ -617,9 +617,19 @@
 	const char *err_msg = NULL;
 	struct b43_rxhdr_fw4 *rxhdr =
 		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+	size_t rxhdr_size = sizeof(*rxhdr);
 
 	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
-	memset(rxhdr, 0, sizeof(*rxhdr));
+	switch (dev->fw.hdr_format) {
+	case B43_FW_HDR_410:
+	case B43_FW_HDR_351:
+		rxhdr_size -= sizeof(rxhdr->format_598) -
+			sizeof(rxhdr->format_351);
+		break;
+	case B43_FW_HDR_598:
+		break;
+	}
+	memset(rxhdr, 0, rxhdr_size);
 
 	/* Check if we have data and wait for it to get ready. */
 	if (q->rev >= 8) {
@@ -657,11 +667,11 @@
 
 	/* Get the preamble (RX header) */
 	if (q->rev >= 8) {
-		b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+		b43_block_read(dev, rxhdr, rxhdr_size,
 			       q->mmio_base + B43_PIO8_RXDATA,
 			       sizeof(u32));
 	} else {
-		b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+		b43_block_read(dev, rxhdr, rxhdr_size,
 			       q->mmio_base + B43_PIO_RXDATA,
 			       sizeof(u16));
 	}
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index a01f776..ce037fb 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -1572,14 +1572,14 @@
 	[B2056_SYN_PLL_XTAL5]		= { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
 	[B2056_SYN_PLL_XTAL6]		= { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
 	[B2056_SYN_PLL_REFDIV]		= { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
-	[B2056_SYN_PLL_PFD]		= { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+	[B2056_SYN_PLL_PFD]		= { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
 	[B2056_SYN_PLL_CP1]		= { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
-	[B2056_SYN_PLL_CP2]		= { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+	[B2056_SYN_PLL_CP2]		= { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
 	[B2056_SYN_PLL_CP3]		= { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
-	[B2056_SYN_PLL_LOOPFILTER1]	= { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
-	[B2056_SYN_PLL_LOOPFILTER2]	= { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+	[B2056_SYN_PLL_LOOPFILTER1]	= { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+	[B2056_SYN_PLL_LOOPFILTER2]	= { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
 	[B2056_SYN_PLL_LOOPFILTER3]	= { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
-	[B2056_SYN_PLL_LOOPFILTER4]	= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+	[B2056_SYN_PLL_LOOPFILTER4]	= { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
 	[B2056_SYN_PLL_LOOPFILTER5]	= { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
 	[B2056_SYN_PLL_MMD1]		= { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
 	[B2056_SYN_PLL_MMD2]		= { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@
 				B2056_RX1, pts->rx, pts->rx_length);
 }
 
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
+{
+	struct b2056_inittabs_pts *pts;
+	const struct b2056_inittab_entry *e;
+
+	if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+		B43_WARN_ON(1);
+		return;
+	}
+	pts = &b2056_inittabs[dev->phy.rev];
+	e = &pts->syn[B2056_SYN_PLL_CP2];
+
+	b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
+}
+
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index a7159d8..5b86673 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1090,6 +1090,7 @@
 
 void b2056_upload_inittabs(struct b43_wldev *dev,
 			   bool ghz5, bool ignore_uploadflag);
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
 
 /* Get the NPHY Channel Switch Table entry for a channel.
  * Returns NULL on failure to find an entry. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 7b326f2..f7def135 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2171,6 +2171,48 @@
 	0x0000, 0x0000,
 };
 
+/* Volatile tables, PHY revision >= 3 */
+
+/* indexed by antswctl2g */
+static const u16 b43_ntab_antswctl2g_r3[4][32] = {
+	{
+		0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
+		0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
+		0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
+		0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
+		0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
+		0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
+		0x0000, 0x0000,
+	},
+	{
+		0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
+		0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+		0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+		0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
+		0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
+		0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+		0x0000, 0x0000,
+	},
+	{
+		0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
+		0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
+		0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
+		0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
+		0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
+		0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
+		0x0000, 0x0000,
+	},
+	{
+		0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+		0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+		0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
+		0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
+		0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+		0x0000, 0x03cc,
+	}
+};
+
 /* TX gain tables */
 const u32 b43_ntab_tx_gain_rev0_1_2[] = {
 	0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@
 const s16 tbl_tx_filter_coef_rev4[7][15] = {
 	{  -377,   137,  -407,   208, -1527,
 	    956,    93,   186,    93,   230,
-	    -44,   230,    20,  -191,   201 },
+	    -44,   230,   201,  -191,   201 },
 	{   -77,    20,   -98,    49,   -93,
 	     60,    56,   111,    56,    26,
 	     -5,    26,    34,   -32,    34 },
@@ -2710,7 +2752,18 @@
 	{ 0x00C0,  6, 0xE7, 0xF9, 0xEC, 0xFB }  /* field == 0x4000 (fls 15) */
 };
 
-struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
+	{ 10, 14, 19, 27 },
+	{ -5, 6, 10, 15 },
+	{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+	{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+	0x427E,
+	{ 0x413F, 0x413F, 0x413F, 0x413F },
+	0x007E, 0x0066, 0x1074,
+	0x18, 0x18, 0x18,
+	0x01D0, 0x5,
+};
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = {
 	{ /* 2GHz */
 		{ /* PHY rev 3 */
 			{ 7, 11, 16, 23 },
@@ -2734,15 +2787,26 @@
 			0x18, 0x18, 0x18,
 			0x01A1, 0x5,
 		},
-		{ /* PHY rev 5+ */
+		{ /* PHY rev 5 */
 			{ 9, 13, 18, 26 },
 			{ -3, 7, 11, 16 },
 			{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
 			{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
 			0x427E, /* invalid for external LNA! */
 			{ 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
-			0x1076, 0x0066, 0x106A,
-			0xC, 0xC, 0xC,
+			0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+			0x18, 0x18, 0x18,
+			0x01D0, 0x9,
+		},
+		{ /* PHY rev 6+ */
+			{ 8, 13, 18, 25 },
+			{ -5, 6, 10, 14 },
+			{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+			{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+			0x527E, /* invalid for external LNA! */
+			{ 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */
+			0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+			0x18, 0x18, 0x18,
 			0x01D0, 0x5,
 		},
 	},
@@ -2769,7 +2833,7 @@
 			0x24, 0x24, 0x24,
 			0x0107, 25,
 		},
-		{ /* PHY rev 5+ */
+		{ /* PHY rev 5 */
 			{ 6, 10, 16, 21 },
 			{ -7, 0, 4, 8 },
 			{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
@@ -2780,6 +2844,17 @@
 			0x24, 0x24, 0x24,
 			0x00A9, 25,
 		},
+		{ /* PHY rev 6+ */
+			{ 6, 10, 16, 21 },
+			{ -7, 0, 4, 8 },
+			{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+			{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+			0x729E,
+			{ 0x714F, 0x714F, 0x714F, 0x714F },
+			0x029E, 0x2084, 0x2086,
+			0x24, 0x24, 0x24, /* low is invalid for radio rev 11! */
+			0x00F0, 25,
+		},
 	},
 };
 
@@ -2838,9 +2913,8 @@
 		break;
 	case B43_NTAB_32BIT:
 		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
-		value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
-		value <<= 16;
-		value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+		value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+		value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
 		break;
 	default:
 		B43_WARN_ON(1);
@@ -2864,6 +2938,12 @@
 	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
 
 	for (i = 0; i < nr_elements; i++) {
+		/* Auto increment broken + caching issue on BCM43224? */
+		if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
+			b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+		}
+
 		switch (type) {
 		case B43_NTAB_8BIT:
 			*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2954,10 @@
 			data += 2;
 			break;
 		case B43_NTAB_32BIT:
-			*((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
-			*((u32 *)data) <<= 16;
-			*((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+			*((u32 *)data) =
+				b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+			*((u32 *)data) |=
+				b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
 			data += 4;
 			break;
 		default:
@@ -2932,6 +3013,13 @@
 	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
 
 	for (i = 0; i < nr_elements; i++) {
+		/* Auto increment broken + caching issue on BCM43224? */
+		if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
+		    dev->dev->chip_rev == 1) {
+			b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+		}
+
 		switch (type) {
 		case B43_NTAB_8BIT:
 			value = *data;
@@ -2999,6 +3087,8 @@
 	} while (0)
 void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
 {
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
 	/* Static tables */
 	ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
 	ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3119,11 @@
 	ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
 
 	/* Volatile tables */
-	/* TODO */
+	if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
+		ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
+			       b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
+	else
+		B43_WARN_ON(1);
 }
 
 struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
@@ -3037,26 +3131,67 @@
 {
 	struct nphy_gain_ctl_workaround_entry *e;
 	u8 phy_idx;
+	u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso :
+			   dev->dev->bus_sprom->fem.ghz2.tr_iso;
+
+	if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11)
+		return &nphy_gain_ctl_wa_phy6_radio11_ghz2;
 
 	B43_WARN_ON(dev->phy.rev < 3);
-	if (dev->phy.rev >= 5)
+	if (dev->phy.rev >= 6)
+		phy_idx = 3;
+	else if (dev->phy.rev == 5)
 		phy_idx = 2;
 	else if (dev->phy.rev == 4)
 		phy_idx = 1;
 	else
 		phy_idx = 0;
-
 	e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
 
-	/* Only one entry differs for external LNA, so instead making whole
-	 * table 2 times bigger, hack is here
-	 */
-	if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
-		e->rfseq_init[0] &= 0x0FFF;
-		e->rfseq_init[1] &= 0x0FFF;
-		e->rfseq_init[2] &= 0x0FFF;
-		e->rfseq_init[3] &= 0x0FFF;
-		e->init_gain &= 0x0FFF;
+	/* Some workarounds to the workarounds... */
+	if (ghz5 && dev->phy.rev >= 6) {
+		if (dev->phy.radio_rev == 11 &&
+		    !b43_channel_type_is_40mhz(dev->phy.channel_type))
+			e->cliplo_gain = 0x2d;
+	} else if (!ghz5 && dev->phy.rev >= 5) {
+		if (ext_lna) {
+			e->rfseq_init[0] &= ~0x4000;
+			e->rfseq_init[1] &= ~0x4000;
+			e->rfseq_init[2] &= ~0x4000;
+			e->rfseq_init[3] &= ~0x4000;
+			e->init_gain &= ~0x4000;
+		}
+		switch (tr_iso) {
+		case 0:
+			e->cliplo_gain = 0x0062;
+			break;
+		case 1:
+			e->cliplo_gain = 0x0064;
+			break;
+		case 2:
+			e->cliplo_gain = 0x006a;
+			break;
+		case 3:
+			e->cliplo_gain = 0x106a;
+			break;
+		case 4:
+			e->cliplo_gain = 0x106c;
+			break;
+		case 5:
+			e->cliplo_gain = 0x1074;
+			break;
+		case 6:
+			e->cliplo_gain = 0x107c;
+			break;
+		case 7:
+			e->cliplo_gain = 0x207c;
+			break;
+		default:
+			e->cliplo_gain = 0x106a;
+			break;
+		}
+	} else if (ghz5 && dev->phy.rev == 4 && ext_lna) {
+		e->rfseq_init[0] &= ~0x4000;
+		e->rfseq_init[1] &= ~0x4000;
+		e->rfseq_init[2] &= ~0x4000;
+		e->rfseq_init[3] &= ~0x4000;
+		e->init_gain &= ~0x4000;
+		e->rfseq_init[0] |= 0x1000;
+		e->rfseq_init[1] |= 0x1000;
+		e->rfseq_init[2] |= 0x1000;
+		e->rfseq_init[3] |= 0x1000;
+		e->init_gain |= 0x1000;
 	}
 
 	return e;
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index a81696b..97038c4 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -126,26 +126,29 @@
 #define B43_NTAB_C1_LOFEEDTH		B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
 #define B43_NTAB_C1_LOFEEDTH_SIZE	128
 
+/* Volatile N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_ANT_SW_CTL_R3		B43_NTAB16( 9,   0) /* antenna software control */
+
 /* Static N-PHY tables, PHY revision >= 3 */
-#define B43_NTAB_FRAMESTRUCT_R3		B43_NTAB32(10, 000) /* frame struct  */
-#define B43_NTAB_PILOT_R3		B43_NTAB16(11, 000) /* pilot  */
-#define B43_NTAB_TMAP_R3		B43_NTAB32(12, 000) /* TM AP  */
-#define B43_NTAB_INTLEVEL_R3		B43_NTAB32(13, 000) /* INT LV  */
-#define B43_NTAB_TDTRN_R3		B43_NTAB32(14, 000) /* TD TRN  */
-#define B43_NTAB_NOISEVAR0_R3		B43_NTAB32(16, 000) /* noise variance 0  */
+#define B43_NTAB_FRAMESTRUCT_R3		B43_NTAB32(10,   0) /* frame struct  */
+#define B43_NTAB_PILOT_R3		B43_NTAB16(11,   0) /* pilot  */
+#define B43_NTAB_TMAP_R3		B43_NTAB32(12,   0) /* TM AP  */
+#define B43_NTAB_INTLEVEL_R3		B43_NTAB32(13,   0) /* INT LV  */
+#define B43_NTAB_TDTRN_R3		B43_NTAB32(14,   0) /* TD TRN  */
+#define B43_NTAB_NOISEVAR0_R3		B43_NTAB32(16,   0) /* noise variance 0  */
 #define B43_NTAB_NOISEVAR1_R3		B43_NTAB32(16, 128) /* noise variance 1  */
-#define B43_NTAB_MCS_R3			B43_NTAB16(18, 000) /* MCS  */
+#define B43_NTAB_MCS_R3			B43_NTAB16(18,   0) /* MCS  */
 #define B43_NTAB_TDI20A0_R3		B43_NTAB32(19, 128) /* TDI 20/0  */
 #define B43_NTAB_TDI20A1_R3		B43_NTAB32(19, 256) /* TDI 20/1  */
 #define B43_NTAB_TDI40A0_R3		B43_NTAB32(19, 640) /* TDI 40/0  */
 #define B43_NTAB_TDI40A1_R3		B43_NTAB32(19, 768) /* TDI 40/1  */
-#define B43_NTAB_PILOTLT_R3		B43_NTAB32(20, 000) /* PLT lookup  */
-#define B43_NTAB_CHANEST_R3		B43_NTAB32(22, 000) /* channel estimate  */
-#define B43_NTAB_FRAMELT_R3		B43_NTAB8 (24, 000) /* frame lookup  */
-#define B43_NTAB_C0_ESTPLT_R3		B43_NTAB8 (26, 000) /* estimated power lookup 0  */
-#define B43_NTAB_C1_ESTPLT_R3		B43_NTAB8 (27, 000) /* estimated power lookup 1  */
-#define B43_NTAB_C0_ADJPLT_R3		B43_NTAB8 (26, 064) /* adjusted power lookup 0  */
-#define B43_NTAB_C1_ADJPLT_R3		B43_NTAB8 (27, 064) /* adjusted power lookup 1  */
+#define B43_NTAB_PILOTLT_R3		B43_NTAB32(20,   0) /* PLT lookup  */
+#define B43_NTAB_CHANEST_R3		B43_NTAB32(22,   0) /* channel estimate  */
+#define B43_NTAB_FRAMELT_R3		 B43_NTAB8(24,   0) /* frame lookup  */
+#define B43_NTAB_C0_ESTPLT_R3		 B43_NTAB8(26,   0) /* estimated power lookup 0  */
+#define B43_NTAB_C1_ESTPLT_R3		 B43_NTAB8(27,   0) /* estimated power lookup 1  */
+#define B43_NTAB_C0_ADJPLT_R3		 B43_NTAB8(26,  64) /* adjusted power lookup 0  */
+#define B43_NTAB_C1_ADJPLT_R3		 B43_NTAB8(27,  64) /* adjusted power lookup 1  */
 #define B43_NTAB_C0_GAINCTL_R3		B43_NTAB32(26, 192) /* gain control lookup 0  */
 #define B43_NTAB_C1_GAINCTL_R3		B43_NTAB32(27, 192) /* gain control lookup 1  */
 #define B43_NTAB_C0_IQLT_R3		B43_NTAB32(26, 320) /* I/Q lookup 0  */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 5f77cbe..2c53678 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -874,7 +874,7 @@
 			      struct ieee80211_tx_info *report,
 			      const struct b43_txstatus *status)
 {
-	bool frame_success = 1;
+	bool frame_success = true;
 	int retry_limit;
 
 	/* preserve the configured retry limit before clearing the status
@@ -890,7 +890,7 @@
 		/* The frame was not ACKed... */
 		if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
 			/* ...but we expected an ACK. */
-			frame_success = 0;
+			frame_success = false;
 		}
 	}
 	if (status->frame_count == 0) {
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 1d4fc9d..98e3d44 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -560,8 +560,16 @@
 	u8 algorithm;
 };
 
+#define B43legacy_QOS_QUEUE_NUM	4
+
 struct b43legacy_wldev;
 
+/* QOS parameters for a queue. */
+struct b43legacy_qos_params {
+	/* The QOS parameters */
+	struct ieee80211_tx_queue_params p;
+};
+
 /* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */
 struct b43legacy_wl {
 	/* Pointer to the active wireless device on this chip */
@@ -611,6 +619,18 @@
 	bool beacon1_uploaded;
 	bool beacon_templates_virgin; /* Never wrote the templates? */
 	struct work_struct beacon_update_trigger;
+	/* The current QOS parameters for the 4 queues. */
+	struct b43legacy_qos_params qos_params[B43legacy_QOS_QUEUE_NUM];
+
+	/* Packet transmit work */
+	struct work_struct tx_work;
+
+	/* Queue of packets to be transmitted. */
+	struct sk_buff_head tx_queue[B43legacy_QOS_QUEUE_NUM];
+
+	/* Flags that indicate which queues are stopped. */
+	bool tx_queue_stopped[B43legacy_QOS_QUEUE_NUM];
+
 };
 
 /* Pointers to the firmware data and meta information about it. */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c5535ad..f1f8bd0 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -715,7 +715,7 @@
 	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
 	ring->index = controller_index;
 	if (for_tx) {
-		ring->tx = 1;
+		ring->tx = true;
 		ring->current_slot = -1;
 	} else {
 		if (ring->index == 0) {
@@ -727,7 +727,6 @@
 		} else
 			B43legacy_WARN_ON(1);
 	}
-	spin_lock_init(&ring->lock);
 #ifdef CONFIG_B43LEGACY_DEBUG
 	ring->last_injected_overflow = jiffies;
 #endif
@@ -806,7 +805,7 @@
 static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
 {
 	u64 orig_mask = mask;
-	bool fallback = 0;
+	bool fallback = false;
 	int err;
 
 	/* Try to set the DMA mask. If it fails, try falling back to a
@@ -820,12 +819,12 @@
 		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
-			fallback = 1;
+			fallback = true;
 			continue;
 		}
 		if (mask == DMA_BIT_MASK(32)) {
 			mask = DMA_BIT_MASK(30);
-			fallback = 1;
+			fallback = true;
 			continue;
 		}
 		b43legacyerr(dev->wl, "The machine/kernel does not support "
@@ -858,7 +857,7 @@
 #ifdef CONFIG_B43LEGACY_PIO
 		b43legacywarn(dev->wl, "DMA for this device not supported. "
 			"Falling back to PIO\n");
-		dev->__using_pio = 1;
+		dev->__using_pio = true;
 		return -EAGAIN;
 #else
 		b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1068,7 +1067,7 @@
 	memset(meta, 0, sizeof(*meta));
 
 	meta->skb = skb;
-	meta->is_last_fragment = 1;
+	meta->is_last_fragment = true;
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
@@ -1144,10 +1143,8 @@
 {
 	struct b43legacy_dmaring *ring;
 	int err = 0;
-	unsigned long flags;
 
 	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
-	spin_lock_irqsave(&ring->lock, flags);
 	B43legacy_WARN_ON(!ring->tx);
 
 	if (unlikely(ring->stopped)) {
@@ -1157,16 +1154,14 @@
 		 * For now, just refuse the transmit. */
 		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
 			b43legacyerr(dev->wl, "Packet after queue stopped\n");
-		err = -ENOSPC;
-		goto out_unlock;
+		return -ENOSPC;
 	}
 
 	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
 		/* If we get here, we have a real error with the queue
 		 * full, but queues not stopped. */
 		b43legacyerr(dev->wl, "DMA queue overflow\n");
-		err = -ENOSPC;
-		goto out_unlock;
+		return -ENOSPC;
 	}
 
 	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
@@ -1176,25 +1171,23 @@
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
 		dev_kfree_skb_any(skb);
-		err = 0;
-		goto out_unlock;
+		return 0;
 	}
 	if (unlikely(err)) {
 		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
-		goto out_unlock;
+		return err;
 	}
 	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
 	    should_inject_overflow(ring)) {
 		/* This TX ring is full. */
-		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
-		ring->stopped = 1;
+		unsigned int skb_mapping = skb_get_queue_mapping(skb);
+		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+		dev->wl->tx_queue_stopped[skb_mapping] = 1;
+		ring->stopped = true;
 		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
 			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
 			       ring->index);
 	}
-out_unlock:
-	spin_unlock_irqrestore(&ring->lock, flags);
-
 	return err;
 }
 
@@ -1205,14 +1198,29 @@
 	struct b43legacy_dmadesc_meta *meta;
 	int retry_limit;
 	int slot;
+	int firstused;
 
 	ring = parse_cookie(dev, status->cookie, &slot);
 	if (unlikely(!ring))
 		return;
-	B43legacy_WARN_ON(!irqs_disabled());
-	spin_lock(&ring->lock);
-
 	B43legacy_WARN_ON(!ring->tx);
+
+	/* Sanity check: TX packets are processed in-order on one ring.
+	 * Check if the slot deduced from the cookie really is the first
+	 * used slot. */
+	firstused = ring->current_slot - ring->used_slots + 1;
+	if (firstused < 0)
+		firstused = ring->nr_slots + firstused;
+	if (unlikely(slot != firstused)) {
+		/* This is possibly a firmware bug and will result in
+		 * malfunction, memory leaks and/or stall of DMA functionality.
+		 */
+		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
+			     "ring %d. Expected %d, but got %d\n",
+			     ring->index, firstused, slot);
+		return;
+	}
+
 	while (1) {
 		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
 		op32_idx2desc(ring, slot, &meta);
@@ -1285,14 +1293,21 @@
 	dev->stats.last_tx = jiffies;
 	if (ring->stopped) {
 		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
-		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
-		ring->stopped = 0;
-		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
-			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
-			       ring->index);
+		ring->stopped = false;
 	}
 
-	spin_unlock(&ring->lock);
+	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+	} else {
+		/* If the driver queue is running, wake the corresponding
+		 * mac80211 queue. */
+		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
+			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
+				     ring->index);
+	}
+	/* Add work to the queue. */
+	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
 }
 
 static void dma_rx(struct b43legacy_dmaring *ring,
@@ -1415,22 +1430,14 @@
 
 static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->lock, flags);
 	B43legacy_WARN_ON(!ring->tx);
 	op32_tx_suspend(ring);
-	spin_unlock_irqrestore(&ring->lock, flags);
 }
 
 static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->lock, flags);
 	B43legacy_WARN_ON(!ring->tx);
 	op32_tx_resume(ring);
-	spin_unlock_irqrestore(&ring->lock, flags);
 }
 
 void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 504a587..c3282f9 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -150,8 +150,9 @@
 	enum b43legacy_dmatype type;
 	/* Boolean. Is this ring stopped at ieee80211 level? */
 	bool stopped;
-	/* Lock, only used for TX. */
-	spinlock_t lock;
+	/* The QOS priority assigned to this ring. Only used for TX rings.
+	 * This is the mac80211 "queue" value. */
+	u8 queue_prio;
 	struct b43legacy_wldev *dev;
 #ifdef CONFIG_B43LEGACY_DEBUG
 	/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 2f1bfdc..fd45653 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -203,11 +203,11 @@
 		if (sprom[i] == 0xFF) {
 			/* There is no LED information in the SPROM
 			 * for this LED. Hardcode it here. */
-			activelow = 0;
+			activelow = false;
 			switch (i) {
 			case 0:
 				behaviour = B43legacy_LED_ACTIVITY;
-				activelow = 1;
+				activelow = true;
 				if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
 					behaviour = B43legacy_LED_RADIO_ALL;
 				break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 20f0243..75e70bc 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -722,9 +722,9 @@
 	macctl &= ~B43legacy_MACCTL_GMODE;
 	if (flags & B43legacy_TMSLOW_GMODE) {
 		macctl |= B43legacy_MACCTL_GMODE;
-		dev->phy.gmode = 1;
+		dev->phy.gmode = true;
 	} else
-		dev->phy.gmode = 0;
+		dev->phy.gmode = false;
 	macctl |= B43legacy_MACCTL_IHR_ENABLED;
 	b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
 }
@@ -811,7 +811,7 @@
 	if (dev->noisecalc.calculation_running)
 		return;
 	dev->noisecalc.channel_at_start = dev->phy.channel;
-	dev->noisecalc.calculation_running = 1;
+	dev->noisecalc.calculation_running = true;
 	dev->noisecalc.nr_samples = 0;
 
 	b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@
 
 		dev->stats.link_noise = average;
 drop_calculation:
-		dev->noisecalc.calculation_running = 0;
+		dev->noisecalc.calculation_running = false;
 		return;
 	}
 generate_new:
@@ -889,7 +889,7 @@
 			b43legacy_power_saving_ctl_bits(dev, -1, -1);
 	}
 	if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
-		dev->dfq_valid = 1;
+		dev->dfq_valid = true;
 }
 
 static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@
 		b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
 				  b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
 				  | B43legacy_MACCMD_DFQ_VALID);
-		dev->dfq_valid = 0;
+		dev->dfq_valid = false;
 	}
 }
 
@@ -971,7 +971,7 @@
 	unsigned int i, len, variable_len;
 	const struct ieee80211_mgmt *bcn;
 	const u8 *ie;
-	bool tim_found = 0;
+	bool tim_found = false;
 	unsigned int rate;
 	u16 ctl;
 	int antenna;
@@ -1019,7 +1019,7 @@
 			/* A valid TIM is at least 4 bytes long. */
 			if (ie_len < 4)
 				break;
-			tim_found = 1;
+			tim_found = true;
 
 			tim_position = sizeof(struct b43legacy_plcp_hdr6);
 			tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@
 	 *        but we don't use that feature anyway. */
 	b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
 				      &__b43legacy_ratetable[3]);
-	wl->beacon0_uploaded = 1;
+	wl->beacon0_uploaded = true;
 }
 
 static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@
 	if (wl->beacon1_uploaded)
 		return;
 	b43legacy_write_beacon_template(dev, 0x468, 0x1A);
-	wl->beacon1_uploaded = 1;
+	wl->beacon1_uploaded = true;
 }
 
 static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@
 	if (unlikely(wl->beacon_templates_virgin)) {
 		/* We never uploaded a beacon before.
 		 * Upload both templates now, but only mark one valid. */
-		wl->beacon_templates_virgin = 0;
+		wl->beacon_templates_virgin = false;
 		b43legacy_upload_beacon0(dev);
 		b43legacy_upload_beacon1(dev);
 		cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@
 	if (wl->current_beacon)
 		dev_kfree_skb_any(wl->current_beacon);
 	wl->current_beacon = beacon;
-	wl->beacon0_uploaded = 0;
-	wl->beacon1_uploaded = 0;
+	wl->beacon0_uploaded = false;
+	wl->beacon1_uploaded = false;
 	ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
 }
 
@@ -2440,30 +2440,64 @@
 	return err;
 }
 
+static void b43legacy_tx_work(struct work_struct *work)
+{
+	struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
+				  tx_work);
+	struct b43legacy_wldev *dev;
+	struct sk_buff *skb;
+	int queue_num;
+	int err = 0;
+
+	mutex_lock(&wl->mutex);
+	dev = wl->current_dev;
+	if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) {
+		mutex_unlock(&wl->mutex);
+		return;
+	}
+
+	for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+		while (skb_queue_len(&wl->tx_queue[queue_num])) {
+			skb = skb_dequeue(&wl->tx_queue[queue_num]);
+			if (b43legacy_using_pio(dev))
+				err = b43legacy_pio_tx(dev, skb);
+			else
+				err = b43legacy_dma_tx(dev, skb);
+			if (err == -ENOSPC) {
+				wl->tx_queue_stopped[queue_num] = 1;
+				ieee80211_stop_queue(wl->hw, queue_num);
+				skb_queue_head(&wl->tx_queue[queue_num], skb);
+				break;
+			}
+			if (unlikely(err))
+				dev_kfree_skb(skb); /* Drop it */
+			err = 0;
+		}
+
+		if (!err)
+			wl->tx_queue_stopped[queue_num] = 0;
+	}
+
+	mutex_unlock(&wl->mutex);
+}
+
 static void b43legacy_op_tx(struct ieee80211_hw *hw,
 			    struct sk_buff *skb)
 {
 	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
-	struct b43legacy_wldev *dev = wl->current_dev;
-	int err = -ENODEV;
-	unsigned long flags;
 
-	if (unlikely(!dev))
-		goto out;
-	if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED))
-		goto out;
-	/* DMA-TX is done without a global lock. */
-	if (b43legacy_using_pio(dev)) {
-		spin_lock_irqsave(&wl->irq_lock, flags);
-		err = b43legacy_pio_tx(dev, skb);
-		spin_unlock_irqrestore(&wl->irq_lock, flags);
-	} else
-		err = b43legacy_dma_tx(dev, skb);
-out:
-	if (unlikely(err)) {
-		/* Drop the packet. */
+	if (unlikely(skb->len < 2 + 2 + 6)) {
+		/* Too short, this can't be a valid frame. */
 		dev_kfree_skb_any(skb);
+		return;
 	}
+	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags);
+
+	skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+	if (!wl->tx_queue_stopped[skb->queue_mapping])
+		ieee80211_queue_work(wl->hw, &wl->tx_work);
+	else
+		ieee80211_stop_queue(wl->hw, skb->queue_mapping);
 }
 
 static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
@@ -2510,7 +2544,7 @@
 		if (d->phy.possible_phymodes & phymode) {
 			/* Ok, this device supports the PHY-mode.
 			 * Set the gmode bit. */
-			*gmode = 1;
+			*gmode = true;
 			*dev = d;
 
 			return 0;
@@ -2546,7 +2580,7 @@
 	struct b43legacy_wldev *uninitialized_var(up_dev);
 	struct b43legacy_wldev *down_dev;
 	int err;
-	bool gmode = 0;
+	bool gmode = false;
 	int prev_status;
 
 	err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -2879,6 +2913,7 @@
 {
 	struct b43legacy_wl *wl = dev->wl;
 	unsigned long flags;
+	int queue_num;
 
 	if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
 		return;
@@ -2898,11 +2933,16 @@
 	/* Must unlock as it would otherwise deadlock. No races here.
 	 * Cancel the possibly running self-rearming periodic work. */
 	cancel_delayed_work_sync(&dev->periodic_work);
+	cancel_work_sync(&wl->tx_work);
 	mutex_lock(&wl->mutex);
 
-	ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */
+	/* Drain all TX queues. */
+	for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+		while (skb_queue_len(&wl->tx_queue[queue_num]))
+			dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+	}
 
 	b43legacy_mac_suspend(dev);
 	free_irq(dev->dev->irq, dev);
 	b43legacydbg(wl, "Wireless interface stopped\n");
 }
@@ -3044,12 +3084,12 @@
 
 	/* Assume the radio is enabled. If it's not enabled, the state will
 	 * immediately get fixed on the first periodic work run. */
-	dev->radio_hw_enable = 1;
+	dev->radio_hw_enable = true;
 
 	phy->savedpctlreg = 0xFFFF;
-	phy->aci_enable = 0;
-	phy->aci_wlan_automatic = 0;
-	phy->aci_hw_rssi = 0;
+	phy->aci_enable = false;
+	phy->aci_wlan_automatic = false;
+	phy->aci_hw_rssi = false;
 
 	lo = phy->_lo_pairs;
 	if (lo)
@@ -3081,7 +3121,7 @@
 static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
 {
 	/* Flags */
-	dev->dfq_valid = 0;
+	dev->dfq_valid = false;
 
 	/* Stats */
 	memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3187,9 +3227,9 @@
 	phy->lofcal = 0xFFFF;
 	phy->initval = 0xFFFF;
 
-	phy->aci_enable = 0;
-	phy->aci_wlan_automatic = 0;
-	phy->aci_hw_rssi = 0;
+	phy->aci_enable = false;
+	phy->aci_wlan_automatic = false;
+	phy->aci_hw_rssi = false;
 
 	phy->antenna_diversity = 0xFFFF;
 	memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3355,7 +3395,7 @@
 	b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
 
 	dev = wl->current_dev;
-	wl->operating = 1;
+	wl->operating = true;
 	wl->vif = vif;
 	wl->if_type = vif->type;
 	memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3389,7 +3429,7 @@
 	B43legacy_WARN_ON(wl->vif != vif);
 	wl->vif = NULL;
 
-	wl->operating = 0;
+	wl->operating = false;
 
 	spin_lock_irqsave(&wl->irq_lock, flags);
 	b43legacy_adjust_opmode(dev);
@@ -3413,10 +3453,10 @@
 	memset(wl->bssid, 0, ETH_ALEN);
 	memset(wl->mac_addr, 0, ETH_ALEN);
 	wl->filter_flags = 0;
-	wl->beacon0_uploaded = 0;
-	wl->beacon1_uploaded = 0;
-	wl->beacon_templates_virgin = 1;
-	wl->radio_enabled = 1;
+	wl->beacon0_uploaded = false;
+	wl->beacon1_uploaded = false;
+	wl->beacon_templates_virgin = true;
+	wl->radio_enabled = true;
 
 	mutex_lock(&wl->mutex);
 
@@ -3455,7 +3495,7 @@
 	if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
 		b43legacy_wireless_core_stop(dev);
 	b43legacy_wireless_core_exit(dev);
-	wl->radio_enabled = 0;
+	wl->radio_enabled = false;
 	mutex_unlock(&wl->mutex);
 }
 
@@ -3614,7 +3654,7 @@
 		have_bphy = 1;
 
 	dev->phy.gmode = (have_gphy || have_bphy);
-	dev->phy.radio_on = 1;
+	dev->phy.radio_on = true;
 	tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
 	b43legacy_wireless_core_reset(dev, tmp);
 
@@ -3705,7 +3745,7 @@
 		     (void (*)(unsigned long))b43legacy_interrupt_tasklet,
 		     (unsigned long)wldev);
 	if (modparam_pio)
-		wldev->__using_pio = 1;
+		wldev->__using_pio = true;
 	INIT_LIST_HEAD(&wldev->list);
 
 	err = b43legacy_wireless_core_attach(wldev);
@@ -3748,6 +3788,7 @@
 	struct ieee80211_hw *hw;
 	struct b43legacy_wl *wl;
 	int err = -ENOMEM;
+	int queue_num;
 
 	b43legacy_sprom_fixup(dev->bus);
 
@@ -3782,6 +3823,13 @@
 	mutex_init(&wl->mutex);
 	INIT_LIST_HEAD(&wl->devlist);
 	INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
+	INIT_WORK(&wl->tx_work, b43legacy_tx_work);
+
+	/* Initialize queues and flags. */
+	for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+		skb_queue_head_init(&wl->tx_queue[queue_num]);
+		wl->tx_queue_stopped[queue_num] = 0;
+	}
 
 	ssb_set_devtypedata(dev, wl);
 	b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n",
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 475eb14..fcbafcd 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1067,7 +1067,7 @@
 		if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
 			break;
 
-		phy->aci_enable = 1;
+		phy->aci_enable = true;
 
 		phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
 		phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@
 		if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
 			break;
 
-		phy->aci_enable = 0;
+		phy->aci_enable = false;
 
 		phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
 		phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@
 	    (phy->rev == 0) || (!phy->gmode))
 		return -ENODEV;
 
-	phy->aci_wlan_automatic = 0;
+	phy->aci_wlan_automatic = false;
 	switch (mode) {
 	case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
-		phy->aci_wlan_automatic = 1;
+		phy->aci_wlan_automatic = true;
 		if (phy->aci_enable)
 			mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
 		else
@@ -1371,8 +1371,8 @@
 								currentmode);
 
 	if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
-		phy->aci_enable = 0;
-		phy->aci_hw_rssi = 0;
+		phy->aci_enable = false;
+		phy->aci_hw_rssi = false;
 	} else
 		b43legacy_radio_interference_mitigation_enable(dev, mode);
 	phy->interfmode = mode;
@@ -2102,7 +2102,7 @@
 					    phy->radio_off_context.rfover);
 			b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
 					    phy->radio_off_context.rfoverval);
-			phy->radio_off_context.valid = 0;
+			phy->radio_off_context.valid = false;
 		}
 		channel = phy->channel;
 		err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@
 	default:
 		B43legacy_BUG_ON(1);
 	}
-	phy->radio_on = 1;
+	phy->radio_on = true;
 }
 
 void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@
 		if (!force) {
 			phy->radio_off_context.rfover = rfover;
 			phy->radio_off_context.rfoverval = rfoverval;
-			phy->radio_off_context.valid = 1;
+			phy->radio_off_context.valid = true;
 		}
 		b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
 		b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
 				    rfoverval & 0xFF73);
 	} else
 		b43legacy_phy_write(dev, 0x0015, 0xAA00);
-	phy->radio_on = 0;
+	phy->radio_on = false;
 	b43legacydbg(dev->wl, "Radio initialized\n");
 }
 
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 2069fc8..cd6375d 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -3,9 +3,8 @@
 
 config BRCMSMAC
 	tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
-	depends on PCI
 	depends on MAC80211
-	depends on BCMA=n
+	depends on BCMA
 	select BRCMUTIL
 	select FW_LOADER
 	select CRC_CCITT
@@ -18,16 +17,26 @@
 
 config BRCMFMAC
 	tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
-	depends on MMC
 	depends on CFG80211
 	select BRCMUTIL
-	select FW_LOADER
 	---help---
 	  This module adds support for embedded wireless adapters based on
-	  Broadcom IEEE802.11n FullMAC chipsets.  This driver uses the kernel's
-	  wireless extensions subsystem.  If you choose to build a module,
+	  Broadcom IEEE802.11n FullMAC chipsets. It must be used with at least
+	  one of the supported bus interfaces. If you choose to build a module,
 	  it'll be called brcmfmac.ko.
 
+config BRCMFMAC_SDIO
+	bool "SDIO bus interface support for FullMAC"
+	depends on MMC
+	depends on BRCMFMAC
+	select FW_LOADER
+	default y
+	---help---
+	  This option enables SDIO bus interface support for the Broadcom
+	  FullMAC WLAN driver.
+	  Say Y if you want to use brcmfmac for a compatible SDIO interface
+	  wireless card.
+
 config BRCMDBG
 	bool "Broadcom driver debug functions"
 	depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index b44e309..9ca9ea1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -19,15 +19,16 @@
 	-Idrivers/net/wireless/brcm80211/brcmfmac	\
 	-Idrivers/net/wireless/brcm80211/include
 
-DHDOFILES = \
-	wl_cfg80211.o \
-	dhd_cdc.o \
-	dhd_common.o \
-	dhd_sdio.o	\
-	dhd_linux.o \
-	bcmsdh.o \
-	bcmsdh_sdmmc.o
-
 obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
-brcmfmac-objs += $(DHDOFILES)
+brcmfmac-objs += \
+		wl_cfg80211.o \
+		dhd_cdc.o \
+		dhd_common.o \
+		dhd_linux.o
+brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+		dhd_sdio.o \
+		bcmsdh.o \
+		bcmsdh_sdmmc.o \
+		sdio_chip.o
+
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644
index d7d3afd..0000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE		0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE	0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE		0x18002000
-#define BCM4329_RAMSIZE			0x48000
-/* firmware name */
-#define BCM4329_FW_NAME			"brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME			"brcm/bcm4329-fullmac-4.txt"
-
-#endif				/* _bcmchip_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 89ff94d..4bc8d25 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -31,7 +31,6 @@
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 #include <soc.h>
-#include "dhd.h"
 #include "dhd_bus.h"
 #include "dhd_dbg.h"
 #include "sdio_host.h"
@@ -51,12 +50,18 @@
 	sdio_claim_host(func);
 }
 
+/* dummy handler for SDIO function 2 interrupt */
+static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func)
+{
+}
+
 int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
 {
 	brcmf_dbg(TRACE, "Entering\n");
 
 	sdio_claim_host(sdiodev->func[1]);
 	sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
+	sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler);
 	sdio_release_host(sdiodev->func[1]);
 
 	return 0;
@@ -67,6 +72,7 @@
 	brcmf_dbg(TRACE, "Entering\n");
 
 	sdio_claim_host(sdiodev->func[1]);
+	sdio_release_irq(sdiodev->func[2]);
 	sdio_release_irq(sdiodev->func[1]);
 	sdio_release_host(sdiodev->func[1]);
 
@@ -222,19 +228,12 @@
 	return sdiodev->regfail;
 }
 
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags,
-		      u8 *buf, uint nbytes, struct sk_buff *pkt)
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+				     uint flags, uint width, u32 *addr)
 {
-	int status;
-	uint incr_fix;
-	uint width;
-	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
 	int err = 0;
 
-	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
 	/* Async not implemented yet */
 	if (flags & SDIO_REQ_ASYNC)
 		return -ENOTSUPP;
@@ -247,29 +246,114 @@
 		sdiodev->sbwad = bar0;
 	}
 
-	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	*addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	if (width == 4)
+		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+		      uint flags, u8 *buf, uint nbytes)
+{
+	struct sk_buff *mypkt;
+	int err;
+
+	mypkt = brcmu_pkt_buf_get_skb(nbytes);
+	if (!mypkt) {
+		brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+			  nbytes);
+		return -EIO;
+	}
+
+	err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+	if (!err)
+		memcpy(buf, mypkt->data, nbytes);
+
+	brcmu_pkt_buf_free_skb(mypkt);
+	return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+		      uint flags, struct sk_buff *pkt)
+{
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+		  fn, addr, pkt->len);
+
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+	if (err)
+		return err;
 
 	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+					 fn, addr, pkt);
+
+	return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			    uint flags, struct sk_buff_head *pktq)
+{
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+		  fn, addr, pktq->qlen);
+
 	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-	if (width == 4)
-		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+	if (err)
+		return err;
 
-	status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
-					    fn, addr, width, nbytes, buf, pkt);
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+					pktq);
 
-	return status;
+	return err;
 }
 
 int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+		      uint flags, u8 *buf, uint nbytes)
+{
+	struct sk_buff *mypkt;
+	int err;
+
+	mypkt = brcmu_pkt_buf_get_skb(nbytes);
+	if (!mypkt) {
+		brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+			  nbytes);
+		return -EIO;
+	}
+
+	memcpy(mypkt->data, buf, nbytes);
+	err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+	brcmu_pkt_buf_free_skb(mypkt);
+	return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+		      uint flags, struct sk_buff *pkt)
 {
 	uint incr_fix;
 	uint width;
 	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
 	int err = 0;
 
-	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+	brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+		  fn, addr, pkt->len);
 
 	/* Async not implemented yet */
 	if (flags & SDIO_REQ_ASYNC)
@@ -291,18 +375,39 @@
 		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
 	return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
-					  addr, width, nbytes, buf, pkt);
+					  addr, pkt);
 }
 
 int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
 			u8 *buf, uint nbytes)
 {
+	struct sk_buff *mypkt;
+	bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+	int err;
+
 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-	return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
-		(rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
-		addr, 4, nbytes, buf, NULL);
+	mypkt = brcmu_pkt_buf_get_skb(nbytes);
+	if (!mypkt) {
+		brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+			  nbytes);
+		return -EIO;
+	}
+
+	/* For a write, copy the buffer data into the packet. */
+	if (write)
+		memcpy(mypkt->data, buf, nbytes);
+
+	err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+					 SDIO_FUNC_1, addr, mypkt);
+
+	/* For a read, copy the packet data back to the buffer. */
+	if (!err && !write)
+		memcpy(buf, mypkt->data, nbytes);
+
+	brcmu_pkt_buf_free_skb(mypkt);
+	return err;
 }
 
 int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -333,7 +438,7 @@
 	sdiodev->sbwad = SI_ENUM_BASE;
 
 	/* try to attach to the target device */
-	sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+	sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
 	if (!sdiodev->bus) {
 		brcmf_dbg(ERROR, "device attach failed\n");
 		ret = -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index bbaeb2d..9b8c0ed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -31,15 +31,15 @@
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 #include "sdio_host.h"
-#include "dhd.h"
 #include "dhd_dbg.h"
-#include "wl_cfg80211.h"
+#include "dhd_bus.h"
 
 #define SDIO_VENDOR_ID_BROADCOM		0x02d0
 
 #define DMA_ALIGN_MASK	0x03
 
 #define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330	0x4330
 
 #define SDIO_FUNC1_BLOCKSIZE		64
 #define SDIO_FUNC2_BLOCKSIZE		512
@@ -47,6 +47,7 @@
 /* devices we support, null terminated */
 static const struct sdio_device_id brcmf_sdmmc_ids[] = {
 	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
 	{ /* end: all zeroes */ },
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +205,75 @@
 	return err_ret;
 }
 
+/* precondition: host controller is claimed */
 static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
-			   uint write, uint func, uint addr,
-			   struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+			 uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+	int err_ret = 0;
+
+	if ((write) && (!fifo)) {
+		err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+					   ((u8 *) (pkt->data)), pktlen);
+	} else if (write) {
+		err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+					   ((u8 *) (pkt->data)), pktlen);
+	} else if (fifo) {
+		err_ret = sdio_readsb(sdiodev->func[func],
+				      ((u8 *) (pkt->data)), addr, pktlen);
+	} else {
+		err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+					     ((u8 *) (pkt->data)),
+					     addr, pktlen);
+	}
+
+	return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+			  uint write, uint func, uint addr,
+			  struct sk_buff_head *pktq)
 {
 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
 	u32 SGCount = 0;
 	int err_ret = 0;
 
-	struct sk_buff *pnext;
+	struct sk_buff *pkt;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
 	if (brcmf_pm_resume_error(sdiodev))
 		return -EIO;
 
 	/* Claim host controller */
 	sdio_claim_host(sdiodev->func[func]);
-	for (pnext = pkt; pnext; pnext = pnext->next) {
-		uint pkt_len = pnext->len;
+
+	skb_queue_walk(pktq, pkt) {
+		uint pkt_len = pkt->len;
 		pkt_len += 3;
 		pkt_len &= 0xFFFFFFFC;
 
-		if ((write) && (!fifo)) {
-			err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-						   ((u8 *) (pnext->data)),
-						   pkt_len);
-		} else if (write) {
-			err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-						   ((u8 *) (pnext->data)),
-						   pkt_len);
-		} else if (fifo) {
-			err_ret = sdio_readsb(sdiodev->func[func],
-					      ((u8 *) (pnext->data)),
-					      addr, pkt_len);
-		} else {
-			err_ret = sdio_memcpy_fromio(sdiodev->func[func],
-						     ((u8 *) (pnext->data)),
-						     addr, pkt_len);
-		}
-
+		err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+						   addr, pkt, pkt_len);
 		if (err_ret) {
 			brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
-				  write ? "TX" : "RX", pnext, SGCount, addr,
+				  write ? "TX" : "RX", pkt, SGCount, addr,
 				  pkt_len, err_ret);
 		} else {
 			brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
-				  write ? "TX" : "RX", pnext, SGCount, addr,
+				  write ? "TX" : "RX", pkt, SGCount, addr,
 				  pkt_len);
 		}
-
 		if (!fifo)
 			addr += pkt_len;
-		SGCount++;
 
+		SGCount++;
 	}
 
 	/* Release host controller */
@@ -270,91 +284,45 @@
 }
 
 /*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case,  it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
  */
 int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
 			       uint fix_inc, uint write, uint func, uint addr,
-			       uint reg_width, uint buflen_u, u8 *buffer,
 			       struct sk_buff *pkt)
 {
-	int Status;
-	struct sk_buff *mypkt = NULL;
+	int status;
+	uint pkt_len;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
 
 	brcmf_dbg(TRACE, "Enter\n");
 
+	if (pkt == NULL)
+		return -EINVAL;
+	pkt_len = pkt->len;
+
 	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
 	if (brcmf_pm_resume_error(sdiodev))
 		return -EIO;
-	/* Case 1: we don't have a packet. */
-	if (pkt == NULL) {
-		brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
-			  write ? "TX" : "RX", buflen_u);
-		mypkt = brcmu_pkt_buf_get_skb(buflen_u);
-		if (!mypkt) {
-			brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
-				  buflen_u);
-			return -EIO;
-		}
 
-		/* For a write, copy the buffer data into the packet. */
-		if (write)
-			memcpy(mypkt->data, buffer, buflen_u);
+	/* Claim host controller */
+	sdio_claim_host(sdiodev->func[func]);
 
-		Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-						    func, addr, mypkt);
+	pkt_len += 3;
+	pkt_len &= (uint)~3;
 
-		/* For a read, copy the packet data back to the buffer. */
-		if (!write)
-			memcpy(buffer, mypkt->data, buflen_u);
-
-		brcmu_pkt_buf_free_skb(mypkt);
-	} else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
-		/*
-		 * Case 2: We have a packet, but it is unaligned.
-		 * In this case, we cannot have a chain (pkt->next == NULL)
-		 */
-		brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
-			  write ? "TX" : "RX", pkt->len);
-		mypkt = brcmu_pkt_buf_get_skb(pkt->len);
-		if (!mypkt) {
-			brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
-				  pkt->len);
-			return -EIO;
-		}
-
-		/* For a write, copy the buffer data into the packet. */
-		if (write)
-			memcpy(mypkt->data, pkt->data, pkt->len);
-
-		Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-						    func, addr, mypkt);
-
-		/* For a read, copy the packet data back to the buffer. */
-		if (!write)
-			memcpy(pkt->data, mypkt->data, mypkt->len);
-
-		brcmu_pkt_buf_free_skb(mypkt);
-	} else {		/* case 3: We have a packet and
-				 it is aligned. */
-		brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
-			  write ? "Tx" : "Rx");
-		Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-						    func, addr, pkt);
+	status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+					   addr, pkt, pkt_len);
+	if (status) {
+		brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+			  write ? "TX" : "RX", pkt, addr, pkt_len, status);
+	} else {
+		brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+			  write ? "TX" : "RX", pkt, addr, pkt_len);
 	}
 
-	return Status;
+	/* Release host controller */
+	sdio_release_host(sdiodev->func[func]);
+
+	return status;
 }
 
 /* Read client card reg */
@@ -494,6 +462,7 @@
 {
 	int ret = 0;
 	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_bus *bus_if;
 	brcmf_dbg(TRACE, "Enter\n");
 	brcmf_dbg(TRACE, "func->class=%x\n", func->class);
 	brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +474,26 @@
 			brcmf_dbg(ERROR, "card private drvdata occupied\n");
 			return -ENXIO;
 		}
-		sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
-		if (!sdiodev)
+		bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+		if (!bus_if)
 			return -ENOMEM;
+		sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+		if (!sdiodev) {
+			kfree(bus_if);
+			return -ENOMEM;
+		}
 		sdiodev->func[0] = func->card->sdio_func[0];
 		sdiodev->func[1] = func;
+		sdiodev->bus_if = bus_if;
+		bus_if->bus_priv = sdiodev;
+		bus_if->type = SDIO_BUS;
+		bus_if->align = BRCMF_SDALIGN;
 		dev_set_drvdata(&func->card->dev, sdiodev);
 
 		atomic_set(&sdiodev->suspend, false);
 		init_waitqueue_head(&sdiodev->request_byte_wait);
 		init_waitqueue_head(&sdiodev->request_word_wait);
-		init_waitqueue_head(&sdiodev->request_packet_wait);
+		init_waitqueue_head(&sdiodev->request_chain_wait);
 		init_waitqueue_head(&sdiodev->request_buffer_wait);
 	}
 
@@ -525,6 +503,10 @@
 			return -ENODEV;
 		sdiodev->func[2] = func;
 
+		bus_if = sdiodev->bus_if;
+		sdiodev->dev = &func->dev;
+		dev_set_drvdata(&func->dev, bus_if);
+
 		brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
 		ret = brcmf_sdio_probe(sdiodev);
 	}
@@ -534,6 +516,7 @@
 
 static void brcmf_ops_sdio_remove(struct sdio_func *func)
 {
+	struct brcmf_bus *bus_if;
 	struct brcmf_sdio_dev *sdiodev;
 	brcmf_dbg(TRACE, "Enter\n");
 	brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@
 	brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
 
 	if (func->num == 2) {
-		sdiodev = dev_get_drvdata(&func->card->dev);
+		bus_if = dev_get_drvdata(&func->dev);
+		sdiodev = bus_if->bus_priv;
 		brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
 		brcmf_sdio_remove(sdiodev);
 		dev_set_drvdata(&func->card->dev, NULL);
+		dev_set_drvdata(&func->dev, NULL);
+		kfree(bus_if);
 		kfree(sdiodev);
 	}
 }
@@ -554,14 +540,12 @@
 static int brcmf_sdio_suspend(struct device *dev)
 {
 	mmc_pm_flag_t sdio_flags;
-	struct brcmf_sdio_dev *sdiodev;
 	struct sdio_func *func = dev_to_sdio_func(dev);
+	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
 	int ret = 0;
 
 	brcmf_dbg(TRACE, "\n");
 
-	sdiodev = dev_get_drvdata(&func->card->dev);
-
 	atomic_set(&sdiodev->suspend, true);
 
 	sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@
 
 static int brcmf_sdio_resume(struct device *dev)
 {
-	struct brcmf_sdio_dev *sdiodev;
 	struct sdio_func *func = dev_to_sdio_func(dev);
+	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
 
-	sdiodev = dev_get_drvdata(&func->card->dev);
 	brcmf_sdio_wdtmr_enable(sdiodev, true);
 	atomic_set(&sdiodev->suspend, false);
 	return 0;
@@ -610,17 +593,26 @@
 #endif	/* CONFIG_PM_SLEEP */
 };
 
-/* bus register interface */
-int brcmf_bus_register(void)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	return sdio_register_driver(&brcmf_sdmmc_driver);
-}
-
-void brcmf_bus_unregister(void)
+static void __exit brcmf_sdio_exit(void)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
 	sdio_unregister_driver(&brcmf_sdmmc_driver);
 }
+
+static int __init brcmf_sdio_init(void)
+{
+	int ret;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+	if (ret)
+		brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+	return ret;
+}
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
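
With this change the SDIO glue no longer exports brcmf_bus_register()/brcmf_bus_unregister(); it registers itself via module_init/module_exit, and it publishes a struct brcmf_bus through dev_set_drvdata() so the common layer can reach bus state from a bare struct device pointer. A short sketch of the lookup chain the rest of this series relies on (the helper name is illustrative):

	/* Illustrative only: recover common-layer state from a struct device. */
	static struct brcmf_pub *drvr_from_dev(struct device *dev)
	{
		struct brcmf_bus *bus_if = dev_get_drvdata(dev);	/* set in bus probe */

		return bus_if ? bus_if->drvr : NULL;	/* filled in by brcmf_attach() */
	}

The bus-private brcmf_sdio_dev remains reachable the other way round through bus_if->bus_priv, as brcmf_ops_sdio_remove() above shows.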
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 4645766..e58ea40 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -87,7 +87,7 @@
 #define TOE_TX_CSUM_OL		0x00000001
 #define TOE_RX_CSUM_OL		0x00000002
 
-#define	BRCMF_BSS_INFO_VERSION	108 /* current ver of brcmf_bss_info struct */
+#define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
 
 /* size of brcmf_scan_params not including variable length array */
 #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -122,8 +122,6 @@
 
 /* For supporting multiple interfaces */
 #define BRCMF_MAX_IFS	16
-#define BRCMF_DEL_IF	-0xe
-#define BRCMF_BAD_IF	-0xf
 
 #define DOT11_BSSTYPE_ANY			2
 #define DOT11_MAX_DEFAULT_KEYS	4
@@ -158,18 +156,6 @@
 	struct brcmf_event_msg msg;
 } __packed;
 
-struct dngl_stats {
-	unsigned long rx_packets;	/* total packets received */
-	unsigned long tx_packets;	/* total packets transmitted */
-	unsigned long rx_bytes;	/* total bytes received */
-	unsigned long tx_bytes;	/* total bytes transmitted */
-	unsigned long rx_errors;	/* bad packets received */
-	unsigned long tx_errors;	/* packet transmit problems */
-	unsigned long rx_dropped;	/* packets dropped by dongle */
-	unsigned long tx_dropped;	/* packets dropped by dongle */
-	unsigned long multicast;	/* multicast packets received */
-};
-
 /* event codes sent by the dongle to this driver */
 #define BRCMF_E_SET_SSID			0
 #define BRCMF_E_JOIN				1
@@ -319,13 +305,6 @@
 #define BRCMF_E_LINK_ASSOC_REC			3
 #define BRCMF_E_LINK_BSSCFG_DIS			4
 
-/* The level of bus communication with the dongle */
-enum brcmf_bus_state {
-	BRCMF_BUS_DOWN,		/* Not ready for frame transfers */
-	BRCMF_BUS_LOAD,		/* Download access only (CPU reset) */
-	BRCMF_BUS_DATA		/* Ready for frame transfers */
-};
-
 /* Pattern matching filter. Specifies an offset within received packets to
  * start matching, the pattern to match, the size of the pattern, and a bitmask
  * that indicates which bits within the pattern should be matched.
@@ -365,7 +344,7 @@
  * Applications MUST CHECK ie_offset field and length field to access IEs and
  * next bss_info structure in a vector (in struct brcmf_scan_results)
  */
-struct brcmf_bss_info {
+struct brcmf_bss_info_le {
 	__le32 version;		/* version field */
 	__le32 length;		/* byte length of data in this record,
 				 * starting at version and including IEs
@@ -466,14 +445,13 @@
 	u32 buflen;
 	u32 version;
 	u32 count;
-	struct brcmf_bss_info bss_info[1];
+	struct brcmf_bss_info_le bss_info_le[];
 };
 
 struct brcmf_scan_results_le {
 	__le32 buflen;
 	__le32 version;
 	__le32 count;
-	struct brcmf_bss_info bss_info[1];
 };
 
 /* used for association with a specific BSSID and chanspec list */
@@ -493,10 +471,6 @@
 	struct brcmf_assoc_params_le params_le;
 };
 
-/* size of brcmf_scan_results not including variable length array */
-#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
-	(sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
-
 /* incremental scan results struct */
 struct brcmf_iscan_results {
 	union {
@@ -511,7 +485,7 @@
 
 /* size of brcmf_iscan_results not including variable length array */
 #define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
-	(BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+	(sizeof(struct brcmf_scan_results) + \
 	 offsetof(struct brcmf_iscan_results, results))
 
 struct brcmf_wsec_key {
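
Replacing the one-element bss_info[1] array with a C99 flexible array member is what makes BRCMF_SCAN_RESULTS_FIXED_SIZE unnecessary: sizeof() of a struct ending in a flexible array already yields only the fixed header, so the ISCAN macro above can use sizeof(struct brcmf_scan_results) directly. A small, self-contained illustration with generic names (not driver code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct record { uint32_t a, b; };

	struct results {
		uint32_t count;
		struct record rec[];	/* flexible array member, excluded from sizeof */
	};

	int main(void)
	{
		size_t n = 4;
		/* allocate the fixed header plus n trailing records */
		struct results *r = malloc(sizeof(*r) + n * sizeof(r->rec[0]));

		if (!r)
			return 1;
		r->count = n;
		printf("header=%zu bytes, total=%zu bytes\n",
		       sizeof(struct results),
		       sizeof(*r) + n * sizeof(r->rec[0]));
		free(r);
		return 0;
	}

With the old bss_info[1] form, sizeof() counted one bogus element, which is why the removed macro had to subtract it back out.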
@@ -579,25 +553,19 @@
 };
 
 /* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus;		/* device bus info */
 struct brcmf_proto;	/* device communication protocol info */
-struct brcmf_info;	/* device driver info */
 struct brcmf_cfg80211_dev; /* cfg80211 device info */
 
 /* Common structure for module and instance linkage */
 struct brcmf_pub {
 	/* Linkage ponters */
-	struct brcmf_bus *bus;
+	struct brcmf_bus *bus_if;
 	struct brcmf_proto *prot;
-	struct brcmf_info *info;
 	struct brcmf_cfg80211_dev *config;
+	struct device *dev;		/* fullmac dongle device pointer */
 
 	/* Internal brcmf items */
-	bool up;		/* Driver up/down (to OS) */
-	bool txoff;		/* Transmit flow-controlled */
-	enum brcmf_bus_state busstate;
 	uint hdrlen;		/* Total BRCMF header length (proto + bus) */
-	uint maxctl;		/* Max size rxctl request from proto to bus */
 	uint rxsz;		/* Rx buffer size bus module should use */
 	u8 wme_dp;		/* wme discard priority */
 
@@ -605,48 +573,21 @@
 	bool iswl;		/* Dongle-resident driver is wl */
 	unsigned long drv_version;	/* Version of dongle-resident driver */
 	u8 mac[ETH_ALEN];		/* MAC address obtained from dongle */
-	struct dngl_stats dstats;	/* Stats for dongle-based data */
 
 	/* Additional stats for the bus level */
 
-	/* Data packets sent to dongle */
-	unsigned long tx_packets;
 	/* Multicast data packets sent to dongle */
 	unsigned long tx_multicast;
-	/* Errors in sending data to dongle */
-	unsigned long tx_errors;
-	/* Control packets sent to dongle */
-	unsigned long tx_ctlpkts;
-	/* Errors sending control frames to dongle */
-	unsigned long tx_ctlerrs;
-	/* Packets sent up the network interface */
-	unsigned long rx_packets;
-	/* Multicast packets sent up the network interface */
-	unsigned long rx_multicast;
-	/* Errors processing rx data packets */
-	unsigned long rx_errors;
-	/* Control frames processed from dongle */
-	unsigned long rx_ctlpkts;
-
-	/* Errors in processing rx control frames */
-	unsigned long rx_ctlerrs;
-	/* Packets dropped locally (no memory) */
-	unsigned long rx_dropped;
 	/* Packets flushed due to unscheduled sendup thread */
 	unsigned long rx_flushed;
 	/* Number of times dpc scheduled by watchdog timer */
 	unsigned long wd_dpc_sched;
 
-	/* Number of packets where header read-ahead was used. */
-	unsigned long rx_readahead_cnt;
-	/* Number of tx packets we had to realloc for headroom */
-	unsigned long tx_realloc;
 	/* Number of flow control pkts recvd */
 	unsigned long fc_packets;
 
 	/* Last error return */
 	int bcmerror;
-	uint tickcnt;
 
 	/* Last error from dongle */
 	int dongle_error;
@@ -664,6 +605,14 @@
 	u8 country_code[BRCM_CNTRY_BUF_SZ];
 	char eventmask[BRCMF_EVENTING_MASK_LEN];
 
+	struct brcmf_if *iflist[BRCMF_MAX_IFS];
+
+	struct mutex proto_block;
+
+	struct work_struct setmacaddr_work;
+	struct work_struct multicast_work;
+	u8 macvalue[ETH_ALEN];
+	atomic_t pend_8021x_cnt;
 };
 
 struct brcmf_if_event {
@@ -683,67 +632,33 @@
 extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
 			  char *buf, uint len);
 
-/* Indication from bus module regarding presence/insertion of dongle.
- * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
- * Returned structure should have bus and prot pointers filled in.
- * bus_hdrlen specifies required headroom for bus module header.
- */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
-				      uint bus_hdrlen);
 extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
 
-/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct brcmf_pub *drvr);
-
-/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
-
-extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
-			 struct sk_buff *pkt, int prec);
-
-/* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
-			 struct sk_buff *rxp, int numpkt);
-
 /* Return pointer to interface name */
 extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
-/* Notify tx completion */
-extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
-			     bool success);
-
 /* Query dongle */
 extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 				       uint cmd, void *buf, uint len);
 
-/* OS independent layer functions */
-extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
-extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
 #ifdef BCMDBG
 extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
 #endif				/* BCMDBG */
 
-extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
-extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
+extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
+extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
 			      void *pktdata, struct brcmf_event_msg *,
 			      void **data_ptr);
 
-extern void brcmf_c_init(void);
-
-extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
-			struct net_device *ndev, char *name, u8 *mac_addr,
-			u32 flags, u8 bssidx);
-extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
+extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
 
 /* Send packet to dongle via data channel */
 extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
 			 struct sk_buff *pkt);
 
-extern int brcmf_bus_start(struct brcmf_pub *drvr);
-
 extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
 extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
 					     int enable, int master_mode);
@@ -752,25 +667,4 @@
 #define BRCMF_DCMD_MEDLEN	1536	/* "med" cmd buffer required */
 #define	BRCMF_DCMD_MAXLEN	8192	/* max length cmd buffer required */
 
-/* message levels */
-#define BRCMF_ERROR_VAL	0x0001
-#define BRCMF_TRACE_VAL	0x0002
-#define BRCMF_INFO_VAL	0x0004
-#define BRCMF_DATA_VAL	0x0008
-#define BRCMF_CTL_VAL	0x0010
-#define BRCMF_TIMER_VAL	0x0020
-#define BRCMF_HDRS_VAL	0x0040
-#define BRCMF_BYTES_VAL	0x0080
-#define BRCMF_INTR_VAL	0x0100
-#define BRCMF_GLOM_VAL	0x0400
-#define BRCMF_EVENT_VAL	0x0800
-#define BRCMF_BTA_VAL	0x1000
-#define BRCMF_ISCAN_VAL 0x2000
-
-/* Enter idle immediately (no timeout) */
-#define BRCMF_IDLE_IMMEDIATE	(-1)
-#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
-				 when idle */
-#define BRCMF_IDLE_INTERVAL	1
-
 #endif				/* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a249407..ad9be24 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,41 +17,89 @@
 #ifndef _BRCMF_BUS_H_
 #define _BRCMF_BUS_H_
 
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#define BRCMF_SDALIGN	(1 << 6)
+/* The level of bus communication with the dongle */
+enum brcmf_bus_state {
+	BRCMF_BUS_DOWN,		/* Not ready for frame transfers */
+	BRCMF_BUS_LOAD,		/* Download access only (CPU reset) */
+	BRCMF_BUS_DATA		/* Ready for frame transfers */
+};
 
-/* watchdog polling interval in ms */
-#define BRCMF_WD_POLL_MS	10
+struct dngl_stats {
+	unsigned long rx_packets;	/* total packets received */
+	unsigned long tx_packets;	/* total packets transmitted */
+	unsigned long rx_bytes;	/* total bytes received */
+	unsigned long tx_bytes;	/* total bytes transmitted */
+	unsigned long rx_errors;	/* bad packets received */
+	unsigned long tx_errors;	/* packet transmit problems */
+	unsigned long rx_dropped;	/* packets dropped by dongle */
+	unsigned long tx_dropped;	/* packets dropped by dongle */
+	unsigned long multicast;	/* multicast packets received */
+};
+
+/* interface structure between common and bus layer */
+struct brcmf_bus {
+	u8 type;		/* bus type */
+	void *bus_priv;		/* pointer to bus private structure */
+	void *drvr;		/* pointer to driver pub structure brcmf_pub */
+	enum brcmf_bus_state state;
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	bool drvr_up;		/* Status flag of driver up/down */
+	unsigned long tx_realloc;	/* Tx packets realloced for headroom */
+	struct dngl_stats dstats;	/* Stats for dongle-based data */
+	u8 align;		/* bus alignment requirement */
+
+	/* interface functions pointers */
+	/* Stop bus module: clear pending frames, disable data flow */
+	void (*brcmf_bus_stop)(struct device *);
+	/* Initialize bus module: prepare for communication w/dongle */
+	int (*brcmf_bus_init)(struct device *);
+	/* Send a data frame to the dongle.  Callee disposes of txp. */
+	int (*brcmf_bus_txdata)(struct device *, struct sk_buff *);
+	/* Send/receive a control message to/from the dongle.
+	 * Expects caller to enforce a single outstanding transaction.
+	 */
+	int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint);
+	int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint);
+};
 
 /*
- * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
+ * interface functions from common layer
  */
 
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
+			       struct sk_buff *rxp);
 
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
+extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
+			 struct sk_buff *pkt, int prec);
 
-/* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+extern void brcmf_rx_frame(struct device *dev, int ifidx,
+			   struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct device *dev, int ifidx,
+				   struct sk_buff *pkt)
+{
+	struct sk_buff_head q;
 
-/* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+	skb_queue_head_init(&q);
+	skb_queue_tail(&q, pkt);
+	brcmf_rx_frame(dev, ifidx, &q);
+}
 
-/* Send a data frame to the dongle.  Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+/* Indication from bus module regarding presence/insertion of dongle. */
+extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+/* Indication from bus module regarding removal/absence of dongle */
+extern void brcmf_detach(struct device *dev);
 
-/* Send/receive a control message to/from the dongle.
- * Expects caller to enforce a single outstanding transaction.
- */
-extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Indication from bus module to change flow-control state */
+extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
 
-extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Notify tx completion */
+extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
+			     bool success);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern int brcmf_bus_start(struct device *dev);
 
+extern int brcmf_add_if(struct device *dev, int ifidx,
+			char *name, u8 *mac_addr);
 #endif				/* _BRCMF_BUS_H_ */
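
struct brcmf_bus is now the entire contract between the common layer and a bus back end: shared state, dongle statistics, the alignment requirement, and five callbacks. A hedged sketch of how a bus implementation might populate it during probe; the mybus_* names are invented for illustration, and the real SDIO code spreads this work across brcmf_ops_sdio_probe() and the SDIO-specific probe path:

	/* Illustrative bus back end wiring up its callbacks; not driver code. */
	static void mybus_stop(struct device *dev) { }
	static int mybus_init(struct device *dev) { return 0; }
	static int mybus_txdata(struct device *dev, struct sk_buff *skb) { return 0; }
	static int mybus_txctl(struct device *dev, unsigned char *msg, uint len) { return 0; }
	static int mybus_rxctl(struct device *dev, unsigned char *msg, uint len) { return 0; }

	static int mybus_probe(struct device *dev)
	{
		struct brcmf_bus *bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL);

		if (!bus_if)
			return -ENOMEM;

		bus_if->type = SDIO_BUS;
		bus_if->align = 64;			/* bus alignment requirement */
		bus_if->brcmf_bus_stop = mybus_stop;
		bus_if->brcmf_bus_init = mybus_init;
		bus_if->brcmf_bus_txdata = mybus_txdata;
		bus_if->brcmf_bus_txctl = mybus_txctl;
		bus_if->brcmf_bus_rxctl = mybus_rxctl;
		dev_set_drvdata(dev, bus_if);		/* common layer finds it here */

		return brcmf_attach(0, dev);		/* 0 = assumed bus header length */
	}

The common layer then calls back through these pointers, as the dhd_cdc.c changes below (brcmf_bus_txctl/brcmf_bus_rxctl) show.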
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index e34c5c3..ac8d1f4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -58,7 +58,7 @@
  * Used on data packets to convey priority across USB.
  */
 #define	BDC_HEADER_LEN		4
-#define BDC_PROTO_VER		1	/* Protocol version */
+#define BDC_PROTO_VER		2	/* Protocol version */
 #define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
 #define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
 #define BDC_FLAG_SUM_GOOD	0x04	/* Good RX checksums */
@@ -77,18 +77,19 @@
 	u8 flags;
 	u8 priority;	/* 802.1d Priority, 4:7 flow control info for usb */
 	u8 flags2;
-	u8 rssi;
+	u8 data_offset;
 };
 
 
 #define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN	(16+BRCMF_SDALIGN) /* Must be at least SDPCM_RESERVE
+#define BUS_HEADER_LEN	(16+64)		/* Must be at least SDPCM_RESERVE
 					 * (amount of header that might be added)
 					 * plus any space that might be needed
-					 * for alignment padding.
+					 * for bus alignment padding.
 					 */
-#define ROUND_UP_MARGIN	2048	/* Biggest SDIO block size possible for
+#define ROUND_UP_MARGIN	2048	/* Biggest bus block size possible for
 				 * round off at the end of buffer
+				 * Currently is SDIO
 				 */
 
 struct brcmf_proto {
@@ -116,8 +117,9 @@
 		len = CDC_MAX_MSG_SIZE;
 
 	/* Send request */
-	return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
-				      len);
+	return drvr->bus_if->brcmf_bus_txctl(drvr->dev,
+					     (unsigned char *)&prot->msg,
+					     len);
 }
 
 static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -128,7 +130,7 @@
 	brcmf_dbg(TRACE, "Enter\n");
 
 	do {
-		ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+		ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev,
 				(unsigned char *)&prot->msg,
 				len + sizeof(struct brcmf_proto_cdc_dcmd));
 		if (ret < 0)
@@ -280,11 +282,11 @@
 	struct brcmf_proto *prot = drvr->prot;
 	int ret = -1;
 
-	if (drvr->busstate == BRCMF_BUS_DOWN) {
+	if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
 		brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
 		return ret;
 	}
-	brcmf_os_proto_block(drvr);
+	mutex_lock(&drvr->proto_block);
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -338,7 +340,7 @@
 	prot->pending = false;
 
 done:
-	brcmf_os_proto_unblock(drvr);
+	mutex_unlock(&drvr->proto_block);
 
 	return ret;
 }
@@ -372,14 +374,16 @@
 
 	h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
 	h->flags2 = 0;
-	h->rssi = 0;
+	h->data_offset = 0;
 	BDC_SET_IF_IDX(h, ifidx);
 }
 
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
+int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
 			struct sk_buff *pktbuf)
 {
 	struct brcmf_proto_bdc_header *h;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -435,7 +439,7 @@
 
 	drvr->prot = cdc;
 	drvr->hdrlen += BDC_HEADER_LEN;
-	drvr->maxctl = BRCMF_DCMD_MAXLEN +
+	drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
 			sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
 	return 0;
 
@@ -451,18 +455,6 @@
 	drvr->prot = NULL;
 }
 
-void brcmf_proto_dstats(struct brcmf_pub *drvr)
-{
-	/* No stats from dongle added yet, copy bus stats */
-	drvr->dstats.tx_packets = drvr->tx_packets;
-	drvr->dstats.tx_errors = drvr->tx_errors;
-	drvr->dstats.rx_packets = drvr->rx_packets;
-	drvr->dstats.rx_errors = drvr->rx_errors;
-	drvr->dstats.rx_dropped = drvr->rx_dropped;
-	drvr->dstats.multicast = drvr->rx_multicast;
-	return;
-}
-
 int brcmf_proto_init(struct brcmf_pub *drvr)
 {
 	int ret = 0;
@@ -470,19 +462,19 @@
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	brcmf_os_proto_block(drvr);
+	mutex_lock(&drvr->proto_block);
 
 	/* Get the device MAC address */
 	strcpy(buf, "cur_etheraddr");
 	ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
 					  buf, sizeof(buf));
 	if (ret < 0) {
-		brcmf_os_proto_unblock(drvr);
+		mutex_unlock(&drvr->proto_block);
 		return ret;
 	}
 	memcpy(drvr->mac, buf, ETH_ALEN);
 
-	brcmf_os_proto_unblock(drvr);
+	mutex_unlock(&drvr->proto_block);
 
 	ret = brcmf_c_preinit_dcmds(drvr);
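
The brcmf_os_proto_block()/brcmf_os_proto_unblock() OS-abstraction pair is gone; struct brcmf_pub now carries a plain struct mutex (proto_block) and callers take it directly around each dongle command exchange, as the hunks above do. A minimal sketch of the resulting pattern (the helper name and buffer handling are illustrative):

	/* Illustrative only: serialise one firmware query with the new mutex. */
	static int query_dongle_var(struct brcmf_pub *drvr, char *buf, uint len)
	{
		int ret;

		mutex_lock(&drvr->proto_block);		/* was brcmf_os_proto_block() */
		ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, len);
		mutex_unlock(&drvr->proto_block);	/* was brcmf_os_proto_unblock() */

		return ret;
	}

The mutex itself is initialised once in brcmf_attach() with mutex_init(&drvr->proto_block), as shown in dhd_linux.c below.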
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 8918261..a51d8f5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,8 +32,6 @@
 #define PKTFILTER_BUF_SIZE		2048
 #define BRCMF_ARPOL_MODE		0xb	/* agent|snoop|peer_autoreply */
 
-int brcmf_msg_level;
-
 #define MSGTRACE_VERSION	1
 
 #define BRCMF_PKT_FILTER_FIXED_LEN	offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,25 +83,14 @@
 	return len;
 }
 
-void brcmf_c_init(void)
-{
-	/* Init global variables at run-time, not as part of the declaration.
-	 * This is required to support init/de-init of the driver.
-	 * Initialization
-	 * of globals as part of the declaration results in non-deterministic
-	 * behaviour since the value of the globals may be different on the
-	 * first time that the driver is initialized vs subsequent
-	 * initializations.
-	 */
-	brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
-bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
 		      struct sk_buff *pkt, int prec)
 {
 	struct sk_buff *p;
 	int eprec = -1;		/* precedence to evict from */
 	bool discard_oldest;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
 	/* Fast case, precedence queue is not full and we are also not
 	 * exceeding total queue length
@@ -446,7 +433,7 @@
 #endif				/* BCMDBG */
 
 int
-brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
 		   struct brcmf_event_msg *event, void **data_ptr)
 {
 	/* check whether packet is a BRCM event pkt */
@@ -488,19 +475,18 @@
 
 		if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
 			if (ifevent->action == BRCMF_E_IF_ADD)
-				brcmf_add_if(drvr_priv, ifevent->ifidx, NULL,
+				brcmf_add_if(drvr->dev, ifevent->ifidx,
 					     event->ifname,
-					     pvt_data->eth.h_dest,
-					     ifevent->flags, ifevent->bssidx);
+					     pvt_data->eth.h_dest);
 			else
-				brcmf_del_if(drvr_priv, ifevent->ifidx);
+				brcmf_del_if(drvr, ifevent->ifidx);
 		} else {
 			brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
 				  ifevent->ifidx, event->ifname);
 		}
 
 		/* send up the if event: btamp user needs it */
-		*ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+		*ifidx = brcmf_ifname2idx(drvr, event->ifname);
 		break;
 
 		/* These are what external supplicant/authenticator wants */
@@ -512,7 +498,7 @@
 	default:
 		/* Fall through: this should get _everything_  */
 
-		*ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+		*ifidx = brcmf_ifname2idx(drvr, event->ifname);
 		brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
 			  type, flags, status);
 
@@ -812,7 +798,7 @@
 				 "event_msgs" + '\0' + bitvec  */
 	uint up = 0;
 	char buf[128], *ptr;
-	u32 dongle_align = BRCMF_SDALIGN;
+	u32 dongle_align = drvr->bus_if->align;
 	u32 glom = 0;
 	u32 roaming = 1;
 	uint bcn_timeout = 3;
@@ -820,7 +806,7 @@
 	int scan_unassoc_time = 40;
 	int i;
 
-	brcmf_os_proto_block(drvr);
+	mutex_lock(&drvr->proto_block);
 
 	/* Set Country code */
 	if (drvr->country_code[0] != 0) {
@@ -889,7 +875,7 @@
 						 0, true);
 	}
 
-	brcmf_os_proto_unblock(drvr);
+	mutex_unlock(&drvr->proto_block);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 7467922..bb26ee3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -17,6 +17,21 @@
 #ifndef _BRCMF_DBG_H_
 #define _BRCMF_DBG_H_
 
+/* message levels */
+#define BRCMF_ERROR_VAL	0x0001
+#define BRCMF_TRACE_VAL	0x0002
+#define BRCMF_INFO_VAL	0x0004
+#define BRCMF_DATA_VAL	0x0008
+#define BRCMF_CTL_VAL	0x0010
+#define BRCMF_TIMER_VAL	0x0020
+#define BRCMF_HDRS_VAL	0x0040
+#define BRCMF_BYTES_VAL	0x0080
+#define BRCMF_INTR_VAL	0x0100
+#define BRCMF_GLOM_VAL	0x0400
+#define BRCMF_EVENT_VAL	0x0800
+#define BRCMF_BTA_VAL	0x1000
+#define BRCMF_ISCAN_VAL 0x2000
+
 #if defined(BCMDBG)
 
 #define brcmf_dbg(level, fmt, ...)					\
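
The message-level bitmask moves here next to the brcmf_dbg() macro, and brcmf_msg_level itself becomes a module parameter initialised to BRCMF_ERROR_VAL in dhd_linux.c below. The macro body is not part of this hunk; a hedged sketch of how such a level-gated macro is typically built (illustrative, not a copy of the driver's macro):

	/* Illustrative level-gated debug macro: emit output only when the
	 * matching bit (BRCMF_ERROR_VAL, BRCMF_TRACE_VAL, ...) is set.
	 */
	extern int brcmf_msg_level;

	#define my_dbg(level, fmt, ...)						\
	do {									\
		if (brcmf_msg_level & BRCMF_##level##_VAL)			\
			printk(KERN_DEBUG "%s: " fmt, __func__, ##__VA_ARGS__);	\
	} while (0)

Loading the module with, for example, brcmf_msg_level=0x3 would then enable ERROR and TRACE output.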
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 4acbac5..eb9eb76 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -43,7 +43,6 @@
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "wl_cfg80211.h"
-#include "bcmchip.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -53,48 +52,19 @@
 
 /* Interface control information */
 struct brcmf_if {
-	struct brcmf_info *info;	/* back pointer to brcmf_info */
+	struct brcmf_pub *drvr;	/* back pointer to brcmf_pub */
 	/* OS/stack specifics */
 	struct net_device *ndev;
 	struct net_device_stats stats;
 	int idx;		/* iface idx in dongle */
-	int state;		/* interface state */
 	u8 mac_addr[ETH_ALEN];	/* assigned MAC address */
 };
 
-/* Local private structure (extension of pub) */
-struct brcmf_info {
-	struct brcmf_pub pub;
-
-	/* OS/stack specifics */
-	struct brcmf_if *iflist[BRCMF_MAX_IFS];
-
-	struct mutex proto_block;
-
-	struct work_struct setmacaddr_work;
-	struct work_struct multicast_work;
-	u8 macvalue[ETH_ALEN];
-	atomic_t pend_8021x_cnt;
-};
-
 /* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
 module_param(brcmf_msg_level, int, 0);
 
-
-static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
-{
-	int i = 0;
-
-	while (i < BRCMF_MAX_IFS) {
-		if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
-			return i;
-		i++;
-	}
-
-	return BRCMF_BAD_IF;
-}
-
-int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
+int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
 {
 	int i = BRCMF_MAX_IFS;
 	struct brcmf_if *ifp;
@@ -103,7 +73,7 @@
 		return 0;
 
 	while (--i > 0) {
-		ifp = drvr_priv->iflist[i];
+		ifp = drvr->iflist[i];
 		if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
 			break;
 	}
@@ -115,20 +85,18 @@
 
 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
 {
-	struct brcmf_info *drvr_priv = drvr->info;
-
 	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
 		brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
 		return "<if_bad>";
 	}
 
-	if (drvr_priv->iflist[ifidx] == NULL) {
+	if (drvr->iflist[ifidx] == NULL) {
 		brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
 		return "<if_null>";
 	}
 
-	if (drvr_priv->iflist[ifidx]->ndev)
-		return drvr_priv->iflist[ifidx]->ndev->name;
+	if (drvr->iflist[ifidx]->ndev)
+		return drvr->iflist[ifidx]->ndev->name;
 
 	return "<if_none>";
 }
@@ -146,10 +114,10 @@
 	uint buflen;
 	int ret;
 
-	struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+	struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
 						    multicast_work);
 
-	ndev = drvr_priv->iflist[0]->ndev;
+	ndev = drvr->iflist[0]->ndev;
 	cnt = netdev_mc_count(ndev);
 
 	/* Determine initial value of allmulti flag */
@@ -183,10 +151,10 @@
 	dcmd.len = buflen;
 	dcmd.set = true;
 
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
 	if (ret < 0) {
 		brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
-			  brcmf_ifname(&drvr_priv->pub, 0), cnt);
+			  brcmf_ifname(drvr, 0), cnt);
 		dcmd_value = cnt ? true : dcmd_value;
 	}
 
@@ -208,7 +176,7 @@
 	    ("allmulti", (void *)&dcmd_le_value,
 	    sizeof(dcmd_le_value), buf, buflen)) {
 		brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
-			  brcmf_ifname(&drvr_priv->pub, 0),
+			  brcmf_ifname(drvr, 0),
 			  (int)sizeof(dcmd_value), buflen);
 		kfree(buf);
 		return;
@@ -220,10 +188,10 @@
 	dcmd.len = buflen;
 	dcmd.set = true;
 
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
 	if (ret < 0) {
 		brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
-			  brcmf_ifname(&drvr_priv->pub, 0),
+			  brcmf_ifname(drvr, 0),
 			  le32_to_cpu(dcmd_le_value));
 	}
 
@@ -241,10 +209,10 @@
 	dcmd.len = sizeof(dcmd_le_value);
 	dcmd.set = true;
 
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
 	if (ret < 0) {
 		brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
-			  brcmf_ifname(&drvr_priv->pub, 0),
+			  brcmf_ifname(drvr, 0),
 			  le32_to_cpu(dcmd_le_value));
 	}
 }
@@ -256,14 +224,14 @@
 	struct brcmf_dcmd dcmd;
 	int ret;
 
-	struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+	struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
 						    setmacaddr_work);
 
 	brcmf_dbg(TRACE, "enter\n");
-	if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue,
+	if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
 			   ETH_ALEN, buf, 32)) {
 		brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
-			  brcmf_ifname(&drvr_priv->pub, 0));
+			  brcmf_ifname(drvr, 0));
 		return;
 	}
 	memset(&dcmd, 0, sizeof(dcmd));
@@ -272,52 +240,40 @@
 	dcmd.len = 32;
 	dcmd.set = true;
 
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
 	if (ret < 0)
 		brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
-			  brcmf_ifname(&drvr_priv->pub, 0));
+			  brcmf_ifname(drvr, 0));
 	else
-		memcpy(drvr_priv->iflist[0]->ndev->dev_addr,
-		       drvr_priv->macvalue, ETH_ALEN);
+		memcpy(drvr->iflist[0]->ndev->dev_addr,
+		       drvr->macvalue, ETH_ALEN);
 
 	return;
 }
 
 static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 	struct sockaddr *sa = (struct sockaddr *)addr;
-	int ifidx;
 
-	ifidx = brcmf_net2idx(drvr_priv, ndev);
-	if (ifidx == BRCMF_BAD_IF)
-		return -1;
-
-	memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
-	schedule_work(&drvr_priv->setmacaddr_work);
+	memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN);
+	schedule_work(&drvr->setmacaddr_work);
 	return 0;
 }
 
 static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
-	int ifidx;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 
-	ifidx = brcmf_net2idx(drvr_priv, ndev);
-	if (ifidx == BRCMF_BAD_IF)
-		return;
-
-	schedule_work(&drvr_priv->multicast_work);
+	schedule_work(&drvr->multicast_work);
 }
 
 int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
 {
-	struct brcmf_info *drvr_priv = drvr->info;
-
 	/* Reject if down */
-	if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+	if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
 		return -ENODEV;
 
 	/* Update multicast statistic */
@@ -328,122 +284,118 @@
 		if (is_multicast_ether_addr(eh->h_dest))
 			drvr->tx_multicast++;
 		if (ntohs(eh->h_proto) == ETH_P_PAE)
-			atomic_inc(&drvr_priv->pend_8021x_cnt);
+			atomic_inc(&drvr->pend_8021x_cnt);
 	}
 
 	/* If the protocol uses a data header, apply it */
 	brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
 
 	/* Use bus module to send data frame */
-	return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+	return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
 }
 
 static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	int ret;
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
-	int ifidx;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* Reject if down */
-	if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
-		brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
-			  drvr_priv->pub.up, drvr_priv->pub.busstate);
+	if (!drvr->bus_if->drvr_up ||
+	    (drvr->bus_if->state == BRCMF_BUS_DOWN)) {
+		brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n",
+			  drvr->bus_if->drvr_up,
+			  drvr->bus_if->state);
 		netif_stop_queue(ndev);
 		return -ENODEV;
 	}
 
-	ifidx = brcmf_net2idx(drvr_priv, ndev);
-	if (ifidx == BRCMF_BAD_IF) {
-		brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+	if (!drvr->iflist[ifp->idx]) {
+		brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx);
 		netif_stop_queue(ndev);
 		return -ENODEV;
 	}
 
 	/* Make sure there's enough room for any header */
-	if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
+	if (skb_headroom(skb) < drvr->hdrlen) {
 		struct sk_buff *skb2;
 
 		brcmf_dbg(INFO, "%s: insufficient headroom\n",
-			  brcmf_ifname(&drvr_priv->pub, ifidx));
-		drvr_priv->pub.tx_realloc++;
-		skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
+			  brcmf_ifname(drvr, ifp->idx));
+		drvr->bus_if->tx_realloc++;
+		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
 		dev_kfree_skb(skb);
 		skb = skb2;
 		if (skb == NULL) {
 			brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
-				  brcmf_ifname(&drvr_priv->pub, ifidx));
+				  brcmf_ifname(drvr, ifp->idx));
 			ret = -ENOMEM;
 			goto done;
 		}
 	}
 
-	ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+	ret = brcmf_sendpkt(drvr, ifp->idx, skb);
 
 done:
 	if (ret)
-		drvr_priv->pub.dstats.tx_dropped++;
+		drvr->bus_if->dstats.tx_dropped++;
 	else
-		drvr_priv->pub.tx_packets++;
+		drvr->bus_if->dstats.tx_packets++;
 
 	/* Return ok: we always eat the packet */
 	return 0;
 }
 
-void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
+void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
 {
 	struct net_device *ndev;
-	struct brcmf_info *drvr_priv = drvr->info;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	drvr->txoff = state;
-	ndev = drvr_priv->iflist[ifidx]->ndev;
+	ndev = drvr->iflist[ifidx]->ndev;
 	if (state == ON)
 		netif_stop_queue(ndev);
 	else
 		netif_wake_queue(ndev);
 }
 
-static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
+static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
 			    void *pktdata, struct brcmf_event_msg *event,
 			    void **data)
 {
 	int bcmerror = 0;
 
-	bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
+	bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
 	if (bcmerror != 0)
 		return bcmerror;
 
-	if (drvr_priv->iflist[*ifidx]->ndev)
-		brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev,
+	if (drvr->iflist[*ifidx]->ndev)
+		brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
 				     event, *data);
 
 	return bcmerror;
 }
 
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
-		  int numpkt)
+void brcmf_rx_frame(struct device *dev, int ifidx,
+		    struct sk_buff_head *skb_list)
 {
-	struct brcmf_info *drvr_priv = drvr->info;
 	unsigned char *eth;
 	uint len;
 	void *data;
-	struct sk_buff *pnext, *save_pktbuf;
-	int i;
+	struct sk_buff *skb, *pnext;
 	struct brcmf_if *ifp;
 	struct brcmf_event_msg event;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	save_pktbuf = skb;
-
-	for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
-		pnext = skb->next;
-		skb->next = NULL;
+	skb_queue_walk_safe(skb_list, skb, pnext) {
+		skb_unlink(skb, skb_list);
 
 		/* Get the protocol, maintain skb around eth_type_trans()
 		 * The main reason for this hack is for the limitation of
@@ -460,15 +412,21 @@
 		eth = skb->data;
 		len = skb->len;
 
-		ifp = drvr_priv->iflist[ifidx];
+		ifp = drvr->iflist[ifidx];
 		if (ifp == NULL)
-			ifp = drvr_priv->iflist[0];
+			ifp = drvr->iflist[0];
+
+		if (!ifp || !ifp->ndev ||
+		    ifp->ndev->reg_state != NETREG_REGISTERED) {
+			brcmu_pkt_buf_free_skb(skb);
+			continue;
+		}
 
 		skb->dev = ifp->ndev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
 
 		if (skb->pkt_type == PACKET_MULTICAST)
-			drvr_priv->pub.rx_multicast++;
+			bus_if->dstats.multicast++;
 
 		skb->data = eth;
 		skb->len = len;
@@ -478,19 +436,17 @@
 
 		/* Process special event packets and then discard them */
 		if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
-			brcmf_host_event(drvr_priv, &ifidx,
+			brcmf_host_event(drvr, &ifidx,
 					  skb_mac_header(skb),
 					  &event, &data);
 
-		if (drvr_priv->iflist[ifidx] &&
-		    !drvr_priv->iflist[ifidx]->state)
-			ifp = drvr_priv->iflist[ifidx];
-
-		if (ifp->ndev)
+		if (drvr->iflist[ifidx]) {
+			ifp = drvr->iflist[ifidx];
 			ifp->ndev->last_rx = jiffies;
+		}
 
-		drvr->dstats.rx_bytes += skb->len;
-		drvr->rx_packets++;	/* Local count */
+		bus_if->dstats.rx_bytes += skb->len;
+		bus_if->dstats.rx_packets++;	/* Local count */
 
 		if (in_interrupt())
 			netif_rx(skb);
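
brcmf_rx_frame() now takes a struct sk_buff_head instead of a hand-rolled ->next chain plus packet count, and drains it with skb_queue_walk_safe()/skb_unlink(); single-packet callers go through the brcmf_rx_packet() inline from dhd_bus.h above, which wraps one skb in a one-entry queue. A minimal sketch of that shape (generic names, delivery reduced to netif_rx() for brevity):

	/* Illustrative: drain an skb list safely while unlinking each entry. */
	static void deliver_list(struct sk_buff_head *list)
	{
		struct sk_buff *skb, *tmp;

		skb_queue_walk_safe(list, skb, tmp) {	/* safe against removal */
			skb_unlink(skb, list);		/* detach before handing off */
			netif_rx(skb);			/* pass to the network stack */
		}
	}

	/* Single-packet convenience wrapper, mirroring brcmf_rx_packet(). */
	static void deliver_one(struct sk_buff *skb)
	{
		struct sk_buff_head q;

		skb_queue_head_init(&q);
		skb_queue_tail(&q, skb);
		deliver_list(&q);
	}
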
@@ -505,59 +461,48 @@
 	}
 }
 
-void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
 {
 	uint ifidx;
-	struct brcmf_info *drvr_priv = drvr->info;
 	struct ethhdr *eh;
 	u16 type;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
-	brcmf_proto_hdrpull(drvr, &ifidx, txp);
+	brcmf_proto_hdrpull(dev, &ifidx, txp);
 
 	eh = (struct ethhdr *)(txp->data);
 	type = ntohs(eh->h_proto);
 
 	if (type == ETH_P_PAE)
-		atomic_dec(&drvr_priv->pend_8021x_cnt);
+		atomic_dec(&drvr->pend_8021x_cnt);
 
 }
 
 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
-	struct brcmf_if *ifp;
-	int ifidx;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_bus *bus_if = ifp->drvr->bus_if;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	ifidx = brcmf_net2idx(drvr_priv, ndev);
-	if (ifidx == BRCMF_BAD_IF)
-		return NULL;
-
-	ifp = drvr_priv->iflist[ifidx];
-
-	if (drvr_priv->pub.up)
-		/* Use the protocol to get dongle stats */
-		brcmf_proto_dstats(&drvr_priv->pub);
-
 	/* Copy dongle stats to net device stats */
-	ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
-	ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
-	ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
-	ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
-	ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
-	ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
-	ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
-	ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
-	ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
+	ifp->stats.rx_packets = bus_if->dstats.rx_packets;
+	ifp->stats.tx_packets = bus_if->dstats.tx_packets;
+	ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
+	ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
+	ifp->stats.rx_errors = bus_if->dstats.rx_errors;
+	ifp->stats.tx_errors = bus_if->dstats.tx_errors;
+	ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
+	ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
+	ifp->stats.multicast = bus_if->dstats.multicast;
 
 	return &ifp->stats;
 }
 
 /* Retrieve current toe component enables, which are kept
 	 as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
+static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol)
 {
 	struct brcmf_dcmd dcmd;
 	__le32 toe_le;
@@ -572,17 +517,17 @@
 	dcmd.set = false;
 
 	strcpy(buf, "toe_ol");
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
 	if (ret < 0) {
 		/* Check for older dongle image that doesn't support toe_ol */
 		if (ret == -EIO) {
 			brcmf_dbg(ERROR, "%s: toe not supported by device\n",
-				  brcmf_ifname(&drvr_priv->pub, ifidx));
+				  brcmf_ifname(drvr, ifidx));
 			return -EOPNOTSUPP;
 		}
 
 		brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
-			  brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+			  brcmf_ifname(drvr, ifidx), ret);
 		return ret;
 	}
 
@@ -593,7 +538,7 @@
 
 /* Set current toe component enables in toe_ol iovar,
 	 and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
+static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
 {
 	struct brcmf_dcmd dcmd;
 	char buf[32];
@@ -611,10 +556,10 @@
 	strcpy(buf, "toe_ol");
 	memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
 
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
 	if (ret < 0) {
 		brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
-			  brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+			  brcmf_ifname(drvr, ifidx), ret);
 		return ret;
 	}
 
@@ -624,10 +569,10 @@
 	strcpy(buf, "toe");
 	memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
 
-	ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+	ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
 	if (ret < 0) {
 		brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
-			  brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+			  brcmf_ifname(drvr, ifidx), ret);
 		return ret;
 	}
 
@@ -637,21 +582,19 @@
 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
 				    struct ethtool_drvinfo *info)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 
 	sprintf(info->driver, KBUILD_MODNAME);
-	sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
-	sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
-	sprintf(info->bus_info, "%s",
-		dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+	sprintf(info->version, "%lu", drvr->drv_version);
+	sprintf(info->bus_info, "%s", dev_name(drvr->dev));
 }
 
 static struct ethtool_ops brcmf_ethtool_ops = {
 	.get_drvinfo = brcmf_ethtool_get_drvinfo
 };
 
-static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
+static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
 {
 	struct ethtool_drvinfo info;
 	char drvname[sizeof(info.driver)];
@@ -685,18 +628,18 @@
 		}
 
 		/* otherwise, require dongle to be up */
-		else if (!drvr_priv->pub.up) {
+		else if (!drvr->bus_if->drvr_up) {
 			brcmf_dbg(ERROR, "dongle is not up\n");
 			return -ENODEV;
 		}
 
 		/* finally, report dongle driver type */
-		else if (drvr_priv->pub.iswl)
+		else if (drvr->iswl)
 			sprintf(info.driver, "wl");
 		else
 			sprintf(info.driver, "xx");
 
-		sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
+		sprintf(info.version, "%lu", drvr->drv_version);
 		if (copy_to_user(uaddr, &info, sizeof(info)))
 			return -EFAULT;
 		brcmf_dbg(CTL, "given %*s, returning %s\n",
@@ -706,7 +649,7 @@
 		/* Get toe offload components from dongle */
 	case ETHTOOL_GRXCSUM:
 	case ETHTOOL_GTXCSUM:
-		ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+		ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
 		if (ret < 0)
 			return ret;
 
@@ -727,7 +670,7 @@
 			return -EFAULT;
 
 		/* Read the current settings, update and write back */
-		ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+		ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
 		if (ret < 0)
 			return ret;
 
@@ -739,17 +682,17 @@
 		else
 			toe_cmpnt &= ~csum_dir;
 
-		ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
+		ret = brcmf_toe_set(drvr, 0, toe_cmpnt);
 		if (ret < 0)
 			return ret;
 
 		/* If setting TX checksum mode, tell Linux the new mode */
 		if (cmd == ETHTOOL_STXCSUM) {
 			if (edata.data)
-				drvr_priv->iflist[0]->ndev->features |=
+				drvr->iflist[0]->ndev->features |=
 				    NETIF_F_IP_CSUM;
 			else
-				drvr_priv->iflist[0]->ndev->features &=
+				drvr->iflist[0]->ndev->features &=
 				    ~NETIF_F_IP_CSUM;
 		}
 
@@ -765,18 +708,16 @@
 static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
 				    int cmd)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
-	int ifidx;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 
-	ifidx = brcmf_net2idx(drvr_priv, ndev);
-	brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+	brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
 
-	if (ifidx == BRCMF_BAD_IF)
+	if (!drvr->iflist[ifp->idx])
 		return -1;
 
 	if (cmd == SIOCETHTOOL)
-		return brcmf_ethtool(drvr_priv, ifr->ifr_data);
+		return brcmf_ethtool(drvr, ifr->ifr_data);
 
 	return -EOPNOTSUPP;
 }
@@ -788,28 +729,25 @@
 	s32 err = 0;
 	int buflen = 0;
 	bool is_set_key_cmd;
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
-	int ifidx;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 
 	memset(&dcmd, 0, sizeof(dcmd));
 	dcmd.cmd = cmd;
 	dcmd.buf = arg;
 	dcmd.len = len;
 
-	ifidx = brcmf_net2idx(drvr_priv, ndev);
-
 	if (dcmd.buf != NULL)
 		buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
 
 	/* send to dongle (must be up, and wl) */
-	if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+	if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
 		brcmf_dbg(ERROR, "DONGLE_DOWN\n");
 		err = -EIO;
 		goto done;
 	}
 
-	if (!drvr_priv->pub.iswl) {
+	if (!drvr->iswl) {
 		err = -EIO;
 		goto done;
 	}
@@ -826,7 +764,7 @@
 	if (is_set_key_cmd)
 		brcmf_netdev_wait_pend8021x(ndev);
 
-	err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+	err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
 
 done:
 	if (err > 0)
@@ -837,15 +775,16 @@
 
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
-	struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 	brcmf_cfg80211_down(drvr->config);
-	if (drvr->up == 0)
+	if (drvr->bus_if->drvr_up == 0)
 		return 0;
 
 	/* Set state and stop OS transmissions */
-	drvr->up = 0;
+	drvr->bus_if->drvr_up = false;
 	netif_stop_queue(ndev);
 
 	return 0;
@@ -853,39 +792,37 @@
 
 static int brcmf_netdev_open(struct net_device *ndev)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-					netdev_priv(ndev);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 	u32 toe_ol;
-	int ifidx = brcmf_net2idx(drvr_priv, ndev);
 	s32 ret = 0;
 
-	brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
+	brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
 
-	if (ifidx == 0) {	/* do it only for primary eth0 */
-
+	if (ifp->idx == 0) {	/* do it only for primary eth0 */
 		/* try to bring up bus */
-		ret = brcmf_bus_start(&drvr_priv->pub);
+		ret = brcmf_bus_start(drvr->dev);
 		if (ret != 0) {
 			brcmf_dbg(ERROR, "failed with code %d\n", ret);
 			return -1;
 		}
-		atomic_set(&drvr_priv->pend_8021x_cnt, 0);
+		atomic_set(&drvr->pend_8021x_cnt, 0);
 
-		memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
+		memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
 
 		/* Get current TOE mode from dongle */
-		if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+		if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0
 		    && (toe_ol & TOE_TX_CSUM_OL) != 0)
-			drvr_priv->iflist[ifidx]->ndev->features |=
+			drvr->iflist[ifp->idx]->ndev->features |=
 				NETIF_F_IP_CSUM;
 		else
-			drvr_priv->iflist[ifidx]->ndev->features &=
+			drvr->iflist[ifp->idx]->ndev->features &=
 				~NETIF_F_IP_CSUM;
 	}
 	/* Allow transmit calls */
 	netif_start_queue(ndev);
-	drvr_priv->pub.up = 1;
-	if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
+	drvr->bus_if->drvr_up = true;
+	if (brcmf_cfg80211_up(drvr->config)) {
 		brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
 		return -1;
 	}
@@ -893,193 +830,155 @@
 	return ret;
 }
 
+static const struct net_device_ops brcmf_netdev_ops_pri = {
+	.ndo_open = brcmf_netdev_open,
+	.ndo_stop = brcmf_netdev_stop,
+	.ndo_get_stats = brcmf_netdev_get_stats,
+	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+	.ndo_start_xmit = brcmf_netdev_start_xmit,
+	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
+	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
 int
-brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
-	     char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
 {
 	struct brcmf_if *ifp;
-	int ret = 0, err = 0;
+	struct net_device *ndev;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
-	brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+	brcmf_dbg(TRACE, "idx %d\n", ifidx);
 
-	ifp = drvr_priv->iflist[ifidx];
-	if (!ifp) {
-		ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
-		if (!ifp)
-			return -ENOMEM;
+	ifp = drvr->iflist[ifidx];
+	/*
+	 * Delete the existing interface before overwriting it
+	 * in case we missed the BRCMF_E_IF_DEL event.
+	 */
+	if (ifp) {
+		brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+			  ifp->ndev->name);
+		netif_stop_queue(ifp->ndev);
+		unregister_netdev(ifp->ndev);
+		free_netdev(ifp->ndev);
+		drvr->iflist[ifidx] = NULL;
 	}
 
-	memset(ifp, 0, sizeof(struct brcmf_if));
-	ifp->info = drvr_priv;
-	drvr_priv->iflist[ifidx] = ifp;
+	/* Allocate netdev, including space for private structure */
+	ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
+	if (!ndev) {
+		brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+		return -ENOMEM;
+	}
+
+	ifp = netdev_priv(ndev);
+	ifp->ndev = ndev;
+	ifp->drvr = drvr;
+	drvr->iflist[ifidx] = ifp;
+	ifp->idx = ifidx;
 	if (mac_addr != NULL)
 		memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
 
-	if (ndev == NULL) {
-		ifp->state = BRCMF_E_IF_ADD;
-		ifp->idx = ifidx;
-		/*
-		 * Delete the existing interface before overwriting it
-		 * in case we missed the BRCMF_E_IF_DEL event.
-		 */
-		if (ifp->ndev != NULL) {
-			brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
-				  ifp->ndev->name);
-			netif_stop_queue(ifp->ndev);
-			unregister_netdev(ifp->ndev);
-			free_netdev(ifp->ndev);
-		}
+	if (brcmf_net_attach(drvr, ifp->idx)) {
+		brcmf_dbg(ERROR, "brcmf_net_attach failed");
+		free_netdev(ifp->ndev);
+		drvr->iflist[ifidx] = NULL;
+		return -EOPNOTSUPP;
+	}
 
-		/* Allocate netdev, including space for private structure */
-		ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
-					 ether_setup);
-		if (!ifp->ndev) {
-			brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
-			ret = -ENOMEM;
-		}
-
-		if (ret == 0) {
-			memcpy(netdev_priv(ifp->ndev), &drvr_priv,
-			       sizeof(drvr_priv));
-			err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
-			if (err != 0) {
-				brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
-					  err);
-				ret = -EOPNOTSUPP;
-			} else {
-				brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
-					  current->pid, ifp->ndev->name);
-				ifp->state = 0;
-			}
-		}
-
-		if (ret < 0) {
-			if (ifp->ndev)
-				free_netdev(ifp->ndev);
-
-			drvr_priv->iflist[ifp->idx] = NULL;
-			kfree(ifp);
-		}
-	} else
-		ifp->ndev = ndev;
+	brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+		  current->pid, ifp->ndev->name);
 
 	return 0;
 }
 
-void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
 {
 	struct brcmf_if *ifp;
 
 	brcmf_dbg(TRACE, "idx %d\n", ifidx);
 
-	ifp = drvr_priv->iflist[ifidx];
+	ifp = drvr->iflist[ifidx];
 	if (!ifp) {
 		brcmf_dbg(ERROR, "Null interface\n");
 		return;
 	}
+	if (ifp->ndev) {
+		if (ifidx == 0) {
+			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+				rtnl_lock();
+				brcmf_netdev_stop(ifp->ndev);
+				rtnl_unlock();
+			}
+		} else {
+			netif_stop_queue(ifp->ndev);
+		}
 
-	ifp->state = BRCMF_E_IF_DEL;
-	ifp->idx = ifidx;
-	if (ifp->ndev != NULL) {
-		netif_stop_queue(ifp->ndev);
 		unregister_netdev(ifp->ndev);
+		drvr->iflist[ifidx] = NULL;
+		if (ifidx == 0)
+			brcmf_cfg80211_detach(drvr->config);
 		free_netdev(ifp->ndev);
-		drvr_priv->iflist[ifidx] = NULL;
-		kfree(ifp);
 	}
 }
 
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+int brcmf_attach(uint bus_hdrlen, struct device *dev)
 {
-	struct brcmf_info *drvr_priv = NULL;
-	struct net_device *ndev;
+	struct brcmf_pub *drvr = NULL;
+	int ret = 0;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	/* Allocate netdev, including space for private structure */
-	ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
-	if (!ndev) {
-		brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
-		goto fail;
-	}
-
 	/* Allocate primary brcmf_info */
-	drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
-	if (!drvr_priv)
-		goto fail;
+	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
+	if (!drvr)
+		return -ENOMEM;
 
-	/*
-	 * Save the brcmf_info into the priv
-	 */
-	memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
-	if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) ==
-	    BRCMF_BAD_IF)
-		goto fail;
-
-	ndev->netdev_ops = NULL;
-	mutex_init(&drvr_priv->proto_block);
-
-	/* Link to info module */
-	drvr_priv->pub.info = drvr_priv;
+	mutex_init(&drvr->proto_block);
 
 	/* Link to bus module */
-	drvr_priv->pub.bus = bus;
-	drvr_priv->pub.hdrlen = bus_hdrlen;
+	drvr->hdrlen = bus_hdrlen;
+	drvr->bus_if = dev_get_drvdata(dev);
+	drvr->bus_if->drvr = drvr;
+	drvr->dev = dev;
 
 	/* Attach and link in the protocol */
-	if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+	ret = brcmf_proto_attach(drvr);
+	if (ret != 0) {
 		brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
 		goto fail;
 	}
 
-	/* Attach and link in the cfg80211 */
-	drvr_priv->pub.config =
-			brcmf_cfg80211_attach(ndev,
-					      brcmf_bus_get_device(bus),
-					      &drvr_priv->pub);
-	if (drvr_priv->pub.config == NULL) {
-		brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
-		goto fail;
-	}
+	INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
+	INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
 
-	INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
-	INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
-
-	/*
-	 * Save the brcmf_info into the priv
-	 */
-	memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
-	return &drvr_priv->pub;
+	return ret;
 
 fail:
-	if (ndev)
-		free_netdev(ndev);
-	if (drvr_priv)
-		brcmf_detach(&drvr_priv->pub);
+	brcmf_detach(dev);
 
-	return NULL;
+	return ret;
 }
 
-int brcmf_bus_start(struct brcmf_pub *drvr)
+int brcmf_bus_start(struct device *dev)
 {
 	int ret = -1;
-	struct brcmf_info *drvr_priv = drvr->info;
 	/* Room for "event_msgs" + '\0' + bitvec */
 	char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
 	brcmf_dbg(TRACE, "\n");
 
 	/* Bring up the bus */
-	ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+	ret = bus_if->brcmf_bus_init(dev);
 	if (ret != 0) {
 		brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
 		return ret;
 	}
 
 	/* If bus is not ready, can't come up */
-	if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+	if (bus_if->state != BRCMF_BUS_DATA) {
 		brcmf_dbg(ERROR, "failed bus is not ready\n");
 		return -ENODEV;
 	}
@@ -1116,33 +1015,22 @@
 	drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
 
 	/* Bus is ready, do any protocol initialization */
-	ret = brcmf_proto_init(&drvr_priv->pub);
+	ret = brcmf_proto_init(drvr);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static struct net_device_ops brcmf_netdev_ops_pri = {
-	.ndo_open = brcmf_netdev_open,
-	.ndo_stop = brcmf_netdev_stop,
-	.ndo_get_stats = brcmf_netdev_get_stats,
-	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
-	.ndo_start_xmit = brcmf_netdev_start_xmit,
-	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
-	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
 int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
 {
-	struct brcmf_info *drvr_priv = drvr->info;
 	struct net_device *ndev;
 	u8 temp_addr[ETH_ALEN] = {
 		0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
 
 	brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
 
-	ndev = drvr_priv->iflist[ifidx]->ndev;
+	ndev = drvr->iflist[ifidx]->ndev;
 	ndev->netdev_ops = &brcmf_netdev_ops_pri;
 
 	/*
@@ -1150,7 +1038,7 @@
 	 */
 	if (ifidx != 0) {
 		/* for virtual interfaces use the primary MAC  */
-		memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
+		memcpy(temp_addr, drvr->mac, ETH_ALEN);
 
 	}
 
@@ -1161,14 +1049,23 @@
 			 - Locally Administered address  */
 
 	}
-	ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+	ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
 	ndev->ethtool_ops = &brcmf_ethtool_ops;
 
-	drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len +
-			      drvr_priv->pub.hdrlen;
+	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
+			      drvr->hdrlen;
 
 	memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
 
+	/* attach to cfg80211 for primary interface */
+	if (!ifidx) {
+		drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
+		if (drvr->config == NULL) {
+			brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+			goto fail;
+		}
+	}
+
 	if (register_netdev(ndev) != 0) {
 		brcmf_dbg(ERROR, "couldn't register the net device\n");
 		goto fail;
@@ -1185,127 +1082,57 @@
 
 static void brcmf_bus_detach(struct brcmf_pub *drvr)
 {
-	struct brcmf_info *drvr_priv;
-
 	brcmf_dbg(TRACE, "Enter\n");
 
 	if (drvr) {
-		drvr_priv = drvr->info;
-		if (drvr_priv) {
-			/* Stop the protocol module */
-			brcmf_proto_stop(&drvr_priv->pub);
+		/* Stop the protocol module */
+		brcmf_proto_stop(drvr);
 
-			/* Stop the bus module */
-			brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
-		}
+		/* Stop the bus module */
+		drvr->bus_if->brcmf_bus_stop(drvr->dev);
 	}
 }
 
-void brcmf_detach(struct brcmf_pub *drvr)
+void brcmf_detach(struct device *dev)
 {
-	struct brcmf_info *drvr_priv;
+	int i;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	if (drvr) {
-		drvr_priv = drvr->info;
-		if (drvr_priv) {
-			struct brcmf_if *ifp;
-			int i;
 
-			for (i = 1; i < BRCMF_MAX_IFS; i++)
-				if (drvr_priv->iflist[i])
-					brcmf_del_if(drvr_priv, i);
+	/* make sure the primary interface is removed last */
+	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
+		if (drvr->iflist[i])
+			brcmf_del_if(drvr, i);
 
-			ifp = drvr_priv->iflist[0];
-			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
-				rtnl_lock();
-				brcmf_netdev_stop(ifp->ndev);
-				rtnl_unlock();
-				unregister_netdev(ifp->ndev);
-			}
+	cancel_work_sync(&drvr->setmacaddr_work);
+	cancel_work_sync(&drvr->multicast_work);
 
-			cancel_work_sync(&drvr_priv->setmacaddr_work);
-			cancel_work_sync(&drvr_priv->multicast_work);
+	brcmf_bus_detach(drvr);
 
-			brcmf_bus_detach(drvr);
+	if (drvr->prot)
+		brcmf_proto_detach(drvr);
 
-			if (drvr->prot)
-				brcmf_proto_detach(drvr);
-
-			brcmf_cfg80211_detach(drvr->config);
-
-			free_netdev(ifp->ndev);
-			kfree(ifp);
-			kfree(drvr_priv);
-		}
-	}
+	bus_if->drvr = NULL;
+	kfree(drvr);
 }
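
brcmf_detach() now removes interfaces from the highest index downwards so that the primary interface (index 0) is torn down last, after any virtual interfaces built on top of it. A tiny standalone model of that ordering, with strings standing in for struct brcmf_if pointers:

/* Standalone model of tearing interfaces down in reverse index order so
 * index 0 (the primary interface) goes away last; not driver code. */
#include <stdio.h>

#define MAX_IFS 4

int main(void)
{
	/* non-NULL entries stand in for allocated interface objects */
	const char *iflist[MAX_IFS] = { "wlan0", NULL, "wlan0.2", "wlan0.3" };
	int i;

	for (i = MAX_IFS - 1; i > -1; i--)
		if (iflist[i])
			printf("del_if(%d) -> %s\n", i, iflist[i]);
	return 0;
}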
 
-static void __exit brcmf_module_cleanup(void)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
 {
-	brcmf_dbg(TRACE, "Enter\n");
-
-	brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
-{
-	int error;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	error = brcmf_bus_register();
-
-	if (error) {
-		brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
-		goto failed;
-	}
-	return 0;
-
-failed:
-	return -EINVAL;
-}
-
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
-
-int brcmf_os_proto_block(struct brcmf_pub *drvr)
-{
-	struct brcmf_info *drvr_priv = drvr->info;
-
-	if (drvr_priv) {
-		mutex_lock(&drvr_priv->proto_block);
-		return 1;
-	}
-	return 0;
-}
-
-int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
-{
-	struct brcmf_info *drvr_priv = drvr->info;
-
-	if (drvr_priv) {
-		mutex_unlock(&drvr_priv->proto_block);
-		return 1;
-	}
-
-	return 0;
-}
-
-static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
-{
-	return atomic_read(&drvr_priv->pend_8021x_cnt);
+	return atomic_read(&drvr->pend_8021x_cnt);
 }
 
 #define MAX_WAIT_FOR_8021X_TX	10
 
 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
 {
-	struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
 	int timeout = 10 * HZ / 1000;
 	int ntimes = MAX_WAIT_FOR_8021X_TX;
-	int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+	int pend = brcmf_get_pend_8021x_cnt(drvr);
 
 	while (ntimes && pend) {
 		if (pend) {
@@ -1314,7 +1141,7 @@
 			set_current_state(TASK_RUNNING);
 			ntimes--;
 		}
-		pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+		pend = brcmf_get_pend_8021x_cnt(drvr);
 	}
 	return pend;
 }
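
brcmf_netdev_wait_pend8021x() keeps its bounded poll loop (up to MAX_WAIT_FOR_8021X_TX iterations of roughly 10 ms each, per the 10 * HZ / 1000 timeout) but now reads the pending counter through the brcmf_if back-pointer. A userspace model of the same pattern, with nanosleep() standing in for schedule_timeout() and a fake counter that drains one frame per poll:

/* Bounded polling wait, modelled in userspace: check a pending counter
 * up to MAX_WAIT times with a short sleep between reads.  In the driver
 * the sleep is schedule_timeout() and the counter is an atomic_t. */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

#define MAX_WAIT_FOR_8021X_TX 10

static int pend_8021x_cnt = 3;	/* pretend three EAPOL frames are in flight */

static int get_pend_8021x_cnt(void)
{
	/* simulate the dongle completing one frame per poll */
	if (pend_8021x_cnt > 0)
		pend_8021x_cnt--;
	return pend_8021x_cnt;
}

int main(void)
{
	struct timespec ts = { 0, 10 * 1000 * 1000 };	/* ~10 ms per poll */
	int ntimes = MAX_WAIT_FOR_8021X_TX;
	int pend = get_pend_8021x_cnt();

	while (ntimes && pend) {
		nanosleep(&ts, NULL);
		ntimes--;
		pend = get_pend_8021x_cnt();
	}
	printf("pending after wait: %d (tries left %d)\n", pend, ntimes);
	return 0;
}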
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 4ee1ea8..6bc4425 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -41,17 +41,10 @@
 extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
 				struct sk_buff *txp);
 
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
-			       struct sk_buff *rxp);
-
 /* Use protocol to issue command to dongle */
 extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
 				struct brcmf_dcmd *dcmd, int len);
 
-/* Update local copy of dongle statistics */
-extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
-
 extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
 
 extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 313b8bf..5a002a2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -28,6 +28,7 @@
 #include <linux/semaphore.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/bcma/bcma.h>
 #include <asm/unaligned.h>
 #include <defs.h>
 #include <brcmu_wifi.h>
@@ -35,6 +36,7 @@
 #include <brcm_hw_ids.h>
 #include <soc.h>
 #include "sdio_host.h"
+#include "sdio_chip.h"
 
 #define DCMD_RESP_TIMEOUT  2000	/* In milli second */
 
@@ -85,11 +87,8 @@
 #endif				/* BCMDBG */
 #include <chipcommon.h>
 
-#include "dhd.h"
 #include "dhd_bus.h"
-#include "dhd_proto.h"
 #include "dhd_dbg.h"
-#include <bcmchip.h>
 
 #define TXQLEN		2048	/* bulk tx queue length */
 #define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
@@ -134,33 +133,6 @@
 /*   Force no backplane reset */
 #define SBSDIO_DEVCTL_RST_NOBPRESET	0x20
 
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP		0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT			0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP		0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ		0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ		0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL		0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL			0x80
-
-#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-
-#define SBSDIO_CLKAV(regval, alponly) \
-	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
 /* direct(mapped) cis space */
 
 /* MAPPED common CIS address */
@@ -335,49 +307,16 @@
 /* Flags for SDH calls */
 #define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
 
-/* sbimstate */
-#define	SBIM_IBE		0x20000	/* inbanderror */
-#define	SBIM_TO			0x40000	/* timeout */
-#define	SBIM_BY			0x01800000	/* busy (sonics >= 2.3) */
-#define	SBIM_RJ			0x02000000	/* reject (sonics >= 2.3) */
+#define BRCMFMAC_FW_NAME	"brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME	"brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
 
-/* sbtmstatelow */
-
-/* reset */
-#define	SBTML_RESET		0x0001
-/* reject field */
-#define	SBTML_REJ_MASK		0x0006
-/* reject */
-#define	SBTML_REJ		0x0002
-/* temporary reject, for error recovery */
-#define	SBTML_TMPREJ		0x0004
-
-/* Shift to locate the SI control flags in sbtml */
-#define	SBTML_SICF_SHIFT	16
-
-/* sbtmstatehigh */
-#define	SBTMH_SERR		0x0001	/* serror */
-#define	SBTMH_INT		0x0002	/* interrupt */
-#define	SBTMH_BUSY		0x0004	/* busy */
-#define	SBTMH_TO		0x0020	/* timeout (sonics >= 2.3) */
-
-/* Shift to locate the SI status flags in sbtmh */
-#define	SBTMH_SISF_SHIFT	16
-
-/* sbidlow */
-#define	SBIDL_INIT		0x80	/* initiator */
-
-/* sbidhigh */
-#define	SBIDH_RC_MASK		0x000f	/* revision code */
-#define	SBIDH_RCE_MASK		0x7000	/* revision code extension field */
-#define	SBIDH_RCE_SHIFT		8
-#define	SBCOREREV(sbidh) \
-	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
-	  ((sbidh) & SBIDH_RC_MASK))
-#define	SBIDH_CC_MASK		0x8ff0	/* core code */
-#define	SBIDH_CC_SHIFT		4
-#define	SBIDH_VC_MASK		0xffff0000	/* vendor code */
-#define	SBIDH_VC_SHIFT		16
+#define BRCMF_IDLE_IMMEDIATE	(-1)	/* Enter idle immediately */
+#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
+					 * when idle
+					 */
+#define BRCMF_IDLE_INTERVAL	1
 
 /*
  * Conversion of 802.1D priority to precedence level
@@ -388,17 +327,6 @@
 	       (prio^2) : prio;
 }
 
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
-		(base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
-		(base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
-		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
 /* core registers */
 struct sdpcmd_regs {
 	u32 corecontrol;		/* 0x00, rev8 */
@@ -524,25 +452,8 @@
 
 
 /* misc chip info needed by some of the routines */
-struct chip_info {
-	u32 chip;
-	u32 chiprev;
-	u32 cccorebase;
-	u32 ccrev;
-	u32 cccaps;
-	u32 buscorebase; /* 32 bits backplane bus address */
-	u32 buscorerev;
-	u32 buscoretype;
-	u32 ramcorebase;
-	u32 armcorebase;
-	u32 pmurev;
-	u32 ramsize;
-};
-
 /* Private data for SDIO bus interaction */
-struct brcmf_bus {
-	struct brcmf_pub *drvr;
-
+struct brcmf_sdio {
 	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
 	struct chip_info *ci;	/* Chip info struct */
 	char *vars;		/* Variables (from CIS and/or other) */
@@ -574,7 +485,7 @@
 	uint txminmax;
 
 	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
-	struct sk_buff *glom;	/* Packet chain for glommed superframe */
+	struct sk_buff_head glom; /* Packet list for glommed superframe */
 	uint glomerr;		/* Glom packet read errors */
 
 	u8 *rxbuf;		/* Buffer for receiving control packets */
@@ -637,6 +548,13 @@
 	uint f2rxdata;		/* Number of frame data reads */
 	uint f2txdata;		/* Number of f2 frame writes */
 	uint f1regdata;		/* Number of f1 register accesses */
+	uint tickcnt;		/* Number of times the watchdog has been scheduled */
+	unsigned long tx_ctlerrs;	/* Errors sending ctrl frames */
+	unsigned long tx_ctlpkts;	/* Ctrl frames sent to dongle */
+	unsigned long rx_ctlerrs;	/* Errors processing rx ctrl frames */
+	unsigned long rx_ctlpkts;	/* Ctrl frames processed from dongle */
+	unsigned long rx_readahead_cnt;	/* Number of packets where header
+					 * read-ahead was used. */
 
 	u8 *ctrl_frame_buf;
 	u32 ctrl_frame_len;
@@ -657,50 +575,10 @@
 
 	struct semaphore sdsem;
 
-	const char *fw_name;
 	const struct firmware *firmware;
-	const char *nv_name;
 	u32 fw_ptr;
-};
 
-struct sbconfig {
-	u32 PAD[2];
-	u32 sbipsflag;	/* initiator port ocp slave flag */
-	u32 PAD[3];
-	u32 sbtpsflag;	/* target port ocp slave flag */
-	u32 PAD[11];
-	u32 sbtmerrloga;	/* (sonics >= 2.3) */
-	u32 PAD;
-	u32 sbtmerrlog;	/* (sonics >= 2.3) */
-	u32 PAD[3];
-	u32 sbadmatch3;	/* address match3 */
-	u32 PAD;
-	u32 sbadmatch2;	/* address match2 */
-	u32 PAD;
-	u32 sbadmatch1;	/* address match1 */
-	u32 PAD[7];
-	u32 sbimstate;	/* initiator agent state */
-	u32 sbintvec;	/* interrupt mask */
-	u32 sbtmstatelow;	/* target state */
-	u32 sbtmstatehigh;	/* target state */
-	u32 sbbwa0;		/* bandwidth allocation table0 */
-	u32 PAD;
-	u32 sbimconfiglow;	/* initiator configuration */
-	u32 sbimconfighigh;	/* initiator configuration */
-	u32 sbadmatch0;	/* address match0 */
-	u32 PAD;
-	u32 sbtmconfiglow;	/* target configuration */
-	u32 sbtmconfighigh;	/* target configuration */
-	u32 sbbconfig;	/* broadcast configuration */
-	u32 PAD;
-	u32 sbbstate;	/* broadcast state */
-	u32 PAD[3];
-	u32 sbactcnfg;	/* activate configuration */
-	u32 PAD[3];
-	u32 sbflagst;	/* current sbflags */
-	u32 PAD[3];
-	u32 sbidlow;		/* identification */
-	u32 sbidhigh;	/* identification */
+	bool txoff;		/* Transmit flow-controlled */
 };
 
 /* clkstate */
@@ -737,7 +615,7 @@
 }
 
 /* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
 {
 	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
 	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
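
data_ok() is the transmit-credit test: tx_max and tx_seq are free-running 8-bit sequence numbers, so their difference is taken in u8 arithmetic and credit exists when the result is non-zero with the top bit clear (1 to 127 credits), which stays correct across wrap-around. A standalone check of that arithmetic:

/* Model of the data_ok() credit test: (u8)(tx_max - tx_seq) must be
 * non-zero and below 0x80.  Works across 8-bit wrap-around. */
#include <stdio.h>
#include <stdint.h>

static int data_ok(uint8_t tx_max, uint8_t tx_seq)
{
	uint8_t room = (uint8_t)(tx_max - tx_seq);

	return room != 0 && (room & 0x80) == 0;
}

int main(void)
{
	printf("%d\n", data_ok(10, 5));	  /* 1: five credits left        */
	printf("%d\n", data_ok(5, 5));	  /* 0: window exhausted         */
	printf("%d\n", data_ok(2, 250));  /* 1: credit across wrap (8)   */
	printf("%d\n", data_ok(5, 10));	  /* 0: seq ahead of max (stale) */
	return 0;
}
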
@@ -748,12 +626,14 @@
 * addresses on the 32 bit backplane bus.
  */
 static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
 {
+	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
 	*retryvar = 0;
 	do {
 		*regvar = brcmf_sdcard_reg_read(bus->sdiodev,
-				bus->ci->buscorebase + reg_offset, sizeof(u32));
+				bus->ci->c_inf[idx].base + reg_offset,
+				sizeof(u32));
 	} while (brcmf_sdcard_regfail(bus->sdiodev) &&
 		 (++(*retryvar) <= retry_limit));
 	if (*retryvar) {
@@ -766,12 +646,13 @@
 }
 
 static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
 {
+	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
 	*retryvar = 0;
 	do {
 		brcmf_sdcard_reg_write(bus->sdiodev,
-				       bus->ci->buscorebase + reg_offset,
+				       bus->ci->c_inf[idx].base + reg_offset,
 				       sizeof(u32), regval);
 	} while (brcmf_sdcard_regfail(bus->sdiodev) &&
 		 (++(*retryvar) <= retry_limit));
@@ -790,14 +671,14 @@
 /* Packet free applicable unconditionally for sdio and sdspi.
  * Conditional if bufpool was present for gspi bus.
  */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
 {
 	if (bus->usebufpool)
 		brcmu_pkt_buf_free_skb(pkt);
 }
 
 /* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 {
 	int err;
 	u8 clkctl, clkreq, devctl;
@@ -812,10 +693,6 @@
 		clkreq =
 		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
 
-		if ((bus->ci->chip == BCM4329_CHIP_ID)
-		    && (bus->ci->chiprev == 0))
-			clkreq |= SBSDIO_FORCE_ALP;
-
 		brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
 				       SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
 		if (err) {
@@ -823,14 +700,6 @@
 			return -EBADE;
 		}
 
-		if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
-			       && (bus->ci->buscorerev == 9))) {
-			u32 dummy, retries;
-			r_sdreg32(bus, &dummy,
-				  offsetof(struct sdpcmd_regs, clockctlstatus),
-				  &retries);
-		}
-
 		/* Check current status */
 		clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
 					       SBSDIO_FUNC1_CHIPCLKCSR, &err);
@@ -930,7 +799,7 @@
 }
 
 /* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -943,7 +812,7 @@
 }
 
 /* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 {
 #ifdef BCMDBG
 	uint oldstate = bus->clkstate;
@@ -999,7 +868,7 @@
 	return 0;
 }
 
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
 {
 	uint retries = 0;
 
@@ -1034,11 +903,9 @@
 			SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
 
 		/* Isolate the bus */
-		if (bus->ci->chip != BCM4329_CHIP_ID) {
-			brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-				SBSDIO_DEVICE_CTL,
-				SBSDIO_DEVCTL_PADS_ISO, NULL);
-		}
+		brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+			SBSDIO_DEVICE_CTL,
+			SBSDIO_DEVCTL_PADS_ISO, NULL);
 
 		/* Change state */
 		bus->sleeping = true;
@@ -1049,13 +916,6 @@
 		brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
 			SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
 
-		/* Force pad isolation off if possible
-			 (in case power never toggled) */
-		if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
-		    && (bus->ci->buscorerev >= 10))
-			brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-				SBSDIO_DEVICE_CTL, 0, NULL);
-
 		/* Make sure the controller has the bus up */
 		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
@@ -1080,13 +940,13 @@
 	return 0;
 }
 
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
 {
 	if (bus->sleeping)
 		brcmf_sdbrcm_bussleep(bus, false);
 }
 
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
 {
 	u32 intstatus = 0;
 	u32 hmb_data;
@@ -1162,7 +1022,7 @@
 	return intstatus;
 }
 
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 {
 	uint retries = 0;
 	u16 lastrbc;
@@ -1219,16 +1079,61 @@
 
 	/* If we can't reach the device, signal failure */
 	if (err || brcmf_sdcard_regfail(bus->sdiodev))
-		bus->drvr->busstate = BRCMF_BUS_DOWN;
+		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 }
 
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+/* copy a buffer into a pkt buffer chain */
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
+{
+	uint n, ret = 0;
+	struct sk_buff *p;
+	u8 *buf;
+
+	buf = bus->dataptr;
+
+	/* copy the data */
+	skb_queue_walk(&bus->glom, p) {
+		n = min_t(uint, p->len, len);
+		memcpy(p->data, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		if (!len)
+			break;
+	}
+
+	return ret;
+}
+
+/* return total length of buffer chain */
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+{
+	struct sk_buff *p;
+	uint total;
+
+	total = 0;
+	skb_queue_walk(&bus->glom, p)
+		total += p->len;
+	return total;
+}
+
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+{
+	struct sk_buff *cur, *next;
+
+	skb_queue_walk_safe(&bus->glom, cur, next) {
+		skb_unlink(cur, &bus->glom);
+		brcmu_pkt_buf_free_skb(cur);
+	}
+}
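
With the glom chain converted from a hand-linked sk_buff chain to a struct sk_buff_head, the helpers above walk the queue with skb_queue_walk() instead of chasing ->next pointers. The read-side logic can be modelled in plain C over an ordinary singly linked list; this sketch mimics brcmf_sdbrcm_glom_len() and brcmf_sdbrcm_glom_from_buf() only, without the kernel skb API:

/* Userspace model of the new glom helpers: walk a packet list to sum its
 * length, and to scatter a flat staging buffer back into the packets.
 * The list here is a plain singly linked list, not a sk_buff_head. */
#include <stdio.h>
#include <string.h>

struct pkt {
	unsigned char data[16];
	unsigned int len;
	struct pkt *next;
};

static unsigned int glom_len(struct pkt *head)
{
	unsigned int total = 0;
	struct pkt *p;

	for (p = head; p; p = p->next)
		total += p->len;
	return total;
}

/* copy 'len' bytes from 'buf' into the packets, in list order */
static unsigned int glom_from_buf(struct pkt *head, const unsigned char *buf,
				  unsigned int len)
{
	unsigned int n, copied = 0;
	struct pkt *p;

	for (p = head; p && len; p = p->next) {
		n = p->len < len ? p->len : len;
		memcpy(p->data, buf, n);
		buf += n;
		len -= n;
		copied += n;
	}
	return copied;
}

int main(void)
{
	struct pkt b = { .len = 6, .next = NULL };
	struct pkt a = { .len = 4, .next = &b };
	const unsigned char staging[10] = "0123456789";

	printf("total=%u copied=%u first=%.4s second=%.6s\n",
	       glom_len(&a), glom_from_buf(&a, staging, 10),
	       (const char *)a.data, (const char *)b.data);
	return 0;
}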
+
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
 	u16 dlen, totlen;
 	u8 *dptr, num = 0;
 
 	u16 sublen, check;
-	struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+	struct sk_buff *pfirst, *pnext;
 
 	int errcode;
 	u8 chan, seq, doff, sfdoff;
@@ -1240,11 +1145,12 @@
 	/* If packets, issue read(s) and send up packet chain */
 	/* Return sequence numbers consumed? */
 
-	brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+	brcmf_dbg(TRACE, "start: glomd %p glom %p\n",
+		  bus->glomd, skb_peek(&bus->glom));
 
 	/* If there's a descriptor, generate the packet chain */
 	if (bus->glomd) {
-		pfirst = plast = pnext = NULL;
+		pfirst = pnext = NULL;
 		dlen = (u16) (bus->glomd->len);
 		dptr = bus->glomd->data;
 		if (!dlen || (dlen & 1)) {
@@ -1287,12 +1193,7 @@
 					  num, sublen);
 				break;
 			}
-			if (!pfirst) {
-				pfirst = plast = pnext;
-			} else {
-				plast->next = pnext;
-				plast = pnext;
-			}
+			skb_queue_tail(&bus->glom, pnext);
 
 			/* Adhere to start alignment requirements */
 			pkt_align(pnext, sublen, BRCMF_SDALIGN);
@@ -1308,12 +1209,9 @@
 				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
 					  bus->nextlen, totlen, rxseq);
 			}
-			bus->glom = pfirst;
 			pfirst = pnext = NULL;
 		} else {
-			if (pfirst)
-				brcmu_pkt_buf_free_skb(pfirst);
-			bus->glom = NULL;
+			brcmf_sdbrcm_free_glom(bus);
 			num = 0;
 		}
 
@@ -1325,37 +1223,33 @@
 
 	/* Ok -- either we just generated a packet chain,
 		 or had one from before */
-	if (bus->glom) {
+	if (!skb_queue_empty(&bus->glom)) {
 		if (BRCMF_GLOM_ON()) {
 			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
-			for (pnext = bus->glom; pnext; pnext = pnext->next) {
+			skb_queue_walk(&bus->glom, pnext) {
 				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
 					  pnext, (u8 *) (pnext->data),
 					  pnext->len, pnext->len);
 			}
 		}
 
-		pfirst = bus->glom;
-		dlen = (u16) brcmu_pkttotlen(pfirst);
+		pfirst = skb_peek(&bus->glom);
+		dlen = (u16) brcmf_sdbrcm_glom_len(bus);
 
 		/* Do an SDIO read for the superframe.  Configurable iovar to
 		 * read directly into the chained packet, or allocate a large
 		 * packet and copy into the chain.
 		 */
 		if (usechain) {
-			errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+			errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
 					bus->sdiodev->sbwad,
-					SDIO_FUNC_2,
-					F2SYNC, (u8 *) pfirst->data, dlen,
-					pfirst);
+					SDIO_FUNC_2, F2SYNC, &bus->glom);
 		} else if (bus->dataptr) {
 			errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
 					bus->sdiodev->sbwad,
-					SDIO_FUNC_2,
-					F2SYNC, bus->dataptr, dlen,
-					NULL);
-			sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
-						bus->dataptr);
+					SDIO_FUNC_2, F2SYNC,
+					bus->dataptr, dlen);
+			sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
 			if (sublen != dlen) {
 				brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
 					  dlen, sublen);
@@ -1373,16 +1267,15 @@
 		if (errcode < 0) {
 			brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
 				  dlen, errcode);
-			bus->drvr->rx_errors++;
+			bus->sdiodev->bus_if->dstats.rx_errors++;
 
 			if (bus->glomerr++ < 3) {
 				brcmf_sdbrcm_rxfail(bus, true, true);
 			} else {
 				bus->glomerr = 0;
 				brcmf_sdbrcm_rxfail(bus, true, false);
-				brcmu_pkt_buf_free_skb(bus->glom);
 				bus->rxglomfail++;
-				bus->glom = NULL;
+				brcmf_sdbrcm_free_glom(bus);
 			}
 			return 0;
 		}
@@ -1455,10 +1348,14 @@
 		/* Remove superframe header, remember offset */
 		skb_pull(pfirst, doff);
 		sfdoff = doff;
+		num = 0;
 
 		/* Validate all the subframe headers */
-		for (num = 0, pnext = pfirst; pnext && !errcode;
-		     num++, pnext = pnext->next) {
+		skb_queue_walk(&bus->glom, pnext) {
+			/* stop when an invalid subframe is found */
+			if (errcode)
+				break;
+
 			dptr = (u8 *) (pnext->data);
 			dlen = (u16) (pnext->len);
 			sublen = get_unaligned_le16(dptr);
@@ -1491,6 +1388,8 @@
 					  num, doff, sublen, SDPCM_HDRLEN);
 				errcode = -1;
 			}
+			/* increase the subframe count */
+			num++;
 		}
 
 		if (errcode) {
@@ -1503,23 +1402,16 @@
 			} else {
 				bus->glomerr = 0;
 				brcmf_sdbrcm_rxfail(bus, true, false);
-				brcmu_pkt_buf_free_skb(bus->glom);
 				bus->rxglomfail++;
-				bus->glom = NULL;
+				brcmf_sdbrcm_free_glom(bus);
 			}
 			bus->nextlen = 0;
 			return 0;
 		}
 
 		/* Basic SD framing looks ok - process each packet (header) */
-		save_pfirst = pfirst;
-		bus->glom = NULL;
-		plast = NULL;
 
-		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
-			pnext = pfirst->next;
-			pfirst->next = NULL;
-
+		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
 			dptr = (u8 *) (pfirst->data);
 			sublen = get_unaligned_le16(dptr);
 			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1539,6 +1431,8 @@
 				bus->rx_badseq++;
 				rxseq = seq;
 			}
+			rxseq++;
+
 #ifdef BCMDBG
 			if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
 				printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1551,36 +1445,22 @@
 			skb_pull(pfirst, doff);
 
 			if (pfirst->len == 0) {
+				skb_unlink(pfirst, &bus->glom);
 				brcmu_pkt_buf_free_skb(pfirst);
-				if (plast)
-					plast->next = pnext;
-				else
-					save_pfirst = pnext;
-
 				continue;
-			} else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
-						       pfirst) != 0) {
+			} else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
+						       &ifidx, pfirst) != 0) {
 				brcmf_dbg(ERROR, "rx protocol error\n");
-				bus->drvr->rx_errors++;
+				bus->sdiodev->bus_if->dstats.rx_errors++;
+				skb_unlink(pfirst, &bus->glom);
 				brcmu_pkt_buf_free_skb(pfirst);
-				if (plast)
-					plast->next = pnext;
-				else
-					save_pfirst = pnext;
-
 				continue;
 			}
 
-			/* this packet will go up, link back into
-				 chain and count it */
-			pfirst->next = pnext;
-			plast = pfirst;
-			num++;
-
 #ifdef BCMDBG
 			if (BRCMF_GLOM_ON()) {
 				brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
-					  num, pfirst, pfirst->data,
+					  bus->glom.qlen, pfirst, pfirst->data,
 					  pfirst->len, pfirst->next,
 					  pfirst->prev);
 				print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1589,19 +1469,20 @@
 			}
 #endif				/* BCMDBG */
 		}
-		if (num) {
+		/* send any remaining packets up */
+		if (bus->glom.qlen) {
 			up(&bus->sdsem);
-			brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+			brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
 			down(&bus->sdsem);
 		}
 
 		bus->rxglomframes++;
-		bus->rxglompkts += num;
+		bus->rxglompkts += bus->glom.qlen;
 	}
 	return num;
 }
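
Each subframe pulled out of a glommed superframe starts with a little-endian hardware header: a 16-bit length followed by a check word, assumed here to be the one's complement of the length as elsewhere in the SDPCM framing, ahead of the software header carrying channel, sequence and data offset. A hedged sketch of validating that frame tag, roughly what the per-packet loop above does; the software-header fields beyond the first four bytes are not modelled:

/* Standalone check of an SDPCM-style frame tag: 16-bit little-endian
 * length followed by its bitwise complement.  Field layout beyond these
 * four bytes is not represented. */
#include <stdio.h>
#include <stdint.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int frametag_ok(const uint8_t *hdr, unsigned int pktlen)
{
	uint16_t sublen = get_le16(hdr);
	uint16_t check  = get_le16(hdr + 2);

	if ((uint16_t)~sublen != check)
		return 0;		/* HW header checksum error */
	if (sublen > pktlen)
		return 0;		/* length exceeds the packet  */
	return 1;
}

int main(void)
{
	uint8_t good[8] = { 0x34, 0x00, 0xcb, 0xff };	/* len 0x0034, ~len */
	uint8_t bad[8]  = { 0x34, 0x00, 0x00, 0x00 };

	printf("good: %d, bad: %d\n",
	       frametag_ok(good, 0x40), frametag_ok(bad, 0x40));
	return 0;
}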
 
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
 					bool *pending)
 {
 	DECLARE_WAITQUEUE(wait, current);
@@ -1623,7 +1504,7 @@
 	return timeout;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
 {
 	if (waitqueue_active(&bus->dcmd_resp_wait))
 		wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1631,7 +1512,7 @@
 	return 0;
 }
 static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 {
 	uint rdlen, pad;
 
@@ -1657,7 +1538,7 @@
 	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
 		pad = bus->blocksize - (rdlen % bus->blocksize);
 		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
-		    ((len + pad) < bus->drvr->maxctl))
+		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
 			rdlen += pad;
 	} else if (rdlen % BRCMF_SDALIGN) {
 		rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
@@ -1668,18 +1549,18 @@
 		rdlen = roundup(rdlen, ALIGNMENT);
 
 	/* Drop if the read is too big or it exceeds our maximum */
-	if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) {
+	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
 		brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
-			  rdlen, bus->drvr->maxctl);
-		bus->drvr->rx_errors++;
+			  rdlen, bus->sdiodev->bus_if->maxctl);
+		bus->sdiodev->bus_if->dstats.rx_errors++;
 		brcmf_sdbrcm_rxfail(bus, false, false);
 		goto done;
 	}
 
-	if ((len - doff) > bus->drvr->maxctl) {
+	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
 		brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
-			  len, len - doff, bus->drvr->maxctl);
-		bus->drvr->rx_errors++;
+			  len, len - doff, bus->sdiodev->bus_if->maxctl);
+		bus->sdiodev->bus_if->dstats.rx_errors++;
 		bus->rx_toolong++;
 		brcmf_sdbrcm_rxfail(bus, false, false);
 		goto done;
@@ -1689,8 +1570,7 @@
 	sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
 				bus->sdiodev->sbwad,
 				SDIO_FUNC_2,
-				F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
-				NULL);
+				F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
 	bus->f2rxdata++;
 
 	/* Control frame failures need retransmission */
@@ -1721,7 +1601,7 @@
 }
 
 /* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
 {
 	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
 		*pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1734,7 +1614,7 @@
 }
 
 static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
 			 struct sk_buff **pkt, u8 **rxbuf)
 {
 	int sdret;		/* Return code from calls */
@@ -1746,16 +1626,15 @@
 	pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
 	*rxbuf = (u8 *) ((*pkt)->data);
 	/* Read the entire frame */
-	sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-				      SDIO_FUNC_2, F2SYNC,
-				      *rxbuf, rdlen, *pkt);
+	sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+				      SDIO_FUNC_2, F2SYNC, *pkt);
 	bus->f2rxdata++;
 
 	if (sdret < 0) {
 		brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
 			  rdlen, sdret);
 		brcmu_pkt_buf_free_skb(*pkt);
-		bus->drvr->rx_errors++;
+		bus->sdiodev->bus_if->dstats.rx_errors++;
 		/* Force retry w/normal header read.
 		 * Don't attempt NAK for
 		 * gSPI
@@ -1767,7 +1646,7 @@
 
 /* Checks the header */
 static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
 		  u8 rxseq, u16 nextlen, u16 *len)
 {
 	u16 check;
@@ -1823,7 +1702,7 @@
 
 /* Return true if there may be more frames to read */
 static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
 {
 	u16 len, check;	/* Extracted hardware header fields */
 	u8 chan, seq, doff;	/* Extracted software header fields */
@@ -1846,14 +1725,15 @@
 	*finished = false;
 
 	for (rxseq = bus->rx_seq, rxleft = maxframes;
-	     !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+	     !bus->rxskip && rxleft &&
+	     bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
 	     rxseq++, rxleft--) {
 
 		/* Handle glomming separately */
-		if (bus->glom || bus->glomd) {
+		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
 			u8 cnt;
 			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
-				  bus->glomd, bus->glom);
+				  bus->glomd, skb_peek(&bus->glom));
 			cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
 			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
 			rxseq += cnt - 1;
@@ -1905,7 +1785,7 @@
 				bus->nextlen = 0;
 			}
 
-			bus->drvr->rx_readahead_cnt++;
+			bus->rx_readahead_cnt++;
 
 			/* Handle Flow Control */
 			fcbits = SDPCM_FCMASK_VALUE(
@@ -1976,7 +1856,7 @@
 		/* Read frame header (hardware and software) */
 		sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
 					      SDIO_FUNC_2, F2SYNC, bus->rxhdr,
-					      BRCMF_FIRSTREAD, NULL);
+					      BRCMF_FIRSTREAD);
 		bus->f2rxhdrs++;
 
 		if (sdret < 0) {
@@ -2103,7 +1983,7 @@
 			/* Too long -- skip this frame */
 			brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
 				  len, rdlen);
-			bus->drvr->rx_errors++;
+			bus->sdiodev->bus_if->dstats.rx_errors++;
 			bus->rx_toolong++;
 			brcmf_sdbrcm_rxfail(bus, false, false);
 			continue;
@@ -2115,7 +1995,7 @@
 			/* Give up on data, request rtx of events */
 			brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
 				  rdlen, chan);
-			bus->drvr->rx_dropped++;
+			bus->sdiodev->bus_if->dstats.rx_dropped++;
 			brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
 			continue;
 		}
@@ -2125,9 +2005,8 @@
 		pkt_align(pkt, rdlen, BRCMF_SDALIGN);
 
 		/* Read the remaining frame data */
-		sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-				SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
-				rdlen, pkt);
+		sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+					      SDIO_FUNC_2, F2SYNC, pkt);
 		bus->f2rxdata++;
 
 		if (sdret < 0) {
@@ -2136,7 +2015,7 @@
 				   : ((chan == SDPCM_DATA_CHANNEL) ? "data"
 				      : "test")), sdret);
 			brcmu_pkt_buf_free_skb(pkt);
-			bus->drvr->rx_errors++;
+			bus->sdiodev->bus_if->dstats.rx_errors++;
 			brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
 			continue;
 		}
@@ -2185,16 +2064,17 @@
 		if (pkt->len == 0) {
 			brcmu_pkt_buf_free_skb(pkt);
 			continue;
-		} else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+		} else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
+			   pkt) != 0) {
 			brcmf_dbg(ERROR, "rx protocol error\n");
 			brcmu_pkt_buf_free_skb(pkt);
-			bus->drvr->rx_errors++;
+			bus->sdiodev->bus_if->dstats.rx_errors++;
 			continue;
 		}
 
 		/* Unlock during rx call */
 		up(&bus->sdsem);
-		brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+		brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
 		down(&bus->sdsem);
 	}
 	rxcount = maxframes - rxleft;
@@ -2214,16 +2094,8 @@
 	return rxcount;
 }
 
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
-		    u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
-	return brcmf_sdcard_send_buf
-		(bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
 static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
 {
 	up(&bus->sdsem);
 	wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2233,7 +2105,7 @@
 }
 
 static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
 {
 	if (waitqueue_active(&bus->ctrl_wait))
 		wake_up_interruptible(&bus->ctrl_wait);
@@ -2242,7 +2114,7 @@
 
 /* Writes a HW/SW header into the packet and sends it. */
 /* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
 			      uint chan, bool free_pkt)
 {
 	int ret;
@@ -2262,7 +2134,7 @@
 		if (skb_headroom(pkt) < pad) {
 			brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
 				  skb_headroom(pkt), pad);
-			bus->drvr->tx_realloc++;
+			bus->sdiodev->bus_if->tx_realloc++;
 			new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
 			if (!new) {
 				brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
@@ -2331,9 +2203,8 @@
 	if (len & (ALIGNMENT - 1))
 			len = roundup(len, ALIGNMENT);
 
-	ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
-				    SDIO_FUNC_2, F2SYNC, frame,
-				    len, pkt);
+	ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+				    SDIO_FUNC_2, F2SYNC, pkt);
 	bus->f2txdata++;
 
 	if (ret < 0) {
@@ -2371,7 +2242,7 @@
 	/* restore pkt buffer pointer before calling tx complete routine */
 	skb_pull(pkt, SDPCM_HDRLEN + pad);
 	up(&bus->sdsem);
-	brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+	brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
 	down(&bus->sdsem);
 
 	if (free_pkt)
@@ -2380,7 +2251,7 @@
 	return ret;
 }
 
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 {
 	struct sk_buff *pkt;
 	u32 intstatus = 0;
@@ -2390,8 +2261,6 @@
 	uint datalen;
 	u8 tx_prec_map;
 
-	struct brcmf_pub *drvr = bus->drvr;
-
 	brcmf_dbg(TRACE, "Enter\n");
 
 	tx_prec_map = ~bus->flowcontrol;
@@ -2409,9 +2278,9 @@
 
 		ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
 		if (ret)
-			bus->drvr->tx_errors++;
+			bus->sdiodev->bus_if->dstats.tx_errors++;
 		else
-			bus->drvr->dstats.tx_bytes += datalen;
+			bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
 
 		/* In poll mode, need to check for other events */
 		if (!bus->intr && cnt) {
@@ -2428,14 +2297,98 @@
 	}
 
 	/* Deflow-control stack if needed */
-	if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
-	    drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
-		brcmf_txflowcontrol(drvr, 0, OFF);
+	if (bus->sdiodev->bus_if->drvr_up &&
+	    (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
+		bus->txoff = OFF;
+		brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+	}
 
 	return cnt;
 }
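
Flow control toward the stack is now tracked in bus->txoff: the netif queues are throttled once the bus tx queue reaches TXHI and released only after it drains below a lower threshold, giving hysteresis so the queues are not toggled on every packet. In the sketch below TXHI comes from the defines earlier in this file; TXLOW is not visible in this diff and is assumed here to sit 256 entries lower:

/* Model of the TXHI/TXLOW flow-control hysteresis on the bus tx queue.
 * TXQLEN and TXHI mirror the defines above; TXLOW is an assumption.
 * This is a simulation, not driver code. */
#include <stdio.h>
#include <stdbool.h>

#define TXQLEN	2048
#define TXHI	(TXQLEN - 256)
#define TXLOW	(TXHI - 256)		/* assumed release threshold */

static bool txoff;	/* mirrors bus->txoff */

static void queue_changed(int qlen)
{
	if (!txoff && qlen >= TXHI) {
		txoff = true;		/* brcmf_txflowcontrol(dev, 0, ON)  */
		printf("qlen=%d -> flow control ON\n", qlen);
	} else if (txoff && qlen < TXLOW) {
		txoff = false;		/* brcmf_txflowcontrol(dev, 0, OFF) */
		printf("qlen=%d -> flow control OFF\n", qlen);
	}
}

int main(void)
{
	int samples[] = { 100, 1700, 1800, 1792, 1600, 1540, 1535, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		queue_changed(samples[i]);
	return 0;
}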
 
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_bus_stop(struct device *dev)
+{
+	u32 local_hostintmask;
+	u8 saveclk;
+	uint retries;
+	int err;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (bus->watchdog_tsk) {
+		send_sig(SIGTERM, bus->watchdog_tsk, 1);
+		kthread_stop(bus->watchdog_tsk);
+		bus->watchdog_tsk = NULL;
+	}
+
+	if (bus->dpc_tsk && bus->dpc_tsk != current) {
+		send_sig(SIGTERM, bus->dpc_tsk, 1);
+		kthread_stop(bus->dpc_tsk);
+		bus->dpc_tsk = NULL;
+	}
+
+	down(&bus->sdsem);
+
+	bus_wake(bus);
+
+	/* Enable clock for device interrupts */
+	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+	/* Disable and clear interrupts at the chip level also */
+	w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+	local_hostintmask = bus->hostintmask;
+	bus->hostintmask = 0;
+
+	/* Change our idea of bus state */
+	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+				       SBSDIO_FUNC1_CHIPCLKCSR,
+				       (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err)
+		brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+
+	/* Turn off the bus (F2), free any pending packets */
+	brcmf_dbg(INTR, "disable SDIO interrupts\n");
+	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+			 SDIO_FUNC_ENABLE_1, NULL);
+
+	/* Clear any pending interrupts now that F2 is disabled */
+	w_sdreg32(bus, local_hostintmask,
+		  offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+	/* Turn off the backplane clock (only) */
+	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+	/* Clear the data packet queues */
+	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		brcmu_pkt_buf_free_skb(bus->glomd);
+	brcmf_sdbrcm_free_glom(bus);
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	brcmf_sdbrcm_dcmd_resp_wake(bus);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = false;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	up(&bus->sdsem);
+}
+
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 {
 	u32 intstatus, newstatus = 0;
 	uint retries = 0;
@@ -2463,7 +2416,7 @@
 					       SBSDIO_DEVICE_CTL, &err);
 		if (err) {
 			brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
-			bus->drvr->busstate = BRCMF_BUS_DOWN;
+			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 		}
 #endif				/* BCMDBG */
 
@@ -2473,7 +2426,7 @@
 		if (err) {
 			brcmf_dbg(ERROR, "error reading CSR: %d\n",
 				  err);
-			bus->drvr->busstate = BRCMF_BUS_DOWN;
+			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 		}
 
 		brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2486,7 +2439,7 @@
 			if (err) {
 				brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
 					  err);
-				bus->drvr->busstate = BRCMF_BUS_DOWN;
+				bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 			}
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
 			brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2494,7 +2447,7 @@
 			if (err) {
 				brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
 					  err);
-				bus->drvr->busstate = BRCMF_BUS_DOWN;
+				bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 			}
 			bus->clkstate = CLK_AVAIL;
 		} else {
@@ -2596,9 +2549,9 @@
 		(bus->clkstate == CLK_AVAIL)) {
 		int ret, i;
 
-		ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+		ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
 			SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
-			(u32) bus->ctrl_frame_len, NULL);
+			(u32) bus->ctrl_frame_len);
 
 		if (ret < 0) {
 			/* On failure, abort the command and
@@ -2650,11 +2603,11 @@
 		 else await next interrupt */
 	/* On failed register access, all bets are off:
 		 no resched or interrupts */
-	if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+	if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) ||
 	    brcmf_sdcard_regfail(bus->sdiodev)) {
 		brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
 			  brcmf_sdcard_regfail(bus->sdiodev));
-		bus->drvr->busstate = BRCMF_BUS_DOWN;
+		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 		bus->intstatus = 0;
 	} else if (bus->clkstate == CLK_PENDING) {
 		brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2681,7 +2634,7 @@
 
 static int brcmf_sdbrcm_dpc_thread(void *data)
 {
-	struct brcmf_bus *bus = (struct brcmf_bus *) data;
+	struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
 
 	allow_signal(SIGTERM);
 	/* Run until signal received */
@@ -2691,12 +2644,12 @@
 		if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
 			/* Call bus dpc unless it indicated down
 			(then clean stop) */
-			if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+			if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) {
 				if (brcmf_sdbrcm_dpc(bus))
 					complete(&bus->dpc_wait);
 			} else {
 				/* after stopping the bus, exit thread */
-				brcmf_sdbrcm_bus_stop(bus);
+				brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
 				bus->dpc_tsk = NULL;
 				break;
 			}
@@ -2706,10 +2659,13 @@
 	return 0;
 }
 
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
 {
 	int ret = -EBADE;
 	uint datalen, prec;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+	struct brcmf_sdio *bus = sdiodev->bus;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -2728,9 +2684,10 @@
 
 	/* Priority based enq */
 	spin_lock_bh(&bus->txqlock);
-	if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
+	if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) ==
+	    false) {
 		skb_pull(pkt, SDPCM_HDRLEN);
-		brcmf_txcomplete(bus->drvr, pkt, false);
+		brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
 		brcmu_pkt_buf_free_skb(pkt);
 		brcmf_dbg(ERROR, "out of bus->txq !!!\n");
 		ret = -ENOSR;
@@ -2739,8 +2696,10 @@
 	}
 	spin_unlock_bh(&bus->txqlock);
 
-	if (pktq_len(&bus->txq) >= TXHI)
-		brcmf_txflowcontrol(bus->drvr, 0, ON);
+	if (pktq_len(&bus->txq) >= TXHI) {
+		bus->txoff = ON;
+		brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+	}
 
 #ifdef BCMDBG
 	if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2757,7 +2716,7 @@
 }
 
 static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
 		 uint size)
 {
 	int bcmerror = 0;
@@ -2818,7 +2777,7 @@
 #ifdef BCMDBG
 #define CONSOLE_LINE_MAX	192
 
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
 {
 	struct brcmf_console *c = &bus->console;
 	u8 line[CONSOLE_LINE_MAX], ch;
@@ -2895,14 +2854,14 @@
 }
 #endif				/* BCMDBG */
 
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
 {
 	int i;
 	int ret;
 
 	bus->ctrl_frame_stat = false;
-	ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
-				    SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+	ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+				    SDIO_FUNC_2, F2SYNC, frame, len);
 
 	if (ret < 0) {
 		/* On failure, abort the command and terminate the frame */
@@ -2937,8 +2896,8 @@
 	return ret;
 }
 
-int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 {
 	u8 *frame;
 	u16 len;
@@ -2946,6 +2905,9 @@
 	uint retries = 0;
 	u8 doff = 0;
 	int ret = -1;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+	struct brcmf_sdio *bus = sdiodev->bus;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -3045,19 +3007,22 @@
 	up(&bus->sdsem);
 
 	if (ret)
-		bus->drvr->tx_ctlerrs++;
+		bus->tx_ctlerrs++;
 	else
-		bus->drvr->tx_ctlpkts++;
+		bus->tx_ctlpkts++;
 
 	return ret ? -EIO : 0;
 }
 
-int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 {
 	int timeleft;
 	uint rxlen = 0;
 	bool pending;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+	struct brcmf_sdio *bus = sdiodev->bus;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -3083,21 +3048,21 @@
 	}
 
 	if (rxlen)
-		bus->drvr->rx_ctlpkts++;
+		bus->rx_ctlpkts++;
 	else
-		bus->drvr->rx_ctlerrs++;
+		bus->rx_ctlerrs++;
 
 	return rxlen ? (int)rxlen : -ETIMEDOUT;
 }
 
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
 {
 	int bcmerror = 0;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* Basic sanity checks */
-	if (bus->drvr->up) {
+	if (bus->sdiodev->bus_if->drvr_up) {
 		bcmerror = -EISCONN;
 		goto err;
 	}
@@ -3123,7 +3088,7 @@
 	return bcmerror;
 }
 
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
 {
 	int bcmerror = 0;
 	u32 varsize;
@@ -3210,135 +3175,11 @@
 	return bcmerror;
 }
 
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
-	u32 regdata;
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-		CORE_SB(corebase, sbtmstatelow), 4);
-	if (regdata & SBTML_RESET)
-		return;
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-		CORE_SB(corebase, sbtmstatelow), 4);
-	if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
-		/*
-		 * set target reject and spin until busy is clear
-		 * (preserve core-specific bits)
-		 */
-		regdata = brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbtmstatelow), 4);
-		brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
-				       4, regdata | SBTML_REJ);
-
-		regdata = brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbtmstatelow), 4);
-		udelay(1);
-		SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbtmstatehigh), 4) &
-			SBTMH_BUSY), 100000);
-
-		regdata = brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbtmstatehigh), 4);
-		if (regdata & SBTMH_BUSY)
-			brcmf_dbg(ERROR, "ARM core still busy\n");
-
-		regdata = brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbidlow), 4);
-		if (regdata & SBIDL_INIT) {
-			regdata = brcmf_sdcard_reg_read(sdiodev,
-				CORE_SB(corebase, sbimstate), 4) |
-				SBIM_RJ;
-			brcmf_sdcard_reg_write(sdiodev,
-				CORE_SB(corebase, sbimstate), 4,
-				regdata);
-			regdata = brcmf_sdcard_reg_read(sdiodev,
-				CORE_SB(corebase, sbimstate), 4);
-			udelay(1);
-			SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
-				CORE_SB(corebase, sbimstate), 4) &
-				SBIM_BY), 100000);
-		}
-
-		/* set reset and reject while enabling the clocks */
-		brcmf_sdcard_reg_write(sdiodev,
-			CORE_SB(corebase, sbtmstatelow), 4,
-			(((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
-			SBTML_REJ | SBTML_RESET));
-		regdata = brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbtmstatelow), 4);
-		udelay(10);
-
-		/* clear the initiator reject bit */
-		regdata = brcmf_sdcard_reg_read(sdiodev,
-			CORE_SB(corebase, sbidlow), 4);
-		if (regdata & SBIDL_INIT) {
-			regdata = brcmf_sdcard_reg_read(sdiodev,
-				CORE_SB(corebase, sbimstate), 4) &
-				~SBIM_RJ;
-			brcmf_sdcard_reg_write(sdiodev,
-				CORE_SB(corebase, sbimstate), 4,
-				regdata);
-		}
-	}
-
-	/* leave reset and reject asserted */
-	brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-		(SBTML_REJ | SBTML_RESET));
-	udelay(1);
-}
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
-	u32 regdata;
-
-	/*
-	 * Must do the disable sequence first to work for
-	 * arbitrary current core state.
-	 */
-	brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
-
-	/*
-	 * Now do the initialization sequence.
-	 * set reset while enabling the clock and
-	 * forcing them on throughout the core
-	 */
-	brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-		((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
-		SBTML_RESET);
-	udelay(1);
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-					CORE_SB(corebase, sbtmstatehigh), 4);
-	if (regdata & SBTMH_SERR)
-		brcmf_sdcard_reg_write(sdiodev,
-				       CORE_SB(corebase, sbtmstatehigh), 4, 0);
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-					CORE_SB(corebase, sbimstate), 4);
-	if (regdata & (SBIM_IBE | SBIM_TO))
-		brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
-			regdata & ~(SBIM_IBE | SBIM_TO));
-
-	/* clear reset and allow it to propagate throughout the core */
-	brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-		(SICF_FGC << SBTML_SICF_SHIFT) |
-		(SICF_CLOCK_EN << SBTML_SICF_SHIFT));
-	udelay(1);
-
-	/* leave clock enabled */
-	brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-		(SICF_CLOCK_EN << SBTML_SICF_SHIFT));
-	udelay(1);
-}
-
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
 {
 	uint retries;
-	u32 regdata;
 	int bcmerror = 0;
+	struct chip_info *ci = bus->ci;
 
 	/* To enter download state, disable ARM and reset SOCRAM.
 	 * To exit download state, simply reset ARM (default is RAM boot).
@@ -3346,10 +3187,9 @@
 	if (enter) {
 		bus->alp_only = true;
 
-		brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
-					      bus->ci->armcorebase);
+		ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
 
-		brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+		ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
 
 		/* Clear the top bit of memory */
 		if (bus->ramsize) {
@@ -3358,11 +3198,7 @@
 					 (u8 *)&zeros, 4);
 		}
 	} else {
-		regdata = brcmf_sdcard_reg_read(bus->sdiodev,
-			CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
-		regdata &= (SBTML_RESET | SBTML_REJ_MASK |
-			(SICF_CLOCK_EN << SBTML_SICF_SHIFT));
-		if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+		if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
 			brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
 			bcmerror = -EBADE;
 			goto fail;
@@ -3377,18 +3213,18 @@
 		w_sdreg32(bus, 0xFFFFFFFF,
 			  offsetof(struct sdpcmd_regs, intstatus), &retries);
 
-		brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+		ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
 
 		/* Allow HT Clock now that the ARM is running. */
 		bus->alp_only = false;
 
-		bus->drvr->busstate = BRCMF_BUS_LOAD;
+		bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
 	}
 fail:
 	return bcmerror;
 }
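
Entering and leaving firmware-download state now dispatches through per-chip callbacks (coredisable, resetcore, iscoreup) on struct chip_info instead of open-coded Sonics backplane register sequences. A hedged sketch of that dispatch with stub callbacks; the simplified signatures (no sdiodev argument) and the core-ID constants are illustrative stand-ins, not the real bcma definitions:

/* Model of the download-state transition using per-chip callbacks in
 * place of open-coded backplane register sequences.  The callbacks are
 * stubs; core IDs and struct layout are illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define CORE_ARM_CM3		0x82a	/* stand-ins for BCMA core IDs */
#define CORE_INTERNAL_MEM	0x80e

struct chip_info {
	void (*coredisable)(struct chip_info *ci, unsigned int coreid);
	void (*resetcore)(struct chip_info *ci, unsigned int coreid);
	bool (*iscoreup)(struct chip_info *ci, unsigned int coreid);
};

static void stub_coredisable(struct chip_info *ci, unsigned int id)
{ (void)ci; printf("  disable core 0x%x\n", id); }
static void stub_resetcore(struct chip_info *ci, unsigned int id)
{ (void)ci; printf("  reset core 0x%x\n", id); }
static bool stub_iscoreup(struct chip_info *ci, unsigned int id)
{ (void)ci; printf("  check core 0x%x is up\n", id); return true; }

static int download_state(struct chip_info *ci, bool enter)
{
	if (enter) {
		/* halt the ARM and bring SOCRAM out of reset for the image */
		ci->coredisable(ci, CORE_ARM_CM3);
		ci->resetcore(ci, CORE_INTERNAL_MEM);
		return 0;
	}
	/* leaving: RAM must still be up, then let the ARM boot the image */
	if (!ci->iscoreup(ci, CORE_INTERNAL_MEM))
		return -1;
	ci->resetcore(ci, CORE_ARM_CM3);
	return 0;
}

int main(void)
{
	struct chip_info ci = { stub_coredisable, stub_resetcore, stub_iscoreup };

	printf("enter download state:\n");
	download_state(&ci, true);
	printf("exit download state:\n");
	download_state(&ci, false);
	return 0;
}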
 
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
 {
 	if (bus->firmware->size < bus->fw_ptr + len)
 		len = bus->firmware->size - bus->fw_ptr;
@@ -3398,10 +3234,7 @@
 	return len;
 }
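
brcmf_sdbrcm_get_image() hands the firmware image back in chunks: the request is clamped to the bytes remaining past fw_ptr, the data is copied out and the cursor advanced (the copy and advance sit outside the context shown in this hunk, so they are assumed here). A minimal standalone model of that chunked reader:

/* Standalone model of chunked reads from a preloaded firmware blob:
 * clamp the request to what is left, copy, and advance the cursor. */
#include <stdio.h>
#include <string.h>

struct fw_image {
	const unsigned char *data;
	size_t size;
	size_t fw_ptr;		/* read cursor, like bus->fw_ptr */
};

static size_t get_image(struct fw_image *fw, unsigned char *buf, size_t len)
{
	if (fw->size < fw->fw_ptr + len)
		len = fw->size - fw->fw_ptr;
	memcpy(buf, fw->data + fw->fw_ptr, len);
	fw->fw_ptr += len;
	return len;
}

int main(void)
{
	static const unsigned char blob[10] = "ABCDEFGHIJ";
	struct fw_image fw = { blob, sizeof(blob), 0 };
	unsigned char chunk[4];
	size_t n;

	while ((n = get_image(&fw, chunk, sizeof(chunk))) > 0)
		printf("read %zu bytes: %.*s\n", n, (int)n, (const char *)chunk);
	return 0;
}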
 
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
 {
 	int offset = 0;
 	uint len;
@@ -3410,8 +3243,7 @@
 
 	brcmf_dbg(INFO, "Enter\n");
 
-	bus->fw_name = BCM4329_FW_NAME;
-	ret = request_firmware(&bus->firmware, bus->fw_name,
+	ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
 			       &bus->sdiodev->func[2]->dev);
 	if (ret) {
 		brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3501,15 +3333,14 @@
 	return buf_len;
 }
 
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
 {
 	uint len;
 	char *memblock = NULL;
 	char *bufp;
 	int ret;
 
-	bus->nv_name = BCM4329_NV_NAME;
-	ret = request_firmware(&bus->firmware, bus->nv_name,
+	ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
 			       &bus->sdiodev->func[2]->dev);
 	if (ret) {
 		brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3549,7 +3380,7 @@
 	return ret;
 }
 
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 {
 	int bcmerror = -1;
 
@@ -3582,7 +3413,7 @@
 }
 
 static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 {
 	bool ret;
 
@@ -3596,91 +3427,11 @@
 	return ret;
 }
 
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_bus_init(struct device *dev)
 {
-	u32 local_hostintmask;
-	u8 saveclk;
-	uint retries;
-	int err;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	if (bus->watchdog_tsk) {
-		send_sig(SIGTERM, bus->watchdog_tsk, 1);
-		kthread_stop(bus->watchdog_tsk);
-		bus->watchdog_tsk = NULL;
-	}
-
-	if (bus->dpc_tsk && bus->dpc_tsk != current) {
-		send_sig(SIGTERM, bus->dpc_tsk, 1);
-		kthread_stop(bus->dpc_tsk);
-		bus->dpc_tsk = NULL;
-	}
-
-	down(&bus->sdsem);
-
-	bus_wake(bus);
-
-	/* Enable clock for device interrupts */
-	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
-	/* Disable and clear interrupts at the chip level also */
-	w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
-	local_hostintmask = bus->hostintmask;
-	bus->hostintmask = 0;
-
-	/* Change our idea of bus state */
-	bus->drvr->busstate = BRCMF_BUS_DOWN;
-
-	/* Force clocks on backplane to be sure F2 interrupt propagates */
-	saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-					SBSDIO_FUNC1_CHIPCLKCSR, &err);
-	if (!err) {
-		brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-				       SBSDIO_FUNC1_CHIPCLKCSR,
-				       (saveclk | SBSDIO_FORCE_HT), &err);
-	}
-	if (err)
-		brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
-
-	/* Turn off the bus (F2), free any pending packets */
-	brcmf_dbg(INTR, "disable SDIO interrupts\n");
-	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
-			 SDIO_FUNC_ENABLE_1, NULL);
-
-	/* Clear any pending interrupts now that F2 is disabled */
-	w_sdreg32(bus, local_hostintmask,
-		  offsetof(struct sdpcmd_regs, intstatus), &retries);
-
-	/* Turn off the backplane clock (only) */
-	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
-	/* Clear the data packet queues */
-	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
-
-	/* Clear any held glomming stuff */
-	if (bus->glomd)
-		brcmu_pkt_buf_free_skb(bus->glomd);
-
-	if (bus->glom)
-		brcmu_pkt_buf_free_skb(bus->glom);
-
-	bus->glom = bus->glomd = NULL;
-
-	/* Clear rx control and wake any waiters */
-	bus->rxlen = 0;
-	brcmf_sdbrcm_dcmd_resp_wake(bus);
-
-	/* Reset some F2 state stuff */
-	bus->rxskip = false;
-	bus->tx_seq = bus->rx_seq = 0;
-
-	up(&bus->sdsem);
-}
-
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
-{
-	struct brcmf_bus *bus = drvr->bus;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+	struct brcmf_sdio *bus = sdiodev->bus;
 	unsigned long timeout;
 	uint retries = 0;
 	u8 ready, enable;
@@ -3690,16 +3441,16 @@
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* try to download image and nvram to the dongle */
-	if (drvr->busstate == BRCMF_BUS_DOWN) {
+	if (bus_if->state == BRCMF_BUS_DOWN) {
 		if (!(brcmf_sdbrcm_download_firmware(bus)))
 			return -1;
 	}
 
-	if (!bus->drvr)
+	if (!bus->sdiodev->bus_if->drvr)
 		return 0;
 
 	/* Start the watchdog timer */
-	bus->drvr->tickcnt = 0;
+	bus->tickcnt = 0;
 	brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
 
 	down(&bus->sdsem);
@@ -3756,7 +3507,7 @@
 				       SBSDIO_WATERMARK, 8, &err);
 
 		/* Set bus state according to enable result */
-		drvr->busstate = BRCMF_BUS_DATA;
+		bus_if->state = BRCMF_BUS_DATA;
 	}
 
 	else {
@@ -3771,7 +3522,7 @@
 			       SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
 
 	/* If we didn't come up, turn off backplane clock */
-	if (drvr->busstate != BRCMF_BUS_DATA)
+	if (bus_if->state != BRCMF_BUS_DATA)
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
 
 exit:
@@ -3782,7 +3533,7 @@
 
 void brcmf_sdbrcm_isr(void *arg)
 {
-	struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+	struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -3791,7 +3542,7 @@
 		return;
 	}
 
-	if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
 		brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
 		return;
 	}
@@ -3814,14 +3565,14 @@
 		complete(&bus->dpc_wait);
 }
 
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 {
-	struct brcmf_bus *bus;
+#ifdef BCMDBG
+	struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif	/* BCMDBG */
 
 	brcmf_dbg(TIMER, "Enter\n");
 
-	bus = drvr->bus;
-
 	/* Ignore the timer if simulating bus down */
 	if (bus->sleeping)
 		return false;
@@ -3865,7 +3616,8 @@
 	}
 #ifdef BCMDBG
 	/* Poll for console output periodically */
-	if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+	if (bus_if->state == BRCMF_BUS_DATA &&
+	    bus->console_interval != 0) {
 		bus->console.count += BRCMF_WD_POLL_MS;
 		if (bus->console.count >= bus->console_interval) {
 			bus->console.count -= bus->console_interval;
@@ -3900,10 +3652,12 @@
 {
 	if (chipid == BCM4329_CHIP_ID)
 		return true;
+	if (chipid == BCM4330_CHIP_ID)
+		return true;
 	return false;
 }
 
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -3915,13 +3669,13 @@
 	bus->databuf = NULL;
 }
 
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
-	if (bus->drvr->maxctl) {
+	if (bus->sdiodev->bus_if->maxctl) {
 		bus->rxblen =
-		    roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
 			    ALIGNMENT) + BRCMF_SDALIGN;
 		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
 		if (!(bus->rxbuf))
@@ -3950,276 +3704,14 @@
 	return false;
 }
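
The rxblen computation a few lines up sizes the receive control buffer as the largest expected control frame plus the SDPCM header, rounded up to the bus alignment, with BRCMF_SDALIGN (1 << 6, from sdio_host.h in this series) of extra slack. A minimal standalone sketch of that arithmetic; ALIGNMENT, SDPCM_HDRLEN and maxctl values here are placeholders, not the driver's:

#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))	/* same as kernel roundup() */
#define ALIGNMENT	4	/* placeholder word alignment */
#define SDPCM_HDRLEN	12	/* placeholder SDPCM header size */
#define BRCMF_SDALIGN	(1 << 6)	/* SDIO alignment pad, 64 bytes */

int main(void)
{
	unsigned int maxctl = 8192;	/* hypothetical bus_if->maxctl */
	unsigned int rxblen = ROUNDUP(maxctl + SDPCM_HDRLEN, ALIGNMENT) + BRCMF_SDALIGN;

	/* 8204 rounded up to 4 stays 8204, plus 64 bytes of slack = 8268 */
	printf("rxblen = %u\n", rxblen);
	return 0;
}
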
 
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
-	u8 strength;	/* Pad Drive Strength in mA */
-	u8 sel;		/* Chip-specific select value */
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
-	{
-	4, 0x2}, {
-	2, 0x3}, {
-	1, 0x0}, {
-	0, 0x0}
-	};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
-	{
-	12, 0x7}, {
-	10, 0x6}, {
-	8, 0x5}, {
-	6, 0x4}, {
-	4, 0x2}, {
-	2, 0x1}, {
-	0, 0x0}
-	};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
-	{
-	32, 0x7}, {
-	26, 0x6}, {
-	22, 0x5}, {
-	16, 0x4}, {
-	12, 0x3}, {
-	8, 0x2}, {
-	4, 0x1}, {
-	0, 0x0}
-	};
-
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
-
-static char *brcmf_chipname(uint chipid, char *buf, uint len)
-{
-	const char *fmt;
-
-	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
-	snprintf(buf, len, fmt, chipid);
-	return buf;
-}
-
-static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
-						   u32 drivestrength) {
-	struct sdiod_drive_str *str_tab = NULL;
-	u32 str_mask = 0;
-	u32 str_shift = 0;
-	char chn[8];
-
-	if (!(bus->ci->cccaps & CC_CAP_PMU))
-		return;
-
-	switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
-	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
-		str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
-		str_mask = 0x30000000;
-		str_shift = 28;
-		break;
-	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
-	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
-		str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
-		str_mask = 0x00003800;
-		str_shift = 11;
-		break;
-	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
-		str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
-		str_mask = 0x00003800;
-		str_shift = 11;
-		break;
-	default:
-		brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
-			  brcmf_chipname(bus->ci->chip, chn, 8),
-			  bus->ci->chiprev, bus->ci->pmurev);
-		break;
-	}
-
-	if (str_tab != NULL) {
-		u32 drivestrength_sel = 0;
-		u32 cc_data_temp;
-		int i;
-
-		for (i = 0; str_tab[i].strength != 0; i++) {
-			if (drivestrength >= str_tab[i].strength) {
-				drivestrength_sel = str_tab[i].sel;
-				break;
-			}
-		}
-
-		brcmf_sdcard_reg_write(bus->sdiodev,
-			CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
-			4, 1);
-		cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
-			CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
-		cc_data_temp &= ~str_mask;
-		drivestrength_sel <<= str_shift;
-		cc_data_temp |= drivestrength_sel;
-		brcmf_sdcard_reg_write(bus->sdiodev,
-			CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
-			4, cc_data_temp);
-
-		brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
-			  drivestrength, cc_data_temp);
-	}
-}
-
-static int
-brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
-			      struct chip_info *ci, u32 regs)
-{
-	u32 regdata;
-
-	/*
-	 * Get CC core rev
-	 * Chipid is assume to be at offset 0 from regs arg
-	 * For different chiptypes or old sdio hosts w/o chipcommon,
-	 * other ways of recognition should be added here.
-	 */
-	ci->cccorebase = regs;
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-				CORE_CC_REG(ci->cccorebase, chipid), 4);
-	ci->chip = regdata & CID_ID_MASK;
-	ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-
-	brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
-
-	/* Address of cores for new chips should be added here */
-	switch (ci->chip) {
-	case BCM4329_CHIP_ID:
-		ci->buscorebase = BCM4329_CORE_BUS_BASE;
-		ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
-		ci->armcorebase	= BCM4329_CORE_ARM_BASE;
-		ci->ramsize = BCM4329_RAMSIZE;
-		break;
-	default:
-		brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
-		return -ENODEV;
-	}
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-		CORE_SB(ci->cccorebase, sbidhigh), 4);
-	ci->ccrev = SBCOREREV(regdata);
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-		CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
-	ci->pmurev = regdata & PCAP_REV_MASK;
-
-	regdata = brcmf_sdcard_reg_read(sdiodev,
-					CORE_SB(ci->buscorebase, sbidhigh), 4);
-	ci->buscorerev = SBCOREREV(regdata);
-	ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
-
-	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
-		  ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
-
-	/* get chipcommon capabilites */
-	ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
-		CORE_CC_REG(ci->cccorebase, capabilities), 4);
-
-	return 0;
-}
-
-static int
-brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
-{
-	struct chip_info *ci;
-	int err;
-	u8 clkval, clkset;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	/* alloc chip_info_t */
-	ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
-	if (NULL == ci)
-		return -ENOMEM;
-
-	/* bus/core/clk setup for register access */
-	/* Try forcing SDIO core to do ALPAvail request only */
-	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-			       SBSDIO_FUNC1_CHIPCLKCSR,	clkset, &err);
-	if (err) {
-		brcmf_dbg(ERROR, "error writing for HT off\n");
-		goto fail;
-	}
-
-	/* If register supported, wait for ALPAvail and then force ALP */
-	/* This may take up to 15 milliseconds */
-	clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-			SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-	if ((clkval & ~SBSDIO_AVBITS) == clkset) {
-		SPINWAIT(((clkval =
-				brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-						SBSDIO_FUNC1_CHIPCLKCSR,
-						NULL)),
-				!SBSDIO_ALPAV(clkval)),
-				PMU_MAX_TRANSITION_DLY);
-		if (!SBSDIO_ALPAV(clkval)) {
-			brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
-				  clkval);
-			err = -EBUSY;
-			goto fail;
-		}
-		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
-				SBSDIO_FORCE_ALP;
-		brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-				SBSDIO_FUNC1_CHIPCLKCSR,
-				clkset, &err);
-		udelay(65);
-	} else {
-		brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
-			  clkset, clkval);
-		err = -EACCES;
-		goto fail;
-	}
-
-	/* Also, disable the extra SDIO pull-ups */
-	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-			       SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
-	err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
-	if (err)
-		goto fail;
-
-	/*
-	 * Make sure any on-chip ARM is off (in case strapping is wrong),
-	 * or downloaded code was already running.
-	 */
-	brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
-
-	brcmf_sdcard_reg_write(bus->sdiodev,
-		CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
-	brcmf_sdcard_reg_write(bus->sdiodev,
-		CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
-
-	/* Disable F2 to clear any intermediate frame state on the dongle */
-	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
-		SDIO_FUNC_ENABLE_1, NULL);
-
-	/* WAR: cmd52 backplane read so core HW will drop ALPReq */
-	clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-			0, NULL);
-
-	/* Done with backplane-dependent accesses, can drop clock... */
-	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-			       SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
-	bus->ci = ci;
-	return 0;
-fail:
-	bus->ci = NULL;
-	kfree(ci);
-	return err;
-}
-
 static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 {
 	u8 clkctl = 0;
 	int err = 0;
 	int reg_addr;
 	u32 reg_val;
+	u8 idx;
 
 	bus->alp_only = true;
 
@@ -4234,7 +3726,7 @@
 #endif				/* BCMDBG */
 
 	/*
-	 * Force PLL off until brcmf_sdbrcm_chip_attach()
+	 * Force PLL off until brcmf_sdio_chip_attach()
 	 * programs PLL control regs
 	 */
 
@@ -4252,8 +3744,8 @@
 		goto fail;
 	}
 
-	if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
-		brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+	if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+		brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n");
 		goto fail;
 	}
 
@@ -4262,11 +3754,10 @@
 		goto fail;
 	}
 
-	brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+	brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci,
+					  SDIO_DRIVE_STRENGTH);
 
-	/* Get info on the ARM and SOCRAM cores... */
-	brcmf_sdcard_reg_read(bus->sdiodev,
-		  CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+	/* Get info on the SOCRAM cores... */
 	bus->ramsize = bus->ci->ramsize;
 	if (!(bus->ramsize)) {
 		brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
@@ -4274,7 +3765,8 @@
 	}
 
 	/* Set core control so an SDIO reset does a backplane reset */
-	reg_addr = bus->ci->buscorebase +
+	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+	reg_addr = bus->ci->c_inf[idx].base +
 		   offsetof(struct sdpcmd_regs, corecontrol);
 	reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
 	brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
@@ -4298,7 +3790,7 @@
 	return false;
 }
 
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -4306,7 +3798,7 @@
 	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
 			       SDIO_FUNC_ENABLE_1, NULL);
 
-	bus->drvr->busstate = BRCMF_BUS_DOWN;
+	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 	bus->sleeping = false;
 	bus->rxflow = false;
 
@@ -4333,7 +3825,7 @@
 static int
 brcmf_sdbrcm_watchdog_thread(void *data)
 {
-	struct brcmf_bus *bus = (struct brcmf_bus *)data;
+	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
 	allow_signal(SIGTERM);
 	/* Run until signal received */
@@ -4341,9 +3833,9 @@
 		if (kthread_should_stop())
 			break;
 		if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
-			brcmf_sdbrcm_bus_watchdog(bus->drvr);
+			brcmf_sdbrcm_bus_watchdog(bus);
 			/* Count the tick for reference */
-			bus->drvr->tickcnt++;
+			bus->tickcnt++;
 		} else
 			break;
 	}
@@ -4353,7 +3845,7 @@
 static void
 brcmf_sdbrcm_watchdog(unsigned long data)
 {
-	struct brcmf_bus *bus = (struct brcmf_bus *)data;
+	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
 	if (bus->watchdog_tsk) {
 		complete(&bus->watchdog_wait);
@@ -4364,23 +3856,14 @@
 	}
 }
 
-static void
-brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	kfree(bus->ci);
-	bus->ci = NULL;
-}
-
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
 	if (bus->ci) {
 		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
-		brcmf_sdbrcm_chip_detach(bus);
+		brcmf_sdio_chip_detach(&bus->ci);
 		if (bus->vars && bus->varsz)
 			kfree(bus->vars);
 		bus->vars = NULL;
@@ -4390,7 +3873,7 @@
 }
 
 /* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -4398,10 +3881,9 @@
 		/* De-register interrupt handler */
 		brcmf_sdcard_intr_dereg(bus->sdiodev);
 
-		if (bus->drvr) {
-			brcmf_detach(bus->drvr);
+		if (bus->sdiodev->bus_if->drvr) {
+			brcmf_detach(bus->sdiodev->dev);
 			brcmf_sdbrcm_release_dongle(bus);
-			bus->drvr = NULL;
 		}
 
 		brcmf_sdbrcm_release_malloc(bus);
@@ -4412,21 +3894,10 @@
 	brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
-			 u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 {
 	int ret;
-	struct brcmf_bus *bus;
-
-	/* Init global variables at run-time, not as part of the declaration.
-	 * This is required to support init/de-init of the driver.
-	 * Initialization
-	 * of globals as part of the declaration results in non-deterministic
-	 * behavior since the value of the globals may be different on the
-	 * first time that the driver is initialized vs subsequent
-	 * initializations.
-	 */
-	brcmf_c_init();
+	struct brcmf_sdio *bus;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -4434,12 +3905,13 @@
 	 * regsva == SI_ENUM_BASE*/
 
 	/* Allocate private bus interface state */
-	bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
 	if (!bus)
 		goto fail;
 
 	bus->sdiodev = sdiodev;
 	sdiodev->bus = bus;
+	skb_queue_head_init(&bus->glom);
 	bus->txbound = BRCMF_TXBOUND;
 	bus->rxbound = BRCMF_RXBOUND;
 	bus->txminmax = BRCMF_TXMINMAX;
@@ -4484,9 +3956,15 @@
 		bus->dpc_tsk = NULL;
 	}
 
+	/* Assign bus interface call back */
+	bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
+	bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init;
+	bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata;
+	bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl;
+	bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl;
 	/* Attach to the brcmf/OS/network interface */
-	bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
-	if (!bus->drvr) {
+	ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+	if (ret != 0) {
 		brcmf_dbg(ERROR, "brcmf_attach failed\n");
 		goto fail;
 	}
@@ -4514,16 +3992,17 @@
 	brcmf_dbg(INFO, "completed!!\n");
 
 	/* if firmware path present try to download and bring up bus */
-	ret = brcmf_bus_start(bus->drvr);
+	ret = brcmf_bus_start(bus->sdiodev->dev);
 	if (ret != 0) {
 		if (ret == -ENOLINK) {
 			brcmf_dbg(ERROR, "dongle is not responding\n");
 			goto fail;
 		}
 	}
-	/* Ok, have the per-port tell the stack we're open for business */
-	if (brcmf_net_attach(bus->drvr, 0) != 0) {
-		brcmf_dbg(ERROR, "Net attach failed!!\n");
+
+	/* add interface and open for business */
+	if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
+		brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
 		goto fail;
 	}
 
@@ -4536,7 +4015,7 @@
 
 void brcmf_sdbrcm_disconnect(void *ptr)
 {
-	struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+	struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -4546,18 +4025,9 @@
 	brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
-	return &bus->sdiodev->func[2]->dev;
-}
-
 void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
 {
-	/* don't start the wd until fw is loaded */
-	if (bus->drvr->busstate == BRCMF_BUS_DOWN)
-		return;
-
 	/* Totally stop the timer */
 	if (!wdtick && bus->wd_timer_valid == true) {
 		del_timer_sync(&bus->timer);
@@ -4566,6 +4036,10 @@
 		return;
 	}
 
+	/* don't start the wd until fw is loaded */
+	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+		return;
+
 	if (wdtick) {
 		if (bus->save_ms != BRCMF_WD_POLL_MS) {
 			if (bus->wd_timer_valid == true)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
new file mode 100644
index 0000000..11b2d7c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ***** SDIO interface chip backplane handle functions ***** */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/card.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+
+#include <chipcommon.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <soc.h>
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+#include "sdio_chip.h"
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE		0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE	0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE		0x18002000
+#define BCM4329_RAMSIZE			0x48000
+
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+	  ((sbidh) & SSB_IDHIGH_RCLO))
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB		0
+#define SOCI_AI		1
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK		0xff000000
+#define CIB_REV_SHIFT		24
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+	u8 strength;	/* Pad Drive Strength in mA */
+	u8 sel;		/* Chip-specific select value */
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1}
+};
+
+u8
+brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+{
+	u8 idx;
+
+	for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+		if (coreid == ci->c_inf[idx].id)
+			return idx;
+
+	return BRCMF_MAX_CORENUM;
+}
+
+static u32
+brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+		      struct chip_info *ci, u16 coreid)
+{
+	u32 regdata;
+	u8 idx;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+	return SBCOREREV(regdata);
+}
+
+static u32
+brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+		      struct chip_info *ci, u16 coreid)
+{
+	u8 idx;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
+static bool
+brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+		       struct chip_info *ci, u16 coreid)
+{
+	u32 regdata;
+	u8 idx;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+	return (SSB_TMSLOW_CLOCK == regdata);
+}
+
+static bool
+brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+		       struct chip_info *ci, u16 coreid)
+{
+	u32 regdata;
+	u8 idx;
+	bool ret;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+					ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+					ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+					4);
+	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+	return ret;
+}
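
The two iscoreup helpers above answer the same question for the two interconnect generations: on the Sonics (SB) backplane a core is up when, of the reset/reject/clock bits in sbtmstatelow, only the clock bit is set; on the AI backplane the wrapper's IOCTL must show the clock enabled without force-gating and RESET_CTL must show reset deasserted. A standalone illustration of those predicates with invented bit values (the real code reads the registers through brcmf_sdcard_reg_read()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real masks come from ssb_regs.h/bcma.h. */
#define DEMO_TMSLOW_RESET	0x0001
#define DEMO_TMSLOW_REJECT	0x0002
#define DEMO_TMSLOW_CLOCK	0x0004
#define DEMO_IOCTL_CLK		0x0001
#define DEMO_IOCTL_FGC		0x0002
#define DEMO_RESETCTL_RESET	0x0001

/* SB: of the reset/reject/clock bits, only the clock bit may be set */
static bool demo_sb_iscoreup(uint32_t tmstatelow)
{
	uint32_t v = tmstatelow &
		(DEMO_TMSLOW_RESET | DEMO_TMSLOW_REJECT | DEMO_TMSLOW_CLOCK);
	return v == DEMO_TMSLOW_CLOCK;
}

/* AI: clock on without force-gating, and reset deasserted */
static bool demo_ai_iscoreup(uint32_t ioctl, uint32_t resetctl)
{
	return ((ioctl & (DEMO_IOCTL_FGC | DEMO_IOCTL_CLK)) == DEMO_IOCTL_CLK) &&
	       ((resetctl & DEMO_RESETCTL_RESET) == 0);
}

int main(void)
{
	printf("sb up: %d, ai up: %d\n",
	       demo_sb_iscoreup(DEMO_TMSLOW_CLOCK),
	       demo_ai_iscoreup(DEMO_IOCTL_CLK, 0));
	return 0;
}
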
+
+static void
+brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+			  struct chip_info *ci, u16 coreid)
+{
+	u32 regdata;
+	u8 idx;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+		CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+	if (regdata & SSB_TMSLOW_RESET)
+		return;
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+		CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+	if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+		/*
+		 * set target reject and spin until busy is clear
+		 * (preserve core-specific bits)
+		 */
+		regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+		brcmf_sdcard_reg_write(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+				4, regdata | SSB_TMSLOW_REJECT);
+
+		regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+		udelay(1);
+		SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+			SSB_TMSHIGH_BUSY), 100000);
+
+		regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+		if (regdata & SSB_TMSHIGH_BUSY)
+			brcmf_dbg(ERROR, "core state still busy\n");
+
+		regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+		if (regdata & SSB_IDLOW_INITIATOR) {
+			regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
+				SSB_IMSTATE_REJECT;
+			brcmf_sdcard_reg_write(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+				regdata);
+			regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+			udelay(1);
+			SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+				SSB_IMSTATE_BUSY), 100000);
+		}
+
+		/* set reset and reject while enabling the clocks */
+		brcmf_sdcard_reg_write(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+			(SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+			SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+		regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+		udelay(10);
+
+		/* clear the initiator reject bit */
+		regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+		if (regdata & SSB_IDLOW_INITIATOR) {
+			regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+				~SSB_IMSTATE_REJECT;
+			brcmf_sdcard_reg_write(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+				regdata);
+		}
+	}
+
+	/* leave reset and reject asserted */
+	brcmf_sdcard_reg_write(sdiodev,
+		CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+		(SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+	udelay(1);
+}
+
+static void
+brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+			  struct chip_info *ci, u16 coreid)
+{
+	u8 idx;
+	u32 regdata;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	/* if core is already in reset, just return */
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+					ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+					4);
+	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+		return;
+
+	brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+			       4, 0);
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+					ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+	udelay(10);
+
+	brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+			       4, BCMA_RESET_CTL_RESET);
+	udelay(1);
+}
+
+static void
+brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+			struct chip_info *ci, u16 coreid)
+{
+	u32 regdata;
+	u8 idx;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	/*
+	 * Must do the disable sequence first to work for
+	 * arbitrary current core state.
+	 */
+	brcmf_sdio_sb_coredisable(sdiodev, ci, coreid);
+
+	/*
+	 * Now do the initialization sequence.
+	 * set reset while enabling the clock and
+	 * forcing them on throughout the core
+	 */
+	brcmf_sdcard_reg_write(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+			SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+	udelay(1);
+
+	/* clear any serror */
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+	if (regdata & SSB_TMSHIGH_SERR)
+		brcmf_sdcard_reg_write(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+		brcmf_sdcard_reg_write(sdiodev,
+			CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+			regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+
+	/* clear reset and allow it to propagate throughout the core */
+	brcmf_sdcard_reg_write(sdiodev,
+		CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+		SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+	udelay(1);
+
+	/* leave clock enabled */
+	brcmf_sdcard_reg_write(sdiodev,
+			       CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+			       4, SSB_TMSLOW_CLOCK);
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+				CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+	udelay(1);
+}
+
+static void
+brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+			struct chip_info *ci, u16 coreid)
+{
+	u8 idx;
+	u32 regdata;
+
+	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+	/* must disable first to work for arbitrary current core state */
+	brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
+
+	/* now do initialization sequence */
+	brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+			       4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+					ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+	brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+			       4, 0);
+	udelay(1);
+
+	brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+			       4, BCMA_IOCTL_CLK);
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+					ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+	udelay(1);
+}
+
+static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+				       struct chip_info *ci, u32 regs)
+{
+	u32 regdata;
+
+	/*
+	 * Get CC core rev
+	 * Chipid is assumed to be at offset 0 from regs arg
+	 * For different chiptypes or old sdio hosts w/o chipcommon,
+	 * other ways of recognition should be added here.
+	 */
+	ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+	ci->c_inf[0].base = regs;
+	regdata = brcmf_sdcard_reg_read(sdiodev,
+			CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+	ci->chip = regdata & CID_ID_MASK;
+	ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+	ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+	brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+	/* Address of cores for new chips should be added here */
+	switch (ci->chip) {
+	case BCM4329_CHIP_ID:
+		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+		ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+		ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+		ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+		ci->ramsize = BCM4329_RAMSIZE;
+		break;
+	case BCM4330_CHIP_ID:
+		ci->c_inf[0].wrapbase = 0x18100000;
+		ci->c_inf[0].cib = 0x27004211;
+		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+		ci->c_inf[1].base = 0x18002000;
+		ci->c_inf[1].wrapbase = 0x18102000;
+		ci->c_inf[1].cib = 0x07004211;
+		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+		ci->c_inf[2].base = 0x18004000;
+		ci->c_inf[2].wrapbase = 0x18104000;
+		ci->c_inf[2].cib = 0x0d080401;
+		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+		ci->c_inf[3].base = 0x18003000;
+		ci->c_inf[3].wrapbase = 0x18103000;
+		ci->c_inf[3].cib = 0x03004211;
+		ci->ramsize = 0x48000;
+		break;
+	default:
+		brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+		return -ENODEV;
+	}
+
+	switch (ci->socitype) {
+	case SOCI_SB:
+		ci->iscoreup = brcmf_sdio_sb_iscoreup;
+		ci->corerev = brcmf_sdio_sb_corerev;
+		ci->coredisable = brcmf_sdio_sb_coredisable;
+		ci->resetcore = brcmf_sdio_sb_resetcore;
+		break;
+	case SOCI_AI:
+		ci->iscoreup = brcmf_sdio_ai_iscoreup;
+		ci->corerev = brcmf_sdio_ai_corerev;
+		ci->coredisable = brcmf_sdio_ai_coredisable;
+		ci->resetcore = brcmf_sdio_ai_resetcore;
+		break;
+	default:
+		brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
+		return -ENODEV;
+	}
+
+	return 0;
+}
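
brcmf_sdio_chip_recognition() does two things: it maps the chip id onto that chip's fixed core layout, and it reads the interconnect type (Sonics SB vs. ARM AI) from the same register to select one of two sets of core operations stored as function pointers in struct chip_info. A toy standalone version of that dispatch pattern, with invented field positions and demo_* names:

#include <stdio.h>

enum soci_type { SOCI_SB_DEMO = 0, SOCI_AI_DEMO = 1 };

struct demo_chip {
	enum soci_type socitype;
	void (*coredisable)(struct demo_chip *chip);
};

static void sb_coredisable(struct demo_chip *chip) { puts("SB disable sequence"); }
static void ai_coredisable(struct demo_chip *chip) { puts("AI disable sequence"); }

static int demo_recognition(struct demo_chip *chip, unsigned int chipid_reg)
{
	chip->socitype = (chipid_reg >> 28) & 0xf;	/* placeholder type field */

	switch (chip->socitype) {
	case SOCI_SB_DEMO:
		chip->coredisable = sb_coredisable;
		return 0;
	case SOCI_AI_DEMO:
		chip->coredisable = ai_coredisable;
		return 0;
	default:
		return -1;	/* unsupported interconnect */
	}
}

int main(void)
{
	struct demo_chip chip;

	if (demo_recognition(&chip, 0x10000000) == 0)	/* type field = 1 -> AI */
		chip.coredisable(&chip);
	return 0;
}
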
+
+static int
+brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+{
+	int err = 0;
+	u8 clkval, clkset;
+
+	/* Try forcing SDIO core to do ALPAvail request only */
+	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+			       SBSDIO_FUNC1_CHIPCLKCSR,	clkset, &err);
+	if (err) {
+		brcmf_dbg(ERROR, "error writing for HT off\n");
+		return err;
+	}
+
+	/* If register supported, wait for ALPAvail and then force ALP */
+	/* This may take up to 15 milliseconds */
+	clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+				       SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+		brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+			  clkset, clkval);
+		return -EACCES;
+	}
+
+	SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+			!SBSDIO_ALPAV(clkval)),
+			PMU_MAX_TRANSITION_DLY);
+	if (!SBSDIO_ALPAV(clkval)) {
+		brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+			  clkval);
+		return -EBUSY;
+	}
+
+	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+			       SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	udelay(65);
+
+	/* Also, disable the extra SDIO pull-ups */
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+			       SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+	return 0;
+}
+
+static void
+brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+			     struct chip_info *ci)
+{
+	/* get chipcommon rev */
+	ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+
+	/* get chipcommon capabilities */
+	ci->c_inf[0].caps =
+		brcmf_sdcard_reg_read(sdiodev,
+		CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+
+	/* get pmu caps & rev */
+	if (ci->c_inf[0].caps & CC_CAP_PMU) {
+		ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
+			CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+		ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+	}
+
+	ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+
+	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+		  ci->c_inf[0].rev, ci->pmurev,
+		  ci->c_inf[1].rev, ci->c_inf[1].id);
+
+	/*
+	 * Make sure any on-chip ARM is off (in case strapping is wrong),
+	 * or downloaded code was already running.
+	 */
+	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3);
+}
+
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+			   struct chip_info **ci_ptr, u32 regs)
+{
+	int ret;
+	struct chip_info *ci;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* alloc chip_info_t */
+	ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+	if (!ci)
+		return -ENOMEM;
+
+	ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+	if (ret != 0)
+		goto err;
+
+	ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+	if (ret != 0)
+		goto err;
+
+	brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+
+	brcmf_sdcard_reg_write(sdiodev,
+		CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
+	brcmf_sdcard_reg_write(sdiodev,
+		CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+
+	*ci_ptr = ci;
+	return 0;
+
+err:
+	kfree(ci);
+	return ret;
+}
+
+void
+brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	kfree(*ci_ptr);
+	*ci_ptr = NULL;
+}
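
Attach and detach pass the descriptor through a pointer-to-pointer: brcmf_sdio_chip_attach() only sets the caller's handle once preparation and recognition have succeeded, and brcmf_sdio_chip_detach() frees it and clears the handle so stale uses are easy to spot. A small standalone sketch of the same ownership pattern (demo_* names are invented here):

#include <stdlib.h>

struct demo_ci { int chip; };

static int demo_attach(struct demo_ci **ci_ptr)
{
	struct demo_ci *ci = calloc(1, sizeof(*ci));

	if (!ci)
		return -1;
	ci->chip = 0x4330;	/* placeholder "recognition" result */
	*ci_ptr = ci;		/* caller's handle is set only on success */
	return 0;
}

static void demo_detach(struct demo_ci **ci_ptr)
{
	free(*ci_ptr);
	*ci_ptr = NULL;		/* mirrors brcmf_sdio_chip_detach() */
}

int main(void)
{
	struct demo_ci *ci = NULL;

	if (demo_attach(&ci) == 0)
		demo_detach(&ci);
	return 0;
}
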
+
+static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+void
+brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+				  struct chip_info *ci, u32 drivestrength)
+{
+	struct sdiod_drive_str *str_tab = NULL;
+	u32 str_mask = 0;
+	u32 str_shift = 0;
+	char chn[8];
+
+	if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+		return;
+
+	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	default:
+		brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+			  brcmf_sdio_chip_name(ci->chip, chn, 8),
+			  ci->chiprev, ci->pmurev);
+		break;
+	}
+
+	if (str_tab != NULL) {
+		u32 drivestrength_sel = 0;
+		u32 cc_data_temp;
+		int i;
+
+		for (i = 0; str_tab[i].strength != 0; i++) {
+			if (drivestrength >= str_tab[i].strength) {
+				drivestrength_sel = str_tab[i].sel;
+				break;
+			}
+		}
+
+		brcmf_sdcard_reg_write(sdiodev,
+			CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+			4, 1);
+		cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
+			CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+		cc_data_temp &= ~str_mask;
+		drivestrength_sel <<= str_shift;
+		cc_data_temp |= drivestrength_sel;
+		brcmf_sdcard_reg_write(sdiodev,
+			CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+			4, cc_data_temp);
+
+		brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+			  drivestrength, cc_data_temp);
+	}
+}
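
The drive-strength table above is ordered from strongest to weakest, so the selection loop stops at the first entry the requested milliamp value meets or exceeds, and the chosen select code is merged into the masked chipcontrol field with a read-modify-write. A standalone sketch of that selection using the 1.8V table and the mask/shift from the BCM4330 case (the starting register value is invented):

#include <stdio.h>
#include <stdint.h>

struct drive_str { uint8_t strength_ma; uint8_t sel; };

static const struct drive_str tab_1v8[] = {
	{32, 0x6}, {26, 0x7}, {22, 0x4}, {16, 0x5},
	{12, 0x2}, {8, 0x3}, {4, 0x0}, {0, 0x1},	/* 0 mA terminates the search */
};

int main(void)
{
	uint32_t request_ma = 12;
	uint32_t mask = 0x00003800, shift = 11;	/* field location from the patch */
	uint32_t chipcontrol = 0x0000ffff;	/* pretend current register value */
	uint32_t sel = 0;
	int i;

	for (i = 0; tab_1v8[i].strength_ma != 0; i++) {
		if (request_ma >= tab_1v8[i].strength_ma) {
			sel = tab_1v8[i].sel;
			break;
		}
	}

	chipcontrol = (chipcontrol & ~mask) | (sel << shift);
	printf("selected 0x%x, chipcontrol now 0x%08x\n", sel, chipcontrol);
	return 0;
}
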
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
new file mode 100644
index 0000000..ce974d7
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMFMAC_SDIO_CHIP_H_
+#define _BRCMFMAC_SDIO_CHIP_H_
+
+/*
+ * Core reg address translation.
+ * These macros return a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+		(base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+		(base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP		0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT			0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP		0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ		0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ		0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL		0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL			0x80
+#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
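
The CHIPCLKCSR helpers above distinguish the slow, always-available ALP clock from the fast HT (PLL) clock: ALPAV tests whether at least ALP is ready, HTAV whether both are, ALPONLY whether only ALP is, and CLKAV whether the clock a caller actually needs is available. A quick standalone evaluation of the same logic, with the bit values copied from the definitions above:

#include <stdio.h>

#define ALP_AVAIL	0x40
#define HT_AVAIL	0x80
#define AVBITS		(HT_AVAIL | ALP_AVAIL)
#define ALPAV(v)	((v) & AVBITS)
#define HTAV(v)		(((v) & AVBITS) == AVBITS)
#define ALPONLY(v)	(ALPAV(v) && !HTAV(v))
#define CLKAV(v, alponly)	(ALPAV(v) && ((alponly) ? 1 : HTAV(v)))

int main(void)
{
	unsigned int clkcsr = ALP_AVAIL;	/* ALP ready, HT (PLL) not yet */

	printf("alponly=%d need-alp-ok=%d need-ht-ok=%d\n",
	       ALPONLY(clkcsr), CLKAV(clkcsr, 1), CLKAV(clkcsr, 0));
	/* prints: alponly=1 need-alp-ok=1 need-ht-ok=0 */
	return 0;
}
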
+
+#define BRCMF_MAX_CORENUM	6
+
+struct chip_core_info {
+	u16 id;
+	u16 rev;
+	u32 base;
+	u32 wrapbase;
+	u32 caps;
+	u32 cib;
+};
+
+struct chip_info {
+	u32 chip;
+	u32 chiprev;
+	u32 socitype;
+	/* core info */
+	/* always put chipcommon core at 0, bus core at 1 */
+	struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+	u32 pmurev;
+	u32 pmucaps;
+	u32 ramsize;
+
+	bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+			 u16 coreid);
+	u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+			 u16 coreid);
+	void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+			struct chip_info *ci, u16 coreid);
+	void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+			struct chip_info *ci, u16 coreid);
+};
+
+struct sbconfig {
+	u32 PAD[2];
+	u32 sbipsflag;	/* initiator port ocp slave flag */
+	u32 PAD[3];
+	u32 sbtpsflag;	/* target port ocp slave flag */
+	u32 PAD[11];
+	u32 sbtmerrloga;	/* (sonics >= 2.3) */
+	u32 PAD;
+	u32 sbtmerrlog;	/* (sonics >= 2.3) */
+	u32 PAD[3];
+	u32 sbadmatch3;	/* address match3 */
+	u32 PAD;
+	u32 sbadmatch2;	/* address match2 */
+	u32 PAD;
+	u32 sbadmatch1;	/* address match1 */
+	u32 PAD[7];
+	u32 sbimstate;	/* initiator agent state */
+	u32 sbintvec;	/* interrupt mask */
+	u32 sbtmstatelow;	/* target state */
+	u32 sbtmstatehigh;	/* target state */
+	u32 sbbwa0;		/* bandwidth allocation table0 */
+	u32 PAD;
+	u32 sbimconfiglow;	/* initiator configuration */
+	u32 sbimconfighigh;	/* initiator configuration */
+	u32 sbadmatch0;	/* address match0 */
+	u32 PAD;
+	u32 sbtmconfiglow;	/* target configuration */
+	u32 sbtmconfighigh;	/* target configuration */
+	u32 sbbconfig;	/* broadcast configuration */
+	u32 PAD;
+	u32 sbbstate;	/* broadcast state */
+	u32 PAD[3];
+	u32 sbactcnfg;	/* activate configuration */
+	u32 PAD[3];
+	u32 sbflagst;	/* current sbflags */
+	u32 PAD[3];
+	u32 sbidlow;		/* identification */
+	u32 sbidhigh;	/* identification */
+};
+
+extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+				  struct chip_info **ci_ptr, u32 regs);
+extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+					      struct chip_info *ci,
+					      u32 drivestrength);
+extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+
+
+#endif		/* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 726fa89..0281d20 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -116,12 +116,20 @@
 #define SUCCESS	0
 #define ERROR	1
 
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#define BRCMF_SDALIGN	(1 << 6)
+
+/* watchdog polling interval in ms */
+#define BRCMF_WD_POLL_MS	10
+
 struct brcmf_sdreg {
 	int func;
 	int offset;
 	int value;
 };
 
+struct brcmf_sdio;
+
 struct brcmf_sdio_dev {
 	struct sdio_func *func[SDIO_MAX_FUNCS];
 	u8 num_funcs;			/* Supported funcs on client */
@@ -132,9 +140,10 @@
 	atomic_t suspend;		/* suspend flag */
 	wait_queue_head_t request_byte_wait;
 	wait_queue_head_t request_word_wait;
-	wait_queue_head_t request_packet_wait;
+	wait_queue_head_t request_chain_wait;
 	wait_queue_head_t request_buffer_wait;
-
+	struct device *dev;
+	struct brcmf_bus *bus_if;
 };
 
 /* Register/deregister device interrupt handler. */
@@ -182,11 +191,21 @@
  * NOTE: Async operation is not currently supported.
  */
 extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+		      uint flags, struct sk_buff *pkt);
+extern int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+		      uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+		      uint flags, struct sk_buff *pkt);
 extern int
 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+		      uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+			uint flags, struct sk_buff_head *pktq);
 
 /* Flags bits */
 
@@ -237,16 +256,20 @@
 /* read or write any buffer using cmd53 */
 extern int
 brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
-			   uint fix_inc, uint rw, uint fnc_num,
-			   u32 addr, uint regwidth,
-			   u32 buflen, u8 *buffer, struct sk_buff *pkt);
+			   uint fix_inc, uint rw, uint fnc_num, u32 addr,
+			   struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+			  uint write, uint func, uint addr,
+			  struct sk_buff_head *pktq);
 
 /* Watchdog timer interface for pm ops */
 extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
 				    bool enable);
 
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
-				u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
 extern void brcmf_sdbrcm_disconnect(void *ptr);
 extern void brcmf_sdbrcm_isr(void *arg);
+
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 #endif				/* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 5eddabe..f23b0c3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1429,7 +1429,7 @@
 
 static s32
 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
-			 enum nl80211_tx_power_setting type, s32 dbm)
+			    enum nl80211_tx_power_setting type, s32 mbm)
 {
 
 	struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@
 	u16 txpwrmw;
 	s32 err = 0;
 	s32 disable = 0;
+	s32 dbm = MBM_TO_DBM(mbm);
 
 	WL_TRACE("Enter\n");
 	if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@
 	case NL80211_TX_POWER_AUTOMATIC:
 		break;
 	case NL80211_TX_POWER_LIMITED:
-		if (dbm < 0) {
-			WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
-			err = -EINVAL;
-			goto done;
-		}
-		break;
 	case NL80211_TX_POWER_FIXED:
 		if (dbm < 0) {
 			WL_ERR("TX_POWER_FIXED - dbm is negative\n");
@@ -1997,7 +1992,7 @@
 }
 
 static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
-				   struct brcmf_bss_info *bi)
+				   struct brcmf_bss_info_le *bi)
 {
 	struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
 	struct ieee80211_channel *notify_channel;
@@ -2049,18 +2044,27 @@
 		notify_timestamp, notify_capability, notify_interval, notify_ie,
 		notify_ielen, notify_signal, GFP_KERNEL);
 
-	if (!bss) {
-		WL_ERR("cfg80211_inform_bss_frame error\n");
-		return -EINVAL;
-	}
+	if (!bss)
+		return -ENOMEM;
+
+	cfg80211_put_bss(bss);
 
 	return err;
 }
 
+static struct brcmf_bss_info_le *
+next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
+{
+	if (bss == NULL)
+		return list->bss_info_le;
+	return (struct brcmf_bss_info_le *)((unsigned long)bss +
+					    le32_to_cpu(bss->length));
+}
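
next_bss_le() walks the firmware's scan results as a packed sequence of variable-length records: each record begins with its own little-endian length, so the next record starts that many bytes further on, and a NULL cursor means "start at the head of the list". A standalone sketch of the same traversal over a toy record type (the real struct brcmf_bss_info_le carries far more than a length and an SSID):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_rec {
	uint32_t length;	/* total size of this record in bytes */
	char ssid[8];
};

int main(void)
{
	uint32_t storage[16];	/* 4-byte aligned backing buffer */
	unsigned char *buf = (unsigned char *)storage;
	struct demo_rec a = { sizeof(a) + 4, "ap-one" };	/* 4 trailing IE bytes */
	struct demo_rec b = { sizeof(b), "ap-two" };
	struct demo_rec *cur = NULL;
	int i;

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + a.length, &b, sizeof(b));

	for (i = 0; i < 2; i++) {
		cur = cur ? (struct demo_rec *)((unsigned char *)cur + cur->length)
			  : (struct demo_rec *)buf;
		printf("record %d: %s\n", i, cur->ssid);	/* ap-one, then ap-two */
	}
	return 0;
}
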
+
 static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
 {
 	struct brcmf_scan_results *bss_list;
-	struct brcmf_bss_info *bi = NULL;	/* must be initialized */
+	struct brcmf_bss_info_le *bi = NULL;	/* must be initialized */
 	s32 err = 0;
 	int i;
 
@@ -2072,7 +2076,7 @@
 	}
 	WL_SCAN("scanned AP count (%d)\n", bss_list->count);
 	for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
-		bi = next_bss(bss_list, bi);
+		bi = next_bss_le(bss_list, bi);
 		err = brcmf_inform_single_bss(cfg_priv, bi);
 		if (err)
 			break;
@@ -2085,8 +2089,9 @@
 {
 	struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
 	struct ieee80211_channel *notify_channel;
-	struct brcmf_bss_info *bi = NULL;
+	struct brcmf_bss_info_le *bi = NULL;
 	struct ieee80211_supported_band *band;
+	struct cfg80211_bss *bss;
 	u8 *buf = NULL;
 	s32 err = 0;
 	u16 channel;
@@ -2114,7 +2119,7 @@
 		goto CleanUp;
 	}
 
-	bi = (struct brcmf_bss_info *)(buf + 4);
+	bi = (struct brcmf_bss_info_le *)(buf + 4);
 
 	channel = bi->ctl_ch ? bi->ctl_ch :
 				CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
@@ -2140,10 +2145,17 @@
 	WL_CONN("signal: %d\n", notify_signal);
 	WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
 
-	cfg80211_inform_bss(wiphy, notify_channel, bssid,
+	bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
 		notify_timestamp, notify_capability, notify_interval,
 		notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
 
+	if (!bss) {
+		err = -ENOMEM;
+		goto CleanUp;
+	}
+
+	cfg80211_put_bss(bss);
+
 CleanUp:
 
 	kfree(buf);
@@ -2188,7 +2200,7 @@
 
 static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
 {
-	struct brcmf_bss_info *bi;
+	struct brcmf_bss_info_le *bi;
 	struct brcmf_ssid *ssid;
 	struct brcmf_tlv *tim;
 	u16 beacon_interval;
@@ -2211,7 +2223,7 @@
 		goto update_bss_info_out;
 	}
 
-	bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+	bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
 	err = brcmf_inform_single_bss(cfg_priv, bi);
 	if (err)
 		goto update_bss_info_out;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 62dc461..a613b49 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -352,15 +352,6 @@
 	return &cfg->conn_info;
 }
 
-static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
-					   struct brcmf_bss_info *bss)
-{
-	return bss = bss ?
-		(struct brcmf_bss_info *)((unsigned long)bss +
-				       le32_to_cpu(bss->length)) :
-		list->bss_info;
-}
-
 extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
 							struct device *busdev,
 							void *data);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 025fa0e..ab9bb11a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -16,6 +16,8 @@
  * File contents: support functions for PCI/PCIe
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/pci.h>
 
@@ -316,51 +318,24 @@
 
 #define	BADIDX		(SI_MAXCORES + 1)
 
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) ||	\
-		     (((si)->pub.buscoretype == PCI_CORE_ID) && \
-		      (si)->pub.buscorerev >= 13))
-
-#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
-			  PCI_16KB0_CCREGS_OFFSET))
-
 #define	IS_SIM(chippkg)	\
 	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
 
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
-	if ((si)->intrsoff_fn && \
-	    (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
-		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
+#define PCI(sih)	(ai_get_buscoretype(sih) == PCI_CORE_ID)
+#define PCIE(sih)	(ai_get_buscoretype(sih) == PCIE_CORE_ID)
 
-#define INTR_RESTORE(si, intr_val) \
-	if ((si)->intrsrestore_fn && \
-	    (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
-		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
-
-#define PCI(si)		((si)->pub.buscoretype == PCI_CORE_ID)
-#define PCIE(si)	((si)->pub.buscoretype == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(si)	(PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
 
 #ifdef BCMDBG
-#define	SI_MSG(args)	printk args
+#define	SI_MSG(fmt, ...)	pr_debug(fmt, ##__VA_ARGS__)
 #else
-#define	SI_MSG(args)
+#define	SI_MSG(fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
 #endif				/* BCMDBG */
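
The SI_MSG() rework above follows the usual kernel pattern: a pr_fmt prefix plus pr_debug() when BCMDBG is set, and no_printk() otherwise so the format arguments are still type-checked while nothing is emitted. A minimal standalone imitation of that pattern (local names; the ## __VA_ARGS__ extension is the same one the patch uses):

#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static inline int demo_no_printk(const char *fmt, ...)
{
	(void)fmt;
	return 0;	/* swallow the message, keep the format checking */
}

#ifdef DEMO_DEBUG
#define DEMO_MSG(fmt, ...)	printf("demo: " fmt, ##__VA_ARGS__)
#else
#define DEMO_MSG(fmt, ...)	demo_no_printk(fmt, ##__VA_ARGS__)
#endif

int main(void)
{
	DEMO_MSG("found core rev %d\n", 42);	/* compiles either way */
	return 0;
}
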
 
 #define	GOODCOREADDR(x, b) \
 	(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
 		IS_ALIGNED((x), SI_CORE_SIZE))
 
-#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
-			PCI_16KB0_PCIREGS_OFFSET)
-
 struct aidmp {
 	u32 oobselina30;	/* 0x000 */
 	u32 oobselina74;	/* 0x004 */
@@ -479,406 +454,13 @@
 	u32 componentid3;	/* 0xffc */
 };
 
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
-{
-	u32 ent;
-	uint inv = 0, nom = 0;
-
-	while (true) {
-		ent = R_REG(*eromptr);
-		(*eromptr)++;
-
-		if (mask == 0)
-			break;
-
-		if ((ent & ER_VALID) == 0) {
-			inv++;
-			continue;
-		}
-
-		if (ent == (ER_END | ER_VALID))
-			break;
-
-		if ((ent & mask) == match)
-			break;
-
-		nom++;
-	}
-
-	return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
-	u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
-	u32 asd, sz, szd;
-
-	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
-	if (((asd & ER_TAG1) != ER_ADD) ||
-	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
-	    ((asd & AD_ST_MASK) != st)) {
-		/* This is not what we want, "push" it back */
-		(*eromptr)--;
-		return 0;
-	}
-	*addrl = asd & AD_ADDR_MASK;
-	if (asd & AD_AG32)
-		*addrh = get_erom_ent(sih, eromptr, 0, 0);
-	else
-		*addrh = 0;
-	*sizeh = 0;
-	sz = asd & AD_SZ_MASK;
-	if (sz == AD_SZ_SZD) {
-		szd = get_erom_ent(sih, eromptr, 0, 0);
-		*sizel = szd & SD_SZ_MASK;
-		if (szd & SD_SG32)
-			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
-	} else
-		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
-	return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
-{
-	struct si_info *sii = (struct si_info *)sih;
-
-	u32 erombase;
-	u32 __iomem *eromptr, *eromlim;
-	void __iomem *regs = cc;
-
-	erombase = R_REG(&cc->eromptr);
-
-	/* Set wrappers address */
-	sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
-
-	/* Now point the window at the erom */
-	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
-	eromptr = regs;
-	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
-	while (eromptr < eromlim) {
-		u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
-		u32 mpd, asd, addrl, addrh, sizel, sizeh;
-		u32 __iomem *base;
-		uint i, j, idx;
-		bool br;
-
-		br = false;
-
-		/* Grok a component */
-		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
-		if (cia == (ER_END | ER_VALID)) {
-			/*  Found END of erom */
-			ai_hwfixup(sii);
-			return;
-		}
-		base = eromptr - 1;
-		cib = get_erom_ent(sih, &eromptr, 0, 0);
-
-		if ((cib & ER_TAG) != ER_CI) {
-			/* CIA not followed by CIB */
-			goto error;
-		}
-
-		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
-		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
-		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
-		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
-		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
-		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
-			continue;
-		if ((nmw + nsw == 0)) {
-			/* A component which is not a core */
-			if (cid == OOB_ROUTER_CORE_ID) {
-				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
-					      &addrl, &addrh, &sizel, &sizeh);
-				if (asd != 0)
-					sii->oob_router = addrl;
-			}
-			continue;
-		}
-
-		idx = sii->numcores;
-/*		sii->eromptr[idx] = base; */
-		sii->cia[idx] = cia;
-		sii->cib[idx] = cib;
-		sii->coreid[idx] = cid;
-
-		for (i = 0; i < nmp; i++) {
-			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
-			if ((mpd & ER_TAG) != ER_MP) {
-				/* Not enough MP entries for component */
-				goto error;
-			}
-		}
-
-		/* First Slave Address Descriptor should be port 0:
-		 * the main register space for the core
-		 */
-		asd =
-		    get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
-			    &sizel, &sizeh);
-		if (asd == 0) {
-			/* Try again to see if it is a bridge */
-			asd =
-			    get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
-				    &addrh, &sizel, &sizeh);
-			if (asd != 0)
-				br = true;
-			else if ((addrh != 0) || (sizeh != 0)
-				 || (sizel != SI_CORE_SIZE)) {
-				/* First Slave ASD for core malformed */
-				goto error;
-			}
-		}
-		sii->coresba[idx] = addrl;
-		sii->coresba_size[idx] = sizel;
-		/* Get any more ASDs in port 0 */
-		j = 1;
-		do {
-			asd =
-			    get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
-				    &addrh, &sizel, &sizeh);
-			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
-				sii->coresba2[idx] = addrl;
-				sii->coresba2_size[idx] = sizel;
-			}
-			j++;
-		} while (asd != 0);
-
-		/* Go through the ASDs for other slave ports */
-		for (i = 1; i < nsp; i++) {
-			j = 0;
-			do {
-				asd =
-				    get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
-					    &addrl, &addrh, &sizel, &sizeh);
-			} while (asd != 0);
-			if (j == 0) {
-				/* SP has no address descriptors */
-				goto error;
-			}
-		}
-
-		/* Now get master wrappers */
-		for (i = 0; i < nmw; i++) {
-			asd =
-			    get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
-				    &addrh, &sizel, &sizeh);
-			if (asd == 0) {
-				/* Missing descriptor for MW */
-				goto error;
-			}
-			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
-				/* Master wrapper %d is not 4KB */
-				goto error;
-			}
-			if (i == 0)
-				sii->wrapba[idx] = addrl;
-		}
-
-		/* And finally slave wrappers */
-		for (i = 0; i < nsw; i++) {
-			uint fwp = (nsp == 1) ? 0 : 1;
-			asd =
-			    get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
-				    &addrl, &addrh, &sizel, &sizeh);
-			if (asd == 0) {
-				/* Missing descriptor for SW */
-				goto error;
-			}
-			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
-				/* Slave wrapper is not 4KB */
-				goto error;
-			}
-			if ((nmw == 0) && (i == 0))
-				sii->wrapba[idx] = addrl;
-		}
-
-		/* Don't record bridges */
-		if (br)
-			continue;
-
-		/* Done with core */
-		sii->numcores++;
-	}
-
- error:
-	/* Reached end of erom without finding END */
-	sii->numcores = 0;
-	return;
-}
-
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
-	struct si_info *sii = (struct si_info *)sih;
-	u32 addr = sii->coresba[coreidx];
-	u32 wrap = sii->wrapba[coreidx];
-
-	if (coreidx >= sii->numcores)
-		return NULL;
-
-	/* point bar0 window */
-	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
-	/* point bar0 2nd 4KB window */
-	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
-	sii->curidx = coreidx;
-
-	return sii->curmap;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
-	return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
-	struct si_info *sii;
-	uint cidx;
-
-	sii = (struct si_info *)sih;
-	cidx = sii->curidx;
-
-	if (asidx == 0)
-		return sii->coresba[cidx];
-	else if (asidx == 1)
-		return sii->coresba2[cidx];
-	else {
-		/* Need to parse the erom again to find addr space */
-		return 0;
-	}
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
-	struct si_info *sii;
-	uint cidx;
-
-	sii = (struct si_info *)sih;
-	cidx = sii->curidx;
-
-	if (asidx == 0)
-		return sii->coresba_size[cidx];
-	else if (asidx == 1)
-		return sii->coresba2_size[cidx];
-	else {
-		/* Need to parse the erom again to find addr */
-		return 0;
-	}
-}
-
-uint ai_flag(struct si_pub *sih)
-{
-	struct si_info *sii;
-	struct aidmp *ai;
-
-	sii = (struct si_info *)sih;
-	ai = sii->curwrap;
-
-	return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
-	struct si_info *sii;
-	u32 cia;
-
-	sii = (struct si_info *)sih;
-	cia = sii->cia[sii->curidx];
-	return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
-	struct si_info *sii;
-	u32 cib;
-
-	sii = (struct si_info *)sih;
-	cib = sii->cib[sii->curidx];
-	return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
-	struct si_info *sii;
-	struct aidmp *ai;
-
-	sii = (struct si_info *)sih;
-	ai = sii->curwrap;
-
-	return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
-		 SICF_CLOCK_EN)
-		&& ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
-	struct si_info *sii;
-	struct aidmp *ai;
-	u32 w;
-
-	sii = (struct si_info *)sih;
-
-	ai = sii->curwrap;
-
-	if (mask || val) {
-		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
-		W_REG(&ai->ioctrl, w);
-	}
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
-	struct si_info *sii;
-	struct aidmp *ai;
-	u32 w;
-
-	sii = (struct si_info *)sih;
-	ai = sii->curwrap;
-
-	if (mask || val) {
-		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
-		W_REG(&ai->ioctrl, w);
-	}
-
-	return R_REG(&ai->ioctrl);
-}
-
 /* return true if PCIE capability exists in the pci config space */
 static bool ai_ispcie(struct si_info *sii)
 {
 	u8 cap_ptr;
 
 	cap_ptr =
-	    pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+	    pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
 					NULL);
 	if (!cap_ptr)
 		return false;
@@ -894,117 +476,69 @@
 	return true;
 }
 
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
-	struct si_info *sii;
-	struct aidmp *ai;
-	u32 w;
-
-	sii = (struct si_info *)sih;
-	ai = sii->curwrap;
-
-	if (mask || val) {
-		w = ((R_REG(&ai->iostatus) & ~mask) | val);
-		W_REG(&ai->iostatus, w);
-	}
-
-	return R_REG(&ai->iostatus);
-}
-
 static bool
-ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
 {
-	bool pci, pcie;
-	uint i;
-	uint pciidx, pcieidx, pcirev, pcierev;
-	struct chipcregs __iomem *cc;
+	struct bcma_device *pci = NULL;
+	struct bcma_device *pcie = NULL;
+	struct bcma_device *core;
 
-	cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+	/* no cores found, bail out */
+	if (cc->bus->nr_cores == 0)
+		return false;
 
 	/* get chipcommon rev */
-	sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+	sii->pub.ccrev = cc->id.rev;
 
 	/* get chipcommon chipstatus */
-	if (sii->pub.ccrev >= 11)
-		sii->pub.chipst = R_REG(&cc->chipstatus);
+	if (ai_get_ccrev(&sii->pub) >= 11)
+		sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
 
 	/* get chipcommon capabilities */
-	sii->pub.cccaps = R_REG(&cc->capabilities);
-	/* get chipcommon extended capabilities */
-
-	if (sii->pub.ccrev >= 35)
-		sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+	sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
 
 	/* get pmu rev and caps */
-	if (sii->pub.cccaps & CC_CAP_PMU) {
-		sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+	if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
+		sii->pub.pmucaps = bcma_read32(cc,
+					       CHIPCREGOFFS(pmucapabilities));
 		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
 	}
 
-	/* figure out bus/orignal core idx */
-	sii->pub.buscoretype = NODEV_CORE_ID;
-	sii->pub.buscorerev = NOREV;
-	sii->pub.buscoreidx = BADIDX;
-
-	pci = pcie = false;
-	pcirev = pcierev = NOREV;
-	pciidx = pcieidx = BADIDX;
-
-	for (i = 0; i < sii->numcores; i++) {
+	/* figure out buscore */
+	list_for_each_entry(core, &cc->bus->cores, list) {
 		uint cid, crev;
 
-		ai_setcoreidx(&sii->pub, i);
-		cid = ai_coreid(&sii->pub);
-		crev = ai_corerev(&sii->pub);
+		cid = core->id.id;
+		crev = core->id.rev;
 
 		if (cid == PCI_CORE_ID) {
-			pciidx = i;
-			pcirev = crev;
-			pci = true;
+			pci = core;
 		} else if (cid == PCIE_CORE_ID) {
-			pcieidx = i;
-			pcierev = crev;
-			pcie = true;
+			pcie = core;
 		}
-
-		/* find the core idx before entering this func. */
-		if ((savewin && (savewin == sii->coresba[i])) ||
-		    (cc == sii->regs[i]))
-			*origidx = i;
 	}
 
 	if (pci && pcie) {
 		if (ai_ispcie(sii))
-			pci = false;
+			pci = NULL;
 		else
-			pcie = false;
+			pcie = NULL;
 	}
 	if (pci) {
-		sii->pub.buscoretype = PCI_CORE_ID;
-		sii->pub.buscorerev = pcirev;
-		sii->pub.buscoreidx = pciidx;
+		sii->buscore = pci;
 	} else if (pcie) {
-		sii->pub.buscoretype = PCIE_CORE_ID;
-		sii->pub.buscorerev = pcierev;
-		sii->pub.buscoreidx = pcieidx;
+		sii->buscore = pcie;
 	}
 
 	/* fixup necessary chip/core configurations */
-	if (SI_FAST(sii)) {
-		if (!sii->pch) {
-			sii->pch = pcicore_init(&sii->pub, sii->pbus,
-						(__iomem void *)PCIEREGS(sii));
-			if (sii->pch == NULL)
-				return false;
-		}
+	if (!sii->pch) {
+		sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
+		if (sii->pch == NULL)
+			return false;
 	}
-	if (ai_pci_fixcfg(&sii->pub)) {
-		/* si_doattach: si_pci_fixcfg failed */
+	if (ai_pci_fixcfg(&sii->pub))
 		return false;
-	}
-
-	/* return to the original core */
-	ai_setcoreidx(&sii->pub, *origidx);
 
 	return true;
 }
@@ -1017,39 +551,27 @@
 	uint w = 0;
 
 	/* do a pci config read to get subsystem id and subvendor id */
-	pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+	pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
 
 	sii->pub.boardvendor = w & 0xffff;
 	sii->pub.boardtype = (w >> 16) & 0xffff;
-	sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
 }
 
 static struct si_info *ai_doattach(struct si_info *sii,
-				   void __iomem *regs, struct pci_dev *pbus)
+				   struct bcma_bus *pbus)
 {
 	struct si_pub *sih = &sii->pub;
 	u32 w, savewin;
-	struct chipcregs __iomem *cc;
+	struct bcma_device *cc;
 	uint socitype;
-	uint origidx;
-
-	memset((unsigned char *) sii, 0, sizeof(struct si_info));
 
 	savewin = 0;
 
-	sih->buscoreidx = BADIDX;
+	sii->icbus = pbus;
+	sii->pcibus = pbus->host_pci;
 
-	sii->curmap = regs;
-	sii->pbus = pbus;
-
-	/* find Chipcommon address */
-	pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
-	if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
-		savewin = SI_ENUM_BASE;
-
-	pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
-			       SI_ENUM_BASE);
-	cc = (struct chipcregs __iomem *) regs;
+	/* switch to Chipcommon core */
+	cc = pbus->drv_cc.core;
 
 	/* bus/core/clk setup for register access */
 	if (!ai_buscore_prep(sii))
@@ -1062,94 +584,74 @@
 	 *   hosts w/o chipcommon), some way of recognizing them needs to
 	 *   be added here.
 	 */
-	w = R_REG(&cc->chipid);
+	w = bcma_read32(cc, CHIPCREGOFFS(chipid));
 	socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
 	/* Might as well fill in chip id rev & pkg */
 	sih->chip = w & CID_ID_MASK;
 	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
 	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
 
-	sih->issim = false;
-
 	/* scan for cores */
-	if (socitype == SOCI_AI) {
-		SI_MSG(("Found chip type AI (0x%08x)\n", w));
-		/* pass chipc address instead of original core base */
-		ai_scan(&sii->pub, cc);
-	} else {
-		/* Found chip of unknown type */
-		return NULL;
-	}
-	/* no cores found, bail out */
-	if (sii->numcores == 0)
+	if (socitype != SOCI_AI)
 		return NULL;
 
-	/* bus/core/clk setup */
-	origidx = SI_CC_IDX;
-	if (!ai_buscore_setup(sii, savewin, &origidx))
+	SI_MSG("Found chip type AI (0x%08x)\n", w);
+	if (!ai_buscore_setup(sii, cc))
 		goto exit;
 
 	/* Init nvram from sprom/otp if they exist */
-	if (srom_var_init(&sii->pub, cc))
+	if (srom_var_init(&sii->pub))
 		goto exit;
 
 	ai_nvram_process(sii);
 
 	/* === NVRAM, clock is ready === */
-	cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-	W_REG(&cc->gpiopullup, 0);
-	W_REG(&cc->gpiopulldown, 0);
-	ai_setcoreidx(sih, origidx);
+	bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
+	bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
 
 	/* PMU specific initializations */
-	if (sih->cccaps & CC_CAP_PMU) {
-		u32 xtalfreq;
+	if (ai_get_cccaps(sih) & CC_CAP_PMU) {
 		si_pmu_init(sih);
-		si_pmu_chip_init(sih);
-
-		xtalfreq = si_pmu_measure_alpclk(sih);
-		si_pmu_pll_init(sih, xtalfreq);
+		(void)si_pmu_measure_alpclk(sih);
 		si_pmu_res_init(sih);
-		si_pmu_swreg_init(sih);
 	}
 
 	/* setup the GPIO based LED powersave register */
 	w = getintvar(sih, BRCMS_SROM_LEDDC);
 	if (w == 0)
 		w = DEFAULT_GPIOTIMERVAL;
-	ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
-		   ~0, w);
+	ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
+		  ~0, w);
 
-	if (PCIE(sii))
+	if (PCIE(sih))
 		pcicore_attach(sii->pch, SI_DOATTACH);
 
-	if (sih->chip == BCM43224_CHIP_ID) {
+	if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
 		/*
 		 * enable 12 mA drive strength for 43224 and
 		 * set chipControl register bit 15
 		 */
-		if (sih->chiprev == 0) {
-			SI_MSG(("Applying 43224A0 WARs\n"));
-			ai_corereg(sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, chipcontrol),
-				   CCTRL43224_GPIO_TOGGLE,
-				   CCTRL43224_GPIO_TOGGLE);
+		if (ai_get_chiprev(sih) == 0) {
+			SI_MSG("Applying 43224A0 WARs\n");
+			ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
+				  CCTRL43224_GPIO_TOGGLE,
+				  CCTRL43224_GPIO_TOGGLE);
 			si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
 					   CCTRL_43224A0_12MA_LED_DRIVE);
 		}
-		if (sih->chiprev >= 1) {
-			SI_MSG(("Applying 43224B0+ WARs\n"));
+		if (ai_get_chiprev(sih) >= 1) {
+			SI_MSG("Applying 43224B0+ WARs\n");
 			si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
 					   CCTRL_43224B0_12MA_LED_DRIVE);
 		}
 	}
 
-	if (sih->chip == BCM4313_CHIP_ID) {
+	if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
 		/*
 		 * enable 12 mA drive strength for 4313 and
 		 * set chipControl register bit 1
 		 */
-		SI_MSG(("Applying 4313 WARs\n"));
+		SI_MSG("Applying 4313 WARs\n");
 		si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
 				   CCTRL_4313_12MA_LED_DRIVE);
 	}
@@ -1165,22 +667,19 @@
 }
 
 /*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
+ * Allocate a si handle and do the attach.
  */
 struct si_pub *
-ai_attach(void __iomem *regs, struct pci_dev *sdh)
+ai_attach(struct bcma_bus *pbus)
 {
 	struct si_info *sii;
 
 	/* alloc struct si_info */
-	sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+	sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
 	if (sii == NULL)
 		return NULL;
 
-	if (ai_doattach(sii, regs, sdh) == NULL) {
+	if (ai_doattach(sii, pbus) == NULL) {
 		kfree(sii);
 		return NULL;
 	}
@@ -1209,292 +708,66 @@
 	kfree(sii);
 }
 
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
-			  void *intrsrestore_fn,
-			  void *intrsenabled_fn, void *intr_arg)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	sii->intr_arg = intr_arg;
-	sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
-	sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
-	sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
-	/* save current core id.  when this function called, the current core
-	 * must be the core which provides driver functions(il, et, wl, etc.)
-	 */
-	sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
-	return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
 /* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
 {
+	struct bcma_device *core;
 	struct si_info *sii;
 	uint found;
-	uint i;
 
 	sii = (struct si_info *)sih;
 
 	found = 0;
 
-	for (i = 0; i < sii->numcores; i++)
-		if (sii->coreid[i] == coreid) {
+	list_for_each_entry(core, &sii->icbus->cores, list)
+		if (core->id.id == coreid) {
 			if (found == coreunit)
-				return i;
+				return core;
 			found++;
 		}
 
-	return BADIDX;
+	return NULL;
 }
 
 /*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
+ * read/modify chipcommon core register.
  */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
 {
-	uint idx;
-
-	idx = ai_findcoreidx(sih, coreid, coreunit);
-	if (idx >= SI_MAXCORES)
-		return NULL;
-
-	return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
-			     uint *intr_val)
-{
-	void __iomem *cc;
+	struct bcma_device *cc;
+	u32 w;
 	struct si_info *sii;
 
 	sii = (struct si_info *)sih;
-
-	if (SI_FAST(sii)) {
-		/* Overloading the origidx variable to remember the coreid,
-		 * this works because the core ids cannot be confused with
-		 * core indices.
-		 */
-		*origidx = coreid;
-		if (coreid == CC_CORE_ID)
-			return CCREGS_FAST(sii);
-		else if (coreid == sih->buscoretype)
-			return PCIEREGS(sii);
-	}
-	INTR_OFF(sii, *intr_val);
-	*origidx = sii->curidx;
-	cc = ai_setcore(sih, coreid, 0);
-	return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	if (SI_FAST(sii)
-	    && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
-		return;
-
-	ai_setcoreidx(sih, coreid);
-	INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
-	struct si_info *sii = (struct si_info *)sih;
-	u32 *w = (u32 *) sii->curwrap;
-	W_REG(w + (offset / 4), val);
-	return;
-}
-
-/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
- */
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
-		uint val)
-{
-	uint origidx = 0;
-	u32 __iomem *r = NULL;
-	uint w;
-	uint intr_val = 0;
-	bool fast = false;
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-
-	if (coreidx >= SI_MAXCORES)
-		return 0;
-
-	/*
-	 * If pci/pcie, we can get at pci/pcie regs
-	 * and on newer cores to chipc
-	 */
-	if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
-		/* Chipc registers are mapped at 12KB */
-		fast = true;
-		r = (u32 __iomem *)((__iomem char *)sii->curmap +
-				    PCI_16KB0_CCREGS_OFFSET + regoff);
-	} else if (sii->pub.buscoreidx == coreidx) {
-		/*
-		 * pci registers are at either in the last 2KB of
-		 * an 8KB window or, in pcie and pci rev 13 at 8KB
-		 */
-		fast = true;
-		if (SI_FAST(sii))
-			r = (u32 __iomem *)((__iomem char *)sii->curmap +
-				    PCI_16KB0_PCIREGS_OFFSET + regoff);
-		else
-			r = (u32 __iomem *)((__iomem char *)sii->curmap +
-				    ((regoff >= SBCONFIGOFF) ?
-				      PCI_BAR0_PCISBR_OFFSET :
-				      PCI_BAR0_PCIREGS_OFFSET) + regoff);
-	}
-
-	if (!fast) {
-		INTR_OFF(sii, intr_val);
-
-		/* save current core index */
-		origidx = ai_coreidx(&sii->pub);
-
-		/* switch core */
-		r = (u32 __iomem *) ((unsigned char __iomem *)
-			ai_setcoreidx(&sii->pub, coreidx) + regoff);
-	}
+	cc = sii->icbus->drv_cc.core;
 
 	/* mask and set */
 	if (mask || val) {
-		w = (R_REG(r) & ~mask) | val;
-		W_REG(r, w);
+		bcma_maskset32(cc, regoff, ~mask, val);
 	}
 
 	/* readback */
-	w = R_REG(r);
-
-	if (!fast) {
-		/* restore core index */
-		if (origidx != coreidx)
-			ai_setcoreidx(&sii->pub, origidx);
-
-		INTR_RESTORE(sii, intr_val);
-	}
+	w = bcma_read32(cc, regoff);
 
 	return w;
 }
 
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
-	struct si_info *sii;
-	u32 dummy;
-	struct aidmp *ai;
-
-	sii = (struct si_info *)sih;
-
-	ai = sii->curwrap;
-
-	/* if core is already in reset, just return */
-	if (R_REG(&ai->resetctrl) & AIRC_RESET)
-		return;
-
-	W_REG(&ai->ioctrl, bits);
-	dummy = R_REG(&ai->ioctrl);
-	udelay(10);
-
-	W_REG(&ai->resetctrl, AIRC_RESET);
-	udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
-	struct si_info *sii;
-	struct aidmp *ai;
-	u32 dummy;
-
-	sii = (struct si_info *)sih;
-	ai = sii->curwrap;
-
-	/*
-	 * Must do the disable sequence first to work
-	 * for arbitrary current core state.
-	 */
-	ai_core_disable(sih, (bits | resetbits));
-
-	/*
-	 * Now do the initialization sequence.
-	 */
-	W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
-	dummy = R_REG(&ai->ioctrl);
-	W_REG(&ai->resetctrl, 0);
-	udelay(1);
-
-	W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
-	dummy = R_REG(&ai->ioctrl);
-	udelay(1);
-}
-
 /* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
+static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
 {
-	struct chipcregs __iomem *cc;
+	struct si_info *sii;
 	u32 val;
 
-	if (sii->pub.ccrev < 6) {
-		pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+	sii = (struct si_info *)sih;
+	if (ai_get_ccrev(&sii->pub) < 6) {
+		pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
 				      &val);
 		if (val & PCI_CFG_GPIO_SCS)
 			return SCC_SS_PCI;
 		return SCC_SS_XTAL;
-	} else if (sii->pub.ccrev < 10) {
-		cc = (struct chipcregs __iomem *)
-			ai_setcoreidx(&sii->pub, sii->curidx);
-		return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+	} else if (ai_get_ccrev(&sii->pub) < 10) {
+		return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+		       SCC_SS_MASK;
 	} else			/* Insta-clock */
 		return SCC_SS_XTAL;
 }
@@ -1503,24 +776,24 @@
 * return the ILP (slowclock) min or max frequency
 * precondition: we've established the chip has dynamic clk control
 */
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
-			    struct chipcregs __iomem *cc)
+static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
+			    struct bcma_device *cc)
 {
 	u32 slowclk;
 	uint div;
 
-	slowclk = ai_slowclk_src(sii);
-	if (sii->pub.ccrev < 6) {
+	slowclk = ai_slowclk_src(sih, cc);
+	if (ai_get_ccrev(sih) < 6) {
 		if (slowclk == SCC_SS_PCI)
 			return max_freq ? (PCIMAXFREQ / 64)
 				: (PCIMINFREQ / 64);
 		else
 			return max_freq ? (XTALMAXFREQ / 32)
 				: (XTALMINFREQ / 32);
-	} else if (sii->pub.ccrev < 10) {
+	} else if (ai_get_ccrev(sih) < 10) {
 		div = 4 *
-		    (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
-		      SCC_CD_SHIFT) + 1);
+		    (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+		      SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
 		if (slowclk == SCC_SS_LPO)
 			return max_freq ? LPOMAXFREQ : LPOMINFREQ;
 		else if (slowclk == SCC_SS_XTAL)
@@ -1531,15 +804,15 @@
 				: (PCIMINFREQ / div);
 	} else {
 		/* Chipc rev 10 is InstaClock */
-		div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
-		div = 4 * (div + 1);
+		div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+		div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
 		return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
 	}
 	return 0;
 }
 
 static void
-ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
 {
 	uint slowmaxfreq, pll_delay, slowclk;
 	uint pll_on_delay, fref_sel_delay;
@@ -1552,55 +825,40 @@
 	 * powered down by dynamic clk control logic.
 	 */
 
-	slowclk = ai_slowclk_src(sii);
+	slowclk = ai_slowclk_src(sih, cc);
 	if (slowclk != SCC_SS_XTAL)
 		pll_delay += XTAL_ON_DELAY;
 
 	/* Starting with 4318 it is ILP that is used for the delays */
 	slowmaxfreq =
-	    ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+	    ai_slowclk_freq(sih,
+			    (ai_get_ccrev(sih) >= 10) ? false : true, cc);
 
 	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
 	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
 
-	W_REG(&cc->pll_on_delay, pll_on_delay);
-	W_REG(&cc->fref_sel_delay, fref_sel_delay);
+	bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
+	bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
 }
 
 /* initialize power control delay registers */
 void ai_clkctl_init(struct si_pub *sih)
 {
-	struct si_info *sii;
-	uint origidx = 0;
-	struct chipcregs __iomem *cc;
-	bool fast;
+	struct bcma_device *cc;
 
-	if (!(sih->cccaps & CC_CAP_PWR_CTL))
+	if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
 		return;
 
-	sii = (struct si_info *)sih;
-	fast = SI_FAST(sii);
-	if (!fast) {
-		origidx = sii->curidx;
-		cc = (struct chipcregs __iomem *)
-			ai_setcore(sih, CC_CORE_ID, 0);
-		if (cc == NULL)
-			return;
-	} else {
-		cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
-		if (cc == NULL)
-			return;
-	}
+	cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+	if (cc == NULL)
+		return;
 
 	/* set all Instaclk chip ILP to 1 MHz */
-	if (sih->ccrev >= 10)
-		SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
-			(ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+	if (ai_get_ccrev(sih) >= 10)
+		bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+			       (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
 
-	ai_clkctl_setdelay(sii, cc);
-
-	if (!fast)
-		ai_setcoreidx(sih, origidx);
+	ai_clkctl_setdelay(sih, cc);
 }
 
 /*
@@ -1610,47 +868,25 @@
 u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
 {
 	struct si_info *sii;
-	uint origidx = 0;
-	struct chipcregs __iomem *cc;
+	struct bcma_device *cc;
 	uint slowminfreq;
 	u16 fpdelay;
-	uint intr_val = 0;
-	bool fast;
 
 	sii = (struct si_info *)sih;
-	if (sih->cccaps & CC_CAP_PMU) {
-		INTR_OFF(sii, intr_val);
+	if (ai_get_cccaps(sih) & CC_CAP_PMU) {
 		fpdelay = si_pmu_fast_pwrup_delay(sih);
-		INTR_RESTORE(sii, intr_val);
 		return fpdelay;
 	}
 
-	if (!(sih->cccaps & CC_CAP_PWR_CTL))
+	if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
 		return 0;
 
-	fast = SI_FAST(sii);
 	fpdelay = 0;
-	if (!fast) {
-		origidx = sii->curidx;
-		INTR_OFF(sii, intr_val);
-		cc = (struct chipcregs __iomem *)
-			ai_setcore(sih, CC_CORE_ID, 0);
-		if (cc == NULL)
-			goto done;
-	} else {
-		cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
-		if (cc == NULL)
-			goto done;
-	}
-
-	slowminfreq = ai_slowclk_freq(sii, false, cc);
-	fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
-		   (slowminfreq - 1)) / slowminfreq;
-
- done:
-	if (!fast) {
-		ai_setcoreidx(sih, origidx);
-		INTR_RESTORE(sii, intr_val);
+	cc = ai_findcore(sih, CC_CORE_ID, 0);
+	if (cc) {
+		slowminfreq = ai_slowclk_freq(sih, false, cc);
+		fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
+			    * 1000000) + (slowminfreq - 1)) / slowminfreq;
 	}
 	return fpdelay;
 }
@@ -1664,12 +900,12 @@
 	sii = (struct si_info *)sih;
 
 	/* pcie core doesn't have any mapping to control the xtal pu */
-	if (PCIE(sii))
+	if (PCIE(sih))
 		return -1;
 
-	pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
-	pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
-	pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+	pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
+	pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
+	pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
 
 	/*
 	 * Avoid glitching the clock if GPRS is already using it.
@@ -1690,9 +926,9 @@
 			out |= PCI_CFG_GPIO_XTAL;
 			if (what & PLL)
 				out |= PCI_CFG_GPIO_PLL;
-			pci_write_config_dword(sii->pbus,
+			pci_write_config_dword(sii->pcibus,
 					       PCI_GPIO_OUT, out);
-			pci_write_config_dword(sii->pbus,
+			pci_write_config_dword(sii->pcibus,
 					       PCI_GPIO_OUTEN, outen);
 			udelay(XTAL_ON_DELAY);
 		}
@@ -1700,7 +936,7 @@
 		/* turn pll on */
 		if (what & PLL) {
 			out &= ~PCI_CFG_GPIO_PLL;
-			pci_write_config_dword(sii->pbus,
+			pci_write_config_dword(sii->pcibus,
 					       PCI_GPIO_OUT, out);
 			mdelay(2);
 		}
@@ -1709,9 +945,9 @@
 			out &= ~PCI_CFG_GPIO_XTAL;
 		if (what & PLL)
 			out |= PCI_CFG_GPIO_PLL;
-		pci_write_config_dword(sii->pbus,
+		pci_write_config_dword(sii->pcibus,
 				       PCI_GPIO_OUT, out);
-		pci_write_config_dword(sii->pbus,
+		pci_write_config_dword(sii->pcibus,
 				       PCI_GPIO_OUTEN, outen);
 	}
 
@@ -1721,63 +957,52 @@
 /* clk control mechanism through chipcommon, no policy checking */
 static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
 {
-	uint origidx = 0;
-	struct chipcregs __iomem *cc;
+	struct bcma_device *cc;
 	u32 scc;
-	uint intr_val = 0;
-	bool fast = SI_FAST(sii);
 
 	/* chipcommon cores prior to rev6 don't support dynamic clock control */
-	if (sii->pub.ccrev < 6)
+	if (ai_get_ccrev(&sii->pub) < 6)
 		return false;
 
-	if (!fast) {
-		INTR_OFF(sii, intr_val);
-		origidx = sii->curidx;
-		cc = (struct chipcregs __iomem *)
-					ai_setcore(&sii->pub, CC_CORE_ID, 0);
-	} else {
-		cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
-		if (cc == NULL)
-			goto done;
-	}
+	cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
 
-	if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
-		goto done;
+	if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
+	    (ai_get_ccrev(&sii->pub) < 20))
+		return mode == CLK_FAST;
 
 	switch (mode) {
 	case CLK_FAST:		/* FORCEHT, fast (pll) clock */
-		if (sii->pub.ccrev < 10) {
+		if (ai_get_ccrev(&sii->pub) < 10) {
 			/*
 			 * don't forget to force xtal back
 			 * on before we clear SCC_DYN_XTAL..
 			 */
 			ai_clkctl_xtal(&sii->pub, XTAL, ON);
-			SET_REG(&cc->slow_clk_ctl,
-				(SCC_XC | SCC_FS | SCC_IP), SCC_IP);
-		} else if (sii->pub.ccrev < 20) {
-			OR_REG(&cc->system_clk_ctl, SYCC_HR);
+			bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
+				       (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+		} else if (ai_get_ccrev(&sii->pub) < 20) {
+			bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
 		} else {
-			OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+			bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
 		}
 
 		/* wait for the PLL */
-		if (sii->pub.cccaps & CC_CAP_PMU) {
+		if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
 			u32 htavail = CCS_HTAVAIL;
-			SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
-				  == 0), PMU_MAX_TRANSITION_DLY);
+			SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
+				   htavail) == 0), PMU_MAX_TRANSITION_DLY);
 		} else {
 			udelay(PLL_DELAY);
 		}
 		break;
 
 	case CLK_DYNAMIC:	/* enable dynamic clock control */
-		if (sii->pub.ccrev < 10) {
-			scc = R_REG(&cc->slow_clk_ctl);
+		if (ai_get_ccrev(&sii->pub) < 10) {
+			scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
 			scc &= ~(SCC_FS | SCC_IP | SCC_XC);
 			if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
 				scc |= SCC_XC;
-			W_REG(&cc->slow_clk_ctl, scc);
+			bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
 
 			/*
 			 * for dynamic control, we have to
@@ -1785,11 +1010,11 @@
 			 */
 			if (scc & SCC_XC)
 				ai_clkctl_xtal(&sii->pub, XTAL, OFF);
-		} else if (sii->pub.ccrev < 20) {
+		} else if (ai_get_ccrev(&sii->pub) < 20) {
 			/* Instaclock */
-			AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+			bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
 		} else {
-			AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+			bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
 		}
 		break;
 
@@ -1797,11 +1022,6 @@
 		break;
 	}
 
- done:
-	if (!fast) {
-		ai_setcoreidx(&sii->pub, origidx);
-		INTR_RESTORE(sii, intr_val);
-	}
 	return mode == CLK_FAST;
 }
 
@@ -1820,46 +1040,25 @@
 	sii = (struct si_info *)sih;
 
 	/* chipcommon cores prior to rev6 don't support dynamic clock control */
-	if (sih->ccrev < 6)
+	if (ai_get_ccrev(sih) < 6)
 		return false;
 
-	if (PCI_FORCEHT(sii))
+	if (PCI_FORCEHT(sih))
 		return mode == CLK_FAST;
 
 	return _ai_clkctl_cc(sii, mode);
 }
 
-/* Build device path */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
-	int slen;
-
-	if (!path || size <= 0)
-		return -1;
-
-	slen = snprintf(path, (size_t) size, "pci/%u/%u/",
-		((struct si_info *)sih)->pbus->bus->number,
-		PCI_SLOT(((struct pci_dev *)
-				(((struct si_info *)(sih))->pbus))->devfn));
-
-	if (slen < 0 || slen >= size) {
-		path[0] = '\0';
-		return -1;
-	}
-
-	return 0;
-}
-
 void ai_pci_up(struct si_pub *sih)
 {
 	struct si_info *sii;
 
 	sii = (struct si_info *)sih;
 
-	if (PCI_FORCEHT(sii))
+	if (PCI_FORCEHT(sih))
 		_ai_clkctl_cc(sii, CLK_FAST);
 
-	if (PCIE(sii))
+	if (PCIE(sih))
 		pcicore_up(sii->pch, SI_PCIUP);
 
 }
@@ -1882,7 +1081,7 @@
 	sii = (struct si_info *)sih;
 
 	/* release FORCEHT since chip is going to "down" state */
-	if (PCI_FORCEHT(sii))
+	if (PCI_FORCEHT(sih))
 		_ai_clkctl_cc(sii, CLK_DYNAMIC);
 
 	pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1895,42 +1094,23 @@
 void ai_pci_setup(struct si_pub *sih, uint coremask)
 {
 	struct si_info *sii;
-	struct sbpciregs __iomem *regs = NULL;
-	u32 siflag = 0, w;
-	uint idx = 0;
+	u32 w;
 
 	sii = (struct si_info *)sih;
 
-	if (PCI(sii)) {
-		/* get current core index */
-		idx = sii->curidx;
-
-		/* we interrupt on this backplane flag number */
-		siflag = ai_flag(sih);
-
-		/* switch over to pci core */
-		regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
-	}
-
 	/*
 	 * Enable sb->pci interrupts.  Assume
 	 * PCI rev 2.3 support was added in pci core rev 6 and things changed.
 	 */
-	if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+	if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
 		/* pci config write to set this core bit in PCIIntMask */
-		pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+		pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
 		w |= (coremask << PCI_SBIM_SHIFT);
-		pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
-	} else {
-		/* set sbintvec bit for our flag number */
-		ai_setint(sih, siflag);
+		pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
 	}
 
-	if (PCI(sii)) {
-		pcicore_pci_setup(sii->pch, regs);
-
-		/* switch back to previous core */
-		ai_setcoreidx(sih, idx);
+	if (PCI(sih)) {
+		pcicore_pci_setup(sii->pch);
 	}
 }
 
@@ -1940,25 +1120,11 @@
  */
 int ai_pci_fixcfg(struct si_pub *sih)
 {
-	uint origidx;
-	void __iomem *regs = NULL;
 	struct si_info *sii = (struct si_info *)sih;
 
 	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
-	/* save the current index */
-	origidx = ai_coreidx(&sii->pub);
-
 	/* check 'pi' is correct and fix it if not */
-	regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
-	if (sii->pub.buscoretype == PCIE_CORE_ID)
-		pcicore_fixcfg_pcie(sii->pch,
-				    (struct sbpcieregs __iomem *)regs);
-	else if (sii->pub.buscoretype == PCI_CORE_ID)
-		pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
-
-	/* restore the original index */
-	ai_setcoreidx(&sii->pub, origidx);
-
+	pcicore_fixcfg(sii->pch);
 	pcicore_hwup(sii->pch);
 	return 0;
 }
@@ -1969,58 +1135,42 @@
 	uint regoff;
 
 	regoff = offsetof(struct chipcregs, gpiocontrol);
-	return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+	return ai_cc_reg(sih, regoff, mask, val);
 }
 
 void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
 {
-	struct si_info *sii;
-	struct chipcregs __iomem *cc;
-	uint origidx;
+	struct bcma_device *cc;
 	u32 val;
 
-	sii = (struct si_info *)sih;
-	origidx = ai_coreidx(sih);
-
-	cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-
-	val = R_REG(&cc->chipcontrol);
+	cc = ai_findcore(sih, CC_CORE_ID, 0);
 
 	if (on) {
-		if (sih->chippkg == 9 || sih->chippkg == 0xb)
+		if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
 			/* Ext PA Controls for 4331 12x9 Package */
-			W_REG(&cc->chipcontrol, val |
-			      CCTRL4331_EXTPA_EN |
-			      CCTRL4331_EXTPA_ON_GPIO2_5);
+			bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+				   CCTRL4331_EXTPA_EN |
+				   CCTRL4331_EXTPA_ON_GPIO2_5);
 		else
 			/* Ext PA Controls for 4331 12x12 Package */
-			W_REG(&cc->chipcontrol,
-			      val | CCTRL4331_EXTPA_EN);
+			bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+				   CCTRL4331_EXTPA_EN);
 	} else {
 		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
-		W_REG(&cc->chipcontrol, val);
+		bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
+			    ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
 	}
-
-	ai_setcoreidx(sih, origidx);
 }
 
 /* Enable BT-COEX & Ex-PA for 4313 */
 void ai_epa_4313war(struct si_pub *sih)
 {
-	struct si_info *sii;
-	struct chipcregs __iomem *cc;
-	uint origidx;
+	struct bcma_device *cc;
 
-	sii = (struct si_info *)sih;
-	origidx = ai_coreidx(sih);
-
-	cc = ai_setcore(sih, CC_CORE_ID, 0);
+	cc = ai_findcore(sih, CC_CORE_ID, 0);
 
 	/* EPA Fix */
-	W_REG(&cc->gpiocontrol,
-	      R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
-	ai_setcoreidx(sih, origidx);
+	bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
 }
 
 /* check if the device is removed */
@@ -2031,7 +1181,7 @@
 
 	sii = (struct si_info *)sih;
 
-	pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+	pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
 	if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
 		return true;
 
@@ -2040,26 +1190,23 @@
 
 bool ai_is_sprom_available(struct si_pub *sih)
 {
-	if (sih->ccrev >= 31) {
-		struct si_info *sii;
-		uint origidx;
-		struct chipcregs __iomem *cc;
+	struct si_info *sii = (struct si_info *)sih;
+
+	if (ai_get_ccrev(sih) >= 31) {
+		struct bcma_device *cc;
 		u32 sromctrl;
 
-		if ((sih->cccaps & CC_CAP_SROM) == 0)
+		if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
 			return false;
 
-		sii = (struct si_info *)sih;
-		origidx = sii->curidx;
-		cc = ai_setcoreidx(sih, SI_CC_IDX);
-		sromctrl = R_REG(&cc->sromcontrol);
-		ai_setcoreidx(sih, origidx);
+		cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+		sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
 		return sromctrl & SRC_PRESENT;
 	}
 
-	switch (sih->chip) {
+	switch (ai_get_chip_id(sih)) {
 	case BCM4313_CHIP_ID:
-		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+		return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
 	default:
 		return true;
 	}
@@ -2067,9 +1214,11 @@
 
 bool ai_is_otp_disabled(struct si_pub *sih)
 {
-	switch (sih->chip) {
+	struct si_info *sii = (struct si_info *)sih;
+
+	switch (ai_get_chip_id(sih)) {
 	case BCM4313_CHIP_ID:
-		return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+		return (sii->chipst & CST4313_OTP_PRESENT) == 0;
 		/* These chips always have their OTP on */
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
@@ -2077,3 +1226,15 @@
 		return false;
 	}
 }
+
+uint ai_get_buscoretype(struct si_pub *sih)
+{
+	struct si_info *sii = (struct si_info *)sih;
+	return sii->buscore->id.id;
+}
+
+uint ai_get_buscorerev(struct si_pub *sih)
+{
+	struct si_info *sii = (struct si_info *)sih;
+	return sii->buscore->id.rev;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 106a742..f84c6f7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -17,6 +17,8 @@
 #ifndef	_BRCM_AIUTILS_H_
 #define	_BRCM_AIUTILS_H_
 
+#include <linux/bcma/bcma.h>
+
 #include "types.h"
 
 /*
@@ -38,88 +40,12 @@
 /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
 #define SI_PCIE_DMA_H32		0x80000000
 
-/* core codes */
-#define	NODEV_CORE_ID		0x700	/* Invalid coreid */
-#define	CC_CORE_ID		0x800	/* chipcommon core */
-#define	ILINE20_CORE_ID		0x801	/* iline20 core */
-#define	SRAM_CORE_ID		0x802	/* sram core */
-#define	SDRAM_CORE_ID		0x803	/* sdram core */
-#define	PCI_CORE_ID		0x804	/* pci core */
-#define	MIPS_CORE_ID		0x805	/* mips core */
-#define	ENET_CORE_ID		0x806	/* enet mac core */
-#define	CODEC_CORE_ID		0x807	/* v90 codec core */
-#define	USB_CORE_ID		0x808	/* usb 1.1 host/device core */
-#define	ADSL_CORE_ID		0x809	/* ADSL core */
-#define	ILINE100_CORE_ID	0x80a	/* iline100 core */
-#define	IPSEC_CORE_ID		0x80b	/* ipsec core */
-#define	UTOPIA_CORE_ID		0x80c	/* utopia core */
-#define	PCMCIA_CORE_ID		0x80d	/* pcmcia core */
-#define	SOCRAM_CORE_ID		0x80e	/* internal memory core */
-#define	MEMC_CORE_ID		0x80f	/* memc sdram core */
-#define	OFDM_CORE_ID		0x810	/* OFDM phy core */
-#define	EXTIF_CORE_ID		0x811	/* external interface core */
-#define	D11_CORE_ID		0x812	/* 802.11 MAC core */
-#define	APHY_CORE_ID		0x813	/* 802.11a phy core */
-#define	BPHY_CORE_ID		0x814	/* 802.11b phy core */
-#define	GPHY_CORE_ID		0x815	/* 802.11g phy core */
-#define	MIPS33_CORE_ID		0x816	/* mips3302 core */
-#define	USB11H_CORE_ID		0x817	/* usb 1.1 host core */
-#define	USB11D_CORE_ID		0x818	/* usb 1.1 device core */
-#define	USB20H_CORE_ID		0x819	/* usb 2.0 host core */
-#define	USB20D_CORE_ID		0x81a	/* usb 2.0 device core */
-#define	SDIOH_CORE_ID		0x81b	/* sdio host core */
-#define	ROBO_CORE_ID		0x81c	/* roboswitch core */
-#define	ATA100_CORE_ID		0x81d	/* parallel ATA core */
-#define	SATAXOR_CORE_ID		0x81e	/* serial ATA & XOR DMA core */
-#define	GIGETH_CORE_ID		0x81f	/* gigabit ethernet core */
-#define	PCIE_CORE_ID		0x820	/* pci express core */
-#define	NPHY_CORE_ID		0x821	/* 802.11n 2x2 phy core */
-#define	SRAMC_CORE_ID		0x822	/* SRAM controller core */
-#define	MINIMAC_CORE_ID		0x823	/* MINI MAC/phy core */
-#define	ARM11_CORE_ID		0x824	/* ARM 1176 core */
-#define	ARM7S_CORE_ID		0x825	/* ARM7tdmi-s core */
-#define	LPPHY_CORE_ID		0x826	/* 802.11a/b/g phy core */
-#define	PMU_CORE_ID		0x827	/* PMU core */
-#define	SSNPHY_CORE_ID		0x828	/* 802.11n single-stream phy core */
-#define	SDIOD_CORE_ID		0x829	/* SDIO device core */
-#define	ARMCM3_CORE_ID		0x82a	/* ARM Cortex M3 core */
-#define	HTPHY_CORE_ID		0x82b	/* 802.11n 4x4 phy core */
-#define	MIPS74K_CORE_ID		0x82c	/* mips 74k core */
-#define	GMAC_CORE_ID		0x82d	/* Gigabit MAC core */
-#define	DMEMC_CORE_ID		0x82e	/* DDR1/2 memory controller core */
-#define	PCIERC_CORE_ID		0x82f	/* PCIE Root Complex core */
-#define	OCP_CORE_ID		0x830	/* OCP2OCP bridge core */
-#define	SC_CORE_ID		0x831	/* shared common core */
-#define	AHB_CORE_ID		0x832	/* OCP2AHB bridge core */
-#define	SPIH_CORE_ID		0x833	/* SPI host core */
-#define	I2S_CORE_ID		0x834	/* I2S core */
-#define	DMEMS_CORE_ID		0x835	/* SDR/DDR1 memory controller core */
-#define	DEF_SHIM_COMP		0x837	/* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID	0x367	/* OOB router core ID */
-#define	DEF_AI_COMP		0xfff	/* Default component, in ai chips it
-					 * maps all unused address ranges
-					 */
-
 /* chipcommon being the first core: */
 #define	SI_CC_IDX		0
 
 /* SOC Interconnect types (aka chip types) */
 #define	SOCI_AI			1
 
-/* Common core control flags */
-#define	SICF_BIST_EN		0x8000
-#define	SICF_PME_EN		0x4000
-#define	SICF_CORE_BITS		0x3ffc
-#define	SICF_FGC		0x0002
-#define	SICF_CLOCK_EN		0x0001
-
-/* Common core status flags */
-#define	SISF_BIST_DONE		0x8000
-#define	SISF_BIST_ERROR		0x4000
-#define	SISF_GATED_CLK		0x2000
-#define	SISF_DMA64		0x1000
-#define	SISF_CORE_BITS		0x0fff
-
 /* A register that is common to all cores to
  * communicate w/PMU regarding clock control.
  */
@@ -220,26 +146,15 @@
  *   public (read-only) portion of aiutils handle returned by si_attach()
  */
 struct si_pub {
-	uint buscoretype;	/* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
-	uint buscorerev;	/* buscore rev */
-	uint buscoreidx;	/* buscore index */
 	int ccrev;		/* chip common core rev */
 	u32 cccaps;		/* chip common capabilities */
-	u32 cccaps_ext;	/* chip common capabilities extension */
 	int pmurev;		/* pmu core rev */
 	u32 pmucaps;		/* pmu capabilities */
 	uint boardtype;		/* board type */
 	uint boardvendor;	/* board vendor */
-	uint boardflags;	/* board flags */
-	uint boardflags2;	/* board flags2 */
 	uint chip;		/* chip number */
 	uint chiprev;		/* chip revision */
 	uint chippkg;		/* chip package option */
-	u32 chipst;		/* chip status */
-	bool issim;		/* chip is in simulation or emulation */
-	uint socirev;		/* SOC interconnect rev */
-	bool pci_pr32414;
-
 };
 
 struct pci_dev;
@@ -255,38 +170,13 @@
 /* misc si info needed by some of the routines */
 struct si_info {
 	struct si_pub pub;	/* back plane public state (must be first) */
-	struct pci_dev *pbus;	/* handle to pci bus */
-	uint dev_coreid;	/* the core provides driver functions */
-	void *intr_arg;		/* interrupt callback function arg */
-	u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
-	/* restore chip interrupts */
-	void (*intrsrestore_fn) (void *intr_arg, u32 arg);
-	/* check if interrupts are enabled */
-	bool (*intrsenabled_fn) (void *intr_arg);
-
+	struct bcma_bus *icbus;	/* handle to soc interconnect bus */
+	struct pci_dev *pcibus;	/* handle to pci bus */
 	struct pcicore_info *pch; /* PCI/E core handle */
-
+	struct bcma_device *buscore;
 	struct list_head var_list; /* list of srom variables */
 
-	void __iomem *curmap;			/* current regs va */
-	void __iomem *regs[SI_MAXCORES];	/* other regs va */
-
-	uint curidx;		/* current core index */
-	uint numcores;		/* # discovered cores */
-	uint coreid[SI_MAXCORES]; /* id of each core */
-	u32 coresba[SI_MAXCORES]; /* backplane address of each core */
-	void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
-	u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
-	u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
-	u32 coresba2_size[SI_MAXCORES];	/* second address space size */
-
-	void *curwrap;		/* current wrapper va */
-	void *wrappers[SI_MAXCORES];	/* other cores wrapper va */
-	u32 wrapba[SI_MAXCORES];	/* address of controlling wrapper */
-
-	u32 cia[SI_MAXCORES];	/* erom cia entry for each core */
-	u32 cib[SI_MAXCORES];	/* erom cia entry for each core */
-	u32 oob_router;	/* oob router registers for axi */
+	u32 chipst;		/* chip status */
 };
 
 /*
@@ -299,52 +189,15 @@
 
 
 /* AMBA Interconnect exported externs */
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
-		       uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+extern struct bcma_device *ai_findcore(struct si_pub *sih,
+				       u16 coreid, u16 coreunit);
+extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
-extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern struct si_pub *ai_attach(struct bcma_bus *pbus);
 extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
-		uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
-				    uint *origidx, uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
+extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
 extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
-				      void *intrsrestore_fn,
-				      void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
 extern void ai_clkctl_init(struct si_pub *sih);
 extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
 extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -359,13 +212,6 @@
 /* SPROM availability */
 extern bool ai_is_sprom_available(struct si_pub *sih);
 
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-
 extern void ai_pci_sleep(struct si_pub *sih);
 extern void ai_pci_down(struct si_pub *sih);
 extern void ai_pci_up(struct si_pub *sih);
@@ -375,4 +221,52 @@
 /* Enable Ex-PA for 4313 */
 extern void ai_epa_4313war(struct si_pub *sih);
 
+extern uint ai_get_buscoretype(struct si_pub *sih);
+extern uint ai_get_buscorerev(struct si_pub *sih);
+
+static inline int ai_get_ccrev(struct si_pub *sih)
+{
+	return sih->ccrev;
+}
+
+static inline u32 ai_get_cccaps(struct si_pub *sih)
+{
+	return sih->cccaps;
+}
+
+static inline int ai_get_pmurev(struct si_pub *sih)
+{
+	return sih->pmurev;
+}
+
+static inline u32 ai_get_pmucaps(struct si_pub *sih)
+{
+	return sih->pmucaps;
+}
+
+static inline uint ai_get_boardtype(struct si_pub *sih)
+{
+	return sih->boardtype;
+}
+
+static inline uint ai_get_boardvendor(struct si_pub *sih)
+{
+	return sih->boardvendor;
+}
+
+static inline uint ai_get_chip_id(struct si_pub *sih)
+{
+	return sih->chip;
+}
+
+static inline uint ai_get_chiprev(struct si_pub *sih)
+{
+	return sih->chiprev;
+}
+
+static inline uint ai_get_chippkg(struct si_pub *sih)
+{
+	return sih->chippkg;
+}
+
 #endif				/* _BRCM_AIUTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 7f27dbd..90911ee 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -649,7 +649,7 @@
 		len = roundup(len, 4);
 		ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
 
-		dma_len += (u16) brcmu_pkttotlen(p);
+		dma_len += (u16) p->len;
 
 		BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
 			" seg_cnt %d null delim %d\n",
@@ -741,9 +741,7 @@
 		if (p) {
 			if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
 			    ((u8) (p->priority) == tid)) {
-
-				plen = brcmu_pkttotlen(p) +
-				       AMPDU_MAX_MPDU_OVERHEAD;
+				plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
 				plen = max(scb_ampdu->min_len, plen);
 
 				if ((plen + ampdu_len) > max_ampdu_bytes) {
@@ -1120,14 +1118,17 @@
 		u8 status_delay = 0;
 
 		/* wait till the next 8 bytes of txstatus are available */
-		while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+		s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
+		while ((s1 & TXS_V) == 0) {
 			udelay(1);
 			status_delay++;
 			if (status_delay > 10)
 				return; /* error condition */
+			s1 = bcma_read32(wlc->hw->d11core,
+					 D11REGOFFS(frmtxstatus));
 		}
 
-		s2 = R_REG(&wlc->regs->frmtxstatus2);
+		s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
 	}
 
 	if (scb) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 89ad1b7..55e9f45 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -1153,121 +1153,6 @@
 			      &txpwr);
 }
 
-#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
-{
-	int i;
-	char buf[80];
-	char fraction[4][4] = { "   ", ".25", ".5 ", ".75" };
-
-	sprintf(buf, "CCK                ");
-	for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "20 MHz OFDM SISO   ");
-	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "20 MHz OFDM CDD    ");
-	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "40 MHz OFDM SISO   ");
-	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->ofdm_40_siso[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "40 MHz OFDM CDD    ");
-	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->ofdm_40_cdd[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "20 MHz MCS0-7 SISO ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_20_siso[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "20 MHz MCS0-7 CDD  ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_20_cdd[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "20 MHz MCS0-7 STBC ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_20_stbc[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "20 MHz MCS8-15 SDM ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_20_mimo[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "40 MHz MCS0-7 SISO ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_40_siso[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "40 MHz MCS0-7 CDD  ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_40_cdd[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "40 MHz MCS0-7 STBC ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_40_stbc[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	printk(KERN_DEBUG "%s\n", buf);
-
-	sprintf(buf, "40 MHz MCS8-15 SDM ");
-	for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
-		sprintf(buf[strlen(buf)], " %2d%s",
-			txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
-			fraction[txpwr->mcs_40_mimo[i] %
-							BRCMS_TXPWR_DB_FACTOR]);
-	}
-	printk(KERN_DEBUG "%s\n", buf);
-
-	printk(KERN_DEBUG "MCS32               %2d%s\n",
-	       txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
-	       fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
-}
-#endif				/* POWER_DBG */
-
 void
 brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
 		       struct txpwr_limits *txpwr)
@@ -1478,9 +1363,6 @@
 			txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
 	}
 
-#ifdef POWER_DBG
-	wlc_phy_txpower_limits_dump(txpwr);
-#endif
 	return;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index ed51616..1948cb2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -430,6 +430,9 @@
 	u16 PAD[0x380];	/* 0x800 - 0xEFE */
 };
 
+/* d11 register field offset */
+#define D11REGOFFS(field)	offsetof(struct d11regs, field)
+
 #define	PIHR_BASE	0x0400	/* byte address of packed IHR region */
 
 /* biststatus */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 6ebec8f..2e90a9a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -13,8 +13,10 @@
  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
-#include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
 
@@ -22,6 +24,14 @@
 #include <aiutils.h>
 #include "types.h"
 #include "dma.h"
+#include "soc.h"
+
+/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field)		offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field)	(di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field)	(di->d64rxregbase + DMA64REGOFFS(field))
 
 /*
  * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
@@ -168,26 +178,25 @@
 
 /* debug/trace */
 #ifdef BCMDBG
-#define	DMA_ERROR(args) \
-	do { \
-		if (!(*di->msg_level & 1)) \
-			; \
-		else \
-			printk args; \
-	} while (0)
-#define	DMA_TRACE(args) \
-	do { \
-		if (!(*di->msg_level & 2)) \
-			; \
-		else \
-			printk args; \
-	} while (0)
+#define	DMA_ERROR(fmt, ...)					\
+do {								\
+	if (*di->msg_level & 1)					\
+		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
+} while (0)
+#define	DMA_TRACE(fmt, ...)					\
+do {								\
+	if (*di->msg_level & 2)					\
+		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
+} while (0)
 #else
-#define	DMA_ERROR(args)
-#define	DMA_TRACE(args)
+#define	DMA_ERROR(fmt, ...)			\
+	no_printk(fmt, ##__VA_ARGS__)
+#define	DMA_TRACE(fmt, ...)			\
+	no_printk(fmt, ##__VA_ARGS__)
 #endif				/* BCMDBG */
 
-#define	DMA_NONE(args)
+#define	DMA_NONE(fmt, ...)			\
+	no_printk(fmt, ##__VA_ARGS__)
 
 #define	MAXNAMEL	8	/* 8 char names */
 
@@ -218,15 +227,16 @@
 	uint *msg_level;	/* message level pointer */
 	char name[MAXNAMEL];	/* callers name for diag msgs */
 
-	struct pci_dev *pbus;		/* bus handle */
+	struct bcma_device *core;
+	struct device *dmadev;
 
 	bool dma64;	/* this dma engine is operating in 64-bit mode */
 	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */
 
 	/* 64-bit dma tx engine registers */
-	struct dma64regs __iomem *d64txregs;
+	uint d64txregbase;
 	/* 64-bit dma rx engine registers */
-	struct dma64regs __iomem *d64rxregs;
+	uint d64rxregbase;
 	/* pointer to dma64 tx descriptor ring */
 	struct dma64desc *txd64;
 	/* pointer to dma64 rx descriptor ring */
@@ -361,7 +371,7 @@
 	uint dmactrlflags;
 
 	if (di == NULL) {
-		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
+		DMA_ERROR("NULL dma handle\n");
 		return 0;
 	}
 
@@ -373,15 +383,16 @@
 	if (dmactrlflags & DMA_CTRL_PEN) {
 		u32 control;
 
-		control = R_REG(&di->d64txregs->control);
-		W_REG(&di->d64txregs->control,
+		control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+		bcma_write32(di->core, DMA64TXREGOFFS(di, control),
 		      control | D64_XC_PD);
-		if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+		if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+		    D64_XC_PD)
 			/* We *can* disable it so it is supported,
 			 * restore control register
 			 */
-			W_REG(&di->d64txregs->control,
-			control);
+			bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+				     control);
 		else
 			/* Not supported, don't allow it to be enabled */
 			dmactrlflags &= ~DMA_CTRL_PEN;
@@ -392,12 +403,12 @@
 	return dmactrlflags;
 }
 
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
 {
 	u32 w;
-	OR_REG(&dma64regs->control, D64_XC_AE);
-	w = R_REG(&dma64regs->control);
-	AND_REG(&dma64regs->control, ~D64_XC_AE);
+	bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+	w = bcma_read32(di->core, ctrl_offset);
+	bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
 	return (w & D64_XC_AE) == D64_XC_AE;
 }
 
@@ -410,15 +421,15 @@
 	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
 
 	/* not all tx or rx channel are available */
-	if (di->d64txregs != NULL) {
-		if (!_dma64_addrext(di->d64txregs))
-			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
-				   "AE set\n", di->name));
+	if (di->d64txregbase != 0) {
+		if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
+			DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
+				  di->name);
 		return true;
-	} else if (di->d64rxregs != NULL) {
-		if (!_dma64_addrext(di->d64rxregs))
-			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
-				   "AE set\n", di->name));
+	} else if (di->d64rxregbase != 0) {
+		if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
+			DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
+				  di->name);
 		return true;
 	}
 
@@ -430,14 +441,14 @@
 	u32 addrl;
 
 	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
-	if (di->d64txregs != NULL) {
-		W_REG(&di->d64txregs->addrlow, 0xff0);
-		addrl = R_REG(&di->d64txregs->addrlow);
+	if (di->d64txregbase != 0) {
+		bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+		addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
 		if (addrl != 0)
 			return false;
-	} else if (di->d64rxregs != NULL) {
-		W_REG(&di->d64rxregs->addrlow, 0xff0);
-		addrl = R_REG(&di->d64rxregs->addrlow);
+	} else if (di->d64rxregbase != 0) {
+		bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+		addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
 		if (addrl != 0)
 			return false;
 	}
@@ -448,7 +459,7 @@
  * Descriptor table must start at the DMA hardware dictated alignment, so
  * allocated memory must be large enough to support this requirement.
  */
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
 				  u16 align_bits, uint *alloced,
 				  dma_addr_t *pap)
 {
@@ -458,7 +469,7 @@
 			size += align;
 		*alloced = size;
 	}
-	return pci_alloc_consistent(pdev, size, pap);
+	return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
 }
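
dma_alloc_consistent() now calls into the generic DMA API against the device behind the bcma core (core->dma_dev) rather than the pci_* wrappers, and the same conversion is applied to the streaming mappings further down in this file. A hedged sketch of the two pairings involved; the helper names and sizes are illustrative only:

#include <linux/dma-mapping.h>

/* coherent memory: descriptor rings, allocated once and kept mapped */
static void *example_ring_alloc(struct device *dmadev, size_t size,
				dma_addr_t *pa)
{
	return dma_alloc_coherent(dmadev, size, pa, GFP_ATOMIC);
}

static void example_ring_free(struct device *dmadev, size_t size,
			      void *va, dma_addr_t pa)
{
	dma_free_coherent(dmadev, size, va, pa);
}

/* streaming memory: per-packet rx buffers, mapped for a single transfer
 * and unmapped again once the frame has been received */
static dma_addr_t example_map_rxbuf(struct device *dmadev, void *data,
				    size_t len)
{
	return dma_map_single(dmadev, data, len, DMA_FROM_DEVICE);
}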
 
 static
@@ -484,7 +495,7 @@
 	u32 desc_strtaddr;
 	u32 alignbytes = 1 << *alignbits;
 
-	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+	va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
 
 	if (NULL == va)
 		return NULL;
@@ -493,8 +504,8 @@
 	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
 							& boundary)) {
 		*alignbits = dma_align_sizetobits(size);
-		pci_free_consistent(di->pbus, size, va, *descpa);
-		va = dma_alloc_consistent(di->pbus, size, *alignbits,
+		dma_free_coherent(di->dmadev, size, va, *descpa);
+		va = dma_alloc_consistent(di, size, *alignbits,
 			alloced, descpa);
 	}
 	return va;
@@ -519,8 +530,8 @@
 		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
 			&alloced, &di->txdpaorig);
 		if (va == NULL) {
-			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
-				   " failed\n", di->name));
+			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+				  di->name);
 			return false;
 		}
 		align = (1 << align_bits);
@@ -533,8 +544,8 @@
 		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
 			&alloced, &di->rxdpaorig);
 		if (va == NULL) {
-			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
-				   " failed\n", di->name));
+			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+				  di->name);
 			return false;
 		}
 		align = (1 << align_bits);
@@ -554,12 +565,13 @@
 }
 
 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-		     void __iomem *dmaregstx, void __iomem *dmaregsrx,
-		     uint ntxd, uint nrxd,
-		     uint rxbufsize, int rxextheadroom,
-		     uint nrxpost, uint rxoffset, uint *msg_level)
+			   struct bcma_device *core,
+			   uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+			   uint rxbufsize, int rxextheadroom,
+			   uint nrxpost, uint rxoffset, uint *msg_level)
 {
 	struct dma_info *di;
+	u8 rev = core->id.rev;
 	uint size;
 
 	/* allocate private info structure */
@@ -570,11 +582,13 @@
 	di->msg_level = msg_level ? msg_level : &dma_msg_level;
 
 
-	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+	di->dma64 =
+		((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
 
-	/* init dma reg pointer */
-	di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
-	di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+	/* init dma reg info */
+	di->core = core;
+	di->d64txregbase = txregbase;
+	di->d64rxregbase = rxregbase;
 
 	/*
 	 * Default flags (which can be changed by the driver calling
@@ -583,17 +597,17 @@
 	 */
 	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
 
-	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
-		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
-		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
-		   di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
-		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+	DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+		  "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+		  "txregbase %u rxregbase %u\n", name, "DMA64",
+		  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+		  rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
 
 	/* make a private copy of our callers name */
 	strncpy(di->name, name, MAXNAMEL);
 	di->name[MAXNAMEL - 1] = '\0';
 
-	di->pbus = ((struct si_info *)sih)->pbus;
+	di->dmadev = core->dma_dev;
 
 	/* save tunables */
 	di->ntxd = (u16) ntxd;
@@ -625,12 +639,12 @@
 	di->dataoffsetlow = di->ddoffsetlow;
 	di->dataoffsethigh = di->ddoffsethigh;
 	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
-	if ((ai_coreid(sih) == SDIOD_CORE_ID)
-	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
-		di->addrext = 0;
-	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
-		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
-		di->addrext = 0;
+	if ((core->id.id == SDIOD_CORE_ID)
+	    && ((rev > 0) && (rev <= 2)))
+		di->addrext = false;
+	else if ((core->id.id == I2S_CORE_ID) &&
+		 ((rev == 0) || (rev == 1)))
+		di->addrext = false;
 	else
 		di->addrext = _dma_isaddrext(di);
 
@@ -645,8 +659,8 @@
 		di->dmadesc_align = 4;	/* 16 byte alignment */
 	}
 
-	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
-		  di->aligndesc_4k, di->dmadesc_align));
+	DMA_NONE("DMA descriptor align_needed %d, align %d\n",
+		 di->aligndesc_4k, di->dmadesc_align);
 
 	/* allocate tx packet pointer vector */
 	if (ntxd) {
@@ -684,21 +698,21 @@
 
 	if ((di->ddoffsetlow != 0) && !di->addrext) {
 		if (di->txdpa > SI_PCI_DMA_SZ) {
-			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
-				   "supported\n", di->name, (u32)di->txdpa));
+			DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
+				  di->name, (u32)di->txdpa);
 			goto fail;
 		}
 		if (di->rxdpa > SI_PCI_DMA_SZ) {
-			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
-				   "supported\n", di->name, (u32)di->rxdpa));
+			DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
+				  di->name, (u32)di->rxdpa);
 			goto fail;
 		}
 	}
 
-	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
-		   "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
-		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
-		   di->addrext));
+	DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+		  di->ddoffsetlow, di->ddoffsethigh,
+		  di->dataoffsetlow, di->dataoffsethigh,
+		  di->addrext);
 
 	return (struct dma_pub *) di;
 
@@ -744,17 +758,17 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE(("%s: dma_detach\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	/* free dma descriptor rings */
 	if (di->txd64)
-		pci_free_consistent(di->pbus, di->txdalloc,
-				    ((s8 *)di->txd64 - di->txdalign),
-				    (di->txdpaorig));
+		dma_free_coherent(di->dmadev, di->txdalloc,
+				  ((s8 *)di->txd64 - di->txdalign),
+				  (di->txdpaorig));
 	if (di->rxd64)
-		pci_free_consistent(di->pbus, di->rxdalloc,
-				    ((s8 *)di->rxd64 - di->rxdalign),
-				    (di->rxdpaorig));
+		dma_free_coherent(di->dmadev, di->rxdalloc,
+				  ((s8 *)di->rxd64 - di->rxdalign),
+				  (di->rxdpaorig));
 
 	/* free packet pointer vectors */
 	kfree(di->txp);
@@ -779,11 +793,15 @@
 	if ((di->ddoffsetlow == 0)
 	    || !(pa & PCI32ADDR_HIGH)) {
 		if (direction == DMA_TX) {
-			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+				     pa + di->ddoffsetlow);
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+				     di->ddoffsethigh);
 		} else {
-			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+				     pa + di->ddoffsetlow);
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+				     di->ddoffsethigh);
 		}
 	} else {
 		/* DMA64 32bits address extension */
@@ -794,15 +812,19 @@
 		pa &= ~PCI32ADDR_HIGH;
 
 		if (direction == DMA_TX) {
-			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
-			SET_REG(&di->d64txregs->control,
-				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+				     pa + di->ddoffsetlow);
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+				     di->ddoffsethigh);
+			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
 		} else {
-			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
-			SET_REG(&di->d64rxregs->control,
-				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+				     pa + di->ddoffsetlow);
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+				     di->ddoffsethigh);
+			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
 		}
 	}
 }
@@ -812,11 +834,11 @@
 	uint dmactrlflags = di->dma.dmactrlflags;
 	u32 control;
 
-	DMA_TRACE(("%s: dma_rxenable\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
-	control =
-	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
-	    D64_RC_RE;
+	control = D64_RC_RE | (bcma_read32(di->core,
+					   DMA64RXREGOFFS(di, control)) &
+			       D64_RC_AE);
 
 	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
 		control |= D64_RC_PD;
@@ -824,7 +846,7 @@
 	if (dmactrlflags & DMA_CTRL_ROC)
 		control |= D64_RC_OC;
 
-	W_REG(&di->d64rxregs->control,
+	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
 		((di->rxoffset << D64_RC_RO_SHIFT) | control));
 }
 
@@ -832,7 +854,7 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE(("%s: dma_rxinit\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	if (di->nrxd == 0)
 		return;
@@ -867,7 +889,8 @@
 		return NULL;
 
 	curr =
-	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+	    B2I(((bcma_read32(di->core,
+			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
 		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
 
 	/* ignore curr if forceall */
@@ -881,7 +904,7 @@
 	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
 
 	/* clear this packet from the descriptor ring */
-	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
 
 	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
 	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -901,7 +924,7 @@
 
 /*
  * !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number of packets in the next frame, or 0 if there are no more
  *   if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
  *   supported with pkts chain
  *   otherwise, it's treated as giant pkt and will be tossed.
@@ -909,74 +932,83 @@
  *   buffer data. After it reaches the max size of buffer, the data continues
  *   in next DMA descriptor buffer WITHOUT DMA header
  */
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
 {
 	struct dma_info *di = (struct dma_info *)pub;
-	struct sk_buff *p, *head, *tail;
+	struct sk_buff_head dma_frames;
+	struct sk_buff *p, *next;
 	uint len;
 	uint pkt_len;
 	int resid = 0;
+	int pktcnt = 1;
 
+	skb_queue_head_init(&dma_frames);
  next_frame:
-	head = _dma_getnextrxp(di, false);
-	if (head == NULL)
-		return NULL;
+	p = _dma_getnextrxp(di, false);
+	if (p == NULL)
+		return 0;
 
-	len = le16_to_cpu(*(__le16 *) (head->data));
-	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
-	dma_spin_for_len(len, head);
+	len = le16_to_cpu(*(__le16 *) (p->data));
+	DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+	dma_spin_for_len(len, p);
 
 	/* set actual length */
 	pkt_len = min((di->rxoffset + len), di->rxbufsize);
-	__skb_trim(head, pkt_len);
+	__skb_trim(p, pkt_len);
+	skb_queue_tail(&dma_frames, p);
 	resid = len - (di->rxbufsize - di->rxoffset);
 
 	/* check for single or multi-buffer rx */
 	if (resid > 0) {
-		tail = head;
 		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
-			tail->next = p;
 			pkt_len = min_t(uint, resid, di->rxbufsize);
 			__skb_trim(p, pkt_len);
-
-			tail = p;
+			skb_queue_tail(&dma_frames, p);
 			resid -= di->rxbufsize;
+			pktcnt++;
 		}
 
 #ifdef BCMDBG
 		if (resid > 0) {
 			uint cur;
 			cur =
-			    B2I(((R_REG(&di->d64rxregs->status0) &
-				  D64_RS0_CD_MASK) -
-				 di->rcvptrbase) & D64_RS0_CD_MASK,
-				struct dma64desc);
-			DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
-				   di->rxin, di->rxout, cur));
+			    B2I(((bcma_read32(di->core,
+					      DMA64RXREGOFFS(di, status0)) &
+				  D64_RS0_CD_MASK) - di->rcvptrbase) &
+				D64_RS0_CD_MASK, struct dma64desc);
+			DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
+				   di->rxin, di->rxout, cur);
 		}
 #endif				/* BCMDBG */
 
 		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
-			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
-				   di->name, len));
-			brcmu_pkt_buf_free_skb(head);
+			DMA_ERROR("%s: bad frame length (%d)\n",
+				  di->name, len);
+			skb_queue_walk_safe(&dma_frames, p, next) {
+				skb_unlink(p, &dma_frames);
+				brcmu_pkt_buf_free_skb(p);
+			}
 			di->dma.rxgiants++;
+			pktcnt = 1;
 			goto next_frame;
 		}
 	}
 
-	return head;
+	skb_queue_splice_tail(&dma_frames, skb_list);
+	return pktcnt;
 }
 
 static bool dma64_rxidle(struct dma_info *di)
 {
-	DMA_TRACE(("%s: dma_rxidle\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	if (di->nrxd == 0)
 		return true;
 
-	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
-		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+	return ((bcma_read32(di->core,
+			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+		 D64_RS0_CD_MASK));
 }
 
 /*
@@ -1010,7 +1042,7 @@
 
 	n = di->nrxpost - nrxdactive(di, rxin, rxout);
 
-	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+	DMA_TRACE("%s: post %d\n", di->name, n);
 
 	if (di->rxbufsize > BCMEXTRAHDROOM)
 		extra_offset = di->rxextrahdrroom;
@@ -1023,11 +1055,9 @@
 		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
 
 		if (p == NULL) {
-			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
-				   di->name));
+			DMA_ERROR("%s: out of rxbufs\n", di->name);
 			if (i == 0 && dma64_rxidle(di)) {
-				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
-					   di->name));
+				DMA_ERROR("%s: ring is empty !\n", di->name);
 				ring_empty = true;
 			}
 			di->dma.rxnobuf++;
@@ -1042,8 +1072,8 @@
 		 */
 		*(u32 *) (p->data) = 0;
 
-		pa = pci_map_single(di->pbus, p->data,
-			di->rxbufsize, PCI_DMA_FROMDEVICE);
+		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+				    DMA_FROM_DEVICE);
 
 		/* save the free packet pointer */
 		di->rxp[rxout] = p;
@@ -1061,7 +1091,7 @@
 	di->rxout = rxout;
 
 	/* update the chip lastdscr pointer */
-	W_REG(&di->d64rxregs->ptr,
+	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
 	      di->rcvptrbase + I2B(rxout, struct dma64desc));
 
 	return ring_empty;
@@ -1072,7 +1102,7 @@
 	struct dma_info *di = (struct dma_info *)pub;
 	struct sk_buff *p;
 
-	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	while ((p = _dma_getnextrxp(di, true)))
 		brcmu_pkt_buf_free_skb(p);
@@ -1103,7 +1133,7 @@
 	struct dma_info *di = (struct dma_info *)pub;
 	u32 control = D64_XC_XE;
 
-	DMA_TRACE(("%s: dma_txinit\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	if (di->ntxd == 0)
 		return;
@@ -1122,7 +1152,7 @@
 
 	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
 		control |= D64_XC_PD;
-	OR_REG(&di->d64txregs->control, control);
+	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
 
 	/* DMA engine with alignment requirement requires table to be inited
 	 * before enabling the engine
@@ -1135,24 +1165,24 @@
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	if (di->ntxd == 0)
 		return;
 
-	OR_REG(&di->d64txregs->control, D64_XC_SE);
+	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
 }
 
 void dma_txresume(struct dma_pub *pub)
 {
 	struct dma_info *di = (struct dma_info *)pub;
 
-	DMA_TRACE(("%s: dma_txresume\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	if (di->ntxd == 0)
 		return;
 
-	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
 }
 
 bool dma_txsuspended(struct dma_pub *pub)
@@ -1160,8 +1190,9 @@
 	struct dma_info *di = (struct dma_info *)pub;
 
 	return (di->ntxd == 0) ||
-	    ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
-	     D64_XC_SE);
+	       ((bcma_read32(di->core,
+			     DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+		D64_XC_SE);
 }
 
 void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1169,11 +1200,11 @@
 	struct dma_info *di = (struct dma_info *)pub;
 	struct sk_buff *p;
 
-	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
-		   (range == DMA_RANGE_ALL) ? "all" :
-		   ((range ==
-		     DMA_RANGE_TRANSMITTED) ? "transmitted" :
-		    "transferred")));
+	DMA_TRACE("%s: %s\n",
+		  di->name,
+		  range == DMA_RANGE_ALL ? "all" :
+		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+		  "transferred");
 
 	if (di->txin == di->txout)
 		return;
@@ -1194,16 +1225,17 @@
 		return true;
 
 	/* suspend tx DMA first */
-	W_REG(&di->d64txregs->control, D64_XC_SE);
+	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
 	SPINWAIT(((status =
-		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
-		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
-		 && (status != D64_XS0_XS_STOPPED), 10000);
+		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+		  (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+		 10000);
 
-	W_REG(&di->d64txregs->control, 0);
+	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
 	SPINWAIT(((status =
-		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
-		  != D64_XS0_XS_DISABLED), 10000);
+		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
 
 	/* wait for the last transaction to complete */
 	udelay(300);
@@ -1219,10 +1251,10 @@
 	if (di->nrxd == 0)
 		return true;
 
-	W_REG(&di->d64rxregs->control, 0);
+	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
 	SPINWAIT(((status =
-		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
-		  != D64_RS0_RS_DISABLED), 10000);
+		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
 
 	return status == D64_RS0_RS_DISABLED;
 }
@@ -1233,72 +1265,58 @@
  *   the error(toss frames) could be fatal and cause many subsequent hard
  *   to debug problems
  */
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
 {
 	struct dma_info *di = (struct dma_info *)pub;
-	struct sk_buff *p, *next;
 	unsigned char *data;
 	uint len;
 	u16 txout;
 	u32 flags = 0;
 	dma_addr_t pa;
 
-	DMA_TRACE(("%s: dma_txfast\n", di->name));
+	DMA_TRACE("%s:\n", di->name);
 
 	txout = di->txout;
 
 	/*
-	 * Walk the chain of packet buffers
-	 * allocating and initializing transmit descriptor entries.
+	 * obtain and initialize transmit descriptor entry.
 	 */
-	for (p = p0; p; p = next) {
-		data = p->data;
-		len = p->len;
-		next = p->next;
+	data = p->data;
+	len = p->len;
 
-		/* return nonzero if out of tx descriptors */
-		if (nexttxd(di, txout) == di->txin)
-			goto outoftxd;
+	/* no use to transmit a zero length packet */
+	if (len == 0)
+		return 0;
 
-		if (len == 0)
-			continue;
+	/* return nonzero if out of tx descriptors */
+	if (nexttxd(di, txout) == di->txin)
+		goto outoftxd;
 
-		/* get physical address of buffer start */
-		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+	/* get physical address of buffer start */
+	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
 
-		flags = 0;
-		if (p == p0)
-			flags |= D64_CTRL1_SOF;
+	/* With a DMA segment list, Descriptor table is filled
+	 * using the segment list instead of looping over
+	 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+	 * is when end of segment list is reached.
+	 */
+	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+	if (txout == (di->ntxd - 1))
+		flags |= D64_CTRL1_EOT;
 
-		/* With a DMA segment list, Descriptor table is filled
-		 * using the segment list instead of looping over
-		 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
-		 * is when end of segment list is reached.
-		 */
-		if (next == NULL)
-			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
-		if (txout == (di->ntxd - 1))
-			flags |= D64_CTRL1_EOT;
+	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
 
-		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
-
-		txout = nexttxd(di, txout);
-	}
-
-	/* if last txd eof not set, fix it */
-	if (!(flags & D64_CTRL1_EOF))
-		di->txd64[prevtxd(di, txout)].ctrl1 =
-		     cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+	txout = nexttxd(di, txout);
 
 	/* save the packet */
-	di->txp[prevtxd(di, txout)] = p0;
+	di->txp[prevtxd(di, txout)] = p;
 
 	/* bump the tx descriptor index */
 	di->txout = txout;
 
 	/* kick the chip */
 	if (commit)
-		W_REG(&di->d64txregs->ptr,
+		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
 		      di->xmtptrbase + I2B(txout, struct dma64desc));
 
 	/* tx flow control */
@@ -1307,8 +1325,8 @@
 	return 0;
 
  outoftxd:
-	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
-	brcmu_pkt_buf_free_skb(p0);
+	DMA_ERROR("%s: out of txds !!!\n", di->name);
+	brcmu_pkt_buf_free_skb(p);
 	di->dma.txavail = 0;
 	di->dma.txnobuf++;
 	return -1;
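
After this rework dma_txfast() queues exactly one sk_buff per call (the chain walk is gone), frees the buffer itself when it runs out of descriptors, and keeps dma.txavail updated for flow control. A hedged caller sketch, assuming the driver's dma.h and the usual kernel headers are in scope; stop_tx_queue() is a hypothetical flow-control hook, not a real function:

/* sketch only: one descriptor per call, error path frees the skb */
static int example_tx_one(struct dma_pub *dma, struct sk_buff *skb)
{
	if (dma_txfast(dma, skb, true))	/* nonzero: out of descriptors */
		return -ENOSPC;

	if (dma->txavail == 0)
		stop_tx_queue();	/* hypothetical: throttle the caller */

	return 0;
}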
@@ -1331,11 +1349,11 @@
 	u16 active_desc;
 	struct sk_buff *txp;
 
-	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
-		   (range == DMA_RANGE_ALL) ? "all" :
-		   ((range ==
-		     DMA_RANGE_TRANSMITTED) ? "transmitted" :
-		    "transferred")));
+	DMA_TRACE("%s: %s\n",
+		  di->name,
+		  range == DMA_RANGE_ALL ? "all" :
+		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+		  "transferred");
 
 	if (di->ntxd == 0)
 		return NULL;
@@ -1346,16 +1364,15 @@
 	if (range == DMA_RANGE_ALL)
 		end = di->txout;
 	else {
-		struct dma64regs __iomem *dregs = di->d64txregs;
-
-		end = (u16) (B2I(((R_REG(&dregs->status0) &
-				 D64_XS0_CD_MASK) -
-				 di->xmtptrbase) & D64_XS0_CD_MASK,
-				 struct dma64desc));
+		end = (u16) (B2I(((bcma_read32(di->core,
+					       DMA64TXREGOFFS(di, status0)) &
+				   D64_XS0_CD_MASK) - di->xmtptrbase) &
+				 D64_XS0_CD_MASK, struct dma64desc));
 
 		if (range == DMA_RANGE_TRANSFERED) {
 			active_desc =
-			    (u16) (R_REG(&dregs->status1) &
+				(u16)(bcma_read32(di->core,
+						  DMA64TXREGOFFS(di, status1)) &
 				      D64_XS1_AD_MASK);
 			active_desc =
 			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1384,7 +1401,7 @@
 		txp = di->txp[i];
 		di->txp[i] = NULL;
 
-		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+		dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
 	}
 
 	di->txin = i;
@@ -1395,8 +1412,8 @@
 	return txp;
 
  bogus:
-	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
-		  "force %d\n", start, end, di->txout, forceall));
+	DMA_NONE("bogus curr: start %d end %d txout %d\n",
+		 start, end, di->txout);
 	return NULL;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index ebc5bc5..cc269ee 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -18,6 +18,7 @@
 #define	_BRCM_DMA_H_
 
 #include <linux/delay.h>
+#include <linux/skbuff.h>
 #include "types.h"		/* forward structure declarations */
 
 /* map/unmap direction */
@@ -74,13 +75,14 @@
 };
 
 extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-			    void __iomem *dmaregstx, void __iomem *dmaregsrx,
-			    uint ntxd, uint nrxd,
-			    uint rxbufsize, int rxextheadroom,
-			    uint nrxpost, uint rxoffset, uint *msg_level);
+				  struct bcma_device *d11core,
+				  uint txregbase, uint rxregbase,
+				  uint ntxd, uint nrxd,
+				  uint rxbufsize, int rxextheadroom,
+				  uint nrxpost, uint rxoffset, uint *msg_level);
 
 void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
 bool dma_rxfill(struct dma_pub *pub);
 bool dma_rxreset(struct dma_pub *pub);
 bool dma_txreset(struct dma_pub *pub);
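
With the prototype change above, dma_rx() no longer returns a chained sk_buff: it appends the buffer(s) of one received frame to the caller-supplied queue and returns how many it queued, or 0 when the ring is empty. A hedged sketch of a caller under the new contract, assuming this dma.h is included; process_frame_buffer() is a hypothetical consumer, not part of the driver:

#include <linux/skbuff.h>

static void example_recv_frames(struct dma_pub *dma)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	skb_queue_head_init(&frames);

	/* each successful call queues the buffer(s) of exactly one frame */
	while (dma_rx(dma, &frames) > 0) {
		while ((skb = skb_dequeue(&frames)) != NULL)
			process_frame_buffer(skb);	/* hypothetical */
	}
}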
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 0d8a9cd..d106576 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -17,11 +17,11 @@
 #define __UNDEF_NO_VERSION__
 
 #include <linux/etherdevice.h>
-#include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/firmware.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/bcma/bcma.h>
 #include <net/mac80211.h>
 #include <defs.h>
 #include "nicpci.h"
@@ -40,10 +40,10 @@
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
 	FIF_ALLMULTI | \
 	FIF_FCSFAIL | \
-	FIF_PLCPFAIL | \
 	FIF_CONTROL | \
 	FIF_OTHER_BSS | \
-	FIF_BCN_PRBRESP_PROMISC)
+	FIF_BCN_PRBRESP_PROMISC | \
+	FIF_PSPOLL)
 
 #define CHAN2GHZ(channel, freqency, chflags)  { \
 	.band = IEEE80211_BAND_2GHZ, \
@@ -87,16 +87,14 @@
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
-	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
-	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
-	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
-	{0}
-};
 
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+/* recognized BCMA Core IDs */
+static struct bcma_device_id brcms_coreid_table[] = {
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
+	BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
 
 #ifdef BCMDBG
 static int msglevel = 0xdeadbeef;
@@ -216,8 +214,7 @@
 	.ht_cap = {
 		   /* from include/linux/ieee80211.h */
 		   .cap = IEEE80211_HT_CAP_GRN_FLD |
-		   IEEE80211_HT_CAP_SGI_20 |
-		   IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
+			  IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40,
 		   .ht_supported = true,
 		   .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
 		   .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -238,8 +235,7 @@
 			BRCMS_LEGACY_5G_RATE_OFFSET,
 	.ht_cap = {
 		   .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
-			  IEEE80211_HT_CAP_SGI_40 |
-			  IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
+			  IEEE80211_HT_CAP_SGI_40,
 		   .ht_supported = true,
 		   .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
 		   .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -287,6 +283,7 @@
 {
 	struct brcms_info *wl = hw->priv;
 	bool blocked;
+	int err;
 
 	ieee80211_wake_queues(hw);
 	spin_lock_bh(&wl->lock);
@@ -295,33 +292,10 @@
 	if (!blocked)
 		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 
-	return 0;
-}
-
-static void brcms_ops_stop(struct ieee80211_hw *hw)
-{
-	ieee80211_stop_queues(hw);
-}
-
-static int
-brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-	struct brcms_info *wl;
-	int err;
-
-	/* Just STA for now */
-	if (vif->type != NL80211_IFTYPE_AP &&
-	    vif->type != NL80211_IFTYPE_MESH_POINT &&
-	    vif->type != NL80211_IFTYPE_STATION &&
-	    vif->type != NL80211_IFTYPE_WDS &&
-	    vif->type != NL80211_IFTYPE_ADHOC) {
-		wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
-			  " STA for now\n", __func__, vif->type);
-		return -EOPNOTSUPP;
-	}
-
-	wl = hw->priv;
 	spin_lock_bh(&wl->lock);
+	/* avoid acknowledging frames before a non-monitor device is added */
+	wl->mute_tx = true;
+
 	if (!wl->pub->up)
 		err = brcms_up(wl);
 	else
@@ -331,16 +305,28 @@
 	if (err != 0)
 		wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
 			  err);
-
 	return err;
 }
 
-static void
-brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static void brcms_ops_stop(struct ieee80211_hw *hw)
 {
-	struct brcms_info *wl;
+	struct brcms_info *wl = hw->priv;
+	int status;
 
-	wl = hw->priv;
+	ieee80211_stop_queues(hw);
+
+	if (wl->wlc == NULL)
+		return;
+
+	spin_lock_bh(&wl->lock);
+	status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
+				   wl->wlc->hw->deviceid);
+	spin_unlock_bh(&wl->lock);
+	if (!status) {
+		wiphy_err(wl->wiphy,
+			  "wl: brcms_ops_stop: chipmatch failed\n");
+		return;
+	}
 
 	/* put driver in down state */
 	spin_lock_bh(&wl->lock);
@@ -348,6 +334,29 @@
 	spin_unlock_bh(&wl->lock);
 }
 
+static int
+brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct brcms_info *wl = hw->priv;
+
+	/* Just STA for now */
+	if (vif->type != NL80211_IFTYPE_STATION) {
+		wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
+			  " STA for now\n", __func__, vif->type);
+		return -EOPNOTSUPP;
+	}
+
+	wl->mute_tx = false;
+	brcms_c_mute(wl->wlc, false);
+
+	return 0;
+}
+
+static void
+brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+}
+
 static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct ieee80211_conf *conf = &hw->conf;
@@ -362,7 +371,7 @@
 						   conf->listen_interval);
 	}
 	if (changed & IEEE80211_CONF_CHANGE_MONITOR)
-		wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+		wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
 			  __func__, conf->flags & IEEE80211_CONF_MONITOR ?
 			  "true" : "false");
 	if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -539,29 +548,25 @@
 
 	changed_flags &= MAC_FILTERS;
 	*total_flags &= MAC_FILTERS;
+
 	if (changed_flags & FIF_PROMISC_IN_BSS)
-		wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+		wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
 	if (changed_flags & FIF_ALLMULTI)
-		wiphy_err(wiphy, "FIF_ALLMULTI\n");
+		wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
 	if (changed_flags & FIF_FCSFAIL)
-		wiphy_err(wiphy, "FIF_FCSFAIL\n");
-	if (changed_flags & FIF_PLCPFAIL)
-		wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+		wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
 	if (changed_flags & FIF_CONTROL)
-		wiphy_err(wiphy, "FIF_CONTROL\n");
+		wiphy_dbg(wiphy, "FIF_CONTROL\n");
 	if (changed_flags & FIF_OTHER_BSS)
-		wiphy_err(wiphy, "FIF_OTHER_BSS\n");
-	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
-		spin_lock_bh(&wl->lock);
-		if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
-			wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
-			brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
-		} else {
-			brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
-			wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
-		}
-		spin_unlock_bh(&wl->lock);
-	}
+		wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+	if (changed_flags & FIF_PSPOLL)
+		wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+	if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+		wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+	spin_lock_bh(&wl->lock);
+	brcms_c_mac_promisc(wl->wlc, *total_flags);
+	spin_unlock_bh(&wl->lock);
 	return;
 }
 
@@ -609,13 +614,6 @@
 	wl->pub->global_ampdu->scb = scb;
 	wl->pub->global_ampdu->max_pdu = 16;
 
-	sta->ht_cap.ht_supported = true;
-	sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-	sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
-	sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
-	    IEEE80211_HT_CAP_SGI_20 |
-	    IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
-
 	/*
 	 * minstrel_ht initiates addBA on our behalf by calling
 	 * ieee80211_start_tx_ba_session()
@@ -724,7 +722,7 @@
 };
 
 /*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
  */
 static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
 {
@@ -864,56 +862,26 @@
 #endif
 		kfree(t);
 	}
-
-	/*
-	 * unregister_netdev() calls get_stats() which may read chip
-	 * registers so we cannot unmap the chip registers until
-	 * after calling unregister_netdev() .
-	 */
-	if (wl->regsva)
-		iounmap(wl->regsva);
-
-	wl->regsva = NULL;
 }
 
 /*
-* called from both kernel as from this kernel module.
+* called both from the kernel and from this kernel module (error flow on attach)
 * precondition: perimeter lock is not acquired.
 */
-static void brcms_remove(struct pci_dev *pdev)
+static void brcms_remove(struct bcma_device *pdev)
 {
-	struct brcms_info *wl;
-	struct ieee80211_hw *hw;
-	int status;
+	struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+	struct brcms_info *wl = hw->priv;
 
-	hw = pci_get_drvdata(pdev);
-	wl = hw->priv;
-	if (!wl) {
-		pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
-		return;
-	}
-
-	spin_lock_bh(&wl->lock);
-	status = brcms_c_chipmatch(pdev->vendor, pdev->device);
-	spin_unlock_bh(&wl->lock);
-	if (!status) {
-		wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
-				     "failed\n");
-		return;
-	}
 	if (wl->wlc) {
 		wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
 		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 		ieee80211_unregister_hw(hw);
-		spin_lock_bh(&wl->lock);
-		brcms_down(wl);
-		spin_unlock_bh(&wl->lock);
 	}
-	pci_disable_device(pdev);
 
 	brcms_free(wl);
 
-	pci_set_drvdata(pdev, NULL);
+	bcma_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(hw);
 }
 
@@ -1021,11 +989,9 @@
  * it as static.
  *
  *
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
  */
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
-				       resource_size_t regs,
-				       struct pci_dev *btparam, uint irq)
+static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 {
 	struct brcms_info *wl = NULL;
 	int unit, err;
@@ -1039,7 +1005,7 @@
 		return NULL;
 
 	/* allocate private info */
-	hw = pci_get_drvdata(btparam);	/* btparam == pdev */
+	hw = bcma_get_drvdata(pdev);
 	if (hw != NULL)
 		wl = hw->priv;
 	if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1051,26 +1017,20 @@
 	/* setup the bottom half handler */
 	tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 
-	wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
-	if (wl->regsva == NULL) {
-		wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
-		goto fail;
-	}
 	spin_lock_init(&wl->lock);
 	spin_lock_init(&wl->isr_lock);
 
 	/* prepare ucode */
-	if (brcms_request_fw(wl, btparam) < 0) {
+	if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
 		wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
 			  "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
 		brcms_release_fw(wl);
-		brcms_remove(btparam);
+		brcms_remove(pdev);
 		return NULL;
 	}
 
 	/* common load-time initialization */
-	wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
-				 wl->regsva, btparam, &err);
+	wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
 	brcms_release_fw(wl);
 	if (!wl->wlc) {
 		wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1081,15 +1041,13 @@
 
 	wl->pub->ieee_hw = hw;
 
-	/* disable mpc */
-	brcms_c_set_radio_mpc(wl->wlc, false);
-
 	/* register our interrupt handler */
-	if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+	if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+			IRQF_SHARED, KBUILD_MODNAME, wl)) {
 		wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
 		goto fail;
 	}
-	wl->irq = irq;
+	wl->irq = pdev->bus->host_pci->irq;
 
 	/* register module */
 	brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1136,38 +1094,19 @@
  *
  * Perimeter lock is initialized in the course of this function.
  */
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
 {
-	int rc;
 	struct brcms_info *wl;
 	struct ieee80211_hw *hw;
-	u32 val;
 
-	dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
-	       pdev->bus->number, PCI_SLOT(pdev->devfn),
-	       PCI_FUNC(pdev->devfn), pdev->irq);
+	dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+		 pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+		 pdev->bus->host_pci->irq);
 
-	if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
-	    ((pdev->device != 0x0576) &&
-	     ((pdev->device & 0xff00) != 0x4300) &&
-	     ((pdev->device & 0xff00) != 0x4700) &&
-	     ((pdev->device < 43000) || (pdev->device > 43999))))
+	if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
+	    (pdev->id.id != BCMA_CORE_80211))
 		return -ENODEV;
 
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		pr_err("%s: Cannot enable device %d-%d_%d\n",
-		       __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
-		       PCI_FUNC(pdev->devfn));
-		return -ENODEV;
-	}
-	pci_set_master(pdev);
-
-	pci_read_config_dword(pdev, 0x40, &val);
-	if ((val & 0x0000ff00) != 0)
-		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
 	hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
 	if (!hw) {
 		pr_err("%s: ieee80211_alloc_hw failed\n", __func__);
@@ -1176,14 +1115,11 @@
 
 	SET_IEEE80211_DEV(hw, &pdev->dev);
 
-	pci_set_drvdata(pdev, hw);
+	bcma_set_drvdata(pdev, hw);
 
 	memset(hw->priv, 0, sizeof(*wl));
 
-	wl = brcms_attach(pdev->vendor, pdev->device,
-			  pci_resource_start(pdev, 0), pdev,
-			  pdev->irq);
-
+	wl = brcms_attach(pdev);
 	if (!wl) {
 		pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
 		       __func__);
@@ -1192,16 +1128,23 @@
 	return 0;
 }
 
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_pci_suspend(struct pci_dev *pdev)
+{
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	return pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
 {
 	struct brcms_info *wl;
 	struct ieee80211_hw *hw;
 
-	hw = pci_get_drvdata(pdev);
+	hw = bcma_get_drvdata(pdev);
 	wl = hw->priv;
 	if (!wl) {
 		wiphy_err(wl->wiphy,
-			  "brcms_suspend: pci_get_drvdata failed\n");
+			  "brcms_suspend: bcma_get_drvdata failed\n");
 		return -ENODEV;
 	}
 
@@ -1210,25 +1153,14 @@
 	wl->pub->hw_up = false;
 	spin_unlock_bh(&wl->lock);
 
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	return pci_set_power_state(pdev, PCI_D3hot);
+	/* temporarily do suspend ourselves */
+	return brcms_pci_suspend(pdev->bus->host_pci);
 }
 
-static int brcms_resume(struct pci_dev *pdev)
+static int brcms_pci_resume(struct pci_dev *pdev)
 {
-	struct brcms_info *wl;
-	struct ieee80211_hw *hw;
 	int err = 0;
-	u32 val;
-
-	hw = pci_get_drvdata(pdev);
-	wl = hw->priv;
-	if (!wl) {
-		wiphy_err(wl->wiphy,
-			  "wl: brcms_resume: pci_get_drvdata failed\n");
-		return -ENODEV;
-	}
+	uint val;
 
 	err = pci_set_power_state(pdev, PCI_D0);
 	if (err)
@@ -1246,24 +1178,28 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-	/*
-	*  done. driver will be put in up state
-	*  in brcms_ops_add_interface() call.
-	*/
-	return err;
+	return 0;
 }
 
-static struct pci_driver brcms_pci_driver = {
+static int brcms_resume(struct bcma_device *pdev)
+{
+	/*
+	*  just do pci resume for now until bcma supports it.
+	*/
+	return brcms_pci_resume(pdev->bus->host_pci);
+}
+
+static struct bcma_driver brcms_bcma_driver = {
 	.name     = KBUILD_MODNAME,
-	.probe    = brcms_pci_probe,
+	.probe    = brcms_bcma_probe,
 	.suspend  = brcms_suspend,
 	.resume   = brcms_resume,
 	.remove   = __devexit_p(brcms_remove),
-	.id_table = brcms_pci_id_table,
+	.id_table = brcms_coreid_table,
 };
 
 /**
- * This is the main entry point for the WL driver.
+ * This is the main entry point for the brcmsmac driver.
  *
  * This function determines if a device pointed to by pdev is a WL device,
  * and if so, performs a brcms_attach() on it.
@@ -1278,26 +1214,24 @@
 		brcm_msg_level = msglevel;
 #endif				/* BCMDBG */
 
-	error = pci_register_driver(&brcms_pci_driver);
+	error = bcma_driver_register(&brcms_bcma_driver);
+	printk(KERN_ERR "%s: register returned %d\n", __func__, error);
 	if (!error)
 		return 0;
 
-
-
 	return error;
 }
 
 /**
- * This function unloads the WL driver from the system.
+ * This function unloads the brcmsmac driver from the system.
  *
- * This function unconditionally unloads the WL driver module from the
+ * This function unconditionally unloads the brcmsmac driver module from the
  * system.
  *
  */
 static void __exit brcms_module_exit(void)
 {
-	pci_unregister_driver(&brcms_pci_driver);
-
+	bcma_driver_unregister(&brcms_bcma_driver);
 }
 
 module_init(brcms_module_init);
@@ -1319,8 +1253,7 @@
 {
 	BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
 	brcms_reset(wl);
-
-	brcms_c_init(wl->wlc);
+	brcms_c_init(wl->wlc, wl->mute_tx);
 }
 
 /*
@@ -1332,11 +1265,19 @@
 	brcms_c_reset(wl->wlc);
 
 	/* dpc will not be rescheduled */
-	wl->resched = 0;
+	wl->resched = false;
 
 	return 0;
 }
 
+void brcms_fatal_error(struct brcms_info *wl)
+{
+	wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+		  wl->wlc->pub->unit);
+	brcms_reset(wl);
+	ieee80211_restart_hw(wl->pub->ieee_hw);
+}
+
 /*
  * These are interrupt on/off entry points. Disable interrupts
  * during interrupt state transition.
@@ -1561,11 +1502,10 @@
 			if (le32_to_cpu(hdr->idx) == idx) {
 				pdata = wl->fw.fw_bin[i]->data +
 					le32_to_cpu(hdr->offset);
-				*pbuf = kmalloc(len, GFP_ATOMIC);
+				*pbuf = kmemdup(pdata, len, GFP_ATOMIC);
 				if (*pbuf == NULL)
 					goto fail;
 
-				memcpy(*pbuf, pdata, len);
 				return 0;
 			}
 		}
@@ -1578,7 +1518,7 @@
 }
 
 /*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
  * no locking is required.
  */
 int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1618,7 +1558,7 @@
 /*
  * checks validity of all firmware images loaded from user space
  *
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
  * no locking is required.
  */
 int brcms_check_firmwares(struct brcms_info *wl)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 177f0e4..8f60419 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,8 +68,6 @@
 	spinlock_t lock;	/* per-device perimeter lock */
 	spinlock_t isr_lock;	/* per-device ISR synchronization lock */
 
-	/* regsva for unmap in brcms_free() */
-	void __iomem *regsva;	/* opaque chip registers virtual address */
 
 	/* timer related fields */
 	atomic_t callbacks;	/* # outstanding callback functions */
@@ -80,6 +78,7 @@
 	struct brcms_firmware fw;
 	struct wiphy *wiphy;
 	struct brcms_ucode ucode;
+	bool mute_tx;
 };
 
 /* misc callbacks */
@@ -104,5 +103,6 @@
 extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
+extern void brcms_fatal_error(struct brcms_info *wl);
 
 #endif				/* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 510e9bb..f7ed340 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -30,44 +30,21 @@
 #include "mac80211_if.h"
 #include "ucode_loader.h"
 #include "main.h"
+#include "soc.h"
 
 /*
  * Indication for txflowcontrol that all priority bits in
  * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
  */
-#define ALLPRIO		-1
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN	((4 * IEEE80211_MAX_SSID_LEN) + 1)
+#define ALLPRIO				-1
 
 /* watchdog timer, in unit of ms */
-#define	TIMER_INTERVAL_WATCHDOG	1000
+#define TIMER_INTERVAL_WATCHDOG		1000
 /* radio monitor timer, in unit of ms */
-#define	TIMER_INTERVAL_RADIOCHK	800
-
-/* Max MPC timeout, in unit of watchdog */
-#ifndef BRCMS_MPC_MAX_DELAYCNT
-#define	BRCMS_MPC_MAX_DELAYCNT	10
-#endif
-
-/* Min MPC timeout, in unit of watchdog */
-#define	BRCMS_MPC_MIN_DELAYCNT	1
-#define	BRCMS_MPC_THRESHOLD	3	/* MPC count threshold level */
+#define TIMER_INTERVAL_RADIOCHK		800
 
 /* beacon interval, in unit of 1024TU */
-#define	BEACON_INTERVAL_DEFAULT	100
-/* DTIM interval, in unit of beacon interval */
-#define	DTIM_INTERVAL_DEFAULT	3
-
-/* Scale down delays to accommodate QT slow speed */
-/* beacon interval, in unit of 1024TU */
-#define	BEACON_INTERVAL_DEF_QT	20
-/* DTIM interval, in unit of beacon interval */
-#define	DTIM_INTERVAL_DEF_QT	1
-
-#define	TBTT_ALIGN_LEEWAY_US	100	/* min leeway before first TBTT in us */
+#define BEACON_INTERVAL_DEFAULT		100
 
 /* n-mode support capability */
 /* 2x2 includes both 1x1 & 2x2 devices
@@ -78,113 +55,71 @@
 #define WL_11N_3x3			3
 #define WL_11N_4x4			4
 
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N		0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX	0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX	0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX	0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX	0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX	0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX	0x00000040
-#define WLFEATURE_DISABLE_11N_GF	0x00000080
+#define EDCF_ACI_MASK			0x60
+#define EDCF_ACI_SHIFT			5
+#define EDCF_ECWMIN_MASK		0x0f
+#define EDCF_ECWMAX_SHIFT		4
+#define EDCF_AIFSN_MASK			0x0f
+#define EDCF_AIFSN_MAX			15
+#define EDCF_ECWMAX_MASK		0xf0
 
-#define EDCF_ACI_MASK                0x60
-#define EDCF_ACI_SHIFT               5
-#define EDCF_ECWMIN_MASK             0x0f
-#define EDCF_ECWMAX_SHIFT            4
-#define EDCF_AIFSN_MASK              0x0f
-#define EDCF_AIFSN_MAX               15
-#define EDCF_ECWMAX_MASK             0xf0
+#define EDCF_AC_BE_TXOP_STA		0x0000
+#define EDCF_AC_BK_TXOP_STA		0x0000
+#define EDCF_AC_VO_ACI_STA		0x62
+#define EDCF_AC_VO_ECW_STA		0x32
+#define EDCF_AC_VI_ACI_STA		0x42
+#define EDCF_AC_VI_ECW_STA		0x43
+#define EDCF_AC_BK_ECW_STA		0xA4
+#define EDCF_AC_VI_TXOP_STA		0x005e
+#define EDCF_AC_VO_TXOP_STA		0x002f
+#define EDCF_AC_BE_ACI_STA		0x03
+#define EDCF_AC_BE_ECW_STA		0xA4
+#define EDCF_AC_BK_ACI_STA		0x27
+#define EDCF_AC_VO_TXOP_AP		0x002f
 
-#define EDCF_AC_BE_TXOP_STA          0x0000
-#define EDCF_AC_BK_TXOP_STA          0x0000
-#define EDCF_AC_VO_ACI_STA           0x62
-#define EDCF_AC_VO_ECW_STA           0x32
-#define EDCF_AC_VI_ACI_STA           0x42
-#define EDCF_AC_VI_ECW_STA           0x43
-#define EDCF_AC_BK_ECW_STA           0xA4
-#define EDCF_AC_VI_TXOP_STA          0x005e
-#define EDCF_AC_VO_TXOP_STA          0x002f
-#define EDCF_AC_BE_ACI_STA           0x03
-#define EDCF_AC_BE_ECW_STA           0xA4
-#define EDCF_AC_BK_ACI_STA           0x27
-#define EDCF_AC_VO_TXOP_AP           0x002f
+#define EDCF_TXOP2USEC(txop)		((txop) << 5)
+#define EDCF_ECW2CW(exp)		((1 << (exp)) - 1)
 
-#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
-#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define APHY_SYMBOL_TIME		4
+#define APHY_PREAMBLE_TIME		16
+#define APHY_SIGNAL_TIME		4
+#define APHY_SIFS_TIME			16
+#define APHY_SERVICE_NBITS		16
+#define APHY_TAIL_NBITS			6
+#define BPHY_SIFS_TIME			10
+#define BPHY_PLCP_SHORT_TIME		96
 
-#define APHY_SYMBOL_TIME	4
-#define APHY_PREAMBLE_TIME	16
-#define APHY_SIGNAL_TIME	4
-#define APHY_SIFS_TIME		16
-#define APHY_SERVICE_NBITS	16
-#define APHY_TAIL_NBITS		6
-#define BPHY_SIFS_TIME		10
-#define BPHY_PLCP_SHORT_TIME	96
-
-#define PREN_PREAMBLE		24
-#define PREN_MM_EXT		12
-#define PREN_PREAMBLE_EXT	4
+#define PREN_PREAMBLE			24
+#define PREN_MM_EXT			12
+#define PREN_PREAMBLE_EXT		4
 
 #define DOT11_MAC_HDR_LEN		24
-#define	DOT11_ACK_LEN		10
-#define DOT11_BA_LEN		4
+#define DOT11_ACK_LEN			10
+#define DOT11_BA_LEN			4
 #define DOT11_OFDM_SIGNAL_EXTENSION	6
 #define DOT11_MIN_FRAG_LEN		256
-#define	DOT11_RTS_LEN		16
-#define	DOT11_CTS_LEN		10
+#define DOT11_RTS_LEN			16
+#define DOT11_CTS_LEN			10
 #define DOT11_BA_BITMAP_LEN		128
 #define DOT11_MIN_BEACON_PERIOD		1
 #define DOT11_MAX_BEACON_PERIOD		0xFFFF
-#define	DOT11_MAXNUMFRAGS	16
+#define DOT11_MAXNUMFRAGS		16
 #define DOT11_MAX_FRAG_LEN		2346
 
-#define BPHY_PLCP_TIME		192
-#define RIFS_11N_TIME		2
+#define BPHY_PLCP_TIME			192
+#define RIFS_11N_TIME			2
 
-#define WME_VER			1
-#define WME_SUBTYPE_PARAM_IE	1
-#define WME_TYPE		2
-#define WME_OUI			"\x00\x50\xf2"
-
-#define AC_BE			0
-#define AC_BK			1
-#define AC_VI			2
-#define AC_VO			3
-
-#define	BCN_TMPL_LEN		512	/* length of the BCN template area */
+/* length of the BCN template area */
+#define BCN_TMPL_LEN			512
 
 /* brcms_bss_info flag bit values */
-#define BRCMS_BSS_HT		0x0020	/* BSS is HT (MIMO) capable */
+#define BRCMS_BSS_HT			0x0020	/* BSS is HT (MIMO) capable */
 
-/* Flags used in brcms_c_txq_info.stopped */
-/* per prio flow control bits */
-#define TXQ_STOP_FOR_PRIOFC_MASK	0x000000FF
-/* stop txq enqueue for packet drain */
-#define TXQ_STOP_FOR_PKT_DRAIN		0x00000100
-/* stop txq enqueue for ampdu flow control */
-#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL	0x00000200
-
-#define	BRCMS_HWRXOFF		38	/* chip rx buffer offset */
-
-/* Find basic rate for a given rate */
-static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
-{
-	if (is_mcs_rate(rspec))
-		return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
-		       .leg_ofdm];
-	return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
-}
-
-static u16 frametype(u32 rspec, u8 mimoframe)
-{
-	if (is_mcs_rate(rspec))
-		return mimoframe;
-	return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
-}
+/* chip rx buffer offset */
+#define BRCMS_HWRXOFF			38
 
 /* rfdisable delay timer 500 ms, runs of ALP clock */
-#define RFDISABLE_DEFAULT	10000000
+#define RFDISABLE_DEFAULT		10000000
 
 #define BRCMS_TEMPSENSE_PERIOD		10	/* 10 second timeout */
 
@@ -194,87 +129,83 @@
  * These constants are used ONLY by wlc_prio2prec_map.  Do not use them
  * elsewhere.
  */
-#define	_BRCMS_PREC_NONE		0	/* None = - */
-#define	_BRCMS_PREC_BK		2	/* BK - Background */
-#define	_BRCMS_PREC_BE		4	/* BE - Best-effort */
-#define	_BRCMS_PREC_EE		6	/* EE - Excellent-effort */
-#define	_BRCMS_PREC_CL		8	/* CL - Controlled Load */
-#define	_BRCMS_PREC_VI		10	/* Vi - Video */
-#define	_BRCMS_PREC_VO		12	/* Vo - Voice */
-#define	_BRCMS_PREC_NC		14	/* NC - Network Control */
+#define _BRCMS_PREC_NONE		0	/* None = - */
+#define _BRCMS_PREC_BK			2	/* BK - Background */
+#define _BRCMS_PREC_BE			4	/* BE - Best-effort */
+#define _BRCMS_PREC_EE			6	/* EE - Excellent-effort */
+#define _BRCMS_PREC_CL			8	/* CL - Controlled Load */
+#define _BRCMS_PREC_VI			10	/* Vi - Video */
+#define _BRCMS_PREC_VO			12	/* Vo - Voice */
+#define _BRCMS_PREC_NC			14	/* NC - Network Control */
 
-/* The BSS is generating beacons in HW */
-#define BRCMS_BSSCFG_HW_BCN	0x20
+/* synthpu_dly times in us */
+#define SYNTHPU_DLY_APHY_US		3700
+#define SYNTHPU_DLY_BPHY_US		1050
+#define SYNTHPU_DLY_NPHY_US		2048
+#define SYNTHPU_DLY_LPPHY_US		300
 
-#define	SYNTHPU_DLY_APHY_US	3700	/* a phy synthpu_dly time in us */
-#define	SYNTHPU_DLY_BPHY_US	1050	/* b/g phy synthpu_dly time in us */
-#define	SYNTHPU_DLY_NPHY_US	2048	/* n phy REV3 synthpu_dly time in us */
-#define	SYNTHPU_DLY_LPPHY_US	300	/* lpphy synthpu_dly time in us */
-
-#define	SYNTHPU_DLY_PHY_US_QT	100	/* QT synthpu_dly time in us */
-
-#define	ANTCNT			10	/* vanilla M_MAX_ANTCNT value */
+#define ANTCNT				10	/* vanilla M_MAX_ANTCNT val */
 
 /* Per-AC retry limit register definitions; uses defs.h bitfield macros */
-#define EDCF_SHORT_S            0
-#define EDCF_SFB_S              4
-#define EDCF_LONG_S             8
-#define EDCF_LFB_S              12
-#define EDCF_SHORT_M            BITFIELD_MASK(4)
-#define EDCF_SFB_M              BITFIELD_MASK(4)
-#define EDCF_LONG_M             BITFIELD_MASK(4)
-#define EDCF_LFB_M              BITFIELD_MASK(4)
+#define EDCF_SHORT_S			0
+#define EDCF_SFB_S			4
+#define EDCF_LONG_S			8
+#define EDCF_LFB_S			12
+#define EDCF_SHORT_M			BITFIELD_MASK(4)
+#define EDCF_SFB_M			BITFIELD_MASK(4)
+#define EDCF_LONG_M			BITFIELD_MASK(4)
+#define EDCF_LFB_M			BITFIELD_MASK(4)
 
-#define	RETRY_SHORT_DEF			7	/* Default Short retry Limit */
-#define	RETRY_SHORT_MAX			255	/* Maximum Short retry Limit */
-#define	RETRY_LONG_DEF			4	/* Default Long retry count */
-#define	RETRY_SHORT_FB			3 /* Short count for fallback rate */
-#define	RETRY_LONG_FB			2 /* Long count for fallback rate */
+#define RETRY_SHORT_DEF			7	/* Default Short retry Limit */
+#define RETRY_SHORT_MAX			255	/* Maximum Short retry Limit */
+#define RETRY_LONG_DEF			4	/* Default Long retry count */
+#define RETRY_SHORT_FB			3	/* Short count for fb rate */
+#define RETRY_LONG_FB			2	/* Long count for fb rate */
 
-#define	APHY_CWMIN		15
-#define PHY_CWMAX		1023
+#define APHY_CWMIN			15
+#define PHY_CWMAX			1023
 
-#define EDCF_AIFSN_MIN               1
+#define EDCF_AIFSN_MIN			1
 
-#define FRAGNUM_MASK		0xF
+#define FRAGNUM_MASK			0xF
 
-#define APHY_SLOT_TIME		9
-#define BPHY_SLOT_TIME		20
+#define APHY_SLOT_TIME			9
+#define BPHY_SLOT_TIME			20
 
-#define	WL_SPURAVOID_OFF	0
-#define	WL_SPURAVOID_ON1	1
-#define	WL_SPURAVOID_ON2	2
+#define WL_SPURAVOID_OFF		0
+#define WL_SPURAVOID_ON1		1
+#define WL_SPURAVOID_ON2		2
 
 /* invalid core flags, use the saved coreflags */
-#define BRCMS_USE_COREFLAGS	0xffffffff
+#define BRCMS_USE_COREFLAGS		0xffffffff
 
 /* values for PLCPHdr_override */
-#define BRCMS_PLCP_AUTO	-1
-#define BRCMS_PLCP_SHORT	0
-#define BRCMS_PLCP_LONG	1
+#define BRCMS_PLCP_AUTO			-1
+#define BRCMS_PLCP_SHORT		0
+#define BRCMS_PLCP_LONG			1
 
 /* values for g_protection_override and n_protection_override */
 #define BRCMS_PROTECTION_AUTO		-1
 #define BRCMS_PROTECTION_OFF		0
 #define BRCMS_PROTECTION_ON		1
 #define BRCMS_PROTECTION_MMHDR_ONLY	2
-#define BRCMS_PROTECTION_CTS_ONLY		3
+#define BRCMS_PROTECTION_CTS_ONLY	3
 
 /* values for g_protection_control and n_protection_control */
-#define BRCMS_PROTECTION_CTL_OFF		0
+#define BRCMS_PROTECTION_CTL_OFF	0
 #define BRCMS_PROTECTION_CTL_LOCAL	1
 #define BRCMS_PROTECTION_CTL_OVERLAP	2
 
 /* values for n_protection */
 #define BRCMS_N_PROTECTION_OFF		0
 #define BRCMS_N_PROTECTION_OPTIONAL	1
-#define BRCMS_N_PROTECTION_20IN40		2
+#define BRCMS_N_PROTECTION_20IN40	2
 #define BRCMS_N_PROTECTION_MIXEDMODE	3
 
 /* values for band specific 40MHz capabilities */
-#define BRCMS_N_BW_20ALL			0
-#define BRCMS_N_BW_40ALL			1
-#define BRCMS_N_BW_20IN2G_40IN5G		2
+#define BRCMS_N_BW_20ALL		0
+#define BRCMS_N_BW_40ALL		1
+#define BRCMS_N_BW_20IN2G_40IN5G	2
 
 /* bitflags for SGI support (sgi_rx iovar) */
 #define BRCMS_N_SGI_20			0x01
@@ -282,48 +213,42 @@
 
 /* defines used by the nrate iovar */
 /* MCS in use, indicates b0-6 holds an mcs */
-#define NRATE_MCS_INUSE	0x00000080
+#define NRATE_MCS_INUSE			0x00000080
 /* rate/mcs value */
-#define NRATE_RATE_MASK 0x0000007f
+#define NRATE_RATE_MASK			0x0000007f
 /* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_MASK	0x0000ff00
+#define NRATE_STF_MASK			0x0000ff00
 /* stf mode shift */
-#define NRATE_STF_SHIFT	8
-/* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE	0x80000000
+#define NRATE_STF_SHIFT			8
 /* bit indicates to override mcs only */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
-#define NRATE_SGI_MASK  0x00800000	/* sgi mode */
-#define NRATE_SGI_SHIFT 23	/* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000	/* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22	/* ldpc shift */
+#define NRATE_OVERRIDE_MCS_ONLY		0x40000000
+#define NRATE_SGI_MASK			0x00800000	/* sgi mode */
+#define NRATE_SGI_SHIFT			23		/* sgi mode */
+#define NRATE_LDPC_CODING		0x00400000	/* adv coding in use */
+#define NRATE_LDPC_SHIFT		22		/* ldpc shift */
 
-#define NRATE_STF_SISO	0	/* stf mode SISO */
-#define NRATE_STF_CDD	1	/* stf mode CDD */
-#define NRATE_STF_STBC	2	/* stf mode STBC */
-#define NRATE_STF_SDM	3	/* stf mode SDM */
+#define NRATE_STF_SISO			0		/* stf mode SISO */
+#define NRATE_STF_CDD			1		/* stf mode CDD */
+#define NRATE_STF_STBC			2		/* stf mode STBC */
+#define NRATE_STF_SDM			3		/* stf mode SDM */
 
-#define MAX_DMA_SEGS 4
+#define MAX_DMA_SEGS			4
 
 /* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD		256
+#define NTXD				256
 /* Max # of entries in Rx FIFO based on 4kb page size */
-#define NRXD		256
+#define NRXD				256
 
 /* try to keep this # rbufs posted to the chip */
-#define	NRXBUFPOST	32
+#define NRXBUFPOST			32
 
 /* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT		50
+#define BRCMS_DATAHIWAT			50
 
-/* bounded rx loops */
-#define RXBND		8 /* max # frames to process in brcms_c_recv() */
-#define TXSBND		8 /* max # tx status to process in wlc_txstatus() */
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN	((4 * IEEE80211_MAX_SSID_LEN) + 1)
+/* max # frames to process in brcms_c_recv() */
+#define RXBND				8
+/* max # tx status to process in wlc_txstatus() */
+#define TXSBND				8
 
 /* brcmu_format_flags() bit description structure */
 struct brcms_c_bit_desc {
@@ -375,10 +300,22 @@
 #endif				/* BCMDBG */
 
 /* TX FIFO number to WME/802.1E Access Category */
-static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
+static const u8 wme_fifo2ac[] = {
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BE
+};
 
-/* WME/802.1E Access Category to TX FIFO number */
-static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
+/* ieee80211 Access Category to TX FIFO number */
+static const u8 wme_ac2fifo[] = {
+	TX_AC_VO_FIFO,
+	TX_AC_VI_FIFO,
+	TX_AC_BE_FIFO,
+	TX_AC_BK_FIFO
+};
 
 /* 802.1D Priority to precedence queue mapping */
 const u8 wlc_prio2prec_map[] = {
@@ -405,13 +342,6 @@
 	{9, 58, 22, 14, 14, 5},
 };
 
-static const u8 acbitmap2maxprio[] = {
-	PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
-	PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
-	PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
-	PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
-};
-
 #ifdef BCMDBG
 static const char * const fifo_names[] = {
 	"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
@@ -424,6 +354,22 @@
 static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
 #endif
 
+/* Find basic rate for a given rate */
+static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
+{
+	if (is_mcs_rate(rspec))
+		return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
+		       .leg_ofdm];
+	return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
+}
+
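+/*
+ * map a rate spec to its d11 frame type: MCS rates use the caller's
+ * mimoframe value, legacy rates resolve to either CCK or OFDM
+ */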
+static u16 frametype(u32 rspec, u8 mimoframe)
+{
+	if (is_mcs_rate(rspec))
+		return mimoframe;
+	return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
+}
+
 /* currently the best mechanism for determining SIFS is the band in use */
 static u16 get_sifs(struct brcms_band *band)
 {
@@ -442,10 +388,13 @@
  */
 static bool brcms_deviceremoved(struct brcms_c_info *wlc)
 {
+	u32 macctrl;
+
 	if (!wlc->hw->clk)
 		return ai_deviceremoved(wlc->hw->sih);
-	return (R_REG(&wlc->hw->regs->maccontrol) &
-		(MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+	macctrl = bcma_read32(wlc->hw->d11core,
+			      D11REGOFFS(maccontrol));
+	return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
 }
 
 /* sum the individual fifo tx pending packet counts */
@@ -470,20 +419,6 @@
 	return BRCMS_10_MHZ;
 }
 
-/*
- * return true if Minimum Power Consumption should
- * be entered, false otherwise
- */
-static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
-{
-	return false;
-}
-
-static bool brcms_c_ismpc(struct brcms_c_info *wlc)
-{
-	return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
-}
-
 static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
 {
 	if (cfg == NULL)
@@ -650,17 +585,15 @@
 static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
 					bool shortslot)
 {
-	struct d11regs __iomem *regs;
-
-	regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 
 	if (shortslot) {
 		/* 11g short slot: 11a timing */
-		W_REG(&regs->ifs_slot, 0x0207);	/* APHY_SLOT_TIME */
+		bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
 		brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
 	} else {
 		/* 11g long slot: 11b timing */
-		W_REG(&regs->ifs_slot, 0x0212);	/* BPHY_SLOT_TIME */
+		bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
 		brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
 	}
 }
@@ -669,9 +602,8 @@
  * calculate frame duration of a given rate and length, return
  * time in usec unit
  */
-uint
-brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
-			u8 preamble_type, uint mac_len)
+static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
+				    u8 preamble_type, uint mac_len)
 {
 	uint nsyms, dur = 0, Ndps, kNdps;
 	uint rate = rspec2rate(ratespec);
@@ -741,24 +673,22 @@
 static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
 				const struct d11init *inits)
 {
+	struct bcma_device *core = wlc_hw->d11core;
 	int i;
-	u8 __iomem *base;
-	u8 __iomem *addr;
+	uint offset;
 	u16 size;
 	u32 value;
 
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-	base = (u8 __iomem *)wlc_hw->regs;
-
 	for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
 		size = le16_to_cpu(inits[i].size);
-		addr = base + le16_to_cpu(inits[i].addr);
+		offset = le16_to_cpu(inits[i].addr);
 		value = le32_to_cpu(inits[i].value);
 		if (size == 2)
-			W_REG((u16 __iomem *)addr, value);
+			bcma_write16(core, offset, value);
 		else if (size == 4)
-			W_REG((u32 __iomem *)addr, value);
+			bcma_write32(core, offset, value);
 		else
 			break;
 	}
@@ -808,6 +738,14 @@
 	}
 }
 
+static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
+{
+	struct bcma_device *core = wlc_hw->d11core;
+	u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
+
+	bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
+}
+
 static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
 {
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -816,17 +754,17 @@
 
 	if (OFF == clk) {	/* clear gmode bit, put phy into reset */
 
-		ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
-			       (SICF_PRST | SICF_FGC));
+		brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
+				   (SICF_PRST | SICF_FGC));
 		udelay(1);
-		ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+		brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
 		udelay(1);
 
 	} else {		/* take phy out of reset */
 
-		ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+		brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
 		udelay(1);
-		ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+		brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
 		udelay(1);
 
 	}
@@ -847,9 +785,14 @@
 	wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
 
 	/* set gmode core flag */
-	if (wlc_hw->sbclk && !wlc_hw->noreset)
-		ai_core_cflags(wlc_hw->sih, SICF_GMODE,
-			       ((bandunit == 0) ? SICF_GMODE : 0));
+	if (wlc_hw->sbclk && !wlc_hw->noreset) {
+		u32 gmode = 0;
+
+		if (bandunit == 0)
+			gmode = SICF_GMODE;
+
+		brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
+	}
 }
 
 /* switch to new band but leave it inactive */
@@ -857,10 +800,12 @@
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
 	u32 macintmask;
+	u32 macctrl;
 
 	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
-	WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+	macctrl = bcma_read32(wlc_hw->d11core,
+			      D11REGOFFS(maccontrol));
+	WARN_ON((macctrl & MCTL_EN_MAC) != 0);
 
 	/* disable interrupts */
 	macintmask = brcms_intrsoff(wlc->wl);
@@ -969,7 +914,7 @@
 		    lfbl,	/* Long Frame Rate Fallback Limit */
 		    fbl;
 
-		if (queue < AC_COUNT) {
+		if (queue < IEEE80211_NUM_ACS) {
 			sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
 				      EDCF_SFB);
 			lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
@@ -1018,14 +963,12 @@
 			tx_info->flags |= IEEE80211_TX_STAT_ACK;
 	}
 
-	totlen = brcmu_pkttotlen(p);
+	totlen = p->len;
 	free_pdu = true;
 
 	brcms_c_txfifo_complete(wlc, queue, 1);
 
 	if (lastframe) {
-		p->next = NULL;
-		p->prev = NULL;
 		/* remove PLCP & Broadcom tx descriptor header */
 		skb_pull(p, D11_PHY_HDR_LEN);
 		skb_pull(p, D11_TXH_LEN);
@@ -1053,7 +996,7 @@
 {
 	bool morepending = false;
 	struct brcms_c_info *wlc = wlc_hw->wlc;
-	struct d11regs __iomem *regs;
+	struct bcma_device *core;
 	struct tx_status txstatus, *txs;
 	u32 s1, s2;
 	uint n = 0;
@@ -1066,18 +1009,18 @@
 	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
 	txs = &txstatus;
-	regs = wlc_hw->regs;
+	core = wlc_hw->d11core;
 	*fatal = false;
+	s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
 	while (!(*fatal)
-	       && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+	       && (s1 & TXS_V)) {
 
 		if (s1 == 0xffffffff) {
 			wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
 				wlc_hw->unit, __func__);
 			return morepending;
 		}
-
-		s2 = R_REG(&regs->frmtxstatus2);
+		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
 
 		txs->status = s1 & TXS_STATUS_MASK;
 		txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1090,6 +1033,7 @@
 		/* !give others some time to run! */
 		if (++n >= max_tx_num)
 			break;
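+		/* read the next status word before re-testing the loop condition */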
+		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
 	}
 
 	if (*fatal)
@@ -1134,12 +1078,12 @@
 	}
 }
 
-static struct dma64regs __iomem *
-dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
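+/* return the d11regs offset of the tx or rx dma engine for the given fifo */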
+static uint
+dmareg(uint direction, uint fifonum)
 {
 	if (direction == DMA_TX)
-		return &(hw->regs->fifo64regs[fifonum].dmaxmt);
-	return &(hw->regs->fifo64regs[fifonum].dmarcv);
+		return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
+	return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
 }
 
 static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
@@ -1165,9 +1109,9 @@
 		 * TX: TX_AC_BK_FIFO (TX AC Background data packets)
 		 * RX: RX_FIFO (RX data packets)
 		 */
-		wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
-					   (wme ? dmareg(wlc_hw, DMA_TX, 0) :
-					    NULL), dmareg(wlc_hw, DMA_RX, 0),
+		wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+					   (wme ? dmareg(DMA_TX, 0) : 0),
+					   dmareg(DMA_RX, 0),
 					   (wme ? NTXD : 0), NRXD,
 					   RXBUFSZ, -1, NRXBUFPOST,
 					   BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1179,8 +1123,8 @@
 		 *   (legacy) TX_DATA_FIFO (TX data packets)
 		 * RX: UNUSED
 		 */
-		wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
-					   dmareg(wlc_hw, DMA_TX, 1), NULL,
+		wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+					   dmareg(DMA_TX, 1), 0,
 					   NTXD, 0, 0, -1, 0, 0,
 					   &brcm_msg_level);
 		dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1190,8 +1134,8 @@
 		 * TX: TX_AC_VI_FIFO (TX AC Video data packets)
 		 * RX: UNUSED
 		 */
-		wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
-					   dmareg(wlc_hw, DMA_TX, 2), NULL,
+		wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+					   dmareg(DMA_TX, 2), 0,
 					   NTXD, 0, 0, -1, 0, 0,
 					   &brcm_msg_level);
 		dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1200,9 +1144,9 @@
 		 * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
 		 *   (legacy) TX_CTL_FIFO (TX control & mgmt packets)
 		 */
-		wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
-					   dmareg(wlc_hw, DMA_TX, 3),
-					   NULL, NTXD, 0, 0, -1,
+		wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+					   dmareg(DMA_TX, 3),
+					   0, NTXD, 0, 0, -1,
 					   0, 0, &brcm_msg_level);
 		dma_attach_err |= (NULL == wlc_hw->di[3]);
 /* Cleaner to leave this as if with AP defined */
@@ -1276,7 +1220,7 @@
 /* control chip clock to save power, enable dynamic clock or force fast clock */
 static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
 {
-	if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+	if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
 		/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
 		 * on backplane, but mac core will still run on ALP(not HT) when
 		 * it enters powersave mode, which means the FCA bit may not be
@@ -1285,29 +1229,33 @@
 
 		if (wlc_hw->clk) {
 			if (mode == CLK_FAST) {
-				OR_REG(&wlc_hw->regs->clk_ctl_st,
-				       CCS_FORCEHT);
+				bcma_set32(wlc_hw->d11core,
+					   D11REGOFFS(clk_ctl_st),
+					   CCS_FORCEHT);
 
 				udelay(64);
 
-				SPINWAIT(((R_REG
-					   (&wlc_hw->regs->
-					    clk_ctl_st) & CCS_HTAVAIL) == 0),
-					 PMU_MAX_TRANSITION_DLY);
-				WARN_ON(!(R_REG
-					  (&wlc_hw->regs->
-					   clk_ctl_st) & CCS_HTAVAIL));
+				SPINWAIT(
+				    ((bcma_read32(wlc_hw->d11core,
+				      D11REGOFFS(clk_ctl_st)) &
+				      CCS_HTAVAIL) == 0),
+				      PMU_MAX_TRANSITION_DLY);
+				WARN_ON(!(bcma_read32(wlc_hw->d11core,
+					D11REGOFFS(clk_ctl_st)) &
+					CCS_HTAVAIL));
 			} else {
-				if ((wlc_hw->sih->pmurev == 0) &&
-				    (R_REG
-				     (&wlc_hw->regs->
-				      clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
-					SPINWAIT(((R_REG
-						   (&wlc_hw->regs->
-						    clk_ctl_st) & CCS_HTAVAIL)
-						  == 0),
-						 PMU_MAX_TRANSITION_DLY);
-				AND_REG(&wlc_hw->regs->clk_ctl_st,
+				if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
+				    (bcma_read32(wlc_hw->d11core,
+					D11REGOFFS(clk_ctl_st)) &
+					(CCS_FORCEHT | CCS_HTAREQ)))
+					SPINWAIT(
+					    ((bcma_read32(wlc_hw->d11core,
+					      offsetof(struct d11regs,
+						       clk_ctl_st)) &
+					      CCS_HTAVAIL) == 0),
+					      PMU_MAX_TRANSITION_DLY);
+				bcma_mask32(wlc_hw->d11core,
+					D11REGOFFS(clk_ctl_st),
 					~CCS_FORCEHT);
 			}
 		}
@@ -1322,7 +1270,7 @@
 
 		/* check fast clock is available (if core is not in reset) */
 		if (wlc_hw->forcefastclk && wlc_hw->clk)
-			WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+			WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
 				  SISF_FCLKA));
 
 		/*
@@ -1439,7 +1387,8 @@
 		maccontrol |= MCTL_INFRA;
 	}
 
-	W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
+		     maccontrol);
 }
 
 /* set or clear maccontrol bits */
@@ -1533,7 +1482,7 @@
 brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
 		       const u8 *addr)
 {
-	struct d11regs __iomem *regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u16 mac_l;
 	u16 mac_m;
 	u16 mac_h;
@@ -1541,38 +1490,36 @@
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
 		 wlc_hw->unit);
 
-	regs = wlc_hw->regs;
 	mac_l = addr[0] | (addr[1] << 8);
 	mac_m = addr[2] | (addr[3] << 8);
 	mac_h = addr[4] | (addr[5] << 8);
 
 	/* enter the MAC addr into the RXE match registers */
-	W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
-	W_REG(&regs->rcm_mat_data, mac_l);
-	W_REG(&regs->rcm_mat_data, mac_m);
-	W_REG(&regs->rcm_mat_data, mac_h);
-
+	bcma_write16(core, D11REGOFFS(rcm_ctl),
+		     RCM_INC_DATA | match_reg_offset);
+	bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
+	bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
+	bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
 }
 
 void
 brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
 			    void *buf)
 {
-	struct d11regs __iomem *regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 word;
 	__le32 word_le;
 	__be32 word_be;
 	bool be_bit;
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-	regs = wlc_hw->regs;
-	W_REG(&regs->tplatewrptr, offset);
+	bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
 
 	/* if MCTL_BIGEND bit set in mac control register,
 	 * the chip swaps data in fifo, as well as data in
 	 * template ram
 	 */
-	be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+	be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
 
 	while (len > 0) {
 		memcpy(&word, buf, sizeof(u32));
@@ -1585,7 +1532,7 @@
 			word = *(u32 *)&word_le;
 		}
 
-		W_REG(&regs->tplatewrdata, word);
+		bcma_write32(core, D11REGOFFS(tplatewrdata), word);
 
 		buf = (u8 *) buf + sizeof(u32);
 		len -= sizeof(u32);
@@ -1596,18 +1543,20 @@
 {
 	wlc_hw->band->CWmin = newmin;
 
-	W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
-	(void)R_REG(&wlc_hw->regs->objaddr);
-	W_REG(&wlc_hw->regs->objdata, newmin);
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+		     OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+	(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
 }
 
 static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
 {
 	wlc_hw->band->CWmax = newmax;
 
-	W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
-	(void)R_REG(&wlc_hw->regs->objaddr);
-	W_REG(&wlc_hw->regs->objdata, newmax);
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+		     OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+	(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
 }
 
 void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
@@ -1773,17 +1722,17 @@
 {
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-	ai_corereg(wlc_hw->sih, SI_CC_IDX,
-		   offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
+		  ~0, 0);
 	udelay(1);
-	ai_corereg(wlc_hw->sih, SI_CC_IDX,
-		   offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+		  0x4, 0);
 	udelay(1);
-	ai_corereg(wlc_hw->sih, SI_CC_IDX,
-		   offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+		  0x4, 4);
 	udelay(1);
-	ai_corereg(wlc_hw->sih, SI_CC_IDX,
-		   offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+	ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+		  0x4, 0);
 	udelay(1);
 }
 
@@ -1797,18 +1746,18 @@
 		return;
 
 	if (ON == clk)
-		ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+		brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
 	else
-		ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+		brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
 
 }
 
 void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
 {
 	if (ON == clk)
-		ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+		brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
 	else
-		ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+		brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
 }
 
 void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1828,7 +1777,7 @@
 	if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
 	    NREV_LE(wlc_hw->band->phyrev, 4)) {
 		/* Set the PHY bandwidth */
-		ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+		brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
 
 		udelay(1);
 
@@ -1836,13 +1785,13 @@
 		brcms_b_core_phypll_reset(wlc_hw);
 
 		/* reset the PHY */
-		ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
-			       (SICF_PRST | SICF_PCLKE));
+		brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
+				   (SICF_PRST | SICF_PCLKE));
 		phy_in_reset = true;
 	} else {
-		ai_core_cflags(wlc_hw->sih,
-			       (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
-			       (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+		brcms_b_core_ioctl(wlc_hw,
+				   (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+				   (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
 	}
 
 	udelay(2);
@@ -1859,8 +1808,8 @@
 	u32 macintmask;
 
 	/* Enable the d11 core before accessing it */
-	if (!ai_iscoreup(wlc_hw->sih)) {
-		ai_core_reset(wlc_hw->sih, 0, 0);
+	if (!bcma_core_is_enabled(wlc_hw->d11core)) {
+		bcma_core_enable(wlc_hw->d11core, 0);
 		brcms_c_mctrl_reset(wlc_hw);
 	}
 
@@ -1886,7 +1835,8 @@
 	brcms_intrsrestore(wlc->wl, macintmask);
 
 	/* ucode should still be suspended.. */
-	WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+	WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
+		 MCTL_EN_MAC) != 0);
 }
 
 static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1914,7 +1864,7 @@
 	uint b2 = boardrev & 0xf;
 
 	/* boards from other vendors are always considered valid */
-	if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+	if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
 		return true;
 
 	/* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1986,7 +1936,7 @@
 static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
 {
 	bool v, clk, xtal;
-	u32 resetbits = 0, flags = 0;
+	u32 flags = 0;
 
 	xtal = wlc_hw->sbclk;
 	if (!xtal)
@@ -2003,22 +1953,22 @@
 		flags |= SICF_PCLKE;
 
 		/*
+		 * TODO: test suspend/resume
+		 *
 		 * AI chip doesn't restore bar0win2 on
 		 * hibernation/resume, need sw fixup
 		 */
-		if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
-		    (wlc_hw->sih->chip == BCM43225_CHIP_ID))
-			wlc_hw->regs = (struct d11regs __iomem *)
-					ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
-		ai_core_reset(wlc_hw->sih, flags, resetbits);
+
+		bcma_core_enable(wlc_hw->d11core, flags);
 		brcms_c_mctrl_reset(wlc_hw);
 	}
 
-	v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+	v = ((bcma_read32(wlc_hw->d11core,
+			  D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
 
 	/* put core back into reset */
 	if (!clk)
-		ai_core_disable(wlc_hw->sih, 0);
+		bcma_core_disable(wlc_hw->d11core, 0);
 
 	if (!xtal)
 		brcms_b_xtal(wlc_hw, OFF);
@@ -2042,25 +1992,21 @@
  */
 void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
 {
-	struct d11regs __iomem *regs;
 	uint i;
 	bool fastclk;
-	u32 resetbits = 0;
 
 	if (flags == BRCMS_USE_COREFLAGS)
 		flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
 
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-	regs = wlc_hw->regs;
-
 	/* request FAST clock if not on  */
 	fastclk = wlc_hw->forcefastclk;
 	if (!fastclk)
 		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
 
 	/* reset the dma engines except first time thru */
-	if (ai_iscoreup(wlc_hw->sih)) {
+	if (bcma_core_is_enabled(wlc_hw->d11core)) {
 		for (i = 0; i < NFIFO; i++)
 			if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
 				wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2098,14 +2044,14 @@
 	 * they may touch chipcommon as well.
 	 */
 	wlc_hw->clk = false;
-	ai_core_reset(wlc_hw->sih, flags, resetbits);
+	bcma_core_enable(wlc_hw->d11core, flags);
 	wlc_hw->clk = true;
 	if (wlc_hw->band && wlc_hw->band->pi)
 		wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
 
 	brcms_c_mctrl_reset(wlc_hw);
 
-	if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+	if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
 		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
 
 	brcms_b_phy_reset(wlc_hw);
@@ -2126,7 +2072,7 @@
  */
 static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
 {
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u16 fifo_nu;
 	u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
 	u16 txfifo_def, txfifo_def1;
@@ -2147,11 +2093,11 @@
 		txfifo_cmd =
 		    TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
 
-		W_REG(&regs->xmtfifocmd, txfifo_cmd);
-		W_REG(&regs->xmtfifodef, txfifo_def);
-		W_REG(&regs->xmtfifodef1, txfifo_def1);
+		bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
+		bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
+		bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
 
-		W_REG(&regs->xmtfifocmd, txfifo_cmd);
+		bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
 
 		txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
 	}
@@ -2186,27 +2132,27 @@
 
 void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
 {
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 
-	if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
-	    (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+	if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
+	    (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
 		if (spurmode == WL_SPURAVOID_ON2) {	/* 126Mhz */
-			W_REG(&regs->tsf_clk_frac_l, 0x2082);
-			W_REG(&regs->tsf_clk_frac_h, 0x8);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
 		} else if (spurmode == WL_SPURAVOID_ON1) {	/* 123Mhz */
-			W_REG(&regs->tsf_clk_frac_l, 0x5341);
-			W_REG(&regs->tsf_clk_frac_h, 0x8);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
 		} else {	/* 120Mhz */
-			W_REG(&regs->tsf_clk_frac_l, 0x8889);
-			W_REG(&regs->tsf_clk_frac_h, 0x8);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
 		}
 	} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
 		if (spurmode == WL_SPURAVOID_ON1) {	/* 82Mhz */
-			W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
-			W_REG(&regs->tsf_clk_frac_h, 0xC);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
 		} else {	/* 80Mhz */
-			W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
-			W_REG(&regs->tsf_clk_frac_h, 0xC);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
+			bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
 		}
 	}
 }
@@ -2215,11 +2161,8 @@
 static void brcms_c_gpio_init(struct brcms_c_info *wlc)
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
-	struct d11regs __iomem *regs;
 	u32 gc, gm;
 
-	regs = wlc_hw->regs;
-
 	/* use GPIO select 0 to get all gpio signals from the gpio out reg */
 	brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
 
@@ -2250,10 +2193,10 @@
 		 * The board itself is powered by these GPIOs
 		 * (when not sending pattern) so set them high
 		 */
-		OR_REG(&regs->psm_gpio_oe,
-		       (BOARD_GPIO_12 | BOARD_GPIO_13));
-		OR_REG(&regs->psm_gpio_out,
-		       (BOARD_GPIO_12 | BOARD_GPIO_13));
+		bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
+			   (BOARD_GPIO_12 | BOARD_GPIO_13));
+		bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
+			   (BOARD_GPIO_12 | BOARD_GPIO_13));
 
 		/* Enable antenna diversity, use 2x4 mode */
 		brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2280,7 +2223,7 @@
 static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
 			      const __le32 ucode[], const size_t nbytes)
 {
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	uint i;
 	uint count;
 
@@ -2288,10 +2231,11 @@
 
 	count = (nbytes / sizeof(u32));
 
-	W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
-	(void)R_REG(&regs->objaddr);
+	bcma_write32(core, D11REGOFFS(objaddr),
+		     OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
 	for (i = 0; i < count; i++)
-		W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+		bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
 
 }
 
@@ -2352,19 +2296,12 @@
 	wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
 }
 
-static void brcms_c_fatal_error(struct brcms_c_info *wlc)
-{
-	wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
-		  wlc->pub->unit);
-	brcms_init(wlc->wl);
-}
-
 static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
 {
 	bool fatal = false;
 	uint unit;
 	uint intstatus, idx;
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 
 	unit = wlc_hw->unit;
@@ -2372,7 +2309,9 @@
 	for (idx = 0; idx < NFIFO; idx++) {
 		/* read intstatus register and ignore any non-error bits */
 		intstatus =
-		    R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+			bcma_read32(core,
+				    D11REGOFFS(intctrlregs[idx].intstatus)) &
+			I_ERRORS;
 		if (!intstatus)
 			continue;
 
@@ -2414,11 +2353,12 @@
 		}
 
 		if (fatal) {
-			brcms_c_fatal_error(wlc_hw->wlc);	/* big hammer */
+			brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
 			break;
 		} else
-			W_REG(&regs->intctrlregs[idx].intstatus,
-			      intstatus);
+			bcma_write32(core,
+				     D11REGOFFS(intctrlregs[idx].intstatus),
+				     intstatus);
 	}
 }
 
@@ -2426,28 +2366,7 @@
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
 	wlc->macintmask = wlc->defmacintmask;
-	W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/*
- * callback for siutils.c, which has only wlc handler, no wl they both check
- * up, not only because there is no need to off/restore d11 interrupt but also
- * because per-port code may require sync with valid interrupt.
- */
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
-	if (!wlc->hw->up)
-		return 0;
-
-	return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
-	if (!wlc->hw->up)
-		return;
-
-	brcms_intrsrestore(wlc->wl, macintmask);
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
 }
 
 u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2460,8 +2379,8 @@
 
 	macintmask = wlc->macintmask;	/* isr can still happen */
 
-	W_REG(&wlc_hw->regs->macintmask, 0);
-	(void)R_REG(&wlc_hw->regs->macintmask);	/* sync readback */
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
+	(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
 	udelay(1);		/* ensure int line is no longer driven */
 	wlc->macintmask = 0;
 
@@ -2476,9 +2395,10 @@
 		return;
 
 	wlc->macintmask = macintmask;
-	W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+	bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
 }
 
+/* assumes that the d11 MAC is enabled */
 static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
 				    uint tx_fifo)
 {
@@ -2535,11 +2455,12 @@
 	}
 }
 
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
+/* precondition: requires the mac core to be enabled */
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
 {
 	static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
-	if (on) {
+	if (mute_tx) {
 		/* suspend tx fifos */
 		brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
 		brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
@@ -2561,14 +2482,20 @@
 				       wlc_hw->etheraddr);
 	}
 
-	wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
+	wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
 
-	if (on)
+	if (mute_tx)
 		brcms_c_ucode_mute_override_set(wlc_hw);
 	else
 		brcms_c_ucode_mute_override_clear(wlc_hw);
 }
 
+void
+brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
+{
+	brcms_b_mute(wlc->hw, mute_tx);
+}
+
 /*
  * Read and clear macintmask and macintstatus and intstatus registers.
  * This routine should be called with interrupts off
@@ -2580,11 +2507,11 @@
 static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 macintstatus;
 
 	/* macintstatus includes a DMA interrupt summary bit */
-	macintstatus = R_REG(&regs->macintstatus);
+	macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
 
 	BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
 		 macintstatus);
@@ -2611,12 +2538,12 @@
 	 * consequences
 	 */
 	/* turn off the interrupts */
-	W_REG(&regs->macintmask, 0);
-	(void)R_REG(&regs->macintmask);	/* sync readback */
+	bcma_write32(core, D11REGOFFS(macintmask), 0);
+	(void)bcma_read32(core, D11REGOFFS(macintmask));
 	wlc->macintmask = 0;
 
 	/* clear device interrupts */
-	W_REG(&regs->macintstatus, macintstatus);
+	bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
 
 	/* MI_DMAINT is indication of non-zero intstatus */
 	if (macintstatus & MI_DMAINT)
@@ -2625,8 +2552,8 @@
 		 * only fifo interrupt enabled is I_RI in
 		 * RX_FIFO. If MI_DMAINT is set, assume it
 		 * is set and clear the interrupt.
 		 */
-		W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
-		      DEF_RXINTMASK);
+		bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
+			     DEF_RXINTMASK);
 
 	return macintstatus;
 }
@@ -2689,7 +2616,7 @@
 void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 mc, mi;
 	struct wiphy *wiphy = wlc->wiphy;
 
@@ -2706,7 +2633,7 @@
 	/* force the core awake */
 	brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
 
-	mc = R_REG(&regs->maccontrol);
+	mc = bcma_read32(core, D11REGOFFS(maccontrol));
 
 	if (mc == 0xffffffff) {
 		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2718,7 +2645,7 @@
 	WARN_ON(!(mc & MCTL_PSM_RUN));
 	WARN_ON(!(mc & MCTL_EN_MAC));
 
-	mi = R_REG(&regs->macintstatus);
+	mi = bcma_read32(core, D11REGOFFS(macintstatus));
 	if (mi == 0xffffffff) {
 		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
 			  __func__);
@@ -2729,21 +2656,21 @@
 
 	brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
 
-	SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+	SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
 		 BRCMS_MAX_MAC_SUSPEND);
 
-	if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+	if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
 		wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
 			  " and MI_MACSSPNDD is still not on.\n",
 			  wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
 		wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
 			  "psm_brc 0x%04x\n", wlc_hw->unit,
-			  R_REG(&regs->psmdebug),
-			  R_REG(&regs->phydebug),
-			  R_REG(&regs->psm_brc));
+			  bcma_read32(core, D11REGOFFS(psmdebug)),
+			  bcma_read32(core, D11REGOFFS(phydebug)),
+			  bcma_read16(core, D11REGOFFS(psm_brc)));
 	}
 
-	mc = R_REG(&regs->maccontrol);
+	mc = bcma_read32(core, D11REGOFFS(maccontrol));
 	if (mc == 0xffffffff) {
 		wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
 			  __func__);
@@ -2758,7 +2685,7 @@
 void brcms_c_enable_mac(struct brcms_c_info *wlc)
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 mc, mi;
 
 	BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2771,20 +2698,20 @@
 	if (wlc_hw->mac_suspend_depth > 0)
 		return;
 
-	mc = R_REG(&regs->maccontrol);
+	mc = bcma_read32(core, D11REGOFFS(maccontrol));
 	WARN_ON(mc & MCTL_PSM_JMP_0);
 	WARN_ON(mc & MCTL_EN_MAC);
 	WARN_ON(!(mc & MCTL_PSM_RUN));
 
 	brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
-	W_REG(&regs->macintstatus, MI_MACSSPNDD);
+	bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
 
-	mc = R_REG(&regs->maccontrol);
+	mc = bcma_read32(core, D11REGOFFS(maccontrol));
 	WARN_ON(mc & MCTL_PSM_JMP_0);
 	WARN_ON(!(mc & MCTL_EN_MAC));
 	WARN_ON(!(mc & MCTL_PSM_RUN));
 
-	mi = R_REG(&regs->macintstatus);
+	mi = bcma_read32(core, D11REGOFFS(macintstatus));
 	WARN_ON(mi & MI_MACSSPNDD);
 
 	brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2801,55 +2728,53 @@
 
 static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
 {
-	struct d11regs __iomem *regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 w, val;
 	struct wiphy *wiphy = wlc_hw->wlc->wiphy;
 
 	BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
 
-	regs = wlc_hw->regs;
-
 	/* Validate dchip register access */
 
-	W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-	(void)R_REG(&regs->objaddr);
-	w = R_REG(&regs->objdata);
+	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	w = bcma_read32(core, D11REGOFFS(objdata));
 
 	/* Can we write and read back a 32bit register? */
-	W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-	(void)R_REG(&regs->objaddr);
-	W_REG(&regs->objdata, (u32) 0xaa5555aa);
+	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
 
-	W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-	(void)R_REG(&regs->objaddr);
-	val = R_REG(&regs->objdata);
+	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	val = bcma_read32(core, D11REGOFFS(objdata));
 	if (val != (u32) 0xaa5555aa) {
 		wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
 			  "expected 0xaa5555aa\n", wlc_hw->unit, val);
 		return false;
 	}
 
-	W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-	(void)R_REG(&regs->objaddr);
-	W_REG(&regs->objdata, (u32) 0x55aaaa55);
+	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
 
-	W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-	(void)R_REG(&regs->objaddr);
-	val = R_REG(&regs->objdata);
+	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	val = bcma_read32(core, D11REGOFFS(objdata));
 	if (val != (u32) 0x55aaaa55) {
 		wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
 			  "expected 0x55aaaa55\n", wlc_hw->unit, val);
 		return false;
 	}
 
-	W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
-	(void)R_REG(&regs->objaddr);
-	W_REG(&regs->objdata, w);
+	bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	bcma_write32(core, D11REGOFFS(objdata), w);
 
 	/* clear CFPStart */
-	W_REG(&regs->tsf_cfpstart, 0);
+	bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
 
-	w = R_REG(&regs->maccontrol);
+	w = bcma_read32(core, D11REGOFFS(maccontrol));
 	if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
 	    (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
 		wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
@@ -2866,38 +2791,38 @@
 
 void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
 {
-	struct d11regs __iomem *regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 tmp;
 
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
 	tmp = 0;
-	regs = wlc_hw->regs;
 
 	if (on) {
-		if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
-			OR_REG(&regs->clk_ctl_st,
-			       (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
-				CCS_ERSRC_REQ_PHYPLL));
-			SPINWAIT((R_REG(&regs->clk_ctl_st) &
-				  (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+		if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+			bcma_set32(core, D11REGOFFS(clk_ctl_st),
+				   CCS_ERSRC_REQ_HT |
+				   CCS_ERSRC_REQ_D11PLL |
+				   CCS_ERSRC_REQ_PHYPLL);
+			SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
+				  CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
 				 PHYPLL_WAIT_US);
 
-			tmp = R_REG(&regs->clk_ctl_st);
-			if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
-			    (CCS_ERSRC_AVAIL_HT))
+			tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
+			if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
 				wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
 					  " PLL failed\n", __func__);
 		} else {
-			OR_REG(&regs->clk_ctl_st,
-			       (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
-			SPINWAIT((R_REG(&regs->clk_ctl_st) &
+			bcma_set32(core, D11REGOFFS(clk_ctl_st),
+				   tmp | CCS_ERSRC_REQ_D11PLL |
+				   CCS_ERSRC_REQ_PHYPLL);
+			SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
 				  (CCS_ERSRC_AVAIL_D11PLL |
 				   CCS_ERSRC_AVAIL_PHYPLL)) !=
 				 (CCS_ERSRC_AVAIL_D11PLL |
 				  CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
 
-			tmp = R_REG(&regs->clk_ctl_st);
+			tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
 			if ((tmp &
 			     (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
 			    !=
@@ -2911,8 +2836,9 @@
 		 * be requesting it; so we'll deassert the request but
 		 * not wait for status to comply.
 		 */
-		AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
-		tmp = R_REG(&regs->clk_ctl_st);
+		bcma_mask32(core, D11REGOFFS(clk_ctl_st),
+			    ~CCS_ERSRC_REQ_PHYPLL);
+		(void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
 	}
 }
 
@@ -2940,7 +2866,7 @@
 	brcms_b_core_phypll_ctl(wlc_hw, false);
 
 	wlc_hw->clk = false;
-	ai_core_disable(wlc_hw->sih, 0);
+	bcma_core_disable(wlc_hw->d11core, 0);
 	wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
 }
 
@@ -2964,35 +2890,31 @@
 static u16
 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
 {
-	struct d11regs __iomem *regs = wlc_hw->regs;
-	u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
-	u16 __iomem *objdata_hi = objdata_lo + 1;
-	u16 v;
+	struct bcma_device *core = wlc_hw->d11core;
+	u16 objoff = D11REGOFFS(objdata);
 
-	W_REG(&regs->objaddr, sel | (offset >> 2));
-	(void)R_REG(&regs->objaddr);
+	bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
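+	/* the upper halfword of the 32-bit objdata register is at offset + 2 */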
 	if (offset & 2)
-		v = R_REG(objdata_hi);
-	else
-		v = R_REG(objdata_lo);
+		objoff += 2;
 
-	return v;
+	return bcma_read16(core, objoff);
 }
 
 static void
 brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
 		     u32 sel)
 {
-	struct d11regs __iomem *regs = wlc_hw->regs;
-	u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
-	u16 __iomem *objdata_hi = objdata_lo + 1;
+	struct bcma_device *core = wlc_hw->d11core;
+	u16 objoff = D11REGOFFS(objdata);
 
-	W_REG(&regs->objaddr, sel | (offset >> 2));
-	(void)R_REG(&regs->objaddr);
+	bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
 	if (offset & 2)
-		W_REG(objdata_hi, v);
-	else
-		W_REG(objdata_lo, v);
+		objoff += 2;
+
+	bcma_write16(core, objoff, v);
 }
 
 /*
@@ -3078,14 +3000,14 @@
 
 	/* write retry limit to SCR, shouldn't need to suspend */
 	if (wlc_hw->up) {
-		W_REG(&wlc_hw->regs->objaddr,
-		      OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
-		(void)R_REG(&wlc_hw->regs->objaddr);
-		W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
-		W_REG(&wlc_hw->regs->objaddr,
-		      OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
-		(void)R_REG(&wlc_hw->regs->objaddr);
-		W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+		bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+			     OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+		(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+		bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
+		bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+			     OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+		(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+		bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
 	}
 }
 
@@ -3132,7 +3054,7 @@
 		return false;
 
 	/* disallow PS when one of these meets when not scanning */
-	if (wlc->monitor)
+	if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
 		return false;
 
 	if (cfg->associated) {
@@ -3267,9 +3189,9 @@
 static void brcms_b_coreinit(struct brcms_c_info *wlc)
 {
 	struct brcms_hardware *wlc_hw = wlc->hw;
-	struct d11regs __iomem *regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	u32 sflags;
-	uint bcnint_us;
+	u32 bcnint_us;
 	uint i = 0;
 	bool fifosz_fixup = false;
 	int err = 0;
@@ -3277,8 +3199,6 @@
 	struct wiphy *wiphy = wlc->wiphy;
 	struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
 
-	regs = wlc_hw->regs;
-
 	BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
 	/* reset PSM */
@@ -3291,20 +3211,20 @@
 	fifosz_fixup = true;
 
 	/* let the PSM run to the suspended state, set mode to BSS STA */
-	W_REG(&regs->macintstatus, -1);
+	bcma_write32(core, D11REGOFFS(macintstatus), -1);
 	brcms_b_mctrl(wlc_hw, ~0,
 		       (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
 
 	/* wait for ucode to self-suspend after auto-init */
-	SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
-		 1000 * 1000);
-	if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+	SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
+		   MI_MACSSPNDD) == 0), 1000 * 1000);
+	if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
 		wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
 			  "suspend!\n", wlc_hw->unit);
 
 	brcms_c_gpio_init(wlc);
 
-	sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+	sflags = bcma_aread32(core, BCMA_IOST);
 
 	if (D11REV_IS(wlc_hw->corerev, 23)) {
 		if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3368,7 +3288,7 @@
 			  wlc_hw->xmtfifo_sz[i], i);
 
 	/* make sure we can still talk to the mac */
-	WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+	WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
 
 	/* band-specific inits done by wlc_bsinit() */
 
@@ -3377,7 +3297,7 @@
 	brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
 
 	/* enable one rx interrupt per received frame */
-	W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+	bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
 
 	/* set the station mode (BSS STA) */
 	brcms_b_mctrl(wlc_hw,
@@ -3386,19 +3306,21 @@
 
 	/* set up Beacon interval */
 	bcnint_us = 0x8000 << 10;
-	W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
-	W_REG(&regs->tsf_cfpstart, bcnint_us);
-	W_REG(&regs->macintstatus, MI_GP1);
+	bcma_write32(core, D11REGOFFS(tsf_cfprep),
+		     (bcnint_us << CFPREP_CBI_SHIFT));
+	bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
+	bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
 
 	/* write interrupt mask */
-	W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+	bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
+		     DEF_RXINTMASK);
 
 	/* allow the MAC to control the PHY clock (dynamic on/off) */
 	brcms_b_macphyclk_set(wlc_hw, ON);
 
 	/* program dynamic clock control fast powerup delay register */
 	wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
-	W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+	bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
 
 	/* tell the ucode the corerev */
 	brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3411,19 +3333,21 @@
 				      machwcap >> 16) & 0xffff));
 
 	/* write retry limits to SCR, this done after PSM init */
-	W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
-	(void)R_REG(&regs->objaddr);
-	W_REG(&regs->objdata, wlc_hw->SRL);
-	W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
-	(void)R_REG(&regs->objaddr);
-	W_REG(&regs->objdata, wlc_hw->LRL);
+	bcma_write32(core, D11REGOFFS(objaddr),
+		     OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
+	bcma_write32(core, D11REGOFFS(objaddr),
+		     OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+	(void)bcma_read32(core, D11REGOFFS(objaddr));
+	bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
 
 	/* write rate fallback retry limits */
 	brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
 	brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
 
-	AND_REG(&regs->ifs_ctl, 0x0FFF);
-	W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+	bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
+	bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
 
 	/* init the tx dma engines */
 	for (i = 0; i < NFIFO; i++) {
@@ -3437,8 +3361,7 @@
 }
 
 void
-static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
-			  bool mute) {
+static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
 	u32 macintmask;
 	bool fastclk;
 	struct brcms_c_info *wlc = wlc_hw->wlc;
@@ -3463,10 +3386,6 @@
 	/* core-specific initialization */
 	brcms_b_coreinit(wlc);
 
-	/* suspend the tx fifos and mute the phy for preism cac time */
-	if (mute)
-		brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
-
 	/* band-specific inits */
 	brcms_b_bsinit(wlc, chanspec);
 
@@ -3656,42 +3575,32 @@
 	brcms_c_set_phy_chanspec(wlc, chanspec);
 }
 
-static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
-{
-	if (wlc->bcnmisc_monitor)
-		brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
-	else
-		brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
-}
-
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
-	wlc->bcnmisc_monitor = promisc;
-	brcms_c_mac_bcn_promisc(wlc);
-}
-
-/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+/*
+ * Set or clear filtering related maccontrol bits based on
+ * specified filter flags
+ */
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
 {
 	u32 promisc_bits = 0;
 
-	/*
-	 * promiscuous mode just sets MCTL_PROMISC
-	 * Note: APs get all BSS traffic without the need to set
-	 * the MCTL_PROMISC bit since all BSS data traffic is
-	 * directed at the AP
-	 */
-	if (wlc->pub->promisc)
+	wlc->filter_flags = filter_flags;
+
+	if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
 		promisc_bits |= MCTL_PROMISC;
 
-	/* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
-	 * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
-	 * handled in brcms_c_mac_bcn_promisc()
-	 */
-	if (wlc->monitor)
-		promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
+	if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
+		promisc_bits |= MCTL_BCNS_PROMISC;
 
-	brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+	if (filter_flags & FIF_FCSFAIL)
+		promisc_bits |= MCTL_KEEPBADFCS;
+
+	if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+		promisc_bits |= MCTL_KEEPCONTROL;
+
+	brcms_b_mctrl(wlc->hw,
+		MCTL_PROMISC | MCTL_BCNS_PROMISC |
+		MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+		promisc_bits);
 }
 
 /*
@@ -3721,10 +3630,6 @@
 	} else {
 		/* disable an active IBSS if we are not on the home channel */
 	}
-
-	/* update the various promisc bits */
-	brcms_c_mac_bcn_promisc(wlc);
-	brcms_c_mac_promisc(wlc);
 }
 
 static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3899,7 +3804,7 @@
 
 	BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
 
-	v1 = R_REG(&wlc->regs->maccontrol);
+	v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
 	v2 = MCTL_WAKE;
 	if (hps)
 		v2 |= MCTL_HPS;
@@ -3979,7 +3884,7 @@
 
 void
 brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
-		      bool mute, struct txpwr_limits *txpwr)
+		      bool mute_tx, struct txpwr_limits *txpwr)
 {
 	uint bandunit;
 
@@ -4005,7 +3910,7 @@
 		}
 	}
 
-	wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
+	wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx);
 
 	if (!wlc_hw->up) {
 		if (wlc_hw->clk)
@@ -4017,7 +3922,7 @@
 		wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
 
 		/* Update muting of the channel */
-		brcms_b_mute(wlc_hw, mute, 0);
+		brcms_b_mute(wlc_hw, mute_tx);
 	}
 }
 
@@ -4205,7 +4110,7 @@
 	    EDCF_TXOP2USEC(acp_shm.txop);
 	acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
 
-	if (aci == AC_VI && acp_shm.txop == 0
+	if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
 	    && acp_shm.aifs < EDCF_AIFSN_MAX)
 		acp_shm.aifs++;
 
@@ -4218,7 +4123,8 @@
 		acp_shm.cwmax = params->cw_max;
 		acp_shm.cwcur = acp_shm.cwmin;
 		acp_shm.bslots =
-		    R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+			bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
+			acp_shm.cwcur;
 		acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
 		/* Indicate the new params to the ucode */
 		acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4242,7 +4148,7 @@
 	}
 }
 
-void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
+static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
 {
 	u16 aci;
 	int i_ac;
@@ -4255,7 +4161,7 @@
 	}; /* ucode needs these parameters during its initialization */
 	const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
 
-	for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
+	for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
 		/* find out which ac this set of params applies to */
 		aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
 
@@ -4277,17 +4183,6 @@
 	}
 }
 
-/* maintain LED behavior in down state */
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
-{
-	/*
-	 * maintain LEDs while in down state, turn on sbclk if
-	 * not available yet. Turn on sbclk if necessary
-	 */
-	brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP);
-	brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP);
-}
-
 static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
 {
 	/* Don't start the timer if HWRADIO feature is disabled */
@@ -4299,28 +4194,6 @@
 	brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
 }
 
-static void brcms_c_radio_disable(struct brcms_c_info *wlc)
-{
-	if (!wlc->pub->up) {
-		brcms_c_down_led_upd(wlc);
-		return;
-	}
-
-	brcms_c_radio_monitor_start(wlc);
-	brcms_down(wlc->wl);
-}
-
-static void brcms_c_radio_enable(struct brcms_c_info *wlc)
-{
-	if (wlc->pub->up)
-		return;
-
-	if (brcms_deviceremoved(wlc))
-		return;
-
-	brcms_up(wlc->wl);
-}
-
 static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
 {
 	if (!wlc->radio_monitor)
@@ -4343,18 +4216,6 @@
 		mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
 }
 
-/*
- * centralized radio disable/enable function,
- * invoke radio enable/disable after updating hwradio status
- */
-static void brcms_c_radio_upd(struct brcms_c_info *wlc)
-{
-	if (wlc->pub->radio_disabled)
-		brcms_c_radio_disable(wlc);
-	else
-		brcms_c_radio_enable(wlc);
-}
-
 /* update hwradio status and return it */
 bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
 {
@@ -4376,12 +4237,7 @@
 		return;
 	}
 
-	/* cap mpc off count */
-	if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
-		wlc->mpc_offcnt++;
-
 	brcms_c_radio_hwdisable_upd(wlc);
-	brcms_c_radio_upd(wlc);
 }
 
 /* common low-level watchdog code */
@@ -4407,60 +4263,6 @@
 	wlc_phy_watchdog(wlc_hw->band->pi);
 }
 
-static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
-{
-	bool mpc_radio, radio_state;
-
-	/*
-	 * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
-	 * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
-	 * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
-	 * the radio is going down.
-	 */
-	if (!wlc->mpc) {
-		if (!wlc->pub->radio_disabled)
-			return;
-		mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
-		brcms_c_radio_upd(wlc);
-		if (!wlc->pub->radio_disabled)
-			brcms_c_radio_monitor_stop(wlc);
-		return;
-	}
-
-	/*
-	 * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in
-	 * wlc->pub->radio_disabled to go ON, always call radio_upd
-	 * synchronously to go OFF, postpone radio_upd to later when
-	 * context is safe(e.g. watchdog)
-	 */
-	radio_state =
-	    (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
-	     ON);
-	mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
-
-	if (radio_state == ON && mpc_radio == OFF)
-		wlc->mpc_delay_off = wlc->mpc_dlycnt;
-	else if (radio_state == OFF && mpc_radio == ON) {
-		mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
-		brcms_c_radio_upd(wlc);
-		if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
-			wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
-		else
-			wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
-	}
-	/*
-	 * Below logic is meant to capture the transition from mpc off
-	 * to mpc on for reasons other than wlc->mpc_delay_off keeping
-	 * the mpc off. In that case reset wlc->mpc_delay_off to
-	 * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
-	 */
-	if ((wlc->prev_non_delay_mpc == false) &&
-	    (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off)
-		wlc->mpc_delay_off = wlc->mpc_dlycnt;
-
-	wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
-}
-
 /* common watchdog code */
 static void brcms_c_watchdog(void *arg)
 {
@@ -4481,21 +4283,7 @@
 	/* increment second count */
 	wlc->pub->now++;
 
-	/* delay radio disable */
-	if (wlc->mpc_delay_off) {
-		if (--wlc->mpc_delay_off == 0) {
-			mboolset(wlc->pub->radio_disabled,
-				 WL_RADIO_MPC_DISABLE);
-			if (wlc->mpc && brcms_c_ismpc(wlc))
-				wlc->mpc_offcnt = 0;
-		}
-	}
-
-	/* mpc sync */
-	brcms_c_radio_mpc_upd(wlc);
-	/* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
 	brcms_c_radio_hwdisable_upd(wlc);
-	brcms_c_radio_upd(wlc);
 	/* if radio is disabled, driver may be down, quit here */
 	if (wlc->pub->radio_disabled)
 		return;
@@ -4599,9 +4387,6 @@
 	/* WME QoS mode is Auto by default */
 	wlc->pub->_ampdu = AMPDU_AGG_HOST;
 	wlc->pub->bcmerror = 0;
-
-	/* initialize mpc delay */
-	wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
 }
 
 static uint brcms_c_attach_module(struct brcms_c_info *wlc)
@@ -4647,21 +4432,21 @@
  *    initialize software state for each core and band
  *    put the whole chip in reset (driver down state), no clock
  */
-static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
-			  uint unit, bool piomode, void __iomem *regsva,
-			  struct pci_dev *btparam)
+static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
+			  uint unit, bool piomode)
 {
 	struct brcms_hardware *wlc_hw;
-	struct d11regs __iomem *regs;
 	char *macaddr = NULL;
 	uint err = 0;
 	uint j;
 	bool wme = false;
 	struct shared_phy_params sha_params;
 	struct wiphy *wiphy = wlc->wiphy;
+	struct pci_dev *pcidev = core->bus->host_pci;
 
-	BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
-		device);
+	BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+	       pcidev->vendor,
+	       pcidev->device);
 
 	wme = true;
 
@@ -4678,7 +4463,7 @@
 	 * Do the hardware portion of the attach. Also initialize software
 	 * state that depends on the particular hardware we are running.
 	 */
-	wlc_hw->sih = ai_attach(regsva, btparam);
+	wlc_hw->sih = ai_attach(core->bus);
 	if (wlc_hw->sih == NULL) {
 		wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
 			  unit);
@@ -4687,25 +4472,19 @@
 	}
 
 	/* verify again the device is supported */
-	if (!brcms_c_chipmatch(vendor, device)) {
+	if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
 		wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
 			"vendor/device (0x%x/0x%x)\n",
-			 unit, vendor, device);
+			 unit, pcidev->vendor, pcidev->device);
 		err = 12;
 		goto fail;
 	}
 
-	wlc_hw->vendorid = vendor;
-	wlc_hw->deviceid = device;
+	wlc_hw->vendorid = pcidev->vendor;
+	wlc_hw->deviceid = pcidev->device;
 
-	/* set bar0 window to point at D11 core */
-	wlc_hw->regs = (struct d11regs __iomem *)
-				ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
-	wlc_hw->corerev = ai_corerev(wlc_hw->sih);
-
-	regs = wlc_hw->regs;
-
-	wlc->regs = wlc_hw->regs;
+	wlc_hw->d11core = core;
+	wlc_hw->corerev = core->id.rev;
 
 	/* validate chip, chiprev and corerev */
 	if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4740,8 +4519,9 @@
 	wlc_hw->boardrev = (u16) j;
 	if (!brcms_c_validboardtype(wlc_hw)) {
 		wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
-			"board type (0x%x)" " or revision level (0x%x)\n",
-			 unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+			  "board type (0x%x)" " or revision level (0x%x)\n",
+			  unit, ai_get_boardtype(wlc_hw->sih),
+			  wlc_hw->boardrev);
 		err = 15;
 		goto fail;
 	}
@@ -4762,7 +4542,7 @@
 	else
 		wlc_hw->_nbands = 1;
 
-	if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+	if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
 		wlc_hw->_nbands = 1;
 
 	/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4794,16 +4574,14 @@
 	sha_params.corerev = wlc_hw->corerev;
 	sha_params.vid = wlc_hw->vendorid;
 	sha_params.did = wlc_hw->deviceid;
-	sha_params.chip = wlc_hw->sih->chip;
-	sha_params.chiprev = wlc_hw->sih->chiprev;
-	sha_params.chippkg = wlc_hw->sih->chippkg;
+	sha_params.chip = ai_get_chip_id(wlc_hw->sih);
+	sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
+	sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
 	sha_params.sromrev = wlc_hw->sromrev;
-	sha_params.boardtype = wlc_hw->sih->boardtype;
+	sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
 	sha_params.boardrev = wlc_hw->boardrev;
-	sha_params.boardvendor = wlc_hw->sih->boardvendor;
 	sha_params.boardflags = wlc_hw->boardflags;
 	sha_params.boardflags2 = wlc_hw->boardflags2;
-	sha_params.buscorerev = wlc_hw->sih->buscorerev;
 
 	/* alloc and save pointer to shared phy state area */
 	wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4825,9 +4603,9 @@
 		wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
 		wlc->band->bandunit = j;
 		wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
-		wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+		wlc->core->coreidx = core->core_index;
 
-		wlc_hw->machwcap = R_REG(&regs->machwcap);
+		wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
 		wlc_hw->machwcap_backup = wlc_hw->machwcap;
 
 		/* init tx fifo size */
@@ -4836,7 +4614,7 @@
 
 		/* Get a phy for this band */
 		wlc_hw->band->pi =
-			wlc_phy_attach(wlc_hw->phy_sh, regs,
+			wlc_phy_attach(wlc_hw->phy_sh, core,
 				       wlc_hw->band->bandtype,
 				       wlc->wiphy);
 		if (wlc_hw->band->pi == NULL) {
@@ -4910,10 +4688,6 @@
 	/* Match driver "down" state */
 	ai_pci_down(wlc_hw->sih);
 
-	/* register sb interrupt callback functions */
-	ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
-				  (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
 	/* turn off pll and xtal to match driver "down" state */
 	brcms_b_xtal(wlc_hw, OFF);
 
@@ -4944,10 +4718,9 @@
 		goto fail;
 	}
 
-	BCMMSG(wlc->wiphy,
-		 "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
-		 wlc_hw->deviceid, wlc_hw->_nbands,
-		 wlc_hw->sih->boardtype, macaddr);
+	BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+	       wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
+	       macaddr);
 
 	return err;
 
@@ -5185,7 +4958,6 @@
 		 * and per-port interrupt object may have been freed. This must
 		 * be done before sb core switch
 		 */
-		ai_deregister_intr_callback(wlc_hw->sih);
 		ai_pci_sleep(wlc_hw->sih);
 	}
 
@@ -5259,9 +5031,6 @@
 {
 	/* STA-BSS; short capable */
 	wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
-
-	/* fixup mpc */
-	wlc->mpc = true;
 }
 
 /* Initialize just the hardware when coming out of POR or S3/S5 system states */
@@ -5283,13 +5052,11 @@
 	ai_pci_fixcfg(wlc_hw->sih);
 
 	/*
+	 * TODO: test suspend/resume
+	 *
 	 * AI chip doesn't restore bar0win2 on
 	 * hibernation/resume, need sw fixup
 	 */
-	if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
-	    (wlc_hw->sih->chip == BCM43225_CHIP_ID))
-		wlc_hw->regs = (struct d11regs __iomem *)
-				ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
 
 	/*
 	 * Inform phy that a POR reset has occurred so
@@ -5301,7 +5068,7 @@
 	wlc_hw->wlc->pub->hw_up = true;
 
 	if ((wlc_hw->boardflags & BFL_FEM)
-	    && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+	    && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
 		if (!
 		    (wlc_hw->boardrev >= 0x1250
 		     && (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5376,7 +5143,7 @@
 	if (!wlc->clk)
 		return;
 
-	for (ac = 0; ac < AC_COUNT; ac++)
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 		brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
 				  wlc->wme_retries[ac]);
 }
@@ -5396,7 +5163,7 @@
 	}
 
 	if ((wlc->pub->boardflags & BFL_FEM)
-	    && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+	    && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
 		if (wlc->pub->boardrev >= 0x1250
 		    && (wlc->pub->boardflags & BFL_FEM_BT))
 			brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5533,9 +5300,9 @@
 	} else {
 
 		/* Reset and disable the core */
-		if (ai_iscoreup(wlc_hw->sih)) {
-			if (R_REG(&wlc_hw->regs->maccontrol) &
-			    MCTL_EN_MAC)
+		if (bcma_core_is_enabled(wlc_hw->d11core)) {
+			if (bcma_read32(wlc_hw->d11core,
+					D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
 				brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
 			callbacks += brcms_reset(wlc_hw->wlc->wl);
 			brcms_c_coredisable(wlc_hw);
@@ -5575,7 +5342,6 @@
 	if (!wlc->pub->up)
 		return callbacks;
 
-	/* in between, mpc could try to bring down again.. */
 	wlc->going_down = true;
 
 	callbacks += brcms_b_bmac_down_prep(wlc->hw);
@@ -5852,7 +5618,7 @@
 
 	brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
 
-	for (ac = 0; ac < AC_COUNT; ac++) {
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		wlc->wme_retries[ac] =	SFIELD(wlc->wme_retries[ac],
 					       EDCF_SHORT,  wlc->SRL);
 		wlc->wme_retries[ac] =	SFIELD(wlc->wme_retries[ac],
@@ -6103,7 +5869,6 @@
 
 	u8 *rtsph = txh->RTSPhyHeader;
 	struct ieee80211_rts rts = txh->rts_frame;
-	char hexbuf[256];
 
 	/* add plcp header along with txh descriptor */
 	printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
@@ -6124,17 +5889,16 @@
 	printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
 	printk(KERN_DEBUG "\n");
 
-	brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
-	printk(KERN_DEBUG "SecIV:       %s\n", hexbuf);
-	brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
-	printk(KERN_DEBUG "RA:          %s\n", hexbuf);
+	print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
+	print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
+			     ra, sizeof(txh->TxFrameRA));
 
 	printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
-	brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
-	printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+	print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
+			     rtspfb, sizeof(txh->RTSPLCPFallback));
 	printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
-	brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
-	printk(KERN_DEBUG "PLCP: %s ", hexbuf);
+	print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
+			     fragpfb, sizeof(txh->FragPLCPFallback));
 	printk(KERN_DEBUG "DUR: %04x", fragdfb);
 	printk(KERN_DEBUG "\n");
 
@@ -6149,18 +5913,18 @@
 	printk(KERN_DEBUG "MaxAggbyte_fb:  %04x\n", mabyte_f);
 	printk(KERN_DEBUG "MinByte:     %04x\n", mmbyte);
 
-	brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
-	printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
-	brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
-	printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
+	print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
+			     rtsph, sizeof(txh->RTSPhyHeader));
+	print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
+			     (u8 *)&rts, sizeof(txh->rts_frame));
 	printk(KERN_DEBUG "\n");
 }
 #endif				/* defined(BCMDBG) */
 
 #if defined(BCMDBG)
-int
+static int
 brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
-		   int len)
+		     int len)
 {
 	int i;
 	char *p = buf;
@@ -6916,7 +6680,7 @@
 	qos = ieee80211_is_data_qos(h->frame_control);
 
 	/* compute length of frame in bytes for use in PLCP computations */
-	len = brcmu_pkttotlen(p);
+	len = p->len;
 	phylen = len + FCS_LEN;
 
 	/* Get tx_info */
@@ -7695,11 +7459,11 @@
 brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
 		  u32 *tsf_h_ptr)
 {
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 
 	/* read the tsf timer low, then high to get an atomic read */
-	*tsf_l_ptr = R_REG(&regs->tsf_timerlow);
-	*tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+	*tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
+	*tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
 }
 
 /*
@@ -8253,12 +8017,6 @@
 	return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
 }
 
-void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc)
-{
-	wlc->mpc = mpc;
-	brcms_c_radio_mpc_upd(wlc);
-}
-
 /* Process received frames */
 /*
  * Return true if more frames need to be processed, false otherwise.
@@ -8293,14 +8051,8 @@
 	len = p->len;
 
 	if (rxh->RxStatus1 & RXS_FCSERR) {
-		if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
-			wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
-				  " tossing\n");
+		if (!(wlc->filter_flags & FIF_FCSFAIL))
 			goto toss;
-		} else {
-			wiphy_err(wlc->wiphy, "RCSERR!!!\n");
-			goto toss;
-		}
 	}
 
 	/* check received pkt has at least frame control field */
@@ -8328,21 +8080,17 @@
 brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
 {
 	struct sk_buff *p;
-	struct sk_buff *head = NULL;
-	struct sk_buff *tail = NULL;
+	struct sk_buff *next = NULL;
+	struct sk_buff_head recv_frames;
+
 	uint n = 0;
 	uint bound_limit = bound ? RXBND : -1;
 
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-	/* gather received frames */
-	while ((p = dma_rx(wlc_hw->di[fifo]))) {
+	skb_queue_head_init(&recv_frames);
 
-		if (!tail)
-			head = tail = p;
-		else {
-			tail->prev = p;
-			tail = p;
-		}
+	/* gather received frames */
+	while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
 
 		/* !give others some time to run! */
 		if (++n >= bound_limit)
@@ -8353,12 +8101,11 @@
 	dma_rxfill(wlc_hw->di[fifo]);
 
 	/* process each frame */
-	while ((p = head) != NULL) {
+	skb_queue_walk_safe(&recv_frames, p, next) {
 		struct d11rxhdr_le *rxh_le;
 		struct d11rxhdr *rxh;
-		head = head->prev;
-		p->prev = NULL;
 
+		skb_unlink(p, &recv_frames);
 		rxh_le = (struct d11rxhdr_le *)p->data;
 		rxh = (struct d11rxhdr *)p->data;
 
@@ -8389,7 +8136,7 @@
 {
 	u32 macintstatus;
 	struct brcms_hardware *wlc_hw = wlc->hw;
-	struct d11regs __iomem *regs = wlc_hw->regs;
+	struct bcma_device *core = wlc_hw->d11core;
 	struct wiphy *wiphy = wlc->wiphy;
 
 	if (brcms_deviceremoved(wlc)) {
@@ -8425,7 +8172,7 @@
 	/* ATIM window end */
 	if (macintstatus & MI_ATIMWINEND) {
 		BCMMSG(wlc->wiphy, "end of ATIM window\n");
-		OR_REG(&regs->maccommand, wlc->qvalid);
+		bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
 		wlc->qvalid = 0;
 	}
 
@@ -8443,18 +8190,17 @@
 
 	if (macintstatus & MI_GP0) {
 		wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
-			"(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+			  "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
 
 		printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
-					__func__, wlc_hw->sih->chip,
-					wlc_hw->sih->chiprev);
-		/* big hammer */
-		brcms_init(wlc->wl);
+			    __func__, ai_get_chip_id(wlc_hw->sih),
+			    ai_get_chiprev(wlc_hw->sih));
+		brcms_fatal_error(wlc_hw->wlc->wl);
 	}
 
 	/* gptimer timeout */
 	if (macintstatus & MI_TO)
-		W_REG(&regs->gptimer, 0);
+		bcma_write32(core, D11REGOFFS(gptimer), 0);
 
 	if (macintstatus & MI_RFDISABLE) {
 		BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8470,20 +8216,17 @@
 	return wlc->macintstatus != 0;
 
  fatal:
-	brcms_init(wlc->wl);
+	brcms_fatal_error(wlc_hw->wlc->wl);
 	return wlc->macintstatus != 0;
 }
 
-void brcms_c_init(struct brcms_c_info *wlc)
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
 {
-	struct d11regs __iomem *regs;
+	struct bcma_device *core = wlc->hw->d11core;
 	u16 chanspec;
-	bool mute = false;
 
 	BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
 
-	regs = wlc->regs;
-
 	/*
 	 * This will happen if a big-hammer was executed. In
 	 * that case, we want to go back to the channel that
@@ -8494,7 +8237,7 @@
 	else
 		chanspec = brcms_c_init_chanspec(wlc);
 
-	brcms_b_init(wlc->hw, chanspec, mute);
+	brcms_b_init(wlc->hw, chanspec);
 
 	/* update beacon listen interval */
 	brcms_c_bcn_li_upd(wlc);
@@ -8513,8 +8256,8 @@
 		 * update since init path would reset
 		 * to default value
 		 */
-		W_REG(&regs->tsf_cfprep,
-		      (bi << CFPREP_CBI_SHIFT));
+		bcma_write32(core, D11REGOFFS(tsf_cfprep),
+			     bi << CFPREP_CBI_SHIFT);
 
 		/* Update maccontrol PM related bits */
 		brcms_c_set_ps_ctrl(wlc);
@@ -8544,7 +8287,7 @@
 	brcms_c_bsinit(wlc);
 
 	/* Enable EDCF mode (while the MAC is suspended) */
-	OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+	bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
 	brcms_c_edcf_setparams(wlc, false);
 
 	/* Init precedence maps for empty FIFOs */
@@ -8560,14 +8303,15 @@
 	/* ..now really unleash hell (allow the MAC out of suspend) */
 	brcms_c_enable_mac(wlc);
 
+	/* suspend the tx fifos and mute the phy for preism cac time */
+	if (mute_tx)
+		brcms_b_mute(wlc->hw, true);
+
 	/* clear tx flow control */
 	brcms_c_txflowcontrol_reset(wlc);
 
 	/* enable the RF Disable Delay timer */
-	W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
-
-	/* initialize mpc delay */
-	wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+	bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
 
 	/*
 	 * Initialize WME parameters; if they haven't been set by some other
@@ -8577,7 +8321,7 @@
 		/* Uninitialized; read from HW */
 		int ac;
 
-		for (ac = 0; ac < AC_COUNT; ac++)
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			wlc->wme_retries[ac] =
 			    brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
 	}
@@ -8587,9 +8331,8 @@
  * The common driver entry routine. Error codes should be unique
  */
 struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
-	       bool piomode, void __iomem *regsva, struct pci_dev *btparam,
-	       uint *perr)
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+	       bool piomode, uint *perr)
 {
 	struct brcms_c_info *wlc;
 	uint err = 0;
@@ -8597,7 +8340,7 @@
 	struct brcms_pub *pub;
 
 	/* allocate struct brcms_c_info state and its substructures */
-	wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+	wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
 	if (wlc == NULL)
 		goto fail;
 	wlc->wiphy = wl->wiphy;
@@ -8624,8 +8367,7 @@
 	 * low level attach steps (all hw accesses go
 	 * inside, none in the rest of the attach)
 	 */
-	err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
-			     btparam);
+	err = brcms_b_attach(wlc, core, unit, piomode);
 	if (err)
 		goto fail;
 
@@ -8754,8 +8496,6 @@
 		brcms_c_ht_update_sgi_rx(wlc, 0);
 	}
 
-	/* initialize radio_mpc_disable according to wlc->mpc */
-	brcms_c_radio_mpc_upd(wlc);
 	brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
 
 	if (perr)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index c0e0fcf..adb136e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -44,8 +44,6 @@
 /* transmit buffer max headroom for protocol headers */
 #define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
 
-#define AC_COUNT		4
-
 /* Macros for doing definition and get/set of bitfields
  * Usage example, e.g. a three-bit field (bits 4-6):
  *    #define <NAME>_M	BITFIELD_MASK(3)
@@ -336,7 +334,7 @@
 	u32 machwcap_backup;	/* backup of machwcap */
 
 	struct si_pub *sih;	/* SI handle (cookie for siutils calls) */
-	struct d11regs __iomem *regs;	/* pointer to device registers */
+	struct bcma_device *d11core;	/* pointer to 802.11 core */
 	struct phy_shim_info *physhim; /* phy shim layer handler */
 	struct shared_phy *phy_sh;	/* pointer to shared phy state */
 	struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -402,7 +400,6 @@
  *
  * pub: pointer to driver public state.
  * wl: pointer to specific private state.
- * regs: pointer to device registers.
  * hw: HW related state.
  * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
  * fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -427,11 +424,6 @@
  * bandinit_pending: track band init in auto band.
  * radio_monitor: radio timer is running.
  * going_down: down path intermediate variable.
- * mpc: enable minimum power consumption.
- * mpc_dlycnt: # of watchdog cnt before turn disable radio.
- * mpc_offcnt: # of watchdog cnt that radio is disabled.
- * mpc_delay_off: delay radio disable by # of watchdog cnt.
- * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc.
  * wdtimer: timer for watchdog routine.
  * radio_timer: timer for hw radio button monitor routine.
  * monitor: monitor (MPDU sniffing) mode.
@@ -441,7 +433,7 @@
  * bcn_li_dtim: beacon listen interval in # dtims.
  * WDarmed: watchdog timer is armed.
  * WDlast: last time wlc_watchdog() was called.
- * edcf_txop[AC_COUNT]: current txop for each ac.
+ * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
  * wme_retries: per-AC retry limits.
  * tx_prec_map: Precedence map based on HW FIFO space.
  * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
@@ -484,7 +476,6 @@
 struct brcms_c_info {
 	struct brcms_pub *pub;
 	struct brcms_info *wl;
-	struct d11regs __iomem *regs;
 	struct brcms_hardware *hw;
 
 	/* clock */
@@ -522,18 +513,11 @@
 	bool radio_monitor;
 	bool going_down;
 
-	bool mpc;
-	u8 mpc_dlycnt;
-	u8 mpc_offcnt;
-	u8 mpc_delay_off;
-	u8 prev_non_delay_mpc;
-
 	struct brcms_timer *wdtimer;
 	struct brcms_timer *radio_timer;
 
 	/* promiscuous */
-	bool monitor;
-	bool bcnmisc_monitor;
+	uint filter_flags;
 
 	/* driver feature */
 	bool _rifs;
@@ -546,9 +530,9 @@
 	u32 WDlast;
 
 	/* WME */
-	u16 edcf_txop[AC_COUNT];
+	u16 edcf_txop[IEEE80211_NUM_ACS];
 
-	u16 wme_retries[AC_COUNT];
+	u16 wme_retries[IEEE80211_NUM_ACS];
 	u16 tx_prec_map;
 	u16 fifo2prec_map[NFIFO];
 
@@ -671,8 +655,7 @@
 #endif
 
 extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
-					   bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
 extern void brcms_c_send_q(struct brcms_c_info *wlc);
 extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
 			    uint *fifo);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 0bcb267..7fad6dc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -139,6 +139,9 @@
 #define SRSH_PI_MASK	0xf000	/* bit 15:12 */
 #define SRSH_PI_SHIFT	12	/* bit 15:12 */
 
+#define PCIREGOFFS(field)	offsetof(struct sbpciregs, field)
+#define PCIEREGOFFS(field)	offsetof(struct sbpcieregs, field)
+
 /* Sonics side: PCI core and host control registers */
 struct sbpciregs {
 	u32 control;		/* PCI control */
@@ -205,11 +208,7 @@
 };
 
 struct pcicore_info {
-	union {
-		struct sbpcieregs __iomem *pcieregs;
-		struct sbpciregs __iomem *pciregs;
-	} regs;			/* Memory mapped register to the core */
-
+	struct bcma_device *core;
 	struct si_pub *sih;	/* System interconnect handle */
 	struct pci_dev *dev;
 	u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@
 };
 
 #define PCIE_ASPM(sih)							\
-	(((sih)->buscoretype == PCIE_CORE_ID) &&			\
-	 (((sih)->buscorerev >= 3) &&					\
-	  ((sih)->buscorerev <= 5)))
+	((ai_get_buscoretype(sih) == PCIE_CORE_ID) &&			\
+	 ((ai_get_buscorerev(sih) >= 3) &&				\
+	  (ai_get_buscorerev(sih) <= 5)))
 
 
 /* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@
 /* Initialize the PCI core.
  * It's the caller's responsibility to make sure that this is done only once
  */
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
-				  void __iomem *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
 {
 	struct pcicore_info *pi;
 
@@ -249,17 +247,15 @@
 		return NULL;
 
 	pi->sih = sih;
-	pi->dev = pdev;
+	pi->dev = core->bus->host_pci;
+	pi->core = core;
 
-	if (sih->buscoretype == PCIE_CORE_ID) {
+	if (core->id.id == PCIE_CORE_ID) {
 		u8 cap_ptr;
-		pi->regs.pcieregs = regs;
 		cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
 						      NULL, NULL);
 		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
-	} else
-		pi->regs.pciregs = regs;
-
+	}
 	return pi;
 }
 
@@ -334,37 +330,37 @@
 
 /* ***** Register Access API */
 static uint
-pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
 {
 	uint retval = 0xFFFFFFFF;
 
 	switch (addrtype) {
 	case PCIE_CONFIGREGS:
-		W_REG(&pcieregs->configaddr, offset);
-		(void)R_REG((&pcieregs->configaddr));
-		retval = R_REG(&pcieregs->configdata);
+		bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+		(void)bcma_read32(core, PCIEREGOFFS(configaddr));
+		retval = bcma_read32(core, PCIEREGOFFS(configdata));
 		break;
 	case PCIE_PCIEREGS:
-		W_REG(&pcieregs->pcieindaddr, offset);
-		(void)R_REG(&pcieregs->pcieindaddr);
-		retval = R_REG(&pcieregs->pcieinddata);
+		bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+		(void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
+		retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
 		break;
 	}
 
 	return retval;
 }
 
-static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+static uint pcie_writereg(struct bcma_device *core, uint addrtype,
 			  uint offset, uint val)
 {
 	switch (addrtype) {
 	case PCIE_CONFIGREGS:
-		W_REG((&pcieregs->configaddr), offset);
-		W_REG((&pcieregs->configdata), val);
+		bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+		bcma_write32(core, PCIEREGOFFS(configdata), val);
 		break;
 	case PCIE_PCIEREGS:
-		W_REG((&pcieregs->pcieindaddr), offset);
-		W_REG((&pcieregs->pcieinddata), val);
+		bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+		bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
 		break;
 	default:
 		break;
@@ -374,7 +370,6 @@
 
 static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
 {
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 	uint mdiodata, i = 0;
 	uint pcie_serdes_spinwait = 200;
 
@@ -382,12 +377,13 @@
 		    (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
 		    (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
 		    (blk << 4));
-	W_REG(&pcieregs->mdiodata, mdiodata);
+	bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
 
 	pr28829_delay();
 	/* retry till the transaction is complete */
 	while (i < pcie_serdes_spinwait) {
-		if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
+		if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+		    MDIOCTL_ACCESS_DONE)
 			break;
 
 		udelay(1000);
@@ -404,15 +400,15 @@
 pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
 	    uint *val)
 {
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 	uint mdiodata;
 	uint i = 0;
 	uint pcie_serdes_spinwait = 10;
 
 	/* enable mdio access to SERDES */
-	W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+	bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
+		     MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
 
-	if (pi->sih->buscorerev >= 10) {
+	if (ai_get_buscorerev(pi->sih) >= 10) {
 		/* new serdes is slower in rw,
 		 * using two layers of reg address mapping
 		 */
@@ -432,20 +428,22 @@
 		mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
 			     *val);
 
-	W_REG(&pcieregs->mdiodata, mdiodata);
+	bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
 
 	pr28829_delay();
 
 	/* retry till the transaction is complete */
 	while (i < pcie_serdes_spinwait) {
-		if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
+		if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+		    MDIOCTL_ACCESS_DONE) {
 			if (!write) {
 				pr28829_delay();
-				*val = (R_REG(&pcieregs->mdiodata) &
+				*val = (bcma_read32(pi->core,
+						    PCIEREGOFFS(mdiodata)) &
 					MDIODATA_MASK);
 			}
 			/* Disable mdio access to SERDES */
-			W_REG(&pcieregs->mdiocontrol, 0);
+			bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
 			return 0;
 		}
 		udelay(1000);
@@ -453,7 +451,7 @@
 	}
 
 	/* Timed out. Disable mdio access to SERDES. */
-	W_REG(&pcieregs->mdiocontrol, 0);
+	bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
 	return 1;
 }
 
@@ -502,18 +500,18 @@
 {
 	u32 w;
 	struct si_pub *sih = pi->sih;
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 
-	if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
+	if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
+	    ai_get_buscorerev(sih) < 7)
 		return;
 
-	w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
 	if (extend)
 		w |= PCIE_ASPMTIMER_EXTEND;
 	else
 		w &= ~PCIE_ASPMTIMER_EXTEND;
-	pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
-	w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+	pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
 }
 
 /* centralized clkreq control policy */
@@ -527,25 +525,27 @@
 			pcie_clkreq(pi, 1, 0);
 		break;
 	case SI_PCIDOWN:
-		if (sih->buscorerev == 6) {	/* turn on serdes PLL down */
-			ai_corereg(sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, chipcontrol_addr),
-				   ~0, 0);
-			ai_corereg(sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, chipcontrol_data),
-				   ~0x40, 0);
+		/* turn on serdes PLL down */
+		if (ai_get_buscorerev(sih) == 6) {
+			ai_cc_reg(sih,
+				  offsetof(struct chipcregs, chipcontrol_addr),
+				  ~0, 0);
+			ai_cc_reg(sih,
+				  offsetof(struct chipcregs, chipcontrol_data),
+				  ~0x40, 0);
 		} else if (pi->pcie_pr42767) {
 			pcie_clkreq(pi, 1, 1);
 		}
 		break;
 	case SI_PCIUP:
-		if (sih->buscorerev == 6) {	/* turn off serdes PLL down */
-			ai_corereg(sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, chipcontrol_addr),
-				   ~0, 0);
-			ai_corereg(sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, chipcontrol_data),
-				   ~0x40, 0x40);
+		/* turn off serdes PLL down */
+		if (ai_get_buscorerev(sih) == 6) {
+			ai_cc_reg(sih,
+				  offsetof(struct chipcregs, chipcontrol_addr),
+				  ~0, 0);
+			ai_cc_reg(sih,
+				  offsetof(struct chipcregs, chipcontrol_data),
+				  ~0x40, 0x40);
 		} else if (PCIE_ASPM(sih)) {	/* disable clkreq */
 			pcie_clkreq(pi, 1, 0);
 		}
@@ -562,7 +562,7 @@
 	if (pi->pcie_polarity != 0)
 		return;
 
-	w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
+	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
 
 	/* Detect the current polarity at attach and force that polarity and
 	 * disable changing the polarity
@@ -581,18 +581,15 @@
  */
 static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
 {
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 	struct si_pub *sih = pi->sih;
 	u16 val16;
-	u16 __iomem *reg16;
 	u32 w;
 
 	if (!PCIE_ASPM(sih))
 		return;
 
 	/* bypass this on QT or VSIM */
-	reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
-	val16 = R_REG(reg16);
+	val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
 
 	val16 &= ~SRSH_ASPM_ENB;
 	if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@
 	else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
 		val16 |= SRSH_ASPM_L0s_ENB;
 
-	W_REG(reg16, val16);
+	bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
 
 	pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
 	w &= ~PCIE_ASPM_ENAB;
 	w |= pi->pcie_war_aspm_ovr;
 	pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
 
-	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
-	val16 = R_REG(reg16);
+	val16 = bcma_read16(pi->core,
+			    PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
 
 	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
 		val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@
 	} else
 		val16 &= ~SRSH_CLKREQ_ENB;
 
-	W_REG(reg16, val16);
+	bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
+		     val16);
 }
 
 /* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void pcie_misc_config_fixup(struct pcicore_info *pi)
 {
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 	u16 val16;
-	u16 __iomem *reg16;
 
-	reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
-	val16 = R_REG(reg16);
+	val16 = bcma_read16(pi->core,
+			    PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
 
 	if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
 		val16 |= SRSH_L23READY_EXIT_NOPERST;
-		W_REG(reg16, val16);
+		bcma_write16(pi->core,
+			     PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
 	}
 }
 
@@ -659,62 +656,57 @@
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void pcie_war_noplldown(struct pcicore_info *pi)
 {
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
-	u16 __iomem *reg16;
-
 	/* turn off serdes PLL down */
-	ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
-		   CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+	ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
+		  CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
 
 	/* clear srom shadow backdoor */
-	reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
-	W_REG(reg16, 0);
+	bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
 }
 
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void pcie_war_pci_setup(struct pcicore_info *pi)
 {
 	struct si_pub *sih = pi->sih;
-	struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
 	u32 w;
 
-	if (sih->buscorerev == 0 || sih->buscorerev == 1) {
-		w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+	if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
+		w = pcie_readreg(pi->core, PCIE_PCIEREGS,
 				 PCIE_TLP_WORKAROUNDSREG);
 		w |= 0x8;
-		pcie_writereg(pcieregs, PCIE_PCIEREGS,
+		pcie_writereg(pi->core, PCIE_PCIEREGS,
 			      PCIE_TLP_WORKAROUNDSREG, w);
 	}
 
-	if (sih->buscorerev == 1) {
-		w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+	if (ai_get_buscorerev(sih) == 1) {
+		w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
 		w |= 0x40;
-		pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+		pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
 	}
 
-	if (sih->buscorerev == 0) {
+	if (ai_get_buscorerev(sih) == 0) {
 		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
 		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
 		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
 	} else if (PCIE_ASPM(sih)) {
 		/* Change the L1 threshold for better performance */
-		w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+		w = pcie_readreg(pi->core, PCIE_PCIEREGS,
 				 PCIE_DLLP_PMTHRESHREG);
 		w &= ~PCIE_L1THRESHOLDTIME_MASK;
 		w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
-		pcie_writereg(pcieregs, PCIE_PCIEREGS,
+		pcie_writereg(pi->core, PCIE_PCIEREGS,
 			      PCIE_DLLP_PMTHRESHREG, w);
 
 		pcie_war_serdes(pi);
 
 		pcie_war_aspm_clkreq(pi);
-	} else if (pi->sih->buscorerev == 7)
+	} else if (ai_get_buscorerev(pi->sih) == 7)
 		pcie_war_noplldown(pi);
 
 	/* Note that the fix is actually in the SROM,
 	 * that's why this is open-ended
 	 */
-	if (pi->sih->buscorerev >= 6)
+	if (ai_get_buscorerev(pi->sih) >= 6)
 		pcie_misc_config_fixup(pi);
 }
 
@@ -745,7 +737,7 @@
 
 void pcicore_hwup(struct pcicore_info *pi)
 {
-	if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
 		return;
 
 	pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@
 
 void pcicore_up(struct pcicore_info *pi, int state)
 {
-	if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
 		return;
 
 	/* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@
 
 void pcicore_down(struct pcicore_info *pi, int state)
 {
-	if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
 		return;
 
 	pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@
 	pcie_extendL1timer(pi, false);
 }
 
-/* precondition: current core is sii->buscoretype */
-static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
+void pcicore_fixcfg(struct pcicore_info *pi)
 {
-	struct si_info *sii = (struct si_info *)(pi->sih);
+	struct bcma_device *core = pi->core;
 	u16 val16;
-	uint pciidx;
+	uint regoff;
 
-	pciidx = ai_coreidx(&sii->pub);
-	val16 = R_REG(reg16);
-	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
-		val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
-			(val16 & ~SRSH_PI_MASK);
-		W_REG(reg16, val16);
+	switch (pi->core->id.id) {
+	case BCMA_CORE_PCI:
+		regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
+		break;
+
+	case BCMA_CORE_PCIE:
+		regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
+		break;
+
+	default:
+		return;
 	}
-}
 
-void
-pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
-{
-	pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
-}
-
-void pcicore_fixcfg_pcie(struct pcicore_info *pi,
-			 struct sbpcieregs __iomem *pcieregs)
-{
-	pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+	val16 = bcma_read16(pi->core, regoff);
+	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
+	    (u16)core->core_index) {
+		val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
+			(val16 & ~SRSH_PI_MASK);
+		bcma_write16(pi->core, regoff, val16);
+	}
 }
 
 /* precondition: current core is pci core */
 void
-pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+pcicore_pci_setup(struct pcicore_info *pi)
 {
-	u32 w;
+	bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+		   SBTOPCI_PREF | SBTOPCI_BURST);
 
-	OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-
-	if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
-		OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
-		w = R_REG(&pciregs->clkrun);
-		W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
-		w = R_REG(&pciregs->clkrun);
+	if (pi->core->id.rev >= 11) {
+		bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+			   SBTOPCI_RC_READMULTI);
+		bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
+		(void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
 	}
 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
index 58aa80d..9fc3ead 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -62,8 +62,7 @@
 struct sbpcieregs;
 
 extern struct pcicore_info *pcicore_init(struct si_pub *sih,
-					 struct pci_dev *pdev,
-					 void __iomem *regs);
+					 struct bcma_device *core);
 extern void pcicore_deinit(struct pcicore_info *pch);
 extern void pcicore_attach(struct pcicore_info *pch, int state);
 extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@
 extern void pcicore_down(struct pcicore_info *pch, int state);
 extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
 				      unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
-			       struct sbpciregs __iomem *pciregs);
-extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
-				struct sbpcieregs __iomem *pciregs);
-extern void pcicore_pci_setup(struct pcicore_info *pch,
-			      struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg(struct pcicore_info *pch);
+extern void pcicore_pci_setup(struct pcicore_info *pch);
 
 #endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
index edf5515..f1ca126 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -77,7 +77,7 @@
 };
 
 struct otpinfo {
-	uint ccrev;		/* chipc revision */
+	struct bcma_device *core; /* chipc core */
 	const struct otp_fn_s *fn;	/* OTP functions */
 	struct si_pub *sih;		/* Saved sb handle */
 
@@ -133,9 +133,10 @@
 #define OTP_SZ_FU_144		(144/8)	/* 144 bits */
 
 static u16
-ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+ipxotp_otpr(struct otpinfo *oi, uint wn)
 {
-	return R_REG(&cc->sromotp[wn]);
+	return bcma_read16(oi->core,
+			   CHIPCREGOFFS(sromotp[wn]));
 }
 
 /*
@@ -146,7 +147,7 @@
 {
 	int ret = 0;
 
-	switch (sih->chip) {
+	switch (ai_get_chip_id(sih)) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 		ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@
 	return ret;
 }
 
-static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+static void _ipxotp_init(struct otpinfo *oi)
 {
 	uint k;
 	u32 otpp, st;
+	int ccrev = ai_get_ccrev(oi->sih);
+
 
 	/*
 	 * record word offset of General Use Region
 	 * for various chipcommon revs
 	 */
-	if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
-	    || oi->sih->ccrev == 27) {
+	if (ccrev == 21 || ccrev == 24
+	    || ccrev == 27) {
 		oi->otpgu_base = REVA4_OTPGU_BASE;
-	} else if (oi->sih->ccrev == 36) {
+	} else if (ccrev == 36) {
 		/*
 		 * OTP size greater than equal to 2KB (128 words),
 		 * otpgu_base is similar to rev23
@@ -182,7 +185,7 @@
 			oi->otpgu_base = REVB8_OTPGU_BASE;
 		else
 			oi->otpgu_base = REV36_OTPGU_BASE;
-	} else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+	} else if (ccrev == 23 || ccrev >= 25) {
 		oi->otpgu_base = REVB8_OTPGU_BASE;
 	}
 
@@ -190,24 +193,21 @@
 	otpp =
 	    OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
 
-	W_REG(&cc->otpprog, otpp);
-	for (k = 0;
-	     ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
-	     && (k < OTPP_TRIES); k++)
-		;
+	bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
+	st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
+	for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
+		st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
 	if (k >= OTPP_TRIES)
 		return;
 
 	/* Read OTP lock bits and subregion programmed indication bits */
-	oi->status = R_REG(&cc->otpstatus);
+	oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
 
-	if ((oi->sih->chip == BCM43224_CHIP_ID)
-	    || (oi->sih->chip == BCM43225_CHIP_ID)) {
+	if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
+	    || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
 		u32 p_bits;
-		p_bits =
-		    (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
-		     OTPGU_P_MSK)
-		    >> OTPGU_P_SHIFT;
+		p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
+			  OTPGU_P_MSK) >> OTPGU_P_SHIFT;
 		oi->status |= (p_bits << OTPS_GUP_SHIFT);
 	}
 
@@ -220,7 +220,7 @@
 	oi->hwlim = oi->wsize;
 	if (oi->status & OTPS_GUP_HW) {
 		oi->hwlim =
-		    ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+		    ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
 		oi->swbase = oi->hwlim;
 	} else
 		oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@
 
 	if (oi->status & OTPS_GUP_SW) {
 		oi->swlim =
-		    ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+		    ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
 		oi->fbase = oi->swlim;
 	} else
 		oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@
 
 static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
 {
-	uint idx;
-	struct chipcregs __iomem *cc;
-
 	/* Make sure we're running IPX OTP */
-	if (!OTPTYPE_IPX(sih->ccrev))
+	if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
 		return -EBADE;
 
 	/* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@
 		return -EBADE;
 
 	/* Check for otp size */
-	switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+	switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
 	case 0:
 		/* Nothing there */
 		return -EBADE;
@@ -282,21 +279,13 @@
 	}
 
 	/* Retrieve OTP region info */
-	idx = ai_coreidx(sih);
-	cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-	_ipxotp_init(oi, cc);
-
-	ai_setcoreidx(sih, idx);
-
+	_ipxotp_init(oi);
 	return 0;
 }
 
 static int
 ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
 {
-	uint idx;
-	struct chipcregs __iomem *cc;
 	uint base, i, sz;
 
 	/* Validate region selection */
@@ -365,14 +354,10 @@
 		return -EINVAL;
 	}
 
-	idx = ai_coreidx(oi->sih);
-	cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
 	/* Read the data */
 	for (i = 0; i < sz; i++)
-		data[i] = ipxotp_otpr(oi, cc, base + i);
+		data[i] = ipxotp_otpr(oi, base + i);
 
-	ai_setcoreidx(oi->sih, idx);
 	*wlen = sz;
 	return 0;
 }
@@ -384,14 +369,13 @@
 
 static int otp_init(struct si_pub *sih, struct otpinfo *oi)
 {
-
 	int ret;
 
 	memset(oi, 0, sizeof(struct otpinfo));
 
-	oi->ccrev = sih->ccrev;
+	oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
-	if (OTPTYPE_IPX(oi->ccrev))
+	if (OTPTYPE_IPX(ai_get_ccrev(sih)))
 		oi->fn = &ipxotp_fn;
 
 	if (oi->fn == NULL)
@@ -399,7 +383,7 @@
 
 	oi->sih = sih;
 
-	ret = (oi->fn->init) (sih, oi);
+	ret = (oi->fn->init)(sih, oi);
 
 	return ret;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index a314925..264f8c4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -109,10 +109,10 @@
 	{204, 5020},
 	{208, 5040},
 	{212, 5060},
-	{216, 50800}
+	{216, 5080}
 };
 
-const u8 ofdm_rate_lookup[] = {
+static const u8 ofdm_rate_lookup[] = {
 
 	BRCM_RATE_48M,
 	BRCM_RATE_24M,
@@ -149,9 +149,8 @@
 void wlc_radioreg_exit(struct brcms_phy_pub *pih)
 {
 	struct brcms_phy *pi = (struct brcms_phy *) pih;
-	u16 dummy;
 
-	dummy = R_REG(&pi->regs->phyversion);
+	(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
 	pi->phy_wreg = 0;
 	wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
 }
@@ -186,19 +185,11 @@
 	if ((D11REV_GE(pi->sh->corerev, 24)) ||
 	    (D11REV_IS(pi->sh->corerev, 22)
 	     && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
-		W_REG_FLUSH(&pi->regs->radioregaddr, addr);
-		data = R_REG(&pi->regs->radioregdata);
+		bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+		data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
 	} else {
-		W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-
-#ifdef __ARM_ARCH_4T__
-		__asm__(" .align 4 ");
-		__asm__(" nop ");
-		data = R_REG(&pi->regs->phy4wdatalo);
-#else
-		data = R_REG(&pi->regs->phy4wdatalo);
-#endif
-
+		bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+		data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
 	}
 	pi->phy_wreg = 0;
 
@@ -211,15 +202,15 @@
 	    (D11REV_IS(pi->sh->corerev, 22)
 	     && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
 
-		W_REG_FLUSH(&pi->regs->radioregaddr, addr);
-		W_REG(&pi->regs->radioregdata, val);
+		bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+		bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
 	} else {
-		W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-		W_REG(&pi->regs->phy4wdatalo, val);
+		bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+		bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
 	}
 
 	if (++pi->phy_wreg >= pi->phy_wreg_limit) {
-		(void)R_REG(&pi->regs->maccontrol);
+		(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 		pi->phy_wreg = 0;
 	}
 }
@@ -231,19 +222,20 @@
 	if (D11REV_GE(pi->sh->corerev, 24)) {
 		u32 b0, b1, b2;
 
-		W_REG_FLUSH(&pi->regs->radioregaddr, 0);
-		b0 = (u32) R_REG(&pi->regs->radioregdata);
-		W_REG_FLUSH(&pi->regs->radioregaddr, 1);
-		b1 = (u32) R_REG(&pi->regs->radioregdata);
-		W_REG_FLUSH(&pi->regs->radioregaddr, 2);
-		b2 = (u32) R_REG(&pi->regs->radioregdata);
+		bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
+		b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+		bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
+		b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+		bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
+		b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
 
 		id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
 								      & 0xf);
 	} else {
-		W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
-		id = (u32) R_REG(&pi->regs->phy4wdatalo);
-		id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
+		bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
+		id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
+		id |= (u32) bcma_read16(pi->d11core,
+					D11REGOFFS(phy4wdatahi)) << 16;
 	}
 	pi->phy_wreg = 0;
 	return id;
@@ -283,75 +275,52 @@
 
 void write_phy_channel_reg(struct brcms_phy *pi, uint val)
 {
-	W_REG(&pi->regs->phychannel, val);
+	bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
 }
 
 u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
 {
-	struct d11regs __iomem *regs;
-
-	regs = pi->regs;
-
-	W_REG_FLUSH(&regs->phyregaddr, addr);
+	bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
 
 	pi->phy_wreg = 0;
-	return R_REG(&regs->phyregdata);
+	return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
 }
 
 void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
 {
-	struct d11regs __iomem *regs;
-
-	regs = pi->regs;
-
 #ifdef CONFIG_BCM47XX
-	W_REG_FLUSH(&regs->phyregaddr, addr);
-	W_REG(&regs->phyregdata, val);
+	bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+	bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
 	if (addr == 0x72)
-		(void)R_REG(&regs->phyregdata);
+		(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
 #else
-	W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+	bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
 	if (++pi->phy_wreg >= pi->phy_wreg_limit) {
 		pi->phy_wreg = 0;
-		(void)R_REG(&regs->phyversion);
+		(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
 	}
 #endif
 }
 
 void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
 {
-	struct d11regs __iomem *regs;
-
-	regs = pi->regs;
-
-	W_REG_FLUSH(&regs->phyregaddr, addr);
-
-	W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
+	bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+	bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
 	pi->phy_wreg = 0;
 }
 
 void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
 {
-	struct d11regs __iomem *regs;
-
-	regs = pi->regs;
-
-	W_REG_FLUSH(&regs->phyregaddr, addr);
-
-	W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
+	bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+	bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
 	pi->phy_wreg = 0;
 }
 
 void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
 {
-	struct d11regs __iomem *regs;
-
-	regs = pi->regs;
-
-	W_REG_FLUSH(&regs->phyregaddr, addr);
-
-	W_REG(&regs->phyregdata,
-	      ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
+	val &= mask;
+	bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+	bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
 	pi->phy_wreg = 0;
 }
 
@@ -412,10 +381,8 @@
 	sh->sromrev = shp->sromrev;
 	sh->boardtype = shp->boardtype;
 	sh->boardrev = shp->boardrev;
-	sh->boardvendor = shp->boardvendor;
 	sh->boardflags = shp->boardflags;
 	sh->boardflags2 = shp->boardflags2;
-	sh->buscorerev = shp->buscorerev;
 
 	sh->fast_timer = PHY_SW_TIMER_FAST;
 	sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -458,7 +425,7 @@
 }
 
 struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
 	       int bandtype, struct wiphy *wiphy)
 {
 	struct brcms_phy *pi;
@@ -470,7 +437,7 @@
 	if (D11REV_IS(sh->corerev, 4))
 		sflags = SISF_2G_PHY | SISF_5G_PHY;
 	else
-		sflags = ai_core_sflags(sh->sih, 0, 0);
+		sflags = bcma_aread32(d11core, BCMA_IOST);
 
 	if (bandtype == BRCM_BAND_5G) {
 		if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -488,7 +455,7 @@
 	if (pi == NULL)
 		return NULL;
 	pi->wiphy = wiphy;
-	pi->regs = regs;
+	pi->d11core = d11core;
 	pi->sh = sh;
 	pi->phy_init_por = true;
 	pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -503,7 +470,7 @@
 		pi->pubpi.coreflags = SICF_GMODE;
 
 	wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
-	phyversion = R_REG(&pi->regs->phyversion);
+	phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
 
 	pi->pubpi.phy_type = PHY_TYPE(phyversion);
 	pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -515,8 +482,8 @@
 	pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
 	pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
 
-	if (!pi->pubpi.phy_type == PHY_TYPE_N &&
-	    !pi->pubpi.phy_type == PHY_TYPE_LCN)
+	if (pi->pubpi.phy_type != PHY_TYPE_N &&
+	    pi->pubpi.phy_type != PHY_TYPE_LCN)
 		goto err;
 
 	if (bandtype == BRCM_BAND_5G) {
@@ -787,14 +754,14 @@
 
 	pi->radio_chanspec = chanspec;
 
-	mc = R_REG(&pi->regs->maccontrol);
+	mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 	if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
 		return;
 
 	if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
 		pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
 
-	if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
+	if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
 		 "HW error SISF_FCLKA\n"))
 		return;
 
@@ -833,8 +800,8 @@
 	struct brcms_phy *pi = (struct brcms_phy *) pih;
 	void (*cal_init)(struct brcms_phy *) = NULL;
 
-	if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
-		 "HW error: MAC enabled during phy cal\n"))
+	if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+		  MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
 		return;
 
 	if (!pi->initialized) {
@@ -1025,7 +992,7 @@
 void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
 {
 #define DUMMY_PKT_LEN   20
-	struct d11regs __iomem *regs = pi->regs;
+	struct bcma_device *core = pi->d11core;
 	int i, count;
 	u8 ofdmpkt[DUMMY_PKT_LEN] = {
 		0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1041,26 +1008,28 @@
 	wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
 				      dummypkt);
 
-	W_REG(&regs->xmtsel, 0);
+	bcma_write16(core, D11REGOFFS(xmtsel), 0);
 
 	if (D11REV_GE(pi->sh->corerev, 11))
-		W_REG(&regs->wepctl, 0x100);
+		bcma_write16(core, D11REGOFFS(wepctl), 0x100);
 	else
-		W_REG(&regs->wepctl, 0);
+		bcma_write16(core, D11REGOFFS(wepctl), 0);
 
-	W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
+	bcma_write16(core, D11REGOFFS(txe_phyctl),
+		     (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
 	if (ISNPHY(pi) || ISLCNPHY(pi))
-		W_REG(&regs->txe_phyctl1, 0x1A02);
+		bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
 
-	W_REG(&regs->txe_wm_0, 0);
-	W_REG(&regs->txe_wm_1, 0);
+	bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
+	bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
 
-	W_REG(&regs->xmttplatetxptr, 0);
-	W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN);
+	bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
+	bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
 
-	W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2));
+	bcma_write16(core, D11REGOFFS(xmtsel),
+		     ((8 << 8) | (1 << 5) | (1 << 2) | 2));
 
-	W_REG(&regs->txe_ctl, 0);
+	bcma_write16(core, D11REGOFFS(txe_ctl), 0);
 
 	if (!pa_on) {
 		if (ISNPHY(pi))
@@ -1068,27 +1037,28 @@
 	}
 
 	if (ISNPHY(pi) || ISLCNPHY(pi))
-		W_REG(&regs->txe_aux, 0xD0);
+		bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
 	else
-		W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4)));
+		bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
 
-	(void)R_REG(&regs->txe_aux);
+	(void)bcma_read16(core, D11REGOFFS(txe_aux));
 
 	i = 0;
 	count = ofdm ? 30 : 250;
 	while ((i++ < count)
-	       && (R_REG(&regs->txe_status) & (1 << 7)))
+	       && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
 		udelay(10);
 
 	i = 0;
 
-	while ((i++ < 10)
-	       && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
+	while ((i++ < 10) &&
+	       ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
 		udelay(10);
 
 	i = 0;
 
-	while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8))))
+	while ((i++ < 10) &&
+	       ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
 		udelay(10);
 
 	if (!pa_on) {
@@ -1145,7 +1115,7 @@
 void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
 {
 	struct brcms_phy *pi = (struct brcms_phy *) pih;
-	(void)R_REG(&pi->regs->maccontrol);
+	(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 
 	if (ISNPHY(pi)) {
 		wlc_phy_switch_radio_nphy(pi, on);
@@ -1385,7 +1355,7 @@
 	memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
 	       &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
 
-	if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
+	if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
 		mac_enabled = true;
 
 	if (mac_enabled)
@@ -1415,7 +1385,8 @@
 		if (!SCAN_INPROG_PHY(pi)) {
 			bool suspend;
 
-			suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+			suspend = (0 == (bcma_read32(pi->d11core,
+						     D11REGOFFS(maccontrol)) &
 					 MCTL_EN_MAC));
 
 			if (!suspend)
@@ -1868,18 +1839,17 @@
 
 		if (NREV_IS(pi->pubpi.phy_rev, 3)
 		    || NREV_IS(pi->pubpi.phy_rev, 4)) {
-			W_REG(&pi->regs->phyregaddr, 0xa0);
-			(void)R_REG(&pi->regs->phyregaddr);
-			rxc = R_REG(&pi->regs->phyregdata);
-			W_REG(&pi->regs->phyregdata,
-			      (0x1 << 15) | rxc);
+			bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+				      0xa0);
+			bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
+				   0x1 << 15);
 		}
 	} else {
 		if (NREV_IS(pi->pubpi.phy_rev, 3)
 		    || NREV_IS(pi->pubpi.phy_rev, 4)) {
-			W_REG(&pi->regs->phyregaddr, 0xa0);
-			(void)R_REG(&pi->regs->phyregaddr);
-			W_REG(&pi->regs->phyregdata, rxc);
+			bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+				      0xa0);
+			bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
 		}
 
 		wlc_phy_por_inform(ppi);
@@ -1999,7 +1969,9 @@
 	pi->txpwrctrl = hwpwrctrl;
 
 	if (ISNPHY(pi)) {
-		suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+		suspend = (0 == (bcma_read32(pi->d11core,
+					     D11REGOFFS(maccontrol)) &
+				 MCTL_EN_MAC));
 		if (!suspend)
 			wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -2201,7 +2173,8 @@
 	if (!pi->sh->clk)
 		return;
 
-	suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -2419,8 +2392,8 @@
 			wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
 			wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
 
-			OR_REG(&pi->regs->maccommand,
-			       MCMD_BG_NOISE);
+			bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+				   MCMD_BG_NOISE);
 		} else {
 			wlapi_suspend_mac_and_wait(pi->sh->physhim);
 			wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2438,8 +2411,8 @@
 			wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
 			wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
 
-			OR_REG(&pi->regs->maccommand,
-			       MCMD_BG_NOISE);
+			bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+				   MCMD_BG_NOISE);
 		} else {
 			struct phy_iq_est est[PHY_CORE_MAX];
 			u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2932,29 +2905,29 @@
 				mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
 
 			}
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, gpiocontrol),
-				   ~0x0, 0x0);
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, gpioout), 0x40,
-				   0x40);
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, gpioouten), 0x40,
-				   0x40);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, gpiocontrol),
+				  ~0x0, 0x0);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, gpioout),
+				  0x40, 0x40);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, gpioouten),
+				  0x40, 0x40);
 		} else {
 			mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
 
 			mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
 
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, gpioout), 0x40,
-				   0x00);
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, gpioouten), 0x40,
-				   0x0);
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, gpiocontrol),
-				   ~0x0, 0x40);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, gpioout),
+				  0x40, 0x00);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, gpioouten),
+				  0x40, 0x0);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, gpiocontrol),
+				  ~0x0, 0x40);
 		}
 	}
 }
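
The phy_cmn.c hunks above all follow one conversion: pointer-based accessors (R_REG/W_REG/OR_REG/AND_REG on struct d11regs fields) become offset-based bcma accessors on the d11 core (bcma_read16/bcma_write16/bcma_set16/bcma_mask16 with D11REGOFFS). Below is a minimal standalone sketch of the read-modify-write semantics involved; the mock types and helpers are invented purely for illustration and are not the kernel API.

	#include <stdint.h>
	#include <stddef.h>

	/* Standalone mock of an offset-addressed 16-bit register window; the
	 * real driver uses bcma_read16()/bcma_write16() on the d11 core. */
	struct mock_core {
		uint16_t regs[0x400];
	};

	static uint16_t mock_read16(struct mock_core *c, size_t off)
	{
		return c->regs[off / 2];
	}

	static void mock_write16(struct mock_core *c, size_t off, uint16_t val)
	{
		c->regs[off / 2] = val;
	}

	/* Read-modify-write helpers: the effect the old OR_REG()/AND_REG()
	 * macros had through a struct d11regs pointer dereference. */
	static void mock_set16(struct mock_core *c, size_t off, uint16_t bits)
	{
		mock_write16(c, off, mock_read16(c, off) | bits);
	}

	static void mock_mask16(struct mock_core *c, size_t off, uint16_t mask)
	{
		mock_write16(c, off, mock_read16(c, off) & mask);
	}

	int main(void)
	{
		struct mock_core core = { { 0 } };

		/* Arbitrary offset used purely for the demo. */
		mock_write16(&core, 0x48e, 0x1A02);
		mock_set16(&core, 0x48e, 0x0001);
		mock_mask16(&core, 0x48e, 0xfffe);

		return mock_read16(&core, 0x48e) == 0x1A02 ? 0 : 1;
	}

The set/mask helpers are why several two-step read-then-write sequences in the old code collapse into a single call in the new one, for example the phyregdata update that becomes bcma_set16() above.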
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index 96e1516..e34a71e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -166,7 +166,6 @@
 	struct phy_shim_info *physhim;
 	uint unit;
 	uint corerev;
-	uint buscorerev;
 	u16 vid;
 	u16 did;
 	uint chip;
@@ -175,7 +174,6 @@
 	uint sromrev;
 	uint boardtype;
 	uint boardrev;
-	uint boardvendor;
 	u32 boardflags;
 	u32 boardflags2;
 };
@@ -183,7 +181,7 @@
 
 extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
 extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
-					    struct d11regs __iomem *regs,
+					    struct bcma_device *d11core,
 					    int bandtype, struct wiphy *wiphy);
 extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index bea8524..af00e2c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -503,10 +503,8 @@
 	uint sromrev;
 	uint boardtype;
 	uint boardrev;
-	uint boardvendor;
 	u32 boardflags;
 	u32 boardflags2;
-	uint buscorerev;
 	uint fast_timer;
 	uint slow_timer;
 	uint glacial_timer;
@@ -559,7 +557,7 @@
 	} u;
 	bool user_txpwr_at_rfport;
 
-	struct d11regs __iomem *regs;
+	struct bcma_device *d11core;
 	struct brcms_phy *next;
 	struct brcms_phy_pub pubpi;
 
@@ -774,11 +772,6 @@
 	s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ];
 	u8 nphy_noise_index;
 
-	u8 nphy_txpid2g[PHY_CORE_NUM_2];
-	u8 nphy_txpid5g[PHY_CORE_NUM_2];
-	u8 nphy_txpid5gl[PHY_CORE_NUM_2];
-	u8 nphy_txpid5gh[PHY_CORE_NUM_2];
-
 	bool nphy_gain_boost;
 	bool nphy_elna_gain_config;
 	u16 old_bphy_test;
@@ -1095,7 +1088,7 @@
 
 #define BRCMS_PHY_WAR_PR51571(pi) \
 	if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
-		(void)R_REG(&(pi)->regs->maccontrol)
+		(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
 
 extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
 extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index a63aa99..ce8562a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1603,7 +1603,7 @@
 		si_pmu_pllupd(pi->sh->sih);
 		write_phy_reg(pi, 0x942, 0);
 		wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
-		pi_lcn->lcnphy_spurmod = 0;
+		pi_lcn->lcnphy_spurmod = false;
 		mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
 
 		write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@
 		write_phy_reg(pi, 0x942, 0);
 		wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
 
-		pi_lcn->lcnphy_spurmod = 0;
+		pi_lcn->lcnphy_spurmod = false;
 		mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
 
 		write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@
 {
 	s8 index, delta_brd, delta_temp, new_index, tempcorrx;
 	s16 manp, meas_temp, temp_diff;
-	bool neg = 0;
+	bool neg = false;
 	u16 temp;
 	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
 
@@ -2348,7 +2348,7 @@
 	manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
 	temp_diff = manp - meas_temp;
 	if (temp_diff < 0) {
-		neg = 1;
+		neg = true;
 		temp_diff = -temp_diff;
 	}
 
@@ -2813,10 +2813,8 @@
 	u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
 	u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
 	idleTssi = read_phy_reg(pi, 0x4ab);
-	suspend =
-		(0 ==
-		 (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
-		  MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@
 
 	for (i = 0; i < 14; i++)
 		values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
-	suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 	save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@
 	bool suspend;
 	struct brcms_phy *pi = (struct brcms_phy *) ppi;
 
-	suspend =
-		(0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -3535,15 +3534,17 @@
 	timer = 0;
 	old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
 
-	curval1 = R_REG(&pi->regs->psm_corectlsts);
+	curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
 	ptr[130] = 0;
-	W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+	bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
+		     ((1 << 6) | curval1));
 
-	W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
-	W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+	bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
+	bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
 	udelay(20);
-	curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
-	W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+	curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+	bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
+		     curval2 | 0x30);
 
 	write_phy_reg(pi, 0x555, 0x0);
 	write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@
 
 	sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
 	write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
-	stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
-	curptr = R_REG(&pi->regs->smpl_clct_curptr);
+	stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
+	curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
 	do {
 		udelay(10);
-		curptr = R_REG(&pi->regs->smpl_clct_curptr);
+		curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
 		timer++;
 	} while ((curptr != stpptr) && (timer < 500));
 
-	W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+	bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
 	strptr = 0x7E00;
-	W_REG(&pi->regs->tplatewrptr, strptr);
+	bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
 	while (strptr < 0x8000) {
-		val = R_REG(&pi->regs->tplatewrdata);
+		val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
 		imag = ((val >> 16) & 0x3ff);
 		real = ((val) & 0x3ff);
 		if (imag > 511)
@@ -3597,8 +3598,8 @@
 	}
 
 	write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
-	W_REG(&pi->regs->psm_phy_hdr_param, curval2);
-	W_REG(&pi->regs->psm_corectlsts, curval1);
+	bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
+	bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
 }
 
 static void
@@ -3681,8 +3682,8 @@
 	wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
 	udelay(20);
 	for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
-		phy_c23 = 1;
-		phy_c22 = 0;
+		phy_c23 = true;
+		phy_c22 = false;
 		switch (cal_type) {
 		case 0:
 			phy_c10 = 511;
@@ -3700,18 +3701,18 @@
 
 		phy_c9 = read_phy_reg(pi, 0x93d);
 		phy_c9 = 2 * phy_c9;
-		phy_c24 = 0;
+		phy_c24 = false;
 		phy_c5 = 7;
-		phy_c25 = 1;
+		phy_c25 = true;
 		while (1) {
 			write_radio_reg(pi, RADIO_2064_REG026,
 					(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
 			udelay(50);
-			phy_c22 = 0;
+			phy_c22 = false;
 			ptr[130] = 0;
 			wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
 			if (ptr[130] == 1)
-				phy_c22 = 1;
+				phy_c22 = true;
 			if (phy_c22)
 				phy_c5 -= 1;
 			if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3721,7 +3722,7 @@
 			if (phy_c5 <= 0 || phy_c5 >= 7)
 				break;
 			phy_c24 = phy_c22;
-			phy_c25 = 0;
+			phy_c25 = false;
 		}
 
 		if (phy_c5 < 0)
@@ -3772,10 +3773,10 @@
 					phy_c13 = phy_c11;
 					phy_c14 = phy_c12;
 				}
-				phy_c23 = 0;
+				phy_c23 = false;
 			}
 		}
-		phy_c23 = 1;
+		phy_c23 = true;
 		phy_c15 = phy_c13;
 		phy_c16 = phy_c14;
 		phy_c7 = phy_c7 >> 1;
@@ -3965,12 +3966,12 @@
 {
 	u16 tempsenseval1, tempsenseval2;
 	s16 avg = 0;
-	bool suspend = 0;
+	bool suspend = false;
 
 	if (mode == 1) {
-		suspend =
-			(0 ==
-			 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+		suspend = (0 == (bcma_read32(pi->d11core,
+					     D11REGOFFS(maccontrol)) &
+				 MCTL_EN_MAC));
 		if (!suspend)
 			wlapi_suspend_mac_and_wait(pi->sh->physhim);
 		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4007,14 +4008,14 @@
 {
 	u16 tempsenseval1, tempsenseval2;
 	s32 avg = 0;
-	bool suspend = 0;
+	bool suspend = false;
 	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
 	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
 
 	if (mode == 1) {
-		suspend =
-			(0 ==
-			 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+		suspend = (0 == (bcma_read32(pi->d11core,
+					     D11REGOFFS(maccontrol)) &
+				 MCTL_EN_MAC));
 		if (!suspend)
 			wlapi_suspend_mac_and_wait(pi->sh->physhim);
 		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4075,12 +4076,12 @@
 {
 	u16 vbatsenseval;
 	s32 avg = 0;
-	bool suspend = 0;
+	bool suspend = false;
 
 	if (mode == 1) {
-		suspend =
-			(0 ==
-			 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+		suspend = (0 == (bcma_read32(pi->d11core,
+					     D11REGOFFS(maccontrol)) &
+				 MCTL_EN_MAC));
 		if (!suspend)
 			wlapi_suspend_mac_and_wait(pi->sh->physhim);
 		wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@
 	s8 index;
 	u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
 	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
-	suspend =
-		(0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 	wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@
 	pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
 	index = pi_lcn->lcnphy_current_index;
 
-	suspend =
-		(0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend) {
 		wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
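
A guard repeated throughout the phy_lcn.c hunks: read maccontrol, treat a clear MCTL_EN_MAC bit as "already suspended", and only call wlapi_suspend_mac_and_wait() (and later the matching enable) when the MAC was actually running. A standalone sketch of that idiom follows; the mock names and the bit value are invented for the sketch and are not the driver API.

	#include <stdbool.h>
	#include <stdint.h>

	#define MOCK_MCTL_EN_MAC 0x00000001u	/* illustrative bit, not the real MCTL_EN_MAC value */

	struct mock_mac {
		uint32_t maccontrol;
	};

	static void mock_suspend(struct mock_mac *m) { m->maccontrol &= ~MOCK_MCTL_EN_MAC; }
	static void mock_enable(struct mock_mac *m)  { m->maccontrol |= MOCK_MCTL_EN_MAC; }

	/* Run a calibration-style callback with the MAC quiesced, suspending it
	 * only when it was running and re-enabling it only in that case. */
	static void with_mac_suspended(struct mock_mac *m, void (*fn)(struct mock_mac *))
	{
		bool suspend = (0 == (m->maccontrol & MOCK_MCTL_EN_MAC));

		if (!suspend)
			mock_suspend(m);

		fn(m);

		if (!suspend)
			mock_enable(m);
	}

	static void mock_cal(struct mock_mac *m)
	{
		(void)m;	/* calibration work would run here with the MAC idle */
	}

	int main(void)
	{
		struct mock_mac mac = { MOCK_MCTL_EN_MAC };

		with_mac_suspended(&mac, mock_cal);

		return (mac.maccontrol & MOCK_MCTL_EN_MAC) ? 0 : 1;
	}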
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index cd19c2f..a16f1ab 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -29,6 +29,7 @@
 #include "phy_radio.h"
 #include "phyreg_n.h"
 #include "phytbl_n.h"
+#include "soc.h"
 
 #define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name)	\
 	read_radio_reg(pi, radio_type##_##jspace##_##reg_name |	\
@@ -14417,12 +14418,6 @@
 		switch (band_num) {
 		case 0:
 
-			pi->nphy_txpid2g[PHY_CORE_0] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID2GA0);
-			pi->nphy_txpid2g[PHY_CORE_1] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID2GA1);
 			pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
 				(s8) wlapi_getintvar(shim,
 						     BRCMS_SROM_MAXP2GA0);
@@ -14486,12 +14481,6 @@
 			break;
 		case 1:
 
-			pi->nphy_txpid5g[PHY_CORE_0] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID5GA0);
-			pi->nphy_txpid5g[PHY_CORE_1] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID5GA1);
 			pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
 				(s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
 			pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
@@ -14551,12 +14540,6 @@
 			break;
 		case 2:
 
-			pi->nphy_txpid5gl[0] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID5GLA0);
-			pi->nphy_txpid5gl[1] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID5GLA1);
 			pi->nphy_pwrctrl_info[0].max_pwr_5gl =
 				(s8) wlapi_getintvar(shim,
 						     BRCMS_SROM_MAXP5GLA0);
@@ -14615,12 +14598,6 @@
 			break;
 		case 3:
 
-			pi->nphy_txpid5gh[0] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID5GHA0);
-			pi->nphy_txpid5gh[1] =
-				(u8) wlapi_getintvar(shim,
-						     BRCMS_SROM_TXPID5GHA1);
 			pi->nphy_pwrctrl_info[0].max_pwr_5gh =
 				(s8) wlapi_getintvar(shim,
 						     BRCMS_SROM_MAXP5GHA0);
@@ -17825,7 +17802,7 @@
 
 	if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
 		wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
-		(void)R_REG(&pi->regs->maccontrol);
+		(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 		udelay(1);
 	}
 
@@ -17976,7 +17953,7 @@
 
 	if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
 		wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
-		(void)R_REG(&pi->regs->maccontrol);
+		(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 		udelay(1);
 	}
 
@@ -19470,8 +19447,6 @@
 	u8 tx_pwr_ctrl_state;
 	bool do_nphy_cal = false;
 	uint core;
-	uint origidx, intr_val;
-	struct d11regs __iomem *regs;
 	u32 d11_clk_ctl_st;
 	bool do_rssi_cal = false;
 
@@ -19485,25 +19460,21 @@
 	     (pi->sh->chippkg == BCM4718_PKG_ID))) {
 		if ((pi->sh->boardflags & BFL_EXTLNA) &&
 		    (CHSPEC_IS2G(pi->radio_chanspec)))
-			ai_corereg(pi->sh->sih, SI_CC_IDX,
-				   offsetof(struct chipcregs, chipcontrol),
-				   0x40, 0x40);
+			ai_cc_reg(pi->sh->sih,
+				  offsetof(struct chipcregs, chipcontrol),
+				  0x40, 0x40);
 	}
 
 	if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
 	    CHSPEC_IS40(pi->radio_chanspec)) {
 
-		regs = (struct d11regs __iomem *)
-				ai_switch_core(pi->sh->sih,
-					       D11_CORE_ID, &origidx,
-					       &intr_val);
-		d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
-		AND_REG(&regs->clk_ctl_st,
-			~(CCS_FORCEHT | CCS_HTAREQ));
+		d11_clk_ctl_st = bcma_read32(pi->d11core,
+					     D11REGOFFS(clk_ctl_st));
+		bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
+			    ~(CCS_FORCEHT | CCS_HTAREQ));
 
-		W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
-
-		ai_restore_core(pi->sh->sih, origidx, intr_val);
+		bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
+			     d11_clk_ctl_st);
 	}
 
 	pi->use_int_tx_iqlo_cal_nphy =
@@ -19908,7 +19879,8 @@
 	if (!pi->sh->clk)
 		return;
 
-	suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			 MCTL_EN_MAC));
 	if (!suspend)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -21286,28 +21258,28 @@
 	val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
 	if (CHSPEC_IS5G(chanspec) && !val) {
 
-		val = R_REG(&pi->regs->psm_phy_hdr_param);
-		W_REG(&pi->regs->psm_phy_hdr_param,
+		val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+		bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
 		      (val | MAC_PHY_FORCE_CLK));
 
 		or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
 			   (BBCFG_RESETCCA | BBCFG_RESETRX));
 
-		W_REG(&pi->regs->psm_phy_hdr_param, val);
+		bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
 
 		or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
 	} else if (!CHSPEC_IS5G(chanspec) && val) {
 
 		and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
 
-		val = R_REG(&pi->regs->psm_phy_hdr_param);
-		W_REG(&pi->regs->psm_phy_hdr_param,
+		val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+		bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
 		      (val | MAC_PHY_FORCE_CLK));
 
 		and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
 			    (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
 
-		W_REG(&pi->regs->psm_phy_hdr_param, val);
+		bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
 	}
 
 	write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21365,24 +21337,23 @@
 			spuravoid = 1;
 
 		wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
-		si_pmu_spuravoid(pi->sh->sih, spuravoid);
+		si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
 		wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
 
 		if ((pi->sh->chip == BCM43224_CHIP_ID) ||
 		    (pi->sh->chip == BCM43225_CHIP_ID)) {
-
 			if (spuravoid == 1) {
-
-				W_REG(&pi->regs->tsf_clk_frac_l,
-				      0x5341);
-				W_REG(&pi->regs->tsf_clk_frac_h,
-				      0x8);
+				bcma_write16(pi->d11core,
+					     D11REGOFFS(tsf_clk_frac_l),
+					     0x5341);
+				bcma_write16(pi->d11core,
+					     D11REGOFFS(tsf_clk_frac_h), 0x8);
 			} else {
-
-				W_REG(&pi->regs->tsf_clk_frac_l,
-				      0x8889);
-				W_REG(&pi->regs->tsf_clk_frac_h,
-				      0x8);
+				bcma_write16(pi->d11core,
+					     D11REGOFFS(tsf_clk_frac_l),
+					     0x8889);
+				bcma_write16(pi->d11core,
+					     D11REGOFFS(tsf_clk_frac_h), 0x8);
 			}
 		}
 
@@ -21522,13 +21493,13 @@
 
 		ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
 
-		mc = R_REG(&pi->regs->maccontrol);
+		mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 		mc &= ~MCTL_GPOUT_SEL_MASK;
-		W_REG(&pi->regs->maccontrol, mc);
+		bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
 
-		OR_REG(&pi->regs->psm_gpio_oe, mask);
+		bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
 
-		AND_REG(&pi->regs->psm_gpio_out, ~mask);
+		bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
 
 		if (lut_init) {
 			write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21545,9 +21516,8 @@
 	bool suspended = false;
 
 	if (D11REV_IS(pi->sh->corerev, 16)) {
-		suspended =
-			(R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
-			false : true;
+		suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			     MCTL_EN_MAC) ? false : true;
 		if (!suspended)
 			wlapi_suspend_mac_and_wait(pi->sh->physhim);
 	}
@@ -25406,7 +25376,8 @@
 	if (pi->nphy_papd_skip == 1)
 		return;
 
-	phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+	phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+			MCTL_EN_MAC));
 	if (!phy_b3)
 		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 
@@ -27994,20 +27965,11 @@
 		chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
 		switch (chan_freq_range) {
 		case WL_CHAN_FREQ_RANGE_2G:
-			txpi[0] = pi->nphy_txpid2g[0];
-			txpi[1] = pi->nphy_txpid2g[1];
-			break;
 		case WL_CHAN_FREQ_RANGE_5GL:
-			txpi[0] = pi->nphy_txpid5gl[0];
-			txpi[1] = pi->nphy_txpid5gl[1];
-			break;
 		case WL_CHAN_FREQ_RANGE_5GM:
-			txpi[0] = pi->nphy_txpid5g[0];
-			txpi[1] = pi->nphy_txpid5g[1];
-			break;
 		case WL_CHAN_FREQ_RANGE_5GH:
-			txpi[0] = pi->nphy_txpid5gh[0];
-			txpi[1] = pi->nphy_txpid5gh[1];
+			txpi[0] = 0;
+			txpi[1] = 0;
 			break;
 		default:
 			txpi[0] = txpi[1] = 91;
@@ -28389,7 +28351,7 @@
 
 	if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
 		wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
-		(void)R_REG(&pi->regs->maccontrol);
+		(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
 		udelay(1);
 	}
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 3b36e3a..4931d29 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -23,6 +23,7 @@
 #include "pub.h"
 #include "aiutils.h"
 #include "pmu.h"
+#include "soc.h"
 
 /*
  * external LPO crystal frequency
@@ -114,10 +115,10 @@
 	uint rsrcs;
 
 	/* # resources */
-	rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+	rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
 
 	/* determine min/max rsrc masks */
-	switch (sih->chip) {
+	switch (ai_get_chip_id(sih)) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 		/* ??? */
@@ -138,75 +139,84 @@
 	*pmax = max_mask;
 }
 
-static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
-			   u8 spuravoid)
+void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
 {
 	u32 tmp = 0;
+	struct bcma_device *core;
 
-	switch (sih->chip) {
+	/* switch to chipc */
+	core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+
+	switch (ai_get_chip_id(sih)) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 		if (spuravoid == 1) {
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
-			W_REG(&cc->pllcontrol_data, 0x11500010);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
-			W_REG(&cc->pllcontrol_data, 0x000C0C06);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
-			W_REG(&cc->pllcontrol_data, 0x0F600a08);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
-			W_REG(&cc->pllcontrol_data, 0x00000000);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
-			W_REG(&cc->pllcontrol_data, 0x2001E920);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
-			W_REG(&cc->pllcontrol_data, 0x88888815);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL0);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x11500010);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL1);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x000C0C06);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL2);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x0F600a08);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL3);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x00000000);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL4);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x2001E920);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL5);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x88888815);
 		} else {
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
-			W_REG(&cc->pllcontrol_data, 0x11100010);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
-			W_REG(&cc->pllcontrol_data, 0x000c0c06);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
-			W_REG(&cc->pllcontrol_data, 0x03000a08);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
-			W_REG(&cc->pllcontrol_data, 0x00000000);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
-			W_REG(&cc->pllcontrol_data, 0x200005c0);
-			W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
-			W_REG(&cc->pllcontrol_data, 0x88888815);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL0);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x11100010);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL1);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x000c0c06);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL2);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x03000a08);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL3);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x00000000);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL4);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x200005c0);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+				     PMU1_PLL0_PLLCTL5);
+			bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+				     0x88888815);
 		}
 		tmp = 1 << 10;
 		break;
 
-		W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
-		W_REG(&cc->pllcontrol_data, 0x11100008);
-		W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
-		W_REG(&cc->pllcontrol_data, 0x0c000c06);
-		W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
-		W_REG(&cc->pllcontrol_data, 0x03000a08);
-		W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
-		W_REG(&cc->pllcontrol_data, 0x00000000);
-		W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
-		W_REG(&cc->pllcontrol_data, 0x200005c0);
-		W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
-		W_REG(&cc->pllcontrol_data, 0x88888855);
-
-		tmp = 1 << 10;
-		break;
-
 	default:
 		/* bail out */
 		return;
 	}
 
-	tmp |= R_REG(&cc->pmucontrol);
-	W_REG(&cc->pmucontrol, tmp);
+	bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
 }
 
 u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
 {
 	uint delay = PMU_MAX_TRANSITION_DLY;
 
-	switch (sih->chip) {
+	switch (ai_get_chip_id(sih)) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 	case BCM4313_CHIP_ID:
@@ -219,54 +229,35 @@
 	return (u16) delay;
 }
 
-void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
-{
-	struct chipcregs __iomem *cc;
-	uint origidx;
-
-	/* Remember original core before switch to chipc */
-	origidx = ai_coreidx(sih);
-	cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-	/* Return to original core */
-	ai_setcoreidx(sih, origidx);
-}
-
 /* Read/write a chipcontrol reg */
 u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
 {
-	ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
-		   ~0, reg);
-	return ai_corereg(sih, SI_CC_IDX,
-			  offsetof(struct chipcregs, chipcontrol_data), mask,
-			  val);
+	ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
+	return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
+			 mask, val);
 }
 
 /* Read/write a regcontrol reg */
 u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
 {
-	ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
-		   ~0, reg);
-	return ai_corereg(sih, SI_CC_IDX,
-			  offsetof(struct chipcregs, regcontrol_data), mask,
-			  val);
+	ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
+	return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
+			 mask, val);
 }
 
 /* Read/write a pllcontrol reg */
 u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
 {
-	ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
-		   ~0, reg);
-	return ai_corereg(sih, SI_CC_IDX,
-			  offsetof(struct chipcregs, pllcontrol_data), mask,
-			  val);
+	ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
+	return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
+			 mask, val);
 }
 
 /* PMU PLL update */
 void si_pmu_pllupd(struct si_pub *sih)
 {
-	ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
-		   PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+	ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
+		  PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
 }
 
 /* query alp/xtal clock frequency */
@@ -275,10 +266,10 @@
 	u32 clock = ALP_CLOCK;
 
 	/* bail out with default */
-	if (!(sih->cccaps & CC_CAP_PMU))
+	if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
 		return clock;
 
-	switch (sih->chip) {
+	switch (ai_get_chip_id(sih)) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 	case BCM4313_CHIP_ID:
@@ -292,95 +283,29 @@
 	return clock;
 }
 
-void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
-{
-	struct chipcregs __iomem *cc;
-	uint origidx, intr_val;
-
-	/* Remember original core before switch to chipc */
-	cc = (struct chipcregs __iomem *)
-			ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
-
-	/* update the pll changes */
-	si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
-	/* Return to original core */
-	ai_restore_core(sih, origidx, intr_val);
-}
-
 /* initialize PMU */
 void si_pmu_init(struct si_pub *sih)
 {
-	struct chipcregs __iomem *cc;
-	uint origidx;
+	struct bcma_device *core;
 
-	/* Remember original core before switch to chipc */
-	origidx = ai_coreidx(sih);
-	cc = ai_setcoreidx(sih, SI_CC_IDX);
+	/* select chipc */
+	core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
-	if (sih->pmurev == 1)
-		AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
-	else if (sih->pmurev >= 2)
-		OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
-
-	/* Return to original core */
-	ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(struct si_pub *sih)
-{
-	uint origidx;
-
-	/* Gate off SPROM clock and chip select signals */
-	si_pmu_sprom_enable(sih, false);
-
-	/* Remember original core */
-	origidx = ai_coreidx(sih);
-
-	/* Return to original core */
-	ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(struct si_pub *sih)
-{
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
-{
-	struct chipcregs __iomem *cc;
-	uint origidx;
-
-	/* Remember original core before switch to chipc */
-	origidx = ai_coreidx(sih);
-	cc = ai_setcoreidx(sih, SI_CC_IDX);
-
-	switch (sih->chip) {
-	case BCM4313_CHIP_ID:
-	case BCM43224_CHIP_ID:
-	case BCM43225_CHIP_ID:
-		/* ??? */
-		break;
-	default:
-		break;
-	}
-
-	/* Return to original core */
-	ai_setcoreidx(sih, origidx);
+	if (ai_get_pmurev(sih) == 1)
+		bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
+			    ~PCTL_NOILP_ON_WAIT);
+	else if (ai_get_pmurev(sih) >= 2)
+		bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
 }
 
 /* initialize PMU resources */
 void si_pmu_res_init(struct si_pub *sih)
 {
-	struct chipcregs __iomem *cc;
-	uint origidx;
+	struct bcma_device *core;
 	u32 min_mask = 0, max_mask = 0;
 
-	/* Remember original core before switch to chipc */
-	origidx = ai_coreidx(sih);
-	cc = ai_setcoreidx(sih, SI_CC_IDX);
+	/* select chipc */
+	core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
 	/* Determine min/max rsrc masks */
 	si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -390,55 +315,50 @@
 	/* Program max resource mask */
 
 	if (max_mask)
-		W_REG(&cc->max_res_mask, max_mask);
+		bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
 
 	/* Program min resource mask */
 
 	if (min_mask)
-		W_REG(&cc->min_res_mask, min_mask);
+		bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
 
 	/* Add some delay; allow resources to come up and settle. */
 	mdelay(2);
-
-	/* Return to original core */
-	ai_setcoreidx(sih, origidx);
 }
 
 u32 si_pmu_measure_alpclk(struct si_pub *sih)
 {
-	struct chipcregs __iomem *cc;
-	uint origidx;
+	struct bcma_device *core;
 	u32 alp_khz;
 
-	if (sih->pmurev < 10)
+	if (ai_get_pmurev(sih) < 10)
 		return 0;
 
 	/* Remember original core before switch to chipc */
-	origidx = ai_coreidx(sih);
-	cc = ai_setcoreidx(sih, SI_CC_IDX);
+	core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
 
-	if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+	if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
 		u32 ilp_ctr, alp_hz;
 
 		/*
 		 * Enable the reg to measure the freq,
 		 * in case it was disabled before
 		 */
-		W_REG(&cc->pmu_xtalfreq,
-		      1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+		bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
+			    1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
 
 		/* Delay for well over 4 ILP clocks */
 		udelay(1000);
 
 		/* Read the latched number of ALP ticks per 4 ILP ticks */
-		ilp_ctr =
-		    R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+		ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
+			  PMU_XTALFREQ_REG_ILPCTR_MASK;
 
 		/*
 		 * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
 		 * bit to save power
 		 */
-		W_REG(&cc->pmu_xtalfreq, 0);
+		bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
 
 		/* Calculate ALP frequency */
 		alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -451,8 +371,5 @@
 	} else
 		alp_khz = 0;
 
-	/* Return to original core */
-	ai_setcoreidx(sih, origidx);
-
 	return alp_khz;
 }
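
si_pmu_spuravoid_pllupdate() programs each PLL control word through an indirect address/data pair: the register index goes to pllcontrol_addr, the value to pllcontrol_data, and the bit OR'd into pmucontrol afterwards requests the PLL update (apparently the same request si_pmu_pllupd() issues). A standalone sketch of that addr-then-data idiom follows; the mock struct and helpers stand in for the ChipCommon core and are not the kernel API.

	#include <stdint.h>
	#include <stddef.h>

	/* Mock ChipCommon core: two registers forming an indirect
	 * address/data pair in front of a small backing store. */
	struct mock_chipc {
		uint32_t pllcontrol_addr;
		uint32_t pllcontrol_data;
		uint32_t pll[8];
	};

	static void mock_write32(struct mock_chipc *cc, size_t off, uint32_t val)
	{
		if (off == offsetof(struct mock_chipc, pllcontrol_addr))
			cc->pllcontrol_addr = val;
		else if (off == offsetof(struct mock_chipc, pllcontrol_data))
			cc->pll[cc->pllcontrol_addr & 7] = val;
	}

	/* Program one PLL control word, mirroring the addr-then-data sequence. */
	static void mock_pll_write(struct mock_chipc *cc, uint32_t reg, uint32_t val)
	{
		mock_write32(cc, offsetof(struct mock_chipc, pllcontrol_addr), reg);
		mock_write32(cc, offsetof(struct mock_chipc, pllcontrol_data), val);
	}

	int main(void)
	{
		struct mock_chipc cc = { 0 };

		/* Index for PLLCTL0 first, then the value, as in the hunk above. */
		mock_pll_write(&cc, 0, 0x11500010);

		return cc.pll[0] == 0x11500010 ? 0 : 1;
	}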
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3a08c62..3e39c5e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,13 +26,10 @@
 extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
 extern u32 si_pmu_alp_clock(struct si_pub *sih);
 extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
 extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
 extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_chip_init(struct si_pub *sih);
-extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
 extern void si_pmu_res_init(struct si_pub *sih);
-extern void si_pmu_swreg_init(struct si_pub *sih);
 extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
 
 #endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 37bb2dc..f0038ad 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -17,6 +17,7 @@
 #ifndef _BRCM_PUB_H_
 #define _BRCM_PUB_H_
 
+#include <linux/bcma/bcma.h>
 #include <brcmu_wifi.h>
 #include "types.h"
 #include "defs.h"
@@ -170,22 +171,6 @@
 	BRCMS_SROM_TSSIPOS2G,
 	BRCMS_SROM_TSSIPOS5G,
 	BRCMS_SROM_TXCHAIN,
-	BRCMS_SROM_TXPID2GA0,
-	BRCMS_SROM_TXPID2GA1,
-	BRCMS_SROM_TXPID2GA2,
-	BRCMS_SROM_TXPID2GA3,
-	BRCMS_SROM_TXPID5GA0,
-	BRCMS_SROM_TXPID5GA1,
-	BRCMS_SROM_TXPID5GA2,
-	BRCMS_SROM_TXPID5GA3,
-	BRCMS_SROM_TXPID5GHA0,
-	BRCMS_SROM_TXPID5GHA1,
-	BRCMS_SROM_TXPID5GHA2,
-	BRCMS_SROM_TXPID5GHA3,
-	BRCMS_SROM_TXPID5GLA0,
-	BRCMS_SROM_TXPID5GLA1,
-	BRCMS_SROM_TXPID5GLA2,
-	BRCMS_SROM_TXPID5GLA3,
 	/*
 	 * per-path identifiers (see srom.c)
 	 */
@@ -225,10 +210,6 @@
 	BRCMS_SROM_PA2GW2A1,
 	BRCMS_SROM_PA2GW2A2,
 	BRCMS_SROM_PA2GW2A3,
-	BRCMS_SROM_PA2GW3A0,
-	BRCMS_SROM_PA2GW3A1,
-	BRCMS_SROM_PA2GW3A2,
-	BRCMS_SROM_PA2GW3A3,
 	BRCMS_SROM_PA5GHW0A0,
 	BRCMS_SROM_PA5GHW0A1,
 	BRCMS_SROM_PA5GHW0A2,
@@ -241,10 +222,6 @@
 	BRCMS_SROM_PA5GHW2A1,
 	BRCMS_SROM_PA5GHW2A2,
 	BRCMS_SROM_PA5GHW2A3,
-	BRCMS_SROM_PA5GHW3A0,
-	BRCMS_SROM_PA5GHW3A1,
-	BRCMS_SROM_PA5GHW3A2,
-	BRCMS_SROM_PA5GHW3A3,
 	BRCMS_SROM_PA5GLW0A0,
 	BRCMS_SROM_PA5GLW0A1,
 	BRCMS_SROM_PA5GLW0A2,
@@ -257,10 +234,6 @@
 	BRCMS_SROM_PA5GLW2A1,
 	BRCMS_SROM_PA5GLW2A2,
 	BRCMS_SROM_PA5GLW2A3,
-	BRCMS_SROM_PA5GLW3A0,
-	BRCMS_SROM_PA5GLW3A1,
-	BRCMS_SROM_PA5GLW3A2,
-	BRCMS_SROM_PA5GLW3A3,
 	BRCMS_SROM_PA5GW0A0,
 	BRCMS_SROM_PA5GW0A1,
 	BRCMS_SROM_PA5GW0A2,
@@ -273,14 +246,9 @@
 	BRCMS_SROM_PA5GW2A1,
 	BRCMS_SROM_PA5GW2A2,
 	BRCMS_SROM_PA5GW2A3,
-	BRCMS_SROM_PA5GW3A0,
-	BRCMS_SROM_PA5GW3A1,
-	BRCMS_SROM_PA5GW3A2,
-	BRCMS_SROM_PA5GW3A3,
 };
 
 #define	BRCMS_NUMRATES	16	/* max # of rates in a rateset */
-#define	D11_PHY_HDR_LEN	6	/* Phy header length - 6 bytes */
 
 /* phy types */
 #define	PHY_TYPE_A	0	/* Phy type A */
@@ -414,7 +382,6 @@
 	uint _nbands;		/* # bands supported */
 	uint now;		/* # elapsed seconds */
 
-	bool promisc;		/* promiscuous destination address */
 	bool delayed_down;	/* down delayed */
 	bool associated;	/* true:part of [I]BSS, false: not */
 	/* (union of stas_associated, aps_associated) */
@@ -564,15 +531,14 @@
 
 /* common functions for every port */
 extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
-	       bool piomode, void __iomem *regsva, struct pci_dev *btparam,
-	       uint *perr);
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+	       bool piomode, uint *perr);
 extern uint brcms_c_detach(struct brcms_c_info *wlc);
 extern int brcms_c_up(struct brcms_c_info *wlc);
 extern uint brcms_c_down(struct brcms_c_info *wlc);
 
 extern bool brcms_c_chipmatch(u16 vendor, u16 device);
-extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
 extern void brcms_c_reset(struct brcms_c_info *wlc);
 
 extern void brcms_c_intrson(struct brcms_c_info *wlc);
@@ -628,7 +594,7 @@
 					u8 interval);
 extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
 
 #endif				/* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index e7b9dc2..980d578 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -19,6 +19,7 @@
 
 #include "types.h"
 #include "d11.h"
+#include "phy_hal.h"
 
 extern const u8 rate_info[];
 extern const struct brcms_c_rateset cck_ofdm_mimo_rates;
@@ -198,11 +199,9 @@
 
 /* Convert encoded rate value in plcp header to numerical rates in 500 KHz
  * increments */
-extern const u8 ofdm_rate_lookup[];
-
 static inline u8 ofdm_phy2mac_rate(u8 rlpt)
 {
-	return ofdm_rate_lookup[rlpt & 0x7];
+	return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7];
 }
 
 static inline u8 cck_phy2mac_rate(u8 signal)
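
The rate.h hunk stops exporting ofdm_rate_lookup[] directly and routes callers through wlc_phy_get_ofdm_rate_lookup(), keeping the table private to the PHY code. A small sketch of that accessor pattern follows; the names and table contents are placeholders for illustration, not the driver's actual values.

	#include <stdint.h>

	/* Table kept private to one translation unit; callers use the accessor. */
	static const uint8_t ofdm_rate_table[8] = {
		/* placeholder entries in the same 500 kbps units the driver uses */
		96, 48, 24, 12, 108, 72, 36, 18
	};

	const uint8_t *get_ofdm_rate_table(void)
	{
		return ofdm_rate_table;
	}

	/* Same shape as ofdm_phy2mac_rate() above: mask the PLCP rate code and
	 * index the table through the accessor instead of a global array. */
	static inline uint8_t mock_phy2mac_rate(uint8_t rlpt)
	{
		return get_ofdm_rate_table()[rlpt & 0x7];
	}

	int main(void)
	{
		/* 0x0b & 0x7 == 3, so this selects the fourth placeholder entry. */
		return mock_phy2mac_rate(0x0b) == 12 ? 0 : 1;
	}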
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index 99f7910..6109215 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -28,6 +28,7 @@
 #include "aiutils.h"
 #include "otp.h"
 #include "srom.h"
+#include "soc.h"
 
 /*
  * SROM CRC8 polynomial value:
@@ -62,9 +63,6 @@
 #define	SROM_MACHI_ET1		42
 #define	SROM_MACMID_ET1		43
 #define	SROM_MACLO_ET1		44
-#define	SROM3_MACHI		37
-#define	SROM3_MACMID		38
-#define	SROM3_MACLO		39
 
 #define	SROM_BXARSSI2G		40
 #define	SROM_BXARSSI5G		41
@@ -101,7 +99,6 @@
 
 #define	SROM_BFL		57
 #define	SROM_BFL2		28
-#define	SROM3_BFL2		61
 
 #define	SROM_AG10		58
 
@@ -109,99 +106,16 @@
 
 #define	SROM_OPO		60
 
-#define	SROM3_LEDDC		62
-
 #define	SROM_CRCREV		63
 
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
 #define	SROM4_WORDS		220
 
-#define	SROM4_SIGN		32
-#define	SROM4_SIGNATURE		0x5372
-
-#define	SROM4_BREV		33
-
-#define	SROM4_BFL0		34
-#define	SROM4_BFL1		35
-#define	SROM4_BFL2		36
-#define	SROM4_BFL3		37
-#define	SROM5_BFL0		37
-#define	SROM5_BFL1		38
-#define	SROM5_BFL2		39
-#define	SROM5_BFL3		40
-
-#define	SROM4_MACHI		38
-#define	SROM4_MACMID		39
-#define	SROM4_MACLO		40
-#define	SROM5_MACHI		41
-#define	SROM5_MACMID		42
-#define	SROM5_MACLO		43
-
-#define	SROM4_CCODE		41
-#define	SROM4_REGREV		42
-#define	SROM5_CCODE		34
-#define	SROM5_REGREV		35
-
-#define	SROM4_LEDBH10		43
-#define	SROM4_LEDBH32		44
-#define	SROM5_LEDBH10		59
-#define	SROM5_LEDBH32		60
-
-#define	SROM4_LEDDC		45
-#define	SROM5_LEDDC		45
-
-#define	SROM4_AA		46
-
-#define	SROM4_AG10		47
-#define	SROM4_AG32		48
-
-#define	SROM4_TXPID2G		49
-#define	SROM4_TXPID5G		51
-#define	SROM4_TXPID5GL		53
-#define	SROM4_TXPID5GH		55
-
-#define SROM4_TXRXC		61
 #define SROM4_TXCHAIN_MASK	0x000f
-#define SROM4_TXCHAIN_SHIFT	0
 #define SROM4_RXCHAIN_MASK	0x00f0
-#define SROM4_RXCHAIN_SHIFT	4
 #define SROM4_SWITCH_MASK	0xff00
-#define SROM4_SWITCH_SHIFT	8
 
 /* Per-path fields */
 #define	MAX_PATH_SROM		4
-#define	SROM4_PATH0		64
-#define	SROM4_PATH1		87
-#define	SROM4_PATH2		110
-#define	SROM4_PATH3		133
-
-#define	SROM4_2G_ITT_MAXP	0
-#define	SROM4_2G_PA		1
-#define	SROM4_5G_ITT_MAXP	5
-#define	SROM4_5GLH_MAXP		6
-#define	SROM4_5G_PA		7
-#define	SROM4_5GL_PA		11
-#define	SROM4_5GH_PA		15
-
-/* All the miriad power offsets */
-#define	SROM4_2G_CCKPO		156
-#define	SROM4_2G_OFDMPO		157
-#define	SROM4_5G_OFDMPO		159
-#define	SROM4_5GL_OFDMPO	161
-#define	SROM4_5GH_OFDMPO	163
-#define	SROM4_2G_MCSPO		165
-#define	SROM4_5G_MCSPO		173
-#define	SROM4_5GL_MCSPO		181
-#define	SROM4_5GH_MCSPO		189
-#define	SROM4_CDDPO		197
-#define	SROM4_STBCPO		198
-#define	SROM4_BW40PO		199
-#define	SROM4_BWDUPPO		200
 
 #define	SROM4_CRCREV		219
 
@@ -424,103 +338,32 @@
 static const struct brcms_sromvar pci_sromvars[] = {
 	{BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
 	 0xffff},
-	{BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV,
-	 SROM_BR_MASK},
-	{BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
 	{BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
-	 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
-	 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0,
-	 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0,
-	 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff},
 	{BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
 	 0xffff},
 	{BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2,
-	 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff},
-	{BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2,
-	 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff},
 	{BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
 	 0xffff},
 	{BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
 	{BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
-	{BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
-	{BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff},
-	{BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff},
-	{BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff},
 	{BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
-	{BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
-	{BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00},
-	{BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff},
-	{BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff},
 	{BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
-	{BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
-	{BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
-	{BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
-	{BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
-	{BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
-	{BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
-	{BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
-	{BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
-	{BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
-	{BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
-	{BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
-	{BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
 	{BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
 	{BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
 	{BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
 	{BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
-	{BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
-	{BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
-	{BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
-	{BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff},
-	{BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
 	{BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
 	{BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
 	{BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
 	{BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
 	{BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
-	{BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff},
 	{BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
-	{BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
-	{BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff},
 	{BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
-	{BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
-	{BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00},
 	{BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
-	{BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff},
-	{BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00},
-	{BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff},
-	{BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00},
-	{BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff},
-	{BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00},
 	{BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
 	{BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
 	{BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
 	{BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
-	{BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
-	{BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
-	{BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
-	{BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
-	{BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
-	{BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
-	{BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
-	{BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
-	{BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
-	{BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00},
-	{BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
-	{BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
-	{BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
 	{BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
 	{BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
 	{BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
@@ -534,40 +377,20 @@
 	{BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
 	{BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
 	{BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
-	{BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
-	{BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
-	{BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
-	{BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
 	{BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
 	{BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
 	{BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
 	{BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
-	{BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
-	{BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
-	{BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
-	{BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
 	{BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
 	{BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
 	{BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
 	{BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
-	{BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff},
-	{BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00},
-	{BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
-	{BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00},
 	{BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
 	{BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
 	{BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
 	{BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
-	{BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
-	{BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
 	{BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
 	{BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
-	{BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
-	 SROM4_TXCHAIN_MASK},
-	{BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
-	 SROM4_RXCHAIN_MASK},
-	{BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
-	 SROM4_SWITCH_MASK},
 	{BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
 	 SROM4_TXCHAIN_MASK},
 	{BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
@@ -594,43 +417,11 @@
 	 SROM8_FEM_ANTSWLUT_MASK},
 	{BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
 	{BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
-	{BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
-	{BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
-	{BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
-	{BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
-	{BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
-	{BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
-	{BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
-	{BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
-	{BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
-	{BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
-	{BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
-	{BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
-	{BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
-	{BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
-	{BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
-	{BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
 
-	{BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
-	{BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
-	{BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
 	{BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
 	{BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
-	{BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
-	{BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
-	{BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
-	{BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0,
-	 0xffff},
-	{BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1,
-	 0xffff},
 	{BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
 	 0xffff},
-	{BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC,
-	 0xffff},
-	{BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC,
-	 0xffff},
-	{BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC,
-	 0xffff},
 	{BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
 	 0x01ff},
 	{BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
@@ -650,16 +441,7 @@
 	{BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
 	 0x00ff},
 
-	{BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
 	{BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
-	{BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
-	{BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
-	{BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
-	{BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
-	{BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
 	{BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
 	{BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
 	{BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
@@ -668,38 +450,6 @@
 	{BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
 	{BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
 	{BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
-	{BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
-	{BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
-	{BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
-	{BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
-	{BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
-	{BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
-	{BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
-	{BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
-	{BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
-	{BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
-	{BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
-	{BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
-	{BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
-	{BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
-	{BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
-	{BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
-	{BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
-	{BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
-	{BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
-	{BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
-	{BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
-	{BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
-	{BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
-	{BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
-	{BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
-	{BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
-	{BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
-	{BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
-	{BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
-	{BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
-	{BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
-	{BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
 	{BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
 	{BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
 	{BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
@@ -732,10 +482,6 @@
 	{BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
 	{BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
 	{BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
-	{BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff},
-	{BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff},
-	{BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff},
-	{BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
 	{BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
 	{BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
 	{BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
@@ -811,34 +557,6 @@
 };
 
 static const struct brcms_sromvar perpath_pci_sromvars[] = {
-	{BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
-	{BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
-	{BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
-	{BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
-	{BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
-	{BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
-	{BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
-	{BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
-	{BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
-	{BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
-	{BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
-	{BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
-	{BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
-	{BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
-	{BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
-	{BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1,
-	 0xffff},
-	{BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2,
-	 0xffff},
-	{BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3,
-	 0xffff},
-	{BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
-	{BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1,
-	 0xffff},
-	{BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2,
-	 0xffff},
-	{BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3,
-	 0xffff},
 	{BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
 	{BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
 	{BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
@@ -868,24 +586,6 @@
  * shared between devices. */
 static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
 
-static u16 __iomem *
-srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
-{
-	if (sih->ccrev < 32)
-		return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET);
-	if (sih->cccaps & CC_CAP_SROM)
-		return (u16 __iomem *)
-		       (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP);
-
-	return NULL;
-}
-
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
 static uint mask_shift(u16 mask)
 {
 	uint i;
@@ -906,18 +606,16 @@
 	return 0;
 }
 
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
+static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
 {
-	size /= 2;
-	while (size--)
-		*(buf + size) = le16_to_cpu(*(__le16 *)(buf + size));
+	while (nwords--)
+		*(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
 }
 
-static inline void htol16_buf(u16 *buf, unsigned int size)
+static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
 {
-	size /= 2;
-	while (size--)
-		*(__le16 *)(buf + size) = cpu_to_le16(*(buf + size));
+	while (nwords--)
+		*(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
 }
 
 /*
@@ -929,11 +627,14 @@
 	struct brcms_srom_list_head *entry;
 	enum brcms_srom_id id;
 	u16 w;
-	u32 val;
+	u32 val = 0;
 	const struct brcms_sromvar *srv;
 	uint width;
 	uint flags;
 	u32 sr = (1 << sromrev);
+	uint p;
+	uint pb =  SROM8_PATH0;
+	const uint psz = SROM8_PATH1 - SROM8_PATH0;
 
 	/* first store the srom revision */
 	entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
@@ -1031,47 +732,34 @@
 		list_add(&entry->var_list, var_list);
 	}
 
-	if (sromrev >= 4) {
-		/* Do per-path variables */
-		uint p, pb, psz;
+	for (p = 0; p < MAX_PATH_SROM; p++) {
+		for (srv = perpath_pci_sromvars;
+		     srv->varid != BRCMS_SROM_NULL; srv++) {
+			if ((srv->revmask & sr) == 0)
+				continue;
 
-		if (sromrev >= 8) {
-			pb = SROM8_PATH0;
-			psz = SROM8_PATH1 - SROM8_PATH0;
-		} else {
-			pb = SROM4_PATH0;
-			psz = SROM4_PATH1 - SROM4_PATH0;
+			if (srv->flags & SRFL_NOVAR)
+				continue;
+
+			w = srom[pb + srv->off];
+			val = (w & srv->mask) >> mask_shift(srv->mask);
+			width = mask_width(srv->mask);
+
+			/* Cheating: no per-path var is more than
+			 * 1 word */
+			if ((srv->flags & SRFL_NOFFS)
+			    && ((int)val == (1 << width) - 1))
+				continue;
+
+			entry =
+			    kzalloc(sizeof(struct brcms_srom_list_head),
+				    GFP_KERNEL);
+			entry->varid = srv->varid+p;
+			entry->var_type = BRCMS_SROM_UNUMBER;
+			entry->uval = val;
+			list_add(&entry->var_list, var_list);
 		}
-
-		for (p = 0; p < MAX_PATH_SROM; p++) {
-			for (srv = perpath_pci_sromvars;
-			     srv->varid != BRCMS_SROM_NULL; srv++) {
-				if ((srv->revmask & sr) == 0)
-					continue;
-
-				if (srv->flags & SRFL_NOVAR)
-					continue;
-
-				w = srom[pb + srv->off];
-				val = (w & srv->mask) >> mask_shift(srv->mask);
-				width = mask_width(srv->mask);
-
-				/* Cheating: no per-path var is more than
-				 * 1 word */
-				if ((srv->flags & SRFL_NOFFS)
-				    && ((int)val == (1 << width) - 1))
-					continue;
-
-				entry =
-				    kzalloc(sizeof(struct brcms_srom_list_head),
-					    GFP_KERNEL);
-				entry->varid = srv->varid+p;
-				entry->var_type = BRCMS_SROM_UNUMBER;
-				entry->uval = val;
-				list_add(&entry->var_list, var_list);
-			}
-			pb += psz;
-		}
+		pb += psz;
 	}
 }
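+
+/*
+ * Worked example for the mask/shift handling above (illustrative only):
+ * for a per-path variable with mask 0xff00 stored in word w = 0x4a3c,
+ * mask_shift() yields 8 and mask_width() yields 8, so
+ *
+ *	val = (0x4a3c & 0xff00) >> 8 = 0x4a
+ *
+ * and an SRFL_NOFFS variable would only be skipped if val were 0xff,
+ * i.e. (1 << width) - 1.
+ */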
 
@@ -1080,41 +768,48 @@
  * Return 0 on success, nonzero on error.
  */
 static int
-sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff,
-	       u16 *buf, uint nwords, bool check_crc)
+sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
 {
 	int err = 0;
 	uint i;
+	u8 *bbuf = (u8 *)buf; /* byte buffer */
+	uint nbytes = nwords << 1;
+	struct bcma_device *core;
+	uint sprom_offset;
 
-	/* read the sprom */
-	for (i = 0; i < nwords; i++)
-		buf[i] = R_REG(&sprom[wordoff + i]);
-
-	if (check_crc) {
-
-		if (buf[0] == 0xffff)
-			/*
-			 * The hardware thinks that an srom that starts with
-			 * 0xffff is blank, regardless of the rest of the
-			 * content, so declare it bad.
-			 */
-			return -ENODATA;
-
-		/* fixup the endianness so crc8 will pass */
-		htol16_buf(buf, nwords * 2);
-		if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2,
-			 CRC8_INIT_VALUE) !=
-			 CRC8_GOOD_VALUE(brcms_srom_crc8_table))
-			/* DBG only pci always read srom4 first, then srom8/9 */
-			err = -EIO;
-
-		/* now correct the endianness of the byte array */
-		ltoh16_buf(buf, nwords * 2);
+	/* determine core to read */
+	if (ai_get_ccrev(sih) < 32) {
+		core = ai_findcore(sih, BCMA_CORE_80211, 0);
+		sprom_offset = PCI_BAR0_SPROM_OFFSET;
+	} else {
+		core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+		sprom_offset = CHIPCREGOFFS(sromotp);
 	}
+
+	/* read the sprom in bytes */
+	for (i = 0; i < nbytes; i++)
+		bbuf[i] = bcma_read8(core, sprom_offset+i);
+
+	if (buf[0] == 0xffff)
+		/*
+		 * The hardware thinks that an srom that starts with
+		 * 0xffff is blank, regardless of the rest of the
+		 * content, so declare it bad.
+		 */
+		return -ENODATA;
+
+	if (check_crc &&
+	    crc8(brcms_srom_crc8_table, bbuf, nbytes, CRC8_INIT_VALUE) !=
+		 CRC8_GOOD_VALUE(brcms_srom_crc8_table))
+		err = -EIO;
+	else
+		/* now correct the endianness of the byte array */
+		le16_to_cpu_buf(buf, nwords);
+
 	return err;
 }
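+
+/*
+ * Usage sketch for the CRC check above (illustrative, assuming the kernel's
+ * <linux/crc8.h> helpers): the shared table is filled once with
+ * crc8_populate_lsb() and the little-endian SROM image is validated before
+ * being byte-swapped to host order:
+ *
+ *	crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
+ *	if (crc8(brcms_srom_crc8_table, bbuf, nbytes, CRC8_INIT_VALUE) !=
+ *	    CRC8_GOOD_VALUE(brcms_srom_crc8_table))
+ *		err = -EIO;
+ *	else
+ *		le16_to_cpu_buf(buf, nwords);
+ */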
 
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
 {
 	u8 *otp;
 	uint sz = OTP_SZ_MAX / 2;	/* size in words */
@@ -1126,7 +821,8 @@
 
 	err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
 
-	memcpy(buf, otp, bufsz);
+	sz = min_t(uint, sz, nwords);
+	memcpy(buf, otp, sz * 2);
 
 	kfree(otp);
 
@@ -1139,13 +835,13 @@
 		return -ENODATA;
 
 	/* fixup the endianness so crc8 will pass */
-	htol16_buf(buf, bufsz);
-	if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2,
+	cpu_to_le16_buf(buf, sz);
+	if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
 		 CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
 		err = -EIO;
-
-	/* now correct the endianness of the byte array */
-	ltoh16_buf(buf, bufsz);
+	else
+		/* now correct the endianness of the byte array */
+		le16_to_cpu_buf(buf, sz);
 
 	return err;
 }
@@ -1154,10 +850,9 @@
  * Initialize nonvolatile variable table from sprom.
  * Return 0 on success, nonzero on error.
  */
-static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+int srom_var_init(struct si_pub *sih)
 {
 	u16 *srom;
-	u16 __iomem *sromwindow;
 	u8 sromrev = 0;
 	u32 sr;
 	int err = 0;
@@ -1169,33 +864,17 @@
 	if (!srom)
 		return -ENOMEM;
 
-	sromwindow = srom_window_address(sih, curmap);
-
 	crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
 	if (ai_is_sprom_available(sih)) {
-		err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
-				     true);
+		err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
 
-		if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
-		    (((sih->buscoretype == PCIE_CORE_ID)
-		      && (sih->buscorerev >= 6))
-		     || ((sih->buscoretype == PCI_CORE_ID)
-			 && (sih->buscorerev >= 0xe)))) {
-			/* sromrev >= 4, read more */
-			err = sprom_read_pci(sih, sromwindow, 0, srom,
-					     SROM4_WORDS, true);
-			sromrev = srom[SROM4_CRCREV] & 0xff;
-		} else if (err == 0) {
-			/* srom is good and is rev < 4 */
+		if (err == 0)
+			/* srom read and passed crc */
 			/* top word of sprom contains version and crc8 */
-			sromrev = srom[SROM_CRCREV] & 0xff;
-			/* bcm4401 sroms misprogrammed */
-			if (sromrev == 0x10)
-				sromrev = 1;
-		}
+			sromrev = srom[SROM4_CRCREV] & 0xff;
 	} else {
 		/* Use OTP if SPROM not available */
-		err = otp_read_pci(sih, srom, SROM_MAX);
+		err = otp_read_pci(sih, srom, SROM4_WORDS);
 		if (err == 0)
 			/* OTP only contain SROM rev8/rev9 for now */
 			sromrev = srom[SROM4_CRCREV] & 0xff;
@@ -1208,10 +887,9 @@
 		sr = 1 << sromrev;
 
 		/*
-		 * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8,
-		 * 9
+		 * srom version check: Current valid versions: 8, 9
 		 */
-		if ((sr & 0x33e) == 0) {
+		if ((sr & 0x300) == 0) {
 			err = -EINVAL;
 			goto errout;
 		}
@@ -1238,21 +916,6 @@
 		kfree(entry);
 	}
 }
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, void __iomem *curmap)
-{
-	uint len;
-
-	len = 0;
-
-	if (curmap != NULL)
-		return initvars_srom_pci(sih, curmap);
-
-	return -EINVAL;
-}
 
 /*
  * Search the name=value vars for a specific one and return its value.
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index 708c43f..f2a58f2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,15 +20,10 @@
 #include "types.h"
 
 /* Prototypes */
-extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern int srom_var_init(struct si_pub *sih);
 extern void srom_free_vars(struct si_pub *sih);
 
 extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
 		     uint byteoff, uint nbytes, u16 *buf, bool check_crc);
 
-/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
- *   and extract from it into name=value pairs
- */
-extern int srom_parsecis(u8 **pcis, uint ciscnt,
-			 char **vars, uint *count);
 #endif				/* _BRCM_SROM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index 27a814b..e11ae83 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -250,66 +250,18 @@
 		wiphy_err(dev, "%s: " fmt, __func__, ##args);	\
 } while (0)
 
-/*
- * Register access macros.
- *
- * These macro's take a pointer to the address to read as one of their
- * arguments. The macro itself deduces the size of the IO transaction (u8, u16
- * or u32). Advantage of this approach in combination with using a struct to
- * define the registers in a register block, is that access size and access
- * location are defined in only one spot. This reduces the risk of the
- * programmer trying to use an unsupported transaction size on a register.
- *
- */
-
-#define R_REG(r) \
-	({ \
-		__typeof(*(r)) __osl_v; \
-		switch (sizeof(*(r))) { \
-		case sizeof(u8): \
-			__osl_v = readb((u8 __iomem *)(r)); \
-			break; \
-		case sizeof(u16): \
-			__osl_v = readw((u16 __iomem *)(r)); \
-			break; \
-		case sizeof(u32): \
-			__osl_v = readl((u32 __iomem *)(r)); \
-			break; \
-		} \
-		__osl_v; \
-	})
-
-#define W_REG(r, v) do { \
-		switch (sizeof(*(r))) { \
-		case sizeof(u8):	\
-			writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
-			break; \
-		case sizeof(u16):	\
-			writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
-			break; \
-		case sizeof(u32):	\
-			writel((u32)(v), (u32 __iomem *)(r)); \
-			break; \
-		} \
-	} while (0)
-
 #ifdef CONFIG_BCM47XX
 /*
  * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
  * transactions. As a fix, a read after write is performed on certain places
  * in the code. Older chips and the newer 5357 family don't require this fix.
  */
-#define W_REG_FLUSH(r, v)	({ W_REG((r), (v)); (void)R_REG(r); })
+#define bcma_wflush16(c, o, v) \
+	({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
 #else
-#define W_REG_FLUSH(r, v)	W_REG((r), (v))
+#define bcma_wflush16(c, o, v)	bcma_write16(c, o, v)
 #endif				/* CONFIG_BCM47XX */
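+
+/*
+ * Usage sketch (illustrative): a posted write that must reach the chip
+ * before execution continues goes through the flushing variant, e.g.
+ *
+ *	bcma_wflush16(core, offset, val);
+ *
+ * where 'core', 'offset' and 'val' are placeholders for the bcma core,
+ * register offset and value used at the call site.
+ */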
 
-#define AND_REG(r, v)	W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v)	W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
-		W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
 /* multi-bool data type: set of bools, mbool is true if any is set */
 
 /* set one bool */
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index f27c489..b7537f7 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -16,6 +16,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
+
 #include <brcmu_utils.h>
 
 MODULE_AUTHOR("Broadcom Corporation");
@@ -40,74 +41,20 @@
 /* Free the driver packet. Free the tag if present */
 void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
 {
-	struct sk_buff *nskb;
-	int nest = 0;
-
-	/* perversion: we use skb->next to chain multi-skb packets */
-	while (skb) {
-		nskb = skb->next;
-		skb->next = NULL;
-
-		if (skb->destructor)
-			/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
-			 * destructor exists
-			 */
-			dev_kfree_skb_any(skb);
-		else
-			/* can free immediately (even in_irq()) if destructor
-			 * does not exist
-			 */
-			dev_kfree_skb(skb);
-
-		nest++;
-		skb = nskb;
-	}
+	WARN_ON(skb->next);
+	if (skb->destructor)
+		/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+		 * destructor exists
+		 */
+		dev_kfree_skb_any(skb);
+	else
+		/* can free immediately (even in_irq()) if destructor
+		 * does not exist
+		 */
+		dev_kfree_skb(skb);
 }
 EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
 
-
-/* copy a buffer into a pkt buffer chain */
-uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
-		unsigned char *buf)
-{
-	uint n, ret = 0;
-
-	/* skip 'offset' bytes */
-	for (; p && offset; p = p->next) {
-		if (offset < (uint) (p->len))
-			break;
-		offset -= p->len;
-	}
-
-	if (!p)
-		return 0;
-
-	/* copy the data */
-	for (; p && len; p = p->next) {
-		n = min((uint) (p->len) - offset, (uint) len);
-		memcpy(p->data + offset, buf, n);
-		buf += n;
-		len -= n;
-		ret += n;
-		offset = 0;
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(brcmu_pktfrombuf);
-
-/* return total length of buffer chain */
-uint brcmu_pkttotlen(struct sk_buff *p)
-{
-	uint total;
-
-	total = 0;
-	for (; p; p = p->next)
-		total += p->len;
-	return total;
-}
-EXPORT_SYMBOL(brcmu_pkttotlen);
-
 /*
  * osl multiple-precedence packet queue
  * hi_prec is always >= the number of the highest non-empty precedence
@@ -115,21 +62,13 @@
 struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
 				      struct sk_buff *p)
 {
-	struct pktq_prec *q;
+	struct sk_buff_head *q;
 
 	if (pktq_full(pq) || pktq_pfull(pq, prec))
 		return NULL;
 
-	q = &pq->q[prec];
-
-	if (q->head)
-		q->tail->prev = p;
-	else
-		q->head = p;
-
-	q->tail = p;
-	q->len++;
-
+	q = &pq->q[prec].skblist;
+	skb_queue_tail(q, p);
 	pq->len++;
 
 	if (pq->hi_prec < prec)
@@ -142,20 +81,13 @@
 struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
 					   struct sk_buff *p)
 {
-	struct pktq_prec *q;
+	struct sk_buff_head *q;
 
 	if (pktq_full(pq) || pktq_pfull(pq, prec))
 		return NULL;
 
-	q = &pq->q[prec];
-
-	if (q->head == NULL)
-		q->tail = p;
-
-	p->prev = q->head;
-	q->head = p;
-	q->len++;
-
+	q = &pq->q[prec].skblist;
+	skb_queue_head(q, p);
 	pq->len++;
 
 	if (pq->hi_prec < prec)
@@ -167,53 +99,30 @@
 
 struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
 {
-	struct pktq_prec *q;
+	struct sk_buff_head *q;
 	struct sk_buff *p;
 
-	q = &pq->q[prec];
-
-	p = q->head;
+	q = &pq->q[prec].skblist;
+	p = skb_dequeue(q);
 	if (p == NULL)
 		return NULL;
 
-	q->head = p->prev;
-	if (q->head == NULL)
-		q->tail = NULL;
-
-	q->len--;
-
 	pq->len--;
-
-	p->prev = NULL;
-
 	return p;
 }
 EXPORT_SYMBOL(brcmu_pktq_pdeq);
 
 struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
 {
-	struct pktq_prec *q;
-	struct sk_buff *p, *prev;
+	struct sk_buff_head *q;
+	struct sk_buff *p;
 
-	q = &pq->q[prec];
-
-	p = q->head;
+	q = &pq->q[prec].skblist;
+	p = skb_dequeue_tail(q);
 	if (p == NULL)
 		return NULL;
 
-	for (prev = NULL; p != q->tail; p = p->prev)
-		prev = p;
-
-	if (prev)
-		prev->prev = NULL;
-	else
-		q->head = NULL;
-
-	q->tail = prev;
-	q->len--;
-
 	pq->len--;
-
 	return p;
 }
 EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
@@ -222,31 +131,17 @@
 brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
 		  bool (*fn)(struct sk_buff *, void *), void *arg)
 {
-	struct pktq_prec *q;
-	struct sk_buff *p, *prev = NULL;
+	struct sk_buff_head *q;
+	struct sk_buff *p, *next;
 
-	q = &pq->q[prec];
-	p = q->head;
-	while (p) {
+	q = &pq->q[prec].skblist;
+	skb_queue_walk_safe(q, p, next) {
 		if (fn == NULL || (*fn) (p, arg)) {
-			bool head = (p == q->head);
-			if (head)
-				q->head = p->prev;
-			else
-				prev->prev = p->prev;
-			p->prev = NULL;
+			skb_unlink(p, q);
 			brcmu_pkt_buf_free_skb(p);
-			q->len--;
 			pq->len--;
-			p = (head ? q->head : prev->prev);
-		} else {
-			prev = p;
-			p = p->prev;
 		}
 	}
-
-	if (q->head == NULL)
-		q->tail = NULL;
 }
 EXPORT_SYMBOL(brcmu_pktq_pflush);
 
@@ -271,8 +166,10 @@
 
 	pq->max = (u16) max_len;
 
-	for (prec = 0; prec < num_prec; prec++)
+	for (prec = 0; prec < num_prec; prec++) {
 		pq->q[prec].max = pq->max;
+		skb_queue_head_init(&pq->q[prec].skblist);
+	}
 }
 EXPORT_SYMBOL(brcmu_pktq_init);
 
@@ -284,13 +181,13 @@
 		return NULL;
 
 	for (prec = 0; prec < pq->hi_prec; prec++)
-		if (pq->q[prec].head)
+		if (!skb_queue_empty(&pq->q[prec].skblist))
 			break;
 
 	if (prec_out)
 		*prec_out = prec;
 
-	return pq->q[prec].tail;
+	return skb_peek_tail(&pq->q[prec].skblist);
 }
 EXPORT_SYMBOL(brcmu_pktq_peek_tail);
 
@@ -303,7 +200,7 @@
 
 	for (prec = 0; prec <= pq->hi_prec; prec++)
 		if (prec_bmp & (1 << prec))
-			len += pq->q[prec].len;
+			len += pq->q[prec].skblist.qlen;
 
 	return len;
 }
@@ -313,39 +210,32 @@
 struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
 				      int *prec_out)
 {
-	struct pktq_prec *q;
+	struct sk_buff_head *q;
 	struct sk_buff *p;
 	int prec;
 
 	if (pq->len == 0)
 		return NULL;
 
-	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+	while ((prec = pq->hi_prec) > 0 &&
+	       skb_queue_empty(&pq->q[prec].skblist))
 		pq->hi_prec--;
 
-	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+	while ((prec_bmp & (1 << prec)) == 0 ||
+	       skb_queue_empty(&pq->q[prec].skblist))
 		if (prec-- == 0)
 			return NULL;
 
-	q = &pq->q[prec];
-
-	p = q->head;
+	q = &pq->q[prec].skblist;
+	p = skb_dequeue(q);
 	if (p == NULL)
 		return NULL;
 
-	q->head = p->prev;
-	if (q->head == NULL)
-		q->tail = NULL;
-
-	q->len--;
+	pq->len--;
 
 	if (prec_out)
 		*prec_out = prec;
 
-	pq->len--;
-
-	p->prev = NULL;
-
 	return p;
 }
 EXPORT_SYMBOL(brcmu_pktq_mdeq);
@@ -364,23 +254,3 @@
 }
 EXPORT_SYMBOL(brcmu_prpkt);
 #endif				/* defined(BCMDBG) */
-
-#if defined(BCMDBG)
-/*
- * print bytes formatted as hex to a string. return the resulting
- * string length
- */
-int brcmu_format_hex(char *str, const void *bytes, int len)
-{
-	int i;
-	char *p = str;
-	const u8 *src = (const u8 *)bytes;
-
-	for (i = 0; i < len; i++) {
-		p += snprintf(p, 3, "%02X", *src);
-		src++;
-	}
-	return (int)(p - str);
-}
-EXPORT_SYMBOL(brcmu_format_hex);
-#endif				/* defined(BCMDBG) */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 7d0f46e..ad249a0 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -65,9 +65,7 @@
 #define ETHER_ADDR_STR_LEN	18
 
 struct pktq_prec {
-	struct sk_buff *head;	/* first packet to dequeue */
-	struct sk_buff *tail;	/* last packet to dequeue */
-	u16 len;		/* number of queued packets */
+	struct sk_buff_head skblist;
 	u16 max;		/* maximum number of queued packets */
 };
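+
+/*
+ * Illustrative mapping (sketch, assuming the standard <linux/skbuff.h> API);
+ * 'pp' is a hypothetical struct pktq_prec pointer:
+ *
+ *	skb_queue_head_init(&pp->skblist);	replaces head = tail = NULL, len = 0
+ *	skb_queue_tail(&pp->skblist, skb);	replaces the tail insert and len++
+ *	skb = skb_dequeue(&pp->skblist);	replaces the head removal and len--
+ *	pp->skblist.qlen			replaces pp->len
+ */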
 
@@ -88,32 +86,32 @@
 
 static inline int pktq_plen(struct pktq *pq, int prec)
 {
-	return pq->q[prec].len;
+	return pq->q[prec].skblist.qlen;
 }
 
 static inline int pktq_pavail(struct pktq *pq, int prec)
 {
-	return pq->q[prec].max - pq->q[prec].len;
+	return pq->q[prec].max - pq->q[prec].skblist.qlen;
 }
 
 static inline bool pktq_pfull(struct pktq *pq, int prec)
 {
-	return pq->q[prec].len >= pq->q[prec].max;
+	return pq->q[prec].skblist.qlen >= pq->q[prec].max;
 }
 
 static inline bool pktq_pempty(struct pktq *pq, int prec)
 {
-	return pq->q[prec].len == 0;
+	return skb_queue_empty(&pq->q[prec].skblist);
 }
 
 static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
 {
-	return pq->q[prec].head;
+	return skb_peek(&pq->q[prec].skblist);
 }
 
 static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
 {
-	return pq->q[prec].tail;
+	return skb_peek_tail(&pq->q[prec].skblist);
 }
 
 extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
@@ -172,24 +170,16 @@
 		bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
-/* packet */
-extern uint brcmu_pktfrombuf(struct sk_buff *p,
-	uint offset, int len, unsigned char *buf);
-extern uint brcmu_pkttotlen(struct sk_buff *p);
-
 /* ip address */
 struct ipv4_addr;
 
+
+/* externs */
+/* format/print */
 #ifdef BCMDBG
 extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif				/* BCMDBG */
 
-/* externs */
-/* format/print */
-#if defined(BCMDBG)
-extern int brcmu_format_hex(char *str, const void *bytes, int len);
-#endif
-
 #endif				/* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index fefabc3..f96834a 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,6 +19,8 @@
 
 #include "defs.h"		/* for PAD macro */
 
+#define CHIPCREGOFFS(field)	offsetof(struct chipcregs, field)
+
 struct chipcregs {
 	u32 chipid;		/* 0x0 */
 	u32 capabilities;
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index 1e5f310..f0d8c04 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -62,7 +62,6 @@
 
 #define WL_RADIO_SW_DISABLE		(1<<0)
 #define WL_RADIO_HW_DISABLE		(1<<1)
-#define WL_RADIO_MPC_DISABLE		(1<<2)
 /* some countries don't support any channel */
 #define WL_RADIO_COUNTRY_DISABLE	(1<<3)
 
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4fcb956..4e9b7e4 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -77,8 +77,9 @@
 #define	DMEMS_CORE_ID		0x835	/* SDR/DDR1 memory controller core */
 #define	DEF_SHIM_COMP		0x837	/* SHIM component in ubus/6362 */
 #define OOB_ROUTER_CORE_ID	0x367	/* OOB router core ID */
-/* Default component, in ai chips it maps all unused address ranges */
-#define	DEF_AI_COMP		0xfff
+#define	DEF_AI_COMP		0xfff	/* Default component, in ai chips it
+					 * maps all unused address ranges
+					 */
 
 /* Common core control flags */
 #define	SICF_BIST_EN		0x8000
@@ -87,4 +88,11 @@
 #define	SICF_FGC		0x0002
 #define	SICF_CLOCK_EN		0x0001
 
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
 #endif				/* _BRCM_SOC_H */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 5441ad1..89e9d3a 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -656,6 +656,9 @@
 		"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
 		0xe6ec52ce, 0x08649af2, 0x4b74baa0),
 	PCMCIA_DEVICE_PROD_ID123(
+		"Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+		0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
+	PCMCIA_DEVICE_PROD_ID123(
 		"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
 		0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
 	PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 045a936..18054d9 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3872,8 +3872,8 @@
 	iface = netdev_priv(dev);
 	local = iface->local;
 
-	strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
-	snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+	strlcpy(info->driver, "hostap", sizeof(info->driver));
+	snprintf(info->fw_version, sizeof(info->fw_version),
 		 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
 		 (local->sta_fw_ver >> 8) & 0xff,
 		 local->sta_fw_ver & 0xff);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 127e9c6..a0e5c21 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -5981,8 +5981,8 @@
 	struct ipw2100_priv *priv = libipw_priv(dev);
 	char fw_ver[64], ucode_ver[64];
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
 	ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
 	ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5990,7 +5990,8 @@
 	snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
 		 fw_ver, priv->eeprom_version, ucode_ver);
 
-	strcpy(info->bus_info, pci_name(priv->pci_dev));
+	strlcpy(info->bus_info, pci_name(priv->pci_dev),
+		sizeof(info->bus_info));
 }
 
 static u32 ipw2100_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 99a710d..018a8de 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -131,6 +131,14 @@
 #define ipw2200_bg_rates	(ipw2200_rates + 0)
 #define ipw2200_num_bg_rates	12
 
+/* Ugly macro to convert literal channel numbers into their MHz equivalents.
+ * There are certainly some conditions that will break this (like feeding it '30'),
+ * but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+	(((x) <= 14) ? \
+	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+	((x) + 1000) * 5)
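+/* Worked examples for the conversion above (illustrative only):
+ * channel 1 -> 2412 MHz, channel 13 -> 2472 MHz, channel 14 -> 2484 MHz,
+ * channel 36 -> (36 + 1000) * 5 = 5180 MHz.
+ */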
+
 #ifdef CONFIG_IPW2200_QOS
 static int qos_enable = 0;
 static int qos_burst_enable = 0;
@@ -10540,8 +10548,8 @@
 	char date[32];
 	u32 len;
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
 	len = sizeof(vers);
 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10550,7 +10558,8 @@
 
 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
 		 vers, date);
-	strcpy(info->bus_info, pci_name(p->pci_dev));
+	strlcpy(info->bus_info, pci_name(p->pci_dev),
+		sizeof(info->bus_info));
 	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
 }
 
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 70f5586..8874588 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -66,16 +66,8 @@
 do { if (libipw_debug_level & (level)) \
   printk(KERN_DEBUG "libipw: %c %s " fmt, \
          in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
-	return (libipw_debug_level & level) && net_ratelimit();
-}
 #else
 #define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
-	return false;
-}
 #endif				/* CONFIG_LIBIPW_DEBUG */
 
 /*
@@ -813,9 +805,6 @@
 	/* WEP and other encryption related settings at the device level */
 	int open_wep;		/* Set to 1 to allow unencrypted frames */
 
-	int reset_on_keychange;	/* Set to 1 if the HW needs to be reset on
-				 * WEP key changes */
-
 	/* If the host performs {en,de}cryption, then set to 1 */
 	int host_encrypt;
 	int host_encrypt_msdu;
@@ -868,7 +857,6 @@
 			      struct libipw_security * sec);
 	netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb,
 					struct net_device * dev, int pri);
-	int (*reset_port) (struct net_device * dev);
 	int (*is_queue_full) (struct net_device * dev, int pri);
 
 	int (*handle_management) (struct net_device * dev,
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 6623e50..1571505 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -474,17 +474,6 @@
 	if (ieee->set_security)
 		ieee->set_security(dev, &sec);
 
-	/* Do not reset port if card is in Managed mode since resetting will
-	 * generate new IEEE 802.11 authentication which may end up in looping
-	 * with IEEE 802.1X.  If your hardware requires a reset after WEP
-	 * configuration (for example... Prism2), implement the reset_port in
-	 * the callbacks structures used to initialize the 802.11 stack. */
-	if (ieee->reset_on_keychange &&
-	    ieee->iw_mode != IW_MODE_INFRA &&
-	    ieee->reset_port && ieee->reset_port(dev)) {
-		printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
-		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -688,20 +677,6 @@
 	if (ieee->set_security)
 		ieee->set_security(ieee->dev, &sec);
 
-	/*
-	 * Do not reset port if card is in Managed mode since resetting will
-	 * generate new IEEE 802.11 authentication which may end up in looping
-	 * with IEEE 802.1X. If your hardware requires a reset after WEP
-	 * configuration (for example... Prism2), implement the reset_port in
-	 * the callbacks structures used to initialize the 802.11 stack.
-	 */
-	if (ieee->reset_on_keychange &&
-	    ieee->iw_mode != IW_MODE_INFRA &&
-	    ieee->reset_port && ieee->reset_port(dev)) {
-		LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name);
-		return -EINVAL;
-	}
-
 	return ret;
 }
 
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 0000000..5e1a19f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+	int p = 0;
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+		       le32_to_cpu(il->_3945.stats.flag));
+	if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+			       "\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		       (le32_to_cpu(il->_3945.stats.flag) &
+			UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		       (le32_to_cpu(il->_3945.stats.flag) &
+			UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+	return p;
+}
+
+ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz =
+	    sizeof(struct iwl39_stats_rx_phy) * 40 +
+	    sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+	ssize_t ret;
+	struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+	struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+	struct iwl39_stats_rx_non_phy *general, *accum_general;
+	struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	ofdm = &il->_3945.stats.rx.ofdm;
+	cck = &il->_3945.stats.rx.cck;
+	general = &il->_3945.stats.rx.general;
+	accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+	accum_cck = &il->_3945.accum_stats.rx.cck;
+	accum_general = &il->_3945.accum_stats.rx.general;
+	delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+	delta_cck = &il->_3945.delta_stats.rx.cck;
+	delta_general = &il->_3945.delta_stats.rx.general;
+	max_ofdm = &il->_3945.max_delta.rx.ofdm;
+	max_cck = &il->_3945.max_delta.rx.cck;
+	max_general = &il->_3945.max_delta.rx.general;
+
+	pos += il3945_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      " accumulative      delta         max\n",
+		      "Statistics_Rx - OFDM:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "ina_cnt:",
+		      le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+		      delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_cnt:",
+		      le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+		      delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+		      le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+		      delta_ofdm->plcp_err, max_ofdm->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_err:",
+		      le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+		      delta_ofdm->crc32_err, max_ofdm->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+		      le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+		      delta_ofdm->overrun_err, max_ofdm->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "early_overrun_err:",
+		      le32_to_cpu(ofdm->early_overrun_err),
+		      accum_ofdm->early_overrun_err,
+		      delta_ofdm->early_overrun_err,
+		      max_ofdm->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_good:",
+		      le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+		      delta_ofdm->crc32_good, max_ofdm->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+		      le32_to_cpu(ofdm->false_alarm_cnt),
+		      accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+		      max_ofdm->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_sync_err_cnt:",
+		      le32_to_cpu(ofdm->fina_sync_err_cnt),
+		      accum_ofdm->fina_sync_err_cnt,
+		      delta_ofdm->fina_sync_err_cnt,
+		      max_ofdm->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sfd_timeout:",
+		      le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+		      delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_timeout:",
+		      le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+		      delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "unresponded_rts:",
+		      le32_to_cpu(ofdm->unresponded_rts),
+		      accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+		      max_ofdm->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n",
+		      "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+		      accum_ofdm->rxe_frame_limit_overrun,
+		      delta_ofdm->rxe_frame_limit_overrun,
+		      max_ofdm->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_ack_cnt:",
+		      le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+		      delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_cts_cnt:",
+		      le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+		      delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      " accumulative      delta         max\n",
+		      "Statistics_Rx - CCK:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "ina_cnt:",
+		      le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+		      delta_cck->ina_cnt, max_cck->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_cnt:",
+		      le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+		      delta_cck->fina_cnt, max_cck->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+		      le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+		      delta_cck->plcp_err, max_cck->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_err:",
+		      le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+		      delta_cck->crc32_err, max_cck->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+		      le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+		      delta_cck->overrun_err, max_cck->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "early_overrun_err:",
+		      le32_to_cpu(cck->early_overrun_err),
+		      accum_cck->early_overrun_err,
+		      delta_cck->early_overrun_err, max_cck->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_good:",
+		      le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+		      delta_cck->crc32_good, max_cck->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+		      le32_to_cpu(cck->false_alarm_cnt),
+		      accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+		      max_cck->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_sync_err_cnt:",
+		      le32_to_cpu(cck->fina_sync_err_cnt),
+		      accum_cck->fina_sync_err_cnt,
+		      delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sfd_timeout:",
+		      le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+		      delta_cck->sfd_timeout, max_cck->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_timeout:",
+		      le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+		      delta_cck->fina_timeout, max_cck->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "unresponded_rts:",
+		      le32_to_cpu(cck->unresponded_rts),
+		      accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+		      max_cck->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n",
+		      "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(cck->rxe_frame_limit_overrun),
+		      accum_cck->rxe_frame_limit_overrun,
+		      delta_cck->rxe_frame_limit_overrun,
+		      max_cck->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_ack_cnt:",
+		      le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+		      delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_cts_cnt:",
+		      le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+		      delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      " accumulative      delta         max\n",
+		      "Statistics_Rx - GENERAL:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bogus_cts:",
+		      le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+		      delta_general->bogus_cts, max_general->bogus_cts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bogus_ack:",
+		      le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+		      delta_general->bogus_ack, max_general->bogus_ack);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "non_bssid_frames:",
+		      le32_to_cpu(general->non_bssid_frames),
+		      accum_general->non_bssid_frames,
+		      delta_general->non_bssid_frames,
+		      max_general->non_bssid_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "filtered_frames:",
+		      le32_to_cpu(general->filtered_frames),
+		      accum_general->filtered_frames,
+		      delta_general->filtered_frames,
+		      max_general->filtered_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n",
+		      "non_channel_beacons:",
+		      le32_to_cpu(general->non_channel_beacons),
+		      accum_general->non_channel_beacons,
+		      delta_general->non_channel_beacons,
+		      max_general->non_channel_beacons);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+	ssize_t ret;
+	struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	tx = &il->_3945.stats.tx;
+	accum_tx = &il->_3945.accum_stats.tx;
+	delta_tx = &il->_3945.delta_stats.tx;
+	max_tx = &il->_3945.max_delta.tx;
+	pos += il3945_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      " accumulative      delta         max\n",
+		      "Statistics_Tx:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "preamble:",
+		      le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+		      delta_tx->preamble_cnt, max_tx->preamble_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "rx_detected_cnt:",
+		      le32_to_cpu(tx->rx_detected_cnt),
+		      accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+		      max_tx->rx_detected_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bt_prio_defer_cnt:",
+		      le32_to_cpu(tx->bt_prio_defer_cnt),
+		      accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+		      max_tx->bt_prio_defer_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bt_prio_kill_cnt:",
+		      le32_to_cpu(tx->bt_prio_kill_cnt),
+		      accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+		      max_tx->bt_prio_kill_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "few_bytes_cnt:",
+		      le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+		      delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "cts_timeout:",
+		      le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+		      delta_tx->cts_timeout, max_tx->cts_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "ack_timeout:",
+		      le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+		      delta_tx->ack_timeout, max_tx->ack_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "expected_ack_cnt:",
+		      le32_to_cpu(tx->expected_ack_cnt),
+		      accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+		      max_tx->expected_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "actual_ack_cnt:",
+		      le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+		      delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+	ssize_t ret;
+	struct iwl39_stats_general *general, *accum_general;
+	struct iwl39_stats_general *delta_general, *max_general;
+	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+	struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	general = &il->_3945.stats.general;
+	dbg = &il->_3945.stats.general.dbg;
+	div = &il->_3945.stats.general.div;
+	accum_general = &il->_3945.accum_stats.general;
+	delta_general = &il->_3945.delta_stats.general;
+	max_general = &il->_3945.max_delta.general;
+	accum_dbg = &il->_3945.accum_stats.general.dbg;
+	delta_dbg = &il->_3945.delta_stats.general.dbg;
+	max_dbg = &il->_3945.max_delta.general.dbg;
+	accum_div = &il->_3945.accum_stats.general.div;
+	delta_div = &il->_3945.delta_stats.general.div;
+	max_div = &il->_3945.max_delta.general.div;
+	pos += il3945_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      " accumulative      delta         max\n",
+		      "Statistics_General:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "burst_check:",
+		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+		      delta_dbg->burst_check, max_dbg->burst_check);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "burst_count:",
+		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+		      delta_dbg->burst_count, max_dbg->burst_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sleep_time:",
+		      le32_to_cpu(general->sleep_time),
+		      accum_general->sleep_time, delta_general->sleep_time,
+		      max_general->sleep_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "slots_out:",
+		      le32_to_cpu(general->slots_out), accum_general->slots_out,
+		      delta_general->slots_out, max_general->slots_out);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "slots_idle:",
+		      le32_to_cpu(general->slots_idle),
+		      accum_general->slots_idle, delta_general->slots_idle,
+		      max_general->slots_idle);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+		      le32_to_cpu(general->ttl_timestamp));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "tx_on_a:",
+		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+		      delta_div->tx_on_a, max_div->tx_on_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "tx_on_b:",
+		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+		      delta_div->tx_on_b, max_div->tx_on_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "exec_time:",
+		      le32_to_cpu(div->exec_time), accum_div->exec_time,
+		      delta_div->exec_time, max_div->exec_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "probe_time:",
+		      le32_to_cpu(div->probe_time), accum_div->probe_time,
+		      delta_div->probe_time, max_div->probe_time);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 0000000..54b2d39
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3976 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME	"iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION	\
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION  IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+ /* module parameters */
+struct il_mod_params il3945_mod_params = {
+	.sw_crypto = 1,
+	.restart_fw = 1,
+	.disable_hw_scan = 1,
+	/* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39  is used to determine if antenna AUX/MAIN are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN      - Force MAIN antenna
+ * IL_ANTENNA_AUX       - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+	switch (il3945_mod_params.antenna) {
+	case IL_ANTENNA_DIVERSITY:
+		return 0;
+
+	case IL_ANTENNA_MAIN:
+		if (eeprom->antenna_switch_type)
+			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+	case IL_ANTENNA_AUX:
+		if (eeprom->antenna_switch_type)
+			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+	}
+
+	/* bad antenna selector value */
+	IL_ERR("Bad antenna selector value (0x%x)\n",
+	       il3945_mod_params.antenna);
+
+	return 0;		/* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	__le16 key_flags = 0;
+	int ret;
+
+	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+	if (sta_id == il->ctx.bcast_sta_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	keyconf->hw_key_idx = keyconf->keyidx;
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+	if ((il->stations[sta_id].sta.key.
+	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	D_INFO("hwcrypto: modify ucode station key info\n");
+
+	ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+				struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+	unsigned long flags;
+	struct il_addsta_cmd sta_cmd;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+	il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	D_INFO("hwcrypto: clear ucode station key info\n");
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+		       u8 sta_id)
+{
+	int ret = 0;
+
+	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	default:
+		IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+		ret = -EINVAL;
+	}
+
+	D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+	return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+	int ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
+		return -EOPNOTSUPP;
+
+	IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+	return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+	struct list_head *element;
+
+	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+	while (!list_empty(&il->free_frames)) {
+		element = il->free_frames.next;
+		list_del(element);
+		kfree(list_entry(element, struct il3945_frame, list));
+		il->frames_count--;
+	}
+
+	if (il->frames_count) {
+		IL_WARN("%d frames still in use.  Did we lose one?\n",
+			il->frames_count);
+		il->frames_count = 0;
+	}
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+	struct il3945_frame *frame;
+	struct list_head *element;
+	if (list_empty(&il->free_frames)) {
+		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+		if (!frame) {
+			IL_ERR("Could not allocate frame!\n");
+			return NULL;
+		}
+
+		il->frames_count++;
+		return frame;
+	}
+
+	element = il->free_frames.next;
+	list_del(element);
+	return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+	memset(frame, 0, sizeof(*frame));
+	list_add(&frame->list, &il->free_frames);
+}
+
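+/*
+ * il3945_fill_beacon_frame - copy the cached beacon into the Tx buffer
+ *
+ * Copies il->beacon_skb into @hdr and returns the number of bytes copied,
+ * or 0 if we are not associated, have no cached beacon, or it does not
+ * fit in @left bytes.
+ */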
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+			 int left)
+{
+
+	if (!il_is_associated(il) || !il->beacon_skb)
+		return 0;
+
+	if (il->beacon_skb->len > left)
+		return 0;
+
+	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+	return il->beacon_skb->len;
+}
+
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+	struct il3945_frame *frame;
+	unsigned int frame_size;
+	int rc;
+	u8 rate;
+
+	frame = il3945_get_free_frame(il);
+
+	if (!frame) {
+		IL_ERR("Could not obtain free frame buffer for beacon "
+		       "command.\n");
+		return -ENOMEM;
+	}
+
+	rate = il_get_lowest_plcp(il, &il->ctx);
+
+	frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+	il3945_free_frame(il, frame);
+
+	return rc;
+}
+
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+	if (il->_3945.shared_virt)
+		dma_free_coherent(&il->pci_dev->dev,
+				  sizeof(struct il3945_shared),
+				  il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+			     struct il_device_cmd *cmd,
+			     struct sk_buff *skb_frag, int sta_id)
+{
+	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+	tx_cmd->sec_ctl = 0;
+
+	switch (keyinfo->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+		D_TX("tx_cmd with AES hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |=
+		    TX_CMD_SEC_WEP | (info->control.hw_key->
+				      hw_key_idx & TX_CMD_SEC_MSK) <<
+		    TX_CMD_SEC_SHIFT;
+
+		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+		D_TX("Configuring packet for WEP encryption " "with key %d\n",
+		     info->control.hw_key->hw_key_idx);
+		break;
+
+	default:
+		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
+		break;
+	}
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_hdr *hdr, u8 std_id)
+{
+	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+	__le32 tx_flags = tx_cmd->tx_flags;
+	__le16 fc = hdr->frame_control;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if (ieee80211_is_mgmt(fc))
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_resp(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct il3945_tx_cmd *tx_cmd;
+	struct il_tx_queue *txq = NULL;
+	struct il_queue *q = NULL;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	int txq_id = skb_get_queue_mapping(skb);
+	u16 len, idx, hdr_len;
+	u8 id;
+	u8 unicast;
+	u8 sta_id;
+	u8 tid = 0;
+	__le16 fc;
+	u8 wait_write_ptr = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (il_is_rfkill(il)) {
+		D_DROP("Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+	    IL_INVALID_RATE) {
+		IL_ERR("ERROR: No TX rate available.\n");
+		goto drop_unlock;
+	}
+
+	unicast = !is_multicast_ether_addr(hdr->addr1);
+	id = 0;
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (ieee80211_is_auth(fc))
+		D_TX("Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		D_TX("Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		D_TX("Sending REASSOC frame\n");
+#endif
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* Find idx into station table for destination station */
+	sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+	if (sta_id == IL_INVALID_STATION) {
+		D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+		goto drop;
+	}
+
+	D_RATE("station Id %d\n", sta_id);
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (unlikely(tid >= MAX_TID_COUNT))
+			goto drop;
+	}
+
+	/* Descriptor for chosen Tx queue */
+	txq = &il->txq[txq_id];
+	q = &txq->q;
+
+	if ((il_queue_space(q) < q->high_mark))
+		goto drop;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+	/* Set up driver data for this TFD */
+	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+	txq->txb[q->write_ptr].skb = skb;
+	txq->txb[q->write_ptr].ctx = &il->ctx;
+
+	/* Init first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = txq->cmd[idx];
+	out_meta = &txq->meta[idx];
+	tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD idx within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = C_TX;
+	out_cmd->hdr.sequence =
+	    cpu_to_le16((u16)
+			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	if (info->control.hw_key)
+		il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+	il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
+
+	/* Total # bytes to be transmitted */
+	len = (u16) skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	il_dbg_log_tx_data_frame(il, len, hdr);
+	il_update_stats(il, true, fc, len);
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+	il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+			  ieee80211_hdrlen(fc));
+
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
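+	/* (len + 3) & ~3 below rounds up to the next multiple of 4,
+	 * e.g. 50 -> 52 while 48 is left unchanged. */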
+	len =
+	    sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+	    hdr_len;
+	len = (len + 3) & ~3;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys =
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+	/* we do not map meta data ... so we can safely access the address to
+	 * provide to the unmap command */
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, len);
+
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
+						 0);
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	len = skb->len - hdr_len;
+	if (len) {
+		phys_addr =
+		    pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+				   PCI_DMA_TODEVICE);
+		il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+							 len, 0, U32_PAD(len));
+	}
+
+	/* Tell device the write idx *just past* this latest filled TFD */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&il->lock, flags);
+			txq->need_update = 1;
+			il_txq_update_write_ptr(il, txq);
+			spin_unlock_irqrestore(&il->lock, flags);
+		}
+
+		il_stop_queue(il, txq);
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&il->lock, flags);
+drop:
+	return -1;
+}
+
+static int
+il3945_get_measurement(struct il_priv *il,
+		       struct ieee80211_measurement_params *params, u8 type)
+{
+	struct il_spectrum_cmd spectrum;
+	struct il_rx_pkt *pkt;
+	struct il_host_cmd cmd = {
+		.id = C_SPECTRUM_MEASUREMENT,
+		.data = (void *)&spectrum,
+		.flags = CMD_WANT_SKB,
+	};
+	u32 add_time = le64_to_cpu(params->start_time);
+	int rc;
+	int spectrum_resp_status;
+	int duration = le16_to_cpu(params->duration);
+	struct il_rxon_context *ctx = &il->ctx;
+
+	if (il_is_associated(il))
+		add_time =
+		    il_usecs_to_beacons(il,
+					le64_to_cpu(params->start_time) -
+					il->_3945.last_tsf,
+					le16_to_cpu(ctx->timing.
+						    beacon_interval));
+
+	memset(&spectrum, 0, sizeof(spectrum));
+
+	spectrum.channel_count = cpu_to_le16(1);
+	spectrum.flags =
+	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+	cmd.len = sizeof(spectrum);
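+	/* The length stored inside the command excludes the len field itself. */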
+	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+	if (il_is_associated(il))
+		spectrum.start_time =
+		    il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+				       le16_to_cpu(ctx->timing.
+						   beacon_interval));
+	else
+		spectrum.start_time = 0;
+
+	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+	spectrum.channels[0].channel = params->channel;
+	spectrum.channels[0].type = type;
+	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+		spectrum.flags |=
+		    RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+		    RXON_FLG_TGG_PROTECT_MSK;
+
+	rc = il_send_cmd_sync(il, &cmd);
+	if (rc)
+		return rc;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_SPECTRUM_MEASUREMENT command\n");
+		rc = -EIO;
+	}
+
+	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+	switch (spectrum_resp_status) {
+	case 0:		/* Command will be handled */
+		if (pkt->u.spectrum.id != 0xff) {
+			D_INFO("Replaced existing measurement: %d\n",
+			       pkt->u.spectrum.id);
+			il->measurement_status &= ~MEASUREMENT_READY;
+		}
+		il->measurement_status |= MEASUREMENT_ACTIVE;
+		rc = 0;
+		break;
+
+	case 1:		/* Command will not be handled */
+		rc = -EAGAIN;
+		break;
+	}
+
+	il_free_pages(il, cmd.reply_page);
+
+	return rc;
+}
+
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_alive_resp *palive;
+	struct delayed_work *pwork;
+
+	palive = &pkt->u.alive_frame;
+
+	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+	       palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+		D_INFO("Initialization Alive received.\n");
+		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+		       sizeof(struct il_alive_resp));
+		pwork = &il->init_alive_start;
+	} else {
+		D_INFO("Runtime Alive received.\n");
+		memcpy(&il->card_alive, &pkt->u.alive_frame,
+		       sizeof(struct il_alive_resp));
+		pwork = &il->alive_start;
+		il3945_disable_events(il);
+	}
+
+	/* We delay the ALIVE response by 5ms to
+	 * give the HW RF Kill time to activate... */
+	if (palive->is_valid == UCODE_VALID_OK)
+		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+	else
+		IL_WARN("uCode did not respond OK.\n");
+}
+
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u8 rate = beacon->beacon_notify_hdr.rate;
+
+	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+	     le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+	     beacon->beacon_notify_hdr.failure_frame,
+	     le32_to_cpu(beacon->ibss_mgr_status),
+	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+	unsigned long status = il->status;
+
+	IL_WARN("Card state received: HW:%s SW:%s\n",
+		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
+		(flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+	_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	if (flags & HW_CARD_DISABLED)
+		set_bit(S_RF_KILL_HW, &il->status);
+	else
+		clear_bit(S_RF_KILL_HW, &il->status);
+
+	il_scan_cancel(il);
+
+	if ((test_bit(S_RF_KILL_HW, &status) !=
+	     test_bit(S_RF_KILL_HW, &il->status)))
+		wiphy_rfkill_set_hw_state(il->hw->wiphy,
+					  test_bit(S_RF_KILL_HW, &il->status));
+	else
+		wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Set up the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+	il->handlers[N_ALIVE] = il3945_hdl_alive;
+	il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+	il->handlers[N_ERROR] = il_hdl_error;
+	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+	il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+	/*
+	 * The same handler is used for both the REPLY to a discrete
+	 * stats request from the host as well as for the periodic
+	 * stats notifications (after received beacons) from the uCode.
+	 */
+	il->handlers[C_STATS] = il3945_hdl_c_stats;
+	il->handlers[N_STATS] = il3945_hdl_stats;
+
+	il_setup_rx_scan_handlers(il);
+	il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+	/* Set up hardware specific Rx handlers */
+	il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt.  The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in il->rxq->rx_free.  When
+ *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the il->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   il->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the il->rxq.  The driver 'processed' idx is updated.
+ * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
+ *   IDX is not incremented and il->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE idx.  If insufficient rx_free buffers
+ *                            are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx_handle()  Detach il_rx_bufs from pool up to the
+ *                            READ IDX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls il3945_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
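+
+/*
+ * Worked example (illustrative only): with READ = 5, WRITE = 4 means the
+ * queue is empty and WRITE = 5 means it is full, per the empty/full
+ * conditions described above.
+ */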
+
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	unsigned long flags;
+	int write;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	write = rxq->write & ~0x7;
+	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+		/* Get next free Rx buffer, remove from free list */
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		/* Point to Rx buffer via next RBD in circular buffer */
+		rxq->bd[rxq->write] =
+		    il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		queue_work(il->workqueue, &il->rx_replenish);
+
+	/* If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8. */
+	if (rxq->write_actual != (rxq->write & ~0x7) ||
+	    abs(rxq->write - rxq->read) > 7) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		rxq->need_update = 1;
+		spin_unlock_irqrestore(&rxq->lock, flags);
+		il_rx_queue_update_write_ptr(il, rxq);
+	}
+}
+
+/**
+ * il3945_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	struct page *page;
+	unsigned long flags;
+	gfp_t gfp_mask = priority;
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
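+		/* Suppress allocation-failure warnings while free buffers are
+		 * still plentiful; order > 0 allocations must be compound
+		 * pages. */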
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (il->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				D_INFO("Failed to allocate SKB buffer.\n");
+			if (rxq->free_count <= RX_LOW_WATERMARK &&
+			    net_ratelimit())
+				IL_ERR("Failed to allocate SKB buffer with %0x."
+				       "Only %u free buffers remaining.\n",
+				       priority, rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			break;
+		}
+
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			__free_pages(page, il->hw_params.rx_page_order);
+			return;
+		}
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		rxb->page = page;
+		/* Get physical address of RB/SKB */
+		rxb->page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+		il->alloc_rxb_page++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+	}
+}
+
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void
+il3945_rx_replenish(void *data)
+{
+	struct il_priv *il = data;
+	unsigned long flags;
+
+	il3945_rx_allocate(il, GFP_KERNEL);
+
+	spin_lock_irqsave(&il->lock, flags);
+	il3945_rx_queue_restock(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+}
+
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+	il3945_rx_allocate(il, GFP_ATOMIC);
+
+	il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and, if SKB is set to
+ * non-NULL, it is unmapped and freed.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	int i;
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+	}
+
+	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->bd_dma);
+	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+			  rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->bd = NULL;
+	rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/*	 0   1   2   3   4   5   6   7   8   9 */
+	0, 0, 6, 10, 12, 14, 16, 17, 18, 19,	/* 00 - 09 */
+	20, 21, 22, 22, 23, 23, 24, 25, 26, 26,	/* 10 - 19 */
+	26, 26, 26, 27, 27, 28, 28, 28, 29, 29,	/* 20 - 29 */
+	29, 30, 30, 30, 31, 31, 31, 31, 32, 32,	/* 30 - 39 */
+	32, 32, 32, 33, 33, 33, 33, 33, 34, 34,	/* 40 - 49 */
+	34, 34, 34, 34, 35, 35, 35, 35, 35, 35,	/* 50 - 59 */
+	36, 36, 36, 36, 36, 36, 36, 37, 37, 37,	/* 60 - 69 */
+	37, 37, 37, 37, 37, 38, 38, 38, 38, 38,	/* 70 - 79 */
+	38, 38, 38, 38, 38, 39, 39, 39, 39, 39,	/* 80 - 89 */
+	39, 39, 39, 39, 39, 40, 40, 40, 40, 40	/* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ *   (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
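+/* Example: sig_ratio = 250 takes the >= 100 branch and yields
+ * 20 + ratio2dB[25] = 20 + 28 = 48 dB, close to 20*log10(250) ~= 48. */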
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+	/* 1000:1 or higher just report as 60 dB */
+	if (sig_ratio >= 1000)
+		return 60;
+
+	/* 100:1 or higher, divide by 10 and use table,
+	 *   add 20 dB to make up for divide by 10 */
+	if (sig_ratio >= 100)
+		return 20 + (int)ratio2dB[sig_ratio / 10];
+
+	/* We shouldn't see this */
+	if (sig_ratio < 1)
+		return 0;
+
+	/* Use table for ratios 1:1 - 99:1 */
+	return (int)ratio2dB[sig_ratio];
+}
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+	struct il_rx_buf *rxb;
+	struct il_rx_pkt *pkt;
+	struct il_rx_queue *rxq = &il->rxq;
+	u32 r, i;
+	int reclaim;
+	unsigned long flags;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty = 0;
+
+	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
+	 * buffer that the driver may process (last buffer filled by ucode). */
+	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+	i = rxq->read;
+
+	/* calculate total frames need to be restock after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+	/* Rx interrupt, but nothing sent from uCode */
+	if (i == r)
+		D_RX("r = %d, i = %d\n", r, i);
+
+	while (i != r) {
+		int len;
+
+		rxb = rxq->queue[i];
+
+		/* If an RXB doesn't have a Rx queue slot associated with it,
+		 * then a bug has been introduced in the queue refilling
+		 * routines -- catch it here */
+		BUG_ON(rxb == NULL);
+
+		rxq->queue[i] = NULL;
+
+		pci_unmap_page(il->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << il->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
+
+		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+		len += sizeof(u32);	/* account for status word */
+
+		/* Reclaim a command buffer only if this packet is a response
+		 *   to a (driver-originated) command.
+		 * If the packet (e.g. Rx frame) originated from uCode,
+		 *   there is no command buffer to reclaim.
+		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+		 *   but apparently a few don't get set; catch them here. */
+		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+		    pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+
+		/* Based on type of command response or notification,
+		 *   handle those that need handling via function in
+		 *   handlers table.  See il3945_setup_handlers() */
+		if (il->handlers[pkt->hdr.cmd]) {
+			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+			il->isr_stats.handlers[pkt->hdr.cmd]++;
+			il->handlers[pkt->hdr.cmd] (il, rxb);
+		} else {
+			/* No handling needed */
+			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+		}
+
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt). Because some handler might have
+		 * already taken or freed the pages.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking il_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (rxb->page)
+				il_tx_cmd_complete(il, rxb);
+			else
+				IL_WARN("Claim null rxb?\n");
+		}
+
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (rxb->page != NULL) {
+			rxb->page_dma =
+			    pci_map_page(il->pci_dev, rxb->page, 0,
+					 PAGE_SIZE << il->hw_params.
+					 rx_page_order, PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		i = (i + 1) & RX_QUEUE_MASK;
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode won't assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				il3945_rx_replenish_now(il);
+				count = 0;
+			}
+		}
+	}
+
+	/* Backtrack one entry */
+	rxq->read = i;
+	if (fill_rx)
+		il3945_rx_replenish_now(il);
+	else
+		il3945_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+	/* wait to make sure we flush pending tasklet */
+	synchronize_irq(il->pci_dev->irq);
+	tasklet_kill(&il->irq_tasklet);
+}
+
+static const char *
+il3945_desc_lookup(int i)
+{
+	switch (i) {
+	case 1:
+		return "FAIL";
+	case 2:
+		return "BAD_PARAM";
+	case 3:
+		return "BAD_CHECKSUM";
+	case 4:
+		return "NMI_INTERRUPT";
+	case 5:
+		return "SYSASSERT";
+	case 6:
+		return "FATAL_ERROR";
+	}
+
+	return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
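+/* The error log begins with a one-word entry count at 'base'; each entry is
+ * seven u32 words: desc, time, blink1, blink2, ilink1, ilink2 and data1. */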
+
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+	u32 i;
+	u32 desc, time, count, base, data1;
+	u32 blink1, blink2, ilink1, ilink2;
+
+	base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+	if (!il3945_hw_valid_rtc_data_addr(base)) {
+		IL_ERR("Not valid error log pointer 0x%08X\n", base);
+		return;
+	}
+
+	count = il_read_targ_mem(il, base);
+
+	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+		IL_ERR("Start IWL Error Log Dump:\n");
+		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+	}
+
+	IL_ERR("Desc       Time       asrtPC  blink2 "
+	       "ilink1  nmiPC   Line\n");
+	for (i = ERROR_START_OFFSET;
+	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+	     i += ERROR_ELEM_SIZE) {
+		desc = il_read_targ_mem(il, base + i);
+		time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+		blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+		blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+		ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+		ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+		data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+		IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+		       il3945_desc_lookup(desc), desc, time, blink1, blink2,
+		       ilink1, ilink2, data1);
+	}
+}
+
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+	u32 inta, handled = 0;
+	u32 inta_fh;
+	unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u32 inta_mask;
+#endif
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+	inta = _il_rd(il, CSR_INT);
+	_il_wr(il, CSR_INT, inta);
+
+	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
+	 * Any new interrupts that happen after this, either while we're
+	 * in this tasklet, or later, will show up in next ISR/tasklet. */
+	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & IL_DL_ISR) {
+		/* just for debug */
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+		      inta_mask, inta_fh);
+	}
+#endif
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+	 * atomic, make sure that inta covers all the interrupts that
+	 * we've discovered, even if FH interrupt came in just after
+	 * reading CSR_INT. */
+	if (inta_fh & CSR39_FH_INT_RX_MASK)
+		inta |= CSR_INT_BIT_FH_RX;
+	if (inta_fh & CSR39_FH_INT_TX_MASK)
+		inta |= CSR_INT_BIT_FH_TX;
+
+	/* Now service all interrupt bits discovered above. */
+	if (inta & CSR_INT_BIT_HW_ERR) {
+		IL_ERR("Hardware error detected.  Restarting.\n");
+
+		/* Tell the device to stop sending interrupts */
+		il_disable_interrupts(il);
+
+		il->isr_stats.hw++;
+		il_irq_handle_error(il);
+
+		handled |= CSR_INT_BIT_HW_ERR;
+
+		return;
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		/* NIC fires this, but we don't use it, redundant with WAKEUP */
+		if (inta & CSR_INT_BIT_SCD) {
+			D_ISR("Scheduler finished transmitting "
+			      "the frame/frames.\n");
+			il->isr_stats.sch++;
+		}
+
+		/* Alive notification via Rx interrupt will do the real work */
+		if (inta & CSR_INT_BIT_ALIVE) {
+			D_ISR("Alive interrupt\n");
+			il->isr_stats.alive++;
+		}
+	}
+#endif
+	/* Safely ignore these bits for debug checks below */
+	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+	/* Error detected by uCode */
+	if (inta & CSR_INT_BIT_SW_ERR) {
+		IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
+		       inta);
+		il->isr_stats.sw++;
+		il_irq_handle_error(il);
+		handled |= CSR_INT_BIT_SW_ERR;
+	}
+
+	/* uCode wakes up after power-down sleep */
+	if (inta & CSR_INT_BIT_WAKEUP) {
+		D_ISR("Wakeup interrupt\n");
+		il_rx_queue_update_write_ptr(il, &il->rxq);
+		il_txq_update_write_ptr(il, &il->txq[0]);
+		il_txq_update_write_ptr(il, &il->txq[1]);
+		il_txq_update_write_ptr(il, &il->txq[2]);
+		il_txq_update_write_ptr(il, &il->txq[3]);
+		il_txq_update_write_ptr(il, &il->txq[4]);
+		il_txq_update_write_ptr(il, &il->txq[5]);
+
+		il->isr_stats.wakeup++;
+		handled |= CSR_INT_BIT_WAKEUP;
+	}
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here */
+	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+		il3945_rx_handle(il);
+		il->isr_stats.rx++;
+		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+	}
+
+	if (inta & CSR_INT_BIT_FH_TX) {
+		D_ISR("Tx interrupt\n");
+		il->isr_stats.tx++;
+
+		_il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+		il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+		handled |= CSR_INT_BIT_FH_TX;
+	}
+
+	if (inta & ~handled) {
+		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+		il->isr_stats.unhandled++;
+	}
+
+	if (inta & ~il->inta_mask) {
+		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+			inta & ~il->inta_mask);
+		IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
+	}
+
+	/* Re-enable all interrupts */
+	/* only Re-enable if disabled by irq */
+	if (test_bit(S_INT_ENABLED, &il->status))
+		il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		inta = _il_rd(il, CSR_INT);
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+	}
+#endif
+}
+
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+			     u8 is_active, u8 n_probes,
+			     struct il3945_scan_channel *scan_ch,
+			     struct ieee80211_vif *vif)
+{
+	struct ieee80211_channel *chan;
+	const struct ieee80211_supported_band *sband;
+	const struct il_channel_info *ch_info;
+	u16 passive_dwell = 0;
+	u16 active_dwell = 0;
+	int added, i;
+
+	sband = il_get_hw_mode(il, band);
+	if (!sband)
+		return 0;
+
+	active_dwell = il_get_active_dwell_time(il, band, n_probes);
+	passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
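+	/* Keep the passive dwell strictly longer than the active dwell. */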
+	if (passive_dwell <= active_dwell)
+		passive_dwell = active_dwell + 1;
+
+	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+		chan = il->scan_request->channels[i];
+
+		if (chan->band != band)
+			continue;
+
+		scan_ch->channel = chan->hw_value;
+
+		ch_info = il_get_channel_info(il, band, scan_ch->channel);
+		if (!il_is_channel_valid(ch_info)) {
+			D_SCAN("Channel %d is INVALID for this band.\n",
+			       scan_ch->channel);
+			continue;
+		}
+
+		scan_ch->active_dwell = cpu_to_le16(active_dwell);
+		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+		/* If passive, set up for auto-switch
+		 * and use long active_dwell time.
+		 */
+		if (!is_active || il_is_channel_passive(ch_info) ||
+		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+			scan_ch->type = 0;	/* passive */
+			if (IL_UCODE_API(il->ucode_ver) == 1)
+				scan_ch->active_dwell =
+				    cpu_to_le16(passive_dwell - 1);
+		} else {
+			scan_ch->type = 1;	/* active */
+		}
+
+		/* Set direct probe bits. These may be used both for active
+		 * scan channels (probes get sent right away),
+		 * or for passive channels (probes get sent only after
+		 * hearing a clear Rx packet). */
+		if (IL_UCODE_API(il->ucode_ver) >= 2) {
+			if (n_probes)
+				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+		} else {
+			/* uCode v1 does not allow setting direct probe bits on
+			 * passive channel. */
+			if ((scan_ch->type & 1) && n_probes)
+				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+		}
+
+		/* Set txpower levels to defaults */
+		scan_ch->tpc.dsp_atten = 110;
+		/* scan_pwr_info->tpc.dsp_atten; */
+
+		/*scan_pwr_info->tpc.tx_gain; */
+		if (band == IEEE80211_BAND_5GHZ)
+			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+		else {
+			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
+			 * power level:
+			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+			 */
+		}
+
+		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+		       (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+		scan_ch++;
+		added++;
+	}
+
+	D_SCAN("total channels to scan %d\n", added);
+	return added;
+}
+
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+	int i;
+
+	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+		rates[i].bitrate = il3945_rates[i].ieee * 5;
+		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
+		rates[i].hw_value_short = i;
+		rates[i].flags = 0;
+		if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+			/*
+			 * If CCK != 1M then set short preamble rate flag.
+			 */
+			rates[i].flags |=
+			    (il3945_rates[i].plcp ==
+			     10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+		}
+	}
+}
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+	il_free_fw_desc(il->pci_dev, &il->ucode_code);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 *image, u32 len)
+{
+	u32 val;
+	u32 save_len = len;
+	int rc = 0;
+	u32 errcnt;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+	errcnt = 0;
+	for (; len > 0; len -= sizeof(u32), image++) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("uCode INST section is invalid at "
+			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
+			       save_len - len, val, le32_to_cpu(*image));
+			rc = -EIO;
+			errcnt++;
+			if (errcnt >= 20)
+				break;
+		}
+	}
+
+	if (!errcnt)
+		D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+	return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 *image, u32 len)
+{
+	u32 val;
+	int rc = 0;
+	u32 errcnt = 0;
+	u32 i;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
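+	/* Step through the image 100 bytes (i.e. 25 words) at a time. */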
+	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+#if 0				/* Enable this if you want to see details */
+			IL_ERR("uCode INST section is invalid at "
+			       "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+			       *image);
+#endif
+			rc = -EIO;
+			errcnt++;
+			if (errcnt >= 3)
+				break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+	__le32 *image;
+	u32 len;
+	int rc = 0;
+
+	/* Try bootstrap */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	rc = il3945_verify_inst_sparse(il, image, len);
+	if (rc == 0) {
+		D_INFO("Bootstrap uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try initialize */
+	image = (__le32 *) il->ucode_init.v_addr;
+	len = il->ucode_init.len;
+	rc = il3945_verify_inst_sparse(il, image, len);
+	if (rc == 0) {
+		D_INFO("Initialize uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try runtime/protocol */
+	image = (__le32 *) il->ucode_code.v_addr;
+	len = il->ucode_code.len;
+	rc = il3945_verify_inst_sparse(il, image, len);
+	if (rc == 0) {
+		D_INFO("Runtime uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+	/* Since nothing seems to match, show first several data entries in
+	 * instruction SRAM, so maybe visual inspection will give a clue.
+	 * Selection of bootstrap image (vs. other images) is arbitrary. */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	rc = il3945_verify_inst_full(il, image, len);
+
+	return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+	/* Remove all resets to allow NIC to operate */
+	_il_wr(il, CSR_RESET, 0);
+}
+
+#define IL3945_UCODE_GET(item)						\
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{									\
+	return le32_to_cpu(ucode->v1.item);				\
+}
+
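+/*
+ * The v1 ucode header is six __le32 words: ver plus the five size fields
+ * accessed via IL3945_UCODE_GET() below, i.e. 24 bytes regardless of the
+ * api_ver argument.
+ */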
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
+	return 24;
+}
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+	return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
+
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+	const struct il_ucode_header *ucode;
+	int ret = -EINVAL, idx;
+	const struct firmware *ucode_raw;
+	/* firmware file name contains uCode/driver compatibility version */
+	const char *name_pre = il->cfg->fw_name_pre;
+	const unsigned int api_max = il->cfg->ucode_api_max;
+	const unsigned int api_min = il->cfg->ucode_api_min;
+	char buf[25];
+	u8 *src;
+	size_t len;
+	u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+	/* Ask kernel firmware_class module to get the boot firmware off disk.
+	 * request_firmware() is synchronous, file is in memory on return. */
+	for (idx = api_max; idx >= api_min; idx--) {
+		sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
+		ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+		if (ret < 0) {
+			IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+			if (ret == -ENOENT)
+				continue;
+			else
+				goto error;
+		} else {
+			if (idx < api_max)
+				IL_ERR("Loaded firmware %s, "
+				       "which is deprecated. "
+				       "Please use API v%u instead.\n", buf,
+				       api_max);
+			D_INFO("Got firmware '%s' file "
+			       "(%zd bytes) from disk\n", buf, ucode_raw->size);
+			break;
+		}
+	}
+
+	if (ret < 0)
+		goto error;
+
+	/* Make sure that we got at least our header! */
+	if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+		IL_ERR("File size way too small!\n");
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	/* Data from ucode file:  header followed by uCode images */
+	ucode = (struct il_ucode_header *)ucode_raw->data;
+
+	il->ucode_ver = le32_to_cpu(ucode->ver);
+	api_ver = IL_UCODE_API(il->ucode_ver);
+	inst_size = il3945_ucode_get_inst_size(ucode);
+	data_size = il3945_ucode_get_data_size(ucode);
+	init_size = il3945_ucode_get_init_size(ucode);
+	init_data_size = il3945_ucode_get_init_data_size(ucode);
+	boot_size = il3945_ucode_get_boot_size(ucode);
+	src = il3945_ucode_get_data(ucode);
+
+	/* api_ver should match the api version forming part of the
+	 * firmware filename ... but we don't check for that and only rely
+	 * on the API version read from firmware header from here on forward */
+
+	if (api_ver < api_min || api_ver > api_max) {
+		IL_ERR("Driver unable to support your firmware API. "
+		       "Driver supports v%u, firmware is v%u.\n", api_max,
+		       api_ver);
+		il->ucode_ver = 0;
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (api_ver != api_max)
+		IL_ERR("Firmware has old API version. Expected %u, "
+		       "got %u. New firmware can be obtained "
+		       "from http://www.intellinuxwireless.org.\n", api_max,
+		       api_ver);
+
+	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+		 IL_UCODE_SERIAL(il->ucode_ver));
+
+	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+	D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+	D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+	D_INFO("f/w package hdr init inst size = %u\n", init_size);
+	D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+	D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+	/* Verify size of file vs. image size info in file's header */
+	if (ucode_raw->size !=
+	    il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+	    init_size + init_data_size + boot_size) {
+
+		D_INFO("uCode file size %zd does not match expected size\n",
+		       ucode_raw->size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	/* Verify that uCode images will fit in card's SRAM */
+	if (inst_size > IL39_MAX_INST_SIZE) {
+		D_INFO("uCode instr len %d too large to fit in\n", inst_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	if (data_size > IL39_MAX_DATA_SIZE) {
+		D_INFO("uCode data len %d too large to fit in\n", data_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (init_size > IL39_MAX_INST_SIZE) {
+		D_INFO("uCode init instr len %d too large to fit in\n",
+		       init_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (init_data_size > IL39_MAX_DATA_SIZE) {
+		D_INFO("uCode init data len %d too large to fit in\n",
+		       init_data_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (boot_size > IL39_MAX_BSM_SIZE) {
+		D_INFO("uCode boot instr len %d too large to fit in\n",
+		       boot_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	/* Allocate ucode buffers for card's bus-master loading ... */
+
+	/* Runtime instructions and 2 copies of data:
+	 * 1) unmodified from disk
+	 * 2) backup cache for save/restore during power-downs */
+	il->ucode_code.len = inst_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+	il->ucode_data.len = data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+	il->ucode_data_backup.len = data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+	    !il->ucode_data_backup.v_addr)
+		goto err_pci_alloc;
+
+	/* Initialization instructions and data */
+	if (init_size && init_data_size) {
+		il->ucode_init.len = init_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+		il->ucode_init_data.len = init_data_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Bootstrap (instructions only, no data) */
+	if (boot_size) {
+		il->ucode_boot.len = boot_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+		if (!il->ucode_boot.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Copy images into buffers for card's bus-master reads ... */
+
+	/* Runtime instructions (first block of data in file) */
+	len = inst_size;
+	D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+	memcpy(il->ucode_code.v_addr, src, len);
+	src += len;
+
+	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+	/* Runtime data (2nd block)
+	 * NOTE:  Copy into backup buffer will be done in il3945_up()  */
+	len = data_size;
+	D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+	memcpy(il->ucode_data.v_addr, src, len);
+	memcpy(il->ucode_data_backup.v_addr, src, len);
+	src += len;
+
+	/* Initialization instructions (3rd block) */
+	if (init_size) {
+		len = init_size;
+		D_INFO("Copying (but not loading) init instr len %zd\n", len);
+		memcpy(il->ucode_init.v_addr, src, len);
+		src += len;
+	}
+
+	/* Initialization data (4th block) */
+	if (init_data_size) {
+		len = init_data_size;
+		D_INFO("Copying (but not loading) init data len %zd\n", len);
+		memcpy(il->ucode_init_data.v_addr, src, len);
+		src += len;
+	}
+
+	/* Bootstrap instructions (5th block) */
+	len = boot_size;
+	D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+	memcpy(il->ucode_boot.v_addr, src, len);
+
+	/* We have our copies now, allow the OS to release its copy */
+	release_firmware(ucode_raw);
+	return 0;
+
+err_pci_alloc:
+	IL_ERR("failed to allocate pci memory\n");
+	ret = -ENOMEM;
+	il3945_dealloc_ucode_pci(il);
+
+err_release:
+	release_firmware(ucode_raw);
+
+error:
+	return ret;
+}
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+
+	/* bits 31:0 for 3945 */
+	pinst = il->ucode_code.p_addr;
+	pdata = il->ucode_data_backup.p_addr;
+
+	/* Tell bootstrap uCode where to find image to load */
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+	/* Inst byte count must be last to set up, bit 31 signals uCode
+	 *   that all new ptr/size info is in place */
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+		   il->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+	D_INFO("Runtime uCode pointers are set.\n");
+
+	return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+	/* Check alive response for "valid" sign from uCode */
+	if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		D_INFO("Initialize Alive failed.\n");
+		goto restart;
+	}
+
+	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "initialize" alive if code weren't properly loaded.  */
+	if (il3945_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad \"initialize\" uCode load.\n");
+		goto restart;
+	}
+
+	/* Send pointers to protocol/runtime uCode image ... init code will
+	 * load and launch runtime uCode, which will send us another "Alive"
+	 * notification. */
+	D_INFO("Initialization Alive received.\n");
+	if (il3945_set_ucode_ptrs(il)) {
+		/* Runtime instruction load won't happen;
+		 * take it all the way back down so we can try again */
+		D_INFO("Couldn't set up uCode pointers.\n");
+		goto restart;
+	}
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+	int thermal_spin = 0;
+	u32 rfkill;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	D_INFO("Runtime Alive received.\n");
+
+	if (il->card_alive.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		D_INFO("Alive failed.\n");
+		goto restart;
+	}
+
+	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "runtime" alive if code weren't properly loaded.  */
+	if (il3945_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad runtime uCode load.\n");
+		goto restart;
+	}
+
+	rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+	D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+	if (rfkill & 0x1) {
+		clear_bit(S_RF_KILL_HW, &il->status);
+		/* if RFKILL is not on, then wait for thermal
+		 * sensor in adapter to kick in */
+		while (il3945_hw_get_temperature(il) == 0) {
+			thermal_spin++;
+			udelay(10);
+		}
+
+		if (thermal_spin)
+			D_INFO("Thermal calibration took %dus\n",
+			       thermal_spin * 10);
+	} else
+		set_bit(S_RF_KILL_HW, &il->status);
+
+	/* After the ALIVE response, we can send commands to 3945 uCode */
+	set_bit(S_ALIVE, &il->status);
+
+	/* Enable watchdog to monitor the driver tx queues */
+	il_setup_watchdog(il);
+
+	if (il_is_rfkill(il))
+		return;
+
+	ieee80211_wake_queues(il->hw);
+
+	il->active_rate = RATES_MASK_3945;
+
+	il_power_update_mode(il, true);
+
+	if (il_is_associated(il)) {
+		struct il3945_rxon_cmd *active_rxon =
+		    (struct il3945_rxon_cmd *)(&ctx->active);
+
+		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	} else {
+		/* Initialize our rx_config data */
+		il_connection_init_rx_config(il, ctx);
+	}
+
+	/* Configure Bluetooth device coexistence support */
+	il_send_bt_config(il);
+
+	set_bit(S_READY, &il->status);
+
+	/* Configure the adapter for unassociated operation */
+	il3945_commit_rxon(il, ctx);
+
+	il3945_reg_txpower_periodic(il);
+
+	D_INFO("ALIVE processing complete.\n");
+	wake_up(&il->wait_command_queue);
+
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il3945_down(struct il_priv *il)
+{
+	unsigned long flags;
+	int exit_pending;
+
+	D_INFO(DRV_NAME " is going down\n");
+
+	il_scan_cancel_timeout(il, 200);
+
+	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+	/* Stop the TX queues watchdog. The S_EXIT_PENDING bit must be set
+	 * to prevent the timer from being rearmed. */
+	del_timer_sync(&il->watchdog);
+
+	/* Station information will now be cleared in device */
+	il_clear_ucode_stations(il, NULL);
+	il_dealloc_bcast_stations(il);
+	il_clear_driver_stations(il);
+
+	/* Unblock any waiting calls */
+	wake_up_all(&il->wait_command_queue);
+
+	/* Wipe out the EXIT_PENDING status bit if we are not actually
+	 * exiting the module */
+	if (!exit_pending)
+		clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* stop and reset the on-board processor */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/* tell the device to stop sending interrupts */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+	il3945_synchronize_irq(il);
+
+	if (il->mac80211_registered)
+		ieee80211_stop_queues(il->hw);
+
+	/* If we have not previously called il3945_init() then
+	 * clear all bits but the RF Kill bits and return */
+	if (!il_is_init(il)) {
+		il->status =
+		    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+		goto exit;
+	}
+
+	/* ...otherwise clear out all the status bits but the RF Kill
+	 * bit and continue taking the NIC down. */
+	il->status &=
+	    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+	il3945_hw_txq_ctx_stop(il);
+	il3945_hw_rxq_stop(il);
+
+	/* Power-down device's busmaster DMA clocks */
+	il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(5);
+
+	/* Stop the device, and put it in low power state */
+	il_apm_stop(il);
+
+exit:
+	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+	il->beacon_skb = NULL;
+
+	/* clear out any free frames */
+	il3945_clear_free_frames(il);
+}
+
+static void
+il3945_down(struct il_priv *il)
+{
+	mutex_lock(&il->mutex);
+	__il3945_down(il);
+	mutex_unlock(&il->mutex);
+
+	il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	unsigned long flags;
+	u8 sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Unable to prepare broadcast station\n");
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+
+		return -EINVAL;
+	}
+
+	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+	il->stations[sta_id].used |= IL_STA_BCAST;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+static int
+__il3945_up(struct il_priv *il)
+{
+	int rc, i;
+
+	rc = il3945_alloc_bcast_station(il);
+	if (rc)
+		return rc;
+
+	if (test_bit(S_EXIT_PENDING, &il->status)) {
+		IL_WARN("Exit pending; will not bring the NIC up\n");
+		return -EIO;
+	}
+
+	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+		IL_ERR("ucode not available for device bring up\n");
+		return -EIO;
+	}
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+		clear_bit(S_RF_KILL_HW, &il->status);
+	else {
+		set_bit(S_RF_KILL_HW, &il->status);
+		IL_WARN("Radio disabled by HW RF Kill switch\n");
+		return -ENODEV;
+	}
+
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+	rc = il3945_hw_nic_init(il);
+	if (rc) {
+		IL_ERR("Unable to init nic\n");
+		return rc;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+	il_enable_interrupts(il);
+
+	/* really make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+	/* Copy original ucode data image from disk into backup cache.
+	 * This will be used to initialize the on-board processor's
+	 * data SRAM for a clean start when the runtime program first loads. */
+	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+	       il->ucode_data.len);
+
+	/* We return success when we resume from suspend and rf_kill is on. */
+	if (test_bit(S_RF_KILL_HW, &il->status))
+		return 0;
+
+	for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+		/* load bootstrap state machine,
+		 * load bootstrap program into processor's memory,
+		 * prepare to load the "initialize" uCode */
+		rc = il->cfg->ops->lib->load_ucode(il);
+
+		if (rc) {
+			IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+			continue;
+		}
+
+		/* start card; "initialize" will load runtime ucode */
+		il3945_nic_start(il);
+
+		D_INFO(DRV_NAME " is coming up\n");
+
+		return 0;
+	}
+
+	set_bit(S_EXIT_PENDING, &il->status);
+	__il3945_down(il);
+	clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* We tried to restart and configure the device for as long as our
+	 * patience could withstand */
+	IL_ERR("Unable to initialize device after %d attempts.\n", i);
+	return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, init_alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il3945_init_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il3945_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change.  This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, _3945.rfkill_poll.work);
+	bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+	bool new_rfkill =
+	    !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+	if (new_rfkill != old_rfkill) {
+		if (new_rfkill)
+			set_bit(S_RF_KILL_HW, &il->status);
+		else
+			clear_bit(S_RF_KILL_HW, &il->status);
+
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+		D_RF_KILL("RF_KILL bit toggled to %s.\n",
+			  new_rfkill ? "disable radio" : "enable radio");
+	}
+
+	/* Keep this running, even if the radio is now enabled.  This will
+	 * be cancelled in mac_start() if the system decides to start again */
+	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+			   round_jiffies_relative(2 * HZ));
+
+}
+
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_host_cmd cmd = {
+		.id = C_SCAN,
+		.len = sizeof(struct il3945_scan_cmd),
+		.flags = CMD_SIZE_HUGE,
+	};
+	struct il3945_scan_cmd *scan;
+	u8 n_probes = 0;
+	enum ieee80211_band band;
+	bool is_active = false;
+	int ret;
+	u16 len;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->scan_cmd) {
+		il->scan_cmd =
+		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+			    GFP_KERNEL);
+		if (!il->scan_cmd) {
+			D_SCAN("Fail to allocate scan memory\n");
+			return -ENOMEM;
+		}
+	}
+	scan = il->scan_cmd;
+	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+	if (il_is_associated(il)) {
+		u16 interval;
+		u32 extra;
+		u32 suspend_time = 100;
+		u32 scan_suspend_time = 100;
+
+		D_INFO("Scanning while associated...\n");
+
+		interval = vif->bss_conf.beacon_int;
+
+		scan->suspend_time = 0;
+		scan->max_out_time = cpu_to_le32(200 * 1024);
+		if (!interval)
+			interval = suspend_time;
+		/*
+		 * suspend time format:
+		 *  0-19: beacon interval in usec (time before exec.)
+		 * 20-23: 0
+		 * 24-31: number of beacons (suspend between channels)
+		 */
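+		/*
+		 * Illustrative example (values assumed, not taken from a real
+		 * trace): with suspend_time = 100 and a beacon interval of
+		 * 100, extra = (100 / 100) << 24 = 0x01000000 and the usec
+		 * part is (100 % 100) * 1024 = 0, so scan_suspend_time below
+		 * works out to 0x01000000: one beacon of suspend, no extra
+		 * usec.
+		 */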
+
+		extra = (suspend_time / interval) << 24;
+		scan_suspend_time =
+		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
+
+		scan->suspend_time = cpu_to_le32(scan_suspend_time);
+		D_SCAN("suspend_time 0x%X beacon interval %d\n",
+		       scan_suspend_time, interval);
+	}
+
+	if (il->scan_request->n_ssids) {
+		int i, p = 0;
+		D_SCAN("Kicking off active scan\n");
+		for (i = 0; i < il->scan_request->n_ssids; i++) {
+			/* always does wildcard anyway */
+			if (!il->scan_request->ssids[i].ssid_len)
+				continue;
+			scan->direct_scan[p].id = WLAN_EID_SSID;
+			scan->direct_scan[p].len =
+			    il->scan_request->ssids[i].ssid_len;
+			memcpy(scan->direct_scan[p].ssid,
+			       il->scan_request->ssids[i].ssid,
+			       il->scan_request->ssids[i].ssid_len);
+			n_probes++;
+			p++;
+		}
+		is_active = true;
+	} else
+		D_SCAN("Kicking off passive scan.\n");
+
+	/* We don't build a direct scan probe request; the uCode will do
+	 * that based on the direct_mask added to each channel entry */
+	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+	scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	/* flags + rate selection */
+
+	switch (il->scan_band) {
+	case IEEE80211_BAND_2GHZ:
+		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+		scan->tx_cmd.rate = RATE_1M_PLCP;
+		band = IEEE80211_BAND_2GHZ;
+		break;
+	case IEEE80211_BAND_5GHZ:
+		scan->tx_cmd.rate = RATE_6M_PLCP;
+		band = IEEE80211_BAND_5GHZ;
+		break;
+	default:
+		IL_WARN("Invalid scan band\n");
+		return -EIO;
+	}
+
+	/*
+	 * If active scanning is requested but a certain channel is marked
+	 * passive, we can do active scanning if we detect transmissions. For
+	 * passive only scanning disable switching to active on any channel.
+	 */
+	scan->good_CRC_th =
+	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+	len =
+	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+			      vif->addr, il->scan_request->ie,
+			      il->scan_request->ie_len,
+			      IL_MAX_SCAN_SIZE - sizeof(*scan));
+	scan->tx_cmd.len = cpu_to_le16(len);
+
+	/* select Rx antennas */
+	scan->flags |= il3945_get_antenna_flags(il);
+
+	scan->channel_count =
+	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
+					 (void *)&scan->data[len], vif);
+	if (scan->channel_count == 0) {
+		D_SCAN("channel count %d\n", scan->channel_count);
+		return -EIO;
+	}
+
+	cmd.len +=
+	    le16_to_cpu(scan->tx_cmd.len) +
+	    scan->channel_count * sizeof(struct il3945_scan_channel);
+	cmd.data = scan;
+	scan->len = cpu_to_le16(cmd.len);
+
+	set_bit(S_SCAN_HW, &il->status);
+	ret = il_send_cmd_sync(il, &cmd);
+	if (ret)
+		clear_bit(S_SCAN_HW, &il->status);
+	return ret;
+}
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+
+	/*
+	 * Since setting the RXON may have been deferred while
+	 * performing the scan, fire one off if needed
+	 */
+	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+		il3945_commit_rxon(il, ctx);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, restart);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+		mutex_lock(&il->mutex);
+		il->ctx.vif = NULL;
+		il->is_open = 0;
+		mutex_unlock(&il->mutex);
+		il3945_down(il);
+		ieee80211_restart_hw(il->hw);
+	} else {
+		il3945_down(il);
+
+		mutex_lock(&il->mutex);
+		if (test_bit(S_EXIT_PENDING, &il->status)) {
+			mutex_unlock(&il->mutex);
+			return;
+		}
+
+		__il3945_up(il);
+		mutex_unlock(&il->mutex);
+	}
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il3945_rx_replenish(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+void
+il3945_post_associate(struct il_priv *il)
+{
+	int rc = 0;
+	struct ieee80211_conf *conf = NULL;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	if (!ctx->vif || !il->is_open)
+		return;
+
+	D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
+		ctx->active.bssid_addr);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	il_scan_cancel_timeout(il, 200);
+
+	conf = &il->hw->conf;
+
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il3945_commit_rxon(il, ctx);
+
+	rc = il_send_rxon_timing(il, ctx);
+	if (rc)
+		IL_WARN("C_RXON_TIMING failed - Attempting to continue.\n");
+
+	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+	D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
+		ctx->vif->bss_conf.beacon_int);
+
+	if (ctx->vif->bss_conf.use_short_preamble)
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+		if (ctx->vif->bss_conf.use_short_slot)
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+	}
+
+	il3945_commit_rxon(il, ctx);
+
+	switch (ctx->vif->type) {
+	case NL80211_IFTYPE_STATION:
+		il3945_rate_scale_init(il->hw, IL_AP_ID);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		il3945_send_beacon_cmd(il);
+		break;
+	default:
+		IL_ERR("%s Should not be called in %d mode\n", __func__,
+		       ctx->vif->type);
+		break;
+	}
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT	(2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	D_MAC80211("enter\n");
+
+	/* we should be verifying the device is ready to be opened */
+	mutex_lock(&il->mutex);
+
+	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+	 * ucode filename and max sizes are card-specific. */
+
+	if (!il->ucode_code.len) {
+		ret = il3945_read_ucode(il);
+		if (ret) {
+			IL_ERR("Could not read microcode: %d\n", ret);
+			mutex_unlock(&il->mutex);
+			goto out_release_irq;
+		}
+	}
+
+	ret = __il3945_up(il);
+
+	mutex_unlock(&il->mutex);
+
+	if (ret)
+		goto out_release_irq;
+
+	D_INFO("Start UP work.\n");
+
+	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
+	 * mac80211 will not be run successfully. */
+	ret = wait_event_timeout(il->wait_command_queue,
+				 test_bit(S_READY, &il->status),
+				 UCODE_READY_TIMEOUT);
+	if (!ret) {
+		if (!test_bit(S_READY, &il->status)) {
+			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
+			ret = -ETIMEDOUT;
+			goto out_release_irq;
+		}
+	}
+
+	/* ucode is running and will send rfkill notifications,
+	 * no need to poll the killswitch state anymore */
+	cancel_delayed_work(&il->_3945.rfkill_poll);
+
+	il->is_open = 1;
+	D_MAC80211("leave\n");
+	return 0;
+
+out_release_irq:
+	il->is_open = 0;
+	D_MAC80211("leave - failed\n");
+	return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	if (!il->is_open) {
+		D_MAC80211("leave - skip\n");
+		return;
+	}
+
+	il->is_open = 0;
+
+	il3945_down(il);
+
+	flush_workqueue(il->workqueue);
+
+	/* start polling the killswitch state again */
+	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+			   round_jiffies_relative(2 * HZ));
+
+	D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+	if (il3945_tx_skb(il, skb))
+		dev_kfree_skb_any(skb);
+
+	D_MAC80211("leave\n");
+}
+
+void
+il3945_config_ap(struct il_priv *il)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	struct ieee80211_vif *vif = ctx->vif;
+	int rc = 0;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	/* The following should be done only at AP bring up */
+	if (!(il_is_associated(il))) {
+
+		/* RXON - unassoc (to set timing command) */
+		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+		il3945_commit_rxon(il, ctx);
+
+		/* RXON Timing */
+		rc = il_send_rxon_timing(il, ctx);
+		if (rc)
+			IL_WARN("C_RXON_TIMING failed - "
+				"Attempting to continue.\n");
+
+		ctx->staging.assoc_id = 0;
+
+		if (vif->bss_conf.use_short_preamble)
+			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+			if (vif->bss_conf.use_short_slot)
+				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+			else
+				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+		}
+		/* restore RXON assoc */
+		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		il3945_commit_rxon(il, ctx);
+	}
+	il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key)
+{
+	struct il_priv *il = hw->priv;
+	int ret = 0;
+	u8 sta_id = IL_INVALID_STATION;
+	u8 static_key;
+
+	D_MAC80211("enter\n");
+
+	if (il3945_mod_params.sw_crypto) {
+		D_MAC80211("leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * To support IBSS RSN, don't program group keys in IBSS; the
+	 * hardware will then not attempt to decrypt the frames.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	static_key = !il_is_associated(il);
+
+	if (!static_key) {
+		sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+		if (sta_id == IL_INVALID_STATION)
+			return -EINVAL;
+	}
+
+	mutex_lock(&il->mutex);
+	il_scan_cancel_timeout(il, 100);
+
+	switch (cmd) {
+	case SET_KEY:
+		if (static_key)
+			ret = il3945_set_static_key(il, key);
+		else
+			ret = il3945_set_dynamic_key(il, key, sta_id);
+		D_MAC80211("enable hwcrypto key\n");
+		break;
+	case DISABLE_KEY:
+		if (static_key)
+			ret = il3945_remove_static_key(il);
+		else
+			ret = il3945_clear_sta_key_info(il, sta_id);
+		D_MAC80211("disable hwcrypto key\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&il->mutex);
+	D_MAC80211("leave\n");
+
+	return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct il_priv *il = hw->priv;
+	struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+	int ret;
+	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+	u8 sta_id;
+
+	D_INFO("received request to add station %pM\n", sta->addr);
+	mutex_lock(&il->mutex);
+	D_INFO("proceeding to add station %pM\n", sta->addr);
+	sta_priv->common.sta_id = IL_INVALID_STATION;
+
+	ret =
+	    il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+		/* Should we return success if the return code is EEXIST? */
+		mutex_unlock(&il->mutex);
+		return ret;
+	}
+
+	sta_priv->common.sta_id = sta_id;
+
+	/* Initialize rate scaling */
+	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+	il3945_rs_rate_init(il, sta, sta_id);
+	mutex_unlock(&il->mutex);
+
+	return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct il_priv *il = hw->priv;
+	__le32 filter_or = 0, filter_nand = 0;
+	struct il_rxon_context *ctx = &il->ctx;
+
+#define CHK(test, flag)	do { \
+	if (*total_flags & (test))		\
+		filter_or |= (flag);		\
+	else					\
+		filter_nand |= (flag);		\
+	} while (0)
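+
+	/*
+	 * For example, CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK) below adds
+	 * RXON_FILTER_CTL2HOST_MSK to filter_or when mac80211 requested
+	 * control frames, and to filter_nand otherwise.
+	 */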
+
+	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+		   *total_flags);
+
+	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+	mutex_lock(&il->mutex);
+
+	ctx->staging.filter_flags &= ~filter_nand;
+	ctx->staging.filter_flags |= filter_or;
+
+	/*
+	 * Not committing directly because the hardware may be performing
+	 * a scan; even when the hw is ready, committing here breaks for some
+	 * reason. The filter flags change will eventually be committed anyway.
+	 */
+
+	mutex_unlock(&il->mutex);
+
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in il_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
+	*total_flags &=
+	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
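+
+/*
+ * Illustrative usage from userspace (the exact sysfs path depends on the
+ * PCI address the adapter was enumerated at; the mask value is arbitrary):
+ *
+ *   # echo 0x43fff > /sys/bus/pci/devices/<bdf>/debug_level
+ *   # cat /sys/bus/pci/devices/<bdf>/debug_level
+ */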
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret)
+		IL_INFO("%s is not in hex or decimal form.\n", buf);
+	else {
+		il->debug_level = val;
+		if (il_alloc_traffic_mem(il))
+			IL_ERR("Not enough memory to generate traffic log\n");
+	}
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
+		   il3945_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	char *p = (char *)buf;
+	u32 val;
+
+	val = simple_strtoul(p, &p, 10);
+	if (p == buf)
+		IL_INFO("%s is not in decimal form.\n", buf);
+	else
+		il3945_hw_reg_set_txpower(il, val);
+
+	return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
+		   il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct il_rxon_context *ctx = &il->ctx;
+
+	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	u32 flags = simple_strtoul(buf, NULL, 0);
+	struct il_rxon_context *ctx = &il->ctx;
+
+	mutex_lock(&il->mutex);
+	if (le32_to_cpu(ctx->staging.flags) != flags) {
+		/* Cancel any currently running scans... */
+		if (il_scan_cancel_timeout(il, 100))
+			IL_WARN("Could not cancel scan.\n");
+		else {
+			D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+			ctx->staging.flags = cpu_to_le32(flags);
+			il3945_commit_rxon(il, ctx);
+		}
+	}
+	mutex_unlock(&il->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
+		   il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+			 char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct il_rxon_context *ctx = &il->ctx;
+
+	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct il_rxon_context *ctx = &il->ctx;
+	u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+	mutex_lock(&il->mutex);
+	if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+		/* Cancel any currently running scans... */
+		if (il_scan_cancel_timeout(il, 100))
+			IL_WARN("Could not cancel scan.\n");
+		else {
+			D_INFO("Committing rxon.filter_flags = 0x%04X\n",
+			       filter_flags);
+			ctx->staging.filter_flags = cpu_to_le32(filter_flags);
+			il3945_commit_rxon(il, ctx);
+		}
+	}
+	mutex_unlock(&il->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
+		   il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct il_spectrum_notification measure_report;
+	u32 size = sizeof(measure_report), len = 0, ofs = 0;
+	u8 *data = (u8 *) &measure_report;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (!(il->measurement_status & MEASUREMENT_READY)) {
+		spin_unlock_irqrestore(&il->lock, flags);
+		return 0;
+	}
+	memcpy(&measure_report, &il->measure_report, size);
+	il->measurement_status = 0;
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	while (size && PAGE_SIZE - len) {
+		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+				   PAGE_SIZE - len, 1);
+		len = strlen(buf);
+		if (PAGE_SIZE - len)
+			buf[len++] = '\n';
+
+		ofs += 16;
+		size -= min(size, 16U);
+	}
+
+	return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct il_rxon_context *ctx = &il->ctx;
+	struct ieee80211_measurement_params params = {
+		.channel = le16_to_cpu(ctx->active.channel),
+		.start_time = cpu_to_le64(il->_3945.last_tsf),
+		.duration = cpu_to_le16(1),
+	};
+	u8 type = IL_MEASURE_BASIC;
+	u8 buffer[32];
+	u8 channel;
+
+	if (count) {
+		char *p = buffer;
+		strncpy(buffer, buf, min(sizeof(buffer), count));
+		channel = simple_strtoul(p, NULL, 0);
+		if (channel)
+			params.channel = channel;
+
+		p = buffer;
+		while (*p && *p != ' ')
+			p++;
+		if (*p)
+			type = simple_strtoul(p + 1, NULL, 0);
+	}
+
+	D_INFO("Invoking measurement of type %d on channel %d (for '%s')\n",
+	       type, params.channel, buf);
+	il3945_get_measurement(il, &params, type);
+
+	return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
+		   il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	il->retry_rate = simple_strtoul(buf, NULL, 0);
+	if (il->retry_rate <= 0)
+		il->retry_rate = 1;
+
+	return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+		       char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
+		   il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+	/* none of this belongs in sysfs anyway */
+	return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+	int ant;
+
+	if (count == 0)
+		return 0;
+
+	if (sscanf(buf, "%1i", &ant) != 1) {
+		D_INFO("not in hex or decimal form.\n");
+		return count;
+	}
+
+	if (ant >= 0 && ant <= 2) {
+		D_INFO("Setting antenna select to %d.\n", ant);
+		il3945_mod_params.antenna = (enum il3945_antenna)ant;
+	} else
+		D_INFO("Bad antenna select value %d.\n", ant);
+
+	return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
+		   il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	if (!il_is_alive(il))
+		return -EAGAIN;
+	return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	char *p = (char *)buf;
+
+	if (p[0] == '1')
+		il3945_dump_nic_error_log(il);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+	il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+	init_waitqueue_head(&il->wait_command_queue);
+
+	INIT_WORK(&il->restart, il3945_bg_restart);
+	INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+	INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+	INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+	INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+	il_setup_scan_deferred_work(il);
+
+	il3945_hw_setup_deferred_work(il);
+
+	init_timer(&il->watchdog);
+	il->watchdog.data = (unsigned long)il;
+	il->watchdog.function = il_bg_watchdog;
+
+	tasklet_init(&il->irq_tasklet,
+		     (void (*)(unsigned long))il3945_irq_tasklet,
+		     (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+	il3945_hw_cancel_deferred_work(il);
+
+	cancel_delayed_work_sync(&il->init_alive_start);
+	cancel_delayed_work(&il->alive_start);
+
+	il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+	&dev_attr_antenna.attr,
+	&dev_attr_channels.attr,
+	&dev_attr_dump_errors.attr,
+	&dev_attr_flags.attr,
+	&dev_attr_filter_flags.attr,
+	&dev_attr_measurement.attr,
+	&dev_attr_retry_rate.attr,
+	&dev_attr_status.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+	&dev_attr_debug_level.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group il3945_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = il3945_sysfs_entries,
+};
+
+struct ieee80211_ops il3945_hw_ops = {
+	.tx = il3945_mac_tx,
+	.start = il3945_mac_start,
+	.stop = il3945_mac_stop,
+	.add_interface = il_mac_add_interface,
+	.remove_interface = il_mac_remove_interface,
+	.change_interface = il_mac_change_interface,
+	.config = il_mac_config,
+	.configure_filter = il3945_configure_filter,
+	.set_key = il3945_mac_set_key,
+	.conf_tx = il_mac_conf_tx,
+	.reset_tsf = il_mac_reset_tsf,
+	.bss_info_changed = il_mac_bss_info_changed,
+	.hw_scan = il_mac_hw_scan,
+	.sta_add = il3945_mac_sta_add,
+	.sta_remove = il_mac_sta_remove,
+	.tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static int
+il3945_init_drv(struct il_priv *il)
+{
+	int ret;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+	il->retry_rate = 1;
+	il->beacon_skb = NULL;
+
+	spin_lock_init(&il->sta_lock);
+	spin_lock_init(&il->hcmd_lock);
+
+	INIT_LIST_HEAD(&il->free_frames);
+
+	mutex_init(&il->mutex);
+
+	il->ieee_channels = NULL;
+	il->ieee_rates = NULL;
+	il->band = IEEE80211_BAND_2GHZ;
+
+	il->iw_mode = NL80211_IFTYPE_STATION;
+	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+	/* initialize force reset */
+	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+		IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+			eeprom->version);
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = il_init_channel_map(il);
+	if (ret) {
+		IL_ERR("initializing regulatory failed: %d\n", ret);
+		goto err;
+	}
+
+	/* Set up txpower settings in driver for all channels */
+	if (il3945_txpower_set_from_eeprom(il)) {
+		ret = -EIO;
+		goto err_free_channel_map;
+	}
+
+	ret = il_init_geos(il);
+	if (ret) {
+		IL_ERR("initializing geos failed: %d\n", ret);
+		goto err_free_channel_map;
+	}
+	il3945_init_hw_rates(il, il->ieee_rates);
+
+	return 0;
+
+err_free_channel_map:
+	il_free_channel_map(il);
+err:
+	return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST	200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+	int ret;
+	struct ieee80211_hw *hw = il->hw;
+
+	hw->rate_control_algorithm = "iwl-3945-rs";
+	hw->sta_data_size = sizeof(struct il3945_sta_priv);
+	hw->vif_data_size = sizeof(struct il_vif_priv);
+
+	/* Tell mac80211 our characteristics */
+	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+
+	hw->wiphy->interface_modes = il->ctx.interface_modes;
+
+	hw->wiphy->flags |=
+	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+	    WIPHY_FLAG_IBSS_RSN;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+	/* we create the 802.11 header and a zero-length SSID element */
+	hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		    &il->bands[IEEE80211_BAND_2GHZ];
+
+	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+		    &il->bands[IEEE80211_BAND_5GHZ];
+
+	il_leds_init(il);
+
+	ret = ieee80211_register_hw(il->hw);
+	if (ret) {
+		IL_ERR("Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	il->mac80211_registered = 1;
+
+	return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int err = 0;
+	struct il_priv *il;
+	struct ieee80211_hw *hw;
+	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+	struct il3945_eeprom *eeprom;
+	unsigned long flags;
+
+	/***********************
+	 * 1. Allocating HW data
+	 * ********************/
+
+	/* mac80211 allocates memory for this device instance, including
+	 *   space for this driver's private structure */
+	hw = il_alloc_all(cfg);
+	if (hw == NULL) {
+		pr_err("Cannot allocate network device\n");
+		err = -ENOMEM;
+		goto out;
+	}
+	il = hw->priv;
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+
+	il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+	il->ctx.ctxid = 0;
+
+	il->ctx.rxon_cmd = C_RXON;
+	il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+	il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+	il->ctx.qos_cmd = C_QOS_PARAM;
+	il->ctx.ap_sta_id = IL_AP_ID;
+	il->ctx.wep_key_cmd = C_WEPKEY;
+	il->ctx.interface_modes =
+	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+	il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+	il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+	il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+	/*
+	 * Disabling hardware scan means that mac80211 will perform scans
+	 * "the hard way", rather than using the device's scan.
+	 */
+	if (il3945_mod_params.disable_hw_scan) {
+		D_INFO("Disabling hw_scan\n");
+		il3945_hw_ops.hw_scan = NULL;
+	}
+
+	D_INFO("*** LOAD DRIVER ***\n");
+	il->cfg = cfg;
+	il->pci_dev = pdev;
+	il->inta_mask = CSR_INI_SET_MASK;
+
+	if (il_alloc_traffic_mem(il))
+		IL_ERR("Not enough memory to generate traffic log\n");
+
+	/***************************
+	 * 2. Initializing PCI bus
+	 * *************************/
+	pci_disable_link_state(pdev,
+			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_ieee80211_free_hw;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err) {
+		IL_WARN("No suitable DMA available.\n");
+		goto out_pci_disable_device;
+	}
+
+	pci_set_drvdata(pdev, il);
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto out_pci_disable_device;
+
+	/***********************
+	 * 3. Read REV Register
+	 * ********************/
+	il->hw_base = pci_iomap(pdev, 0, 0);
+	if (!il->hw_base) {
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	D_INFO("pci_resource_len = 0x%08llx\n",
+	       (unsigned long long)pci_resource_len(pdev, 0));
+	D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, 0x41, 0x00);
+
+	/* these spin locks will be used in apm_ops.init and EEPROM access
+	 * we should init now
+	 */
+	spin_lock_init(&il->reg_lock);
+	spin_lock_init(&il->lock);
+
+	/*
+	 * stop and reset the on-board processor just in case it is in a
+	 * strange state ... like being left stranded by a primary kernel
+	 * and this is now the kdump kernel trying to start up
+	 */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/***********************
+	 * 4. Read EEPROM
+	 * ********************/
+
+	/* Read the EEPROM */
+	err = il_eeprom_init(il);
+	if (err) {
+		IL_ERR("Unable to init EEPROM\n");
+		goto out_iounmap;
+	}
+	/* MAC Address location in EEPROM same for 3945/4965 */
+	eeprom = (struct il3945_eeprom *)il->eeprom;
+	D_INFO("MAC address: %pM\n", eeprom->mac_address);
+	SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+	/***********************
+	 * 5. Setup HW Constants
+	 * ********************/
+	/* Device-specific setup */
+	if (il3945_hw_set_hw_params(il)) {
+		IL_ERR("failed to set hw settings\n");
+		goto out_eeprom_free;
+	}
+
+	/***********************
+	 * 6. Setup il
+	 * ********************/
+
+	err = il3945_init_drv(il);
+	if (err) {
+		IL_ERR("initializing driver failed\n");
+		goto out_unset_hw_params;
+	}
+
+	IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+	/***********************
+	 * 7. Setup Services
+	 * ********************/
+
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	pci_enable_msi(il->pci_dev);
+
+	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+	if (err) {
+		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+		goto out_disable_msi;
+	}
+
+	err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+	if (err) {
+		IL_ERR("failed to create sysfs device attributes\n");
+		goto out_release_irq;
+	}
+
+	il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
+			    &il->ctx);
+	il3945_setup_deferred_work(il);
+	il3945_setup_handlers(il);
+	il_power_initialize(il);
+
+	/*********************************
+	 * 8. Setup and Register mac80211
+	 * *******************************/
+
+	il_enable_interrupts(il);
+
+	err = il3945_setup_mac(il);
+	if (err)
+		goto out_remove_sysfs;
+
+	err = il_dbgfs_register(il, DRV_NAME);
+	if (err)
+		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+		       err);
+
+	/* Start monitoring the killswitch */
+	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+	return 0;
+
+out_remove_sysfs:
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+	free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+	pci_disable_msi(il->pci_dev);
+	il_free_geos(il);
+	il_free_channel_map(il);
+out_unset_hw_params:
+	il3945_unset_hw_params(il);
+out_eeprom_free:
+	il_eeprom_free(il);
+out_iounmap:
+	pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+	pci_release_regions(pdev);
+out_pci_disable_device:
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+out_ieee80211_free_hw:
+	il_free_traffic_mem(il);
+	ieee80211_free_hw(il->hw);
+out:
+	return err;
+}
+
+static void __devexit
+il3945_pci_remove(struct pci_dev *pdev)
+{
+	struct il_priv *il = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	if (!il)
+		return;
+
+	D_INFO("*** UNLOAD DRIVER ***\n");
+
+	il_dbgfs_unregister(il);
+
+	set_bit(S_EXIT_PENDING, &il->status);
+
+	il_leds_exit(il);
+
+	if (il->mac80211_registered) {
+		ieee80211_unregister_hw(il->hw);
+		il->mac80211_registered = 0;
+	} else {
+		il3945_down(il);
+	}
+
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with il_down(), but there are paths to
+	 * run il_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running il_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	il_apm_stop(il);
+
+	/* make sure we flush any pending irq or
+	 * tasklet for the driver
+	 */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il3945_synchronize_irq(il);
+
+	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+	cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+	il3945_dealloc_ucode_pci(il);
+
+	if (il->rxq.bd)
+		il3945_rx_queue_free(il, &il->rxq);
+	il3945_hw_txq_ctx_free(il);
+
+	il3945_unset_hw_params(il);
+
+	/*netif_stop_queue(dev); */
+	flush_workqueue(il->workqueue);
+
+	/* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+	 * il->workqueue... so we can't take down the workqueue
+	 * until now... */
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+	il_free_traffic_mem(il);
+
+	free_irq(pdev->irq, il);
+	pci_disable_msi(pdev);
+
+	pci_iounmap(pdev, il->hw_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	il_free_channel_map(il);
+	il_free_geos(il);
+	kfree(il->scan_cmd);
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+
+	ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+	.name = DRV_NAME,
+	.id_table = il3945_hw_card_ids,
+	.probe = il3945_pci_probe,
+	.remove = __devexit_p(il3945_pci_remove),
+	.driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+	int ret;
+
+	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	pr_info(DRV_COPYRIGHT "\n");
+
+	ret = il3945_rate_control_register();
+	if (ret) {
+		pr_err("Unable to register rate control algorithm: %d\n", ret);
+		return ret;
+	}
+
+	ret = pci_register_driver(&il3945_driver);
+	if (ret) {
+		pr_err("Unable to initialize PCI module\n");
+		goto error_register;
+	}
+
+	return ret;
+
+error_register:
+	il3945_rate_control_unregister();
+	return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+	pci_unregister_driver(&il3945_driver);
+	il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+		   S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 0000000..d7a83f2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,986 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+	7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+	7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+	s8 min_rssi;
+	u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+	{-60, RATE_54M_IDX},
+	{-64, RATE_48M_IDX},
+	{-72, RATE_36M_IDX},
+	{-80, RATE_24M_IDX},
+	{-84, RATE_18M_IDX},
+	{-85, RATE_12M_IDX},
+	{-87, RATE_9M_IDX},
+	{-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+	{-60, RATE_54M_IDX},
+	{-64, RATE_48M_IDX},
+	{-68, RATE_36M_IDX},
+	{-80, RATE_24M_IDX},
+	{-84, RATE_18M_IDX},
+	{-85, RATE_12M_IDX},
+	{-86, RATE_11M_IDX},
+	{-88, RATE_5M_IDX},
+	{-90, RATE_2M_IDX},
+	{-92, RATE_1M_IDX}
+};
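+
+/*
+ * Illustrative lookup: il3945_get_rate_idx_by_rssi() below walks these
+ * tables from the top; for a 2.4 GHz signal at an assumed -70 dBm the first
+ * entry whose min_rssi is not above -70 is {-80, RATE_24M_IDX}, so the
+ * 24 Mbit/s idx is returned.
+ */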
+
+#define RATE_MAX_WINDOW		62
+#define RATE_FLUSH		(3*HZ)
+#define RATE_WIN_FLUSH		(HZ/2)
+#define IL39_RATE_HIGH_TH	11520
+#define IL_SUCCESS_UP_TH	8960
+#define IL_SUCCESS_DOWN_TH	10880
+#define RATE_MIN_FAILURE_TH	6
+#define RATE_MIN_SUCCESS_TH	8
+#define RATE_DECREASE_TH	1920
+#define RATE_RETRY_TH		15
+
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+	u32 idx = 0;
+	u32 table_size = 0;
+	struct il3945_tpt_entry *tpt_table = NULL;
+
+	if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+		rssi = IL_MIN_RSSI_VAL;
+
+	switch (band) {
+	case IEEE80211_BAND_2GHZ:
+		tpt_table = il3945_tpt_table_g;
+		table_size = ARRAY_SIZE(il3945_tpt_table_g);
+		break;
+	case IEEE80211_BAND_5GHZ:
+		tpt_table = il3945_tpt_table_a;
+		table_size = ARRAY_SIZE(il3945_tpt_table_a);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+		idx++;
+
+	idx = min(idx, table_size - 1);
+
+	return tpt_table[idx].idx;
+}
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+	win->data = 0;
+	win->success_counter = 0;
+	win->success_ratio = -1;
+	win->counter = 0;
+	win->average_tpt = IL_INVALID_VALUE;
+	win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed.  If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+	int unflushed = 0;
+	int i;
+	unsigned long flags;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+
+	/*
+	 * For each rate, if we have collected data on that rate
+	 * and it has been more than RATE_WIN_FLUSH
+	 * since we flushed, clear out the gathered stats
+	 */
+	for (i = 0; i < RATE_COUNT_3945; i++) {
+		if (!rs_sta->win[i].counter)
+			continue;
+
+		spin_lock_irqsave(&rs_sta->lock, flags);
+		if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+			D_RATE("flushing %d samples of rate idx %d\n",
+			       rs_sta->win[i].counter, i);
+			il3945_clear_win(&rs_sta->win[i]);
+		} else
+			unflushed++;
+		spin_unlock_irqrestore(&rs_sta->lock, flags);
+	}
+
+	return unflushed;
+}
+
+#define RATE_FLUSH_MAX              5000	/* msec */
+#define RATE_FLUSH_MIN              50	/* msec */
+#define IL_AVERAGE_PACKETS             1500
+
+static void
+il3945_bg_rate_scale_flush(unsigned long data)
+{
+	struct il3945_rs_sta *rs_sta = (void *)data;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+	int unflushed = 0;
+	unsigned long flags;
+	u32 packet_count, duration, pps;
+
+	D_RATE("enter\n");
+
+	unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* Number of packets Tx'd since last time this timer ran */
+	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+	if (unflushed) {
+		duration =
+		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+		D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+		/* Determine packets per second */
+		if (duration)
+			pps = (packet_count * 1000) / duration;
+		else
+			pps = 0;
+
+		if (pps) {
+			duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+			if (duration < RATE_FLUSH_MIN)
+				duration = RATE_FLUSH_MIN;
+			else if (duration > RATE_FLUSH_MAX)
+				duration = RATE_FLUSH_MAX;
+		} else
+			duration = RATE_FLUSH_MAX;
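+
+		/*
+		 * Worked example: 750 packets in 500 ms gives pps = 1500, so
+		 * duration = (IL_AVERAGE_PACKETS * 1000) / 1500 = 1000 ms,
+		 * which is within [RATE_FLUSH_MIN, RATE_FLUSH_MAX] and is
+		 * used as the next flush period below.
+		 */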
+
+		rs_sta->flush_time = msecs_to_jiffies(duration);
+
+		D_RATE("new flush period: %d msec ave %d\n", duration,
+		       packet_count);
+
+		mod_timer(&rs_sta->rate_scale_flush,
+			  jiffies + rs_sta->flush_time);
+
+		rs_sta->last_partial_flush = jiffies;
+	} else {
+		rs_sta->flush_time = RATE_FLUSH;
+		rs_sta->flush_pending = 0;
+	}
+	/* If there weren't any unflushed entries, we don't schedule the timer
+	 * to run again */
+
+	rs_sta->last_flush = jiffies;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	D_RATE("leave\n");
+}
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate.  win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+		       struct il3945_rate_scale_data *win, int success,
+		       int retries, int idx)
+{
+	unsigned long flags;
+	s32 fail_count;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+
+	if (!retries) {
+		D_RATE("leave: retries == 0 -- should be at least 1\n");
+		return;
+	}
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/*
+	 * Keep track of only the latest 62 tx frame attempts in this rate's
+	 * history win; anything older isn't really relevant any more.
+	 * If we have filled up the sliding win, drop the oldest attempt;
+	 * if the oldest attempt (highest bit in bitmap) shows "success",
+	 * subtract "1" from the success counter (this is the main reason
+	 * we keep these bitmaps!).
+	 */
+	while (retries > 0) {
+		if (win->counter >= RATE_MAX_WINDOW) {
+
+			/* remove earliest */
+			win->counter = RATE_MAX_WINDOW - 1;
+
+			if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+				win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+				win->success_counter--;
+			}
+		}
+
+		/* Increment frames-attempted counter */
+		win->counter++;
+
+		/* Shift bitmap by one frame (throw away oldest history),
+		 * OR in "1", and increment "success" if this
+		 * frame was successful. */
+		win->data <<= 1;
+		if (success > 0) {
+			win->success_counter++;
+			win->data |= 0x1;
+			success--;
+		}
+
+		retries--;
+	}
+
+	/* Calculate current success ratio, avoid divide-by-0! */
+	if (win->counter > 0)
+		win->success_ratio =
+		    128 * (100 * win->success_counter) / win->counter;
+	else
+		win->success_ratio = IL_INVALID_VALUE;
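+
+	/*
+	 * success_ratio is a percentage scaled by 128: e.g. 31 successes
+	 * out of 62 attempts gives 128 * (100 * 31) / 62 = 6400 (50% * 128).
+	 */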
+
+	fail_count = win->counter - win->success_counter;
+
+	/* Calculate average throughput, if we have enough history. */
+	if (fail_count >= RATE_MIN_FAILURE_TH ||
+	    win->success_counter >= RATE_MIN_SUCCESS_TH)
+		win->average_tpt =
+		    ((win->success_ratio * rs_sta->expected_tpt[idx] +
+		      64) / 128);
+	else
+		win->average_tpt = IL_INVALID_VALUE;
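+
+	/*
+	 * average_tpt example: with success_ratio = 6400 (50%) and
+	 * expected_tpt[idx] = 104, (6400 * 104 + 64) / 128 = 5200, i.e. the
+	 * expected throughput weighted by the 50% success rate.
+	 */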
+
+	/* Tag this win as having been updated */
+	win->stamp = jiffies;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+	struct ieee80211_hw *hw = il->hw;
+	struct ieee80211_conf *conf = &il->hw->conf;
+	struct il3945_sta_priv *psta;
+	struct il3945_rs_sta *rs_sta;
+	struct ieee80211_supported_band *sband;
+	int i;
+
+	D_INFO("enter\n");
+	if (sta_id == il->ctx.bcast_sta_id)
+		goto out;
+
+	psta = (struct il3945_sta_priv *)sta->drv_priv;
+	rs_sta = &psta->rs_sta;
+	sband = hw->wiphy->bands[conf->channel->band];
+
+	rs_sta->il = il;
+
+	rs_sta->start_rate = RATE_INVALID;
+
+	/* default to just 802.11b */
+	rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+	rs_sta->last_partial_flush = jiffies;
+	rs_sta->last_flush = jiffies;
+	rs_sta->flush_time = RATE_FLUSH;
+	rs_sta->last_tx_packets = 0;
+
+	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+	rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
+
+	for (i = 0; i < RATE_COUNT_3945; i++)
+		il3945_clear_win(&rs_sta->win[i]);
+
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate.. Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc.. */
+
+	for (i = sband->n_bitrates - 1; i >= 0; i--) {
+		if (sta->supp_rates[sband->band] & (1 << i)) {
+			rs_sta->last_txrate_idx = i;
+			break;
+		}
+	}
+
+	il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+	/* For the 5 GHz band it starts at IL_FIRST_OFDM_RATE */
+	if (sband->band == IEEE80211_BAND_5GHZ) {
+		rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+		il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
+	}
+
+out:
+	il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+	D_INFO("leave\n");
+}
+
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+	struct il3945_rs_sta *rs_sta;
+	struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+	struct il_priv *il __maybe_unused = il_priv;
+
+	D_RATE("enter\n");
+
+	rs_sta = &psta->rs_sta;
+
+	spin_lock_init(&rs_sta->lock);
+	init_timer(&rs_sta->rate_scale_flush);
+
+	D_RATE("leave\n");
+
+	return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+	struct il3945_rs_sta *rs_sta = il_sta;
+
+	/*
+	 * Be careful not to use any members of il3945_rs_sta (like trying
+	 * to use il_priv to print out debugging) since it may not be fully
+	 * initialized at this point.
+	 */
+	del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+		    struct ieee80211_sta *sta, void *il_sta,
+		    struct sk_buff *skb)
+{
+	s8 retries = 0, current_count;
+	int scale_rate_idx, first_idx, last_idx;
+	unsigned long flags;
+	struct il_priv *il = (struct il_priv *)il_rate;
+	struct il3945_rs_sta *rs_sta = il_sta;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	D_RATE("enter\n");
+
+	retries = info->status.rates[0].count;
+	/* Sanity Check for retries */
+	if (retries > RATE_RETRY_TH)
+		retries = RATE_RETRY_TH;
+
+	first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+	if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+		D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+		return;
+	}
+
+	if (!il_sta) {
+		D_RATE("leave: No STA il data to update!\n");
+		return;
+	}
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (!rs_sta->il) {
+		D_RATE("leave: STA il data uninitialized!\n");
+		return;
+	}
+
+	rs_sta->tx_packets++;
+
+	scale_rate_idx = first_idx;
+	last_idx = first_idx;
+
+	/*
+	 * Update the win for each rate.  We determine which rates
+	 * were Tx'd based on the total number of retries vs. the number
+	 * of retries configured for each rate -- currently set to the
+	 * il value 'retry_rate' vs. rate specific
+	 *
+	 * On exit from this while loop last_idx indicates the rate
+	 * at which the frame was finally transmitted (or failed if no
+	 * ACK)
+	 */
+	while (retries > 1) {
+		if ((retries - 1) < il->retry_rate) {
+			current_count = (retries - 1);
+			last_idx = scale_rate_idx;
+		} else {
+			current_count = il->retry_rate;
+			last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+		}
+
+		/* Update this rate accounting for as many retries
+		 * as was used for it (per current_count) */
+		il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+				       current_count, scale_rate_idx);
+		D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+		       current_count);
+
+		retries -= current_count;
+
+		scale_rate_idx = last_idx;
+	}
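+
+	/*
+	 * Example: with retries = 7 and il->retry_rate = 3, the loop charges
+	 * 3 failed attempts to the first rate and 3 to the next lower rate,
+	 * leaving retries = 1 so that the final attempt is accounted for
+	 * below using the ACK status.
+	 */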
+
+	/* Update the last idx win with success/failure based on ACK */
+	D_RATE("Update rate %d with %s.\n", last_idx,
+	       (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+	il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+			       info->flags & IEEE80211_TX_STAT_ACK, 1,
+			       last_idx);
+
+	/* We updated the rate scale win -- if it's been more than
+	 * flush_time since the last run, schedule the flush
+	 * again */
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	if (!rs_sta->flush_pending &&
+	    time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+		rs_sta->last_partial_flush = jiffies;
+		rs_sta->flush_pending = 1;
+		mod_timer(&rs_sta->rate_scale_flush,
+			  jiffies + rs_sta->flush_time);
+	}
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	D_RATE("leave\n");
+}
+
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+			 enum ieee80211_band band)
+{
+	u8 high = RATE_INVALID;
+	u8 low = RATE_INVALID;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+
+	/* 802.11A walks to the next literal adjacent rate in
+	 * the rate table */
+	if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+		int i;
+		u32 mask;
+
+		/* Find the previous rate that is in the rate mask */
+		i = idx - 1;
+		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+			if (rate_mask & mask) {
+				low = i;
+				break;
+			}
+		}
+
+		/* Find the next rate that is in the rate mask */
+		i = idx + 1;
+		for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+			if (rate_mask & mask) {
+				high = i;
+				break;
+			}
+		}
+
+		return (high << 8) | low;
+	}
+
+	low = idx;
+	while (low != RATE_INVALID) {
+		if (rs_sta->tgg)
+			low = il3945_rates[low].prev_rs_tgg;
+		else
+			low = il3945_rates[low].prev_rs;
+		if (low == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << low))
+			break;
+		D_RATE("Skipping masked lower rate: %d\n", low);
+	}
+
+	high = idx;
+	while (high != RATE_INVALID) {
+		if (rs_sta->tgg)
+			high = il3945_rates[high].next_rs_tgg;
+		else
+			high = il3945_rates[high].next_rs;
+		if (high == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << high))
+			break;
+		D_RATE("Skipping masked higher rate: %d\n", high);
+	}
+
+	return (high << 8) | low;
+}
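+
+/*
+ * Note: the u16 returned above packs the higher adjacent rate idx in bits
+ * 15:8 and the lower one in bits 7:0; callers unpack it with
+ * "low = high_low & 0xff; high = (high_low >> 8) & 0xff;".
+ */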
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Returns the ieee80211_rate structure allocated by the driver.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+		   struct ieee80211_tx_rate_control *txrc)
+{
+	struct ieee80211_supported_band *sband = txrc->sband;
+	struct sk_buff *skb = txrc->skb;
+	u8 low = RATE_INVALID;
+	u8 high = RATE_INVALID;
+	u16 high_low;
+	int idx;
+	struct il3945_rs_sta *rs_sta = il_sta;
+	struct il3945_rate_scale_data *win = NULL;
+	int current_tpt = IL_INVALID_VALUE;
+	int low_tpt = IL_INVALID_VALUE;
+	int high_tpt = IL_INVALID_VALUE;
+	u32 fail_count;
+	s8 scale_action = 0;
+	unsigned long flags;
+	u16 rate_mask;
+	s8 max_rate_idx = -1;
+	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	D_RATE("enter\n");
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (rs_sta && !rs_sta->il) {
+		D_RATE("Rate scaling information not initialized yet.\n");
+		il_sta = NULL;
+	}
+
+	if (rate_control_send_low(sta, il_sta, txrc))
+		return;
+
+	rate_mask = sta->supp_rates[sband->band];
+
+	/* get user max rate if set */
+	max_rate_idx = txrc->max_rate_idx;
+	if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+		max_rate_idx += IL_FIRST_OFDM_RATE;
+	if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+		max_rate_idx = -1;
+
+	idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+	if (sband->band == IEEE80211_BAND_5GHZ)
+		rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* for recent assoc, choose the best rate based on
+	 * the rssi value
+	 */
+	if (rs_sta->start_rate != RATE_INVALID) {
+		if (rs_sta->start_rate < idx &&
+		    (rate_mask & (1 << rs_sta->start_rate)))
+			idx = rs_sta->start_rate;
+		rs_sta->start_rate = RATE_INVALID;
+	}
+
+	/* force user max rate if set by user */
+	if (max_rate_idx != -1 && max_rate_idx < idx) {
+		if (rate_mask & (1 << max_rate_idx))
+			idx = max_rate_idx;
+	}
+
+	win = &(rs_sta->win[idx]);
+
+	fail_count = win->counter - win->success_counter;
+
+	if (fail_count < RATE_MIN_FAILURE_TH &&
+	    win->success_counter < RATE_MIN_SUCCESS_TH) {
+		spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+		D_RATE("Invalid average_tpt on rate %d: "
+		       "counter: %d, success_counter: %d, "
+		       "expected_tpt is %sNULL\n", idx, win->counter,
+		       win->success_counter,
+		       rs_sta->expected_tpt ? "not " : "");
+
+		/* Can't calculate this yet; not enough history */
+		win->average_tpt = IL_INVALID_VALUE;
+		goto out;
+
+	}
+
+	current_tpt = win->average_tpt;
+
+	high_low =
+	    il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* If user set a max rate, don't allow a rate higher than that constraint */
+	if (max_rate_idx != -1 && max_rate_idx < high)
+		high = RATE_INVALID;
+
+	/* Collect Measured throughputs of adjacent rates */
+	if (low != RATE_INVALID)
+		low_tpt = rs_sta->win[low].average_tpt;
+
+	if (high != RATE_INVALID)
+		high_tpt = rs_sta->win[high].average_tpt;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	scale_action = 0;
+
+	/* Low success ratio, need to drop the rate */
+	if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+		D_RATE("decrease rate because of low success_ratio\n");
+		scale_action = -1;
+		/* No throughput measured yet for adjacent rates,
+		 * try to increase */
+	} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+		if (high != RATE_INVALID &&
+		    win->success_ratio >= RATE_INCREASE_TH)
+			scale_action = 1;
+		else if (low != RATE_INVALID)
+			scale_action = 0;
+
+		/* Both adjacent throughputs are measured, but neither one has
+		 * better throughput; we're using the best rate, don't change
+		 * it! */
+	} else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+		   && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+		D_RATE("No action -- low [%d] & high [%d] < "
+		       "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+		scale_action = 0;
+
+		/* At least one of the rates has better throughput */
+	} else {
+		if (high_tpt != IL_INVALID_VALUE) {
+
+			/* High rate has better throughput, Increase
+			 * rate */
+			if (high_tpt > current_tpt &&
+			    win->success_ratio >= RATE_INCREASE_TH)
+				scale_action = 1;
+			else {
+				D_RATE("no action despite high tpt\n");
+				scale_action = 0;
+			}
+		} else if (low_tpt != IL_INVALID_VALUE) {
+			if (low_tpt > current_tpt) {
+				D_RATE("decrease rate because of low tpt\n");
+				scale_action = -1;
+			} else if (win->success_ratio >= RATE_INCREASE_TH) {
+				/* Lower rate is not better and success
+				 * ratio is good, so increase the rate */
+				scale_action = 1;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate.  Don't change it. */
+	if (scale_action == -1 && low != RATE_INVALID &&
+	    (win->success_ratio > RATE_HIGH_TH ||
+	     current_tpt > 100 * rs_sta->expected_tpt[low]))
+		scale_action = 0;
+
+	switch (scale_action) {
+	case -1:
+		/* Decrease rate */
+		if (low != RATE_INVALID)
+			idx = low;
+		break;
+	case 1:
+		/* Increase rate */
+		if (high != RATE_INVALID)
+			idx = high;
+
+		break;
+	case 0:
+	default:
+		/* No change */
+		break;
+	}
+
+	D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+	       low, high);
+
+out:
+
+	if (sband->band == IEEE80211_BAND_5GHZ) {
+		if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+			idx = IL_FIRST_OFDM_RATE;
+		rs_sta->last_txrate_idx = idx;
+		info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+	} else {
+		rs_sta->last_txrate_idx = idx;
+		info->control.rates[0].idx = rs_sta->last_txrate_idx;
+	}
+
+	D_RATE("leave: %d\n", idx);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il3945_open_file_generic(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int j;
+	ssize_t ret;
+	struct il3945_rs_sta *lq_sta = file->private_data;
+
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	desc +=
+	    sprintf(buff + desc,
+		    "tx packets=%d last rate idx=%d\n"
+		    "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+		    lq_sta->last_txrate_idx, lq_sta->start_rate,
+		    jiffies_to_msecs(lq_sta->flush_time));
+	for (j = 0; j < RATE_COUNT_3945; j++) {
+		desc +=
+		    sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+			    lq_sta->win[j].counter,
+			    lq_sta->win[j].success_counter,
+			    lq_sta->win[j].success_ratio);
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+	.read = il3945_sta_dbgfs_stats_table_read,
+	.open = il3945_open_file_generic,
+	.llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+	struct il3945_rs_sta *lq_sta = il_sta;
+
+	lq_sta->rs_sta_dbgfs_stats_table_file =
+	    debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+				&rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+	struct il3945_rs_sta *lq_sta = il_sta;
+	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_ops = {
+	.module = NULL,
+	.name = RS_NAME,
+	.tx_status = il3945_rs_tx_status,
+	.get_rate = il3945_rs_get_rate,
+	.rate_init = il3945_rs_rate_init_stub,
+	.alloc = il3945_rs_alloc,
+	.free = il3945_rs_free,
+	.alloc_sta = il3945_rs_alloc_sta,
+	.free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = il3945_add_debugfs,
+	.remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+	struct il_priv *il = hw->priv;
+	s32 rssi = 0;
+	unsigned long flags;
+	struct il3945_rs_sta *rs_sta;
+	struct ieee80211_sta *sta;
+	struct il3945_sta_priv *psta;
+
+	D_RATE("enter\n");
+
+	rcu_read_lock();
+
+	sta =
+	    ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+	if (!sta) {
+		D_RATE("Unable to find station to initialize rate scaling.\n");
+		rcu_read_unlock();
+		return;
+	}
+
+	psta = (void *)sta->drv_priv;
+	rs_sta = &psta->rs_sta;
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	rs_sta->tgg = 0;
+	switch (il->band) {
+	case IEEE80211_BAND_2GHZ:
+		/* TODO: this always does G, not a regression */
+		if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+			rs_sta->tgg = 1;
+			rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+		} else
+			rs_sta->expected_tpt = il3945_expected_tpt_g;
+		break;
+	case IEEE80211_BAND_5GHZ:
+		rs_sta->expected_tpt = il3945_expected_tpt_a;
+		break;
+	case IEEE80211_NUM_BANDS:
+		BUG();
+		break;
+	}
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	rssi = il->_3945.last_rx_rssi;
+	if (rssi == 0)
+		rssi = IL_MIN_RSSI_VAL;
+
+	D_RATE("Network RSSI: %d\n", rssi);
+
+	rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+	D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+	       rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+	rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 0000000..1489b15
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2743 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+	struct il_host_cmd cmd = {
+		.id = C_LEDS,
+		.len = sizeof(struct il_led_cmd),
+		.data = led_cmd,
+		.flags = CMD_ASYNC,
+		.callback = NULL,
+	};
+
+	return il_send_cmd(il, &cmd);
+}
+
+const struct il_led_ops il3945_led_ops = {
+	.cmd = il3945_send_led_cmd,
+};
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
+	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,   \
+				    RATE_##r##M_IEEE,   \
+				    RATE_##ip##M_IDX, \
+				    RATE_##in##M_IDX, \
+				    RATE_##rp##M_IDX, \
+				    RATE_##rn##M_IDX, \
+				    RATE_##pp##M_IDX, \
+				    RATE_##np##M_IDX, \
+				    RATE_##r##M_IDX_TBL, \
+				    RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ *   rate, prev ieee rate, next ieee rate, prev rate, next rate,
+ *   prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+	IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),	/*  1mbps */
+	IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),	/*  2mbps */
+	IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
+	IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),	/* 11mbps */
+	IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),	/*  6mbps */
+	IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),	/*  9mbps */
+	IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
+	IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
+	IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
+	IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
+	IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
+	IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),	/* 54mbps */
+};
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+	u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+	if (rate == RATE_INVALID)
+		rate = rate_idx;
+	return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
+ *   Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging.  This function is just a placeholder as-is,
+ *   you'll need to provide the special bits! ...
+ *   ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+	int i;
+	u32 base;		/* SRAM address of event log header */
+	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
+	u32 array_size;		/* # of u32 entries in array */
+	static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+		0x00000000,	/*   31 -    0  Event id numbers */
+		0x00000000,	/*   63 -   32 */
+		0x00000000,	/*   95 -   64 */
+		0x00000000,	/*  127 -   96 */
+		0x00000000,	/*  159 -  128 */
+		0x00000000,	/*  191 -  160 */
+		0x00000000,	/*  223 -  192 */
+		0x00000000,	/*  255 -  224 */
+		0x00000000,	/*  287 -  256 */
+		0x00000000,	/*  319 -  288 */
+		0x00000000,	/*  351 -  320 */
+		0x00000000,	/*  383 -  352 */
+		0x00000000,	/*  415 -  384 */
+		0x00000000,	/*  447 -  416 */
+		0x00000000,	/*  479 -  448 */
+		0x00000000,	/*  511 -  480 */
+		0x00000000,	/*  543 -  512 */
+		0x00000000,	/*  575 -  544 */
+		0x00000000,	/*  607 -  576 */
+		0x00000000,	/*  639 -  608 */
+		0x00000000,	/*  671 -  640 */
+		0x00000000,	/*  703 -  672 */
+		0x00000000,	/*  735 -  704 */
+		0x00000000,	/*  767 -  736 */
+		0x00000000,	/*  799 -  768 */
+		0x00000000,	/*  831 -  800 */
+		0x00000000,	/*  863 -  832 */
+		0x00000000,	/*  895 -  864 */
+		0x00000000,	/*  927 -  896 */
+		0x00000000,	/*  959 -  928 */
+		0x00000000,	/*  991 -  960 */
+		0x00000000,	/* 1023 -  992 */
+		0x00000000,	/* 1055 - 1024 */
+		0x00000000,	/* 1087 - 1056 */
+		0x00000000,	/* 1119 - 1088 */
+		0x00000000,	/* 1151 - 1120 */
+		0x00000000,	/* 1183 - 1152 */
+		0x00000000,	/* 1215 - 1184 */
+		0x00000000,	/* 1247 - 1216 */
+		0x00000000,	/* 1279 - 1248 */
+		0x00000000,	/* 1311 - 1280 */
+		0x00000000,	/* 1343 - 1312 */
+		0x00000000,	/* 1375 - 1344 */
+		0x00000000,	/* 1407 - 1376 */
+		0x00000000,	/* 1439 - 1408 */
+		0x00000000,	/* 1471 - 1440 */
+		0x00000000,	/* 1503 - 1472 */
+	};
+
+	base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+	if (!il3945_hw_valid_rtc_data_addr(base)) {
+		IL_ERR("Invalid event log pointer 0x%08X\n", base);
+		return;
+	}
+
+	disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+	array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+	if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+		D_INFO("Disabling selected uCode log events at 0x%x\n",
+		       disable_ptr);
+		for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+			il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+					  evt_disable[i]);
+
+	} else {
+		D_INFO("Selected uCode log events may be disabled\n");
+		D_INFO("  by writing \"1\"s into disable bitmap\n");
+		D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+		       array_size);
+	}
+
+}
+
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+	int idx;
+
+	for (idx = 0; idx < RATE_COUNT_3945; idx++)
+		if (il3945_rates[idx].plcp == plcp)
+			return idx;
+	return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+	switch (status & TX_STATUS_MSK) {
+	case TX_3945_STATUS_SUCCESS:
+		return "SUCCESS";
+		TX_STATUS_ENTRY(SHORT_LIMIT);
+		TX_STATUS_ENTRY(LONG_LIMIT);
+		TX_STATUS_ENTRY(FIFO_UNDERRUN);
+		TX_STATUS_ENTRY(MGMNT_ABORT);
+		TX_STATUS_ENTRY(NEXT_FRAG);
+		TX_STATUS_ENTRY(LIFE_EXPIRE);
+		TX_STATUS_ENTRY(DEST_PS);
+		TX_STATUS_ENTRY(ABORTED);
+		TX_STATUS_ENTRY(BT_RETRY);
+		TX_STATUS_ENTRY(STA_INVALID);
+		TX_STATUS_ENTRY(FRAG_DROPPED);
+		TX_STATUS_ENTRY(TID_DISABLE);
+		TX_STATUS_ENTRY(FRAME_FLUSHED);
+		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+		TX_STATUS_ENTRY(TX_LOCKED);
+		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+	return "";
+}
+#endif
+
+/*
+ * Get the ieee prev rate from the rate scale table.
+ * For A and B mode we need to override the prev
+ * value.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+	int next_rate = il3945_get_prev_ieee_rate(rate);
+
+	switch (il->band) {
+	case IEEE80211_BAND_5GHZ:
+		if (rate == RATE_12M_IDX)
+			next_rate = RATE_9M_IDX;
+		else if (rate == RATE_6M_IDX)
+			next_rate = RATE_6M_IDX;
+		break;
+	case IEEE80211_BAND_2GHZ:
+		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+		    il_is_associated(il)) {
+			if (rate == RATE_11M_IDX)
+				next_rate = RATE_5M_IDX;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return next_rate;
+}
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	struct il_tx_info *tx_info;
+
+	BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		tx_info = &txq->txb[txq->q.read_ptr];
+		ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+		tx_info->skb = NULL;
+		il->cfg->ops->lib->txq_free_tfd(il, txq);
+	}
+
+	if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+	    txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+		il_wake_queue(il, txq);
+}
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int idx = SEQ_TO_IDX(sequence);
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct ieee80211_tx_info *info;
+	struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+	u32 status = le32_to_cpu(tx_resp->status);
+	int rate_idx;
+	int fail;
+
+	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+		       "is out of range [0-%d] %d %d\n", txq_id, idx,
+		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+		return;
+	}
+
+	txq->time_stamp = jiffies;
+	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+	ieee80211_tx_info_clear_status(info);
+
+	/* Fill the MRR chain with some info about on-chip retransmissions */
+	rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+	if (info->band == IEEE80211_BAND_5GHZ)
+		rate_idx -= IL_FIRST_OFDM_RATE;
+
+	fail = tx_resp->failure_frame;
+
+	info->status.rates[0].idx = rate_idx;
+	info->status.rates[0].count = fail + 1;	/* add final attempt */
+
+	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
+	info->flags |=
+	    ((status & TX_STATUS_MSK) ==
+	     TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+	D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+	     il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+	     tx_resp->failure_frame);
+
+	D_TX_REPLY("Tx queue reclaim %d\n", idx);
+	il3945_tx_queue_reclaim(il, txq_id, idx);
+
+	if (status & TX_ABORT_REQUIRED_MSK)
+		IL_ERR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ *  RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+
+	prev_stats = (__le32 *) &il->_3945.stats;
+	accum_stats = (u32 *) &il->_3945.accum_stats;
+	delta = (u32 *) &il->_3945.delta_stats;
+	max_delta = (u32 *) &il->_3945.max_delta;
+
+	for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+	     i +=
+	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+	     accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta =
+			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative stats for "no-counter" type stats */
+	il->_3945.accum_stats.general.temperature =
+	    il->_3945.stats.general.temperature;
+	il->_3945.accum_stats.general.ttl_timestamp =
+	    il->_3945.stats.general.ttl_timestamp;
+}
+#endif
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	D_RX("Statistics notification received (%d vs %d).\n",
+	     (int)sizeof(struct il3945_notif_stats),
+	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+	memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	__le32 *flag = (__le32 *) &pkt->u.raw;
+
+	if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+		memset(&il->_3945.accum_stats, 0,
+		       sizeof(struct il3945_notif_stats));
+		memset(&il->_3945.delta_stats, 0,
+		       sizeof(struct il3945_notif_stats));
+		memset(&il->_3945.max_delta, 0,
+		       sizeof(struct il3945_notif_stats));
+#endif
+		D_RX("Statistics have been cleared\n");
+	}
+	il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+	/* Filter incoming packets to determine if they are targeted toward
+	 * this network, discarding packets coming from ourselves */
+	switch (il->iw_mode) {
+	case NL80211_IFTYPE_ADHOC:	/* Header: Dest. | Source    | BSSID */
+		/* packets to our IBSS update information */
+		return !compare_ether_addr(header->addr3, il->bssid);
+	case NL80211_IFTYPE_STATION:	/* Header: Dest. | AP{BSSID} | Source */
+		/* packets to our IBSS update information */
+		return !compare_ether_addr(header->addr2, il->bssid);
+	default:
+		return 1;
+	}
+}
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+			       struct ieee80211_rx_status *stats)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+	u16 len = le16_to_cpu(rx_hdr->len);
+	struct sk_buff *skb;
+	__le16 fc = hdr->frame_control;
+
+	/* We received data from the HW, so stop the watchdog */
+	if (unlikely
+	    (len + IL39_RX_FRAME_SIZE >
+	     PAGE_SIZE << il->hw_params.rx_page_order)) {
+		D_DROP("Corruption detected!\n");
+		return;
+	}
+
+	/* We only process data packets if the interface is open */
+	if (unlikely(!il->is_open)) {
+		D_DROP("Dropping packet while interface is not open.\n");
+		return;
+	}
+
+	skb = dev_alloc_skb(128);
+	if (!skb) {
+		IL_ERR("dev_alloc_skb failed\n");
+		return;
+	}
+
+	if (!il3945_mod_params.sw_crypto)
+		il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+				      le32_to_cpu(rx_end->status), stats);
+
+	skb_add_rx_frag(skb, 0, rxb->page,
+			(void *)rx_hdr->payload - (void *)pkt, len);
+
+	il_update_stats(il, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(il->hw, skb);
+	il->alloc_rxb_page--;
+	rxb->page = NULL;
+}
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct ieee80211_hdr *header;
+	struct ieee80211_rx_status rx_status;
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+	u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+	u16 rx_stats_noise_diff __maybe_unused =
+	    le16_to_cpu(rx_stats->noise_diff);
+	u8 network_packet;
+
+	rx_status.flag = 0;
+	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+	rx_status.band =
+	    (rx_hdr->
+	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+	    IEEE80211_BAND_5GHZ;
+	rx_status.freq =
+	    ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+					   rx_status.band);
+
+	rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+	if (rx_status.band == IEEE80211_BAND_5GHZ)
+		rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+	rx_status.antenna =
+	    (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+	    4;
+
+	/* set the preamble flag if appropriate */
+	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.flag |= RX_FLAG_SHORTPRE;
+
+	if ((unlikely(rx_stats->phy_count > 20))) {
+		D_DROP("dsp size out of range [0,20]: %d\n",
+		       rx_stats->phy_count);
+		return;
+	}
+
+	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+	    !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+		D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+		return;
+	}
+
+	/* Convert 3945's rssi indicator to dBm */
+	rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+	D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+		rx_stats_sig_avg, rx_stats_noise_diff);
+
+	header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+	network_packet = il3945_is_network_packet(il, header);
+
+	D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+		network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+		rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+	il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
+
+	if (network_packet) {
+		il->_3945.last_beacon_time =
+		    le32_to_cpu(rx_end->beacon_timestamp);
+		il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+		il->_3945.last_rx_rssi = rx_status.signal;
+	}
+
+	il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+	int count;
+	struct il_queue *q;
+	struct il3945_tfd *tfd, *tfd_tmp;
+
+	q = &txq->q;
+	tfd_tmp = (struct il3945_tfd *)txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+	if (count >= NUM_TFD_CHUNKS || count < 0) {
+		IL_ERR("Error can not send more than %d chunks\n",
+		       NUM_TFD_CHUNKS);
+		return -EINVAL;
+	}
+
+	tfd->tbs[count].addr = cpu_to_le32(addr);
+	tfd->tbs[count].len = cpu_to_le32(len);
+
+	count++;
+
+	tfd->control_flags =
+	    cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+	return 0;
+}
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any idxes
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+	struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+	int idx = txq->q.read_ptr;
+	struct il3945_tfd *tfd = &tfd_tmp[idx];
+	struct pci_dev *dev = il->pci_dev;
+	int i;
+	int counter;
+
+	/* sanity check */
+	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+	if (counter > NUM_TFD_CHUNKS) {
+		IL_ERR("Too many chunks: %i\n", counter);
+		/* @todo issue fatal error, it is quite serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (counter)
+		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+				 dma_unmap_len(&txq->meta[idx], len),
+				 PCI_DMA_TODEVICE);
+
+	/* unmap chunks if any */
+
+	for (i = 1; i < counter; i++)
+		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+				 le32_to_cpu(tfd->tbs[i].len),
+				 PCI_DMA_TODEVICE);
+
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;
+
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
+		}
+	}
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
+ */
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+			    struct ieee80211_tx_info *info,
+			    struct ieee80211_hdr *hdr, int sta_id)
+{
+	u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+	u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
+	u16 rate_mask;
+	int rate;
+	const u8 rts_retry_limit = 7;
+	u8 data_retry_limit;
+	__le32 tx_flags;
+	__le16 fc = hdr->frame_control;
+	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+	rate = il3945_rates[rate_idx].plcp;
+	tx_flags = tx_cmd->tx_flags;
+
+	/* We need to figure out how to get the sta->supp_rates while
+	 * in this running context */
+	rate_mask = RATES_MASK_3945;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IL_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+	/* Set retry limit on RTS packets */
+	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+	tx_cmd->rate = rate;
+	tx_cmd->tx_flags = tx_flags;
+
+	/* OFDM */
+	tx_cmd->supp_rates[0] =
+	    ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+	/* CCK */
+	tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+	D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+	       "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+	       le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+	       tx_cmd->supp_rates[0]);
+}
+
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+	unsigned long flags_spin;
+	struct il_station_entry *station;
+
+	if (sta_id == IL_INVALID_STATION)
+		return IL_INVALID_STATION;
+
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	station = &il->stations[sta_id];
+
+	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+	station->sta.mode = STA_CONTROL_MODIFY_MSK;
+	il_send_add_sta(il, &station->sta, CMD_ASYNC);
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+	return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+		if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+			il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+					~APMG_PS_CTRL_MSK_PWR_SRC);
+
+			_il_poll_bit(il, CSR_GPIO_IN,
+				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+		}
+ */
+
+	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+			      ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+	_il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+	il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+	il_wr(il, FH39_RCSR_WPTR(0), 0);
+	il_wr(il, FH39_RCSR_CONFIG(0),
+	      FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+	      FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+	      FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+	      FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG
+							       <<
+							       FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE)
+	      | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 <<
+								 FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH)
+	      | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+	/* fake read to flush all prev I/O */
+	il_rd(il, FH39_RSSR_CTRL);
+
+	return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+
+	/* bypass mode */
+	il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+	/* RA 0 is active */
+	il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+	/* all 6 fifo are active */
+	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+	il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+	il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+	il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+	il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+	il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+	il_wr(il, FH39_TSSR_MSG_CONFIG,
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+	return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+	int rc;
+	int txq_id, slots_num;
+
+	il3945_hw_txq_ctx_free(il);
+
+	/* allocate tx queue structure */
+	rc = il_alloc_txq_mem(il);
+	if (rc)
+		return rc;
+
+	/* Tx CMD queue */
+	rc = il3945_tx_reset(il);
+	if (rc)
+		goto error;
+
+	/* Tx queue(s) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		slots_num =
+		    (txq_id ==
+		     IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+		if (rc) {
+			IL_ERR("Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	il3945_hw_txq_ctx_free(il);
+	return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+	int ret = il_apm_init(il);
+
+	/* Clear APMG (NIC's internal power management) interrupts */
+	il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+	il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+	/* Reset radio chip */
+	il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+	udelay(5);
+	il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+	return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	unsigned long flags;
+	u8 rev_id = il->pci_dev->revision;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Determine HW type */
+	D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+		D_INFO("RTP type\n");
+	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+		D_INFO("3945 RADIO-MB type\n");
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+	} else {
+		D_INFO("3945 RADIO-MM type\n");
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+	}
+
+	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+		D_INFO("SKU OP mode is mrc\n");
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+	} else
+		D_INFO("SKU OP mode is basic\n");
+
+	if ((eeprom->board_revision & 0xF0) == 0xD0) {
+		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+	} else {
+		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+		il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+			     CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+	}
+
+	if (eeprom->almgor_m_version <= 1) {
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+		D_INFO("Card M type A version is 0x%X\n",
+		       eeprom->almgor_m_version);
+	} else {
+		D_INFO("Card M type B version is 0x%X\n",
+		       eeprom->almgor_m_version);
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+	}
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+		D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+		D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+	int rc;
+	unsigned long flags;
+	struct il_rx_queue *rxq = &il->rxq;
+
+	spin_lock_irqsave(&il->lock, flags);
+	il->cfg->ops->lib->apm_ops.init(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il3945_set_pwr_vmain(il);
+
+	il->cfg->ops->lib->apm_ops.config(il);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (!rxq->bd) {
+		rc = il_rx_queue_alloc(il);
+		if (rc) {
+			IL_ERR("Unable to initialize Rx queue\n");
+			return -ENOMEM;
+		}
+	} else
+		il3945_rx_queue_reset(il, rxq);
+
+	il3945_rx_replenish(il);
+
+	il3945_rx_init(il, rxq);
+
+	/* Look at using this instead:
+	   rxq->need_update = 1;
+	   il_rx_queue_update_write_ptr(il, rxq);
+	 */
+
+	il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+	rc = il3945_txq_ctx_reset(il);
+	if (rc)
+		return rc;
+
+	set_bit(S_INIT, &il->status);
+
+	return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+	int txq_id;
+
+	/* Tx queues */
+	if (il->txq)
+		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+			if (txq_id == IL39_CMD_QUEUE_NUM)
+				il_cmd_queue_free(il);
+			else
+				il_tx_queue_free(il, txq_id);
+
+	/* free tx queue structure */
+	il_free_txq_mem(il);
+}
+
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+	int txq_id;
+
+	/* stop SCD */
+	il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+	/* reset TFD queues */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+		il_poll_bit(il, FH39_TSSR_TX_STATUS,
+			    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+			    1000);
+	}
+
+	il3945_hw_txq_ctx_free(il);
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp - return idx delta into power gain
+ * settings table
+ */
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
+	return (new_reading - old_reading) * (-11) / 100;
+}
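+
+/*
+ * Example: a temperature reading that rises from 100 to 150 yields
+ * (150 - 100) * (-11) / 100 = -5, i.e. a delta of five entries in the
+ * power gain table (each entry is 1/2 dB).
+ */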
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+	return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+	return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature - get the current temperature
+ * by reading from NIC
+ */
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	int temperature;
+
+	temperature = il3945_hw_get_temperature(il);
+
+	/* driver's okay range is -260 to +25.
+	 *   human readable okay range is 0 to +285 */
+	D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+	/* handle insane temp reading */
+	if (il3945_hw_reg_temp_out_of_range(temperature)) {
+		IL_ERR("Error bad temperature value  %d\n", temperature);
+
+		/* if really really hot(?),
+		 *   substitute the 3rd band/group's temp measured at factory */
+		if (il->last_temperature > 100)
+			temperature = eeprom->groups[2].temperature;
+		else		/* else use most recent "sane" value from driver */
+			temperature = il->last_temperature;
+	}
+
+	return temperature;	/* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * Both are lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER   6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * Records the new temperature in il->temperature and
+ * replaces il->last_temperature *only* if calib is needed
+ *    (assumes caller will actually do the calibration!). */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+	int temp_diff;
+
+	il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+	temp_diff = il->temperature - il->last_temperature;
+
+	/* get absolute value */
+	if (temp_diff < 0) {
+		D_POWER("Getting cooler, delta %d,\n", temp_diff);
+		temp_diff = -temp_diff;
+	} else if (temp_diff == 0)
+		D_POWER("Same temp,\n");
+	else
+		D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+	/* if we don't need calibration, *don't* update last_temperature */
+	if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+		D_POWER("Timed thermal calib not needed\n");
+		return 0;
+	}
+
+	D_POWER("Timed thermal calib needed\n");
+
+	/* assume that caller will actually do calib ...
+	 *   update the "last temperature" value */
+	il->last_temperature = il->temperature;
+	return 1;
+}
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF  -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+	{
+	 {251, 127},		/* 2.4 GHz, highest power */
+	 {251, 127},
+	 {251, 127},
+	 {251, 127},
+	 {251, 125},
+	 {251, 110},
+	 {251, 105},
+	 {251, 98},
+	 {187, 125},
+	 {187, 115},
+	 {187, 108},
+	 {187, 99},
+	 {243, 119},
+	 {243, 111},
+	 {243, 105},
+	 {243, 97},
+	 {243, 92},
+	 {211, 106},
+	 {211, 100},
+	 {179, 120},
+	 {179, 113},
+	 {179, 107},
+	 {147, 125},
+	 {147, 119},
+	 {147, 112},
+	 {147, 106},
+	 {147, 101},
+	 {147, 97},
+	 {147, 91},
+	 {115, 107},
+	 {235, 121},
+	 {235, 115},
+	 {235, 109},
+	 {203, 127},
+	 {203, 121},
+	 {203, 115},
+	 {203, 108},
+	 {203, 102},
+	 {203, 96},
+	 {203, 92},
+	 {171, 110},
+	 {171, 104},
+	 {171, 98},
+	 {139, 116},
+	 {227, 125},
+	 {227, 119},
+	 {227, 113},
+	 {227, 107},
+	 {227, 101},
+	 {227, 96},
+	 {195, 113},
+	 {195, 106},
+	 {195, 102},
+	 {195, 95},
+	 {163, 113},
+	 {163, 106},
+	 {163, 102},
+	 {163, 95},
+	 {131, 113},
+	 {131, 106},
+	 {131, 102},
+	 {131, 95},
+	 {99, 113},
+	 {99, 106},
+	 {99, 102},
+	 {99, 95},
+	 {67, 113},
+	 {67, 106},
+	 {67, 102},
+	 {67, 95},
+	 {35, 113},
+	 {35, 106},
+	 {35, 102},
+	 {35, 95},
+	 {3, 113},
+	 {3, 106},
+	 {3, 102},
+	 {3, 95}		/* 2.4 GHz, lowest power */
+	},
+	{
+	 {251, 127},		/* 5.x GHz, highest power */
+	 {251, 120},
+	 {251, 114},
+	 {219, 119},
+	 {219, 101},
+	 {187, 113},
+	 {187, 102},
+	 {155, 114},
+	 {155, 103},
+	 {123, 117},
+	 {123, 107},
+	 {123, 99},
+	 {123, 92},
+	 {91, 108},
+	 {59, 125},
+	 {59, 118},
+	 {59, 109},
+	 {59, 102},
+	 {59, 96},
+	 {59, 90},
+	 {27, 104},
+	 {27, 98},
+	 {27, 92},
+	 {115, 118},
+	 {115, 111},
+	 {115, 104},
+	 {83, 126},
+	 {83, 121},
+	 {83, 113},
+	 {83, 105},
+	 {83, 99},
+	 {51, 118},
+	 {51, 111},
+	 {51, 104},
+	 {51, 98},
+	 {19, 116},
+	 {19, 109},
+	 {19, 102},
+	 {19, 98},
+	 {19, 93},
+	 {171, 113},
+	 {171, 107},
+	 {171, 99},
+	 {139, 120},
+	 {139, 113},
+	 {139, 107},
+	 {139, 99},
+	 {107, 120},
+	 {107, 113},
+	 {107, 107},
+	 {107, 99},
+	 {75, 120},
+	 {75, 113},
+	 {75, 107},
+	 {75, 99},
+	 {43, 120},
+	 {43, 113},
+	 {43, 107},
+	 {43, 99},
+	 {11, 120},
+	 {11, 113},
+	 {11, 107},
+	 {11, 99},
+	 {131, 107},
+	 {131, 99},
+	 {99, 120},
+	 {99, 113},
+	 {99, 107},
+	 {99, 99},
+	 {67, 120},
+	 {67, 113},
+	 {67, 107},
+	 {67, 99},
+	 {35, 120},
+	 {35, 113},
+	 {35, 107},
+	 {35, 99},
+	 {3, 120}		/* 5.x GHz, lowest power */
+	}
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+	if (idx < 0)
+		return 0;
+	if (idx >= IL_MAX_GAIN_ENTRIES)
+		return IL_MAX_GAIN_ENTRIES - 1;
+	return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+			     const s8 *clip_pwrs,
+			     struct il_channel_info *ch_info, int band_idx)
+{
+	struct il3945_scan_power_info *scan_power_info;
+	s8 power;
+	u8 power_idx;
+
+	scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+	/* use this channel group's 6Mbit clipping/saturation pwr,
+	 *   but cap at regulatory scan power restriction (set during init
+	 *   based on eeprom channel data) for this channel.  */
+	power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+	power = min(power, il->tx_power_user_lmt);
+	scan_power_info->requested_power = power;
+
+	/* find difference between new scan *power* and current "normal"
+	 *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
+	 *   current "normal" temperature-compensated Tx power *idx* for
+	 *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+	 *   *idx*. */
+	power_idx = ch_info->power_info[rate_idx].power_table_idx -
+	    (power - ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+	/* store reference idx that we use when adjusting *all* scan
+	 *   powers.  So we can accommodate user (all channel) or spectrum
+	 *   management (single channel) power changes "between" temperature
+	 *   feedback compensation procedures.
+	 * don't force fit this reference idx into gain table; it may be a
+	 *   negative number.  This will help avoid errors when we're at
+	 *   the lower bounds (highest gains, for warmest temperatures)
+	 *   of the table. */
+
+	/* don't exceed table bounds for "real" setting */
+	power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+	scan_power_info->power_table_idx = power_idx;
+	scan_power_info->tpc.tx_gain =
+	    power_gain_table[band_idx][power_idx].tx_gain;
+	scan_power_info->tpc.dsp_atten =
+	    power_gain_table[band_idx][power_idx].dsp_atten;
+}
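+
+/*
+ * Illustrative sketch (not driver code): the idx arithmetic above with
+ * example numbers.  Each gain-table step is 1/2 dB, so a power delta in dBm
+ * becomes twice as many idx steps, and a lower power means a larger idx
+ * (less gain).  The values below are made up for illustration only.
+ */
+#if 0
+static int example_scan_power_idx(void)
+{
+	int normal_6m_idx = 30;		/* current temp-compensated 6M idx */
+	int normal_6m_power = 14;	/* dBm currently requested for 6M */
+	int scan_power = 12;		/* dBm wanted for scan probes */
+
+	/* 2 dB less power -> 4 idx steps further down the gain table */
+	return normal_6m_idx - (scan_power - normal_6m_power) * 2;	/* 34 */
+}
+#endif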
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from channel info struct, and send to NIC
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+	int rate_idx, i;
+	const struct il_channel_info *ch_info = NULL;
+	struct il3945_txpowertable_cmd txpower = {
+		.channel = il->ctx.active.channel,
+	};
+	u16 chan;
+
+	if (WARN_ONCE(test_bit(S_SCAN_HW, &il->status),
+		      "TX Power requested while scanning!\n"))
+		return -EAGAIN;
+
+	chan = le16_to_cpu(il->ctx.active.channel);
+
+	txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+	ch_info = il_get_channel_info(il, il->band, chan);
+	if (!ch_info) {
+		IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+		       il->band);
+		return -EINVAL;
+	}
+
+	if (!il_is_channel_valid(ch_info)) {
+		D_POWER("Not calling TX_PWR_TBL_CMD on non-Tx channel.\n");
+		return 0;
+	}
+
+	/* fill cmd with power settings for all rates for current channel */
+	/* Fill OFDM rate */
+	for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+	     rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+		txpower.power[i].tpc = ch_info->power_info[i].tpc;
+		txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+		D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+			le16_to_cpu(txpower.channel), txpower.band,
+			txpower.power[i].tpc.tx_gain,
+			txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+	}
+	/* Fill CCK rates */
+	for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+	     rate_idx++, i++) {
+		txpower.power[i].tpc = ch_info->power_info[i].tpc;
+		txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+		D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+			le16_to_cpu(txpower.channel), txpower.band,
+			txpower.power[i].tpc.tx_gain,
+			txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+	}
+
+	return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+			       sizeof(struct il3945_txpowertable_cmd),
+			       &txpower);
+
+}
+
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update.  Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: il3945_hw_reg_comp_txpower_temp() *must* be run after this to
+ *	 properly fill out the scan powers, and actual h/w gain settings,
+ *	 and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+	struct il3945_channel_power_info *power_info;
+	int power_changed = 0;
+	int i;
+	const s8 *clip_pwrs;
+	int power;
+
+	/* Get this chnlgrp's rate-to-max/clip-powers table */
+	clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+	/* Get this channel's rate-to-current-power settings table */
+	power_info = ch_info->power_info;
+
+	/* update OFDM Txpower settings */
+	for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+		int delta_idx;
+
+		/* limit new power to be no more than h/w capability */
+		power = min(ch_info->curr_txpow, clip_pwrs[i]);
+		if (power == power_info->requested_power)
+			continue;
+
+		/* find difference between old and new requested powers,
+		 *    update base (non-temp-compensated) power idx */
+		delta_idx = (power - power_info->requested_power) * 2;
+		power_info->base_power_idx -= delta_idx;
+
+		/* save new requested power value */
+		power_info->requested_power = power;
+
+		power_changed = 1;
+	}
+
+	/* update CCK Txpower settings, based on OFDM 12M setting ...
+	 *    ... all CCK power settings for a given channel are the *same*. */
+	if (power_changed) {
+		power =
+		    ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+		    IL_CCK_FROM_OFDM_POWER_DIFF;
+
+		/* do all CCK rates' il3945_channel_power_info structures */
+		for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+			power_info->requested_power = power;
+			power_info->base_power_idx =
+			    ch_info->power_info[RATE_12M_IDX_TBL].
+			    base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+			++power_info;
+		}
+	}
+
+	return 0;
+}
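+
+/*
+ * Illustrative sketch (not driver code): how the CCK settings above follow
+ * from the OFDM 12M entry.  A -5 dB power difference corresponds to +10
+ * half-dB idx steps, which is why IL_CCK_FROM_OFDM_IDX_DIFF is 10.  The
+ * starting numbers are examples only.
+ */
+#if 0
+static void example_cck_from_ofdm(void)
+{
+	int ofdm_12m_power = 15;	/* dBm */
+	int ofdm_12m_base_idx = 28;
+
+	int cck_power = ofdm_12m_power + IL_CCK_FROM_OFDM_POWER_DIFF;	/* 10 dBm */
+	int cck_base_idx = ofdm_12m_base_idx + IL_CCK_FROM_OFDM_IDX_DIFF; /* 38 */
+
+	(void)cck_power;
+	(void)cck_base_idx;
+}
+#endif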
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ *	 based strictly on regulatory (eeprom and spectrum mgt) limitations
+ *	 (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+	s8 max_power;
+
+#if 0
+	/* if we're using TGd limits, use lower of TGd or EEPROM */
+	if (ch_info->tgd_data.max_power != 0)
+		max_power =
+		    min(ch_info->tgd_data.max_power,
+			ch_info->eeprom.max_power_avg);
+
+	/* else just use EEPROM limits */
+	else
+#endif
+		max_power = ch_info->eeprom.max_power_avg;
+
+	return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ *   and the factory calibration temperatures, and bases the new settings
+ *   on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+	struct il_channel_info *ch_info = NULL;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	int delta_idx;
+	const s8 *clip_pwrs;	/* array of h/w max power levels for each rate */
+	u8 a_band;
+	u8 rate_idx;
+	u8 scan_tbl_idx;
+	u8 i;
+	int ref_temp;
+	int temperature = il->temperature;
+
+	if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+		/* do not perform tx power calibration */
+		return 0;
+	}
+	/* set up new Tx power info for each and every channel, 2.4 and 5.x */
+	for (i = 0; i < il->channel_count; i++) {
+		ch_info = &il->channel_info[i];
+		a_band = il_is_channel_a_band(ch_info);
+
+		/* Get this chnlgrp's factory calibration temperature */
+		ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+		/* get power idx adjustment based on current and factory
+		 * temps */
+		delta_idx =
+		    il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+		/* set tx power value for all rates, OFDM and CCK */
+		for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+			int power_idx =
+			    ch_info->power_info[rate_idx].base_power_idx;
+
+			/* temperature compensate */
+			power_idx += delta_idx;
+
+			/* stay within table range */
+			power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+			ch_info->power_info[rate_idx].power_table_idx =
+			    (u8) power_idx;
+			ch_info->power_info[rate_idx].tpc =
+			    power_gain_table[a_band][power_idx];
+		}
+
+		/* Get this chnlgrp's rate-to-max/clip-powers table */
+		clip_pwrs =
+		    il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+		for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+		     scan_tbl_idx++) {
+			s32 actual_idx = (scan_tbl_idx == 0) ?
+			    RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+			il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+						     actual_idx, clip_pwrs,
+						     ch_info, a_band);
+		}
+	}
+
+	/* send Txpower command for current channel to ucode */
+	return il->cfg->ops->lib->send_tx_power(il);
+}
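+
+/*
+ * Illustrative sketch (not driver code): the per-rate adjustment done in the
+ * loop above.  delta_idx is whatever the temperature-difference helper
+ * returns; here it is just an example parameter.
+ */
+#if 0
+static u8 example_temp_compensated_idx(int base_power_idx, int delta_idx)
+{
+	int idx = base_power_idx + delta_idx;
+
+	/* clamp into the gain table, exactly as the loop above does */
+	return il3945_hw_reg_fix_power_idx(idx);
+}
+#endif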
+
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+	struct il_channel_info *ch_info;
+	s8 max_power;
+	u8 a_band;
+	u8 i;
+
+	if (il->tx_power_user_lmt == power) {
+		D_POWER("Requested Tx power same as current limit: %ddBm.\n",
+			power);
+		return 0;
+	}
+
+	D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+	il->tx_power_user_lmt = power;
+
+	/* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+	for (i = 0; i < il->channel_count; i++) {
+		ch_info = &il->channel_info[i];
+		a_band = il_is_channel_a_band(ch_info);
+
+		/* find minimum power of all user and regulatory constraints
+		 *    (does not consider h/w clipping limitations) */
+		max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+		max_power = min(power, max_power);
+		if (max_power != ch_info->curr_txpow) {
+			ch_info->curr_txpow = max_power;
+
+			/* this considers the h/w clipping limitations */
+			il3945_hw_reg_set_new_power(il, ch_info);
+		}
+	}
+
+	/* update txpower settings for all channels,
+	 *   send to NIC if associated. */
+	il3945_is_temp_calib_needed(il);
+	il3945_hw_reg_comp_txpower_temp(il);
+
+	return 0;
+}
+
+static int
+il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	int rc = 0;
+	struct il_rx_pkt *pkt;
+	struct il3945_rxon_assoc_cmd rxon_assoc;
+	struct il_host_cmd cmd = {
+		.id = C_RXON_ASSOC,
+		.len = sizeof(rxon_assoc),
+		.flags = CMD_WANT_SKB,
+		.data = &rxon_assoc,
+	};
+	const struct il_rxon_cmd *rxon1 = &ctx->staging;
+	const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+	if (rxon1->flags == rxon2->flags &&
+	    rxon1->filter_flags == rxon2->filter_flags &&
+	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
+		return 0;
+	}
+
+	rxon_assoc.flags = ctx->staging.flags;
+	rxon_assoc.filter_flags = ctx->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+	rxon_assoc.reserved = 0;
+
+	rc = il_send_cmd_sync(il, &cmd);
+	if (rc)
+		return rc;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_RXON_ASSOC command\n");
+		rc = -EIO;
+	}
+
+	il_free_pages(il, cmd.reply_page);
+
+	return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data.  This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	/* cast away the const for active_rxon in this function */
+	struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+	struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+	int rc = 0;
+	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return -EINVAL;
+
+	if (!il_is_alive(il))
+		return -1;
+
+	/* always get timestamp with Rx frame */
+	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+	/* select antenna */
+	staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+	staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+	rc = il_check_rxon_cmd(il, ctx);
+	if (rc) {
+		IL_ERR("Invalid RXON configuration.  Not committing.\n");
+		return -EINVAL;
+	}
+
+	/* If we don't need to send a full RXON, we can use
+	 * il3945_rxon_assoc_cmd which is used to reconfigure filter
+	 * and other flags for the current radio configuration. */
+	if (!il_full_rxon_required(il, &il->ctx)) {
+		rc = il_send_rxon_assoc(il, &il->ctx);
+		if (rc) {
+			IL_ERR("Error setting RXON_ASSOC "
+			       "configuration (%d).\n", rc);
+			return rc;
+		}
+
+		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+		/*
+		 * We do not commit tx power settings while channel changing,
+		 * do it now if tx power changed.
+		 */
+		il_set_tx_power(il, il->tx_power_next, false);
+		return 0;
+	}
+
+	/* If we are currently associated and the new config requires
+	 * an RXON_ASSOC and the new config wants the associated mask enabled,
+	 * we must clear the associated from the active configuration
+	 * before we apply the new config */
+	if (il_is_associated(il) && new_assoc) {
+		D_INFO("Toggling associated bit on current RXON\n");
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+		/*
+		 * reserved4 and 5 could have been filled by the iwlcore code.
+		 * Let's clear them before pushing to the 3945.
+		 */
+		active_rxon->reserved4 = 0;
+		active_rxon->reserved5 = 0;
+		rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+				     &il->ctx.active);
+
+		/* If the mask clearing failed then we set
+		 * active_rxon back to what it was previously */
+		if (rc) {
+			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+			IL_ERR("Error clearing ASSOC_MSK on current "
+			       "configuration (%d).\n", rc);
+			return rc;
+		}
+		il_clear_ucode_stations(il, &il->ctx);
+		il_restore_stations(il, &il->ctx);
+	}
+
+	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+	       le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
+
+	/*
+	 * reserved4 and 5 could have been filled by the iwlcore code.
+	 * Let's clear them before pushing to the 3945.
+	 */
+	staging_rxon->reserved4 = 0;
+	staging_rxon->reserved5 = 0;
+
+	il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+
+	/* Apply the new configuration */
+	rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+			     staging_rxon);
+	if (rc) {
+		IL_ERR("Error setting new configuration (%d).\n", rc);
+		return rc;
+	}
+
+	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+	if (!new_assoc) {
+		il_clear_ucode_stations(il, &il->ctx);
+		il_restore_stations(il, &il->ctx);
+	}
+
+	/* If we issue a new RXON command which required a tune then we must
+	 * send a new TXPOWER command or we won't be able to Tx any frames */
+	rc = il_set_tx_power(il, il->tx_power_next, true);
+	if (rc) {
+		IL_ERR("Error setting Tx power (%d).\n", rc);
+		return rc;
+	}
+
+	/* Init the hardware's rate fallback order based on the band */
+	rc = il3945_init_hw_rate_table(il);
+	if (rc) {
+		IL_ERR("Error setting HW rate table: %02X\n", rc);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when it is time to check our temperature
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ *     -- correct coeffs for temp (can reset temp timer)
+ *     -- save this temp as "last",
+ *     -- send new set of gain settings to NIC
+ * NOTE:  This should continue working, even when we're not associated,
+ *   so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+	/* This will kick in the "brute force"
+	 * il3945_hw_reg_comp_txpower_temp() below */
+	if (!il3945_is_temp_calib_needed(il))
+		goto reschedule;
+
+	/* Set up a new set of temp-adjusted TxPowers, send to NIC.
+	 * This is based *only* on current temperature,
+	 * ignoring any previous power measurements */
+	il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+	queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+			   REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv,
+					  _3945.thermal_periodic.work);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	mutex_lock(&il->mutex);
+	il3945_reg_txpower_periodic(il);
+	mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ *	 These channel groups are based on factory-tested channels;
+ *	 on A-band, EEPROM's "group frequency" entries represent the top
+ *	 channel in each group 1-4.  All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+			     const struct il_channel_info *ch_info)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+	u8 group;
+	u16 group_idx = 0;	/* based on factory calib frequencies */
+	u8 grp_channel;
+
+	/* Find the group idx for the channel ... don't use idx 1(?) */
+	if (il_is_channel_a_band(ch_info)) {
+		for (group = 1; group < 5; group++) {
+			grp_channel = ch_grp[group].group_channel;
+			if (ch_info->channel <= grp_channel) {
+				group_idx = group;
+				break;
+			}
+		}
+		/* group 4 has a few channels *above* its factory cal freq */
+		if (group == 5)
+			group_idx = 4;
+	} else
+		group_idx = 0;	/* 2.4 GHz, group 0 */
+
+	D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+	return group_idx;
+}
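+
+/*
+ * Illustrative sketch (not driver code): the A-band group lookup above with
+ * made-up "top channel" values; the real ones come from
+ * eeprom->groups[1..4].group_channel.
+ */
+#if 0
+static int example_group_for_channel(int channel)
+{
+	static const int group_top_channel[5] = { 0, 48, 64, 116, 161 };
+	int group;
+
+	for (group = 1; group < 5; group++)
+		if (channel <= group_top_channel[group])
+			return group;
+
+	return 4;	/* a few channels sit above group 4's cal channel */
+}
+#endif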
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ *   into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+				    s32 setting_idx, s32 *new_idx)
+{
+	const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	s32 idx0, idx1;
+	s32 power = 2 * requested_power;
+	s32 i;
+	const struct il3945_eeprom_txpower_sample *samples;
+	s32 gains0, gains1;
+	s32 res;
+	s32 denominator;
+
+	chnl_grp = &eeprom->groups[setting_idx];
+	samples = chnl_grp->samples;
+	for (i = 0; i < 5; i++) {
+		if (power == samples[i].power) {
+			*new_idx = samples[i].gain_idx;
+			return 0;
+		}
+	}
+
+	if (power > samples[1].power) {
+		idx0 = 0;
+		idx1 = 1;
+	} else if (power > samples[2].power) {
+		idx0 = 1;
+		idx1 = 2;
+	} else if (power > samples[3].power) {
+		idx0 = 2;
+		idx1 = 3;
+	} else {
+		idx0 = 3;
+		idx1 = 4;
+	}
+
+	denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+	if (denominator == 0)
+		return -EINVAL;
+	gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+	gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
+	res = gains0 + (gains1 - gains0) *
+	      ((s32) power - (s32) samples[idx0].power) / denominator +
+	      (1 << 18);
+	*new_idx = res >> 19;
+	return 0;
+}
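+
+/*
+ * Illustrative sketch (not driver code): the fixed-point interpolation above
+ * with two made-up (power, gain_idx) sample points.  Gain idxs are scaled by
+ * 2^19 so the division keeps 19 fractional bits; adding 2^18 before the
+ * final shift rounds to nearest.
+ */
+#if 0
+static int example_interpolate_gain_idx(int power /* 2 * requested dBm */)
+{
+	int p0 = 24, g0 = 40;	/* sample 0: 12 dBm -> gain idx 40 */
+	int p1 = 16, g1 = 52;	/* sample 1:  8 dBm -> gain idx 52 */
+	int gains0 = g0 * (1 << 19);
+	int gains1 = g1 * (1 << 19);
+	int res;
+
+	res = gains0 + (gains1 - gains0) * (power - p0) / (p1 - p0) + (1 << 18);
+	return res >> 19;	/* e.g. power == 20 (10 dBm) yields 46 */
+}
+#endif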
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+	u32 i;
+	s32 rate_idx;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	const struct il3945_eeprom_txpower_group *group;
+
+	D_POWER("Initializing factory calib info from EEPROM\n");
+
+	for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+		s8 *clip_pwrs;	/* table of power levels for each rate */
+		s8 satur_pwr;	/* saturation power for each chnl group */
+		group = &eeprom->groups[i];
+
+		/* sanity check on factory saturation power value */
+		if (group->saturation_power < 40) {
+			IL_WARN("Error: saturation power is %d, "
+				"less than minimum expected 40\n",
+				group->saturation_power);
+			return;
+		}
+
+		/*
+		 * Derive requested power levels for each rate, based on
+		 *   hardware capabilities (saturation power for band).
+		 * Basic value is 3dB down from saturation, with further
+		 *   power reductions for highest 3 data rates.  These
+		 *   backoffs provide headroom for high rate modulation
+		 *   power peaks, without too much distortion (clipping).
+		 */
+		/* we'll fill in this array with h/w max power levels */
+		clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+		/* divide factory saturation power by 2 to find -3dB level */
+		satur_pwr = (s8) (group->saturation_power >> 1);
+
+		/* fill in channel group's nominal powers for each rate */
+		for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+		     rate_idx++, clip_pwrs++) {
+			switch (rate_idx) {
+			case RATE_36M_IDX_TBL:
+				if (i == 0)	/* B/G */
+					*clip_pwrs = satur_pwr;
+				else	/* A */
+					*clip_pwrs = satur_pwr - 5;
+				break;
+			case RATE_48M_IDX_TBL:
+				if (i == 0)
+					*clip_pwrs = satur_pwr - 7;
+				else
+					*clip_pwrs = satur_pwr - 10;
+				break;
+			case RATE_54M_IDX_TBL:
+				if (i == 0)
+					*clip_pwrs = satur_pwr - 9;
+				else
+					*clip_pwrs = satur_pwr - 12;
+				break;
+			default:
+				*clip_pwrs = satur_pwr;
+				break;
+			}
+		}
+	}
+}
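+
+/*
+ * Illustrative sketch (not driver code): the per-rate clip powers the loop
+ * above produces for a B/G group (i == 0), given an example -3 dB level.
+ * Only the top OFDM rates get an extra backoff.
+ */
+#if 0
+static int example_bg_clip_power(int rate_idx, int satur_pwr)
+{
+	switch (rate_idx) {
+	case RATE_48M_IDX_TBL:
+		return satur_pwr - 7;
+	case RATE_54M_IDX_TBL:
+		return satur_pwr - 9;
+	case RATE_36M_IDX_TBL:
+	default:
+		return satur_pwr;	/* no extra backoff for 36M and below */
+	}
+}
+#endif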
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to do active (i.e. using Tx) scans.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+	struct il_channel_info *ch_info = NULL;
+	struct il3945_channel_power_info *pwr_info;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	int delta_idx;
+	u8 rate_idx;
+	u8 scan_tbl_idx;
+	const s8 *clip_pwrs;	/* array of power levels for each rate */
+	u8 gain, dsp_atten;
+	s8 power;
+	u8 pwr_idx, base_pwr_idx, a_band;
+	u8 i;
+	int temperature;
+
+	/* save temperature reference,
+	 *   so we can determine next time to calibrate */
+	temperature = il3945_hw_reg_txpower_get_temperature(il);
+	il->last_temperature = temperature;
+
+	il3945_hw_reg_init_channel_groups(il);
+
+	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
+	for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+	     i++, ch_info++) {
+		a_band = il_is_channel_a_band(ch_info);
+		if (!il_is_channel_valid(ch_info))
+			continue;
+
+		/* find this channel's channel group (*not* "band") idx */
+		ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+		/* Get this chnlgrp's rate->max/clip-powers table */
+		clip_pwrs =
+		    il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+		/* calculate power idx *adjustment* value according to
+		 *  diff between current temperature and factory temperature */
+		delta_idx =
+		    il3945_hw_reg_adjust_power_by_temp(temperature,
+				eeprom->groups[ch_info->group_idx].temperature);
+
+		D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+			delta_idx, temperature + IL_TEMP_CONVERT);
+
+		/* set tx power value for all OFDM rates */
+		for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+			s32 uninitialized_var(power_idx);
+			int rc;
+
+			/* use channel group's clip-power table,
+			 *   but don't exceed channel's max power */
+			s8 pwr = min(ch_info->max_power_avg,
+				     clip_pwrs[rate_idx]);
+
+			pwr_info = &ch_info->power_info[rate_idx];
+
+			/* get base (i.e. at factory-measured temperature)
+			 *    power table idx for this rate's power */
+			rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+						ch_info->group_idx, &power_idx);
+			if (rc) {
+				IL_ERR("Invalid power idx\n");
+				return rc;
+			}
+			pwr_info->base_power_idx = (u8) power_idx;
+
+			/* temperature compensate */
+			power_idx += delta_idx;
+
+			/* stay within range of gain table */
+			power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+			/* fill 1 OFDM rate's il3945_channel_power_info struct */
+			pwr_info->requested_power = pwr;
+			pwr_info->power_table_idx = (u8) power_idx;
+			pwr_info->tpc.tx_gain =
+			    power_gain_table[a_band][power_idx].tx_gain;
+			pwr_info->tpc.dsp_atten =
+			    power_gain_table[a_band][power_idx].dsp_atten;
+		}
+
+		/* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+		pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+		power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+		pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+		base_pwr_idx =
+		    pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+		/* stay within table range */
+		pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+		gain = power_gain_table[a_band][pwr_idx].tx_gain;
+		dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+		/* fill each CCK rate's il3945_channel_power_info structure
+		 * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
+		 * NOTE:  CCK rates start at end of OFDM rates! */
+		for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+			pwr_info =
+			    &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+			pwr_info->requested_power = power;
+			pwr_info->power_table_idx = pwr_idx;
+			pwr_info->base_power_idx = base_pwr_idx;
+			pwr_info->tpc.tx_gain = gain;
+			pwr_info->tpc.dsp_atten = dsp_atten;
+		}
+
+		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+		for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+		     scan_tbl_idx++) {
+			s32 actual_idx = (scan_tbl_idx == 0) ?
+			    RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+			il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+						     actual_idx, clip_pwrs,
+						     ch_info, a_band);
+		}
+	}
+
+	return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+	int rc;
+
+	il_wr(il, FH39_RCSR_CONFIG(0), 0);
+	rc = il_poll_bit(il, FH39_RSSR_STATUS,
+			 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+	if (rc < 0)
+		IL_ERR("Can't stop Rx DMA.\n");
+
+	return 0;
+}
+
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+	int txq_id = txq->q.id;
+
+	struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+	il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+	il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+	il_wr(il, FH39_TCSR_CONFIG(txq_id),
+	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+	/* fake read to flush all prev. writes */
+	_il_rd(il, FH39_TSSR_CBB_BASE);
+
+	return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+	switch (cmd_id) {
+	case C_RXON:
+		return sizeof(struct il3945_rxon_cmd);
+	case C_POWER_TBL:
+		return sizeof(struct il3945_powertable_cmd);
+	default:
+		return len;
+	}
+}
+
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+	struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+	addsta->mode = cmd->mode;
+	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+	memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+	addsta->station_flags = cmd->station_flags;
+	addsta->station_flags_msk = cmd->station_flags_msk;
+	addsta->tid_disable_tx = cpu_to_le16(0);
+	addsta->rate_n_flags = cmd->rate_n_flags;
+	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+	return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	int ret;
+	u8 sta_id;
+	unsigned long flags;
+
+	if (sta_id_r)
+		*sta_id_r = IL_INVALID_STATION;
+
+	ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM\n", addr);
+		return ret;
+	}
+
+	if (sta_id_r)
+		*sta_id_r = sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].used |= IL_STA_LOCAL;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+			   bool add)
+{
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+	int ret;
+
+	if (add) {
+		ret =
+		    il3945_add_bssid_station(il, vif->bss_conf.bssid,
+					     &vif_priv->ibss_bssid_sta_id);
+		if (ret)
+			return ret;
+
+		il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+				(il->band == IEEE80211_BAND_5GHZ) ?
+				RATE_6M_PLCP : RATE_1M_PLCP);
+		il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+		return 0;
+	}
+
+	return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+				 vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+	int rc, i, idx, prev_idx;
+	struct il3945_rate_scaling_cmd rate_cmd = {
+		.reserved = {0, 0, 0},
+	};
+	struct il3945_rate_scaling_info *table = rate_cmd.table;
+
+	for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+		idx = il3945_rates[i].table_rs_idx;
+
+		table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
+		table[idx].try_cnt = il->retry_rate;
+		prev_idx = il3945_get_prev_ieee_rate(i);
+		table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+	}
+
+	switch (il->band) {
+	case IEEE80211_BAND_5GHZ:
+		D_RATE("Select A mode rate scale\n");
+		/* If one of the following CCK rates is used,
+		 * have it fall back to the 6M OFDM rate */
+		for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+			table[i].next_rate_idx =
+			    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+		/* Don't fall back to CCK rates */
+		table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+		/* Don't drop out of OFDM rates */
+		table[RATE_6M_IDX_TBL].next_rate_idx =
+		    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+		break;
+
+	case IEEE80211_BAND_2GHZ:
+		D_RATE("Select B/G mode rate scale\n");
+		/* If an OFDM rate is used, have it fall back to the
+		 * 1M CCK rates */
+
+		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+		    il_is_associated(il)) {
+
+			idx = IL_FIRST_CCK_RATE;
+			for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+				table[i].next_rate_idx =
+				    il3945_rates[idx].table_rs_idx;
+
+			idx = RATE_11M_IDX_TBL;
+			/* CCK shouldn't fall back to OFDM... */
+			table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+		}
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	/* Update the rate scaling for control frame Tx */
+	rate_cmd.table_id = 0;
+	rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+	if (rc)
+		return rc;
+
+	/* Update the rate scaling for data frame Tx */
+	rate_cmd.table_id = 1;
+	return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+	memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
+
+	il->_3945.shared_virt =
+	    dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+			       &il->_3945.shared_phys, GFP_KERNEL);
+	if (!il->_3945.shared_virt) {
+		IL_ERR("failed to allocate pci memory\n");
+		return -ENOMEM;
+	}
+
+	/* Assign number of Usable TX queues */
+	il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+
+	il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+	il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+	il->hw_params.max_stations = IL3945_STATION_COUNT;
+	il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
+
+	il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+	il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+	il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+	il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+	return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+			 u8 rate)
+{
+	struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+	unsigned int frame_size;
+
+	tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+	tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	frame_size =
+	    il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+	BUG_ON(frame_size > MAX_MPDU_SIZE);
+	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+	tx_beacon_cmd->tx.rate = rate;
+	tx_beacon_cmd->tx.tx_flags =
+	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+	/* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
+	tx_beacon_cmd->tx.supp_rates[0] =
+	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+	tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+	return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+	il->handlers[C_TX] = il3945_hdl_tx;
+	il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+	INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+			  il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+	cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	u32 reg;
+	u32 val;
+
+	D_INFO("Begin verify bsm\n");
+
+	/* verify BSM SRAM contents */
+	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+	     reg += sizeof(u32), image++) {
+		val = il_rd_prph(il, reg);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("BSM uCode verification failed at "
+			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+			       len, val, le32_to_cpu(*image));
+			return -EIO;
+		}
+	}
+
+	D_INFO("BSM bootstrap uCode image OK\n");
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism.  Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+	_il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+	return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+	return;
+}
+
+/**
+ * il3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+	u32 inst_len;
+	u32 data_len;
+	int rc;
+	int i;
+	u32 done;
+	u32 reg_offset;
+
+	D_INFO("Begin load bsm\n");
+
+	/* make sure bootstrap program is no larger than BSM's SRAM size */
+	if (len > IL39_MAX_BSM_SIZE)
+		return -EINVAL;
+
+	/* Tell bootstrap uCode where to find the "Initialize" uCode
+	 *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+	 * NOTE:  il3945_initialize_alive_start() will replace these values,
+	 *        after the "initialize" uCode has run, to point to
+	 *        runtime/protocol instructions and backup data cache. */
+	pinst = il->ucode_init.p_addr;
+	pdata = il->ucode_init_data.p_addr;
+	inst_len = il->ucode_init.len;
+	data_len = il->ucode_init_data.len;
+
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+	/* Fill BSM memory with bootstrap instructions */
+	for (reg_offset = BSM_SRAM_LOWER_BOUND;
+	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
+	     reg_offset += sizeof(u32), image++)
+		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+	rc = il3945_verify_bsm(il);
+	if (rc)
+		return rc;
+
+	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+	/* Load bootstrap code into instruction SRAM now,
+	 *   to prepare to load "initialize" uCode */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+	/* Wait for load of bootstrap uCode to finish */
+	for (i = 0; i < 100; i++) {
+		done = il_rd_prph(il, BSM_WR_CTRL_REG);
+		if (!(done & BSM_WR_CTRL_REG_BIT_START))
+			break;
+		udelay(10);
+	}
+	if (i < 100)
+		D_INFO("BSM write complete, poll %d iterations\n", i);
+	else {
+		IL_ERR("BSM write did not complete!\n");
+		return -EIO;
+	}
+
+	/* Enable future boot loads whenever power management unit triggers it
+	 *   (e.g. when powering back up after power-save shutdown) */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+	return 0;
+}
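+
+/*
+ * Illustrative sketch (not driver code): the "kick and poll" pattern used at
+ * the end of il3945_load_bsm(), reduced to a helper.  read_reg()/write_reg()
+ * stand in for the periphery register accessors and are assumptions of this
+ * sketch, not real APIs.
+ */
+#if 0
+static int example_start_and_wait(u32 (*read_reg)(u32),
+				  void (*write_reg)(u32, u32))
+{
+	int i;
+
+	write_reg(BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+	for (i = 0; i < 100; i++) {
+		if (!(read_reg(BSM_WR_CTRL_REG) & BSM_WR_CTRL_REG_BIT_START))
+			return 0;	/* BSM finished copying */
+		udelay(10);
+	}
+
+	return -EIO;	/* start bit never cleared */
+}
+#endif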
+
+static struct il_hcmd_ops il3945_hcmd = {
+	.rxon_assoc = il3945_send_rxon_assoc,
+	.commit_rxon = il3945_commit_rxon,
+};
+
+static struct il_lib_ops il3945_lib = {
+	.txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = il3945_hw_txq_free_tfd,
+	.txq_init = il3945_hw_tx_queue_init,
+	.load_ucode = il3945_load_bsm,
+	.dump_nic_error_log = il3945_dump_nic_error_log,
+	.apm_ops = {
+		    .init = il3945_apm_init,
+		    .config = il3945_nic_config,
+		    },
+	.eeprom_ops = {
+		       .regulatory_bands = {
+					    EEPROM_REGULATORY_BAND_1_CHANNELS,
+					    EEPROM_REGULATORY_BAND_2_CHANNELS,
+					    EEPROM_REGULATORY_BAND_3_CHANNELS,
+					    EEPROM_REGULATORY_BAND_4_CHANNELS,
+					    EEPROM_REGULATORY_BAND_5_CHANNELS,
+					    EEPROM_REGULATORY_BAND_NO_HT40,
+					    EEPROM_REGULATORY_BAND_NO_HT40,
+					    },
+		       .acquire_semaphore = il3945_eeprom_acquire_semaphore,
+		       .release_semaphore = il3945_eeprom_release_semaphore,
+		       },
+	.send_tx_power = il3945_send_tx_power,
+	.is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	.debugfs_ops = {
+			.rx_stats_read = il3945_ucode_rx_stats_read,
+			.tx_stats_read = il3945_ucode_tx_stats_read,
+			.general_stats_read = il3945_ucode_general_stats_read,
+			},
+#endif
+};
+
+static const struct il_legacy_ops il3945_legacy_ops = {
+	.post_associate = il3945_post_associate,
+	.config_ap = il3945_config_ap,
+	.manage_ibss_station = il3945_manage_ibss_station,
+};
+
+static struct il_hcmd_utils_ops il3945_hcmd_utils = {
+	.get_hcmd_size = il3945_get_hcmd_size,
+	.build_addsta_hcmd = il3945_build_addsta_hcmd,
+	.request_scan = il3945_request_scan,
+	.post_scan = il3945_post_scan,
+};
+
+static const struct il_ops il3945_ops = {
+	.lib = &il3945_lib,
+	.hcmd = &il3945_hcmd,
+	.utils = &il3945_hcmd_utils,
+	.led = &il3945_led_ops,
+	.legacy = &il3945_legacy_ops,
+	.ieee80211_ops = &il3945_hw_ops,
+};
+
+static struct il_base_params il3945_base_params = {
+	.eeprom_size = IL3945_EEPROM_IMG_SIZE,
+	.num_of_queues = IL39_NUM_QUEUES,
+	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = true,
+	.led_compensation = 64,
+	.wd_timeout = IL_DEF_WD_TIMEOUT,
+};
+
+static struct il_cfg il3945_bg_cfg = {
+	.name = "3945BG",
+	.fw_name_pre = IL3945_FW_PRE,
+	.ucode_api_max = IL3945_UCODE_API_MAX,
+	.ucode_api_min = IL3945_UCODE_API_MIN,
+	.sku = IL_SKU_G,
+	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+	.ops = &il3945_ops,
+	.mod_params = &il3945_mod_params,
+	.base_params = &il3945_base_params,
+	.led_mode = IL_LED_BLINK,
+};
+
+static struct il_cfg il3945_abg_cfg = {
+	.name = "3945ABG",
+	.fw_name_pre = IL3945_FW_PRE,
+	.ucode_api_max = IL3945_UCODE_API_MAX,
+	.ucode_api_min = IL3945_UCODE_API_MIN,
+	.sku = IL_SKU_A | IL_SKU_G,
+	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+	.ops = &il3945_ops,
+	.mod_params = &il3945_mod_params,
+	.base_params = &il3945_base_params,
+	.led_mode = IL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
+	{IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+	{IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 0000000..9f42f79
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h>		/* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE	"iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon stats being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+	u64 data;
+	s32 success_counter;
+	s32 success_ratio;
+	s32 counter;
+	s32 average_tpt;
+	unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+	spinlock_t lock;
+	struct il_priv *il;
+	s32 *expected_tpt;
+	unsigned long last_partial_flush;
+	unsigned long last_flush;
+	u32 flush_time;
+	u32 last_tx_packets;
+	u32 tx_packets;
+	u8 tgg;
+	u8 flush_pending;
+	u8 start_rate;
+	struct timer_list rate_scale_flush;
+	struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+	struct il_station_priv_common common;
+	struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+	IL_ANTENNA_DIVERSITY,
+	IL_ANTENNA_MAIN,
+	IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE		  2304U
+#define MAX_MPDU_SIZE		  2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
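+
+/*
+ * Illustrative sketch (not driver code): the RTS decision the comment above
+ * describes, as a predicate over a frame's MPDU length.
+ */
+#if 0
+static int example_use_rts(unsigned int rts_threshold, unsigned int mpdu_len)
+{
+	if (rts_threshold == 0)
+		return 1;		/* RTS for all data/management frames */
+	if (rts_threshold > MAX_MSDU_SIZE)
+		return 0;		/* RTS disabled */
+	return mpdu_len > rts_threshold;
+}
+#endif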
+
+#define IL_TX_FIFO_AC0	0
+#define IL_TX_FIFO_AC1	1
+#define IL_TX_FIFO_AC2	2
+#define IL_TX_FIFO_AC3	3
+#define IL_TX_FIFO_HCCA_1	5
+#define IL_TX_FIFO_HCCA_2	6
+#define IL_TX_FIFO_NONE	7
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+	union {
+		struct ieee80211_hdr frame;
+		struct il3945_tx_beacon_cmd beacon;
+		u8 raw[IEEE80211_FRAME_LEN];
+		u8 cmd[360];
+	} u;
+	struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+#define IL_SUPPORTED_RATES_IE_LEN         8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT        9
+
+#define IL_INVALID_RATE     0xFF
+#define IL_INVALID_VALUE    -1
+
+#define STA_PS_STATUS_WAKE             0
+#define STA_PS_STATUS_SLEEP            1
+
+struct il3945_ibss_seq {
+	u8 mac[ETH_ALEN];
+	u16 seq_num;
+	u16 frag_num;
+	unsigned long packet_time;
+	struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+		       x->u.rx_frame.stats.payload + \
+		       x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+		       IL_RX_HDR(x)->payload + \
+		       le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int il3945_calc_db_from_ratio(int sig_ratio);
+extern void il3945_rx_replenish(void *data);
+extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+					     struct ieee80211_hdr *hdr,
+					     int left);
+extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
+				     char **buf, bool display);
+extern void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE:  The implementations of these functions are hardware specific,
+ * which is why they are in the hardware specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_         <-- It's part of iwlwifi
+ * il3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_      <-- Called from work queue context
+ * il3945_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il3945_hw_handler_setup(struct il_priv *il);
+extern void il3945_hw_setup_deferred_work(struct il_priv *il);
+extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
+extern int il3945_hw_rxq_stop(struct il_priv *il);
+extern int il3945_hw_set_hw_params(struct il_priv *il);
+extern int il3945_hw_nic_init(struct il_priv *il);
+extern int il3945_hw_nic_stop_master(struct il_priv *il);
+extern void il3945_hw_txq_ctx_free(struct il_priv *il);
+extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
+extern int il3945_hw_nic_reset(struct il_priv *il);
+extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
+					   struct il_tx_queue *txq,
+					   dma_addr_t addr, u16 len, u8 reset,
+					   u8 pad);
+extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+extern int il3945_hw_get_temperature(struct il_priv *il);
+extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+					     struct il3945_frame *frame,
+					     u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+				 struct ieee80211_tx_info *info,
+				 struct ieee80211_hdr *hdr, int sta_id);
+extern int il3945_hw_reg_send_txpower(struct il_priv *il);
+extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+extern void il3945_disable_events(struct il_priv *il);
+extern int il4965_get_temperature(const struct il_priv *il);
+extern void il3945_post_associate(struct il_priv *il);
+extern void il3945_config_ap(struct il_priv *il);
+
+extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE:  This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+
+extern struct ieee80211_ops il3945_hw_ops;
+
+extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
+extern int il3945_init_hw_rate_table(struct il_priv *il);
+extern void il3945_reg_txpower_periodic(struct il_priv *il);
+extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET	95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ *   to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ *   (PA) output voltage detector.  This same detector is used during Tx of
+ *   long packets in normal operation to provide feedback as to proper output
+ *   level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+	u8 gain_idx;		/* idx into power (gain) setup table ... */
+	s8 power;		/* ... for this pwr level for this chnl group */
+	u16 v_det;		/* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ *    to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+	struct il3945_eeprom_txpower_sample samples[5];	/* 5 power levels */
+	s32 a, b, c, d, e;	/* coefficients for voltage->power
+				 * formula (signed) */
+	s32 Fa, Fb, Fc, Fd, Fe;	/* these modify coeffs based on
+				 * frequency (signed) */
+	s8 saturation_power;	/* highest power possible by h/w in this
+				 * band */
+	u8 group_channel;	/* "representative" channel # in this band */
+	s16 temperature;	/* h/w temperature at factory calib this band
+				 * (signed) */
+} __packed;
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify a/b/c/d/e coeffs based on
+ *   difference between current temperature and factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+	u32 Ta;
+	u32 Tb;
+	u32 Tc;
+	u32 Td;
+	u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+	u8 reserved0[16];
+	u16 device_id;		/* abs.ofs: 16 */
+	u8 reserved1[2];
+	u16 pmc;		/* abs.ofs: 20 */
+	u8 reserved2[20];
+	u8 mac_address[6];	/* abs.ofs: 42 */
+	u8 reserved3[58];
+	u16 board_revision;	/* abs.ofs: 106 */
+	u8 reserved4[11];
+	u8 board_pba_number[9];	/* abs.ofs: 119 */
+	u8 reserved5[8];
+	u16 version;		/* abs.ofs: 136 */
+	u8 sku_cap;		/* abs.ofs: 138 */
+	u8 leds_mode;		/* abs.ofs: 139 */
+	u16 oem_mode;
+	u16 wowlan_mode;	/* abs.ofs: 142 */
+	u16 leds_time_interval;	/* abs.ofs: 144 */
+	u8 leds_off_time;	/* abs.ofs: 146 */
+	u8 leds_on_time;	/* abs.ofs: 147 */
+	u8 almgor_m_version;	/* abs.ofs: 148 */
+	u8 antenna_switch_type;	/* abs.ofs: 149 */
+	u8 reserved6[42];
+	u8 sku_id[4];		/* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+	u16 band_1_count;	/* abs.ofs: 196 */
+	struct il_eeprom_channel band_1_channels[14];	/* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+	u16 band_2_count;	/* abs.ofs: 226 */
+	struct il_eeprom_channel band_2_channels[13];	/* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+	u16 band_3_count;	/* abs.ofs: 254 */
+	struct il_eeprom_channel band_3_channels[12];	/* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+	u16 band_4_count;	/* abs.ofs: 280 */
+	struct il_eeprom_channel band_4_channels[11];	/* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+	u16 band_5_count;	/* abs.ofs: 304 */
+	struct il_eeprom_channel band_5_channels[6];	/* abs.ofs: 306 */
+
+	u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+	struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+	struct il3945_eeprom_temperature_corr corrections;	/* abs.ofs: 832 */
+	u8 reserved16[172];	/* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
+
+/* End of EEPROM */
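[Editor's note: the abs.ofs annotations in struct il3945_eeprom are easy to get out of sync with the field declarations; a few build-time checks can pin them down. A minimal sketch only (hypothetical helper, assuming BUILD_BUG_ON and offsetof are available through the usual kernel headers); expected offsets are taken from the abs.ofs comments above:

    static inline void il3945_eeprom_layout_check(void)
    {
        /* offsets copied from the "abs.ofs" comments in the struct */
        BUILD_BUG_ON(offsetof(struct il3945_eeprom, device_id) != 16);
        BUILD_BUG_ON(offsetof(struct il3945_eeprom, mac_address) != 42);
        BUILD_BUG_ON(offsetof(struct il3945_eeprom, version) != 136);
        BUILD_BUG_ON(offsetof(struct il3945_eeprom, band_1_count) != 196);
        BUILD_BUG_ON(offsetof(struct il3945_eeprom, groups) != 512);
        BUILD_BUG_ON(offsetof(struct il3945_eeprom, corrections) != 832);
        BUILD_BUG_ON(sizeof(struct il3945_eeprom) != IL3945_EEPROM_IMG_SIZE);
    }
]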
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)	/* bit 6    */
+#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)	/* bit 7    */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES        5
+#define IL39_CMD_QUEUE_NUM	4
+
+#define IL_DEFAULT_TX_RETRY  15
+
+/*********************************************/
+
+#define RFD_SIZE                              4
+#define NUM_TFD_CHUNKS                        4
+
+#define TFD_CTL_COUNT_SET(n)       (n << 24)
+#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n)         (n << 28)
+#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
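[Editor's note: the SET/GET helpers above pack the TFD control word with the TB count in bits 24-26 and the pad count in bits 28-31. A small illustration with arbitrary values:

    u32 ctl = TFD_CTL_COUNT_SET(2) | TFD_CTL_PAD_SET(1);   /* 0x12000000 */

    /* TFD_CTL_COUNT_GET(ctl) == 2, TFD_CTL_PAD_GET(ctl) == 1 */
]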
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND		(0x000000)
+#define IL39_RTC_INST_UPPER_BOUND		(0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND		(0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND		(0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+				IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+				IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+	return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+		addr < IL39_RTC_DATA_UPPER_BOUND);
+}
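[Editor's note: the range check above is half-open, so the upper bound itself is not a valid data address; illustrative calls only:

    il3945_hw_valid_rtc_data_addr(IL39_RTC_DATA_LOWER_BOUND);      /* true  */
    il3945_hw_valid_rtc_data_addr(IL39_RTC_DATA_UPPER_BOUND - 4);  /* true  */
    il3945_hw_valid_rtc_data_addr(IL39_RTC_DATA_UPPER_BOUND);      /* false */
]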
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+	__le32 tx_base_ptr[8];
+} __packed;
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND                   (0x0800)
+#define FH39_MEM_UPPER_BOUND                   (0x1000)
+
+#define FH39_CBCC_TBL		(FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL		(FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL		(FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL		(FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL		(FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL		(FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf)			(FH39_TFDB_TBL + \
+						 ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)	(FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch)		(FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch)	(FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch)	(FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch)			(FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch)		(FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch)		(FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch)		(FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch)	(FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR		(FH39_RCSR_WPTR(0))
+
+/* RSSR */
+#define FH39_RSSR_CTRL			(FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS		(FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch)			(FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch)		(FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch)		(FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch)	(FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL                            (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128		(0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST		(0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH			(0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER		(0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL	(0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL	(0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD		(0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT		(0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE		(0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE		(0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID		(0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR		(0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON	(0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON	(0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B	(0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON		(0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON		(0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH	(0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH		(0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)	(BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)	(BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+	(FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+	 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE			(0x01000000)
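[Editor's note: as a usage sketch of the Tx status masks above, a driver would read FH39_TSSR_TX_STATUS and test both the "buffers empty" and "no pending requests" bits for a channel; il_rd() is assumed here as the register read helper:

    u32 status = il_rd(il, FH39_TSSR_TX_STATUS);
    bool idle = (status & FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch)) ==
                FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
]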
+
+struct il3945_tfd_tb {
+	__le32 addr;
+	__le32 len;
+} __packed;
+
+struct il3945_tfd {
+	__le32 control_flags;
+	struct il3945_tfd_tb tbs[4];
+	u8 __pad[28];
+} __packed;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos);
+ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos);
+ssize_t il3945_ucode_general_stats_read(struct file *file,
+					char __user *user_buf, size_t count,
+					loff_t *ppos);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
new file mode 100644
index 0000000..d3248e3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -0,0 +1,948 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "4965.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+struct stats_general_data {
+	u32 beacon_silence_rssi_a;
+	u32 beacon_silence_rssi_b;
+	u32 beacon_silence_rssi_c;
+	u32 beacon_energy_a;
+	u32 beacon_energy_b;
+	u32 beacon_energy_c;
+};
+
+void
+il4965_calib_free_results(struct il_priv *il)
+{
+	int i;
+
+	for (i = 0; i < IL_CALIB_MAX; i++) {
+		kfree(il->calib_results[i].buf);
+		il->calib_results[i].buf = NULL;
+		il->calib_results[i].buf_len = 0;
+	}
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ *   but then determines that they are either noise, or transmissions
+ *   from a distant wireless network (also "noise", really) that get
+ *   "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ *   enough to receive all of our own network traffic, but not so
+ *   high that our DSP gets too busy trying to lock onto non-network
+ *   activity/noise. */
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+		       struct stats_general_data *rx_info)
+{
+	u32 max_nrg_cck = 0;
+	int i = 0;
+	u8 max_silence_rssi = 0;
+	u32 silence_ref = 0;
+	u8 silence_rssi_a = 0;
+	u8 silence_rssi_b = 0;
+	u8 silence_rssi_c = 0;
+	u32 val;
+
+	/* "false_alarms" values below are cross-multiplications to assess the
+	 *   numbers of false alarms within the measured period of actual Rx
+	 *   (Rx is off when we're txing), vs the min/max expected false alarms
+	 *   (some should be expected if rx is sensitive enough) in a
+	 *   hypothetical listening period of 200 time units (TU), 204.8 msec:
+	 *
+	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+	 */
+	u32 false_alarms = norm_fa * 200 * 1024;
+	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
+	struct il_sensitivity_data *data = NULL;
+	const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
+
+	data = &(il->sensitivity_data);
+
+	data->nrg_auto_corr_silence_diff = 0;
+
+	/* Find max silence rssi among all 3 receivers.
+	 * This is background noise, which may include transmissions from other
+	 *    networks, measured during silence before our network's beacon */
+	silence_rssi_a =
+	    (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+	silence_rssi_b =
+	    (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+	silence_rssi_c =
+	    (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
+
+	val = max(silence_rssi_b, silence_rssi_c);
+	max_silence_rssi = max(silence_rssi_a, (u8) val);
+
+	/* Store silence rssi in 20-beacon history table */
+	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+	data->nrg_silence_idx++;
+	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+		data->nrg_silence_idx = 0;
+
+	/* Find max silence rssi across 20 beacon history */
+	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+		val = data->nrg_silence_rssi[i];
+		silence_ref = max(silence_ref, val);
+	}
+	D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+		silence_rssi_b, silence_rssi_c, silence_ref);
+
+	/* Find max rx energy (min value!) among all 3 receivers,
+	 *   measured during beacon frame.
+	 * Save it in 10-beacon history table. */
+	i = data->nrg_energy_idx;
+	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+	data->nrg_energy_idx++;
+	if (data->nrg_energy_idx >= 10)
+		data->nrg_energy_idx = 0;
+
+	/* Find min rx energy (max value) across 10 beacon history.
+	 * This is the minimum signal level that we want to receive well.
+	 * Add backoff (margin so we don't miss slightly lower energy frames).
+	 * This establishes an upper bound (min value) for energy threshold. */
+	max_nrg_cck = data->nrg_value[0];
+	for (i = 1; i < 10; i++)
+		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+	max_nrg_cck += 6;
+
+	D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+		rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+		rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+	/* Count number of consecutive beacons with fewer-than-desired
+	 *   false alarms. */
+	if (false_alarms < min_false_alarms)
+		data->num_in_cck_no_fa++;
+	else
+		data->num_in_cck_no_fa = 0;
+	D_CALIB("consecutive bcns with few false alarms = %u\n",
+		data->num_in_cck_no_fa);
+
+	/* If we got too many false alarms this time, reduce sensitivity */
+	if (false_alarms > max_false_alarms &&
+	    data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+		D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+			max_false_alarms);
+		D_CALIB("... reducing sensitivity\n");
+		data->nrg_curr_state = IL_FA_TOO_MANY;
+		/* Store for "fewer than desired" on later beacon */
+		data->nrg_silence_ref = silence_ref;
+
+		/* increase energy threshold (reduce nrg value)
+		 *   to decrease sensitivity */
+		data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+		/* Else if we got fewer than desired, increase sensitivity */
+	} else if (false_alarms < min_false_alarms) {
+		data->nrg_curr_state = IL_FA_TOO_FEW;
+
+		/* Compare silence level with silence level for most recent
+		 *   healthy number or too many false alarms */
+		data->nrg_auto_corr_silence_diff =
+		    (s32) data->nrg_silence_ref - (s32) silence_ref;
+
+		D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+			false_alarms, min_false_alarms,
+			data->nrg_auto_corr_silence_diff);
+
+		/* Increase value to increase sensitivity, but only if:
+		 * 1a) previous beacon did *not* have *too many* false alarms
+		 * 1b) AND there's a significant difference in Rx levels
+		 *      from a previous beacon with too many, or healthy # FAs
+		 * OR 2) We've seen a lot of beacons (100) with too few
+		 *       false alarms */
+		if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+		    (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+		     data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
+
+			D_CALIB("... increasing sensitivity\n");
+			/* Increase nrg value to increase sensitivity */
+			val = data->nrg_th_cck + NRG_STEP_CCK;
+			data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
+		} else {
+			D_CALIB("... but not changing sensitivity\n");
+		}
+
+		/* Else we got a healthy number of false alarms, keep status quo */
+	} else {
+		D_CALIB(" FA in safe zone\n");
+		data->nrg_curr_state = IL_FA_GOOD_RANGE;
+
+		/* Store for use in "fewer than desired" with later beacon */
+		data->nrg_silence_ref = silence_ref;
+
+		/* If previous beacon had too many false alarms,
+		 *   give it some extra margin by reducing sensitivity again
+		 *   (but don't go below measured energy of desired Rx) */
+		if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+			D_CALIB("... increasing margin\n");
+			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+				data->nrg_th_cck -= NRG_MARGIN;
+			else
+				data->nrg_th_cck = max_nrg_cck;
+		}
+	}
+
+	/* Make sure the energy threshold does not go above the measured
+	 * energy of the desired Rx signals (reduced by backoff margin),
+	 * or else we might start missing Rx frames.
+	 * Lower value is higher energy, so we use max()!
+	 */
+	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+	D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
+
+	data->nrg_prev_state = data->nrg_curr_state;
+
+	/* Auto-correlation CCK algorithm */
+	if (false_alarms > min_false_alarms) {
+
+		/* increase auto_corr values to decrease sensitivity
+		 * so the DSP won't be disturbed by the noise
+		 */
+		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
+			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+		else {
+			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+			data->auto_corr_cck =
+			    min((u32) ranges->auto_corr_max_cck, val);
+		}
+		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck_mrc =
+		    min((u32) ranges->auto_corr_max_cck_mrc, val);
+	} else if (false_alarms < min_false_alarms &&
+		   (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+		    data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
+
+		/* Decrease auto_corr values to increase sensitivity */
+		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
+		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck_mrc =
+		    max((u32) ranges->auto_corr_min_cck_mrc, val);
+	}
+
+	return 0;
+}
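[Editor's note: a quick worked example of the cross-multiplication used in the false-alarm test above, with invented numbers (rx_enable_time = 50000 usec, norm_fa = 60):

    false_alarms     = norm_fa * 200 * 1024 = 60 * 204800 = 12,288,000
    max_false_alarms = MAX_FA_CCK * rx_enable_time = MAX_FA_CCK * 50000

    false_alarms > max_false_alarms
      <=>  norm_fa / rx_enable_time  >  MAX_FA_CCK / 204800

i.e. sensitivity is reduced only when the per-usec false-alarm rate during actual Rx exceeds the allowed rate over a 200 TU (204.8 ms) reference window.]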
+
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
+{
+	u32 val;
+	u32 false_alarms = norm_fa * 200 * 1024;
+	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+	struct il_sensitivity_data *data = NULL;
+	const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
+
+	data = &(il->sensitivity_data);
+
+	/* If we got too many false alarms this time, reduce sensitivity */
+	if (false_alarms > max_false_alarms) {
+
+		D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+			max_false_alarms);
+
+		val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm =
+		    min((u32) ranges->auto_corr_max_ofdm, val);
+
+		val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc =
+		    min((u32) ranges->auto_corr_max_ofdm_mrc, val);
+
+		val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_x1 =
+		    min((u32) ranges->auto_corr_max_ofdm_x1, val);
+
+		val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc_x1 =
+		    min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
+	}
+
+	/* Else if we got fewer than desired, increase sensitivity */
+	else if (false_alarms < min_false_alarms) {
+
+		D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+			min_false_alarms);
+
+		val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm =
+		    max((u32) ranges->auto_corr_min_ofdm, val);
+
+		val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc =
+		    max((u32) ranges->auto_corr_min_ofdm_mrc, val);
+
+		val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_x1 =
+		    max((u32) ranges->auto_corr_min_ofdm_x1, val);
+
+		val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc_x1 =
+		    max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
+	} else {
+		D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+			min_false_alarms, false_alarms, max_false_alarms);
+	}
+	return 0;
+}
+
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+				      struct il_sensitivity_data *data,
+				      __le16 *tbl)
+{
+	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm);
+	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_cck);
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+	tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+	tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+	tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+	    cpu_to_le16(data->barker_corr_th_min);
+	tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16(data->barker_corr_th_min_mrc);
+	tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+	D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+		data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+		data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+		data->nrg_th_ofdm);
+
+	D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+		data->auto_corr_cck_mrc, data->nrg_th_cck);
+}
+
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
+{
+	struct il_sensitivity_cmd cmd;
+	struct il_sensitivity_data *data = NULL;
+	struct il_host_cmd cmd_out = {
+		.id = C_SENSITIVITY,
+		.len = sizeof(struct il_sensitivity_cmd),
+		.flags = CMD_ASYNC,
+		.data = &cmd,
+	};
+
+	data = &(il->sensitivity_data);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
+
+	/* Update uCode's "work" table, and copy it to DSP */
+	cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
+
+	/* Don't send command to uCode if nothing has changed */
+	if (!memcmp
+	    (&cmd.table[0], &(il->sensitivity_tbl[0]),
+	     sizeof(u16) * HD_TBL_SIZE)) {
+		D_CALIB("No change in C_SENSITIVITY\n");
+		return 0;
+	}
+
+	/* Copy table for comparison next time */
+	memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+	       sizeof(u16) * HD_TBL_SIZE);
+
+	return il_send_cmd(il, &cmd_out);
+}
+
+void
+il4965_init_sensitivity(struct il_priv *il)
+{
+	int ret = 0;
+	int i;
+	struct il_sensitivity_data *data = NULL;
+	const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
+
+	if (il->disable_sens_cal)
+		return;
+
+	D_CALIB("Start il4965_init_sensitivity\n");
+
+	/* Clear driver's sensitivity algo data */
+	data = &(il->sensitivity_data);
+
+	if (ranges == NULL)
+		return;
+
+	memset(data, 0, sizeof(struct il_sensitivity_data));
+
+	data->num_in_cck_no_fa = 0;
+	data->nrg_curr_state = IL_FA_TOO_MANY;
+	data->nrg_prev_state = IL_FA_TOO_MANY;
+	data->nrg_silence_ref = 0;
+	data->nrg_silence_idx = 0;
+	data->nrg_energy_idx = 0;
+
+	for (i = 0; i < 10; i++)
+		data->nrg_value[i] = 0;
+
+	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+		data->nrg_silence_rssi[i] = 0;
+
+	data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+	data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+	data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+	data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+	data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+	data->nrg_th_cck = ranges->nrg_th_cck;
+	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+	data->barker_corr_th_min = ranges->barker_corr_th_min;
+	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+	data->nrg_th_cca = ranges->nrg_th_cca;
+
+	data->last_bad_plcp_cnt_ofdm = 0;
+	data->last_fa_cnt_ofdm = 0;
+	data->last_bad_plcp_cnt_cck = 0;
+	data->last_fa_cnt_cck = 0;
+
+	ret |= il4965_sensitivity_write(il);
+	D_CALIB("<<return 0x%X\n", ret);
+}
+
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
+{
+	u32 rx_enable_time;
+	u32 fa_cck;
+	u32 fa_ofdm;
+	u32 bad_plcp_cck;
+	u32 bad_plcp_ofdm;
+	u32 norm_fa_ofdm;
+	u32 norm_fa_cck;
+	struct il_sensitivity_data *data = NULL;
+	struct stats_rx_non_phy *rx_info;
+	struct stats_rx_phy *ofdm, *cck;
+	unsigned long flags;
+	struct stats_general_data statis;
+
+	if (il->disable_sens_cal)
+		return;
+
+	data = &(il->sensitivity_data);
+
+	if (!il_is_any_associated(il)) {
+		D_CALIB("<< - not associated\n");
+		return;
+	}
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+	ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+	cck = &(((struct il_notif_stats *)resp)->rx.cck);
+
+	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+		D_CALIB("<< invalid data.\n");
+		spin_unlock_irqrestore(&il->lock, flags);
+		return;
+	}
+
+	/* Extract Statistics: */
+	rx_enable_time = le32_to_cpu(rx_info->channel_load);
+	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+	statis.beacon_silence_rssi_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a);
+	statis.beacon_silence_rssi_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b);
+	statis.beacon_silence_rssi_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c);
+	statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+	statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+	statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
+
+	if (!rx_enable_time) {
+		D_CALIB("<< RX Enable Time == 0!\n");
+		return;
+	}
+
+	/* These stats increase monotonically, and do not reset
+	 *   at each beacon.  Calculate difference from last value, or just
+	 *   use the new stats value if it has reset or wrapped around. */
+	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
+		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+	else {
+		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+	}
+
+	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
+		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+	else {
+		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+	}
+
+	if (data->last_fa_cnt_ofdm > fa_ofdm)
+		data->last_fa_cnt_ofdm = fa_ofdm;
+	else {
+		fa_ofdm -= data->last_fa_cnt_ofdm;
+		data->last_fa_cnt_ofdm += fa_ofdm;
+	}
+
+	if (data->last_fa_cnt_cck > fa_cck)
+		data->last_fa_cnt_cck = fa_cck;
+	else {
+		fa_cck -= data->last_fa_cnt_cck;
+		data->last_fa_cnt_cck += fa_cck;
+	}
+
+	/* Total aborted signal locks */
+	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+	norm_fa_cck = fa_cck + bad_plcp_cck;
+
+	D_CALIB("cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
+		bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+	il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+	il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
+
+	il4965_sensitivity_write(il);
+}
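[Editor's note: the counter handling above follows a small delta-or-reset pattern; in isolation it looks like the following hypothetical helper (not part of the driver):

    /* Return the increase since the last reading; if the monotonic uCode
     * counter went backwards it was reset, so take the new value as-is. */
    static u32 il4965_counter_delta(u32 *last, u32 cnt)
    {
        u32 delta;

        if (*last > cnt) {
            *last = cnt;
            return cnt;
        }
        delta = cnt - *last;
        *last = cnt;
        return delta;
    }
]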
+
+static inline u8
+il4965_find_first_chain(u8 mask)
+{
+	if (mask & ANT_A)
+		return CHAIN_A;
+	if (mask & ANT_B)
+		return CHAIN_B;
+	return CHAIN_C;
+}
+
+/**
+ * Run disconnected antenna algorithm to find out which antennas are
+ * disconnected.
+ */
+static void
+il4965_find_disconn_antenna(struct il_priv *il, u32 *average_sig,
+			    struct il_chain_noise_data *data)
+{
+	u32 active_chains = 0;
+	u32 max_average_sig;
+	u16 max_average_sig_antenna_i;
+	u8 num_tx_chains;
+	u8 first_chain;
+	u16 i = 0;
+
+	average_sig[0] =
+	    data->chain_signal_a /
+	    il->cfg->base_params->chain_noise_num_beacons;
+	average_sig[1] =
+	    data->chain_signal_b /
+	    il->cfg->base_params->chain_noise_num_beacons;
+	average_sig[2] =
+	    data->chain_signal_c /
+	    il->cfg->base_params->chain_noise_num_beacons;
+
+	if (average_sig[0] >= average_sig[1]) {
+		max_average_sig = average_sig[0];
+		max_average_sig_antenna_i = 0;
+		active_chains = (1 << max_average_sig_antenna_i);
+	} else {
+		max_average_sig = average_sig[1];
+		max_average_sig_antenna_i = 1;
+		active_chains = (1 << max_average_sig_antenna_i);
+	}
+
+	if (average_sig[2] >= max_average_sig) {
+		max_average_sig = average_sig[2];
+		max_average_sig_antenna_i = 2;
+		active_chains = (1 << max_average_sig_antenna_i);
+	}
+
+	D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+		average_sig[2]);
+	D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+		max_average_sig_antenna_i);
+
+	/* Compare signal strengths for all 3 receivers. */
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (i != max_average_sig_antenna_i) {
+			s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+			/* If signal is very weak, compared with
+			 * strongest, mark it as disconnected. */
+			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+				data->disconn_array[i] = 1;
+			else
+				active_chains |= (1 << i);
+			D_CALIB("i = %d  rssiDelta = %d  "
+				"disconn_array[i] = %d\n", i, rssi_delta,
+				data->disconn_array[i]);
+		}
+	}
+
+	/*
+	 * The above algorithm sometimes fails when the ucode
+	 * reports 0 for all chains. It's not clear why that
+	 * happens to start with, but it is then causing trouble
+	 * because this can make us enable more chains than the
+	 * hardware really has.
+	 *
+	 * To be safe, simply mask out any chains that we know
+	 * are not on the device.
+	 */
+	active_chains &= il->hw_params.valid_rx_ant;
+
+	num_tx_chains = 0;
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		/* loops over all the bits of
+		 * il->hw_params.valid_tx_ant */
+		u8 ant_msk = (1 << i);
+		if (!(il->hw_params.valid_tx_ant & ant_msk))
+			continue;
+
+		num_tx_chains++;
+		if (data->disconn_array[i] == 0)
+			/* there is a Tx antenna connected */
+			break;
+		if (num_tx_chains == il->hw_params.tx_chains_num &&
+		    data->disconn_array[i]) {
+			/*
+			 * If all chains are disconnected
+			 * connect the first valid tx chain
+			 */
+			first_chain =
+			    il4965_find_first_chain(il->cfg->valid_tx_ant);
+			data->disconn_array[first_chain] = 0;
+			active_chains |= BIT(first_chain);
+			D_CALIB("All Tx chains are disconnected "
+				"- declare %d as connected\n", first_chain);
+			break;
+		}
+	}
+
+	if (active_chains != il->hw_params.valid_rx_ant &&
+	    active_chains != il->chain_noise_data.active_chains)
+		D_CALIB("Detected that not all antennas are connected! "
+			"Connected: %#x, valid: %#x.\n", active_chains,
+			il->hw_params.valid_rx_ant);
+
+	/* Save for use within RXON, TX, SCAN commands, etc. */
+	data->active_chains = active_chains;
+	D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
+}
+
+static void
+il4965_gain_computation(struct il_priv *il, u32 *average_noise,
+			u16 min_average_noise_antenna_i, u32 min_average_noise,
+			u8 default_chain)
+{
+	int i, ret;
+	struct il_chain_noise_data *data = &il->chain_noise_data;
+
+	data->delta_gain_code[min_average_noise_antenna_i] = 0;
+
+	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
+		s32 delta_g = 0;
+
+		if (!data->disconn_array[i] &&
+		    data->delta_gain_code[i] ==
+		    CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
+			delta_g = average_noise[i] - min_average_noise;
+			data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
+			data->delta_gain_code[i] =
+			    min(data->delta_gain_code[i],
+				(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+			data->delta_gain_code[i] =
+			    (data->delta_gain_code[i] | (1 << 2));
+		} else {
+			data->delta_gain_code[i] = 0;
+		}
+	}
+	D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+		data->delta_gain_code[1], data->delta_gain_code[2]);
+
+	/* Differential gain gets sent to uCode only once */
+	if (!data->radio_write) {
+		struct il_calib_diff_gain_cmd cmd;
+		data->radio_write = 1;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+		cmd.diff_gain_a = data->delta_gain_code[0];
+		cmd.diff_gain_b = data->delta_gain_code[1];
+		cmd.diff_gain_c = data->delta_gain_code[2];
+		ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
+		if (ret)
+			D_CALIB("fail sending cmd C_PHY_CALIBRATION\n");
+
+		/* TODO we might want to recalculate
+		 * rx_chain in rxon cmd */
+
+		/* Mark so we run this algo only once! */
+		data->state = IL_CHAIN_NOISE_CALIBRATED;
+	}
+}
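[Editor's note: worked example of the gain-code arithmetic in il4965_gain_computation, with invented values average_noise[i] = 30 and min_average_noise = 18:

    delta_g             = 30 - 18        = 12
    delta_gain_code[i]  = (12 * 10) / 15 = 8     (then clamped to
                                                  CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
    delta_gain_code[i] |= (1 << 2)       -> 0x0c

so each extra 1.5 units of silence noise above the quietest chain adds roughly one step of differential gain, with bit 2 set on the value sent to the uCode.]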
+
+/*
+ * Accumulate 16 beacons of signal and noise stats for each of
+ *   3 receivers/antennas/rx-chains, then figure out:
+ * 1)  Which antennas are connected.
+ * 2)  Differential rx gain settings to balance the 3 receivers.
+ */
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
+{
+	struct il_chain_noise_data *data = NULL;
+
+	u32 chain_noise_a;
+	u32 chain_noise_b;
+	u32 chain_noise_c;
+	u32 chain_sig_a;
+	u32 chain_sig_b;
+	u32 chain_sig_c;
+	u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+	u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+	u16 i = 0;
+	u16 rxon_chnum = INITIALIZATION_VALUE;
+	u16 stat_chnum = INITIALIZATION_VALUE;
+	u8 rxon_band24;
+	u8 stat_band24;
+	unsigned long flags;
+	struct stats_rx_non_phy *rx_info;
+
+	struct il_rxon_context *ctx = &il->ctx;
+
+	if (il->disable_chain_noise_cal)
+		return;
+
+	data = &(il->chain_noise_data);
+
+	/*
+	 * Accumulate just the first "chain_noise_num_beacons" after
+	 * the first association, then we're done forever.
+	 */
+	if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+		if (data->state == IL_CHAIN_NOISE_ALIVE)
+			D_CALIB("Wait for noise calib reset\n");
+		return;
+	}
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
+
+	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+		D_CALIB(" << Interference data unavailable\n");
+		spin_unlock_irqrestore(&il->lock, flags);
+		return;
+	}
+
+	rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+	rxon_chnum = le16_to_cpu(ctx->staging.channel);
+
+	stat_band24 =
+	    !!(((struct il_notif_stats *)stat_resp)->
+	       flag & STATS_REPLY_FLG_BAND_24G_MSK);
+	stat_chnum =
+	    le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
+
+	/* Make sure we accumulate data for just the associated channel
+	 *   (even if scanning). */
+	if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+		D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+			rxon_band24);
+		spin_unlock_irqrestore(&il->lock, flags);
+		return;
+	}
+
+	/*
+	 *  Accumulate beacon stats values across
+	 * "chain_noise_num_beacons"
+	 */
+	chain_noise_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	chain_noise_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	chain_noise_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	data->beacon_count++;
+
+	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+	D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+		data->beacon_count);
+	D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+		chain_sig_c);
+	D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+		chain_noise_c);
+
+	/* If this is the "chain_noise_num_beacons", determine:
+	 * 1)  Disconnected antennas (using signal strengths)
+	 * 2)  Differential gain (using silence noise) to balance receivers */
+	if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
+		return;
+
+	/* Analyze signal for disconnected antenna */
+	il4965_find_disconn_antenna(il, average_sig, data);
+
+	/* Analyze noise for rx balance */
+	average_noise[0] =
+	    data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+	average_noise[1] =
+	    data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+	average_noise[2] =
+	    data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
+
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (!data->disconn_array[i] &&
+		    average_noise[i] <= min_average_noise) {
+			/* This means that chain i is active and has
+			 * lower noise values so far: */
+			min_average_noise = average_noise[i];
+			min_average_noise_antenna_i = i;
+		}
+	}
+
+	D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+		average_noise[1], average_noise[2]);
+
+	D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+		min_average_noise_antenna_i);
+
+	il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+				min_average_noise,
+				il4965_find_first_chain(il->cfg->valid_rx_ant));
+
+	/* Some power changes may have been made during the calibration.
+	 * Update and commit the RXON
+	 */
+	if (il->cfg->ops->lib->update_chain_flags)
+		il->cfg->ops->lib->update_chain_flags(il);
+
+	data->state = IL_CHAIN_NOISE_DONE;
+	il_power_update_mode(il, false);
+}
+
+void
+il4965_reset_run_time_calib(struct il_priv *il)
+{
+	int i;
+	memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+	memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
+	for (i = 0; i < NUM_RX_CHAINS; i++)
+		il->chain_noise_data.delta_gain_code[i] =
+		    CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+	/* Ask for stats now, the uCode will send notification
+	 * periodically after association */
+	il_send_stats_request(il, CMD_ASYNC, true);
+}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 0000000..98ec39f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+*  Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = "  %-30s %10u\n";
+static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char *fmt_header =
+    "%-32s    current  cumulative       delta         max\n";
+
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+	int p = 0;
+	u32 flag;
+
+	flag = le32_to_cpu(il->_4965.stats.flag);
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+	if (flag & UCODE_STATS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+			       "\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		       (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+		       "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		       (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+		       "disabled");
+
+	return p;
+}
+
+ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz =
+	    sizeof(struct stats_rx_phy) * 40 +
+	    sizeof(struct stats_rx_non_phy) * 40 +
+	    sizeof(struct stats_rx_ht_phy) * 40 + 400;
+	ssize_t ret;
+	struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+	struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+	struct stats_rx_non_phy *general, *accum_general;
+	struct stats_rx_non_phy *delta_general, *max_general;
+	struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	ofdm = &il->_4965.stats.rx.ofdm;
+	cck = &il->_4965.stats.rx.cck;
+	general = &il->_4965.stats.rx.general;
+	ht = &il->_4965.stats.rx.ofdm_ht;
+	accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+	accum_cck = &il->_4965.accum_stats.rx.cck;
+	accum_general = &il->_4965.accum_stats.rx.general;
+	accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+	delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+	delta_cck = &il->_4965.delta_stats.rx.cck;
+	delta_general = &il->_4965.delta_stats.rx.general;
+	delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+	max_ofdm = &il->_4965.max_delta.rx.ofdm;
+	max_cck = &il->_4965.max_delta.rx.cck;
+	max_general = &il->_4965.max_delta.rx.general;
+	max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+	pos += il4965_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - OFDM:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+		      le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+		      delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+		      le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+		      delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+		      le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+		      delta_ofdm->plcp_err, max_ofdm->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+		      le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+		      delta_ofdm->crc32_err, max_ofdm->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+		      le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+		      delta_ofdm->overrun_err, max_ofdm->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+		      le32_to_cpu(ofdm->early_overrun_err),
+		      accum_ofdm->early_overrun_err,
+		      delta_ofdm->early_overrun_err,
+		      max_ofdm->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+		      le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+		      delta_ofdm->crc32_good, max_ofdm->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+		      le32_to_cpu(ofdm->false_alarm_cnt),
+		      accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+		      max_ofdm->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+		      le32_to_cpu(ofdm->fina_sync_err_cnt),
+		      accum_ofdm->fina_sync_err_cnt,
+		      delta_ofdm->fina_sync_err_cnt,
+		      max_ofdm->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+		      le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+		      delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+		      le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+		      delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+		      le32_to_cpu(ofdm->unresponded_rts),
+		      accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+		      max_ofdm->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+		      accum_ofdm->rxe_frame_limit_overrun,
+		      delta_ofdm->rxe_frame_limit_overrun,
+		      max_ofdm->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+		      le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+		      delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+		      le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+		      delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+		      le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+		      accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+		      max_ofdm->sent_ba_rsp_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+		      le32_to_cpu(ofdm->dsp_self_kill),
+		      accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+		      max_ofdm->dsp_self_kill);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+		      le32_to_cpu(ofdm->mh_format_err),
+		      accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+		      max_ofdm->mh_format_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "re_acq_main_rssi_sum:",
+		      le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+		      accum_ofdm->re_acq_main_rssi_sum,
+		      delta_ofdm->re_acq_main_rssi_sum,
+		      max_ofdm->re_acq_main_rssi_sum);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - CCK:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+		      le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+		      delta_cck->ina_cnt, max_cck->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+		      le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+		      delta_cck->fina_cnt, max_cck->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+		      le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+		      delta_cck->plcp_err, max_cck->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+		      le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+		      delta_cck->crc32_err, max_cck->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+		      le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+		      delta_cck->overrun_err, max_cck->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+		      le32_to_cpu(cck->early_overrun_err),
+		      accum_cck->early_overrun_err,
+		      delta_cck->early_overrun_err, max_cck->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+		      le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+		      delta_cck->crc32_good, max_cck->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+		      le32_to_cpu(cck->false_alarm_cnt),
+		      accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+		      max_cck->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+		      le32_to_cpu(cck->fina_sync_err_cnt),
+		      accum_cck->fina_sync_err_cnt,
+		      delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+		      le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+		      delta_cck->sfd_timeout, max_cck->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+		      le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+		      delta_cck->fina_timeout, max_cck->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+		      le32_to_cpu(cck->unresponded_rts),
+		      accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+		      max_cck->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(cck->rxe_frame_limit_overrun),
+		      accum_cck->rxe_frame_limit_overrun,
+		      delta_cck->rxe_frame_limit_overrun,
+		      max_cck->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+		      le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+		      delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+		      le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+		      delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+		      le32_to_cpu(cck->sent_ba_rsp_cnt),
+		      accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+		      max_cck->sent_ba_rsp_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+		      le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+		      delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+		      le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+		      delta_cck->mh_format_err, max_cck->mh_format_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "re_acq_main_rssi_sum:",
+		      le32_to_cpu(cck->re_acq_main_rssi_sum),
+		      accum_cck->re_acq_main_rssi_sum,
+		      delta_cck->re_acq_main_rssi_sum,
+		      max_cck->re_acq_main_rssi_sum);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - GENERAL:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+		      le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+		      delta_general->bogus_cts, max_general->bogus_cts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+		      le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+		      delta_general->bogus_ack, max_general->bogus_ack);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+		      le32_to_cpu(general->non_bssid_frames),
+		      accum_general->non_bssid_frames,
+		      delta_general->non_bssid_frames,
+		      max_general->non_bssid_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+		      le32_to_cpu(general->filtered_frames),
+		      accum_general->filtered_frames,
+		      delta_general->filtered_frames,
+		      max_general->filtered_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+		      le32_to_cpu(general->non_channel_beacons),
+		      accum_general->non_channel_beacons,
+		      delta_general->non_channel_beacons,
+		      max_general->non_channel_beacons);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+		      le32_to_cpu(general->channel_beacons),
+		      accum_general->channel_beacons,
+		      delta_general->channel_beacons,
+		      max_general->channel_beacons);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+		      le32_to_cpu(general->num_missed_bcon),
+		      accum_general->num_missed_bcon,
+		      delta_general->num_missed_bcon,
+		      max_general->num_missed_bcon);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "adc_rx_saturation_time:",
+		      le32_to_cpu(general->adc_rx_saturation_time),
+		      accum_general->adc_rx_saturation_time,
+		      delta_general->adc_rx_saturation_time,
+		      max_general->adc_rx_saturation_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "ina_detect_search_tm:",
+		      le32_to_cpu(general->ina_detection_search_time),
+		      accum_general->ina_detection_search_time,
+		      delta_general->ina_detection_search_time,
+		      max_general->ina_detection_search_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "beacon_silence_rssi_a:",
+		      le32_to_cpu(general->beacon_silence_rssi_a),
+		      accum_general->beacon_silence_rssi_a,
+		      delta_general->beacon_silence_rssi_a,
+		      max_general->beacon_silence_rssi_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "beacon_silence_rssi_b:",
+		      le32_to_cpu(general->beacon_silence_rssi_b),
+		      accum_general->beacon_silence_rssi_b,
+		      delta_general->beacon_silence_rssi_b,
+		      max_general->beacon_silence_rssi_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "beacon_silence_rssi_c:",
+		      le32_to_cpu(general->beacon_silence_rssi_c),
+		      accum_general->beacon_silence_rssi_c,
+		      delta_general->beacon_silence_rssi_c,
+		      max_general->beacon_silence_rssi_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "interference_data_flag:",
+		      le32_to_cpu(general->interference_data_flag),
+		      accum_general->interference_data_flag,
+		      delta_general->interference_data_flag,
+		      max_general->interference_data_flag);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+		      le32_to_cpu(general->channel_load),
+		      accum_general->channel_load, delta_general->channel_load,
+		      max_general->channel_load);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+		      le32_to_cpu(general->dsp_false_alarms),
+		      accum_general->dsp_false_alarms,
+		      delta_general->dsp_false_alarms,
+		      max_general->dsp_false_alarms);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+		      le32_to_cpu(general->beacon_rssi_a),
+		      accum_general->beacon_rssi_a,
+		      delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+		      le32_to_cpu(general->beacon_rssi_b),
+		      accum_general->beacon_rssi_b,
+		      delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+		      le32_to_cpu(general->beacon_rssi_c),
+		      accum_general->beacon_rssi_c,
+		      delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+		      le32_to_cpu(general->beacon_energy_a),
+		      accum_general->beacon_energy_a,
+		      delta_general->beacon_energy_a,
+		      max_general->beacon_energy_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+		      le32_to_cpu(general->beacon_energy_b),
+		      accum_general->beacon_energy_b,
+		      delta_general->beacon_energy_b,
+		      max_general->beacon_energy_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+		      le32_to_cpu(general->beacon_energy_c),
+		      accum_general->beacon_energy_c,
+		      delta_general->beacon_energy_c,
+		      max_general->beacon_energy_c);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - OFDM_HT:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+		      le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+		      delta_ht->plcp_err, max_ht->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+		      le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+		      delta_ht->overrun_err, max_ht->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+		      le32_to_cpu(ht->early_overrun_err),
+		      accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+		      max_ht->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+		      le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+		      delta_ht->crc32_good, max_ht->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+		      le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+		      delta_ht->crc32_err, max_ht->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+		      le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+		      delta_ht->mh_format_err, max_ht->mh_format_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+		      le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+		      delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+		      le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+		      delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+		      le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+		      delta_ht->agg_cnt, max_ht->agg_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+		      le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+		      delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
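+
+/*
+ * Each fmt_table line above prints four columns per counter: the current
+ * value from the last uCode notification (le32_to_cpu'd), the accumulated
+ * total, the delta since the previous notification and the largest delta
+ * seen so far.  fmt_header/fmt_table/fmt_value themselves are defined
+ * elsewhere in this debugfs code.
+ */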
+
+ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+	ssize_t ret;
+	struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/* The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	tx = &il->_4965.stats.tx;
+	accum_tx = &il->_4965.accum_stats.tx;
+	delta_tx = &il->_4965.delta_stats.tx;
+	max_tx = &il->_4965.max_delta.tx;
+
+	pos += il4965_stats_flag(il, buf, bufsz);
+	pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+		      le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+		      delta_tx->preamble_cnt, max_tx->preamble_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+		      le32_to_cpu(tx->rx_detected_cnt),
+		      accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+		      max_tx->rx_detected_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+		      le32_to_cpu(tx->bt_prio_defer_cnt),
+		      accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+		      max_tx->bt_prio_defer_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+		      le32_to_cpu(tx->bt_prio_kill_cnt),
+		      accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+		      max_tx->bt_prio_kill_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+		      le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+		      delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+		      le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+		      delta_tx->cts_timeout, max_tx->cts_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+		      le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+		      delta_tx->ack_timeout, max_tx->ack_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+		      le32_to_cpu(tx->expected_ack_cnt),
+		      accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+		      max_tx->expected_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+		      le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+		      delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+		      le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+		      delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "abort_nxt_frame_mismatch:",
+		      le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+		      accum_tx->burst_abort_next_frame_mismatch_cnt,
+		      delta_tx->burst_abort_next_frame_mismatch_cnt,
+		      max_tx->burst_abort_next_frame_mismatch_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "abort_missing_nxt_frame:",
+		      le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+		      accum_tx->burst_abort_missing_next_frame_cnt,
+		      delta_tx->burst_abort_missing_next_frame_cnt,
+		      max_tx->burst_abort_missing_next_frame_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "cts_timeout_collision:",
+		      le32_to_cpu(tx->cts_timeout_collision),
+		      accum_tx->cts_timeout_collision,
+		      delta_tx->cts_timeout_collision,
+		      max_tx->cts_timeout_collision);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "ack_ba_timeout_collision:",
+		      le32_to_cpu(tx->ack_or_ba_timeout_collision),
+		      accum_tx->ack_or_ba_timeout_collision,
+		      delta_tx->ack_or_ba_timeout_collision,
+		      max_tx->ack_or_ba_timeout_collision);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+		      le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+		      delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg ba_resched_frames:",
+		      le32_to_cpu(tx->agg.ba_reschedule_frames),
+		      accum_tx->agg.ba_reschedule_frames,
+		      delta_tx->agg.ba_reschedule_frames,
+		      max_tx->agg.ba_reschedule_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg scd_query_agg_frame:",
+		      le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+		      accum_tx->agg.scd_query_agg_frame_cnt,
+		      delta_tx->agg.scd_query_agg_frame_cnt,
+		      max_tx->agg.scd_query_agg_frame_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg scd_query_no_agg:",
+		      le32_to_cpu(tx->agg.scd_query_no_agg),
+		      accum_tx->agg.scd_query_no_agg,
+		      delta_tx->agg.scd_query_no_agg,
+		      max_tx->agg.scd_query_no_agg);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+		      le32_to_cpu(tx->agg.scd_query_agg),
+		      accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+		      max_tx->agg.scd_query_agg);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg scd_query_mismatch:",
+		      le32_to_cpu(tx->agg.scd_query_mismatch),
+		      accum_tx->agg.scd_query_mismatch,
+		      delta_tx->agg.scd_query_mismatch,
+		      max_tx->agg.scd_query_mismatch);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+		      le32_to_cpu(tx->agg.frame_not_ready),
+		      accum_tx->agg.frame_not_ready,
+		      delta_tx->agg.frame_not_ready,
+		      max_tx->agg.frame_not_ready);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+		      le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+		      delta_tx->agg.underrun, max_tx->agg.underrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+		      le32_to_cpu(tx->agg.bt_prio_kill),
+		      accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+		      max_tx->agg.bt_prio_kill);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+		      le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+		      accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+		      max_tx->agg.rx_ba_rsp_cnt);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = sizeof(struct stats_general) * 10 + 300;
+	ssize_t ret;
+	struct stats_general_common *general, *accum_general;
+	struct stats_general_common *delta_general, *max_general;
+	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+	struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/* The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	general = &il->_4965.stats.general.common;
+	dbg = &il->_4965.stats.general.common.dbg;
+	div = &il->_4965.stats.general.common.div;
+	accum_general = &il->_4965.accum_stats.general.common;
+	accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+	accum_div = &il->_4965.accum_stats.general.common.div;
+	delta_general = &il->_4965.delta_stats.general.common;
+	max_general = &il->_4965.max_delta.general.common;
+	delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+	max_dbg = &il->_4965.max_delta.general.common.dbg;
+	delta_div = &il->_4965.delta_stats.general.common.div;
+	max_div = &il->_4965.max_delta.general.common.div;
+
+	pos += il4965_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_General:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+		      le32_to_cpu(general->temperature));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+		      le32_to_cpu(general->ttl_timestamp));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+		      delta_dbg->burst_check, max_dbg->burst_check);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+		      delta_dbg->burst_count, max_dbg->burst_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "wait_for_silence_timeout_count:",
+		      le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+		      accum_dbg->wait_for_silence_timeout_cnt,
+		      delta_dbg->wait_for_silence_timeout_cnt,
+		      max_dbg->wait_for_silence_timeout_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+		      le32_to_cpu(general->sleep_time),
+		      accum_general->sleep_time, delta_general->sleep_time,
+		      max_general->sleep_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+		      le32_to_cpu(general->slots_out), accum_general->slots_out,
+		      delta_general->slots_out, max_general->slots_out);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+		      le32_to_cpu(general->slots_idle),
+		      accum_general->slots_idle, delta_general->slots_idle,
+		      max_general->slots_idle);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+		      delta_div->tx_on_a, max_div->tx_on_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+		      delta_div->tx_on_b, max_div->tx_on_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+		      le32_to_cpu(div->exec_time), accum_div->exec_time,
+		      delta_div->exec_time, max_div->exec_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+		      le32_to_cpu(div->probe_time), accum_div->probe_time,
+		      delta_div->probe_time, max_div->probe_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+		      le32_to_cpu(general->rx_enable_counter),
+		      accum_general->rx_enable_counter,
+		      delta_general->rx_enable_counter,
+		      max_general->rx_enable_counter);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+		      le32_to_cpu(general->num_of_sos_states),
+		      accum_general->num_of_sos_states,
+		      delta_general->num_of_sos_states,
+		      max_general->num_of_sos_states);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
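+
+/*
+ * Usage sketch (hypothetical path -- the exact location depends on where
+ * debugfs is mounted and on the per-device directory the driver creates):
+ *
+ *   cat /sys/kernel/debug/ieee80211/phy0/iwlegacy/debug/ucode_general_stats
+ *
+ * Every read rebuilds the buffer from the cached uCode statistics, so the
+ * output only changes once a new statistics notification has arrived.
+ */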
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 0000000..1667232
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6515 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME        "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION     IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+		IL_ERR("Tx flush command to flush out all frames\n");
+		if (!test_bit(S_EXIT_PENDING, &il->status))
+			queue_work(il->workqueue, &il->tx_flush);
+	}
+}
+
+/*
+ * EEPROM
+ */
+struct il_mod_params il4965_mod_params = {
+	.amsdu_size_8K = 1,
+	.restart_fw = 1,
+	/* the rest are 0 by default */
+};
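+
+/*
+ * amsdu_size_8K selects 8K (rather than 4K) Rx buffers in il4965_rx_init()
+ * below; restart_fw allows the driver to restart the firmware after an
+ * error.  Both default to enabled here.
+ */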
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
+		rxq->queue[i] = NULL;
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
+	u32 rb_timeout = 0;
+
+	if (il->cfg->mod_params->amsdu_size_8K)
+		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+	else
+		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+	/* Stop Rx DMA */
+	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+	/* Reset driver's Rx queue write idx */
+	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * Direct rx interrupts to host
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+	      rb_size |
+	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+	return 0;
+}
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+		if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+			il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+					       ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+			      ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+	unsigned long flags;
+	struct il_rx_queue *rxq = &il->rxq;
+	int ret;
+
+	/* nic_init */
+	spin_lock_irqsave(&il->lock, flags);
+	il->cfg->ops->lib->apm_ops.init(il);
+
+	/* Set interrupt coalescing calibration timer to default (512 usecs) */
+	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il4965_set_pwr_vmain(il);
+
+	il->cfg->ops->lib->apm_ops.config(il);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (!rxq->bd) {
+		ret = il_rx_queue_alloc(il);
+		if (ret) {
+			IL_ERR("Unable to initialize Rx queue\n");
+			return -ENOMEM;
+		}
+	} else
+		il4965_rx_queue_reset(il, rxq);
+
+	il4965_rx_replenish(il);
+
+	il4965_rx_init(il, rxq);
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	rxq->need_update = 1;
+	il_rx_queue_update_write_ptr(il, rxq);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Allocate or reset and init all Tx and Command queues */
+	if (!il->txq) {
+		ret = il4965_txq_ctx_alloc(il);
+		if (ret)
+			return ret;
+	} else
+		il4965_txq_ctx_reset(il);
+
+	set_bit(S_INIT, &il->status);
+
+	return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32) (dma_addr >> 8));
+}
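+
+/*
+ * The device stores each Rx buffer descriptor as (dma_addr >> 8), which is
+ * why Rx buffers must be 256-byte aligned -- the
+ * BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)) check in il4965_rx_allocate()
+ * enforces exactly that.
+ */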
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+		/* The overwritten rxb must be a used one */
+		rxb = rxq->queue[rxq->write];
+		BUG_ON(rxb && rxb->page);
+
+		/* Get next free Rx buffer, remove from free list */
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		/* Point to Rx buffer via next RBD in circular buffer */
+		rxq->bd[rxq->write] =
+		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		queue_work(il->workqueue, &il->rx_replenish);
+
+	/* If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8. */
+	if (rxq->write_actual != (rxq->write & ~0x7)) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		rxq->need_update = 1;
+		spin_unlock_irqrestore(&rxq->lock, flags);
+		il_rx_queue_update_write_ptr(il, rxq);
+	}
+}
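+
+/*
+ * Illustration of the rounding above (assuming il_rx_queue_update_write_ptr()
+ * records the rounded value in write_actual): with write_actual == 8,
+ * advancing rxq->write from 9 up to 15 changes nothing because
+ * (write & ~0x7) stays 8; only once write reaches 16 is the device's write
+ * pointer bumped, so doorbell updates happen in batches of 8 RBDs.
+ */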
+
+/**
+ * il4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free a new receive page is allocated for the slot.
+ *
+ * Also restock the Rx queue via il_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	struct page *page;
+	unsigned long flags;
+	gfp_t gfp_mask = priority;
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (il->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				D_INFO("alloc_pages failed, " "order: %d\n",
+				       il->hw_params.rx_page_order);
+
+			if (rxq->free_count <= RX_LOW_WATERMARK &&
+			    net_ratelimit())
+				IL_ERR("Failed to alloc_pages with %s. "
+				       "Only %u free buffers remaining.\n",
+				       priority ==
+				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+				       rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			return;
+		}
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			__free_pages(page, il->hw_params.rx_page_order);
+			return;
+		}
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		BUG_ON(rxb->page);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+		/* dma address must be no more than 36 bits */
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+		/* and also 256 byte aligned! */
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+		il->alloc_rxb_page++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+	}
+}
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+	unsigned long flags;
+
+	il4965_rx_allocate(il, GFP_KERNEL);
+
+	spin_lock_irqsave(&il->lock, flags);
+	il4965_rx_queue_restock(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+	il4965_rx_allocate(il, GFP_ATOMIC);
+
+	il4965_rx_queue_restock(il);
+}
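+
+/*
+ * il4965_rx_replenish() may sleep (GFP_KERNEL) and is meant for process
+ * context such as the replenish work item, while il4965_rx_replenish_now()
+ * uses GFP_ATOMIC so it can be called from contexts that must not sleep.
+ */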
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the list of pool entries and, for each entry whose
+ * page is non-NULL, unmaps and frees it.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	int i;
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+	}
+
+	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->bd_dma);
+	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+			  rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->bd = NULL;
+	rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+
+	/* stop Rx DMA */
+	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+	return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+	int idx = 0;
+	int band_offset = 0;
+
+	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = (rate_n_flags & 0xff);
+		return idx;
+		/* Legacy rate format, search for match in table */
+	} else {
+		if (band == IEEE80211_BAND_5GHZ)
+			band_offset = IL_FIRST_OFDM_RATE;
+		for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+				return idx - band_offset;
+	}
+
+	return -1;
+}
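+
+/*
+ * For example, a legacy 5 GHz frame is matched starting at
+ * IL_FIRST_OFDM_RATE and the result is returned relative to that offset,
+ * since the CCK rates only exist in the 2.4 GHz rate table that mac80211
+ * uses.
+ */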
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+	/* data from PHY/DSP regarding signal strength, etc.,
+	 *   contents are always there, not configurable by host.  */
+	struct il4965_rx_non_cfg_phy *ncphy =
+	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+	u32 agc =
+	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+	    IL49_AGC_DB_POS;
+
+	u32 valid_antennae =
+	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+	u8 max_rssi = 0;
+	u32 i;
+
+	/* Find max rssi among 3 possible receivers.
+	 * These values are measured by the digital signal processor (DSP).
+	 * They should stay fairly constant even as the signal strength varies,
+	 *   if the radio's automatic gain control (AGC) is working right.
+	 * AGC value (see below) will provide the "interesting" info. */
+	for (i = 0; i < 3; i++)
+		if (valid_antennae & (1 << i))
+			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+		max_rssi, agc);
+
+	/* dBm = max_rssi dB - agc dB - constant.
+	 * Higher AGC (higher radio gain) means lower signal. */
+	return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+	u32 decrypt_out = 0;
+
+	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+	    RX_RES_STATUS_STATION_FOUND)
+		decrypt_out |=
+		    (RX_RES_STATUS_STATION_FOUND |
+		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+	/* packet was not encrypted */
+	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+	    RX_RES_STATUS_SEC_TYPE_NONE)
+		return decrypt_out;
+
+	/* packet was encrypted with unknown alg */
+	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+	    RX_RES_STATUS_SEC_TYPE_ERR)
+		return decrypt_out;
+
+	/* decryption was not done in HW */
+	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+		return decrypt_out;
+
+	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+	case RX_RES_STATUS_SEC_TYPE_CCMP:
+		/* alg is CCM: check MIC only */
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+			/* Bad MIC */
+			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+		else
+			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+		break;
+
+	case RX_RES_STATUS_SEC_TYPE_TKIP:
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+			/* Bad TTAK */
+			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+			break;
+		}
+		/* fall through if TTAK OK */
+	default:
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+		else
+			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+		break;
+	}
+
+	D_RX("decrypt_in:0x%x  decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+	return decrypt_out;
+}
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+			       struct ieee80211_rx_status *stats)
+{
+	struct sk_buff *skb;
+	__le16 fc = hdr->frame_control;
+
+	/* We only process data packets if the interface is open */
+	if (unlikely(!il->is_open)) {
+		D_DROP("Dropping packet while interface is not open.\n");
+		return;
+	}
+
+	/* In case of HW accelerated crypto and bad decryption, drop */
+	if (!il->cfg->mod_params->sw_crypto &&
+	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+		return;
+
+	skb = dev_alloc_skb(128);
+	if (!skb) {
+		IL_ERR("dev_alloc_skb failed\n");
+		return;
+	}
+
+	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+	il_update_stats(il, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(il->hw, skb);
+	il->alloc_rxb_page--;
+	rxb->page = NULL;
+}
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT high-throughput N frames). */
+void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct ieee80211_hdr *header;
+	struct ieee80211_rx_status rx_status;
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_rx_phy_res *phy_res;
+	__le32 rx_pkt_status;
+	struct il_rx_mpdu_res_start *amsdu;
+	u32 len;
+	u32 ampdu_status;
+	u32 rate_n_flags;
+
+	/**
+	 * N_RX and N_RX_MPDU are handled differently.
+	 *	N_RX: physical layer info is in this buffer
+	 *	N_RX_MPDU: physical layer info was sent in separate
+	 *		command and cached in il->last_phy_res
+	 *
+	 * Here we set up local variables depending on which command is
+	 * received.
+	 */
+	if (pkt->hdr.cmd == N_RX) {
+		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+		header =
+		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+					     phy_res->cfg_phy_cnt);
+
+		len = le16_to_cpu(phy_res->byte_count);
+		rx_pkt_status =
+		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+				 phy_res->cfg_phy_cnt + len);
+		ampdu_status = le32_to_cpu(rx_pkt_status);
+	} else {
+		if (!il->_4965.last_phy_res_valid) {
+			IL_ERR("MPDU frame without cached PHY data\n");
+			return;
+		}
+		phy_res = &il->_4965.last_phy_res;
+		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+		len = le16_to_cpu(amsdu->byte_count);
+		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+		ampdu_status =
+		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+	}
+
+	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+		D_DROP("dsp size out of range [0,20]: %d/n",
+		       phy_res->cfg_phy_cnt);
+		return;
+	}
+
+	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+		return;
+	}
+
+	/* This will be used in several places later */
+	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+	/* rx_status carries information about the packet to mac80211 */
+	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+	rx_status.band =
+	    (phy_res->
+	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+	    IEEE80211_BAND_5GHZ;
+	rx_status.freq =
+	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+					   rx_status.band);
+	rx_status.rate_idx =
+	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+	rx_status.flag = 0;
+
+	/* TSF isn't reliable. In order to allow a smooth user experience,
+	 * this workaround doesn't propagate it to mac80211. */
+	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+
+	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
+	rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+	il_dbg_log_rx_data_frame(il, len, header);
+	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+		(unsigned long long)rx_status.mactime);
+
+	/*
+	 * "antenna number"
+	 *
+	 * It seems that the antenna field in the phy flags value
+	 * is actually a bit field. This is undefined by radiotap,
+	 * it wants an actual antenna number but I always get "7"
+	 * for most legacy frames I receive indicating that the
+	 * same frame was received on all three RX chains.
+	 *
+	 * I think this field should be removed in favor of a
+	 * new 802.11n radiotap field "RX chains" that is defined
+	 * as a bitmask.
+	 */
+	rx_status.antenna =
+	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+	    RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+	/* set the preamble flag if appropriate */
+	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.flag |= RX_FLAG_SHORTPRE;
+
+	/* Set up the HT phy flags */
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		rx_status.flag |= RX_FLAG_HT;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		rx_status.flag |= RX_FLAG_40MHZ;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		rx_status.flag |= RX_FLAG_SHORT_GI;
+
+	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+				       &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il_hdl_rx() for N_RX_MPDU. */
+void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	il->_4965.last_phy_res_valid = true;
+	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+	       sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+			     enum ieee80211_band band, u8 is_active,
+			     u8 n_probes, struct il_scan_channel *scan_ch)
+{
+	struct ieee80211_channel *chan;
+	const struct ieee80211_supported_band *sband;
+	const struct il_channel_info *ch_info;
+	u16 passive_dwell = 0;
+	u16 active_dwell = 0;
+	int added, i;
+	u16 channel;
+
+	sband = il_get_hw_mode(il, band);
+	if (!sband)
+		return 0;
+
+	active_dwell = il_get_active_dwell_time(il, band, n_probes);
+	passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+	if (passive_dwell <= active_dwell)
+		passive_dwell = active_dwell + 1;
+
+	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+		chan = il->scan_request->channels[i];
+
+		if (chan->band != band)
+			continue;
+
+		channel = chan->hw_value;
+		scan_ch->channel = cpu_to_le16(channel);
+
+		ch_info = il_get_channel_info(il, band, channel);
+		if (!il_is_channel_valid(ch_info)) {
+			D_SCAN("Channel %d is INVALID for this band.\n",
+			       channel);
+			continue;
+		}
+
+		if (!is_active || il_is_channel_passive(ch_info) ||
+		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+		else
+			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+		if (n_probes)
+			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+		scan_ch->active_dwell = cpu_to_le16(active_dwell);
+		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+		/* Set txpower levels to defaults */
+		scan_ch->dsp_atten = 110;
+
+		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
+		 * power level:
+		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+		 */
+		if (band == IEEE80211_BAND_5GHZ)
+			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+		else
+			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+		       le32_to_cpu(scan_ch->type),
+		       (scan_ch->
+			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
+		       (scan_ch->
+			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
+		       passive_dwell);
+
+		scan_ch++;
+		added++;
+	}
+
+	D_SCAN("total channels to scan %d\n", added);
+	return added;
+}
+
+static void
+il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
+{
+	int i;
+	u8 ind = *ant;
+
+	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+		if (valid & BIT(ind)) {
+			*ant = ind;
+			return;
+		}
+	}
+}
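+
+/*
+ * Example: with valid == (BIT(0) | BIT(1)), i.e. antennas A and B usable,
+ * repeated calls alternate *ant between 0 and 1, spreading the scan TX
+ * antenna across the valid chains.
+ */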
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_host_cmd cmd = {
+		.id = C_SCAN,
+		.len = sizeof(struct il_scan_cmd),
+		.flags = CMD_SIZE_HUGE,
+	};
+	struct il_scan_cmd *scan;
+	struct il_rxon_context *ctx = &il->ctx;
+	u32 rate_flags = 0;
+	u16 cmd_len;
+	u16 rx_chain = 0;
+	enum ieee80211_band band;
+	u8 n_probes = 0;
+	u8 rx_ant = il->hw_params.valid_rx_ant;
+	u8 rate;
+	bool is_active = false;
+	int chan_mod;
+	u8 active_chains;
+	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	ctx = il_rxon_ctx_from_vif(vif);
+
+	if (!il->scan_cmd) {
+		il->scan_cmd =
+		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+			    GFP_KERNEL);
+		if (!il->scan_cmd) {
+			D_SCAN("fail to allocate memory for scan\n");
+			return -ENOMEM;
+		}
+	}
+	scan = il->scan_cmd;
+	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+	if (il_is_any_associated(il)) {
+		u16 interval;
+		u32 extra;
+		u32 suspend_time = 100;
+		u32 scan_suspend_time = 100;
+
+		D_INFO("Scanning while associated...\n");
+		interval = vif->bss_conf.beacon_int;
+
+		scan->suspend_time = 0;
+		scan->max_out_time = cpu_to_le32(200 * 1024);
+		if (!interval)
+			interval = suspend_time;
+
+		extra = (suspend_time / interval) << 22;
+		scan_suspend_time =
+		    (extra | ((suspend_time % interval) * 1024));
+		scan->suspend_time = cpu_to_le32(scan_suspend_time);
+		D_SCAN("suspend_time 0x%X beacon interval %d\n",
+		       scan_suspend_time, interval);
+	}
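+	/*
+	 * Note on the encoding above: the quotient suspend_time / interval
+	 * (whole beacon intervals) is packed into the bits above bit 22,
+	 * while the remainder, scaled by 1024 (likely TU to usec), fills
+	 * the low bits.
+	 */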
+
+	if (il->scan_request->n_ssids) {
+		int i, p = 0;
+		D_SCAN("Kicking off active scan\n");
+		for (i = 0; i < il->scan_request->n_ssids; i++) {
+			/* always does wildcard anyway */
+			if (!il->scan_request->ssids[i].ssid_len)
+				continue;
+			scan->direct_scan[p].id = WLAN_EID_SSID;
+			scan->direct_scan[p].len =
+			    il->scan_request->ssids[i].ssid_len;
+			memcpy(scan->direct_scan[p].ssid,
+			       il->scan_request->ssids[i].ssid,
+			       il->scan_request->ssids[i].ssid_len);
+			n_probes++;
+			p++;
+		}
+		is_active = true;
+	} else
+		D_SCAN("Start passive scan.\n");
+
+	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	switch (il->scan_band) {
+	case IEEE80211_BAND_2GHZ:
+		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+		chan_mod =
+		    le32_to_cpu(il->ctx.active.
+				flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+		    RXON_FLG_CHANNEL_MODE_POS;
+		if (chan_mod == CHANNEL_MODE_PURE_40) {
+			rate = RATE_6M_PLCP;
+		} else {
+			rate = RATE_1M_PLCP;
+			rate_flags = RATE_MCS_CCK_MSK;
+		}
+		break;
+	case IEEE80211_BAND_5GHZ:
+		rate = RATE_6M_PLCP;
+		break;
+	default:
+		IL_WARN("Invalid scan band\n");
+		return -EIO;
+	}
+
+	/*
+	 * If active scanning is requested but a certain channel is
+	 * marked passive, we can do active scanning if we detect
+	 * transmissions.
+	 *
+	 * There is an issue with some firmware versions that triggers
+	 * a sysassert on a "good CRC threshold" of zero (== disabled),
+	 * on a radar channel even though this means that we should NOT
+	 * send probes.
+	 *
+	 * The "good CRC threshold" is the number of frames that we
+	 * need to receive during our dwell time on a channel before
+	 * sending out probes -- setting this to a huge value will
+	 * mean we never reach it, but at the same time work around
+	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+	 * here instead of IL_GOOD_CRC_TH_DISABLED.
+	 */
+	scan->good_CRC_th =
+	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+	band = il->scan_band;
+
+	if (il->cfg->scan_rx_antennas[band])
+		rx_ant = il->cfg->scan_rx_antennas[band];
+
+	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
+	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
+	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+	/* In power save mode use one chain, otherwise use all chains */
+	if (test_bit(S_POWER_PMI, &il->status)) {
+		/* rx_ant has been set to all valid chains previously */
+		active_chains =
+		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
+		if (!active_chains)
+			active_chains = rx_ant;
+
+		D_SCAN("chain_noise_data.active_chains: %u\n",
+		       il->chain_noise_data.active_chains);
+
+		rx_ant = il4965_first_antenna(active_chains);
+	}
+
+	/* MIMO is not used here, but value is required */
+	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+	scan->rx_chain = cpu_to_le16(rx_chain);
+
+	cmd_len =
+	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+			      vif->addr, il->scan_request->ie,
+			      il->scan_request->ie_len,
+			      IL_MAX_SCAN_SIZE - sizeof(*scan));
+	scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+	scan->filter_flags |=
+	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+	scan->channel_count =
+	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+					 (void *)&scan->data[cmd_len]);
+	if (scan->channel_count == 0) {
+		D_SCAN("channel count %d\n", scan->channel_count);
+		return -EIO;
+	}
+
+	cmd.len +=
+	    le16_to_cpu(scan->tx_cmd.len) +
+	    scan->channel_count * sizeof(struct il_scan_channel);
+	cmd.data = scan;
+	scan->len = cpu_to_le16(cmd.len);
+
+	set_bit(S_SCAN_HW, &il->status);
+
+	ret = il_send_cmd_sync(il, &cmd);
+	if (ret)
+		clear_bit(S_SCAN_HW, &il->status);
+
+	return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+			   bool add)
+{
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	if (add)
+		return il4965_add_bssid_station(il, vif_priv->ctx,
+						vif->bss_conf.bssid,
+						&vif_priv->ibss_bssid_sta_id);
+	return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+				 vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+	lockdep_assert_held(&il->sta_lock);
+
+	if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		D_TX("free more than tfds_in_queue (%u:%d)\n",
+		     il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+		il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+
+#define IL_TX_QUEUE_MSK	0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+	return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+	    il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE	3
+#define IL_NUM_RX_CHAINS_SINGLE	2
+#define IL_NUM_IDLE_CHAINS_DUAL	2
+#define IL_NUM_IDLE_CHAINS_SINGLE	1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity.  Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+	/* # of Rx chains to use when expecting MIMO. */
+	if (il4965_is_single_rx_stream(il))
+		return IL_NUM_RX_CHAINS_SINGLE;
+	else
+		return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+	/* # Rx chains when idling, depending on SMPS mode */
+	switch (il->current_ht_config.smps) {
+	case IEEE80211_SMPS_STATIC:
+	case IEEE80211_SMPS_DYNAMIC:
+		return IL_NUM_IDLE_CHAINS_SINGLE;
+	case IEEE80211_SMPS_OFF:
+		return active_cnt;
+	default:
+		WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+		return active_cnt;
+	}
+}
+
+/* up to 4 chains */
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+	u8 res;
+	res = (chain_bitmap & BIT(0)) >> 0;
+	res += (chain_bitmap & BIT(1)) >> 1;
+	res += (chain_bitmap & BIT(2)) >> 2;
+	res += (chain_bitmap & BIT(3)) >> 3;
+	return res;
+}
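+
+/*
+ * This is simply a population count of the low four bits; hweight8() on
+ * (chain_bitmap & 0xf) would give the same result.
+ */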
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command ... it puts data in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	bool is_single = il4965_is_single_rx_stream(il);
+	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+	u32 active_chains;
+	u16 rx_chain;
+
+	/* Tell uCode which antennas are actually connected.
+	 * Before first association, we assume all antennas are connected.
+	 * Just after first association, il4965_chain_noise_calibration()
+	 *    checks which antennas actually *are* connected. */
+	if (il->chain_noise_data.active_chains)
+		active_chains = il->chain_noise_data.active_chains;
+	else
+		active_chains = il->hw_params.valid_rx_ant;
+
+	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+	/* How many receivers should we use? */
+	active_rx_cnt = il4965_get_active_rx_chain_count(il);
+	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+	/* correct rx chain count according to hw settings
+	 * and chain noise calibration
+	 */
+	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+	if (valid_rx_cnt < active_rx_cnt)
+		active_rx_cnt = valid_rx_cnt;
+
+	if (valid_rx_cnt < idle_rx_cnt)
+		idle_rx_cnt = valid_rx_cnt;
+
+	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+	ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+	else
+		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+		active_rx_cnt, idle_rx_cnt);
+
+	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+		active_rx_cnt < idle_rx_cnt);
+}
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
+	switch (cmd) {
+		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+		IL_CMD(FH49_TSSR_TX_STATUS_REG);
+		IL_CMD(FH49_TSSR_TX_ERROR_REG);
+	default:
+		return "UNKNOWN";
+	}
+}
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+	int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	int pos = 0;
+	size_t bufsz = 0;
+#endif
+	static const u32 fh_tbl[] = {
+		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+		FH49_RSCSR_CHNL0_WPTR,
+		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+		FH49_MEM_RSSR_SHARED_CTRL_REG,
+		FH49_MEM_RSSR_RX_STATUS_REG,
+		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+		FH49_TSSR_TX_STATUS_REG,
+		FH49_TSSR_TX_ERROR_REG
+	};
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (display) {
+		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+		pos +=
+		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+			pos +=
+			    scnprintf(*buf + pos, bufsz - pos,
+				      "  %34s: 0X%08x\n",
+				      il4965_get_fh_string(fh_tbl[i]),
+				      il_rd(il, fh_tbl[i]));
+		}
+		return pos;
+	}
+#endif
+	IL_ERR("FH register values:\n");
+	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+		       il_rd(il, fh_tbl[i]));
+	}
+	return 0;
+}
+
+void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_missed_beacon_notif *missed_beacon;
+
+	missed_beacon = &pkt->u.missed_beacon;
+	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+	    il->missed_beacon_threshold) {
+		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+			le32_to_cpu(missed_beacon->total_missed_becons),
+			le32_to_cpu(missed_beacon->num_recvd_beacons),
+			le32_to_cpu(missed_beacon->num_expected_beacons));
+		if (!test_bit(S_SCANNING, &il->status))
+			il4965_init_sensitivity(il);
+	}
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ *   before the beacon arrives.  This measurement can be done only if we know
+ *   exactly when to expect beacons, therefore only when we're associated. */
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+	struct stats_rx_non_phy *rx_info;
+	int num_active_rx = 0;
+	int total_silence = 0;
+	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+	int last_rx_noise;
+
+	rx_info = &(il->_4965.stats.rx.general);
+	bcn_silence_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	bcn_silence_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	bcn_silence_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+	if (bcn_silence_a) {
+		total_silence += bcn_silence_a;
+		num_active_rx++;
+	}
+	if (bcn_silence_b) {
+		total_silence += bcn_silence_b;
+		num_active_rx++;
+	}
+	if (bcn_silence_c) {
+		total_silence += bcn_silence_c;
+		num_active_rx++;
+	}
+
+	/* Average among active antennas */
+	if (num_active_rx)
+		last_rx_noise = (total_silence / num_active_rx) - 107;
+	else
+		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+		bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
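+
+/*
+ * Chains reporting a zero silence value are left out of the average above,
+ * so an unconnected or disabled antenna does not drag the estimated noise
+ * floor down; with no active chains at all the noise is marked unavailable.
+ */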
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ *  Based on the assumption that all stats counters are DWORDs.
+ *  FIXME: this function is for debugging and does not deal with
+ *  the case of counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+	int i, size;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+	struct stats_general_common *general, *accum_general;
+	struct stats_tx *tx, *accum_tx;
+
+	prev_stats = (__le32 *) &il->_4965.stats;
+	accum_stats = (u32 *) &il->_4965.accum_stats;
+	size = sizeof(struct il_notif_stats);
+	general = &il->_4965.stats.general.common;
+	accum_general = &il->_4965.accum_stats.general.common;
+	tx = &il->_4965.stats.tx;
+	accum_tx = &il->_4965.accum_stats.tx;
+	delta = (u32 *) &il->_4965.delta_stats;
+	max_delta = (u32 *) &il->_4965.max_delta;
+
+	for (i = sizeof(__le32); i < size;
+	     i +=
+	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+	     accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta =
+			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative stats for "no-counter" type stats */
+	accum_general->temperature = general->temperature;
+	accum_general->ttl_timestamp = general->ttl_timestamp;
+}
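+
+/*
+ * Example: if a counter was 10 in the previous notification and is 25 now,
+ * delta becomes 15, the accumulated value grows by 15 and max_delta is
+ * updated if this is the largest jump seen so far.  A counter that wrapped
+ * (new value smaller than the old one) is skipped, per the FIXME above.
+ */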
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	int change;
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	D_RX("Statistics notification received (%d vs %d).\n",
+	     (int)sizeof(struct il_notif_stats),
+	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
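+	/* Note whether temperature or HT40 mode changed; if so, the
+	 * temperature handler is invoked at the end of this function */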
+	change =
+	    ((il->_4965.stats.general.common.temperature !=
+	      pkt->u.stats.general.common.temperature) ||
+	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+	/* TODO: reading some of stats is unneeded */
+	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+	set_bit(S_STATS, &il->status);
+
+	/* Reschedule the stats timer to occur in
+	 * REG_RECALIB_PERIOD seconds to ensure we get a
+	 * thermal update even if the uCode doesn't give
+	 * us one */
+	mod_timer(&il->stats_periodic,
+		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+	    (pkt->hdr.cmd == N_STATS)) {
+		il4965_rx_calc_noise(il);
+		queue_work(il->workqueue, &il->run_time_calib_work);
+	}
+	if (il->cfg->ops->lib->temp_ops.temperature && change)
+		il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+		memset(&il->_4965.accum_stats, 0,
+		       sizeof(struct il_notif_stats));
+		memset(&il->_4965.delta_stats, 0,
+		       sizeof(struct il_notif_stats));
+		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+		D_RX("Statistics have been cleared\n");
+	}
+	il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ *     VO      0
+ *     VI      1
+ *     BE      2
+ *     BK      3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
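+/* Standard 802.11 TID (0-7) to mac80211 access category mapping */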
+static const u8 tid_to_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return tid_to_ac[tid];
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
+/*
+ * Build the basic part of the C_TX command.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+			  struct il_tx_cmd *tx_cmd,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_hdr *hdr, u8 std_id)
+{
+	__le16 fc = hdr->frame_control;
+	__le32 tx_flags = tx_cmd->tx_flags;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if (ieee80211_is_mgmt(fc))
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_resp(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	if (ieee80211_is_back_req(fc))
+		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
+			 struct ieee80211_tx_info *info, __le16 fc)
+{
+	const u8 rts_retry_limit = 60;
+	u32 rate_flags;
+	int rate_idx;
+	u8 data_retry_limit;
+	u8 rate_plcp;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+	/* Set retry limit on RTS packets */
+	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+	/* DATA packets will use the uCode station table for rate/antenna
+	 * selection */
+	if (ieee80211_is_data(fc)) {
+		tx_cmd->initial_rate_idx = 0;
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+		return;
+	}
+
+	/**
+	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
+	 * not really a TX rate.  Thus, we use the lowest supported rate for
+	 * this band.  Also use the lowest supported rate if the stored rate
+	 * idx is invalid.
+	 */
+	rate_idx = info->control.rates[0].idx;
+	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
+	    || rate_idx > RATE_COUNT_LEGACY)
+		rate_idx =
+		    rate_lowest_index(&il->bands[info->band],
+				      info->control.sta);
+	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+	if (info->band == IEEE80211_BAND_5GHZ)
+		rate_idx += IL_FIRST_OFDM_RATE;
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = il_rates[rate_idx].plcp;
+	/* Zero out flags for this packet */
+	rate_flags = 0;
+
+	/* Set CCK flag as needed */
+	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	/* Set up antennas */
+	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+	rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+
+	/* Set the rate in the TX cmd */
+	tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+			     int sta_id)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+		D_TX("tx_cmd with AES hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+		D_TX("tx_cmd with tkip hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |=
+		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+		     TX_CMD_SEC_SHIFT);
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+		D_TX("Configuring packet for WEP encryption " "with key %d\n",
+		     keyconf->keyidx);
+		break;
+
+	default:
+		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+		break;
+	}
+}
+
+/*
+ * start C_TX command process
+ */
+int
+il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct il_station_priv *sta_priv = NULL;
+	struct il_tx_queue *txq;
+	struct il_queue *q;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	struct il_tx_cmd *tx_cmd;
+	struct il_rxon_context *ctx = &il->ctx;
+	int txq_id;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	u16 len, firstlen, secondlen;
+	u16 seq_number = 0;
+	__le16 fc;
+	u8 hdr_len;
+	u8 sta_id;
+	u8 wait_write_ptr = 0;
+	u8 tid = 0;
+	u8 *qc = NULL;
+	unsigned long flags;
+	bool is_agg = false;
+
+	if (info->control.vif)
+		ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (il_is_rfkill(il)) {
+		D_DROP("Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (ieee80211_is_auth(fc))
+		D_TX("Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		D_TX("Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		D_TX("Sending REASSOC frame\n");
+#endif
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* For management frames use broadcast id so as not to break aggregation */
+	if (!ieee80211_is_data(fc))
+		sta_id = ctx->bcast_sta_id;
+	else {
+		/* Find idx into station table for destination station */
+		sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+		if (sta_id == IL_INVALID_STATION) {
+			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+			goto drop_unlock;
+		}
+	}
+
+	D_TX("station Id %d\n", sta_id);
+
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_priv->asleep &&
+	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+	}
+
+	/*
+	 * Send this frame after DTIM -- there's a special queue
+	 * reserved for this for contexts that support AP mode.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		txq_id = ctx->mcast_queue;
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	} else
+		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+	/* irqs already disabled/saved above when locking il->lock */
+	spin_lock(&il->sta_lock);
+
+	if (ieee80211_is_data_qos(fc)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+			spin_unlock(&il->sta_lock);
+			goto drop_unlock;
+		}
+		seq_number = il->stations[sta_id].tid[tid].seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl =
+		    hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
+		seq_number += 0x10;
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+			is_agg = true;
+		}
+	}
+
+	txq = &il->txq[txq_id];
+	q = &txq->q;
+
+	if (unlikely(il_queue_space(q) < q->high_mark)) {
+		spin_unlock(&il->sta_lock);
+		goto drop_unlock;
+	}
+
+	if (ieee80211_is_data_qos(fc)) {
+		il->stations[sta_id].tid[tid].tfds_in_queue++;
+		if (!ieee80211_has_morefrags(fc))
+			il->stations[sta_id].tid[tid].seq_number = seq_number;
+	}
+
+	spin_unlock(&il->sta_lock);
+
+	/* Set up driver data for this TFD */
+	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+	txq->txb[q->write_ptr].skb = skb;
+	txq->txb[q->write_ptr].ctx = ctx;
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = txq->cmd[q->write_ptr];
+	out_meta = &txq->meta[q->write_ptr];
+	tx_cmd = &out_cmd->cmd.tx;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD idx within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = C_TX;
+	out_cmd->hdr.sequence =
+	    cpu_to_le16((u16)
+			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	/* Total # bytes to be transmitted */
+	len = (u16) skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	if (info->control.hw_key)
+		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+	il_dbg_log_tx_data_frame(il, len, hdr);
+
+	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+	il_update_stats(il, true, fc, len);
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
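+	/* Round the Tx command + MAC header length up to a dword boundary */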
+	firstlen = (len + 3) & ~3;
+
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (firstlen != len)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys =
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+			   PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, firstlen);
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
+						 1, 0);
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
+		phys_addr =
+		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+				   PCI_DMA_TODEVICE);
+		il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+							 secondlen, 0, 0);
+	}
+
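+	/* Physical address of the Tx command's scratch field, handed to the
+	 * device via dram_lsb_ptr/dram_msb_ptr below */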
+	scratch_phys =
+	    txcmd_phys + sizeof(struct il_cmd_header) +
+	    offsetof(struct il_tx_cmd, scratch);
+
+	/* take back ownership of DMA buffer to enable update */
+	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+				    PCI_DMA_BIDIRECTIONAL);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+							   le16_to_cpu(tx_cmd->
+								       len));
+
+	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+				       PCI_DMA_BIDIRECTIONAL);
+
+	/* Tell device the write idx *just past* this latest filled TFD */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+
+	/*
+	 * Avoid atomic ops if it isn't an associated client.
+	 * Also, if this is a packet for aggregation, don't
+	 * increase the counter because the ucode will stop
+	 * aggregation queues when their respective station
+	 * goes to sleep.
+	 */
+	if (sta_priv && sta_priv->client && !is_agg)
+		atomic_inc(&sta_priv->pending_frames);
+
+	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&il->lock, flags);
+			txq->need_update = 1;
+			il_txq_update_write_ptr(il, txq);
+			spin_unlock_irqrestore(&il->lock, flags);
+		} else {
+			il_stop_queue(il, txq);
+		}
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&il->lock, flags);
+	return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+	ptr->addr =
+	    dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+	int txq_id;
+
+	/* Tx queues */
+	if (il->txq) {
+		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+			if (txq_id == il->cmd_queue)
+				il_cmd_queue_free(il);
+			else
+				il_tx_queue_free(il, txq_id);
+	}
+	il4965_free_dma_ptr(il, &il->kw);
+
+	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+	/* free tx queue structure */
+	il_free_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param il
+ * @return error code
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+	int ret;
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	/* Free all tx/cmd queues and keep-warm buffer */
+	il4965_hw_txq_ctx_free(il);
+
+	ret =
+	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+				 il->hw_params.scd_bc_tbls_size);
+	if (ret) {
+		IL_ERR("Scheduler BC Table allocation failed\n");
+		goto error_bc_tbls;
+	}
+	/* Alloc keep-warm buffer */
+	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+	if (ret) {
+		IL_ERR("Keep Warm allocation failed\n");
+		goto error_kw;
+	}
+
+	/* allocate tx queue structure */
+	ret = il_alloc_txq_mem(il);
+	if (ret)
+		goto error;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	il4965_txq_set_sched(il, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		slots_num =
+		    (txq_id ==
+		     il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+		if (ret) {
+			IL_ERR("Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return ret;
+
+error:
+	il4965_hw_txq_ctx_free(il);
+	il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+	return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	il4965_txq_set_sched(il, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		slots_num =
+		    txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+	}
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+	int ch, txq_id;
+	unsigned long flags;
+
+	/* Turn off all Tx DMA fifos */
+	spin_lock_irqsave(&il->lock, flags);
+
+	il4965_txq_set_sched(il, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		if (il_poll_bit
+		    (il, FH49_TSSR_TX_STATUS_REG,
+		     FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
+			IL_ERR("Failing on timeout while stopping"
+			       " DMA channel %d [0x%08x]", ch,
+			       il_rd(il, FH49_TSSR_TX_STATUS_REG));
+	}
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (!il->txq)
+		return;
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		if (txq_id == il->cmd_queue)
+			il_cmd_queue_unmap(il);
+		else
+			il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+	int txq_id;
+
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+			return txq_id;
+	return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+	/* Simply stop the queue, but don't change any configuration;
+	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+	u32 tbl_dw_addr;
+	u32 tbl_dw;
+	u16 scd_q2ratid;
+
+	scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+	tbl_dw_addr =
+	    il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+	tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
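+	/* Each translation table dword holds two queue entries: odd queues
+	 * use the upper 16 bits, even queues the lower 16 bits */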
+	if (txq_id & 0x1)
+		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+	else
+		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+	il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+	return 0;
+}
+
+/**
+ * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE:  txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
+ *        i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+		      int tid, u16 ssn_idx)
+{
+	unsigned long flags;
+	u16 ra_tid;
+	int ret;
+
+	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IL49_FIRST_AMPDU_QUEUE +
+	     il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+		IL_WARN("queue number out of range: %d, must be %d to %d\n",
+			txq_id, IL49_FIRST_AMPDU_QUEUE,
+			IL49_FIRST_AMPDU_QUEUE +
+			il->cfg->base_params->num_of_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
+	ra_tid = BUILD_RAxTID(sta_id, tid);
+
+	/* Modify device's station table to Tx this TID */
+	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Stop this Tx queue before configuring it */
+	il4965_tx_queue_stop_scheduler(il, txq_id);
+
+	/* Map receiver-address / traffic-ID to this queue */
+	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+	/* Set this queue as a chain-building queue */
+	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	/* Place first TFD at idx corresponding to start sequence number.
+	 * Assumes that ssn_idx is valid (!= 0xFFF) */
+	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+	/* Set up Tx win size and frame limit for this queue */
+	il_write_targ_mem(il,
+			  il->scd_base_addr +
+			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+	il_write_targ_mem(il,
+			  il->scd_base_addr +
+			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			  (SCD_FRAME_LIMIT <<
+			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+	int sta_id;
+	int tx_fifo;
+	int txq_id;
+	int ret;
+	unsigned long flags;
+	struct il_tid_data *tid_data;
+
+	tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+	if (unlikely(tx_fifo < 0))
+		return tx_fifo;
+
+	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Start AGG on invalid station\n");
+		return -ENXIO;
+	}
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
+	txq_id = il4965_txq_ctx_activate_free(il);
+	if (txq_id == -1) {
+		IL_ERR("No free aggregation queue available\n");
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	tid_data = &il->stations[sta_id].tid[tid];
+	*ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	tid_data = &il->stations[sta_id].tid[tid];
+	if (tid_data->tfds_in_queue == 0) {
+		D_HT("HW queue is empty\n");
+		tid_data->agg.state = IL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	} else {
+		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+		     tid_data->tfds_in_queue);
+		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return ret;
+}
+
+/**
+ * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
+ * il->lock must be held by the caller
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IL49_FIRST_AMPDU_QUEUE +
+	     il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+		IL_WARN("queue number out of range: %d, must be %d to %d\n",
+			txq_id, IL49_FIRST_AMPDU_QUEUE,
+			IL49_FIRST_AMPDU_QUEUE +
+			il->cfg->base_params->num_of_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
+	il4965_tx_queue_stop_scheduler(il, txq_id);
+
+	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	/* assumes that ssn_idx is valid (!= 0xFFF) */
+	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+	il_txq_ctx_deactivate(il, txq_id);
+	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+	return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta, u16 tid)
+{
+	int tx_fifo_id, txq_id, sta_id, ssn;
+	struct il_tid_data *tid_data;
+	int write_ptr, read_ptr;
+	unsigned long flags;
+
+	tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+	if (unlikely(tx_fifo_id < 0))
+		return tx_fifo_id;
+
+	sta_id = il_sta_id(sta);
+
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	tid_data = &il->stations[sta_id].tid[tid];
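+	/* Recover the 12-bit start sequence number from the stored seq_ctrl */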
+	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+	txq_id = tid_data->agg.txq_id;
+
+	switch (il->stations[sta_id].tid[tid].agg.state) {
+	case IL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		D_HT("AGG stop before setup done\n");
+		goto turn_off;
+	case IL_AGG_ON:
+		break;
+	default:
+		IL_WARN("Stopping AGG while state not ON or starting\n");
+	}
+
+	write_ptr = il->txq[txq_id].q.write_ptr;
+	read_ptr = il->txq[txq_id].q.read_ptr;
+
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		D_HT("Stopping a non empty AGG HW QUEUE\n");
+		il->stations[sta_id].tid[tid].agg.state =
+		    IL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	D_HT("HW queue is empty\n");
+turn_off:
+	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&il->sta_lock);
+	spin_lock(&il->lock);
+
+	/*
+	 * The only reason this call can fail is a queue number out of range,
+	 * which can happen if the uCode is reloaded and all the station
+	 * information is lost. If it is outside the range, there is no need
+	 * to deactivate the uCode queue; just return "success" to allow
+	 * mac80211 to clean up its own data.
+	 */
+	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+	return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+	struct il_queue *q = &il->txq[txq_id].q;
+	u8 *addr = il->stations[sta_id].sta.sta.addr;
+	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+	struct il_rxon_context *ctx;
+
+	ctx = &il->ctx;
+
+	lockdep_assert_held(&il->sta_lock);
+
+	switch (il->stations[sta_id].tid[tid].agg.state) {
+	case IL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the */
+		/* aggregated HW queue */
+		if (txq_id == tid_data->agg.txq_id &&
+		    q->read_ptr == q->write_ptr) {
+			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+			int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+			D_HT("HW queue empty: continue DELBA flow\n");
+			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+			tid_data->agg.state = IL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+		}
+		break;
+	case IL_EMPTYING_HW_QUEUE_ADDBA:
+		/* We are reclaiming the last packet of the queue */
+		if (tid_data->tfds_in_queue == 0) {
+			D_HT("HW queue empty: continue ADDBA flow\n");
+			tid_data->agg.state = IL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
+			 const u8 *addr1)
+{
+	struct ieee80211_sta *sta;
+	struct il_station_priv *sta_priv;
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(ctx->vif, addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(il->hw, sta, false);
+	}
+	rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+	if (!is_agg)
+		il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+	ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	struct il_tx_info *tx_info;
+	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
+
+	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+		       q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		tx_info = &txq->txb[txq->q.read_ptr];
+
+		if (WARN_ON_ONCE(tx_info->skb == NULL))
+			continue;
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+		if (ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
+
+		il4965_tx_status(il, tx_info,
+				 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+		tx_info->skb = NULL;
+
+		il->cfg->ops->lib->txq_free_tfd(il, txq);
+	}
+	return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+				     struct il_compressed_ba_resp *ba_resp)
+{
+	int i, sh, ack;
+	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+	int successes = 0;
+	struct ieee80211_tx_info *info;
+	u64 bitmap, sent_bitmap;
+
+	if (unlikely(!agg->wait_for_ba)) {
+		if (unlikely(ba_resp->bitmap))
+			IL_ERR("Received BA when not expected\n");
+		return -EINVAL;
+	}
+
+	/* Mark that the expected block-ack response arrived */
+	agg->wait_for_ba = 0;
+	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+	/* Calculate shift to align block-ack bits with our Tx win bits */
+	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+	if (sh < 0)		/* something is wrong with the indices */
+		sh += 0x100;
+
+	if (agg->frame_count > (64 - sh)) {
+		D_TX_REPLY("more frames than bitmap size");
+		return -1;
+	}
+
+	/* don't use 64-bit values for now */
+	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+	/* check for success or failure according to the
+	 * transmitted bitmap and block-ack bitmap */
+	sent_bitmap = bitmap & agg->bitmap;
+
+	/* For each frame attempted in aggregation,
+	 * update driver's record of tx frame's status. */
+	i = 0;
+	while (sent_bitmap) {
+		ack = sent_bitmap & 1ULL;
+		successes += ack;
+		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+		sent_bitmap >>= 1;
+		++i;
+	}
+
+	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+	info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+	memset(&info->status, 0, sizeof(info->status));
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_AMPDU;
+	info->status.ampdu_ack_len = successes;
+	info->status.ampdu_len = agg->frame_count;
+	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+	return 0;
+}
+
+/**
+ * il4965_hwrate_to_tx_control - translate uCode rate_n_flags to mac80211 tx info
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+			    struct ieee80211_tx_info *info)
+{
+	struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+	info->antenna_sel_tx =
+	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		r->flags |= IEEE80211_TX_RC_MCS;
+	if (rate_n_flags & RATE_MCS_GF_MSK)
+		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+	if (rate_n_flags & RATE_MCS_DUP_MSK)
+		r->flags |= IEEE80211_TX_RC_DUP_DATA;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		r->flags |= IEEE80211_TX_RC_SHORT_GI;
+	r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+	struct il_tx_queue *txq = NULL;
+	struct il_ht_agg *agg;
+	int idx;
+	int sta_id;
+	int tid;
+	unsigned long flags;
+
+	/* "flow" corresponds to Tx queue */
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+	/* "ssn" is start of block-ack Tx win, corresponds to idx
+	 * (in Tx queue's circular buffer) of first TFD/frame in win */
+	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+	if (scd_flow >= il->hw_params.max_txq_num) {
+		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+		return;
+	}
+
+	txq = &il->txq[scd_flow];
+	sta_id = ba_resp->sta_id;
+	tid = ba_resp->tid;
+	agg = &il->stations[sta_id].tid[tid].agg;
+	if (unlikely(agg->txq_id != scd_flow)) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can happen very often, and in order not to fill
+		 * the syslog, don't enable the logging by default.
+		 */
+		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+			   scd_flow, agg->txq_id);
+		return;
+	}
+
+	/* Find idx just before block-ack win */
+	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+		   ba_resp->sta_id);
+	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+		   ba_resp->scd_flow, ba_resp->scd_ssn);
+	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+		   (unsigned long long)agg->bitmap);
+
+	/* Update driver's record of ACK vs. not for each frame in win */
+	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+	/* Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack win (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway). */
+	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+		/* calculate mac80211 ampdu sw queue to wake */
+		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+		il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+		if (il_queue_space(&txq->q) > txq->q.low_mark &&
+		    il->mac80211_registered &&
+		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+			il_wake_queue(il, txq);
+
+		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+	}
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+		TX_STATUS_POSTPONE(DELAY);
+		TX_STATUS_POSTPONE(FEW_BYTES);
+		TX_STATUS_POSTPONE(QUIET_PERIOD);
+		TX_STATUS_POSTPONE(CALC_TTAK);
+		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+		TX_STATUS_FAIL(SHORT_LIMIT);
+		TX_STATUS_FAIL(LONG_LIMIT);
+		TX_STATUS_FAIL(FIFO_UNDERRUN);
+		TX_STATUS_FAIL(DRAIN_FLOW);
+		TX_STATUS_FAIL(RFKILL_FLUSH);
+		TX_STATUS_FAIL(LIFE_EXPIRE);
+		TX_STATUS_FAIL(DEST_PS);
+		TX_STATUS_FAIL(HOST_ABORTED);
+		TX_STATUS_FAIL(BT_RETRY);
+		TX_STATUS_FAIL(STA_INVALID);
+		TX_STATUS_FAIL(FRAG_DROPPED);
+		TX_STATUS_FAIL(TID_DISABLE);
+		TX_STATUS_FAIL(FIFO_FLUSHED);
+		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+		TX_STATUS_FAIL(PASSIVE_NO_RX);
+		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+	int i, r;
+	struct il_link_quality_cmd *link_cmd;
+	u32 rate_flags = 0;
+	__le32 rate_n_flags;
+
+	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+	if (!link_cmd) {
+		IL_ERR("Unable to allocate memory for LQ cmd.\n");
+		return NULL;
+	}
+	/* Set up the rate scaling to start at selected rate, fall back
+	 * all the way down to 1M in IEEE order, and then spin on 1M */
+	if (il->band == IEEE80211_BAND_5GHZ)
+		r = RATE_6M_IDX;
+	else
+		r = RATE_1M_IDX;
+
+	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	rate_flags |=
+	    il4965_first_antenna(il->hw_params.
+				 valid_tx_ant) << RATE_MCS_ANT_POS;
+	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+	link_cmd->general_params.single_stream_ant_msk =
+	    il4965_first_antenna(il->hw_params.valid_tx_ant);
+
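+	/* Dual-stream mask covers every valid antenna except the first one;
+	 * fall back to ANT_AB when only a single antenna is valid */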
+	link_cmd->general_params.dual_stream_ant_msk =
+	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+							       valid_tx_ant);
+	if (!link_cmd->general_params.dual_stream_ant_msk) {
+		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+		link_cmd->general_params.dual_stream_ant_msk =
+		    il->hw_params.valid_tx_ant;
+	}
+
+	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+	link_cmd->agg_params.agg_time_limit =
+	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+	link_cmd->sta_id = sta_id;
+
+	return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+			 const u8 *addr, u8 *sta_id_r)
+{
+	int ret;
+	u8 sta_id;
+	struct il_link_quality_cmd *link_cmd;
+	unsigned long flags;
+
+	if (sta_id_r)
+		*sta_id_r = IL_INVALID_STATION;
+
+	ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM\n", addr);
+		return ret;
+	}
+
+	if (sta_id_r)
+		*sta_id_r = sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].used |= IL_STA_LOCAL;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	/* Set up default rate scaling table in device's station table */
+	link_cmd = il4965_sta_alloc_lq(il, sta_id);
+	if (!link_cmd) {
+		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+		       addr);
+		return -ENOMEM;
+	}
+
+	ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+	if (ret)
+		IL_ERR("Link quality command failed (%d)\n", ret);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+			 bool send_if_empty)
+{
+	int i, not_empty = 0;
+	u8 buff[sizeof(struct il_wep_cmd) +
+		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+	size_t cmd_size = sizeof(struct il_wep_cmd);
+	struct il_host_cmd cmd = {
+		.id = ctx->wep_key_cmd,
+		.data = wep_cmd,
+		.flags = CMD_SYNC,
+	};
+
+	might_sleep();
+
+	memset(wep_cmd, 0,
+	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+	for (i = 0; i < WEP_KEYS_MAX; i++) {
+		wep_cmd->key[i].key_idx = i;
+		if (ctx->wep_keys[i].key_size) {
+			wep_cmd->key[i].key_offset = i;
+			not_empty = 1;
+		} else {
+			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+		}
+
+		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+		       ctx->wep_keys[i].key_size);
+	}
+
+	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+	wep_cmd->num_keys = WEP_KEYS_MAX;
+
+	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+
+	cmd.len = cmd_size;
+
+	if (not_empty || send_if_empty)
+		return il_send_cmd(il, &cmd);
+	else
+		return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	lockdep_assert_held(&il->mutex);
+
+	return il4965_static_wepkey_cmd(il, ctx, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+			      struct ieee80211_key_conf *keyconf)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+
+	memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+	if (il_is_rfkill(il)) {
+		D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+		/* but keys in device are clear anyway so return success */
+		return 0;
+	}
+	ret = il4965_static_wepkey_cmd(il, ctx, true);
+	D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+
+	return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+			   struct ieee80211_key_conf *keyconf)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (keyconf->keylen != WEP_KEY_LEN_128 &&
+	    keyconf->keylen != WEP_KEY_LEN_64) {
+		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+		return -EINVAL;
+	}
+
+	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+	keyconf->hw_key_idx = HW_KEY_DEFAULT;
+	il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+	       keyconf->keylen);
+
+	ret = il4965_static_wepkey_cmd(il, ctx, false);
+	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
+	      keyconf->keyidx, ret);
+
+	return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+				struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	__le16 key_flags = 0;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	if (keyconf->keylen == WEP_KEY_LEN_128)
+		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+	if (sta_id == ctx->bcast_sta_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+	       keyconf->keylen);
+
+	if ((il->stations[sta_id].sta.key.
+	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+				 struct il_rxon_context *ctx,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	__le16 key_flags = 0;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	if (sta_id == ctx->bcast_sta_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+	if ((il->stations[sta_id].sta.key.
+	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+				 struct il_rxon_context *ctx,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	int ret = 0;
+	__le16 key_flags = 0;
+
+	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	if (sta_id == ctx->bcast_sta_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = 16;
+
+	if ((il->stations[sta_id].sta.key.
+	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+
+	/* This copy is actually not needed: we get the key with each TX */
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+		       struct ieee80211_key_conf *keyconf,
+		       struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+	u8 sta_id;
+	unsigned long flags;
+	int i;
+
+	if (il_scan_cancel(il)) {
+		/* cancelling the scan failed; just live w/ the bad key and
+		   rely briefly on SW decryption */
+		return;
+	}
+
+	sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+	if (sta_id == IL_INVALID_STATION)
+		return;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
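+	/* Program the new IV32 and phase-1 TKIP key (TTAK) into the device */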
+	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+	for (i = 0; i < 5; i++)
+		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+		    cpu_to_le16(phase1key[i]);
+
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+			  struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	u16 key_flags;
+	u8 keyidx;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	ctx->key_mapping_keys--;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
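+	/* Key idx currently programmed into the device for this station */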
+	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+	if (keyconf->keyidx != keyidx) {
+		/* We need to remove a key whose idx differs from the one
+		 * in the uCode. This means that the key we need to remove has
+		 * been replaced by another one with a different idx.
+		 * Don't do anything and return ok.
+		 */
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+			key_flags);
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	if (!test_and_clear_bit
+	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
+		IL_ERR("idx %d not used in uCode key table.\n",
+		       il->stations[sta_id].sta.key.key_offset);
+	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+	il->stations[sta_id].sta.key.key_flags =
+	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	if (il_is_rfkill(il)) {
+		D_WEP
+		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+		       struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	ctx->key_mapping_keys++;
+	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		ret =
+		    il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		ret =
+		    il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+		break;
+	default:
+		IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+		       keyconf->cipher);
+		ret = -EINVAL;
+	}
+
+	D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+	return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	struct il_link_quality_cmd *link_cmd;
+	unsigned long flags;
+	u8 sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Unable to prepare broadcast station\n");
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+
+		return -EINVAL;
+	}
+
+	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+	il->stations[sta_id].used |= IL_STA_BCAST;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	link_cmd = il4965_sta_alloc_lq(il, sta_id);
+	if (!link_cmd) {
+		IL_ERR
+		    ("Unable to initialize rate scaling for bcast station.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	unsigned long flags;
+	struct il_link_quality_cmd *link_cmd;
+	u8 sta_id = ctx->bcast_sta_id;
+
+	link_cmd = il4965_sta_alloc_lq(il, sta_id);
+	if (!link_cmd) {
+		IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	if (il->stations[sta_id].lq)
+		kfree(il->stations[sta_id].lq);
+	else
+		D_INFO("Bcast sta rate scaling has not been initialized.\n");
+	il->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+	return il4965_update_bcast_station(il, &il->ctx);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+	unsigned long flags;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	/* Remove "disable" flag, to enable Tx for this TID */
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+			u16 ssn)
+{
+	unsigned long flags;
+	int sta_id;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION)
+		return -ENXIO;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.station_flags_msk = 0;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+	unsigned long flags;
+	int sta_id;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.station_flags_msk = 0;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
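+/* Mark the station as power-saving in the device's station table and tell
+ * the uCode how many frames it may send to the station while it sleeps. */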
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+	il->stations[sta_id].sta.sta.modify_mask =
+	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
+	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+	if (il->cfg->ops->hcmd->set_rxon_chain) {
+		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+		if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
+			il_commit_rxon(il, &il->ctx);
+	}
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+	struct list_head *element;
+
+	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+	while (!list_empty(&il->free_frames)) {
+		element = il->free_frames.next;
+		list_del(element);
+		kfree(list_entry(element, struct il_frame, list));
+		il->frames_count--;
+	}
+
+	if (il->frames_count) {
+		IL_WARN("%d frames still in use.  Did we lose one?\n",
+			il->frames_count);
+		il->frames_count = 0;
+	}
+}
+
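+/* Return a frame from the free list, allocating a new one if the list is empty */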
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+	struct il_frame *frame;
+	struct list_head *element;
+	if (list_empty(&il->free_frames)) {
+		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+		if (!frame) {
+			IL_ERR("Could not allocate frame!\n");
+			return NULL;
+		}
+
+		il->frames_count++;
+		return frame;
+	}
+
+	element = il->free_frames.next;
+	list_del(element);
+	return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+	memset(frame, 0, sizeof(*frame));
+	list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+			 int left)
+{
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->beacon_skb)
+		return 0;
+
+	if (il->beacon_skb->len > left)
+		return 0;
+
+	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+	return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 *beacon,
+		      u32 frame_size)
+{
+	u16 tim_idx;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+	/*
+	 * The idx is relative to frame start but we start looking at the
+	 * variable-length part of the beacon.
+	 */
+	tim_idx = mgmt->u.beacon.variable - beacon;
+
+	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+	while ((tim_idx < (frame_size - 2)) &&
+	       (beacon[tim_idx] != WLAN_EID_TIM))
+		tim_idx += beacon[tim_idx + 1] + 2;
+
+	/* If TIM field was found, set variables */
+	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+	} else
+		IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+	struct il_tx_beacon_cmd *tx_beacon_cmd;
+	u32 frame_size;
+	u32 rate_flags;
+	u32 rate;
+	/*
+	 * We have to set up the TX command, the TX Beacon command, and the
+	 * beacon contents.
+	 */
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->beacon_ctx) {
+		IL_ERR("trying to build beacon w/o beacon context!\n");
+		return 0;
+	}
+
+	/* Initialize memory */
+	tx_beacon_cmd = &frame->u.beacon;
+	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+	/* Set up TX beacon contents */
+	frame_size =
+	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+		return 0;
+	if (!frame_size)
+		return 0;
+
+	/* Set up TX command fields */
+	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+	tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_beacon_cmd->tx.tx_flags =
+	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+	    TX_CMD_FLG_STA_RATE_MSK;
+
+	/* Set up TX beacon command fields */
+	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+			      frame_size);
+
+	/* Set up packet rate and flags */
+	rate = il_get_lowest_plcp(il, il->beacon_ctx);
+	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+	return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+	struct il_frame *frame;
+	unsigned int frame_size;
+	int rc;
+
+	frame = il4965_get_free_frame(il);
+	if (!frame) {
+		IL_ERR("Could not obtain free frame buffer for beacon "
+		       "command.\n");
+		return -ENOMEM;
+	}
+
+	frame_size = il4965_hw_get_beacon_cmd(il, frame);
+	if (!frame_size) {
+		IL_ERR("Error configuring the beacon command\n");
+		il4965_free_frame(il, frame);
+		return -EINVAL;
+	}
+
+	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+	il4965_free_frame(il, frame);
+
+	return rc;
+}
+
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+	struct il_tfd_tb *tb = &tfd->tbs[idx];
+
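+	/* tb->lo holds address bits 0-31; the low nibble of hi_n_len holds bits 32-35 */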
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+		    16;
+
+	return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+	struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
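+/* Pack a 36-bit DMA address and 12-bit length into TB slot @idx and bump the TB count */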
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+	struct il_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write idxes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+	struct il_tfd *tfd;
+	struct pci_dev *dev = il->pci_dev;
+	int idx = txq->q.read_ptr;
+	int i;
+	int num_tbs;
+
+	tfd = &tfd_tmp[idx];
+
+	/* Sanity check on number of chunks */
+	num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IL_NUM_OF_TBS) {
+		IL_ERR("Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, this is quite a serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+				 dma_unmap_len(&txq->meta[idx], len),
+				 PCI_DMA_BIDIRECTIONAL);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++)
+		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+				 il4965_tfd_tb_get_len(tfd, i),
+				 PCI_DMA_TODEVICE);
+
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;
+
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
+		}
+	}
+}
+
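+/* Attach one buffer (addr/len) to the next free TB slot of the TFD at the queue's write pointer */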
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+	struct il_queue *q;
+	struct il_tfd *tfd, *tfd_tmp;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd_tmp = (struct il_tfd *)txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum of 20 Tx buffers */
+	if (num_tbs >= IL_NUM_OF_TBS) {
+		IL_ERR("Error can not send more than %d chunks\n",
+		       IL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (unlikely(addr & ~IL_TX_DMA_MASK))
+		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+	il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+	int txq_id = txq->q.id;
+
+	/* Circular buffer (TFD queue in DRAM) physical base address */
+	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_alive_resp *palive;
+	struct delayed_work *pwork;
+
+	palive = &pkt->u.alive_frame;
+
+	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+	       palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+		D_INFO("Initialization Alive received.\n");
+		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+		       sizeof(struct il_init_alive_resp));
+		pwork = &il->init_alive_start;
+	} else {
+		D_INFO("Runtime Alive received.\n");
+		memcpy(&il->card_alive, &pkt->u.alive_frame,
+		       sizeof(struct il_alive_resp));
+		pwork = &il->alive_start;
+	}
+
+	/* We delay the ALIVE response by 5ms to
+	 * give the HW RF Kill time to activate... */
+	if (palive->is_valid == UCODE_VALID_OK)
+		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+	else
+		IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last N_STATS
+ * was received.  We need to ensure we receive the stats in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(unsigned long data)
+{
+	struct il_priv *il = (struct il_priv *)data;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	/* don't send host command if rf-kill is on */
+	if (!il_is_ready_rf(il))
+		return;
+
+	il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il4965_beacon_notif *beacon =
+	    (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
+	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+	     beacon->beacon_notify_hdr.failure_frame,
+	     le32_to_cpu(beacon->ibss_mgr_status),
+	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+	unsigned long flags;
+
+	D_POWER("Stop all queues\n");
+
+	if (il->mac80211_registered)
+		ieee80211_stop_queues(il->hw);
+
+	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
+	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+	_il_rd(il, CSR_UCODE_DRV_GP1);
+
+	spin_lock_irqsave(&il->reg_lock, flags);
+	if (!_il_grab_nic_access(il))
+		_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+	unsigned long status = il->status;
+
+	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
+		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+		if (!(flags & RXON_CARD_DISABLED)) {
+			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+			il_wr(il, HBUS_TARG_MBX_C,
+			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+		}
+	}
+
+	if (flags & CT_CARD_DISABLED)
+		il4965_perform_ct_kill_task(il);
+
+	if (flags & HW_CARD_DISABLED)
+		set_bit(S_RF_KILL_HW, &il->status);
+	else
+		clear_bit(S_RF_KILL_HW, &il->status);
+
+	if (!(flags & RXON_CARD_DISABLED))
+		il_scan_cancel(il);
+
+	if ((test_bit(S_RF_KILL_HW, &status) !=
+	     test_bit(S_RF_KILL_HW, &il->status)))
+		wiphy_rfkill_set_hw_state(il->hw->wiphy,
+					  test_bit(S_RF_KILL_HW, &il->status));
+	else
+		wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+	il->handlers[N_ALIVE] = il4965_hdl_alive;
+	il->handlers[N_ERROR] = il_hdl_error;
+	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+	il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+	/*
+	 * The same handler is used for both the REPLY to a discrete
+	 * stats request from the host and for the periodic
+	 * stats notifications (after received beacons) from the uCode.
+	 */
+	il->handlers[C_STATS] = il4965_hdl_c_stats;
+	il->handlers[N_STATS] = il4965_hdl_stats;
+
+	il_setup_rx_scan_handlers(il);
+
+	/* status change handler */
+	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+	/* Rx handlers */
+	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+	/* block ack */
+	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+	/* Set up hardware specific Rx handlers */
+	il->cfg->ops->lib->handler_setup(il);
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+	struct il_rx_buf *rxb;
+	struct il_rx_pkt *pkt;
+	struct il_rx_queue *rxq = &il->rxq;
+	u32 r, i;
+	int reclaim;
+	unsigned long flags;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty;
+
+	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
+	 * buffer that the driver may process (last buffer filled by ucode). */
+	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+	i = rxq->read;
+
+	/* Rx interrupt, but nothing sent from uCode */
+	if (i == r)
+		D_RX("r = %d, i = %d\n", r, i);
+
+	/* calculate total frames that need to be restocked after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
+	while (i != r) {
+		int len;
+
+		rxb = rxq->queue[i];
+
+		/* If an RXB doesn't have a Rx queue slot associated with it,
+		 * then a bug has been introduced in the queue refilling
+		 * routines -- catch it here */
+		BUG_ON(rxb == NULL);
+
+		rxq->queue[i] = NULL;
+
+		pci_unmap_page(il->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << il->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
+
+		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+		len += sizeof(u32);	/* account for status word */
+
+		/* Reclaim a command buffer only if this packet is a response
+		 *   to a (driver-originated) command.
+		 * If the packet (e.g. Rx frame) originated from uCode,
+		 *   there is no command buffer to reclaim.
+		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+		 *   but apparently a few don't get set; catch them here. */
+		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+		    (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
+		    (pkt->hdr.cmd != N_RX_MPDU) &&
+		    (pkt->hdr.cmd != N_COMPRESSED_BA) &&
+		    (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+
+		/* Based on type of command response or notification,
+		 *   handle those that need handling via function in
+		 *   handlers table.  See il4965_setup_handlers() */
+		if (il->handlers[pkt->hdr.cmd]) {
+			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+			il->isr_stats.handlers[pkt->hdr.cmd]++;
+			il->handlers[pkt->hdr.cmd] (il, rxb);
+		} else {
+			/* No handling needed */
+			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+		}
+
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt). Because some handler might have
+		 * already taken or freed the pages.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking il_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (rxb->page)
+				il_tx_cmd_complete(il, rxb);
+			else
+				IL_WARN("Claim null rxb?\n");
+		}
+
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (rxb->page != NULL) {
+			rxb->page_dma =
+			    pci_map_page(il->pci_dev, rxb->page, 0,
+					 PAGE_SIZE << il->hw_params.rx_page_order,
+					 PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		i = (i + 1) & RX_QUEUE_MASK;
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode won't assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				il4965_rx_replenish_now(il);
+				count = 0;
+			}
+		}
+	}
+
+	/* Backtrack one entry */
+	rxq->read = i;
+	if (fill_rx)
+		il4965_rx_replenish_now(il);
+	else
+		il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+	/* wait to make sure we flush pending tasklet */
+	synchronize_irq(il->pci_dev->irq);
+	tasklet_kill(&il->irq_tasklet);
+}
+
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+	u32 inta, handled = 0;
+	u32 inta_fh;
+	unsigned long flags;
+	u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u32 inta_mask;
+#endif
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+	inta = _il_rd(il, CSR_INT);
+	_il_wr(il, CSR_INT, inta);
+
+	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
+	 * Any new interrupts that happen after this, either while we're
+	 * in this tasklet, or later, will show up in next ISR/tasklet. */
+	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & IL_DL_ISR) {
+		/* just for debug */
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+		      inta_mask, inta_fh);
+	}
+#endif
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+	 * atomic, make sure that inta covers all the interrupts that
+	 * we've discovered, even if FH interrupt came in just after
+	 * reading CSR_INT. */
+	if (inta_fh & CSR49_FH_INT_RX_MASK)
+		inta |= CSR_INT_BIT_FH_RX;
+	if (inta_fh & CSR49_FH_INT_TX_MASK)
+		inta |= CSR_INT_BIT_FH_TX;
+
+	/* Now service all interrupt bits discovered above. */
+	if (inta & CSR_INT_BIT_HW_ERR) {
+		IL_ERR("Hardware error detected.  Restarting.\n");
+
+		/* Tell the device to stop sending interrupts */
+		il_disable_interrupts(il);
+
+		il->isr_stats.hw++;
+		il_irq_handle_error(il);
+
+		handled |= CSR_INT_BIT_HW_ERR;
+
+		return;
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		/* NIC fires this, but we don't use it, redundant with WAKEUP */
+		if (inta & CSR_INT_BIT_SCD) {
+			D_ISR("Scheduler finished transmitting "
+			      "the frame(s).\n");
+			il->isr_stats.sch++;
+		}
+
+		/* Alive notification via Rx interrupt will do the real work */
+		if (inta & CSR_INT_BIT_ALIVE) {
+			D_ISR("Alive interrupt\n");
+			il->isr_stats.alive++;
+		}
+	}
+#endif
+	/* Safely ignore these bits for debug checks below */
+	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+	/* HW RF KILL switch toggled */
+	if (inta & CSR_INT_BIT_RF_KILL) {
+		int hw_rf_kill = 0;
+		if (!(_il_rd(il, CSR_GP_CNTRL) &
+		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+			hw_rf_kill = 1;
+
+		IL_WARN("RF_KILL bit toggled to %s.\n",
+			hw_rf_kill ? "disable radio" : "enable radio");
+
+		il->isr_stats.rfkill++;
+
+		/* The driver only loads ucode when setting the interface up.
+		 * It allows loading the ucode even if the radio
+		 * is killed. Hence update the killswitch state here. The
+		 * rfkill handler will take care of restarting if needed.
+		 */
+		if (!test_bit(S_ALIVE, &il->status)) {
+			if (hw_rf_kill)
+				set_bit(S_RF_KILL_HW, &il->status);
+			else
+				clear_bit(S_RF_KILL_HW, &il->status);
+			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+		}
+
+		handled |= CSR_INT_BIT_RF_KILL;
+	}
+
+	/* Chip got too hot and stopped itself */
+	if (inta & CSR_INT_BIT_CT_KILL) {
+		IL_ERR("Microcode CT kill error detected.\n");
+		il->isr_stats.ctkill++;
+		handled |= CSR_INT_BIT_CT_KILL;
+	}
+
+	/* Error detected by uCode */
+	if (inta & CSR_INT_BIT_SW_ERR) {
+		IL_ERR("Microcode SW error detected. Restarting 0x%X.\n",
+		       inta);
+		il->isr_stats.sw++;
+		il_irq_handle_error(il);
+		handled |= CSR_INT_BIT_SW_ERR;
+	}
+
+	/*
+	 * uCode wakes up after power-down sleep.
+	 * Tell device about any new tx or host commands enqueued,
+	 * and about any Rx buffers made available while asleep.
+	 */
+	if (inta & CSR_INT_BIT_WAKEUP) {
+		D_ISR("Wakeup interrupt\n");
+		il_rx_queue_update_write_ptr(il, &il->rxq);
+		for (i = 0; i < il->hw_params.max_txq_num; i++)
+			il_txq_update_write_ptr(il, &il->txq[i]);
+		il->isr_stats.wakeup++;
+		handled |= CSR_INT_BIT_WAKEUP;
+	}
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here */
+	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+		il4965_rx_handle(il);
+		il->isr_stats.rx++;
+		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+	}
+
+	/* This "Tx" DMA channel is used only for loading uCode */
+	if (inta & CSR_INT_BIT_FH_TX) {
+		D_ISR("uCode load interrupt\n");
+		il->isr_stats.tx++;
+		handled |= CSR_INT_BIT_FH_TX;
+		/* Wake up uCode load routine, now that load is complete */
+		il->ucode_write_complete = 1;
+		wake_up(&il->wait_command_queue);
+	}
+
+	if (inta & ~handled) {
+		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+		il->isr_stats.unhandled++;
+	}
+
+	if (inta & ~(il->inta_mask)) {
+		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+			inta & ~il->inta_mask);
+		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
+	}
+
+	/* Re-enable all interrupts */
+	/* only Re-enable if disabled by irq */
+	if (test_bit(S_INT_ENABLED, &il->status))
+		il_enable_interrupts(il);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		inta = _il_rd(il, CSR_INT);
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+	}
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret)
+		IL_ERR("%s is not in hex or decimal form.\n", buf);
+	else {
+		il->debug_level = val;
+		if (il_alloc_traffic_mem(il))
+			IL_ERR("Not enough memory to generate traffic log\n");
+	}
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
+		   il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_ready_rf(il))
+		return sprintf(buf, "off\n");
+	else
+		return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret)
+		IL_INFO("%s is not in decimal form.\n", buf);
+	else {
+		ret = il_set_tx_power(il, val, false);
+		if (ret)
+			IL_ERR("failed setting tx power (%d).\n", ret);
+		else
+			ret = count;
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
+		   il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+	&dev_attr_temperature.attr,
+	&dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+	&dev_attr_debug_level.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group il_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+	il_free_fw_desc(il->pci_dev, &il->ucode_code);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+	/* Remove all resets to allow NIC to operate */
+	_il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+				  void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+	const char *name_pre = il->cfg->fw_name_pre;
+	char tag[8];
+
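+	/* Walk firmware API versions downward from ucode_api_max toward ucode_api_min */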
+	if (first) {
+		il->fw_idx = il->cfg->ucode_api_max;
+		sprintf(tag, "%d", il->fw_idx);
+	} else {
+		il->fw_idx--;
+		sprintf(tag, "%d", il->fw_idx);
+	}
+
+	if (il->fw_idx < il->cfg->ucode_api_min) {
+		IL_ERR("no suitable firmware found!\n");
+		return -ENOENT;
+	}
+
+	sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+	D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+	return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+				       &il->pci_dev->dev, GFP_KERNEL, il,
+				       il4965_ucode_callback);
+}
+
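+/* Pointers to, and sizes of, the individual sections of the firmware image */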
+struct il4965_firmware_pieces {
+	const void *inst, *data, *init, *init_data, *boot;
+	size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+		     struct il4965_firmware_pieces *pieces)
+{
+	struct il_ucode_header *ucode = (void *)ucode_raw->data;
+	u32 api_ver, hdr_size;
+	const u8 *src;
+
+	il->ucode_ver = le32_to_cpu(ucode->ver);
+	api_ver = IL_UCODE_API(il->ucode_ver);
+
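+	/* API versions 0-2 (and anything unrecognized) use the 24-byte v1 header layout */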
+	switch (api_ver) {
+	default:
+	case 0:
+	case 1:
+	case 2:
+		hdr_size = 24;
+		if (ucode_raw->size < hdr_size) {
+			IL_ERR("File size too small!\n");
+			return -EINVAL;
+		}
+		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+		src = ucode->v1.data;
+		break;
+	}
+
+	/* Verify size of file vs. image size info in file's header */
+	if (ucode_raw->size !=
+	    hdr_size + pieces->inst_size + pieces->data_size +
+	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+		IL_ERR("uCode file size %d does not match expected size\n",
+		       (int)ucode_raw->size);
+		return -EINVAL;
+	}
+
+	pieces->inst = src;
+	src += pieces->inst_size;
+	pieces->data = src;
+	src += pieces->data_size;
+	pieces->init = src;
+	src += pieces->init_size;
+	pieces->init_data = src;
+	src += pieces->init_data_size;
+	pieces->boot = src;
+	src += pieces->boot_size;
+
+	return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+	struct il_priv *il = context;
+	struct il_ucode_header *ucode;
+	int err;
+	struct il4965_firmware_pieces pieces;
+	const unsigned int api_max = il->cfg->ucode_api_max;
+	const unsigned int api_min = il->cfg->ucode_api_min;
+	u32 api_ver;
+
+	u32 max_probe_length = 200;
+	u32 standard_phy_calibration_size =
+	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+	memset(&pieces, 0, sizeof(pieces));
+
+	if (!ucode_raw) {
+		if (il->fw_idx <= il->cfg->ucode_api_max)
+			IL_ERR("request for firmware file '%s' failed.\n",
+			       il->firmware_name);
+		goto try_again;
+	}
+
+	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+	       ucode_raw->size);
+
+	/* Make sure that we got at least the API version number */
+	if (ucode_raw->size < 4) {
+		IL_ERR("File size way too small!\n");
+		goto try_again;
+	}
+
+	/* Data from ucode file:  header followed by uCode images */
+	ucode = (struct il_ucode_header *)ucode_raw->data;
+
+	err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+	if (err)
+		goto try_again;
+
+	api_ver = IL_UCODE_API(il->ucode_ver);
+
+	/*
+	 * api_ver should match the api version forming part of the
+	 * firmware filename ... but we don't check for that and only rely
+	 * on the API version read from firmware header from here on forward
+	 */
+	if (api_ver < api_min || api_ver > api_max) {
+		IL_ERR("Driver unable to support your firmware API. "
+		       "Driver supports v%u, firmware is v%u.\n", api_max,
+		       api_ver);
+		goto try_again;
+	}
+
+	if (api_ver != api_max)
+		IL_ERR("Firmware has old API version. Expected v%u, "
+		       "got v%u. New firmware can be obtained "
+		       "from http://www.intellinuxwireless.org.\n", api_max,
+		       api_ver);
+
+	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+		 IL_UCODE_SERIAL(il->ucode_ver));
+
+	/*
+	 * For any of the failures below (before allocating pci memory)
+	 * we will try to load a version with a smaller API -- maybe the
+	 * user just got a corrupted version of the latest API.
+	 */
+
+	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+	D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
+	D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
+	D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
+	D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
+	D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
+
+	/* Verify that uCode images will fit in card's SRAM */
+	if (pieces.inst_size > il->hw_params.max_inst_size) {
+		IL_ERR("uCode instr len %Zd too large to fit in\n",
+		       pieces.inst_size);
+		goto try_again;
+	}
+
+	if (pieces.data_size > il->hw_params.max_data_size) {
+		IL_ERR("uCode data len %Zd too large to fit in\n",
+		       pieces.data_size);
+		goto try_again;
+	}
+
+	if (pieces.init_size > il->hw_params.max_inst_size) {
+		IL_ERR("uCode init instr len %Zd too large to fit in\n",
+		       pieces.init_size);
+		goto try_again;
+	}
+
+	if (pieces.init_data_size > il->hw_params.max_data_size) {
+		IL_ERR("uCode init data len %Zd too large to fit in\n",
+		       pieces.init_data_size);
+		goto try_again;
+	}
+
+	if (pieces.boot_size > il->hw_params.max_bsm_size) {
+		IL_ERR("uCode boot instr len %Zd too large to fit in\n",
+		       pieces.boot_size);
+		goto try_again;
+	}
+
+	/* Allocate ucode buffers for card's bus-master loading ... */
+
+	/* Runtime instructions and 2 copies of data:
+	 * 1) unmodified from disk
+	 * 2) backup cache for save/restore during power-downs */
+	il->ucode_code.len = pieces.inst_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+	il->ucode_data.len = pieces.data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+	il->ucode_data_backup.len = pieces.data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+	    !il->ucode_data_backup.v_addr)
+		goto err_pci_alloc;
+
+	/* Initialization instructions and data */
+	if (pieces.init_size && pieces.init_data_size) {
+		il->ucode_init.len = pieces.init_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+		il->ucode_init_data.len = pieces.init_data_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Bootstrap (instructions only, no data) */
+	if (pieces.boot_size) {
+		il->ucode_boot.len = pieces.boot_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+		if (!il->ucode_boot.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Now that we can no longer fail, copy information */
+
+	il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+	/* Copy images into buffers for card's bus-master reads ... */
+
+	/* Runtime instructions (first block of data in file) */
+	D_INFO("Copying (but not loading) uCode instr len %Zd\n",
+	       pieces.inst_size);
+	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+	/*
+	 * Runtime data
+	 * NOTE:  Copy into backup buffer will be done in il_up()
+	 */
+	D_INFO("Copying (but not loading) uCode data len %Zd\n",
+	       pieces.data_size);
+	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+	/* Initialization instructions */
+	if (pieces.init_size) {
+		D_INFO("Copying (but not loading) init instr len %Zd\n",
+		       pieces.init_size);
+		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+	}
+
+	/* Initialization data */
+	if (pieces.init_data_size) {
+		D_INFO("Copying (but not loading) init data len %Zd\n",
+		       pieces.init_data_size);
+		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+		       pieces.init_data_size);
+	}
+
+	/* Bootstrap instructions */
+	D_INFO("Copying (but not loading) boot instr len %Zd\n",
+	       pieces.boot_size);
+	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+	/*
+	 * figure out the offset of chain noise reset and gain commands
+	 * based on the size of the standard phy calibration commands table
+	 */
+	il->_4965.phy_calib_chain_noise_reset_cmd =
+	    standard_phy_calibration_size;
+	il->_4965.phy_calib_chain_noise_gain_cmd =
+	    standard_phy_calibration_size + 1;
+
+	/**************************************************
+	 * This is still part of probe() in a sense...
+	 *
+	 * 9. Setup and register with mac80211 and debugfs
+	 **************************************************/
+	err = il4965_mac_setup_register(il, max_probe_length);
+	if (err)
+		goto out_unbind;
+
+	err = il_dbgfs_register(il, DRV_NAME);
+	if (err)
+		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+		       err);
+
+	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+	if (err) {
+		IL_ERR("failed to create sysfs device attributes\n");
+		goto out_unbind;
+	}
+
+	/* We have our copies now, allow the OS to release its copies */
+	release_firmware(ucode_raw);
+	complete(&il->_4965.firmware_loading_complete);
+	return;
+
+try_again:
+	/* try next, if any */
+	if (il4965_request_firmware(il, false))
+		goto out_unbind;
+	release_firmware(ucode_raw);
+	return;
+
+err_pci_alloc:
+	IL_ERR("failed to allocate pci memory\n");
+	il4965_dealloc_ucode_pci(il);
+out_unbind:
+	complete(&il->_4965.firmware_loading_complete);
+	device_release_driver(&il->pci_dev->dev);
+	release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+	"OK",
+	"FAIL",
+	"BAD_PARAM",
+	"BAD_CHECKSUM",
+	"NMI_INTERRUPT_WDG",
+	"SYSASSERT",
+	"FATAL_ERROR",
+	"BAD_COMMAND",
+	"HW_ERROR_TUNE_LOCK",
+	"HW_ERROR_TEMPERATURE",
+	"ILLEGAL_CHAN_FREQ",
+	"VCC_NOT_STBL",
+	"FH49_ERROR",
+	"NMI_INTERRUPT_HOST",
+	"NMI_INTERRUPT_ACTION_PT",
+	"NMI_INTERRUPT_UNKNOWN",
+	"UCODE_VERSION_MISMATCH",
+	"HW_ERROR_ABS_LOCK",
+	"HW_ERROR_CAL_LOCK_FAIL",
+	"NMI_INTERRUPT_INST_ACTION_PT",
+	"NMI_INTERRUPT_DATA_ACTION_PT",
+	"NMI_TRM_HW_ER",
+	"NMI_INTERRUPT_TRM",
+	"NMI_INTERRUPT_BREAK_POINT",
+	"DEBUG_0",
+	"DEBUG_1",
+	"DEBUG_2",
+	"DEBUG_3",
+};
+
+static struct {
+	char *name;
+	u8 num;
+} advanced_lookup[] = {
+	{ "NMI_INTERRUPT_WDG", 0x34 },
+	{ "SYSASSERT", 0x35 },
+	{ "UCODE_VERSION_MISMATCH", 0x37 },
+	{ "BAD_COMMAND", 0x38 },
+	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+	{ "FATAL_ERROR", 0x3D },
+	{ "NMI_TRM_HW_ERR", 0x46 },
+	{ "NMI_INTERRUPT_TRM", 0x4C },
+	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
+	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
+	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "ADVANCED_SYSASSERT", 0 },
+};
+
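+/* Map a uCode error code to a name; codes not in the basic table are matched
+ * against advanced_lookup, falling back to "ADVANCED_SYSASSERT". */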
+static const char *
+il4965_desc_lookup(u32 num)
+{
+	int i;
+	int max = ARRAY_SIZE(desc_lookup_text);
+
+	if (num < max)
+		return desc_lookup_text[num];
+
+	max = ARRAY_SIZE(advanced_lookup) - 1;
+	for (i = 0; i < max; i++) {
+		if (advanced_lookup[i].num == num)
+			break;
+	}
+	return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+	u32 data2, line;
+	u32 desc, time, count, base, data1;
+	u32 blink1, blink2, ilink1, ilink2;
+	u32 pc, hcmd;
+
+	if (il->ucode_type == UCODE_INIT)
+		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+	else
+		base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+	if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
+		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+		return;
+	}
+
+	count = il_read_targ_mem(il, base);
+
+	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+		IL_ERR("Start IWL Error Log Dump:\n");
+		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+	}
+
+	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+	il->isr_stats.err_code = desc;
+	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+	IL_ERR("Desc                                  Time       "
+	       "data1      data2      line\n");
+	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
+	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
+	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+	       blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+	struct il_ct_kill_config cmd;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&il->lock, flags);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	cmd.critical_temperature_R =
+	    cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+	ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+	if (ret)
+		IL_ERR("C_CT_KILL_CONFIG failed\n");
+	else
+		D_INFO("C_CT_KILL_CONFIG " "succeeded, "
+		       "critical temperature is %d\n",
+		       il->hw_params.ct_kill_threshold);
+}
+
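+/* Default mapping of Tx queue index to hardware FIFO; queue 4 carries host commands */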
+static const s8 default_queue_to_tx_fifo[] = {
+	IL_TX_FIFO_VO,
+	IL_TX_FIFO_VI,
+	IL_TX_FIFO_BE,
+	IL_TX_FIFO_BK,
+	IL49_CMD_FIFO_NUM,
+	IL_TX_FIFO_UNUSED,
+	IL_TX_FIFO_UNUSED,
+};
+
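+/* Build a bitmask with bits lo through hi (inclusive) set */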
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+	u32 a;
+	unsigned long flags;
+	int i, chan;
+	u32 reg_val;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Clear 4965's internal Tx Scheduler data base */
+	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+		il_write_targ_mem(il, a, 0);
+	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+		il_write_targ_mem(il, a, 0);
+	for (;
+	     a <
+	     il->scd_base_addr +
+	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+	     a += 4)
+		il_write_targ_mem(il, a, 0);
+
+	/* Tell 4965 where to find Tx byte count tables */
+	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
+
+	/* Enable DMA channel */
+	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+	/* Update FH chicken bits */
+	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+	/* Disable chain mode for all queues */
+	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+	/* Initialize each Tx queue (including the command queue) */
+	for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+		/* TFD circular buffer read/write idxes */
+		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+		/* Max Tx Window size for Scheduler-ACK mode */
+		il_write_targ_mem(il,
+				  il->scd_base_addr +
+				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+				  (SCD_WIN_SIZE <<
+				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+		/* Frame limit */
+		il_write_targ_mem(il,
+				  il->scd_base_addr +
+				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+				  sizeof(u32),
+				  (SCD_FRAME_LIMIT <<
+				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+	}
+	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+		   (1 << il->hw_params.max_txq_num) - 1);
+
+	/* Activate all Tx DMA/FIFO channels */
+	il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+	/* make sure all queues are not stopped */
+	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+	for (i = 0; i < 4; i++)
+		atomic_set(&il->queue_stop_count[i], 0);
+
+	/* reset to 0 to enable all queues first */
+	il->txq_ctx_active_msk = 0;
+	/* Map each Tx/cmd queue to its corresponding fifo */
+	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+		int ac = default_queue_to_tx_fifo[i];
+
+		il_txq_ctx_activate(il, i);
+
+		if (ac == IL_TX_FIFO_UNUSED)
+			continue;
+
+		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+	}
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	return 0;
+}
+
+/**
+ * il4965_alive_start - called after N_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+	int ret = 0;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	D_INFO("Runtime Alive received.\n");
+
+	if (il->card_alive.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		D_INFO("Alive failed.\n");
+		goto restart;
+	}
+
+	/* The initialize uCode has loaded the runtime uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "runtime" alive if code weren't properly loaded.  */
+	if (il4965_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad runtime uCode load.\n");
+		goto restart;
+	}
+
+	ret = il4965_alive_notify(il);
+	if (ret) {
+		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+		goto restart;
+	}
+
+	/* After the ALIVE response, we can send host commands to the uCode */
+	set_bit(S_ALIVE, &il->status);
+
+	/* Enable watchdog to monitor the driver tx queues */
+	il_setup_watchdog(il);
+
+	if (il_is_rfkill(il))
+		return;
+
+	ieee80211_wake_queues(il->hw);
+
+	il->active_rate = RATES_MASK;
+
+	if (il_is_associated_ctx(ctx)) {
+		struct il_rxon_cmd *active_rxon =
+		    (struct il_rxon_cmd *)&ctx->active;
+		/* apply any changes in staging */
+		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	} else {
+		/* Initialize our rx_config data */
+		il_connection_init_rx_config(il, &il->ctx);
+
+		if (il->cfg->ops->hcmd->set_rxon_chain)
+			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+	}
+
+	/* Configure bluetooth coexistence if enabled */
+	il_send_bt_config(il);
+
+	il4965_reset_run_time_calib(il);
+
+	set_bit(S_READY, &il->status);
+
+	/* Configure the adapter for unassociated operation */
+	il_commit_rxon(il, ctx);
+
+	/* At this point, the NIC is initialized and operational */
+	il4965_rf_kill_ct_config(il);
+
+	D_INFO("ALIVE processing complete.\n");
+	wake_up(&il->wait_command_queue);
+
+	il_power_update_mode(il, true);
+	D_INFO("Updated power mode\n");
+
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
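+/* Take the NIC all the way down: cancel scans, stop queues and interrupts,
+ * and put the device into a low power state. */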
+static void
+__il4965_down(struct il_priv *il)
+{
+	unsigned long flags;
+	int exit_pending;
+
+	D_INFO(DRV_NAME " is going down\n");
+
+	il_scan_cancel_timeout(il, 200);
+
+	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
+	 * to prevent the timer from being rearmed */
+	del_timer_sync(&il->watchdog);
+
+	il_clear_ucode_stations(il, NULL);
+	il_dealloc_bcast_stations(il);
+	il_clear_driver_stations(il);
+
+	/* Unblock any waiting calls */
+	wake_up_all(&il->wait_command_queue);
+
+	/* Wipe out the EXIT_PENDING status bit if we are not actually
+	 * exiting the module */
+	if (!exit_pending)
+		clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* stop and reset the on-board processor */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/* tell the device to stop sending interrupts */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+	il4965_synchronize_irq(il);
+
+	if (il->mac80211_registered)
+		ieee80211_stop_queues(il->hw);
+
+	/* If we have not previously called il_init() then
+	 * clear all bits but the RF Kill bit and return */
+	if (!il_is_init(il)) {
+		il->status =
+		    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+		goto exit;
+	}
+
+	/* ...otherwise clear out all the status bits but the RF Kill
+	 * bit and continue taking the NIC down. */
+	il->status &=
+	    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+	il4965_txq_ctx_stop(il);
+	il4965_rxq_stop(il);
+
+	/* Power-down device's busmaster DMA clocks */
+	il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(5);
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/* Stop the device, and put it in low power state */
+	il_apm_stop(il);
+
+exit:
+	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+	dev_kfree_skb(il->beacon_skb);
+	il->beacon_skb = NULL;
+
+	/* clear out any free frames */
+	il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+	mutex_lock(&il->mutex);
+	__il4965_down(il);
+	mutex_unlock(&il->mutex);
+
+	il4965_cancel_deferred_work(il);
+}
+
+#define HW_READY_TIMEOUT (50)
+
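+/* Set the NIC_READY bit and poll until the hardware reports ready (HW_READY_TIMEOUT) */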
+static int
+il4965_set_hw_ready(struct il_priv *il)
+{
+	int ret = 0;
+
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+		   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+	/* See if we got it */
+	ret =
+	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+			 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
+	if (ret != -ETIMEDOUT)
+		il->hw_ready = true;
+	else
+		il->hw_ready = false;
+
+	D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
+	return ret;
+}
+
+static int
+il4965_prepare_card_hw(struct il_priv *il)
+{
+	int ret = 0;
+
+	D_INFO("il4965_prepare_card_hw enter\n");
+
+	ret = il4965_set_hw_ready(il);
+	if (il->hw_ready)
+		return ret;
+
+	/* If HW is not ready, prepare the conditions to check again */
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+	ret =
+	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+	/* HW should be ready by now, check again. */
+	if (ret != -ETIMEDOUT)
+		il4965_set_hw_ready(il);
+
+	return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+	int i;
+	int ret;
+
+	if (test_bit(S_EXIT_PENDING, &il->status)) {
+		IL_WARN("Exit pending; will not bring the NIC up\n");
+		return -EIO;
+	}
+
+	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+		IL_ERR("ucode not available for device bringup\n");
+		return -EIO;
+	}
+
+	ret = il4965_alloc_bcast_station(il, &il->ctx);
+	if (ret) {
+		il_dealloc_bcast_stations(il);
+		return ret;
+	}
+
+	il4965_prepare_card_hw(il);
+
+	if (!il->hw_ready) {
+		IL_WARN("Exit HW not ready\n");
+		return -EIO;
+	}
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+		clear_bit(S_RF_KILL_HW, &il->status);
+	else
+		set_bit(S_RF_KILL_HW, &il->status);
+
+	if (il_is_rfkill(il)) {
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+		il_enable_interrupts(il);
+		IL_WARN("Radio disabled by HW RF Kill switch\n");
+		return 0;
+	}
+
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+	/* must be initialised before il_hw_nic_init */
+	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+	ret = il4965_hw_nic_init(il);
+	if (ret) {
+		IL_ERR("Unable to init nic\n");
+		return ret;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+	il_enable_interrupts(il);
+
+	/* really make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+	/* Copy original ucode data image from disk into backup cache.
+	 * This will be used to initialize the on-board processor's
+	 * data SRAM for a clean start when the runtime program first loads. */
+	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+	       il->ucode_data.len);
+
+	for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+		/* load bootstrap state machine,
+		 * load bootstrap program into processor's memory,
+		 * prepare to load the "initialize" uCode */
+		ret = il->cfg->ops->lib->load_ucode(il);
+
+		if (ret) {
+			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+			continue;
+		}
+
+		/* start card; "initialize" will load runtime ucode */
+		il4965_nic_start(il);
+
+		D_INFO(DRV_NAME " is coming up\n");
+
+		return 0;
+	}
+
+	set_bit(S_EXIT_PENDING, &il->status);
+	__il4965_down(il);
+	clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* We tried to restart and configure the device for as long as our
+	 * patience could withstand. */
+	IL_ERR("Unable to initialize device after %d attempts.\n", i);
+	return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, init_alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il->cfg->ops->lib->init_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il4965_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv,
+					  run_time_calib_work);
+
+	mutex_lock(&il->mutex);
+
+	if (test_bit(S_EXIT_PENDING, &il->status) ||
+	    test_bit(S_SCANNING, &il->status)) {
+		mutex_unlock(&il->mutex);
+		return;
+	}
+
+	if (il->start_calib) {
+		il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+		il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+	}
+
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, restart);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+		mutex_lock(&il->mutex);
+		il->ctx.vif = NULL;
+		il->is_open = 0;
+
+		__il4965_down(il);
+
+		mutex_unlock(&il->mutex);
+		il4965_cancel_deferred_work(il);
+		ieee80211_restart_hw(il->hw);
+	} else {
+		il4965_down(il);
+
+		mutex_lock(&il->mutex);
+		if (test_bit(S_EXIT_PENDING, &il->status)) {
+			mutex_unlock(&il->mutex);
+			return;
+		}
+
+		__il4965_up(il);
+		mutex_unlock(&il->mutex);
+	}
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	mutex_lock(&il->mutex);
+	il4965_rx_replenish(il);
+	mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT	(4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+	int ret;
+	struct ieee80211_hw *hw = il->hw;
+
+	hw->rate_control_algorithm = "iwl-4965-rs";
+
+	/* Tell mac80211 our characteristics */
+	hw->flags =
+	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+	    IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+	    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+	if (il->cfg->sku & IL_SKU_N)
+		hw->flags |=
+		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+	hw->sta_data_size = sizeof(struct il_station_priv);
+	hw->vif_data_size = sizeof(struct il_vif_priv);
+
+	hw->wiphy->interface_modes |= il->ctx.interface_modes;
+	hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+
+	hw->wiphy->flags |=
+	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+	/*
+	 * For now, disable PS by default because it affects
+	 * RX performance significantly.
+	 */
+	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+	/* we create the 802.11 header and a zero-length SSID element */
+	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		    &il->bands[IEEE80211_BAND_2GHZ];
+	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+		    &il->bands[IEEE80211_BAND_5GHZ];
+
+	il_leds_init(il);
+
+	ret = ieee80211_register_hw(il->hw);
+	if (ret) {
+		IL_ERR("Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	il->mac80211_registered = 1;
+
+	return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	D_MAC80211("enter\n");
+
+	/* we should be verifying the device is ready to be opened */
+	mutex_lock(&il->mutex);
+	ret = __il4965_up(il);
+	mutex_unlock(&il->mutex);
+
+	if (ret)
+		return ret;
+
+	if (il_is_rfkill(il))
+		goto out;
+
+	D_INFO("Start UP work done.\n");
+
+	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+	 * mac80211 will not be run successfully. */
+	ret = wait_event_timeout(il->wait_command_queue,
+				 test_bit(S_READY, &il->status),
+				 UCODE_READY_TIMEOUT);
+	if (!ret) {
+		if (!test_bit(S_READY, &il->status)) {
+			IL_ERR("START_ALIVE timeout after %dms.\n",
+				jiffies_to_msecs(UCODE_READY_TIMEOUT));
+			return -ETIMEDOUT;
+		}
+	}
+
+	il4965_led_enable(il);
+
+out:
+	il->is_open = 1;
+	D_MAC80211("leave\n");
+	return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	if (!il->is_open)
+		return;
+
+	il->is_open = 0;
+
+	il4965_down(il);
+
+	flush_workqueue(il->workqueue);
+
+	/* User space software may expect to get rfkill changes
+	 * even if the interface is down */
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+	il_enable_rfkill_int(il);
+
+	D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MACDUMP("enter\n");
+
+	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+	if (il4965_tx_skb(il, skb))
+		dev_kfree_skb_any(skb);
+
+	D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_key_conf *keyconf,
+			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+	struct il_priv *il = hw->priv;
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	D_MAC80211("enter\n");
+
+	il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
+			       phase1key);
+
+	D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key)
+{
+	struct il_priv *il = hw->priv;
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct il_rxon_context *ctx = vif_priv->ctx;
+	int ret;
+	u8 sta_id;
+	bool is_default_wep_key = false;
+
+	D_MAC80211("enter\n");
+
+	if (il->cfg->mod_params->sw_crypto) {
+		D_MAC80211("leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+	if (sta_id == IL_INVALID_STATION)
+		return -EINVAL;
+
+	mutex_lock(&il->mutex);
+	il_scan_cancel_timeout(il, 100);
+
+	/*
+	 * If we are getting a WEP group key and we haven't received any key
+	 * mapping so far, we are in legacy WEP mode (group key only);
+	 * otherwise we are in 1X mode.
+	 * In legacy WEP mode, we use another host command to the uCode.
+	 */
+	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+		if (cmd == SET_KEY)
+			is_default_wep_key = !ctx->key_mapping_keys;
+		else
+			is_default_wep_key =
+			    (key->hw_key_idx == HW_KEY_DEFAULT);
+	}
+
+	switch (cmd) {
+	case SET_KEY:
+		if (is_default_wep_key)
+			ret =
+			    il4965_set_default_wep_key(il, vif_priv->ctx, key);
+		else
+			ret =
+			    il4965_set_dynamic_key(il, vif_priv->ctx, key,
+						   sta_id);
+
+		D_MAC80211("enable hwcrypto key\n");
+		break;
+	case DISABLE_KEY:
+		if (is_default_wep_key)
+			ret = il4965_remove_default_wep_key(il, ctx, key);
+		else
+			ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+
+		D_MAC80211("disable hwcrypto key\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&il->mutex);
+	D_MAC80211("leave\n");
+
+	return ret;
+}
+
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			enum ieee80211_ampdu_mlme_action action,
+			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+			u8 buf_size)
+{
+	struct il_priv *il = hw->priv;
+	int ret = -EINVAL;
+
+	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+	if (!(il->cfg->sku & IL_SKU_N))
+		return -EACCES;
+
+	mutex_lock(&il->mutex);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		D_HT("start Rx\n");
+		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		D_HT("stop Rx\n");
+		ret = il4965_sta_rx_agg_stop(il, sta, tid);
+		if (test_bit(S_EXIT_PENDING, &il->status))
+			ret = 0;
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		D_HT("start Tx\n");
+		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP:
+		D_HT("stop Tx\n");
+		ret = il4965_tx_agg_stop(il, vif, sta, tid);
+		if (test_bit(S_EXIT_PENDING, &il->status))
+			ret = 0;
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ret = 0;
+		break;
+	}
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct il_priv *il = hw->priv;
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
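+	/* treat the peer as the AP when our own interface is a station */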
+	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+	int ret;
+	u8 sta_id;
+
+	D_INFO("received request to add station %pM\n", sta->addr);
+	mutex_lock(&il->mutex);
+	D_INFO("proceeding to add station %pM\n", sta->addr);
+	sta_priv->common.sta_id = IL_INVALID_STATION;
+
+	atomic_set(&sta_priv->pending_frames, 0);
+
+	ret =
+	    il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
+				  &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+		/* Should we return success if return code is EEXIST ? */
+		mutex_unlock(&il->mutex);
+		return ret;
+	}
+
+	sta_priv->common.sta_id = sta_id;
+
+	/* Initialize rate scaling */
+	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+	il4965_rs_rate_init(il, sta, sta_id);
+	mutex_unlock(&il->mutex);
+
+	return 0;
+}
+
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw,
+			  struct ieee80211_channel_switch *ch_switch)
+{
+	struct il_priv *il = hw->priv;
+	const struct il_channel_info *ch_info;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = ch_switch->channel;
+	struct il_ht_config *ht_conf = &il->current_ht_config;
+
+	struct il_rxon_context *ctx = &il->ctx;
+	u16 ch;
+
+	D_MAC80211("enter\n");
+
+	mutex_lock(&il->mutex);
+
+	if (il_is_rfkill(il))
+		goto out;
+
+	if (test_bit(S_EXIT_PENDING, &il->status) ||
+	    test_bit(S_SCANNING, &il->status) ||
+	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+		goto out;
+
+	if (!il_is_associated_ctx(ctx))
+		goto out;
+
+	if (!il->cfg->ops->lib->set_channel_switch)
+		goto out;
+
+	ch = channel->hw_value;
+	if (le16_to_cpu(ctx->active.channel) == ch)
+		goto out;
+
+	ch_info = il_get_channel_info(il, channel->band, ch);
+	if (!il_is_channel_valid(ch_info)) {
+		D_MAC80211("invalid channel\n");
+		goto out;
+	}
+
+	spin_lock_irq(&il->lock);
+
+	il->current_ht_config.smps = conf->smps_mode;
+
+	/* Configure HT40 channels */
+	ctx->ht.enabled = conf_is_ht(conf);
+	if (ctx->ht.enabled) {
+		if (conf_is_ht40_minus(conf)) {
+			ctx->ht.extension_chan_offset =
+			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+			ctx->ht.is_40mhz = true;
+		} else if (conf_is_ht40_plus(conf)) {
+			ctx->ht.extension_chan_offset =
+			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+			ctx->ht.is_40mhz = true;
+		} else {
+			ctx->ht.extension_chan_offset =
+			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
+			ctx->ht.is_40mhz = false;
+		}
+	} else
+		ctx->ht.is_40mhz = false;
+
+	if ((le16_to_cpu(ctx->staging.channel) != ch))
+		ctx->staging.flags = 0;
+
+	il_set_rxon_channel(il, channel, ctx);
+	il_set_rxon_ht(il, ht_conf);
+	il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+	spin_unlock_irq(&il->lock);
+
+	il_set_rate(il);
+	/*
+	 * at this point, staging_rxon has the
+	 * configuration for channel switch
+	 */
+	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+	il->switch_channel = cpu_to_le16(ch);
+	if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+		il->switch_channel = 0;
+		ieee80211_chswitch_done(ctx->vif, false);
+	}
+
+out:
+	mutex_unlock(&il->mutex);
+	D_MAC80211("leave\n");
+}
+
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct il_priv *il = hw->priv;
+	__le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)	do { \
+	if (*total_flags & (test))		\
+		filter_or |= (flag);		\
+	else					\
+		filter_nand |= (flag);		\
+	} while (0)
+
+	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+		   *total_flags);
+
+	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+	mutex_lock(&il->mutex);
+
+	il->ctx.staging.filter_flags &= ~filter_nand;
+	il->ctx.staging.filter_flags |= filter_or;
+
+	/*
+	 * Not committing directly because hardware can perform a scan,
+	 * but we'll eventually commit the filter flags change anyway.
+	 */
+
+	mutex_unlock(&il->mutex);
+
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in il_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
+	*total_flags &=
+	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv,
+					  txpower_work);
+
+	mutex_lock(&il->mutex);
+
+	/* If a scan happened to start before we got here
+	 * then just return; the stats notification will
+	 * kick off another scheduled work to compensate for
+	 * any temperature delta we missed here. */
+	if (test_bit(S_EXIT_PENDING, &il->status) ||
+	    test_bit(S_SCANNING, &il->status))
+		goto out;
+
+	/* Regardless of if we are associated, we must reconfigure the
+	 * TX power since frames can be sent on non-radar channels while
+	 * not associated */
+	il->cfg->ops->lib->send_tx_power(il);
+
+	/* Update last_temperature to keep is_calib_needed from running
+	 * when it isn't needed... */
+	il->last_temperature = il->temperature;
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+	il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+	init_waitqueue_head(&il->wait_command_queue);
+
+	INIT_WORK(&il->restart, il4965_bg_restart);
+	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+	il_setup_scan_deferred_work(il);
+
+	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+	init_timer(&il->stats_periodic);
+	il->stats_periodic.data = (unsigned long)il;
+	il->stats_periodic.function = il4965_bg_stats_periodic;
+
+	init_timer(&il->watchdog);
+	il->watchdog.data = (unsigned long)il;
+	il->watchdog.function = il_bg_watchdog;
+
+	tasklet_init(&il->irq_tasklet,
+		     (void (*)(unsigned long))il4965_irq_tasklet,
+		     (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+	cancel_work_sync(&il->txpower_work);
+	cancel_delayed_work_sync(&il->init_alive_start);
+	cancel_delayed_work(&il->alive_start);
+	cancel_work_sync(&il->run_time_calib_work);
+
+	il_cancel_scan_deferred_work(il);
+
+	del_timer_sync(&il->stats_periodic);
+}
+
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+	int i;
+
+	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+		rates[i].bitrate = il_rates[i].ieee * 5;
+		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
+		rates[i].hw_value_short = i;
+		rates[i].flags = 0;
+		if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+			/*
+			 * If CCK != 1M then set short preamble rate flag.
+			 */
+			rates[i].flags |=
+			    (il_rates[i].plcp ==
+			     RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+		}
+	}
+}
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
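+/*
+ * Activate or deactivate a Tx queue in the scheduler and bind it to a
+ * Tx FIFO; scd_retry marks the queue as an aggregation (BA) queue.
+ */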
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+			   int tx_fifo_id, int scd_retry)
+{
+	int txq_id = txq->q.id;
+
+	/* Find out whether to activate Tx queue */
+	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+	/* Set up and activate */
+	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+		   IL49_SCD_QUEUE_STTS_REG_MSK);
+
+	txq->sched_retry = scd_retry;
+
+	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+	int ret;
+
+	spin_lock_init(&il->sta_lock);
+	spin_lock_init(&il->hcmd_lock);
+
+	INIT_LIST_HEAD(&il->free_frames);
+
+	mutex_init(&il->mutex);
+
+	il->ieee_channels = NULL;
+	il->ieee_rates = NULL;
+	il->band = IEEE80211_BAND_2GHZ;
+
+	il->iw_mode = NL80211_IFTYPE_STATION;
+	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+	/* initialize force reset */
+	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+	/* Choose which receivers/antennas to use */
+	if (il->cfg->ops->hcmd->set_rxon_chain)
+		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+
+	il_init_scan_params(il);
+
+	ret = il_init_channel_map(il);
+	if (ret) {
+		IL_ERR("initializing regulatory failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = il_init_geos(il);
+	if (ret) {
+		IL_ERR("initializing geos failed: %d\n", ret);
+		goto err_free_channel_map;
+	}
+	il4965_init_hw_rates(il, il->ieee_rates);
+
+	return 0;
+
+err_free_channel_map:
+	il_free_channel_map(il);
+err:
+	return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+	il4965_calib_free_results(il);
+	il_free_geos(il);
+	il_free_channel_map(il);
+	kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+	il->hw_rev = _il_rd(il, CSR_HW_REV);
+	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+	il->rev_id = il->pci_dev->revision;
+	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static int
+il4965_set_hw_params(struct il_priv *il)
+{
+	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+	if (il->cfg->mod_params->amsdu_size_8K)
+		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+	else
+		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+	if (il->cfg->mod_params->disable_11n)
+		il->cfg->sku &= ~IL_SKU_N;
+
+	/* Device-specific setup */
+	return il->cfg->ops->lib->set_hw_params(il);
+}
+
+static const u8 il4965_bss_ac_to_fifo[] = {
+	IL_TX_FIFO_VO,
+	IL_TX_FIFO_VI,
+	IL_TX_FIFO_BE,
+	IL_TX_FIFO_BK,
+};
+
+static const u8 il4965_bss_ac_to_queue[] = {
+	0, 1, 2, 3,
+};
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int err = 0;
+	struct il_priv *il;
+	struct ieee80211_hw *hw;
+	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+	unsigned long flags;
+	u16 pci_cmd;
+
+	/************************
+	 * 1. Allocating HW data
+	 ************************/
+
+	hw = il_alloc_all(cfg);
+	if (!hw) {
+		err = -ENOMEM;
+		goto out;
+	}
+	il = hw->priv;
+	/* At this point both hw and il are allocated. */
+
+	il->ctx.ctxid = 0;
+
+	il->ctx.always_active = true;
+	il->ctx.is_active = true;
+	il->ctx.rxon_cmd = C_RXON;
+	il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+	il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+	il->ctx.qos_cmd = C_QOS_PARAM;
+	il->ctx.ap_sta_id = IL_AP_ID;
+	il->ctx.wep_key_cmd = C_WEPKEY;
+	il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
+	il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
+	il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
+	il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
+	il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
+	il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+	il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+	il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+
+	D_INFO("*** LOAD DRIVER ***\n");
+	il->cfg = cfg;
+	il->pci_dev = pdev;
+	il->inta_mask = CSR_INI_SET_MASK;
+
+	if (il_alloc_traffic_mem(il))
+		IL_ERR("Not enough memory to generate traffic log\n");
+
+	/**************************
+	 * 2. Initializing PCI bus
+	 **************************/
+	pci_disable_link_state(pdev,
+			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_ieee80211_free_hw;
+	}
+
+	pci_set_master(pdev);
+
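+	/* try 36-bit DMA addressing first, fall back to 32-bit if unavailable */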
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!err)
+			err =
+			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		/* both attempts failed: */
+		if (err) {
+			IL_WARN("No suitable DMA available.\n");
+			goto out_pci_disable_device;
+		}
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto out_pci_disable_device;
+
+	pci_set_drvdata(pdev, il);
+
+	/***********************
+	 * 3. Read REV register
+	 ***********************/
+	il->hw_base = pci_iomap(pdev, 0, 0);
+	if (!il->hw_base) {
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	D_INFO("pci_resource_len = 0x%08llx\n",
+	       (unsigned long long)pci_resource_len(pdev, 0));
+	D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+	/* these spin locks will be used in apm_ops.init and EEPROM access,
+	 * so we should init them now
+	 */
+	spin_lock_init(&il->reg_lock);
+	spin_lock_init(&il->lock);
+
+	/*
+	 * stop and reset the on-board processor just in case it is in a
+	 * strange state ... like being left stranded by a primary kernel
+	 * and this is now the kdump kernel trying to start up
+	 */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	il4965_hw_detect(il);
+	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	il4965_prepare_card_hw(il);
+	if (!il->hw_ready) {
+		IL_WARN("Failed, HW not ready\n");
+		goto out_iounmap;
+	}
+
+	/*****************
+	 * 4. Read EEPROM
+	 *****************/
+	/* Read the EEPROM */
+	err = il_eeprom_init(il);
+	if (err) {
+		IL_ERR("Unable to init EEPROM\n");
+		goto out_iounmap;
+	}
+	err = il4965_eeprom_check_version(il);
+	if (err)
+		goto out_free_eeprom;
+
+	/* extract MAC Address */
+	il4965_eeprom_get_mac(il, il->addresses[0].addr);
+	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+	il->hw->wiphy->addresses = il->addresses;
+	il->hw->wiphy->n_addresses = 1;
+
+	/************************
+	 * 5. Setup HW constants
+	 ************************/
+	if (il4965_set_hw_params(il)) {
+		IL_ERR("failed to set hw parameters\n");
+		goto out_free_eeprom;
+	}
+
+	/*******************
+	 * 6. Setup il
+	 *******************/
+
+	err = il4965_init_drv(il);
+	if (err)
+		goto out_free_eeprom;
+	/* At this point both hw and il are initialized. */
+
+	/********************
+	 * 7. Setup services
+	 ********************/
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	pci_enable_msi(il->pci_dev);
+
+	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+	if (err) {
+		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+		goto out_disable_msi;
+	}
+
+	il4965_setup_deferred_work(il);
+	il4965_setup_handlers(il);
+
+	/*********************************************
+	 * 8. Enable interrupts and read RFKILL state
+	 *********************************************/
+
+	/* enable rfkill interrupt: hw bug w/a */
+	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+	}
+
+	il_enable_rfkill_int(il);
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+		clear_bit(S_RF_KILL_HW, &il->status);
+	else
+		set_bit(S_RF_KILL_HW, &il->status);
+
+	wiphy_rfkill_set_hw_state(il->hw->wiphy,
+				  test_bit(S_RF_KILL_HW, &il->status));
+
+	il_power_initialize(il);
+
+	init_completion(&il->_4965.firmware_loading_complete);
+
+	err = il4965_request_firmware(il, true);
+	if (err)
+		goto out_destroy_workqueue;
+
+	return 0;
+
+out_destroy_workqueue:
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+	free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+	pci_disable_msi(il->pci_dev);
+	il4965_uninit_drv(il);
+out_free_eeprom:
+	il_eeprom_free(il);
+out_iounmap:
+	pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+	pci_set_drvdata(pdev, NULL);
+	pci_release_regions(pdev);
+out_pci_disable_device:
+	pci_disable_device(pdev);
+out_ieee80211_free_hw:
+	il_free_traffic_mem(il);
+	ieee80211_free_hw(il->hw);
+out:
+	return err;
+}
+
+static void __devexit
+il4965_pci_remove(struct pci_dev *pdev)
+{
+	struct il_priv *il = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	if (!il)
+		return;
+
+	wait_for_completion(&il->_4965.firmware_loading_complete);
+
+	D_INFO("*** UNLOAD DRIVER ***\n");
+
+	il_dbgfs_unregister(il);
+	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+	/* The ieee80211_unregister_hw call will cause il_mac_stop and
+	 * il4965_down to be called. Since we are removing the device,
+	 * we need to set the S_EXIT_PENDING bit.
+	 */
+	set_bit(S_EXIT_PENDING, &il->status);
+
+	il_leds_exit(il);
+
+	if (il->mac80211_registered) {
+		ieee80211_unregister_hw(il->hw);
+		il->mac80211_registered = 0;
+	} else {
+		il4965_down(il);
+	}
+
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with il4965_down(), but there are paths to
+	 * run il4965_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running il4965_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	il_apm_stop(il);
+
+	/* make sure we flush any pending irq or
+	 * tasklet for the driver
+	 */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il4965_synchronize_irq(il);
+
+	il4965_dealloc_ucode_pci(il);
+
+	if (il->rxq.bd)
+		il4965_rx_queue_free(il, &il->rxq);
+	il4965_hw_txq_ctx_free(il);
+
+	il_eeprom_free(il);
+
+	/*netif_stop_queue(dev); */
+	flush_workqueue(il->workqueue);
+
+	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
+	 * il->workqueue... so we can't take down the workqueue
+	 * until now... */
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+	il_free_traffic_mem(il);
+
+	free_irq(il->pci_dev->irq, il);
+	pci_disable_msi(il->pci_dev);
+	pci_iounmap(pdev, il->hw_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	il4965_uninit_drv(il);
+
+	dev_kfree_skb(il->beacon_skb);
+
+	ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under il->lock and mac access
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+	il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
+	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+	.name = DRV_NAME,
+	.id_table = il4965_hw_card_ids,
+	.probe = il4965_pci_probe,
+	.remove = __devexit_p(il4965_pci_remove),
+	.driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+	int ret;
+
+	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	pr_info(DRV_COPYRIGHT "\n");
+
+	ret = il4965_rate_control_register();
+	if (ret) {
+		pr_err("Unable to register rate control algorithm: %d\n", ret);
+		return ret;
+	}
+
+	ret = pci_register_driver(&il4965_driver);
+	if (ret) {
+		pr_err("Unable to initialize PCI module\n");
+		goto error_register;
+	}
+
+	return ret;
+
+error_register:
+	il4965_rate_control_unregister();
+	return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+	pci_unregister_driver(&il4965_driver);
+	il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
+		   S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 0000000..467d0cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY      1
+#define IL_HT_NUMBER_TRY   3
+
+#define RATE_MAX_WINDOW		62	/* # tx in history win */
+#define RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX		15
+/* max time to accumulate history (3 seconds) */
+#define RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+	RATE_6M_IDX, RATE_6M_IDX,
+	RATE_6M_IDX, RATE_6M_IDX,
+	RATE_6M_IDX,
+	RATE_6M_IDX, RATE_9M_IDX,
+	RATE_12M_IDX, RATE_18M_IDX,
+	RATE_24M_IDX, RATE_36M_IDX,
+	RATE_48M_IDX, RATE_54M_IDX
+};
+
+static const u8 ant_toggle_lookup[] = {
+	/*ANT_NONE -> */ ANT_NONE,
+	/*ANT_A    -> */ ANT_B,
+	/*ANT_B    -> */ ANT_C,
+	/*ANT_AB   -> */ ANT_BC,
+	/*ANT_C    -> */ ANT_A,
+	/*ANT_AC   -> */ ANT_AB,
+	/*ANT_BC   -> */ ANT_AC,
+	/*ANT_ABC  -> */ ANT_ABC,
+};
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,      \
+				    RATE_SISO_##s##M_PLCP, \
+				    RATE_MIMO2_##s##M_PLCP,\
+				    RATE_##r##M_IEEE,      \
+				    RATE_##ip##M_IDX,    \
+				    RATE_##in##M_IDX,    \
+				    RATE_##rp##M_IDX,    \
+				    RATE_##rn##M_IDX,    \
+				    RATE_##pp##M_IDX,    \
+				    RATE_##np##M_IDX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il_rate_info il_rates[RATE_COUNT] = {
+	IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),	/*  1mbps */
+	IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),		/*  2mbps */
+	IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
+	IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),	/* 11mbps */
+	IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),		/*  6mbps */
+	IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),	/*  9mbps */
+	IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
+	IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
+	IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
+	IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
+	IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
+	IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+	IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
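+/*
+ * Map a uCode rate_n_flags value to an idx into the il_rates table;
+ * returns -1 if no matching rate is found.
+ */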
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+	int idx = 0;
+
+	/* HT rate format */
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = (rate_n_flags & 0xff);
+
+		if (idx >= RATE_MIMO2_6M_PLCP)
+			idx = idx - RATE_MIMO2_6M_PLCP;
+
+		idx += IL_FIRST_OFDM_RATE;
+		/* skip 9M not supported in ht */
+		if (idx >= RATE_9M_IDX)
+			idx += 1;
+		if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+			return idx;
+
+		/* legacy rate format, search for match in table */
+	} else {
+		for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+				return idx;
+	}
+
+	return -1;
+}
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+					 struct sk_buff *skb,
+					 struct ieee80211_sta *sta,
+					 struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+				    struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+				    bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+				    u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used in
+ * the G (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202},	/* Norm */
+	{0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210},	/* SGI */
+	{0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381},	/* AGG */
+	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413},	/* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257},	/* Norm */
+	{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264},	/* SGI */
+	{0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640},	/* AGG */
+	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683},	/* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250},	/* Norm */
+	{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256},	/* SGI */
+	{0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619},	/* AGG */
+	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660},	/* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289},	/* Norm */
+	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293},	/* SGI */
+	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922},	/* AGG */
+	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966},	/* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+	{"1", "BPSK DSSS"},
+	{"2", "QPSK DSSS"},
+	{"5.5", "BPSK CCK"},
+	{"11", "QPSK CCK"},
+	{"6", "BPSK 1/2"},
+	{"9", "BPSK 1/2"},
+	{"12", "QPSK 1/2"},
+	{"18", "QPSK 3/4"},
+	{"24", "16QAM 1/2"},
+	{"36", "16QAM 3/4"},
+	{"48", "64QAM 2/3"},
+	{"54", "64QAM 3/4"},
+	{"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM	(8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+	return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+	win->data = 0;
+	win->success_counter = 0;
+	win->success_ratio = IL_INVALID_VALUE;
+	win->counter = 0;
+	win->average_tpt = IL_INVALID_VALUE;
+	win->stamp = 0;
+}
+
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+	return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ *	Remove old data from the stats. All data older than
+ *	TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+	/* The oldest age we want to keep */
+	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+	while (tl->queue_count && tl->time_stamp < oldest_time) {
+		tl->total -= tl->packet_count[tl->head];
+		tl->packet_count[tl->head] = 0;
+		tl->time_stamp += TID_QUEUE_CELL_SPACING;
+		tl->queue_count--;
+		tl->head++;
+		if (tl->head >= TID_QUEUE_MAX_SIZE)
+			tl->head = 0;
+	}
+}
+
+/*
+ *	Increment the traffic load value for the tid and also remove
+ *	any old values that have aged past the maximum time period.
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+	u32 curr_time = jiffies_to_msecs(jiffies);
+	u32 time_diff;
+	s32 idx;
+	struct il_traffic_load *tl = NULL;
+	u8 tid;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	} else
+		return MAX_TID_COUNT;
+
+	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+		return MAX_TID_COUNT;
+
+	tl = &lq_data->load[tid];
+
+	curr_time -= curr_time % TID_ROUND_VALUE;
+
+	/* Happens only for the first packet. Initialize the data */
+	if (!(tl->queue_count)) {
+		tl->total = 1;
+		tl->time_stamp = curr_time;
+		tl->queue_count = 1;
+		tl->head = 0;
+		tl->packet_count[0] = 1;
+		return MAX_TID_COUNT;
+	}
+
+	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+	idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+	/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+	if (idx >= TID_QUEUE_MAX_SIZE)
+		il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+	idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+	tl->packet_count[idx] = tl->packet_count[idx] + 1;
+	tl->total = tl->total + 1;
+
+	if ((idx + 1) > tl->queue_count)
+		tl->queue_count = idx + 1;
+
+	return tid;
+}
+
+/*
+ *	Get the traffic load value for the tid.
+ */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+	u32 curr_time = jiffies_to_msecs(jiffies);
+	u32 time_diff;
+	s32 idx;
+	struct il_traffic_load *tl = NULL;
+
+	if (tid >= TID_MAX_LOAD_COUNT)
+		return 0;
+
+	tl = &(lq_data->load[tid]);
+
+	curr_time -= curr_time % TID_ROUND_VALUE;
+
+	if (!(tl->queue_count))
+		return 0;
+
+	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+	idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+	/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+	if (idx >= TID_QUEUE_MAX_SIZE)
+		il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+	return tl->total;
+}
+
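+/*
+ * Start a Tx BA session for the given tid once its traffic load has
+ * crossed IL_AGG_LOAD_THRESHOLD.
+ */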
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+				 u8 tid, struct ieee80211_sta *sta)
+{
+	int ret = -EAGAIN;
+	u32 load;
+
+	load = il4965_rs_tl_get_load(lq_data, tid);
+
+	if (load > IL_AGG_LOAD_THRESHOLD) {
+		D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+		if (ret == -EAGAIN) {
+			/*
+			 * driver and mac80211 are out of sync; this might
+			 * be caused by reloading the firmware.
+			 * Stop the Tx BA session here.
+			 */
+			IL_ERR("Fail start Tx agg on tid: %d\n", tid);
+			ieee80211_stop_tx_ba_session(sta, tid);
+		}
+	} else
+		D_HT("Aggregation not enabled for tid %d because load = %u\n",
+		     tid, load);
+
+	return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+			 struct ieee80211_sta *sta)
+{
+	if (tid < TID_MAX_LOAD_COUNT)
+		il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+	else
+		IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+		       TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+	    !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+	    !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Static function to get the expected throughput from an il_scale_tbl_info
+ * that wraps a NULL pointer check
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_idx];
+	return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate.  win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+			  int attempts, int successes)
+{
+	struct il_rate_scale_data *win = NULL;
+	static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
+	s32 fail_count, tpt;
+
+	if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+		return -EINVAL;
+
+	/* Select win for current tx bit rate */
+	win = &(tbl->win[scale_idx]);
+
+	/* Get expected throughput */
+	tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+	/*
+	 * Keep track of only the latest 62 tx frame attempts in this rate's
+	 * history win; anything older isn't really relevant any more.
+	 * If we have filled up the sliding win, drop the oldest attempt;
+	 * if the oldest attempt (highest bit in bitmap) shows "success",
+	 * subtract "1" from the success counter (this is the main reason
+	 * we keep these bitmaps!).
+	 */
+	while (attempts > 0) {
+		if (win->counter >= RATE_MAX_WINDOW) {
+
+			/* remove earliest */
+			win->counter = RATE_MAX_WINDOW - 1;
+
+			if (win->data & mask) {
+				win->data &= ~mask;
+				win->success_counter--;
+			}
+		}
+
+		/* Increment frames-attempted counter */
+		win->counter++;
+
+		/* Shift bitmap by one frame to throw away oldest history */
+		win->data <<= 1;
+
+		/* Mark the most recent #successes attempts as successful */
+		if (successes > 0) {
+			win->success_counter++;
+			win->data |= 0x1;
+			successes--;
+		}
+
+		attempts--;
+	}
+
+	/* Calculate current success ratio (percentage scaled by 128),
+	 * avoid divide-by-0! */
+	if (win->counter > 0)
+		win->success_ratio =
+		    128 * (100 * win->success_counter) / win->counter;
+	else
+		win->success_ratio = IL_INVALID_VALUE;
+
+	fail_count = win->counter - win->success_counter;
+
+	/* Calculate average throughput, if we have enough history. */
+	if (fail_count >= RATE_MIN_FAILURE_TH ||
+	    win->success_counter >= RATE_MIN_SUCCESS_TH)
+		win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+	else
+		win->average_tpt = IL_INVALID_VALUE;
+
+	/* Tag this win as having been updated */
+	win->stamp = jiffies;
+
+	return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+			     int idx, u8 use_green)
+{
+	u32 rate_n_flags = 0;
+
+	if (is_legacy(tbl->lq_type)) {
+		rate_n_flags = il_rates[idx].plcp;
+		if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+			rate_n_flags |= RATE_MCS_CCK_MSK;
+
+	} else if (is_Ht(tbl->lq_type)) {
+		if (idx > IL_LAST_OFDM_RATE) {
+			IL_ERR("Invalid HT rate idx %d\n", idx);
+			idx = IL_LAST_OFDM_RATE;
+		}
+		rate_n_flags = RATE_MCS_HT_MSK;
+
+		if (is_siso(tbl->lq_type))
+			rate_n_flags |= il_rates[idx].plcp_siso;
+		else
+			rate_n_flags |= il_rates[idx].plcp_mimo2;
+	} else {
+		IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+	}
+
+	rate_n_flags |=
+	    ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+	if (is_Ht(tbl->lq_type)) {
+		if (tbl->is_ht40) {
+			if (tbl->is_dup)
+				rate_n_flags |= RATE_MCS_DUP_MSK;
+			else
+				rate_n_flags |= RATE_MCS_HT40_MSK;
+		}
+		if (tbl->is_SGI)
+			rate_n_flags |= RATE_MCS_SGI_MSK;
+
+		if (use_green) {
+			rate_n_flags |= RATE_MCS_GF_MSK;
+			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+				rate_n_flags &= ~RATE_MCS_SGI_MSK;
+				IL_ERR("GF was set with SGI:SISO\n");
+			}
+		}
+	}
+	return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+				enum ieee80211_band band,
+				struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+	u8 il4965_num_of_ant =
+	    il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+	u8 mcs;
+
+	memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+	*rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+	if (*rate_idx == RATE_INVALID) {
+		*rate_idx = -1;
+		return -EINVAL;
+	}
+	tbl->is_SGI = 0;	/* default legacy setup */
+	tbl->is_ht40 = 0;
+	tbl->is_dup = 0;
+	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+	tbl->lq_type = LQ_NONE;
+	tbl->max_search = IL_MAX_SEARCH;
+
+	/* legacy rate format */
+	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+		if (il4965_num_of_ant == 1) {
+			if (band == IEEE80211_BAND_5GHZ)
+				tbl->lq_type = LQ_A;
+			else
+				tbl->lq_type = LQ_G;
+		}
+		/* HT rate format */
+	} else {
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			tbl->is_SGI = 1;
+
+		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+		    (rate_n_flags & RATE_MCS_DUP_MSK))
+			tbl->is_ht40 = 1;
+
+		if (rate_n_flags & RATE_MCS_DUP_MSK)
+			tbl->is_dup = 1;
+
+		mcs = il4965_rs_extract_rate(rate_n_flags);
+
+		/* SISO */
+		if (mcs <= RATE_SISO_60M_PLCP) {
+			if (il4965_num_of_ant == 1)
+				tbl->lq_type = LQ_SISO;	/*else NONE */
+			/* MIMO2 */
+		} else {
+			if (il4965_num_of_ant == 2)
+				tbl->lq_type = LQ_MIMO2;
+		}
+	}
+	return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+			 struct il_scale_tbl_info *tbl)
+{
+	u8 new_ant_type;
+
+	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+		return 0;
+
+	if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+		return 0;
+
+	new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+	while (new_ant_type != tbl->ant_type &&
+	       !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+		new_ant_type = ant_toggle_lookup[new_ant_type];
+
+	if (new_ant_type == tbl->ant_type)
+		return 0;
+
+	tbl->ant_type = new_ant_type;
+	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+	return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct ieee80211_sta *sta)
+{
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+	    !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic available rates
+ * are returned.
+ *
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+			      struct ieee80211_hdr *hdr,
+			      enum il_table_type rate_type)
+{
+	if (is_legacy(rate_type)) {
+		return lq_sta->active_legacy_rate;
+	} else {
+		if (is_siso(rate_type))
+			return lq_sta->active_siso_rate;
+		else
+			return lq_sta->active_mimo2_rate;
+	}
+}
+
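+/*
+ * Find the closest supported rates below and above idx within rate_mask.
+ * The higher idx is packed into the upper byte of the return value and
+ * the lower idx into the lower byte; RATE_INVALID marks a missing one.
+ */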
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+			    int rate_type)
+{
+	u8 high = RATE_INVALID;
+	u8 low = RATE_INVALID;
+
+	/* 802.11A or ht walks to the next literal adjacent rate in
+	 * the rate table */
+	if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+		int i;
+		u32 mask;
+
+		/* Find the previous rate that is in the rate mask */
+		i = idx - 1;
+		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+			if (rate_mask & mask) {
+				low = i;
+				break;
+			}
+		}
+
+		/* Find the next rate that is in the rate mask */
+		i = idx + 1;
+		for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+			if (rate_mask & mask) {
+				high = i;
+				break;
+			}
+		}
+
+		return (high << 8) | low;
+	}
+
+	low = idx;
+	while (low != RATE_INVALID) {
+		low = il_rates[low].prev_rs;
+		if (low == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << low))
+			break;
+		D_RATE("Skipping masked lower rate: %d\n", low);
+	}
+
+	high = idx;
+	while (high != RATE_INVALID) {
+		high = il_rates[high].next_rs;
+		if (high == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << high))
+			break;
+		D_RATE("Skipping masked higher rate: %d\n", high);
+	}
+
+	return (high << 8) | low;
+}
+
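+/*
+ * Pick the next lower supported rate, switching from HT to legacy rates
+ * when necessary, and return it in uCode rate_n_flags format.
+ */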
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+			 struct il_scale_tbl_info *tbl, u8 scale_idx,
+			 u8 ht_possible)
+{
+	s32 low;
+	u16 rate_mask;
+	u16 high_low;
+	u8 switch_to_legacy = 0;
+	u8 is_green = lq_sta->is_green;
+	struct il_priv *il = lq_sta->drv;
+
+	/* check if we need to switch from HT to legacy rates.
+	 * assumption is that mandatory rates (1Mbps or 6Mbps)
+	 * are always supported (spec demand) */
+	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+		switch_to_legacy = 1;
+		scale_idx = rs_ht_to_legacy[scale_idx];
+		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+			tbl->lq_type = LQ_A;
+		else
+			tbl->lq_type = LQ_G;
+
+		if (il4965_num_of_ant(tbl->ant_type) > 1)
+			tbl->ant_type =
+			    il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+		tbl->is_ht40 = 0;
+		tbl->is_SGI = 0;
+		tbl->max_search = IL_MAX_SEARCH;
+	}
+
+	rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+	/* Mask with station rate restriction */
+	if (is_legacy(tbl->lq_type)) {
+		/* supp_rates has no CCK bits in A mode */
+		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+			rate_mask =
+			    (u16) (rate_mask &
+				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+		else
+			rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+	}
+
+	/* If we switched from HT to legacy, check current rate */
+	if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+		low = scale_idx;
+		goto out;
+	}
+
+	high_low =
+	    il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+					tbl->lq_type);
+	low = high_low & 0xff;
+
+	if (low == RATE_INVALID)
+		low = scale_idx;
+
+out:
+	return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+			  struct il_scale_tbl_info *b)
+{
+	return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+		a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+		    struct ieee80211_sta *sta, void *il_sta,
+		    struct sk_buff *skb)
+{
+	int legacy_success;
+	int retries;
+	int rs_idx, mac_idx, i;
+	struct il_lq_sta *lq_sta = il_sta;
+	struct il_link_quality_cmd *table;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct il_priv *il = (struct il_priv *)il_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	enum mac80211_rate_control_flags mac_flags;
+	u32 tx_rate;
+	struct il_scale_tbl_info tbl_type;
+	struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+	D_RATE("get frame ack response, update rate scale win\n");
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (!lq_sta) {
+		D_RATE("Station rate scaling not created yet.\n");
+		return;
+	} else if (!lq_sta->drv) {
+		D_RATE("Rate scaling not initialized yet.\n");
+		return;
+	}
+
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		return;
+
+	/* This packet was aggregated but doesn't carry status info */
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
+		return;
+
+	/*
+	 * Ignore this Tx frame response if its initial rate doesn't match
+	 * that of latest Link Quality command.  There may be stragglers
+	 * from a previous Link Quality command, but we're no longer interested
+	 * in those; they're either from the "active" mode while we're trying
+	 * to check "search" mode, or a prior "search" mode after we've moved
+	 * to a new "search" mode (which might become the new "active" mode).
+	 */
+	table = &lq_sta->lq;
+	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+	il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
+	if (il->band == IEEE80211_BAND_5GHZ)
+		rs_idx -= IL_FIRST_OFDM_RATE;
+	mac_flags = info->status.rates[0].flags;
+	mac_idx = info->status.rates[0].idx;
+	/* For HT packets, map MCS to PLCP */
+	if (mac_flags & IEEE80211_TX_RC_MCS) {
+		mac_idx &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
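+		/* HT MCS 0 corresponds to 6M and there is no HT counterpart
+		 * of the 9M legacy rate, so skip its slot; the reverse
+		 * mapping in il4965_rs_get_rate() collapses it again. */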
+		if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+			mac_idx++;
+		/*
+		 * mac80211 HT idx is always zero-indexed; we need to move
+		 * HT OFDM rates after CCK rates in 2.4 GHz band
+		 */
+		if (il->band == IEEE80211_BAND_2GHZ)
+			mac_idx += IL_FIRST_OFDM_RATE;
+	}
+	/* Here we actually compare this rate to the latest LQ command */
+	if (mac_idx < 0 ||
+	    tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+	    tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+	    tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+	    tbl_type.ant_type != info->antenna_sel_tx ||
+	    !!(tx_rate & RATE_MCS_HT_MSK) !=
+	    !!(mac_flags & IEEE80211_TX_RC_MCS) ||
+	    !!(tx_rate & RATE_MCS_GF_MSK) !=
+	    !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) ||
+	    rs_idx != mac_idx) {
+		D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+		       rs_idx, tx_rate);
+		/*
+		 * Since rates mis-match, the last LQ command may have failed.
+		 * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
+		 * ... driver.
+		 */
+		lq_sta->missed_rate_counter++;
+		if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+			lq_sta->missed_rate_counter = 0;
+			il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+		}
+		/* Regardless, ignore this status info for outdated rate */
+		return;
+	} else
+		/* Rate did match, so reset the missed_rate_counter */
+		lq_sta->missed_rate_counter = 0;
+
+	/* Figure out if rate scale algorithm is in active or search table */
+	if (il4965_table_type_matches
+	    (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+	} else if (il4965_table_type_matches(&tbl_type,
+			&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	} else {
+		D_RATE("Neither active nor search matches tx rate\n");
+		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+		       tmp_tbl->ant_type, tmp_tbl->is_SGI);
+		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+		       tmp_tbl->ant_type, tmp_tbl->is_SGI);
+		D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+		       tbl_type.ant_type, tbl_type.is_SGI);
+		/*
+		 * no matching table found, let's by-pass the data collection
+		 * and continue to perform rate scale to find the rate table
+		 */
+		il4965_rs_stay_in_table(lq_sta, true);
+		goto done;
+	}
+
+	/*
+	 * Updating the frame history depends on whether packets were
+	 * aggregated.
+	 *
+	 * For aggregation, all packets were transmitted at the same rate, the
+	 * first idx into rate scale table.
+	 */
+	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+		il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+						&rs_idx);
+		il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+					  info->status.ampdu_len,
+					  info->status.ampdu_ack_len);
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += info->status.ampdu_ack_len;
+			lq_sta->total_failed +=
+			    (info->status.ampdu_len -
+			     info->status.ampdu_ack_len);
+		}
+	} else {
+		/*
+		 * For legacy, update frame history for each Tx retry.
+		 */
+		retries = info->status.rates[0].count - 1;
+		/* HW doesn't send more than 15 retries */
+		retries = min(retries, 15);
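+		/* The clamp also keeps the rs_table[i] reads below inside the
+		 * link quality command's retry table. */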
+
+		/* The last transmission may have been successful */
+		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		/* Collect data for each rate used during failed TX attempts */
+		for (i = 0; i <= retries; ++i) {
+			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+			il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+							&tbl_type, &rs_idx);
+			/*
+			 * Only collect stats if retried rate is in the same RS
+			 * table as active/search.
+			 */
+			if (il4965_table_type_matches(&tbl_type, curr_tbl))
+				tmp_tbl = curr_tbl;
+			else if (il4965_table_type_matches
+				 (&tbl_type, other_tbl))
+				tmp_tbl = other_tbl;
+			else
+				continue;
+			il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+						  i < retries ?
+						  0 : legacy_success);
+		}
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += legacy_success;
+			lq_sta->total_failed += retries + (1 - legacy_success);
+		}
+	}
+	/* The last TX rate is cached in lq_sta; it's set in if/else above */
+	lq_sta->last_rate_n_flags = tx_rate;
+done:
+	/* See if there's a better rate or modulation mode to try. */
+	if (sta->supp_rates[sband->band])
+		il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+			    struct il_lq_sta *lq_sta)
+{
+	D_RATE("we are staying in the same table\n");
+	lq_sta->stay_in_tbl = 1;	/* only place this gets set */
+	if (is_legacy) {
+		lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+		lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+	} else {
+		lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+		lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+	}
+	lq_sta->table_count = 0;
+	lq_sta->total_failed = 0;
+	lq_sta->total_success = 0;
+	lq_sta->flush_timer = jiffies;
+	lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+				 struct il_scale_tbl_info *tbl)
+{
+	/* Used to choose among HT tables */
+	s32(*ht_tbl_pointer)[RATE_COUNT];
+
+	/* Check for invalid LQ type */
+	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Legacy rates have only one table */
+	if (is_legacy(tbl->lq_type)) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Choose among many HT tables depending on number of streams
+	 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+	 * status */
+	if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_siso20MHz;
+	else if (is_siso(tbl->lq_type))
+		ht_tbl_pointer = expected_tpt_siso40MHz;
+	else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+	else			/* if (is_mimo2(tbl->lq_type)) <-- must be true */
+		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+	if (!tbl->is_SGI && !lq_sta->is_agg)	/* Normal */
+		tbl->expected_tpt = ht_tbl_pointer[0];
+	else if (tbl->is_SGI && !lq_sta->is_agg)	/* SGI */
+		tbl->expected_tpt = ht_tbl_pointer[1];
+	else if (!tbl->is_SGI && lq_sta->is_agg)	/* AGG */
+		tbl->expected_tpt = ht_tbl_pointer[2];
+	else			/* AGG+SGI */
+		tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
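+/*
+ * Illustration (numbers are nominal examples only): if the "active" SISO
+ * setup is measuring roughly 30 Mbps of real throughput and candidate MIMO2
+ * rates have ideal throughputs of, say, 26, 39 and 52 Mbps, the loop below
+ * walks the candidates and settles on the lowest MIMO2 rate whose ideal
+ * throughput beats the measured 30 Mbps (39 Mbps here).
+ */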
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+			struct il_scale_tbl_info *tbl,	/* "search" */
+			u16 rate_mask, s8 idx)
+{
+	/* "active" values */
+	struct il_scale_tbl_info *active_tbl =
+	    &(lq_sta->lq_info[lq_sta->active_tbl]);
+	s32 active_sr = active_tbl->win[idx].success_ratio;
+	s32 active_tpt = active_tbl->expected_tpt[idx];
+
+	/* expected "search" throughput */
+	s32 *tpt_tbl = tbl->expected_tpt;
+
+	s32 new_rate, high, low, start_hi;
+	u16 high_low;
+	s8 rate = idx;
+
+	new_rate = high = low = start_hi = RATE_INVALID;
+
+	for (;;) {
+		high_low =
+		    il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+						tbl->lq_type);
+
+		low = high_low & 0xff;
+		high = (high_low >> 8) & 0xff;
+
+		/*
+		 * Lower the "search" bit rate, to give new "search" mode
+		 * approximately the same throughput as "active" if:
+		 *
+		 * 1) "Active" mode has been working modestly well (but not
+		 *    great), and expected "search" throughput (under perfect
+		 *    conditions) at candidate rate is above the actual
+		 *    measured "active" throughput (but less than expected
+		 *    "active" throughput under perfect conditions).
+		 * OR
+		 * 2) "Active" mode has been working perfectly or very well
+		 *    and expected "search" throughput (under perfect
+		 *    conditions) at candidate rate is above expected
+		 *    "active" throughput (under perfect conditions).
+		 */
+		if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+		     (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+		      && tpt_tbl[rate] <= active_tpt)) ||
+		    (active_sr >= RATE_SCALE_SWITCH &&
+		     tpt_tbl[rate] > active_tpt)) {
+
+			/* (2nd or later pass)
+			 * If we've already tried to raise the rate, and are
+			 * now trying to lower it, use the higher rate. */
+			if (start_hi != RATE_INVALID) {
+				new_rate = start_hi;
+				break;
+			}
+
+			new_rate = rate;
+
+			/* Loop again with lower rate */
+			if (low != RATE_INVALID)
+				rate = low;
+
+			/* Lower rate not available, use the original */
+			else
+				break;
+
+			/* Else try to raise the "search" rate to match "active" */
+		} else {
+			/* (2nd or later pass)
+			 * If we've already tried to lower the rate, and are
+			 * now trying to raise it, use the lower rate. */
+			if (new_rate != RATE_INVALID)
+				break;
+
+			/* Loop again with higher rate */
+			else if (high != RATE_INVALID) {
+				start_hi = high;
+				rate = high;
+
+				/* Higher rate not available, use the original */
+			} else {
+				new_rate = rate;
+				break;
+			}
+		}
+	}
+
+	return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+			  struct ieee80211_conf *conf,
+			  struct ieee80211_sta *sta,
+			  struct il_scale_tbl_info *tbl, int idx)
+{
+	u16 rate_mask;
+	s32 rate;
+	s8 is_green = lq_sta->is_green;
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
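+	/* A peer in static SM power save keeps only one receive chain
+	 * active, so it cannot receive a two-stream (MIMO2) transmission. */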
+	if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
+	    WLAN_HT_CAP_SM_PS_STATIC)
+		return -1;
+
+	/* Need both Tx chains/antennas to support MIMO */
+	if (il->hw_params.tx_chains_num < 2)
+		return -1;
+
+	D_RATE("LQ: try to switch to MIMO2\n");
+
+	tbl->lq_type = LQ_MIMO2;
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->action = 0;
+	tbl->max_search = IL_MAX_SEARCH;
+	rate_mask = lq_sta->active_mimo2_rate;
+
+	if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+	rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+	D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+	if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+		D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+		       rate_mask);
+		return -1;
+	}
+	tbl->current_rate =
+	    il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+	D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+	       is_green);
+	return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+			 struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+			 struct il_scale_tbl_info *tbl, int idx)
+{
+	u16 rate_mask;
+	u8 is_green = lq_sta->is_green;
+	s32 rate;
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
+	D_RATE("LQ: try to switch to SISO\n");
+
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->lq_type = LQ_SISO;
+	tbl->action = 0;
+	tbl->max_search = IL_MAX_SEARCH;
+	rate_mask = lq_sta->active_siso_rate;
+
+	if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	if (is_green)
+		tbl->is_SGI = 0;	/*11n spec: no SGI in SISO+Greenfield */
+
+	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+	rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+	D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+	if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+		D_RATE("can not switch with idx %d rate mask %x\n", rate,
+		       rate_mask);
+		return -1;
+	}
+	tbl->current_rate =
+	    il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+	D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+	       is_green);
+	return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+			    struct ieee80211_conf *conf,
+			    struct ieee80211_sta *sta, int idx)
+{
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct il_scale_tbl_info *search_tbl =
+	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct il_rate_scale_data *win = &(tbl->win[idx]);
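+	/* sz covers il_scale_tbl_info minus its trailing per-rate win[]
+	 * history, so the memcpy(search_tbl, tbl, sz) calls below clone the
+	 * table configuration without copying the statistics windows. */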
+	u32 sz =
+	    (sizeof(struct il_scale_tbl_info) -
+	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+	int ret = 0;
+	u8 update_search_tbl_counter = 0;
+
+	tbl->action = IL_LEGACY_SWITCH_SISO;
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IL_LEGACY_SWITCH_ANTENNA1:
+		case IL_LEGACY_SWITCH_ANTENNA2:
+			D_RATE("LQ: Legacy toggle Antenna\n");
+
+			if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+			     tx_chains_num <= 1) ||
+			    (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+			     tx_chains_num <= 2))
+				break;
+
+			/* Don't change antenna if success has been great */
+			if (win->success_ratio >= IL_RS_GOOD_RATIO)
+				break;
+
+			/* Set up search table to try other antenna */
+			memcpy(search_tbl, tbl, sz);
+
+			if (il4965_rs_toggle_antenna
+			    (valid_tx_ant, &search_tbl->current_rate,
+			     search_tbl)) {
+				update_search_tbl_counter = 1;
+				il4965_rs_set_expected_tpt_table(lq_sta,
+								 search_tbl);
+				goto out;
+			}
+			break;
+		case IL_LEGACY_SWITCH_SISO:
+			D_RATE("LQ: Legacy switch to SISO\n");
+
+			/* Set up search table to try SISO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+			ret =
+			    il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+						     search_tbl, idx);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+
+			break;
+		case IL_LEGACY_SWITCH_MIMO2_AB:
+		case IL_LEGACY_SWITCH_MIMO2_AC:
+		case IL_LEGACY_SWITCH_MIMO2_BC:
+			D_RATE("LQ: Legacy switch to MIMO2\n");
+
+			/* Set up search table to try MIMO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!il4965_rs_is_valid_ant
+			    (valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret =
+			    il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+						      search_tbl, idx);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+			break;
+		}
+		tbl->action++;
+		if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+			tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return 0;
+
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+		tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+	return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+			     struct ieee80211_conf *conf,
+			     struct ieee80211_sta *sta, int idx)
+{
+	u8 is_green = lq_sta->is_green;
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct il_scale_tbl_info *search_tbl =
+	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct il_rate_scale_data *win = &(tbl->win[idx]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz =
+	    (sizeof(struct il_scale_tbl_info) -
+	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+	u8 update_search_tbl_counter = 0;
+	int ret;
+
+	start_action = tbl->action;
+
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IL_SISO_SWITCH_ANTENNA1:
+		case IL_SISO_SWITCH_ANTENNA2:
+			D_RATE("LQ: SISO toggle Antenna\n");
+			if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+			     tx_chains_num <= 1) ||
+			    (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+			     tx_chains_num <= 2))
+				break;
+
+			if (win->success_ratio >= IL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (il4965_rs_toggle_antenna
+			    (valid_tx_ant, &search_tbl->current_rate,
+			     search_tbl)) {
+				update_search_tbl_counter = 1;
+				goto out;
+			}
+			break;
+		case IL_SISO_SWITCH_MIMO2_AB:
+		case IL_SISO_SWITCH_MIMO2_AC:
+		case IL_SISO_SWITCH_MIMO2_BC:
+			D_RATE("LQ: SISO switch to MIMO2\n");
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!il4965_rs_is_valid_ant
+			    (valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret =
+			    il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+						      search_tbl, idx);
+			if (!ret)
+				goto out;
+			break;
+		case IL_SISO_SWITCH_GI:
+			if (!tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+			memcpy(search_tbl, tbl, sz);
+			if (is_green) {
+				if (!tbl->is_SGI)
+					break;
+				else
+					IL_ERR("SGI was set in GF+SISO\n");
+			}
+			search_tbl->is_SGI = !tbl->is_SGI;
+			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[idx])
+					break;
+			}
+			search_tbl->current_rate =
+			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+							 is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+		}
+		tbl->action++;
+		if (tbl->action > IL_SISO_SWITCH_GI)
+			tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return 0;
+
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IL_SISO_SWITCH_GI)
+		tbl->action = IL_SISO_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+
+	return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+			      struct ieee80211_conf *conf,
+			      struct ieee80211_sta *sta, int idx)
+{
+	s8 is_green = lq_sta->is_green;
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct il_scale_tbl_info *search_tbl =
+	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct il_rate_scale_data *win = &(tbl->win[idx]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz =
+	    (sizeof(struct il_scale_tbl_info) -
+	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+	u8 update_search_tbl_counter = 0;
+	int ret;
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IL_MIMO2_SWITCH_ANTENNA1:
+		case IL_MIMO2_SWITCH_ANTENNA2:
+			D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+			if (tx_chains_num <= 2)
+				break;
+
+			if (win->success_ratio >= IL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (il4965_rs_toggle_antenna
+			    (valid_tx_ant, &search_tbl->current_rate,
+			     search_tbl)) {
+				update_search_tbl_counter = 1;
+				goto out;
+			}
+			break;
+		case IL_MIMO2_SWITCH_SISO_A:
+		case IL_MIMO2_SWITCH_SISO_B:
+		case IL_MIMO2_SWITCH_SISO_C:
+			D_RATE("LQ: MIMO2 switch to SISO\n");
+
+			/* Set up new search table for SISO */
+			memcpy(search_tbl, tbl, sz);
+
+			if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+				search_tbl->ant_type = ANT_A;
+			else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+				search_tbl->ant_type = ANT_B;
+			else
+				search_tbl->ant_type = ANT_C;
+
+			if (!il4965_rs_is_valid_ant
+			    (valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret =
+			    il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+						     search_tbl, idx);
+			if (!ret)
+				goto out;
+
+			break;
+
+		case IL_MIMO2_SWITCH_GI:
+			if (!tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+			/* Set up new search table for MIMO2 */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = !tbl->is_SGI;
+			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+			/*
+			 * If active table already uses the fastest possible
+			 * modulation (dual stream with short guard interval),
+			 * and it's working well, there's no need to look
+			 * for a better type of modulation!
+			 */
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[idx])
+					break;
+			}
+			search_tbl->current_rate =
+			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+							 is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+
+		}
+		tbl->action++;
+		if (tbl->action > IL_MIMO2_SWITCH_GI)
+			tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return 0;
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IL_MIMO2_SWITCH_GI)
+		tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+
+	return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+	struct il_scale_tbl_info *tbl;
+	int i;
+	int active_tbl;
+	int flush_interval_passed = 0;
+	struct il_priv *il;
+
+	il = lq_sta->drv;
+	active_tbl = lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	/* If we've been disallowing search, see if we should now allow it */
+	if (lq_sta->stay_in_tbl) {
+
+		/* Elapsed time using current modulation mode */
+		if (lq_sta->flush_timer)
+			flush_interval_passed =
+			    time_after(jiffies,
+				       (unsigned long)(lq_sta->flush_timer +
+						       RATE_SCALE_FLUSH_INTVL));
+
+		/*
+		 * Check if we should allow search for new modulation mode.
+		 * If many frames have failed or succeeded, or we've used
+		 * this same modulation for a long time, allow search, and
+		 * reset history stats that keep track of whether we should
+		 * allow a new search.  Also (below) reset all bitmaps and
+		 * stats in active history.
+		 */
+		if (force_search ||
+		    lq_sta->total_failed > lq_sta->max_failure_limit ||
+		    lq_sta->total_success > lq_sta->max_success_limit ||
+		    (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+		     flush_interval_passed)) {
+			D_RATE("LQ: stay is expired %d %d %d\n",
+			       lq_sta->total_failed, lq_sta->total_success,
+			       flush_interval_passed);
+
+			/* Allow search for new mode */
+			lq_sta->stay_in_tbl = 0;	/* only place reset */
+			lq_sta->total_failed = 0;
+			lq_sta->total_success = 0;
+			lq_sta->flush_timer = 0;
+
+			/*
+			 * Else if we've used this modulation mode for enough repetitions
+			 * (regardless of elapsed time or success/failure), reset
+			 * history bitmaps and rate-specific stats for all rates in
+			 * active table.
+			 */
+		} else {
+			lq_sta->table_count++;
+			if (lq_sta->table_count >= lq_sta->table_count_limit) {
+				lq_sta->table_count = 0;
+
+				D_RATE("LQ: stay in table clear win\n");
+				for (i = 0; i < RATE_COUNT; i++)
+					il4965_rs_rate_scale_clear_win(
+						&(tbl->win[i]));
+			}
+		}
+
+		/* If transitioning to allow "search", reset all history
+		 * bitmaps and stats in active table (this will become the new
+		 * "search" table). */
+		if (!lq_sta->stay_in_tbl) {
+			for (i = 0; i < RATE_COUNT; i++)
+				il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+		}
+	}
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
+			  struct il_lq_sta *lq_sta,
+			  struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+	u32 rate;
+
+	/* Update uCode's rate table. */
+	rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+	il4965_rs_fill_link_cmd(il, lq_sta, rate);
+	il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+			     struct ieee80211_sta *sta,
+			     struct il_lq_sta *lq_sta)
+{
+	struct ieee80211_hw *hw = il->hw;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int low = RATE_INVALID;
+	int high = RATE_INVALID;
+	int idx;
+	int i;
+	struct il_rate_scale_data *win = NULL;
+	int current_tpt = IL_INVALID_VALUE;
+	int low_tpt = IL_INVALID_VALUE;
+	int high_tpt = IL_INVALID_VALUE;
+	u32 fail_count;
+	s8 scale_action = 0;
+	u16 rate_mask;
+	u8 update_lq = 0;
+	struct il_scale_tbl_info *tbl, *tbl1;
+	u16 rate_scale_idx_msk = 0;
+	u8 is_green = 0;
+	u8 active_tbl = 0;
+	u8 done_search = 0;
+	u16 high_low;
+	s32 sr;
+	u8 tid = MAX_TID_COUNT;
+	struct il_tid_data *tid_data;
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+	D_RATE("rate scale calculate new rate for skb\n");
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	/* TODO: this could probably be improved.. */
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		return;
+
+	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+	tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+	if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+		tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+		if (tid_data->agg.state == IL_AGG_OFF)
+			lq_sta->is_agg = 0;
+		else
+			lq_sta->is_agg = 1;
+	} else
+		lq_sta->is_agg = 0;
+
+	/*
+	 * Select rate-scale / modulation-mode table to work with in
+	 * the rest of this function:  "search" if searching for better
+	 * modulation mode, or "active" if doing rate scaling within a mode.
+	 */
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+	if (is_legacy(tbl->lq_type))
+		lq_sta->is_green = 0;
+	else
+		lq_sta->is_green = il4965_rs_use_green(sta);
+	is_green = lq_sta->is_green;
+
+	/* current tx rate */
+	idx = lq_sta->last_txrate_idx;
+
+	D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+	/* rates available for this association, and for modulation mode */
+	rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+	D_RATE("mask 0x%04X\n", rate_mask);
+
+	/* mask with station rate restriction */
+	if (is_legacy(tbl->lq_type)) {
+		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+			/* supp_rates has no CCK bits in A mode */
+			rate_scale_idx_msk =
+			    (u16) (rate_mask &
+				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+		else
+			rate_scale_idx_msk =
+			    (u16) (rate_mask & lq_sta->supp_rates);
+
+	} else
+		rate_scale_idx_msk = rate_mask;
+
+	if (!rate_scale_idx_msk)
+		rate_scale_idx_msk = rate_mask;
+
+	if (!((1 << idx) & rate_scale_idx_msk)) {
+		IL_ERR("Current Rate is not valid\n");
+		if (lq_sta->search_better_tbl) {
+			/* revert to active table if search table is not valid */
+			tbl->lq_type = LQ_NONE;
+			lq_sta->search_better_tbl = 0;
+			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+			/* get "active" rate info */
+			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+			il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+						      is_green);
+		}
+		return;
+	}
+
+	/* Get expected throughput table and history win for current rate */
+	if (!tbl->expected_tpt) {
+		IL_ERR("tbl->expected_tpt is NULL\n");
+		return;
+	}
+
+	/* force user max rate if set by user */
+	if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+		idx = lq_sta->max_rate_idx;
+		update_lq = 1;
+		win = &(tbl->win[idx]);
+		goto lq_update;
+	}
+
+	win = &(tbl->win[idx]);
+
+	/*
+	 * If there is not enough history to calculate actual average
+	 * throughput, keep analyzing results of more tx frames, without
+	 * changing rate or mode (bypass most of the rest of this function).
+	 * Set up new rate table in uCode only if old rate is not supported
+	 * in current association (use new rate found above).
+	 */
+	fail_count = win->counter - win->success_counter;
+	if (fail_count < RATE_MIN_FAILURE_TH &&
+	    win->success_counter < RATE_MIN_SUCCESS_TH) {
+		D_RATE("LQ: still below TH. succ=%d total=%d for idx %d\n",
+		       win->success_counter, win->counter, idx);
+
+		/* Can't calculate this yet; not enough history */
+		win->average_tpt = IL_INVALID_VALUE;
+
+		/* Should we stay with this modulation mode,
+		 * or search for a new one? */
+		il4965_rs_stay_in_table(lq_sta, false);
+
+		goto out;
+	}
+	/* Else we have enough samples; calculate estimate of
+	 * actual average throughput */
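+	/* success_ratio is stored pre-scaled by 128, so weighting the ideal
+	 * expected_tpt[idx] by it and dividing by 128 (+64 rounds to nearest)
+	 * gives the estimated actual throughput at this rate. */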
+	if (win->average_tpt !=
+	    ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+		IL_ERR("expected_tpt should have been calculated by now\n");
+		win->average_tpt =
+		    ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+	}
+
+	/* If we are searching for better modulation mode, check success. */
+	if (lq_sta->search_better_tbl) {
+		/* If good success, continue using the "search" mode;
+		 * no need to send new link quality command, since we're
+		 * continuing to use the setup that we've been trying. */
+		if (win->average_tpt > lq_sta->last_tpt) {
+
+			D_RATE("LQ: SWITCHING TO NEW TBL "
+			       "suc=%d cur-tpt=%d old-tpt=%d\n",
+			       win->success_ratio, win->average_tpt,
+			       lq_sta->last_tpt);
+
+			if (!is_legacy(tbl->lq_type))
+				lq_sta->enable_counter = 1;
+
+			/* Swap tables; "search" becomes "active" */
+			lq_sta->active_tbl = active_tbl;
+			current_tpt = win->average_tpt;
+
+			/* Else poor success; go back to mode in "active" table */
+		} else {
+
+			D_RATE("LQ: GOING BACK TO THE OLD TBL "
+			       "suc=%d cur-tpt=%d old-tpt=%d\n",
+			       win->success_ratio, win->average_tpt,
+			       lq_sta->last_tpt);
+
+			/* Nullify "search" table */
+			tbl->lq_type = LQ_NONE;
+
+			/* Revert to "active" table */
+			active_tbl = lq_sta->active_tbl;
+			tbl = &(lq_sta->lq_info[active_tbl]);
+
+			/* Revert to "active" rate and throughput info */
+			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+			current_tpt = lq_sta->last_tpt;
+
+			/* Need to set up a new rate table in uCode */
+			update_lq = 1;
+		}
+
+		/* Either way, we've made a decision; modulation mode
+		 * search is done, allow rate adjustment next time. */
+		lq_sta->search_better_tbl = 0;
+		done_search = 1;	/* Don't switch modes below! */
+		goto lq_update;
+	}
+
+	/* (Else) not in search of better modulation mode, try for better
+	 * starting rate, while staying in this mode. */
+	high_low =
+	    il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+					tbl->lq_type);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow rates above that constraint */
+	if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+		high = RATE_INVALID;
+
+	sr = win->success_ratio;
+
+	/* Collect measured throughputs for current and adjacent rates */
+	current_tpt = win->average_tpt;
+	if (low != RATE_INVALID)
+		low_tpt = tbl->win[low].average_tpt;
+	if (high != RATE_INVALID)
+		high_tpt = tbl->win[high].average_tpt;
+
+	scale_action = 0;
+
+	/* Too many failures, decrease rate */
+	if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+		D_RATE("decrease rate because of low success_ratio\n");
+		scale_action = -1;
+
+		/* No throughput measured yet for adjacent rates; try increase. */
+	} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+		if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+			scale_action = 1;
+		else if (low != RATE_INVALID)
+			scale_action = 0;
+	}
+
+	/* Both adjacent throughputs are measured, but neither one has better
+	 * throughput; we're using the best rate, don't change it! */
+	else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+		 low_tpt < current_tpt && high_tpt < current_tpt)
+		scale_action = 0;
+
+	/* At least one adjacent rate's throughput is measured,
+	 * and may have better performance. */
+	else {
+		/* Higher adjacent rate's throughput is measured */
+		if (high_tpt != IL_INVALID_VALUE) {
+			/* Higher rate has better throughput */
+			if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+				scale_action = 1;
+			else
+				scale_action = 0;
+
+			/* Lower adjacent rate's throughput is measured */
+		} else if (low_tpt != IL_INVALID_VALUE) {
+			/* Lower rate has better throughput */
+			if (low_tpt > current_tpt) {
+				D_RATE("decrease rate because of low tpt\n");
+				scale_action = -1;
+			} else if (sr >= RATE_INCREASE_TH) {
+				scale_action = 1;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate.  Don't change it. */
+	if (scale_action == -1 && low != RATE_INVALID &&
+	    (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+		scale_action = 0;
+
+	switch (scale_action) {
+	case -1:
+		/* Decrease starting rate, update uCode's rate table */
+		if (low != RATE_INVALID) {
+			update_lq = 1;
+			idx = low;
+		}
+
+		break;
+	case 1:
+		/* Increase starting rate, update uCode's rate table */
+		if (high != RATE_INVALID) {
+			update_lq = 1;
+			idx = high;
+		}
+
+		break;
+	case 0:
+		/* No change */
+	default:
+		break;
+	}
+
+	D_RATE("choose rate scale idx %d action %d low %d high %d type %d\n",
+	       idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+	/* Replace uCode's rate table for the destination station. */
+	if (update_lq)
+		il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+					      is_green);
+
+	/* Should we stay with this modulation mode,
+	 * or search for a new one? */
+	il4965_rs_stay_in_table(lq_sta, false);
+
+	/*
+	 * Search for new modulation mode if we're:
+	 * 1)  Not changing rates right now
+	 * 2)  Not just finishing up a search
+	 * 3)  Allowing a new search
+	 */
+	if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+		/* Save current throughput to compare with "search" throughput */
+		lq_sta->last_tpt = current_tpt;
+
+		/* Select a new "search" modulation mode to try.
+		 * If one is found, set up the new "search" table. */
+		if (is_legacy(tbl->lq_type))
+			il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+		else if (is_siso(tbl->lq_type))
+			il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+						     idx);
+		else		/* (is_mimo2(tbl->lq_type)) */
+			il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+						      idx);
+
+		/* If new "search" mode was selected, set up in uCode table */
+		if (lq_sta->search_better_tbl) {
+			/* Access the "search" table, clear its history. */
+			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+			for (i = 0; i < RATE_COUNT; i++)
+				il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+			/* Use new "search" start rate */
+			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+			D_RATE("Switch current mcs: %X idx: %d\n",
+			       tbl->current_rate, idx);
+			il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+			il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+		} else
+			done_search = 1;
+	}
+
+	if (done_search && !lq_sta->stay_in_tbl) {
+		/* If the "active" (non-search) mode was legacy,
+		 * and we've tried switching antennas,
+		 * but we haven't been able to try HT modes (not available),
+		 * stay with best antenna legacy modulation for a while
+		 * before next round of mode comparisons. */
+		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+		    lq_sta->action_counter > tbl1->max_search) {
+			D_RATE("LQ: STAY in legacy table\n");
+			il4965_rs_set_stay_in_table(il, 1, lq_sta);
+		}
+
+		/* If we're in an HT mode, and all 3 mode switch actions
+		 * have been tried and compared, stay in this best modulation
+		 * mode for a while before next round of mode comparisons. */
+		if (lq_sta->enable_counter &&
+		    lq_sta->action_counter >= tbl1->max_search) {
+			if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+			    tid != MAX_TID_COUNT) {
+				tid_data =
+				    &il->stations[lq_sta->lq.sta_id].tid[tid];
+				if (tid_data->agg.state == IL_AGG_OFF) {
+					D_RATE("try to aggregate tid %d\n",
+					       tid);
+					il4965_rs_tl_turn_on_agg(il, tid,
+								 lq_sta, sta);
+				}
+			}
+			il4965_rs_set_stay_in_table(il, 0, lq_sta);
+		}
+	}
+
+out:
+	tbl->current_rate =
+	    il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+	i = idx;
+	lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+			struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+	struct il_scale_tbl_info *tbl;
+	int rate_idx;
+	int i;
+	u32 rate;
+	u8 use_green = il4965_rs_use_green(sta);
+	u8 active_tbl = 0;
+	u8 valid_tx_ant;
+	struct il_station_priv *sta_priv;
+	struct il_rxon_context *ctx;
+
+	if (!sta || !lq_sta)
+		return;
+
+	sta_priv = (void *)sta->drv_priv;
+	ctx = sta_priv->common.ctx;
+
+	i = lq_sta->last_txrate_idx;
+
+	valid_tx_ant = il->hw_params.valid_tx_ant;
+
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	if (i < 0 || i >= RATE_COUNT)
+		i = 0;
+
+	rate = il_rates[i].plcp;
+	tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+	rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+	if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+		rate |= RATE_MCS_CCK_MSK;
+
+	il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+	if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+		il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+	rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+	tbl->current_rate = rate;
+	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+	il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+	il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+	il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+		   struct ieee80211_tx_rate_control *txrc)
+{
+
+	struct sk_buff *skb = txrc->skb;
+	struct ieee80211_supported_band *sband = txrc->sband;
+	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct il_lq_sta *lq_sta = il_sta;
+	int rate_idx;
+
+	D_RATE("rate scale calculate new rate for skb\n");
+
+	/* Get max rate if user set max rate */
+	if (lq_sta) {
+		lq_sta->max_rate_idx = txrc->max_rate_idx;
+		if (sband->band == IEEE80211_BAND_5GHZ &&
+		    lq_sta->max_rate_idx != -1)
+			lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+		if (lq_sta->max_rate_idx < 0 ||
+		    lq_sta->max_rate_idx >= RATE_COUNT)
+			lq_sta->max_rate_idx = -1;
+	}
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (lq_sta && !lq_sta->drv) {
+		D_RATE("Rate scaling not initialized yet.\n");
+		il_sta = NULL;
+	}
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	if (rate_control_send_low(sta, il_sta, txrc))
+		return;
+
+	if (!lq_sta)
+		return;
+
+	rate_idx = lq_sta->last_txrate_idx;
+
+	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+		rate_idx -= IL_FIRST_OFDM_RATE;
+		/* 6M and 9M shared same MCS idx */
+		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
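+		/* PLCP values at or above RATE_MIMO2_6M_PLCP carry a second
+		 * spatial stream, so their mac80211 MCS idx sits one
+		 * MCS_IDX_PER_STREAM block above the per-stream idx. */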
+		if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+		    RATE_MIMO2_6M_PLCP)
+			rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_SHORT_GI;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_DUP_DATA;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_40_MHZ_WIDTH;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_GREEN_FIELD;
+	} else {
+		/* Check for invalid rates */
+		if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+		    (sband->band == IEEE80211_BAND_5GHZ &&
+		     rate_idx < IL_FIRST_OFDM_RATE))
+			rate_idx = rate_lowest_index(sband, sta);
+		/* On valid 5 GHz rate, adjust idx */
+		else if (sband->band == IEEE80211_BAND_5GHZ)
+			rate_idx -= IL_FIRST_OFDM_RATE;
+		info->control.rates[0].flags = 0;
+	}
+	info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+	struct il_station_priv *sta_priv =
+	    (struct il_station_priv *)sta->drv_priv;
+	struct il_priv *il;
+
+	il = (struct il_priv *)il_rate;
+	D_RATE("create station rate scale win\n");
+
+	return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+	int i, j;
+	struct ieee80211_hw *hw = il->hw;
+	struct ieee80211_conf *conf = &il->hw->conf;
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct il_station_priv *sta_priv;
+	struct il_lq_sta *lq_sta;
+	struct ieee80211_supported_band *sband;
+
+	sta_priv = (struct il_station_priv *)sta->drv_priv;
+	lq_sta = &sta_priv->lq_sta;
+	sband = hw->wiphy->bands[conf->channel->band];
+
+	lq_sta->lq.sta_id = sta_id;
+
+	for (j = 0; j < LQ_SIZE; j++)
+		for (i = 0; i < RATE_COUNT; i++)
+			il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+						       win[i]);
+
+	lq_sta->flush_timer = 0;
+	lq_sta->supp_rates = sta->supp_rates[sband->band];
+	for (j = 0; j < LQ_SIZE; j++)
+		for (i = 0; i < RATE_COUNT; i++)
+			il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+						       win[i]);
+
+	D_RATE("LQ:*** rate scale station global init for station %d ***\n",
+	       sta_id);
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate. Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc. */
+
+	lq_sta->is_dup = 0;
+	lq_sta->max_rate_idx = -1;
+	lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+	lq_sta->is_green = il4965_rs_use_green(sta);
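+	/* Drop bit 12 (0x1000): that entry of the rate mask is the HT-only
+	 * 60 Mbps rate, which does not belong in the legacy rate mask. */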
+	lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+	lq_sta->band = il->band;
+	/*
+	 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+	 * supp_rates[] does not; shift to convert format, force 9 MBits off.
+	 */
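+	/* Worked example: an rx_mask[0] of 0xFF (MCS 0-7) becomes 0x1FD0
+	 * after the three steps below: OFDM 6-60 MBits set, 9 MBits and CCK
+	 * cleared (matching the fixed debugfs masks used further down). */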
+	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+	lq_sta->active_siso_rate &= ~((u16) 0x2);
+	lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+	/* Same here */
+	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+	lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+	lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+	/* These values will be overridden later */
+	lq_sta->lq.general_params.single_stream_ant_msk =
+	    il4965_first_antenna(il->hw_params.valid_tx_ant);
+	lq_sta->lq.general_params.dual_stream_ant_msk =
+	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+							       valid_tx_ant);
+	if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+		lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+		lq_sta->lq.general_params.dual_stream_ant_msk =
+		    il->hw_params.valid_tx_ant;
+	}
+
+	/* as default allow aggregation for all tids */
+	lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+	lq_sta->drv = il;
+
+	/* Set last_txrate_idx to lowest rate */
+	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+	if (sband->band == IEEE80211_BAND_5GHZ)
+		lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+	lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->dbg_fixed_rate = 0;
+#endif
+
+	il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
+
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+			u32 new_rate)
+{
+	struct il_scale_tbl_info tbl_type;
+	int idx = 0;
+	int rate_idx;
+	int repeat_rate = 0;
+	u8 ant_toggle_cnt = 0;
+	u8 use_ht_possible = 1;
+	u8 valid_tx_ant = 0;
+	struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+	/* Override starting rate (idx 0) if needed for debug purposes */
+	il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+	/* Interpret new_rate (rate_n_flags) */
+	il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+					&rate_idx);
+
+	/* How many times should we repeat the initial rate? */
+	if (is_legacy(tbl_type.lq_type)) {
+		ant_toggle_cnt = 1;
+		repeat_rate = IL_NUMBER_TRY;
+	} else {
+		repeat_rate = IL_HT_NUMBER_TRY;
+	}
+
+	lq_cmd->general_params.mimo_delimiter =
+	    is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+	/* Fill 1st table entry (idx 0) */
+	lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+	if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+		lq_cmd->general_params.single_stream_ant_msk =
+		    tbl_type.ant_type;
+	} else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+		lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+	}
+	/* otherwise we don't modify the existing value */
+	idx++;
+	repeat_rate--;
+	if (il)
+		valid_tx_ant = il->hw_params.valid_tx_ant;
+
+	/* Fill rest of rate table */
+	while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+		/* Repeat initial/next rate.
+		 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+		 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+		while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+			if (is_legacy(tbl_type.lq_type)) {
+				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+					ant_toggle_cnt++;
+				else if (il &&
+					 il4965_rs_toggle_antenna(valid_tx_ant,
+								  &new_rate,
+								  &tbl_type))
+					ant_toggle_cnt = 1;
+			}
+
+			/* Override next rate if needed for debug purposes */
+			il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+			/* Fill next table entry */
+			lq_cmd->rs_table[idx].rate_n_flags =
+			    cpu_to_le32(new_rate);
+			repeat_rate--;
+			idx++;
+		}
+
+		il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+						&tbl_type, &rate_idx);
+
+		/* Indicate to uCode which entries might be MIMO.
+		 * If initial rate was MIMO, this will finally end up
+		 * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+		if (is_mimo(tbl_type.lq_type))
+			lq_cmd->general_params.mimo_delimiter = idx;
+
+		/* Get next rate */
+		new_rate =
+		    il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+					     use_ht_possible);
+
+		/* How many times should we repeat the next rate? */
+		if (is_legacy(tbl_type.lq_type)) {
+			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+				ant_toggle_cnt++;
+			else if (il &&
+				 il4965_rs_toggle_antenna(valid_tx_ant,
+							  &new_rate, &tbl_type))
+				ant_toggle_cnt = 1;
+
+			repeat_rate = IL_NUMBER_TRY;
+		} else {
+			repeat_rate = IL_HT_NUMBER_TRY;
+		}
+
+		/* Don't allow HT rates after next pass.
+		 * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+		use_ht_possible = 0;
+
+		/* Override next rate if needed for debug purposes */
+		il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+		/* Fill next table entry */
+		lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+		idx++;
+		repeat_rate--;
+	}
+
+	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+	lq_cmd->agg_params.agg_time_limit =
+	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+	return;
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+	struct il_priv *il __maybe_unused = il_r;
+
+	D_RATE("enter\n");
+	D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il4965_open_file_generic(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+	struct il_priv *il;
+	u8 valid_tx_ant;
+	u8 ant_sel_tx;
+
+	il = lq_sta->drv;
+	valid_tx_ant = il->hw_params.valid_tx_ant;
+	if (lq_sta->dbg_fixed_rate) {
+		ant_sel_tx =
+		    (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+		    RATE_MCS_ANT_POS;
+		if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+			*rate_n_flags = lq_sta->dbg_fixed_rate;
+			D_RATE("Fixed rate ON\n");
+		} else {
+			lq_sta->dbg_fixed_rate = 0;
+			IL_ERR
+			    ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+			     ant_sel_tx, valid_tx_ant);
+			D_RATE("Fixed rate OFF\n");
+		}
+	} else {
+		D_RATE("Fixed rate OFF\n");
+	}
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct il_lq_sta *lq_sta = file->private_data;
+	struct il_priv *il;
+	char buf[64];
+	size_t buf_size;
+	u32 parsed_rate;
+	struct il_station_priv *sta_priv =
+	    container_of(lq_sta, struct il_station_priv, lq_sta);
+	struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+	il = lq_sta->drv;
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x", &parsed_rate) == 1)
+		lq_sta->dbg_fixed_rate = parsed_rate;
+	else
+		lq_sta->dbg_fixed_rate = 0;
+
+	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
+	lq_sta->active_siso_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+	lq_sta->active_mimo2_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+
+	D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+	       lq_sta->dbg_fixed_rate);
+
+	if (lq_sta->dbg_fixed_rate) {
+		il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+		il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+	}
+
+	return count;
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i = 0;
+	int idx = 0;
+	ssize_t ret;
+
+	struct il_lq_sta *lq_sta = file->private_data;
+	struct il_priv *il;
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+	il = lq_sta->drv;
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+	desc +=
+	    sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
+		    lq_sta->total_failed, lq_sta->total_success,
+		    lq_sta->active_legacy_rate);
+	desc +=
+	    sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+	desc +=
+	    sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+		    (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+		    (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+		    (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+	desc +=
+	    sprintf(buff + desc, "lq type %s\n",
+		    (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+	if (is_Ht(tbl->lq_type)) {
+		desc +=
+		    sprintf(buff + desc, " %s",
+			    (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+		desc +=
+		    sprintf(buff + desc, " %s",
+			    (tbl->is_ht40) ? "40MHz" : "20MHz");
+		desc +=
+		    sprintf(buff + desc, " %s %s %s\n",
+			    (tbl->is_SGI) ? "SGI" : "",
+			    (lq_sta->is_green) ? "GF enabled" : "",
+			    (lq_sta->is_agg) ? "AGG on" : "");
+	}
+	desc +=
+	    sprintf(buff + desc, "last tx rate=0x%X\n",
+		    lq_sta->last_rate_n_flags);
+	desc +=
+	    sprintf(buff + desc,
+		    "general:flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+		    lq_sta->lq.general_params.flags,
+		    lq_sta->lq.general_params.mimo_delimiter,
+		    lq_sta->lq.general_params.single_stream_ant_msk,
+		    lq_sta->lq.general_params.dual_stream_ant_msk);
+
+	desc +=
+	    sprintf(buff + desc,
+		    "agg:time_limit=%d dis_start_th=%d frame_cnt_limit=%d\n",
+		    le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+		    lq_sta->lq.agg_params.agg_dis_start_th,
+		    lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+	desc +=
+	    sprintf(buff + desc,
+		    "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+		    lq_sta->lq.general_params.start_rate_idx[0],
+		    lq_sta->lq.general_params.start_rate_idx[1],
+		    lq_sta->lq.general_params.start_rate_idx[2],
+		    lq_sta->lq.general_params.start_rate_idx[3]);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		idx = il4965_hwrate_to_plcp_idx(
+			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+		if (is_legacy(tbl->lq_type)) {
+			desc +=
+			    sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+				    le32_to_cpu(lq_sta->lq.rs_table[i].
+						rate_n_flags),
+				    il_rate_mcs[idx].mbps);
+		} else {
+			desc +=
+			    sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+				    i,
+				    le32_to_cpu(lq_sta->lq.rs_table[i].
+						rate_n_flags),
+				    il_rate_mcs[idx].mbps,
+				    il_rate_mcs[idx].mcs);
+		}
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+	.write = il4965_rs_sta_dbgfs_scale_table_write,
+	.read = il4965_rs_sta_dbgfs_scale_table_read,
+	.open = il4965_open_file_generic,
+	.llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i, j;
+	ssize_t ret;
+
+	struct il_lq_sta *lq_sta = file->private_data;
+
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	for (i = 0; i < LQ_SIZE; i++) {
+		desc +=
+		    sprintf(buff + desc,
+			    "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+			    "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+			    lq_sta->lq_info[i].lq_type,
+			    lq_sta->lq_info[i].is_SGI,
+			    lq_sta->lq_info[i].is_ht40,
+			    lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+			    lq_sta->lq_info[i].current_rate);
+		for (j = 0; j < RATE_COUNT; j++) {
+			desc +=
+			    sprintf(buff + desc,
+				    "counter=%d success=%d %%=%d\n",
+				    lq_sta->lq_info[i].win[j].counter,
+				    lq_sta->lq_info[i].win[j].success_counter,
+				    lq_sta->lq_info[i].win[j].success_ratio);
+		}
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+	.read = il4965_rs_sta_dbgfs_stats_table_read,
+	.open = il4965_open_file_generic,
+	.llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+					 char __user *user_buf, size_t count,
+					 loff_t *ppos)
+{
+	char buff[120];
+	int desc = 0;
+	struct il_lq_sta *lq_sta = file->private_data;
+	struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+	if (is_Ht(tbl->lq_type))
+		desc +=
+		    sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+			    tbl->expected_tpt[lq_sta->last_txrate_idx]);
+	else
+		desc +=
+		    sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+			    il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+	.read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+	.open = il4965_open_file_generic,
+	.llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+	struct il_lq_sta *lq_sta = il_sta;
+	lq_sta->rs_sta_dbgfs_scale_table_file =
+	    debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+				lq_sta, &rs_sta_dbgfs_scale_table_ops);
+	lq_sta->rs_sta_dbgfs_stats_table_file =
+	    debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
+				&rs_sta_dbgfs_stats_table_ops);
+	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+	    debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
+				&rs_sta_dbgfs_rate_scale_data_ops);
+	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+	    debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+			      &lq_sta->tx_agg_tid_en);
+
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+	struct il_lq_sta *lq_sta = il_sta;
+	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_4965_ops = {
+	.module = NULL,
+	.name = IL4965_RS_NAME,
+	.tx_status = il4965_rs_tx_status,
+	.get_rate = il4965_rs_get_rate,
+	.rate_init = il4965_rs_rate_init_stub,
+	.alloc = il4965_rs_alloc,
+	.free = il4965_rs_free,
+	.alloc_sta = il4965_rs_alloc_sta,
+	.free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = il4965_rs_add_debugfs,
+	.remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 0000000..cacbc03
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2402 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+	u32 val;
+	int ret = 0;
+	u32 errcnt = 0;
+	u32 i;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 3)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+	u32 val;
+	u32 save_len = len;
+	int ret = 0;
+	u32 errcnt;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+	errcnt = 0;
+	for (; len > 0; len -= sizeof(u32), image++) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("uCode INST section is invalid at "
+			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
+			       save_len - len, val, le32_to_cpu(*image));
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 20)
+				break;
+		}
+	}
+
+	if (!errcnt)
+		D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+	return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+	__le32 *image;
+	u32 len;
+	int ret;
+
+	/* Try bootstrap */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	ret = il4965_verify_inst_sparse(il, image, len);
+	if (!ret) {
+		D_INFO("Bootstrap uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try initialize */
+	image = (__le32 *) il->ucode_init.v_addr;
+	len = il->ucode_init.len;
+	ret = il4965_verify_inst_sparse(il, image, len);
+	if (!ret) {
+		D_INFO("Initialize uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try runtime/protocol */
+	image = (__le32 *) il->ucode_code.v_addr;
+	len = il->ucode_code.len;
+	ret = il4965_verify_inst_sparse(il, image, len);
+	if (!ret) {
+		D_INFO("Runtime uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+	/* Since nothing seems to match, show first several data entries in
+	 * instruction SRAM, so maybe visual inspection will give a clue.
+	 * Selection of bootstrap image (vs. other images) is arbitrary. */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	ret = il4965_verify_inst_full(il, image, len);
+
+	return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+	u16 count;
+	int ret;
+
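+	/* request the semaphore up to EEPROM_SEM_RETRY_LIMIT times, polling
+	 * for the grant bit for EEPROM_SEM_TIMEOUT on each attempt */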
+	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+		/* Request semaphore */
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+		/* See if we got it */
+		ret =
+		    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+				 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+				 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+				 EEPROM_SEM_TIMEOUT);
+		if (ret >= 0)
+			return ret;
+	}
+
+	return ret;
+}
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+	il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+		     CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+	u16 eeprom_ver;
+	u16 calib_ver;
+
+	eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+	calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+	if (eeprom_ver < il->cfg->eeprom_ver ||
+	    calib_ver < il->cfg->eeprom_calib_ver)
+		goto err;
+
+	IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+	return 0;
+err:
+	IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+	       "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+	       calib_ver, il->cfg->eeprom_calib_ver);
+	return -EINVAL;
+
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
+{
+	const u8 *addr = il_eeprom_query_addr(il,
+					      EEPROM_MAC_ADDRESS);
+	memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+	struct il_host_cmd cmd = {
+		.id = C_LEDS,
+		.len = sizeof(struct il_led_cmd),
+		.data = led_cmd,
+		.flags = CMD_ASYNC,
+		.callback = NULL,
+	};
+	u32 reg;
+
+	reg = _il_rd(il, CSR_LED_REG);
+	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+		_il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+	return il_send_cmd(il, &cmd);
+}
+
+/* Set led register off */
+void
+il4965_led_enable(struct il_priv *il)
+{
+	_il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct il_led_ops il4965_led_ops = {
+	.cmd = il4965_send_led_cmd,
+};
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	u32 reg;
+	u32 val;
+
+	D_INFO("Begin verify bsm\n");
+
+	/* verify BSM SRAM contents */
+	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+	     reg += sizeof(u32), image++) {
+		val = il_rd_prph(il, reg);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("BSM uCode verification failed at "
+			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+			       len, val, le32_to_cpu(*image));
+			return -EIO;
+		}
+	}
+
+	D_INFO("BSM bootstrap uCode image OK\n");
+
+	return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+	u32 inst_len;
+	u32 data_len;
+	int i;
+	u32 done;
+	u32 reg_offset;
+	int ret;
+
+	D_INFO("Begin load bsm\n");
+
+	il->ucode_type = UCODE_RT;
+
+	/* make sure bootstrap program is no larger than BSM's SRAM size */
+	if (len > IL49_MAX_BSM_SIZE)
+		return -EINVAL;
+
+	/* Tell bootstrap uCode where to find the "Initialize" uCode
+	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+	 * NOTE:  il_init_alive_start() will replace these values,
+	 *        after the "initialize" uCode has run, to point to
+	 *        runtime/protocol instructions and backup data cache.
+	 */
+	pinst = il->ucode_init.p_addr >> 4;
+	pdata = il->ucode_init_data.p_addr >> 4;
+	inst_len = il->ucode_init.len;
+	data_len = il->ucode_init_data.len;
+
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+	/* Fill BSM memory with bootstrap instructions */
+	for (reg_offset = BSM_SRAM_LOWER_BOUND;
+	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
+	     reg_offset += sizeof(u32), image++)
+		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+	ret = il4965_verify_bsm(il);
+	if (ret)
+		return ret;
+
+	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+	/* Load bootstrap code into instruction SRAM now,
+	 *   to prepare to load "initialize" uCode */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+	/* Wait for load of bootstrap uCode to finish */
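+	/* (poll up to 100 times at 10 usec intervals, ~1 ms worst case) */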
+	for (i = 0; i < 100; i++) {
+		done = il_rd_prph(il, BSM_WR_CTRL_REG);
+		if (!(done & BSM_WR_CTRL_REG_BIT_START))
+			break;
+		udelay(10);
+	}
+	if (i < 100)
+		D_INFO("BSM write complete, poll %d iterations\n", i);
+	else {
+		IL_ERR("BSM write did not complete!\n");
+		return -EIO;
+	}
+
+	/* Enable future boot loads whenever power management unit triggers it
+	 *   (e.g. when powering back up after power-save shutdown) */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+	return 0;
+}
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+	int ret = 0;
+
+	/* bits 35:4 for 4965 */
+	pinst = il->ucode_code.p_addr >> 4;
+	pdata = il->ucode_data_backup.p_addr >> 4;
+
+	/* Tell bootstrap uCode where to find image to load */
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+	/* Inst byte count must be last to set up, bit 31 signals uCode
+	 *   that all new ptr/size info is in place */
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+		   il->ucode_code.len | BSM_DRAM_INST_LOAD);
+	D_INFO("Runtime uCode pointers are set.\n");
+
+	return ret;
+}
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ *   Voltage, temperature, and MIMO tx gain correction, now stored in il
+ *   (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+*/
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "initialize" alive if code weren't properly loaded.  */
+	if (il4965_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad \"initialize\" uCode load.\n");
+		goto restart;
+	}
+
+	/* Calculate temperature */
+	il->temperature = il4965_hw_get_temperature(il);
+
+	/* Send pointers to protocol/runtime uCode image ... init code will
+	 * load and launch runtime uCode, which will send us another "Alive"
+	 * notification. */
+	D_INFO("Initialization Alive received.\n");
+	if (il4965_set_ucode_ptrs(il)) {
+		/* Runtime instruction load won't happen;
+		 * take it all the way back down so we can try again */
+		D_INFO("Couldn't set up uCode pointers.\n");
+		goto restart;
+	}
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+	int chan_mod =
+	    le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+	    RXON_FLG_CHANNEL_MODE_POS;
+	return (chan_mod == CHANNEL_MODE_PURE_40 ||
+		chan_mod == CHANNEL_MODE_MIXED);
+}
+
+static void
+il4965_nic_config(struct il_priv *il)
+{
+	unsigned long flags;
+	u16 radio_cfg;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+	/* write radio config values to register */
+	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+			   EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+			   EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+	/* set CSR_HW_CONFIG_REG for uCode use */
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+		   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+		   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+	il->calib_info =
+	    (struct il_eeprom_calib_info *)
+	    il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ *  ... once chain noise is calibrated the first time, it's good forever.  */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+	struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+	if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+		struct il_calib_diff_gain_cmd cmd;
+
+		/* clear data for chain noise calibration algorithm */
+		data->chain_noise_a = 0;
+		data->chain_noise_b = 0;
+		data->chain_noise_c = 0;
+		data->chain_signal_a = 0;
+		data->chain_signal_b = 0;
+		data->chain_signal_c = 0;
+		data->beacon_count = 0;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+		cmd.diff_gain_a = 0;
+		cmd.diff_gain_b = 0;
+		cmd.diff_gain_c = 0;
+		if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+			IL_ERR("Could not send C_PHY_CALIBRATION\n");
+		data->state = IL_CHAIN_NOISE_ACCUMULATE;
+		D_CALIB("Run chain_noise_calibrate\n");
+	}
+}
+
+static struct il_sensitivity_ranges il4965_sensitivity = {
+	.min_nrg_cck = 97,
+	.max_nrg_cck = 0,	/* not used, set to 0 */
+
+	.auto_corr_min_ofdm = 85,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 220,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	.auto_corr_max_ofdm_x1 = 140,
+	.auto_corr_max_ofdm_mrc_x1 = 270,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 200,
+	.auto_corr_max_cck_mrc = 400,
+
+	.nrg_th_cck = 100,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static void
+il4965_set_ct_threshold(struct il_priv *il)
+{
+	/* want Kelvin */
+	il->hw_params.ct_kill_threshold =
+	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
+
+/**
+ * il4965_hw_set_hw_params
+ *
+ * Called when initializing driver
+ */
+static int
+il4965_hw_set_hw_params(struct il_priv *il)
+{
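+	/* honour the num_of_queues module parameter when it is within the
+	 * supported range; otherwise keep the per-device default */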
+	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+		il->cfg->base_params->num_of_queues =
+		    il->cfg->mod_params->num_of_queues;
+
+	il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+	il->hw_params.scd_bc_tbls_size =
+	    il->cfg->base_params->num_of_queues *
+	    sizeof(struct il4965_scd_bc_tbl);
+	il->hw_params.tfd_size = sizeof(struct il_tfd);
+	il->hw_params.max_stations = IL4965_STATION_COUNT;
+	il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
+	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+	il4965_set_ct_threshold(il);
+
+	il->hw_params.sens = &il4965_sensitivity;
+	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+
+	return 0;
+}
+
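+/*
+ * Signed division that rounds to the nearest integer, halves away from
+ * zero: the quotient is computed on magnitudes as (num * 2 + denom) /
+ * (denom * 2) and the sign is re-applied afterwards.  For example (values
+ * chosen only for illustration), 7 / 2 gives 4 and -7 / 2 gives -4, while
+ * 4 / 2 stays 2.  The function's return value is always 1 and is not a
+ * status code.
+ */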
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 * res)
+{
+	s32 sign = 1;
+
+	if (num < 0) {
+		sign = -sign;
+		num = -num;
+	}
+	if (denom < 0) {
+		sign = -sign;
+		denom = -denom;
+	}
+	*res = 1;
+	*res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+	return 1;
+}
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+	s32 comp = 0;
+
+	if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+	    TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+		return 0;
+
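+	/*
+	 * Illustrative numbers only (not from real hardware): assuming
+	 * TX_POWER_IL_VOLTAGE_CODES_PER_03V is 7, an eeprom reading of 3160
+	 * and a current reading of 3167 give (3167 - 3160) / 7 = 1 half-dB
+	 * step; because the current reading is the higher one the step count
+	 * is doubled to 2 below, and any result outside [-2, 2] is reset to 0.
+	 */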
+	il4965_math_div_round(current_voltage - eeprom_voltage,
+			      TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
+	if (current_voltage > eeprom_voltage)
+		comp *= 2;
+	if ((comp < -2) || (comp > 2))
+		comp = 0;
+
+	return comp;
+}
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+	if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+		return CALIB_CH_GROUP_5;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+		return CALIB_CH_GROUP_1;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+		return CALIB_CH_GROUP_2;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+		return CALIB_CH_GROUP_3;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+		return CALIB_CH_GROUP_4;
+
+	return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+	s32 b = -1;
+
+	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+		if (il->calib_info->band_info[b].ch_from == 0)
+			continue;
+
+		if (channel >= il->calib_info->band_info[b].ch_from &&
+		    channel <= il->calib_info->band_info[b].ch_to)
+			break;
+	}
+
+	return b;
+}
+
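+/*
+ * Linear interpolation between two factory sample points (x1, y1) and
+ * (x2, y2).  With assumed values x = 40, x1 = 36, y1 = 100, x2 = 44 and
+ * y2 = 80 the result is round((44 - 40) * (100 - 80) / (44 - 36)) + 80 =
+ * 10 + 80 = 90, i.e. halfway between the two samples.
+ */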
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+	s32 val;
+
+	if (x2 == x1)
+		return y1;
+	else {
+		il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+		return val + y2;
+	}
+}
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest.  Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+			struct il_eeprom_calib_ch_info *chan_info)
+{
+	s32 s = -1;
+	u32 c;
+	u32 m;
+	const struct il_eeprom_calib_measure *m1;
+	const struct il_eeprom_calib_measure *m2;
+	struct il_eeprom_calib_measure *omeas;
+	u32 ch_i1;
+	u32 ch_i2;
+
+	s = il4965_get_sub_band(il, channel);
+	if (s >= EEPROM_TX_POWER_BANDS) {
+		IL_ERR("Tx Power can not find channel %d\n", channel);
+		return -1;
+	}
+
+	ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+	ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+	chan_info->ch_num = (u8) channel;
+
+	D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+		  ch_i1, ch_i2);
+
+	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+			m1 = &(il->calib_info->band_info[s].ch1.
+			       measurements[c][m]);
+			m2 = &(il->calib_info->band_info[s].ch2.
+			       measurements[c][m]);
+			omeas = &(chan_info->measurements[c][m]);
+
+			omeas->actual_pow =
+			    (u8) il4965_interpolate_value(channel, ch_i1,
+							  m1->actual_pow, ch_i2,
+							  m2->actual_pow);
+			omeas->gain_idx =
+			    (u8) il4965_interpolate_value(channel, ch_i1,
+							  m1->gain_idx, ch_i2,
+							  m2->gain_idx);
+			omeas->temperature =
+			    (u8) il4965_interpolate_value(channel, ch_i1,
+							  m1->temperature,
+							  ch_i2,
+							  m2->temperature);
+			omeas->pa_det =
+			    (s8) il4965_interpolate_value(channel, ch_i1,
+							  m1->pa_det, ch_i2,
+							  m2->pa_det);
+
+			D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+				  m, m1->actual_pow, m2->actual_pow,
+				  omeas->actual_pow);
+			D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+				  m, m1->gain_idx, m2->gain_idx,
+				  omeas->gain_idx);
+			D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+				  m, m1->pa_det, m2->pa_det, omeas->pa_det);
+			D_TXPOWER("chain %d meas %d  T1=%d  T2=%d  T=%d\n", c,
+				  m, m1->temperature, m2->temperature,
+				  omeas->temperature);
+		}
+	}
+
+	return 0;
+}
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
+	10			/* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+	s32 degrees_per_05db_a;
+	s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+	{9, 2},			/* group 0 5.2, ch  34-43 */
+	{4, 1},			/* group 1 5.2, ch  44-70 */
+	{4, 1},			/* group 2 5.2, ch  71-124 */
+	{4, 1},			/* group 3 5.2, ch 125-200 */
+	{3, 1}			/* group 4 2.4, ch   all */
+};
+
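+/*
+ * Lowest gain-table idx allowed for a given rate: on the 5.2 GHz band the
+ * first five rates of each 8-entry block (rate_power_idx & 7 <= 4) may use
+ * the extended region of the gain table; every other case is bounded by
+ * MIN_TX_GAIN_IDX.
+ */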
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+	if (!band) {
+		if ((rate_power_idx & 7) <= 4)
+			return MIN_TX_GAIN_IDX_52GHZ_EXT;
+	}
+	return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+	u8 dsp;
+	u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+	/* 5.2GHz power gain idx table */
+	{
+	 {123, 0x3F},		/* highest txpower */
+	 {117, 0x3F},
+	 {110, 0x3F},
+	 {104, 0x3F},
+	 {98, 0x3F},
+	 {110, 0x3E},
+	 {104, 0x3E},
+	 {98, 0x3E},
+	 {110, 0x3D},
+	 {104, 0x3D},
+	 {98, 0x3D},
+	 {110, 0x3C},
+	 {104, 0x3C},
+	 {98, 0x3C},
+	 {110, 0x3B},
+	 {104, 0x3B},
+	 {98, 0x3B},
+	 {110, 0x3A},
+	 {104, 0x3A},
+	 {98, 0x3A},
+	 {110, 0x39},
+	 {104, 0x39},
+	 {98, 0x39},
+	 {110, 0x38},
+	 {104, 0x38},
+	 {98, 0x38},
+	 {110, 0x37},
+	 {104, 0x37},
+	 {98, 0x37},
+	 {110, 0x36},
+	 {104, 0x36},
+	 {98, 0x36},
+	 {110, 0x35},
+	 {104, 0x35},
+	 {98, 0x35},
+	 {110, 0x34},
+	 {104, 0x34},
+	 {98, 0x34},
+	 {110, 0x33},
+	 {104, 0x33},
+	 {98, 0x33},
+	 {110, 0x32},
+	 {104, 0x32},
+	 {98, 0x32},
+	 {110, 0x31},
+	 {104, 0x31},
+	 {98, 0x31},
+	 {110, 0x30},
+	 {104, 0x30},
+	 {98, 0x30},
+	 {110, 0x25},
+	 {104, 0x25},
+	 {98, 0x25},
+	 {110, 0x24},
+	 {104, 0x24},
+	 {98, 0x24},
+	 {110, 0x23},
+	 {104, 0x23},
+	 {98, 0x23},
+	 {110, 0x22},
+	 {104, 0x18},
+	 {98, 0x18},
+	 {110, 0x17},
+	 {104, 0x17},
+	 {98, 0x17},
+	 {110, 0x16},
+	 {104, 0x16},
+	 {98, 0x16},
+	 {110, 0x15},
+	 {104, 0x15},
+	 {98, 0x15},
+	 {110, 0x14},
+	 {104, 0x14},
+	 {98, 0x14},
+	 {110, 0x13},
+	 {104, 0x13},
+	 {98, 0x13},
+	 {110, 0x12},
+	 {104, 0x08},
+	 {98, 0x08},
+	 {110, 0x07},
+	 {104, 0x07},
+	 {98, 0x07},
+	 {110, 0x06},
+	 {104, 0x06},
+	 {98, 0x06},
+	 {110, 0x05},
+	 {104, 0x05},
+	 {98, 0x05},
+	 {110, 0x04},
+	 {104, 0x04},
+	 {98, 0x04},
+	 {110, 0x03},
+	 {104, 0x03},
+	 {98, 0x03},
+	 {110, 0x02},
+	 {104, 0x02},
+	 {98, 0x02},
+	 {110, 0x01},
+	 {104, 0x01},
+	 {98, 0x01},
+	 {110, 0x00},
+	 {104, 0x00},
+	 {98, 0x00},
+	 {93, 0x00},
+	 {88, 0x00},
+	 {83, 0x00},
+	 {78, 0x00},
+	 },
+	/* 2.4GHz power gain idx table */
+	{
+	 {110, 0x3f},		/* highest txpower */
+	 {104, 0x3f},
+	 {98, 0x3f},
+	 {110, 0x3e},
+	 {104, 0x3e},
+	 {98, 0x3e},
+	 {110, 0x3d},
+	 {104, 0x3d},
+	 {98, 0x3d},
+	 {110, 0x3c},
+	 {104, 0x3c},
+	 {98, 0x3c},
+	 {110, 0x3b},
+	 {104, 0x3b},
+	 {98, 0x3b},
+	 {110, 0x3a},
+	 {104, 0x3a},
+	 {98, 0x3a},
+	 {110, 0x39},
+	 {104, 0x39},
+	 {98, 0x39},
+	 {110, 0x38},
+	 {104, 0x38},
+	 {98, 0x38},
+	 {110, 0x37},
+	 {104, 0x37},
+	 {98, 0x37},
+	 {110, 0x36},
+	 {104, 0x36},
+	 {98, 0x36},
+	 {110, 0x35},
+	 {104, 0x35},
+	 {98, 0x35},
+	 {110, 0x34},
+	 {104, 0x34},
+	 {98, 0x34},
+	 {110, 0x33},
+	 {104, 0x33},
+	 {98, 0x33},
+	 {110, 0x32},
+	 {104, 0x32},
+	 {98, 0x32},
+	 {110, 0x31},
+	 {104, 0x31},
+	 {98, 0x31},
+	 {110, 0x30},
+	 {104, 0x30},
+	 {98, 0x30},
+	 {110, 0x6},
+	 {104, 0x6},
+	 {98, 0x6},
+	 {110, 0x5},
+	 {104, 0x5},
+	 {98, 0x5},
+	 {110, 0x4},
+	 {104, 0x4},
+	 {98, 0x4},
+	 {110, 0x3},
+	 {104, 0x3},
+	 {98, 0x3},
+	 {110, 0x2},
+	 {104, 0x2},
+	 {98, 0x2},
+	 {110, 0x1},
+	 {104, 0x1},
+	 {98, 0x1},
+	 {110, 0x0},
+	 {104, 0x0},
+	 {98, 0x0},
+	 {97, 0},
+	 {96, 0},
+	 {95, 0},
+	 {94, 0},
+	 {93, 0},
+	 {92, 0},
+	 {91, 0},
+	 {90, 0},
+	 {89, 0},
+	 {88, 0},
+	 {87, 0},
+	 {86, 0},
+	 {85, 0},
+	 {84, 0},
+	 {83, 0},
+	 {82, 0},
+	 {81, 0},
+	 {80, 0},
+	 {79, 0},
+	 {78, 0},
+	 {77, 0},
+	 {76, 0},
+	 {75, 0},
+	 {74, 0},
+	 {73, 0},
+	 {72, 0},
+	 {71, 0},
+	 {70, 0},
+	 {69, 0},
+	 {68, 0},
+	 {67, 0},
+	 {66, 0},
+	 {65, 0},
+	 {64, 0},
+	 {63, 0},
+	 {62, 0},
+	 {61, 0},
+	 {60, 0},
+	 {59, 0},
+	 }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+			u8 ctrl_chan_high,
+			struct il4965_tx_power_db *tx_power_tbl)
+{
+	u8 saturation_power;
+	s32 target_power;
+	s32 user_target_power;
+	s32 power_limit;
+	s32 current_temp;
+	s32 reg_limit;
+	s32 current_regulatory;
+	s32 txatten_grp = CALIB_CH_GROUP_MAX;
+	int i;
+	int c;
+	const struct il_channel_info *ch_info = NULL;
+	struct il_eeprom_calib_ch_info ch_eeprom_info;
+	const struct il_eeprom_calib_measure *measurement;
+	s16 voltage;
+	s32 init_voltage;
+	s32 voltage_compensation;
+	s32 degrees_per_05db_num;
+	s32 degrees_per_05db_denom;
+	s32 factory_temp;
+	s32 temperature_comp[2];
+	s32 factory_gain_idx[2];
+	s32 factory_actual_pwr[2];
+	s32 power_idx;
+
+	/* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+	 *   are used for idxing into txpower table) */
+	user_target_power = 2 * il->tx_power_user_lmt;
+
+	/* Get current (RXON) channel, band, width */
+	D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+	ch_info = il_get_channel_info(il, il->band, channel);
+
+	if (!il_is_channel_valid(ch_info))
+		return -EINVAL;
+
+	/* get txatten group, used to select 1) thermal txpower adjustment
+	 *   and 2) mimo txpower balance between Tx chains. */
+	txatten_grp = il4965_get_tx_atten_grp(channel);
+	if (txatten_grp < 0) {
+		IL_ERR("Can't find txatten group for channel %d.\n", channel);
+		return txatten_grp;
+	}
+
+	D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+		  txatten_grp);
+
+	if (is_ht40) {
+		if (ctrl_chan_high)
+			channel -= 2;
+		else
+			channel += 2;
+	}
+
+	/* hardware txpower limits ...
+	 * saturation (clipping distortion) txpowers are in half-dBm */
+	if (band)
+		saturation_power = il->calib_info->saturation_power24;
+	else
+		saturation_power = il->calib_info->saturation_power52;
+
+	if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+	    saturation_power > IL_TX_POWER_SATURATION_MAX) {
+		if (band)
+			saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+		else
+			saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+	}
+
+	/* regulatory txpower limits ... reg_limit values are in half-dBm,
+	 *   max_power_avg values are in dBm, convert * 2 */
+	if (is_ht40)
+		reg_limit = ch_info->ht40_max_power_avg * 2;
+	else
+		reg_limit = ch_info->max_power_avg * 2;
+
+	if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+	    (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+		if (band)
+			reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+		else
+			reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+	}
+
+	/* Interpolate txpower calibration values for this channel,
+	 *   based on factory calibration tests on spaced channels. */
+	il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+	/* calculate tx gain adjustment based on power supply voltage */
+	voltage = le16_to_cpu(il->calib_info->voltage);
+	init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+	voltage_compensation =
+	    il4965_get_voltage_compensation(voltage, init_voltage);
+
+	D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+		  voltage, voltage_compensation);
+
+	/* get current temperature (Celsius) */
+	current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+	current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
+	current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+	/* select thermal txpower adjustment params, based on channel group
+	 *   (same frequency group used for mimo txatten adjustment) */
+	degrees_per_05db_num =
+	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+	degrees_per_05db_denom =
+	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+	/* get per-chain txpower values from factory measurements */
+	for (c = 0; c < 2; c++) {
+		measurement = &ch_eeprom_info.measurements[c][1];
+
+		/* txgain adjustment (in half-dB steps) based on difference
+		 *   between factory and current temperature */
+		factory_temp = measurement->temperature;
+		il4965_math_div_round((current_temp -
+				       factory_temp) * degrees_per_05db_denom,
+				      degrees_per_05db_num,
+				      &temperature_comp[c]);
+
+		factory_gain_idx[c] = measurement->gain_idx;
+		factory_actual_pwr[c] = measurement->actual_pow;
+
+		D_TXPOWER("chain = %d\n", c);
+		D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
+			  factory_temp, current_temp, temperature_comp[c]);
+
+		D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+			  factory_actual_pwr[c]);
+	}
+
+	/* for each of 33 bit-rates (including 1 for CCK) */
+	for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+		u8 is_mimo_rate;
+		union il4965_tx_power_dual_stream tx_power;
+
+		/* for mimo, reduce each chain's txpower by half
+		 * (3dB, 6 steps), so total output power is regulatory
+		 * compliant. */
+		if (i & 0x8) {
+			current_regulatory =
+			    reg_limit -
+			    IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+			is_mimo_rate = 1;
+		} else {
+			current_regulatory = reg_limit;
+			is_mimo_rate = 0;
+		}
+
+		/* find txpower limit, either hardware or regulatory */
+		power_limit = saturation_power - back_off_table[i];
+		if (power_limit > current_regulatory)
+			power_limit = current_regulatory;
+
+		/* reduce user's txpower request if necessary
+		 * for this rate on this channel */
+		target_power = user_target_power;
+		if (target_power > power_limit)
+			target_power = power_limit;
+
+		D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+			  saturation_power - back_off_table[i],
+			  current_regulatory, user_target_power, target_power);
+
+		/* for each of 2 Tx chains (radio transmitters) */
+		for (c = 0; c < 2; c++) {
+			s32 atten_value;
+
+			if (is_mimo_rate)
+				atten_value =
+				    (s32) le32_to_cpu(il->card_alive_init.
+						      tx_atten[txatten_grp][c]);
+			else
+				atten_value = 0;
+
+			/* calculate idx; higher idx means lower txpower */
+			power_idx =
+			    (u8) (factory_gain_idx[c] -
+				  (target_power - factory_actual_pwr[c]) -
+				  temperature_comp[c] - voltage_compensation +
+				  atten_value);
+
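+			/*
+			 * Example with assumed values: factory idx 50 and
+			 * factory power 30 (half-dBm), target 24, temperature
+			 * comp -2, voltage comp 1 and no mimo attenuation
+			 * give 50 - (24 - 30) - (-2) - 1 + 0 = 57.
+			 */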
+/*			D_TXPOWER("calculated txpower idx %d\n",
+						power_idx); */
+
+			if (power_idx < get_min_power_idx(i, band))
+				power_idx = get_min_power_idx(i, band);
+
+			/* adjust 5 GHz idx to support negative idxes */
+			if (!band)
+				power_idx += 9;
+
+			/* CCK, rate 32, reduce txpower for CCK */
+			if (i == POWER_TBL_CCK_ENTRY)
+				power_idx +=
+				    IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+			/* stay within the table! */
+			if (power_idx > 107) {
+				IL_WARN("txpower idx %d > 107\n", power_idx);
+				power_idx = 107;
+			}
+			if (power_idx < 0) {
+				IL_WARN("txpower idx %d < 0\n", power_idx);
+				power_idx = 0;
+			}
+
+			/* fill txpower command for this rate/chain */
+			tx_power.s.radio_tx_gain[c] =
+			    gain_table[band][power_idx].radio;
+			tx_power.s.dsp_predis_atten[c] =
+			    gain_table[band][power_idx].dsp;
+
+			D_TXPOWER("chain %d mimo %d idx %d "
+				  "gain 0x%02x dsp %d\n", c, atten_value,
+				  power_idx, tx_power.s.radio_tx_gain[c],
+				  tx_power.s.dsp_predis_atten[c]);
+		}		/* for each chain */
+
+		tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+	}			/* for each rate */
+
+	return 0;
+}
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+	struct il4965_txpowertable_cmd cmd = { 0 };
+	int ret;
+	u8 band = 0;
+	bool is_ht40 = false;
+	u8 ctrl_chan_high = 0;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	if (WARN_ONCE
+	    (test_bit(S_SCAN_HW, &il->status),
+	     "TX Power requested while scanning!\n"))
+		return -EAGAIN;
+
+	band = il->band == IEEE80211_BAND_2GHZ;
+
+	is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+	if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+		ctrl_chan_high = 1;
+
+	cmd.band = band;
+	cmd.channel = ctx->active.channel;
+
+	ret =
+	    il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+				    is_ht40, ctrl_chan_high, &cmd.tx_power);
+	if (ret)
+		goto out;
+
+	ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+	return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	int ret = 0;
+	struct il4965_rxon_assoc_cmd rxon_assoc;
+	const struct il_rxon_cmd *rxon1 = &ctx->staging;
+	const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+	if (rxon1->flags == rxon2->flags &&
+	    rxon1->filter_flags == rxon2->filter_flags &&
+	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+	    rxon1->ofdm_ht_single_stream_basic_rates ==
+	    rxon2->ofdm_ht_single_stream_basic_rates &&
+	    rxon1->ofdm_ht_dual_stream_basic_rates ==
+	    rxon2->ofdm_ht_dual_stream_basic_rates &&
+	    rxon1->rx_chain == rxon2->rx_chain &&
+	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
+		return 0;
+	}
+
+	rxon_assoc.flags = ctx->staging.flags;
+	rxon_assoc.filter_flags = ctx->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+	rxon_assoc.reserved = 0;
+	rxon_assoc.ofdm_ht_single_stream_basic_rates =
+	    ctx->staging.ofdm_ht_single_stream_basic_rates;
+	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+	    ctx->staging.ofdm_ht_dual_stream_basic_rates;
+	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+	ret =
+	    il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+				  &rxon_assoc, NULL);
+
+	return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	/* cast away the const for active_rxon in this function */
+	struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+	int ret;
+	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+	if (!il_is_alive(il))
+		return -EBUSY;
+
+	if (!ctx->is_active)
+		return 0;
+
+	/* always get timestamp with Rx frame */
+	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+	ret = il_check_rxon_cmd(il, ctx);
+	if (ret) {
+		IL_ERR("Invalid RXON configuration.  Not committing.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * A commit_rxon request was received; abort any previous channel
+	 * switch that is still in progress.
+	 */
+	if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+	    il->switch_channel != ctx->staging.channel) {
+		D_11H("abort channel switch on %d\n",
+		      le16_to_cpu(il->switch_channel));
+		il_chswitch_done(il, false);
+	}
+
+	/* If we don't need to send a full RXON, we can use
+	 * il_rxon_assoc_cmd which is used to reconfigure filter
+	 * and other flags for the current radio configuration. */
+	if (!il_full_rxon_required(il, ctx)) {
+		ret = il_send_rxon_assoc(il, ctx);
+		if (ret) {
+			IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+			return ret;
+		}
+
+		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+		il_print_rx_config_cmd(il, ctx);
+		/*
+		 * We do not commit tx power settings while channel changing,
+		 * do it now if tx power changed.
+		 */
+		il_set_tx_power(il, il->tx_power_next, false);
+		return 0;
+	}
+
+	/* If we are currently associated and the new config enables the
+	 * associated mask, we must first clear the association bit from the
+	 * active configuration before we apply the new config */
+	if (il_is_associated_ctx(ctx) && new_assoc) {
+		D_INFO("Toggling associated bit on current RXON\n");
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+		ret =
+		    il_send_cmd_pdu(il, ctx->rxon_cmd,
+				    sizeof(struct il_rxon_cmd), active_rxon);
+
+		/* If the mask clearing failed then we set
+		 * active_rxon back to what it was previously */
+		if (ret) {
+			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+			IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+			return ret;
+		}
+		il_clear_ucode_stations(il, ctx);
+		il_restore_stations(il, ctx);
+		ret = il4965_restore_default_wep_keys(il, ctx);
+		if (ret) {
+			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+	       le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+
+	il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+
+	/* Apply the new configuration
+	 * RXON unassoc clears the station table in uCode so restoration of
+	 * stations is needed after it (the RXON command) completes
+	 */
+	if (!new_assoc) {
+		ret =
+		    il_send_cmd_pdu(il, ctx->rxon_cmd,
+				    sizeof(struct il_rxon_cmd), &ctx->staging);
+		if (ret) {
+			IL_ERR("Error setting new RXON (%d)\n", ret);
+			return ret;
+		}
+		D_INFO("Return from !new_assoc RXON.\n");
+		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+		il_clear_ucode_stations(il, ctx);
+		il_restore_stations(il, ctx);
+		ret = il4965_restore_default_wep_keys(il, ctx);
+		if (ret) {
+			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+			return ret;
+		}
+	}
+	if (new_assoc) {
+		il->start_calib = 0;
+		/* Apply the new configuration
+		 * RXON assoc doesn't clear the station table in uCode,
+		 */
+		ret =
+		    il_send_cmd_pdu(il, ctx->rxon_cmd,
+				    sizeof(struct il_rxon_cmd), &ctx->staging);
+		if (ret) {
+			IL_ERR("Error setting new RXON (%d)\n", ret);
+			return ret;
+		}
+		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+	}
+	il_print_rx_config_cmd(il, ctx);
+
+	il4965_init_sensitivity(il);
+
+	/* If we issue a new RXON command which requires a tune then we must
+	 * send a new TXPOWER command or we won't be able to Tx any frames */
+	ret = il_set_tx_power(il, il->tx_power_next, true);
+	if (ret) {
+		IL_ERR("Error sending TX power (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+			 struct ieee80211_channel_switch *ch_switch)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	int rc;
+	u8 band = 0;
+	bool is_ht40 = false;
+	u8 ctrl_chan_high = 0;
+	struct il4965_channel_switch_cmd cmd;
+	const struct il_channel_info *ch_info;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
+	band = il->band == IEEE80211_BAND_2GHZ;
+
+	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+	if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+		ctrl_chan_high = 1;
+
+	cmd.band = band;
+	cmd.expect_beacon = 0;
+	ch = ch_switch->channel->hw_value;
+	cmd.channel = cpu_to_le16(ch);
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * calculate the ucode channel switch time
+	 * adding TSF as one of the factor for when to switch
+	 */
+	if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+		if (switch_count >
+		    ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+			switch_count -=
+			    (il->ucode_beacon_time - tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+		    vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time =
+		    il_usecs_to_beacons(il, switch_time_in_usec,
+					beacon_interval);
+		cmd.switch_time =
+		    il_add_beacon_time(il, il->ucode_beacon_time,
+				       ucode_switch_time, beacon_interval);
+	}
+	D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+	ch_info = il_get_channel_info(il, il->band, ch);
+	if (ch_info)
+		cmd.expect_beacon = il_is_channel_radar(ch_info);
+	else {
+		IL_ERR("invalid channel switch from %u to %u\n",
+		       ctx->active.channel, ch);
+		return -EFAULT;
+	}
+
+	rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+				     &cmd.tx_power);
+	if (rc) {
+		D_11H("error:%d  fill txpower_tbl\n", rc);
+		return rc;
+	}
+
+	return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+			       u16 byte_cnt)
+{
+	struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+	int txq_id = txq->q.id;
+	int write_ptr = txq->q.write_ptr;
+	int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+	__le16 bc_ent;
+
+	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	bc_ent = cpu_to_le16(len & 0xFFF);
+	/* Set up byte count within first 256 entries */
+	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+	/* If within first 64 entries, duplicate at end */
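+	/* (the duplicate run lets uCode read a contiguous window of byte
+	 *  counts across the queue wrap-around point) */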
+	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+		    bc_ent;
+}
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @il: driver context; the reading comes from the most recent uCode stats
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+	s32 temperature;
+	s32 vt;
+	s32 R1, R2, R3;
+	u32 R4;
+
+	if (test_bit(S_TEMPERATURE, &il->status) &&
+	    (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+		D_TEMP("Running HT40 temperature calibration\n");
+		R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+		R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+		R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+		R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+	} else {
+		D_TEMP("Running temperature calibration\n");
+		R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+		R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+		R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+		R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+	}
+
+	/*
+	 * Temperature is only 23 bits, so sign extend out to 32.
+	 *
+	 * NOTE If we haven't received a stats notification yet
+	 * with an updated temperature, use R4 provided to us in the
+	 * "initialize" ALIVE response.
+	 */
+	if (!test_bit(S_TEMPERATURE, &il->status))
+		vt = sign_extend32(R4, 23);
+	else
+		vt = sign_extend32(le32_to_cpu
+				   (il->_4965.stats.general.common.temperature),
+				   23);
+
+	D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+	if (R3 == R1) {
+		IL_ERR("Calibration conflict R1 == R3\n");
+		return -1;
+	}
+
+	/* Calculate temperature in kelvin, scaled to 97%.
+	 * Add offset to center the adjustment around 0 degrees Centigrade. */
+	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+	temperature /= (R3 - R1);
+	temperature =
+	    (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
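+	/*
+	 * Worked example with assumed calibration values: R1 = 100, R2 = 200,
+	 * R3 = 300, vt = 450 and a slope constant of 259 give
+	 * 259 * (450 - 200) / (300 - 100) = 323, scaled by 97% to 313, plus
+	 * the small kelvin offset, i.e. roughly 313 K (about 40 C).
+	 */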
+	D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+	       KELVIN_TO_CELSIUS(temperature));
+
+	return temperature;
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold. */
+#define IL_TEMPERATURE_THRESHOLD   3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes caller will replace il->last_temperature once calibration
+ * executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+	int temp_diff;
+
+	if (!test_bit(S_STATS, &il->status)) {
+		D_TEMP("Temperature not updated -- no stats.\n");
+		return 0;
+	}
+
+	temp_diff = il->temperature - il->last_temperature;
+
+	/* get absolute value */
+	if (temp_diff < 0) {
+		D_POWER("Getting cooler, delta %d\n", temp_diff);
+		temp_diff = -temp_diff;
+	} else if (temp_diff == 0)
+		D_POWER("Temperature unchanged\n");
+	else
+		D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+	if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+		D_POWER(" => thermal txpower calib not needed\n");
+		return 0;
+	}
+
+	D_POWER(" => thermal txpower calib needed\n");
+
+	return 1;
+}
+
+static void
+il4965_temperature_calib(struct il_priv *il)
+{
+	s32 temp;
+
+	temp = il4965_hw_get_temperature(il);
+	if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+		return;
+
+	if (il->temperature != temp) {
+		if (il->temperature)
+			D_TEMP("Temperature changed " "from %dC to %dC\n",
+			       KELVIN_TO_CELSIUS(il->temperature),
+			       KELVIN_TO_CELSIUS(temp));
+		else
+			D_TEMP("Temperature " "initialized to %dC\n",
+			       KELVIN_TO_CELSIUS(temp));
+	}
+
+	il->temperature = temp;
+	set_bit(S_TEMPERATURE, &il->status);
+
+	if (!il->disable_tx_power_cal &&
+	    unlikely(!test_bit(S_SCANNING, &il->status)) &&
+	    il4965_is_temp_calib_needed(il))
+		queue_work(il->workqueue, &il->txpower_work);
+}
+
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+	switch (cmd_id) {
+	case C_RXON:
+		return (u16) sizeof(struct il4965_rxon_cmd);
+	default:
+		return len;
+	}
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+	struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+	addsta->mode = cmd->mode;
+	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+	memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+	addsta->station_flags = cmd->station_flags;
+	addsta->station_flags_msk = cmd->station_flags_msk;
+	addsta->tid_disable_tx = cmd->tid_disable_tx;
+	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+	addsta->sleep_tx_count = cmd->sleep_tx_count;
+	addsta->reserved1 = cpu_to_le16(0);
+	addsta->reserved2 = cpu_to_le16(0);
+
+	return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+	return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+	status &= TX_STATUS_MSK;
+
+	switch (status) {
+	case TX_STATUS_SUCCESS:
+	case TX_STATUS_DIRECT_DONE:
+		return IEEE80211_TX_STAT_ACK;
+	case TX_STATUS_FAIL_DEST_PS:
+		return IEEE80211_TX_STAT_TX_FILTERED;
+	default:
+		return 0;
+	}
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+	status &= TX_STATUS_MSK;
+	return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+			  struct il4965_tx_resp *tx_resp, int txq_id,
+			  u16 start_idx)
+{
+	u16 status;
+	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+	struct ieee80211_tx_info *info = NULL;
+	struct ieee80211_hdr *hdr = NULL;
+	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+	int i, sh, idx;
+	u16 seq;
+	if (agg->wait_for_ba)
+		D_TX_REPLY("got tx response w/o block-ack\n");
+
+	agg->frame_count = tx_resp->frame_count;
+	agg->start_idx = start_idx;
+	agg->rate_n_flags = rate_n_flags;
+	agg->bitmap = 0;
+
+	/* num frames attempted by Tx command */
+	if (agg->frame_count == 1) {
+		/* Only one frame was attempted; no block-ack will arrive */
+		status = le16_to_cpu(frame_status[0].status);
+		idx = start_idx;
+
+		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+			   agg->frame_count, agg->start_idx, idx);
+
+		info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
+		info->status.rates[0].count = tx_resp->failure_frame + 1;
+		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+		info->flags |= il4965_tx_status_to_mac80211(status);
+		il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+			   tx_resp->failure_frame);
+		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+		agg->wait_for_ba = 0;
+	} else {
+		/* Two or more frames were attempted; expect block-ack */
+		u64 bitmap = 0;
+		int start = agg->start_idx;
+
+		/* Construct bit-map of pending frames within Tx win */
+		for (i = 0; i < agg->frame_count; i++) {
+			u16 sc;
+			status = le16_to_cpu(frame_status[i].status);
+			seq = le16_to_cpu(frame_status[i].sequence);
+			idx = SEQ_TO_IDX(seq);
+			txq_id = SEQ_TO_QUEUE(seq);
+
+			if (status &
+			    (AGG_TX_STATE_FEW_BYTES_MSK |
+			     AGG_TX_STATE_ABORT_MSK))
+				continue;
+
+			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+				   agg->frame_count, txq_id, idx);
+
+			hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+			if (!hdr) {
+				IL_ERR("BUG_ON idx doesn't point to valid skb"
+				       " idx=%d, txq_id=%d\n", idx, txq_id);
+				return -1;
+			}
+
+			sc = le16_to_cpu(hdr->seq_ctrl);
+			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+				IL_ERR("BUG_ON idx doesn't match seq control"
+				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
+				       SEQ_TO_SN(sc), hdr->seq_ctrl);
+				return -1;
+			}
+
+			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+				   SEQ_TO_SN(sc));
+
+			sh = idx - start;
+			if (sh > 64) {
+				sh = (start - idx) + 0xff;
+				bitmap = bitmap << sh;
+				sh = 0;
+				start = idx;
+			} else if (sh < -64)
+				sh = 0xff - (start - idx);
+			else if (sh < 0) {
+				sh = start - idx;
+				start = idx;
+				bitmap = bitmap << sh;
+				sh = 0;
+			}
+			bitmap |= 1ULL << sh;
+			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+				   (unsigned long long)bitmap);
+		}
+
+		agg->bitmap = bitmap;
+		agg->start_idx = start;
+		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+			   agg->frame_count, agg->start_idx,
+			   (unsigned long long)agg->bitmap);
+
+		if (bitmap)
+			agg->wait_for_ba = 1;
+	}
+	return 0;
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 * addr)
+{
+	int i;
+	int start = 0;
+	int ret = IL_INVALID_STATION;
+	unsigned long flags;
+
+	if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
+		start = IL_STA_ID;
+
+	if (is_broadcast_ether_addr(addr))
+		return il->ctx.bcast_sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	for (i = start; i < il->hw_params.max_stations; i++)
+		if (il->stations[i].used &&
+		    (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+			ret = i;
+			goto out;
+		}
+
+	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+	/*
+	 * It may be possible that more commands interacting with stations
+	 * arrive before we have completed processing the addition of the
+	 * station.
+	 */
+	if (ret != IL_INVALID_STATION &&
+	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+		IL_ERR("Requested station info for sta %d before ready.\n",
+		       ret);
+		ret = IL_INVALID_STATION;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+	if (il->iw_mode == NL80211_IFTYPE_STATION) {
+		return IL_AP_ID;
+	} else {
+		u8 *da = ieee80211_get_DA(hdr);
+		return il4965_find_station(il, da);
+	}
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int idx = SEQ_TO_IDX(sequence);
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_tx_info *info;
+	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+	u32 status = le32_to_cpu(tx_resp->u.status);
+	int uninitialized_var(tid);
+	int sta_id;
+	int freed;
+	u8 *qc = NULL;
+	unsigned long flags;
+
+	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+		       "is out of range [0-%d] %d %d\n", txq_id, idx,
+		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+		return;
+	}
+
+	txq->time_stamp = jiffies;
+	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+	memset(&info->status, 0, sizeof(info->status));
+
+	hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	}
+
+	sta_id = il4965_get_ra_sta_id(il, hdr);
+	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+		IL_ERR("Station not known\n");
+		return;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	if (txq->sched_retry) {
+		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+		struct il_ht_agg *agg = NULL;
+		WARN_ON(!qc);
+
+		agg = &il->stations[sta_id].tid[tid].agg;
+
+		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+		/* check if BAR is needed */
+		if ((tx_resp->frame_count == 1) &&
+		    !il4965_is_tx_success(status))
+			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+				   "%d idx %d\n", scd_ssn, idx);
+			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+			if (qc)
+				il4965_free_tfds_in_queue(il, sta_id, tid,
+							  freed);
+
+			if (il->mac80211_registered &&
+			    il_queue_space(&txq->q) > txq->q.low_mark &&
+			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+				il_wake_queue(il, txq);
+		}
+	} else {
+		info->status.rates[0].count = tx_resp->failure_frame + 1;
+		info->flags |= il4965_tx_status_to_mac80211(status);
+		il4965_hwrate_to_tx_control(il,
+					    le32_to_cpu(tx_resp->rate_n_flags),
+					    info);
+
+		D_TX_REPLY("TXQ %d status %s (0x%08x) "
+			   "rate_n_flags 0x%x retries %d\n", txq_id,
+			   il4965_get_tx_fail_reason(status), status,
+			   le32_to_cpu(tx_resp->rate_n_flags),
+			   tx_resp->failure_frame);
+
+		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+		if (qc && likely(sta_id != IL_INVALID_STATION))
+			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+		else if (sta_id == IL_INVALID_STATION)
+			D_TX_REPLY("Station not known\n");
+
+		if (il->mac80211_registered &&
+		    il_queue_space(&txq->q) > txq->q.low_mark)
+			il_wake_queue(il, txq);
+	}
+	if (qc && likely(sta_id != IL_INVALID_STATION))
+		il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+	il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void
+il4965_handler_setup(struct il_priv *il)
+{
+	/* Legacy Rx frames */
+	il->handlers[N_RX] = il4965_hdl_rx;
+	/* Tx response */
+	il->handlers[C_TX] = il4965_hdl_tx;
+}
+
+static struct il_hcmd_ops il4965_hcmd = {
+	.rxon_assoc = il4965_send_rxon_assoc,
+	.commit_rxon = il4965_commit_rxon,
+	.set_rxon_chain = il4965_set_rxon_chain,
+};
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+
+	/*
+	 * Since setting the RXON may have been deferred while
+	 * performing the scan, fire one off if needed
+	 */
+	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+		il_commit_rxon(il, ctx);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	struct ieee80211_vif *vif = ctx->vif;
+	struct ieee80211_conf *conf = NULL;
+	int ret = 0;
+
+	if (!vif || !il->is_open)
+		return;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	il_scan_cancel_timeout(il, 200);
+
+	conf = &il->hw->conf;
+
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il_commit_rxon(il, ctx);
+
+	ret = il_send_rxon_timing(il, ctx);
+	if (ret)
+		IL_WARN("RXON timing - " "Attempting to continue.\n");
+
+	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+	il_set_rxon_ht(il, &il->current_ht_config);
+
+	if (il->cfg->ops->hcmd->set_rxon_chain)
+		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+	D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+		vif->bss_conf.beacon_int);
+
+	if (vif->bss_conf.use_short_preamble)
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+		if (vif->bss_conf.use_short_slot)
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+	}
+
+	il_commit_rxon(il, ctx);
+
+	D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+		ctx->active.bssid_addr);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		il4965_send_beacon_cmd(il);
+		break;
+	default:
+		IL_ERR("%s Should not be called in %d mode\n", __func__,
+		       vif->type);
+		break;
+	}
+
+	/* the chain noise calibration will enable PM upon completion.
+	 * If chain noise has already been run, then we need to enable
+	 * power management here */
+	if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+		il_power_update_mode(il, false);
+
+	/* Enable Rx differential gain and sensitivity calibrations */
+	il4965_chain_noise_reset(il);
+	il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	struct ieee80211_vif *vif = ctx->vif;
+	int ret = 0;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	/* The following should be done only at AP bring up */
+	if (!il_is_associated_ctx(ctx)) {
+
+		/* RXON - unassoc (to set timing command) */
+		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+		il_commit_rxon(il, ctx);
+
+		/* RXON Timing */
+		ret = il_send_rxon_timing(il, ctx);
+		if (ret)
+			IL_WARN("RXON timing failed - "
+				"Attempting to continue.\n");
+
+		/* AP has all antennas */
+		il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+		il_set_rxon_ht(il, &il->current_ht_config);
+		if (il->cfg->ops->hcmd->set_rxon_chain)
+			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+		ctx->staging.assoc_id = 0;
+
+		if (vif->bss_conf.use_short_preamble)
+			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+			if (vif->bss_conf.use_short_slot)
+				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+			else
+				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+		}
+		/* need to send beacon cmd before committing assoc RXON! */
+		il4965_send_beacon_cmd(il);
+		/* restore RXON assoc */
+		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		il_commit_rxon(il, ctx);
+	}
+	il4965_send_beacon_cmd(il);
+}
+
+static struct il_hcmd_utils_ops il4965_hcmd_utils = {
+	.get_hcmd_size = il4965_get_hcmd_size,
+	.build_addsta_hcmd = il4965_build_addsta_hcmd,
+	.request_scan = il4965_request_scan,
+	.post_scan = il4965_post_scan,
+};
+
+static struct il_lib_ops il4965_lib = {
+	.set_hw_params = il4965_hw_set_hw_params,
+	.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+	.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = il4965_hw_txq_free_tfd,
+	.txq_init = il4965_hw_tx_queue_init,
+	.handler_setup = il4965_handler_setup,
+	.is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+	.init_alive_start = il4965_init_alive_start,
+	.load_ucode = il4965_load_bsm,
+	.dump_nic_error_log = il4965_dump_nic_error_log,
+	.dump_fh = il4965_dump_fh,
+	.set_channel_switch = il4965_hw_channel_switch,
+	.apm_ops = {
+		    .init = il_apm_init,
+		    .config = il4965_nic_config,
+		    },
+	.eeprom_ops = {
+		       .regulatory_bands = {
+					    EEPROM_REGULATORY_BAND_1_CHANNELS,
+					    EEPROM_REGULATORY_BAND_2_CHANNELS,
+					    EEPROM_REGULATORY_BAND_3_CHANNELS,
+					    EEPROM_REGULATORY_BAND_4_CHANNELS,
+					    EEPROM_REGULATORY_BAND_5_CHANNELS,
+					    EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+					    EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
+		       .acquire_semaphore = il4965_eeprom_acquire_semaphore,
+		       .release_semaphore = il4965_eeprom_release_semaphore,
+		       },
+	.send_tx_power = il4965_send_tx_power,
+	.update_chain_flags = il4965_update_chain_flags,
+	.temp_ops = {
+		     .temperature = il4965_temperature_calib,
+		     },
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	.debugfs_ops = {
+			.rx_stats_read = il4965_ucode_rx_stats_read,
+			.tx_stats_read = il4965_ucode_tx_stats_read,
+			.general_stats_read = il4965_ucode_general_stats_read,
+			},
+#endif
+};
+
+static const struct il_legacy_ops il4965_legacy_ops = {
+	.post_associate = il4965_post_associate,
+	.config_ap = il4965_config_ap,
+	.manage_ibss_station = il4965_manage_ibss_station,
+	.update_bcast_stations = il4965_update_bcast_stations,
+};
+
+struct ieee80211_ops il4965_hw_ops = {
+	.tx = il4965_mac_tx,
+	.start = il4965_mac_start,
+	.stop = il4965_mac_stop,
+	.add_interface = il_mac_add_interface,
+	.remove_interface = il_mac_remove_interface,
+	.change_interface = il_mac_change_interface,
+	.config = il_mac_config,
+	.configure_filter = il4965_configure_filter,
+	.set_key = il4965_mac_set_key,
+	.update_tkip_key = il4965_mac_update_tkip_key,
+	.conf_tx = il_mac_conf_tx,
+	.reset_tsf = il_mac_reset_tsf,
+	.bss_info_changed = il_mac_bss_info_changed,
+	.ampdu_action = il4965_mac_ampdu_action,
+	.hw_scan = il_mac_hw_scan,
+	.sta_add = il4965_mac_sta_add,
+	.sta_remove = il_mac_sta_remove,
+	.channel_switch = il4965_mac_channel_switch,
+	.tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static const struct il_ops il4965_ops = {
+	.lib = &il4965_lib,
+	.hcmd = &il4965_hcmd,
+	.utils = &il4965_hcmd_utils,
+	.led = &il4965_led_ops,
+	.legacy = &il4965_legacy_ops,
+	.ieee80211_ops = &il4965_hw_ops,
+};
+
+static struct il_base_params il4965_base_params = {
+	.eeprom_size = IL4965_EEPROM_IMG_SIZE,
+	.num_of_queues = IL49_NUM_QUEUES,
+	.num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = true,
+	.led_compensation = 61,
+	.chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+	.wd_timeout = IL_DEF_WD_TIMEOUT,
+	.temperature_kelvin = true,
+	.ucode_tracing = true,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+};
+
+struct il_cfg il4965_cfg = {
+	.name = "Intel(R) Wireless WiFi Link 4965AGN",
+	.fw_name_pre = IL4965_FW_PRE,
+	.ucode_api_max = IL4965_UCODE_API_MAX,
+	.ucode_api_min = IL4965_UCODE_API_MIN,
+	.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_ABC,
+	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+	.ops = &il4965_ops,
+	.mod_params = &il4965_mod_params,
+	.base_params = &il4965_base_params,
+	.led_mode = IL_LED_BLINK,
+	/*
+	 * Force use of chains B and C for scan RX on 5 GHz band
+	 * because the device has off-channel reception on chain A.
+	 */
+	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 0000000..f280e01
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1301 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+
+extern struct il_mod_params il4965_mod_params;
+
+extern struct ieee80211_ops il4965_hw_ops;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+			       int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				    dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+				 struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE:  Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+				int tx_fifo_id, int scd_retry);
+
+/* rx */
+void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
+bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
+void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+			       bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+	return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
+int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+			     const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+				  struct il_rxon_context *ctx,
+				  struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+			       struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il,
+				    struct il_rxon_context *ctx);
+int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+			   struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+			      struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+			    struct ieee80211_key_conf *keyconf,
+			    struct ieee80211_sta *sta, u32 iv32,
+			    u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+			    int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+			   int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+	return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+			     unsigned int changed_flags,
+			     unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		       struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_key_conf *keyconf,
+				struct ieee80211_sta *sta, u32 iv32,
+				u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    enum ieee80211_ampdu_mlme_action action,
+			    struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+			    u8 buf_size);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+void il4965_mac_channel_switch(struct ieee80211_hw *hw,
+			       struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE			1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE	7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND		(0x000000)
+#define IL49_RTC_INST_UPPER_BOUND		(0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND		(0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND		(0x80A000)
+
+#define IL49_RTC_INST_SIZE  (IL49_RTC_INST_UPPER_BOUND - \
+				IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE  (IL49_RTC_DATA_UPPER_BOUND - \
+				IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+	return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+		addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent).  The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp).  After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
+ *        must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 degrees Kelvin).  The 8 (3 percent of 273) re-centers the
+ * 97/100 correction at 0 degrees Celsius (273 K) rather than at 0 K.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
+ * the current temperature with factory-measured temperatures when
+ * calculating txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN  (263)
+#define IL_TX_POWER_TEMPERATURE_MAX  (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+	((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+	 (t) > IL_TX_POWER_TEMPERATURE_MAX)
+
+/********************* END TEMPERATURE ***************************************/
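The formula above translates directly into code.  A minimal sketch (an
editorial illustration, not part of this patch), using the
TEMPERATURE_CALIB_* constants defined above; r1-r4 are hypothetical,
already sign-extended calibration/indicator values, and a real
implementation must reject R3 == R1 to avoid dividing by zero:

static s32
il4965_example_temp_kelvin(s32 r1, s32 r2, s32 r3, s32 r4)
{
	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	return (97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1)) / 100 +
	       TEMPERATURE_CALIB_KELVIN_OFFSET;
}

A result outside the MIN/MAX bounds above would then be rejected by the
IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE() check.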
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ *     1) EEPROM
+ *     2) "initialize" alive notification
+ *     3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1)  Regulatory information (max txpower and channel usage flags) is provided
+ *     separately for each channel that can possibly be supported by 4965.
+ *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ *     (legacy) channels.
+ *
+ *     See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ *     for locations in EEPROM.
+ *
+ * 2)  Factory txpower calibration information is provided separately for
+ *     sub-bands of contiguous channels.  2.4GHz has just one sub-band,
+ *     but 5 GHz has several sub-bands.
+ *
+ *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ *     See struct il4965_eeprom_calib_info (and the tree of structures
+ *     contained within it) for format, and struct il4965_eeprom for
+ *     locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1)  Temperature calculation parameters.
+ *
+ * 2)  Power supply voltage measurement.
+ *
+ * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1)  Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ *     Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ *     If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ *     2 transmitters will be used simultaneously; driver must reduce the
+ *     regulatory limit by 3 dB (half-power) for each transmitter, so the
+ *     combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
+ *     reduce target txpower if necessary.
+ *
+ *     Backoff values below are in 1/2 dB units (equivalent to steps in
+ *     txpower gain tables):
+ *
+ *     OFDM 6 - 36 MBit:  10 steps (5 dB)
+ *     OFDM 48 MBit:      15 steps (7.5 dB)
+ *     OFDM 54 MBit:      17 steps (8.5 dB)
+ *     OFDM 60 MBit:      20 steps (10 dB)
+ *     CCK all rates:     10 steps (5 dB)
+ *
+ *     Backoff values apply to saturation txpower on a per-transmitter basis;
+ *     when using MIMO (2 transmitters), each transmitter uses the same
+ *     saturation level provided in EEPROM, and the same backoff values;
+ *     no reduction (such as with regulatory txpower limits) is required.
+ *
+ *     Saturation and Backoff values apply equally to 20 MHz (legacy) channel
+ *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ *     factory measurement for ht40 channels.
+ *
+ *     The result of this step is the final target txpower.  The rest of
+ *     the steps figure out the proper settings for the device to achieve
+ *     that target txpower.
+ *
+ *
+ * 3)  Determine (EEPROM) calibration sub band for the target channel, by
+ *     comparing against first and last channels in each sub band
+ *     (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
+ *     referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ *     Interpolation is based on difference between target channel's frequency
+ *     and the sample channels' frequencies.  Since channel numbers are based
+ *     on frequency (5 MHz between each channel number), this is equivalent
+ *     to interpolating based on channel number differences.
+ *
+ *     Note that the sample channels may or may not be the channels at the
+ *     edges of the sub band.  The target channel may be "outside" of the
+ *     span of the sampled channels.
+ *
+ *     Driver may choose the pair (for 2 Tx chains) of measurements (see
+ *     struct il4965_eeprom_calib_ch_info) for which the actual measured
+ *     txpower comes closest to the desired txpower.  Usually, though,
+ *     the middle set of measurements is closest to the regulatory limits,
+ *     and is therefore a good choice for all txpower calculations (this
+ *     assumes that high accuracy is needed for maximizing legal txpower,
+ *     while lower txpower configurations do not need as much accuracy).
+ *
+ *     Driver should interpolate both members of the chosen measurement pair,
+ *     i.e. for both Tx chains (radio transmitters), unless the driver knows
+ *     that only one of the chains will be used (e.g. only one tx antenna
+ *     connected, but this should be unusual).  The rate scaling algorithm
+ *     switches antennas to find best performance, so both Tx chains will
+ *     be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ *     Driver should interpolate factory values for temperature, gain table
+ *     idx, and actual power.  The power amplifier detector values are
+ *     not used by the driver.
+ *
+ *     Sanity check:  If the target channel happens to be one of the sample
+ *     channels, the results should agree with the sample channel's
+ *     measurements!
+ *
+ *
+ * 5)  Find difference between desired txpower and (interpolated)
+ *     factory-measured txpower.  Using (interpolated) factory gain table idx
+ *     (shown elsewhere) as a starting point, adjust this idx lower to
+ *     increase txpower, or higher to decrease txpower, until the target
+ *     txpower is reached.  Each step in the gain table is 1/2 dB.
+ *
+ *     For example, if factory measured txpower is 16 dBm, and target txpower
+ *     is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ *     by 3 dB.
+ *
+ *
+ * 6)  Find difference between current device temperature and (interpolated)
+ *     factory-measured temperature for sub-band.  Factory values are in
+ *     degrees Celsius.  To calculate current temperature, see comments for
+ *     "4965 temperature calculation".
+ *
+ *     If current temperature is higher than factory temperature, driver must
+ *     increase gain (lower gain table idx), and vice versa.
+ *
+ *     Temperature affects gain differently for different channels:
+ *
+ *     2.4 GHz all channels:  3.5 degrees per half-dB step
+ *     5 GHz channels 34-43:  4.5 degrees per half-dB step
+ *     5 GHz channels >= 44:  4.0 degrees per half-dB step
+ *
+ *     NOTE:  Temperature can increase rapidly when transmitting, especially
+ *            with heavy traffic at high txpowers.  Driver should update
+ *            temperature calculations often under these conditions to
+ *            maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7)  Find difference between current power supply voltage indicator
+ *     (from "initialize alive") and factory-measured power supply voltage
+ *     indicator (EEPROM).
+ *
+ *     If the current voltage is higher (indicator is lower) than factory
+ *     voltage, gain should be reduced (gain table idx increased) by:
+ *
+ *     (eeprom - current) / 7
+ *
+ *     If the current voltage is lower (indicator is higher) than factory
+ *     voltage, gain should be increased (gain table idx decreased) by:
+ *
+ *     2 * (current - eeprom) / 7
+ *
+ *     If number of idx steps in either direction turns out to be > 2,
+ *     something is wrong ... just use 0.
+ *
+ *     NOTE:  Voltage compensation is independent of band/channel.
+ *
+ *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
+ *            to be constant after this initial measurement.  Voltage
+ *            compensation for txpower (number of steps in gain table)
+ *            may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8)  If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ *     adjust txpower for each transmitter chain, so txpower is balanced
+ *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
+ *     values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ *     Group 0:  5 GHz channel 34-43
+ *     Group 1:  5 GHz channel 44-70
+ *     Group 2:  5 GHz channel 71-124
+ *     Group 3:  5 GHz channel 125-200
+ *     Group 4:  2.4 GHz all channels
+ *
+ *     Add the tx_atten[group][chain] value to the idx for the target chain.
+ *     The values are signed, but are in pairs of 0 and a non-negative number,
+ *     so as to reduce gain (if necessary) of the "hotter" channel.  This
+ *     avoids any need to double-check for regulatory compliance after
+ *     this step.
+ *
+ *
+ * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ *     value to the idx:
+ *
+ *     Hardware rev B:  9 steps (4.5 dB)
+ *     Hardware rev C:  5 steps (2.5 dB)
+ *
+ *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ *     bits [3:2], 1 = B, 2 = C.
+ *
+ *     NOTE:  This compensation is in addition to any saturation backoff that
+ *            might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ *     Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ *     location(s) in command (struct il4965_txpowertable_cmd).
+ */
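As a worked reading of steps 1, 2 and 5 above, here is a minimal sketch
(an editorial illustration with a hypothetical helper name, not part of
this patch).  It works entirely in the EEPROM's half-dBm units, so one
gain-table step equals one unit; the MIMO, temperature, voltage and CCK
adjustments of steps 6-9 are left out:

/* target, regulatory, saturation and factory_pwr are in half-dBm;
 * backoff is in 1/2 dB gain-table steps. */
static int
il4965_example_gain_idx(int target, int regulatory, int saturation,
			int backoff, int factory_pwr, int factory_idx)
{
	/* Step 1: never exceed the channel's regulatory limit */
	if (target > regulatory)
		target = regulatory;

	/* Step 2: never exceed saturation minus the per-rate backoff */
	if (target > saturation - backoff)
		target = saturation - backoff;

	/*
	 * Step 5: a higher idx means lower power, one step per 1/2 dB.
	 * E.g. factory 32 (16 dBm) with target 26 (13 dBm) adds 6 steps.
	 */
	return factory_idx + (factory_pwr - target);
}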
+
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device.  That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V   (7)
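Step 7 of the procedure above reduces to a few lines.  A hypothetical
helper (an editorial sketch, not part of this patch) that returns the
signed number of gain-table steps to add to the idx, where a positive
value lowers txpower:

static int
il4965_example_voltage_comp(int eeprom_voltage, int current_voltage)
{
	int comp;

	if (current_voltage <= eeprom_voltage)
		/* indicator lower => actual voltage higher => reduce gain */
		comp = (eeprom_voltage - current_voltage) /
		       TX_POWER_IL_VOLTAGE_CODES_PER_03V;
	else
		/* indicator higher => actual voltage lower => increase gain */
		comp = -(2 * (current_voltage - eeprom_voltage)) /
		       TX_POWER_IL_VOLTAGE_CODES_PER_03V;

	/* More than 2 steps in either direction means something is wrong */
	if (comp > 2 || comp < -2)
		comp = 0;

	return comp;
}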
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pair of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
+ * are *relative* steps, not indications of absolute output power.  Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower.  This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration).  A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX		(0)	/* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT	(-9)	/* highest gain, lowest idx, 5 */
+
+/**
+ * 2.4 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *   0        110         0x3f      (highest gain)
+ *   1        104         0x3f
+ *   2         98         0x3f
+ *   3        110         0x3e
+ *   4        104         0x3e
+ *   5         98         0x3e
+ *   6        110         0x3d
+ *   7        104         0x3d
+ *   8         98         0x3d
+ *   9        110         0x3c
+ *  10        104         0x3c
+ *  11         98         0x3c
+ *  12        110         0x3b
+ *  13        104         0x3b
+ *  14         98         0x3b
+ *  15        110         0x3a
+ *  16        104         0x3a
+ *  17         98         0x3a
+ *  18        110         0x39
+ *  19        104         0x39
+ *  20         98         0x39
+ *  21        110         0x38
+ *  22        104         0x38
+ *  23         98         0x38
+ *  24        110         0x37
+ *  25        104         0x37
+ *  26         98         0x37
+ *  27        110         0x36
+ *  28        104         0x36
+ *  29         98         0x36
+ *  30        110         0x35
+ *  31        104         0x35
+ *  32         98         0x35
+ *  33        110         0x34
+ *  34        104         0x34
+ *  35         98         0x34
+ *  36        110         0x33
+ *  37        104         0x33
+ *  38         98         0x33
+ *  39        110         0x32
+ *  40        104         0x32
+ *  41         98         0x32
+ *  42        110         0x31
+ *  43        104         0x31
+ *  44         98         0x31
+ *  45        110         0x30
+ *  46        104         0x30
+ *  47         98         0x30
+ *  48        110          0x6
+ *  49        104          0x6
+ *  50         98          0x6
+ *  51        110          0x5
+ *  52        104          0x5
+ *  53         98          0x5
+ *  54        110          0x4
+ *  55        104          0x4
+ *  56         98          0x4
+ *  57        110          0x3
+ *  58        104          0x3
+ *  59         98          0x3
+ *  60        110          0x2
+ *  61        104          0x2
+ *  62         98          0x2
+ *  63        110          0x1
+ *  64        104          0x1
+ *  65         98          0x1
+ *  66        110          0x0
+ *  67        104          0x0
+ *  68         98          0x0
+ *  69         97            0
+ *  70         96            0
+ *  71         95            0
+ *  72         94            0
+ *  73         93            0
+ *  74         92            0
+ *  75         91            0
+ *  76         90            0
+ *  77         89            0
+ *  78         88            0
+ *  79         87            0
+ *  80         86            0
+ *  81         85            0
+ *  82         84            0
+ *  83         83            0
+ *  84         82            0
+ *  85         81            0
+ *  86         80            0
+ *  87         79            0
+ *  88         78            0
+ *  89         77            0
+ *  90         76            0
+ *  91         75            0
+ *  92         74            0
+ *  93         73            0
+ *  94         72            0
+ *  95         71            0
+ *  96         70            0
+ *  97         69            0
+ *  98         68            0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *  -9 	      123         0x3F      (highest gain)
+ *  -8 	      117         0x3F
+ *  -7        110         0x3F
+ *  -6        104         0x3F
+ *  -5         98         0x3F
+ *  -4        110         0x3E
+ *  -3        104         0x3E
+ *  -2         98         0x3E
+ *  -1        110         0x3D
+ *   0        104         0x3D
+ *   1         98         0x3D
+ *   2        110         0x3C
+ *   3        104         0x3C
+ *   4         98         0x3C
+ *   5        110         0x3B
+ *   6        104         0x3B
+ *   7         98         0x3B
+ *   8        110         0x3A
+ *   9        104         0x3A
+ *  10         98         0x3A
+ *  11        110         0x39
+ *  12        104         0x39
+ *  13         98         0x39
+ *  14        110         0x38
+ *  15        104         0x38
+ *  16         98         0x38
+ *  17        110         0x37
+ *  18        104         0x37
+ *  19         98         0x37
+ *  20        110         0x36
+ *  21        104         0x36
+ *  22         98         0x36
+ *  23        110         0x35
+ *  24        104         0x35
+ *  25         98         0x35
+ *  26        110         0x34
+ *  27        104         0x34
+ *  28         98         0x34
+ *  29        110         0x33
+ *  30        104         0x33
+ *  31         98         0x33
+ *  32        110         0x32
+ *  33        104         0x32
+ *  34         98         0x32
+ *  35        110         0x31
+ *  36        104         0x31
+ *  37         98         0x31
+ *  38        110         0x30
+ *  39        104         0x30
+ *  40         98         0x30
+ *  41        110         0x25
+ *  42        104         0x25
+ *  43         98         0x25
+ *  44        110         0x24
+ *  45        104         0x24
+ *  46         98         0x24
+ *  47        110         0x23
+ *  48        104         0x23
+ *  49         98         0x23
+ *  50        110         0x22
+ *  51        104         0x18
+ *  52         98         0x18
+ *  53        110         0x17
+ *  54        104         0x17
+ *  55         98         0x17
+ *  56        110         0x16
+ *  57        104         0x16
+ *  58         98         0x16
+ *  59        110         0x15
+ *  60        104         0x15
+ *  61         98         0x15
+ *  62        110         0x14
+ *  63        104         0x14
+ *  64         98         0x14
+ *  65        110         0x13
+ *  66        104         0x13
+ *  67         98         0x13
+ *  68        110         0x12
+ *  69        104         0x08
+ *  70         98         0x08
+ *  71        110         0x07
+ *  72        104         0x07
+ *  73         98         0x07
+ *  74        110         0x06
+ *  75        104         0x06
+ *  76         98         0x06
+ *  77        110         0x05
+ *  78        104         0x05
+ *  79         98         0x05
+ *  80        110         0x04
+ *  81        104         0x04
+ *  82         98         0x04
+ *  83        110         0x03
+ *  84        104         0x03
+ *  85         98         0x03
+ *  86        110         0x02
+ *  87        104         0x02
+ *  88         98         0x02
+ *  89        110         0x01
+ *  90        104         0x01
+ *  91         98         0x01
+ *  92        110         0x00
+ *  93        104         0x00
+ *  94         98         0x00
+ *  95         93         0x00
+ *  96         88         0x00
+ *  97         83         0x00
+ *  98         78         0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated.  These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24   (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52   (34)
+#define IL_TX_POWER_REGULATORY_MIN          (0)
+#define IL_TX_POWER_REGULATORY_MAX          (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion.  This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it is violating neither the saturation limit,
+ * nor the regulatory limit, when calculating Tx power settings for various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24   (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52   (38)
+#define IL_TX_POWER_SATURATION_MIN          (20)
+#define IL_TX_POWER_SATURATION_MAX          (50)
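A minimal sketch of the saturation sanity check described above (an
editorial illustration with a hypothetical helper name, not part of this
patch); because the unit is half-dBm, a stored value of 38 reads back as
19 dBm:

static int
il4965_example_sat_half_dbm(int eeprom_val, bool is_5ghz)
{
	if (eeprom_val < IL_TX_POWER_SATURATION_MIN ||
	    eeprom_val > IL_TX_POWER_SATURATION_MAX)
		return is_5ghz ? IL_TX_POWER_DEFAULT_SATURATION_52 :
				 IL_TX_POWER_DEFAULT_SATURATION_24;
	return eeprom_val;
}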
+
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain.  Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below.  If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+	CALIB_CH_GROUP_1 = 0,
+	CALIB_CH_GROUP_2 = 1,
+	CALIB_CH_GROUP_3 = 2,
+	CALIB_CH_GROUP_4 = 3,
+	CALIB_CH_GROUP_5 = 4,
+	CALIB_CH_GROUP_MAX
+};
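The group boundaries above map directly onto the CALIB_CH_GROUP_* enum.
A hypothetical lookup (an editorial sketch, not part of this patch):

static int
il4965_example_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;	/* 2.4 GHz */
	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;	/* 5.2 GHz, ch 34-43 */
	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;	/* 5.3 GHz, ch 44-70 */
	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;	/* 5.5 GHz, ch 71-124 */
	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;	/* 5.7 GHz, ch 125-200 */
	return -1;	/* not in any calibration group */
}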
+
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues).  All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device.  When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS	7
+#define IL49_CMD_FIFO_NUM	4
+#define IL49_NUM_QUEUES	16
+#define IL49_NUM_AMPDU_QUEUES	8
+
+/**
+ * struct il4965_schedq_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue.  If the TFD idx is 0-63, the driver
+ * must duplicate the byte count entry in corresponding idx 256-319.
+ *
+ * padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
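A sketch of the duplication rule just described (an editorial
illustration with a hypothetical helper name, not part of this patch);
the 64-entry/256-entry split comes from the comment above:

static void
il4965_example_set_byte_cnt(struct il4965_scd_bc_tbl *bc_tbl, int tfd_idx,
			    u16 byte_cnt)
{
	bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);

	/* entries 0-63 must be mirrored at 256-319 to cover Tx win wrap */
	if (tfd_idx < 64)
		bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
}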
+
+#define IL4965_RTC_INST_LOWER_BOUND		(0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET	44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
+
+#define IL4965_DEFAULT_TX_RETRY  15
+
+/* EEPROM */
+#define IL4965_FIRST_AMPDU_QUEUE	10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+void il4965_calib_free_results(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos);
+ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos);
+ssize_t il4965_ucode_general_stats_read(struct file *file,
+					char __user *user_buf, size_t count,
+					loff_t *ppos);
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND                   (0x1000)
+#define FH49_MEM_UPPER_BOUND                   (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG		     (FH49_MEM_LOWER_BOUND + 0x97C)
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame).  These 16 pointer registers are offset by 0x04
+ * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND          (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND          (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x)  (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ *     entries (although any power of 2, up to 4096, is selectable by driver).
+ *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ *     (typically 4K, although 8K or 16K are also selectable by driver).
+ *     Driver sets up RB size and number of RBDs in the CB via Rx config
+ *     register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ *     Bit fields within one RBD:
+ *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ *     Driver sets physical address [35:8] of base of RBD circular buffer
+ *     into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ *     (RBs) have been filled, via a "write pointer", actually the idx of
+ *     the RB's corresponding RBD within the circular buffer.  Driver sets
+ *     physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ *     Bit fields in lower dword of Rx status buffer (upper dword not used
+ *     by driver; see struct il4965_shared, val0):
+ *     31-12:  Not used by driver
+ *     11- 0:  Index of last filled Rx buffer descriptor
+ *             (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer.  This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1!  See below).
+ * NOTE:  4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the idx of the latest filled RBD.  The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process.  When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx.  For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0.  Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND	(FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND	(FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0		(FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG	(FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG	(FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR        (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
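The write-pointer handshake described above can be sketched as follows (illustrative only, not taken from the driver; the rxq state structure and helper name are hypothetical). The two constraints carried over from the comment are the multiple-of-8 granularity and, for a 256-entry buffer, using only bits [7:0]:

/* Hypothetical sketch: publish prepared RBDs to the device. */
struct fh49_rxq_sketch {
        u32 write;      /* next RBD the driver will make available */
        u32 read;       /* idx of last RBD the device reported filled */
};

static inline void fh49_rx_restock_sketch(void __iomem *base,
                                          struct fh49_rxq_sketch *rxq)
{
        /* the device expects the write idx in multiples of 8 */
        u32 w = rxq->write & ~7u;

        /* for a 256-entry circular buffer only bits [7:0] are used */
        writel(w & 0xff, base + FH49_RSCSR_CHNL0_RBDCB_WPTR_REG);
}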
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ *        '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ *        typical value 0x10 (about 1/2 msec)
+ *  3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND      (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND      (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0            (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG	(FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0)	/* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000)	/* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000)	/* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000)	/* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000)	/* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000)	/* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
+#define RX_RB_TIMEOUT	(0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
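Putting the RSCSR base addresses and the RCSR bit fields together, a normal-operation channel-0 setup might look like the sketch below (hypothetical helper, not from this patch; assumes the two DMA buffers are already allocated with the alignment stated above):

/* Hypothetical sketch: normal-operation Rx setup for channel 0. */
static inline void fh49_rx_init_sketch(void __iomem *base,
                                       dma_addr_t rbdcb_dma, /* 256-byte aligned */
                                       dma_addr_t stts_dma)  /* 16-byte aligned */
{
        /* RBD circular buffer base, physical address bits [35:8] */
        writel((u32)(rbdcb_dma >> 8), base + FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
        /* Rx status buffer, physical address bits [35:4] */
        writel((u32)(stts_dma >> 4), base + FH49_RSCSR_CHNL0_STTS_WPTR_REG);

        /* DMA enabled, 4K RBs, 2^8 = 256 RBDs, IRQ to host, 0x10 timeout */
        writel(FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
               FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
               FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
               (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
               (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS),
               base + FH49_MEM_RCSR_CHNL0_CONFIG_REG);
}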
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ *  24:  1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND           (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND           (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG       (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG	(FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+					(FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE	(0x01000000)
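The stop-and-poll sequence from the RSSR comment, sketched with an arbitrary timeout (hypothetical helper; udelay() and -ETIMEDOUT are assumed from <linux/delay.h> and <linux/errno.h>):

/* Hypothetical sketch: stop Rx DMA and wait for channel 0 to go idle. */
static inline int fh49_rx_stop_sketch(void __iomem *base)
{
        int t;

        writel(0, base + FH49_MEM_RCSR_CHNL0_CONFIG_REG);    /* Rx DMA off */

        for (t = 0; t < 1000; t++) {                          /* arbitrary bound */
                if (readl(base + FH49_MEM_RSSR_RX_STATUS_REG) &
                    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
                        return 0;
                udelay(20);
        }
        return -ETIMEDOUT;
}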
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT	28
+
+/* TFDB  Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND       (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND       (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl)  (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl)  (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ *     3: Enable internal DMA requests (1, normal operation), disable (0)
+ *  2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND  (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND  (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM                            (7)
+#define FH50_TCSR_CHNL_NUM                            (8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl)	\
+		(FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl)	\
+		(FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl)	\
+		(FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV		(0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE	(0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD	(0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD	(0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD	(0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD	(0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF	(0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	(0x80000000)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY	(0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT	(0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID	(0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM		(20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX		(12)
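Per the TCSR description above, enabling one Tx DMA/FIFO channel for normal operation comes down to a single register write (hypothetical helper name, illustrative only):

/* Hypothetical sketch: enable a Tx DMA/FIFO channel for normal operation. */
static inline void fh49_tx_chnl_enable_sketch(void __iomem *base, int chnl)
{
        writel(FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
               FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE,
               base + FH49_TCSR_CHNL_TX_CONFIG_REG(chnl));
}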
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24:  1 = Channel buffers empty (channel 7:0)
+ * 23-16:  1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND		(FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND		(FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG		(FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31:  Indicates an address error when accessing internal memory
+ *	uCode/driver must write "1" in order to clear this flag
+ * 30:  Indicates that Host did not send the expected number of dwords to FH
+ *	uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ *	command was received from the scheduler while the TRB was already full
+ *	with previous command
+ *	uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ *	bit is set, it indicates that the FH has received a full indication
+ *	from the RTC TxFIFO and the current value of the TxCredit counter was
+ *	not equal to zero. This means that the credit mechanism was not
+ *	synchronized to the TxFIFO status
+ *	uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG		(FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
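Mirroring the Rx case, stopping a Tx channel and waiting for it to drain could be sketched as below (hypothetical helper, arbitrary timeout, not from this patch):

/* Hypothetical sketch: stop a Tx DMA channel and poll TSSR until idle. */
static inline int fh49_tx_chnl_stop_sketch(void __iomem *base, int chnl)
{
        int t;

        writel(0, base + FH49_TCSR_CHNL_TX_CONFIG_REG(chnl));

        for (t = 0; t < 1000; t++) {
                if (readl(base + FH49_TSSR_TX_STATUS_REG) &
                    FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl))
                        return 0;
                udelay(20);
        }
        return -ETIMEDOUT;
}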
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL		(9)
+#define FH49_SRVC_LOWER_BOUND	(FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND	(FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+		(FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG	(FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from memory to the TX FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000	/* 4k */
+
+#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd..05bd375 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
 	tristate
 	select FW_LOADER
 	select NEW_LEDS
@@ -7,13 +7,13 @@
 	select MAC80211_LEDS
 
 menu "Debugging Options"
-	depends on IWLWIFI_LEGACY
+	depends on IWLEGACY
 
-config IWLWIFI_LEGACY_DEBUG
-	bool "Enable full debugging output in 4965 and 3945 drivers"
-	depends on IWLWIFI_LEGACY
+config IWLEGACY_DEBUG
+	bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+	depends on IWLEGACY
 	---help---
-	  This option will enable debug tracing output for the iwlwifilegacy
+	  This option will enable debug tracing output for the iwlegacy
 	  drivers.
 
 	  This will result in the kernel module being ~100k larger.  You can
@@ -29,43 +29,26 @@
 		  % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
 
 	  You can find the list of debug mask values in:
-		  drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+		  drivers/net/wireless/iwlegacy/common.h
 
 	  If this is your first time using this driver, you should say Y here
 	  as the debug information can assist others in helping you resolve
 	  any problems you may encounter.
 
-config IWLWIFI_LEGACY_DEBUGFS
-        bool "4965 and 3945 debugfs support"
-        depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+config IWLEGACY_DEBUGFS
+        bool "iwlegacy (iwl 3945/4965) debugfs support"
+        depends on IWLEGACY && MAC80211_DEBUGFS
         ---help---
-	  Enable creation of debugfs files for the iwlwifilegacy drivers. This
+	  Enable creation of debugfs files for the iwlegacy drivers. This
 	  is a low-impact option that allows getting insight into the
 	  driver's state at runtime.
 
-config IWLWIFI_LEGACY_DEVICE_TRACING
-	bool "iwlwifilegacy legacy device access tracing"
-	depends on IWLWIFI_LEGACY
-	depends on EVENT_TRACING
-	help
-	  Say Y here to trace all commands, including TX frames and IO
-	  accesses, sent to the device. If you say yes, iwlwifilegacy will
-	  register with the ftrace framework for event tracing and dump
-	  all this information to the ringbuffer, you may need to
-	  increase the ringbuffer size. See the ftrace documentation
-	  for more information.
-
-	  When tracing is not enabled, this option still has some
-	  (though rather small) overhead.
-
-	  If unsure, say Y so we can help you better when problems
-	  occur.
 endmenu
 
 config IWL4965
 	tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
 	depends on PCI && MAC80211
-	select IWLWIFI_LEGACY
+	select IWLEGACY
 	---help---
 	  This option enables support for
 
@@ -93,7 +76,7 @@
 config IWL3945
 	tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
 	depends on PCI && MAC80211
-	select IWLWIFI_LEGACY
+	select IWLEGACY
 	---help---
 	  Select to build the driver supporting the:
 
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb3..c985a01 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY)	+= iwl-legacy.o
-iwl-legacy-objs 		:= iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwl-legacy-objs 		+= iwl-rx.o iwl-tx.o iwl-sta.o
-iwl-legacy-objs 		+= iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+obj-$(CONFIG_IWLEGACY)	+= iwlegacy.o
+iwlegacy-objs 		:= common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
 
-iwl-legacy-objs += $(iwl-legacy-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
+iwlegacy-objs += $(iwlegacy-m)
 
 # 4965
 obj-$(CONFIG_IWL4965)	+= iwl4965.o
-iwl4965-objs		:= iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
-iwl4965-objs		+= iwl-4965-ucode.o iwl-4965-tx.o
-iwl4965-objs		+= iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs		+= iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+iwl4965-objs		:= 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
 
 # 3945
 obj-$(CONFIG_IWL3945)	+= iwl3945.o
-iwl3945-objs		:= iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-objs		:= 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
similarity index 79%
rename from drivers/net/wireless/iwlegacy/iwl-commands.h
rename to drivers/net/wireless/iwlegacy/commands.h
index 8990405..25dd7d2 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
- * Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
- */
 
-#ifndef __iwl_legacy_commands_h__
-#define __iwl_legacy_commands_h__
+#ifndef __il_commands_h__
+#define __il_commands_h__
 
-struct iwl_priv;
+#include <linux/ieee80211.h>
+
+struct il_priv;
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver)	(((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver)	((ver) & 0x000000FF)
-
+#define IL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver)	(((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver)	((ver) & 0x000000FF)
 
 /* Tx rates */
-#define IWL_CCK_RATES	4
-#define IWL_OFDM_RATES	8
-#define IWL_MAX_RATES	(IWL_CCK_RATES + IWL_OFDM_RATES)
+#define IL_CCK_RATES	4
+#define IL_OFDM_RATES	8
+#define IL_MAX_RATES	(IL_CCK_RATES + IL_OFDM_RATES)
 
 enum {
-	REPLY_ALIVE = 0x1,
-	REPLY_ERROR = 0x2,
+	N_ALIVE = 0x1,
+	N_ERROR = 0x2,
 
 	/* RXON and QOS commands */
-	REPLY_RXON = 0x10,
-	REPLY_RXON_ASSOC = 0x11,
-	REPLY_QOS_PARAM = 0x13,
-	REPLY_RXON_TIMING = 0x14,
+	C_RXON = 0x10,
+	C_RXON_ASSOC = 0x11,
+	C_QOS_PARAM = 0x13,
+	C_RXON_TIMING = 0x14,
 
 	/* Multi-Station support */
-	REPLY_ADD_STA = 0x18,
-	REPLY_REMOVE_STA = 0x19,
+	C_ADD_STA = 0x18,
+	C_REM_STA = 0x19,
 
 	/* Security */
-	REPLY_WEPKEY = 0x20,
+	C_WEPKEY = 0x20,
 
 	/* RX, TX, LEDs */
-	REPLY_3945_RX = 0x1b,           /* 3945 only */
-	REPLY_TX = 0x1c,
-	REPLY_RATE_SCALE = 0x47,	/* 3945 only */
-	REPLY_LEDS_CMD = 0x48,
-	REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+	N_3945_RX = 0x1b,	/* 3945 only */
+	C_TX = 0x1c,
+	C_RATE_SCALE = 0x47,	/* 3945 only */
+	C_LEDS = 0x48,
+	C_TX_LINK_QUALITY_CMD = 0x4e,	/* for 4965 */
 
 	/* 802.11h related */
-	REPLY_CHANNEL_SWITCH = 0x72,
-	CHANNEL_SWITCH_NOTIFICATION = 0x73,
-	REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
-	SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+	C_CHANNEL_SWITCH = 0x72,
+	N_CHANNEL_SWITCH = 0x73,
+	C_SPECTRUM_MEASUREMENT = 0x74,
+	N_SPECTRUM_MEASUREMENT = 0x75,
 
 	/* Power Management */
-	POWER_TABLE_CMD = 0x77,
-	PM_SLEEP_NOTIFICATION = 0x7A,
-	PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+	C_POWER_TBL = 0x77,
+	N_PM_SLEEP = 0x7A,
+	N_PM_DEBUG_STATS = 0x7B,
 
 	/* Scan commands and notifications */
-	REPLY_SCAN_CMD = 0x80,
-	REPLY_SCAN_ABORT_CMD = 0x81,
-	SCAN_START_NOTIFICATION = 0x82,
-	SCAN_RESULTS_NOTIFICATION = 0x83,
-	SCAN_COMPLETE_NOTIFICATION = 0x84,
+	C_SCAN = 0x80,
+	C_SCAN_ABORT = 0x81,
+	N_SCAN_START = 0x82,
+	N_SCAN_RESULTS = 0x83,
+	N_SCAN_COMPLETE = 0x84,
 
 	/* IBSS/AP commands */
-	BEACON_NOTIFICATION = 0x90,
-	REPLY_TX_BEACON = 0x91,
+	N_BEACON = 0x90,
+	C_TX_BEACON = 0x91,
 
 	/* Miscellaneous commands */
-	REPLY_TX_PWR_TABLE_CMD = 0x97,
+	C_TX_PWR_TBL = 0x97,
 
 	/* Bluetooth device coexistence config command */
-	REPLY_BT_CONFIG = 0x9b,
+	C_BT_CONFIG = 0x9b,
 
 	/* Statistics */
-	REPLY_STATISTICS_CMD = 0x9c,
-	STATISTICS_NOTIFICATION = 0x9d,
+	C_STATS = 0x9c,
+	N_STATS = 0x9d,
 
 	/* RF-KILL commands and notifications */
-	CARD_STATE_NOTIFICATION = 0xa1,
+	N_CARD_STATE = 0xa1,
 
 	/* Missed beacons notification */
-	MISSED_BEACONS_NOTIFICATION = 0xa2,
+	N_MISSED_BEACONS = 0xa2,
 
-	REPLY_CT_KILL_CONFIG_CMD = 0xa4,
-	SENSITIVITY_CMD = 0xa8,
-	REPLY_PHY_CALIBRATION_CMD = 0xb0,
-	REPLY_RX_PHY_CMD = 0xc0,
-	REPLY_RX_MPDU_CMD = 0xc1,
-	REPLY_RX = 0xc3,
-	REPLY_COMPRESSED_BA = 0xc5,
+	C_CT_KILL_CONFIG = 0xa4,
+	C_SENSITIVITY = 0xa8,
+	C_PHY_CALIBRATION = 0xb0,
+	N_RX_PHY = 0xc0,
+	N_RX_MPDU = 0xc1,
+	N_RX = 0xc3,
+	N_COMPRESSED_BA = 0xc5,
 
-	REPLY_MAX = 0xff
+	IL_CN_MAX = 0xff
 };
 
 /******************************************************************************
@@ -163,25 +159,25 @@
  *
  *****************************************************************************/
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
 
 #define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
 #define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s)	((s) & 0xff)
-#define INDEX_TO_SEQ(i)	((i) & 0xff)
+#define SEQ_TO_IDX(s)	((s) & 0xff)
+#define IDX_TO_SEQ(i)	((i) & 0xff)
 #define SEQ_HUGE_FRAME	cpu_to_le16(0x4000)
 #define SEQ_RX_FRAME	cpu_to_le16(0x8000)
 
 /**
- * struct iwl_cmd_header
+ * struct il_cmd_header
  *
  * This header format appears in the beginning of each command sent from the
  * driver, and each response/notification received from uCode.
  */
-struct iwl_cmd_header {
-	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
-	u8 flags;	/* 0:5 reserved, 6 abort, 7 internal */
+struct il_cmd_header {
+	u8 cmd;			/* Command ID:  C_RXON, etc. */
+	u8 flags;		/* 0:5 reserved, 6 abort, 7 internal */
 	/*
 	 * The driver sets up the sequence number to values of its choosing.
 	 * uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@
 	 * There is one exception:  uCode sets bit 15 when it originates
 	 * the response/notification, i.e. when the response/notification
 	 * is not a direct response to a command sent by the driver.  For
-	 * example, uCode issues REPLY_3945_RX when it sends a received frame
+	 * example, uCode issues N_3945_RX when it sends a received frame
 	 * to the driver; it is not a direct response to any driver command.
 	 *
 	 * The Linux driver uses the following format:
 	 *
-	 *  0:7		tfd index - position within TX queue
-	 *  8:12	TX queue id
-	 *  13		reserved
-	 *  14		huge - driver sets this to indicate command is in the
-	 *  		'huge' storage at the end of the command buffers
-	 *  15		unsolicited RX or uCode-originated notification
-	*/
+	 *  0:7         tfd idx - position within TX queue
+	 *  8:12        TX queue id
+	 *  13          reserved
+	 *  14          huge - driver sets this to indicate command is in the
+	 *              'huge' storage at the end of the command buffers
+	 *  15          unsolicited RX or uCode-originated notification
+	 */
 	__le16 sequence;
 
 	/* command or response/notification data follows immediately */
 	u8 data[0];
 } __packed;
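The sequence-field layout described in the comment above maps directly onto the SEQ_/IDX_ helpers defined earlier; as a sketch (hypothetical helper, not part of the patch):

/* Hypothetical sketch: build the driver-chosen sequence value for a command. */
static inline __le16 il_build_cmd_sequence_sketch(u8 txq_id, u8 tfd_idx)
{
        /* bits 8:12 = TX queue id, bits 0:7 = TFD idx within that queue */
        return cpu_to_le16(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(tfd_idx));
}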
 
-
 /**
- * struct iwl3945_tx_power
+ * struct il3945_tx_power
  *
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
  *
  * Each entry contains two values:
  * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
@@ -223,21 +218,21 @@
  * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
  *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
  *
- * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
  */
-struct iwl3945_tx_power {
+struct il3945_tx_power {
 	u8 tx_gain;		/* gain for analog radio */
 	u8 dsp_atten;		/* gain for DSP */
 } __packed;
 
 /**
- * struct iwl3945_power_per_rate
+ * struct il3945_power_per_rate
  *
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
  */
-struct iwl3945_power_per_rate {
+struct il3945_power_per_rate {
 	u8 rate;		/* plcp */
-	struct iwl3945_tx_power tpc;
+	struct il3945_tx_power tpc;
 	u8 reserved;
 } __packed;
 
@@ -245,10 +240,10 @@
  * iwl4965 rate_n_flags bit fields
  *
  * rate_n_flags format is used in following iwl4965 commands:
- *  REPLY_RX (response only)
- *  REPLY_RX_MPDU (response only)
- *  REPLY_TX (both command and response)
- *  REPLY_TX_LINK_QUALITY_CMD
+ *  N_RX (response only)
+ *  N_RX_MPDU (response only)
+ *  C_TX (both command and response)
+ *  C_TX_LINK_QUALITY_CMD
  *
  * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
  *  2-0:  0)   6 Mbps
@@ -326,17 +321,17 @@
 #define RATE_MCS_ANT_ABC_MSK	(RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
 #define RATE_ANT_NUM 3
 
-#define POWER_TABLE_NUM_ENTRIES			33
-#define POWER_TABLE_NUM_HT_OFDM_ENTRIES		32
-#define POWER_TABLE_CCK_ENTRY			32
+#define POWER_TBL_NUM_ENTRIES			33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES		32
+#define POWER_TBL_CCK_ENTRY			32
 
-#define IWL_PWR_NUM_HT_OFDM_ENTRIES		24
-#define IWL_PWR_CCK_ENTRIES			2
+#define IL_PWR_NUM_HT_OFDM_ENTRIES		24
+#define IL_PWR_CCK_ENTRIES			2
 
 /**
- * union iwl4965_tx_power_dual_stream
+ * union il4965_tx_power_dual_stream
  *
- * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
  * Use __le32 version (struct tx_power_dual_stream) when building command.
  *
  * Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@
  * For MIMO rates, one value may be different from the other,
  * in order to balance the Tx output between the two transmitters.
  *
- * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ * See more details in doc for TXPOWER in 4965.h.
  */
-union iwl4965_tx_power_dual_stream {
+union il4965_tx_power_dual_stream {
 	struct {
 		u8 radio_tx_gain[2];
 		u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@
 /**
  * struct tx_power_dual_stream
  *
- * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
  *
- * Same format as iwl_tx_power_dual_stream, but __le32
+ * Same format as il_tx_power_dual_stream, but __le32
  */
 struct tx_power_dual_stream {
 	__le32 dw;
 } __packed;
 
 /**
- * struct iwl4965_tx_power_db
+ * struct il4965_tx_power_db
  *
- * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
  */
-struct iwl4965_tx_power_db {
-	struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+struct il4965_tx_power_db {
+	struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
 } __packed;
 
 /******************************************************************************
@@ -387,7 +382,7 @@
 #define INITIALIZE_SUBTYPE    (9)
 
 /*
- * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
  *
  * uCode issues this "initialize alive" notification once the initialization
  * uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@
  * 3)  Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
  *     for each of 5 frequency ranges.
  */
-struct iwl_init_alive_resp {
+struct il_init_alive_resp {
 	u8 ucode_minor;
 	u8 ucode_major;
 	__le16 reserved1;
@@ -433,9 +428,8 @@
 				 * 2 Tx chains */
 } __packed;
 
-
 /**
- * REPLY_ALIVE = 0x1 (response only, not a command)
+ * N_ALIVE = 0x1 (response only, not a command)
  *
  * uCode issues this "alive" notification once the runtime image is ready
  * to receive commands from the driver.  This is the *second* "alive"
@@ -454,7 +448,7 @@
  *	__le32 log_size;     log capacity (in number of entries)
  *	__le32 type;         (1) timestamp with each entry, (0) no timestamp
  *	__le32 wraps;        # times uCode has wrapped to top of circular buffer
- *      __le32 write_index;  next circular buffer entry that uCode would fill
+ *      __le32 write_idx;  next circular buffer entry that uCode would fill
  *
  *     The header is followed by the circular buffer of log entries.  Entries
  *     with timestamps have the following format:
@@ -511,13 +505,13 @@
  * The Linux driver can print both logs to the system log when a uCode error
  * occurs.
  */
-struct iwl_alive_resp {
+struct il_alive_resp {
 	u8 ucode_minor;
 	u8 ucode_major;
 	__le16 reserved1;
 	u8 sw_rev[8];
 	u8 ver_type;
-	u8 ver_subtype;			/* not "9" for runtime alive */
+	u8 ver_subtype;		/* not "9" for runtime alive */
 	__le16 reserved2;
 	__le32 log_event_table_ptr;	/* SRAM address for event log */
 	__le32 error_event_table_ptr;	/* SRAM address for error log */
@@ -526,9 +520,9 @@
 } __packed;
 
 /*
- * REPLY_ERROR = 0x2 (response only, not a command)
+ * N_ERROR = 0x2 (response only, not a command)
  */
-struct iwl_error_resp {
+struct il_error_resp {
 	__le32 error_type;
 	u8 cmd_id;
 	u8 reserved1;
@@ -554,7 +548,6 @@
 	RXON_DEV_TYPE_SNIFFER = 6,
 };
 
-
 #define RXON_RX_CHAIN_DRIVER_FORCE_MSK		cpu_to_le16(0x1 << 0)
 #define RXON_RX_CHAIN_DRIVER_FORCE_POS		(0)
 #define RXON_RX_CHAIN_VALID_MSK			cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@
 * (according to ON_AIR deassertion) */
 #define RXON_FLG_TSF2HOST_MSK           cpu_to_le32(1 << 15)
 
-
 /* HT flags */
 #define RXON_FLG_CTRL_CHANNEL_LOC_POS		(22)
 #define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK	cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@
 #define RXON_FILTER_BCON_AWARE_MSK      cpu_to_le32(1 << 6)
 
 /**
- * REPLY_RXON = 0x10 (command, has simple generic response)
+ * C_RXON = 0x10 (command, has simple generic response)
  *
  * RXON tunes the radio tuner to a service channel, and sets up a number
  * of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@
  *        channel.
  *
  * NOTE:  All RXONs wipe clean the internal txpower table.  Driver must
- *        issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ *        issue a new C_TX_PWR_TBL after each C_RXON (0x10),
  *        regardless of whether RXON_FILTER_ASSOC_MSK is set.
  */
 
-struct iwl3945_rxon_cmd {
+struct il3945_rxon_cmd {
 	u8 node_addr[6];
 	__le16 reserved1;
 	u8 bssid_addr[6];
@@ -676,7 +668,7 @@
 	__le16 reserved5;
 } __packed;
 
-struct iwl4965_rxon_cmd {
+struct il4965_rxon_cmd {
 	u8 node_addr[6];
 	__le16 reserved1;
 	u8 bssid_addr[6];
@@ -699,7 +691,7 @@
 /* Create a common rxon cmd which will be typecast into the 3945 or 4965
  * specific rxon cmd, depending on where it is called from.
  */
-struct iwl_legacy_rxon_cmd {
+struct il_rxon_cmd {
 	u8 node_addr[6];
 	__le16 reserved1;
 	u8 bssid_addr[6];
@@ -721,11 +713,10 @@
 	u8 reserved5;
 } __packed;
 
-
 /*
- * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
  */
-struct iwl3945_rxon_assoc_cmd {
+struct il3945_rxon_assoc_cmd {
 	__le32 flags;
 	__le32 filter_flags;
 	u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@
 	__le16 reserved;
 } __packed;
 
-struct iwl4965_rxon_assoc_cmd {
+struct il4965_rxon_assoc_cmd {
 	__le32 flags;
 	__le32 filter_flags;
 	u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@
 	__le16 reserved;
 } __packed;
 
-#define IWL_CONN_MAX_LISTEN_INTERVAL	10
-#define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL	1 /* 1024 */
+#define IL_CONN_MAX_LISTEN_INTERVAL	10
+#define IL_MAX_UCODE_BEACON_INTERVAL	4	/* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL	1	/* 1024 */
 
 /*
- * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
  */
-struct iwl_rxon_time_cmd {
+struct il_rxon_time_cmd {
 	__le64 timestamp;
 	__le16 beacon_interval;
-	__le16 atim_window;
+	__le16 atim_win;
 	__le32 beacon_init_val;
 	__le16 listen_interval;
 	u8 dtim_period;
@@ -762,32 +753,32 @@
 } __packed;
 
 /*
- * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
  */
-struct iwl3945_channel_switch_cmd {
+struct il3945_channel_switch_cmd {
 	u8 band;
 	u8 expect_beacon;
 	__le16 channel;
 	__le32 rxon_flags;
 	__le32 rxon_filter_flags;
 	__le32 switch_time;
-	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+	struct il3945_power_per_rate power[IL_MAX_RATES];
 } __packed;
 
-struct iwl4965_channel_switch_cmd {
+struct il4965_channel_switch_cmd {
 	u8 band;
 	u8 expect_beacon;
 	__le16 channel;
 	__le32 rxon_flags;
 	__le32 rxon_filter_flags;
 	__le32 switch_time;
-	struct iwl4965_tx_power_db tx_power;
+	struct il4965_tx_power_db tx_power;
 } __packed;
 
 /*
- * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
  */
-struct iwl_csa_notification {
+struct il_csa_notification {
 	__le16 band;
 	__le16 channel;
 	__le32 status;		/* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@
  *****************************************************************************/
 
 /**
- * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
- * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
  *
- * @cw_min: Contention window, start value in numbers of slots.
+ * @cw_min: Contention win, start value in numbers of slots.
  *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
- * @cw_max: Contention window, max value in numbers of slots.
+ * @cw_max: Contention win, max value in numbers of slots.
  *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
  * @aifsn:  Number of slots in Arbitration Interframe Space (before
  *          performing random backoff timing prior to Tx).  Device default 1.
  * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
  *
- * Device will automatically increase contention window by (2*CW) + 1 for each
+ * Device will automatically increase contention win by (2*CW) + 1 for each
  * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
  * value, to cap the CW value.
  */
-struct iwl_ac_qos {
+struct il_ac_qos {
 	__le16 cw_min;
 	__le16 cw_max;
 	u8 aifsn;
@@ -832,14 +823,14 @@
 #define AC_NUM                4
 
 /*
- * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
  *
  * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
  * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
  */
-struct iwl_qosparam_cmd {
+struct il_qosparam_cmd {
 	__le32 qos_flags;
-	struct iwl_ac_qos ac[AC_NUM];
+	struct il_ac_qos ac[AC_NUM];
 } __packed;
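As a sketch of how the documented EDCA defaults could be loaded into this command (hypothetical helper; the edca_txop member and its __le16 width are taken from the kernel-doc above and assumed, since the full struct body is not shown in this hunk):

/* Hypothetical sketch: fill C_QOS_PARAM with the device-default EDCA timings. */
static inline void il_qos_defaults_sketch(struct il_qosparam_cmd *cmd)
{
        int i;

        cmd->qos_flags = 0;                             /* left clear for illustration */
        for (i = 0; i < AC_NUM; i++) {
                cmd->ac[i].cw_min = cpu_to_le16(0x0f);  /* device default */
                cmd->ac[i].cw_max = cpu_to_le16(0x3f);  /* device default */
                cmd->ac[i].aifsn = 1;                   /* device default */
                cmd->ac[i].edca_txop = cpu_to_le16(0);  /* no TXOP limit */
        }
}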
 
 /******************************************************************************
@@ -852,15 +843,15 @@
  */
 
 /* Special, dedicated locations within device's station table */
-#define	IWL_AP_ID		0
-#define	IWL_STA_ID		2
-#define	IWL3945_BROADCAST_ID	24
-#define IWL3945_STATION_COUNT	25
-#define IWL4965_BROADCAST_ID	31
-#define	IWL4965_STATION_COUNT	32
+#define	IL_AP_ID		0
+#define	IL_STA_ID		2
+#define	IL3945_BROADCAST_ID	24
+#define IL3945_STATION_COUNT	25
+#define IL4965_BROADCAST_ID	31
+#define	IL4965_STATION_COUNT	32
 
-#define	IWL_STATION_COUNT	32 	/* MAX(3945,4965)*/
-#define	IWL_INVALID_STATION	255
+#define	IL_STATION_COUNT	32	/* MAX(3945,4965) */
+#define	IL_INVALID_STATION	255
 
 #define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
 #define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@
 #define STA_MODIFY_DELBA_TID_MSK	0x10
 #define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20
 
-/* Receiver address (actually, Rx station's index into station table),
+/* Receiver address (actually, Rx station's idx into station table),
  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
 #define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
 
-struct iwl4965_keyinfo {
+struct il4965_keyinfo {
 	__le16 key_flags;
 	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
 	u8 reserved1;
@@ -918,12 +909,12 @@
 /**
  * struct sta_id_modify
  * @addr[ETH_ALEN]: station's MAC address
- * @sta_id: index of station in uCode's station table
+ * @sta_id: idx of station in uCode's station table
  * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
  *
- * Driver selects unused table index when adding new station,
- * or the index to a pre-existing station entry when modifying that station.
- * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
  *
  * modify_mask flags select which parameters to modify vs. leave alone.
  */
@@ -936,15 +927,15 @@
 } __packed;
 
 /*
- * REPLY_ADD_STA = 0x18 (command)
+ * C_ADD_STA = 0x18 (command)
  *
  * The device contains an internal table of per-station information,
  * with info on security keys, aggregation parameters, and Tx rates for
  * initial Tx attempt and any retries (4965 devices uses
- * REPLY_TX_LINK_QUALITY_CMD,
- * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
  *
- * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * C_ADD_STA sets up the table entry for one station, either creating
  * a new entry, or modifying a pre-existing one.
  *
  * NOTE:  RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@
  *        their own txpower/rate setup data).
  *
  *        When getting started on a new channel, driver must set up the
- *        IWL_BROADCAST_ID entry (last entry in the table).  For a client
+ *        IL_BROADCAST_ID entry (last entry in the table).  For a client
  *        station in a BSS, once an AP is selected, driver sets up the AP STA
- *        in the IWL_AP_ID entry (1st entry in the table).  BROADCAST and AP
+ *        in the IL_AP_ID entry (1st entry in the table).  BROADCAST and AP
  *        are all that are needed for a BSS client station.  If the device is
  *        used as AP, or in an IBSS network, driver must set up station table
- *        entries for all STAs in network, starting with index IWL_STA_ID.
+ *        entries for all STAs in network, starting with idx IL_STA_ID.
  */
 
-struct iwl3945_addsta_cmd {
+struct il3945_addsta_cmd {
 	u8 mode;		/* 1: modify existing, 0: add new station */
 	u8 reserved[3];
 	struct sta_id_modify sta;
-	struct iwl4965_keyinfo key;
-	__le32 station_flags;		/* STA_FLG_* */
+	struct il4965_keyinfo key;
+	__le32 station_flags;	/* STA_FLG_* */
 	__le32 station_flags_msk;	/* STA_FLG_* */
 
 	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@
 	__le16 add_immediate_ba_ssn;
 } __packed;
 
-struct iwl4965_addsta_cmd {
+struct il4965_addsta_cmd {
 	u8 mode;		/* 1: modify existing, 0: add new station */
 	u8 reserved[3];
 	struct sta_id_modify sta;
-	struct iwl4965_keyinfo key;
-	__le32 station_flags;		/* STA_FLG_* */
+	struct il4965_keyinfo key;
+	__le32 station_flags;	/* STA_FLG_* */
 	__le32 station_flags_msk;	/* STA_FLG_* */
 
 	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@
 	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
 	__le16 tid_disable_tx;
 
-	__le16	reserved1;
+	__le16 reserved1;
 
 	/* TID for which to add block-ack support.
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@
 } __packed;
 
 /* Wrapper struct for 3945 and 4965 addsta_cmd structures */
-struct iwl_legacy_addsta_cmd {
+struct il_addsta_cmd {
 	u8 mode;		/* 1: modify existing, 0: add new station */
 	u8 reserved[3];
 	struct sta_id_modify sta;
-	struct iwl4965_keyinfo key;
-	__le32 station_flags;		/* STA_FLG_* */
+	struct il4965_keyinfo key;
+	__le32 station_flags;	/* STA_FLG_* */
 	__le32 station_flags_msk;	/* STA_FLG_* */
 
 	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@
 	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
 	__le16 tid_disable_tx;
 
-	__le16	rate_n_flags;		/* 3945 only */
+	__le16 rate_n_flags;	/* 3945 only */
 
 	/* TID for which to add block-ack support.
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@
 	__le16 reserved2;
 } __packed;
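Following the note above about setting up the IL_BROADCAST_ID entry first, a minimal "add new station" command for the 4965 broadcast entry might be sketched as follows (hypothetical helper; the sta.addr and sta.sta_id members are taken from the struct sta_id_modify kernel-doc, and memset()/ETH_ALEN are assumed from the usual headers):

/* Hypothetical sketch: minimal C_ADD_STA for the 4965 broadcast entry. */
static inline void il4965_bcast_addsta_sketch(struct il_addsta_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->mode = 0;                          /* 0: add new station */
        memset(cmd->sta.addr, 0xff, ETH_ALEN);  /* broadcast MAC address */
        cmd->sta.sta_id = IL4965_BROADCAST_ID;  /* last entry in station table */
}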
 
-
 #define ADD_STA_SUCCESS_MSK		0x1
-#define ADD_STA_NO_ROOM_IN_TABLE	0x2
+#define ADD_STA_NO_ROOM_IN_TBL	0x2
 #define ADD_STA_NO_BLOCK_ACK_RESOURCE	0x4
 #define ADD_STA_MODIFY_NON_EXIST_STA	0x8
 /*
- * REPLY_ADD_STA = 0x18 (response)
+ * C_ADD_STA = 0x18 (response)
  */
-struct iwl_add_sta_resp {
-	u8 status;	/* ADD_STA_* */
+struct il_add_sta_resp {
+	u8 status;		/* ADD_STA_* */
 } __packed;
 
 #define REM_STA_SUCCESS_MSK              0x1
 /*
- *  REPLY_REM_STA = 0x19 (response)
+ *  C_REM_STA = 0x19 (response)
  */
-struct iwl_rem_sta_resp {
+struct il_rem_sta_resp {
 	u8 status;
 } __packed;
 
 /*
- *  REPLY_REM_STA = 0x19 (command)
+ *  C_REM_STA = 0x19 (command)
  */
-struct iwl_rem_sta_cmd {
-	u8 num_sta;     /* number of removed stations */
+struct il_rem_sta_cmd {
+	u8 num_sta;		/* number of removed stations */
 	u8 reserved[3];
-	u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+	u8 addr[ETH_ALEN];	/* MAC addr of the first station */
 	u8 reserved2[2];
 } __packed;
 
-#define IWL_TX_FIFO_BK_MSK		cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK		cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK		cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK		cpu_to_le32(BIT(3))
-#define IWL_AGG_TX_QUEUE_MSK		cpu_to_le32(0xffc00)
+#define IL_TX_FIFO_BK_MSK		cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK		cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK		cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK		cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK		cpu_to_le32(0xffc00)
 
-#define IWL_DROP_SINGLE		0
-#define IWL_DROP_SELECTED	1
-#define IWL_DROP_ALL		2
+#define IL_DROP_SINGLE		0
+#define IL_DROP_SELECTED	1
+#define IL_DROP_ALL		2
 
 /*
  * REPLY_WEP_KEY = 0x20
  */
-struct iwl_wep_key {
-	u8 key_index;
+struct il_wep_key {
+	u8 key_idx;
 	u8 key_offset;
 	u8 reserved1[2];
 	u8 key_size;
@@ -1117,12 +1107,12 @@
 	u8 key[16];
 } __packed;
 
-struct iwl_wep_cmd {
+struct il_wep_cmd {
 	u8 num_keys;
 	u8 global_key_type;
 	u8 flags;
 	u8 reserved;
-	struct iwl_wep_key key[0];
+	struct il_wep_key key[0];
 } __packed;
 
 #define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@
 #define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
 #define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)
 
-
-struct iwl3945_rx_frame_stats {
+struct il3945_rx_frame_stats {
 	u8 phy_count;
 	u8 id;
 	u8 rssi;
@@ -1179,7 +1168,7 @@
 	u8 payload[0];
 } __packed;
 
-struct iwl3945_rx_frame_hdr {
+struct il3945_rx_frame_hdr {
 	__le16 channel;
 	__le16 phy_flags;
 	u8 reserved1;
@@ -1188,73 +1177,71 @@
 	u8 payload[0];
 } __packed;
 
-struct iwl3945_rx_frame_end {
+struct il3945_rx_frame_end {
 	__le32 status;
 	__le64 timestamp;
 	__le32 beacon_timestamp;
 } __packed;
 
 /*
- * REPLY_3945_RX = 0x1b (response only, not a command)
+ * N_3945_RX = 0x1b (response only, not a command)
  *
  * NOTE:  DO NOT dereference from casts to this structure
  * It is provided only for calculating minimum data set size.
  * The actual offsets of the hdr and end are dynamic based on
  * stats.phy_count
  */
-struct iwl3945_rx_frame {
-	struct iwl3945_rx_frame_stats stats;
-	struct iwl3945_rx_frame_hdr hdr;
-	struct iwl3945_rx_frame_end end;
+struct il3945_rx_frame {
+	struct il3945_rx_frame_stats stats;
+	struct il3945_rx_frame_hdr hdr;
+	struct il3945_rx_frame_end end;
 } __packed;
 
-#define IWL39_RX_FRAME_SIZE	(4 + sizeof(struct iwl3945_rx_frame))
+#define IL39_RX_FRAME_SIZE	(4 + sizeof(struct il3945_rx_frame))
 
 /* Fixed (non-configurable) rx data from phy */
 
-#define IWL49_RX_RES_PHY_CNT 14
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET	(4)
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK	(0x70)
-#define IWL49_AGC_DB_MASK			(0x3f80)	/* MASK(7,13) */
-#define IWL49_AGC_DB_POS			(7)
-struct iwl4965_rx_non_cfg_phy {
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET	(4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK	(0x70)
+#define IL49_AGC_DB_MASK			(0x3f80)	/* MASK(7,13) */
+#define IL49_AGC_DB_POS			(7)
+struct il4965_rx_non_cfg_phy {
 	__le16 ant_selection;	/* ant A bit 4, ant B bit 5, ant C bit 6 */
 	__le16 agc_info;	/* agc code 0:6, agc dB 7:13, reserved 14:15 */
 	u8 rssi_info[6];	/* we use even entries, 0/2/4 for A/B/C rssi */
 	u8 pad[0];
 } __packed;
 
-
 /*
- * REPLY_RX = 0xc3 (response only, not a command)
+ * N_RX = 0xc3 (response only, not a command)
  * Used only for legacy (non 11n) frames.
  */
-struct iwl_rx_phy_res {
-	u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
+struct il_rx_phy_res {
+	u8 non_cfg_phy_cnt;	/* non configurable DSP phy data byte count */
 	u8 cfg_phy_cnt;		/* configurable DSP phy data byte count */
 	u8 stat_id;		/* configurable DSP phy data set ID */
 	u8 reserved1;
 	__le64 timestamp;	/* TSF at on air rise */
-	__le32 beacon_time_stamp; /* beacon at on-air rise */
+	__le32 beacon_time_stamp;	/* beacon at on-air rise */
 	__le16 phy_flags;	/* general phy flags: band, modulation, ... */
 	__le16 channel;		/* channel number */
-	u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+	u8 non_cfg_phy_buf[32];	/* for various implementations of non_cfg_phy */
 	__le32 rate_n_flags;	/* RATE_MCS_* */
 	__le16 byte_count;	/* frame's byte-count */
 	__le16 frame_time;	/* frame's time on the air */
 } __packed;
 
-struct iwl_rx_mpdu_res_start {
+struct il_rx_mpdu_res_start {
 	__le16 byte_count;
 	__le16 reserved;
 } __packed;
 
-
 /******************************************************************************
  * (5)
  * Tx Commands & Responses:
  *
- * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * Driver must place each C_TX command into one of the prioritized Tx
  * queues in host DRAM, shared between driver and device (see comments for
  * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
  * are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@
  * uCode handles all timing and protocol related to control frames
  * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
  * handle reception of block-acks; uCode updates the host driver via
- * REPLY_COMPRESSED_BA.
+ * N_COMPRESSED_BA.
  *
  * uCode handles retrying Tx when an ACK is expected but not received.
  * This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (4965).
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
  *
- * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
  * This command must be executed after every RXON command, before Tx can occur.
  *****************************************************************************/
 
-/* REPLY_TX Tx flags field */
+/* C_TX Tx flags field */
 
 /*
  * 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@
 #define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
 
 /* For 4965 devices:
- * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
- *    Tx command's initial_rate_index indicates first rate to try;
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ *    Tx command's initial_rate_idx indicates first rate to try;
  *    uCode walks through table for additional Tx attempts.
  * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
  *    This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@
 /* 1: uCode overrides sequence control field in MAC header.
  * 0: Driver provides sequence control field in MAC header.
  * Set this for management frames, non-QOS data frames, non-unicast frames,
- * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+ * and also in Tx command embedded in C_SCAN for active scans. */
 #define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
 
 /* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@
 /* HCCA-AP - disable duration overwriting. */
 #define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
 
-
 /*
  * TX command security control
  */
@@ -1369,10 +1355,10 @@
 #define TKIP_ICV_LEN 4
 
 /*
- * REPLY_TX = 0x1c (command)
+ * C_TX = 0x1c (command)
  */
 
-struct iwl3945_tx_cmd {
+struct il3945_tx_cmd {
 	/*
 	 * MPDU byte count:
 	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@
 } __packed;
 
 /*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
  */
-struct iwl3945_tx_resp {
+struct il3945_tx_resp {
 	u8 failure_rts;
 	u8 failure_frame;
 	u8 bt_kill_count;
@@ -1445,19 +1431,18 @@
 	__le32 status;		/* TX status */
 } __packed;
 
-
 /*
  * 4965 uCode updates these Tx attempt count values in host DRAM.
  * Used for managing Tx retries when expecting block-acks.
  * Driver should set these fields to 0.
  */
-struct iwl_dram_scratch {
+struct il_dram_scratch {
 	u8 try_cnt;		/* Tx attempts */
 	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
 	__le16 reserved;
 } __packed;
 
-struct iwl_tx_cmd {
+struct il_tx_cmd {
 	/*
 	 * MPDU byte count:
 	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@
 
 	/* uCode may modify this field of the Tx command (in host DRAM!).
 	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
-	struct iwl_dram_scratch scratch;
+	struct il_dram_scratch scratch;
 
 	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
 	__le32 rate_n_flags;	/* RATE_MCS_* */
@@ -1493,13 +1478,13 @@
 	u8 sec_ctl;		/* TX_CMD_SEC_* */
 
 	/*
-	 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+	 * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
 	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
 	 * data frames, this field may be used to selectively reduce initial
 	 * rate (via non-0 value) for special frames (e.g. management), while
 	 * still supporting rate scaling for all frames.
 	 */
-	u8 initial_rate_index;
+	u8 initial_rate_idx;
 	u8 reserved;
 	u8 key[16];
 	__le16 next_frame_flags;
@@ -1628,12 +1613,12 @@
 };
 
 enum {
-	TX_STATUS_MSK = 0x000000ff,		/* bits 0:7 */
+	TX_STATUS_MSK = 0x000000ff,	/* bits 0:7 */
 	TX_STATUS_DELAY_MSK = 0x00000040,
 	TX_STATUS_ABORT_MSK = 0x00000080,
 	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
 	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
-	TX_RESERVED = 0x00780000,		/* bits 19:22 */
+	TX_RESERVED = 0x00780000,	/* bits 19:22 */
 	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
 	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
 };
@@ -1671,7 +1656,7 @@
 #define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
 
 /*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
  *
  * This response may be in one of two slightly different formats, indicated
  * by the frame_count field:
@@ -1697,7 +1682,7 @@
 	__le16 sequence;
 } __packed;
 
-struct iwl4965_tx_resp {
+struct il4965_tx_resp {
 	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
 	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
 	u8 failure_rts;		/* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@
 	 */
 	union {
 		__le32 status;
-		struct agg_tx_status agg_status[0]; /* for each agg frame */
+		struct agg_tx_status agg_status[0];	/* for each agg frame */
 	} u;
 } __packed;
 
 /*
- * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
  *
  * Reports Block-Acknowledge from recipient station
  */
-struct iwl_compressed_ba_resp {
+struct il_compressed_ba_resp {
 	__le32 sta_addr_lo32;
 	__le16 sta_addr_hi16;
 	__le16 reserved;
@@ -1754,30 +1739,29 @@
 } __packed;
 
 /*
- * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
  *
- * See details under "TXPOWER" in iwl-4965-hw.h.
+ * See details under "TXPOWER" in 4965.h.
  */
 
-struct iwl3945_txpowertable_cmd {
+struct il3945_txpowertable_cmd {
 	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
 	u8 reserved;
 	__le16 channel;
-	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+	struct il3945_power_per_rate power[IL_MAX_RATES];
 } __packed;
 
-struct iwl4965_txpowertable_cmd {
+struct il4965_txpowertable_cmd {
 	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
 	u8 reserved;
 	__le16 channel;
-	struct iwl4965_tx_power_db tx_power;
+	struct il4965_tx_power_db tx_power;
 } __packed;
 
-
 /**
- * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
  *
- * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
  *
  * NOTE: The table of rates passed to the uCode via the
  * RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@
  *
  * For example, if you set 9MB (PLCP 0x0f) as the first
  * rate in the rate table, the bit mask for that rate
- * when passed through ofdm_basic_rates on the REPLY_RXON
+ * when passed through ofdm_basic_rates on the C_RXON
  * command would be bit 0 (1 << 0)
  */
-struct iwl3945_rate_scaling_info {
+struct il3945_rate_scaling_info {
 	__le16 rate_n_flags;
 	u8 try_cnt;
-	u8 next_rate_index;
+	u8 next_rate_idx;
 } __packed;
 
-struct iwl3945_rate_scaling_cmd {
+struct il3945_rate_scaling_cmd {
 	u8 table_id;
 	u8 reserved[3];
-	struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+	struct il3945_rate_scaling_info table[IL_MAX_RATES];
 } __packed;
 
-
 /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
 #define  LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK	(1 << 0)
 
@@ -1816,28 +1799,27 @@
 #define  LINK_QUAL_ANT_B_MSK (1 << 1)
 #define  LINK_QUAL_ANT_MSK   (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
 
-
 /**
- * struct iwl_link_qual_general_params
+ * struct il_link_qual_general_params
  *
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
  */
-struct iwl_link_qual_general_params {
+struct il_link_qual_general_params {
 	u8 flags;
 
-	/* No entries at or above this (driver chosen) index contain MIMO */
+	/* No entries at or above this (driver chosen) idx contain MIMO */
 	u8 mimo_delimiter;
 
 	/* Best single antenna to use for single stream (legacy, SISO). */
 	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */
 
 	/* Best antennas to use for MIMO (unused for 4965, assumes both). */
-	u8 dual_stream_ant_msk;		/* LINK_QUAL_ANT_* */
+	u8 dual_stream_ant_msk;	/* LINK_QUAL_ANT_* */
 
 	/*
 	 * If driver needs to use different initial rates for different
 	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
-	 * this table will set that up, by indicating the indexes in the
+	 * this table will set that up, by indicating the idxes in the
 	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
 	 * Otherwise, driver should set all entries to 0.
 	 *
@@ -1845,10 +1827,10 @@
 	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
 	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
 	 */
-	u8 start_rate_index[LINK_QUAL_AC_NUM];
+	u8 start_rate_idx[LINK_QUAL_AC_NUM];
 } __packed;
 
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)	/* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
 #define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)
 
@@ -1861,11 +1843,11 @@
 #define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
 
 /**
- * struct iwl_link_qual_agg_params
+ * struct il_link_qual_agg_params
  *
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
  */
-struct iwl_link_qual_agg_params {
+struct il_link_qual_agg_params {
 
 	/*
 	 *Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@
 } __packed;
 
 /*
- * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
  *
- * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
  *
  * Each station in the 4965 device's internal station table has its own table
  * of 16
@@ -1903,13 +1885,13 @@
  * one station.
  *
  * NOTE:  Station must already be in 4965 device's station table.
- *	  Use REPLY_ADD_STA.
+ *	  Use C_ADD_STA.
  *
  * The rate scaling procedures described below work well.  Of course, other
  * procedures are possible, and may work better for particular environments.
  *
  *
- * FILLING THE RATE TABLE
+ * FILLING THE RATE TBL
  *
  * Given a particular initial rate and mode, as determined by the rate
  * scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@
  * speculative mode as the new current active mode.
  *
  * Each history set contains, separately for each possible rate, data for a
- * sliding window of the 62 most recent tx attempts at that rate.  The data
+ * sliding win of the 62 most recent tx attempts at that rate.  The data
  * includes a shifting bitmap of success(1)/failure(0), and sums of successful
  * and attempted frames, from which the driver can additionally calculate a
  * success ratio (success / attempted) and number of failures
- * (attempted - success), and control the size of the window (attempted).
+ * (attempted - success), and control the size of the win (attempted).
  * The driver uses the bit map to remove successes from the success sum, as
- * the oldest tx attempts fall out of the window.
+ * the oldest tx attempts fall out of the win.
  *
  * When the 4965 device makes multiple tx attempts for a given frame, each
  * attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@
  *
  * When using block-ack (aggregation), all frames are transmitted at the same
  * rate, since there is no per-attempt acknowledgment from the destination
- * station.  The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * station.  The Tx response struct il_tx_resp indicates the Tx rate in
  * rate_n_flags field.  After receiving a block-ack, the driver can update
  * history for the entire block all at once.
  *
@@ -2016,8 +1998,8 @@
  *         good performance; higher rate is sure to have poorer success.
  *
  * 6)  Re-evaluate the rate after each tx frame.  If working with block-
- *     acknowledge, history and statistics may be calculated for the entire
- *     block (including prior history that fits within the history windows),
+ *     acknowledge, history and stats may be calculated for the entire
+ *     block (including prior history that fits within the history wins),
  *     before re-evaluation.
  *
  * FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@
  * legacy), and then repeat the search process.
  *
  */
-struct iwl_link_quality_cmd {
+struct il_link_quality_cmd {
 
 	/* Index of destination/recipient station in uCode's station table */
 	u8 sta_id;
 	u8 reserved1;
 	__le16 control;		/* not used */
-	struct iwl_link_qual_general_params general_params;
-	struct iwl_link_qual_agg_params agg_params;
+	struct il_link_qual_general_params general_params;
+	struct il_link_qual_agg_params agg_params;
 
 	/*
-	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
-	 * specifies 1st Tx rate attempted, via index into this table.
+	 * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+	 * specifies 1st Tx rate attempted, via idx into this table.
 	 * 4965 devices works its way through table when retrying Tx.
 	 */
 	struct {
-		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
+		__le32 rate_n_flags;	/* RATE_MCS_*, RATE_* */
 	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
 	__le32 reserved2;
 } __packed;
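
Editor's note: the per-rate history described above (62-attempt sliding window, success sum, attempt count) can be kept as a bitmap plus running counters. A hedged sketch; the structure and helper names are illustrative, not the driver's actual rate-scaling state:

/* Illustrative per-rate history window, as described in the comment above. */
#define RATE_WIN_SIZE 62

struct rate_win {
	u64 bitmap;		/* bit 0 = newest attempt, bit 61 = oldest */
	u32 counter;		/* attempts currently in the window */
	u32 success_counter;	/* successes currently in the window */
};

static void rate_win_add(struct rate_win *w, bool success)
{
	/* Age out the oldest attempt once the window is full. */
	if (w->counter >= RATE_WIN_SIZE) {
		if (w->bitmap & BIT_ULL(RATE_WIN_SIZE - 1))
			w->success_counter--;
		w->counter--;
	}
	w->bitmap = (w->bitmap << 1) & (BIT_ULL(RATE_WIN_SIZE) - 1);
	if (success) {
		w->bitmap |= BIT_ULL(0);
		w->success_counter++;
	}
	w->counter++;
	/* success ratio = success_counter / counter;
	 * failures = counter - success_counter */
}
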
@@ -2117,13 +2099,13 @@
 #define BT_MAX_KILL_DEF (0x5)
 
 /*
- * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
  *
  * 3945 and 4965 devices support hardware handshake with Bluetooth device on
  * same platform.  Bluetooth device alerts wireless device when it will Tx;
  * wireless device can delay or kill its own Tx to accommodate.
  */
-struct iwl_bt_cmd {
+struct il_bt_cmd {
 	u8 flags;
 	u8 lead_time;
 	u8 max_kill;
@@ -2132,7 +2114,6 @@
 	__le32 kill_cts_mask;
 } __packed;
 
-
 /******************************************************************************
  * (6)
  * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@
 				 RXON_FILTER_ASSOC_MSK           | \
 				 RXON_FILTER_BCON_AWARE_MSK)
 
-struct iwl_measure_channel {
+struct il_measure_channel {
 	__le32 duration;	/* measurement duration in extended beacon
 				 * format */
 	u8 channel;		/* channel to measure */
-	u8 type;		/* see enum iwl_measure_type */
+	u8 type;		/* see enum il_measure_type */
 	__le16 reserved;
 } __packed;
 
 /*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
  */
-struct iwl_spectrum_cmd {
+struct il_spectrum_cmd {
 	__le16 len;		/* number of bytes starting from token */
 	u8 token;		/* token id */
 	u8 id;			/* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@
 	__le32 filter_flags;	/* rxon filter flags */
 	__le16 channel_count;	/* minimum 1, maximum 10 */
 	__le16 reserved3;
-	struct iwl_measure_channel channels[10];
+	struct il_measure_channel channels[10];
 } __packed;
 
 /*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
  */
-struct iwl_spectrum_resp {
+struct il_spectrum_resp {
 	u8 token;
 	u8 id;			/* id of the prior command replaced, or 0xff */
 	__le16 status;		/* 0 - command will be handled
@@ -2188,57 +2169,57 @@
 				 *     measurement) */
 } __packed;
 
-enum iwl_measurement_state {
-	IWL_MEASUREMENT_START = 0,
-	IWL_MEASUREMENT_STOP = 1,
+enum il_measurement_state {
+	IL_MEASUREMENT_START = 0,
+	IL_MEASUREMENT_STOP = 1,
 };
 
-enum iwl_measurement_status {
-	IWL_MEASUREMENT_OK = 0,
-	IWL_MEASUREMENT_CONCURRENT = 1,
-	IWL_MEASUREMENT_CSA_CONFLICT = 2,
-	IWL_MEASUREMENT_TGH_CONFLICT = 3,
+enum il_measurement_status {
+	IL_MEASUREMENT_OK = 0,
+	IL_MEASUREMENT_CONCURRENT = 1,
+	IL_MEASUREMENT_CSA_CONFLICT = 2,
+	IL_MEASUREMENT_TGH_CONFLICT = 3,
 	/* 4-5 reserved */
-	IWL_MEASUREMENT_STOPPED = 6,
-	IWL_MEASUREMENT_TIMEOUT = 7,
-	IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+	IL_MEASUREMENT_STOPPED = 6,
+	IL_MEASUREMENT_TIMEOUT = 7,
+	IL_MEASUREMENT_PERIODIC_FAILED = 8,
 };
 
 #define NUM_ELEMENTS_IN_HISTOGRAM 8
 
-struct iwl_measurement_histogram {
+struct il_measurement_histogram {
 	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
 	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
 } __packed;
 
 /* clear channel availability counters */
-struct iwl_measurement_cca_counters {
+struct il_measurement_cca_counters {
 	__le32 ofdm;
 	__le32 cck;
 } __packed;
 
-enum iwl_measure_type {
-	IWL_MEASURE_BASIC = (1 << 0),
-	IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
-	IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
-	IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
-	IWL_MEASURE_FRAME = (1 << 4),
+enum il_measure_type {
+	IL_MEASURE_BASIC = (1 << 0),
+	IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+	IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+	IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+	IL_MEASURE_FRAME = (1 << 4),
 	/* bits 5:6 are reserved */
-	IWL_MEASURE_IDLE = (1 << 7),
+	IL_MEASURE_IDLE = (1 << 7),
 };
 
 /*
- * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
  */
-struct iwl_spectrum_notification {
+struct il_spectrum_notification {
 	u8 id;			/* measurement id -- 0 or 1 */
 	u8 token;
-	u8 channel_index;	/* index in measurement channel list */
+	u8 channel_idx;		/* idx in measurement channel list */
 	u8 state;		/* 0 - start, 1 - stop */
 	__le32 start_time;	/* lower 32-bits of TSF */
 	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
 	u8 channel;
-	u8 type;		/* see enum iwl_measurement_type */
+	u8 type;		/* see enum il_measurement_type */
 	u8 reserved1;
 	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only only
 	 * valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@
 	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
 				 * unidentified */
 	u8 reserved2[3];
-	struct iwl_measurement_histogram histogram;
+	struct il_measurement_histogram histogram;
 	__le32 stop_time;	/* lower 32-bits of TSF */
-	__le32 status;		/* see iwl_measurement_status */
+	__le32 status;		/* see il_measurement_status */
 } __packed;
 
 /******************************************************************************
@@ -2260,10 +2241,10 @@
  *****************************************************************************/
 
 /**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct il_powertable_cmd - Power Table Command
  * @flags: See below:
  *
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
  *
  * PM allow:
  *   bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@
  *              '10' force xtal sleep
  *              '11' Illegal set
  *
- * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
  * ucode assume sleep over DTIM is allowed and we don't need to wake up
  * for every DTIM.
  */
-#define IWL_POWER_VEC_SIZE 5
+#define IL_POWER_VEC_SIZE 5
 
-#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK	cpu_to_le16(BIT(0))
-#define IWL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK	cpu_to_le16(BIT(0))
+#define IL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
 
-struct iwl3945_powertable_cmd {
+struct il3945_powertable_cmd {
 	__le16 flags;
 	u8 reserved[2];
 	__le32 rx_data_timeout;
 	__le32 tx_data_timeout;
-	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
+	__le32 sleep_interval[IL_POWER_VEC_SIZE];
 } __packed;
 
-struct iwl_powertable_cmd {
+struct il_powertable_cmd {
 	__le16 flags;
-	u8 keep_alive_seconds;		/* 3945 reserved */
-	u8 debug_flags;			/* 3945 reserved */
+	u8 keep_alive_seconds;	/* 3945 reserved */
+	u8 debug_flags;		/* 3945 reserved */
 	__le32 rx_data_timeout;
 	__le32 tx_data_timeout;
-	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
+	__le32 sleep_interval[IL_POWER_VEC_SIZE];
 	__le32 keep_alive_beacons;
 } __packed;
 
 /*
- * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
  * all devices identical.
  */
-struct iwl_sleep_notification {
+struct il_sleep_notification {
 	u8 pm_sleep_mode;
 	u8 pm_wakeup_src;
 	__le16 reserved;
@@ -2332,23 +2313,23 @@
 
 /* Sleep states.  all devices identical. */
 enum {
-	IWL_PM_NO_SLEEP = 0,
-	IWL_PM_SLP_MAC = 1,
-	IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
-	IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
-	IWL_PM_SLP_PHY = 4,
-	IWL_PM_SLP_REPENT = 5,
-	IWL_PM_WAKEUP_BY_TIMER = 6,
-	IWL_PM_WAKEUP_BY_DRIVER = 7,
-	IWL_PM_WAKEUP_BY_RFKILL = 8,
+	IL_PM_NO_SLEEP = 0,
+	IL_PM_SLP_MAC = 1,
+	IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+	IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+	IL_PM_SLP_PHY = 4,
+	IL_PM_SLP_REPENT = 5,
+	IL_PM_WAKEUP_BY_TIMER = 6,
+	IL_PM_WAKEUP_BY_DRIVER = 7,
+	IL_PM_WAKEUP_BY_RFKILL = 8,
 	/* 3 reserved */
-	IWL_PM_NUM_OF_MODES = 12,
+	IL_PM_NUM_OF_MODES = 12,
 };
 
 /*
- * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
  */
-struct iwl_card_state_notif {
+struct il_card_state_notif {
 	__le32 flags;
 } __packed;
 
@@ -2357,11 +2338,11 @@
 #define CT_CARD_DISABLED   0x04
 #define RXON_CARD_DISABLED 0x10
 
-struct iwl_ct_kill_config {
-	__le32   reserved;
-	__le32   critical_temperature_M;
-	__le32   critical_temperature_R;
-}  __packed;
+struct il_ct_kill_config {
+	__le32 reserved;
+	__le32 critical_temperature_M;
+	__le32 critical_temperature_R;
+} __packed;
 
 /******************************************************************************
  * (8)
@@ -2373,7 +2354,7 @@
 #define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
 
 /**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * struct il_scan_channel - entry in C_SCAN channel table
  *
  * One for each channel in the scan list.
  * Each channel can independently select:
@@ -2383,7 +2364,7 @@
  *     quiet_plcp_th, good_CRC_th)
  *
  * To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * under struct il_scan_cmd about max_out_time and quiet_time):
  * 1)  If using passive_dwell (i.e. passive_dwell != 0):
  *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
  * 2)  quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@
  *     passive_dwell < max_out_time
  *     active_dwell < max_out_time
  */
-struct iwl3945_scan_channel {
+struct il3945_scan_channel {
 	/*
 	 * type is defined as:
 	 * 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@
 	 * 5:7 reserved
 	 */
 	u8 type;
-	u8 channel;	/* band is selected by iwl3945_scan_cmd "flags" field */
-	struct iwl3945_tx_power tpc;
+	u8 channel;		/* band is selected by il3945_scan_cmd "flags" field */
+	struct il3945_tx_power tpc;
 	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
 	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
 } __packed;
 
 /* set number of direct probes u8 type */
-#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
 
-struct iwl_scan_channel {
+struct il_scan_channel {
 	/*
 	 * type is defined as:
 	 * 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@
 	 * 21:31 reserved
 	 */
 	__le32 type;
-	__le16 channel;	/* band is selected by iwl_scan_cmd "flags" field */
+	__le16 channel;		/* band is selected by il_scan_cmd "flags" field */
 	u8 tx_gain;		/* gain for analog radio */
 	u8 dsp_atten;		/* gain for DSP */
 	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@
 } __packed;
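
Editor's note: the timing rules quoted above can be checked mechanically before building the channel list. A hedged sketch (the helper name is illustrative; dwells and max_out_time are taken in the same units the scan command uses):

/* Illustrative check of the dwell/quiet/out-of-channel rules listed above. */
static bool scan_timing_ok(u16 active_dwell, u16 passive_dwell,
			   u16 quiet_time, u32 max_out_time)
{
	if (passive_dwell && active_dwell > passive_dwell)
		return false;				/* rule 1 */
	if (quiet_time > active_dwell)
		return false;				/* rule 2 */
	if (max_out_time &&
	    (passive_dwell >= max_out_time || active_dwell >= max_out_time))
		return false;				/* rule 3 */
	return true;
}
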
 
 /* set number of direct probes __le32 type */
-#define IWL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+#define IL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
 
 /**
- * struct iwl_ssid_ie - directed scan network information element
+ * struct il_ssid_ie - directed scan network information element
  *
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
  * each channel may select different ssids from among the 20 (4) entries.
  * SSID IEs get transmitted in reverse order of entry.
  */
-struct iwl_ssid_ie {
+struct il_ssid_ie {
 	u8 id;
 	u8 len;
 	u8 ssid[32];
@@ -2445,14 +2426,14 @@
 #define PROBE_OPTION_MAX_3945		4
 #define PROBE_OPTION_MAX		20
 #define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH_DISABLED	0
-#define IWL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
-#define IWL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
-#define IWL_MAX_CMD_SIZE 4096
+#define IL_GOOD_CRC_TH_DISABLED	0
+#define IL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
 
 /*
- * REPLY_SCAN_CMD = 0x80 (command)
+ * C_SCAN = 0x80 (command)
  *
  * The hardware scan command is very powerful; the driver can set it up to
  * maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@
  * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
  *
  * To avoid uCode errors, see timing restrictions described under
- * struct iwl_scan_channel.
+ * struct il_scan_channel.
  */
 
-struct iwl3945_scan_cmd {
+struct il3945_scan_cmd {
 	__le16 len;
 	u8 reserved0;
 	u8 channel_count;	/* # channels in channel list */
@@ -2525,10 +2506,10 @@
 
 	/* For active scans (set to all-0s for passive scans).
 	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
-	struct iwl3945_tx_cmd tx_cmd;
+	struct il3945_tx_cmd tx_cmd;
 
 	/* For directed active scans (set to all-0s otherwise) */
-	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+	struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
 
 	/*
 	 * Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@
 	 * Number of channels in list is specified by channel_count.
 	 * Each channel in list is of type:
 	 *
-	 * struct iwl3945_scan_channel channels[0];
+	 * struct il3945_scan_channel channels[0];
 	 *
 	 * NOTE:  Only one band of channels can be scanned per pass.  You
 	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
-	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+	 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
 	 * before requesting another scan.
 	 */
 	u8 data[0];
 } __packed;
 
-struct iwl_scan_cmd {
+struct il_scan_cmd {
 	__le16 len;
 	u8 reserved0;
 	u8 channel_count;	/* # channels in channel list */
@@ -2569,10 +2550,10 @@
 
 	/* For active scans (set to all-0s for passive scans).
 	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
-	struct iwl_tx_cmd tx_cmd;
+	struct il_tx_cmd tx_cmd;
 
 	/* For directed active scans (set to all-0s otherwise) */
-	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+	struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
 
 	/*
 	 * Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@
 	 * Number of channels in list is specified by channel_count.
 	 * Each channel in list is of type:
 	 *
-	 * struct iwl_scan_channel channels[0];
+	 * struct il_scan_channel channels[0];
 	 *
 	 * NOTE:  Only one band of channels can be scanned per pass.  You
 	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
-	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+	 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
 	 * before requesting another scan.
 	 */
 	u8 data[0];
@@ -2598,16 +2579,16 @@
 #define ABORT_STATUS            0x2
 
 /*
- * REPLY_SCAN_CMD = 0x80 (response)
+ * C_SCAN = 0x80 (response)
  */
-struct iwl_scanreq_notification {
+struct il_scanreq_notification {
 	__le32 status;		/* 1: okay, 2: cannot fulfill request */
 } __packed;
 
 /*
- * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ * N_SCAN_START = 0x82 (notification only, not a command)
  */
-struct iwl_scanstart_notification {
+struct il_scanstart_notification {
 	__le32 tsf_low;
 	__le32 tsf_high;
 	__le32 beacon_timer;
@@ -2620,30 +2601,30 @@
 #define  SCAN_OWNER_STATUS 0x1
 #define  MEASURE_OWNER_STATUS 0x2
 
-#define IWL_PROBE_STATUS_OK		0
-#define IWL_PROBE_STATUS_TX_FAILED	BIT(0)
+#define IL_PROBE_STATUS_OK		0
+#define IL_PROBE_STATUS_TX_FAILED	BIT(0)
 /* error statuses combined with TX_FAILED */
-#define IWL_PROBE_STATUS_FAIL_TTL	BIT(1)
-#define IWL_PROBE_STATUS_FAIL_BT	BIT(2)
+#define IL_PROBE_STATUS_FAIL_TTL	BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT	BIT(2)
 
-#define NUMBER_OF_STATISTICS 1	/* first __le32 is good CRC */
+#define NUMBER_OF_STATS 1	/* first __le32 is good CRC */
 /*
- * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
  */
-struct iwl_scanresults_notification {
+struct il_scanresults_notification {
 	u8 channel;
 	u8 band;
 	u8 probe_status;
-	u8 num_probe_not_sent; /* not enough time to send */
+	u8 num_probe_not_sent;	/* not enough time to send */
 	__le32 tsf_low;
 	__le32 tsf_high;
-	__le32 statistics[NUMBER_OF_STATISTICS];
+	__le32 stats[NUMBER_OF_STATS];
 } __packed;
 
 /*
- * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
  */
-struct iwl_scancomplete_notification {
+struct il_scancomplete_notification {
 	u8 scanned_channels;
 	u8 status;
 	u8 last_channel;
@@ -2651,50 +2632,49 @@
 	__le32 tsf_high;
 } __packed;
 
-
 /******************************************************************************
  * (9)
  * IBSS/AP Commands and Notifications:
  *
  *****************************************************************************/
 
-enum iwl_ibss_manager {
-	IWL_NOT_IBSS_MANAGER = 0,
-	IWL_IBSS_MANAGER = 1,
+enum il_ibss_manager {
+	IL_NOT_IBSS_MANAGER = 0,
+	IL_IBSS_MANAGER = 1,
 };
 
 /*
- * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ * N_BEACON = 0x90 (notification only, not a command)
  */
 
-struct iwl3945_beacon_notif {
-	struct iwl3945_tx_resp beacon_notify_hdr;
+struct il3945_beacon_notif {
+	struct il3945_tx_resp beacon_notify_hdr;
 	__le32 low_tsf;
 	__le32 high_tsf;
 	__le32 ibss_mgr_status;
 } __packed;
 
-struct iwl4965_beacon_notif {
-	struct iwl4965_tx_resp beacon_notify_hdr;
+struct il4965_beacon_notif {
+	struct il4965_tx_resp beacon_notify_hdr;
 	__le32 low_tsf;
 	__le32 high_tsf;
 	__le32 ibss_mgr_status;
 } __packed;
 
 /*
- * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ * C_TX_BEACON = 0x91 (command, has simple generic response)
  */
 
-struct iwl3945_tx_beacon_cmd {
-	struct iwl3945_tx_cmd tx;
+struct il3945_tx_beacon_cmd {
+	struct il3945_tx_cmd tx;
 	__le16 tim_idx;
 	u8 tim_size;
 	u8 reserved1;
 	struct ieee80211_hdr frame[0];	/* beacon frame */
 } __packed;
 
-struct iwl_tx_beacon_cmd {
-	struct iwl_tx_cmd tx;
+struct il_tx_beacon_cmd {
+	struct il_tx_cmd tx;
 	__le16 tim_idx;
 	u8 tim_size;
 	u8 reserved1;
@@ -2707,7 +2687,7 @@
  *
  *****************************************************************************/
 
-#define IWL_TEMP_CONVERT 260
+#define IL_TEMP_CONVERT 260
 
 #define SUP_RATE_11A_MAX_NUM_CHANNELS  8
 #define SUP_RATE_11B_MAX_NUM_CHANNELS  4
@@ -2727,9 +2707,9 @@
 	} failed;
 } __packed;
 
-/* statistics command response */
+/* stats command response */
 
-struct iwl39_statistics_rx_phy {
+struct iwl39_stats_rx_phy {
 	__le32 ina_cnt;
 	__le32 fina_cnt;
 	__le32 plcp_err;
@@ -2747,7 +2727,7 @@
 	__le32 sent_cts_cnt;
 } __packed;
 
-struct iwl39_statistics_rx_non_phy {
+struct iwl39_stats_rx_non_phy {
 	__le32 bogus_cts;	/* CTS received when not expecting CTS */
 	__le32 bogus_ack;	/* ACK received when not expecting ACK */
 	__le32 non_bssid_frames;	/* number of frames with BSSID that
@@ -2758,13 +2738,13 @@
 					 * our serving channel */
 } __packed;
 
-struct iwl39_statistics_rx {
-	struct iwl39_statistics_rx_phy ofdm;
-	struct iwl39_statistics_rx_phy cck;
-	struct iwl39_statistics_rx_non_phy general;
+struct iwl39_stats_rx {
+	struct iwl39_stats_rx_phy ofdm;
+	struct iwl39_stats_rx_phy cck;
+	struct iwl39_stats_rx_non_phy general;
 } __packed;
 
-struct iwl39_statistics_tx {
+struct iwl39_stats_tx {
 	__le32 preamble_cnt;
 	__le32 rx_detected_cnt;
 	__le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@
 	__le32 actual_ack_cnt;
 } __packed;
 
-struct statistics_dbg {
+struct stats_dbg {
 	__le32 burst_check;
 	__le32 burst_count;
 	__le32 wait_for_silence_timeout_cnt;
 	__le32 reserved[3];
 } __packed;
 
-struct iwl39_statistics_div {
+struct iwl39_stats_div {
 	__le32 tx_on_a;
 	__le32 tx_on_b;
 	__le32 exec_time;
 	__le32 probe_time;
 } __packed;
 
-struct iwl39_statistics_general {
+struct iwl39_stats_general {
 	__le32 temperature;
-	struct statistics_dbg dbg;
+	struct stats_dbg dbg;
 	__le32 sleep_time;
 	__le32 slots_out;
 	__le32 slots_idle;
 	__le32 ttl_timestamp;
-	struct iwl39_statistics_div div;
+	struct iwl39_stats_div div;
 } __packed;
 
-struct statistics_rx_phy {
+struct stats_rx_phy {
 	__le32 ina_cnt;
 	__le32 fina_cnt;
 	__le32 plcp_err;
@@ -2823,7 +2803,7 @@
 	__le32 reserved3;
 } __packed;
 
-struct statistics_rx_ht_phy {
+struct stats_rx_ht_phy {
 	__le32 plcp_err;
 	__le32 overrun_err;
 	__le32 early_overrun_err;
@@ -2838,7 +2818,7 @@
 
 #define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
 
-struct statistics_rx_non_phy {
+struct stats_rx_non_phy {
 	__le32 bogus_cts;	/* CTS received when not expecting CTS */
 	__le32 bogus_ack;	/* ACK received when not expecting ACK */
 	__le32 non_bssid_frames;	/* number of frames with BSSID that
@@ -2852,15 +2832,15 @@
 	__le32 num_missed_bcon;	/* number of missed beacons */
 	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
 					 * ADC was in saturation */
-	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
-					  * for INA */
+	__le32 ina_detection_search_time;	/* total time (in 0.8us) searched
+						 * for INA */
 	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
 	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
 	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
 	__le32 interference_data_flag;	/* flag for interference data
 					 * availability. 1 when data is
 					 * available. */
-	__le32 channel_load;		/* counts RX Enable time in uSec */
+	__le32 channel_load;	/* counts RX Enable time in uSec */
 	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
 					 * and CCK) counter */
 	__le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@
 	__le32 beacon_energy_c;
 } __packed;
 
-struct statistics_rx {
-	struct statistics_rx_phy ofdm;
-	struct statistics_rx_phy cck;
-	struct statistics_rx_non_phy general;
-	struct statistics_rx_ht_phy ofdm_ht;
+struct stats_rx {
+	struct stats_rx_phy ofdm;
+	struct stats_rx_phy cck;
+	struct stats_rx_non_phy general;
+	struct stats_rx_ht_phy ofdm_ht;
 } __packed;
 
 /**
- * struct statistics_tx_power - current tx power
+ * struct stats_tx_power - current tx power
  *
  * @ant_a: current tx power on chain a in 1/2 dB step
  * @ant_b: current tx power on chain b in 1/2 dB step
  * @ant_c: current tx power on chain c in 1/2 dB step
  */
-struct statistics_tx_power {
+struct stats_tx_power {
 	u8 ant_a;
 	u8 ant_b;
 	u8 ant_c;
 	u8 reserved;
 } __packed;
 
-struct statistics_tx_non_phy_agg {
+struct stats_tx_non_phy_agg {
 	__le32 ba_timeout;
 	__le32 ba_reschedule_frames;
 	__le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@
 	__le32 rx_ba_rsp_cnt;
 } __packed;
 
-struct statistics_tx {
+struct stats_tx {
 	__le32 preamble_cnt;
 	__le32 rx_detected_cnt;
 	__le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@
 	__le32 burst_abort_missing_next_frame_cnt;
 	__le32 cts_timeout_collision;
 	__le32 ack_or_ba_timeout_collision;
-	struct statistics_tx_non_phy_agg agg;
+	struct stats_tx_non_phy_agg agg;
 
 	__le32 reserved1;
 } __packed;
 
-
-struct statistics_div {
+struct stats_div {
 	__le32 tx_on_a;
 	__le32 tx_on_b;
 	__le32 exec_time;
@@ -2935,14 +2914,14 @@
 	__le32 reserved2;
 } __packed;
 
-struct statistics_general_common {
-	__le32 temperature;   /* radio temperature */
-	struct statistics_dbg dbg;
+struct stats_general_common {
+	__le32 temperature;	/* radio temperature */
+	struct stats_dbg dbg;
 	__le32 sleep_time;
 	__le32 slots_out;
 	__le32 slots_idle;
 	__le32 ttl_timestamp;
-	struct statistics_div div;
+	struct stats_div div;
 	__le32 rx_enable_counter;
 	/*
 	 * num_of_sos_states:
@@ -2952,73 +2931,73 @@
 	__le32 num_of_sos_states;
 } __packed;
 
-struct statistics_general {
-	struct statistics_general_common common;
+struct stats_general {
+	struct stats_general_common common;
 	__le32 reserved2;
 	__le32 reserved3;
 } __packed;
 
-#define UCODE_STATISTICS_CLEAR_MSK		(0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK		(0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK	(0x1 << 2)
+#define UCODE_STATS_CLEAR_MSK		(0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK		(0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK	(0x1 << 2)
 
 /*
- * REPLY_STATISTICS_CMD = 0x9c,
+ * C_STATS = 0x9c,
  * all devices identical.
  *
- * This command triggers an immediate response containing uCode statistics.
- * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
  *
  * If the CLEAR_STATS configuration flag is set, uCode will clear its
- * internal copy of the statistics (counters) after issuing the response.
- * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
  *
  * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
- * STATISTICS_NOTIFICATIONs after received beacons (see below).  This flag
- * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ * N_STATSs after received beacons (see below).  This flag
+ * does not affect the response to the C_STATS 0x9c itself.
  */
-#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)	/* see above */
-#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
-struct iwl_statistics_cmd {
-	__le32 configuration_flags;	/* IWL_STATS_CONF_* */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)	/* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)	/* see above */
+struct il_stats_cmd {
+	__le32 configuration_flags;	/* IL_STATS_CONF_* */
 } __packed;
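
Editor's note: a hedged example of issuing this command with the CLEAR_STATS flag, built on il_send_cmd_pdu() from common.c later in this patch (the wrapper function itself is illustrative):

/* Illustrative: request a stats dump and ask uCode to clear its counters. */
static int il_request_stats_clear(struct il_priv *il)
{
	struct il_stats_cmd cmd = {
		.configuration_flags = IL_STATS_CONF_CLEAR_STATS,
	};

	return il_send_cmd_pdu(il, C_STATS, sizeof(cmd), &cmd);
}
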
 
 /*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ * N_STATS = 0x9d (notification only, not a command)
  *
  * By default, uCode issues this notification after receiving a beacon
  * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
+ * C_STATS 0x9c, above.
  *
  * Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * cleared when changing channels or when driver issues C_STATS
  * 0x9c with CLEAR_STATS bit set (see above).
  *
- * uCode also issues this notification during scans.  uCode clears statistics
- * appropriately so that each notification contains statistics for only the
+ * uCode also issues this notification during scans.  uCode clears stats
+ * appropriately so that each notification contains stats for only the
  * one channel that has just been scanned.
  */
-#define STATISTICS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
-#define STATISTICS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
+#define STATS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
 
-struct iwl3945_notif_statistics {
+struct il3945_notif_stats {
 	__le32 flag;
-	struct iwl39_statistics_rx rx;
-	struct iwl39_statistics_tx tx;
-	struct iwl39_statistics_general general;
+	struct iwl39_stats_rx rx;
+	struct iwl39_stats_tx tx;
+	struct iwl39_stats_general general;
 } __packed;
 
-struct iwl_notif_statistics {
+struct il_notif_stats {
 	__le32 flag;
-	struct statistics_rx rx;
-	struct statistics_tx tx;
-	struct statistics_general general;
+	struct stats_rx rx;
+	struct stats_tx tx;
+	struct stats_general general;
 } __packed;
 
 /*
- * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
  *
- * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
+ * uCode sends N_MISSED_BEACONS to the driver when it detects missed beacons,
  * regardless of how many were missed; when the driver receives the
  * notification it can find, inside the command, all the beacon information,
  * including the number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@
  *
  */
 
-#define IWL_MISSED_BEACON_THRESHOLD_MIN	(1)
-#define IWL_MISSED_BEACON_THRESHOLD_DEF	(5)
-#define IWL_MISSED_BEACON_THRESHOLD_MAX	IWL_MISSED_BEACON_THRESHOLD_DEF
+#define IL_MISSED_BEACON_THRESHOLD_MIN	(1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF	(5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX	IL_MISSED_BEACON_THRESHOLD_DEF
 
-struct iwl_missed_beacon_notif {
+struct il_missed_beacon_notif {
 	__le32 consecutive_missed_beacons;
 	__le32 total_missed_becons;
 	__le32 num_expected_beacons;
 	__le32 num_recvd_beacons;
 } __packed;
 
-
 /******************************************************************************
  * (11)
  * Rx Calibration Commands:
@@ -3062,7 +3040,7 @@
  *****************************************************************************/
 
 /**
- * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
  *
  * This command sets up the Rx signal detector for a sensitivity level that
  * is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@
  * time listening, not transmitting).  Driver must adjust sensitivity so that
  * the ratio of actual false alarms to actual Rx time falls within this range.
  *
- * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * While associated, uCode delivers N_STATSs after each
  * received beacon.  These provide information to the driver to analyze the
- * sensitivity.  Don't analyze statistics that come in from scanning, or any
- * other non-associated-network source.  Pertinent statistics include:
+ * sensitivity.  Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source.  Pertinent stats include:
  *
- * From "general" statistics (struct statistics_rx_non_phy):
+ * From "general" stats (struct stats_rx_non_phy):
  *
  * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
  *   Measure of energy of desired signal.  Used for establishing a level
@@ -3094,7 +3072,7 @@
  *   uSecs of actual Rx time during beacon period (varies according to
  *   how much time was spent transmitting).
  *
- * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
  *
  * false_alarm_cnt
  *   Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@
  *
  * Total number of false alarms = false_alarms + plcp_errs
  *
- * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
  * (notice that the start points for OFDM are at or close to settings for
  * maximum sensitivity):
  *
  *                                             START  /  MIN  /  MAX
- *   HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          90   /   85  /  120
- *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX     170   /  170  /  210
- *   HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX         105   /  105  /  140
- *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX     220   /  220  /  270
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX          90   /   85  /  120
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX     170   /  170  /  210
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX         105   /  105  /  140
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX     220   /  220  /  270
  *
  *   If actual rate of OFDM false alarms (+ plcp_errors) is too high
  *   (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@
  *        Reset this to 0 at the first beacon period that falls within the
  *        "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
  *
- * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
  * (notice that the start points for CCK are at maximum sensitivity):
  *
  *                                             START  /  MIN  /  MAX
- *   HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX         125   /  125  /  200
- *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX     200   /  200  /  400
- *   HD_MIN_ENERGY_CCK_DET_INDEX                100   /    0  /  100
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX         125   /  125  /  200
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX     200   /  200  /  400
+ *   HD_MIN_ENERGY_CCK_DET_IDX                100   /    0  /  100
  *
  *   If actual rate of CCK false alarms (+ plcp_errors) is too high
  *   (greater than 50 for each 204.8 msecs listening), method for reducing
  *   sensitivity is:
  *
- *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
  *       up to max 400.
  *
- *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
  *       sensitivity has been reduced a significant amount; bring it up to
  *       a moderate 161.  Otherwise, *add* 3, up to max 200.
  *
- *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
  *       sensitivity has been reduced only a moderate or small amount;
- *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
  *       down to min 0.  Otherwise (if gain has been significantly reduced),
- *       don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *       don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
  *
  *       b)  Save a snapshot of the "silence reference".
  *
@@ -3191,13 +3169,13 @@
  *
  *   Method for increasing sensitivity:
  *
- *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
  *       down to min 125.
  *
- *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
  *       down to min 200.
  *
- *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
  *
  *   If actual rate of CCK false alarms (+ plcp_errors) is within good range
  *   (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@
  *
  *   2)  If previous beacon had too many CCK false alarms (+ plcp_errors),
  *       give some extra margin to energy threshold by *subtracting* 8
- *       from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *       from value in HD_MIN_ENERGY_CCK_DET_IDX.
  *
  *   For all cases (too few, too many, good range), make sure that the CCK
  *   detection threshold (energy) is below the energy level for robust
  *   detection over the past 10 beacon periods, the "Max cck energy".
  *   Lower values mean higher energy; this means making sure that the value
- *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *   in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
  *
  */
 
 /*
- * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
  */
-#define HD_TABLE_SIZE  (11)	/* number of entries */
-#define HD_MIN_ENERGY_CCK_DET_INDEX                 (0)	/* table indexes */
-#define HD_MIN_ENERGY_OFDM_DET_INDEX                (1)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          (2)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX      (3)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX      (4)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX          (5)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX      (6)
-#define HD_BARKER_CORR_TH_ADD_MIN_INDEX             (7)
-#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX         (8)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX          (9)
-#define HD_OFDM_ENERGY_TH_IN_INDEX                  (10)
+#define HD_TBL_SIZE  (11)	/* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX                 (0)	/* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX                (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX          (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX      (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX      (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX          (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX      (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX             (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX         (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX          (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX                  (10)
 
-/* Control field in struct iwl_sensitivity_cmd */
-#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE	cpu_to_le16(0)
-#define SENSITIVITY_CMD_CONTROL_WORK_TABLE	cpu_to_le16(1)
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL	cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL	cpu_to_le16(1)
 
 /**
- * struct iwl_sensitivity_cmd
+ * struct il_sensitivity_cmd
  * @control:  (1) updates working table, (0) updates default table
- * @table:  energy threshold values, use HD_* as index into table
+ * @table:  energy threshold values, use HD_* as idx into table
  *
  * Always use "1" in "control" to update uCode's working table and DSP.
  */
-struct iwl_sensitivity_cmd {
-	__le16 control;			/* always use "1" */
-	__le16 table[HD_TABLE_SIZE];	/* use HD_* as index */
+struct il_sensitivity_cmd {
+	__le16 control;		/* always use "1" */
+	__le16 table[HD_TBL_SIZE];	/* use HD_* as idx */
 } __packed;
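
Editor's note: a hedged sketch of the OFDM half of the adjustment loop described above. The 5/50 false-alarm bounds and the 105/140 limits come from the comment; the step size, state layout, and function name are assumptions for illustration only:

/* Illustrative: nudge one OFDM auto-correlation threshold from the observed
 * false-alarm rate (false_alarms counted over rx_units periods of 204.8 ms). */
static void adjust_ofdm_sensitivity(__le16 *tbl, u32 false_alarms, u32 rx_units)
{
	u32 per_unit = rx_units ? false_alarms / rx_units : 0;
	u16 cur = le16_to_cpu(tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX]);

	if (per_unit > 50)		/* too many: reduce sensitivity */
		cur = min_t(u16, 140, cur + 1);	/* step size is illustrative */
	else if (per_unit < 5)		/* too few: increase sensitivity */
		cur = max_t(u16, 105, cur - 1);

	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] = cpu_to_le16(cur);
	/* Repeat for the X1 and MRC entries within their START/MIN/MAX bounds,
	 * then send the table via C_SENSITIVITY with control set to 1. */
}
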
 
-
 /**
- * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
  *
  * This command sets the relative gains of 4965 device's 3 radio receiver chains.
  *
  * After the first association, driver should accumulate signal and noise
- * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
- * beacons from the associated network (don't collect statistics that come
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
  * in from scanning, or any other non-network source).
  *
  * DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@
  * Driver should determine which antennas are actually connected, by comparing
  * average beacon signal levels for the 3 Rx chains.  Accumulate (add) the
  * following values over 20 beacons, one accumulator for each of the chains
- * a/b/c, from struct statistics_rx_non_phy:
+ * a/b/c, from struct stats_rx_non_phy:
  *
  * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
  *
@@ -3283,7 +3260,7 @@
  * to antennas, see above) for gain, by comparing the average signal levels
  * detected during the silence after each beacon (background noise).
  * Accumulate (add) the following values over 20 beacons, one accumulator for
- * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
  *
  * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
  *
@@ -3294,7 +3271,7 @@
  * (accum_noise[i] - accum_noise[reference]) / 30
  *
  * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
- * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
  * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
  * and set bit 2 to indicate "reduce gain".  The value for the reference
  * (weakest) chain should be "0".
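
Editor's note: a hedged sketch of turning the 20-beacon noise accumulators into the diff_gain_[abc] values per the rules above (the helper name and accumulator layout are illustrative; accum[ref] is assumed to be the smallest of the three):

/* Illustrative: convert accumulated beacon_silence_rssi_[abc] sums into
 * differential gain codes for C_PHY_CALIBRATION. */
static void fill_diff_gains(struct il_calib_diff_gain_cmd *cmd,
			    const u32 accum[3], int ref)
{
	s8 *gain[3] = { &cmd->diff_gain_a, &cmd->diff_gain_b, &cmd->diff_gain_c };
	int i;

	for (i = 0; i < 3; i++) {
		s32 delta = (s32)(accum[i] - accum[ref]) / 30;	/* 1.5 dB steps */

		if (delta > 3)
			delta = 3;		/* limit to 0-3 (0-4.5 dB) */
		if (i == ref || delta == 0)
			*gain[i] = 0;
		else
			*gain[i] = (s8)(delta | BIT(2));  /* bit 2 = "reduce gain" */
	}
}
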
@@ -3306,24 +3283,24 @@
 
 /* Phy calibration command for series */
 /* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE	18
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE	18
 enum {
-	IWL_PHY_CALIBRATE_DIFF_GAIN_CMD		= 7,
-	IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE	= 19,
+	IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+	IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
 };
 
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE		(253)
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE		(253)
 
-struct iwl_calib_hdr {
+struct il_calib_hdr {
 	u8 op_code;
 	u8 first_group;
 	u8 groups_num;
 	u8 data_valid;
 } __packed;
 
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
-	struct iwl_calib_hdr hdr;
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+	struct il_calib_hdr hdr;
 	s8 diff_gain_a;		/* see above */
 	s8 diff_gain_b;
 	s8 diff_gain_c;
@@ -3338,12 +3315,12 @@
 
 /*
  * LEDs Command & Response
- * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ * C_LEDS = 0x48 (command, has simple generic response)
  *
  * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
  * this command turns it on or off, or sets up a periodic blinking cycle.
  */
-struct iwl_led_cmd {
+struct il_led_cmd {
 	__le32 interval;	/* "interval" in uSec */
 	u8 id;			/* 1: Activity, 2: Link, 3: Tech */
 	u8 off;			/* # intervals off while blinking;
@@ -3353,14 +3330,15 @@
 	u8 reserved;
 } __packed;
 
-
 /******************************************************************************
  * (13)
  * Union of all expected notifications/responses:
  *
  *****************************************************************************/
 
-struct iwl_rx_packet {
+#define IL_RX_FRAME_SIZE_MSK	0x00003fff
+
+struct il_rx_pkt {
 	/*
 	 * The first 4 bytes of the RX frame header contain both the RX frame
 	 * size and some flags.
@@ -3372,27 +3350,27 @@
 	 * 13-00: RX frame size
 	 */
 	__le32 len_n_flags;
-	struct iwl_cmd_header hdr;
+	struct il_cmd_header hdr;
 	union {
-		struct iwl3945_rx_frame rx_frame;
-		struct iwl3945_tx_resp tx_resp;
-		struct iwl3945_beacon_notif beacon_status;
+		struct il3945_rx_frame rx_frame;
+		struct il3945_tx_resp tx_resp;
+		struct il3945_beacon_notif beacon_status;
 
-		struct iwl_alive_resp alive_frame;
-		struct iwl_spectrum_notification spectrum_notif;
-		struct iwl_csa_notification csa_notif;
-		struct iwl_error_resp err_resp;
-		struct iwl_card_state_notif card_state_notif;
-		struct iwl_add_sta_resp add_sta;
-		struct iwl_rem_sta_resp rem_sta;
-		struct iwl_sleep_notification sleep_notif;
-		struct iwl_spectrum_resp spectrum;
-		struct iwl_notif_statistics stats;
-		struct iwl_compressed_ba_resp compressed_ba;
-		struct iwl_missed_beacon_notif missed_beacon;
+		struct il_alive_resp alive_frame;
+		struct il_spectrum_notification spectrum_notif;
+		struct il_csa_notification csa_notif;
+		struct il_error_resp err_resp;
+		struct il_card_state_notif card_state_notif;
+		struct il_add_sta_resp add_sta;
+		struct il_rem_sta_resp rem_sta;
+		struct il_sleep_notification sleep_notif;
+		struct il_spectrum_resp spectrum;
+		struct il_notif_stats stats;
+		struct il_compressed_ba_resp compressed_ba;
+		struct il_missed_beacon_notif missed_beacon;
 		__le32 status;
 		u8 raw[0];
 	} u;
 } __packed;
 
-#endif				/* __iwl_legacy_commands_h__ */
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 0000000..36454d0b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5867 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+	const int interval = 10; /* microseconds */
+	int t = 0;
+
+	do {
+		if ((_il_rd(il, addr) & mask) == (bits & mask))
+			return t;
+		udelay(interval);
+		t += interval;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(_il_poll_bit);
+
+void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&p->reg_lock, reg_flags);
+	_il_set_bit(p, r, m);
+	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_set_bit);
+
+void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&p->reg_lock, reg_flags);
+	_il_clear_bit(p, r, m);
+	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_clear_bit);
+
+int
+_il_grab_nic_access(struct il_priv *il)
+{
+	int ret;
+	u32 val;
+
+	/* this bit wakes up the NIC */
+	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/*
+	 * These bits say the device is running, and should keep running for
+	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+	 * but they do not indicate that embedded SRAM is restored yet;
+	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
+	 * to/from host DRAM when sleeping/waking for power-saving.
+	 * Each direction takes approximately 1/4 millisecond; with this
+	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+	 * series of register accesses are expected (e.g. reading Event Log),
+	 * to keep device from sleeping.
+	 *
+	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+	 * SRAM is okay/restored.  We don't check that here because this call
+	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
+	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+	 *
+	 */
+	ret =
+	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+	if (ret < 0) {
+		val = _il_rd(il, CSR_GP_CNTRL);
+		IL_ERR("MAC is in deep sleep!  CSR_GP_CNTRL = 0x%08X\n", val);
+		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(_il_grab_nic_access);
+
+int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+	const int interval = 10; /* microseconds */
+	int t = 0;
+
+	do {
+		if ((il_rd(il, addr) & mask) == mask)
+			return t;
+		udelay(interval);
+		t += interval;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(il_poll_bit);
+
+u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+	unsigned long reg_flags;
+	u32 val;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	val = _il_rd_prph(il, reg);
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+	return val;
+}
+EXPORT_SYMBOL(il_rd_prph);
+
+void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (!_il_grab_nic_access(il)) {
+		_il_wr_prph(il, addr, val);
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_wr_prph);
+
+u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+	unsigned long reg_flags;
+	u32 value;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+
+	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+	rmb();
+	value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+	return value;
+}
+EXPORT_SYMBOL(il_read_targ_mem);
+
+void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (!_il_grab_nic_access(il)) {
+		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+		wmb();
+		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_write_targ_mem);
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+	switch (cmd) {
+		IL_CMD(N_ALIVE);
+		IL_CMD(N_ERROR);
+		IL_CMD(C_RXON);
+		IL_CMD(C_RXON_ASSOC);
+		IL_CMD(C_QOS_PARAM);
+		IL_CMD(C_RXON_TIMING);
+		IL_CMD(C_ADD_STA);
+		IL_CMD(C_REM_STA);
+		IL_CMD(C_WEPKEY);
+		IL_CMD(N_3945_RX);
+		IL_CMD(C_TX);
+		IL_CMD(C_RATE_SCALE);
+		IL_CMD(C_LEDS);
+		IL_CMD(C_TX_LINK_QUALITY_CMD);
+		IL_CMD(C_CHANNEL_SWITCH);
+		IL_CMD(N_CHANNEL_SWITCH);
+		IL_CMD(C_SPECTRUM_MEASUREMENT);
+		IL_CMD(N_SPECTRUM_MEASUREMENT);
+		IL_CMD(C_POWER_TBL);
+		IL_CMD(N_PM_SLEEP);
+		IL_CMD(N_PM_DEBUG_STATS);
+		IL_CMD(C_SCAN);
+		IL_CMD(C_SCAN_ABORT);
+		IL_CMD(N_SCAN_START);
+		IL_CMD(N_SCAN_RESULTS);
+		IL_CMD(N_SCAN_COMPLETE);
+		IL_CMD(N_BEACON);
+		IL_CMD(C_TX_BEACON);
+		IL_CMD(C_TX_PWR_TBL);
+		IL_CMD(C_BT_CONFIG);
+		IL_CMD(C_STATS);
+		IL_CMD(N_STATS);
+		IL_CMD(N_CARD_STATE);
+		IL_CMD(N_MISSED_BEACONS);
+		IL_CMD(C_CT_KILL_CONFIG);
+		IL_CMD(C_SENSITIVITY);
+		IL_CMD(C_PHY_CALIBRATION);
+		IL_CMD(N_RX_PHY);
+		IL_CMD(N_RX_MPDU);
+		IL_CMD(N_RX);
+		IL_CMD(N_COMPRESSED_BA);
+	default:
+		return "UNKNOWN";
+
+	}
+}
+EXPORT_SYMBOL(il_get_cmd_string);
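
Editor's note: IL_CMD() is defined in common.h, which is not part of this hunk; presumably it expands each entry to a case label returning the stringified name, roughly:

/* Assumed shape of the IL_CMD() helper used by il_get_cmd_string() above. */
#define IL_CMD(x) case x: return #x
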
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+			struct il_rx_pkt *pkt)
+{
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from %s (0x%08X)\n",
+		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+		return;
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	switch (cmd->hdr.cmd) {
+	case C_TX_LINK_QUALITY_CMD:
+	case C_SENSITIVITY:
+		D_HC_DUMP("back from %s (0x%08X)\n",
+			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+		break;
+	default:
+		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+		     pkt->hdr.flags);
+	}
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	int ret;
+
+	BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+	/* An asynchronous command can not expect an SKB to be set. */
+	BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+	/* Assign a generic callback if one is not provided */
+	if (!cmd->callback)
+		cmd->callback = il_generic_cmd_callback;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return -EBUSY;
+
+	ret = il_enqueue_hcmd(il, cmd);
+	if (ret < 0) {
+		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+		       il_get_cmd_string(cmd->id), ret);
+		return ret;
+	}
+	return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	int cmd_idx;
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	BUG_ON(cmd->flags & CMD_ASYNC);
+
+	/* A synchronous command can not have a callback set. */
+	BUG_ON(cmd->callback);
+
+	D_INFO("Attempting to send sync command %s\n",
+	       il_get_cmd_string(cmd->id));
+
+	set_bit(S_HCMD_ACTIVE, &il->status);
+	D_INFO("Setting HCMD_ACTIVE for command %s\n",
+	       il_get_cmd_string(cmd->id));
+
+	cmd_idx = il_enqueue_hcmd(il, cmd);
+	if (cmd_idx < 0) {
+		ret = cmd_idx;
+		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+		       il_get_cmd_string(cmd->id), ret);
+		goto out;
+	}
+
+	ret = wait_event_timeout(il->wait_command_queue,
+				 !test_bit(S_HCMD_ACTIVE, &il->status),
+				 HOST_COMPLETE_TIMEOUT);
+	if (!ret) {
+		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+			IL_ERR("Error sending %s: time out after %dms.\n",
+			       il_get_cmd_string(cmd->id),
+			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+			clear_bit(S_HCMD_ACTIVE, &il->status);
+			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+			       il_get_cmd_string(cmd->id));
+			ret = -ETIMEDOUT;
+			goto cancel;
+		}
+	}
+
+	if (test_bit(S_RF_KILL_HW, &il->status)) {
+		IL_ERR("Command %s aborted: RF KILL Switch\n",
+		       il_get_cmd_string(cmd->id));
+		ret = -ECANCELED;
+		goto fail;
+	}
+	if (test_bit(S_FW_ERROR, &il->status)) {
+		IL_ERR("Command %s failed: FW Error\n",
+		       il_get_cmd_string(cmd->id));
+		ret = -EIO;
+		goto fail;
+	}
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+		IL_ERR("Error: Response NULL in '%s'\n",
+		       il_get_cmd_string(cmd->id));
+		ret = -EIO;
+		goto cancel;
+	}
+
+	ret = 0;
+	goto out;
+
+cancel:
+	if (cmd->flags & CMD_WANT_SKB) {
+		/*
+		 * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise in case the cmd comes
+		 * in later, it will possibly set an invalid
+		 * address (cmd->meta.source).
+		 */
+		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+	}
+fail:
+	if (cmd->reply_page) {
+		il_free_pages(il, cmd->reply_page);
+		cmd->reply_page = 0;
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	if (cmd->flags & CMD_ASYNC)
+		return il_send_cmd_async(il, cmd);
+
+	return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+	struct il_host_cmd cmd = {
+		.id = id,
+		.len = len,
+		.data = data,
+	};
+
+	return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+		      void (*callback) (struct il_priv *il,
+					struct il_device_cmd *cmd,
+					struct il_rx_pkt *pkt))
+{
+	struct il_host_cmd cmd = {
+		.id = id,
+		.len = len,
+		.data = data,
+	};
+
+	cmd.flags |= CMD_ASYNC;
+	cmd.callback = callback;
+
+	return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode,
+		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput		OFF time(ms)	ON time (ms)
+ *	>300			25		25
+ *	>200 to 300		40		40
+ *	>100 to 200		55		55
+ *	>70 to 100		65		65
+ *	>50 to 70		75		75
+ *	>20 to 50		85		85
+ *	>10 to 20		95		95
+ *	>5 to 10		110		110
+ *	>1 to 5			130		130
+ *	>0 to 1			167		167
+ *	<=0					SOLID ON
+ */
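+/* Note: blink_time below is the full blink period, i.e. the sum of the ON
+ * and OFF times in the table above (e.g. 25 + 25 = 50 ms for the >300 row).
+ */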
+static const struct ieee80211_tpt_blink il_blink[] = {
+	{.throughput = 0,		.blink_time = 334},
+	{.throughput = 1 * 1024 - 1,	.blink_time = 260},
+	{.throughput = 5 * 1024 - 1,	.blink_time = 220},
+	{.throughput = 10 * 1024 - 1,	.blink_time = 190},
+	{.throughput = 20 * 1024 - 1,	.blink_time = 170},
+	{.throughput = 50 * 1024 - 1,	.blink_time = 150},
+	{.throughput = 70 * 1024 - 1,	.blink_time = 130},
+	{.throughput = 100 * 1024 - 1,	.blink_time = 110},
+	{.throughput = 200 * 1024 - 1,	.blink_time = 80},
+	{.throughput = 300 * 1024 - 1,	.blink_time = 50},
+};
+
+/*
+ * Adjust led blink rate to compensate on a MAC Clock difference on every HW
+ * Led blink rate analysis showed an average deviation of 0% on 3945,
+ * 5% on 4965 HW.
+ * The LED on/off time must be compensated per HW according to this deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ *     compensation = (100 - averageDeviation) * 64 / 100
+ *     NewBlinkTime = (compensation * BlinkTime) / 64
+ */
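+/*
+ * Worked example (illustrative): with a 5% average deviation,
+ * compensation = (100 - 5) * 64 / 100 = 60, so a nominal 220 ms blink
+ * becomes (60 * 220) / 64 = 206 ms.
+ */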
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+	if (!compensation) {
+		IL_ERR("undefined blink compensation: "
+		       "use pre-defined blinking time\n");
+		return time;
+	}
+
+	return (u8) ((time * compensation) >> 6);
+}
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+	struct il_led_cmd led_cmd = {
+		.id = IL_LED_LINK,
+		.interval = IL_DEF_LED_INTRVL
+	};
+	int ret;
+
+	if (!test_bit(S_READY, &il->status))
+		return -EBUSY;
+
+	if (il->blink_on == on && il->blink_off == off)
+		return 0;
+
+	if (off == 0) {
+		/* led is SOLID_ON */
+		on = IL_LED_SOLID;
+	}
+
+	D_LED("Led blink time compensation=%u\n",
+	      il->cfg->base_params->led_compensation);
+	led_cmd.on =
+	    il_blink_compensation(il, on,
+				  il->cfg->base_params->led_compensation);
+	led_cmd.off =
+	    il_blink_compensation(il, off,
+				  il->cfg->base_params->led_compensation);
+
+	ret = il->cfg->ops->led->cmd(il, &led_cmd);
+	if (!ret) {
+		il->blink_on = on;
+		il->blink_off = off;
+	}
+	return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+		      enum led_brightness brightness)
+{
+	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+	unsigned long on = 0;
+
+	if (brightness > 0)
+		on = IL_LED_SOLID;
+
+	il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+		 unsigned long *delay_off)
+{
+	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+	return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+	int mode = led_mode;
+	int ret;
+
+	if (mode == IL_LED_DEFAULT)
+		mode = il->cfg->led_mode;
+
+	il->led.name =
+	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+	il->led.brightness_set = il_led_brightness_set;
+	il->led.blink_set = il_led_blink_set;
+	il->led.max_brightness = 1;
+
+	switch (mode) {
+	case IL_LED_DEFAULT:
+		WARN_ON(1);
+		break;
+	case IL_LED_BLINK:
+		il->led.default_trigger =
+		    ieee80211_create_tpt_led_trigger(il->hw,
+						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+						     il_blink,
+						     ARRAY_SIZE(il_blink));
+		break;
+	case IL_LED_RF_STATE:
+		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+		break;
+	}
+
+	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+	if (ret) {
+		kfree(il->led.name);
+		return;
+	}
+
+	il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+	if (!il->led_registered)
+		return;
+
+	led_classdev_unregister(&il->led);
+	kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel.  We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware.  This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel.  There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
+	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
+	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
+	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
+	145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
+	1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
+	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+	int ret = 0;
+
+	D_EEPROM("EEPROM signature=0x%08x\n", gp);
+	switch (gp) {
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+		break;
+	default:
+		IL_ERR("bad EEPROM signature, " "EEPROM_GP=0x%08x\n", gp);
+		ret = -ENOENT;
+		break;
+	}
+	return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+	BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+	return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
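+	/* The stored EEPROM image is little-endian; assemble the 16-bit word
+	 * from the two bytes at the given offset. */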
+	if (!il->eeprom)
+		return 0;
+	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE:  This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+	__le16 *e;
+	u32 gp = _il_rd(il, CSR_EEPROM_GP);
+	int sz;
+	int ret;
+	u16 addr;
+
+	/* allocate eeprom */
+	sz = il->cfg->base_params->eeprom_size;
+	D_EEPROM("NVM size = %d\n", sz);
+	il->eeprom = kzalloc(sz, GFP_KERNEL);
+	if (!il->eeprom) {
+		ret = -ENOMEM;
+		goto alloc_err;
+	}
+	e = (__le16 *) il->eeprom;
+
+	il->cfg->ops->lib->apm_ops.init(il);
+
+	ret = il_eeprom_verify_signature(il);
+	if (ret < 0) {
+		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+		ret = -ENOENT;
+		goto err;
+	}
+
+	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
+	ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+	if (ret < 0) {
+		IL_ERR("Failed to acquire EEPROM semaphore.\n");
+		ret = -ENOENT;
+		goto err;
+	}
+
+	/* eeprom is an array of 16bit values */
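+	/* Each iteration latches the EEPROM address into CSR_EEPROM_REG,
+	 * polls for READ_VALID, then takes the 16-bit data word from the
+	 * upper half of the register. */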
+	for (addr = 0; addr < sz; addr += sizeof(u16)) {
+		u32 r;
+
+		_il_wr(il, CSR_EEPROM_REG,
+		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+		ret =
+		    _il_poll_bit(il, CSR_EEPROM_REG,
+				 CSR_EEPROM_REG_READ_VALID_MSK,
+				 CSR_EEPROM_REG_READ_VALID_MSK,
+				 IL_EEPROM_ACCESS_TIMEOUT);
+		if (ret < 0) {
+			IL_ERR("Time out reading EEPROM[%d]\n", addr);
+			goto done;
+		}
+		r = _il_rd(il, CSR_EEPROM_REG);
+		e[addr / 2] = cpu_to_le16(r >> 16);
+	}
+
+	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+		 il_eeprom_query16(il, EEPROM_VERSION));
+
+	ret = 0;
+done:
+	il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+
+err:
+	if (ret)
+		il_eeprom_free(il);
+	/* Reset chip to save power until we load uCode during "up". */
+	il_apm_stop(il);
+alloc_err:
+	return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+	kfree(il->eeprom);
+	il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+		       int *eeprom_ch_count,
+		       const struct il_eeprom_channel **eeprom_ch_info,
+		       const u8 **eeprom_ch_idx)
+{
+	u32 offset =
+	    il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+	switch (eep_band) {
+	case 1:		/* 2.4GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_1;
+		break;
+	case 2:		/* 4.9GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_2;
+		break;
+	case 3:		/* 5.2GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_3;
+		break;
+	case 4:		/* 5.5GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_4;
+		break;
+	case 5:		/* 5.7GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_5;
+		break;
+	case 6:		/* 2.4GHz ht40 channels */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_6;
+		break;
+	case 7:		/* 5 GHz ht40 channels */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_7;
+		break;
+	default:
+		BUG();
+	}
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+			    ? # x " " : "")
+/**
+ * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+		      const struct il_eeprom_channel *eeprom_ch,
+		      u8 clear_ht40_extension_channel)
+{
+	struct il_channel_info *ch_info;
+
+	ch_info =
+	    (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+	if (!il_is_channel_valid(ch_info))
+		return -1;
+
+	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+		 " Ad-Hoc %ssupported\n", ch_info->channel,
+		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+		 eeprom_ch->max_power_avg,
+		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+	ch_info->ht40_eeprom = *eeprom_ch;
+	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+	ch_info->ht40_flags = eeprom_ch->flags;
+	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+		ch_info->ht40_extension_channel &=
+		    ~clear_ht40_extension_channel;
+
+	return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+			    ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+	int eeprom_ch_count = 0;
+	const u8 *eeprom_ch_idx = NULL;
+	const struct il_eeprom_channel *eeprom_ch_info = NULL;
+	int band, ch;
+	struct il_channel_info *ch_info;
+
+	if (il->channel_count) {
+		D_EEPROM("Channel map already initialized.\n");
+		return 0;
+	}
+
+	D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+	il->channel_count =
+	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+	    ARRAY_SIZE(il_eeprom_band_5);
+
+	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+	il->channel_info =
+	    kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+		    GFP_KERNEL);
+	if (!il->channel_info) {
+		IL_ERR("Could not allocate channel_info\n");
+		il->channel_count = 0;
+		return -ENOMEM;
+	}
+
+	ch_info = il->channel_info;
+
+	/* Loop through the 5 EEPROM bands, adding them in order to the
+	 * channel map we maintain (which contains more information than
+	 * the EEPROM alone) */
+	for (band = 1; band <= 5; band++) {
+
+		il_init_band_reference(il, band, &eeprom_ch_count,
+				       &eeprom_ch_info, &eeprom_ch_idx);
+
+		/* Loop through each band adding each of the channels */
+		for (ch = 0; ch < eeprom_ch_count; ch++) {
+			ch_info->channel = eeprom_ch_idx[ch];
+			ch_info->band =
+			    (band ==
+			     1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+			/* permanently store EEPROM's channel regulatory flags
+			 *   and max power in channel info database. */
+			ch_info->eeprom = eeprom_ch_info[ch];
+
+			/* Copy the run-time flags so they are there even on
+			 * invalid channels */
+			ch_info->flags = eeprom_ch_info[ch].flags;
+			/* First mark HT40 as not enabled; valid extension
+			 * channels are enabled one by one in the HT40 pass
+			 * below */
+			ch_info->ht40_extension_channel =
+			    IEEE80211_CHAN_NO_HT40;
+
+			if (!(il_is_channel_valid(ch_info))) {
+				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+					 "No traffic\n", ch_info->channel,
+					 ch_info->flags,
+					 il_is_channel_a_band(ch_info) ? "5.2" :
+					 "2.4");
+				ch_info++;
+				continue;
+			}
+
+			/* Initialize regulatory-based run-time data */
+			ch_info->max_power_avg = ch_info->curr_txpow =
+			    eeprom_ch_info[ch].max_power_avg;
+			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+			ch_info->min_power = 0;
+
+			D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+				 " Ad-Hoc %ssupported\n", ch_info->channel,
+				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+				 CHECK_AND_PRINT_I(VALID),
+				 CHECK_AND_PRINT_I(IBSS),
+				 CHECK_AND_PRINT_I(ACTIVE),
+				 CHECK_AND_PRINT_I(RADAR),
+				 CHECK_AND_PRINT_I(WIDE),
+				 CHECK_AND_PRINT_I(DFS),
+				 eeprom_ch_info[ch].flags,
+				 eeprom_ch_info[ch].max_power_avg,
+				 ((eeprom_ch_info[ch].
+				   flags & EEPROM_CHANNEL_IBSS) &&
+				  !(eeprom_ch_info[ch].
+				    flags & EEPROM_CHANNEL_RADAR)) ? "" :
+				 "not ");
+
+			ch_info++;
+		}
+	}
+
+	/* Check if we do have HT40 channels */
+	if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+	    EEPROM_REGULATORY_BAND_NO_HT40 &&
+	    il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+	    EEPROM_REGULATORY_BAND_NO_HT40)
+		return 0;
+
+	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+	for (band = 6; band <= 7; band++) {
+		enum ieee80211_band ieeeband;
+
+		il_init_band_reference(il, band, &eeprom_ch_count,
+				       &eeprom_ch_info, &eeprom_ch_idx);
+
+		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+		ieeeband =
+		    (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
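+		/* Each listed channel is the lower half of a 40 MHz pair;
+		 * the matching upper half sits 4 channel numbers (20 MHz)
+		 * above it. */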
+		/* Loop through each band adding each of the channels */
+		for (ch = 0; ch < eeprom_ch_count; ch++) {
+			/* Set up driver's info for lower half */
+			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+					      &eeprom_ch_info[ch],
+					      IEEE80211_CHAN_NO_HT40PLUS);
+
+			/* Set up driver's info for upper half */
+			il_mod_ht40_chan_info(il, ieeeband,
+					      eeprom_ch_idx[ch] + 4,
+					      &eeprom_ch_info[ch],
+					      IEEE80211_CHAN_NO_HT40MINUS);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+	kfree(il->channel_info);
+	il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+		    u16 channel)
+{
+	int i;
+
+	switch (band) {
+	case IEEE80211_BAND_5GHZ:
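+		/* Entries 0..13 hold 2.4 GHz channels 1-14, so 5 GHz channels
+		 * start at idx 14 and are found by a linear search. */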
+		for (i = 14; i < il->channel_count; i++) {
+			if (il->channel_info[i].channel == channel)
+				return &il->channel_info[i];
+		}
+		break;
+	case IEEE80211_BAND_2GHZ:
+		if (channel >= 1 && channel <= 14)
+			return &il->channel_info[channel - 1];
+		break;
+	default:
+		BUG();
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct il_power_vec_entry {
+	struct il_powertable_cmd cmd;
+	u8 no_dtim;		/* number of DTIM periods to skip */
+};
+
+static void
+il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	if (il->power_data.pci_pm)
+		cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+	D_POWER("Sleep command for CAM\n");
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+	D_POWER("Sending power/sleep command\n");
+	D_POWER("Flags value = 0x%08X\n", cmd->flags);
+	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+		le32_to_cpu(cmd->sleep_interval[0]),
+		le32_to_cpu(cmd->sleep_interval[1]),
+		le32_to_cpu(cmd->sleep_interval[2]),
+		le32_to_cpu(cmd->sleep_interval[3]),
+		le32_to_cpu(cmd->sleep_interval[4]));
+
+	return il_send_cmd_pdu(il, C_POWER_TBL,
+			       sizeof(struct il_powertable_cmd), cmd);
+}
+
+int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+	int ret;
+	bool update_chains;
+
+	lockdep_assert_held(&il->mutex);
+
+	/* Don't update the RX chain when chain noise calibration is running */
+	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+		return 0;
+
+	if (!il_is_ready_rf(il))
+		return -EIO;
+
+	/* sleep_cmd_next is applied when the scan completes, so keep it updated */
+	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+	if (test_bit(S_SCANNING, &il->status) && !force) {
+		D_INFO("Defer power set mode while scanning\n");
+		return 0;
+	}
+
+	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+		set_bit(S_POWER_PMI, &il->status);
+
+	ret = il_set_power(il, cmd);
+	if (!ret) {
+		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+			clear_bit(S_POWER_PMI, &il->status);
+
+		if (il->cfg->ops->lib->update_chain_flags && update_chains)
+			il->cfg->ops->lib->update_chain_flags(il);
+		else if (il->cfg->ops->lib->update_chain_flags)
+			D_POWER("Cannot update the power, chain noise "
+				"calibration running: %d\n",
+				il->chain_noise_data.state);
+
+		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+	} else
+		IL_ERR("set power fail, ret = %d", ret);
+
+	return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+	struct il_powertable_cmd cmd;
+
+	il_power_sleep_cam_cmd(il, &cmd);
+	return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+	u16 lctl = il_pcie_link_ctl(il);
+
+	il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+	il->power_data.debug_sleep_level_override = -1;
+
+	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req.  This should be set long enough to hear probe responses
+ * from more than one AP.  */
+#define IL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24   (20)	/* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52   (10)
+#define IL_PASSIVE_DWELL_BASE      (100)
+#define IL_CHANNEL_TUNE_TIME       5
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+	int ret;
+	struct il_rx_pkt *pkt;
+	struct il_host_cmd cmd = {
+		.id = C_SCAN_ABORT,
+		.flags = CMD_WANT_SKB,
+	};
+
+	/* Exit immediately with an error when the device is not ready
+	 * to receive the scan abort command or is not currently performing
+	 * a hardware scan */
+	if (!test_bit(S_READY, &il->status) ||
+	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
+	    !test_bit(S_SCAN_HW, &il->status) ||
+	    test_bit(S_FW_ERROR, &il->status) ||
+	    test_bit(S_EXIT_PENDING, &il->status))
+		return -EIO;
+
+	ret = il_send_cmd_sync(il, &cmd);
+	if (ret)
+		return ret;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->u.status != CAN_ABORT_STATUS) {
+		/* The scan abort will return 1 for success or
+		 * 2 for "failure".  A failure condition can be
+		 * due to simply not being in an active scan, which
+		 * can occur if we send the scan abort before the
+		 * microcode has notified us that a scan is
+		 * completed. */
+		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+		ret = -EIO;
+	}
+
+	il_free_pages(il, cmd.reply_page);
+	return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+	/* check if scan was requested from mac80211 */
+	if (il->scan_request) {
+		D_SCAN("Complete scan in mac80211\n");
+		ieee80211_scan_completed(il->hw, aborted);
+	}
+
+	il->scan_vif = NULL;
+	il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+	lockdep_assert_held(&il->mutex);
+
+	if (!test_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Forcing scan end while not scanning\n");
+		return;
+	}
+
+	D_SCAN("Forcing scan end\n");
+	clear_bit(S_SCANNING, &il->status);
+	clear_bit(S_SCAN_HW, &il->status);
+	clear_bit(S_SCAN_ABORTING, &il->status);
+	il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!test_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Not performing scan to abort\n");
+		return;
+	}
+
+	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+		D_SCAN("Scan abort in progress\n");
+		return;
+	}
+
+	ret = il_send_scan_abort(il);
+	if (ret) {
+		D_SCAN("Send scan abort failed %d\n", ret);
+		il_force_scan_end(il);
+	} else
+		D_SCAN("Successfully sent scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+	D_SCAN("Queuing abort scan\n");
+	queue_work(il->workqueue, &il->abort_scan);
+	return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
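+ * Returns 0 once the hardware scan has stopped, or non-zero if S_SCAN_HW is
+ * still set when the timeout expires.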
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+	lockdep_assert_held(&il->mutex);
+
+	D_SCAN("Scan cancel timeout\n");
+
+	il_do_scan_abort(il);
+
+	while (time_before_eq(jiffies, timeout)) {
+		if (!test_bit(S_SCAN_HW, &il->status))
+			break;
+		msleep(20);
+	}
+
+	return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scanreq_notification *notif =
+	    (struct il_scanreq_notification *)pkt->u.raw;
+
+	D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scanstart_notification *notif =
+	    (struct il_scanstart_notification *)pkt->u.raw;
+	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+	D_SCAN("Scan start: " "%d [802.11%s] "
+	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scanresults_notification *notif =
+	    (struct il_scanresults_notification *)pkt->u.raw;
+
+	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+	       le32_to_cpu(notif->stats[0]),
+	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+	       scan_notif->scanned_channels, scan_notif->tsf_low,
+	       scan_notif->tsf_high, scan_notif->status);
+
+	/* The HW is no longer scanning */
+	clear_bit(S_SCAN_HW, &il->status);
+
+	D_SCAN("Scan on %sGHz took %dms\n",
+	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+	       jiffies_to_msecs(jiffies - il->scan_start));
+
+	queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+	/* scan handlers */
+	il->handlers[C_SCAN] = il_hdl_scan;
+	il->handlers[N_SCAN_START] = il_hdl_scan_start;
+	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+inline u16
+il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+			 u8 n_probes)
+{
+	if (band == IEEE80211_BAND_5GHZ)
+		return IL_ACTIVE_DWELL_TIME_52 +
+		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+	else
+		return IL_ACTIVE_DWELL_TIME_24 +
+		    IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+			  struct ieee80211_vif *vif)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+	u16 value;
+
+	u16 passive =
+	    (band ==
+	     IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
+	    IL_PASSIVE_DWELL_TIME_52;
+
+	if (il_is_any_associated(il)) {
+		/*
+		 * If we're associated, we clamp the maximum passive
+		 * dwell time to be 98% of the smallest beacon interval
+		 * (minus 2 * channel tune time)
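+		 * e.g. with a beacon interval of 100: (100 * 98) / 100 -
+		 * 2 * IL_CHANNEL_TUNE_TIME = 88, capping the passive dwell
+		 * at 88 ms.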
+		 */
+		value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+		if (value > IL_PASSIVE_DWELL_BASE || !value)
+			value = IL_PASSIVE_DWELL_BASE;
+		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+		passive = min(value, passive);
+	}
+
+	return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+	if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
+		il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+	if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
+		il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (WARN_ON(!il->cfg->ops->utils->request_scan))
+		return -EOPNOTSUPP;
+
+	cancel_delayed_work(&il->scan_check);
+
+	if (!il_is_ready_rf(il)) {
+		IL_WARN("Request scan called when driver not ready.\n");
+		return -EIO;
+	}
+
+	if (test_bit(S_SCAN_HW, &il->status)) {
+		D_SCAN("Multiple concurrent scan requests in parallel.\n");
+		return -EBUSY;
+	}
+
+	if (test_bit(S_SCAN_ABORTING, &il->status)) {
+		D_SCAN("Scan request while abort pending.\n");
+		return -EBUSY;
+	}
+
+	D_SCAN("Starting scan...\n");
+
+	set_bit(S_SCANNING, &il->status);
+	il->scan_start = jiffies;
+
+	ret = il->cfg->ops->utils->request_scan(il, vif);
+	if (ret) {
+		clear_bit(S_SCANNING, &il->status);
+		return ret;
+	}
+
+	queue_delayed_work(il->workqueue, &il->scan_check,
+			   IL_SCAN_CHECK_WATCHDOG);
+
+	return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       struct cfg80211_scan_request *req)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	D_MAC80211("enter\n");
+
+	if (req->n_channels == 0)
+		return -EINVAL;
+
+	mutex_lock(&il->mutex);
+
+	if (test_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Scan already in progress.\n");
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+
+	/* mac80211 will only ask for one band at a time */
+	il->scan_request = req;
+	il->scan_vif = vif;
+	il->scan_band = req->channels[0]->band;
+
+	ret = il_scan_initiate(il, vif);
+
+	D_MAC80211("leave\n");
+
+out_unlock:
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, scan_check.work);
+
+	D_SCAN("Scan check work\n");
+
+	/* If we get here the firmware has not finished the scan and is most
+	 * likely in bad shape, so don't bother sending an abort command;
+	 * just force scan complete to mac80211 */
+	mutex_lock(&il->mutex);
+	il_force_scan_end(il);
+	mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IE for probe request
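+ *
+ * Builds the 24-byte management header and a zero-length (wildcard) SSID IE,
+ * then copies the caller-supplied IEs.  Returns the number of bytes written,
+ * or 0 when @left is too small for the mandatory part.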
+ */
+
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+		  const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+	int len = 0;
+	u8 *pos = NULL;
+
+	/* Make sure there is enough space for the probe request,
+	 * two mandatory IEs and the data */
+	left -= 24;
+	if (left < 0)
+		return 0;
+
+	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+	memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+	memcpy(frame->sa, ta, ETH_ALEN);
+	memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+	frame->seq_ctrl = 0;
+
+	len += 24;
+
+	/* ...next IE... */
+	pos = &frame->u.probe_req.variable[0];
+
+	/* fill in our wildcard (zero-length) SSID IE */
+	left -= 2;
+	if (left < 0)
+		return 0;
+	*pos++ = WLAN_EID_SSID;
+	*pos++ = 0;
+
+	len += 2;
+
+	if (WARN_ON(left < ie_len))
+		return len;
+
+	if (ies && ie_len) {
+		memcpy(pos, ies, ie_len);
+		len += ie_len;
+	}
+
+	return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+	D_SCAN("Abort scan work\n");
+
+	/* Keep the scan_check work queued in case the firmware fails to
+	 * report back with a scan completed notification */
+	mutex_lock(&il->mutex);
+	il_scan_cancel_timeout(il, 200);
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+	bool aborted;
+
+	D_SCAN("Completed scan.\n");
+
+	cancel_delayed_work(&il->scan_check);
+
+	mutex_lock(&il->mutex);
+
+	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+	if (aborted)
+		D_SCAN("Aborted scan completed.\n");
+
+	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Scan already completed.\n");
+		goto out_settings;
+	}
+
+	il_complete_scan(il, aborted);
+
+out_settings:
+	/* Can we still talk to firmware ? */
+	if (!il_is_ready_rf(il))
+		goto out;
+
+	/*
+	 * We do not commit power settings while scan is pending,
+	 * do it now if the settings changed.
+	 */
+	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+	il_set_tx_power(il, il->tx_power_next, false);
+
+	il->cfg->ops->utils->post_scan(il);
+
+out:
+	mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+	cancel_work_sync(&il->abort_scan);
+	cancel_work_sync(&il->scan_completed);
+
+	if (cancel_delayed_work_sync(&il->scan_check)) {
+		mutex_lock(&il->mutex);
+		il_force_scan_end(il);
+		mutex_unlock(&il->mutex);
+	}
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+
+	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+		       sta_id, il->stations[sta_id].sta.sta.addr);
+
+	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+		D_ASSOC("STA id %u addr %pM already present"
+			" in uCode (according to driver)\n", sta_id,
+			il->stations[sta_id].sta.sta.addr);
+	} else {
+		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+			il->stations[sta_id].sta.sta.addr);
+	}
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+			struct il_rx_pkt *pkt, bool sync)
+{
+	u8 sta_id = addsta->sta.sta_id;
+	unsigned long flags;
+	int ret = -EIO;
+
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+		return ret;
+	}
+
+	D_INFO("Processing response for adding station %u\n", sta_id);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	switch (pkt->u.add_sta.status) {
+	case ADD_STA_SUCCESS_MSK:
+		D_INFO("C_ADD_STA PASSED\n");
+		il_sta_ucode_activate(il, sta_id);
+		ret = 0;
+		break;
+	case ADD_STA_NO_ROOM_IN_TBL:
+		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+		break;
+	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+		IL_ERR("Adding station %d failed, no block ack resource.\n",
+		       sta_id);
+		break;
+	case ADD_STA_MODIFY_NON_EXIST_STA:
+		IL_ERR("Attempting to modify non-existing station %d\n",
+		       sta_id);
+		break;
+	default:
+		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+		break;
+	}
+
+	D_INFO("%s station id %u addr %pM\n",
+	       il->stations[sta_id].sta.mode ==
+	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+	       il->stations[sta_id].sta.sta.addr);
+
+	/*
+	 * XXX: The MAC address in the command buffer is often changed from
+	 * the original sent to the device. That is, the MAC address
+	 * written to the command buffer often is not the same MAC address
+	 * read from the command buffer when the command returns. This
+	 * issue has not yet been resolved and this debugging is left to
+	 * observe the problem.
+	 */
+	D_INFO("%s station according to cmd buffer %pM\n",
+	       il->stations[sta_id].sta.mode ==
+	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+		    struct il_rx_pkt *pkt)
+{
+	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+	il_process_add_sta_resp(il, addsta, pkt, false);
+
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+	struct il_rx_pkt *pkt = NULL;
+	int ret = 0;
+	u8 data[sizeof(*sta)];
+	struct il_host_cmd cmd = {
+		.id = C_ADD_STA,
+		.flags = flags,
+		.data = data,
+	};
+	u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+	       flags & CMD_ASYNC ? "a" : "");
+
+	if (flags & CMD_ASYNC)
+		cmd.callback = il_add_sta_callback;
+	else {
+		cmd.flags |= CMD_WANT_SKB;
+		might_sleep();
+	}
+
+	cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+	ret = il_send_cmd(il, &cmd);
+
+	if (ret || (flags & CMD_ASYNC))
+		return ret;
+
+	if (ret == 0) {
+		pkt = (struct il_rx_pkt *)cmd.reply_page;
+		ret = il_process_add_sta_resp(il, sta, pkt, true);
+	}
+	il_free_pages(il, cmd.reply_page);
+
+	return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
+		      struct il_rxon_context *ctx)
+{
+	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+	__le32 sta_flags;
+	u8 mimo_ps_mode;
+
+	if (!sta || !sta_ht_inf->ht_supported)
+		goto done;
+
+	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+	D_ASSOC("spatial multiplexing power save mode: %s\n",
+		(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
+		(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+		"disabled");
+
+	sta_flags = il->stations[idx].sta.station_flags;
+
+	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+	switch (mimo_ps_mode) {
+	case WLAN_HT_CAP_SM_PS_STATIC:
+		sta_flags |= STA_FLG_MIMO_DIS_MSK;
+		break;
+	case WLAN_HT_CAP_SM_PS_DYNAMIC:
+		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+		break;
+	case WLAN_HT_CAP_SM_PS_DISABLED:
+		break;
+	default:
+		IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+		break;
+	}
+
+	sta_flags |=
+	    cpu_to_le32((u32) sta_ht_inf->
+			ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+	sta_flags |=
+	    cpu_to_le32((u32) sta_ht_inf->
+			ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+	if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+		sta_flags |= STA_FLG_HT40_EN_MSK;
+	else
+		sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+	il->stations[idx].sta.station_flags = sta_flags;
+done:
+	return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8
+il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+		const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+	struct il_station_entry *station;
+	int i;
+	u8 sta_id = IL_INVALID_STATION;
+	u16 rate;
+
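+	/* AP and broadcast stations have fixed table slots; otherwise search
+	 * the table for a matching address and remember the first unused
+	 * slot as a fallback for a new station. */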
+	if (is_ap)
+		sta_id = ctx->ap_sta_id;
+	else if (is_broadcast_ether_addr(addr))
+		sta_id = ctx->bcast_sta_id;
+	else
+		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+			if (!compare_ether_addr
+			    (il->stations[i].sta.sta.addr, addr)) {
+				sta_id = i;
+				break;
+			}
+
+			if (!il->stations[i].used &&
+			    sta_id == IL_INVALID_STATION)
+				sta_id = i;
+		}
+
+	/*
+	 * These two conditions have the same outcome, but keep them
+	 * separate
+	 */
+	if (unlikely(sta_id == IL_INVALID_STATION))
+		return sta_id;
+
+	/*
+	 * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track if one is in progress so that we do not send
+	 * another.
+	 */
+	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+		D_INFO("STA %d already in process of being added.\n", sta_id);
+		return sta_id;
+	}
+
+	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+	    !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+			sta_id, addr);
+		return sta_id;
+	}
+
+	station = &il->stations[sta_id];
+	station->used = IL_STA_DRIVER_ACTIVE;
+	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+	il->num_stations++;
+
+	/* Set up the C_ADD_STA command to send to device */
+	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+	station->sta.mode = 0;
+	station->sta.sta.sta_id = sta_id;
+	station->sta.station_flags = ctx->station_flags;
+	station->ctxid = ctx->ctxid;
+
+	if (sta) {
+		struct il_station_priv_common *sta_priv;
+
+		sta_priv = (void *)sta->drv_priv;
+		sta_priv->ctx = ctx;
+	}
+
+	/*
+	 * OK to call unconditionally, since local stations (IBSS BSSID
+	 * STA and broadcast STA) pass in a NULL sta, and mac80211
+	 * doesn't allow HT IBSS.
+	 */
+	il_set_ht_add_station(il, sta_id, sta, ctx);
+
+	/* 3945 only */
+	rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+	/* Turn on both antennas for the station... */
+	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+	return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common -
+ */
+int
+il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+		      const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
+		      u8 *sta_id_r)
+{
+	unsigned long flags_spin;
+	int ret = 0;
+	u8 sta_id;
+	struct il_addsta_cmd sta_cmd;
+
+	*sta_id_r = 0;
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Unable to prepare station %pM for addition\n", addr);
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EINVAL;
+	}
+
+	/*
+	 * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track if one is in progress so that we do not send
+	 * another.
+	 */
+	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+		D_INFO("STA %d already in process of being added.\n", sta_id);
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EEXIST;
+	}
+
+	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+			sta_id, addr);
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EEXIST;
+	}
+
+	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	/* Add station to device's station table */
+	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+	if (ret) {
+		spin_lock_irqsave(&il->sta_lock, flags_spin);
+		IL_ERR("Adding station %pM failed.\n",
+		       il->stations[sta_id].sta.sta.addr);
+		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+	}
+	*sta_id_r = sta_id;
+	return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+	/* uCode must be active and driver must be inactive */
+	if ((il->stations[sta_id].
+	     used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+	    IL_STA_UCODE_ACTIVE)
+		IL_ERR("removed non active STA %u\n", sta_id);
+
+	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+	D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
+		       bool temporary)
+{
+	struct il_rx_pkt *pkt;
+	int ret;
+
+	unsigned long flags_spin;
+	struct il_rem_sta_cmd rm_sta_cmd;
+
+	struct il_host_cmd cmd = {
+		.id = C_REM_STA,
+		.len = sizeof(struct il_rem_sta_cmd),
+		.flags = CMD_SYNC,
+		.data = &rm_sta_cmd,
+	};
+
+	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+	rm_sta_cmd.num_sta = 1;
+	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+	cmd.flags |= CMD_WANT_SKB;
+
+	ret = il_send_cmd(il, &cmd);
+
+	if (ret)
+		return ret;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+		ret = -EIO;
+	}
+
+	if (!ret) {
+		switch (pkt->u.rem_sta.status) {
+		case REM_STA_SUCCESS_MSK:
+			if (!temporary) {
+				spin_lock_irqsave(&il->sta_lock, flags_spin);
+				il_sta_ucode_deactivate(il, sta_id);
+				spin_unlock_irqrestore(&il->sta_lock,
+						       flags_spin);
+			}
+			D_ASSOC("C_REM_STA PASSED\n");
+			break;
+		default:
+			ret = -EIO;
+			IL_ERR("C_REM_STA failed\n");
+			break;
+		}
+	}
+	il_free_pages(il, cmd.reply_page);
+
+	return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
+{
+	unsigned long flags;
+
+	if (!il_is_ready(il)) {
+		D_INFO("Unable to remove station %pM, device not ready.\n",
+		       addr);
+		/*
+		 * It is typical for stations to be removed when we are
+		 * going down. Return success since device will be down
+		 * soon anyway
+		 */
+		return 0;
+	}
+
+	D_ASSOC("Removing STA from driver:%d  %pM\n", sta_id, addr);
+
+	if (WARN_ON(sta_id == IL_INVALID_STATION))
+		return -EINVAL;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+		D_INFO("Removing %pM but non DRIVER active\n", addr);
+		goto out_err;
+	}
+
+	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+		D_INFO("Removing %pM but non UCODE active\n", addr);
+		goto out_err;
+	}
+
+	if (il->stations[sta_id].used & IL_STA_LOCAL) {
+		kfree(il->stations[sta_id].lq);
+		il->stations[sta_id].lq = NULL;
+	}
+
+	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+	il->num_stations--;
+
+	BUG_ON(il->num_stations < 0);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	int i;
+	unsigned long flags_spin;
+	bool cleared = false;
+
+	D_INFO("Clearing ucode stations in driver\n");
+
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if (ctx && ctx->ctxid != il->stations[i].ctxid)
+			continue;
+
+		if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+			D_INFO("Clearing ucode active for station %d\n", i);
+			il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+			cleared = true;
+		}
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	if (!cleared)
+		D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	struct il_addsta_cmd sta_cmd;
+	struct il_link_quality_cmd lq;
+	unsigned long flags_spin;
+	int i;
+	bool found = false;
+	int ret;
+	bool send_lq;
+
+	if (!il_is_ready(il)) {
+		D_INFO("Not ready yet, not restoring any stations.\n");
+		return;
+	}
+
+	D_ASSOC("Restoring all known stations ... start.\n");
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if (ctx->ctxid != il->stations[i].ctxid)
+			continue;
+		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+			D_ASSOC("Restoring sta %pM\n",
+				il->stations[i].sta.sta.addr);
+			il->stations[i].sta.mode = 0;
+			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+			found = true;
+		}
+	}
+
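+	/* Second pass: send C_ADD_STA (and any saved LQ command) for each
+	 * station marked above, dropping sta_lock around the synchronous
+	 * commands. */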
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+			memcpy(&sta_cmd, &il->stations[i].sta,
+			       sizeof(struct il_addsta_cmd));
+			send_lq = false;
+			if (il->stations[i].lq) {
+				memcpy(&lq, il->stations[i].lq,
+				       sizeof(struct il_link_quality_cmd));
+				send_lq = true;
+			}
+			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+			if (ret) {
+				spin_lock_irqsave(&il->sta_lock, flags_spin);
+				IL_ERR("Adding station %pM failed.\n",
+				       il->stations[i].sta.sta.addr);
+				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+				il->stations[i].used &=
+				    ~IL_STA_UCODE_INPROGRESS;
+				spin_unlock_irqrestore(&il->sta_lock,
+						       flags_spin);
+			}
+			/*
+			 * Rate scaling has already been initialized, send
+			 * current LQ command
+			 */
+			if (send_lq)
+				il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+			spin_lock_irqsave(&il->sta_lock, flags_spin);
+			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+		}
+	}
+
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+	if (!found)
+		D_INFO("Restoring all known stations"
+		       " .... no stations to be restored.\n");
+	else
+		D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+	int i;
+
+	for (i = 0; i < il->sta_key_max_num; i++)
+		if (!test_and_set_bit(i, &il->ucode_key_table))
+			return i;
+
+	return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if (!(il->stations[i].used & IL_STA_BCAST))
+			continue;
+
+		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+		il->num_stations--;
+		BUG_ON(il->num_stations < 0);
+		kfree(il->stations[i].lq);
+		il->stations[i].lq = NULL;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+	int i;
+	D_RATE("lq station id 0x%x\n", lq->sta_id);
+	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+	       lq->general_params.dual_stream_ant_msk);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
+		     struct il_link_quality_cmd *lq)
+{
+	int i;
+
+	if (ctx->ht.enabled)
+		return true;
+
+	D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+			D_INFO("idx %d of LQ expects HT channel\n", i);
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In the special case where @init is set, the state indicating that station
+ * creation is in progress is cleared once the command completes.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+	       struct il_link_quality_cmd *lq, u8 flags, bool init)
+{
+	int ret = 0;
+	unsigned long flags_spin;
+
+	struct il_host_cmd cmd = {
+		.id = C_TX_LINK_QUALITY_CMD,
+		.len = sizeof(struct il_link_quality_cmd),
+		.flags = flags,
+		.data = lq,
+	};
+
+	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+		return -EINVAL;
+
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	il_dump_lq_cmd(il, lq);
+	BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+	if (il_is_lq_table_valid(il, ctx, lq))
+		ret = il_send_cmd(il, &cmd);
+	else
+		ret = -EINVAL;
+
+	if (cmd.flags & CMD_ASYNC)
+		return ret;
+
+	if (init) {
+		D_INFO("init LQ command complete,"
+		       " clearing sta addition status for sta %d\n",
+		       lq->sta_id);
+		spin_lock_irqsave(&il->sta_lock, flags_spin);
+		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  struct ieee80211_sta *sta)
+{
+	struct il_priv *il = hw->priv;
+	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+	int ret;
+
+	D_INFO("received request to remove station %pM\n", sta->addr);
+	mutex_lock(&il->mutex);
+	D_INFO("proceeding to remove station %pM\n", sta->addr);
+	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+	if (ret)
+		IL_ERR("Error removing station %pM\n", sta->addr);
+	mutex_unlock(&il->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet in.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt.  The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ *   IDX is not incremented and iwl->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc()   Allocates rx_free
+ * il_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE idx.  If insufficient rx_free buffers
+ *                            are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx()         Detach il_rx_bufs from pool up to the
+ *                            READ IDX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls il_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
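+
+/*
+ * Worked example of the idx rules above (illustrative values only):
+ * with RX_QUEUE_SIZE = 256, READ = 5 and WRITE = 4 the queue is empty
+ * (WRITE == READ - 1); READ = 5 and WRITE = 5 means it is full.  The
+ * helpers below keep two slots in reserve so the two states can never
+ * be confused.
+ */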
+
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+	int s = q->read - q->write;
+	if (s <= 0)
+		s += RX_QUEUE_SIZE;
+	/* keep some buffer to not confuse full and empty queue */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+	unsigned long flags;
+	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+	u32 reg;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	if (q->need_update == 0)
+		goto exit_unlock;
+
+	/* If power-saving is in use, make sure device is awake */
+	if (test_bit(S_POWER_PMI, &il->status)) {
+		reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+			       reg);
+			il_set_bit(il, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			goto exit_unlock;
+		}
+
+		q->write_actual = (q->write & ~0x7);
+		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+		/* Else device is assumed to be awake */
+	} else {
+		/* Device expects a multiple of 8 */
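+		/* e.g. a write idx of 13 is rounded down to write_actual = 8 */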
+		q->write_actual = (q->write & ~0x7);
+		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+	}
+
+	q->need_update = 0;
+
+exit_unlock:
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct device *dev = &il->pci_dev->dev;
+	int i;
+
+	spin_lock_init(&rxq->lock);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+	rxq->bd =
+	    dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+			       GFP_KERNEL);
+	if (!rxq->bd)
+		goto err_bd;
+
+	rxq->rb_stts =
+	    dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+			       &rxq->rb_stts_dma, GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err_rb;
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	rxq->need_update = 0;
+	return 0;
+
+err_rb:
+	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->bd_dma);
+err_bd:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+	if (!report->state) {
+		D_11H("Spectrum Measure Notification: Start\n");
+		return;
+	}
+
+	memcpy(&il->measure_report, report, sizeof(*report));
+	il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+		      u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+	u16 fc = le16_to_cpu(hdr->frame_control);
+
+	/*
+	 * All contexts have the same setting here due to it being
+	 * a module parameter, so OK to check any context.
+	 */
+	if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+		return 0;
+
+	if (!(fc & IEEE80211_FCTL_PROTECTED))
+		return 0;
+
+	D_RX("decrypt_res:0x%x\n", decrypt_res);
+	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+	case RX_RES_STATUS_SEC_TYPE_TKIP:
+		/* The uCode got a bad phase 1 key, so it pushes the packet up;
+		 * decryption will be done in SW. */
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_BAD_KEY_TTAK)
+			break;
+
+	case RX_RES_STATUS_SEC_TYPE_WEP:
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_BAD_ICV_MIC) {
+			/* bad ICV, the packet is destroyed since the
+			 * decryption is done in place, drop it */
+			D_RX("Packet destroyed\n");
+			return -1;
+		}
+	case RX_RES_STATUS_SEC_TYPE_CCMP:
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_DECRYPT_OK) {
+			D_RX("hw decrypt successfully!!!\n");
+			stats->flag |= RX_FLAG_DECRYPTED;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+	u32 reg = 0;
+	int txq_id = txq->q.id;
+
+	if (txq->need_update == 0)
+		return;
+
+	/* if we're trying to save power */
+	if (test_bit(S_POWER_PMI, &il->status)) {
+		/* wake up nic if it's powered down ...
+		 * uCode will wake up, and interrupt us again, so next
+		 * time we'll skip this part. */
+		reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+			       txq_id, reg);
+			il_set_bit(il, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			return;
+		}
+
+		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+		/*
+		 * else not in power-save mode,
+		 * uCode will never sleep when we're
+		 * trying to tx (during RFKILL, we're not trying to tx).
+		 */
+	} else
+		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+	txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+
+	if (q->n_bd == 0)
+		return;
+
+	while (q->write_ptr != q->read_ptr) {
+		il->cfg->ops->lib->txq_free_tfd(il, txq);
+		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct device *dev = &il->pci_dev->dev;
+	int i;
+
+	il_tx_queue_unmap(il, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+		kfree(txq->cmd[i]);
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->q.n_bd)
+		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+				  txq->tfds, txq->q.dma_addr);
+
+	/* De-alloc array of per-TFD driver data */
+	kfree(txq->txb);
+	txq->txb = NULL;
+
+	/* deallocate arrays */
+	kfree(txq->cmd);
+	kfree(txq->meta);
+	txq->cmd = NULL;
+	txq->meta = NULL;
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct il_queue *q = &txq->q;
+	int i;
+
+	if (q->n_bd == 0)
+		return;
+
+	while (q->read_ptr != q->write_ptr) {
+		i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+		if (txq->meta[i].flags & CMD_MAPPED) {
+			pci_unmap_single(il->pci_dev,
+					 dma_unmap_addr(&txq->meta[i], mapping),
+					 dma_unmap_len(&txq->meta[i], len),
+					 PCI_DMA_BIDIRECTIONAL);
+			txq->meta[i].flags = 0;
+		}
+
+		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+
+	i = q->n_win;
+	if (txq->meta[i].flags & CMD_MAPPED) {
+		pci_unmap_single(il->pci_dev,
+				 dma_unmap_addr(&txq->meta[i], mapping),
+				 dma_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+		txq->meta[i].flags = 0;
+	}
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct device *dev = &il->pci_dev->dev;
+	int i;
+
+	il_cmd_queue_unmap(il);
+
+	/* De-alloc array of command/tx buffers */
+	for (i = 0; i <= TFD_CMD_SLOTS; i++)
+		kfree(txq->cmd[i]);
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->q.n_bd)
+		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+				  txq->tfds, txq->q.dma_addr);
+
+	/* deallocate arrays */
+	kfree(txq->cmd);
+	kfree(txq->meta);
+	txq->cmd = NULL;
+	txq->meta = NULL;
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For a Tx queue, there are low mark and high mark limits. If, after queuing
+ * a packet for Tx, the free space drops below the low mark, the Tx queue is
+ * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
+ * space rises above the high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
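+
+/*
+ * Worked example of the marks above (illustrative n_win only): for a
+ * queue with n_win = 64, il_queue_init() below derives low_mark = 16
+ * and high_mark = 8.  Following the rule above, the queue is stopped
+ * once fewer than 16 slots remain free and resumed once reclaiming
+ * Tx'd frames leaves more than 8 slots free again.
+ */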
+
+int
+il_queue_space(const struct il_queue *q)
+{
+	int s = q->read_ptr - q->write_ptr;
+
+	if (q->read_ptr > q->write_ptr)
+		s -= q->n_bd;
+
+	if (s <= 0)
+		s += q->n_win;
+	/* keep some reserve to not confuse empty and full situations */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+EXPORT_SYMBOL(il_queue_space);
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
+	      u32 id)
+{
+	q->n_bd = count;
+	q->n_win = slots_num;
+	q->id = id;
+
+	/* count must be power-of-two size, otherwise il_queue_inc_wrap
+	 * and il_queue_dec_wrap are broken. */
+	BUG_ON(!is_power_of_2(count));
+
+	/* slots_num must be power-of-two size, otherwise
+	 * il_get_cmd_idx is broken. */
+	BUG_ON(!is_power_of_2(slots_num));
+
+	q->low_mark = q->n_win / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_win / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->write_ptr = q->read_ptr = 0;
+
+	return 0;
+}
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+	struct device *dev = &il->pci_dev->dev;
+	size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+	/* Driver private data, only for Tx (not command) queues,
+	 * not shared with device. */
+	if (id != il->cmd_queue) {
+		txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+				   GFP_KERNEL);
+		if (!txq->txb) {
+			IL_ERR("kmalloc for auxiliary BD "
+			       "structures failed\n");
+			goto error;
+		}
+	} else {
+		txq->txb = NULL;
+	}
+
+	/* Circular buffer of transmit frame descriptors (TFDs),
+	 * shared with device */
+	txq->tfds =
+	    dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+	if (!txq->tfds) {
+		IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
+		goto error;
+	}
+	txq->q.id = id;
+
+	return 0;
+
+error:
+	kfree(txq->txb);
+	txq->txb = NULL;
+
+	return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+		 u32 txq_id)
+{
+	int i, len;
+	int ret;
+	int actual_slots = slots_num;
+
+	/*
+	 * Alloc buffer array for commands (Tx or other types of commands).
+	 * For the command queue (#4/#9), allocate command space + one big
+	 * command for scan, since the scan command is very large; the system will
+	 * not have two scans at the same time, so only one is needed.
+	 * For normal Tx queues (all other queues), no super-size command
+	 * space is needed.
+	 */
+	if (txq_id == il->cmd_queue)
+		actual_slots++;
+
+	txq->meta =
+	    kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+	txq->cmd =
+	    kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+
+	if (!txq->meta || !txq->cmd)
+		goto out_free_arrays;
+
+	len = sizeof(struct il_device_cmd);
+	for (i = 0; i < actual_slots; i++) {
+		/* only happens for cmd queue */
+		if (i == slots_num)
+			len = IL_MAX_CMD_SIZE;
+
+		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+		if (!txq->cmd[i])
+			goto err;
+	}
+
+	/* Alloc driver data array and TFD circular buffer */
+	ret = il_tx_queue_alloc(il, txq, txq_id);
+	if (ret)
+		goto err;
+
+	txq->need_update = 0;
+
+	/*
+	 * For the default queues 0-3, set up the swq_id
+	 * already -- all others need to get one later
+	 * (if they need one at all).
+	 */
+	if (txq_id < 4)
+		il_set_swq_id(txq, txq_id, txq_id);
+
+	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	 * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
+	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+	/* Initialize queue's high/low-water marks, and head/tail idxes */
+	il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	il->cfg->ops->lib->txq_init(il, txq);
+
+	return 0;
+err:
+	for (i = 0; i < actual_slots; i++)
+		kfree(txq->cmd[i]);
+out_free_arrays:
+	kfree(txq->meta);
+	kfree(txq->cmd);
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+		  u32 txq_id)
+{
+	int actual_slots = slots_num;
+
+	if (txq_id == il->cmd_queue)
+		actual_slots++;
+
+	memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail idxes */
+	il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	il->cfg->ops->lib->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the idx (> 0) of the command in the
+ * command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct il_queue *q = &txq->q;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	dma_addr_t phys_addr;
+	unsigned long flags;
+	int len;
+	u32 idx;
+	u16 fix_size;
+
+	cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+	fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+	/* If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
+	 * we will need to increase the size of the TFD entries.
+	 * Also check that the command buffer does not exceed the size
+	 * of device_cmd and max_cmd_size. */
+	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+	       !(cmd->flags & CMD_SIZE_HUGE));
+	BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+	if (il_is_rfkill(il) || il_is_ctkill(il)) {
+		IL_WARN("Not sending command - %s KILL\n",
+			il_is_rfkill(il) ? "RF" : "CT");
+		return -EIO;
+	}
+
+	spin_lock_irqsave(&il->hcmd_lock, flags);
+
+	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+		IL_ERR("Restarting adapter due to command queue full\n");
+		queue_work(il->workqueue, &il->restart);
+		return -ENOSPC;
+	}
+
+	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+	out_cmd = txq->cmd[idx];
+	out_meta = &txq->meta[idx];
+
+	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+		spin_unlock_irqrestore(&il->hcmd_lock, flags);
+		return -ENOSPC;
+	}
+
+	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
+	out_meta->flags = cmd->flags | CMD_MAPPED;
+	if (cmd->flags & CMD_WANT_SKB)
+		out_meta->source = cmd;
+	if (cmd->flags & CMD_ASYNC)
+		out_meta->callback = cmd->callback;
+
+	out_cmd->hdr.cmd = cmd->id;
+	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+	/* At this point, the out_cmd now has all of the incoming cmd
+	 * information */
+
+	out_cmd->hdr.flags = 0;
+	out_cmd->hdr.sequence =
+	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+	if (cmd->flags & CMD_SIZE_HUGE)
+		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+	len = sizeof(struct il_device_cmd);
+	if (idx == TFD_CMD_SLOTS)
+		len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	switch (out_cmd->hdr.cmd) {
+	case C_TX_LINK_QUALITY_CMD:
+	case C_SENSITIVITY:
+		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+			  "%d bytes at %d[%d]:%d\n",
+			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+			  q->write_ptr, idx, il->cmd_queue);
+		break;
+	default:
+		D_HC("Sending command %s (#%x), seq: 0x%04X, "
+		     "%d bytes at %d[%d]:%d\n",
+		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+		     idx, il->cmd_queue);
+	}
+#endif
+	txq->need_update = 1;
+
+	if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+		/* Set up entry in queue's byte count circular buffer */
+		il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+
+	phys_addr =
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+			   PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_addr_set(out_meta, mapping, phys_addr);
+	dma_unmap_len_set(out_meta, len, fix_size);
+
+	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
+						 1, U32_PAD(cmd->len));
+
+	/* Increment and update queue's write idx */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+
+	spin_unlock_irqrestore(&il->hcmd_lock, flags);
+	return idx;
+}
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms.  If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	int nfreed = 0;
+
+	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+		       q->write_ptr, q->read_ptr);
+		return;
+	}
+
+	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (nfreed++ > 0) {
+			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+			       q->write_ptr, q->read_ptr);
+			queue_work(il->workqueue, &il->restart);
+		}
+
+	}
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int idx = SEQ_TO_IDX(sequence);
+	int cmd_idx;
+	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+	struct il_device_cmd *cmd;
+	struct il_cmd_meta *meta;
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	unsigned long flags;
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue, then a command routing bug has been introduced
+	 * in the queue management code. */
+	if (WARN
+	    (txq_id != il->cmd_queue,
+	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+	     il->txq[il->cmd_queue].q.write_ptr)) {
+		il_print_hex_error(il, pkt, 32);
+		return;
+	}
+
+	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+	cmd = txq->cmd[cmd_idx];
+	meta = &txq->meta[cmd_idx];
+
+	txq->time_stamp = jiffies;
+
+	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+	/* Input error checking is done when commands are added to queue. */
+	if (meta->flags & CMD_WANT_SKB) {
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
+	} else if (meta->callback)
+		meta->callback(il, cmd, pkt);
+
+	spin_lock_irqsave(&il->hcmd_lock, flags);
+
+	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+	if (!(meta->flags & CMD_ASYNC)) {
+		clear_bit(S_HCMD_ACTIVE, &il->status);
+		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+		       il_get_cmd_string(cmd->hdr.cmd));
+		wake_up(&il->wait_command_queue);
+	}
+
+	/* Mark as unmapped */
+	meta->flags = 0;
+
+	spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, uCode will do kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * If bt_coex_active is set to false, uCode will ignore the BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to WiFi/BT
+ * co-existence problems. The possible behaviors are:
+ *   Able to scan and find all the available APs
+ *   Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
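+/*
+ * Example usage (the module name depends on how this file is built and
+ * is only a placeholder here):
+ *   modprobe <module> bt_coex_active=0
+ * or, on the kernel command line:
+ *   <module>.bt_coex_active=0
+ */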
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+/* This function both allocates and initializes hw and il. */
+struct ieee80211_hw *
+il_alloc_all(struct il_cfg *cfg)
+{
+	struct il_priv *il;
+	/* mac80211 allocates memory for this device instance, including
+	 *   space for this driver's private structure */
+	struct ieee80211_hw *hw;
+
+	hw = ieee80211_alloc_hw(sizeof(struct il_priv),
+				cfg->ops->ieee80211_ops);
+	if (hw == NULL) {
+		pr_err("%s: Can not allocate network device\n", cfg->name);
+		goto out;
+	}
+
+	il = hw->priv;
+	il->hw = hw;
+
+out:
+	return hw;
+}
+EXPORT_SYMBOL(il_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+		    struct ieee80211_sta_ht_cap *ht_info,
+		    enum ieee80211_band band)
+{
+	u16 max_bit_rate = 0;
+	u8 rx_chains_num = il->hw_params.rx_chains_num;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+	ht_info->cap = 0;
+	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+	ht_info->ht_supported = true;
+
+	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+	max_bit_rate = MAX_BIT_RATE_20_MHZ;
+	if (il->hw_params.ht40_channel & BIT(band)) {
+		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+		ht_info->mcs.rx_mask[4] = 0x01;
+		max_bit_rate = MAX_BIT_RATE_40_MHZ;
+	}
+
+	if (il->cfg->mod_params->amsdu_size_8K)
+		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+	ht_info->mcs.rx_mask[0] = 0xFF;
+	if (rx_chains_num >= 2)
+		ht_info->mcs.rx_mask[1] = 0xFF;
+	if (rx_chains_num >= 3)
+		ht_info->mcs.rx_mask[2] = 0xFF;
+
+	/* Highest supported Rx data rate */
+	max_bit_rate *= rx_chains_num;
+	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+	/* Tx MCS capabilities */
+	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	if (tx_chains_num != rx_chains_num) {
+		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+		ht_info->mcs.tx_params |=
+		    ((tx_chains_num -
+		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+	}
+}
+
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based on eeprom
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+	struct il_channel_info *ch;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *channels;
+	struct ieee80211_channel *geo_ch;
+	struct ieee80211_rate *rates;
+	int i = 0;
+	s8 max_tx_power = 0;
+
+	if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+	    il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+		D_INFO("Geography modes already initialized.\n");
+		set_bit(S_GEO_CONFIGURED, &il->status);
+		return 0;
+	}
+
+	channels =
+	    kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+		    GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	rates =
+	    kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+		    GFP_KERNEL);
+	if (!rates) {
+		kfree(channels);
+		return -ENOMEM;
+	}
+
+	/* 5.2GHz channels start after the 2.4GHz channels */
+	sband = &il->bands[IEEE80211_BAND_5GHZ];
+	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+	/* just OFDM */
+	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+	if (il->cfg->sku & IL_SKU_N)
+		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+	sband = &il->bands[IEEE80211_BAND_2GHZ];
+	sband->channels = channels;
+	/* OFDM & CCK */
+	sband->bitrates = rates;
+	sband->n_bitrates = RATE_COUNT_LEGACY;
+
+	if (il->cfg->sku & IL_SKU_N)
+		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+	il->ieee_channels = channels;
+	il->ieee_rates = rates;
+
+	for (i = 0; i < il->channel_count; i++) {
+		ch = &il->channel_info[i];
+
+		if (!il_is_channel_valid(ch))
+			continue;
+
+		sband = &il->bands[ch->band];
+
+		geo_ch = &sband->channels[sband->n_channels++];
+
+		geo_ch->center_freq =
+		    ieee80211_channel_to_frequency(ch->channel, ch->band);
+		geo_ch->max_power = ch->max_power_avg;
+		geo_ch->max_antenna_gain = 0xff;
+		geo_ch->hw_value = ch->channel;
+
+		if (il_is_channel_valid(ch)) {
+			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+			if (ch->flags & EEPROM_CHANNEL_RADAR)
+				geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+			geo_ch->flags |= ch->ht40_extension_channel;
+
+			if (ch->max_power_avg > max_tx_power)
+				max_tx_power = ch->max_power_avg;
+		} else {
+			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+		}
+
+		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+		       geo_ch->center_freq,
+		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
+		       geo_ch->
+		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
+		       geo_ch->flags);
+	}
+
+	il->tx_power_device_lmt = max_tx_power;
+	il->tx_power_user_lmt = max_tx_power;
+	il->tx_power_next = max_tx_power;
+
+	if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+	    (il->cfg->sku & IL_SKU_A)) {
+		IL_INFO("Incorrectly detected BG card as ABG. "
+			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+			il->pci_dev->device, il->pci_dev->subsystem_device);
+		il->cfg->sku &= ~IL_SKU_A;
+	}
+
+	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+		il->bands[IEEE80211_BAND_2GHZ].n_channels,
+		il->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+	set_bit(S_GEO_CONFIGURED, &il->status);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+	kfree(il->ieee_channels);
+	kfree(il->ieee_rates);
+	clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+			u16 channel, u8 extension_chan_offset)
+{
+	const struct il_channel_info *ch_info;
+
+	ch_info = il_get_channel_info(il, band, channel);
+	if (!il_is_channel_valid(ch_info))
+		return false;
+
+	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+		return !(ch_info->
+			 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
+	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+		return !(ch_info->
+			 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
+
+	return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+		      struct ieee80211_sta_ht_cap *ht_cap)
+{
+	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+		return false;
+
+	/*
+	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	 * the bit will not be set in the pure 40MHz case.
+	 */
+	if (ht_cap && !ht_cap->ht_supported)
+		return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	if (il->disable_ht40)
+		return false;
+#endif
+
+	return il_is_channel_extension(il, il->band,
+				       le16_to_cpu(ctx->staging.channel),
+				       ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+	u16 new_val;
+	u16 beacon_factor;
+
+	/*
+	 * If mac80211 hasn't given us a beacon interval, program
+	 * the default into the device.
+	 */
+	if (!beacon_val)
+		return DEFAULT_BEACON_INTERVAL;
+
+	/*
+	 * If the beacon interval we obtained from the peer
+	 * is too large, we'll have to wake up more often
+	 * (and in IBSS case, we'll beacon too much)
+	 *
+	 * For example, if max_beacon_val is 4096, and the
+	 * requested beacon interval is 7000, we'll have to
+	 * use 3500 to be able to wake up on the beacons.
+	 *
+	 * This could badly influence beacon detection stats.
+	 */
+
+	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+	new_val = beacon_val / beacon_factor;
+
+	if (!new_val)
+		new_val = max_beacon_val;
+
+	return new_val;
+}
+
+int
+il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	u64 tsf;
+	s32 interval_tm, rem;
+	struct ieee80211_conf *conf = NULL;
+	u16 beacon_int;
+	struct ieee80211_vif *vif = ctx->vif;
+
+	conf = &il->hw->conf;
+
+	lockdep_assert_held(&il->mutex);
+
+	memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+	ctx->timing.timestamp = cpu_to_le64(il->timestamp);
+	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+	/*
+	 * TODO: For IBSS we need to get atim_win from mac80211,
+	 *       for now just always use 0
+	 */
+	ctx->timing.atim_win = 0;
+
+	beacon_int =
+	    il_adjust_beacon_interval(beacon_int,
+				      il->hw_params.max_beacon_itrvl *
+				      TIME_UNIT);
+	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
+	interval_tm = beacon_int * TIME_UNIT;
+	rem = do_div(tsf, interval_tm);
+	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+		le16_to_cpu(ctx->timing.beacon_interval),
+		le32_to_cpu(ctx->timing.beacon_init_val),
+		le16_to_cpu(ctx->timing.atim_win));
+
+	return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
+			       &ctx->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+		     int hw_decrypt)
+{
+	struct il_rxon_cmd *rxon = &ctx->staging;
+
+	if (hw_decrypt)
+		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+	else
+		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* validate RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	struct il_rxon_cmd *rxon = &ctx->staging;
+	bool error = false;
+
+	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+			IL_WARN("check 2.4G: wrong narrow\n");
+			error = true;
+		}
+		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+			IL_WARN("check 2.4G: wrong radar\n");
+			error = true;
+		}
+	} else {
+		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+			IL_WARN("check 5.2G: not short slot!\n");
+			error = true;
+		}
+		if (rxon->flags & RXON_FLG_CCK_MSK) {
+			IL_WARN("check 5.2G: CCK!\n");
+			error = true;
+		}
+	}
+	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+		IL_WARN("mac/bssid mcast!\n");
+		error = true;
+	}
+
+	/* make sure basic rates 6Mbps and 1Mbps are supported */
+	if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+	    (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+		IL_WARN("neither 1 nor 6 are basic\n");
+		error = true;
+	}
+
+	if (le16_to_cpu(rxon->assoc_id) > 2007) {
+		IL_WARN("aid > 2007\n");
+		error = true;
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+	    (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+		IL_WARN("CCK and short slot\n");
+		error = true;
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+	    (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+		IL_WARN("CCK and auto detect");
+		error = true;
+	}
+
+	if ((rxon->
+	     flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+	    RXON_FLG_TGG_PROTECT_MSK) {
+		IL_WARN("TGg but no auto-detect\n");
+		error = true;
+	}
+
+	if (error)
+		IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+
+	if (error) {
+		IL_ERR("Invalid RXON\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	const struct il_rxon_cmd *staging = &ctx->staging;
+	const struct il_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)							\
+	if ((cond)) {							\
+		D_INFO("need full RXON - " #cond "\n");	\
+		return 1;						\
+	}
+
+#define CHK_NEQ(c1, c2)						\
+	if ((c1) != (c2)) {					\
+		D_INFO("need full RXON - "	\
+			       #c1 " != " #c2 " - %d != %d\n",	\
+			       (c1), (c2));			\
+		return 1;					\
+	}
+
+	/* These items are only settable from the full RXON command */
+	CHK(!il_is_associated_ctx(ctx));
+	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+	CHK(compare_ether_addr
+	    (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+	CHK_NEQ(staging->dev_type, active->dev_type);
+	CHK_NEQ(staging->channel, active->channel);
+	CHK_NEQ(staging->air_propagation, active->air_propagation);
+	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+		active->ofdm_ht_single_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+		active->ofdm_ht_dual_stream_basic_rates);
+	CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+	 * be updated with the RXON_ASSOC command -- however only some
+	 * flag transitions are allowed using RXON_ASSOC */
+
+	/* Check if we are not switching bands */
+	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+		active->flags & RXON_FLG_BAND_24G_MSK);
+
+	/* Check if we are switching association toggle */
+	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+		active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+	return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	/*
+	 * Assign the lowest rate -- should really get this from
+	 * the beacon skb from mac80211.
+	 */
+	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+		return RATE_1M_PLCP;
+	else
+		return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
+		struct il_rxon_context *ctx)
+{
+	struct il_rxon_cmd *rxon = &ctx->staging;
+
+	if (!ctx->ht.enabled) {
+		rxon->flags &=
+		    ~(RXON_FLG_CHANNEL_MODE_MSK |
+		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+		      | RXON_FLG_HT_PROT_MSK);
+		return;
+	}
+
+	rxon->flags |=
+	    cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+	/* Set up channel bandwidth:
+	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+	/* clear the HT channel mode before setting the mode */
+	rxon->flags &=
+	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+	if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+		/* pure ht40 */
+		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+			/* Note: control channel is opposite of extension channel */
+			switch (ctx->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			}
+		} else {
+			/* Note: control channel is opposite of extension channel */
+			switch (ctx->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+			default:
+				/* channel location only valid if in Mixed mode */
+				IL_ERR("invalid extension channel offset\n");
+				break;
+			}
+		}
+	} else {
+		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+	}
+
+	if (il->cfg->ops->hcmd->set_rxon_chain)
+		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+		ctx->ht.protection, ctx->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+	_il_set_rxon_ht(il, ht_conf, &il->ctx);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return a valid, unused channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+{
+	const struct il_channel_info *ch_info;
+	int i;
+	u8 channel = 0;
+	u8 min, max;
+
+	if (band == IEEE80211_BAND_5GHZ) {
+		min = 14;
+		max = il->channel_count;
+	} else {
+		min = 0;
+		max = 14;
+	}
+
+	for (i = min; i < max; i++) {
+		channel = il->channel_info[i].channel;
+		if (channel == le16_to_cpu(il->ctx.staging.channel))
+			continue;
+
+		ch_info = il_get_channel_info(il, band, channel);
+		if (il_is_channel_valid(ch_info))
+			break;
+	}
+
+	return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+		    struct il_rxon_context *ctx)
+{
+	enum ieee80211_band band = ch->band;
+	u16 channel = ch->hw_value;
+
+	if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+		return 0;
+
+	ctx->staging.channel = cpu_to_le16(channel);
+	if (band == IEEE80211_BAND_5GHZ)
+		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+	else
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+	il->band = band;
+
+	D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+		      enum ieee80211_band band, struct ieee80211_vif *vif)
+{
+	if (band == IEEE80211_BAND_5GHZ) {
+		ctx->staging.flags &=
+		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+		      RXON_FLG_CCK_MSK);
+		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+	} else {
+		/* Copied from il_post_associate() */
+		if (vif && vif->bss_conf.use_short_slot)
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+	}
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void
+il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	const struct il_channel_info *ch_info;
+
+	memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+	if (!ctx->vif) {
+		ctx->staging.dev_type = ctx->unused_devtype;
+	} else
+		switch (ctx->vif->type) {
+
+		case NL80211_IFTYPE_STATION:
+			ctx->staging.dev_type = ctx->station_devtype;
+			ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+			break;
+
+		case NL80211_IFTYPE_ADHOC:
+			ctx->staging.dev_type = ctx->ibss_devtype;
+			ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+			ctx->staging.filter_flags =
+			    RXON_FILTER_BCON_AWARE_MSK |
+			    RXON_FILTER_ACCEPT_GRP_MSK;
+			break;
+
+		default:
+			IL_ERR("Unsupported interface type %d\n",
+			       ctx->vif->type);
+			break;
+		}
+
+#if 0
+	/* TODO:  Figure out when short_preamble would be set and cache from
+	 * that */
+	if (!hw_to_local(il->hw)->short_preamble)
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+	ch_info =
+	    il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+
+	if (!ch_info)
+		ch_info = &il->channel_info[0];
+
+	ctx->staging.channel = cpu_to_le16(ch_info->channel);
+	il->band = ch_info->band;
+
+	il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+
+	ctx->staging.ofdm_basic_rates =
+	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+	ctx->staging.cck_basic_rates =
+	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+	/* clear both MIX and PURE40 mode flag */
+	ctx->staging.flags &=
+	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+	if (ctx->vif)
+		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+	const struct ieee80211_supported_band *hw = NULL;
+	struct ieee80211_rate *rate;
+	int i;
+
+	hw = il_get_hw_mode(il, il->band);
+	if (!hw) {
+		IL_ERR("Failed to set rate: unable to get hw mode\n");
+		return;
+	}
+
+	il->active_rate = 0;
+
+	for (i = 0; i < hw->n_bitrates; i++) {
+		rate = &(hw->bitrates[i]);
+		if (rate->hw_value < RATE_COUNT_LEGACY)
+			il->active_rate |= (1 << rate->hw_value);
+	}
+
+	D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+	il->ctx.staging.cck_basic_rates =
+	    (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+	il->ctx.staging.ofdm_basic_rates =
+	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+	struct il_rxon_context *ctx = &il->ctx;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+		ieee80211_chswitch_done(ctx->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_csa_notification *csa = &(pkt->u.csa_notif);
+
+	struct il_rxon_context *ctx = &il->ctx;
+	struct il_rxon_cmd *rxon = (void *)&ctx->active;
+
+	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+		return;
+
+	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+		rxon->channel = csa->channel;
+		ctx->staging.channel = csa->channel;
+		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+		il_chswitch_done(il, true);
+	} else {
+		IL_ERR("CSA notif (fail) : channel %d\n",
+		       le16_to_cpu(csa->channel));
+		il_chswitch_done(il, false);
+	}
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	struct il_rxon_cmd *rxon = &ctx->staging;
+
+	D_RADIO("RX CONFIG:\n");
+	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+	/* Set the FW error flag -- cleared on il_down */
+	set_bit(S_FW_ERROR, &il->status);
+
+	/* Cancel currently queued command. */
+	clear_bit(S_HCMD_ACTIVE, &il->status);
+
+	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+	il->cfg->ops->lib->dump_nic_error_log(il);
+	if (il->cfg->ops->lib->dump_fh)
+		il->cfg->ops->lib->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+		il_print_rx_config_cmd(il, &il->ctx);
+#endif
+
+	wake_up(&il->wait_command_queue);
+
+	/* Keep the restart process from trying to send host
+	 * commands by clearing the INIT status bit */
+	clear_bit(S_READY, &il->status);
+
+	if (!test_bit(S_EXIT_PENDING, &il->status)) {
+		IL_DBG(IL_DL_FW_ERRORS,
+		       "Restarting adapter due to uCode error.\n");
+
+		if (il->cfg->mod_params->restart_fw)
+			queue_work(il->workqueue, &il->restart);
+	}
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+il_apm_stop_master(struct il_priv *il)
+{
+	int ret = 0;
+
+	/* stop device's busmaster DMA activity */
+	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+	ret =
+	    _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+			 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+	if (ret)
+		IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+	D_INFO("stop master\n");
+
+	return ret;
+}
+
+void
+il_apm_stop(struct il_priv *il)
+{
+	D_INFO("Stop card, put in low power state\n");
+
+	/* Stop device's DMA activity */
+	il_apm_stop_master(il);
+
+	/* Reset the entire device */
+	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+	udelay(10);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+	int ret = 0;
+	u16 lctl;
+
+	D_INFO("Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 * NOTE:  This is a no-op for 3945 (non-existent bit)
+	 */
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	/*
+	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so device moves directly L0->L1;
+	 *    costs negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	if (il->cfg->base_params->set_l0s) {
+		lctl = il_pcie_link_ctl(il);
+		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+		    PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+			/* L1-ASPM enabled; disable(!) L0S  */
+			il_set_bit(il, CSR_GIO_REG,
+				   CSR_GIO_REG_VAL_L0S_ENABLED);
+			D_POWER("L1 Enabled; Disabling L0S\n");
+		} else {
+			/* L1-ASPM disabled; enable(!) L0S */
+			il_clear_bit(il, CSR_GIO_REG,
+				     CSR_GIO_REG_VAL_L0S_ENABLED);
+			D_POWER("L1 Disabled; Enabling L0S\n");
+		}
+	}
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (il->cfg->base_params->pll_cfg_val)
+		il_set_bit(il, CSR_ANA_PLL_CFG,
+			   il->cfg->base_params->pll_cfg_val);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. il_wr_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret =
+	    _il_poll_bit(il, CSR_GP_CNTRL,
+			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		D_INFO("Failed to init the card\n");
+		goto out;
+	}
+
+	/*
+	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+	 * do not disable clocks.  This preserves any hardware bits already
+	 * set by default in "CLK_CTRL_REG" after reset.
+	 */
+	if (il->cfg->base_params->use_bsm)
+		il_wr_prph(il, APMG_CLK_EN_REG,
+			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+	else
+		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(20);
+
+	/* Disable L1-Active */
+	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+	int ret;
+	s8 prev_tx_power;
+	bool defer;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (il->tx_power_user_lmt == tx_power && !force)
+		return 0;
+
+	if (!il->cfg->ops->lib->send_tx_power)
+		return -EOPNOTSUPP;
+
+	/* 0 dBm means 1 milliwatt */
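+	/* (P[mW] = 10^(dBm/10), so e.g. 10 dBm = 10 mW, 20 dBm = 100 mW) */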
+	if (tx_power < 0) {
+		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+		return -EINVAL;
+	}
+
+	if (tx_power > il->tx_power_device_lmt) {
+		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+			tx_power, il->tx_power_device_lmt);
+		return -EINVAL;
+	}
+
+	if (!il_is_ready_rf(il))
+		return -EIO;
+
+	/* scan complete and commit_rxon use the tx_power_next value,
+	 * so it always needs to be updated with the newest request */
+	il->tx_power_next = tx_power;
+
+	/* do not set tx power when scanning or channel changing */
+	defer = test_bit(S_SCANNING, &il->status) ||
+	    memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+	if (defer && !force) {
+		D_INFO("Deferring tx power set\n");
+		return 0;
+	}
+
+	prev_tx_power = il->tx_power_user_lmt;
+	il->tx_power_user_lmt = tx_power;
+
+	ret = il->cfg->ops->lib->send_tx_power(il);
+
+	/* if setting tx_power fails, restore the original tx power */
+	if (ret) {
+		il->tx_power_user_lmt = prev_tx_power;
+		il->tx_power_next = prev_tx_power;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+	struct il_bt_cmd bt_cmd = {
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
+		.kill_ack_mask = 0,
+		.kill_cts_mask = 0,
+	};
+
+	if (!bt_coex_active)
+		bt_cmd.flags = BT_COEX_DISABLE;
+	else
+		bt_cmd.flags = BT_COEX_ENABLE;
+
+	D_INFO("BT coex %s\n",
+	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+	if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+		IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+	struct il_stats_cmd stats_cmd = {
+		.configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+	};
+
+	if (flags & CMD_ASYNC)
+		return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+					     &stats_cmd, NULL);
+	else
+		return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+				       &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+	D_RX("sleep mode: %d, src: %d\n",
+	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+		il_get_cmd_string(pkt->hdr.cmd));
+	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+	       "seq 0x%04X ser 0x%08X\n",
+	       le32_to_cpu(pkt->u.err_resp.error_type),
+	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
+	       pkt->u.err_resp.cmd_id,
+	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+	       le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+	       const struct ieee80211_tx_queue_params *params)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long flags;
+	int q;
+
+	D_MAC80211("enter\n");
+
+	if (!il_is_ready_rf(il)) {
+		D_MAC80211("leave - RF not ready\n");
+		return -EIO;
+	}
+
+	if (queue >= AC_NUM) {
+		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+		return 0;
+	}
+
+	q = AC_NUM - 1 - queue;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+	    cpu_to_le16(params->cw_min);
+	il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+	    cpu_to_le16(params->cw_max);
+	il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+	il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+	    cpu_to_le16((params->txop * 32));
+
+	il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	D_MAC80211("leave\n");
+	return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+
+	return il->ibss_manager == IL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	il_connection_init_rx_config(il, ctx);
+
+	if (il->cfg->ops->hcmd->set_rxon_chain)
+		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+	return il_commit_rxon(il, ctx);
+}
+
+static int
+il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	struct ieee80211_vif *vif = ctx->vif;
+	int err;
+
+	lockdep_assert_held(&il->mutex);
+
+	/*
+	 * This variable will be correct only when there's just
+	 * a single context, but all code using it is for hardware
+	 * that supports only one context.
+	 */
+	il->iw_mode = vif->type;
+
+	ctx->is_active = true;
+
+	err = il_set_mode(il, ctx);
+	if (err) {
+		if (!ctx->always_active)
+			ctx->is_active = false;
+		return err;
+	}
+
+	return 0;
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+	int err;
+	u32 modes;
+
+	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+	mutex_lock(&il->mutex);
+
+	if (!il_is_ready_rf(il)) {
+		IL_WARN("Try to add interface when device not ready\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* check if busy context is exclusive */
+	if (il->ctx.vif &&
+	    (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
+	if (!(modes & BIT(vif->type))) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	vif_priv->ctx = &il->ctx;
+	il->ctx.vif = vif;
+
+	err = il_setup_interface(il, &il->ctx);
+	if (err) {
+		il->ctx.vif = NULL;
+		il->iw_mode = NL80211_IFTYPE_STATION;
+	}
+
+out:
+	mutex_unlock(&il->mutex);
+
+	D_MAC80211("leave\n");
+	return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+		      bool mode_change)
+{
+	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+	lockdep_assert_held(&il->mutex);
+
+	if (il->scan_vif == vif) {
+		il_scan_cancel_timeout(il, 200);
+		il_force_scan_end(il);
+	}
+
+	if (!mode_change) {
+		il_set_mode(il, ctx);
+		if (!ctx->always_active)
+			ctx->is_active = false;
+	}
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+	D_MAC80211("enter\n");
+
+	mutex_lock(&il->mutex);
+
+	WARN_ON(ctx->vif != vif);
+	ctx->vif = NULL;
+
+	il_teardown_interface(il, vif, false);
+
+	memset(il->bssid, 0, ETH_ALEN);
+	mutex_unlock(&il->mutex);
+
+	D_MAC80211("leave\n");
+
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+	if (!il->txq)
+		il->txq =
+		    kzalloc(sizeof(struct il_tx_queue) *
+			    il->cfg->base_params->num_of_queues, GFP_KERNEL);
+	if (!il->txq) {
+		IL_ERR("Not enough memory for txq\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_txq_mem(struct il_priv *il)
+{
+	kfree(il->txq);
+	il->txq = NULL;
+}
+EXPORT_SYMBOL(il_txq_mem);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+
+#define IL_TRAFFIC_DUMP_SIZE	(IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
+
+void
+il_reset_traffic_log(struct il_priv *il)
+{
+	il->tx_traffic_idx = 0;
+	il->rx_traffic_idx = 0;
+	if (il->tx_traffic)
+		memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+	if (il->rx_traffic)
+		memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+}
+
+int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+	u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
+
+	if (il_debug_level & IL_DL_TX) {
+		if (!il->tx_traffic) {
+			il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+			if (!il->tx_traffic)
+				return -ENOMEM;
+		}
+	}
+	if (il_debug_level & IL_DL_RX) {
+		if (!il->rx_traffic) {
+			il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+			if (!il->rx_traffic)
+				return -ENOMEM;
+		}
+	}
+	il_reset_traffic_log(il);
+	return 0;
+}
+EXPORT_SYMBOL(il_alloc_traffic_mem);
+
+void
+il_free_traffic_mem(struct il_priv *il)
+{
+	kfree(il->tx_traffic);
+	il->tx_traffic = NULL;
+
+	kfree(il->rx_traffic);
+	il->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(il_free_traffic_mem);
+
+void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+			 struct ieee80211_hdr *header)
+{
+	__le16 fc;
+	u16 len;
+
+	if (likely(!(il_debug_level & IL_DL_TX)))
+		return;
+
+	if (!il->tx_traffic)
+		return;
+
+	fc = header->frame_control;
+	if (ieee80211_is_data(fc)) {
+		len =
+		    (length >
+		     IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+		memcpy((il->tx_traffic +
+			(il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+		       len);
+		il->tx_traffic_idx =
+		    (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+	}
+}
+EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
+
+void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+			 struct ieee80211_hdr *header)
+{
+	__le16 fc;
+	u16 len;
+
+	if (likely(!(il_debug_level & IL_DL_RX)))
+		return;
+
+	if (!il->rx_traffic)
+		return;
+
+	fc = header->frame_control;
+	if (ieee80211_is_data(fc)) {
+		len =
+		    (length >
+		     IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+		memcpy((il->rx_traffic +
+			(il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+		       len);
+		il->rx_traffic_idx =
+		    (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+	}
+}
+EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
+
+const char *
+il_get_mgmt_string(int cmd)
+{
+	switch (cmd) {
+		IL_CMD(MANAGEMENT_ASSOC_REQ);
+		IL_CMD(MANAGEMENT_ASSOC_RESP);
+		IL_CMD(MANAGEMENT_REASSOC_REQ);
+		IL_CMD(MANAGEMENT_REASSOC_RESP);
+		IL_CMD(MANAGEMENT_PROBE_REQ);
+		IL_CMD(MANAGEMENT_PROBE_RESP);
+		IL_CMD(MANAGEMENT_BEACON);
+		IL_CMD(MANAGEMENT_ATIM);
+		IL_CMD(MANAGEMENT_DISASSOC);
+		IL_CMD(MANAGEMENT_AUTH);
+		IL_CMD(MANAGEMENT_DEAUTH);
+		IL_CMD(MANAGEMENT_ACTION);
+	default:
+		return "UNKNOWN";
+
+	}
+}
+
+const char *
+il_get_ctrl_string(int cmd)
+{
+	switch (cmd) {
+		IL_CMD(CONTROL_BACK_REQ);
+		IL_CMD(CONTROL_BACK);
+		IL_CMD(CONTROL_PSPOLL);
+		IL_CMD(CONTROL_RTS);
+		IL_CMD(CONTROL_CTS);
+		IL_CMD(CONTROL_ACK);
+		IL_CMD(CONTROL_CFEND);
+		IL_CMD(CONTROL_CFENDACK);
+	default:
+		return "UNKNOWN";
+
+	}
+}
+
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+	memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+	memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats() records all
+ * MGMT, CTRL and DATA packets passing through both the TX and RX paths;
+ * debugfs can then be used to display the tx/rx stats.
+ * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT and CTRL information
+ * is recorded, but DATA packets are still counted because il_led.c needs
+ * to control the LED blinking based on the number of tx and rx data
+ * frames.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+	struct traffic_stats *stats;
+
+	if (is_tx)
+		stats = &il->tx_stats;
+	else
+		stats = &il->rx_stats;
+
+	if (ieee80211_is_mgmt(fc)) {
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_BEACON):
+			stats->mgmt[MANAGEMENT_BEACON]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ATIM):
+			stats->mgmt[MANAGEMENT_ATIM]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+			stats->mgmt[MANAGEMENT_DISASSOC]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_AUTH):
+			stats->mgmt[MANAGEMENT_AUTH]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+			stats->mgmt[MANAGEMENT_DEAUTH]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ACTION):
+			stats->mgmt[MANAGEMENT_ACTION]++;
+			break;
+		}
+	} else if (ieee80211_is_ctl(fc)) {
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+			stats->ctrl[CONTROL_BACK_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_BACK):
+			stats->ctrl[CONTROL_BACK]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+			stats->ctrl[CONTROL_PSPOLL]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_RTS):
+			stats->ctrl[CONTROL_RTS]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_CTS):
+			stats->ctrl[CONTROL_CTS]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ACK):
+			stats->ctrl[CONTROL_ACK]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_CFEND):
+			stats->ctrl[CONTROL_CFEND]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+			stats->ctrl[CONTROL_CFENDACK]++;
+			break;
+		}
+	} else {
+		/* data */
+		stats->data_cnt++;
+		stats->data_bytes += len;
+	}
+}
+EXPORT_SYMBOL(il_update_stats);
+#endif
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+	struct il_force_reset *force_reset;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return -EINVAL;
+
+	force_reset = &il->force_reset;
+	force_reset->reset_request_count++;
+	if (!external) {
+		if (force_reset->last_force_reset_jiffies &&
+		    time_after(force_reset->last_force_reset_jiffies +
+			       force_reset->reset_duration, jiffies)) {
+			D_INFO("force reset rejected\n");
+			force_reset->reset_reject_count++;
+			return -EAGAIN;
+		}
+	}
+	force_reset->reset_success_count++;
+	force_reset->last_force_reset_jiffies = jiffies;
+
+	/*
+	 * If the request is external (e.g. from debugfs), always perform
+	 * it regardless of the module parameter setting.
+	 * If the request is internal (uCode error or driver-detected
+	 * failure), the fw_restart module parameter needs to be checked
+	 * before reloading the firmware.
+	 */
+
+	if (!external && !il->cfg->mod_params->restart_fw) {
+		D_INFO("Cancel firmware reload based on "
+		       "module parameter setting\n");
+		return 0;
+	}
+
+	IL_ERR("On demand firmware reload\n");
+
+	/* Set the FW error flag -- cleared on il_down */
+	set_bit(S_FW_ERROR, &il->status);
+	wake_up(&il->wait_command_queue);
+	/*
+	 * Keep the restart process from trying to send host
+	 * commands by clearing the INIT status bit
+	 */
+	clear_bit(S_READY, &il->status);
+	queue_work(il->workqueue, &il->restart);
+
+	return 0;
+}
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			enum nl80211_iftype newtype, bool newp2p)
+{
+	struct il_priv *il = hw->priv;
+	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+	u32 modes;
+	int err;
+
+	newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+	mutex_lock(&il->mutex);
+
+	if (!ctx->vif || !il_is_ready_rf(il)) {
+		/*
+		 * Huh? But wait ... this can maybe happen when
+		 * we're in the middle of a firmware restart!
+		 */
+		err = -EBUSY;
+		goto out;
+	}
+
+	modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+	if (!(modes & BIT(newtype))) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
+	    (il->ctx.exclusive_interface_modes & BIT(newtype))) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* success */
+	il_teardown_interface(il, vif, true);
+	vif->type = newtype;
+	vif->p2p = newp2p;
+	err = il_setup_interface(il, ctx);
+	WARN_ON(err);
+	/*
+	 * We've switched internally, but submitting to the
+	 * device may have failed for some reason. Mask this
+	 * error, because otherwise mac80211 will not switch
+	 * (and set the interface type back) and we'll be
+	 * out of sync with it.
+	 */
+	err = 0;
+
+out:
+	mutex_unlock(&il->mutex);
+	return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+	struct il_tx_queue *txq = &il->txq[cnt];
+	struct il_queue *q = &txq->q;
+	unsigned long timeout;
+	int ret;
+
+	if (q->read_ptr == q->write_ptr) {
+		txq->time_stamp = jiffies;
+		return 0;
+	}
+
+	timeout =
+	    txq->time_stamp +
+	    msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+
+	if (time_after(jiffies, timeout)) {
+		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+		       il->cfg->base_params->wd_timeout);
+		ret = il_force_reset(il, false);
+		return (ret == -EAGAIN) ? 0 : 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we will
+ * discover a hung queue between timeout and 1.25*timeout.
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
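+
+/*
+ * Example (illustrative numbers): with wd_timeout = 2000 ms the watchdog
+ * tick is 500 ms, so a queue that hangs just after being checked is still
+ * detected within 2000 + 500 = 2500 ms, i.e. 1.25 * timeout.
+ */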
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stuck condition;
+ * if a queue is hung we reset the firmware. If everything is fine we just
+ * rearm the timer.
+ */
+void
+il_bg_watchdog(unsigned long data)
+{
+	struct il_priv *il = (struct il_priv *)data;
+	int cnt;
+	unsigned long timeout;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	timeout = il->cfg->base_params->wd_timeout;
+	if (timeout == 0)
+		return;
+
+	/* monitor and check for stuck cmd queue */
+	if (il_check_stuck_queue(il, il->cmd_queue))
+		return;
+
+	/* monitor and check for other stuck queues */
+	if (il_is_any_associated(il)) {
+		for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+			/* skip as we already checked the command queue */
+			if (cnt == il->cmd_queue)
+				continue;
+			if (il_check_stuck_queue(il, cnt))
+				return;
+		}
+	}
+
+	mod_timer(&il->watchdog,
+		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+	unsigned int timeout = il->cfg->base_params->wd_timeout;
+
+	if (timeout)
+		mod_timer(&il->watchdog,
+			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+	else
+		del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * the time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
+ */
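+/*
+ * Worked example (assuming TIME_UNIT is 1024 usec and ignoring the bit
+ * masks): with a beacon interval of 100 TU the interval is 102400 usec,
+ * so usec = 250000 is encoded as 2 in the extended part (two full beacon
+ * intervals) and 250000 - 2 * 102400 = 45200 usec in the internal part.
+ */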
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+	u32 quot;
+	u32 rem;
+	u32 interval = beacon_interval * TIME_UNIT;
+
+	if (!interval || !usec)
+		return 0;
+
+	quot =
+	    (usec /
+	     interval) & (il_beacon_time_mask_high(il,
+						   il->hw_params.
+						   beacon_time_tsf_bits) >> il->
+			  hw_params.beacon_time_tsf_bits);
+	rem =
+	    (usec % interval) & il_beacon_time_mask_low(il,
+							il->hw_params.
+							beacon_time_tsf_bits);
+
+	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
+
+/* base is usually what we get from the uCode with each received frame;
+ * it is the same as the HW timer counter counting down.
+ */
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+		   u32 beacon_interval)
+{
+	u32 base_low = base & il_beacon_time_mask_low(il,
+						      il->hw_params.
+						      beacon_time_tsf_bits);
+	u32 addon_low = addon & il_beacon_time_mask_low(il,
+							il->hw_params.
+							beacon_time_tsf_bits);
+	u32 interval = beacon_interval * TIME_UNIT;
+	u32 res = (base & il_beacon_time_mask_high(il,
+						   il->hw_params.
+						   beacon_time_tsf_bits)) +
+	    (addon & il_beacon_time_mask_high(il,
+					      il->hw_params.
+					      beacon_time_tsf_bits));
+
+	if (base_low > addon_low)
+		res += base_low - addon_low;
+	else if (base_low < addon_low) {
+		res += interval + base_low - addon_low;
+		res += (1 << il->hw_params.beacon_time_tsf_bits);
+	} else
+		res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+	return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int
+il_pci_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct il_priv *il = pci_get_drvdata(pdev);
+
+	/*
+	 * This function is called when the system goes into suspend state.
+	 * mac80211 will call il_mac_stop() from its suspend function first,
+	 * but since il_mac_stop() has no knowledge of who the caller is,
+	 * it will not call apm_ops.stop() to stop the DMA operation.
+	 * Call apm_ops.stop here to make sure we stop the DMA.
+	 */
+	il_apm_stop(il);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_pci_suspend);
+
+int
+il_pci_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct il_priv *il = pci_get_drvdata(pdev);
+	bool hw_rfkill = false;
+
+	/*
+	 * We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	il_enable_interrupts(il);
+
+	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+		hw_rfkill = true;
+
+	if (hw_rfkill)
+		set_bit(S_RF_KILL_HW, &il->status);
+	else
+		clear_bit(S_RF_KILL_HW, &il->status);
+
+	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_pci_resume);
+
+const struct dev_pm_ops il_pm_ops = {
+	.suspend = il_pci_suspend,
+	.resume = il_pci_resume,
+	.freeze = il_pci_suspend,
+	.thaw = il_pci_resume,
+	.poweroff = il_pci_suspend,
+	.restore = il_pci_resume,
+};
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	if (!ctx->is_active)
+		return;
+
+	ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+	if (ctx->qos_data.qos_active)
+		ctx->qos_data.def_qos_parm.qos_flags |=
+		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+	if (ctx->ht.enabled)
+		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+	      ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+
+	il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
+			      &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct il_priv *il = hw->priv;
+	const struct il_channel_info *ch_info;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = conf->channel;
+	struct il_ht_config *ht_conf = &il->current_ht_config;
+	struct il_rxon_context *ctx = &il->ctx;
+	unsigned long flags = 0;
+	int ret = 0;
+	u16 ch;
+	int scan_active = 0;
+	bool ht_changed = false;
+
+	if (WARN_ON(!il->cfg->ops->legacy))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&il->mutex);
+
+	D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+		   changed);
+
+	if (unlikely(test_bit(S_SCANNING, &il->status))) {
+		scan_active = 1;
+		D_MAC80211("scan active\n");
+	}
+
+	if (changed &
+	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+		/* mac80211 uses static SM PS mode for non-HT, which is what we want */
+		il->current_ht_config.smps = conf->smps_mode;
+
+		/*
+		 * Recalculate chain counts.
+		 *
+		 * If monitor mode is enabled then mac80211 will
+		 * set up the SM PS mode to OFF if an HT channel is
+		 * configured.
+		 */
+		if (il->cfg->ops->hcmd->set_rxon_chain)
+			il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+	}
+
+	/* During scanning, mac80211 will delay channel setting until the
+	 * scan finishes, with changed = 0.
+	 */
+	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+		if (scan_active)
+			goto set_ch_out;
+
+		ch = channel->hw_value;
+		ch_info = il_get_channel_info(il, channel->band, ch);
+		if (!il_is_channel_valid(ch_info)) {
+			D_MAC80211("leave - invalid channel\n");
+			ret = -EINVAL;
+			goto set_ch_out;
+		}
+
+		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+		    !il_is_channel_ibss(ch_info)) {
+			D_MAC80211("leave - not IBSS channel\n");
+			ret = -EINVAL;
+			goto set_ch_out;
+		}
+
+		spin_lock_irqsave(&il->lock, flags);
+
+		/* Configure HT40 channels */
+		if (ctx->ht.enabled != conf_is_ht(conf)) {
+			ctx->ht.enabled = conf_is_ht(conf);
+			ht_changed = true;
+		}
+		if (ctx->ht.enabled) {
+			if (conf_is_ht40_minus(conf)) {
+				ctx->ht.extension_chan_offset =
+				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+				ctx->ht.is_40mhz = true;
+			} else if (conf_is_ht40_plus(conf)) {
+				ctx->ht.extension_chan_offset =
+				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+				ctx->ht.is_40mhz = true;
+			} else {
+				ctx->ht.extension_chan_offset =
+				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
+				ctx->ht.is_40mhz = false;
+			}
+		} else
+			ctx->ht.is_40mhz = false;
+
+		/*
+		 * Default to no protection. Protection mode will
+		 * later be set from BSS config in il_ht_conf
+		 */
+		ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+		/* If we are switching from HT to 2.4 GHz, clear the flags
+		 * from any HT-related info, since 2.4 GHz does not
+		 * support HT. */
+		if ((le16_to_cpu(ctx->staging.channel) != ch))
+			ctx->staging.flags = 0;
+
+		il_set_rxon_channel(il, channel, ctx);
+		il_set_rxon_ht(il, ht_conf);
+
+		il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+		spin_unlock_irqrestore(&il->lock, flags);
+
+		if (il->cfg->ops->legacy->update_bcast_stations)
+			ret = il->cfg->ops->legacy->update_bcast_stations(il);
+
+set_ch_out:
+		/* The list of supported rates and rate mask can be different
+		 * for each band; since the band may have changed, reset
+		 * the rate mask to what mac80211 lists */
+		il_set_rate(il);
+	}
+
+	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+		ret = il_power_update_mode(il, false);
+		if (ret)
+			D_MAC80211("Error setting sleep level\n");
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+			   conf->power_level);
+
+		il_set_tx_power(il, conf->power_level, false);
+	}
+
+	if (!il_is_ready(il)) {
+		D_MAC80211("leave - not ready\n");
+		goto out;
+	}
+
+	if (scan_active)
+		goto out;
+
+	if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+		il_commit_rxon(il, ctx);
+	else
+		D_INFO("Not re-sending same RXON configuration.\n");
+	if (ht_changed)
+		il_update_qos(il, ctx);
+
+out:
+	D_MAC80211("leave\n");
+	mutex_unlock(&il->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long flags;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	if (WARN_ON(!il->cfg->ops->legacy))
+		return;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter\n");
+
+	spin_lock_irqsave(&il->lock, flags);
+	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* new association: get rid of the IBSS beacon skb */
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+
+	il->beacon_skb = NULL;
+
+	il->timestamp = 0;
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il_scan_cancel_timeout(il, 100);
+	if (!il_is_ready_rf(il)) {
+		D_MAC80211("leave - not ready\n");
+		mutex_unlock(&il->mutex);
+		return;
+	}
+
+	/* We are restarting the association process;
+	 * clear the RXON_FILTER_ASSOC_MSK bit.
+	 */
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il_commit_rxon(il, ctx);
+
+	il_set_rate(il);
+
+	mutex_unlock(&il->mutex);
+
+	D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_ht_config *ht_conf = &il->current_ht_config;
+	struct ieee80211_sta *sta;
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+	D_ASSOC("enter:\n");
+
+	if (!ctx->ht.enabled)
+		return;
+
+	ctx->ht.protection =
+	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	ctx->ht.non_gf_sta_present =
+	    !!(bss_conf->
+	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+	ht_conf->single_chain_sufficient = false;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		rcu_read_lock();
+		sta = ieee80211_find_sta(vif, bss_conf->bssid);
+		if (sta) {
+			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+			int maxstreams;
+
+			maxstreams =
+			    (ht_cap->mcs.
+			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+			maxstreams += 1;
+
+			if (ht_cap->mcs.rx_mask[1] == 0 &&
+			    ht_cap->mcs.rx_mask[2] == 0)
+				ht_conf->single_chain_sufficient = true;
+			if (maxstreams <= 1)
+				ht_conf->single_chain_sufficient = true;
+		} else {
+			/*
+			 * If at all, this can only happen through a race
+			 * when the AP disconnects us while we're still
+			 * setting up the connection; in that case mac80211
+			 * will soon tell us about it.
+			 */
+			ht_conf->single_chain_sufficient = true;
+		}
+		rcu_read_unlock();
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		ht_conf->single_chain_sufficient = true;
+		break;
+	default:
+		break;
+	}
+
+	D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+	/*
+	 * inform the ucode that there is no longer an
+	 * association and that no more packets should be
+	 * sent
+	 */
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	ctx->staging.assoc_id = 0;
+	il_commit_rxon(il, ctx);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long flags;
+	__le64 timestamp;
+	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+	if (!skb)
+		return;
+
+	D_MAC80211("enter\n");
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->beacon_ctx) {
+		IL_ERR("update beacon but no beacon context!\n");
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+
+	il->beacon_skb = skb;
+
+	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+	il->timestamp = le64_to_cpu(timestamp);
+
+	D_MAC80211("leave\n");
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (!il_is_ready_rf(il)) {
+		D_MAC80211("leave - RF not ready\n");
+		return;
+	}
+
+	il->cfg->ops->legacy->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+	struct il_priv *il = hw->priv;
+	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+	int ret;
+
+	if (WARN_ON(!il->cfg->ops->legacy))
+		return;
+
+	D_MAC80211("changes = 0x%X\n", changes);
+
+	mutex_lock(&il->mutex);
+
+	if (!il_is_alive(il)) {
+		mutex_unlock(&il->mutex);
+		return;
+	}
+
+	if (changes & BSS_CHANGED_QOS) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&il->lock, flags);
+		ctx->qos_data.qos_active = bss_conf->qos;
+		il_update_qos(il, ctx);
+		spin_unlock_irqrestore(&il->lock, flags);
+	}
+
+	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+		/*
+		 * the add_interface code must make sure we only ever
+		 * have a single interface that could be beaconing at
+		 * any time.
+		 */
+		if (vif->bss_conf.enable_beacon)
+			il->beacon_ctx = ctx;
+		else
+			il->beacon_ctx = NULL;
+	}
+
+	if (changes & BSS_CHANGED_BSSID) {
+		D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+		/*
+		 * If there is currently a HW scan going on in the
+		 * background then we need to cancel it else the RXON
+		 * below/in post_associate will fail.
+		 */
+		if (il_scan_cancel_timeout(il, 100)) {
+			IL_WARN("Aborted scan still in progress after 100ms\n");
+			D_MAC80211("leaving - scan abort failed.\n");
+			mutex_unlock(&il->mutex);
+			return;
+		}
+
+		/* mac80211 only sets assoc when in STATION mode */
+		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+			       ETH_ALEN);
+
+			/* currently needed in a few places */
+			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+		} else {
+			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+		}
+
+	}
+
+	/*
+	 * This needs to be after setting the BSSID in case
+	 * mac80211 decides to do both changes at once because
+	 * it will invoke post_associate.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+		il_beacon_update(hw, vif);
+
+	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+		if (bss_conf->use_short_preamble)
+			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	}
+
+	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+		if (bss_conf->use_cts_prot)
+			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+	}
+
+	if (changes & BSS_CHANGED_BASIC_RATES) {
+		/* XXX use this information
+		 *
+		 * To do that, remove code from il_set_rate() and put something
+		 * like this here:
+		 *
+		 *	if (A-band)
+		 *		ctx->staging.ofdm_basic_rates =
+		 *			bss_conf->basic_rates;
+		 *	else
+		 *		ctx->staging.ofdm_basic_rates =
+		 *			bss_conf->basic_rates >> 4;
+		 *	ctx->staging.cck_basic_rates =
+		 *		bss_conf->basic_rates & 0xF;
+		 */
+	}
+
+	if (changes & BSS_CHANGED_HT) {
+		il_ht_conf(il, vif);
+
+		if (il->cfg->ops->hcmd->set_rxon_chain)
+			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+	}
+
+	if (changes & BSS_CHANGED_ASSOC) {
+		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+		if (bss_conf->assoc) {
+			il->timestamp = bss_conf->timestamp;
+
+			if (!il_is_rfkill(il))
+				il->cfg->ops->legacy->post_associate(il);
+		} else
+			il_set_no_assoc(il, vif);
+	}
+
+	if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+		D_MAC80211("Changes (%#x) while associated\n", changes);
+		ret = il_send_rxon_assoc(il, ctx);
+		if (!ret) {
+			/* Sync active_rxon with latest change. */
+			memcpy((void *)&ctx->active, &ctx->staging,
+			       sizeof(struct il_rxon_cmd));
+		}
+	}
+
+	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+		if (vif->bss_conf.enable_beacon) {
+			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+			       ETH_ALEN);
+			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+			il->cfg->ops->legacy->config_ap(il);
+		} else
+			il_set_no_assoc(il, vif);
+	}
+
+	if (changes & BSS_CHANGED_IBSS) {
+		ret =
+		    il->cfg->ops->legacy->manage_ibss_station(il, vif,
+							      bss_conf->
+							      ibss_joined);
+		if (ret)
+			IL_ERR("failed to %s IBSS station %pM\n",
+			       bss_conf->ibss_joined ? "add" : "remove",
+			       bss_conf->bssid);
+	}
+
+	mutex_unlock(&il->mutex);
+
+	D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+	struct il_priv *il = data;
+	u32 inta, inta_mask;
+	u32 inta_fh;
+	unsigned long flags;
+	if (!il)
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 *    back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here. */
+	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
+	_il_wr(il, CSR_INT_MASK, 0x00000000);
+
+	/* Discover which interrupts are active/pending */
+	inta = _il_rd(il, CSR_INT);
+	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	if (!inta && !inta_fh) {
+		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+		goto none;
+	}
+
+	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+		/* Hardware disappeared. It might have already raised
+		 * an interrupt */
+		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		goto unplugged;
+	}
+
+	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+	      inta_fh);
+
+	inta &= ~CSR_INT_BIT_SCD;
+
+	/* il_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta || inta_fh))
+		tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+	spin_unlock_irqrestore(&il->lock, flags);
+	return IRQ_HANDLED;
+
+none:
+	/* Re-enable interrupts here since we don't have anything to service,
+	 * but only if they were disabled by this irq handler. */
+	if (test_bit(S_INT_ENABLED, &il->status))
+		il_enable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+	return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ *  il_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
+ *  function.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+		     __le16 fc, __le32 *tx_flags)
+{
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+		*tx_flags |= TX_CMD_FLG_RTS_MSK;
+		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+		if (!ieee80211_is_mgmt(fc))
+			return;
+
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_AUTH):
+		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+			*tx_flags |= TX_CMD_FLG_CTS_MSK;
+			break;
+		}
+	} else if (info->control.rates[0].
+		   flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+		*tx_flags |= TX_CMD_FLG_CTS_MSK;
+		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+	}
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 0000000..abfa388
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>		/* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n)		((4-(n))&0x3)
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY   110	/* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon stats being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
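+/*
+ * For example, with a threshold of 500 a 1500-byte data MPDU is sent with
+ * RTS protection while a 300-byte one is not; 0 forces RTS for every
+ * data/management frame and a value above the max MSDU size disables RTS.
+ */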
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE		  2304U
+#define MAX_MPDU_SIZE		  2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+struct il_rx_buf {
+	dma_addr_t page_dma;
+	struct page *page;
+	struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+	/* only for SYNC commands, iff the reply skb is wanted */
+	struct il_host_cmd *source;
+	/*
+	 * only for ASYNC commands
+	 * (which is somewhat stupid -- look at common.c for instance
+	 * which duplicates a bunch of code because the callback isn't
+	 * invoked for SYNC commands, if it were and its result passed
+	 * through it would be simpler...)
+	 */
+	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+			  struct il_rx_pkt *pkt);
+
+	/* The CMD_SIZE_HUGE flag bit indicates that the command
+	 * structure is stored at the end of the shared queue memory. */
+	u32 flags;
+
+	 DEFINE_DMA_UNMAP_ADDR(mapping);
+	 DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+	int n_bd;		/* number of BDs in this queue */
+	int write_ptr;		/* 1-st empty entry (idx) host_w */
+	int read_ptr;		/* last used entry (idx) host_r */
+	/* use for monitoring and recovering the stuck queue */
+	dma_addr_t dma_addr;	/* physical addr for BD's */
+	int n_win;		/* safe queue win */
+	u32 id;
+	int low_mark;		/* low watermark, resume queue if free
+				 * space more than this */
+	int high_mark;		/* high watermark, stop queue if free
+				 * space less than this */
+};
+
+/* One for each TFD */
+struct il_tx_info {
+	struct sk_buff *skb;
+	struct il_rxon_context *ctx;
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+	struct il_queue q;
+	void *tfds;
+	struct il_device_cmd **cmd;
+	struct il_cmd_meta *meta;
+	struct il_tx_info *txb;
+	unsigned long time_stamp;
+	u8 need_update;
+	u8 sched_retry;
+	u8 active;
+	u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum of 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT	5000	/* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT		10	/* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT	1000	/* number of attempts (not time) */
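+
+/*
+ * Illustrative sketch of the access pattern described above (the real
+ * implementation is il_eeprom_query16()); error handling omitted:
+ *
+ *	_il_wr(il, CSR_EEPROM_REG, addr << 1);
+ *	ret = _il_poll_bit(il, CSR_EEPROM_REG,
+ *			   CSR_EEPROM_REG_READ_VALID_MSK,
+ *			   CSR_EEPROM_REG_READ_VALID_MSK,
+ *			   IL_EEPROM_ACCESS_TIMEOUT);
+ *	val = _il_rd(il, CSR_EEPROM_REG) >> 16;
+ */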
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ *        It only indicates that 20 MHz channel use is supported; HT40 channel
+ *        usage is indicated by a separate set of regulatory flags for each
+ *        HT40 channel pair.
+ *
+ * NOTE:  Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+	EEPROM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
+	EEPROM_CHANNEL_IBSS = (1 << 1),	/* usable as an IBSS channel */
+	/* Bit 2 Reserved */
+	EEPROM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
+	EEPROM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
+	EEPROM_CHANNEL_WIDE = (1 << 5),	/* 20 MHz channel okay */
+	/* Bit 6 Reserved (was Narrow Channel) */
+	EEPROM_CHANNEL_DFS = (1 << 7),	/* dynamic freq selection candidate */
+};
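+
+/*
+ * Example: a channel whose EEPROM flags are 0x2B (VALID | IBSS | ACTIVE |
+ * WIDE) satisfies the IBSS/AP rule above, while 0x19 (VALID | ACTIVE |
+ * RADAR) does not, since RADAR detection is required but not supported.
+ */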
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
+	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION	(0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS      (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS          (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS   (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION    (5)
+#define EEPROM_4965_EEPROM_VERSION	(0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6)	/* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8)	/* 48  bytes */
+#define EEPROM_4965_BOARD_REVISION             (2*0x4F)	/* 2 bytes */
+#define EEPROM_4965_BOARD_PBA                  (2*0x56+1)	/* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna).  EEPROM contains:
+ *
+ * 1)  Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2)  Gain table idx used to achieve the target measurement power.
+ *     This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4)  RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+	u8 temperature;		/* Device temperature (Celsius) */
+	u8 gain_idx;		/* Index into gain table */
+	u8 actual_pow;		/* Measured RF output power, half-dBm */
+	s8 pa_det;		/* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel.  EEPROM contains:
+ *
+ * 1)  Channel number measured
+ *
+ * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
+ *     (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+	u8 ch_num;
+	struct il_eeprom_calib_measure
+	    measurements[EEPROM_TX_POWER_TX_CHAINS]
+	    [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1)  First and last channels within range of the subband.  "0" values
+ *     indicate that this sample set is not being used.
+ *
+ * 2)  Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+	u8 ch_from;		/* channel number of lowest channel in subband */
+	u8 ch_to;		/* channel number of highest channel in subband */
+	struct il_eeprom_calib_ch_info ch1;
+	struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info.  EEPROM contains:
+ *
+ * 1)  Factory-measured saturation power levels (maximum levels at which
+ *     tx power amplifier can output a signal without too much distortion).
+ *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
+ *     values apply to all channels within each of the bands.
+ *
+ * 2)  Factory-measured power supply voltage level.  This is assumed to be
+ *     constant (i.e. same value applies to all channels/bands) while the
+ *     factory measurements are being made.
+ *
+ * 3)  Up to 8 sets of factory-measured txpower calibration values.
+ *     These are for different frequency ranges, since txpower gain
+ *     characteristics of the analog radio circuitry vary with frequency.
+ *
+ *     Not all sets need to be filled with data;
+ *     struct il_eeprom_calib_subband_info contains range of channels
+ *     (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+	u8 saturation_power24;	/* half-dBm (e.g. "34" = 17 dBm) */
+	u8 saturation_power52;	/* half-dBm */
+	__le16 voltage;		/* signed */
+	struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID                    (2*0x08)	/* 2 bytes */
+#define EEPROM_MAC_ADDRESS                  (2*0x15)	/* 6  bytes */
+#define EEPROM_BOARD_REVISION               (2*0x35)	/* 2  bytes */
+#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1)	/* 9  bytes */
+#define EEPROM_VERSION                      (2*0x44)	/* 2  bytes */
+#define EEPROM_SKU_CAP                      (2*0x45)	/* 2  bytes */
+#define EEPROM_OEM_MODE                     (2*0x46)	/* 2  bytes */
+#define EEPROM_WOWLAN_MODE                  (2*0x47)	/* 2  bytes */
+#define EEPROM_RADIO_CONFIG                 (2*0x48)	/* 2  bytes */
+#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)	/* 2  bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)	/* bits 0-1   */
+#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3)	/* bits 2-3   */
+#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3)	/* bits 4-5   */
+#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3)	/* bits 6-7   */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF)	/* bits 8-11  */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF)	/* bits 12-15 */
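+
+/*
+ * Example (hypothetical value): a radio config word of 0x0145 decodes via
+ * the masks above to TYPE = 1, STEP = 1, DASH = 0, PNUM = 1, TX_ANT = 0x1
+ * and RX_ANT = 0x0.
+ */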
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID            (2*0x60)	/* 4  bytes */
+#define EEPROM_REGULATORY_BAND_1            (2*0x62)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)	/* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2            (2*0x71)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)	/* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3            (2*0x7F)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)	/* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4            (2*0x8C)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)	/* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5            (2*0x98)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)	/* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
+ *        control channel to which to tune.  RXON also specifies whether the
+ *        control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)	/* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)	/* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40			(0)
+
+struct il_eeprom_ops {
+	const u32 regulatory_bands[7];
+	int (*acquire_semaphore) (struct il_priv *il);
+	void (*release_semaphore) (struct il_priv *il);
+};
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+						  enum ieee80211_band band,
+						  u16 channel);
+
+#define IL_NUM_SCAN_RATES         (2)
+
+struct il4965_channel_tgd_info {
+	u8 type;
+	s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+	s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+	/* maximum power level to prevent clipping for each rate, derived by
+	 *   us from this band's saturation power in EEPROM */
+	const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+	struct il3945_tx_power tpc;	/* actual radio and DSP gain settings */
+	s8 power_table_idx;	/* actual (compensated) idx into gain table */
+	s8 base_power_idx;	/* gain idx for power at factory temp. */
+	s8 requested_power;	/* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+	struct il3945_tx_power tpc;	/* actual radio and DSP gain settings */
+	s8 power_table_idx;	/* actual (compensated) idx into gain table */
+	s8 requested_power;	/* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ *     with one another!
+ */
+struct il_channel_info {
+	struct il4965_channel_tgd_info tgd;
+	struct il4965_channel_tgh_info tgh;
+	struct il_eeprom_channel eeprom;	/* EEPROM regulatory limit */
+	struct il_eeprom_channel ht40_eeprom;	/* EEPROM regulatory limit for
+						 * HT40 channel */
+
+	u8 channel;		/* channel number */
+	u8 flags;		/* flags copied from EEPROM */
+	s8 max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
+	s8 curr_txpow;		/* (dBm) regulatory/spectrum/user (not h/w) limit */
+	s8 min_power;		/* always 0 */
+	s8 scan_power;		/* (dBm) regul. eeprom, direct scans, any rate */
+
+	u8 group_idx;		/* 0-4, maps channel to group1/2/3/4/5 */
+	u8 band_idx;		/* 0-4, maps channel to band1/2/3/4/5 */
+	enum ieee80211_band band;
+
+	/* HT40 channel info */
+	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
+	u8 ht40_flags;		/* flags copied from EEPROM */
+	u8 ht40_extension_channel;	/* HT_IE_EXT_CHANNEL_* */
+
+	/* Radio/DSP gain settings for each "normal" data Tx rate.
+	 * These include, in addition to RF and DSP gain, a few fields for
+	 *   remembering/modifying gain settings (idxes). */
+	struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+	/* Radio/DSP gain settings for each scan rate, for directed scans. */
+	struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK		0	/* shared */
+#define IL_TX_FIFO_BE		1
+#define IL_TX_FIFO_VI		2	/* shared */
+#define IL_TX_FIFO_VO		3
+#define IL_TX_FIFO_UNUSED	-1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES	10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM	4
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+	union {
+		struct ieee80211_hdr frame;
+		struct il_tx_beacon_cmd beacon;
+		u8 raw[IEEE80211_FRAME_LEN];
+		u8 cmd[360];
+	} u;
+	struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
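+
+/*
+ * For example (illustrative): for a sequence control field of 0x1234 the
+ * fragment number in the low 4 bits is dropped, so SEQ_TO_SN(0x1234) is
+ * 0x123 and SN_TO_SEQ(0x123) is 0x1230; MAX_SN is 0xFFF.
+ */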
+
+enum {
+	CMD_SYNC = 0,
+	CMD_SIZE_NORMAL = 0,
+	CMD_NO_SKB = 0,
+	CMD_SIZE_HUGE = (1 << 0),
+	CMD_ASYNC = (1 << 1),
+	CMD_WANT_SKB = (1 << 2),
+	CMD_MAPPED = (1 << 3),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+	struct il_cmd_header hdr;	/* uCode API */
+	union {
+		u32 flags;
+		u8 val8;
+		u16 val16;
+		u32 val32;
+		struct il_tx_cmd tx;
+		u8 payload[DEF_CMD_PAYLOAD_SIZE];
+	} __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+	const void *data;
+	unsigned long reply_page;
+	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+			  struct il_rx_pkt *pkt);
+	u32 flags;
+	u16 len;
+	u8 id;
+};
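+
+/*
+ * Illustrative sketch of building a host command with the struct above;
+ * the command id and payload here are placeholders, not defined in this
+ * header:
+ *
+ *	struct il_host_cmd cmd = {
+ *		.id = <one of the C_* command ids>,
+ *		.data = &payload,
+ *		.len = sizeof(payload),
+ *		.flags = CMD_SYNC,
+ *	};
+ *	ret = il_send_cmd_sync(il, &cmd);
+ */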
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: backing store of il_rx_buf entries for the queue and free/used lists
+ * @queue: maps each receive buffer descriptor slot to its il_rx_buf
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual: last write idx actually written to the device (multiple of 8)
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the members of this queue
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+	__le32 *bd;
+	dma_addr_t bd_dma;
+	struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct il_rx_buf *queue[RX_QUEUE_SIZE];
+	u32 read;
+	u32 write;
+	u32 free_count;
+	u32 write_actual;
+	struct list_head rx_free;
+	struct list_head rx_used;
+	int need_update;
+	struct il_rb_status *rb_stts;
+	dma_addr_t rb_stts_dma;
+	spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN         8
+
+#define MAX_TID_COUNT        9
+
+#define IL_INVALID_RATE     0xFF
+#define IL_INVALID_VALUE    -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: Bitmap of frames pending ACK in the Tx win, one bit per frame
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA).  This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+	u16 txq_id;
+	u16 frame_count;
+	u16 wait_for_ba;
+	u16 start_idx;
+	u64 bitmap;
+	u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+	u8 state;
+};
+
+struct il_tid_data {
+	u16 seq_number;		/* 4965 only */
+	u16 tfds_in_queue;
+	struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+	u32 cipher;
+	int keylen;
+	u8 keyidx;
+	u8 key[32];
+};
+
+union il_ht_rate_supp {
+	u16 rates;
+	struct {
+		u8 siso_rate;
+		u8 mimo_rate;
+	};
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
+
+struct il_ht_config {
+	bool single_chain_sufficient;
+	enum ieee80211_smps_mode smps;	/* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+	int qos_active;
+	struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+	struct il_addsta_cmd sta;
+	struct il_tid_data tid[MAX_TID_COUNT];
+	u8 used, ctxid;
+	struct il_hw_key keyinfo;
+	struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+	struct il_rxon_context *ctx;
+	u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+	struct il_rxon_context *ctx;
+	u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+	void *v_addr;		/* access by driver */
+	dma_addr_t p_addr;	/* access by card's busmaster DMA */
+	u32 len;		/* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+	__le32 ver;		/* major/minor/API/serial */
+	struct {
+		__le32 inst_size;	/* bytes of runtime code */
+		__le32 data_size;	/* bytes of runtime data */
+		__le32 init_size;	/* bytes of init code */
+		__le32 init_data_size;	/* bytes of init data */
+		__le32 boot_size;	/* bytes of bootstrap code */
+		u8 data[0];	/* in same order as sizes */
+	} v1;
+};
+
+struct il4965_ibss_seq {
+	u8 mac[ETH_ALEN];
+	u16 seq_num;
+	u16 frag_num;
+	unsigned long packet_time;
+	struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+	u16 min_nrg_cck;
+	u16 max_nrg_cck;
+
+	u16 nrg_th_cck;
+	u16 nrg_th_ofdm;
+
+	u16 auto_corr_min_ofdm;
+	u16 auto_corr_min_ofdm_mrc;
+	u16 auto_corr_min_ofdm_x1;
+	u16 auto_corr_min_ofdm_mrc_x1;
+
+	u16 auto_corr_max_ofdm;
+	u16 auto_corr_max_ofdm_mrc;
+	u16 auto_corr_max_ofdm_x1;
+	u16 auto_corr_max_ofdm_mrc_x1;
+
+	u16 auto_corr_max_cck;
+	u16 auto_corr_max_cck_mrc;
+	u16 auto_corr_min_cck;
+	u16 auto_corr_min_cck_mrc;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+/**
+ * struct il_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx_chains_num/@rx_chains_num: Number of TX/RX chains
+ * @valid_tx_ant/@valid_rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations: max number of station table entries
+ * @ht40_channel: bitmap of bands where 40MHz channels are possible,
+ *	BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @max_beacon_itrvl: maximum beacon interval, in units of 1024 ms
+ * @max_inst_size/@max_data_size/@max_bsm_size: maximum uCode section sizes
+ * @ct_kill_threshold: temperature threshold, in hw-dependent units
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values (struct il_sensitivity_ranges)
+ */
+struct il_hw_params {
+	u8 max_txq_num;
+	u8 dma_chnl_num;
+	u16 scd_bc_tbls_size;
+	u32 tfd_size;
+	u8 tx_chains_num;
+	u8 rx_chains_num;
+	u8 valid_tx_ant;
+	u8 valid_rx_ant;
+	u16 max_rxq_size;
+	u16 max_rxq_log;
+	u32 rx_page_order;
+	u32 rx_wrt_ptr_reg;
+	u8 max_stations;
+	u8 ht40_channel;
+	u8 max_beacon_itrvl;	/* in 1024 ms */
+	u32 max_inst_size;
+	u32 max_data_size;
+	u32 max_bsm_size;
+	u32 ct_kill_threshold;	/* value in hw-dependent units */
+	u16 beacon_time_tsf_bits;
+	const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE:  The implementations of these functions are not hardware specific,
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * il_         <-- Is part of iwlwifi
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il4965_bg_      <-- Called from work queue context
+ * il4965_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+extern int il_queue_space(const struct il_queue *q);
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+	if (q->write_ptr >= q->read_ptr)
+		return i >= q->read_ptr && i < q->write_ptr;
+
+	return !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+	/*
+	 * This is for the init calibration result and the scan command,
+	 * which require a buffer larger than TFD_MAX_PAYLOAD_SIZE and
+	 * therefore use the big buffer at the end of the command array.
+	 */
+	if (is_huge)
+		return q->n_win;	/* must be power of 2 */
+
+	/* Otherwise, use normal size buffers */
+	return idx & (q->n_win - 1);
+}
+
+struct il_dma_ptr {
+	dma_addr_t dma;
+	void *addr;
+	size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO     0
+#define IL_OPERATION_MODE_HT_ONLY  1
+#define IL_OPERATION_MODE_MIXED    2
+#define IL_OPERATION_MODE_20MHZ    3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE		0xFFFF
+#define IL4965_CAL_NUM_BEACONS		20
+#define IL_CAL_NUM_BEACONS		16
+#define MAXIMUM_ALLOWED_PATHLOSS	15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM  50
+#define MIN_FA_OFDM  5
+#define MAX_FA_CCK   50
+#define MIN_FA_CCK   5
+
+#define AUTO_CORR_STEP_OFDM       1
+
+#define AUTO_CORR_STEP_CCK     3
+#define AUTO_CORR_MAX_TH_CCK   160
+
+#define NRG_DIFF               2
+#define NRG_STEP_CCK           2
+#define NRG_MARGIN             8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
+
+#define CHAIN_A             0
+#define CHAIN_B             1
+#define CHAIN_C             2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER			0xFF00
+#define IN_BAND_FILTER			0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L     20
+#define NUM_RX_CHAINS           3
+
+enum il4965_false_alarm_state {
+	IL_FA_TOO_MANY = 0,
+	IL_FA_TOO_FEW = 1,
+	IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+	IL_CHAIN_NOISE_ALIVE = 0,	/* must be 0 */
+	IL_CHAIN_NOISE_ACCUMULATE,
+	IL_CHAIN_NOISE_CALIBRATED,
+	IL_CHAIN_NOISE_DONE,
+};
+
+enum il4965_calib_enabled_state {
+	IL_CALIB_DISABLED = 0,	/* must be 0 */
+	IL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum il_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum il_calib {
+	IL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct il_calib_result {
+	void *buf;
+	size_t buf_len;
+};
+
+enum ucode_type {
+	UCODE_NONE = 0,
+	UCODE_INIT,
+	UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+	u32 auto_corr_ofdm;
+	u32 auto_corr_ofdm_mrc;
+	u32 auto_corr_ofdm_x1;
+	u32 auto_corr_ofdm_mrc_x1;
+	u32 auto_corr_cck;
+	u32 auto_corr_cck_mrc;
+
+	u32 last_bad_plcp_cnt_ofdm;
+	u32 last_fa_cnt_ofdm;
+	u32 last_bad_plcp_cnt_cck;
+	u32 last_fa_cnt_cck;
+
+	u32 nrg_curr_state;
+	u32 nrg_prev_state;
+	u32 nrg_value[10];
+	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+	u32 nrg_silence_ref;
+	u32 nrg_energy_idx;
+	u32 nrg_silence_idx;
+	u32 nrg_th_cck;
+	s32 nrg_auto_corr_silence_diff;
+	u32 num_in_cck_no_fa;
+	u32 nrg_th_ofdm;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+	u32 active_chains;
+	u32 chain_noise_a;
+	u32 chain_noise_b;
+	u32 chain_noise_c;
+	u32 chain_signal_a;
+	u32 chain_signal_b;
+	u32 chain_signal_c;
+	u16 beacon_count;
+	u8 disconn_array[NUM_RX_CHAINS];
+	u8 delta_gain_code[NUM_RX_CHAINS];
+	u8 radio_write;
+	u8 state;
+};
+
+#define	EEPROM_SEM_TIMEOUT 10	/* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES	(256)
+#define IL_TRAFFIC_ENTRY_SIZE  (64)
+
+enum {
+	MEASUREMENT_READY = (1 << 0),
+	MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+	u32 hw;
+	u32 sw;
+	u32 err_code;
+	u32 sch;
+	u32 alive;
+	u32 rfkill;
+	u32 ctkill;
+	u32 wakeup;
+	u32 rx;
+	u32 handlers[IL_CN_MAX];
+	u32 tx;
+	u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+	MANAGEMENT_ASSOC_REQ = 0,
+	MANAGEMENT_ASSOC_RESP,
+	MANAGEMENT_REASSOC_REQ,
+	MANAGEMENT_REASSOC_RESP,
+	MANAGEMENT_PROBE_REQ,
+	MANAGEMENT_PROBE_RESP,
+	MANAGEMENT_BEACON,
+	MANAGEMENT_ATIM,
+	MANAGEMENT_DISASSOC,
+	MANAGEMENT_AUTH,
+	MANAGEMENT_DEAUTH,
+	MANAGEMENT_ACTION,
+	MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+	CONTROL_BACK_REQ = 0,
+	CONTROL_BACK,
+	CONTROL_PSPOLL,
+	CONTROL_RTS,
+	CONTROL_CTS,
+	CONTROL_ACK,
+	CONTROL_CFEND,
+	CONTROL_CFENDACK,
+	CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	u32 mgmt[MANAGEMENT_MAX];
+	u32 ctrl[CONTROL_MAX];
+	u32 data_cnt;
+	u64 data_bytes;
+#endif
+};
+
+/*
+ * Host interrupt timeout value, used to set the interrupt coalescing timer.
+ * CSR_INT_COALESCING is an 8-bit register in units of 32 usecs.
+ *
+ * The default interrupt coalescing timer is 64 x 32 = 2048 usecs.
+ * The default interrupt coalescing calibration timer is 16 x 32 = 512 usecs.
+ */
+#define IL_HOST_INT_TIMEOUT_MAX	(0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF	(0x40)
+#define IL_HOST_INT_TIMEOUT_MIN	(0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX	(0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF	(0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN	(0x0)
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IL_DEF_WD_TIMEOUT	(2000)
+#define IL_LONG_WD_TIMEOUT	(10000)
+#define IL_MAX_WD_TIMEOUT	(120000)
+
+struct il_force_reset {
+	int reset_request_count;
+	int reset_success_count;
+	int reset_reject_count;
+	unsigned long reset_duration;
+	unsigned long last_force_reset_jiffies;
+};
+
+/* Extended beacon time format bit layout */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0  - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS	24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS	22
+
+struct il_rxon_context {
+	struct ieee80211_vif *vif;
+
+	const u8 *ac_to_fifo;
+	const u8 *ac_to_queue;
+	u8 mcast_queue;
+
+	/*
+	 * We could use the vif to indicate active, but we
+	 * also need it to be active during disabling when
+	 * we already removed the vif for type setting.
+	 */
+	bool always_active, is_active;
+
+	bool ht_need_multiple_chains;
+
+	int ctxid;
+
+	u32 interface_modes, exclusive_interface_modes;
+	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+	/*
+	 * We declare this const so it can only be
+	 * changed via explicit cast within the
+	 * routines that actually update the physical
+	 * hardware.
+	 */
+	const struct il_rxon_cmd active;
+	struct il_rxon_cmd staging;
+
+	struct il_rxon_time_cmd timing;
+
+	struct il_qos_info qos_data;
+
+	u8 bcast_sta_id, ap_sta_id;
+
+	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+	u8 qos_cmd;
+	u8 wep_key_cmd;
+
+	struct il_wep_key wep_keys[WEP_KEYS_MAX];
+	u8 key_mapping_keys;
+
+	__le32 station_flags;
+
+	struct {
+		bool non_gf_sta_present;
+		u8 protection;
+		bool enabled, is_40mhz;
+		u8 extension_chan_offset;
+	} ht;
+};
+
+struct il_power_mgr {
+	struct il_powertable_cmd sleep_cmd;
+	struct il_powertable_cmd sleep_cmd_next;
+	int debug_sleep_level_override;
+	bool pci_pm;
+};
+
+struct il_priv {
+
+	/* ieee device used by generic ieee processing code */
+	struct ieee80211_hw *hw;
+	struct ieee80211_channel *ieee_channels;
+	struct ieee80211_rate *ieee_rates;
+	struct il_cfg *cfg;
+
+	/* temporary frame storage list */
+	struct list_head free_frames;
+	int frames_count;
+
+	enum ieee80211_band band;
+	int alloc_rxb_page;
+
+	void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+				     struct il_rx_buf *rxb);
+
+	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+	/* spectrum measurement report caching */
+	struct il_spectrum_notification measure_report;
+	u8 measurement_status;
+
+	/* ucode beacon time */
+	u32 ucode_beacon_time;
+	int missed_beacon_threshold;
+
+	/* track IBSS manager (last beacon) status */
+	u32 ibss_manager;
+
+	/* force reset */
+	struct il_force_reset force_reset;
+
+	/* we allocate array of il_channel_info for NIC's valid channels.
+	 *    Access via channel # using indirect idx array */
+	struct il_channel_info *channel_info;	/* channel info array */
+	u8 channel_count;	/* # of channels */
+
+	/* thermal calibration */
+	s32 temperature;	/* in kelvin */
+	s32 last_temperature;
+
+	/* init calibration results */
+	struct il_calib_result calib_results[IL_CALIB_MAX];
+
+	/* Scan related variables */
+	unsigned long scan_start;
+	unsigned long scan_start_tsf;
+	void *scan_cmd;
+	enum ieee80211_band scan_band;
+	struct cfg80211_scan_request *scan_request;
+	struct ieee80211_vif *scan_vif;
+	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+	u8 mgmt_tx_ant;
+
+	/* spinlock */
+	spinlock_t lock;	/* protect general shared data */
+	spinlock_t hcmd_lock;	/* protect hcmd */
+	spinlock_t reg_lock;	/* protect hw register access */
+	struct mutex mutex;
+
+	/* basic pci-network driver stuff */
+	struct pci_dev *pci_dev;
+
+	/* pci hardware address support */
+	void __iomem *hw_base;
+	u32 hw_rev;
+	u32 hw_wa_rev;
+	u8 rev_id;
+
+	/* command queue number */
+	u8 cmd_queue;
+
+	/* max number of station keys */
+	u8 sta_key_max_num;
+
+	/* EEPROM MAC addresses */
+	struct mac_address addresses[1];
+
+	/* uCode images, save to reload in case of failure */
+	int fw_idx;		/* firmware we're trying to load */
+	u32 ucode_ver;		/* version of ucode, copy of
+				   il_ucode.ver */
+	struct fw_desc ucode_code;	/* runtime inst */
+	struct fw_desc ucode_data;	/* runtime data original */
+	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
+	struct fw_desc ucode_init;	/* initialization inst */
+	struct fw_desc ucode_init_data;	/* initialization data */
+	struct fw_desc ucode_boot;	/* bootstrap inst */
+	enum ucode_type ucode_type;
+	u8 ucode_write_complete;	/* the image write is complete */
+	char firmware_name[25];
+
+	struct il_rxon_context ctx;
+
+	__le16 switch_channel;
+
+	/* 1st responses from initialize and runtime uCode images.
+	 * _4965's initialize alive response contains some calibration data. */
+	struct il_init_alive_resp card_alive_init;
+	struct il_alive_resp card_alive;
+
+	u16 active_rate;
+
+	u8 start_calib;
+	struct il_sensitivity_data sensitivity_data;
+	struct il_chain_noise_data chain_noise_data;
+	__le16 sensitivity_tbl[HD_TBL_SIZE];
+
+	struct il_ht_config current_ht_config;
+
+	/* Rate scaling data */
+	u8 retry_rate;
+
+	wait_queue_head_t wait_command_queue;
+
+	int activity_timer_active;
+
+	/* Rx and Tx DMA processing queues */
+	struct il_rx_queue rxq;
+	struct il_tx_queue *txq;
+	unsigned long txq_ctx_active_msk;
+	struct il_dma_ptr kw;	/* keep warm address */
+	struct il_dma_ptr scd_bc_tbls;
+
+	u32 scd_base_addr;	/* scheduler sram base address */
+
+	unsigned long status;
+
+	/* counts mgmt, ctl, and data packets */
+	struct traffic_stats tx_stats;
+	struct traffic_stats rx_stats;
+
+	/* counts interrupts */
+	struct isr_stats isr_stats;
+
+	struct il_power_mgr power_data;
+
+	/* context information */
+	u8 bssid[ETH_ALEN];	/* used only on 3945 but filled by core */
+
+	/* station table variables */
+
+	/* Note: if lock and sta_lock are needed, lock must be acquired first */
+	spinlock_t sta_lock;
+	int num_stations;
+	struct il_station_entry stations[IL_STATION_COUNT];
+	unsigned long ucode_key_table;
+
+	/* queue refcounts */
+#define IL_MAX_HW_QUEUES	32
+	unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+	/* for each AC */
+	atomic_t queue_stop_count[4];
+
+	/* Indication if ieee80211_ops->open has been called */
+	u8 is_open;
+
+	u8 mac80211_registered;
+
+	/* eeprom -- this is in the card's little endian byte order */
+	u8 *eeprom;
+	struct il_eeprom_calib_info *calib_info;
+
+	enum nl80211_iftype iw_mode;
+
+	/* Last Rx'd beacon timestamp */
+	u64 timestamp;
+
+	union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+		struct {
+			void *shared_virt;
+			dma_addr_t shared_phys;
+
+			struct delayed_work thermal_periodic;
+			struct delayed_work rfkill_poll;
+
+			struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+			struct il3945_notif_stats accum_stats;
+			struct il3945_notif_stats delta_stats;
+			struct il3945_notif_stats max_delta;
+#endif
+
+			u32 sta_supp_rates;
+			int last_rx_rssi;	/* From Rx packet stats */
+
+			/* Rx'd packet timing information */
+			u32 last_beacon_time;
+			u64 last_tsf;
+
+			/*
+			 * each calibration channel group in the
+			 * EEPROM has a derived clip setting for
+			 * each rate.
+			 */
+			const struct il3945_clip_group clip_groups[5];
+
+		} _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+		struct {
+			struct il_rx_phy_res last_phy_res;
+			bool last_phy_res_valid;
+
+			struct completion firmware_loading_complete;
+
+			/*
+			 * The chain noise reset and gain commands are the
+			 * two extra calibration commands that follow the
+			 * standard phy calibration commands.
+			 */
+			u8 phy_calib_chain_noise_reset_cmd;
+			u8 phy_calib_chain_noise_gain_cmd;
+
+			struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+			struct il_notif_stats accum_stats;
+			struct il_notif_stats delta_stats;
+			struct il_notif_stats max_delta;
+#endif
+
+		} _4965;
+#endif
+	};
+
+	struct il_hw_params hw_params;
+
+	u32 inta_mask;
+
+	struct workqueue_struct *workqueue;
+
+	struct work_struct restart;
+	struct work_struct scan_completed;
+	struct work_struct rx_replenish;
+	struct work_struct abort_scan;
+
+	struct il_rxon_context *beacon_ctx;
+	struct sk_buff *beacon_skb;
+
+	struct work_struct tx_flush;
+
+	struct tasklet_struct irq_tasklet;
+
+	struct delayed_work init_alive_start;
+	struct delayed_work alive_start;
+	struct delayed_work scan_check;
+
+	/* TX Power */
+	s8 tx_power_user_lmt;
+	s8 tx_power_device_lmt;
+	s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	/* debugging info */
+	u32 debug_level;	/* per device debugging will override global
+				   il_debug_level if set */
+#endif				/* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	/* debugfs */
+	u16 tx_traffic_idx;
+	u16 rx_traffic_idx;
+	u8 *tx_traffic;
+	u8 *rx_traffic;
+	struct dentry *debugfs_dir;
+	u32 dbgfs_sram_offset, dbgfs_sram_len;
+	bool disable_ht40;
+#endif				/* CONFIG_IWLEGACY_DEBUGFS */
+
+	struct work_struct txpower_work;
+	u32 disable_sens_cal;
+	u32 disable_chain_noise_cal;
+	u32 disable_tx_power_cal;
+	struct work_struct run_time_calib_work;
+	struct timer_list stats_periodic;
+	struct timer_list watchdog;
+	bool hw_ready;
+
+	struct led_classdev led;
+	unsigned long blink_on, blink_off;
+	bool led_registered;
+};				/*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+	set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+	clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline struct ieee80211_hdr *
+il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
+{
+	if (il->txq[txq_id].txb[idx].skb)
+		return (struct ieee80211_hdr *)
+		    il->txq[txq_id].txb[idx].skb->data;
+	return NULL;
+}
+
+static inline struct il_rxon_context *
+il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	return vif_priv->ctx;
+}
+
+#define for_each_context(il, _ctx) \
+	for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
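+
+/*
+ * For example (illustrative), iterating the single RXON context:
+ *
+ *	struct il_rxon_context *ctx;
+ *
+ *	for_each_context(il, ctx)
+ *		il_commit_rxon(il, ctx);
+ */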
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+	return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+	return il_is_associated(il);
+}
+
+static inline int
+il_is_associated_ctx(struct il_rxon_context *ctx)
+{
+	return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+	if (ch_info == NULL)
+		return 0;
+	return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+	return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+	return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+	return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+	return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+	__free_pages(page, il->hw_params.rx_page_order);
+	il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+	free_pages(page, il->hw_params.rx_page_order);
+	il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+	.driver_data = (kernel_ulong_t)&(cfg)
+
+#define TIME_UNIT		1024
+
+#define IL_SKU_G       0x1
+#define IL_SKU_A       0x2
+#define IL_SKU_N       0x8
+
+#define IL_CMD(x) case x: return #x
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000)	/* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+struct il_hcmd_ops {
+	int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
+	int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
+	void (*set_rxon_chain) (struct il_priv *il,
+				struct il_rxon_context *ctx);
+};
+
+struct il_hcmd_utils_ops {
+	u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+	u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+	int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+	void (*post_scan) (struct il_priv *il);
+};
+
+struct il_apm_ops {
+	int (*init) (struct il_priv *il);
+	void (*config) (struct il_priv *il);
+};
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+	ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos);
+	ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos);
+	ssize_t(*general_stats_read) (struct file *file,
+				      char __user *user_buf, size_t count,
+				      loff_t *ppos);
+};
+#endif
+
+struct il_temp_ops {
+	void (*temperature) (struct il_priv *il);
+};
+
+struct il_lib_ops {
+	/* set hw dependent parameters */
+	int (*set_hw_params) (struct il_priv *il);
+	/* Handling TX */
+	void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+					 struct il_tx_queue *txq,
+					 u16 byte_cnt);
+	int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+				      struct il_tx_queue *txq, dma_addr_t addr,
+				      u16 len, u8 reset, u8 pad);
+	void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+	int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+	/* setup Rx handler */
+	void (*handler_setup) (struct il_priv *il);
+	/* alive notification after init uCode load */
+	void (*init_alive_start) (struct il_priv *il);
+	/* check validity of rtc data address */
+	int (*is_valid_rtc_data_addr) (u32 addr);
+	/* 1st ucode load */
+	int (*load_ucode) (struct il_priv *il);
+
+	void (*dump_nic_error_log) (struct il_priv *il);
+	int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+	int (*set_channel_switch) (struct il_priv *il,
+				   struct ieee80211_channel_switch *ch_switch);
+	/* power management */
+	struct il_apm_ops apm_ops;
+
+	/* power */
+	int (*send_tx_power) (struct il_priv *il);
+	void (*update_chain_flags) (struct il_priv *il);
+
+	/* eeprom operations */
+	struct il_eeprom_ops eeprom_ops;
+
+	/* temperature */
+	struct il_temp_ops temp_ops;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	struct il_debugfs_ops debugfs_ops;
+#endif
+
+};
+
+struct il_led_ops {
+	int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_legacy_ops {
+	void (*post_associate) (struct il_priv *il);
+	void (*config_ap) (struct il_priv *il);
+	/* station management */
+	int (*update_bcast_stations) (struct il_priv *il);
+	int (*manage_ibss_station) (struct il_priv *il,
+				    struct ieee80211_vif *vif, bool add);
+};
+
+struct il_ops {
+	const struct il_lib_ops *lib;
+	const struct il_hcmd_ops *hcmd;
+	const struct il_hcmd_utils_ops *utils;
+	const struct il_led_ops *led;
+	const struct il_nic_ops *nic;
+	const struct il_legacy_ops *legacy;
+	const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct il_mod_params {
+	int sw_crypto;		/* def: 0 = using hardware encryption */
+	int disable_hw_scan;	/* def: 0 = use h/w scan */
+	int num_of_queues;	/* def: HW dependent */
+	int disable_11n;	/* def: 0 = 11n capabilities enabled */
+	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
+	int antenna;		/* def: 0 = both antennas (use diversity) */
+	int restart_fw;		/* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ *	to the deviation, to achieve the desired LED frequency.
+ *	The detailed algorithm is described in common.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode is in kelvin
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ *	sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ *	chain noise calibration operation
+ */
+struct il_base_params {
+	int eeprom_size;
+	int num_of_queues;	/* def: HW dependent */
+	int num_of_ampdu_queues;	/* def: HW dependent */
+	/* for il_apm_init() */
+	u32 pll_cfg_val;
+	bool set_l0s;
+	bool use_bsm;
+
+	u16 led_compensation;
+	int chain_noise_num_beacons;
+	unsigned int wd_timeout;
+	bool temperature_kelvin;
+	const bool ucode_tracing;
+	const bool sensitivity_calib_by_driver;
+	const bool chain_noise_calib_by_driver;
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY       (0<<1)
+#define IL_LED_LINK           (1<<1)
+
+/*
+ * LED mode
+ *    IL_LED_DEFAULT:  use device default
+ *    IL_LED_RF_STATE: turn LED on/off based on RF state
+ *			LED ON  = RF ON
+ *			LED OFF = RF OFF
+ *    IL_LED_BLINK:    adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+	IL_LED_DEFAULT,
+	IL_LED_RF_STATE,
+	IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *	(.ucode) will be added to filename before loading from disk. The
+ *	filename is constructed as fw_name_pre<api>.ucode.
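+ *	For example, a fw_name_pre of "iwlwifi-4965-" with API version 2
+ *	yields the firmware file name "iwlwifi-4965-2.ucode".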
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_rx_antennas: available antennas for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ *	Driver interacts with Firmware API version >= 2.
+ * } else {
+ *	Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * il_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct il_cfg {
+	/* params specific to an individual device within a device family */
+	const char *name;
+	const char *fw_name_pre;
+	const unsigned int ucode_api_max;
+	const unsigned int ucode_api_min;
+	u8 valid_tx_ant;
+	u8 valid_rx_ant;
+	unsigned int sku;
+	u16 eeprom_ver;
+	u16 eeprom_calib_ver;
+	const struct il_ops *ops;
+	/* module based parameters which can be set from modprobe cmd */
+	const struct il_mod_params *mod_params;
+	/* params not likely to change within a device family */
+	struct il_base_params *base_params;
+	/* params likely to change within a device family */
+	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+	enum il_led_mode led_mode;
+};
+
+/***************************
+ *   L i b                 *
+ ***************************/
+
+struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+			  int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+			struct il_rxon_context *ctx);
+void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+			   enum ieee80211_band band, struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+			   struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il,
+				  struct il_rxon_context *ctx);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+			  u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    enum nl80211_iftype newtype, bool newp2p);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_free_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_alloc_traffic_mem(struct il_priv *il);
+void il_free_traffic_mem(struct il_priv *il);
+void il_reset_traffic_log(struct il_priv *il);
+void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+			      struct ieee80211_hdr *header);
+void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+			      struct ieee80211_hdr *header);
+const char *il_get_mgmt_string(int cmd);
+const char *il_get_ctrl_string(int cmd);
+void il_clear_traffic_stats(struct il_priv *il);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+	return 0;
+}
+
+static inline void
+il_free_traffic_mem(struct il_priv *il)
+{
+}
+
+static inline void
+il_reset_traffic_log(struct il_priv *il)
+{
+}
+
+static inline void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+			 struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+			 struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+/* Handlers */
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+		     u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
+		       int slots_num, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct cfg80211_scan_request *req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+		      const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+			     u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+			      struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME       cpu_to_le16(10)	/* msec */
+#define IL_PLCP_QUIET_THRESH       cpu_to_le16(1)	/* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG		(HZ * 7)
+
+/*****************************************************
+ *   S e n d i n g     H o s t     C o m m a n d s   *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+				 const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+			  void (*callback) (struct il_priv *il,
+					    struct il_device_cmd *cmd,
+					    struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI						     *
+ *****************************************************/
+
+static inline u16
+il_pcie_link_ctl(struct il_priv *il)
+{
+	int pos;
+	u16 pci_lnk_ctl;
+	pos = pci_pcie_cap(il->pci_dev);
+	pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+	return pci_lnk_ctl;
+}
+
+void il_bg_watchdog(unsigned long data);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+			  u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int il_pci_suspend(struct device *device);
+int il_pci_resume(struct device *device);
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS	(&il_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IL_LEGACY_PM_OPS	NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+*  Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+*  GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS   *****/
+
+#define S_HCMD_ACTIVE	0	/* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED	2
+#define S_RF_KILL_HW	3
+#define S_CT_KILL		4
+#define S_INIT		5
+#define S_ALIVE		6
+#define S_READY		7
+#define S_TEMPERATURE	8
+#define S_GEO_CONFIGURED	9
+#define S_EXIT_PENDING	10
+#define S_STATS		12
+#define S_SCANNING		13
+#define S_SCAN_ABORTING	14
+#define S_SCAN_HW		15
+#define S_POWER_PMI	16
+#define S_FW_ERROR		17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+	/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+	 * set but EXIT_PENDING is not */
+	return test_bit(S_READY, &il->status) &&
+	    test_bit(S_GEO_CONFIGURED, &il->status) &&
+	    !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+	return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+	return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill_hw(struct il_priv *il)
+{
+	return test_bit(S_RF_KILL_HW, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+	return il_is_rfkill_hw(il);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+	return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+	if (il_is_rfkill(il))
+		return 0;
+
+	return il_is_ready(il);
+}
+
+extern void il_send_bt_config(struct il_priv *il);
+extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+static inline int
+il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+	return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+{
+	return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+			  __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
+extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+extern int _il_grab_nic_access(struct il_priv *il);
+extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+extern u32 il_rd_prph(struct il_priv *il, u32 reg);
+extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+	iowrite8(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+	iowrite32(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+	return ioread32(il->hw_base + ofs);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+	_il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+	_il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+	u32 value;
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	value = _il_rd(il, reg);
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+	return value;
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (!_il_grab_nic_access(il)) {
+		_il_wr(il, reg, value);
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+	_il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+	rmb();
+	return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+	_il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+	wmb();
+	_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	_il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+	unsigned long reg_flags;
+	u32 val;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	val = _il_rd_prph(il, reg);
+	_il_wr_prph(il, reg, (val & ~mask));
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0)	/* driver entry is active */
+#define IL_STA_UCODE_ACTIVE  BIT(1)	/* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS  BIT(2)	/* ucode entry is in process of
+					   being activated */
+#define IL_STA_LOCAL BIT(3)	/* station state not directed by mac80211;
+				   (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4)	/* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+			  const u8 *addr, bool is_ap,
+			  struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+		   const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+		   struct il_link_quality_cmd *lq, u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: il_priv data struct
+ *
+ * This is called during il_down() so that, if we are coming from a
+ * hardware restart, mac80211 will be able to reconfigure stations;
+ * in the normal down flow the stations will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+	unsigned long flags;
+	struct il_rxon_context *ctx = &il->ctx;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	memset(il->stations, 0, sizeof(il->stations));
+	il->num_stations = 0;
+
+	il->ucode_key_table = 0;
+
+	/*
+	 * Remove all key information that is not stored as part
+	 * of station information since mac80211 may not have had
+	 * a chance to remove all the keys. When device is
+	 * reconfigured by mac80211 after an error all keys will
+	 * be reconfigured.
+	 */
+	memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+	ctx->key_mapping_keys = 0;
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+	if (WARN_ON(!sta))
+		return IL_INVALID_STATION;
+
+	return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: il_priv data struct
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
+		       struct ieee80211_sta *sta)
+{
+	int sta_id;
+
+	if (!sta)
+		return context->bcast_sta_id;
+
+	sta_id = il_sta_id(sta);
+
+	/*
+	 * mac80211 should not be passing a partially
+	 * initialised station!
+	 */
+	WARN_ON(sta_id == IL_INVALID_STATION);
+
+	return sta_id;
+}
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+	return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+	return --idx & (n_bd - 1);
+}
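+
+/*
+ * For example (illustrative), with n_bd == 256: il_queue_inc_wrap(255, 256)
+ * returns 0 and il_queue_dec_wrap(0, 256) returns 255.
+ */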
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+	if (desc->v_addr)
+		dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+				  desc->p_addr);
+	desc->v_addr = NULL;
+	desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+	if (!desc->len) {
+		desc->v_addr = NULL;
+		return -EINVAL;
+	}
+
+	desc->v_addr =
+	    dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
+			       GFP_KERNEL);
+	return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+	BUG_ON(ac > 3);		/* only have 2 bits */
+	BUG_ON(hwq > 31);	/* only use 5 bits */
+
+	txq->swq_id = (hwq << 2) | ac;
+}
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+	u8 queue = txq->swq_id;
+	u8 ac = queue & 3;
+	u8 hwq = (queue >> 2) & 0x1f;
+
+	if (test_and_clear_bit(hwq, il->queue_stopped))
+		if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+			ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+	u8 queue = txq->swq_id;
+	u8 ac = queue & 3;
+	u8 hwq = (queue >> 2) & 0x1f;
+
+	if (!test_and_set_bit(hwq, il->queue_stopped))
+		if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+			ieee80211_stop_queue(il->hw, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+	clear_bit(S_INT_ENABLED, &il->status);
+
+	/* disable interrupts from uCode/NIC to host */
+	_il_wr(il, CSR_INT_MASK, 0x00000000);
+
+	/* acknowledge/clear/reset any interrupts still pending
+	 * from uCode or flow handler (Rx/Tx DMA) */
+	_il_wr(il, CSR_INT, 0xffffffff);
+	_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+	_il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+	set_bit(S_INT_ENABLED, &il->status);
+	_il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask for the low (interval) bits of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of low bits holding the beacon interval
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+	return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask for the high (extended) bits of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of low bits holding the beacon interval
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
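+
+/*
+ * For example, with tsf_bits == 22 (the actual value is hardware dependent),
+ * il_beacon_time_mask_low() yields 0x003FFFFF and il_beacon_time_mask_high()
+ * yields 0xFFC00000; together the two masks partition a 32-bit beacon time
+ * word into its low and high fields.
+ */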
+
+/**
+ * struct il_rb_status - reserve buffer status host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ *			     in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ *			     which was transferred
+ */
+struct il_rb_status {
+	__le16 closed_rb_num;
+	__le16 closed_fr_num;
+	__le16 finished_rb_num;
+	__le16 finished_fr_nam;
+	__le32 __unused;	/* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX      (256)
+#define TFD_QUEUE_SIZE_BC_DUP	(64)
+#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK        DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS		20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
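+
+/*
+ * For example, for a 36-bit bus address 0xF12345678, il_get_dma_hi_addr()
+ * returns 0xF (bits [35:32]); the low 32 bits go into il_tfd_tb.lo and the
+ * returned nibble into bits 0-3 of il_tfd_tb.hi_n_len (see below).
+ */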
+
+/**
+ * struct il_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one Tx buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer; every second
+ *	entry is aligned only on a 16-bit boundary
+ * @hi_n_len: 0-3 [35:32] portion of dma
+ *	      4-15 length of the tx buffer
+ */
+struct il_tfd_tb {
+	__le32 lo;
+	__le16 hi_n_len;
+} __packed;
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ *	     5   reserved
+ * 	     6-7 padding (not used)
+ * @ tbs[20]	transmit frame buffer descriptors
+ * @ __pad	padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM.  These buffers collectively contain the (one) frame described
+ * by the TFD.  Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
+ * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+	u8 __reserved1[3];
+	u8 num_tbs;
+	struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+	__le32 __pad;
+} __packed;
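+
+/*
+ * For illustration only (not a helper used by this driver, and assuming a
+ * 64-bit dma_addr_t), a TB's bus address and length can be unpacked from the
+ * fields above roughly as:
+ *
+ *	dma_addr_t addr = le32_to_cpu(tb->lo) |
+ *			  ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 32);
+ *	u16 len = le16_to_cpu(tb->hi_n_len) >> 4;
+ */
+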
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
+
+struct il_rate_info {
+	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
+	u8 plcp_siso;		/* uCode API:  RATE_SISO_6M_PLCP, etc. */
+	u8 plcp_mimo2;		/* uCode API:  RATE_MIMO2_6M_PLCP, etc. */
+	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
+	u8 prev_ieee;		/* previous rate in IEEE speeds */
+	u8 next_ieee;		/* next rate in IEEE speeds */
+	u8 prev_rs;		/* previous rate used in rs algo */
+	u8 next_rs;		/* next rate used in rs algo */
+	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
+	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
+	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
+	u8 prev_ieee;		/* previous rate in IEEE speeds */
+	u8 next_ieee;		/* next rate in IEEE speeds */
+	u8 prev_rs;		/* previous rate used in rs algo */
+	u8 next_rs;		/* next rate used in rs algo */
+	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
+	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
+	u8 table_rs_idx;	/* idx in rate scale table cmd */
+	u8 prev_table_rs;	/* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+	RATE_1M_IDX = 0,
+	RATE_2M_IDX,
+	RATE_5M_IDX,
+	RATE_11M_IDX,
+	RATE_6M_IDX,
+	RATE_9M_IDX,
+	RATE_12M_IDX,
+	RATE_18M_IDX,
+	RATE_24M_IDX,
+	RATE_36M_IDX,
+	RATE_48M_IDX,
+	RATE_54M_IDX,
+	RATE_60M_IDX,
+	RATE_COUNT,
+	RATE_COUNT_LEGACY = RATE_COUNT - 1,	/* Excluding 60M */
+	RATE_COUNT_3945 = RATE_COUNT - 1,
+	RATE_INVM_IDX = RATE_COUNT,
+	RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+	RATE_6M_IDX_TBL = 0,
+	RATE_9M_IDX_TBL,
+	RATE_12M_IDX_TBL,
+	RATE_18M_IDX_TBL,
+	RATE_24M_IDX_TBL,
+	RATE_36M_IDX_TBL,
+	RATE_48M_IDX_TBL,
+	RATE_54M_IDX_TBL,
+	RATE_1M_IDX_TBL,
+	RATE_2M_IDX_TBL,
+	RATE_5M_IDX_TBL,
+	RATE_11M_IDX_TBL,
+	RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+	IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+	IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+	IL_LAST_OFDM_RATE = RATE_60M_IDX,
+	IL_FIRST_CCK_RATE = RATE_1M_IDX,
+	IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define	RATE_6M_MASK   (1 << RATE_6M_IDX)
+#define	RATE_9M_MASK   (1 << RATE_9M_IDX)
+#define	RATE_12M_MASK  (1 << RATE_12M_IDX)
+#define	RATE_18M_MASK  (1 << RATE_18M_IDX)
+#define	RATE_24M_MASK  (1 << RATE_24M_IDX)
+#define	RATE_36M_MASK  (1 << RATE_36M_IDX)
+#define	RATE_48M_MASK  (1 << RATE_48M_IDX)
+#define	RATE_54M_MASK  (1 << RATE_54M_IDX)
+#define RATE_60M_MASK  (1 << RATE_60M_IDX)
+#define	RATE_1M_MASK   (1 << RATE_1M_IDX)
+#define	RATE_2M_MASK   (1 << RATE_2M_IDX)
+#define	RATE_5M_MASK   (1 << RATE_5M_IDX)
+#define	RATE_11M_MASK  (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+	RATE_6M_PLCP = 13,
+	RATE_9M_PLCP = 15,
+	RATE_12M_PLCP = 5,
+	RATE_18M_PLCP = 7,
+	RATE_24M_PLCP = 9,
+	RATE_36M_PLCP = 11,
+	RATE_48M_PLCP = 1,
+	RATE_54M_PLCP = 3,
+	RATE_60M_PLCP = 3,	/*FIXME:RS:should be removed */
+	RATE_1M_PLCP = 10,
+	RATE_2M_PLCP = 20,
+	RATE_5M_PLCP = 55,
+	RATE_11M_PLCP = 110,
+	/*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+	RATE_SISO_6M_PLCP = 0,
+	RATE_SISO_12M_PLCP = 1,
+	RATE_SISO_18M_PLCP = 2,
+	RATE_SISO_24M_PLCP = 3,
+	RATE_SISO_36M_PLCP = 4,
+	RATE_SISO_48M_PLCP = 5,
+	RATE_SISO_54M_PLCP = 6,
+	RATE_SISO_60M_PLCP = 7,
+	RATE_MIMO2_6M_PLCP = 0x8,
+	RATE_MIMO2_12M_PLCP = 0x9,
+	RATE_MIMO2_18M_PLCP = 0xa,
+	RATE_MIMO2_24M_PLCP = 0xb,
+	RATE_MIMO2_36M_PLCP = 0xc,
+	RATE_MIMO2_48M_PLCP = 0xd,
+	RATE_MIMO2_54M_PLCP = 0xe,
+	RATE_MIMO2_60M_PLCP = 0xf,
+	RATE_SISO_INVM_PLCP,
+	RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+	RATE_6M_IEEE = 12,
+	RATE_9M_IEEE = 18,
+	RATE_12M_IEEE = 24,
+	RATE_18M_IEEE = 36,
+	RATE_24M_IEEE = 48,
+	RATE_36M_IEEE = 72,
+	RATE_48M_IEEE = 96,
+	RATE_54M_IEEE = 108,
+	RATE_60M_IEEE = 120,
+	RATE_1M_IEEE = 2,
+	RATE_2M_IEEE = 4,
+	RATE_5M_IEEE = 11,
+	RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK    \
+	(RATE_1M_MASK          | \
+	RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK          \
+	(IL_CCK_BASIC_RATES_MASK  | \
+	RATE_5M_MASK          | \
+	RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK   \
+	(RATE_6M_MASK         | \
+	RATE_12M_MASK         | \
+	RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK         \
+	(IL_OFDM_BASIC_RATES_MASK | \
+	RATE_9M_MASK          | \
+	RATE_18M_MASK         | \
+	RATE_36M_MASK         | \
+	RATE_48M_MASK         | \
+	RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK         \
+	(IL_OFDM_BASIC_RATES_MASK | \
+	 IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE    -1
+
+#define IL_MIN_RSSI_VAL                 -100
+#define IL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT	160
+#define IL_LEGACY_SUCCESS_LIMIT	480
+#define IL_LEGACY_TBL_COUNT		160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT	400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT	4500
+#define IL_NONE_LEGACY_TBL_COUNT	1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO		12800	/* 100% */
+#define RATE_SCALE_SWITCH		10880	/*  85% */
+#define RATE_HIGH_TH		10880	/*  85% */
+#define RATE_INCREASE_TH		6400	/*  50% */
+#define RATE_DECREASE_TH		1920	/*  15% */
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1      0
+#define IL_LEGACY_SWITCH_ANTENNA2      1
+#define IL_LEGACY_SWITCH_SISO          2
+#define IL_LEGACY_SWITCH_MIMO2_AB      3
+#define IL_LEGACY_SWITCH_MIMO2_AC      4
+#define IL_LEGACY_SWITCH_MIMO2_BC      5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1        0
+#define IL_SISO_SWITCH_ANTENNA2        1
+#define IL_SISO_SWITCH_MIMO2_AB        2
+#define IL_SISO_SWITCH_MIMO2_AC        3
+#define IL_SISO_SWITCH_MIMO2_BC        4
+#define IL_SISO_SWITCH_GI              5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1       0
+#define IL_MIMO2_SWITCH_ANTENNA2       1
+#define IL_MIMO2_SWITCH_SISO_A         2
+#define IL_MIMO2_SWITCH_SISO_B         3
+#define IL_MIMO2_SWITCH_SISO_C         4
+#define IL_MIMO2_SWITCH_GI             5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT		3	/* # possible actions */
+
+#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD	0
+#define IL_AGG_LOAD_THRESHOLD	10
+#define IL_AGG_ALL_TID		0xff
+#define TID_QUEUE_CELL_SPACING	50	/* mS */
+#define TID_QUEUE_MAX_SIZE	20
+#define TID_ROUND_VALUE		5	/* mS */
+#define TID_MAX_LOAD_COUNT	8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
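+
+/*
+ * With the values above TID_MAX_TIME_DIFF works out to 19 * 50 = 950 mS,
+ * i.e. the traffic-load window spans just under a second, and
+ * TIME_WRAP_AROUND() keeps the (x, y) time difference correct even when the
+ * underlying counter wraps.
+ */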
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+	LQ_NONE,
+	LQ_G,			/* legacy types */
+	LQ_A,
+	LQ_SISO,		/* high-throughput types */
+	LQ_MIMO2,
+	LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define	ANT_NONE	0x0
+#define	ANT_A		BIT(0)
+#define	ANT_B		BIT(1)
+#define	ANT_AB		(ANT_A | ANT_B)
+#define ANT_C		BIT(2)
+#define	ANT_AC		(ANT_A | ANT_C)
+#define ANT_BC		(ANT_B | ANT_C)
+#define ANT_ABC		(ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE	12
+
+struct il_rate_mcs_info {
+	char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+	char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+	u64 data;		/* bitmap of successful frames */
+	s32 success_counter;	/* number of frames successful */
+	s32 success_ratio;	/* per-cent * 128  */
+	s32 counter;		/* number of frames attempted */
+	s32 average_tpt;	/* success ratio * expected throughput */
+	unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+	enum il_table_type lq_type;
+	u8 ant_type;
+	u8 is_SGI;		/* 1 = short guard interval */
+	u8 is_ht40;		/* 1 = 40 MHz channel width */
+	u8 is_dup;		/* 1 = duplicated data streams */
+	u8 action;		/* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;		/* maximum number of tables we can search */
+	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
+	u32 current_rate;	/* rate_n_flags, uCode API format */
+	struct il_rate_scale_data win[RATE_COUNT];	/* rate histories */
+};
+
+struct il_traffic_load {
+	unsigned long time_stamp;	/* age of the oldest stats */
+	u32 packet_count[TID_QUEUE_MAX_SIZE];	/* packet count in this time
+						 * slice */
+	u32 total;		/* total num of packets during the
+				 * last TID_MAX_TIME_DIFF */
+	u8 queue_count;		/* number of queues that have
+				 * been used since the last cleanup */
+	u8 head;		/* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+	u8 active_tbl;		/* idx of active table, range 0-1 */
+	u8 enable_counter;	/* indicates HT mode */
+	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
+	u8 search_better_tbl;	/* 1: currently trying alternate mode */
+	s32 last_tpt;
+
+	/* The following determine when to search for a new mode */
+	u32 table_count_limit;
+	u32 max_failure_limit;	/* # failed frames before new search */
+	u32 max_success_limit;	/* # successful frames before new search */
+	u32 table_count;
+	u32 total_failed;	/* total failed frames, any/all rates */
+	u32 total_success;	/* total successful frames, any/all rates */
+	u64 flush_timer;	/* time staying in mode before new search */
+
+	u8 action_counter;	/* # mode-switch actions tried */
+	u8 is_green;
+	u8 is_dup;
+	enum ieee80211_band band;
+
+	/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+	u32 supp_rates;
+	u16 active_legacy_rate;
+	u16 active_siso_rate;
+	u16 active_mimo2_rate;
+	s8 max_rate_idx;	/* Max rate set by user */
+	u8 missed_rate_counter;
+
+	struct il_link_quality_cmd lq;
+	struct il_scale_tbl_info lq_info[LQ_SIZE];	/* "active", "search" */
+	struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+	u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_scale_table_file;
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+	u32 dbg_fixed_rate;
+#endif
+	struct il_priv *drv;
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+	/* packets destined for this STA are aggregated */
+	u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+	struct il_station_priv_common common;
+	struct il_lq_sta lq_sta;
+	atomic_t pending_frames;
+	bool client;
+	bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+	return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+	if (mask & ANT_A)
+		return ANT_A;
+	if (mask & ANT_B)
+		return ANT_B;
+	return ANT_C;
+}
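+
+/*
+ * For example, il4965_num_of_ant(ANT_AB) == 2 and
+ * il4965_first_antenna(ANT_BC) == ANT_B.
+ */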
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+				u8 sta_id);
+extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+				u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module.  The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw.
+ *
+ */
+extern int il4965_rate_control_register(void);
+extern int il3945_rate_control_register(void);
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void il4965_rate_control_unregister(void);
+extern void il3945_rate_control_unregister(void);
+
+extern int il_power_update_mode(struct il_priv *il, bool force);
+extern void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. That debug
+ * level is used if set; otherwise the global debug level, which can be set
+ * via a module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+	if (il->debug_level)
+		return il->debug_level;
+	else
+		return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+	return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len)					\
+do {									\
+	print_hex_dump(KERN_ERR, "iwl data: ",				\
+		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...)					\
+do {									\
+	if (il_get_debug_level(il) & level)				\
+		dev_printk(KERN_ERR, &il->hw->wiphy->dev,		\
+			 "%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
+			__func__ , ## args);				\
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len)				\
+do {									\
+	if (il_get_debug_level(il) & level)				\
+		print_hex_dump(KERN_DEBUG, "iwl data: ",		\
+			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+	return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IL_xxxx_DEBUG() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ *	/sys/module/iwl4965/parameters/debug
+ *	/sys/module/iwl3945/parameters/debug
+ *	/sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
+
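+/*
+ * For example (illustrative only; IL_DL_FOO is not defined by this driver),
+ * a new classification could use the currently unused bit 27:
+ *
+ *	#define IL_DL_FOO	(1 << 27)
+ *	#define D_FOO(f, a...)	IL_DBG(IL_DL_FOO, f, ## a)
+ *
+ * and be enabled at runtime with
+ *
+ *	echo 0x08000000 > /sys/class/net/wlan0/device/debug_level
+ */
+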
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO		(1 << 0)
+#define IL_DL_MAC80211		(1 << 1)
+#define IL_DL_HCMD		(1 << 2)
+#define IL_DL_STATE		(1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP		(1 << 4)
+#define IL_DL_HCMD_DUMP		(1 << 5)
+#define IL_DL_EEPROM		(1 << 6)
+#define IL_DL_RADIO		(1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER		(1 << 8)
+#define IL_DL_TEMP		(1 << 9)
+#define IL_DL_NOTIF		(1 << 10)
+#define IL_DL_SCAN		(1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC		(1 << 12)
+#define IL_DL_DROP		(1 << 13)
+#define IL_DL_TXPOWER		(1 << 14)
+#define IL_DL_AP		(1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW		(1 << 16)
+#define IL_DL_RF_KILL		(1 << 17)
+#define IL_DL_FW_ERRORS		(1 << 18)
+#define IL_DL_LED		(1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE		(1 << 20)
+#define IL_DL_CALIB		(1 << 21)
+#define IL_DL_WEP		(1 << 22)
+#define IL_DL_TX		(1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX		(1 << 24)
+#define IL_DL_ISR		(1 << 25)
+#define IL_DL_HT		(1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H		(1 << 28)
+#define IL_DL_STATS		(1 << 29)
+#define IL_DL_TX_REPLY		(1 << 30)
+#define IL_DL_QOS		(1 << 31)
+
+#define D_INFO(f, a...)		IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...)	IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...)	IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...)		IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...)		IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...)		IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...)		IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...)		IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...)		IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...)		IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...)		IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...)	IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...)	IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...)	IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...)		IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...)	IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...)		IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...)		IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...)	IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...)		IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...)	IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...)	IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...)		IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...)	IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...)	IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...)		IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...)	IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...)	IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...)		IL_DBG(IL_DL_11H, f, ## a)
+
+#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
similarity index 84%
rename from drivers/net/wireless/iwlegacy/iwl-csr.h
rename to drivers/net/wireless/iwlegacy/csr.h
index 668a961..9138e15 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
-#define __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
+#define __il_csr_h__
 /*
  * CSR (control and status registers)
  *
@@ -70,9 +70,9 @@
  * low power states due to driver-invoked device resets
  * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
  *
- * Use iwl_write32() and iwl_read32() family to access these registers;
+ * Use _il_wr() and _il_rd() family to access these registers;
  * these provide simple PCI bus access, without waking up the MAC.
- * Do not use iwl_legacy_write_direct32() family for these registers;
+ * Do not use il_wr() family for these registers;
  * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
  * The MAC (uCode processor, etc.) does not need to be powered up for accessing
  * the CSR registers.
@@ -82,16 +82,16 @@
  */
 #define CSR_BASE    (0x000)
 
-#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
-#define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
-#define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack*/
-#define CSR_GPIO_IN             (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET               (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000)	/* hardware interface config */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004)	/* accum ints, 32-usec units */
+#define CSR_INT                 (CSR_BASE+0x008)	/* host interrupt status/ack */
+#define CSR_INT_MASK            (CSR_BASE+0x00c)	/* host interrupt enable */
+#define CSR_FH_INT_STATUS       (CSR_BASE+0x010)	/* busmaster int status/ack */
+#define CSR_GPIO_IN             (CSR_BASE+0x018)	/* read external chip pins */
+#define CSR_RESET               (CSR_BASE+0x020)	/* busmaster enable, NMI, etc */
 #define CSR_GP_CNTRL            (CSR_BASE+0x024)
 
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
 #define CSR_INT_PERIODIC_REG	(CSR_BASE+0x005)
 
 /*
@@ -166,26 +166,26 @@
 
 #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A	(0x00080000)
 #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM	(0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY	(0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE		  (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY	(0x00400000)	/* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000)	/* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE		  (0x08000000)	/* WAKE_ME */
 
-#define CSR_INT_PERIODIC_DIS			(0x00) /* disable periodic int*/
-#define CSR_INT_PERIODIC_ENA			(0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_DIS			(0x00)	/* disable periodic int */
+#define CSR_INT_PERIODIC_ENA			(0xFF)	/* 255*32 usec ~ 8 msec */
 
 /* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
  * acknowledged (reset) by host writing "1" to flagged bits. */
-#define CSR_INT_BIT_FH_RX        (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
-#define CSR_INT_BIT_HW_ERR       (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_RX_PERIODIC	 (1 << 28) /* Rx periodic */
-#define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
-#define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
-#define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
-#define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
-#define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
-#define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses, 3945 */
-#define CSR_INT_BIT_WAKEUP       (1 << 1)  /* NIC controller waking up (pwr mgmt) */
-#define CSR_INT_BIT_ALIVE        (1 << 0)  /* uCode interrupts once it initializes */
+#define CSR_INT_BIT_FH_RX        (1 << 31)	/* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR       (1 << 29)	/* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC	 (1 << 28)	/* Rx periodic */
+#define CSR_INT_BIT_FH_TX        (1 << 27)	/* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD          (1 << 26)	/* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR       (1 << 25)	/* uCode error */
+#define CSR_INT_BIT_RF_KILL      (1 << 7)	/* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL      (1 << 6)	/* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX        (1 << 3)	/* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP       (1 << 1)	/* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE        (1 << 0)	/* uCode interrupts once it initializes */
 
 #define CSR_INI_SET_MASK	(CSR_INT_BIT_FH_RX   | \
 				 CSR_INT_BIT_HW_ERR  | \
@@ -197,21 +197,20 @@
 				 CSR_INT_BIT_ALIVE)
 
 /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
-#define CSR_FH_INT_BIT_ERR       (1 << 31) /* Error */
-#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30) /* High priority Rx, bypass coalescing */
-#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18) /* Rx channel 2 (3945 only) */
-#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17) /* Rx channel 1 */
-#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16) /* Rx channel 0 */
-#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)  /* Tx channel 6 (3945 only) */
-#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)  /* Tx channel 1 */
-#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)  /* Tx channel 0 */
+#define CSR_FH_INT_BIT_ERR       (1 << 31)	/* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30)	/* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18)	/* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17)	/* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16)	/* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)	/* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)	/* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)	/* Tx channel 0 */
 
 #define CSR39_FH_INT_RX_MASK	(CSR_FH_INT_BIT_HI_PRIOR | \
 				 CSR39_FH_INT_BIT_RX_CHNL2 | \
 				 CSR_FH_INT_BIT_RX_CHNL1 | \
 				 CSR_FH_INT_BIT_RX_CHNL0)
 
-
 #define CSR39_FH_INT_TX_MASK	(CSR39_FH_INT_BIT_TX_CHNL6 | \
 				 CSR_FH_INT_BIT_TX_CHNL1 | \
 				 CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
 #define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE         (0x04000000)
 #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
 
-
 /* EEPROM REG */
 #define CSR_EEPROM_REG_READ_VALID_MSK	(0x00000001)
 #define CSR_EEPROM_REG_BIT_CMD		(0x00000002)
@@ -293,19 +291,18 @@
 #define CSR_EEPROM_REG_MSK_DATA		(0xFFFF0000)
 
 /* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK		(0x00000007) /* signature */
+#define CSR_EEPROM_GP_VALID_MSK		(0x00000007)	/* signature */
 #define CSR_EEPROM_GP_IF_OWNER_MSK	(0x00000180)
 #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K		(0x00000002)
 #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K		(0x00000004)
 
 /* GP REG */
-#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000)	/* bit 24/25 */
 #define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
 #define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
 #define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
 #define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
 
-
 /* CSR GIO */
 #define CSR_GIO_REG_VAL_L0S_ENABLED	(0x00000002)
 
@@ -357,7 +354,7 @@
 /* HPET MEM debug */
 #define CSR_DBG_HPET_MEM_REG_VAL	(0xFFFF0000)
 
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
 #define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
 
@@ -368,13 +365,13 @@
  * to indirectly access device's internal memory or registers that
  * may be powered-down.
  *
- * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * Use il_wr()/il_rd() family
  * for these registers;
  * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
  * to make sure the MAC (uCode processor, etc.) is powered up for accessing
  * internal resources.
  *
- * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * Do not use _il_wr()/_il_rd() family to access these registers;
  * these provide only simple PCI bus access, without waking up the MAC.
  */
 #define HBUS_BASE	(0x400)
@@ -411,12 +408,12 @@
 #define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
 
 /*
- * Per-Tx-queue write pointer (index, really!)
- * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
  * Bit usage:
- *  0-7:  queue write index
+ *  0-7:  queue write idx
  * 11-8:  queue selector
  */
 #define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
 
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 0000000..b1b8926
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
+	if (!debugfs_create_file(#name, mode, parent, il,		\
+			 &il_dbgfs_##name##_ops))		\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
+				    parent, ptr);			\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
+				   parent, ptr);			\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name)                                         \
+static ssize_t il_dbgfs_##name##_read(struct file *file,               \
+					char __user *user_buf,          \
+					size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)                                        \
+static ssize_t il_dbgfs_##name##_write(struct file *file,              \
+					const char __user *user_buf,    \
+					size_t count, loff_t *ppos);
+
+static int
+il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name)				\
+	DEBUGFS_READ_FUNC(name);				\
+static const struct file_operations il_dbgfs_##name##_ops = {	\
+	.read = il_dbgfs_##name##_read,				\
+	.open = il_dbgfs_open_file_generic,			\
+	.llseek = generic_file_llseek,				\
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)				\
+	DEBUGFS_WRITE_FUNC(name);				\
+static const struct file_operations il_dbgfs_##name##_ops = {	\
+	.write = il_dbgfs_##name##_write,			\
+	.open = il_dbgfs_open_file_generic,			\
+	.llseek = generic_file_llseek,				\
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)			\
+	DEBUGFS_READ_FUNC(name);				\
+	DEBUGFS_WRITE_FUNC(name);				\
+static const struct file_operations il_dbgfs_##name##_ops = {	\
+	.write = il_dbgfs_##name##_write,			\
+	.read = il_dbgfs_##name##_read,				\
+	.open = il_dbgfs_open_file_generic,			\
+	.llseek = generic_file_llseek,				\
+};
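+
+/*
+ * For example, DEBUGFS_READ_WRITE_FILE_OPS(sram) below expands to forward
+ * declarations of il_dbgfs_sram_read()/il_dbgfs_sram_write() plus a
+ * const struct file_operations il_dbgfs_sram_ops wired to them, which
+ * DEBUGFS_ADD_FILE(sram, parent, mode) then hands to debugfs_create_file().
+ */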
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	char *buf;
+	int pos = 0;
+
+	int cnt;
+	ssize_t ret;
+	const size_t bufsz =
+	    100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+		      il->tx_stats.data_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+		      il->tx_stats.data_bytes);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+				   const char __user *user_buf, size_t count,
+				   loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	u32 clear_flag;
+	char buf[8];
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &clear_flag) != 1)
+		return -EFAULT;
+	il_clear_traffic_stats(il);
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	char *buf;
+	int pos = 0;
+	int cnt;
+	ssize_t ret;
+	const size_t bufsz =
+	    100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+		      il->rx_stats.data_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+		      il->rx_stats.data_bytes);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+		   loff_t *ppos)
+{
+	u32 val;
+	char *buf;
+	ssize_t ret;
+	int i;
+	int pos = 0;
+	struct il_priv *il = file->private_data;
+	size_t bufsz;
+
+	/* default is to dump the entire data segment */
+	if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+		il->dbgfs_sram_offset = 0x800000;
+		if (il->ucode_type == UCODE_INIT)
+			il->dbgfs_sram_len = il->ucode_init_data.len;
+		else
+			il->dbgfs_sram_len = il->ucode_data.len;
+	}
+	bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+		      il->dbgfs_sram_len);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+		      il->dbgfs_sram_offset);
+	for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+		val =
+		    il_read_targ_mem(il,
+				     il->dbgfs_sram_offset +
+				     il->dbgfs_sram_len - i);
+		if (i < 4) {
+			switch (i) {
+			case 1:
+				val &= BYTE1_MASK;
+				break;
+			case 2:
+				val &= BYTE2_MASK;
+				break;
+			case 3:
+				val &= BYTE3_MASK;
+				break;
+			}
+		}
+		if (!(i % 16))
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+		    size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[64];
+	int buf_size;
+	u32 offset, len;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+		il->dbgfs_sram_offset = offset;
+		il->dbgfs_sram_len = len;
+	} else {
+		il->dbgfs_sram_offset = 0;
+		il->dbgfs_sram_len = 0;
+	}
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct il_station_entry *station;
+	int max_sta = il->hw_params.max_stations;
+	char *buf;
+	int i, j, pos = 0;
+	ssize_t ret;
+	/* Add 30 for initial string */
+	const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+		      il->num_stations);
+
+	for (i = 0; i < max_sta; i++) {
+		station = &il->stations[i];
+		if (!station->used)
+			continue;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "station %d - addr: %pM, flags: %#x\n", i,
+			      station->sta.sta.addr,
+			      station->sta.station_flags_msk);
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+		for (j = 0; j < MAX_TID_COUNT; j++) {
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+				      j, station->tid[j].seq_number,
+				      station->tid[j].agg.txq_id,
+				      station->tid[j].agg.frame_count,
+				      station->tid[j].tfds_in_queue,
+				      station->tid[j].agg.start_idx,
+				      station->tid[j].agg.bitmap,
+				      station->tid[j].agg.rate_n_flags);
+
+			if (station->tid[j].agg.wait_for_ba)
+				pos +=
+				    scnprintf(buf + pos, bufsz - pos,
+					      " - waitforba");
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		}
+
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+		  loff_t *ppos)
+{
+	ssize_t ret;
+	struct il_priv *il = file->private_data;
+	int pos = 0, ofs = 0, buf_size = 0;
+	const u8 *ptr;
+	char *buf;
+	u16 eeprom_ver;
+	size_t eeprom_len = il->cfg->base_params->eeprom_size;
+	buf_size = 4 * eeprom_len + 256;
+
+	if (eeprom_len % 16) {
+		IL_ERR("NVM size is not a multiple of 16.\n");
+		return -ENODATA;
+	}
+
+	ptr = il->eeprom;
+	if (!ptr) {
+		IL_ERR("Invalid EEPROM memory\n");
+		return -ENOMEM;
+	}
+
+	/* 4 characters for byte 0xYY */
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+	eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+	pos +=
+	    scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+		      eeprom_ver);
+	for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+		hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+				   buf_size - pos, 0);
+		pos += strlen(buf + pos);
+		if (buf_size - pos > 0)
+			buf[pos++] = '\n';
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct ieee80211_channel *channels = NULL;
+	const struct ieee80211_supported_band *supp_band = NULL;
+	int pos = 0, i, bufsz = PAGE_SIZE;
+	char *buf;
+	ssize_t ret;
+
+	if (!test_bit(S_GEO_CONFIGURED, &il->status))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+	if (supp_band) {
+		channels = supp_band->channels;
+
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+			      supp_band->n_channels);
+
+		for (i = 0; i < supp_band->n_channels; i++)
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "%d: %ddBm: BSS%s%s, %s.\n",
+				      channels[i].hw_value,
+				      channels[i].max_power,
+				      channels[i].
+				      flags & IEEE80211_CHAN_RADAR ?
+				      " (IEEE 802.11h required)" : "",
+				      ((channels[i].
+					flags & IEEE80211_CHAN_NO_IBSS) ||
+				       (channels[i].
+					flags & IEEE80211_CHAN_RADAR)) ? "" :
+				      ", IBSS",
+				      channels[i].
+				      flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+				      "passive only" : "active/passive");
+	}
+	supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+	if (supp_band) {
+		channels = supp_band->channels;
+
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "Displaying %d channels in 5.2GHz band (802.11a)\n",
+			      supp_band->n_channels);
+
+		for (i = 0; i < supp_band->n_channels; i++)
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "%d: %ddBm: BSS%s%s, %s.\n",
+				      channels[i].hw_value,
+				      channels[i].max_power,
+				      channels[i].
+				      flags & IEEE80211_CHAN_RADAR ?
+				      " (IEEE 802.11h required)" : "",
+				      ((channels[i].
+					flags & IEEE80211_CHAN_NO_IBSS) ||
+				       (channels[i].
+					flags & IEEE80211_CHAN_RADAR)) ? "" :
+				      ", IBSS",
+				      channels[i].
+				      flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+				      "passive only" : "active/passive");
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+		     loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	char buf[512];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+		      test_bit(S_HCMD_ACTIVE, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+		      test_bit(S_INT_ENABLED, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
+		      test_bit(S_RF_KILL_HW, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+		      test_bit(S_CT_KILL, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+		      test_bit(S_INIT, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+		      test_bit(S_ALIVE, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+		      test_bit(S_READY, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+		      test_bit(S_TEMPERATURE, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+		      test_bit(S_GEO_CONFIGURED, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+		      test_bit(S_EXIT_PENDING, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+		      test_bit(S_STATS, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+		      test_bit(S_SCANNING, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+		      test_bit(S_SCAN_ABORTING, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+		      test_bit(S_SCAN_HW, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+		      test_bit(S_POWER_PMI, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+		      test_bit(S_FW_ERROR, &il->status));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+			loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = 24 * 64;	/* 24 items * 64 char per item */
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+		      il->isr_stats.hw);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+		      il->isr_stats.sw);
+	if (il->isr_stats.sw || il->isr_stats.hw) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "\tLast Restarting Code:  0x%X\n",
+			      il->isr_stats.err_code);
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+		      il->isr_stats.sch);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+		      il->isr_stats.alive);
+#endif
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "HW RF KILL switch toggled:\t %u\n",
+		      il->isr_stats.rfkill);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+		      il->isr_stats.ctkill);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+		      il->isr_stats.wakeup);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+		      il->isr_stats.rx);
+	for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+		if (il->isr_stats.handlers[cnt] > 0)
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "\tRx handler[%36s]:\t\t %u\n",
+				      il_get_cmd_string(cnt),
+				      il->isr_stats.handlers[cnt]);
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+		      il->isr_stats.tx);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+		      il->isr_stats.unhandled);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+			 size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	u32 reset_flag;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &reset_flag) != 1)
+		return -EFAULT;
+	if (reset_flag == 0)
+		il_clear_isr_stats(il);
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+		  loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct il_rxon_context *ctx = &il->ctx;
+	int pos = 0, i;
+	char buf[256];
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
+	for (i = 0; i < AC_NUM; i++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "\tcw_min\tcw_max\taifsn\ttxop\n");
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+			      ctx->qos_data.def_qos_parm.ac[i].cw_min,
+			      ctx->qos_data.def_qos_parm.ac[i].cw_max,
+			      ctx->qos_data.def_qos_parm.ac[i].aifsn,
+			      ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+			    size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int ht40;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &ht40) != 1)
+		return -EFAULT;
+	if (!il_is_any_associated(il))
+		il->disable_ht40 = ht40 ? true : false;
+	else {
+		IL_ERR("Sta associated with AP - "
+		       "Change to 40MHz channel support is not allowed\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[100];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+		      il->disable_ht40 ? "Disabled" : "Enabled");
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0, ofs = 0;
+	int cnt = 0, entry;
+	struct il_tx_queue *txq;
+	struct il_queue *q;
+	struct il_rx_queue *rxq = &il->rxq;
+	char *buf;
+	int bufsz =
+	    ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+	    (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
+	const u8 *ptr;
+	ssize_t ret;
+
+	if (!il->txq) {
+		IL_ERR("txq not ready\n");
+		return -EAGAIN;
+	}
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate buffer\n");
+		return -ENOMEM;
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+		txq = &il->txq[cnt];
+		q = &txq->q;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
+			      q->read_ptr, q->write_ptr);
+	}
+	if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
+		ptr = il->tx_traffic;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
+			      il->tx_traffic_idx);
+		for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+			for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+			     entry++, ofs += 16) {
+				pos +=
+				    scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+					      ofs);
+				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+						   buf + pos, bufsz - pos, 0);
+				pos += strlen(buf + pos);
+				if (bufsz - pos > 0)
+					buf[pos++] = '\n';
+			}
+		}
+	}
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
+		      rxq->read, rxq->write);
+
+	if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
+		ptr = il->rx_traffic;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
+			      il->rx_traffic_idx);
+		for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+			for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+			     entry++, ofs += 16) {
+				pos +=
+				    scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+					      ofs);
+				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+						   buf + pos, bufsz - pos, 0);
+				pos += strlen(buf + pos);
+				if (bufsz - pos > 0)
+					buf[pos++] = '\n';
+			}
+		}
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int traffic_log;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &traffic_log) != 1)
+		return -EFAULT;
+	if (traffic_log == 0)
+		il_reset_traffic_log(il);
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	struct il_tx_queue *txq;
+	struct il_queue *q;
+	char *buf;
+	int pos = 0;
+	int cnt;
+	int ret;
+	const size_t bufsz =
+	    sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+
+	if (!il->txq) {
+		IL_ERR("txq not ready\n");
+		return -EAGAIN;
+	}
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+		txq = &il->txq[cnt];
+		q = &txq->q;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "hwq %.2d: read=%u write=%u stop=%d"
+			      " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+			      q->read_ptr, q->write_ptr,
+			      !!test_bit(cnt, il->queue_stopped),
+			      txq->swq_id, txq->swq_id & 3,
+			      (txq->swq_id >> 2) & 0x1f);
+		if (cnt >= 4)
+			continue;
+		/* for the ACs, display the stop count too */
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "        stop-count: %d\n",
+			      atomic_read(&il->queue_stop_count[cnt]));
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	struct il_rx_queue *rxq = &il->rxq;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+		      rxq->free_count);
+	if (rxq->rb_stts) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+			      le16_to_cpu(rxq->rb_stts->
+					  closed_rb_num) & 0x0FFF);
+	} else {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "closed_rb_num: Not Allocated\n");
+	}
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
+							    count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
+							    count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
+								 count, ppos);
+}
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+	ssize_t ret;
+	struct il_sensitivity_data *data;
+
+	data = &il->sensitivity_data;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+		      data->auto_corr_ofdm);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+		      data->auto_corr_ofdm_mrc);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+		      data->auto_corr_ofdm_x1);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+		      data->auto_corr_ofdm_mrc_x1);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+		      data->auto_corr_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+		      data->auto_corr_cck_mrc);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+		      data->last_bad_plcp_cnt_ofdm);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+		      data->last_fa_cnt_ofdm);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+		      data->last_bad_plcp_cnt_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+		      data->last_fa_cnt_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+		      data->nrg_curr_state);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+		      data->nrg_prev_state);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+	for (cnt = 0; cnt < 10; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->nrg_value[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->nrg_silence_rssi[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+		      data->nrg_silence_ref);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+		      data->nrg_energy_idx);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+		      data->nrg_silence_idx);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+		      data->nrg_th_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "nrg_auto_corr_silence_diff:\t %u\n",
+		      data->nrg_auto_corr_silence_diff);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+		      data->num_in_cck_no_fa);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+		      data->nrg_th_ofdm);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+	ssize_t ret;
+	struct il_chain_noise_data *data;
+
+	data = &il->chain_noise_data;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+		      data->active_chains);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+		      data->chain_noise_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+		      data->chain_noise_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+		      data->chain_noise_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+		      data->chain_signal_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+		      data->chain_signal_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+		      data->chain_signal_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+		      data->beacon_count);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->disconn_array[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->delta_gain_code[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+		      data->radio_write);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+		      data->state);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[60];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	u32 pwrsave_status;
+
+	pwrsave_status =
+	    _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "%s\n",
+		      (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+		      (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+		      (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+		      "error");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+				 const char __user *user_buf, size_t count,
+				 loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int clear;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &clear) != 1)
+		return -EFAULT;
+
+	/* request current statistics from uCode and clear its counters */
+	mutex_lock(&il->mutex);
+	il_send_stats_request(il, CMD_SYNC, true);
+	mutex_unlock(&il->mutex);
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+			 size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int len = 0;
+	char buf[20];
+
+	len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int len = 0;
+	char buf[20];
+
+	len =
+	    sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+		     loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char *buf;
+	int pos = 0;
+	ssize_t ret = -EFAULT;
+
+	if (il->cfg->ops->lib->dump_fh) {
+		ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+		if (buf) {
+			ret =
+			    simple_read_from_buffer(user_buf, count, ppos, buf,
+						    pos);
+			kfree(buf);
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+			    size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char buf[12];
+	const size_t bufsz = sizeof(buf);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "%d\n",
+		      il->missed_beacon_threshold);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int missed;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &missed) != 1)
+		return -EINVAL;
+
+	if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+	    missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+		il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+	else
+		il->missed_beacon_threshold = missed;
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char buf[300];
+	const size_t bufsz = sizeof(buf);
+	struct il_force_reset *force_reset;
+
+	force_reset = &il->force_reset;
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "\tnumber of reset requests: %d\n",
+		      force_reset->reset_request_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "\tnumber of successful reset requests: %d\n",
+		      force_reset->reset_success_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "\tnumber of rejected reset requests: %d\n",
+		      force_reset->reset_reject_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+		      force_reset->reset_duration);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+
+	int ret;
+	struct il_priv *il = file->private_data;
+
+	ret = il_force_reset(il, true);
+
+	return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int timeout;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &timeout) != 1)
+		return -EINVAL;
+	if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+		timeout = IL_DEF_WD_TIMEOUT;
+
+	il->cfg->base_params->wd_timeout = timeout;
+	il_setup_watchdog(il);
+	return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ *
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+	struct dentry *phyd = il->hw->wiphy->debugfsdir;
+	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+	dir_drv = debugfs_create_dir(name, phyd);
+	if (!dir_drv)
+		return -ENOMEM;
+
+	il->debugfs_dir = dir_drv;
+
+	dir_data = debugfs_create_dir("data", dir_drv);
+	if (!dir_data)
+		goto err;
+	dir_rf = debugfs_create_dir("rf", dir_drv);
+	if (!dir_rf)
+		goto err;
+	dir_debug = debugfs_create_dir("debug", dir_drv);
+	if (!dir_debug)
+		goto err;
+
+	DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+	DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+	DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+	DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+	DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+	DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+	if (il->cfg->base_params->sensitivity_calib_by_driver)
+		DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+	if (il->cfg->base_params->chain_noise_calib_by_driver)
+		DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+	if (il->cfg->base_params->sensitivity_calib_by_driver)
+		DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+				 &il->disable_sens_cal);
+	if (il->cfg->base_params->chain_noise_calib_by_driver)
+		DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+				 &il->disable_chain_noise_cal);
+	DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+	return 0;
+
+err:
+	IL_ERR("Can't create the debugfs directory\n");
+	il_dbgfs_unregister(il);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ *
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+	if (!il->debugfs_dir)
+		return;
+
+	debugfs_remove_recursive(il->debugfs_dir);
+	il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
-	int p = 0;
-
-	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
-		       le32_to_cpu(priv->_3945.statistics.flag));
-	if (le32_to_cpu(priv->_3945.statistics.flag) &
-			UCODE_STATISTICS_CLEAR_MSK)
-		p += scnprintf(buf + p, bufsz - p,
-			       "\tStatistics have been cleared\n");
-	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
-		       (le32_to_cpu(priv->_3945.statistics.flag) &
-			UCODE_STATISTICS_FREQUENCY_MSK)
-			? "2.4 GHz" : "5.2 GHz");
-	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
-		       (le32_to_cpu(priv->_3945.statistics.flag) &
-			UCODE_STATISTICS_NARROW_BAND_MSK)
-			? "enabled" : "disabled");
-	return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
-				    char __user *user_buf,
-				    size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char *buf;
-	int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
-		    sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
-	ssize_t ret;
-	struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
-					*max_ofdm;
-	struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
-	struct iwl39_statistics_rx_non_phy *general, *accum_general;
-	struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * The statistic information display here is based on
-	 * the last statistics notification from uCode
-	 * might not reflect the current uCode activity
-	 */
-	ofdm = &priv->_3945.statistics.rx.ofdm;
-	cck = &priv->_3945.statistics.rx.cck;
-	general = &priv->_3945.statistics.rx.general;
-	accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
-	accum_cck = &priv->_3945.accum_statistics.rx.cck;
-	accum_general = &priv->_3945.accum_statistics.rx.general;
-	delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
-	delta_cck = &priv->_3945.delta_statistics.rx.cck;
-	delta_general = &priv->_3945.delta_statistics.rx.general;
-	max_ofdm = &priv->_3945.max_delta.rx.ofdm;
-	max_cck = &priv->_3945.max_delta.rx.cck;
-	max_general = &priv->_3945.max_delta.rx.general;
-
-	pos += iwl3945_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-			 "acumulative       delta         max\n",
-			 "Statistics_Rx - OFDM:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
-			 accum_ofdm->ina_cnt,
-			 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "fina_cnt:",
-			 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
-			 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
-			 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
-			 delta_ofdm->plcp_err, max_ofdm->plcp_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",  "crc32_err:",
-			 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
-			 delta_ofdm->crc32_err, max_ofdm->crc32_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
-			 le32_to_cpu(ofdm->overrun_err),
-			 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
-			 max_ofdm->overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "early_overrun_err:",
-			 le32_to_cpu(ofdm->early_overrun_err),
-			 accum_ofdm->early_overrun_err,
-			 delta_ofdm->early_overrun_err,
-			 max_ofdm->early_overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
-			 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
-			 max_ofdm->crc32_good);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
-			 le32_to_cpu(ofdm->false_alarm_cnt),
-			 accum_ofdm->false_alarm_cnt,
-			 delta_ofdm->false_alarm_cnt,
-			 max_ofdm->false_alarm_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "fina_sync_err_cnt:",
-			 le32_to_cpu(ofdm->fina_sync_err_cnt),
-			 accum_ofdm->fina_sync_err_cnt,
-			 delta_ofdm->fina_sync_err_cnt,
-			 max_ofdm->fina_sync_err_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sfd_timeout:",
-			 le32_to_cpu(ofdm->sfd_timeout),
-			 accum_ofdm->sfd_timeout,
-			 delta_ofdm->sfd_timeout,
-			 max_ofdm->sfd_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "fina_timeout:",
-			 le32_to_cpu(ofdm->fina_timeout),
-			 accum_ofdm->fina_timeout,
-			 delta_ofdm->fina_timeout,
-			 max_ofdm->fina_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "unresponded_rts:",
-			 le32_to_cpu(ofdm->unresponded_rts),
-			 accum_ofdm->unresponded_rts,
-			 delta_ofdm->unresponded_rts,
-			 max_ofdm->unresponded_rts);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "rxe_frame_lmt_ovrun:",
-			 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
-			 accum_ofdm->rxe_frame_limit_overrun,
-			 delta_ofdm->rxe_frame_limit_overrun,
-			 max_ofdm->rxe_frame_limit_overrun);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sent_ack_cnt:",
-			 le32_to_cpu(ofdm->sent_ack_cnt),
-			 accum_ofdm->sent_ack_cnt,
-			 delta_ofdm->sent_ack_cnt,
-			 max_ofdm->sent_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sent_cts_cnt:",
-			 le32_to_cpu(ofdm->sent_cts_cnt),
-			 accum_ofdm->sent_cts_cnt,
-			 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-			 "acumulative       delta         max\n",
-			 "Statistics_Rx - CCK:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "ina_cnt:",
-			 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
-			 delta_cck->ina_cnt, max_cck->ina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "fina_cnt:",
-			 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
-			 delta_cck->fina_cnt, max_cck->fina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "plcp_err:",
-			 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
-			 delta_cck->plcp_err, max_cck->plcp_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "crc32_err:",
-			 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
-			 delta_cck->crc32_err, max_cck->crc32_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "overrun_err:",
-			 le32_to_cpu(cck->overrun_err),
-			 accum_cck->overrun_err,
-			 delta_cck->overrun_err, max_cck->overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "early_overrun_err:",
-			 le32_to_cpu(cck->early_overrun_err),
-			 accum_cck->early_overrun_err,
-			 delta_cck->early_overrun_err,
-			 max_cck->early_overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "crc32_good:",
-			 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
-			 delta_cck->crc32_good,
-			 max_cck->crc32_good);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "false_alarm_cnt:",
-			 le32_to_cpu(cck->false_alarm_cnt),
-			 accum_cck->false_alarm_cnt,
-			 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "fina_sync_err_cnt:",
-			 le32_to_cpu(cck->fina_sync_err_cnt),
-			 accum_cck->fina_sync_err_cnt,
-			 delta_cck->fina_sync_err_cnt,
-			 max_cck->fina_sync_err_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sfd_timeout:",
-			 le32_to_cpu(cck->sfd_timeout),
-			 accum_cck->sfd_timeout,
-			 delta_cck->sfd_timeout, max_cck->sfd_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "fina_timeout:",
-			 le32_to_cpu(cck->fina_timeout),
-			 accum_cck->fina_timeout,
-			 delta_cck->fina_timeout, max_cck->fina_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "unresponded_rts:",
-			 le32_to_cpu(cck->unresponded_rts),
-			 accum_cck->unresponded_rts,
-			 delta_cck->unresponded_rts,
-			 max_cck->unresponded_rts);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "rxe_frame_lmt_ovrun:",
-			 le32_to_cpu(cck->rxe_frame_limit_overrun),
-			 accum_cck->rxe_frame_limit_overrun,
-			 delta_cck->rxe_frame_limit_overrun,
-			 max_cck->rxe_frame_limit_overrun);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sent_ack_cnt:",
-			 le32_to_cpu(cck->sent_ack_cnt),
-			 accum_cck->sent_ack_cnt,
-			 delta_cck->sent_ack_cnt,
-			 max_cck->sent_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sent_cts_cnt:",
-			 le32_to_cpu(cck->sent_cts_cnt),
-			 accum_cck->sent_cts_cnt,
-			 delta_cck->sent_cts_cnt,
-			 max_cck->sent_cts_cnt);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-			 "acumulative       delta         max\n",
-			 "Statistics_Rx - GENERAL:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "bogus_cts:",
-			 le32_to_cpu(general->bogus_cts),
-			 accum_general->bogus_cts,
-			 delta_general->bogus_cts, max_general->bogus_cts);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "bogus_ack:",
-			 le32_to_cpu(general->bogus_ack),
-			 accum_general->bogus_ack,
-			 delta_general->bogus_ack, max_general->bogus_ack);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "non_bssid_frames:",
-			 le32_to_cpu(general->non_bssid_frames),
-			 accum_general->non_bssid_frames,
-			 delta_general->non_bssid_frames,
-			 max_general->non_bssid_frames);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "filtered_frames:",
-			 le32_to_cpu(general->filtered_frames),
-			 accum_general->filtered_frames,
-			 delta_general->filtered_frames,
-			 max_general->filtered_frames);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "non_channel_beacons:",
-			 le32_to_cpu(general->non_channel_beacons),
-			 accum_general->non_channel_beacons,
-			 delta_general->non_channel_beacons,
-			 max_general->non_channel_beacons);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
-				    char __user *user_buf,
-				    size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char *buf;
-	int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
-	ssize_t ret;
-	struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * The statistic information display here is based on
-	 * the last statistics notification from uCode
-	 * might not reflect the current uCode activity
-	 */
-	tx = &priv->_3945.statistics.tx;
-	accum_tx = &priv->_3945.accum_statistics.tx;
-	delta_tx = &priv->_3945.delta_statistics.tx;
-	max_tx = &priv->_3945.max_delta.tx;
-	pos += iwl3945_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-			 "acumulative       delta         max\n",
-			 "Statistics_Tx:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "preamble:",
-			 le32_to_cpu(tx->preamble_cnt),
-			 accum_tx->preamble_cnt,
-			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "rx_detected_cnt:",
-			 le32_to_cpu(tx->rx_detected_cnt),
-			 accum_tx->rx_detected_cnt,
-			 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "bt_prio_defer_cnt:",
-			 le32_to_cpu(tx->bt_prio_defer_cnt),
-			 accum_tx->bt_prio_defer_cnt,
-			 delta_tx->bt_prio_defer_cnt,
-			 max_tx->bt_prio_defer_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "bt_prio_kill_cnt:",
-			 le32_to_cpu(tx->bt_prio_kill_cnt),
-			 accum_tx->bt_prio_kill_cnt,
-			 delta_tx->bt_prio_kill_cnt,
-			 max_tx->bt_prio_kill_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "few_bytes_cnt:",
-			 le32_to_cpu(tx->few_bytes_cnt),
-			 accum_tx->few_bytes_cnt,
-			 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "cts_timeout:",
-			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
-			 delta_tx->cts_timeout, max_tx->cts_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "ack_timeout:",
-			 le32_to_cpu(tx->ack_timeout),
-			 accum_tx->ack_timeout,
-			 delta_tx->ack_timeout, max_tx->ack_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "expected_ack_cnt:",
-			 le32_to_cpu(tx->expected_ack_cnt),
-			 accum_tx->expected_ack_cnt,
-			 delta_tx->expected_ack_cnt,
-			 max_tx->expected_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "actual_ack_cnt:",
-			 le32_to_cpu(tx->actual_ack_cnt),
-			 accum_tx->actual_ack_cnt,
-			 delta_tx->actual_ack_cnt,
-			 max_tx->actual_ack_cnt);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char *buf;
-	int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
-	ssize_t ret;
-	struct iwl39_statistics_general *general, *accum_general;
-	struct iwl39_statistics_general *delta_general, *max_general;
-	struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
-	struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * The statistic information display here is based on
-	 * the last statistics notification from uCode
-	 * might not reflect the current uCode activity
-	 */
-	general = &priv->_3945.statistics.general;
-	dbg = &priv->_3945.statistics.general.dbg;
-	div = &priv->_3945.statistics.general.div;
-	accum_general = &priv->_3945.accum_statistics.general;
-	delta_general = &priv->_3945.delta_statistics.general;
-	max_general = &priv->_3945.max_delta.general;
-	accum_dbg = &priv->_3945.accum_statistics.general.dbg;
-	delta_dbg = &priv->_3945.delta_statistics.general.dbg;
-	max_dbg = &priv->_3945.max_delta.general.dbg;
-	accum_div = &priv->_3945.accum_statistics.general.div;
-	delta_div = &priv->_3945.delta_statistics.general.div;
-	max_div = &priv->_3945.max_delta.general.div;
-	pos += iwl3945_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-			 "acumulative       delta         max\n",
-			 "Statistics_General:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "burst_check:",
-			 le32_to_cpu(dbg->burst_check),
-			 accum_dbg->burst_check,
-			 delta_dbg->burst_check, max_dbg->burst_check);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "burst_count:",
-			 le32_to_cpu(dbg->burst_count),
-			 accum_dbg->burst_count,
-			 delta_dbg->burst_count, max_dbg->burst_count);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "sleep_time:",
-			 le32_to_cpu(general->sleep_time),
-			 accum_general->sleep_time,
-			 delta_general->sleep_time, max_general->sleep_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "slots_out:",
-			 le32_to_cpu(general->slots_out),
-			 accum_general->slots_out,
-			 delta_general->slots_out, max_general->slots_out);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "slots_idle:",
-			 le32_to_cpu(general->slots_idle),
-			 accum_general->slots_idle,
-			 delta_general->slots_idle, max_general->slots_idle);
-	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
-			 le32_to_cpu(general->ttl_timestamp));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "tx_on_a:",
-			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
-			 delta_div->tx_on_a, max_div->tx_on_a);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "tx_on_b:",
-			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
-			 delta_div->tx_on_b, max_div->tx_on_b);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "exec_time:",
-			 le32_to_cpu(div->exec_time), accum_div->exec_time,
-			 delta_div->exec_time, max_div->exec_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 "  %-30s %10u  %10u  %10u  %10u\n",
-			 "probe_time:",
-			 le32_to_cpu(div->probe_time), accum_div->probe_time,
-			 delta_div->probe_time, max_div->probe_time);
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b3..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-				    size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-				    size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-					 char __user *user_buf, size_t count,
-					 loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
-					   char __user *user_buf, size_t count,
-					   loff_t *ppos)
-{
-	return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
-					   char __user *user_buf, size_t count,
-					   loff_t *ppos)
-{
-	return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos)
-{
-	return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c991..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND                   (0x0800)
-#define FH39_MEM_UPPER_BOUND                   (0x1000)
-
-#define FH39_CBCC_TABLE		(FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE		(FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE		(FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE		(FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE		(FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE		(FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf)			(FH39_TFDB_TABLE + \
-						 ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)	(FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch)		(FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch)	(FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch)	(FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch)			(FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch)		(FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch)		(FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch)		(FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch)	(FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR		(FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL			(FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS		(FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch)			(FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch)		(FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch)		(FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch)	(FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TABLE + 0x010)
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL                            (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128		(0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST		(0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH			(0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER		(0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL	(0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL	(0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD		(0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT		(0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE		(0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE		(0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID		(0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR		(0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON	(0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON	(0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B	(0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON		(0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON		(0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH	(0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH		(0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)	(BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)	(BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
-	(FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
-	 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE			(0x01000000)
-
-struct iwl3945_tfd_tb {
-	__le32 addr;
-	__le32 len;
-} __packed;
-
-struct iwl3945_tfd {
-	__le32 control_flags;
-	struct iwl3945_tfd_tb tbs[4];
-	u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET	95
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- *   to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- *   (PA) output voltage detector.  This same detector is used during Tx of
- *   long packets in normal operation to provide feedback as to proper output
- *   level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
-	u8 gain_index;		/* index into power (gain) setup table ... */
-	s8 power;		/* ... for this pwr level for this chnl group */
-	u16 v_det;		/* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- *    to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
-	struct iwl3945_eeprom_txpower_sample samples[5];  /* 5 power levels */
-	s32 a, b, c, d, e;	/* coefficients for voltage->power
-				 * formula (signed) */
-	s32 Fa, Fb, Fc, Fd, Fe;	/* these modify coeffs based on
-				 * frequency (signed) */
-	s8 saturation_power;	/* highest power possible by h/w in this
-				 * band */
-	u8 group_channel;	/* "representative" channel # in this band */
-	s16 temperature;	/* h/w temperature at factory calib this band
-				 * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are use to modify a/b/c/d/e coeffs based on
- *   difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
-	u32 Ta;
-	u32 Tb;
-	u32 Tc;
-	u32 Td;
-	u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
-	u8 reserved0[16];
-	u16 device_id;	/* abs.ofs: 16 */
-	u8 reserved1[2];
-	u16 pmc;		/* abs.ofs: 20 */
-	u8 reserved2[20];
-	u8 mac_address[6];	/* abs.ofs: 42 */
-	u8 reserved3[58];
-	u16 board_revision;	/* abs.ofs: 106 */
-	u8 reserved4[11];
-	u8 board_pba_number[9];	/* abs.ofs: 119 */
-	u8 reserved5[8];
-	u16 version;		/* abs.ofs: 136 */
-	u8 sku_cap;		/* abs.ofs: 138 */
-	u8 leds_mode;		/* abs.ofs: 139 */
-	u16 oem_mode;
-	u16 wowlan_mode;	/* abs.ofs: 142 */
-	u16 leds_time_interval;	/* abs.ofs: 144 */
-	u8 leds_off_time;	/* abs.ofs: 146 */
-	u8 leds_on_time;	/* abs.ofs: 147 */
-	u8 almgor_m_version;	/* abs.ofs: 148 */
-	u8 antenna_switch_type;	/* abs.ofs: 149 */
-	u8 reserved6[42];
-	u8 sku_id[4];		/* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-	u16 band_1_count;	/* abs.ofs: 196 */
-	struct iwl_eeprom_channel band_1_channels[14];  /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-	u16 band_2_count;	/* abs.ofs: 226 */
-	struct iwl_eeprom_channel band_2_channels[13];  /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-	u16 band_3_count;	/* abs.ofs: 254 */
-	struct iwl_eeprom_channel band_3_channels[12];  /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-	u16 band_4_count;	/* abs.ofs: 280 */
-	struct iwl_eeprom_channel band_4_channels[11];  /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-	u16 band_5_count;	/* abs.ofs: 304 */
-	struct iwl_eeprom_channel band_5_channels[6];  /* abs.ofs: 306 */
-
-	u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-	struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
-	struct iwl3945_eeprom_temperature_corr corrections;  /* abs.ofs: 832 */
-	u8 reserved16[172];	/* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)	/* bit 6    */
-#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)	/* bit 7    */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES        5
-#define IWL39_CMD_QUEUE_NUM	4
-
-#define IWL_DEFAULT_TX_RETRY  15
-
-/*********************************************/
-
-#define RFD_SIZE                              4
-#define NUM_TFD_CHUNKS                        4
-
-#define RX_QUEUE_SIZE                         256
-#define RX_QUEUE_MASK                         255
-#define RX_QUEUE_SIZE_LOG                     8
-
-#define U32_PAD(n)		((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n)       (n << 24)
-#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n)         (n << 28)
-#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND		(0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND		(0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND		(0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND		(0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
-				IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
-				IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
-	return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
-	       (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
-	__le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
-	return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
-	return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
-	return cpu_to_le16((u16)rate|flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f3..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
-				struct iwl_led_cmd *led_cmd)
-{
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_LEDS_CMD,
-		.len = sizeof(struct iwl_led_cmd),
-		.data = led_cmd,
-		.flags = CMD_ASYNC,
-		.callback = NULL,
-	};
-
-	return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
-	.cmd = iwl3945_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 9671627..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
-	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
-	7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
-	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
-	7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
-	s8 min_rssi;
-	u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
-	{-60, IWL_RATE_54M_INDEX},
-	{-64, IWL_RATE_48M_INDEX},
-	{-72, IWL_RATE_36M_INDEX},
-	{-80, IWL_RATE_24M_INDEX},
-	{-84, IWL_RATE_18M_INDEX},
-	{-85, IWL_RATE_12M_INDEX},
-	{-87, IWL_RATE_9M_INDEX},
-	{-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
-	{-60, IWL_RATE_54M_INDEX},
-	{-64, IWL_RATE_48M_INDEX},
-	{-68, IWL_RATE_36M_INDEX},
-	{-80, IWL_RATE_24M_INDEX},
-	{-84, IWL_RATE_18M_INDEX},
-	{-85, IWL_RATE_12M_INDEX},
-	{-86, IWL_RATE_11M_INDEX},
-	{-88, IWL_RATE_5M_INDEX},
-	{-90, IWL_RATE_2M_INDEX},
-	{-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW          62
-#define IWL_RATE_FLUSH		(3*HZ)
-#define IWL_RATE_WIN_FLUSH       (HZ/2)
-#define IWL39_RATE_HIGH_TH          11520
-#define IWL_SUCCESS_UP_TH	   8960
-#define IWL_SUCCESS_DOWN_TH	  10880
-#define IWL_RATE_MIN_FAILURE_TH       6
-#define IWL_RATE_MIN_SUCCESS_TH       8
-#define IWL_RATE_DECREASE_TH       1920
-#define IWL_RATE_RETRY_TH	     15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
-	u32 index = 0;
-	u32 table_size = 0;
-	struct iwl3945_tpt_entry *tpt_table = NULL;
-
-	if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
-		rssi = IWL_MIN_RSSI_VAL;
-
-	switch (band) {
-	case IEEE80211_BAND_2GHZ:
-		tpt_table = iwl3945_tpt_table_g;
-		table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
-		break;
-
-	case IEEE80211_BAND_5GHZ:
-		tpt_table = iwl3945_tpt_table_a;
-		table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
-		break;
-
-	default:
-		BUG();
-		break;
-	}
-
-	while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
-		index++;
-
-	index = min(index, (table_size - 1));
-
-	return tpt_table[index].index;
-}
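/*
 * Illustrative sketch (not part of the deleted file): the table walk above
 * selects the first entry whose min_rssi the measured RSSI still clears and
 * clamps to the lowest rate otherwise.  Plain ints stand in for the rate
 * index constants here.
 */
struct tpt_demo_entry { int min_rssi; int rate_index; };

static int demo_rate_for_rssi(int rssi, const struct tpt_demo_entry *tbl,
			      unsigned int n)
{
	unsigned int i = 0;

	while (i < n && rssi < tbl[i].min_rssi)
		i++;
	if (i >= n)
		i = n - 1;
	/* e.g. with the 2.4 GHz table above, -70 dBm stops at the -80 dBm
	 * entry, i.e. the 24M index */
	return tbl[i].rate_index;
}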
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
-	window->data = 0;
-	window->success_counter = 0;
-	window->success_ratio = -1;
-	window->counter = 0;
-	window->average_tpt = IWL_INVALID_VALUE;
-	window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed.  If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
-	int unflushed = 0;
-	int i;
-	unsigned long flags;
-	struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-	/*
-	 * For each rate, if we have collected data on that rate
-	 * and it has been more than IWL_RATE_WIN_FLUSH
-	 * since we flushed, clear out the gathered statistics
-	 */
-	for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
-		if (!rs_sta->win[i].counter)
-			continue;
-
-		spin_lock_irqsave(&rs_sta->lock, flags);
-		if (time_after(jiffies, rs_sta->win[i].stamp +
-			       IWL_RATE_WIN_FLUSH)) {
-			IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
-				       "index %d\n",
-				       rs_sta->win[i].counter, i);
-			iwl3945_clear_window(&rs_sta->win[i]);
-		} else
-			unflushed++;
-		spin_unlock_irqrestore(&rs_sta->lock, flags);
-	}
-
-	return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX              5000	/* msec */
-#define IWL_RATE_FLUSH_MIN              50	/* msec */
-#define IWL_AVERAGE_PACKETS             1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
-	struct iwl3945_rs_sta *rs_sta = (void *)data;
-	struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-	int unflushed = 0;
-	unsigned long flags;
-	u32 packet_count, duration, pps;
-
-	IWL_DEBUG_RATE(priv, "enter\n");
-
-	unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
-	spin_lock_irqsave(&rs_sta->lock, flags);
-
-	/* Number of packets Rx'd since last time this timer ran */
-	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
-
-	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
-	if (unflushed) {
-		duration =
-		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
-		IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
-			       packet_count, duration);
-
-		/* Determine packets per second */
-		if (duration)
-			pps = (packet_count * 1000) / duration;
-		else
-			pps = 0;
-
-		if (pps) {
-			duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
-			if (duration < IWL_RATE_FLUSH_MIN)
-				duration = IWL_RATE_FLUSH_MIN;
-			else if (duration > IWL_RATE_FLUSH_MAX)
-				duration = IWL_RATE_FLUSH_MAX;
-		} else
-			duration = IWL_RATE_FLUSH_MAX;
-
-		rs_sta->flush_time = msecs_to_jiffies(duration);
-
-		IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
-			       duration, packet_count);
-
-		mod_timer(&rs_sta->rate_scale_flush, jiffies +
-			  rs_sta->flush_time);
-
-		rs_sta->last_partial_flush = jiffies;
-	} else {
-		rs_sta->flush_time = IWL_RATE_FLUSH;
-		rs_sta->flush_pending = 0;
-	}
-	/* If there weren't any unflushed entries, we don't schedule the timer
-	 * to run again */
-
-	rs_sta->last_flush = jiffies;
-
-	spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-	IWL_DEBUG_RATE(priv, "leave\n");
-}
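/*
 * Illustrative sketch (not part of the deleted file): the flush timer above
 * re-arms itself so that it fires roughly once per IWL_AVERAGE_PACKETS
 * (1500) transmitted frames, with the period clamped between
 * IWL_RATE_FLUSH_MIN (50) and IWL_RATE_FLUSH_MAX (5000) msec.
 */
static unsigned int demo_flush_period_ms(unsigned int packets,
					 unsigned int elapsed_ms)
{
	unsigned int pps = elapsed_ms ? (packets * 1000) / elapsed_ms : 0;
	unsigned int period = pps ? (1500 * 1000) / pps : 5000;

	if (period < 50)
		period = 50;
	else if (period > 5000)
		period = 5000;
	/* e.g. 3000 packets in 1000 ms -> 3000 pps -> a 500 ms period */
	return period;
}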
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 64 packets transmitted
- * at this rate.  window->data contains the bitmask of successful
- * packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
-				struct iwl3945_rate_scale_data *window,
-				int success, int retries, int index)
-{
-	unsigned long flags;
-	s32 fail_count;
-	struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-	if (!retries) {
-		IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
-		return;
-	}
-
-	spin_lock_irqsave(&rs_sta->lock, flags);
-
-	/*
-	 * Keep track of only the latest 62 tx frame attempts in this rate's
-	 * history window; anything older isn't really relevant any more.
-	 * If we have filled up the sliding window, drop the oldest attempt;
-	 * if the oldest attempt (highest bit in bitmap) shows "success",
-	 * subtract "1" from the success counter (this is the main reason
-	 * we keep these bitmaps!).
-	 */
-	while (retries > 0) {
-		if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
-			/* remove earliest */
-			window->counter = IWL_RATE_MAX_WINDOW - 1;
-
-			if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
-				window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
-				window->success_counter--;
-			}
-		}
-
-		/* Increment frames-attempted counter */
-		window->counter++;
-
-		/* Shift bitmap by one frame (throw away oldest history),
-		 * OR in "1", and increment "success" if this
-		 * frame was successful. */
-		window->data <<= 1;
-		if (success > 0) {
-			window->success_counter++;
-			window->data |= 0x1;
-			success--;
-		}
-
-		retries--;
-	}
-
-	/* Calculate current success ratio, avoid divide-by-0! */
-	if (window->counter > 0)
-		window->success_ratio = 128 * (100 * window->success_counter)
-					/ window->counter;
-	else
-		window->success_ratio = IWL_INVALID_VALUE;
-
-	fail_count = window->counter - window->success_counter;
-
-	/* Calculate average throughput, if we have enough history. */
-	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
-	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
-		window->average_tpt = ((window->success_ratio *
-				rs_sta->expected_tpt[index] + 64) / 128);
-	else
-		window->average_tpt = IWL_INVALID_VALUE;
-
-	/* Tag this window as having been updated */
-	window->stamp = jiffies;
-
-	spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
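/*
 * Illustrative sketch (not part of the deleted file): a minimal standalone
 * version of the 62-entry success window maintained above.  One bit per
 * attempt is kept in 'data' (1 = ACKed); success_ratio is scaled by 128 and
 * the average throughput is derived from the per-rate expected_tpt value,
 * mirroring the formulas in the driver code.
 */
#include <stdint.h>

struct demo_window { uint64_t data; int counter; int success; };

static void demo_window_add(struct demo_window *w, int acked)
{
	if (w->counter >= 62) {			/* drop the oldest attempt */
		w->counter = 61;
		if (w->data & (1ULL << 61)) {
			w->data &= ~(1ULL << 61);
			w->success--;
		}
	}
	w->counter++;
	w->data <<= 1;
	if (acked) {
		w->data |= 1;
		w->success++;
	}
}

static int demo_success_ratio(const struct demo_window *w)
{
	return w->counter ? 128 * (100 * w->success) / w->counter : -1;
}

/* Only meaningful once enough history exists, as the threshold checks
 * in the driver ensure. */
static int demo_average_tpt(const struct demo_window *w, int expected_tpt)
{
	return (demo_success_ratio(w) * expected_tpt + 64) / 128;
}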
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
-	struct ieee80211_hw *hw = priv->hw;
-	struct ieee80211_conf *conf = &priv->hw->conf;
-	struct iwl3945_sta_priv *psta;
-	struct iwl3945_rs_sta *rs_sta;
-	struct ieee80211_supported_band *sband;
-	int i;
-
-	IWL_DEBUG_INFO(priv, "enter\n");
-	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
-		goto out;
-
-	psta = (struct iwl3945_sta_priv *) sta->drv_priv;
-	rs_sta = &psta->rs_sta;
-	sband = hw->wiphy->bands[conf->channel->band];
-
-	rs_sta->priv = priv;
-
-	rs_sta->start_rate = IWL_RATE_INVALID;
-
-	/* default to just 802.11b */
-	rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
-	rs_sta->last_partial_flush = jiffies;
-	rs_sta->last_flush = jiffies;
-	rs_sta->flush_time = IWL_RATE_FLUSH;
-	rs_sta->last_tx_packets = 0;
-
-	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
-	rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
-	for (i = 0; i < IWL_RATE_COUNT_3945; i++)
-		iwl3945_clear_window(&rs_sta->win[i]);
-
-	/* TODO: what is a good starting rate for STA? About middle? Maybe not
-	 * the lowest or the highest rate.. Could consider using RSSI from
-	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
-	 * after assoc.. */
-
-	for (i = sband->n_bitrates - 1; i >= 0; i--) {
-		if (sta->supp_rates[sband->band] & (1 << i)) {
-			rs_sta->last_txrate_idx = i;
-			break;
-		}
-	}
-
-	priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
-	/* For the 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
-	if (sband->band == IEEE80211_BAND_5GHZ) {
-		rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
-		priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
-						IWL_FIRST_OFDM_RATE;
-	}
-
-out:
-	priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
-	IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
-	return hw->priv;
-}
-
-/* rate scale requires free function to be implemented */
-static void iwl3945_rs_free(void *priv)
-{
-	return;
-}
-
-static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
-	struct iwl3945_rs_sta *rs_sta;
-	struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
-	struct iwl_priv *priv __maybe_unused = iwl_priv;
-
-	IWL_DEBUG_RATE(priv, "enter\n");
-
-	rs_sta = &psta->rs_sta;
-
-	spin_lock_init(&rs_sta->lock);
-	init_timer(&rs_sta->rate_scale_flush);
-
-	IWL_DEBUG_RATE(priv, "leave\n");
-
-	return rs_sta;
-}
-
-static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
-			void *priv_sta)
-{
-	struct iwl3945_rs_sta *rs_sta = priv_sta;
-
-	/*
-	 * Be careful not to use any members of iwl3945_rs_sta (like trying
-	 * to use iwl_priv to print out debugging) since it may not be fully
-	 * initialized at this point.
-	 */
-	del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * iwl3945_rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
-			 struct ieee80211_sta *sta, void *priv_sta,
-			 struct sk_buff *skb)
-{
-	s8 retries = 0, current_count;
-	int scale_rate_index, first_index, last_index;
-	unsigned long flags;
-	struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
-	struct iwl3945_rs_sta *rs_sta = priv_sta;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-	IWL_DEBUG_RATE(priv, "enter\n");
-
-	retries = info->status.rates[0].count;
-	/* Sanity Check for retries */
-	if (retries > IWL_RATE_RETRY_TH)
-		retries = IWL_RATE_RETRY_TH;
-
-	first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
-	if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
-		IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
-		return;
-	}
-
-	if (!priv_sta) {
-		IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
-		return;
-	}
-
-	/* Treat uninitialized rate scaling data same as non-existing. */
-	if (!rs_sta->priv) {
-		IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
-		return;
-	}
-
-
-	rs_sta->tx_packets++;
-
-	scale_rate_index = first_index;
-	last_index = first_index;
-
-	/*
-	 * Update the window for each rate.  We determine which rates
-	 * were Tx'd based on the total number of retries vs. the number
-	 * of retries configured for each rate -- currently set to the
-	 * priv value 'retry_rate' vs. rate specific
-	 *
-	 * On exit from this while loop last_index indicates the rate
-	 * at which the frame was finally transmitted (or failed if no
-	 * ACK)
-	 */
-	while (retries > 1) {
-		if ((retries - 1) < priv->retry_rate) {
-			current_count = (retries - 1);
-			last_index = scale_rate_index;
-		} else {
-			current_count = priv->retry_rate;
-			last_index = iwl3945_rs_next_rate(priv,
-							 scale_rate_index);
-		}
-
-		/* Update this rate accounting for as many retries
-		 * as was used for it (per current_count) */
-		iwl3945_collect_tx_data(rs_sta,
-				    &rs_sta->win[scale_rate_index],
-				    0, current_count, scale_rate_index);
-		IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
-			       scale_rate_index, current_count);
-
-		retries -= current_count;
-
-		scale_rate_index = last_index;
-	}
-
-
-	/* Update the last index window with success/failure based on ACK */
-	IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
-		       last_index,
-		       (info->flags & IEEE80211_TX_STAT_ACK) ?
-		       "success" : "failure");
-	iwl3945_collect_tx_data(rs_sta,
-			    &rs_sta->win[last_index],
-			    info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
-	/* We updated the rate scale window -- if its been more than
-	 * flush_time since the last run, schedule the flush
-	 * again */
-	spin_lock_irqsave(&rs_sta->lock, flags);
-
-	if (!rs_sta->flush_pending &&
-	    time_after(jiffies, rs_sta->last_flush +
-		       rs_sta->flush_time)) {
-
-		rs_sta->last_partial_flush = jiffies;
-		rs_sta->flush_pending = 1;
-		mod_timer(&rs_sta->rate_scale_flush,
-			  jiffies + rs_sta->flush_time);
-	}
-
-	spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-	IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
-				 u8 index, u16 rate_mask, enum ieee80211_band band)
-{
-	u8 high = IWL_RATE_INVALID;
-	u8 low = IWL_RATE_INVALID;
-	struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-	/* 802.11A walks to the next literal adjacent rate in
-	 * the rate table */
-	if (unlikely(band == IEEE80211_BAND_5GHZ)) {
-		int i;
-		u32 mask;
-
-		/* Find the previous rate that is in the rate mask */
-		i = index - 1;
-		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
-			if (rate_mask & mask) {
-				low = i;
-				break;
-			}
-		}
-
-		/* Find the next rate that is in the rate mask */
-		i = index + 1;
-		for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
-		     i++, mask <<= 1) {
-			if (rate_mask & mask) {
-				high = i;
-				break;
-			}
-		}
-
-		return (high << 8) | low;
-	}
-
-	low = index;
-	while (low != IWL_RATE_INVALID) {
-		if (rs_sta->tgg)
-			low = iwl3945_rates[low].prev_rs_tgg;
-		else
-			low = iwl3945_rates[low].prev_rs;
-		if (low == IWL_RATE_INVALID)
-			break;
-		if (rate_mask & (1 << low))
-			break;
-		IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
-	}
-
-	high = index;
-	while (high != IWL_RATE_INVALID) {
-		if (rs_sta->tgg)
-			high = iwl3945_rates[high].next_rs_tgg;
-		else
-			high = iwl3945_rates[high].next_rs;
-		if (high == IWL_RATE_INVALID)
-			break;
-		if (rate_mask & (1 << high))
-			break;
-		IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
-	}
-
-	return (high << 8) | low;
-}
-
-/**
- * iwl3945_rs_get_rate - find the rate for the requested packet
- *
- * Returns the ieee80211_rate structure allocated by the driver.
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
-			void *priv_sta,	struct ieee80211_tx_rate_control *txrc)
-{
-	struct ieee80211_supported_band *sband = txrc->sband;
-	struct sk_buff *skb = txrc->skb;
-	u8 low = IWL_RATE_INVALID;
-	u8 high = IWL_RATE_INVALID;
-	u16 high_low;
-	int index;
-	struct iwl3945_rs_sta *rs_sta = priv_sta;
-	struct iwl3945_rate_scale_data *window = NULL;
-	int current_tpt = IWL_INVALID_VALUE;
-	int low_tpt = IWL_INVALID_VALUE;
-	int high_tpt = IWL_INVALID_VALUE;
-	u32 fail_count;
-	s8 scale_action = 0;
-	unsigned long flags;
-	u16 rate_mask;
-	s8 max_rate_idx = -1;
-	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-	IWL_DEBUG_RATE(priv, "enter\n");
-
-	/* Treat uninitialized rate scaling data same as non-existing. */
-	if (rs_sta && !rs_sta->priv) {
-		IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
-		priv_sta = NULL;
-	}
-
-	if (rate_control_send_low(sta, priv_sta, txrc))
-		return;
-
-	rate_mask = sta->supp_rates[sband->band];
-
-	/* get user max rate if set */
-	max_rate_idx = txrc->max_rate_idx;
-	if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
-		max_rate_idx += IWL_FIRST_OFDM_RATE;
-	if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
-		max_rate_idx = -1;
-
-	index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
-	if (sband->band == IEEE80211_BAND_5GHZ)
-		rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
-	spin_lock_irqsave(&rs_sta->lock, flags);
-
-	/* for a recent assoc, choose the best rate with respect
-	 * to the rssi value
-	 */
-	if (rs_sta->start_rate != IWL_RATE_INVALID) {
-		if (rs_sta->start_rate < index &&
-		   (rate_mask & (1 << rs_sta->start_rate)))
-			index = rs_sta->start_rate;
-		rs_sta->start_rate = IWL_RATE_INVALID;
-	}
-
-	/* force user max rate if set by user */
-	if ((max_rate_idx != -1) && (max_rate_idx < index)) {
-		if (rate_mask & (1 << max_rate_idx))
-			index = max_rate_idx;
-	}
-
-	window = &(rs_sta->win[index]);
-
-	fail_count = window->counter - window->success_counter;
-
-	if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
-	     (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
-		spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-		IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
-			       "counter: %d, success_counter: %d, "
-			       "expected_tpt is %sNULL\n",
-			       index,
-			       window->counter,
-			       window->success_counter,
-			       rs_sta->expected_tpt ? "not " : "");
-
-	   /* Can't calculate this yet; not enough history */
-		window->average_tpt = IWL_INVALID_VALUE;
-		goto out;
-
-	}
-
-	current_tpt = window->average_tpt;
-
-	high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
-					     sband->band);
-	low = high_low & 0xff;
-	high = (high_low >> 8) & 0xff;
-
-	/* If the user set a max rate, don't allow going above that constraint */
-	if ((max_rate_idx != -1) && (max_rate_idx < high))
-		high = IWL_RATE_INVALID;
-
-	/* Collect Measured throughputs of adjacent rates */
-	if (low != IWL_RATE_INVALID)
-		low_tpt = rs_sta->win[low].average_tpt;
-
-	if (high != IWL_RATE_INVALID)
-		high_tpt = rs_sta->win[high].average_tpt;
-
-	spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-	scale_action = 0;
-
-	/* Low success ratio, need to drop the rate */
-	if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
-		IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
-		scale_action = -1;
-	/* No throughput measured yet for adjacent rates,
-	 * try to increase */
-	} else if ((low_tpt == IWL_INVALID_VALUE) &&
-		   (high_tpt == IWL_INVALID_VALUE)) {
-
-		if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
-			scale_action = 1;
-		else if (low != IWL_RATE_INVALID)
-			scale_action = 0;
-
-	/* Both adjacent throughputs are measured, but neither one has
-	 * better throughput; we're using the best rate, don't change
-	 * it! */
-	} else if ((low_tpt != IWL_INVALID_VALUE) &&
-		 (high_tpt != IWL_INVALID_VALUE) &&
-		 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
-		IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
-			       "current_tpt [%d]\n",
-			       low_tpt, high_tpt, current_tpt);
-		scale_action = 0;
-
-	/* At least one of the rates has better throughput */
-	} else {
-		if (high_tpt != IWL_INVALID_VALUE) {
-
-			/* High rate has better throughput, Increase
-			 * rate */
-			if (high_tpt > current_tpt &&
-				window->success_ratio >= IWL_RATE_INCREASE_TH)
-				scale_action = 1;
-			else {
-				IWL_DEBUG_RATE(priv,
-				    "hold rate: high tpt not better or low success ratio\n");
-				scale_action = 0;
-			}
-		} else if (low_tpt != IWL_INVALID_VALUE) {
-			if (low_tpt > current_tpt) {
-				IWL_DEBUG_RATE(priv,
-				    "decrease rate because of low tpt\n");
-				scale_action = -1;
-			} else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
-				/* Lower rate is not better and the
-				 * success ratio is good, increase rate */
-				scale_action = 1;
-			}
-		}
-	}
-
-	/* Sanity check; asked for decrease, but success rate or throughput
-	 * has been good at old rate.  Don't change it. */
-	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
-		    ((window->success_ratio > IWL_RATE_HIGH_TH) ||
-		     (current_tpt > (100 * rs_sta->expected_tpt[low]))))
-		scale_action = 0;
-
-	switch (scale_action) {
-	case -1:
-
-		/* Decrease rate */
-		if (low != IWL_RATE_INVALID)
-			index = low;
-		break;
-
-	case 1:
-		/* Increase rate */
-		if (high != IWL_RATE_INVALID)
-			index = high;
-
-		break;
-
-	case 0:
-	default:
-		/* No change */
-		break;
-	}
-
-	IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
-		       index, scale_action, low, high);
-
- out:
-
-	if (sband->band == IEEE80211_BAND_5GHZ) {
-		if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
-			index = IWL_FIRST_OFDM_RATE;
-		rs_sta->last_txrate_idx = index;
-		info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
-	} else {
-		rs_sta->last_txrate_idx = index;
-		info->control.rates[0].idx = rs_sta->last_txrate_idx;
-	}
-
-	IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
-						  char __user *user_buf,
-						  size_t count, loff_t *ppos)
-{
-	char *buff;
-	int desc = 0;
-	int j;
-	ssize_t ret;
-	struct iwl3945_rs_sta *lq_sta = file->private_data;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
-
-	desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
-			"rate=0x%X flush time %d\n",
-			lq_sta->tx_packets,
-			lq_sta->last_txrate_idx,
-			lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
-	for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
-		desc += sprintf(buff+desc,
-				"counter=%d success=%d %%=%d\n",
-				lq_sta->win[j].counter,
-				lq_sta->win[j].success_counter,
-				lq_sta->win[j].success_ratio);
-	}
-	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-	kfree(buff);
-	return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
-	.read = iwl3945_sta_dbgfs_stats_table_read,
-	.open = iwl3945_open_file_generic,
-	.llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
-				struct dentry *dir)
-{
-	struct iwl3945_rs_sta *lq_sta = priv_sta;
-
-	lq_sta->rs_sta_dbgfs_stats_table_file =
-		debugfs_create_file("rate_stats_table", 0600, dir,
-		lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
-	struct iwl3945_rs_sta *lq_sta = priv_sta;
-	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void iwl3945_rs_rate_init_stub(void *priv_r,
-				struct ieee80211_supported_band *sband,
-			      struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
-	.module = NULL,
-	.name = RS_NAME,
-	.tx_status = iwl3945_rs_tx_status,
-	.get_rate = iwl3945_rs_get_rate,
-	.rate_init = iwl3945_rs_rate_init_stub,
-	.alloc = iwl3945_rs_alloc,
-	.free = iwl3945_rs_free,
-	.alloc_sta = iwl3945_rs_alloc_sta,
-	.free_sta = iwl3945_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
-	.add_sta_debugfs = iwl3945_add_debugfs,
-	.remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
-	struct iwl_priv *priv = hw->priv;
-	s32 rssi = 0;
-	unsigned long flags;
-	struct iwl3945_rs_sta *rs_sta;
-	struct ieee80211_sta *sta;
-	struct iwl3945_sta_priv *psta;
-
-	IWL_DEBUG_RATE(priv, "enter\n");
-
-	rcu_read_lock();
-
-	sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
-				 priv->stations[sta_id].sta.sta.addr);
-	if (!sta) {
-		IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
-		rcu_read_unlock();
-		return;
-	}
-
-	psta = (void *) sta->drv_priv;
-	rs_sta = &psta->rs_sta;
-
-	spin_lock_irqsave(&rs_sta->lock, flags);
-
-	rs_sta->tgg = 0;
-	switch (priv->band) {
-	case IEEE80211_BAND_2GHZ:
-		/* TODO: this always does G, not a regression */
-		if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
-						RXON_FLG_TGG_PROTECT_MSK) {
-			rs_sta->tgg = 1;
-			rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
-		} else
-			rs_sta->expected_tpt = iwl3945_expected_tpt_g;
-		break;
-
-	case IEEE80211_BAND_5GHZ:
-		rs_sta->expected_tpt = iwl3945_expected_tpt_a;
-		break;
-	case IEEE80211_NUM_BANDS:
-		BUG();
-		break;
-	}
-
-	spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-	rssi = priv->_3945.last_rx_rssi;
-	if (rssi == 0)
-		rssi = IWL_MIN_RSSI_VAL;
-
-	IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
-	rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
-	IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
-		       "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
-		       iwl3945_rates[rs_sta->start_rate].plcp);
-	rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
-	return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
-	ieee80211_rate_control_unregister(&rs_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a74..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
-	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
-				    IWL_RATE_##r##M_IEEE,   \
-				    IWL_RATE_##ip##M_INDEX, \
-				    IWL_RATE_##in##M_INDEX, \
-				    IWL_RATE_##rp##M_INDEX, \
-				    IWL_RATE_##rn##M_INDEX, \
-				    IWL_RATE_##pp##M_INDEX, \
-				    IWL_RATE_##np##M_INDEX, \
-				    IWL_RATE_##r##M_INDEX_TABLE, \
-				    IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- *   rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
-	IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
-	IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
-	IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
-	IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
-	IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
-	IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
-	IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-	IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-	IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-	IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-	IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-	IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
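/*
 * Illustrative note (not part of the deleted file): each line of the table
 * above expands through IWL_DECLARE_RATE_INFO().  Assuming the
 * iwl3945_rate_info layout from the matching header, the 6 Mbps entry
 * becomes roughly:
 *
 *	[IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP, IWL_RATE_6M_IEEE,
 *				IWL_RATE_5M_INDEX, IWL_RATE_9M_INDEX,
 *				IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX,
 *				IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX,
 *				IWL_RATE_6M_INDEX_TABLE,
 *				IWL_RATE_5M_INDEX_TABLE },
 *
 * i.e. the previous/next IEEE rates, the previous/next rate-scale rates,
 * the previous/next TGG rate-scale rates, and the two table indices.
 */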
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
-	u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
-	if (rate == IWL_RATE_INVALID)
-		rate = rate_index;
-	return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
- *   Default values of 0 enable uCode events to be logged.
- * Use only for special debugging.  This function is just a placeholder as-is,
- *   you'll need to provide the special bits! ...
- *   ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
-	int i;
-	u32 base;		/* SRAM address of event log header */
-	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
-	u32 array_size;		/* # of u32 entries in array */
-	static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
-		0x00000000,	/*   31 -    0  Event id numbers */
-		0x00000000,	/*   63 -   32 */
-		0x00000000,	/*   95 -   64 */
-		0x00000000,	/*  127 -   96 */
-		0x00000000,	/*  159 -  128 */
-		0x00000000,	/*  191 -  160 */
-		0x00000000,	/*  223 -  192 */
-		0x00000000,	/*  255 -  224 */
-		0x00000000,	/*  287 -  256 */
-		0x00000000,	/*  319 -  288 */
-		0x00000000,	/*  351 -  320 */
-		0x00000000,	/*  383 -  352 */
-		0x00000000,	/*  415 -  384 */
-		0x00000000,	/*  447 -  416 */
-		0x00000000,	/*  479 -  448 */
-		0x00000000,	/*  511 -  480 */
-		0x00000000,	/*  543 -  512 */
-		0x00000000,	/*  575 -  544 */
-		0x00000000,	/*  607 -  576 */
-		0x00000000,	/*  639 -  608 */
-		0x00000000,	/*  671 -  640 */
-		0x00000000,	/*  703 -  672 */
-		0x00000000,	/*  735 -  704 */
-		0x00000000,	/*  767 -  736 */
-		0x00000000,	/*  799 -  768 */
-		0x00000000,	/*  831 -  800 */
-		0x00000000,	/*  863 -  832 */
-		0x00000000,	/*  895 -  864 */
-		0x00000000,	/*  927 -  896 */
-		0x00000000,	/*  959 -  928 */
-		0x00000000,	/*  991 -  960 */
-		0x00000000,	/* 1023 -  992 */
-		0x00000000,	/* 1055 - 1024 */
-		0x00000000,	/* 1087 - 1056 */
-		0x00000000,	/* 1119 - 1088 */
-		0x00000000,	/* 1151 - 1120 */
-		0x00000000,	/* 1183 - 1152 */
-		0x00000000,	/* 1215 - 1184 */
-		0x00000000,	/* 1247 - 1216 */
-		0x00000000,	/* 1279 - 1248 */
-		0x00000000,	/* 1311 - 1280 */
-		0x00000000,	/* 1343 - 1312 */
-		0x00000000,	/* 1375 - 1344 */
-		0x00000000,	/* 1407 - 1376 */
-		0x00000000,	/* 1439 - 1408 */
-		0x00000000,	/* 1471 - 1440 */
-		0x00000000,	/* 1503 - 1472 */
-	};
-
-	base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-		IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
-		return;
-	}
-
-	disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
-	array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
-	if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
-		IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
-			       disable_ptr);
-		for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
-			iwl_legacy_write_targ_mem(priv,
-					   disable_ptr + (i * sizeof(u32)),
-					   evt_disable[i]);
-
-	} else {
-		IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
-		IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
-		IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
-			       disable_ptr, array_size);
-	}
-
-}
-
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
-	int idx;
-
-	for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
-		if (iwl3945_rates[idx].plcp == plcp)
-			return idx;
-	return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
-	switch (status & TX_STATUS_MSK) {
-	case TX_3945_STATUS_SUCCESS:
-		return "SUCCESS";
-		TX_STATUS_ENTRY(SHORT_LIMIT);
-		TX_STATUS_ENTRY(LONG_LIMIT);
-		TX_STATUS_ENTRY(FIFO_UNDERRUN);
-		TX_STATUS_ENTRY(MGMNT_ABORT);
-		TX_STATUS_ENTRY(NEXT_FRAG);
-		TX_STATUS_ENTRY(LIFE_EXPIRE);
-		TX_STATUS_ENTRY(DEST_PS);
-		TX_STATUS_ENTRY(ABORTED);
-		TX_STATUS_ENTRY(BT_RETRY);
-		TX_STATUS_ENTRY(STA_INVALID);
-		TX_STATUS_ENTRY(FRAG_DROPPED);
-		TX_STATUS_ENTRY(TID_DISABLE);
-		TX_STATUS_ENTRY(FRAME_FLUSHED);
-		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
-		TX_STATUS_ENTRY(TX_LOCKED);
-		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
-	}
-
-	return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
-	return "";
-}
-#endif
-
-/*
- * Get the previous ieee rate from the rate scale table.
- * For A and B mode we need to override the prev
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
-	int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
-	switch (priv->band) {
-	case IEEE80211_BAND_5GHZ:
-		if (rate == IWL_RATE_12M_INDEX)
-			next_rate = IWL_RATE_9M_INDEX;
-		else if (rate == IWL_RATE_6M_INDEX)
-			next_rate = IWL_RATE_6M_INDEX;
-		break;
-	case IEEE80211_BAND_2GHZ:
-		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-			if (rate == IWL_RATE_11M_INDEX)
-				next_rate = IWL_RATE_5M_INDEX;
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
-				     int txq_id, int index)
-{
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	struct iwl_tx_info *tx_info;
-
-	BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
-	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
-		q->read_ptr != index;
-		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-		tx_info->skb = NULL;
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-	}
-
-	if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
-			(txq_id != IWL39_CMD_QUEUE_NUM) &&
-			priv->mac80211_registered)
-		iwl_legacy_wake_queue(priv, txq);
-}
-
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-	int txq_id = SEQ_TO_QUEUE(sequence);
-	int index = SEQ_TO_INDEX(sequence);
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct ieee80211_tx_info *info;
-	struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
-	u32  status = le32_to_cpu(tx_resp->status);
-	int rate_idx;
-	int fail;
-
-	if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-			  "is out of range [0-%d] %d %d\n", txq_id,
-			  index, txq->q.n_bd, txq->q.write_ptr,
-			  txq->q.read_ptr);
-		return;
-	}
-
-	txq->time_stamp = jiffies;
-	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
-	ieee80211_tx_info_clear_status(info);
-
-	/* Fill the MRR chain with some info about on-chip retransmissions */
-	rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
-	if (info->band == IEEE80211_BAND_5GHZ)
-		rate_idx -= IWL_FIRST_OFDM_RATE;
-
-	fail = tx_resp->failure_frame;
-
-	info->status.rates[0].idx = rate_idx;
-	info->status.rates[0].count = fail + 1; /* add final attempt */
-
-	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
-	info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
-				IEEE80211_TX_STAT_ACK : 0;
-
-	IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
-			txq_id, iwl3945_get_tx_fail_reason(status), status,
-			tx_resp->rate, tx_resp->failure_frame);
-
-	IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
-	iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
-	if (status & TX_ABORT_REQUIRED_MSK)
-		IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- *  RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
-					    __le32 *stats)
-{
-	int i;
-	__le32 *prev_stats;
-	u32 *accum_stats;
-	u32 *delta, *max_delta;
-
-	prev_stats = (__le32 *)&priv->_3945.statistics;
-	accum_stats = (u32 *)&priv->_3945.accum_statistics;
-	delta = (u32 *)&priv->_3945.delta_statistics;
-	max_delta = (u32 *)&priv->_3945.max_delta;
-
-	for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
-	     i += sizeof(__le32), stats++, prev_stats++, delta++,
-	     max_delta++, accum_stats++) {
-		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-			*delta = (le32_to_cpu(*stats) -
-				le32_to_cpu(*prev_stats));
-			*accum_stats += *delta;
-			if (*delta > *max_delta)
-				*max_delta = *delta;
-		}
-	}
-
-	/* reset accumulative statistics for "no-counter" type statistics */
-	priv->_3945.accum_statistics.general.temperature =
-		priv->_3945.statistics.general.temperature;
-	priv->_3945.accum_statistics.general.ttl_timestamp =
-		priv->_3945.statistics.general.ttl_timestamp;
-}
-#endif
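/*
 * Illustrative sketch (not part of the deleted file): the accumulator above
 * walks the statistics block as an array of 32-bit counters, skipping the
 * leading flags word, and for every field that moved forward records the
 * delta, adds it to the running total and keeps the largest single delta.
 * Plain uint32_t arrays stand in for the __le32 structures here.
 */
#include <stddef.h>
#include <stdint.h>

static void demo_accumulate_stats(const uint32_t *now, const uint32_t *prev,
				  uint32_t *accum, uint32_t *delta,
				  uint32_t *max_delta, size_t nwords)
{
	size_t i;

	for (i = 1; i < nwords; i++) {	/* word 0 holds the flags */
		if (now[i] > prev[i]) {
			delta[i] = now[i] - prev[i];
			accum[i] += delta[i];
			if (delta[i] > max_delta[i])
				max_delta[i] = delta[i];
		}
	}
}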
-
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
-		struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
-		     (int)sizeof(struct iwl3945_notif_statistics),
-		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-	iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-
-	memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	__le32 *flag = (__le32 *)&pkt->u.raw;
-
-	if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-		memset(&priv->_3945.accum_statistics, 0,
-			sizeof(struct iwl3945_notif_statistics));
-		memset(&priv->_3945.delta_statistics, 0,
-			sizeof(struct iwl3945_notif_statistics));
-		memset(&priv->_3945.max_delta, 0,
-			sizeof(struct iwl3945_notif_statistics));
-#endif
-		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-	}
-	iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
-		struct ieee80211_hdr *header)
-{
-	/* Filter incoming packets to determine if they are targeted toward
-	 * this network, discarding packets coming from ourselves */
-	switch (priv->iw_mode) {
-	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
-		/* packets to our IBSS update information */
-		return !compare_ether_addr(header->addr3, priv->bssid);
-	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
-		/* packets to our IBSS update information */
-		return !compare_ether_addr(header->addr2, priv->bssid);
-	default:
-		return 1;
-	}
-}
-
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
-				   struct iwl_rx_mem_buffer *rxb,
-				   struct ieee80211_rx_status *stats)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-	u16 len = le16_to_cpu(rx_hdr->len);
-	struct sk_buff *skb;
-	__le16 fc = hdr->frame_control;
-
-	/* We received data from the HW, so stop the watchdog */
-	if (unlikely(len + IWL39_RX_FRAME_SIZE >
-		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
-		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
-		return;
-	}
-
-	/* We only process data packets if the interface is open */
-	if (unlikely(!priv->is_open)) {
-		IWL_DEBUG_DROP_LIMIT(priv,
-			"Dropping packet while interface is not open.\n");
-		return;
-	}
-
-	skb = dev_alloc_skb(128);
-	if (!skb) {
-		IWL_ERR(priv, "dev_alloc_skb failed\n");
-		return;
-	}
-
-	if (!iwl3945_mod_params.sw_crypto)
-		iwl_legacy_set_decrypted_flag(priv,
-				       (struct ieee80211_hdr *)rxb_addr(rxb),
-				       le32_to_cpu(rx_end->status), stats);
-
-	skb_add_rx_frag(skb, 0, rxb->page,
-			(void *)rx_hdr->payload - (void *)pkt, len);
-
-	iwl_legacy_update_stats(priv, false, fc, len);
-	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
-	ieee80211_rx(priv->hw, skb);
-	priv->alloc_rxb_page--;
-	rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct ieee80211_hdr *header;
-	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
-	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-	u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
-	u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
-	u8 network_packet;
-
-	rx_status.flag = 0;
-	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
-	rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-	rx_status.freq =
-		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
-					       rx_status.band);
-
-	rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
-	if (rx_status.band == IEEE80211_BAND_5GHZ)
-		rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
-	rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
-					RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
-	/* set the preamble flag if appropriate */
-	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-		rx_status.flag |= RX_FLAG_SHORTPRE;
-
-	if ((unlikely(rx_stats->phy_count > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
-				rx_stats->phy_count);
-		return;
-	}
-
-	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
-	    || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
-		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
-		return;
-	}
-
-
-
-	/* Convert 3945's rssi indicator to dBm */
-	rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
-	IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
-			rx_status.signal, rx_stats_sig_avg,
-			rx_stats_noise_diff);
-
-	header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
-	network_packet = iwl3945_is_network_packet(priv, header);
-
-	IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
-			      network_packet ? '*' : ' ',
-			      le16_to_cpu(rx_hdr->channel),
-			      rx_status.signal, rx_status.signal,
-			      rx_status.rate_idx);
-
-	iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
-						header);
-
-	if (network_packet) {
-		priv->_3945.last_beacon_time =
-			le32_to_cpu(rx_end->beacon_timestamp);
-		priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
-		priv->_3945.last_rx_rssi = rx_status.signal;
-	}
-
-	iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-				     struct iwl_tx_queue *txq,
-				     dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
-	int count;
-	struct iwl_queue *q;
-	struct iwl3945_tfd *tfd, *tfd_tmp;
-
-	q = &txq->q;
-	tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
-
-	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
-
-	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
-	if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
-		IWL_ERR(priv, "Error can not send more than %d chunks\n",
-			  NUM_TFD_CHUNKS);
-		return -EINVAL;
-	}
-
-	tfd->tbs[count].addr = cpu_to_le32(addr);
-	tfd->tbs[count].len = cpu_to_le32(len);
-
-	count++;
-
-	tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
-					 TFD_CTL_PAD_SET(pad));
-
-	return 0;
-}
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
-	int index = txq->q.read_ptr;
-	struct iwl3945_tfd *tfd = &tfd_tmp[index];
-	struct pci_dev *dev = priv->pci_dev;
-	int i;
-	int counter;
-
-	/* sanity check */
-	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-	if (counter > NUM_TFD_CHUNKS) {
-		IWL_ERR(priv, "Too many chunks: %i\n", counter);
-		/* @todo issue fatal error, it is quite a serious situation */
-		return;
-	}
-
-	/* Unmap tx_cmd */
-	if (counter)
-		pci_unmap_single(dev,
-				dma_unmap_addr(&txq->meta[index], mapping),
-				dma_unmap_len(&txq->meta[index], len),
-				PCI_DMA_TODEVICE);
-
-	/* unmap chunks if any */
-
-	for (i = 1; i < counter; i++)
-		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
-			 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
-	/* free SKB */
-	if (txq->txb) {
-		struct sk_buff *skb;
-
-		skb = txq->txb[txq->q.read_ptr].skb;
-
-		/* can be called from irqs-disabled context */
-		if (skb) {
-			dev_kfree_skb_any(skb);
-			txq->txb[txq->q.read_ptr].skb = NULL;
-		}
-	}
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- *
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
-				  struct iwl_device_cmd *cmd,
-				  struct ieee80211_tx_info *info,
-				  struct ieee80211_hdr *hdr,
-				  int sta_id, int tx_id)
-{
-	u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
-	u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
-	u16 rate_mask;
-	int rate;
-	u8 rts_retry_limit;
-	u8 data_retry_limit;
-	__le32 tx_flags;
-	__le16 fc = hdr->frame_control;
-	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
-	rate = iwl3945_rates[rate_index].plcp;
-	tx_flags = tx_cmd->tx_flags;
-
-	/* We need to figure out how to get the sta->supp_rates while
-	 * in this running context */
-	rate_mask = IWL_RATES_MASK_3945;
-
-	/* Set retry limit on DATA packets and Probe Responses*/
-	if (ieee80211_is_probe_resp(fc))
-		data_retry_limit = 3;
-	else
-		data_retry_limit = IWL_DEFAULT_TX_RETRY;
-	tx_cmd->data_retry_limit = data_retry_limit;
-
-	if (tx_id >= IWL39_CMD_QUEUE_NUM)
-		rts_retry_limit = 3;
-	else
-		rts_retry_limit = 7;
-
-	if (data_retry_limit < rts_retry_limit)
-		rts_retry_limit = data_retry_limit;
-	tx_cmd->rts_retry_limit = rts_retry_limit;
-
-	tx_cmd->rate = rate;
-	tx_cmd->tx_flags = tx_flags;
-
-	/* OFDM */
-	tx_cmd->supp_rates[0] =
-	   ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
-	/* CCK */
-	tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
-	IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
-		       "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
-		       tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
-		       tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
-
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
-	unsigned long flags_spin;
-	struct iwl_station_entry *station;
-
-	if (sta_id == IWL_INVALID_STATION)
-		return IWL_INVALID_STATION;
-
-	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	station = &priv->stations[sta_id];
-
-	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
-	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
-	station->sta.mode = STA_CONTROL_MODIFY_MSK;
-	iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
-	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
-	IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
-			sta_id, tx_rate);
-	return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
-		if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
-			iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
-					~APMG_PS_CTRL_MSK_PWR_SRC);
-
-			iwl_poll_bit(priv, CSR_GPIO_IN,
-				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
-				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
-		}
- */
-
-	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
-			~APMG_PS_CTRL_MSK_PWR_SRC);
-
-	iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
-		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);	/* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
-	iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
-					rxq->rb_stts_dma);
-	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
-	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
-		FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
-		FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
-		FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
-		FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
-		(RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
-		FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
-		(1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
-		FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
-	/* fake read to flush all prev I/O */
-	iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
-
-	return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
-	/* bypass mode */
-	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
-	/* RA 0 is active */
-	iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
-	/* all 6 fifo are active */
-	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
-	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
-	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
-	iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
-	iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
-	iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
-			     priv->_3945.shared_phys);
-
-	iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
-		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-
-	return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initializes them again
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
-	int rc;
-	int txq_id, slots_num;
-
-	iwl3945_hw_txq_ctx_free(priv);
-
-	/* allocate tx queue structure */
-	rc = iwl_legacy_alloc_txq_mem(priv);
-	if (rc)
-		return rc;
-
-	/* Tx CMD queue */
-	rc = iwl3945_tx_reset(priv);
-	if (rc)
-		goto error;
-
-	/* Tx queue(s) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
-				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
-						slots_num, txq_id);
-		if (rc) {
-			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
-			goto error;
-		}
-	}
-
-	return rc;
-
- error:
-	iwl3945_hw_txq_ctx_free(priv);
-	return rc;
-}
-
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE:  This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
-	int ret = iwl_legacy_apm_init(priv);
-
-	/* Clear APMG (NIC's internal power management) interrupts */
-	iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-	iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
-	/* Reset radio chip */
-	iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-	udelay(5);
-	iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-
-	return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	unsigned long flags;
-	u8 rev_id = priv->pci_dev->revision;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Determine HW type */
-	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
-	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
-		IWL_DEBUG_INFO(priv, "RTP type\n");
-	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
-		IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
-	} else {
-		IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
-	}
-
-	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
-		IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
-	} else
-		IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
-	if ((eeprom->board_revision & 0xF0) == 0xD0) {
-		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
-			       eeprom->board_revision);
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
-	} else {
-		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
-			       eeprom->board_revision);
-		iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
-			      CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
-	}
-
-	if (eeprom->almgor_m_version <= 1) {
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
-		IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
-			       eeprom->almgor_m_version);
-	} else {
-		IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
-			       eeprom->almgor_m_version);
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
-		IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
-	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
-		IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
-	int rc;
-	unsigned long flags;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->cfg->ops->lib->apm_ops.init(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	iwl3945_set_pwr_vmain(priv);
-
-	priv->cfg->ops->lib->apm_ops.config(priv);
-
-	/* Allocate the RX queue, or reset if it is already allocated */
-	if (!rxq->bd) {
-		rc = iwl_legacy_rx_queue_alloc(priv);
-		if (rc) {
-			IWL_ERR(priv, "Unable to initialize Rx queue\n");
-			return -ENOMEM;
-		}
-	} else
-		iwl3945_rx_queue_reset(priv, rxq);
-
-	iwl3945_rx_replenish(priv);
-
-	iwl3945_rx_init(priv, rxq);
-
-
-	/* Look at using this instead:
-	rxq->need_update = 1;
-	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-	*/
-
-	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
-	rc = iwl3945_txq_ctx_reset(priv);
-	if (rc)
-		return rc;
-
-	set_bit(STATUS_INIT, &priv->status);
-
-	return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
-	int txq_id;
-
-	/* Tx queues */
-	if (priv->txq)
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-		     txq_id++)
-			if (txq_id == IWL39_CMD_QUEUE_NUM)
-				iwl_legacy_cmd_queue_free(priv);
-			else
-				iwl_legacy_tx_queue_free(priv, txq_id);
-
-	/* free tx queue structure */
-	iwl_legacy_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
-	int txq_id;
-
-	/* stop SCD */
-	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
-	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
-	/* reset TFD queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
-		iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
-				FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
-				1000);
-	}
-
-	iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp
- * return index delta into power gain settings table
-*/
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
-	return (new_reading - old_reading) * (-11) / 100;
-}
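A rough reading of the constant above (a derived sketch based on the 1/2 dB-per-step granularity of the gain table later in this file, not a statement from the original sources): 11/100 of a table step per unit of temperature difference is about 0.055 dB of compensation per unit, applied toward higher gain (lower table index) as the part warms.

	/* e.g. a reading 50 units above the factory reference:
	 *   (50) * (-11) / 100 = -5 steps  ->  roughly 2.5 dB more drive
	 */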
-
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
-	return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
-	return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature
- * get the current temperature by reading from NIC
-*/
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	int temperature;
-
-	temperature = iwl3945_hw_get_temperature(priv);
-
-	/* driver's okay range is -260 to +25.
-	 *   human readable okay range is 0 to +285 */
-	IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
-	/* handle insane temp reading */
-	if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
-		IWL_ERR(priv, "Error bad temperature value  %d\n", temperature);
-
-		/* if really really hot(?),
-		 *   substitute the 3rd band/group's temp measured at factory */
-		if (priv->last_temperature > 100)
-			temperature = eeprom->groups[2].temperature;
-		else /* else use most recent "sane" value from driver */
-			temperature = priv->last_temperature;
-	}
-
-	return temperature;	/* raw, not "human readable" */
-}
-
-/* Adjust Txpower only if temperature variance is greater than threshold.
- *
- * This threshold is lower than older versions' 9 degrees */
-#define IWL_TEMPERATURE_LIMIT_TIMER   6
-
-/**
- * iwl3945_is_temp_calib_needed - determines if new calibration is needed
- *
- * records new temperature in tx_mgr->temperature.
- * replaces tx_mgr->last_temperature *only* if calib needed
- *    (assumes caller will actually do the calibration!). */
-static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
-{
-	int temp_diff;
-
-	priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
-	temp_diff = priv->temperature - priv->last_temperature;
-
-	/* get absolute value */
-	if (temp_diff < 0) {
-		IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
-		temp_diff = -temp_diff;
-	} else if (temp_diff == 0)
-		IWL_DEBUG_POWER(priv, "Same temp,\n");
-	else
-		IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
-	/* if we don't need calibration, *don't* update last_temperature */
-	if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
-		IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
-		return 0;
-	}
-
-	IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
-	/* assume that caller will actually do calib ...
-	 *   update the "last temperature" value */
-	priv->last_temperature = priv->temperature;
-	return 1;
-}
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF  -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
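The two offsets are mutually consistent given the 1/2 dB-per-step granularity of the gain table just below (a derived observation, not text from the original source):

	/* -5 dB of CCK backoff relative to the OFDM 12M setting is 10
	 * half-dB steps, hence base_power_index + 10 wherever the CCK
	 * entries are filled in from the OFDM 12M entry.
	 */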
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
-	{
-	 {251, 127},		/* 2.4 GHz, highest power */
-	 {251, 127},
-	 {251, 127},
-	 {251, 127},
-	 {251, 125},
-	 {251, 110},
-	 {251, 105},
-	 {251, 98},
-	 {187, 125},
-	 {187, 115},
-	 {187, 108},
-	 {187, 99},
-	 {243, 119},
-	 {243, 111},
-	 {243, 105},
-	 {243, 97},
-	 {243, 92},
-	 {211, 106},
-	 {211, 100},
-	 {179, 120},
-	 {179, 113},
-	 {179, 107},
-	 {147, 125},
-	 {147, 119},
-	 {147, 112},
-	 {147, 106},
-	 {147, 101},
-	 {147, 97},
-	 {147, 91},
-	 {115, 107},
-	 {235, 121},
-	 {235, 115},
-	 {235, 109},
-	 {203, 127},
-	 {203, 121},
-	 {203, 115},
-	 {203, 108},
-	 {203, 102},
-	 {203, 96},
-	 {203, 92},
-	 {171, 110},
-	 {171, 104},
-	 {171, 98},
-	 {139, 116},
-	 {227, 125},
-	 {227, 119},
-	 {227, 113},
-	 {227, 107},
-	 {227, 101},
-	 {227, 96},
-	 {195, 113},
-	 {195, 106},
-	 {195, 102},
-	 {195, 95},
-	 {163, 113},
-	 {163, 106},
-	 {163, 102},
-	 {163, 95},
-	 {131, 113},
-	 {131, 106},
-	 {131, 102},
-	 {131, 95},
-	 {99, 113},
-	 {99, 106},
-	 {99, 102},
-	 {99, 95},
-	 {67, 113},
-	 {67, 106},
-	 {67, 102},
-	 {67, 95},
-	 {35, 113},
-	 {35, 106},
-	 {35, 102},
-	 {35, 95},
-	 {3, 113},
-	 {3, 106},
-	 {3, 102},
-	 {3, 95} },		/* 2.4 GHz, lowest power */
-	{
-	 {251, 127},		/* 5.x GHz, highest power */
-	 {251, 120},
-	 {251, 114},
-	 {219, 119},
-	 {219, 101},
-	 {187, 113},
-	 {187, 102},
-	 {155, 114},
-	 {155, 103},
-	 {123, 117},
-	 {123, 107},
-	 {123, 99},
-	 {123, 92},
-	 {91, 108},
-	 {59, 125},
-	 {59, 118},
-	 {59, 109},
-	 {59, 102},
-	 {59, 96},
-	 {59, 90},
-	 {27, 104},
-	 {27, 98},
-	 {27, 92},
-	 {115, 118},
-	 {115, 111},
-	 {115, 104},
-	 {83, 126},
-	 {83, 121},
-	 {83, 113},
-	 {83, 105},
-	 {83, 99},
-	 {51, 118},
-	 {51, 111},
-	 {51, 104},
-	 {51, 98},
-	 {19, 116},
-	 {19, 109},
-	 {19, 102},
-	 {19, 98},
-	 {19, 93},
-	 {171, 113},
-	 {171, 107},
-	 {171, 99},
-	 {139, 120},
-	 {139, 113},
-	 {139, 107},
-	 {139, 99},
-	 {107, 120},
-	 {107, 113},
-	 {107, 107},
-	 {107, 99},
-	 {75, 120},
-	 {75, 113},
-	 {75, 107},
-	 {75, 99},
-	 {43, 120},
-	 {43, 113},
-	 {43, 107},
-	 {43, 99},
-	 {11, 120},
-	 {11, 113},
-	 {11, 107},
-	 {11, 99},
-	 {131, 107},
-	 {131, 99},
-	 {99, 120},
-	 {99, 113},
-	 {99, 107},
-	 {99, 99},
-	 {67, 120},
-	 {67, 113},
-	 {67, 107},
-	 {67, 99},
-	 {35, 120},
-	 {35, 113},
-	 {35, 107},
-	 {35, 99},
-	 {3, 120} }		/* 5.x GHz, lowest power */
-};
-
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
-	if (index < 0)
-		return 0;
-	if (index >= IWL_MAX_GAIN_ENTRIES)
-		return IWL_MAX_GAIN_ENTRIES - 1;
-	return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
-			       s32 rate_index, const s8 *clip_pwrs,
-			       struct iwl_channel_info *ch_info,
-			       int band_index)
-{
-	struct iwl3945_scan_power_info *scan_power_info;
-	s8 power;
-	u8 power_index;
-
-	scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
-	/* use this channel group's 6Mbit clipping/saturation pwr,
-	 *   but cap at regulatory scan power restriction (set during init
-	 *   based on eeprom channel data) for this channel.  */
-	power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
-	power = min(power, priv->tx_power_user_lmt);
-	scan_power_info->requested_power = power;
-
-	/* find difference between new scan *power* and current "normal"
-	 *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
-	 *   current "normal" temperature-compensated Tx power *index* for
-	 *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
-	 *   *index*. */
-	power_index = ch_info->power_info[rate_index].power_table_index
-	    - (power - ch_info->power_info
-	       [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
-	/* store reference index that we use when adjusting *all* scan
-	 *   powers.  So we can accommodate user (all channel) or spectrum
-	 *   management (single channel) power changes "between" temperature
-	 *   feedback compensation procedures.
-	 * don't force fit this reference index into gain table; it may be a
-	 *   negative number.  This will help avoid errors when we're at
-	 *   the lower bounds (highest gains, for warmest temperatures)
-	 *   of the table. */
-
-	/* don't exceed table bounds for "real" setting */
-	power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
-	scan_power_info->power_table_index = power_index;
-	scan_power_info->tpc.tx_gain =
-	    power_gain_table[band_index][power_index].tx_gain;
-	scan_power_info->tpc.dsp_atten =
-	    power_gain_table[band_index][power_index].dsp_atten;
-}
-
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from channel info struct, and send to NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
-	int rate_idx, i;
-	const struct iwl_channel_info *ch_info = NULL;
-	struct iwl3945_txpowertable_cmd txpower = {
-		.channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
-	};
-	u16 chan;
-
-	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
-		      "TX Power requested while scanning!\n"))
-		return -EAGAIN;
-
-	chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
-	txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
-	ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
-	if (!ch_info) {
-		IWL_ERR(priv,
-			"Failed to get channel info for channel %d [%d]\n",
-			chan, priv->band);
-		return -EINVAL;
-	}
-
-	if (!iwl_legacy_is_channel_valid(ch_info)) {
-		IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
-				"non-Tx channel.\n");
-		return 0;
-	}
-
-	/* fill cmd with power settings for all rates for current channel */
-	/* Fill OFDM rate */
-	for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
-	     rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
-		txpower.power[i].tpc = ch_info->power_info[i].tpc;
-		txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
-		IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
-				le16_to_cpu(txpower.channel),
-				txpower.band,
-				txpower.power[i].tpc.tx_gain,
-				txpower.power[i].tpc.dsp_atten,
-				txpower.power[i].rate);
-	}
-	/* Fill CCK rates */
-	for (rate_idx = IWL_FIRST_CCK_RATE;
-	     rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
-		txpower.power[i].tpc = ch_info->power_info[i].tpc;
-		txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
-		IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
-				le16_to_cpu(txpower.channel),
-				txpower.band,
-				txpower.power[i].tpc.tx_gain,
-				txpower.power[i].tpc.dsp_atten,
-				txpower.power[i].rate);
-	}
-
-	return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
-				sizeof(struct iwl3945_txpowertable_cmd),
-				&txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update.  Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
- *	 properly fill out the scan powers, and actual h/w gain settings,
- *	 and send changes to NIC
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
-			     struct iwl_channel_info *ch_info)
-{
-	struct iwl3945_channel_power_info *power_info;
-	int power_changed = 0;
-	int i;
-	const s8 *clip_pwrs;
-	int power;
-
-	/* Get this chnlgrp's rate-to-max/clip-powers table */
-	clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-	/* Get this channel's rate-to-current-power settings table */
-	power_info = ch_info->power_info;
-
-	/* update OFDM Txpower settings */
-	for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
-	     i++, ++power_info) {
-		int delta_idx;
-
-		/* limit new power to be no more than h/w capability */
-		power = min(ch_info->curr_txpow, clip_pwrs[i]);
-		if (power == power_info->requested_power)
-			continue;
-
-		/* find difference between old and new requested powers,
-		 *    update base (non-temp-compensated) power index */
-		delta_idx = (power - power_info->requested_power) * 2;
-		power_info->base_power_index -= delta_idx;
-
-		/* save new requested power value */
-		power_info->requested_power = power;
-
-		power_changed = 1;
-	}
-
-	/* update CCK Txpower settings, based on OFDM 12M setting ...
-	 *    ... all CCK power settings for a given channel are the *same*. */
-	if (power_changed) {
-		power =
-		    ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
-		    requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
-		/* do all CCK rates' iwl3945_channel_power_info structures */
-		for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
-			power_info->requested_power = power;
-			power_info->base_power_index =
-			    ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
-			    base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
-			++power_info;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- *	 based strictly on regulatory (eeprom and spectrum mgt) limitations
- *	 (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
-	s8 max_power;
-
-#if 0
-	/* if we're using TGd limits, use lower of TGd or EEPROM */
-	if (ch_info->tgd_data.max_power != 0)
-		max_power = min(ch_info->tgd_data.max_power,
-				ch_info->eeprom.max_power_avg);
-
-	/* else just use EEPROM limits */
-	else
-#endif
-		max_power = ch_info->eeprom.max_power_avg;
-
-	return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- *   and the factory calibration temperatures, and bases the new settings
- *   on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
-	struct iwl_channel_info *ch_info = NULL;
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	int delta_index;
-	const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
-	u8 a_band;
-	u8 rate_index;
-	u8 scan_tbl_index;
-	u8 i;
-	int ref_temp;
-	int temperature = priv->temperature;
-
-	if (priv->disable_tx_power_cal ||
-	    test_bit(STATUS_SCANNING, &priv->status)) {
-		/* do not perform tx power calibration */
-		return 0;
-	}
-	/* set up new Tx power info for each and every channel, 2.4 and 5.x */
-	for (i = 0; i < priv->channel_count; i++) {
-		ch_info = &priv->channel_info[i];
-		a_band = iwl_legacy_is_channel_a_band(ch_info);
-
-		/* Get this chnlgrp's factory calibration temperature */
-		ref_temp = (s16)eeprom->groups[ch_info->group_index].
-		    temperature;
-
-		/* get power index adjustment based on current and factory
-		 * temps */
-		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
-							      ref_temp);
-
-		/* set tx power value for all rates, OFDM and CCK */
-		for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
-		     rate_index++) {
-			int power_idx =
-			    ch_info->power_info[rate_index].base_power_index;
-
-			/* temperature compensate */
-			power_idx += delta_index;
-
-			/* stay within table range */
-			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-			ch_info->power_info[rate_index].
-			    power_table_index = (u8) power_idx;
-			ch_info->power_info[rate_index].tpc =
-			    power_gain_table[a_band][power_idx];
-		}
-
-		/* Get this chnlgrp's rate-to-max/clip-powers table */
-		clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
-		for (scan_tbl_index = 0;
-		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
-			s32 actual_index = (scan_tbl_index == 0) ?
-			    IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
-			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
-					   actual_index, clip_pwrs,
-					   ch_info, a_band);
-		}
-	}
-
-	/* send Txpower command for current channel to ucode */
-	return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
-	struct iwl_channel_info *ch_info;
-	s8 max_power;
-	u8 a_band;
-	u8 i;
-
-	if (priv->tx_power_user_lmt == power) {
-		IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
-				"limit: %ddBm.\n", power);
-		return 0;
-	}
-
-	IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
-	priv->tx_power_user_lmt = power;
-
-	/* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
-	for (i = 0; i < priv->channel_count; i++) {
-		ch_info = &priv->channel_info[i];
-		a_band = iwl_legacy_is_channel_a_band(ch_info);
-
-		/* find minimum power of all user and regulatory constraints
-		 *    (does not consider h/w clipping limitations) */
-		max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
-		max_power = min(power, max_power);
-		if (max_power != ch_info->curr_txpow) {
-			ch_info->curr_txpow = max_power;
-
-			/* this considers the h/w clipping limitations */
-			iwl3945_hw_reg_set_new_power(priv, ch_info);
-		}
-	}
-
-	/* update txpower settings for all channels,
-	 *   send to NIC if associated. */
-	iwl3945_is_temp_calib_needed(priv);
-	iwl3945_hw_reg_comp_txpower_temp(priv);
-
-	return 0;
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
-				   struct iwl_rxon_context *ctx)
-{
-	int rc = 0;
-	struct iwl_rx_packet *pkt;
-	struct iwl3945_rxon_assoc_cmd rxon_assoc;
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_RXON_ASSOC,
-		.len = sizeof(rxon_assoc),
-		.flags = CMD_WANT_SKB,
-		.data = &rxon_assoc,
-	};
-	const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
-	const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
-	if ((rxon1->flags == rxon2->flags) &&
-	    (rxon1->filter_flags == rxon2->filter_flags) &&
-	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
-	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
-		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
-		return 0;
-	}
-
-	rxon_assoc.flags = ctx->staging.flags;
-	rxon_assoc.filter_flags = ctx->staging.filter_flags;
-	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
-	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
-	rxon_assoc.reserved = 0;
-
-	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
-	if (rc)
-		return rc;
-
-	pkt = (struct iwl_rx_packet *)cmd.reply_page;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
-		rc = -EIO;
-	}
-
-	iwl_legacy_free_pages(priv, cmd.reply_page);
-
-	return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data.  This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	/* cast away the const for active_rxon in this function */
-	struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
-	struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
-	int rc = 0;
-	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return -EINVAL;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -1;
-
-	/* always get timestamp with Rx frame */
-	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
-	/* select antenna */
-	staging_rxon->flags &=
-	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
-	staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
-	rc = iwl_legacy_check_rxon_cmd(priv, ctx);
-	if (rc) {
-		IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-		return -EINVAL;
-	}
-
-	/* If we don't need to send a full RXON, we can use
-	 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
-	 * and other flags for the current radio configuration. */
-	if (!iwl_legacy_full_rxon_required(priv,
-			&priv->contexts[IWL_RXON_CTX_BSS])) {
-		rc = iwl_legacy_send_rxon_assoc(priv,
-					 &priv->contexts[IWL_RXON_CTX_BSS]);
-		if (rc) {
-			IWL_ERR(priv, "Error setting RXON_ASSOC "
-				  "configuration (%d).\n", rc);
-			return rc;
-		}
-
-		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-		/*
-		 * We do not commit tx power settings while channel changing,
-		 * do it now if tx power changed.
-		 */
-		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-		return 0;
-	}
-
-	/* If we are currently associated and the new config requires
-	 * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the associated bit from the active configuration
-	 * before we apply the new config */
-	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
-		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-		/*
-		 * reserved4 and 5 could have been filled by the iwlcore code.
-		 * Let's clear them before pushing to the 3945.
-		 */
-		active_rxon->reserved4 = 0;
-		active_rxon->reserved5 = 0;
-		rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
-				      sizeof(struct iwl3945_rxon_cmd),
-				      &priv->contexts[IWL_RXON_CTX_BSS].active);
-
-		/* If the mask clearing failed then we set
-		 * active_rxon back to what it was previously */
-		if (rc) {
-			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-			IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
-				  "configuration (%d).\n", rc);
-			return rc;
-		}
-		iwl_legacy_clear_ucode_stations(priv,
-					 &priv->contexts[IWL_RXON_CTX_BSS]);
-		iwl_legacy_restore_stations(priv,
-					 &priv->contexts[IWL_RXON_CTX_BSS]);
-	}
-
-	IWL_DEBUG_INFO(priv, "Sending RXON\n"
-		       "* with%s RXON_FILTER_ASSOC_MSK\n"
-		       "* channel = %d\n"
-		       "* bssid = %pM\n",
-		       (new_assoc ? "" : "out"),
-		       le16_to_cpu(staging_rxon->channel),
-		       staging_rxon->bssid_addr);
-
-	/*
-	 * reserved4 and 5 could have been filled by the iwlcore code.
-	 * Let's clear them before pushing to the 3945.
-	 */
-	staging_rxon->reserved4 = 0;
-	staging_rxon->reserved5 = 0;
-
-	iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
-	/* Apply the new configuration */
-	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
-			      sizeof(struct iwl3945_rxon_cmd),
-			      staging_rxon);
-	if (rc) {
-		IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
-		return rc;
-	}
-
-	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
-	if (!new_assoc) {
-		iwl_legacy_clear_ucode_stations(priv,
-					 &priv->contexts[IWL_RXON_CTX_BSS]);
-		iwl_legacy_restore_stations(priv,
-					&priv->contexts[IWL_RXON_CTX_BSS]);
-	}
-
-	/* If we issue a new RXON command which required a tune then we must
-	 * send a new TXPOWER command or we won't be able to Tx any frames */
-	rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
-	if (rc) {
-		IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
-		return rc;
-	}
-
-	/* Init the hardware's rate fallback order based on the band */
-	rc = iwl3945_init_hw_rate_table(priv);
-	if (rc) {
-		IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic -  called when time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- *     -- correct coeffs for temp (can reset temp timer)
- *     -- save this temp as "last",
- *     -- send new set of gain settings to NIC
- * NOTE:  This should continue working, even when we're not associated,
- *   so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
-	/* This will kick in the "brute force"
-	 * iwl3945_hw_reg_comp_txpower_temp() below */
-	if (!iwl3945_is_temp_calib_needed(priv))
-		goto reschedule;
-
-	/* Set up a new set of temp-adjusted TxPowers, send to NIC.
-	 * This is based *only* on current temperature,
-	 * ignoring any previous power measurements */
-	iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
-	queue_delayed_work(priv->workqueue,
-			   &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
-	struct iwl_priv *priv = container_of(work, struct iwl_priv,
-					     _3945.thermal_periodic.work);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	mutex_lock(&priv->mutex);
-	iwl3945_reg_txpower_periodic(priv);
-	mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- * 				   for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- *	 These channel groups are based on factory-tested channels;
- *	 on A-band, EEPROM's "group frequency" entries represent the top
- *	 channel in each group 1-4.  All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
-				       const struct iwl_channel_info *ch_info)
-{
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
-	u8 group;
-	u16 group_index = 0;	/* based on factory calib frequencies */
-	u8 grp_channel;
-
-	/* Find the group index for the channel ... don't use index 1(?) */
-	if (iwl_legacy_is_channel_a_band(ch_info)) {
-		for (group = 1; group < 5; group++) {
-			grp_channel = ch_grp[group].group_channel;
-			if (ch_info->channel <= grp_channel) {
-				group_index = group;
-				break;
-			}
-		}
-		/* group 4 has a few channels *above* its factory cal freq */
-		if (group == 5)
-			group_index = 4;
-	} else
-		group_index = 0;	/* 2.4 GHz, group 0 */
-
-	IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
-			group_index);
-	return group_index;
-}
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- *   into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
-				       s8 requested_power,
-				       s32 setting_index, s32 *new_index)
-{
-	const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	s32 index0, index1;
-	s32 power = 2 * requested_power;
-	s32 i;
-	const struct iwl3945_eeprom_txpower_sample *samples;
-	s32 gains0, gains1;
-	s32 res;
-	s32 denominator;
-
-	chnl_grp = &eeprom->groups[setting_index];
-	samples = chnl_grp->samples;
-	for (i = 0; i < 5; i++) {
-		if (power == samples[i].power) {
-			*new_index = samples[i].gain_index;
-			return 0;
-		}
-	}
-
-	if (power > samples[1].power) {
-		index0 = 0;
-		index1 = 1;
-	} else if (power > samples[2].power) {
-		index0 = 1;
-		index1 = 2;
-	} else if (power > samples[3].power) {
-		index0 = 2;
-		index1 = 3;
-	} else {
-		index0 = 3;
-		index1 = 4;
-	}
-
-	denominator = (s32) samples[index1].power - (s32) samples[index0].power;
-	if (denominator == 0)
-		return -EINVAL;
-	gains0 = (s32) samples[index0].gain_index * (1 << 19);
-	gains1 = (s32) samples[index1].gain_index * (1 << 19);
-	res = gains0 + (gains1 - gains0) *
-	    ((s32) power - (s32) samples[index0].power) / denominator +
-	    (1 << 18);
-	*new_index = res >> 19;
-	return 0;
-}
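The arithmetic above is a Q19 fixed-point linear interpolation with round-to-nearest. A minimal standalone sketch of the same pattern, using assumed names that are not part of the driver:

/*
 * Interpolate y at x between (x0, y0) and (x1, y1) in Q19 fixed point,
 * rounding to nearest by adding half an LSB (1 << 18) before the final
 * shift.  The caller must ensure x1 != x0 (the driver code above
 * returns -EINVAL in that case).
 */
static int q19_interp_round(int x, int x0, int y0, int x1, int y1)
{
	long long y = (long long)y0 << 19;	/* promote to Q19 */

	y += ((long long)(y1 - y0) << 19) * (x - x0) / (x1 - x0);
	return (int)((y + (1 << 18)) >> 19);	/* round, drop the scale */
}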
-
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
-	u32 i;
-	s32 rate_index;
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	const struct iwl3945_eeprom_txpower_group *group;
-
-	IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
-	for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
-		s8 *clip_pwrs;	/* table of power levels for each rate */
-		s8 satur_pwr;	/* saturation power for each chnl group */
-		group = &eeprom->groups[i];
-
-		/* sanity check on factory saturation power value */
-		if (group->saturation_power < 40) {
-			IWL_WARN(priv, "Error: saturation power is %d, "
-				    "less than minimum expected 40\n",
-				    group->saturation_power);
-			return;
-		}
-
-		/*
-		 * Derive requested power levels for each rate, based on
-		 *   hardware capabilities (saturation power for band).
-		 * Basic value is 3dB down from saturation, with further
-		 *   power reductions for highest 3 data rates.  These
-		 *   backoffs provide headroom for high rate modulation
-		 *   power peaks, without too much distortion (clipping).
-		 */
-		/* we'll fill in this array with h/w max power levels */
-		clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
-		/* divide factory saturation power by 2 to find -3dB level */
-		satur_pwr = (s8) (group->saturation_power >> 1);
-
-		/* fill in channel group's nominal powers for each rate */
-		for (rate_index = 0;
-		     rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
-			switch (rate_index) {
-			case IWL_RATE_36M_INDEX_TABLE:
-				if (i == 0)	/* B/G */
-					*clip_pwrs = satur_pwr;
-				else	/* A */
-					*clip_pwrs = satur_pwr - 5;
-				break;
-			case IWL_RATE_48M_INDEX_TABLE:
-				if (i == 0)
-					*clip_pwrs = satur_pwr - 7;
-				else
-					*clip_pwrs = satur_pwr - 10;
-				break;
-			case IWL_RATE_54M_INDEX_TABLE:
-				if (i == 0)
-					*clip_pwrs = satur_pwr - 9;
-				else
-					*clip_pwrs = satur_pwr - 12;
-				break;
-			default:
-				*clip_pwrs = satur_pwr;
-				break;
-			}
-		}
-	}
-}
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to perform active (i.e. using Tx) scans.
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
-	struct iwl_channel_info *ch_info = NULL;
-	struct iwl3945_channel_power_info *pwr_info;
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	int delta_index;
-	u8 rate_index;
-	u8 scan_tbl_index;
-	const s8 *clip_pwrs;	/* array of power levels for each rate */
-	u8 gain, dsp_atten;
-	s8 power;
-	u8 pwr_index, base_pwr_index, a_band;
-	u8 i;
-	int temperature;
-
-	/* save temperature reference,
-	 *   so we can determine next time to calibrate */
-	temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
-	priv->last_temperature = temperature;
-
-	iwl3945_hw_reg_init_channel_groups(priv);
-
-	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
-	for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
-	     i++, ch_info++) {
-		a_band = iwl_legacy_is_channel_a_band(ch_info);
-		if (!iwl_legacy_is_channel_valid(ch_info))
-			continue;
-
-		/* find this channel's channel group (*not* "band") index */
-		ch_info->group_index =
-			iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
-		/* Get this chnlgrp's rate->max/clip-powers table */
-		clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-		/* calculate power index *adjustment* value according to
-		 *  diff between current temperature and factory temperature */
-		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
-				eeprom->groups[ch_info->group_index].
-				temperature);
-
-		IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
-				ch_info->channel, delta_index, temperature +
-				IWL_TEMP_CONVERT);
-
-		/* set tx power value for all OFDM rates */
-		for (rate_index = 0; rate_index < IWL_OFDM_RATES;
-		     rate_index++) {
-			s32 uninitialized_var(power_idx);
-			int rc;
-
-			/* use channel group's clip-power table,
-			 *   but don't exceed channel's max power */
-			s8 pwr = min(ch_info->max_power_avg,
-				     clip_pwrs[rate_index]);
-
-			pwr_info = &ch_info->power_info[rate_index];
-
-			/* get base (i.e. at factory-measured temperature)
-			 *    power table index for this rate's power */
-			rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
-							 ch_info->group_index,
-							 &power_idx);
-			if (rc) {
-				IWL_ERR(priv, "Invalid power index\n");
-				return rc;
-			}
-			pwr_info->base_power_index = (u8) power_idx;
-
-			/* temperature compensate */
-			power_idx += delta_index;
-
-			/* stay within range of gain table */
-			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
-			/* fill 1 OFDM rate's iwl3945_channel_power_info struct */
-			pwr_info->requested_power = pwr;
-			pwr_info->power_table_index = (u8) power_idx;
-			pwr_info->tpc.tx_gain =
-			    power_gain_table[a_band][power_idx].tx_gain;
-			pwr_info->tpc.dsp_atten =
-			    power_gain_table[a_band][power_idx].dsp_atten;
-		}
-
-		/* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
-		pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
-		power = pwr_info->requested_power +
-			IWL_CCK_FROM_OFDM_POWER_DIFF;
-		pwr_index = pwr_info->power_table_index +
-			IWL_CCK_FROM_OFDM_INDEX_DIFF;
-		base_pwr_index = pwr_info->base_power_index +
-			IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
-		/* stay within table range */
-		pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
-		gain = power_gain_table[a_band][pwr_index].tx_gain;
-		dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
-		/* fill each CCK rate's iwl3945_channel_power_info structure
-		 * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
-		 * NOTE:  CCK rates start at end of OFDM rates! */
-		for (rate_index = 0;
-		     rate_index < IWL_CCK_RATES; rate_index++) {
-			pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
-			pwr_info->requested_power = power;
-			pwr_info->power_table_index = pwr_index;
-			pwr_info->base_power_index = base_pwr_index;
-			pwr_info->tpc.tx_gain = gain;
-			pwr_info->tpc.dsp_atten = dsp_atten;
-		}
-
-		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
-		for (scan_tbl_index = 0;
-		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
-			s32 actual_index = (scan_tbl_index == 0) ?
-				IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
-			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
-				actual_index, clip_pwrs, ch_info, a_band);
-		}
-	}
-
-	return 0;
-}
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
-	int rc;
-
-	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
-	rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
-			FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-	if (rc < 0)
-		IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
-	return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	int txq_id = txq->q.id;
-
-	struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
-	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
-	iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
-	iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
-	iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
-		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
-		FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
-		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
-		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
-		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
-	/* fake read to flush all prev. writes */
-	iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
-	return 0;
-}
-
-/*
- * HCMD utils
- */
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
-	switch (cmd_id) {
-	case REPLY_RXON:
-		return sizeof(struct iwl3945_rxon_cmd);
-	case POWER_TABLE_CMD:
-		return sizeof(struct iwl3945_powertable_cmd);
-	default:
-		return len;
-	}
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
-								u8 *data)
-{
-	struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
-	addsta->mode = cmd->mode;
-	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
-	memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
-	addsta->station_flags = cmd->station_flags;
-	addsta->station_flags_msk = cmd->station_flags_msk;
-	addsta->tid_disable_tx = cpu_to_le16(0);
-	addsta->rate_n_flags = cmd->rate_n_flags;
-	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
-	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
-	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
-	return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
-				     const u8 *addr, u8 *sta_id_r)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	int ret;
-	u8 sta_id;
-	unsigned long flags;
-
-	if (sta_id_r)
-		*sta_id_r = IWL_INVALID_STATION;
-
-	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
-	if (ret) {
-		IWL_ERR(priv, "Unable to add station %pM\n", addr);
-		return ret;
-	}
-
-	if (sta_id_r)
-		*sta_id_r = sta_id;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].used |= IWL_STA_LOCAL;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
-				       struct ieee80211_vif *vif, bool add)
-{
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	int ret;
-
-	if (add) {
-		ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
-						&vif_priv->ibss_bssid_sta_id);
-		if (ret)
-			return ret;
-
-		iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
-				 (priv->band == IEEE80211_BAND_5GHZ) ?
-				 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
-		iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
-		return 0;
-	}
-
-	return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
-				  vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
-	int rc, i, index, prev_index;
-	struct iwl3945_rate_scaling_cmd rate_cmd = {
-		.reserved = {0, 0, 0},
-	};
-	struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
-	for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
-		index = iwl3945_rates[i].table_rs_index;
-
-		table[index].rate_n_flags =
-			iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
-		table[index].try_cnt = priv->retry_rate;
-		prev_index = iwl3945_get_prev_ieee_rate(i);
-		table[index].next_rate_index =
-				iwl3945_rates[prev_index].table_rs_index;
-	}
-
-	switch (priv->band) {
-	case IEEE80211_BAND_5GHZ:
-		IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
-		/* If one of the following CCK rates is used,
-		 * have it fall back to the 6M OFDM rate */
-		for (i = IWL_RATE_1M_INDEX_TABLE;
-			i <= IWL_RATE_11M_INDEX_TABLE; i++)
-			table[i].next_rate_index =
-			  iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
-		/* Don't fall back to CCK rates */
-		table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
-						IWL_RATE_9M_INDEX_TABLE;
-
-		/* Don't drop out of OFDM rates */
-		table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
-		    iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-		break;
-
-	case IEEE80211_BAND_2GHZ:
-		IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
-		/* If an OFDM rate is used, have it fall back to the
-		 * 1M CCK rates */
-
-		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
-			index = IWL_FIRST_CCK_RATE;
-			for (i = IWL_RATE_6M_INDEX_TABLE;
-			     i <= IWL_RATE_54M_INDEX_TABLE; i++)
-				table[i].next_rate_index =
-					iwl3945_rates[index].table_rs_index;
-
-			index = IWL_RATE_11M_INDEX_TABLE;
-			/* CCK shouldn't fall back to OFDM... */
-			table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
-		}
-		break;
-
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-	/* Update the rate scaling for control frame Tx */
-	rate_cmd.table_id = 0;
-	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
-			      &rate_cmd);
-	if (rc)
-		return rc;
-
-	/* Update the rate scaling for data frame Tx */
-	rate_cmd.table_id = 1;
-	return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
-				&rate_cmd);
-}
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
-	memset((void *)&priv->hw_params, 0,
-	       sizeof(struct iwl_hw_params));
-
-	priv->_3945.shared_virt =
-		dma_alloc_coherent(&priv->pci_dev->dev,
-				   sizeof(struct iwl3945_shared),
-				   &priv->_3945.shared_phys, GFP_KERNEL);
-	if (!priv->_3945.shared_virt) {
-		IWL_ERR(priv, "failed to allocate pci memory\n");
-		return -ENOMEM;
-	}
-
-	/* Assign number of Usable TX queues */
-	priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
-	priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
-	priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
-	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-	priv->hw_params.max_stations = IWL3945_STATION_COUNT;
-	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
-	priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
-	priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
-	priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
-	priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
-	return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
-			  struct iwl3945_frame *frame, u8 rate)
-{
-	struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
-	unsigned int frame_size;
-
-	tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
-	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
-	tx_beacon_cmd->tx.sta_id =
-		priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-	frame_size = iwl3945_fill_beacon_frame(priv,
-				tx_beacon_cmd->frame,
-				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
-	BUG_ON(frame_size > MAX_MPDU_SIZE);
-	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
-	tx_beacon_cmd->tx.rate = rate;
-	tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
-				      TX_CMD_FLG_TSF_MSK);
-
-	/* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
-	tx_beacon_cmd->tx.supp_rates[0] =
-		(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
-	tx_beacon_cmd->tx.supp_rates[1] =
-		(IWL_CCK_BASIC_RATES_MASK & 0xF);
-
-	return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
-	priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
-	priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
-	INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
-			  iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
-	cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
-{
-	__le32 *image = priv->ucode_boot.v_addr;
-	u32 len = priv->ucode_boot.len;
-	u32 reg;
-	u32 val;
-
-	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
-	/* verify BSM SRAM contents */
-	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
-	for (reg = BSM_SRAM_LOWER_BOUND;
-	     reg < BSM_SRAM_LOWER_BOUND + len;
-	     reg += sizeof(u32), image++) {
-		val = iwl_legacy_read_prph(priv, reg);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "BSM uCode verification failed at "
-				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
-				  BSM_SRAM_LOWER_BOUND,
-				  reg - BSM_SRAM_LOWER_BOUND, len,
-				  val, le32_to_cpu(*image));
-			return -EIO;
-		}
-	}
-
-	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
-	return 0;
-}
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism.  Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
-	_iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
-	return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
-	return;
-}
-
- /**
-  * iwl3945_load_bsm - Load bootstrap instructions
-  *
-  * BSM operation:
-  *
-  * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
-  * in special SRAM that does not power down during RFKILL.  When powering back
-  * up after power-saving sleeps (or during initial uCode load), the BSM loads
-  * the bootstrap program into the on-board processor, and starts it.
-  *
-  * The bootstrap program loads (via DMA) instructions and data for a new
-  * program from host DRAM locations indicated by the host driver in the
-  * BSM_DRAM_* registers.  Once the new program is loaded, it starts
-  * automatically.
-  *
-  * When initializing the NIC, the host driver points the BSM to the
-  * "initialize" uCode image.  This uCode sets up some internal data, then
-  * notifies host via "initialize alive" that it is complete.
-  *
-  * The host then replaces the BSM_DRAM_* pointer values to point to the
-  * normal runtime uCode instructions and a backup uCode data cache buffer
-  * (filled initially with starting data values for the on-board processor),
-  * then triggers the "initialize" uCode to load and launch the runtime uCode,
-  * which begins normal operation.
-  *
-  * When doing a power-save shutdown, runtime uCode saves data SRAM into
-  * the backup data cache in DRAM before SRAM is powered down.
-  *
-  * When powering back up, the BSM loads the bootstrap program.  This reloads
-  * the runtime uCode instructions and the backup data cache into SRAM,
-  * and re-launches the runtime uCode from where it left off.
-  */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
-	__le32 *image = priv->ucode_boot.v_addr;
-	u32 len = priv->ucode_boot.len;
-	dma_addr_t pinst;
-	dma_addr_t pdata;
-	u32 inst_len;
-	u32 data_len;
-	int rc;
-	int i;
-	u32 done;
-	u32 reg_offset;
-
-	IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
-	/* make sure bootstrap program is no larger than BSM's SRAM size */
-	if (len > IWL39_MAX_BSM_SIZE)
-		return -EINVAL;
-
-	/* Tell bootstrap uCode where to find the "Initialize" uCode
-	*   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
-	* NOTE:  iwl3945_initialize_alive_start() will replace these values,
-	*        after the "initialize" uCode has run, to point to
-	*        runtime/protocol instructions and backup data cache. */
-	pinst = priv->ucode_init.p_addr;
-	pdata = priv->ucode_init_data.p_addr;
-	inst_len = priv->ucode_init.len;
-	data_len = priv->ucode_init_data.len;
-
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
-	/* Fill BSM memory with bootstrap instructions */
-	for (reg_offset = BSM_SRAM_LOWER_BOUND;
-	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
-	     reg_offset += sizeof(u32), image++)
-		_iwl_legacy_write_prph(priv, reg_offset,
-					  le32_to_cpu(*image));
-
-	rc = iwl3945_verify_bsm(priv);
-	if (rc)
-		return rc;
-
-	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
-				 IWL39_RTC_INST_LOWER_BOUND);
-	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
-	/* Load bootstrap code into instruction SRAM now,
-	 *   to prepare to load "initialize" uCode */
-	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
-		BSM_WR_CTRL_REG_BIT_START);
-
-	/* Wait for load of bootstrap uCode to finish */
-	for (i = 0; i < 100; i++) {
-		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
-		if (!(done & BSM_WR_CTRL_REG_BIT_START))
-			break;
-		udelay(10);
-	}
-	if (i < 100)
-		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
-	else {
-		IWL_ERR(priv, "BSM write did not complete!\n");
-		return -EIO;
-	}
-
-	/* Enable future boot loads whenever power management unit triggers it
-	 *   (e.g. when powering back up after power-save shutdown) */
-	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
-		BSM_WR_CTRL_REG_BIT_START_EN);
-
-	return 0;
-}
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
-	.rxon_assoc = iwl3945_send_rxon_assoc,
-	.commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
-	.txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
-	.txq_free_tfd = iwl3945_hw_txq_free_tfd,
-	.txq_init = iwl3945_hw_tx_queue_init,
-	.load_ucode = iwl3945_load_bsm,
-	.dump_nic_error_log = iwl3945_dump_nic_error_log,
-	.apm_ops = {
-		.init = iwl3945_apm_init,
-		.config = iwl3945_nic_config,
-	},
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REGULATORY_BAND_1_CHANNELS,
-			EEPROM_REGULATORY_BAND_2_CHANNELS,
-			EEPROM_REGULATORY_BAND_3_CHANNELS,
-			EEPROM_REGULATORY_BAND_4_CHANNELS,
-			EEPROM_REGULATORY_BAND_5_CHANNELS,
-			EEPROM_REGULATORY_BAND_NO_HT40,
-			EEPROM_REGULATORY_BAND_NO_HT40,
-		},
-		.acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
-		.release_semaphore = iwl3945_eeprom_release_semaphore,
-	},
-	.send_tx_power	= iwl3945_send_tx_power,
-	.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-
-	.debugfs_ops = {
-		.rx_stats_read = iwl3945_ucode_rx_stats_read,
-		.tx_stats_read = iwl3945_ucode_tx_stats_read,
-		.general_stats_read = iwl3945_ucode_general_stats_read,
-	},
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
-	.post_associate = iwl3945_post_associate,
-	.config_ap = iwl3945_config_ap,
-	.manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
-	.get_hcmd_size = iwl3945_get_hcmd_size,
-	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-	.request_scan = iwl3945_request_scan,
-	.post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
-	.lib = &iwl3945_lib,
-	.hcmd = &iwl3945_hcmd,
-	.utils = &iwl3945_hcmd_utils,
-	.led = &iwl3945_led_ops,
-	.legacy = &iwl3945_legacy_ops,
-	.ieee80211_ops = &iwl3945_hw_ops,
-};
-
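These ops tables are how the shared iwlegacy core reaches 3945-specific code without knowing about it: everything goes through the function pointers wired up above. A hypothetical call site as an illustration; the priv->cfg->ops->lib path matches the structures in this file, but the helper name is invented:

/* Illustrative only: how core code would invoke the 3945 load_ucode hook
 * registered in iwl3945_lib via the cfg/ops indirection above. */
static int iwl_legacy_example_load_ucode(struct iwl_priv *priv)
{
	if (!priv->cfg->ops->lib->load_ucode)
		return -EOPNOTSUPP;

	return priv->cfg->ops->lib->load_ucode(priv);
}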
-static struct iwl_base_params iwl3945_base_params = {
-	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
-	.num_of_queues = IWL39_NUM_QUEUES,
-	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
-	.set_l0s = false,
-	.use_bsm = true,
-	.led_compensation = 64,
-	.wd_timeout = IWL_DEF_WD_TIMEOUT,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
-	.name = "3945BG",
-	.fw_name_pre = IWL3945_FW_PRE,
-	.ucode_api_max = IWL3945_UCODE_API_MAX,
-	.ucode_api_min = IWL3945_UCODE_API_MIN,
-	.sku = IWL_SKU_G,
-	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
-	.ops = &iwl3945_ops,
-	.mod_params = &iwl3945_mod_params,
-	.base_params = &iwl3945_base_params,
-	.led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
-	.name = "3945ABG",
-	.fw_name_pre = IWL3945_FW_PRE,
-	.ucode_api_max = IWL3945_UCODE_API_MAX,
-	.ucode_api_min = IWL3945_UCODE_API_MIN,
-	.sku = IWL_SKU_A|IWL_SKU_G,
-	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
-	.ops = &iwl3945_ops,
-	.mod_params = &iwl3945_mod_params,
-	.base_params = &iwl3945_base_params,
-	.led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
-	{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
-	{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
-	{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
-	{IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
-	{IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
-	{IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
-	{0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE	"iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
-
-/* Default noise level to report when noise measurement is not available.
- *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
- *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- *   Also, -127 works better than 0 when averaging frames with/without
- *   noise info (e.g. averaging might be done in app); measured dBm values are
- *   always negative ... using a negative value as the default keeps all
- *   averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
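A quick arithmetic illustration of why -127 is a better placeholder than 0 when frames with and without noise info are averaged together (the readings are invented):

/* Illustrative only, not driver code:
 *
 *   average of one real -60 dBm reading and one "not available" frame
 *     using -127:  (-60 + (-127)) / 2 ~= -93  -> still a plausible, weak value
 *     using    0:  (-60 +     0 ) / 2  = -30  -> looks misleadingly strong
 *
 * and every intermediate result stays within an s8's negative range.
 */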
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
-	u64 data;
-	s32 success_counter;
-	s32 success_ratio;
-	s32 counter;
-	s32 average_tpt;
-	unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
-	spinlock_t lock;
-	struct iwl_priv *priv;
-	s32 *expected_tpt;
-	unsigned long last_partial_flush;
-	unsigned long last_flush;
-	u32 flush_time;
-	u32 last_tx_packets;
-	u32 tx_packets;
-	u8 tgg;
-	u8 flush_pending;
-	u8 start_rate;
-	struct timer_list rate_scale_flush;
-	struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
-	struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
-	/* used to be in sta_info */
-	int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl3945_sta_priv {
-	struct iwl_station_priv_common common;
-	struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
-	IWL_ANTENNA_DIVERSITY,
-	IWL_ANTENNA_MAIN,
-	IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- *   a value of 0 means RTS on all data/management packets
- *   a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- *   than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD     2347U
-#define MIN_RTS_THRESHOLD         0U
-#define MAX_RTS_THRESHOLD         2347U
-#define MAX_MSDU_SIZE		  2304U
-#define MAX_MPDU_SIZE		  2346U
-#define DEFAULT_BEACON_INTERVAL   100U
-#define	DEFAULT_SHORT_RETRY_LIMIT 7U
-#define	DEFAULT_LONG_RETRY_LIMIT  4U
-
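A minimal sketch of the decision the RTS comment above describes, using the constants just defined; this is an illustration of the rule, not how mac80211 or this driver actually implements it:

/* Sketch only: should a data/management MPDU of @mpdu_len bytes be
 * preceded by RTS/CTS under @rts_threshold? */
static bool iwl3945_example_use_rts(u32 mpdu_len, u32 rts_threshold)
{
	if (rts_threshold == 0)
		return true;			/* 0: RTS for every frame */
	if (rts_threshold > MAX_MSDU_SIZE)
		return false;			/* above max MSDU: never RTS */
	return mpdu_len > rts_threshold;	/* otherwise RTS for large MPDUs */
}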
-#define IWL_TX_FIFO_AC0	0
-#define IWL_TX_FIFO_AC1	1
-#define IWL_TX_FIFO_AC2	2
-#define IWL_TX_FIFO_AC3	3
-#define IWL_TX_FIFO_HCCA_1	5
-#define IWL_TX_FIFO_HCCA_2	6
-#define IWL_TX_FIFO_NONE	7
-
-#define IEEE80211_DATA_LEN              2304
-#define IEEE80211_4ADDR_LEN             30
-#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
-	union {
-		struct ieee80211_hdr frame;
-		struct iwl3945_tx_beacon_cmd beacon;
-		u8 raw[IEEE80211_FRAME_LEN];
-		u8 cmd[360];
-	} u;
-	struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
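For orientation, a worked example of the three macros above; IEEE80211_SCTL_SEQ is the 0xFFF0 mask from <linux/ieee80211.h>, so the sequence number sits in bits 4..15 of the sequence-control field:

/* Illustrative only: a sequence-control field of 0x1234 splits as
 *
 *   SEQ_TO_SN(0x1234) = (0x1234 & 0xFFF0) >> 4 = 0x123    (sequence number)
 *   SN_TO_SEQ(0x123)  = (0x123 << 4) & 0xFFF0  = 0x1230   (fragment bits cleared)
 *   MAX_SN            = 0xFFF0 >> 4            = 0xFFF    (4095)
 */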
-#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
-
-#define IWL_SUPPORTED_RATES_IE_LEN         8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT        9
-
-#define IWL_INVALID_RATE     0xFF
-#define IWL_INVALID_VALUE    -1
-
-#define STA_PS_STATUS_WAKE             0
-#define STA_PS_STATUS_SLEEP            1
-
-struct iwl3945_ibss_seq {
-	u8 mac[ETH_ALEN];
-	u16 seq_num;
-	u16 frag_num;
-	unsigned long packet_time;
-	struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
-		       x->u.rx_frame.stats.payload + \
-		       x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
-		       IWL_RX_HDR(x)->payload + \
-		       le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
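Read together, the IWL_RX_* macros above imply the following layout of a received 3945 packet. This is a reading of the pointer arithmetic, not an authoritative description of the hardware format:

/*   IWL_RX_STATS(pkt) = &pkt->u.rx_frame.stats     per-packet RX stats header
 *     stats.payload[stats.phy_count]               raw PHY data
 *   IWL_RX_HDR(pkt)                                struct iwl3945_rx_frame_hdr
 *     IWL_RX_DATA(pkt) = hdr->payload[hdr->len]    the received 802.11 frame
 *   IWL_RX_END(pkt)                                struct iwl3945_rx_frame_end
 */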
-
-/******************************************************************************
- *
- * Functions implemented in iwl3945-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-					struct ieee80211_hdr *hdr, int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
-				       char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl3945-base.c
- *
- * NOTE:  The implementation of these functions are hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_         <-- It's part of iwlwifi (should be changed to iwl3945_)
- * iwl3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_      <-- Called from work queue context
- * iwl3945_mac_     <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-					    struct iwl_tx_queue *txq,
-					    dma_addr_t addr, u16 len,
-					    u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
-				    struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
-				struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
-				 struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
-				  struct iwl_device_cmd *cmd,
-				  struct ieee80211_tx_info *info,
-				  struct ieee80211_hdr *hdr,
-				  int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
-				 struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of the station whose ID to find
- *
- * NOTE:  This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl3945-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
-	const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
deleted file mode 100644
index 162d877..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ /dev/null
@@ -1,967 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-
-/*****************************************************************************
- * INIT calibrations framework
- *****************************************************************************/
-
-struct statistics_general_data {
-	u32 beacon_silence_rssi_a;
-	u32 beacon_silence_rssi_b;
-	u32 beacon_silence_rssi_c;
-	u32 beacon_energy_a;
-	u32 beacon_energy_b;
-	u32 beacon_energy_c;
-};
-
-void iwl4965_calib_free_results(struct iwl_priv *priv)
-{
-	int i;
-
-	for (i = 0; i < IWL_CALIB_MAX; i++) {
-		kfree(priv->calib_results[i].buf);
-		priv->calib_results[i].buf = NULL;
-		priv->calib_results[i].buf_len = 0;
-	}
-}
-
-/*****************************************************************************
- * RUNTIME calibrations framework
- *****************************************************************************/
-
-/* "false alarms" are signals that our DSP tries to lock onto,
- *   but then determines that they are either noise, or transmissions
- *   from a distant wireless network (also "noise", really) that get
- *   "stepped on" by stronger transmissions within our own network.
- * This algorithm attempts to set a sensitivity level that is high
- *   enough to receive all of our own network traffic, but not so
- *   high that our DSP gets too busy trying to lock onto non-network
- *   activity/noise. */
-static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
-				   u32 norm_fa,
-				   u32 rx_enable_time,
-				   struct statistics_general_data *rx_info)
-{
-	u32 max_nrg_cck = 0;
-	int i = 0;
-	u8 max_silence_rssi = 0;
-	u32 silence_ref = 0;
-	u8 silence_rssi_a = 0;
-	u8 silence_rssi_b = 0;
-	u8 silence_rssi_c = 0;
-	u32 val;
-
-	/* "false_alarms" values below are cross-multiplications to assess the
-	 *   numbers of false alarms within the measured period of actual Rx
-	 *   (Rx is off when we're txing), vs the min/max expected false alarms
-	 *   (some should be expected if rx is sensitive enough) in a
-	 *   hypothetical listening period of 200 time units (TU), 204.8 msec:
-	 *
-	 * MIN_FA/200TU < false_alarms/actual-rx-time < MAX_FA/200TU
-	 */
-	u32 false_alarms = norm_fa * 200 * 1024;
-	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
-	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
-	struct iwl_sensitivity_data *data = NULL;
-	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
-
-	data = &(priv->sensitivity_data);
-
-	data->nrg_auto_corr_silence_diff = 0;
-
-	/* Find max silence rssi among all 3 receivers.
-	 * This is background noise, which may include transmissions from other
-	 *    networks, measured during silence before our network's beacon */
-	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
-			    ALL_BAND_FILTER) >> 8);
-	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
-			    ALL_BAND_FILTER) >> 8);
-	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
-			    ALL_BAND_FILTER) >> 8);
-
-	val = max(silence_rssi_b, silence_rssi_c);
-	max_silence_rssi = max(silence_rssi_a, (u8) val);
-
-	/* Store silence rssi in 20-beacon history table */
-	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
-	data->nrg_silence_idx++;
-	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
-		data->nrg_silence_idx = 0;
-
-	/* Find max silence rssi across 20 beacon history */
-	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
-		val = data->nrg_silence_rssi[i];
-		silence_ref = max(silence_ref, val);
-	}
-	IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
-			silence_rssi_a, silence_rssi_b, silence_rssi_c,
-			silence_ref);
-
-	/* Find max rx energy (min value!) among all 3 receivers,
-	 *   measured during beacon frame.
-	 * Save it in 10-beacon history table. */
-	i = data->nrg_energy_idx;
-	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
-	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
-
-	data->nrg_energy_idx++;
-	if (data->nrg_energy_idx >= 10)
-		data->nrg_energy_idx = 0;
-
-	/* Find min rx energy (max value) across 10 beacon history.
-	 * This is the minimum signal level that we want to receive well.
-	 * Add backoff (margin so we don't miss slightly lower energy frames).
-	 * This establishes an upper bound (min value) for energy threshold. */
-	max_nrg_cck = data->nrg_value[0];
-	for (i = 1; i < 10; i++)
-		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
-	max_nrg_cck += 6;
-
-	IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
-			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
-			rx_info->beacon_energy_c, max_nrg_cck - 6);
-
-	/* Count number of consecutive beacons with fewer-than-desired
-	 *   false alarms. */
-	if (false_alarms < min_false_alarms)
-		data->num_in_cck_no_fa++;
-	else
-		data->num_in_cck_no_fa = 0;
-	IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
-			data->num_in_cck_no_fa);
-
-	/* If we got too many false alarms this time, reduce sensitivity */
-	if ((false_alarms > max_false_alarms) &&
-		(data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
-		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
-		     false_alarms, max_false_alarms);
-		IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
-		data->nrg_curr_state = IWL_FA_TOO_MANY;
-		/* Store for "fewer than desired" on later beacon */
-		data->nrg_silence_ref = silence_ref;
-
-		/* increase energy threshold (reduce nrg value)
-		 *   to decrease sensitivity */
-		data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
-	/* Else if we got fewer than desired, increase sensitivity */
-	} else if (false_alarms < min_false_alarms) {
-		data->nrg_curr_state = IWL_FA_TOO_FEW;
-
-		/* Compare silence level with silence level for most recent
-		 *   healthy number or too many false alarms */
-		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
-						   (s32)silence_ref;
-
-		IWL_DEBUG_CALIB(priv,
-			 "norm FA %u < min FA %u, silence diff %d\n",
-			 false_alarms, min_false_alarms,
-			 data->nrg_auto_corr_silence_diff);
-
-		/* Increase value to increase sensitivity, but only if:
-		 * 1a) previous beacon did *not* have *too many* false alarms
-		 * 1b) AND there's a significant difference in Rx levels
-		 *      from a previous beacon with too many, or healthy # FAs
-		 * OR 2) We've seen a lot of beacons (100) with too few
-		 *       false alarms */
-		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
-			((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
-			(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
-
-			IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
-			/* Increase nrg value to increase sensitivity */
-			val = data->nrg_th_cck + NRG_STEP_CCK;
-			data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
-		} else {
-			IWL_DEBUG_CALIB(priv,
-					 "... but not changing sensitivity\n");
-		}
-
-	/* Else we got a healthy number of false alarms, keep status quo */
-	} else {
-		IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
-		data->nrg_curr_state = IWL_FA_GOOD_RANGE;
-
-		/* Store for use in "fewer than desired" with later beacon */
-		data->nrg_silence_ref = silence_ref;
-
-		/* If previous beacon had too many false alarms,
-		 *   give it some extra margin by reducing sensitivity again
-		 *   (but don't go below measured energy of desired Rx) */
-		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
-			IWL_DEBUG_CALIB(priv, "... increasing margin\n");
-			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
-				data->nrg_th_cck -= NRG_MARGIN;
-			else
-				data->nrg_th_cck = max_nrg_cck;
-		}
-	}
-
-	/* Make sure the energy threshold does not go above the measured
-	 * energy of the desired Rx signals (reduced by backoff margin),
-	 * or else we might start missing Rx frames.
-	 * Lower value is higher energy, so we use max()!
-	 */
-	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
-	IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
-
-	data->nrg_prev_state = data->nrg_curr_state;
-
-	/* Auto-correlation CCK algorithm */
-	if (false_alarms > min_false_alarms) {
-
-		/* increase auto_corr values to decrease sensitivity
-		 * so the DSP won't be disturbed by the noise
-		 */
-		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
-			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
-		else {
-			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
-			data->auto_corr_cck =
-				min((u32)ranges->auto_corr_max_cck, val);
-		}
-		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
-		data->auto_corr_cck_mrc =
-			min((u32)ranges->auto_corr_max_cck_mrc, val);
-	} else if ((false_alarms < min_false_alarms) &&
-	   ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
-	   (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
-
-		/* Decrease auto_corr values to increase sensitivity */
-		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
-		data->auto_corr_cck =
-			max((u32)ranges->auto_corr_min_cck, val);
-		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
-		data->auto_corr_cck_mrc =
-			max((u32)ranges->auto_corr_min_cck_mrc, val);
-	}
-
-	return 0;
-}
-
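The cross-multiplication at the top of iwl4965_sens_energy_cck() compares false-alarm rates without a division: norm_fa is scaled to the 200 TU (200 * 1024 usec) reference window, while the MIN/MAX limits are scaled to the actual receive time. A worked check with invented numbers, treating the MAX limit as 50 per 200 TU purely for illustration:

/* Illustrative only:
 *   norm_fa        = 50 false alarms
 *   rx_enable_time = 102400 usec (half the 204800 usec reference window)
 *
 *   false_alarms     = 50 * 200 * 1024 = 10,240,000
 *   max_false_alarms = 50 * 102400     =  5,120,000
 *
 * false_alarms > max_false_alarms: 50 alarms in half the window is twice
 * the allowed rate, so the TOO_MANY branch reduces sensitivity.
 */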
-
-static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
-				       u32 norm_fa,
-				       u32 rx_enable_time)
-{
-	u32 val;
-	u32 false_alarms = norm_fa * 200 * 1024;
-	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
-	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
-	struct iwl_sensitivity_data *data = NULL;
-	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
-
-	data = &(priv->sensitivity_data);
-
-	/* If we got too many false alarms this time, reduce sensitivity */
-	if (false_alarms > max_false_alarms) {
-
-		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
-			     false_alarms, max_false_alarms);
-
-		val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm =
-			min((u32)ranges->auto_corr_max_ofdm, val);
-
-		val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm_mrc =
-			min((u32)ranges->auto_corr_max_ofdm_mrc, val);
-
-		val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm_x1 =
-			min((u32)ranges->auto_corr_max_ofdm_x1, val);
-
-		val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm_mrc_x1 =
-			min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
-	}
-
-	/* Else if we got fewer than desired, increase sensitivity */
-	else if (false_alarms < min_false_alarms) {
-
-		IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
-			     false_alarms, min_false_alarms);
-
-		val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm =
-			max((u32)ranges->auto_corr_min_ofdm, val);
-
-		val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm_mrc =
-			max((u32)ranges->auto_corr_min_ofdm_mrc, val);
-
-		val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm_x1 =
-			max((u32)ranges->auto_corr_min_ofdm_x1, val);
-
-		val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
-		data->auto_corr_ofdm_mrc_x1 =
-			max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
-	} else {
-		IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
-			 min_false_alarms, false_alarms, max_false_alarms);
-	}
-	return 0;
-}
-
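Both branches of the OFDM routine above apply the same move to four auto-correlation values: step by AUTO_CORR_STEP_OFDM and clamp against the per-hardware range. That pattern in isolation, as a sketch rather than how the driver factors it:

/* Sketch only: step @cur toward less sensitivity (positive @step) or more
 * sensitivity (negative @step), clamped to the [@lo, @hi] range. */
static u32 example_step_and_clamp(u32 cur, s32 step, u32 lo, u32 hi)
{
	s64 val = (s64)cur + step;

	if (val < (s64)lo)
		return lo;
	if (val > (s64)hi)
		return hi;
	return (u32)val;
}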
-static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
-				struct iwl_sensitivity_data *data,
-				__le16 *tbl)
-{
-	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
-				cpu_to_le16((u16)data->auto_corr_ofdm);
-	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
-				cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
-	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
-				cpu_to_le16((u16)data->auto_corr_ofdm_x1);
-	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
-				cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
-
-	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
-				cpu_to_le16((u16)data->auto_corr_cck);
-	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
-				cpu_to_le16((u16)data->auto_corr_cck_mrc);
-
-	tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
-				cpu_to_le16((u16)data->nrg_th_cck);
-	tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
-				cpu_to_le16((u16)data->nrg_th_ofdm);
-
-	tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
-				cpu_to_le16(data->barker_corr_th_min);
-	tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
-				cpu_to_le16(data->barker_corr_th_min_mrc);
-	tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
-				cpu_to_le16(data->nrg_th_cca);
-
-	IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
-			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
-			data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
-			data->nrg_th_ofdm);
-
-	IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
-			data->auto_corr_cck, data->auto_corr_cck_mrc,
-			data->nrg_th_cck);
-}
-
-/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
-static int iwl4965_sensitivity_write(struct iwl_priv *priv)
-{
-	struct iwl_sensitivity_cmd cmd;
-	struct iwl_sensitivity_data *data = NULL;
-	struct iwl_host_cmd cmd_out = {
-		.id = SENSITIVITY_CMD,
-		.len = sizeof(struct iwl_sensitivity_cmd),
-		.flags = CMD_ASYNC,
-		.data = &cmd,
-	};
-
-	data = &(priv->sensitivity_data);
-
-	memset(&cmd, 0, sizeof(cmd));
-
-	iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
-
-	/* Update uCode's "work" table, and copy it to DSP */
-	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
-
-	/* Don't send command to uCode if nothing has changed */
-	if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
-		    sizeof(u16)*HD_TABLE_SIZE)) {
-		IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
-		return 0;
-	}
-
-	/* Copy table for comparison next time */
-	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
-	       sizeof(u16)*HD_TABLE_SIZE);
-
-	return iwl_legacy_send_cmd(priv, &cmd_out);
-}
-
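iwl4965_sensitivity_write() above avoids redundant SENSITIVITY_CMDs by comparing the freshly built table against a cached copy and refreshing the cache on every send. The same idiom reduced to its essentials; the helper and its callback are hypothetical, not driver API:

/* Sketch only: call send() with @new_tbl only when it differs from the
 * cached copy, then remember what was sent. */
static int example_send_if_changed(const void *new_tbl, void *cached,
				   size_t len,
				   int (*send)(const void *tbl, size_t len))
{
	if (!memcmp(new_tbl, cached, len))
		return 0;		/* unchanged: skip the command */

	memcpy(cached, new_tbl, len);
	return send(new_tbl, len);
}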
-void iwl4965_init_sensitivity(struct iwl_priv *priv)
-{
-	int ret = 0;
-	int i;
-	struct iwl_sensitivity_data *data = NULL;
-	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
-
-	if (priv->disable_sens_cal)
-		return;
-
-	IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
-
-	/* Clear driver's sensitivity algo data */
-	data = &(priv->sensitivity_data);
-
-	if (ranges == NULL)
-		return;
-
-	memset(data, 0, sizeof(struct iwl_sensitivity_data));
-
-	data->num_in_cck_no_fa = 0;
-	data->nrg_curr_state = IWL_FA_TOO_MANY;
-	data->nrg_prev_state = IWL_FA_TOO_MANY;
-	data->nrg_silence_ref = 0;
-	data->nrg_silence_idx = 0;
-	data->nrg_energy_idx = 0;
-
-	for (i = 0; i < 10; i++)
-		data->nrg_value[i] = 0;
-
-	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
-		data->nrg_silence_rssi[i] = 0;
-
-	data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
-	data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
-	data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
-	data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
-	data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
-	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
-	data->nrg_th_cck = ranges->nrg_th_cck;
-	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
-	data->barker_corr_th_min = ranges->barker_corr_th_min;
-	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
-	data->nrg_th_cca = ranges->nrg_th_cca;
-
-	data->last_bad_plcp_cnt_ofdm = 0;
-	data->last_fa_cnt_ofdm = 0;
-	data->last_bad_plcp_cnt_cck = 0;
-	data->last_fa_cnt_cck = 0;
-
-	ret |= iwl4965_sensitivity_write(priv);
-	IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
-}
-
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
-{
-	u32 rx_enable_time;
-	u32 fa_cck;
-	u32 fa_ofdm;
-	u32 bad_plcp_cck;
-	u32 bad_plcp_ofdm;
-	u32 norm_fa_ofdm;
-	u32 norm_fa_cck;
-	struct iwl_sensitivity_data *data = NULL;
-	struct statistics_rx_non_phy *rx_info;
-	struct statistics_rx_phy *ofdm, *cck;
-	unsigned long flags;
-	struct statistics_general_data statis;
-
-	if (priv->disable_sens_cal)
-		return;
-
-	data = &(priv->sensitivity_data);
-
-	if (!iwl_legacy_is_any_associated(priv)) {
-		IWL_DEBUG_CALIB(priv, "<< - not associated\n");
-		return;
-	}
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
-	ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
-	cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
-
-	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
-		IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return;
-	}
-
-	/* Extract Statistics: */
-	rx_enable_time = le32_to_cpu(rx_info->channel_load);
-	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
-	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
-	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
-	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
-
-	statis.beacon_silence_rssi_a =
-			le32_to_cpu(rx_info->beacon_silence_rssi_a);
-	statis.beacon_silence_rssi_b =
-			le32_to_cpu(rx_info->beacon_silence_rssi_b);
-	statis.beacon_silence_rssi_c =
-			le32_to_cpu(rx_info->beacon_silence_rssi_c);
-	statis.beacon_energy_a =
-			le32_to_cpu(rx_info->beacon_energy_a);
-	statis.beacon_energy_b =
-			le32_to_cpu(rx_info->beacon_energy_b);
-	statis.beacon_energy_c =
-			le32_to_cpu(rx_info->beacon_energy_c);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
-
-	if (!rx_enable_time) {
-		IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
-		return;
-	}
-
-	/* These statistics increase monotonically, and do not reset
-	 *   at each beacon.  Calculate difference from last value, or just
-	 *   use the new statistics value if it has reset or wrapped around. */
-	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
-		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
-	else {
-		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
-		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
-	}
-
-	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
-		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
-	else {
-		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
-		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
-	}
-
-	if (data->last_fa_cnt_ofdm > fa_ofdm)
-		data->last_fa_cnt_ofdm = fa_ofdm;
-	else {
-		fa_ofdm -= data->last_fa_cnt_ofdm;
-		data->last_fa_cnt_ofdm += fa_ofdm;
-	}
-
-	if (data->last_fa_cnt_cck > fa_cck)
-		data->last_fa_cnt_cck = fa_cck;
-	else {
-		fa_cck -= data->last_fa_cnt_cck;
-		data->last_fa_cnt_cck += fa_cck;
-	}
-
-	/* Total aborted signal locks */
-	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
-	norm_fa_cck = fa_cck + bad_plcp_cck;
-
-	IWL_DEBUG_CALIB(priv,
-			 "cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
-			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
-
-	iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
-	iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
-
-	iwl4965_sensitivity_write(priv);
-}
-
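The four near-identical blocks in the function above turn monotonically increasing uCode counters into per-interval deltas, restarting the baseline whenever a counter comes back smaller (uCode reset or wrap). The same logic as one standalone helper; a sketch, since the driver keeps it inline:

/* Sketch only: return how much a monotonic counter grew since the last
 * call; on an apparent reset/wrap, take the new reading as the delta. */
static u32 example_counter_delta(u32 *last, u32 now)
{
	u32 delta = (now >= *last) ? now - *last : now;

	*last = now;
	return delta;
}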
-static inline u8 iwl4965_find_first_chain(u8 mask)
-{
-	if (mask & ANT_A)
-		return CHAIN_A;
-	if (mask & ANT_B)
-		return CHAIN_B;
-	return CHAIN_C;
-}
-
-/**
- * Run disconnected antenna algorithm to find out which antennas are
- * disconnected.
- */
-static void
-iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
-				     struct iwl_chain_noise_data *data)
-{
-	u32 active_chains = 0;
-	u32 max_average_sig;
-	u16 max_average_sig_antenna_i;
-	u8 num_tx_chains;
-	u8 first_chain;
-	u16 i = 0;
-
-	average_sig[0] = data->chain_signal_a /
-			 priv->cfg->base_params->chain_noise_num_beacons;
-	average_sig[1] = data->chain_signal_b /
-			 priv->cfg->base_params->chain_noise_num_beacons;
-	average_sig[2] = data->chain_signal_c /
-			 priv->cfg->base_params->chain_noise_num_beacons;
-
-	if (average_sig[0] >= average_sig[1]) {
-		max_average_sig = average_sig[0];
-		max_average_sig_antenna_i = 0;
-		active_chains = (1 << max_average_sig_antenna_i);
-	} else {
-		max_average_sig = average_sig[1];
-		max_average_sig_antenna_i = 1;
-		active_chains = (1 << max_average_sig_antenna_i);
-	}
-
-	if (average_sig[2] >= max_average_sig) {
-		max_average_sig = average_sig[2];
-		max_average_sig_antenna_i = 2;
-		active_chains = (1 << max_average_sig_antenna_i);
-	}
-
-	IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
-		     average_sig[0], average_sig[1], average_sig[2]);
-	IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
-		     max_average_sig, max_average_sig_antenna_i);
-
-	/* Compare signal strengths for all 3 receivers. */
-	for (i = 0; i < NUM_RX_CHAINS; i++) {
-		if (i != max_average_sig_antenna_i) {
-			s32 rssi_delta = (max_average_sig - average_sig[i]);
-
-			/* If signal is very weak, compared with
-			 * strongest, mark it as disconnected. */
-			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
-				data->disconn_array[i] = 1;
-			else
-				active_chains |= (1 << i);
-			IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
-			     "disconn_array[i] = %d\n",
-			     i, rssi_delta, data->disconn_array[i]);
-		}
-	}
-
-	/*
-	 * The above algorithm sometimes fails when the ucode
-	 * reports 0 for all chains. It's not clear why that
	 * happens in the first place, but it then causes trouble
	 * because it can make us enable more chains than the
-	 * hardware really has.
-	 *
-	 * To be safe, simply mask out any chains that we know
-	 * are not on the device.
-	 */
-	active_chains &= priv->hw_params.valid_rx_ant;
-
-	num_tx_chains = 0;
-	for (i = 0; i < NUM_RX_CHAINS; i++) {
-		/* loops on all the bits of
-		 * priv->hw_setting.valid_tx_ant */
-		u8 ant_msk = (1 << i);
-		if (!(priv->hw_params.valid_tx_ant & ant_msk))
-			continue;
-
-		num_tx_chains++;
-		if (data->disconn_array[i] == 0)
-			/* there is a Tx antenna connected */
-			break;
-		if (num_tx_chains == priv->hw_params.tx_chains_num &&
-		    data->disconn_array[i]) {
-			/*
-			 * If all chains are disconnected
-			 * connect the first valid tx chain
-			 */
-			first_chain =
-			iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
-			data->disconn_array[first_chain] = 0;
-			active_chains |= BIT(first_chain);
-			IWL_DEBUG_CALIB(priv,
-					"All Tx chains are disconnected W/A - declare %d as connected\n",
-					first_chain);
-			break;
-		}
-	}
-
-	if (active_chains != priv->hw_params.valid_rx_ant &&
-	    active_chains != priv->chain_noise_data.active_chains)
-		IWL_DEBUG_CALIB(priv,
-				"Detected that not all antennas are connected! "
-				"Connected: %#x, valid: %#x.\n",
-				active_chains, priv->hw_params.valid_rx_ant);
-
-	/* Save for use within RXON, TX, SCAN commands, etc. */
-	data->active_chains = active_chains;
-	IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
-			active_chains);
-}
-
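A compact numeric walk-through of the disconnect test above; the signal averages are invented, and the pathloss limit is whatever MAXIMUM_ALLOWED_PATHLOSS carries in the 4965 headers:

/* Illustrative only.  Suppose the accumulated signal averages come out as
 *   chain A = 120, chain B = 118, chain C = 40   (internal signal units)
 * Chain A is strongest, so for chain C rssi_delta = 120 - 40 = 80; if that
 * exceeds MAXIMUM_ALLOWED_PATHLOSS, disconn_array[C] is set while A and B
 * stay in active_chains.  The result is finally masked with
 * priv->hw_params.valid_rx_ant, so an all-zero report from the uCode can
 * never enable chains the hardware does not actually have.
 */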
-static void iwl4965_gain_computation(struct iwl_priv *priv,
-		u32 *average_noise,
-		u16 min_average_noise_antenna_i,
-		u32 min_average_noise,
-		u8 default_chain)
-{
-	int i, ret;
-	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
-	data->delta_gain_code[min_average_noise_antenna_i] = 0;
-
-	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
-		s32 delta_g = 0;
-
-		if (!(data->disconn_array[i]) &&
-		    (data->delta_gain_code[i] ==
-			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
-			delta_g = average_noise[i] - min_average_noise;
-			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
-			data->delta_gain_code[i] =
-				min(data->delta_gain_code[i],
-				(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
-			data->delta_gain_code[i] =
-				(data->delta_gain_code[i] | (1 << 2));
-		} else {
-			data->delta_gain_code[i] = 0;
-		}
-	}
-	IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
-		     data->delta_gain_code[0],
-		     data->delta_gain_code[1],
-		     data->delta_gain_code[2]);
-
-	/* Differential gain gets sent to uCode only once */
-	if (!data->radio_write) {
-		struct iwl_calib_diff_gain_cmd cmd;
-		data->radio_write = 1;
-
-		memset(&cmd, 0, sizeof(cmd));
-		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
-		cmd.diff_gain_a = data->delta_gain_code[0];
-		cmd.diff_gain_b = data->delta_gain_code[1];
-		cmd.diff_gain_c = data->delta_gain_code[2];
-		ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-				      sizeof(cmd), &cmd);
-		if (ret)
-			IWL_DEBUG_CALIB(priv, "fail sending cmd "
-				     "REPLY_PHY_CALIBRATION_CMD\n");
-
-		/* TODO we might want recalculate
-		 * rx_chain in rxon cmd */
-
-		/* Mark so we run this algo only once! */
-		data->state = IWL_CHAIN_NOISE_CALIBRATED;
-	}
-}
-
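A worked pass through the delta-gain formula above, with invented noise averages:

/* Illustrative only.  With average_noise = {90, 75, 80} the quietest chain
 * is B, so min_average_noise = 75 and delta_gain_code[B] stays 0.  For
 * chain A:
 *   delta_g            = 90 - 75 = 15
 *   delta_gain_code[A] = (15 * 10) / 15 = 10, capped at
 *                        CHAIN_NOISE_MAX_DELTA_GAIN_CODE if larger
 *   delta_gain_code[A] |= (1 << 2)  ->  10 | 4 = 14
 * The resulting codes are sent once in a REPLY_PHY_CALIBRATION_CMD to
 * balance the noisier chains against the quietest one.
 */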
-
-
-/*
- * Accumulate 16 beacons of signal and noise statistics for each of
- *   3 receivers/antennas/rx-chains, then figure out:
- * 1)  Which antennas are connected.
- * 2)  Differential rx gain settings to balance the 3 receivers.
- */
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
-{
-	struct iwl_chain_noise_data *data = NULL;
-
-	u32 chain_noise_a;
-	u32 chain_noise_b;
-	u32 chain_noise_c;
-	u32 chain_sig_a;
-	u32 chain_sig_b;
-	u32 chain_sig_c;
-	u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
-	u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
-	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
-	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
-	u16 i = 0;
-	u16 rxon_chnum = INITIALIZATION_VALUE;
-	u16 stat_chnum = INITIALIZATION_VALUE;
-	u8 rxon_band24;
-	u8 stat_band24;
-	unsigned long flags;
-	struct statistics_rx_non_phy *rx_info;
-
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (priv->disable_chain_noise_cal)
-		return;
-
-	data = &(priv->chain_noise_data);
-
-	/*
-	 * Accumulate just the first "chain_noise_num_beacons" after
-	 * the first association, then we're done forever.
-	 */
-	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
-		if (data->state == IWL_CHAIN_NOISE_ALIVE)
-			IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
-		return;
-	}
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
-		      rx.general);
-
-	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
-		IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return;
-	}
-
-	rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
-	rxon_chnum = le16_to_cpu(ctx->staging.channel);
-
-	stat_band24 = !!(((struct iwl_notif_statistics *)
-			 stat_resp)->flag &
-			 STATISTICS_REPLY_FLG_BAND_24G_MSK);
-	stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
-				 stat_resp)->flag) >> 16;
-
-	/* Make sure we accumulate data for just the associated channel
-	 *   (even if scanning). */
-	if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
-		IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
-				rxon_chnum, rxon_band24);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return;
-	}
-
-	/*
-	 *  Accumulate beacon statistics values across
-	 * "chain_noise_num_beacons"
-	 */
-	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
-				IN_BAND_FILTER;
-	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
-				IN_BAND_FILTER;
-	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
-				IN_BAND_FILTER;
-
-	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
-	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
-	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	data->beacon_count++;
-
-	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
-	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
-	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
-
-	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
-	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
-	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
-
-	IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
-			rxon_chnum, rxon_band24, data->beacon_count);
-	IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
-			chain_sig_a, chain_sig_b, chain_sig_c);
-	IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
-			chain_noise_a, chain_noise_b, chain_noise_c);
-
-	/* If this is the "chain_noise_num_beacons", determine:
-	 * 1)  Disconnected antennas (using signal strengths)
-	 * 2)  Differential gain (using silence noise) to balance receivers */
-	if (data->beacon_count !=
-		priv->cfg->base_params->chain_noise_num_beacons)
-		return;
-
-	/* Analyze signal for disconnected antenna */
-	iwl4965_find_disconn_antenna(priv, average_sig, data);
-
-	/* Analyze noise for rx balance */
-	average_noise[0] = data->chain_noise_a /
-			   priv->cfg->base_params->chain_noise_num_beacons;
-	average_noise[1] = data->chain_noise_b /
-			   priv->cfg->base_params->chain_noise_num_beacons;
-	average_noise[2] = data->chain_noise_c /
-			   priv->cfg->base_params->chain_noise_num_beacons;
-
-	for (i = 0; i < NUM_RX_CHAINS; i++) {
-		if (!(data->disconn_array[i]) &&
-		   (average_noise[i] <= min_average_noise)) {
-			/* This means that chain i is active and has
-			 * lower noise values so far: */
-			min_average_noise = average_noise[i];
-			min_average_noise_antenna_i = i;
-		}
-	}
-
-	IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
-			average_noise[0], average_noise[1],
-			average_noise[2]);
-
-	IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
-			min_average_noise, min_average_noise_antenna_i);
-
-	iwl4965_gain_computation(priv, average_noise,
-			min_average_noise_antenna_i, min_average_noise,
-			iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
-
-	/* Some power changes may have been made during the calibration.
-	 * Update and commit the RXON
-	 */
-	if (priv->cfg->ops->lib->update_chain_flags)
-		priv->cfg->ops->lib->update_chain_flags(priv);
-
-	data->state = IWL_CHAIN_NOISE_DONE;
-	iwl_legacy_power_update_mode(priv, false);
-}
-
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
-{
-	int i;
-	memset(&(priv->sensitivity_data), 0,
-	       sizeof(struct iwl_sensitivity_data));
-	memset(&(priv->chain_noise_data), 0,
-	       sizeof(struct iwl_chain_noise_data));
-	for (i = 0; i < NUM_RX_CHAINS; i++)
-		priv->chain_noise_data.delta_gain_code[i] =
-				CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
-
-	/* Ask for statistics now, the uCode will send notification
-	 * periodically after association */
-	iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/******************************************************************************
-*
-* GPL LICENSE SUMMARY
-*
-* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
-* USA
-*
-* The full GNU General Public License is included in this distribution
-* in the file called LICENSE.GPL.
-*
-* Contact Information:
-*  Intel Linux Wireless <ilw@linux.intel.com>
-* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*****************************************************************************/
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static const char *fmt_value = "  %-30s %10u\n";
-static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
-static const char *fmt_header =
-	"%-32s    current  cumulative       delta         max\n";
-
-static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
-	int p = 0;
-	u32 flag;
-
-	flag = le32_to_cpu(priv->_4965.statistics.flag);
-
-	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
-	if (flag & UCODE_STATISTICS_CLEAR_MSK)
-		p += scnprintf(buf + p, bufsz - p,
-		"\tStatistics have been cleared\n");
-	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
-		(flag & UCODE_STATISTICS_FREQUENCY_MSK)
-		? "2.4 GHz" : "5.2 GHz");
-	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
-		(flag & UCODE_STATISTICS_NARROW_BAND_MSK)
-		 ? "enabled" : "disabled");
-
-	return p;
-}
-
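Every debugfs reader in this file grows its output with the same bounded-append idiom shown above: scnprintf() returns the number of characters actually stored, so the running offset can never run past bufsz. In isolation, as a sketch reusing the fmt_value string defined above (the helper name is invented):

/* Sketch only: append one "name: value" line at @pos, staying within @bufsz. */
static int example_append_value(char *buf, int pos, int bufsz,
				const char *name, u32 value)
{
	return pos + scnprintf(buf + pos, bufsz - pos, fmt_value, name, value);
}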
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-				size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char *buf;
-	int bufsz = sizeof(struct statistics_rx_phy) * 40 +
-		    sizeof(struct statistics_rx_non_phy) * 40 +
-		    sizeof(struct statistics_rx_ht_phy) * 40 + 400;
-	ssize_t ret;
-	struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
-	struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
-	struct statistics_rx_non_phy *general, *accum_general;
-	struct statistics_rx_non_phy *delta_general, *max_general;
-	struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * The statistics displayed here are based on the last
-	 * statistics notification from the uCode and might not
-	 * reflect current uCode activity.
-	 */
-	ofdm = &priv->_4965.statistics.rx.ofdm;
-	cck = &priv->_4965.statistics.rx.cck;
-	general = &priv->_4965.statistics.rx.general;
-	ht = &priv->_4965.statistics.rx.ofdm_ht;
-	accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
-	accum_cck = &priv->_4965.accum_statistics.rx.cck;
-	accum_general = &priv->_4965.accum_statistics.rx.general;
-	accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
-	delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
-	delta_cck = &priv->_4965.delta_statistics.rx.cck;
-	delta_general = &priv->_4965.delta_statistics.rx.general;
-	delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
-	max_ofdm = &priv->_4965.max_delta.rx.ofdm;
-	max_cck = &priv->_4965.max_delta.rx.cck;
-	max_general = &priv->_4965.max_delta.rx.general;
-	max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
-
-	pos += iwl4965_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_header, "Statistics_Rx - OFDM:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "ina_cnt:",
-			 le32_to_cpu(ofdm->ina_cnt),
-			 accum_ofdm->ina_cnt,
-			 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "fina_cnt:",
-			 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
-			 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "plcp_err:",
-			 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
-			 delta_ofdm->plcp_err, max_ofdm->plcp_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "crc32_err:",
-			 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
-			 delta_ofdm->crc32_err, max_ofdm->crc32_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "overrun_err:",
-			 le32_to_cpu(ofdm->overrun_err),
-			 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
-			 max_ofdm->overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "early_overrun_err:",
-			 le32_to_cpu(ofdm->early_overrun_err),
-			 accum_ofdm->early_overrun_err,
-			 delta_ofdm->early_overrun_err,
-			 max_ofdm->early_overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "crc32_good:",
-			 le32_to_cpu(ofdm->crc32_good),
-			 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
-			 max_ofdm->crc32_good);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "false_alarm_cnt:",
-			 le32_to_cpu(ofdm->false_alarm_cnt),
-			 accum_ofdm->false_alarm_cnt,
-			 delta_ofdm->false_alarm_cnt,
-			 max_ofdm->false_alarm_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "fina_sync_err_cnt:",
-			 le32_to_cpu(ofdm->fina_sync_err_cnt),
-			 accum_ofdm->fina_sync_err_cnt,
-			 delta_ofdm->fina_sync_err_cnt,
-			 max_ofdm->fina_sync_err_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sfd_timeout:",
-			 le32_to_cpu(ofdm->sfd_timeout),
-			 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
-			 max_ofdm->sfd_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "fina_timeout:",
-			 le32_to_cpu(ofdm->fina_timeout),
-			 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
-			 max_ofdm->fina_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "unresponded_rts:",
-			 le32_to_cpu(ofdm->unresponded_rts),
-			 accum_ofdm->unresponded_rts,
-			 delta_ofdm->unresponded_rts,
-			 max_ofdm->unresponded_rts);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "rxe_frame_lmt_ovrun:",
-			 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
-			 accum_ofdm->rxe_frame_limit_overrun,
-			 delta_ofdm->rxe_frame_limit_overrun,
-			 max_ofdm->rxe_frame_limit_overrun);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sent_ack_cnt:",
-			 le32_to_cpu(ofdm->sent_ack_cnt),
-			 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
-			 max_ofdm->sent_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sent_cts_cnt:",
-			 le32_to_cpu(ofdm->sent_cts_cnt),
-			 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
-			 max_ofdm->sent_cts_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sent_ba_rsp_cnt:",
-			 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
-			 accum_ofdm->sent_ba_rsp_cnt,
-			 delta_ofdm->sent_ba_rsp_cnt,
-			 max_ofdm->sent_ba_rsp_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "dsp_self_kill:",
-			 le32_to_cpu(ofdm->dsp_self_kill),
-			 accum_ofdm->dsp_self_kill,
-			 delta_ofdm->dsp_self_kill,
-			 max_ofdm->dsp_self_kill);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "mh_format_err:",
-			 le32_to_cpu(ofdm->mh_format_err),
-			 accum_ofdm->mh_format_err,
-			 delta_ofdm->mh_format_err,
-			 max_ofdm->mh_format_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "re_acq_main_rssi_sum:",
-			 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
-			 accum_ofdm->re_acq_main_rssi_sum,
-			 delta_ofdm->re_acq_main_rssi_sum,
-			 max_ofdm->re_acq_main_rssi_sum);
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_header, "Statistics_Rx - CCK:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "ina_cnt:",
-			 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
-			 delta_cck->ina_cnt, max_cck->ina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "fina_cnt:",
-			 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
-			 delta_cck->fina_cnt, max_cck->fina_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "plcp_err:",
-			 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
-			 delta_cck->plcp_err, max_cck->plcp_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "crc32_err:",
-			 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
-			 delta_cck->crc32_err, max_cck->crc32_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "overrun_err:",
-			 le32_to_cpu(cck->overrun_err),
-			 accum_cck->overrun_err, delta_cck->overrun_err,
-			 max_cck->overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "early_overrun_err:",
-			 le32_to_cpu(cck->early_overrun_err),
-			 accum_cck->early_overrun_err,
-			 delta_cck->early_overrun_err,
-			 max_cck->early_overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "crc32_good:",
-			 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
-			 delta_cck->crc32_good, max_cck->crc32_good);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "false_alarm_cnt:",
-			 le32_to_cpu(cck->false_alarm_cnt),
-			 accum_cck->false_alarm_cnt,
-			 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "fina_sync_err_cnt:",
-			 le32_to_cpu(cck->fina_sync_err_cnt),
-			 accum_cck->fina_sync_err_cnt,
-			 delta_cck->fina_sync_err_cnt,
-			 max_cck->fina_sync_err_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sfd_timeout:",
-			 le32_to_cpu(cck->sfd_timeout),
-			 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
-			 max_cck->sfd_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "fina_timeout:",
-			 le32_to_cpu(cck->fina_timeout),
-			 accum_cck->fina_timeout, delta_cck->fina_timeout,
-			 max_cck->fina_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "unresponded_rts:",
-			 le32_to_cpu(cck->unresponded_rts),
-			 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
-			 max_cck->unresponded_rts);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "rxe_frame_lmt_ovrun:",
-			 le32_to_cpu(cck->rxe_frame_limit_overrun),
-			 accum_cck->rxe_frame_limit_overrun,
-			 delta_cck->rxe_frame_limit_overrun,
-			 max_cck->rxe_frame_limit_overrun);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sent_ack_cnt:",
-			 le32_to_cpu(cck->sent_ack_cnt),
-			 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
-			 max_cck->sent_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sent_cts_cnt:",
-			 le32_to_cpu(cck->sent_cts_cnt),
-			 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
-			 max_cck->sent_cts_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sent_ba_rsp_cnt:",
-			 le32_to_cpu(cck->sent_ba_rsp_cnt),
-			 accum_cck->sent_ba_rsp_cnt,
-			 delta_cck->sent_ba_rsp_cnt,
-			 max_cck->sent_ba_rsp_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "dsp_self_kill:",
-			 le32_to_cpu(cck->dsp_self_kill),
-			 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
-			 max_cck->dsp_self_kill);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "mh_format_err:",
-			 le32_to_cpu(cck->mh_format_err),
-			 accum_cck->mh_format_err, delta_cck->mh_format_err,
-			 max_cck->mh_format_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "re_acq_main_rssi_sum:",
-			 le32_to_cpu(cck->re_acq_main_rssi_sum),
-			 accum_cck->re_acq_main_rssi_sum,
-			 delta_cck->re_acq_main_rssi_sum,
-			 max_cck->re_acq_main_rssi_sum);
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_header, "Statistics_Rx - GENERAL:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "bogus_cts:",
-			 le32_to_cpu(general->bogus_cts),
-			 accum_general->bogus_cts, delta_general->bogus_cts,
-			 max_general->bogus_cts);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "bogus_ack:",
-			 le32_to_cpu(general->bogus_ack),
-			 accum_general->bogus_ack, delta_general->bogus_ack,
-			 max_general->bogus_ack);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "non_bssid_frames:",
-			 le32_to_cpu(general->non_bssid_frames),
-			 accum_general->non_bssid_frames,
-			 delta_general->non_bssid_frames,
-			 max_general->non_bssid_frames);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "filtered_frames:",
-			 le32_to_cpu(general->filtered_frames),
-			 accum_general->filtered_frames,
-			 delta_general->filtered_frames,
-			 max_general->filtered_frames);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "non_channel_beacons:",
-			 le32_to_cpu(general->non_channel_beacons),
-			 accum_general->non_channel_beacons,
-			 delta_general->non_channel_beacons,
-			 max_general->non_channel_beacons);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "channel_beacons:",
-			 le32_to_cpu(general->channel_beacons),
-			 accum_general->channel_beacons,
-			 delta_general->channel_beacons,
-			 max_general->channel_beacons);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "num_missed_bcon:",
-			 le32_to_cpu(general->num_missed_bcon),
-			 accum_general->num_missed_bcon,
-			 delta_general->num_missed_bcon,
-			 max_general->num_missed_bcon);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "adc_rx_saturation_time:",
-			 le32_to_cpu(general->adc_rx_saturation_time),
-			 accum_general->adc_rx_saturation_time,
-			 delta_general->adc_rx_saturation_time,
-			 max_general->adc_rx_saturation_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "ina_detect_search_tm:",
-			 le32_to_cpu(general->ina_detection_search_time),
-			 accum_general->ina_detection_search_time,
-			 delta_general->ina_detection_search_time,
-			 max_general->ina_detection_search_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_silence_rssi_a:",
-			 le32_to_cpu(general->beacon_silence_rssi_a),
-			 accum_general->beacon_silence_rssi_a,
-			 delta_general->beacon_silence_rssi_a,
-			 max_general->beacon_silence_rssi_a);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_silence_rssi_b:",
-			 le32_to_cpu(general->beacon_silence_rssi_b),
-			 accum_general->beacon_silence_rssi_b,
-			 delta_general->beacon_silence_rssi_b,
-			 max_general->beacon_silence_rssi_b);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_silence_rssi_c:",
-			 le32_to_cpu(general->beacon_silence_rssi_c),
-			 accum_general->beacon_silence_rssi_c,
-			 delta_general->beacon_silence_rssi_c,
-			 max_general->beacon_silence_rssi_c);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "interference_data_flag:",
-			 le32_to_cpu(general->interference_data_flag),
-			 accum_general->interference_data_flag,
-			 delta_general->interference_data_flag,
-			 max_general->interference_data_flag);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "channel_load:",
-			 le32_to_cpu(general->channel_load),
-			 accum_general->channel_load,
-			 delta_general->channel_load,
-			 max_general->channel_load);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "dsp_false_alarms:",
-			 le32_to_cpu(general->dsp_false_alarms),
-			 accum_general->dsp_false_alarms,
-			 delta_general->dsp_false_alarms,
-			 max_general->dsp_false_alarms);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_rssi_a:",
-			 le32_to_cpu(general->beacon_rssi_a),
-			 accum_general->beacon_rssi_a,
-			 delta_general->beacon_rssi_a,
-			 max_general->beacon_rssi_a);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_rssi_b:",
-			 le32_to_cpu(general->beacon_rssi_b),
-			 accum_general->beacon_rssi_b,
-			 delta_general->beacon_rssi_b,
-			 max_general->beacon_rssi_b);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_rssi_c:",
-			 le32_to_cpu(general->beacon_rssi_c),
-			 accum_general->beacon_rssi_c,
-			 delta_general->beacon_rssi_c,
-			 max_general->beacon_rssi_c);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_energy_a:",
-			 le32_to_cpu(general->beacon_energy_a),
-			 accum_general->beacon_energy_a,
-			 delta_general->beacon_energy_a,
-			 max_general->beacon_energy_a);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_energy_b:",
-			 le32_to_cpu(general->beacon_energy_b),
-			 accum_general->beacon_energy_b,
-			 delta_general->beacon_energy_b,
-			 max_general->beacon_energy_b);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "beacon_energy_c:",
-			 le32_to_cpu(general->beacon_energy_c),
-			 accum_general->beacon_energy_c,
-			 delta_general->beacon_energy_c,
-			 max_general->beacon_energy_c);
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_header, "Statistics_Rx - OFDM_HT:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "plcp_err:",
-			 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
-			 delta_ht->plcp_err, max_ht->plcp_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "overrun_err:",
-			 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
-			 delta_ht->overrun_err, max_ht->overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "early_overrun_err:",
-			 le32_to_cpu(ht->early_overrun_err),
-			 accum_ht->early_overrun_err,
-			 delta_ht->early_overrun_err,
-			 max_ht->early_overrun_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "crc32_good:",
-			 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
-			 delta_ht->crc32_good, max_ht->crc32_good);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "crc32_err:",
-			 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
-			 delta_ht->crc32_err, max_ht->crc32_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "mh_format_err:",
-			 le32_to_cpu(ht->mh_format_err),
-			 accum_ht->mh_format_err,
-			 delta_ht->mh_format_err, max_ht->mh_format_err);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg_crc32_good:",
-			 le32_to_cpu(ht->agg_crc32_good),
-			 accum_ht->agg_crc32_good,
-			 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg_mpdu_cnt:",
-			 le32_to_cpu(ht->agg_mpdu_cnt),
-			 accum_ht->agg_mpdu_cnt,
-			 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg_cnt:",
-			 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
-			 delta_ht->agg_cnt, max_ht->agg_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "unsupport_mcs:",
-			 le32_to_cpu(ht->unsupport_mcs),
-			 accum_ht->unsupport_mcs,
-			 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
-				char __user *user_buf,
-				size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char *buf;
-	int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
-	ssize_t ret;
-	struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	/* The statistics information displayed here is based on
-	 * the last statistics notification from uCode and
-	 * might not reflect the current uCode activity.
-	 */
-	tx = &priv->_4965.statistics.tx;
-	accum_tx = &priv->_4965.accum_statistics.tx;
-	delta_tx = &priv->_4965.delta_statistics.tx;
-	max_tx = &priv->_4965.max_delta.tx;
-
-	pos += iwl4965_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_header, "Statistics_Tx:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "preamble:",
-			 le32_to_cpu(tx->preamble_cnt),
-			 accum_tx->preamble_cnt,
-			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "rx_detected_cnt:",
-			 le32_to_cpu(tx->rx_detected_cnt),
-			 accum_tx->rx_detected_cnt,
-			 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "bt_prio_defer_cnt:",
-			 le32_to_cpu(tx->bt_prio_defer_cnt),
-			 accum_tx->bt_prio_defer_cnt,
-			 delta_tx->bt_prio_defer_cnt,
-			 max_tx->bt_prio_defer_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "bt_prio_kill_cnt:",
-			 le32_to_cpu(tx->bt_prio_kill_cnt),
-			 accum_tx->bt_prio_kill_cnt,
-			 delta_tx->bt_prio_kill_cnt,
-			 max_tx->bt_prio_kill_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "few_bytes_cnt:",
-			 le32_to_cpu(tx->few_bytes_cnt),
-			 accum_tx->few_bytes_cnt,
-			 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "cts_timeout:",
-			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
-			 delta_tx->cts_timeout, max_tx->cts_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "ack_timeout:",
-			 le32_to_cpu(tx->ack_timeout),
-			 accum_tx->ack_timeout,
-			 delta_tx->ack_timeout, max_tx->ack_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "expected_ack_cnt:",
-			 le32_to_cpu(tx->expected_ack_cnt),
-			 accum_tx->expected_ack_cnt,
-			 delta_tx->expected_ack_cnt,
-			 max_tx->expected_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "actual_ack_cnt:",
-			 le32_to_cpu(tx->actual_ack_cnt),
-			 accum_tx->actual_ack_cnt,
-			 delta_tx->actual_ack_cnt,
-			 max_tx->actual_ack_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "dump_msdu_cnt:",
-			 le32_to_cpu(tx->dump_msdu_cnt),
-			 accum_tx->dump_msdu_cnt,
-			 delta_tx->dump_msdu_cnt,
-			 max_tx->dump_msdu_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "abort_nxt_frame_mismatch:",
-			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
-			 accum_tx->burst_abort_next_frame_mismatch_cnt,
-			 delta_tx->burst_abort_next_frame_mismatch_cnt,
-			 max_tx->burst_abort_next_frame_mismatch_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "abort_missing_nxt_frame:",
-			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
-			 accum_tx->burst_abort_missing_next_frame_cnt,
-			 delta_tx->burst_abort_missing_next_frame_cnt,
-			 max_tx->burst_abort_missing_next_frame_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "cts_timeout_collision:",
-			 le32_to_cpu(tx->cts_timeout_collision),
-			 accum_tx->cts_timeout_collision,
-			 delta_tx->cts_timeout_collision,
-			 max_tx->cts_timeout_collision);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "ack_ba_timeout_collision:",
-			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
-			 accum_tx->ack_or_ba_timeout_collision,
-			 delta_tx->ack_or_ba_timeout_collision,
-			 max_tx->ack_or_ba_timeout_collision);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg ba_timeout:",
-			 le32_to_cpu(tx->agg.ba_timeout),
-			 accum_tx->agg.ba_timeout,
-			 delta_tx->agg.ba_timeout,
-			 max_tx->agg.ba_timeout);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg ba_resched_frames:",
-			 le32_to_cpu(tx->agg.ba_reschedule_frames),
-			 accum_tx->agg.ba_reschedule_frames,
-			 delta_tx->agg.ba_reschedule_frames,
-			 max_tx->agg.ba_reschedule_frames);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg scd_query_agg_frame:",
-			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
-			 accum_tx->agg.scd_query_agg_frame_cnt,
-			 delta_tx->agg.scd_query_agg_frame_cnt,
-			 max_tx->agg.scd_query_agg_frame_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg scd_query_no_agg:",
-			 le32_to_cpu(tx->agg.scd_query_no_agg),
-			 accum_tx->agg.scd_query_no_agg,
-			 delta_tx->agg.scd_query_no_agg,
-			 max_tx->agg.scd_query_no_agg);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg scd_query_agg:",
-			 le32_to_cpu(tx->agg.scd_query_agg),
-			 accum_tx->agg.scd_query_agg,
-			 delta_tx->agg.scd_query_agg,
-			 max_tx->agg.scd_query_agg);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg scd_query_mismatch:",
-			 le32_to_cpu(tx->agg.scd_query_mismatch),
-			 accum_tx->agg.scd_query_mismatch,
-			 delta_tx->agg.scd_query_mismatch,
-			 max_tx->agg.scd_query_mismatch);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg frame_not_ready:",
-			 le32_to_cpu(tx->agg.frame_not_ready),
-			 accum_tx->agg.frame_not_ready,
-			 delta_tx->agg.frame_not_ready,
-			 max_tx->agg.frame_not_ready);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg underrun:",
-			 le32_to_cpu(tx->agg.underrun),
-			 accum_tx->agg.underrun,
-			 delta_tx->agg.underrun, max_tx->agg.underrun);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg bt_prio_kill:",
-			 le32_to_cpu(tx->agg.bt_prio_kill),
-			 accum_tx->agg.bt_prio_kill,
-			 delta_tx->agg.bt_prio_kill,
-			 max_tx->agg.bt_prio_kill);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "agg rx_ba_rsp_cnt:",
-			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
-			 accum_tx->agg.rx_ba_rsp_cnt,
-			 delta_tx->agg.rx_ba_rsp_cnt,
-			 max_tx->agg.rx_ba_rsp_cnt);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
-				     size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char *buf;
-	int bufsz = sizeof(struct statistics_general) * 10 + 300;
-	ssize_t ret;
-	struct statistics_general_common *general, *accum_general;
-	struct statistics_general_common *delta_general, *max_general;
-	struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
-	struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	/* The statistics information displayed here is based on
-	 * the last statistics notification from uCode and
-	 * might not reflect the current uCode activity.
-	 */
-	general = &priv->_4965.statistics.general.common;
-	dbg = &priv->_4965.statistics.general.common.dbg;
-	div = &priv->_4965.statistics.general.common.div;
-	accum_general = &priv->_4965.accum_statistics.general.common;
-	accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
-	accum_div = &priv->_4965.accum_statistics.general.common.div;
-	delta_general = &priv->_4965.delta_statistics.general.common;
-	max_general = &priv->_4965.max_delta.general.common;
-	delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
-	max_dbg = &priv->_4965.max_delta.general.common.dbg;
-	delta_div = &priv->_4965.delta_statistics.general.common.div;
-	max_div = &priv->_4965.max_delta.general.common.div;
-
-	pos += iwl4965_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_header, "Statistics_General:");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_value, "temperature:",
-			 le32_to_cpu(general->temperature));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_value, "ttl_timestamp:",
-			 le32_to_cpu(general->ttl_timestamp));
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "burst_check:",
-			 le32_to_cpu(dbg->burst_check),
-			 accum_dbg->burst_check,
-			 delta_dbg->burst_check, max_dbg->burst_check);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "burst_count:",
-			 le32_to_cpu(dbg->burst_count),
-			 accum_dbg->burst_count,
-			 delta_dbg->burst_count, max_dbg->burst_count);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "wait_for_silence_timeout_count:",
-			 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
-			 accum_dbg->wait_for_silence_timeout_cnt,
-			 delta_dbg->wait_for_silence_timeout_cnt,
-			 max_dbg->wait_for_silence_timeout_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "sleep_time:",
-			 le32_to_cpu(general->sleep_time),
-			 accum_general->sleep_time,
-			 delta_general->sleep_time, max_general->sleep_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "slots_out:",
-			 le32_to_cpu(general->slots_out),
-			 accum_general->slots_out,
-			 delta_general->slots_out, max_general->slots_out);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "slots_idle:",
-			 le32_to_cpu(general->slots_idle),
-			 accum_general->slots_idle,
-			 delta_general->slots_idle, max_general->slots_idle);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "tx_on_a:",
-			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
-			 delta_div->tx_on_a, max_div->tx_on_a);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "tx_on_b:",
-			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
-			 delta_div->tx_on_b, max_div->tx_on_b);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "exec_time:",
-			 le32_to_cpu(div->exec_time), accum_div->exec_time,
-			 delta_div->exec_time, max_div->exec_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "probe_time:",
-			 le32_to_cpu(div->probe_time), accum_div->probe_time,
-			 delta_div->probe_time, max_div->probe_time);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "rx_enable_counter:",
-			 le32_to_cpu(general->rx_enable_counter),
-			 accum_general->rx_enable_counter,
-			 delta_general->rx_enable_counter,
-			 max_general->rx_enable_counter);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			 fmt_table, "num_of_sos_states:",
-			 le32_to_cpu(general->num_of_sos_states),
-			 accum_general->num_of_sos_states,
-			 delta_general->num_of_sos_states,
-			 max_general->num_of_sos_states);
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e353..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-				size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-				size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_general_stats_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos);
-#else
-static ssize_t
-iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-				       size_t count, loff_t *ppos)
-{
-	return 0;
-}
-static ssize_t
-iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-				       size_t count, loff_t *ppos)
-{
-	return 0;
-}
-static ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
-					    size_t count, loff_t *ppos)
-{
-	return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-4965.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
-	u16 count;
-	int ret;
-
-	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
-		/* Request semaphore */
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-		/* See if we got it */
-		ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
-				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
-				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
-				EEPROM_SEM_TIMEOUT);
-		if (ret >= 0) {
-			IWL_DEBUG_IO(priv,
-				"Acquired semaphore after %d tries.\n",
-				count+1);
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
-{
-	iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
-		CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
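/*
 * Editor's illustrative sketch (not part of the deleted file): the intended
 * usage pattern of the semaphore helpers above.  A caller that wants to read
 * the EEPROM acquires the semaphore, performs its accesses, and releases it.
 * example_read_eeprom_word() is hypothetical and stands in for the driver's
 * real EEPROM read path.
 */
static int example_eeprom_access(struct iwl_priv *priv, u16 offset, u16 *word)
{
	int ret;

	ret = iwl4965_eeprom_acquire_semaphore(priv);
	if (ret < 0)
		return ret;		/* uCode still owns the EEPROM */

	*word = example_read_eeprom_word(priv, offset);	/* hypothetical helper */

	iwl4965_eeprom_release_semaphore(priv);
	return 0;
}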
-
-int iwl4965_eeprom_check_version(struct iwl_priv *priv)
-{
-	u16 eeprom_ver;
-	u16 calib_ver;
-
-	eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
-	calib_ver = iwl_legacy_eeprom_query16(priv,
-			EEPROM_4965_CALIB_VERSION_OFFSET);
-
-	if (eeprom_ver < priv->cfg->eeprom_ver ||
-	    calib_ver < priv->cfg->eeprom_calib_ver)
-		goto err;
-
-	IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
-		 eeprom_ver, calib_ver);
-
-	return 0;
-err:
-	IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
-		  "CALIB=0x%x < 0x%x\n",
-		  eeprom_ver, priv->cfg->eeprom_ver,
-		  calib_ver,  priv->cfg->eeprom_calib_ver);
-	return -EINVAL;
-
-}
-
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
-	const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
-					EEPROM_MAC_ADDRESS);
-	memcpy(mac, addr, ETH_ALEN);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa28..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE			1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE	7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND		(0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND		(0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND		(0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND		(0x80A000)
-
-#define IWL49_RTC_INST_SIZE  (IWL49_RTC_INST_UPPER_BOUND - \
-				IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE  (IWL49_RTC_DATA_UPPER_BOUND - \
-				IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
-	return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
-	       (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent).  The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp).  After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
- *        must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin).  The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
- * temperature with factory-measured temperatures when calculating txpower
- * settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN  (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX  (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
-	(((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
-	 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
-
-/********************* END TEMPERATURE ***************************************/
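/*
 * Editor's illustrative sketch (not part of the deleted header): the
 * temperature formula above, expressed as code.  r1..r3 are the calibration
 * values and r4 the real-time indicator from the "initialize alive"
 * notification / statistics, all assumed to be in CPU byte order and already
 * sign-extended to 32 bits as required by the NOTE above.
 */
static s32 example_4965_temperature(s32 r1, s32 r2, s32 r3, s32 r4)
{
	s32 kelvin;

	if (r3 == r1)		/* guard against a nonsensical calibration set */
		return 0;

	/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
	kelvin = (97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2)) / (r3 - r1);
	kelvin = kelvin / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(kelvin))
		return 0;	/* caller would fall back to a previous value */

	return kelvin;
}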
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- *     1) EEPROM
- *     2) "initialize" alive notification
- *     3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1)  Regulatory information (max txpower and channel usage flags) is provided
- *     separately for each channel that can possibly be supported by the 4965.
- *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- *     (legacy) channels.
- *
- *     See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- *     for locations in EEPROM.
- *
- * 2)  Factory txpower calibration information is provided separately for
- *     sub-bands of contiguous channels.  2.4GHz has just one sub-band,
- *     but 5 GHz has several sub-bands.
- *
- *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- *     See struct iwl4965_eeprom_calib_info (and the tree of structures
- *     contained within it) for format, and struct iwl4965_eeprom for
- *     locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1)  Temperature calculation parameters.
- *
- * 2)  Power supply voltage measurement.
- *
- * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1)  Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- *     Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- *     If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- *     2 transmitters will be used simultaneously; driver must reduce the
- *     regulatory limit by 3 dB (half-power) for each transmitter, so the
- *     combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
- *     reduce target txpower if necessary.
- *
- *     Backoff values below are in 1/2 dB units (equivalent to steps in
- *     txpower gain tables):
- *
- *     OFDM 6 - 36 MBit:  10 steps (5 dB)
- *     OFDM 48 MBit:      15 steps (7.5 dB)
- *     OFDM 54 MBit:      17 steps (8.5 dB)
- *     OFDM 60 MBit:      20 steps (10 dB)
- *     CCK all rates:     10 steps (5 dB)
- *
- *     Backoff values apply to saturation txpower on a per-transmitter basis;
- *     when using MIMO (2 transmitters), each transmitter uses the same
- *     saturation level provided in EEPROM, and the same backoff values;
- *     no reduction (such as with regulatory txpower limits) is required.
- *
- *     Saturation and backoff values apply equally to 20 MHz (legacy) channel
- *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
- *     factory measurement for HT40 channels.
- *
- *     The result of this step is the final target txpower.  The rest of
- *     the steps figure out the proper settings for the device to achieve
- *     that target txpower.
- *
- *
- * 3)  Determine (EEPROM) calibration sub band for the target channel, by
- *     comparing against first and last channels in each sub band
- *     (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
- *     referencing the 2 factory-measured (sample) channels within the sub band.
- *
- *     Interpolation is based on difference between target channel's frequency
- *     and the sample channels' frequencies.  Since channel numbers are based
- *     on frequency (5 MHz between each channel number), this is equivalent
- *     to interpolating based on channel number differences.
- *
- *     Note that the sample channels may or may not be the channels at the
- *     edges of the sub band.  The target channel may be "outside" of the
- *     span of the sampled channels.
- *
- *     Driver may choose the pair (for 2 Tx chains) of measurements (see
- *     struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- *     txpower comes closest to the desired txpower.  Usually, though,
- *     the middle set of measurements is closest to the regulatory limits,
- *     and is therefore a good choice for all txpower calculations (this
- *     assumes that high accuracy is needed for maximizing legal txpower,
- *     while lower txpower configurations do not need as much accuracy).
- *
- *     Driver should interpolate both members of the chosen measurement pair,
- *     i.e. for both Tx chains (radio transmitters), unless the driver knows
- *     that only one of the chains will be used (e.g. only one tx antenna
- *     connected, but this should be unusual).  The rate scaling algorithm
- *     switches antennas to find best performance, so both Tx chains will
- *     be used (although only one at a time) even for non-MIMO transmissions.
- *
- *     Driver should interpolate factory values for temperature, gain table
- *     index, and actual power.  The power amplifier detector values are
- *     not used by the driver.
- *
- *     Sanity check:  If the target channel happens to be one of the sample
- *     channels, the results should agree with the sample channel's
- *     measurements!
- *
- *
- * 5)  Find difference between desired txpower and (interpolated)
- *     factory-measured txpower.  Using (interpolated) factory gain table index
- *     (shown elsewhere) as a starting point, adjust this index lower to
- *     increase txpower, or higher to decrease txpower, until the target
- *     txpower is reached.  Each step in the gain table is 1/2 dB.
- *
- *     For example, if factory measured txpower is 16 dBm, and target txpower
- *     is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- *     by 3 dB.
- *
- *
- * 6)  Find difference between current device temperature and (interpolated)
- *     factory-measured temperature for sub-band.  Factory values are in
- *     degrees Celsius.  To calculate current temperature, see comments for
- *     "4965 temperature calculation".
- *
- *     If current temperature is higher than factory temperature, driver must
- *     increase gain (lower gain table index), and vice versa.
- *
- *     Temperature affects gain differently for different channels:
- *
- *     2.4 GHz all channels:  3.5 degrees per half-dB step
- *     5 GHz channels 34-43:  4.5 degrees per half-dB step
- *     5 GHz channels >= 44:  4.0 degrees per half-dB step
- *
- *     NOTE:  Temperature can increase rapidly when transmitting, especially
- *            with heavy traffic at high txpowers.  Driver should update
- *            temperature calculations often under these conditions to
- *            maintain strong txpower in the face of rising temperature.
- *
- *
- * 7)  Find difference between current power supply voltage indicator
- *     (from "initialize alive") and factory-measured power supply voltage
- *     indicator (EEPROM).
- *
- *     If the current voltage is higher (indicator is lower) than factory
- *     voltage, gain should be reduced (gain table index increased) by:
- *
- *     (eeprom - current) / 7
- *
- *     If the current voltage is lower (indicator is higher) than factory
- *     voltage, gain should be increased (gain table index decreased) by:
- *
- *     2 * (current - eeprom) / 7
- *
- *     If number of index steps in either direction turns out to be > 2,
- *     something is wrong ... just use 0.
- *
- *     NOTE:  Voltage compensation is independent of band/channel.
- *
- *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
- *            to be constant after this initial measurement.  Voltage
- *            compensation for txpower (number of steps in gain table)
- *            may be calculated once and used until the next uCode bootload.
- *
- *
- * 8)  If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- *     adjust txpower for each transmitter chain, so txpower is balanced
- *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
- *     values in "initialize alive", one pair for each of 5 channel ranges:
- *
- *     Group 0:  5 GHz channel 34-43
- *     Group 1:  5 GHz channel 44-70
- *     Group 2:  5 GHz channel 71-124
- *     Group 3:  5 GHz channel 125-200
- *     Group 4:  2.4 GHz all channels
- *
- *     Add the tx_atten[group][chain] value to the index for the target chain.
- *     The values are signed, but are in pairs of 0 and a non-negative number,
- *     so as to reduce gain (if necessary) of the "hotter" channel.  This
- *     avoids any need to double-check for regulatory compliance after
- *     this step.
- *
- *
- * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
- *     value to the index:
- *
- *     Hardware rev B:  9 steps (4.5 dB)
- *     Hardware rev C:  5 steps (2.5 dB)
- *
- *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- *     bits [3:2], 1 = B, 2 = C.
- *
- *     NOTE:  This compensation is in addition to any saturation backoff that
- *            might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- *     Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- *     location(s) in command (struct iwl4965_txpowertable_cmd).
- */
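/*
 * Editor's illustrative sketch (an assumption, not the deleted driver code):
 * steps 1, 2 and 5 above for a single rate and Tx chain.  All power values
 * are in half-dBm units, matching the EEPROM conventions described later in
 * this header; "factory_index" is the (interpolated) factory gain table index.
 */
static int example_txpower_to_gain_index(s32 target, s32 regulatory_limit,
					 s32 saturation, s32 rate_backoff,
					 s32 factory_power, int factory_index)
{
	/* Step 1: never exceed the regulatory limit for this channel. */
	if (target > regulatory_limit)
		target = regulatory_limit;

	/* Step 2: never exceed saturation power minus the per-rate backoff. */
	if (target > saturation - rate_backoff)
		target = saturation - rate_backoff;

	/*
	 * Step 5: each gain table step is 1/2 dB, so the index moves by the
	 * difference in half-dB units; a higher index means less txpower.
	 */
	return factory_index + (factory_power - target);
}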
-
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to deliver a max of 3 dB below the regulatory limit
- * for the device.  That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V   (7)
-
-/**
- * Gain tables.
- *
- * The following tables contain pair of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
- * are *relative* steps, not indications of absolute output power.  Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
- *     linear value that multiplies the output of the digital signal processor,
- *     before being sent to the analog radio.
- * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
- *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower.  This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration).  A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX		(0)  /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT	(-9) /* highest gain, lowest idx, 5 */
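/*
 * Editor's illustrative sketch (hypothetical struct, not from the deleted
 * header): one way to model the gain table entries described above, plus a
 * clamp that keeps an adjusted index inside the 2.4 GHz table listed below.
 */
struct example_gain_entry {
	u8 dsp;		/* fine-grained digital (DSP) gain */
	u8 radio;	/* coarse analog radio gain, logarithmic */
};

static int example_clamp_24ghz_index(int index)
{
	if (index < MIN_TX_GAIN_INDEX)
		index = MIN_TX_GAIN_INDEX;
	if (index > 98)		/* last entry of the 2.4 GHz table below */
		index = 98;
	return index;
}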
-
-/**
- * 2.4 GHz gain table
- *
- * Index    Dsp gain   Radio gain
- *   0        110         0x3f      (highest gain)
- *   1        104         0x3f
- *   2         98         0x3f
- *   3        110         0x3e
- *   4        104         0x3e
- *   5         98         0x3e
- *   6        110         0x3d
- *   7        104         0x3d
- *   8         98         0x3d
- *   9        110         0x3c
- *  10        104         0x3c
- *  11         98         0x3c
- *  12        110         0x3b
- *  13        104         0x3b
- *  14         98         0x3b
- *  15        110         0x3a
- *  16        104         0x3a
- *  17         98         0x3a
- *  18        110         0x39
- *  19        104         0x39
- *  20         98         0x39
- *  21        110         0x38
- *  22        104         0x38
- *  23         98         0x38
- *  24        110         0x37
- *  25        104         0x37
- *  26         98         0x37
- *  27        110         0x36
- *  28        104         0x36
- *  29         98         0x36
- *  30        110         0x35
- *  31        104         0x35
- *  32         98         0x35
- *  33        110         0x34
- *  34        104         0x34
- *  35         98         0x34
- *  36        110         0x33
- *  37        104         0x33
- *  38         98         0x33
- *  39        110         0x32
- *  40        104         0x32
- *  41         98         0x32
- *  42        110         0x31
- *  43        104         0x31
- *  44         98         0x31
- *  45        110         0x30
- *  46        104         0x30
- *  47         98         0x30
- *  48        110          0x6
- *  49        104          0x6
- *  50         98          0x6
- *  51        110          0x5
- *  52        104          0x5
- *  53         98          0x5
- *  54        110          0x4
- *  55        104          0x4
- *  56         98          0x4
- *  57        110          0x3
- *  58        104          0x3
- *  59         98          0x3
- *  60        110          0x2
- *  61        104          0x2
- *  62         98          0x2
- *  63        110          0x1
- *  64        104          0x1
- *  65         98          0x1
- *  66        110          0x0
- *  67        104          0x0
- *  68         98          0x0
- *  69         97            0
- *  70         96            0
- *  71         95            0
- *  72         94            0
- *  73         93            0
- *  74         92            0
- *  75         91            0
- *  76         90            0
- *  77         89            0
- *  78         88            0
- *  79         87            0
- *  80         86            0
- *  81         85            0
- *  82         84            0
- *  83         83            0
- *  84         82            0
- *  85         81            0
- *  86         80            0
- *  87         79            0
- *  88         78            0
- *  89         77            0
- *  90         76            0
- *  91         75            0
- *  92         74            0
- *  93         73            0
- *  94         72            0
- *  95         71            0
- *  96         70            0
- *  97         69            0
- *  98         68            0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index    Dsp gain   Radio gain
- *  -9 	      123         0x3F      (highest gain)
- *  -8 	      117         0x3F
- *  -7        110         0x3F
- *  -6        104         0x3F
- *  -5         98         0x3F
- *  -4        110         0x3E
- *  -3        104         0x3E
- *  -2         98         0x3E
- *  -1        110         0x3D
- *   0        104         0x3D
- *   1         98         0x3D
- *   2        110         0x3C
- *   3        104         0x3C
- *   4         98         0x3C
- *   5        110         0x3B
- *   6        104         0x3B
- *   7         98         0x3B
- *   8        110         0x3A
- *   9        104         0x3A
- *  10         98         0x3A
- *  11        110         0x39
- *  12        104         0x39
- *  13         98         0x39
- *  14        110         0x38
- *  15        104         0x38
- *  16         98         0x38
- *  17        110         0x37
- *  18        104         0x37
- *  19         98         0x37
- *  20        110         0x36
- *  21        104         0x36
- *  22         98         0x36
- *  23        110         0x35
- *  24        104         0x35
- *  25         98         0x35
- *  26        110         0x34
- *  27        104         0x34
- *  28         98         0x34
- *  29        110         0x33
- *  30        104         0x33
- *  31         98         0x33
- *  32        110         0x32
- *  33        104         0x32
- *  34         98         0x32
- *  35        110         0x31
- *  36        104         0x31
- *  37         98         0x31
- *  38        110         0x30
- *  39        104         0x30
- *  40         98         0x30
- *  41        110         0x25
- *  42        104         0x25
- *  43         98         0x25
- *  44        110         0x24
- *  45        104         0x24
- *  46         98         0x24
- *  47        110         0x23
- *  48        104         0x23
- *  49         98         0x23
- *  50        110         0x22
- *  51        104         0x18
- *  52         98         0x18
- *  53        110         0x17
- *  54        104         0x17
- *  55         98         0x17
- *  56        110         0x16
- *  57        104         0x16
- *  58         98         0x16
- *  59        110         0x15
- *  60        104         0x15
- *  61         98         0x15
- *  62        110         0x14
- *  63        104         0x14
- *  64         98         0x14
- *  65        110         0x13
- *  66        104         0x13
- *  67         98         0x13
- *  68        110         0x12
- *  69        104         0x08
- *  70         98         0x08
- *  71        110         0x07
- *  72        104         0x07
- *  73         98         0x07
- *  74        110         0x06
- *  75        104         0x06
- *  76         98         0x06
- *  77        110         0x05
- *  78        104         0x05
- *  79         98         0x05
- *  80        110         0x04
- *  81        104         0x04
- *  82         98         0x04
- *  83        110         0x03
- *  84        104         0x03
- *  85         98         0x03
- *  86        110         0x02
- *  87        104         0x02
- *  88         98         0x02
- *  89        110         0x01
- *  90        104         0x01
- *  91         98         0x01
- *  92        110         0x00
- *  93        104         0x00
- *  94         98         0x00
- *  95         93         0x00
- *  96         88         0x00
- *  97         83         0x00
- *  98         78         0x00
- */
-
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated.  These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24   (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52   (34)
-#define IWL_TX_POWER_REGULATORY_MIN          (0)
-#define IWL_TX_POWER_REGULATORY_MAX          (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion.  This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit,
- * nor the regulatory limit, when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24   (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52   (38)
-#define IWL_TX_POWER_SATURATION_MIN          (20)
-#define IWL_TX_POWER_SATURATION_MAX          (50)
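Since the driver must stay under both ceilings at once, the effective per-rate target is bounded by the regulatory limit and by the saturation level minus that rate's backoff. A hedged sketch of that relationship (the helper and its backoff parameter are illustrative, not the driver's actual txpower math; all values in half-dBm):

static inline int tx_power_ceiling(int regulatory_limit,
				   int saturation_level,
				   int rate_backoff)
{
	int peak_limited = saturation_level - rate_backoff;

	/* The lower of the two ceilings wins. */
	return peak_limited < regulatory_limit ? peak_limited
					       : regulatory_limit;
}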
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain.  Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below.  If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
-	CALIB_CH_GROUP_1 = 0,
-	CALIB_CH_GROUP_2 = 1,
-	CALIB_CH_GROUP_3 = 2,
-	CALIB_CH_GROUP_4 = 3,
-	CALIB_CH_GROUP_5 = 4,
-	CALIB_CH_GROUP_MAX
-};
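The FCH/LCH boundaries above let txpower code resolve any channel to a single temperature-compensation group. A sketch of that lookup, reusing the #defines and enum values above (the function name is illustrative; 2.4 GHz channels 1-20 map to the last group, the 5 GHz ranges to the first four):

static int calib_group_for_channel(int channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;		/* 2.4 GHz */
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;		/* 5.2 GHz */
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;		/* 5.3 GHz */
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;		/* 5.5 GHz */
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;		/* 5.7 GHz */
	return -1;	/* not covered by any calibration group */
}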
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues).  All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device.  When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS 	7
-#define IWL49_CMD_FIFO_NUM	4
-#define IWL49_NUM_QUEUES	16
-#define IWL49_NUM_AMPDU_QUEUES	8
-
-
-/**
- * struct iwl4965_schedq_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue.  If the TFD index is 0-63, the driver
- * must duplicate the byte count entry in corresponding index 256-319.
- *
- * padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
-	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
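The duplication rule described above (entries 0-63 mirrored at 256-319) is what keeps the scheduler's 64-TFD read window from wrapping into stale counts. A standalone sketch of a write helper that honours it (hypothetical name; the real table stores little-endian __le16 values and TFD_QUEUE_BC_SIZE is the 320-entry total implied by the comment):

#include <stdint.h>

#define TFD_QUEUE_SIZE_MAX	256	/* TFDs per Tx queue */
#define TFD_QUEUE_BC_DUP	64	/* duplicated window entries */

/* Record the byte count for TFD 'idx' and, for indices 0-63, mirror it
 * into the duplicate region at 256-319. */
static void scd_bc_tbl_set(uint16_t *tfd_offset, int idx, uint16_t byte_cnt)
{
	tfd_offset[idx] = byte_cnt;
	if (idx < TFD_QUEUE_BC_DUP)
		tfd_offset[TFD_QUEUE_SIZE_MAX + idx] = byte_cnt;
}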
-
-
-#define IWL4965_RTC_INST_LOWER_BOUND		(0x000000)
-
-/* RSSI to dBm */
-#define IWL4965_RSSI_OFFSET	44
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT	0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
-
-#define IWL4965_DEFAULT_TX_RETRY  15
-
-/* EEPROM */
-#define IWL4965_FIRST_AMPDU_QUEUE	10
-
-
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdc..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-4965-led.h"
-
-/* Send led command */
-static int
-iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
-{
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_LEDS_CMD,
-		.len = sizeof(struct iwl_led_cmd),
-		.data = led_cmd,
-		.flags = CMD_ASYNC,
-		.callback = NULL,
-	};
-	u32 reg;
-
-	reg = iwl_read32(priv, CSR_LED_REG);
-	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
-		iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
-
-	return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-/* Set led register off */
-void iwl4965_led_enable(struct iwl_priv *priv)
-{
-	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-}
-
-const struct iwl_led_ops iwl4965_led_ops = {
-	.cmd = iwl4965_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_4965_led_h__
-#define __iwl_4965_led_h__
-
-extern const struct iwl_led_ops iwl4965_led_ops;
-void iwl4965_led_enable(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-sta.h"
-
-void iwl4965_check_abort_status(struct iwl_priv *priv,
-			    u8 frame_count, u32 status)
-{
-	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
-		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
-		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
-			queue_work(priv->workqueue, &priv->tx_flush);
-	}
-}
-
-/*
- * EEPROM
- */
-struct iwl_mod_params iwl4965_mod_params = {
-	.amsdu_size_8K = 1,
-	.restart_fw = 1,
-	/* the rest are 0 by default */
-};
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	unsigned long flags;
-	int i;
-	spin_lock_irqsave(&rxq->lock, flags);
-	INIT_LIST_HEAD(&rxq->rx_free);
-	INIT_LIST_HEAD(&rxq->rx_used);
-	/* Fill the rx_used queue with _all_ of the Rx buffers */
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-		/* In the reset function, these buffers may have been allocated
-		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].page != NULL) {
-			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
-			rxq->pool[i].page = NULL;
-		}
-		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-	}
-
-	for (i = 0; i < RX_QUEUE_SIZE; i++)
-		rxq->queue[i] = NULL;
-
-	/* Set us so that we have processed and used all buffers, but have
-	 * not restocked the Rx queue with fresh buffers */
-	rxq->read = rxq->write = 0;
-	rxq->write_actual = 0;
-	rxq->free_count = 0;
-	spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	u32 rb_size;
-	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
-	u32 rb_timeout = 0;
-
-	if (priv->cfg->mod_params->amsdu_size_8K)
-		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
-	else
-		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
-	/* Stop Rx DMA */
-	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
-	/* Reset driver's Rx queue write index */
-	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
-	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   (u32)(rxq->bd_dma >> 8));
-
-	/* Tell device where in DRAM to update its Rx status */
-	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			   rxq->rb_stts_dma >> 4);
-
-	/* Enable Rx DMA
-	 * Direct rx interrupts to hosts
-	 * Rx buffer size 4 or 8k
-	 * RB timeout 0x10
-	 * 256 RBDs
-	 */
-	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
-			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
-			   rb_size|
-			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
-			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
-	/* Set interrupt coalescing timer to default (2048 usecs) */
-	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
-	return 0;
-}
-
-static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
-		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
-			iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
-					       ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
-	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
-			       ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwl4965_hw_nic_init(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	int ret;
-
-	/* nic_init */
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->cfg->ops->lib->apm_ops.init(priv);
-
-	/* Set interrupt coalescing calibration timer to default (512 usecs) */
-	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	iwl4965_set_pwr_vmain(priv);
-
-	priv->cfg->ops->lib->apm_ops.config(priv);
-
-	/* Allocate the RX queue, or reset if it is already allocated */
-	if (!rxq->bd) {
-		ret = iwl_legacy_rx_queue_alloc(priv);
-		if (ret) {
-			IWL_ERR(priv, "Unable to initialize Rx queue\n");
-			return -ENOMEM;
-		}
-	} else
-		iwl4965_rx_queue_reset(priv, rxq);
-
-	iwl4965_rx_replenish(priv);
-
-	iwl4965_rx_init(priv, rxq);
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	rxq->need_update = 1;
-	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Allocate or reset and init all Tx and Command queues */
-	if (!priv->txq) {
-		ret = iwl4965_txq_ctx_alloc(priv);
-		if (ret)
-			return ret;
-	} else
-		iwl4965_txq_ctx_reset(priv);
-
-	set_bit(STATUS_INIT, &priv->status);
-
-	return 0;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
-					  dma_addr_t dma_addr)
-{
-	return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct list_head *element;
-	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rxq->lock, flags);
-	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-		/* The overwritten rxb must be a used one */
-		rxb = rxq->queue[rxq->write];
-		BUG_ON(rxb && rxb->page);
-
-		/* Get next free Rx buffer, remove from free list */
-		element = rxq->rx_free.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
-		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
-							      rxb->page_dma);
-		rxq->queue[rxq->write] = rxb;
-		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-		rxq->free_count--;
-	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-	/* If we've added more space for the firmware to place data, tell it.
-	 * Increment device's write pointer in multiples of 8. */
-	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
-		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-	}
-}
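The closing check only flags an update once the write index crosses another multiple of 8, matching the "multiples of 8" comment above. A tiny standalone illustration of that predicate (hypothetical helper; e.g. with write = 13 the masked value is 13 & ~0x7 = 8, so no update is signalled until write reaches 16):

/* True when the index last seen by the hardware (write_actual) lags the
 * current write index by a full step of 8 RBDs. */
static int rx_write_ptr_needs_update(unsigned int write_actual,
				     unsigned int write)
{
	return write_actual != (write & ~0x7u);
}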
-
-/**
- * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct list_head *element;
-	struct iwl_rx_mem_buffer *rxb;
-	struct page *page;
-	unsigned long flags;
-	gfp_t gfp_mask = priority;
-
-	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
-		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
-			return;
-		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (priv->hw_params.rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
-		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
-					       "order: %d\n",
-					       priv->hw_params.rx_page_order);
-
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(priv,
-					"Failed to alloc_pages with %s. "
-					"Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ?
-						 "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
-			return;
-		}
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
-		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
-			__free_pages(page, priv->hw_params.rx_page_order);
-			return;
-		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		BUG_ON(rxb->page);
-		rxb->page = page;
-		/* Get physical address of the RB */
-		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-		/* and also 256 byte aligned! */
-		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
-		priv->alloc_rxb_page++;
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-	}
-}
-
-void iwl4965_rx_replenish(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	iwl4965_rx_allocate(priv, GFP_KERNEL);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl4965_rx_queue_restock(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwl4965_rx_replenish_now(struct iwl_priv *priv)
-{
-	iwl4965_rx_allocate(priv, GFP_ATOMIC);
-
-	iwl4965_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	int i;
-	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].page != NULL) {
-			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
-			rxq->pool[i].page = NULL;
-		}
-	}
-
-	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->bd_dma);
-	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
-			  rxq->rb_stts, rxq->rb_stts_dma);
-	rxq->bd = NULL;
-	rxq->rb_stts  = NULL;
-}
-
-int iwl4965_rxq_stop(struct iwl_priv *priv)
-{
-
-	/* stop Rx DMA */
-	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
-			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
-	return 0;
-}
-
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
-	int idx = 0;
-	int band_offset = 0;
-
-	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
-	if (rate_n_flags & RATE_MCS_HT_MSK) {
-		idx = (rate_n_flags & 0xff);
-		return idx;
-	/* Legacy rate format, search for match in table */
-	} else {
-		if (band == IEEE80211_BAND_5GHZ)
-			band_offset = IWL_FIRST_OFDM_RATE;
-		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
-			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
-				return idx - band_offset;
-	}
-
-	return -1;
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
-			     struct iwl_rx_phy_res *rx_resp)
-{
-	/* data from PHY/DSP regarding signal strength, etc.,
-	 *   contents are always there, not configurable by host.  */
-	struct iwl4965_rx_non_cfg_phy *ncphy =
-	    (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
-	u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
-			>> IWL49_AGC_DB_POS;
-
-	u32 valid_antennae =
-	    (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
-			>> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
-	u8 max_rssi = 0;
-	u32 i;
-
-	/* Find max rssi among 3 possible receivers.
-	 * These values are measured by the digital signal processor (DSP).
-	 * They should stay fairly constant even as the signal strength varies,
-	 *   if the radio's automatic gain control (AGC) is working right.
-	 * AGC value (see below) will provide the "interesting" info. */
-	for (i = 0; i < 3; i++)
-		if (valid_antennae & (1 << i))
-			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
-	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
-		max_rssi, agc);
-
-	/* dBm = max_rssi dB - agc dB - constant.
-	 * Higher AGC (higher radio gain) means lower signal. */
-	return max_rssi - agc - IWL4965_RSSI_OFFSET;
-}
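For example, with max_rssi = 85 dB and agc = 90 dB the reported signal is 85 - 90 - 44 = -49 dBm (the constant being IWL4965_RSSI_OFFSET); a higher AGC value, meaning more radio gain was applied, therefore translates into a weaker reported signal, as the comment above notes.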
-
-
-static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
-	u32 decrypt_out = 0;
-
-	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
-					RX_RES_STATUS_STATION_FOUND)
-		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
-				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
-	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
-	/* packet was not encrypted */
-	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
-					RX_RES_STATUS_SEC_TYPE_NONE)
-		return decrypt_out;
-
-	/* packet was encrypted with unknown alg */
-	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
-					RX_RES_STATUS_SEC_TYPE_ERR)
-		return decrypt_out;
-
-	/* decryption was not done in HW */
-	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
-					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
-		return decrypt_out;
-
-	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
-	case RX_RES_STATUS_SEC_TYPE_CCMP:
-		/* alg is CCM: check MIC only */
-		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
-			/* Bad MIC */
-			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
-		else
-			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
-		break;
-
-	case RX_RES_STATUS_SEC_TYPE_TKIP:
-		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
-			/* Bad TTAK */
-			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
-			break;
-		}
-		/* fall through if TTAK OK */
-	default:
-		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
-			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
-		else
-			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-		break;
-	}
-
-	IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
-					decrypt_in, decrypt_out);
-
-	return decrypt_out;
-}
-
-static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
-					struct ieee80211_hdr *hdr,
-					u16 len,
-					u32 ampdu_status,
-					struct iwl_rx_mem_buffer *rxb,
-					struct ieee80211_rx_status *stats)
-{
-	struct sk_buff *skb;
-	__le16 fc = hdr->frame_control;
-
-	/* We only process data packets if the interface is open */
-	if (unlikely(!priv->is_open)) {
-		IWL_DEBUG_DROP_LIMIT(priv,
-		    "Dropping packet while interface is not open.\n");
-		return;
-	}
-
-	/* In case of HW accelerated crypto and bad decryption, drop */
-	if (!priv->cfg->mod_params->sw_crypto &&
-	    iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
-		return;
-
-	skb = dev_alloc_skb(128);
-	if (!skb) {
-		IWL_ERR(priv, "dev_alloc_skb failed\n");
-		return;
-	}
-
-	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
-	iwl_legacy_update_stats(priv, false, fc, len);
-	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
-	ieee80211_rx(priv->hw, skb);
-	priv->alloc_rxb_page--;
-	rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct ieee80211_hdr *header;
-	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_rx_phy_res *phy_res;
-	__le32 rx_pkt_status;
-	struct iwl_rx_mpdu_res_start *amsdu;
-	u32 len;
-	u32 ampdu_status;
-	u32 rate_n_flags;
-
-	/**
-	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
-	 *	REPLY_RX: physical layer info is in this buffer
-	 *	REPLY_RX_MPDU_CMD: physical layer info was sent in separate
-	 *		command and cached in priv->last_phy_res
-	 *
-	 * Here we set up local variables depending on which command is
-	 * received.
-	 */
-	if (pkt->hdr.cmd == REPLY_RX) {
-		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
-		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
-				+ phy_res->cfg_phy_cnt);
-
-		len = le16_to_cpu(phy_res->byte_count);
-		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
-				phy_res->cfg_phy_cnt + len);
-		ampdu_status = le32_to_cpu(rx_pkt_status);
-	} else {
-		if (!priv->_4965.last_phy_res_valid) {
-			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-			return;
-		}
-		phy_res = &priv->_4965.last_phy_res;
-		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
-		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
-		len = le16_to_cpu(amsdu->byte_count);
-		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
-		ampdu_status = iwl4965_translate_rx_status(priv,
-				le32_to_cpu(rx_pkt_status));
-	}
-
-	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
-				phy_res->cfg_phy_cnt);
-		return;
-	}
-
-	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
-	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
-		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
-				le32_to_cpu(rx_pkt_status));
-		return;
-	}
-
-	/* This will be used in several places later */
-	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
-	/* rx_status carries information about the packet to mac80211 */
-	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
-	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-	rx_status.freq =
-		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
-							rx_status.band);
-	rx_status.rate_idx =
-		iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
-	rx_status.flag = 0;
-
-	/* TSF isn't reliable. In order to allow smooth user experience,
-	 * this W/A doesn't propagate it to the mac80211 */
-	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
-	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
-	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
-	rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
-
-	iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
-	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
-		rx_status.signal, (unsigned long long)rx_status.mactime);
-
-	/*
-	 * "antenna number"
-	 *
-	 * It seems that the antenna field in the phy flags value
-	 * is actually a bit field. This is undefined by radiotap,
-	 * it wants an actual antenna number but I always get "7"
-	 * for most legacy frames I receive indicating that the
-	 * same frame was received on all three RX chains.
-	 *
-	 * I think this field should be removed in favor of a
-	 * new 802.11n radiotap field "RX chains" that is defined
-	 * as a bitmask.
-	 */
-	rx_status.antenna =
-		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
-		>> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
-	/* set the preamble flag if appropriate */
-	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-		rx_status.flag |= RX_FLAG_SHORTPRE;
-
-	/* Set up the HT phy flags */
-	if (rate_n_flags & RATE_MCS_HT_MSK)
-		rx_status.flag |= RX_FLAG_HT;
-	if (rate_n_flags & RATE_MCS_HT40_MSK)
-		rx_status.flag |= RX_FLAG_40MHZ;
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
-		rx_status.flag |= RX_FLAG_SHORT_GI;
-
-	iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
-				    rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
-			    struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	priv->_4965.last_phy_res_valid = true;
-	memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
-	       sizeof(struct iwl_rx_phy_res));
-}
-
-static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
-				     struct ieee80211_vif *vif,
-				     enum ieee80211_band band,
-				     u8 is_active, u8 n_probes,
-				     struct iwl_scan_channel *scan_ch)
-{
-	struct ieee80211_channel *chan;
-	const struct ieee80211_supported_band *sband;
-	const struct iwl_channel_info *ch_info;
-	u16 passive_dwell = 0;
-	u16 active_dwell = 0;
-	int added, i;
-	u16 channel;
-
-	sband = iwl_get_hw_mode(priv, band);
-	if (!sband)
-		return 0;
-
-	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
-	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
-	if (passive_dwell <= active_dwell)
-		passive_dwell = active_dwell + 1;
-
-	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
-		chan = priv->scan_request->channels[i];
-
-		if (chan->band != band)
-			continue;
-
-		channel = chan->hw_value;
-		scan_ch->channel = cpu_to_le16(channel);
-
-		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
-		if (!iwl_legacy_is_channel_valid(ch_info)) {
-			IWL_DEBUG_SCAN(priv,
-				 "Channel %d is INVALID for this band.\n",
-					channel);
-			continue;
-		}
-
-		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
-		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
-			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
-		else
-			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
-		if (n_probes)
-			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
-		scan_ch->active_dwell = cpu_to_le16(active_dwell);
-		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
-		/* Set txpower levels to defaults */
-		scan_ch->dsp_atten = 110;
-
-		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
-		 * power level:
-		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
-		 */
-		if (band == IEEE80211_BAND_5GHZ)
-			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
-		else
-			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
-		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
-			       channel, le32_to_cpu(scan_ch->type),
-			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
-				"ACTIVE" : "PASSIVE",
-			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
-			       active_dwell : passive_dwell);
-
-		scan_ch++;
-		added++;
-	}
-
-	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
-	return added;
-}
-
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_SCAN_CMD,
-		.len = sizeof(struct iwl_scan_cmd),
-		.flags = CMD_SIZE_HUGE,
-	};
-	struct iwl_scan_cmd *scan;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	u32 rate_flags = 0;
-	u16 cmd_len;
-	u16 rx_chain = 0;
-	enum ieee80211_band band;
-	u8 n_probes = 0;
-	u8 rx_ant = priv->hw_params.valid_rx_ant;
-	u8 rate;
-	bool is_active = false;
-	int  chan_mod;
-	u8 active_chains;
-	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (vif)
-		ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-	if (!priv->scan_cmd) {
-		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
-					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
-		if (!priv->scan_cmd) {
-			IWL_DEBUG_SCAN(priv,
-				       "fail to allocate memory for scan\n");
-			return -ENOMEM;
-		}
-	}
-	scan = priv->scan_cmd;
-	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
-	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
-	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
-	if (iwl_legacy_is_any_associated(priv)) {
-		u16 interval;
-		u32 extra;
-		u32 suspend_time = 100;
-		u32 scan_suspend_time = 100;
-
-		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-		interval = vif->bss_conf.beacon_int;
-
-		scan->suspend_time = 0;
-		scan->max_out_time = cpu_to_le32(200 * 1024);
-		if (!interval)
-			interval = suspend_time;
-
-		extra = (suspend_time / interval) << 22;
-		scan_suspend_time = (extra |
-		    ((suspend_time % interval) * 1024));
-		scan->suspend_time = cpu_to_le32(scan_suspend_time);
-		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
-			       scan_suspend_time, interval);
-	}
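	/*
	 * Worked example of the suspend_time packing above (illustrative
	 * only): with suspend_time = 100 and a beacon interval of 100,
	 *   extra             = (100 / 100) << 22 = 0x00400000
	 *   remainder portion = (100 % 100) * 1024 = 0
	 *   scan_suspend_time = 0x00400000
	 * i.e. the whole-interval quotient lands in bits 22 and up, and the
	 * remainder, scaled by 1024, occupies the low bits.
	 */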
-
-	if (priv->scan_request->n_ssids) {
-		int i, p = 0;
-		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
-		for (i = 0; i < priv->scan_request->n_ssids; i++) {
-			/* always does wildcard anyway */
-			if (!priv->scan_request->ssids[i].ssid_len)
-				continue;
-			scan->direct_scan[p].id = WLAN_EID_SSID;
-			scan->direct_scan[p].len =
-				priv->scan_request->ssids[i].ssid_len;
-			memcpy(scan->direct_scan[p].ssid,
-			       priv->scan_request->ssids[i].ssid,
-			       priv->scan_request->ssids[i].ssid_len);
-			n_probes++;
-			p++;
-		}
-		is_active = true;
-	} else
-		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
-	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
-	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-	switch (priv->scan_band) {
-	case IEEE80211_BAND_2GHZ:
-		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-		chan_mod = le32_to_cpu(
-			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
-						RXON_FLG_CHANNEL_MODE_MSK)
-				       >> RXON_FLG_CHANNEL_MODE_POS;
-		if (chan_mod == CHANNEL_MODE_PURE_40) {
-			rate = IWL_RATE_6M_PLCP;
-		} else {
-			rate = IWL_RATE_1M_PLCP;
-			rate_flags = RATE_MCS_CCK_MSK;
-		}
-		break;
-	case IEEE80211_BAND_5GHZ:
-		rate = IWL_RATE_6M_PLCP;
-		break;
-	default:
-		IWL_WARN(priv, "Invalid scan band\n");
-		return -EIO;
-	}
-
-	/*
-	 * If active scanning is requested but a certain channel is
-	 * marked passive, we can do active scanning if we detect
-	 * transmissions.
-	 *
-	 * There is an issue with some firmware versions that triggers
-	 * a sysassert on a "good CRC threshold" of zero (== disabled),
-	 * on a radar channel even though this means that we should NOT
-	 * send probes.
-	 *
-	 * The "good CRC threshold" is the number of frames that we
-	 * need to receive during our dwell time on a channel before
-	 * sending out probes -- setting this to a huge value will
-	 * mean we never reach it, but at the same time work around
-	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
-	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
-	 */
-	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
-					IWL_GOOD_CRC_TH_NEVER;
-
-	band = priv->scan_band;
-
-	if (priv->cfg->scan_rx_antennas[band])
-		rx_ant = priv->cfg->scan_rx_antennas[band];
-
-	priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
-						priv->scan_tx_ant[band],
-						    scan_tx_antennas);
-	rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
-	scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
-
-	/* In power save mode use one chain, otherwise use all chains */
-	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-		/* rx_ant has been set to all valid chains previously */
-		active_chains = rx_ant &
-				((u8)(priv->chain_noise_data.active_chains));
-		if (!active_chains)
-			active_chains = rx_ant;
-
-		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
-				priv->chain_noise_data.active_chains);
-
-		rx_ant = iwl4965_first_antenna(active_chains);
-	}
-
-	/* MIMO is not used here, but value is required */
-	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
-	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
-	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
-	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
-	scan->rx_chain = cpu_to_le16(rx_chain);
-
-	cmd_len = iwl_legacy_fill_probe_req(priv,
-					(struct ieee80211_mgmt *)scan->data,
-					vif->addr,
-					priv->scan_request->ie,
-					priv->scan_request->ie_len,
-					IWL_MAX_SCAN_SIZE - sizeof(*scan));
-	scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
-	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
-			       RXON_FILTER_BCON_AWARE_MSK);
-
-	scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
-						is_active, n_probes,
-						(void *)&scan->data[cmd_len]);
-	if (scan->channel_count == 0) {
-		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
-		return -EIO;
-	}
-
-	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
-	    scan->channel_count * sizeof(struct iwl_scan_channel);
-	cmd.data = scan;
-	scan->len = cpu_to_le16(cmd.len);
-
-	set_bit(STATUS_SCAN_HW, &priv->status);
-
-	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
-	if (ret)
-		clear_bit(STATUS_SCAN_HW, &priv->status);
-
-	return ret;
-}
-
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
-			       struct ieee80211_vif *vif, bool add)
-{
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-	if (add)
-		return iwl4965_add_bssid_station(priv, vif_priv->ctx,
-						vif->bss_conf.bssid,
-						&vif_priv->ibss_bssid_sta_id);
-	return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
-				  vif->bss_conf.bssid);
-}
-
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
-			    int sta_id, int tid, int freed)
-{
-	lockdep_assert_held(&priv->sta_lock);
-
-	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
-	else {
-		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
-			priv->stations[sta_id].tid[tid].tfds_in_queue,
-			freed);
-		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
-	}
-}
-
-#define IWL_TX_QUEUE_MSK	0xfffff
-
-static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
-{
-	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
-	       priv->current_ht_config.single_chain_sufficient;
-}
-
-#define IWL_NUM_RX_CHAINS_MULTIPLE	3
-#define IWL_NUM_RX_CHAINS_SINGLE	2
-#define IWL_NUM_IDLE_CHAINS_DUAL	2
-#define IWL_NUM_IDLE_CHAINS_SINGLE	1
-
-/*
- * Determine how many receiver/antenna chains to use.
- *
- * More provides better reception via diversity.  Fewer saves power
- * at the expense of throughput, but only when not in powersave to
- * start with.
- *
- * MIMO (dual stream) requires at least 2, but works better with 3.
- * This does not determine *which* chains to use, just how many.
- */
-static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
-{
-	/* # of Rx chains to use when expecting MIMO. */
-	if (iwl4965_is_single_rx_stream(priv))
-		return IWL_NUM_RX_CHAINS_SINGLE;
-	else
-		return IWL_NUM_RX_CHAINS_MULTIPLE;
-}
-
-/*
- * When we are in power saving mode, unless device support spatial
- * multiplexing power save, use the active count for rx chain count.
- */
-static int
-iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
-{
-	/* # Rx chains when idling, depending on SMPS mode */
-	switch (priv->current_ht_config.smps) {
-	case IEEE80211_SMPS_STATIC:
-	case IEEE80211_SMPS_DYNAMIC:
-		return IWL_NUM_IDLE_CHAINS_SINGLE;
-	case IEEE80211_SMPS_OFF:
-		return active_cnt;
-	default:
-		WARN(1, "invalid SMPS mode %d",
-		     priv->current_ht_config.smps);
-		return active_cnt;
-	}
-}
-
-/* up to 4 chains */
-static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
-{
-	u8 res;
-	res = (chain_bitmap & BIT(0)) >> 0;
-	res += (chain_bitmap & BIT(1)) >> 1;
-	res += (chain_bitmap & BIT(2)) >> 2;
-	res += (chain_bitmap & BIT(3)) >> 3;
-	return res;
-}
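The helper above is simply a population count over the low four chain bits (in kernel code hweight8() could do the same job). An equivalent standalone form, written as a loop for clarity:

#include <stdint.h>

/* Count how many of the low four chain bits are set. */
static uint8_t count_chain_bitmap_alt(uint32_t chain_bitmap)
{
	uint8_t count = 0;
	uint32_t bits = chain_bitmap & 0xf;	/* up to 4 chains */

	while (bits) {
		count += bits & 1;
		bits >>= 1;
	}
	return count;
}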
-
-/**
- * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
- *
- * Selects how many and which Rx receivers/antennas/chains to use.
- * This should not be used for scan command ... it puts data in wrong place.
- */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	bool is_single = iwl4965_is_single_rx_stream(priv);
-	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
-	u32 active_chains;
-	u16 rx_chain;
-
-	/* Tell uCode which antennas are actually connected.
-	 * Before first association, we assume all antennas are connected.
-	 * Just after first association, iwl4965_chain_noise_calibration()
-	 *    checks which antennas actually *are* connected. */
-	if (priv->chain_noise_data.active_chains)
-		active_chains = priv->chain_noise_data.active_chains;
-	else
-		active_chains = priv->hw_params.valid_rx_ant;
-
-	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
-
-	/* How many receivers should we use? */
-	active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
-	idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
-
-
-	/* correct rx chain count according hw settings
-	 * and chain noise calibration
-	 */
-	valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
-	if (valid_rx_cnt < active_rx_cnt)
-		active_rx_cnt = valid_rx_cnt;
-
-	if (valid_rx_cnt < idle_rx_cnt)
-		idle_rx_cnt = valid_rx_cnt;
-
-	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
-	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
-
-	ctx->staging.rx_chain = cpu_to_le16(rx_chain);
-
-	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
-		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
-	else
-		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
-
-	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
-			ctx->staging.rx_chain,
-			active_rx_cnt, idle_rx_cnt);
-
-	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
-		active_rx_cnt < idle_rx_cnt);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
-{
-	int i;
-	u8 ind = ant;
-
-	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
-		ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
-		if (valid & BIT(ind))
-			return ind;
-	}
-	return ant;
-}
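In other words, the function walks the antenna indices round-robin (RATE_ANT_NUM of them) and returns the first index after 'ant' whose bit is set in 'valid'. For example, starting from ant = 0 with only BIT(2) set in valid, index 1 is skipped and 2 is returned; if no other index is valid, the original 'ant' is kept.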
-
-static const char *iwl4965_get_fh_string(int cmd)
-{
-	switch (cmd) {
-	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
-	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
-	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
-	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
-	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
-	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
-	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
-	IWL_CMD(FH_TSSR_TX_STATUS_REG);
-	IWL_CMD(FH_TSSR_TX_ERROR_REG);
-	default:
-		return "UNKNOWN";
-	}
-}
-
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
-	int i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	int pos = 0;
-	size_t bufsz = 0;
-#endif
-	static const u32 fh_tbl[] = {
-		FH_RSCSR_CHNL0_STTS_WPTR_REG,
-		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-		FH_RSCSR_CHNL0_WPTR,
-		FH_MEM_RCSR_CHNL0_CONFIG_REG,
-		FH_MEM_RSSR_SHARED_CTRL_REG,
-		FH_MEM_RSSR_RX_STATUS_REG,
-		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
-		FH_TSSR_TX_STATUS_REG,
-		FH_TSSR_TX_ERROR_REG
-	};
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (display) {
-		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-		*buf = kmalloc(bufsz, GFP_KERNEL);
-		if (!*buf)
-			return -ENOMEM;
-		pos += scnprintf(*buf + pos, bufsz - pos,
-				"FH register values:\n");
-		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
-			pos += scnprintf(*buf + pos, bufsz - pos,
-				"  %34s: 0X%08x\n",
-				iwl4965_get_fh_string(fh_tbl[i]),
-				iwl_legacy_read_direct32(priv, fh_tbl[i]));
-		}
-		return pos;
-	}
-#endif
-	IWL_ERR(priv, "FH register values:\n");
-	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
-		IWL_ERR(priv, "  %34s: 0X%08x\n",
-			iwl4965_get_fh_string(fh_tbl[i]),
-			iwl_legacy_read_direct32(priv, fh_tbl[i]));
-	}
-	return 0;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe21..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-dev.h"
-#include "iwl-sta.h"
-#include "iwl-core.h"
-#include "iwl-4965.h"
-
-#define IWL4965_RS_NAME "iwl-4965-rs"
-
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY      1
-#define IWL_HT_NUMBER_TRY   3
-
-#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */
-
-/* max allowed rate miss before sync LQ cmd */
-#define IWL_MISSED_RATE_MAX		15
-/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
-
-static u8 rs_ht_to_legacy[] = {
-	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-	IWL_RATE_6M_INDEX,
-	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
-	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
-	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
-	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
-};
-
-static const u8 ant_toggle_lookup[] = {
-	/*ANT_NONE -> */ ANT_NONE,
-	/*ANT_A    -> */ ANT_B,
-	/*ANT_B    -> */ ANT_C,
-	/*ANT_AB   -> */ ANT_BC,
-	/*ANT_C    -> */ ANT_A,
-	/*ANT_AC   -> */ ANT_AB,
-	/*ANT_BC   -> */ ANT_AC,
-	/*ANT_ABC  -> */ ANT_ABC,
-};
-
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
-	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
-				    IWL_RATE_SISO_##s##M_PLCP, \
-				    IWL_RATE_MIMO2_##s##M_PLCP,\
-				    IWL_RATE_##r##M_IEEE,      \
-				    IWL_RATE_##ip##M_INDEX,    \
-				    IWL_RATE_##in##M_INDEX,    \
-				    IWL_RATE_##rp##M_INDEX,    \
-				    IWL_RATE_##rn##M_INDEX,    \
-				    IWL_RATE_##pp##M_INDEX,    \
-				    IWL_RATE_##np##M_INDEX }
-
-/*
- * Parameter order:
- *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
-	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
-	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
-	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
-	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
-	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
-	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
-	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
-};
-
-static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
-{
-	int idx = 0;
-
-	/* HT rate format */
-	if (rate_n_flags & RATE_MCS_HT_MSK) {
-		idx = (rate_n_flags & 0xff);
-
-		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
-			idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
-		idx += IWL_FIRST_OFDM_RATE;
-		/* skip 9M not supported in ht*/
-		if (idx >= IWL_RATE_9M_INDEX)
-			idx += 1;
-		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
-			return idx;
-
-	/* legacy rate format, search for match in table */
-	} else {
-		for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
-			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
-				return idx;
-	}
-
-	return -1;
-}
-
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
-				   struct sk_buff *skb,
-				   struct ieee80211_sta *sta,
-				   struct iwl_lq_sta *lq_sta);
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
-			     struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
-static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
-					bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags, int index);
-#else
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags, int index)
-{}
-#endif
-
-/**
- * The following tables contain the expected throughput metrics for all rates
- *
- *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- *
- * where invalid entries are zeros.
- *
- * CCK rates are only valid in legacy table and will only be used in G
- * (2.4 GHz) band.
- */
-
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
-	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
-};
-
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
-	{0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
-	{0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
-	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
-	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
-	{0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
-	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
-	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
-	{0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
-	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
-};
-
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
-	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
-	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
-	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
-	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
-};
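Each of the tables above carries four rows for the same thirteen rates, one per combination of aggregation and short guard interval. A hedged sketch of how a caller might pick the row (illustrative only; the real driver selects the pointer when it sets up the scale table, and IWL_RATE_COUNT is 13 here):

#include <stdint.h>

/* Row layout used by the expected-throughput tables above:
 *   [0] normal GI, no aggregation    [1] short GI, no aggregation
 *   [2] normal GI, aggregation       [3] short GI, aggregation
 */
static const int32_t *select_expected_tpt_row(const int32_t table[4][13],
					      int is_aggregated,
					      int is_short_gi)
{
	return table[(is_aggregated ? 2 : 0) + (is_short_gi ? 1 : 0)];
}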
-
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
-	{  "1", "BPSK DSSS"},
-	{  "2", "QPSK DSSS"},
-	{"5.5", "BPSK CCK"},
-	{ "11", "QPSK CCK"},
-	{  "6", "BPSK 1/2"},
-	{  "9", "BPSK 1/2"},
-	{ "12", "QPSK 1/2"},
-	{ "18", "QPSK 3/4"},
-	{ "24", "16QAM 1/2"},
-	{ "36", "16QAM 3/4"},
-	{ "48", "64QAM 2/3"},
-	{ "54", "64QAM 3/4"},
-	{ "60", "64QAM 5/6"},
-};
-
-#define MCS_INDEX_PER_STREAM	(8)
-
-static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
-{
-	return (u8)(rate_n_flags & 0xFF);
-}
-
-static void
-iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
-{
-	window->data = 0;
-	window->success_counter = 0;
-	window->success_ratio = IWL_INVALID_VALUE;
-	window->counter = 0;
-	window->average_tpt = IWL_INVALID_VALUE;
-	window->stamp = 0;
-}
-
-static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
-{
-	return (ant_type & valid_antenna) == ant_type;
-}
-
-/*
- *	removes the old data from the statistics. All data that is older than
- *	TID_MAX_TIME_DIFF, will be deleted.
- */
-static void
-iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
-	/* The oldest age we want to keep */
-	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
-	while (tl->queue_count &&
-	       (tl->time_stamp < oldest_time)) {
-		tl->total -= tl->packet_count[tl->head];
-		tl->packet_count[tl->head] = 0;
-		tl->time_stamp += TID_QUEUE_CELL_SPACING;
-		tl->queue_count--;
-		tl->head++;
-		if (tl->head >= TID_QUEUE_MAX_SIZE)
-			tl->head = 0;
-	}
-}
-
-/*
- *	increment traffic load value for tid and also remove
- *	any old values if passed the certain time period
- */
-static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
-			   struct ieee80211_hdr *hdr)
-{
-	u32 curr_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	struct iwl_traffic_load *tl = NULL;
-	u8 tid;
-
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & 0xf;
-	} else
-		return MAX_TID_COUNT;
-
-	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
-		return MAX_TID_COUNT;
-
-	tl = &lq_data->load[tid];
-
-	curr_time -= curr_time % TID_ROUND_VALUE;
-
-	/* Happens only for the first packet. Initialize the data */
-	if (!(tl->queue_count)) {
-		tl->total = 1;
-		tl->time_stamp = curr_time;
-		tl->queue_count = 1;
-		tl->head = 0;
-		tl->packet_count[0] = 1;
-		return MAX_TID_COUNT;
-	}
-
-	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	/* The history is too long: remove data that is older than */
-	/* TID_MAX_TIME_DIFF */
-	if (index >= TID_QUEUE_MAX_SIZE)
-		iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
-	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
-	tl->packet_count[index] = tl->packet_count[index] + 1;
-	tl->total = tl->total + 1;
-
-	if ((index + 1) > tl->queue_count)
-		tl->queue_count = index + 1;
-
-	return tid;
-}
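The bookkeeping above is a time-bucketed sliding window: the timestamp is rounded down to a cell boundary, the distance from the window start selects a cell, and cells older than the window are aged out before counting. A compact standalone sketch of the same idea with explicitly assumed constants (the driver's real TID_QUEUE_CELL_SPACING, TID_QUEUE_MAX_SIZE and head tracking live elsewhere and differ in detail):

#include <stdint.h>

#define TL_BUCKET_MS	1000	/* assumed cell spacing, for illustration */
#define TL_MAX_BUCKETS	8	/* assumed window length, for illustration */

struct tl_sketch {
	uint32_t start_ms;		/* time covered by the oldest bucket */
	uint32_t total;			/* packets inside the window */
	uint32_t bucket[TL_MAX_BUCKETS];
};

/* Count one packet seen at 'now_ms': age out buckets that fell outside
 * the window, then bump the bucket covering 'now_ms'. */
static void tl_add_packet(struct tl_sketch *tl, uint32_t now_ms)
{
	now_ms -= now_ms % TL_BUCKET_MS;	/* round down to a bucket */

	if (!tl->total) {
		tl->start_ms = now_ms;		/* empty window: restart it */
	} else {
		while (now_ms - tl->start_ms >= TL_MAX_BUCKETS * TL_BUCKET_MS) {
			uint32_t oldest = (tl->start_ms / TL_BUCKET_MS) %
					  TL_MAX_BUCKETS;

			tl->total -= tl->bucket[oldest];
			tl->bucket[oldest] = 0;
			tl->start_ms += TL_BUCKET_MS;
			if (!tl->total) {	/* emptied out: jump ahead */
				tl->start_ms = now_ms;
				break;
			}
		}
	}

	tl->bucket[(now_ms / TL_BUCKET_MS) % TL_MAX_BUCKETS]++;
	tl->total++;
}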
-
-/*
-	get the traffic load value for tid
-*/
-static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
-	u32 curr_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	struct iwl_traffic_load *tl = NULL;
-
-	if (tid >= TID_MAX_LOAD_COUNT)
-		return 0;
-
-	tl = &(lq_data->load[tid]);
-
-	curr_time -= curr_time % TID_ROUND_VALUE;
-
-	if (!(tl->queue_count))
-		return 0;
-
-	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	/* The history is too long: remove data that is older than */
-	/* TID_MAX_TIME_DIFF */
-	if (index >= TID_QUEUE_MAX_SIZE)
-		iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
-	return tl->total;
-}
-
-static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
-				      struct iwl_lq_sta *lq_data, u8 tid,
-				      struct ieee80211_sta *sta)
-{
-	int ret = -EAGAIN;
-	u32 load;
-
-	load = iwl4965_rs_tl_get_load(lq_data, tid);
-
-	if (load > IWL_AGG_LOAD_THRESHOLD) {
-		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
-				sta->addr, tid);
-		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
-		if (ret == -EAGAIN) {
-			/*
-			 * The driver and mac80211 are out of sync;
-			 * this might be caused by reloading firmware.
-			 * Stop the Tx BA session here.
-			 */
-			IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
-				tid);
-			ieee80211_stop_tx_ba_session(sta, tid);
-		}
-	} else {
-		IWL_ERR(priv, "Aggregation not enabled for tid %d "
-			"because load = %u\n", tid, load);
-	}
-	return ret;
-}
-
-static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
-			      struct iwl_lq_sta *lq_data,
-			      struct ieee80211_sta *sta)
-{
-	if (tid < TID_MAX_LOAD_COUNT)
-		iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
-	else
-		IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
-			tid, TID_MAX_LOAD_COUNT);
-}
-
-static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
-{
-	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
-	       !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
-	       !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
-}
-
-/*
- * Static helper to get the expected throughput from an iwl_scale_tbl_info,
- * wrapping a NULL pointer check.
- */
-static s32
-iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
-	if (tbl->expected_tpt)
-		return tbl->expected_tpt[rs_index];
-	return 0;
-}
-
-/**
- * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate.  window->data contains the bitmask of successful
- * packets.
- */
-static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
-			      int scale_index, int attempts, int successes)
-{
-	struct iwl_rate_scale_data *window = NULL;
-	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
-	s32 fail_count, tpt;
-
-	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
-		return -EINVAL;
-
-	/* Select window for current tx bit rate */
-	window = &(tbl->win[scale_index]);
-
-	/* Get expected throughput */
-	tpt = iwl4965_get_expected_tpt(tbl, scale_index);
-
-	/*
-	 * Keep track of only the latest 62 tx frame attempts in this rate's
-	 * history window; anything older isn't really relevant any more.
-	 * If we have filled up the sliding window, drop the oldest attempt;
-	 * if the oldest attempt (highest bit in bitmap) shows "success",
-	 * subtract "1" from the success counter (this is the main reason
-	 * we keep these bitmaps!).
-	 */
-	while (attempts > 0) {
-		if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
-			/* remove earliest */
-			window->counter = IWL_RATE_MAX_WINDOW - 1;
-
-			if (window->data & mask) {
-				window->data &= ~mask;
-				window->success_counter--;
-			}
-		}
-
-		/* Increment frames-attempted counter */
-		window->counter++;
-
-		/* Shift bitmap by one frame to throw away oldest history */
-		window->data <<= 1;
-
-		/* Mark the most recent #successes attempts as successful */
-		if (successes > 0) {
-			window->success_counter++;
-			window->data |= 0x1;
-			successes--;
-		}
-
-		attempts--;
-	}
-
-	/* Calculate current success ratio, avoid divide-by-0! */
-	if (window->counter > 0)
-		window->success_ratio = 128 * (100 * window->success_counter)
-					/ window->counter;
-	else
-		window->success_ratio = IWL_INVALID_VALUE;
-
-	fail_count = window->counter - window->success_counter;
-
-	/* Calculate average throughput, if we have enough history. */
-	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
-	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
-		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
-	else
-		window->average_tpt = IWL_INVALID_VALUE;
-
-	/* Tag this window as having been updated */
-	window->stamp = jiffies;
-
-	return 0;
-}
-
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
-				 struct iwl_scale_tbl_info *tbl,
-				 int index, u8 use_green)
-{
-	u32 rate_n_flags = 0;
-
-	if (is_legacy(tbl->lq_type)) {
-		rate_n_flags = iwlegacy_rates[index].plcp;
-		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
-			rate_n_flags |= RATE_MCS_CCK_MSK;
-
-	} else if (is_Ht(tbl->lq_type)) {
-		if (index > IWL_LAST_OFDM_RATE) {
-			IWL_ERR(priv, "Invalid HT rate index %d\n", index);
-			index = IWL_LAST_OFDM_RATE;
-		}
-		rate_n_flags = RATE_MCS_HT_MSK;
-
-		if (is_siso(tbl->lq_type))
-			rate_n_flags |=	iwlegacy_rates[index].plcp_siso;
-		else
-			rate_n_flags |=	iwlegacy_rates[index].plcp_mimo2;
-	} else {
-		IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
-	}
-
-	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
-						     RATE_MCS_ANT_ABC_MSK);
-
-	if (is_Ht(tbl->lq_type)) {
-		if (tbl->is_ht40) {
-			if (tbl->is_dup)
-				rate_n_flags |= RATE_MCS_DUP_MSK;
-			else
-				rate_n_flags |= RATE_MCS_HT40_MSK;
-		}
-		if (tbl->is_SGI)
-			rate_n_flags |= RATE_MCS_SGI_MSK;
-
-		if (use_green) {
-			rate_n_flags |= RATE_MCS_GF_MSK;
-			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
-				rate_n_flags &= ~RATE_MCS_SGI_MSK;
-				IWL_ERR(priv, "GF was set with SGI:SISO\n");
-			}
-		}
-	}
-	return rate_n_flags;
-}
-
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-				    enum ieee80211_band band,
-				    struct iwl_scale_tbl_info *tbl,
-				    int *rate_idx)
-{
-	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
-	u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
-	u8 mcs;
-
-	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
-	*rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
-
-	if (*rate_idx  == IWL_RATE_INVALID) {
-		*rate_idx = -1;
-		return -EINVAL;
-	}
-	tbl->is_SGI = 0;	/* default legacy setup */
-	tbl->is_ht40 = 0;
-	tbl->is_dup = 0;
-	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
-	tbl->lq_type = LQ_NONE;
-	tbl->max_search = IWL_MAX_SEARCH;
-
-	/* legacy rate format */
-	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
-		if (iwl4965_num_of_ant == 1) {
-			if (band == IEEE80211_BAND_5GHZ)
-				tbl->lq_type = LQ_A;
-			else
-				tbl->lq_type = LQ_G;
-		}
-	/* HT rate format */
-	} else {
-		if (rate_n_flags & RATE_MCS_SGI_MSK)
-			tbl->is_SGI = 1;
-
-		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
-		    (rate_n_flags & RATE_MCS_DUP_MSK))
-			tbl->is_ht40 = 1;
-
-		if (rate_n_flags & RATE_MCS_DUP_MSK)
-			tbl->is_dup = 1;
-
-		mcs = iwl4965_rs_extract_rate(rate_n_flags);
-
-		/* SISO */
-		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
-			if (iwl4965_num_of_ant == 1)
-				tbl->lq_type = LQ_SISO; /*else NONE*/
-		/* MIMO2 */
-		} else {
-			if (iwl4965_num_of_ant == 2)
-				tbl->lq_type = LQ_MIMO2;
-		}
-	}
-	return 0;
-}
-
-/* switch to another antenna/antennas and return 1 */
-/* if no other valid antenna found, return 0 */
-static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
-			     struct iwl_scale_tbl_info *tbl)
-{
-	u8 new_ant_type;
-
-	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
-		return 0;
-
-	if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
-		return 0;
-
-	new_ant_type = ant_toggle_lookup[tbl->ant_type];
-
-	while ((new_ant_type != tbl->ant_type) &&
-	       !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
-		new_ant_type = ant_toggle_lookup[new_ant_type];
-
-	if (new_ant_type == tbl->ant_type)
-		return 0;
-
-	tbl->ant_type = new_ant_type;
-	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
-	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
-	return 1;
-}
-
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
-{
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
-		!(ctx->ht.non_gf_sta_present);
-}
-
-/**
- * iwl4965_rs_get_supported_rates - get the available rates
- *
- * For management or broadcast frames, return only the
- * basic available rates.
- *
- */
-static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
-				  struct ieee80211_hdr *hdr,
-				  enum iwl_table_type rate_type)
-{
-	if (is_legacy(rate_type)) {
-		return lq_sta->active_legacy_rate;
-	} else {
-		if (is_siso(rate_type))
-			return lq_sta->active_siso_rate;
-		else
-			return lq_sta->active_mimo2_rate;
-	}
-}
-
-static u16
-iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
-				int rate_type)
-{
-	u8 high = IWL_RATE_INVALID;
-	u8 low = IWL_RATE_INVALID;
-
-	/* 802.11a or HT walks to the next literal adjacent rate in
-	 * the rate table */
-	if (is_a_band(rate_type) || !is_legacy(rate_type)) {
-		int i;
-		u32 mask;
-
-		/* Find the previous rate that is in the rate mask */
-		i = index - 1;
-		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
-			if (rate_mask & mask) {
-				low = i;
-				break;
-			}
-		}
-
-		/* Find the next rate that is in the rate mask */
-		i = index + 1;
-		for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
-			if (rate_mask & mask) {
-				high = i;
-				break;
-			}
-		}
-
-		return (high << 8) | low;
-	}
-
-	low = index;
-	while (low != IWL_RATE_INVALID) {
-		low = iwlegacy_rates[low].prev_rs;
-		if (low == IWL_RATE_INVALID)
-			break;
-		if (rate_mask & (1 << low))
-			break;
-		IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
-	}
-
-	high = index;
-	while (high != IWL_RATE_INVALID) {
-		high = iwlegacy_rates[high].next_rs;
-		if (high == IWL_RATE_INVALID)
-			break;
-		if (rate_mask & (1 << high))
-			break;
-		IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
-	}
-
-	return (high << 8) | low;
-}
-
-static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
-			     struct iwl_scale_tbl_info *tbl,
-			     u8 scale_index, u8 ht_possible)
-{
-	s32 low;
-	u16 rate_mask;
-	u16 high_low;
-	u8 switch_to_legacy = 0;
-	u8 is_green = lq_sta->is_green;
-	struct iwl_priv *priv = lq_sta->drv;
-
-	/* Check if we need to switch from HT to legacy rates.
-	 * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
-	 * are always supported (spec requirement) */
-	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
-		switch_to_legacy = 1;
-		scale_index = rs_ht_to_legacy[scale_index];
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			tbl->lq_type = LQ_A;
-		else
-			tbl->lq_type = LQ_G;
-
-		if (iwl4965_num_of_ant(tbl->ant_type) > 1)
-			tbl->ant_type =
-				iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
-		tbl->is_ht40 = 0;
-		tbl->is_SGI = 0;
-		tbl->max_search = IWL_MAX_SEARCH;
-	}
-
-	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
-
-	/* Mask with station rate restriction */
-	if (is_legacy(tbl->lq_type)) {
-		/* supp_rates has no CCK bits in A mode */
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			rate_mask  = (u16)(rate_mask &
-			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
-		else
-			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
-	}
-
-	/* If we switched from HT to legacy, check current rate */
-	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
-		low = scale_index;
-		goto out;
-	}
-
-	high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
-					scale_index, rate_mask,
-					tbl->lq_type);
-	low = high_low & 0xff;
-
-	if (low == IWL_RATE_INVALID)
-		low = scale_index;
-
-out:
-	return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
-}
-
-/*
- * Simple function to compare two rate scale table types
- */
-static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
-			       struct iwl_scale_tbl_info *b)
-{
-	return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
-		(a->is_SGI == b->is_SGI);
-}
-
-/*
- * mac80211 sends us Tx status
- */
-static void
-iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
-			 struct ieee80211_sta *sta, void *priv_sta,
-			 struct sk_buff *skb)
-{
-	int legacy_success;
-	int retries;
-	int rs_index, mac_index, i;
-	struct iwl_lq_sta *lq_sta = priv_sta;
-	struct iwl_link_quality_cmd *table;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	enum mac80211_rate_control_flags mac_flags;
-	u32 tx_rate;
-	struct iwl_scale_tbl_info tbl_type;
-	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-	IWL_DEBUG_RATE_LIMIT(priv,
-		"get frame ack response, update rate scale window\n");
-
-	/* Treat uninitialized rate scaling data same as non-existing. */
-	if (!lq_sta) {
-		IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
-		return;
-	} else if (!lq_sta->drv) {
-		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
-		return;
-	}
-
-	if (!ieee80211_is_data(hdr->frame_control) ||
-	    info->flags & IEEE80211_TX_CTL_NO_ACK)
-		return;
-
-	/* This packet was aggregated but doesn't carry status info */
-	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
-	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
-		return;
-
-	/*
-	 * Ignore this Tx frame response if its initial rate doesn't match
-	 * that of latest Link Quality command.  There may be stragglers
-	 * from a previous Link Quality command, but we're no longer interested
-	 * in those; they're either from the "active" mode while we're trying
-	 * to check "search" mode, or a prior "search" mode after we've moved
-	 * to a new "search" mode (which might become the new "active" mode).
-	 */
-	table = &lq_sta->lq;
-	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
-	iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
-			 priv->band, &tbl_type, &rs_index);
-	if (priv->band == IEEE80211_BAND_5GHZ)
-		rs_index -= IWL_FIRST_OFDM_RATE;
-	mac_flags = info->status.rates[0].flags;
-	mac_index = info->status.rates[0].idx;
-	/* For HT packets, map MCS to PLCP */
-	if (mac_flags & IEEE80211_TX_RC_MCS) {
-		mac_index &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
-		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
-			mac_index++;
-		/*
-		 * mac80211 HT index is always zero-indexed; we need to move
-		 * HT OFDM rates after CCK rates in 2.4 GHz band
-		 */
-		if (priv->band == IEEE80211_BAND_2GHZ)
-			mac_index += IWL_FIRST_OFDM_RATE;
-	}
-	/* Here we actually compare this rate to the latest LQ command */
-	if ((mac_index < 0) ||
-	    (tbl_type.is_SGI !=
-			!!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-	    (tbl_type.is_ht40 !=
-			!!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
-	    (tbl_type.is_dup !=
-			!!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
-	    (tbl_type.ant_type != info->antenna_sel_tx) ||
-	    (!!(tx_rate & RATE_MCS_HT_MSK) !=
-			!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
-	    (!!(tx_rate & RATE_MCS_GF_MSK) !=
-			!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
-	    (rs_index != mac_index)) {
-		IWL_DEBUG_RATE(priv,
-		"initial rate %d does not match %d (0x%x)\n",
-			 mac_index, rs_index, tx_rate);
-		/*
-		 * Since rates mis-match, the last LQ command may have failed.
-		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
-		 * ... driver.
-		 */
-		lq_sta->missed_rate_counter++;
-		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
-			lq_sta->missed_rate_counter = 0;
-			iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
-							CMD_ASYNC, false);
-		}
-		/* Regardless, ignore this status info for outdated rate */
-		return;
-	} else
-		/* Rate did match, so reset the missed_rate_counter */
-		lq_sta->missed_rate_counter = 0;
-
-	/* Figure out if rate scale algorithm is in active or search table */
-	if (iwl4965_table_type_matches(&tbl_type,
-				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
-		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-	} else if (iwl4965_table_type_matches(&tbl_type,
-				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
-		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	} else {
-		IWL_DEBUG_RATE(priv,
-			"Neither active nor search matches tx rate\n");
-		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
-			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
-		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-		IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
-			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
-		IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
-			tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
-		/*
-		 * No matching table found; bypass the data collection
-		 * and continue rate scaling to find the rate table.
-		 */
-		iwl4965_rs_stay_in_table(lq_sta, true);
-		goto done;
-	}
-
-	/*
-	 * Updating the frame history depends on whether packets were
-	 * aggregated.
-	 *
-	 * For aggregation, all packets were transmitted at the same rate, the
-	 * first index into rate scale table.
-	 */
-	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
-		iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
-				&rs_index);
-		iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
-				   info->status.ampdu_len,
-				   info->status.ampdu_ack_len);
-
-		/* Update success/fail counts if not searching for new mode */
-		if (lq_sta->stay_in_tbl) {
-			lq_sta->total_success += info->status.ampdu_ack_len;
-			lq_sta->total_failed += (info->status.ampdu_len -
-					info->status.ampdu_ack_len);
-		}
-	} else {
-	/*
-	 * For legacy frames, update the frame history for each Tx retry.
-	 */
-		retries = info->status.rates[0].count - 1;
-		/* HW doesn't send more than 15 retries */
-		retries = min(retries, 15);
-
-		/* The last transmission may have been successful */
-		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
-		/* Collect data for each rate used during failed TX attempts */
-		for (i = 0; i <= retries; ++i) {
-			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
-			iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
-					&tbl_type, &rs_index);
-			/*
-			 * Only collect stats if retried rate is in the same RS
-			 * table as active/search.
-			 */
-			if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
-				tmp_tbl = curr_tbl;
-			else if (iwl4965_table_type_matches(&tbl_type,
-								 other_tbl))
-				tmp_tbl = other_tbl;
-			else
-				continue;
-			iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
-					   i < retries ? 0 : legacy_success);
-		}
-
-		/* Update success/fail counts if not searching for new mode */
-		if (lq_sta->stay_in_tbl) {
-			lq_sta->total_success += legacy_success;
-			lq_sta->total_failed += retries + (1 - legacy_success);
-		}
-	}
-	/* The last TX rate is cached in lq_sta; it's set in if/else above */
-	lq_sta->last_rate_n_flags = tx_rate;
-done:
-	/* See if there's a better rate or modulation mode to try. */
-	if (sta && sta->supp_rates[sband->band])
-		iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
-}
-
-/*
- * Begin a period of staying with a selected modulation mode.
- * Set "stay_in_tbl" flag to prevent any mode switches.
- * Set frame tx success limits according to legacy vs. high-throughput,
- * and reset overall (spanning all rates) tx success history statistics.
- * These control how long we stay using same modulation mode before
- * searching for a new mode.
- */
-static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
-				 struct iwl_lq_sta *lq_sta)
-{
-	IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
-	lq_sta->stay_in_tbl = 1;	/* only place this gets set */
-	if (is_legacy) {
-		lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
-		lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
-		lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
-	} else {
-		lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
-		lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
-		lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
-	}
-	lq_sta->table_count = 0;
-	lq_sta->total_failed = 0;
-	lq_sta->total_success = 0;
-	lq_sta->flush_timer = jiffies;
-	lq_sta->action_counter = 0;
-}
-
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
-				      struct iwl_scale_tbl_info *tbl)
-{
-	/* Used to choose among HT tables */
-	s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
-
-	/* Check for invalid LQ type */
-	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
-		tbl->expected_tpt = expected_tpt_legacy;
-		return;
-	}
-
-	/* Legacy rates have only one table */
-	if (is_legacy(tbl->lq_type)) {
-		tbl->expected_tpt = expected_tpt_legacy;
-		return;
-	}
-
-	/* Choose among many HT tables depending on number of streams
-	 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
-	 * status */
-	if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
-		ht_tbl_pointer = expected_tpt_siso20MHz;
-	else if (is_siso(tbl->lq_type))
-		ht_tbl_pointer = expected_tpt_siso40MHz;
-	else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
-		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-	else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
-		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-
-	if (!tbl->is_SGI && !lq_sta->is_agg)		/* Normal */
-		tbl->expected_tpt = ht_tbl_pointer[0];
-	else if (tbl->is_SGI && !lq_sta->is_agg)	/* SGI */
-		tbl->expected_tpt = ht_tbl_pointer[1];
-	else if (!tbl->is_SGI && lq_sta->is_agg)	/* AGG */
-		tbl->expected_tpt = ht_tbl_pointer[2];
-	else						/* AGG+SGI */
-		tbl->expected_tpt = ht_tbl_pointer[3];
-}
-
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput.  When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
-static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
-			    struct iwl_lq_sta *lq_sta,
-			    struct iwl_scale_tbl_info *tbl,	/* "search" */
-			    u16 rate_mask, s8 index)
-{
-	/* "active" values */
-	struct iwl_scale_tbl_info *active_tbl =
-	    &(lq_sta->lq_info[lq_sta->active_tbl]);
-	s32 active_sr = active_tbl->win[index].success_ratio;
-	s32 active_tpt = active_tbl->expected_tpt[index];
-
-	/* expected "search" throughput */
-	s32 *tpt_tbl = tbl->expected_tpt;
-
-	s32 new_rate, high, low, start_hi;
-	u16 high_low;
-	s8 rate = index;
-
-	new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
-	for (; ;) {
-		high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
-						tbl->lq_type);
-
-		low = high_low & 0xff;
-		high = (high_low >> 8) & 0xff;
-
-		/*
-		 * Lower the "search" bit rate, to give new "search" mode
-		 * approximately the same throughput as "active" if:
-		 *
-		 * 1) "Active" mode has been working modestly well (but not
-		 *    great), and expected "search" throughput (under perfect
-		 *    conditions) at candidate rate is above the actual
-		 *    measured "active" throughput (but less than expected
-		 *    "active" throughput under perfect conditions).
-		 * OR
-		 * 2) "Active" mode has been working perfectly or very well
-		 *    and expected "search" throughput (under perfect
-		 *    conditions) at candidate rate is above expected
-		 *    "active" throughput (under perfect conditions).
-		 */
-		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
-		     ((active_sr > IWL_RATE_DECREASE_TH) &&
-		      (active_sr <= IWL_RATE_HIGH_TH) &&
-		      (tpt_tbl[rate] <= active_tpt))) ||
-		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
-		     (tpt_tbl[rate] > active_tpt))) {
-
-			/* (2nd or later pass)
-			 * If we've already tried to raise the rate, and are
-			 * now trying to lower it, use the higher rate. */
-			if (start_hi != IWL_RATE_INVALID) {
-				new_rate = start_hi;
-				break;
-			}
-
-			new_rate = rate;
-
-			/* Loop again with lower rate */
-			if (low != IWL_RATE_INVALID)
-				rate = low;
-
-			/* Lower rate not available, use the original */
-			else
-				break;
-
-		/* Else try to raise the "search" rate to match "active" */
-		} else {
-			/* (2nd or later pass)
-			 * If we've already tried to lower the rate, and are
-			 * now trying to raise it, use the lower rate. */
-			if (new_rate != IWL_RATE_INVALID)
-				break;
-
-			/* Loop again with higher rate */
-			else if (high != IWL_RATE_INVALID) {
-				start_hi = high;
-				rate = high;
-
-			/* Higher rate not available, use the original */
-			} else {
-				new_rate = rate;
-				break;
-			}
-		}
-	}
-
-	return new_rate;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
-			     struct iwl_lq_sta *lq_sta,
-			     struct ieee80211_conf *conf,
-			     struct ieee80211_sta *sta,
-			     struct iwl_scale_tbl_info *tbl, int index)
-{
-	u16 rate_mask;
-	s32 rate;
-	s8 is_green = lq_sta->is_green;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
-		return -1;
-
-	if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
-						== WLAN_HT_CAP_SM_PS_STATIC)
-		return -1;
-
-	/* Need both Tx chains/antennas to support MIMO */
-	if (priv->hw_params.tx_chains_num < 2)
-		return -1;
-
-	IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
-
-	tbl->lq_type = LQ_MIMO2;
-	tbl->is_dup = lq_sta->is_dup;
-	tbl->action = 0;
-	tbl->max_search = IWL_MAX_SEARCH;
-	rate_mask = lq_sta->active_mimo2_rate;
-
-	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
-		tbl->is_ht40 = 1;
-	else
-		tbl->is_ht40 = 0;
-
-	iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-
-	rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
-	IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
-				rate, rate_mask);
-	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-		IWL_DEBUG_RATE(priv,
-				"Can't switch with index %d rate mask %x\n",
-						rate, rate_mask);
-		return -1;
-	}
-	tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
-						 tbl, rate, is_green);
-
-	IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
-		     tbl->current_rate, is_green);
-	return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
-			     struct iwl_lq_sta *lq_sta,
-			     struct ieee80211_conf *conf,
-			     struct ieee80211_sta *sta,
-			     struct iwl_scale_tbl_info *tbl, int index)
-{
-	u16 rate_mask;
-	u8 is_green = lq_sta->is_green;
-	s32 rate;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
-		return -1;
-
-	IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
-
-	tbl->is_dup = lq_sta->is_dup;
-	tbl->lq_type = LQ_SISO;
-	tbl->action = 0;
-	tbl->max_search = IWL_MAX_SEARCH;
-	rate_mask = lq_sta->active_siso_rate;
-
-	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
-		tbl->is_ht40 = 1;
-	else
-		tbl->is_ht40 = 0;
-
-	if (is_green)
-		tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
-	iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-	rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
-	IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
-	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-		IWL_DEBUG_RATE(priv,
-			"can not switch with index %d rate mask %x\n",
-			     rate, rate_mask);
-		return -1;
-	}
-	tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
-						tbl, rate, is_green);
-	IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
-		     tbl->current_rate, is_green);
-	return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
-				struct iwl_lq_sta *lq_sta,
-				struct ieee80211_conf *conf,
-				struct ieee80211_sta *sta,
-				int index)
-{
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
-	u8 tx_chains_num = priv->hw_params.tx_chains_num;
-	int ret = 0;
-	u8 update_search_tbl_counter = 0;
-
-	tbl->action = IWL_LEGACY_SWITCH_SISO;
-
-	start_action = tbl->action;
-	for (; ;) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_LEGACY_SWITCH_ANTENNA1:
-		case IWL_LEGACY_SWITCH_ANTENNA2:
-			IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
-
-			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
-							tx_chains_num <= 1) ||
-			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
-							tx_chains_num <= 2))
-				break;
-
-			/* Don't change antenna if success has been great */
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-				break;
-
-			/* Set up search table to try other antenna */
-			memcpy(search_tbl, tbl, sz);
-
-			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
-				&search_tbl->current_rate, search_tbl)) {
-				update_search_tbl_counter = 1;
-				iwl4965_rs_set_expected_tpt_table(lq_sta,
-								search_tbl);
-				goto out;
-			}
-			break;
-		case IWL_LEGACY_SWITCH_SISO:
-			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
-
-			/* Set up search table to try SISO */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-			ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
-						 search_tbl, index);
-			if (!ret) {
-				lq_sta->action_counter = 0;
-				goto out;
-			}
-
-			break;
-		case IWL_LEGACY_SWITCH_MIMO2_AB:
-		case IWL_LEGACY_SWITCH_MIMO2_AC:
-		case IWL_LEGACY_SWITCH_MIMO2_BC:
-			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
-
-			/* Set up search table to try MIMO */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-
-			if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
-				search_tbl->ant_type = ANT_AB;
-			else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
-				search_tbl->ant_type = ANT_AC;
-			else
-				search_tbl->ant_type = ANT_BC;
-
-			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
-						search_tbl->ant_type))
-				break;
-
-			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
-						conf, sta,
-						 search_tbl, index);
-			if (!ret) {
-				lq_sta->action_counter = 0;
-				goto out;
-			}
-			break;
-		}
-		tbl->action++;
-		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
-			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-
-		if (tbl->action == start_action)
-			break;
-
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
-
-out:
-	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
-		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
-	return 0;
-
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
-{
-	u8 is_green = lq_sta->is_green;
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
-	u8 tx_chains_num = priv->hw_params.tx_chains_num;
-	u8 update_search_tbl_counter = 0;
-	int ret;
-
-	start_action = tbl->action;
-
-	for (;;) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_SISO_SWITCH_ANTENNA1:
-		case IWL_SISO_SWITCH_ANTENNA2:
-			IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
-			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
-						tx_chains_num <= 1) ||
-			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
-						tx_chains_num <= 2))
-				break;
-
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-				break;
-
-			memcpy(search_tbl, tbl, sz);
-			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
-				       &search_tbl->current_rate, search_tbl)) {
-				update_search_tbl_counter = 1;
-				goto out;
-			}
-			break;
-		case IWL_SISO_SWITCH_MIMO2_AB:
-		case IWL_SISO_SWITCH_MIMO2_AC:
-		case IWL_SISO_SWITCH_MIMO2_BC:
-			IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-
-			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
-				search_tbl->ant_type = ANT_AB;
-			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
-				search_tbl->ant_type = ANT_AC;
-			else
-				search_tbl->ant_type = ANT_BC;
-
-			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
-						 search_tbl->ant_type))
-				break;
-
-			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
-						conf, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-			break;
-		case IWL_SISO_SWITCH_GI:
-			if (!tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_20))
-				break;
-			if (tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_40))
-				break;
-
-			IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
-
-			memcpy(search_tbl, tbl, sz);
-			if (is_green) {
-				if (!tbl->is_SGI)
-					break;
-				else
-					IWL_ERR(priv,
-						"SGI was set in GF+SISO\n");
-			}
-			search_tbl->is_SGI = !tbl->is_SGI;
-			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
-			if (tbl->is_SGI) {
-				s32 tpt = lq_sta->last_tpt / 100;
-				if (tpt >= search_tbl->expected_tpt[index])
-					break;
-			}
-			search_tbl->current_rate =
-				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
-						      index, is_green);
-			update_search_tbl_counter = 1;
-			goto out;
-		}
-		tbl->action++;
-		if (tbl->action > IWL_SISO_SWITCH_GI)
-			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-
-		if (tbl->action == start_action)
-			break;
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
-
- out:
-	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_SISO_SWITCH_GI)
-		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
-
-	return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
-{
-	s8 is_green = lq_sta->is_green;
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
-	u8 tx_chains_num = priv->hw_params.tx_chains_num;
-	u8 update_search_tbl_counter = 0;
-	int ret;
-
-	start_action = tbl->action;
-	for (;;) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_MIMO2_SWITCH_ANTENNA1:
-		case IWL_MIMO2_SWITCH_ANTENNA2:
-			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
-
-			if (tx_chains_num <= 2)
-				break;
-
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-				break;
-
-			memcpy(search_tbl, tbl, sz);
-			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
-				       &search_tbl->current_rate, search_tbl)) {
-				update_search_tbl_counter = 1;
-				goto out;
-			}
-			break;
-		case IWL_MIMO2_SWITCH_SISO_A:
-		case IWL_MIMO2_SWITCH_SISO_B:
-		case IWL_MIMO2_SWITCH_SISO_C:
-			IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
-
-			/* Set up new search table for SISO */
-			memcpy(search_tbl, tbl, sz);
-
-			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
-				search_tbl->ant_type = ANT_A;
-			else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
-				search_tbl->ant_type = ANT_B;
-			else
-				search_tbl->ant_type = ANT_C;
-
-			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
-						search_tbl->ant_type))
-				break;
-
-			ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
-						conf, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-
-			break;
-
-		case IWL_MIMO2_SWITCH_GI:
-			if (!tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_20))
-				break;
-			if (tbl->is_ht40 && !(ht_cap->cap &
-						IEEE80211_HT_CAP_SGI_40))
-				break;
-
-			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
-
-			/* Set up new search table for MIMO2 */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = !tbl->is_SGI;
-			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
-			/*
-			 * If active table already uses the fastest possible
-			 * modulation (dual stream with short guard interval),
-			 * and it's working well, there's no need to look
-			 * for a better type of modulation!
-			 */
-			if (tbl->is_SGI) {
-				s32 tpt = lq_sta->last_tpt / 100;
-				if (tpt >= search_tbl->expected_tpt[index])
-					break;
-			}
-			search_tbl->current_rate =
-				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
-						      index, is_green);
-			update_search_tbl_counter = 1;
-			goto out;
-
-		}
-		tbl->action++;
-		if (tbl->action > IWL_MIMO2_SWITCH_GI)
-			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-
-		if (tbl->action == start_action)
-			break;
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
- out:
-	lq_sta->search_better_tbl = 1;
-	tbl->action++;
-	if (tbl->action > IWL_MIMO2_SWITCH_GI)
-		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
-
-	return 0;
-
-}
-
-/*
- * Check whether we should continue using same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
- */
-static void
-iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
-{
-	struct iwl_scale_tbl_info *tbl;
-	int i;
-	int active_tbl;
-	int flush_interval_passed = 0;
-	struct iwl_priv *priv;
-
-	priv = lq_sta->drv;
-	active_tbl = lq_sta->active_tbl;
-
-	tbl = &(lq_sta->lq_info[active_tbl]);
-
-	/* If we've been disallowing search, see if we should now allow it */
-	if (lq_sta->stay_in_tbl) {
-
-		/* Elapsed time using current modulation mode */
-		if (lq_sta->flush_timer)
-			flush_interval_passed =
-			time_after(jiffies,
-					(unsigned long)(lq_sta->flush_timer +
-					IWL_RATE_SCALE_FLUSH_INTVL));
-
-		/*
-		 * Check if we should allow search for new modulation mode.
-		 * If many frames have failed or succeeded, or we've used
-		 * this same modulation for a long time, allow search, and
-		 * reset history stats that keep track of whether we should
-		 * allow a new search.  Also (below) reset all bitmaps and
-		 * stats in active history.
-		 */
-		if (force_search ||
-		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
-		    (lq_sta->total_success > lq_sta->max_success_limit) ||
-		    ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
-		     && (flush_interval_passed))) {
-			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
-				     lq_sta->total_failed,
-				     lq_sta->total_success,
-				     flush_interval_passed);
-
-			/* Allow search for new mode */
-			lq_sta->stay_in_tbl = 0;	/* only place reset */
-			lq_sta->total_failed = 0;
-			lq_sta->total_success = 0;
-			lq_sta->flush_timer = 0;
-
-		/*
-		 * Else, if we've used this modulation mode for enough
-		 * repetitions (regardless of elapsed time or success/failure),
-		 * reset history bitmaps and rate-specific stats for all rates
-		 * in the active table.
-		 */
-		} else {
-			lq_sta->table_count++;
-			if (lq_sta->table_count >=
-			    lq_sta->table_count_limit) {
-				lq_sta->table_count = 0;
-
-				IWL_DEBUG_RATE(priv,
-					"LQ: stay in table clear win\n");
-				for (i = 0; i < IWL_RATE_COUNT; i++)
-					iwl4965_rs_rate_scale_clear_window(
-						&(tbl->win[i]));
-			}
-		}
-
-		/* If transitioning to allow "search", reset all history
-		 * bitmaps and stats in active table (this will become the new
-		 * "search" table). */
-		if (!lq_sta->stay_in_tbl) {
-			for (i = 0; i < IWL_RATE_COUNT; i++)
-				iwl4965_rs_rate_scale_clear_window(
-							&(tbl->win[i]));
-		}
-	}
-}
-
-/*
- * setup rate table in uCode
- * return rate_n_flags as used in the table
- */
-static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
-			      struct iwl_rxon_context *ctx,
-				struct iwl_lq_sta *lq_sta,
-				struct iwl_scale_tbl_info *tbl,
-				int index, u8 is_green)
-{
-	u32 rate;
-
-	/* Update uCode's rate table. */
-	rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
-	iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
-	iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
-
-	return rate;
-}
-
-/*
- * Do rate scaling and search for new modulation mode.
- */
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
-				  struct sk_buff *skb,
-				  struct ieee80211_sta *sta,
-				  struct iwl_lq_sta *lq_sta)
-{
-	struct ieee80211_hw *hw = priv->hw;
-	struct ieee80211_conf *conf = &hw->conf;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	int low = IWL_RATE_INVALID;
-	int high = IWL_RATE_INVALID;
-	int index;
-	int i;
-	struct iwl_rate_scale_data *window = NULL;
-	int current_tpt = IWL_INVALID_VALUE;
-	int low_tpt = IWL_INVALID_VALUE;
-	int high_tpt = IWL_INVALID_VALUE;
-	u32 fail_count;
-	s8 scale_action = 0;
-	u16 rate_mask;
-	u8 update_lq = 0;
-	struct iwl_scale_tbl_info *tbl, *tbl1;
-	u16 rate_scale_index_msk = 0;
-	u32 rate;
-	u8 is_green = 0;
-	u8 active_tbl = 0;
-	u8 done_search = 0;
-	u16 high_low;
-	s32 sr;
-	u8 tid = MAX_TID_COUNT;
-	struct iwl_tid_data *tid_data;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-	IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
-
-	/* Send management frames and NO_ACK data using lowest rate. */
-	/* TODO: this could probably be improved.. */
-	if (!ieee80211_is_data(hdr->frame_control) ||
-	    info->flags & IEEE80211_TX_CTL_NO_ACK)
-		return;
-
-	if (!sta || !lq_sta)
-		return;
-
-	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
-	tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
-	if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
-		tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
-		if (tid_data->agg.state == IWL_AGG_OFF)
-			lq_sta->is_agg = 0;
-		else
-			lq_sta->is_agg = 1;
-	} else
-		lq_sta->is_agg = 0;
-
-	/*
-	 * Select rate-scale / modulation-mode table to work with in
-	 * the rest of this function:  "search" if searching for better
-	 * modulation mode, or "active" if doing rate scaling within a mode.
-	 */
-	if (!lq_sta->search_better_tbl)
-		active_tbl = lq_sta->active_tbl;
-	else
-		active_tbl = 1 - lq_sta->active_tbl;
-
-	tbl = &(lq_sta->lq_info[active_tbl]);
-	if (is_legacy(tbl->lq_type))
-		lq_sta->is_green = 0;
-	else
-		lq_sta->is_green = iwl4965_rs_use_green(sta);
-	is_green = lq_sta->is_green;
-
-	/* current tx rate */
-	index = lq_sta->last_txrate_idx;
-
-	IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
-		       tbl->lq_type);
-
-	/* rates available for this association, and for modulation mode */
-	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
-
-	IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
-
-	/* mask with station rate restriction */
-	if (is_legacy(tbl->lq_type)) {
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			/* supp_rates has no CCK bits in A mode */
-			rate_scale_index_msk = (u16) (rate_mask &
-				(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
-		else
-			rate_scale_index_msk = (u16) (rate_mask &
-						      lq_sta->supp_rates);
-
-	} else
-		rate_scale_index_msk = rate_mask;
-
-	if (!rate_scale_index_msk)
-		rate_scale_index_msk = rate_mask;
-
-	if (!((1 << index) & rate_scale_index_msk)) {
-		IWL_ERR(priv, "Current Rate is not valid\n");
-		if (lq_sta->search_better_tbl) {
-			/* revert to active table if search table is not valid*/
-			tbl->lq_type = LQ_NONE;
-			lq_sta->search_better_tbl = 0;
-			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-			/* get "active" rate info */
-			index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-			rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
-						  tbl, index, is_green);
-		}
-		return;
-	}
-
-	/* Get expected throughput table and history window for current rate */
-	if (!tbl->expected_tpt) {
-		IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
-		return;
-	}
-
-	/* force user max rate if set by user */
-	if ((lq_sta->max_rate_idx != -1) &&
-	    (lq_sta->max_rate_idx < index)) {
-		index = lq_sta->max_rate_idx;
-		update_lq = 1;
-		window = &(tbl->win[index]);
-		goto lq_update;
-	}
-
-	window = &(tbl->win[index]);
-
-	/*
-	 * If there is not enough history to calculate actual average
-	 * throughput, keep analyzing results of more tx frames, without
-	 * changing rate or mode (bypass most of the rest of this function).
-	 * Set up new rate table in uCode only if old rate is not supported
-	 * in current association (use new rate found above).
-	 */
-	fail_count = window->counter - window->success_counter;
-	if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
-			(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
-		IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
-			       "for index %d\n",
-			       window->success_counter, window->counter, index);
-
-		/* Can't calculate this yet; not enough history */
-		window->average_tpt = IWL_INVALID_VALUE;
-
-		/* Should we stay with this modulation mode,
-		 * or search for a new one? */
-		iwl4965_rs_stay_in_table(lq_sta, false);
-
-		goto out;
-	}
-	/* Else we have enough samples; calculate estimate of
-	 * actual average throughput */
-	if (window->average_tpt != ((window->success_ratio *
-			tbl->expected_tpt[index] + 64) / 128)) {
-		IWL_ERR(priv,
-			 "expected_tpt should have been calculated by now\n");
-		window->average_tpt = ((window->success_ratio *
-					tbl->expected_tpt[index] + 64) / 128);
-	}
-
-	/* If we are searching for better modulation mode, check success. */
-	if (lq_sta->search_better_tbl) {
-		/* If good success, continue using the "search" mode;
-		 * no need to send new link quality command, since we're
-		 * continuing to use the setup that we've been trying. */
-		if (window->average_tpt > lq_sta->last_tpt) {
-
-			IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
-					"suc=%d cur-tpt=%d old-tpt=%d\n",
-					window->success_ratio,
-					window->average_tpt,
-					lq_sta->last_tpt);
-
-			if (!is_legacy(tbl->lq_type))
-				lq_sta->enable_counter = 1;
-
-			/* Swap tables; "search" becomes "active" */
-			lq_sta->active_tbl = active_tbl;
-			current_tpt = window->average_tpt;
-
-		/* Else poor success; go back to mode in "active" table */
-		} else {
-
-			IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
-					"suc=%d cur-tpt=%d old-tpt=%d\n",
-					window->success_ratio,
-					window->average_tpt,
-					lq_sta->last_tpt);
-
-			/* Nullify "search" table */
-			tbl->lq_type = LQ_NONE;
-
-			/* Revert to "active" table */
-			active_tbl = lq_sta->active_tbl;
-			tbl = &(lq_sta->lq_info[active_tbl]);
-
-			/* Revert to "active" rate and throughput info */
-			index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-			current_tpt = lq_sta->last_tpt;
-
-			/* Need to set up a new rate table in uCode */
-			update_lq = 1;
-		}
-
-		/* Either way, we've made a decision; modulation mode
-		 * search is done, allow rate adjustment next time. */
-		lq_sta->search_better_tbl = 0;
-		done_search = 1;	/* Don't switch modes below! */
-		goto lq_update;
-	}
-
-	/* (Else) not in search of better modulation mode, try for better
-	 * starting rate, while staying in this mode. */
-	high_low = iwl4965_rs_get_adjacent_rate(priv, index,
-					rate_scale_index_msk,
-					tbl->lq_type);
-	low = high_low & 0xff;
-	high = (high_low >> 8) & 0xff;
-
-	/* If the user set a max rate, don't allow anything higher than that constraint */
-	if ((lq_sta->max_rate_idx != -1) &&
-	    (lq_sta->max_rate_idx < high))
-		high = IWL_RATE_INVALID;
-
-	sr = window->success_ratio;
-
-	/* Collect measured throughputs for current and adjacent rates */
-	current_tpt = window->average_tpt;
-	if (low != IWL_RATE_INVALID)
-		low_tpt = tbl->win[low].average_tpt;
-	if (high != IWL_RATE_INVALID)
-		high_tpt = tbl->win[high].average_tpt;
-
-	scale_action = 0;
-
-	/* Too many failures, decrease rate */
-	if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
-		IWL_DEBUG_RATE(priv,
-			"decrease rate because of low success_ratio\n");
-		scale_action = -1;
-
-	/* No throughput measured yet for adjacent rates; try increase. */
-	} else if ((low_tpt == IWL_INVALID_VALUE) &&
-		   (high_tpt == IWL_INVALID_VALUE)) {
-
-		if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
-			scale_action = 1;
-		else if (low != IWL_RATE_INVALID)
-			scale_action = 0;
-	}
-
-	/* Both adjacent throughputs are measured, but neither one has better
-	 * throughput; we're using the best rate, don't change it! */
-	else if ((low_tpt != IWL_INVALID_VALUE) &&
-		 (high_tpt != IWL_INVALID_VALUE) &&
-		 (low_tpt < current_tpt) &&
-		 (high_tpt < current_tpt))
-		scale_action = 0;
-
-	/* At least one adjacent rate's throughput is measured,
-	 * and may have better performance. */
-	else {
-		/* Higher adjacent rate's throughput is measured */
-		if (high_tpt != IWL_INVALID_VALUE) {
-			/* Higher rate has better throughput */
-			if (high_tpt > current_tpt &&
-					sr >= IWL_RATE_INCREASE_TH) {
-				scale_action = 1;
-			} else {
-				scale_action = 0;
-			}
-
-		/* Lower adjacent rate's throughput is measured */
-		} else if (low_tpt != IWL_INVALID_VALUE) {
-			/* Lower rate has better throughput */
-			if (low_tpt > current_tpt) {
-				IWL_DEBUG_RATE(priv,
-				    "decrease rate because of low tpt\n");
-				scale_action = -1;
-			} else if (sr >= IWL_RATE_INCREASE_TH) {
-				scale_action = 1;
-			}
-		}
-	}
-
-	/* Sanity check; asked for decrease, but success rate or throughput
-	 * has been good at old rate.  Don't change it. */
-	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
-		    ((sr > IWL_RATE_HIGH_TH) ||
-		     (current_tpt > (100 * tbl->expected_tpt[low]))))
-		scale_action = 0;
-
-	switch (scale_action) {
-	case -1:
-		/* Decrease starting rate, update uCode's rate table */
-		if (low != IWL_RATE_INVALID) {
-			update_lq = 1;
-			index = low;
-		}
-
-		break;
-	case 1:
-		/* Increase starting rate, update uCode's rate table */
-		if (high != IWL_RATE_INVALID) {
-			update_lq = 1;
-			index = high;
-		}
-
-		break;
-	case 0:
-		/* No change */
-	default:
-		break;
-	}
-
-	IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
-		    "high %d type %d\n",
-		     index, scale_action, low, high, tbl->lq_type);
-
-lq_update:
-	/* Replace uCode's rate table for the destination station. */
-	if (update_lq)
-		rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
-					  tbl, index, is_green);
-
-	/* Should we stay with this modulation mode,
-	 * or search for a new one? */
-	 iwl4965_rs_stay_in_table(lq_sta, false);
-
-	/*
-	 * Search for new modulation mode if we're:
-	 * 1)  Not changing rates right now
-	 * 2)  Not just finishing up a search
-	 * 3)  Allowing a new search
-	 */
-	if (!update_lq && !done_search &&
-		!lq_sta->stay_in_tbl && window->counter) {
-		/* Save current throughput to compare with "search" throughput*/
-		lq_sta->last_tpt = current_tpt;
-
-		/* Select a new "search" modulation mode to try.
-		 * If one is found, set up the new "search" table. */
-		if (is_legacy(tbl->lq_type))
-			iwl4965_rs_move_legacy_other(priv, lq_sta,
-							conf, sta, index);
-		else if (is_siso(tbl->lq_type))
-			iwl4965_rs_move_siso_to_other(priv, lq_sta,
-							conf, sta, index);
-		else /* (is_mimo2(tbl->lq_type)) */
-			iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
-							conf, sta, index);
-
-		/* If new "search" mode was selected, set up in uCode table */
-		if (lq_sta->search_better_tbl) {
-			/* Access the "search" table, clear its history. */
-			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-			for (i = 0; i < IWL_RATE_COUNT; i++)
-				iwl4965_rs_rate_scale_clear_window(
-							&(tbl->win[i]));
-
-			/* Use new "search" start rate */
-			index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-
-			IWL_DEBUG_RATE(priv,
-				"Switch current  mcs: %X index: %d\n",
-				     tbl->current_rate, index);
-			iwl4965_rs_fill_link_cmd(priv, lq_sta,
-						tbl->current_rate);
-			iwl_legacy_send_lq_cmd(priv, ctx,
-						&lq_sta->lq, CMD_ASYNC, false);
-		} else
-			done_search = 1;
-	}
-
-	if (done_search && !lq_sta->stay_in_tbl) {
-		/* If the "active" (non-search) mode was legacy,
-		 * and we've tried switching antennas,
-		 * but we haven't been able to try HT modes (not available),
-		 * stay with best antenna legacy modulation for a while
-		 * before next round of mode comparisons. */
-		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
-		    lq_sta->action_counter > tbl1->max_search) {
-			IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
-			iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
-		}
-
-		/* If we're in an HT mode, and all 3 mode switch actions
-		 * have been tried and compared, stay in this best modulation
-		 * mode for a while before next round of mode comparisons. */
-		if (lq_sta->enable_counter &&
-		    (lq_sta->action_counter >= tbl1->max_search)) {
-			if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
-			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
-			    (tid != MAX_TID_COUNT)) {
-				tid_data =
-				   &priv->stations[lq_sta->lq.sta_id].tid[tid];
-				if (tid_data->agg.state == IWL_AGG_OFF) {
-					IWL_DEBUG_RATE(priv,
-						       "try to aggregate tid %d\n",
-						       tid);
-					iwl4965_rs_tl_turn_on_agg(priv, tid,
-							  lq_sta, sta);
-				}
-			}
-			iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
-		}
-	}
-
-out:
-	tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
-							index, is_green);
-	i = index;
-	lq_sta->last_txrate_idx = i;
-}
-
-/**
- * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values.  These will be replaced later
- *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
- *       rc80211_simple.
- *
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- *       which requires station table entry to exist).
- */
-static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
-			     struct ieee80211_conf *conf,
-			     struct ieee80211_sta *sta,
-			     struct iwl_lq_sta *lq_sta)
-{
-	struct iwl_scale_tbl_info *tbl;
-	int rate_idx;
-	int i;
-	u32 rate;
-	u8 use_green = iwl4965_rs_use_green(sta);
-	u8 active_tbl = 0;
-	u8 valid_tx_ant;
-	struct iwl_station_priv *sta_priv;
-	struct iwl_rxon_context *ctx;
-
-	if (!sta || !lq_sta)
-		return;
-
-	sta_priv = (void *)sta->drv_priv;
-	ctx = sta_priv->common.ctx;
-
-	i = lq_sta->last_txrate_idx;
-
-	valid_tx_ant = priv->hw_params.valid_tx_ant;
-
-	if (!lq_sta->search_better_tbl)
-		active_tbl = lq_sta->active_tbl;
-	else
-		active_tbl = 1 - lq_sta->active_tbl;
-
-	tbl = &(lq_sta->lq_info[active_tbl]);
-
-	if ((i < 0) || (i >= IWL_RATE_COUNT))
-		i = 0;
-
-	rate = iwlegacy_rates[i].plcp;
-	tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
-	rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
-	if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
-		rate |= RATE_MCS_CCK_MSK;
-
-	iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
-	if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
-		iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
-
-	rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
-	tbl->current_rate = rate;
-	iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-	iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
-	priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
-	iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
-}
-
-static void
-iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
-			struct ieee80211_tx_rate_control *txrc)
-{
-
-	struct sk_buff *skb = txrc->skb;
-	struct ieee80211_supported_band *sband = txrc->sband;
-	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_lq_sta *lq_sta = priv_sta;
-	int rate_idx;
-
-	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
-
-	/* Get max rate if user set max rate */
-	if (lq_sta) {
-		lq_sta->max_rate_idx = txrc->max_rate_idx;
-		if ((sband->band == IEEE80211_BAND_5GHZ) &&
-		    (lq_sta->max_rate_idx != -1))
-			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
-		if ((lq_sta->max_rate_idx < 0) ||
-		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
-			lq_sta->max_rate_idx = -1;
-	}
-
-	/* Treat uninitialized rate scaling data the same as non-existing. */
-	if (lq_sta && !lq_sta->drv) {
-		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
-		priv_sta = NULL;
-	}
-
-	/* Send management frames and NO_ACK data using lowest rate. */
-	if (rate_control_send_low(sta, priv_sta, txrc))
-		return;
-
-	if (!lq_sta)
-		return;
-
-	rate_idx  = lq_sta->last_txrate_idx;
-
-	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
-		rate_idx -= IWL_FIRST_OFDM_RATE;
-		/* 6M and 9M shared same MCS index */
-		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
-		if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
-			 IWL_RATE_MIMO2_6M_PLCP)
-			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
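-		/*
-		 * For example, a SISO 60M entry resolves to MCS 7 here, while
-		 * a MIMO2 6M entry resolves to MCS 8 (0 + MCS_INDEX_PER_STREAM).
-		 */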
-		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
-		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
-			info->control.rates[0].flags |=
-					IEEE80211_TX_RC_SHORT_GI;
-		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
-			info->control.rates[0].flags |=
-					IEEE80211_TX_RC_DUP_DATA;
-		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
-			info->control.rates[0].flags |=
-					IEEE80211_TX_RC_40_MHZ_WIDTH;
-		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
-			info->control.rates[0].flags |=
-					IEEE80211_TX_RC_GREEN_FIELD;
-	} else {
-		/* Check for invalid rates */
-		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-				((sband->band == IEEE80211_BAND_5GHZ) &&
-				 (rate_idx < IWL_FIRST_OFDM_RATE)))
-			rate_idx = rate_lowest_index(sband, sta);
-		/* On valid 5 GHz rate, adjust index */
-		else if (sband->band == IEEE80211_BAND_5GHZ)
-			rate_idx -= IWL_FIRST_OFDM_RATE;
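-		/* (the driver's 6M index, IWL_FIRST_OFDM_RATE, thus becomes
-		 * mac80211 rate index 0, since the 5 GHz band table carries
-		 * no CCK rates) */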
-		info->control.rates[0].flags = 0;
-	}
-	info->control.rates[0].idx = rate_idx;
-
-}
-
-static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
-			  gfp_t gfp)
-{
-	struct iwl_lq_sta *lq_sta;
-	struct iwl_station_priv *sta_priv =
-				(struct iwl_station_priv *) sta->drv_priv;
-	struct iwl_priv *priv;
-
-	priv = (struct iwl_priv *)priv_rate;
-	IWL_DEBUG_RATE(priv, "create station rate scale window\n");
-
-	lq_sta = &sta_priv->lq_sta;
-
-	return lq_sta;
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void
-iwl4965_rs_rate_init(struct iwl_priv *priv,
-			struct ieee80211_sta *sta,
-			u8 sta_id)
-{
-	int i, j;
-	struct ieee80211_hw *hw = priv->hw;
-	struct ieee80211_conf *conf = &priv->hw->conf;
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-	struct iwl_station_priv *sta_priv;
-	struct iwl_lq_sta *lq_sta;
-	struct ieee80211_supported_band *sband;
-
-	sta_priv = (struct iwl_station_priv *) sta->drv_priv;
-	lq_sta = &sta_priv->lq_sta;
-	sband = hw->wiphy->bands[conf->channel->band];
-
-
-	lq_sta->lq.sta_id = sta_id;
-
-	for (j = 0; j < LQ_SIZE; j++)
-		for (i = 0; i < IWL_RATE_COUNT; i++)
-			iwl4965_rs_rate_scale_clear_window(
-					&lq_sta->lq_info[j].win[i]);
-
-	lq_sta->flush_timer = 0;
-	lq_sta->supp_rates = sta->supp_rates[sband->band];
-	for (j = 0; j < LQ_SIZE; j++)
-		for (i = 0; i < IWL_RATE_COUNT; i++)
-			iwl4965_rs_rate_scale_clear_window(
-					&lq_sta->lq_info[j].win[i]);
-
-	IWL_DEBUG_RATE(priv, "LQ:"
-			"*** rate scale station global init for station %d ***\n",
-		       sta_id);
-	/* TODO: what is a good starting rate for STA? About middle? Maybe not
-	 * the lowest or the highest rate.. Could consider using RSSI from
-	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
-	 * after assoc.. */
-
-	lq_sta->is_dup = 0;
-	lq_sta->max_rate_idx = -1;
-	lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
-	lq_sta->is_green = iwl4965_rs_use_green(sta);
-	lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
-	lq_sta->band = priv->band;
-	/*
-	 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
-	 * supp_rates[] does not; shift to convert format, force 9 MBits off.
-	 */
-	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
-	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
-	lq_sta->active_siso_rate &= ~((u16)0x2);
-	lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
-	/* Same here */
-	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
-	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
-	lq_sta->active_mimo2_rate &= ~((u16)0x2);
-	lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
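-	/*
-	 * For example, an HT MCS rx_mask of 0xFF (MCS 0-7) ends up here as
-	 * 0x1FD0: 6-60 MBits with the 9 MBits and CCK bits forced off, the
-	 * same value the debugfs scale-table write handler below uses.
-	 */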
-
-	/* These values will be overridden later */
-	lq_sta->lq.general_params.single_stream_ant_msk =
-		iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-	lq_sta->lq.general_params.dual_stream_ant_msk =
-		priv->hw_params.valid_tx_ant &
-		~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-	if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
-		lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
-	} else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
-		lq_sta->lq.general_params.dual_stream_ant_msk =
-			priv->hw_params.valid_tx_ant;
-	}
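-	/*
-	 * For example, with valid_tx_ant == ANT_AB this resolves to
-	 * single_stream_ant_msk = ANT_A and dual_stream_ant_msk = ANT_AB.
-	 */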
-
-	/* as default allow aggregation for all tids */
-	lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
-	lq_sta->drv = priv;
-
-	/* Set last_txrate_idx to lowest rate */
-	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-	if (sband->band == IEEE80211_BAND_5GHZ)
-		lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
-	lq_sta->is_agg = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-	lq_sta->dbg_fixed_rate = 0;
-#endif
-
-	iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
-}
-
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
-			     struct iwl_lq_sta *lq_sta, u32 new_rate)
-{
-	struct iwl_scale_tbl_info tbl_type;
-	int index = 0;
-	int rate_idx;
-	int repeat_rate = 0;
-	u8 ant_toggle_cnt = 0;
-	u8 use_ht_possible = 1;
-	u8 valid_tx_ant = 0;
-	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
-
-	/* Override starting rate (index 0) if needed for debug purposes */
-	iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
-	/* Interpret new_rate (rate_n_flags) */
-	iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
-				  &tbl_type, &rate_idx);
-
-	/* How many times should we repeat the initial rate? */
-	if (is_legacy(tbl_type.lq_type)) {
-		ant_toggle_cnt = 1;
-		repeat_rate = IWL_NUMBER_TRY;
-	} else {
-		repeat_rate = IWL_HT_NUMBER_TRY;
-	}
-
-	lq_cmd->general_params.mimo_delimiter =
-			is_mimo(tbl_type.lq_type) ? 1 : 0;
-
-	/* Fill 1st table entry (index 0) */
-	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
-	if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
-		lq_cmd->general_params.single_stream_ant_msk =
-						tbl_type.ant_type;
-	} else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
-		lq_cmd->general_params.dual_stream_ant_msk =
-						tbl_type.ant_type;
-	} /* otherwise we don't modify the existing value */
-
-	index++;
-	repeat_rate--;
-	if (priv)
-		valid_tx_ant = priv->hw_params.valid_tx_ant;
-
-	/* Fill rest of rate table */
-	while (index < LINK_QUAL_MAX_RETRY_NUM) {
-		/* Repeat initial/next rate.
-		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
-		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
-		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
-			if (is_legacy(tbl_type.lq_type)) {
-				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
-					ant_toggle_cnt++;
-				else if (priv &&
-					 iwl4965_rs_toggle_antenna(valid_tx_ant,
-							&new_rate, &tbl_type))
-					ant_toggle_cnt = 1;
-			}
-
-			/* Override next rate if needed for debug purposes */
-			iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
-			/* Fill next table entry */
-			lq_cmd->rs_table[index].rate_n_flags =
-					cpu_to_le32(new_rate);
-			repeat_rate--;
-			index++;
-		}
-
-		iwl4965_rs_get_tbl_info_from_mcs(new_rate,
-						lq_sta->band, &tbl_type,
-						&rate_idx);
-
-		/* Indicate to uCode which entries might be MIMO.
-		 * If initial rate was MIMO, this will finally end up
-		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
-		if (is_mimo(tbl_type.lq_type))
-			lq_cmd->general_params.mimo_delimiter = index;
-
-		/* Get next rate */
-		new_rate = iwl4965_rs_get_lower_rate(lq_sta,
-					&tbl_type, rate_idx,
-					     use_ht_possible);
-
-		/* How many times should we repeat the next rate? */
-		if (is_legacy(tbl_type.lq_type)) {
-			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
-				ant_toggle_cnt++;
-			else if (priv &&
-				 iwl4965_rs_toggle_antenna(valid_tx_ant,
-						   &new_rate, &tbl_type))
-				ant_toggle_cnt = 1;
-
-			repeat_rate = IWL_NUMBER_TRY;
-		} else {
-			repeat_rate = IWL_HT_NUMBER_TRY;
-		}
-
-		/* Don't allow HT rates after next pass.
-		 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
-		use_ht_possible = 0;
-
-		/* Override next rate if needed for debug purposes */
-		iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
-		/* Fill next table entry */
-		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
-		index++;
-		repeat_rate--;
-	}
-
-	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-
-	lq_cmd->agg_params.agg_time_limit =
-		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-}
-
-static void
-*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
-	return hw->priv;
-}
-/* rate scale requires free function to be implemented */
-static void iwl4965_rs_free(void *priv_rate)
-{
-	return;
-}
-
-static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
-			void *priv_sta)
-{
-	struct iwl_priv *priv __maybe_unused = priv_r;
-
-	IWL_DEBUG_RATE(priv, "enter\n");
-	IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags, int index)
-{
-	struct iwl_priv *priv;
-	u8 valid_tx_ant;
-	u8 ant_sel_tx;
-
-	priv = lq_sta->drv;
-	valid_tx_ant = priv->hw_params.valid_tx_ant;
-	if (lq_sta->dbg_fixed_rate) {
-		ant_sel_tx =
-		  ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
-		  >> RATE_MCS_ANT_POS);
-		if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
-			*rate_n_flags = lq_sta->dbg_fixed_rate;
-			IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
-		} else {
-			lq_sta->dbg_fixed_rate = 0;
-			IWL_ERR(priv,
-			    "Invalid antenna selection 0x%X, Valid is 0x%X\n",
-			    ant_sel_tx, valid_tx_ant);
-			IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
-		}
-	} else {
-		IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
-	}
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
-			const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	struct iwl_lq_sta *lq_sta = file->private_data;
-	struct iwl_priv *priv;
-	char buf[64];
-	size_t buf_size;
-	u32 parsed_rate;
-	struct iwl_station_priv *sta_priv =
-		container_of(lq_sta, struct iwl_station_priv, lq_sta);
-	struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-	priv = lq_sta->drv;
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-
-	if (sscanf(buf, "%x", &parsed_rate) == 1)
-		lq_sta->dbg_fixed_rate = parsed_rate;
-	else
-		lq_sta->dbg_fixed_rate = 0;
-
-	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
-	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
-	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
-
-	IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
-		lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
-	if (lq_sta->dbg_fixed_rate) {
-		iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
-		iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
-				false);
-	}
-
-	return count;
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos)
-{
-	char *buff;
-	int desc = 0;
-	int i = 0;
-	int index = 0;
-	ssize_t ret;
-
-	struct iwl_lq_sta *lq_sta = file->private_data;
-	struct iwl_priv *priv;
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
-	priv = lq_sta->drv;
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
-
-	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
-	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
-			lq_sta->total_failed, lq_sta->total_success,
-			lq_sta->active_legacy_rate);
-	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
-			lq_sta->dbg_fixed_rate);
-	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
-	    (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
-	    (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
-	    (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
-	desc += sprintf(buff+desc, "lq type %s\n",
-	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
-	if (is_Ht(tbl->lq_type)) {
-		desc += sprintf(buff+desc, " %s",
-			(is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
-		desc += sprintf(buff+desc, " %s",
-			(tbl->is_ht40) ? "40MHz" : "20MHz");
-		desc += sprintf(buff+desc, " %s %s %s\n",
-			(tbl->is_SGI) ? "SGI" : "",
-			(lq_sta->is_green) ? "GF enabled" : "",
-			(lq_sta->is_agg) ? "AGG on" : "");
-	}
-	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
-		lq_sta->last_rate_n_flags);
-	desc += sprintf(buff+desc, "general:"
-		"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
-		lq_sta->lq.general_params.flags,
-		lq_sta->lq.general_params.mimo_delimiter,
-		lq_sta->lq.general_params.single_stream_ant_msk,
-		lq_sta->lq.general_params.dual_stream_ant_msk);
-
-	desc += sprintf(buff+desc, "agg:"
-			"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
-			le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
-			lq_sta->lq.agg_params.agg_dis_start_th,
-			lq_sta->lq.agg_params.agg_frame_cnt_limit);
-
-	desc += sprintf(buff+desc,
-			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
-			lq_sta->lq.general_params.start_rate_index[0],
-			lq_sta->lq.general_params.start_rate_index[1],
-			lq_sta->lq.general_params.start_rate_index[2],
-			lq_sta->lq.general_params.start_rate_index[3]);
-
-	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
-		index = iwl4965_hwrate_to_plcp_idx(
-			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
-		if (is_legacy(tbl->lq_type)) {
-			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
-			i,
-			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
-			iwl_rate_mcs[index].mbps);
-		} else {
-			desc += sprintf(buff+desc,
-			" rate[%d] 0x%X %smbps (%s)\n",
-			i,
-			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
-			iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
-		}
-	}
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-	kfree(buff);
-	return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
-	.write = iwl4965_rs_sta_dbgfs_scale_table_write,
-	.read = iwl4965_rs_sta_dbgfs_scale_table_read,
-	.open = iwl4965_open_file_generic,
-	.llseek = default_llseek,
-};
-static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos)
-{
-	char *buff;
-	int desc = 0;
-	int i, j;
-	ssize_t ret;
-
-	struct iwl_lq_sta *lq_sta = file->private_data;
-
-	buff = kmalloc(1024, GFP_KERNEL);
-	if (!buff)
-		return -ENOMEM;
-
-	for (i = 0; i < LQ_SIZE; i++) {
-		desc += sprintf(buff+desc,
-				"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
-				"rate=0x%X\n",
-				lq_sta->active_tbl == i ? "*" : "x",
-				lq_sta->lq_info[i].lq_type,
-				lq_sta->lq_info[i].is_SGI,
-				lq_sta->lq_info[i].is_ht40,
-				lq_sta->lq_info[i].is_dup,
-				lq_sta->is_green,
-				lq_sta->lq_info[i].current_rate);
-		for (j = 0; j < IWL_RATE_COUNT; j++) {
-			desc += sprintf(buff+desc,
-				"counter=%d success=%d %%=%d\n",
-				lq_sta->lq_info[i].win[j].counter,
-				lq_sta->lq_info[i].win[j].success_counter,
-				lq_sta->lq_info[i].win[j].success_ratio);
-		}
-	}
-	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-	kfree(buff);
-	return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
-	.read = iwl4965_rs_sta_dbgfs_stats_table_read,
-	.open = iwl4965_open_file_generic,
-	.llseek = default_llseek,
-};
-
-static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
-			char __user *user_buf, size_t count, loff_t *ppos)
-{
-	char buff[120];
-	int desc = 0;
-	ssize_t ret;
-
-	struct iwl_lq_sta *lq_sta = file->private_data;
-	struct iwl_priv *priv;
-	struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-
-	priv = lq_sta->drv;
-
-	if (is_Ht(tbl->lq_type))
-		desc += sprintf(buff+desc,
-				"Bit Rate= %d Mb/s\n",
-				tbl->expected_tpt[lq_sta->last_txrate_idx]);
-	else
-		desc += sprintf(buff+desc,
-				"Bit Rate= %d Mb/s\n",
-				iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-	return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
-	.read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
-	.open = iwl4965_open_file_generic,
-	.llseek = default_llseek,
-};
-
-static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
-					struct dentry *dir)
-{
-	struct iwl_lq_sta *lq_sta = priv_sta;
-	lq_sta->rs_sta_dbgfs_scale_table_file =
-		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
-				lq_sta, &rs_sta_dbgfs_scale_table_ops);
-	lq_sta->rs_sta_dbgfs_stats_table_file =
-		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
-			lq_sta, &rs_sta_dbgfs_stats_table_ops);
-	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
-		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
-			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
-	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
-		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
-		&lq_sta->tx_agg_tid_en);
-
-}
-
-static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
-{
-	struct iwl_lq_sta *lq_sta = priv_sta;
-	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
-	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
-	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by the driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added, we ignore it.
- */
-static void
-iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
-			 struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-static struct rate_control_ops rs_4965_ops = {
-	.module = NULL,
-	.name = IWL4965_RS_NAME,
-	.tx_status = iwl4965_rs_tx_status,
-	.get_rate = iwl4965_rs_get_rate,
-	.rate_init = iwl4965_rs_rate_init_stub,
-	.alloc = iwl4965_rs_alloc,
-	.free = iwl4965_rs_free,
-	.alloc_sta = iwl4965_rs_alloc_sta,
-	.free_sta = iwl4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
-	.add_sta_debugfs = iwl4965_rs_add_debugfs,
-	.remove_sta_debugfs = iwl4965_rs_remove_debugfs,
-#endif
-};
-
-int iwl4965_rate_control_register(void)
-{
-	return ieee80211_rate_control_register(&rs_4965_ops);
-}
-
-void iwl4965_rate_control_unregister(void)
-{
-	ieee80211_rate_control_unregister(&rs_4965_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bb..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_missed_beacon_notif *missed_beacon;
-
-	missed_beacon = &pkt->u.missed_beacon;
-	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
-	    priv->missed_beacon_threshold) {
-		IWL_DEBUG_CALIB(priv,
-		    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
-		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
-		    le32_to_cpu(missed_beacon->total_missed_becons),
-		    le32_to_cpu(missed_beacon->num_recvd_beacons),
-		    le32_to_cpu(missed_beacon->num_expected_beacons));
-		if (!test_bit(STATUS_SCANNING, &priv->status))
-			iwl4965_init_sensitivity(priv);
-	}
-}
-
-/* Calculate noise level, based on measurements during network silence just
- *   before the beacon arrives.  This measurement can be done only if we know
- *   exactly when to expect beacons, therefore only when we're associated. */
-static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
-{
-	struct statistics_rx_non_phy *rx_info;
-	int num_active_rx = 0;
-	int total_silence = 0;
-	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
-	int last_rx_noise;
-
-	rx_info = &(priv->_4965.statistics.rx.general);
-	bcn_silence_a =
-		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
-	bcn_silence_b =
-		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
-	bcn_silence_c =
-		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
-	if (bcn_silence_a) {
-		total_silence += bcn_silence_a;
-		num_active_rx++;
-	}
-	if (bcn_silence_b) {
-		total_silence += bcn_silence_b;
-		num_active_rx++;
-	}
-	if (bcn_silence_c) {
-		total_silence += bcn_silence_c;
-		num_active_rx++;
-	}
-
-	/* Average among active antennas */
-	if (num_active_rx)
-		last_rx_noise = (total_silence / num_active_rx) - 107;
-	else
-		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
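-	/*
-	 * For example, two active antennas with in-band silence values of 30
-	 * and 40 give (70 / 2) - 107 = -72 dBm.
-	 */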
-
-	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
-			bcn_silence_a, bcn_silence_b, bcn_silence_c,
-			last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-/*
- *  based on the assumption that all statistics counters are DWORDs
- *  FIXME: This function is for debugging only; it does not handle
- *  the case of counter roll-over.
- */
-static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
-					__le32 *stats)
-{
-	int i, size;
-	__le32 *prev_stats;
-	u32 *accum_stats;
-	u32 *delta, *max_delta;
-	struct statistics_general_common *general, *accum_general;
-	struct statistics_tx *tx, *accum_tx;
-
-	prev_stats = (__le32 *)&priv->_4965.statistics;
-	accum_stats = (u32 *)&priv->_4965.accum_statistics;
-	size = sizeof(struct iwl_notif_statistics);
-	general = &priv->_4965.statistics.general.common;
-	accum_general = &priv->_4965.accum_statistics.general.common;
-	tx = &priv->_4965.statistics.tx;
-	accum_tx = &priv->_4965.accum_statistics.tx;
-	delta = (u32 *)&priv->_4965.delta_statistics;
-	max_delta = (u32 *)&priv->_4965.max_delta;
-
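-	/*
-	 * The loop below starts at sizeof(__le32), skipping the leading
-	 * statistics flag word; deltas are accumulated only when a counter
-	 * has increased since the previous notification.
-	 */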
-	for (i = sizeof(__le32); i < size;
-	     i += sizeof(__le32), stats++, prev_stats++, delta++,
-	     max_delta++, accum_stats++) {
-		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-			*delta = (le32_to_cpu(*stats) -
-				le32_to_cpu(*prev_stats));
-			*accum_stats += *delta;
-			if (*delta > *max_delta)
-				*max_delta = *delta;
-		}
-	}
-
-	/* reset accumulative statistics for "no-counter" type statistics */
-	accum_general->temperature = general->temperature;
-	accum_general->ttl_timestamp = general->ttl_timestamp;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-void iwl4965_rx_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb)
-{
-	int change;
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-	IWL_DEBUG_RX(priv,
-		     "Statistics notification received (%d vs %d).\n",
-		     (int)sizeof(struct iwl_notif_statistics),
-		     le32_to_cpu(pkt->len_n_flags) &
-		     FH_RSCSR_FRAME_SIZE_MSK);
-
-	change = ((priv->_4965.statistics.general.common.temperature !=
-		   pkt->u.stats.general.common.temperature) ||
-		   ((priv->_4965.statistics.flag &
-		   STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-		   (pkt->u.stats.flag &
-		   STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-	iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
-	/* TODO: reading some of the statistics is unneeded */
-	memcpy(&priv->_4965.statistics, &pkt->u.stats,
-		sizeof(priv->_4965.statistics));
-
-	set_bit(STATUS_STATISTICS, &priv->status);
-
-	/* Reschedule the statistics timer to occur in
-	 * REG_RECALIB_PERIOD seconds to ensure we get a
-	 * thermal update even if the uCode doesn't give
-	 * us one */
-	mod_timer(&priv->statistics_periodic, jiffies +
-		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
-	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
-		iwl4965_rx_calc_noise(priv);
-		queue_work(priv->workqueue, &priv->run_time_calib_work);
-	}
-	if (priv->cfg->ops->lib->temp_ops.temperature && change)
-		priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl4965_reply_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-		memset(&priv->_4965.accum_statistics, 0,
-			sizeof(struct iwl_notif_statistics));
-		memset(&priv->_4965.delta_statistics, 0,
-			sizeof(struct iwl_notif_statistics));
-		memset(&priv->_4965.max_delta, 0,
-			sizeof(struct iwl_notif_statistics));
-#endif
-		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-	}
-	iwl4965_rx_statistics(priv, rxb);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-4965.h"
-
-static struct iwl_link_quality_cmd *
-iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
-{
-	int i, r;
-	struct iwl_link_quality_cmd *link_cmd;
-	u32 rate_flags = 0;
-	__le32 rate_n_flags;
-
-	link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
-	if (!link_cmd) {
-		IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
-		return NULL;
-	}
-	/* Set up the rate scaling to start at selected rate, fall back
-	 * all the way down to 1M in IEEE order, and then spin on 1M */
-	if (priv->band == IEEE80211_BAND_5GHZ)
-		r = IWL_RATE_6M_INDEX;
-	else
-		r = IWL_RATE_1M_INDEX;
-
-	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
-		rate_flags |= RATE_MCS_CCK_MSK;
-
-	rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
-				RATE_MCS_ANT_POS;
-	rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
-						   rate_flags);
-	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
-		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
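-	/*
-	 * Note: this default table repeats the same starting rate in every
-	 * retry slot; a graduated fallback ladder is only built up by
-	 * iwl4965_rs_fill_link_cmd() for stations handled by rate scaling.
-	 */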
-
-	link_cmd->general_params.single_stream_ant_msk =
-				iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
-	link_cmd->general_params.dual_stream_ant_msk =
-		priv->hw_params.valid_tx_ant &
-		~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-	if (!link_cmd->general_params.dual_stream_ant_msk) {
-		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
-	} else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
-		link_cmd->general_params.dual_stream_ant_msk =
-			priv->hw_params.valid_tx_ant;
-	}
-
-	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-	link_cmd->agg_params.agg_time_limit =
-		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
-	link_cmd->sta_id = sta_id;
-
-	return link_cmd;
-}
-
-/*
- * iwl4965_add_bssid_station - Add the special IBSS BSSID station
- *
- * Function sleeps.
- */
-int
-iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-			     const u8 *addr, u8 *sta_id_r)
-{
-	int ret;
-	u8 sta_id;
-	struct iwl_link_quality_cmd *link_cmd;
-	unsigned long flags;
-
-	if (sta_id_r)
-		*sta_id_r = IWL_INVALID_STATION;
-
-	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
-	if (ret) {
-		IWL_ERR(priv, "Unable to add station %pM\n", addr);
-		return ret;
-	}
-
-	if (sta_id_r)
-		*sta_id_r = sta_id;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].used |= IWL_STA_LOCAL;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	/* Set up default rate scaling table in device's station table */
-	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
-	if (!link_cmd) {
-		IWL_ERR(priv,
-			"Unable to initialize rate scaling for station %pM.\n",
-			addr);
-		return -ENOMEM;
-	}
-
-	ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
-	if (ret)
-		IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].lq = link_cmd;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return 0;
-}
-
-static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
-				      struct iwl_rxon_context *ctx,
-				      bool send_if_empty)
-{
-	int i, not_empty = 0;
-	u8 buff[sizeof(struct iwl_wep_cmd) +
-		sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
-	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
-	size_t cmd_size  = sizeof(struct iwl_wep_cmd);
-	struct iwl_host_cmd cmd = {
-		.id = ctx->wep_key_cmd,
-		.data = wep_cmd,
-		.flags = CMD_SYNC,
-	};
-
-	might_sleep();
-
-	memset(wep_cmd, 0, cmd_size +
-			(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
-
-	for (i = 0; i < WEP_KEYS_MAX ; i++) {
-		wep_cmd->key[i].key_index = i;
-		if (ctx->wep_keys[i].key_size) {
-			wep_cmd->key[i].key_offset = i;
-			not_empty = 1;
-		} else {
-			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
-		}
-
-		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
-		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
-				ctx->wep_keys[i].key_size);
-	}
-
-	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
-	wep_cmd->num_keys = WEP_KEYS_MAX;
-
-	cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
-
-	cmd.len = cmd_size;
-
-	if (not_empty || send_if_empty)
-		return iwl_legacy_send_cmd(priv, &cmd);
-	else
-		return 0;
-}
-
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
-				 struct iwl_rxon_context *ctx)
-{
-	lockdep_assert_held(&priv->mutex);
-
-	return iwl4965_static_wepkey_cmd(priv, ctx, false);
-}
-
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx,
-			       struct ieee80211_key_conf *keyconf)
-{
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
-		      keyconf->keyidx);
-
-	memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
-	if (iwl_legacy_is_rfkill(priv)) {
-		IWL_DEBUG_WEP(priv,
-		"Not sending REPLY_WEPKEY command due to RFKILL.\n");
-		/* but keys in device are clear anyway so return success */
-		return 0;
-	}
-	ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
-	IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
-		      keyconf->keyidx, ret);
-
-	return ret;
-}
-
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    struct ieee80211_key_conf *keyconf)
-{
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (keyconf->keylen != WEP_KEY_LEN_128 &&
-	    keyconf->keylen != WEP_KEY_LEN_64) {
-		IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
-		return -EINVAL;
-	}
-
-	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-	keyconf->hw_key_idx = HW_KEY_DEFAULT;
-	priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
-
-	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
-	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
-							keyconf->keylen);
-
-	ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
-	IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
-		keyconf->keylen, keyconf->keyidx, ret);
-
-	return ret;
-}
-
-static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
-					struct iwl_rxon_context *ctx,
-					struct ieee80211_key_conf *keyconf,
-					u8 sta_id)
-{
-	unsigned long flags;
-	__le16 key_flags = 0;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	lockdep_assert_held(&priv->mutex);
-
-	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
-	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
-	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-	key_flags &= ~STA_KEY_FLG_INVALID;
-
-	if (keyconf->keylen == WEP_KEY_LEN_128)
-		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
-	if (sta_id == ctx->bcast_sta_id)
-		key_flags |= STA_KEY_MULTICAST_MSK;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-	priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
-	memcpy(priv->stations[sta_id].keyinfo.key,
-				keyconf->key, keyconf->keylen);
-
-	memcpy(&priv->stations[sta_id].sta.key.key[3],
-				keyconf->key, keyconf->keylen);
-
-	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-			== STA_KEY_FLG_NO_ENC)
-		priv->stations[sta_id].sta.key.key_offset =
-				 iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
-	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-		"no space for a new key");
-
-	priv->stations[sta_id].sta.key.key_flags = key_flags;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-			sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
-					 struct iwl_rxon_context *ctx,
-					 struct ieee80211_key_conf *keyconf,
-					 u8 sta_id)
-{
-	unsigned long flags;
-	__le16 key_flags = 0;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	lockdep_assert_held(&priv->mutex);
-
-	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
-	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-	key_flags &= ~STA_KEY_FLG_INVALID;
-
-	if (sta_id == ctx->bcast_sta_id)
-		key_flags |= STA_KEY_MULTICAST_MSK;
-
-	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
-	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
-	       keyconf->keylen);
-
-	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
-	       keyconf->keylen);
-
-	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-			== STA_KEY_FLG_NO_ENC)
-		priv->stations[sta_id].sta.key.key_offset =
-				 iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
-	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-		"no space for a new key");
-
-	priv->stations[sta_id].sta.key.key_flags = key_flags;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-			 sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
-					 struct iwl_rxon_context *ctx,
-					 struct ieee80211_key_conf *keyconf,
-					 u8 sta_id)
-{
-	unsigned long flags;
-	int ret = 0;
-	__le16 key_flags = 0;
-
-	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
-	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-	key_flags &= ~STA_KEY_FLG_INVALID;
-
-	if (sta_id == ctx->bcast_sta_id)
-		key_flags |= STA_KEY_MULTICAST_MSK;
-
-	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-	priv->stations[sta_id].keyinfo.keylen = 16;
-
-	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-			== STA_KEY_FLG_NO_ENC)
-		priv->stations[sta_id].sta.key.key_offset =
-				 iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
-	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-		"no space for a new key");
-
-	priv->stations[sta_id].sta.key.key_flags = key_flags;
-
-
-	/* This copy is actually not needed: we get the key with each TX */
-	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
-
-	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return ret;
-}
-
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
-			 struct iwl_rxon_context *ctx,
-			 struct ieee80211_key_conf *keyconf,
-			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
-{
-	u8 sta_id;
-	unsigned long flags;
-	int i;
-
-	if (iwl_legacy_scan_cancel(priv)) {
-		/* cancel scan failed, just live w/ bad key and rely
-		   briefly on SW decryption */
-		return;
-	}
-
-	sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
-	if (sta_id == IWL_INVALID_STATION)
-		return;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
-	for (i = 0; i < 5; i++)
-		priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
-			cpu_to_le16(phase1key[i]);
-
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-	iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
-
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
-			   struct iwl_rxon_context *ctx,
-			   struct ieee80211_key_conf *keyconf,
-			   u8 sta_id)
-{
-	unsigned long flags;
-	u16 key_flags;
-	u8 keyidx;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	lockdep_assert_held(&priv->mutex);
-
-	ctx->key_mapping_keys--;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
-	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
-	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
-		      keyconf->keyidx, sta_id);
-
-	if (keyconf->keyidx != keyidx) {
-		/* We need to remove a key with an index different from the one
-		 * in the uCode. This means that the key we need to remove has
-		 * been replaced by another one with different index.
-		 * Don't do anything and return ok
-		 */
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-		return 0;
-	}
-
-	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
-		IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
-			    keyconf->keyidx, key_flags);
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-		return 0;
-	}
-
-	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
-		&priv->ucode_key_table))
-		IWL_ERR(priv, "index %d not used in uCode key table.\n",
-			priv->stations[sta_id].sta.key.key_offset);
-	memset(&priv->stations[sta_id].keyinfo, 0,
-					sizeof(struct iwl_hw_key));
-	memset(&priv->stations[sta_id].sta.key, 0,
-					sizeof(struct iwl4965_keyinfo));
-	priv->stations[sta_id].sta.key.key_flags =
-			STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-	priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-	if (iwl_legacy_is_rfkill(priv)) {
-		IWL_DEBUG_WEP(priv,
-		 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-		return 0;
-	}
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-			sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-			struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	ctx->key_mapping_keys++;
-	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
-	switch (keyconf->cipher) {
-	case WLAN_CIPHER_SUITE_CCMP:
-		ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
-							keyconf, sta_id);
-		break;
-	case WLAN_CIPHER_SUITE_TKIP:
-		ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
-							keyconf, sta_id);
-		break;
-	case WLAN_CIPHER_SUITE_WEP40:
-	case WLAN_CIPHER_SUITE_WEP104:
-		ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
-							keyconf, sta_id);
-		break;
-	default:
-		IWL_ERR(priv,
-			"Unknown alg: %s cipher = %x\n", __func__,
-			keyconf->cipher);
-		ret = -EINVAL;
-	}
-
-	IWL_DEBUG_WEP(priv,
-		"Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
-		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-		      sta_id, ret);
-
-	return ret;
-}
-
-/**
- * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
- *
- * This adds the broadcast station into the driver's station table
- * and marks it driver active, so that it will be restored to the
- * device at the next best time.
- */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx)
-{
-	struct iwl_link_quality_cmd *link_cmd;
-	unsigned long flags;
-	u8 sta_id;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
-								false, NULL);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Unable to prepare broadcast station\n");
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-		return -EINVAL;
-	}
-
-	priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
-	priv->stations[sta_id].used |= IWL_STA_BCAST;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
-	if (!link_cmd) {
-		IWL_ERR(priv,
-			"Unable to initialize rate scaling for bcast station.\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].lq = link_cmd;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return 0;
-}
-
-/**
- * iwl4965_update_bcast_station - update broadcast station's LQ command
- *
- * Only used by iwl4965. Placed here to have all bcast station management
- * code together.
- */
-static int iwl4965_update_bcast_station(struct iwl_priv *priv,
-				    struct iwl_rxon_context *ctx)
-{
-	unsigned long flags;
-	struct iwl_link_quality_cmd *link_cmd;
-	u8 sta_id = ctx->bcast_sta_id;
-
-	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
-	if (!link_cmd) {
-		IWL_ERR(priv,
-		"Unable to initialize rate scaling for bcast station.\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	if (priv->stations[sta_id].lq)
-		kfree(priv->stations[sta_id].lq);
-	else
-		IWL_DEBUG_INFO(priv,
-		"Bcast station rate scaling has not been initialized yet.\n");
-	priv->stations[sta_id].lq = link_cmd;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return 0;
-}
-
-int iwl4965_update_bcast_stations(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx;
-	int ret = 0;
-
-	for_each_context(priv, ctx) {
-		ret = iwl4965_update_bcast_station(priv, ctx);
-		if (ret)
-			break;
-	}
-
-	return ret;
-}
-
-/**
- * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
- */
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
-{
-	unsigned long flags;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	lockdep_assert_held(&priv->mutex);
-
-	/* Remove "disable" flag, to enable Tx for this TID */
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
-	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-					sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
-			 int tid, u16 ssn)
-{
-	unsigned long flags;
-	int sta_id;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	lockdep_assert_held(&priv->mutex);
-
-	sta_id = iwl_legacy_sta_id(sta);
-	if (sta_id == IWL_INVALID_STATION)
-		return -ENXIO;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].sta.station_flags_msk = 0;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
-	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
-	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-					sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
-			int tid)
-{
-	unsigned long flags;
-	int sta_id;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	lockdep_assert_held(&priv->mutex);
-
-	sta_id = iwl_legacy_sta_id(sta);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-		return -ENXIO;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].sta.station_flags_msk = 0;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
-	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-				sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-void
-iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
-	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
-	priv->stations[sta_id].sta.sta.modify_mask =
-					STA_MODIFY_SLEEP_TX_COUNT_MSK;
-	priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-	iwl_legacy_send_add_sta(priv,
-				&priv->stations[sta_id].sta, CMD_ASYNC);
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e36..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get from it
- * by way of skb_get_queue_mapping(skb):
- *
- *	VO	0
- *	VI	1
- *	BE	2
- *	BK	3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c), the AC->hw queue mapping is the identity
- * mapping.
- */
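-/*
- * For example, a QoS data frame with TID 6 maps to IEEE80211_AC_VO via
- * tid_to_ac[] below, and from there to the VO FIFO through the context's
- * ac_to_fifo[] table.
- */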
-
-static const u8 tid_to_ac[] = {
-	IEEE80211_AC_BE,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BE,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VO,
-	IEEE80211_AC_VO
-};
-
-static inline int iwl4965_get_ac_from_tid(u16 tid)
-{
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return tid_to_ac[tid];
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
-static inline int
-iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return ctx->ac_to_fifo[tid_to_ac[tid]];
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
-					struct sk_buff *skb,
-					struct iwl_tx_cmd *tx_cmd,
-					struct ieee80211_tx_info *info,
-					struct ieee80211_hdr *hdr,
-					u8 std_id)
-{
-	__le16 fc = hdr->frame_control;
-	__le32 tx_flags = tx_cmd->tx_flags;
-
-	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-		tx_flags |= TX_CMD_FLG_ACK_MSK;
-		if (ieee80211_is_mgmt(fc))
-			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-		if (ieee80211_is_probe_resp(fc) &&
-		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
-			tx_flags |= TX_CMD_FLG_TSF_MSK;
-	} else {
-		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-	}
-
-	if (ieee80211_is_back_req(fc))
-		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
-	tx_cmd->sta_id = std_id;
-	if (ieee80211_has_morefrags(fc))
-		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
-	if (ieee80211_is_data_qos(fc)) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tx_cmd->tid_tspec = qc[0] & 0xf;
-		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
-	} else {
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-	}
-
-	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
-	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
-	if (ieee80211_is_mgmt(fc)) {
-		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
-		else
-			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
-	} else {
-		tx_cmd->timeout.pm_frame_timeout = 0;
-	}
-
-	tx_cmd->driver_txop = 0;
-	tx_cmd->tx_flags = tx_flags;
-	tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT		60
-
-static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
-			      struct iwl_tx_cmd *tx_cmd,
-			      struct ieee80211_tx_info *info,
-			      __le16 fc)
-{
-	u32 rate_flags;
-	int rate_idx;
-	u8 rts_retry_limit;
-	u8 data_retry_limit;
-	u8 rate_plcp;
-
-	/* Set retry limit on DATA packets and Probe Responses*/
-	if (ieee80211_is_probe_resp(fc))
-		data_retry_limit = 3;
-	else
-		data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
-	tx_cmd->data_retry_limit = data_retry_limit;
-
-	/* Set retry limit on RTS packets */
-	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
-	if (data_retry_limit < rts_retry_limit)
-		rts_retry_limit = data_retry_limit;
-	tx_cmd->rts_retry_limit = rts_retry_limit;
-
-	/* DATA packets will use the uCode station table for rate/antenna
-	 * selection */
-	if (ieee80211_is_data(fc)) {
-		tx_cmd->initial_rate_index = 0;
-		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-		return;
-	}
-
-	/**
-	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
-	 * not really a TX rate.  Thus, we use the lowest supported rate for
-	 * this band.  Also use the lowest supported rate if the stored rate
-	 * index is invalid.
-	 */
-	rate_idx = info->control.rates[0].idx;
-	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
-			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
-		rate_idx = rate_lowest_index(&priv->bands[info->band],
-				info->control.sta);
-	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == IEEE80211_BAND_5GHZ)
-		rate_idx += IWL_FIRST_OFDM_RATE;
-	/* Get PLCP rate for tx_cmd->rate_n_flags */
-	rate_plcp = iwlegacy_rates[rate_idx].plcp;
-	/* Zero out flags for this packet */
-	rate_flags = 0;
-
-	/* Set CCK flag as needed */
-	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
-		rate_flags |= RATE_MCS_CCK_MSK;
-
-	/* Set up antennas */
-	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-				      priv->hw_params.valid_tx_ant);
-
-	rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-
-	/* Set the rate in the TX cmd */
-	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
-				      struct ieee80211_tx_info *info,
-				      struct iwl_tx_cmd *tx_cmd,
-				      struct sk_buff *skb_frag,
-				      int sta_id)
-{
-	struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
-	switch (keyconf->cipher) {
-	case WLAN_CIPHER_SUITE_CCMP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-		if (info->flags & IEEE80211_TX_CTL_AMPDU)
-			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
-		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
-		break;
-
-	case WLAN_CIPHER_SUITE_TKIP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
-		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
-		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
-		break;
-
-	case WLAN_CIPHER_SUITE_WEP104:
-		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-		/* fall through */
-	case WLAN_CIPHER_SUITE_WEP40:
-		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
-			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
-		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
-		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
-			     "with key %d\n", keyconf->keyidx);
-		break;
-
-	default:
-		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
-		break;
-	}
-}
-
-/*
- * Start the REPLY_TX command process.
- */
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
-	struct iwl_station_priv *sta_priv = NULL;
-	struct iwl_tx_queue *txq;
-	struct iwl_queue *q;
-	struct iwl_device_cmd *out_cmd;
-	struct iwl_cmd_meta *out_meta;
-	struct iwl_tx_cmd *tx_cmd;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	int txq_id;
-	dma_addr_t phys_addr;
-	dma_addr_t txcmd_phys;
-	dma_addr_t scratch_phys;
-	u16 len, firstlen, secondlen;
-	u16 seq_number = 0;
-	__le16 fc;
-	u8 hdr_len;
-	u8 sta_id;
-	u8 wait_write_ptr = 0;
-	u8 tid = 0;
-	u8 *qc = NULL;
-	unsigned long flags;
-	bool is_agg = false;
-
-	if (info->control.vif)
-		ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (iwl_legacy_is_rfkill(priv)) {
-		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-		goto drop_unlock;
-	}
-
-	fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (ieee80211_is_auth(fc))
-		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
-	else if (ieee80211_is_assoc_req(fc))
-		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
-	else if (ieee80211_is_reassoc_req(fc))
-		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
-	hdr_len = ieee80211_hdrlen(fc);
-
-	/* For management frames use broadcast id so as not to break aggregation */
-	if (!ieee80211_is_data(fc))
-		sta_id = ctx->bcast_sta_id;
-	else {
-		/* Find index into station table for destination station */
-		sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-
-		if (sta_id == IWL_INVALID_STATION) {
-			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-				       hdr->addr1);
-			goto drop_unlock;
-		}
-	}
-
-	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
-	if (sta)
-		sta_priv = (void *)sta->drv_priv;
-
-	if (sta_priv && sta_priv->asleep &&
-	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
-		/*
-		 * This sends an asynchronous command to the device,
-		 * but we can rely on it being processed before the
-		 * next frame is processed -- and the next frame to
-		 * this station is the one that will consume this
-		 * counter.
-		 * For now set the counter to just 1 since we do not
-		 * support uAPSD yet.
-		 */
-		iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
-	}
-
-	/*
-	 * Send this frame after DTIM -- there's a special queue
-	 * reserved for this for contexts that support AP mode.
-	 */
-	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
-		txq_id = ctx->mcast_queue;
-		/*
-		 * The microcode will clear the more data
-		 * bit in the last frame it transmits.
-		 */
-		hdr->frame_control |=
-			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	} else
-		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
-	/* irqs already disabled/saved above when locking priv->lock */
-	spin_lock(&priv->sta_lock);
-
-	if (ieee80211_is_data_qos(fc)) {
-		qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
-			spin_unlock(&priv->sta_lock);
-			goto drop_unlock;
-		}
-		seq_number = priv->stations[sta_id].tid[tid].seq_number;
-		seq_number &= IEEE80211_SCTL_SEQ;
-		hdr->seq_ctrl = hdr->seq_ctrl &
-				cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(seq_number);
-		seq_number += 0x10;
-		/* aggregation is on for this <sta,tid> */
-		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
-		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
-			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
-			is_agg = true;
-		}
-	}
-
-	txq = &priv->txq[txq_id];
-	q = &txq->q;
-
-	if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
-		spin_unlock(&priv->sta_lock);
-		goto drop_unlock;
-	}
-
-	if (ieee80211_is_data_qos(fc)) {
-		priv->stations[sta_id].tid[tid].tfds_in_queue++;
-		if (!ieee80211_has_morefrags(fc))
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
-	}
-
-	spin_unlock(&priv->sta_lock);
-
-	/* Set up driver data for this TFD */
-	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-	txq->txb[q->write_ptr].skb = skb;
-	txq->txb[q->write_ptr].ctx = ctx;
-
-	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_cmd = txq->cmd[q->write_ptr];
-	out_meta = &txq->meta[q->write_ptr];
-	tx_cmd = &out_cmd->cmd.tx;
-	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
-	/*
-	 * Set up the Tx-command (not MAC!) header.
-	 * Store the chosen Tx queue and TFD index within the sequence field;
-	 * after Tx, uCode's Tx response will return this value so driver can
-	 * locate the frame within the tx queue and do post-tx processing.
-	 */
-	out_cmd->hdr.cmd = REPLY_TX;
-	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-				INDEX_TO_SEQ(q->write_ptr)));
-
-	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
-	/* Total # bytes to be transmitted */
-	len = (u16)skb->len;
-	tx_cmd->len = cpu_to_le16(len);
-
-	if (info->control.hw_key)
-		iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
-	/* TODO need this for burst mode later on */
-	iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
-	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-
-	iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
-	iwl_legacy_update_stats(priv, true, fc, len);
-	/*
-	 * Use the first empty entry in this queue's command buffer array
-	 * to contain the Tx command and MAC header concatenated together
-	 * (payload data will be in another buffer).
-	 * Size of this varies, due to varying MAC header length.
-	 * If end is not dword aligned, we'll have 2 extra bytes at the end
-	 * of the MAC header (device reads on dword boundaries).
-	 * We'll tell device about this padding later.
-	 */
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
-	firstlen = (len + 3) & ~3;
-
-	/* Tell NIC about any 2-byte padding after MAC header */
-	if (firstlen != len)
-		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
-	/* Physical address of this Tx command's header (not MAC header!),
-	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev,
-				    &out_cmd->hdr, firstlen,
-				    PCI_DMA_BIDIRECTIONAL);
-	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, firstlen);
-	/* Add buffer containing Tx command and MAC(!) header to TFD's
-	 * first entry */
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   txcmd_phys, firstlen, 1, 0);
-
-	if (!ieee80211_has_morefrags(hdr->frame_control)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
-	/* Set up TFD's 2nd entry to point directly to remainder of skb,
-	 * if any (802.11 null frames have no payload). */
-	secondlen = skb->len - hdr_len;
-	if (secondlen > 0) {
-		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-					   secondlen, PCI_DMA_TODEVICE);
-		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-							   phys_addr, secondlen,
-							   0, 0);
-	}
-
-	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-				offsetof(struct iwl_tx_cmd, scratch);
-
-	/* take back ownership of DMA buffer to enable update */
-	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
-				    firstlen, PCI_DMA_BIDIRECTIONAL);
-	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
-
-	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
-		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
-	/* Set up entry for this TFD in Tx byte-count array */
-	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
-						     le16_to_cpu(tx_cmd->len));
-
-	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
-				       firstlen, PCI_DMA_BIDIRECTIONAL);
-
-	trace_iwlwifi_legacy_dev_tx(priv,
-			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
-			     &out_cmd->hdr, firstlen,
-			     skb->data + hdr_len, secondlen);
-
-	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_legacy_txq_update_write_ptr(priv, txq);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/*
-	 * At this point the frame is "transmitted" successfully
-	 * and we will get a TX status notification eventually,
-	 * regardless of the value of ret. "ret" only indicates
-	 * whether or not we should update the write pointer.
-	 */
-
-	/*
-	 * Avoid atomic ops if it isn't an associated client.
-	 * Also, if this is a packet for aggregation, don't
-	 * increase the counter because the ucode will stop
-	 * aggregation queues when their respective station
-	 * goes to sleep.
-	 */
-	if (sta_priv && sta_priv->client && !is_agg)
-		atomic_inc(&sta_priv->pending_frames);
-
-	if ((iwl_legacy_queue_space(q) < q->high_mark) &&
-			priv->mac80211_registered) {
-		if (wait_write_ptr) {
-			spin_lock_irqsave(&priv->lock, flags);
-			txq->need_update = 1;
-			iwl_legacy_txq_update_write_ptr(priv, txq);
-			spin_unlock_irqrestore(&priv->lock, flags);
-		} else {
-			iwl_legacy_stop_queue(priv, txq);
-		}
-	}
-
-	return 0;
-
-drop_unlock:
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return -1;
-}
-
-static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
-				    struct iwl_dma_ptr *ptr, size_t size)
-{
-	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
-				       GFP_KERNEL);
-	if (!ptr->addr)
-		return -ENOMEM;
-	ptr->size = size;
-	return 0;
-}
-
-static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
-				    struct iwl_dma_ptr *ptr)
-{
-	if (unlikely(!ptr->addr))
-		return;
-
-	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
-	memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwl4965_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
-{
-	int txq_id;
-
-	/* Tx queues */
-	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-			if (txq_id == priv->cmd_queue)
-				iwl_legacy_cmd_queue_free(priv);
-			else
-				iwl_legacy_tx_queue_free(priv, txq_id);
-	}
-	iwl4965_free_dma_ptr(priv, &priv->kw);
-
-	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
-	/* free tx queue structure */
-	iwl_legacy_txq_mem(priv);
-}
-
-/**
- * iwl4965_txq_ctx_alloc - allocate TX queue context
- *
- * Allocate all Tx DMA structures and initialize them.
- *
- * @priv: driver private data
- *
- * Return: 0 on success, error code otherwise
- */
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
-{
-	int ret;
-	int txq_id, slots_num;
-	unsigned long flags;
-
-	/* Free all tx/cmd queues and keep-warm buffer */
-	iwl4965_hw_txq_ctx_free(priv);
-
-	ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
-				priv->hw_params.scd_bc_tbls_size);
-	if (ret) {
-		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
-		goto error_bc_tbls;
-	}
-	/* Alloc keep-warm buffer */
-	ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
-	if (ret) {
-		IWL_ERR(priv, "Keep Warm allocation failed\n");
-		goto error_kw;
-	}
-
-	/* allocate tx queue structure */
-	ret = iwl_legacy_alloc_txq_mem(priv);
-	if (ret)
-		goto error;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Turn off all Tx DMA fifos */
-	iwl4965_txq_set_sched(priv, 0);
-
-	/* Tell NIC where to find the "keep warm" buffer */
-	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = (txq_id == priv->cmd_queue) ?
-					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		ret = iwl_legacy_tx_queue_init(priv,
-					&priv->txq[txq_id], slots_num,
-				       txq_id);
-		if (ret) {
-			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
-			goto error;
-		}
-	}
-
-	return ret;
-
- error:
-	iwl4965_hw_txq_ctx_free(priv);
-	iwl4965_free_dma_ptr(priv, &priv->kw);
- error_kw:
-	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
-	return ret;
-}
-
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
-{
-	int txq_id, slots_num;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Turn off all Tx DMA fifos */
-	iwl4965_txq_set_sched(priv, 0);
-
-	/* Tell NIC where to find the "keep warm" buffer */
-	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Alloc and init all Tx queues, including the command queue (#4) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = txq_id == priv->cmd_queue ?
-			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
-						slots_num, txq_id);
-	}
-}
-
-/**
- * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
-{
-	int ch, txq_id;
-	unsigned long flags;
-
-	/* Turn off all Tx DMA fifos */
-	spin_lock_irqsave(&priv->lock, flags);
-
-	iwl4965_txq_set_sched(priv, 0);
-
-	/* Stop each Tx DMA channel, and wait for it to be idle */
-	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
-		iwl_legacy_write_direct32(priv,
-				FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
-				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
-				    1000))
-			IWL_ERR(priv, "Failing on timeout while stopping"
-			    " DMA channel %d [0x%08x]", ch,
-			    iwl_legacy_read_direct32(priv,
-					FH_TSSR_TX_STATUS_REG));
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (!priv->txq)
-		return;
-
-	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == priv->cmd_queue)
-			iwl_legacy_cmd_queue_unmap(priv);
-		else
-			iwl_legacy_tx_queue_unmap(priv, txq_id);
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
-{
-	int txq_id;
-
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
-			return txq_id;
-	return -1;
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
-					    u16 txq_id)
-{
-	/* Simply stop the queue, but don't change any configuration;
-	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-	iwl_legacy_write_prph(priv,
-		IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-		(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
-		(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
-					u16 txq_id)
-{
-	u32 tbl_dw_addr;
-	u32 tbl_dw;
-	u16 scd_q2ratid;
-
-	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
-	tbl_dw_addr = priv->scd_base_addr +
-			IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
-	tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
-
-	if (txq_id & 0x1)
-		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
-	else
-		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
-	iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
-	return 0;
-}
-
-/**
- * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE:  txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
- *        i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
-	unsigned long flags;
-	u16 ra_tid;
-	int ret;
-
-	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL49_FIRST_AMPDU_QUEUE +
-		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-		IWL_WARN(priv,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWL49_FIRST_AMPDU_QUEUE,
-			IWL49_FIRST_AMPDU_QUEUE +
-			priv->cfg->base_params->num_of_ampdu_queues - 1);
-		return -EINVAL;
-	}
-
-	ra_tid = BUILD_RAxTID(sta_id, tid);
-
-	/* Modify device's station table to Tx this TID */
-	ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Stop this Tx queue before configuring it */
-	iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-	/* Map receiver-address / traffic-ID to this queue */
-	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
-	/* Set this queue as a chain-building queue */
-	iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-	/* Place first TFD at index corresponding to start sequence number.
-	 * Assumes that ssn_idx is valid (!= 0xFFF) */
-	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-	/* Set up Tx window size and frame limit for this queue */
-	iwl_legacy_write_targ_mem(priv,
-		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
-		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-	iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
-		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
-		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
-		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-	iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
-	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return 0;
-}
-
-
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
-			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
-	int sta_id;
-	int tx_fifo;
-	int txq_id;
-	int ret;
-	unsigned long flags;
-	struct iwl_tid_data *tid_data;
-
-	tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
-	if (unlikely(tx_fifo < 0))
-		return tx_fifo;
-
-	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
-			__func__, sta->addr, tid);
-
-	sta_id = iwl_legacy_sta_id(sta);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Start AGG on invalid station\n");
-		return -ENXIO;
-	}
-	if (unlikely(tid >= MAX_TID_COUNT))
-		return -EINVAL;
-
-	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
-		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
-		return -ENXIO;
-	}
-
-	txq_id = iwl4965_txq_ctx_activate_free(priv);
-	if (txq_id == -1) {
-		IWL_ERR(priv, "No free aggregation queue available\n");
-		return -ENXIO;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	tid_data = &priv->stations[sta_id].tid[tid];
-	*ssn = SEQ_TO_SN(tid_data->seq_number);
-	tid_data->agg.txq_id = txq_id;
-	iwl_legacy_set_swq_id(&priv->txq[txq_id],
-				iwl4965_get_ac_from_tid(tid), txq_id);
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
-						  sta_id, tid, *ssn);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	tid_data = &priv->stations[sta_id].tid[tid];
-	if (tid_data->tfds_in_queue == 0) {
-		IWL_DEBUG_HT(priv, "HW queue is empty\n");
-		tid_data->agg.state = IWL_AGG_ON;
-		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-	} else {
-		IWL_DEBUG_HT(priv,
-			"HW queue is NOT empty: %d packets in HW queue\n",
-			     tid_data->tfds_in_queue);
-		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
-	}
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-	return ret;
-}
-
-/**
- * iwl4965_txq_agg_disable - deactivate Tx aggregation queue
- *
- * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE.
- * priv->lock must be held by the caller.
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-				   u16 ssn_idx, u8 tx_fifo)
-{
-	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL49_FIRST_AMPDU_QUEUE +
-		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-		IWL_WARN(priv,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWL49_FIRST_AMPDU_QUEUE,
-			IWL49_FIRST_AMPDU_QUEUE +
-			priv->cfg->base_params->num_of_ampdu_queues - 1);
-		return -EINVAL;
-	}
-
-	iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-	iwl_legacy_clear_bits_prph(priv,
-			IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	/* assumes that ssn_idx is valid (!= 0xFFF) */
-	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-	iwl_legacy_clear_bits_prph(priv,
-			 IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-	iwl_txq_ctx_deactivate(priv, txq_id);
-	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
-	return 0;
-}
-
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta, u16 tid)
-{
-	int tx_fifo_id, txq_id, sta_id, ssn;
-	struct iwl_tid_data *tid_data;
-	int write_ptr, read_ptr;
-	unsigned long flags;
-
-	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
-	if (unlikely(tx_fifo_id < 0))
-		return tx_fifo_id;
-
-	sta_id = iwl_legacy_sta_id(sta);
-
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-		return -ENXIO;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	tid_data = &priv->stations[sta_id].tid[tid];
-	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
-	txq_id = tid_data->agg.txq_id;
-
-	switch (priv->stations[sta_id].tid[tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/*
-		 * This can happen if the peer stops aggregation
-		 * again before we've had a chance to drain the
-		 * queue we selected previously, i.e. before the
-		 * session was really started completely.
-		 */
-		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-		goto turn_off;
-	case IWL_AGG_ON:
-		break;
-	default:
-		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-	}
-
-	write_ptr = priv->txq[txq_id].q.write_ptr;
-	read_ptr = priv->txq[txq_id].q.read_ptr;
-
-	/* The queue is not empty */
-	if (write_ptr != read_ptr) {
-		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
-		priv->stations[sta_id].tid[tid].agg.state =
-				IWL_EMPTYING_HW_QUEUE_DELBA;
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-		return 0;
-	}
-
-	IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
-	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
-	/* do not restore/save irqs */
-	spin_unlock(&priv->sta_lock);
-	spin_lock(&priv->lock);
-
-	/*
-	 * The only reason this call can fail is a queue number out of range,
-	 * which can happen if the uCode is reloaded and all the station
-	 * information is lost. If it is outside the range, there is no need
-	 * to deactivate the uCode queue; just return "success" to allow
-	 * mac80211 to clean up its own data.
-	 */
-	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
-	return 0;
-}
-
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
-			   int sta_id, u8 tid, int txq_id)
-{
-	struct iwl_queue *q = &priv->txq[txq_id].q;
-	u8 *addr = priv->stations[sta_id].sta.sta.addr;
-	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
-	struct iwl_rxon_context *ctx;
-
-	ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
-	lockdep_assert_held(&priv->sta_lock);
-
-	switch (priv->stations[sta_id].tid[tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_DELBA:
-		/* We are reclaiming the last packet of the */
-		/* aggregated HW queue */
-		if ((txq_id  == tid_data->agg.txq_id) &&
-		    (q->read_ptr == q->write_ptr)) {
-			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
-			int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
-			IWL_DEBUG_HT(priv,
-				"HW queue empty: continue DELBA flow\n");
-			iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
-			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
-		}
-		break;
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/* We are reclaiming the last packet of the queue */
-		if (tid_data->tfds_in_queue == 0) {
-			IWL_DEBUG_HT(priv,
-				"HW queue empty: continue ADDBA flow\n");
-			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
-		}
-		break;
-	}
-
-	return 0;
-}
-
-static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
-				     struct iwl_rxon_context *ctx,
-				     const u8 *addr1)
-{
-	struct ieee80211_sta *sta;
-	struct iwl_station_priv *sta_priv;
-
-	rcu_read_lock();
-	sta = ieee80211_find_sta(ctx->vif, addr1);
-	if (sta) {
-		sta_priv = (void *)sta->drv_priv;
-		/* avoid atomic ops if this isn't a client */
-		if (sta_priv->client &&
-		    atomic_dec_return(&sta_priv->pending_frames) == 0)
-			ieee80211_sta_block_awake(priv->hw, sta, false);
-	}
-	rcu_read_unlock();
-}
-
-static void
-iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
-			     bool is_agg)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
-
-	if (!is_agg)
-		iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
-
-	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-}
-
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	struct iwl_tx_info *tx_info;
-	int nfreed = 0;
-	struct ieee80211_hdr *hdr;
-
-	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  index, q->n_bd, q->write_ptr, q->read_ptr);
-		return 0;
-	}
-
-	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
-	     q->read_ptr != index;
-	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-		tx_info = &txq->txb[txq->q.read_ptr];
-
-		if (WARN_ON_ONCE(tx_info->skb == NULL))
-			continue;
-
-		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-		if (ieee80211_is_data_qos(hdr->frame_control))
-			nfreed++;
-
-		iwl4965_tx_status(priv, tx_info,
-				 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
-		tx_info->skb = NULL;
-
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-	}
-	return nfreed;
-}
-
-/**
- * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
-				 struct iwl_ht_agg *agg,
-				 struct iwl_compressed_ba_resp *ba_resp)
-{
-	int i, sh, ack;
-	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
-	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-	int successes = 0;
-	struct ieee80211_tx_info *info;
-	u64 bitmap, sent_bitmap;
-
-	if (unlikely(!agg->wait_for_ba)) {
-		if (unlikely(ba_resp->bitmap))
-			IWL_ERR(priv, "Received BA when not expected\n");
-		return -EINVAL;
-	}
-
-	/* Mark that the expected block-ack response arrived */
-	agg->wait_for_ba = 0;
-	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
-							ba_resp->seq_ctl);
-
-	/* Calculate shift to align block-ack bits with our Tx window bits */
-	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
-	if (sh < 0) /* tbw something is wrong with indices */
-		sh += 0x100;
-
-	if (agg->frame_count > (64 - sh)) {
-		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
-		return -1;
-	}
-
-	/* don't use 64-bit values for now */
-	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
-	/* check for success or failure according to the
-	 * transmitted bitmap and block-ack bitmap */
-	sent_bitmap = bitmap & agg->bitmap;
-
-	/* For each frame attempted in aggregation,
-	 * update driver's record of tx frame's status. */
-	i = 0;
-	while (sent_bitmap) {
-		ack = sent_bitmap & 1ULL;
-		successes += ack;
-		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
-			ack ? "ACK" : "NACK", i,
-			(agg->start_idx + i) & 0xff,
-			agg->start_idx + i);
-		sent_bitmap >>= 1;
-		++i;
-	}
-
-	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
-				   (unsigned long long)bitmap);
-
-	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
-	memset(&info->status, 0, sizeof(info->status));
-	info->flags |= IEEE80211_TX_STAT_ACK;
-	info->flags |= IEEE80211_TX_STAT_AMPDU;
-	info->status.ampdu_ack_len = successes;
-	info->status.ampdu_len = agg->frame_count;
-	iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
-	return 0;
-}
-
-/**
- * iwl4965_hwrate_to_tx_control - translate ucode response to mac80211 tx status control values
- */
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
-				  struct ieee80211_tx_info *info)
-{
-	struct ieee80211_tx_rate *r = &info->control.rates[0];
-
-	info->antenna_sel_tx =
-		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
-	if (rate_n_flags & RATE_MCS_HT_MSK)
-		r->flags |= IEEE80211_TX_RC_MCS;
-	if (rate_n_flags & RATE_MCS_GF_MSK)
-		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-	if (rate_n_flags & RATE_MCS_HT40_MSK)
-		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-	if (rate_n_flags & RATE_MCS_DUP_MSK)
-		r->flags |= IEEE80211_TX_RC_DUP_DATA;
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
-		r->flags |= IEEE80211_TX_RC_SHORT_GI;
-	r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-
-/**
- * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
-					   struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
-	struct iwl_tx_queue *txq = NULL;
-	struct iwl_ht_agg *agg;
-	int index;
-	int sta_id;
-	int tid;
-	unsigned long flags;
-
-	/* "flow" corresponds to Tx queue */
-	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
-	/* "ssn" is start of block-ack Tx window, corresponds to index
-	 * (in Tx queue's circular buffer) of first TFD/frame in window */
-	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
-	if (scd_flow >= priv->hw_params.max_txq_num) {
-		IWL_ERR(priv,
-			"BUG_ON scd_flow is bigger than number of queues\n");
-		return;
-	}
-
-	txq = &priv->txq[scd_flow];
-	sta_id = ba_resp->sta_id;
-	tid = ba_resp->tid;
-	agg = &priv->stations[sta_id].tid[tid].agg;
-	if (unlikely(agg->txq_id != scd_flow)) {
-		/*
-		 * FIXME: this is a uCode bug which needs to be addressed;
-		 * log the information and return for now.
-		 * Since it can happen very often, and in order not to fill
-		 * the syslog, don't enable the logging by default.
-		 */
-		IWL_DEBUG_TX_REPLY(priv,
-			"BA scd_flow %d does not match txq_id %d\n",
-			scd_flow, agg->txq_id);
-		return;
-	}
-
-	/* Find index just before block-ack window */
-	index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
-			   "sta_id = %d\n",
-			   agg->wait_for_ba,
-			   (u8 *) &ba_resp->sta_addr_lo32,
-			   ba_resp->sta_id);
-	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
-			"scd_flow = "
-			   "%d, scd_ssn = %d\n",
-			   ba_resp->tid,
-			   ba_resp->seq_ctl,
-			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
-			   ba_resp->scd_flow,
-			   ba_resp->scd_ssn);
-	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
-			   agg->start_idx,
-			   (unsigned long long)agg->bitmap);
-
-	/* Update driver's record of ACK vs. not for each frame in window */
-	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
-	/* Release all TFDs before the SSN, i.e. all TFDs in front of
-	 * block-ack window (we assume that they've been successfully
-	 * transmitted ... if not, it's too late anyway). */
-	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
-		/* calculate mac80211 ampdu sw queue to wake */
-		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
-		iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-
-		if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
-		    priv->mac80211_registered &&
-		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-			iwl_legacy_wake_queue(priv, txq);
-
-		iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
-	}
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
-	switch (status & TX_STATUS_MSK) {
-	case TX_STATUS_SUCCESS:
-		return "SUCCESS";
-	TX_STATUS_POSTPONE(DELAY);
-	TX_STATUS_POSTPONE(FEW_BYTES);
-	TX_STATUS_POSTPONE(QUIET_PERIOD);
-	TX_STATUS_POSTPONE(CALC_TTAK);
-	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
-	TX_STATUS_FAIL(SHORT_LIMIT);
-	TX_STATUS_FAIL(LONG_LIMIT);
-	TX_STATUS_FAIL(FIFO_UNDERRUN);
-	TX_STATUS_FAIL(DRAIN_FLOW);
-	TX_STATUS_FAIL(RFKILL_FLUSH);
-	TX_STATUS_FAIL(LIFE_EXPIRE);
-	TX_STATUS_FAIL(DEST_PS);
-	TX_STATUS_FAIL(HOST_ABORTED);
-	TX_STATUS_FAIL(BT_RETRY);
-	TX_STATUS_FAIL(STA_INVALID);
-	TX_STATUS_FAIL(FRAG_DROPPED);
-	TX_STATUS_FAIL(TID_DISABLE);
-	TX_STATUS_FAIL(FIFO_FLUSHED);
-	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
-	TX_STATUS_FAIL(PASSIVE_NO_RX);
-	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
-	}
-
-	return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-4965-calib.h"
-
-#define IWL_AC_UNSET -1
-
-/**
- * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int
-iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-	u32 val;
-	int ret = 0;
-	u32 errcnt = 0;
-	u32 i;
-
-	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-			i + IWL4965_RTC_INST_LOWER_BOUND);
-		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			ret = -EIO;
-			errcnt++;
-			if (errcnt >= 3)
-				break;
-		}
-	}
-
-	return ret;
-}
-
-/**
- * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
- *     looking at all data.
- */
-static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
-				 u32 len)
-{
-	u32 val;
-	u32 save_len = len;
-	int ret = 0;
-	u32 errcnt;
-
-	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-			   IWL4965_RTC_INST_LOWER_BOUND);
-
-	errcnt = 0;
-	for (; len > 0; len -= sizeof(u32), image++) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "uCode INST section is invalid at "
-				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
-				  save_len - len, val, le32_to_cpu(*image));
-			ret = -EIO;
-			errcnt++;
-			if (errcnt >= 20)
-				break;
-		}
-	}
-
-	if (!errcnt)
-		IWL_DEBUG_INFO(priv,
-		    "ucode image in INSTRUCTION memory is good\n");
-
-	return ret;
-}
-
-/**
- * iwl4965_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-int iwl4965_verify_ucode(struct iwl_priv *priv)
-{
-	__le32 *image;
-	u32 len;
-	int ret;
-
-	/* Try bootstrap */
-	image = (__le32 *)priv->ucode_boot.v_addr;
-	len = priv->ucode_boot.len;
-	ret = iwl4965_verify_inst_sparse(priv, image, len);
-	if (!ret) {
-		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	/* Try initialize */
-	image = (__le32 *)priv->ucode_init.v_addr;
-	len = priv->ucode_init.len;
-	ret = iwl4965_verify_inst_sparse(priv, image, len);
-	if (!ret) {
-		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	/* Try runtime/protocol */
-	image = (__le32 *)priv->ucode_code.v_addr;
-	len = priv->ucode_code.len;
-	ret = iwl4965_verify_inst_sparse(priv, image, len);
-	if (!ret) {
-		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
-	/* Since nothing seems to match, show first several data entries in
-	 * instruction SRAM, so maybe visual inspection will give a clue.
-	 * Selection of bootstrap image (vs. other images) is arbitrary. */
-	image = (__le32 *)priv->ucode_boot.v_addr;
-	len = priv->ucode_boot.len;
-	ret = iwl4965_verify_inst_full(priv, image, len);
-
-	return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-4965-led.h"
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
-	__le32 *image = priv->ucode_boot.v_addr;
-	u32 len = priv->ucode_boot.len;
-	u32 reg;
-	u32 val;
-
-	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
-	/* verify BSM SRAM contents */
-	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
-	for (reg = BSM_SRAM_LOWER_BOUND;
-	     reg < BSM_SRAM_LOWER_BOUND + len;
-	     reg += sizeof(u32), image++) {
-		val = iwl_legacy_read_prph(priv, reg);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "BSM uCode verification failed at "
-				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
-				  BSM_SRAM_LOWER_BOUND,
-				  reg - BSM_SRAM_LOWER_BOUND, len,
-				  val, le32_to_cpu(*image));
-			return -EIO;
-		}
-	}
-
-	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
-	return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL.  When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers.  Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image.  This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program.  This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
-	__le32 *image = priv->ucode_boot.v_addr;
-	u32 len = priv->ucode_boot.len;
-	dma_addr_t pinst;
-	dma_addr_t pdata;
-	u32 inst_len;
-	u32 data_len;
-	int i;
-	u32 done;
-	u32 reg_offset;
-	int ret;
-
-	IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
-	priv->ucode_type = UCODE_RT;
-
-	/* make sure bootstrap program is no larger than BSM's SRAM size */
-	if (len > IWL49_MAX_BSM_SIZE)
-		return -EINVAL;
-
-	/* Tell bootstrap uCode where to find the "Initialize" uCode
-	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
-	 * NOTE:  iwl_init_alive_start() will replace these values,
-	 *        after the "initialize" uCode has run, to point to
-	 *        runtime/protocol instructions and backup data cache.
-	 */
-	pinst = priv->ucode_init.p_addr >> 4;
-	pdata = priv->ucode_init_data.p_addr >> 4;
-	inst_len = priv->ucode_init.len;
-	data_len = priv->ucode_init_data.len;
-
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
-	/* Fill BSM memory with bootstrap instructions */
-	for (reg_offset = BSM_SRAM_LOWER_BOUND;
-	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
-	     reg_offset += sizeof(u32), image++)
-		_iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
-	ret = iwl4965_verify_bsm(priv);
-	if (ret)
-		return ret;
-
-	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-	iwl_legacy_write_prph(priv,
-			BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
-	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
-	/* Load bootstrap code into instruction SRAM now,
-	 *   to prepare to load "initialize" uCode */
-	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
-	/* Wait for load of bootstrap uCode to finish */
-	for (i = 0; i < 100; i++) {
-		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
-		if (!(done & BSM_WR_CTRL_REG_BIT_START))
-			break;
-		udelay(10);
-	}
-	if (i < 100) {
-		IWL_DEBUG_INFO(priv,
-			"BSM write complete, poll %d iterations\n", i);
-	} else {
-		IWL_ERR(priv, "BSM write did not complete!\n");
-		return -EIO;
-	}
-
-	/* Enable future boot loads whenever power management unit triggers it
-	 *   (e.g. when powering back up after power-save shutdown) */
-	iwl_legacy_write_prph(priv,
-			BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
-	return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
-	dma_addr_t pinst;
-	dma_addr_t pdata;
-	int ret = 0;
-
-	/* bits 35:4 for 4965 */
-	pinst = priv->ucode_code.p_addr >> 4;
-	pdata = priv->ucode_data_backup.p_addr >> 4;
-
-	/* Tell bootstrap uCode where to find image to load */
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
-				 priv->ucode_data.len);
-
-	/* Inst byte count must be last to set up, bit 31 signals uCode
-	 *   that all new ptr/size info is in place */
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
-				 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-	IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
-	return ret;
-}
-
-/**
- * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
- *   (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
-	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
-	 * This is a paranoid check, because we would not have gotten the
-	 * "initialize" alive if code weren't properly loaded.  */
-	if (iwl4965_verify_ucode(priv)) {
-		/* Runtime instruction load was bad;
-		 * take it all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
-		goto restart;
-	}
-
-	/* Calculate temperature */
-	priv->temperature = iwl4965_hw_get_temperature(priv);
-
-	/* Send pointers to protocol/runtime uCode image ... init code will
-	 * load and launch runtime uCode, which will send us another "Alive"
-	 * notification. */
-	IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-	if (iwl4965_set_ucode_ptrs(priv)) {
-		/* Runtime instruction load won't happen;
-		 * take it all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
-		goto restart;
-	}
-	return;
-
-restart:
-	queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool iw4965_is_ht40_channel(__le32 rxon_flags)
-{
-	int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
-				    >> RXON_FLG_CHANNEL_MODE_POS;
-	return ((chan_mod == CHANNEL_MODE_PURE_40) ||
-		  (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	u16 radio_cfg;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
-	/* write radio config values to register */
-	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
-		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
-			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
-			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
-	/* set CSR_HW_CONFIG_REG for uCode use */
-	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
-	priv->calib_info = (struct iwl_eeprom_calib_info *)
-		iwl_legacy_eeprom_query_addr(priv,
-				EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- *  ... once chain noise is calibrated the first time, it's good forever.  */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
-	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
-	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
-	    iwl_legacy_is_any_associated(priv)) {
-		struct iwl_calib_diff_gain_cmd cmd;
-
-		/* clear data for chain noise calibration algorithm */
-		data->chain_noise_a = 0;
-		data->chain_noise_b = 0;
-		data->chain_noise_c = 0;
-		data->chain_signal_a = 0;
-		data->chain_signal_b = 0;
-		data->chain_signal_c = 0;
-		data->beacon_count = 0;
-
-		memset(&cmd, 0, sizeof(cmd));
-		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
-		cmd.diff_gain_a = 0;
-		cmd.diff_gain_b = 0;
-		cmd.diff_gain_c = 0;
-		if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-				 sizeof(cmd), &cmd))
-			IWL_ERR(priv,
-				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
-		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
-		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
-	}
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
-	.min_nrg_cck = 97,
-	.max_nrg_cck = 0, /* not used, set to 0 */
-
-	.auto_corr_min_ofdm = 85,
-	.auto_corr_min_ofdm_mrc = 170,
-	.auto_corr_min_ofdm_x1 = 105,
-	.auto_corr_min_ofdm_mrc_x1 = 220,
-
-	.auto_corr_max_ofdm = 120,
-	.auto_corr_max_ofdm_mrc = 210,
-	.auto_corr_max_ofdm_x1 = 140,
-	.auto_corr_max_ofdm_mrc_x1 = 270,
-
-	.auto_corr_min_cck = 125,
-	.auto_corr_max_cck = 200,
-	.auto_corr_min_cck_mrc = 200,
-	.auto_corr_max_cck_mrc = 400,
-
-	.nrg_th_cck = 100,
-	.nrg_th_ofdm = 100,
-
-	.barker_corr_th_min = 190,
-	.barker_corr_th_min_mrc = 390,
-	.nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
-	/* want Kelvin */
-	priv->hw_params.ct_kill_threshold =
-		CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params - set hardware parameters
- *
- * Called when initializing the driver.
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
-	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
-	    priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
-		priv->cfg->base_params->num_of_queues =
-			priv->cfg->mod_params->num_of_queues;
-
-	priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-	priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
-	priv->hw_params.scd_bc_tbls_size =
-			priv->cfg->base_params->num_of_queues *
-			sizeof(struct iwl4965_scd_bc_tbl);
-	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
-	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
-	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
-	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
-	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
-	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
-	priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
-	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
-	priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
-	priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
-	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
-	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
-	iwl4965_set_ct_threshold(priv);
-
-	priv->hw_params.sens = &iwl4965_sensitivity;
-	priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
-
-	return 0;
-}
-
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
-	s32 sign = 1;
-
-	if (num < 0) {
-		sign = -sign;
-		num = -num;
-	}
-	if (denom < 0) {
-		sign = -sign;
-		denom = -denom;
-	}
-	*res = 1;
-	*res = ((num * 2 + denom) / (denom * 2)) * sign;
-
-	return 1;
-}
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
-					    s32 current_voltage)
-{
-	s32 comp = 0;
-
-	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
-	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
-		return 0;
-
-	iwl4965_math_div_round(current_voltage - eeprom_voltage,
-			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
-	if (current_voltage > eeprom_voltage)
-		comp *= 2;
-	if ((comp < -2) || (comp > 2))
-		comp = 0;
-
-	return comp;
-}
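/*
 * Illustrative worked example (editorial, not part of the driver), assuming
 * TX_POWER_IWL_VOLTAGE_CODES_PER_03V is 7 (value assumed for the example):
 * eeprom_voltage = 340 and current_voltage = 347 give a rounded quotient of
 * 1, doubled to 2 because the current indication is higher (supply voltage
 * lower), so the caller subtracts 2 half-dB steps from the gain table index
 * (more gain).  With current_voltage = 354 the doubled result is 4, which
 * falls outside the [-2, 2] sanity range and is discarded (comp = 0).
 */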
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
-	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
-	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
-		return CALIB_CH_GROUP_5;
-
-	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
-	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
-		return CALIB_CH_GROUP_1;
-
-	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
-	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
-		return CALIB_CH_GROUP_2;
-
-	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
-	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
-		return CALIB_CH_GROUP_3;
-
-	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
-	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
-		return CALIB_CH_GROUP_4;
-
-	return -EINVAL;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
-	s32 b = -1;
-
-	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
-		if (priv->calib_info->band_info[b].ch_from == 0)
-			continue;
-
-		if ((channel >= priv->calib_info->band_info[b].ch_from)
-		    && (channel <= priv->calib_info->band_info[b].ch_to))
-			break;
-	}
-
-	return b;
-}
-
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
-	s32 val;
-
-	if (x2 == x1)
-		return y1;
-	else {
-		iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
-		return val + y2;
-	}
-}
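/*
 * Illustrative worked example (editorial, not part of the driver): the helper
 * above is a rounded linear interpolation,
 *
 *	y = y2 + round((x2 - x) * (y1 - y2) / (x2 - x1))
 *
 * e.g. for hypothetical factory points (x1 = 36, y1 = 10), (x2 = 64, y2 = 20)
 * and a channel of interest x = 44 it yields round(20 * -10 / 28) + 20 =
 * -7 + 20 = 13.
 */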
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest.  Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
-				    struct iwl_eeprom_calib_ch_info *chan_info)
-{
-	s32 s = -1;
-	u32 c;
-	u32 m;
-	const struct iwl_eeprom_calib_measure *m1;
-	const struct iwl_eeprom_calib_measure *m2;
-	struct iwl_eeprom_calib_measure *omeas;
-	u32 ch_i1;
-	u32 ch_i2;
-
-	s = iwl4965_get_sub_band(priv, channel);
-	if (s >= EEPROM_TX_POWER_BANDS) {
-		IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
-		return -1;
-	}
-
-	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
-	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
-	chan_info->ch_num = (u8) channel;
-
-	IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
-			  channel, s, ch_i1, ch_i2);
-
-	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
-		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
-			m1 = &(priv->calib_info->band_info[s].ch1.
-			       measurements[c][m]);
-			m2 = &(priv->calib_info->band_info[s].ch2.
-			       measurements[c][m]);
-			omeas = &(chan_info->measurements[c][m]);
-
-			omeas->actual_pow =
-			    (u8) iwl4965_interpolate_value(channel, ch_i1,
-							   m1->actual_pow,
-							   ch_i2,
-							   m2->actual_pow);
-			omeas->gain_idx =
-			    (u8) iwl4965_interpolate_value(channel, ch_i1,
-							   m1->gain_idx, ch_i2,
-							   m2->gain_idx);
-			omeas->temperature =
-			    (u8) iwl4965_interpolate_value(channel, ch_i1,
-							   m1->temperature,
-							   ch_i2,
-							   m2->temperature);
-			omeas->pa_det =
-			    (s8) iwl4965_interpolate_value(channel, ch_i1,
-							   m1->pa_det, ch_i2,
-							   m2->pa_det);
-
-			IWL_DEBUG_TXPOWER(priv,
-				"chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
-				m1->actual_pow, m2->actual_pow, omeas->actual_pow);
-			IWL_DEBUG_TXPOWER(priv,
-				"chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
-				m1->gain_idx, m2->gain_idx, omeas->gain_idx);
-			IWL_DEBUG_TXPOWER(priv,
-				"chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
-				m1->pa_det, m2->pa_det, omeas->pa_det);
-			IWL_DEBUG_TXPOWER(priv,
-				"chain %d meas %d  T1=%d  T2=%d  T=%d\n", c, m,
-				m1->temperature, m2->temperature,
-				omeas->temperature);
-		}
-	}
-
-	return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
-	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
-	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
-	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
-	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
-	10			/* CCK */
-};
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
-	s32 degrees_per_05db_a;
-	s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
-	{9, 2},			/* group 0 5.2, ch  34-43 */
-	{4, 1},			/* group 1 5.2, ch  44-70 */
-	{4, 1},			/* group 2 5.2, ch  71-124 */
-	{4, 1},			/* group 3 5.2, ch 125-200 */
-	{3, 1}			/* group 4 2.4, ch   all */
-};
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
-	if (!band) {
-		if ((rate_power_index & 7) <= 4)
-			return MIN_TX_GAIN_INDEX_52GHZ_EXT;
-	}
-	return MIN_TX_GAIN_INDEX;
-}
-
-struct gain_entry {
-	u8 dsp;
-	u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
-	/* 5.2GHz power gain index table */
-	{
-	 {123, 0x3F},		/* highest txpower */
-	 {117, 0x3F},
-	 {110, 0x3F},
-	 {104, 0x3F},
-	 {98, 0x3F},
-	 {110, 0x3E},
-	 {104, 0x3E},
-	 {98, 0x3E},
-	 {110, 0x3D},
-	 {104, 0x3D},
-	 {98, 0x3D},
-	 {110, 0x3C},
-	 {104, 0x3C},
-	 {98, 0x3C},
-	 {110, 0x3B},
-	 {104, 0x3B},
-	 {98, 0x3B},
-	 {110, 0x3A},
-	 {104, 0x3A},
-	 {98, 0x3A},
-	 {110, 0x39},
-	 {104, 0x39},
-	 {98, 0x39},
-	 {110, 0x38},
-	 {104, 0x38},
-	 {98, 0x38},
-	 {110, 0x37},
-	 {104, 0x37},
-	 {98, 0x37},
-	 {110, 0x36},
-	 {104, 0x36},
-	 {98, 0x36},
-	 {110, 0x35},
-	 {104, 0x35},
-	 {98, 0x35},
-	 {110, 0x34},
-	 {104, 0x34},
-	 {98, 0x34},
-	 {110, 0x33},
-	 {104, 0x33},
-	 {98, 0x33},
-	 {110, 0x32},
-	 {104, 0x32},
-	 {98, 0x32},
-	 {110, 0x31},
-	 {104, 0x31},
-	 {98, 0x31},
-	 {110, 0x30},
-	 {104, 0x30},
-	 {98, 0x30},
-	 {110, 0x25},
-	 {104, 0x25},
-	 {98, 0x25},
-	 {110, 0x24},
-	 {104, 0x24},
-	 {98, 0x24},
-	 {110, 0x23},
-	 {104, 0x23},
-	 {98, 0x23},
-	 {110, 0x22},
-	 {104, 0x18},
-	 {98, 0x18},
-	 {110, 0x17},
-	 {104, 0x17},
-	 {98, 0x17},
-	 {110, 0x16},
-	 {104, 0x16},
-	 {98, 0x16},
-	 {110, 0x15},
-	 {104, 0x15},
-	 {98, 0x15},
-	 {110, 0x14},
-	 {104, 0x14},
-	 {98, 0x14},
-	 {110, 0x13},
-	 {104, 0x13},
-	 {98, 0x13},
-	 {110, 0x12},
-	 {104, 0x08},
-	 {98, 0x08},
-	 {110, 0x07},
-	 {104, 0x07},
-	 {98, 0x07},
-	 {110, 0x06},
-	 {104, 0x06},
-	 {98, 0x06},
-	 {110, 0x05},
-	 {104, 0x05},
-	 {98, 0x05},
-	 {110, 0x04},
-	 {104, 0x04},
-	 {98, 0x04},
-	 {110, 0x03},
-	 {104, 0x03},
-	 {98, 0x03},
-	 {110, 0x02},
-	 {104, 0x02},
-	 {98, 0x02},
-	 {110, 0x01},
-	 {104, 0x01},
-	 {98, 0x01},
-	 {110, 0x00},
-	 {104, 0x00},
-	 {98, 0x00},
-	 {93, 0x00},
-	 {88, 0x00},
-	 {83, 0x00},
-	 {78, 0x00},
-	 },
-	/* 2.4GHz power gain index table */
-	{
-	 {110, 0x3f},		/* highest txpower */
-	 {104, 0x3f},
-	 {98, 0x3f},
-	 {110, 0x3e},
-	 {104, 0x3e},
-	 {98, 0x3e},
-	 {110, 0x3d},
-	 {104, 0x3d},
-	 {98, 0x3d},
-	 {110, 0x3c},
-	 {104, 0x3c},
-	 {98, 0x3c},
-	 {110, 0x3b},
-	 {104, 0x3b},
-	 {98, 0x3b},
-	 {110, 0x3a},
-	 {104, 0x3a},
-	 {98, 0x3a},
-	 {110, 0x39},
-	 {104, 0x39},
-	 {98, 0x39},
-	 {110, 0x38},
-	 {104, 0x38},
-	 {98, 0x38},
-	 {110, 0x37},
-	 {104, 0x37},
-	 {98, 0x37},
-	 {110, 0x36},
-	 {104, 0x36},
-	 {98, 0x36},
-	 {110, 0x35},
-	 {104, 0x35},
-	 {98, 0x35},
-	 {110, 0x34},
-	 {104, 0x34},
-	 {98, 0x34},
-	 {110, 0x33},
-	 {104, 0x33},
-	 {98, 0x33},
-	 {110, 0x32},
-	 {104, 0x32},
-	 {98, 0x32},
-	 {110, 0x31},
-	 {104, 0x31},
-	 {98, 0x31},
-	 {110, 0x30},
-	 {104, 0x30},
-	 {98, 0x30},
-	 {110, 0x6},
-	 {104, 0x6},
-	 {98, 0x6},
-	 {110, 0x5},
-	 {104, 0x5},
-	 {98, 0x5},
-	 {110, 0x4},
-	 {104, 0x4},
-	 {98, 0x4},
-	 {110, 0x3},
-	 {104, 0x3},
-	 {98, 0x3},
-	 {110, 0x2},
-	 {104, 0x2},
-	 {98, 0x2},
-	 {110, 0x1},
-	 {104, 0x1},
-	 {98, 0x1},
-	 {110, 0x0},
-	 {104, 0x0},
-	 {98, 0x0},
-	 {97, 0},
-	 {96, 0},
-	 {95, 0},
-	 {94, 0},
-	 {93, 0},
-	 {92, 0},
-	 {91, 0},
-	 {90, 0},
-	 {89, 0},
-	 {88, 0},
-	 {87, 0},
-	 {86, 0},
-	 {85, 0},
-	 {84, 0},
-	 {83, 0},
-	 {82, 0},
-	 {81, 0},
-	 {80, 0},
-	 {79, 0},
-	 {78, 0},
-	 {77, 0},
-	 {76, 0},
-	 {75, 0},
-	 {74, 0},
-	 {73, 0},
-	 {72, 0},
-	 {71, 0},
-	 {70, 0},
-	 {69, 0},
-	 {68, 0},
-	 {67, 0},
-	 {66, 0},
-	 {65, 0},
-	 {64, 0},
-	 {63, 0},
-	 {62, 0},
-	 {61, 0},
-	 {60, 0},
-	 {59, 0},
-	 }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
-				    u8 is_ht40, u8 ctrl_chan_high,
-				    struct iwl4965_tx_power_db *tx_power_tbl)
-{
-	u8 saturation_power;
-	s32 target_power;
-	s32 user_target_power;
-	s32 power_limit;
-	s32 current_temp;
-	s32 reg_limit;
-	s32 current_regulatory;
-	s32 txatten_grp = CALIB_CH_GROUP_MAX;
-	int i;
-	int c;
-	const struct iwl_channel_info *ch_info = NULL;
-	struct iwl_eeprom_calib_ch_info ch_eeprom_info;
-	const struct iwl_eeprom_calib_measure *measurement;
-	s16 voltage;
-	s32 init_voltage;
-	s32 voltage_compensation;
-	s32 degrees_per_05db_num;
-	s32 degrees_per_05db_denom;
-	s32 factory_temp;
-	s32 temperature_comp[2];
-	s32 factory_gain_index[2];
-	s32 factory_actual_pwr[2];
-	s32 power_index;
-
-	/* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
-	 *   are used for indexing into txpower table) */
-	user_target_power = 2 * priv->tx_power_user_lmt;
-
-	/* Get current (RXON) channel, band, width */
-	IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
-			  is_ht40);
-
-	ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
-
-	if (!iwl_legacy_is_channel_valid(ch_info))
-		return -EINVAL;
-
-	/* get txatten group, used to select 1) thermal txpower adjustment
-	 *   and 2) mimo txpower balance between Tx chains. */
-	txatten_grp = iwl4965_get_tx_atten_grp(channel);
-	if (txatten_grp < 0) {
-		IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
-			  channel);
-		return txatten_grp;
-	}
-
-	IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
-			  channel, txatten_grp);
-
-	if (is_ht40) {
-		if (ctrl_chan_high)
-			channel -= 2;
-		else
-			channel += 2;
-	}
-
-	/* hardware txpower limits ...
-	 * saturation (clipping distortion) txpowers are in half-dBm */
-	if (band)
-		saturation_power = priv->calib_info->saturation_power24;
-	else
-		saturation_power = priv->calib_info->saturation_power52;
-
-	if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
-	    saturation_power > IWL_TX_POWER_SATURATION_MAX) {
-		if (band)
-			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
-		else
-			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
-	}
-
-	/* regulatory txpower limits ... reg_limit values are in half-dBm,
-	 *   max_power_avg values are in dBm, convert * 2 */
-	if (is_ht40)
-		reg_limit = ch_info->ht40_max_power_avg * 2;
-	else
-		reg_limit = ch_info->max_power_avg * 2;
-
-	if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
-	    (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
-		if (band)
-			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
-		else
-			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
-	}
-
-	/* Interpolate txpower calibration values for this channel,
-	 *   based on factory calibration tests on spaced channels. */
-	iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
-	/* calculate tx gain adjustment based on power supply voltage */
-	voltage = le16_to_cpu(priv->calib_info->voltage);
-	init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
-	voltage_compensation =
-	    iwl4965_get_voltage_compensation(voltage, init_voltage);
-
-	IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
-			  init_voltage,
-			  voltage, voltage_compensation);
-
-	/* get current temperature (Celsius) */
-	current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
-	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
-	current_temp = KELVIN_TO_CELSIUS(current_temp);
-
-	/* select thermal txpower adjustment params, based on channel group
-	 *   (same frequency group used for mimo txatten adjustment) */
-	degrees_per_05db_num =
-	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
-	degrees_per_05db_denom =
-	    tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
-	/* get per-chain txpower values from factory measurements */
-	for (c = 0; c < 2; c++) {
-		measurement = &ch_eeprom_info.measurements[c][1];
-
-		/* txgain adjustment (in half-dB steps) based on difference
-		 *   between factory and current temperature */
-		factory_temp = measurement->temperature;
-		iwl4965_math_div_round((current_temp - factory_temp) *
-				       degrees_per_05db_denom,
-				       degrees_per_05db_num,
-				       &temperature_comp[c]);
-
-		factory_gain_index[c] = measurement->gain_idx;
-		factory_actual_pwr[c] = measurement->actual_pow;
-
-		IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
-		IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
-				  "curr tmp %d, comp %d steps\n",
-				  factory_temp, current_temp,
-				  temperature_comp[c]);
-
-		IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
-				  factory_gain_index[c],
-				  factory_actual_pwr[c]);
-	}
-
-	/* for each of 33 bit-rates (including 1 for CCK) */
-	for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
-		u8 is_mimo_rate;
-		union iwl4965_tx_power_dual_stream tx_power;
-
-		/* for mimo, reduce each chain's txpower by half
-		 * (3dB, 6 steps), so total output power is regulatory
-		 * compliant. */
-		if (i & 0x8) {
-			current_regulatory = reg_limit -
-			    IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
-			is_mimo_rate = 1;
-		} else {
-			current_regulatory = reg_limit;
-			is_mimo_rate = 0;
-		}
-
-		/* find txpower limit, either hardware or regulatory */
-		power_limit = saturation_power - back_off_table[i];
-		if (power_limit > current_regulatory)
-			power_limit = current_regulatory;
-
-		/* reduce user's txpower request if necessary
-		 * for this rate on this channel */
-		target_power = user_target_power;
-		if (target_power > power_limit)
-			target_power = power_limit;
-
-		IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
-				  i, saturation_power - back_off_table[i],
-				  current_regulatory, user_target_power,
-				  target_power);
-
-		/* for each of 2 Tx chains (radio transmitters) */
-		for (c = 0; c < 2; c++) {
-			s32 atten_value;
-
-			if (is_mimo_rate)
-				atten_value =
-				    (s32)le32_to_cpu(priv->card_alive_init.
-				    tx_atten[txatten_grp][c]);
-			else
-				atten_value = 0;
-
-			/* calculate index; higher index means lower txpower */
-			power_index = (u8) (factory_gain_index[c] -
-					    (target_power -
-					     factory_actual_pwr[c]) -
-					    temperature_comp[c] -
-					    voltage_compensation +
-					    atten_value);
-
-/*			IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
-						power_index); */
-
-			if (power_index < get_min_power_index(i, band))
-				power_index = get_min_power_index(i, band);
-
-			/* adjust 5 GHz index to support negative indexes */
-			if (!band)
-				power_index += 9;
-
-			/* CCK, rate 32, reduce txpower for CCK */
-			if (i == POWER_TABLE_CCK_ENTRY)
-				power_index +=
-				    IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
-			/* stay within the table! */
-			if (power_index > 107) {
-				IWL_WARN(priv, "txpower index %d > 107\n",
-					    power_index);
-				power_index = 107;
-			}
-			if (power_index < 0) {
-				IWL_WARN(priv, "txpower index %d < 0\n",
-					    power_index);
-				power_index = 0;
-			}
-
-			/* fill txpower command for this rate/chain */
-			tx_power.s.radio_tx_gain[c] =
-				gain_table[band][power_index].radio;
-			tx_power.s.dsp_predis_atten[c] =
-				gain_table[band][power_index].dsp;
-
-			IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
-					  "gain 0x%02x dsp %d\n",
-					  c, atten_value, power_index,
-					tx_power.s.radio_tx_gain[c],
-					tx_power.s.dsp_predis_atten[c]);
-		} /* for each chain */
-
-		tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
-	} /* for each rate */
-
-	return 0;
-}
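/*
 * Illustrative sketch (editorial, not part of the driver): the heart of the
 * per-chain calculation above, with the 5 GHz offset, per-band minimum and
 * CCK compensation omitted.  Everything is in half-dB steps and a larger
 * index means lower txpower; the helper name is hypothetical.
 */
static s32 example_txpower_index(s32 factory_gain_idx, s32 target_power,
				 s32 factory_actual_pwr, s32 temp_comp,
				 s32 volt_comp, s32 atten)
{
	s32 idx = factory_gain_idx - (target_power - factory_actual_pwr) -
		  temp_comp - volt_comp + atten;

	/* clamp to the 0..107 entries of the gain table */
	if (idx > 107)
		idx = 107;
	if (idx < 0)
		idx = 0;
	return idx;
}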
-
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (HT40, control channel high).
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
-	struct iwl4965_txpowertable_cmd cmd = { 0 };
-	int ret;
-	u8 band = 0;
-	bool is_ht40 = false;
-	u8 ctrl_chan_high = 0;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
-		      "TX Power requested while scanning!\n"))
-		return -EAGAIN;
-
-	band = priv->band == IEEE80211_BAND_2GHZ;
-
-	is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
-
-	if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
-		ctrl_chan_high = 1;
-
-	cmd.band = band;
-	cmd.channel = ctx->active.channel;
-
-	ret = iwl4965_fill_txpower_tbl(priv, band,
-				le16_to_cpu(ctx->active.channel),
-				is_ht40, ctrl_chan_high, &cmd.tx_power);
-	if (ret)
-		goto out;
-
-	ret = iwl_legacy_send_cmd_pdu(priv,
-			 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
-	return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
-				   struct iwl_rxon_context *ctx)
-{
-	int ret = 0;
-	struct iwl4965_rxon_assoc_cmd rxon_assoc;
-	const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
-	const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
-	if ((rxon1->flags == rxon2->flags) &&
-	    (rxon1->filter_flags == rxon2->filter_flags) &&
-	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
-	    (rxon1->ofdm_ht_single_stream_basic_rates ==
-	     rxon2->ofdm_ht_single_stream_basic_rates) &&
-	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
-	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
-	    (rxon1->rx_chain == rxon2->rx_chain) &&
-	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
-		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
-		return 0;
-	}
-
-	rxon_assoc.flags = ctx->staging.flags;
-	rxon_assoc.filter_flags = ctx->staging.filter_flags;
-	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
-	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
-	rxon_assoc.reserved = 0;
-	rxon_assoc.ofdm_ht_single_stream_basic_rates =
-	    ctx->staging.ofdm_ht_single_stream_basic_rates;
-	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
-	    ctx->staging.ofdm_ht_dual_stream_basic_rates;
-	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
-	ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
-				     sizeof(rxon_assoc), &rxon_assoc, NULL);
-
-	return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	/* cast away the const for active_rxon in this function */
-	struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
-	int ret;
-	bool new_assoc =
-		!!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EBUSY;
-
-	if (!ctx->is_active)
-		return 0;
-
-	/* always get timestamp with Rx frame */
-	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
-	ret = iwl_legacy_check_rxon_cmd(priv, ctx);
-	if (ret) {
-		IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * On receiving a commit_rxon request, abort any previous channel
-	 * switch that is still in progress.
-	 */
-	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
-	    (priv->switch_channel != ctx->staging.channel)) {
-		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-		      le16_to_cpu(priv->switch_channel));
-		iwl_legacy_chswitch_done(priv, false);
-	}
-
-	/* If we don't need to send a full RXON, we can use
-	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
-	 * and other flags for the current radio configuration. */
-	if (!iwl_legacy_full_rxon_required(priv, ctx)) {
-		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
-		if (ret) {
-			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
-			return ret;
-		}
-
-		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-		iwl_legacy_print_rx_config_cmd(priv, ctx);
-		/*
-		 * We do not commit tx power settings while channel changing,
-		 * do it now if tx power changed.
-		 */
-		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-		return 0;
-	}
-
-	/* If we are currently associated and the new config requires
-	 * an RXON_ASSOC and the new config wants the associated mask enabled,
-	 * we must clear the association flag from the active configuration
-	 * before we apply the new config */
-	if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
-		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
-				       sizeof(struct iwl_legacy_rxon_cmd),
-				       active_rxon);
-
-		/* If the mask clearing failed then we set
-		 * active_rxon back to what it was previously */
-		if (ret) {
-			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
-			return ret;
-		}
-		iwl_legacy_clear_ucode_stations(priv, ctx);
-		iwl_legacy_restore_stations(priv, ctx);
-		ret = iwl4965_restore_default_wep_keys(priv, ctx);
-		if (ret) {
-			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-			return ret;
-		}
-	}
-
-	IWL_DEBUG_INFO(priv, "Sending RXON\n"
-		       "* with%s RXON_FILTER_ASSOC_MSK\n"
-		       "* channel = %d\n"
-		       "* bssid = %pM\n",
-		       (new_assoc ? "" : "out"),
-		       le16_to_cpu(ctx->staging.channel),
-		       ctx->staging.bssid_addr);
-
-	iwl_legacy_set_rxon_hwcrypto(priv, ctx,
-				!priv->cfg->mod_params->sw_crypto);
-
-	/* Apply the new configuration
-	 * RXON unassoc clears the station table in uCode so restoration of
-	 * stations is needed after it (the RXON command) completes
-	 */
-	if (!new_assoc) {
-		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
-			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
-		if (ret) {
-			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-			return ret;
-		}
-		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
-		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-		iwl_legacy_clear_ucode_stations(priv, ctx);
-		iwl_legacy_restore_stations(priv, ctx);
-		ret = iwl4965_restore_default_wep_keys(priv, ctx);
-		if (ret) {
-			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-			return ret;
-		}
-	}
-	if (new_assoc) {
-		priv->start_calib = 0;
-		/* Apply the new configuration
-		 * RXON assoc doesn't clear the station table in uCode,
-		 */
-		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
-			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
-		if (ret) {
-			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-			return ret;
-		}
-		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-	}
-	iwl_legacy_print_rx_config_cmd(priv, ctx);
-
-	iwl4965_init_sensitivity(priv);
-
-	/* If we issue a new RXON command which requires a tune then we must
-	 * send a new TXPOWER command or we won't be able to Tx any frames */
-	ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
-	if (ret) {
-		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
-				     struct ieee80211_channel_switch *ch_switch)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	int rc;
-	u8 band = 0;
-	bool is_ht40 = false;
-	u8 ctrl_chan_high = 0;
-	struct iwl4965_channel_switch_cmd cmd;
-	const struct iwl_channel_info *ch_info;
-	u32 switch_time_in_usec, ucode_switch_time;
-	u16 ch;
-	u32 tsf_low;
-	u8 switch_count;
-	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
-	struct ieee80211_vif *vif = ctx->vif;
-	band = priv->band == IEEE80211_BAND_2GHZ;
-
-	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
-
-	if (is_ht40 &&
-	    (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
-		ctrl_chan_high = 1;
-
-	cmd.band = band;
-	cmd.expect_beacon = 0;
-	ch = ch_switch->channel->hw_value;
-	cmd.channel = cpu_to_le16(ch);
-	cmd.rxon_flags = ctx->staging.flags;
-	cmd.rxon_filter_flags = ctx->staging.filter_flags;
-	switch_count = ch_switch->count;
-	tsf_low = ch_switch->timestamp & 0x0ffffffff;
-	/*
-	 * calculate the ucode channel switch time
-	 * adding TSF as one of the factor for when to switch
-	 */
-	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
-		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
-		    beacon_interval)) {
-			switch_count -= (priv->ucode_beacon_time -
-				tsf_low) / beacon_interval;
-		} else
-			switch_count = 0;
-	}
-	if (switch_count <= 1)
-		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-	else {
-		switch_time_in_usec =
-			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-		ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
-							 switch_time_in_usec,
-							 beacon_interval);
-		cmd.switch_time = iwl_legacy_add_beacon_time(priv,
-						      priv->ucode_beacon_time,
-						      ucode_switch_time,
-						      beacon_interval);
-	}
-	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
-		      cmd.switch_time);
-	ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
-	if (ch_info)
-		cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
-	else {
-		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			ctx->active.channel, ch);
-		return -EFAULT;
-	}
-
-	rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
-				      ctrl_chan_high, &cmd.tx_power);
-	if (rc) {
-		IWL_DEBUG_11H(priv, "error:%d  fill txpower_tbl\n", rc);
-		return rc;
-	}
-
-	return iwl_legacy_send_cmd_pdu(priv,
-			 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
-					    struct iwl_tx_queue *txq,
-					    u16 byte_cnt)
-{
-	struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
-	int txq_id = txq->q.id;
-	int write_ptr = txq->q.write_ptr;
-	int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
-	__le16 bc_ent;
-
-	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
-	bc_ent = cpu_to_le16(len & 0xFFF);
-	/* Set up byte count within first 256 entries */
-	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
-	/* If within first 64 entries, duplicate at end */
-	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
-		scd_bc_tbl[txq_id].
-			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
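/*
 * Illustrative note (editorial, not part of the driver): per the comments
 * above, the byte-count table has 256 regular entries plus a copy of the
 * first 64 appended at the end, presumably so the scheduler can read a
 * contiguous window across the queue wrap point.  E.g. write_ptr = 10 stores
 * bc_ent at offsets 10 and 256 + 10 = 266, while write_ptr = 100 stores it
 * only at offset 100.
 */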
-
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @priv: driver data; the temperature reading comes from the uCode statistics
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
-	s32 temperature;
-	s32 vt;
-	s32 R1, R2, R3;
-	u32 R4;
-
-	if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
-	    (priv->_4965.statistics.flag &
-			STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
-		IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
-		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
-		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
-		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
-		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
-	} else {
-		IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
-		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
-		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
-		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
-		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
-	}
-
-	/*
-	 * Temperature is only 23 bits, so sign extend out to 32.
-	 *
-	 * NOTE If we haven't received a statistics notification yet
-	 * with an updated temperature, use R4 provided to us in the
-	 * "initialize" ALIVE response.
-	 */
-	if (!test_bit(STATUS_TEMPERATURE, &priv->status))
-		vt = sign_extend32(R4, 23);
-	else
-		vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
-				 general.common.temperature), 23);
-
-	IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
-	if (R3 == R1) {
-		IWL_ERR(priv, "Calibration conflict R1 == R3\n");
-		return -1;
-	}
-
-	/* Calculate temperature in kelvin, adjust by 97%.
-	 * Add offset to center the adjustment around 0 degrees Centigrade. */
-	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
-	temperature /= (R3 - R1);
-	temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
-
-	IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
-			temperature, KELVIN_TO_CELSIUS(temperature));
-
-	return temperature;
-}
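/*
 * Illustrative worked example (editorial, not part of the driver): the
 * conversion above is
 *
 *	T[K] = (TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1)) * 97 / 100
 *	       + TEMPERATURE_CALIB_KELVIN_OFFSET
 *
 * With hypothetical calibration values R1 = 100, R2 = 200, R3 = 600, a raw
 * reading vt = 750 and constants A = 259, offset = 8 (assumed for the
 * example), integer arithmetic gives 259 * 550 / 500 = 284, then
 * 284 * 97 / 100 + 8 = 283 K, i.e. about 10 degrees C.
 */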
-
-/* Adjust Txpower only if temperature variance is greater than threshold. */
-#define IWL_TEMPERATURE_THRESHOLD   3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, a recalibration is needed.
- *
- * Assumes caller will replace priv->last_temperature once calibration
- * executed.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
-	int temp_diff;
-
-	if (!test_bit(STATUS_STATISTICS, &priv->status)) {
-		IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
-		return 0;
-	}
-
-	temp_diff = priv->temperature - priv->last_temperature;
-
-	/* get absolute value */
-	if (temp_diff < 0) {
-		IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
-		temp_diff = -temp_diff;
-	} else if (temp_diff == 0)
-		IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
-	else
-		IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
-	if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
-		IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
-		return 0;
-	}
-
-	IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
-	return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
-	s32 temp;
-
-	temp = iwl4965_hw_get_temperature(priv);
-	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
-		return;
-
-	if (priv->temperature != temp) {
-		if (priv->temperature)
-			IWL_DEBUG_TEMP(priv, "Temperature changed "
-				       "from %dC to %dC\n",
-				       KELVIN_TO_CELSIUS(priv->temperature),
-				       KELVIN_TO_CELSIUS(temp));
-		else
-			IWL_DEBUG_TEMP(priv, "Temperature "
-				       "initialized to %dC\n",
-				       KELVIN_TO_CELSIUS(temp));
-	}
-
-	priv->temperature = temp;
-	set_bit(STATUS_TEMPERATURE, &priv->status);
-
-	if (!priv->disable_tx_power_cal &&
-	     unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-	     iwl4965_is_temp_calib_needed(priv))
-		queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
-	switch (cmd_id) {
-	case REPLY_RXON:
-		return (u16) sizeof(struct iwl4965_rxon_cmd);
-	default:
-		return len;
-	}
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
-								u8 *data)
-{
-	struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
-	addsta->mode = cmd->mode;
-	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
-	memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
-	addsta->station_flags = cmd->station_flags;
-	addsta->station_flags_msk = cmd->station_flags_msk;
-	addsta->tid_disable_tx = cmd->tid_disable_tx;
-	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
-	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
-	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-	addsta->sleep_tx_count = cmd->sleep_tx_count;
-	addsta->reserved1 = cpu_to_le16(0);
-	addsta->reserved2 = cpu_to_le16(0);
-
-	return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
-	return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
-				      struct iwl_ht_agg *agg,
-				      struct iwl4965_tx_resp *tx_resp,
-				      int txq_id, u16 start_idx)
-{
-	u16 status;
-	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
-	struct ieee80211_tx_info *info = NULL;
-	struct ieee80211_hdr *hdr = NULL;
-	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
-	int i, sh, idx;
-	u16 seq;
-	if (agg->wait_for_ba)
-		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
-	agg->frame_count = tx_resp->frame_count;
-	agg->start_idx = start_idx;
-	agg->rate_n_flags = rate_n_flags;
-	agg->bitmap = 0;
-
-	/* num frames attempted by Tx command */
-	if (agg->frame_count == 1) {
-		/* Only one frame was attempted; no block-ack will arrive */
-		status = le16_to_cpu(frame_status[0].status);
-		idx = start_idx;
-
-		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
-				   agg->frame_count, agg->start_idx, idx);
-
-		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
-		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-		info->flags |= iwl4965_tx_status_to_mac80211(status);
-		iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
-
-		IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
-				    status & 0xff, tx_resp->failure_frame);
-		IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
-		agg->wait_for_ba = 0;
-	} else {
-		/* Two or more frames were attempted; expect block-ack */
-		u64 bitmap = 0;
-		int start = agg->start_idx;
-
-		/* Construct bit-map of pending frames within Tx window */
-		for (i = 0; i < agg->frame_count; i++) {
-			u16 sc;
-			status = le16_to_cpu(frame_status[i].status);
-			seq  = le16_to_cpu(frame_status[i].sequence);
-			idx = SEQ_TO_INDEX(seq);
-			txq_id = SEQ_TO_QUEUE(seq);
-
-			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
-				      AGG_TX_STATE_ABORT_MSK))
-				continue;
-
-			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
-					   agg->frame_count, txq_id, idx);
-
-			hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
-			if (!hdr) {
-				IWL_ERR(priv,
-					"BUG_ON idx doesn't point to valid skb"
-					" idx=%d, txq_id=%d\n", idx, txq_id);
-				return -1;
-			}
-
-			sc = le16_to_cpu(hdr->seq_ctrl);
-			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
-				IWL_ERR(priv,
-					"BUG_ON idx doesn't match seq control"
-					" idx=%d, seq_idx=%d, seq=%d\n",
-					idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
-				return -1;
-			}
-
-			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
-					   i, idx, SEQ_TO_SN(sc));
-
-			sh = idx - start;
-			if (sh > 64) {
-				sh = (start - idx) + 0xff;
-				bitmap = bitmap << sh;
-				sh = 0;
-				start = idx;
-			} else if (sh < -64)
-				sh  = 0xff - (start - idx);
-			else if (sh < 0) {
-				sh = start - idx;
-				start = idx;
-				bitmap = bitmap << sh;
-				sh = 0;
-			}
-			bitmap |= 1ULL << sh;
-			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
-					   start, (unsigned long long)bitmap);
-		}
-
-		agg->bitmap = bitmap;
-		agg->start_idx = start;
-		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
-				   agg->frame_count, agg->start_idx,
-				   (unsigned long long)agg->bitmap);
-
-		if (bitmap)
-			agg->wait_for_ba = 1;
-	}
-	return 0;
-}
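/*
 * Illustrative sketch (editorial, not part of the driver): a stripped-down
 * view of the bitmap built above.  Each attempted frame sets the bit at its
 * offset from the window start so a later block-ack can be matched against
 * it; the wrap-around handling of the real code is ignored and the helper
 * name is hypothetical.
 */
static u64 example_agg_bitmap(const u16 *frame_idx, int n_frames, int start)
{
	u64 bitmap = 0;
	int i;

	for (i = 0; i < n_frames; i++)
		bitmap |= 1ULL << (frame_idx[i] - start);
	return bitmap;
}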
-
-static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
-{
-	int i;
-	int start = 0;
-	int ret = IWL_INVALID_STATION;
-	unsigned long flags;
-
-	if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
-		start = IWL_STA_ID;
-
-	if (is_broadcast_ether_addr(addr))
-		return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	for (i = start; i < priv->hw_params.max_stations; i++)
-		if (priv->stations[i].used &&
-		    (!compare_ether_addr(priv->stations[i].sta.sta.addr,
-					 addr))) {
-			ret = i;
-			goto out;
-		}
-
-	IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
-			      addr, priv->num_stations);
-
- out:
-	/*
-	 * It may be possible that more commands interacting with stations
-	 * arrive before we have finished processing the addition of the
-	 * station.
-	 */
-	if (ret != IWL_INVALID_STATION &&
-	    (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
-	     ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
-	      (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
-		IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
-			ret);
-		ret = IWL_INVALID_STATION;
-	}
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-	return ret;
-}
-
-static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
-	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
-		return IWL_AP_ID;
-	} else {
-		u8 *da = ieee80211_get_DA(hdr);
-		return iwl4965_find_station(priv, da);
-	}
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-	int txq_id = SEQ_TO_QUEUE(sequence);
-	int index = SEQ_TO_INDEX(sequence);
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct ieee80211_hdr *hdr;
-	struct ieee80211_tx_info *info;
-	struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
-	u32  status = le32_to_cpu(tx_resp->u.status);
-	int uninitialized_var(tid);
-	int sta_id;
-	int freed;
-	u8 *qc = NULL;
-	unsigned long flags;
-
-	if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-			  "is out of range [0-%d] %d %d\n", txq_id,
-			  index, txq->q.n_bd, txq->q.write_ptr,
-			  txq->q.read_ptr);
-		return;
-	}
-
-	txq->time_stamp = jiffies;
-	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
-	memset(&info->status, 0, sizeof(info->status));
-
-	hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
-		qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & 0xf;
-	}
-
-	sta_id = iwl4965_get_ra_sta_id(priv, hdr);
-	if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
-		IWL_ERR(priv, "Station not known\n");
-		return;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	if (txq->sched_retry) {
-		const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
-		struct iwl_ht_agg *agg = NULL;
-		WARN_ON(!qc);
-
-		agg = &priv->stations[sta_id].tid[tid].agg;
-
-		iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
-		/* check if BAR is needed */
-		if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
-			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
-		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
-			index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
-								txq->q.n_bd);
-			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
-					   "%d index %d\n", scd_ssn , index);
-			freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
-			if (qc)
-				iwl4965_free_tfds_in_queue(priv, sta_id,
-						       tid, freed);
-
-			if (priv->mac80211_registered &&
-			    (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
-				 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-				iwl_legacy_wake_queue(priv, txq);
-		}
-	} else {
-		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		info->flags |= iwl4965_tx_status_to_mac80211(status);
-		iwl4965_hwrate_to_tx_control(priv,
-					le32_to_cpu(tx_resp->rate_n_flags),
-					info);
-
-		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
-				   "rate_n_flags 0x%x retries %d\n",
-				   txq_id,
-				   iwl4965_get_tx_fail_reason(status), status,
-				   le32_to_cpu(tx_resp->rate_n_flags),
-				   tx_resp->failure_frame);
-
-		freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
-		if (qc && likely(sta_id != IWL_INVALID_STATION))
-			iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-		else if (sta_id == IWL_INVALID_STATION)
-			IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
-		if (priv->mac80211_registered &&
-		    (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
-			iwl_legacy_wake_queue(priv, txq);
-	}
-	if (qc && likely(sta_id != IWL_INVALID_STATION))
-		iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
-
-	iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
-				    struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
-	u8 rate __maybe_unused =
-		iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
-	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
-		"tsf:0x%.8x%.8x rate:%d\n",
-		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
-		beacon->beacon_notify_hdr.failure_frame,
-		le32_to_cpu(beacon->ibss_mgr_status),
-		le32_to_cpu(beacon->high_tsf),
-		le32_to_cpu(beacon->low_tsf), rate);
-
-	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
-	/* Legacy Rx frames */
-	priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
-	/* Tx response */
-	priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
-	.rxon_assoc = iwl4965_send_rxon_assoc,
-	.commit_rxon = iwl4965_commit_rxon,
-	.set_rxon_chain = iwl4965_set_rxon_chain,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	/*
-	 * Since setting the RXON may have been deferred while
-	 * performing the scan, fire one off if needed
-	 */
-	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-		iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct ieee80211_vif *vif = ctx->vif;
-	struct ieee80211_conf *conf = NULL;
-	int ret = 0;
-
-	if (!vif || !priv->is_open)
-		return;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	iwl_legacy_scan_cancel_timeout(priv, 200);
-
-	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
-	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	iwl_legacy_commit_rxon(priv, ctx);
-
-	ret = iwl_legacy_send_rxon_timing(priv, ctx);
-	if (ret)
-		IWL_WARN(priv, "RXON timing - "
-			    "Attempting to continue.\n");
-
-	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-	iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-
-	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
-	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-			vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
-	if (vif->bss_conf.use_short_preamble)
-		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-	else
-		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-		if (vif->bss_conf.use_short_slot)
-			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-		else
-			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-	}
-
-	iwl_legacy_commit_rxon(priv, ctx);
-
-	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-			vif->bss_conf.aid, ctx->active.bssid_addr);
-
-	switch (vif->type) {
-	case NL80211_IFTYPE_STATION:
-		break;
-	case NL80211_IFTYPE_ADHOC:
-		iwl4965_send_beacon_cmd(priv);
-		break;
-	default:
-		IWL_ERR(priv, "%s Should not be called in %d mode\n",
-			  __func__, vif->type);
-		break;
-	}
-
-	/* The chain noise calibration will enable PM upon completion.
-	 * If chain noise has already been run, then we need to enable
-	 * power management here. */
-	if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
-		iwl_legacy_power_update_mode(priv, false);
-
-	/* Enable Rx differential gain and sensitivity calibrations */
-	iwl4965_chain_noise_reset(priv);
-	priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct ieee80211_vif *vif = ctx->vif;
-	int ret = 0;
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	/* The following should be done only at AP bring up */
-	if (!iwl_legacy_is_associated_ctx(ctx)) {
-
-		/* RXON - unassoc (to set timing command) */
-		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-		iwl_legacy_commit_rxon(priv, ctx);
-
-		/* RXON Timing */
-		ret = iwl_legacy_send_rxon_timing(priv, ctx);
-		if (ret)
-			IWL_WARN(priv, "RXON timing failed - "
-					"Attempting to continue.\n");
-
-		/* AP has all antennas */
-		priv->chain_noise_data.active_chains =
-			priv->hw_params.valid_rx_ant;
-		iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-		ctx->staging.assoc_id = 0;
-
-		if (vif->bss_conf.use_short_preamble)
-			ctx->staging.flags |=
-				RXON_FLG_SHORT_PREAMBLE_MSK;
-		else
-			ctx->staging.flags &=
-				~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-			if (vif->bss_conf.use_short_slot)
-				ctx->staging.flags |=
-					RXON_FLG_SHORT_SLOT_MSK;
-			else
-				ctx->staging.flags &=
-					~RXON_FLG_SHORT_SLOT_MSK;
-		}
-		/* need to send beacon cmd before committing assoc RXON! */
-		iwl4965_send_beacon_cmd(priv);
-		/* restore RXON assoc */
-		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-		iwl_legacy_commit_rxon(priv, ctx);
-	}
-	iwl4965_send_beacon_cmd(priv);
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
-	.get_hcmd_size = iwl4965_get_hcmd_size,
-	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
-	.request_scan = iwl4965_request_scan,
-	.post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
-	.set_hw_params = iwl4965_hw_set_hw_params,
-	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
-	.txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
-	.txq_free_tfd = iwl4965_hw_txq_free_tfd,
-	.txq_init = iwl4965_hw_tx_queue_init,
-	.rx_handler_setup = iwl4965_rx_handler_setup,
-	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
-	.init_alive_start = iwl4965_init_alive_start,
-	.load_ucode = iwl4965_load_bsm,
-	.dump_nic_error_log = iwl4965_dump_nic_error_log,
-	.dump_fh = iwl4965_dump_fh,
-	.set_channel_switch = iwl4965_hw_channel_switch,
-	.apm_ops = {
-		.init = iwl_legacy_apm_init,
-		.config = iwl4965_nic_config,
-	},
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REGULATORY_BAND_1_CHANNELS,
-			EEPROM_REGULATORY_BAND_2_CHANNELS,
-			EEPROM_REGULATORY_BAND_3_CHANNELS,
-			EEPROM_REGULATORY_BAND_4_CHANNELS,
-			EEPROM_REGULATORY_BAND_5_CHANNELS,
-			EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
-			EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
-		},
-		.acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
-		.release_semaphore = iwl4965_eeprom_release_semaphore,
-	},
-	.send_tx_power	= iwl4965_send_tx_power,
-	.update_chain_flags = iwl4965_update_chain_flags,
-	.temp_ops = {
-		.temperature = iwl4965_temperature_calib,
-	},
-	.debugfs_ops = {
-		.rx_stats_read = iwl4965_ucode_rx_stats_read,
-		.tx_stats_read = iwl4965_ucode_tx_stats_read,
-		.general_stats_read = iwl4965_ucode_general_stats_read,
-	},
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
-	.post_associate = iwl4965_post_associate,
-	.config_ap = iwl4965_config_ap,
-	.manage_ibss_station = iwl4965_manage_ibss_station,
-	.update_bcast_stations = iwl4965_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
-	.tx = iwl4965_mac_tx,
-	.start = iwl4965_mac_start,
-	.stop = iwl4965_mac_stop,
-	.add_interface = iwl_legacy_mac_add_interface,
-	.remove_interface = iwl_legacy_mac_remove_interface,
-	.change_interface = iwl_legacy_mac_change_interface,
-	.config = iwl_legacy_mac_config,
-	.configure_filter = iwl4965_configure_filter,
-	.set_key = iwl4965_mac_set_key,
-	.update_tkip_key = iwl4965_mac_update_tkip_key,
-	.conf_tx = iwl_legacy_mac_conf_tx,
-	.reset_tsf = iwl_legacy_mac_reset_tsf,
-	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
-	.ampdu_action = iwl4965_mac_ampdu_action,
-	.hw_scan = iwl_legacy_mac_hw_scan,
-	.sta_add = iwl4965_mac_sta_add,
-	.sta_remove = iwl_legacy_mac_sta_remove,
-	.channel_switch = iwl4965_mac_channel_switch,
-	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
-	.lib = &iwl4965_lib,
-	.hcmd = &iwl4965_hcmd,
-	.utils = &iwl4965_hcmd_utils,
-	.led = &iwl4965_led_ops,
-	.legacy = &iwl4965_legacy_ops,
-	.ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
-	.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
-	.num_of_queues = IWL49_NUM_QUEUES,
-	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
-	.pll_cfg_val = 0,
-	.set_l0s = true,
-	.use_bsm = true,
-	.led_compensation = 61,
-	.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
-	.wd_timeout = IWL_DEF_WD_TIMEOUT,
-	.temperature_kelvin = true,
-	.ucode_tracing = true,
-	.sensitivity_calib_by_driver = true,
-	.chain_noise_calib_by_driver = true,
-};
-
-struct iwl_cfg iwl4965_cfg = {
-	.name = "Intel(R) Wireless WiFi Link 4965AGN",
-	.fw_name_pre = IWL4965_FW_PRE,
-	.ucode_api_max = IWL4965_UCODE_API_MAX,
-	.ucode_api_min = IWL4965_UCODE_API_MIN,
-	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-	.valid_tx_ant = ANT_AB,
-	.valid_rx_ant = ANT_ABC,
-	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
-	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
-	.ops = &iwl4965_ops,
-	.mod_params = &iwl4965_mod_params,
-	.base_params = &iwl4965_base_params,
-	.led_mode = IWL_LED_BLINK,
-	/*
-	 * Force use of chains B and C for scan RX on 5 GHz band
-	 * because the device has off-channel reception on chain A.
-	 */
-	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_4965_h__
-#define __iwl_4965_h__
-
-#include "iwl-dev.h"
-
-/* configuration for the _4965 devices */
-extern struct iwl_cfg iwl4965_cfg;
-
-extern struct iwl_mod_params iwl4965_mod_params;
-
-extern struct ieee80211_ops iwl4965_hw_ops;
-
-/* tx queue */
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
-			    int sta_id, int tid, int freed);
-
-/* RXON */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv,
-				struct iwl_rxon_context *ctx);
-
-/* uCode */
-int iwl4965_verify_ucode(struct iwl_priv *priv);
-
-/* lib */
-void iwl4965_check_abort_status(struct iwl_priv *priv,
-			    u8 frame_count, u32 status);
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_hw_nic_init(struct iwl_priv *priv);
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-
-/* rx */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv);
-void iwl4965_rx_replenish(struct iwl_priv *priv);
-void iwl4965_rx_replenish_now(struct iwl_priv *priv);
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rxq_stop(struct iwl_priv *priv);
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
-		     struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
-			 struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_handle(struct iwl_priv *priv);
-
-/* tx */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
-			 struct iwl_tx_queue *txq);
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
-			      struct ieee80211_tx_info *info);
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
-			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta, u16 tid);
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
-			   int sta_id, u8 tid, int txq_id);
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb);
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
-
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE:  Acquire priv->lock before calling this function !
- */
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
-					struct iwl_tx_queue *txq,
-					int tx_fifo_id, int scd_retry);
-
-static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
-{
-	status &= TX_STATUS_MSK;
-
-	switch (status) {
-	case TX_STATUS_SUCCESS:
-	case TX_STATUS_DIRECT_DONE:
-		return IEEE80211_TX_STAT_ACK;
-	case TX_STATUS_FAIL_DEST_PS:
-		return IEEE80211_TX_STAT_TX_FILTERED;
-	default:
-		return 0;
-	}
-}
-
-static inline bool iwl4965_is_tx_success(u32 status)
-{
-	status &= TX_STATUS_MSK;
-	return (status == TX_STATUS_SUCCESS) ||
-	       (status == TX_STATUS_DIRECT_DONE);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-
-/* rx */
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb);
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
-			  struct iwl_rx_packet *pkt);
-void iwl4965_rx_statistics(struct iwl_priv *priv,
-		       struct iwl_rx_mem_buffer *rxb);
-void iwl4965_reply_statistics(struct iwl_priv *priv,
-			  struct iwl_rx_mem_buffer *rxb);
-
-/* scan */
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-
-/* station mgmt */
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
-			       struct ieee80211_vif *vif, bool add);
-
-/* hcmd */
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status);
-#else
-static inline const char *
-iwl4965_get_tx_fail_reason(u32 status) { return ""; }
-#endif
-
-/* station management */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx);
-int iwl4965_add_bssid_station(struct iwl_priv *priv,
-				struct iwl_rxon_context *ctx,
-			     const u8 *addr, u8 *sta_id_r);
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx,
-			       struct ieee80211_key_conf *key);
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    struct ieee80211_key_conf *key);
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
-				 struct iwl_rxon_context *ctx);
-int iwl4965_set_dynamic_key(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			struct ieee80211_key_conf *key, u8 sta_id);
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			struct ieee80211_key_conf *key, u8 sta_id);
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
-			 struct iwl_rxon_context *ctx,
-			 struct ieee80211_key_conf *keyconf,
-			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
-			int sta_id, int tid);
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
-			 int tid, u16 ssn);
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
-			int tid);
-void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
-			int sta_id, int cnt);
-int iwl4965_update_bcast_stations(struct iwl_priv *priv);
-
-/* rate */
-static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
-{
-	return BIT(ant_idx) << RATE_MCS_ANT_POS;
-}
-
-static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
-{
-	return le32_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
-{
-	return cpu_to_le32(flags|(u32)rate);
-}
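/*
 * A minimal usage sketch of the three rate helpers above; the PLCP value
 * and antenna index are hypothetical and the function name is invented
 * for illustration only.
 */
static inline __le32 iwl4965_example_build_rate(void)
{
	u8 plcp = 0x0d;					/* hypothetical legacy PLCP rate */
	u32 ant_flags = iwl4965_ant_idx_to_flags(1);	/* BIT(1) << RATE_MCS_ANT_POS */
	__le32 rate_n_flags = iwl4965_hw_set_rate_n_flags(plcp, ant_flags);

	/* iwl4965_hw_get_rate(rate_n_flags) recovers the same PLCP value */
	return rate_n_flags;
}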
-
-/* eeprom */
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
-int  iwl4965_eeprom_check_version(struct iwl_priv *priv);
-
-/* mac80211 handlers (for 4965) */
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwl4965_mac_start(struct ieee80211_hw *hw);
-void iwl4965_mac_stop(struct ieee80211_hw *hw);
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
-			     unsigned int changed_flags,
-			     unsigned int *total_flags,
-			     u64 multicast);
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-		       struct ieee80211_key_conf *key);
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
-				struct ieee80211_vif *vif,
-				struct ieee80211_key_conf *keyconf,
-				struct ieee80211_sta *sta,
-				u32 iv32, u16 *phase1key);
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
-			    struct ieee80211_vif *vif,
-			    enum ieee80211_ampdu_mlme_action action,
-			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			    u8 buf_size);
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
-		       struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta);
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
-			       struct ieee80211_channel_switch *ch_switch);
-
-#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
-
-
-MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
-/*
- * If bt_coex_active is set to true, uCode will do kill/defer
- * every time the priority line is asserted (BT is sending signals on the
- * priority line in the PCIx).
- * If bt_coex_active is set to false, uCode will ignore the BT activity and
- * perform the normal operation.
- *
- * Users might experience transmit issues on some platforms due to a WiFi/BT
- * coexistence problem. The possible behaviors are:
- *   Able to scan and find all the available APs
- *   Not able to associate with any AP
- * On those platforms, WiFi communication can be restored by setting the
- * "bt_coex_active" module parameter to "false"
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-static bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
-
-u32 iwlegacy_debug_level;
-EXPORT_SYMBOL(iwlegacy_debug_level);
-
-const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwlegacy_bcast_addr);
-
-
-/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
-{
-	struct iwl_priv *priv;
-	/* mac80211 allocates memory for this device instance, including
-	 *   space for this driver's private structure */
-	struct ieee80211_hw *hw;
-
-	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
-				cfg->ops->ieee80211_ops);
-	if (hw == NULL) {
-		pr_err("%s: Can not allocate network device\n",
-		       cfg->name);
-		goto out;
-	}
-
-	priv = hw->priv;
-	priv->hw = hw;
-
-out:
-	return hw;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_all);
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
-			      struct ieee80211_sta_ht_cap *ht_info,
-			      enum ieee80211_band band)
-{
-	u16 max_bit_rate = 0;
-	u8 rx_chains_num = priv->hw_params.rx_chains_num;
-	u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
-	ht_info->cap = 0;
-	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
-	ht_info->ht_supported = true;
-
-	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-	max_bit_rate = MAX_BIT_RATE_20_MHZ;
-	if (priv->hw_params.ht40_channel & BIT(band)) {
-		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
-		ht_info->mcs.rx_mask[4] = 0x01;
-		max_bit_rate = MAX_BIT_RATE_40_MHZ;
-	}
-
-	if (priv->cfg->mod_params->amsdu_size_8K)
-		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
-	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
-	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
-	ht_info->mcs.rx_mask[0] = 0xFF;
-	if (rx_chains_num >= 2)
-		ht_info->mcs.rx_mask[1] = 0xFF;
-	if (rx_chains_num >= 3)
-		ht_info->mcs.rx_mask[2] = 0xFF;
-
-	/* Highest supported Rx data rate */
-	max_bit_rate *= rx_chains_num;
-	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
-	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
-	/* Tx MCS capabilities */
-	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-	if (tx_chains_num != rx_chains_num) {
-		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
-				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-	}
-}
-
-/**
- * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on eeprom
- */
-int iwl_legacy_init_geos(struct iwl_priv *priv)
-{
-	struct iwl_channel_info *ch;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_channel *channels;
-	struct ieee80211_channel *geo_ch;
-	struct ieee80211_rate *rates;
-	int i = 0;
-	s8 max_tx_power = 0;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
-	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
-		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
-		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-		return 0;
-	}
-
-	channels = kzalloc(sizeof(struct ieee80211_channel) *
-			   priv->channel_count, GFP_KERNEL);
-	if (!channels)
-		return -ENOMEM;
-
-	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
-			GFP_KERNEL);
-	if (!rates) {
-		kfree(channels);
-		return -ENOMEM;
-	}
-
-	/* 5.2GHz channels start after the 2.4GHz channels */
-	sband = &priv->bands[IEEE80211_BAND_5GHZ];
-	sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
-	/* just OFDM */
-	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
-	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
-	if (priv->cfg->sku & IWL_SKU_N)
-		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
-					 IEEE80211_BAND_5GHZ);
-
-	sband = &priv->bands[IEEE80211_BAND_2GHZ];
-	sband->channels = channels;
-	/* OFDM & CCK */
-	sband->bitrates = rates;
-	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
-	if (priv->cfg->sku & IWL_SKU_N)
-		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
-					 IEEE80211_BAND_2GHZ);
-
-	priv->ieee_channels = channels;
-	priv->ieee_rates = rates;
-
-	for (i = 0;  i < priv->channel_count; i++) {
-		ch = &priv->channel_info[i];
-
-		if (!iwl_legacy_is_channel_valid(ch))
-			continue;
-
-		sband = &priv->bands[ch->band];
-
-		geo_ch = &sband->channels[sband->n_channels++];
-
-		geo_ch->center_freq =
-			ieee80211_channel_to_frequency(ch->channel, ch->band);
-		geo_ch->max_power = ch->max_power_avg;
-		geo_ch->max_antenna_gain = 0xff;
-		geo_ch->hw_value = ch->channel;
-
-		if (iwl_legacy_is_channel_valid(ch)) {
-			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
-				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
-			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
-				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
-			if (ch->flags & EEPROM_CHANNEL_RADAR)
-				geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
-			geo_ch->flags |= ch->ht40_extension_channel;
-
-			if (ch->max_power_avg > max_tx_power)
-				max_tx_power = ch->max_power_avg;
-		} else {
-			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
-		}
-
-		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
-				ch->channel, geo_ch->center_freq,
-				iwl_legacy_is_channel_a_band(ch) ?  "5.2" : "2.4",
-				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
-				"restricted" : "valid",
-				 geo_ch->flags);
-	}
-
-	priv->tx_power_device_lmt = max_tx_power;
-	priv->tx_power_user_lmt = max_tx_power;
-	priv->tx_power_next = max_tx_power;
-
-	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
-	     priv->cfg->sku & IWL_SKU_A) {
-		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
-			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
-			   priv->pci_dev->device,
-			   priv->pci_dev->subsystem_device);
-		priv->cfg->sku &= ~IWL_SKU_A;
-	}
-
-	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
-		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
-		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
-	set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_geos);
-
-/*
- * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
- */
-void iwl_legacy_free_geos(struct iwl_priv *priv)
-{
-	kfree(priv->ieee_channels);
-	kfree(priv->ieee_rates);
-	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_free_geos);
-
-static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
-				     enum ieee80211_band band,
-				     u16 channel, u8 extension_chan_offset)
-{
-	const struct iwl_channel_info *ch_info;
-
-	ch_info = iwl_legacy_get_channel_info(priv, band, channel);
-	if (!iwl_legacy_is_channel_valid(ch_info))
-		return false;
-
-	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
-		return !(ch_info->ht40_extension_channel &
-					IEEE80211_CHAN_NO_HT40PLUS);
-	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
-		return !(ch_info->ht40_extension_channel &
-					IEEE80211_CHAN_NO_HT40MINUS);
-
-	return false;
-}
-
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    struct ieee80211_sta_ht_cap *ht_cap)
-{
-	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
-		return false;
-
-	/*
-	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-	 * the bit will not be set in the pure 40MHz case
-	 */
-	if (ht_cap && !ht_cap->ht_supported)
-		return false;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-	if (priv->disable_ht40)
-		return false;
-#endif
-
-	return iwl_legacy_is_channel_extension(priv, priv->band,
-			le16_to_cpu(ctx->staging.channel),
-			ctx->ht.extension_chan_offset);
-}
-EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
-
-static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
-	u16 new_val;
-	u16 beacon_factor;
-
-	/*
-	 * If mac80211 hasn't given us a beacon interval, program
-	 * the default into the device.
-	 */
-	if (!beacon_val)
-		return DEFAULT_BEACON_INTERVAL;
-
-	/*
-	 * If the beacon interval we obtained from the peer
-	 * is too large, we'll have to wake up more often
-	 * (and in IBSS case, we'll beacon too much)
-	 *
-	 * For example, if max_beacon_val is 4096, and the
-	 * requested beacon interval is 7000, we'll have to
-	 * use 3500 to be able to wake up on the beacons.
-	 *
-	 * This could badly influence beacon detection stats.
-	 */
-
-	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
-	new_val = beacon_val / beacon_factor;
-
-	if (!new_val)
-		new_val = max_beacon_val;
-
-	return new_val;
-}
-
-int
-iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	u64 tsf;
-	s32 interval_tm, rem;
-	struct ieee80211_conf *conf = NULL;
-	u16 beacon_int;
-	struct ieee80211_vif *vif = ctx->vif;
-
-	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
-	lockdep_assert_held(&priv->mutex);
-
-	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
-	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
-	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
-	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
-	/*
-	 * TODO: For IBSS we need to get atim_window from mac80211,
-	 *	 for now just always use 0
-	 */
-	ctx->timing.atim_window = 0;
-
-	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
-			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
-	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-
-	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
-	interval_tm = beacon_int * TIME_UNIT;
-	rem = do_div(tsf, interval_tm);
-	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
-	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
-	IWL_DEBUG_ASSOC(priv,
-			"beacon interval %d beacon timer %d beacon tim %d\n",
-			le16_to_cpu(ctx->timing.beacon_interval),
-			le32_to_cpu(ctx->timing.beacon_init_val),
-			le16_to_cpu(ctx->timing.atim_window));
-
-	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
-				sizeof(ctx->timing), &ctx->timing);
-}
-EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
-
-void
-iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
-				struct iwl_rxon_context *ctx,
-				int hw_decrypt)
-{
-	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
-	if (hw_decrypt)
-		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
-	else
-		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
-
-/* validate RXON structure is valid */
-int
-iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-	bool error = false;
-
-	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
-		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
-			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
-			error = true;
-		}
-		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
-			IWL_WARN(priv, "check 2.4G: wrong radar\n");
-			error = true;
-		}
-	} else {
-		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
-			IWL_WARN(priv, "check 5.2G: not short slot!\n");
-			error = true;
-		}
-		if (rxon->flags & RXON_FLG_CCK_MSK) {
-			IWL_WARN(priv, "check 5.2G: CCK!\n");
-			error = true;
-		}
-	}
-	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
-		IWL_WARN(priv, "mac/bssid mcast!\n");
-		error = true;
-	}
-
-	/* make sure basic rates 6Mbps and 1Mbps are supported */
-	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
-	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
-		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
-		error = true;
-	}
-
-	if (le16_to_cpu(rxon->assoc_id) > 2007) {
-		IWL_WARN(priv, "aid > 2007\n");
-		error = true;
-	}
-
-	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
-			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
-		IWL_WARN(priv, "CCK and short slot\n");
-		error = true;
-	}
-
-	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
-			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
-		IWL_WARN(priv, "CCK and auto detect");
-		error = true;
-	}
-
-	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
-			    RXON_FLG_TGG_PROTECT_MSK)) ==
-			    RXON_FLG_TGG_PROTECT_MSK) {
-		IWL_WARN(priv, "TGg but no auto-detect\n");
-		error = true;
-	}
-
-	if (error)
-		IWL_WARN(priv, "Tuning to channel %d\n",
-			    le16_to_cpu(rxon->channel));
-
-	if (error) {
-		IWL_ERR(priv, "Invalid RXON\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
-
-/**
- * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
-			   struct iwl_rxon_context *ctx)
-{
-	const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
-	const struct iwl_legacy_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond)							\
-	if ((cond)) {							\
-		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
-		return 1;						\
-	}
-
-#define CHK_NEQ(c1, c2)						\
-	if ((c1) != (c2)) {					\
-		IWL_DEBUG_INFO(priv, "need full RXON - "	\
-			       #c1 " != " #c2 " - %d != %d\n",	\
-			       (c1), (c2));			\
-		return 1;					\
-	}
-
-	/* These items are only settable from the full RXON command */
-	CHK(!iwl_legacy_is_associated_ctx(ctx));
-	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
-	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
-	CHK(compare_ether_addr(staging->wlap_bssid_addr,
-				active->wlap_bssid_addr));
-	CHK_NEQ(staging->dev_type, active->dev_type);
-	CHK_NEQ(staging->channel, active->channel);
-	CHK_NEQ(staging->air_propagation, active->air_propagation);
-	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
-		active->ofdm_ht_single_stream_basic_rates);
-	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
-		active->ofdm_ht_dual_stream_basic_rates);
-	CHK_NEQ(staging->assoc_id, active->assoc_id);
-
-	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
-	 * be updated with the RXON_ASSOC command -- however only some
-	 * flag transitions are allowed using RXON_ASSOC */
-
-	/* Check if we are not switching bands */
-	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
-		active->flags & RXON_FLG_BAND_24G_MSK);
-
-	/* Check if we are switching association toggle */
-	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
-		active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx)
-{
-	/*
-	 * Assign the lowest rate -- should really get this from
-	 * the beacon skb from mac80211.
-	 */
-	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
-		return IWL_RATE_1M_PLCP;
-	else
-		return IWL_RATE_6M_PLCP;
-}
-EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
-
-static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
-			     struct iwl_ht_config *ht_conf,
-			     struct iwl_rxon_context *ctx)
-{
-	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
-	if (!ctx->ht.enabled) {
-		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
-			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
-			RXON_FLG_HT40_PROT_MSK |
-			RXON_FLG_HT_PROT_MSK);
-		return;
-	}
-
-	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
-					RXON_FLG_HT_OPERATING_MODE_POS);
-
-	/* Set up channel bandwidth:
-	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
-	/* clear the HT channel mode before set the mode */
-	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
-			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
-		/* pure ht40 */
-		if (ctx->ht.protection ==
-				IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
-			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
-			/* Note: control channel is opposite of extension channel */
-			switch (ctx->ht.extension_chan_offset) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-				rxon->flags &=
-					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-				rxon->flags |=
-					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-				break;
-			}
-		} else {
-			/* Note: control channel is opposite of extension channel */
-			switch (ctx->ht.extension_chan_offset) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-				rxon->flags &=
-					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-				rxon->flags |=
-					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
-			default:
-				/* channel location only valid if in Mixed mode */
-				IWL_ERR(priv,
-					"invalid extension channel offset\n");
-				break;
-			}
-		}
-	} else {
-		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
-	}
-
-	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
-			"extension channel offset 0x%x\n",
-			le32_to_cpu(rxon->flags), ctx->ht.protection,
-			ctx->ht.extension_chan_offset);
-}
-
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
-	struct iwl_rxon_context *ctx;
-
-	for_each_context(priv, ctx)
-		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
-
-/* Return valid, unused, channel for a passive scan to reset the RF */
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
-				 enum ieee80211_band band)
-{
-	const struct iwl_channel_info *ch_info;
-	int i;
-	u8 channel = 0;
-	u8 min, max;
-	struct iwl_rxon_context *ctx;
-
-	if (band == IEEE80211_BAND_5GHZ) {
-		min = 14;
-		max = priv->channel_count;
-	} else {
-		min = 0;
-		max = 14;
-	}
-
-	for (i = min; i < max; i++) {
-		bool busy = false;
-
-		for_each_context(priv, ctx) {
-			busy = priv->channel_info[i].channel ==
-				le16_to_cpu(ctx->staging.channel);
-			if (busy)
-				break;
-		}
-
-		if (busy)
-			continue;
-
-		channel = priv->channel_info[i].channel;
-		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
-		if (iwl_legacy_is_channel_valid(ch_info))
-			break;
-	}
-
-	return channel;
-}
-EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
-
-/**
- * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
- *
- * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-int
-iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
-			 struct iwl_rxon_context *ctx)
-{
-	enum ieee80211_band band = ch->band;
-	u16 channel = ch->hw_value;
-
-	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
-	    (priv->band == band))
-		return 0;
-
-	ctx->staging.channel = cpu_to_le16(channel);
-	if (band == IEEE80211_BAND_5GHZ)
-		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
-	else
-		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
-	priv->band = band;
-
-	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
-
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    enum ieee80211_band band,
-			    struct ieee80211_vif *vif)
-{
-	if (band == IEEE80211_BAND_5GHZ) {
-		ctx->staging.flags &=
-		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
-		      | RXON_FLG_CCK_MSK);
-		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-	} else {
-		/* Copied from iwl_post_associate() */
-		if (vif && vif->bss_conf.use_short_slot)
-			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-		else
-			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
-		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
-		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
-				   struct iwl_rxon_context *ctx)
-{
-	const struct iwl_channel_info *ch_info;
-
-	memset(&ctx->staging, 0, sizeof(ctx->staging));
-
-	if (!ctx->vif) {
-		ctx->staging.dev_type = ctx->unused_devtype;
-	} else
-	switch (ctx->vif->type) {
-
-	case NL80211_IFTYPE_STATION:
-		ctx->staging.dev_type = ctx->station_devtype;
-		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
-		break;
-
-	case NL80211_IFTYPE_ADHOC:
-		ctx->staging.dev_type = ctx->ibss_devtype;
-		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
-		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
-						  RXON_FILTER_ACCEPT_GRP_MSK;
-		break;
-
-	default:
-		IWL_ERR(priv, "Unsupported interface type %d\n",
-			ctx->vif->type);
-		break;
-	}
-
-#if 0
-	/* TODO:  Figure out when short_preamble would be set and cache from
-	 * that */
-	if (!hw_to_local(priv->hw)->short_preamble)
-		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-	else
-		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
-	ch_info = iwl_legacy_get_channel_info(priv, priv->band,
-				       le16_to_cpu(ctx->active.channel));
-
-	if (!ch_info)
-		ch_info = &priv->channel_info[0];
-
-	ctx->staging.channel = cpu_to_le16(ch_info->channel);
-	priv->band = ch_info->band;
-
-	iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
-	ctx->staging.ofdm_basic_rates =
-	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-	ctx->staging.cck_basic_rates =
-	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
-	/* clear both MIX and PURE40 mode flag */
-	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
-					RXON_FLG_CHANNEL_MODE_PURE_40);
-	if (ctx->vif)
-		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
-	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
-	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-}
-EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
-
-void iwl_legacy_set_rate(struct iwl_priv *priv)
-{
-	const struct ieee80211_supported_band *hw = NULL;
-	struct ieee80211_rate *rate;
-	struct iwl_rxon_context *ctx;
-	int i;
-
-	hw = iwl_get_hw_mode(priv, priv->band);
-	if (!hw) {
-		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
-		return;
-	}
-
-	priv->active_rate = 0;
-
-	for (i = 0; i < hw->n_bitrates; i++) {
-		rate = &(hw->bitrates[i]);
-		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
-			priv->active_rate |= (1 << rate->hw_value);
-	}
-
-	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
-	for_each_context(priv, ctx) {
-		ctx->staging.cck_basic_rates =
-		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
-		ctx->staging.ofdm_basic_rates =
-		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_set_rate);
-
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-		ieee80211_chswitch_done(ctx->vif, is_success);
-}
-EXPORT_SYMBOL(iwl_legacy_chswitch_done);
-
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-
-	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-		return;
-
-	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
-		rxon->channel = csa->channel;
-		ctx->staging.channel = csa->channel;
-		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-			      le16_to_cpu(csa->channel));
-		iwl_legacy_chswitch_done(priv, true);
-	} else {
-		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-			le16_to_cpu(csa->channel));
-		iwl_legacy_chswitch_done(priv, false);
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_rx_csa);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
-			     struct iwl_rxon_context *ctx)
-{
-	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
-	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
-	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
-	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
-				le16_to_cpu(rxon->channel));
-	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
-	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
-				le32_to_cpu(rxon->filter_flags));
-	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
-	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
-			rxon->ofdm_basic_rates);
-	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
-				rxon->cck_basic_rates);
-	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
-	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
-	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
-				le16_to_cpu(rxon->assoc_id));
-}
-EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
-#endif
-/**
- * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
-{
-	/* Set the FW error flag -- cleared on iwl_down */
-	set_bit(STATUS_FW_ERROR, &priv->status);
-
-	/* Cancel currently queued command. */
-	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
-	IWL_ERR(priv, "Loaded firmware version: %s\n",
-		priv->hw->wiphy->fw_version);
-
-	priv->cfg->ops->lib->dump_nic_error_log(priv);
-	if (priv->cfg->ops->lib->dump_fh)
-		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
-		iwl_legacy_print_rx_config_cmd(priv,
-					&priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
-	wake_up(&priv->wait_command_queue);
-
-	/* Keep the restart process from trying to send host
-	 * commands by clearing the INIT status bit */
-	clear_bit(STATUS_READY, &priv->status);
-
-	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
-			  "Restarting adapter due to uCode error.\n");
-
-		if (priv->cfg->mod_params->restart_fw)
-			queue_work(priv->workqueue, &priv->restart);
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
-
-static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	/* stop device's busmaster DMA activity */
-	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
-	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
-			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-	if (ret)
-		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
-	IWL_DEBUG_INFO(priv, "stop master\n");
-
-	return ret;
-}
-
-void iwl_legacy_apm_stop(struct iwl_priv *priv)
-{
-	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
-	/* Stop device's DMA activity */
-	iwl_legacy_apm_stop_master(priv);
-
-	/* Reset the entire device */
-	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-
-	/*
-	 * Clear "initialization complete" bit to move adapter from
-	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
-	 */
-	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
-				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-EXPORT_SYMBOL(iwl_legacy_apm_stop);
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE:  This does not load uCode nor start the embedded processor
- */
-int iwl_legacy_apm_init(struct iwl_priv *priv)
-{
-	int ret = 0;
-	u16 lctl;
-
-	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
-	/*
-	 * Use "set_bit" below rather than "write", to preserve any hardware
-	 * bits already set by default after reset.
-	 */
-
-	/* Disable L0S exit timer (platform NMI Work/Around) */
-	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
-	/*
-	 * Disable L0s without affecting L1;
-	 *  don't wait for ICH L0s (ICH bug W/A)
-	 */
-	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* Set FH wait threshold to maximum (HW error during stress W/A) */
-	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
-					CSR_DBG_HPET_MEM_REG_VAL);
-
-	/*
-	 * Enable HAP INTA (interrupt from management bus) to
-	 * wake device's PCI Express link L1a -> L0s
-	 * NOTE:  This is a no-op for 3945 (non-existent bit)
-	 */
-	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
-	/*
-	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
-	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
-	 * If so (likely), disable L0S, so device moves directly L0->L1;
-	 *    costs negligible amount of power savings.
-	 * If not (unlikely), enable L0S, so there is at least some
-	 *    power savings, even without L1.
-	 */
-	if (priv->cfg->base_params->set_l0s) {
-		lctl = iwl_legacy_pcie_link_ctl(priv);
-		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
-					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
-			/* L1-ASPM enabled; disable(!) L0S  */
-			iwl_legacy_set_bit(priv, CSR_GIO_REG,
-					CSR_GIO_REG_VAL_L0S_ENABLED);
-			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
-		} else {
-			/* L1-ASPM disabled; enable(!) L0S */
-			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
-					CSR_GIO_REG_VAL_L0S_ENABLED);
-			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
-		}
-	}
-
-	/* Configure analog phase-lock-loop before activating to D0A */
-	if (priv->cfg->base_params->pll_cfg_val)
-		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
-			    priv->cfg->base_params->pll_cfg_val);
-
-	/*
-	 * Set "initialization complete" bit to move adapter from
-	 * D0U* --> D0A* (powered-up active) state.
-	 */
-	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/*
-	 * Wait for clock stabilization; once stabilized, access to
-	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
-	 * and accesses to uCode SRAM.
-	 */
-	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/*
-	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
-	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
-	 *
-	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
-	 * do not disable clocks.  This preserves any hardware bits already
-	 * set by default in "CLK_CTRL_REG" after reset.
-	 */
-	if (priv->cfg->base_params->use_bsm)
-		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
-			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
-	else
-		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
-			APMG_CLK_VAL_DMA_CLK_RQT);
-	udelay(20);
-
-	/* Disable L1-Active */
-	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_apm_init);
-
-
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
-	int ret;
-	s8 prev_tx_power;
-	bool defer;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (priv->tx_power_user_lmt == tx_power && !force)
-		return 0;
-
-	if (!priv->cfg->ops->lib->send_tx_power)
-		return -EOPNOTSUPP;
-
-	/* 0 dBm means 1 milliwatt */
-	if (tx_power < 0) {
-		IWL_WARN(priv,
-			 "Requested user TXPOWER %d below 1 mW.\n",
-			 tx_power);
-		return -EINVAL;
-	}
-
-	if (tx_power > priv->tx_power_device_lmt) {
-		IWL_WARN(priv,
-			"Requested user TXPOWER %d above upper limit %d.\n",
-			 tx_power, priv->tx_power_device_lmt);
-		return -EINVAL;
-	}
-
-	if (!iwl_legacy_is_ready_rf(priv))
-		return -EIO;
-
-	/* scan complete and commit_rxon use the tx_power_next value;
-	 * it always needs to be updated with the newest request */
-	priv->tx_power_next = tx_power;
-
-	/* do not set tx power when scanning or channel changing */
-	defer = test_bit(STATUS_SCANNING, &priv->status) ||
-		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
-	if (defer && !force) {
-		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
-		return 0;
-	}
-
-	prev_tx_power = priv->tx_power_user_lmt;
-	priv->tx_power_user_lmt = tx_power;
-
-	ret = priv->cfg->ops->lib->send_tx_power(priv);
-
-	/* if fail to set tx_power, restore the orig. tx power */
-	if (ret) {
-		priv->tx_power_user_lmt = prev_tx_power;
-		priv->tx_power_next = prev_tx_power;
-	}
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_set_tx_power);
-
-void iwl_legacy_send_bt_config(struct iwl_priv *priv)
-{
-	struct iwl_bt_cmd bt_cmd = {
-		.lead_time = BT_LEAD_TIME_DEF,
-		.max_kill = BT_MAX_KILL_DEF,
-		.kill_ack_mask = 0,
-		.kill_cts_mask = 0,
-	};
-
-	if (!bt_coex_active)
-		bt_cmd.flags = BT_COEX_DISABLE;
-	else
-		bt_cmd.flags = BT_COEX_ENABLE;
-
-	IWL_DEBUG_INFO(priv, "BT coex %s\n",
-		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
-	if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-			     sizeof(struct iwl_bt_cmd), &bt_cmd))
-		IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-EXPORT_SYMBOL(iwl_legacy_send_bt_config);
-
-int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
-	struct iwl_statistics_cmd statistics_cmd = {
-		.configuration_flags =
-			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
-	};
-
-	if (flags & CMD_ASYNC)
-		return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
-					sizeof(struct iwl_statistics_cmd),
-					&statistics_cmd, NULL);
-	else
-		return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
-					sizeof(struct iwl_statistics_cmd),
-					&statistics_cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
-
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
-			   struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
-	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
-		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
-
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-				      struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
-			"notification for %s:\n", len,
-			iwl_legacy_get_cmd_string(pkt->hdr.cmd));
-	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
-
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
-			struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
-		"seq 0x%04X ser 0x%08X\n",
-		le32_to_cpu(pkt->u.err_resp.error_type),
-		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
-		pkt->u.err_resp.cmd_id,
-		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
-		le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
-{
-	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
-			   struct ieee80211_vif *vif, u16 queue,
-			   const struct ieee80211_tx_queue_params *params)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx;
-	unsigned long flags;
-	int q;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (!iwl_legacy_is_ready_rf(priv)) {
-		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-		return -EIO;
-	}
-
-	if (queue >= AC_NUM) {
-		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
-		return 0;
-	}
-
-	q = AC_NUM - 1 - queue;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	for_each_context(priv, ctx) {
-		ctx->qos_data.def_qos_parm.ac[q].cw_min =
-			cpu_to_le16(params->cw_min);
-		ctx->qos_data.def_qos_parm.ac[q].cw_max =
-			cpu_to_le16(params->cw_max);
-		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
-		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
-				cpu_to_le16((params->txop * 32));
-
-		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
-	}
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
-
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
-
-static int
-iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	iwl_legacy_connection_init_rx_config(priv, ctx);
-
-	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-	return iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static int iwl_legacy_setup_interface(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx)
-{
-	struct ieee80211_vif *vif = ctx->vif;
-	int err;
-
-	lockdep_assert_held(&priv->mutex);
-
-	/*
-	 * This variable will be correct only when there's just
-	 * a single context, but all code using it is for hardware
-	 * that supports only one context.
-	 */
-	priv->iw_mode = vif->type;
-
-	ctx->is_active = true;
-
-	err = iwl_legacy_set_mode(priv, ctx);
-	if (err) {
-		if (!ctx->always_active)
-			ctx->is_active = false;
-		return err;
-	}
-
-	return 0;
-}
-
-int
-iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	struct iwl_rxon_context *tmp, *ctx = NULL;
-	int err;
-
-	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
-			   vif->type, vif->addr);
-
-	mutex_lock(&priv->mutex);
-
-	if (!iwl_legacy_is_ready_rf(priv)) {
-		IWL_WARN(priv, "Try to add interface when device not ready\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	for_each_context(priv, tmp) {
-		u32 possible_modes =
-			tmp->interface_modes | tmp->exclusive_interface_modes;
-
-		if (tmp->vif) {
-			/* check if this busy context is exclusive */
-			if (tmp->exclusive_interface_modes &
-						BIT(tmp->vif->type)) {
-				err = -EINVAL;
-				goto out;
-			}
-			continue;
-		}
-
-		if (!(possible_modes & BIT(vif->type)))
-			continue;
-
-		/* have maybe usable context w/o interface */
-		ctx = tmp;
-		break;
-	}
-
-	if (!ctx) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	vif_priv->ctx = ctx;
-	ctx->vif = vif;
-
-	err = iwl_legacy_setup_interface(priv, ctx);
-	if (!err)
-		goto out;
-
-	ctx->vif = NULL;
-	priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
-	mutex_unlock(&priv->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
-
-static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif,
-				   bool mode_change)
-{
-	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (priv->scan_vif == vif) {
-		iwl_legacy_scan_cancel_timeout(priv, 200);
-		iwl_legacy_force_scan_end(priv);
-	}
-
-	if (!mode_change) {
-		iwl_legacy_set_mode(priv, ctx);
-		if (!ctx->always_active)
-			ctx->is_active = false;
-	}
-}
-
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	mutex_lock(&priv->mutex);
-
-	WARN_ON(ctx->vif != vif);
-	ctx->vif = NULL;
-
-	iwl_legacy_teardown_interface(priv, vif, false);
-
-	memset(priv->bssid, 0, ETH_ALEN);
-	mutex_unlock(&priv->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
-
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
-{
-	if (!priv->txq)
-		priv->txq = kzalloc(
-			sizeof(struct iwl_tx_queue) *
-				priv->cfg->base_params->num_of_queues,
-			GFP_KERNEL);
-	if (!priv->txq) {
-		IWL_ERR(priv, "Not enough memory for txq\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
-
-void iwl_legacy_txq_mem(struct iwl_priv *priv)
-{
-	kfree(priv->txq);
-	priv->txq = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_mem);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-	priv->tx_traffic_idx = 0;
-	priv->rx_traffic_idx = 0;
-	if (priv->tx_traffic)
-		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-	if (priv->rx_traffic)
-		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
-	u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
-	if (iwlegacy_debug_level & IWL_DL_TX) {
-		if (!priv->tx_traffic) {
-			priv->tx_traffic =
-				kzalloc(traffic_size, GFP_KERNEL);
-			if (!priv->tx_traffic)
-				return -ENOMEM;
-		}
-	}
-	if (iwlegacy_debug_level & IWL_DL_RX) {
-		if (!priv->rx_traffic) {
-			priv->rx_traffic =
-				kzalloc(traffic_size, GFP_KERNEL);
-			if (!priv->rx_traffic)
-				return -ENOMEM;
-		}
-	}
-	iwl_legacy_reset_traffic_log(priv);
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
-
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-	kfree(priv->tx_traffic);
-	priv->tx_traffic = NULL;
-
-	kfree(priv->rx_traffic);
-	priv->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
-
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-	__le16 fc;
-	u16 len;
-
-	if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
-		return;
-
-	if (!priv->tx_traffic)
-		return;
-
-	fc = header->frame_control;
-	if (ieee80211_is_data(fc)) {
-		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
-		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
-		memcpy((priv->tx_traffic +
-		       (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
-		       header, len);
-		priv->tx_traffic_idx =
-			(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
-
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-	__le16 fc;
-	u16 len;
-
-	if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
-		return;
-
-	if (!priv->rx_traffic)
-		return;
-
-	fc = header->frame_control;
-	if (ieee80211_is_data(fc)) {
-		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
-		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
-		memcpy((priv->rx_traffic +
-		       (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
-		       header, len);
-		priv->rx_traffic_idx =
-			(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
-
-const char *iwl_legacy_get_mgmt_string(int cmd)
-{
-	switch (cmd) {
-		IWL_CMD(MANAGEMENT_ASSOC_REQ);
-		IWL_CMD(MANAGEMENT_ASSOC_RESP);
-		IWL_CMD(MANAGEMENT_REASSOC_REQ);
-		IWL_CMD(MANAGEMENT_REASSOC_RESP);
-		IWL_CMD(MANAGEMENT_PROBE_REQ);
-		IWL_CMD(MANAGEMENT_PROBE_RESP);
-		IWL_CMD(MANAGEMENT_BEACON);
-		IWL_CMD(MANAGEMENT_ATIM);
-		IWL_CMD(MANAGEMENT_DISASSOC);
-		IWL_CMD(MANAGEMENT_AUTH);
-		IWL_CMD(MANAGEMENT_DEAUTH);
-		IWL_CMD(MANAGEMENT_ACTION);
-	default:
-		return "UNKNOWN";
-
-	}
-}
-
-const char *iwl_legacy_get_ctrl_string(int cmd)
-{
-	switch (cmd) {
-		IWL_CMD(CONTROL_BACK_REQ);
-		IWL_CMD(CONTROL_BACK);
-		IWL_CMD(CONTROL_PSPOLL);
-		IWL_CMD(CONTROL_RTS);
-		IWL_CMD(CONTROL_CTS);
-		IWL_CMD(CONTROL_ACK);
-		IWL_CMD(CONTROL_CFEND);
-		IWL_CMD(CONTROL_CFENDACK);
-	default:
-		return "UNKNOWN";
-
-	}
-}
-
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
-{
-	memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
-	memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, the iwl_legacy_update_stats
- * function will record all MGMT, CTRL and DATA packets for both the TX and
- * RX paths; use debugfs to display the tx/rx statistics.
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
- * information will be recorded, but DATA packets will still be counted,
- * because iwl_led.c needs to control LED blinking based on the number of
- * tx and rx data frames.
- */
-void
-iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
-	struct traffic_stats	*stats;
-
-	if (is_tx)
-		stats = &priv->tx_stats;
-	else
-		stats = &priv->rx_stats;
-
-	if (ieee80211_is_mgmt(fc)) {
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
-			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
-			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
-			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
-			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_BEACON):
-			stats->mgmt[MANAGEMENT_BEACON]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ATIM):
-			stats->mgmt[MANAGEMENT_ATIM]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
-			stats->mgmt[MANAGEMENT_DISASSOC]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_AUTH):
-			stats->mgmt[MANAGEMENT_AUTH]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-			stats->mgmt[MANAGEMENT_DEAUTH]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ACTION):
-			stats->mgmt[MANAGEMENT_ACTION]++;
-			break;
-		}
-	} else if (ieee80211_is_ctl(fc)) {
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
-			stats->ctrl[CONTROL_BACK_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_BACK):
-			stats->ctrl[CONTROL_BACK]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
-			stats->ctrl[CONTROL_PSPOLL]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_RTS):
-			stats->ctrl[CONTROL_RTS]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_CTS):
-			stats->ctrl[CONTROL_CTS]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ACK):
-			stats->ctrl[CONTROL_ACK]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_CFEND):
-			stats->ctrl[CONTROL_CFEND]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
-			stats->ctrl[CONTROL_CFENDACK]++;
-			break;
-		}
-	} else {
-		/* data */
-		stats->data_cnt++;
-		stats->data_bytes += len;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_update_stats);
-#endif
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
-{
-	struct iwl_force_reset *force_reset;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return -EINVAL;
-
-	force_reset = &priv->force_reset;
-	force_reset->reset_request_count++;
-	if (!external) {
-		if (force_reset->last_force_reset_jiffies &&
-		    time_after(force_reset->last_force_reset_jiffies +
-		    force_reset->reset_duration, jiffies)) {
-			IWL_DEBUG_INFO(priv, "force reset rejected\n");
-			force_reset->reset_reject_count++;
-			return -EAGAIN;
-		}
-	}
-	force_reset->reset_success_count++;
-	force_reset->last_force_reset_jiffies = jiffies;
-
-	/*
-	 * If the request is external (e.g. from debugfs), always perform
-	 * the request regardless of the module parameter setting.
-	 * If the request is internal (uCode error or driver-detected
-	 * failure), the fw_restart module parameter needs to be checked
-	 * before performing a firmware reload.
-	 */
-
-	if (!external && !priv->cfg->mod_params->restart_fw) {
-		IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
-			       "module parameter setting\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "On demand firmware reload\n");
-
-	/* Set the FW error flag -- cleared on iwl_down */
-	set_bit(STATUS_FW_ERROR, &priv->status);
-	wake_up(&priv->wait_command_queue);
-	/*
-	 * Keep the restart process from trying to send host
-	 * commands by clearing the INIT status bit
-	 */
-	clear_bit(STATUS_READY, &priv->status);
-	queue_work(priv->workqueue, &priv->restart);
-
-	return 0;
-}
-
-int
-iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
-			struct ieee80211_vif *vif,
-			enum nl80211_iftype newtype, bool newp2p)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-	struct iwl_rxon_context *tmp;
-	u32 interface_modes;
-	int err;
-
-	newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
-	mutex_lock(&priv->mutex);
-
-	if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
-		/*
-		 * Huh? But wait ... this can maybe happen when
-		 * we're in the middle of a firmware restart!
-		 */
-		err = -EBUSY;
-		goto out;
-	}
-
-	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
-	if (!(interface_modes & BIT(newtype))) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	if (ctx->exclusive_interface_modes & BIT(newtype)) {
-		for_each_context(priv, tmp) {
-			if (ctx == tmp)
-				continue;
-
-			if (!tmp->vif)
-				continue;
-
-			/*
-			 * The current mode switch would be exclusive, but
-			 * another context is active ... refuse the switch.
-			 */
-			err = -EBUSY;
-			goto out;
-		}
-	}
-
-	/* success */
-	iwl_legacy_teardown_interface(priv, vif, true);
-	vif->type = newtype;
-	vif->p2p = newp2p;
-	err = iwl_legacy_setup_interface(priv, ctx);
-	WARN_ON(err);
-	/*
-	 * We've switched internally, but submitting to the
-	 * device may have failed for some reason. Mask this
-	 * error, because otherwise mac80211 will not switch
-	 * (and set the interface type back) and we'll be
-	 * out of sync with it.
-	 */
-	err = 0;
-
- out:
-	mutex_unlock(&priv->mutex);
-	return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
-
-/*
- * On every watchdog tick we check the (latest) time stamp. If it does not
- * change during the timeout period and the queue is not empty, we reset
- * the firmware.
- */
-static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
-{
-	struct iwl_tx_queue *txq = &priv->txq[cnt];
-	struct iwl_queue *q = &txq->q;
-	unsigned long timeout;
-	int ret;
-
-	if (q->read_ptr == q->write_ptr) {
-		txq->time_stamp = jiffies;
-		return 0;
-	}
-
-	timeout = txq->time_stamp +
-		  msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
-
-	if (time_after(jiffies, timeout)) {
-		IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
-				q->id, priv->cfg->base_params->wd_timeout);
-		ret = iwl_legacy_force_reset(priv, false);
-		return (ret == -EAGAIN) ? 0 : 1;
-	}
-
-	return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures we will
- * discover a hung queue between timeout and 1.25*timeout.
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
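Worked example (illustrative numbers only): with wd_timeout = 2000 ms, IWL_WD_TICK(2000) = 500 ms, so the watchdog fires every 500 ms. A queue whose time stamp last advanced just after a tick is flagged no earlier than 2000 ms and no later than 2000 + 500 = 2500 ms afterwards, i.e. between timeout and 1.25*timeout.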
-
-/*
- * Watchdog timer callback: we check each tx queue for a stuck condition;
- * if a queue is hung we reset the firmware. If everything is fine we just
- * rearm the timer.
- */
-void iwl_legacy_bg_watchdog(unsigned long data)
-{
-	struct iwl_priv *priv = (struct iwl_priv *)data;
-	int cnt;
-	unsigned long timeout;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	timeout = priv->cfg->base_params->wd_timeout;
-	if (timeout == 0)
-		return;
-
-	/* monitor and check for stuck cmd queue */
-	if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
-		return;
-
-	/* monitor and check for other stuck queues */
-	if (iwl_legacy_is_any_associated(priv)) {
-		for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-			/* skip as we already checked the command queue */
-			if (cnt == priv->cmd_queue)
-				continue;
-			if (iwl_legacy_check_stuck_queue(priv, cnt))
-				return;
-		}
-	}
-
-	mod_timer(&priv->watchdog, jiffies +
-		  msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
-
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
-{
-	unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
-	if (timeout)
-		mod_timer(&priv->watchdog,
-			  jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
-	else
-		del_timer(&priv->watchdog);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
-
-/*
- * Extended beacon time format:
- * the time in usec is converted into a 32-bit value in extended:internal
- * format, where the extended part is the beacon count and the internal
- * part is the time in usec within one beacon interval.
- */
-u32
-iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
-					u32 usec, u32 beacon_interval)
-{
-	u32 quot;
-	u32 rem;
-	u32 interval = beacon_interval * TIME_UNIT;
-
-	if (!interval || !usec)
-		return 0;
-
-	quot = (usec / interval) &
-		(iwl_legacy_beacon_time_mask_high(priv,
-		priv->hw_params.beacon_time_tsf_bits) >>
-		priv->hw_params.beacon_time_tsf_bits);
-	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
-				   priv->hw_params.beacon_time_tsf_bits);
-
-	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
-}
-EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
-
-/* base is usually what we get from the uCode with each received frame;
- * it is the same as the HW timer counter counting down
- */
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
-			   u32 addon, u32 beacon_interval)
-{
-	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
-					priv->hw_params.beacon_time_tsf_bits);
-	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
-					priv->hw_params.beacon_time_tsf_bits);
-	u32 interval = beacon_interval * TIME_UNIT;
-	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
-				priv->hw_params.beacon_time_tsf_bits)) +
-				(addon & iwl_legacy_beacon_time_mask_high(priv,
-				priv->hw_params.beacon_time_tsf_bits));
-
-	if (base_low > addon_low)
-		res += base_low - addon_low;
-	else if (base_low < addon_low) {
-		res += interval + base_low - addon_low;
-		res += (1 << priv->hw_params.beacon_time_tsf_bits);
-	} else
-		res += (1 << priv->hw_params.beacon_time_tsf_bits);
-
-	return cpu_to_le32(res);
-}
-EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
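To make the extended:internal encoding concrete, here is a hedged userspace sketch of the conversion (the function name, the simplified masking and the choice of 22 low bits are illustrative assumptions; the real helpers additionally apply the per-hardware beacon_time masks):

#include <stdint.h>
#include <stdio.h>

#define TIME_UNIT 1024U	/* one TU in usec */

static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu,
				 unsigned int tsf_bits)
{
	uint32_t interval = beacon_interval_tu * TIME_UNIT;
	uint32_t low_mask = (1U << tsf_bits) - 1;

	if (!interval || !usec)
		return 0;

	/* high bits: whole beacon intervals; low bits: usec within the interval */
	return ((usec / interval) << tsf_bits) | ((usec % interval) & low_mask);
}

int main(void)
{
	/* 250000 usec with a 100 TU interval and 22 low bits:
	 * 2 whole intervals, 45200 usec remainder -> 0x0080b090 */
	printf("0x%08x\n", (unsigned)usecs_to_beacons(250000, 100, 22));
	return 0;
}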
-
-#ifdef CONFIG_PM
-
-int iwl_legacy_pci_suspend(struct device *device)
-{
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct iwl_priv *priv = pci_get_drvdata(pdev);
-
-	/*
-	 * This function is called when the system goes into suspend state.
-	 * mac80211 will call iwl_mac_stop() from its suspend function first,
-	 * but since iwl_mac_stop() has no knowledge of who the caller is,
-	 * it will not call apm_ops.stop() to stop the DMA operation.
-	 * Call apm_ops.stop() here to make sure the DMA is stopped.
-	 */
-	iwl_legacy_apm_stop(priv);
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_suspend);
-
-int iwl_legacy_pci_resume(struct device *device)
-{
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct iwl_priv *priv = pci_get_drvdata(pdev);
-	bool hw_rfkill = false;
-
-	/*
-	 * We disable the RETRY_TIMEOUT register (0x41) to keep
-	 * PCI Tx retries from interfering with C3 CPU state.
-	 */
-	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
-	iwl_legacy_enable_interrupts(priv);
-
-	if (!(iwl_read32(priv, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
-		hw_rfkill = true;
-
-	if (hw_rfkill)
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_resume);
-
-const struct dev_pm_ops iwl_legacy_pm_ops = {
-	.suspend = iwl_legacy_pci_suspend,
-	.resume = iwl_legacy_pci_resume,
-	.freeze = iwl_legacy_pci_suspend,
-	.thaw = iwl_legacy_pci_resume,
-	.poweroff = iwl_legacy_pci_suspend,
-	.restore = iwl_legacy_pci_resume,
-};
-EXPORT_SYMBOL(iwl_legacy_pm_ops);
-
-#endif /* CONFIG_PM */
-
-static void
-iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (!ctx->is_active)
-		return;
-
-	ctx->qos_data.def_qos_parm.qos_flags = 0;
-
-	if (ctx->qos_data.qos_active)
-		ctx->qos_data.def_qos_parm.qos_flags |=
-			QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
-	if (ctx->ht.enabled)
-		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
-	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
-		      ctx->qos_data.qos_active,
-		      ctx->qos_data.def_qos_parm.qos_flags);
-
-	iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
-			       sizeof(struct iwl_qosparam_cmd),
-			       &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
-	struct iwl_priv *priv = hw->priv;
-	const struct iwl_channel_info *ch_info;
-	struct ieee80211_conf *conf = &hw->conf;
-	struct ieee80211_channel *channel = conf->channel;
-	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-	struct iwl_rxon_context *ctx;
-	unsigned long flags = 0;
-	int ret = 0;
-	u16 ch;
-	int scan_active = 0;
-	bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
-	if (WARN_ON(!priv->cfg->ops->legacy))
-		return -EOPNOTSUPP;
-
-	mutex_lock(&priv->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-					channel->hw_value, changed);
-
-	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
-		scan_active = 1;
-		IWL_DEBUG_MAC80211(priv, "scan active\n");
-	}
-
-	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
-		       IEEE80211_CONF_CHANGE_CHANNEL)) {
-		/* mac80211 uses static for non-HT which is what we want */
-		priv->current_ht_config.smps = conf->smps_mode;
-
-		/*
-		 * Recalculate chain counts.
-		 *
-		 * If monitor mode is enabled then mac80211 will
-		 * set up the SM PS mode to OFF if an HT channel is
-		 * configured.
-		 */
-		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			for_each_context(priv, ctx)
-				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-	}
-
-	/* During scanning, mac80211 will delay channel setting until the
-	 * scan finishes, with changed = 0.
-	 */
-	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-		if (scan_active)
-			goto set_ch_out;
-
-		ch = channel->hw_value;
-		ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
-		if (!iwl_legacy_is_channel_valid(ch_info)) {
-			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
-			ret = -EINVAL;
-			goto set_ch_out;
-		}
-
-		if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
-		    !iwl_legacy_is_channel_ibss(ch_info)) {
-			IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
-			ret = -EINVAL;
-			goto set_ch_out;
-		}
-
-		spin_lock_irqsave(&priv->lock, flags);
-
-		for_each_context(priv, ctx) {
-			/* Configure HT40 channels */
-			if (ctx->ht.enabled != conf_is_ht(conf)) {
-				ctx->ht.enabled = conf_is_ht(conf);
-				ht_changed[ctx->ctxid] = true;
-			}
-			if (ctx->ht.enabled) {
-				if (conf_is_ht40_minus(conf)) {
-					ctx->ht.extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-					ctx->ht.is_40mhz = true;
-				} else if (conf_is_ht40_plus(conf)) {
-					ctx->ht.extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-					ctx->ht.is_40mhz = true;
-				} else {
-					ctx->ht.extension_chan_offset =
-					IEEE80211_HT_PARAM_CHA_SEC_NONE;
-					ctx->ht.is_40mhz = false;
-				}
-			} else
-				ctx->ht.is_40mhz = false;
-
-			/*
-			 * Default to no protection. Protection mode will
-			 * later be set from BSS config in iwl_ht_conf
-			 */
-			ctx->ht.protection =
-					IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
-			/* If we are switching from HT to 2.4 GHz, clear flags
-			 * from any HT-related info, since 2.4 GHz does not
-			 * support HT */
-			if ((le16_to_cpu(ctx->staging.channel) != ch))
-				ctx->staging.flags = 0;
-
-			iwl_legacy_set_rxon_channel(priv, channel, ctx);
-			iwl_legacy_set_rxon_ht(priv, ht_conf);
-
-			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
-					       ctx->vif);
-		}
-
-		spin_unlock_irqrestore(&priv->lock, flags);
-
-		if (priv->cfg->ops->legacy->update_bcast_stations)
-			ret =
-			priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
-		/* The list of supported rates and rate mask can be different
-		 * for each band; since the band may have changed, reset
-		 * the rate mask to what mac80211 lists */
-		iwl_legacy_set_rate(priv);
-	}
-
-	if (changed & (IEEE80211_CONF_CHANGE_PS |
-			IEEE80211_CONF_CHANGE_IDLE)) {
-		ret = iwl_legacy_power_update_mode(priv, false);
-		if (ret)
-			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
-	}
-
-	if (changed & IEEE80211_CONF_CHANGE_POWER) {
-		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
-			priv->tx_power_user_lmt, conf->power_level);
-
-		iwl_legacy_set_tx_power(priv, conf->power_level, false);
-	}
-
-	if (!iwl_legacy_is_ready(priv)) {
-		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-		goto out;
-	}
-
-	if (scan_active)
-		goto out;
-
-	for_each_context(priv, ctx) {
-		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
-			iwl_legacy_commit_rxon(priv, ctx);
-		else
-			IWL_DEBUG_INFO(priv,
-				"Not re-sending same RXON configuration.\n");
-		if (ht_changed[ctx->ctxid])
-			iwl_legacy_update_qos(priv, ctx);
-	}
-
-out:
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	mutex_unlock(&priv->mutex);
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif)
-{
-	struct iwl_priv *priv = hw->priv;
-	unsigned long flags;
-	/* IBSS can only be the IWL_RXON_CTX_BSS context */
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (WARN_ON(!priv->cfg->ops->legacy))
-		return;
-
-	mutex_lock(&priv->mutex);
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	spin_lock_irqsave(&priv->lock, flags);
-	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* new association: get rid of the IBSS beacon skb */
-	if (priv->beacon_skb)
-		dev_kfree_skb(priv->beacon_skb);
-
-	priv->beacon_skb = NULL;
-
-	priv->timestamp = 0;
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	iwl_legacy_scan_cancel_timeout(priv, 100);
-	if (!iwl_legacy_is_ready_rf(priv)) {
-		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-		mutex_unlock(&priv->mutex);
-		return;
-	}
-
-	/* we are restarting the association process,
-	 * so clear the RXON_FILTER_ASSOC_MSK bit
-	 */
-	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	iwl_legacy_commit_rxon(priv, ctx);
-
-	iwl_legacy_set_rate(priv);
-
-	mutex_unlock(&priv->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_legacy_ht_conf(struct iwl_priv *priv,
-			struct ieee80211_vif *vif)
-{
-	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-	struct ieee80211_sta *sta;
-	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-	IWL_DEBUG_ASSOC(priv, "enter:\n");
-
-	if (!ctx->ht.enabled)
-		return;
-
-	ctx->ht.protection =
-		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-	ctx->ht.non_gf_sta_present =
-		!!(bss_conf->ht_operation_mode &
-				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-	ht_conf->single_chain_sufficient = false;
-
-	switch (vif->type) {
-	case NL80211_IFTYPE_STATION:
-		rcu_read_lock();
-		sta = ieee80211_find_sta(vif, bss_conf->bssid);
-		if (sta) {
-			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-			int maxstreams;
-
-			maxstreams = (ht_cap->mcs.tx_params &
-			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-			maxstreams += 1;
-
-			if ((ht_cap->mcs.rx_mask[1] == 0) &&
-			    (ht_cap->mcs.rx_mask[2] == 0))
-				ht_conf->single_chain_sufficient = true;
-			if (maxstreams <= 1)
-				ht_conf->single_chain_sufficient = true;
-		} else {
-			/*
-			 * If this happens at all, it can only be through a
-			 * race when the AP disconnects us while we're still
-			 * setting up the connection; in that case mac80211
-			 * will soon tell us about it.
-			 */
-			ht_conf->single_chain_sufficient = true;
-		}
-		rcu_read_unlock();
-		break;
-	case NL80211_IFTYPE_ADHOC:
-		ht_conf->single_chain_sufficient = true;
-		break;
-	default:
-		break;
-	}
-
-	IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
-				    struct ieee80211_vif *vif)
-{
-	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-	/*
-	 * inform the ucode that there is no longer an
-	 * association and that no more packets should be
-	 * sent
-	 */
-	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	ctx->staging.assoc_id = 0;
-	iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
-				  struct ieee80211_vif *vif)
-{
-	struct iwl_priv *priv = hw->priv;
-	unsigned long flags;
-	__le64 timestamp;
-	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
-	if (!skb)
-		return;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (!priv->beacon_ctx) {
-		IWL_ERR(priv, "update beacon but no beacon context!\n");
-		dev_kfree_skb(skb);
-		return;
-	}
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	if (priv->beacon_skb)
-		dev_kfree_skb(priv->beacon_skb);
-
-	priv->beacon_skb = skb;
-
-	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
-	priv->timestamp = le64_to_cpu(timestamp);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (!iwl_legacy_is_ready_rf(priv)) {
-		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-		return;
-	}
-
-	priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-				     struct ieee80211_vif *vif,
-				     struct ieee80211_bss_conf *bss_conf,
-				     u32 changes)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-	int ret;
-
-	if (WARN_ON(!priv->cfg->ops->legacy))
-		return;
-
-	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
-	mutex_lock(&priv->mutex);
-
-	if (!iwl_legacy_is_alive(priv)) {
-		mutex_unlock(&priv->mutex);
-		return;
-	}
-
-	if (changes & BSS_CHANGED_QOS) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&priv->lock, flags);
-		ctx->qos_data.qos_active = bss_conf->qos;
-		iwl_legacy_update_qos(priv, ctx);
-		spin_unlock_irqrestore(&priv->lock, flags);
-	}
-
-	if (changes & BSS_CHANGED_BEACON_ENABLED) {
-		/*
-		 * the add_interface code must make sure we only ever
-		 * have a single interface that could be beaconing at
-		 * any time.
-		 */
-		if (vif->bss_conf.enable_beacon)
-			priv->beacon_ctx = ctx;
-		else
-			priv->beacon_ctx = NULL;
-	}
-
-	if (changes & BSS_CHANGED_BSSID) {
-		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
-		/*
-		 * If there is currently a HW scan going on in the
-		 * background then we need to cancel it else the RXON
-		 * below/in post_associate will fail.
-		 */
-		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
-			IWL_WARN(priv,
-				"Aborted scan still in progress after 100ms\n");
-			IWL_DEBUG_MAC80211(priv,
-				"leaving - scan abort failed.\n");
-			mutex_unlock(&priv->mutex);
-			return;
-		}
-
-		/* mac80211 only sets assoc when in STATION mode */
-		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-			memcpy(ctx->staging.bssid_addr,
-			       bss_conf->bssid, ETH_ALEN);
-
-			/* currently needed in a few places */
-			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-		} else {
-			ctx->staging.filter_flags &=
-				~RXON_FILTER_ASSOC_MSK;
-		}
-
-	}
-
-	/*
-	 * This needs to be after setting the BSSID in case
-	 * mac80211 decides to do both changes at once because
-	 * it will invoke post_associate.
-	 */
-	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
-		iwl_legacy_beacon_update(hw, vif);
-
-	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
-				   bss_conf->use_short_preamble);
-		if (bss_conf->use_short_preamble)
-			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-		else
-			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-	}
-
-	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
-		IWL_DEBUG_MAC80211(priv,
-			"ERP_CTS %d\n", bss_conf->use_cts_prot);
-		if (bss_conf->use_cts_prot &&
-			(priv->band != IEEE80211_BAND_5GHZ))
-			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
-		else
-			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
-		if (bss_conf->use_cts_prot)
-			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-		else
-			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-	}
-
-	if (changes & BSS_CHANGED_BASIC_RATES) {
-		/* XXX use this information
-		 *
-		 * To do that, remove code from iwl_legacy_set_rate() and put something
-		 * like this here:
-		 *
-		if (A-band)
-			ctx->staging.ofdm_basic_rates =
-				bss_conf->basic_rates;
-		else
-			ctx->staging.ofdm_basic_rates =
-				bss_conf->basic_rates >> 4;
-			ctx->staging.cck_basic_rates =
-				bss_conf->basic_rates & 0xF;
-		 */
-	}
-
-	if (changes & BSS_CHANGED_HT) {
-		iwl_legacy_ht_conf(priv, vif);
-
-		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-	}
-
-	if (changes & BSS_CHANGED_ASSOC) {
-		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
-		if (bss_conf->assoc) {
-			priv->timestamp = bss_conf->timestamp;
-
-			if (!iwl_legacy_is_rfkill(priv))
-				priv->cfg->ops->legacy->post_associate(priv);
-		} else
-			iwl_legacy_set_no_assoc(priv, vif);
-	}
-
-	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
-		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
-				   changes);
-		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
-		if (!ret) {
-			/* Sync active_rxon with latest change. */
-			memcpy((void *)&ctx->active,
-				&ctx->staging,
-				sizeof(struct iwl_legacy_rxon_cmd));
-		}
-	}
-
-	if (changes & BSS_CHANGED_BEACON_ENABLED) {
-		if (vif->bss_conf.enable_beacon) {
-			memcpy(ctx->staging.bssid_addr,
-			       bss_conf->bssid, ETH_ALEN);
-			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-			priv->cfg->ops->legacy->config_ap(priv);
-		} else
-			iwl_legacy_set_no_assoc(priv, vif);
-	}
-
-	if (changes & BSS_CHANGED_IBSS) {
-		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
-							bss_conf->ibss_joined);
-		if (ret)
-			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
-				bss_conf->ibss_joined ? "add" : "remove",
-				bss_conf->bssid);
-	}
-
-	mutex_unlock(&priv->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data)
-{
-	struct iwl_priv *priv = data;
-	u32 inta, inta_mask;
-	u32 inta_fh;
-	unsigned long flags;
-	if (!priv)
-		return IRQ_NONE;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Disable (but don't clear!) interrupts here to avoid
-	 *    back-to-back ISRs and sporadic interrupts from our NIC.
-	 * If we have something to service, the tasklet will re-enable ints.
-	 * If we *don't* have something, we'll re-enable before leaving here. */
-	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
-	iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-	/* Discover which interrupts are active/pending */
-	inta = iwl_read32(priv, CSR_INT);
-	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	if (!inta && !inta_fh) {
-		IWL_DEBUG_ISR(priv,
-			"Ignore interrupt, inta == 0, inta_fh == 0\n");
-		goto none;
-	}
-
-	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-		/* Hardware disappeared. It might have already raised
-		 * an interrupt */
-		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-		goto unplugged;
-	}
-
-	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-		      inta, inta_mask, inta_fh);
-
-	inta &= ~CSR_INT_BIT_SCD;
-
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
-	if (likely(inta || inta_fh))
-		tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return IRQ_HANDLED;
-
-none:
-	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if disabled by irq */
-	if (test_bit(STATUS_INT_ENABLED, &priv->status))
-		iwl_legacy_enable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_legacy_isr);
-
-/*
- *  iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
- *  function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-			       struct ieee80211_tx_info *info,
-			       __le16 fc, __le32 *tx_flags)
-{
-	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-		*tx_flags |= TX_CMD_FLG_RTS_MSK;
-		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
-		if (!ieee80211_is_mgmt(fc))
-			return;
-
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_AUTH):
-		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-			*tx_flags |= TX_CMD_FLG_CTS_MSK;
-			break;
-		}
-	} else if (info->control.rates[0].flags &
-		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
-		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-		*tx_flags |= TX_CMD_FLG_CTS_MSK;
-		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_core_h__
-#define __iwl_legacy_core_h__
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR     "<ilw@linux.intel.com>"
-
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
-	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
-	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
-	.driver_data = (kernel_ulong_t)&(cfg)
-
-#define TIME_UNIT		1024
-
-#define IWL_SKU_G       0x1
-#define IWL_SKU_A       0x2
-#define IWL_SKU_N       0x8
-
-#define IWL_CMD(x) case x: return #x
-
-struct iwl_hcmd_ops {
-	int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-	void (*set_rxon_chain)(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx);
-};
-
-struct iwl_hcmd_utils_ops {
-	u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
-	u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
-								u8 *data);
-	int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
-	void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
-	int (*init)(struct iwl_priv *priv);
-	void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_debugfs_ops {
-	ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
-				 size_t count, loff_t *ppos);
-	ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
-				 size_t count, loff_t *ppos);
-	ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
-				      size_t count, loff_t *ppos);
-};
-
-struct iwl_temp_ops {
-	void (*temperature)(struct iwl_priv *priv);
-};
-
-struct iwl_lib_ops {
-	/* set hw dependent parameters */
-	int (*set_hw_params)(struct iwl_priv *priv);
-	/* Handling TX */
-	void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
-					struct iwl_tx_queue *txq,
-					u16 byte_cnt);
-	int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
-				     struct iwl_tx_queue *txq,
-				     dma_addr_t addr,
-				     u16 len, u8 reset, u8 pad);
-	void (*txq_free_tfd)(struct iwl_priv *priv,
-			     struct iwl_tx_queue *txq);
-	int (*txq_init)(struct iwl_priv *priv,
-			struct iwl_tx_queue *txq);
-	/* setup Rx handler */
-	void (*rx_handler_setup)(struct iwl_priv *priv);
-	/* alive notification after init uCode load */
-	void (*init_alive_start)(struct iwl_priv *priv);
-	/* check validity of rtc data address */
-	int (*is_valid_rtc_data_addr)(u32 addr);
-	/* 1st ucode load */
-	int (*load_ucode)(struct iwl_priv *priv);
-
-	void (*dump_nic_error_log)(struct iwl_priv *priv);
-	int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
-	int (*set_channel_switch)(struct iwl_priv *priv,
-				  struct ieee80211_channel_switch *ch_switch);
-	/* power management */
-	struct iwl_apm_ops apm_ops;
-
-	/* power */
-	int (*send_tx_power) (struct iwl_priv *priv);
-	void (*update_chain_flags)(struct iwl_priv *priv);
-
-	/* eeprom operations (as defined in iwl-eeprom.h) */
-	struct iwl_eeprom_ops eeprom_ops;
-
-	/* temperature */
-	struct iwl_temp_ops temp_ops;
-
-	struct iwl_debugfs_ops debugfs_ops;
-
-};
-
-struct iwl_led_ops {
-	int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-};
-
-struct iwl_legacy_ops {
-	void (*post_associate)(struct iwl_priv *priv);
-	void (*config_ap)(struct iwl_priv *priv);
-	/* station management */
-	int (*update_bcast_stations)(struct iwl_priv *priv);
-	int (*manage_ibss_station)(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif, bool add);
-};
-
-struct iwl_ops {
-	const struct iwl_lib_ops *lib;
-	const struct iwl_hcmd_ops *hcmd;
-	const struct iwl_hcmd_utils_ops *utils;
-	const struct iwl_led_ops *led;
-	const struct iwl_nic_ops *nic;
-	const struct iwl_legacy_ops *legacy;
-	const struct ieee80211_ops *ieee80211_ops;
-};
-
-struct iwl_mod_params {
-	int sw_crypto;		/* def: 0 = using hardware encryption */
-	int disable_hw_scan;	/* def: 0 = use h/w scan */
-	int num_of_queues;	/* def: HW dependent */
-	int disable_11n;	/* def: 0 = 11n capabilities enabled */
-	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
-	int antenna;  		/* def: 0 = both antennas (use diversity) */
-	int restart_fw;		/* def: 1 = restart firmware */
-};
-
-/*
- * @led_compensation: compensate the LED on/off time per HW according
- *	to the deviation, to achieve the desired LED frequency.
- *	The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature reported by the uCode, in kelvin
- * @ucode_tracing: support ucode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- *	sensitivity calibration operation
- * @chain_noise_calib_by_driver: driver has the capability to perform
- *	chain noise calibration operation
- */
-struct iwl_base_params {
-	int eeprom_size;
-	int num_of_queues;	/* def: HW dependent */
-	int num_of_ampdu_queues;/* def: HW dependent */
-	/* for iwl_legacy_apm_init() */
-	u32 pll_cfg_val;
-	bool set_l0s;
-	bool use_bsm;
-
-	u16 led_compensation;
-	int chain_noise_num_beacons;
-	unsigned int wd_timeout;
-	bool temperature_kelvin;
-	const bool ucode_tracing;
-	const bool sensitivity_calib_by_driver;
-	const bool chain_noise_calib_by_driver;
-};
-
-/**
- * struct iwl_cfg
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- *	(.ucode) will be added to filename before loading from disk. The
- *	filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- *	Driver interacts with Firmware API version >= 2.
- * } else {
- *	Driver interacts with Firmware API version 1.
- * }
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, through utilizing the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions as well as their API
- * versions.
- *
- */
-struct iwl_cfg {
-	/* params specific to an individual device within a device family */
-	const char *name;
-	const char *fw_name_pre;
-	const unsigned int ucode_api_max;
-	const unsigned int ucode_api_min;
-	u8   valid_tx_ant;
-	u8   valid_rx_ant;
-	unsigned int sku;
-	u16  eeprom_ver;
-	u16  eeprom_calib_ver;
-	const struct iwl_ops *ops;
-	/* module based parameters which can be set from modprobe cmd */
-	const struct iwl_mod_params *mod_params;
-	/* params not likely to change within a device family */
-	struct iwl_base_params *base_params;
-	/* params likely to change within a device family */
-	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
-	enum iwl_led_mode led_mode;
-};
-
-/***************************
- *   L i b                 *
- ***************************/
-
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif, u16 queue,
-		    const struct ieee80211_tx_queue_params *params);
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			int hw_decrypt);
-int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx);
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx);
-int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
-			struct ieee80211_channel *ch,
-			struct iwl_rxon_context *ctx);
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    enum ieee80211_band band,
-			    struct ieee80211_vif *vif);
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
-				  enum ieee80211_band band);
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
-			struct iwl_ht_config *ht_conf);
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
-				   struct iwl_rxon_context *ctx);
-void iwl_legacy_set_rate(struct iwl_priv *priv);
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
-			   struct ieee80211_hdr *hdr,
-			   u32 decrypt_res,
-			   struct ieee80211_rx_status *stats);
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
-int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
-			  struct ieee80211_vif *vif);
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif);
-int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif,
-			     enum nl80211_iftype newtype, bool newp2p);
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_legacy_txq_mem(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
-				u16 length, struct ieee80211_hdr *header);
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
-				u16 length, struct ieee80211_hdr *header);
-const char *iwl_legacy_get_mgmt_string(int cmd);
-const char *iwl_legacy_get_ctrl_string(int cmd);
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
-		      u16 len);
-#else
-static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
-	return 0;
-}
-static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
-				    __le16 fc, u16 len)
-{
-}
-#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
-			   struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-				      struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
-			struct iwl_rx_mem_buffer *rxb);
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
-				  struct iwl_rx_queue *q);
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
-					  struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt);
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
-					struct iwl_tx_queue *txq);
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-		      int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
-			struct iwl_tx_queue *txq,
-			int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_legacy_init_scan_params(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_legacy_force_scan_end(struct iwl_priv *priv);
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif,
-		    struct cfg80211_scan_request *req);
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
-u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
-			struct ieee80211_mgmt *frame,
-		       const u8 *ta, const u8 *ie, int ie_len, int left);
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
-			      enum ieee80211_band band,
-			      u8 n_probes);
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
-			       enum ieee80211_band band,
-			       struct ieee80211_vif *vif);
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
-
-/* For faster active scanning, scan will move to the next channel if fewer than
- * PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
- * time if it's a quiet channel (nothing responded to our probe, and there's
- * no other traffic).
- * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
-#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
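As a concrete reading of these values (an illustration, not additional driver behaviour): with IWL_ACTIVE_QUIET_TIME = 10 ms and IWL_PLCP_QUIET_THRESH = 1 packet, if not even one frame is heard on the channel within 10 ms of sending the probe request, the channel is considered quiet and the scan moves on early rather than waiting out the full active dwell time; setting the threshold to 0 disables this shortcut.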
-
-#define IWL_SCAN_CHECK_WATCHDOG		(HZ * 7)
-
-/*****************************************************
- *   S e n d i n g     H o s t     C o m m a n d s   *
- *****************************************************/
-
-const char *iwl_legacy_get_cmd_string(u8 cmd);
-int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
-				   struct iwl_host_cmd *cmd);
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
-				  u16 len, const void *data);
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
-			   const void *data,
-			   void (*callback)(struct iwl_priv *priv,
-					    struct iwl_device_cmd *cmd,
-					    struct iwl_rx_packet *pkt));
-
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI						     *
- *****************************************************/
-
-static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
-{
-	int pos;
-	u16 pci_lnk_ctl;
-	pos = pci_pcie_cap(priv->pci_dev);
-	pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
-	return pci_lnk_ctl;
-}
-
-void iwl_legacy_bg_watchdog(unsigned long data);
-u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
-					u32 usec, u32 beacon_interval);
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
-			   u32 addon, u32 beacon_interval);
-
-#ifdef CONFIG_PM
-int iwl_legacy_pci_suspend(struct device *device);
-int iwl_legacy_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_legacy_pm_ops;
-
-#define IWL_LEGACY_PM_OPS	(&iwl_legacy_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_LEGACY_PM_OPS	NULL
-
-#endif /* !CONFIG_PM */
-
-/*****************************************************
-*  Error Handling Debugging
-******************************************************/
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
-			     struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
-					   struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
-
-/*****************************************************
-*  GEOS
-******************************************************/
-int iwl_legacy_init_geos(struct iwl_priv *priv);
-void iwl_legacy_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS   *****/
-
-#define STATUS_HCMD_ACTIVE	0	/* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED	2
-#define STATUS_RF_KILL_HW	3
-#define STATUS_CT_KILL		4
-#define STATUS_INIT		5
-#define STATUS_ALIVE		6
-#define STATUS_READY		7
-#define STATUS_TEMPERATURE	8
-#define STATUS_GEO_CONFIGURED	9
-#define STATUS_EXIT_PENDING	10
-#define STATUS_STATISTICS	12
-#define STATUS_SCANNING		13
-#define STATUS_SCAN_ABORTING	14
-#define STATUS_SCAN_HW		15
-#define STATUS_POWER_PMI	16
-#define STATUS_FW_ERROR		17
-#define STATUS_CHANNEL_SWITCH_PENDING 18
-
-static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
-{
-	/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
-	 * set but EXIT_PENDING is not */
-	return test_bit(STATUS_READY, &priv->status) &&
-	       test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
-	       !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
-{
-	return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_legacy_is_init(struct iwl_priv *priv)
-{
-	return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
-{
-	return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
-{
-	return iwl_legacy_is_rfkill_hw(priv);
-}
-
-static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
-{
-	return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
-{
-
-	if (iwl_legacy_is_rfkill(priv))
-		return 0;
-
-	return iwl_legacy_is_ready(priv);
-}
-
-extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
-extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
-				       u8 flags, bool clear);
-void iwl_legacy_apm_stop(struct iwl_priv *priv);
-int iwl_legacy_apm_init(struct iwl_priv *priv);
-
-int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
-				struct iwl_rxon_context *ctx);
-static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
-				      struct iwl_rxon_context *ctx)
-{
-	return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
-}
-static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
-				      struct iwl_rxon_context *ctx)
-{
-	return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
-			struct iwl_priv *priv, enum ieee80211_band band)
-{
-	return priv->hw->wiphy->bands[band];
-}
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-				     struct ieee80211_vif *vif,
-				     struct ieee80211_bss_conf *bss_conf,
-				     u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-				struct ieee80211_tx_info *info,
-				__le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data);
-
-#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_debug_h__
-#define __iwl_legacy_debug_h__
-
-struct iwl_priv;
-extern u32 iwlegacy_debug_level;
-
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
-
-#define iwl_print_hex_error(priv, p, len)				 \
-do {									\
-	print_hex_dump(KERN_ERR, "iwl data: ",				\
-		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
-} while (0)
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...)				\
-do {									\
-	if (iwl_legacy_get_debug_level(__priv) & (level))			\
-		dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),		\
-			 "%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
-			__func__ , ## args);				\
-} while (0)
-
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)			\
-do {									\
-	if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit())	\
-		dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),		\
-			"%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
-			 __func__ , ## args);				\
-} while (0)
-
-#define iwl_print_hex_dump(priv, level, p, len) 			\
-do {									\
-	if (iwl_legacy_get_debug_level(priv) & level)				\
-		print_hex_dump(KERN_DEBUG, "iwl data: ",		\
-			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
-} while (0)
-
-#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
-				      const void *p, u32 len)
-{}
-#endif				/* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int
-iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
-	return 0;
-}
-static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif				/* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-/*
- * To use the debug system:
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of
- *
- * #define IWL_DL_xxxx VALUE
- *
- * where xxxx should be the name of the classification (for example, WEP).
- *
- * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
- * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * The active debug levels can be accessed via files
- *
- * 	/sys/module/iwl4965/parameters/debug{50}
- *	/sys/module/iwl3945/parameters/debug
- * 	/sys/class/net/wlan0/device/debug_level
- *
- * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
- */
-
-/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO		(1 << 0)
-#define IWL_DL_MAC80211		(1 << 1)
-#define IWL_DL_HCMD		(1 << 2)
-#define IWL_DL_STATE		(1 << 3)
-/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP		(1 << 4)
-#define IWL_DL_HCMD_DUMP	(1 << 5)
-#define IWL_DL_EEPROM		(1 << 6)
-#define IWL_DL_RADIO		(1 << 7)
-/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER		(1 << 8)
-#define IWL_DL_TEMP		(1 << 9)
-#define IWL_DL_NOTIF		(1 << 10)
-#define IWL_DL_SCAN		(1 << 11)
-/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC		(1 << 12)
-#define IWL_DL_DROP		(1 << 13)
-#define IWL_DL_TXPOWER		(1 << 14)
-#define IWL_DL_AP		(1 << 15)
-/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW		(1 << 16)
-#define IWL_DL_RF_KILL		(1 << 17)
-#define IWL_DL_FW_ERRORS	(1 << 18)
-#define IWL_DL_LED		(1 << 19)
-/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE		(1 << 20)
-#define IWL_DL_CALIB		(1 << 21)
-#define IWL_DL_WEP		(1 << 22)
-#define IWL_DL_TX		(1 << 23)
-/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX		(1 << 24)
-#define IWL_DL_ISR		(1 << 25)
-#define IWL_DL_HT		(1 << 26)
-#define IWL_DL_IO		(1 << 27)
-/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H		(1 << 28)
-#define IWL_DL_STATS		(1 << 29)
-#define IWL_DL_TX_REPLY		(1 << 30)
-#define IWL_DL_QOS		(1 << 31)
-
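A hypothetical standalone-C sketch (not part of this header) of how the classification bits above compose into a debug_level mask:

#include <stdio.h>

/* A few of the classification bits from the header above. */
#define IWL_DL_INFO	(1 << 0)
#define IWL_DL_MAC80211	(1 << 1)
#define IWL_DL_SCAN	(1 << 11)

int main(void)
{
	unsigned int mask = IWL_DL_INFO | IWL_DL_MAC80211 | IWL_DL_SCAN;

	/* Prints 0x00000803; echoing that value into
	 * /sys/class/net/wlan0/device/debug_level (with
	 * CONFIG_IWLWIFI_LEGACY_DEBUG=y) would enable these three classes. */
	printf("0x%08x\n", mask);
	return 0;
}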
-#define IWL_DEBUG_INFO(p, f, a...)	IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_MAC80211(p, f, a...)	IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
-#define IWL_DEBUG_TEMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
-#define IWL_DEBUG_SCAN(p, f, a...)	IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
-#define IWL_DEBUG_RX(p, f, a...)	IWL_DEBUG(p, IWL_DL_RX, f, ## a)
-#define IWL_DEBUG_TX(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX, f, ## a)
-#define IWL_DEBUG_ISR(p, f, a...)	IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
-#define IWL_DEBUG_LED(p, f, a...)	IWL_DEBUG(p, IWL_DL_LED, f, ## a)
-#define IWL_DEBUG_WEP(p, f, a...)	IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(p, f, a...)	IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...)	IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
-#define IWL_DEBUG_CALIB(p, f, a...)	IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
-#define IWL_DEBUG_FW(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW, f, ## a)
-#define IWL_DEBUG_RF_KILL(p, f, a...)	IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_DROP(p, f, a...)	IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_DROP_LIMIT(p, f, a...)	\
-		IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...)	IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...)	IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
-#define IWL_DEBUG_IO(p, f, a...)	IWL_DEBUG(p, IWL_DL_IO, f, ## a)
-#define IWL_DEBUG_RATE(p, f, a...)	IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_RATE_LIMIT(p, f, a...)	\
-		IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...)	IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
-#define IWL_DEBUG_ASSOC(p, f, a...)	\
-		IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...)	\
-		IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_HT(p, f, a...)	IWL_DEBUG(p, IWL_DL_HT, f, ## a)
-#define IWL_DEBUG_STATS(p, f, a...)	IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...)	\
-		IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_TX_REPLY(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
-		IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...)	IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
-#define IWL_DEBUG_RADIO(p, f, a...)	IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
-#define IWL_DEBUG_POWER(p, f, a...)	IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
-#define IWL_DEBUG_11H(p, f, a...)	IWL_DEBUG(p, IWL_DL_11H, f, ## a)
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 1407dca..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1314 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/ieee80211.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
-	if (!debugfs_create_file(#name, mode, parent, priv,		\
-			 &iwl_legacy_dbgfs_##name##_ops))		\
-		goto err;						\
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
-	struct dentry *__tmp;						\
-	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
-				    parent, ptr);			\
-	if (IS_ERR(__tmp) || !__tmp)					\
-		goto err;						\
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
-	struct dentry *__tmp;						\
-	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
-				   parent, ptr);			\
-	if (IS_ERR(__tmp) || !__tmp)					\
-		goto err;						\
-} while (0)
-
-/* file operation */
-#define DEBUGFS_READ_FUNC(name)                                         \
-static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file,               \
-					char __user *user_buf,          \
-					size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name)                                        \
-static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file,              \
-					const char __user *user_buf,    \
-					size_t count, loff_t *ppos);
-
-
-static int
-iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-#define DEBUGFS_READ_FILE_OPS(name) 				\
-	DEBUGFS_READ_FUNC(name);                                        \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
-	.read = iwl_legacy_dbgfs_##name##_read,				\
-	.open = iwl_legacy_dbgfs_open_file_generic,                    	\
-	.llseek = generic_file_llseek,					\
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name) 				\
-	DEBUGFS_WRITE_FUNC(name);                                       \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
-	.write = iwl_legacy_dbgfs_##name##_write,			\
-	.open = iwl_legacy_dbgfs_open_file_generic,                    	\
-	.llseek = generic_file_llseek,					\
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name)                           \
-	DEBUGFS_READ_FUNC(name);                                        \
-	DEBUGFS_WRITE_FUNC(name);                                       \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
-	.write = iwl_legacy_dbgfs_##name##_write,			\
-	.read = iwl_legacy_dbgfs_##name##_read,				\
-	.open = iwl_legacy_dbgfs_open_file_generic,			\
-	.llseek = generic_file_llseek,					\
-};
-
-static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	char *buf;
-	int pos = 0;
-
-	int cnt;
-	ssize_t ret;
-	const size_t bufsz = 100 +
-		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
-	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 iwl_legacy_get_mgmt_string(cnt),
-				 priv->tx_stats.mgmt[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
-	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 iwl_legacy_get_ctrl_string(cnt),
-				 priv->tx_stats.ctrl[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
-			 priv->tx_stats.data_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
-			 priv->tx_stats.data_bytes);
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	u32 clear_flag;
-	char buf[8];
-	int buf_size;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%x", &clear_flag) != 1)
-		return -EFAULT;
-	iwl_legacy_clear_traffic_stats(priv);
-
-	return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	char *buf;
-	int pos = 0;
-	int cnt;
-	ssize_t ret;
-	const size_t bufsz = 100 +
-		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
-	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 iwl_legacy_get_mgmt_string(cnt),
-				 priv->rx_stats.mgmt[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
-	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 iwl_legacy_get_ctrl_string(cnt),
-				 priv->rx_stats.ctrl[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
-			 priv->rx_stats.data_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
-			 priv->rx_stats.data_bytes);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-#define BYTE1_MASK 0x000000ff
-#define BYTE2_MASK 0x0000ffff
-#define BYTE3_MASK 0x00ffffff
-static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	u32 val;
-	char *buf;
-	ssize_t ret;
-	int i;
-	int pos = 0;
-	struct iwl_priv *priv = file->private_data;
-	size_t bufsz;
-
-	/* default is to dump the entire data segment */
-	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
-		priv->dbgfs_sram_offset = 0x800000;
-		if (priv->ucode_type == UCODE_INIT)
-			priv->dbgfs_sram_len = priv->ucode_init_data.len;
-		else
-			priv->dbgfs_sram_len = priv->ucode_data.len;
-	}
-	bufsz =  30 + priv->dbgfs_sram_len * sizeof(char) * 10;
-	buf = kmalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
-			priv->dbgfs_sram_len);
-	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
-			priv->dbgfs_sram_offset);
-	for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
-		val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
-					priv->dbgfs_sram_len - i);
-		if (i < 4) {
-			switch (i) {
-			case 1:
-				val &= BYTE1_MASK;
-				break;
-			case 2:
-				val &= BYTE2_MASK;
-				break;
-			case 3:
-				val &= BYTE3_MASK;
-				break;
-			}
-		}
-		if (!(i % 16))
-			pos += scnprintf(buf + pos, bufsz - pos, "\n");
-		pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[64];
-	int buf_size;
-	u32 offset, len;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-
-	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
-		priv->dbgfs_sram_offset = offset;
-		priv->dbgfs_sram_len = len;
-	} else {
-		priv->dbgfs_sram_offset = 0;
-		priv->dbgfs_sram_len = 0;
-	}
-
-	return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	struct iwl_station_entry *station;
-	int max_sta = priv->hw_params.max_stations;
-	char *buf;
-	int i, j, pos = 0;
-	ssize_t ret;
-	/* Add 30 for initial string */
-	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
-
-	buf = kmalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
-			priv->num_stations);
-
-	for (i = 0; i < max_sta; i++) {
-		station = &priv->stations[i];
-		if (!station->used)
-			continue;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "station %d - addr: %pM, flags: %#x\n",
-				 i, station->sta.sta.addr,
-				 station->sta.station_flags_msk);
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"TID\tseq_num\ttxq_id\tframes\ttfds\t");
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"start_idx\tbitmap\t\t\trate_n_flags\n");
-
-		for (j = 0; j < MAX_TID_COUNT; j++) {
-			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
-				j, station->tid[j].seq_number,
-				station->tid[j].agg.txq_id,
-				station->tid[j].agg.frame_count,
-				station->tid[j].tfds_in_queue,
-				station->tid[j].agg.start_idx,
-				station->tid[j].agg.bitmap,
-				station->tid[j].agg.rate_n_flags);
-
-			if (station->tid[j].agg.wait_for_ba)
-				pos += scnprintf(buf + pos, bufsz - pos,
-						 " - waitforba");
-			pos += scnprintf(buf + pos, bufsz - pos, "\n");
-		}
-
-		pos += scnprintf(buf + pos, bufsz - pos, "\n");
-	}
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
-				       char __user *user_buf,
-				       size_t count,
-				       loff_t *ppos)
-{
-	ssize_t ret;
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0, ofs = 0, buf_size = 0;
-	const u8 *ptr;
-	char *buf;
-	u16 eeprom_ver;
-	size_t eeprom_len = priv->cfg->base_params->eeprom_size;
-	buf_size = 4 * eeprom_len + 256;
-
-	if (eeprom_len % 16) {
-		IWL_ERR(priv, "NVM size is not multiple of 16.\n");
-		return -ENODATA;
-	}
-
-	ptr = priv->eeprom;
-	if (!ptr) {
-		IWL_ERR(priv, "Invalid EEPROM memory\n");
-		return -ENOMEM;
-	}
-
-	/* 4 characters for byte 0xYY */
-	buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-	eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
-	pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
-			"version: 0x%x\n", eeprom_ver);
-	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-				   buf_size - pos, 0);
-		pos += strlen(buf + pos);
-		if (buf_size - pos > 0)
-			buf[pos++] = '\n';
-	}
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
-				       size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	struct ieee80211_channel *channels = NULL;
-	const struct ieee80211_supported_band *supp_band = NULL;
-	int pos = 0, i, bufsz = PAGE_SIZE;
-	char *buf;
-	ssize_t ret;
-
-	if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
-		return -EAGAIN;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
-	if (supp_band) {
-		channels = supp_band->channels;
-
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Displaying %d channels in 2.4GHz band (802.11bg):\n",
-				supp_band->n_channels);
-
-		for (i = 0; i < supp_band->n_channels; i++)
-			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d: %ddBm: BSS%s%s, %s.\n",
-				channels[i].hw_value,
-				channels[i].max_power,
-				channels[i].flags & IEEE80211_CHAN_RADAR ?
-				" (IEEE 802.11h required)" : "",
-				((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
-				|| (channels[i].flags &
-				IEEE80211_CHAN_RADAR)) ? "" :
-				", IBSS",
-				channels[i].flags &
-				IEEE80211_CHAN_PASSIVE_SCAN ?
-				"passive only" : "active/passive");
-	}
-	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
-	if (supp_band) {
-		channels = supp_band->channels;
-
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Displaying %d channels in 5.2GHz band (802.11a)\n",
-				supp_band->n_channels);
-
-		for (i = 0; i < supp_band->n_channels; i++)
-			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d: %ddBm: BSS%s%s, %s.\n",
-				channels[i].hw_value,
-				channels[i].max_power,
-				channels[i].flags & IEEE80211_CHAN_RADAR ?
-				" (IEEE 802.11h required)" : "",
-				((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
-				|| (channels[i].flags &
-				IEEE80211_CHAN_RADAR)) ? "" :
-				", IBSS",
-				channels[i].flags &
-				IEEE80211_CHAN_PASSIVE_SCAN ?
-				"passive only" : "active/passive");
-	}
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	char buf[512];
-	int pos = 0;
-	const size_t bufsz = sizeof(buf);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
-		test_bit(STATUS_HCMD_ACTIVE, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
-		test_bit(STATUS_INT_ENABLED, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
-		test_bit(STATUS_RF_KILL_HW, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
-		test_bit(STATUS_CT_KILL, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
-		test_bit(STATUS_INIT, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
-		test_bit(STATUS_ALIVE, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
-		test_bit(STATUS_READY, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
-		test_bit(STATUS_TEMPERATURE, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
-		test_bit(STATUS_GEO_CONFIGURED, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
-		test_bit(STATUS_EXIT_PENDING, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
-		test_bit(STATUS_STATISTICS, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
-		test_bit(STATUS_SCANNING, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
-		test_bit(STATUS_SCAN_ABORTING, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
-		test_bit(STATUS_SCAN_HW, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
-		test_bit(STATUS_POWER_PMI, &priv->status));
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
-		test_bit(STATUS_FW_ERROR, &priv->status));
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	int cnt = 0;
-	char *buf;
-	int bufsz = 24 * 64; /* 24 items * 64 char per item */
-	ssize_t ret;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"Interrupt Statistics Report:\n");
-
-	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
-		priv->isr_stats.hw);
-	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
-		priv->isr_stats.sw);
-	if (priv->isr_stats.sw || priv->isr_stats.hw) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-			"\tLast Restarting Code:  0x%X\n",
-			priv->isr_stats.err_code);
-	}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
-		priv->isr_stats.sch);
-	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
-		priv->isr_stats.alive);
-#endif
-	pos += scnprintf(buf + pos, bufsz - pos,
-		"HW RF KILL switch toggled:\t %u\n",
-		priv->isr_stats.rfkill);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
-		priv->isr_stats.ctkill);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
-		priv->isr_stats.wakeup);
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-		"Rx command responses:\t\t %u\n",
-		priv->isr_stats.rx);
-	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
-		if (priv->isr_stats.rx_handlers[cnt] > 0)
-			pos += scnprintf(buf + pos, bufsz - pos,
-				"\tRx handler[%36s]:\t\t %u\n",
-				iwl_legacy_get_cmd_string(cnt),
-				priv->isr_stats.rx_handlers[cnt]);
-	}
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
-		priv->isr_stats.tx);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
-		priv->isr_stats.unhandled);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	u32 reset_flag;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%x", &reset_flag) != 1)
-		return -EFAULT;
-	if (reset_flag == 0)
-		iwl_legacy_clear_isr_stats(priv);
-
-	return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
-				       size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	struct iwl_rxon_context *ctx;
-	int pos = 0, i;
-	char buf[256 * NUM_IWL_RXON_CTX];
-	const size_t bufsz = sizeof(buf);
-
-	for_each_context(priv, ctx) {
-		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
-				 ctx->ctxid);
-		for (i = 0; i < AC_NUM; i++) {
-			pos += scnprintf(buf + pos, bufsz - pos,
-				"\tcw_min\tcw_max\taifsn\ttxop\n");
-			pos += scnprintf(buf + pos, bufsz - pos,
-				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
-				ctx->qos_data.def_qos_parm.ac[i].cw_min,
-				ctx->qos_data.def_qos_parm.ac[i].cw_max,
-				ctx->qos_data.def_qos_parm.ac[i].aifsn,
-				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
-		}
-		pos += scnprintf(buf + pos, bufsz - pos, "\n");
-	}
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int ht40;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &ht40) != 1)
-		return -EFAULT;
-	if (!iwl_legacy_is_any_associated(priv))
-		priv->disable_ht40 = ht40 ? true : false;
-	else {
-		IWL_ERR(priv, "Sta associated with AP - "
-			"Change to 40MHz channel support is not allowed\n");
-		return -EINVAL;
-	}
-
-	return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[100];
-	int pos = 0;
-	const size_t bufsz = sizeof(buf);
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"11n 40MHz Mode: %s\n",
-			priv->disable_ht40 ? "Disabled" : "Enabled");
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_FILE_OPS(nvm);
-DEBUGFS_READ_FILE_OPS(stations);
-DEBUGFS_READ_FILE_OPS(channels);
-DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0, ofs = 0;
-	int cnt = 0, entry;
-	struct iwl_tx_queue *txq;
-	struct iwl_queue *q;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	char *buf;
-	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
-		(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
-	const u8 *ptr;
-	ssize_t ret;
-
-	if (!priv->txq) {
-		IWL_ERR(priv, "txq not ready\n");
-		return -EAGAIN;
-	}
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate buffer\n");
-		return -ENOMEM;
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
-	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-		txq = &priv->txq[cnt];
-		q = &txq->q;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"q[%d]: read_ptr: %u, write_ptr: %u\n",
-				cnt, q->read_ptr, q->write_ptr);
-	}
-	if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
-		ptr = priv->tx_traffic;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Tx Traffic idx: %u\n",	priv->tx_traffic_idx);
-		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
-			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
-			     entry++,  ofs += 16) {
-				pos += scnprintf(buf + pos, bufsz - pos,
-						"0x%.4x ", ofs);
-				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
-						   buf + pos, bufsz - pos, 0);
-				pos += strlen(buf + pos);
-				if (bufsz - pos > 0)
-					buf[pos++] = '\n';
-			}
-		}
-	}
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"read: %u, write: %u\n",
-			 rxq->read, rxq->write);
-
-	if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
-		ptr = priv->rx_traffic;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Rx Traffic idx: %u\n",	priv->rx_traffic_idx);
-		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
-			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
-			     entry++,  ofs += 16) {
-				pos += scnprintf(buf + pos, bufsz - pos,
-						"0x%.4x ", ofs);
-				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
-						   buf + pos, bufsz - pos, 0);
-				pos += strlen(buf + pos);
-				if (bufsz - pos > 0)
-					buf[pos++] = '\n';
-			}
-		}
-	}
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int traffic_log;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &traffic_log) != 1)
-		return -EFAULT;
-	if (traffic_log == 0)
-		iwl_legacy_reset_traffic_log(priv);
-
-	return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	struct iwl_tx_queue *txq;
-	struct iwl_queue *q;
-	char *buf;
-	int pos = 0;
-	int cnt;
-	int ret;
-	const size_t bufsz = sizeof(char) * 64 *
-				priv->cfg->base_params->num_of_queues;
-
-	if (!priv->txq) {
-		IWL_ERR(priv, "txq not ready\n");
-		return -EAGAIN;
-	}
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-		txq = &priv->txq[cnt];
-		q = &txq->q;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"hwq %.2d: read=%u write=%u stop=%d"
-				" swq_id=%#.2x (ac %d/hwq %d)\n",
-				cnt, q->read_ptr, q->write_ptr,
-				!!test_bit(cnt, priv->queue_stopped),
-				txq->swq_id, txq->swq_id & 3,
-				(txq->swq_id >> 2) & 0x1f);
-		if (cnt >= 4)
-			continue;
-		/* for the ACs, display the stop count too */
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"        stop-count: %d\n",
-				atomic_read(&priv->queue_stop_count[cnt]));
-	}
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	char buf[256];
-	int pos = 0;
-	const size_t bufsz = sizeof(buf);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
-						rxq->read);
-	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
-						rxq->write);
-	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
-						rxq->free_count);
-	if (rxq->rb_stts) {
-		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
-			 le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
-	} else {
-		pos += scnprintf(buf + pos, bufsz - pos,
-					"closed_rb_num: Not Allocated\n");
-	}
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
-			user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
-			user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
-			user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	int cnt = 0;
-	char *buf;
-	int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
-	ssize_t ret;
-	struct iwl_sensitivity_data *data;
-
-	data = &priv->sensitivity_data;
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
-			data->auto_corr_ofdm);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"auto_corr_ofdm_mrc:\t\t %u\n",
-			data->auto_corr_ofdm_mrc);
-	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
-			data->auto_corr_ofdm_x1);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"auto_corr_ofdm_mrc_x1:\t\t %u\n",
-			data->auto_corr_ofdm_mrc_x1);
-	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
-			data->auto_corr_cck);
-	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
-			data->auto_corr_cck_mrc);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"last_bad_plcp_cnt_ofdm:\t\t %u\n",
-			data->last_bad_plcp_cnt_ofdm);
-	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
-			data->last_fa_cnt_ofdm);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"last_bad_plcp_cnt_cck:\t\t %u\n",
-			data->last_bad_plcp_cnt_cck);
-	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
-			data->last_fa_cnt_cck);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
-			data->nrg_curr_state);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
-			data->nrg_prev_state);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
-	for (cnt = 0; cnt < 10; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos, " %u",
-				data->nrg_value[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
-	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos, " %u",
-				data->nrg_silence_rssi[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
-			data->nrg_silence_ref);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
-			data->nrg_energy_idx);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
-			data->nrg_silence_idx);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
-			data->nrg_th_cck);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"nrg_auto_corr_silence_diff:\t %u\n",
-			data->nrg_auto_corr_silence_diff);
-	pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
-			data->num_in_cck_no_fa);
-	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
-			data->nrg_th_ofdm);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-
-static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	int cnt = 0;
-	char *buf;
-	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
-	ssize_t ret;
-	struct iwl_chain_noise_data *data;
-
-	data = &priv->chain_noise_data;
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
-		return -ENOMEM;
-	}
-
-	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
-			data->active_chains);
-	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
-			data->chain_noise_a);
-	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
-			data->chain_noise_b);
-	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
-			data->chain_noise_c);
-	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
-			data->chain_signal_a);
-	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
-			data->chain_signal_b);
-	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
-			data->chain_signal_c);
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
-			data->beacon_count);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
-	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos, " %u",
-				data->disconn_array[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
-	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos, " %u",
-				data->delta_gain_code[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
-			data->radio_write);
-	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
-			data->state);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
-						    char __user *user_buf,
-						    size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[60];
-	int pos = 0;
-	const size_t bufsz = sizeof(buf);
-	u32 pwrsave_status;
-
-	pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
-			CSR_GP_REG_POWER_SAVE_STATUS_MSK;
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
-	pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
-		(pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
-		(pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
-		(pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
-		"error");
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int clear;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &clear) != 1)
-		return -EFAULT;
-
-	/* make request to uCode to retrieve statistics information */
-	mutex_lock(&priv->mutex);
-	iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
-	mutex_unlock(&priv->mutex);
-
-	return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int len = 0;
-	char buf[20];
-
-	len = sprintf(buf, "0x%04X\n",
-		le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int len = 0;
-	char buf[20];
-
-	len = sprintf(buf, "0x%04X\n",
-	le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char *buf;
-	int pos = 0;
-	ssize_t ret = -EFAULT;
-
-	if (priv->cfg->ops->lib->dump_fh) {
-		ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
-		if (buf) {
-			ret = simple_read_from_buffer(user_buf,
-						      count, ppos, buf, pos);
-			kfree(buf);
-		}
-	}
-
-	return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char buf[12];
-	const size_t bufsz = sizeof(buf);
-
-	pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
-			priv->missed_beacon_threshold);
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int missed;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &missed) != 1)
-		return -EINVAL;
-
-	if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
-	    missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
-		priv->missed_beacon_threshold =
-			IWL_MISSED_BEACON_THRESHOLD_DEF;
-	else
-		priv->missed_beacon_threshold = missed;
-
-	return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0;
-	char buf[300];
-	const size_t bufsz = sizeof(buf);
-	struct iwl_force_reset *force_reset;
-
-	force_reset = &priv->force_reset;
-
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"\tnumber of reset request: %d\n",
-			force_reset->reset_request_count);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"\tnumber of reset request success: %d\n",
-			force_reset->reset_success_count);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"\tnumber of reset request reject: %d\n",
-			force_reset->reset_reject_count);
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"\treset duration: %lu\n",
-			force_reset->reset_duration);
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	int ret;
-	struct iwl_priv *priv = file->private_data;
-
-	ret = iwl_legacy_force_reset(priv, true);
-
-	return ret ? ret : count;
-}
-
-static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int timeout;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &timeout) != 1)
-		return -EINVAL;
-	if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
-		timeout = IWL_DEF_WD_TIMEOUT;
-
-	priv->cfg->base_params->wd_timeout = timeout;
-	iwl_legacy_setup_watchdog(priv);
-	return count;
-}
-
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
-DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_general_stats);
-DEBUGFS_READ_FILE_OPS(sensitivity);
-DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(power_save_status);
-DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
-DEBUGFS_READ_FILE_OPS(rxon_flags);
-DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
-
-/*
- * Create the debugfs files and directories
- */
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
-	struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
-	dir_drv = debugfs_create_dir(name, phyd);
-	if (!dir_drv)
-		return -ENOMEM;
-
-	priv->debugfs_dir = dir_drv;
-
-	dir_data = debugfs_create_dir("data", dir_drv);
-	if (!dir_data)
-		goto err;
-	dir_rf = debugfs_create_dir("rf", dir_drv);
-	if (!dir_rf)
-		goto err;
-	dir_debug = debugfs_create_dir("debug", dir_drv);
-	if (!dir_debug)
-		goto err;
-
-	DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
-	DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
-	DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
-	DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
-	DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
-	DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
-	DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
-	DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
-	DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
-	DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
-	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
-
-	if (priv->cfg->base_params->sensitivity_calib_by_driver)
-		DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
-	if (priv->cfg->base_params->chain_noise_calib_by_driver)
-		DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
-	if (priv->cfg->base_params->sensitivity_calib_by_driver)
-		DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
-				 &priv->disable_sens_cal);
-	if (priv->cfg->base_params->chain_noise_calib_by_driver)
-		DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
-				 &priv->disable_chain_noise_cal);
-	DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
-				&priv->disable_tx_power_cal);
-	return 0;
-
-err:
-	IWL_ERR(priv, "Can't create the debugfs directory\n");
-	iwl_legacy_dbgfs_unregister(priv);
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
-
-/*
- * Remove the debugfs files and directories
- */
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-	if (!priv->debugfs_dir)
-		return;
-
-	debugfs_remove_recursive(priv->debugfs_dir);
-	priv->debugfs_dir = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786ed..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-4965-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_legacy_dev_h__
-#define __iwl_legacy_dev_h__
-
-#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/wait.h>
-#include <net/ieee80211_radiotap.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-legacy-rs.h"
-
-struct iwl_tx_queue;
-
-/* CT-KILL constants */
-#define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
-
-/* Default noise level to report when noise measurement is not available.
- *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
- *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- *   Also, -127 works better than 0 when averaging frames with/without
- *   noise info (e.g. averaging might be done in app); measured dBm values are
- *   always negative ... using a negative value as the default keeps all
- *   averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- *   a value of 0 means RTS on all data/management packets
- *   a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- *   than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD     2347U
-#define MIN_RTS_THRESHOLD         0U
-#define MAX_RTS_THRESHOLD         2347U
-#define MAX_MSDU_SIZE		  2304U
-#define MAX_MPDU_SIZE		  2346U
-#define DEFAULT_BEACON_INTERVAL   100U
-#define	DEFAULT_SHORT_RETRY_LIMIT 7U
-#define	DEFAULT_LONG_RETRY_LIMIT  4U
-
-struct iwl_rx_mem_buffer {
-	dma_addr_t page_dma;
-	struct page *page;
-	struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
-	/* only for SYNC commands, iff the reply skb is wanted */
-	struct iwl_host_cmd *source;
-	/*
-	 * only for ASYNC commands
-	 * (which is somewhat stupid -- look at iwl-sta.c for instance
-	 * which duplicates a bunch of code because the callback isn't
-	 * invoked for SYNC commands, if it were and its result passed
-	 * through it would be simpler...)
-	 */
-	void (*callback)(struct iwl_priv *priv,
-			 struct iwl_device_cmd *cmd,
-			 struct iwl_rx_packet *pkt);
-
-	/* The CMD_SIZE_HUGE flag bit indicates that the command
-	 * structure is stored at the end of the shared queue memory. */
-	u32 flags;
-
-	DEFINE_DMA_UNMAP_ADDR(mapping);
-	DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues
- */
-struct iwl_queue {
-	int n_bd;              /* number of BDs in this queue */
-	int write_ptr;       /* 1st empty entry (index); host write pointer */
-	int read_ptr;         /* last used entry (index); host read pointer */
-	/* used for monitoring and recovering a stuck queue */
-	dma_addr_t dma_addr;   /* physical addr for BD's */
-	int n_window;	       /* safe queue window */
-	u32 id;
-	int low_mark;	       /* low watermark, resume queue if free
-				* space more than this */
-	int high_mark;         /* high watermark, stop queue if free
-				* space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
-	struct sk_buff *skb;
-	struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @tfds: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- *
- * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
- * frame descriptors) and the required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
-	struct iwl_queue q;
-	void *tfds;
-	struct iwl_device_cmd **cmd;
-	struct iwl_cmd_meta *meta;
-	struct iwl_tx_info *txb;
-	unsigned long time_stamp;
-	u8 need_update;
-	u8 sched_retry;
-	u8 active;
-	u8 swq_id;
-};
-
-#define IWL_NUM_SCAN_RATES         (2)
-
-struct iwl4965_channel_tgd_info {
-	u8 type;
-	s8 max_power;
-};
-
-struct iwl4965_channel_tgh_info {
-	s64 last_radar_time;
-};
-
-#define IWL4965_MAX_RATE (33)
-
-struct iwl3945_clip_group {
-	/* maximum power level to prevent clipping for each rate, derived by
-	 *   us from this band's saturation power in EEPROM */
-	const s8 clip_powers[IWL_MAX_RATES];
-};
-
-/* current Tx power values to use, one for each rate for each channel.
- * requested power is limited by:
- * -- regulatory EEPROM limits for this channel
- * -- hardware capabilities (clip-powers)
- * -- spectrum management
- * -- user preference (e.g. iwconfig)
- * when requested power is set, base power index must also be set. */
-struct iwl3945_channel_power_info {
-	struct iwl3945_tx_power tpc;	/* actual radio and DSP gain settings */
-	s8 power_table_index;	/* actual (compensated) index into gain table */
-	s8 base_power_index;	/* gain index for power at factory temp. */
-	s8 requested_power;	/* power (dBm) requested for this chnl/rate */
-};
-
-/* current scan Tx power values to use, one for each scan rate for each
- * channel. */
-struct iwl3945_scan_power_info {
-	struct iwl3945_tx_power tpc;	/* actual radio and DSP gain settings */
-	s8 power_table_index;	/* actual (compensated) index into gain table */
-	s8 requested_power;	/* scan pwr (dBm) requested for chnl/rate */
-};
-
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- *     with one another!
- */
-struct iwl_channel_info {
-	struct iwl4965_channel_tgd_info tgd;
-	struct iwl4965_channel_tgh_info tgh;
-	struct iwl_eeprom_channel eeprom;	/* EEPROM regulatory limit */
-	struct iwl_eeprom_channel ht40_eeprom;	/* EEPROM regulatory limit for
-						 * HT40 channel */
-
-	u8 channel;	  /* channel number */
-	u8 flags;	  /* flags copied from EEPROM */
-	s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
-	s8 curr_txpow;	  /* (dBm) regulatory/spectrum/user (not h/w) limit */
-	s8 min_power;	  /* always 0 */
-	s8 scan_power;	  /* (dBm) regul. eeprom, direct scans, any rate */
-
-	u8 group_index;	  /* 0-4, maps channel to group1/2/3/4/5 */
-	u8 band_index;	  /* 0-4, maps channel to band1/2/3/4/5 */
-	enum ieee80211_band band;
-
-	/* HT40 channel info */
-	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
-	u8 ht40_flags;		/* flags copied from EEPROM */
-	u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-
-	/* Radio/DSP gain settings for each "normal" data Tx rate.
-	 * These include, in addition to RF and DSP gain, a few fields for
-	 *   remembering/modifying gain settings (indexes). */
-	struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
-
-	/* Radio/DSP gain settings for each scan rate, for directed scans. */
-	struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
-};
-
-#define IWL_TX_FIFO_BK		0	/* shared */
-#define IWL_TX_FIFO_BE		1
-#define IWL_TX_FIFO_VI		2	/* shared */
-#define IWL_TX_FIFO_VO		3
-#define IWL_TX_FIFO_UNUSED	-1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES	10
-
-#define IWL_DEFAULT_CMD_QUEUE_NUM	4
-
-#define IEEE80211_DATA_LEN              2304
-#define IEEE80211_4ADDR_LEN             30
-#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl_frame {
-	union {
-		struct ieee80211_hdr frame;
-		struct iwl_tx_beacon_cmd beacon;
-		u8 raw[IEEE80211_FRAME_LEN];
-		u8 cmd[360];
-	} u;
-	struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-enum {
-	CMD_SYNC = 0,
-	CMD_SIZE_NORMAL = 0,
-	CMD_NO_SKB = 0,
-	CMD_SIZE_HUGE = (1 << 0),
-	CMD_ASYNC = (1 << 1),
-	CMD_WANT_SKB = (1 << 2),
-	CMD_MAPPED = (1 << 3),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
- */
-struct iwl_device_cmd {
-	struct iwl_cmd_header hdr;	/* uCode API */
-	union {
-		u32 flags;
-		u8 val8;
-		u16 val16;
-		u32 val32;
-		struct iwl_tx_cmd tx;
-		u8 payload[DEF_CMD_PAYLOAD_SIZE];
-	} __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-
-struct iwl_host_cmd {
-	const void *data;
-	unsigned long reply_page;
-	void (*callback)(struct iwl_priv *priv,
-			 struct iwl_device_cmd *cmd,
-			 struct iwl_rx_packet *pkt);
-	u32 flags;
-	u16 len;
-	u8 id;
-};
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
-
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
-	__le32 *bd;
-	dma_addr_t bd_dma;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-	u32 read;
-	u32 write;
-	u32 free_count;
-	u32 write_actual;
-	struct list_head rx_free;
-	struct list_head rx_used;
-	int need_update;
-	struct iwl_rb_status *rb_stts;
-	dma_addr_t rb_stts_dma;
-	spinlock_t lock;
-};
-
-#define IWL_SUPPORTED_RATES_IE_LEN         8
-
-#define MAX_TID_COUNT        9
-
-#define IWL_INVALID_RATE     0xFF
-#define IWL_INVALID_VALUE    -1
-
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap: bitmap of frames pending ACK in the Tx window, one bit per frame
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA).  This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
-	u16 txq_id;
-	u16 frame_count;
-	u16 wait_for_ba;
-	u16 start_idx;
-	u64 bitmap;
-	u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
-	u8 state;
-};
-
-
-struct iwl_tid_data {
-	u16 seq_number; /* 4965 only */
-	u16 tfds_in_queue;
-	struct iwl_ht_agg agg;
-};
-
-struct iwl_hw_key {
-	u32 cipher;
-	int keylen;
-	u8 keyidx;
-	u8 key[32];
-};
-
-union iwl_ht_rate_supp {
-	u16 rates;
-	struct {
-		u8 siso_rate;
-		u8 mimo_rate;
-	};
-};
-
-#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
-
-struct iwl_ht_config {
-	bool single_chain_sufficient;
-	enum ieee80211_smps_mode smps; /* current smps mode */
-};
-
-/* QoS structures */
-struct iwl_qos_info {
-	int qos_active;
-	struct iwl_qosparam_cmd def_qos_parm;
-};
-
-/*
- * Structure should be accessed with sta_lock held. When station addition
- * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
- * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
- * sta_lock held.
- */
-struct iwl_station_entry {
-	struct iwl_legacy_addsta_cmd sta;
-	struct iwl_tid_data tid[MAX_TID_COUNT];
-	u8 used, ctxid;
-	struct iwl_hw_key keyinfo;
-	struct iwl_link_quality_cmd *lq;
-};
-
-struct iwl_station_priv_common {
-	struct iwl_rxon_context *ctx;
-	u8 sta_id;
-};
-
-/*
- * iwl_station_priv: Driver's private station information
- *
- * When mac80211 creates a station it reserves some space (hw->sta_data_size)
- * in the structure for use by the driver. This structure is placed in that
- * space.
- *
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl_station_priv {
-	struct iwl_station_priv_common common;
-	struct iwl_lq_sta lq_sta;
-	atomic_t pending_frames;
-	bool client;
-	bool asleep;
-};
-
-/**
- * struct iwl_vif_priv - driver's private per-interface information
- *
- * When mac80211 allocates a virtual interface, it can allocate
- * space for us to put data into.
- */
-struct iwl_vif_priv {
-	struct iwl_rxon_context *ctx;
-	u8 ibss_bssid_sta_id;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
-	void *v_addr;		/* access by driver */
-	dma_addr_t p_addr;	/* access by card's busmaster DMA */
-	u32 len;		/* bytes */
-};
-
-/* uCode file layout */
-struct iwl_ucode_header {
-	__le32 ver;	/* major/minor/API/serial */
-	struct {
-		__le32 inst_size;	/* bytes of runtime code */
-		__le32 data_size;	/* bytes of runtime data */
-		__le32 init_size;	/* bytes of init code */
-		__le32 init_data_size;	/* bytes of init data */
-		__le32 boot_size;	/* bytes of bootstrap code */
-		u8 data[0];		/* in same order as sizes */
-	} v1;
-};
-
-struct iwl4965_ibss_seq {
-	u8 mac[ETH_ALEN];
-	u16 seq_num;
-	u16 frag_num;
-	unsigned long packet_time;
-	struct list_head list;
-};
-
-struct iwl_sensitivity_ranges {
-	u16 min_nrg_cck;
-	u16 max_nrg_cck;
-
-	u16 nrg_th_cck;
-	u16 nrg_th_ofdm;
-
-	u16 auto_corr_min_ofdm;
-	u16 auto_corr_min_ofdm_mrc;
-	u16 auto_corr_min_ofdm_x1;
-	u16 auto_corr_min_ofdm_mrc_x1;
-
-	u16 auto_corr_max_ofdm;
-	u16 auto_corr_max_ofdm_mrc;
-	u16 auto_corr_max_ofdm_x1;
-	u16 auto_corr_max_ofdm_mrc_x1;
-
-	u16 auto_corr_max_cck;
-	u16 auto_corr_max_cck_mrc;
-	u16 auto_corr_min_cck;
-	u16 auto_corr_min_cck_mrc;
-
-	u16 barker_corr_th_min;
-	u16 barker_corr_th_min_mrc;
-	u16 nrg_th_cca;
-};
-
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations: maximum number of stations supported
- * @ht40_channel: is 40MHz width possible per band:
- * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_xxx_size: for ucode uses
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
- */
-struct iwl_hw_params {
-	u8 max_txq_num;
-	u8 dma_chnl_num;
-	u16 scd_bc_tbls_size;
-	u32 tfd_size;
-	u8  tx_chains_num;
-	u8  rx_chains_num;
-	u8  valid_tx_ant;
-	u8  valid_rx_ant;
-	u16 max_rxq_size;
-	u16 max_rxq_log;
-	u32 rx_page_order;
-	u32 rx_wrt_ptr_reg;
-	u8  max_stations;
-	u8  ht40_channel;
-	u8  max_beacon_itrvl;	/* in 1024 ms */
-	u32 max_inst_size;
-	u32 max_data_size;
-	u32 max_bsm_size;
-	u32 ct_kill_threshold; /* value in hw-dependent units */
-	u16 beacon_time_tsf_bits;
-	const struct iwl_sensitivity_ranges *sens;
-};
-
-
-/******************************************************************************
- *
- * Functions implemented in core module which are forward declared here
- * for use by iwl-[4-5].c
- *
 - * NOTE:  The implementations of these functions are not hardware specific,
 - * which is why they are in the core module files.
- *
- * Naming convention --
- * iwl_         <-- Is part of iwlwifi
- * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl4965_bg_      <-- Called from work queue context
- * iwl4965_mac_     <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
-extern int iwl_legacy_queue_space(const struct iwl_queue *q);
-static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
-{
-	return q->write_ptr >= q->read_ptr ?
-		(i >= q->read_ptr && i < q->write_ptr) :
-		!(i < q->read_ptr && i >= q->write_ptr);
-}
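
A minimal sketch of the wrap-around behaviour of iwl_legacy_queue_used()
above, using hypothetical read/write pointers on a 256-entry ring (the
function name queue_used_example is illustrative, not part of the driver):

	static void queue_used_example(struct iwl_queue *q)
	{
		q->read_ptr = 250;
		q->write_ptr = 4;

		/* the "used" window wraps: indices 250..255 and 0..3 */
		WARN_ON(!iwl_legacy_queue_used(q, 252));
		WARN_ON(!iwl_legacy_queue_used(q, 2));
		WARN_ON(iwl_legacy_queue_used(q, 100));
	}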
-
-
-static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
-								int is_huge)
-{
-	/*
-	 * This is for the init calibration result and scan commands, which
-	 * require a buffer > TFD_MAX_PAYLOAD_SIZE and therefore use the big
-	 * buffer at the end of the command array
-	 */
-	if (is_huge)
-		return q->n_window;	/* must be power of 2 */
-
-	/* Otherwise, use normal size buffers */
-	return index & (q->n_window - 1);
-}
-
-
-struct iwl_dma_ptr {
-	dma_addr_t dma;
-	void *addr;
-	size_t size;
-};
-
-#define IWL_OPERATION_MODE_AUTO     0
-#define IWL_OPERATION_MODE_HT_ONLY  1
-#define IWL_OPERATION_MODE_MIXED    2
-#define IWL_OPERATION_MODE_20MHZ    3
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
-
-/* Sensitivity and chain noise calibration */
-#define INITIALIZATION_VALUE		0xFFFF
-#define IWL4965_CAL_NUM_BEACONS		20
-#define IWL_CAL_NUM_BEACONS		16
-#define MAXIMUM_ALLOWED_PATHLOSS	15
-
-#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
-
-#define MAX_FA_OFDM  50
-#define MIN_FA_OFDM  5
-#define MAX_FA_CCK   50
-#define MIN_FA_CCK   5
-
-#define AUTO_CORR_STEP_OFDM       1
-
-#define AUTO_CORR_STEP_CCK     3
-#define AUTO_CORR_MAX_TH_CCK   160
-
-#define NRG_DIFF               2
-#define NRG_STEP_CCK           2
-#define NRG_MARGIN             8
-#define MAX_NUMBER_CCK_NO_FA 100
-
-#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
-
-#define CHAIN_A             0
-#define CHAIN_B             1
-#define CHAIN_C             2
-#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
-#define ALL_BAND_FILTER			0xFF00
-#define IN_BAND_FILTER			0xFF
-#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF
-
-#define NRG_NUM_PREV_STAT_L     20
-#define NUM_RX_CHAINS           3
-
-enum iwl4965_false_alarm_state {
-	IWL_FA_TOO_MANY = 0,
-	IWL_FA_TOO_FEW = 1,
-	IWL_FA_GOOD_RANGE = 2,
-};
-
-enum iwl4965_chain_noise_state {
-	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
-	IWL_CHAIN_NOISE_ACCUMULATE,
-	IWL_CHAIN_NOISE_CALIBRATED,
-	IWL_CHAIN_NOISE_DONE,
-};
-
-enum iwl4965_calib_enabled_state {
-	IWL_CALIB_DISABLED = 0,  /* must be 0 */
-	IWL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
-	IWL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
-	void *buf;
-	size_t buf_len;
-};
-
-enum ucode_type {
-	UCODE_NONE = 0,
-	UCODE_INIT,
-	UCODE_RT
-};
-
-/* Sensitivity calib data */
-struct iwl_sensitivity_data {
-	u32 auto_corr_ofdm;
-	u32 auto_corr_ofdm_mrc;
-	u32 auto_corr_ofdm_x1;
-	u32 auto_corr_ofdm_mrc_x1;
-	u32 auto_corr_cck;
-	u32 auto_corr_cck_mrc;
-
-	u32 last_bad_plcp_cnt_ofdm;
-	u32 last_fa_cnt_ofdm;
-	u32 last_bad_plcp_cnt_cck;
-	u32 last_fa_cnt_cck;
-
-	u32 nrg_curr_state;
-	u32 nrg_prev_state;
-	u32 nrg_value[10];
-	u8  nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
-	u32 nrg_silence_ref;
-	u32 nrg_energy_idx;
-	u32 nrg_silence_idx;
-	u32 nrg_th_cck;
-	s32 nrg_auto_corr_silence_diff;
-	u32 num_in_cck_no_fa;
-	u32 nrg_th_ofdm;
-
-	u16 barker_corr_th_min;
-	u16 barker_corr_th_min_mrc;
-	u16 nrg_th_cca;
-};
-
-/* Chain noise (differential Rx gain) calib data */
-struct iwl_chain_noise_data {
-	u32 active_chains;
-	u32 chain_noise_a;
-	u32 chain_noise_b;
-	u32 chain_noise_c;
-	u32 chain_signal_a;
-	u32 chain_signal_b;
-	u32 chain_signal_c;
-	u16 beacon_count;
-	u8 disconn_array[NUM_RX_CHAINS];
-	u8 delta_gain_code[NUM_RX_CHAINS];
-	u8 radio_write;
-	u8 state;
-};
-
-#define	EEPROM_SEM_TIMEOUT 10		/* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */
-
-#define IWL_TRAFFIC_ENTRIES	(256)
-#define IWL_TRAFFIC_ENTRY_SIZE  (64)
-
-enum {
-	MEASUREMENT_READY = (1 << 0),
-	MEASUREMENT_ACTIVE = (1 << 1),
-};
-
-/* interrupt statistics */
-struct isr_statistics {
-	u32 hw;
-	u32 sw;
-	u32 err_code;
-	u32 sch;
-	u32 alive;
-	u32 rfkill;
-	u32 ctkill;
-	u32 wakeup;
-	u32 rx;
-	u32 rx_handlers[REPLY_MAX];
-	u32 tx;
-	u32 unhandled;
-};
-
-/* management statistics */
-enum iwl_mgmt_stats {
-	MANAGEMENT_ASSOC_REQ = 0,
-	MANAGEMENT_ASSOC_RESP,
-	MANAGEMENT_REASSOC_REQ,
-	MANAGEMENT_REASSOC_RESP,
-	MANAGEMENT_PROBE_REQ,
-	MANAGEMENT_PROBE_RESP,
-	MANAGEMENT_BEACON,
-	MANAGEMENT_ATIM,
-	MANAGEMENT_DISASSOC,
-	MANAGEMENT_AUTH,
-	MANAGEMENT_DEAUTH,
-	MANAGEMENT_ACTION,
-	MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
-	CONTROL_BACK_REQ =  0,
-	CONTROL_BACK,
-	CONTROL_PSPOLL,
-	CONTROL_RTS,
-	CONTROL_CTS,
-	CONTROL_ACK,
-	CONTROL_CFEND,
-	CONTROL_CFENDACK,
-	CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-	u32 mgmt[MANAGEMENT_MAX];
-	u32 ctrl[CONTROL_MAX];
-	u32 data_cnt;
-	u64 data_bytes;
-#endif
-};
-
-/*
- * Host interrupt timeout values, used when setting the interrupt coalescing
- * timer.  CSR_INT_COALESCING is an 8-bit register counted in 32-usec units
- * (see the conversion sketch after the defines below).
- *
- * default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX	(0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF	(0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN	(0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX	(0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF	(0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN	(0x0)
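
The timeout values above are register values in 32-usec units; a small
hypothetical helper (the name is not part of the driver) makes the
conversion explicit:

	static inline u32 iwl_host_int_timeout_usec(u32 reg_val)
	{
		/* CSR_INT_COALESCING counts 32-usec units, so the default
		 * of 0x40 corresponds to 64 * 32 = 2048 usec */
		return reg_val * 32;
	}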
-
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT	(2000)
-#define IWL_LONG_WD_TIMEOUT	(10000)
-#define IWL_MAX_WD_TIMEOUT	(120000)
-
-struct iwl_force_reset {
-	int reset_request_count;
-	int reset_success_count;
-	int reset_reject_count;
-	unsigned long reset_duration;
-	unsigned long last_force_reset_jiffies;
-};
-
-/* extend beacon time format bit shifting  */
-/*
- * for _3945 devices
- * bits 31:24 - extended
- * bits 23:0  - interval
- */
-#define IWL3945_EXT_BEACON_TIME_POS	24
-/*
- * for _4965 devices
- * bits 31:22 - extended
- * bits 21:0  - interval
- */
-#define IWL4965_EXT_BEACON_TIME_POS	22
-
-enum iwl_rxon_context_id {
-	IWL_RXON_CTX_BSS,
-
-	NUM_IWL_RXON_CTX
-};
-
-struct iwl_rxon_context {
-	struct ieee80211_vif *vif;
-
-	const u8 *ac_to_fifo;
-	const u8 *ac_to_queue;
-	u8 mcast_queue;
-
-	/*
-	 * We could use the vif to indicate active, but we
-	 * also need it to be active during disabling when
-	 * we already removed the vif for type setting.
-	 */
-	bool always_active, is_active;
-
-	bool ht_need_multiple_chains;
-
-	enum iwl_rxon_context_id ctxid;
-
-	u32 interface_modes, exclusive_interface_modes;
-	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
-	/*
-	 * We declare this const so it can only be
-	 * changed via explicit cast within the
-	 * routines that actually update the physical
-	 * hardware.
-	 */
-	const struct iwl_legacy_rxon_cmd active;
-	struct iwl_legacy_rxon_cmd staging;
-
-	struct iwl_rxon_time_cmd timing;
-
-	struct iwl_qos_info qos_data;
-
-	u8 bcast_sta_id, ap_sta_id;
-
-	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
-	u8 qos_cmd;
-	u8 wep_key_cmd;
-
-	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
-	u8 key_mapping_keys;
-
-	__le32 station_flags;
-
-	struct {
-		bool non_gf_sta_present;
-		u8 protection;
-		bool enabled, is_40mhz;
-		u8 extension_chan_offset;
-	} ht;
-};
-
-struct iwl_priv {
-
-	/* ieee device used by generic ieee processing code */
-	struct ieee80211_hw *hw;
-	struct ieee80211_channel *ieee_channels;
-	struct ieee80211_rate *ieee_rates;
-	struct iwl_cfg *cfg;
-
-	/* temporary frame storage list */
-	struct list_head free_frames;
-	int frames_count;
-
-	enum ieee80211_band band;
-	int alloc_rxb_page;
-
-	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-				       struct iwl_rx_mem_buffer *rxb);
-
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
-	/* spectrum measurement report caching */
-	struct iwl_spectrum_notification measure_report;
-	u8 measurement_status;
-
-	/* ucode beacon time */
-	u32 ucode_beacon_time;
-	int missed_beacon_threshold;
-
-	/* track IBSS manager (last beacon) status */
-	u32 ibss_manager;
-
-	/* force reset */
-	struct iwl_force_reset force_reset;
-
-	/* we allocate an array of iwl_channel_info for the NIC's valid channels.
-	 *    Access is via channel # using an indirect index array */
-	struct iwl_channel_info *channel_info;	/* channel info array */
-	u8 channel_count;	/* # of channels */
-
-	/* thermal calibration */
-	s32 temperature;	/* degrees Kelvin */
-	s32 last_temperature;
-
-	/* init calibration results */
-	struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
-	/* Scan related variables */
-	unsigned long scan_start;
-	unsigned long scan_start_tsf;
-	void *scan_cmd;
-	enum ieee80211_band scan_band;
-	struct cfg80211_scan_request *scan_request;
-	struct ieee80211_vif *scan_vif;
-	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
-	u8 mgmt_tx_ant;
-
-	/* spinlock */
-	spinlock_t lock;	/* protect general shared data */
-	spinlock_t hcmd_lock;	/* protect hcmd */
-	spinlock_t reg_lock;	/* protect hw register access */
-	struct mutex mutex;
-
-	/* basic pci-network driver stuff */
-	struct pci_dev *pci_dev;
-
-	/* pci hardware address support */
-	void __iomem *hw_base;
-	u32  hw_rev;
-	u32  hw_wa_rev;
-	u8   rev_id;
-
-	/* microcode/device supports multiple contexts */
-	u8 valid_contexts;
-
-	/* command queue number */
-	u8 cmd_queue;
-
-	/* max number of station keys */
-	u8 sta_key_max_num;
-
-	/* EEPROM MAC addresses */
-	struct mac_address addresses[1];
-
-	/* uCode images, save to reload in case of failure */
-	int fw_index;			/* firmware we're trying to load */
-	u32 ucode_ver;			/* version of ucode, copy of
-					   iwl_ucode.ver */
-	struct fw_desc ucode_code;	/* runtime inst */
-	struct fw_desc ucode_data;	/* runtime data original */
-	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
-	struct fw_desc ucode_init;	/* initialization inst */
-	struct fw_desc ucode_init_data;	/* initialization data */
-	struct fw_desc ucode_boot;	/* bootstrap inst */
-	enum ucode_type ucode_type;
-	u8 ucode_write_complete;	/* the image write is complete */
-	char firmware_name[25];
-
-	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-
-	__le16 switch_channel;
-
-	/* 1st responses from initialize and runtime uCode images.
-	 * _4965's initialize alive response contains some calibration data. */
-	struct iwl_init_alive_resp card_alive_init;
-	struct iwl_alive_resp card_alive;
-
-	u16 active_rate;
-
-	u8 start_calib;
-	struct iwl_sensitivity_data sensitivity_data;
-	struct iwl_chain_noise_data chain_noise_data;
-	__le16 sensitivity_tbl[HD_TABLE_SIZE];
-
-	struct iwl_ht_config current_ht_config;
-
-	/* Rate scaling data */
-	u8 retry_rate;
-
-	wait_queue_head_t wait_command_queue;
-
-	int activity_timer_active;
-
-	/* Rx and Tx DMA processing queues */
-	struct iwl_rx_queue rxq;
-	struct iwl_tx_queue *txq;
-	unsigned long txq_ctx_active_msk;
-	struct iwl_dma_ptr  kw;	/* keep warm address */
-	struct iwl_dma_ptr  scd_bc_tbls;
-
-	u32 scd_base_addr;	/* scheduler sram base address */
-
-	unsigned long status;
-
-	/* counts mgmt, ctl, and data packets */
-	struct traffic_stats tx_stats;
-	struct traffic_stats rx_stats;
-
-	/* counts interrupts */
-	struct isr_statistics isr_stats;
-
-	struct iwl_power_mgr power_data;
-
-	/* context information */
-	u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-
-	/* station table variables */
-
-	/* Note: if lock and sta_lock are needed, lock must be acquired first */
-	spinlock_t sta_lock;
-	int num_stations;
-	struct iwl_station_entry stations[IWL_STATION_COUNT];
-	unsigned long ucode_key_table;
-
-	/* queue refcounts */
-#define IWL_MAX_HW_QUEUES	32
-	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
-	/* for each AC */
-	atomic_t queue_stop_count[4];
-
-	/* Indication if ieee80211_ops->open has been called */
-	u8 is_open;
-
-	u8 mac80211_registered;
-
-	/* eeprom -- this is in the card's little endian byte order */
-	u8 *eeprom;
-	struct iwl_eeprom_calib_info *calib_info;
-
-	enum nl80211_iftype iw_mode;
-
-	/* Last Rx'd beacon timestamp */
-	u64 timestamp;
-
-	union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
-		struct {
-			void *shared_virt;
-			dma_addr_t shared_phys;
-
-			struct delayed_work thermal_periodic;
-			struct delayed_work rfkill_poll;
-
-			struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-			struct iwl3945_notif_statistics accum_statistics;
-			struct iwl3945_notif_statistics delta_statistics;
-			struct iwl3945_notif_statistics max_delta;
-#endif
-
-			u32 sta_supp_rates;
-			int last_rx_rssi;	/* From Rx packet statistics */
-
-			/* Rx'd packet timing information */
-			u32 last_beacon_time;
-			u64 last_tsf;
-
-			/*
-			 * each calibration channel group in the
-			 * EEPROM has a derived clip setting for
-			 * each rate.
-			 */
-			const struct iwl3945_clip_group clip_groups[5];
-
-		} _3945;
-#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
-		struct {
-			struct iwl_rx_phy_res last_phy_res;
-			bool last_phy_res_valid;
-
-			struct completion firmware_loading_complete;
-
-			/*
-			 * chain noise reset and gain commands are the
-			 * two extra calibration commands that follow the
-			 * standard phy calibration commands
-			 */
-			u8 phy_calib_chain_noise_reset_cmd;
-			u8 phy_calib_chain_noise_gain_cmd;
-
-			struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-			struct iwl_notif_statistics accum_statistics;
-			struct iwl_notif_statistics delta_statistics;
-			struct iwl_notif_statistics max_delta;
-#endif
-
-		} _4965;
-#endif
-	};
-
-	struct iwl_hw_params hw_params;
-
-	u32 inta_mask;
-
-	struct workqueue_struct *workqueue;
-
-	struct work_struct restart;
-	struct work_struct scan_completed;
-	struct work_struct rx_replenish;
-	struct work_struct abort_scan;
-
-	struct iwl_rxon_context *beacon_ctx;
-	struct sk_buff *beacon_skb;
-
-	struct work_struct tx_flush;
-
-	struct tasklet_struct irq_tasklet;
-
-	struct delayed_work init_alive_start;
-	struct delayed_work alive_start;
-	struct delayed_work scan_check;
-
-	/* TX Power */
-	s8 tx_power_user_lmt;
-	s8 tx_power_device_lmt;
-	s8 tx_power_next;
-
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	/* debugging info */
-	u32 debug_level; /* per device debugging will override global
-			    iwlegacy_debug_level if set */
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-	/* debugfs */
-	u16 tx_traffic_idx;
-	u16 rx_traffic_idx;
-	u8 *tx_traffic;
-	u8 *rx_traffic;
-	struct dentry *debugfs_dir;
-	u32 dbgfs_sram_offset, dbgfs_sram_len;
-	bool disable_ht40;
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-	struct work_struct txpower_work;
-	u32 disable_sens_cal;
-	u32 disable_chain_noise_cal;
-	u32 disable_tx_power_cal;
-	struct work_struct run_time_calib_work;
-	struct timer_list statistics_periodic;
-	struct timer_list watchdog;
-	bool hw_ready;
-
-	struct led_classdev led;
-	unsigned long blink_on, blink_off;
-	bool led_registered;
-}; /*iwl_priv */
-
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
-	set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
-	clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-/*
- * iwl_legacy_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set a per-device debug level.  If set, this
- * debug level is used; otherwise the global debug level, which can be set
- * via a module parameter, is used.
- */
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
-	if (priv->debug_level)
-		return priv->debug_level;
-	else
-		return iwlegacy_debug_level;
-}
-#else
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
-	return iwlegacy_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *
-iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
-						 int txq_id, int idx)
-{
-	if (priv->txq[txq_id].txb[idx].skb)
-		return (struct ieee80211_hdr *)priv->txq[txq_id].
-				txb[idx].skb->data;
-	return NULL;
-}
-
-static inline struct iwl_rxon_context *
-iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-	return vif_priv->ctx;
-}
-
-#define for_each_context(priv, ctx)				\
-	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
-	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
-		if (priv->valid_contexts & BIT(ctx->ctxid))
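
A usage sketch of for_each_context(); handle_ctx() is a hypothetical
callback, everything else is declared in this header:

	static void visit_associated_contexts(struct iwl_priv *priv)
	{
		struct iwl_rxon_context *ctx;

		/* iterates only over contexts set in priv->valid_contexts */
		for_each_context(priv, ctx)
			if (iwl_legacy_is_associated_ctx(ctx))
				handle_ctx(ctx);	/* hypothetical */
	}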
-
-static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
-				    enum iwl_rxon_context_id ctxid)
-{
-	return (priv->contexts[ctxid].active.filter_flags &
-			RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
-{
-	return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-}
-
-static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
-{
-	return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
-{
-	if (ch_info == NULL)
-		return 0;
-	return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
-{
-	return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
-	return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline int
-iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
-{
-	return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int
-iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
-{
-	return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
-}
-
-static inline void
-__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
-{
-	__free_pages(page, priv->hw_params.rx_page_order);
-	priv->alloc_rxb_page--;
-}
-
-static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
-{
-	free_pages(page, priv->hw_params.rx_page_order);
-	priv->alloc_rxb_page--;
-}
-#endif				/* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec991..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-
-/* sparse doesn't like tracepoint macros */
-#ifndef __CHECKER__
-#include "iwl-dev.h"
-
-#define CREATE_TRACE_POINTS
-#include "iwl-devtrace.h"
-
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWLWIFI_LEGACY_DEVICE_TRACE
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-
-#define PRIV_ENTRY	__field(struct iwl_priv *, priv)
-#define PRIV_ASSIGN	(__entry->priv = priv)
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_io
-
-TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
-	TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
-	TP_ARGS(priv, offs, val),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-		__field(u32, offs)
-		__field(u32, val)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		__entry->offs = offs;
-		__entry->val = val;
-	),
-	TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
-					__entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
-	TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
-	TP_ARGS(priv, offs, val),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-		__field(u32, offs)
-		__field(u8, val)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		__entry->offs = offs;
-		__entry->val = val;
-	),
-	TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
-					__entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
-	TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
-	TP_ARGS(priv, offs, val),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-		__field(u32, offs)
-		__field(u32, val)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		__entry->offs = offs;
-		__entry->val = val;
-	),
-	TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
-					__entry->offs, __entry->val)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_ucode
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
-	TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
-	TP_ARGS(priv, hcmd, len, flags),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-		__dynamic_array(u8, hcmd, len)
-		__field(u32, flags)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		memcpy(__get_dynamic_array(hcmd), hcmd, len);
-		__entry->flags = flags;
-	),
-	TP_printk("[%p] hcmd %#.2x (%ssync)",
-		  __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
-		  __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_rx,
-	TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
-	TP_ARGS(priv, rxbuf, len),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-		__dynamic_array(u8, rxbuf, len)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
-	),
-	TP_printk("[%p] RX cmd %#.2x",
-		  __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_tx,
-	TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
-		 void *buf0, size_t buf0_len,
-		 void *buf1, size_t buf1_len),
-	TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-
-		__field(size_t, framelen)
-		__dynamic_array(u8, tfd, tfdlen)
-
-		/*
-		 * Do not insert between or below these items,
-		 * we want to keep the frame together (except
-		 * for the possible padding).
-		 */
-		__dynamic_array(u8, buf0, buf0_len)
-		__dynamic_array(u8, buf1, buf1_len)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		__entry->framelen = buf0_len + buf1_len;
-		memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
-		memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
-		memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
-	),
-	TP_printk("[%p] TX %.2x (%zu bytes)",
-		  __entry->priv,
-		  ((u8 *)__get_dynamic_array(buf0))[0],
-		  __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
-	TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
-		 u32 data1, u32 data2, u32 line, u32 blink1,
-		 u32 blink2, u32 ilink1, u32 ilink2),
-	TP_ARGS(priv, desc, time, data1, data2, line,
-		blink1, blink2, ilink1, ilink2),
-	TP_STRUCT__entry(
-		PRIV_ENTRY
-		__field(u32, desc)
-		__field(u32, time)
-		__field(u32, data1)
-		__field(u32, data2)
-		__field(u32, line)
-		__field(u32, blink1)
-		__field(u32, blink2)
-		__field(u32, ilink1)
-		__field(u32, ilink2)
-	),
-	TP_fast_assign(
-		PRIV_ASSIGN;
-		__entry->desc = desc;
-		__entry->time = time;
-		__entry->data1 = data1;
-		__entry->data2 = data2;
-		__entry->line = line;
-		__entry->blink1 = blink1;
-		__entry->blink2 = blink2;
-		__entry->ilink1 = ilink1;
-		__entry->ilink2 = ilink2;
-	),
-	TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
-		  "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
-		  __entry->priv, __entry->desc, __entry->time, __entry->data1,
-		  __entry->data2, __entry->line, __entry->blink1,
-		  __entry->blink2, __entry->ilink1, __entry->ilink2)
-);
-
-#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwlegacy_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel.  We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware.  This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel.  There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwlegacy_eeprom_band_1[14] = {
-	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwlegacy_eeprom_band_2[] = {	/* 4915-5080MHz */
-	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwlegacy_eeprom_band_3[] = {	/* 5170-5320MHz */
-	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwlegacy_eeprom_band_4[] = {	/* 5500-5700MHz */
-	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwlegacy_eeprom_band_5[] = {	/* 5725-5825MHz */
-	145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwlegacy_eeprom_band_6[] = {       /* 2.4 ht40 channel */
-	1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwlegacy_eeprom_band_7[] = {       /* 5.2 ht40 channel */
-	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
-{
-	u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
-	int ret = 0;
-
-	IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
-	switch (gp) {
-	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
-	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
-		break;
-	default:
-		IWL_ERR(priv, "bad EEPROM signature, "
-			"EEPROM_GP=0x%08x\n", gp);
-		ret = -ENOENT;
-		break;
-	}
-	return ret;
-}
-
-const u8
-*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
-	BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
-	return &priv->eeprom[offset];
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
-
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
-{
-	if (!priv->eeprom)
-		return 0;
-	return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
-
-/**
- * iwl_legacy_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE:  This routine uses the non-debug IO access functions.
- */
-int iwl_legacy_eeprom_init(struct iwl_priv *priv)
-{
-	__le16 *e;
-	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
-	int sz;
-	int ret;
-	u16 addr;
-
-	/* allocate eeprom */
-	sz = priv->cfg->base_params->eeprom_size;
-	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
-	priv->eeprom = kzalloc(sz, GFP_KERNEL);
-	if (!priv->eeprom) {
-		ret = -ENOMEM;
-		goto alloc_err;
-	}
-	e = (__le16 *)priv->eeprom;
-
-	priv->cfg->ops->lib->apm_ops.init(priv);
-
-	ret = iwl_legacy_eeprom_verify_signature(priv);
-	if (ret < 0) {
-		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
-		ret = -ENOENT;
-		goto err;
-	}
-
-	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
-	ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
-	if (ret < 0) {
-		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
-		ret = -ENOENT;
-		goto err;
-	}
-
-	/* eeprom is an array of 16bit values */
-	for (addr = 0; addr < sz; addr += sizeof(u16)) {
-		u32 r;
-
-		_iwl_legacy_write32(priv, CSR_EEPROM_REG,
-			     CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
-		ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
-					  CSR_EEPROM_REG_READ_VALID_MSK,
-					  CSR_EEPROM_REG_READ_VALID_MSK,
-					  IWL_EEPROM_ACCESS_TIMEOUT);
-		if (ret < 0) {
-			IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
-							addr);
-			goto done;
-		}
-		r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
-		e[addr / 2] = cpu_to_le16(r >> 16);
-	}
-
-	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
-		       "EEPROM",
-		       iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
-
-	ret = 0;
-done:
-	priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
-
-err:
-	if (ret)
-		iwl_legacy_eeprom_free(priv);
-	/* Reset chip to save power until we load uCode during "up". */
-	iwl_legacy_apm_stop(priv);
-alloc_err:
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_init);
-
-void iwl_legacy_eeprom_free(struct iwl_priv *priv)
-{
-	kfree(priv->eeprom);
-	priv->eeprom = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_free);
-
-static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
-			int eep_band, int *eeprom_ch_count,
-			const struct iwl_eeprom_channel **eeprom_ch_info,
-			const u8 **eeprom_ch_index)
-{
-	u32 offset = priv->cfg->ops->lib->
-			eeprom_ops.regulatory_bands[eep_band - 1];
-	switch (eep_band) {
-	case 1:		/* 2.4GHz band */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_1;
-		break;
-	case 2:		/* 4.9GHz band */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_2;
-		break;
-	case 3:		/* 5.2GHz band */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_3;
-		break;
-	case 4:		/* 5.5GHz band */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_4;
-		break;
-	case 5:		/* 5.7GHz band */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_5;
-		break;
-	case 6:		/* 2.4GHz ht40 channels */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_6;
-		break;
-	case 7:		/* 5 GHz ht40 channels */
-		*eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
-		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_legacy_eeprom_query_addr(priv, offset);
-		*eeprom_ch_index = iwlegacy_eeprom_band_7;
-		break;
-	default:
-		BUG();
-	}
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
-			    ? # x " " : "")
-/**
- * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
-			      enum ieee80211_band band, u16 channel,
-			      const struct iwl_eeprom_channel *eeprom_ch,
-			      u8 clear_ht40_extension_channel)
-{
-	struct iwl_channel_info *ch_info;
-
-	ch_info = (struct iwl_channel_info *)
-			iwl_legacy_get_channel_info(priv, band, channel);
-
-	if (!iwl_legacy_is_channel_valid(ch_info))
-		return -1;
-
-	IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
-			" Ad-Hoc %ssupported\n",
-			ch_info->channel,
-			iwl_legacy_is_channel_a_band(ch_info) ?
-			"5.2" : "2.4",
-			CHECK_AND_PRINT(IBSS),
-			CHECK_AND_PRINT(ACTIVE),
-			CHECK_AND_PRINT(RADAR),
-			CHECK_AND_PRINT(WIDE),
-			CHECK_AND_PRINT(DFS),
-			eeprom_ch->flags,
-			eeprom_ch->max_power_avg,
-			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
-			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
-			"" : "not ");
-
-	ch_info->ht40_eeprom = *eeprom_ch;
-	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
-	ch_info->ht40_flags = eeprom_ch->flags;
-	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
-		ch_info->ht40_extension_channel &=
-					~clear_ht40_extension_channel;
-
-	return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
-			    ? # x " " : "")
-
-/**
- * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_legacy_init_channel_map(struct iwl_priv *priv)
-{
-	int eeprom_ch_count = 0;
-	const u8 *eeprom_ch_index = NULL;
-	const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
-	int band, ch;
-	struct iwl_channel_info *ch_info;
-
-	if (priv->channel_count) {
-		IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
-		return 0;
-	}
-
-	IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
-	priv->channel_count =
-	    ARRAY_SIZE(iwlegacy_eeprom_band_1) +
-	    ARRAY_SIZE(iwlegacy_eeprom_band_2) +
-	    ARRAY_SIZE(iwlegacy_eeprom_band_3) +
-	    ARRAY_SIZE(iwlegacy_eeprom_band_4) +
-	    ARRAY_SIZE(iwlegacy_eeprom_band_5);
-
-	IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
-			priv->channel_count);
-
-	priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
-				     priv->channel_count, GFP_KERNEL);
-	if (!priv->channel_info) {
-		IWL_ERR(priv, "Could not allocate channel_info\n");
-		priv->channel_count = 0;
-		return -ENOMEM;
-	}
-
-	ch_info = priv->channel_info;
-
-	/* Loop through the 5 EEPROM bands adding them in order to the
-	 * channel map we maintain (which contains more information than just
-	 * what is in the EEPROM) */
-	for (band = 1; band <= 5; band++) {
-
-		iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
-					&eeprom_ch_info, &eeprom_ch_index);
-
-		/* Loop through each band adding each of the channels */
-		for (ch = 0; ch < eeprom_ch_count; ch++) {
-			ch_info->channel = eeprom_ch_index[ch];
-			ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
-			    IEEE80211_BAND_5GHZ;
-
-			/* permanently store EEPROM's channel regulatory flags
-			 *   and max power in channel info database. */
-			ch_info->eeprom = eeprom_ch_info[ch];
-
-			/* Copy the run-time flags so they are there even on
-			 * invalid channels */
-			ch_info->flags = eeprom_ch_info[ch].flags;
-			/* First mark HT40 as not enabled, then enable it
-			 * channel by channel */
-			ch_info->ht40_extension_channel =
-					IEEE80211_CHAN_NO_HT40;
-
-			if (!(iwl_legacy_is_channel_valid(ch_info))) {
-				IWL_DEBUG_EEPROM(priv,
-					       "Ch. %d Flags %x [%sGHz] - "
-					       "No traffic\n",
-					       ch_info->channel,
-					       ch_info->flags,
-					       iwl_legacy_is_channel_a_band(ch_info) ?
-					       "5.2" : "2.4");
-				ch_info++;
-				continue;
-			}
-
-			/* Initialize regulatory-based run-time data */
-			ch_info->max_power_avg = ch_info->curr_txpow =
-			    eeprom_ch_info[ch].max_power_avg;
-			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
-			ch_info->min_power = 0;
-
-			IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
-				       "%s%s%s%s%s%s(0x%02x %ddBm):"
-				       " Ad-Hoc %ssupported\n",
-				       ch_info->channel,
-				       iwl_legacy_is_channel_a_band(ch_info) ?
-				       "5.2" : "2.4",
-				       CHECK_AND_PRINT_I(VALID),
-				       CHECK_AND_PRINT_I(IBSS),
-				       CHECK_AND_PRINT_I(ACTIVE),
-				       CHECK_AND_PRINT_I(RADAR),
-				       CHECK_AND_PRINT_I(WIDE),
-				       CHECK_AND_PRINT_I(DFS),
-				       eeprom_ch_info[ch].flags,
-				       eeprom_ch_info[ch].max_power_avg,
-				       ((eeprom_ch_info[ch].
-					 flags & EEPROM_CHANNEL_IBSS)
-					&& !(eeprom_ch_info[ch].
-					     flags & EEPROM_CHANNEL_RADAR))
-				       ? "" : "not ");
-
-			ch_info++;
-		}
-	}
-
-	/* Check if we do have HT40 channels */
-	if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
-	    EEPROM_REGULATORY_BAND_NO_HT40 &&
-	    priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
-	    EEPROM_REGULATORY_BAND_NO_HT40)
-		return 0;
-
-	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
-	for (band = 6; band <= 7; band++) {
-		enum ieee80211_band ieeeband;
-
-		iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
-					&eeprom_ch_info, &eeprom_ch_index);
-
-		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
-		ieeeband =
-			(band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
-		/* Loop through each band adding each of the channels */
-		for (ch = 0; ch < eeprom_ch_count; ch++) {
-			/* Set up driver's info for lower half */
-			iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
-						eeprom_ch_index[ch],
-						&eeprom_ch_info[ch],
-						IEEE80211_CHAN_NO_HT40PLUS);
-
-			/* Set up driver's info for upper half */
-			iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
-						eeprom_ch_index[ch] + 4,
-						&eeprom_ch_info[ch],
-						IEEE80211_CHAN_NO_HT40MINUS);
-		}
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_channel_map);
-
-/*
- * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
- */
-void iwl_legacy_free_channel_map(struct iwl_priv *priv)
-{
-	kfree(priv->channel_info);
-	priv->channel_count = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_free_channel_map);
-
-/**
- * iwl_legacy_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct
-iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
-					enum ieee80211_band band, u16 channel)
-{
-	int i;
-
-	switch (band) {
-	case IEEE80211_BAND_5GHZ:
-		for (i = 14; i < priv->channel_count; i++) {
-			if (priv->channel_info[i].channel == channel)
-				return &priv->channel_info[i];
-		}
-		break;
-	case IEEE80211_BAND_2GHZ:
-		if (channel >= 1 && channel <= 14)
-			return &priv->channel_info[channel - 1];
-		break;
-	default:
-		BUG();
-	}
-
-	return NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c810..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_eeprom_h__
-#define __iwl_legacy_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT	5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT		10   /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT	1000 /* number of attempts (not time) */
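
The access sequence described above, sketched with the register helpers the
driver already uses in iwl_legacy_eeprom_init(); error handling is omitted
and this is not a drop-in function:

	static u16 eeprom_read_word_sketch(struct iwl_priv *priv, u16 addr)
	{
		u32 r;

		_iwl_legacy_write32(priv, CSR_EEPROM_REG,
				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
		iwl_poll_bit(priv, CSR_EEPROM_REG,
			     CSR_EEPROM_REG_READ_VALID_MSK,
			     CSR_EEPROM_REG_READ_VALID_MSK,
			     IWL_EEPROM_ACCESS_TIMEOUT);
		r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
		return r >> 16;		/* 16-bit value is in bits 31-16 */
	}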
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- *        It only indicates that 20 MHz channel use is supported; HT40 channel
- *        usage is indicated by a separate set of regulatory flags for each
- *        HT40 channel pair.
- *
- * NOTE:  Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
-	EEPROM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
-	EEPROM_CHANNEL_IBSS = (1 << 1),		/* usable as an IBSS channel */
-	/* Bit 2 Reserved */
-	EEPROM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
-	EEPROM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
-	EEPROM_CHANNEL_WIDE = (1 << 5),		/* 20 MHz channel okay */
-	/* Bit 6 Reserved (was Narrow Channel) */
-	EEPROM_CHANNEL_DFS = (1 << 7),	/* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-/* 3945 only */
-#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
-#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
-	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
-	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-/* 3945 Specific */
-#define EEPROM_3945_EEPROM_VERSION	(0x2f)
-
-/* 4965 has two radio transmitters (and 3 radio receivers) */
-#define EEPROM_TX_POWER_TX_CHAINS      (2)
-
-/* 4965 has room for up to 8 sets of txpower calibration data */
-#define EEPROM_TX_POWER_BANDS          (8)
-
-/* 4965 factory calibration measures txpower gain settings for
- * each of 3 target output levels */
-#define EEPROM_TX_POWER_MEASUREMENTS   (3)
-
-/* 4965 Specific */
-/* 4965 driver does not work with txpower calibration version < 5 */
-#define EEPROM_4965_TX_POWER_VERSION    (5)
-#define EEPROM_4965_EEPROM_VERSION	(0x2f)
-#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6) /* 2 bytes */
-#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8) /* 48  bytes */
-#define EEPROM_4965_BOARD_REVISION             (2*0x4F) /* 2 bytes */
-#define EEPROM_4965_BOARD_PBA                  (2*0x56+1) /* 9 bytes */
-
-/* 2.4 GHz */
-extern const u8 iwlegacy_eeprom_band_1[14];
-
-/*
- * factory calibration data for one txpower level, on one channel,
- * measured on one of the 2 tx chains (radio transmitter and associated
- * antenna).  EEPROM contains:
- *
- * 1)  Temperature (degrees Celsius) of device when measurement was made.
- *
- * 2)  Gain table index used to achieve the target measurement power.
- *     This refers to the "well-known" gain tables (see iwl-4965-hw.h).
- *
- * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
- *
- * 4)  RF power amplifier detector level measurement (not used).
- */
-struct iwl_eeprom_calib_measure {
-	u8 temperature;		/* Device temperature (Celsius) */
-	u8 gain_idx;		/* Index into gain table */
-	u8 actual_pow;		/* Measured RF output power, half-dBm */
-	s8 pa_det;		/* Power amp detector level (not used) */
-} __packed;
-
-
-/*
- * measurement set for one channel.  EEPROM contains:
- *
- * 1)  Channel number measured
- *
- * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
- *     (a.k.a. "tx chains") (6 measurements altogether)
- */
-struct iwl_eeprom_calib_ch_info {
-	u8 ch_num;
-	struct iwl_eeprom_calib_measure
-		measurements[EEPROM_TX_POWER_TX_CHAINS]
-			[EEPROM_TX_POWER_MEASUREMENTS];
-} __packed;
-
-/*
- * txpower subband info.
- *
- * For each frequency subband, EEPROM contains the following:
- *
- * 1)  First and last channels within range of the subband.  "0" values
- *     indicate that this sample set is not being used.
- *
- * 2)  Sample measurement sets for 2 channels close to the range endpoints.
- */
-struct iwl_eeprom_calib_subband_info {
-	u8 ch_from;	/* channel number of lowest channel in subband */
-	u8 ch_to;	/* channel number of highest channel in subband */
-	struct iwl_eeprom_calib_ch_info ch1;
-	struct iwl_eeprom_calib_ch_info ch2;
-} __packed;
-
-
-/*
- * txpower calibration info.  EEPROM contains:
- *
- * 1)  Factory-measured saturation power levels (maximum levels at which
- *     tx power amplifier can output a signal without too much distortion).
- *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
- *     values apply to all channels within each of the bands.
- *
- * 2)  Factory-measured power supply voltage level.  This is assumed to be
- *     constant (i.e. same value applies to all channels/bands) while the
- *     factory measurements are being made.
- *
- * 3)  Up to 8 sets of factory-measured txpower calibration values.
- *     These are for different frequency ranges, since txpower gain
- *     characteristics of the analog radio circuitry vary with frequency.
- *
- *     Not all sets need to be filled with data;
- *     struct iwl_eeprom_calib_subband_info contains range of channels
- *     (0 if unused) for each set of data.
- */
-struct iwl_eeprom_calib_info {
-	u8 saturation_power24;	/* half-dBm (e.g. "34" = 17 dBm) */
-	u8 saturation_power52;	/* half-dBm */
-	__le16 voltage;		/* signed */
-	struct iwl_eeprom_calib_subband_info
-		band_info[EEPROM_TX_POWER_BANDS];
-} __packed;
-
-
-/* General */
-#define EEPROM_DEVICE_ID                    (2*0x08)	/* 2 bytes */
-#define EEPROM_MAC_ADDRESS                  (2*0x15)	/* 6  bytes */
-#define EEPROM_BOARD_REVISION               (2*0x35)	/* 2  bytes */
-#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1)	/* 9  bytes */
-#define EEPROM_VERSION                      (2*0x44)	/* 2  bytes */
-#define EEPROM_SKU_CAP                      (2*0x45)	/* 2  bytes */
-#define EEPROM_OEM_MODE                     (2*0x46)	/* 2  bytes */
-#define EEPROM_WOWLAN_MODE                  (2*0x47)	/* 2  bytes */
-#define EEPROM_RADIO_CONFIG                 (2*0x48)	/* 2  bytes */
-#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)	/* 2  bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
-#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
-#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
-#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
-#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
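To show how the masks above carve up a 16-bit EEPROM_RADIO_CONFIG word, a small sketch; the field order follows the bit comments, and the output format is illustrative only.

#include <stdio.h>

static void dump_radio_config(unsigned int rf_cfg)
{
	printf("type=%u step=%u dash=%u pnum=%u tx_ant=0x%x rx_ant=0x%x\n",
	       rf_cfg & 0x3,           /* bits 0-1   */
	       (rf_cfg >> 2) & 0x3,    /* bits 2-3   */
	       (rf_cfg >> 4) & 0x3,    /* bits 4-5   */
	       (rf_cfg >> 6) & 0x3,    /* bits 6-7   */
	       (rf_cfg >> 8) & 0xF,    /* bits 8-11  */
	       (rf_cfg >> 12) & 0xF);  /* bits 12-15 */
}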
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by iwl has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-#define EEPROM_REGULATORY_SKU_ID            (2*0x60)    /* 4  bytes */
-#define EEPROM_REGULATORY_BAND_1            (2*0x62)	/* 2  bytes */
-#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)	/* 28 bytes */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-#define EEPROM_REGULATORY_BAND_2            (2*0x71)	/* 2  bytes */
-#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)	/* 26 bytes */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-#define EEPROM_REGULATORY_BAND_3            (2*0x7F)	/* 2  bytes */
-#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)	/* 24 bytes */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-#define EEPROM_REGULATORY_BAND_4            (2*0x8C)	/* 2  bytes */
-#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)	/* 22 bytes */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-#define EEPROM_REGULATORY_BAND_5            (2*0x98)	/* 2  bytes */
-#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)	/* 12 bytes */
-
-/*
- * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
- *
- * The channel listed is the center of the lower 20 MHz half of the channel.
- * The overall center frequency is actually 2 channels (10 MHz) above that,
- * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
- * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
- * and the overall HT40 channel width centers on channel 3.
- *
- * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
- *        control channel to which to tune.  RXON also specifies whether the
- *        control channel is the upper or lower half of a HT40 channel.
- *
- * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
- */
-#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)	/* 14 bytes */
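To make the channel arithmetic above concrete, a sketch assuming plain 2.4 GHz channel numbering (5 MHz per channel number):

/* For a 2.4 GHz HT40 pair whose lower 20 MHz half is 'lower', the upper
 * half sits 4 channel numbers (20 MHz) higher, and the 40 MHz channel as
 * a whole is centered 2 channel numbers (10 MHz) above the lower half,
 * e.g. lower = 1 -> upper = 5, center = 3, matching the list above. */
static inline int ht40_upper_channel(int lower)  { return lower + 4; }
static inline int ht40_center_channel(int lower) { return lower + 2; }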
-
-/*
- * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
- * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
- */
-#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)	/* 22 bytes */
-
-#define EEPROM_REGULATORY_BAND_NO_HT40			(0)
-
-struct iwl_eeprom_ops {
-	const u32 regulatory_bands[7];
-	int (*acquire_semaphore) (struct iwl_priv *priv);
-	void (*release_semaphore) (struct iwl_priv *priv);
-};
-
-
-int iwl_legacy_eeprom_init(struct iwl_priv *priv);
-void iwl_legacy_eeprom_free(struct iwl_priv *priv);
-const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
-					size_t offset);
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-int iwl_legacy_init_channel_map(struct iwl_priv *priv);
-void iwl_legacy_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_legacy_get_channel_info(
-		const struct iwl_priv *priv,
-		enum ieee80211_band band, u16 channel);
-
-#endif  /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e60918..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_fh_h__
-#define __iwl_legacy_fh_h__
-
-/****************************/
-/* Flow Handler Definitions */
-/****************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH_MEM_LOWER_BOUND                   (0x1000)
-#define FH_MEM_UPPER_BOUND                   (0x2000)
-
-/**
- * Keep-Warm (KW) buffer base address.
- *
- * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
- * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
- * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
- * from going into a power-savings mode that would cause higher DRAM latency,
- * and possible data over/under-runs, before all Tx/Rx is complete.
- *
- * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
- * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
- * automatically invokes keep-warm accesses when normal accesses might not
- * be sufficient to maintain fast DRAM response.
- *
- * Bit fields:
- *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
- */
-#define FH_KW_MEM_ADDR_REG		     (FH_MEM_LOWER_BOUND + 0x97C)
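A sketch of the write implied by the bit-field note above: a 4 KiB-aligned keep-warm buffer address has its bits [35:4] placed in bits [31:0] of the register, i.e. the DMA address shifted right by 4. fh_write32 is a placeholder accessor, not a real driver function.

#include <stdint.h>

extern void fh_write32(uint32_t reg, uint32_t val);   /* hypothetical accessor */

/* kw_dma_addr must be 4 KiB aligned. */
static void program_keep_warm(uint64_t kw_dma_addr)
{
	fh_write32(FH_KW_MEM_ADDR_REG, (uint32_t)(kw_dma_addr >> 4));
}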
-
-
-/**
- * TFD Circular Buffers Base (CBBC) addresses
- *
- * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
- * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
- * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
- * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
- * aligned (address bits 0-7 must be 0).
- *
- * Bit fields in each pointer register:
- *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
- */
-#define FH_MEM_CBBC_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x)  (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
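In the same spirit, the 256-byte-aligned base of a queue's TFD circular buffer goes into bits [27:0] of its CBBC register as address bits [35:8], i.e. shifted right by 8. Sketch only; fh_write32 is again a placeholder accessor.

#include <stdint.h>

extern void fh_write32(uint32_t reg, uint32_t val);   /* hypothetical accessor */

/* Program the TFD circular-buffer base for Tx queue txq_id (0-15). */
static void program_txq_base(int txq_id, uint64_t tfd_dma_addr)
{
	fh_write32(FH_MEM_CBBC_QUEUE(txq_id), (uint32_t)(tfd_dma_addr >> 8));
}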
-
-
-/**
- * Rx SRAM Control and Status Registers (RSCSR)
- *
- * These registers provide handshake between driver and 4965 for the Rx queue
- * (this queue handles *all* command responses, notifications, Rx data, etc.
- * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
- * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
- * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
- * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
- * mapping between RBDs and RBs.
- *
- * Driver must allocate host DRAM memory for the following, and set the
- * physical address of each into 4965 registers:
- *
- * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
- *     entries (although any power of 2, up to 4096, is selectable by driver).
- *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
- *     (typically 4K, although 8K or 16K are also selectable by driver).
- *     Driver sets up RB size and number of RBDs in the CB via Rx config
- *     register FH_MEM_RCSR_CHNL0_CONFIG_REG.
- *
- *     Bit fields within one RBD:
- *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
- *
- *     Driver sets physical address [35:8] of base of RBD circular buffer
- *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
- *
- * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
- *     (RBs) have been filled, via a "write pointer", actually the index of
- *     the RB's corresponding RBD within the circular buffer.  Driver sets
- *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
- *
- *     Bit fields in lower dword of Rx status buffer (upper dword not used
- *     by driver; see struct iwl4965_shared, val0):
- *     31-12:  Not used by driver
- *     11- 0:  Index of last filled Rx buffer descriptor
- *             (4965 writes, driver reads this value)
- *
- * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
- * enter pointers to these RBs into contiguous RBD circular buffer entries,
- * and update the 4965's "write" index register,
- * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
- *
- * This "write" index corresponds to the *next* RBD that the driver will make
- * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
- * the circular buffer.  This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example), and must
- * wrap back to 0 at the end of the circular buffer (but don't wrap before
- * "read" index has advanced past 1!  See below).
- * NOTE:  4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
- *
- * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
- * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
- * to tell the driver the index of the latest filled RBD.  The driver must
- * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
- *
- * The driver must also internally keep track of a third index, which is the
- * next RBD to process.  When receiving an Rx interrupt, driver should process
- * all filled but unprocessed RBs up to, but not including, the RB
- * corresponding to the "read" index.  For example, if "read" index becomes "1",
- * driver may process the RB pointed to by RBD 0.  Depending on volume of
- * traffic, there may be many RBs to process.
- *
- * If read index == write index, 4965 thinks there is no room to put new data.
- * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
- * be safe, make sure that there is a gap of at least 2 RBDs between "write"
- * and "read" indexes; that is, make sure that there are no more than 254
- * buffers waiting to be filled.
- */
-#define FH_MEM_RSCSR_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xBC0)
-#define FH_MEM_RSCSR_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RSCSR_CHNL0		(FH_MEM_RSCSR_LOWER_BOUND)
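A rough user-space model of the driver-side index bookkeeping described above: indexes wrap with power-of-two arithmetic, the value handed to the device is rounded down to a multiple of 8, and no more than 254 buffers are ever left waiting to be filled. All names are invented for this sketch; the real driver keeps this state in its Rx queue structure.

#include <stdint.h>

#define NUM_RBDS 256    /* any power of 2 up to 4096, per the description above */

struct rxq_model {
	uint32_t write;  /* next RBD the driver will hand to the device */
	uint32_t read;   /* latest RBD the device reported as filled    */
};

/* Account for 'prepared' freshly replenished RBs and return the value to
 * write into FH_RSCSR_CHNL0_RBDCB_WPTR_REG. */
static uint32_t rxq_advance_write(struct rxq_model *q, uint32_t prepared)
{
	uint32_t outstanding;

	q->write = (q->write + prepared) & (NUM_RBDS - 1);

	/* RBDs handed to the device but not yet reported back as filled */
	outstanding = (q->write - q->read) & (NUM_RBDS - 1);
	if (outstanding > NUM_RBDS - 2)
		/* preserve the 2-RBD gap; a real driver would throttle here */
		q->write = (q->read + NUM_RBDS - 2) & (NUM_RBDS - 1);

	return q->write & ~7u;  /* device expects increments in multiples of 8 */
}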
-
-/**
- * Physical base address of 8-byte Rx Status buffer.
- * Bit fields:
- *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
- */
-#define FH_RSCSR_CHNL0_STTS_WPTR_REG	(FH_MEM_RSCSR_CHNL0)
-
-/**
- * Physical base address of Rx Buffer Descriptor Circular Buffer.
- * Bit fields:
- *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
- */
-#define FH_RSCSR_CHNL0_RBDCB_BASE_REG	(FH_MEM_RSCSR_CHNL0 + 0x004)
-
-/**
- * Rx write pointer (index, really!).
- * Bit fields:
- *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
- *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
- */
-#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x008)
-#define FH_RSCSR_CHNL0_WPTR        (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
-
-
-/**
- * Rx Config/Status Registers (RCSR)
- * Rx Config Reg for channel 0 (only channel used)
- *
- * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
- * normal operation (see bit fields).
- *
- * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG	for
- * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
- *
- * Bit fields:
- * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- *        '10' operate normally
- * 29-24: reserved
- * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
- *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
- * 19-18: reserved
- * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
- *        '10' 12K, '11' 16K.
- * 15-14: reserved
- * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
- * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
- *        typical value 0x10 (about 1/2 msec)
- *  3- 0: reserved
- */
-#define FH_MEM_RCSR_LOWER_BOUND      (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RCSR_UPPER_BOUND      (FH_MEM_LOWER_BOUND + 0xCC0)
-#define FH_MEM_RCSR_CHNL0            (FH_MEM_RCSR_LOWER_BOUND)
-
-#define FH_MEM_RCSR_CHNL0_CONFIG_REG	(FH_MEM_RCSR_CHNL0)
-
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bits 12 */
-#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000) /* bits 16-17 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
-#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
-
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
-#define RX_RB_TIMEOUT	(0x10)
-
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
-
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
-
-#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
-
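Putting the fields above together, a normal-operation value for FH_MEM_RCSR_CHNL0_CONFIG_REG might be composed as follows. Sketch only, assuming the constants defined above and the kernel's u32 type; the shift count 8 selects 2^8 = 256 RBDs.

/* DMA enabled, 4K receive buffers, 256 RBDs, IRQ to host, ~0.5 ms RB timeout. */
static u32 rx_config_normal(void)
{
	return FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	       FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	       (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
	       FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	       (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
}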
-#define FH_RSCSR_FRAME_SIZE_MSK	(0x00003FFF)	/* bits 0-13 */
-
-/**
- * Rx Shared Status Registers (RSSR)
- *
- * After stopping Rx DMA channel (writing 0 to
- * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
- * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
- *
- * Bit fields:
- *  24:  1 = Channel 0 is idle
- *
- * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
- * contain default values that should not be altered by the driver.
- */
-#define FH_MEM_RSSR_LOWER_BOUND           (FH_MEM_LOWER_BOUND + 0xC40)
-#define FH_MEM_RSSR_UPPER_BOUND           (FH_MEM_LOWER_BOUND + 0xD00)
-
-#define FH_MEM_RSSR_SHARED_CTRL_REG       (FH_MEM_RSSR_LOWER_BOUND)
-#define FH_MEM_RSSR_RX_STATUS_REG	(FH_MEM_RSSR_LOWER_BOUND + 0x004)
-#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
-					(FH_MEM_RSSR_LOWER_BOUND + 0x008)
-
-#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE	(0x01000000)
-
-#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT	28
-
-/* TFDB  Area - TFDs buffer table */
-#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
-#define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
-#define FH_TFDIB_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0x958)
-#define FH_TFDIB_CTRL0_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
-#define FH_TFDIB_CTRL1_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-
-/**
- * Transmit DMA Channel Control/Status Registers (TCSR)
- *
- * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
- * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
- * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
- *
- * To use a Tx DMA channel, driver must initialize its
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
- *
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
- *
- * All other bits should be 0.
- *
- * Bit fields:
- * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- *        '10' operate normally
- * 29- 4: Reserved, set to "0"
- *     3: Enable internal DMA requests (1, normal operation), disable (0)
- *  2- 0: Reserved, set to "0"
- */
-#define FH_TCSR_LOWER_BOUND  (FH_MEM_LOWER_BOUND + 0xD00)
-#define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)
-
-/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH49_TCSR_CHNL_NUM                            (7)
-#define FH50_TCSR_CHNL_NUM                            (8)
-
-/* TCSR: tx_config register values */
-#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)	\
-		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
-#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl)	\
-		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)	\
-		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV		(0x00000001)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	(0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE	(0x00000008)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT	(0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD	(0x00100000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD	(0x00200000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT	(0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD	(0x00400000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD	(0x00800000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE	(0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF	(0x40000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	(0x80000000)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY	(0x00000000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT	(0x00002000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID	(0x00000003)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM		(20)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX		(12)
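As the comment above spells out, bringing a Tx DMA channel up amounts to writing the enable and credit-enable bits into its config register and leaving everything else zero. Sketch only; fh_write32 is a placeholder accessor.

extern void fh_write32(u32 reg, u32 val);   /* hypothetical accessor */

static void tx_dma_channel_enable(int chnl)
{
	fh_write32(FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}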
-
-/**
- * Tx Shared Status Registers (TSSR)
- *
- * After stopping Tx DMA channel (writing 0 to
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
- * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
- * (channel's buffers empty | no pending requests).
- *
- * Bit fields:
- * 31-24:  1 = Channel buffers empty (channel 7:0)
- * 23-16:  1 = No pending requests (channel 7:0)
- */
-#define FH_TSSR_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xEA0)
-#define FH_TSSR_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xEC0)
-
-#define FH_TSSR_TX_STATUS_REG		(FH_TSSR_LOWER_BOUND + 0x010)
-
-/**
- * Bit fields for TSSR (Tx Shared Status & Control) error status register:
- * 31:  Indicates an address error when accessing internal memory
- *	uCode/driver must write "1" in order to clear this flag
- * 30:  Indicates that Host did not send the expected number of dwords to FH
- *	uCode/driver must write "1" in order to clear this flag
- * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
- *	command was received from the scheduler while the TRB was already full
- *	with previous command
- *	uCode/driver must write "1" in order to clear this flag
- * 7-0: Each status bit indicates a channel's TxCredit error. When an error
- *	bit is set, it indicates that the FH has received a full indication
- *	from the RTC TxFIFO and the current value of the TxCredit counter was
- *	not equal to zero. This means that the credit mechanism was not
- *	synchronized to the TxFIFO status
- *	uCode/driver must write "1" in order to clear this flag
- */
-#define FH_TSSR_TX_ERROR_REG		(FH_TSSR_LOWER_BOUND + 0x018)
-
-#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
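A sketch of the stop sequence described above: clear the channel's config register, then poll the shared status register until its idle bit for that channel is set. fh_read32/fh_write32/delay_usec are placeholder helpers, and the 1 ms budget is an arbitrary choice for the example.

extern u32 fh_read32(u32 reg);                 /* hypothetical accessors */
extern void fh_write32(u32 reg, u32 val);
extern void delay_usec(unsigned int usec);

static int tx_dma_channel_stop(int chnl)
{
	unsigned int waited = 0;

	fh_write32(FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

	while (!(fh_read32(FH_TSSR_TX_STATUS_REG) &
		 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl))) {
		if (waited >= 1000)
			return -1;      /* give up after ~1 ms */
		delay_usec(10);
		waited += 10;
	}
	return 0;
}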
-
-/* Tx service channels */
-#define FH_SRVC_CHNL		(9)
-#define FH_SRVC_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0x9C8)
-#define FH_SRVC_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
-		(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
-
-#define FH_TX_CHICKEN_BITS_REG	(FH_MEM_LOWER_BOUND + 0xE98)
-/* Instruct FH to increment the retry count of a packet when
- * it is brought from the memory to TX-FIFO
- */
-#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)
-
-#define RX_QUEUE_SIZE                         256
-#define RX_QUEUE_MASK                         255
-#define RX_QUEUE_SIZE_LOG                     8
-
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-/**
- * struct iwl_rb_status - receive buffer status
- * 	host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
- * 	to which the last frame was written
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * 	which was transferred
- */
-struct iwl_rb_status {
-	__le16 closed_rb_num;
-	__le16 closed_fr_num;
-	__le16 finished_rb_num;
-	__le16 finished_fr_nam;
-	__le32 __unused; /* 3945 only */
-} __packed;
-
-
-#define TFD_QUEUE_SIZE_MAX      (256)
-#define TFD_QUEUE_SIZE_BC_DUP	(64)
-#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
-#define IWL_NUM_OF_TBS		20
-
-static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
-{
-	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
-}
-/**
- * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
- *
- * This structure contains the dma address and length of one Tx buffer
- *
- * @lo: low [31:0] portion of the dma address of TX buffer
- * 	every even-numbered entry is unaligned on a 16-bit boundary
- * @hi_n_len 0-3 [35:32] portion of dma
- *	     4-15 length of the tx buffer
- */
-struct iwl_tfd_tb {
-	__le32 lo;
-	__le16 hi_n_len;
-} __packed;
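To illustrate the hi_n_len packing documented above, DMA address bits [35:32] go in bits 0-3 and the (up to 12-bit) length in bits 4-15. The helper names are invented for this sketch, while cpu_to_le16/cpu_to_le32 and le16_to_cpu are the usual kernel endianness converters.

/* Pack a 36-bit DMA address and a length (< 4096) into one iwl_tfd_tb. */
static void tfd_tb_set(struct iwl_tfd_tb *tb, u64 addr, u16 len)
{
	u16 hi_n_len = (u16)((addr >> 32) & 0xF) | (u16)(len << 4);

	tb->lo = cpu_to_le32((u32)addr);
	tb->hi_n_len = cpu_to_le16(hi_n_len);
}

static u16 tfd_tb_get_len(const struct iwl_tfd_tb *tb)
{
	return le16_to_cpu(tb->hi_n_len) >> 4;
}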
-
-/**
- * struct iwl_tfd
- *
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- *	     5   reserved
- * 	     6-7 padding (not used)
- * @ tbs[20]	transmit frame buffer descriptors
- * @ __pad 	padding
- *
- * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
- * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
- *
- * Driver must indicate the physical address of the base of each
- * circular buffer via the FH_MEM_CBBC_QUEUE registers.
- *
- * Each TFD contains pointer/size information for up to 20 data buffers
- * in host DRAM.  These buffers collectively contain the (one) frame described
- * by the TFD.  Each buffer must be a single contiguous block of memory within
- * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
- * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
- * Tx frame, up to 8 KBytes in size.
- *
- * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
- */
-struct iwl_tfd {
-	u8 __reserved1[3];
-	u8 num_tbs;
-	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
-	__le32 __pad;
-} __packed;
-
-/* Keep Warm Size */
-#define IWL_KW_SIZE 0x1000	/* 4k */
-
-#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9f..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *iwl_legacy_get_cmd_string(u8 cmd)
-{
-	switch (cmd) {
-		IWL_CMD(REPLY_ALIVE);
-		IWL_CMD(REPLY_ERROR);
-		IWL_CMD(REPLY_RXON);
-		IWL_CMD(REPLY_RXON_ASSOC);
-		IWL_CMD(REPLY_QOS_PARAM);
-		IWL_CMD(REPLY_RXON_TIMING);
-		IWL_CMD(REPLY_ADD_STA);
-		IWL_CMD(REPLY_REMOVE_STA);
-		IWL_CMD(REPLY_WEPKEY);
-		IWL_CMD(REPLY_3945_RX);
-		IWL_CMD(REPLY_TX);
-		IWL_CMD(REPLY_RATE_SCALE);
-		IWL_CMD(REPLY_LEDS_CMD);
-		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
-		IWL_CMD(REPLY_CHANNEL_SWITCH);
-		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
-		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
-		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
-		IWL_CMD(POWER_TABLE_CMD);
-		IWL_CMD(PM_SLEEP_NOTIFICATION);
-		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
-		IWL_CMD(REPLY_SCAN_CMD);
-		IWL_CMD(REPLY_SCAN_ABORT_CMD);
-		IWL_CMD(SCAN_START_NOTIFICATION);
-		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
-		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
-		IWL_CMD(BEACON_NOTIFICATION);
-		IWL_CMD(REPLY_TX_BEACON);
-		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
-		IWL_CMD(REPLY_BT_CONFIG);
-		IWL_CMD(REPLY_STATISTICS_CMD);
-		IWL_CMD(STATISTICS_NOTIFICATION);
-		IWL_CMD(CARD_STATE_NOTIFICATION);
-		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
-		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
-		IWL_CMD(SENSITIVITY_CMD);
-		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
-		IWL_CMD(REPLY_RX_PHY_CMD);
-		IWL_CMD(REPLY_RX_MPDU_CMD);
-		IWL_CMD(REPLY_RX);
-		IWL_CMD(REPLY_COMPRESSED_BA);
-	default:
-		return "UNKNOWN";
-
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
-
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
-				     struct iwl_device_cmd *cmd,
-				     struct iwl_rx_packet *pkt)
-{
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
-		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
-		return;
-	}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	switch (cmd->hdr.cmd) {
-	case REPLY_TX_LINK_QUALITY_CMD:
-	case SENSITIVITY_CMD:
-		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
-		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
-		break;
-	default:
-		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
-		iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
-	}
-#endif
-}
-
-static int
-iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-	int ret;
-
-	BUG_ON(!(cmd->flags & CMD_ASYNC));
-
-	/* An asynchronous command can not expect an SKB to be set. */
-	BUG_ON(cmd->flags & CMD_WANT_SKB);
-
-	/* Assign a generic callback if one is not provided */
-	if (!cmd->callback)
-		cmd->callback = iwl_legacy_generic_cmd_callback;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return -EBUSY;
-
-	ret = iwl_legacy_enqueue_hcmd(priv, cmd);
-	if (ret < 0) {
-		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
-			  iwl_legacy_get_cmd_string(cmd->id), ret);
-		return ret;
-	}
-	return 0;
-}
-
-int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-	int cmd_idx;
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	BUG_ON(cmd->flags & CMD_ASYNC);
-
-	 /* A synchronous command can not have a callback set. */
-	BUG_ON(cmd->callback);
-
-	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
-			iwl_legacy_get_cmd_string(cmd->id));
-
-	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
-	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
-			iwl_legacy_get_cmd_string(cmd->id));
-
-	cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
-	if (cmd_idx < 0) {
-		ret = cmd_idx;
-		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
-			  iwl_legacy_get_cmd_string(cmd->id), ret);
-		goto out;
-	}
-
-	ret = wait_event_timeout(priv->wait_command_queue,
-			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
-			HOST_COMPLETE_TIMEOUT);
-	if (!ret) {
-		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
-			IWL_ERR(priv,
-				"Error sending %s: time out after %dms.\n",
-				iwl_legacy_get_cmd_string(cmd->id),
-				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
-			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-			IWL_DEBUG_INFO(priv,
-				"Clearing HCMD_ACTIVE for command %s\n",
-				       iwl_legacy_get_cmd_string(cmd->id));
-			ret = -ETIMEDOUT;
-			goto cancel;
-		}
-	}
-
-	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
-		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
-			       iwl_legacy_get_cmd_string(cmd->id));
-		ret = -ECANCELED;
-		goto fail;
-	}
-	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
-		IWL_ERR(priv, "Command %s failed: FW Error\n",
-			       iwl_legacy_get_cmd_string(cmd->id));
-		ret = -EIO;
-		goto fail;
-	}
-	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
-		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
-			  iwl_legacy_get_cmd_string(cmd->id));
-		ret = -EIO;
-		goto cancel;
-	}
-
-	ret = 0;
-	goto out;
-
-cancel:
-	if (cmd->flags & CMD_WANT_SKB) {
-		/*
-		 * Cancel the CMD_WANT_SKB flag for the cmd in the
-		 * TX cmd queue. Otherwise in case the cmd comes
-		 * in later, it will possibly set an invalid
-		 * address (cmd->meta.source).
-		 */
-		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
-							~CMD_WANT_SKB;
-	}
-fail:
-	if (cmd->reply_page) {
-		iwl_legacy_free_pages(priv, cmd->reply_page);
-		cmd->reply_page = 0;
-	}
-out:
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
-
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-	if (cmd->flags & CMD_ASYNC)
-		return iwl_legacy_send_cmd_async(priv, cmd);
-
-	return iwl_legacy_send_cmd_sync(priv, cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd);
-
-int
-iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
-	struct iwl_host_cmd cmd = {
-		.id = id,
-		.len = len,
-		.data = data,
-	};
-
-	return iwl_legacy_send_cmd_sync(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
-
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
-			   u8 id, u16 len, const void *data,
-			   void (*callback)(struct iwl_priv *priv,
-					    struct iwl_device_cmd *cmd,
-					    struct iwl_rx_packet *pkt))
-{
-	struct iwl_host_cmd cmd = {
-		.id = id,
-		.len = len,
-		.data = data,
-	};
-
-	cmd.flags |= CMD_ASYNC;
-	cmd.callback = callback;
-
-	return iwl_legacy_send_cmd_async(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23ea..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_helpers_h__
-#define __iwl_legacy_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
-	struct ieee80211_hw *hw)
-{
-	return &hw->conf;
-}
-
-/**
- * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
-{
-	return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
-{
-	return --index & (n_bd - 1);
-}
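For example, with a 256-entry queue these helpers wrap 255 back to 0 and 0 back to 255, which is what keeps the descriptor queue circular:

static void queue_wrap_example(void)
{
	int next = iwl_legacy_queue_inc_wrap(255, 256);   /* next == 0   */
	int prev = iwl_legacy_queue_dec_wrap(0, 256);     /* prev == 255 */

	(void)next;
	(void)prev;
}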
-
-/* TODO: Move fw_desc functions to iwl-pci.ko */
-static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
-				    struct fw_desc *desc)
-{
-	if (desc->v_addr)
-		dma_free_coherent(&pci_dev->dev, desc->len,
-				  desc->v_addr, desc->p_addr);
-	desc->v_addr = NULL;
-	desc->len = 0;
-}
-
-static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
-				    struct fw_desc *desc)
-{
-	if (!desc->len) {
-		desc->v_addr = NULL;
-		return -EINVAL;
-	}
-
-	desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
-					  &desc->p_addr, GFP_KERNEL);
-	return (desc->v_addr != NULL) ? 0 : -ENOMEM;
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void
-iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
-	BUG_ON(ac > 3);   /* only have 2 bits */
-	BUG_ON(hwq > 31); /* only use 5 bits */
-
-	txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
-				  struct iwl_tx_queue *txq)
-{
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-
-	if (test_and_clear_bit(hwq, priv->queue_stopped))
-		if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
-			ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
-				  struct iwl_tx_queue *txq)
-{
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-
-	if (!test_and_set_bit(hwq, priv->queue_stopped))
-		if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
-			ieee80211_stop_queue(priv->hw, ac);
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
-{
-	clear_bit(STATUS_INT_ENABLED, &priv->status);
-
-	/* disable interrupts from uCode/NIC to host */
-	iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-	/* acknowledge/clear/reset any interrupts still pending
-	 * from uCode or flow handler (Rx/Tx DMA) */
-	iwl_write32(priv, CSR_INT, 0xffffffff);
-	iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
-	IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
-{
-	IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
-	iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
-{
-	IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
-	set_bit(STATUS_INT_ENABLED, &priv->status);
-	iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
-					   u16 tsf_bits)
-{
-	return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
-					    u16 tsf_bits)
-{
-	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-#endif				/* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d34..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_io_h__
-#define __iwl_legacy_io_h__
-
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-devtrace.h"
-
-/*
- * IO, register, and NIC memory access functions
- *
- * NOTE on naming convention and macro usage for these
- *
- * A single _ prefix before an access function means that no state
- * check or debug information is printed when that function is called.
- *
- * A double __ prefix before an access function means that state is checked
- * and the current line number and caller function name are printed in addition
- * to any other debug output.
- *
- * The non-prefixed name is the #define that maps the caller into a
- * #define that provides the caller's name and __LINE__ to the double
- * prefix version.
- *
- * If you wish to call the function without any debug or state checking,
- * you should use the single _ prefix version (as is used by dependent IO
- * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
- * _iwl_legacy_read32.)
- *
- * These declarations are *extremely* useful in quickly isolating code deltas
- * which result in misconfiguration of the hardware I/O.  In combination with
- * git-bisect and the IO debug level you can quickly determine the specific
- * commit which breaks the IO sequence to the hardware.
- *
- */
-
-static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
-{
-	trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
-	iowrite8(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
-				 u32 ofs, u8 val)
-{
-	IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
-	_iwl_legacy_write8(priv, ofs, val);
-}
-#define iwl_write8(priv, ofs, val) \
-	__iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
-#endif
-
-
-static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
-{
-	trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
-	iowrite32(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
-				 u32 ofs, u32 val)
-{
-	IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
-	_iwl_legacy_write32(priv, ofs, val);
-}
-#define iwl_write32(priv, ofs, val) \
-	__iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
-#endif
-
-static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
-{
-	u32 val = ioread32(priv->hw_base + ofs);
-	trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
-	return val;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32
-__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
-{
-	IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
-	return _iwl_legacy_read32(priv, ofs);
-}
-#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
-#else
-#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
-#endif
-
-#define IWL_POLL_INTERVAL 10	/* microseconds */
-static inline int
-_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
-				u32 bits, u32 mask, int timeout)
-{
-	int t = 0;
-
-	do {
-		if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
-			return t;
-		udelay(IWL_POLL_INTERVAL);
-		t += IWL_POLL_INTERVAL;
-	} while (t < timeout);
-
-	return -ETIMEDOUT;
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
-				 struct iwl_priv *priv, u32 addr,
-				 u32 bits, u32 mask, int timeout)
-{
-	int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
-	IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
-		     addr, bits, mask,
-		     unlikely(ret  == -ETIMEDOUT) ? "timeout" : "", f, l);
-	return ret;
-}
-#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
-	__iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
-	bits, mask, timeout)
-#else
-#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
-#endif
-
-static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
-	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_set_bit(const char *f, u32 l,
-				 struct iwl_priv *priv, u32 reg, u32 mask)
-{
-	u32 val = _iwl_legacy_read32(priv, reg) | mask;
-	IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
-							mask, val);
-	_iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&p->reg_lock, reg_flags);
-	__iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
-	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&p->reg_lock, reg_flags);
-	_iwl_legacy_set_bit(p, r, m);
-	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline void
-_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
-	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_clear_bit(const char *f, u32 l,
-				   struct iwl_priv *priv, u32 reg, u32 mask)
-{
-	u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
-	IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
-	_iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&p->reg_lock, reg_flags);
-	__iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
-	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&p->reg_lock, reg_flags);
-	_iwl_legacy_clear_bit(p, r, m);
-	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
-{
-	int ret;
-	u32 val;
-
-	/* this bit wakes up the NIC */
-	_iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
-				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-	/*
-	 * These bits say the device is running, and should keep running for
-	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
-	 * but they do not indicate that embedded SRAM is restored yet;
-	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
-	 * to/from host DRAM when sleeping/waking for power-saving.
-	 * Each direction takes approximately 1/4 millisecond; with this
-	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
-	 * series of register accesses are expected (e.g. reading Event Log),
-	 * to keep device from sleeping.
-	 *
-	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
-	 * SRAM is okay/restored.  We don't check that here because this call
-	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
-	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
-	 *
-	 */
-	ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
-			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
-			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
-			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
-	if (ret < 0) {
-		val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
-		IWL_ERR(priv,
-			"MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
-		_iwl_legacy_write32(priv, CSR_RESET,
-				CSR_RESET_REG_FLAG_FORCE_NMI);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
-					       struct iwl_priv *priv)
-{
-	IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
-	return _iwl_legacy_grab_nic_access(priv);
-}
-#define iwl_grab_nic_access(priv) \
-	__iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_grab_nic_access(priv) \
-	_iwl_legacy_grab_nic_access(priv)
-#endif
-
-static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
-{
-	_iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
-					    struct iwl_priv *priv)
-{
-
-	IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
-	_iwl_legacy_release_nic_access(priv);
-}
-#define iwl_release_nic_access(priv) \
-	__iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_release_nic_access(priv) \
-	_iwl_legacy_release_nic_access(priv)
-#endif
-
-static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
-	return _iwl_legacy_read32(priv, reg);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
-					struct iwl_priv *priv, u32 reg)
-{
-	u32 value = _iwl_legacy_read_direct32(priv, reg);
-	IWL_DEBUG_IO(priv,
-			"read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
-		     f, l);
-	return value;
-}
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
-	u32 value;
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-	value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-	return value;
-}
-
-#else
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
-	u32 value;
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-	value = _iwl_legacy_read_direct32(priv, reg);
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-	return value;
-
-}
-#endif
-
-static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
-					 u32 reg, u32 value)
-{
-	_iwl_legacy_write32(priv, reg, value);
-}
-static inline void
-iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	if (!iwl_grab_nic_access(priv)) {
-		_iwl_legacy_write_direct32(priv, reg, value);
-		iwl_release_nic_access(priv);
-	}
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
-					       u32 reg, u32 len, u32 *values)
-{
-	u32 count = sizeof(u32);
-
-	if ((priv != NULL) && (values != NULL)) {
-		for (; 0 < len; len -= count, reg += count, values++)
-			iwl_legacy_write_direct32(priv, reg, *values);
-	}
-}
-
-static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
-				       u32 mask, int timeout)
-{
-	int t = 0;
-
-	do {
-		if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
-			return t;
-		udelay(IWL_POLL_INTERVAL);
-		t += IWL_POLL_INTERVAL;
-	} while (t < timeout);
-
-	return -ETIMEDOUT;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
-					    struct iwl_priv *priv,
-					    u32 addr, u32 mask, int timeout)
-{
-	int ret  = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
-
-	if (unlikely(ret == -ETIMEDOUT))
-		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
-			     "timedout - %s %d\n", addr, mask, f, l);
-	else
-		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
-			     "- %s %d\n", addr, mask, ret, f, l);
-	return ret;
-}
-#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
-__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
-#else
-#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
-#endif
-
-static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
-	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
-	rmb();
-	return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
-}
-static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
-	unsigned long reg_flags;
-	u32 val;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-	val = _iwl_legacy_read_prph(priv, reg);
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-	return val;
-}
-
-static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
-					     u32 addr, u32 val)
-{
-	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
-			      ((addr & 0x0000FFFF) | (3 << 24)));
-	wmb();
-	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
-}
-
-static inline void
-iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	if (!iwl_grab_nic_access(priv)) {
-		_iwl_legacy_write_prph(priv, addr, val);
-		iwl_release_nic_access(priv);
-	}
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
-_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
-
-static inline void
-iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-	_iwl_legacy_set_bits_prph(priv, reg, mask);
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
-_iwl_legacy_write_prph(priv, reg,				\
-		 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
-
-static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
-				u32 bits, u32 mask)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-	_iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
-						 *priv, u32 reg, u32 mask)
-{
-	unsigned long reg_flags;
-	u32 val;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-	val = _iwl_legacy_read_prph(priv, reg);
-	_iwl_legacy_write_prph(priv, reg, (val & ~mask));
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
-{
-	unsigned long reg_flags;
-	u32 value;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	iwl_grab_nic_access(priv);
-
-	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
-	rmb();
-	value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-	return value;
-}
-
-static inline void
-iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	if (!iwl_grab_nic_access(priv)) {
-		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
-		wmb();
-		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
-		iwl_release_nic_access(priv);
-	}
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void
-iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
-					  u32 len, u32 *values)
-{
-	unsigned long reg_flags;
-
-	spin_lock_irqsave(&priv->reg_lock, reg_flags);
-	if (!iwl_grab_nic_access(priv)) {
-		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
-		wmb();
-		for (; 0 < len; len -= sizeof(u32), values++)
-			_iwl_legacy_write_direct32(priv,
-					HBUS_TARG_MEM_WDAT, *values);
-
-		iwl_release_nic_access(priv);
-	}
-	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a4..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* default: IWL_LED_BLINK(0) using blinking index table */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
-		"1=On(RF On)/Off(RF Off), 2=blinking");
-
-/* Throughput		OFF time(ms)	ON time (ms)
- *	>300			25		25
- *	>200 to 300		40		40
- *	>100 to 200		55		55
- *	>70 to 100		65		65
- *	>50 to 70		75		75
- *	>20 to 50		85		85
- *	>10 to 20		95		95
- *	>5 to 10		110		110
- *	>1 to 5			130		130
- *	>0 to 1			167		167
- *	<=0					SOLID ON
- */
-static const struct ieee80211_tpt_blink iwl_blink[] = {
-	{ .throughput = 0, .blink_time = 334 },
-	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
-	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
-	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
-	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
-	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
-	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
-	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
-	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
-	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
-};
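The mac80211 throughput trigger built from this array works in kbit/s, which is what the N * 1024 - 1 thresholds encode (a reading of the mac80211 API, not something stated in this file): a sustained throughput just above 20 Mbit/s selects the 170 ms entry, i.e. the 85 ms on / 85 ms off row of the table above.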
-
-/*
- * Adjust LED blink rate to compensate for MAC clock differences on each HW.
- * LED blink rate analysis showed an average deviation of 0% on 3945 and
- * 5% on 4965 HW.
- * The LED on/off time must be compensated per HW according to the deviation
- * to achieve the desired LED frequency.
- * The calculation is: (100-averageDeviation)/100 * blinkTime
- * For code efficiency the calculation will be:
- *     compensation = (100 - averageDeviation) * 64 / 100
- *     NewBlinkTime = (compensation * BlinkTime) / 64
- */
-static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
-				    u8 time, u16 compensation)
-{
-	if (!compensation) {
-		IWL_ERR(priv, "undefined blink compensation: "
-			"use pre-defined blinking time\n");
-		return time;
-	}
-
-	return (u8)((time * compensation) >> 6);
-}
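For illustration (per-HW compensation values assumed, not taken from the driver): with the ~5% average deviation quoted for 4965 the constant would be (100 - 5) * 64 / 100 = 60, so a 260 ms blink-table entry is sent to the firmware as (260 * 60) >> 6 = 243 ms.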
-
-/* Set led pattern command */
-static int iwl_legacy_led_cmd(struct iwl_priv *priv,
-		       unsigned long on,
-		       unsigned long off)
-{
-	struct iwl_led_cmd led_cmd = {
-		.id = IWL_LED_LINK,
-		.interval = IWL_DEF_LED_INTRVL
-	};
-	int ret;
-
-	if (!test_bit(STATUS_READY, &priv->status))
-		return -EBUSY;
-
-	if (priv->blink_on == on && priv->blink_off == off)
-		return 0;
-
-	if (off == 0) {
-		/* led is SOLID_ON */
-		on = IWL_LED_SOLID;
-	}
-
-	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
-			priv->cfg->base_params->led_compensation);
-	led_cmd.on = iwl_legacy_blink_compensation(priv, on,
-				priv->cfg->base_params->led_compensation);
-	led_cmd.off = iwl_legacy_blink_compensation(priv, off,
-				priv->cfg->base_params->led_compensation);
-
-	ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
-	if (!ret) {
-		priv->blink_on = on;
-		priv->blink_off = off;
-	}
-	return ret;
-}
-
-static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
-				   enum led_brightness brightness)
-{
-	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-	unsigned long on = 0;
-
-	if (brightness > 0)
-		on = IWL_LED_SOLID;
-
-	iwl_legacy_led_cmd(priv, on, 0);
-}
-
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
-			     unsigned long *delay_on,
-			     unsigned long *delay_off)
-{
-	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-
-	return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
-}
-
-void iwl_legacy_leds_init(struct iwl_priv *priv)
-{
-	int mode = led_mode;
-	int ret;
-
-	if (mode == IWL_LED_DEFAULT)
-		mode = priv->cfg->led_mode;
-
-	priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
-				   wiphy_name(priv->hw->wiphy));
-	priv->led.brightness_set = iwl_legacy_led_brightness_set;
-	priv->led.blink_set = iwl_legacy_led_blink_set;
-	priv->led.max_brightness = 1;
-
-	switch (mode) {
-	case IWL_LED_DEFAULT:
-		WARN_ON(1);
-		break;
-	case IWL_LED_BLINK:
-		priv->led.default_trigger =
-			ieee80211_create_tpt_led_trigger(priv->hw,
-					IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
-					iwl_blink, ARRAY_SIZE(iwl_blink));
-		break;
-	case IWL_LED_RF_STATE:
-		priv->led.default_trigger =
-			ieee80211_get_radio_led_name(priv->hw);
-		break;
-	}
-
-	ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
-	if (ret) {
-		kfree(priv->led.name);
-		return;
-	}
-
-	priv->led_registered = true;
-}
-EXPORT_SYMBOL(iwl_legacy_leds_init);
-
-void iwl_legacy_leds_exit(struct iwl_priv *priv)
-{
-	if (!priv->led_registered)
-		return;
-
-	led_classdev_unregister(&priv->led);
-	kfree(priv->led.name);
-}
-EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f7..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_leds_h__
-#define __iwl_legacy_leds_h__
-
-
-struct iwl_priv;
-
-#define IWL_LED_SOLID 11
-#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
-
-#define IWL_LED_ACTIVITY       (0<<1)
-#define IWL_LED_LINK           (1<<1)
-
-/*
- * LED mode
- *    IWL_LED_DEFAULT:  use device default
- *    IWL_LED_RF_STATE: turn LED on/off based on RF state
- *			LED ON  = RF ON
- *			LED OFF = RF OFF
- *    IWL_LED_BLINK:    adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
-	IWL_LED_DEFAULT,
-	IWL_LED_RF_STATE,
-	IWL_LED_BLINK,
-};
-
-void iwl_legacy_leds_init(struct iwl_priv *priv);
-void iwl_legacy_leds_exit(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e4..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_rs_h__
-#define __iwl_legacy_rs_h__
-
-struct iwl_rate_info {
-	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
-	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
-	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
-	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
-	u8 prev_ieee;    /* previous rate in IEEE speeds */
-	u8 next_ieee;    /* next rate in IEEE speeds */
-	u8 prev_rs;      /* previous rate used in rs algo */
-	u8 next_rs;      /* next rate used in rs algo */
-	u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
-	u8 next_rs_tgg;  /* next rate used in TGG rs algo */
-};
-
-struct iwl3945_rate_info {
-	u8 plcp;		/* uCode API:  IWL_RATE_6M_PLCP, etc. */
-	u8 ieee;		/* MAC header:  IWL_RATE_6M_IEEE, etc. */
-	u8 prev_ieee;		/* previous rate in IEEE speeds */
-	u8 next_ieee;		/* next rate in IEEE speeds */
-	u8 prev_rs;		/* previous rate used in rs algo */
-	u8 next_rs;		/* next rate used in rs algo */
-	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
-	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
-	u8 table_rs_index;	/* index in rate scale table cmd */
-	u8 prev_table_rs;	/* prev in rate table cmd */
-};
-
-
-/*
- * These serve as indexes into
- * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
- */
-enum {
-	IWL_RATE_1M_INDEX = 0,
-	IWL_RATE_2M_INDEX,
-	IWL_RATE_5M_INDEX,
-	IWL_RATE_11M_INDEX,
-	IWL_RATE_6M_INDEX,
-	IWL_RATE_9M_INDEX,
-	IWL_RATE_12M_INDEX,
-	IWL_RATE_18M_INDEX,
-	IWL_RATE_24M_INDEX,
-	IWL_RATE_36M_INDEX,
-	IWL_RATE_48M_INDEX,
-	IWL_RATE_54M_INDEX,
-	IWL_RATE_60M_INDEX,
-	IWL_RATE_COUNT,
-	IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1,	/* Excluding 60M */
-	IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
-	IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
-	IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
-enum {
-	IWL_RATE_6M_INDEX_TABLE = 0,
-	IWL_RATE_9M_INDEX_TABLE,
-	IWL_RATE_12M_INDEX_TABLE,
-	IWL_RATE_18M_INDEX_TABLE,
-	IWL_RATE_24M_INDEX_TABLE,
-	IWL_RATE_36M_INDEX_TABLE,
-	IWL_RATE_48M_INDEX_TABLE,
-	IWL_RATE_54M_INDEX_TABLE,
-	IWL_RATE_1M_INDEX_TABLE,
-	IWL_RATE_2M_INDEX_TABLE,
-	IWL_RATE_5M_INDEX_TABLE,
-	IWL_RATE_11M_INDEX_TABLE,
-	IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
-};
-
-enum {
-	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
-	IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
-	IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
-	IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
-	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-};
-
-/* #define vs. enum to keep from defaulting to 'large integer' */
-#define	IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
-#define	IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
-#define	IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
-#define	IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
-#define	IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
-#define	IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
-#define	IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
-#define	IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
-#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
-#define	IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
-#define	IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
-#define	IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
-#define	IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
-
-/* uCode API values for legacy bit rates, both OFDM and CCK */
-enum {
-	IWL_RATE_6M_PLCP  = 13,
-	IWL_RATE_9M_PLCP  = 15,
-	IWL_RATE_12M_PLCP = 5,
-	IWL_RATE_18M_PLCP = 7,
-	IWL_RATE_24M_PLCP = 9,
-	IWL_RATE_36M_PLCP = 11,
-	IWL_RATE_48M_PLCP = 1,
-	IWL_RATE_54M_PLCP = 3,
-	IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
-	IWL_RATE_1M_PLCP  = 10,
-	IWL_RATE_2M_PLCP  = 20,
-	IWL_RATE_5M_PLCP  = 55,
-	IWL_RATE_11M_PLCP = 110,
-	/*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
-};
-
-/* uCode API values for OFDM high-throughput (HT) bit rates */
-enum {
-	IWL_RATE_SISO_6M_PLCP = 0,
-	IWL_RATE_SISO_12M_PLCP = 1,
-	IWL_RATE_SISO_18M_PLCP = 2,
-	IWL_RATE_SISO_24M_PLCP = 3,
-	IWL_RATE_SISO_36M_PLCP = 4,
-	IWL_RATE_SISO_48M_PLCP = 5,
-	IWL_RATE_SISO_54M_PLCP = 6,
-	IWL_RATE_SISO_60M_PLCP = 7,
-	IWL_RATE_MIMO2_6M_PLCP  = 0x8,
-	IWL_RATE_MIMO2_12M_PLCP = 0x9,
-	IWL_RATE_MIMO2_18M_PLCP = 0xa,
-	IWL_RATE_MIMO2_24M_PLCP = 0xb,
-	IWL_RATE_MIMO2_36M_PLCP = 0xc,
-	IWL_RATE_MIMO2_48M_PLCP = 0xd,
-	IWL_RATE_MIMO2_54M_PLCP = 0xe,
-	IWL_RATE_MIMO2_60M_PLCP = 0xf,
-	IWL_RATE_SISO_INVM_PLCP,
-	IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-};
-
-/* MAC header values for bit rates */
-enum {
-	IWL_RATE_6M_IEEE  = 12,
-	IWL_RATE_9M_IEEE  = 18,
-	IWL_RATE_12M_IEEE = 24,
-	IWL_RATE_18M_IEEE = 36,
-	IWL_RATE_24M_IEEE = 48,
-	IWL_RATE_36M_IEEE = 72,
-	IWL_RATE_48M_IEEE = 96,
-	IWL_RATE_54M_IEEE = 108,
-	IWL_RATE_60M_IEEE = 120,
-	IWL_RATE_1M_IEEE  = 2,
-	IWL_RATE_2M_IEEE  = 4,
-	IWL_RATE_5M_IEEE  = 11,
-	IWL_RATE_11M_IEEE = 22,
-};
-
-#define IWL_CCK_BASIC_RATES_MASK    \
-	(IWL_RATE_1M_MASK          | \
-	IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK          \
-	(IWL_CCK_BASIC_RATES_MASK  | \
-	IWL_RATE_5M_MASK          | \
-	IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK   \
-	(IWL_RATE_6M_MASK         | \
-	IWL_RATE_12M_MASK         | \
-	IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK         \
-	(IWL_OFDM_BASIC_RATES_MASK | \
-	IWL_RATE_9M_MASK          | \
-	IWL_RATE_18M_MASK         | \
-	IWL_RATE_36M_MASK         | \
-	IWL_RATE_48M_MASK         | \
-	IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK         \
-	(IWL_OFDM_BASIC_RATES_MASK | \
-	 IWL_CCK_BASIC_RATES_MASK)
-
-#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
-#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
-
-#define IWL_INVALID_VALUE    -1
-
-#define IWL_MIN_RSSI_VAL                 -100
-#define IWL_MAX_RSSI_VAL                    0
-
-/* These values specify how many Tx frame attempts are allowed before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT	160
-#define IWL_LEGACY_SUCCESS_LIMIT	480
-#define IWL_LEGACY_TABLE_COUNT		160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT	400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT	4500
-#define IWL_NONE_LEGACY_TABLE_COUNT	1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO		12800	/* 100% */
-#define IWL_RATE_SCALE_SWITCH		10880	/*  85% */
-#define IWL_RATE_HIGH_TH		10880	/*  85% */
-#define IWL_RATE_INCREASE_TH		6400	/*  50% */
-#define IWL_RATE_DECREASE_TH		1920	/*  15% */
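These thresholds are percentages in the driver's percent * 128 fixed point (see the "per-cent * 128" comment on struct iwl_rate_scale_data below): 12800 is 100%, 10880 is 85%, 6400 is 50% and 1920 is 15%, so a success ratio computed as 128 * 100 * acked / attempted (an illustrative formula inferred from that comment) compares directly against them.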
-
-/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1      0
-#define IWL_LEGACY_SWITCH_ANTENNA2      1
-#define IWL_LEGACY_SWITCH_SISO          2
-#define IWL_LEGACY_SWITCH_MIMO2_AB      3
-#define IWL_LEGACY_SWITCH_MIMO2_AC      4
-#define IWL_LEGACY_SWITCH_MIMO2_BC      5
-
-/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1        0
-#define IWL_SISO_SWITCH_ANTENNA2        1
-#define IWL_SISO_SWITCH_MIMO2_AB        2
-#define IWL_SISO_SWITCH_MIMO2_AC        3
-#define IWL_SISO_SWITCH_MIMO2_BC        4
-#define IWL_SISO_SWITCH_GI              5
-
-/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1       0
-#define IWL_MIMO2_SWITCH_ANTENNA2       1
-#define IWL_MIMO2_SWITCH_SISO_A         2
-#define IWL_MIMO2_SWITCH_SISO_B         3
-#define IWL_MIMO2_SWITCH_SISO_C         4
-#define IWL_MIMO2_SWITCH_GI             5
-
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
-
-#define IWL_ACTION_LIMIT		3	/* # possible actions */
-
-#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
-
-/* load per tid defines for A-MPDU activation */
-#define IWL_AGG_TPT_THREHOLD	0
-#define IWL_AGG_LOAD_THRESHOLD	10
-#define IWL_AGG_ALL_TID		0xff
-#define TID_QUEUE_CELL_SPACING	50	/* ms */
-#define TID_QUEUE_MAX_SIZE	20
-#define TID_ROUND_VALUE		5	/* ms */
-#define TID_MAX_LOAD_COUNT	8
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
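Illustrative arithmetic for the defines above: the load window spans TID_MAX_TIME_DIFF = (20 - 1) * 50 = 950 ms, i.e. up to 20 cells of 50 ms each, and TIME_WRAP_AROUND(x, y) reduces to y - x whenever the counter has not wrapped.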
-
-extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
-
-enum iwl_table_type {
-	LQ_NONE,
-	LQ_G,		/* legacy types */
-	LQ_A,
-	LQ_SISO,	/* high-throughput types */
-	LQ_MIMO2,
-	LQ_MAX,
-};
-
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
-
-#define	ANT_NONE	0x0
-#define	ANT_A		BIT(0)
-#define	ANT_B		BIT(1)
-#define	ANT_AB		(ANT_A | ANT_B)
-#define ANT_C		BIT(2)
-#define	ANT_AC		(ANT_A | ANT_C)
-#define ANT_BC		(ANT_B | ANT_C)
-#define ANT_ABC		(ANT_AB | ANT_C)
-
-#define IWL_MAX_MCS_DISPLAY_SIZE	12
-
-struct iwl_rate_mcs_info {
-	char	mbps[IWL_MAX_MCS_DISPLAY_SIZE];
-	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
-	u64 data;		/* bitmap of successful frames */
-	s32 success_counter;	/* number of frames successful */
-	s32 success_ratio;	/* per-cent * 128  */
-	s32 counter;		/* number of frames attempted */
-	s32 average_tpt;	/* success ratio * expected throughput */
-	unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
-	enum iwl_table_type lq_type;
-	u8 ant_type;
-	u8 is_SGI;	/* 1 = short guard interval */
-	u8 is_ht40;	/* 1 = 40 MHz channel width */
-	u8 is_dup;	/* 1 = duplicated data streams */
-	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximum number of tables we can search */
-	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
-	u32 current_rate;  /* rate_n_flags, uCode API format */
-	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
-	unsigned long time_stamp;	/* age of the oldest statistics */
-	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
-						 * slice */
-	u32 total;			/* total num of packets during the
-					 * last TID_MAX_TIME_DIFF */
-	u8 queue_count;			/* number of queues that have
-					 * been used since the last cleanup */
-	u8 head;			/* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
-	u8 active_tbl;		/* index of active table, range 0-1 */
-	u8 enable_counter;	/* indicates HT mode */
-	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
-	u8 search_better_tbl;	/* 1: currently trying alternate mode */
-	s32 last_tpt;
-
-	/* The following determine when to search for a new mode */
-	u32 table_count_limit;
-	u32 max_failure_limit;	/* # failed frames before new search */
-	u32 max_success_limit;	/* # successful frames before new search */
-	u32 table_count;
-	u32 total_failed;	/* total failed frames, any/all rates */
-	u32 total_success;	/* total successful frames, any/all rates */
-	u64 flush_timer;	/* time staying in mode before new search */
-
-	u8 action_counter;	/* # mode-switch actions tried */
-	u8 is_green;
-	u8 is_dup;
-	enum ieee80211_band band;
-
-	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
-	u32 supp_rates;
-	u16 active_legacy_rate;
-	u16 active_siso_rate;
-	u16 active_mimo2_rate;
-	s8 max_rate_idx;     /* Max rate set by user */
-	u8 missed_rate_counter;
-
-	struct iwl_link_quality_cmd lq;
-	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
-	u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
-	struct dentry *rs_sta_dbgfs_scale_table_file;
-	struct dentry *rs_sta_dbgfs_stats_table_file;
-	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
-	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
-	u32 dbg_fixed_rate;
-#endif
-	struct iwl_priv *drv;
-
-	/* used to be in sta_info */
-	int last_txrate_idx;
-	/* last tx rate_n_flags */
-	u32 last_rate_n_flags;
-	/* packets destined for this STA are aggregated */
-	u8 is_agg;
-};
-
-static inline u8 iwl4965_num_of_ant(u8 mask)
-{
-	return  !!((mask) & ANT_A) +
-		!!((mask) & ANT_B) +
-		!!((mask) & ANT_C);
-}
-
-static inline u8 iwl4965_first_antenna(u8 mask)
-{
-	if (mask & ANT_A)
-		return ANT_A;
-	if (mask & ANT_B)
-		return ANT_B;
-	return ANT_C;
-}
-
-
-/**
- * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
- *
- * The specific throughput table used is based on the type of network
- * the station is associated with, including A, B, G, and G w/ TGG protection
- */
-extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
-
-/* Initialize station's rate scaling information after adding station */
-extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
-			     struct ieee80211_sta *sta, u8 sta_id);
-extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
-				 struct ieee80211_sta *sta, u8 sta_id);
-
-/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
- *
- * Since the rate control algorithm is hardware specific, there is no need
- * or reason to place it as a stand-alone module.  The driver can call
- * iwl_rate_control_register in order to register the rate control callbacks
- * with the mac80211 subsystem.  This should be performed prior to calling
- * ieee80211_register_hw
- *
- */
-extern int iwl4965_rate_control_register(void);
-extern int iwl3945_rate_control_register(void);
-
-/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
- *
- * This should be called after calling ieee80211_unregister_hw, but before
- * the driver is unloaded.
- */
-extern void iwl4965_rate_control_unregister(void);
-extern void iwl3945_rate_control_unregister(void);
-
-#endif /* __iwl_legacy_rs_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-
-/*
- * Setting power level allows the card to go to sleep when not busy.
- *
- * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
- */
-
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct iwl_power_vec_entry {
-	struct iwl_powertable_cmd cmd;
-	u8 no_dtim;	/* number of DTIM periods to skip */
-};
-
-static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
-				    struct iwl_powertable_cmd *cmd)
-{
-	memset(cmd, 0, sizeof(*cmd));
-
-	if (priv->power_data.pci_pm)
-		cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
-	IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
-}
-
-static int
-iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
-{
-	IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
-	IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
-	IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
-					le32_to_cpu(cmd->tx_data_timeout));
-	IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
-					le32_to_cpu(cmd->rx_data_timeout));
-	IWL_DEBUG_POWER(priv,
-			"Sleep interval vector = { %d , %d , %d , %d , %d }\n",
-			le32_to_cpu(cmd->sleep_interval[0]),
-			le32_to_cpu(cmd->sleep_interval[1]),
-			le32_to_cpu(cmd->sleep_interval[2]),
-			le32_to_cpu(cmd->sleep_interval[3]),
-			le32_to_cpu(cmd->sleep_interval[4]));
-
-	return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
-				sizeof(struct iwl_powertable_cmd), cmd);
-}
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
-		       bool force)
-{
-	int ret;
-	bool update_chains;
-
-	lockdep_assert_held(&priv->mutex);
-
-	/* Don't update the RX chain when chain noise calibration is running */
-	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
-			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
-	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
-		return 0;
-
-	if (!iwl_legacy_is_ready_rf(priv))
-		return -EIO;
-
-	/* on scan complete, sleep_cmd_next is used; keep it updated */
-	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
-	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
-		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
-		return 0;
-	}
-
-	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
-		set_bit(STATUS_POWER_PMI, &priv->status);
-
-	ret = iwl_legacy_set_power(priv, cmd);
-	if (!ret) {
-		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
-			clear_bit(STATUS_POWER_PMI, &priv->status);
-
-		if (priv->cfg->ops->lib->update_chain_flags && update_chains)
-			priv->cfg->ops->lib->update_chain_flags(priv);
-		else if (priv->cfg->ops->lib->update_chain_flags)
-			IWL_DEBUG_POWER(priv,
-					"Cannot update the power, chain noise "
-					"calibration running: %d\n",
-					priv->chain_noise_data.state);
-
-		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
-	} else
-		IWL_ERR(priv, "set power fail, ret = %d", ret);
-
-	return ret;
-}
-
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
-{
-	struct iwl_powertable_cmd cmd;
-
-	iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
-	return iwl_legacy_power_set_mode(priv, &cmd, force);
-}
-EXPORT_SYMBOL(iwl_legacy_power_update_mode);
-
-/* initialize to default */
-void iwl_legacy_power_initialize(struct iwl_priv *priv)
-{
-	u16 lctl = iwl_legacy_pcie_link_ctl(priv);
-
-	priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-
-	priv->power_data.debug_sleep_level_override = -1;
-
-	memset(&priv->power_data.sleep_cmd, 0,
-		sizeof(priv->power_data.sleep_cmd));
-}
-EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36a..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_legacy_power_setting_h__
-#define __iwl_legacy_power_setting_h__
-
-#include "iwl-commands.h"
-
-enum iwl_power_level {
-	IWL_POWER_INDEX_1,
-	IWL_POWER_INDEX_2,
-	IWL_POWER_INDEX_3,
-	IWL_POWER_INDEX_4,
-	IWL_POWER_INDEX_5,
-	IWL_POWER_NUM
-};
-
-struct iwl_power_mgr {
-	struct iwl_powertable_cmd sleep_cmd;
-	struct iwl_powertable_cmd sleep_cmd_next;
-	int debug_sleep_level_override;
-	bool pci_pm;
-};
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
-		       bool force);
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
-void iwl_legacy_power_initialize(struct iwl_priv *priv);
-
-#endif  /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index f4d21ec..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC.  These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC.  The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt.  The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_legacy_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
- *                            READ INDEX, detaching the SKB from the pool.
- *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl_rx_queue_restock to refill any empty
- *                            slots.
- * ...
- *
- */
-
-/**
- * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
-{
-	int s = q->read - q->write;
-	if (s <= 0)
-		s += RX_QUEUE_SIZE;
-	/* keep some buffer to not confuse full and empty queue */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
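Worked example (taking RX_QUEUE_SIZE as 256 for illustration): with read = 10 and write = 200, s = 10 - 200 = -190, plus 256 gives 66, minus the 2 slots of slack leaves 64 free slots; the slack is what keeps a completely full ring from being mistaken for an empty one.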
-
-/**
- * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void
-iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
-					struct iwl_rx_queue *q)
-{
-	unsigned long flags;
-	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
-	u32 reg;
-
-	spin_lock_irqsave(&q->lock, flags);
-
-	if (q->need_update == 0)
-		goto exit_unlock;
-
-	/* If power-saving is in use, make sure device is awake */
-	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv,
-				"Rx queue requesting wakeup,"
-				" GP1 = 0x%x\n", reg);
-			iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
-				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-			goto exit_unlock;
-		}
-
-		q->write_actual = (q->write & ~0x7);
-		iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
-				q->write_actual);
-
-	/* Else device is assumed to be awake */
-	} else {
-		/* Device expects a multiple of 8 */
-		q->write_actual = (q->write & ~0x7);
-		iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
-			q->write_actual);
-	}
-
-	q->need_update = 0;
-
- exit_unlock:
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
-
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct device *dev = &priv->pci_dev->dev;
-	int i;
-
-	spin_lock_init(&rxq->lock);
-	INIT_LIST_HEAD(&rxq->rx_free);
-	INIT_LIST_HEAD(&rxq->rx_used);
-
-	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
-	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
-				     GFP_KERNEL);
-	if (!rxq->bd)
-		goto err_bd;
-
-	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
-					  &rxq->rb_stts_dma, GFP_KERNEL);
-	if (!rxq->rb_stts)
-		goto err_rb;
-
-	/* Fill the rx_used queue with _all_ of the Rx buffers */
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
-		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
-	/* Set us so that we have processed and used all buffers, but have
-	 * not restocked the Rx queue with fresh buffers */
-	rxq->read = rxq->write = 0;
-	rxq->write_actual = 0;
-	rxq->free_count = 0;
-	rxq->need_update = 0;
-	return 0;
-
-err_rb:
-	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->bd_dma);
-err_bd:
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
-
-
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
-					  struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
-	if (!report->state) {
-		IWL_DEBUG_11H(priv,
-			"Spectrum Measure Notification: Start\n");
-		return;
-	}
-
-	memcpy(&priv->measure_report, report, sizeof(*report));
-	priv->measurement_status |= MEASUREMENT_READY;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-
-/*
- * returns non-zero if packet should be dropped
- */
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
-			   struct ieee80211_hdr *hdr,
-			   u32 decrypt_res,
-			   struct ieee80211_rx_status *stats)
-{
-	u16 fc = le16_to_cpu(hdr->frame_control);
-
-	/*
-	 * All contexts have the same setting here due to it being
-	 * a module parameter, so OK to check any context.
-	 */
-	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
-						RXON_FILTER_DIS_DECRYPT_MSK)
-		return 0;
-
-	if (!(fc & IEEE80211_FCTL_PROTECTED))
-		return 0;
-
-	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
-	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
-	case RX_RES_STATUS_SEC_TYPE_TKIP:
-		/* The uCode got a bad phase 1 key and pushes the packet up;
-		 * decryption will be done in SW. */
-		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
-		    RX_RES_STATUS_BAD_KEY_TTAK)
-			break;
-
-	case RX_RES_STATUS_SEC_TYPE_WEP:
-		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
-		    RX_RES_STATUS_BAD_ICV_MIC) {
-			/* bad ICV, the packet is destroyed since the
-			 * decryption is done in place, drop it */
-			IWL_DEBUG_RX(priv, "Packet destroyed\n");
-			return -1;
-		}
-	case RX_RES_STATUS_SEC_TYPE_CCMP:
-		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
-		    RX_RES_STATUS_DECRYPT_OK) {
-			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
-			stats->flag |= RX_FLAG_DECRYPTED;
-		}
-		break;
-
-	default:
-		break;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index 521b73b..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
- * sending probe req.  This should be set long enough to hear probe responses
- * from more than one AP.  */
-#define IWL_ACTIVE_DWELL_TIME_24    (30)       /* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52    (20)
-
-#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
-#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
-
-/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
- * Must be set longer than active dwell time.
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */
-#define IWL_PASSIVE_DWELL_TIME_24   (20)       /* all times in msec */
-#define IWL_PASSIVE_DWELL_TIME_52   (10)
-#define IWL_PASSIVE_DWELL_BASE      (100)
-#define IWL_CHANNEL_TUNE_TIME       5
-
-static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
-{
-	int ret;
-	struct iwl_rx_packet *pkt;
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_SCAN_ABORT_CMD,
-		.flags = CMD_WANT_SKB,
-	};
-
-	/* Exit instantly with an error when the device is not ready
-	 * to receive the scan abort command or no hardware scan is
-	 * currently in progress */
-	if (!test_bit(STATUS_READY, &priv->status) ||
-	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
-	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
-	    test_bit(STATUS_FW_ERROR, &priv->status) ||
-	    test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return -EIO;
-
-	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
-	if (ret)
-		return ret;
-
-	pkt = (struct iwl_rx_packet *)cmd.reply_page;
-	if (pkt->u.status != CAN_ABORT_STATUS) {
-		/* The scan abort will return 1 for success or
-		 * 2 for "failure".  A failure condition can be
-		 * due to simply not being in an active scan which
-		 * can occur if we send the scan abort before
-		 * the microcode has notified us that a scan is
-		 * completed. */
-		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
-		ret = -EIO;
-	}
-
-	iwl_legacy_free_pages(priv, cmd.reply_page);
-	return ret;
-}
-
-static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
-{
-	/* check if scan was requested from mac80211 */
-	if (priv->scan_request) {
-		IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
-		ieee80211_scan_completed(priv->hw, aborted);
-	}
-
-	priv->scan_vif = NULL;
-	priv->scan_request = NULL;
-}
-
-void iwl_legacy_force_scan_end(struct iwl_priv *priv)
-{
-	lockdep_assert_held(&priv->mutex);
-
-	if (!test_bit(STATUS_SCANNING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
-		return;
-	}
-
-	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
-	clear_bit(STATUS_SCANNING, &priv->status);
-	clear_bit(STATUS_SCAN_HW, &priv->status);
-	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
-	iwl_legacy_complete_scan(priv, true);
-}
-
-static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
-{
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (!test_bit(STATUS_SCANNING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
-		return;
-	}
-
-	if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
-		return;
-	}
-
-	ret = iwl_legacy_send_scan_abort(priv);
-	if (ret) {
-		IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
-		iwl_legacy_force_scan_end(priv);
-	} else
-		IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
-}
-
-/**
- * iwl_scan_cancel - Cancel any currently executing HW scan
- */
-int iwl_legacy_scan_cancel(struct iwl_priv *priv)
-{
-	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
-	queue_work(priv->workqueue, &priv->abort_scan);
-	return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel);
-
-/**
- * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
- * @ms: amount of time to wait (in milliseconds) for scan to abort
- *
- */
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(ms);
-
-	lockdep_assert_held(&priv->mutex);
-
-	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
-
-	iwl_legacy_do_scan_abort(priv);
-
-	while (time_before_eq(jiffies, timeout)) {
-		if (!test_bit(STATUS_SCAN_HW, &priv->status))
-			break;
-		msleep(20);
-	}
-
-	return test_bit(STATUS_SCAN_HW, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
-
-/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
-			      struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_scanreq_notification *notif =
-	    (struct iwl_scanreq_notification *)pkt->u.raw;
-
-	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
-#endif
-}
-
-/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
-				    struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_scanstart_notification *notif =
-	    (struct iwl_scanstart_notification *)pkt->u.raw;
-	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
-	IWL_DEBUG_SCAN(priv, "Scan start: "
-		       "%d [802.11%s] "
-		       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
-		       notif->channel,
-		       notif->band ? "bg" : "a",
-		       le32_to_cpu(notif->tsf_high),
-		       le32_to_cpu(notif->tsf_low),
-		       notif->status, notif->beacon_timer);
-}
-
-/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
-				      struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_scanresults_notification *notif =
-	    (struct iwl_scanresults_notification *)pkt->u.raw;
-
-	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
-		       "%d [802.11%s] "
-		       "(TSF: 0x%08X:%08X) - %d "
-		       "elapsed=%lu usec\n",
-		       notif->channel,
-		       notif->band ? "bg" : "a",
-		       le32_to_cpu(notif->tsf_high),
-		       le32_to_cpu(notif->tsf_low),
-		       le32_to_cpu(notif->statistics[0]),
-		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
-#endif
-}
-
-/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
-				       struct iwl_rx_mem_buffer *rxb)
-{
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
-
-	IWL_DEBUG_SCAN(priv,
-			"Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
-		       scan_notif->scanned_channels,
-		       scan_notif->tsf_low,
-		       scan_notif->tsf_high, scan_notif->status);
-
-	/* The HW is no longer scanning */
-	clear_bit(STATUS_SCAN_HW, &priv->status);
-
-	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
-		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
-		       jiffies_to_msecs(jiffies - priv->scan_start));
-
-	queue_work(priv->workqueue, &priv->scan_completed);
-}
-
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
-{
-	/* scan handlers */
-	priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
-	priv->rx_handlers[SCAN_START_NOTIFICATION] =
-					iwl_legacy_rx_scan_start_notif;
-	priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
-					iwl_legacy_rx_scan_results_notif;
-	priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
-					iwl_legacy_rx_scan_complete_notif;
-}
-EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
-
-inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
-				     enum ieee80211_band band,
-				     u8 n_probes)
-{
-	if (band == IEEE80211_BAND_5GHZ)
-		return IWL_ACTIVE_DWELL_TIME_52 +
-			IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
-	else
-		return IWL_ACTIVE_DWELL_TIME_24 +
-			IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
-}
-EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
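For example, an active scan carrying two probe requests dwells 30 + 3 * (2 + 1) = 39 ms per 2.4 GHz channel and 20 + 2 * (2 + 1) = 26 ms per 5 GHz channel.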
-
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
-			       enum ieee80211_band band,
-			       struct ieee80211_vif *vif)
-{
-	struct iwl_rxon_context *ctx;
-	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
-	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
-	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
-
-	if (iwl_legacy_is_any_associated(priv)) {
-		/*
-		 * If we're associated, we clamp the maximum passive
-		 * dwell time to be 98% of the smallest beacon interval
-		 * (minus 2 * channel tune time)
-		 */
-		for_each_context(priv, ctx) {
-			u16 value;
-
-			if (!iwl_legacy_is_associated_ctx(ctx))
-				continue;
-			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
-			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
-				value = IWL_PASSIVE_DWELL_BASE;
-			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
-			passive = min(value, passive);
-		}
-	}
-
-	return passive;
-}
-EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
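Worked example of the clamp above: with a typical 100 ms beacon interval, value stays at IWL_PASSIVE_DWELL_BASE (100), then (100 * 98) / 100 - 2 * 5 = 88, so the 2.4 GHz passive dwell drops from the unassociated default of 100 + 20 = 120 ms to 88 ms.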
-
-void iwl_legacy_init_scan_params(struct iwl_priv *priv)
-{
-	u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
-	if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
-		priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-	if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
-		priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
-}
-EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-
-static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
-				    struct ieee80211_vif *vif)
-{
-	int ret;
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (WARN_ON(!priv->cfg->ops->utils->request_scan))
-		return -EOPNOTSUPP;
-
-	cancel_delayed_work(&priv->scan_check);
-
-	if (!iwl_legacy_is_ready_rf(priv)) {
-		IWL_WARN(priv, "Request scan called when driver not ready.\n");
-		return -EIO;
-	}
-
-	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-		IWL_DEBUG_SCAN(priv,
-			"Multiple concurrent scan requests are not supported.\n");
-		return -EBUSY;
-	}
-
-	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
-		return -EBUSY;
-	}
-
-	IWL_DEBUG_SCAN(priv, "Starting scan...\n");
-
-	set_bit(STATUS_SCANNING, &priv->status);
-	priv->scan_start = jiffies;
-
-	ret = priv->cfg->ops->utils->request_scan(priv, vif);
-	if (ret) {
-		clear_bit(STATUS_SCANNING, &priv->status);
-		return ret;
-	}
-
-	queue_delayed_work(priv->workqueue, &priv->scan_check,
-			   IWL_SCAN_CHECK_WATCHDOG);
-
-	return 0;
-}
-
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif,
-		    struct cfg80211_scan_request *req)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (req->n_channels == 0)
-		return -EINVAL;
-
-	mutex_lock(&priv->mutex);
-
-	if (test_bit(STATUS_SCANNING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
-
-	/* mac80211 will only ask for one band at a time */
-	priv->scan_request = req;
-	priv->scan_vif = vif;
-	priv->scan_band = req->channels[0]->band;
-
-	ret = iwl_legacy_scan_initiate(priv, vif);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out_unlock:
-	mutex_unlock(&priv->mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-
-static void iwl_legacy_bg_scan_check(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, scan_check.work);
-
-	IWL_DEBUG_SCAN(priv, "Scan check work\n");
-
-	/* Since we got here, the firmware did not finish the scan and
-	 * is most likely in bad shape, so don't bother sending an abort
-	 * command; just force scan complete to mac80211 */
-	mutex_lock(&priv->mutex);
-	iwl_legacy_force_scan_end(priv);
-	mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16
-iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
-		       const u8 *ta, const u8 *ies, int ie_len, int left)
-{
-	int len = 0;
-	u8 *pos = NULL;
-
-	/* Make sure there is enough space for the probe request,
-	 * two mandatory IEs and the data */
-	left -= 24;
-	if (left < 0)
-		return 0;
-
-	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-	memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
-	memcpy(frame->sa, ta, ETH_ALEN);
-	memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
-	frame->seq_ctrl = 0;
-
-	len += 24;
-
-	/* ...next IE... */
-	pos = &frame->u.probe_req.variable[0];
-
-	/* fill in our indirect SSID IE */
-	left -= 2;
-	if (left < 0)
-		return 0;
-	*pos++ = WLAN_EID_SSID;
-	*pos++ = 0;
-
-	len += 2;
-
-	if (WARN_ON(left < ie_len))
-		return len;
-
-	if (ies && ie_len) {
-		memcpy(pos, ies, ie_len);
-		len += ie_len;
-	}
-
-	return (u16)len;
-}
-EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
-
-static void iwl_legacy_bg_abort_scan(struct work_struct *work)
-{
-	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
-
-	IWL_DEBUG_SCAN(priv, "Abort scan work\n");
-
-	/* Keep the scan_check work queued in case the firmware never
-	 * reports back a scan completed notification */
-	mutex_lock(&priv->mutex);
-	iwl_legacy_scan_cancel_timeout(priv, 200);
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl_legacy_bg_scan_completed(struct work_struct *work)
-{
-	struct iwl_priv *priv =
-	    container_of(work, struct iwl_priv, scan_completed);
-	bool aborted;
-
-	IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
-	cancel_delayed_work(&priv->scan_check);
-
-	mutex_lock(&priv->mutex);
-
-	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
-	if (aborted)
-		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
-	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
-		goto out_settings;
-	}
-
-	iwl_legacy_complete_scan(priv, aborted);
-
-out_settings:
-	/* Can we still talk to firmware ? */
-	if (!iwl_legacy_is_ready_rf(priv))
-		goto out;
-
-	/*
-	 * We do not commit power settings while scan is pending,
-	 * do it now if the settings changed.
-	 */
-	iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
-	iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-
-	priv->cfg->ops->utils->post_scan(priv);
-
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
-{
-	INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
-	INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
-	INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
-
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
-{
-	cancel_work_sync(&priv->abort_scan);
-	cancel_work_sync(&priv->scan_completed);
-
-	if (cancel_delayed_work_sync(&priv->scan_check)) {
-		mutex_lock(&priv->mutex);
-		iwl_legacy_force_scan_end(priv);
-		mutex_unlock(&priv->mutex);
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
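For reference, the passive-dwell clamp in iwl_legacy_get_passive_dwell_time() above limits the dwell to 98% of the smallest associated beacon interval minus twice the channel tune time. A minimal standalone sketch of that arithmetic follows; the base dwell and tune-time constants are assumed placeholder values, not the driver's real ones.

#include <stdio.h>

#define PASSIVE_DWELL_BASE	100	/* assumed placeholder, in ms */
#define CHANNEL_TUNE_TIME	  5	/* assumed placeholder, in ms */

/* Clamp a requested passive dwell against one associated context's
 * beacon interval, mirroring the loop body of the removed helper. */
static unsigned short clamp_passive_dwell(unsigned short passive,
					  unsigned short beacon_int)
{
	unsigned short value = beacon_int;

	/* Intervals above the base (or unknown, i.e. zero) fall back to it. */
	if (value > PASSIVE_DWELL_BASE || !value)
		value = PASSIVE_DWELL_BASE;

	/* 98% of the beacon interval, minus two channel tune times. */
	value = (value * 98) / 100 - CHANNEL_TUNE_TIME * 2;

	return value < passive ? value : passive;
}

int main(void)
{
	/* A beacon interval of 100 clamps a requested dwell of 120 to 88. */
	printf("%u\n", clamp_passive_dwell(120, 100));
	return 0;
}

In the driver itself this runs once per associated context, and the smallest result across all contexts is used.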
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a47..85fe48e 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
  *
  *****************************************************************************/
 
-#ifndef __iwl_legacy_spectrum_h__
-#define __iwl_legacy_spectrum_h__
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
 enum {				/* ieee80211_basic_report.map */
 	IEEE80211_BASIC_MAP_BSS = (1 << 0),
 	IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
deleted file mode 100644
index f10df3e..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/lockdep.h>
-#include <linux/export.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-
-/* priv->sta_lock must be held */
-static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
-{
-
-	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
-		IWL_ERR(priv,
-			"ACTIVATE a non DRIVER active station id %u addr %pM\n",
-			sta_id, priv->stations[sta_id].sta.sta.addr);
-
-	if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
-		IWL_DEBUG_ASSOC(priv,
-			"STA id %u addr %pM already present"
-			" in uCode (according to driver)\n",
-			sta_id, priv->stations[sta_id].sta.sta.addr);
-	} else {
-		priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
-		IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
-				sta_id, priv->stations[sta_id].sta.sta.addr);
-	}
-}
-
-static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
-				    struct iwl_legacy_addsta_cmd *addsta,
-				    struct iwl_rx_packet *pkt,
-				    bool sync)
-{
-	u8 sta_id = addsta->sta.sta_id;
-	unsigned long flags;
-	int ret = -EIO;
-
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			pkt->hdr.flags);
-		return ret;
-	}
-
-	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
-		       sta_id);
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	switch (pkt->u.add_sta.status) {
-	case ADD_STA_SUCCESS_MSK:
-		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-		iwl_legacy_sta_ucode_activate(priv, sta_id);
-		ret = 0;
-		break;
-	case ADD_STA_NO_ROOM_IN_TABLE:
-		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
-			sta_id);
-		break;
-	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-		IWL_ERR(priv,
-			"Adding station %d failed, no block ack resource.\n",
-			sta_id);
-		break;
-	case ADD_STA_MODIFY_NON_EXIST_STA:
-		IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
-			sta_id);
-		break;
-	default:
-		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
-				pkt->u.add_sta.status);
-		break;
-	}
-
-	IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-		       priv->stations[sta_id].sta.mode ==
-		       STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-		       sta_id, priv->stations[sta_id].sta.sta.addr);
-
-	/*
-	 * XXX: The MAC address in the command buffer is often changed from
-	 * the original sent to the device. That is, the MAC address
-	 * written to the command buffer often is not the same MAC address
-	 * read from the command buffer when the command returns. This
-	 * issue has not yet been resolved and this debugging is left to
-	 * observe the problem.
-	 */
-	IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-		       priv->stations[sta_id].sta.mode ==
-		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
-		       addsta->sta.addr);
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return ret;
-}
-
-static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
-				 struct iwl_device_cmd *cmd,
-				 struct iwl_rx_packet *pkt)
-{
-	struct iwl_legacy_addsta_cmd *addsta =
-		(struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
-
-	iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
-
-}
-
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
-		     struct iwl_legacy_addsta_cmd *sta, u8 flags)
-{
-	struct iwl_rx_packet *pkt = NULL;
-	int ret = 0;
-	u8 data[sizeof(*sta)];
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_ADD_STA,
-		.flags = flags,
-		.data = data,
-	};
-	u8 sta_id __maybe_unused = sta->sta.sta_id;
-
-	IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
-		       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
-
-	if (flags & CMD_ASYNC)
-		cmd.callback = iwl_legacy_add_sta_callback;
-	else {
-		cmd.flags |= CMD_WANT_SKB;
-		might_sleep();
-	}
-
-	cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
-	ret = iwl_legacy_send_cmd(priv, &cmd);
-
-	if (ret || (flags & CMD_ASYNC))
-		return ret;
-
-	if (ret == 0) {
-		pkt = (struct iwl_rx_packet *)cmd.reply_page;
-		ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
-	}
-	iwl_legacy_free_pages(priv, cmd.reply_page);
-
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_add_sta);
-
-static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
-				   struct ieee80211_sta *sta,
-				   struct iwl_rxon_context *ctx)
-{
-	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
-	__le32 sta_flags;
-	u8 mimo_ps_mode;
-
-	if (!sta || !sta_ht_inf->ht_supported)
-		goto done;
-
-	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
-	IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
-			(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
-			"static" :
-			(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
-			"dynamic" : "disabled");
-
-	sta_flags = priv->stations[index].sta.station_flags;
-
-	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
-	switch (mimo_ps_mode) {
-	case WLAN_HT_CAP_SM_PS_STATIC:
-		sta_flags |= STA_FLG_MIMO_DIS_MSK;
-		break;
-	case WLAN_HT_CAP_SM_PS_DYNAMIC:
-		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
-		break;
-	case WLAN_HT_CAP_SM_PS_DISABLED:
-		break;
-	default:
-		IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
-		break;
-	}
-
-	sta_flags |= cpu_to_le32(
-	      (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
-
-	sta_flags |= cpu_to_le32(
-	      (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
-
-	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
-		sta_flags |= STA_FLG_HT40_EN_MSK;
-	else
-		sta_flags &= ~STA_FLG_HT40_EN_MSK;
-
-	priv->stations[index].sta.station_flags = sta_flags;
- done:
-	return;
-}
-
-/**
- * iwl_legacy_prep_station - Prepare station information for addition
- *
- * should be called with sta_lock held
- */
-u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
-{
-	struct iwl_station_entry *station;
-	int i;
-	u8 sta_id = IWL_INVALID_STATION;
-	u16 rate;
-
-	if (is_ap)
-		sta_id = ctx->ap_sta_id;
-	else if (is_broadcast_ether_addr(addr))
-		sta_id = ctx->bcast_sta_id;
-	else
-		for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
-			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
-						addr)) {
-				sta_id = i;
-				break;
-			}
-
-			if (!priv->stations[i].used &&
-			    sta_id == IWL_INVALID_STATION)
-				sta_id = i;
-		}
-
-	/*
-	 * These two conditions have the same outcome, but keep them
-	 * separate
-	 */
-	if (unlikely(sta_id == IWL_INVALID_STATION))
-		return sta_id;
-
-	/*
-	 * uCode is not able to deal with multiple requests to add a
-	 * station. Keep track if one is in progress so that we do not send
-	 * another.
-	 */
-	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
-		IWL_DEBUG_INFO(priv,
-				"STA %d already in process of being added.\n",
-				sta_id);
-		return sta_id;
-	}
-
-	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
-	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
-	    !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
-		IWL_DEBUG_ASSOC(priv,
-				"STA %d (%pM) already added, not adding again.\n",
-				sta_id, addr);
-		return sta_id;
-	}
-
-	station = &priv->stations[sta_id];
-	station->used = IWL_STA_DRIVER_ACTIVE;
-	IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
-			sta_id, addr);
-	priv->num_stations++;
-
-	/* Set up the REPLY_ADD_STA command to send to device */
-	memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
-	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
-	station->sta.mode = 0;
-	station->sta.sta.sta_id = sta_id;
-	station->sta.station_flags = ctx->station_flags;
-	station->ctxid = ctx->ctxid;
-
-	if (sta) {
-		struct iwl_station_priv_common *sta_priv;
-
-		sta_priv = (void *)sta->drv_priv;
-		sta_priv->ctx = ctx;
-	}
-
-	/*
-	 * OK to call unconditionally, since local stations (IBSS BSSID
-	 * STA and broadcast STA) pass in a NULL sta, and mac80211
-	 * doesn't allow HT IBSS.
-	 */
-	iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
-
-	/* 3945 only */
-	rate = (priv->band == IEEE80211_BAND_5GHZ) ?
-		IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
-	/* Turn on both antennas for the station... */
-	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
-
-	return sta_id;
-
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
-
-#define STA_WAIT_TIMEOUT (HZ/2)
-
-/**
- * iwl_legacy_add_station_common -
- */
-int
-iwl_legacy_add_station_common(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			   const u8 *addr, bool is_ap,
-			   struct ieee80211_sta *sta, u8 *sta_id_r)
-{
-	unsigned long flags_spin;
-	int ret = 0;
-	u8 sta_id;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	*sta_id_r = 0;
-	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
-			addr);
-		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-		return -EINVAL;
-	}
-
-	/*
-	 * uCode is not able to deal with multiple requests to add a
-	 * station. Keep track if one is in progress so that we do not send
-	 * another.
-	 */
-	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
-		IWL_DEBUG_INFO(priv,
-			"STA %d already in process of being added.\n",
-		       sta_id);
-		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-		return -EEXIST;
-	}
-
-	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
-	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
-		IWL_DEBUG_ASSOC(priv,
-			"STA %d (%pM) already added, not adding again.\n",
-			sta_id, addr);
-		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-		return -EEXIST;
-	}
-
-	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-				sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
-	/* Add station to device's station table */
-	ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-	if (ret) {
-		spin_lock_irqsave(&priv->sta_lock, flags_spin);
-		IWL_ERR(priv, "Adding station %pM failed.\n",
-			priv->stations[sta_id].sta.sta.addr);
-		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-	}
-	*sta_id_r = sta_id;
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_add_station_common);
-
-/**
- * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->sta_lock must be held
- */
-static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
-{
-	/* uCode must be active and the driver must not be active */
-	if ((priv->stations[sta_id].used &
-	     (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
-						IWL_STA_UCODE_ACTIVE)
-		IWL_ERR(priv, "removed non active STA %u\n", sta_id);
-
-	priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
-
-	memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
-	IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
-}
-
-static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
-				   const u8 *addr, int sta_id,
-				   bool temporary)
-{
-	struct iwl_rx_packet *pkt;
-	int ret;
-
-	unsigned long flags_spin;
-	struct iwl_rem_sta_cmd rm_sta_cmd;
-
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_REMOVE_STA,
-		.len = sizeof(struct iwl_rem_sta_cmd),
-		.flags = CMD_SYNC,
-		.data = &rm_sta_cmd,
-	};
-
-	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
-	rm_sta_cmd.num_sta = 1;
-	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
-
-	cmd.flags |= CMD_WANT_SKB;
-
-	ret = iwl_legacy_send_cmd(priv, &cmd);
-
-	if (ret)
-		return ret;
-
-	pkt = (struct iwl_rx_packet *)cmd.reply_page;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-			  pkt->hdr.flags);
-		ret = -EIO;
-	}
-
-	if (!ret) {
-		switch (pkt->u.rem_sta.status) {
-		case REM_STA_SUCCESS_MSK:
-			if (!temporary) {
-				spin_lock_irqsave(&priv->sta_lock, flags_spin);
-				iwl_legacy_sta_ucode_deactivate(priv, sta_id);
-				spin_unlock_irqrestore(&priv->sta_lock,
-								flags_spin);
-			}
-			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
-			break;
-		default:
-			ret = -EIO;
-			IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
-			break;
-		}
-	}
-	iwl_legacy_free_pages(priv, cmd.reply_page);
-
-	return ret;
-}
-
-/**
- * iwl_legacy_remove_station - Remove driver's knowledge of station.
- */
-int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
-		       const u8 *addr)
-{
-	unsigned long flags;
-
-	if (!iwl_legacy_is_ready(priv)) {
-		IWL_DEBUG_INFO(priv,
-			"Unable to remove station %pM, device not ready.\n",
-			addr);
-		/*
-		 * It is typical for stations to be removed when we are
-		 * going down. Return success since device will be down
-		 * soon anyway
-		 */
-		return 0;
-	}
-
-	IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
-			sta_id, addr);
-
-	if (WARN_ON(sta_id == IWL_INVALID_STATION))
-		return -EINVAL;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-
-	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
-		IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
-				addr);
-		goto out_err;
-	}
-
-	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
-		IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
-				addr);
-		goto out_err;
-	}
-
-	if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
-		kfree(priv->stations[sta_id].lq);
-		priv->stations[sta_id].lq = NULL;
-	}
-
-	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-
-	priv->num_stations--;
-
-	BUG_ON(priv->num_stations < 0);
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
-out_err:
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
-
-/**
- * iwl_legacy_clear_ucode_stations - clear ucode station table bits
- *
- * This function clears all the bits in the driver indicating
- * which stations are active in the ucode. Call when something
- * other than explicit station management would cause this in
- * the ucode, e.g. unassociated RXON.
- */
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
-			      struct iwl_rxon_context *ctx)
-{
-	int i;
-	unsigned long flags_spin;
-	bool cleared = false;
-
-	IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
-
-	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	for (i = 0; i < priv->hw_params.max_stations; i++) {
-		if (ctx && ctx->ctxid != priv->stations[i].ctxid)
-			continue;
-
-		if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
-			IWL_DEBUG_INFO(priv,
-				"Clearing ucode active for station %d\n", i);
-			priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
-			cleared = true;
-		}
-	}
-	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
-	if (!cleared)
-		IWL_DEBUG_INFO(priv,
-			"No active stations found to be cleared\n");
-}
-EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
-
-/**
- * iwl_legacy_restore_stations() - Restore driver known stations to device
- *
- * All stations considered active by the driver, but not present in the
- * ucode, are restored.
- *
- * Function sleeps.
- */
-void
-iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	struct iwl_legacy_addsta_cmd sta_cmd;
-	struct iwl_link_quality_cmd lq;
-	unsigned long flags_spin;
-	int i;
-	bool found = false;
-	int ret;
-	bool send_lq;
-
-	if (!iwl_legacy_is_ready(priv)) {
-		IWL_DEBUG_INFO(priv,
-			"Not ready yet, not restoring any stations.\n");
-		return;
-	}
-
-	IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
-	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	for (i = 0; i < priv->hw_params.max_stations; i++) {
-		if (ctx->ctxid != priv->stations[i].ctxid)
-			continue;
-		if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
-			    !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
-			IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
-					priv->stations[i].sta.sta.addr);
-			priv->stations[i].sta.mode = 0;
-			priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
-			found = true;
-		}
-	}
-
-	for (i = 0; i < priv->hw_params.max_stations; i++) {
-		if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
-			memcpy(&sta_cmd, &priv->stations[i].sta,
-			       sizeof(struct iwl_legacy_addsta_cmd));
-			send_lq = false;
-			if (priv->stations[i].lq) {
-				memcpy(&lq, priv->stations[i].lq,
-				       sizeof(struct iwl_link_quality_cmd));
-				send_lq = true;
-			}
-			spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-			ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-			if (ret) {
-				spin_lock_irqsave(&priv->sta_lock, flags_spin);
-				IWL_ERR(priv, "Adding station %pM failed.\n",
-					priv->stations[i].sta.sta.addr);
-				priv->stations[i].used &=
-						~IWL_STA_DRIVER_ACTIVE;
-				priv->stations[i].used &=
-						~IWL_STA_UCODE_INPROGRESS;
-				spin_unlock_irqrestore(&priv->sta_lock,
-								flags_spin);
-			}
-			/*
-			 * Rate scaling has already been initialized, send
-			 * current LQ command
-			 */
-			if (send_lq)
-				iwl_legacy_send_lq_cmd(priv, ctx, &lq,
-								CMD_SYNC, true);
-			spin_lock_irqsave(&priv->sta_lock, flags_spin);
-			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
-		}
-	}
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-	if (!found)
-		IWL_DEBUG_INFO(priv, "Restoring all known stations"
-				" .... no stations to be restored.\n");
-	else
-		IWL_DEBUG_INFO(priv, "Restoring all known stations"
-				" .... complete.\n");
-}
-EXPORT_SYMBOL(iwl_legacy_restore_stations);
-
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
-{
-	int i;
-
-	for (i = 0; i < priv->sta_key_max_num; i++)
-		if (!test_and_set_bit(i, &priv->ucode_key_table))
-			return i;
-
-	return WEP_INVALID_OFFSET;
-}
-EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
-
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	for (i = 0; i < priv->hw_params.max_stations; i++) {
-		if (!(priv->stations[i].used & IWL_STA_BCAST))
-			continue;
-
-		priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
-		priv->num_stations--;
-		BUG_ON(priv->num_stations < 0);
-		kfree(priv->stations[i].lq);
-		priv->stations[i].lq = NULL;
-	}
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
-			   struct iwl_link_quality_cmd *lq)
-{
-	int i;
-	IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
-	IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
-		       lq->general_params.single_stream_ant_msk,
-		       lq->general_params.dual_stream_ant_msk);
-
-	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
-		IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
-			       i, lq->rs_table[i].rate_n_flags);
-}
-#else
-static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
-				   struct iwl_link_quality_cmd *lq)
-{
-}
-#endif
-
-/**
- * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
- *
- * It sometimes happens that an HT rate has been in use and we then
- * lose connectivity with the AP; mac80211 will first tell us that the
- * current channel is no longer HT before removing the station. In such a
- * scenario the RXON flags will be updated to indicate we are not
- * communicating HT anymore, but the LQ command may still contain HT rates.
- * Test for this to prevent driver from sending LQ command between the time
- * RXON flags are updated and when LQ command is updated.
- */
-static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
-			      struct iwl_rxon_context *ctx,
-			      struct iwl_link_quality_cmd *lq)
-{
-	int i;
-
-	if (ctx->ht.enabled)
-		return true;
-
-	IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
-		       ctx->active.channel);
-	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
-		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
-						RATE_MCS_HT_MSK) {
-			IWL_DEBUG_INFO(priv,
-				       "index %d of LQ expects HT channel\n",
-				       i);
-			return false;
-		}
-	}
-	return true;
-}
-
-/**
- * iwl_legacy_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- *        after station has been added.
- *
- * The link quality command is sent as the last step of station creation.
- * When init is set, this is the special case in which we clear the state
- * indicating that station creation is still in progress once the command
- * completes.
- */
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-		    struct iwl_link_quality_cmd *lq, u8 flags, bool init)
-{
-	int ret = 0;
-	unsigned long flags_spin;
-
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_TX_LINK_QUALITY_CMD,
-		.len = sizeof(struct iwl_link_quality_cmd),
-		.flags = flags,
-		.data = lq,
-	};
-
-	if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
-		return -EINVAL;
-
-
-	spin_lock_irqsave(&priv->sta_lock, flags_spin);
-	if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
-		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-		return -EINVAL;
-	}
-	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
-	iwl_legacy_dump_lq_cmd(priv, lq);
-	BUG_ON(init && (cmd.flags & CMD_ASYNC));
-
-	if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
-		ret = iwl_legacy_send_cmd(priv, &cmd);
-	else
-		ret = -EINVAL;
-
-	if (cmd.flags & CMD_ASYNC)
-		return ret;
-
-	if (init) {
-		IWL_DEBUG_INFO(priv, "init LQ command complete,"
-				" clearing sta addition status for sta %d\n",
-			       lq->sta_id);
-		spin_lock_irqsave(&priv->sta_lock, flags_spin);
-		priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
-
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
-		       struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
-	int ret;
-
-	IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
-			sta->addr);
-	mutex_lock(&priv->mutex);
-	IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
-			sta->addr);
-	ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
-	if (ret)
-		IWL_ERR(priv, "Error removing station %pM\n",
-			sta->addr);
-	mutex_unlock(&priv->mutex);
-	return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
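The station bookkeeping removed above tracks each entry with IWL_STA_DRIVER_ACTIVE, IWL_STA_UCODE_ACTIVE and IWL_STA_UCODE_INPROGRESS bits, and refuses to send another ADD_STA while one is already in flight or once both the driver and the uCode know the station. A toy model of that decision, using simplified flag names rather than the driver's structures:

#include <stdio.h>

#define STA_DRIVER_ACTIVE	(1 << 0)	/* driver entry is in use */
#define STA_UCODE_ACTIVE	(1 << 1)	/* uCode entry is in use */
#define STA_UCODE_INPROGRESS	(1 << 2)	/* an add is already in flight */

/* Return 0 if a new ADD_STA command should be sent, -1 otherwise. */
static int may_send_add_sta(unsigned int used)
{
	if (used & STA_UCODE_INPROGRESS)
		return -1;	/* uCode cannot handle a second add in flight */
	if ((used & STA_DRIVER_ACTIVE) && (used & STA_UCODE_ACTIVE))
		return -1;	/* already known to both driver and uCode */
	return 0;
}

int main(void)
{
	printf("%d\n", may_send_add_sta(STA_DRIVER_ACTIVE));	/* 0: send it  */
	printf("%d\n", may_send_add_sta(STA_DRIVER_ACTIVE |
					STA_UCODE_ACTIVE));	/* -1: skip it */
	return 0;
}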
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75f..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_sta_h__
-#define __iwl_legacy_sta_h__
-
-#include "iwl-dev.h"
-
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE  BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS  BIT(2) /* ucode entry is in process of
-					    being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
-				(this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_legacy_restore_stations(struct iwl_priv *priv,
-				struct iwl_rxon_context *ctx);
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
-			      struct iwl_rxon_context *ctx);
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
-			struct iwl_legacy_addsta_cmd *sta, u8 flags);
-int iwl_legacy_add_station_common(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			const u8 *addr, bool is_ap,
-			struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_legacy_remove_station(struct iwl_priv *priv,
-			const u8 sta_id,
-			const u8 *addr);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
-			struct ieee80211_vif *vif,
-			struct ieee80211_sta *sta);
-
-u8 iwl_legacy_prep_station(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			const u8 *addr, bool is_ap,
-			struct ieee80211_sta *sta);
-
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
-			struct iwl_rxon_context *ctx,
-			struct iwl_link_quality_cmd *lq,
-			u8 flags, bool init);
-
-/**
- * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that, when we get here
- * from a hardware restart, mac80211 will be able to reconfigure
- * stations -- in the normal down flow the stations will already have
- * been cleared.
- */
-static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	struct iwl_rxon_context *ctx;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	memset(priv->stations, 0, sizeof(priv->stations));
-	priv->num_stations = 0;
-
-	priv->ucode_key_table = 0;
-
-	for_each_context(priv, ctx) {
-		/*
-		 * Remove all key information that is not stored as part
-		 * of station information since mac80211 may not have had
-		 * a chance to remove all the keys. When device is
-		 * reconfigured by mac80211 after an error all keys will
-		 * be reconfigured.
-		 */
-		memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
-		ctx->key_mapping_keys = 0;
-	}
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
-{
-	if (WARN_ON(!sta))
-		return IWL_INVALID_STATION;
-
-	return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
-					  struct iwl_rxon_context *context,
-					  struct ieee80211_sta *sta)
-{
-	int sta_id;
-
-	if (!sta)
-		return context->bcast_sta_id;
-
-	sta_id = iwl_legacy_sta_id(sta);
-
-	/*
-	 * mac80211 should not be passing a partially
-	 * initialised station!
-	 */
-	WARN_ON(sta_id == IWL_INVALID_STATION);
-
-	return sta_id;
-}
-#endif /* __iwl_legacy_sta_h__ */
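iwl_legacy_sta_id_or_broadcast() above captures a common TX/key-setup pattern: when mac80211 passes no station, fall back to the context's broadcast entry. A hypothetical caller, with stand-in types instead of the real iwl structures, could look like this:

#include <stddef.h>

/* Stand-in types; the driver uses struct iwl_rxon_context and the
 * per-station drv_priv area instead. */
struct toy_ctx { int bcast_sta_id; };
struct toy_sta { int sta_id; };

static int pick_sta_id(const struct toy_ctx *ctx, const struct toy_sta *sta)
{
	if (!sta)
		return ctx->bcast_sta_id;	/* no station: use the broadcast entry */
	return sta->sta_id;			/* otherwise the per-station entry */
}

int main(void)
{
	struct toy_ctx ctx = { .bcast_sta_id = 31 };
	struct toy_sta sta = { .sta_id = 3 };

	/* Returns 0 only if both lookups behave as described above. */
	return (pick_sta_id(&ctx, NULL) == 31 &&
		pick_sta_id(&ctx, &sta) == 3) ? 0 : 1;
}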
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index c0dfb1a..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/**
- * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
- */
-void
-iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	u32 reg = 0;
-	int txq_id = txq->q.id;
-
-	if (txq->need_update == 0)
-		return;
-
-	/* if we're trying to save power */
-	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-		/* wake up nic if it's powered down ...
-		 * uCode will wake up, and interrupt us again, so next
-		 * time we'll skip this part. */
-		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv,
-					"Tx queue %d requesting wakeup,"
-					" GP1 = 0x%x\n", txq_id, reg);
-			iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
-					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-			return;
-		}
-
-		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
-				txq->q.write_ptr | (txq_id << 8));
-
-		/*
-		 * else not in power-save mode,
-		 * uCode will never sleep when we're
-		 * trying to tx (during RFKILL, we're not trying to tx).
-		 */
-	} else
-		iwl_write32(priv, HBUS_TARG_WRPTR,
-			    txq->q.write_ptr | (txq_id << 8));
-	txq->need_update = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
-
-/**
- * iwl_legacy_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
- */
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-
-	if (q->n_bd == 0)
-		return;
-
-	while (q->write_ptr != q->read_ptr) {
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
-
-/**
- * iwl_legacy_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct device *dev = &priv->pci_dev->dev;
-	int i;
-
-	iwl_legacy_tx_queue_unmap(priv, txq_id);
-
-	/* De-alloc array of command/tx buffers */
-	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
-		kfree(txq->cmd[i]);
-
-	/* De-alloc circular buffer of TFDs */
-	if (txq->q.n_bd)
-		dma_free_coherent(dev, priv->hw_params.tfd_size *
-				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
-	/* De-alloc array of per-TFD driver data */
-	kfree(txq->txb);
-	txq->txb = NULL;
-
-	/* deallocate arrays */
-	kfree(txq->cmd);
-	kfree(txq->meta);
-	txq->cmd = NULL;
-	txq->meta = NULL;
-
-	/* 0-fill queue descriptor structure */
-	memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
-
-/**
- * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
-{
-	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-	struct iwl_queue *q = &txq->q;
-	int i;
-
-	if (q->n_bd == 0)
-		return;
-
-	while (q->read_ptr != q->write_ptr) {
-		i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
-
-		if (txq->meta[i].flags & CMD_MAPPED) {
-			pci_unmap_single(priv->pci_dev,
-					 dma_unmap_addr(&txq->meta[i], mapping),
-					 dma_unmap_len(&txq->meta[i], len),
-					 PCI_DMA_BIDIRECTIONAL);
-			txq->meta[i].flags = 0;
-		}
-
-		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
-	}
-
-	i = q->n_window;
-	if (txq->meta[i].flags & CMD_MAPPED) {
-		pci_unmap_single(priv->pci_dev,
-				 dma_unmap_addr(&txq->meta[i], mapping),
-				 dma_unmap_len(&txq->meta[i], len),
-				 PCI_DMA_BIDIRECTIONAL);
-		txq->meta[i].flags = 0;
-	}
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
-
-/**
- * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
-{
-	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-	struct device *dev = &priv->pci_dev->dev;
-	int i;
-
-	iwl_legacy_cmd_queue_unmap(priv);
-
-	/* De-alloc array of command/tx buffers */
-	for (i = 0; i <= TFD_CMD_SLOTS; i++)
-		kfree(txq->cmd[i]);
-
-	/* De-alloc circular buffer of TFDs */
-	if (txq->q.n_bd)
-		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
-				  txq->tfds, txq->q.dma_addr);
-
-	/* deallocate arrays */
-	kfree(txq->cmd);
-	kfree(txq->meta);
-	txq->cmd = NULL;
-	txq->meta = NULL;
-
-	/* 0-fill queue descriptor structure */
-	memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill.  Driver and device exchange status of each
- * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
- *
- * For Tx queues there are low-mark and high-mark limits. If, after queuing
- * a packet for Tx, the free space drops below the low mark, the Tx queue is
- * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
- * rises above the high mark, the Tx queue is resumed.
- *
- * See more detailed info in iwl-4965-hw.h.
- ***************************************************/
-
-int iwl_legacy_queue_space(const struct iwl_queue *q)
-{
-	int s = q->read_ptr - q->write_ptr;
-
-	if (q->read_ptr > q->write_ptr)
-		s -= q->n_bd;
-
-	if (s <= 0)
-		s += q->n_window;
-	/* keep some reserve to not confuse empty and full situations */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
-}
-EXPORT_SYMBOL(iwl_legacy_queue_space);
-
-
-/**
- * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
-			  int count, int slots_num, u32 id)
-{
-	q->n_bd = count;
-	q->n_window = slots_num;
-	q->id = id;
-
-	/* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
-	 * and iwl_legacy_queue_dec_wrap are broken. */
-	BUG_ON(!is_power_of_2(count));
-
-	/* slots_num must be power-of-two size, otherwise
-	 * iwl_legacy_get_cmd_index is broken. */
-	BUG_ON(!is_power_of_2(slots_num));
-
-	q->low_mark = q->n_window / 4;
-	if (q->low_mark < 4)
-		q->low_mark = 4;
-
-	q->high_mark = q->n_window / 8;
-	if (q->high_mark < 2)
-		q->high_mark = 2;
-
-	q->write_ptr = q->read_ptr = 0;
-
-	return 0;
-}
-
-/**
- * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
-			      struct iwl_tx_queue *txq, u32 id)
-{
-	struct device *dev = &priv->pci_dev->dev;
-	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
-	/* Driver private data, only for Tx (not command) queues,
-	 * not shared with device. */
-	if (id != priv->cmd_queue) {
-		txq->txb = kzalloc(sizeof(txq->txb[0]) *
-				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
-		if (!txq->txb) {
-			IWL_ERR(priv, "kmalloc for auxiliary BD "
-				  "structures failed\n");
-			goto error;
-		}
-	} else {
-		txq->txb = NULL;
-	}
-
-	/* Circular buffer of transmit frame descriptors (TFDs),
-	 * shared with device */
-	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
-				       GFP_KERNEL);
-	if (!txq->tfds) {
-		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
-		goto error;
-	}
-	txq->q.id = id;
-
-	return 0;
-
- error:
-	kfree(txq->txb);
-	txq->txb = NULL;
-
-	return -ENOMEM;
-}
-
-/**
- * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-		      int slots_num, u32 txq_id)
-{
-	int i, len;
-	int ret;
-	int actual_slots = slots_num;
-
-	/*
-	 * Alloc buffer array for commands (Tx or other types of commands).
-	 * For the command queue (#4/#9), allocate command space + one big
-	 * command for scan, since the scan command is very large; the system
-	 * will not run two scans at the same time, so only one is needed.
-	 * For normal Tx queues (all other queues), no super-size command
-	 * space is needed.
-	 */
-	if (txq_id == priv->cmd_queue)
-		actual_slots++;
-
-	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
-			    GFP_KERNEL);
-	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
-			   GFP_KERNEL);
-
-	if (!txq->meta || !txq->cmd)
-		goto out_free_arrays;
-
-	len = sizeof(struct iwl_device_cmd);
-	for (i = 0; i < actual_slots; i++) {
-		/* only happens for cmd queue */
-		if (i == slots_num)
-			len = IWL_MAX_CMD_SIZE;
-
-		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
-		if (!txq->cmd[i])
-			goto err;
-	}
-
-	/* Alloc driver data array and TFD circular buffer */
-	ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
-	if (ret)
-		goto err;
-
-	txq->need_update = 0;
-
-	/*
-	 * For the default queues 0-3, set up the swq_id
-	 * already -- all others need to get one later
-	 * (if they need one at all).
-	 */
-	if (txq_id < 4)
-		iwl_legacy_set_swq_id(txq, txq_id, txq_id);
-
-	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
-	 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
-	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
-	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	iwl_legacy_queue_init(priv, &txq->q,
-				TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
-	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
-
-	return 0;
-err:
-	for (i = 0; i < actual_slots; i++)
-		kfree(txq->cmd[i]);
-out_free_arrays:
-	kfree(txq->meta);
-	kfree(txq->cmd);
-
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
-
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-			int slots_num, u32 txq_id)
-{
-	int actual_slots = slots_num;
-
-	if (txq_id == priv->cmd_queue)
-		actual_slots++;
-
-	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
-
-	txq->need_update = 0;
-
-	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	iwl_legacy_queue_init(priv, &txq->q,
-				TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
-	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
-
-/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
-
-/**
- * iwl_legacy_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data pointer
- * @cmd: a pointer to the uCode command structure
- *
- * The function returns a value < 0 to indicate that the operation
- * failed. On success, it returns the index (> 0) of the command in
- * the command queue.
- */
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-	struct iwl_queue *q = &txq->q;
-	struct iwl_device_cmd *out_cmd;
-	struct iwl_cmd_meta *out_meta;
-	dma_addr_t phys_addr;
-	unsigned long flags;
-	int len;
-	u32 idx;
-	u16 fix_size;
-
-	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
-	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
-
-	/* If any of the command structures end up being larger than
-	 * TFD_MAX_PAYLOAD_SIZE and are sent as 'small' commands, then we
-	 * will need to increase the size of the TFD entries.
-	 * Also check that the command buffer does not exceed the size
-	 * of device_cmd and max_cmd_size. */
-	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-	       !(cmd->flags & CMD_SIZE_HUGE));
-	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
-
-	if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
-		IWL_WARN(priv, "Not sending command - %s KILL\n",
-			 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
-		return -EIO;
-	}
-
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-	if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
-		IWL_ERR(priv, "Restarting adapter due to command queue full\n");
-		queue_work(priv->workqueue, &priv->restart);
-		return -ENOSPC;
-	}
-
-	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
-	out_cmd = txq->cmd[idx];
-	out_meta = &txq->meta[idx];
-
-	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
-		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-		return -ENOSPC;
-	}
-
-	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
-	out_meta->flags = cmd->flags | CMD_MAPPED;
-	if (cmd->flags & CMD_WANT_SKB)
-		out_meta->source = cmd;
-	if (cmd->flags & CMD_ASYNC)
-		out_meta->callback = cmd->callback;
-
-	out_cmd->hdr.cmd = cmd->id;
-	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
-	/* At this point, the out_cmd now has all of the incoming cmd
-	 * information */
-
-	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
-			INDEX_TO_SEQ(q->write_ptr));
-	if (cmd->flags & CMD_SIZE_HUGE)
-		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
-	len = sizeof(struct iwl_device_cmd);
-	if (idx == TFD_CMD_SLOTS)
-		len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	switch (out_cmd->hdr.cmd) {
-	case REPLY_TX_LINK_QUALITY_CMD:
-	case SENSITIVITY_CMD:
-		IWL_DEBUG_HC_DUMP(priv,
-				"Sending command %s (#%x), seq: 0x%04X, "
-				"%d bytes at %d[%d]:%d\n",
-				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
-				out_cmd->hdr.cmd,
-				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, priv->cmd_queue);
-		break;
-	default:
-		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
-				"%d bytes at %d[%d]:%d\n",
-				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
-				out_cmd->hdr.cmd,
-				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, priv->cmd_queue);
-	}
-#endif
-	txq->need_update = 1;
-
-	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
-		/* Set up entry in queue's byte count circular buffer */
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
-
-	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-				   fix_size, PCI_DMA_BIDIRECTIONAL);
-	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, fix_size);
-
-	trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
-						fix_size, cmd->flags);
-
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   phys_addr, fix_size, 1,
-						   U32_PAD(cmd->len));
-
-	/* Increment and update queue's write index */
-	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_legacy_txq_update_write_ptr(priv, txq);
-
-	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-	return idx;
-}
-
-/**
- * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms.  If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
-				   int idx, int cmd_idx)
-{
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	int nfreed = 0;
-
-	if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  idx, q->n_bd, q->write_ptr, q->read_ptr);
-		return;
-	}
-
-	for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
-	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-		if (nfreed++ > 0) {
-			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
-					q->write_ptr, q->read_ptr);
-			queue_work(priv->workqueue, &priv->restart);
-		}
-
-	}
-}
-
-/**
- * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it, the callback
- * will be executed.  The attached skb (if present) will only be freed
- * if the callback returns 1.
- */
-void
-iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-	int txq_id = SEQ_TO_QUEUE(sequence);
-	int index = SEQ_TO_INDEX(sequence);
-	int cmd_index;
-	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
-	struct iwl_device_cmd *cmd;
-	struct iwl_cmd_meta *meta;
-	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-	unsigned long flags;
-
-	/* If a Tx command is being handled and it isn't in the actual
-	 * command queue, then a command routing bug has been introduced
-	 * in the queue management code. */
-	if (WARN(txq_id != priv->cmd_queue,
-		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-		  txq_id, priv->cmd_queue, sequence,
-		  priv->txq[priv->cmd_queue].q.read_ptr,
-		  priv->txq[priv->cmd_queue].q.write_ptr)) {
-		iwl_print_hex_error(priv, pkt, 32);
-		return;
-	}
-
-	cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
-	cmd = txq->cmd[cmd_index];
-	meta = &txq->meta[cmd_index];
-
-	txq->time_stamp = jiffies;
-
-	pci_unmap_single(priv->pci_dev,
-			 dma_unmap_addr(meta, mapping),
-			 dma_unmap_len(meta, len),
-			 PCI_DMA_BIDIRECTIONAL);
-
-	/* Input error checking is done when commands are added to queue. */
-	if (meta->flags & CMD_WANT_SKB) {
-		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
-		rxb->page = NULL;
-	} else if (meta->callback)
-		meta->callback(priv, cmd, pkt);
-
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-	iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
-
-	if (!(meta->flags & CMD_ASYNC)) {
-		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
-			       iwl_legacy_get_cmd_string(cmd->hdr.cmd));
-		wake_up(&priv->wait_command_queue);
-	}
-
-	/* Mark as unmapped */
-	meta->flags = 0;
-
-	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
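The "Theory of operation" block in the removed iwl-tx.c describes the ring model behind these queues: driver and device exchange read/write pointers over a circular buffer, and two entries are always kept free so an empty ring is never mistaken for a full one. A simplified, self-contained model of that free-space calculation (it treats the whole ring uniformly, whereas the removed helper also distinguishes n_window from n_bd):

#include <stdio.h>

/* n_bd ring entries; two are held in reserve so that read == write
 * always means "empty", never "full". */
static int queue_space(int read_ptr, int write_ptr, int n_bd)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= n_bd;
	if (s <= 0)
		s += n_bd;
	s -= 2;			/* keep the two-slot reserve */
	return s < 0 ? 0 : s;
}

int main(void)
{
	printf("%d\n", queue_space(0, 0, 256));	/* empty ring: 254 usable */
	printf("%d\n", queue_space(5, 4, 256));	/* one slot left physically: 0 usable */
	return 0;
}

The low and high marks mentioned in that comment are then just thresholds on this value, used to stop and resume the queue.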
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d86..0000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME	"iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION	\
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION  IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR     "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
-	.sw_crypto = 1,
-	.restart_fw = 1,
-	.disable_hw_scan = 1,
-	/* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39  is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN      - Force MAIN antenna
- * IWL_ANTENNA_AUX       - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
-	switch (iwl3945_mod_params.antenna) {
-	case IWL_ANTENNA_DIVERSITY:
-		return 0;
-
-	case IWL_ANTENNA_MAIN:
-		if (eeprom->antenna_switch_type)
-			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
-		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
-	case IWL_ANTENNA_AUX:
-		if (eeprom->antenna_switch_type)
-			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
-	}
-
-	/* bad antenna selector value */
-	IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
-		iwl3945_mod_params.antenna);
-
-	return 0;		/* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
-				   struct ieee80211_key_conf *keyconf,
-				   u8 sta_id)
-{
-	unsigned long flags;
-	__le16 key_flags = 0;
-	int ret;
-
-	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
-	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
-	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
-		key_flags |= STA_KEY_MULTICAST_MSK;
-
-	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-	keyconf->hw_key_idx = keyconf->keyidx;
-	key_flags &= ~STA_KEY_FLG_INVALID;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
-	       keyconf->keylen);
-
-	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
-	       keyconf->keylen);
-
-	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-			== STA_KEY_FLG_NO_ENC)
-		priv->stations[sta_id].sta.key.key_offset =
-				 iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	* in uCode. */
-
-	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-		"no space for a new key");
-
-	priv->stations[sta_id].sta.key.key_flags = key_flags;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-	IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
-	ret = iwl_legacy_send_add_sta(priv,
-				&priv->stations[sta_id].sta, CMD_ASYNC);
-
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
-				  struct ieee80211_key_conf *keyconf,
-				  u8 sta_id)
-{
-	return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
-				  struct ieee80211_key_conf *keyconf,
-				  u8 sta_id)
-{
-	return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
-	unsigned long flags;
-	struct iwl_legacy_addsta_cmd sta_cmd;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
-	memset(&priv->stations[sta_id].sta.key, 0,
-		sizeof(struct iwl4965_keyinfo));
-	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
-	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
-	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
-			struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
-	int ret = 0;
-
-	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
-	switch (keyconf->cipher) {
-	case WLAN_CIPHER_SUITE_CCMP:
-		ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
-		break;
-	case WLAN_CIPHER_SUITE_TKIP:
-		ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
-		break;
-	case WLAN_CIPHER_SUITE_WEP40:
-	case WLAN_CIPHER_SUITE_WEP104:
-		ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
-		break;
-	default:
-		IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
-			keyconf->cipher);
-		ret = -EINVAL;
-	}
-
-	IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
-		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-		      sta_id, ret);
-
-	return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
-	int ret = -EOPNOTSUPP;
-
-	return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
-				struct ieee80211_key_conf *key)
-{
-	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
-		return -EOPNOTSUPP;
-
-	IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
-	return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
-	struct list_head *element;
-
-	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
-		       priv->frames_count);
-
-	while (!list_empty(&priv->free_frames)) {
-		element = priv->free_frames.next;
-		list_del(element);
-		kfree(list_entry(element, struct iwl3945_frame, list));
-		priv->frames_count--;
-	}
-
-	if (priv->frames_count) {
-		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
-			    priv->frames_count);
-		priv->frames_count = 0;
-	}
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
-	struct iwl3945_frame *frame;
-	struct list_head *element;
-	if (list_empty(&priv->free_frames)) {
-		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
-		if (!frame) {
-			IWL_ERR(priv, "Could not allocate frame!\n");
-			return NULL;
-		}
-
-		priv->frames_count++;
-		return frame;
-	}
-
-	element = priv->free_frames.next;
-	list_del(element);
-	return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
-	memset(frame, 0, sizeof(*frame));
-	list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-				struct ieee80211_hdr *hdr,
-				int left)
-{
-
-	if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
-		return 0;
-
-	if (priv->beacon_skb->len > left)
-		return 0;
-
-	memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
-	return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
-	struct iwl3945_frame *frame;
-	unsigned int frame_size;
-	int rc;
-	u8 rate;
-
-	frame = iwl3945_get_free_frame(priv);
-
-	if (!frame) {
-		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
-			  "command.\n");
-		return -ENOMEM;
-	}
-
-	rate = iwl_legacy_get_lowest_plcp(priv,
-				&priv->contexts[IWL_RXON_CTX_BSS]);
-
-	frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
-	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
-			      &frame->u.cmd[0]);
-
-	iwl3945_free_frame(priv, frame);
-
-	return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
-	if (priv->_3945.shared_virt)
-		dma_free_coherent(&priv->pci_dev->dev,
-				  sizeof(struct iwl3945_shared),
-				  priv->_3945.shared_virt,
-				  priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
-				      struct ieee80211_tx_info *info,
-				      struct iwl_device_cmd *cmd,
-				      struct sk_buff *skb_frag,
-				      int sta_id)
-{
-	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
-	tx_cmd->sec_ctl = 0;
-
-	switch (keyinfo->cipher) {
-	case WLAN_CIPHER_SUITE_CCMP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
-		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
-		break;
-
-	case WLAN_CIPHER_SUITE_TKIP:
-		break;
-
-	case WLAN_CIPHER_SUITE_WEP104:
-		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-		/* fall through */
-	case WLAN_CIPHER_SUITE_WEP40:
-		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
-		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
-		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
-		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
-			     "with key %d\n", info->control.hw_key->hw_key_idx);
-		break;
-
-	default:
-		IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
-		break;
-	}
-}
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
-				  struct iwl_device_cmd *cmd,
-				  struct ieee80211_tx_info *info,
-				  struct ieee80211_hdr *hdr, u8 std_id)
-{
-	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-	__le32 tx_flags = tx_cmd->tx_flags;
-	__le16 fc = hdr->frame_control;
-
-	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-		tx_flags |= TX_CMD_FLG_ACK_MSK;
-		if (ieee80211_is_mgmt(fc))
-			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-		if (ieee80211_is_probe_resp(fc) &&
-		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
-			tx_flags |= TX_CMD_FLG_TSF_MSK;
-	} else {
-		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-	}
-
-	tx_cmd->sta_id = std_id;
-	if (ieee80211_has_morefrags(fc))
-		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
-	if (ieee80211_is_data_qos(fc)) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tx_cmd->tid_tspec = qc[0] & 0xf;
-		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
-	} else {
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-	}
-
-	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
-	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
-	if (ieee80211_is_mgmt(fc)) {
-		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
-		else
-			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
-	} else {
-		tx_cmd->timeout.pm_frame_timeout = 0;
-	}
-
-	tx_cmd->driver_txop = 0;
-	tx_cmd->tx_flags = tx_flags;
-	tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl3945_tx_cmd *tx_cmd;
-	struct iwl_tx_queue *txq = NULL;
-	struct iwl_queue *q = NULL;
-	struct iwl_device_cmd *out_cmd;
-	struct iwl_cmd_meta *out_meta;
-	dma_addr_t phys_addr;
-	dma_addr_t txcmd_phys;
-	int txq_id = skb_get_queue_mapping(skb);
-	u16 len, idx, hdr_len;
-	u8 id;
-	u8 unicast;
-	u8 sta_id;
-	u8 tid = 0;
-	__le16 fc;
-	u8 wait_write_ptr = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (iwl_legacy_is_rfkill(priv)) {
-		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-		goto drop_unlock;
-	}
-
-	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
-		IWL_ERR(priv, "ERROR: No TX rate available.\n");
-		goto drop_unlock;
-	}
-
-	unicast = !is_multicast_ether_addr(hdr->addr1);
-	id = 0;
-
-	fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (ieee80211_is_auth(fc))
-		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
-	else if (ieee80211_is_assoc_req(fc))
-		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
-	else if (ieee80211_is_reassoc_req(fc))
-		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	hdr_len = ieee80211_hdrlen(fc);
-
-	/* Find index into station table for destination station */
-	sta_id = iwl_legacy_sta_id_or_broadcast(
-			priv, &priv->contexts[IWL_RXON_CTX_BSS],
-			info->control.sta);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-			       hdr->addr1);
-		goto drop;
-	}
-
-	IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
-	if (ieee80211_is_data_qos(fc)) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		if (unlikely(tid >= MAX_TID_COUNT))
-			goto drop;
-	}
-
-	/* Descriptor for chosen Tx queue */
-	txq = &priv->txq[txq_id];
-	q = &txq->q;
-
-	if ((iwl_legacy_queue_space(q) < q->high_mark))
-		goto drop;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-
-	/* Set up driver data for this TFD */
-	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-	txq->txb[q->write_ptr].skb = skb;
-	txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	/* Init first empty entry in queue's array of Tx/cmd buffers */
-	out_cmd = txq->cmd[idx];
-	out_meta = &txq->meta[idx];
-	tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
-	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-	memset(tx_cmd, 0, sizeof(*tx_cmd));
-
-	/*
-	 * Set up the Tx-command (not MAC!) header.
-	 * Store the chosen Tx queue and TFD index within the sequence field;
-	 * after Tx, uCode's Tx response will return this value so driver can
-	 * locate the frame within the tx queue and do post-tx processing.
-	 */
-	out_cmd->hdr.cmd = REPLY_TX;
-	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-				INDEX_TO_SEQ(q->write_ptr)));
-
-	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
-	if (info->control.hw_key)
-		iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
-	/* TODO need this for burst mode later on */
-	iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
-	/* set is_hcca to 0; it probably will never be implemented */
-	iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
-	/* Total # bytes to be transmitted */
-	len = (u16)skb->len;
-	tx_cmd->len = cpu_to_le16(len);
-
-	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-	iwl_legacy_update_stats(priv, true, fc, len);
-	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
-	if (!ieee80211_has_morefrags(hdr->frame_control)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
-	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
-		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
-			   ieee80211_hdrlen(fc));
-
-	/*
-	 * Use the first empty entry in this queue's command buffer array
-	 * to contain the Tx command and MAC header concatenated together
-	 * (payload data will be in another buffer).
-	 * Size of this varies, due to varying MAC header length.
-	 * If end is not dword aligned, we'll have 2 extra bytes at the end
-	 * of the MAC header (device reads on dword boundaries).
-	 * We'll tell device about this padding later.
-	 */
-	len = sizeof(struct iwl3945_tx_cmd) +
-			sizeof(struct iwl_cmd_header) + hdr_len;
-	len = (len + 3) & ~3;
-
-	/* Physical address of this Tx command's header (not MAC header!),
-	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-				    len, PCI_DMA_TODEVICE);
-	/* The meta data is not DMA-mapped itself, so we can safely store the
-	 * address here and read it back when unmapping the command. */
-	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, len);
-
-	/* Add buffer containing Tx command and MAC(!) header to TFD's
-	 * first entry */
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   txcmd_phys, len, 1, 0);
-
-
-	/* Set up TFD's 2nd entry to point directly to remainder of skb,
-	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
-	if (len) {
-		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-					   len, PCI_DMA_TODEVICE);
-		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-							   phys_addr, len,
-							   0, U32_PAD(len));
-	}
-
-
-	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_legacy_txq_update_write_ptr(priv, txq);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if ((iwl_legacy_queue_space(q) < q->high_mark)
-	    && priv->mac80211_registered) {
-		if (wait_write_ptr) {
-			spin_lock_irqsave(&priv->lock, flags);
-			txq->need_update = 1;
-			iwl_legacy_txq_update_write_ptr(priv, txq);
-			spin_unlock_irqrestore(&priv->lock, flags);
-		}
-
-		iwl_legacy_stop_queue(priv, txq);
-	}
-
-	return 0;
-
-drop_unlock:
-	spin_unlock_irqrestore(&priv->lock, flags);
-drop:
-	return -1;
-}
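The sequence value written into out_cmd->hdr above is how the driver finds its way back to the right TFD when the uCode echoes it in the Tx response: the queue id and the index within that queue are packed into one 16-bit field. The same packing is what iwl_legacy_tx_cmd_complete (removed earlier in this patch) undoes with SEQ_TO_QUEUE()/SEQ_TO_INDEX() for host commands. A minimal userspace sketch of the round trip follows; the bit layout assumed here (queue in bits 8-12, index in bits 0-7) mirrors the usual iwlwifi convention, since the real macro definitions live in headers outside this file.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout of the 16-bit sequence field: queue id in bits 8-12,
 * TFD index in bits 0-7.  The driver's real macros are defined elsewhere. */
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	/* What the Tx path stores before handing the TFD to the uCode ... */
	uint16_t seq = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(117);

	/* ... and what the completion path recovers from the echoed value. */
	printf("queue=%u index=%u\n", SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
	assert(SEQ_TO_QUEUE(seq) == 4 && SEQ_TO_INDEX(seq) == 117);
	return 0;
}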
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
-			       struct ieee80211_measurement_params *params,
-			       u8 type)
-{
-	struct iwl_spectrum_cmd spectrum;
-	struct iwl_rx_packet *pkt;
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
-		.data = (void *)&spectrum,
-		.flags = CMD_WANT_SKB,
-	};
-	u32 add_time = le64_to_cpu(params->start_time);
-	int rc;
-	int spectrum_resp_status;
-	int duration = le16_to_cpu(params->duration);
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
-		add_time = iwl_legacy_usecs_to_beacons(priv,
-			le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
-			le16_to_cpu(ctx->timing.beacon_interval));
-
-	memset(&spectrum, 0, sizeof(spectrum));
-
-	spectrum.channel_count = cpu_to_le16(1);
-	spectrum.flags =
-	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
-	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
-	cmd.len = sizeof(spectrum);
-	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
-	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
-		spectrum.start_time =
-			iwl_legacy_add_beacon_time(priv,
-				priv->_3945.last_beacon_time, add_time,
-				le16_to_cpu(ctx->timing.beacon_interval));
-	else
-		spectrum.start_time = 0;
-
-	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
-	spectrum.channels[0].channel = params->channel;
-	spectrum.channels[0].type = type;
-	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
-		spectrum.flags |= RXON_FLG_BAND_24G_MSK |
-		    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
-	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
-	if (rc)
-		return rc;
-
-	pkt = (struct iwl_rx_packet *)cmd.reply_page;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
-		rc = -EIO;
-	}
-
-	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
-	switch (spectrum_resp_status) {
-	case 0:		/* Command will be handled */
-		if (pkt->u.spectrum.id != 0xff) {
-			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-						pkt->u.spectrum.id);
-			priv->measurement_status &= ~MEASUREMENT_READY;
-		}
-		priv->measurement_status |= MEASUREMENT_ACTIVE;
-		rc = 0;
-		break;
-
-	case 1:		/* Command will not be handled */
-		rc = -EAGAIN;
-		break;
-	}
-
-	iwl_legacy_free_pages(priv, cmd.reply_page);
-
-	return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
-			       struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_alive_resp *palive;
-	struct delayed_work *pwork;
-
-	palive = &pkt->u.alive_frame;
-
-	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
-		       "0x%01X 0x%01X\n",
-		       palive->is_valid, palive->ver_type,
-		       palive->ver_subtype);
-
-	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
-		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-		memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
-		       sizeof(struct iwl_alive_resp));
-		pwork = &priv->init_alive_start;
-	} else {
-		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-		memcpy(&priv->card_alive, &pkt->u.alive_frame,
-		       sizeof(struct iwl_alive_resp));
-		pwork = &priv->alive_start;
-		iwl3945_disable_events(priv);
-	}
-
-	/* We delay the ALIVE response by 5ms to
-	 * give the HW RF Kill time to activate... */
-	if (palive->is_valid == UCODE_VALID_OK)
-		queue_delayed_work(priv->workqueue, pwork,
-				   msecs_to_jiffies(5));
-	else
-		IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
-				 struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
-	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	u8 rate = beacon->beacon_notify_hdr.rate;
-
-	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-		"tsf %d %d rate %d\n",
-		le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
-		beacon->beacon_notify_hdr.failure_frame,
-		le32_to_cpu(beacon->ibss_mgr_status),
-		le32_to_cpu(beacon->high_tsf),
-		le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
-	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
-				    struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
-	unsigned long status = priv->status;
-
-	IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
-			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
-			  (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-	if (flags & HW_CARD_DISABLED)
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
-	iwl_legacy_scan_cancel(priv);
-
-	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
-		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-				test_bit(STATUS_RF_KILL_HW, &priv->status));
-	else
-		wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
-	priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
-	priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
-	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
-	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
-	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-			iwl_legacy_rx_spectrum_measure_notif;
-	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
-	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-	    iwl_legacy_rx_pm_debug_statistics_notif;
-	priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
-	/*
-	 * The same handler is used for both the REPLY to a discrete
-	 * statistics request from the host as well as for the periodic
-	 * statistics notifications (after received beacons) from the uCode.
-	 */
-	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
-	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
-	iwl_legacy_setup_rx_scan_handlers(priv);
-	priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
-	/* Set up hardware specific Rx handlers */
-	iwl3945_hw_rx_handler_setup(priv);
-}
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt.  The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
- *                            READ INDEX, detaching the SKB from the pool.
- *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl3945_rx_queue_restock to refill any empty
- *                            slots.
- * ...
- *
- */
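A toy illustration of the empty/full convention stated above, not the driver's own queue-space helper: with indexes that wrap on a power-of-two ring, "WRITE = READ - 1" leaves zero filled slots, while "WRITE = READ" leaves every slot but one occupied (one slot is sacrificed so full and empty stay distinguishable). Q_SIZE below is an assumed stand-in for the driver's RX_QUEUE_SIZE.

#include <stdio.h>

#define Q_SIZE	256			/* assumed stand-in for RX_QUEUE_SIZE */
#define Q_MASK	(Q_SIZE - 1)

/* Modular distance between the two indexes under the convention above. */
static unsigned int filled_slots(unsigned int read, unsigned int write)
{
	return (read - write - 1) & Q_MASK;
}

int main(void)
{
	printf("WRITE = READ - 1 -> %u filled slots (empty)\n", filled_slots(10, 9));
	printf("WRITE = READ     -> %u filled slots (full)\n", filled_slots(10, 10));
	return 0;
}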
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
-					  dma_addr_t dma_addr)
-{
-	return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct list_head *element;
-	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
-	int write;
-
-	spin_lock_irqsave(&rxq->lock, flags);
-	write = rxq->write & ~0x7;
-	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-		/* Get next free Rx buffer, remove from free list */
-		element = rxq->rx_free.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
-		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
-		rxq->queue[rxq->write] = rxb;
-		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-		rxq->free_count--;
-	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-	/* If we've added more space for the firmware to place data, tell it.
-	 * Increment device's write pointer in multiples of 8. */
-	if ((rxq->write_actual != (rxq->write & ~0x7))
-	    || (abs(rxq->write - rxq->read) > 7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
-		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-	}
-}
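Note the granularity here: the device's RX write pointer is only advanced in multiples of eight RBDs, so rxq->write is rounded down with & ~0x7 before being compared against write_actual (presumably the value last handed to the hardware), and an update is forced regardless once write and read have drifted more than seven entries apart.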
-
-/**
- * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl3945_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct list_head *element;
-	struct iwl_rx_mem_buffer *rxb;
-	struct page *page;
-	unsigned long flags;
-	gfp_t gfp_mask = priority;
-
-	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
-
-		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
-			return;
-		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (priv->hw_params.rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
-		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
-			break;
-		}
-
-		spin_lock_irqsave(&rxq->lock, flags);
-		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
-			__free_pages(page, priv->hw_params.rx_page_order);
-			return;
-		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		rxb->page = page;
-		/* Get physical address of RB/SKB */
-		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
-		priv->alloc_rxb_page++;
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-	}
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	unsigned long flags;
-	int i;
-	spin_lock_irqsave(&rxq->lock, flags);
-	INIT_LIST_HEAD(&rxq->rx_free);
-	INIT_LIST_HEAD(&rxq->rx_used);
-	/* Fill the rx_used queue with _all_ of the Rx buffers */
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-		/* In the reset function, these buffers may have been allocated
-		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].page != NULL) {
-			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
-			rxq->pool[i].page = NULL;
-		}
-		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-	}
-
-	/* Set us so that we have processed and used all buffers, but have
-	 * not restocked the Rx queue with fresh buffers */
-	rxq->read = rxq->write = 0;
-	rxq->write_actual = 0;
-	rxq->free_count = 0;
-	spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
-	struct iwl_priv *priv = data;
-	unsigned long flags;
-
-	iwl3945_rx_allocate(priv, GFP_KERNEL);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl3945_rx_queue_restock(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
-	iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
-	iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and, if the SKB is
- * non-NULL, it is unmapped and freed.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	int i;
-	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].page != NULL) {
-			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
-			rxq->pool[i].page = NULL;
-		}
-	}
-
-	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			  rxq->bd_dma);
-	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
-			  rxq->rb_stts, rxq->rb_stts_dma);
-	rxq->bd = NULL;
-	rxq->rb_stts  = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/*	 0   1   2   3   4   5   6   7   8   9 */
-	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
-	20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
-	26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
-	29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
-	32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
-	34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
-	36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
-	37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
-	38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
-	39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- *   (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
-	/* 1000:1 or higher just report as 60 dB */
-	if (sig_ratio >= 1000)
-		return 60;
-
-	/* 100:1 or higher, divide by 10 and use table,
-	 *   add 20 dB to make up for divide by 10 */
-	if (sig_ratio >= 100)
-		return 20 + (int)ratio2dB[sig_ratio/10];
-
-	/* We shouldn't see this */
-	if (sig_ratio < 1)
-		return 0;
-
-	/* Use table for ratios 1:1 - 99:1 */
-	return (int)ratio2dB[sig_ratio];
-}
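A worked example of the branches above: a linear ratio of 250 takes the 100:1-or-higher branch and yields 20 + ratio2dB[25] = 20 + 28 = 48 dB, which agrees with 20*log10(250) ≈ 48, while anything from 1000:1 up is simply clamped to 60 dB (20*log10(1000) is exactly 60).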
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
-	struct iwl_rx_mem_buffer *rxb;
-	struct iwl_rx_packet *pkt;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	u32 r, i;
-	int reclaim;
-	unsigned long flags;
-	u8 fill_rx = 0;
-	u32 count = 8;
-	int total_empty = 0;
-
-	/* uCode's read index (stored in shared DRAM) indicates the last Rx
-	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
-	i = rxq->read;
-
-	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - rxq->write_actual;
-	if (total_empty < 0)
-		total_empty += RX_QUEUE_SIZE;
-
-	if (total_empty > (RX_QUEUE_SIZE / 2))
-		fill_rx = 1;
-	/* Rx interrupt, but nothing sent from uCode */
-	if (i == r)
-		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-	while (i != r) {
-		int len;
-
-		rxb = rxq->queue[i];
-
-		/* If an RXB doesn't have a Rx queue slot associated with it,
-		 * then a bug has been introduced in the queue refilling
-		 * routines -- catch it here */
-		BUG_ON(rxb == NULL);
-
-		rxq->queue[i] = NULL;
-
-		pci_unmap_page(priv->pci_dev, rxb->page_dma,
-			       PAGE_SIZE << priv->hw_params.rx_page_order,
-			       PCI_DMA_FROMDEVICE);
-		pkt = rxb_addr(rxb);
-
-		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-		len += sizeof(u32); /* account for status word */
-		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
-		/* Reclaim a command buffer only if this packet is a response
-		 *   to a (driver-originated) command.
-		 * If the packet (e.g. Rx frame) originated from uCode,
-		 *   there is no command buffer to reclaim.
-		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-		 *   but apparently a few don't get set; catch them here. */
-		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
-			(pkt->hdr.cmd != REPLY_TX);
-
-		/* Based on type of command response or notification,
-		 *   handle those that need handling via function in
-		 *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
-		if (priv->rx_handlers[pkt->hdr.cmd]) {
-			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
-			iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
-		} else {
-			/* No handling needed */
-			IWL_DEBUG_RX(priv,
-				"r %d i %d No handler needed for %s, 0x%02x\n",
-				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
-				pkt->hdr.cmd);
-		}
-
-		/*
-		 * XXX: After here, we should always check rxb->page
-		 * against NULL before touching it or its virtual
-		 * memory (pkt). Because some rx_handler might have
-		 * already taken or freed the pages.
-		 */
-
-		if (reclaim) {
-			/* Invoke any callbacks, transfer the buffer to caller,
-			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
-			 * as we reclaim the driver command queue */
-			if (rxb->page)
-				iwl_legacy_tx_cmd_complete(priv, rxb);
-			else
-				IWL_WARN(priv, "Claim null rxb?\n");
-		}
-
-		/* Reuse the page if possible. For notification packets and
-		 * SKBs that fail to Rx correctly, add them back into the
-		 * rx_free list for reuse later. */
-		spin_lock_irqsave(&rxq->lock, flags);
-		if (rxb->page != NULL) {
-			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
-				0, PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-			list_add_tail(&rxb->list, &rxq->rx_free);
-			rxq->free_count++;
-		} else
-			list_add_tail(&rxb->list, &rxq->rx_used);
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		i = (i + 1) & RX_QUEUE_MASK;
-		/* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode won't assert. */
-		if (fill_rx) {
-			count++;
-			if (count >= 8) {
-				rxq->read = i;
-				iwl3945_rx_replenish_now(priv);
-				count = 0;
-			}
-		}
-	}
-
-	/* Backtrack one entry */
-	rxq->read = i;
-	if (fill_rx)
-		iwl3945_rx_replenish_now(priv);
-	else
-		iwl3945_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
-{
-	/* wait to make sure we flush pending tasklet*/
-	synchronize_irq(priv->pci_dev->irq);
-	tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *iwl3945_desc_lookup(int i)
-{
-	switch (i) {
-	case 1:
-		return "FAIL";
-	case 2:
-		return "BAD_PARAM";
-	case 3:
-		return "BAD_CHECKSUM";
-	case 4:
-		return "NMI_INTERRUPT";
-	case 5:
-		return "SYSASSERT";
-	case 6:
-		return "FATAL_ERROR";
-	}
-
-	return "UNKNOWN";
-}
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
-	u32 i;
-	u32 desc, time, count, base, data1;
-	u32 blink1, blink2, ilink1, ilink2;
-
-	base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
-	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-		IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
-		return;
-	}
-
-
-	count = iwl_legacy_read_targ_mem(priv, base);
-
-	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
-		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
-		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
-			priv->status, count);
-	}
-
-	IWL_ERR(priv, "Desc       Time       asrtPC  blink2 "
-		  "ilink1  nmiPC   Line\n");
-	for (i = ERROR_START_OFFSET;
-	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
-	     i += ERROR_ELEM_SIZE) {
-		desc = iwl_legacy_read_targ_mem(priv, base + i);
-		time =
-		    iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
-		blink1 =
-		    iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
-		blink2 =
-		    iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
-		ilink1 =
-		    iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
-		ilink2 =
-		    iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
-		data1 =
-		    iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
-		IWL_ERR(priv,
-			"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
-			iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
-			ilink1, ilink2, data1);
-		trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
-					0, blink1, blink2, ilink1, ilink2);
-	}
-}
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
-	u32 inta, handled = 0;
-	u32 inta_fh;
-	unsigned long flags;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	u32 inta_mask;
-#endif
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Ack/clear/reset pending uCode interrupts.
-	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
-	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
-	inta = iwl_read32(priv, CSR_INT);
-	iwl_write32(priv, CSR_INT, inta);
-
-	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
-	 * Any new interrupts that happen after this, either while we're
-	 * in this tasklet, or later, will show up in next ISR/tasklet. */
-	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
-		/* just for debug */
-		inta_mask = iwl_read32(priv, CSR_INT_MASK);
-		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-			      inta, inta_mask, inta_fh);
-	}
-#endif
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
-	 * atomic, make sure that inta covers all the interrupts that
-	 * we've discovered, even if FH interrupt came in just after
-	 * reading CSR_INT. */
-	if (inta_fh & CSR39_FH_INT_RX_MASK)
-		inta |= CSR_INT_BIT_FH_RX;
-	if (inta_fh & CSR39_FH_INT_TX_MASK)
-		inta |= CSR_INT_BIT_FH_TX;
-
-	/* Now service all interrupt bits discovered above. */
-	if (inta & CSR_INT_BIT_HW_ERR) {
-		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
-
-		/* Tell the device to stop sending interrupts */
-		iwl_legacy_disable_interrupts(priv);
-
-		priv->isr_stats.hw++;
-		iwl_legacy_irq_handle_error(priv);
-
-		handled |= CSR_INT_BIT_HW_ERR;
-
-		return;
-	}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-		/* NIC fires this, but we don't use it, redundant with WAKEUP */
-		if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
-				      "the frame/frames.\n");
-			priv->isr_stats.sch++;
-		}
-
-		/* Alive notification via Rx interrupt will do the real work */
-		if (inta & CSR_INT_BIT_ALIVE) {
-			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
-			priv->isr_stats.alive++;
-		}
-	}
-#endif
-	/* Safely ignore these bits for debug checks below */
-	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
-	/* Error detected by uCode */
-	if (inta & CSR_INT_BIT_SW_ERR) {
-		IWL_ERR(priv, "Microcode SW error detected. "
-			"Restarting 0x%X.\n", inta);
-		priv->isr_stats.sw++;
-		iwl_legacy_irq_handle_error(priv);
-		handled |= CSR_INT_BIT_SW_ERR;
-	}
-
-	/* uCode wakes up after power-down sleep */
-	if (inta & CSR_INT_BIT_WAKEUP) {
-		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
-		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
-		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
-		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
-		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
-		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
-		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
-
-		priv->isr_stats.wakeup++;
-		handled |= CSR_INT_BIT_WAKEUP;
-	}
-
-	/* All uCode command responses, including Tx command responses,
-	 * Rx "responses" (frame-received notification), and other
-	 * notifications from uCode come through here*/
-	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-		iwl3945_rx_handle(priv);
-		priv->isr_stats.rx++;
-		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
-	}
-
-	if (inta & CSR_INT_BIT_FH_TX) {
-		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
-		priv->isr_stats.tx++;
-
-		iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
-		iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
-					(FH39_SRVC_CHNL), 0x0);
-		handled |= CSR_INT_BIT_FH_TX;
-	}
-
-	if (inta & ~handled) {
-		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
-		priv->isr_stats.unhandled++;
-	}
-
-	if (inta & ~priv->inta_mask) {
-		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
-			 inta & ~priv->inta_mask);
-		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
-	}
-
-	/* Re-enable all interrupts */
-	/* only Re-enable if disabled by irq */
-	if (test_bit(STATUS_INT_ENABLED, &priv->status))
-		iwl_legacy_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-		inta = iwl_read32(priv, CSR_INT);
-		inta_mask = iwl_read32(priv, CSR_INT_MASK);
-		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-		IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
-			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
-	}
-#endif
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
-					 enum ieee80211_band band,
-				     u8 is_active, u8 n_probes,
-				     struct iwl3945_scan_channel *scan_ch,
-				     struct ieee80211_vif *vif)
-{
-	struct ieee80211_channel *chan;
-	const struct ieee80211_supported_band *sband;
-	const struct iwl_channel_info *ch_info;
-	u16 passive_dwell = 0;
-	u16 active_dwell = 0;
-	int added, i;
-
-	sband = iwl_get_hw_mode(priv, band);
-	if (!sband)
-		return 0;
-
-	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
-	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
-	if (passive_dwell <= active_dwell)
-		passive_dwell = active_dwell + 1;
-
-	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
-		chan = priv->scan_request->channels[i];
-
-		if (chan->band != band)
-			continue;
-
-		scan_ch->channel = chan->hw_value;
-
-		ch_info = iwl_legacy_get_channel_info(priv, band,
-							scan_ch->channel);
-		if (!iwl_legacy_is_channel_valid(ch_info)) {
-			IWL_DEBUG_SCAN(priv,
-				"Channel %d is INVALID for this band.\n",
-			       scan_ch->channel);
-			continue;
-		}
-
-		scan_ch->active_dwell = cpu_to_le16(active_dwell);
-		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-		/* If passive, set up for auto-switch
-		 *  and use long active_dwell time.
-		 */
-		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
-		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
-			scan_ch->type = 0;	/* passive */
-			if (IWL_UCODE_API(priv->ucode_ver) == 1)
-				scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
-		} else {
-			scan_ch->type = 1;	/* active */
-		}
-
-		/* Set direct probe bits. These may be used both for active
-		 * scan channels (probes get sent right away),
-		 * or for passive channels (probes get sent only after
-		 * hearing a clear Rx packet). */
-		if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
-			if (n_probes)
-				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
-		} else {
-			/* uCode v1 does not allow setting direct probe bits on
-			 * passive channel. */
-			if ((scan_ch->type & 1) && n_probes)
-				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
-		}
-
-		/* Set txpower levels to defaults */
-		scan_ch->tpc.dsp_atten = 110;
-		/* scan_pwr_info->tpc.dsp_atten; */
-
-		/*scan_pwr_info->tpc.tx_gain; */
-		if (band == IEEE80211_BAND_5GHZ)
-			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
-		else {
-			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
-			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
-			 * power level:
-			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
-			 */
-		}
-
-		IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
-			       scan_ch->channel,
-			       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
-			       (scan_ch->type & 1) ?
-			       active_dwell : passive_dwell);
-
-		scan_ch++;
-		added++;
-	}
-
-	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
-	return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
-			      struct ieee80211_rate *rates)
-{
-	int i;
-
-	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
-		rates[i].bitrate = iwl3945_rates[i].ieee * 5;
-		rates[i].hw_value = i; /* Rate scaling will work on indexes */
-		rates[i].hw_value_short = i;
-		rates[i].flags = 0;
-		if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
-			/*
-			 * If CCK != 1M then set short preamble rate flag.
-			 */
-			rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
-				0 : IEEE80211_RATE_SHORT_PREAMBLE;
-		}
-	}
-}
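Assuming iwl3945_rates[i].ieee (defined outside this file) holds the IEEE rate value in 500 kbps units, multiplying by 5 yields mac80211's bitrate in 100 kbps units: the 1 Mbps CCK entry (PLCP value 10) becomes bitrate 10, and per the check above it is the only CCK rate that does not get IEEE80211_RATE_SHORT_PREAMBLE.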
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- *     looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-	u32 val;
-	u32 save_len = len;
-	int rc = 0;
-	u32 errcnt;
-
-	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-			       IWL39_RTC_INST_LOWER_BOUND);
-
-	errcnt = 0;
-	for (; len > 0; len -= sizeof(u32), image++) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "uCode INST section is invalid at "
-				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
-				  save_len - len, val, le32_to_cpu(*image));
-			rc = -EIO;
-			errcnt++;
-			if (errcnt >= 20)
-				break;
-		}
-	}
-
-
-	if (!errcnt)
-		IWL_DEBUG_INFO(priv,
-			"ucode image in INSTRUCTION memory is good\n");
-
-	return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-	u32 val;
-	int rc = 0;
-	u32 errcnt = 0;
-	u32 i;
-
-	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-			i + IWL39_RTC_INST_LOWER_BOUND);
-		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
-			IWL_ERR(priv, "uCode INST section is invalid at "
-				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
-				  i, val, *image);
-#endif
-			rc = -EIO;
-			errcnt++;
-			if (errcnt >= 3)
-				break;
-		}
-	}
-
-	return rc;
-}
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
-	__le32 *image;
-	u32 len;
-	int rc = 0;
-
-	/* Try bootstrap */
-	image = (__le32 *)priv->ucode_boot.v_addr;
-	len = priv->ucode_boot.len;
-	rc = iwl3945_verify_inst_sparse(priv, image, len);
-	if (rc == 0) {
-		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	/* Try initialize */
-	image = (__le32 *)priv->ucode_init.v_addr;
-	len = priv->ucode_init.len;
-	rc = iwl3945_verify_inst_sparse(priv, image, len);
-	if (rc == 0) {
-		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	/* Try runtime/protocol */
-	image = (__le32 *)priv->ucode_code.v_addr;
-	len = priv->ucode_code.len;
-	rc = iwl3945_verify_inst_sparse(priv, image, len);
-	if (rc == 0) {
-		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
-	/* Since nothing seems to match, show first several data entries in
-	 * instruction SRAM, so maybe visual inspection will give a clue.
-	 * Selection of bootstrap image (vs. other images) is arbitrary. */
-	image = (__le32 *)priv->ucode_boot.v_addr;
-	len = priv->ucode_boot.len;
-	rc = iwl3945_verify_inst_full(priv, image, len);
-
-	return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
-	/* Remove all resets to allow NIC to operate */
-	iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item)						\
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{									\
-	return le32_to_cpu(ucode->v1.item);				\
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
-	return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
-	return (u8 *) ucode->v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
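Each IWL3945_UCODE_GET(item) invocation above expands, per the macro definition, into a small endian-converting accessor; for instance IWL3945_UCODE_GET(boot_size) becomes:

static u32 iwl3945_ucode_get_boot_size(const struct iwl_ucode_header *ucode)
{
	return le32_to_cpu(ucode->v1.boot_size);
}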
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
-	const struct iwl_ucode_header *ucode;
-	int ret = -EINVAL, index;
-	const struct firmware *ucode_raw;
-	/* firmware file name contains uCode/driver compatibility version */
-	const char *name_pre = priv->cfg->fw_name_pre;
-	const unsigned int api_max = priv->cfg->ucode_api_max;
-	const unsigned int api_min = priv->cfg->ucode_api_min;
-	char buf[25];
-	u8 *src;
-	size_t len;
-	u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
-	/* Ask kernel firmware_class module to get the boot firmware off disk.
-	 * request_firmware() is synchronous, file is in memory on return. */
-	for (index = api_max; index >= api_min; index--) {
-		sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
-		ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
-		if (ret < 0) {
-			IWL_ERR(priv, "%s firmware file req failed: %d\n",
-				  buf, ret);
-			if (ret == -ENOENT)
-				continue;
-			else
-				goto error;
-		} else {
-			if (index < api_max)
-				IWL_ERR(priv, "Loaded firmware %s, "
-					"which is deprecated. "
-					" Please use API v%u instead.\n",
-					  buf, api_max);
-			IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
-				       "(%zd bytes) from disk\n",
-				       buf, ucode_raw->size);
-			break;
-		}
-	}
-
-	if (ret < 0)
-		goto error;
-
-	/* Make sure that we got at least our header! */
-	if (ucode_raw->size <  iwl3945_ucode_get_header_size(1)) {
-		IWL_ERR(priv, "File size way too small!\n");
-		ret = -EINVAL;
-		goto err_release;
-	}
-
-	/* Data from ucode file:  header followed by uCode images */
-	ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
-	priv->ucode_ver = le32_to_cpu(ucode->ver);
-	api_ver = IWL_UCODE_API(priv->ucode_ver);
-	inst_size = iwl3945_ucode_get_inst_size(ucode);
-	data_size = iwl3945_ucode_get_data_size(ucode);
-	init_size = iwl3945_ucode_get_init_size(ucode);
-	init_data_size = iwl3945_ucode_get_init_data_size(ucode);
-	boot_size = iwl3945_ucode_get_boot_size(ucode);
-	src = iwl3945_ucode_get_data(ucode);
-
-	/* api_ver should match the api version forming part of the
-	 * firmware filename ... but we don't check for that and only rely
-	 * on the API version read from firmware header from here on forward */
-
-	if (api_ver < api_min || api_ver > api_max) {
-		IWL_ERR(priv, "Driver unable to support your firmware API. "
-			  "Driver supports v%u, firmware is v%u.\n",
-			  api_max, api_ver);
-		priv->ucode_ver = 0;
-		ret = -EINVAL;
-		goto err_release;
-	}
-	if (api_ver != api_max)
-		IWL_ERR(priv, "Firmware has old API version. Expected %u, "
-			  "got %u. New firmware can be obtained "
-			  "from http://www.intellinuxwireless.org.\n",
-			  api_max, api_ver);
-
-	IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
-		IWL_UCODE_MAJOR(priv->ucode_ver),
-		IWL_UCODE_MINOR(priv->ucode_ver),
-		IWL_UCODE_API(priv->ucode_ver),
-		IWL_UCODE_SERIAL(priv->ucode_ver));
-
-	snprintf(priv->hw->wiphy->fw_version,
-		 sizeof(priv->hw->wiphy->fw_version),
-		 "%u.%u.%u.%u",
-		 IWL_UCODE_MAJOR(priv->ucode_ver),
-		 IWL_UCODE_MINOR(priv->ucode_ver),
-		 IWL_UCODE_API(priv->ucode_ver),
-		 IWL_UCODE_SERIAL(priv->ucode_ver));
-
-	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
-		       priv->ucode_ver);
-	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
-		       inst_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
-		       data_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
-		       init_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
-		       init_data_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
-		       boot_size);
-
-
-	/* Verify size of file vs. image size info in file's header */
-	if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
-		inst_size + data_size + init_size +
-		init_data_size + boot_size) {
-
-		IWL_DEBUG_INFO(priv,
-			"uCode file size %zd does not match expected size\n",
-			ucode_raw->size);
-		ret = -EINVAL;
-		goto err_release;
-	}
-
-	/* Verify that uCode images will fit in card's SRAM */
-	if (inst_size > IWL39_MAX_INST_SIZE) {
-		IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
-			       inst_size);
-		ret = -EINVAL;
-		goto err_release;
-	}
-
-	if (data_size > IWL39_MAX_DATA_SIZE) {
-		IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
-			       data_size);
-		ret = -EINVAL;
-		goto err_release;
-	}
-	if (init_size > IWL39_MAX_INST_SIZE) {
-		IWL_DEBUG_INFO(priv,
-				"uCode init instr len %d too large to fit in\n",
-				init_size);
-		ret = -EINVAL;
-		goto err_release;
-	}
-	if (init_data_size > IWL39_MAX_DATA_SIZE) {
-		IWL_DEBUG_INFO(priv,
-				"uCode init data len %d too large to fit in\n",
-				init_data_size);
-		ret = -EINVAL;
-		goto err_release;
-	}
-	if (boot_size > IWL39_MAX_BSM_SIZE) {
-		IWL_DEBUG_INFO(priv,
-				"uCode boot instr len %d too large to fit in\n",
-				boot_size);
-		ret = -EINVAL;
-		goto err_release;
-	}
-
-	/* Allocate ucode buffers for card's bus-master loading ... */
-
-	/* Runtime instructions and 2 copies of data:
-	 * 1) unmodified from disk
-	 * 2) backup cache for save/restore during power-downs */
-	priv->ucode_code.len = inst_size;
-	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
-	priv->ucode_data.len = data_size;
-	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
-	priv->ucode_data_backup.len = data_size;
-	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
-	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
-	    !priv->ucode_data_backup.v_addr)
-		goto err_pci_alloc;
-
-	/* Initialization instructions and data */
-	if (init_size && init_data_size) {
-		priv->ucode_init.len = init_size;
-		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
-		priv->ucode_init_data.len = init_data_size;
-		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
-		if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
-			goto err_pci_alloc;
-	}
-
-	/* Bootstrap (instructions only, no data) */
-	if (boot_size) {
-		priv->ucode_boot.len = boot_size;
-		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
-		if (!priv->ucode_boot.v_addr)
-			goto err_pci_alloc;
-	}
-
-	/* Copy images into buffers for card's bus-master reads ... */
-
-	/* Runtime instructions (first block of data in file) */
-	len = inst_size;
-	IWL_DEBUG_INFO(priv,
-		"Copying (but not loading) uCode instr len %zd\n", len);
-	memcpy(priv->ucode_code.v_addr, src, len);
-	src += len;
-
-	IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
-		priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
-	/* Runtime data (2nd block)
-	 * NOTE:  Copy into backup buffer will be done in iwl3945_up()  */
-	len = data_size;
-	IWL_DEBUG_INFO(priv,
-		"Copying (but not loading) uCode data len %zd\n", len);
-	memcpy(priv->ucode_data.v_addr, src, len);
-	memcpy(priv->ucode_data_backup.v_addr, src, len);
-	src += len;
-
-	/* Initialization instructions (3rd block) */
-	if (init_size) {
-		len = init_size;
-		IWL_DEBUG_INFO(priv,
-			"Copying (but not loading) init instr len %zd\n", len);
-		memcpy(priv->ucode_init.v_addr, src, len);
-		src += len;
-	}
-
-	/* Initialization data (4th block) */
-	if (init_data_size) {
-		len = init_data_size;
-		IWL_DEBUG_INFO(priv,
-			"Copying (but not loading) init data len %zd\n", len);
-		memcpy(priv->ucode_init_data.v_addr, src, len);
-		src += len;
-	}
-
-	/* Bootstrap instructions (5th block) */
-	len = boot_size;
-	IWL_DEBUG_INFO(priv,
-		"Copying (but not loading) boot instr len %zd\n", len);
-	memcpy(priv->ucode_boot.v_addr, src, len);
-
-	/* We have our copies now, allow the OS to release its copy */
-	release_firmware(ucode_raw);
-	return 0;
-
- err_pci_alloc:
-	IWL_ERR(priv, "failed to allocate pci memory\n");
-	ret = -ENOMEM;
-	iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
-	release_firmware(ucode_raw);
-
- error:
-	return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
-	dma_addr_t pinst;
-	dma_addr_t pdata;
-
-	/* bits 31:0 for 3945 */
-	pinst = priv->ucode_code.p_addr;
-	pdata = priv->ucode_data_backup.p_addr;
-
-	/* Tell bootstrap uCode where to find image to load */
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
-				 priv->ucode_data.len);
-
-	/* Inst byte count must be last to set up, bit 31 signals uCode
-	 *   that all new ptr/size info is in place */
-	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
-				 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
-	IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
-	return 0;
-}
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
-	/* Check alive response for "valid" sign from uCode */
-	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-		/* We had an error bringing up the hardware, so take it
-		 * all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-		goto restart;
-	}
-
-	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
-	 * This is a paranoid check, because we would not have gotten the
-	 * "initialize" alive if code weren't properly loaded.  */
-	if (iwl3945_verify_ucode(priv)) {
-		/* Runtime instruction load was bad;
-		 * take it all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
-		goto restart;
-	}
-
-	/* Send pointers to protocol/runtime uCode image ... init code will
-	 * load and launch runtime uCode, which will send us another "Alive"
-	 * notification. */
-	IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-	if (iwl3945_set_ucode_ptrs(priv)) {
-		/* Runtime instruction load won't happen;
-		 * take it all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
-		goto restart;
-	}
-	return;
-
- restart:
-	queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- *                   from protocol/runtime uCode (initialization uCode's
- *                   Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
-	int thermal_spin = 0;
-	u32 rfkill;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
-	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-		/* We had an error bringing up the hardware, so take it
-		 * all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Alive failed.\n");
-		goto restart;
-	}
-
-	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
-	 * This is a paranoid check, because we would not have gotten the
-	 * "runtime" alive if code weren't properly loaded.  */
-	if (iwl3945_verify_ucode(priv)) {
-		/* Runtime instruction load was bad;
-		 * take it all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
-		goto restart;
-	}
-
-	rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
-	IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
-	if (rfkill & 0x1) {
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-		/* if RFKILL is not on, then wait for thermal
-		 * sensor in adapter to kick in */
-		while (iwl3945_hw_get_temperature(priv) == 0) {
-			thermal_spin++;
-			udelay(10);
-		}
-
-		if (thermal_spin)
-			IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
-				       thermal_spin * 10);
-	} else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	/* After the ALIVE response, we can send commands to 3945 uCode */
-	set_bit(STATUS_ALIVE, &priv->status);
-
-	/* Enable watchdog to monitor the driver tx queues */
-	iwl_legacy_setup_watchdog(priv);
-
-	if (iwl_legacy_is_rfkill(priv))
-		return;
-
-	ieee80211_wake_queues(priv->hw);
-
-	priv->active_rate = IWL_RATES_MASK_3945;
-
-	iwl_legacy_power_update_mode(priv, true);
-
-	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-		struct iwl3945_rxon_cmd *active_rxon =
-				(struct iwl3945_rxon_cmd *)(&ctx->active);
-
-		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	} else {
-		/* Initialize our rx_config data */
-		iwl_legacy_connection_init_rx_config(priv, ctx);
-	}
-
-	/* Configure Bluetooth device coexistence support */
-	iwl_legacy_send_bt_config(priv);
-
-	set_bit(STATUS_READY, &priv->status);
-
-	/* Configure the adapter for unassociated operation */
-	iwl3945_commit_rxon(priv, ctx);
-
-	iwl3945_reg_txpower_periodic(priv);
-
-	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-	wake_up(&priv->wait_command_queue);
-
-	return;
-
- restart:
-	queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	int exit_pending;
-
-	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
-	iwl_legacy_scan_cancel_timeout(priv, 200);
-
-	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
-	 * to prevent the timer from being rearmed */
-	del_timer_sync(&priv->watchdog);
-
-	/* Station information will now be cleared in device */
-	iwl_legacy_clear_ucode_stations(priv, NULL);
-	iwl_legacy_dealloc_bcast_stations(priv);
-	iwl_legacy_clear_driver_stations(priv);
-
-	/* Unblock any waiting calls */
-	wake_up_all(&priv->wait_command_queue);
-
-	/* Wipe out the EXIT_PENDING status bit if we are not actually
-	 * exiting the module */
-	if (!exit_pending)
-		clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* stop and reset the on-board processor */
-	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-	/* tell the device to stop sending interrupts */
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_legacy_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	iwl3945_synchronize_irq(priv);
-
-	if (priv->mac80211_registered)
-		ieee80211_stop_queues(priv->hw);
-
-	/* If we have not previously called iwl3945_init() then
-	 * clear all bits but the RF Kill bits and return */
-	if (!iwl_legacy_is_init(priv)) {
-		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-					STATUS_RF_KILL_HW |
-			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-					STATUS_GEO_CONFIGURED |
-				test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-					STATUS_EXIT_PENDING;
-		goto exit;
-	}
-
-	/* ...otherwise clear out all the status bits but the RF Kill
-	 * bit and continue taking the NIC down. */
-	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-				STATUS_RF_KILL_HW |
-			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-				STATUS_GEO_CONFIGURED |
-			test_bit(STATUS_FW_ERROR, &priv->status) <<
-				STATUS_FW_ERROR |
-			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-				STATUS_EXIT_PENDING;
-
-	iwl3945_hw_txq_ctx_stop(priv);
-	iwl3945_hw_rxq_stop(priv);
-
-	/* Power-down device's busmaster DMA clocks */
-	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-	udelay(5);
-
-	/* Stop the device, and put it in low power state */
-	iwl_legacy_apm_stop(priv);
-
- exit:
-	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
-	if (priv->beacon_skb)
-		dev_kfree_skb(priv->beacon_skb);
-	priv->beacon_skb = NULL;
-
-	/* clear out any free frames */
-	iwl3945_clear_free_frames(priv);
-}
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
-	mutex_lock(&priv->mutex);
-	__iwl3945_down(priv);
-	mutex_unlock(&priv->mutex);
-
-	iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	unsigned long flags;
-	u8 sta_id;
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	sta_id = iwl_legacy_prep_station(priv, ctx,
-					iwlegacy_bcast_addr, false, NULL);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Unable to prepare broadcast station\n");
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-		return -EINVAL;
-	}
-
-	priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
-	priv->stations[sta_id].used |= IWL_STA_BCAST;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
-	int rc, i;
-
-	rc = iwl3945_alloc_bcast_station(priv);
-	if (rc)
-		return rc;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-		return -EIO;
-	}
-
-	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
-		IWL_ERR(priv, "ucode not available for device bring up\n");
-		return -EIO;
-	}
-
-	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(priv, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else {
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
-		return -ENODEV;
-	}
-
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
-	rc = iwl3945_hw_nic_init(priv);
-	if (rc) {
-		IWL_ERR(priv, "Unable to int nic\n");
-		return rc;
-	}
-
-	/* make sure rfkill handshake bits are cleared */
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-	/* clear (again), then enable host interrupts */
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-	iwl_legacy_enable_interrupts(priv);
-
-	/* really make sure rfkill handshake bits are cleared */
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
-	/* Copy original ucode data image from disk into backup cache.
-	 * This will be used to initialize the on-board processor's
-	 * data SRAM for a clean start when the runtime program first loads. */
-	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
-	       priv->ucode_data.len);
-
-	/* We return success when we resume from suspend and rf_kill is on. */
-	if (test_bit(STATUS_RF_KILL_HW, &priv->status))
-		return 0;
-
-	for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
-		/* load bootstrap state machine,
-		 * load bootstrap program into processor's memory,
-		 * prepare to load the "initialize" uCode */
-		rc = priv->cfg->ops->lib->load_ucode(priv);
-
-		if (rc) {
-			IWL_ERR(priv,
-				"Unable to set up bootstrap uCode: %d\n", rc);
-			continue;
-		}
-
-		/* start card; "initialize" will load runtime ucode */
-		iwl3945_nic_start(priv);
-
-		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
-		return 0;
-	}
-
-	set_bit(STATUS_EXIT_PENDING, &priv->status);
-	__iwl3945_down(priv);
-	clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* tried to restart and config the device for as long as our
-	 * patience could withstand */
-	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
-	return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, init_alive_start.work);
-
-	mutex_lock(&priv->mutex);
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		goto out;
-
-	iwl3945_init_alive_start(priv);
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, alive_start.work);
-
-	mutex_lock(&priv->mutex);
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		goto out;
-
-	iwl3945_alive_start(priv);
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-/*
- * 3945 cannot interrupt driver when hardware rf kill switch toggles;
- * driver must poll CSR_GP_CNTRL_REG register for change.  This register
- * *is* readable even when device has been SW_RESET into low power mode
- * (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
-	bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
-	bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
-			& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
-	if (new_rfkill != old_rfkill) {
-		if (new_rfkill)
-			set_bit(STATUS_RF_KILL_HW, &priv->status);
-		else
-			clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-		wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
-		IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
-				new_rfkill ? "disable radio" : "enable radio");
-	}
-
-	/* Keep this running, even if radio now enabled.  This will be
-	 * cancelled in mac_start() if system decides to start again */
-	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-			   round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_SCAN_CMD,
-		.len = sizeof(struct iwl3945_scan_cmd),
-		.flags = CMD_SIZE_HUGE,
-	};
-	struct iwl3945_scan_cmd *scan;
-	u8 n_probes = 0;
-	enum ieee80211_band band;
-	bool is_active = false;
-	int ret;
-	u16 len;
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (!priv->scan_cmd) {
-		priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
-					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
-		if (!priv->scan_cmd) {
-			IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
-			return -ENOMEM;
-		}
-	}
-	scan = priv->scan_cmd;
-	memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
-	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
-	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
-	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-		u16 interval;
-		u32 extra;
-		u32 suspend_time = 100;
-		u32 scan_suspend_time = 100;
-
-		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
-		interval = vif->bss_conf.beacon_int;
-
-		scan->suspend_time = 0;
-		scan->max_out_time = cpu_to_le32(200 * 1024);
-		if (!interval)
-			interval = suspend_time;
-		/*
-		 * suspend time format:
-		 *  0-19: beacon interval in usec (time before exec.)
-		 * 20-23: 0
-		 * 24-31: number of beacons (suspend between channels)
-		 */
-
-		extra = (suspend_time / interval) << 24;
-		scan_suspend_time = 0xFF0FFFFF &
-		    (extra | ((suspend_time % interval) * 1024));
-
-		scan->suspend_time = cpu_to_le32(scan_suspend_time);
-		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
-			       scan_suspend_time, interval);
-	}
-
-	if (priv->scan_request->n_ssids) {
-		int i, p = 0;
-		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
-		for (i = 0; i < priv->scan_request->n_ssids; i++) {
-			/* always does wildcard anyway */
-			if (!priv->scan_request->ssids[i].ssid_len)
-				continue;
-			scan->direct_scan[p].id = WLAN_EID_SSID;
-			scan->direct_scan[p].len =
-				priv->scan_request->ssids[i].ssid_len;
-			memcpy(scan->direct_scan[p].ssid,
-			       priv->scan_request->ssids[i].ssid,
-			       priv->scan_request->ssids[i].ssid_len);
-			n_probes++;
-			p++;
-		}
-		is_active = true;
-	} else
-		IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
-	/* We don't build a direct scan probe request; the uCode will do
-	 * that based on the direct_mask added to each channel entry */
-	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-	scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-	/* flags + rate selection */
-
-	switch (priv->scan_band) {
-	case IEEE80211_BAND_2GHZ:
-		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-		scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
-		band = IEEE80211_BAND_2GHZ;
-		break;
-	case IEEE80211_BAND_5GHZ:
-		scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
-		band = IEEE80211_BAND_5GHZ;
-		break;
-	default:
-		IWL_WARN(priv, "Invalid scan band\n");
-		return -EIO;
-	}
-
-	/*
-	 * If active scanning is requested but a certain channel
-	 * is marked passive, we can do active scanning if we
-	 * detect transmissions.
-	 */
-	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
-					IWL_GOOD_CRC_TH_DISABLED;
-
-	len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
-					vif->addr, priv->scan_request->ie,
-					priv->scan_request->ie_len,
-					IWL_MAX_SCAN_SIZE - sizeof(*scan));
-	scan->tx_cmd.len = cpu_to_le16(len);
-
-	/* select Rx antennas */
-	scan->flags |= iwl3945_get_antenna_flags(priv);
-
-	scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
-							    (void *)&scan->data[len], vif);
-	if (scan->channel_count == 0) {
-		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
-		return -EIO;
-	}
-
-	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
-	    scan->channel_count * sizeof(struct iwl3945_scan_channel);
-	cmd.data = scan;
-	scan->len = cpu_to_le16(cmd.len);
-
-	set_bit(STATUS_SCAN_HW, &priv->status);
-	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
-	if (ret)
-		clear_bit(STATUS_SCAN_HW, &priv->status);
-	return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	/*
-	 * Since setting the RXON may have been deferred while
-	 * performing the scan, fire one off if needed
-	 */
-	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-		iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
-	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
-		struct iwl_rxon_context *ctx;
-		mutex_lock(&priv->mutex);
-		for_each_context(priv, ctx)
-			ctx->vif = NULL;
-		priv->is_open = 0;
-		mutex_unlock(&priv->mutex);
-		iwl3945_down(priv);
-		ieee80211_restart_hw(priv->hw);
-	} else {
-		iwl3945_down(priv);
-
-		mutex_lock(&priv->mutex);
-		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-			mutex_unlock(&priv->mutex);
-			return;
-		}
-
-		__iwl3945_up(priv);
-		mutex_unlock(&priv->mutex);
-	}
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, rx_replenish);
-
-	mutex_lock(&priv->mutex);
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		goto out;
-
-	iwl3945_rx_replenish(priv);
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
-	int rc = 0;
-	struct ieee80211_conf *conf = NULL;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (!ctx->vif || !priv->is_open)
-		return;
-
-	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-			ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	iwl_legacy_scan_cancel_timeout(priv, 200);
-
-	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
-	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	iwl3945_commit_rxon(priv, ctx);
-
-	rc = iwl_legacy_send_rxon_timing(priv, ctx);
-	if (rc)
-		IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
-			    "Attempting to continue.\n");
-
-	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
-	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-			ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
-	if (ctx->vif->bss_conf.use_short_preamble)
-		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-	else
-		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-		if (ctx->vif->bss_conf.use_short_slot)
-			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-		else
-			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-	}
-
-	iwl3945_commit_rxon(priv, ctx);
-
-	switch (ctx->vif->type) {
-	case NL80211_IFTYPE_STATION:
-		iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
-		break;
-	case NL80211_IFTYPE_ADHOC:
-		iwl3945_send_beacon_cmd(priv);
-		break;
-	default:
-		IWL_ERR(priv, "%s Should not be called in %d mode\n",
-			__func__, ctx->vif->type);
-		break;
-	}
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT	(2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	/* we should be verifying the device is ready to be opened */
-	mutex_lock(&priv->mutex);
-
-	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
-	 * ucode filename and max sizes are card-specific. */
-
-	if (!priv->ucode_code.len) {
-		ret = iwl3945_read_ucode(priv);
-		if (ret) {
-			IWL_ERR(priv, "Could not read microcode: %d\n", ret);
-			mutex_unlock(&priv->mutex);
-			goto out_release_irq;
-		}
-	}
-
-	ret = __iwl3945_up(priv);
-
-	mutex_unlock(&priv->mutex);
-
-	if (ret)
-		goto out_release_irq;
-
-	IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
-	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
-	 * mac80211 will not be run successfully. */
-	ret = wait_event_timeout(priv->wait_command_queue,
-			test_bit(STATUS_READY, &priv->status),
-			UCODE_READY_TIMEOUT);
-	if (!ret) {
-		if (!test_bit(STATUS_READY, &priv->status)) {
-			IWL_ERR(priv,
-				"Wait for START_ALIVE timeout after %dms.\n",
-				jiffies_to_msecs(UCODE_READY_TIMEOUT));
-			ret = -ETIMEDOUT;
-			goto out_release_irq;
-		}
-	}
-
-	/* ucode is running and will send rfkill notifications,
-	 * no need to poll the killswitch state anymore */
-	cancel_delayed_work(&priv->_3945.rfkill_poll);
-
-	priv->is_open = 1;
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return 0;
-
-out_release_irq:
-	priv->is_open = 0;
-	IWL_DEBUG_MAC80211(priv, "leave - failed\n");
-	return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (!priv->is_open) {
-		IWL_DEBUG_MAC80211(priv, "leave - skip\n");
-		return;
-	}
-
-	priv->is_open = 0;
-
-	iwl3945_down(priv);
-
-	flush_workqueue(priv->workqueue);
-
-	/* start polling the killswitch state again */
-	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-			   round_jiffies_relative(2 * HZ));
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-	if (iwl3945_tx_skb(priv, skb))
-		dev_kfree_skb_any(skb);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct ieee80211_vif *vif = ctx->vif;
-	int rc = 0;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	/* The following should be done only at AP bring up */
-	if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
-		/* RXON - unassoc (to set timing command) */
-		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-		iwl3945_commit_rxon(priv, ctx);
-
-		/* RXON Timing */
-		rc = iwl_legacy_send_rxon_timing(priv, ctx);
-		if (rc)
-			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
-					"Attempting to continue.\n");
-
-		ctx->staging.assoc_id = 0;
-
-		if (vif->bss_conf.use_short_preamble)
-			ctx->staging.flags |=
-				RXON_FLG_SHORT_PREAMBLE_MSK;
-		else
-			ctx->staging.flags &=
-				~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-			if (vif->bss_conf.use_short_slot)
-				ctx->staging.flags |=
-					RXON_FLG_SHORT_SLOT_MSK;
-			else
-				ctx->staging.flags &=
-					~RXON_FLG_SHORT_SLOT_MSK;
-		}
-		/* restore RXON assoc */
-		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-		iwl3945_commit_rxon(priv, ctx);
-	}
-	iwl3945_send_beacon_cmd(priv);
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-			       struct ieee80211_vif *vif,
-			       struct ieee80211_sta *sta,
-			       struct ieee80211_key_conf *key)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret = 0;
-	u8 sta_id = IWL_INVALID_STATION;
-	u8 static_key;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (iwl3945_mod_params.sw_crypto) {
-		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-		return -EOPNOTSUPP;
-	}
-
-	/*
-	 * To support IBSS RSN, don't program group keys in IBSS, the
-	 * hardware will then not attempt to decrypt the frames.
-	 */
-	if (vif->type == NL80211_IFTYPE_ADHOC &&
-	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
-		return -EOPNOTSUPP;
-
-	static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-
-	if (!static_key) {
-		sta_id = iwl_legacy_sta_id_or_broadcast(
-				priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
-		if (sta_id == IWL_INVALID_STATION)
-			return -EINVAL;
-	}
-
-	mutex_lock(&priv->mutex);
-	iwl_legacy_scan_cancel_timeout(priv, 100);
-
-	switch (cmd) {
-	case SET_KEY:
-		if (static_key)
-			ret = iwl3945_set_static_key(priv, key);
-		else
-			ret = iwl3945_set_dynamic_key(priv, key, sta_id);
-		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-		break;
-	case DISABLE_KEY:
-		if (static_key)
-			ret = iwl3945_remove_static_key(priv);
-		else
-			ret = iwl3945_clear_sta_key_info(priv, sta_id);
-		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	mutex_unlock(&priv->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
-			       struct ieee80211_vif *vif,
-			       struct ieee80211_sta *sta)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
-	int ret;
-	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-	u8 sta_id;
-
-	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
-			sta->addr);
-	mutex_lock(&priv->mutex);
-	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-			sta->addr);
-	sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-
-	ret = iwl_legacy_add_station_common(priv,
-				&priv->contexts[IWL_RXON_CTX_BSS],
-				     sta->addr, is_ap, sta, &sta_id);
-	if (ret) {
-		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-			sta->addr, ret);
-		/* Should we return success if return code is EEXIST ? */
-		mutex_unlock(&priv->mutex);
-		return ret;
-	}
-
-	sta_priv->common.sta_id = sta_id;
-
-	/* Initialize rate scaling */
-	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-		       sta->addr);
-	iwl3945_rs_rate_init(priv, sta, sta_id);
-	mutex_unlock(&priv->mutex);
-
-	return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
-				     unsigned int changed_flags,
-				     unsigned int *total_flags,
-				     u64 multicast)
-{
-	struct iwl_priv *priv = hw->priv;
-	__le32 filter_or = 0, filter_nand = 0;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-#define CHK(test, flag)	do { \
-	if (*total_flags & (test))		\
-		filter_or |= (flag);		\
-	else					\
-		filter_nand |= (flag);		\
-	} while (0)
-
-	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-			changed_flags, *total_flags);
-
-	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
-	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-	mutex_lock(&priv->mutex);
-
-	ctx->staging.filter_flags &= ~filter_nand;
-	ctx->staging.filter_flags |= filter_or;
-
-	/*
-	 * Not committing directly because hardware can perform a scan,
-	 * but even if hw is ready, committing here breaks for some reason,
-	 * we'll eventually commit the filter flags change anyway.
-	 */
-
-	mutex_unlock(&priv->mutex);
-
-	/*
-	 * Receiving all multicast frames is always enabled by the
-	 * default flags setup in iwl_legacy_connection_init_rx_config()
-	 * since we currently do not support programming multicast
-	 * filters into the device.
-	 */
-	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl3945_show_debug_level(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl3945_store_debug_level(struct device *d,
-				struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	unsigned long val;
-	int ret;
-
-	ret = strict_strtoul(buf, 0, &val);
-	if (ret)
-		IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
-	else {
-		priv->debug_level = val;
-		if (iwl_legacy_alloc_traffic_mem(priv))
-			IWL_ERR(priv,
-				"Not enough memory to generate traffic log\n");
-	}
-	return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-			iwl3945_show_debug_level, iwl3945_store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-static ssize_t iwl3945_show_temperature(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-
-static ssize_t iwl3945_show_tx_power(struct device *d,
-			     struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl3945_store_tx_power(struct device *d,
-			      struct device_attribute *attr,
-			      const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	char *p = (char *)buf;
-	u32 val;
-
-	val = simple_strtoul(p, &p, 10);
-	if (p == buf)
-		IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
-	else
-		iwl3945_hw_reg_set_txpower(priv, val);
-
-	return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-
-static ssize_t iwl3945_show_flags(struct device *d,
-			  struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	return sprintf(buf, "0x%04X\n", ctx->active.flags);
-}
-
-static ssize_t iwl3945_store_flags(struct device *d,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	u32 flags = simple_strtoul(buf, NULL, 0);
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	mutex_lock(&priv->mutex);
-	if (le32_to_cpu(ctx->staging.flags) != flags) {
-		/* Cancel any currently running scans... */
-		if (iwl_legacy_scan_cancel_timeout(priv, 100))
-			IWL_WARN(priv, "Could not cancel scan.\n");
-		else {
-			IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
-				       flags);
-			ctx->staging.flags = cpu_to_le32(flags);
-			iwl3945_commit_rxon(priv, ctx);
-		}
-	}
-	mutex_unlock(&priv->mutex);
-
-	return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-
-static ssize_t iwl3945_show_filter_flags(struct device *d,
-				 struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	return sprintf(buf, "0x%04X\n",
-		le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t iwl3945_store_filter_flags(struct device *d,
-				  struct device_attribute *attr,
-				  const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
-	mutex_lock(&priv->mutex);
-	if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
-		/* Cancel any currently running scans... */
-		if (iwl_legacy_scan_cancel_timeout(priv, 100))
-			IWL_WARN(priv, "Could not cancel scan.\n");
-		else {
-			IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
-				       "0x%04X\n", filter_flags);
-			ctx->staging.filter_flags =
-				cpu_to_le32(filter_flags);
-			iwl3945_commit_rxon(priv, ctx);
-		}
-	}
-	mutex_unlock(&priv->mutex);
-
-	return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
-		   iwl3945_store_filter_flags);
-
-static ssize_t iwl3945_show_measurement(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	struct iwl_spectrum_notification measure_report;
-	u32 size = sizeof(measure_report), len = 0, ofs = 0;
-	u8 *data = (u8 *)&measure_report;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (!(priv->measurement_status & MEASUREMENT_READY)) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return 0;
-	}
-	memcpy(&measure_report, &priv->measure_report, size);
-	priv->measurement_status = 0;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	while (size && (PAGE_SIZE - len)) {
-		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-				   PAGE_SIZE - len, 1);
-		len = strlen(buf);
-		if (PAGE_SIZE - len)
-			buf[len++] = '\n';
-
-		ofs += 16;
-		size -= min(size, 16U);
-	}
-
-	return len;
-}
-
-static ssize_t iwl3945_store_measurement(struct device *d,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct ieee80211_measurement_params params = {
-		.channel = le16_to_cpu(ctx->active.channel),
-		.start_time = cpu_to_le64(priv->_3945.last_tsf),
-		.duration = cpu_to_le16(1),
-	};
-	u8 type = IWL_MEASURE_BASIC;
-	u8 buffer[32];
-	u8 channel;
-
-	if (count) {
-		char *p = buffer;
-		strncpy(buffer, buf, min(sizeof(buffer), count));
-		channel = simple_strtoul(p, NULL, 0);
-		if (channel)
-			params.channel = channel;
-
-		p = buffer;
-		while (*p && *p != ' ')
-			p++;
-		if (*p)
-			type = simple_strtoul(p + 1, NULL, 0);
-	}
-
-	IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
-		       "channel %d (for '%s')\n", type, params.channel, buf);
-	iwl3945_get_measurement(priv, &params, type);
-
-	return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
-		   iwl3945_show_measurement, iwl3945_store_measurement);
-
-static ssize_t iwl3945_store_retry_rate(struct device *d,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-
-	priv->retry_rate = simple_strtoul(buf, NULL, 0);
-	if (priv->retry_rate <= 0)
-		priv->retry_rate = 1;
-
-	return count;
-}
-
-static ssize_t iwl3945_show_retry_rate(struct device *d,
-			       struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
-		   iwl3945_store_retry_rate);
-
-
-static ssize_t iwl3945_show_channels(struct device *d,
-			     struct device_attribute *attr, char *buf)
-{
-	/* all this shit doesn't belong in sysfs anyway */
-	return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-
-static ssize_t iwl3945_show_antenna(struct device *d,
-			    struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t iwl3945_store_antenna(struct device *d,
-			     struct device_attribute *attr,
-			     const char *buf, size_t count)
-{
-	struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
-	int ant;
-
-	if (count == 0)
-		return 0;
-
-	if (sscanf(buf, "%1i", &ant) != 1) {
-		IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
-		return count;
-	}
-
-	if ((ant >= 0) && (ant <= 2)) {
-		IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
-		iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
-	} else
-		IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
-
-	return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-
-static ssize_t iwl3945_show_status(struct device *d,
-			   struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-	return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-
-static ssize_t iwl3945_dump_error_log(struct device *d,
-			      struct device_attribute *attr,
-			      const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	char *p = (char *)buf;
-
-	if (p[0] == '1')
-		iwl3945_dump_nic_error_log(priv);
-
-	return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
-	priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
-	init_waitqueue_head(&priv->wait_command_queue);
-
-	INIT_WORK(&priv->restart, iwl3945_bg_restart);
-	INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
-	INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
-	INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
-	INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
-	iwl_legacy_setup_scan_deferred_work(priv);
-
-	iwl3945_hw_setup_deferred_work(priv);
-
-	init_timer(&priv->watchdog);
-	priv->watchdog.data = (unsigned long)priv;
-	priv->watchdog.function = iwl_legacy_bg_watchdog;
-
-	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
-		     iwl3945_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
-	iwl3945_hw_cancel_deferred_work(priv);
-
-	cancel_delayed_work_sync(&priv->init_alive_start);
-	cancel_delayed_work(&priv->alive_start);
-
-	iwl_legacy_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
-	&dev_attr_antenna.attr,
-	&dev_attr_channels.attr,
-	&dev_attr_dump_errors.attr,
-	&dev_attr_flags.attr,
-	&dev_attr_filter_flags.attr,
-	&dev_attr_measurement.attr,
-	&dev_attr_retry_rate.attr,
-	&dev_attr_status.attr,
-	&dev_attr_temperature.attr,
-	&dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	&dev_attr_debug_level.attr,
-#endif
-	NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
-	.name = NULL,		/* put in device directory */
-	.attrs = iwl3945_sysfs_entries,
-};
-
-struct ieee80211_ops iwl3945_hw_ops = {
-	.tx = iwl3945_mac_tx,
-	.start = iwl3945_mac_start,
-	.stop = iwl3945_mac_stop,
-	.add_interface = iwl_legacy_mac_add_interface,
-	.remove_interface = iwl_legacy_mac_remove_interface,
-	.change_interface = iwl_legacy_mac_change_interface,
-	.config = iwl_legacy_mac_config,
-	.configure_filter = iwl3945_configure_filter,
-	.set_key = iwl3945_mac_set_key,
-	.conf_tx = iwl_legacy_mac_conf_tx,
-	.reset_tsf = iwl_legacy_mac_reset_tsf,
-	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
-	.hw_scan = iwl_legacy_mac_hw_scan,
-	.sta_add = iwl3945_mac_sta_add,
-	.sta_remove = iwl_legacy_mac_sta_remove,
-	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
-	int ret;
-	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
-	priv->retry_rate = 1;
-	priv->beacon_skb = NULL;
-
-	spin_lock_init(&priv->sta_lock);
-	spin_lock_init(&priv->hcmd_lock);
-
-	INIT_LIST_HEAD(&priv->free_frames);
-
-	mutex_init(&priv->mutex);
-
-	priv->ieee_channels = NULL;
-	priv->ieee_rates = NULL;
-	priv->band = IEEE80211_BAND_2GHZ;
-
-	priv->iw_mode = NL80211_IFTYPE_STATION;
-	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
-	/* initialize force reset */
-	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
-	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
-		IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
-			 eeprom->version);
-		ret = -EINVAL;
-		goto err;
-	}
-	ret = iwl_legacy_init_channel_map(priv);
-	if (ret) {
-		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-		goto err;
-	}
-
-	/* Set up txpower settings in driver for all channels */
-	if (iwl3945_txpower_set_from_eeprom(priv)) {
-		ret = -EIO;
-		goto err_free_channel_map;
-	}
-
-	ret = iwl_legacy_init_geos(priv);
-	if (ret) {
-		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-		goto err_free_channel_map;
-	}
-	iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
-	return 0;
-
-err_free_channel_map:
-	iwl_legacy_free_channel_map(priv);
-err:
-	return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST	200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
-	int ret;
-	struct ieee80211_hw *hw = priv->hw;
-
-	hw->rate_control_algorithm = "iwl-3945-rs";
-	hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
-	hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_SPECTRUM_MGMT;
-
-	hw->wiphy->interface_modes =
-		priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
-	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
-			    WIPHY_FLAG_IBSS_RSN;
-
-	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
-	/* we create the 802.11 header and a zero-length SSID element */
-	hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
-
-	/* Default value; 4 EDCA QOS priorities */
-	hw->queues = 4;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&priv->bands[IEEE80211_BAND_2GHZ];
-
-	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&priv->bands[IEEE80211_BAND_5GHZ];
-
-	iwl_legacy_leds_init(priv);
-
-	ret = ieee80211_register_hw(priv->hw);
-	if (ret) {
-		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-		return ret;
-	}
-	priv->mac80211_registered = 1;
-
-	return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	int err = 0, i;
-	struct iwl_priv *priv;
-	struct ieee80211_hw *hw;
-	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
-	struct iwl3945_eeprom *eeprom;
-	unsigned long flags;
-
-	/***********************
-	 * 1. Allocating HW data
-	 * ********************/
-
-	/* mac80211 allocates memory for this device instance, including
-	 *   space for this driver's private structure */
-	hw = iwl_legacy_alloc_all(cfg);
-	if (hw == NULL) {
-		pr_err("Can not allocate network device\n");
-		err = -ENOMEM;
-		goto out;
-	}
-	priv = hw->priv;
-	SET_IEEE80211_DEV(hw, &pdev->dev);
-
-	priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
-	/* 3945 has only one valid context */
-	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
-	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
-		priv->contexts[i].ctxid = i;
-
-	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
-	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
-	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
-	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
-	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
-	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
-	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC);
-	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
-	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
-	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
-	/*
-	 * Disabling hardware scan means that mac80211 will perform scans
-	 * "the hard way", rather than using device's scan.
-	 */
-	if (iwl3945_mod_params.disable_hw_scan) {
-		IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
-		iwl3945_hw_ops.hw_scan = NULL;
-	}
-
-	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-	priv->cfg = cfg;
-	priv->pci_dev = pdev;
-	priv->inta_mask = CSR_INI_SET_MASK;
-
-	if (iwl_legacy_alloc_traffic_mem(priv))
-		IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
-	/***************************
-	 * 2. Initializing PCI bus
-	 * *************************/
-	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-				PCIE_LINK_STATE_CLKPM);
-
-	if (pci_enable_device(pdev)) {
-		err = -ENODEV;
-		goto out_ieee80211_free_hw;
-	}
-
-	pci_set_master(pdev);
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err) {
-		IWL_WARN(priv, "No suitable DMA available.\n");
-		goto out_pci_disable_device;
-	}
-
-	pci_set_drvdata(pdev, priv);
-	err = pci_request_regions(pdev, DRV_NAME);
-	if (err)
-		goto out_pci_disable_device;
-
-	/***********************
-	 * 3. Read REV Register
-	 * ********************/
-	priv->hw_base = pci_iomap(pdev, 0, 0);
-	if (!priv->hw_base) {
-		err = -ENODEV;
-		goto out_pci_release_regions;
-	}
-
-	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
-			(unsigned long long) pci_resource_len(pdev, 0));
-	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-	/* We disable the RETRY_TIMEOUT register (0x41) to keep
-	 * PCI Tx retries from interfering with C3 CPU state */
-	pci_write_config_byte(pdev, 0x41, 0x00);
-
-	/* these spin locks will be used in apm_ops.init and EEPROM access
-	 * we should init now
-	 */
-	spin_lock_init(&priv->reg_lock);
-	spin_lock_init(&priv->lock);
-
-	/*
-	 * stop and reset the on-board processor just in case it is in a
-	 * strange state ... like being left stranded by a primary kernel
-	 * and this is now the kdump kernel trying to start up
-	 */
-	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-	/***********************
-	 * 4. Read EEPROM
-	 * ********************/
-
-	/* Read the EEPROM */
-	err = iwl_legacy_eeprom_init(priv);
-	if (err) {
-		IWL_ERR(priv, "Unable to init EEPROM\n");
-		goto out_iounmap;
-	}
-	/* MAC Address location in EEPROM same for 3945/4965 */
-	eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
-	SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
-	/***********************
-	 * 5. Setup HW Constants
-	 * ********************/
-	/* Device-specific setup */
-	if (iwl3945_hw_set_hw_params(priv)) {
-		IWL_ERR(priv, "failed to set hw settings\n");
-		goto out_eeprom_free;
-	}
-
-	/***********************
-	 * 6. Setup priv
-	 * ********************/
-
-	err = iwl3945_init_drv(priv);
-	if (err) {
-		IWL_ERR(priv, "initializing driver failed\n");
-		goto out_unset_hw_params;
-	}
-
-	IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
-		priv->cfg->name);
-
-	/***********************
-	 * 7. Setup Services
-	 * ********************/
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_legacy_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	pci_enable_msi(priv->pci_dev);
-
-	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
-			  IRQF_SHARED, DRV_NAME, priv);
-	if (err) {
-		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
-		goto out_disable_msi;
-	}
-
-	err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-	if (err) {
-		IWL_ERR(priv, "failed to create sysfs device attributes\n");
-		goto out_release_irq;
-	}
-
-	iwl_legacy_set_rxon_channel(priv,
-			     &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
-			     &priv->contexts[IWL_RXON_CTX_BSS]);
-	iwl3945_setup_deferred_work(priv);
-	iwl3945_setup_rx_handlers(priv);
-	iwl_legacy_power_initialize(priv);
-
-	/*********************************
-	 * 8. Setup and Register mac80211
-	 * *******************************/
-
-	iwl_legacy_enable_interrupts(priv);
-
-	err = iwl3945_setup_mac(priv);
-	if (err)
-		goto  out_remove_sysfs;
-
-	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
-	if (err)
-		IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
-	/* Start monitoring the killswitch */
-	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-			   2 * HZ);
-
-	return 0;
-
- out_remove_sysfs:
-	destroy_workqueue(priv->workqueue);
-	priv->workqueue = NULL;
-	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
-	free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
-	pci_disable_msi(priv->pci_dev);
-	iwl_legacy_free_geos(priv);
-	iwl_legacy_free_channel_map(priv);
- out_unset_hw_params:
-	iwl3945_unset_hw_params(priv);
- out_eeprom_free:
-	iwl_legacy_eeprom_free(priv);
- out_iounmap:
-	pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
-	pci_release_regions(pdev);
- out_pci_disable_device:
-	pci_set_drvdata(pdev, NULL);
-	pci_disable_device(pdev);
- out_ieee80211_free_hw:
-	iwl_legacy_free_traffic_mem(priv);
-	ieee80211_free_hw(priv->hw);
- out:
-	return err;
-}
-
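-/*
- * Tear down everything set up in probe: mac80211 (or iwl3945_down if it was
- * never registered), sysfs attributes, IRQ/MSI, Rx/Tx queues, uCode buffers
- * and finally the PCI resources.
- */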
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
-	struct iwl_priv *priv = pci_get_drvdata(pdev);
-	unsigned long flags;
-
-	if (!priv)
-		return;
-
-	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
-	iwl_legacy_dbgfs_unregister(priv);
-
-	set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	iwl_legacy_leds_exit(priv);
-
-	if (priv->mac80211_registered) {
-		ieee80211_unregister_hw(priv->hw);
-		priv->mac80211_registered = 0;
-	} else {
-		iwl3945_down(priv);
-	}
-
-	/*
-	 * Make sure device is reset to low power before unloading driver.
-	 * This may be redundant with iwl_down(), but there are paths to
-	 * run iwl_down() without calling apm_ops.stop(), and there are
-	 * paths to avoid running iwl_down() at all before leaving driver.
-	 * This (inexpensive) call *makes sure* device is reset.
-	 */
-	iwl_legacy_apm_stop(priv);
-
-	/* make sure we flush any pending irq or
-	 * tasklet for the driver
-	 */
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_legacy_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	iwl3945_synchronize_irq(priv);
-
-	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
-	cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
-	iwl3945_dealloc_ucode_pci(priv);
-
-	if (priv->rxq.bd)
-		iwl3945_rx_queue_free(priv, &priv->rxq);
-	iwl3945_hw_txq_ctx_free(priv);
-
-	iwl3945_unset_hw_params(priv);
-
-	/*netif_stop_queue(dev); */
-	flush_workqueue(priv->workqueue);
-
-	/* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
-	 * priv->workqueue... so we can't take down the workqueue
-	 * until now... */
-	destroy_workqueue(priv->workqueue);
-	priv->workqueue = NULL;
-	iwl_legacy_free_traffic_mem(priv);
-
-	free_irq(pdev->irq, priv);
-	pci_disable_msi(pdev);
-
-	pci_iounmap(pdev, priv->hw_base);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
-
-	iwl_legacy_free_channel_map(priv);
-	iwl_legacy_free_geos(priv);
-	kfree(priv->scan_cmd);
-	if (priv->beacon_skb)
-		dev_kfree_skb(priv->beacon_skb);
-
-	ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
-	.name = DRV_NAME,
-	.id_table = iwl3945_hw_card_ids,
-	.probe = iwl3945_pci_probe,
-	.remove = __devexit_p(iwl3945_pci_remove),
-	.driver.pm = IWL_LEGACY_PM_OPS,
-};
-
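-/* Module entry point: register the rate control algorithm, then the PCI driver */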
-static int __init iwl3945_init(void)
-{
-
-	int ret;
-	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
-	pr_info(DRV_COPYRIGHT "\n");
-
-	ret = iwl3945_rate_control_register();
-	if (ret) {
-		pr_err("Unable to register rate control algorithm: %d\n", ret);
-		return ret;
-	}
-
-	ret = pci_register_driver(&iwl3945_driver);
-	if (ret) {
-		pr_err("Unable to initialize PCI module\n");
-		goto error_register;
-	}
-
-	return ret;
-
-error_register:
-	iwl3945_rate_control_unregister();
-	return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
-	pci_unregister_driver(&iwl3945_driver);
-	iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
-		"use software crypto (default 1 [software])");
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
-		int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9e..0000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME        "iwl4965"
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
-#include "iwl-4965-calib.h"
-#include "iwl-4965.h"
-#include "iwl-4965-led.h"
-
-
-/******************************************************************************
- *
- * module boilerplate
- *
- ******************************************************************************/
-
-/*
- * module name, copyright, version, etc.
- */
-#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi 4965 driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION     IWLWIFI_VERSION VD
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
-
-void iwl4965_update_chain_flags(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx;
-
-	if (priv->cfg->ops->hcmd->set_rxon_chain) {
-		for_each_context(priv, ctx) {
-			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-			if (ctx->active.rx_chain != ctx->staging.rx_chain)
-				iwl_legacy_commit_rxon(priv, ctx);
-		}
-	}
-}
-
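-/* Release all frames left on the pre-allocated free_frames list */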
-static void iwl4965_clear_free_frames(struct iwl_priv *priv)
-{
-	struct list_head *element;
-
-	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
-		       priv->frames_count);
-
-	while (!list_empty(&priv->free_frames)) {
-		element = priv->free_frames.next;
-		list_del(element);
-		kfree(list_entry(element, struct iwl_frame, list));
-		priv->frames_count--;
-	}
-
-	if (priv->frames_count) {
-		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
-			    priv->frames_count);
-		priv->frames_count = 0;
-	}
-}
-
-static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
-{
-	struct iwl_frame *frame;
-	struct list_head *element;
-	if (list_empty(&priv->free_frames)) {
-		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
-		if (!frame) {
-			IWL_ERR(priv, "Could not allocate frame!\n");
-			return NULL;
-		}
-
-		priv->frames_count++;
-		return frame;
-	}
-
-	element = priv->free_frames.next;
-	list_del(element);
-	return list_entry(element, struct iwl_frame, list);
-}
-
-static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
-{
-	memset(frame, 0, sizeof(*frame));
-	list_add(&frame->list, &priv->free_frames);
-}
-
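-/*
- * Copy the cached beacon (priv->beacon_skb) into the TX beacon command
- * buffer.  Returns the number of bytes copied, or 0 if there is no beacon
- * or it does not fit in the 'left' bytes available.
- */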
-static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
-				 struct ieee80211_hdr *hdr,
-				 int left)
-{
-	lockdep_assert_held(&priv->mutex);
-
-	if (!priv->beacon_skb)
-		return 0;
-
-	if (priv->beacon_skb->len > left)
-		return 0;
-
-	memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
-	return priv->beacon_skb->len;
-}
-
-/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
-static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
-			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
-			       u8 *beacon, u32 frame_size)
-{
-	u16 tim_idx;
-	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
-
-	/*
-	 * The index is relative to frame start but we start looking at the
-	 * variable-length part of the beacon.
-	 */
-	tim_idx = mgmt->u.beacon.variable - beacon;
-
-	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
-	while ((tim_idx < (frame_size - 2)) &&
-			(beacon[tim_idx] != WLAN_EID_TIM))
-		tim_idx += beacon[tim_idx+1] + 2;
-
-	/* If TIM field was found, set variables */
-	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
-		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
-		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
-	} else
-		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
-}
-
-static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
-				       struct iwl_frame *frame)
-{
-	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
-	u32 frame_size;
-	u32 rate_flags;
-	u32 rate;
-	/*
-	 * We have to set up the TX command, the TX Beacon command, and the
-	 * beacon contents.
-	 */
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (!priv->beacon_ctx) {
-		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
-		return 0;
-	}
-
-	/* Initialize memory */
-	tx_beacon_cmd = &frame->u.beacon;
-	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
-	/* Set up TX beacon contents */
-	frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
-				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
-		return 0;
-	if (!frame_size)
-		return 0;
-
-	/* Set up TX command fields */
-	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
-	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
-		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
-
-	/* Set up TX beacon command fields */
-	iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
-			   frame_size);
-
-	/* Set up packet rate and flags */
-	rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
-	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-					      priv->hw_params.valid_tx_ant);
-	rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
-		rate_flags |= RATE_MCS_CCK_MSK;
-	tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
-			rate_flags);
-
-	return sizeof(*tx_beacon_cmd) + frame_size;
-}
-
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
-{
-	struct iwl_frame *frame;
-	unsigned int frame_size;
-	int rc;
-
-	frame = iwl4965_get_free_frame(priv);
-	if (!frame) {
-		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
-			  "command.\n");
-		return -ENOMEM;
-	}
-
-	frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
-	if (!frame_size) {
-		IWL_ERR(priv, "Error configuring the beacon command\n");
-		iwl4965_free_frame(priv, frame);
-		return -EINVAL;
-	}
-
-	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
-			      &frame->u.cmd[0]);
-
-	iwl4965_free_frame(priv, frame);
-
-	return rc;
-}
-
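-/*
- * TFD transmit-buffer (TB) helpers: the low 32 bits of the DMA address live
- * in tb->lo, while the upper 4 address bits (36-bit DMA) and the 12-bit
- * length share the 16-bit tb->hi_n_len field (length in the upper 12 bits).
- */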
-static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		addr |=
-		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
-	return addr;
-}
-
-static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				  dma_addr_t addr, u16 len)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
-
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
-
-	tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
-	return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
-	struct iwl_tfd *tfd;
-	struct pci_dev *dev = priv->pci_dev;
-	int index = txq->q.read_ptr;
-	int i;
-	int num_tbs;
-
-	tfd = &tfd_tmp[index];
-
-	/* Sanity check on number of chunks */
-	num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite a serious situation */
-		return;
-	}
-
-	/* Unmap tx_cmd */
-	if (num_tbs)
-		pci_unmap_single(dev,
-				dma_unmap_addr(&txq->meta[index], mapping),
-				dma_unmap_len(&txq->meta[index], len),
-				PCI_DMA_BIDIRECTIONAL);
-
-	/* Unmap chunks, if any. */
-	for (i = 1; i < num_tbs; i++)
-		pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
-				iwl4965_tfd_tb_get_len(tfd, i),
-				PCI_DMA_TODEVICE);
-
-	/* free SKB */
-	if (txq->txb) {
-		struct sk_buff *skb;
-
-		skb = txq->txb[txq->q.read_ptr].skb;
-
-		/* can be called from irqs-disabled context */
-		if (skb) {
-			dev_kfree_skb_any(skb);
-			txq->txb[txq->q.read_ptr].skb = NULL;
-		}
-	}
-}
-
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len,
-				 u8 reset, u8 pad)
-{
-	struct iwl_queue *q;
-	struct iwl_tfd *tfd, *tfd_tmp;
-	u32 num_tbs;
-
-	q = &txq->q;
-	tfd_tmp = (struct iwl_tfd *)txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
-
-	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
-
-	num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum of 20 Tx buffers */
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Error cannot send more than %d chunks\n",
-			  IWL_NUM_OF_TBS);
-		return -EINVAL;
-	}
-
-	BUG_ON(addr & ~DMA_BIT_MASK(36));
-	if (unlikely(addr & ~IWL_TX_DMA_MASK))
-		IWL_ERR(priv, "Unaligned address = %llx\n",
-			  (unsigned long long)addr);
-
-	iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
-
-	return 0;
-}
-
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
-			 struct iwl_tx_queue *txq)
-{
-	int txq_id = txq->q.id;
-
-	/* Circular buffer (TFD queue in DRAM) physical base address */
-	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
-			     txq->q.dma_addr >> 8);
-
-	return 0;
-}
-
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_alive_resp *palive;
-	struct delayed_work *pwork;
-
-	palive = &pkt->u.alive_frame;
-
-	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
-		       "0x%01X 0x%01X\n",
-		       palive->is_valid, palive->ver_type,
-		       palive->ver_subtype);
-
-	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
-		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-		memcpy(&priv->card_alive_init,
-		       &pkt->u.alive_frame,
-		       sizeof(struct iwl_init_alive_resp));
-		pwork = &priv->init_alive_start;
-	} else {
-		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-		memcpy(&priv->card_alive, &pkt->u.alive_frame,
-		       sizeof(struct iwl_alive_resp));
-		pwork = &priv->alive_start;
-	}
-
-	/* We delay the ALIVE response by 5ms to
-	 * give the HW RF Kill time to activate... */
-	if (palive->is_valid == UCODE_VALID_OK)
-		queue_delayed_work(priv->workqueue, pwork,
-				   msecs_to_jiffies(5));
-	else
-		IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-/**
- * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
- *
- * This callback is provided in order to send a statistics request.
- *
- * This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
- * was received.  We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER.
- */
-static void iwl4965_bg_statistics_periodic(unsigned long data)
-{
-	struct iwl_priv *priv = (struct iwl_priv *)data;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	/* don't send host command if rf-kill is on */
-	if (!iwl_legacy_is_ready_rf(priv))
-		return;
-
-	iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
-}
-
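-/*
- * Beacon notification from uCode: record the IBSS manager status
- * (priv->ibss_manager) reported with each transmitted beacon.
- */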
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl4965_beacon_notif *beacon =
-		(struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
-	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-		"tsf %d %d rate %d\n",
-		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
-		beacon->beacon_notify_hdr.failure_frame,
-		le32_to_cpu(beacon->ibss_mgr_status),
-		le32_to_cpu(beacon->high_tsf),
-		le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
-	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	IWL_DEBUG_POWER(priv, "Stop all queues\n");
-
-	if (priv->mac80211_registered)
-		ieee80211_stop_queues(priv->hw);
-
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-			CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
-	iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	if (!iwl_grab_nic_access(priv))
-		iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
-				    struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
-	unsigned long status = priv->status;
-
-	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
-			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
-			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
-			  (flags & CT_CARD_DISABLED) ?
-			  "Reached" : "Not reached");
-
-	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
-		     CT_CARD_DISABLED)) {
-
-		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-		iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
-					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
-		if (!(flags & RXON_CARD_DISABLED)) {
-			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-			iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
-					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-		}
-	}
-
-	if (flags & CT_CARD_DISABLED)
-		iwl4965_perform_ct_kill_task(priv);
-
-	if (flags & HW_CARD_DISABLED)
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	if (!(flags & RXON_CARD_DISABLED))
-		iwl_legacy_scan_cancel(priv);
-
-	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
-		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-			test_bit(STATUS_RF_KILL_HW, &priv->status));
-	else
-		wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
-{
-	priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
-	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
-	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
-	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-			iwl_legacy_rx_spectrum_measure_notif;
-	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
-	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-	    iwl_legacy_rx_pm_debug_statistics_notif;
-	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-
-	/*
-	 * The same handler is used both for the REPLY to a discrete
-	 * statistics request from the host and for the periodic
-	 * statistics notifications (after received beacons) from the uCode.
-	 */
-	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
-	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
-
-	iwl_legacy_setup_rx_scan_handlers(priv);
-
-	/* status change handler */
-	priv->rx_handlers[CARD_STATE_NOTIFICATION] =
-					iwl4965_rx_card_state_notif;
-
-	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
-	    iwl4965_rx_missed_beacon_notif;
-	/* Rx handlers */
-	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
-	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
-	/* block ack */
-	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
-	/* Set up hardware specific Rx handlers */
-	priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
-/**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-void iwl4965_rx_handle(struct iwl_priv *priv)
-{
-	struct iwl_rx_mem_buffer *rxb;
-	struct iwl_rx_packet *pkt;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	u32 r, i;
-	int reclaim;
-	unsigned long flags;
-	u8 fill_rx = 0;
-	u32 count = 8;
-	int total_empty;
-
-	/* uCode's read index (stored in shared DRAM) indicates the last Rx
-	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
-	i = rxq->read;
-
-	/* Rx interrupt, but nothing sent from uCode */
-	if (i == r)
-		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-	/* calculate how many frames need to be restocked after handling RX */
-	total_empty = r - rxq->write_actual;
-	if (total_empty < 0)
-		total_empty += RX_QUEUE_SIZE;
-
-	if (total_empty > (RX_QUEUE_SIZE / 2))
-		fill_rx = 1;
-
-	while (i != r) {
-		int len;
-
-		rxb = rxq->queue[i];
-
-		/* If an RXB doesn't have a Rx queue slot associated with it,
-		 * then a bug has been introduced in the queue refilling
-		 * routines -- catch it here */
-		BUG_ON(rxb == NULL);
-
-		rxq->queue[i] = NULL;
-
-		pci_unmap_page(priv->pci_dev, rxb->page_dma,
-			       PAGE_SIZE << priv->hw_params.rx_page_order,
-			       PCI_DMA_FROMDEVICE);
-		pkt = rxb_addr(rxb);
-
-		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-		len += sizeof(u32); /* account for status word */
-		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
-		/* Reclaim a command buffer only if this packet is a response
-		 *   to a (driver-originated) command.
-		 * If the packet (e.g. Rx frame) originated from uCode,
-		 *   there is no command buffer to reclaim.
-		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-		 *   but apparently a few don't get set; catch them here. */
-		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
-			(pkt->hdr.cmd != REPLY_RX) &&
-			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
-			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
-			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
-			(pkt->hdr.cmd != REPLY_TX);
-
-		/* Based on type of command response or notification,
-		 *   handle those that need handling via function in
-		 *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
-		if (priv->rx_handlers[pkt->hdr.cmd]) {
-			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
-				i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
-				pkt->hdr.cmd);
-			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
-		} else {
-			/* No handling needed */
-			IWL_DEBUG_RX(priv,
-				"r %d i %d No handler needed for %s, 0x%02x\n",
-				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
-				pkt->hdr.cmd);
-		}
-
-		/*
-		 * XXX: After here, we should always check rxb->page
-		 * against NULL before touching it or its virtual
-		 * memory (pkt). Because some rx_handler might have
-		 * already taken or freed the pages.
-		 */
-
-		if (reclaim) {
-			/* Invoke any callbacks, transfer the buffer to caller,
-			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
-			 * as we reclaim the driver command queue */
-			if (rxb->page)
-				iwl_legacy_tx_cmd_complete(priv, rxb);
-			else
-				IWL_WARN(priv, "Claim null rxb?\n");
-		}
-
-		/* Reuse the page if possible. For notification packets and
-		 * SKBs that fail to Rx correctly, add them back into the
-		 * rx_free list for reuse later. */
-		spin_lock_irqsave(&rxq->lock, flags);
-		if (rxb->page != NULL) {
-			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
-				0, PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
-			list_add_tail(&rxb->list, &rxq->rx_free);
-			rxq->free_count++;
-		} else
-			list_add_tail(&rxb->list, &rxq->rx_used);
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		i = (i + 1) & RX_QUEUE_MASK;
-		/* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode won't assert. */
-		if (fill_rx) {
-			count++;
-			if (count >= 8) {
-				rxq->read = i;
-				iwl4965_rx_replenish_now(priv);
-				count = 0;
-			}
-		}
-	}
-
-	/* Backtrack one entry */
-	rxq->read = i;
-	if (fill_rx)
-		iwl4965_rx_replenish_now(priv);
-	else
-		iwl4965_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
-{
-	/* wait to make sure we flush the pending tasklet */
-	synchronize_irq(priv->pci_dev->irq);
-	tasklet_kill(&priv->irq_tasklet);
-}
-
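-/*
- * Interrupt bottom half: acknowledge pending CSR_INT / CSR_FH_INT_STATUS
- * bits and dispatch each cause (HW error, RF-kill, CT-kill, wakeup, Rx/Tx)
- * before re-enabling interrupts.
- */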
-static void iwl4965_irq_tasklet(struct iwl_priv *priv)
-{
-	u32 inta, handled = 0;
-	u32 inta_fh;
-	unsigned long flags;
-	u32 i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	u32 inta_mask;
-#endif
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Ack/clear/reset pending uCode interrupts.
-	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
-	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
-	inta = iwl_read32(priv, CSR_INT);
-	iwl_write32(priv, CSR_INT, inta);
-
-	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
-	 * Any new interrupts that happen after this, either while we're
-	 * in this tasklet, or later, will show up in next ISR/tasklet. */
-	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
-		/* just for debug */
-		inta_mask = iwl_read32(priv, CSR_INT_MASK);
-		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-			      inta, inta_mask, inta_fh);
-	}
-#endif
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
-	 * atomic, make sure that inta covers all the interrupts that
-	 * we've discovered, even if FH interrupt came in just after
-	 * reading CSR_INT. */
-	if (inta_fh & CSR49_FH_INT_RX_MASK)
-		inta |= CSR_INT_BIT_FH_RX;
-	if (inta_fh & CSR49_FH_INT_TX_MASK)
-		inta |= CSR_INT_BIT_FH_TX;
-
-	/* Now service all interrupt bits discovered above. */
-	if (inta & CSR_INT_BIT_HW_ERR) {
-		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
-
-		/* Tell the device to stop sending interrupts */
-		iwl_legacy_disable_interrupts(priv);
-
-		priv->isr_stats.hw++;
-		iwl_legacy_irq_handle_error(priv);
-
-		handled |= CSR_INT_BIT_HW_ERR;
-
-		return;
-	}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-		/* NIC fires this, but we don't use it, redundant with WAKEUP */
-		if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
-				      "the frame(s).\n");
-			priv->isr_stats.sch++;
-		}
-
-		/* Alive notification via Rx interrupt will do the real work */
-		if (inta & CSR_INT_BIT_ALIVE) {
-			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
-			priv->isr_stats.alive++;
-		}
-	}
-#endif
-	/* Safely ignore these bits for debug checks below */
-	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
-	/* HW RF KILL switch toggled */
-	if (inta & CSR_INT_BIT_RF_KILL) {
-		int hw_rf_kill = 0;
-		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
-			hw_rf_kill = 1;
-
-		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
-				hw_rf_kill ? "disable radio" : "enable radio");
-
-		priv->isr_stats.rfkill++;
-
-		/* The driver only loads ucode when setting the interface up,
-		 * but allows loading the ucode even if the radio
-		 * is killed. Hence update the killswitch state here; the
-		 * rfkill handler will take care of restarting if needed.
-		 */
-		if (!test_bit(STATUS_ALIVE, &priv->status)) {
-			if (hw_rf_kill)
-				set_bit(STATUS_RF_KILL_HW, &priv->status);
-			else
-				clear_bit(STATUS_RF_KILL_HW, &priv->status);
-			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
-		}
-
-		handled |= CSR_INT_BIT_RF_KILL;
-	}
-
-	/* Chip got too hot and stopped itself */
-	if (inta & CSR_INT_BIT_CT_KILL) {
-		IWL_ERR(priv, "Microcode CT kill error detected.\n");
-		priv->isr_stats.ctkill++;
-		handled |= CSR_INT_BIT_CT_KILL;
-	}
-
-	/* Error detected by uCode */
-	if (inta & CSR_INT_BIT_SW_ERR) {
-		IWL_ERR(priv, "Microcode SW error detected. "
-			" Restarting 0x%X.\n", inta);
-		priv->isr_stats.sw++;
-		iwl_legacy_irq_handle_error(priv);
-		handled |= CSR_INT_BIT_SW_ERR;
-	}
-
-	/*
-	 * uCode wakes up after power-down sleep.
-	 * Tell device about any new tx or host commands enqueued,
-	 * and about any Rx buffers made available while asleep.
-	 */
-	if (inta & CSR_INT_BIT_WAKEUP) {
-		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
-		for (i = 0; i < priv->hw_params.max_txq_num; i++)
-			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
-		priv->isr_stats.wakeup++;
-		handled |= CSR_INT_BIT_WAKEUP;
-	}
-
-	/* All uCode command responses, including Tx command responses,
-	 * Rx "responses" (frame-received notification), and other
-	 * notifications from uCode come through here */
-	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-		iwl4965_rx_handle(priv);
-		priv->isr_stats.rx++;
-		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
-	}
-
-	/* This "Tx" DMA channel is used only for loading uCode */
-	if (inta & CSR_INT_BIT_FH_TX) {
-		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
-		priv->isr_stats.tx++;
-		handled |= CSR_INT_BIT_FH_TX;
-		/* Wake up uCode load routine, now that load is complete */
-		priv->ucode_write_complete = 1;
-		wake_up(&priv->wait_command_queue);
-	}
-
-	if (inta & ~handled) {
-		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
-		priv->isr_stats.unhandled++;
-	}
-
-	if (inta & ~(priv->inta_mask)) {
-		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
-			 inta & ~priv->inta_mask);
-		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
-	}
-
-	/* Re-enable all interrupts */
-	/* only Re-enable if disabled by irq */
-	if (test_bit(STATUS_INT_ENABLED, &priv->status))
-		iwl_legacy_enable_interrupts(priv);
-	/* Re-enable RF_KILL if it occurred */
-	else if (handled & CSR_INT_BIT_RF_KILL)
-		iwl_legacy_enable_rfkill_int(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-		inta = iwl_read32(priv, CSR_INT);
-		inta_mask = iwl_read32(priv, CSR_INT_MASK);
-		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-		IWL_DEBUG_ISR(priv,
-			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
-			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
-	}
-#endif
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl4965_show_debug_level(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl4965_store_debug_level(struct device *d,
-				struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	unsigned long val;
-	int ret;
-
-	ret = strict_strtoul(buf, 0, &val);
-	if (ret)
-		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
-	else {
-		priv->debug_level = val;
-		if (iwl_legacy_alloc_traffic_mem(priv))
-			IWL_ERR(priv,
-				"Not enough memory to generate traffic log\n");
-	}
-	return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-			iwl4965_show_debug_level, iwl4965_store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-
-static ssize_t iwl4965_show_temperature(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-
-	if (!iwl_legacy_is_alive(priv))
-		return -EAGAIN;
-
-	return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
-
-static ssize_t iwl4965_show_tx_power(struct device *d,
-			     struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-
-	if (!iwl_legacy_is_ready_rf(priv))
-		return sprintf(buf, "off\n");
-	else
-		return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl4965_store_tx_power(struct device *d,
-			      struct device_attribute *attr,
-			      const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	unsigned long val;
-	int ret;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (ret)
-		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
-	else {
-		ret = iwl_legacy_set_tx_power(priv, val, false);
-		if (ret)
-			IWL_ERR(priv, "failed setting tx power (0x%d).\n",
-				ret);
-		else
-			ret = count;
-	}
-	return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
-			iwl4965_show_tx_power, iwl4965_store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
-	&dev_attr_temperature.attr,
-	&dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-	&dev_attr_debug_level.attr,
-#endif
-	NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
-	.name = NULL,		/* put in device directory */
-	.attrs = iwl_sysfs_entries,
-};
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
-{
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-static void iwl4965_nic_start(struct iwl_priv *priv)
-{
-	/* Remove all resets to allow NIC to operate */
-	iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
-					void *context);
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
-						u32 max_probe_length);
-
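-/*
- * Request the next firmware image to try, asynchronously: start at
- * ucode_api_max on the first attempt and step down toward ucode_api_min
- * on each subsequent call (first == false).
- */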
-static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
-{
-	const char *name_pre = priv->cfg->fw_name_pre;
-	char tag[8];
-
-	if (first) {
-		priv->fw_index = priv->cfg->ucode_api_max;
-		sprintf(tag, "%d", priv->fw_index);
-	} else {
-		priv->fw_index--;
-		sprintf(tag, "%d", priv->fw_index);
-	}
-
-	if (priv->fw_index < priv->cfg->ucode_api_min) {
-		IWL_ERR(priv, "no suitable firmware found!\n");
-		return -ENOENT;
-	}
-
-	sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
-	IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
-		       priv->firmware_name);
-
-	return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
-				       &priv->pci_dev->dev, GFP_KERNEL, priv,
-				       iwl4965_ucode_callback);
-}
-
-struct iwl4965_firmware_pieces {
-	const void *inst, *data, *init, *init_data, *boot;
-	size_t inst_size, data_size, init_size, init_data_size, boot_size;
-};
-
-static int iwl4965_load_firmware(struct iwl_priv *priv,
-				       const struct firmware *ucode_raw,
-				       struct iwl4965_firmware_pieces *pieces)
-{
-	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
-	u32 api_ver, hdr_size;
-	const u8 *src;
-
-	priv->ucode_ver = le32_to_cpu(ucode->ver);
-	api_ver = IWL_UCODE_API(priv->ucode_ver);
-
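-	/* All API versions handled here use the 24-byte v1 header:
-	 * a version word followed by five section sizes */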
-	switch (api_ver) {
-	default:
-	case 0:
-	case 1:
-	case 2:
-		hdr_size = 24;
-		if (ucode_raw->size < hdr_size) {
-			IWL_ERR(priv, "File size too small!\n");
-			return -EINVAL;
-		}
-		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
-		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
-		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
-		pieces->init_data_size =
-				le32_to_cpu(ucode->v1.init_data_size);
-		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
-		src = ucode->v1.data;
-		break;
-	}
-
-	/* Verify size of file vs. image size info in file's header */
-	if (ucode_raw->size != hdr_size + pieces->inst_size +
-				pieces->data_size + pieces->init_size +
-				pieces->init_data_size + pieces->boot_size) {
-
-		IWL_ERR(priv,
-			"uCode file size %d does not match expected size\n",
-			(int)ucode_raw->size);
-		return -EINVAL;
-	}
-
-	pieces->inst = src;
-	src += pieces->inst_size;
-	pieces->data = src;
-	src += pieces->data_size;
-	pieces->init = src;
-	src += pieces->init_size;
-	pieces->init_data = src;
-	src += pieces->init_data_size;
-	pieces->boot = src;
-	src += pieces->boot_size;
-
-	return 0;
-}
-
-/**
- * iwl4965_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void
-iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
-	struct iwl_priv *priv = context;
-	struct iwl_ucode_header *ucode;
-	int err;
-	struct iwl4965_firmware_pieces pieces;
-	const unsigned int api_max = priv->cfg->ucode_api_max;
-	const unsigned int api_min = priv->cfg->ucode_api_min;
-	u32 api_ver;
-
-	u32 max_probe_length = 200;
-	u32 standard_phy_calibration_size =
-			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
-	memset(&pieces, 0, sizeof(pieces));
-
-	if (!ucode_raw) {
-		if (priv->fw_index <= priv->cfg->ucode_api_max)
-			IWL_ERR(priv,
-				"request for firmware file '%s' failed.\n",
-				priv->firmware_name);
-		goto try_again;
-	}
-
-	IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
-		       priv->firmware_name, ucode_raw->size);
-
-	/* Make sure that we got at least the API version number */
-	if (ucode_raw->size < 4) {
-		IWL_ERR(priv, "File size way too small!\n");
-		goto try_again;
-	}
-
-	/* Data from ucode file:  header followed by uCode images */
-	ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
-	err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
-
-	if (err)
-		goto try_again;
-
-	api_ver = IWL_UCODE_API(priv->ucode_ver);
-
-	/*
-	 * api_ver should match the api version forming part of the
-	 * firmware filename ... but we don't check for that and only rely
-	 * on the API version read from firmware header from here on forward
-	 */
-	if (api_ver < api_min || api_ver > api_max) {
-		IWL_ERR(priv,
-			"Driver unable to support your firmware API. "
-			"Driver supports v%u, firmware is v%u.\n",
-			api_max, api_ver);
-		goto try_again;
-	}
-
-	if (api_ver != api_max)
-		IWL_ERR(priv,
-			"Firmware has old API version. Expected v%u, "
-			"got v%u. New firmware can be obtained "
-			"from http://www.intellinuxwireless.org.\n",
-			api_max, api_ver);
-
-	IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
-		 IWL_UCODE_MAJOR(priv->ucode_ver),
-		 IWL_UCODE_MINOR(priv->ucode_ver),
-		 IWL_UCODE_API(priv->ucode_ver),
-		 IWL_UCODE_SERIAL(priv->ucode_ver));
-
-	snprintf(priv->hw->wiphy->fw_version,
-		 sizeof(priv->hw->wiphy->fw_version),
-		 "%u.%u.%u.%u",
-		 IWL_UCODE_MAJOR(priv->ucode_ver),
-		 IWL_UCODE_MINOR(priv->ucode_ver),
-		 IWL_UCODE_API(priv->ucode_ver),
-		 IWL_UCODE_SERIAL(priv->ucode_ver));
-
-	/*
-	 * For any of the failures below (before allocating pci memory)
-	 * we will try to load a version with a smaller API -- maybe the
-	 * user just got a corrupted version of the latest API.
-	 */
-
-	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
-		       priv->ucode_ver);
-	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
-		       pieces.inst_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
-		       pieces.data_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
-		       pieces.init_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
-		       pieces.init_data_size);
-	IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
-		       pieces.boot_size);
-
-	/* Verify that uCode images will fit in card's SRAM */
-	if (pieces.inst_size > priv->hw_params.max_inst_size) {
-		IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
-			pieces.inst_size);
-		goto try_again;
-	}
-
-	if (pieces.data_size > priv->hw_params.max_data_size) {
-		IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
-			pieces.data_size);
-		goto try_again;
-	}
-
-	if (pieces.init_size > priv->hw_params.max_inst_size) {
-		IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
-			pieces.init_size);
-		goto try_again;
-	}
-
-	if (pieces.init_data_size > priv->hw_params.max_data_size) {
-		IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
-			pieces.init_data_size);
-		goto try_again;
-	}
-
-	if (pieces.boot_size > priv->hw_params.max_bsm_size) {
-		IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
-			pieces.boot_size);
-		goto try_again;
-	}
-
-	/* Allocate ucode buffers for card's bus-master loading ... */
-
-	/* Runtime instructions and 2 copies of data:
-	 * 1) unmodified from disk
-	 * 2) backup cache for save/restore during power-downs */
-	priv->ucode_code.len = pieces.inst_size;
-	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
-	priv->ucode_data.len = pieces.data_size;
-	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
-	priv->ucode_data_backup.len = pieces.data_size;
-	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
-	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
-	    !priv->ucode_data_backup.v_addr)
-		goto err_pci_alloc;
-
-	/* Initialization instructions and data */
-	if (pieces.init_size && pieces.init_data_size) {
-		priv->ucode_init.len = pieces.init_size;
-		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
-		priv->ucode_init_data.len = pieces.init_data_size;
-		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
-		if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
-			goto err_pci_alloc;
-	}
-
-	/* Bootstrap (instructions only, no data) */
-	if (pieces.boot_size) {
-		priv->ucode_boot.len = pieces.boot_size;
-		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
-		if (!priv->ucode_boot.v_addr)
-			goto err_pci_alloc;
-	}
-
-	/* Now that we can no longer fail, copy information */
-
-	priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
-	/* Copy images into buffers for card's bus-master reads ... */
-
-	/* Runtime instructions (first block of data in file) */
-	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
-			pieces.inst_size);
-	memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
-
-	IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
-		priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
-	/*
-	 * Runtime data
-	 * NOTE:  Copy into backup buffer will be done in iwl_up()
-	 */
-	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
-			pieces.data_size);
-	memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
-	memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
-
-	/* Initialization instructions */
-	if (pieces.init_size) {
-		IWL_DEBUG_INFO(priv,
-				"Copying (but not loading) init instr len %Zd\n",
-				pieces.init_size);
-		memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
-	}
-
-	/* Initialization data */
-	if (pieces.init_data_size) {
-		IWL_DEBUG_INFO(priv,
-				"Copying (but not loading) init data len %Zd\n",
-			       pieces.init_data_size);
-		memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
-		       pieces.init_data_size);
-	}
-
-	/* Bootstrap instructions */
-	IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
-			pieces.boot_size);
-	memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
-
-	/*
-	 * figure out the offset of the chain noise reset and gain commands
-	 * based on the size of the standard phy calibration commands table
-	 */
-	priv->_4965.phy_calib_chain_noise_reset_cmd =
-		standard_phy_calibration_size;
-	priv->_4965.phy_calib_chain_noise_gain_cmd =
-		standard_phy_calibration_size + 1;
-
-	/**************************************************
-	 * This is still part of probe() in a sense...
-	 *
-	 * 9. Setup and register with mac80211 and debugfs
-	 **************************************************/
-	err = iwl4965_mac_setup_register(priv, max_probe_length);
-	if (err)
-		goto out_unbind;
-
-	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
-	if (err)
-		IWL_ERR(priv,
-		"failed to create debugfs files. Ignoring error: %d\n", err);
-
-	err = sysfs_create_group(&priv->pci_dev->dev.kobj,
-					&iwl_attribute_group);
-	if (err) {
-		IWL_ERR(priv, "failed to create sysfs device attributes\n");
-		goto out_unbind;
-	}
-
-	/* We have our copies now, allow the OS to release its copy */
-	release_firmware(ucode_raw);
-	complete(&priv->_4965.firmware_loading_complete);
-	return;
-
- try_again:
-	/* try next, if any */
-	if (iwl4965_request_firmware(priv, false))
-		goto out_unbind;
-	release_firmware(ucode_raw);
-	return;
-
- err_pci_alloc:
-	IWL_ERR(priv, "failed to allocate pci memory\n");
-	iwl4965_dealloc_ucode_pci(priv);
- out_unbind:
-	complete(&priv->_4965.firmware_loading_complete);
-	device_release_driver(&priv->pci_dev->dev);
-	release_firmware(ucode_raw);
-}
-
-static const char * const desc_lookup_text[] = {
-	"OK",
-	"FAIL",
-	"BAD_PARAM",
-	"BAD_CHECKSUM",
-	"NMI_INTERRUPT_WDG",
-	"SYSASSERT",
-	"FATAL_ERROR",
-	"BAD_COMMAND",
-	"HW_ERROR_TUNE_LOCK",
-	"HW_ERROR_TEMPERATURE",
-	"ILLEGAL_CHAN_FREQ",
-	"VCC_NOT_STABLE",
-	"FH_ERROR",
-	"NMI_INTERRUPT_HOST",
-	"NMI_INTERRUPT_ACTION_PT",
-	"NMI_INTERRUPT_UNKNOWN",
-	"UCODE_VERSION_MISMATCH",
-	"HW_ERROR_ABS_LOCK",
-	"HW_ERROR_CAL_LOCK_FAIL",
-	"NMI_INTERRUPT_INST_ACTION_PT",
-	"NMI_INTERRUPT_DATA_ACTION_PT",
-	"NMI_TRM_HW_ER",
-	"NMI_INTERRUPT_TRM",
-	"NMI_INTERRUPT_BREAK_POINT",
-	"DEBUG_0",
-	"DEBUG_1",
-	"DEBUG_2",
-	"DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
-	{ "NMI_INTERRUPT_WDG", 0x34 },
-	{ "SYSASSERT", 0x35 },
-	{ "UCODE_VERSION_MISMATCH", 0x37 },
-	{ "BAD_COMMAND", 0x38 },
-	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
-	{ "FATAL_ERROR", 0x3D },
-	{ "NMI_TRM_HW_ERR", 0x46 },
-	{ "NMI_INTERRUPT_TRM", 0x4C },
-	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
-	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
-	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
-	{ "NMI_INTERRUPT_HOST", 0x66 },
-	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
-	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
-	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
-	{ "ADVANCED_SYSASSERT", 0 },
-};
-
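-/*
- * Translate a uCode error code into a printable name: small codes index
- * desc_lookup_text directly, larger ones are searched in advanced_lookup,
- * falling back to "ADVANCED_SYSASSERT" when no match is found.
- */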
-static const char *iwl4965_desc_lookup(u32 num)
-{
-	int i;
-	int max = ARRAY_SIZE(desc_lookup_text);
-
-	if (num < max)
-		return desc_lookup_text[num];
-
-	max = ARRAY_SIZE(advanced_lookup) - 1;
-	for (i = 0; i < max; i++) {
-		if (advanced_lookup[i].num == num)
-			break;
-	}
-	return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
-{
-	u32 data2, line;
-	u32 desc, time, count, base, data1;
-	u32 blink1, blink2, ilink1, ilink2;
-	u32 pc, hcmd;
-
-	if (priv->ucode_type == UCODE_INIT) {
-		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
-	} else {
-		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-	}
-
-	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
-		IWL_ERR(priv,
-			"Not valid error log pointer 0x%08X for %s uCode\n",
-			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
-		return;
-	}
-
-	count = iwl_legacy_read_targ_mem(priv, base);
-
-	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
-		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
-		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
-			priv->status, count);
-	}
-
-	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
-	priv->isr_stats.err_code = desc;
-	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
-	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
-	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
-	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
-	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
-	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
-	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
-	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
-	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
-	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
-
-	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
-					time, data1, data2, line,
-				      blink1, blink2, ilink1, ilink2);
-
-	IWL_ERR(priv, "Desc                                  Time       "
-		"data1      data2      line\n");
-	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
-		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
-	IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
-	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
-		pc, blink1, blink2, ilink1, ilink2, hcmd);
-}
-
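-/*
- * Send the CT-kill configuration to the uCode: clear the CT_KILL_EXIT bit
- * and program the critical temperature threshold from hw_params.
- */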
-static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
-{
-	struct iwl_ct_kill_config cmd;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	cmd.critical_temperature_R =
-		cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
-	ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
-			       sizeof(cmd), &cmd);
-	if (ret)
-		IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
-	else
-		IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
-				"succeeded, "
-				"critical temperature is %d\n",
-				priv->hw_params.ct_kill_threshold);
-}
-
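-/*
- * Default queue-to-FIFO mapping: queues 0-3 carry the four access
- * categories (VO/VI/BE/BK), queue 4 is the command queue, 5-6 are unused.
- */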
-static const s8 default_queue_to_tx_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-	IWL49_CMD_FIFO_NUM,
-	IWL_TX_FIFO_UNUSED,
-	IWL_TX_FIFO_UNUSED,
-};
-
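-/*
- * Configure the 4965 Tx scheduler and DMA channels once the runtime uCode
- * has reported ALIVE: clear scheduler SRAM, point it at the byte-count
- * tables, enable the DMA channels and map each Tx queue to its FIFO.
- */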
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
-	u32 a;
-	unsigned long flags;
-	int i, chan;
-	u32 reg_val;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Clear 4965's internal Tx Scheduler database */
-	priv->scd_base_addr = iwl_legacy_read_prph(priv,
-					IWL49_SCD_SRAM_BASE_ADDR);
-	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
-	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
-		iwl_legacy_write_targ_mem(priv, a, 0);
-	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
-		iwl_legacy_write_targ_mem(priv, a, 0);
-	for (; a < priv->scd_base_addr +
-	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
-		iwl_legacy_write_targ_mem(priv, a, 0);
-
-	/* Tell 4965 where to find Tx byte count tables */
-	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
-			priv->scd_bc_tbls.dma >> 10);
-
-	/* Enable DMA channel */
-	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
-		iwl_legacy_write_direct32(priv,
-				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
-				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
-	/* Update FH chicken bits */
-	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
-	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
-			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
-	/* Disable chain mode for all queues */
-	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
-	/* Initialize each Tx queue (including the command queue) */
-	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
-		/* TFD circular buffer read/write indexes */
-		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
-		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
-		/* Max Tx Window size for Scheduler-ACK mode */
-		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
-				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
-				(SCD_WIN_SIZE <<
-				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-		/* Frame limit */
-		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
-				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
-				sizeof(u32),
-				(SCD_FRAME_LIMIT <<
-				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-	}
-	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
-				 (1 << priv->hw_params.max_txq_num) - 1);
-
-	/* Activate all Tx DMA/FIFO channels */
-	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
-
-	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-	/* make sure all queues are not stopped */
-	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
-	for (i = 0; i < 4; i++)
-		atomic_set(&priv->queue_stop_count[i], 0);
-
-	/* reset to 0 to enable all the queues first */
-	priv->txq_ctx_active_msk = 0;
-	/* Map each Tx/cmd queue to its corresponding fifo */
-	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
-	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
-		int ac = default_queue_to_tx_fifo[i];
-
-		iwl_txq_ctx_activate(priv, i);
-
-		if (ac == IWL_TX_FIFO_UNUSED)
-			continue;
-
-		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
-	}
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return 0;
-}
-
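iwl4965_alive_notify() above programs two 32-bit scheduler context words per Tx queue: the Scheduler-ACK window size goes into bits 0-6 of the first word and the frame limit into bits 16-22 of the second, via the WIN_SIZE/FRAME_LIMIT shift-and-mask pairs. A standalone sketch of that packing, reusing only the numeric positions and masks quoted in the removed code (the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Values quoted in the removed driver code. */
#define WIN_SIZE_POS		0
#define WIN_SIZE_MSK		0x0000007F
#define FRAME_LIMIT_POS		16
#define FRAME_LIMIT_MSK		0x007F0000
#define SCD_WIN_SIZE		64
#define SCD_FRAME_LIMIT		64

int main(void)
{
	uint32_t reg1 = ((uint32_t)SCD_WIN_SIZE << WIN_SIZE_POS) & WIN_SIZE_MSK;
	uint32_t reg2 = ((uint32_t)SCD_FRAME_LIMIT << FRAME_LIMIT_POS) &
			FRAME_LIMIT_MSK;

	/* reg1 = 0x00000040, reg2 = 0x00400000 for the 64/64 defaults */
	printf("reg1=0x%08x reg2=0x%08x\n", (unsigned)reg1, (unsigned)reg2);
	return 0;
}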
-/**
- * iwl4965_alive_start - called after REPLY_ALIVE notification received
- *                   from protocol/runtime uCode (initialization uCode's
- *                   Alive gets handled by iwl_init_alive_start()).
- */
-static void iwl4965_alive_start(struct iwl_priv *priv)
-{
-	int ret = 0;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
-	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-		/* We had an error bringing up the hardware, so take it
-		 * all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Alive failed.\n");
-		goto restart;
-	}
-
-	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
-	 * This is a paranoid check, because we would not have gotten the
-	 * "runtime" alive if code weren't properly loaded.  */
-	if (iwl4965_verify_ucode(priv)) {
-		/* Runtime instruction load was bad;
-		 * take it all the way back down so we can try again */
-		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
-		goto restart;
-	}
-
-	ret = iwl4965_alive_notify(priv);
-	if (ret) {
-		IWL_WARN(priv,
-			"Could not complete ALIVE transition [ntf]: %d\n", ret);
-		goto restart;
-	}
-
-
-	/* After the ALIVE response, we can send host commands to the uCode */
-	set_bit(STATUS_ALIVE, &priv->status);
-
-	/* Enable watchdog to monitor the driver tx queues */
-	iwl_legacy_setup_watchdog(priv);
-
-	if (iwl_legacy_is_rfkill(priv))
-		return;
-
-	ieee80211_wake_queues(priv->hw);
-
-	priv->active_rate = IWL_RATES_MASK;
-
-	if (iwl_legacy_is_associated_ctx(ctx)) {
-		struct iwl_legacy_rxon_cmd *active_rxon =
-				(struct iwl_legacy_rxon_cmd *)&ctx->active;
-		/* apply any changes in staging */
-		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	} else {
-		struct iwl_rxon_context *tmp;
-		/* Initialize our rx_config data */
-		for_each_context(priv, tmp)
-			iwl_legacy_connection_init_rx_config(priv, tmp);
-
-		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-	}
-
-	/* Configure bluetooth coexistence if enabled */
-	iwl_legacy_send_bt_config(priv);
-
-	iwl4965_reset_run_time_calib(priv);
-
-	set_bit(STATUS_READY, &priv->status);
-
-	/* Configure the adapter for unassociated operation */
-	iwl_legacy_commit_rxon(priv, ctx);
-
-	/* At this point, the NIC is initialized and operational */
-	iwl4965_rf_kill_ct_config(priv);
-
-	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-	wake_up(&priv->wait_command_queue);
-
-	iwl_legacy_power_update_mode(priv, true);
-	IWL_DEBUG_INFO(priv, "Updated power mode\n");
-
-	return;
-
- restart:
-	queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl4965_down(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	int exit_pending;
-
-	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
-	iwl_legacy_scan_cancel_timeout(priv, 200);
-
-	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop the TX queues watchdog. The STATUS_EXIT_PENDING bit must be set
-	 * to prevent the timer from being rearmed */
-	del_timer_sync(&priv->watchdog);
-
-	iwl_legacy_clear_ucode_stations(priv, NULL);
-	iwl_legacy_dealloc_bcast_stations(priv);
-	iwl_legacy_clear_driver_stations(priv);
-
-	/* Unblock any waiting calls */
-	wake_up_all(&priv->wait_command_queue);
-
-	/* Wipe out the EXIT_PENDING status bit if we are not actually
-	 * exiting the module */
-	if (!exit_pending)
-		clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* stop and reset the on-board processor */
-	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-	/* tell the device to stop sending interrupts */
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_legacy_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	iwl4965_synchronize_irq(priv);
-
-	if (priv->mac80211_registered)
-		ieee80211_stop_queues(priv->hw);
-
-	/* If we have not previously called iwl_init() then
-	 * clear all bits but the RF Kill bit and return */
-	if (!iwl_legacy_is_init(priv)) {
-		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-					STATUS_RF_KILL_HW |
-			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-					STATUS_GEO_CONFIGURED |
-			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-					STATUS_EXIT_PENDING;
-		goto exit;
-	}
-
-	/* ...otherwise clear out all the status bits but the RF Kill
-	 * bit and continue taking the NIC down. */
-	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-				STATUS_RF_KILL_HW |
-			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-				STATUS_GEO_CONFIGURED |
-			test_bit(STATUS_FW_ERROR, &priv->status) <<
-				STATUS_FW_ERROR |
-		       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-				STATUS_EXIT_PENDING;
-
-	iwl4965_txq_ctx_stop(priv);
-	iwl4965_rxq_stop(priv);
-
-	/* Power-down device's busmaster DMA clocks */
-	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-	udelay(5);
-
-	/* Make sure (redundant) we've released our request to stay awake */
-	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
-				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-	/* Stop the device, and put it in low power state */
-	iwl_legacy_apm_stop(priv);
-
- exit:
-	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
-	dev_kfree_skb(priv->beacon_skb);
-	priv->beacon_skb = NULL;
-
-	/* clear out any free frames */
-	iwl4965_clear_free_frames(priv);
-}
-
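__iwl4965_down() above rebuilds priv->status so that only a short whitelist of bits (RF-kill, geo-configured, firmware-error, exit-pending) survives the teardown, by testing each bit and shifting the result back into position. A standalone sketch of that test-and-rebuild idiom (the bit numbers below are placeholders, not the driver's real STATUS_* values):

#include <stdio.h>

/* Placeholder bit positions, for illustration only. */
#define ST_RF_KILL_HW		0
#define ST_GEO_CONFIGURED	1
#define ST_FW_ERROR		2
#define ST_EXIT_PENDING		3

static unsigned long test_bit(int nr, unsigned long word)
{
	return (word >> nr) & 1UL;
}

int main(void)
{
	unsigned long status = 0xadUL;	/* arbitrary example state */

	status = test_bit(ST_RF_KILL_HW, status)     << ST_RF_KILL_HW |
		 test_bit(ST_GEO_CONFIGURED, status) << ST_GEO_CONFIGURED |
		 test_bit(ST_FW_ERROR, status)       << ST_FW_ERROR |
		 test_bit(ST_EXIT_PENDING, status)   << ST_EXIT_PENDING;

	printf("preserved status = 0x%lx\n", status);	/* 0xd for this input */
	return 0;
}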
-static void iwl4965_down(struct iwl_priv *priv)
-{
-	mutex_lock(&priv->mutex);
-	__iwl4965_down(priv);
-	mutex_unlock(&priv->mutex);
-
-	iwl4965_cancel_deferred_work(priv);
-}
-
-#define HW_READY_TIMEOUT (50)
-
-static int iwl4965_set_hw_ready(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
-	/* See if we got it */
-	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
-				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-				HW_READY_TIMEOUT);
-	if (ret != -ETIMEDOUT)
-		priv->hw_ready = true;
-	else
-		priv->hw_ready = false;
-
-	IWL_DEBUG_INFO(priv, "hardware %s\n",
-		      (priv->hw_ready == 1) ? "ready" : "not ready");
-	return ret;
-}
-
-static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
-
-	ret = iwl4965_set_hw_ready(priv);
-	if (priv->hw_ready)
-		return ret;
-
-	/* If HW is not ready, prepare the conditions to check again */
-	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-			CSR_HW_IF_CONFIG_REG_PREPARE);
-
-	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
-			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
-	/* HW should be ready by now, check again. */
-	if (ret != -ETIMEDOUT)
-		iwl4965_set_hw_ready(priv);
-
-	return ret;
-}
-
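iwl4965_set_hw_ready() and iwl4965_prepare_card_hw() above follow a set-then-poll pattern: request NIC_READY (or PREPARE) in the interface-config register, then poll the same register until the expected bits show up or a timeout elapses. A standalone sketch of that polling loop under simplified assumptions (the register is a plain variable, the bit value is made up, and the inter-read delay is elided; in the driver this role is played by iwl_poll_bit()):

#include <stdint.h>
#include <stdio.h>

#define NIC_READY	0x00400000u	/* illustrative bit, not the real CSR value */

static uint32_t hw_if_config;		/* stand-in for the CSR register */

/* Return elapsed "time" once (reg & mask) == (bits & mask), or -1 on timeout. */
static int poll_bit(uint32_t bits, uint32_t mask, int timeout)
{
	int t;

	for (t = 0; t < timeout; t += 10)
		if ((hw_if_config & mask) == (bits & mask))
			return t;
	return -1;
}

int main(void)
{
	hw_if_config |= NIC_READY;	/* pretend the hardware latched READY */
	printf("ready after %d ticks\n", poll_bit(NIC_READY, NIC_READY, 50));
	return 0;
}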
-#define MAX_HW_RESTARTS 5
-
-static int __iwl4965_up(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx;
-	int i;
-	int ret;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-		return -EIO;
-	}
-
-	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
-		IWL_ERR(priv, "ucode not available for device bringup\n");
-		return -EIO;
-	}
-
-	for_each_context(priv, ctx) {
-		ret = iwl4965_alloc_bcast_station(priv, ctx);
-		if (ret) {
-			iwl_legacy_dealloc_bcast_stations(priv);
-			return ret;
-		}
-	}
-
-	iwl4965_prepare_card_hw(priv);
-
-	if (!priv->hw_ready) {
-		IWL_WARN(priv, "Exit HW not ready\n");
-		return -EIO;
-	}
-
-	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(priv,
-		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	if (iwl_legacy_is_rfkill(priv)) {
-		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-
-		iwl_legacy_enable_interrupts(priv);
-		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
-		return 0;
-	}
-
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
-	/* must be initialised before iwl_hw_nic_init */
-	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
-	ret = iwl4965_hw_nic_init(priv);
-	if (ret) {
-		IWL_ERR(priv, "Unable to init nic\n");
-		return ret;
-	}
-
-	/* make sure rfkill handshake bits are cleared */
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-	/* clear (again), then enable host interrupts */
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-	iwl_legacy_enable_interrupts(priv);
-
-	/* really make sure rfkill handshake bits are cleared */
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
-	/* Copy original ucode data image from disk into backup cache.
-	 * This will be used to initialize the on-board processor's
-	 * data SRAM for a clean start when the runtime program first loads. */
-	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
-	       priv->ucode_data.len);
-
-	for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
-		/* load bootstrap state machine,
-		 * load bootstrap program into processor's memory,
-		 * prepare to load the "initialize" uCode */
-		ret = priv->cfg->ops->lib->load_ucode(priv);
-
-		if (ret) {
-			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
-				ret);
-			continue;
-		}
-
-		/* start card; "initialize" will load runtime ucode */
-		iwl4965_nic_start(priv);
-
-		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
-		return 0;
-	}
-
-	set_bit(STATUS_EXIT_PENDING, &priv->status);
-	__iwl4965_down(priv);
-	clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* tried to restart and config the device for as long as our
-	 * patience could withstand */
-	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
-	return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl4965_bg_init_alive_start(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, init_alive_start.work);
-
-	mutex_lock(&priv->mutex);
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		goto out;
-
-	priv->cfg->ops->lib->init_alive_start(priv);
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_alive_start(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, alive_start.work);
-
-	mutex_lock(&priv->mutex);
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		goto out;
-
-	iwl4965_alive_start(priv);
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
-{
-	struct iwl_priv *priv = container_of(work, struct iwl_priv,
-			run_time_calib_work);
-
-	mutex_lock(&priv->mutex);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    test_bit(STATUS_SCANNING, &priv->status)) {
-		mutex_unlock(&priv->mutex);
-		return;
-	}
-
-	if (priv->start_calib) {
-		iwl4965_chain_noise_calibration(priv,
-				(void *)&priv->_4965.statistics);
-		iwl4965_sensitivity_calibration(priv,
-				(void *)&priv->_4965.statistics);
-	}
-
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_restart(struct work_struct *data)
-{
-	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
-		struct iwl_rxon_context *ctx;
-
-		mutex_lock(&priv->mutex);
-		for_each_context(priv, ctx)
-			ctx->vif = NULL;
-		priv->is_open = 0;
-
-		__iwl4965_down(priv);
-
-		mutex_unlock(&priv->mutex);
-		iwl4965_cancel_deferred_work(priv);
-		ieee80211_restart_hw(priv->hw);
-	} else {
-		iwl4965_down(priv);
-
-		mutex_lock(&priv->mutex);
-		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-			mutex_unlock(&priv->mutex);
-			return;
-		}
-
-		__iwl4965_up(priv);
-		mutex_unlock(&priv->mutex);
-	}
-}
-
-static void iwl4965_bg_rx_replenish(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, rx_replenish);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	mutex_lock(&priv->mutex);
-	iwl4965_rx_replenish(priv);
-	mutex_unlock(&priv->mutex);
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT	(4 * HZ)
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
-				  u32 max_probe_length)
-{
-	int ret;
-	struct ieee80211_hw *hw = priv->hw;
-	struct iwl_rxon_context *ctx;
-
-	hw->rate_control_algorithm = "iwl-4965-rs";
-
-	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_NEED_DTIM_PERIOD |
-		    IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
-	if (priv->cfg->sku & IWL_SKU_N)
-		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
-	hw->sta_data_size = sizeof(struct iwl_station_priv);
-	hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-	for_each_context(priv, ctx) {
-		hw->wiphy->interface_modes |= ctx->interface_modes;
-		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
-	}
-
-	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-			    WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
-	/*
-	 * For now, disable PS by default because it affects
-	 * RX performance significantly.
-	 */
-	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
-	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
-	/* we create the 802.11 header and a zero-length SSID element */
-	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
-
-	/* Default value; 4 EDCA QOS priorities */
-	hw->queues = 4;
-
-	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&priv->bands[IEEE80211_BAND_2GHZ];
-	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&priv->bands[IEEE80211_BAND_5GHZ];
-
-	iwl_legacy_leds_init(priv);
-
-	ret = ieee80211_register_hw(priv->hw);
-	if (ret) {
-		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-		return ret;
-	}
-	priv->mac80211_registered = 1;
-
-	return 0;
-}
-
-
-int iwl4965_mac_start(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	/* we should be verifying the device is ready to be opened */
-	mutex_lock(&priv->mutex);
-	ret = __iwl4965_up(priv);
-	mutex_unlock(&priv->mutex);
-
-	if (ret)
-		return ret;
-
-	if (iwl_legacy_is_rfkill(priv))
-		goto out;
-
-	IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
-	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
-	 * mac80211 will not be run successfully. */
-	ret = wait_event_timeout(priv->wait_command_queue,
-			test_bit(STATUS_READY, &priv->status),
-			UCODE_READY_TIMEOUT);
-	if (!ret) {
-		if (!test_bit(STATUS_READY, &priv->status)) {
-			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
-				jiffies_to_msecs(UCODE_READY_TIMEOUT));
-			return -ETIMEDOUT;
-		}
-	}
-
-	iwl4965_led_enable(priv);
-
-out:
-	priv->is_open = 1;
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return 0;
-}
-
-void iwl4965_mac_stop(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (!priv->is_open)
-		return;
-
-	priv->is_open = 0;
-
-	iwl4965_down(priv);
-
-	flush_workqueue(priv->workqueue);
-
-	/* User space software may expect to get rfkill changes
-	 * even if the interface is down */
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-	iwl_legacy_enable_rfkill_int(priv);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	IWL_DEBUG_MACDUMP(priv, "enter\n");
-
-	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-	if (iwl4965_tx_skb(priv, skb))
-		dev_kfree_skb_any(skb);
-
-	IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
-				struct ieee80211_vif *vif,
-				struct ieee80211_key_conf *keyconf,
-				struct ieee80211_sta *sta,
-				u32 iv32, u16 *phase1key)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
-			    iv32, phase1key);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-		       struct ieee80211_key_conf *key)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	struct iwl_rxon_context *ctx = vif_priv->ctx;
-	int ret;
-	u8 sta_id;
-	bool is_default_wep_key = false;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (priv->cfg->mod_params->sw_crypto) {
-		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-		return -EOPNOTSUPP;
-	}
-
-	sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
-	if (sta_id == IWL_INVALID_STATION)
-		return -EINVAL;
-
-	mutex_lock(&priv->mutex);
-	iwl_legacy_scan_cancel_timeout(priv, 100);
-
-	/*
-	 * If we are getting a WEP group key and we did not receive any
-	 * key-mapping key so far, we are in legacy WEP mode (group key only);
-	 * otherwise we are in 802.1X mode.
-	 * In legacy WEP mode, we use another host command to the uCode.
-	 */
-	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
-	    !sta) {
-		if (cmd == SET_KEY)
-			is_default_wep_key = !ctx->key_mapping_keys;
-		else
-			is_default_wep_key =
-					(key->hw_key_idx == HW_KEY_DEFAULT);
-	}
-
-	switch (cmd) {
-	case SET_KEY:
-		if (is_default_wep_key)
-			ret = iwl4965_set_default_wep_key(priv,
-							vif_priv->ctx, key);
-		else
-			ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
-						  key, sta_id);
-
-		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-		break;
-	case DISABLE_KEY:
-		if (is_default_wep_key)
-			ret = iwl4965_remove_default_wep_key(priv, ctx, key);
-		else
-			ret = iwl4965_remove_dynamic_key(priv, ctx,
-							key, sta_id);
-
-		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	mutex_unlock(&priv->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
-			    struct ieee80211_vif *vif,
-			    enum ieee80211_ampdu_mlme_action action,
-			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			    u8 buf_size)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret = -EINVAL;
-
-	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
-		     sta->addr, tid);
-
-	if (!(priv->cfg->sku & IWL_SKU_N))
-		return -EACCES;
-
-	mutex_lock(&priv->mutex);
-
-	switch (action) {
-	case IEEE80211_AMPDU_RX_START:
-		IWL_DEBUG_HT(priv, "start Rx\n");
-		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
-		break;
-	case IEEE80211_AMPDU_RX_STOP:
-		IWL_DEBUG_HT(priv, "stop Rx\n");
-		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
-		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-			ret = 0;
-		break;
-	case IEEE80211_AMPDU_TX_START:
-		IWL_DEBUG_HT(priv, "start Tx\n");
-		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
-		break;
-	case IEEE80211_AMPDU_TX_STOP:
-		IWL_DEBUG_HT(priv, "stop Tx\n");
-		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
-		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-			ret = 0;
-		break;
-	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		ret = 0;
-		break;
-	}
-	mutex_unlock(&priv->mutex);
-
-	return ret;
-}
-
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
-		       struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-	int ret;
-	u8 sta_id;
-
-	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
-			sta->addr);
-	mutex_lock(&priv->mutex);
-	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-			sta->addr);
-	sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-	atomic_set(&sta_priv->pending_frames, 0);
-
-	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
-				     is_ap, sta, &sta_id);
-	if (ret) {
-		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-			sta->addr, ret);
-		/* Should we return success if return code is EEXIST ? */
-		mutex_unlock(&priv->mutex);
-		return ret;
-	}
-
-	sta_priv->common.sta_id = sta_id;
-
-	/* Initialize rate scaling */
-	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-		       sta->addr);
-	iwl4965_rs_rate_init(priv, sta, sta_id);
-	mutex_unlock(&priv->mutex);
-
-	return 0;
-}
-
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
-			       struct ieee80211_channel_switch *ch_switch)
-{
-	struct iwl_priv *priv = hw->priv;
-	const struct iwl_channel_info *ch_info;
-	struct ieee80211_conf *conf = &hw->conf;
-	struct ieee80211_channel *channel = ch_switch->channel;
-	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	u16 ch;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	mutex_lock(&priv->mutex);
-
-	if (iwl_legacy_is_rfkill(priv))
-		goto out;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    test_bit(STATUS_SCANNING, &priv->status) ||
-	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-		goto out;
-
-	if (!iwl_legacy_is_associated_ctx(ctx))
-		goto out;
-
-	if (!priv->cfg->ops->lib->set_channel_switch)
-		goto out;
-
-	ch = channel->hw_value;
-	if (le16_to_cpu(ctx->active.channel) == ch)
-		goto out;
-
-	ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
-	if (!iwl_legacy_is_channel_valid(ch_info)) {
-		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
-		goto out;
-	}
-
-	spin_lock_irq(&priv->lock);
-
-	priv->current_ht_config.smps = conf->smps_mode;
-
-	/* Configure HT40 channels */
-	ctx->ht.enabled = conf_is_ht(conf);
-	if (ctx->ht.enabled) {
-		if (conf_is_ht40_minus(conf)) {
-			ctx->ht.extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-			ctx->ht.is_40mhz = true;
-		} else if (conf_is_ht40_plus(conf)) {
-			ctx->ht.extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-			ctx->ht.is_40mhz = true;
-		} else {
-			ctx->ht.extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_NONE;
-			ctx->ht.is_40mhz = false;
-		}
-	} else
-		ctx->ht.is_40mhz = false;
-
-	if ((le16_to_cpu(ctx->staging.channel) != ch))
-		ctx->staging.flags = 0;
-
-	iwl_legacy_set_rxon_channel(priv, channel, ctx);
-	iwl_legacy_set_rxon_ht(priv, ht_conf);
-	iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
-	spin_unlock_irq(&priv->lock);
-
-	iwl_legacy_set_rate(priv);
-	/*
-	 * at this point, staging_rxon has the
-	 * configuration for channel switch
-	 */
-	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
-	priv->switch_channel = cpu_to_le16(ch);
-	if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
-		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
-		priv->switch_channel = 0;
-		ieee80211_chswitch_done(ctx->vif, false);
-	}
-
-out:
-	mutex_unlock(&priv->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
-			     unsigned int changed_flags,
-			     unsigned int *total_flags,
-			     u64 multicast)
-{
-	struct iwl_priv *priv = hw->priv;
-	__le32 filter_or = 0, filter_nand = 0;
-	struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag)	do { \
-	if (*total_flags & (test))		\
-		filter_or |= (flag);		\
-	else					\
-		filter_nand |= (flag);		\
-	} while (0)
-
-	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-			changed_flags, *total_flags);
-
-	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
-	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
-	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-	mutex_lock(&priv->mutex);
-
-	for_each_context(priv, ctx) {
-		ctx->staging.filter_flags &= ~filter_nand;
-		ctx->staging.filter_flags |= filter_or;
-
-		/*
-		 * Not committing directly because hardware can perform a scan,
-		 * but we'll eventually commit the filter flags change anyway.
-		 */
-	}
-
-	mutex_unlock(&priv->mutex);
-
-	/*
-	 * Receiving all multicast frames is always enabled by the
-	 * default flags setup in iwl_legacy_connection_init_rx_config()
-	 * since we currently do not support programming multicast
-	 * filters into the device.
-	 */
-	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
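The CHK() macro in iwl4965_configure_filter() above splits each requested mac80211 filter flag into an OR mask (bits to set) and a NAND mask (bits to clear); both masks are then applied to every context's staging filter flags. A standalone sketch of that accumulate-then-apply pattern (all flag values below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only. */
#define FIF_PROMISC		0x01	/* "requested" flags from mac80211 */
#define FIF_CONTROL		0x02
#define RXON_PROMISC_MSK	0x10	/* "device" filter bits */
#define RXON_CTL2HOST_MSK	0x20

int main(void)
{
	uint32_t total_flags = FIF_CONTROL;		/* example request */
	uint32_t staging = RXON_PROMISC_MSK;		/* example current filter */
	uint32_t filter_or = 0, filter_nand = 0;

#define CHK(test, flag) do {				\
	if (total_flags & (test))			\
		filter_or |= (flag);			\
	else						\
		filter_nand |= (flag);			\
	} while (0)

	CHK(FIF_PROMISC, RXON_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_CTL2HOST_MSK | RXON_PROMISC_MSK);
#undef CHK

	staging &= ~filter_nand;	/* clear bits whose test flag was absent */
	staging |= filter_or;		/* set bits whose test flag was present */
	printf("staging filter = 0x%02x\n", (unsigned)staging);	/* 0x30 */
	return 0;
}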
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
-	struct iwl_priv *priv = container_of(work, struct iwl_priv,
-			txpower_work);
-
-	mutex_lock(&priv->mutex);
-
-	/* If a scan happened to start before we got here
-	 * then just return; the statistics notification will
-	 * kick off another scheduled work to compensate for
-	 * any temperature delta we missed here. */
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    test_bit(STATUS_SCANNING, &priv->status))
-		goto out;
-
-	/* Regardless of whether we are associated, we must reconfigure the
-	 * TX power since frames can be sent on non-radar channels while
-	 * not associated */
-	priv->cfg->ops->lib->send_tx_power(priv);
-
-	/* Update last_temperature to keep is_calib_needed from running
-	 * when it isn't needed... */
-	priv->last_temperature = priv->temperature;
-out:
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
-	priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
-	init_waitqueue_head(&priv->wait_command_queue);
-
-	INIT_WORK(&priv->restart, iwl4965_bg_restart);
-	INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
-	INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
-	INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
-	INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
-
-	iwl_legacy_setup_scan_deferred_work(priv);
-
-	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-
-	init_timer(&priv->statistics_periodic);
-	priv->statistics_periodic.data = (unsigned long)priv;
-	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
-
-	init_timer(&priv->watchdog);
-	priv->watchdog.data = (unsigned long)priv;
-	priv->watchdog.function = iwl_legacy_bg_watchdog;
-
-	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
-		iwl4965_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
-	cancel_work_sync(&priv->txpower_work);
-	cancel_delayed_work_sync(&priv->init_alive_start);
-	cancel_delayed_work(&priv->alive_start);
-	cancel_work_sync(&priv->run_time_calib_work);
-
-	iwl_legacy_cancel_scan_deferred_work(priv);
-
-	del_timer_sync(&priv->statistics_periodic);
-}
-
-static void iwl4965_init_hw_rates(struct iwl_priv *priv,
-			      struct ieee80211_rate *rates)
-{
-	int i;
-
-	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
-		rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
-		rates[i].hw_value = i; /* Rate scaling will work on indexes */
-		rates[i].hw_value_short = i;
-		rates[i].flags = 0;
-		if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
-			/*
-			 * If CCK != 1M then set short preamble rate flag.
-			 */
-			rates[i].flags |=
-				(iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
-					0 : IEEE80211_RATE_SHORT_PREAMBLE;
-		}
-	}
-}
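iwl4965_init_hw_rates() above fills mac80211's bitrate field as ieee * 5, which is consistent with the table's ieee field holding the 802.11 rate code in 500 kbps units while mac80211 expects 100 kbps units. A standalone illustration of that conversion (the rate codes below are the usual 802.11b/g values, listed only as an example):

#include <stdio.h>

int main(void)
{
	/* 802.11 rate codes in 500 kbps units: 1, 2, 5.5, 11, 6 and 54 Mbps */
	static const int ieee[] = { 2, 4, 11, 22, 12, 108 };
	unsigned int i;

	for (i = 0; i < sizeof(ieee) / sizeof(ieee[0]); i++)
		printf("ieee %3d -> bitrate %3d (%d.%d Mbps)\n",
		       ieee[i], ieee[i] * 5, ieee[i] / 2, (ieee[i] % 2) * 5);
	return 0;
}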
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
-	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
-			     (index & 0xff) | (txq_id << 8));
-	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
-					struct iwl_tx_queue *txq,
-					int tx_fifo_id, int scd_retry)
-{
-	int txq_id = txq->q.id;
-
-	/* Find out whether to activate Tx queue */
-	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
-	/* Set up and activate */
-	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-			 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
-			 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
-			 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
-			 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
-			 IWL49_SCD_QUEUE_STTS_REG_MSK);
-
-	txq->sched_retry = scd_retry;
-
-	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
-		       active ? "Activate" : "Deactivate",
-		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-
-static int iwl4965_init_drv(struct iwl_priv *priv)
-{
-	int ret;
-
-	spin_lock_init(&priv->sta_lock);
-	spin_lock_init(&priv->hcmd_lock);
-
-	INIT_LIST_HEAD(&priv->free_frames);
-
-	mutex_init(&priv->mutex);
-
-	priv->ieee_channels = NULL;
-	priv->ieee_rates = NULL;
-	priv->band = IEEE80211_BAND_2GHZ;
-
-	priv->iw_mode = NL80211_IFTYPE_STATION;
-	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
-	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
-	/* initialize force reset */
-	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
-	/* Choose which receivers/antennas to use */
-	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv,
-					&priv->contexts[IWL_RXON_CTX_BSS]);
-
-	iwl_legacy_init_scan_params(priv);
-
-	ret = iwl_legacy_init_channel_map(priv);
-	if (ret) {
-		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-		goto err;
-	}
-
-	ret = iwl_legacy_init_geos(priv);
-	if (ret) {
-		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-		goto err_free_channel_map;
-	}
-	iwl4965_init_hw_rates(priv, priv->ieee_rates);
-
-	return 0;
-
-err_free_channel_map:
-	iwl_legacy_free_channel_map(priv);
-err:
-	return ret;
-}
-
-static void iwl4965_uninit_drv(struct iwl_priv *priv)
-{
-	iwl4965_calib_free_results(priv);
-	iwl_legacy_free_geos(priv);
-	iwl_legacy_free_channel_map(priv);
-	kfree(priv->scan_cmd);
-}
-
-static void iwl4965_hw_detect(struct iwl_priv *priv)
-{
-	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
-	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
-	priv->rev_id = priv->pci_dev->revision;
-	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
-}
-
-static int iwl4965_set_hw_params(struct iwl_priv *priv)
-{
-	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-	if (priv->cfg->mod_params->amsdu_size_8K)
-		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
-	else
-		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
-	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
-	if (priv->cfg->mod_params->disable_11n)
-		priv->cfg->sku &= ~IWL_SKU_N;
-
-	/* Device-specific setup */
-	return priv->cfg->ops->lib->set_hw_params(priv);
-}
-
-static const u8 iwl4965_bss_ac_to_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-};
-
-static const u8 iwl4965_bss_ac_to_queue[] = {
-	0, 1, 2, 3,
-};
-
-static int
-iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	int err = 0, i;
-	struct iwl_priv *priv;
-	struct ieee80211_hw *hw;
-	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
-	unsigned long flags;
-	u16 pci_cmd;
-
-	/************************
-	 * 1. Allocating HW data
-	 ************************/
-
-	hw = iwl_legacy_alloc_all(cfg);
-	if (!hw) {
-		err = -ENOMEM;
-		goto out;
-	}
-	priv = hw->priv;
-	/* At this point both hw and priv are allocated. */
-
-	/*
-	 * The default context is always valid,
-	 * more may be discovered when firmware
-	 * is loaded.
-	 */
-	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
-	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
-		priv->contexts[i].ctxid = i;
-
-	priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
-	priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
-	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
-	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
-	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
-	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
-	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
-	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
-	priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
-	priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
-	priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
-		BIT(NL80211_IFTYPE_ADHOC);
-	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
-		BIT(NL80211_IFTYPE_STATION);
-	priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
-	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
-	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
-	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
-	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
-
-	SET_IEEE80211_DEV(hw, &pdev->dev);
-
-	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-	priv->cfg = cfg;
-	priv->pci_dev = pdev;
-	priv->inta_mask = CSR_INI_SET_MASK;
-
-	if (iwl_legacy_alloc_traffic_mem(priv))
-		IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
-	/**************************
-	 * 2. Initializing PCI bus
-	 **************************/
-	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-				PCIE_LINK_STATE_CLKPM);
-
-	if (pci_enable_device(pdev)) {
-		err = -ENODEV;
-		goto out_ieee80211_free_hw;
-	}
-
-	pci_set_master(pdev);
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (!err)
-			err = pci_set_consistent_dma_mask(pdev,
-							DMA_BIT_MASK(32));
-		/* both attempts failed: */
-		if (err) {
-			IWL_WARN(priv, "No suitable DMA available.\n");
-			goto out_pci_disable_device;
-		}
-	}
-
-	err = pci_request_regions(pdev, DRV_NAME);
-	if (err)
-		goto out_pci_disable_device;
-
-	pci_set_drvdata(pdev, priv);
-
-
-	/***********************
-	 * 3. Read REV register
-	 ***********************/
-	priv->hw_base = pci_iomap(pdev, 0, 0);
-	if (!priv->hw_base) {
-		err = -ENODEV;
-		goto out_pci_release_regions;
-	}
-
-	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
-		(unsigned long long) pci_resource_len(pdev, 0));
-	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-	/* these spin locks will be used in apm_ops.init and EEPROM access,
-	 * so we should init them now
-	 */
-	spin_lock_init(&priv->reg_lock);
-	spin_lock_init(&priv->lock);
-
-	/*
-	 * stop and reset the on-board processor just in case it is in a
-	 * strange state ... like being left stranded by a primary kernel
-	 * and this is now the kdump kernel trying to start up
-	 */
-	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-	iwl4965_hw_detect(priv);
-	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
-		priv->cfg->name, priv->hw_rev);
-
-	/* We disable the RETRY_TIMEOUT register (0x41) to keep
-	 * PCI Tx retries from interfering with C3 CPU state */
-	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
-	iwl4965_prepare_card_hw(priv);
-	if (!priv->hw_ready) {
-		IWL_WARN(priv, "Failed, HW not ready\n");
-		goto out_iounmap;
-	}
-
-	/*****************
-	 * 4. Read EEPROM
-	 *****************/
-	/* Read the EEPROM */
-	err = iwl_legacy_eeprom_init(priv);
-	if (err) {
-		IWL_ERR(priv, "Unable to init EEPROM\n");
-		goto out_iounmap;
-	}
-	err = iwl4965_eeprom_check_version(priv);
-	if (err)
-		goto out_free_eeprom;
-
-	if (err)
-		goto out_free_eeprom;
-
-	/* extract MAC Address */
-	iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
-	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
-	priv->hw->wiphy->addresses = priv->addresses;
-	priv->hw->wiphy->n_addresses = 1;
-
-	/************************
-	 * 5. Setup HW constants
-	 ************************/
-	if (iwl4965_set_hw_params(priv)) {
-		IWL_ERR(priv, "failed to set hw parameters\n");
-		goto out_free_eeprom;
-	}
-
-	/*******************
-	 * 6. Setup priv
-	 *******************/
-
-	err = iwl4965_init_drv(priv);
-	if (err)
-		goto out_free_eeprom;
-	/* At this point both hw and priv are initialized. */
-
-	/********************
-	 * 7. Setup services
-	 ********************/
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_legacy_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	pci_enable_msi(priv->pci_dev);
-
-	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
-			  IRQF_SHARED, DRV_NAME, priv);
-	if (err) {
-		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
-		goto out_disable_msi;
-	}
-
-	iwl4965_setup_deferred_work(priv);
-	iwl4965_setup_rx_handlers(priv);
-
-	/*********************************************
-	 * 8. Enable interrupts and read RFKILL state
-	 *********************************************/
-
-	/* enable rfkill interrupt: hw bug w/a */
-	pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
-	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
-		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
-		pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
-	}
-
-	iwl_legacy_enable_rfkill_int(priv);
-
-	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(priv, CSR_GP_CNTRL) &
-		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-		test_bit(STATUS_RF_KILL_HW, &priv->status));
-
-	iwl_legacy_power_initialize(priv);
-
-	init_completion(&priv->_4965.firmware_loading_complete);
-
-	err = iwl4965_request_firmware(priv, true);
-	if (err)
-		goto out_destroy_workqueue;
-
-	return 0;
-
- out_destroy_workqueue:
-	destroy_workqueue(priv->workqueue);
-	priv->workqueue = NULL;
-	free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
-	pci_disable_msi(priv->pci_dev);
-	iwl4965_uninit_drv(priv);
- out_free_eeprom:
-	iwl_legacy_eeprom_free(priv);
- out_iounmap:
-	pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
-	pci_set_drvdata(pdev, NULL);
-	pci_release_regions(pdev);
- out_pci_disable_device:
-	pci_disable_device(pdev);
- out_ieee80211_free_hw:
-	iwl_legacy_free_traffic_mem(priv);
-	ieee80211_free_hw(priv->hw);
- out:
-	return err;
-}
-
-static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
-{
-	struct iwl_priv *priv = pci_get_drvdata(pdev);
-	unsigned long flags;
-
-	if (!priv)
-		return;
-
-	wait_for_completion(&priv->_4965.firmware_loading_complete);
-
-	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
-	iwl_legacy_dbgfs_unregister(priv);
-	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
-
-	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop() to be
-	 * called and then iwl4965_down(); since we are removing the device,
-	 * we need to set the STATUS_EXIT_PENDING bit.
-	 */
-	set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	iwl_legacy_leds_exit(priv);
-
-	if (priv->mac80211_registered) {
-		ieee80211_unregister_hw(priv->hw);
-		priv->mac80211_registered = 0;
-	} else {
-		iwl4965_down(priv);
-	}
-
-	/*
-	 * Make sure device is reset to low power before unloading driver.
-	 * This may be redundant with iwl4965_down(), but there are paths to
-	 * run iwl4965_down() without calling apm_ops.stop(), and there are
-	 * paths to avoid running iwl4965_down() at all before leaving driver.
-	 * This (inexpensive) call *makes sure* device is reset.
-	 */
-	iwl_legacy_apm_stop(priv);
-
-	/* make sure we flush any pending irq or
-	 * tasklet for the driver
-	 */
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_legacy_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	iwl4965_synchronize_irq(priv);
-
-	iwl4965_dealloc_ucode_pci(priv);
-
-	if (priv->rxq.bd)
-		iwl4965_rx_queue_free(priv, &priv->rxq);
-	iwl4965_hw_txq_ctx_free(priv);
-
-	iwl_legacy_eeprom_free(priv);
-
-
-	/*netif_stop_queue(dev); */
-	flush_workqueue(priv->workqueue);
-
-	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
-	 * priv->workqueue... so we can't take down the workqueue
-	 * until now... */
-	destroy_workqueue(priv->workqueue);
-	priv->workqueue = NULL;
-	iwl_legacy_free_traffic_mem(priv);
-
-	free_irq(priv->pci_dev->irq, priv);
-	pci_disable_msi(priv->pci_dev);
-	pci_iounmap(pdev, priv->hw_base);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
-
-	iwl4965_uninit_drv(priv);
-
-	dev_kfree_skb(priv->beacon_skb);
-
-	ieee80211_free_hw(priv->hw);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
-	iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
-#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
-	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
-	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
-#endif /* CONFIG_IWL4965 */
-
-	{0}
-};
-MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
-
-static struct pci_driver iwl4965_driver = {
-	.name = DRV_NAME,
-	.id_table = iwl4965_hw_card_ids,
-	.probe = iwl4965_pci_probe,
-	.remove = __devexit_p(iwl4965_pci_remove),
-	.driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl4965_init(void)
-{
-
-	int ret;
-	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
-	pr_info(DRV_COPYRIGHT "\n");
-
-	ret = iwl4965_rate_control_register();
-	if (ret) {
-		pr_err("Unable to register rate control algorithm: %d\n", ret);
-		return ret;
-	}
-
-	ret = pci_register_driver(&iwl4965_driver);
-	if (ret) {
-		pr_err("Unable to initialize PCI module\n");
-		goto error_register;
-	}
-
-	return ret;
-
-error_register:
-	iwl4965_rate_control_unregister();
-	return ret;
-}
-
-static void __exit iwl4965_exit(void)
-{
-	pci_unregister_driver(&iwl4965_driver);
-	iwl4965_rate_control_unregister();
-}
-
-module_exit(iwl4965_exit);
-module_init(iwl4965_init);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
-		   int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
similarity index 82%
rename from drivers/net/wireless/iwlegacy/iwl-prph.h
rename to drivers/net/wireless/iwlegacy/prph.h
index 30a4930..ffec4b4 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
 
-#ifndef	__iwl_legacy_prph_h__
-#define __iwl_legacy_prph_h__
+#ifndef	__il_prph_h__
+#define __il_prph_h__
 
 /*
  * Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
 #define APMG_PS_CTRL_VAL_RESET_REQ		(0x04000000)
 #define APMG_PS_CTRL_MSK_PWR_SRC		(0x03000000)
 #define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN		(0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_MAX		(0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX		(0x01000000)	/* 3945 only */
 #define APMG_PS_CTRL_VAL_PWR_SRC_VAUX		(0x02000000)
-#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK	(0x000001E0) /* bit 8:5 */
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK	(0x000001E0)	/* bit 8:5 */
 #define APMG_SVR_DIGITAL_VOLTAGE_1_32		(0x00000060)
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS		(0x00000800)
@@ -120,13 +120,13 @@
  *
  * 1)  Initialization -- performs hardware calibration and sets up some
  *     internal data, then notifies host via "initialize alive" notification
- *     (struct iwl_init_alive_resp) that it has completed all of its work.
+ *     (struct il_init_alive_resp) that it has completed all of its work.
  *     After signal from host, it then loads and starts the runtime program.
  *     The initialization program must be used when initially setting up the
  *     NIC after loading the driver.
  *
  * 2)  Runtime/Protocol -- performs all normal runtime operations.  This
- *     notifies host via "alive" notification (struct iwl_alive_resp) that it
+ *     notifies host via "alive" notification (struct il_alive_resp) that it
  *     is ready to be used.
  *
  * When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
  *        procedure.
  *
  * This save/restore method is mostly for autonomous power management during
- * normal operation (result of POWER_TABLE_CMD).  Platform suspend/resume and
+ * normal operation (result of C_POWER_TBL).  Platform suspend/resume and
  * RFKILL should use complete restarts (with total re-initialization) of uCode,
  * allowing total shutdown (including BSM memory).
  *
@@ -202,19 +202,19 @@
  */
 
 /* BSM bit fields */
-#define BSM_WR_CTRL_REG_BIT_START     (0x80000000) /* start boot load now */
-#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000) /* enable boot after pwrup*/
-#define BSM_DRAM_INST_LOAD            (0x80000000) /* start program load now */
+#define BSM_WR_CTRL_REG_BIT_START     (0x80000000)	/* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000)	/* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD            (0x80000000)	/* start program load now */
 
 /* BSM addresses */
 #define BSM_BASE                     (PRPH_BASE + 0x3400)
 #define BSM_END                      (PRPH_BASE + 0x3800)
 
-#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000) /* ctl and status */
-#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004) /* source in BSM mem */
-#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008) /* dest in SRAM mem */
-#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C) /* bytes */
-#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010) /* bit 0:  1 == done */
+#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000)	/* ctl and status */
+#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004)	/* source in BSM mem */
+#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008)	/* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C)	/* bytes */
+#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010)	/* bit 0:  1 == done */
 
 /*
  * Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
  * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
  */
 #define BSM_SRAM_LOWER_BOUND         (PRPH_BASE + 0x3800)
-#define BSM_SRAM_SIZE			(1024) /* bytes */
-
+#define BSM_SRAM_SIZE			(1024)	/* bytes */
 
 /* 3945 Tx scheduler registers */
 #define ALM_SCD_BASE                        (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
  * but one DMA channel may take input from several queues.
  *
  * Tx DMA FIFOs have dedicated purposes.  For 4965, they are used as follows
- * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ * (cf. default_queue_to_tx_fifo in 4965.c):
  *
  * 0 -- EDCA BK (background) frames, lowest priority
  * 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
  * The driver sets up each queue to work in one of two modes:
  *
  * 1)  Scheduler-Ack, in which the scheduler automatically supports a
- *     block-ack (BA) window of up to 64 TFDs.  In this mode, each queue
+ *     block-ack (BA) win of up to 64 TFDs.  In this mode, each queue
  *     contains TFDs for a unique combination of Recipient Address (RA)
  *     and Traffic Identifier (TID), that is, traffic of a given
  *     Quality-Of-Service (QOS) priority, destined for a single station.
  *
  *     In scheduler-ack mode, the scheduler keeps track of the Tx status of
- *     each frame within the BA window, including whether it's been transmitted,
+ *     each frame within the BA win, including whether it's been transmitted,
  *     and whether it's been acknowledged by the receiving station.  The device
  *     automatically processes block-acks received from the receiving STA,
  *     and reschedules un-acked frames to be retransmitted (successful
  *     Tx completion may end up being out-of-order).
  *
  *     The driver must maintain the queue's Byte Count table in host DRAM
- *     (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ *     (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
  *     This mode does not support fragmentation.
  *
  * 2)  FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
  */
 
 /**
- * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
  * can keep track of at one time when creating block-ack chains of frames.
  * Note that "64" matches the number of ack bits in a block-ack packet.
  * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
- * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
  */
 #define SCD_WIN_SIZE				64
 #define SCD_FRAME_LIMIT				64
 
 /* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
-#define IWL49_SCD_START_OFFSET		0xa02c00
+#define IL49_SCD_START_OFFSET		0xa02c00
 
 /*
  * 4965 tells driver SRAM address for internal scheduler structs via this reg.
  * Value is valid only after "Alive" response from uCode.
  */
-#define IWL49_SCD_SRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x0)
+#define IL49_SCD_SRAM_BASE_ADDR           (IL49_SCD_START_OFFSET + 0x0)
 
 /*
  * Driver may need to update queue-empty bits after changing queue's
- * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
  * scheduler is not tracking what's happening).
  * Bit fields:
  * 31-16:  Write mask -- 1: update empty bit, 0: don't change empty bit
  * 15-00:  Empty state, one for each queue -- 1: empty, 0: non-empty
  * NOTE:  This register is not used by Linux driver.
  */
-#define IWL49_SCD_EMPTY_BITS               (IWL49_SCD_START_OFFSET + 0x4)
+#define IL49_SCD_EMPTY_BITS               (IL49_SCD_START_OFFSET + 0x4)
 
 /*
  * Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
  * This register points to BC CB for queue 0, must be on 1024-byte boundary.
  * Others are spaced by 1024 bytes.
  * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
- * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
  * Bit fields:
  * 25-00:  Byte Count CB physical address [35:10], must be 1024-byte aligned.
  */
-#define IWL49_SCD_DRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x10)
+#define IL49_SCD_DRAM_BASE_ADDR           (IL49_SCD_START_OFFSET + 0x10)
 
 /*
  * Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
  * Bit fields:
  *  7- 0:  Enable (1), disable (0), one bit for each channel 0-7
  */
-#define IWL49_SCD_TXFACT                   (IWL49_SCD_START_OFFSET + 0x1c)
+#define IL49_SCD_TXFACT                   (IL49_SCD_START_OFFSET + 0x1c)
 /*
- * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
  * Initialized and updated by driver as new TFDs are added to queue.
- * NOTE:  If using Block Ack, index must correspond to frame's
- *        Start Sequence Number; index = (SSN & 0xff)
+ * NOTE:  If using Block Ack, idx must correspond to frame's
+ *        Start Sequence Number; idx = (SSN & 0xff)
  * NOTE:  Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
  */
-#define IWL49_SCD_QUEUE_WRPTR(x)  (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+#define IL49_SCD_QUEUE_WRPTR(x)  (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
 
 /*
- * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
- * For FIFO mode, index indicates next frame to transmit.
- * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
  * Initialized by driver, updated by scheduler.
  */
-#define IWL49_SCD_QUEUE_RDPTR(x)  (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+#define IL49_SCD_QUEUE_RDPTR(x)  (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
 
 /*
  * Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
 * NOTE:  If driver sets up queue for chain mode, it should also set up
  *        Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x).
  */
-#define IWL49_SCD_QUEUECHAIN_SEL  (IWL49_SCD_START_OFFSET + 0xd0)
+#define IL49_SCD_QUEUECHAIN_SEL  (IL49_SCD_START_OFFSET + 0xd0)
 
 /*
  * Select which queues interrupt driver when scheduler increments
- * a queue's read pointer (index).
+ * a queue's read pointer (idx).
  * Bit fields:
  * 31-16:  Reserved
  * 15-00:  Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
  * NOTE:  This functionality is apparently a no-op; driver relies on interrupts
  *        from Rx queue to read Tx command responses and update Tx queues.
  */
-#define IWL49_SCD_INTERRUPT_MASK  (IWL49_SCD_START_OFFSET + 0xe4)
+#define IL49_SCD_INTERRUPT_MASK  (IL49_SCD_START_OFFSET + 0xe4)
 
 /*
  * Queue search status registers.  One for each queue.
@@ -414,7 +413,7 @@
  *        Driver should init to "1" for aggregation mode, or "0" otherwise.
  *   7-6: Driver should init to "0"
  *     5: Window Size Left; indicates whether scheduler can request
- *        another TFD, based on window size, etc.  Driver should init
+ *        another TFD, based on win size, etc.  Driver should init
  *        this bit to "1" for aggregation mode, or "0" for non-agg.
  *   4-1: Tx FIFO to use (range 0-7).
  *     0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
  * NOTE:  If enabling Scheduler-ACK mode, chain mode should also be enabled
  *        via SCD_QUEUECHAIN_SEL.
  */
-#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
-	(IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+	(IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
 
 /* Bit field positions */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE	(0)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF	(1)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL	(5)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK	(8)
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE	(0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF	(1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL	(5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK	(8)
 
 /* Write masks */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN	(10)
-#define IWL49_SCD_QUEUE_STTS_REG_MSK		(0x0007FC00)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN	(10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK		(0x0007FC00)
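/*
 * Illustrative sketch (not part of this patch): a queue is typically
 * activated in Scheduler-ACK mode by composing its status-bits register
 * from the bit positions above.  The il_wr_prph() helper and the
 * txq_id/tx_fifo arguments are assumptions used here only for
 * illustration.
 */
static void il49_scd_activate_queue(struct il_priv *il, int txq_id, int tx_fifo)
{
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);
}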
 
 /**
  * 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
  * each queue's entry as follows:
  *
  * LS Dword bit fields:
- *  0-06:  Max Tx window size for Scheduler-ACK.  Driver should init to 64.
+ *  0-06:  Max Tx win size for Scheduler-ACK.  Driver should init to 64.
  *
  * MS Dword bit fields:
  * 16-22:  Frame limit.  Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
  * Init must be done after driver receives "Alive" response from 4965 uCode,
  * and when setting up queue for aggregation.
  */
-#define IWL49_SCD_CONTEXT_DATA_OFFSET			0x380
-#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
-			(IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+#define IL49_SCD_CONTEXT_DATA_OFFSET			0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+			(IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
 
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS		(0)
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK		(0x0000007F)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS		(0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK		(0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
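/*
 * Illustrative sketch (not part of this patch): programming a queue's
 * two-dword scheduler context entry from the POS/MSK values above.  The
 * SRAM write helper il_write_targ_mem() and the scd_base_addr argument
 * (taken from the "Alive" response in the real driver) are assumptions
 * used here only for illustration.
 */
static void il49_scd_set_queue_ctx(struct il_priv *il, u32 scd_base_addr,
				   int txq_id, u32 win_size, u32 frame_limit)
{
	u32 addr = scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	/* LS dword: max Tx window size for Scheduler-ACK (typically 64) */
	il_write_targ_mem(il, addr,
			  (win_size << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
			  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	/* MS dword: frame limit (typically 10) */
	il_write_targ_mem(il, addr + sizeof(u32),
			  (frame_limit << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}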
 
 /*
  * Tx Status Bitmap
@@ -486,7 +485,7 @@
  * "Alive" notification from uCode.  Area is used only by device itself;
  * no other support (besides clearing) is required from driver.
  */
-#define IWL49_SCD_TX_STTS_BITMAP_OFFSET		0x400
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET		0x400
 
 /*
  * RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
  * When queue is in Scheduler-ACK mode, frames placed in that queue must be
  * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
  * one QOS priority level destined for one station (for this wireless link,
- * not final destination).  The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * not final destination).  The SCD_TRANSLATE_TBL area provides 16 16-bit
  * mappings, one for each of the 16 queues.  If queue is not in Scheduler-ACK
  * mode, the device ignores the mapping value.
  *
@@ -508,16 +507,16 @@
  * must read a dword-aligned value from device SRAM, replace the 16-bit map
  * value of interest, and write the dword value back into device SRAM.
  */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET		0x500
+#define IL49_SCD_TRANSLATE_TBL_OFFSET		0x500
 
 /* Find translation table dword to read/write for given queue */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
-	((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+	((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
 
-#define IWL_SCD_TXFIFO_POS_TID			(0)
-#define IWL_SCD_TXFIFO_POS_RA			(4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK	(0x01FF)
+#define IL_SCD_TXFIFO_POS_TID			(0)
+#define IL_SCD_TXFIFO_POS_RA			(4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK	(0x01FF)
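/*
 * Illustrative sketch (not part of this patch): because each dword in the
 * translation table holds two 16-bit RAxTID maps, updating one queue's
 * entry is a read-modify-write of the dword-aligned SRAM word selected by
 * IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE().  The il_read_targ_mem()/
 * il_write_targ_mem() helpers and the scd_base_addr argument are
 * assumptions used here only for illustration.
 */
static void il49_set_q2ratid(struct il_priv *il, u32 scd_base_addr,
			     int txq_id, u16 ra_tid)
{
	u32 addr = scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u32 dw = il_read_targ_mem(il, addr);
	u16 map = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	if (txq_id & 0x1)	/* odd queue -> upper half of the dword */
		dw = (map << 16) | (dw & 0x0000FFFF);
	else			/* even queue -> lower half */
		dw = map | (dw & 0xFFFF0000);

	il_write_targ_mem(il, addr, dw);
}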
 
 /*********************** END TX SCHEDULER *************************************/
 
-#endif				/* __iwl_legacy_prph_h__ */
+#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 57703d5..ae08498 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,12 +102,28 @@
 	  occur.
 endmenu
 
-config IWLWIFI_DEVICE_SVTOOL
-	bool "iwlwifi device svtool support"
+config IWLWIFI_DEVICE_TESTMODE
+	def_bool y
 	depends on IWLWIFI
-	select NL80211_TESTMODE
+	depends on NL80211_TESTMODE
 	help
-	  This option enables the svtool support for iwlwifi device through
-	  NL80211_TESTMODE. svtool is a software validation tool that runs in
-	  the user space and interacts with the device in the kernel space
-	  through the generic netlink message via NL80211_TESTMODE channel.
+	  This option enables testmode support for the iwlwifi device through
+	  NL80211_TESTMODE. It allows user space validation applications to
+	  interact with the device through generic netlink messages via the
+	  NL80211_TESTMODE channel.
+
+config IWLWIFI_P2P
+       bool "iwlwifi experimental P2P support"
+       depends on IWLWIFI
+       help
+         This option enables experimental P2P support for some devices
+         based on microcode support. Since P2P support is still under
+         development, this option may enable it now for devices that
+         later turn out not to support it, due to microcode
+         restrictions.
+
+         To determine if your microcode supports the experimental P2P
+         offered by this option, check if the driver advertises AP
+         support when it is loaded.
+
+         Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index c73e5ed..9dc84a7 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
 # WIFI
 obj-$(CONFIG_IWLWIFI)	+= iwlwifi.o
-iwlwifi-objs		:= iwl-agn.o iwl-agn-rs.o
-iwlwifi-objs		+= iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs		:= iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
+iwlwifi-objs		+= iwl-ucode.o iwl-agn-tx.o
 iwlwifi-objs		+= iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
 iwlwifi-objs		+= iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
 
@@ -18,7 +18,7 @@
 
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
 
 CFLAGS_iwl-devtrace.o := -I$(src)
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index dd008b0..1ef7bfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -124,10 +124,10 @@
 {
 	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
 	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
-		priv->cfg->base_params->num_of_queues =
+		cfg(priv)->base_params->num_of_queues =
 			iwlagn_mod_params.num_of_queues;
 
-	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+	hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
 	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -135,28 +135,19 @@
 
 	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ);
 
-	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-	if (priv->cfg->rx_with_siso_diversity)
+	hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+	if (cfg(priv)->rx_with_siso_diversity)
 		hw_params(priv).rx_chains_num = 1;
 	else
 		hw_params(priv).rx_chains_num =
-			num_of_ant(priv->cfg->valid_rx_ant);
-	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
-	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+			num_of_ant(cfg(priv)->valid_rx_ant);
+	hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+	hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
 
 	iwl1000_set_ct_threshold(priv);
 
 	/* Set initial sensitivity parameters */
-	/* Set initial calibration set */
 	hw_params(priv).sens = &iwl1000_sensitivity;
-	hw_params(priv).calib_init_cfg =
-			BIT(IWL_CALIB_XTAL)		|
-			BIT(IWL_CALIB_LO)		|
-			BIT(IWL_CALIB_TX_IQ) 		|
-			BIT(IWL_CALIB_TX_IQ_PERD)	|
-			BIT(IWL_CALIB_BASE_BAND);
-	if (priv->cfg->need_dc_calib)
-		hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7943197..0946933 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -86,7 +86,7 @@
 {
 	iwl_rf_config(priv);
 
-	if (priv->cfg->iq_invert)
+	if (cfg(priv)->iq_invert)
 		iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
 			    CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
 }
@@ -120,10 +120,10 @@
 {
 	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
 	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
-		priv->cfg->base_params->num_of_queues =
+		cfg(priv)->base_params->num_of_queues =
 			iwlagn_mod_params.num_of_queues;
 
-	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+	hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
 	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -131,29 +131,19 @@
 
 	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ);
 
-	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-	if (priv->cfg->rx_with_siso_diversity)
+	hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+	if (cfg(priv)->rx_with_siso_diversity)
 		hw_params(priv).rx_chains_num = 1;
 	else
 		hw_params(priv).rx_chains_num =
-			num_of_ant(priv->cfg->valid_rx_ant);
-	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
-	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+			num_of_ant(cfg(priv)->valid_rx_ant);
+	hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+	hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
 
 	iwl2000_set_ct_threshold(priv);
 
 	/* Set initial sensitivity parameters */
-	/* Set initial calibration set */
 	hw_params(priv).sens = &iwl2000_sensitivity;
-	hw_params(priv).calib_init_cfg =
-		BIT(IWL_CALIB_XTAL)             |
-		BIT(IWL_CALIB_LO)               |
-		BIT(IWL_CALIB_TX_IQ)            |
-		BIT(IWL_CALIB_BASE_BAND);
-	if (priv->cfg->need_dc_calib)
-		hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
-	if (priv->cfg->need_temp_offset_calib)
-		hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
 
 	return 0;
 }
@@ -258,25 +248,19 @@
 	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.lib = &iwl2000_lib,					\
 	.base_params = &iwl2000_base_params,			\
-	.need_dc_calib = true,					\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
 	.iq_invert = true					\
 
 struct iwl_cfg iwl2000_2bgn_cfg = {
-	.name = "2000 Series 2x2 BGN",
+	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
 	IWL_DEVICE_2000,
 	.ht_params = &iwl2000_ht_params,
 };
 
-struct iwl_cfg iwl2000_2bg_cfg = {
-	.name = "2000 Series 2x2 BG",
-	IWL_DEVICE_2000,
-};
-
 struct iwl_cfg iwl2000_2bgn_d_cfg = {
-	.name = "2000D Series 2x2 BGN",
+	.name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
 	IWL_DEVICE_2000,
 	.ht_params = &iwl2000_ht_params,
 };
@@ -291,7 +275,6 @@
 	.lib = &iwl2030_lib,					\
 	.base_params = &iwl2030_base_params,			\
 	.bt_params = &iwl2030_bt_params,			\
-	.need_dc_calib = true,					\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
@@ -299,16 +282,11 @@
 	.iq_invert = true					\
 
 struct iwl_cfg iwl2030_2bgn_cfg = {
-	.name = "2000 Series 2x2 BGN/BT",
+	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
 	IWL_DEVICE_2030,
 	.ht_params = &iwl2000_ht_params,
 };
 
-struct iwl_cfg iwl2030_2bg_cfg = {
-	.name = "2000 Series 2x2 BG/BT",
-	IWL_DEVICE_2030,
-};
-
 #define IWL_DEVICE_105						\
 	.fw_name_pre = IWL105_FW_PRE,				\
 	.ucode_api_max = IWL105_UCODE_API_MAX,			\
@@ -318,7 +296,6 @@
 	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.lib = &iwl2000_lib,					\
 	.base_params = &iwl2000_base_params,			\
-	.need_dc_calib = true,					\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
@@ -326,19 +303,14 @@
 	.rx_with_siso_diversity = true,				\
 	.iq_invert = true					\
 
-struct iwl_cfg iwl105_bg_cfg = {
-	.name = "105 Series 1x1 BG",
-	IWL_DEVICE_105,
-};
-
 struct iwl_cfg iwl105_bgn_cfg = {
-	.name = "105 Series 1x1 BGN",
+	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
 	IWL_DEVICE_105,
 	.ht_params = &iwl2000_ht_params,
 };
 
 struct iwl_cfg iwl105_bgn_d_cfg = {
-	.name = "105D Series 1x1 BGN",
+	.name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
 	IWL_DEVICE_105,
 	.ht_params = &iwl2000_ht_params,
 };
@@ -353,7 +325,6 @@
 	.lib = &iwl2030_lib,					\
 	.base_params = &iwl2030_base_params,			\
 	.bt_params = &iwl2030_bt_params,			\
-	.need_dc_calib = true,					\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
@@ -361,13 +332,8 @@
 	.rx_with_siso_diversity = true,				\
 	.iq_invert = true					\
 
-struct iwl_cfg iwl135_bg_cfg = {
-	.name = "135 Series 1x1 BG/BT",
-	IWL_DEVICE_135,
-};
-
 struct iwl_cfg iwl135_bgn_cfg = {
-	.name = "135 Series 1x1 BGN/BT",
+	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
 	IWL_DEVICE_135,
 	.ht_params = &iwl2000_ht_params,
 };
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f55fb2d..b3a365f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -134,10 +134,10 @@
 
 #define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF	(-5)
 
-static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
 {
 	u16 temperature, voltage;
-	__le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+	__le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
 				EEPROM_KELVIN_TEMPERATURE);
 
 	temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@
 {
 	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
 	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
-			iwl_temp_calib_to_offset(priv);
+			iwl_temp_calib_to_offset(priv->shrd);
 
 	hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
 }
@@ -166,10 +166,10 @@
 {
 	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
 	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
-		priv->cfg->base_params->num_of_queues =
+		cfg(priv)->base_params->num_of_queues =
 			iwlagn_mod_params.num_of_queues;
 
-	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+	hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
 	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -178,22 +178,15 @@
 	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
 					BIT(IEEE80211_BAND_5GHZ);
 
-	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-	hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
-	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
-	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+	hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+	hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+	hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+	hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
 
 	iwl5000_set_ct_threshold(priv);
 
 	/* Set initial sensitivity parameters */
-	/* Set initial calibration set */
 	hw_params(priv).sens = &iwl5000_sensitivity;
-	hw_params(priv).calib_init_cfg =
-		BIT(IWL_CALIB_XTAL)		|
-		BIT(IWL_CALIB_LO)		|
-		BIT(IWL_CALIB_TX_IQ)		|
-		BIT(IWL_CALIB_TX_IQ_PERD)	|
-		BIT(IWL_CALIB_BASE_BAND);
 
 	return 0;
 }
@@ -202,10 +195,10 @@
 {
 	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
 	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
-		priv->cfg->base_params->num_of_queues =
+		cfg(priv)->base_params->num_of_queues =
 			iwlagn_mod_params.num_of_queues;
 
-	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+	hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
 	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -214,22 +207,15 @@
 	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
 					BIT(IEEE80211_BAND_5GHZ);
 
-	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-	hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
-	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
-	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+	hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+	hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+	hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+	hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
 
 	iwl5150_set_ct_threshold(priv);
 
 	/* Set initial sensitivity parameters */
-	/* Set initial calibration set */
 	hw_params(priv).sens = &iwl5150_sensitivity;
-	hw_params(priv).calib_init_cfg =
-		BIT(IWL_CALIB_LO)		|
-		BIT(IWL_CALIB_TX_IQ)		|
-		BIT(IWL_CALIB_BASE_BAND);
-	if (priv->cfg->need_dc_calib)
-		hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
 
 	return 0;
 }
@@ -237,7 +223,7 @@
 static void iwl5150_temperature(struct iwl_priv *priv)
 {
 	u32 vt = 0;
-	s32 offset =  iwl_temp_calib_to_offset(priv);
+	s32 offset =  iwl_temp_calib_to_offset(priv->shrd);
 
 	vt = le32_to_cpu(priv->statistics.common.temperature);
 	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -434,7 +420,7 @@
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,	\
 	.lib = &iwl5150_lib,					\
 	.base_params = &iwl5000_base_params,			\
-	.need_dc_calib = true,					\
+	.no_xtal_calib = true,					\
 	.led_mode = IWL_LED_BLINK,				\
 	.internal_wimax_coex = true
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c840c78..54b7533 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -46,11 +46,12 @@
 #include "iwl-cfg.h"
 
 /* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 
 /* Lowest firmware API version supported */
@@ -80,7 +81,7 @@
 static void iwl6050_additional_nic_config(struct iwl_priv *priv)
 {
 	/* Indicate calibration version to uCode. */
-	if (iwlagn_eeprom_calib_version(priv) >= 6)
+	if (iwl_eeprom_calib_version(priv->shrd) >= 6)
 		iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
 				CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 }
@@ -88,7 +89,7 @@
 static void iwl6150_additional_nic_config(struct iwl_priv *priv)
 {
 	/* Indicate calibration version to uCode. */
-	if (iwlagn_eeprom_calib_version(priv) >= 6)
+	if (iwl_eeprom_calib_version(priv->shrd) >= 6)
 		iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
 				CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 	iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -101,14 +102,14 @@
 	iwl_rf_config(priv);
 
 	/* no locking required for register write */
-	if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
+	if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
 		/* 2x2 IPA phy type */
 		iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
 			     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
 	}
 	/* do additional nic configuration if needed */
-	if (priv->cfg->additional_nic_config)
-			priv->cfg->additional_nic_config(priv);
+	if (cfg(priv)->additional_nic_config)
+			cfg(priv)->additional_nic_config(priv);
 }
 
 static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -140,10 +141,10 @@
 {
 	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
 	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
-		priv->cfg->base_params->num_of_queues =
+		cfg(priv)->base_params->num_of_queues =
 			iwlagn_mod_params.num_of_queues;
 
-	hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+	hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
 	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
 
 	hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -152,29 +153,19 @@
 	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
 					BIT(IEEE80211_BAND_5GHZ);
 
-	hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-	if (priv->cfg->rx_with_siso_diversity)
+	hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+	if (cfg(priv)->rx_with_siso_diversity)
 		hw_params(priv).rx_chains_num = 1;
 	else
 		hw_params(priv).rx_chains_num =
-			num_of_ant(priv->cfg->valid_rx_ant);
-	hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
-	hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+			num_of_ant(cfg(priv)->valid_rx_ant);
+	hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+	hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
 
 	iwl6000_set_ct_threshold(priv);
 
 	/* Set initial sensitivity parameters */
-	/* Set initial calibration set */
 	hw_params(priv).sens = &iwl6000_sensitivity;
-	hw_params(priv).calib_init_cfg =
-		BIT(IWL_CALIB_XTAL)		|
-		BIT(IWL_CALIB_LO)		|
-		BIT(IWL_CALIB_TX_IQ)		|
-		BIT(IWL_CALIB_BASE_BAND);
-	if (priv->cfg->need_dc_calib)
-		hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
-	if (priv->cfg->need_temp_offset_calib)
-		hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
 
 	return 0;
 }
@@ -364,7 +355,6 @@
 	.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
 	.lib = &iwl6000_lib,					\
 	.base_params = &iwl6000_g2_base_params,			\
-	.need_dc_calib = true,					\
 	.need_temp_offset_calib = true,				\
 	.led_mode = IWL_LED_RF_STATE
 
@@ -406,7 +396,6 @@
 	.lib = &iwl6030_lib,					\
 	.base_params = &iwl6000_g2_base_params,			\
 	.bt_params = &iwl6000_bt_params,			\
-	.need_dc_calib = true,					\
 	.need_temp_offset_calib = true,				\
 	.led_mode = IWL_LED_RF_STATE,				\
 	.adv_pm = true						\
@@ -434,21 +423,11 @@
 };
 
 struct iwl_cfg iwl6035_2agn_cfg = {
-	.name = "6035 Series 2x2 AGN/BT",
+	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
 	IWL_DEVICE_6030,
 	.ht_params = &iwl6000_ht_params,
 };
 
-struct iwl_cfg iwl6035_2abg_cfg = {
-	.name = "6035 Series 2x2 ABG/BT",
-	IWL_DEVICE_6030,
-};
-
-struct iwl_cfg iwl6035_2bg_cfg = {
-	.name = "6035 Series 2x2 BG/BT",
-	IWL_DEVICE_6030,
-};
-
 struct iwl_cfg iwl1030_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
 	IWL_DEVICE_6030,
@@ -479,6 +458,7 @@
 #define IWL_DEVICE_6000i					\
 	.fw_name_pre = IWL6000_FW_PRE,				\
 	.ucode_api_max = IWL6000_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL6000_UCODE_API_OK,			\
 	.ucode_api_min = IWL6000_UCODE_API_MIN,			\
 	.valid_tx_ant = ANT_BC,		/* .cfg overwrite */	\
 	.valid_rx_ant = ANT_BC,		/* .cfg overwrite */	\
@@ -516,7 +496,6 @@
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,	\
 	.base_params = &iwl6050_base_params,			\
-	.need_dc_calib = true,					\
 	.led_mode = IWL_LED_BLINK,				\
 	.internal_wimax_coex = true
 
@@ -540,7 +519,6 @@
 	.eeprom_ver = EEPROM_6150_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,	\
 	.base_params = &iwl6050_base_params,			\
-	.need_dc_calib = true,					\
 	.led_mode = IWL_LED_BLINK,				\
 	.internal_wimax_coex = true
 
@@ -559,17 +537,17 @@
 	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
 	.fw_name_pre = IWL6000_FW_PRE,
 	.ucode_api_max = IWL6000_UCODE_API_MAX,
+	.ucode_api_ok = IWL6000_UCODE_API_OK,
 	.ucode_api_min = IWL6000_UCODE_API_MIN,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
 	.lib = &iwl6000_lib,
 	.base_params = &iwl6000_base_params,
 	.ht_params = &iwl6000_ht_params,
-	.need_dc_calib = true,
 	.led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 03bac48..50ff849 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -82,56 +82,64 @@
 	u32 beacon_energy_c;
 };
 
-int iwl_send_calib_results(struct iwl_priv *priv)
+int iwl_send_calib_results(struct iwl_trans *trans)
 {
-	int ret = 0;
-	int i = 0;
-
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_PHY_CALIBRATION_CMD,
 		.flags = CMD_SYNC,
 	};
+	struct iwl_calib_result *res;
 
-	for (i = 0; i < IWL_CALIB_MAX; i++) {
-		if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
-		    priv->calib_results[i].buf) {
-			hcmd.len[0] = priv->calib_results[i].buf_len;
-			hcmd.data[0] = priv->calib_results[i].buf;
-			hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-			ret = iwl_trans_send_cmd(trans(priv), &hcmd);
-			if (ret) {
-				IWL_ERR(priv, "Error %d iteration %d\n",
-					ret, i);
-				break;
-			}
+	list_for_each_entry(res, &trans->calib_results, list) {
+		int ret;
+
+		hcmd.len[0] = res->cmd_len;
+		hcmd.data[0] = &res->hdr;
+		hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+		ret = iwl_trans_send_cmd(trans, &hcmd);
+		if (ret) {
+			IWL_ERR(trans, "Error %d on calib cmd %d\n",
+				ret, res->hdr.op_code);
+			return ret;
 		}
 	}
 
-	return ret;
-}
-
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
-{
-	if (res->buf_len != len) {
-		kfree(res->buf);
-		res->buf = kzalloc(len, GFP_ATOMIC);
-	}
-	if (unlikely(res->buf == NULL))
-		return -ENOMEM;
-
-	res->buf_len = len;
-	memcpy(res->buf, buf, len);
 	return 0;
 }
 
-void iwl_calib_free_results(struct iwl_priv *priv)
+int iwl_calib_set(struct iwl_trans *trans,
+		  const struct iwl_calib_hdr *cmd, int len)
 {
-	int i;
+	struct iwl_calib_result *res, *tmp;
 
-	for (i = 0; i < IWL_CALIB_MAX; i++) {
-		kfree(priv->calib_results[i].buf);
-		priv->calib_results[i].buf = NULL;
-		priv->calib_results[i].buf_len = 0;
+	res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+		      GFP_ATOMIC);
+	if (!res)
+		return -ENOMEM;
+	memcpy(&res->hdr, cmd, len);
+	res->cmd_len = len;
+
+	list_for_each_entry(tmp, &trans->calib_results, list) {
+		if (tmp->hdr.op_code == res->hdr.op_code) {
+			list_replace(&tmp->list, &res->list);
+			kfree(tmp);
+			return 0;
+		}
+	}
+
+	/* wasn't in list already */
+	list_add_tail(&res->list, &trans->calib_results);
+
+	return 0;
+}
+
+void iwl_calib_free_results(struct iwl_trans *trans)
+{
+	struct iwl_calib_result *res, *tmp;
+
+	list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+		list_del(&res->list);
+		kfree(res);
 	}
 }
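/*
 * Illustrative usage sketch (not part of this patch): with the new
 * list-based storage, a calibration-result notification handler stashes
 * each received payload with iwl_calib_set(), and the whole set is later
 * replayed to the runtime uCode through iwl_send_calib_results().  The
 * packet-length handling below (len_n_flags, FH_RSCSR_FRAME_SIZE_MSK,
 * dropping the command header) is an assumption for illustration only.
 */
static void iwl_store_calib_result(struct iwl_trans *trans,
				   struct iwl_rx_packet *pkt)
{
	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* drop the Rx command header; what remains is calib header + data */
	len -= sizeof(struct iwl_cmd_header);

	iwl_calib_set(trans, (struct iwl_calib_hdr *)pkt->u.raw, len);
}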
 
@@ -505,7 +513,7 @@
 
 	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
 
-	if (priv->cfg->base_params->hd_v2) {
+	if (cfg(priv)->base_params->hd_v2) {
 		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
 			HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
 		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -839,7 +847,7 @@
 			 * connect the first valid tx chain
 			 */
 			first_chain =
-				find_first_chain(priv->cfg->valid_tx_ant);
+				find_first_chain(cfg(priv)->valid_tx_ant);
 			data->disconn_array[first_chain] = 0;
 			active_chains |= BIT(first_chain);
 			IWL_DEBUG_CALIB(priv,
@@ -882,7 +890,7 @@
 			continue;
 		}
 
-		delta_g = (priv->cfg->base_params->chain_noise_scale *
+		delta_g = (cfg(priv)->base_params->chain_noise_scale *
 			((s32)average_noise[default_chain] -
 			(s32)average_noise[i])) / 1500;
 
@@ -1039,8 +1047,8 @@
 		return;
 
 	/* Analyze signal for disconnected antenna */
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist) {
+	if (cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist) {
 		/* Disable disconnected antenna algorithm for advanced
 		   bt coex, assuming valid antennas are connected */
 		data->active_chains = hw_params(priv).valid_rx_ant;
@@ -1074,7 +1082,7 @@
 
 	iwlagn_gain_computation(priv, average_noise,
 				min_average_noise_antenna_i, min_average_noise,
-				find_first_chain(priv->cfg->valid_rx_ant));
+				find_first_chain(cfg(priv)->valid_rx_ant));
 
 	/* Some power changes may have been made during the calibration.
 	 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index a869fc9..10275ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -72,8 +72,4 @@
 void iwl_init_sensitivity(struct iwl_priv *priv);
 void iwl_reset_run_time_calib(struct iwl_priv *priv);
 
-int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
-void iwl_calib_free_results(struct iwl_priv *priv);
-
 #endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 1a52ed2..64cf439 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -32,6 +32,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 
+#include "iwl-wifi.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-io.h"
@@ -92,11 +93,11 @@
 	iwl_tt_handler(priv);
 }
 
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
 {
 	struct iwl_eeprom_calib_hdr *hdr;
 
-	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
 							EEPROM_CALIB_ALL);
 	return hdr->version;
 
@@ -105,7 +106,7 @@
 /*
  * EEPROM
  */
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
 {
 	u16 offset = 0;
 
@@ -114,31 +115,31 @@
 
 	switch (address & INDIRECT_TYPE_MSK) {
 	case INDIRECT_HOST:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
 		break;
 	case INDIRECT_GENERAL:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
 		break;
 	case INDIRECT_REGULATORY:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
 		break;
 	case INDIRECT_TXP_LIMIT:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
 		break;
 	case INDIRECT_TXP_LIMIT_SIZE:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
 		break;
 	case INDIRECT_CALIBRATION:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
 		break;
 	case INDIRECT_PROCESS_ADJST:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
 		break;
 	case INDIRECT_OTHERS:
-		offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
 		break;
 	default:
-		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+		IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
 		address & INDIRECT_TYPE_MSK);
 		break;
 	}
@@ -147,11 +148,11 @@
 	return (address & ADDRESS_MSK) + (offset << 1);
 }
 
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
 {
-	u32 address = eeprom_indirect_address(priv, offset);
-	BUG_ON(address >= priv->cfg->base_params->eeprom_size);
-	return &priv->eeprom[address];
+	u32 address = eeprom_indirect_address(shrd, offset);
+	BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
+	return &shrd->eeprom[address];
 }
 
 struct iwl_mod_params iwlagn_mod_params = {
@@ -232,7 +233,7 @@
 				IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
 				IWL_PAN_SCD_MULTICAST_MSK;
 
-	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+	if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
 		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
 
 	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -374,15 +375,15 @@
 	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
 			sizeof(basic.bt3_lookup_table));
 
-	if (priv->cfg->bt_params) {
-		if (priv->cfg->bt_params->bt_session_2) {
+	if (cfg(priv)->bt_params) {
+		if (cfg(priv)->bt_params->bt_session_2) {
 			bt_cmd_2000.prio_boost = cpu_to_le32(
-				priv->cfg->bt_params->bt_prio_boost);
+				cfg(priv)->bt_params->bt_prio_boost);
 			bt_cmd_2000.tx_prio_boost = 0;
 			bt_cmd_2000.rx_prio_boost = 0;
 		} else {
 			bt_cmd_6000.prio_boost =
-				priv->cfg->bt_params->bt_prio_boost;
+				cfg(priv)->bt_params->bt_prio_boost;
 			bt_cmd_6000.tx_prio_boost = 0;
 			bt_cmd_6000.rx_prio_boost = 0;
 		}
@@ -430,7 +431,7 @@
 		       priv->bt_full_concurrent ?
 		       "full concurrency" : "3-wire");
 
-	if (priv->cfg->bt_params->bt_session_2) {
+	if (cfg(priv)->bt_params->bt_session_2) {
 		memcpy(&bt_cmd_2000.basic, &basic,
 			sizeof(basic));
 		ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
@@ -799,8 +800,8 @@
  */
 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
 {
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist &&
+	if (cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist &&
 	    (priv->bt_full_concurrent ||
 	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
 		/*
@@ -827,6 +828,7 @@
 	case IEEE80211_SMPS_STATIC:
 	case IEEE80211_SMPS_DYNAMIC:
 		return IWL_NUM_IDLE_CHAINS_SINGLE;
+	case IEEE80211_SMPS_AUTOMATIC:
 	case IEEE80211_SMPS_OFF:
 		return active_cnt;
 	default:
@@ -870,8 +872,8 @@
 	else
 		active_chains = hw_params(priv).valid_rx_ant;
 
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist &&
+	if (cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist &&
 	    (priv->bt_full_concurrent ||
 	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
 		/*
@@ -933,53 +935,359 @@
 	return ant;
 }
 
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
-				   struct iwl_notification_wait *wait_entry,
-				   u8 cmd,
-				   void (*fn)(struct iwl_priv *priv,
-					      struct iwl_rx_packet *pkt,
-					      void *data),
-				   void *fn_data)
+#ifdef CONFIG_PM_SLEEP
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
 {
-	wait_entry->fn = fn;
-	wait_entry->fn_data = fn_data;
-	wait_entry->cmd = cmd;
-	wait_entry->triggered = false;
-	wait_entry->aborted = false;
+	int i;
 
-	spin_lock_bh(&priv->notif_wait_lock);
-	list_add(&wait_entry->list, &priv->notif_waits);
-	spin_unlock_bh(&priv->notif_wait_lock);
+	for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+		out[i] = cpu_to_le16(p1k[i]);
 }
 
-int iwlagn_wait_notification(struct iwl_priv *priv,
-			     struct iwl_notification_wait *wait_entry,
-			     unsigned long timeout)
+struct wowlan_key_data {
+	struct iwl_rxon_context *ctx;
+	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+	struct iwlagn_wowlan_tkip_params_cmd *tkip;
+	const u8 *bssid;
+	bool error, use_rsc_tsc, use_tkip;
+};
+
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct ieee80211_key_conf *key,
+			       void *_data)
 {
-	int ret;
+	struct iwl_priv *priv = hw->priv;
+	struct wowlan_key_data *data = _data;
+	struct iwl_rxon_context *ctx = data->ctx;
+	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+	struct iwlagn_p1k_cache *rx_p1ks;
+	u8 *rx_mic_key;
+	struct ieee80211_key_seq seq;
+	u32 cur_rx_iv32 = 0;
+	u16 p1k[IWLAGN_P1K_SIZE];
+	int ret, i;
 
-	ret = wait_event_timeout(priv->notif_waitq,
-				 wait_entry->triggered || wait_entry->aborted,
-				 timeout);
+	mutex_lock(&priv->shrd->mutex);
 
-	spin_lock_bh(&priv->notif_wait_lock);
-	list_del(&wait_entry->list);
-	spin_unlock_bh(&priv->notif_wait_lock);
+	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+	     !sta && !ctx->key_mapping_keys)
+		ret = iwl_set_default_wep_key(priv, ctx, key);
+	else
+		ret = iwl_set_dynamic_key(priv, ctx, key, sta);
 
-	if (wait_entry->aborted)
-		return -EIO;
+	if (ret) {
+		IWL_ERR(priv, "Error setting key during suspend!\n");
+		data->error = true;
+	}
 
-	/* return value is always >= 0 */
-	if (ret <= 0)
-		return -ETIMEDOUT;
-	return 0;
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (sta) {
+			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+			rx_p1ks = data->tkip->rx_uni;
+
+			ieee80211_get_key_tx_seq(key, &seq);
+			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+			memcpy(data->tkip->mic_keys.tx,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+			       IWLAGN_MIC_KEY_SIZE);
+
+			rx_mic_key = data->tkip->mic_keys.rx_unicast;
+		} else {
+			tkip_sc =
+				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+			rx_p1ks = data->tkip->rx_multi;
+			rx_mic_key = data->tkip->mic_keys.rx_mcast;
+		}
+
+		/*
+		 * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
+		 * for checking the IV in the frames.
+		 */
+		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+			ieee80211_get_key_rx_seq(key, i, &seq);
+			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+			/* wrapping isn't allowed, AP must rekey */
+			if (seq.tkip.iv32 > cur_rx_iv32)
+				cur_rx_iv32 = seq.tkip.iv32;
+		}
+
+		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+		ieee80211_get_tkip_rx_p1k(key, data->bssid,
+					  cur_rx_iv32 + 1, p1k);
+		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+		memcpy(rx_mic_key,
+		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+		       IWLAGN_MIC_KEY_SIZE);
+
+		data->use_tkip = true;
+		data->use_rsc_tsc = true;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		if (sta) {
+			u8 *pn = seq.ccmp.pn;
+
+			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+			ieee80211_get_key_tx_seq(key, &seq);
+			aes_tx_sc->pn = cpu_to_le64(
+					(u64)pn[5] |
+					((u64)pn[4] << 8) |
+					((u64)pn[3] << 16) |
+					((u64)pn[2] << 24) |
+					((u64)pn[1] << 32) |
+					((u64)pn[0] << 40));
+		} else
+			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+		/*
+		 * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211 use TID 0 for checking the IV in the frames.
+		 */
+		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+			u8 *pn = seq.ccmp.pn;
+
+			ieee80211_get_key_rx_seq(key, i, &seq);
+			aes_sc->pn = cpu_to_le64(
+					(u64)pn[5] |
+					((u64)pn[4] << 8) |
+					((u64)pn[3] << 16) |
+					((u64)pn[2] << 24) |
+					((u64)pn[1] << 32) |
+					((u64)pn[0] << 40));
+		}
+		data->use_rsc_tsc = true;
+		break;
+	}
+
+	mutex_unlock(&priv->shrd->mutex);
 }
 
-void iwlagn_remove_notification(struct iwl_priv *priv,
-				struct iwl_notification_wait *wait_entry)
+int iwlagn_send_patterns(struct iwl_priv *priv,
+			struct cfg80211_wowlan *wowlan)
 {
-	spin_lock_bh(&priv->notif_wait_lock);
-	list_del(&wait_entry->list);
-	spin_unlock_bh(&priv->notif_wait_lock);
+	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_WOWLAN_PATTERNS,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+		.flags = CMD_SYNC,
+	};
+	int i, err;
+
+	if (!wowlan->n_patterns)
+		return 0;
+
+	cmd.len[0] = sizeof(*pattern_cmd) +
+		wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+	if (!pattern_cmd)
+		return -ENOMEM;
+
+	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+		memcpy(&pattern_cmd->patterns[i].mask,
+			wowlan->patterns[i].mask, mask_len);
+		memcpy(&pattern_cmd->patterns[i].pattern,
+			wowlan->patterns[i].pattern,
+			wowlan->patterns[i].pattern_len);
+		pattern_cmd->patterns[i].mask_size = mask_len;
+		pattern_cmd->patterns[i].pattern_size =
+			wowlan->patterns[i].pattern_len;
+	}
+
+	cmd.data[0] = pattern_cmd;
+	err = iwl_trans_send_cmd(trans(priv), &cmd);
+	kfree(pattern_cmd);
+	return err;
 }
+
+int iwlagn_suspend(struct iwl_priv *priv,
+		struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+	struct iwl_rxon_cmd rxon;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+	struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
+	struct wowlan_key_data key_data = {
+		.ctx = ctx,
+		.bssid = ctx->active.bssid_addr,
+		.use_rsc_tsc = false,
+		.tkip = &tkip_cmd,
+		.use_tkip = false,
+	};
+	int ret, i;
+	u16 seq;
+
+	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+	if (!key_data.rsc_tsc)
+		return -ENOMEM;
+
+	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+	/*
+	 * We know the last used seqno, and the uCode expects to know that
+	 * one, it will increment before TX.
+	 */
+	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+	/*
+	 * For QoS counters, we store the one to use next, so subtract 0x10
+	 * since the uCode will add 0x10 before using the value.
+	 */
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
+		seq -= 0x10;
+		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+	}
+
+	if (wowlan->disconnect)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+	if (wowlan->magic_pkt)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+	if (wowlan->gtk_rekey_failure)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+	if (wowlan->eap_identity_req)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+	if (wowlan->four_way_handshake)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+	if (wowlan->n_patterns)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+	if (wowlan->rfkill_release)
+		d3_cfg_cmd.wakeup_flags |=
+			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
+
+	iwl_scan_cancel_timeout(priv, 200);
+
+	memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+	iwl_trans_stop_device(trans(priv));
+
+	priv->shrd->wowlan = true;
+
+	ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
+	if (ret)
+		goto out;
+
+	/* now configure WoWLAN ucode */
+	ret = iwl_alive_start(priv);
+	if (ret)
+		goto out;
+
+	memcpy(&ctx->staging, &rxon, sizeof(rxon));
+	ret = iwlagn_commit_rxon(priv, ctx);
+	if (ret)
+		goto out;
+
+	ret = iwl_power_update_mode(priv, true);
+	if (ret)
+		goto out;
+
+	if (!iwlagn_mod_params.sw_crypto) {
+		/* mark all keys clear */
+		priv->ucode_key_table = 0;
+		ctx->key_mapping_keys = 0;
+
+		/*
+		 * This needs to be unlocked due to lock ordering
+		 * constraints. Since we're in the suspend path
+		 * that isn't really a problem though.
+		 */
+		mutex_unlock(&priv->shrd->mutex);
+		ieee80211_iter_keys(priv->hw, ctx->vif,
+				    iwlagn_wowlan_program_keys,
+				    &key_data);
+		mutex_lock(&priv->shrd->mutex);
+		if (key_data.error) {
+			ret = -EIO;
+			goto out;
+		}
+
+		if (key_data.use_rsc_tsc) {
+			struct iwl_host_cmd rsc_tsc_cmd = {
+				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+				.flags = CMD_SYNC,
+				.data[0] = key_data.rsc_tsc,
+				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+				.len[0] = sizeof(key_data.rsc_tsc),
+			};
+
+			ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+			if (ret)
+				goto out;
+		}
+
+		if (key_data.use_tkip) {
+			ret = iwl_trans_send_cmd_pdu(trans(priv),
+						 REPLY_WOWLAN_TKIP_PARAMS,
+						 CMD_SYNC, sizeof(tkip_cmd),
+						 &tkip_cmd);
+			if (ret)
+				goto out;
+		}
+
+		if (priv->have_rekey_data) {
+			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+			kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+			ret = iwl_trans_send_cmd_pdu(trans(priv),
+						 REPLY_WOWLAN_KEK_KCK_MATERIAL,
+						 CMD_SYNC, sizeof(kek_kck_cmd),
+						 &kek_kck_cmd);
+			if (ret)
+				goto out;
+		}
+	}
+
+	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+				     sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+	if (ret)
+		goto out;
+
+	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+				 CMD_SYNC, sizeof(wakeup_filter_cmd),
+				 &wakeup_filter_cmd);
+	if (ret)
+		goto out;
+
+	ret = iwlagn_send_patterns(priv, wowlan);
+ out:
+	kfree(key_data.rsc_tsc);
+	return ret;
+}
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 66118ce..334b5ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -298,7 +298,7 @@
 	} else
 		return IWL_MAX_TID_COUNT;
 
-	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+	if (unlikely(tid >= IWL_MAX_TID_COUNT))
 		return IWL_MAX_TID_COUNT;
 
 	tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@
 	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
 	lq_sta->active_mimo3_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
 
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 	/* testmode has higher priority and overwrites the fixed rate */
 	if (priv->tm_fixed_rate)
 		lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@
 	s32 index;
 	struct iwl_traffic_load *tl = NULL;
 
-	if (tid >= TID_MAX_LOAD_COUNT)
+	if (tid >= IWL_MAX_TID_COUNT)
 		return 0;
 
 	tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@
 			      struct iwl_lq_sta *lq_data,
 			      struct ieee80211_sta *sta)
 {
-	if (tid < TID_MAX_LOAD_COUNT)
+	if (tid < IWL_MAX_TID_COUNT)
 		rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
 	else
-		IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
-			tid, TID_MAX_LOAD_COUNT);
+		IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+			tid, IWL_MAX_TID_COUNT);
 }
 
 static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,12 +1081,12 @@
 	if (sta && sta->supp_rates[sband->band])
 		rs_rate_scale_perform(priv, skb, sta, lq_sta);
 
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
 	if ((priv->tm_fixed_rate) &&
 	    (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
 		rs_program_fix_rate(priv, lq_sta);
 #endif
-	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+	if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist)
 		rs_bt_update_lq(priv, ctx, lq_sta);
 }
 
@@ -1458,10 +1458,8 @@
 		break;
 	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
 		/* avoid antenna B unless MIMO */
-		valid_tx_ant =
-			first_antenna(hw_params(priv).valid_tx_ant);
 		if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
-			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+			tbl->action = IWL_LEGACY_SWITCH_SISO;
 		break;
 	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
 	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1636,10 +1634,8 @@
 		break;
 	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
 		/* avoid antenna B unless MIMO */
-		valid_tx_ant =
-			first_antenna(hw_params(priv).valid_tx_ant);
 		if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+			tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
 		break;
 	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
 	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -2277,7 +2273,7 @@
 	tid = rs_tl_add_packet(lq_sta, hdr);
 	if ((tid != IWL_MAX_TID_COUNT) &&
 	    (lq_sta->tx_agg_tid_en & (1 << tid))) {
-		tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
+		tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
 		if (tid_data->agg.state == IWL_AGG_OFF)
 			lq_sta->is_agg = 0;
 		else
@@ -2649,8 +2645,7 @@
 			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
 			    (tid != IWL_MAX_TID_COUNT)) {
 				u8 sta_id = lq_sta->lq.sta_id;
-				tid_data =
-				   &priv->shrd->tid_data[sta_id][tid];
+				tid_data = &priv->tid_data[sta_id][tid];
 				if (tid_data->agg.state == IWL_AGG_OFF) {
 					IWL_DEBUG_RATE(priv,
 						       "try to aggregate tid %d\n",
@@ -2908,7 +2903,7 @@
 	if (sband->band == IEEE80211_BAND_5GHZ)
 		lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
 	lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 	priv->tm_fixed_rate = 0;
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -3059,11 +3054,11 @@
 	 * overwrite if needed, pass aggregation time limit
 	 * to uCode in uSec
 	 */
-	if (priv && priv->cfg->bt_params &&
-	    priv->cfg->bt_params->agg_time_limit &&
+	if (priv && cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->agg_time_limit &&
 	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
 		lq_cmd->agg_params.agg_time_limit =
-			cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
+			cpu_to_le16(cfg(priv)->bt_params->agg_time_limit);
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index f4f6deb..6675b3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -281,7 +281,6 @@
 #define TID_QUEUE_CELL_SPACING	50	/*mS */
 #define TID_QUEUE_MAX_SIZE	20
 #define TID_ROUND_VALUE		5	/* mS */
-#define TID_MAX_LOAD_COUNT	8
 
 #define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
 #define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@
 
 	struct iwl_link_quality_cmd lq;
 	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+	struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
 	u8 tx_agg_tid_en;
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct dentry *rs_sta_dbgfs_scale_table_file;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 5af9e62..b22b297 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -117,6 +117,7 @@
 		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
 		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
 		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+		IWL_CMD(REPLY_D3_CONFIG);
 	default:
 		return "UNKNOWN";
 
@@ -317,7 +318,7 @@
 				 unsigned int msecs)
 {
 	int delta;
-	int threshold = priv->cfg->base_params->plcp_delta_threshold;
+	int threshold = cfg(priv)->base_params->plcp_delta_threshold;
 
 	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
 		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
@@ -582,8 +583,8 @@
 		iwlagn_rx_calc_noise(priv);
 		queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
 	}
-	if (priv->cfg->lib->temperature && change)
-		priv->cfg->lib->temperature(priv);
+	if (cfg(priv)->lib->temperature && change)
+		cfg(priv)->lib->temperature(priv);
 	return 0;
 }
 
@@ -800,7 +801,8 @@
 					       ctx->active.bssid_addr))
 				continue;
 			ctx->last_tx_rejected = false;
-			iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
+			iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
+				"channel got active");
 		}
 	}
 
@@ -1032,6 +1034,50 @@
 	return 0;
 }
 
+static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
+				      struct iwl_rx_mem_buffer *rxb,
+				      struct iwl_device_cmd *cmd)
+{
+	struct iwl_wipan_noa_data *new_data, *old_data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+
+	/* no condition -- we're in softirq */
+	old_data = rcu_dereference_protected(priv->noa_data, true);
+
+	if (noa_notif->noa_active) {
+		u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
+		u32 copylen = len;
+
+		/* EID, len, OUI, subtype */
+		len += 1 + 1 + 3 + 1;
+		/* P2P id, P2P length */
+		len += 1 + 2;
+		copylen += 1 + 2;
+
+		new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+		if (new_data) {
+			new_data->length = len;
+			new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
+			new_data->data[1] = len - 2; /* not counting EID, len */
+			new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+			new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+			new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+			new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
+			memcpy(&new_data->data[6], &noa_notif->noa_attribute,
+			       copylen);
+		}
+	} else
+		new_data = NULL;
+
+	rcu_assign_pointer(priv->noa_data, new_data);
+
+	if (old_data)
+		kfree_rcu(old_data, rcu_head);
+
+	return 0;
+}
+
 /**
  * iwl_setup_rx_handlers - Initialize Rx handler callbacks
  *
@@ -1055,6 +1101,8 @@
 	handlers[BEACON_NOTIFICATION]		= iwlagn_rx_beacon_notif;
 	handlers[REPLY_ADD_STA]			= iwl_add_sta_callback;
 
+	handlers[REPLY_WIPAN_NOA_NOTIFICATION]	= iwlagn_rx_noa_notification;
+
 	/*
 	 * The same handler is used for both the REPLY to a discrete
 	 * statistics request from the host as well as for the periodic
@@ -1083,13 +1131,13 @@
 	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
 
 	/* set up notification wait support */
-	spin_lock_init(&priv->notif_wait_lock);
-	INIT_LIST_HEAD(&priv->notif_waits);
-	init_waitqueue_head(&priv->notif_waitq);
+	spin_lock_init(&priv->shrd->notif_wait_lock);
+	INIT_LIST_HEAD(&priv->shrd->notif_waits);
+	init_waitqueue_head(&priv->shrd->notif_waitq);
 
 	/* Set up BT Rx handlers */
-	if (priv->cfg->lib->bt_rx_handler_setup)
-		priv->cfg->lib->bt_rx_handler_setup(priv);
+	if (cfg(priv)->lib->bt_rx_handler_setup)
+		cfg(priv)->lib->bt_rx_handler_setup(priv);
 
 }
 
@@ -1104,11 +1152,11 @@
 	 * even if the RX handler consumes the RXB we have
 	 * access to it in the notification wait entry.
 	 */
-	if (!list_empty(&priv->notif_waits)) {
+	if (!list_empty(&priv->shrd->notif_waits)) {
 		struct iwl_notification_wait *w;
 
-		spin_lock(&priv->notif_wait_lock);
-		list_for_each_entry(w, &priv->notif_waits, list) {
+		spin_lock(&priv->shrd->notif_wait_lock);
+		list_for_each_entry(w, &priv->shrd->notif_waits, list) {
 			if (w->cmd != pkt->hdr.cmd)
 				continue;
 			IWL_DEBUG_RX(priv,
@@ -1117,11 +1165,11 @@
 				pkt->hdr.cmd);
 			w->triggered = true;
 			if (w->fn)
-				w->fn(priv, pkt, w->fn_data);
+				w->fn(trans(priv), pkt, w->fn_data);
 		}
-		spin_unlock(&priv->notif_wait_lock);
+		spin_unlock(&priv->shrd->notif_wait_lock);
 
-		wake_up_all(&priv->notif_waitq);
+		wake_up_all(&priv->shrd->notif_waitq);
 	}
 
 	if (priv->pre_rx_handler)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a7a6def..1c66594 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -45,7 +45,8 @@
 	send->filter_flags = old_filter;
 
 	if (ret)
-		IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+		IWL_DEBUG_QUIET_RFKILL(priv,
+			"Error clearing ASSOC_MSK on BSS (%d)\n", ret);
 
 	return ret;
 }
@@ -59,7 +60,7 @@
 	u8 old_dev_type = send->dev_type;
 	int ret;
 
-	iwlagn_init_notification_wait(priv, &disable_wait,
+	iwl_init_notification_wait(priv->shrd, &disable_wait,
 				      REPLY_WIPAN_DEACTIVATION_COMPLETE,
 				      NULL, NULL);
 
@@ -73,9 +74,9 @@
 
 	if (ret) {
 		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-		iwlagn_remove_notification(priv, &disable_wait);
+		iwl_remove_notification(priv->shrd, &disable_wait);
 	} else {
-		ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+		ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
 		if (ret)
 			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
 	}
@@ -116,7 +117,7 @@
 	if (ctx->ht.enabled)
 		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
 
-	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+	IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
 		      ctx->qos_data.qos_active,
 		      ctx->qos_data.def_qos_parm.qos_flags);
 
@@ -124,7 +125,7 @@
 			       sizeof(struct iwl_qosparam_cmd),
 			       &ctx->qos_data.def_qos_parm);
 	if (ret)
-		IWL_ERR(priv, "Failed to update QoS\n");
+		IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
 }
 
 static int iwlagn_update_beacon(struct iwl_priv *priv,
@@ -295,9 +296,9 @@
 	}
 
 	if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
-	    priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
+	    cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode)
 		ieee80211_request_smps(ctx->vif,
-				       priv->cfg->ht_params->smps_mode);
+				       cfg(priv)->ht_params->smps_mode);
 
 	return 0;
 }
@@ -444,8 +445,8 @@
 	 * force CTS-to-self frames protection if RTS-CTS is not preferred
 	 * one aggregation protection method
 	 */
-	if (!(priv->cfg->ht_params &&
-	      priv->cfg->ht_params->use_rts_for_aggregation))
+	if (!(cfg(priv)->ht_params &&
+	      cfg(priv)->ht_params->use_rts_for_aggregation))
 		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
 
 	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -559,6 +560,9 @@
 
 	mutex_lock(&priv->shrd->mutex);
 
+	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+		goto out;
+
 	if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
 		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
 		goto out;
@@ -606,8 +610,8 @@
 			if (ctx->ht.enabled) {
 				/* if HT40 is used, it should not change
 				 * after associated except channel switch */
-				if (iwl_is_associated_ctx(ctx) &&
-				     !ctx->ht.is_40mhz)
+				if (!ctx->ht.is_40mhz ||
+						!iwl_is_associated_ctx(ctx))
 					iwlagn_config_ht40(conf, ctx);
 			} else
 				ctx->ht.is_40mhz = false;
@@ -850,7 +854,8 @@
 			if (ctx->last_tx_rejected) {
 				ctx->last_tx_rejected = false;
 				iwl_trans_wake_any_queue(trans(priv),
-							 ctx->ctxid);
+							 ctx->ctxid,
+							 "Disassoc: flush queue");
 			}
 			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 4b2aa1d..7353826 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -130,25 +130,15 @@
 	return iwl_process_add_sta_resp(priv, addsta, pkt);
 }
 
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
-	u16 size = (u16)sizeof(struct iwl_addsta_cmd);
-	struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
-	memcpy(addsta, cmd, size);
-	/* resrved in 5000 */
-	addsta->rate_n_flags = cpu_to_le16(0);
-	return size;
-}
-
 int iwl_send_add_sta(struct iwl_priv *priv,
 		     struct iwl_addsta_cmd *sta, u8 flags)
 {
 	int ret = 0;
-	u8 data[sizeof(*sta)];
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_ADD_STA,
 		.flags = flags,
-		.data = { data, },
+		.data = { sta, },
+		.len = { sizeof(*sta), },
 	};
 	u8 sta_id __maybe_unused = sta->sta.sta_id;
 
@@ -160,7 +150,6 @@
 		might_sleep();
 	}
 
-	cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
 	ret = iwl_trans_send_cmd(trans(priv), &cmd);
 
 	if (ret || (flags & CMD_ASYNC))
@@ -463,6 +452,7 @@
 		       const u8 *addr)
 {
 	unsigned long flags;
+	u8 tid;
 
 	if (!iwl_is_ready(priv->shrd)) {
 		IWL_DEBUG_INFO(priv,
@@ -501,6 +491,10 @@
 		priv->stations[sta_id].lq = NULL;
 	}
 
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+		memset(&priv->tid_data[sta_id][tid], 0,
+			sizeof(priv->tid_data[sta_id][tid]));
+
 	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
 
 	priv->num_stations--;
@@ -647,7 +641,7 @@
 	int ret;
 	struct iwl_addsta_cmd sta_cmd;
 	struct iwl_link_quality_cmd lq;
-	bool active;
+	bool active, have_lq = false;
 
 	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
 	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
@@ -657,7 +651,10 @@
 
 	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
 	sta_cmd.mode = 0;
-	memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+	if (priv->stations[sta_id].lq) {
+		memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+		have_lq = true;
+	}
 
 	active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
 	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -679,7 +676,8 @@
 	if (ret)
 		IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
 			priv->stations[sta_id].sta.sta.addr, ret);
-	iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+	if (have_lq)
+		iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 }
 
 int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
@@ -825,28 +823,6 @@
 	return ret;
 }
 
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
-		       struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	int ret;
-
-	IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
-			   "station %pM\n", sta->addr);
-	mutex_lock(&priv->shrd->mutex);
-	IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
-			sta->addr);
-	ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
-	if (ret)
-		IWL_ERR(priv, "Error removing station %pM\n",
-			sta->addr);
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
 
 void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		     u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
@@ -1459,20 +1435,7 @@
 	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
-static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
-{
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
-	priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
-	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
-	priv->stations[sta_id].sta.sta.modify_mask = 0;
-	priv->stations[sta_id].sta.sleep_tx_count = 0;
-	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
-}
 
 void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
 {
@@ -1489,36 +1452,3 @@
 	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
 
 }
-
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
-			   struct ieee80211_vif *vif,
-			   enum sta_notify_cmd cmd,
-			   struct ieee80211_sta *sta)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	int sta_id;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	switch (cmd) {
-	case STA_NOTIFY_SLEEP:
-		WARN_ON(!sta_priv->client);
-		sta_priv->asleep = true;
-		if (atomic_read(&sta_priv->pending_frames) > 0)
-			ieee80211_sta_block_awake(hw, sta, true);
-		break;
-	case STA_NOTIFY_AWAKE:
-		WARN_ON(!sta_priv->client);
-		if (!sta_priv->asleep)
-			break;
-		sta_priv->asleep = false;
-		sta_id = iwl_sta_id(sta);
-		if (sta_id != IWL_INVALID_STATION)
-			iwl_sta_modify_ps_wake(priv, sta_id);
-		break;
-	default:
-		break;
-	}
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index c27180a..b0dff7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -633,7 +633,7 @@
 	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
 	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
 
-	if (priv->cfg->base_params->adv_thermal_throttle) {
+	if (cfg(priv)->base_params->adv_thermal_throttle) {
 		IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
 		tt->restriction = kcalloc(IWL_TI_STATE_MAX,
 					  sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 35a6b71..c664c27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -74,8 +74,8 @@
 	else if (ieee80211_is_back_req(fc))
 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
 	else if (info->band == IEEE80211_BAND_2GHZ &&
-		 priv->cfg->bt_params &&
-		 priv->cfg->bt_params->advanced_bt_coexist &&
+		 cfg(priv)->bt_params &&
+		 cfg(priv)->bt_params->advanced_bt_coexist &&
 		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
 		 ieee80211_is_reassoc_req(fc) ||
 		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -91,7 +91,10 @@
 		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	} else {
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		else
+			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
 	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
@@ -148,7 +151,7 @@
 	if (ieee80211_is_data(fc)) {
 		tx_cmd->initial_rate_index = 0;
 		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 		if (priv->tm_fixed_rate) {
 			/*
 			 * rate overwrite by testmode
@@ -161,7 +164,8 @@
 		}
 #endif
 		return;
-	}
+	} else if (ieee80211_is_back_req(fc))
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
 
 	/**
 	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
@@ -187,8 +191,8 @@
 		rate_flags |= RATE_MCS_CCK_MSK;
 
 	/* Set up antennas */
-	 if (priv->cfg->bt_params &&
-	     priv->cfg->bt_params->advanced_bt_coexist &&
+	 if (cfg(priv)->bt_params &&
+	     cfg(priv)->bt_params->advanced_bt_coexist &&
 	     priv->bt_full_concurrent) {
 		/* operated as 1x1 in full concurrency mode */
 		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -258,8 +262,8 @@
 
 	__le16 fc;
 	u8 hdr_len;
-	u16 len;
-	u8 sta_id;
+	u16 len, seq_number = 0;
+	u8 sta_id, tid = IWL_MAX_TID_COUNT;
 	unsigned long flags;
 	bool is_agg = false;
 
@@ -283,6 +287,19 @@
 		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
 #endif
 
+	if (unlikely(ieee80211_is_probe_resp(fc))) {
+		struct iwl_wipan_noa_data *noa_data =
+			rcu_dereference(priv->noa_data);
+
+		if (noa_data &&
+		    pskb_expand_head(skb, 0, noa_data->length,
+				     GFP_ATOMIC) == 0) {
+			memcpy(skb_put(skb, noa_data->length),
+			       noa_data->data, noa_data->length);
+			hdr = (struct ieee80211_hdr *)skb->data;
+		}
+	}
+
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* For management frames use broadcast id to do not break aggregation */
@@ -351,9 +368,51 @@
 	info->driver_data[0] = ctx;
 	info->driver_data[1] = dev_cmd;
 
-	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+		u8 *qc = NULL;
+		struct iwl_tid_data *tid_data;
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+			goto drop_unlock_sta;
+		tid_data = &priv->tid_data[sta_id][tid];
+
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    tid_data->agg.state != IWL_AGG_ON) {
+			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
+				" Tx flags = 0x%08x, agg.state = %d",
+				info->flags, tid_data->agg.state);
+			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
+			goto drop_unlock_sta;
+		}
+
+		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
+		 * only. Check this here.
+		 */
+		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
+		    tid_data->agg.state != IWL_AGG_OFF,
+		    "Tx while agg.state = %d", tid_data->agg.state))
+			goto drop_unlock_sta;
+
+		seq_number = tid_data->seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
+		seq_number += 0x10;
+	}
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
 		goto drop_unlock_sta;
 
+	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
+	    !ieee80211_has_morefrags(fc))
+		priv->tid_data[sta_id][tid].seq_number = seq_number;
+
 	spin_unlock(&priv->shrd->sta_lock);
 	spin_unlock_irqrestore(&priv->shrd->lock, flags);
 
@@ -378,10 +437,81 @@
 	return -1;
 }
 
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_tid_data *tid_data;
+	unsigned long flags;
+	int sta_id;
+
+	sta_id = iwl_sta_id(sta);
+
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+	tid_data = &priv->tid_data[sta_id][tid];
+
+	switch (priv->tid_data[sta_id][tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(priv, "Stopping AGG while state not ON "
+			 "or starting for %d on %d (%d)\n", sta_id, tid,
+			 priv->tid_data[sta_id][tid].agg.state);
+		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+		return 0;
+	}
+
+	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+	/* There are still packets for this RA / TID in the HW */
+	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+				    "next_recl = %d",
+				    tid_data->agg.ssn,
+				    tid_data->next_reclaimed);
+		priv->tid_data[sta_id][tid].agg.state =
+			IWL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+		return 0;
+	}
+
+	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+			    tid_data->agg.ssn);
+turn_off:
+	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&priv->shrd->sta_lock);
+	spin_lock(&priv->shrd->lock);
+
+	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+
+	spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+	return 0;
+}
+
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_tid_data *tid_data;
+	unsigned long flags;
 	int sta_id;
 	int ret;
 
@@ -396,7 +526,7 @@
 	if (unlikely(tid >= IWL_MAX_TID_COUNT))
 		return -EINVAL;
 
-	if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
+	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
 		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
 		return -ENXIO;
 	}
@@ -405,27 +535,136 @@
 	if (ret)
 		return ret;
 
-	ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
-				     tid, ssn);
+	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+	tid_data = &priv->tid_data[sta_id][tid];
+	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+	*ssn = tid_data->agg.ssn;
+
+	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
+	if (ret) {
+		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+		return ret;
+	}
+
+	if (*ssn == tid_data->next_reclaimed) {
+		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+				    tid_data->agg.ssn);
+		tid_data->agg.state = IWL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	} else {
+		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+				    "next_reclaimed = %d",
+				    tid_data->agg.ssn,
+				    tid_data->next_reclaimed);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+
+	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
 
 	return ret;
 }
 
-int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta, u16 tid)
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
 {
-	int sta_id;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	unsigned long flags;
+	u16 ssn;
 
-	sta_id = iwl_sta_id(sta);
+	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
 
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-		return -ENXIO;
+	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+			       buf_size, ssn);
+
+	/*
+	 * If the limit is 0, then it wasn't initialised yet,
+	 * use the default. We can do that since we take the
+	 * minimum below, and we don't want to go above our
+	 * default due to hardware restrictions.
+	 */
+	if (sta_priv->max_agg_bufsize == 0)
+		sta_priv->max_agg_bufsize =
+			LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+	/*
+	 * Even though in theory the peer could have different
+	 * aggregation reorder buffer sizes for different sessions,
+	 * our ucode doesn't allow for that and has a global limit
+	 * for each station. Therefore, use the minimum of all the
+	 * aggregation sessions and our default value.
+	 */
+	sta_priv->max_agg_bufsize =
+		min(sta_priv->max_agg_bufsize, buf_size);
+
+	if (cfg(priv)->ht_params &&
+	    cfg(priv)->ht_params->use_rts_for_aggregation) {
+		/*
+		 * switch to RTS/CTS if it is the preferred protection
+		 * method for HT traffic
+		 */
+
+		sta_priv->lq_sta.lq.general_params.flags |=
+			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
 	}
+	priv->agg_tids_count++;
+	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+		     priv->agg_tids_count);
 
-	return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
-					sta_id, tid);
+	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+		sta_priv->max_agg_bufsize;
+
+	IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+		 sta->addr, tid);
+
+	return iwl_send_lq_cmd(priv, ctx,
+			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
+}
+
+static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
+{
+	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
+	enum iwl_rxon_context_id ctx;
+	struct ieee80211_vif *vif;
+	u8 *addr;
+
+	lockdep_assert_held(&priv->shrd->sta_lock);
+
+	addr = priv->stations[sta_id].sta.sta.addr;
+	ctx = priv->stations[sta_id].ctxid;
+	vif = priv->contexts[ctx].vif;
+
+	switch (priv->tid_data[sta_id][tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/* There are no packets for this RA / TID in the HW any more */
+		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+			IWL_DEBUG_TX_QUEUES(priv,
+				"Can continue DELBA flow ssn = next_recl ="
+				" %d", tid_data->next_reclaimed);
+			iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+			tid_data->agg.state = IWL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
+		}
+		break;
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/* There are no packets for this RA / TID in the HW any more */
+		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+			IWL_DEBUG_TX_QUEUES(priv,
+				"Can continue ADDBA flow ssn = next_recl ="
+				" %d", tid_data->next_reclaimed);
+			tid_data->agg.state = IWL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+		}
+		break;
+	default:
+		break;
+	}
 }
 
 static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -565,7 +804,7 @@
 		IWLAGN_TX_RES_TID_POS;
 	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
 		IWLAGN_TX_RES_RA_POS;
-	struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
+	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
 	u32 status = le16_to_cpu(tx_resp->status.status);
 	int i;
 
@@ -581,8 +820,8 @@
 	 * notification again.
 	 */
 	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
-	    priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist) {
+	    cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist) {
 		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
 	}
 
@@ -755,7 +994,7 @@
 	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
 	struct ieee80211_hdr *hdr;
 	u32 status = le16_to_cpu(tx_resp->status.status);
-	u32 ssn = iwlagn_get_scd_ssn(tx_resp);
+	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
 	int tid;
 	int sta_id;
 	int freed;
@@ -777,10 +1016,34 @@
 		iwl_rx_reply_tx_agg(priv, tx_resp);
 
 	if (tx_resp->frame_count == 1) {
+		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
+		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
+
+		if (is_agg) {
+			/* If this is an aggregation queue, we can rely on the
+			 * ssn since the wifi sequence number corresponds to
+			 * the index in the TFD ring (%256).
+			 * The seq_ctl is the sequence control of the packet
+			 * to which this Tx response relates. But if there is a
+			 * hole in the bitmap of the BA we received, this Tx
+			 * response may allow to reclaim the hole and all the
+			 * subsequent packets that were already acked.
+			 * In that case, seq_ctl != ssn, and the next packet
+			 * to be reclaimed will be ssn and not seq_ctl.
+			 */
+			next_reclaimed = ssn;
+		}
+
 		__skb_queue_head_init(&skbs);
+		priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;
+
+		IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+					  next_reclaimed);
+
 		/*we can free until ssn % q.n_bd not inclusive */
-		iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
-				  ssn, status, &skbs);
+		WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+				  ssn, status, &skbs));
+		iwlagn_check_ratid_empty(priv, sta_id, tid);
 		freed = 0;
 		while (!skb_queue_empty(&skbs)) {
 			skb = __skb_dequeue(&skbs);
@@ -800,7 +1063,8 @@
 			    iwl_is_associated_ctx(ctx) && ctx->vif &&
 			    ctx->vif->type == NL80211_IFTYPE_STATION) {
 				ctx->last_tx_rejected = true;
-				iwl_trans_stop_queue(trans(priv), txq_id);
+				iwl_trans_stop_queue(trans(priv), txq_id,
+					"Tx on passive channel");
 
 				IWL_DEBUG_TX_REPLY(priv,
 					   "TXQ %d status %s (0x%08x) "
@@ -874,24 +1138,10 @@
 
 	sta_id = ba_resp->sta_id;
 	tid = ba_resp->tid;
-	agg = &priv->shrd->tid_data[sta_id][tid].agg;
+	agg = &priv->tid_data[sta_id][tid].agg;
 
 	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
 
-	if (unlikely(agg->txq_id != scd_flow)) {
-		/*
-		 * FIXME: this is a uCode bug which need to be addressed,
-		 * log the information and return for now!
-		 * since it is possible happen very often and in order
-		 * not to fill the syslog, don't enable the logging by default
-		 */
-		IWL_DEBUG_TX_REPLY(priv,
-			"BA scd_flow %d does not match txq_id %d\n",
-			scd_flow, agg->txq_id);
-		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-		return 0;
-	}
-
 	if (unlikely(!agg->wait_for_ba)) {
 		if (unlikely(ba_resp->bitmap))
 			IWL_ERR(priv, "Received BA when not expected\n");
@@ -899,6 +1149,17 @@
 		return 0;
 	}
 
+	__skb_queue_head_init(&reclaimed_skbs);
+
+	/* Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack window (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway). */
+	if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
+			      ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
+		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+		return 0;
+	}
+
 	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
 			   "sta_id = %d\n",
 			   agg->wait_for_ba,
@@ -906,11 +1167,9 @@
 			   ba_resp->sta_id);
 	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
 			   "scd_flow = %d, scd_ssn = %d\n",
-			   ba_resp->tid,
-			   ba_resp->seq_ctl,
+			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
 			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
-			   ba_resp->scd_flow,
-			   ba_resp->scd_ssn);
+			   scd_flow, ba_resp_scd_ssn);
 
 	/* Mark that the expected block-ack response arrived */
 	agg->wait_for_ba = false;
@@ -929,13 +1188,9 @@
 	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
 			ba_resp->txed, ba_resp->txed_2_done);
 
-	__skb_queue_head_init(&reclaimed_skbs);
+	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
 
-	/* Release all TFDs before the SSN, i.e. all TFDs in front of
-	 * block-ack window (we assume that they've been successfully
-	 * transmitted ... if not, it's too late anyway). */
-	iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
-			  0, &reclaimed_skbs);
+	iwlagn_check_ratid_empty(priv, sta_id, tid);
 	freed = 0;
 	while (!skb_queue_empty(&reclaimed_skbs)) {
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
deleted file mode 100644
index 8ba0dd5..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ /dev/null
@@ -1,634 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
-#include "iwl-agn-calib.h"
-#include "iwl-trans.h"
-#include "iwl-fh.h"
-
-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
-	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
-	 0, COEX_UNASSOC_IDLE_FLAGS},
-	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
-	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
-	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
-	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
-	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
-	 0, COEX_CALIBRATION_FLAGS},
-	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
-	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
-	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
-	 0, COEX_CONNECTION_ESTAB_FLAGS},
-	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
-	 0, COEX_ASSOCIATED_IDLE_FLAGS},
-	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
-	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
-	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
-	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
-	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
-	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
-	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
-	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
-	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
-	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
-	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
-	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
-	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
-	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
-
-/*
- * ucode
- */
-static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
-				struct fw_desc *image, u32 dst_addr)
-{
-	dma_addr_t phy_addr = image->p_addr;
-	u32 byte_cnt = image->len;
-	int ret;
-
-	priv->ucode_write_complete = 0;
-
-	iwl_write_direct32(bus(priv),
-		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
-	iwl_write_direct32(bus(priv),
-		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
-
-	iwl_write_direct32(bus(priv),
-		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
-		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
-	iwl_write_direct32(bus(priv),
-		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
-		(iwl_get_dma_hi_addr(phy_addr)
-			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
-	iwl_write_direct32(bus(priv),
-		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
-		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
-		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
-		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
-	iwl_write_direct32(bus(priv),
-		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
-		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
-		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-
-	IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
-	ret = wait_event_timeout(priv->shrd->wait_command_queue,
-				 priv->ucode_write_complete, 5 * HZ);
-	if (!ret) {
-		IWL_ERR(priv, "Could not load the %s uCode section\n",
-			name);
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-static int iwlagn_load_given_ucode(struct iwl_priv *priv,
-				   struct fw_img *image)
-{
-	int ret = 0;
-
-	ret = iwlagn_load_section(priv, "INST", &image->code,
-				   IWLAGN_RTC_INST_LOWER_BOUND);
-	if (ret)
-		return ret;
-
-	return iwlagn_load_section(priv, "DATA", &image->data,
-				    IWLAGN_RTC_DATA_LOWER_BOUND);
-}
-
-/*
- *  Calibration
- */
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
-{
-	struct iwl_calib_xtal_freq_cmd cmd;
-	__le16 *xtal_calib =
-		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
-
-	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
-	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
-	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
-	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
-			     (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
-{
-	struct iwl_calib_temperature_offset_cmd cmd;
-	__le16 *offset_calib =
-		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
-
-	memset(&cmd, 0, sizeof(cmd));
-	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
-	if (!(cmd.radio_sensor_offset))
-		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
-
-	IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
-			le16_to_cpu(cmd.radio_sensor_offset));
-	return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
-			     (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
-{
-	struct iwl_calib_temperature_offset_v2_cmd cmd;
-	__le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
-				     EEPROM_KELVIN_TEMPERATURE);
-	__le16 *offset_calib_low =
-		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
-	struct iwl_eeprom_calib_hdr *hdr;
-
-	memset(&cmd, 0, sizeof(cmd));
-	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
-							EEPROM_CALIB_ALL);
-	memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
-		sizeof(*offset_calib_high));
-	memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
-		sizeof(*offset_calib_low));
-	if (!(cmd.radio_sensor_offset_low)) {
-		IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
-		cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
-		cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
-	}
-	memcpy(&cmd.burntVoltageRef, &hdr->voltage,
-		sizeof(hdr->voltage));
-
-	IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
-			le16_to_cpu(cmd.radio_sensor_offset_high));
-	IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
-			le16_to_cpu(cmd.radio_sensor_offset_low));
-	IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
-			le16_to_cpu(cmd.burntVoltageRef));
-
-	return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
-			     (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
-{
-	struct iwl_calib_cfg_cmd calib_cfg_cmd;
-	struct iwl_host_cmd cmd = {
-		.id = CALIBRATION_CFG_CMD,
-		.len = { sizeof(struct iwl_calib_cfg_cmd), },
-		.data = { &calib_cfg_cmd, },
-	};
-
-	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
-	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
-	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
-	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
-	calib_cfg_cmd.ucd_calib_cfg.flags =
-		IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
-
-	return iwl_trans_send_cmd(trans(priv), &cmd);
-}
-
-int iwlagn_rx_calib_result(struct iwl_priv *priv,
-			    struct iwl_rx_mem_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
-	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	int index;
-
-	/* reduce the size of the length field itself */
-	len -= 4;
-
-	/* Define the order in which the results will be sent to the runtime
-	 * uCode. iwl_send_calib_results sends them in a row according to
-	 * their index. We sort them here
-	 */
-	switch (hdr->op_code) {
-	case IWL_PHY_CALIBRATE_DC_CMD:
-		index = IWL_CALIB_DC;
-		break;
-	case IWL_PHY_CALIBRATE_LO_CMD:
-		index = IWL_CALIB_LO;
-		break;
-	case IWL_PHY_CALIBRATE_TX_IQ_CMD:
-		index = IWL_CALIB_TX_IQ;
-		break;
-	case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
-		index = IWL_CALIB_TX_IQ_PERD;
-		break;
-	case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
-		index = IWL_CALIB_BASE_BAND;
-		break;
-	default:
-		IWL_ERR(priv, "Unknown calibration notification %d\n",
-			  hdr->op_code);
-		return -1;
-	}
-	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
-	return 0;
-}
-
-int iwlagn_init_alive_start(struct iwl_priv *priv)
-{
-	int ret;
-
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist) {
-		/*
-		 * Tell uCode we are ready to perform calibration
-		 * need to perform this before any calibration
-		 * no need to close the envlope since we are going
-		 * to load the runtime uCode later.
-		 */
-		ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
-			BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-		if (ret)
-			return ret;
-
-	}
-
-	ret = iwlagn_send_calib_cfg(priv);
-	if (ret)
-		return ret;
-
-	/**
-	 * temperature offset calibration is only needed for runtime ucode,
-	 * so prepare the value now.
-	 */
-	if (priv->cfg->need_temp_offset_calib) {
-		if (priv->cfg->temp_offset_v2)
-			return iwlagn_set_temperature_offset_calib_v2(priv);
-		else
-			return iwlagn_set_temperature_offset_calib(priv);
-	}
-
-	return 0;
-}
-
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
-{
-	struct iwl_wimax_coex_cmd coex_cmd;
-
-	if (priv->cfg->base_params->support_wimax_coexist) {
-		/* UnMask wake up src at associated sleep */
-		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
-
-		/* UnMask wake up src at unassociated sleep */
-		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
-		memcpy(coex_cmd.sta_prio, cu_priorities,
-			sizeof(struct iwl_wimax_coex_event_entry) *
-			 COEX_NUM_OF_EVENTS);
-
-		/* enabling the coexistence feature */
-		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
-		/* enabling the priorities tables */
-		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
-	} else {
-		/* coexistence is disabled */
-		memset(&coex_cmd, 0, sizeof(coex_cmd));
-	}
-	return iwl_trans_send_cmd_pdu(trans(priv),
-				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
-				sizeof(coex_cmd), &coex_cmd);
-}
-
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
-		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
-	0, 0, 0, 0, 0, 0, 0
-};
-
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
-{
-	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
-
-	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
-		sizeof(iwlagn_bt_prio_tbl));
-	if (iwl_trans_send_cmd_pdu(trans(priv),
-				REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
-				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
-		IWL_ERR(priv, "failed to send BT prio tbl command\n");
-}
-
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
-{
-	struct iwl_bt_coex_prot_env_cmd env_cmd;
-	int ret;
-
-	env_cmd.action = action;
-	env_cmd.type = type;
-	ret = iwl_trans_send_cmd_pdu(trans(priv),
-			       REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
-			       sizeof(env_cmd), &env_cmd);
-	if (ret)
-		IWL_ERR(priv, "failed to send BT env command\n");
-	return ret;
-}
-
-
-static int iwlagn_alive_notify(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx;
-	int ret;
-
-	if (!priv->tx_cmd_pool)
-		priv->tx_cmd_pool =
-			kmem_cache_create("iwlagn_dev_cmd",
-					  sizeof(struct iwl_device_cmd),
-					  sizeof(void *), 0, NULL);
-
-	if (!priv->tx_cmd_pool)
-		return -ENOMEM;
-
-	iwl_trans_tx_start(trans(priv));
-	for_each_context(priv, ctx)
-		ctx->last_tx_rejected = false;
-
-	ret = iwlagn_send_wimax_coex(priv);
-	if (ret)
-		return ret;
-
-	ret = iwlagn_set_Xtal_calib(priv);
-	if (ret)
-		return ret;
-
-	return iwl_send_calib_results(priv);
-}
-
-
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl_verify_inst_sparse(struct iwl_priv *priv,
-				      struct fw_desc *fw_desc)
-{
-	__le32 *image = (__le32 *)fw_desc->v_addr;
-	u32 len = fw_desc->len;
-	u32 val;
-	u32 i;
-
-	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
-	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
-			i + IWLAGN_RTC_INST_LOWER_BOUND);
-		val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image))
-			return -EIO;
-	}
-
-	return 0;
-}
-
-static void iwl_print_mismatch_inst(struct iwl_priv *priv,
-				    struct fw_desc *fw_desc)
-{
-	__le32 *image = (__le32 *)fw_desc->v_addr;
-	u32 len = fw_desc->len;
-	u32 val;
-	u32 offs;
-	int errors = 0;
-
-	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
-	iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
-			   IWLAGN_RTC_INST_LOWER_BOUND);
-
-	for (offs = 0;
-	     offs < len && errors < 20;
-	     offs += sizeof(u32), image++) {
-		/* read data comes through single port, auto-incr addr */
-		val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "uCode INST section at "
-				"offset 0x%x, is 0x%x, s/b 0x%x\n",
-				offs, val, le32_to_cpu(*image));
-			errors++;
-		}
-	}
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
-{
-	if (!iwl_verify_inst_sparse(priv, &img->code)) {
-		IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
-
-	iwl_print_mismatch_inst(priv, &img->code);
-	return -EIO;
-}
-
-struct iwlagn_alive_data {
-	bool valid;
-	u8 subtype;
-};
-
-static void iwlagn_alive_fn(struct iwl_priv *priv,
-			    struct iwl_rx_packet *pkt,
-			    void *data)
-{
-	struct iwlagn_alive_data *alive_data = data;
-	struct iwl_alive_resp *palive;
-
-	palive = &pkt->u.alive_frame;
-
-	IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
-		       "0x%01X 0x%01X\n",
-		       palive->is_valid, palive->ver_type,
-		       palive->ver_subtype);
-
-	priv->device_pointers.error_event_table =
-		le32_to_cpu(palive->error_event_table_ptr);
-	priv->device_pointers.log_event_table =
-		le32_to_cpu(palive->log_event_table_ptr);
-
-	alive_data->subtype = palive->ver_subtype;
-	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
-}
-
-#define UCODE_ALIVE_TIMEOUT	HZ
-#define UCODE_CALIB_TIMEOUT	(2*HZ)
-
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
-				 struct fw_img *image,
-				 enum iwlagn_ucode_type ucode_type)
-{
-	struct iwl_notification_wait alive_wait;
-	struct iwlagn_alive_data alive_data;
-	int ret;
-	enum iwlagn_ucode_type old_type;
-
-	ret = iwl_trans_start_device(trans(priv));
-	if (ret)
-		return ret;
-
-	iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
-				      iwlagn_alive_fn, &alive_data);
-
-	old_type = priv->ucode_type;
-	priv->ucode_type = ucode_type;
-
-	ret = iwlagn_load_given_ucode(priv, image);
-	if (ret) {
-		priv->ucode_type = old_type;
-		iwlagn_remove_notification(priv, &alive_wait);
-		return ret;
-	}
-
-	iwl_trans_kick_nic(trans(priv));
-
-	/*
-	 * Some things may run in the background now, but we
-	 * just wait for the ALIVE notification here.
-	 */
-	ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
-	if (ret) {
-		priv->ucode_type = old_type;
-		return ret;
-	}
-
-	if (!alive_data.valid) {
-		IWL_ERR(priv, "Loaded ucode is not valid!\n");
-		priv->ucode_type = old_type;
-		return -EIO;
-	}
-
-	/*
-	 * This step takes a long time (60-80ms!!) and
-	 * WoWLAN image should be loaded quickly, so
-	 * skip it for WoWLAN.
-	 */
-	if (ucode_type != IWL_UCODE_WOWLAN) {
-		ret = iwl_verify_ucode(priv, image);
-		if (ret) {
-			priv->ucode_type = old_type;
-			return ret;
-		}
-
-		/* delay a bit to give rfkill time to run */
-		msleep(5);
-	}
-
-	ret = iwlagn_alive_notify(priv);
-	if (ret) {
-		IWL_WARN(priv,
-			"Could not complete ALIVE transition: %d\n", ret);
-		priv->ucode_type = old_type;
-		return ret;
-	}
-
-	return 0;
-}
-
-int iwlagn_run_init_ucode(struct iwl_priv *priv)
-{
-	struct iwl_notification_wait calib_wait;
-	int ret;
-
-	lockdep_assert_held(&priv->shrd->mutex);
-
-	/* No init ucode required? Curious, but maybe ok */
-	if (!priv->ucode_init.code.len)
-		return 0;
-
-	if (priv->ucode_type != IWL_UCODE_NONE)
-		return 0;
-
-	iwlagn_init_notification_wait(priv, &calib_wait,
-				      CALIBRATION_COMPLETE_NOTIFICATION,
-				      NULL, NULL);
-
-	/* Will also start the device */
-	ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
-					   IWL_UCODE_INIT);
-	if (ret)
-		goto error;
-
-	ret = iwlagn_init_alive_start(priv);
-	if (ret)
-		goto error;
-
-	/*
-	 * Some things may run in the background now, but we
-	 * just wait for the calibration complete notification.
-	 */
-	ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
-
-	goto out;
-
- error:
-	iwlagn_remove_notification(priv, &calib_wait);
- out:
-	/* Whatever happened, stop the device */
-	iwl_trans_stop_device(trans(priv));
-	return ret;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bacc06c..b5c7c5f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -30,7 +30,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/skbuff.h>
@@ -44,6 +43,7 @@
 #include <asm/div64.h>
 
 #include "iwl-eeprom.h"
+#include "iwl-wifi.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-io.h"
@@ -367,7 +367,7 @@
 	u32 num_wraps;  /* # times uCode wrapped to top of log */
 	u32 next_entry; /* index of next entry to be written by uCode */
 
-	base = priv->device_pointers.error_event_table;
+	base = priv->shrd->device_pointers.error_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
 		capacity = iwl_read_targ_mem(bus(priv), base);
 		num_wraps = iwl_read_targ_mem(bus(priv),
@@ -452,52 +452,6 @@
 	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
 }
 
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
-{
-	if (desc->v_addr)
-		dma_free_coherent(bus(priv)->dev, desc->len,
-				  desc->v_addr, desc->p_addr);
-	desc->v_addr = NULL;
-	desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
-{
-	iwl_free_fw_desc(priv, &img->code);
-	iwl_free_fw_desc(priv, &img->data);
-}
-
-static void iwl_dealloc_ucode(struct iwl_priv *priv)
-{
-	iwl_free_fw_img(priv, &priv->ucode_rt);
-	iwl_free_fw_img(priv, &priv->ucode_init);
-	iwl_free_fw_img(priv, &priv->ucode_wowlan);
-}
-
-static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
-			     const void *data, size_t len)
-{
-	if (!len) {
-		desc->v_addr = NULL;
-		return -EINVAL;
-	}
-
-	desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
-					  &desc->p_addr, GFP_KERNEL);
-	if (!desc->v_addr)
-		return -ENOMEM;
-
-	desc->len = len;
-	memcpy(desc->v_addr, data, len);
-	return 0;
-}
-
 static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 {
 	int i;
@@ -555,23 +509,14 @@
 	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 }
 
-
-struct iwlagn_ucode_capabilities {
-	u32 max_probe_length;
-	u32 standard_phy_calibration_size;
-	u32 flags;
-};
-
 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
-				  struct iwlagn_ucode_capabilities *capa);
 
 #define UCODE_EXPERIMENTAL_INDEX	100
 #define UCODE_EXPERIMENTAL_TAG		"exp"
 
 static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
 {
-	const char *name_pre = priv->cfg->fw_name_pre;
+	const char *name_pre = cfg(priv)->fw_name_pre;
 	char tag[8];
 
 	if (first) {
@@ -580,14 +525,14 @@
 		strcpy(tag, UCODE_EXPERIMENTAL_TAG);
 	} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
 #endif
-		priv->fw_index = priv->cfg->ucode_api_max;
+		priv->fw_index = cfg(priv)->ucode_api_max;
 		sprintf(tag, "%d", priv->fw_index);
 	} else {
 		priv->fw_index--;
 		sprintf(tag, "%d", priv->fw_index);
 	}
 
-	if (priv->fw_index < priv->cfg->ucode_api_min) {
+	if (priv->fw_index < cfg(priv)->ucode_api_min) {
 		IWL_ERR(priv, "no suitable firmware found!\n");
 		return -ENOENT;
 	}
@@ -892,9 +837,9 @@
 	struct iwl_ucode_header *ucode;
 	int err;
 	struct iwlagn_firmware_pieces pieces;
-	const unsigned int api_max = priv->cfg->ucode_api_max;
-	unsigned int api_ok = priv->cfg->ucode_api_ok;
-	const unsigned int api_min = priv->cfg->ucode_api_min;
+	const unsigned int api_max = cfg(priv)->ucode_api_max;
+	unsigned int api_ok = cfg(priv)->ucode_api_ok;
+	const unsigned int api_min = cfg(priv)->ucode_api_min;
 	u32 api_ver;
 	char buildstr[25];
 	u32 build;
@@ -1040,30 +985,32 @@
 	/* Runtime instructions and 2 copies of data:
 	 * 1) unmodified from disk
 	 * 2) backup cache for save/restore during power-downs */
-	if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
+	if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
 			      pieces.inst, pieces.inst_size))
 		goto err_pci_alloc;
-	if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
+	if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
 			      pieces.data, pieces.data_size))
 		goto err_pci_alloc;
 
 	/* Initialization instructions and data */
 	if (pieces.init_size && pieces.init_data_size) {
-		if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
+		if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
 				      pieces.init, pieces.init_size))
 			goto err_pci_alloc;
-		if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
+		if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
 				      pieces.init_data, pieces.init_data_size))
 			goto err_pci_alloc;
 	}
 
 	/* WoWLAN instructions and data */
 	if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
-		if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
+		if (iwl_alloc_fw_desc(bus(priv),
+				      &trans(priv)->ucode_wowlan.code,
 				      pieces.wowlan_inst,
 				      pieces.wowlan_inst_size))
 			goto err_pci_alloc;
-		if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
+		if (iwl_alloc_fw_desc(bus(priv),
+				      &trans(priv)->ucode_wowlan.data,
 				      pieces.wowlan_data,
 				      pieces.wowlan_data_size))
 			goto err_pci_alloc;
@@ -1081,20 +1028,23 @@
 		priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
 	else
 		priv->init_evtlog_size =
-			priv->cfg->base_params->max_event_log_size;
+			cfg(priv)->base_params->max_event_log_size;
 	priv->init_errlog_ptr = pieces.init_errlog_ptr;
 	priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
 	if (pieces.inst_evtlog_size)
 		priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
 	else
 		priv->inst_evtlog_size =
-			priv->cfg->base_params->max_event_log_size;
+			cfg(priv)->base_params->max_event_log_size;
 	priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
+#ifndef CONFIG_IWLWIFI_P2P
+	ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
 
 	priv->new_scan_threshold_behaviour =
 		!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
 
-	if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+	if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
 		ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
 
 	/*
@@ -1111,7 +1061,6 @@
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
 	}
-
 	/*
 	 * figure out the offset of chain noise reset and gain commands
 	 * base on the size of standard phy calibration commands table size
@@ -1156,7 +1105,7 @@
 
  err_pci_alloc:
 	IWL_ERR(priv, "failed to allocate pci memory\n");
-	iwl_dealloc_ucode(priv);
+	iwl_dealloc_ucode(trans(priv));
  out_unbind:
 	complete(&priv->firmware_loading_complete);
 	device_release_driver(bus(priv)->dev);
@@ -1176,7 +1125,7 @@
 	spin_unlock_irqrestore(&priv->shrd->lock, flags);
 	priv->thermal_throttle.ct_kill_toggle = false;
 
-	if (priv->cfg->base_params->support_ct_kill_exit) {
+	if (cfg(priv)->base_params->support_ct_kill_exit) {
 		adv_cmd.critical_temperature_enter =
 			cpu_to_le32(hw_params(priv).ct_kill_threshold);
 		adv_cmd.critical_temperature_exit =
@@ -1271,10 +1220,10 @@
 		return -ERFKILL;
 
 	/* download priority table before any calibration request */
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist) {
+	if (cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist) {
 		/* Configure Bluetooth device coexistence support */
-		if (priv->cfg->bt_params->bt_sco_disable)
+		if (cfg(priv)->bt_params->bt_sco_disable)
 			priv->bt_enable_pspoll = false;
 		else
 			priv->bt_enable_pspoll = true;
@@ -1286,14 +1235,14 @@
 		priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
 		priv->cur_rssi_ctx = NULL;
 
-		iwlagn_send_prio_tbl(priv);
+		iwl_send_prio_tbl(trans(priv));
 
 		/* FIXME: w/a to force change uCode BT state machine */
-		ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+		ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
 					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
 		if (ret)
 			return ret;
-		ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+		ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
 					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
 		if (ret)
 			return ret;
@@ -1304,16 +1253,17 @@
 		iwl_send_bt_config(priv);
 	}
 
-	if (hw_params(priv).calib_rt_cfg)
-		iwlagn_send_calib_cfg_rt(priv,
-					 hw_params(priv).calib_rt_cfg);
+	/*
+	 * Perform runtime calibrations, including DC calibration.
+	 */
+	iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);
 
 	ieee80211_wake_queues(priv->hw);
 
 	priv->active_rate = IWL_RATES_MASK;
 
 	/* Configure Tx antenna selection based on H/W config */
-	iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
+	iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant);
 
 	if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
 		struct iwl_rxon_cmd *active_rxon =
@@ -1352,7 +1302,7 @@
 
 static void iwl_cancel_deferred_work(struct iwl_priv *priv);
 
-static void __iwl_down(struct iwl_priv *priv)
+void __iwl_down(struct iwl_priv *priv)
 {
 	int exit_pending;
 
@@ -1382,9 +1332,9 @@
 	priv->bt_status = 0;
 	priv->cur_rssi_ctx = NULL;
 	priv->bt_is_sco = 0;
-	if (priv->cfg->bt_params)
+	if (cfg(priv)->bt_params)
 		priv->bt_traffic_load =
-			 priv->cfg->bt_params->bt_init_traffic_load;
+			 cfg(priv)->bt_params->bt_init_traffic_load;
 	else
 		priv->bt_traffic_load = 0;
 	priv->bt_full_concurrent = false;
@@ -1415,7 +1365,7 @@
 	priv->beacon_skb = NULL;
 }
 
-static void iwl_down(struct iwl_priv *priv)
+void iwl_down(struct iwl_priv *priv)
 {
 	mutex_lock(&priv->shrd->mutex);
 	__iwl_down(priv);
@@ -1424,57 +1374,6 @@
 	iwl_cancel_deferred_work(priv);
 }
 
-#define MAX_HW_RESTARTS 5
-
-static int __iwl_up(struct iwl_priv *priv)
-{
-	struct iwl_rxon_context *ctx;
-	int ret;
-
-	lockdep_assert_held(&priv->shrd->mutex);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
-		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-		return -EIO;
-	}
-
-	for_each_context(priv, ctx) {
-		ret = iwlagn_alloc_bcast_station(priv, ctx);
-		if (ret) {
-			iwl_dealloc_bcast_stations(priv);
-			return ret;
-		}
-	}
-
-	ret = iwlagn_run_init_ucode(priv);
-	if (ret) {
-		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
-		goto error;
-	}
-
-	ret = iwlagn_load_ucode_wait_alive(priv,
-					   &priv->ucode_rt,
-					   IWL_UCODE_REGULAR);
-	if (ret) {
-		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
-		goto error;
-	}
-
-	ret = iwl_alive_start(priv);
-	if (ret)
-		goto error;
-	return 0;
-
- error:
-	set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-	__iwl_down(priv);
-	clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
-	IWL_ERR(priv, "Unable to initialize device.\n");
-	return ret;
-}
-
-
 /*****************************************************************************
  *
  * Workqueue callbacks
@@ -1502,7 +1401,7 @@
 	mutex_unlock(&priv->shrd->mutex);
 }
 
-static void iwlagn_prepare_restart(struct iwl_priv *priv)
+void iwlagn_prepare_restart(struct iwl_priv *priv)
 {
 	struct iwl_rxon_context *ctx;
 	bool bt_full_concurrent;
@@ -1559,1172 +1458,8 @@
 	}
 }
 
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_STATION),
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_AP),
-	},
-};
-
-static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
-	{
-		.max = 2,
-		.types = BIT(NL80211_IFTYPE_STATION),
-	},
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_STATION),
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_GO) |
-			 BIT(NL80211_IFTYPE_AP),
-	},
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
-	{
-		.max = 2,
-		.types = BIT(NL80211_IFTYPE_STATION),
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
-	},
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_dualmode[] = {
-	{ .num_different_channels = 1,
-	  .max_interfaces = 2,
-	  .beacon_int_infra_match = true,
-	  .limits = iwlagn_sta_ap_limits,
-	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
-	},
-	{ .num_different_channels = 1,
-	  .max_interfaces = 2,
-	  .limits = iwlagn_2sta_limits,
-	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
-	},
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
-	{ .num_different_channels = 1,
-	  .max_interfaces = 2,
-	  .beacon_int_infra_match = true,
-	  .limits = iwlagn_p2p_sta_go_limits,
-	  .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
-	},
-	{ .num_different_channels = 1,
-	  .max_interfaces = 2,
-	  .limits = iwlagn_p2p_2sta_limits,
-	  .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
-	},
-};
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
-				  struct iwlagn_ucode_capabilities *capa)
-{
-	int ret;
-	struct ieee80211_hw *hw = priv->hw;
-	struct iwl_rxon_context *ctx;
-
-	hw->rate_control_algorithm = "iwl-agn-rs";
-
-	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_NEED_DTIM_PERIOD |
-		    IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
-	/*
-	 * Including the following line will crash some AP's.  This
-	 * workaround removes the stimulus which causes the crash until
-	 * the AP software can be fixed.
-	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-	 */
-
-	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
-	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
-		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
-	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
-		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
-	hw->sta_data_size = sizeof(struct iwl_station_priv);
-	hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-	for_each_context(priv, ctx) {
-		hw->wiphy->interface_modes |= ctx->interface_modes;
-		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
-	}
-
-	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-
-	if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
-		hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
-		hw->wiphy->n_iface_combinations =
-			ARRAY_SIZE(iwlagn_iface_combinations_p2p);
-	} else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
-		hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
-		hw->wiphy->n_iface_combinations =
-			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
-	}
-
-	hw->wiphy->max_remain_on_channel_duration = 1000;
-
-	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
-			    WIPHY_FLAG_IBSS_RSN;
-
-	if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
-		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
-					  WIPHY_WOWLAN_DISCONNECT |
-					  WIPHY_WOWLAN_EAP_IDENTITY_REQ |
-					  WIPHY_WOWLAN_RFKILL_RELEASE;
-		if (!iwlagn_mod_params.sw_crypto)
-			hw->wiphy->wowlan.flags |=
-				WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
-				WIPHY_WOWLAN_GTK_REKEY_FAILURE;
-
-		hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
-		hw->wiphy->wowlan.pattern_min_len =
-					IWLAGN_WOWLAN_MIN_PATTERN_LEN;
-		hw->wiphy->wowlan.pattern_max_len =
-					IWLAGN_WOWLAN_MAX_PATTERN_LEN;
-	}
-
-	if (iwlagn_mod_params.power_save)
-		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
-	else
-		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
-	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
-	/* we create the 802.11 header and a zero-length SSID element */
-	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
-
-	/* Default value; 4 EDCA QOS priorities */
-	hw->queues = 4;
-
-	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&priv->bands[IEEE80211_BAND_2GHZ];
-	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&priv->bands[IEEE80211_BAND_5GHZ];
-
-	iwl_leds_init(priv);
-
-	ret = ieee80211_register_hw(priv->hw);
-	if (ret) {
-		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-		return ret;
-	}
-	priv->mac80211_registered = 1;
-
-	return 0;
-}
-
-
-static int iwlagn_mac_start(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	/* we should be verifying the device is ready to be opened */
-	mutex_lock(&priv->shrd->mutex);
-	ret = __iwl_up(priv);
-	mutex_unlock(&priv->shrd->mutex);
-	if (ret)
-		return ret;
-
-	IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
-	/* Now we should be done, and the READY bit should be set. */
-	if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
-		ret = -EIO;
-
-	iwlagn_led_enable(priv);
-
-	priv->is_open = 1;
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return 0;
-}
-
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (!priv->is_open)
-		return;
-
-	priv->is_open = 0;
-
-	iwl_down(priv);
-
-	flush_workqueue(priv->shrd->workqueue);
-
-	/* User space software may expect getting rfkill changes
-	 * even if interface is down */
-	iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
-	iwl_enable_rfkill_int(priv);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int iwlagn_send_patterns(struct iwl_priv *priv,
-				struct cfg80211_wowlan *wowlan)
-{
-	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_WOWLAN_PATTERNS,
-		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-		.flags = CMD_SYNC,
-	};
-	int i, err;
-
-	if (!wowlan->n_patterns)
-		return 0;
-
-	cmd.len[0] = sizeof(*pattern_cmd) +
-			wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
-
-	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
-	if (!pattern_cmd)
-		return -ENOMEM;
-
-	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
-
-	for (i = 0; i < wowlan->n_patterns; i++) {
-		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
-
-		memcpy(&pattern_cmd->patterns[i].mask,
-			wowlan->patterns[i].mask, mask_len);
-		memcpy(&pattern_cmd->patterns[i].pattern,
-			wowlan->patterns[i].pattern,
-			wowlan->patterns[i].pattern_len);
-		pattern_cmd->patterns[i].mask_size = mask_len;
-		pattern_cmd->patterns[i].pattern_size =
-			wowlan->patterns[i].pattern_len;
-	}
-
-	cmd.data[0] = pattern_cmd;
-	err = iwl_trans_send_cmd(trans(priv), &cmd);
-	kfree(pattern_cmd);
-	return err;
-}
-#endif
-
-static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
-				      struct ieee80211_vif *vif,
-				      struct cfg80211_gtk_rekey_data *data)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	if (iwlagn_mod_params.sw_crypto)
-		return;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
-		goto out;
-
-	memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
-	memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
-	priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
-	priv->have_rekey_data = true;
-
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-struct wowlan_key_data {
-	struct iwl_rxon_context *ctx;
-	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
-	struct iwlagn_wowlan_tkip_params_cmd *tkip;
-	const u8 *bssid;
-	bool error, use_rsc_tsc, use_tkip;
-};
-
-#ifdef CONFIG_PM_SLEEP
-static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
-{
-	int i;
-
-	for (i = 0; i < IWLAGN_P1K_SIZE; i++)
-		out[i] = cpu_to_le16(p1k[i]);
-}
-
-static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif,
-				       struct ieee80211_sta *sta,
-				       struct ieee80211_key_conf *key,
-				       void *_data)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct wowlan_key_data *data = _data;
-	struct iwl_rxon_context *ctx = data->ctx;
-	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
-	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
-	struct iwlagn_p1k_cache *rx_p1ks;
-	u8 *rx_mic_key;
-	struct ieee80211_key_seq seq;
-	u32 cur_rx_iv32 = 0;
-	u16 p1k[IWLAGN_P1K_SIZE];
-	int ret, i;
-
-	mutex_lock(&priv->shrd->mutex);
-
-	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
-	     !sta && !ctx->key_mapping_keys)
-		ret = iwl_set_default_wep_key(priv, ctx, key);
-	else
-		ret = iwl_set_dynamic_key(priv, ctx, key, sta);
-
-	if (ret) {
-		IWL_ERR(priv, "Error setting key during suspend!\n");
-		data->error = true;
-	}
-
-	switch (key->cipher) {
-	case WLAN_CIPHER_SUITE_TKIP:
-		if (sta) {
-			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
-			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
-
-			rx_p1ks = data->tkip->rx_uni;
-
-			ieee80211_get_key_tx_seq(key, &seq);
-			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
-			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
-
-			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
-			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
-
-			memcpy(data->tkip->mic_keys.tx,
-			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
-			       IWLAGN_MIC_KEY_SIZE);
-
-			rx_mic_key = data->tkip->mic_keys.rx_unicast;
-		} else {
-			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
-			rx_p1ks = data->tkip->rx_multi;
-			rx_mic_key = data->tkip->mic_keys.rx_mcast;
-		}
-
-		/*
-		 * For non-QoS this relies on the fact that both the uCode and
-		 * mac80211 use TID 0 (as they need to to avoid replay attacks)
-		 * for checking the IV in the frames.
-		 */
-		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
-			ieee80211_get_key_rx_seq(key, i, &seq);
-			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
-			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
-			/* wrapping isn't allowed, AP must rekey */
-			if (seq.tkip.iv32 > cur_rx_iv32)
-				cur_rx_iv32 = seq.tkip.iv32;
-		}
-
-		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
-		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
-		ieee80211_get_tkip_rx_p1k(key, data->bssid,
-					  cur_rx_iv32 + 1, p1k);
-		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
-
-		memcpy(rx_mic_key,
-		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
-		       IWLAGN_MIC_KEY_SIZE);
-
-		data->use_tkip = true;
-		data->use_rsc_tsc = true;
-		break;
-	case WLAN_CIPHER_SUITE_CCMP:
-		if (sta) {
-			u8 *pn = seq.ccmp.pn;
-
-			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
-			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
-
-			ieee80211_get_key_tx_seq(key, &seq);
-			aes_tx_sc->pn = cpu_to_le64(
-					(u64)pn[5] |
-					((u64)pn[4] << 8) |
-					((u64)pn[3] << 16) |
-					((u64)pn[2] << 24) |
-					((u64)pn[1] << 32) |
-					((u64)pn[0] << 40));
-		} else
-			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
-
-		/*
-		 * For non-QoS this relies on the fact that both the uCode and
-		 * mac80211 use TID 0 for checking the IV in the frames.
-		 */
-		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
-			u8 *pn = seq.ccmp.pn;
-
-			ieee80211_get_key_rx_seq(key, i, &seq);
-			aes_sc->pn = cpu_to_le64(
-					(u64)pn[5] |
-					((u64)pn[4] << 8) |
-					((u64)pn[3] << 16) |
-					((u64)pn[2] << 24) |
-					((u64)pn[1] << 32) |
-					((u64)pn[0] << 40));
-		}
-		data->use_rsc_tsc = true;
-		break;
-	}
-
-	mutex_unlock(&priv->shrd->mutex);
-}
-
-static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
-			      struct cfg80211_wowlan *wowlan)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
-	struct iwl_rxon_cmd rxon;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
-	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
-	struct wowlan_key_data key_data = {
-		.ctx = ctx,
-		.bssid = ctx->active.bssid_addr,
-		.use_rsc_tsc = false,
-		.tkip = &tkip_cmd,
-		.use_tkip = false,
-	};
-	int ret, i;
-	u16 seq;
-
-	if (WARN_ON(!wowlan))
-		return -EINVAL;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	/* Don't attempt WoWLAN when not associated, tear down instead. */
-	if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
-	    !iwl_is_associated_ctx(ctx)) {
-		ret = 1;
-		goto out;
-	}
-
-	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
-	if (!key_data.rsc_tsc) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
-
-	/*
-	 * We know the last used seqno, and the uCode expects to know that
-	 * one, it will increment before TX.
-	 */
-	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
-	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
-
-	/*
-	 * For QoS counters, we store the one to use next, so subtract 0x10
-	 * since the uCode will add 0x10 before using the value.
-	 */
-	for (i = 0; i < 8; i++) {
-		seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
-		seq -= 0x10;
-		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
-	}
-
-	if (wowlan->disconnect)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
-				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
-	if (wowlan->magic_pkt)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
-	if (wowlan->gtk_rekey_failure)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
-	if (wowlan->eap_identity_req)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
-	if (wowlan->four_way_handshake)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
-	if (wowlan->rfkill_release)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
-	if (wowlan->n_patterns)
-		wakeup_filter_cmd.enabled |=
-			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
-
-	iwl_scan_cancel_timeout(priv, 200);
-
-	memcpy(&rxon, &ctx->active, sizeof(rxon));
-
-	iwl_trans_stop_device(trans(priv));
-
-	priv->shrd->wowlan = true;
-
-	ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
-					   IWL_UCODE_WOWLAN);
-	if (ret)
-		goto error;
-
-	/* now configure WoWLAN ucode */
-	ret = iwl_alive_start(priv);
-	if (ret)
-		goto error;
-
-	memcpy(&ctx->staging, &rxon, sizeof(rxon));
-	ret = iwlagn_commit_rxon(priv, ctx);
-	if (ret)
-		goto error;
-
-	ret = iwl_power_update_mode(priv, true);
-	if (ret)
-		goto error;
-
-	if (!iwlagn_mod_params.sw_crypto) {
-		/* mark all keys clear */
-		priv->ucode_key_table = 0;
-		ctx->key_mapping_keys = 0;
-
-		/*
-		 * This needs to be unlocked due to lock ordering
-		 * constraints. Since we're in the suspend path
-		 * that isn't really a problem though.
-		 */
-		mutex_unlock(&priv->shrd->mutex);
-		ieee80211_iter_keys(priv->hw, ctx->vif,
-				    iwlagn_wowlan_program_keys,
-				    &key_data);
-		mutex_lock(&priv->shrd->mutex);
-		if (key_data.error) {
-			ret = -EIO;
-			goto error;
-		}
-
-		if (key_data.use_rsc_tsc) {
-			struct iwl_host_cmd rsc_tsc_cmd = {
-				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
-				.flags = CMD_SYNC,
-				.data[0] = key_data.rsc_tsc,
-				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-				.len[0] = sizeof(*key_data.rsc_tsc),
-			};
-
-			ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
-			if (ret)
-				goto error;
-		}
-
-		if (key_data.use_tkip) {
-			ret = iwl_trans_send_cmd_pdu(trans(priv),
-						 REPLY_WOWLAN_TKIP_PARAMS,
-						 CMD_SYNC, sizeof(tkip_cmd),
-						 &tkip_cmd);
-			if (ret)
-				goto error;
-		}
-
-		if (priv->have_rekey_data) {
-			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
-			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
-			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
-			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
-			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
-			kek_kck_cmd.replay_ctr = priv->replay_ctr;
-
-			ret = iwl_trans_send_cmd_pdu(trans(priv),
-						 REPLY_WOWLAN_KEK_KCK_MATERIAL,
-						 CMD_SYNC, sizeof(kek_kck_cmd),
-						 &kek_kck_cmd);
-			if (ret)
-				goto error;
-		}
-	}
-
-	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
-				 CMD_SYNC, sizeof(wakeup_filter_cmd),
-				 &wakeup_filter_cmd);
-	if (ret)
-		goto error;
-
-	ret = iwlagn_send_patterns(priv, wowlan);
-	if (ret)
-		goto error;
-
-	device_set_wakeup_enable(bus(priv)->dev, true);
-
-	/* Now let the ucode operate on its own */
-	iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
-			  CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
-	goto out;
-
- error:
-	priv->shrd->wowlan = false;
-	iwlagn_prepare_restart(priv);
-	ieee80211_restart_hw(priv->hw);
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	kfree(key_data.rsc_tsc);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
-
-static int iwlagn_mac_resume(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct ieee80211_vif *vif;
-	unsigned long flags;
-	u32 base, status = 0xffffffff;
-	int ret = -EIO;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
-			  CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
-	base = priv->device_pointers.error_event_table;
-	if (iwlagn_hw_valid_rtc_data_addr(base)) {
-		spin_lock_irqsave(&bus(priv)->reg_lock, flags);
-		ret = iwl_grab_nic_access_silent(bus(priv));
-		if (ret == 0) {
-			iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
-			status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
-			iwl_release_nic_access(bus(priv));
-		}
-		spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-		if (ret == 0) {
-			if (!priv->wowlan_sram)
-				priv->wowlan_sram =
-					kzalloc(priv->ucode_wowlan.data.len,
-						GFP_KERNEL);
-
-			if (priv->wowlan_sram)
-				_iwl_read_targ_mem_words(
-					bus(priv), 0x800000, priv->wowlan_sram,
-					priv->ucode_wowlan.data.len / 4);
-		}
-#endif
-	}
-
-	/* we'll clear ctx->vif during iwlagn_prepare_restart() */
-	vif = ctx->vif;
-
-	priv->shrd->wowlan = false;
-
-	device_set_wakeup_enable(bus(priv)->dev, false);
-
-	iwlagn_prepare_restart(priv);
-
-	memset((void *)&ctx->active, 0, sizeof(ctx->active));
-	iwl_connection_init_rx_config(priv, ctx);
-	iwlagn_set_rxon_chain(priv, ctx);
-
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	ieee80211_resume_disconnect(vif);
-
-	return 1;
-}
-#endif
-
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	IWL_DEBUG_MACDUMP(priv, "enter\n");
-
-	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-	if (iwlagn_tx_skb(priv, skb))
-		dev_kfree_skb_any(skb);
-
-	IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif,
-				       struct ieee80211_key_conf *keyconf,
-				       struct ieee80211_sta *sta,
-				       u32 iv32, u16 *phase1key)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
-}
-
-static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-			      struct ieee80211_vif *vif,
-			      struct ieee80211_sta *sta,
-			      struct ieee80211_key_conf *key)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	struct iwl_rxon_context *ctx = vif_priv->ctx;
-	int ret;
-	bool is_default_wep_key = false;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (iwlagn_mod_params.sw_crypto) {
-		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-		return -EOPNOTSUPP;
-	}
-
-	switch (key->cipher) {
-	case WLAN_CIPHER_SUITE_TKIP:
-		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-		/* fall through */
-	case WLAN_CIPHER_SUITE_CCMP:
-		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-		break;
-	default:
-		break;
-	}
-
-	/*
-	 * We could program these keys into the hardware as well, but we
-	 * don't expect much multicast traffic in IBSS and having keys
-	 * for more stations is probably more useful.
-	 *
-	 * Mark key TX-only and return 0.
-	 */
-	if (vif->type == NL80211_IFTYPE_ADHOC &&
-	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
-		key->hw_key_idx = WEP_INVALID_OFFSET;
-		return 0;
-	}
-
-	/* If they key was TX-only, accept deletion */
-	if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
-		return 0;
-
-	mutex_lock(&priv->shrd->mutex);
-	iwl_scan_cancel_timeout(priv, 100);
-
-	BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
-
-	/*
-	 * If we are getting WEP group key and we didn't receive any key mapping
-	 * so far, we are in legacy wep mode (group key only), otherwise we are
-	 * in 1X mode.
-	 * In legacy wep mode, we use another host command to the uCode.
-	 */
-	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
-		if (cmd == SET_KEY)
-			is_default_wep_key = !ctx->key_mapping_keys;
-		else
-			is_default_wep_key =
-				key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
-	}
-
-
-	switch (cmd) {
-	case SET_KEY:
-		if (is_default_wep_key) {
-			ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
-			break;
-		}
-		ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
-		if (ret) {
-			/*
-			 * can't add key for RX, but we don't need it
-			 * in the device for TX so still return 0
-			 */
-			ret = 0;
-			key->hw_key_idx = WEP_INVALID_OFFSET;
-		}
-
-		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-		break;
-	case DISABLE_KEY:
-		if (is_default_wep_key)
-			ret = iwl_remove_default_wep_key(priv, ctx, key);
-		else
-			ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
-
-		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
-
-static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   enum ieee80211_ampdu_mlme_action action,
-				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				   u8 buf_size)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret = -EINVAL;
-	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
-	struct iwl_rxon_context *ctx =  iwl_rxon_ctx_from_vif(vif);
-
-	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
-		     sta->addr, tid);
-
-	if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
-		return -EACCES;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	switch (action) {
-	case IEEE80211_AMPDU_RX_START:
-		IWL_DEBUG_HT(priv, "start Rx\n");
-		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
-		break;
-	case IEEE80211_AMPDU_RX_STOP:
-		IWL_DEBUG_HT(priv, "stop Rx\n");
-		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
-		if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
-			ret = 0;
-		break;
-	case IEEE80211_AMPDU_TX_START:
-		IWL_DEBUG_HT(priv, "start Tx\n");
-		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
-		break;
-	case IEEE80211_AMPDU_TX_STOP:
-		IWL_DEBUG_HT(priv, "stop Tx\n");
-		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
-		if ((ret == 0) && (priv->agg_tids_count > 0)) {
-			priv->agg_tids_count--;
-			IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
-				     priv->agg_tids_count);
-		}
-		if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
-			ret = 0;
-		if (!priv->agg_tids_count && priv->cfg->ht_params &&
-		    priv->cfg->ht_params->use_rts_for_aggregation) {
-			/*
-			 * switch off RTS/CTS if it was previously enabled
-			 */
-			sta_priv->lq_sta.lq.general_params.flags &=
-				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
-			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
-					&sta_priv->lq_sta.lq, CMD_ASYNC, false);
-		}
-		break;
-	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
-
-		iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
-				tid, buf_size);
-
-		/*
-		 * If the limit is 0, then it wasn't initialised yet,
-		 * use the default. We can do that since we take the
-		 * minimum below, and we don't want to go above our
-		 * default due to hardware restrictions.
-		 */
-		if (sta_priv->max_agg_bufsize == 0)
-			sta_priv->max_agg_bufsize =
-				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-
-		/*
-		 * Even though in theory the peer could have different
-		 * aggregation reorder buffer sizes for different sessions,
-		 * our ucode doesn't allow for that and has a global limit
-		 * for each station. Therefore, use the minimum of all the
-		 * aggregation sessions and our default value.
-		 */
-		sta_priv->max_agg_bufsize =
-			min(sta_priv->max_agg_bufsize, buf_size);
-
-		if (priv->cfg->ht_params &&
-		    priv->cfg->ht_params->use_rts_for_aggregation) {
-			/*
-			 * switch to RTS/CTS if it is the prefer protection
-			 * method for HT traffic
-			 */
-
-			sta_priv->lq_sta.lq.general_params.flags |=
-				LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
-		}
-		priv->agg_tids_count++;
-		IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
-			     priv->agg_tids_count);
-
-		sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
-			sta_priv->max_agg_bufsize;
-
-		iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
-				&sta_priv->lq_sta.lq, CMD_ASYNC, false);
-
-		IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
-			 sta->addr, tid);
-		ret = 0;
-		break;
-	}
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return ret;
-}
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif,
-			      struct ieee80211_sta *sta)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-	int ret = 0;
-	u8 sta_id;
-
-	IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
-			sta->addr);
-	mutex_lock(&priv->shrd->mutex);
-	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-			sta->addr);
-	sta_priv->sta_id = IWL_INVALID_STATION;
-
-	atomic_set(&sta_priv->pending_frames, 0);
-	if (vif->type == NL80211_IFTYPE_AP)
-		sta_priv->client = true;
-
-	ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
-				     is_ap, sta, &sta_id);
-	if (ret) {
-		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-			sta->addr, ret);
-		/* Should we return success if return code is EEXIST ? */
-		goto out;
-	}
-
-	sta_priv->sta_id = sta_id;
-
-	/* Initialize rate scaling */
-	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-		       sta->addr);
-	iwl_rs_rate_init(priv, sta, sta_id);
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
-
-static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
-				struct ieee80211_channel_switch *ch_switch)
-{
-	struct iwl_priv *priv = hw->priv;
-	const struct iwl_channel_info *ch_info;
-	struct ieee80211_conf *conf = &hw->conf;
-	struct ieee80211_channel *channel = ch_switch->channel;
-	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-	/*
-	 * MULTI-FIXME
-	 * When we add support for multiple interfaces, we need to
-	 * revisit this. The channel switch command in the device
-	 * only affects the BSS context, but what does that really
-	 * mean? And what if we get a CSA on the second interface?
-	 * This needs a lot of work.
-	 */
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	u16 ch;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	mutex_lock(&priv->shrd->mutex);
-
-	if (iwl_is_rfkill(priv->shrd))
-		goto out;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
-	    test_bit(STATUS_SCANNING, &priv->shrd->status) ||
-	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
-		goto out;
-
-	if (!iwl_is_associated_ctx(ctx))
-		goto out;
-
-	if (!priv->cfg->lib->set_channel_switch)
-		goto out;
-
-	ch = channel->hw_value;
-	if (le16_to_cpu(ctx->active.channel) == ch)
-		goto out;
-
-	ch_info = iwl_get_channel_info(priv, channel->band, ch);
-	if (!is_channel_valid(ch_info)) {
-		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
-		goto out;
-	}
-
-	spin_lock_irq(&priv->shrd->lock);
-
-	priv->current_ht_config.smps = conf->smps_mode;
-
-	/* Configure HT40 channels */
-	ctx->ht.enabled = conf_is_ht(conf);
-	if (ctx->ht.enabled)
-		iwlagn_config_ht40(conf, ctx);
-	else
-		ctx->ht.is_40mhz = false;
-
-	if ((le16_to_cpu(ctx->staging.channel) != ch))
-		ctx->staging.flags = 0;
-
-	iwl_set_rxon_channel(priv, channel, ctx);
-	iwl_set_rxon_ht(priv, ht_conf);
-	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
-	spin_unlock_irq(&priv->shrd->lock);
-
-	iwl_set_rate(priv);
-	/*
-	 * at this point, staging_rxon has the
-	 * configuration for channel switch
-	 */
-	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
-	priv->switch_channel = cpu_to_le16(ch);
-	if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
-		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
-		priv->switch_channel = 0;
-		ieee80211_chswitch_done(ctx->vif, false);
-	}
-
-out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
-				    unsigned int changed_flags,
-				    unsigned int *total_flags,
-				    u64 multicast)
-{
-	struct iwl_priv *priv = hw->priv;
-	__le32 filter_or = 0, filter_nand = 0;
-	struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag)	do { \
-	if (*total_flags & (test))		\
-		filter_or |= (flag);		\
-	else					\
-		filter_nand |= (flag);		\
-	} while (0)
-
-	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-			changed_flags, *total_flags);
-
-	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
-	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
-	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-	mutex_lock(&priv->shrd->mutex);
-
-	for_each_context(priv, ctx) {
-		ctx->staging.filter_flags &= ~filter_nand;
-		ctx->staging.filter_flags |= filter_or;
-
-		/*
-		 * Not committing directly because hardware can perform a scan,
-		 * but we'll eventually commit the filter flags change anyway.
-		 */
-	}
-
-	mutex_unlock(&priv->shrd->mutex);
-
-	/*
-	 * Receiving all multicast frames is always enabled by the
-	 * default flags setup in iwl_connection_init_rx_config()
-	 * since we currently do not support programming multicast
-	 * filters into the device.
-	 */
-	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	mutex_lock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "enter\n");
 
-	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
-		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
-		goto done;
-	}
-	if (iwl_is_rfkill(priv->shrd)) {
-		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
-		goto done;
-	}
 
-	/*
-	 * mac80211 will not push any more frames for transmit
-	 * until the flush is completed
-	 */
-	if (drop) {
-		IWL_DEBUG_MAC80211(priv, "send flush command\n");
-		if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
-			IWL_ERR(priv, "flush request fail\n");
-			goto done;
-		}
-	}
-	IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
-	iwl_trans_wait_tx_queue_empty(trans(priv));
-done:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
 
 void iwlagn_disable_roc(struct iwl_priv *priv)
 {
@@ -2758,160 +1493,6 @@
 	mutex_unlock(&priv->shrd->mutex);
 }
 
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
-				     struct ieee80211_channel *channel,
-				     enum nl80211_channel_type channel_type,
-				     int duration)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-	int err = 0;
-
-	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
-		return -EOPNOTSUPP;
-
-	if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
-		return -EOPNOTSUPP;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	priv->hw_roc_channel = channel;
-	priv->hw_roc_chantype = channel_type;
-	priv->hw_roc_duration = duration;
-	priv->hw_roc_start_notified = false;
-	cancel_delayed_work(&priv->hw_roc_disable_work);
-
-	if (!ctx->is_active) {
-		ctx->is_active = true;
-		ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
-		memcpy(ctx->staging.node_addr,
-		       priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
-		       ETH_ALEN);
-		memcpy(ctx->staging.bssid_addr,
-		       priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
-		       ETH_ALEN);
-		err = iwlagn_commit_rxon(priv, ctx);
-		if (err)
-			goto out;
-		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
-					     RXON_FILTER_PROMISC_MSK |
-					     RXON_FILTER_CTL2HOST_MSK;
-
-		err = iwlagn_commit_rxon(priv, ctx);
-		if (err) {
-			iwlagn_disable_roc(priv);
-			goto out;
-		}
-		priv->hw_roc_setup = true;
-	}
-
-	err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
-	if (err)
-		iwlagn_disable_roc(priv);
-
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
-		return -EOPNOTSUPP;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-	iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
-	iwlagn_disable_roc(priv);
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return 0;
-}
-
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif,
-			      const u8 *bssid,
-			      enum ieee80211_tx_sync_type type)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	struct iwl_rxon_context *ctx = vif_priv->ctx;
-	int ret;
-	u8 sta_id;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	if (iwl_is_associated_ctx(ctx)) {
-		ret = 0;
-		goto out;
-	}
-
-	if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
-	if (ret)
-		goto out;
-
-	if (WARN_ON(sta_id != ctx->ap_sta_id)) {
-		ret = -EIO;
-		goto out_remove_sta;
-	}
-
-	memcpy(ctx->bssid, bssid, ETH_ALEN);
-	ctx->preauth_bssid = true;
-
-	ret = iwlagn_commit_rxon(priv, ctx);
-
-	if (ret == 0)
-		goto out;
-
- out_remove_sta:
-	iwl_remove_station(priv, sta_id, bssid);
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   const u8 *bssid,
-				   enum ieee80211_tx_sync_type type)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	struct iwl_rxon_context *ctx = vif_priv->ctx;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	if (iwl_is_associated_ctx(ctx))
-		goto out;
-
-	iwl_remove_station(priv, ctx->ap_sta_id, bssid);
-	ctx->preauth_bssid = false;
-	/* no need to commit */
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
 /*****************************************************************************
  *
  * driver setup and teardown
@@ -2935,8 +1516,8 @@
 
 	iwl_setup_scan_deferred_work(priv);
 
-	if (priv->cfg->lib->bt_setup_deferred_work)
-		priv->cfg->lib->bt_setup_deferred_work(priv);
+	if (cfg(priv)->lib->bt_setup_deferred_work)
+		cfg(priv)->lib->bt_setup_deferred_work(priv);
 
 	init_timer(&priv->statistics_periodic);
 	priv->statistics_periodic.data = (unsigned long)priv;
@@ -2953,8 +1534,8 @@
 
 static void iwl_cancel_deferred_work(struct iwl_priv *priv)
 {
-	if (priv->cfg->lib->cancel_deferred_work)
-		priv->cfg->lib->cancel_deferred_work(priv);
+	if (cfg(priv)->lib->cancel_deferred_work)
+		cfg(priv)->lib->cancel_deferred_work(priv);
 
 	cancel_work_sync(&priv->run_time_calib_work);
 	cancel_work_sync(&priv->beacon_update);
@@ -2998,6 +1579,8 @@
 
 	mutex_init(&priv->shrd->mutex);
 
+	INIT_LIST_HEAD(&trans(priv)->calib_results);
+
 	priv->ieee_channels = NULL;
 	priv->ieee_rates = NULL;
 	priv->band = IEEE80211_BAND_2GHZ;
@@ -3021,8 +1604,8 @@
 	iwl_init_scan_params(priv);
 
 	/* init bt coex */
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist) {
+	if (cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist) {
 		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
 		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
 		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -3054,88 +1637,19 @@
 
 static void iwl_uninit_drv(struct iwl_priv *priv)
 {
-	iwl_calib_free_results(priv);
 	iwl_free_geos(priv);
 	iwl_free_channel_map(priv);
 	if (priv->tx_cmd_pool)
 		kmem_cache_destroy(priv->tx_cmd_pool);
 	kfree(priv->scan_cmd);
 	kfree(priv->beacon_cmd);
+	kfree(rcu_dereference_raw(priv->noa_data));
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	kfree(priv->wowlan_sram);
 #endif
 }
 
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
-			   enum ieee80211_rssi_event rssi_event)
-{
-	struct iwl_priv *priv = hw->priv;
 
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-	mutex_lock(&priv->shrd->mutex);
-
-	if (priv->cfg->bt_params &&
-			priv->cfg->bt_params->advanced_bt_coexist) {
-		if (rssi_event == RSSI_EVENT_LOW)
-			priv->bt_enable_pspoll = true;
-		else if (rssi_event == RSSI_EVENT_HIGH)
-			priv->bt_enable_pspoll = false;
-
-		iwlagn_send_advance_bt_config(priv);
-	} else {
-		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
-				"ignoring RSSI callback\n");
-	}
-
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
-			   struct ieee80211_sta *sta, bool set)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	queue_work(priv->shrd->workqueue, &priv->beacon_update);
-
-	return 0;
-}
-
-struct ieee80211_ops iwlagn_hw_ops = {
-	.tx = iwlagn_mac_tx,
-	.start = iwlagn_mac_start,
-	.stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM_SLEEP
-	.suspend = iwlagn_mac_suspend,
-	.resume = iwlagn_mac_resume,
-#endif
-	.add_interface = iwlagn_mac_add_interface,
-	.remove_interface = iwlagn_mac_remove_interface,
-	.change_interface = iwlagn_mac_change_interface,
-	.config = iwlagn_mac_config,
-	.configure_filter = iwlagn_configure_filter,
-	.set_key = iwlagn_mac_set_key,
-	.update_tkip_key = iwlagn_mac_update_tkip_key,
-	.set_rekey_data = iwlagn_mac_set_rekey_data,
-	.conf_tx = iwlagn_mac_conf_tx,
-	.bss_info_changed = iwlagn_bss_info_changed,
-	.ampdu_action = iwlagn_mac_ampdu_action,
-	.hw_scan = iwlagn_mac_hw_scan,
-	.sta_notify = iwlagn_mac_sta_notify,
-	.sta_add = iwlagn_mac_sta_add,
-	.sta_remove = iwlagn_mac_sta_remove,
-	.channel_switch = iwlagn_mac_channel_switch,
-	.flush = iwlagn_mac_flush,
-	.tx_last_beacon = iwlagn_mac_tx_last_beacon,
-	.remain_on_channel = iwlagn_mac_remain_on_channel,
-	.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
-	.rssi_callback = iwlagn_mac_rssi_callback,
-	CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
-	CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
-	.tx_sync = iwlagn_mac_tx_sync,
-	.finish_tx_sync = iwlagn_mac_finish_tx_sync,
-	.set_tim = iwlagn_mac_set_tim,
-};
 
 static u32 iwl_hw_detect(struct iwl_priv *priv)
 {
@@ -3155,40 +1669,55 @@
 		hw_params(priv).rx_page_order =
 			get_order(IWL_RX_BUF_SIZE_4K);
 
-	if (iwlagn_mod_params.disable_11n)
-		priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+	if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+		cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
 
 	hw_params(priv).num_ampdu_queues =
-		priv->cfg->base_params->num_of_ampdu_queues;
+		cfg(priv)->base_params->num_of_ampdu_queues;
 	hw_params(priv).shadow_reg_enable =
-		priv->cfg->base_params->shadow_reg_enable;
-	hw_params(priv).sku = priv->cfg->sku;
-	hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
+		cfg(priv)->base_params->shadow_reg_enable;
+	hw_params(priv).sku = cfg(priv)->sku;
+	hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
 
 	/* Device-specific setup */
-	return priv->cfg->lib->set_hw_params(priv);
+	return cfg(priv)->lib->set_hw_params(priv);
 }
 
-/* This function both allocates and initializes hw and priv. */
-static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
+
+
+static void iwl_debug_config(struct iwl_priv *priv)
 {
-	struct iwl_priv *priv;
-	/* mac80211 allocates memory for this device instance, including
-	 *   space for this driver's private structure */
-	struct ieee80211_hw *hw;
+	dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+		"enabled\n");
+#else
+		"disabled\n");
+#endif
+	dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		"enabled\n");
+#else
+		"disabled\n");
+#endif
+	dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+		"enabled\n");
+#else
+		"disabled\n");
+#endif
 
-	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
-	if (hw == NULL) {
-		pr_err("%s: Can not allocate network device\n",
-		       cfg->name);
-		goto out;
-	}
-
-	priv = hw->priv;
-	priv->hw = hw;
-
-out:
-	return hw;
+	dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+		"enabled\n");
+#else
+		"disabled\n");
+#endif
+	dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+#ifdef CONFIG_IWLWIFI_P2P
+		"enabled\n");
+#else
+		"disabled\n");
+#endif
 }
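
    A note on the new iwl_debug_config() above: it reports each compile-time
    debug option by splitting the dev_printk() format string across #ifdef
    blocks. A minimal alternative sketch, not part of this patch and assuming
    IS_ENABLED() from <linux/kconfig.h> is available on this kernel (the
    helper name below is hypothetical):

	static void iwl_debug_config_alt(struct iwl_priv *priv)
	{
		/* each option is a bool Kconfig symbol, so IS_ENABLED() applies */
		dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG %s\n",
			   IS_ENABLED(CONFIG_IWLWIFI_DEBUG) ? "enabled" : "disabled");
		dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS %s\n",
			   IS_ENABLED(CONFIG_IWLWIFI_DEBUGFS) ? "enabled" : "disabled");
		dev_printk(KERN_INFO, bus(priv)->dev,
			   "CONFIG_IWLWIFI_DEVICE_TRACING %s\n",
			   IS_ENABLED(CONFIG_IWLWIFI_DEVICE_TRACING) ?
			   "enabled" : "disabled");
	}

    The #ifdef form used by the patch works on any kernel; the IS_ENABLED()
    form merely keeps every report on a single source line.
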
 
 int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
@@ -3203,8 +1732,9 @@
 	/************************
 	 * 1. Allocating HW data
 	 ************************/
-	hw = iwl_alloc_all(cfg);
+	hw = iwl_alloc_all();
 	if (!hw) {
+		pr_err("%s: Cannot allocate network device\n", cfg->name);
 		err = -ENOMEM;
 		goto out;
 	}
@@ -3225,8 +1755,11 @@
 
 	SET_IEEE80211_DEV(hw, bus(priv)->dev);
 
+	/* what debugging capabilities we have */
+	iwl_debug_config(priv);
+
 	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-	priv->cfg = cfg;
+	cfg(priv) = cfg;
 
 	/* is antenna coupling more than 35dB ? */
 	priv->bt_ant_couple_ok =
@@ -3260,7 +1793,7 @@
 	 ***********************/
 	hw_rev = iwl_hw_detect(priv);
 	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
-		priv->cfg->name, hw_rev);
+		cfg(priv)->name, hw_rev);
 
 	err = iwl_trans_request_irq(trans(priv));
 	if (err)
@@ -3290,11 +1823,11 @@
 		goto out_free_eeprom;
 
 	/* extract MAC Address */
-	iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+	iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
 	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
 	priv->hw->wiphy->addresses = priv->addresses;
 	priv->hw->wiphy->n_addresses = 1;
-	num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+	num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
 	if (num_mac > 1) {
 		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
 		       ETH_ALEN);
@@ -3359,7 +1892,7 @@
 	priv->shrd->workqueue = NULL;
 	iwl_uninit_drv(priv);
 out_free_eeprom:
-	iwl_eeprom_free(priv);
+	iwl_eeprom_free(priv->shrd);
 out_free_trans:
 	iwl_trans_free(trans(priv));
 out_free_traffic_mem:
@@ -3384,21 +1917,16 @@
 	set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
 
 	iwl_testmode_cleanup(priv);
-	iwl_leds_exit(priv);
-
-	if (priv->mac80211_registered) {
-		ieee80211_unregister_hw(priv->hw);
-		priv->mac80211_registered = 0;
-	}
+	iwlagn_mac_unregister(priv);
 
 	iwl_tt_exit(priv);
 
 	/*This will stop the queues, move the device to low power state */
 	iwl_trans_stop_device(trans(priv));
 
-	iwl_dealloc_ucode(priv);
+	iwl_dealloc_ucode(trans(priv));
 
-	iwl_eeprom_free(priv);
+	iwl_eeprom_free(priv->shrd);
 
 	/*netif_stop_queue(dev); */
 	flush_workqueue(priv->shrd->workqueue);
@@ -3468,8 +1996,9 @@
 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
 module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
 MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
+MODULE_PARM_DESC(11n_disable,
+	"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
 module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
 		   int, S_IRUGO);
 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
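
    The 11n_disable module parameter becomes a bitmap in this hunk; the
    IWL_DISABLE_HT_ALL test appears earlier in the patch (iwl_hw_detect).
    A small sketch of how the remaining bits could be consumed follows.
    IWL_DISABLE_HT_TXAGG, IWL_DISABLE_HT_RXAGG and the helper are assumed
    names for the "2" and "4" bits described in the parameter text, not
    definitions taken from this patch:

	#define IWL_DISABLE_HT_ALL	BIT(0)	/* used by iwl_hw_detect() above */
	#define IWL_DISABLE_HT_TXAGG	BIT(1)	/* assumed: "2: agg TX" */
	#define IWL_DISABLE_HT_RXAGG	BIT(2)	/* assumed: "4: agg RX" */

	static bool iwl_ht_tx_agg_allowed(void)
	{
		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
			return false;	/* all HT functionality is off */
		return !(iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG);
	}

    With such a layout, loading the module with 11n_disable=2 would keep HT
    enabled while refusing TX aggregation sessions.
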
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 3856aba..f84fb3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,12 @@
 
 #include "iwl-dev.h"
 
+struct iwlagn_ucode_capabilities {
+	u32 max_probe_length;
+	u32 standard_phy_calibration_size;
+	u32 flags;
+};
+
 extern struct ieee80211_ops iwlagn_hw_ops;
 
 int iwl_reset_ict(struct iwl_trans *trans);
@@ -77,6 +83,16 @@
 	hdr->data_valid = 1;
 }
 
+void __iwl_down(struct iwl_priv *priv);
+void iwl_down(struct iwl_priv *priv);
+void iwlagn_prepare_restart(struct iwl_priv *priv);
+
+/* MAC80211 */
+struct ieee80211_hw *iwl_alloc_all(void);
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+			      struct iwlagn_ucode_capabilities *capa);
+void iwlagn_mac_unregister(struct iwl_priv *priv);
+
 /* RXON */
 int iwlagn_set_pan_params(struct iwl_priv *priv);
 int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -93,20 +109,20 @@
 int iwlagn_rx_calib_result(struct iwl_priv *priv,
 			    struct iwl_rx_mem_buffer *rxb,
 			    struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
-int iwlagn_run_init_ucode(struct iwl_priv *priv);
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
-				 struct fw_img *image,
-				 enum iwlagn_ucode_type ucode_type);
 
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
 void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+int iwlagn_send_patterns(struct iwl_priv *priv,
+			 struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv,
+		   struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+#endif
 
 /* rx */
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
@@ -117,6 +133,8 @@
 int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u8 buf_size);
 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta, u16 tid);
 int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
@@ -198,9 +216,6 @@
 			   struct ieee80211_sta *sta, u8 *sta_id_r);
 int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
 		       const u8 *addr);
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta);
-
 u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
 
@@ -318,10 +333,6 @@
 int iwl_update_bcast_station(struct iwl_priv *priv,
 			     struct iwl_rxon_context *ctx);
 int iwl_update_bcast_stations(struct iwl_priv *priv);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
-			   struct ieee80211_vif *vif,
-			   enum sta_notify_cmd cmd,
-			   struct ieee80211_sta *sta);
 
 /* rate */
 static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -341,28 +352,11 @@
 
 /* eeprom */
 void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
 
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
-			      struct iwl_notification_wait *wait_entry,
-			      u8 cmd,
-			      void (*fn)(struct iwl_priv *priv,
-					 struct iwl_rx_packet *pkt,
-					 void *data),
-			      void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
-			 struct iwl_notification_wait *wait_entry,
-			 unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
-			   struct iwl_notification_wait *wait_entry);
-extern int iwlagn_init_alive_start(struct iwl_priv *priv);
 extern int iwl_alive_start(struct iwl_priv *priv);
 /* svtool */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
 				   int len);
 extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index 08b9759..940d503 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -122,7 +122,8 @@
  * struct iwl_bus_ops - bus specific operations
  * @get_pm_support: must returns true if the bus can go to sleep
  * @apm_config: will be called during the config of the APM
- * @get_hw_id: prints the hw_id in the provided buffer
+ * @get_hw_id_string: prints the hw_id in the provided buffer
+ * @get_hw_id: returns the hw_id as a u32
  * @write8: write a byte to register at offset ofs
  * @write32: write a dword to register at offset ofs
  * @wread32: read a dword at register at offset ofs
@@ -130,7 +131,8 @@
 struct iwl_bus_ops {
 	bool (*get_pm_support)(struct iwl_bus *bus);
 	void (*apm_config)(struct iwl_bus *bus);
-	void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
+	void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
+	u32 (*get_hw_id)(struct iwl_bus *bus);
 	void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
 	void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
 	u32 (*read32)(struct iwl_bus *bus, u32 ofs);
@@ -172,9 +174,15 @@
 	bus->ops->apm_config(bus);
 }
 
-static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
+static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
+		int buf_len)
 {
-	bus->ops->get_hw_id(bus, buf, buf_len);
+	bus->ops->get_hw_id_string(bus, buf, buf_len);
+}
+
+static inline u32 bus_get_hw_id(struct iwl_bus *bus)
+{
+	return bus->ops->get_hw_id(bus);
 }
 
 static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
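
    The bus ops now expose both a string form (get_hw_id_string) and a raw
    u32 hw_id. One plausible PCI-side implementation of the new op is
    sketched below; the real change lives in iwl-pci.c and is not part of
    this hunk, so the packing of device and subsystem IDs is an assumption.
    bus->dev is the underlying struct device, as used elsewhere in this
    patch via bus(priv)->dev:

	static u32 iwl_pci_bus_get_hw_id(struct iwl_bus *bus)
	{
		struct pci_dev *pci_dev = to_pci_dev(bus->dev);

		/* pack PCI device ID and subsystem ID into one u32 */
		return (pci_dev->device << 16) | pci_dev->subsystem_device;
	}

    A backend would then point the .get_hw_id member of its struct
    iwl_bus_ops instance at this function, next to the renamed
    .get_hw_id_string op.
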
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index 2a2dc45..e1d7825 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -101,17 +101,11 @@
 extern struct iwl_cfg iwl130_bgn_cfg;
 extern struct iwl_cfg iwl130_bg_cfg;
 extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
 extern struct iwl_cfg iwl2000_2bgn_d_cfg;
 extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
 extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
 extern struct iwl_cfg iwl105_bgn_cfg;
 extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
 extern struct iwl_cfg iwl135_bgn_cfg;
 
 #endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 69d5f85..265de39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,10 +109,10 @@
 	/* RX, TX, LEDs */
 	REPLY_TX = 0x1c,
 	REPLY_LEDS_CMD = 0x48,
-	REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+	REPLY_TX_LINK_QUALITY_CMD = 0x4e,
 
 	/* WiMAX coexistence */
-	COEX_PRIORITY_TABLE_CMD = 0x5a,	/* for 5000 series and up */
+	COEX_PRIORITY_TABLE_CMD = 0x5a,
 	COEX_MEDIUM_NOTIFICATION = 0x5b,
 	COEX_EVENT_CMD = 0x5c,
 
@@ -198,6 +198,7 @@
 	REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
 	REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
 	REPLY_WOWLAN_GET_STATUS = 0xe5,
+	REPLY_D3_CONFIG = 0xd3,
 
 	REPLY_MAX = 0xff
 };
@@ -465,23 +466,27 @@
 	u32 frame_ptr;		/* frame pointer */
 	u32 stack_ptr;		/* stack pointer */
 	u32 hcmd;		/* last host command header */
-#if 0
-	/* no need to read the remainder, we don't use the values */
-	u32 isr0;		/* isr status register LMPM_NIC_ISR0: rxtx_flag */
-	u32 isr1;		/* isr status register LMPM_NIC_ISR1: host_flag */
-	u32 isr2;		/* isr status register LMPM_NIC_ISR2: enc_flag */
-	u32 isr3;		/* isr status register LMPM_NIC_ISR3: time_flag */
-	u32 isr4;		/* isr status register LMPM_NIC_ISR4: wico interrupt */
+	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
+				 * rxtx_flag */
+	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
+				 * host_flag */
+	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
+				 * enc_flag */
+	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
+				 * time_flag */
+	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
+				 * wico interrupt */
 	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
 	u32 wait_event;		/* wait event() caller address */
 	u32 l2p_control;	/* L2pControlField */
 	u32 l2p_duration;	/* L2pDurationField */
 	u32 l2p_mhvalid;	/* L2pMhValidBits */
 	u32 l2p_addr_match;	/* L2pAddrMatchStat */
-	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on (LMPM_PMG_SEL) */
-	u32 u_timestamp;	/* indicate when the date and time of the compilation */
+	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
+				 * (LMPM_PMG_SEL) */
+	u32 u_timestamp;	/* indicates the date and time of the
+				 * compilation */
 	u32 flow_handler;	/* FH read/write pointers, RX credit */
-#endif
 } __packed;
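
    With the #if 0 block gone, every word of the error event table is part
    of the declared layout again. A brief sketch of pulling one word out of
    device SRAM, mirroring the HBUS_TARG_MEM_RADDR / HBUS_TARG_MEM_RDAT
    sequence the resume path in this patch already uses; grabbing NIC access
    and the reg_lock are omitted for brevity, and the helper name is
    hypothetical:

	static u32 iwl_read_error_word(struct iwl_bus *bus, u32 base, u32 word_idx)
	{
		/* point the indirect SRAM window at the requested word ... */
		iwl_write32(bus, HBUS_TARG_MEM_RADDR, base + word_idx * sizeof(u32));
		/* ... and read it back through the data register */
		return iwl_read32(bus, HBUS_TARG_MEM_RDAT);
	}
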
 
 struct iwl_alive_resp {
@@ -809,7 +814,7 @@
 #define	IWLAGN_STATION_COUNT	16
 
 #define	IWL_INVALID_STATION 	255
-#define IWL_MAX_TID_COUNT	9
+#define IWL_MAX_TID_COUNT	8
 
 #define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
 #define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
@@ -930,8 +935,7 @@
 	 * corresponding to bit (e.g. bit 5 controls TID 5).
 	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
 	__le16 tid_disable_tx;
-
-	__le16	rate_n_flags;		/* 3945 only */
+	__le16 legacy_reserved;
 
 	/* TID for which to add block-ack support.
 	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1161,8 +1165,7 @@
  *
  * uCode handles retrying Tx when an ACK is expected but not received.
  * This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (agn).
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
  *
  * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
  * This command must be executed after every RXON command, before Tx can occur.
@@ -1174,25 +1177,9 @@
  * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
  * before this frame. if CTS-to-self required check
  * RXON_FLG_SELF_CTS_EN status.
- * unused in 3945/4965, used in 5000 series and after
  */
 #define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
 
-/*
- * 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-
-/*
- * 1: Transmit Clear-To-Send to self before this frame.
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
-
 /* 1: Expect ACK from receiving station
  * 0: Don't expect ACK (MAC header's duration field s/b 0)
  * Set this for unicast frames, but not broadcast/multicast. */
@@ -1210,18 +1197,8 @@
  * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
 #define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
 
-/*
- * 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
-
-/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
- * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+/* Tx antenna selection field; reserved (0) for agn devices. */
 #define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
-#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
-#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
 
 /* 1: Ignore Bluetooth priority for this frame.
  * 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1567,7 +1544,6 @@
 	__le64 bitmap;
 	__le16 scd_flow;
 	__le16 scd_ssn;
-	/* following only for 5000 series and up */
 	u8 txed;	/* number of frames sent */
 	u8 txed_2_done; /* number of frames acked */
 } __packed;
@@ -1669,7 +1645,7 @@
 /*
  * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
  *
- * For agn devices only; 3945 uses REPLY_RATE_SCALE.
+ * For agn devices.
  *
  * Each station in the agn device's internal station table has its own table
  * of 16
@@ -1918,7 +1894,7 @@
 /*
  * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
  *
- * 3945 and agn devices support hardware handshake with Bluetooth device on
+ * agn devices support hardware handshake with Bluetooth device on
  * same platform.  Bluetooth device alerts wireless device when it will Tx;
  * wireless device can delay or kill its own Tx to accommodate.
  */
@@ -2202,8 +2178,8 @@
 
 struct iwl_powertable_cmd {
 	__le16 flags;
-	u8 keep_alive_seconds;		/* 3945 reserved */
-	u8 debug_flags;			/* 3945 reserved */
+	u8 keep_alive_seconds;
+	u8 debug_flags;
 	__le32 rx_data_timeout;
 	__le32 tx_data_timeout;
 	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2324,9 +2300,9 @@
 /**
  * struct iwl_ssid_ie - directed scan network information element
  *
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
- * each channel may select different ssids from among the 20 (4) entries.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
  * SSID IEs get transmitted in reverse order of entry.
  */
 struct iwl_ssid_ie {
@@ -2335,7 +2311,6 @@
 	u8 ssid[32];
 } __packed;
 
-#define PROBE_OPTION_MAX_3945		4
 #define PROBE_OPTION_MAX		20
 #define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
 #define IWL_GOOD_CRC_TH_DISABLED	0
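
For illustration only, a minimal userspace sketch of how directed-scan SSID entries of the shape described above might be filled, using a reduced copy of the iwl_ssid_ie layout and the 20-entry PROBE_OPTION_MAX limit; fill_ssid_ies() and the sample SSIDs are invented and this is not the driver's scan path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PROBE_OPTION_MAX	20
#define WLAN_EID_SSID		0	/* 802.11 SSID element ID */

struct iwl_ssid_ie {
	uint8_t id;
	uint8_t len;
	uint8_t ssid[32];
} __attribute__((packed));

static int fill_ssid_ies(struct iwl_ssid_ie *ies,
			 const char *const *ssids, int n)
{
	int i;

	if (n > PROBE_OPTION_MAX)
		n = PROBE_OPTION_MAX;
	for (i = 0; i < n; i++) {
		size_t len = strlen(ssids[i]);

		if (len > sizeof(ies[i].ssid))
			len = sizeof(ies[i].ssid);
		ies[i].id = WLAN_EID_SSID;
		ies[i].len = (uint8_t)len;
		memcpy(ies[i].ssid, ssids[i], len);
	}
	return i;	/* entries actually used */
}

int main(void)
{
	struct iwl_ssid_ie ies[PROBE_OPTION_MAX] = { { 0, 0, { 0 } } };
	const char *ssids[] = { "guest", "lab-5ghz" };

	printf("filled %d SSID IEs, each %zu bytes\n",
	       fill_ssid_ies(ies, ssids, 2), sizeof(ies[0]));
	return 0;
}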
@@ -2416,8 +2391,6 @@
 				 * channel */
 	__le32 suspend_time;	/* pause scan this long (in "extended beacon
 				 * format") when returning to service chnl:
-				 * 3945; 31:24 # beacons, 19:0 additional usec,
-				 * 4965; 31:22 # beacons, 21:0 additional usec.
 				 */
 	__le32 flags;		/* RXON_FLG_* */
 	__le32 filter_flags;	/* RXON_FILTER_* */
@@ -2733,7 +2706,7 @@
 
 struct statistics_general_common {
 	__le32 temperature;   /* radio temperature */
-	__le32 temperature_m; /* for 5000 and up, this is radio voltage */
+	__le32 temperature_m; /* radio voltage */
 	struct statistics_dbg dbg;
 	__le32 sleep_time;
 	__le32 slots_out;
@@ -3801,6 +3774,19 @@
 } __attribute__((packed));
 
 /*
+ * REPLY_D3_CONFIG
+ */
+enum iwlagn_d3_wakeup_filters {
+	IWLAGN_D3_WAKEUP_RFKILL		= BIT(0),
+	IWLAGN_D3_WAKEUP_SYSASSERT	= BIT(1),
+};
+
+struct iwlagn_d3_config_cmd {
+	__le32 min_sleep_time;
+	__le32 wakeup_flags;
+} __packed;
+
+/*
  * REPLY_WOWLAN_PATTERNS
  */
 #define IWLAGN_WOWLAN_MIN_PATTERN_LEN	16
@@ -3830,19 +3816,16 @@
 	IWLAGN_WOWLAN_WAKEUP_BEACON_MISS	= BIT(2),
 	IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE	= BIT(3),
 	IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL	= BIT(4),
-	IWLAGN_WOWLAN_WAKEUP_RFKILL		= BIT(5),
-	IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR	= BIT(6),
-	IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ	= BIT(7),
-	IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE	= BIT(8),
-	IWLAGN_WOWLAN_WAKEUP_ALWAYS		= BIT(9),
-	IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT	= BIT(10),
+	IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ	= BIT(5),
+	IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE	= BIT(6),
+	IWLAGN_WOWLAN_WAKEUP_ALWAYS		= BIT(7),
+	IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT	= BIT(8),
 };
 
 struct iwlagn_wowlan_wakeup_filter_cmd {
 	__le32 enabled;
 	__le16 non_qos_seq;
-	u8 min_sleep_seconds;
-	u8 reserved;
+	__le16 reserved;
 	__le16 qos_seq[8];
 };
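
As a rough sketch of how the new REPLY_D3_CONFIG payload might be assembled, the snippet below packs the two little-endian fields with the wakeup-flag bits defined above. cpu_to_le32() is stubbed for a little-endian host and the sleep value is made up, so this is purely illustrative and not the driver's command-submission path.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

typedef uint32_t __le32;
/* little-endian host assumed; stands in for the kernel helper */
static __le32 cpu_to_le32(uint32_t v) { return v; }

enum iwlagn_d3_wakeup_filters {
	IWLAGN_D3_WAKEUP_RFKILL		= BIT(0),
	IWLAGN_D3_WAKEUP_SYSASSERT	= BIT(1),
};

struct iwlagn_d3_config_cmd {
	__le32 min_sleep_time;
	__le32 wakeup_flags;
} __attribute__((packed));

int main(void)
{
	struct iwlagn_d3_config_cmd cmd = {
		.min_sleep_time = cpu_to_le32(10000),	/* invented value */
		.wakeup_flags	= cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL |
					      IWLAGN_D3_WAKEUP_SYSASSERT),
	};

	printf("cmd is %zu bytes, wakeup_flags=%#x\n",
	       sizeof(cmd), (unsigned)cmd.wakeup_flags);
	return 0;
}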
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index fcf5416..7bcfa78 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -60,8 +60,8 @@
 
 	ht_info->ht_supported = true;
 
-	if (priv->cfg->ht_params &&
-	    priv->cfg->ht_params->ht_greenfield_support)
+	if (cfg(priv)->ht_params &&
+	    cfg(priv)->ht_params->ht_greenfield_support)
 		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
 	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
 	max_bit_rate = MAX_BIT_RATE_20_MHZ;
@@ -76,11 +76,7 @@
 		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
 
 	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
-	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
-		ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
 	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
-		ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
 
 	ht_info->mcs.rx_mask[0] = 0xFF;
 	if (rx_chains_num >= 2)
@@ -141,7 +137,7 @@
 	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
 	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
 
-	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+	if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
 		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
 					 IEEE80211_BAND_5GHZ);
 
@@ -151,7 +147,7 @@
 	sband->bitrates = rates;
 	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
 
-	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+	if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
 		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
 					 IEEE80211_BAND_2GHZ);
 
@@ -206,12 +202,12 @@
 	priv->tx_power_next = max_tx_power;
 
 	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
-	     priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+	     cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
 		char buf[32];
-		bus_get_hw_id(bus(priv), buf, sizeof(buf));
+		bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
 		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
 			"Please send your %s to maintainer.\n", buf);
-		priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+		cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
 	}
 
 	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
@@ -836,19 +832,6 @@
 }
 #endif
 
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	struct iwl_notification_wait *wait_entry;
-
-	spin_lock_irqsave(&priv->notif_wait_lock, flags);
-	list_for_each_entry(wait_entry, &priv->notif_waits, list)
-		wait_entry->aborted = true;
-	spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
-	wake_up_all(&priv->notif_waitq);
-}
-
 void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
 {
 	unsigned int reload_msec;
@@ -860,7 +843,7 @@
 	/* Cancel currently queued command. */
 	clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
 
-	iwlagn_abort_notification_waits(priv);
+	iwl_abort_notification_waits(priv->shrd);
 
 	/* Keep the restart process from trying to send host
 	 * commands by clearing the ready bit */
@@ -979,9 +962,9 @@
 	bus_apm_config(bus(priv));
 
 	/* Configure analog phase-lock-loop before activating to D0A */
-	if (priv->cfg->base_params->pll_cfg_val)
+	if (cfg(priv)->base_params->pll_cfg_val)
 		iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
-			    priv->cfg->base_params->pll_cfg_val);
+			    cfg(priv)->base_params->pll_cfg_val);
 
 	/*
 	 * Set "initialization complete" bit to move adapter from
@@ -1120,229 +1103,8 @@
 					&statistics_cmd);
 }
 
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif, u16 queue,
-		    const struct ieee80211_tx_queue_params *params)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx;
-	unsigned long flags;
-	int q;
 
-	IWL_DEBUG_MAC80211(priv, "enter\n");
 
-	if (!iwl_is_ready_rf(priv->shrd)) {
-		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-		return -EIO;
-	}
-
-	if (queue >= AC_NUM) {
-		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
-		return 0;
-	}
-
-	q = AC_NUM - 1 - queue;
-
-	spin_lock_irqsave(&priv->shrd->lock, flags);
-
-	/*
-	 * MULTI-FIXME
-	 * This may need to be done per interface in nl80211/cfg80211/mac80211.
-	 */
-	for_each_context(priv, ctx) {
-		ctx->qos_data.def_qos_parm.ac[q].cw_min =
-			cpu_to_le16(params->cw_min);
-		ctx->qos_data.def_qos_parm.ac[q].cw_max =
-			cpu_to_le16(params->cw_max);
-		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
-		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
-				cpu_to_le16((params->txop * 32));
-
-		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
-	}
-
-	spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return 0;
-}
-
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
-	struct iwl_priv *priv = hw->priv;
-
-	return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-
-static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	iwl_connection_init_rx_config(priv, ctx);
-
-	iwlagn_set_rxon_chain(priv, ctx);
-
-	return iwlagn_commit_rxon(priv, ctx);
-}
-
-static int iwl_setup_interface(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx)
-{
-	struct ieee80211_vif *vif = ctx->vif;
-	int err;
-
-	lockdep_assert_held(&priv->shrd->mutex);
-
-	/*
-	 * This variable will be correct only when there's just
-	 * a single context, but all code using it is for hardware
-	 * that supports only one context.
-	 */
-	priv->iw_mode = vif->type;
-
-	ctx->is_active = true;
-
-	err = iwl_set_mode(priv, ctx);
-	if (err) {
-		if (!ctx->always_active)
-			ctx->is_active = false;
-		return err;
-	}
-
-	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
-	    vif->type == NL80211_IFTYPE_ADHOC) {
-		/*
-		 * pretend to have high BT traffic as long as we
-		 * are operating in IBSS mode, as this will cause
-		 * the rate scaling etc. to behave as intended.
-		 */
-		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
-	}
-
-	return 0;
-}
-
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-	struct iwl_rxon_context *tmp, *ctx = NULL;
-	int err;
-	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
-
-	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
-			   viftype, vif->addr);
-
-	cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
-	mutex_lock(&priv->shrd->mutex);
-
-	iwlagn_disable_roc(priv);
-
-	if (!iwl_is_ready_rf(priv->shrd)) {
-		IWL_WARN(priv, "Try to add interface when device not ready\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	for_each_context(priv, tmp) {
-		u32 possible_modes =
-			tmp->interface_modes | tmp->exclusive_interface_modes;
-
-		if (tmp->vif) {
-			/* check if this busy context is exclusive */
-			if (tmp->exclusive_interface_modes &
-						BIT(tmp->vif->type)) {
-				err = -EINVAL;
-				goto out;
-			}
-			continue;
-		}
-
-		if (!(possible_modes & BIT(viftype)))
-			continue;
-
-		/* have maybe usable context w/o interface */
-		ctx = tmp;
-		break;
-	}
-
-	if (!ctx) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	vif_priv->ctx = ctx;
-	ctx->vif = vif;
-
-	err = iwl_setup_interface(priv, ctx);
-	if (!err)
-		goto out;
-
-	ctx->vif = NULL;
-	priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
-	mutex_unlock(&priv->shrd->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-	return err;
-}
-
-static void iwl_teardown_interface(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif,
-				   bool mode_change)
-{
-	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-	lockdep_assert_held(&priv->shrd->mutex);
-
-	if (priv->scan_vif == vif) {
-		iwl_scan_cancel_timeout(priv, 200);
-		iwl_force_scan_end(priv);
-	}
-
-	if (!mode_change) {
-		iwl_set_mode(priv, ctx);
-		if (!ctx->always_active)
-			ctx->is_active = false;
-	}
-
-	/*
-	 * When removing the IBSS interface, overwrite the
-	 * BT traffic load with the stored one from the last
-	 * notification, if any. If this is a device that
-	 * doesn't implement this, this has no effect since
-	 * both values are the same and zero.
-	 */
-	if (vif->type == NL80211_IFTYPE_ADHOC)
-		priv->bt_traffic_load = priv->last_bt_traffic_load;
-}
-
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	mutex_lock(&priv->shrd->mutex);
-
-	if (WARN_ON(ctx->vif != vif)) {
-		struct iwl_rxon_context *tmp;
-		IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
-		for_each_context(priv, tmp)
-			IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
-				tmp->ctxid, tmp, tmp->vif);
-	}
-	ctx->vif = NULL;
-
-	iwl_teardown_interface(priv, vif, false);
-
-	mutex_unlock(&priv->shrd->mutex);
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 
@@ -1649,97 +1411,13 @@
 	return 0;
 }
 
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
-				struct ieee80211_vif *vif,
-				enum nl80211_iftype newtype, bool newp2p)
-{
-	struct iwl_priv *priv = hw->priv;
-	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-	struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwl_rxon_context *tmp;
-	enum nl80211_iftype newviftype = newtype;
-	u32 interface_modes;
-	int err;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
-	mutex_lock(&priv->shrd->mutex);
-
-	if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
-		/*
-		 * Huh? But wait ... this can maybe happen when
-		 * we're in the middle of a firmware restart!
-		 */
-		err = -EBUSY;
-		goto out;
-	}
-
-	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
-	if (!(interface_modes & BIT(newtype))) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	/*
-	 * Refuse a change that should be done by moving from the PAN
-	 * context to the BSS context instead, if the BSS context is
-	 * available and can support the new interface type.
-	 */
-	if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
-	    (bss_ctx->interface_modes & BIT(newtype) ||
-	     bss_ctx->exclusive_interface_modes & BIT(newtype))) {
-		BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-		err = -EBUSY;
-		goto out;
-	}
-
-	if (ctx->exclusive_interface_modes & BIT(newtype)) {
-		for_each_context(priv, tmp) {
-			if (ctx == tmp)
-				continue;
-
-			if (!tmp->vif)
-				continue;
-
-			/*
-			 * The current mode switch would be exclusive, but
-			 * another context is active ... refuse the switch.
-			 */
-			err = -EBUSY;
-			goto out;
-		}
-	}
-
-	/* success */
-	iwl_teardown_interface(priv, vif, true);
-	vif->type = newviftype;
-	vif->p2p = newp2p;
-	err = iwl_setup_interface(priv, ctx);
-	WARN_ON(err);
-	/*
-	 * We've switched internally, but submitting to the
-	 * device may have failed for some reason. Mask this
-	 * error, because otherwise mac80211 will not switch
-	 * (and set the interface type back) and we'll be
-	 * out of sync with it.
-	 */
-	err = 0;
-
- out:
-	mutex_unlock(&priv->shrd->mutex);
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	return err;
-}
 
 int iwl_cmd_echo_test(struct iwl_priv *priv)
 {
 	int ret;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_ECHO,
+		.len = { 0 },
 		.flags = CMD_SYNC,
 	};
 
@@ -1783,7 +1461,7 @@
 	if (iwl_is_rfkill(priv->shrd))
 		return;
 
-	timeout = priv->cfg->base_params->wd_timeout;
+	timeout = cfg(priv)->base_params->wd_timeout;
 	if (timeout == 0)
 		return;
 
@@ -1808,11 +1486,11 @@
 
 void iwl_setup_watchdog(struct iwl_priv *priv)
 {
-	unsigned int timeout = priv->cfg->base_params->wd_timeout;
+	unsigned int timeout = cfg(priv)->base_params->wd_timeout;
 
 	if (!iwlagn_mod_params.wd_disable) {
 		/* use system default */
-		if (timeout && !priv->cfg->base_params->wd_disable)
+		if (timeout && !cfg(priv)->base_params->wd_disable)
 			mod_timer(&priv->watchdog,
 				jiffies +
 				msecs_to_jiffies(IWL_WD_TICK(timeout)));
@@ -1902,34 +1580,6 @@
 	return cpu_to_le32(res);
 }
 
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
-				 enum iwl_rxon_context_id ctx,
-				 u8 sta_id, u8 tid)
-{
-	struct ieee80211_vif *vif;
-	u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
-	if (ctx == NUM_IWL_RXON_CTX)
-		ctx = priv->stations[sta_id].ctxid;
-	vif = priv->contexts[ctx].vif;
-
-	ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
-				enum iwl_rxon_context_id ctx,
-				u8 sta_id, u8 tid)
-{
-	struct ieee80211_vif *vif;
-	u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
-	if (ctx == NUM_IWL_RXON_CTX)
-		ctx = priv->stations[sta_id].ctxid;
-	vif = priv->contexts[ctx].vif;
-
-	ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
 void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
 {
 	wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
@@ -1937,8 +1587,7 @@
 
 void iwl_nic_config(struct iwl_priv *priv)
 {
-	priv->cfg->lib->nic_config(priv);
-
+	cfg(priv)->lib->nic_config(priv);
 }
 
 void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index f2fc288..7bf76ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -142,8 +142,6 @@
  * @bt_init_traffic_load: specify initial bt traffic load
  * @bt_prio_boost: default bt priority boost value
  * @agg_time_limit: maximum number of uSec in aggregation
- * @ampdu_factor: Maximum A-MPDU length factor
- * @ampdu_density: Minimum A-MPDU spacing
 * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
  */
 struct iwl_bt_params {
@@ -151,8 +149,6 @@
 	u8 bt_init_traffic_load;
 	u8 bt_prio_boost;
 	u16 agg_time_limit;
-	u8 ampdu_factor;
-	u8 ampdu_density;
 	bool bt_sco_disable;
 	bool bt_session_2;
 };
@@ -165,84 +161,10 @@
 	enum ieee80211_smps_mode smps_mode;
 };
 
-/**
- * struct iwl_cfg
- * @name: Offical name of the device
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- *	(.ucode) will be added to filename before loading from disk. The
- *	filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- *	without a warning, for use in transitions
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @valid_tx_ant: valid transmit antenna
- * @valid_rx_ant: valid receive antenna
- * @sku: sku information from EEPROM
- * @eeprom_ver: EEPROM version
- * @eeprom_calib_ver: EEPROM calibration version
- * @lib: pointer to the lib ops
- * @additional_nic_config: additional nic configuration
- * @base_params: pointer to basic parameters
- * @ht_params: point to ht patameters
- * @bt_params: pointer to bt parameters
- * @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advance power management
- * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
- * @internal_wimax_coex: internal wifi/wimax combo device
- * @iq_invert: I/Q inversion
- * @temp_offset_v2: support v2 of temperature offset calibration
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version.
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision.
- */
-struct iwl_cfg {
-	/* params specific to an individual device within a device family */
-	const char *name;
-	const char *fw_name_pre;
-	const unsigned int ucode_api_max;
-	const unsigned int ucode_api_ok;
-	const unsigned int ucode_api_min;
-	u8   valid_tx_ant;
-	u8   valid_rx_ant;
-	u16  sku;
-	u16  eeprom_ver;
-	u16  eeprom_calib_ver;
-	const struct iwl_lib_ops *lib;
-	void (*additional_nic_config)(struct iwl_priv *priv);
-	/* params not likely to change within a device family */
-	struct iwl_base_params *base_params;
-	/* params likely to change within a device family */
-	struct iwl_ht_params *ht_params;
-	struct iwl_bt_params *bt_params;
-	enum iwl_pa_type pa_type;	  /* if used set to IWL_PA_SYSTEM */
-	const bool need_dc_calib;	  /* if used set to true */
-	const bool need_temp_offset_calib; /* if used set to true */
-	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
-	enum iwl_led_mode led_mode;
-	const bool adv_pm;
-	const bool rx_with_siso_diversity;
-	const bool internal_wimax_coex;
-	const bool iq_invert;
-	const bool temp_offset_v2;
-};
-
 /***************************
  *   L i b                 *
  ***************************/
 
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif, u16 queue,
-		    const struct ieee80211_tx_queue_params *params);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 			   int hw_decrypt);
 int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -262,13 +184,6 @@
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
 				   struct iwl_rxon_context *ctx);
 void iwl_set_rate(struct iwl_priv *priv);
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
-			  struct ieee80211_vif *vif);
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
-			      struct ieee80211_vif *vif);
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif,
-			     enum nl80211_iftype newtype, bool newp2p);
 int iwl_cmd_echo_test(struct iwl_priv *priv);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
@@ -325,9 +240,6 @@
 int iwl_scan_cancel(struct iwl_priv *priv);
 void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
 void iwl_force_scan_end(struct iwl_priv *priv);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif,
-		    struct cfg80211_scan_request *req);
 void iwl_internal_short_hw_scan(struct iwl_priv *priv);
 int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
 u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -381,8 +293,8 @@
 
 static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
 {
-	return priv->cfg->bt_params &&
-	       priv->cfg->bt_params->advanced_bt_coexist;
+	return cfg(priv)->bt_params &&
+	       cfg(priv)->bt_params->advanced_bt_coexist;
 }
 
 static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
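
A compact sketch of the cfg() accessor pattern these call sites are converted to, with the configuration reached through a shared layer rather than a direct priv->cfg pointer. The struct layout and the cfg() macro here are simplified stand-ins assumed for illustration, not the driver's real definitions.

#include <stdio.h>
#include <stdbool.h>

struct iwl_bt_params { bool advanced_bt_coexist; };
struct iwl_cfg { struct iwl_bt_params *bt_params; };
struct iwl_shared { struct iwl_cfg *cfg; };
struct iwl_priv { struct iwl_shared *shrd; };

/* assumed accessor: reach the config through the shared struct */
#define cfg(ptr) ((ptr)->shrd->cfg)

static bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
{
	return cfg(priv)->bt_params &&
	       cfg(priv)->bt_params->advanced_bt_coexist;
}

int main(void)
{
	struct iwl_bt_params bt = { .advanced_bt_coexist = true };
	struct iwl_cfg c = { .bt_params = &bt };
	struct iwl_shared shrd = { .cfg = &c };
	struct iwl_priv priv = { .shrd = &shrd };

	printf("advanced BT coex: %s\n",
	       iwl_advanced_bt_coexist(&priv) ? "yes" : "no");
	return 0;
}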
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b9f3267..fbc3095 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -284,8 +284,8 @@
 #define CSR_HW_REV_TYPE_6x35	       CSR_HW_REV_TYPE_6x05
 #define CSR_HW_REV_TYPE_2x30	       (0x00000C0)
 #define CSR_HW_REV_TYPE_2x00	       (0x0000100)
-#define CSR_HW_REV_TYPE_200	       (0x0000110)
-#define CSR_HW_REV_TYPE_230	       (0x0000120)
+#define CSR_HW_REV_TYPE_105	       (0x0000110)
+#define CSR_HW_REV_TYPE_135	       (0x0000120)
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 
 /* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 69a77e2..f8fc239 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@
 } while (0)
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, args...)				\
+#define IWL_DEBUG(m, level, fmt, ...)					\
 do {									\
 	if (iwl_get_debug_level((m)->shrd) & (level))			\
-		dev_printk(KERN_ERR, bus(m)->dev,			\
-			 "%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
-			__func__ , ## args);				\
+		dev_err(bus(m)->dev, "%c %s " fmt,			\
+			in_interrupt() ? 'I' : 'U', __func__,		\
+			##__VA_ARGS__);					\
 } while (0)
 
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)				\
+#define IWL_DEBUG_LIMIT(m, level, fmt, ...)				\
 do {									\
-	if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
-		dev_printk(KERN_ERR, bus(m)->dev,			\
-			"%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
-			 __func__ , ## args);				\
+	if (iwl_get_debug_level((m)->shrd) & (level) &&			\
+	    net_ratelimit())						\
+		dev_err(bus(m)->dev, "%c %s " fmt,			\
+			in_interrupt() ? 'I' : 'U', __func__,		\
+			##__VA_ARGS__);					\
 } while (0)
 
 #define iwl_print_hex_dump(m, level, p, len)				\
@@ -70,10 +71,29 @@
 			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
 } while (0)
 
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...)				\
+do {									\
+	if (!iwl_is_rfkill(p->shrd))					\
+		dev_err(bus(p)->dev, "%s%c %s " fmt,			\
+			"",						\
+			in_interrupt() ? 'I' : 'U', __func__,		\
+			##__VA_ARGS__);					\
+	else if	(iwl_get_debug_level(p->shrd) & IWL_DL_RADIO)		\
+		dev_err(bus(p)->dev, "%s%c %s " fmt,			\
+			"(RFKILL) ",					\
+			in_interrupt() ? 'I' : 'U', __func__,		\
+			##__VA_ARGS__);					\
+} while (0)
+
 #else
 #define IWL_DEBUG(m, level, fmt, args...)
 #define IWL_DEBUG_LIMIT(m, level, fmt, args...)
 #define iwl_print_hex_dump(m, level, p, len)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...)	\
+do {							\
+	if (!iwl_is_rfkill(p->shrd))			\
+		IWL_ERR(p, fmt, ##args);		\
+} while (0)
 #endif				/* CONFIG_IWLWIFI_DEBUG */
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
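
The macro rework above moves from the GNU "args..." form to C99 "..." with ##__VA_ARGS__, which drops the trailing comma when no arguments are passed. A tiny standalone sketch of that pattern, with dbg_err() merely standing in for dev_err():

#include <stdio.h>

#define dbg_err(fmt, ...) \
	fprintf(stderr, "%c %s " fmt, 'U', __func__, ##__VA_ARGS__)

int main(void)
{
	dbg_err("no arguments\n");	/* ## swallows the trailing comma */
	dbg_err("value = %d\n", 42);
	return 0;
}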
@@ -114,48 +134,43 @@
  */
 
 /* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO		(1 << 0)
-#define IWL_DL_MAC80211		(1 << 1)
-#define IWL_DL_HCMD		(1 << 2)
-#define IWL_DL_STATE		(1 << 3)
+#define IWL_DL_INFO		0x00000001
+#define IWL_DL_MAC80211		0x00000002
+#define IWL_DL_HCMD		0x00000004
+#define IWL_DL_STATE		0x00000008
 /* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP		(1 << 4)
-#define IWL_DL_HCMD_DUMP	(1 << 5)
-#define IWL_DL_EEPROM		(1 << 6)
-#define IWL_DL_RADIO		(1 << 7)
+#define IWL_DL_EEPROM		0x00000040
+#define IWL_DL_RADIO		0x00000080
 /* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER		(1 << 8)
-#define IWL_DL_TEMP		(1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN		(1 << 11)
+#define IWL_DL_POWER		0x00000100
+#define IWL_DL_TEMP		0x00000200
+#define IWL_DL_SCAN		0x00000800
 /* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC		(1 << 12)
-#define IWL_DL_DROP		(1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX		(1 << 15)
+#define IWL_DL_ASSOC		0x00001000
+#define IWL_DL_DROP		0x00002000
+#define IWL_DL_COEX		0x00008000
 /* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW		(1 << 16)
-#define IWL_DL_RF_KILL		(1 << 17)
-#define IWL_DL_FW_ERRORS	(1 << 18)
-#define IWL_DL_LED		(1 << 19)
+#define IWL_DL_FW		0x00010000
+#define IWL_DL_RF_KILL		0x00020000
+#define IWL_DL_FW_ERRORS	0x00040000
+#define IWL_DL_LED		0x00080000
 /* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE		(1 << 20)
-#define IWL_DL_CALIB		(1 << 21)
-#define IWL_DL_WEP		(1 << 22)
-#define IWL_DL_TX		(1 << 23)
+#define IWL_DL_RATE		0x00100000
+#define IWL_DL_CALIB		0x00200000
+#define IWL_DL_WEP		0x00400000
+#define IWL_DL_TX		0x00800000
 /* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX		(1 << 24)
-#define IWL_DL_ISR		(1 << 25)
-#define IWL_DL_HT		(1 << 26)
+#define IWL_DL_RX		0x01000000
+#define IWL_DL_ISR		0x02000000
+#define IWL_DL_HT		0x04000000
 /* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H		(1 << 28)
-#define IWL_DL_STATS		(1 << 29)
-#define IWL_DL_TX_REPLY		(1 << 30)
-#define IWL_DL_QOS		(1 << 31)
+#define IWL_DL_11H		0x10000000
+#define IWL_DL_STATS		0x20000000
+#define IWL_DL_TX_REPLY		0x40000000
+#define IWL_DL_TX_QUEUES	0x80000000
 
 #define IWL_DEBUG_INFO(p, f, a...)	IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
 #define IWL_DEBUG_MAC80211(p, f, a...)	IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
 #define IWL_DEBUG_TEMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
 #define IWL_DEBUG_SCAN(p, f, a...)	IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
 #define IWL_DEBUG_RX(p, f, a...)	IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -164,7 +179,6 @@
 #define IWL_DEBUG_LED(p, f, a...)	IWL_DEBUG(p, IWL_DL_LED, f, ## a)
 #define IWL_DEBUG_WEP(p, f, a...)	IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
 #define IWL_DEBUG_HC(p, f, a...)	IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
 #define IWL_DEBUG_EEPROM(p, f, a...)	IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
 #define IWL_DEBUG_CALIB(p, f, a...)	IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
 #define IWL_DEBUG_FW(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -186,9 +200,7 @@
 #define IWL_DEBUG_STATS_LIMIT(p, f, a...)	\
 		IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
 #define IWL_DEBUG_TX_REPLY(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
-		IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...)	IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_TX_QUEUES(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
 #define IWL_DEBUG_RADIO(p, f, a...)	IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
 #define IWL_DEBUG_POWER(p, f, a...)	IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
 #define IWL_DEBUG_11H(p, f, a...)	IWL_DEBUG(p, IWL_DL_11H, f, ## a)
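
Since the debug levels above were converted from (1 << n) shifts to hex literals, here is a quick standalone check (not part of the patch) that a few representative values still match the encodings they replace:

#include <assert.h>

#define IWL_DL_INFO		0x00000001
#define IWL_DL_MAC80211		0x00000002
#define IWL_DL_RX		0x01000000
#define IWL_DL_TX_REPLY		0x40000000

int main(void)
{
	assert(IWL_DL_INFO     == (1u << 0));
	assert(IWL_DL_MAC80211 == (1u << 1));
	assert(IWL_DL_RX       == (1u << 24));
	assert(IWL_DL_TX_REPLY == (1u << 30));
	return 0;
}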
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a1670e3..04a3343 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -234,11 +234,12 @@
 
 	/* default is to dump the entire data segment */
 	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+		struct iwl_trans *trans = trans(priv);
 		priv->dbgfs_sram_offset = 0x800000;
-		if (priv->ucode_type == IWL_UCODE_INIT)
-			priv->dbgfs_sram_len = priv->ucode_init.data.len;
+		if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+			priv->dbgfs_sram_len = trans->ucode_init.data.len;
 		else
-			priv->dbgfs_sram_len = priv->ucode_rt.data.len;
+			priv->dbgfs_sram_len = trans->ucode_rt.data.len;
 	}
 	len = priv->dbgfs_sram_len;
 
@@ -341,7 +342,7 @@
 
 	return simple_read_from_buffer(user_buf, count, ppos,
 				       priv->wowlan_sram,
-				       priv->ucode_wowlan.data.len);
+				       trans(priv)->ucode_wowlan.data.len);
 }
 static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 					size_t count, loff_t *ppos)
@@ -371,15 +372,13 @@
 				 i, station->sta.sta.addr,
 				 station->sta.station_flags_msk);
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
+				"TID\tseq_num\trate_n_flags\n");
 
 		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
-			tid_data = &priv->shrd->tid_data[i][j];
+			tid_data = &priv->tid_data[i][j];
 			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d:\t%#x\t%#x\t%u\t%#x",
+				"%d:\t%#x\t%#x",
 				j, tid_data->seq_number,
-				tid_data->agg.txq_id,
-				tid_data->tfds_in_queue,
 				tid_data->agg.rate_n_flags);
 
 			if (tid_data->agg.wait_for_ba)
@@ -407,7 +406,7 @@
 	const u8 *ptr;
 	char *buf;
 	u16 eeprom_ver;
-	size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+	size_t eeprom_len = cfg(priv)->base_params->eeprom_size;
 	buf_size = 4 * eeprom_len + 256;
 
 	if (eeprom_len % 16) {
@@ -415,7 +414,7 @@
 		return -ENODATA;
 	}
 
-	ptr = priv->eeprom;
+	ptr = priv->shrd->eeprom;
 	if (!ptr) {
 		IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
 		return -ENOMEM;
@@ -427,10 +426,10 @@
 		IWL_ERR(priv, "Can not allocate Buffer\n");
 		return -ENOMEM;
 	}
-	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+	eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
 	pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
 			"version: 0x%x\n",
-			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+			(trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
 			 ? "OTP" : "EEPROM", eeprom_ver);
 	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
 		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -1541,15 +1540,15 @@
 	if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
 		pos += scnprintf(buf + pos, bufsz - pos,
 			"tx power: (1/2 dB step)\n");
-		if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+		if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
 			pos += scnprintf(buf + pos, bufsz - pos,
 					fmt_hex, "antenna A:",
 					tx->tx_power.ant_a);
-		if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+		if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
 			pos += scnprintf(buf + pos, bufsz - pos,
 					fmt_hex, "antenna B:",
 					tx->tx_power.ant_b);
-		if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+		if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
 			pos += scnprintf(buf + pos, bufsz - pos,
 					fmt_hex, "antenna C:",
 					tx->tx_power.ant_c);
@@ -2220,7 +2219,7 @@
 	const size_t bufsz = sizeof(buf);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
-			priv->cfg->base_params->plcp_delta_threshold);
+			cfg(priv)->base_params->plcp_delta_threshold);
 
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -2242,10 +2241,10 @@
 		return -EINVAL;
 	if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
 		(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
-		priv->cfg->base_params->plcp_delta_threshold =
+		cfg(priv)->base_params->plcp_delta_threshold =
 			IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
 	else
-		priv->cfg->base_params->plcp_delta_threshold = plcp;
+		cfg(priv)->base_params->plcp_delta_threshold = plcp;
 	return count;
 }
 
@@ -2347,7 +2346,7 @@
 	if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
 		timeout = IWL_DEF_WD_TIMEOUT;
 
-	priv->cfg->base_params->wd_timeout = timeout;
+	cfg(priv)->base_params->wd_timeout = timeout;
 	iwl_setup_watchdog(priv);
 	return count;
 }
@@ -2407,10 +2406,10 @@
 	char buf[40];
 	const size_t bufsz = sizeof(buf);
 
-	if (priv->cfg->ht_params)
+	if (cfg(priv)->ht_params)
 		pos += scnprintf(buf + pos, bufsz - pos,
 			 "use %s for aggregation\n",
-			 (priv->cfg->ht_params->use_rts_for_aggregation) ?
+			 (cfg(priv)->ht_params->use_rts_for_aggregation) ?
 				"rts/cts" : "cts-to-self");
 	else
 		pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2427,7 +2426,7 @@
 	int buf_size;
 	int rts;
 
-	if (!priv->cfg->ht_params)
+	if (!cfg(priv)->ht_params)
 		return -EINVAL;
 
 	memset(buf, 0, sizeof(buf));
@@ -2437,9 +2436,9 @@
 	if (sscanf(buf, "%d", &rts) != 1)
 		return -EINVAL;
 	if (rts)
-		priv->cfg->ht_params->use_rts_for_aggregation = true;
+		cfg(priv)->ht_params->use_rts_for_aggregation = true;
 	else
-		priv->cfg->ht_params->use_rts_for_aggregation = false;
+		cfg(priv)->ht_params->use_rts_for_aggregation = false;
 	return count;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6c00a44..e54a4d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -60,11 +60,10 @@
 
 /* Default noise level to report when noise measurement is not available.
  *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
+ *   1)  Not associated (no beacon statistics being sent to driver)
  *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
  * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
  *   Also, -127 works better than 0 when averaging frames with/without
  *   noise info (e.g. averaging might be done in app); measured dBm values are
  *   always negative ... using a negative value as the default keeps all
@@ -190,6 +189,69 @@
 	struct iwl_qosparam_cmd def_qos_parm;
 };
 
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ */
+enum iwl_agg_state {
+	IWL_AGG_OFF = 0,
+	IWL_AGG_ON,
+	IWL_EMPTYING_HW_QUEUE_ADDBA,
+	IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+ *
+ * This struct holds the state for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+ *
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ *	Tx response (REPLY_TX), and the block ack notification
+ *	(REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session - used by the transport layer.
+ *	Needed by the upper layer for debugfs only.
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ *	we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+	u32 rate_n_flags;
+	enum iwl_agg_state state;
+	u16 txq_id;
+	u16 ssn;
+	bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+ *
+ * This struct holds the state for each RA / TID.
+ *
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ *	This is basically (last acked packet++).
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+	u16 seq_number;
+	u16 next_reclaimed;
+	struct iwl_ht_agg agg;
+};
+
 /*
  * Structure should be accessed with sta_lock held. When station addition
  * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
@@ -230,17 +292,6 @@
 	u8 ibss_bssid_sta_id;
 };
 
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
-	void *v_addr;		/* access by driver */
-	dma_addr_t p_addr;	/* access by card's busmaster DMA */
-	u32 len;		/* bytes */
-};
-
-struct fw_img {
-	struct fw_desc code, data;
-};
-
 /* v1/v2 uCode file layout */
 struct iwl_ucode_header {
 	__le32 ver;	/* major/minor/API/serial */
@@ -452,29 +503,6 @@
 	IWL_CHAIN_NOISE_DONE,
 };
 
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
-	IWL_CALIB_XTAL,
-	IWL_CALIB_DC,
-	IWL_CALIB_LO,
-	IWL_CALIB_TX_IQ,
-	IWL_CALIB_TX_IQ_PERD,
-	IWL_CALIB_BASE_BAND,
-	IWL_CALIB_TEMP_OFFSET,
-	IWL_CALIB_MAX
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
-	void *buf;
-	size_t buf_len;
-};
-
 /* Sensitivity calib data */
 struct iwl_sensitivity_data {
 	u32 auto_corr_ofdm;
@@ -547,16 +575,6 @@
 	IWL_OTP_ACCESS_RELATIVE,
 };
 
-/**
- * enum iwl_pa_type - Power Amplifier type
- * @IWL_PA_SYSTEM:  based on uCode configuration
- * @IWL_PA_INTERNAL: use Internal only
- */
-enum iwl_pa_type {
-	IWL_PA_SYSTEM = 0,
-	IWL_PA_INTERNAL = 1,
-};
-
 /* reply_tx_statistics (for _agn devices) */
 struct reply_tx_error_statistics {
 	u32 pp_delay;
@@ -714,35 +732,6 @@
  */
 #define IWLAGN_EXT_BEACON_TIME_POS	22
 
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
-	struct list_head list;
-
-	void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
-		   void *data);
-	void *fn_data;
-
-	u8 cmd;
-	bool triggered, aborted;
-};
-
 struct iwl_rxon_context {
 	struct ieee80211_vif *vif;
 
@@ -805,14 +794,7 @@
 	IWL_SCAN_ROC,
 };
 
-enum iwlagn_ucode_type {
-	IWL_UCODE_NONE,
-	IWL_UCODE_REGULAR,
-	IWL_UCODE_INIT,
-	IWL_UCODE_WOWLAN,
-};
-
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 struct iwl_testmode_trace {
 	u32 buff_size;
 	u32 total_size;
@@ -822,8 +804,20 @@
 	dma_addr_t dma_addr;
 	bool trace_enabled;
 };
+struct iwl_testmode_sram {
+	u32 buff_size;
+	u32 num_chunks;
+	u8 *buff_addr;
+	bool sram_readed;
+};
 #endif
 
+struct iwl_wipan_noa_data {
+	struct rcu_head rcu_head;
+	u32 length;
+	u8 data[];
+};
+
 struct iwl_priv {
 
 	/*data shared among all the driver's layers */
@@ -835,7 +829,6 @@
 	struct ieee80211_channel *ieee_channels;
 	struct ieee80211_rate *ieee_rates;
 	struct kmem_cache *tx_cmd_pool;
-	struct iwl_cfg *cfg;
 
 	enum ieee80211_band band;
 
@@ -880,8 +873,7 @@
 	s32 temperature;	/* Celsius */
 	s32 last_temperature;
 
-	/* init calibration results */
-	struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+	struct iwl_wipan_noa_data __rcu *noa_data;
 
 	/* Scan related variables */
 	unsigned long scan_start;
@@ -907,23 +899,12 @@
 	u32 ucode_ver;			/* version of ucode, copy of
 					   iwl_ucode.ver */
 
-	struct fw_img ucode_rt;
-	struct fw_img ucode_init;
-	struct fw_img ucode_wowlan;
-
-	enum iwlagn_ucode_type ucode_type;
-	u8 ucode_write_complete;	/* the image write is complete */
 	char firmware_name[25];
 
 	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
 
 	__le16 switch_channel;
 
-	struct {
-		u32 error_event_table;
-		u32 log_event_table;
-	} device_pointers;
-
 	u16 active_rate;
 
 	u8 start_calib;
@@ -951,17 +932,13 @@
 	int num_stations;
 	struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
 	unsigned long ucode_key_table;
+	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
 
 	u8 mac80211_registered;
 
 	/* Indication if ieee80211_ops->open has been called */
 	u8 is_open;
 
-	/* eeprom -- this is in the card's little endian byte order */
-	u8 *eeprom;
-	int    nvm_device_type;
-	struct iwl_eeprom_calib_info *calib_info;
-
 	enum nl80211_iftype iw_mode;
 
 	/* Last Rx'd beacon timestamp */
@@ -1017,10 +994,6 @@
 	/* counts reply_tx error */
 	struct reply_tx_error_statistics reply_tx_stats;
 	struct reply_agg_tx_error_statistics reply_agg_tx_stats;
-	/* notification wait support */
-	struct list_head notif_waits;
-	spinlock_t notif_wait_lock;
-	wait_queue_head_t notif_waitq;
 
 	/* remain-on-channel offload support */
 	struct ieee80211_channel *hw_roc_channel;
@@ -1098,8 +1071,9 @@
 	struct led_classdev led;
 	unsigned long blink_on, blink_off;
 	bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 	struct iwl_testmode_trace testmode_trace;
+	struct iwl_testmode_sram testmode_sram;
 	u32 tm_fixed_rate;
 #endif
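
To illustrate the per-station / per-TID layout this header now carries (priv->tid_data[sta][tid]), here is a reduced, self-contained sketch that walks such a table. The structs are trimmed copies of the ones added above and the sample session data is invented.

#include <stdio.h>

#define IWLAGN_STATION_COUNT	16
#define IWL_MAX_TID_COUNT	8

enum iwl_agg_state { IWL_AGG_OFF = 0, IWL_AGG_ON };

struct iwl_ht_agg {
	enum iwl_agg_state state;
	unsigned short txq_id;
};

struct iwl_tid_data {
	unsigned short seq_number;
	struct iwl_ht_agg agg;
};

static struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];

int main(void)
{
	/* pretend a BA session came up for station 3, TID 5 */
	tid_data[3][5].agg.state = IWL_AGG_ON;
	tid_data[3][5].agg.txq_id = 12;

	for (int sta = 0; sta < IWLAGN_STATION_COUNT; sta++)
		for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
			if (tid_data[sta][tid].agg.state == IWL_AGG_ON)
				printf("sta %d tid %d uses txq %u\n", sta, tid,
				       (unsigned)tid_data[sta][tid].agg.txq_id);
	return 0;
}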
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index a635a7e..2a2c8de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -28,7 +28,7 @@
 
 /* sparse doesn't like tracepoint macros */
 #ifndef __CHECKER__
-#include "iwl-dev.h"
+#include "iwl-trans.h"
 
 #define CREATE_TRACE_POINTS
 #include "iwl-devtrace.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 8a51c5c..9b212a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,7 +29,6 @@
 
 #include <linux/tracepoint.h>
 
-struct iwl_priv;
 
 #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
 #undef TRACE_EVENT
@@ -37,14 +36,14 @@
 static inline void trace_ ## name(proto) {}
 #endif
 
-#define PRIV_ENTRY	__field(struct iwl_priv *, priv)
+#define PRIV_ENTRY	__field(void *, priv)
 #define PRIV_ASSIGN	__entry->priv = priv
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM iwlwifi_io
 
 TRACE_EVENT(iwlwifi_dev_ioread32,
-	TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+	TP_PROTO(void *priv, u32 offs, u32 val),
 	TP_ARGS(priv, offs, val),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
@@ -60,7 +59,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_iowrite8,
-	TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+	TP_PROTO(void *priv, u32 offs, u8 val),
 	TP_ARGS(priv, offs, val),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
@@ -76,7 +75,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_iowrite32,
-	TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+	TP_PROTO(void *priv, u32 offs, u32 val),
 	TP_ARGS(priv, offs, val),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
@@ -91,11 +90,40 @@
 	TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
 );
 
+TRACE_EVENT(iwlwifi_dev_irq,
+	TP_PROTO(void *priv),
+	TP_ARGS(priv),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+	),
+	/* TP_printk("") doesn't compile */
+	TP_printk("%d", 0)
+);
+
+TRACE_EVENT(iwlwifi_dev_ict_read,
+	TP_PROTO(void *priv, u32 index, u32 value),
+	TP_ARGS(priv, index, value),
+	TP_STRUCT__entry(
+		PRIV_ENTRY
+		__field(u32, index)
+		__field(u32, value)
+	),
+	TP_fast_assign(
+		PRIV_ASSIGN;
+		__entry->index = index;
+		__entry->value = value;
+	),
+	TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value)
+);
+
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM iwlwifi_ucode
 
 TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
-	TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+	TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
 	TP_ARGS(priv, time, data, ev),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
@@ -115,7 +143,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
-	TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+	TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
 	TP_ARGS(priv, wraps, n_entry, p_entry),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
@@ -139,7 +167,7 @@
 #define TRACE_SYSTEM iwlwifi
 
 TRACE_EVENT(iwlwifi_dev_hcmd,
-	TP_PROTO(struct iwl_priv *priv, u32 flags,
+	TP_PROTO(void *priv, u32 flags,
 		 const void *hcmd0, size_t len0,
 		 const void *hcmd1, size_t len1,
 		 const void *hcmd2, size_t len2),
@@ -164,7 +192,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_rx,
-	TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+	TP_PROTO(void *priv, void *rxbuf, size_t len),
 	TP_ARGS(priv, rxbuf, len),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
@@ -179,7 +207,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_tx,
-	TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+	TP_PROTO(void *priv, void *tfd, size_t tfdlen,
 		 void *buf0, size_t buf0_len,
 		 void *buf1, size_t buf1_len),
 	TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
@@ -211,7 +239,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_error,
-	TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
+	TP_PROTO(void *priv, u32 desc, u32 tsf_low,
 		 u32 data1, u32 data2, u32 line, u32 blink1,
 		 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
 		 u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
@@ -271,7 +299,7 @@
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_event,
-	TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+	TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
 	TP_ARGS(priv, time, data, ev),
 	TP_STRUCT__entry(
 		PRIV_ENTRY
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a4e43bd..c1eda97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -149,23 +149,23 @@
  * EEPROM chip, not a single event, so even reads could conflict if they
  * weren't arbitrated by the semaphore.
  */
-static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
+static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
 {
 	u16 count;
 	int ret;
 
 	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
 		/* Request semaphore */
-		iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+		iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
 			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 
 		/* See if we got it */
-		ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+		ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
 				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
 				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
 				EEPROM_SEM_TIMEOUT);
 		if (ret >= 0) {
-			IWL_DEBUG_EEPROM(priv,
+			IWL_DEBUG_EEPROM(bus,
 				"Acquired semaphore after %d tries.\n",
 				count+1);
 			return ret;
@@ -175,39 +175,39 @@
 	return ret;
 }
 
-static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
+static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
 {
-	iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+	iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
 		CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 
 }
 
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
 {
-	u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+	u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
 	int ret = 0;
 
-	IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+	IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
 	switch (gp) {
 	case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
-		if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
-			IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
+		if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+			IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
 				gp);
 			ret = -ENOENT;
 		}
 		break;
 	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
 	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
-		if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
-			IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
+		if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+			IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
 			ret = -ENOENT;
 		}
 		break;
 	case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
 	default:
-		IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
+		IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
 			"EEPROM_GP=0x%08x\n",
-			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+			(trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
 			? "OTP" : "EEPROM", gp);
 		ret = -ENOENT;
 		break;
@@ -215,11 +215,11 @@
 	return ret;
 }
 
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
 {
-	if (!priv->eeprom)
+	if (!shrd->eeprom)
 		return 0;
-	return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+	return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
 }
 
 int iwl_eeprom_check_version(struct iwl_priv *priv)
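
iwl_eeprom_query16() above now takes the shared struct, but the read itself is unchanged: a 16-bit little-endian load from the EEPROM image (which is stored in the card's little-endian byte order). A standalone sketch of that helper:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t eeprom_query16(const uint8_t *eeprom, size_t offset)
{
	if (!eeprom)
		return 0;
	return (uint16_t)eeprom[offset] | ((uint16_t)eeprom[offset + 1] << 8);
}

int main(void)
{
	const uint8_t image[] = { 0x34, 0x12, 0x78, 0x56 };

	printf("word0=%#06x word1=%#06x\n",
	       (unsigned)eeprom_query16(image, 0),
	       (unsigned)eeprom_query16(image, 2));
	/* prints word0=0x1234 word1=0x5678 */
	return 0;
}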
@@ -227,11 +227,11 @@
 	u16 eeprom_ver;
 	u16 calib_ver;
 
-	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
-	calib_ver = iwlagn_eeprom_calib_version(priv);
+	eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+	calib_ver = iwl_eeprom_calib_version(priv->shrd);
 
-	if (eeprom_ver < priv->cfg->eeprom_ver ||
-	    calib_ver < priv->cfg->eeprom_calib_ver)
+	if (eeprom_ver < cfg(priv)->eeprom_ver ||
+	    calib_ver < cfg(priv)->eeprom_calib_ver)
 		goto err;
 
 	IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -241,45 +241,46 @@
 err:
 	IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
 		  "CALIB=0x%x < 0x%x\n",
-		  eeprom_ver, priv->cfg->eeprom_ver,
-		  calib_ver,  priv->cfg->eeprom_calib_ver);
+		  eeprom_ver, cfg(priv)->eeprom_ver,
+		  calib_ver,  cfg(priv)->eeprom_calib_ver);
 	return -EINVAL;
 
 }
 
 int iwl_eeprom_check_sku(struct iwl_priv *priv)
 {
+	struct iwl_shared *shrd = priv->shrd;
 	u16 radio_cfg;
 
-	if (!priv->cfg->sku) {
+	if (!cfg(priv)->sku) {
 		/* not using sku overwrite */
-		priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
-		if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
-		    !priv->cfg->ht_params) {
+		cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
+		if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+		    !cfg(priv)->ht_params) {
 			IWL_ERR(priv, "Invalid 11n configuration\n");
 			return -EINVAL;
 		}
 	}
-	if (!priv->cfg->sku) {
+	if (!cfg(priv)->sku) {
 		IWL_ERR(priv, "Invalid device sku\n");
 		return -EINVAL;
 	}
 
-	IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+	IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku);
 
-	if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+	if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) {
 		/* not using .cfg overwrite */
-		radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-		priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
-		priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
-		if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
-			IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
-				priv->cfg->valid_tx_ant,
-				priv->cfg->valid_rx_ant);
+		radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+		cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+		cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+		if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) {
+			IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
+				cfg(priv)->valid_tx_ant,
+				cfg(priv)->valid_rx_ant);
 			return -EINVAL;
 		}
-		IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
-			 priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+		IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+			 cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant);
 	}
 	/*
 	 * for some special cases,
@@ -289,9 +290,9 @@
 	return 0;
 }
 
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
 {
-	const u8 *addr = iwl_eeprom_query_addr(priv,
+	const u8 *addr = iwl_eeprom_query_addr(shrd,
 					EEPROM_MAC_ADDRESS);
 	memcpy(mac, addr, ETH_ALEN);
 }
@@ -302,19 +303,19 @@
  *
 ******************************************************************************/
 
-static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
+static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
 {
-	iwl_read32(bus(priv), CSR_OTP_GP_REG);
+	iwl_read32(bus, CSR_OTP_GP_REG);
 
 	if (mode == IWL_OTP_ACCESS_ABSOLUTE)
-		iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
+		iwl_clear_bit(bus, CSR_OTP_GP_REG,
 			      CSR_OTP_GP_REG_OTP_ACCESS_MODE);
 	else
-		iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+		iwl_set_bit(bus, CSR_OTP_GP_REG,
 			    CSR_OTP_GP_REG_OTP_ACCESS_MODE);
 }
 
-static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
 {
 	u32 otpgp;
 	int nvm_type;
@@ -322,7 +323,7 @@
 	/* OTP only valid for CP/PP and after */
 	switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
 	case CSR_HW_REV_TYPE_NONE:
-		IWL_ERR(priv, "Unknown hardware type\n");
+		IWL_ERR(bus, "Unknown hardware type\n");
 		return -ENOENT;
 	case CSR_HW_REV_TYPE_5300:
 	case CSR_HW_REV_TYPE_5350:
@@ -331,7 +332,7 @@
 		nvm_type = NVM_DEVICE_TYPE_EEPROM;
 		break;
 	default:
-		otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+		otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
 		if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
 			nvm_type = NVM_DEVICE_TYPE_OTP;
 		else
@@ -341,73 +342,73 @@
 	return  nvm_type;
 }
 
-static int iwl_init_otp_access(struct iwl_priv *priv)
+static int iwl_init_otp_access(struct iwl_bus *bus)
 {
 	int ret;
 
 	/* Enable 40MHz radio clock */
-	iwl_write32(bus(priv), CSR_GP_CNTRL,
-		    iwl_read32(bus(priv), CSR_GP_CNTRL) |
+	iwl_write32(bus, CSR_GP_CNTRL,
+		    iwl_read32(bus, CSR_GP_CNTRL) |
 		    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
 	/* wait for clock to be ready */
-	ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
+	ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
 				 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 				 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 				 25000);
 	if (ret < 0)
-		IWL_ERR(priv, "Time out access OTP\n");
+		IWL_ERR(bus, "Time out access OTP\n");
 	else {
-		iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+		iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
 				  APMG_PS_CTRL_VAL_RESET_REQ);
 		udelay(5);
-		iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+		iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
 				    APMG_PS_CTRL_VAL_RESET_REQ);
 
 		/*
 		 * CSR auto clock gate disable bit -
 		 * this is only applicable for HW with OTP shadow RAM
 		 */
-		if (priv->cfg->base_params->shadow_ram_support)
-			iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
+		if (cfg(bus)->base_params->shadow_ram_support)
+			iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
 				CSR_RESET_LINK_PWR_MGMT_DISABLED);
 	}
 	return ret;
 }
 
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
 {
 	int ret = 0;
 	u32 r;
 	u32 otpgp;
 
-	iwl_write32(bus(priv), CSR_EEPROM_REG,
+	iwl_write32(bus, CSR_EEPROM_REG,
 		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-	ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
+	ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
 				 CSR_EEPROM_REG_READ_VALID_MSK,
 				 CSR_EEPROM_REG_READ_VALID_MSK,
 				 IWL_EEPROM_ACCESS_TIMEOUT);
 	if (ret < 0) {
-		IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
+		IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
 		return ret;
 	}
-	r = iwl_read32(bus(priv), CSR_EEPROM_REG);
+	r = iwl_read32(bus, CSR_EEPROM_REG);
 	/* check for ECC errors: */
-	otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+	otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
 	if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
 		/* stop in this case */
 		/* set the uncorrectable OTP ECC bit for acknowledgement */
-		iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+		iwl_set_bit(bus, CSR_OTP_GP_REG,
 			CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
-		IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
+		IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
 		return -EINVAL;
 	}
 	if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
 		/* continue in this case */
 		/* set the correctable OTP ECC bit for acknowledgement */
-		iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+		iwl_set_bit(bus, CSR_OTP_GP_REG,
 				CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
-		IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+		IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
 	}
 	*eeprom_data = cpu_to_le16(r >> 16);
 	return 0;
@@ -416,20 +417,20 @@
 /*
  * iwl_is_otp_empty: check for empty OTP
  */
-static bool iwl_is_otp_empty(struct iwl_priv *priv)
+static bool iwl_is_otp_empty(struct iwl_bus *bus)
 {
 	u16 next_link_addr = 0;
 	__le16 link_value;
 	bool is_empty = false;
 
 	/* locate the beginning of OTP link list */
-	if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
+	if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
 		if (!link_value) {
-			IWL_ERR(priv, "OTP is empty\n");
+			IWL_ERR(bus, "OTP is empty\n");
 			is_empty = true;
 		}
 	} else {
-		IWL_ERR(priv, "Unable to read first block of OTP list.\n");
+		IWL_ERR(bus, "Unable to read first block of OTP list.\n");
 		is_empty = true;
 	}
 
@@ -446,7 +447,7 @@
  *   we should read and used to configure the device.
  *   only perform this operation if shadow RAM is disabled
  */
-static int iwl_find_otp_image(struct iwl_priv *priv,
+static int iwl_find_otp_image(struct iwl_bus *bus,
 					u16 *validblockaddr)
 {
 	u16 next_link_addr = 0, valid_addr;
@@ -454,10 +455,10 @@
 	int usedblocks = 0;
 
 	/* set addressing mode to absolute to traverse the link list */
-	iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
+	iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
 
 	/* checking for empty OTP or error */
-	if (iwl_is_otp_empty(priv))
+	if (iwl_is_otp_empty(bus))
 		return -EINVAL;
 
 	/*
@@ -471,9 +472,9 @@
 		 */
 		valid_addr = next_link_addr;
 		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
-		IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
+		IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
 			       usedblocks, next_link_addr);
-		if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+		if (iwl_read_otp_word(bus, next_link_addr, &link_value))
 			return -EINVAL;
 		if (!link_value) {
 			/*
@@ -488,10 +489,10 @@
 		}
 		/* more in the link list, continue */
 		usedblocks++;
-	} while (usedblocks <= priv->cfg->base_params->max_ll_items);
+	} while (usedblocks <= cfg(bus)->base_params->max_ll_items);
 
 	/* OTP has no valid blocks */
-	IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
+	IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
 	return -EINVAL;
 }
 
@@ -504,28 +505,28 @@
  * iwl_get_max_txpower_avg - get the highest tx power from all chains.
  *     find the highest tx power from all chains for the channel
  */
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
 		struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
 		int element, s8 *max_txpower_in_half_dbm)
 {
 	s8 max_txpower_avg = 0; /* (dBm) */
 
 	/* Take the highest tx power from any valid chains */
-	if ((priv->cfg->valid_tx_ant & ANT_A) &&
+	if ((cfg->valid_tx_ant & ANT_A) &&
 	    (enhanced_txpower[element].chain_a_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].chain_a_max;
-	if ((priv->cfg->valid_tx_ant & ANT_B) &&
+	if ((cfg->valid_tx_ant & ANT_B) &&
 	    (enhanced_txpower[element].chain_b_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].chain_b_max;
-	if ((priv->cfg->valid_tx_ant & ANT_C) &&
+	if ((cfg->valid_tx_ant & ANT_C) &&
 	    (enhanced_txpower[element].chain_c_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].chain_c_max;
-	if (((priv->cfg->valid_tx_ant == ANT_AB) |
-	    (priv->cfg->valid_tx_ant == ANT_BC) |
-	    (priv->cfg->valid_tx_ant == ANT_AC)) &&
+	if (((cfg->valid_tx_ant == ANT_AB) |
+	    (cfg->valid_tx_ant == ANT_BC) |
+	    (cfg->valid_tx_ant == ANT_AC)) &&
 	    (enhanced_txpower[element].mimo2_max > max_txpower_avg))
 		max_txpower_avg =  enhanced_txpower[element].mimo2_max;
-	if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+	if ((cfg->valid_tx_ant == ANT_ABC) &&
 	    (enhanced_txpower[element].mimo3_max > max_txpower_avg))
 		max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
@@ -582,6 +583,7 @@
 
 void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
 {
+	struct iwl_shared *shrd = priv->shrd;
 	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
 	int idx, entries;
 	__le16 *txp_len;
@@ -590,10 +592,10 @@
 	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
 
 	/* the length is in 16-bit words, but we want entries */
-	txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+	txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
 	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
 
-	txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+	txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
 
 	for (idx = 0; idx < entries; idx++) {
 		txp = &txp_array[idx];
@@ -627,7 +629,7 @@
 				 ((txp->delta_20_in_40 & 0xf0) >> 4),
 				 (txp->delta_20_in_40 & 0x0f));
 
-		max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+		max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx,
 						      &max_txp_avg_halfdbm);
 
 		/*
@@ -646,12 +648,13 @@
 /**
  * iwl_eeprom_init - read EEPROM contents
  *
- * Load the EEPROM contents from adapter into priv->eeprom
+ * Load the EEPROM contents from adapter into shrd->eeprom
  *
  * NOTE:  This routine uses the non-debug IO access functions.
  */
 int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
 {
+	struct iwl_shared *shrd = priv->shrd;
 	__le16 *e;
 	u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
 	int sz;
@@ -660,22 +663,22 @@
 	u16 validblockaddr = 0;
 	u16 cache_addr = 0;
 
-	priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
-	if (priv->nvm_device_type == -ENOENT)
+	trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
+	if (trans(priv)->nvm_device_type == -ENOENT)
 		return -ENOENT;
 	/* allocate eeprom */
-	sz = priv->cfg->base_params->eeprom_size;
+	sz = cfg(priv)->base_params->eeprom_size;
 	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
-	priv->eeprom = kzalloc(sz, GFP_KERNEL);
-	if (!priv->eeprom) {
+	shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+	if (!shrd->eeprom) {
 		ret = -ENOMEM;
 		goto alloc_err;
 	}
-	e = (__le16 *)priv->eeprom;
+	e = (__le16 *)shrd->eeprom;
 
 	iwl_apm_init(priv);
 
-	ret = iwl_eeprom_verify_signature(priv);
+	ret = iwl_eeprom_verify_signature(trans(priv));
 	if (ret < 0) {
 		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
 		ret = -ENOENT;
@@ -683,16 +686,16 @@
 	}
 
 	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
-	ret = iwl_eeprom_acquire_semaphore(priv);
+	ret = iwl_eeprom_acquire_semaphore(bus(priv));
 	if (ret < 0) {
 		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
 		ret = -ENOENT;
 		goto err;
 	}
 
-	if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+	if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
 
-		ret = iwl_init_otp_access(priv);
+		ret = iwl_init_otp_access(bus(priv));
 		if (ret) {
 			IWL_ERR(priv, "Failed to initialize OTP access.\n");
 			ret = -ENOENT;
@@ -706,8 +709,8 @@
 			     CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
 			     CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
 		/* traversing the linked list if no shadow ram supported */
-		if (!priv->cfg->base_params->shadow_ram_support) {
-			if (iwl_find_otp_image(priv, &validblockaddr)) {
+		if (!cfg(priv)->base_params->shadow_ram_support) {
+			if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
 				ret = -ENOENT;
 				goto done;
 			}
@@ -716,7 +719,7 @@
 		     addr += sizeof(u16)) {
 			__le16 eeprom_data;
 
-			ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+			ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
 			if (ret)
 				goto done;
 			e[cache_addr / 2] = eeprom_data;
@@ -744,27 +747,27 @@
 	}
 
 	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
-		       (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+		       (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
 		       ? "OTP" : "EEPROM",
-		       iwl_eeprom_query16(priv, EEPROM_VERSION));
+		       iwl_eeprom_query16(shrd, EEPROM_VERSION));
 
 	ret = 0;
 done:
-	iwl_eeprom_release_semaphore(priv);
+	iwl_eeprom_release_semaphore(bus(priv));
 
 err:
 	if (ret)
-		iwl_eeprom_free(priv);
+		iwl_eeprom_free(priv->shrd);
 	/* Reset chip to save power until we load uCode during "up". */
 	iwl_apm_stop(priv);
 alloc_err:
 	return ret;
 }
 
-void iwl_eeprom_free(struct iwl_priv *priv)
+void iwl_eeprom_free(struct iwl_shared *shrd)
 {
-	kfree(priv->eeprom);
-	priv->eeprom = NULL;
+	kfree(shrd->eeprom);
+	shrd->eeprom = NULL;
 }
 
 static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@
 			const struct iwl_eeprom_channel **eeprom_ch_info,
 			const u8 **eeprom_ch_index)
 {
-	u32 offset = priv->cfg->lib->
+	struct iwl_shared *shrd = priv->shrd;
+	u32 offset = cfg(priv)->lib->
 			eeprom_ops.regulatory_bands[eep_band - 1];
 	switch (eep_band) {
 	case 1:		/* 2.4GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_1;
 		break;
 	case 2:		/* 4.9GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_2;
 		break;
 	case 3:		/* 5.2GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_3;
 		break;
 	case 4:		/* 5.5GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_4;
 		break;
 	case 5:		/* 5.7GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_5;
 		break;
 	case 6:		/* 2.4GHz ht40 channels */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_6;
 		break;
 	case 7:		/* 5 GHz ht40 channels */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(priv, offset);
+				iwl_eeprom_query_addr(shrd, offset);
 		*eeprom_ch_index = iwl_eeprom_band_7;
 		break;
 	default:
@@ -979,9 +983,9 @@
 	}
 
 	/* Check if we do have HT40 channels */
-	if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
+	if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] ==
 	    EEPROM_REGULATORY_BAND_NO_HT40 &&
-	    priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
+	    cfg(priv)->lib->eeprom_ops.regulatory_bands[6] ==
 	    EEPROM_REGULATORY_BAND_NO_HT40)
 		return 0;
 
@@ -1017,8 +1021,8 @@
 	 * driver need to process addition information
 	 * to determine the max channel tx power limits
 	 */
-	if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
-		priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);
+	if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower)
+		cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv);
 
 	return 0;
 }
@@ -1064,7 +1068,7 @@
 {
 	u16 radio_cfg;
 
-	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+	radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
 
 	/* write radio config values to register */
 	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c94747e..9fa937e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,6 +66,7 @@
 #include <net/mac80211.h>
 
 struct iwl_priv;
+struct iwl_shared;
 
 /*
  * EEPROM access time values:
@@ -305,11 +306,11 @@
 
 
 int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
+void iwl_eeprom_free(struct iwl_shared *shrd);
 int  iwl_eeprom_check_version(struct iwl_priv *priv);
 int  iwl_eeprom_check_sku(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
 int iwl_init_channel_map(struct iwl_priv *priv);
 void iwl_free_channel_map(struct iwl_priv *priv);
 const struct iwl_channel_info *iwl_get_channel_info(
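
The iwl-eeprom changes above move the EEPROM accessors from struct iwl_priv to the shared data (struct iwl_shared), so callers now pass priv->shrd. A minimal caller sketch under that assumption, using only the prototypes visible in this hunk plus iwl_eeprom_get_mac() from the iwl-eeprom.c hunk; the helper itself is hypothetical and not part of the patch:

/* Hypothetical helper, not from the patch: illustrates the new calling
 * convention where EEPROM queries go through priv->shrd instead of priv. */
static void example_log_eeprom_info(struct iwl_priv *priv)
{
	struct iwl_shared *shrd = priv->shrd;	/* shared data now owns the EEPROM image */
	u8 mac[ETH_ALEN];

	/* queries take the shared struct instead of priv */
	IWL_INFO(priv, "EEPROM version: 0x%x\n",
		 iwl_eeprom_query16(shrd, EEPROM_VERSION));

	/* the MAC helper follows the same convention */
	iwl_eeprom_get_mac(shrd, mac);
	IWL_INFO(priv, "EEPROM MAC address: %pM\n", mac);
}

Teardown follows the same pattern: iwl_eeprom_free(priv->shrd) replaces the old iwl_eeprom_free(priv), as seen in the error path of iwl_eeprom_init() above.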
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e6..d57ea64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@
 
 	spin_lock_irqsave(&bus->reg_lock, flags);
 	iwl_grab_nic_access(bus);
-	value = iwl_read32(bus(bus), reg);
+	value = iwl_read32(bus, reg);
 	iwl_release_nic_access(bus);
 	spin_unlock_irqrestore(&bus->reg_lock, flags);
 
@@ -283,16 +283,29 @@
 	return value;
 }
 
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+				void *buf, int words)
 {
 	unsigned long flags;
+	int offs, result = 0;
+	u32 *vals = buf;
 
 	spin_lock_irqsave(&bus->reg_lock, flags);
 	if (!iwl_grab_nic_access(bus)) {
 		iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
 		wmb();
-		iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
+
+		for (offs = 0; offs < words; offs++)
+			iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]);
 		iwl_release_nic_access(bus);
-	}
+	} else
+		result = -EBUSY;
 	spin_unlock_irqrestore(&bus->reg_lock, flags);
+
+	return result;
+}
+
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+{
+	return _iwl_write_targ_mem_words(bus, addr, &val, 1);
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index ced2cbe..aae2eeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -85,6 +85,9 @@
 					 (bufsize) / sizeof(u32));\
 	} while (0)
 
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+			      void *buf, int words);
+
 u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
 #endif
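
The iwl-io changes above replace the void single-word write with an int-returning pair: _iwl_write_targ_mem_words() pushes a whole buffer under one NIC-access grab and reports -EBUSY when the grab fails, and iwl_write_targ_mem() becomes a one-word wrapper around it. A minimal caller sketch assuming a hypothetical scratch buffer and destination address (neither is part of the patch):

/* Hypothetical caller, not from the patch: write a scratch buffer to
 * device target memory and propagate -EBUSY from the NIC-access grab. */
static int example_write_scratch(struct iwl_bus *bus, u32 dst_addr)
{
	u32 scratch[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	int ret;

	/* all four words are written under a single iwl_grab_nic_access() */
	ret = _iwl_write_targ_mem_words(bus, dst_addr, scratch,
					ARRAY_SIZE(scratch));
	if (ret)
		return ret;	/* -EBUSY: NIC access could not be grabbed */

	/* single-word writes now report the same error condition */
	return iwl_write_targ_mem(bus, dst_addr + sizeof(scratch), 0);
}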
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index eb54173..14dcbfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -137,11 +137,11 @@
 	}
 
 	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
-			priv->cfg->base_params->led_compensation);
+			cfg(priv)->base_params->led_compensation);
 	led_cmd.on = iwl_blink_compensation(priv, on,
-				priv->cfg->base_params->led_compensation);
+				cfg(priv)->base_params->led_compensation);
 	led_cmd.off = iwl_blink_compensation(priv, off,
-				priv->cfg->base_params->led_compensation);
+				cfg(priv)->base_params->led_compensation);
 
 	ret = iwl_send_led_cmd(priv, &led_cmd);
 	if (!ret) {
@@ -178,7 +178,7 @@
 	int ret;
 
 	if (mode == IWL_LED_DEFAULT)
-		mode = priv->cfg->led_mode;
+		mode = cfg(priv)->led_mode;
 
 	priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
 				   wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1c93dfe..2550b3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -36,20 +36,6 @@
 #define IWL_LED_ACTIVITY       (0<<1)
 #define IWL_LED_LINK           (1<<1)
 
-/*
- * LED mode
- *    IWL_LED_DEFAULT:  use device default
- *    IWL_LED_RF_STATE: turn LED on/off based on RF state
- *			LED ON  = RF ON
- *			LED OFF = RF OFF
- *    IWL_LED_BLINK:    adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
-	IWL_LED_DEFAULT,
-	IWL_LED_RF_STATE,
-	IWL_LED_BLINK,
-};
-
 void iwlagn_led_enable(struct iwl_priv *priv);
 void iwl_leds_init(struct iwl_priv *priv);
 void iwl_leds_exit(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
new file mode 100644
index 0000000..f980e57
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -0,0 +1,1601 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
+#include "iwl-bus.h"
+#include "iwl-trans.h"
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_GO) |
+			 BIT(NL80211_IFTYPE_AP),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .beacon_int_infra_match = true,
+	  .limits = iwlagn_sta_ap_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+	},
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .limits = iwlagn_2sta_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+	},
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .beacon_int_infra_match = true,
+	  .limits = iwlagn_p2p_sta_go_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+	},
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .limits = iwlagn_p2p_2sta_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+	},
+};
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+				  struct iwlagn_ucode_capabilities *capa)
+{
+	int ret;
+	struct ieee80211_hw *hw = priv->hw;
+	struct iwl_rxon_context *ctx;
+
+	hw->rate_control_algorithm = "iwl-agn-rs";
+
+	/* Tell mac80211 our characteristics */
+	hw->flags = IEEE80211_HW_SIGNAL_DBM |
+		    IEEE80211_HW_AMPDU_AGGREGATION |
+		    IEEE80211_HW_NEED_DTIM_PERIOD |
+		    IEEE80211_HW_SPECTRUM_MGMT |
+		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+	/*
+	 * Including the following line will crash some APs.  This
+	 * workaround removes the stimulus which causes the crash until
+	 * the AP software can be fixed.
+	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	 */
+
+	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+	if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+	hw->sta_data_size = sizeof(struct iwl_station_priv);
+	hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+	for_each_context(priv, ctx) {
+		hw->wiphy->interface_modes |= ctx->interface_modes;
+		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+	}
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+		hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+		hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+	} else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+		hw->wiphy->iface_combinations =
+			iwlagn_iface_combinations_dualmode;
+		hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+	}
+
+	hw->wiphy->max_remain_on_channel_duration = 1000;
+
+	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
+			    WIPHY_FLAG_IBSS_RSN;
+
+	if (trans(priv)->ucode_wowlan.code.len &&
+	    device_can_wakeup(bus(priv)->dev)) {
+		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+					  WIPHY_WOWLAN_DISCONNECT |
+					  WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+					  WIPHY_WOWLAN_RFKILL_RELEASE;
+		if (!iwlagn_mod_params.sw_crypto)
+			hw->wiphy->wowlan.flags |=
+				WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+				WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+		hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+		hw->wiphy->wowlan.pattern_min_len =
+					IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+		hw->wiphy->wowlan.pattern_max_len =
+					IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+	}
+
+	if (iwlagn_mod_params.power_save)
+		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+	else
+		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+	/* we create the 802.11 header and a zero-length SSID element */
+	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+			&priv->bands[IEEE80211_BAND_2GHZ];
+	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+			&priv->bands[IEEE80211_BAND_5GHZ];
+
+	hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
+
+	iwl_leds_init(priv);
+
+	ret = ieee80211_register_hw(priv->hw);
+	if (ret) {
+		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	priv->mac80211_registered = 1;
+
+	return 0;
+}
+
+void iwlagn_mac_unregister(struct iwl_priv *priv)
+{
+	if (!priv->mac80211_registered)
+		return;
+	iwl_leds_exit(priv);
+	ieee80211_unregister_hw(priv->hw);
+	priv->mac80211_registered = 0;
+}
+
+static int __iwl_up(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	int ret;
+
+	lockdep_assert_held(&priv->shrd->mutex);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+		return -EIO;
+	}
+
+	for_each_context(priv, ctx) {
+		ret = iwlagn_alloc_bcast_station(priv, ctx);
+		if (ret) {
+			iwl_dealloc_bcast_stations(priv);
+			return ret;
+		}
+	}
+
+	ret = iwl_run_init_ucode(trans(priv));
+	if (ret) {
+		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
+		goto error;
+	}
+
+	ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
+		goto error;
+	}
+
+	ret = iwl_alive_start(priv);
+	if (ret)
+		goto error;
+	return 0;
+
+ error:
+	set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+	__iwl_down(priv);
+	clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+
+	IWL_ERR(priv, "Unable to initialize device.\n");
+	return ret;
+}
+
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+	int ret;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	/* we should be verifying the device is ready to be opened */
+	mutex_lock(&priv->shrd->mutex);
+	ret = __iwl_up(priv);
+	mutex_unlock(&priv->shrd->mutex);
+	if (ret)
+		return ret;
+
+	IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+	/* Now we should be done, and the READY bit should be set. */
+	if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+		ret = -EIO;
+
+	iwlagn_led_enable(priv);
+
+	priv->is_open = 1;
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return 0;
+}
+
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (!priv->is_open)
+		return;
+
+	priv->is_open = 0;
+
+	iwl_down(priv);
+
+	flush_workqueue(priv->shrd->workqueue);
+
+	/* User space software may expect getting rfkill changes
+	 * even if interface is down */
+	iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
+	iwl_enable_rfkill_int(priv);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct cfg80211_gtk_rekey_data *data)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	if (iwlagn_mod_params.sw_crypto)
+		return;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+		goto out;
+
+	memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+	memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
+	priv->replay_ctr =
+		cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+	priv->have_rekey_data = true;
+
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+			      struct cfg80211_wowlan *wowlan)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	int ret;
+
+	if (WARN_ON(!wowlan))
+		return -EINVAL;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	/* Don't attempt WoWLAN when not associated, tear down instead. */
+	if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+	    !iwl_is_associated_ctx(ctx)) {
+		ret = 1;
+		goto out;
+	}
+
+	ret = iwlagn_suspend(priv, hw, wowlan);
+	if (ret)
+		goto error;
+
+	device_set_wakeup_enable(bus(priv)->dev, true);
+
+	/* Now let the ucode operate on its own */
+	iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+			  CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+	goto out;
+
+ error:
+	priv->shrd->wowlan = false;
+	iwlagn_prepare_restart(priv);
+	ieee80211_restart_hw(priv->hw);
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct ieee80211_vif *vif;
+	unsigned long flags;
+	u32 base, status = 0xffffffff;
+	int ret = -EIO;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+			  CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+	base = priv->shrd->device_pointers.error_event_table;
+	if (iwlagn_hw_valid_rtc_data_addr(base)) {
+		spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+		ret = iwl_grab_nic_access_silent(bus(priv));
+		if (ret == 0) {
+			iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+			status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+			iwl_release_nic_access(bus(priv));
+		}
+		spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		if (ret == 0) {
+			struct iwl_trans *trans = trans(priv);
+			if (!priv->wowlan_sram)
+				priv->wowlan_sram =
+					kzalloc(trans->ucode_wowlan.data.len,
+						GFP_KERNEL);
+
+			if (priv->wowlan_sram)
+				_iwl_read_targ_mem_words(
+					bus(priv), 0x800000, priv->wowlan_sram,
+					trans->ucode_wowlan.data.len / 4);
+		}
+#endif
+	}
+
+	/* we'll clear ctx->vif during iwlagn_prepare_restart() */
+	vif = ctx->vif;
+
+	priv->shrd->wowlan = false;
+
+	device_set_wakeup_enable(bus(priv)->dev, false);
+
+	iwlagn_prepare_restart(priv);
+
+	memset((void *)&ctx->active, 0, sizeof(ctx->active));
+	iwl_connection_init_rx_config(priv, ctx);
+	iwlagn_set_rxon_chain(priv, ctx);
+
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	ieee80211_resume_disconnect(vif);
+
+	return 1;
+}
+
+#endif
+
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+	if (iwlagn_tx_skb(priv, skb))
+		dev_kfree_skb_any(skb);
+}
+
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_key_conf *keyconf,
+				       struct ieee80211_sta *sta,
+				       u32 iv32, u16 *phase1key)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
+}
+
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta,
+			      struct ieee80211_key_conf *key)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *ctx = vif_priv->ctx;
+	int ret;
+	bool is_default_wep_key = false;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (iwlagn_mod_params.sw_crypto) {
+		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * We could program these keys into the hardware as well, but we
+	 * don't expect much multicast traffic in IBSS and having keys
+	 * for more stations is probably more useful.
+	 *
+	 * Mark key TX-only and return 0.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		key->hw_key_idx = WEP_INVALID_OFFSET;
+		return 0;
+	}
+
+	/* If the key was TX-only, accept deletion */
+	if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+		return 0;
+
+	mutex_lock(&priv->shrd->mutex);
+	iwl_scan_cancel_timeout(priv, 100);
+
+	BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
+	/*
+	 * If we are getting a WEP group key and we didn't receive any key mapping
+	 * so far, we are in legacy wep mode (group key only), otherwise we are
+	 * in 1X mode.
+	 * In legacy wep mode, we use another host command to the uCode.
+	 */
+	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+		if (cmd == SET_KEY)
+			is_default_wep_key = !ctx->key_mapping_keys;
+		else
+			is_default_wep_key =
+				key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
+	}
+
+
+	switch (cmd) {
+	case SET_KEY:
+		if (is_default_wep_key) {
+			ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
+			break;
+		}
+		ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+		if (ret) {
+			/*
+			 * can't add key for RX, but we don't need it
+			 * in the device for TX so still return 0
+			 */
+			ret = 0;
+			key->hw_key_idx = WEP_INVALID_OFFSET;
+		}
+
+		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+		break;
+	case DISABLE_KEY:
+		if (is_default_wep_key)
+			ret = iwl_remove_default_wep_key(priv, ctx, key);
+		else
+			ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
+
+		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   enum ieee80211_ampdu_mlme_action action,
+				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+				   u8 buf_size)
+{
+	struct iwl_priv *priv = hw->priv;
+	int ret = -EINVAL;
+	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+
+	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+		     sta->addr, tid);
+
+	if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE))
+		return -EACCES;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+			break;
+		IWL_DEBUG_HT(priv, "start Rx\n");
+		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		IWL_DEBUG_HT(priv, "stop Rx\n");
+		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
+		if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+			ret = 0;
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+			break;
+		IWL_DEBUG_HT(priv, "start Tx\n");
+		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP:
+		IWL_DEBUG_HT(priv, "stop Tx\n");
+		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+		if ((ret == 0) && (priv->agg_tids_count > 0)) {
+			priv->agg_tids_count--;
+			IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+				     priv->agg_tids_count);
+		}
+		if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+			ret = 0;
+		if (!priv->agg_tids_count && cfg(priv)->ht_params &&
+		    cfg(priv)->ht_params->use_rts_for_aggregation) {
+			/*
+			 * switch off RTS/CTS if it was previously enabled
+			 */
+			sta_priv->lq_sta.lq.general_params.flags &=
+				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+					&sta_priv->lq_sta.lq, CMD_ASYNC, false);
+		}
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
+		break;
+	}
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return ret;
+}
+
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+	int ret = 0;
+	u8 sta_id;
+
+	IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
+			sta->addr);
+	mutex_lock(&priv->shrd->mutex);
+	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+			sta->addr);
+	sta_priv->sta_id = IWL_INVALID_STATION;
+
+	atomic_set(&sta_priv->pending_frames, 0);
+	if (vif->type == NL80211_IFTYPE_AP)
+		sta_priv->client = true;
+
+	ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
+				     is_ap, sta, &sta_id);
+	if (ret) {
+		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+			sta->addr, ret);
+		/* Should we return success if return code is EEXIST ? */
+		goto out;
+	}
+
+	sta_priv->sta_id = sta_id;
+
+	/* Initialize rate scaling */
+	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+		       sta->addr);
+	iwl_rs_rate_init(priv, sta, sta_id);
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+				struct ieee80211_channel_switch *ch_switch)
+{
+	struct iwl_priv *priv = hw->priv;
+	const struct iwl_channel_info *ch_info;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = ch_switch->channel;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	/*
+	 * MULTI-FIXME
+	 * When we add support for multiple interfaces, we need to
+	 * revisit this. The channel switch command in the device
+	 * only affects the BSS context, but what does that really
+	 * mean? And what if we get a CSA on the second interface?
+	 * This needs a lot of work.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	u16 ch;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	mutex_lock(&priv->shrd->mutex);
+
+	if (iwl_is_rfkill(priv->shrd))
+		goto out;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+	    test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+		goto out;
+
+	if (!iwl_is_associated_ctx(ctx))
+		goto out;
+
+	if (!cfg(priv)->lib->set_channel_switch)
+		goto out;
+
+	ch = channel->hw_value;
+	if (le16_to_cpu(ctx->active.channel) == ch)
+		goto out;
+
+	ch_info = iwl_get_channel_info(priv, channel->band, ch);
+	if (!is_channel_valid(ch_info)) {
+		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+		goto out;
+	}
+
+	spin_lock_irq(&priv->shrd->lock);
+
+	priv->current_ht_config.smps = conf->smps_mode;
+
+	/* Configure HT40 channels */
+	ctx->ht.enabled = conf_is_ht(conf);
+	if (ctx->ht.enabled)
+		iwlagn_config_ht40(conf, ctx);
+	else
+		ctx->ht.is_40mhz = false;
+
+	if ((le16_to_cpu(ctx->staging.channel) != ch))
+		ctx->staging.flags = 0;
+
+	iwl_set_rxon_channel(priv, channel, ctx);
+	iwl_set_rxon_ht(priv, ht_conf);
+	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+	spin_unlock_irq(&priv->shrd->lock);
+
+	iwl_set_rate(priv);
+	/*
+	 * at this point, staging_rxon has the
+	 * configuration for channel switch
+	 */
+	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+	priv->switch_channel = cpu_to_le16(ch);
+	if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
+		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+		priv->switch_channel = 0;
+		ieee80211_chswitch_done(ctx->vif, false);
+	}
+
+out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+				    unsigned int changed_flags,
+				    unsigned int *total_flags,
+				    u64 multicast)
+{
+	struct iwl_priv *priv = hw->priv;
+	__le32 filter_or = 0, filter_nand = 0;
+	struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag)	do { \
+	if (*total_flags & (test))		\
+		filter_or |= (flag);		\
+	else					\
+		filter_nand |= (flag);		\
+	} while (0)
+
+	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+			changed_flags, *total_flags);
+
+	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+	mutex_lock(&priv->shrd->mutex);
+
+	for_each_context(priv, ctx) {
+		ctx->staging.filter_flags &= ~filter_nand;
+		ctx->staging.filter_flags |= filter_or;
+
+		/*
+		 * Not committing directly because hardware can perform a scan,
+		 * but we'll eventually commit the filter flags change anyway.
+		 */
+	}
+
+	mutex_unlock(&priv->shrd->mutex);
+
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in iwl_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
+	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	mutex_lock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
+		goto done;
+	}
+	if (iwl_is_rfkill(priv->shrd)) {
+		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
+		goto done;
+	}
+
+	/*
+	 * mac80211 will not push any more frames for transmit
+	 * until the flush is completed
+	 */
+	if (drop) {
+		IWL_DEBUG_MAC80211(priv, "send flush command\n");
+		if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+			IWL_ERR(priv, "flush request failed\n");
+			goto done;
+		}
+	}
+	IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+	iwl_trans_wait_tx_queue_empty(trans(priv));
+done:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+				     struct ieee80211_channel *channel,
+				     enum nl80211_channel_type channel_type,
+				     int duration)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+	int err = 0;
+
+	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+		return -EOPNOTSUPP;
+
+	if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
+		return -EOPNOTSUPP;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	priv->hw_roc_channel = channel;
+	priv->hw_roc_chantype = channel_type;
+	/* convert from ms to TU */
+	priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
+	priv->hw_roc_start_notified = false;
+	cancel_delayed_work(&priv->hw_roc_disable_work);
+
+	if (!ctx->is_active) {
+		static const struct iwl_qos_info default_qos_data = {
+			.def_qos_parm = {
+				.ac[0] = {
+					.cw_min = cpu_to_le16(3),
+					.cw_max = cpu_to_le16(7),
+					.aifsn = 2,
+					.edca_txop = cpu_to_le16(1504),
+				},
+				.ac[1] = {
+					.cw_min = cpu_to_le16(7),
+					.cw_max = cpu_to_le16(15),
+					.aifsn = 2,
+					.edca_txop = cpu_to_le16(3008),
+				},
+				.ac[2] = {
+					.cw_min = cpu_to_le16(15),
+					.cw_max = cpu_to_le16(1023),
+					.aifsn = 3,
+				},
+				.ac[3] = {
+					.cw_min = cpu_to_le16(15),
+					.cw_max = cpu_to_le16(1023),
+					.aifsn = 7,
+				},
+			},
+		};
+
+		ctx->is_active = true;
+		ctx->qos_data = default_qos_data;
+		ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+		memcpy(ctx->staging.node_addr,
+		       priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+		       ETH_ALEN);
+		memcpy(ctx->staging.bssid_addr,
+		       priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+		       ETH_ALEN);
+		err = iwlagn_commit_rxon(priv, ctx);
+		if (err)
+			goto out;
+		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+					     RXON_FILTER_PROMISC_MSK |
+					     RXON_FILTER_CTL2HOST_MSK;
+
+		err = iwlagn_commit_rxon(priv, ctx);
+		if (err) {
+			iwlagn_disable_roc(priv);
+			goto out;
+		}
+		priv->hw_roc_setup = true;
+	}
+
+	err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+	if (err)
+		iwlagn_disable_roc(priv);
+
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return err;
+}
+
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+		return -EOPNOTSUPP;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+	iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
+	iwlagn_disable_roc(priv);
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return 0;
+}
+
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      const u8 *bssid,
+			      enum ieee80211_tx_sync_type type)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *ctx = vif_priv->ctx;
+	int ret;
+	u8 sta_id;
+
+	if (ctx->ctxid != IWL_RXON_CTX_PAN)
+		return 0;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	if (iwl_is_associated_ctx(ctx)) {
+		ret = 0;
+		goto out;
+	}
+
+	if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
+	    &priv->shrd->status)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+	if (ret)
+		goto out;
+
+	if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+		ret = -EIO;
+		goto out_remove_sta;
+	}
+
+	memcpy(ctx->bssid, bssid, ETH_ALEN);
+	ctx->preauth_bssid = true;
+
+	ret = iwlagn_commit_rxon(priv, ctx);
+
+	if (ret == 0)
+		goto out;
+
+ out_remove_sta:
+	iwl_remove_station(priv, sta_id, bssid);
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   const u8 *bssid,
+				   enum ieee80211_tx_sync_type type)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+	if (ctx->ctxid != IWL_RXON_CTX_PAN)
+		return;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	if (iwl_is_associated_ctx(ctx))
+		goto out;
+
+	iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+	ctx->preauth_bssid = false;
+	/* no need to commit */
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+			   enum ieee80211_rssi_event rssi_event)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->shrd->mutex);
+
+	if (cfg(priv)->bt_params &&
+			cfg(priv)->bt_params->advanced_bt_coexist) {
+		if (rssi_event == RSSI_EVENT_LOW)
+			priv->bt_enable_pspoll = true;
+		else if (rssi_event == RSSI_EVENT_HIGH)
+			priv->bt_enable_pspoll = false;
+
+		iwlagn_send_advance_bt_config(priv);
+	} else {
+		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
+				"ignoring RSSI callback\n");
+	}
+
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+			   struct ieee80211_sta *sta, bool set)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+	return 0;
+}
+
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+		    struct ieee80211_vif *vif, u16 queue,
+		    const struct ieee80211_tx_queue_params *params)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *ctx = vif_priv->ctx;
+	unsigned long flags;
+	int q;
+
+	if (WARN_ON(!ctx))
+		return -EINVAL;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (!iwl_is_ready_rf(priv->shrd)) {
+		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+		return -EIO;
+	}
+
+	if (queue >= AC_NUM) {
+		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+		return 0;
+	}
+
+	q = AC_NUM - 1 - queue;
+
+	spin_lock_irqsave(&priv->shrd->lock, flags);
+
+	ctx->qos_data.def_qos_parm.ac[q].cw_min =
+		cpu_to_le16(params->cw_min);
+	ctx->qos_data.def_qos_parm.ac[q].cw_max =
+		cpu_to_le16(params->cw_max);
+	ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+	ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+			cpu_to_le16((params->txop * 32));
+
+	ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+	spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return 0;
+}
+
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+	iwl_connection_init_rx_config(priv, ctx);
+
+	iwlagn_set_rxon_chain(priv, ctx);
+
+	return iwlagn_commit_rxon(priv, ctx);
+}
+
+static int iwl_setup_interface(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx)
+{
+	struct ieee80211_vif *vif = ctx->vif;
+	int err;
+
+	lockdep_assert_held(&priv->shrd->mutex);
+
+	/*
+	 * This variable will be correct only when there's just
+	 * a single context, but all code using it is for hardware
+	 * that supports only one context.
+	 */
+	priv->iw_mode = vif->type;
+
+	ctx->is_active = true;
+
+	err = iwl_set_mode(priv, ctx);
+	if (err) {
+		if (!ctx->always_active)
+			ctx->is_active = false;
+		return err;
+	}
+
+	if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist &&
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		/*
+		 * pretend to have high BT traffic as long as we
+		 * are operating in IBSS mode, as this will cause
+		 * the rate scaling etc. to behave as intended.
+		 */
+		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+	}
+
+	return 0;
+}
+
+static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *tmp, *ctx = NULL;
+	int err;
+	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+
+	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+			   viftype, vif->addr);
+
+	cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+	mutex_lock(&priv->shrd->mutex);
+
+	iwlagn_disable_roc(priv);
+
+	if (!iwl_is_ready_rf(priv->shrd)) {
+		IWL_WARN(priv, "Try to add interface when device not ready\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	for_each_context(priv, tmp) {
+		u32 possible_modes =
+			tmp->interface_modes | tmp->exclusive_interface_modes;
+
+		if (tmp->vif) {
+			/* check if this busy context is exclusive */
+			if (tmp->exclusive_interface_modes &
+						BIT(tmp->vif->type)) {
+				err = -EINVAL;
+				goto out;
+			}
+			continue;
+		}
+
+		if (!(possible_modes & BIT(viftype)))
+			continue;
+
+		/* found a possibly usable context without an interface */
+		ctx = tmp;
+		break;
+	}
+
+	if (!ctx) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	vif_priv->ctx = ctx;
+	ctx->vif = vif;
+
+	err = iwl_setup_interface(priv, ctx);
+	if (!err)
+		goto out;
+
+	ctx->vif = NULL;
+	priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return err;
+}
+
+static void iwl_teardown_interface(struct iwl_priv *priv,
+				   struct ieee80211_vif *vif,
+				   bool mode_change)
+{
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+	lockdep_assert_held(&priv->shrd->mutex);
+
+	if (priv->scan_vif == vif) {
+		iwl_scan_cancel_timeout(priv, 200);
+		iwl_force_scan_end(priv);
+	}
+
+	if (!mode_change) {
+		iwl_set_mode(priv, ctx);
+		if (!ctx->always_active)
+			ctx->is_active = false;
+	}
+
+	/*
+	 * When removing the IBSS interface, overwrite the
+	 * BT traffic load with the stored one from the last
+	 * notification, if any. If this is a device that
+	 * doesn't implement this, this has no effect since
+	 * both values are the same and zero.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		priv->bt_traffic_load = priv->last_bt_traffic_load;
+}
+
+static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	mutex_lock(&priv->shrd->mutex);
+
+	if (WARN_ON(ctx->vif != vif)) {
+		struct iwl_rxon_context *tmp;
+		IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+		for_each_context(priv, tmp)
+			IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+				tmp->ctxid, tmp, tmp->vif);
+	}
+	ctx->vif = NULL;
+
+	iwl_teardown_interface(priv, vif, false);
+
+	mutex_unlock(&priv->shrd->mutex);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+
+static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				enum nl80211_iftype newtype, bool newp2p)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl_rxon_context *tmp;
+	enum nl80211_iftype newviftype = newtype;
+	u32 interface_modes;
+	int err;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+	mutex_lock(&priv->shrd->mutex);
+
+	if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+		/*
+		 * Huh? But wait ... this can maybe happen when
+		 * we're in the middle of a firmware restart!
+		 */
+		err = -EBUSY;
+		goto out;
+	}
+
+	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+	if (!(interface_modes & BIT(newtype))) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * Refuse a change that should be done by moving from the PAN
+	 * context to the BSS context instead, if the BSS context is
+	 * available and can support the new interface type.
+	 */
+	if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
+	    (bss_ctx->interface_modes & BIT(newtype) ||
+	     bss_ctx->exclusive_interface_modes & BIT(newtype))) {
+		BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (ctx->exclusive_interface_modes & BIT(newtype)) {
+		for_each_context(priv, tmp) {
+			if (ctx == tmp)
+				continue;
+
+			if (!tmp->vif)
+				continue;
+
+			/*
+			 * The current mode switch would be exclusive, but
+			 * another context is active ... refuse the switch.
+			 */
+			err = -EBUSY;
+			goto out;
+		}
+	}
+
+	/* success */
+	iwl_teardown_interface(priv, vif, true);
+	vif->type = newviftype;
+	vif->p2p = newp2p;
+	err = iwl_setup_interface(priv, ctx);
+	WARN_ON(err);
+	/*
+	 * We've switched internally, but submitting to the
+	 * device may have failed for some reason. Mask this
+	 * error, because otherwise mac80211 will not switch
+	 * (and set the interface type back) and we'll be
+	 * out of sync with it.
+	 */
+	err = 0;
+
+ out:
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return err;
+}
+
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+		    struct ieee80211_vif *vif,
+		    struct cfg80211_scan_request *req)
+{
+	struct iwl_priv *priv = hw->priv;
+	int ret;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (req->n_channels == 0)
+		return -EINVAL;
+
+	mutex_lock(&priv->shrd->mutex);
+
+	/*
+	 * If an internal scan is in progress, just set
+	 * up the scan_request as per above.
+	 */
+	if (priv->scan_type != IWL_SCAN_NORMAL) {
+		IWL_DEBUG_SCAN(priv,
+			       "SCAN request during internal scan - defer\n");
+		priv->scan_request = req;
+		priv->scan_vif = vif;
+		ret = 0;
+	} else {
+		priv->scan_request = req;
+		priv->scan_vif = vif;
+		/*
+		 * mac80211 will only ask for one band at a time
+		 * so using channels[0] here is ok
+		 */
+		ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
+					req->channels[0]->band);
+		if (ret) {
+			priv->scan_request = NULL;
+			priv->scan_vif = NULL;
+		}
+	}
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	mutex_unlock(&priv->shrd->mutex);
+
+	return ret;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+		       struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	int ret;
+
+	IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+			   "station %pM\n", sta->addr);
+	mutex_lock(&priv->shrd->mutex);
+	IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+			sta->addr);
+	ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+	if (ret)
+		IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
+			sta->addr);
+	mutex_unlock(&priv->shrd->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+	priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
+	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+	priv->stations[sta_id].sta.sta.modify_mask = 0;
+	priv->stations[sta_id].sta.sleep_tx_count = 0;
+	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+}
+
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+			   struct ieee80211_vif *vif,
+			   enum sta_notify_cmd cmd,
+			   struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = hw->priv;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	int sta_id;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	switch (cmd) {
+	case STA_NOTIFY_SLEEP:
+		WARN_ON(!sta_priv->client);
+		sta_priv->asleep = true;
+		if (atomic_read(&sta_priv->pending_frames) > 0)
+			ieee80211_sta_block_awake(hw, sta, true);
+		break;
+	case STA_NOTIFY_AWAKE:
+		WARN_ON(!sta_priv->client);
+		if (!sta_priv->asleep)
+			break;
+		sta_priv->asleep = false;
+		sta_id = iwl_sta_id(sta);
+		if (sta_id != IWL_INVALID_STATION)
+			iwl_sta_modify_ps_wake(priv, sta_id);
+		break;
+	default:
+		break;
+	}
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+struct ieee80211_ops iwlagn_hw_ops = {
+	.tx = iwlagn_mac_tx,
+	.start = iwlagn_mac_start,
+	.stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM_SLEEP
+	.suspend = iwlagn_mac_suspend,
+	.resume = iwlagn_mac_resume,
+#endif
+	.add_interface = iwlagn_mac_add_interface,
+	.remove_interface = iwlagn_mac_remove_interface,
+	.change_interface = iwlagn_mac_change_interface,
+	.config = iwlagn_mac_config,
+	.configure_filter = iwlagn_configure_filter,
+	.set_key = iwlagn_mac_set_key,
+	.update_tkip_key = iwlagn_mac_update_tkip_key,
+	.set_rekey_data = iwlagn_mac_set_rekey_data,
+	.conf_tx = iwlagn_mac_conf_tx,
+	.bss_info_changed = iwlagn_bss_info_changed,
+	.ampdu_action = iwlagn_mac_ampdu_action,
+	.hw_scan = iwlagn_mac_hw_scan,
+	.sta_notify = iwlagn_mac_sta_notify,
+	.sta_add = iwlagn_mac_sta_add,
+	.sta_remove = iwlagn_mac_sta_remove,
+	.channel_switch = iwlagn_mac_channel_switch,
+	.flush = iwlagn_mac_flush,
+	.tx_last_beacon = iwlagn_mac_tx_last_beacon,
+	.remain_on_channel = iwlagn_mac_remain_on_channel,
+	.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+	.rssi_callback = iwlagn_mac_rssi_callback,
+	CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+	CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+	.tx_sync = iwlagn_mac_tx_sync,
+	.finish_tx_sync = iwlagn_mac_finish_tx_sync,
+	.set_tim = iwlagn_mac_set_tim,
+};
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_alloc_all(void)
+{
+	struct iwl_priv *priv;
+	/* mac80211 allocates memory for this device instance, including
+	 *   space for this driver's private structure */
+	struct ieee80211_hw *hw;
+
+	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
+	if (!hw)
+		goto out;
+
+	priv = hw->priv;
+	priv->hw = hw;
+
+out:
+	return hw;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 1800029..fb30ea7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -135,7 +135,7 @@
 	}
 }
 
-static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
+static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
 			      int buf_len)
 {
 	struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
@@ -144,6 +144,13 @@
 		 pci_dev->subsystem_device);
 }
 
+static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
+{
+	struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+	return (pci_dev->device << 16) + pci_dev->subsystem_device;
+}
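
The new get_hw_id bus op packs the PCI device ID into the high 16 bits and the subsystem device ID into the low 16 bits of a single u32. A small standalone sketch of that layout, using one of the 6005 IDs added further down purely as an example:

/* Sketch only: shows the hw_id packing, not driver code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t device = 0x0082;	/* example PCI device ID */
	uint16_t subsys = 0x1304;	/* example PCI subsystem device ID */
	uint32_t hw_id = ((uint32_t)device << 16) + subsys;

	printf("hw_id = 0x%08x\n", (unsigned int)hw_id);	/* 0x00821304 */
	return 0;
}
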
+
 static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
 {
 	iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
@@ -163,6 +170,7 @@
 static const struct iwl_bus_ops bus_ops_pci = {
 	.get_pm_support = iwl_pci_is_pm_supported,
 	.apm_config = iwl_pci_apm_config,
+	.get_hw_id_string = iwl_pci_get_hw_id_string,
 	.get_hw_id = iwl_pci_get_hw_id,
 	.write8 = iwl_pci_write8,
 	.write32 = iwl_pci_write32,
@@ -256,6 +264,8 @@
 	{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
+	{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
 
 /* 6x30 Series */
 	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -325,46 +335,28 @@
 	{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
-	{IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
-	{IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
 	{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
 
 /* 2x30 Series */
 	{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
-	{IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
-	{IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
 
 /* 6x35 Series */
 	{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
-	{IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
-	{IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
-	{IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
-	{IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
-	{IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
-	{IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
 
 /* 105 Series */
 	{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
-	{IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
-	{IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
 	{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
 
 /* 135 Series */
 	{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
 	{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
-	{IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
-	{IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
-	{IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
 
 	{0}
 };
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 4eaab20..2b188a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -167,7 +167,7 @@
 	u8 skip;
 	u32 slp_itrvl;
 
-	if (priv->cfg->adv_pm) {
+	if (cfg(priv)->adv_pm) {
 		table = apm_range_2;
 		if (period <= IWL_DTIM_RANGE_1_MAX)
 			table = apm_range_1;
@@ -221,7 +221,7 @@
 		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
 	if (iwl_advanced_bt_coexist(priv)) {
-		if (!priv->cfg->bt_params->bt_sco_disable)
+		if (!cfg(priv)->bt_params->bt_sco_disable)
 			cmd->flags |= IWL_POWER_BT_SCO_ENA;
 		else
 			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -307,7 +307,7 @@
 		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
 	if (iwl_advanced_bt_coexist(priv)) {
-		if (!priv->cfg->bt_params->bt_sco_disable)
+		if (!cfg(priv)->bt_params->bt_sco_disable)
 			cmd->flags |= IWL_POWER_BT_SCO_ENA;
 		else
 			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -350,7 +350,7 @@
 
 	if (priv->shrd->wowlan)
 		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
-	else if (!priv->cfg->base_params->no_idle_support &&
+	else if (!cfg(priv)->base_params->no_idle_support &&
 		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
 		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
 	else if (iwl_tt_is_low_power_state(priv)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index e5d727f..084aa2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -416,6 +416,8 @@
 
 		if (!iwl_is_associated_ctx(ctx))
 			continue;
+		if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
+			continue;
 		value = ctx->beacon_int;
 		if (!value)
 			value = IWL_PASSIVE_DWELL_BASE;
@@ -678,7 +680,8 @@
 			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
 						RXON_FLG_CHANNEL_MODE_MSK)
 				       >> RXON_FLG_CHANNEL_MODE_POS;
-		if (chan_mod == CHANNEL_MODE_PURE_40) {
+		if ((priv->scan_request && priv->scan_request->no_cck) ||
+		    chan_mod == CHANNEL_MODE_PURE_40) {
 			rate = IWL_RATE_6M_PLCP;
 		} else {
 			rate = IWL_RATE_1M_PLCP;
@@ -688,8 +691,8 @@
 		 * Internal scans are passive, so we can indiscriminately set
 		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
 		 */
-		if (priv->cfg->bt_params &&
-		    priv->cfg->bt_params->advanced_bt_coexist)
+		if (cfg(priv)->bt_params &&
+		    cfg(priv)->bt_params->advanced_bt_coexist)
 			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
 		break;
 	case IEEE80211_BAND_5GHZ:
@@ -730,12 +733,12 @@
 
 	band = priv->scan_band;
 
-	if (priv->cfg->scan_rx_antennas[band])
-		rx_ant = priv->cfg->scan_rx_antennas[band];
+	if (cfg(priv)->scan_rx_antennas[band])
+		rx_ant = cfg(priv)->scan_rx_antennas[band];
 
 	if (band == IEEE80211_BAND_2GHZ &&
-	    priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist) {
+	    cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist) {
 		/* transmit 2.4 GHz probes only on first antenna */
 		scan_tx_antennas = first_antenna(scan_tx_antennas);
 	}
@@ -759,8 +762,8 @@
 
 		rx_ant = first_antenna(active_chains);
 	}
-	if (priv->cfg->bt_params &&
-	    priv->cfg->bt_params->advanced_bt_coexist &&
+	if (cfg(priv)->bt_params &&
+	    cfg(priv)->bt_params->advanced_bt_coexist &&
 	    priv->bt_full_concurrent) {
 		/* operated as 1x1 in full concurrency mode */
 		rx_ant = first_antenna(rx_ant);
@@ -938,51 +941,6 @@
 	return 0;
 }
 
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif,
-		    struct cfg80211_scan_request *req)
-{
-	struct iwl_priv *priv = hw->priv;
-	int ret;
-
-	IWL_DEBUG_MAC80211(priv, "enter\n");
-
-	if (req->n_channels == 0)
-		return -EINVAL;
-
-	mutex_lock(&priv->shrd->mutex);
-
-	/*
-	 * If an internal scan is in progress, just set
-	 * up the scan_request as per above.
-	 */
-	if (priv->scan_type != IWL_SCAN_NORMAL) {
-		IWL_DEBUG_SCAN(priv,
-			       "SCAN request during internal scan - defer\n");
-		priv->scan_request = req;
-		priv->scan_vif = vif;
-		ret = 0;
-	} else {
-		priv->scan_request = req;
-		priv->scan_vif = vif;
-		/*
-		 * mac80211 will only ask for one band at a time
-		 * so using channels[0] here is ok
-		 */
-		ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
-					req->channels[0]->band);
-		if (ret) {
-			priv->scan_request = NULL;
-			priv->scan_vif = NULL;
-		}
-	}
-
-	IWL_DEBUG_MAC80211(priv, "leave\n");
-
-	mutex_unlock(&priv->shrd->mutex);
-
-	return ret;
-}
 
 /*
  * internal short scan, this function should only been called while associated.
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 14eaf37..dc55cc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -94,9 +94,9 @@
  * This implementation is iwl-pci.c
  */
 
-struct iwl_cfg;
 struct iwl_bus;
 struct iwl_priv;
+struct iwl_trans;
 struct iwl_sensitivity_ranges;
 struct iwl_trans_ops;
 
@@ -107,6 +107,10 @@
 
 extern struct iwl_mod_params iwlagn_mod_params;
 
+#define IWL_DISABLE_HT_ALL	BIT(0)
+#define IWL_DISABLE_HT_TXAGG	BIT(1)
+#define IWL_DISABLE_HT_RXAGG	BIT(2)
+
 /**
  * struct iwl_mod_params
  *
@@ -114,7 +118,8 @@
  *
  * @sw_crypto: using hardware encryption, default = 0
  * @num_of_queues: number of tx queue, HW dependent
- * @disable_11n: 11n capabilities enabled, default = 0
+ * @disable_11n: disable 11n capabilities, default = 0,
+ *	use IWL_DISABLE_HT_* constants
  * @amsdu_size_8K: enable 8K amsdu size, default = 1
  * @antenna: both antennas (use diversity), default = 0
  * @restart_fw: restart firmware, default = 1
@@ -135,7 +140,7 @@
 struct iwl_mod_params {
 	int sw_crypto;
 	int num_of_queues;
-	int disable_11n;
+	unsigned int disable_11n;
 	int amsdu_size_8K;
 	int antenna;
 	int restart_fw;
@@ -174,8 +179,6 @@
  * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
  *	relevant for 1000, 6000 and up
  * @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
- * @calib_rt_cfg: setup runtime calibrations for the hw
  * @struct iwl_sensitivity_ranges: range of sensitivity values
  */
 struct iwl_hw_params {
@@ -195,67 +198,148 @@
 	u32 ct_kill_exit_threshold;
 	unsigned int wd_timeout;
 
-	u32 calib_init_cfg;
-	u32 calib_rt_cfg;
 	const struct iwl_sensitivity_ranges *sens;
 };
 
 /**
- * enum iwl_agg_state
+ * enum iwl_ucode_type
  *
- * The state machine of the BA agreement establishment / tear down.
- * These states relate to a specific RA / TID.
+ * The type of ucode currently loaded on the hardware.
  *
- * @IWL_AGG_OFF: aggregation is not used
- * @IWL_AGG_ON: aggregation session is up
- * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
- *	HW queue to be empty from packets for this RA /TID.
- * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
- *	HW queue to be empty from packets for this RA /TID.
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
  */
-enum iwl_agg_state {
-	IWL_AGG_OFF = 0,
-	IWL_AGG_ON,
-	IWL_EMPTYING_HW_QUEUE_ADDBA,
-	IWL_EMPTYING_HW_QUEUE_DELBA,
+enum iwl_ucode_type {
+	IWL_UCODE_NONE,
+	IWL_UCODE_REGULAR,
+	IWL_UCODE_INIT,
+	IWL_UCODE_WOWLAN,
 };
 
 /**
- * struct iwl_ht_agg - aggregation state machine
-
- * This structs holds the states for the BA agreement establishment and tear
- * down. It also holds the state during the BA session itself. This struct is
- * duplicated for each RA / TID.
-
- * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
- *	Tx response (REPLY_TX), and the block ack notification
- *	(REPLY_COMPRESSED_BA).
- * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- *	Needed by the upper layer for debugfs only.
- * @wait_for_ba: Expect block-ack before next Tx reply
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwlagn_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and wait for that notification
+ * by calling iwlagn_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
  */
-struct iwl_ht_agg {
-	u32 rate_n_flags;
-	enum iwl_agg_state state;
-	u16 txq_id;
-	bool wait_for_ba;
+struct iwl_notification_wait {
+	struct list_head list;
+
+	void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
+		   void *data);
+	void *fn_data;
+
+	u8 cmd;
+	bool triggered, aborted;
 };
 
 /**
- * struct iwl_tid_data - one for each RA / TID
-
- * This structs holds the states for each RA / TID.
-
- * @seq_number: the next WiFi sequence number to use
- * @tfds_in_queue: number of packets sent to the HW queues.
- *	Exported for debugfs only
- * @agg: aggregation state machine
+ * enum iwl_pa_type - Power Amplifier type
+ * @IWL_PA_SYSTEM:  based on uCode configuration
+ * @IWL_PA_INTERNAL: use Internal only
  */
-struct iwl_tid_data {
-	u16 seq_number;
-	u16 tfds_in_queue;
-	struct iwl_ht_agg agg;
+enum iwl_pa_type {
+	IWL_PA_SYSTEM = 0,
+	IWL_PA_INTERNAL = 1,
+};
+
+/*
+ * LED mode
+ *    IWL_LED_DEFAULT:  use device default
+ *    IWL_LED_RF_STATE: turn LED on/off based on RF state
+ *			LED ON  = RF ON
+ *			LED OFF = RF OFF
+ *    IWL_LED_BLINK:    adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+	IWL_LED_DEFAULT,
+	IWL_LED_RF_STATE,
+	IWL_LED_BLINK,
+};
+
+/**
+ * struct iwl_cfg
+ * @name: Official name of the device
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *	(.ucode) will be added to filename before loading from disk. The
+ *	filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ *	without a warning, for use in transitions
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @sku: sku information from EEPROM
+ * @eeprom_ver: EEPROM version
+ * @eeprom_calib_ver: EEPROM calibration version
+ * @lib: pointer to the lib ops
+ * @additional_nic_config: additional nic configuration
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to ht parameters
+ * @bt_params: pointer to bt parameters
+ * @pa_type: used by 6000 series only to identify the type of Power Amplifier
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ *	don't send it to those
+ * @scan_rx_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ * @adv_pm: advanced power management
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version.
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision.
+ */
+struct iwl_cfg {
+	/* params specific to an individual device within a device family */
+	const char *name;
+	const char *fw_name_pre;
+	const unsigned int ucode_api_max;
+	const unsigned int ucode_api_ok;
+	const unsigned int ucode_api_min;
+	u8   valid_tx_ant;
+	u8   valid_rx_ant;
+	u16  sku;
+	u16  eeprom_ver;
+	u16  eeprom_calib_ver;
+	const struct iwl_lib_ops *lib;
+	void (*additional_nic_config)(struct iwl_priv *priv);
+	/* params not likely to change within a device family */
+	struct iwl_base_params *base_params;
+	/* params likely to change within a device family */
+	struct iwl_ht_params *ht_params;
+	struct iwl_bt_params *bt_params;
+	enum iwl_pa_type pa_type;	  /* if used set to IWL_PA_SYSTEM */
+	const bool need_temp_offset_calib; /* if used set to true */
+	const bool no_xtal_calib;
+	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+	enum iwl_led_mode led_mode;
+	const bool adv_pm;
+	const bool rx_with_siso_diversity;
+	const bool internal_wimax_coex;
+	const bool iq_invert;
+	const bool temp_offset_v2;
 };
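
The @ucode_api_max/@ucode_api_ok/@ucode_api_min triple documented above bounds which firmware API versions the driver accepts, with @ucode_api_ok marking the oldest version that loads without a warning. A standalone sketch of that policy (example_cfg and api_acceptable() are stand-ins, not the driver's loader code):

/* Standalone sketch of the ucode_api_min/ok/max policy. */
#include <stdbool.h>
#include <stdio.h>

struct example_cfg {
	unsigned int ucode_api_max;	/* highest API version the driver supports */
	unsigned int ucode_api_ok;	/* oldest version that loads without a warning */
	unsigned int ucode_api_min;	/* lowest API version the driver supports */
};

static bool api_acceptable(const struct example_cfg *cfg,
			   unsigned int api_ver, bool *warn)
{
	if (api_ver < cfg->ucode_api_min || api_ver > cfg->ucode_api_max)
		return false;				/* refuse to load this image */
	*warn = api_ver < cfg->ucode_api_ok;		/* loadable, but warn during transitions */
	return true;
}

int main(void)
{
	struct example_cfg cfg = { .ucode_api_max = 6, .ucode_api_ok = 5, .ucode_api_min = 4 };
	bool warn;

	printf("v5 acceptable: %d\n", api_acceptable(&cfg, 5, &warn));	/* 1, warn == false */
	printf("v4 acceptable: %d\n", api_acceptable(&cfg, 4, &warn));	/* 1, warn == true */
	printf("v7 acceptable: %d\n", api_acceptable(&cfg, 7, &warn));	/* 0 */
	return 0;
}
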
 
 /**
@@ -266,15 +350,25 @@
  * @ucode_owner: IWL_OWNERSHIP_*
  * @cmd_queue: command queue number
  * @status: STATUS_*
+ * @wowlan: are we running wowlan uCode
  * @valid_contexts: microcode/device supports multiple contexts
  * @bus: pointer to the bus layer data
+ * @cfg: see struct iwl_cfg
  * @priv: pointer to the upper layer data
+ * @trans: pointer to the transport layer data
  * @hw_params: see struct iwl_hw_params
  * @workqueue: the workqueue used by all the layers of the driver
  * @lock: protect general shared data
  * @sta_lock: protects the station table.
  *	If lock and sta_lock are needed, lock must be acquired first.
  * @mutex:
+ * @wait_command_queue: the wait_queue for SYNC host commands and uCode load
+ * @eeprom: pointer to the eeprom/OTP image
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: things waiting for notification
+ * @notif_wait_lock: lock protecting notification
+ * @notif_waitq: head of notification wait queue
+ * @device_pointers: pointers to ucode event tables
  */
 struct iwl_shared {
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -290,6 +384,7 @@
 	u8 valid_contexts;
 
 	struct iwl_bus *bus;
+	struct iwl_cfg *cfg;
 	struct iwl_priv *priv;
 	struct iwl_trans *trans;
 	struct iwl_hw_params hw_params;
@@ -299,13 +394,29 @@
 	spinlock_t sta_lock;
 	struct mutex mutex;
 
-	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
 	wait_queue_head_t wait_command_queue;
+
+	/* eeprom -- this is in the card's little endian byte order */
+	u8 *eeprom;
+
+	/* ucode related variables */
+	enum iwl_ucode_type ucode_type;
+
+	/* notification wait support */
+	struct list_head notif_waits;
+	spinlock_t notif_wait_lock;
+	wait_queue_head_t notif_waitq;
+
+	struct {
+		u32 error_event_table;
+		u32 log_event_table;
+	} device_pointers;
+
 };
 
 /*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
 #define priv(_m)	((_m)->shrd->priv)
+#define cfg(_m)		((_m)->shrd->cfg)
 #define bus(_m)		((_m)->shrd->bus)
 #define trans(_m)	((_m)->shrd->trans)
 #define hw_params(_m)	((_m)->shrd->hw_params)
@@ -427,12 +538,6 @@
 				 struct iwl_device_cmd *cmd);
 
 int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
-				 enum iwl_rxon_context_id ctx,
-				 u8 sta_id, u8 tid);
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
-				enum iwl_rxon_context_id ctx,
-				u8 sta_id, u8 tid);
 void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
 void iwl_nic_config(struct iwl_priv *priv);
 void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -445,6 +550,24 @@
 void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
 void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
 
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+			      struct iwl_notification_wait *wait_entry,
+			      u8 cmd,
+			      void (*fn)(struct iwl_trans *trans,
+					 struct iwl_rx_packet *pkt,
+					 void *data),
+			      void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+			 struct iwl_notification_wait *wait_entry,
+			 unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+			   struct iwl_notification_wait *wait_entry);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_reset_traffic_log(struct iwl_priv *priv);
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
similarity index 74%
rename from drivers/net/wireless/iwlwifi/iwl-sv-open.c
rename to drivers/net/wireless/iwlwifi/iwl-testmode.c
index 5e50d88..4a5cddd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -70,6 +70,7 @@
 #include <net/mac80211.h>
 #include <net/netlink.h>
 
+#include "iwl-wifi.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-debug.h"
@@ -77,6 +78,7 @@
 #include "iwl-agn.h"
 #include "iwl-testmode.h"
 #include "iwl-trans.h"
+#include "iwl-bus.h"
 
 /* The TLVs used in the gnl message policy between the kernel module and
  * user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -106,6 +108,13 @@
 	[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
 
 	[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+	[IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+	[IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+	[IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+
+	[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+	[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
 };
 
 /*
@@ -177,6 +186,18 @@
 {
 	priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
 	priv->testmode_trace.trace_enabled = false;
+	priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+	if (priv->testmode_sram.sram_readed) {
+		kfree(priv->testmode_sram.buff_addr);
+		priv->testmode_sram.buff_addr = NULL;
+		priv->testmode_sram.buff_size = 0;
+		priv->testmode_sram.num_chunks = 0;
+		priv->testmode_sram.sram_readed = false;
+	}
 }
 
 static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +222,7 @@
 void iwl_testmode_cleanup(struct iwl_priv *priv)
 {
 	iwl_trace_cleanup(priv);
+	iwl_sram_cleanup(priv);
 }
 
 /*
@@ -276,7 +298,7 @@
 	IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
 
 	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
-	case IWL_TM_CMD_APP2DEV_REG_READ32:
+	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
 		val32 = iwl_read32(bus(priv), ofs);
 		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
 
@@ -291,7 +313,7 @@
 			IWL_DEBUG_INFO(priv,
 				       "Error sending msg : %d\n", status);
 		break;
-	case IWL_TM_CMD_APP2DEV_REG_WRITE32:
+	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
 		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
 			IWL_DEBUG_INFO(priv,
 				       "Error finding value to write\n");
@@ -302,7 +324,7 @@
 			iwl_write32(bus(priv), ofs, val32);
 		}
 		break;
-	case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
 		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
 			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
 			return -ENOMSG;
@@ -312,6 +334,32 @@
 			iwl_write8(bus(priv), ofs, val8);
 		}
 		break;
+	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+		val32 = iwl_read_prph(bus(priv), ofs);
+		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+		if (!skb) {
+			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+			return -ENOMEM;
+		}
+		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+		status = cfg80211_testmode_reply(skb);
+		if (status < 0)
+			IWL_DEBUG_INFO(priv,
+					"Error sending msg : %d\n", status);
+		break;
+	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+			IWL_DEBUG_INFO(priv,
+					"Error finding value to write\n");
+			return -ENOMSG;
+		} else {
+			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+			iwl_write_prph(bus(priv), ofs, val32);
+		}
+		break;
 	default:
 		IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
 		return -ENOSYS;
@@ -330,24 +378,24 @@
 	struct iwl_notification_wait calib_wait;
 	int ret;
 
-	iwlagn_init_notification_wait(priv, &calib_wait,
+	iwl_init_notification_wait(priv->shrd, &calib_wait,
 				      CALIBRATION_COMPLETE_NOTIFICATION,
 				      NULL, NULL);
-	ret = iwlagn_init_alive_start(priv);
+	ret = iwl_init_alive_start(trans(priv));
 	if (ret) {
 		IWL_DEBUG_INFO(priv,
 			"Error configuring init calibration: %d\n", ret);
 		goto cfg_init_calib_error;
 	}
 
-	ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
+	ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
 	if (ret)
 		IWL_DEBUG_INFO(priv, "Error detecting"
 			" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
 	return ret;
 
 cfg_init_calib_error:
-	iwlagn_remove_notification(priv, &calib_wait);
+	iwl_remove_notification(priv->shrd, &calib_wait);
 	return ret;
 }
 
@@ -370,14 +418,16 @@
 static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_trans *trans = trans(priv);
 	struct sk_buff *skb;
 	unsigned char *rsp_data_ptr = NULL;
 	int status = 0, rsp_data_len = 0;
+	u32 devid;
 
 	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
 	case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
-		rsp_data_ptr = (unsigned char *)priv->cfg->name;
-		rsp_data_len = strlen(priv->cfg->name);
+		rsp_data_ptr = (unsigned char *)cfg(priv)->name;
+		rsp_data_len = strlen(cfg(priv)->name);
 		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
 							rsp_data_len + 20);
 		if (!skb) {
@@ -396,8 +446,7 @@
 		break;
 
 	case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
-		status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
-						      IWL_UCODE_INIT);
+		status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
 		if (status)
 			IWL_DEBUG_INFO(priv,
 				"Error loading init ucode: %d\n", status);
@@ -405,13 +454,11 @@
 
 	case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
 		iwl_testmode_cfg_init_calib(priv);
-		iwl_trans_stop_device(trans(priv));
+		iwl_trans_stop_device(trans);
 		break;
 
 	case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
-		status = iwlagn_load_ucode_wait_alive(priv,
-					   &priv->ucode_rt,
-					   IWL_UCODE_REGULAR);
+		status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR);
 		if (status) {
 			IWL_DEBUG_INFO(priv,
 				"Error loading runtime ucode: %d\n", status);
@@ -423,10 +470,25 @@
 				"Error starting the device: %d\n", status);
 		break;
 
+	case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+		iwl_scan_cancel_timeout(priv, 200);
+		iwl_trans_stop_device(trans);
+		status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN);
+		if (status) {
+			IWL_DEBUG_INFO(priv,
+				"Error loading WOWLAN ucode: %d\n", status);
+			break;
+		}
+		status = iwl_alive_start(priv);
+		if (status)
+			IWL_DEBUG_INFO(priv,
+				"Error starting the device: %d\n", status);
+		break;
+
 	case IWL_TM_CMD_APP2DEV_GET_EEPROM:
-		if (priv->eeprom) {
+		if (priv->shrd->eeprom) {
 			skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
-				priv->cfg->base_params->eeprom_size + 20);
+				cfg(priv)->base_params->eeprom_size + 20);
 			if (!skb) {
 				IWL_DEBUG_INFO(priv,
 				       "Error allocating memory\n");
@@ -435,8 +497,8 @@
 			NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
 				IWL_TM_CMD_DEV2APP_EEPROM_RSP);
 			NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
-				priv->cfg->base_params->eeprom_size,
-				priv->eeprom);
+				cfg(priv)->base_params->eeprom_size,
+				priv->shrd->eeprom);
 			status = cfg80211_testmode_reply(skb);
 			if (status < 0)
 				IWL_DEBUG_INFO(priv,
@@ -455,6 +517,37 @@
 		priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
 		break;
 
+	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+		IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+
+		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+		if (!skb) {
+			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+			return -ENOMEM;
+		}
+		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+		status = cfg80211_testmode_reply(skb);
+		if (status < 0)
+			IWL_DEBUG_INFO(priv,
+					"Error sending msg : %d\n", status);
+		break;
+
+	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+		devid = bus_get_hw_id(bus(priv));
+		IWL_INFO(priv, "hw version: 0x%x\n", devid);
+
+		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+		if (!skb) {
+			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+			return -ENOMEM;
+		}
+		NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+		status = cfg80211_testmode_reply(skb);
+		if (status < 0)
+			IWL_DEBUG_INFO(priv,
+					"Error sending msg : %d\n", status);
+		break;
+
 	default:
 		IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
 		return -ENOSYS;
@@ -535,7 +628,7 @@
 		}
 		priv->testmode_trace.num_chunks =
 			DIV_ROUND_UP(priv->testmode_trace.buff_size,
-				     TRACE_CHUNK_SIZE);
+				     DUMP_CHUNK_SIZE);
 		break;
 
 	case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -567,15 +660,15 @@
 		idx = cb->args[4];
 		if (idx >= priv->testmode_trace.num_chunks)
 			return -ENOENT;
-		length = TRACE_CHUNK_SIZE;
+		length = DUMP_CHUNK_SIZE;
 		if (((idx + 1) == priv->testmode_trace.num_chunks) &&
-		    (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+		    (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
 			length = priv->testmode_trace.buff_size %
-				TRACE_CHUNK_SIZE;
+				DUMP_CHUNK_SIZE;
 
 		NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
 			priv->testmode_trace.trace_addr +
-			(TRACE_CHUNK_SIZE * idx));
+			(DUMP_CHUNK_SIZE * idx));
 		idx++;
 		cb->args[4] = idx;
 		return 0;
@@ -621,6 +714,110 @@
 	return 0;
 }
 
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
+ *
+ * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not been delivered to userspace yet, -ENOMSG if
+ * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * are missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is returned, indicating that the SRAM read succeeded.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+	struct iwl_priv *priv = hw->priv;
+	u32 base, ofs, size, maxsize;
+
+	if (priv->testmode_sram.sram_readed)
+		return -EBUSY;
+
+	if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+		IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+		return -ENOMSG;
+	}
+	ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+	if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+		IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+		return -ENOMSG;
+	}
+	size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+	switch (priv->shrd->ucode_type) {
+	case IWL_UCODE_REGULAR:
+		maxsize = trans(priv)->ucode_rt.data.len;
+		break;
+	case IWL_UCODE_INIT:
+		maxsize = trans(priv)->ucode_init.data.len;
+		break;
+	case IWL_UCODE_WOWLAN:
+		maxsize = trans(priv)->ucode_wowlan.data.len;
+		break;
+	case IWL_UCODE_NONE:
+		IWL_DEBUG_INFO(priv, "Error, uCode has not been loaded\n");
+		return -ENOSYS;
+	default:
+		IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+		return -ENOSYS;
+	}
+	if ((ofs + size) > maxsize) {
+		IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+		return -EINVAL;
+	}
+	priv->testmode_sram.buff_size = (size / 4) * 4;
+	priv->testmode_sram.buff_addr =
+		kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+	if (priv->testmode_sram.buff_addr == NULL) {
+		IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+		return -ENOMEM;
+	}
+	base = 0x800000;
+	_iwl_read_targ_mem_words(bus(priv), base + ofs,
+					priv->testmode_sram.buff_addr,
+					priv->testmode_sram.buff_size / 4);
+	priv->testmode_sram.num_chunks =
+		DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+	priv->testmode_sram.sram_readed = true;
+	return 0;
+}
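
The SRAM read above rounds the requested size down to a multiple of 4, copies that many bytes out of SRAM starting at 0x800000 plus the offset, and records how many DUMP_CHUNK_SIZE pieces the dump callback below will hand back over netlink. A standalone sketch of that chunking arithmetic (assumes a 4096-byte PAGE_SIZE, so DUMP_CHUNK_SIZE is 3072):

/* Sketch only: the chunking arithmetic used by the SRAM dump path. */
#include <stdio.h>

#define DUMP_CHUNK_SIZE		(4096 - 1024)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int size = 10003;			/* size requested from userspace */
	unsigned int buff_size = (size / 4) * 4;	/* rounded down to a 4-byte multiple: 10000 */
	unsigned int num_chunks = DIV_ROUND_UP(buff_size, DUMP_CHUNK_SIZE);
	unsigned int last = buff_size % DUMP_CHUNK_SIZE ?
			    buff_size % DUMP_CHUNK_SIZE : DUMP_CHUNK_SIZE;

	/* 10000 bytes -> 4 chunks: three of 3072 bytes and a final one of 784 */
	printf("chunks=%u last=%u\n", num_chunks, last);
	return 0;
}
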
+
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+				   struct sk_buff *skb,
+				   struct netlink_callback *cb)
+{
+	struct iwl_priv *priv = hw->priv;
+	int idx, length;
+
+	if (priv->testmode_sram.sram_readed) {
+		idx = cb->args[4];
+		if (idx >= priv->testmode_sram.num_chunks) {
+			iwl_sram_cleanup(priv);
+			return -ENOENT;
+		}
+		length = DUMP_CHUNK_SIZE;
+		if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+		    (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+			length = priv->testmode_sram.buff_size %
+				DUMP_CHUNK_SIZE;
+
+		NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+			priv->testmode_sram.buff_addr +
+			(DUMP_CHUNK_SIZE * idx));
+		idx++;
+		cb->args[4] = idx;
+		return 0;
+	} else
+		return -EFAULT;
+
+ nla_put_failure:
+	return -ENOBUFS;
+}
+
 
 /* The testmode gnl message handler that takes the gnl message from the
  * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -668,9 +865,11 @@
 		IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
 		result = iwl_testmode_ucode(hw, tb);
 		break;
-	case IWL_TM_CMD_APP2DEV_REG_READ32:
-	case IWL_TM_CMD_APP2DEV_REG_WRITE32:
-	case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
 		IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
 		result = iwl_testmode_reg(hw, tb);
 		break;
@@ -680,6 +879,9 @@
 	case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
 	case IWL_TM_CMD_APP2DEV_GET_EEPROM:
 	case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+	case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
 		IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
 		result = iwl_testmode_driver(hw, tb);
 		break;
@@ -696,6 +898,11 @@
 		result = iwl_testmode_ownership(hw, tb);
 		break;
 
+	case IWL_TM_CMD_APP2DEV_READ_SRAM:
+		IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+		result = iwl_testmode_sram(hw, tb);
+		break;
+
 	default:
 		IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
 		result = -ENOSYS;
@@ -744,6 +951,10 @@
 		IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
 		result = iwl_testmode_trace_dump(hw, tb, skb, cb);
 		break;
+	case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+		IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+		result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+		break;
 	default:
 		result = -EINVAL;
 		break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index b980bda..26138f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -76,9 +76,9 @@
  *	the actual uCode host command ID is carried with
  *	IWL_TM_ATTR_UCODE_CMD_ID
  *
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
  *	commands from user applicaiton to access register
  *
  * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -103,16 +103,30 @@
  * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
  *	commands from kernel space to carry the eeprom response
  *	to user application
+ *
  * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
  *	commands from user application to own change the ownership of the uCode
  *	if application has the ownership, the only host command from
  *	testmode will deliver to uCode. Default owner is driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ *	commands from user application to indirectly access peripheral registers
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ *	commands from user application to read data in sram
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake on Wireless LAN uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
+ *
  */
 enum iwl_tm_cmd_t {
 	IWL_TM_CMD_APP2DEV_UCODE		= 1,
-	IWL_TM_CMD_APP2DEV_REG_READ32		= 2,
-	IWL_TM_CMD_APP2DEV_REG_WRITE32		= 3,
-	IWL_TM_CMD_APP2DEV_REG_WRITE8		= 4,
+	IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32	= 2,
+	IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32	= 3,
+	IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8	= 4,
 	IWL_TM_CMD_APP2DEV_GET_DEVICENAME	= 5,
 	IWL_TM_CMD_APP2DEV_LOAD_INIT_FW		= 6,
 	IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB	= 7,
@@ -126,7 +140,14 @@
 	IWL_TM_CMD_DEV2APP_UCODE_RX_PKT		= 15,
 	IWL_TM_CMD_DEV2APP_EEPROM_RSP		= 16,
 	IWL_TM_CMD_APP2DEV_OWNERSHIP		= 17,
-	IWL_TM_CMD_MAX				= 18,
+	IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32	= 18,
+	IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32	= 19,
+	IWL_TM_CMD_APP2DEV_READ_SRAM		= 20,
+	IWL_TM_CMD_APP2DEV_DUMP_SRAM		= 21,
+	IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW	= 22,
+	IWL_TM_CMD_APP2DEV_GET_FW_VERSION	= 23,
+	IWL_TM_CMD_APP2DEV_GET_DEVICE_ID	= 24,
+	IWL_TM_CMD_MAX				= 25,
 };
 
 /*
@@ -196,6 +217,26 @@
  *	When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
  *	The mandatory fields are:
  *	IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ *	When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ *	The mandatory fields are:
+ *	IWL_TM_ATTR_SRAM_ADDR for the address in sram
+ *	IWL_TM_ATTR_SRAM_SIZE for the buffer size of data reading
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ *	When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ *	IWL_TM_ATTR_SRAM_DUMP for the data in sram
+ *
+ * @IWL_TM_ATTR_FW_VERSION:
+ *	When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
+ *	IWL_TM_ATTR_FW_VERSION for the uCode version
+ *
+ * @IWL_TM_ATTR_DEVICE_ID:
+ *	When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
+ *	IWL_TM_ATTR_DEVICE_ID for the device ID information
+ *
  */
 enum iwl_tm_attr_t {
 	IWL_TM_ATTR_NOT_APPLICABLE		= 0,
@@ -213,7 +254,12 @@
 	IWL_TM_ATTR_TRACE_DUMP			= 12,
 	IWL_TM_ATTR_FIXRATE			= 13,
 	IWL_TM_ATTR_UCODE_OWNER			= 14,
-	IWL_TM_ATTR_MAX				= 15,
+	IWL_TM_ATTR_SRAM_ADDR			= 15,
+	IWL_TM_ATTR_SRAM_SIZE			= 16,
+	IWL_TM_ATTR_SRAM_DUMP			= 17,
+	IWL_TM_ATTR_FW_VERSION			= 18,
+	IWL_TM_ATTR_DEVICE_ID			= 19,
+	IWL_TM_ATTR_MAX				= 20,
 };
 
 /* uCode trace buffer */
@@ -221,6 +267,8 @@
 #define TRACE_BUFF_SIZE_MIN	0x20000
 #define TRACE_BUFF_SIZE_DEF	TRACE_BUFF_SIZE_MIN
 #define TRACE_BUFF_PADD		0x2000
-#define TRACE_CHUNK_SIZE	(PAGE_SIZE - 1024)
+
+/* Maximum data size of each dump packet */
+#define DUMP_CHUNK_SIZE		(PAGE_SIZE - 1024)
 
 #endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 2b6756e..f6debf9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -219,9 +219,7 @@
 
 	/* INT ICT Table */
 	__le32 *ict_tbl;
-	void *ict_tbl_vir;
 	dma_addr_t ict_tbl_dma;
-	dma_addr_t aligned_ict_tbl_dma;
 	int ict_index;
 	u32 inta;
 	bool use_ict;
@@ -236,6 +234,7 @@
 	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
 	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
 	u8 mcast_queue[NUM_IWL_RXON_CTX];
+	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
 
 	struct iwl_tx_queue *txq;
 	unsigned long txq_ctx_active_msk;
@@ -280,20 +279,16 @@
 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 					   struct iwl_tx_queue *txq,
 					   u16 byte_cnt);
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
 int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  enum iwl_rxon_context_id ctx, int sta_id,
-				  int tid);
+				  int sta_id, int tid);
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 			     struct iwl_tx_queue *txq,
 			     int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
-				enum iwl_rxon_context_id ctx, int sta_id,
-				int tid, u16 *ssn);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
 void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
 				 enum iwl_rxon_context_id ctx,
-				 int sta_id, int tid, int frame_limit);
+				 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	int index, enum dma_data_direction dma_dir);
 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
@@ -354,8 +349,13 @@
 	txq->swq_id = (hwq << 2) | ac;
 }
 
+static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
+{
+	return txq->swq_id & 0x3;
+}
+
 static inline void iwl_wake_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_tx_queue *txq, const char *msg)
 {
 	u8 queue = txq->swq_id;
 	u8 ac = queue & 3;
@@ -363,13 +363,22 @@
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
-		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
+		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
 			iwl_wake_sw_queue(priv(trans), ac);
+			IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
+					    hwq, ac, msg);
+		} else {
+			IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
+					    " stop count %d. %s",
+					    hwq, ac, atomic_read(&trans_pcie->
+					    queue_stop_count[ac]), msg);
+		}
+	}
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_tx_queue *txq, const char *msg)
 {
 	u8 queue = txq->swq_id;
 	u8 ac = queue & 3;
@@ -377,9 +386,23 @@
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
-		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
+		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
 			iwl_stop_sw_queue(priv(trans), ac);
+			IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
+					    " stop count %d. %s",
+					    hwq, ac, atomic_read(&trans_pcie->
+					    queue_stop_count[ac]), msg);
+		} else {
+			IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
+					    " stop count %d. %s",
+					    hwq, ac, atomic_read(&trans_pcie->
+					    queue_stop_count[ac]), msg);
+		}
+	} else {
+		IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is already stopped. %s",
+				    hwq, msg);
+	}
 }
 
 #ifdef ieee80211_stop_queue
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 374c68c..752493f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -594,8 +594,8 @@
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	base = priv->device_pointers.error_event_table;
-	if (priv->ucode_type == IWL_UCODE_INIT) {
+	base = trans->shrd->device_pointers.error_event_table;
+	if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
 		if (!base)
 			base = priv->init_errlog_ptr;
 	} else {
@@ -607,7 +607,7 @@
 		IWL_ERR(trans,
 			"Not valid error log pointer 0x%08X for %s uCode\n",
 			base,
-			(priv->ucode_type == IWL_UCODE_INIT)
+			(trans->shrd->ucode_type == IWL_UCODE_INIT)
 					? "Init" : "RT");
 		return;
 	}
@@ -648,6 +648,21 @@
 	IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
 	IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
 	IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+
+	IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
+	IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
+	IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
+	IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
+	IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
+	IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
+	IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
+	IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
+	IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
+	IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+	IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+	IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+	IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
+	IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
 }
 
 /**
@@ -657,7 +672,7 @@
 {
 	struct iwl_priv *priv = priv(trans);
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
-	if (priv->cfg->internal_wimax_coex &&
+	if (cfg(priv)->internal_wimax_coex &&
 	    (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
 			APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
@@ -709,8 +724,8 @@
 	if (num_events == 0)
 		return pos;
 
-	base = priv->device_pointers.log_event_table;
-	if (priv->ucode_type == IWL_UCODE_INIT) {
+	base = trans->shrd->device_pointers.log_event_table;
+	if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
 		if (!base)
 			base = priv->init_evtlog_ptr;
 	} else {
@@ -823,8 +838,8 @@
 	size_t bufsz = 0;
 	struct iwl_priv *priv = priv(trans);
 
-	base = priv->device_pointers.log_event_table;
-	if (priv->ucode_type == IWL_UCODE_INIT) {
+	base = trans->shrd->device_pointers.log_event_table;
+	if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
 		logsize = priv->init_evtlog_size;
 		if (!base)
 			base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@
 		IWL_ERR(trans,
 			"Invalid event log pointer 0x%08X for %s uCode\n",
 			base,
-			(priv->ucode_type == IWL_UCODE_INIT)
+			(trans->shrd->ucode_type == IWL_UCODE_INIT)
 					? "Init" : "RT");
 		return -EINVAL;
 	}
@@ -1108,7 +1123,7 @@
 		isr_stats->tx++;
 		handled |= CSR_INT_BIT_FH_TX;
 		/* Wake up uCode load routine, now that load is complete */
-		priv(trans)->ucode_write_complete = 1;
+		trans->ucode_write_complete = 1;
 		wake_up(&trans->shrd->wait_command_queue);
 	}
 
@@ -1136,7 +1151,11 @@
  * ICT functions
  *
  ******************************************************************************/
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT	12
+#define ICT_SIZE	(1 << ICT_SHIFT)
+#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
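
With ICT_SHIFT at 12, the table occupies exactly one 4096-byte device page and holds 1024 32-bit interrupt-cause entries; the value programmed into CSR_DRAM_INT_TBL_REG later in this file is the table's DMA address shifted right by ICT_SHIFT. A trivial standalone check of those sizes (using a 4-byte unsigned int as a stand-in for u32):

/* Sketch only: the ICT table geometry implied by the defines above. */
#include <stdio.h>

#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)			/* 4096 bytes */
#define ICT_COUNT	(ICT_SIZE / sizeof(unsigned int))	/* 1024 entries */

int main(void)
{
	printf("ICT_SIZE=%d ICT_COUNT=%zu\n", ICT_SIZE, ICT_COUNT);
	return 0;
}
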
 
 /* Free dram table */
 void iwl_free_isr_ict(struct iwl_trans *trans)
@@ -1144,21 +1163,19 @@
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (trans_pcie->ict_tbl_vir) {
-		dma_free_coherent(bus(trans)->dev,
-				  (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
-				  trans_pcie->ict_tbl_vir,
+	if (trans_pcie->ict_tbl) {
+		dma_free_coherent(bus(trans)->dev, ICT_SIZE,
+				  trans_pcie->ict_tbl,
 				  trans_pcie->ict_tbl_dma);
-		trans_pcie->ict_tbl_vir = NULL;
-		memset(&trans_pcie->ict_tbl_dma, 0,
-			sizeof(trans_pcie->ict_tbl_dma));
-		memset(&trans_pcie->aligned_ict_tbl_dma, 0,
-			sizeof(trans_pcie->aligned_ict_tbl_dma));
+		trans_pcie->ict_tbl = NULL;
+		trans_pcie->ict_tbl_dma = 0;
 	}
 }
 
 
-/* allocate dram shared table it is a PAGE_SIZE aligned
+/*
+ * allocate dram shared table, it is an aligned memory
+ * block of ICT_SIZE.
  * also reset all data related to ICT table interrupt.
  */
 int iwl_alloc_isr_ict(struct iwl_trans *trans)
@@ -1166,36 +1183,26 @@
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	/* allocate shrared data table */
-	trans_pcie->ict_tbl_vir =
-		dma_alloc_coherent(bus(trans)->dev,
-				   (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
-				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
-	if (!trans_pcie->ict_tbl_vir)
+	trans_pcie->ict_tbl =
+		dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
+				   &trans_pcie->ict_tbl_dma,
+				   GFP_KERNEL);
+	if (!trans_pcie->ict_tbl)
 		return -ENOMEM;
 
-	/* align table to PAGE_SIZE boundary */
-	trans_pcie->aligned_ict_tbl_dma =
-		ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
+	/* just an API sanity check ... it is guaranteed to be aligned */
+	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+		iwl_free_isr_ict(trans);
+		return -EINVAL;
+	}
 
-	IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
-			   (unsigned long long)trans_pcie->ict_tbl_dma,
-			   (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
-			   (int)(trans_pcie->aligned_ict_tbl_dma -
-			   trans_pcie->ict_tbl_dma));
+	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
+		      (unsigned long long)trans_pcie->ict_tbl_dma);
 
-	trans_pcie->ict_tbl =  trans_pcie->ict_tbl_vir +
-			  (trans_pcie->aligned_ict_tbl_dma -
-			  trans_pcie->ict_tbl_dma);
-
-	IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
-			     trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
-			(int)(trans_pcie->aligned_ict_tbl_dma -
-			    trans_pcie->ict_tbl_dma));
+	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
 
 	/* reset table and index to all 0 */
-	memset(trans_pcie->ict_tbl_vir, 0,
-		(sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
 	trans_pcie->ict_index = 0;
 
 	/* add periodic RX interrupt */
@@ -1213,23 +1220,20 @@
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (!trans_pcie->ict_tbl_vir)
+	if (!trans_pcie->ict_tbl)
 		return 0;
 
 	spin_lock_irqsave(&trans->shrd->lock, flags);
 	iwl_disable_interrupts(trans);
 
-	memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
 
-	val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
+	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
 	val |= CSR_DRAM_INT_TBL_ENABLE;
 	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
 
-	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
-			"aligned dma address %Lx\n",
-			val,
-			(unsigned long long)trans_pcie->aligned_ict_tbl_dma);
+	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
 	iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
 	trans_pcie->use_ict = true;
@@ -1266,6 +1270,8 @@
 	if (!trans)
 		return IRQ_NONE;
 
+	trace_iwlwifi_dev_irq(priv(trans));
+
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	spin_lock_irqsave(&trans->shrd->lock, flags);
@@ -1340,6 +1346,7 @@
 	struct iwl_trans_pcie *trans_pcie;
 	u32 inta, inta_mask;
 	u32 val = 0;
+	u32 read;
 	unsigned long flags;
 
 	if (!trans)
@@ -1353,6 +1360,8 @@
 	if (!trans_pcie->use_ict)
 		return iwl_isr(irq, data);
 
+	trace_iwlwifi_dev_irq(priv(trans));
+
 	spin_lock_irqsave(&trans->shrd->lock, flags);
 
 	/* Disable (but don't clear!) interrupts here to avoid
@@ -1367,24 +1376,29 @@
 	/* Ignore interrupt if there's nothing in NIC to service.
 	 * This may be due to IRQ shared with another device,
 	 * or due to sporadic interrupts thrown from our NIC. */
-	if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read);
+	if (!read) {
 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
 		goto none;
 	}
 
-	/* read all entries that not 0 start with ict_index */
-	while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
-
-		val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	/*
+	 * Collect all entries up to the first 0, starting from ict_index;
+	 * note we already read at ict_index.
+	 */
+	do {
+		val |= read;
 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
-				trans_pcie->ict_index,
-				le32_to_cpu(
-				  trans_pcie->ict_tbl[trans_pcie->ict_index]));
+				trans_pcie->ict_index, read);
 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
 		trans_pcie->ict_index =
 			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
 
-	}
+		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+		trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index,
+					   read);
+	} while (read);
 
 	/* We should not get this value, just ignore it. */
 	if (val == 0xffffffff)
@@ -1411,7 +1425,7 @@
 	if (likely(inta))
 		tasklet_schedule(&trans_pcie->irq_tasklet);
 	else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
-			!trans_pcie->inta) {
+		 !trans_pcie->inta) {
 		/* Allow interrupt if was disabled by this handler and
 		 * no tasklet was schedules, We should not enable interrupt,
 		 * tasklet will enable it.
@@ -1427,7 +1441,7 @@
 	 * only Re-enable if disabled by irq.
 	 */
 	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
-		!trans_pcie->inta)
+	    !trans_pcie->inta)
 		iwl_enable_interrupts(trans);
 
 	spin_unlock_irqrestore(&trans->shrd->lock, flags);
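The hunks above drop the manual page alignment of the ICT table: the buffer is now allocated at exactly ICT_SIZE bytes and only sanity-checked for natural alignment. A minimal sketch of the size/shift relationship this relies on (the ICT_* values and their definitions here are assumptions for illustration, not lines taken from the patch):

	/* Assumed layout: ICT_COUNT 32-bit entries, ICT_SIZE bytes total,
	 * ICT_SHIFT == log2(ICT_SIZE). dma_alloc_coherent() returns memory
	 * aligned to the smallest page order >= the requested size, which is
	 * what the WARN_ON(ict_tbl_dma & (ICT_SIZE - 1)) above relies on. */
	#define ICT_SHIFT	12
	#define ICT_SIZE	(1 << ICT_SHIFT)		/* 4096 bytes   */
	#define ICT_COUNT	(ICT_SIZE / sizeof(u32))	/* 1024 entries */

	/* CSR_DRAM_INT_TBL_REG takes the table base in ICT_SIZE units: */
	u32 val = (u32)(trans_pcie->ict_tbl_dma >> ICT_SHIFT) |
		  CSR_DRAM_INT_TBL_ENABLE | CSR_DRAM_INIT_TBL_WRAP_CHECK;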
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 4a0c953..bd29568 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -408,6 +408,7 @@
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 				int txq_id, u32 index)
 {
+	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d", txq_id, index & 0xff);
 	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
 			(index & 0xff) | (txq_id << 8));
 	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
@@ -430,7 +431,7 @@
 
 	txq->sched_retry = scd_retry;
 
-	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
+	IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
 		       active ? "Activate" : "Deactivate",
 		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
 }
@@ -446,14 +447,21 @@
 	return -EINVAL;
 }
 
+static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
+{
+	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
+		return false;
+	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
+		hw_params(trans).num_ampdu_queues);
+}
+
 void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
 				 enum iwl_rxon_context_id ctx, int sta_id,
-				 int tid, int frame_limit)
+				 int tid, int frame_limit, u16 ssn)
 {
-	int tx_fifo, txq_id, ssn_idx;
+	int tx_fifo, txq_id;
 	u16 ra_tid;
 	unsigned long flags;
-	struct iwl_tid_data *tid_data;
 
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -469,11 +477,15 @@
 		return;
 	}
 
-	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
-	tid_data = &trans->shrd->tid_data[sta_id][tid];
-	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
-	txq_id = tid_data->agg.txq_id;
-	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+	txq_id = trans_pcie->agg_txq[sta_id][tid];
+	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+		IWL_ERR(trans,
+			"queue number out of range: %d, must be %d to %d\n",
+			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+			IWLAGN_FIRST_AMPDU_QUEUE +
+			hw_params(trans).num_ampdu_queues - 1);
+		return;
+	}
 
 	ra_tid = BUILD_RAxTID(sta_id, tid);
 
@@ -493,9 +505,9 @@
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
-	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
+	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
+	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
 
 	/* Set up Tx window size and frame limit for this queue */
 	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
@@ -539,12 +551,9 @@
 }
 
 int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
-				enum iwl_rxon_context_id ctx, int sta_id,
-				int tid, u16 *ssn)
+				int sta_id, int tid)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tid_data *tid_data;
-	unsigned long flags;
 	int txq_id;
 
 	txq_id = iwlagn_txq_ctx_activate_free(trans);
@@ -553,34 +562,31 @@
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
-	tid_data = &trans->shrd->tid_data[sta_id][tid];
-	*ssn = SEQ_TO_SN(tid_data->seq_number);
-	tid_data->agg.txq_id = txq_id;
+	trans_pcie->agg_txq[sta_id][tid] = txq_id;
 	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
 
-	tid_data = &trans->shrd->tid_data[sta_id][tid];
-	if (tid_data->tfds_in_queue == 0) {
-		IWL_DEBUG_HT(trans, "HW queue is empty\n");
-		tid_data->agg.state = IWL_AGG_ON;
-		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
-	} else {
-		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
-			     "queue\n", tid_data->tfds_in_queue);
-		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
-	}
-	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-
 	return 0;
 }
 
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
+
+	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+		IWL_ERR(trans,
+			"queue number out of range: %d, must be %d to %d\n",
+			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+			IWLAGN_FIRST_AMPDU_QUEUE +
+			hw_params(trans).num_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
 	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
 
 	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
 
+	trans_pcie->agg_txq[sta_id][tid] = 0;
 	trans_pcie->txq[txq_id].q.read_ptr = 0;
 	trans_pcie->txq[txq_id].q.write_ptr = 0;
 	/* supposes that ssn_idx is valid (!= 0xFFF) */
@@ -589,81 +595,6 @@
 	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
 	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
 	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  enum iwl_rxon_context_id ctx, int sta_id,
-				  int tid)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
-	int read_ptr, write_ptr;
-	struct iwl_tid_data *tid_data;
-	int txq_id;
-
-	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
-
-	tid_data = &trans->shrd->tid_data[sta_id][tid];
-	txq_id = tid_data->agg.txq_id;
-
-	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWLAGN_FIRST_AMPDU_QUEUE +
-		hw_params(trans).num_ampdu_queues <= txq_id)) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-		return -EINVAL;
-	}
-
-	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/*
-		* This can happen if the peer stops aggregation
-		* again before we've had a chance to drain the
-		* queue we selected previously, i.e. before the
-		* session was really started completely.
-		*/
-		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
-		goto turn_off;
-	case IWL_AGG_ON:
-		break;
-	default:
-		IWL_WARN(trans, "Stopping AGG while state not ON "
-			 "or starting for %d on %d (%d)\n", sta_id, tid,
-			 trans->shrd->tid_data[sta_id][tid].agg.state);
-		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-		return 0;
-	}
-
-	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
-	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
-
-	/* The queue is not empty */
-	if (write_ptr != read_ptr) {
-		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
-		trans->shrd->tid_data[sta_id][tid].agg.state =
-			IWL_EMPTYING_HW_QUEUE_DELBA;
-		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-		return 0;
-	}
-
-	IWL_DEBUG_HT(trans, "HW queue is empty\n");
-turn_off:
-	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
-	/* do not restore/save irqs */
-	spin_unlock(&trans->shrd->sta_lock);
-	spin_lock(&trans->shrd->lock);
-
-	iwl_trans_pcie_txq_agg_disable(trans, txq_id);
-
-	spin_unlock_irqrestore(&trans->shrd->lock, flags);
-
-	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
-
 	return 0;
 }
 
@@ -982,7 +913,8 @@
 
 	ret = iwl_enqueue_hcmd(trans, cmd);
 	if (ret < 0) {
-		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+		IWL_DEBUG_QUIET_RFKILL(trans,
+			"Error sending %s: enqueue_hcmd failed: %d\n",
 			  get_cmd_string(cmd->id), ret);
 		return ret;
 	}
@@ -1000,6 +932,20 @@
 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
 			get_cmd_string(cmd->id));
 
+	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+		return -EBUSY;
+
+
+	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
+			       get_cmd_string(cmd->id));
+		return -ECANCELED;
+	}
+	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+		IWL_ERR(trans, "Command %s failed: FW Error\n",
+			       get_cmd_string(cmd->id));
+		return -EIO;
+	}
 	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
 			get_cmd_string(cmd->id));
@@ -1008,7 +954,8 @@
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
 		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
-		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+		IWL_DEBUG_QUIET_RFKILL(trans,
+			"Error sending %s: enqueue_hcmd failed: %d\n",
 			  get_cmd_string(cmd->id), ret);
 		return ret;
 	}
@@ -1022,12 +969,12 @@
 				&trans_pcie->txq[trans->shrd->cmd_queue];
 			struct iwl_queue *q = &txq->q;
 
-			IWL_ERR(trans,
+			IWL_DEBUG_QUIET_RFKILL(trans,
 				"Error sending %s: time out after %dms.\n",
 				get_cmd_string(cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
-			IWL_ERR(trans,
+			IWL_DEBUG_QUIET_RFKILL(trans,
 				"Current CMD queue read_ptr %d write_ptr %d\n",
 				q->read_ptr, q->write_ptr);
 
@@ -1039,18 +986,6 @@
 		}
 	}
 
-	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
-		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
-			       get_cmd_string(cmd->id));
-		ret = -ECANCELED;
-		goto fail;
-	}
-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_ERR(trans, "Command %s failed: FW Error\n",
-			       get_cmd_string(cmd->id));
-		ret = -EIO;
-		goto fail;
-	}
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
 			  get_cmd_string(cmd->id));
@@ -1071,7 +1006,7 @@
 		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
 							~CMD_WANT_SKB;
 	}
-fail:
+
 	if (cmd->reply_page) {
 		iwl_free_pages(trans->shrd, cmd->reply_page);
 		cmd->reply_page = 0;
@@ -1115,9 +1050,6 @@
 		return 0;
 	}
 
-	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
-			   q->read_ptr, index);
-
 	if (WARN_ON(!skb_queue_empty(skbs)))
 		return 0;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ce91898..67d6e32 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -88,18 +88,16 @@
 		return -EINVAL;
 
 	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
-	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-				     &rxq->bd_dma, GFP_KERNEL);
+	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+				      &rxq->bd_dma, GFP_KERNEL);
 	if (!rxq->bd)
 		goto err_bd;
-	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
 
 	/*Allocate the driver's pointer to receive buffer status */
-	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
-					  &rxq->rb_stts_dma, GFP_KERNEL);
+	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+					   &rxq->rb_stts_dma, GFP_KERNEL);
 	if (!rxq->rb_stts)
 		goto err_rb_stts;
-	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 
 	return 0;
 
@@ -1044,7 +1042,7 @@
 
 static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id)
+		u8 sta_id, u8 tid)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1058,13 +1056,12 @@
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
 	u16 len, firstlen, secondlen;
-	u16 seq_number = 0;
 	u8 wait_write_ptr = 0;
 	u8 txq_id;
-	u8 tid = 0;
 	bool is_agg = false;
 	__le16 fc = hdr->frame_control;
 	u8 hdr_len = ieee80211_hdrlen(fc);
+	u16 __maybe_unused wifi_seq;
 
 	/*
 	 * Send this frame after DTIM -- there's a special queue
@@ -1085,36 +1082,28 @@
 		txq_id =
 		    trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
 
-	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
-		u8 *qc = NULL;
-		struct iwl_tid_data *tid_data;
-		qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		tid_data = &trans->shrd->tid_data[sta_id][tid];
-
-		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
-			return -1;
-
-		seq_number = tid_data->seq_number;
-		seq_number &= IEEE80211_SCTL_SEQ;
-		hdr->seq_ctrl = hdr->seq_ctrl &
-				cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(seq_number);
-		seq_number += 0x10;
-		/* aggregation is on for this <sta,tid> */
-		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-			WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
-			txq_id = tid_data->agg.txq_id;
-			is_agg = true;
-		}
+	/* aggregation is on for this <sta,tid> */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		WARN_ON(tid >= IWL_MAX_TID_COUNT);
+		txq_id = trans_pcie->agg_txq[sta_id][tid];
+		is_agg = true;
 	}
 
-	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, hdr_len);
-
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
 
+	/* In AGG mode, the index in the ring must correspond to the WiFi
+	 * sequence number. This is a HW requirement to help the SCD to parse
+	 * the BA.
+	 * Check here that the packets are in the right place on the ring.
+	 */
+#ifdef CONFIG_IWLWIFI_DEBUG
+	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
+		  "Q: %d WiFi Seq %d tfdNum %d",
+		  txq_id, wifi_seq, q->write_ptr);
+#endif
+
 	/* Set up driver data for this TFD */
 	txq->skbs[q->write_ptr] = skb;
 	txq->cmd[q->write_ptr] = dev_cmd;
@@ -1197,9 +1186,7 @@
 	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	if (is_agg)
-		iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-					       le16_to_cpu(tx_cmd->len));
+	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
 	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
 			DMA_BIDIRECTIONAL);
@@ -1214,13 +1201,6 @@
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	iwl_txq_update_write_ptr(trans, txq);
 
-	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
-		trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
-		if (!ieee80211_has_morefrags(fc))
-			trans->shrd->tid_data[sta_id][tid].seq_number =
-				seq_number;
-	}
-
 	/*
 	 * At this point the frame is "transmitted" successfully
 	 * and we will get a TX status notification eventually,
@@ -1232,7 +1212,7 @@
 			txq->need_update = 1;
 			iwl_txq_update_write_ptr(trans, txq);
 		} else {
-			iwl_stop_queue(trans, txq);
+			iwl_stop_queue(trans, txq, "Queue is full");
 		}
 	}
 	return 0;
@@ -1269,101 +1249,48 @@
 	return 0;
 }
 
-static int iwlagn_txq_check_empty(struct iwl_trans *trans,
-			   int sta_id, u8 tid, int txq_id)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
-	struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
-
-	lockdep_assert_held(&trans->shrd->sta_lock);
-
-	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_DELBA:
-		/* We are reclaiming the last packet of the */
-		/* aggregated HW queue */
-		if ((txq_id  == tid_data->agg.txq_id) &&
-		    (q->read_ptr == q->write_ptr)) {
-			IWL_DEBUG_HT(trans,
-				"HW queue empty: continue DELBA flow\n");
-			iwl_trans_pcie_txq_agg_disable(trans, txq_id);
-			tid_data->agg.state = IWL_AGG_OFF;
-			iwl_stop_tx_ba_trans_ready(priv(trans),
-						   NUM_IWL_RXON_CTX,
-						   sta_id, tid);
-			iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
-		}
-		break;
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/* We are reclaiming the last packet of the queue */
-		if (tid_data->tfds_in_queue == 0) {
-			IWL_DEBUG_HT(trans,
-				"HW queue empty: continue ADDBA flow\n");
-			tid_data->agg.state = IWL_AGG_ON;
-			iwl_start_tx_ba_trans_ready(priv(trans),
-						    NUM_IWL_RXON_CTX,
-						    sta_id, tid);
-		}
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
-			    int sta_id, int tid, int freed)
-{
-	lockdep_assert_held(&trans->shrd->sta_lock);
-
-	if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
-		trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
-	else {
-		IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
-			trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
-			freed);
-		trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
-	}
-}
-
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 		      int txq_id, int ssn, u32 status,
 		      struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
-	enum iwl_agg_state agg_state;
 	/* n_bd is usually 256 => n_bd - 1 = 0xff */
 	int tfd_num = ssn & (txq->q.n_bd - 1);
 	int freed = 0;
-	bool cond;
 
 	txq->time_stamp = jiffies;
 
-	if (txq->sched_retry) {
-		agg_state =
-			trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
-		cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
-	} else {
-		cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
+	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+		     txq_id != trans_pcie->agg_txq[sta_id][tid])) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed,
+		 * log the information and return for now.
+		 * Since it can possibly happen very often and in order
+		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
+		 */
+		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
+			"agg_txq[sta_id][tid] %d", txq_id,
+			trans_pcie->agg_txq[sta_id][tid]);
+		return 1;
 	}
 
 	if (txq->q.read_ptr != tfd_num) {
-		IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
-				"scd_ssn=%d idx=%d txq=%d swq=%d\n",
-				ssn , tfd_num, txq_id, txq->swq_id);
+		IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
+				txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
+				tfd_num, ssn);
 		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
-			iwl_wake_queue(trans, txq);
+		if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+		   (!txq->sched_retry ||
+		   status != TX_STATUS_FAIL_PASSIVE_NO_RX))
+			iwl_wake_queue(trans, txq, "Packets reclaimed");
 	}
-
-	iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
-	iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
+	return 0;
 }
 
 static void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
+	iwl_calib_free_results(trans);
 	iwl_trans_pcie_tx_free(trans);
 	iwl_trans_pcie_rx_free(trans);
 	free_irq(bus(trans)->irq, trans);
@@ -1419,7 +1346,8 @@
 #endif /* CONFIG_PM_SLEEP */
 
 static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
-					  enum iwl_rxon_context_id ctx)
+					  enum iwl_rxon_context_id ctx,
+					  const char *msg)
 {
 	u8 ac, txq_id;
 	struct iwl_trans_pcie *trans_pcie =
@@ -1427,11 +1355,11 @@
 
 	for (ac = 0; ac < AC_NUM; ac++) {
 		txq_id = trans_pcie->ac_to_queue[ctx][ac];
-		IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+		IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
 			ac,
 			(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
 			      ? "stopped" : "awake");
-		iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+		iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
 	}
 }
 
@@ -1454,11 +1382,12 @@
 	return iwl_trans;
 }
 
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
+				      const char *msg)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+	iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
 }
 
 #define IWL_FLUSH_WAIT_MS	2000
@@ -1513,8 +1442,12 @@
 	if (time_after(jiffies, timeout)) {
 		IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
 			hw_params(trans).wd_timeout);
-		IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+		IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
 			q->read_ptr, q->write_ptr);
+		IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+			iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+				& (TFD_QUEUE_SIZE_MAX - 1),
+			iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
 		return 1;
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c592312..e6bf3f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -171,32 +171,31 @@
 	void (*tx_start)(struct iwl_trans *trans);
 
 	void (*wake_any_queue)(struct iwl_trans *trans,
-			       enum iwl_rxon_context_id ctx);
+			       enum iwl_rxon_context_id ctx,
+			       const char *msg);
 
 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
 		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id);
-	void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+		u8 sta_id, u8 tid);
+	int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
 			int txq_id, int ssn, u32 status,
 			struct sk_buff_head *skbs);
 
 	int (*tx_agg_disable)(struct iwl_trans *trans,
-			      enum iwl_rxon_context_id ctx, int sta_id,
-			      int tid);
+			      int sta_id, int tid);
 	int (*tx_agg_alloc)(struct iwl_trans *trans,
-			    enum iwl_rxon_context_id ctx, int sta_id, int tid,
-			    u16 *ssn);
+			    int sta_id, int tid);
 	void (*tx_agg_setup)(struct iwl_trans *trans,
 			     enum iwl_rxon_context_id ctx, int sta_id, int tid,
-			     int frame_limit);
+			     int frame_limit, u16 ssn);
 
 	void (*kick_nic)(struct iwl_trans *trans);
 
 	void (*free)(struct iwl_trans *trans);
 
-	void (*stop_queue)(struct iwl_trans *trans, int q);
+	void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
 
 	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
 	int (*check_stuck_queue)(struct iwl_trans *trans, int q);
@@ -207,17 +206,54 @@
 #endif
 };
 
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+	dma_addr_t p_addr;	/* hardware address */
+	void *v_addr;		/* software address */
+	u32 len;		/* size in bytes */
+};
+
+struct fw_img {
+	struct fw_desc code;	/* firmware code image */
+	struct fw_desc data;	/* firmware data image */
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+	struct list_head list;
+	size_t cmd_len;
+	struct iwl_calib_hdr hdr;
+	/* data follows */
+};
+
 /**
  * struct iwl_trans - transport common data
  * @ops - pointer to iwl_trans_ops
  * @shrd - pointer to iwl_shared which holds shared data from the upper layer
  * @hcmd_lock: protects HCMD
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_rt: run time ucode image
+ * @ucode_init: init ucode image
+ * @ucode_wowlan: wake on wireless ucode image (optional)
+ * @nvm_device_type: indicates OTP or eeprom
+ * @calib_results: list head for init calibration results
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
 	struct iwl_shared *shrd;
 	spinlock_t hcmd_lock;
 
+	u8 ucode_write_complete;	/* the image write is complete */
+	struct fw_img ucode_rt;
+	struct fw_img ucode_init;
+	struct fw_img ucode_wowlan;
+
+	/* eeprom related variables */
+	int    nvm_device_type;
+
+	/* init calibration results */
+	struct list_head calib_results;
+
 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
 	char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -249,9 +285,10 @@
 }
 
 static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
-					    enum iwl_rxon_context_id ctx)
+					    enum iwl_rxon_context_id ctx,
+					    const char *msg)
 {
-	trans->ops->wake_any_queue(trans, ctx);
+	trans->ops->wake_any_queue(trans, ctx, msg);
 }
 
 
@@ -266,39 +303,38 @@
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id)
+		u8 sta_id, u8 tid)
 {
-	return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
+	return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
 }
 
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
 				 int tid, int txq_id, int ssn, u32 status,
 				 struct sk_buff_head *skbs)
 {
-	trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
+	return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
+				   status, skbs);
 }
 
 static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
-					    enum iwl_rxon_context_id ctx,
 					    int sta_id, int tid)
 {
-	return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
+	return trans->ops->tx_agg_disable(trans, sta_id, tid);
 }
 
 static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
-					 enum iwl_rxon_context_id ctx,
-					 int sta_id, int tid, u16 *ssn)
+					 int sta_id, int tid)
 {
-	return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
+	return trans->ops->tx_agg_alloc(trans, sta_id, tid);
 }
 
 
 static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
 					   enum iwl_rxon_context_id ctx,
 					   int sta_id, int tid,
-					   int frame_limit)
+					   int frame_limit, u16 ssn)
 {
-	trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
+	trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
 }
 
 static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
@@ -311,9 +347,10 @@
 	trans->ops->free(trans);
 }
 
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
+					const char *msg)
 {
-	trans->ops->stop_queue(trans, q);
+	trans->ops->stop_queue(trans, q, msg);
 }
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -348,4 +385,13 @@
 ******************************************************/
 extern const struct iwl_trans_ops trans_ops_pcie;
 
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+		      const void *data, size_t len);
+void iwl_dealloc_ucode(struct iwl_trans *trans);
+
+int iwl_send_calib_results(struct iwl_trans *trans);
+int iwl_calib_set(struct iwl_trans *trans,
+		  const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_trans *trans);
+
 #endif /* __iwl_trans_h__ */
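With the reworked ops above, the SSN and the aggregation state machine move out of the transport layer: tx_agg_alloc() only reserves a queue for a <sta,tid> pair, and tx_agg_setup() is handed the start sequence number explicitly. A hypothetical caller-side sketch using the inline wrappers declared in this header (the surrounding function is an illustration, not code from the patch):

	static int example_start_tx_agg(struct iwl_trans *trans,
					enum iwl_rxon_context_id ctx,
					int sta_id, int tid,
					u16 ssn, int frame_limit)
	{
		/* Reserve an AMPDU queue for this <sta,tid>; no ctx or ssn
		 * is needed at this point any more. */
		int ret = iwl_trans_tx_agg_alloc(trans, sta_id, tid);
		if (ret)
			return ret;

		/* The caller supplies the start SSN explicitly instead of the
		 * transport reading it from shrd->tid_data. */
		iwl_trans_tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
		return 0;
	}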
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
new file mode 100644
index 0000000..36a1b5b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -0,0 +1,758 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "iwl-wifi.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+#include "iwl-agn-calib.h"
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+
+static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
+	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
+	 0, COEX_UNASSOC_IDLE_FLAGS},
+	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
+	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
+	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
+	 0, COEX_CALIBRATION_FLAGS},
+	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
+	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
+	 0, COEX_CONNECTION_ESTAB_FLAGS},
+	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
+	 0, COEX_ASSOCIATED_IDLE_FLAGS},
+	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
+	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
+	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
+	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
+	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
+	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
+	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
+	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
+	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+{
+	if (desc->v_addr)
+		dma_free_coherent(bus->dev, desc->len,
+				  desc->v_addr, desc->p_addr);
+	desc->v_addr = NULL;
+	desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
+{
+	iwl_free_fw_desc(bus, &img->code);
+	iwl_free_fw_desc(bus, &img->data);
+}
+
+void iwl_dealloc_ucode(struct iwl_trans *trans)
+{
+	iwl_free_fw_img(bus(trans), &trans->ucode_rt);
+	iwl_free_fw_img(bus(trans), &trans->ucode_init);
+	iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
+}
+
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+		      const void *data, size_t len)
+{
+	if (!len) {
+		desc->v_addr = NULL;
+		return -EINVAL;
+	}
+
+	desc->v_addr = dma_alloc_coherent(bus->dev, len,
+					  &desc->p_addr, GFP_KERNEL);
+	if (!desc->v_addr)
+		return -ENOMEM;
+
+	desc->len = len;
+	memcpy(desc->v_addr, data, len);
+	return 0;
+}
+
+/*
+ * ucode
+ */
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
+				struct fw_desc *image, u32 dst_addr)
+{
+	struct iwl_bus *bus = bus(trans);
+	dma_addr_t phy_addr = image->p_addr;
+	u32 byte_cnt = image->len;
+	int ret;
+
+	trans->ucode_write_complete = 0;
+
+	iwl_write_direct32(bus,
+		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+	iwl_write_direct32(bus,
+		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+	iwl_write_direct32(bus,
+		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+	iwl_write_direct32(bus,
+		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+		(iwl_get_dma_hi_addr(phy_addr)
+			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+	iwl_write_direct32(bus,
+		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+	iwl_write_direct32(bus,
+		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
+		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
+		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+	IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
+	ret = wait_event_timeout(trans->shrd->wait_command_queue,
+				 trans->ucode_write_complete, 5 * HZ);
+	if (!ret) {
+		IWL_ERR(trans, "Could not load the %s uCode section\n",
+			name);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
+					enum iwl_ucode_type ucode_type)
+{
+	switch (ucode_type) {
+	case IWL_UCODE_INIT:
+		return &trans->ucode_init;
+	case IWL_UCODE_WOWLAN:
+		return &trans->ucode_wowlan;
+	case IWL_UCODE_REGULAR:
+		return &trans->ucode_rt;
+	case IWL_UCODE_NONE:
+		break;
+	}
+	return NULL;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+				   enum iwl_ucode_type ucode_type)
+{
+	int ret = 0;
+	struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
+
+
+	if (!image) {
+		IWL_ERR(trans, "Invalid ucode requested (%d)\n",
+			ucode_type);
+		return -EINVAL;
+	}
+
+	ret = iwl_load_section(trans, "INST", &image->code,
+				   IWLAGN_RTC_INST_LOWER_BOUND);
+	if (ret)
+		return ret;
+
+	return iwl_load_section(trans, "DATA", &image->data,
+				    IWLAGN_RTC_DATA_LOWER_BOUND);
+}
+
+/*
+ *  Calibration
+ */
+static int iwl_set_Xtal_calib(struct iwl_trans *trans)
+{
+	struct iwl_calib_xtal_freq_cmd cmd;
+	__le16 *xtal_calib =
+		(__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
+
+	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
+	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+	return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
+{
+	struct iwl_calib_temperature_offset_cmd cmd;
+	__le16 *offset_calib =
+		(__le16 *)iwl_eeprom_query_addr(trans->shrd,
+						EEPROM_RAW_TEMPERATURE);
+
+	memset(&cmd, 0, sizeof(cmd));
+	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
+	if (!(cmd.radio_sensor_offset))
+		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
+
+	IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
+			le16_to_cpu(cmd.radio_sensor_offset));
+	return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
+{
+	struct iwl_calib_temperature_offset_v2_cmd cmd;
+	__le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+				     EEPROM_KELVIN_TEMPERATURE);
+	__le16 *offset_calib_low =
+		(__le16 *)iwl_eeprom_query_addr(trans->shrd,
+						EEPROM_RAW_TEMPERATURE);
+	struct iwl_eeprom_calib_hdr *hdr;
+
+	memset(&cmd, 0, sizeof(cmd));
+	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
+							EEPROM_CALIB_ALL);
+	memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
+		sizeof(*offset_calib_high));
+	memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
+		sizeof(*offset_calib_low));
+	if (!(cmd.radio_sensor_offset_low)) {
+		IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
+		cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
+		cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
+	}
+	memcpy(&cmd.burntVoltageRef, &hdr->voltage,
+		sizeof(hdr->voltage));
+
+	IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
+			le16_to_cpu(cmd.radio_sensor_offset_high));
+	IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
+			le16_to_cpu(cmd.radio_sensor_offset_low));
+	IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
+			le16_to_cpu(cmd.burntVoltageRef));
+
+	return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
+{
+	struct iwl_calib_cfg_cmd calib_cfg_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = CALIBRATION_CFG_CMD,
+		.len = { sizeof(struct iwl_calib_cfg_cmd), },
+		.data = { &calib_cfg_cmd, },
+	};
+
+	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.flags =
+		IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
+
+	return iwl_trans_send_cmd(trans, &cmd);
+}
+
+int iwlagn_rx_calib_result(struct iwl_priv *priv,
+			    struct iwl_rx_mem_buffer *rxb,
+			    struct iwl_device_cmd *cmd)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+	/* reduce the size of the length field itself */
+	len -= 4;
+
+	if (iwl_calib_set(trans(priv), hdr, len))
+		IWL_ERR(priv, "Failed to record calibration data %d\n",
+			hdr->op_code);
+
+	return 0;
+}
+
+int iwl_init_alive_start(struct iwl_trans *trans)
+{
+	int ret;
+
+	if (cfg(trans)->bt_params &&
+	    cfg(trans)->bt_params->advanced_bt_coexist) {
+		/*
+		 * Tell uCode we are ready to perform calibration;
+		 * this needs to be done before any calibration.
+		 * No need to close the envelope since we are going
+		 * to load the runtime uCode later.
+		 */
+		ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
+			BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+		if (ret)
+			return ret;
+
+	}
+
+	ret = iwl_send_calib_cfg(trans);
+	if (ret)
+		return ret;
+
+	/**
+	 * temperature offset calibration is only needed for runtime ucode,
+	 * so prepare the value now.
+	 */
+	if (cfg(trans)->need_temp_offset_calib) {
+		if (cfg(trans)->temp_offset_v2)
+			return iwl_set_temperature_offset_calib_v2(trans);
+		else
+			return iwl_set_temperature_offset_calib(trans);
+	}
+
+	return 0;
+}
+
+static int iwl_send_wimax_coex(struct iwl_trans *trans)
+{
+	struct iwl_wimax_coex_cmd coex_cmd;
+
+	if (cfg(trans)->base_params->support_wimax_coexist) {
+		/* UnMask wake up src at associated sleep */
+		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+
+		/* UnMask wake up src at unassociated sleep */
+		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
+		memcpy(coex_cmd.sta_prio, cu_priorities,
+			sizeof(struct iwl_wimax_coex_event_entry) *
+			 COEX_NUM_OF_EVENTS);
+
+		/* enabling the coexistence feature */
+		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
+
+		/* enabling the priorities tables */
+		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
+	} else {
+		/* coexistence is disabled */
+		memset(&coex_cmd, 0, sizeof(coex_cmd));
+	}
+	return iwl_trans_send_cmd_pdu(trans,
+				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
+				sizeof(coex_cmd), &coex_cmd);
+}
+
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	0, 0, 0, 0, 0, 0, 0
+};
+
+void iwl_send_prio_tbl(struct iwl_trans *trans)
+{
+	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
+
+	memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+		sizeof(iwl_bt_prio_tbl));
+	if (iwl_trans_send_cmd_pdu(trans,
+				REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
+				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
+		IWL_ERR(trans, "failed to send BT prio tbl command\n");
+}
+
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
+{
+	struct iwl_bt_coex_prot_env_cmd env_cmd;
+	int ret;
+
+	env_cmd.action = action;
+	env_cmd.type = type;
+	ret = iwl_trans_send_cmd_pdu(trans,
+			       REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
+			       sizeof(env_cmd), &env_cmd);
+	if (ret)
+		IWL_ERR(trans, "failed to send BT env command\n");
+	return ret;
+}
+
+
+static int iwl_alive_notify(struct iwl_trans *trans)
+{
+	struct iwl_priv *priv = priv(trans);
+	struct iwl_rxon_context *ctx;
+	int ret;
+
+	if (!priv->tx_cmd_pool)
+		priv->tx_cmd_pool =
+			kmem_cache_create("iwl_dev_cmd",
+					  sizeof(struct iwl_device_cmd),
+					  sizeof(void *), 0, NULL);
+
+	if (!priv->tx_cmd_pool)
+		return -ENOMEM;
+
+	iwl_trans_tx_start(trans);
+	for_each_context(priv, ctx)
+		ctx->last_tx_rejected = false;
+
+	ret = iwl_send_wimax_coex(trans);
+	if (ret)
+		return ret;
+
+	if (!cfg(priv)->no_xtal_calib) {
+		ret = iwl_set_Xtal_calib(trans);
+		if (ret)
+			return ret;
+	}
+
+	return iwl_send_calib_results(trans);
+}
+
+
+/**
+ * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int iwl_verify_inst_sparse(struct iwl_bus *bus,
+				      struct fw_desc *fw_desc)
+{
+	__le32 *image = (__le32 *)fw_desc->v_addr;
+	u32 len = fw_desc->len;
+	u32 val;
+	u32 i;
+
+	IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
+
+	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IWL_DL_IO is set */
+		iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
+			i + IWLAGN_RTC_INST_LOWER_BOUND);
+		val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image))
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static void iwl_print_mismatch_inst(struct iwl_bus *bus,
+				    struct fw_desc *fw_desc)
+{
+	__le32 *image = (__le32 *)fw_desc->v_addr;
+	u32 len = fw_desc->len;
+	u32 val;
+	u32 offs;
+	int errors = 0;
+
+	IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
+
+	iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
+			   IWLAGN_RTC_INST_LOWER_BOUND);
+
+	for (offs = 0;
+	     offs < len && errors < 20;
+	     offs += sizeof(u32), image++) {
+		/* read data comes through single port, auto-incr addr */
+		val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			IWL_ERR(bus, "uCode INST section at "
+				"offset 0x%x, is 0x%x, s/b 0x%x\n",
+				offs, val, le32_to_cpu(*image));
+			errors++;
+		}
+	}
+}
+
+/**
+ * iwl_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+static int iwl_verify_ucode(struct iwl_trans *trans,
+			    enum iwl_ucode_type ucode_type)
+{
+	struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+
+	if (!img) {
+		IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+		return -EINVAL;
+	}
+
+	if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
+		IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+
+	iwl_print_mismatch_inst(bus(trans), &img->code);
+	return -EIO;
+}
+
+struct iwl_alive_data {
+	bool valid;
+	u8 subtype;
+};
+
+static void iwl_alive_fn(struct iwl_trans *trans,
+			    struct iwl_rx_packet *pkt,
+			    void *data)
+{
+	struct iwl_alive_data *alive_data = data;
+	struct iwl_alive_resp *palive;
+
+	palive = &pkt->u.alive_frame;
+
+	IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
+		       "0x%01X 0x%01X\n",
+		       palive->is_valid, palive->ver_type,
+		       palive->ver_subtype);
+
+	trans->shrd->device_pointers.error_event_table =
+		le32_to_cpu(palive->error_event_table_ptr);
+	trans->shrd->device_pointers.log_event_table =
+		le32_to_cpu(palive->log_event_table_ptr);
+
+	alive_data->subtype = palive->ver_subtype;
+	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
+}
+
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+				   struct iwl_notification_wait *wait_entry,
+				   u8 cmd,
+				   void (*fn)(struct iwl_trans *trans,
+					      struct iwl_rx_packet *pkt,
+					      void *data),
+				   void *fn_data)
+{
+	wait_entry->fn = fn;
+	wait_entry->fn_data = fn_data;
+	wait_entry->cmd = cmd;
+	wait_entry->triggered = false;
+	wait_entry->aborted = false;
+
+	spin_lock_bh(&shrd->notif_wait_lock);
+	list_add(&wait_entry->list, &shrd->notif_waits);
+	spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+			     struct iwl_notification_wait *wait_entry,
+			     unsigned long timeout)
+{
+	int ret;
+
+	ret = wait_event_timeout(shrd->notif_waitq,
+				 wait_entry->triggered || wait_entry->aborted,
+				 timeout);
+
+	spin_lock_bh(&shrd->notif_wait_lock);
+	list_del(&wait_entry->list);
+	spin_unlock_bh(&shrd->notif_wait_lock);
+
+	if (wait_entry->aborted)
+		return -EIO;
+
+	/* return value is always >= 0 */
+	if (ret <= 0)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+				struct iwl_notification_wait *wait_entry)
+{
+	spin_lock_bh(&shrd->notif_wait_lock);
+	list_del(&wait_entry->list);
+	spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+	unsigned long flags;
+	struct iwl_notification_wait *wait_entry;
+
+	spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+	list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+		wait_entry->aborted = true;
+	spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+	wake_up_all(&shrd->notif_waitq);
+}
+
+#define UCODE_ALIVE_TIMEOUT	HZ
+#define UCODE_CALIB_TIMEOUT	(2*HZ)
+
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+				 enum iwl_ucode_type ucode_type)
+{
+	struct iwl_notification_wait alive_wait;
+	struct iwl_alive_data alive_data;
+	int ret;
+	enum iwl_ucode_type old_type;
+
+	ret = iwl_trans_start_device(trans);
+	if (ret)
+		return ret;
+
+	iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+				      iwl_alive_fn, &alive_data);
+
+	old_type = trans->shrd->ucode_type;
+	trans->shrd->ucode_type = ucode_type;
+
+	ret = iwl_load_given_ucode(trans, ucode_type);
+	if (ret) {
+		trans->shrd->ucode_type = old_type;
+		iwl_remove_notification(trans->shrd, &alive_wait);
+		return ret;
+	}
+
+	iwl_trans_kick_nic(trans);
+
+	/*
+	 * Some things may run in the background now, but we
+	 * just wait for the ALIVE notification here.
+	 */
+	ret = iwl_wait_notification(trans->shrd, &alive_wait,
+					UCODE_ALIVE_TIMEOUT);
+	if (ret) {
+		trans->shrd->ucode_type = old_type;
+		return ret;
+	}
+
+	if (!alive_data.valid) {
+		IWL_ERR(trans, "Loaded ucode is not valid!\n");
+		trans->shrd->ucode_type = old_type;
+		return -EIO;
+	}
+
+	/*
+	 * This step takes a long time (60-80ms!!) and
+	 * WoWLAN image should be loaded quickly, so
+	 * skip it for WoWLAN.
+	 */
+	if (ucode_type != IWL_UCODE_WOWLAN) {
+		ret = iwl_verify_ucode(trans, ucode_type);
+		if (ret) {
+			trans->shrd->ucode_type = old_type;
+			return ret;
+		}
+
+		/* delay a bit to give rfkill time to run */
+		msleep(5);
+	}
+
+	ret = iwl_alive_notify(trans);
+	if (ret) {
+		IWL_WARN(trans,
+			"Could not complete ALIVE transition: %d\n", ret);
+		trans->shrd->ucode_type = old_type;
+		return ret;
+	}
+
+	return 0;
+}
+
+int iwl_run_init_ucode(struct iwl_trans *trans)
+{
+	struct iwl_notification_wait calib_wait;
+	int ret;
+
+	lockdep_assert_held(&trans->shrd->mutex);
+
+	/* No init ucode required? Curious, but maybe ok */
+	if (!trans->ucode_init.code.len)
+		return 0;
+
+	if (trans->shrd->ucode_type != IWL_UCODE_NONE)
+		return 0;
+
+	iwl_init_notification_wait(trans->shrd, &calib_wait,
+				      CALIBRATION_COMPLETE_NOTIFICATION,
+				      NULL, NULL);
+
+	/* Will also start the device */
+	ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
+	if (ret)
+		goto error;
+
+	ret = iwl_init_alive_start(trans);
+	if (ret)
+		goto error;
+
+	/*
+	 * Some things may run in the background now, but we
+	 * just wait for the calibration complete notification.
+	 */
+	ret = iwl_wait_notification(trans->shrd, &calib_wait,
+					UCODE_CALIB_TIMEOUT);
+
+	goto out;
+
+ error:
+	iwl_remove_notification(trans->shrd, &calib_wait);
+ out:
+	/* Whatever happened, stop the device */
+	iwl_trans_stop_device(trans);
+	return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlwifi/iwl-wifi.h
similarity index 85%
rename from drivers/net/wireless/iwlegacy/iwl-4965-calib.h
rename to drivers/net/wireless/iwlwifi/iwl-wifi.h
index f46c80e..1850110 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-wifi.h
@@ -59,17 +59,16 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
-#ifndef __iwl_4965_calib_h__
-#define __iwl_4965_calib_h__
 
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-commands.h"
+#ifndef __iwl_wifi_h__
+#define __iwl_wifi_h__
 
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
-void iwl4965_init_sensitivity(struct iwl_priv *priv);
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
-void iwl4965_calib_free_results(struct iwl_priv *priv);
+#include "iwl-shared.h"
 
-#endif /* __iwl_4965_calib_h__ */
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
+int iwl_init_alive_start(struct iwl_trans *trans);
+int iwl_run_init_ucode(struct iwl_trans *trans);
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+				 enum iwl_ucode_type ucode_type);
+#endif  /* __iwl_wifi_h__ */
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index c42be81..48e8218 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -165,11 +165,15 @@
 				struct key_params *params)
 {
 	struct iwm_priv *iwm = ndev_to_iwm(ndev);
-	struct iwm_key *key = &iwm->keys[key_index];
+	struct iwm_key *key;
 	int ret;
 
 	IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
 
+	if (key_index >= IWM_NUM_KEYS)
+		return -ENOENT;
+
+	key = &iwm->keys[key_index];
 	memset(key, 0, sizeof(struct iwm_key));
 	ret = iwm_key_init(key, key_index, mac_addr, params);
 	if (ret < 0) {
@@ -214,8 +218,12 @@
 				u8 key_index, bool pairwise, const u8 *mac_addr)
 {
 	struct iwm_priv *iwm = ndev_to_iwm(ndev);
-	struct iwm_key *key = &iwm->keys[key_index];
+	struct iwm_key *key;
 
+	if (key_index >= IWM_NUM_KEYS)
+		return -ENOENT;
+
+	key = &iwm->keys[key_index];
 	if (!iwm->keys[key_index].key_len) {
 		IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
 		return 0;
@@ -236,6 +244,9 @@
 
 	IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
 
+	if (key_index >= IWM_NUM_KEYS)
+		return -ENOENT;
+
 	if (!iwm->keys[key_index].key_len) {
 		IWM_ERR(iwm, "Key %d not used\n", key_index);
 		return -EINVAL;
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 98a179f..1f868b1 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -91,11 +91,11 @@
 	.mac_addr		= {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
 };
 
-static int modparam_reset;
+static bool modparam_reset;
 module_param_named(reset, modparam_reset, bool, 0644);
 MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
 
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
 module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
 MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
 
@@ -130,7 +130,7 @@
 		iwm_invalidate_mlme_profile(iwm);
 
 	clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
-	iwm->umac_profile_active = 0;
+	iwm->umac_profile_active = false;
 	memset(iwm->bssid, 0, ETH_ALEN);
 	iwm->channel = 0;
 
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a414768..7d708f4 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -660,7 +660,7 @@
 	clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
 	clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
 
-	iwm->umac_profile_active = 0;
+	iwm->umac_profile_active = false;
 	memset(iwm->bssid, 0, ETH_ALEN);
 	iwm->channel = 0;
 
@@ -735,7 +735,7 @@
 			     umac_sta->mac_addr,
 			     umac_sta->flags & UMAC_STA_FLAG_QOS);
 
-		sta->valid = 1;
+		sta->valid = true;
 		sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
 		sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
 		memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@
 		sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
 
 		if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
-			sta->valid = 0;
+			sta->valid = false;
 
 		break;
 	case UMAC_OPCODE_CLEAR_ALL:
 		for (i = 0; i < IWM_STA_TABLE_NUM; i++)
-			iwm->sta_table[i].valid = 0;
+			iwm->sta_table[i].valid = false;
 
 		break;
 	default:
@@ -1203,7 +1203,7 @@
 
 	switch (hdr->oid) {
 	case UMAC_WIFI_IF_CMD_SET_PROFILE:
-		iwm->umac_profile_active = 1;
+		iwm->umac_profile_active = true;
 		break;
 	default:
 		break;
@@ -1363,7 +1363,7 @@
 	 */
 	list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
 		if (cmd->seq_num == seq_num) {
-			cmd->resp_received = 1;
+			cmd->resp_received = true;
 			cmd->buf.len = buf_size;
 			memcpy(cmd->buf.hdr, buf, buf_size);
 			wake_up_interruptible(&iwm->nonwifi_queue);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a7f1ab2..a7cd311 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -485,6 +485,7 @@
 static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
 	struct cmd_header *resp)
 {
+	struct cfg80211_bss *bss;
 	struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
 	int bsssize;
 	const u8 *pos;
@@ -632,12 +633,14 @@
 				     LBS_SCAN_RSSI_TO_MBM(rssi)/100);
 
 			if (channel &&
-			    !(channel->flags & IEEE80211_CHAN_DISABLED))
-				cfg80211_inform_bss(wiphy, channel,
+			    !(channel->flags & IEEE80211_CHAN_DISABLED)) {
+				bss = cfg80211_inform_bss(wiphy, channel,
 					bssid, get_unaligned_le64(tsfdesc),
 					capa, intvl, ie, ielen,
 					LBS_SCAN_RSSI_TO_MBM(rssi),
 					GFP_KERNEL);
+				cfg80211_put_bss(bss);
+			}
 		} else
 			lbs_deb_scan("scan response: missing BSS channel IE\n");
 
@@ -728,9 +731,11 @@
 		le16_to_cpu(scan_cmd->hdr.size),
 		lbs_ret_scan, 0);
 
-	if (priv->scan_channel >= priv->scan_req->n_channels)
+	if (priv->scan_channel >= priv->scan_req->n_channels) {
 		/* Mark scan done */
+		cancel_delayed_work(&priv->scan_work);
 		lbs_scan_done(priv);
+	}
 
 	/* Restart network */
 	if (carrier)
@@ -759,12 +764,12 @@
 		request->n_ssids, request->n_channels, request->ie_len);
 
 	priv->scan_channel = 0;
-	queue_delayed_work(priv->work_thread, &priv->scan_work,
-		msecs_to_jiffies(50));
-
 	priv->scan_req = request;
 	priv->internal_scan = internal;
 
+	queue_delayed_work(priv->work_thread, &priv->scan_work,
+		msecs_to_jiffies(50));
+
 	lbs_deb_leave(LBS_DEB_CFG80211);
 }
 
@@ -1720,6 +1725,7 @@
 		   2 + 2 +                      /* atim */
 		   2 + 8];                      /* extended rates */
 	u8 *fake = fake_ie;
+	struct cfg80211_bss *bss;
 
 	lbs_deb_enter(LBS_DEB_CFG80211);
 
@@ -1763,14 +1769,15 @@
 	*fake++ = 0x6c;
 	lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
 
-	cfg80211_inform_bss(priv->wdev->wiphy,
-			    params->channel,
-			    bssid,
-			    0,
-			    capability,
-			    params->beacon_interval,
-			    fake_ie, fake - fake_ie,
-			    0, GFP_KERNEL);
+	bss = cfg80211_inform_bss(priv->wdev->wiphy,
+				  params->channel,
+				  bssid,
+				  0,
+				  capability,
+				  params->beacon_interval,
+				  fake_ie, fake - fake_ie,
+				  0, GFP_KERNEL);
+	cfg80211_put_bss(bss);
 
 	memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
 	priv->wdev->ssid_len = params->ssid_len;
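
Note: cfg80211_inform_bss() hands back the BSS entry with a reference held for the caller, so a caller that does not keep the pointer must release it with cfg80211_put_bss() (single-argument form in this kernel), which is what the hunks above add. A rough sketch of the idiom, assuming the wiphy, channel and frame fields are already at hand:

	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss(wiphy, channel, bssid, timestamp,
				  capability, beacon_interval,
				  ie, ie_len, signal_mbm, GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(bss);	/* drop the reference taken on our behalf */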
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index d8d8f0d..c192671 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -704,7 +704,7 @@
 
 struct lbs_debugfs_files {
 	const char *name;
-	int perm;
+	umode_t perm;
 	struct file_operations fops;
 };
 
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 885ddc1..f955b2d 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -13,13 +13,14 @@
 {
 	struct lbs_private *priv = dev->ml_priv;
 
-	snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
+	snprintf(info->fw_version, sizeof(info->fw_version),
+		"%u.%u.%u.p%u",
 		priv->fwrelease >> 24 & 0xff,
 		priv->fwrelease >> 16 & 0xff,
 		priv->fwrelease >>  8 & 0xff,
 		priv->fwrelease       & 0xff);
-	strcpy(info->driver, "libertas");
-	strcpy(info->version, lbs_driver_version);
+	strlcpy(info->driver, "libertas", sizeof(info->driver));
+	strlcpy(info->version, lbs_driver_version, sizeof(info->version));
 }
 
 /*
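
Note: the ethtool hunk above replaces unbounded strcpy() and a hard-coded length with copies bounded by the destination's real size. Sketch of the pattern (helper name illustrative):

	static void example_fill_drvinfo(struct ethtool_drvinfo *info, u32 fwrelease)
	{
		snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.p%u",
			 fwrelease >> 24 & 0xff, fwrelease >> 16 & 0xff,
			 fwrelease >>  8 & 0xff, fwrelease       & 0xff);
		strlcpy(info->driver, "libertas", sizeof(info->driver));
	}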
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index e269351..3f7bf4d 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -859,7 +859,7 @@
 	 * Most of the libertas cards can do unaligned register access, but some
 	 * weird ones cannot. That's especially true for the CF8305 card.
 	 */
-	card->align_regs = 0;
+	card->align_regs = false;
 
 	card->model = get_model(p_dev->manf_id, p_dev->card_id);
 	if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@
 	/* Check if we have a current silicon */
 	prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
 	if (card->model == MODEL_8305) {
-		card->align_regs = 1;
+		card->align_regs = true;
 		if (prod_id < IF_CS_CF8305_B1_REV) {
 			pr_err("8305 rev B0 and older are not supported\n");
 			ret = -ENODEV;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 728baa4..50b1ee7 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1291,7 +1291,6 @@
 	.remove = __devexit_p(libertas_spi_remove),
 	.driver = {
 		.name	= "libertas_spi",
-		.bus	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 		.pm	= &if_spi_pm_ops,
 	},
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index db879c3..b5fbbc7 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -1184,29 +1184,7 @@
 	.reset_resume = if_usb_resume,
 };
 
-static int __init if_usb_init_module(void)
-{
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	ret = usb_register(&if_usb_driver);
-
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-
-static void __exit if_usb_exit_module(void)
-{
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	usb_deregister(&if_usb_driver);
-
-	lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-module_init(if_usb_init_module);
-module_exit(if_usb_exit_module);
+module_usb_driver(if_usb_driver);
 
 MODULE_DESCRIPTION("8388 USB WLAN Driver");
 MODULE_AUTHOR("Marvell International Ltd. and Red Hat, Inc.");
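
Note: module_usb_driver() generates the registration boilerplate that the deleted init/exit functions spelled out (minus the driver's debug trace calls). Roughly what it expands to, as a sketch:

	static int __init if_usb_driver_init(void)
	{
		return usb_register(&if_usb_driver);
	}
	module_init(if_usb_driver_init);

	static void __exit if_usb_driver_exit(void)
	{
		usb_deregister(&if_usb_driver);
	}
	module_exit(if_usb_driver_exit);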
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 68202e6..aff8b57 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -924,27 +924,7 @@
 	.resume = if_usb_resume,
 };
 
-static int __init if_usb_init_module(void)
-{
-	int ret = 0;
-
-	lbtf_deb_enter(LBTF_DEB_MAIN);
-
-	ret = usb_register(&if_usb_driver);
-
-	lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-
-static void __exit if_usb_exit_module(void)
-{
-	lbtf_deb_enter(LBTF_DEB_MAIN);
-	usb_deregister(&if_usb_driver);
-	lbtf_deb_leave(LBTF_DEB_MAIN);
-}
-
-module_init(if_usb_init_module);
-module_exit(if_usb_exit_module);
+module_usb_driver(if_usb_driver);
 
 MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
 MODULE_AUTHOR("Cozybit Inc.");
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ceb51b6..a034572 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -719,11 +719,11 @@
 		return;
 
 	if (skb_queue_empty(&priv->bc_ps_buf)) {
-		bool tx_buff_bc = 0;
+		bool tx_buff_bc = false;
 
 		while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
 			skb_queue_tail(&priv->bc_ps_buf, skb);
-			tx_buff_bc = 1;
+			tx_buff_bc = true;
 		}
 		if (tx_buff_bc) {
 			ieee80211_stop_queues(priv->hw);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 523ad55..4b9e730 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -37,7 +37,8 @@
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
-int wmediumd_pid;
+static u32 wmediumd_pid;
+
 static int radios = 2;
 module_param(radios, int, 0444);
 MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -665,7 +666,7 @@
 {
 	bool ack;
 	struct ieee80211_tx_info *txi;
-	int _pid;
+	u32 _pid;
 
 	mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -676,7 +677,7 @@
 	}
 
 	/* wmediumd mode check */
-	_pid = wmediumd_pid;
+	_pid = ACCESS_ONCE(wmediumd_pid);
 
 	if (_pid)
 		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -707,7 +708,7 @@
 {
 	struct mac80211_hwsim_data *data = hw->priv;
 	wiphy_debug(hw->wiphy, "%s\n", __func__);
-	data->started = 1;
+	data->started = true;
 	return 0;
 }
 
@@ -715,7 +716,7 @@
 static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
 {
 	struct mac80211_hwsim_data *data = hw->priv;
-	data->started = 0;
+	data->started = false;
 	del_timer(&data->beacon_timer);
 	wiphy_debug(hw->wiphy, "%s\n", __func__);
 }
@@ -764,7 +765,7 @@
 	struct ieee80211_hw *hw = arg;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
-	int _pid;
+	u32 _pid;
 
 	hwsim_check_magic(vif);
 
@@ -781,7 +782,7 @@
 	mac80211_hwsim_monitor_rx(hw, skb);
 
 	/* wmediumd mode check */
-	_pid = wmediumd_pid;
+	_pid = ACCESS_ONCE(wmediumd_pid);
 
 	if (_pid)
 		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1254,7 +1255,7 @@
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_pspoll *pspoll;
-	int _pid;
+	u32 _pid;
 
 	if (!vp->assoc)
 		return;
@@ -1275,7 +1276,7 @@
 	memcpy(pspoll->ta, mac, ETH_ALEN);
 
 	/* wmediumd mode check */
-	_pid = wmediumd_pid;
+	_pid = ACCESS_ONCE(wmediumd_pid);
 
 	if (_pid)
 		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1292,7 +1293,7 @@
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
-	int _pid;
+	u32 _pid;
 
 	if (!vp->assoc)
 		return;
@@ -1314,7 +1315,7 @@
 	memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
 
 	/* wmediumd mode check */
-	_pid = wmediumd_pid;
+	_pid = ACCESS_ONCE(wmediumd_pid);
 
 	if (_pid)
 		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1634,8 +1635,6 @@
 	int rc;
 	printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
 
-	wmediumd_pid = 0;
-
 	rc = genl_register_family_with_ops(&hwsim_genl_family,
 		hwsim_ops, ARRAY_SIZE(hwsim_ops));
 	if (rc)
@@ -1748,6 +1747,8 @@
 			    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
 			    IEEE80211_HW_AMPDU_AGGREGATION;
 
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
 		/* ask mac80211 to reserve space for magic */
 		hw->vif_data_size = sizeof(struct hwsim_vif_priv);
 		hw->sta_data_size = sizeof(struct hwsim_sta_priv);
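
Note: wmediumd_pid becomes static u32 and every transmit path reads it through ACCESS_ONCE(), so the test and the use see one consistent snapshot even if the netlink handler rewrites it concurrently. The idiom, sketched against this driver's own helper:

	u32 pid = ACCESS_ONCE(wmediumd_pid);	/* read exactly once */

	if (pid)	/* the same snapshot is tested and passed on */
		return mac80211_hwsim_tx_frame_nl(hw, skb, pid);
	/* otherwise fall back to in-kernel delivery */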
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 7aa9aa0..681d3f2 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -33,7 +33,7 @@
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
  */
-static int
+static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 					 struct mwifiex_rx_reorder_tbl
 					 *rx_reor_tbl_ptr, int start_win)
@@ -71,8 +71,6 @@
 
 	rx_reor_tbl_ptr->start_win = start_win;
 	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
-	return 0;
 }
 
 /*
@@ -83,7 +81,7 @@
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
  */
-static int
+static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 			      struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
 {
@@ -119,7 +117,6 @@
 	rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
 		&(MAX_TID_VALUE - 1);
 	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-	return 0;
 }
 
 /*
@@ -405,7 +402,7 @@
 				u8 *ta, u8 pkt_type, void *payload)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
-	int start_win, end_win, win_size, ret;
+	int start_win, end_win, win_size;
 	u16 pkt_index;
 
 	rx_reor_tbl_ptr =
@@ -452,11 +449,8 @@
 			start_win = (end_win - win_size) + 1;
 		else
 			start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
-		ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
+		mwifiex_11n_dispatch_pkt_until_start_win(priv,
 						rx_reor_tbl_ptr, start_win);
-
-		if (ret)
-			return ret;
 	}
 
 	if (pkt_type != PKT_TYPE_BAR) {
@@ -475,9 +469,9 @@
 	 * Dispatch all packets sequentially from start_win until a
 	 * hole is found and adjust the start_win appropriately
 	 */
-	ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
+	mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
 
-	return ret;
+	return 0;
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8f2797a..2a078ce 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@
 	  mwifiex.
 
 config MWIFIEX_SDIO
-	tristate "Marvell WiFi-Ex Driver for SD8787"
+	tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
 	depends on MWIFIEX && MMC
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8787 chipset with SDIO interface.
+	  8787/8797 chipsets with SDIO interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 462c710..c3b6c46 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -24,50 +24,26 @@
  * This function maps the nl802.11 channel type into driver channel type.
  *
  * The mapping is as follows -
- *      NL80211_CHAN_NO_HT     -> NO_SEC_CHANNEL
- *      NL80211_CHAN_HT20      -> NO_SEC_CHANNEL
- *      NL80211_CHAN_HT40PLUS  -> SEC_CHANNEL_ABOVE
- *      NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW
- *      Others                 -> NO_SEC_CHANNEL
+ *      NL80211_CHAN_NO_HT     -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ *      NL80211_CHAN_HT20      -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ *      NL80211_CHAN_HT40PLUS  -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE
+ *      NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
+ *      Others                 -> IEEE80211_HT_PARAM_CHA_SEC_NONE
  */
-static int
-mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type
-						  channel_type)
+static u8
+mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type
+						 channel_type)
 {
 	switch (channel_type) {
 	case NL80211_CHAN_NO_HT:
 	case NL80211_CHAN_HT20:
-		return NO_SEC_CHANNEL;
+		return IEEE80211_HT_PARAM_CHA_SEC_NONE;
 	case NL80211_CHAN_HT40PLUS:
-		return SEC_CHANNEL_ABOVE;
+		return IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
 	case NL80211_CHAN_HT40MINUS:
-		return SEC_CHANNEL_BELOW;
+		return IEEE80211_HT_PARAM_CHA_SEC_BELOW;
 	default:
-		return NO_SEC_CHANNEL;
-	}
-}
-
-/*
- * This function maps the driver channel type into nl802.11 channel type.
- *
- * The mapping is as follows -
- *      NO_SEC_CHANNEL      -> NL80211_CHAN_HT20
- *      SEC_CHANNEL_ABOVE   -> NL80211_CHAN_HT40PLUS
- *      SEC_CHANNEL_BELOW   -> NL80211_CHAN_HT40MINUS
- *      Others              -> NL80211_CHAN_HT20
- */
-static enum nl80211_channel_type
-mwifiex_channels_to_cfg80211_channel_type(int channel_type)
-{
-	switch (channel_type) {
-	case NO_SEC_CHANNEL:
-		return NL80211_CHAN_HT20;
-	case SEC_CHANNEL_ABOVE:
-		return NL80211_CHAN_HT40PLUS;
-	case SEC_CHANNEL_BELOW:
-		return NL80211_CHAN_HT40MINUS;
-	default:
-		return NL80211_CHAN_HT20;
+		return IEEE80211_HT_PARAM_CHA_SEC_NONE;
 	}
 }
 
@@ -120,10 +96,11 @@
 static int
 mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
 			      enum nl80211_tx_power_setting type,
-			      int dbm)
+			      int mbm)
 {
 	struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
 	struct mwifiex_power_cfg power_cfg;
+	int dbm = MBM_TO_DBM(mbm);
 
 	if (type == NL80211_TX_POWER_FIXED) {
 		power_cfg.is_power_auto = 0;
@@ -330,37 +307,51 @@
 		       enum nl80211_channel_type channel_type)
 {
 	struct mwifiex_chan_freq_power cfp;
-	struct mwifiex_ds_band_cfg band_cfg;
 	u32 config_bands = 0;
 	struct wiphy *wiphy = priv->wdev->wiphy;
+	struct mwifiex_adapter *adapter = priv->adapter;
 
 	if (chan) {
-		memset(&band_cfg, 0, sizeof(band_cfg));
 		/* Set appropriate bands */
-		if (chan->band == IEEE80211_BAND_2GHZ)
-			config_bands = BAND_B | BAND_G | BAND_GN;
-		else
-			config_bands = BAND_AN | BAND_A;
-		if (priv->bss_mode == NL80211_IFTYPE_STATION
-		    || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) {
-			band_cfg.config_bands = config_bands;
-		} else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
-			band_cfg.config_bands = config_bands;
-			band_cfg.adhoc_start_band = config_bands;
+		if (chan->band == IEEE80211_BAND_2GHZ) {
+			if (channel_type == NL80211_CHAN_NO_HT)
+				if (priv->adapter->config_bands == BAND_B ||
+					  priv->adapter->config_bands == BAND_G)
+					config_bands =
+						priv->adapter->config_bands;
+				else
+					config_bands = BAND_B | BAND_G;
+			else
+				config_bands = BAND_B | BAND_G | BAND_GN;
+		} else {
+			if (channel_type == NL80211_CHAN_NO_HT)
+				config_bands = BAND_A;
+			else
+				config_bands = BAND_AN | BAND_A;
 		}
 
-		band_cfg.sec_chan_offset =
-			mwifiex_cfg80211_channel_type_to_mwifiex_channels
+		if (!((config_bands | adapter->fw_bands) &
+						~adapter->fw_bands)) {
+			adapter->config_bands = config_bands;
+			if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+				adapter->adhoc_start_band = config_bands;
+				if ((config_bands & BAND_GN) ||
+						(config_bands & BAND_AN))
+					adapter->adhoc_11n_enabled = true;
+				else
+					adapter->adhoc_11n_enabled = false;
+			}
+		}
+		adapter->sec_chan_offset =
+			mwifiex_cfg80211_channel_type_to_sec_chan_offset
 			(channel_type);
-
-		if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
-			return -EFAULT;
+		adapter->channel_type = channel_type;
 
 		mwifiex_send_domain_info_cmd_fw(wiphy);
 	}
 
 	wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
-		"mode %d\n", config_bands, band_cfg.sec_chan_offset,
+		"mode %d\n", config_bands, adapter->sec_chan_offset,
 		priv->bss_mode);
 	if (!chan)
 		return 0;
@@ -696,9 +687,9 @@
 				const u8 *peer,
 				const struct cfg80211_bitrate_mask *mask)
 {
-	struct mwifiex_ds_band_cfg band_cfg;
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	int index = 0, mode = 0, i;
+	struct mwifiex_adapter *adapter = priv->adapter;
 
 	/* Currently only 2.4GHz is supported */
 	for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
@@ -720,16 +711,15 @@
 			mode |=  BAND_B;
 	}
 
-	memset(&band_cfg, 0, sizeof(band_cfg));
-	band_cfg.config_bands = mode;
-
-	if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
-		band_cfg.adhoc_start_band = mode;
-
-	band_cfg.sec_chan_offset = NO_SEC_CHANNEL;
-
-	if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
-		return -EFAULT;
+	if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) {
+		adapter->config_bands = mode;
+		if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+			adapter->adhoc_start_band = mode;
+			adapter->adhoc_11n_enabled = false;
+		}
+	}
+	adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+	adapter->channel_type = NL80211_CHAN_NO_HT;
 
 	wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
 				(mode & BAND_B) ? "b" : "",
@@ -750,17 +740,13 @@
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
-	if (priv->disconnect)
-		return -EBUSY;
-
-	priv->disconnect = 1;
 	if (mwifiex_deauthenticate(priv, NULL))
 		return -EFAULT;
 
 	wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
 		" reason code %d\n", priv->cfg_bssid, reason_code);
 
-	queue_work(priv->workqueue, &priv->cfg_workqueue);
+	memset(priv->cfg_bssid, 0, ETH_ALEN);
 
 	return 0;
 }
@@ -780,6 +766,7 @@
 {
 	struct ieee80211_channel *chan;
 	struct mwifiex_bss_info bss_info;
+	struct cfg80211_bss *bss;
 	int ie_len;
 	u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
 	enum ieee80211_band band;
@@ -800,9 +787,10 @@
 			ieee80211_channel_to_frequency(bss_info.bss_chan,
 						       band));
 
-	cfg80211_inform_bss(priv->wdev->wiphy, chan,
+	bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
 		bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
 		0, ie_buf, ie_len, 0, GFP_KERNEL);
+	cfg80211_put_bss(bss);
 	memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
 
 	return 0;
@@ -851,8 +839,7 @@
 
 	if (channel)
 		ret = mwifiex_set_rf_channel(priv, channel,
-				mwifiex_channels_to_cfg80211_channel_type
-				(priv->adapter->chan_offset));
+						priv->adapter->channel_type);
 
 	ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);	/* Disable keys */
 
@@ -978,27 +965,32 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	int ret = 0;
 
-	if (priv->assoc_request)
-		return -EBUSY;
-
 	if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
 		wiphy_err(wiphy, "received infra assoc request "
 				"when station is in ibss mode\n");
 		goto done;
 	}
 
-	priv->assoc_request = -EINPROGRESS;
-
 	wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
 	       (char *) sme->ssid, sme->bssid);
 
 	ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
 				     priv->bss_mode, sme->channel, sme, 0);
-
-	priv->assoc_request = 1;
 done:
-	priv->assoc_result = ret;
-	queue_work(priv->workqueue, &priv->cfg_workqueue);
+	if (!ret) {
+		cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
+					NULL, 0, WLAN_STATUS_SUCCESS,
+					GFP_KERNEL);
+		dev_dbg(priv->adapter->dev,
+			"info: associated to bssid %pM successfully\n",
+			priv->cfg_bssid);
+	} else {
+		dev_dbg(priv->adapter->dev,
+			"info: association to bssid %pM failed\n",
+			priv->cfg_bssid);
+		memset(priv->cfg_bssid, 0, ETH_ALEN);
+	}
+
 	return ret;
 }
 
@@ -1015,28 +1007,29 @@
 	struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
 	int ret = 0;
 
-	if (priv->ibss_join_request)
-		return -EBUSY;
-
 	if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
 		wiphy_err(wiphy, "request to join ibss received "
 				"when station is not in ibss mode\n");
 		goto done;
 	}
 
-	priv->ibss_join_request = -EINPROGRESS;
-
 	wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
 	       (char *) params->ssid, params->bssid);
 
 	ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
 				params->bssid, priv->bss_mode,
 				params->channel, NULL, params->privacy);
-
-	priv->ibss_join_request = 1;
 done:
-	priv->ibss_join_result = ret;
-	queue_work(priv->workqueue, &priv->cfg_workqueue);
+	if (!ret) {
+		cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+		dev_dbg(priv->adapter->dev,
+			"info: joined/created adhoc network with bssid"
+			" %pM successfully\n", priv->cfg_bssid);
+	} else {
+		dev_dbg(priv->adapter->dev,
+			"info: failed creating/joining adhoc network\n");
+	}
+
 	return ret;
 }
 
@@ -1051,17 +1044,12 @@
 {
 	struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
 
-	if (priv->disconnect)
-		return -EBUSY;
-
-	priv->disconnect = 1;
-
 	wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
 			priv->cfg_bssid);
 	if (mwifiex_deauthenticate(priv, NULL))
 		return -EFAULT;
 
-	queue_work(priv->workqueue, &priv->cfg_workqueue);
+	memset(priv->cfg_bssid, 0, ETH_ALEN);
 
 	return 0;
 }
@@ -1078,15 +1066,42 @@
 		      struct cfg80211_scan_request *request)
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+	int i;
+	struct ieee80211_channel *chan;
 
 	wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
 
-	if (priv->scan_request && priv->scan_request != request)
-		return -EBUSY;
-
 	priv->scan_request = request;
 
-	queue_work(priv->workqueue, &priv->cfg_workqueue);
+	priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
+					GFP_KERNEL);
+	if (!priv->user_scan_cfg) {
+		dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < request->n_ssids; i++) {
+		memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
+			request->ssids[i].ssid, request->ssids[i].ssid_len);
+		priv->user_scan_cfg->ssid_list[i].max_len =
+			request->ssids[i].ssid_len;
+	}
+	for (i = 0; i < request->n_channels; i++) {
+		chan = request->channels[i];
+		priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+		priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+
+		if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+			priv->user_scan_cfg->chan_list[i].scan_type =
+				MWIFIEX_SCAN_TYPE_PASSIVE;
+		else
+			priv->user_scan_cfg->chan_list[i].scan_type =
+				MWIFIEX_SCAN_TYPE_ACTIVE;
+
+		priv->user_scan_cfg->chan_list[i].scan_time = 0;
+	}
+	if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+		return -EFAULT;
+
 	return 0;
 }
 
@@ -1292,10 +1307,6 @@
 
 	priv->media_connected = false;
 
-	cancel_work_sync(&priv->cfg_workqueue);
-	flush_workqueue(priv->workqueue);
-	destroy_workqueue(priv->workqueue);
-
 	priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 
 	return 0;
@@ -1373,9 +1384,6 @@
 	memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
 	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
-	/* We are using custom domains */
-	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-
 	/* Reserve space for bss band information */
 	wdev->wiphy->bss_priv_size = sizeof(u8);
 
@@ -1404,100 +1412,3 @@
 
 	return ret;
 }
-
-/*
- * This function handles the result of different pending network operations.
- *
- * The following operations are handled and CFG802.11 subsystem is
- * notified accordingly -
- *      - Scan request completion
- *      - Association request completion
- *      - IBSS join request completion
- *      - Disconnect request completion
- */
-void
-mwifiex_cfg80211_results(struct work_struct *work)
-{
-	struct mwifiex_private *priv =
-		container_of(work, struct mwifiex_private, cfg_workqueue);
-	struct mwifiex_user_scan_cfg *scan_req;
-	int ret = 0, i;
-	struct ieee80211_channel *chan;
-
-	if (priv->scan_request) {
-		scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
-				   GFP_KERNEL);
-		if (!scan_req) {
-			dev_err(priv->adapter->dev, "failed to alloc "
-						    "scan_req\n");
-			return;
-		}
-		for (i = 0; i < priv->scan_request->n_ssids; i++) {
-			memcpy(scan_req->ssid_list[i].ssid,
-					priv->scan_request->ssids[i].ssid,
-					priv->scan_request->ssids[i].ssid_len);
-			scan_req->ssid_list[i].max_len =
-					priv->scan_request->ssids[i].ssid_len;
-		}
-		for (i = 0; i < priv->scan_request->n_channels; i++) {
-			chan = priv->scan_request->channels[i];
-			scan_req->chan_list[i].chan_number = chan->hw_value;
-			scan_req->chan_list[i].radio_type = chan->band;
-			if (chan->flags & IEEE80211_CHAN_DISABLED)
-				scan_req->chan_list[i].scan_type =
-					MWIFIEX_SCAN_TYPE_PASSIVE;
-			else
-				scan_req->chan_list[i].scan_type =
-					MWIFIEX_SCAN_TYPE_ACTIVE;
-			scan_req->chan_list[i].scan_time = 0;
-		}
-		if (mwifiex_set_user_scan_ioctl(priv, scan_req))
-			ret = -EFAULT;
-		priv->scan_result_status = ret;
-		dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
-							__func__);
-		cfg80211_scan_done(priv->scan_request,
-				(priv->scan_result_status < 0));
-		priv->scan_request = NULL;
-		kfree(scan_req);
-	}
-
-	if (priv->assoc_request == 1) {
-		if (!priv->assoc_result) {
-			cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
-						NULL, 0, NULL, 0,
-						WLAN_STATUS_SUCCESS,
-						GFP_KERNEL);
-			dev_dbg(priv->adapter->dev,
-				"info: associated to bssid %pM successfully\n",
-			       priv->cfg_bssid);
-		} else {
-			dev_dbg(priv->adapter->dev,
-				"info: association to bssid %pM failed\n",
-			       priv->cfg_bssid);
-			memset(priv->cfg_bssid, 0, ETH_ALEN);
-		}
-		priv->assoc_request = 0;
-		priv->assoc_result = 0;
-	}
-
-	if (priv->ibss_join_request == 1) {
-		if (!priv->ibss_join_result) {
-			cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
-					     GFP_KERNEL);
-			dev_dbg(priv->adapter->dev,
-				"info: joined/created adhoc network with bssid"
-					" %pM successfully\n", priv->cfg_bssid);
-		} else {
-			dev_dbg(priv->adapter->dev,
-				"info: failed creating/joining adhoc network\n");
-		}
-		priv->ibss_join_request = 0;
-		priv->ibss_join_result = 0;
-	}
-
-	if (priv->disconnect) {
-		memset(priv->cfg_bssid, 0, ETH_ALEN);
-		priv->disconnect = 0;
-	}
-}
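
Note: among the changes above, the set_tx_power handler now receives its argument in mBm, the unit cfg80211 actually passes, and converts it with MBM_TO_DBM(). A sketch of that conversion (handler body elided, names illustrative):

	static int example_set_tx_power(struct wiphy *wiphy,
					enum nl80211_tx_power_setting type, int mbm)
	{
		int dbm = MBM_TO_DBM(mbm);	/* 100 mBm == 1 dBm */

		/* ... program dbm into the firmware ... */
		return 0;
	}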
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 8d010f2..76c76c6 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -26,5 +26,4 @@
 
 int mwifiex_register_cfg80211(struct mwifiex_private *);
 
-void mwifiex_cfg80211_results(struct work_struct *work);
 #endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index f2e6de0..1782a77 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -75,18 +75,32 @@
  * This function maps an index in supported rates table into
  * the corresponding data rate.
  */
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+							u8 ht_info)
 {
-	u16 mcs_rate[4][8] = {
-		{0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
-	,			/* LG 40M */
-	{0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
-	,			/* SG 40M */
-	{0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
-	,			/* LG 20M */
-	{0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
-	};			/* SG 20M */
+	/*
+	 * For every mcs_rate row, the first 8 entries are for 1x1 streams,
+	 * and all 16 entries are for 2x2 streams.
+	 */
+	u16  mcs_rate[4][16] = {
+		/* LGI 40M */
+		{ 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+		  0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
 
+		/* SGI 40M */
+		{ 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+		  0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+		/* LGI 20M */
+		{ 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+		  0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+		/* SGI 20M */
+		{ 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+		  0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+	};
+	u32 mcs_num_supp =
+		(priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
 	u32 rate;
 
 	if (ht_info & BIT(0)) {
@@ -95,7 +109,7 @@
 				rate = 0x0D;	/* MCS 32 SGI rate */
 			else
 				rate = 0x0C;	/* MCS 32 LGI rate */
-		} else if (index < 8) {
+		} else if (index < mcs_num_supp) {
 			if (ht_info & BIT(1)) {
 				if (ht_info & BIT(2))
 					/* SGI, 40M */
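
Note: each row of the widened table carries 16 MCS entries so 2x2-stream rates resolve as well as 1x1. Following the bit layout used by the surrounding code (BIT(1) = 40 MHz, BIT(2) = short GI), row selection looks roughly like this sketch:

	int row;

	if (ht_info & BIT(1))				/* 40 MHz */
		row = (ht_info & BIT(2)) ? 1 : 0;	/* SGI 40M : LGI 40M */
	else						/* 20 MHz */
		row = (ht_info & BIT(2)) ? 3 : 2;	/* SGI 20M : LGI 20M */

	if (index < mcs_num_supp)
		rate = mcs_rate[row][index];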
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac27815..6e0a3ea 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@
 {
 	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
 	unsigned long cmd_flags;
-	unsigned long cmd_pending_q_flags;
 	unsigned long scan_pending_q_flags;
 	uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@
 		cmd_node = adapter->curr_cmd;
 		cmd_node->wait_q_enabled = false;
 		cmd_node->cmd_flag |= CMD_F_CANCELED;
-		spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-				  cmd_pending_q_flags);
-		list_del(&cmd_node->list);
-		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-				       cmd_pending_q_flags);
 		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		adapter->curr_cmd = NULL;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 	}
 
@@ -981,7 +977,6 @@
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 	}
 	adapter->cmd_wait_q.status = -1;
-	mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 0cc5d73..51c5417 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -165,6 +165,7 @@
 
 #define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
 #define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_2X2	0x22
 
 #define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
 
@@ -375,8 +376,6 @@
 	MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
 };
 
-#define SECOND_CHANNEL_BELOW    0x30
-#define SECOND_CHANNEL_ABOVE    0x10
 struct mwifiex_chan_scan_param_set {
 	u8 radio_type;
 	u8 chan_number;
@@ -673,7 +672,7 @@
 	union ieee_types_phy_param_set phy_param_set;
 	u16 reserved1;
 	__le16 cap_info_bitmap;
-	u8 DataRate[HOSTCMD_SUPPORTED_RATES];
+	u8 data_rate[HOSTCMD_SUPPORTED_RATES];
 } __packed;
 
 struct host_cmd_ds_802_11_ad_hoc_result {
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index d792b3f..e05b417 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -187,8 +187,6 @@
 	struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
 
 	skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
-	sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
-						(adapter->sleep_cfm->data);
 
 	adapter->cmd_sent = false;
 
@@ -248,12 +246,14 @@
 	memset(adapter->event_body, 0, sizeof(adapter->event_body));
 	adapter->hw_dot_11n_dev_cap = 0;
 	adapter->hw_dev_mcs_support = 0;
-	adapter->chan_offset = 0;
+	adapter->sec_chan_offset = 0;
 	adapter->adhoc_11n_enabled = false;
 
 	mwifiex_wmm_init(adapter);
 
 	if (adapter->sleep_cfm) {
+		sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+						adapter->sleep_cfm->data;
 		memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
 		sleep_cfm_buf->command =
 				cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
@@ -283,6 +283,45 @@
 }
 
 /*
+ * This function sets trans_start per tx_queue
+ */
+void mwifiex_set_trans_start(struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+
+	dev->trans_start = jiffies;
+}
+
+/*
+ * This function wakes up all queues in net_device
+ */
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+					struct mwifiex_adapter *adapter)
+{
+	unsigned long dev_queue_flags;
+
+	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+	netif_tx_wake_all_queues(netdev);
+	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
+ * This function stops all queues in net_device
+ */
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+					struct mwifiex_adapter *adapter)
+{
+	unsigned long dev_queue_flags;
+
+	spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+	netif_tx_stop_all_queues(netdev);
+	spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
  *  This function releases the lock variables and frees the locks and
  *  associated locks.
  */
@@ -359,6 +398,7 @@
 	spin_lock_init(&adapter->int_lock);
 	spin_lock_init(&adapter->main_proc_lock);
 	spin_lock_init(&adapter->mwifiex_cmd_lock);
+	spin_lock_init(&adapter->queue_lock);
 	for (i = 0; i < adapter->priv_num; i++) {
 		if (adapter->priv[i]) {
 			priv = adapter->priv[i];
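
Note: the helpers added above wrap the per-queue primitives of the net core so the rest of the driver stops mixing multi-queue netdevs with the single-queue netif_* API. As a rough mapping:

	netif_tx_start_all_queues(dev);		/* instead of netif_start_queue() */
	netif_tx_stop_all_queues(dev);		/* instead of netif_stop_queue()  */
	netif_tx_wake_all_queues(dev);		/* instead of netif_wake_queue()  */
	netdev_get_tx_queue(dev, i)->trans_start = jiffies;	/* per-queue trans_start */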
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e0b68e7..d5d81f1 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -62,17 +62,6 @@
 	BAND_AN = 16,
 };
 
-#define NO_SEC_CHANNEL               0
-#define SEC_CHANNEL_ABOVE            1
-#define SEC_CHANNEL_BELOW            3
-
-struct mwifiex_ds_band_cfg {
-	u32 config_bands;
-	u32 adhoc_start_band;
-	u32 adhoc_channel;
-	u32 sec_chan_offset;
-};
-
 enum {
 	ADHOC_IDLE,
 	ADHOC_STARTED,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 62b4c29..0b0eb5e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -724,8 +724,8 @@
 	u32 cmd_append_size = 0;
 	u32 i;
 	u16 tmp_cap;
-	uint16_t ht_cap_info;
 	struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+	u8 radio_type;
 
 	struct mwifiex_ie_types_htcap *ht_cap;
 	struct mwifiex_ie_types_htinfo *ht_info;
@@ -837,8 +837,8 @@
 		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
 	}
 
-	memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
-	mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
+	memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
+	mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
 	if ((adapter->adhoc_start_band & BAND_G) &&
 	    (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
 		if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
@@ -850,20 +850,19 @@
 		}
 	}
 	/* Find the last non zero */
-	for (i = 0; i < sizeof(adhoc_start->DataRate) &&
-			adhoc_start->DataRate[i];
-			i++)
-			;
+	for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
+		if (!adhoc_start->data_rate[i])
+			break;
 
 	priv->curr_bss_params.num_of_rates = i;
 
 	/* Copy the ad-hoc creating rates into Current BSS rate structure */
 	memcpy(&priv->curr_bss_params.data_rates,
-	       &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
+	       &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
 
 	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
-	       adhoc_start->DataRate[0], adhoc_start->DataRate[1],
-	       adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
+	       adhoc_start->data_rate[0], adhoc_start->data_rate[1],
+	       adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
 
 	dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
 
@@ -886,12 +885,14 @@
 		       = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
 		if (adapter->adhoc_start_band & BAND_GN
 		    || adapter->adhoc_start_band & BAND_AN) {
-			if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
+			if (adapter->sec_chan_offset ==
+					    IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
 				chan_tlv->chan_scan_param[0].radio_type |=
-					SECOND_CHANNEL_ABOVE;
-			else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
+					(IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4);
+			else if (adapter->sec_chan_offset ==
+					    IEEE80211_HT_PARAM_CHA_SEC_BELOW)
 				chan_tlv->chan_scan_param[0].radio_type |=
-					SECOND_CHANNEL_BELOW;
+					(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
 		}
 		dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
 		       chan_tlv->chan_scan_param[0].radio_type);
@@ -914,55 +915,40 @@
 	}
 
 	if (adapter->adhoc_11n_enabled) {
-		{
-			ht_cap = (struct mwifiex_ie_types_htcap *) pos;
-			memset(ht_cap, 0,
-			       sizeof(struct mwifiex_ie_types_htcap));
-			ht_cap->header.type =
-				cpu_to_le16(WLAN_EID_HT_CAPABILITY);
-			ht_cap->header.len =
-			       cpu_to_le16(sizeof(struct ieee80211_ht_cap));
-			ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
+		/* Fill HT CAPABILITY */
+		ht_cap = (struct mwifiex_ie_types_htcap *) pos;
+		memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
+		ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+		ht_cap->header.len =
+		       cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+		radio_type = mwifiex_band_to_radio_type(
+					priv->adapter->config_bands);
+		mwifiex_fill_cap_info(priv, radio_type, ht_cap);
 
-			ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
-			if (adapter->chan_offset) {
-				ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
-				ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
-				ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-				SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
-			}
+		pos += sizeof(struct mwifiex_ie_types_htcap);
+		cmd_append_size +=
+			sizeof(struct mwifiex_ie_types_htcap);
 
-			ht_cap->ht_cap.ampdu_params_info
-					= IEEE80211_HT_MAX_AMPDU_64K;
-			ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
-			pos += sizeof(struct mwifiex_ie_types_htcap);
-			cmd_append_size +=
-				sizeof(struct mwifiex_ie_types_htcap);
-		}
-		{
-			ht_info = (struct mwifiex_ie_types_htinfo *) pos;
-			memset(ht_info, 0,
-			       sizeof(struct mwifiex_ie_types_htinfo));
-			ht_info->header.type =
-				cpu_to_le16(WLAN_EID_HT_INFORMATION);
-			ht_info->header.len =
-				cpu_to_le16(sizeof(struct ieee80211_ht_info));
-			ht_info->ht_info.control_chan =
-				(u8) priv->curr_bss_params.bss_descriptor.
-				channel;
-			if (adapter->chan_offset) {
-				ht_info->ht_info.ht_param =
-					adapter->chan_offset;
-				ht_info->ht_info.ht_param |=
+		/* Fill HT INFORMATION */
+		ht_info = (struct mwifiex_ie_types_htinfo *) pos;
+		memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
+		ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
+		ht_info->header.len =
+			cpu_to_le16(sizeof(struct ieee80211_ht_info));
+
+		ht_info->ht_info.control_chan =
+			(u8) priv->curr_bss_params.bss_descriptor.channel;
+		if (adapter->sec_chan_offset) {
+			ht_info->ht_info.ht_param = adapter->sec_chan_offset;
+			ht_info->ht_info.ht_param |=
 					IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
-			}
-			ht_info->ht_info.operation_mode =
-			     cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-			ht_info->ht_info.basic_set[0] = 0xff;
-			pos += sizeof(struct mwifiex_ie_types_htinfo);
-			cmd_append_size +=
-				sizeof(struct mwifiex_ie_types_htinfo);
 		}
+		ht_info->ht_info.operation_mode =
+		     cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+		ht_info->ht_info.basic_set[0] = 0xff;
+		pos += sizeof(struct mwifiex_ie_types_htinfo);
+		cmd_append_size +=
+			sizeof(struct mwifiex_ie_types_htinfo);
 	}
 
 	cmd->size = cpu_to_le16((u16)
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 67e6db7..84be196 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -401,7 +401,7 @@
 static int
 mwifiex_open(struct net_device *dev)
 {
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -465,8 +465,8 @@
 	atomic_inc(&priv->adapter->tx_pending);
 
 	if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
-		netif_stop_queue(priv->netdev);
-		dev->trans_start = jiffies;
+		mwifiex_set_trans_start(dev);
+		mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
 	}
 
 	queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@
 
 	dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
 				jiffies, priv->bss_index);
-	dev->trans_start = jiffies;
+	mwifiex_set_trans_start(dev);
 	priv->num_tx_timeout++;
 }
 
@@ -586,8 +586,6 @@
 	priv->media_connected = false;
 	memset(&priv->nick_name, 0, sizeof(priv->nick_name));
 	priv->num_tx_timeout = 0;
-	priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
-	INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
 	memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
 }
 
@@ -793,7 +791,8 @@
 		priv = adapter->priv[i];
 		if (priv && priv->netdev) {
 			if (!netif_queue_stopped(priv->netdev))
-				netif_stop_queue(priv->netdev);
+				mwifiex_stop_net_dev_queue(priv->netdev,
+								adapter);
 			if (netif_carrier_ok(priv->netdev))
 				netif_carrier_off(priv->netdev);
 		}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 30f138b..3186aa4 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -453,15 +453,8 @@
 	u8 scan_pending_on_block;
 	u8 report_scan_result;
 	struct cfg80211_scan_request *scan_request;
-	int scan_result_status;
-	int assoc_request;
-	u16 assoc_result;
-	int ibss_join_request;
-	u16 ibss_join_result;
-	bool disconnect;
+	struct mwifiex_user_scan_cfg *user_scan_cfg;
 	u8 cfg_bssid[6];
-	struct workqueue_struct *workqueue;
-	struct work_struct cfg_workqueue;
 	u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
 	struct wps wps;
 	u8 scan_block;
@@ -647,7 +640,8 @@
 	u32 hw_dot_11n_dev_cap;
 	u8 hw_dev_mcs_support;
 	u8 adhoc_11n_enabled;
-	u8 chan_offset;
+	u8 sec_chan_offset;
+	enum nl80211_channel_type channel_type;
 	struct mwifiex_dbg dbg;
 	u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
 	u32 arp_filter_size;
@@ -655,10 +649,19 @@
 	struct mwifiex_wait_queue cmd_wait_q;
 	u8 scan_wait_q_woken;
 	struct cmd_ctrl_node *cmd_queued;
+	spinlock_t queue_lock;		/* lock for tx queues */
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
 
+void mwifiex_set_trans_start(struct net_device *dev);
+
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+		struct mwifiex_adapter *adapter);
+
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+		struct mwifiex_adapter *adapter);
+
 int mwifiex_init_fw(struct mwifiex_adapter *adapter);
 
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -775,7 +778,8 @@
 struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
 						struct mwifiex_private *priv,
 						u8 band, u32 freq);
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+							u8 ht_info);
 u32 mwifiex_find_freq_from_band_chan(u8, u8);
 int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
 				u8 **buffer);
@@ -951,8 +955,6 @@
 
 int mwifiex_bss_set_channel(struct mwifiex_private *,
 			    struct mwifiex_chan_freq_power *cfp);
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *,
-			 struct mwifiex_ds_band_cfg *);
 int mwifiex_get_bss_info(struct mwifiex_private *,
 			 struct mwifiex_bss_info *);
 int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index d34acf0..4053509 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -386,8 +386,7 @@
 	card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
 	if (!card->txbd_ring_vbase) {
 		dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
-		kfree(card->txbd_ring_vbase);
-		return -1;
+		return -ENOMEM;
 	}
 	card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
 
@@ -477,7 +476,7 @@
 	if (!card->rxbd_ring_vbase) {
 		dev_err(adapter->dev, "Unable to allocate buffer for "
 				"rxbd_ring.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
 
@@ -570,7 +569,7 @@
 	if (!card->evtbd_ring_vbase) {
 		dev_err(adapter->dev, "Unable to allocate buffer. "
 				"Terminating download\n");
-		return -1;
+		return -ENOMEM;
 	}
 	card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
 
@@ -1229,15 +1228,16 @@
 	if (!skb)
 		return 0;
 
-	if (rdptr >= MWIFIEX_MAX_EVT_BD)
+	if (rdptr >= MWIFIEX_MAX_EVT_BD) {
 		dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
 					rdptr);
+		return -EINVAL;
+	}
 
 	/* Read the event ring write pointer set by firmware */
 	if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
 		dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
-		ret = -1;
-		goto done;
+		return -1;
 	}
 
 	if (!card->evt_buf_list[rdptr]) {
@@ -1266,15 +1266,9 @@
 	/* Write the event ring read pointer in to REG_EVTBD_RDPTR */
 	if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
 		dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
-		ret = -1;
-		goto done;
+		return -1;
 	}
 
-done:
-	/* Free the buffer for failure case */
-	if (ret && skb)
-		dev_kfree_skb_any(skb);
-
 	dev_dbg(adapter->dev, "info: Check Events Again\n");
 	ret = mwifiex_pcie_process_event_ready(adapter);
 
@@ -1672,9 +1666,8 @@
 				     struct sk_buff *skb,
 				     struct mwifiex_tx_param *tx_param)
 {
-	if (!adapter || !skb) {
-		dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n",
-				__func__, adapter, skb);
+	if (!skb) {
+		dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
 		return -1;
 	}
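
Note: the pcie.c hunks above return real errnos (-ENOMEM, -EINVAL) instead of a bare -1 and drop the pointless kfree() of a pointer known to be NULL. The allocation pattern now reads, in sketch form:

	card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
	if (!card->txbd_ring_vbase) {
		dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
		return -ENOMEM;		/* nothing to free: the allocation failed */
	}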
 
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8d3ab37..6396d33 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -500,7 +500,6 @@
 	struct ieee80211_channel *ch;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	int chan_idx = 0, i;
-	u8 scan_type;
 
 	for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
 
@@ -514,19 +513,20 @@
 			if (ch->flags & IEEE80211_CHAN_DISABLED)
 				continue;
 			scan_chan_list[chan_idx].radio_type = band;
-			scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+
 			if (user_scan_in &&
 				user_scan_in->chan_list[0].scan_time)
 				scan_chan_list[chan_idx].max_scan_time =
 					cpu_to_le16((u16) user_scan_in->
 					chan_list[0].scan_time);
-			else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+			else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
 				scan_chan_list[chan_idx].max_scan_time =
 					cpu_to_le16(adapter->passive_scan_time);
 			else
 				scan_chan_list[chan_idx].max_scan_time =
 					cpu_to_le16(adapter->active_scan_time);
-			if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+
+			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					|= MWIFIEX_PASSIVE_SCAN;
 			else
@@ -1391,11 +1391,8 @@
 {
 	int status;
 
-	priv->adapter->scan_wait_q_woken = false;
-
 	status = mwifiex_scan_networks(priv, scan_req);
-	if (!status)
-		status = mwifiex_wait_queue_complete(priv->adapter);
+	queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
 
 	return status;
 }
@@ -1537,11 +1534,6 @@
 	return 0;
 }
 
-static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
-{
-	kfree(bss->priv);
-}
-
 /*
  * This function handles the command response of scan.
  *
@@ -1767,7 +1759,7 @@
 					      cap_info_bitmap, beacon_period,
 					      ie_buf, ie_len, rssi, GFP_KERNEL);
 				*(u8 *)bss->priv = band;
-				bss->free_priv = mwifiex_free_bss_priv;
+				cfg80211_put_bss(bss);
 
 				if (priv->media_connected && !memcmp(bssid,
 					priv->curr_bss_params.bss_descriptor
@@ -1801,6 +1793,14 @@
 			up(&priv->async_sem);
 		}
 
+		if (priv->user_scan_cfg) {
+			dev_dbg(priv->adapter->dev, "info: %s: sending scan "
+							"results\n", __func__);
+			cfg80211_scan_done(priv->scan_request, 0);
+			priv->scan_request = NULL;
+			kfree(priv->user_scan_cfg);
+			priv->user_scan_cfg = NULL;
+		}
 	} else {
 		/* Get scan command from scan_pending_q and put to
 		   cmd_pending_q */
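
Note: with the cfg80211 workqueue removed, the scan now completes directly from the command-response path, using user_scan_cfg as the in-progress marker; cfg80211_scan_done() takes the original request and an aborted flag. Sketched from the hunk above:

	if (priv->user_scan_cfg) {
		cfg80211_scan_done(priv->scan_request, false);	/* false: not aborted */
		priv->scan_request = NULL;
		kfree(priv->user_scan_cfg);
		priv->user_scan_cfg = NULL;
	}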
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 283171b..d39d845 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -256,10 +256,13 @@
 
 /* Device ID for SD8787 */
 #define SDIO_DEVICE_ID_MARVELL_8787   (0x9119)
+/* Device ID for SD8797 */
+#define SDIO_DEVICE_ID_MARVELL_8797   (0x9129)
 
 /* WLAN IDs */
 static const struct sdio_device_id mwifiex_ids[] = {
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
 	{},
 };
 
@@ -1084,7 +1087,7 @@
 					   (adapter->ioport | 0x1000 |
 					    (card->mpa_rx.ports << 4)) +
 					   card->mpa_rx.start_port, 1))
-			return -1;
+			goto error;
 
 		curr_ptr = card->mpa_rx.buf;
 
@@ -1127,12 +1130,29 @@
 		if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
 					      skb->data, skb->len,
 					      adapter->ioport + port))
-			return -1;
+			goto error;
 
 		mwifiex_decode_rx_packet(adapter, skb, pkt_type);
 	}
 
 	return 0;
+
+error:
+	if (MP_RX_AGGR_IN_PROGRESS(card)) {
+		/* Multiport-aggregation transfer failed - cleanup */
+		for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+			/* copy pkt to deaggr buf */
+			skb_deaggr = card->mpa_rx.skb_arr[pind];
+			dev_kfree_skb_any(skb_deaggr);
+		}
+		MP_RX_AGGR_BUF_RESET(card);
+	}
+
+	if (f_do_rx_cur)
+		/* Single transfer pending. Free curr buff also */
+		dev_kfree_skb_any(skb);
+
+	return -1;
 }
 
 /*
@@ -1268,7 +1288,6 @@
 
 				dev_dbg(adapter->dev,
 						"info: CFG reg val =%x\n", cr);
-				dev_kfree_skb_any(skb);
 				return -1;
 			}
 		}
@@ -1573,7 +1592,16 @@
 	sdio_set_drvdata(func, card);
 
 	adapter->dev = &func->dev;
-	strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+
+	switch (func->device) {
+	case SDIO_DEVICE_ID_MARVELL_8797:
+		strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
+		break;
+	case SDIO_DEVICE_ID_MARVELL_8787:
+	default:
+		strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+		break;
+	}
 
 	return 0;
 
@@ -1630,14 +1658,14 @@
 	card->mpa_tx.pkt_cnt = 0;
 	card->mpa_tx.start_port = 0;
 
-	card->mpa_tx.enabled = 0;
+	card->mpa_tx.enabled = 1;
 	card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
 
 	card->mpa_rx.buf_len = 0;
 	card->mpa_rx.pkt_cnt = 0;
 	card->mpa_rx.start_port = 0;
 
-	card->mpa_rx.enabled = 0;
+	card->mpa_rx.enabled = 1;
 	card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
 
 	/* Allocate buffers for SDIO MP-A */
@@ -1774,4 +1802,5 @@
 MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
 MODULE_VERSION(SDIO_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
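
Note: the probe path above selects the firmware image name from the SDIO device ID, and MODULE_FIRMWARE() advertises both images in modinfo so packaging tools can ship them. The name chosen at probe is what later reaches the firmware loader, roughly (illustrative sketch, not part of this patch):

	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, adapter->fw_name, adapter->dev);
	if (err)
		return err;
	/* ... download fw->data (fw->size bytes) to the card ... */
	release_firmware(fw);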
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 3f71180..a3fb322 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -29,6 +29,7 @@
 #include "main.h"
 
 #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
 
 #define BLOCK_MODE	1
 #define BYTE_MODE	0
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index ea6518d..6e443ff 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -748,7 +748,7 @@
 				cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
 
 		rf_type = le16_to_cpu(rf_chan->rf_type);
-		SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
+		SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset);
 		rf_chan->current_channel = cpu_to_le16(*channel);
 	}
 	rf_chan->action = cpu_to_le16(cmd_action);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 7a16b0c..e812db8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -508,7 +508,7 @@
 	priv->tx_htinfo = resp->params.tx_rate.ht_info;
 	if (!priv->is_data_rate_auto)
 		priv->data_rate =
-			mwifiex_index_to_data_rate(priv->tx_rate,
+			mwifiex_index_to_data_rate(priv, priv->tx_rate,
 						   priv->tx_htinfo);
 
 	return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f204810..d7aa21d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -115,18 +115,17 @@
 	if (adapter->num_cmd_timeout && adapter->curr_cmd)
 		return;
 	priv->media_connected = false;
-	if (!priv->disconnect) {
-		priv->disconnect = 1;
-		dev_dbg(adapter->dev, "info: successfully disconnected from"
-				" %pM: reason code %d\n", priv->cfg_bssid,
-				WLAN_REASON_DEAUTH_LEAVING);
-		cfg80211_disconnected(priv->netdev,
-				WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
-				GFP_KERNEL);
-		queue_work(priv->workqueue, &priv->cfg_workqueue);
+	dev_dbg(adapter->dev, "info: successfully disconnected from"
+			" %pM: reason code %d\n", priv->cfg_bssid,
+			WLAN_REASON_DEAUTH_LEAVING);
+	if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+		cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
+				      NULL, 0, GFP_KERNEL);
 	}
+	memset(priv->cfg_bssid, 0, ETH_ALEN);
+
 	if (!netif_queue_stopped(priv->netdev))
-		netif_stop_queue(priv->netdev);
+		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
 	/* Reset wireless stats signal info */
@@ -201,7 +200,7 @@
 		if (!netif_carrier_ok(priv->netdev))
 			netif_carrier_on(priv->netdev);
 		if (netif_queue_stopped(priv->netdev))
-			netif_wake_queue(priv->netdev);
+			mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 		break;
 
 	case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@
 		priv->adhoc_is_link_sensed = false;
 		mwifiex_clean_txrx(priv);
 		if (!netif_queue_stopped(priv->netdev))
-			netif_stop_queue(priv->netdev);
+			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 		if (netif_carrier_ok(priv->netdev))
 			netif_carrier_off(priv->netdev);
 		break;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index ea4a29b..470ca75 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -55,9 +55,14 @@
 {
 	bool cancel_flag = false;
 	int status = adapter->cmd_wait_q.status;
-	struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+	struct cmd_ctrl_node *cmd_queued;
 
+	if (!adapter->cmd_queued)
+		return 0;
+
+	cmd_queued = adapter->cmd_queued;
 	adapter->cmd_queued = NULL;
+
 	dev_dbg(adapter->dev, "cmd pending\n");
 	atomic_inc(&adapter->cmd_pending);
 
@@ -234,7 +239,7 @@
 				      "associating...\n");
 
 		if (!netif_queue_stopped(priv->netdev))
-			netif_stop_queue(priv->netdev);
+			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
 		/* Clear any past association response stored for
 		 * application retrieval */
@@ -265,7 +270,7 @@
 		ret = mwifiex_check_network_compatibility(priv, bss_desc);
 
 		if (!netif_queue_stopped(priv->netdev))
-			netif_stop_queue(priv->netdev);
+			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
 		if (!ret) {
 			dev_dbg(adapter->dev, "info: network found in scan"
@@ -472,67 +477,6 @@
 }
 
 /*
- * The function sets band configurations.
- *
- * it performs extra checks to make sure the Ad-Hoc
- * band and channel are compatible. Otherwise it returns an error.
- *
- */
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv,
-			       struct mwifiex_ds_band_cfg *radio_cfg)
-{
-	struct mwifiex_adapter *adapter = priv->adapter;
-	u8 infra_band, adhoc_band;
-	u32 adhoc_channel;
-
-	infra_band = (u8) radio_cfg->config_bands;
-	adhoc_band = (u8) radio_cfg->adhoc_start_band;
-	adhoc_channel = radio_cfg->adhoc_channel;
-
-	/* SET Infra band */
-	if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands)
-		return -1;
-
-	adapter->config_bands = infra_band;
-
-	/* SET Ad-hoc Band */
-	if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands)
-		return -1;
-
-	if (adhoc_band)
-		adapter->adhoc_start_band = adhoc_band;
-	adapter->chan_offset = (u8) radio_cfg->sec_chan_offset;
-	/*
-	 * If no adhoc_channel is supplied verify if the existing adhoc
-	 * channel compiles with new adhoc_band
-	 */
-	if (!adhoc_channel) {
-		if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
-		     (priv, adapter->adhoc_start_band,
-		     priv->adhoc_channel)) {
-			/* Pass back the default channel */
-			radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
-			if ((adapter->adhoc_start_band & BAND_A)
-			    || (adapter->adhoc_start_band & BAND_AN))
-				radio_cfg->adhoc_channel =
-					DEFAULT_AD_HOC_CHANNEL_A;
-		}
-	} else {	/* Retrurn error if adhoc_band and
-			   adhoc_channel combination is invalid */
-		if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
-		    (priv, adapter->adhoc_start_band, (u16) adhoc_channel))
-			return -1;
-		priv->adhoc_channel = (u8) adhoc_channel;
-	}
-	if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN))
-		adapter->adhoc_11n_enabled = true;
-	else
-		adapter->adhoc_11n_enabled = false;
-
-	return 0;
-}
-
-/*
  * The function disables auto deep sleep mode.
  */
 int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
@@ -832,8 +776,8 @@
 
 	if (!ret) {
 		if (rate->is_rate_auto)
-			rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
-							priv->tx_htinfo);
+			rate->rate = mwifiex_index_to_data_rate(priv,
+					priv->tx_rate, priv->tx_htinfo);
 		else
 			rate->rate = priv->data_rate;
 	} else {
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 2743051..5e1ef7e 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -126,6 +126,9 @@
 	u16 rx_pkt_type;
 	struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
 
+	if (!priv)
+		return -1;
+
 	local_rx_pd = (struct rxpd *) (skb->data);
 	rx_pkt_type = local_rx_pd->rx_pkt_type;
 
@@ -189,12 +192,11 @@
 					     (u8) local_rx_pd->rx_pkt_type,
 					     skb);
 
-	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
-		if (priv && (ret == -1))
-			priv->stats.rx_dropped++;
-
+	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
 		dev_kfree_skb_any(skb);
-	}
+
+	if (ret)
+		priv->stats.rx_dropped++;
 
 	return ret;
 }
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a206f41..d9274a1 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -134,7 +134,7 @@
 	if (!priv)
 		goto done;
 
-	priv->netdev->trans_start = jiffies;
+	mwifiex_set_trans_start(priv->netdev);
 	if (!status) {
 		priv->stats.tx_packets++;
 		priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@
 		if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
 				&& (tpriv->media_connected)) {
 			if (netif_queue_stopped(tpriv->netdev))
-				netif_wake_queue(tpriv->netdev);
+				mwifiex_wake_up_net_dev_queue(tpriv->netdev,
+								adapter);
 		}
 	}
 done:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 995695c..7becea3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -28,10 +28,10 @@
 
 #define MWL8K_DESC	"Marvell TOPDOG(R) 802.11 Wireless Network Driver"
 #define MWL8K_NAME	KBUILD_MODNAME
-#define MWL8K_VERSION	"0.12"
+#define MWL8K_VERSION	"0.13"
 
 /* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
 module_param(ap_mode_default, bool, 0);
 MODULE_PARM_DESC(ap_mode_default,
 		 "Set to 1 to make ap mode the default instead of sta mode");
@@ -198,6 +198,7 @@
 	/* firmware access */
 	struct mutex fw_mutex;
 	struct task_struct *fw_mutex_owner;
+	struct task_struct *hw_restart_owner;
 	int fw_mutex_depth;
 	struct completion *hostcmd_wait;
 
@@ -262,6 +263,10 @@
 	 */
 	struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
 
+	/* Work item used to reload the firmware */
+	struct work_struct fw_reload;
+	bool hw_restart_in_progress;
+
 	/* async firmware loading state */
 	unsigned fw_state;
 	char *fw_pref;
@@ -738,10 +743,10 @@
 
 		ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
 		if (ready_code == MWL8K_FWAP_READY) {
-			priv->ap_fw = 1;
+			priv->ap_fw = true;
 			break;
 		} else if (ready_code == MWL8K_FWSTA_READY) {
-			priv->ap_fw = 0;
+			priv->ap_fw = false;
 			break;
 		}
 
@@ -1498,6 +1503,18 @@
 
 	might_sleep();
 
+	/* While a firmware restart is in progress, allow only the
+	 * commands issued by the restart code and block all other
+	 * commands, since they are bound to fail anyway because
+	 * the firmware has crashed.
+	 */
+	if (priv->hw_restart_in_progress) {
+		if (priv->hw_restart_owner == current)
+			return 0;
+		else
+			return -EBUSY;
+	}
+
 	/*
 	 * The TX queues are stopped at this point, so this test
 	 * doesn't need to take ->tx_lock.
@@ -1541,6 +1558,8 @@
 		wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n",
 			  MWL8K_TX_WAIT_TIMEOUT_MS);
 		mwl8k_dump_tx_rings(hw);
+		priv->hw_restart_in_progress = true;
+		ieee80211_queue_work(hw, &priv->fw_reload);
 
 		rc = -ETIMEDOUT;
 	}
@@ -2058,7 +2077,9 @@
 
 		rc = mwl8k_tx_wait_empty(hw);
 		if (rc) {
-			ieee80211_wake_queues(hw);
+			if (!priv->hw_restart_in_progress)
+				ieee80211_wake_queues(hw);
+
 			mutex_unlock(&priv->fw_mutex);
 
 			return rc;
@@ -2077,7 +2098,9 @@
 	struct mwl8k_priv *priv = hw->priv;
 
 	if (!--priv->fw_mutex_depth) {
-		ieee80211_wake_queues(hw);
+		if (!priv->hw_restart_in_progress)
+			ieee80211_wake_queues(hw);
+
 		priv->fw_mutex_owner = NULL;
 		mutex_unlock(&priv->fw_mutex);
 	}
@@ -4398,7 +4421,8 @@
 	struct mwl8k_priv *priv = hw->priv;
 	int i;
 
-	mwl8k_cmd_radio_disable(hw);
+	if (!priv->hw_restart_in_progress)
+		mwl8k_cmd_radio_disable(hw);
 
 	ieee80211_stop_queues(hw);
 
@@ -4499,6 +4523,16 @@
 	return 0;
 }
 
+static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif)
+{
+	/* Has ieee80211_restart_hw re-added the removed interfaces? */
+	if (!priv->macids_used)
+		return;
+
+	priv->macids_used &= ~(1 << vif->macid);
+	list_del(&vif->list);
+}
+
 static void mwl8k_remove_interface(struct ieee80211_hw *hw,
 				   struct ieee80211_vif *vif)
 {
@@ -4510,8 +4544,54 @@
 
 	mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
 
-	priv->macids_used &= ~(1 << mwl8k_vif->macid);
-	list_del(&mwl8k_vif->list);
+	mwl8k_remove_vif(priv, mwl8k_vif);
+}
+
+static void mwl8k_hw_restart_work(struct work_struct *work)
+{
+	struct mwl8k_priv *priv =
+		container_of(work, struct mwl8k_priv, fw_reload);
+	struct ieee80211_hw *hw = priv->hw;
+	struct mwl8k_device_info *di;
+	int rc;
+
+	/* If some command is waiting for a response, clear it */
+	if (priv->hostcmd_wait != NULL) {
+		complete(priv->hostcmd_wait);
+		priv->hostcmd_wait = NULL;
+	}
+
+	priv->hw_restart_owner = current;
+	di = priv->device_info;
+	mwl8k_fw_lock(hw);
+
+	if (priv->ap_fw)
+		rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
+	else
+		rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
+
+	if (rc)
+		goto fail;
+
+	priv->hw_restart_owner = NULL;
+	priv->hw_restart_in_progress = false;
+
+	/*
+	 * This unlock wakes up the queues and
+	 * also reopens the command path for
+	 * other commands.
+	 */
+	mwl8k_fw_unlock(hw);
+
+	ieee80211_restart_hw(hw);
+
+	wiphy_err(hw->wiphy, "Firmware restarted successfully\n");
+
+	return;
+fail:
+	mwl8k_fw_unlock(hw);
+
+	wiphy_err(hw->wiphy, "Firmware restart failed\n");
 }
 
 static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -5024,7 +5104,11 @@
 		for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
 			rc = mwl8k_check_ba(hw, stream);
 
-			if (!rc)
+			/* If HW restart is in progress, mwl8k_post_cmd will
+			 * return -EBUSY. Avoid retrying mwl8k_check_ba in
+			 * such cases.
+			 */
+			if (!rc || rc == -EBUSY)
 				break;
 			/*
 			 * HW queues take time to be flushed, give them
@@ -5044,14 +5128,14 @@
 		ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_STOP:
-		if (stream == NULL)
-			break;
-		if (stream->state == AMPDU_STREAM_ACTIVE) {
-			spin_unlock(&priv->stream_lock);
-			mwl8k_destroy_ba(hw, stream);
-			spin_lock(&priv->stream_lock);
+		if (stream) {
+			if (stream->state == AMPDU_STREAM_ACTIVE) {
+				spin_unlock(&priv->stream_lock);
+				mwl8k_destroy_ba(hw, stream);
+				spin_lock(&priv->stream_lock);
+			}
+			mwl8k_remove_stream(hw, stream);
 		}
-		mwl8k_remove_stream(hw, stream);
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -5263,12 +5347,15 @@
 	mwl8k_release_firmware(priv);
 }
 
+#define MAX_RESTART_ATTEMPTS 1
 static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
 			       bool nowait)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	int rc;
+	int count = MAX_RESTART_ATTEMPTS;
 
+retry:
 	/* Reset firmware and hardware */
 	mwl8k_hw_reset(priv);
 
@@ -5290,6 +5377,16 @@
 	/* Reclaim memory once firmware is successfully loaded */
 	mwl8k_release_firmware(priv);
 
+	if (rc && count) {
+		/* FW did not start successfully;
+		 * let's try one more time.
+		 */
+		count--;
+		wiphy_err(hw->wiphy, "Trying to reload the firmware again\n");
+		msleep(20);
+		goto retry;
+	}
+
 	return rc;
 }
 
@@ -5365,7 +5462,14 @@
 		goto err_free_queues;
 	}
 
-	memset(priv->ampdu, 0, sizeof(priv->ampdu));
+	/*
+	 * When hw restart is requested,
+	 * mac80211 will take care of clearing
+	 * the ampdu streams, so do not clear
+	 * the ampdu state here
+	 */
+	if (!priv->hw_restart_in_progress)
+		memset(priv->ampdu, 0, sizeof(priv->ampdu));
 
 	/*
 	 * Temporarily enable interrupts.  Initial firmware host
@@ -5439,10 +5543,20 @@
 {
 	int i, rc = 0;
 	struct mwl8k_priv *priv = hw->priv;
+	struct mwl8k_vif *vif, *tmp_vif;
 
 	mwl8k_stop(hw);
 	mwl8k_rxq_deinit(hw, 0);
 
+	/*
+	 * All the existing interfaces are re-added by ieee80211_reconfig,
+	 * which means the driver should remove the existing interfaces
+	 * before calling ieee80211_restart_hw.
+	 */
+	if (priv->hw_restart_in_progress)
+		list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list)
+			mwl8k_remove_vif(priv, vif);
+
 	for (i = 0; i < mwl8k_tx_queues(priv); i++)
 		mwl8k_txq_deinit(hw, i);
 
@@ -5454,6 +5568,9 @@
 	if (rc)
 		goto fail;
 
+	if (priv->hw_restart_in_progress)
+		return rc;
+
 	rc = mwl8k_start(hw);
 	if (rc)
 		goto fail;
@@ -5517,13 +5634,15 @@
 	INIT_LIST_HEAD(&priv->vif_list);
 
 	/* Set default radio state and preamble */
-	priv->radio_on = 0;
-	priv->radio_short_preamble = 0;
+	priv->radio_on = false;
+	priv->radio_short_preamble = false;
 
 	/* Finalize join worker */
 	INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
 	/* Handle watchdog ba events */
 	INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
+	/* To reload the firmware if it crashes */
+	INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work);
 
 	/* TX reclaim and RX tasklets.  */
 	tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
@@ -5667,6 +5786,9 @@
 	rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
 	if (rc)
 		goto err_stop_firmware;
+
+	priv->hw_restart_in_progress = false;
+
 	return rc;
 
 err_stop_firmware:
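
The gating added to mwl8k_fw_lock() above is the heart of the restart handling: while hw_restart_in_progress is set, only the context that owns the restart may post firmware commands and every other caller gets -EBUSY. A minimal standalone sketch of that idea, with hypothetical names rather than the driver's real structures:

	#include <errno.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct fw_ctx {
		bool restart_in_progress;	/* set once the firmware is known to have crashed */
		const void *restart_owner;	/* context performing the reload */
	};

	/* Modelled on the check added to mwl8k_fw_lock(): during a restart,
	 * only the restart owner is allowed through; everyone else would
	 * fail against the dead firmware anyway. */
	static int post_cmd(struct fw_ctx *ctx, const void *caller)
	{
		if (ctx->restart_in_progress && ctx->restart_owner != caller)
			return -EBUSY;
		return 0;	/* the command would be sent to the firmware here */
	}

	int main(void)
	{
		struct fw_ctx ctx = { .restart_in_progress = true, .restart_owner = &ctx };

		printf("owner : %d\n", post_cmd(&ctx, &ctx));	/* 0 */
		printf("other : %d\n", post_cmd(&ctx, NULL));	/* -EBUSY */
		return 0;
	}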
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index b52acc4..9fb77d0 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -121,7 +121,7 @@
 MODULE_PARM_DESC(orinoco_debug, "Debug level");
 #endif
 
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
 module_param(suppress_linkstatus, bool, 0644);
 MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
 
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 0793e42..ae8ce56 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1759,32 +1759,7 @@
 static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
 	" (Manuel Estrada Sainz)";
 
-static int __init ezusb_module_init(void)
-{
-	int err;
-
-	printk(KERN_DEBUG "%s\n", version);
-
-	/* register this driver with the USB subsystem */
-	err = usb_register(&orinoco_driver);
-	if (err < 0) {
-		printk(KERN_ERR PFX "usb_register failed, error %d\n",
-		       err);
-		return err;
-	}
-
-	return 0;
-}
-
-static void __exit ezusb_module_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&orinoco_driver);
-}
-
-
-module_init(ezusb_module_init);
-module_exit(ezusb_module_exit);
+module_usb_driver(orinoco_driver);
 
 MODULE_AUTHOR("Manuel Estrada Sainz");
 MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge");
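
The orinoco_usb conversion above is the first of several in this series (p54usb, rndis_wlan, rt2500usb, rt2800usb, rt73usb and rtl8187 follow the same pattern): the hand-rolled module_init()/module_exit() pair is replaced by module_usb_driver(). Roughly speaking, and simplified from the macro's definition via module_driver(), the expansion generates the same boilerplate that was just deleted; the KERN_DEBUG version banner from the old init routine is dropped along the way.

	/* Approximate expansion of module_usb_driver(orinoco_driver); a sketch,
	 * not the literal preprocessor output. */
	static int __init orinoco_driver_init(void)
	{
		return usb_register(&orinoco_driver);
	}
	module_init(orinoco_driver_init);

	static void __exit orinoco_driver_exit(void)
	{
		usb_deregister(&orinoco_driver);
	}
	module_exit(orinoco_driver_exit);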
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e99ca1c..96e39ed 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -76,6 +76,7 @@
 {
 	struct wiphy *wiphy = priv_to_wiphy(priv);
 	struct ieee80211_channel *channel;
+	struct cfg80211_bss *cbss;
 	u8 *ie;
 	u8 ie_buf[46];
 	u64 timestamp;
@@ -121,9 +122,10 @@
 	beacon_interval = le16_to_cpu(bss->a.beacon_interv);
 	signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
 
-	cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
-			    capability, beacon_interval, ie_buf, ie_len,
-			    signal, GFP_KERNEL);
+	cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
+				   capability, beacon_interval, ie_buf, ie_len,
+				   signal, GFP_KERNEL);
+	cfg80211_put_bss(cbss);
 }
 
 void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -132,6 +134,7 @@
 {
 	struct wiphy *wiphy = priv_to_wiphy(priv);
 	struct ieee80211_channel *channel;
+	struct cfg80211_bss *cbss;
 	const u8 *ie;
 	u64 timestamp;
 	s32 signal;
@@ -152,9 +155,10 @@
 	ie = bss->data;
 	signal = SIGNAL_TO_MBM(bss->level);
 
-	cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
-			    capability, beacon_interval, ie, ie_len,
-			    signal, GFP_KERNEL);
+	cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
+				   capability, beacon_interval, ie, ie_len,
+				   signal, GFP_KERNEL);
+	cfg80211_put_bss(cbss);
 }
 
 void orinoco_add_hostscan_results(struct orinoco_private *priv,
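
Both orinoco hunks above (and the rndis_wlan ones further down) fix the same reference leak: cfg80211_inform_bss() hands back a struct cfg80211_bss whose reference count has been bumped, so a caller that does not keep the BSS must release it with cfg80211_put_bss(). A kernel-style sketch of the intended pattern, with the argument list abbreviated to match the calls in this diff:

	#include <net/cfg80211.h>

	static void report_scan_result(struct wiphy *wiphy,
				       struct ieee80211_channel *channel,
				       const u8 *bssid, u64 tsf, u16 capability,
				       u16 beacon_interval, const u8 *ie,
				       size_t ie_len, s32 signal)
	{
		struct cfg80211_bss *bss;

		bss = cfg80211_inform_bss(wiphy, channel, bssid, tsf, capability,
					  beacon_interval, ie, ie_len, signal,
					  GFP_KERNEL);
		if (!bss)
			return;		/* allocation failed, nothing to drop */

		/* The caller keeps no pointer to the BSS, so drop the
		 * reference taken on its behalf. */
		cfg80211_put_bss(bss);
	}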
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index db4d9a0..af2ca1a 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -27,7 +27,7 @@
 #include "p54.h"
 #include "lmac.h"
 
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
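
The int-to-bool flips in this series (modparam_nohwcrypt here and in the rt2x00 drivers, ap_mode_default in mwl8k, suppress_linkstatus in orinoco) all serve the same purpose: they line the backing variable's type up with the bool parameter type declared to module_param()/module_param_named(), which the module parameter code has started to check at build time. The minimal correct shape, as a sketch:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* The backing variable must itself be bool when the parameter
	 * type given to module_param_named() is "bool". */
	static bool modparam_nohwcrypt;
	module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
	MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");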
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 78d0d69..7faed62 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -581,11 +581,7 @@
 	struct p54s_priv *priv = dev->priv;
 	unsigned long flags;
 
-	if (mutex_lock_interruptible(&priv->mutex)) {
-		/* FIXME: how to handle this error? */
-		return;
-	}
-
+	mutex_lock(&priv->mutex);
 	WARN_ON(priv->fw_state != FW_STATE_READY);
 
 	p54spi_power_off(priv);
@@ -704,7 +700,6 @@
 static struct spi_driver p54spi_driver = {
 	.driver = {
 		.name		= "p54spi",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
 
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 9b60968..f4d28c3 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -1083,15 +1083,4 @@
 	.soft_unbind = 1,
 };
 
-static int __init p54u_init(void)
-{
-	return usb_register(&p54u_driver);
-}
-
-static void __exit p54u_exit(void)
-{
-	usb_deregister(&p54u_driver);
-}
-
-module_init(p54u_init);
-module_exit(p54u_exit);
+module_usb_driver(p54u_driver);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 6ed9c32..42b97bc 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -242,7 +242,7 @@
 
 	skb_unlink(skb, &priv->tx_queue);
 	p54_tx_qos_accounting_free(priv, skb);
-	dev_kfree_skb_any(skb);
+	ieee80211_free_txskb(dev, skb);
 }
 EXPORT_SYMBOL_GPL(p54_free_skb);
 
@@ -788,7 +788,7 @@
 			    &hdr_flags, &aid, &burst_allowed);
 
 	if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
-		dev_kfree_skb_any(skb);
+		ieee80211_free_txskb(dev, skb);
 		return;
 	}
 
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index bc2ba80..4e44b1a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2493,323 +2493,7 @@
 	return ret;
 }
 
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
-	PRISM2_SET_ENCRYPTION = 6,
-	PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
-	PRISM2_HOSTAPD_MLME = 13,
-	PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
 #define PRISM54_SET_WPA			SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD			SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED	SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
-	offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
-	u32 cmd;
-	u8 sta_addr[ETH_ALEN];
-	union {
-	       struct {
-		       u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
-		       u32 flags;
-		       u32 err;
-		       u8 idx;
-		       u8 seq[8]; /* sequence counter (set: RX, get: TX) */
-		       u16 key_len;
-		       u8 key[0];
-		       } crypt;
-               struct {
-                       u8 len;
-                       u8 data[0];
-               } generic_elem;
-               struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
-                       u16 cmd;
-                       u16 reason_code;
-               } mlme;
-               struct {
-                       u8 ssid_len;
-                       u8 ssid[32];
-               } scan_req;
-       } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
-	struct prism2_hostapd_param *param,
-	int param_len)
-{
-	islpci_private *priv = netdev_priv(dev);
-	int rvalue = 0, force = 0;
-	int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
-	union oid_res_t r;
-
-	/* with the new API, it's impossible to get a NULL pointer.
-	 * New version of iwconfig set the IW_ENCODE_NOKEY flag
-	 * when no key is given, but older versions don't. */
-
-	if (param->u.crypt.key_len > 0) {
-		/* we have a key to set */
-		int index = param->u.crypt.idx;
-		int current_index;
-		struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
-		/* get the current key index */
-		rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
-		current_index = r.u;
-		/* Verify that the key is not marked as invalid */
-		if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
-			key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
-			    sizeof (param->u.crypt.key) : param->u.crypt.key_len;
-			memcpy(key.key, param->u.crypt.key, key.length);
-			if (key.length == 32)
-				/* we want WPA-PSK */
-				key.type = DOT11_PRIV_TKIP;
-			if ((index < 0) || (index > 3))
-				/* no index provided use the current one */
-				index = current_index;
-
-			/* now send the key to the card  */
-			rvalue |=
-			    mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
-					    &key);
-		}
-		/*
-		 * If a valid key is set, encryption should be enabled
-		 * (user may turn it off later).
-		 * This is also how "iwconfig ethX key on" works
-		 */
-		if ((index == current_index) && (key.length > 0))
-			force = 1;
-	} else {
-		int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
-		if ((index >= 0) && (index <= 3)) {
-			/* we want to set the key index */
-			rvalue |=
-			    mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
-					    &index);
-		} else {
-			if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
-				/* we cannot do anything. Complain. */
-				return -EINVAL;
-			}
-		}
-	}
-	/* now read the flags */
-	if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
-		/* Encoding disabled,
-		 * authen = DOT11_AUTH_OS;
-		 * invoke = 0;
-		 * exunencrypt = 0; */
-	}
-	if (param->u.crypt.flags & IW_ENCODE_OPEN)
-		/* Encode but accept non-encoded packets. No auth */
-		invoke = 1;
-	if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
-		/* Refuse non-encoded packets. Auth */
-		authen = DOT11_AUTH_BOTH;
-		invoke = 1;
-		exunencrypt = 1;
-	}
-	/* do the change if requested  */
-	if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
-		rvalue |=
-		    mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
-		rvalue |=
-		    mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
-		rvalue |=
-		    mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
-				    &exunencrypt);
-	}
-	return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
-	struct prism2_hostapd_param *param,
-	int param_len)
-{
-       islpci_private *priv = netdev_priv(ndev);
-       int max_len, len, alen, ret=0;
-       struct obj_attachment *attach;
-
-       len = param->u.generic_elem.len;
-       max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
-       if (max_len < 0 || max_len < len)
-               return -EINVAL;
-
-       alen = sizeof(*attach) + len;
-       attach = kzalloc(alen, GFP_KERNEL);
-       if (attach == NULL)
-               return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
-       /* Note: endianness is covered by mgt_set_varlen */
-
-       attach->type = (WLAN_FC_TYPE_MGMT << 2) |
-               (WLAN_FC_STYPE_ASSOC_REQ << 4);
-       attach->id = -1;
-       attach->size = len;
-       memcpy(attach->data, param->u.generic_elem.data, len);
-
-       ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
-       if (ret == 0) {
-               attach->type = (WLAN_FC_TYPE_MGMT << 2) |
-                       (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
-	       ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
-	       if (ret == 0)
-		       printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
-				       ndev->name);
-       }
-
-       kfree(attach);
-       return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
-	return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
-                     struct prism2_hostapd_param *param)
-{
-	islpci_private *priv = netdev_priv(ndev);
-	struct iw_request_info info;
-	int i, rvalue;
-	struct obj_bsslist *bsslist;
-	u32 noise = 0;
-	char *extra = "";
-	char *current_ev = "foo";
-	union oid_res_t r;
-
-	if (islpci_get_state(priv) < PRV_STATE_INIT) {
-		/* device is not ready, fail gently */
-		return 0;
-	}
-
-	/* first get the noise value. We will use it to report the link quality */
-	rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
-	noise = r.u;
-
-	/* Ask the device for a list of known bss. We can report at most
-	 * IW_MAX_AP=64 to the range struct. But the device won't repport anything
-	 * if you change the value of IWMAX_BSS=24.
-	 */
-	rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
-	bsslist = r.ptr;
-
-	info.cmd = PRISM54_HOSTAPD;
-	info.flags = 0;
-
-	/* ok now, scan the list and translate its info */
-	for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
-		current_ev = prism54_translate_bss(ndev, &info, current_ev,
-						   extra + IW_SCAN_MAX_DATA,
-						   &(bsslist->bsslist[i]),
-						   noise);
-	kfree(bsslist);
-
-	return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
-       struct prism2_hostapd_param *param;
-       int ret = 0;
-       u32 uwrq;
-
-       printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
-       if (p->length < sizeof(struct prism2_hostapd_param) ||
-           p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
-               return -EINVAL;
-
-	param = memdup_user(p->pointer, p->length);
-	if (IS_ERR(param))
-		return PTR_ERR(param);
-
-       switch (param->cmd) {
-       case PRISM2_SET_ENCRYPTION:
-	       printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
-			       ndev->name);
-               ret = prism2_ioctl_set_encryption(ndev, param, p->length);
-               break;
-       case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
-	       printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
-			       ndev->name);
-               ret = prism2_ioctl_set_generic_element(ndev, param,
-                                                      p->length);
-               break;
-       case PRISM2_HOSTAPD_MLME:
-	       printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
-			       ndev->name);
-               ret = prism2_ioctl_mlme(ndev, param);
-               break;
-       case PRISM2_HOSTAPD_SCAN_REQ:
-	       printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
-			       ndev->name);
-               ret = prism2_ioctl_scan_req(ndev, param);
-               break;
-	case PRISM54_SET_WPA:
-	       printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
-			       ndev->name);
-	       uwrq = 1;
-	       ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
-	       break;
-	case PRISM54_DROP_UNENCRYPTED:
-	       printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
-			       ndev->name);
-#if 0
-	       uwrq = 0x01;
-	       mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
-	       down_write(&priv->mib_sem);
-	       mgt_commit(priv);
-	       up_write(&priv->mib_sem);
-#endif
-	       /* Not necessary, as set_wpa does it, should we just do it here though? */
-	       ret = 0;
-	       break;
-       default:
-	       printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
-			       ndev->name);
-               ret = -EOPNOTSUPP;
-               break;
-       }
-
-       if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-               ret = -EFAULT;
-
-       kfree(param);
-
-       return ret;
-}
 
 static int
 prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@
 	.private_args = (struct iw_priv_args *) prism54_private_args,
 	.get_wireless_stats = prism54_get_wireless_stats,
 };
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-	struct iwreq *wrq = (struct iwreq *) rq;
-	int ret = -1;
-	switch (cmd) {
-		case PRISM54_HOSTAPD:
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		ret = prism54_hostapd(ndev, &wrq->u.data);
-		return ret;
-	}
-	return -EOPNOTSUPP;
-}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index bcfbfb9..a34bceb 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -43,8 +43,6 @@
 
 int prism54_set_mac_address(struct net_device *, void *);
 
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
 extern const struct iw_handler_def prism54_handler_def;
 
 #endif				/* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5d0f615..5970ff6 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -793,8 +793,8 @@
 static void islpci_ethtool_get_drvinfo(struct net_device *dev,
                                        struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static const struct ethtool_ops islpci_ethtool_ops = {
@@ -804,7 +804,6 @@
 static const struct net_device_ops islpci_netdev_ops = {
 	.ndo_open 		= islpci_open,
 	.ndo_stop		= islpci_close,
-	.ndo_do_ioctl		= prism54_ioctl,
 	.ndo_start_xmit		= islpci_eth_transmit,
 	.ndo_tx_timeout		= islpci_eth_tx_timeout,
 	.ndo_set_mac_address 	= prism54_set_mac_address,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0021e49..04fec1f 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2426,7 +2426,7 @@
 			    unsigned int pkt_addr, int rx_len)
 {
 	UCHAR buff[256];
-	struct rx_msg *msg = (struct rx_msg *)buff;
+	struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
 
 	del_timer(&local->timer);
 
@@ -2513,7 +2513,7 @@
 			      unsigned int pkt_addr, int rx_len)
 {
 /*  UCHAR buff[256];
-    struct rx_msg *msg = (struct rx_msg *)buff;
+    struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
 */
 	pr_debug("Deauthentication frame received\n");
 	local->authentication_state = UNAUTHENTICATED;
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index d7646f2..3c3b98b1 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -566,9 +566,9 @@
     UCHAR hdr_3;
     UCHAR hdr_4;
 };
-struct rx_msg {
+struct ray_rx_msg {
     struct mac_header mac;
-    UCHAR  var[1];
+    UCHAR  var[0];
 };
 
 struct tx_msg {
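
Besides the struct rename, the rayctl.h change from UCHAR var[1] to UCHAR var[0] turns the trailing member into a zero-length array, the usual GNU C idiom for a variable-length payload that follows a header: sizeof() then stops counting a phantom payload byte and the allocation arithmetic becomes header size plus payload size. A standalone illustration with hypothetical struct names:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct msg_old { unsigned char hdr[4]; unsigned char var[1]; };
	struct msg_new { unsigned char hdr[4]; unsigned char var[0]; }; /* zero-length array */

	int main(void)
	{
		size_t payload = 10;
		struct msg_new *m;

		/* With var[1] the struct size already includes one payload byte. */
		printf("sizeof old: %zu, sizeof new: %zu\n",
		       sizeof(struct msg_old), sizeof(struct msg_new));

		/* Header plus payload is simply sizeof(*m) + payload. */
		m = malloc(sizeof(*m) + payload);
		if (!m)
			return 1;
		memset(m->var, 0xff, payload);	/* payload bytes live past the struct */
		free(m);
		return 0;
	}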
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 0c13840..a330c69 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@
 	NDIS_80211_POWER_MODE_FAST_PSP,
 };
 
+enum ndis_80211_pmkid_cand_list_flag_bits {
+	NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
+};
+
 struct ndis_80211_auth_request {
 	__le32 length;
 	u8 bssid[6];
@@ -387,19 +391,17 @@
 struct ndis_80211_bssid_info {
 	u8 bssid[6];
 	u8 pmkid[16];
-};
+} __packed;
 
 struct ndis_80211_pmkid {
 	__le32 length;
 	__le32 bssid_info_count;
 	struct ndis_80211_bssid_info bssid_info[0];
-};
+} __packed;
 
 /*
  *  private data
  */
-#define NET_TYPE_11FB	0
-
 #define CAP_MODE_80211A		1
 #define CAP_MODE_80211B		2
 #define CAP_MODE_80211G		4
@@ -414,6 +416,7 @@
 #define RNDIS_WLAN_ALG_TKIP	(1<<1)
 #define RNDIS_WLAN_ALG_CCMP	(1<<2)
 
+#define RNDIS_WLAN_NUM_KEYS		4
 #define RNDIS_WLAN_KEY_MGMT_NONE	0
 #define RNDIS_WLAN_KEY_MGMT_802_1X	(1<<0)
 #define RNDIS_WLAN_KEY_MGMT_PSK		(1<<1)
@@ -516,7 +519,7 @@
 
 	/* encryption stuff */
 	int  encr_tx_key_index;
-	struct rndis_wlan_encr_key encr_keys[4];
+	struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
 	int  wpa_version;
 
 	u8 command_buffer[COMMAND_BUFFER_SIZE];
@@ -1346,6 +1349,32 @@
 	return ret;
 }
 
+static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
+						     u16 *beacon_interval)
+{
+	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+	struct ieee80211_channel *channel;
+	struct ndis_80211_conf config;
+	int len, ret;
+
+	/* Get channel and beacon interval */
+	len = sizeof(config);
+	ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+	netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+				__func__, ret);
+	if (ret < 0)
+		return NULL;
+
+	channel = ieee80211_get_channel(priv->wdev.wiphy,
+				KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+	if (!channel)
+		return NULL;
+
+	if (beacon_interval)
+		*beacon_interval = le16_to_cpu(config.beacon_period);
+	return channel;
+}
+
 /* index must be 0 - N, as per NDIS  */
 static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
 								int index)
@@ -1535,6 +1564,9 @@
 	bool is_wpa;
 	int ret;
 
+	if (index >= RNDIS_WLAN_NUM_KEYS)
+		return -ENOENT;
+
 	if (priv->encr_keys[index].len == 0)
 		return 0;
 
@@ -1972,11 +2004,12 @@
 	return ret;
 }
 
-static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
-					struct ndis_80211_bssid_ex *bssid)
+static bool rndis_bss_info_update(struct usbnet *usbdev,
+				  struct ndis_80211_bssid_ex *bssid)
 {
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
 	struct ieee80211_channel *channel;
+	struct cfg80211_bss *bss;
 	s32 signal;
 	u64 timestamp;
 	u16 capability;
@@ -2015,9 +2048,12 @@
 	capability = le16_to_cpu(fixed->capabilities);
 	beacon_interval = le16_to_cpu(fixed->beacon_interval);
 
-	return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
+	bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
 		timestamp, capability, beacon_interval, ie, ie_len, signal,
 		GFP_KERNEL);
+	cfg80211_put_bss(bss);
+
+	return (bss != NULL);
 }
 
 static struct ndis_80211_bssid_ex *next_bssid_list_item(
@@ -2451,6 +2487,9 @@
 
 	netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
 
+	if (key_index >= RNDIS_WLAN_NUM_KEYS)
+		return -ENOENT;
+
 	priv->encr_tx_key_index = key_index;
 
 	if (is_wpa_key(priv, key_index))
@@ -2639,12 +2678,12 @@
 {
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
 	struct ieee80211_channel *channel;
-	struct ndis_80211_conf config;
 	struct ndis_80211_ssid ssid;
+	struct cfg80211_bss *bss;
 	s32 signal;
 	u64 timestamp;
 	u16 capability;
-	u16 beacon_interval;
+	u16 beacon_interval = 0;
 	__le32 rssi;
 	u8 ie_buf[34];
 	int len, ret, ie_len;
@@ -2669,22 +2708,10 @@
 	}
 
 	/* Get channel and beacon interval */
-	len = sizeof(config);
-	ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
-	netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
-				__func__, ret);
-	if (ret >= 0) {
-		beacon_interval = le16_to_cpu(config.beacon_period);
-		channel = ieee80211_get_channel(priv->wdev.wiphy,
-				KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
-		if (!channel) {
-			netdev_warn(usbdev->net, "%s(): could not get channel."
-						 "\n", __func__);
-			return;
-		}
-	} else {
-		netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
-					 __func__);
+	channel = get_current_channel(usbdev, &beacon_interval);
+	if (!channel) {
+		netdev_warn(usbdev->net, "%s(): could not get channel.\n",
+					__func__);
 		return;
 	}
 
@@ -2714,9 +2741,10 @@
 		bssid, (u32)timestamp, capability, beacon_interval, ie_len,
 		ssid.essid, signal);
 
-	cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
+	bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
 		timestamp, capability, beacon_interval, ie_buf, ie_len,
 		signal, GFP_KERNEL);
+	cfg80211_put_bss(bss);
 }
 
 /*
@@ -2828,8 +2856,9 @@
 						req_ie_len, resp_ie,
 						resp_ie_len, 0, GFP_KERNEL);
 		else
-			cfg80211_roamed(usbdev->net, NULL, bssid,
-					req_ie, req_ie_len,
+			cfg80211_roamed(usbdev->net,
+					get_current_channel(usbdev, NULL),
+					bssid, req_ie, req_ie_len,
 					resp_ie, resp_ie_len, GFP_KERNEL);
 	} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
 		cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -2995,25 +3024,13 @@
 	for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
 		struct ndis_80211_pmkid_candidate *cand =
 						&cand_list->candidate_list[i];
+		bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
 
-		netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
-			   i, le32_to_cpu(cand->flags), cand->bssid);
+		netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
+			   i, le32_to_cpu(cand->flags), preauth, cand->bssid);
 
-#if 0
-		struct iw_pmkid_cand pcand;
-		union iwreq_data wrqu;
-
-		memset(&pcand, 0, sizeof(pcand));
-		if (le32_to_cpu(cand->flags) & 0x01)
-			pcand.flags |= IW_PMKID_CAND_PREAUTH;
-		pcand.index = i;
-		memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
-
-		memset(&wrqu, 0, sizeof(wrqu));
-		wrqu.data.length = sizeof(pcand);
-		wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
-								(u8 *)&pcand);
-#endif
+		cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
+						preauth, GFP_ATOMIC);
 	}
 }
 
@@ -3754,17 +3771,7 @@
 	.resume =	usbnet_resume,
 };
 
-static int __init rndis_wlan_init(void)
-{
-	return usb_register(&rndis_wlan_driver);
-}
-module_init(rndis_wlan_init);
-
-static void __exit rndis_wlan_exit(void)
-{
-	usb_deregister(&rndis_wlan_driver);
-}
-module_exit(rndis_wlan_exit);
+module_usb_driver(rndis_wlan_driver);
 
 MODULE_AUTHOR("Bjorge Dijkstra");
 MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 53c5f87..1de9c75 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -1982,15 +1982,4 @@
 	.resume		= rt2x00usb_resume,
 };
 
-static int __init rt2500usb_init(void)
-{
-	return usb_register(&rt2500usb_driver);
-}
-
-static void __exit rt2500usb_exit(void)
-{
-	usb_deregister(&rt2500usb_driver);
-}
-
-module_init(rt2500usb_init);
-module_exit(rt2500usb_exit);
+module_usb_driver(rt2500usb_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4778620..2571a2f 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -50,7 +50,7 @@
  * RF2853 2.4G/5G 3T3R
  * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
  * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
- * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
  * RF5370 2.4G 1T1R
  * RF5390 2.4G 1T1R
  */
@@ -66,7 +66,7 @@
 #define RF2853				0x000a
 #define RF3320				0x000b
 #define RF3322				0x000c
-#define RF3853				0x000d
+#define RF3053				0x000d
 #define RF5370				0x5370
 #define RF5390				0x5390
 
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1ba079d..22a1a8f 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1203,8 +1203,10 @@
 			   !(filter_flags & FIF_CONTROL));
 	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
 			   !(filter_flags & FIF_PSPOLL));
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
-	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+			   !(filter_flags & FIF_CONTROL));
 	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
 			   !(filter_flags & FIF_CONTROL));
 	rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
@@ -1942,19 +1944,24 @@
 		info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
 	}
 
-	if (rt2x00_rf(rt2x00dev, RF2020) ||
-	    rt2x00_rf(rt2x00dev, RF3020) ||
-	    rt2x00_rf(rt2x00dev, RF3021) ||
-	    rt2x00_rf(rt2x00dev, RF3022) ||
-	    rt2x00_rf(rt2x00dev, RF3320))
+	switch (rt2x00dev->chip.rf) {
+	case RF2020:
+	case RF3020:
+	case RF3021:
+	case RF3022:
+	case RF3320:
 		rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
-	else if (rt2x00_rf(rt2x00dev, RF3052))
+		break;
+	case RF3052:
 		rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
-	else if (rt2x00_rf(rt2x00dev, RF5370) ||
-		 rt2x00_rf(rt2x00dev, RF5390))
+		break;
+	case RF5370:
+	case RF5390:
 		rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
-	else
+		break;
+	default:
 		rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
+	}
 
 	/*
 	 * Change BBP settings
@@ -3930,15 +3937,18 @@
 	rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
 			value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
 
-	if (!rt2x00_rt(rt2x00dev, RT2860) &&
-	    !rt2x00_rt(rt2x00dev, RT2872) &&
-	    !rt2x00_rt(rt2x00dev, RT2883) &&
-	    !rt2x00_rt(rt2x00dev, RT3070) &&
-	    !rt2x00_rt(rt2x00dev, RT3071) &&
-	    !rt2x00_rt(rt2x00dev, RT3090) &&
-	    !rt2x00_rt(rt2x00dev, RT3390) &&
-	    !rt2x00_rt(rt2x00dev, RT3572) &&
-	    !rt2x00_rt(rt2x00dev, RT5390)) {
+	switch (rt2x00dev->chip.rt) {
+	case RT2860:
+	case RT2872:
+	case RT2883:
+	case RT3070:
+	case RT3071:
+	case RT3090:
+	case RT3390:
+	case RT3572:
+	case RT5390:
+		break;
+	default:
 		ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
 		return -ENODEV;
 	}
@@ -4552,6 +4562,9 @@
 		survey->channel_time_ext_busy = busy_ext / 1000;
 	}
 
+	if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+		survey->filled |= SURVEY_INFO_IN_USE;
+
 	return 0;
 
 }
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index da48c8a..4941a1a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -50,7 +50,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3778763..262ee9e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -400,10 +400,10 @@
 	/*
 	 * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
 	 * TXWI + 802.11 header + L2 pad + payload + pad,
-	 * so need to decrease size of TXINFO and USB end pad.
+	 * so need to decrease size of TXINFO.
 	 */
 	rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
-			   entry->skb->len - TXINFO_DESC_SIZE - 4);
+			   roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE);
 	rt2x00_set_field32(&word, TXINFO_W0_WIV,
 			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
 	rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -421,37 +421,20 @@
 	skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
 }
 
-static void rt2800usb_write_tx_data(struct queue_entry *entry,
-					struct txentry_desc *txdesc)
-{
-	unsigned int len;
-	int err;
-
-	rt2800_write_tx_data(entry, txdesc);
-
-	/*
-	 * pad(1~3 bytes) is added after each 802.11 payload.
-	 * USB end pad(4 bytes) is added at each USB bulk out packet end.
-	 * TX frame format is :
-	 * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
-	 *                 |<------------- tx_pkt_len ------------->|
-	 */
-	len = roundup(entry->skb->len, 4) + 4;
-	err = skb_padto(entry->skb, len);
-	if (unlikely(err)) {
-		WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
-		return;
-	}
-
-	entry->skb->len = len;
-}
-
 /*
  * TX data initialization
  */
 static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
 {
-	return entry->skb->len;
+	/*
+	 * pad(1~3 bytes) is needed after each 802.11 payload.
+	 * USB end pad(4 bytes) is needed at each USB bulk out packet end.
+	 * TX frame format is :
+	 * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
+	 *                 |<------------- tx_pkt_len ------------->|
+	 */
+
+	return roundup(entry->skb->len, 4) + 4;
 }
 
 /*
@@ -807,7 +790,7 @@
 	.flush_queue		= rt2x00usb_flush_queue,
 	.tx_dma_done		= rt2800usb_tx_dma_done,
 	.write_tx_desc		= rt2800usb_write_tx_desc,
-	.write_tx_data		= rt2800usb_write_tx_data,
+	.write_tx_data		= rt2800_write_tx_data,
 	.write_beacon		= rt2800_write_beacon,
 	.clear_beacon		= rt2800_clear_beacon,
 	.get_tx_data_len	= rt2800usb_get_tx_data_len,
@@ -914,12 +897,14 @@
 	{ USB_DEVICE(0x050d, 0x8053) },
 	{ USB_DEVICE(0x050d, 0x805c) },
 	{ USB_DEVICE(0x050d, 0x815c) },
+	{ USB_DEVICE(0x050d, 0x825a) },
 	{ USB_DEVICE(0x050d, 0x825b) },
 	{ USB_DEVICE(0x050d, 0x935a) },
 	{ USB_DEVICE(0x050d, 0x935b) },
 	/* Buffalo */
 	{ USB_DEVICE(0x0411, 0x00e8) },
 	{ USB_DEVICE(0x0411, 0x0158) },
+	{ USB_DEVICE(0x0411, 0x015d) },
 	{ USB_DEVICE(0x0411, 0x016f) },
 	{ USB_DEVICE(0x0411, 0x01a2) },
 	/* Corega */
@@ -934,6 +919,8 @@
 	{ USB_DEVICE(0x07d1, 0x3c0e) },
 	{ USB_DEVICE(0x07d1, 0x3c0f) },
 	{ USB_DEVICE(0x07d1, 0x3c11) },
+	{ USB_DEVICE(0x07d1, 0x3c13) },
+	{ USB_DEVICE(0x07d1, 0x3c15) },
 	{ USB_DEVICE(0x07d1, 0x3c16) },
 	/* Draytek */
 	{ USB_DEVICE(0x07fa, 0x7712) },
@@ -943,6 +930,7 @@
 	{ USB_DEVICE(0x7392, 0x7711) },
 	{ USB_DEVICE(0x7392, 0x7717) },
 	{ USB_DEVICE(0x7392, 0x7718) },
+	{ USB_DEVICE(0x7392, 0x7722) },
 	/* Encore */
 	{ USB_DEVICE(0x203d, 0x1480) },
 	{ USB_DEVICE(0x203d, 0x14a9) },
@@ -976,6 +964,8 @@
 	{ USB_DEVICE(0x13b1, 0x0031) },
 	{ USB_DEVICE(0x1737, 0x0070) },
 	{ USB_DEVICE(0x1737, 0x0071) },
+	{ USB_DEVICE(0x1737, 0x0077) },
+	{ USB_DEVICE(0x1737, 0x0078) },
 	/* Logitec */
 	{ USB_DEVICE(0x0789, 0x0162) },
 	{ USB_DEVICE(0x0789, 0x0163) },
@@ -999,9 +989,13 @@
 	{ USB_DEVICE(0x0db0, 0x871b) },
 	{ USB_DEVICE(0x0db0, 0x871c) },
 	{ USB_DEVICE(0x0db0, 0x899a) },
+	/* Ovislink */
+	{ USB_DEVICE(0x1b75, 0x3071) },
+	{ USB_DEVICE(0x1b75, 0x3072) },
 	/* Para */
 	{ USB_DEVICE(0x20b8, 0x8888) },
 	/* Pegatron */
+	{ USB_DEVICE(0x1d4d, 0x0002) },
 	{ USB_DEVICE(0x1d4d, 0x000c) },
 	{ USB_DEVICE(0x1d4d, 0x000e) },
 	{ USB_DEVICE(0x1d4d, 0x0011) },
@@ -1054,7 +1048,9 @@
 	/* Sparklan */
 	{ USB_DEVICE(0x15a9, 0x0006) },
 	/* Sweex */
+	{ USB_DEVICE(0x177f, 0x0153) },
 	{ USB_DEVICE(0x177f, 0x0302) },
+	{ USB_DEVICE(0x177f, 0x0313) },
 	/* U-Media */
 	{ USB_DEVICE(0x157e, 0x300e) },
 	{ USB_DEVICE(0x157e, 0x3013) },
@@ -1138,27 +1134,24 @@
 	{ USB_DEVICE(0x13d3, 0x3322) },
 	/* Belkin */
 	{ USB_DEVICE(0x050d, 0x1003) },
-	{ USB_DEVICE(0x050d, 0x825a) },
 	/* Buffalo */
 	{ USB_DEVICE(0x0411, 0x012e) },
 	{ USB_DEVICE(0x0411, 0x0148) },
 	{ USB_DEVICE(0x0411, 0x0150) },
-	{ USB_DEVICE(0x0411, 0x015d) },
 	/* Corega */
 	{ USB_DEVICE(0x07aa, 0x0041) },
 	{ USB_DEVICE(0x07aa, 0x0042) },
 	{ USB_DEVICE(0x18c5, 0x0008) },
 	/* D-Link */
 	{ USB_DEVICE(0x07d1, 0x3c0b) },
-	{ USB_DEVICE(0x07d1, 0x3c13) },
-	{ USB_DEVICE(0x07d1, 0x3c15) },
 	{ USB_DEVICE(0x07d1, 0x3c17) },
 	{ USB_DEVICE(0x2001, 0x3c17) },
 	/* Edimax */
 	{ USB_DEVICE(0x7392, 0x4085) },
-	{ USB_DEVICE(0x7392, 0x7722) },
 	/* Encore */
 	{ USB_DEVICE(0x203d, 0x14a1) },
+	/* Fujitsu Stylistic 550 */
+	{ USB_DEVICE(0x1690, 0x0761) },
 	/* Gemtek */
 	{ USB_DEVICE(0x15a9, 0x0010) },
 	/* Gigabyte */
@@ -1170,20 +1163,13 @@
 	/* LevelOne */
 	{ USB_DEVICE(0x1740, 0x0605) },
 	{ USB_DEVICE(0x1740, 0x0615) },
-	/* Linksys */
-	{ USB_DEVICE(0x1737, 0x0077) },
-	{ USB_DEVICE(0x1737, 0x0078) },
 	/* Logitec */
 	{ USB_DEVICE(0x0789, 0x0168) },
 	{ USB_DEVICE(0x0789, 0x0169) },
 	/* Motorola */
 	{ USB_DEVICE(0x100d, 0x9032) },
-	/* Ovislink */
-	{ USB_DEVICE(0x1b75, 0x3071) },
-	{ USB_DEVICE(0x1b75, 0x3072) },
 	/* Pegatron */
 	{ USB_DEVICE(0x05a6, 0x0101) },
-	{ USB_DEVICE(0x1d4d, 0x0002) },
 	{ USB_DEVICE(0x1d4d, 0x0010) },
 	/* Planex */
 	{ USB_DEVICE(0x2019, 0x5201) },
@@ -1202,9 +1188,6 @@
 	{ USB_DEVICE(0x083a, 0xc522) },
 	{ USB_DEVICE(0x083a, 0xd522) },
 	{ USB_DEVICE(0x083a, 0xf511) },
-	/* Sweex */
-	{ USB_DEVICE(0x177f, 0x0153) },
-	{ USB_DEVICE(0x177f, 0x0313) },
 	/* Zyxel */
 	{ USB_DEVICE(0x0586, 0x341a) },
 #endif
@@ -1234,15 +1217,4 @@
 	.resume		= rt2x00usb_resume,
 };
 
-static int __init rt2800usb_init(void)
-{
-	return usb_register(&rt2800usb_driver);
-}
-
-static void __exit rt2800usb_exit(void)
-{
-	usb_deregister(&rt2800usb_driver);
-}
-
-module_init(rt2800usb_init);
-module_exit(rt2800usb_exit);
+module_usb_driver(rt2800usb_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 99ff12d..b03b22c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,9 +189,9 @@
 #define RT3090		0x3090	/* 2.4GHz PCIe */
 #define RT3390		0x3390
 #define RT3572		0x3572
-#define RT3593		0x3593	/* PCIe */
+#define RT3593		0x3593
 #define RT3883		0x3883	/* WSOC */
-#define RT5390         0x5390  /* 2.4GHz */
+#define RT5390		0x5390  /* 2.4GHz */
 
 	u16 rf;
 	u16 rev;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index edd317f..c3e1aa7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -831,11 +831,11 @@
 	if (spec->supported_rates & SUPPORT_RATE_OFDM)
 		num_rates += 8;
 
-	channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+	channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
 	if (!channels)
 		return -ENOMEM;
 
-	rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+	rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
 	if (!rates)
 		goto exit_free_channels;
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bf0acff..ede3c58 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -160,7 +160,7 @@
  exit_fail:
 	rt2x00queue_pause_queue(queue);
  exit_free_skb:
-	dev_kfree_skb_any(skb);
+	ieee80211_free_txskb(hw, skb);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_tx);
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1e31050..2eea386 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -298,12 +298,22 @@
 		return false;
 
 	/*
-	 * USB devices cannot blindly pass the skb->len as the
-	 * length of the data to usb_fill_bulk_urb. Pass the skb
-	 * to the driver to determine what the length should be.
+	 * USB devices require certain padding at the end of each frame
+	 * and urb. That padding is not included in skbs. Pass the entry
+	 * to the driver to determine what the overall length should be.
 	 */
 	length = rt2x00dev->ops->lib->get_tx_data_len(entry);
 
+	status = skb_padto(entry->skb, length);
+	if (unlikely(status)) {
+		/* TODO: report something more appropriate than IO_FAILED. */
+		WARNING(rt2x00dev, "TX SKB padding error, out of memory\n");
+		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+		rt2x00lib_dmadone(entry);
+
+		return false;
+	}
+
 	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
 			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
 			  entry->skb->data, length,
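
The padding responsibilities in the rt2800usb/rt2x00usb hunks above are split so that get_tx_data_len() reports the final on-the-wire length (frame rounded up to a 4-byte boundary plus the 4-byte USB end pad) and the generic USB kick path pads the skb to that length with skb_padto(). A quick worked example of the arithmetic, using the same rounding the kernel's generic roundup() macro performs:

	#include <stdio.h>

	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		unsigned int skb_len = 37;			/* TXWI + 802.11 header + payload */
		unsigned int frame   = roundup(skb_len, 4);	/* -> 40: frame padded to 4 bytes */
		unsigned int urb_len = frame + 4;		/* -> 44: plus the USB end pad */

		printf("skb %u -> frame %u -> urb %u\n", skb_len, frame, urb_len);
		return 0;
	}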
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index bf55b4a..e0c6d11 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -41,7 +41,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index cfb19db..e477a96 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -2528,15 +2528,4 @@
 	.resume		= rt2x00usb_resume,
 };
 
-static int __init rt73usb_init(void)
-{
-	return usb_register(&rt73usb_driver);
-}
-
-static void __exit rt73usb_exit(void)
-{
-	usb_deregister(&rt73usb_driver);
-}
-
-module_init(rt73usb_init);
-module_exit(rt73usb_exit);
+module_usb_driver(rt73usb_driver);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4a78f9e..638fbef 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1592,15 +1592,4 @@
 	.disconnect	= __devexit_p(rtl8187_disconnect),
 };
 
-static int __init rtl8187_init(void)
-{
-	return usb_register(&rtl8187_driver);
-}
-
-static void __exit rtl8187_exit(void)
-{
-	usb_deregister(&rtl8187_driver);
-}
-
-module_init(rtl8187_init);
-module_exit(rtl8187_exit);
+module_usb_driver(rtl8187_driver);
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index b4ce934..8d6eb0f 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -345,9 +345,9 @@
 	if (is_valid_ether_addr(rtlefuse->dev_addr)) {
 		SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
 	} else {
-		u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
-		get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
-		SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+		u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+		get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
+		SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
 	}
 
 }
@@ -396,7 +396,7 @@
 	u8 valid = 0;
 
 	/*set init state to on */
-	rtlpriv->rfkill.rfkill_state = 1;
+	rtlpriv->rfkill.rfkill_state = true;
 	wiphy_rfkill_set_hw_state(hw->wiphy, 0);
 
 	radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
@@ -448,12 +448,12 @@
 
 	/* <4> locks */
 	mutex_init(&rtlpriv->locks.conf_mutex);
+	mutex_init(&rtlpriv->locks.ps_mutex);
 	spin_lock_init(&rtlpriv->locks.ips_lock);
 	spin_lock_init(&rtlpriv->locks.irq_th_lock);
 	spin_lock_init(&rtlpriv->locks.h2c_lock);
 	spin_lock_init(&rtlpriv->locks.rf_ps_lock);
 	spin_lock_init(&rtlpriv->locks.rf_lock);
-	spin_lock_init(&rtlpriv->locks.lps_lock);
 	spin_lock_init(&rtlpriv->locks.waitq_lock);
 	spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
 
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae9059..f66b575 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@
 	SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
 
 #define SET_80211_PS_POLL_AID(_hdr, _val)		\
-	(*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+	(*(u16 *)((u8 *)(_hdr) + 2) = _val)
 #define SET_80211_PS_POLL_BSSID(_hdr, _val)		\
 	memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
 #define SET_80211_PS_POLL_TA(_hdr, _val)		\
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index eb61061..39e0907 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -78,7 +78,7 @@
 	u8 init_aspm;
 
 	ppsc->reg_rfps_level = 0;
-	ppsc->support_aspm = 0;
+	ppsc->support_aspm = false;
 
 	/*Update PCI ASPM setting */
 	ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@
 		if (ieee80211_is_nullfunc(fc)) {
 			if (ieee80211_has_pm(fc)) {
 				rtlpriv->mac80211.offchan_delay = true;
-				rtlpriv->psc.state_inap = 1;
+				rtlpriv->psc.state_inap = true;
 			} else {
-				rtlpriv->psc.state_inap = 0;
+				rtlpriv->psc.state_inap = false;
 			}
 		}
 
@@ -610,7 +610,7 @@
 	if (((rtlpriv->link_info.num_rx_inperiod +
 		rtlpriv->link_info.num_tx_inperiod) > 8) ||
 		(rtlpriv->link_info.num_rx_inperiod > 2)) {
-		tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+		schedule_work(&rtlpriv->works.lps_leave_work);
 	}
 }
 
@@ -736,7 +736,7 @@
 		if (((rtlpriv->link_info.num_rx_inperiod +
 			rtlpriv->link_info.num_tx_inperiod) > 8) ||
 			(rtlpriv->link_info.num_rx_inperiod > 2)) {
-			tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+			schedule_work(&rtlpriv->works.lps_leave_work);
 		}
 
 		dev_kfree_skb_any(skb);
@@ -780,6 +780,7 @@
 	unsigned long flags;
 	u32 inta = 0;
 	u32 intb = 0;
+	irqreturn_t ret = IRQ_HANDLED;
 
 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
@@ -787,8 +788,10 @@
 	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
 
 	/*Shared IRQ or HW disappared */
-	if (!inta || inta == 0xffff)
+	if (!inta || inta == 0xffff) {
+		ret = IRQ_NONE;
 		goto done;
+	}
 
 	/*<1> beacon related */
 	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -890,12 +893,9 @@
 	if (rtlpriv->rtlhal.earlymode_enable)
 		tasklet_schedule(&rtlpriv->works.irq_tasklet);
 
-	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-	return IRQ_HANDLED;
-
 done:
 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-	return IRQ_HANDLED;
+	return ret;
 }
 
 static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
@@ -903,11 +903,6 @@
 	_rtl_pci_tx_chk_waitq(hw);
 }
 
-static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
-{
-	rtl_lps_leave(hw);
-}
-
 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -945,6 +940,15 @@
 	return;
 }
 
+static void rtl_lps_leave_work_callback(struct work_struct *work)
+{
+	struct rtl_works *rtlworks =
+	    container_of(work, struct rtl_works, lps_leave_work);
+	struct ieee80211_hw *hw = rtlworks->hw;
+
+	rtl_lps_leave(hw);
+}
+
 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
 {
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1006,9 +1010,7 @@
 	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
 		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
 		     (unsigned long)hw);
-	tasklet_init(&rtlpriv->works.ips_leave_tasklet,
-		     (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
-		     (unsigned long)hw);
+	INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
 }
 
 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1478,7 +1480,7 @@
 
 	synchronize_irq(rtlpci->pdev->irq);
 	tasklet_kill(&rtlpriv->works.irq_tasklet);
-	tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+	cancel_work_sync(&rtlpriv->works.lps_leave_work);
 
 	flush_workqueue(rtlpriv->works.rtl_wq);
 	destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1553,7 +1555,7 @@
 	set_hal_stop(rtlhal);
 
 	rtlpriv->cfg->ops->disable_interrupt(hw);
-	tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+	cancel_work_sync(&rtlpriv->works.lps_leave_work);
 
 	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
 	while (ppsc->rfchange_inprogress) {
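Two patterns in the pci.c hunks are worth spelling out: a handler on a shared interrupt line must return IRQ_NONE when its device has nothing pending (otherwise the IRQ core cannot detect a stuck line), and work that may sleep, such as leaving low-power state, belongs in a work_struct rather than a tasklet, because tasklets run in softirq context. A minimal, self-contained sketch under assumed names (my_dev, my_isr, the register layout and helpers are illustrative, not the driver's symbols):

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/io.h>

struct my_dev {
	void __iomem *regs;		/* assumed: status register at offset 0 */
	spinlock_t irq_lock;
	struct work_struct lps_leave_work;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&dev->irq_lock, flags);
	status = readl(dev->regs);
	if (!status || status == 0xffff) {
		/* shared IRQ fired for another device, or the HW is gone */
		ret = IRQ_NONE;
		goto out;
	}
	/* ... acknowledge and handle the pending events ... */
	schedule_work(&dev->lps_leave_work);	/* defer sleepable work */
out:
	spin_unlock_irqrestore(&dev->irq_lock, flags);
	return ret;
}

static void my_lps_leave_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  lps_leave_work);
	/* runs in process context, so sleeping locks are allowed here */
	/* ... bring the radio back to full power ... */
	(void)dev;
}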
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 55c8e50..130fdd9 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -237,11 +237,12 @@
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	enum rf_pwrstate rtstate;
+	unsigned long flags;
 
 	if (mac->opmode != NL80211_IFTYPE_STATION)
 		return;
 
-	spin_lock(&rtlpriv->locks.ips_lock);
+	spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
 
 	if (ppsc->inactiveps) {
 		rtstate = ppsc->rfpwr_state;
@@ -257,7 +258,7 @@
 		}
 	}
 
-	spin_unlock(&rtlpriv->locks.ips_lock);
+	spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
 }
 
 /*for FW LPS*/
@@ -395,7 +396,7 @@
 	if (mac->link_state != MAC80211_LINKED)
 		return;
 
-	spin_lock_irq(&rtlpriv->locks.lps_lock);
+	mutex_lock(&rtlpriv->locks.ps_mutex);
 
 	/* Idle for a while if we connect to AP a while ago. */
 	if (mac->cnt_after_linked >= 2) {
@@ -407,7 +408,7 @@
 		}
 	}
 
-	spin_unlock_irq(&rtlpriv->locks.lps_lock);
+	mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 /*Leave the leisure power save mode.*/
@@ -416,9 +417,8 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	unsigned long flags;
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
+	mutex_lock(&rtlpriv->locks.ps_mutex);
 
 	if (ppsc->fwctrl_lps) {
 		if (ppsc->dot11_psmode != EACTIVE) {
@@ -439,7 +439,7 @@
 			rtl_lps_set_psmode(hw, EACTIVE);
 		}
 	}
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
+	mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 /* For sw LPS*/
@@ -540,9 +540,9 @@
 		RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
 	}
 
-	spin_lock_irq(&rtlpriv->locks.lps_lock);
+	mutex_lock(&rtlpriv->locks.ps_mutex);
 	rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
-	spin_unlock_irq(&rtlpriv->locks.lps_lock);
+	mutex_unlock(&rtlpriv->locks.ps_mutex);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -575,9 +575,9 @@
 	if (rtlpriv->link_info.busytraffic)
 		return;
 
-	spin_lock_irq(&rtlpriv->locks.lps_lock);
+	mutex_lock(&rtlpriv->locks.ps_mutex);
 	rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
-	spin_unlock_irq(&rtlpriv->locks.lps_lock);
+	mutex_unlock(&rtlpriv->locks.ps_mutex);
 
 	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
 		!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
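The ps.c hunks replace the lps_lock spinlock (previously taken with spin_lock_irq) with a sleeping mutex, since the LPS enter/leave paths can block, and switch the IPS lock to the irqsave variant so the section is safe regardless of the caller's context. A minimal sketch of the two locking styles, assuming a hypothetical my_ps structure:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct my_ps {
	struct mutex ps_mutex;	/* protects sections that may sleep */
	spinlock_t ips_lock;	/* protects short, atomic sections */
	bool in_powersave;
};

static void my_ps_init(struct my_ps *ps)
{
	mutex_init(&ps->ps_mutex);
	spin_lock_init(&ps->ips_lock);
}

static void my_lps_leave(struct my_ps *ps)
{
	/* may sleep (e.g. waits on the firmware), so a mutex is required */
	mutex_lock(&ps->ps_mutex);
	ps->in_powersave = false;
	mutex_unlock(&ps->ps_mutex);
}

static void my_ips_update(struct my_ps *ps, bool on)
{
	unsigned long flags;

	/* irqsave variant: correct whether or not IRQs are already off */
	spin_lock_irqsave(&ps->ips_lock, flags);
	ps->in_powersave = on;
	spin_unlock_irqrestore(&ps->ips_lock, flags);
}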
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 950c65a..931d979 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -73,6 +73,34 @@
 	}
 }
 
+static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
+				u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
+	u8 *bufferPtr = (u8 *) buffer;
+	u32 i, offset, blockCount, remainSize;
+
+	blockCount = size / blockSize;
+	remainSize = size % blockSize;
+
+	for (i = 0; i < blockCount; i++) {
+		offset = i * blockSize;
+		rtlpriv->io.writeN_sync(rtlpriv,
+					(FW_8192C_START_ADDRESS + offset),
+					(void *)(bufferPtr + offset),
+					blockSize);
+	}
+
+	if (remainSize) {
+		offset = blockCount * blockSize;
+		rtlpriv->io.writeN_sync(rtlpriv,
+					(FW_8192C_START_ADDRESS + offset),
+					(void *)(bufferPtr + offset),
+					remainSize);
+	}
+}
+
 static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
 				   const u8 *buffer, u32 size)
 {
@@ -81,23 +109,30 @@
 	u8 *bufferPtr = (u8 *) buffer;
 	u32 *pu4BytePtr = (u32 *) buffer;
 	u32 i, offset, blockCount, remainSize;
+	u32 data;
 
+	if (rtlpriv->io.writeN_sync) {
+		rtl_block_fw_writeN(hw, buffer, size);
+		return;
+	}
 	blockCount = size / blockSize;
 	remainSize = size % blockSize;
+	if (remainSize) {
+		/* the last word is < 4 bytes - pad it with zeros */
+		for (i = 0; i < 4 - remainSize; i++)
+			*(bufferPtr + size + i) = 0;
+		blockCount++;
+	}
 
 	for (i = 0; i < blockCount; i++) {
 		offset = i * blockSize;
+		/* for big-endian platforms the firmware data was read in as a
+		 * byte string, so each dword must be byte swapped here before
+		 * it is written out as a 32-bit register write
+		 */
+		data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
 		rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-				*(pu4BytePtr + i));
-	}
-
-	if (remainSize) {
-		offset = blockCount * blockSize;
-		bufferPtr += offset;
-		for (i = 0; i < remainSize; i++) {
-			rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
-						 offset + i), *(bufferPtr + i));
-		}
+				data);
 	}
 }
 
@@ -227,10 +262,10 @@
 	u32 fwsize;
 	enum version_8192c version = rtlhal->version;
 
-	pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
 	if (!rtlhal->pfirmware)
 		return 1;
 
+	pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
 	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
 	pfwdata = (u8 *) rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
@@ -238,8 +273,9 @@
 	if (IS_FW_HEADER_EXIST(pfwheader)) {
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
-			  pfwheader->version, pfwheader->signature,
-			  (uint)sizeof(struct rtl92c_firmware_header)));
+			 le16_to_cpu(pfwheader->version),
+			 le16_to_cpu(pfwheader->signature),
+			 (uint)sizeof(struct rtl92c_firmware_header)));
 
 		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
 		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
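The firmware-download change does two things: when a writeN_sync hook is available (the USB path), the image is pushed in chunks bounded by the endpoint's maximum transfer size; otherwise the tail is padded to a 4-byte boundary and each dword is converted with le32_to_cpu() so the download also works on big-endian machines. A minimal sketch of the chunked-write loop, with hypothetical names (MY_CHUNK and the writer callback are illustrative):

#include <linux/types.h>
#include <linux/kernel.h>

#define MY_CHUNK	234	/* illustrative per-transfer limit */

/* assumed low-level writer: pushes len bytes to the device at addr */
typedef void (*write_fn)(u32 addr, const u8 *buf, u32 len);

static void fw_write_chunked(write_fn writer, u32 start_addr,
			     const u8 *image, u32 size)
{
	u32 offset;

	for (offset = 0; offset < size; offset += MY_CHUNK) {
		u32 len = min_t(u32, MY_CHUNK, size - offset);

		writer(start_addr + offset, image + offset, len);
	}
}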
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c..cec5a3a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
 
 #define FW_8192C_SIZE				0x3000
 #define FW_8192C_START_ADDRESS			0x1000
-#define FW_8192C_END_ADDRESS			0x3FFF
+#define FW_8192C_END_ADDRESS			0x1FFF
 #define FW_8192C_PAGE_SIZE			4096
 #define FW_8192C_POLLING_DELAY			5
 #define FW_8192C_POLLING_TIMEOUT_COUNT		100
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
-	(_pfwhdr->signature&0xFFF0) == 0x88C0)
+	((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
+	(le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
 
 struct rtl92c_firmware_header {
-	u16 signature;
+	__le16 signature;
 	u8 category;
 	u8 function;
-	u16 version;
+	__le16 version;
 	u8 subversion;
 	u8 rsvd1;
 	u8 month;
 	u8 date;
 	u8 hour;
 	u8 minute;
-	u16 ramcodeSize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
+	__le16 ramcodeSize;
+	__le16 rsvd2;
+	__le32 svnindex;
+	__le32 rsvd3;
+	__le32 rsvd4;
+	__le32 rsvd5;
 };
 
 enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
 void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
 
 #endif
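Annotating the firmware header fields as __le16/__le32 instead of u16/u32 documents the stored byte order and lets sparse flag any access that forgets the le16_to_cpu()/le32_to_cpu() conversion, as the IS_FW_HEADER_EXIST() change above illustrates. A minimal sketch of the idiom with a hypothetical header layout:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

/* hypothetical on-the-wire header: every multi-byte field carries
 * its stored byte order in the type
 */
struct my_fw_header {
	__le16 signature;
	__le16 version;
	__le32 image_len;
} __packed;

static inline u16 my_fw_version(const struct my_fw_header *hdr)
{
	/* convert exactly once, at the point of use */
	return le16_to_cpu(hdr->version);
}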
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10a..3b585aa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@
 		}
 	case ERFSLEEP:{
 			if (ppsc->rfpwr_state == ERFOFF)
-				break;
+				return false;
 			for (queue_id = 0, i = 0;
 			     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
 				ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index f2aa33d..89ef698 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -98,9 +98,9 @@
 
 	rtl8192ce_bt_reg_init(hw);
 
-	rtlpriv->dm.dm_initialgain_enable = 1;
+	rtlpriv->dm.dm_initialgain_enable = true;
 	rtlpriv->dm.dm_flag = 0;
-	rtlpriv->dm.disable_framebursting = 0;
+	rtlpriv->dm.disable_framebursting = false;
 	rtlpriv->dm.thermalvalue = 0;
 	rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05d..124cf63 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@
 	}
 	RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
 		      hwinfo, HWSET_MAX_SIZE);
-	eeprom_id = *((u16 *)&hwinfo[0]);
+	eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
 	if (eeprom_id != RTL8190_EEPROM_ID) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 			 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@
 	pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
 	_rtl92cu_read_txpower_info_from_hwpg(hw,
 					   rtlefuse->autoload_failflag, hwinfo);
-	rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
-	rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+	rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
+	rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
 		 (" VID = 0x%02x PID = 0x%02x\n",
 		 rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
 	rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
-	rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+	rtlefuse->eeprom_version =
+			 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
 	rtlefuse->txpwr_fromeprom = true;
 	rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -2435,7 +2436,7 @@
 			 "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
 	}
 	if (actuallyset) {
-		ppsc->hwradiooff = 1;
+		ppsc->hwradiooff = true;
 		if (e_rfpowerstate_toset == ERFON) {
 			if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM) &&
 			     RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
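The efuse/EEPROM image is a plain byte array, so the 16-bit fields pulled out of it need an explicit little-endian conversion (and, strictly, an unaligned-access helper) rather than a bare u16 cast, which is what the hw.c hunks above switch to. A minimal sketch of the safer form, with illustrative offsets:

#include <linux/types.h>
#include <asm/unaligned.h>

#define MY_EEPROM_ID_OFFSET	0	/* illustrative offsets, not the */
#define MY_EEPROM_VID_OFFSET	0x0a	/* driver's real EEPROM layout   */

struct my_eeprom_info {
	u16 id;
	u16 vid;
};

static void my_parse_eeprom(const u8 *hwinfo, struct my_eeprom_info *out)
{
	/* get_unaligned_le16() handles both byte order and alignment */
	out->id  = get_unaligned_le16(hwinfo + MY_EEPROM_ID_OFFSET);
	out->vid = get_unaligned_le16(hwinfo + MY_EEPROM_VID_OFFSET);
}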
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f..9e0c8fc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@
 		}
 	}
 	rtlhal->version  = (enum version_8192c)chip_version;
+	pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
 	switch (rtlhal->version) {
 	case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 7285290..e49cf22 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@
 		break;
 	case ERFSLEEP:
 		if (ppsc->rfpwr_state == ERFOFF)
-			break;
+			return false;
 		for (queue_id = 0, i = 0;
 		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
 			ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c244f2f..6d2ca77 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -57,9 +57,9 @@
 	const struct firmware *firmware;
 	int err;
 
-	rtlpriv->dm.dm_initialgain_enable = 1;
+	rtlpriv->dm.dm_initialgain_enable = true;
 	rtlpriv->dm.dm_flag = 0;
-	rtlpriv->dm.disable_framebursting = 0;
+	rtlpriv->dm.disable_framebursting = false;
 	rtlpriv->dm.thermalvalue = 0;
 	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
 	rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
@@ -275,6 +275,8 @@
 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
 
 	/****** 8188CU ********/
+	/* RTL8188CTV */
+	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
 	/* 8188CE-VAU USB minCard */
 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
 	/* 8188cu 1*1 dongle */
@@ -291,14 +293,14 @@
 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
 	/* 8188RU in Alfa AWUS036NHR */
 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+	/* RTL8188CUS-VL */
+	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
 	/* 8188 Combo for BC4 */
 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
 
 	/****** 8192CU ********/
-	/* 8191cu 1*2 */
-	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
 	/* 8192cu 2*2 */
-	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
 	/* 8192CE-VAU USB minCard */
 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
 
@@ -309,13 +311,17 @@
 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
-	{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+	{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
 	/* HP - Lite-On ,8188CUS Slim Combo */
 	{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
 	{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
 	{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+	{RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
 	{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+	/*SW-WF02-AD15 -Abocom*/
+	{RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
 	{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
 	{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
 	{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
@@ -326,14 +332,36 @@
 	{RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
 	{RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
 
+	/****** 8188 RU ********/
+	/* Netcore */
+	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+
+	/****** 8188CUS Slim Solo********/
+	{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
+	{RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
+	{RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+
+	/****** 8188CUS Slim Combo ********/
+	{RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
+	{RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
+	{RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
+	{RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
+
 	/****** 8192CU ********/
+	{RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
+	{RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
 	{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
 	{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
 	{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+	{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+	{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+	{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+	{RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
 	{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
 	{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
 	{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
 	{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+	{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
 	{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
 	{}
 };
@@ -356,15 +384,4 @@
 #endif
 };
 
-static int __init rtl8192cu_init(void)
-{
-	return usb_register(&rtl8192cu_driver);
-}
-
-static void __exit rtl8192cu_exit(void)
-{
-	usb_deregister(&rtl8192cu_driver);
-}
-
-module_init(rtl8192cu_init);
-module_exit(rtl8192cu_exit);
+module_usb_driver(rtl8192cu_driver);
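module_usb_driver() collapses the hand-rolled init/exit pair removed above into a single line; it expands to module_init()/module_exit() wrappers around usb_register()/usb_deregister(). A minimal sketch of a skeleton driver using it (my_usb_driver, my_id_table, my_probe, my_disconnect and the VID/PID are placeholders, not the rtlwifi symbols):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id my_id_table[] = {
	{ USB_DEVICE(0x0bda, 0x8176) },	/* illustrative VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, my_id_table);

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return 0;
}

static void my_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver my_usb_driver = {
	.name		= "my_usb_driver",
	.id_table	= my_id_table,
	.probe		= my_probe,
	.disconnect	= my_disconnect,
};

/* replaces the explicit __init/__exit registration functions */
module_usb_driver(my_usb_driver);

MODULE_LICENSE("GPL");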
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b14..b3cc7b9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@
 	SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
 	for (index = 0; index < 16; index++)
 		checksum = checksum ^ (*(ptr + index));
-	SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+	SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
 }
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index f5bd3a3..9d89d7c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -466,8 +466,8 @@
 		bool int_migration = *(bool *) (val);
 
 		if (int_migration) {
-			/* Set interrrupt migration timer and
-			 * corresponging Tx/Rx counter.
+			/* Set interrupt migration timer and
+			 * corresponding Tx/Rx counter.
 			 * timer 25ns*0xfa0=100us for 0xf packets.
 			 * 0x306:Rx, 0x307:Tx */
 			rtl_write_dword(rtlpriv, REG_INT_MIG, 0xfe000fa0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1..0883349 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@
 		break;
 	case ERFSLEEP:
 		if (ppsc->rfpwr_state == ERFOFF)
-			break;
+			return false;
 
 		for (queue_id = 0, i = 0;
 		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 149493f..7911c9c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -99,9 +99,9 @@
 
 	rtlpriv->dm.dm_initialgain_enable = true;
 	rtlpriv->dm.dm_flag = 0;
-	rtlpriv->dm.disable_framebursting = 0;
+	rtlpriv->dm.disable_framebursting = false;
 	rtlpriv->dm.thermalvalue = 0;
-	rtlpriv->dm.useramask = 1;
+	rtlpriv->dm.useramask = true;
 
 	/* dual mac */
 	if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171a..f10ac1a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@
 		}
 	case ERFSLEEP:
 			if (ppsc->rfpwr_state == ERFOFF)
-				break;
+				return false;
 
 			for (queue_id = 0, i = 0;
 			     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 92f49d5..78723cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -98,9 +98,9 @@
 	int err = 0;
 	u16 earlyrxthreshold = 7;
 
-	rtlpriv->dm.dm_initialgain_enable = 1;
+	rtlpriv->dm.dm_initialgain_enable = true;
 	rtlpriv->dm.dm_flag = 0;
-	rtlpriv->dm.disable_framebursting = 0;
+	rtlpriv->dm.disable_framebursting = false;
 	rtlpriv->dm.thermalvalue = 0;
 	rtlpriv->dm.useramask = true;
 
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 54cb8a6..e956fa7 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -34,13 +34,14 @@
 #include "usb.h"
 #include "base.h"
 #include "ps.h"
+#include "rtl8192c/fw_common.h"
 
 #define	REALTEK_USB_VENQT_READ			0xC0
 #define	REALTEK_USB_VENQT_WRITE			0x40
 #define REALTEK_USB_VENQT_CMD_REQ		0x05
 #define	REALTEK_USB_VENQT_CMD_IDX		0x00
 
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE		254
+#define MAX_USBCTRL_VENDORREQ_TIMES		10
 
 static void usbctrl_async_callback(struct urb *urb)
 {
@@ -82,6 +83,7 @@
 	dr->wValue = cpu_to_le16(value);
 	dr->wIndex = cpu_to_le16(index);
 	dr->wLength = cpu_to_le16(len);
+	/* data are already in little-endian order */
 	memcpy(buf, pdata, len);
 	usb_fill_control_urb(urb, udev, pipe,
 			     (unsigned char *)dr, buf, len,
@@ -100,16 +102,28 @@
 	unsigned int pipe;
 	int status;
 	u8 reqtype;
+	int vendorreq_times = 0;
+	static int count;
 
 	pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
 	reqtype =  REALTEK_USB_VENQT_READ;
 
-	status = usb_control_msg(udev, pipe, request, reqtype, value, index,
-				 pdata, len, 0); /* max. timeout */
+	do {
+		status = usb_control_msg(udev, pipe, request, reqtype, value,
+					 index, pdata, len, 0); /*max. timeout*/
+		if (status < 0) {
+			/* firmware download is checksummed, don't retry */
+			if ((value >= FW_8192C_START_ADDRESS &&
+			    value <= FW_8192C_END_ADDRESS))
+				break;
+		} else {
+			break;
+		}
+	} while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
 
-	if (status < 0)
+	if (status < 0 && count++ < 4)
 		pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
-		       value, status, *(u32 *)pdata);
+		       value, status, le32_to_cpu(*(u32 *)pdata));
 	return status;
 }
 
@@ -129,7 +143,7 @@
 
 	wvalue = (u16)addr;
 	_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
-	ret = *data;
+	ret = le32_to_cpu(*data);
 	kfree(data);
 	return ret;
 }
@@ -161,12 +175,12 @@
 	u8 request;
 	u16 wvalue;
 	u16 index;
-	u32 data;
+	__le32 data;
 
 	request = REALTEK_USB_VENQT_CMD_REQ;
 	index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
 	wvalue = (u16)(addr&0x0000ffff);
-	data = val;
+	data = cpu_to_le32(val);
 	_usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
 				       len);
 }
@@ -192,6 +206,30 @@
 	_usb_write_async(to_usb_device(dev), addr, val, 4);
 }
 
+static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+			     u16 len)
+{
+	struct device *dev = rtlpriv->io.dev;
+	struct usb_device *udev = to_usb_device(dev);
+	u8 request = REALTEK_USB_VENQT_CMD_REQ;
+	u8 reqtype =  REALTEK_USB_VENQT_WRITE;
+	u16 wvalue;
+	u16 index = REALTEK_USB_VENQT_CMD_IDX;
+	int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+	u8 *buffer;
+	dma_addr_t dma_addr;
+
+	wvalue = (u16)(addr&0x0000ffff);
+	buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+	if (!buffer)
+		return;
+	memcpy(buffer, data, len);
+	usb_control_msg(udev, pipe, request, reqtype, wvalue,
+			index, buffer, len, 50);
+
+	usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+}
+
 static void _rtl_usb_io_handler_init(struct device *dev,
 				     struct ieee80211_hw *hw)
 {
@@ -205,6 +243,7 @@
 	rtlpriv->io.read8_sync		= _usb_read8_sync;
 	rtlpriv->io.read16_sync		= _usb_read16_sync;
 	rtlpriv->io.read32_sync		= _usb_read32_sync;
+	rtlpriv->io.writeN_sync		= _usb_writeN_sync;
 }
 
 static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
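The usb.c read path now retries a failed vendor request a bounded number of times, except for registers in the firmware-download window, where the image is checksummed and a retried (possibly partial) transfer would do more harm than failing outright. A minimal sketch of a bounded-retry control transfer; the request value, timeout and function name are illustrative, not the driver's:

#include <linux/usb.h>

#define MY_VENDOR_REQ		0x05	/* illustrative bRequest */
#define MY_MAX_RETRIES		10

static int my_vendor_read(struct usb_device *udev, u16 value, u16 index,
			  void *buf, u16 len)
{
	unsigned int pipe = usb_rcvctrlpipe(udev, 0);
	int tries = 0;
	int ret;

	do {
		ret = usb_control_msg(udev, pipe, MY_VENDOR_REQ,
				      USB_DIR_IN | USB_TYPE_VENDOR |
				      USB_RECIP_DEVICE,
				      value, index, buf, len, 500);
		if (ret >= 0)
			return ret;	/* number of bytes transferred */
	} while (++tries < MY_MAX_RETRIES);

	return ret;	/* negative errno from the last attempt */
}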
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7dd..cdaf142 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
 #define AC_MAX					4
 #define QOS_QUEUE_NUM				4
 #define RTL_MAC80211_NUM_QUEUE			5
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE		254
 
 #define QBSS_LOAD_SIZE				5
 #define MAX_WMMELE_LENGTH			64
@@ -943,8 +944,10 @@
 	unsigned long pci_base_addr;	/*device I/O address */
 
 	void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
-	void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
-	void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
+	void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+	void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+	void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
+			     u16 len);
 
 	u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
 	u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
@@ -1485,7 +1488,7 @@
 
 struct rtl_mod_params {
 	/* default: 0 = using hardware encryption */
-	int sw_crypto;
+	bool sw_crypto;
 
 	/* default: 0 = DBG_EMERG (0)*/
 	int debug;
@@ -1541,6 +1544,7 @@
 struct rtl_locks {
 	/* mutex */
 	struct mutex conf_mutex;
+	struct mutex ps_mutex;
 
 	/*spin lock */
 	spinlock_t ips_lock;
@@ -1548,7 +1552,6 @@
 	spinlock_t h2c_lock;
 	spinlock_t rf_ps_lock;
 	spinlock_t rf_lock;
-	spinlock_t lps_lock;
 	spinlock_t waitq_lock;
 
 	/*Dual mac*/
@@ -1573,7 +1576,8 @@
 	/* For SW LPS */
 	struct delayed_work ps_work;
 	struct delayed_work ps_rfon_wq;
-	struct tasklet_struct ips_leave_tasklet;
+
+	struct work_struct lps_leave_work;
 };
 
 struct rtl_debug {
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index eaa5f95..6248c35 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -319,7 +319,6 @@
 static struct spi_driver wl1251_spi_driver = {
 	.driver = {
 		.name		= DRIVER_NAME,
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
 
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 3fe388b..af08c86 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -42,16 +42,6 @@
 	  If you choose to build a module, it'll be called wl12xx_sdio.
 	  Say N if unsure.
 
-config WL12XX_SDIO_TEST
-	tristate "TI wl12xx SDIO testing support"
-	depends on WL12XX && MMC && WL12XX_SDIO
-	default n
-	---help---
-	  This module adds support for the SDIO bus testing with the
-	  TI wl12xx chipsets.  You probably don't want this unless you are
-	  testing a new hardware platform.  Select this if you want to test the
-	  SDIO bus which is connected to the wl12xx chip.
-
 config WL12XX_PLATFORM_DATA
 	bool
 	depends on WL12XX_SDIO != n || WL1251_SDIO != n
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 621b348..fe67262 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -3,14 +3,11 @@
 
 wl12xx_spi-objs 	= spi.o
 wl12xx_sdio-objs	= sdio.o
-wl12xx_sdio_test-objs	= sdio_test.o
 
 wl12xx-$(CONFIG_NL80211_TESTMODE)	+= testmode.o
 obj-$(CONFIG_WL12XX)			+= wl12xx.o
 obj-$(CONFIG_WL12XX_SPI)		+= wl12xx_spi.o
 obj-$(CONFIG_WL12XX_SDIO)		+= wl12xx_sdio.o
 
-obj-$(CONFIG_WL12XX_SDIO_TEST)		+= wl12xx_sdio_test.o
-
 # small builtin driver bit
 obj-$(CONFIG_WL12XX_PLATFORM_DATA)	+= wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index ca044a7..7537c40 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -29,11 +29,12 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "reg.h"
 #include "ps.h"
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct acx_wake_up_condition *wake_up;
 	int ret;
@@ -46,7 +47,7 @@
 		goto out;
 	}
 
-	wake_up->role_id = wl->role_id;
+	wake_up->role_id = wlvif->role_id;
 	wake_up->wake_up_event = wl->conf.conn.wake_up_event;
 	wake_up->listen_interval = wl->conf.conn.listen_interval;
 
@@ -84,7 +85,8 @@
 	return ret;
 }
 
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			int power)
 {
 	struct acx_current_tx_power *acx;
 	int ret;
@@ -100,7 +102,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->current_tx_power = power * 10;
 
 	ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@
 	return ret;
 }
 
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct acx_feature_config *feature;
 	int ret;
@@ -128,7 +130,7 @@
 	}
 
 	/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
-	feature->role_id = wl->role_id;
+	feature->role_id = wlvif->role_id;
 	feature->data_flow_options = 0;
 	feature->options = 0;
 
@@ -184,33 +186,8 @@
 	return ret;
 }
 
-int wl1271_acx_pd_threshold(struct wl1271 *wl)
-{
-	struct acx_packet_detection *pd;
-	int ret;
-
-	wl1271_debug(DEBUG_ACX, "acx data pd threshold");
-
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
-
-	ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
-	if (ret < 0) {
-		wl1271_warning("failed to set pd threshold: %d", ret);
-		goto out;
-	}
-
-out:
-	kfree(pd);
-	return ret;
-}
-
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		    enum acx_slot_type slot_time)
 {
 	struct acx_slot *slot;
 	int ret;
@@ -223,7 +200,7 @@
 		goto out;
 	}
 
-	slot->role_id = wl->role_id;
+	slot->role_id = wlvif->role_id;
 	slot->wone_index = STATION_WONE_INDEX;
 	slot->slot_time = slot_time;
 
@@ -238,8 +215,8 @@
 	return ret;
 }
 
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
-				 void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 bool enable, void *mc_list, u32 mc_list_len)
 {
 	struct acx_dot11_grp_addr_tbl *acx;
 	int ret;
@@ -253,7 +230,7 @@
 	}
 
 	/* MAC filtering */
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->enabled = enable;
 	acx->num_groups = mc_list_len;
 	memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@
 	return ret;
 }
 
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+				      struct wl12xx_vif *wlvif)
 {
 	struct acx_rx_timeout *rx_timeout;
 	int ret;
@@ -283,7 +261,7 @@
 
 	wl1271_debug(DEBUG_ACX, "acx service period timeout");
 
-	rx_timeout->role_id = wl->role_id;
+	rx_timeout->role_id = wlvif->role_id;
 	rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
 	rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
 
@@ -300,7 +278,8 @@
 	return ret;
 }
 
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     u32 rts_threshold)
 {
 	struct acx_rts_threshold *rts;
 	int ret;
@@ -320,7 +299,7 @@
 		goto out;
 	}
 
-	rts->role_id = wl->role_id;
+	rts->role_id = wlvif->role_id;
 	rts->threshold = cpu_to_le16((u16)rts_threshold);
 
 	ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@
 	return ret;
 }
 
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 bool enable_filter)
 {
 	struct acx_beacon_filter_option *beacon_filter = NULL;
 	int ret = 0;
@@ -380,7 +360,7 @@
 		goto out;
 	}
 
-	beacon_filter->role_id = wl->role_id;
+	beacon_filter->role_id = wlvif->role_id;
 	beacon_filter->enable = enable_filter;
 
 	/*
@@ -401,7 +381,8 @@
 	return ret;
 }
 
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+				   struct wl12xx_vif *wlvif)
 {
 	struct acx_beacon_filter_ie_table *ie_table;
 	int i, idx = 0;
@@ -417,7 +398,7 @@
 	}
 
 	/* configure default beacon pass-through rules */
-	ie_table->role_id = wl->role_id;
+	ie_table->role_id = wlvif->role_id;
 	ie_table->num_ie = 0;
 	for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
 		struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@
 
 #define ACX_CONN_MONIT_DISABLE_VALUE  0xffffffff
 
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 bool enable)
 {
 	struct acx_conn_monit_params *acx;
 	u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@
 		timeout = wl->conf.conn.bss_lose_timeout;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->synch_fail_thold = cpu_to_le32(threshold);
 	acx->bss_lose_timeout = cpu_to_le32(timeout);
 
@@ -582,7 +564,7 @@
 	return ret;
 }
 
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct acx_beacon_broadcast *bb;
 	int ret;
@@ -595,7 +577,7 @@
 		goto out;
 	}
 
-	bb->role_id = wl->role_id;
+	bb->role_id = wlvif->role_id;
 	bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
 	bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
 	bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@
 	return ret;
 }
 
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
 {
 	struct acx_aid *acx_aid;
 	int ret;
@@ -625,7 +607,7 @@
 		goto out;
 	}
 
-	acx_aid->role_id = wl->role_id;
+	acx_aid->role_id = wlvif->role_id;
 	acx_aid->aid = cpu_to_le16(aid);
 
 	ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@
 	return ret;
 }
 
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			    enum acx_preamble_type preamble)
 {
 	struct acx_preamble *acx;
 	int ret;
@@ -681,7 +664,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->preamble = preamble;
 
 	ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@
 	return ret;
 }
 
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			   enum acx_ctsprotect_type ctsprotect)
 {
 	struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->ctsprotect = ctsprotect;
 
 	ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@
 	return 0;
 }
 
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct acx_rate_policy *acx;
 	struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@
 	}
 
 	wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
-		wl->basic_rate, wl->rate_set);
+		wlvif->basic_rate, wlvif->rate_set);
 
 	/* configure one basic rate class */
-	acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
-	acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+	acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+	acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
 	acx->rate_policy.short_retry_limit = c->short_retry_limit;
 	acx->rate_policy.long_retry_limit = c->long_retry_limit;
 	acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@
 	}
 
 	/* configure one AP supported rate class */
-	acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
-	acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+	acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+	acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
 	acx->rate_policy.short_retry_limit = c->short_retry_limit;
 	acx->rate_policy.long_retry_limit = c->long_retry_limit;
 	acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@
 	 * (p2p packets should always go out with OFDM rates, even
 	 * if we are currently connected to 11b AP)
 	 */
-	acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+	acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
 	acx->rate_policy.enabled_rates =
 				cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
 	acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@
 	return ret;
 }
 
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
-		      u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
 {
 	struct acx_ac_cfg *acx;
 	int ret = 0;
@@ -855,7 +838,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->ac = ac;
 	acx->cw_min = cw_min;
 	acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@
 	return ret;
 }
 
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       u8 queue_id, u8 channel_type,
 		       u8 tsid, u8 ps_scheme, u8 ack_policy,
 		       u32 apsd_conf0, u32 apsd_conf1)
 {
@@ -889,7 +873,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->queue_id = queue_id;
 	acx->channel_type = channel_type;
 	acx->tsid = tsid;
@@ -1098,7 +1082,8 @@
 	return ret;
 }
 
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  bool enable)
 {
 	struct wl1271_acx_bet_enable *acx = NULL;
 	int ret = 0;
@@ -1114,7 +1099,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
 	acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
 
@@ -1129,7 +1114,8 @@
 	return ret;
 }
 
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     u8 enable, __be32 address)
 {
 	struct wl1271_acx_arp_filter *acx;
 	int ret;
@@ -1142,7 +1128,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->version = ACX_IPV4_VERSION;
 	acx->enable = enable;
 
@@ -1189,7 +1175,8 @@
 	return ret;
 }
 
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			       bool enable)
 {
 	struct wl1271_acx_keep_alive_mode *acx = NULL;
 	int ret = 0;
@@ -1202,7 +1189,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->enabled = enable;
 
 	ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@
 	return ret;
 }
 
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 u8 index, u8 tpl_valid)
 {
 	struct wl1271_acx_keep_alive_config *acx = NULL;
 	int ret = 0;
@@ -1229,7 +1217,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
 	acx->index = index;
 	acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@
 	return ret;
 }
 
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
-				s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				bool enable, s16 thold, u8 hyst)
 {
 	struct wl1271_acx_rssi_snr_trigger *acx = NULL;
 	int ret = 0;
@@ -1261,9 +1249,9 @@
 		goto out;
 	}
 
-	wl->last_rssi_event = -1;
+	wlvif->last_rssi_event = -1;
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
 	acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
 	acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@
 	return ret;
 }
 
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif)
 {
 	struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
 	struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->rssi_beacon = c->avg_weight_rssi_beacon;
 	acx->rssi_data = c->avg_weight_rssi_data;
 	acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@
 }
 
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
+				   struct wl12xx_vif *wlvif,
 				   u16 ht_operation_mode)
 {
 	struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@
 		goto out;
 	}
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->ht_protection =
 		(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
 	acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@
 }
 
 /* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+				       struct wl12xx_vif *wlvif)
 {
 	struct wl1271_acx_ba_initiator_policy *acx;
 	int ret;
@@ -1416,7 +1407,7 @@
 	}
 
 	/* set for the current role */
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
 	acx->win_size = wl->conf.ht.tx_ba_win_size;
 	acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@
 	return ret;
 }
 
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			       bool enable)
 {
 	struct wl1271_acx_ps_rx_streaming *rx_streaming;
 	u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@
 		if (!(conf_queues & BIT(i)))
 			continue;
 
-		rx_streaming->role_id = wl->role_id;
+		rx_streaming->role_id = wlvif->role_id;
 		rx_streaming->tid = i;
 		rx_streaming->enable = enable_queues & BIT(i);
 		rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@
 	return ret;
 }
 
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct wl1271_acx_ap_max_tx_retry *acx = NULL;
 	int ret;
@@ -1553,7 +1545,7 @@
 	if (!acx)
 		return -ENOMEM;
 
-	acx->role_id = wl->role_id;
+	acx->role_id = wlvif->role_id;
 	acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
 
 	ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@
 	return ret;
 }
 
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct wl1271_acx_config_ps *config_ps;
 	int ret;
@@ -1582,7 +1574,7 @@
 
 	config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
 	config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
-	config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+	config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
 
 	ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
 				   sizeof(*config_ps));
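Every ACX helper in the hunks above now takes the wl12xx_vif it operates on and stamps the command with wlvif->role_id instead of a single per-device role, which is the groundwork for running several virtual interfaces on one chip. The shape of each helper is identical: allocate the command struct, fill in role_id and the payload, send it, free it. A minimal sketch of that shape with hypothetical types and command id (my_vif, my_acx_cmd, my_send_cmd are not the wl12xx symbols):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_vif {
	u8 role_id;		/* firmware role this interface owns */
};

struct my_acx_cmd {
	u8 role_id;
	u8 enable;
} __packed;

/* assumed transport: pushes the buffer to the firmware */
int my_send_cmd(u16 id, void *buf, size_t len);

static int my_acx_feature_cfg(struct my_vif *vif, bool enable)
{
	struct my_acx_cmd *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->role_id = vif->role_id;	/* per-vif, not per-device */
	cmd->enable = enable;

	ret = my_send_cmd(0x01, cmd, sizeof(*cmd));	/* illustrative id */

	kfree(cmd);
	return ret;
}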
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index e3f93b4..69892b4 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -171,13 +171,6 @@
 	__le32 lifetime;
 } __packed;
 
-struct acx_packet_detection {
-	struct acx_header header;
-
-	__le32 threshold;
-} __packed;
-
-
 enum acx_slot_type {
 	SLOT_TIME_LONG = 0,
 	SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@
 	u8 reserved;
 };
 
-#define ACX_TX_BASIC_RATE      0
-#define ACX_TX_AP_FULL_RATE    1
-#define ACX_TX_BASIC_RATE_P2P  2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
 struct acx_rate_policy {
 	struct acx_header header;
 
@@ -1234,39 +1222,48 @@
 };
 
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+				  struct wl12xx_vif *wlvif);
 int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_mem_map(struct wl1271 *wl,
 		       struct acx_header *mem_map, size_t len);
 int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
-				 void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		    enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+				      struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     u32 rts_threshold);
 int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+				   struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 bool enable);
 int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
 int wl12xx_acx_sg_cfg(struct wl1271 *wl);
 int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
 int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			    enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			   enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
 		      u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
-		      u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       u8 queue_id, u8 channel_type,
 		       u8 tsid, u8 ps_scheme, u8 ack_policy,
 		       u32 apsd_conf0, u32 apsd_conf1);
 int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@
 int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     u8 enable, __be32 address);
 int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
-				s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
+			       bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				 u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif);
 int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
 				    struct ieee80211_sta_ht_cap *ht_cap,
 				    bool allow_ht_operation, u8 hlid);
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
+				   struct wl12xx_vif *wlvif,
 				   u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+				       struct wl12xx_vif *wlvif);
 int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
 				       u16 ssn, bool enable, u8 peer_hlid);
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			       bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 6813379..8f9cf5a 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -25,6 +25,7 @@
 #include <linux/wl12xx.h>
 #include <linux/export.h>
 
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
 #include "boot.h"
@@ -347,6 +348,9 @@
 		nvs_ptr += 3;
 
 		for (i = 0; i < burst_len; i++) {
+			if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+				goto out_badnvs;
+
 			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
 			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
 
@@ -358,6 +362,9 @@
 			nvs_ptr += 4;
 			dest_addr += 4;
 		}
+
+		if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+			goto out_badnvs;
 	}
 
 	/*
@@ -369,6 +376,10 @@
 	 */
 	nvs_ptr = (u8 *)wl->nvs +
 			ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+	if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+		goto out_badnvs;
+
 	nvs_len -= nvs_ptr - (u8 *)wl->nvs;
 
 	/* Now we must set the partition correctly */
@@ -384,6 +395,10 @@
 
 	kfree(nvs_aligned);
 	return 0;
+
+out_badnvs:
+	wl1271_error("nvs data is malformed");
+	return -EILSEQ;
 }
 
 static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
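The boot.c change hardens the NVS parser: before every read it checks that the cursor plus the bytes about to be consumed still lie inside the buffer, and bails out with -EILSEQ on malformed calibration data instead of reading past the end. A minimal sketch of the bounds-checked cursor pattern; the function name and the 4-byte record format are illustrative:

#include <linux/types.h>
#include <linux/errno.h>

/* walk a blob of 4-byte little-endian records, refusing to read
 * past the end of the buffer even if the data is truncated
 */
static int my_parse_blob(const u8 *buf, size_t len)
{
	const u8 *p = buf;
	const u8 *end = buf + len;

	while (p < end) {
		u32 val;

		if (p + 4 > end)
			return -EILSEQ;	/* record runs off the buffer */

		val = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
		/* ... consume val ... */
		(void)val;
		p += 4;
	}
	return 0;
}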
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index a52299e..25990bd 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "reg.h"
 #include "io.h"
 #include "acx.h"
@@ -120,6 +121,11 @@
 	if (!wl->nvs)
 		return -ENODEV;
 
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from INI out of bounds");
+		return -EINVAL;
+	}
+
 	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
 	if (!gen_parms)
 		return -ENOMEM;
@@ -143,6 +149,12 @@
 	gp->tx_bip_fem_manufacturer =
 		gen_parms->general_params.tx_bip_fem_manufacturer;
 
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from FW out of bounds");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
 		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
 
@@ -162,6 +174,11 @@
 	if (!wl->nvs)
 		return -ENODEV;
 
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from ini out of bounds");
+		return -EINVAL;
+	}
+
 	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
 	if (!gen_parms)
 		return -ENOMEM;
@@ -186,6 +203,12 @@
 	gp->tx_bip_fem_manufacturer =
 		gen_parms->general_params.tx_bip_fem_manufacturer;
 
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from FW out of bounds");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
 		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
 
@@ -358,7 +381,8 @@
 	return 0;
 }
 
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+			   u8 *role_id)
 {
 	struct wl12xx_cmd_role_enable *cmd;
 	int ret;
@@ -381,7 +405,7 @@
 		goto out_free;
 	}
 
-	memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+	memcpy(cmd->mac_address, addr, ETH_ALEN);
 	cmd->role_type = role_type;
 
 	ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@
 	return ret;
 }
 
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
 	u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
 	if (link >= WL12XX_MAX_LINKS)
 		return -EBUSY;
 
 	__set_bit(link, wl->links_map);
+	__set_bit(link, wlvif->links_map);
 	*hlid = link;
 	return 0;
 }
 
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
 	if (*hlid == WL12XX_INVALID_LINK_ID)
 		return;
 
 	__clear_bit(*hlid, wl->links_map);
+	__clear_bit(*hlid, wlvif->links_map);
 	*hlid = WL12XX_INVALID_LINK_ID;
 }
 
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+				     struct wl12xx_vif *wlvif)
 {
-	if (wl->session_counter >= SESSION_COUNTER_MAX)
-		wl->session_counter = 0;
+	if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+		wlvif->session_counter = 0;
 
-	wl->session_counter++;
+	wlvif->session_counter++;
 
-	return wl->session_counter;
+	return wlvif->session_counter;
 }
 
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+				     struct wl12xx_vif *wlvif)
 {
 	struct wl12xx_cmd_role_start *cmd;
 	int ret;
@@ -474,20 +502,20 @@
 		goto out;
 	}
 
-	wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+	wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
 
-	cmd->role_id = wl->dev_role_id;
-	if (wl->band == IEEE80211_BAND_5GHZ)
+	cmd->role_id = wlvif->dev_role_id;
+	if (wlvif->band == IEEE80211_BAND_5GHZ)
 		cmd->band = WL12XX_BAND_5GHZ;
-	cmd->channel = wl->channel;
+	cmd->channel = wlvif->channel;
 
-	if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
-		ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+	if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+		ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
 		if (ret)
 			goto out_free;
 	}
-	cmd->device.hlid = wl->dev_hlid;
-	cmd->device.session = wl->session_counter;
+	cmd->device.hlid = wlvif->dev_hlid;
+	cmd->device.session = wlvif->session_counter;
 
 	wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
 		     cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@
 
 err_hlid:
 	/* clear links on error */
-	__clear_bit(wl->dev_hlid, wl->links_map);
-	wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+	wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
 	kfree(cmd);
@@ -513,12 +539,13 @@
 	return ret;
 }
 
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif)
 {
 	struct wl12xx_cmd_role_stop *cmd;
 	int ret;
 
-	if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+	if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
 		return -EINVAL;
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@
 
 	wl1271_debug(DEBUG_CMD, "cmd role stop dev");
 
-	cmd->role_id = wl->dev_role_id;
+	cmd->role_id = wlvif->dev_role_id;
 	cmd->disc_type = DISCONNECT_IMMEDIATE;
 	cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
 
@@ -545,7 +572,7 @@
 		goto out_free;
 	}
 
-	wl12xx_free_link(wl, &wl->dev_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
 	kfree(cmd);
@@ -554,8 +581,9 @@
 	return ret;
 }
 
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	struct wl12xx_cmd_role_start *cmd;
 	int ret;
 
@@ -565,33 +593,33 @@
 		goto out;
 	}
 
-	wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+	wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
 
-	cmd->role_id = wl->role_id;
-	if (wl->band == IEEE80211_BAND_5GHZ)
+	cmd->role_id = wlvif->role_id;
+	if (wlvif->band == IEEE80211_BAND_5GHZ)
 		cmd->band = WL12XX_BAND_5GHZ;
-	cmd->channel = wl->channel;
-	cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-	cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+	cmd->channel = wlvif->channel;
+	cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+	cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
 	cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
-	cmd->sta.ssid_len = wl->ssid_len;
-	memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
-	memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
-	cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+	cmd->sta.ssid_len = wlvif->ssid_len;
+	memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+	memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+	cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
 
-	if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
-		ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+	if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+		ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
 		if (ret)
 			goto out_free;
 	}
-	cmd->sta.hlid = wl->sta_hlid;
-	cmd->sta.session = wl12xx_get_new_session_id(wl);
-	cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+	cmd->sta.hlid = wlvif->sta.hlid;
+	cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+	cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
 
 	wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
 		     "basic_rate_set: 0x%x, remote_rates: 0x%x",
-		     wl->role_id, cmd->sta.hlid, cmd->sta.session,
-		     wl->basic_rate_set, wl->rate_set);
+		     wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+		     wlvif->basic_rate_set, wlvif->rate_set);
 
 	ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
@@ -603,7 +631,7 @@
 
 err_hlid:
 	/* clear links on error. */
-	wl12xx_free_link(wl, &wl->sta_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
 	kfree(cmd);
@@ -613,12 +641,12 @@
 }
 
 /* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct wl12xx_cmd_role_stop *cmd;
 	int ret;
 
-	if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+	if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
 		return -EINVAL;
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@
 		goto out;
 	}
 
-	wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+	wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
 
-	cmd->role_id = wl->role_id;
+	cmd->role_id = wlvif->role_id;
 	cmd->disc_type = DISCONNECT_IMMEDIATE;
 	cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
 
@@ -639,7 +667,7 @@
 		goto out_free;
 	}
 
-	wl12xx_free_link(wl, &wl->sta_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
 	kfree(cmd);
@@ -648,16 +676,17 @@
 	return ret;
 }
 
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct wl12xx_cmd_role_start *cmd;
-	struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 	int ret;
 
-	wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+	wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
 
 	/* trying to use hidden SSID with an old hostapd version */
-	if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+	if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
 		wl1271_error("got a null SSID from beacon/bss");
 		ret = -EINVAL;
 		goto out;
@@ -669,30 +698,30 @@
 		goto out;
 	}
 
-	ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+	ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
 	if (ret < 0)
 		goto out_free;
 
-	ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+	ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
 	if (ret < 0)
 		goto out_free_global;
 
-	cmd->role_id = wl->role_id;
+	cmd->role_id = wlvif->role_id;
 	cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
 	cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
-	cmd->ap.global_hlid = wl->ap_global_hlid;
-	cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
-	cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-	cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+	cmd->ap.global_hlid = wlvif->ap.global_hlid;
+	cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+	cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+	cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
 	cmd->ap.dtim_interval = bss_conf->dtim_period;
 	cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
-	cmd->channel = wl->channel;
+	cmd->channel = wlvif->channel;
 
 	if (!bss_conf->hidden_ssid) {
 		/* take the SSID from the beacon for backward compatibility */
 		cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
-		cmd->ap.ssid_len = wl->ssid_len;
-		memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+		cmd->ap.ssid_len = wlvif->ssid_len;
+		memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
 	} else {
 		cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
 		cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@
 
 	cmd->ap.local_rates = cpu_to_le32(0xffffffff);
 
-	switch (wl->band) {
+	switch (wlvif->band) {
 	case IEEE80211_BAND_2GHZ:
 		cmd->band = RADIO_BAND_2_4GHZ;
 		break;
@@ -709,7 +738,7 @@
 		cmd->band = RADIO_BAND_5GHZ;
 		break;
 	default:
-		wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+		wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
 		cmd->band = RADIO_BAND_2_4GHZ;
 		break;
 	}
@@ -723,10 +752,10 @@
 	goto out_free;
 
 out_free_bcast:
-	wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
 
 out_free_global:
-	wl12xx_free_link(wl, &wl->ap_global_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
 
 out_free:
 	kfree(cmd);
@@ -735,7 +764,7 @@
 	return ret;
 }
 
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct wl12xx_cmd_role_stop *cmd;
 	int ret;
@@ -746,9 +775,9 @@
 		goto out;
 	}
 
-	wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+	wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
 
-	cmd->role_id = wl->role_id;
+	cmd->role_id = wlvif->role_id;
 
 	ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
@@ -756,8 +785,8 @@
 		goto out_free;
 	}
 
-	wl12xx_free_link(wl, &wl->ap_bcast_hlid);
-	wl12xx_free_link(wl, &wl->ap_global_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
 
 out_free:
 	kfree(cmd);
@@ -766,10 +795,11 @@
 	return ret;
 }
 
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	struct wl12xx_cmd_role_start *cmd;
-	struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 	int ret;
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@
 		goto out;
 	}
 
-	wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+	wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
 
-	cmd->role_id = wl->role_id;
-	if (wl->band == IEEE80211_BAND_5GHZ)
+	cmd->role_id = wlvif->role_id;
+	if (wlvif->band == IEEE80211_BAND_5GHZ)
 		cmd->band = WL12XX_BAND_5GHZ;
-	cmd->channel = wl->channel;
-	cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-	cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+	cmd->channel = wlvif->channel;
+	cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+	cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
 	cmd->ibss.dtim_interval = bss_conf->dtim_period;
 	cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
-	cmd->ibss.ssid_len = wl->ssid_len;
-	memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
-	memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
-	cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+	cmd->ibss.ssid_len = wlvif->ssid_len;
+	memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+	memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+	cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
 
-	if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
-		ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+	if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+		ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
 		if (ret)
 			goto out_free;
 	}
-	cmd->ibss.hlid = wl->sta_hlid;
-	cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+	cmd->ibss.hlid = wlvif->sta.hlid;
+	cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
 
 	wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
 		     "basic_rate_set: 0x%x, remote_rates: 0x%x",
-		     wl->role_id, cmd->sta.hlid, cmd->sta.session,
-		     wl->basic_rate_set, wl->rate_set);
+		     wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+		     wlvif->basic_rate_set, wlvif->rate_set);
 
-	wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+	wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+		     vif->bss_conf.bssid);
 
 	ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
 	if (ret < 0) {
@@ -818,7 +849,7 @@
 
 err_hlid:
 	/* clear links on error. */
-	wl12xx_free_link(wl, &wl->sta_hlid);
+	wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
 	kfree(cmd);
@@ -962,7 +993,8 @@
 	return ret;
 }
 
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       u8 ps_mode)
 {
 	struct wl1271_cmd_ps_params *ps_params = NULL;
 	int ret = 0;
@@ -975,7 +1007,7 @@
 		goto out;
 	}
 
-	ps_params->role_id = wl->role_id;
+	ps_params->role_id = wlvif->role_id;
 	ps_params->ps_mode = ps_mode;
 
 	ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@
 	return ret;
 }
 
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct sk_buff *skb = NULL;
 	int size;
@@ -1038,11 +1070,12 @@
 	int ret = -ENOMEM;
 
 
-	if (wl->bss_type == BSS_TYPE_IBSS) {
+	if (wlvif->bss_type == BSS_TYPE_IBSS) {
 		size = sizeof(struct wl12xx_null_data_template);
 		ptr = NULL;
 	} else {
-		skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+		skb = ieee80211_nullfunc_get(wl->hw,
+					     wl12xx_wlvif_to_vif(wlvif));
 		if (!skb)
 			goto out;
 		size = skb->len;
@@ -1050,7 +1083,7 @@
 	}
 
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
-				      wl->basic_rate);
+				      wlvif->basic_rate);
 
 out:
 	dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@
 
 }
 
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+				   struct wl12xx_vif *wlvif)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	struct sk_buff *skb = NULL;
 	int ret = -ENOMEM;
 
-	skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+	skb = ieee80211_nullfunc_get(wl->hw, vif);
 	if (!skb)
 		goto out;
 
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
 				      skb->data, skb->len,
 				      CMD_TEMPL_KLV_IDX_NULL_DATA,
-				      wl->basic_rate);
+				      wlvif->basic_rate);
 
 out:
 	dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@
 
 }
 
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     u16 aid)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	struct sk_buff *skb;
 	int ret = 0;
 
-	skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+	skb = ieee80211_pspoll_get(wl->hw, vif);
 	if (!skb)
 		goto out;
 
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
-				      skb->len, 0, wl->basic_rate_set);
+				      skb->len, 0, wlvif->basic_rate_set);
 
 out:
 	dev_kfree_skb(skb);
 	return ret;
 }
 
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			       const u8 *ssid, size_t ssid_len,
 			       const u8 *ie, size_t ie_len, u8 band)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	struct sk_buff *skb;
 	int ret;
 	u32 rate;
 
-	skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+	skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
 				     ie, ie_len);
 	if (!skb) {
 		ret = -ENOMEM;
@@ -1118,7 +1156,7 @@
 
 	wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
 
-	rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+	rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
 	if (band == IEEE80211_BAND_2GHZ)
 		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
 					      skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@
 }
 
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif,
 					      struct sk_buff *skb)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	int ret;
 	u32 rate;
 
 	if (!skb)
-		skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+		skb = ieee80211_ap_probereq_get(wl->hw, vif);
 	if (!skb)
 		goto out;
 
 	wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
 
-	rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
-	if (wl->band == IEEE80211_BAND_2GHZ)
+	rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+	if (wlvif->band == IEEE80211_BAND_2GHZ)
 		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
 					      skb->data, skb->len, 0, rate);
 	else
@@ -1159,9 +1199,11 @@
 	return skb;
 }
 
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     __be32 ip_addr)
 {
 	int ret;
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	struct wl12xx_arp_rsp_template tmpl;
 	struct ieee80211_hdr_3addr *hdr;
 	struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@
 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
 					 IEEE80211_STYPE_DATA |
 					 IEEE80211_FCTL_TODS);
-	memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
-	memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+	memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
 	memset(hdr->addr3, 0xff, ETH_ALEN);
 
 	/* llc layer */
@@ -1190,25 +1232,26 @@
 	arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
 
 	/* arp payload */
-	memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+	memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
 	tmpl.sender_ip = ip_addr;
 
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
 				      &tmpl, sizeof(tmpl), 0,
-				      wl->basic_rate);
+				      wlvif->basic_rate);
 
 	return ret;
 }
 
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	struct ieee80211_qos_hdr template;
 
 	memset(&template, 0, sizeof(template));
 
-	memcpy(template.addr1, wl->bssid, ETH_ALEN);
-	memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
-	memcpy(template.addr3, wl->bssid, ETH_ALEN);
+	memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+	memcpy(template.addr2, vif->addr, ETH_ALEN);
+	memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
 
 	template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
 					     IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@
 
 	return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
 				       sizeof(template), 0,
-				       wl->basic_rate);
+				       wlvif->basic_rate);
 }
 
 int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@
 	return ret;
 }
 
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       u16 action, u8 id, u8 key_type,
 		       u8 key_size, const u8 *key, const u8 *addr,
 		       u32 tx_seq_32, u16 tx_seq_16)
 {
@@ -1261,7 +1305,7 @@
 	int ret = 0;
 
 	/* hlid might have already been deleted */
-	if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+	if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
 		return 0;
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@
 		goto out;
 	}
 
-	cmd->hlid = wl->sta_hlid;
+	cmd->hlid = wlvif->sta.hlid;
 
 	if (key_type == KEY_WEP)
 		cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@
  * TODO: merge with sta/ibss into 1 set_key function.
  * note there are slight diffs
  */
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-			u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
-			u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  u16 action, u8 id, u8 key_type,
+			  u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+			  u16 tx_seq_16)
 {
 	struct wl1271_cmd_set_keys *cmd;
 	int ret = 0;
@@ -1333,7 +1378,7 @@
 	if (!cmd)
 		return -ENOMEM;
 
-	if (hlid == wl->ap_bcast_hlid) {
+	if (hlid == wlvif->ap.bcast_hlid) {
 		if (key_type == KEY_WEP)
 			lid_type = WEP_DEFAULT_LID_TYPE;
 		else
@@ -1411,7 +1456,8 @@
 	return ret;
 }
 
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			struct ieee80211_sta *sta, u8 hlid)
 {
 	struct wl12xx_cmd_add_peer *cmd;
 	int i, ret;
@@ -1438,13 +1484,13 @@
 		else
 			cmd->psd_type[i] = WL1271_PSD_LEGACY;
 
-	sta_rates = sta->supp_rates[wl->band];
+	sta_rates = sta->supp_rates[wlvif->band];
 	if (sta->ht_cap.ht_supported)
 		sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
 
 	cmd->supported_rates =
 		cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
-							wl->band));
+							wlvif->band));
 
 	wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
 		     cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@
 	return ret;
 }
 
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  u8 role_id)
 {
 	struct wl12xx_cmd_roc *cmd;
 	int ret = 0;
 
-	wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+	wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
 
 	if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
 		return -EINVAL;
@@ -1601,8 +1648,8 @@
 	}
 
 	cmd->role_id = role_id;
-	cmd->channel = wl->channel;
-	switch (wl->band) {
+	cmd->channel = wlvif->channel;
+	switch (wlvif->band) {
 	case IEEE80211_BAND_2GHZ:
 		cmd->band = RADIO_BAND_2_4GHZ;
 		break;
@@ -1610,7 +1657,7 @@
 		cmd->band = RADIO_BAND_5GHZ;
 		break;
 	default:
-		wl1271_error("roc - unknown band: %d", (int)wl->band);
+		wl1271_error("roc - unknown band: %d", (int)wlvif->band);
 		ret = -EINVAL;
 		goto out_free;
 	}
@@ -1657,14 +1704,14 @@
 	return ret;
 }
 
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
 {
 	int ret = 0;
 
 	if (WARN_ON(test_bit(role_id, wl->roc_map)))
 		return 0;
 
-	ret = wl12xx_cmd_roc(wl, role_id);
+	ret = wl12xx_cmd_roc(wl, wlvif, role_id);
 	if (ret < 0)
 		goto out;
 
@@ -1753,3 +1800,53 @@
 out:
 	return ret;
 }
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	int ret;
+
+	if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+		      wlvif->bss_type == BSS_TYPE_IBSS)))
+		return -EINVAL;
+
+	ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+	if (ret < 0)
+		goto out;
+
+	ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+	if (ret < 0)
+		goto out_stop;
+
+	return 0;
+
+out_stop:
+	wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+	return ret;
+}
+
+/* croc dev hlid, and stop the role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	int ret;
+
+	if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+		      wlvif->bss_type == BSS_TYPE_IBSS)))
+		return -EINVAL;
+
+	/* flush all pending packets */
+	wl1271_tx_work_locked(wl);
+
+	if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+		ret = wl12xx_croc(wl, wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+	if (ret < 0)
+		goto out;
+out:
+	return ret;
+}
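
The cmd.c hunks above convert link (hlid) accounting from a single device-wide bitmap to a device bitmap mirrored by a per-vif bitmap, so tearing down one interface releases only the links that interface actually holds. Below is a minimal userspace sketch of that dual-bitmap bookkeeping; struct names, sizes and the main() driver are illustrative stand-ins, not taken from the driver.

/*
 * Simplified analogue of wl12xx_allocate_link/wl12xx_free_link after the
 * change: a link is reserved in a global map and mirrored in the owning
 * vif's map. MAX_LINKS/INVALID_LINK are illustrative values.
 */
#include <stdio.h>

#define MAX_LINKS	12
#define INVALID_LINK	0xff

struct demo_vif {
	unsigned long links_map;	/* links held by this vif */
};

struct demo_dev {
	unsigned long links_map;	/* links held by any vif */
};

static int demo_allocate_link(struct demo_dev *dev, struct demo_vif *vif,
			      unsigned char *hlid)
{
	for (unsigned char link = 0; link < MAX_LINKS; link++) {
		if (!(dev->links_map & (1UL << link))) {
			dev->links_map |= 1UL << link;	/* global reservation */
			vif->links_map |= 1UL << link;	/* per-vif mirror */
			*hlid = link;
			return 0;
		}
	}
	return -1;	/* no free link; the driver returns -EBUSY here */
}

static void demo_free_link(struct demo_dev *dev, struct demo_vif *vif,
			   unsigned char *hlid)
{
	if (*hlid == INVALID_LINK)
		return;

	dev->links_map &= ~(1UL << *hlid);
	vif->links_map &= ~(1UL << *hlid);
	*hlid = INVALID_LINK;
}

int main(void)
{
	struct demo_dev dev = { 0 };
	struct demo_vif sta = { 0 }, ap = { 0 };
	unsigned char sta_hlid = INVALID_LINK, ap_hlid = INVALID_LINK;

	demo_allocate_link(&dev, &sta, &sta_hlid);
	demo_allocate_link(&dev, &ap, &ap_hlid);
	printf("sta hlid=%d ap hlid=%d global map=0x%lx\n",
	       sta_hlid, ap_hlid, dev.links_map);

	/* removing one vif releases only its own links */
	demo_free_link(&dev, &sta, &sta_hlid);
	printf("after sta teardown: global map=0x%lx ap map=0x%lx\n",
	       dev.links_map, ap.links_map);
	return 0;
}
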
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index b7bd427..3f7d0b9 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,45 +36,54 @@
 int wl1271_cmd_radio_parms(struct wl1271 *wl);
 int wl128x_cmd_radio_parms(struct wl1271 *wl);
 int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+			   u8 *role_id);
 int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       u8 ps_mode);
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
 			   size_t len);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
 			    void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			       const u8 *ssid, size_t ssid_len,
 			       const u8 *ie, size_t ie_len, u8 band);
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif,
 					      struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+				   struct wl12xx_vif *wlvif);
 int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			   u16 action, u8 id, u8 key_type,
 			   u8 key_size, const u8 *key, const u8 *addr,
 			   u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  u16 action, u8 id, u8 key_type,
 			  u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
 			  u16 tx_seq_16);
 int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			struct ieee80211_sta *sta, u8 hlid);
 int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@
 int wl12xx_cmd_channel_switch(struct wl1271 *wl,
 			      struct ieee80211_channel_switch *ch_switch);
 int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			 u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
 
 enum wl1271_commands {
 	CMD_INTERROGATE     = 1,    /*use this to read information elements*/
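
The reworked prototypes take a struct wl12xx_vif * because the per-interface state now lives inside the mac80211 interface's drv_priv area; the code converts back and forth with wl12xx_wlvif_to_vif()/wl12xx_vif_to_data(), and event.c does the reverse step explicitly with container_of() on drv_priv. The sketch below shows that embedding pattern in isolation; the types are simplified stand-ins, not mac80211's actual definitions.

/*
 * drv_priv embedding: the driver's per-interface struct starts at the
 * interface object's drv_priv member, so either pointer can be recovered
 * from the other by offset arithmetic (container_of in the kernel).
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ieee80211_vif {
	unsigned char addr[6];
	char drv_priv[] __attribute__((aligned(sizeof(void *))));
};

struct demo_wlvif {
	int role_id;
};

/* wlvif -> vif: subtract the offset of drv_priv (container_of) */
static struct demo_ieee80211_vif *demo_wlvif_to_vif(struct demo_wlvif *wlvif)
{
	return (struct demo_ieee80211_vif *)
		((char *)wlvif - offsetof(struct demo_ieee80211_vif, drv_priv));
}

/* vif -> wlvif: the private data simply starts at drv_priv */
static struct demo_wlvif *demo_vif_to_wlvif(struct demo_ieee80211_vif *vif)
{
	return (struct demo_wlvif *)vif->drv_priv;
}

int main(void)
{
	struct demo_ieee80211_vif *vif =
		malloc(sizeof(*vif) + sizeof(struct demo_wlvif));
	struct demo_wlvif *wlvif;

	if (!vif)
		return 1;

	wlvif = demo_vif_to_wlvif(vif);
	wlvif->role_id = 3;
	printf("round trip ok: %d\n", demo_wlvif_to_vif(wlvif) == vif);
	free(vif);
	return 0;
}
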
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 04bb8fb..1bcfb01 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -440,6 +440,10 @@
 	CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
 	CONF_HW_BIT_RATE_54MBPS)
 
+#define CONF_TX_CCK_RATES  (CONF_HW_BIT_RATE_1MBPS |		\
+	CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS |	\
+	CONF_HW_BIT_RATE_11MBPS)
+
 #define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS |             \
 	CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS |      \
 	CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 0000000..b85fd8c4
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+	DEBUG_NONE	= 0,
+	DEBUG_IRQ	= BIT(0),
+	DEBUG_SPI	= BIT(1),
+	DEBUG_BOOT	= BIT(2),
+	DEBUG_MAILBOX	= BIT(3),
+	DEBUG_TESTMODE	= BIT(4),
+	DEBUG_EVENT	= BIT(5),
+	DEBUG_TX	= BIT(6),
+	DEBUG_RX	= BIT(7),
+	DEBUG_SCAN	= BIT(8),
+	DEBUG_CRYPT	= BIT(9),
+	DEBUG_PSM	= BIT(10),
+	DEBUG_MAC80211	= BIT(11),
+	DEBUG_CMD	= BIT(12),
+	DEBUG_ACX	= BIT(13),
+	DEBUG_SDIO	= BIT(14),
+	DEBUG_FILTERS   = BIT(15),
+	DEBUG_ADHOC     = BIT(16),
+	DEBUG_AP	= BIT(17),
+	DEBUG_MASTER	= (DEBUG_ADHOC | DEBUG_AP),
+	DEBUG_ALL	= ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+	pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+	pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+	pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+	pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+	do { \
+		if (level & wl12xx_debug_level) \
+			pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+	} while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len)	\
+	do { \
+		if (level & wl12xx_debug_level) \
+			print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+				       DUMP_PREFIX_OFFSET, 16, 1,	\
+				       buf,				\
+				       min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+				       0);				\
+	} while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len)	\
+	do { \
+		if (level & wl12xx_debug_level) \
+			print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+				       DUMP_PREFIX_OFFSET, 16, 1,	\
+				       buf,				\
+				       min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+				       true);				\
+	} while (0)
+
+#endif /* __DEBUG_H__ */
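
The new debug.h gates each message on a category bit in the runtime mask wl12xx_debug_level, so individual subsystems (CMD, EVENT, TX, ...) can be switched on independently. A small userspace sketch of that filtering, with fprintf standing in for pr_debug and only a few of the categories reproduced:

/*
 * Level-mask filtering: a message is printed only when its category bit
 * is set in the runtime mask. Values mirror the enum above; the helper
 * names are illustrative.
 */
#include <stdio.h>

enum {
	DEMO_DEBUG_EVENT = 1 << 5,
	DEMO_DEBUG_TX    = 1 << 6,
	DEMO_DEBUG_CMD   = 1 << 12,
};

static unsigned int demo_debug_level = DEMO_DEBUG_CMD | DEMO_DEBUG_EVENT;

#define demo_debug(level, fmt, ...)					\
	do {								\
		if ((level) & demo_debug_level)				\
			fprintf(stderr, "wl12xx: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	demo_debug(DEMO_DEBUG_CMD, "cmd role start sta %d", 3);	/* printed */
	demo_debug(DEMO_DEBUG_TX, "tx frame queued");			/* filtered out */
	return 0;
}
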
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 3999fd5..15eb3a9 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "ps.h"
 #include "io.h"
@@ -316,12 +317,19 @@
 {
 	struct wl1271 *wl = file->private_data;
 	int res = 0;
-	char buf[1024];
+	ssize_t ret;
+	char *buf;
+
+#define DRIVER_STATE_BUF_LEN 1024
+
+	buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	mutex_lock(&wl->mutex);
 
 #define DRIVER_STATE_PRINT(x, fmt)   \
-	(res += scnprintf(buf + res, sizeof(buf) - res,\
+	(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
 			  #x " = " fmt "\n", wl->x))
 
 #define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@
 	DRIVER_STATE_PRINT_INT(tx_results_count);
 	DRIVER_STATE_PRINT_LHEX(flags);
 	DRIVER_STATE_PRINT_INT(tx_blocks_freed);
-	DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
 	DRIVER_STATE_PRINT_INT(rx_counter);
-	DRIVER_STATE_PRINT_INT(session_counter);
 	DRIVER_STATE_PRINT_INT(state);
-	DRIVER_STATE_PRINT_INT(bss_type);
 	DRIVER_STATE_PRINT_INT(channel);
-	DRIVER_STATE_PRINT_HEX(rate_set);
-	DRIVER_STATE_PRINT_HEX(basic_rate_set);
-	DRIVER_STATE_PRINT_HEX(basic_rate);
 	DRIVER_STATE_PRINT_INT(band);
-	DRIVER_STATE_PRINT_INT(beacon_int);
-	DRIVER_STATE_PRINT_INT(psm_entry_retry);
-	DRIVER_STATE_PRINT_INT(ps_poll_failures);
 	DRIVER_STATE_PRINT_INT(power_level);
-	DRIVER_STATE_PRINT_INT(rssi_thold);
-	DRIVER_STATE_PRINT_INT(last_rssi_event);
 	DRIVER_STATE_PRINT_INT(sg_enabled);
 	DRIVER_STATE_PRINT_INT(enable_11a);
 	DRIVER_STATE_PRINT_INT(noise);
-	DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
-	DRIVER_STATE_PRINT_INT(last_tx_hlid);
-	DRIVER_STATE_PRINT_INT(ba_support);
-	DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
 	DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
 	DRIVER_STATE_PRINT_LHEX(ap_ps_map);
 	DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@
 #undef DRIVER_STATE_PRINT_LHEX
 #undef DRIVER_STATE_PRINT_STR
 #undef DRIVER_STATE_PRINT
+#undef DRIVER_STATE_BUF_LEN
 
 	mutex_unlock(&wl->mutex);
 
-	return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+	kfree(buf);
+	return ret;
 }
 
 static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	struct wl1271 *wl = file->private_data;
+	struct wl12xx_vif *wlvif;
+	int ret, res = 0;
+	const int buf_size = 4096;
+	char *buf;
+	char tmp_buf[64];
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt)				\
+	(res += scnprintf(buf + res, buf_size - res,	\
+			  #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x)  VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x)   VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x)   VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x)  VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x)   VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len)				\
+	do {							\
+		memset(tmp_buf, 0, sizeof(tmp_buf));		\
+		memcpy(tmp_buf, wlvif->x,			\
+		       min_t(u8, len, sizeof(tmp_buf) - 1));	\
+		res += scnprintf(buf + res, buf_size - res,	\
+				 #x " = %s\n", tmp_buf);	\
+	} while (0)
+
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		VIF_STATE_PRINT_INT(role_id);
+		VIF_STATE_PRINT_INT(bss_type);
+		VIF_STATE_PRINT_LHEX(flags);
+		VIF_STATE_PRINT_INT(p2p);
+		VIF_STATE_PRINT_INT(dev_role_id);
+		VIF_STATE_PRINT_INT(dev_hlid);
+
+		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+		    wlvif->bss_type == BSS_TYPE_IBSS) {
+			VIF_STATE_PRINT_INT(sta.hlid);
+			VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+			VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+			VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+			VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+		} else {
+			VIF_STATE_PRINT_INT(ap.global_hlid);
+			VIF_STATE_PRINT_INT(ap.bcast_hlid);
+			VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+			VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+			VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+			VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+			VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+			VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+			VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+		}
+		VIF_STATE_PRINT_INT(last_tx_hlid);
+		VIF_STATE_PRINT_LHEX(links_map[0]);
+		VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+		VIF_STATE_PRINT_INT(band);
+		VIF_STATE_PRINT_INT(channel);
+		VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+		VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+		VIF_STATE_PRINT_HEX(basic_rate_set);
+		VIF_STATE_PRINT_HEX(basic_rate);
+		VIF_STATE_PRINT_HEX(rate_set);
+		VIF_STATE_PRINT_INT(beacon_int);
+		VIF_STATE_PRINT_INT(default_key);
+		VIF_STATE_PRINT_INT(aid);
+		VIF_STATE_PRINT_INT(session_counter);
+		VIF_STATE_PRINT_INT(ps_poll_failures);
+		VIF_STATE_PRINT_INT(psm_entry_retry);
+		VIF_STATE_PRINT_INT(power_level);
+		VIF_STATE_PRINT_INT(rssi_thold);
+		VIF_STATE_PRINT_INT(last_rssi_event);
+		VIF_STATE_PRINT_INT(ba_support);
+		VIF_STATE_PRINT_INT(ba_allowed);
+		VIF_STATE_PRINT_LLHEX(tx_security_seq);
+		VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+	}
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+	mutex_unlock(&wl->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+	.read = vifs_state_read,
+	.open = wl1271_open_file_generic,
+	.llseek = default_llseek,
+};
+
 static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
 {
@@ -520,6 +625,7 @@
 			   size_t count, loff_t *ppos)
 {
 	struct wl1271 *wl = file->private_data;
+	struct wl12xx_vif *wlvif;
 	unsigned long value;
 	int ret;
 
@@ -543,7 +649,9 @@
 	if (ret < 0)
 		goto out;
 
-	wl1271_recalc_rx_streaming(wl);
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		wl1271_recalc_rx_streaming(wl, wlvif);
+	}
 
 	wl1271_ps_elp_sleep(wl);
 out:
@@ -572,6 +680,7 @@
 			   size_t count, loff_t *ppos)
 {
 	struct wl1271 *wl = file->private_data;
+	struct wl12xx_vif *wlvif;
 	unsigned long value;
 	int ret;
 
@@ -595,7 +704,9 @@
 	if (ret < 0)
 		goto out;
 
-	wl1271_recalc_rx_streaming(wl);
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		wl1271_recalc_rx_streaming(wl, wlvif);
+	}
 
 	wl1271_ps_elp_sleep(wl);
 out:
@@ -624,6 +735,7 @@
 				      size_t count, loff_t *ppos)
 {
 	struct wl1271 *wl = file->private_data;
+	struct wl12xx_vif *wlvif;
 	char buf[10];
 	size_t len;
 	unsigned long value;
@@ -646,7 +758,9 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+	}
 
 	wl1271_ps_elp_sleep(wl);
 out:
@@ -770,6 +884,7 @@
 	DEBUGFS_ADD(gpio_power, rootdir);
 	DEBUGFS_ADD(start_recovery, rootdir);
 	DEBUGFS_ADD(driver_state, rootdir);
+	DEBUGFS_ADD(vifs_state, rootdir);
 	DEBUGFS_ADD(dtim_interval, rootdir);
 	DEBUGFS_ADD(beacon_interval, rootdir);
 	DEBUGFS_ADD(beacon_filtering, rootdir);
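
The debugfs hunks above replace the on-stack 1024-byte buffer in driver_state_read() with a kmalloc'd buffer handed to simple_read_from_buffer(), and the new vifs_state entry follows the same pattern with a 4096-byte buffer filled per vif. The sketch below shows that read-path pattern in userspace terms: bounded scnprintf-style appends into a heap buffer, then one copy out. malloc/fwrite stand in for kmalloc/simple_read_from_buffer; field names are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define STATE_BUF_LEN 1024

/* append "name = val" to buf, never writing past STATE_BUF_LEN */
static int append(char *buf, int res, const char *name, long val)
{
	int n = snprintf(buf + res, STATE_BUF_LEN - res, "%s = %ld\n", name, val);

	if (n < 0)
		return res;
	if (n >= STATE_BUF_LEN - res)
		return STATE_BUF_LEN - 1;	/* truncated, like scnprintf capping at size */
	return res + n;
}

int main(void)
{
	char *buf = malloc(STATE_BUF_LEN);
	int res = 0;

	if (!buf)
		return 1;	/* the driver returns -ENOMEM here */

	res = append(buf, res, "tx_blocks_freed", 24);
	res = append(buf, res, "rx_counter", 7);
	res = append(buf, res, "channel", 6);

	fwrite(buf, 1, res, stdout);	/* stands in for simple_read_from_buffer */
	free(buf);
	return 0;
}
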
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 674ad2a..d3280df68 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -22,6 +22,7 @@
  */
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "reg.h"
 #include "io.h"
 #include "event.h"
@@ -31,12 +32,16 @@
 
 void wl1271_pspoll_work(struct work_struct *work)
 {
+	struct ieee80211_vif *vif;
+	struct wl12xx_vif *wlvif;
 	struct delayed_work *dwork;
 	struct wl1271 *wl;
 	int ret;
 
 	dwork = container_of(work, struct delayed_work, work);
-	wl = container_of(dwork, struct wl1271, pspoll_work);
+	wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+	vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+	wl = wlvif->wl;
 
 	wl1271_debug(DEBUG_EVENT, "pspoll work");
 
@@ -45,10 +50,10 @@
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+	if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
 		goto out;
 
-	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
 		goto out;
 
 	/*
@@ -60,31 +65,33 @@
 	if (ret < 0)
 		goto out;
 
-	wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+	wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+			   wlvif->basic_rate, true);
 
 	wl1271_ps_elp_sleep(wl);
 out:
 	mutex_unlock(&wl->mutex);
 };
 
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif)
 {
 	int delay = wl->conf.conn.ps_poll_recovery_period;
 	int ret;
 
-	wl->ps_poll_failures++;
-	if (wl->ps_poll_failures == 1)
+	wlvif->ps_poll_failures++;
+	if (wlvif->ps_poll_failures == 1)
 		wl1271_info("AP with dysfunctional ps-poll, "
 			    "trying to work around it.");
 
 	/* force active mode receive data from the AP */
-	if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
-		ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-					 wl->basic_rate, true);
+	if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+		ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+					 wlvif->basic_rate, true);
 		if (ret < 0)
 			return;
-		set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
-		ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+		set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+		ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
 					     msecs_to_jiffies(delay));
 	}
 
@@ -97,6 +104,7 @@
 }
 
 static int wl1271_event_ps_report(struct wl1271 *wl,
+				  struct wl12xx_vif *wlvif,
 				  struct event_mailbox *mbox,
 				  bool *beacon_loss)
 {
@@ -109,41 +117,37 @@
 	case EVENT_ENTER_POWER_SAVE_FAIL:
 		wl1271_debug(DEBUG_PSM, "PSM entry failed");
 
-		if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+		if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
 			/* remain in active mode */
-			wl->psm_entry_retry = 0;
+			wlvif->psm_entry_retry = 0;
 			break;
 		}
 
-		if (wl->psm_entry_retry < total_retries) {
-			wl->psm_entry_retry++;
-			ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-						 wl->basic_rate, true);
+		if (wlvif->psm_entry_retry < total_retries) {
+			wlvif->psm_entry_retry++;
+			ret = wl1271_ps_set_mode(wl, wlvif,
+						 STATION_POWER_SAVE_MODE,
+						 wlvif->basic_rate, true);
 		} else {
 			wl1271_info("No ack to nullfunc from AP.");
-			wl->psm_entry_retry = 0;
+			wlvif->psm_entry_retry = 0;
 			*beacon_loss = true;
 		}
 		break;
 	case EVENT_ENTER_POWER_SAVE_SUCCESS:
-		wl->psm_entry_retry = 0;
-
-		/* enable beacon filtering */
-		ret = wl1271_acx_beacon_filter_opt(wl, true);
-		if (ret < 0)
-			break;
+		wlvif->psm_entry_retry = 0;
 
 		/*
 		 * BET has only a minor effect in 5GHz and masks
 		 * channel switch IEs, so we only enable BET on 2.4GHz
 		*/
-		if (wl->band == IEEE80211_BAND_2GHZ)
+		if (wlvif->band == IEEE80211_BAND_2GHZ)
 			/* enable beacon early termination */
-			ret = wl1271_acx_bet_enable(wl, true);
+			ret = wl1271_acx_bet_enable(wl, wlvif, true);
 
-		if (wl->ps_compl) {
-			complete(wl->ps_compl);
-			wl->ps_compl = NULL;
+		if (wlvif->ps_compl) {
+			complete(wlvif->ps_compl);
+			wlvif->ps_compl = NULL;
 		}
 		break;
 	default:
@@ -154,39 +158,44 @@
 }
 
 static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+				      struct wl12xx_vif *wlvif,
 				      struct event_mailbox *mbox)
 {
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 	enum nl80211_cqm_rssi_threshold_event event;
 	s8 metric = mbox->rssi_snr_trigger_metric[0];
 
 	wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
 
-	if (metric <= wl->rssi_thold)
+	if (metric <= wlvif->rssi_thold)
 		event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
 	else
 		event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
 
-	if (event != wl->last_rssi_event)
-		ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
-	wl->last_rssi_event = event;
+	if (event != wlvif->last_rssi_event)
+		ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+	wlvif->last_rssi_event = event;
 }
 
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-	if (wl->bss_type != BSS_TYPE_AP_BSS) {
-		if (!wl->ba_rx_bitmap)
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+	if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+		if (!wlvif->sta.ba_rx_bitmap)
 			return;
-		ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
-					     wl->bssid);
+		ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+					     vif->bss_conf.bssid);
 	} else {
-		int i;
+		u8 hlid;
 		struct wl1271_link *lnk;
-		for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
-			lnk = &wl->links[i];
-			if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+		for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+				 WL12XX_MAX_LINKS) {
+			lnk = &wl->links[hlid];
+			if (!lnk->ba_bitmap)
 				continue;
 
-			ieee80211_stop_rx_ba_session(wl->vif,
+			ieee80211_stop_rx_ba_session(vif,
 						     lnk->ba_bitmap,
 						     lnk->addr);
 		}
@@ -196,14 +205,23 @@
 static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
 					       u8 enable)
 {
+	struct ieee80211_vif *vif;
+	struct wl12xx_vif *wlvif;
+
 	if (enable) {
 		/* disable dynamic PS when requested by the firmware */
-		ieee80211_disable_dyn_ps(wl->vif);
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			vif = wl12xx_wlvif_to_vif(wlvif);
+			ieee80211_disable_dyn_ps(vif);
+		}
 		set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
 	} else {
-		ieee80211_enable_dyn_ps(wl->vif);
 		clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
-		wl1271_recalc_rx_streaming(wl);
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			vif = wl12xx_wlvif_to_vif(wlvif);
+			ieee80211_enable_dyn_ps(vif);
+			wl1271_recalc_rx_streaming(wl, wlvif);
+		}
 	}
 
 }
@@ -217,10 +235,11 @@
 
 static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
 {
+	struct ieee80211_vif *vif;
+	struct wl12xx_vif *wlvif;
 	int ret;
 	u32 vector;
 	bool beacon_loss = false;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 	bool disconnect_sta = false;
 	unsigned long sta_bitmap = 0;
 
@@ -234,7 +253,7 @@
 		wl1271_debug(DEBUG_EVENT, "status: 0x%x",
 			     mbox->scheduled_scan_status);
 
-		wl1271_scan_stm(wl);
+		wl1271_scan_stm(wl, wl->scan_vif);
 	}
 
 	if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -248,13 +267,12 @@
 		wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
 			     "(status 0x%0x)", mbox->scheduled_scan_status);
 		if (wl->sched_scanning) {
-			wl1271_scan_sched_scan_stop(wl);
 			ieee80211_sched_scan_stopped(wl->hw);
+			wl->sched_scanning = false;
 		}
 	}
 
-	if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
-	    wl->bss_type == BSS_TYPE_STA_BSS)
+	if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
 		wl12xx_event_soft_gemini_sense(wl,
 					       mbox->soft_gemini_sense_info);
 
@@ -267,40 +285,54 @@
 	 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
 	 *
 	 */
-	if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+	if (vector & BSS_LOSE_EVENT_ID) {
+		/* TODO: check for multi-role */
 		wl1271_info("Beacon loss detected.");
 
 		/* indicate to the stack, that beacons have been lost */
 		beacon_loss = true;
 	}
 
-	if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+	if (vector & PS_REPORT_EVENT_ID) {
 		wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
-		ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
-		if (ret < 0)
-			return ret;
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			ret = wl1271_event_ps_report(wl, wlvif,
+						     mbox, &beacon_loss);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
-	if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
-		wl1271_event_pspoll_delivery_fail(wl);
+	if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			wl1271_event_pspoll_delivery_fail(wl, wlvif);
+		}
 
 	if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+		/* TODO: check actual multi-role support */
 		wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
-		if (wl->vif)
-			wl1271_event_rssi_trigger(wl, mbox);
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			wl1271_event_rssi_trigger(wl, wlvif, mbox);
+		}
 	}
 
-	if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+	if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+		u8 role_id = mbox->role_id;
 		wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
-			     "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+			     "ba_allowed = 0x%x, role_id=%d",
+			     mbox->rx_ba_allowed, role_id);
 
-		wl->ba_allowed = !!mbox->rx_ba_allowed;
+		wl12xx_for_each_wlvif(wl, wlvif) {
+			if (role_id != 0xff && role_id != wlvif->role_id)
+				continue;
 
-		if (wl->vif && !wl->ba_allowed)
-			wl1271_stop_ba_event(wl);
+			wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+			if (!wlvif->ba_allowed)
+				wl1271_stop_ba_event(wl, wlvif);
+		}
 	}
 
-	if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+	if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
 		wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
 					  "status = 0x%x",
 					  mbox->channel_switch_status);
@@ -309,50 +341,65 @@
 		 * 1) channel switch complete with status=0
 		 * 2) channel switch failed status=1
 		 */
-		if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
-		    (wl->vif))
-			ieee80211_chswitch_done(wl->vif,
-				mbox->channel_switch_status ? false : true);
+
+		/* TODO: configure only the relevant vif */
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+			bool success;
+
+			if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+						&wl->flags))
+				continue;
+
+			success = mbox->channel_switch_status ? false : true;
+			ieee80211_chswitch_done(vif, success);
+		}
 	}
 
 	if ((vector & DUMMY_PACKET_EVENT_ID)) {
 		wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
-		if (wl->vif)
-			wl1271_tx_dummy_packet(wl);
+		wl1271_tx_dummy_packet(wl);
 	}
 
 	/*
 	 * "TX retries exceeded" has a different meaning according to mode.
 	 * In AP mode the offending station is disconnected.
 	 */
-	if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+	if (vector & MAX_TX_RETRY_EVENT_ID) {
 		wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
 		sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
 		disconnect_sta = true;
 	}
 
-	if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+	if (vector & INACTIVE_STA_EVENT_ID) {
 		wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
 		sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
 		disconnect_sta = true;
 	}
 
-	if (is_ap && disconnect_sta) {
+	if (disconnect_sta) {
 		u32 num_packets = wl->conf.tx.max_tx_retries;
 		struct ieee80211_sta *sta;
 		const u8 *addr;
 		int h;
 
-		for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
-		     h < AP_MAX_LINKS;
-		     h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
-			if (!wl1271_is_active_sta(wl, h))
+		for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+			bool found = false;
+			/* find the ap vif connected to this sta */
+			wl12xx_for_each_wlvif_ap(wl, wlvif) {
+				if (!test_bit(h, wlvif->ap.sta_hlid_map))
+					continue;
+				found = true;
+				break;
+			}
+			if (!found)
 				continue;
 
+			vif = wl12xx_wlvif_to_vif(wlvif);
 			addr = wl->links[h].addr;
 
 			rcu_read_lock();
-			sta = ieee80211_find_sta(wl->vif, addr);
+			sta = ieee80211_find_sta(vif, addr);
 			if (sta) {
 				wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
 				ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@
 		}
 	}
 
-	if (wl->vif && beacon_loss)
-		ieee80211_connection_loss(wl->vif);
+	if (beacon_loss)
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			vif = wl12xx_wlvif_to_vif(wlvif);
+			ieee80211_connection_loss(vif);
+		}
 
 	return 0;
 }
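
With multiple interfaces, the event handlers above no longer act on a single wl->vif; each event walks the active vifs and skips those it does not address, as the BA_SESSION_RX_CONSTRAINT handling does by comparing role_id (0xff acting as a wildcard). A simplified userspace sketch of that fan-out, with a plain linked list in place of the driver's vif iteration helpers and illustrative field names:

#include <stdbool.h>
#include <stdio.h>

#define WILDCARD_ROLE 0xff

struct demo_vif {
	unsigned char role_id;
	bool ba_allowed;
	struct demo_vif *next;
};

/* apply a BA-constraint event only to the vifs it addresses */
static void handle_ba_constraint(struct demo_vif *vifs, unsigned char role_id,
				 bool rx_ba_allowed)
{
	for (struct demo_vif *v = vifs; v; v = v->next) {
		if (role_id != WILDCARD_ROLE && role_id != v->role_id)
			continue;
		v->ba_allowed = rx_ba_allowed;
		if (!v->ba_allowed)
			printf("role %d: stopping rx BA sessions\n", v->role_id);
	}
}

int main(void)
{
	struct demo_vif ap  = { .role_id = 2, .ba_allowed = true, .next = NULL };
	struct demo_vif sta = { .role_id = 1, .ba_allowed = true, .next = &ap };

	handle_ba_constraint(&sta, 1, false);		/* only the sta role is affected */
	handle_ba_constraint(&sta, WILDCARD_ROLE, true);/* wildcard touches every vif */
	return 0;
}
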
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 49c1a0e..1d878ba 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -132,7 +132,4 @@
 int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
 void wl1271_pspoll_work(struct work_struct *work);
 
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
 #endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 04db64c..ca7ee59 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
+#include "debug.h"
 #include "init.h"
 #include "wl12xx_80211.h"
 #include "acx.h"
@@ -33,7 +34,7 @@
 #include "tx.h"
 #include "io.h"
 
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
 {
 	int ret, i;
 
@@ -64,7 +65,7 @@
 
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
 				      sizeof
-				      (struct wl12xx_qos_null_data_template),
+				      (struct ieee80211_qos_hdr),
 				      0, WL1271_RATE_AUTOMATIC);
 	if (ret < 0)
 		return ret;
@@ -88,105 +89,6 @@
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
-					      WL1271_CMD_TEMPL_DFLT_SIZE, i,
-					      WL1271_RATE_AUTOMATIC);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
-{
-	struct wl12xx_disconn_template *tmpl;
-	int ret;
-	u32 rate;
-
-	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
-	if (!tmpl) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
-					     IEEE80211_STYPE_DEAUTH);
-
-	rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
-				      tmpl, sizeof(*tmpl), 0, rate);
-
-out:
-	kfree(tmpl);
-	return ret;
-}
-
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
-{
-	struct ieee80211_hdr_3addr *nullfunc;
-	int ret;
-	u32 rate;
-
-	nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
-	if (!nullfunc) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
-					      IEEE80211_STYPE_NULLFUNC |
-					      IEEE80211_FCTL_FROMDS);
-
-	/* nullfunc->addr1 is filled by FW */
-
-	memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
-	memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
-
-	rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
-				      sizeof(*nullfunc), 0, rate);
-
-out:
-	kfree(nullfunc);
-	return ret;
-}
-
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
-{
-	struct ieee80211_qos_hdr *qosnull;
-	int ret;
-	u32 rate;
-
-	qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
-	if (!qosnull) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
-					     IEEE80211_STYPE_QOS_NULLFUNC |
-					     IEEE80211_FCTL_FROMDS);
-
-	/* qosnull->addr1 is filled by FW */
-
-	memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
-	memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
-
-	rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
-				      sizeof(*qosnull), 0, rate);
-
-out:
-	kfree(qosnull);
-	return ret;
-}
-
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
-{
-	int ret;
-
 	/*
 	 * Put very large empty placeholders for all templates. These
 	 * reserve memory for later.
@@ -210,22 +112,106 @@
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
-				      sizeof(struct wl12xx_null_data_template),
-				      0, WL1271_RATE_AUTOMATIC);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
-				      sizeof
-				      (struct wl12xx_qos_null_data_template),
-				      0, WL1271_RATE_AUTOMATIC);
-	if (ret < 0)
-		return ret;
+	for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
+					      sizeof(struct ieee80211_qos_hdr),
+					      i, WL1271_RATE_AUTOMATIC);
+		if (ret < 0)
+			return ret;
+	}
 
 	return 0;
 }
 
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+					  struct wl12xx_vif *wlvif)
+{
+	struct wl12xx_disconn_template *tmpl;
+	int ret;
+	u32 rate;
+
+	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+	if (!tmpl) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					     IEEE80211_STYPE_DEAUTH);
+
+	rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+				      tmpl, sizeof(*tmpl), 0, rate);
+
+out:
+	kfree(tmpl);
+	return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+					struct ieee80211_vif *vif)
+{
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct ieee80211_hdr_3addr *nullfunc;
+	int ret;
+	u32 rate;
+
+	nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+	if (!nullfunc) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+					      IEEE80211_STYPE_NULLFUNC |
+					      IEEE80211_FCTL_FROMDS);
+
+	/* nullfunc->addr1 is filled by FW */
+
+	memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+	memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
+
+	rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+				      sizeof(*nullfunc), 0, rate);
+
+out:
+	kfree(nullfunc);
+	return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+					    struct ieee80211_vif *vif)
+{
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct ieee80211_qos_hdr *qosnull;
+	int ret;
+	u32 rate;
+
+	qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+	if (!qosnull) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+					     IEEE80211_STYPE_QOS_NULLFUNC |
+					     IEEE80211_FCTL_FROMDS);
+
+	/* qosnull->addr1 is filled by FW */
+
+	memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+	memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
+
+	rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+				      sizeof(*qosnull), 0, rate);
+
+out:
+	kfree(qosnull);
+	return ret;
+}
+
 static int wl12xx_init_rx_config(struct wl1271 *wl)
 {
 	int ret;
@@ -237,39 +223,37 @@
 	return 0;
 }
 
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+					    struct wl12xx_vif *wlvif)
 {
 	int ret;
 
-	ret = wl1271_acx_pd_threshold(wl);
+	ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+	ret = wl1271_acx_service_period_timeout(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_service_period_timeout(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+	ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+					 struct wl12xx_vif *wlvif)
 {
 	int ret;
 
-	/* disable beacon filtering at this stage */
-	ret = wl1271_acx_beacon_filter_opt(wl, false);
+	ret = wl1271_acx_beacon_filter_table(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_beacon_filter_table(wl);
+	/* enable beacon filtering */
+	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
 	if (ret < 0)
 		return ret;
 
@@ -302,11 +286,12 @@
 	return 0;
 }
 
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+					struct wl12xx_vif *wlvif)
 {
 	int ret;
 
-	ret = wl1271_acx_bcn_dtim_options(wl);
+	ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
@@ -327,36 +312,13 @@
 	return 0;
 }
 
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret;
 
-	if (wl->chip.id != CHIP_ID_1283_PG20) {
-		ret = wl1271_cmd_ext_radio_parms(wl);
-		if (ret < 0)
-			return ret;
-	}
-
 	/* PS config */
-	ret = wl1271_acx_config_ps(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_sta_init_templates_config(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
-	if (ret < 0)
-		return ret;
-
-	/* Initialize connection monitoring thresholds */
-	ret = wl1271_acx_conn_monit_params(wl, false);
-	if (ret < 0)
-		return ret;
-
-	/* Beacon filtering */
-	ret = wl1271_init_beacon_filter(wl);
+	ret = wl12xx_acx_config_ps(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
@@ -365,103 +327,61 @@
 	if (ret < 0)
 		return ret;
 
-	/* Beacons and broadcast settings */
-	ret = wl1271_init_beacon_broadcast(wl);
-	if (ret < 0)
-		return ret;
-
-	/* Configure for ELP power saving */
-	ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
-	if (ret < 0)
-		return ret;
-
-	/* Configure rssi/snr averaging weights */
-	ret = wl1271_acx_rssi_snr_avg_weights(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_acx_sta_rate_policies(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl12xx_acx_mem_cfg(wl);
-	if (ret < 0)
-		return ret;
-
-	/* Configure the FW logger */
-	ret = wl12xx_init_fwlog(wl);
+	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+				       struct ieee80211_vif *vif)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret, i;
 
 	/* disable all keep-alive templates */
 	for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-		ret = wl1271_acx_keep_alive_config(wl, i,
+		ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
 						   ACX_KEEP_ALIVE_TPL_INVALID);
 		if (ret < 0)
 			return ret;
 	}
 
 	/* disable the keep-alive feature */
-	ret = wl1271_acx_keep_alive_mode(wl, false);
+	ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret;
 
-	ret = wl1271_ap_init_templates_config(wl);
-	if (ret < 0)
-		return ret;
-
-	/* Configure for power always on */
-	ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_init_ap_rates(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl1271_acx_ap_max_tx_retry(wl);
-	if (ret < 0)
-		return ret;
-
-	ret = wl12xx_acx_mem_cfg(wl);
-	if (ret < 0)
-		return ret;
-
-	/* initialize Tx power */
-	ret = wl1271_acx_tx_power(wl, wl->power_level);
+	ret = wl1271_init_ap_rates(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret;
 
-	ret = wl1271_ap_init_deauth_template(wl);
+	ret = wl1271_ap_init_deauth_template(wl, wlvif);
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_ap_init_null_template(wl);
+	ret = wl1271_ap_init_null_template(wl, vif);
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_ap_init_qos_null_template(wl);
+	ret = wl1271_ap_init_qos_null_template(wl, vif);
 	if (ret < 0)
 		return ret;
 
@@ -469,43 +389,45 @@
 	 * when operating as AP we want to receive external beacons for
 	 * configuring ERP protection.
 	 */
-	ret = wl1271_acx_beacon_filter_opt(wl, false);
+	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+				      struct ieee80211_vif *vif)
 {
-	return wl1271_ap_init_templates(wl);
+	return wl1271_ap_init_templates(wl, vif);
 }
 
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int i, ret;
 	struct conf_tx_rate_class rc;
 	u32 supported_rates;
 
-	wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+	wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+		     wlvif->basic_rate_set);
 
-	if (wl->basic_rate_set == 0)
+	if (wlvif->basic_rate_set == 0)
 		return -EINVAL;
 
-	rc.enabled_rates = wl->basic_rate_set;
+	rc.enabled_rates = wlvif->basic_rate_set;
 	rc.long_retry_limit = 10;
 	rc.short_retry_limit = 10;
 	rc.aflags = 0;
-	ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+	ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
 	if (ret < 0)
 		return ret;
 
 	/* use the min basic rate for AP broadcast/multicast */
-	rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+	rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
 	rc.short_retry_limit = 10;
 	rc.long_retry_limit = 10;
 	rc.aflags = 0;
-	ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+	ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
 	if (ret < 0)
 		return ret;
 
@@ -513,7 +435,7 @@
 	 * If the basic rates contain OFDM rates, use OFDM only
 	 * rates for unicast TX as well. Else use all supported rates.
 	 */
-	if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+	if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
 		supported_rates = CONF_TX_OFDM_RATES;
 	else
 		supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@
 		rc.short_retry_limit = 10;
 		rc.long_retry_limit = 10;
 		rc.aflags = 0;
-		ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+		ret = wl1271_acx_ap_rate_policy(wl, &rc,
+						wlvif->ap.ucast_rate_idx[i]);
 		if (ret < 0)
 			return ret;
 	}
@@ -535,24 +458,23 @@
 	return 0;
 }
 
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	/* Reset the BA RX indicators */
-	wl->ba_rx_bitmap = 0;
-	wl->ba_allowed = true;
+	wlvif->ba_allowed = true;
 	wl->ba_rx_session_count = 0;
 
 	/* BA is supported in STA/AP modes */
-	if (wl->bss_type != BSS_TYPE_AP_BSS &&
-	    wl->bss_type != BSS_TYPE_STA_BSS) {
-		wl->ba_support = false;
+	if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+	    wlvif->bss_type != BSS_TYPE_STA_BSS) {
+		wlvif->ba_support = false;
 		return 0;
 	}
 
-	wl->ba_support = true;
+	wlvif->ba_support = true;
 
 	/* 802.11n initiator BA session setting */
-	return wl12xx_acx_set_ba_initiator_policy(wl);
+	return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
 }
 
 int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@
 	if (wl->chip.id == CHIP_ID_1283_PG20) {
 		u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
 
-		if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+		if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
 			/* Enable SDIO padding */
 			host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
 
@@ -575,39 +497,186 @@
 	return ret;
 }
 
+/* vif-specific initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	int ret;
+
+	ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Initialize connection monitoring thresholds */
+	ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+	if (ret < 0)
+		return ret;
+
+	/* Beacon filtering */
+	ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
+	/* Beacons and broadcast settings */
+	ret = wl1271_init_beacon_broadcast(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
+	/* Configure rssi/snr averaging weights */
+	ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* vif-specific initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	int ret;
+
+	ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
+	/* initialize Tx power */
+	ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
+{
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct conf_tx_ac_category *conf_ac;
+	struct conf_tx_tid *conf_tid;
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+	int ret, i;
+
+	/*
+	 * consider all existing roles before configuring psm.
+	 * TODO: reconfigure on interface removal.
+	 */
+	if (!wl->ap_count) {
+		if (is_ap) {
+			/* Configure for power always on */
+			ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+			if (ret < 0)
+				return ret;
+		} else if (!wl->sta_count) {
+			/* Configure for ELP power saving */
+			ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Mode specific init */
+	if (is_ap) {
+		ret = wl1271_ap_hw_init(wl, wlvif);
+		if (ret < 0)
+			return ret;
+
+		ret = wl12xx_init_ap_role(wl, wlvif);
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = wl1271_sta_hw_init(wl, wlvif);
+		if (ret < 0)
+			return ret;
+
+		ret = wl12xx_init_sta_role(wl, wlvif);
+		if (ret < 0)
+			return ret;
+	}
+
+	wl12xx_init_phy_vif_config(wl, wlvif);
+
+	/* Default TID/AC configuration */
+	BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+	for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+		conf_ac = &wl->conf.tx.ac_conf[i];
+		ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+					conf_ac->cw_min, conf_ac->cw_max,
+					conf_ac->aifsn, conf_ac->tx_op_limit);
+		if (ret < 0)
+			return ret;
+
+		conf_tid = &wl->conf.tx.tid_conf[i];
+		ret = wl1271_acx_tid_cfg(wl, wlvif,
+					 conf_tid->queue_id,
+					 conf_tid->channel_type,
+					 conf_tid->tsid,
+					 conf_tid->ps_scheme,
+					 conf_tid->ack_policy,
+					 conf_tid->apsd_conf[0],
+					 conf_tid->apsd_conf[1]);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Configure HW encryption */
+	ret = wl1271_acx_feature_cfg(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
+	/* Mode specific init - post mem init */
+	if (is_ap)
+		ret = wl1271_ap_hw_init_post_mem(wl, vif);
+	else
+		ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+	if (ret < 0)
+		return ret;
+
+	/* Configure initiator BA sessions policies */
+	ret = wl1271_set_ba_policies(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
 
 int wl1271_hw_init(struct wl1271 *wl)
 {
-	struct conf_tx_ac_category *conf_ac;
-	struct conf_tx_tid *conf_tid;
-	int ret, i;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	int ret;
 
-	if (wl->chip.id == CHIP_ID_1283_PG20)
+	if (wl->chip.id == CHIP_ID_1283_PG20) {
 		ret = wl128x_cmd_general_parms(wl);
-	else
-		ret = wl1271_cmd_general_parms(wl);
-	if (ret < 0)
-		return ret;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20)
+		if (ret < 0)
+			return ret;
 		ret = wl128x_cmd_radio_parms(wl);
-	else
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = wl1271_cmd_general_parms(wl);
+		if (ret < 0)
+			return ret;
 		ret = wl1271_cmd_radio_parms(wl);
-	if (ret < 0)
-		return ret;
+		if (ret < 0)
+			return ret;
+		ret = wl1271_cmd_ext_radio_parms(wl);
+		if (ret < 0)
+			return ret;
+	}
 
 	/* Chip-specific init */
 	ret = wl1271_chip_specific_init(wl);
 	if (ret < 0)
 		return ret;
 
-	/* Mode specific init */
-	if (is_ap)
-		ret = wl1271_ap_hw_init(wl);
-	else
-		ret = wl1271_sta_hw_init(wl);
+	/* Init templates */
+	ret = wl1271_init_templates_config(wl);
+	if (ret < 0)
+		return ret;
 
+	ret = wl12xx_acx_mem_cfg(wl);
+	if (ret < 0)
+		return ret;
+
+	/* Configure the FW logger */
+	ret = wl12xx_init_fwlog(wl);
 	if (ret < 0)
 		return ret;
 
@@ -626,11 +695,6 @@
 	if (ret < 0)
 		goto out_free_memmap;
 
-	/* PHY layer config */
-	ret = wl1271_init_phy_config(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
 	ret = wl1271_acx_dco_itrim_params(wl);
 	if (ret < 0)
 		goto out_free_memmap;
@@ -655,61 +719,20 @@
 	if (ret < 0)
 		goto out_free_memmap;
 
-	/* Default TID/AC configuration */
-	BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
-	for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
-		conf_ac = &wl->conf.tx.ac_conf[i];
-		ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
-					conf_ac->cw_max, conf_ac->aifsn,
-					conf_ac->tx_op_limit);
-		if (ret < 0)
-			goto out_free_memmap;
-
-		conf_tid = &wl->conf.tx.tid_conf[i];
-		ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
-					 conf_tid->channel_type,
-					 conf_tid->tsid,
-					 conf_tid->ps_scheme,
-					 conf_tid->ack_policy,
-					 conf_tid->apsd_conf[0],
-					 conf_tid->apsd_conf[1]);
-		if (ret < 0)
-			goto out_free_memmap;
-	}
-
 	/* Enable data path */
 	ret = wl1271_cmd_data_path(wl, 1);
 	if (ret < 0)
 		goto out_free_memmap;
 
-	/* Configure HW encryption */
-	ret = wl1271_acx_feature_cfg(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
 	/* configure PM */
 	ret = wl1271_acx_pm_config(wl);
 	if (ret < 0)
 		goto out_free_memmap;
 
-	/* Mode specific init - post mem init */
-	if (is_ap)
-		ret = wl1271_ap_hw_init_post_mem(wl);
-	else
-		ret = wl1271_sta_hw_init_post_mem(wl);
-
-	if (ret < 0)
-		goto out_free_memmap;
-
 	ret = wl12xx_acx_set_rate_mgmt_params(wl);
 	if (ret < 0)
 		goto out_free_memmap;
 
-	/* Configure initiator BA sessions policies */
-	ret = wl1271_set_ba_policies(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
 	/* configure hangover */
 	ret = wl12xx_acx_config_hangover(wl);
 	if (ret < 0)
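
The init.c changes above split one-time chip bring-up (wl1271_hw_init) from per-interface bring-up (wl1271_init_vif_specific). The vif-specific path also picks the sleep-auth mode from the roles that already exist: an AP forces CAM (power always on), while ELP power saving is requested only when the very first station interface comes up. A minimal standalone sketch of that decision follows; psm_mode_for_new_vif() is a hypothetical helper used purely for illustration, the driver itself calls wl1271_acx_sleep_auth() directly.

/*
 * Sketch of the sleep-auth decision in wl1271_init_vif_specific(); a
 * model with hypothetical names, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

enum psm_mode { PSM_KEEP, PSM_CAM, PSM_ELP };

static enum psm_mode psm_mode_for_new_vif(int ap_count, int sta_count,
					  bool new_vif_is_ap)
{
	if (ap_count)		/* an AP already forced power-always-on */
		return PSM_KEEP;
	if (new_vif_is_ap)	/* AP role: configure CAM */
		return PSM_CAM;
	if (!sta_count)		/* first STA: ELP power saving is allowed */
		return PSM_ELP;
	return PSM_KEEP;	/* further STAs leave the mode untouched */
}

int main(void)
{
	printf("%d\n", psm_mode_for_new_vif(0, 0, false));	/* 2 == PSM_ELP */
	printf("%d\n", psm_mode_for_new_vif(0, 1, true));	/* 1 == PSM_CAM */
	return 0;
}

Any later AP keeps the chip in CAM, since wl->ap_count is already non-zero when the next interface is added.
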
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a3c230..2da0f40 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,13 +27,13 @@
 #include "wl12xx.h"
 
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
-int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
 int wl1271_init_pta(struct wl1271 *wl);
 int wl1271_init_energy_detection(struct wl1271 *wl);
 int wl1271_chip_specific_init(struct wl1271 *wl);
 int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
 
 #endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index c2da66f..079ad38 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -24,8 +24,10 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
+#include <linux/interrupt.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "io.h"
 #include "tx.h"
@@ -46,7 +48,7 @@
 bool wl1271_set_block_size(struct wl1271 *wl)
 {
 	if (wl->if_ops->set_block_size) {
-		wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+		wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
 		return true;
 	}
 
@@ -55,12 +57,12 @@
 
 void wl1271_disable_interrupts(struct wl1271 *wl)
 {
-	wl->if_ops->disable_irq(wl);
+	disable_irq(wl->irq);
 }
 
 void wl1271_enable_interrupts(struct wl1271 *wl)
 {
-	wl->if_ops->enable_irq(wl);
+	enable_irq(wl->irq);
 }
 
 /* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@
 void wl1271_io_reset(struct wl1271 *wl)
 {
 	if (wl->if_ops->reset)
-		wl->if_ops->reset(wl);
+		wl->if_ops->reset(wl->dev);
 }
 
 void wl1271_io_init(struct wl1271 *wl)
 {
 	if (wl->if_ops->init)
-		wl->if_ops->init(wl);
+		wl->if_ops->init(wl->dev);
 }
 
 void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
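
In io.c the interrupt path now goes through the kernel's generic enable_irq()/disable_irq() on wl->irq, and the remaining bus callbacks (reset, init, read, write, power, set_block_size) are invoked with wl->dev rather than the wl1271 core structure, so the SDIO/SPI glue no longer needs to know about the driver's private state. The shape of that decoupling is sketched below with hypothetical names (bus_ops, fake_dev); this illustrates the pattern only and is not the driver's wl1271_if_operations definition.

/*
 * Ops table keyed on an opaque device handle; illustrative stand-in for
 * the bus-ops split shown in the io.c/io.h hunks.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_dev { const char *name; };

struct bus_ops {
	void (*write)(struct fake_dev *dev, int addr,
		      const void *buf, size_t len);
	void (*power)(struct fake_dev *dev, int on);
};

static void dummy_write(struct fake_dev *dev, int addr,
			const void *buf, size_t len)
{
	printf("%s: write %zu bytes @0x%x\n", dev->name, len, addr);
}

static void dummy_power(struct fake_dev *dev, int on)
{
	printf("%s: power %s\n", dev->name, on ? "on" : "off");
}

int main(void)
{
	struct fake_dev dev = { "sdio0" };
	struct bus_ops ops = { dummy_write, dummy_power };

	ops.power(&dev, 1);
	ops.write(&dev, 0x1000, "x", 1);
	return 0;
}
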
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index e839341..d398cbc 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -51,23 +51,17 @@
 void wl1271_io_reset(struct wl1271 *wl);
 void wl1271_io_init(struct wl1271 *wl);
 
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
-	return wl->if_ops->dev(wl);
-}
-
-
 /* Raw target IO, address is not translated */
 static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
 				    size_t len, bool fixed)
 {
-	wl->if_ops->write(wl, addr, buf, len, fixed);
+	wl->if_ops->write(wl->dev, addr, buf, len, fixed);
 }
 
 static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
 				   size_t len, bool fixed)
 {
-	wl->if_ops->read(wl, addr, buf, len, fixed);
+	wl->if_ops->read(wl->dev, addr, buf, len, fixed);
 }
 
 static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@
 
 static inline void wl1271_power_off(struct wl1271 *wl)
 {
-	wl->if_ops->power(wl, false);
+	wl->if_ops->power(wl->dev, false);
 	clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 }
 
 static inline int wl1271_power_on(struct wl1271 *wl)
 {
-	int ret = wl->if_ops->power(wl, true);
+	int ret = wl->if_ops->power(wl->dev, true);
 	if (ret == 0)
 		set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 
@@ -176,15 +170,10 @@
 int wl1271_set_partition(struct wl1271 *wl,
 			 struct wl1271_partition_set *p);
 
+bool wl1271_set_block_size(struct wl1271 *wl);
+
 /* Functions from wl1271_main.c */
 
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
 int wl1271_tx_dummy_packet(struct wl1271 *wl);
 
 #endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 884f82b..d5f55a1 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -32,8 +32,10 @@
 #include <linux/slab.h>
 #include <linux/wl12xx.h>
 #include <linux/sched.h>
+#include <linux/interrupt.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "reg.h"
 #include "io.h"
@@ -377,42 +379,30 @@
 static bool bug_on_recovery;
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
+					 struct ieee80211_vif *vif,
 					 bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
-	.name           = "wl1271",
-	.id             = -1,
-
-	/* device model insists to have a release function */
-	.dev            = {
-		.release = wl1271_device_release,
-	},
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 static DEFINE_MUTEX(wl_list_mutex);
 static LIST_HEAD(wl_list);
 
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				  unsigned char operstate)
 {
 	int ret;
+
 	if (operstate != IF_OPER_UP)
 		return 0;
 
-	if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
 		return 0;
 
-	ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+	ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
 	if (ret < 0)
 		return ret;
 
-	wl12xx_croc(wl, wl->role_id);
+	wl12xx_croc(wl, wlvif->role_id);
 
 	wl1271_info("Association completed.");
 	return 0;
@@ -426,6 +416,7 @@
 	struct ieee80211_hw *hw;
 	struct wl1271 *wl;
 	struct wl1271 *wl_temp;
+	struct wl12xx_vif *wlvif;
 	int ret = 0;
 
 	/* Check that this notification is for us. */
@@ -459,17 +450,28 @@
 	if (wl->state == WL1271_STATE_OFF)
 		goto out;
 
-	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+	if (dev->operstate != IF_OPER_UP)
 		goto out;
+	/*
+	 * The correct behavior should be just getting the appropriate wlvif
+	 * from the given dev, but currently we don't have a mac80211
+	 * interface for it.
+	 */
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
-		goto out;
+		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+			continue;
 
-	wl1271_check_operstate(wl, dev->operstate);
+		ret = wl1271_ps_elp_wakeup(wl);
+		if (ret < 0)
+			goto out;
 
-	wl1271_ps_elp_sleep(wl);
+		wl1271_check_operstate(wl, wlvif,
+				       ieee80211_get_operstate(vif));
 
+		wl1271_ps_elp_sleep(wl);
+	}
 out:
 	mutex_unlock(&wl->mutex);
 
@@ -498,19 +500,20 @@
 	return 0;
 }
 
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				   bool enable)
 {
 	int ret = 0;
 
 	/* we should hold wl->mutex */
-	ret = wl1271_acx_ps_rx_streaming(wl, enable);
+	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
 	if (ret < 0)
 		goto out;
 
 	if (enable)
-		set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
 	else
-		clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
 out:
 	return ret;
 }
@@ -519,25 +522,25 @@
  * this function is being called when the rx_streaming interval
  * has been changed or rx_streaming should be disabled
  */
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret = 0;
 	int period = wl->conf.rx_streaming.interval;
 
 	/* don't reconfigure if rx_streaming is disabled */
-	if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
 		goto out;
 
 	/* reconfigure/disable according to new streaming_period */
 	if (period &&
-	    test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
 	    (wl->conf.rx_streaming.always ||
 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
-		ret = wl1271_set_rx_streaming(wl, true);
+		ret = wl1271_set_rx_streaming(wl, wlvif, true);
 	else {
-		ret = wl1271_set_rx_streaming(wl, false);
+		ret = wl1271_set_rx_streaming(wl, wlvif, false);
 		/* don't cancel_work_sync since we might deadlock */
-		del_timer_sync(&wl->rx_streaming_timer);
+		del_timer_sync(&wlvif->rx_streaming_timer);
 	}
 out:
 	return ret;
@@ -546,13 +549,14 @@
 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
 {
 	int ret;
-	struct wl1271 *wl =
-		container_of(work, struct wl1271, rx_streaming_enable_work);
+	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+						rx_streaming_enable_work);
+	struct wl1271 *wl = wlvif->wl;
 
 	mutex_lock(&wl->mutex);
 
-	if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
-	    !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
 	    (!wl->conf.rx_streaming.always &&
 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
 		goto out;
@@ -564,12 +568,12 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_set_rx_streaming(wl, true);
+	ret = wl1271_set_rx_streaming(wl, wlvif, true);
 	if (ret < 0)
 		goto out_sleep;
 
 	/* stop it after some time of inactivity */
-	mod_timer(&wl->rx_streaming_timer,
+	mod_timer(&wlvif->rx_streaming_timer,
 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
 
 out_sleep:
@@ -581,19 +585,20 @@
 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
 {
 	int ret;
-	struct wl1271 *wl =
-		container_of(work, struct wl1271, rx_streaming_disable_work);
+	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+						rx_streaming_disable_work);
+	struct wl1271 *wl = wlvif->wl;
 
 	mutex_lock(&wl->mutex);
 
-	if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
 		goto out;
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_set_rx_streaming(wl, false);
+	ret = wl1271_set_rx_streaming(wl, wlvif, false);
 	if (ret)
 		goto out_sleep;
 
@@ -605,8 +610,9 @@
 
 static void wl1271_rx_streaming_timer(unsigned long data)
 {
-	struct wl1271 *wl = (struct wl1271 *)data;
-	ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+	struct wl1271 *wl = wlvif->wl;
+	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 }
 
 static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +651,7 @@
 
 static int wl1271_plt_init(struct wl1271 *wl)
 {
-	struct conf_tx_ac_category *conf_ac;
-	struct conf_tx_tid *conf_tid;
-	int ret, i;
+	int ret;
 
 	if (wl->chip.id == CHIP_ID_1283_PG20)
 		ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +680,14 @@
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_sta_init_templates_config(wl);
-	if (ret < 0)
-		return ret;
-
 	ret = wl1271_acx_init_mem_config(wl);
 	if (ret < 0)
 		return ret;
 
-	/* PHY layer config */
-	ret = wl1271_init_phy_config(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
-	ret = wl1271_acx_dco_itrim_params(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
-	/* Initialize connection monitoring thresholds */
-	ret = wl1271_acx_conn_monit_params(wl, false);
-	if (ret < 0)
-		goto out_free_memmap;
-
-	/* Bluetooth WLAN coexistence */
-	ret = wl1271_init_pta(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
-	/* FM WLAN coexistence */
-	ret = wl1271_acx_fm_coex(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
-	/* Energy detection */
-	ret = wl1271_init_energy_detection(wl);
-	if (ret < 0)
-		goto out_free_memmap;
-
 	ret = wl12xx_acx_mem_cfg(wl);
 	if (ret < 0)
 		goto out_free_memmap;
 
-	/* Default fragmentation threshold */
-	ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
-	if (ret < 0)
-		goto out_free_memmap;
-
-	/* Default TID/AC configuration */
-	BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
-	for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
-		conf_ac = &wl->conf.tx.ac_conf[i];
-		ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
-					conf_ac->cw_max, conf_ac->aifsn,
-					conf_ac->tx_op_limit);
-		if (ret < 0)
-			goto out_free_memmap;
-
-		conf_tid = &wl->conf.tx.tid_conf[i];
-		ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
-					 conf_tid->channel_type,
-					 conf_tid->tsid,
-					 conf_tid->ps_scheme,
-					 conf_tid->ack_policy,
-					 conf_tid->apsd_conf[0],
-					 conf_tid->apsd_conf[1]);
-		if (ret < 0)
-			goto out_free_memmap;
-	}
-
 	/* Enable data path */
 	ret = wl1271_cmd_data_path(wl, 1);
 	if (ret < 0)
@@ -768,14 +712,12 @@
 	return ret;
 }
 
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+					struct wl12xx_vif *wlvif,
+					u8 hlid, u8 tx_pkts)
 {
 	bool fw_ps, single_sta;
 
-	/* only regulate station links */
-	if (hlid < WL1271_AP_STA_HLID_START)
-		return;
-
 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
 	single_sta = (wl->active_sta_count == 1);
 
@@ -784,7 +726,7 @@
 	 * packets in FW or if the STA is awake.
 	 */
 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
-		wl1271_ps_link_end(wl, hlid);
+		wl12xx_ps_link_end(wl, wlvif, hlid);
 
 	/*
 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +734,14 @@
 	 * case FW-memory congestion is not a problem.
 	 */
 	else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-		wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
-	int id;
-
-	/* global/broadcast "stations" are always active */
-	if (hlid < WL1271_AP_STA_HLID_START)
-		return true;
-
-	id = hlid - WL1271_AP_STA_HLID_START;
-	return test_bit(id, wl->ap_hlid_map);
+		wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+					   struct wl12xx_vif *wlvif,
 					   struct wl12xx_fw_status *status)
 {
+	struct wl1271_link *lnk;
 	u32 cur_fw_ps_map;
 	u8 hlid, cnt;
 
@@ -825,25 +757,22 @@
 		wl->ap_fw_ps_map = cur_fw_ps_map;
 	}
 
-	for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
-		if (!wl1271_is_active_sta(wl, hlid))
-			continue;
+	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+		lnk = &wl->links[hlid];
+		cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
 
-		cnt = status->tx_lnk_free_pkts[hlid] -
-		      wl->links[hlid].prev_freed_pkts;
+		lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+		lnk->allocated_pkts -= cnt;
 
-		wl->links[hlid].prev_freed_pkts =
-			status->tx_lnk_free_pkts[hlid];
-		wl->links[hlid].allocated_pkts -= cnt;
-
-		wl12xx_irq_ps_regulate_link(wl, hlid,
-					    wl->links[hlid].allocated_pkts);
+		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+					    lnk->allocated_pkts);
 	}
 }
 
 static void wl12xx_fw_status(struct wl1271 *wl,
 			     struct wl12xx_fw_status *status)
 {
+	struct wl12xx_vif *wlvif;
 	struct timespec ts;
 	u32 old_tx_blk_count = wl->tx_blocks_available;
 	int avail, freed_blocks;
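
The link-status update above walks each AP vif's sta_hlid_map with for_each_set_bit instead of scanning a fixed HLID range, and it tracks freed packets per link as a plain difference between the firmware counter and the previously seen value. With narrow unsigned counters that difference stays correct across wraparound; the standalone sketch below assumes 8-bit counters, matching the u8 cnt declared in the hunk.

/*
 * Modulo-256 counter difference, as used for the per-link freed-packet
 * accounting above; standalone illustration with an assumed 8-bit width.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t freed_since(uint8_t now, uint8_t prev)
{
	return (uint8_t)(now - prev);	/* wraps correctly past 255 */
}

int main(void)
{
	/* counter wrapped: 250 -> 3 means 9 packets were freed */
	printf("%u\n", freed_since(3, 250));	/* prints 9 */
	printf("%u\n", freed_since(120, 100));	/* prints 20 */
	return 0;
}
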
@@ -898,8 +827,9 @@
 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
 	/* for AP update num of allocated TX blocks per link and ps status */
-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		wl12xx_irq_update_links_status(wl, status);
+	wl12xx_for_each_wlvif_ap(wl, wlvif) {
+		wl12xx_irq_update_links_status(wl, wlvif, status);
+	}
 
 	/* update the host-chipset time offset */
 	getnstimeofday(&ts);
@@ -932,7 +862,7 @@
 
 #define WL1271_IRQ_MAX_LOOPS 256
 
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
 {
 	int ret;
 	u32 intr;
@@ -1054,7 +984,6 @@
 
 	return IRQ_HANDLED;
 }
-EXPORT_SYMBOL_GPL(wl1271_irq);
 
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
@@ -1069,10 +998,10 @@
 
 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
 
-	ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+	ret = request_firmware(&fw, fw_name, wl->dev);
 
 	if (ret < 0) {
-		wl1271_error("could not get firmware: %d", ret);
+		wl1271_error("could not get firmware %s: %d", fw_name, ret);
 		return ret;
 	}
 
@@ -1107,10 +1036,11 @@
 	const struct firmware *fw;
 	int ret;
 
-	ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+	ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
 
 	if (ret < 0) {
-		wl1271_error("could not get nvs file: %d", ret);
+		wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+			     ret);
 		return ret;
 	}
 
@@ -1217,11 +1147,13 @@
 {
 	struct wl1271 *wl =
 		container_of(work, struct wl1271, recovery_work);
+	struct wl12xx_vif *wlvif;
+	struct ieee80211_vif *vif;
 
 	mutex_lock(&wl->mutex);
 
 	if (wl->state != WL1271_STATE_ON)
-		goto out;
+		goto out_unlock;
 
 	/* Avoid a recursive recovery */
 	set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1170,12 @@
 	 * in the firmware during recovery. This doesn't hurt if the network is
 	 * not encrypted.
 	 */
-	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-	    test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
-		wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+			wlvif->tx_security_seq +=
+				WL1271_TX_SQN_POST_RECOVERY_PADDING;
+	}
 
 	/* Prevent spurious TX during FW restart */
 	ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1186,14 @@
 	}
 
 	/* reboot the chipset */
-	__wl1271_op_remove_interface(wl, false);
+	while (!list_empty(&wl->wlvif_list)) {
+		wlvif = list_first_entry(&wl->wlvif_list,
+				       struct wl12xx_vif, list);
+		vif = wl12xx_wlvif_to_vif(wlvif);
+		__wl1271_op_remove_interface(wl, vif, false);
+	}
+	mutex_unlock(&wl->mutex);
+	wl1271_op_stop(wl->hw);
 
 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 
@@ -1262,8 +1204,8 @@
 	 * to restart the HW.
 	 */
 	ieee80211_wake_queues(wl->hw);
-
-out:
+	return;
+out_unlock:
 	mutex_unlock(&wl->mutex);
 }
 
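
Recovery no longer tears down a single global interface: it pops the first entry off wl->wlvif_list until the list is empty, and only then calls wl1271_op_stop() to reset the device-global state. Removing the head entry on every pass is the safe pattern when the removal itself may unlink elements. A minimal model of that loop follows; struct vif and remove_vif() are illustrative stand-ins for the driver's wl12xx_vif and __wl1271_op_remove_interface().

/*
 * "Remove the first entry until empty" teardown loop; a sketch with
 * hypothetical types, not the driver's list_head-based code.
 */
#include <stdio.h>
#include <stdlib.h>

struct vif {
	int id;
	struct vif *next;
};

static void remove_vif(struct vif **head)
{
	struct vif *v = *head;

	*head = v->next;	/* unlink the first entry */
	printf("tearing down vif %d\n", v->id);
	free(v);
}

int main(void)
{
	struct vif *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct vif *v = malloc(sizeof(*v));
		v->id = i;
		v->next = head;
		head = v;
	}

	while (head)	/* mirrors: while (!list_empty(&wl->wlvif_list)) */
		remove_vif(&head);

	return 0;
}
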
@@ -1318,7 +1260,16 @@
 	/* 0. read chip id from CHIP_ID */
 	wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
 
-	/* 1. check if chip id is valid */
+	/*
+	 * For wl127x based devices we could use the default block
+	 * size (512 bytes), but due to a bug in the sdio driver, we
+	 * need to set it explicitly after the chip is powered on.  To
+	 * simplify the code and since the performance impact is
+	 * negligible, we use the same block size for all different
+	 * chip types.
+	 */
+	if (!wl1271_set_block_size(wl))
+		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
 
 	switch (wl->chip.id) {
 	case CHIP_ID_1271_PG10:
@@ -1328,7 +1279,9 @@
 		ret = wl1271_setup(wl);
 		if (ret < 0)
 			goto out;
+		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
 		break;
+
 	case CHIP_ID_1271_PG20:
 		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
 			     wl->chip.id);
@@ -1336,7 +1289,9 @@
 		ret = wl1271_setup(wl);
 		if (ret < 0)
 			goto out;
+		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
 		break;
+
 	case CHIP_ID_1283_PG20:
 		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
 			     wl->chip.id);
@@ -1344,9 +1299,6 @@
 		ret = wl1271_setup(wl);
 		if (ret < 0)
 			goto out;
-
-		if (wl1271_set_block_size(wl))
-			wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
 		break;
 	case CHIP_ID_1283_PG10:
 	default:
@@ -1389,8 +1341,6 @@
 		goto out;
 	}
 
-	wl->bss_type = BSS_TYPE_STA_BSS;
-
 	while (retries) {
 		retries--;
 		ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1432,34 @@
 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct wl1271 *wl = hw->priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_vif *vif = info->control.vif;
+	struct wl12xx_vif *wlvif = NULL;
 	unsigned long flags;
 	int q, mapping;
-	u8 hlid = 0;
+	u8 hlid;
+
+	if (vif)
+		wlvif = wl12xx_vif_to_data(vif);
 
 	mapping = skb_get_queue_mapping(skb);
 	q = wl1271_tx_get_queue(mapping);
 
-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
 
 	/* queue the packet */
-	if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		if (!wl1271_is_active_sta(wl, hlid)) {
-			wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
-				     hlid, q);
-			dev_kfree_skb(skb);
-			goto out;
-		}
-
-		wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
-		skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
-	} else {
-		skb_queue_tail(&wl->tx_queue[q], skb);
+	if (hlid == WL12XX_INVALID_LINK_ID ||
+	    (wlvif && !test_bit(hlid, wlvif->links_map))) {
+		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+		ieee80211_free_txskb(hw, skb);
+		goto out;
 	}
 
+	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
 	wl->tx_queue_count[q]++;
 
 	/*
@@ -1609,13 +1560,14 @@
 };
 
 #ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+					struct wl12xx_vif *wlvif)
 {
 	int ret = 0;
 
 	mutex_lock(&wl->mutex);
 
-	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
 		goto out_unlock;
 
 	ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1575,12 @@
 		goto out_unlock;
 
 	/* enter psm if needed */
-	if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+	if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
 		DECLARE_COMPLETION_ONSTACK(compl);
 
-		wl->ps_compl = &compl;
-		ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-				   wl->basic_rate, true);
+		wlvif->ps_compl = &compl;
+		ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+				   wlvif->basic_rate, true);
 		if (ret < 0)
 			goto out_sleep;
 
@@ -1638,42 +1590,43 @@
 
 		ret = wait_for_completion_timeout(
 			&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+
+		mutex_lock(&wl->mutex);
 		if (ret <= 0) {
 			wl1271_warning("couldn't enter ps mode!");
 			ret = -EBUSY;
-			goto out;
+			goto out_cleanup;
 		}
 
-		/* take mutex again, and wakeup */
-		mutex_lock(&wl->mutex);
-
 		ret = wl1271_ps_elp_wakeup(wl);
 		if (ret < 0)
-			goto out_unlock;
+			goto out_cleanup;
 	}
 out_sleep:
 	wl1271_ps_elp_sleep(wl);
+out_cleanup:
+	wlvif->ps_compl = NULL;
 out_unlock:
 	mutex_unlock(&wl->mutex);
-out:
 	return ret;
 
 }
 
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+				       struct wl12xx_vif *wlvif)
 {
 	int ret = 0;
 
 	mutex_lock(&wl->mutex);
 
-	if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
 		goto out_unlock;
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out_unlock;
 
-	ret = wl1271_acx_beacon_filter_opt(wl, true);
+	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
 
 	wl1271_ps_elp_sleep(wl);
 out_unlock:
@@ -1682,20 +1635,22 @@
 
 }
 
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif)
 {
-	if (wl->bss_type == BSS_TYPE_STA_BSS)
-		return wl1271_configure_suspend_sta(wl);
-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		return wl1271_configure_suspend_ap(wl);
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+		return wl1271_configure_suspend_sta(wl, wlvif);
+	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+		return wl1271_configure_suspend_ap(wl, wlvif);
 	return 0;
 }
 
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif)
 {
 	int ret;
-	bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
-	bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
 
 	if (!is_sta && !is_ap)
 		return;
@@ -1707,11 +1662,11 @@
 
 	if (is_sta) {
 		/* exit psm if it wasn't configured */
-		if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
-			wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-					   wl->basic_rate, true);
+		if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+			wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+					   wlvif->basic_rate, true);
 	} else if (is_ap) {
-		wl1271_acx_beacon_filter_opt(wl, false);
+		wl1271_acx_beacon_filter_opt(wl, wlvif, false);
 	}
 
 	wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1678,19 @@
 			    struct cfg80211_wowlan *wow)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif;
 	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
 	WARN_ON(!wow || !wow->any);
 
 	wl->wow_enabled = true;
-	ret = wl1271_configure_suspend(wl);
-	if (ret < 0) {
-		wl1271_warning("couldn't prepare device to suspend");
-		return ret;
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		ret = wl1271_configure_suspend(wl, wlvif);
+		if (ret < 0) {
+			wl1271_warning("couldn't prepare device to suspend");
+			return ret;
+		}
 	}
 	/* flush any remaining work */
 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1709,9 @@
 
 	wl1271_enable_interrupts(wl);
 	flush_work(&wl->tx_work);
-	flush_delayed_work(&wl->pspoll_work);
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		flush_delayed_work(&wlvif->pspoll_work);
+	}
 	flush_delayed_work(&wl->elp_work);
 
 	return 0;
@@ -1760,6 +1720,7 @@
 static int wl1271_op_resume(struct ieee80211_hw *hw)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif;
 	unsigned long flags;
 	bool run_irq_work = false;
 
@@ -1783,7 +1744,9 @@
 		wl1271_irq(0, wl);
 		wl1271_enable_interrupts(wl);
 	}
-	wl1271_configure_resume(wl);
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		wl1271_configure_resume(wl, wlvif);
+	}
 	wl->wow_enabled = false;
 
 	return 0;
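
Suspend and resume now apply the same per-interface operation to every entry on wl->wlvif_list through the wl12xx_for_each_wlvif iterators. A minimal model of such an iteration macro is sketched below; for_each_vif() is hypothetical, while the driver's macro wraps the kernel's list_for_each_entry().

/*
 * Tiny iteration-macro model over a singly linked vif list; illustrative
 * only, the real list is a kernel struct list_head.
 */
#include <stdio.h>

struct vif { int id; struct vif *next; };

#define for_each_vif(v, head) \
	for ((v) = (head); (v) != NULL; (v) = (v)->next)

int main(void)
{
	struct vif c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };
	struct vif *v;

	for_each_vif(v, &a)
		printf("configure vif %d\n", v->id);

	return 0;
}
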
@@ -1810,20 +1773,119 @@
 
 static void wl1271_op_stop(struct ieee80211_hw *hw)
 {
+	struct wl1271 *wl = hw->priv;
+	int i;
+
 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+	mutex_lock(&wl->mutex);
+	if (wl->state == WL1271_STATE_OFF) {
+		mutex_unlock(&wl->mutex);
+		return;
+	}
+	/*
+	 * this must be before the cancel_work calls below, so that the work
+	 * functions don't perform further work.
+	 */
+	wl->state = WL1271_STATE_OFF;
+	mutex_unlock(&wl->mutex);
+
+	mutex_lock(&wl_list_mutex);
+	list_del(&wl->list);
+	mutex_unlock(&wl_list_mutex);
+
+	wl1271_disable_interrupts(wl);
+	wl1271_flush_deferred_work(wl);
+	cancel_delayed_work_sync(&wl->scan_complete_work);
+	cancel_work_sync(&wl->netstack_work);
+	cancel_work_sync(&wl->tx_work);
+	cancel_delayed_work_sync(&wl->elp_work);
+
+	/* let's notify MAC80211 about the remaining pending TX frames */
+	wl12xx_tx_reset(wl, true);
+	mutex_lock(&wl->mutex);
+
+	wl1271_power_off(wl);
+
+	wl->band = IEEE80211_BAND_2GHZ;
+
+	wl->rx_counter = 0;
+	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+	wl->tx_blocks_available = 0;
+	wl->tx_allocated_blocks = 0;
+	wl->tx_results_count = 0;
+	wl->tx_packets_count = 0;
+	wl->time_offset = 0;
+	wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+	wl->ap_fw_ps_map = 0;
+	wl->ap_ps_map = 0;
+	wl->sched_scanning = false;
+	memset(wl->roles_map, 0, sizeof(wl->roles_map));
+	memset(wl->links_map, 0, sizeof(wl->links_map));
+	memset(wl->roc_map, 0, sizeof(wl->roc_map));
+	wl->active_sta_count = 0;
+
+	/* The system link is always allocated */
+	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+	/*
+	 * this is performed after the cancel_work calls and the associated
+	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
+	 * get executed before all these vars have been reset.
+	 */
+	wl->flags = 0;
+
+	wl->tx_blocks_freed = 0;
+
+	for (i = 0; i < NUM_TX_QUEUES; i++) {
+		wl->tx_pkts_freed[i] = 0;
+		wl->tx_allocated_pkts[i] = 0;
+	}
+
+	wl1271_debugfs_reset(wl);
+
+	kfree(wl->fw_status);
+	wl->fw_status = NULL;
+	kfree(wl->tx_res_if);
+	wl->tx_res_if = NULL;
+	kfree(wl->target_mem_map);
+	wl->target_mem_map = NULL;
+
+	mutex_unlock(&wl->mutex);
 }
 
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
 {
-	switch (wl->bss_type) {
+	u8 policy = find_first_zero_bit(wl->rate_policies_map,
+					WL12XX_MAX_RATE_POLICIES);
+	if (policy >= WL12XX_MAX_RATE_POLICIES)
+		return -EBUSY;
+
+	__set_bit(policy, wl->rate_policies_map);
+	*idx = policy;
+	return 0;
+}
+
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+		return;
+
+	__clear_bit(*idx, wl->rate_policies_map);
+	*idx = WL12XX_MAX_RATE_POLICIES;
+}
+
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	switch (wlvif->bss_type) {
 	case BSS_TYPE_AP_BSS:
-		if (wl->p2p)
+		if (wlvif->p2p)
 			return WL1271_ROLE_P2P_GO;
 		else
 			return WL1271_ROLE_AP;
 
 	case BSS_TYPE_STA_BSS:
-		if (wl->p2p)
+		if (wlvif->p2p)
 			return WL1271_ROLE_P2P_CL;
 		else
 			return WL1271_ROLE_STA;
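
Rate policies become a small shared pool: each vif claims indices out of wl->rate_policies_map with a find-first-zero-bit scan (wl12xx_allocate_rate_policy) and releases them on interface removal (wl12xx_free_rate_policy). The standalone sketch below models that allocator with a 16-slot bitmap; MAX_POLICIES and the helper names are illustrative, not the driver's symbols.

/*
 * Bitmap index allocator: find the first clear bit, mark it used, hand
 * back the index; freeing clears the bit again.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_POLICIES 16

static uint32_t policies_map;	/* one bit per policy slot */

static int alloc_policy(uint8_t *idx)
{
	for (uint8_t i = 0; i < MAX_POLICIES; i++) {
		if (!(policies_map & (1u << i))) {
			policies_map |= 1u << i;
			*idx = i;
			return 0;
		}
	}
	return -1;	/* mirrors -EBUSY when the pool is exhausted */
}

static void free_policy(uint8_t idx)
{
	policies_map &= ~(1u << idx);
}

int main(void)
{
	uint8_t a, b;

	alloc_policy(&a);	/* a == 0 */
	alloc_policy(&b);	/* b == 1 */
	free_policy(a);
	alloc_policy(&a);	/* slot 0 is reused */
	printf("a=%u b=%u\n", a, b);
	return 0;
}
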
@@ -1832,79 +1894,96 @@
 		return WL1271_ROLE_IBSS;
 
 	default:
-		wl1271_error("invalid bss_type: %d", wl->bss_type);
+		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
 	}
 	return WL12XX_INVALID_ROLE_TYPE;
 }
 
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
-	struct wl1271 *wl = hw->priv;
-	struct wiphy *wiphy = hw->wiphy;
-	int retries = WL1271_BOOT_RETRIES;
-	int ret = 0;
-	u8 role_type;
-	bool booted = false;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	int i;
 
-	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
-		     ieee80211_vif_type_p2p(vif), vif->addr);
-
-	mutex_lock(&wl->mutex);
-	if (wl->vif) {
-		wl1271_debug(DEBUG_MAC80211,
-			     "multiple vifs are not supported yet");
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/*
-	 * in some very corner case HW recovery scenarios its possible to
-	 * get here before __wl1271_op_remove_interface is complete, so
-	 * opt out if that is the case.
-	 */
-	if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
-		ret = -EBUSY;
-		goto out;
-	}
+	/* clear everything but the persistent data */
+	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
 
 	switch (ieee80211_vif_type_p2p(vif)) {
 	case NL80211_IFTYPE_P2P_CLIENT:
-		wl->p2p = 1;
+		wlvif->p2p = 1;
 		/* fall-through */
 	case NL80211_IFTYPE_STATION:
-		wl->bss_type = BSS_TYPE_STA_BSS;
-		wl->set_bss_type = BSS_TYPE_STA_BSS;
+		wlvif->bss_type = BSS_TYPE_STA_BSS;
 		break;
 	case NL80211_IFTYPE_ADHOC:
-		wl->bss_type = BSS_TYPE_IBSS;
-		wl->set_bss_type = BSS_TYPE_STA_BSS;
+		wlvif->bss_type = BSS_TYPE_IBSS;
 		break;
 	case NL80211_IFTYPE_P2P_GO:
-		wl->p2p = 1;
+		wlvif->p2p = 1;
 		/* fall-through */
 	case NL80211_IFTYPE_AP:
-		wl->bss_type = BSS_TYPE_AP_BSS;
+		wlvif->bss_type = BSS_TYPE_AP_BSS;
 		break;
 	default:
-		ret = -EOPNOTSUPP;
-		goto out;
+		wlvif->bss_type = MAX_BSS_TYPE;
+		return -EOPNOTSUPP;
 	}
 
-	role_type = wl12xx_get_role_type(wl);
-	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
-		ret = -EINVAL;
-		goto out;
-	}
-	memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
 
-	if (wl->state != WL1271_STATE_OFF) {
-		wl1271_error("cannot start because not in off state: %d",
-			     wl->state);
-		ret = -EBUSY;
-		goto out;
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+	    wlvif->bss_type == BSS_TYPE_IBSS) {
+		/* init sta/ibss data */
+		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+	} else {
+		/* init ap data */
+		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+			wl12xx_allocate_rate_policy(wl,
+						&wlvif->ap.ucast_rate_idx[i]);
 	}
 
+	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+	wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+	wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+	wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+	/*
+	 * mac80211 configures some values globally, while we treat them
+	 * per-interface. Thus, on init, we have to copy them from wl
+	 */
+	wlvif->band = wl->band;
+	wlvif->channel = wl->channel;
+	wlvif->power_level = wl->power_level;
+
+	INIT_WORK(&wlvif->rx_streaming_enable_work,
+		  wl1271_rx_streaming_enable_work);
+	INIT_WORK(&wlvif->rx_streaming_disable_work,
+		  wl1271_rx_streaming_disable_work);
+	INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+	INIT_LIST_HEAD(&wlvif->list);
+
+	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+		    (unsigned long) wlvif);
+	return 0;
+}
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+	int retries = WL1271_BOOT_RETRIES;
+	bool booted = false;
+	struct wiphy *wiphy = wl->hw->wiphy;
+	int ret;
+
 	while (retries) {
 		retries--;
 		ret = wl1271_chip_wakeup(wl);
@@ -1915,25 +1994,6 @@
 		if (ret < 0)
 			goto power_off;
 
-		if (wl->bss_type == BSS_TYPE_STA_BSS ||
-		    wl->bss_type == BSS_TYPE_IBSS) {
-			/*
-			 * The device role is a special role used for
-			 * rx and tx frames prior to association (as
-			 * the STA role can get packets only from
-			 * its associated bssid)
-			 */
-			ret = wl12xx_cmd_role_enable(wl,
-							 WL1271_ROLE_DEVICE,
-							 &wl->dev_role_id);
-			if (ret < 0)
-				goto irq_disable;
-		}
-
-		ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
-		if (ret < 0)
-			goto irq_disable;
-
 		ret = wl1271_hw_init(wl);
 		if (ret < 0)
 			goto irq_disable;
@@ -1964,9 +2024,6 @@
 		goto out;
 	}
 
-	wl->vif = vif;
-	wl->state = WL1271_STATE_ON;
-	set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
 
 	/* update hw/fw version info in wiphy struct */
@@ -1984,7 +2041,115 @@
 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
 		     wl->enable_11a ? "" : "not ");
 
+	wl->state = WL1271_STATE_ON;
 out:
+	return booted;
+}
+
+static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
+{
+	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif)
+{
+	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	int ret = 0;
+	u8 role_type;
+	bool booted = false;
+
+	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+		     ieee80211_vif_type_p2p(vif), vif->addr);
+
+	mutex_lock(&wl->mutex);
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out_unlock;
+
+	if (wl->vif) {
+		wl1271_debug(DEBUG_MAC80211,
+			     "multiple vifs are not supported yet");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * in some corner-case HW recovery scenarios it's possible to
+	 * get here before __wl1271_op_remove_interface is complete, so
+	 * opt out if that is the case.
+	 */
+	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = wl12xx_init_vif_data(wl, vif);
+	if (ret < 0)
+		goto out;
+
+	wlvif->wl = wl;
+	role_type = wl12xx_get_role_type(wl, wlvif);
+	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * TODO: after the nvs issue is solved, move this block
+	 * to start(), and make sure here the driver is ON.
+	 */
+	if (wl->state == WL1271_STATE_OFF) {
+		/*
+		 * we still need this in order to configure the fw
+		 * while uploading the nvs
+		 */
+		memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+		booted = wl12xx_init_fw(wl);
+		if (!booted) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+	    wlvif->bss_type == BSS_TYPE_IBSS) {
+		/*
+		 * The device role is a special role used for
+		 * rx and tx frames prior to association (as
+		 * the STA role can get packets only from
+		 * its associated bssid)
+		 */
+		ret = wl12xx_cmd_role_enable(wl, vif->addr,
+						 WL1271_ROLE_DEVICE,
+						 &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = wl12xx_cmd_role_enable(wl, vif->addr,
+				     role_type, &wlvif->role_id);
+	if (ret < 0)
+		goto out;
+
+	ret = wl1271_init_vif_specific(wl, vif);
+	if (ret < 0)
+		goto out;
+
+	wl->vif = vif;
+	list_add(&wlvif->list, &wl->wlvif_list);
+	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+		wl->ap_count++;
+	else
+		wl->sta_count++;
+out:
+	wl1271_ps_elp_sleep(wl);
+out_unlock:
 	mutex_unlock(&wl->mutex);
 
 	mutex_lock(&wl_list_mutex);
@@ -1996,29 +2161,34 @@
 }
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
+					 struct ieee80211_vif *vif,
 					 bool reset_tx_queues)
 {
-	int ret, i;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	int i, ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
 
+	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+		return;
+
+	wl->vif = NULL;
+
 	/* because of hardware recovery, we may get here twice */
 	if (wl->state != WL1271_STATE_ON)
 		return;
 
 	wl1271_info("down");
 
-	mutex_lock(&wl_list_mutex);
-	list_del(&wl->list);
-	mutex_unlock(&wl_list_mutex);
-
 	/* enable dyn ps just in case (if left on due to fw crash etc) */
-	if (wl->bss_type == BSS_TYPE_STA_BSS)
-		ieee80211_enable_dyn_ps(wl->vif);
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+		ieee80211_enable_dyn_ps(vif);
 
-	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+	    wl->scan_vif == vif) {
 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+		wl->scan_vif = NULL;
 		wl->scan.req = NULL;
 		ieee80211_scan_completed(wl->hw, true);
 	}
@@ -2029,13 +2199,17 @@
 		if (ret < 0)
 			goto deinit;
 
-		if (wl->bss_type == BSS_TYPE_STA_BSS) {
-			ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+		    wlvif->bss_type == BSS_TYPE_IBSS) {
+			if (wl12xx_dev_role_started(wlvif))
+				wl12xx_stop_dev(wl, wlvif);
+
+			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 			if (ret < 0)
 				goto deinit;
 		}
 
-		ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
 		if (ret < 0)
 			goto deinit;
 
@@ -2043,120 +2217,93 @@
 	}
 deinit:
 	/* clear all hlids (except system_hlid) */
-	wl->sta_hlid = WL12XX_INVALID_LINK_ID;
-	wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-	wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
-	wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
 
-	/*
-	 * this must be before the cancel_work calls below, so that the work
-	 * functions don't perform further work.
-	 */
-	wl->state = WL1271_STATE_OFF;
-
-	mutex_unlock(&wl->mutex);
-
-	wl1271_disable_interrupts(wl);
-	wl1271_flush_deferred_work(wl);
-	cancel_delayed_work_sync(&wl->scan_complete_work);
-	cancel_work_sync(&wl->netstack_work);
-	cancel_work_sync(&wl->tx_work);
-	del_timer_sync(&wl->rx_streaming_timer);
-	cancel_work_sync(&wl->rx_streaming_enable_work);
-	cancel_work_sync(&wl->rx_streaming_disable_work);
-	cancel_delayed_work_sync(&wl->pspoll_work);
-	cancel_delayed_work_sync(&wl->elp_work);
-
-	mutex_lock(&wl->mutex);
-
-	/* let's notify MAC80211 about the remaining pending TX frames */
-	wl1271_tx_reset(wl, reset_tx_queues);
-	wl1271_power_off(wl);
-
-	memset(wl->bssid, 0, ETH_ALEN);
-	memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
-	wl->ssid_len = 0;
-	wl->bss_type = MAX_BSS_TYPE;
-	wl->set_bss_type = MAX_BSS_TYPE;
-	wl->p2p = 0;
-	wl->band = IEEE80211_BAND_2GHZ;
-
-	wl->rx_counter = 0;
-	wl->psm_entry_retry = 0;
-	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-	wl->tx_blocks_available = 0;
-	wl->tx_allocated_blocks = 0;
-	wl->tx_results_count = 0;
-	wl->tx_packets_count = 0;
-	wl->time_offset = 0;
-	wl->session_counter = 0;
-	wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-	wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-	wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
-	wl->vif = NULL;
-	wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-	wl1271_free_ap_keys(wl);
-	memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
-	wl->ap_fw_ps_map = 0;
-	wl->ap_ps_map = 0;
-	wl->sched_scanning = false;
-	wl->role_id = WL12XX_INVALID_ROLE_ID;
-	wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
-	memset(wl->roles_map, 0, sizeof(wl->roles_map));
-	memset(wl->links_map, 0, sizeof(wl->links_map));
-	memset(wl->roc_map, 0, sizeof(wl->roc_map));
-	wl->active_sta_count = 0;
-
-	/* The system link is always allocated */
-	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
-	/*
-	 * this is performed after the cancel_work calls and the associated
-	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
-	 * get executed before all these vars have been reset.
-	 */
-	wl->flags = 0;
-
-	wl->tx_blocks_freed = 0;
-
-	for (i = 0; i < NUM_TX_QUEUES; i++) {
-		wl->tx_pkts_freed[i] = 0;
-		wl->tx_allocated_pkts[i] = 0;
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+	    wlvif->bss_type == BSS_TYPE_IBSS) {
+		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+	} else {
+		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+			wl12xx_free_rate_policy(wl,
+						&wlvif->ap.ucast_rate_idx[i]);
 	}
 
-	wl1271_debugfs_reset(wl);
+	wl12xx_tx_reset_wlvif(wl, wlvif);
+	wl1271_free_ap_keys(wl, wlvif);
+	if (wl->last_wlvif == wlvif)
+		wl->last_wlvif = NULL;
+	list_del(&wlvif->list);
+	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
 
-	kfree(wl->fw_status);
-	wl->fw_status = NULL;
-	kfree(wl->tx_res_if);
-	wl->tx_res_if = NULL;
-	kfree(wl->target_mem_map);
-	wl->target_mem_map = NULL;
+	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+		wl->ap_count--;
+	else
+		wl->sta_count--;
+
+	mutex_unlock(&wl->mutex);
+	del_timer_sync(&wlvif->rx_streaming_timer);
+	cancel_work_sync(&wlvif->rx_streaming_enable_work);
+	cancel_work_sync(&wlvif->rx_streaming_disable_work);
+	cancel_delayed_work_sync(&wlvif->pspoll_work);
+
+	mutex_lock(&wl->mutex);
 }
 
 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct wl12xx_vif *iter;
 
 	mutex_lock(&wl->mutex);
+
+	if (wl->state == WL1271_STATE_OFF ||
+	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+		goto out;
+
 	/*
 	 * wl->vif can be null here if someone shuts down the interface
 	 * just when hardware recovery has been started.
 	 */
-	if (wl->vif) {
-		WARN_ON(wl->vif != vif);
-		__wl1271_op_remove_interface(wl, true);
-	}
+	wl12xx_for_each_wlvif(wl, iter) {
+		if (iter != wlvif)
+			continue;
 
+		__wl1271_op_remove_interface(wl, vif, true);
+		break;
+	}
+	WARN_ON(iter != wlvif);
+out:
 	mutex_unlock(&wl->mutex);
 	cancel_work_sync(&wl->recovery_work);
 }
 
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      enum nl80211_iftype new_type, bool p2p)
+{
+	wl1271_op_remove_interface(hw, vif);
+
+	vif->type = ieee80211_iftype_p2p(new_type, p2p);
+	vif->p2p = p2p;
+	return wl1271_op_add_interface(hw, vif);
+}
+
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  bool set_assoc)
 {
 	int ret;
-	bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
 
 	/*
 	 * One of the side effects of the JOIN command is that it clears
@@ -2167,20 +2314,20 @@
 	 * Keep the below message for now, unless it starts bothering
 	 * users who really like to roam a lot :)
 	 */
-	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
 		wl1271_info("JOIN while associated.");
 
 	if (set_assoc)
-		set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+		set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
 
 	if (is_ibss)
-		ret = wl12xx_cmd_role_start_ibss(wl);
+		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
 	else
-		ret = wl12xx_cmd_role_start_sta(wl);
+		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
 	if (ret < 0)
 		goto out;
 
-	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
 		goto out;
 
 	/*
@@ -2189,19 +2336,20 @@
 	 * the join. The acx_aid starts the keep-alive process, and the order
 	 * of the commands below is relevant.
 	 */
-	ret = wl1271_acx_keep_alive_mode(wl, true);
+	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_acx_aid(wl, wl->aid);
+	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_cmd_build_klv_null_data(wl);
+	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+	ret = wl1271_acx_keep_alive_config(wl, wlvif,
+					   CMD_TEMPL_KLV_IDX_NULL_DATA,
 					   ACX_KEEP_ALIVE_TPL_VALID);
 	if (ret < 0)
 		goto out;
@@ -2210,72 +2358,63 @@
 	return ret;
 }
 
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret;
 
-	if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
 		wl12xx_cmd_stop_channel_switch(wl);
-		ieee80211_chswitch_done(wl->vif, false);
+		ieee80211_chswitch_done(vif, false);
 	}
 
 	/* to stop listening to a channel, we disconnect */
-	ret = wl12xx_cmd_role_stop_sta(wl);
+	ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
 	if (ret < 0)
 		goto out;
 
-	memset(wl->bssid, 0, ETH_ALEN);
-
 	/* reset TX security counters on a clean disconnect */
-	wl->tx_security_last_seq_lsb = 0;
-	wl->tx_security_seq = 0;
+	wlvif->tx_security_last_seq_lsb = 0;
+	wlvif->tx_security_seq = 0;
 
 out:
 	return ret;
 }
 
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-	wl->basic_rate_set = wl->bitrate_masks[wl->band];
-	wl->rate_set = wl->basic_rate_set;
+	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+	wlvif->rate_set = wlvif->basic_rate_set;
 }
 
-static bool wl12xx_is_roc(struct wl1271 *wl)
-{
-	u8 role_id;
-
-	role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
-	if (role_id >= WL12XX_MAX_ROLES)
-		return false;
-
-	return true;
-}
-
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				  bool idle)
 {
 	int ret;
+	bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+	if (idle == cur_idle)
+		return 0;
 
 	if (idle) {
 		/* no need to croc if we weren't busy (e.g. during boot) */
-		if (wl12xx_is_roc(wl)) {
-			ret = wl12xx_croc(wl, wl->dev_role_id);
-			if (ret < 0)
-				goto out;
-
-			ret = wl12xx_cmd_role_stop_dev(wl);
+		if (wl12xx_dev_role_started(wlvif)) {
+			ret = wl12xx_stop_dev(wl, wlvif);
 			if (ret < 0)
 				goto out;
 		}
-		wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-		ret = wl1271_acx_sta_rate_policies(wl);
+		wlvif->rate_set =
+			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
 		if (ret < 0)
 			goto out;
 		ret = wl1271_acx_keep_alive_config(
-			wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+			wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
 			ACX_KEEP_ALIVE_TPL_INVALID);
 		if (ret < 0)
 			goto out;
-		set_bit(WL1271_FLAG_IDLE, &wl->flags);
+		clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
 	} else {
 		/* The current firmware only supports sched_scan in idle */
 		if (wl->sched_scanning) {
@@ -2283,26 +2422,135 @@
 			ieee80211_sched_scan_stopped(wl->hw);
 		}
 
-		ret = wl12xx_cmd_role_start_dev(wl);
+		ret = wl12xx_start_dev(wl, wlvif);
 		if (ret < 0)
 			goto out;
-
-		ret = wl12xx_roc(wl, wl->dev_role_id);
-		if (ret < 0)
-			goto out;
-		clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+		set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
 	}
 
 out:
 	return ret;
 }
 
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			     struct ieee80211_conf *conf, u32 changed)
+{
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+	int channel, ret;
+
+	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+	/* if the channel changes while joined, join again */
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
+	    ((wlvif->band != conf->channel->band) ||
+	     (wlvif->channel != channel))) {
+		/* send all pending packets */
+		wl1271_tx_work_locked(wl);
+		wlvif->band = conf->channel->band;
+		wlvif->channel = channel;
+
+		if (!is_ap) {
+			/*
+			 * FIXME: the mac80211 should really provide a fixed
+			 * rate to use here. for now, just use the smallest
+			 * possible rate for the band as a fixed rate for
+			 * association frames and other control messages.
+			 */
+			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+				wl1271_set_band_rate(wl, wlvif);
+
+			wlvif->basic_rate =
+				wl1271_tx_min_rate_get(wl,
+						       wlvif->basic_rate_set);
+			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+			if (ret < 0)
+				wl1271_warning("rate policy for channel "
+					       "failed %d", ret);
+
+			if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+				     &wlvif->flags)) {
+				if (wl12xx_dev_role_started(wlvif)) {
+					/* roaming */
+					ret = wl12xx_croc(wl,
+							  wlvif->dev_role_id);
+					if (ret < 0)
+						return ret;
+				}
+				ret = wl1271_join(wl, wlvif, false);
+				if (ret < 0)
+					wl1271_warning("cmd join on channel "
+						       "failed %d", ret);
+			} else {
+				/*
+				 * change the ROC channel. do it only if we are
+				 * not idle. otherwise, CROC will be called
+				 * anyway.
+				 */
+				if (wl12xx_dev_role_started(wlvif) &&
+				    !(conf->flags & IEEE80211_CONF_IDLE)) {
+					ret = wl12xx_stop_dev(wl, wlvif);
+					if (ret < 0)
+						return ret;
+
+					ret = wl12xx_start_dev(wl, wlvif);
+					if (ret < 0)
+						return ret;
+				}
+			}
+		}
+	}
+
+	/*
+	 * if mac80211 changes the PSM mode, make sure the mode is not
+	 * incorrectly changed after the pspoll failure active window.
+	 */
+	if (changed & IEEE80211_CONF_CHANGE_PS)
+		clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+
+	if (conf->flags & IEEE80211_CONF_PS &&
+	    !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+		set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
+
+		/*
+		 * We enter PSM only if we're already associated.
+		 * If we're not, we'll enter it when joining an SSID,
+		 * through the bss_info_changed() hook.
+		 */
+		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+			wl1271_debug(DEBUG_PSM, "psm enabled");
+			ret = wl1271_ps_set_mode(wl, wlvif,
+						 STATION_POWER_SAVE_MODE,
+						 wlvif->basic_rate, true);
+		}
+	} else if (!(conf->flags & IEEE80211_CONF_PS) &&
+		   test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+		wl1271_debug(DEBUG_PSM, "psm disabled");
+
+		clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
+
+		if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+			ret = wl1271_ps_set_mode(wl, wlvif,
+						 STATION_ACTIVE_MODE,
+						 wlvif->basic_rate, true);
+	}
+
+	if (conf->power_level != wlvif->power_level) {
+		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
+		if (ret < 0)
+			return ret;
+
+		wlvif->power_level = conf->power_level;
+	}
+
+	return 0;
+}
+
 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif;
 	struct ieee80211_conf *conf = &hw->conf;
 	int channel, ret = 0;
-	bool is_ap;
 
 	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
 
@@ -2325,128 +2573,27 @@
 
 	mutex_lock(&wl->mutex);
 
-	if (unlikely(wl->state == WL1271_STATE_OFF)) {
-		/* we support configuring the channel and band while off */
-		if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-			wl->band = conf->channel->band;
-			wl->channel = channel;
-		}
-
-		if ((changed & IEEE80211_CONF_CHANGE_POWER))
-			wl->power_level = conf->power_level;
-
-		goto out;
+	/* we support configuring the channel and band even while off */
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		wl->band = conf->channel->band;
+		wl->channel = channel;
 	}
 
-	is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	if (changed & IEEE80211_CONF_CHANGE_POWER)
+		wl->power_level = conf->power_level;
+
+	if (unlikely(wl->state == WL1271_STATE_OFF))
+		goto out;
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
 
-	/* if the channel changes while joined, join again */
-	if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
-	    ((wl->band != conf->channel->band) ||
-	     (wl->channel != channel))) {
-		/* send all pending packets */
-		wl1271_tx_work_locked(wl);
-		wl->band = conf->channel->band;
-		wl->channel = channel;
-
-		if (!is_ap) {
-			/*
-			 * FIXME: the mac80211 should really provide a fixed
-			 * rate to use here. for now, just use the smallest
-			 * possible rate for the band as a fixed rate for
-			 * association frames and other control messages.
-			 */
-			if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-				wl1271_set_band_rate(wl);
-
-			wl->basic_rate =
-				wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-			ret = wl1271_acx_sta_rate_policies(wl);
-			if (ret < 0)
-				wl1271_warning("rate policy for channel "
-					       "failed %d", ret);
-
-			if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
-				if (wl12xx_is_roc(wl)) {
-					/* roaming */
-					ret = wl12xx_croc(wl, wl->dev_role_id);
-					if (ret < 0)
-						goto out_sleep;
-				}
-				ret = wl1271_join(wl, false);
-				if (ret < 0)
-					wl1271_warning("cmd join on channel "
-						       "failed %d", ret);
-			} else {
-				/*
-				 * change the ROC channel. do it only if we are
-				 * not idle. otherwise, CROC will be called
-				 * anyway.
-				 */
-				if (wl12xx_is_roc(wl) &&
-				    !(conf->flags & IEEE80211_CONF_IDLE)) {
-					ret = wl12xx_croc(wl, wl->dev_role_id);
-					if (ret < 0)
-						goto out_sleep;
-
-					ret = wl12xx_roc(wl, wl->dev_role_id);
-					if (ret < 0)
-						wl1271_warning("roc failed %d",
-							       ret);
-				}
-			}
-		}
-	}
-
-	if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
-		ret = wl1271_sta_handle_idle(wl,
-					conf->flags & IEEE80211_CONF_IDLE);
-		if (ret < 0)
-			wl1271_warning("idle mode change failed %d", ret);
-	}
-
-	/*
-	 * if mac80211 changes the PSM mode, make sure the mode is not
-	 * incorrectly changed after the pspoll failure active window.
-	 */
-	if (changed & IEEE80211_CONF_CHANGE_PS)
-		clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
-
-	if (conf->flags & IEEE80211_CONF_PS &&
-	    !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
-		set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
-
-		/*
-		 * We enter PSM only if we're already associated.
-		 * If we're not, we'll enter it when joining an SSID,
-		 * through the bss_info_changed() hook.
-		 */
-		if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
-			wl1271_debug(DEBUG_PSM, "psm enabled");
-			ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-						 wl->basic_rate, true);
-		}
-	} else if (!(conf->flags & IEEE80211_CONF_PS) &&
-		   test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
-		wl1271_debug(DEBUG_PSM, "psm disabled");
-
-		clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
-
-		if (test_bit(WL1271_FLAG_PSM, &wl->flags))
-			ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-						 wl->basic_rate, true);
-	}
-
-	if (conf->power_level != wl->power_level) {
-		ret = wl1271_acx_tx_power(wl, conf->power_level);
+	/* configure each interface */
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
 		if (ret < 0)
 			goto out_sleep;
-
-		wl->power_level = conf->power_level;
 	}
 
 out_sleep:
@@ -2509,6 +2656,8 @@
 {
 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif;
+
 	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2675,20 @@
 	if (ret < 0)
 		goto out;
 
-	if (wl->bss_type != BSS_TYPE_AP_BSS) {
-		if (*total & FIF_ALLMULTI)
-			ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
-		else if (fp)
-			ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
-							   fp->mc_list,
-							   fp->mc_list_length);
-		if (ret < 0)
-			goto out_sleep;
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+			if (*total & FIF_ALLMULTI)
+				ret = wl1271_acx_group_address_tbl(wl, wlvif,
+								   false,
+								   NULL, 0);
+			else if (fp)
+				ret = wl1271_acx_group_address_tbl(wl, wlvif,
+							fp->enabled,
+							fp->mc_list,
+							fp->mc_list_length);
+			if (ret < 0)
+				goto out_sleep;
+		}
 	}
 
 	/*
@@ -2551,9 +2705,10 @@
 	kfree(fp);
 }
 
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
-			u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
-			u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				u8 id, u8 key_type, u8 key_size,
+				const u8 *key, u8 hlid, u32 tx_seq_32,
+				u16 tx_seq_16)
 {
 	struct wl1271_ap_key *ap_key;
 	int i;
@@ -2568,10 +2723,10 @@
 	 * an existing key.
 	 */
 	for (i = 0; i < MAX_NUM_KEYS; i++) {
-		if (wl->recorded_ap_keys[i] == NULL)
+		if (wlvif->ap.recorded_keys[i] == NULL)
 			break;
 
-		if (wl->recorded_ap_keys[i]->id == id) {
+		if (wlvif->ap.recorded_keys[i]->id == id) {
 			wl1271_warning("trying to record key replacement");
 			return -EINVAL;
 		}
@@ -2592,21 +2747,21 @@
 	ap_key->tx_seq_32 = tx_seq_32;
 	ap_key->tx_seq_16 = tx_seq_16;
 
-	wl->recorded_ap_keys[i] = ap_key;
+	wlvif->ap.recorded_keys[i] = ap_key;
 	return 0;
 }
 
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int i;
 
 	for (i = 0; i < MAX_NUM_KEYS; i++) {
-		kfree(wl->recorded_ap_keys[i]);
-		wl->recorded_ap_keys[i] = NULL;
+		kfree(wlvif->ap.recorded_keys[i]);
+		wlvif->ap.recorded_keys[i] = NULL;
 	}
 }
 
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int i, ret = 0;
 	struct wl1271_ap_key *key;
@@ -2614,15 +2769,15 @@
 
 	for (i = 0; i < MAX_NUM_KEYS; i++) {
 		u8 hlid;
-		if (wl->recorded_ap_keys[i] == NULL)
+		if (wlvif->ap.recorded_keys[i] == NULL)
 			break;
 
-		key = wl->recorded_ap_keys[i];
+		key = wlvif->ap.recorded_keys[i];
 		hlid = key->hlid;
 		if (hlid == WL12XX_INVALID_LINK_ID)
-			hlid = wl->ap_bcast_hlid;
+			hlid = wlvif->ap.bcast_hlid;
 
-		ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
 					    key->id, key->key_type,
 					    key->key_size, key->key,
 					    hlid, key->tx_seq_32,
@@ -2635,23 +2790,24 @@
 	}
 
 	if (wep_key_added) {
-		ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
-						     wl->ap_bcast_hlid);
+		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+						     wlvif->ap.bcast_hlid);
 		if (ret < 0)
 			goto out;
 	}
 
 out:
-	wl1271_free_ap_keys(wl);
+	wl1271_free_ap_keys(wl, wlvif);
 	return ret;
 }
 
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       u16 action, u8 id, u8 key_type,
 		       u8 key_size, const u8 *key, u32 tx_seq_32,
 		       u16 tx_seq_16, struct ieee80211_sta *sta)
 {
 	int ret;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
 	if (is_ap) {
 		struct wl1271_station *wl_sta;
@@ -2661,10 +2817,10 @@
 			wl_sta = (struct wl1271_station *)sta->drv_priv;
 			hlid = wl_sta->hlid;
 		} else {
-			hlid = wl->ap_bcast_hlid;
+			hlid = wlvif->ap.bcast_hlid;
 		}
 
-		if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
 			/*
 			 * We do not support removing keys after AP shutdown.
 			 * Pretend we do to make mac80211 happy.
@@ -2672,12 +2828,12 @@
 			if (action != KEY_ADD_OR_REPLACE)
 				return 0;
 
-			ret = wl1271_record_ap_key(wl, id,
+			ret = wl1271_record_ap_key(wl, wlvif, id,
 					     key_type, key_size,
 					     key, hlid, tx_seq_32,
 					     tx_seq_16);
 		} else {
-			ret = wl1271_cmd_set_ap_key(wl, action,
+			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
 					     id, key_type, key_size,
 					     key, hlid, tx_seq_32,
 					     tx_seq_16);
@@ -2718,10 +2874,10 @@
 
 		/* don't remove key if hlid was already deleted */
 		if (action == KEY_REMOVE &&
-		    wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
 			return 0;
 
-		ret = wl1271_cmd_set_sta_key(wl, action,
+		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
 					     id, key_type, key_size,
 					     key, addr, tx_seq_32,
 					     tx_seq_16);
@@ -2731,8 +2887,8 @@
 		/* the default WEP key needs to be configured at least once */
 		if (key_type == KEY_WEP) {
 			ret = wl12xx_cmd_set_default_wep_key(wl,
-							     wl->default_key,
-							     wl->sta_hlid);
+							wlvif->default_key,
+							wlvif->sta.hlid);
 			if (ret < 0)
 				return ret;
 		}
@@ -2747,6 +2903,7 @@
 			     struct ieee80211_key_conf *key_conf)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret;
 	u32 tx_seq_32 = 0;
 	u16 tx_seq_16 = 0;
@@ -2782,20 +2939,20 @@
 		key_type = KEY_TKIP;
 
 		key_conf->hw_key_idx = key_conf->keyidx;
-		tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-		tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 		key_type = KEY_AES;
 
-		key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-		tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-		tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
 		break;
 	case WL1271_CIPHER_SUITE_GEM:
 		key_type = KEY_GEM;
-		tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-		tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
 		break;
 	default:
 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2963,7 @@
 
 	switch (cmd) {
 	case SET_KEY:
-		ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
 				 key_conf->keyidx, key_type,
 				 key_conf->keylen, key_conf->key,
 				 tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2974,7 @@
 		break;
 
 	case DISABLE_KEY:
-		ret = wl1271_set_key(wl, KEY_REMOVE,
+		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
 				     key_conf->keyidx, key_type,
 				     key_conf->keylen, key_conf->key,
 				     0, 0, sta);
@@ -2847,6 +3004,8 @@
 			     struct cfg80211_scan_request *req)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
 	int ret;
 	u8 *ssid = NULL;
 	size_t len = 0;
@@ -2874,18 +3033,18 @@
 	if (ret < 0)
 		goto out;
 
-	/* cancel ROC before scanning */
-	if (wl12xx_is_roc(wl)) {
-		if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
-			/* don't allow scanning right now */
-			ret = -EBUSY;
-			goto out_sleep;
-		}
-		wl12xx_croc(wl, wl->dev_role_id);
-		wl12xx_cmd_role_stop_dev(wl);
+	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+	    test_bit(wlvif->role_id, wl->roc_map)) {
+		/* don't allow scanning right now */
+		ret = -EBUSY;
+		goto out_sleep;
 	}
 
-	ret = wl1271_scan(hw->priv, ssid, len, req);
+	/* cancel ROC before scanning */
+	if (wl12xx_dev_role_started(wlvif))
+		wl12xx_stop_dev(wl, wlvif);
+
+	ret = wl1271_scan(hw->priv, vif, ssid, len, req);
 out_sleep:
 	wl1271_ps_elp_sleep(wl);
 out:
@@ -2921,6 +3080,7 @@
 	}
 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+	wl->scan_vif = NULL;
 	wl->scan.req = NULL;
 	ieee80211_scan_completed(wl->hw, true);
 
@@ -2938,6 +3098,7 @@
 				      struct ieee80211_sched_scan_ies *ies)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3109,11 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_scan_sched_scan_config(wl, req, ies);
+	ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
 	if (ret < 0)
 		goto out_sleep;
 
-	ret = wl1271_scan_sched_scan_start(wl);
+	ret = wl1271_scan_sched_scan_start(wl, wlvif);
 	if (ret < 0)
 		goto out_sleep;
 
@@ -3017,6 +3178,7 @@
 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif;
 	int ret = 0;
 
 	mutex_lock(&wl->mutex);
@@ -3030,10 +3192,11 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_acx_rts_threshold(wl, value);
-	if (ret < 0)
-		wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+		if (ret < 0)
+			wl1271_warning("set rts threshold failed: %d", ret);
+	}
 	wl1271_ps_elp_sleep(wl);
 
 out:
@@ -3042,9 +3205,10 @@
 	return ret;
 }
 
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
 			    int offset)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	u8 ssid_len;
 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
 					 skb->len - offset);
@@ -3060,8 +3224,8 @@
 		return -EINVAL;
 	}
 
-	wl->ssid_len = ssid_len;
-	memcpy(wl->ssid, ptr+2, ssid_len);
+	wlvif->ssid_len = ssid_len;
+	memcpy(wlvif->ssid, ptr+2, ssid_len);
 	return 0;
 }
 
@@ -3096,18 +3260,40 @@
 	skb_trim(skb, skb->len - len);
 }
 
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
-					 u8 *probe_rsp_data,
-					 size_t probe_rsp_len,
-					 u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+					 struct ieee80211_vif *vif)
 {
-	struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ieee80211_proberesp_get(wl->hw, vif);
+	if (!skb)
+		return -EOPNOTSUPP;
+
+	ret = wl1271_cmd_template_set(wl,
+				      CMD_TEMPL_AP_PROBE_RESPONSE,
+				      skb->data,
+				      skb->len, 0,
+				      rates);
+
+	dev_kfree_skb(skb);
+	return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+					     struct ieee80211_vif *vif,
+					     u8 *probe_rsp_data,
+					     size_t probe_rsp_len,
+					     u32 rates)
+{
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
 	int ssid_ie_offset, ie_offset, templ_len;
 	const u8 *ptr;
 
 	/* no need to change probe response if the SSID is set correctly */
-	if (wl->ssid_len > 0)
+	if (wlvif->ssid_len > 0)
 		return wl1271_cmd_template_set(wl,
 					       CMD_TEMPL_AP_PROBE_RESPONSE,
 					       probe_rsp_data,
@@ -3153,16 +3339,18 @@
 }
 
 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+				       struct ieee80211_vif *vif,
 				       struct ieee80211_bss_conf *bss_conf,
 				       u32 changed)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret = 0;
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
 		if (bss_conf->use_short_slot)
-			ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
 		else
-			ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
 		if (ret < 0) {
 			wl1271_warning("Set slot time failed %d", ret);
 			goto out;
@@ -3171,16 +3359,18 @@
 
 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
 		if (bss_conf->use_short_preamble)
-			wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
 		else
-			wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
 	}
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
 		if (bss_conf->use_cts_prot)
-			ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+			ret = wl1271_acx_cts_protect(wl, wlvif,
+						     CTSPROTECT_ENABLE);
 		else
-			ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+			ret = wl1271_acx_cts_protect(wl, wlvif,
+						     CTSPROTECT_DISABLE);
 		if (ret < 0) {
 			wl1271_warning("Set ctsprotect failed %d", ret);
 			goto out;
@@ -3196,14 +3386,23 @@
 					  struct ieee80211_bss_conf *bss_conf,
 					  u32 changed)
 {
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 	int ret = 0;
 
 	if ((changed & BSS_CHANGED_BEACON_INT)) {
 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
 			bss_conf->beacon_int);
 
-		wl->beacon_int = bss_conf->beacon_int;
+		wlvif->beacon_int = bss_conf->beacon_int;
+	}
+
+	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+		if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+			wl1271_debug(DEBUG_AP, "probe response updated");
+			set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+		}
 	}
 
 	if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3413,19 @@
 		struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
 		u16 tmpl_id;
 
-		if (!beacon)
+		if (!beacon) {
+			ret = -EINVAL;
 			goto out;
+		}
 
 		wl1271_debug(DEBUG_MASTER, "beacon updated");
 
-		ret = wl1271_ssid_set(wl, beacon, ieoffset);
+		ret = wl1271_ssid_set(vif, beacon, ieoffset);
 		if (ret < 0) {
 			dev_kfree_skb(beacon);
 			goto out;
 		}
-		min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+		min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
 		tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
 				  CMD_TEMPL_BEACON;
 		ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3437,13 @@
 			goto out;
 		}
 
+		/*
+		 * In case we already have a probe-resp beacon set explicitly
+		 * by usermode, don't use the beacon data.
+		 */
+		if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+			goto end_bcn;
+
 		/* remove TIM ie from probe response */
 		wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
 
@@ -3254,7 +3462,7 @@
 		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 						 IEEE80211_STYPE_PROBE_RESP);
 		if (is_ap)
-			ret = wl1271_ap_set_probe_resp_tmpl(wl,
+			ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
 						beacon->data,
 						beacon->len,
 						min_rate);
@@ -3264,12 +3472,15 @@
 						beacon->data,
 						beacon->len, 0,
 						min_rate);
+end_bcn:
 		dev_kfree_skb(beacon);
 		if (ret < 0)
 			goto out;
 	}
 
 out:
+	if (ret != 0)
+		wl1271_error("beacon info change failed: %d", ret);
 	return ret;
 }
 
@@ -3279,23 +3490,24 @@
 				       struct ieee80211_bss_conf *bss_conf,
 				       u32 changed)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret = 0;
 
 	if ((changed & BSS_CHANGED_BASIC_RATES)) {
 		u32 rates = bss_conf->basic_rates;
 
-		wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
-								 wl->band);
-		wl->basic_rate = wl1271_tx_min_rate_get(wl,
-							wl->basic_rate_set);
+		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+								 wlvif->band);
+		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+							wlvif->basic_rate_set);
 
-		ret = wl1271_init_ap_rates(wl);
+		ret = wl1271_init_ap_rates(wl, wlvif);
 		if (ret < 0) {
 			wl1271_error("AP rate policy change failed %d", ret);
 			goto out;
 		}
 
-		ret = wl1271_ap_init_templates(wl);
+		ret = wl1271_ap_init_templates(wl, vif);
 		if (ret < 0)
 			goto out;
 	}
@@ -3306,38 +3518,40 @@
 
 	if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
 		if (bss_conf->enable_beacon) {
-			if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
-				ret = wl12xx_cmd_role_start_ap(wl);
+			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
 				if (ret < 0)
 					goto out;
 
-				ret = wl1271_ap_init_hwenc(wl);
+				ret = wl1271_ap_init_hwenc(wl, wlvif);
 				if (ret < 0)
 					goto out;
 
-				set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
 				wl1271_debug(DEBUG_AP, "started AP");
 			}
 		} else {
-			if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
-				ret = wl12xx_cmd_role_stop_ap(wl);
+			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
 				if (ret < 0)
 					goto out;
 
-				clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+					  &wlvif->flags);
 				wl1271_debug(DEBUG_AP, "stopped AP");
 			}
 		}
 	}
 
-	ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
 	if (ret < 0)
 		goto out;
 
 	/* Handle HT information change */
 	if ((changed & BSS_CHANGED_HT) &&
 	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-		ret = wl1271_acx_set_ht_information(wl,
+		ret = wl1271_acx_set_ht_information(wl, wlvif,
 					bss_conf->ht_operation_mode);
 		if (ret < 0) {
 			wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3569,9 @@
 					struct ieee80211_bss_conf *bss_conf,
 					u32 changed)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	bool do_join = false, set_assoc = false;
-	bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
 	bool ibss_joined = false;
 	u32 sta_rate_set = 0;
 	int ret;
@@ -3373,14 +3588,13 @@
 
 	if (changed & BSS_CHANGED_IBSS) {
 		if (bss_conf->ibss_joined) {
-			set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
 			ibss_joined = true;
 		} else {
-			if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
-					       &wl->flags)) {
-				wl1271_unjoin(wl);
-				wl12xx_cmd_role_start_dev(wl);
-				wl12xx_roc(wl, wl->dev_role_id);
+			if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+					       &wlvif->flags)) {
+				wl1271_unjoin(wl, wlvif);
+				wl12xx_start_dev(wl, wlvif);
 			}
 		}
 	}
@@ -3396,46 +3610,40 @@
 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
 			     bss_conf->enable_beacon ? "enabled" : "disabled");
 
-		if (bss_conf->enable_beacon)
-			wl->set_bss_type = BSS_TYPE_IBSS;
-		else
-			wl->set_bss_type = BSS_TYPE_STA_BSS;
 		do_join = true;
 	}
 
+	if (changed & BSS_CHANGED_IDLE) {
+		ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+		if (ret < 0)
+			wl1271_warning("idle mode change failed %d", ret);
+	}
+
 	if ((changed & BSS_CHANGED_CQM)) {
 		bool enable = false;
 		if (bss_conf->cqm_rssi_thold)
 			enable = true;
-		ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
 						  bss_conf->cqm_rssi_thold,
 						  bss_conf->cqm_rssi_hyst);
 		if (ret < 0)
 			goto out;
-		wl->rssi_thold = bss_conf->cqm_rssi_thold;
+		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
 	}
 
-	if ((changed & BSS_CHANGED_BSSID) &&
-	    /*
-	     * Now we know the correct bssid, so we send a new join command
-	     * and enable the BSSID filter
-	     */
-	    memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
-		memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
-		if (!is_zero_ether_addr(wl->bssid)) {
-			ret = wl1271_cmd_build_null_data(wl);
+	if (changed & BSS_CHANGED_BSSID)
+		if (!is_zero_ether_addr(bss_conf->bssid)) {
+			ret = wl12xx_cmd_build_null_data(wl, wlvif);
 			if (ret < 0)
 				goto out;
 
-			ret = wl1271_build_qos_null_data(wl);
+			ret = wl1271_build_qos_null_data(wl, vif);
 			if (ret < 0)
 				goto out;
 
 			/* Need to update the BSSID (for filtering etc) */
 			do_join = true;
 		}
-	}
 
 	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
 		rcu_read_lock();
@@ -3459,26 +3667,28 @@
 		if (bss_conf->assoc) {
 			u32 rates;
 			int ieoffset;
-			wl->aid = bss_conf->aid;
+			wlvif->aid = bss_conf->aid;
 			set_assoc = true;
 
-			wl->ps_poll_failures = 0;
+			wlvif->ps_poll_failures = 0;
 
 			/*
 			 * use basic rates from AP, and determine lowest rate
 			 * to use with control frames.
 			 */
 			rates = bss_conf->basic_rates;
-			wl->basic_rate_set =
+			wlvif->basic_rate_set =
 				wl1271_tx_enabled_rates_get(wl, rates,
-							    wl->band);
-			wl->basic_rate =
-				wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+							    wlvif->band);
+			wlvif->basic_rate =
+				wl1271_tx_min_rate_get(wl,
+						       wlvif->basic_rate_set);
 			if (sta_rate_set)
-				wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+				wlvif->rate_set =
+					wl1271_tx_enabled_rates_get(wl,
 								sta_rate_set,
-								wl->band);
-			ret = wl1271_acx_sta_rate_policies(wl);
+								wlvif->band);
+			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
 			if (ret < 0)
 				goto out;
 
@@ -3488,53 +3698,56 @@
 			 * updates it by itself when the first beacon is
 			 * received after a join.
 			 */
-			ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+			ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
 			if (ret < 0)
 				goto out;
 
 			/*
 			 * Get a template for hardware connection maintenance
 			 */
-			dev_kfree_skb(wl->probereq);
-			wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+			dev_kfree_skb(wlvif->probereq);
+			wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+									wlvif,
+									NULL);
 			ieoffset = offsetof(struct ieee80211_mgmt,
 					    u.probe_req.variable);
-			wl1271_ssid_set(wl, wl->probereq, ieoffset);
+			wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
 
 			/* enable the connection monitoring feature */
-			ret = wl1271_acx_conn_monit_params(wl, true);
+			ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
 			if (ret < 0)
 				goto out;
 		} else {
 			/* use defaults when not associated */
 			bool was_assoc =
-			    !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
-						 &wl->flags);
+			    !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+						 &wlvif->flags);
 			bool was_ifup =
-			    !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
-						 &wl->flags);
-			wl->aid = 0;
+			    !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+						 &wlvif->flags);
+			wlvif->aid = 0;
 
 			/* free probe-request template */
-			dev_kfree_skb(wl->probereq);
-			wl->probereq = NULL;
+			dev_kfree_skb(wlvif->probereq);
+			wlvif->probereq = NULL;
 
 			/* re-enable dynamic ps - just in case */
-			ieee80211_enable_dyn_ps(wl->vif);
+			ieee80211_enable_dyn_ps(vif);
 
 			/* revert back to minimum rates for the current band */
-			wl1271_set_band_rate(wl);
-			wl->basic_rate =
-				wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-			ret = wl1271_acx_sta_rate_policies(wl);
+			wl1271_set_band_rate(wl, wlvif);
+			wlvif->basic_rate =
+				wl1271_tx_min_rate_get(wl,
+						       wlvif->basic_rate_set);
+			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
 			if (ret < 0)
 				goto out;
 
 			/* disable connection monitor features */
-			ret = wl1271_acx_conn_monit_params(wl, false);
+			ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
 
 			/* Disable the keep-alive feature */
-			ret = wl1271_acx_keep_alive_mode(wl, false);
+			ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
 			if (ret < 0)
 				goto out;
 
@@ -3546,7 +3759,7 @@
 				 * no IF_OPER_UP notification.
 				 */
 				if (!was_ifup) {
-					ret = wl12xx_croc(wl, wl->role_id);
+					ret = wl12xx_croc(wl, wlvif->role_id);
 					if (ret < 0)
 						goto out;
 				}
@@ -3555,17 +3768,16 @@
 	 * roaming on the same channel. until we
 				 * have a better flow...)
 				 */
-				if (test_bit(wl->dev_role_id, wl->roc_map)) {
-					ret = wl12xx_croc(wl, wl->dev_role_id);
+				if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+					ret = wl12xx_croc(wl,
+							  wlvif->dev_role_id);
 					if (ret < 0)
 						goto out;
 				}
 
-				wl1271_unjoin(wl);
-				if (!(conf_flags & IEEE80211_CONF_IDLE)) {
-					wl12xx_cmd_role_start_dev(wl);
-					wl12xx_roc(wl, wl->dev_role_id);
-				}
+				wl1271_unjoin(wl, wlvif);
+				if (!(conf_flags & IEEE80211_CONF_IDLE))
+					wl12xx_start_dev(wl, wlvif);
 			}
 		}
 	}
@@ -3576,27 +3788,28 @@
 
 		if (bss_conf->ibss_joined) {
 			u32 rates = bss_conf->basic_rates;
-			wl->basic_rate_set =
+			wlvif->basic_rate_set =
 				wl1271_tx_enabled_rates_get(wl, rates,
-							    wl->band);
-			wl->basic_rate =
-				wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+							    wlvif->band);
+			wlvif->basic_rate =
+				wl1271_tx_min_rate_get(wl,
+						       wlvif->basic_rate_set);
 
 			/* by default, use 11b + OFDM rates */
-			wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
-			ret = wl1271_acx_sta_rate_policies(wl);
+			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
 			if (ret < 0)
 				goto out;
 		}
 	}
 
-	ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
 	if (ret < 0)
 		goto out;
 
 	if (changed & BSS_CHANGED_ARP_FILTER) {
 		__be32 addr = bss_conf->arp_addr_list[0];
-		WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
 
 		if (bss_conf->arp_addr_cnt == 1 &&
 		    bss_conf->arp_filter_enabled) {
@@ -3606,24 +3819,24 @@
 			 * isn't being set (when sending), so we have to
 			 * reconfigure the template upon every ip change.
 			 */
-			ret = wl1271_cmd_build_arp_rsp(wl, addr);
+			ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
 			if (ret < 0) {
 				wl1271_warning("build arp rsp failed: %d", ret);
 				goto out;
 			}
 
-			ret = wl1271_acx_arp_ip_filter(wl,
+			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
 				ACX_ARP_FILTER_ARP_FILTERING,
 				addr);
 		} else
-			ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
 
 		if (ret < 0)
 			goto out;
 	}
 
 	if (do_join) {
-		ret = wl1271_join(wl, set_assoc);
+		ret = wl1271_join(wl, wlvif, set_assoc);
 		if (ret < 0) {
 			wl1271_warning("cmd join failed %d", ret);
 			goto out;
@@ -3631,35 +3844,31 @@
 
 		/* ROC until connected (after EAPOL exchange) */
 		if (!is_ibss) {
-			ret = wl12xx_roc(wl, wl->role_id);
+			ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
 			if (ret < 0)
 				goto out;
 
-			wl1271_check_operstate(wl,
+			wl1271_check_operstate(wl, wlvif,
 					       ieee80211_get_operstate(vif));
 		}
 		/*
 		 * stop device role if started (we might already be in
-		 * STA role). TODO: make it better.
+		 * STA/IBSS role).
 		 */
-		if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
-			ret = wl12xx_croc(wl, wl->dev_role_id);
-			if (ret < 0)
-				goto out;
-
-			ret = wl12xx_cmd_role_stop_dev(wl);
+		if (wl12xx_dev_role_started(wlvif)) {
+			ret = wl12xx_stop_dev(wl, wlvif);
 			if (ret < 0)
 				goto out;
 		}
 
 		/* If we want to go in PSM but we're not there yet */
-		if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
-		    !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+		if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+		    !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
 			enum wl1271_cmd_ps_mode mode;
 
 			mode = STATION_POWER_SAVE_MODE;
-			ret = wl1271_ps_set_mode(wl, mode,
-						 wl->basic_rate,
+			ret = wl1271_ps_set_mode(wl, wlvif, mode,
+						 wlvif->basic_rate,
 						 true);
 			if (ret < 0)
 				goto out;
@@ -3673,7 +3882,7 @@
 			ret = wl1271_acx_set_ht_capabilities(wl,
 							     &sta_ht_cap,
 							     true,
-							     wl->sta_hlid);
+							     wlvif->sta.hlid);
 			if (ret < 0) {
 				wl1271_warning("Set ht cap true failed %d",
 					       ret);
@@ -3685,7 +3894,7 @@
 			ret = wl1271_acx_set_ht_capabilities(wl,
 							     &sta_ht_cap,
 							     false,
-							     wl->sta_hlid);
+							     wlvif->sta.hlid);
 			if (ret < 0) {
 				wl1271_warning("Set ht cap false failed %d",
 					       ret);
@@ -3697,7 +3906,7 @@
 	/* Handle HT information change. Done after join. */
 	if ((changed & BSS_CHANGED_HT) &&
 	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-		ret = wl1271_acx_set_ht_information(wl,
+		ret = wl1271_acx_set_ht_information(wl, wlvif,
 					bss_conf->ht_operation_mode);
 		if (ret < 0) {
 			wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3924,8 @@
 				       u32 changed)
 {
 	struct wl1271 *wl = hw->priv;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3936,9 @@
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
+	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+		goto out;
+
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
@@ -3746,6 +3959,7 @@
 			     const struct ieee80211_tx_queue_params *params)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	u8 ps_scheme;
 	int ret = 0;
 
@@ -3758,31 +3972,8 @@
 	else
 		ps_scheme = CONF_PS_SCHEME_LEGACY;
 
-	if (wl->state == WL1271_STATE_OFF) {
-		/*
-		 * If the state is off, the parameters will be recorded and
-		 * configured on init. This happens in AP-mode.
-		 */
-		struct conf_tx_ac_category *conf_ac =
-			&wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
-		struct conf_tx_tid *conf_tid =
-			&wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
-
-		conf_ac->ac = wl1271_tx_get_queue(queue);
-		conf_ac->cw_min = (u8)params->cw_min;
-		conf_ac->cw_max = params->cw_max;
-		conf_ac->aifsn = params->aifs;
-		conf_ac->tx_op_limit = params->txop << 5;
-
-		conf_tid->queue_id = wl1271_tx_get_queue(queue);
-		conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
-		conf_tid->tsid = wl1271_tx_get_queue(queue);
-		conf_tid->ps_scheme = ps_scheme;
-		conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
-		conf_tid->apsd_conf[0] = 0;
-		conf_tid->apsd_conf[1] = 0;
+	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
 		goto out;
-	}
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
@@ -3792,13 +3983,13 @@
 	 * the txop is configured in units of 32us by mac80211,
 	 * but we need it in microseconds (us)
 	 */
-	ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
 				params->cw_min, params->cw_max,
 				params->aifs, params->txop << 5);
 	if (ret < 0)
 		goto out_sleep;
 
-	ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
 				 CONF_CHANNEL_TYPE_EDCF,
 				 wl1271_tx_get_queue(queue),
 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4052,43 @@
 }
 
 static int wl1271_allocate_sta(struct wl1271 *wl,
-			     struct ieee80211_sta *sta,
-			     u8 *hlid)
+			     struct wl12xx_vif *wlvif,
+			     struct ieee80211_sta *sta)
 {
 	struct wl1271_station *wl_sta;
-	int id;
+	int ret;
 
-	id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
-	if (id >= AP_MAX_STATIONS) {
+
+	if (wl->active_sta_count >= AP_MAX_STATIONS) {
 		wl1271_warning("could not allocate HLID - too much stations");
 		return -EBUSY;
 	}
 
 	wl_sta = (struct wl1271_station *)sta->drv_priv;
-	set_bit(id, wl->ap_hlid_map);
-	wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
-	*hlid = wl_sta->hlid;
+	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+	if (ret < 0) {
+		wl1271_warning("could not allocate HLID - too many links");
+		return -EBUSY;
+	}
+
+	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
 	wl->active_sta_count++;
 	return 0;
 }
 
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
-	int id = hlid - WL1271_AP_STA_HLID_START;
-
-	if (hlid < WL1271_AP_STA_HLID_START)
+	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
 		return;
 
-	if (!test_bit(id, wl->ap_hlid_map))
-		return;
-
-	clear_bit(id, wl->ap_hlid_map);
+	clear_bit(hlid, wlvif->ap.sta_hlid_map);
 	memset(wl->links[hlid].addr, 0, ETH_ALEN);
 	wl->links[hlid].ba_bitmap = 0;
 	wl1271_tx_reset_link_queues(wl, hlid);
 	__clear_bit(hlid, &wl->ap_ps_map);
 	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+	wl12xx_free_link(wl, wlvif, &hlid);
 	wl->active_sta_count--;
 }
 
@@ -3906,6 +4097,8 @@
 			     struct ieee80211_sta *sta)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct wl1271_station *wl_sta;
 	int ret = 0;
 	u8 hlid;
 
@@ -3914,20 +4107,23 @@
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	if (wl->bss_type != BSS_TYPE_AP_BSS)
+	if (wlvif->bss_type != BSS_TYPE_AP_BSS)
 		goto out;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
 
-	ret = wl1271_allocate_sta(wl, sta, &hlid);
+	ret = wl1271_allocate_sta(wl, wlvif, sta);
 	if (ret < 0)
 		goto out;
 
+	wl_sta = (struct wl1271_station *)sta->drv_priv;
+	hlid = wl_sta->hlid;
+
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out_free_sta;
 
-	ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
 	if (ret < 0)
 		goto out_sleep;
 
@@ -3944,7 +4140,7 @@
 
 out_free_sta:
 	if (ret < 0)
-		wl1271_free_sta(wl, hlid);
+		wl1271_free_sta(wl, wlvif, hlid);
 
 out:
 	mutex_unlock(&wl->mutex);
@@ -3956,6 +4152,7 @@
 				struct ieee80211_sta *sta)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	struct wl1271_station *wl_sta;
 	int ret = 0, id;
 
@@ -3964,14 +4161,14 @@
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
 
-	if (wl->bss_type != BSS_TYPE_AP_BSS)
+	if (wlvif->bss_type != BSS_TYPE_AP_BSS)
 		goto out;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
 
 	wl_sta = (struct wl1271_station *)sta->drv_priv;
-	id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
-	if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+	id = wl_sta->hlid;
+	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
 		goto out;
 
 	ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4179,7 @@
 	if (ret < 0)
 		goto out_sleep;
 
-	wl1271_free_sta(wl, wl_sta->hlid);
+	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
 
 out_sleep:
 	wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4196,7 @@
 				  u8 buf_size)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret;
 	u8 hlid, *ba_bitmap;
 
@@ -4016,10 +4214,10 @@
 		goto out;
 	}
 
-	if (wl->bss_type == BSS_TYPE_STA_BSS) {
-		hlid = wl->sta_hlid;
-		ba_bitmap = &wl->ba_rx_bitmap;
-	} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+		hlid = wlvif->sta.hlid;
+		ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
 		struct wl1271_station *wl_sta;
 
 		wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4237,7 @@
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (!wl->ba_support || !wl->ba_allowed) {
+		if (!wlvif->ba_support || !wlvif->ba_allowed) {
 			ret = -ENOTSUPP;
 			break;
 		}
@@ -4108,8 +4306,9 @@
 				   struct ieee80211_vif *vif,
 				   const struct cfg80211_bitrate_mask *mask)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	struct wl1271 *wl = hw->priv;
-	int i;
+	int i, ret = 0;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
 		mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4317,39 @@
 	mutex_lock(&wl->mutex);
 
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-		wl->bitrate_masks[i] =
+		wlvif->bitrate_masks[i] =
 			wl1271_tx_enabled_rates_get(wl,
 						    mask->control[i].legacy,
 						    i);
+
+	if (unlikely(wl->state == WL1271_STATE_OFF))
+		goto out;
+
+	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+		ret = wl1271_ps_elp_wakeup(wl);
+		if (ret < 0)
+			goto out;
+
+		wl1271_set_band_rate(wl, wlvif);
+		wlvif->basic_rate =
+			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+		wl1271_ps_elp_sleep(wl);
+	}
+out:
 	mutex_unlock(&wl->mutex);
 
-	return 0;
+	return ret;
 }
 
 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
 				     struct ieee80211_channel_switch *ch_switch)
 {
 	struct wl1271 *wl = hw->priv;
+	struct wl12xx_vif *wlvif;
 	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4357,24 @@
 	mutex_lock(&wl->mutex);
 
 	if (unlikely(wl->state == WL1271_STATE_OFF)) {
-		mutex_unlock(&wl->mutex);
-		ieee80211_chswitch_done(wl->vif, false);
-		return;
+		wl12xx_for_each_wlvif_sta(wl, wlvif) {
+			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+			ieee80211_chswitch_done(vif, false);
+		}
+		goto out;
 	}
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
 
-	ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+	/* TODO: change mac80211 to pass vif as param */
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		ret = wl12xx_cmd_channel_switch(wl, ch_switch);
 
-	if (!ret)
-		set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+		if (!ret)
+			set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+	}
 
 	wl1271_ps_elp_sleep(wl);
 
@@ -4170,10 +4394,6 @@
 
 	/* packets are considered pending if in the TX queue or the FW */
 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
-	/* the above is appropriate for STA mode for PS purposes */
-	WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
 out:
 	mutex_unlock(&wl->mutex);
 
@@ -4410,6 +4630,7 @@
 	.stop = wl1271_op_stop,
 	.add_interface = wl1271_op_add_interface,
 	.remove_interface = wl1271_op_remove_interface,
+	.change_interface = wl12xx_op_change_interface,
 #ifdef CONFIG_PM
 	.suspend = wl1271_op_suspend,
 	.resume = wl1271_op_resume,
@@ -4604,7 +4825,7 @@
 	.read = wl1271_sysfs_read_fwlog,
 };
 
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
 {
 	int ret;
 
@@ -4645,9 +4866,8 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
 
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
 {
 	if (wl->state == WL1271_STATE_PLT)
 		__wl1271_plt_stop(wl);
@@ -4657,9 +4877,8 @@
 	wl->mac80211_registered = false;
 
 }
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
 
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
 	static const u32 cipher_suites[] = {
 		WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4955,33 @@
 
 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
 
-	SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+	/* the FW answers probe-requests in AP-mode */
+	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+	wl->hw->wiphy->probe_resp_offload =
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+	SET_IEEE80211_DEV(wl->hw, wl->dev);
 
 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
+	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
 
 	wl->hw->max_rx_aggregation_subframes = 8;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
 {
 	struct ieee80211_hw *hw;
-	struct platform_device *plat_dev = NULL;
 	struct wl1271 *wl;
 	int i, j, ret;
 	unsigned int order;
 
-	BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
 
 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
 	if (!hw) {
@@ -4765,41 +4990,26 @@
 		goto err_hw_alloc;
 	}
 
-	plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
-	if (!plat_dev) {
-		wl1271_error("could not allocate platform_device");
-		ret = -ENOMEM;
-		goto err_plat_alloc;
-	}
-
 	wl = hw->priv;
 	memset(wl, 0, sizeof(*wl));
 
 	INIT_LIST_HEAD(&wl->list);
+	INIT_LIST_HEAD(&wl->wlvif_list);
 
 	wl->hw = hw;
-	wl->plat_dev = plat_dev;
 
 	for (i = 0; i < NUM_TX_QUEUES; i++)
-		skb_queue_head_init(&wl->tx_queue[i]);
-
-	for (i = 0; i < NUM_TX_QUEUES; i++)
-		for (j = 0; j < AP_MAX_LINKS; j++)
+		for (j = 0; j < WL12XX_MAX_LINKS; j++)
 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
 	skb_queue_head_init(&wl->deferred_rx_queue);
 	skb_queue_head_init(&wl->deferred_tx_queue);
 
 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
-	INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
-	INIT_WORK(&wl->rx_streaming_enable_work,
-		  wl1271_rx_streaming_enable_work);
-	INIT_WORK(&wl->rx_streaming_disable_work,
-		  wl1271_rx_streaming_disable_work);
 
 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
 	if (!wl->freezable_wq) {
@@ -4808,41 +5018,21 @@
 	}
 
 	wl->channel = WL1271_DEFAULT_CHANNEL;
-	wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
-	wl->default_key = 0;
 	wl->rx_counter = 0;
-	wl->psm_entry_retry = 0;
 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-	wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
-	wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
-	wl->rate_set = CONF_TX_RATE_MASK_BASIC;
 	wl->band = IEEE80211_BAND_2GHZ;
 	wl->vif = NULL;
 	wl->flags = 0;
 	wl->sg_enabled = true;
 	wl->hw_pg_ver = -1;
-	wl->bss_type = MAX_BSS_TYPE;
-	wl->set_bss_type = MAX_BSS_TYPE;
-	wl->last_tx_hlid = 0;
 	wl->ap_ps_map = 0;
 	wl->ap_fw_ps_map = 0;
 	wl->quirks = 0;
 	wl->platform_quirks = 0;
 	wl->sched_scanning = false;
-	wl->tx_security_seq = 0;
-	wl->tx_security_last_seq_lsb = 0;
 	wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-	wl->role_id = WL12XX_INVALID_ROLE_ID;
 	wl->system_hlid = WL12XX_SYSTEM_HLID;
-	wl->sta_hlid = WL12XX_INVALID_LINK_ID;
-	wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
-	wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-	wl->session_counter = 0;
-	wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
-	wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
 	wl->active_sta_count = 0;
-	setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
-		    (unsigned long) wl);
 	wl->fwlog_size = 0;
 	init_waitqueue_head(&wl->fwlog_waitq);
 
@@ -4860,8 +5050,6 @@
 
 	/* Apply default driver configuration. */
 	wl1271_conf_init(wl);
-	wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-	wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
 
 	order = get_order(WL1271_AGGR_BUFFER_SIZE);
 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5071,8 @@
 		goto err_dummy_packet;
 	}
 
-	/* Register platform device */
-	ret = platform_device_register(wl->plat_dev);
-	if (ret) {
-		wl1271_error("couldn't register platform device");
-		goto err_fwlog;
-	}
-	dev_set_drvdata(&wl->plat_dev->dev, wl);
-
-	/* Create sysfs file to control bt coex state */
-	ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-	if (ret < 0) {
-		wl1271_error("failed to create sysfs file bt_coex_state");
-		goto err_platform;
-	}
-
-	/* Create sysfs file to get HW PG version */
-	ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-	if (ret < 0) {
-		wl1271_error("failed to create sysfs file hw_pg_ver");
-		goto err_bt_coex_state;
-	}
-
-	/* Create sysfs file for the FW log */
-	ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
-	if (ret < 0) {
-		wl1271_error("failed to create sysfs file fwlog");
-		goto err_hw_pg_ver;
-	}
-
 	return hw;
 
-err_hw_pg_ver:
-	device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
-	device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
-	platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
-	free_page((unsigned long)wl->fwlog);
-
 err_dummy_packet:
 	dev_kfree_skb(wl->dummy_packet);
 
@@ -4937,18 +5084,14 @@
 
 err_hw:
 	wl1271_debugfs_exit(wl);
-	kfree(plat_dev);
-
-err_plat_alloc:
 	ieee80211_free_hw(hw);
 
 err_hw_alloc:
 
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
 
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
 {
 	/* Unblock any fwlog readers */
 	mutex_lock(&wl->mutex);
@@ -4956,17 +5099,15 @@
 	wake_up_interruptible_all(&wl->fwlog_waitq);
 	mutex_unlock(&wl->mutex);
 
-	device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+	device_remove_bin_file(wl->dev, &fwlog_attr);
 
-	device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
 
-	device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-	platform_device_unregister(wl->plat_dev);
+	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
 	free_page((unsigned long)wl->fwlog);
 	dev_kfree_skb(wl->dummy_packet);
 	free_pages((unsigned long)wl->aggr_buf,
 			get_order(WL1271_AGGR_BUFFER_SIZE));
-	kfree(wl->plat_dev);
 
 	wl1271_debugfs_exit(wl);
 
@@ -4983,7 +5124,174 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+	struct wl1271 *wl = cookie;
+	unsigned long flags;
+
+	wl1271_debug(DEBUG_IRQ, "IRQ");
+
+	/* complete the ELP completion */
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+	if (wl->elp_compl) {
+		complete(wl->elp_compl);
+		wl->elp_compl = NULL;
+	}
+
+	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+		/* don't enqueue work right now; mark it as pending */
+		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+		disable_irq_nosync(wl->irq);
+		pm_wakeup_event(wl->dev, 0);
+		spin_unlock_irqrestore(&wl->wl_lock, flags);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+	struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+	struct ieee80211_hw *hw;
+	struct wl1271 *wl;
+	unsigned long irqflags;
+	int ret = -ENODEV;
+
+	hw = wl1271_alloc_hw();
+	if (IS_ERR(hw)) {
+		wl1271_error("can't allocate hw");
+		ret = PTR_ERR(hw);
+		goto out;
+	}
+
+	wl = hw->priv;
+	wl->irq = platform_get_irq(pdev, 0);
+	wl->ref_clock = pdata->board_ref_clock;
+	wl->tcxo_clock = pdata->board_tcxo_clock;
+	wl->platform_quirks = pdata->platform_quirks;
+	wl->set_power = pdata->set_power;
+	wl->dev = &pdev->dev;
+	wl->if_ops = pdata->ops;
+
+	platform_set_drvdata(pdev, wl);
+
+	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+		irqflags = IRQF_TRIGGER_RISING;
+	else
+		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+				   irqflags,
+				   pdev->name, wl);
+	if (ret < 0) {
+		wl1271_error("request_irq() failed: %d", ret);
+		goto out_free_hw;
+	}
+
+	ret = enable_irq_wake(wl->irq);
+	if (!ret) {
+		wl->irq_wake_enabled = true;
+		device_init_wakeup(wl->dev, 1);
+		if (pdata->pwr_in_suspend)
+			hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+	}
+	disable_irq(wl->irq);
+
+	ret = wl1271_init_ieee80211(wl);
+	if (ret)
+		goto out_irq;
+
+	ret = wl1271_register_hw(wl);
+	if (ret)
+		goto out_irq;
+
+	/* Create sysfs file to control bt coex state */
+	ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+	if (ret < 0) {
+		wl1271_error("failed to create sysfs file bt_coex_state");
+		goto out_irq;
+	}
+
+	/* Create sysfs file to get HW PG version */
+	ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+	if (ret < 0) {
+		wl1271_error("failed to create sysfs file hw_pg_ver");
+		goto out_bt_coex_state;
+	}
+
+	/* Create sysfs file for the FW log */
+	ret = device_create_bin_file(wl->dev, &fwlog_attr);
+	if (ret < 0) {
+		wl1271_error("failed to create sysfs file fwlog");
+		goto out_hw_pg_ver;
+	}
+
+	return 0;
+
+out_hw_pg_ver:
+	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+	free_irq(wl->irq, wl);
+
+out_free_hw:
+	wl1271_free_hw(wl);
+
+out:
+	return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+	struct wl1271 *wl = platform_get_drvdata(pdev);
+
+	if (wl->irq_wake_enabled) {
+		device_init_wakeup(wl->dev, 0);
+		disable_irq_wake(wl->irq);
+	}
+	wl1271_unregister_hw(wl);
+	free_irq(wl->irq, wl);
+	wl1271_free_hw(wl);
+
+	return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+	{ "wl12xx", 0 },
+	{  } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+	.probe		= wl12xx_probe,
+	.remove		= __devexit_p(wl12xx_remove),
+	.id_table	= wl12xx_id_table,
+	.driver = {
+		.name	= "wl12xx_driver",
+		.owner	= THIS_MODULE,
+	}
+};
+
+static int __init wl12xx_init(void)
+{
+	return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+	platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
 
 u32 wl12xx_debug_level = DEBUG_NONE;
 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index c15ebf2..a2bdacd 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -25,6 +25,7 @@
 #include "ps.h"
 #include "io.h"
 #include "tx.h"
+#include "debug.h"
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
@@ -32,6 +33,7 @@
 {
 	struct delayed_work *dwork;
 	struct wl1271 *wl;
+	struct wl12xx_vif *wlvif;
 
 	dwork = container_of(work, struct delayed_work, work);
 	wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,18 @@
 	if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
 		goto out;
 
-	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
-	    (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
-	     !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
 		goto out;
 
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+			goto out;
+
+		if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+			goto out;
+	}
+
 	wl1271_debug(DEBUG_PSM, "chip to elp");
 	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +74,20 @@
 /* Routines to toggle sleep mode while in ELP */
 void wl1271_ps_elp_sleep(struct wl1271 *wl)
 {
+	struct wl12xx_vif *wlvif;
+
 	/* we shouldn't get consecutive sleep requests */
 	if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
 		return;
 
-	if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
-	    !test_bit(WL1271_FLAG_IDLE, &wl->flags))
-		return;
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+			return;
+
+		if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+			return;
+	}
 
 	ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
 				     msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +159,8 @@
 	return 0;
 }
 
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
-		       u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
 {
 	int ret;
 
@@ -152,39 +168,34 @@
 	case STATION_POWER_SAVE_MODE:
 		wl1271_debug(DEBUG_PSM, "entering psm");
 
-		ret = wl1271_acx_wake_up_conditions(wl);
+		ret = wl1271_acx_wake_up_conditions(wl, wlvif);
 		if (ret < 0) {
 			wl1271_error("couldn't set wake up conditions");
 			return ret;
 		}
 
-		ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+		ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
 		if (ret < 0)
 			return ret;
 
-		set_bit(WL1271_FLAG_PSM, &wl->flags);
+		set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
 		break;
 	case STATION_ACTIVE_MODE:
 	default:
 		wl1271_debug(DEBUG_PSM, "leaving psm");
 
 		/* disable beacon early termination */
-		if (wl->band == IEEE80211_BAND_2GHZ) {
-			ret = wl1271_acx_bet_enable(wl, false);
+		if (wlvif->band == IEEE80211_BAND_2GHZ) {
+			ret = wl1271_acx_bet_enable(wl, wlvif, false);
 			if (ret < 0)
 				return ret;
 		}
 
-		/* disable beacon filtering */
-		ret = wl1271_acx_beacon_filter_opt(wl, false);
+		ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
 		if (ret < 0)
 			return ret;
 
-		ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
-		if (ret < 0)
-			return ret;
-
-		clear_bit(WL1271_FLAG_PSM, &wl->flags);
+		clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
 		break;
 	}
 
@@ -223,9 +234,11 @@
 	wl1271_handle_tx_low_watermark(wl);
 }
 
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  u8 hlid, bool clean_queues)
 {
 	struct ieee80211_sta *sta;
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
 	if (test_bit(hlid, &wl->ap_ps_map))
 		return;
@@ -235,7 +248,7 @@
 		     clean_queues);
 
 	rcu_read_lock();
-	sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
 	if (!sta) {
 		wl1271_error("could not find sta %pM for starting ps",
 			     wl->links[hlid].addr);
@@ -253,9 +266,10 @@
 	__set_bit(hlid, &wl->ap_ps_map);
 }
 
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
 	struct ieee80211_sta *sta;
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
 	if (!test_bit(hlid, &wl->ap_ps_map))
 		return;
@@ -265,7 +279,7 @@
 	__clear_bit(hlid, &wl->ap_ps_map);
 
 	rcu_read_lock();
-	sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
 	if (!sta) {
 		wl1271_error("could not find sta %pM for ending ps",
 			     wl->links[hlid].addr);
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 25eb9bc..a12052f0 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -27,13 +27,14 @@
 #include "wl12xx.h"
 #include "acx.h"
 
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
-		       u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		       enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
 int wl1271_ps_elp_wakeup(struct wl1271 *wl);
 void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			  u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
 
 #define WL1271_PS_COMPLETE_TIMEOUT 500
 
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 3f570f3..df34d59 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -408,7 +408,7 @@
 
 
 /* Firmware image load chunk size */
-#define CHUNK_SIZE          512
+#define CHUNK_SIZE	16384
 
 /* Firmware image header size */
 #define FW_HDR_SIZE 8
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index dee4cfe..4fbd2a7 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -25,9 +25,11 @@
 #include <linux/sched.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
 #include "rx.h"
+#include "tx.h"
 #include "io.h"
 
 static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@
 }
 
 static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
-				 bool unaligned)
+				 bool unaligned, u8 *hlid)
 {
 	struct wl1271_rx_descriptor *desc;
 	struct sk_buff *skb;
@@ -159,6 +161,7 @@
 	 * payload aligned to 4 bytes.
 	 */
 	memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+	*hlid = desc->hlid;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@
 	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
 
 	seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
-	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
 		     skb->len - desc->pad_len,
 		     beacon ? "beacon" : "",
-		     seq_num);
+		     seq_num, *hlid);
 
 	skb_trim(skb, skb->len - desc->pad_len);
 
@@ -185,6 +188,7 @@
 void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
 {
 	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
 	u32 buf_size;
 	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
 	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@
 	u32 mem_block;
 	u32 pkt_length;
 	u32 pkt_offset;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-	bool had_data = false;
+	u8 hlid;
 	bool unaligned = false;
 
 	while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@
 			 */
 			if (wl1271_rx_handle_data(wl,
 						  wl->aggr_buf + pkt_offset,
-						  pkt_length, unaligned) == 1)
-				had_data = true;
+						  pkt_length, unaligned,
+						  &hlid) == 1) {
+				if (hlid < WL12XX_MAX_LINKS)
+					__set_bit(hlid, active_hlids);
+				else
+					WARN(1,
+					     "hlid exceeded WL12XX_MAX_LINKS "
+					     "(%d)\n", hlid);
+			}
 
 			wl->rx_counter++;
 			drv_rx_counter++;
@@ -270,17 +280,5 @@
 	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
 		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
 
-	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-	    (wl->conf.rx_streaming.always ||
-	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-		u32 timeout = wl->conf.rx_streaming.duration;
-
-		/* restart rx streaming */
-		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-			ieee80211_queue_work(wl->hw,
-					     &wl->rx_streaming_enable_work);
-
-		mod_timer(&wl->rx_streaming_timer,
-			  jiffies + msecs_to_jiffies(timeout));
-	}
+	wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index fc29c67..e24111e 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -24,6 +24,7 @@
 #include <linux/ieee80211.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "cmd.h"
 #include "scan.h"
 #include "acx.h"
@@ -34,6 +35,8 @@
 {
 	struct delayed_work *dwork;
 	struct wl1271 *wl;
+	struct ieee80211_vif *vif;
+	struct wl12xx_vif *wlvif;
 	int ret;
 	bool is_sta, is_ibss;
 
@@ -50,28 +53,31 @@
 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
 		goto out;
 
+	vif = wl->scan_vif;
+	wlvif = wl12xx_vif_to_data(vif);
+
 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 	wl->scan.req = NULL;
+	wl->scan_vif = NULL;
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
 		goto out;
 
-	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
 		/* restore hardware connection monitoring template */
-		wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+		wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
 	}
 
 	/* return to ROC if needed */
-	is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
-	is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
-	if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
-	     (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
-	    !test_bit(wl->dev_role_id, wl->roc_map)) {
+	is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+	is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+	if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+	     (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+	    !test_bit(wlvif->dev_role_id, wl->roc_map)) {
 		/* restore remain on channel */
-		wl12xx_cmd_role_start_dev(wl);
-		wl12xx_roc(wl, wl->dev_role_id);
+		wl12xx_start_dev(wl, wlvif);
 	}
 	wl1271_ps_elp_sleep(wl);
 
@@ -155,9 +161,11 @@
 
 #define WL1271_NOTHING_TO_SCAN 1
 
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
-			     bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+			    enum ieee80211_band band,
+			    bool passive, u32 basic_rate)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	struct wl1271_cmd_scan *cmd;
 	struct wl1271_cmd_trigger_scan_to *trigger;
 	int ret;
@@ -177,11 +185,11 @@
 	if (passive)
 		scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-	if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
 		ret = -EINVAL;
 		goto out;
 	}
-	cmd->params.role_id = wl->role_id;
+	cmd->params.role_id = wlvif->role_id;
 	cmd->params.scan_options = cpu_to_le16(scan_options);
 
 	cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@
 
 	cmd->params.tx_rate = cpu_to_le32(basic_rate);
 	cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
-	cmd->params.tx_rate = cpu_to_le32(basic_rate);
 	cmd->params.tid_trigger = 0;
 	cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
@@ -208,11 +215,11 @@
 		memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
 	}
 
-	memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+	memcpy(cmd->addr, vif->addr, ETH_ALEN);
 
-	ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
-					 wl->scan.req->ie, wl->scan.req->ie_len,
-					 band);
+	ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+					 wl->scan.ssid_len, wl->scan.req->ie,
+					 wl->scan.req->ie_len, band);
 	if (ret < 0) {
 		wl1271_error("PROBE request template failed");
 		goto out;
@@ -241,11 +248,12 @@
 	return ret;
 }
 
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int ret = 0;
 	enum ieee80211_band band;
-	u32 rate;
+	u32 rate, mask;
 
 	switch (wl->scan.state) {
 	case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@
 
 	case WL1271_SCAN_STATE_2GHZ_ACTIVE:
 		band = IEEE80211_BAND_2GHZ;
-		rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-		ret = wl1271_scan_send(wl, band, false, rate);
+		mask = wlvif->bitrate_masks[band];
+		if (wl->scan.req->no_cck) {
+			mask &= ~CONF_TX_CCK_RATES;
+			if (!mask)
+				mask = CONF_TX_RATE_MASK_BASIC_P2P;
+		}
+		rate = wl1271_tx_min_rate_get(wl, mask);
+		ret = wl1271_scan_send(wl, vif, band, false, rate);
 		if (ret == WL1271_NOTHING_TO_SCAN) {
 			wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
-			wl1271_scan_stm(wl);
+			wl1271_scan_stm(wl, vif);
 		}
 
 		break;
 
 	case WL1271_SCAN_STATE_2GHZ_PASSIVE:
 		band = IEEE80211_BAND_2GHZ;
-		rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-		ret = wl1271_scan_send(wl, band, true, rate);
+		mask = wlvif->bitrate_masks[band];
+		if (wl->scan.req->no_cck) {
+			mask &= ~CONF_TX_CCK_RATES;
+			if (!mask)
+				mask = CONF_TX_RATE_MASK_BASIC_P2P;
+		}
+		rate = wl1271_tx_min_rate_get(wl, mask);
+		ret = wl1271_scan_send(wl, vif, band, true, rate);
 		if (ret == WL1271_NOTHING_TO_SCAN) {
 			if (wl->enable_11a)
 				wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
 			else
 				wl->scan.state = WL1271_SCAN_STATE_DONE;
-			wl1271_scan_stm(wl);
+			wl1271_scan_stm(wl, vif);
 		}
 
 		break;
 
 	case WL1271_SCAN_STATE_5GHZ_ACTIVE:
 		band = IEEE80211_BAND_5GHZ;
-		rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-		ret = wl1271_scan_send(wl, band, false, rate);
+		rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+		ret = wl1271_scan_send(wl, vif, band, false, rate);
 		if (ret == WL1271_NOTHING_TO_SCAN) {
 			wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
-			wl1271_scan_stm(wl);
+			wl1271_scan_stm(wl, vif);
 		}
 
 		break;
 
 	case WL1271_SCAN_STATE_5GHZ_PASSIVE:
 		band = IEEE80211_BAND_5GHZ;
-		rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-		ret = wl1271_scan_send(wl, band, true, rate);
+		rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+		ret = wl1271_scan_send(wl, vif, band, true, rate);
 		if (ret == WL1271_NOTHING_TO_SCAN) {
 			wl->scan.state = WL1271_SCAN_STATE_DONE;
-			wl1271_scan_stm(wl);
+			wl1271_scan_stm(wl, vif);
 		}
 
 		break;
@@ -317,7 +337,8 @@
 	}
 }
 
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+		const u8 *ssid, size_t ssid_len,
 		struct cfg80211_scan_request *req)
 {
 	/*
@@ -338,6 +359,7 @@
 		wl->scan.ssid_len = 0;
 	}
 
+	wl->scan_vif = vif;
 	wl->scan.req = req;
 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 
@@ -346,7 +368,7 @@
 	ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
 				     msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
 
-	wl1271_scan_stm(wl);
+	wl1271_scan_stm(wl, vif);
 
 	return 0;
 }
@@ -415,18 +437,19 @@
 
 			if (flags & IEEE80211_CHAN_RADAR) {
 				channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+
 				channels[j].passive_duration =
 					cpu_to_le16(c->dwell_time_dfs);
-			}
-			else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+			} else {
 				channels[j].passive_duration =
 					cpu_to_le16(c->dwell_time_passive);
-			} else {
-				channels[j].min_duration =
-					cpu_to_le16(c->min_dwell_time_active);
-				channels[j].max_duration =
-					cpu_to_le16(c->max_dwell_time_active);
 			}
+
+			channels[j].min_duration =
+				cpu_to_le16(c->min_dwell_time_active);
+			channels[j].max_duration =
+				cpu_to_le16(c->max_dwell_time_active);
+
 			channels[j].tx_power_att = req->channels[i]->max_power;
 			channels[j].channel = req->channels[i]->hw_value;
 
@@ -550,6 +573,9 @@
 			 * so they're used in probe requests.
 			 */
 			for (i = 0; i < req->n_ssids; i++) {
+				if (!req->ssids[i].ssid_len)
+					continue;
+
 				for (j = 0; j < cmd->n_ssids; j++)
 					if (!memcmp(req->ssids[i].ssid,
 						   cmd->ssids[j].ssid,
@@ -585,6 +611,7 @@
 }
 
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+				  struct wl12xx_vif *wlvif,
 				  struct cfg80211_sched_scan_request *req,
 				  struct ieee80211_sched_scan_ies *ies)
 {
@@ -631,7 +658,7 @@
 	}
 
 	if (!force_passive && cfg->active[0]) {
-		ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+		ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
 						 req->ssids[0].ssid_len,
 						 ies->ie[IEEE80211_BAND_2GHZ],
 						 ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +670,7 @@
 	}
 
 	if (!force_passive && cfg->active[1]) {
-		ret = wl1271_cmd_build_probe_req(wl,  req->ssids[0].ssid,
+		ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
 						 req->ssids[0].ssid_len,
 						 ies->ie[IEEE80211_BAND_5GHZ],
 						 ies->len[IEEE80211_BAND_5GHZ],
@@ -667,17 +694,17 @@
 	return ret;
 }
 
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	struct wl1271_cmd_sched_scan_start *start;
 	int ret = 0;
 
 	wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
 
-	if (wl->bss_type != BSS_TYPE_STA_BSS)
+	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
 		return -EOPNOTSUPP;
 
-	if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
+	if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
 		return -EBUSY;
 
 	start = kzalloc(sizeof(*start), GFP_KERNEL);
@@ -727,7 +754,6 @@
 		wl1271_error("failed to send sched scan stop command");
 		goto out_free;
 	}
-	wl->sched_scanning = false;
 
 out_free:
 	kfree(stop);
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 9211515..a7ed43d 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -26,18 +26,20 @@
 
 #include "wl12xx.h"
 
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+		const u8 *ssid, size_t ssid_len,
 		struct cfg80211_scan_request *req);
 int wl1271_scan_stop(struct wl1271 *wl);
 int wl1271_scan_build_probe_req(struct wl1271 *wl,
 				const u8 *ssid, size_t ssid_len,
 				const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
 void wl1271_scan_complete_work(struct work_struct *work);
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+				     struct wl12xx_vif *wlvif,
 				     struct cfg80211_sched_scan_request *req,
 				     struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
 void wl1271_scan_sched_scan_results(struct wl1271 *wl);
 
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 516a898..468a505 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/platform_device.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
@@ -44,107 +45,67 @@
 #define SDIO_DEVICE_ID_TI_WL1271	0x4076
 #endif
 
+struct wl12xx_sdio_glue {
+	struct device *dev;
+	struct platform_device *core;
+};
+
 static const struct sdio_device_id wl1271_devices[] __devinitconst = {
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
 	{}
 };
 MODULE_DEVICE_TABLE(sdio, wl1271_devices);
 
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
+static void wl1271_sdio_set_block_size(struct device *child,
+				       unsigned int blksz)
 {
-	sdio_claim_host(wl->if_priv);
-	sdio_set_block_size(wl->if_priv, blksz);
-	sdio_release_host(wl->if_priv);
+	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+	struct sdio_func *func = dev_to_sdio_func(glue->dev);
+
+	sdio_claim_host(func);
+	sdio_set_block_size(func, blksz);
+	sdio_release_host(func);
 }
 
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
-	return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
-	return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
-	struct wl1271 *wl = cookie;
-	unsigned long flags;
-
-	wl1271_debug(DEBUG_IRQ, "IRQ");
-
-	/* complete the ELP completion */
-	spin_lock_irqsave(&wl->wl_lock, flags);
-	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-	if (wl->elp_compl) {
-		complete(wl->elp_compl);
-		wl->elp_compl = NULL;
-	}
-
-	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
-		/* don't enqueue a work right now. mark it as pending */
-		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
-		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
-		disable_irq_nosync(wl->irq);
-		pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
-		spin_unlock_irqrestore(&wl->wl_lock, flags);
-		return IRQ_HANDLED;
-	}
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-	return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-	disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-	enable_irq(wl->irq);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
 				 size_t len, bool fixed)
 {
 	int ret;
-	struct sdio_func *func = wl_to_func(wl);
+	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+	struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
 	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
 		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
-		wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
-			     addr, ((u8 *)buf)[0]);
+		dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+			addr, ((u8 *)buf)[0]);
 	} else {
 		if (fixed)
 			ret = sdio_readsb(func, buf, addr, len);
 		else
 			ret = sdio_memcpy_fromio(func, buf, addr, len);
 
-		wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
-			     addr, len);
-		wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+		dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+			addr, len);
 	}
 
 	if (ret)
-		wl1271_error("sdio read failed (%d)", ret);
+		dev_err(child->parent, "sdio read failed (%d)\n", ret);
 }
 
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
 				  size_t len, bool fixed)
 {
 	int ret;
-	struct sdio_func *func = wl_to_func(wl);
+	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+	struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
 	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
 		sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
-		wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
-			     addr, ((u8 *)buf)[0]);
+		dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+			addr, ((u8 *)buf)[0]);
 	} else {
-		wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
-			     addr, len);
-		wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+		dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+			addr, len);
 
 		if (fixed)
 			ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@
 	}
 
 	if (ret)
-		wl1271_error("sdio write failed (%d)", ret);
+		dev_err(child->parent, "sdio write failed (%d)\n", ret);
 }
 
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
 {
-	struct sdio_func *func = wl_to_func(wl);
 	int ret;
+	struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
 	/* If enabled, tell runtime PM not to power off the card */
 	if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@
 	return ret;
 }
 
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
 {
-	struct sdio_func *func = wl_to_func(wl);
 	int ret;
+	struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
 	sdio_disable_func(func);
 	sdio_release_host(func);
@@ -200,46 +161,43 @@
 	return ret;
 }
 
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
 {
+	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
 	if (enable)
-		return wl1271_sdio_power_on(wl);
+		return wl12xx_sdio_power_on(glue);
 	else
-		return wl1271_sdio_power_off(wl);
+		return wl12xx_sdio_power_off(glue);
 }
 
 static struct wl1271_if_operations sdio_ops = {
-	.read		= wl1271_sdio_raw_read,
-	.write		= wl1271_sdio_raw_write,
-	.power		= wl1271_sdio_set_power,
-	.dev		= wl1271_sdio_wl_to_dev,
-	.enable_irq	= wl1271_sdio_enable_interrupts,
-	.disable_irq	= wl1271_sdio_disable_interrupts,
+	.read		= wl12xx_sdio_raw_read,
+	.write		= wl12xx_sdio_raw_write,
+	.power		= wl12xx_sdio_set_power,
 	.set_block_size = wl1271_sdio_set_block_size,
 };
 
 static int __devinit wl1271_probe(struct sdio_func *func,
 				  const struct sdio_device_id *id)
 {
-	struct ieee80211_hw *hw;
-	const struct wl12xx_platform_data *wlan_data;
-	struct wl1271 *wl;
-	unsigned long irqflags;
+	struct wl12xx_platform_data *wlan_data;
+	struct wl12xx_sdio_glue *glue;
+	struct resource res[1];
 	mmc_pm_flag_t mmcflags;
-	int ret;
+	int ret = -ENOMEM;
 
 	/* We are only able to handle the wlan function */
 	if (func->num != 0x02)
 		return -ENODEV;
 
-	hw = wl1271_alloc_hw();
-	if (IS_ERR(hw))
-		return PTR_ERR(hw);
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&func->dev, "can't allocate glue\n");
+		goto out;
+	}
 
-	wl = hw->priv;
-
-	wl->if_priv = func;
-	wl->if_ops = &sdio_ops;
+	glue->dev = &func->dev;
 
 	/* Grab access to FN0 for ELP reg. */
 	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@
 	wlan_data = wl12xx_get_platform_data();
 	if (IS_ERR(wlan_data)) {
 		ret = PTR_ERR(wlan_data);
-		wl1271_error("missing wlan platform data: %d", ret);
-		goto out_free;
+		dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+		goto out_free_glue;
 	}
 
-	wl->irq = wlan_data->irq;
-	wl->ref_clock = wlan_data->board_ref_clock;
-	wl->tcxo_clock = wlan_data->board_tcxo_clock;
-	wl->platform_quirks = wlan_data->platform_quirks;
+	/* if sdio can keep power while host is suspended, enable wow */
+	mmcflags = sdio_get_host_pm_caps(func);
+	dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
-	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
-		irqflags = IRQF_TRIGGER_RISING;
-	else
-		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+	if (mmcflags & MMC_PM_KEEP_POWER)
+		wlan_data->pwr_in_suspend = true;
 
-	ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
-				   irqflags,
-				   DRIVER_NAME, wl);
-	if (ret < 0) {
-		wl1271_error("request_irq() failed: %d", ret);
-		goto out_free;
-	}
+	wlan_data->ops = &sdio_ops;
 
-	ret = enable_irq_wake(wl->irq);
-	if (!ret) {
-		wl->irq_wake_enabled = true;
-		device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
-
-		/* if sdio can keep power while host is suspended, enable wow */
-		mmcflags = sdio_get_host_pm_caps(func);
-		wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
-
-		if (mmcflags & MMC_PM_KEEP_POWER)
-			hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
-	}
-	disable_irq(wl->irq);
-
-	ret = wl1271_init_ieee80211(wl);
-	if (ret)
-		goto out_irq;
-
-	ret = wl1271_register_hw(wl);
-	if (ret)
-		goto out_irq;
-
-	sdio_set_drvdata(func, wl);
+	sdio_set_drvdata(func, glue);
 
 	/* Tell PM core that we don't need the card to be powered now */
 	pm_runtime_put_noidle(&func->dev);
 
+	glue->core = platform_device_alloc("wl12xx", -1);
+	if (!glue->core) {
+		dev_err(glue->dev, "can't allocate platform_device");
+		ret = -ENOMEM;
+		goto out_free_glue;
+	}
+
+	glue->core->dev.parent = &func->dev;
+
+	memset(res, 0x00, sizeof(res));
+
+	res[0].start = wlan_data->irq;
+	res[0].flags = IORESOURCE_IRQ;
+	res[0].name = "irq";
+
+	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+	if (ret) {
+		dev_err(glue->dev, "can't add resources\n");
+		goto out_dev_put;
+	}
+
+	ret = platform_device_add_data(glue->core, wlan_data,
+				       sizeof(*wlan_data));
+	if (ret) {
+		dev_err(glue->dev, "can't add platform data\n");
+		goto out_dev_put;
+	}
+
+	ret = platform_device_add(glue->core);
+	if (ret) {
+		dev_err(glue->dev, "can't add platform device\n");
+		goto out_dev_put;
+	}
 	return 0;
 
- out_irq:
-	free_irq(wl->irq, wl);
+out_dev_put:
+	platform_device_put(glue->core);
 
- out_free:
-	wl1271_free_hw(wl);
+out_free_glue:
+	kfree(glue);
 
+out:
 	return ret;
 }
 
 static void __devexit wl1271_remove(struct sdio_func *func)
 {
-	struct wl1271 *wl = sdio_get_drvdata(func);
+	struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
 
 	/* Undo decrement done above in wl1271_probe */
 	pm_runtime_get_noresume(&func->dev);
 
-	wl1271_unregister_hw(wl);
-	if (wl->irq_wake_enabled) {
-		device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
-		disable_irq_wake(wl->irq);
-	}
-	free_irq(wl->irq, wl);
-	wl1271_free_hw(wl);
+	platform_device_del(glue->core);
+	platform_device_put(glue->core);
+	kfree(glue);
 }
 
 #ifdef CONFIG_PM
@@ -332,20 +289,21 @@
 	/* Tell MMC/SDIO core it's OK to power down the card
 	 * (if it isn't already), but not to remove it completely */
 	struct sdio_func *func = dev_to_sdio_func(dev);
-	struct wl1271 *wl = sdio_get_drvdata(func);
+	struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+	struct wl1271 *wl = platform_get_drvdata(glue->core);
 	mmc_pm_flag_t sdio_flags;
 	int ret = 0;
 
-	wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
-		     wl->wow_enabled);
+	dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+		wl->wow_enabled);
 
 	/* check whether sdio should keep power */
 	if (wl->wow_enabled) {
 		sdio_flags = sdio_get_host_pm_caps(func);
 
 		if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
-			wl1271_error("can't keep power while host "
-				     "is suspended");
+			dev_err(dev, "can't keep power while host "
+				     "is suspended\n");
 			ret = -EINVAL;
 			goto out;
 		}
@@ -353,7 +311,7 @@
 		/* keep power while host suspended */
 		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
 		if (ret) {
-			wl1271_error("error while trying to keep power");
+			dev_err(dev, "error while trying to keep power\n");
 			goto out;
 		}
 
@@ -367,9 +325,10 @@
 static int wl1271_resume(struct device *dev)
 {
 	struct sdio_func *func = dev_to_sdio_func(dev);
-	struct wl1271 *wl = sdio_get_drvdata(func);
+	struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+	struct wl1271 *wl = platform_get_drvdata(glue->core);
 
-	wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+	dev_dbg(dev, "wl1271 resume\n");
 	if (wl->wow_enabled) {
 		/* claim back host */
 		sdio_claim_host(func);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9..0000000
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI		0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271	0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
-	"This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
-	"This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
-	struct wl1271 wl;
-	struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
-	{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
-	{}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
-	return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
-	return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
-		size_t len, bool fixed)
-{
-	int ret = 0;
-	struct sdio_func *func = wl_to_func(wl);
-
-	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
-		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
-		wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
-				addr, ((u8 *)buf)[0]);
-	} else {
-		if (fixed)
-			ret = sdio_readsb(func, buf, addr, len);
-		else
-			ret = sdio_memcpy_fromio(func, buf, addr, len);
-
-		wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
-				addr, len);
-		wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-	}
-
-	if (ret)
-		wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
-		size_t len, bool fixed)
-{
-	int ret = 0;
-	struct sdio_func *func = wl_to_func(wl);
-
-	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
-		sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
-		wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
-				addr, ((u8 *)buf)[0]);
-	} else {
-		wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
-				addr, len);
-		wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
-		if (fixed)
-			ret = sdio_writesb(func, addr, buf, len);
-		else
-			ret = sdio_memcpy_toio(func, addr, buf, len);
-	}
-	if (ret)
-		wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
-	struct sdio_func *func = wl_to_func(wl);
-	int ret;
-
-	/* Let the SDIO stack handle wlan_enable control, so we
-	 * keep host claimed while wlan is in use to keep wl1271
-	 * alive.
-	 */
-	if (enable) {
-		/* Power up the card */
-		ret = pm_runtime_get_sync(&func->dev);
-		if (ret < 0)
-			goto out;
-
-		/* Runtime PM might be disabled, power up the card manually */
-		ret = mmc_power_restore_host(func->card->host);
-		if (ret < 0)
-			goto out;
-
-		sdio_claim_host(func);
-		sdio_enable_func(func);
-	} else {
-		sdio_disable_func(func);
-		sdio_release_host(func);
-
-		/* Runtime PM might be disabled, power off the card manually */
-		ret = mmc_power_save_host(func->card->host);
-		if (ret < 0)
-			goto out;
-
-		/* Power down the card */
-		ret = pm_runtime_put_sync(&func->dev);
-	}
-
-out:
-	return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
-	.read		= wl1271_sdio_raw_read,
-	.write		= wl1271_sdio_raw_write,
-	.power		= wl1271_sdio_set_power,
-	.dev		= wl1271_sdio_wl_to_dev,
-	.enable_irq	= wl1271_sdio_enable_interrupts,
-	.disable_irq	= wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
-	u32 elp_reg;
-
-	elp_reg = ELPCTRL_WAKE_UP;
-	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
-	const struct firmware *fw;
-	int ret;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20)
-		ret = request_firmware(&fw, WL128X_FW_NAME,
-				       wl1271_wl_to_dev(wl));
-	else
-		ret = request_firmware(&fw, WL127X_FW_NAME,
-				       wl1271_wl_to_dev(wl));
-
-	if (ret < 0) {
-		wl1271_error("could not get firmware: %d", ret);
-		return ret;
-	}
-
-	if (fw->size % 4) {
-		wl1271_error("firmware size is not multiple of 32 bits: %zu",
-				fw->size);
-		ret = -EILSEQ;
-		goto out;
-	}
-
-	wl->fw_len = fw->size;
-	wl->fw = vmalloc(wl->fw_len);
-
-	if (!wl->fw) {
-		wl1271_error("could not allocate memory for the firmware");
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	memcpy(wl->fw, fw->data, wl->fw_len);
-
-	ret = 0;
-
-out:
-	release_firmware(fw);
-
-	return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
-	const struct firmware *fw;
-	int ret;
-
-	ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
-	if (ret < 0) {
-		wl1271_error("could not get nvs file: %d", ret);
-		return ret;
-	}
-
-	wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
-	if (!wl->nvs) {
-		wl1271_error("could not allocate memory for the nvs file");
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	wl->nvs_len = fw->size;
-
-out:
-	release_firmware(fw);
-
-	return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
-	struct wl1271_partition_set partition;
-	int ret;
-
-	msleep(WL1271_PRE_POWER_ON_SLEEP);
-	ret = wl1271_power_on(wl);
-	if (ret)
-		return ret;
-
-	msleep(WL1271_POWER_ON_SLEEP);
-
-	/* We don't need a real memory partition here, because we only want
-	 * to use the registers at this point. */
-	memset(&partition, 0, sizeof(partition));
-	partition.reg.start = REGISTERS_BASE;
-	partition.reg.size = REGISTERS_DOWN_SIZE;
-	wl1271_set_partition(wl, &partition);
-
-	/* ELP module wake up */
-	wl1271_fw_wakeup(wl);
-
-	/* whal_FwCtrl_BootSm() */
-
-	/* 0. read chip id from CHIP_ID */
-	wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
-	/* 1. check if chip id is valid */
-
-	switch (wl->chip.id) {
-	case CHIP_ID_1271_PG10:
-		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
-				wl->chip.id);
-		break;
-	case CHIP_ID_1271_PG20:
-		wl1271_notice("chip id 0x%x (1271 PG20)",
-				wl->chip.id);
-		break;
-	case CHIP_ID_1283_PG20:
-		wl1271_notice("chip id 0x%x (1283 PG20)",
-				wl->chip.id);
-		break;
-	case CHIP_ID_1283_PG10:
-	default:
-		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
-		return -ENODEV;
-	}
-
-	return ret;
-}
-
-static struct wl1271_partition_set part_down = {
-	.mem = {
-		.start = 0x00000000,
-		.size  = 0x000177c0
-	},
-	.reg = {
-		.start = REGISTERS_BASE,
-		.size  = 0x00008800
-	},
-	.mem2 = {
-		.start = 0x00000000,
-		.size  = 0x00000000
-	},
-	.mem3 = {
-		.start = 0x00000000,
-		.size  = 0x00000000
-	},
-};
-
-static int tester(void *data)
-{
-	struct wl1271 *wl = data;
-	struct sdio_func *func = wl_to_func(wl);
-	struct device *pdev = &func->dev;
-	int ret = 0;
-	bool rx_started = 0;
-	bool tx_started = 0;
-	uint8_t *tx_buf, *rx_buf;
-	int test_size = PAGE_SIZE;
-	u32 addr = 0;
-	struct wl1271_partition_set partition;
-
-	/* We assume chip is powered up and firmware fetched */
-
-	memcpy(&partition, &part_down, sizeof(partition));
-	partition.mem.start = addr;
-	wl1271_set_partition(wl, &partition);
-
-	tx_buf = kmalloc(test_size, GFP_KERNEL);
-	rx_buf = kmalloc(test_size, GFP_KERNEL);
-	if (!tx_buf || !rx_buf) {
-		dev_err(pdev,
-			"Could not allocate memory. Test will not run.\n");
-		ret = -ENOMEM;
-		goto free;
-	}
-
-	memset(tx_buf, 0x5a, test_size);
-
-	/* write something in data area so we can read it back */
-	wl1271_write(wl, addr, tx_buf, test_size, false);
-
-	while (!kthread_should_stop()) {
-		if (rx && !rx_started) {
-			dev_info(pdev, "starting rx test\n");
-			rx_started = 1;
-		} else if (!rx && rx_started) {
-			dev_info(pdev, "stopping rx test\n");
-			rx_started = 0;
-		}
-
-		if (tx && !tx_started) {
-			dev_info(pdev, "starting tx test\n");
-			tx_started = 1;
-		} else if (!tx && tx_started) {
-			dev_info(pdev, "stopping tx test\n");
-			tx_started = 0;
-		}
-
-		if (rx_started)
-			wl1271_read(wl, addr, rx_buf, test_size, false);
-
-		if (tx_started)
-			wl1271_write(wl, addr, tx_buf, test_size, false);
-
-		if (!rx_started && !tx_started)
-			msleep(100);
-	}
-
-free:
-	kfree(tx_buf);
-	kfree(rx_buf);
-	return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
-		const struct sdio_device_id *id)
-{
-	const struct wl12xx_platform_data *wlan_data;
-	struct wl1271 *wl;
-	struct wl1271_test *wl_test;
-	int ret = 0;
-
-	/* wl1271 has 2 sdio functions we handle just the wlan part */
-	if (func->num != 0x02)
-		return -ENODEV;
-
-	wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
-	if (!wl_test) {
-		dev_err(&func->dev, "Could not allocate memory\n");
-		return -ENOMEM;
-	}
-
-	wl = &wl_test->wl;
-
-	wl->if_priv = func;
-	wl->if_ops = &sdio_ops;
-
-	/* Grab access to FN0 for ELP reg. */
-	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
-	/* Use block mode for transferring over one block size of data */
-	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
-	wlan_data = wl12xx_get_platform_data();
-	if (IS_ERR(wlan_data)) {
-		ret = PTR_ERR(wlan_data);
-		dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
-		goto out_free;
-	}
-
-	wl->irq = wlan_data->irq;
-	wl->ref_clock = wlan_data->board_ref_clock;
-	wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
-	sdio_set_drvdata(func, wl_test);
-
-	/* power up the device */
-	ret = wl1271_chip_wakeup(wl);
-	if (ret) {
-		dev_err(&func->dev, "could not wake up chip\n");
-		goto out_free;
-	}
-
-	if (wl->fw == NULL) {
-		ret = wl1271_fetch_firmware(wl);
-		if (ret < 0) {
-			dev_err(&func->dev, "firmware fetch error\n");
-			goto out_off;
-		}
-	}
-
-	/* fetch NVS */
-	if (wl->nvs == NULL) {
-		ret = wl1271_fetch_nvs(wl);
-		if (ret < 0) {
-			dev_err(&func->dev, "NVS fetch error\n");
-			goto out_off;
-		}
-	}
-
-	ret = wl1271_load_firmware(wl);
-	if (ret < 0) {
-		dev_err(&func->dev, "firmware load error: %d\n", ret);
-		goto out_free;
-	}
-
-	dev_info(&func->dev, "initialized\n");
-
-	/* I/O testing will be done in the tester thread */
-
-	wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
-	if (IS_ERR(wl_test->test_task)) {
-		dev_err(&func->dev, "unable to create kernel thread\n");
-		ret = PTR_ERR(wl_test->test_task);
-		goto out_free;
-	}
-
-	return 0;
-
-out_off:
-	/* power off the chip */
-	wl1271_power_off(wl);
-
-out_free:
-	kfree(wl_test);
-	return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
-	struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
-	/* stop the I/O test thread */
-	kthread_stop(wl_test->test_task);
-
-	/* power off the chip */
-	wl1271_power_off(&wl_test->wl);
-
-	vfree(wl_test->wl.fw);
-	wl_test->wl.fw = NULL;
-	kfree(wl_test->wl.nvs);
-	wl_test->wl.nvs = NULL;
-
-	kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
-	.name		= "wl12xx_sdio_test",
-	.id_table	= wl1271_devices,
-	.probe		= wl1271_probe,
-	.remove		= __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
-	int ret;
-
-	ret = sdio_register_driver(&wl1271_sdio_driver);
-	if (ret < 0)
-		pr_err("failed to register sdio driver: %d\n", ret);
-
-	return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
-	sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 0f97186..92caa7c 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -27,6 +27,7 @@
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include "wl12xx.h"
@@ -69,35 +70,22 @@
 
 #define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
 
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
-	return wl->if_priv;
-}
+struct wl12xx_spi_glue {
+	struct device *dev;
+	struct platform_device *core;
+};
 
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
 {
-	return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
-	disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
-	enable_irq(wl->irq);
-}
-
-static void wl1271_spi_reset(struct wl1271 *wl)
-{
+	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
 	u8 *cmd;
 	struct spi_transfer t;
 	struct spi_message m;
 
 	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 	if (!cmd) {
-		wl1271_error("could not allocate cmd for spi reset");
+		dev_err(child->parent,
+			"could not allocate cmd for spi reset\n");
 		return;
 	}
 
@@ -110,21 +98,22 @@
 	t.len = WSPI_INIT_CMD_LEN;
 	spi_message_add_tail(&t, &m);
 
-	spi_sync(wl_to_spi(wl), &m);
+	spi_sync(to_spi_device(glue->dev), &m);
 
-	wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
 	kfree(cmd);
 }
 
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
 {
+	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
 	u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
 	struct spi_transfer t;
 	struct spi_message m;
 
 	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 	if (!cmd) {
-		wl1271_error("could not allocate cmd for spi init");
+		dev_err(child->parent,
+			"could not allocate cmd for spi init\n");
 		return;
 	}
 
@@ -165,15 +154,16 @@
 	t.len = WSPI_INIT_CMD_LEN;
 	spi_message_add_tail(&t, &m);
 
-	spi_sync(wl_to_spi(wl), &m);
-	wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+	spi_sync(to_spi_device(glue->dev), &m);
 	kfree(cmd);
 }
 
 #define WL1271_BUSY_WORD_TIMEOUT 1000
 
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
 {
+	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+	struct wl1271 *wl = dev_get_drvdata(child);
 	struct spi_transfer t[1];
 	struct spi_message m;
 	u32 *busy_buf;
@@ -194,20 +184,22 @@
 		t[0].len = sizeof(u32);
 		t[0].cs_change = true;
 		spi_message_add_tail(&t[0], &m);
-		spi_sync(wl_to_spi(wl), &m);
+		spi_sync(to_spi_device(glue->dev), &m);
 
 		if (*busy_buf & 0x1)
 			return 0;
 	}
 
 	/* The SPI bus is unresponsive, the read failed. */
-	wl1271_error("SPI read busy-word timeout!\n");
+	dev_err(child->parent, "SPI read busy-word timeout!\n");
 	return -ETIMEDOUT;
 }
 
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
 				size_t len, bool fixed)
 {
+	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+	struct wl1271 *wl = dev_get_drvdata(child);
 	struct spi_transfer t[2];
 	struct spi_message m;
 	u32 *busy_buf;
@@ -243,10 +235,10 @@
 		t[1].cs_change = true;
 		spi_message_add_tail(&t[1], &m);
 
-		spi_sync(wl_to_spi(wl), &m);
+		spi_sync(to_spi_device(glue->dev), &m);
 
 		if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
-		    wl1271_spi_read_busy(wl)) {
+		    wl12xx_spi_read_busy(child)) {
 			memset(buf, 0, chunk_len);
 			return;
 		}
@@ -259,10 +251,7 @@
 		t[0].cs_change = true;
 		spi_message_add_tail(&t[0], &m);
 
-		spi_sync(wl_to_spi(wl), &m);
-
-		wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
-		wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+		spi_sync(to_spi_device(glue->dev), &m);
 
 		if (!fixed)
 			addr += chunk_len;
@@ -271,9 +260,10 @@
 	}
 }
 
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
-			  size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+				 size_t len, bool fixed)
 {
+	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
 	struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
 	struct spi_message m;
 	u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@
 		t[i].len = chunk_len;
 		spi_message_add_tail(&t[i++], &m);
 
-		wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
-		wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
 		if (!fixed)
 			addr += chunk_len;
 		buf += chunk_len;
@@ -318,72 +305,41 @@
 		cmd++;
 	}
 
-	spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
-	struct wl1271 *wl = cookie;
-	unsigned long flags;
-
-	wl1271_debug(DEBUG_IRQ, "IRQ");
-
-	/* complete the ELP completion */
-	spin_lock_irqsave(&wl->wl_lock, flags);
-	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-	if (wl->elp_compl) {
-		complete(wl->elp_compl);
-		wl->elp_compl = NULL;
-	}
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-	return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
-	if (wl->set_power)
-		wl->set_power(enable);
-
-	return 0;
+	spi_sync(to_spi_device(glue->dev), &m);
 }
 
 static struct wl1271_if_operations spi_ops = {
-	.read		= wl1271_spi_raw_read,
-	.write		= wl1271_spi_raw_write,
-	.reset		= wl1271_spi_reset,
-	.init		= wl1271_spi_init,
-	.power		= wl1271_spi_set_power,
-	.dev		= wl1271_spi_wl_to_dev,
-	.enable_irq	= wl1271_spi_enable_interrupts,
-	.disable_irq	= wl1271_spi_disable_interrupts,
+	.read		= wl12xx_spi_raw_read,
+	.write		= wl12xx_spi_raw_write,
+	.reset		= wl12xx_spi_reset,
+	.init		= wl12xx_spi_init,
 	.set_block_size = NULL,
 };
 
 static int __devinit wl1271_probe(struct spi_device *spi)
 {
+	struct wl12xx_spi_glue *glue;
 	struct wl12xx_platform_data *pdata;
-	struct ieee80211_hw *hw;
-	struct wl1271 *wl;
-	unsigned long irqflags;
-	int ret;
+	struct resource res[1];
+	int ret = -ENOMEM;
 
 	pdata = spi->dev.platform_data;
 	if (!pdata) {
-		wl1271_error("no platform data");
+		dev_err(&spi->dev, "no platform data\n");
 		return -ENODEV;
 	}
 
-	hw = wl1271_alloc_hw();
-	if (IS_ERR(hw))
-		return PTR_ERR(hw);
+	pdata->ops = &spi_ops;
 
-	wl = hw->priv;
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&spi->dev, "can't allocate glue\n");
+		goto out;
+	}
 
-	dev_set_drvdata(&spi->dev, wl);
-	wl->if_priv = spi;
+	glue->dev = &spi->dev;
 
-	wl->if_ops = &spi_ops;
+	spi_set_drvdata(spi, glue);
 
 	/* This is the only SPI value that we need to set here, the rest
 	 * comes from the board-peripherals file */
@@ -391,69 +347,61 @@
 
 	ret = spi_setup(spi);
 	if (ret < 0) {
-		wl1271_error("spi_setup failed");
-		goto out_free;
+		dev_err(glue->dev, "spi_setup failed\n");
+		goto out_free_glue;
 	}
 
-	wl->set_power = pdata->set_power;
-	if (!wl->set_power) {
-		wl1271_error("set power function missing in platform data");
-		ret = -ENODEV;
-		goto out_free;
+	glue->core = platform_device_alloc("wl12xx", -1);
+	if (!glue->core) {
+		dev_err(glue->dev, "can't allocate platform_device\n");
+		ret = -ENOMEM;
+		goto out_free_glue;
 	}
 
-	wl->ref_clock = pdata->board_ref_clock;
-	wl->tcxo_clock = pdata->board_tcxo_clock;
-	wl->platform_quirks = pdata->platform_quirks;
+	glue->core->dev.parent = &spi->dev;
 
-	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
-		irqflags = IRQF_TRIGGER_RISING;
-	else
-		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+	memset(res, 0x00, sizeof(res));
 
-	wl->irq = spi->irq;
-	if (wl->irq < 0) {
-		wl1271_error("irq missing in platform data");
-		ret = -ENODEV;
-		goto out_free;
+	res[0].start = spi->irq;
+	res[0].flags = IORESOURCE_IRQ;
+	res[0].name = "irq";
+
+	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+	if (ret) {
+		dev_err(glue->dev, "can't add resources\n");
+		goto out_dev_put;
 	}
 
-	ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
-				   irqflags,
-				   DRIVER_NAME, wl);
-	if (ret < 0) {
-		wl1271_error("request_irq() failed: %d", ret);
-		goto out_free;
+	ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(glue->dev, "can't add platform data\n");
+		goto out_dev_put;
 	}
 
-	disable_irq(wl->irq);
-
-	ret = wl1271_init_ieee80211(wl);
-	if (ret)
-		goto out_irq;
-
-	ret = wl1271_register_hw(wl);
-	if (ret)
-		goto out_irq;
+	ret = platform_device_add(glue->core);
+	if (ret) {
+		dev_err(glue->dev, "can't register platform device\n");
+		goto out_dev_put;
+	}
 
 	return 0;
 
- out_irq:
-	free_irq(wl->irq, wl);
+out_dev_put:
+	platform_device_put(glue->core);
 
- out_free:
-	wl1271_free_hw(wl);
-
+out_free_glue:
+	kfree(glue);
+out:
 	return ret;
 }
 
 static int __devexit wl1271_remove(struct spi_device *spi)
 {
-	struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+	struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
 
-	wl1271_unregister_hw(wl);
-	free_irq(wl->irq, wl);
-	wl1271_free_hw(wl);
+	platform_device_del(glue->core);
+	platform_device_put(glue->core);
+	kfree(glue);
 
 	return 0;
 }
@@ -462,7 +410,6 @@
 static struct spi_driver wl1271_spi_driver = {
 	.driver = {
 		.name		= "wl1271_spi",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
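The SPI code is reduced to bus glue here: instead of owning a struct wl1271, the probe routine registers a child "wl12xx" platform device, passing the IRQ as a resource and the platform data (which now carries the wl1271_if_operations) along with it. The ops only take the child struct device, so the core can stay bus-agnostic. A minimal sketch of how a core-side wrapper might call such ops; the helper name is illustrative and wl->dev is assumed to point at the child platform device (this is not the driver's actual io.h code):

	/* Sketch only: core-side wrapper over bus ops that take the child device. */
	static inline void wlcore_raw_read(struct wl1271 *wl, int addr,
					   void *buf, size_t len, bool fixed)
	{
		wl->if_ops->read(wl->dev, addr, buf, len, fixed);
	}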
 
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 4ae8eff..25093c0 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -26,8 +26,10 @@
 #include <net/genetlink.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
+#include "ps.h"
 
 #define WL1271_TM_MAX_DATA_LENGTH 1024
 
@@ -36,6 +38,7 @@
 	WL1271_TM_CMD_TEST,
 	WL1271_TM_CMD_INTERROGATE,
 	WL1271_TM_CMD_CONFIGURE,
+	WL1271_TM_CMD_NVS_PUSH,		/* Not in use. Keep to not break ABI */
 	WL1271_TM_CMD_SET_PLT_MODE,
 	WL1271_TM_CMD_RECOVER,
 
@@ -87,31 +90,47 @@
 		return -EMSGSIZE;
 
 	mutex_lock(&wl->mutex);
-	ret = wl1271_cmd_test(wl, buf, buf_len, answer);
-	mutex_unlock(&wl->mutex);
 
+	if (wl->state == WL1271_STATE_OFF) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wl1271_cmd_test(wl, buf, buf_len, answer);
 	if (ret < 0) {
 		wl1271_warning("testmode cmd test failed: %d", ret);
-		return ret;
+		goto out_sleep;
 	}
 
 	if (answer) {
 		len = nla_total_size(buf_len);
 		skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
-		if (!skb)
-			return -ENOMEM;
+		if (!skb) {
+			ret = -ENOMEM;
+			goto out_sleep;
+		}
 
 		NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
 		ret = cfg80211_testmode_reply(skb);
 		if (ret < 0)
-			return ret;
+			goto out_sleep;
 	}
 
-	return 0;
+out_sleep:
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+
+	return ret;
 
 nla_put_failure:
 	kfree_skb(skb);
-	return -EMSGSIZE;
+	ret = -EMSGSIZE;
+	goto out_sleep;
 }
 
 static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@
 
 	ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
 
-	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
 	mutex_lock(&wl->mutex);
-	ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
-	mutex_unlock(&wl->mutex);
 
+	if (wl->state == WL1271_STATE_OFF) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out_sleep;
+	}
+
+	ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
 	if (ret < 0) {
 		wl1271_warning("testmode cmd interrogate failed: %d", ret);
-		kfree(cmd);
-		return ret;
+		goto out_free;
 	}
 
 	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
 	if (!skb) {
-		kfree(cmd);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_free;
 	}
 
 	NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+	ret = cfg80211_testmode_reply(skb);
+	if (ret < 0)
+		goto out_free;
 
-	return 0;
+out_free:
+	kfree(cmd);
+out_sleep:
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+
+	return ret;
 
 nla_put_failure:
 	kfree_skb(skb);
-	return -EMSGSIZE;
+	ret = -EMSGSIZE;
+	goto out_free;
 }
 
 static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
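Both testmode handlers above now follow the same bracketing pattern: take wl->mutex, bail out if the chip is off, wake it from ELP before issuing the firmware command, and unwind through out_sleep/out labels so the chip is put back to sleep and the mutex dropped on every path. A condensed sketch of that pattern, with do_fw_command() standing in for wl1271_cmd_test()/wl1271_cmd_interrogate():

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);		/* chip may be in ELP sleep */
	if (ret < 0)
		goto out;

	ret = do_fw_command(wl);		/* placeholder for the real command */

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;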
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29..4508ccd 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
 #include <linux/etherdevice.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "io.h"
 #include "reg.h"
 #include "ps.h"
 #include "tx.h"
 #include "event.h"
 
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+				      struct wl12xx_vif *wlvif, u8 id)
 {
 	int ret;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
 	if (is_ap)
 		ret = wl12xx_cmd_set_default_wep_key(wl, id,
-						     wl->ap_bcast_hlid);
+						     wlvif->ap.bcast_hlid);
 	else
-		ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
 
 	if (ret < 0)
 		return ret;
@@ -76,7 +78,8 @@
 }
 
 static int wl1271_tx_update_filters(struct wl1271 *wl,
-						 struct sk_buff *skb)
+				    struct wl12xx_vif *wlvif,
+				    struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 	int ret;
@@ -92,15 +95,11 @@
 	if (!ieee80211_is_auth(hdr->frame_control))
 		return 0;
 
-	if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+	if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
 		goto out;
 
 	wl1271_debug(DEBUG_CMD, "starting device role for roaming");
-	ret = wl12xx_cmd_role_start_dev(wl);
-	if (ret < 0)
-		goto out;
-
-	ret = wl12xx_roc(wl, wl->dev_role_id);
+	ret = wl12xx_start_dev(wl, wlvif);
 	if (ret < 0)
 		goto out;
 out:
@@ -123,18 +122,16 @@
 		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
 }
 
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif,
+				    u8 hlid)
 {
 	bool fw_ps, single_sta;
 	u8 tx_pkts;
 
-	/* only regulate station links */
-	if (hlid < WL1271_AP_STA_HLID_START)
+	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
 		return;
 
-	if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
-	    return;
-
 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
 	tx_pkts = wl->links[hlid].allocated_pkts;
 	single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@
 	 * case FW-memory congestion is not a problem.
 	 */
 	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-		wl1271_ps_link_start(wl, hlid, true);
+		wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@
 	return wl->dummy_packet == skb;
 }
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			 struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
 
@@ -167,49 +165,51 @@
 	} else {
 		struct ieee80211_hdr *hdr;
 
-		if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
 			return wl->system_hlid;
 
 		hdr = (struct ieee80211_hdr *)skb->data;
 		if (ieee80211_is_mgmt(hdr->frame_control))
-			return wl->ap_global_hlid;
+			return wlvif->ap.global_hlid;
 		else
-			return wl->ap_bcast_hlid;
+			return wlvif->ap.bcast_hlid;
 	}
 }
 
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-	if (wl12xx_is_dummy_packet(wl, skb))
+	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
 		return wl->system_hlid;
 
-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		return wl12xx_tx_get_hlid_ap(wl, skb);
+	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
 
-	wl1271_tx_update_filters(wl, skb);
+	wl1271_tx_update_filters(wl, wlvif, skb);
 
-	if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-	     test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
 	    !ieee80211_is_auth(hdr->frame_control) &&
 	    !ieee80211_is_assoc_req(hdr->frame_control))
-		return wl->sta_hlid;
+		return wlvif->sta.hlid;
 	else
-		return wl->dev_hlid;
+		return wlvif->dev_hlid;
 }
 
 static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
 						unsigned int packet_length)
 {
-	if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
-		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
-	else
+	if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+	else
+		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
 }
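The block-size quirk is inverted: padding the aggregate to the bus block size is now the default, and only configurations that cannot do it set WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT. With purely hypothetical sizes of a 512-byte bus block and 4-byte TX alignment, the difference for a 1234-byte frame looks like this:

	/* Hypothetical sizes, for illustration only. */
	size_t blk   = ALIGN(1234, 512);	/* 1536: padded to the bus block size */
	size_t plain = ALIGN(1234, 4);		/* 1236: plain 4-byte TX alignment */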
 
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
-				u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			      struct sk_buff *skb, u32 extra, u32 buf_offset,
+			      u8 hlid)
 {
 	struct wl1271_tx_hw_descr *desc;
 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@
 	u32 total_blocks;
 	int id, ret = -EBUSY, ac;
 	u32 spare_blocks = wl->tx_spare_blocks;
+	bool is_dummy = false;
 
 	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
 		return -EAGAIN;
@@ -231,8 +232,10 @@
 	len = wl12xx_calc_packet_alignment(wl, total_len);
 
 	/* in case of a dummy packet, use default amount of spare mem blocks */
-	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+	if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+		is_dummy = true;
 		spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+	}
 
 	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
 		spare_blocks;
@@ -257,8 +260,9 @@
 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		wl->tx_allocated_pkts[ac]++;
 
-		if (wl->bss_type == BSS_TYPE_AP_BSS &&
-		    hlid >= WL1271_AP_STA_HLID_START)
+		if (!is_dummy && wlvif &&
+		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
+		    test_bit(hlid, wlvif->ap.sta_hlid_map))
 			wl->links[hlid].allocated_pkts++;
 
 		ret = 0;
@@ -273,15 +277,16 @@
 	return ret;
 }
 
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
-			      u32 extra, struct ieee80211_tx_info *control,
-			      u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			       struct sk_buff *skb, u32 extra,
+			       struct ieee80211_tx_info *control, u8 hlid)
 {
 	struct timespec ts;
 	struct wl1271_tx_hw_descr *desc;
 	int aligned_len, ac, rate_idx;
 	s64 hosttime;
-	u16 tx_attr;
+	u16 tx_attr = 0;
+	bool is_dummy;
 
 	desc = (struct wl1271_tx_hw_descr *) skb->data;
 
@@ -298,7 +303,8 @@
 	hosttime = (timespec_to_ns(&ts) >> 10);
 	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
 
-	if (wl->bss_type != BSS_TYPE_AP_BSS)
+	is_dummy = wl12xx_is_dummy_packet(wl, skb);
+	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
 		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
 	else
 		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@
 	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 	desc->tid = skb->priority;
 
-	if (wl12xx_is_dummy_packet(wl, skb)) {
+	if (is_dummy) {
 		/*
 		 * FW expects the dummy packet to have an invalid session id -
 		 * any session id that is different than the one set in the join
 		 */
-		tx_attr = ((~wl->session_counter) <<
+		tx_attr = (SESSION_COUNTER_INVALID <<
 			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
 			   TX_HW_ATTR_SESSION_COUNTER;
 
 		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
-	} else {
+	} else if (wlvif) {
 		/* configure the tx attributes */
-		tx_attr =
-			wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+		tx_attr = wlvif->session_counter <<
+			  TX_HW_ATTR_OFST_SESSION_COUNTER;
 	}
 
 	desc->hlid = hlid;
-
-	if (wl->bss_type != BSS_TYPE_AP_BSS) {
+	if (is_dummy || !wlvif)
+		rate_idx = 0;
+	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
 		/* if the packets are destined for AP (have a STA entry)
 		   send them with AP rate policies, otherwise use default
 		   basic rates */
-		if (control->control.sta)
-			rate_idx = ACX_TX_AP_FULL_RATE;
+		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+			rate_idx = wlvif->sta.p2p_rate_idx;
+		else if (control->control.sta)
+			rate_idx = wlvif->sta.ap_rate_idx;
 		else
-			rate_idx = ACX_TX_BASIC_RATE;
+			rate_idx = wlvif->sta.basic_rate_idx;
 	} else {
-		if (hlid == wl->ap_global_hlid)
-			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
-		else if (hlid == wl->ap_bcast_hlid)
-			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+		if (hlid == wlvif->ap.global_hlid)
+			rate_idx = wlvif->ap.mgmt_rate_idx;
+		else if (hlid == wlvif->ap.bcast_hlid)
+			rate_idx = wlvif->ap.bcast_rate_idx;
 		else
-			rate_idx = ac;
+			rate_idx = wlvif->ap.ucast_rate_idx[ac];
 	}
 
 	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@
 }
 
 /* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
-							u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				   struct sk_buff *skb, u32 buf_offset)
 {
 	struct ieee80211_tx_info *info;
 	u32 extra = 0;
 	int ret = 0;
 	u32 total_len;
 	u8 hlid;
+	bool is_dummy;
 
 	if (!skb)
 		return -EINVAL;
 
 	info = IEEE80211_SKB_CB(skb);
 
+	/* TODO: handle dummy packets on multi-vifs */
+	is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
 	if (info->control.hw_key &&
 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
 		extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@
 		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
 			 (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-		if (unlikely(is_wep && wl->default_key != idx)) {
-			ret = wl1271_set_default_wep_key(wl, idx);
+		if (unlikely(is_wep && wlvif->default_key != idx)) {
+			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
 			if (ret < 0)
 				return ret;
-			wl->default_key = idx;
+			wlvif->default_key = idx;
 		}
 	}
-
-	hlid = wl1271_tx_get_hlid(wl, skb);
+	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 	if (hlid == WL12XX_INVALID_LINK_ID) {
 		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
 		return -EINVAL;
 	}
 
-	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
 	if (ret < 0)
 		return ret;
 
-	wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
-	if (wl->bss_type == BSS_TYPE_AP_BSS) {
+	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
 		wl1271_tx_ap_update_inconnection_sta(wl, skb);
-		wl1271_tx_regulate_link(wl, hlid);
+		wl1271_tx_regulate_link(wl, wlvif, hlid);
 	}
 
 	/*
@@ -444,7 +456,7 @@
 	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
 
 	/* Revert side effects in the dummy packet skb, so it can be reused */
-	if (wl12xx_is_dummy_packet(wl, skb))
+	if (is_dummy)
 		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
 
 	return total_len;
@@ -522,19 +534,18 @@
 	return &queues[q];
 }
 
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+					      struct wl1271_link *lnk)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	unsigned long flags;
 	struct sk_buff_head *queue;
 
-	queue = wl1271_select_queue(wl, wl->tx_queue);
+	queue = wl1271_select_queue(wl, lnk->tx_queue);
 	if (!queue)
-		goto out;
+		return NULL;
 
 	skb = skb_dequeue(queue);
-
-out:
 	if (skb) {
 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@
 	return skb;
 }
 
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long flags;
 	int i, h, start_hlid;
-	struct sk_buff_head *queue;
 
 	/* start from the link after the last one */
-	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
 
 	/* dequeue according to AC, round robin on each link */
-	for (i = 0; i < AP_MAX_LINKS; i++) {
-		h = (start_hlid + i) % AP_MAX_LINKS;
+	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+		h = (start_hlid + i) % WL12XX_MAX_LINKS;
 
 		/* only consider connected stations */
-		if (h >= WL1271_AP_STA_HLID_START &&
-		    !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+		if (!test_bit(h, wlvif->links_map))
 			continue;
 
-		queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
-		if (!queue)
+		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+		if (!skb)
 			continue;
 
-		skb = skb_dequeue(queue);
-		if (skb)
-			break;
+		wlvif->last_tx_hlid = h;
+		break;
 	}
 
-	if (skb) {
-		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-		wl->last_tx_hlid = h;
-		spin_lock_irqsave(&wl->wl_lock, flags);
-		wl->tx_queue_count[q]--;
-		spin_unlock_irqrestore(&wl->wl_lock, flags);
-	} else {
-		wl->last_tx_hlid = 0;
-	}
+	if (!skb)
+		wlvif->last_tx_hlid = 0;
 
 	return skb;
 }
@@ -589,12 +590,32 @@
 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 {
 	unsigned long flags;
+	struct wl12xx_vif *wlvif = wl->last_wlvif;
 	struct sk_buff *skb = NULL;
 
-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		skb = wl1271_ap_skb_dequeue(wl);
-	else
-		skb = wl1271_sta_skb_dequeue(wl);
+	if (wlvif) {
+		wl12xx_for_each_wlvif_continue(wl, wlvif) {
+			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			if (skb) {
+				wl->last_wlvif = wlvif;
+				break;
+			}
+		}
+	}
+
+	/* do another pass */
+	if (!skb) {
+		wl12xx_for_each_wlvif(wl, wlvif) {
+			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			if (skb) {
+				wl->last_wlvif = wlvif;
+				break;
+			}
+		}
+	}
+
+	if (!skb)
+		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
 
 	if (!skb &&
 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@
 	return skb;
 }
 
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				  struct sk_buff *skb)
 {
 	unsigned long flags;
 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
 	if (wl12xx_is_dummy_packet(wl, skb)) {
 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
-	} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		u8 hlid = wl1271_tx_get_hlid(wl, skb);
+	} else {
+		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
 		/* make sure we dequeue the same packet next time */
-		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
-	} else {
-		skb_queue_head(&wl->tx_queue[q], skb);
+		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+				      WL12XX_MAX_LINKS;
 	}
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@
 	return ieee80211_is_data_present(hdr->frame_control);
 }
 
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+	struct wl12xx_vif *wlvif;
+	u32 timeout;
+	u8 hlid;
+
+	if (!wl->conf.rx_streaming.interval)
+		return;
+
+	if (!wl->conf.rx_streaming.always &&
+	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+		return;
+
+	timeout = wl->conf.rx_streaming.duration;
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		bool found = false;
+		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+			if (test_bit(hlid, wlvif->links_map)) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found)
+			continue;
+
+		/* enable rx streaming */
+		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+			ieee80211_queue_work(wl->hw,
+					     &wlvif->rx_streaming_enable_work);
+
+		mod_timer(&wlvif->rx_streaming_timer,
+			  jiffies + msecs_to_jiffies(timeout));
+	}
+}
+
 void wl1271_tx_work_locked(struct wl1271 *wl)
 {
+	struct wl12xx_vif *wlvif;
 	struct sk_buff *skb;
+	struct wl1271_tx_hw_descr *desc;
 	u32 buf_offset = 0;
 	bool sent_packets = false;
-	bool had_data = false;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
 	int ret;
 
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		return;
 
 	while ((skb = wl1271_skb_dequeue(wl))) {
-		if (wl1271_tx_is_data_present(skb))
-			had_data = true;
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		bool has_data = false;
 
-		ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+		wlvif = NULL;
+		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+			wlvif = wl12xx_vif_to_data(info->control.vif);
+
+		has_data = wlvif && wl1271_tx_is_data_present(skb);
+		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
 		if (ret == -EAGAIN) {
 			/*
 			 * Aggregation buffer is full.
 			 * Flush buffer and try again.
 			 */
-			wl1271_skb_queue_head(wl, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb);
 			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
 				     buf_offset, true);
 			sent_packets = true;
@@ -672,16 +735,27 @@
 			 * Firmware buffer is full.
 			 * Queue back last skb, and stop aggregating.
 			 */
-			wl1271_skb_queue_head(wl, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb);
 			/* No work left, avoid scheduling redundant tx work */
 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 			goto out_ack;
 		} else if (ret < 0) {
-			dev_kfree_skb(skb);
+			if (wl12xx_is_dummy_packet(wl, skb))
+				/*
+				 * fw still expects dummy packet,
+				 * so re-enqueue it
+				 */
+				wl1271_skb_queue_head(wl, wlvif, skb);
+			else
+				ieee80211_free_txskb(wl->hw, skb);
 			goto out_ack;
 		}
 		buf_offset += ret;
 		wl->tx_packets_count++;
+		if (has_data) {
+			desc = (struct wl1271_tx_hw_descr *) skb->data;
+			__set_bit(desc->hlid, active_hlids);
+		}
 	}
 
 out_ack:
@@ -701,19 +775,7 @@
 
 		wl1271_handle_tx_low_watermark(wl);
 	}
-	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-	    (wl->conf.rx_streaming.always ||
-	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-		u32 timeout = wl->conf.rx_streaming.duration;
-
-		/* enable rx streaming */
-		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-			ieee80211_queue_work(wl->hw,
-					     &wl->rx_streaming_enable_work);
-
-		mod_timer(&wl->rx_streaming_timer,
-			  jiffies + msecs_to_jiffies(timeout));
-	}
+	wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
 
 void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@
 				      struct wl1271_tx_hw_res_descr *result)
 {
 	struct ieee80211_tx_info *info;
+	struct ieee80211_vif *vif;
+	struct wl12xx_vif *wlvif;
 	struct sk_buff *skb;
 	int id = result->id;
 	int rate = -1;
@@ -756,11 +820,16 @@
 		return;
 	}
 
+	/* info->control is valid as long as we don't update info->status */
+	vif = info->control.vif;
+	wlvif = wl12xx_vif_to_data(vif);
+
 	/* update the TX status info */
 	if (result->status == TX_SUCCESS) {
 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 			info->flags |= IEEE80211_TX_STAT_ACK;
-		rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+		rate = wl1271_rate_to_idx(result->rate_class_index,
+					  wlvif->band);
 		retries = result->ack_failures;
 	} else if (result->status == TX_RETRY_EXCEEDED) {
 		wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@
 	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
 	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
 		u8 fw_lsb = result->tx_security_sequence_number_lsb;
-		u8 cur_lsb = wl->tx_security_last_seq_lsb;
+		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
 
 		/*
 		 * update security sequence number, taking care of potential
 		 * wrap-around
 		 */
-		wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
-		wl->tx_security_last_seq_lsb = fw_lsb;
+		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+		wlvif->tx_security_last_seq_lsb = fw_lsb;
 	}
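For u8 operands the new expression is equivalent to the old (fw_lsb - cur_lsb + 256) % 256, but masking with 0xff states the wrap-around handling more directly. A small worked example:

	u8 cur_lsb = 0xf0, fw_lsb = 0x05;
	/* fw counter wrapped: 0xf0..0xff then 0x00..0x05 is 21 frames */
	u32 delta = (fw_lsb - cur_lsb) & 0xff;		/* 21 */
	u32 old   = (fw_lsb - cur_lsb + 256) % 256;	/* also 21 */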
 
 	/* remove private header from packet */
@@ -886,40 +955,31 @@
 }
 
 /* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	int i;
+
+	/* TX failure */
+	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+			wl1271_free_sta(wl, wlvif, i);
+		else
+			wlvif->sta.ba_rx_bitmap = 0;
+
+		wl1271_tx_reset_link_queues(wl, i);
+		wl->links[i].allocated_pkts = 0;
+		wl->links[i].prev_freed_pkts = 0;
+	}
+	wlvif->last_tx_hlid = 0;
+}
+
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
 {
 	int i;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
 
-	/* TX failure */
-	if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		for (i = 0; i < AP_MAX_LINKS; i++) {
-			wl1271_free_sta(wl, i);
-			wl1271_tx_reset_link_queues(wl, i);
-			wl->links[i].allocated_pkts = 0;
-			wl->links[i].prev_freed_pkts = 0;
-		}
-
-		wl->last_tx_hlid = 0;
-	} else {
-		for (i = 0; i < NUM_TX_QUEUES; i++) {
-			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
-					     skb);
-
-				if (!wl12xx_is_dummy_packet(wl, skb)) {
-					info = IEEE80211_SKB_CB(skb);
-					info->status.rates[0].idx = -1;
-					info->status.rates[0].count = 0;
-					ieee80211_tx_status_ni(wl->hw, skb);
-				}
-			}
-		}
-
-		wl->ba_rx_bitmap = 0;
-	}
-
 	for (i = 0; i < NUM_TX_QUEUES; i++)
 		wl->tx_queue_count[i] = 0;
 
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index dc4f09a..2dbb24e 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,18 +206,23 @@
 void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 				enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			 struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      struct sk_buff *skb);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
 
 /* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
 
 #endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1ec90fc7..b2b09cd 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,83 +35,6 @@
 #include "conf.h"
 #include "ini.h"
 
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
-/*
- * FW versions support BA 11n
- * versions marks x.x.x.50-60.x
- */
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_START    50
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_END      60
-
-enum {
-	DEBUG_NONE	= 0,
-	DEBUG_IRQ	= BIT(0),
-	DEBUG_SPI	= BIT(1),
-	DEBUG_BOOT	= BIT(2),
-	DEBUG_MAILBOX	= BIT(3),
-	DEBUG_TESTMODE	= BIT(4),
-	DEBUG_EVENT	= BIT(5),
-	DEBUG_TX	= BIT(6),
-	DEBUG_RX	= BIT(7),
-	DEBUG_SCAN	= BIT(8),
-	DEBUG_CRYPT	= BIT(9),
-	DEBUG_PSM	= BIT(10),
-	DEBUG_MAC80211	= BIT(11),
-	DEBUG_CMD	= BIT(12),
-	DEBUG_ACX	= BIT(13),
-	DEBUG_SDIO	= BIT(14),
-	DEBUG_FILTERS   = BIT(15),
-	DEBUG_ADHOC     = BIT(16),
-	DEBUG_AP	= BIT(17),
-	DEBUG_MASTER	= (DEBUG_ADHOC | DEBUG_AP),
-	DEBUG_ALL	= ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
-	pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
-	pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
-	pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
-	pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
-	do { \
-		if (level & wl12xx_debug_level) \
-			pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
-	} while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len)	\
-	do { \
-		if (level & wl12xx_debug_level) \
-			print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
-				       DUMP_PREFIX_OFFSET, 16, 1,	\
-				       buf,				\
-				       min_t(size_t, len, DEBUG_DUMP_LIMIT), \
-				       0);				\
-	} while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len)	\
-	do { \
-		if (level & wl12xx_debug_level) \
-			print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
-				       DUMP_PREFIX_OFFSET, 16, 1,	\
-				       buf,				\
-				       min_t(size_t, len, DEBUG_DUMP_LIMIT), \
-				       true);				\
-	} while (0)
-
 #define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
 #define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
 
@@ -142,16 +65,12 @@
 #define WL12XX_INVALID_ROLE_ID     0xff
 #define WL12XX_INVALID_LINK_ID     0xff
 
+#define WL12XX_MAX_RATE_POLICIES 16
+
 /* Defined by FW as 0. Will not be freed or allocated. */
 #define WL12XX_SYSTEM_HLID         0
 
 /*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START   3
-
-/*
  * When in AP-mode, we allow (at least) this number of packets
  * to be transmitted to FW for a STA in PS-mode. Only when packets are
  * present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +155,6 @@
 
 #define AP_MAX_STATIONS            8
 
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS               (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
 /* FW status registers */
 struct wl12xx_fw_status {
 	__le32 intr;
@@ -299,17 +211,14 @@
 };
 
 struct wl1271_if_operations {
-	void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+	void (*read)(struct device *child, int addr, void *buf, size_t len,
 		     bool fixed);
-	void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+	void (*write)(struct device *child, int addr, void *buf, size_t len,
 		     bool fixed);
-	void (*reset)(struct wl1271 *wl);
-	void (*init)(struct wl1271 *wl);
-	int (*power)(struct wl1271 *wl, bool enable);
-	struct device* (*dev)(struct wl1271 *wl);
-	void (*enable_irq)(struct wl1271 *wl);
-	void (*disable_irq)(struct wl1271 *wl);
-	void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+	void (*reset)(struct device *child);
+	void (*init)(struct device *child);
+	int (*power)(struct device *child, bool enable);
+	void (*set_block_size) (struct device *child, unsigned int blksz);
 };
 
 #define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@
 };
 
 enum wl12xx_flags {
-	WL1271_FLAG_STA_ASSOCIATED,
-	WL1271_FLAG_IBSS_JOINED,
 	WL1271_FLAG_GPIO_POWER,
 	WL1271_FLAG_TX_QUEUE_STOPPED,
 	WL1271_FLAG_TX_PENDING,
 	WL1271_FLAG_IN_ELP,
 	WL1271_FLAG_ELP_REQUESTED,
-	WL1271_FLAG_PSM,
-	WL1271_FLAG_PSM_REQUESTED,
 	WL1271_FLAG_IRQ_RUNNING,
-	WL1271_FLAG_IDLE,
-	WL1271_FLAG_PSPOLL_FAILURE,
-	WL1271_FLAG_STA_STATE_SENT,
 	WL1271_FLAG_FW_TX_BUSY,
-	WL1271_FLAG_AP_STARTED,
-	WL1271_FLAG_IF_INITIALIZED,
 	WL1271_FLAG_DUMMY_PACKET_PENDING,
 	WL1271_FLAG_SUSPENDED,
 	WL1271_FLAG_PENDING_WORK,
 	WL1271_FLAG_SOFT_GEMINI,
-	WL1271_FLAG_RX_STREAMING_STARTED,
 	WL1271_FLAG_RECOVERY_IN_PROGRESS,
-	WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+	WLVIF_FLAG_INITIALIZED,
+	WLVIF_FLAG_STA_ASSOCIATED,
+	WLVIF_FLAG_IBSS_JOINED,
+	WLVIF_FLAG_AP_STARTED,
+	WLVIF_FLAG_PSM,
+	WLVIF_FLAG_PSM_REQUESTED,
+	WLVIF_FLAG_STA_STATE_SENT,
+	WLVIF_FLAG_RX_STREAMING_STARTED,
+	WLVIF_FLAG_PSPOLL_FAILURE,
+	WLVIF_FLAG_CS_PROGRESS,
+	WLVIF_FLAG_AP_PROBE_RESP_SET,
+	WLVIF_FLAG_IN_USE,
 };
 
 struct wl1271_link {
@@ -366,10 +279,11 @@
 };
 
 struct wl1271 {
-	struct platform_device *plat_dev;
 	struct ieee80211_hw *hw;
 	bool mac80211_registered;
 
+	struct device *dev;
+
 	void *if_priv;
 
 	struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@
 
 	s8 hw_pg_ver;
 
-	u8 bssid[ETH_ALEN];
 	u8 mac_addr[ETH_ALEN];
-	u8 bss_type;
-	u8 set_bss_type;
-	u8 p2p; /* we are using p2p role */
-	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
-	u8 ssid_len;
 	int channel;
-	u8 role_id;
-	u8 dev_role_id;
 	u8 system_hlid;
-	u8 sta_hlid;
-	u8 dev_hlid;
-	u8 ap_global_hlid;
-	u8 ap_bcast_hlid;
 
 	unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
 	unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
 	unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+	unsigned long rate_policies_map[
+			BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+	struct list_head wlvif_list;
+
+	u8 sta_count;
+	u8 ap_count;
 
 	struct wl1271_acx_mem_map *target_mem_map;
 
@@ -440,11 +349,7 @@
 	/* Time-offset between host and chipset clocks */
 	s64 time_offset;
 
-	/* Session counter for the chipset */
-	int session_counter;
-
 	/* Frames scheduled for transmission, not handled yet */
-	struct sk_buff_head tx_queue[NUM_TX_QUEUES];
 	int tx_queue_count[NUM_TX_QUEUES];
 	long stopped_queues_map;
 
@@ -462,17 +367,6 @@
 	struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
 	int tx_frames_cnt;
 
-	/*
-	 * Security sequence number
-	 *     bits 0-15: lower 16 bits part of sequence number
-	 *     bits 16-47: higher 32 bits part of sequence number
-	 *     bits 48-63: not in use
-	 */
-	u64 tx_security_seq;
-
-	/* 8 bits of the last sequence number in use */
-	u8 tx_security_last_seq_lsb;
-
 	/* FW Rx counter */
 	u32 rx_counter;
 
@@ -507,59 +401,21 @@
 	u32 mbox_ptr[2];
 
 	/* Are we currently scanning */
+	struct ieee80211_vif *scan_vif;
 	struct wl1271_scan scan;
 	struct delayed_work scan_complete_work;
 
 	bool sched_scanning;
 
-	/* probe-req template for the current AP */
-	struct sk_buff *probereq;
-
-	/* Our association ID */
-	u16 aid;
-
-	/*
-	 * currently configured rate set:
-	 *	bits  0-15 - 802.11abg rates
-	 *	bits 16-23 - 802.11n   MCS index mask
-	 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
-	 */
-	u32 basic_rate_set;
-	u32 basic_rate;
-	u32 rate_set;
-	u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
 	/* The current band */
 	enum ieee80211_band band;
 
-	/* Beaconing interval (needed for ad-hoc) */
-	u32 beacon_int;
-
-	/* Default key (for WEP) */
-	u32 default_key;
-
-	/* Rx Streaming */
-	struct work_struct rx_streaming_enable_work;
-	struct work_struct rx_streaming_disable_work;
-	struct timer_list rx_streaming_timer;
-
 	struct completion *elp_compl;
-	struct completion *ps_compl;
 	struct delayed_work elp_work;
-	struct delayed_work pspoll_work;
-
-	/* counter for ps-poll delivery failures */
-	int ps_poll_failures;
-
-	/* retry counter for PSM entries */
-	u8 psm_entry_retry;
 
 	/* in dBm */
 	int power_level;
 
-	int rssi_thold;
-	int last_rssi_event;
-
 	struct wl1271_stats stats;
 
 	__le32 buffer_32;
@@ -583,20 +439,9 @@
 	/* Most recently reported noise in dBm */
 	s8 noise;
 
-	/* map for HLIDs of associated stations - when operating in AP mode */
-	unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
-	/* recoreded keys for AP-mode - set here before AP startup */
-	struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
 	/* bands supported by this instance of wl12xx */
 	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
 
-	/* RX BA constraint value */
-	bool ba_support;
-	u8 ba_rx_bitmap;
-	bool ba_allowed;
-
 	int tcxo_clock;
 
 	/*
@@ -610,10 +455,7 @@
 	 * AP-mode - links indexed by HLID. The global and broadcast links
 	 * are always active.
 	 */
-	struct wl1271_link links[AP_MAX_LINKS];
-
-	/* the hlid of the link where the last transmitted skb came from */
-	int last_tx_hlid;
+	struct wl1271_link links[WL12XX_MAX_LINKS];
 
 	/* AP-mode - a bitmap of links currently in PS mode according to FW */
 	u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@
 
 	/* AP-mode - number of currently connected stations */
 	int active_sta_count;
+
+	/* last wlvif we transmitted from */
+	struct wl12xx_vif *last_wlvif;
 };
 
 struct wl1271_station {
 	u8 hlid;
 };
 
+struct wl12xx_vif {
+	struct wl1271 *wl;
+	struct list_head list;
+	unsigned long flags;
+	u8 bss_type;
+	u8 p2p; /* we are using p2p role */
+	u8 role_id;
+
+	/* sta/ibss specific */
+	u8 dev_role_id;
+	u8 dev_hlid;
+
+	union {
+		struct {
+			u8 hlid;
+			u8 ba_rx_bitmap;
+
+			u8 basic_rate_idx;
+			u8 ap_rate_idx;
+			u8 p2p_rate_idx;
+		} sta;
+		struct {
+			u8 global_hlid;
+			u8 bcast_hlid;
+
+			/* HLIDs bitmap of associated stations */
+			unsigned long sta_hlid_map[BITS_TO_LONGS(
+							WL12XX_MAX_LINKS)];
+
+			/* recorded keys - set here before AP startup */
+			struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+			u8 mgmt_rate_idx;
+			u8 bcast_rate_idx;
+			u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+		} ap;
+	};
+
+	/* the hlid of the last transmitted skb */
+	int last_tx_hlid;
+
+	unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+
+	/* The current band */
+	enum ieee80211_band band;
+	int channel;
+
+	u32 bitrate_masks[IEEE80211_NUM_BANDS];
+	u32 basic_rate_set;
+
+	/*
+	 * currently configured rate set:
+	 *	bits  0-15 - 802.11abg rates
+	 *	bits 16-23 - 802.11n   MCS index mask
+	 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+	 */
+	u32 basic_rate;
+	u32 rate_set;
+
+	/* probe-req template for the current AP */
+	struct sk_buff *probereq;
+
+	/* Beaconing interval (needed for ad-hoc) */
+	u32 beacon_int;
+
+	/* Default key (for WEP) */
+	u32 default_key;
+
+	/* Our association ID */
+	u16 aid;
+
+	/* Session counter for the chipset */
+	int session_counter;
+
+	struct completion *ps_compl;
+	struct delayed_work pspoll_work;
+
+	/* counter for ps-poll delivery failures */
+	int ps_poll_failures;
+
+	/* retry counter for PSM entries */
+	u8 psm_entry_retry;
+
+	/* in dBm */
+	int power_level;
+
+	int rssi_thold;
+	int last_rssi_event;
+
+	/* RX BA constraint value */
+	bool ba_support;
+	bool ba_allowed;
+
+	/* Rx Streaming */
+	struct work_struct rx_streaming_enable_work;
+	struct work_struct rx_streaming_disable_work;
+	struct timer_list rx_streaming_timer;
+
+	/*
+	 * This struct must be last!
+	 * Data that has to be saved across reconfigs (e.g. recovery)
+	 * should be declared in this struct.
+	 */
+	struct {
+		u8 persistent[0];
+		/*
+		 * Security sequence number
+		 *     bits 0-15: lower 16 bits part of sequence number
+		 *     bits 16-47: higher 32 bits part of sequence number
+		 *     bits 48-63: not in use
+		 */
+		u64 tx_security_seq;
+
+		/* 8 bits of the last sequence number in use */
+		u8 tx_security_last_seq_lsb;
+	};
+};
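The zero-length persistent[0] member is a marker, not storage: fields declared after it (the security sequence state) must survive reconfigurations such as firmware recovery, while everything before it may be cleared. A plausible way to exploit that layout; the helper below is an assumption for illustration, not the driver's actual init/recovery code:

	/* Hypothetical: wipe the volatile part of a vif, keep the persistent tail. */
	static void wlvif_reset_volatile(struct wl12xx_vif *wlvif)
	{
		memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
	}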
+
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+	return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+		list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+		list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type)	\
+		wl12xx_for_each_wlvif(wl, wlvif)		\
+			if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif)	\
+		wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif)	\
+		wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
+
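Per-vif state now lives in the drv_priv area of each ieee80211_vif, with the global struct wl1271 only keeping wlvif_list. A short sketch of how the conversion helpers and iteration macros combine; handle_sta_vif() is made up for illustration and the walk is assumed to run under wl->mutex:

	static void example_walk_sta_vifs(struct wl1271 *wl)
	{
		struct wl12xx_vif *wlvif;

		/* assumed: wl->mutex held while walking wl->wlvif_list */
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

			if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
				handle_sta_vif(wl, vif);	/* hypothetical */
		}
	}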
 int wl1271_plt_start(struct wl1271 *wl);
 int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_queue_recovery_work(struct wl1271 *wl);
 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 
 #define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
 
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
 
 #define WL1271_DEFAULT_POWER_LEVEL 0
 
@@ -669,8 +663,8 @@
 /* Each RX/TX transaction requires an end-of-transaction transfer */
 #define WL12XX_QUIRK_END_OF_TRANSACTION		BIT(0)
 
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT	BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT	BIT(2)
 
 /* Older firmwares did not implement the FW logger over bus feature */
 #define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED	BIT(4)
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f7971d3..8f0ffaf 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -116,11 +116,6 @@
 	u8 ta[ETH_ALEN];
 } __packed;
 
-struct wl12xx_qos_null_data_template {
-	struct ieee80211_header header;
-	__le16 qos_ctl;
-} __packed;
-
 struct wl12xx_arp_rsp_template {
 	struct ieee80211_hdr_3addr hdr;
 
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
index 973b110..998e958 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -1,8 +1,29 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/wl12xx.h>
 
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
 
 int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 {
@@ -18,7 +39,7 @@
 	return 0;
 }
 
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
 {
 	if (!platform_data)
 		return ERR_PTR(-ENODEV);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 8efa2f2..a66b93b 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1907,15 +1907,4 @@
 	.resume = zd1201_resume,
 };
 
-static int __init zd1201_init(void)
-{
-	return usb_register(&zd1201_usb);
-}
-
-static void __exit zd1201_cleanup(void)
-{
-	usb_deregister(&zd1201_usb);
-}
-
-module_init(zd1201_init);
-module_exit(zd1201_cleanup);
+module_usb_driver(zd1201_usb);
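module_usb_driver() folds the init/exit boilerplate into one line; it is roughly equivalent to the removed code, i.e. something like the sketch below (approximate expansion, not the exact macro text from include/linux/usb.h):

	static int __init zd1201_usb_init(void)
	{
		return usb_register(&zd1201_usb);
	}
	module_init(zd1201_usb_init);

	static void __exit zd1201_usb_exit(void)
	{
		usb_deregister(&zd1201_usb);
	}
	module_exit(zd1201_usb_exit);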
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1825629..b7d41f8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,7 +165,8 @@
 	return 0;
 }
 
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct xenvif *vif = netdev_priv(dev);
 
@@ -222,7 +223,7 @@
 	}
 }
 
-static struct ethtool_ops xenvif_ethtool_ops = {
+static const struct ethtool_ops xenvif_ethtool_ops = {
 	.get_link	= ethtool_op_get_link,
 
 	.get_sset_count = xenvif_get_sset_count,
@@ -230,7 +231,7 @@
 	.get_strings = xenvif_get_strings,
 };
 
-static struct net_device_ops xenvif_netdev_ops = {
+static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_start_xmit	= xenvif_start_xmit,
 	.ndo_get_stats	= xenvif_get_stats,
 	.ndo_open	= xenvif_open,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 15e332d..639cf8a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -395,7 +395,7 @@
 	struct gnttab_copy *copy_gop;
 	struct netbk_rx_meta *meta;
 	/*
-	 * These variables a used iff get_page_ext returns true,
+	 * These variables are used iff get_page_ext returns true,
 	 * in which case they are guaranteed to be initialized.
 	 */
 	unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@
 		if (!page)
 			return NULL;
 
-		netbk->mmap_pages[pending_idx] = page;
-
 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
 		gop->source.offset = txp->offset;
@@ -1336,8 +1334,6 @@
 			continue;
 		}
 
-		netbk->mmap_pages[pending_idx] = page;
-
 		gop->source.u.ref = txreq.gref;
 		gop->source.domid = vif->domid;
 		gop->source.offset = txreq.offset;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 226faab..0a59c57 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -201,7 +201,7 @@
 #define xennet_sysfs_delif(dev) do { } while (0)
 #endif
 
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
 {
 	return dev->features & NETIF_F_SG;
 }
@@ -1190,7 +1190,8 @@
 	gnttab_free_grant_references(np->gref_rx_head);
 }
 
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	int val;
@@ -1216,7 +1217,8 @@
 	return features;
 }
 
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
 		netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@
 	case XenbusStateInitialised:
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
-	case XenbusStateConnected:
 	case XenbusStateUnknown:
 	case XenbusStateClosed:
 		break;
@@ -1718,6 +1719,9 @@
 		if (xennet_connect(netdev) != 0)
 			break;
 		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateConnected:
 		netif_notify_peers(netdev);
 		break;
 
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 7bcb1fe..1a1500b 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -72,6 +72,7 @@
 #define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
 #define PN533_CMD_IN_ATR 0x50
 #define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
 
 #define PN533_CMD_RESPONSE(cmd) (cmd + 1)
 
@@ -231,6 +232,26 @@
 	u8 gt[];
 } __packed;
 
+/* PN533_CMD_IN_JUMP_FOR_DEP */
+struct pn533_cmd_jump_dep {
+	u8 active;
+	u8 baud;
+	u8 next;
+	u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+	u8 status;
+	u8 tg;
+	u8 nfcid3t[10];
+	u8 didt;
+	u8 bst;
+	u8 brt;
+	u8 to;
+	u8 ppt;
+	/* optional */
+	u8 gt[];
+} __packed;
 
 struct pn533 {
 	struct usb_device *udev;
@@ -1121,6 +1142,7 @@
 {
 	struct pn533_cmd_activate_param param;
 	struct pn533_cmd_activate_response *resp;
+	u16 gt_len;
 	int rc;
 
 	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1146,7 +1168,11 @@
 	if (rc != PN533_CMD_RET_SUCCESS)
 		return -EIO;
 
-	return 0;
+	/* ATR_RES general bytes are located at offset 16 */
+	gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
+	rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+
+	return rc;
 }
 
 static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
@@ -1239,6 +1265,142 @@
 	return;
 }
 
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+						u8 *params, int params_len)
+{
+	struct pn533_cmd_jump_dep *cmd;
+	struct pn533_cmd_jump_dep_response *resp;
+	struct nfc_target nfc_target;
+	u8 target_gt_len;
+	int rc;
+
+	if (params_len == -ENOENT) {
+		nfc_dev_dbg(&dev->interface->dev, "");
+		return 0;
+	}
+
+	if (params_len < 0) {
+		nfc_dev_err(&dev->interface->dev,
+				"Error %d when bringing DEP link up",
+								params_len);
+		return 0;
+	}
+
+	if (dev->tgt_available_prots &&
+	    !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+		nfc_dev_err(&dev->interface->dev,
+			"The target does not support DEP");
+		return -EINVAL;
+	}
+
+	resp = (struct pn533_cmd_jump_dep_response *) params;
+	cmd = (struct pn533_cmd_jump_dep *) arg;
+	rc = resp->status & PN533_CMD_RET_MASK;
+	if (rc != PN533_CMD_RET_SUCCESS) {
+		nfc_dev_err(&dev->interface->dev,
+				"Bringing DEP link up failed %d", rc);
+		return 0;
+	}
+
+	if (!dev->tgt_available_prots) {
+		nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+
+		nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+		rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+		if (rc)
+			return 0;
+
+		dev->tgt_available_prots = 0;
+	}
+
+	dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+	/* ATR_RES general bytes are located at offset 17 */
+	target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+	rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+						resp->gt, target_gt_len);
+	if (rc == 0)
+		rc = nfc_dep_link_is_up(dev->nfc_dev,
+						dev->nfc_dev->targets[0].idx,
+						!cmd->active, NFC_RF_INITIATOR);
+
+	return 0;
+}
+
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+						u8 comm_mode, u8 rf_mode)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct pn533_cmd_jump_dep *cmd;
+	u8 cmd_len, local_gt_len, *local_gt;
+	int rc;
+
+	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+	if (rf_mode == NFC_RF_TARGET) {
+		nfc_dev_err(&dev->interface->dev, "Target mode not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (dev->poll_mod_count) {
+		nfc_dev_err(&dev->interface->dev,
+				"Cannot bring the DEP link up while polling");
+		return -EBUSY;
+	}
+
+	if (dev->tgt_active_prot) {
+		nfc_dev_err(&dev->interface->dev,
+				"There is already an active target");
+		return -EBUSY;
+	}
+
+	local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
+	if (local_gt_len > NFC_MAX_GT_LEN)
+		return -EINVAL;
+
+	cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+	cmd = kzalloc(cmd_len, GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+
+	cmd->active = !comm_mode;
+	cmd->baud = 0;
+	if (local_gt != NULL) {
+		cmd->next = 4; /* We have some Gi */
+		memcpy(cmd->gt, local_gt, local_gt_len);
+	} else {
+		cmd->next = 0;
+	}
+
+	memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
+	dev->out_frame->datalen += cmd_len;
+
+	pn533_tx_frame_finish(dev->out_frame);
+
+	rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+				dev->in_maxlen,	pn533_in_dep_link_up_complete,
+				cmd, GFP_KERNEL);
+	if (rc)
+		goto out;
+
+out:
+	kfree(cmd);
+
+	return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+	pn533_deactivate_target(nfc_dev, 0);
+
+	return 0;
+}
+
 #define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
 #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
 
@@ -1339,7 +1501,7 @@
 	return 0;
 }
 
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
 						struct sk_buff *skb,
 						data_exchange_cb_t cb,
 						void *cb_context)
@@ -1368,7 +1530,7 @@
 			PN533_CMD_DATAEXCH_DATA_MAXLEN +
 			PN533_FRAME_TAIL_SIZE;
 
-	skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+	skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
 	if (!skb_resp) {
 		rc = -ENOMEM;
 		goto error;
@@ -1434,6 +1596,8 @@
 struct nfc_ops pn533_nfc_ops = {
 	.dev_up = NULL,
 	.dev_down = NULL,
+	.dep_link_up = pn533_dep_link_up,
+	.dep_link_down = pn533_dep_link_down,
 	.start_poll = pn533_start_poll,
 	.stop_poll = pn533_stop_poll,
 	.activate_target = pn533_activate_target,
@@ -1597,24 +1761,7 @@
 	.id_table =	pn533_table,
 };
 
-static int __init pn533_init(void)
-{
-	int rc;
-
-	rc = usb_register(&pn533_driver);
-	if (rc)
-		err("usb_register failed. Error number %d", rc);
-
-	return rc;
-}
-
-static void __exit pn533_exit(void)
-{
-	usb_deregister(&pn533_driver);
-}
-
-module_init(pn533_init);
-module_exit(pn533_exit);
+module_usb_driver(pn533_driver);
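
    Editor's note: the conversion above drops the hand-written init/exit pair in
    favour of module_usb_driver(). As a rough sketch (assumed expansion via the
    generic module_driver() helper; not part of this patch), the macro generates
    the same register/deregister calls:

	/* approximate expansion of module_usb_driver(pn533_driver) */
	static int __init pn533_driver_init(void)
	{
		return usb_register(&pn533_driver);
	}
	module_init(pn533_driver_init);

	static void __exit pn533_driver_exit(void)
	{
		usb_deregister(&pn533_driver);
	}
	module_exit(pn533_driver_exit);

    The module_platform_driver() conversions further down (parport_ax88796,
    parport_sunbpp) follow the same pattern with platform_driver_register()
    and platform_driver_unregister().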
 
 MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>,"
 			" Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index cac63c9..268163d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -15,6 +15,15 @@
 	  an image of the device tree that the kernel copies from Open
 	  Firmware or other boot firmware. If unsure, say Y here.
 
+config OF_SELFTEST
+	bool "Device Tree Runtime self tests"
+	help
+	  This option builds in test cases for the device tree infrastructure
+	  that are executed once at boot time, with the results dumped to the
+	  console.
+
+	  If unsure, say N here, but this option is safe to enable.
+
 config OF_FLATTREE
 	bool
 	select DTC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index dccb117..a73f5a5 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_OF_I2C)	+= of_i2c.o
 obj-$(CONFIG_OF_NET)	+= of_net.o
 obj-$(CONFIG_OF_SPI)	+= of_spi.o
+obj-$(CONFIG_OF_SELFTEST) += selftest.o
 obj-$(CONFIG_OF_MDIO)	+= of_mdio.o
 obj-$(CONFIG_OF_PCI)	+= of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 9b6588e..133908a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -752,7 +752,7 @@
 
 	for (i = 0; total < prop->length; total += l, p += l) {
 		l = strlen(p) + 1;
-		if ((*p != 0) && (i++ == index)) {
+		if (i++ == index) {
 			*output = p;
 			return 0;
 		}
@@ -790,11 +790,9 @@
 
 	p = prop->value;
 
-	for (i = 0; total < prop->length; total += l, p += l) {
+	for (i = 0; total < prop->length; total += l, p += l, i++)
 		l = strlen(p) + 1;
-		if (*p != 0)
-			i++;
-	}
+
 	return i;
 }
 EXPORT_SYMBOL_GPL(of_property_count_strings);
@@ -824,17 +822,19 @@
 EXPORT_SYMBOL(of_parse_phandle);
 
 /**
- * of_parse_phandles_with_args - Find a node pointed by phandle in a list
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
  * @np:		pointer to a device tree node containing a list
  * @list_name:	property name that contains a list
  * @cells_name:	property name that specifies phandles' arguments count
  * @index:	index of a phandle to parse out
- * @out_node:	optional pointer to device_node struct pointer (will be filled)
- * @out_args:	optional pointer to arguments pointer (will be filled)
+ * @out_args:	optional pointer to output arguments structure (will be filled)
  *
  * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_node and out_args, on error returns
- * appropriate errno value.
+ * Returns 0 on success and fills out_args; on error it returns an
+ * appropriate errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
  *
  * Example:
  *
@@ -851,94 +851,96 @@
  * }
  *
  * To get a device_node of the `node2' node you may call this:
- * of_parse_phandles_with_args(node3, "list", "#list-cells", 2, &node2, &args);
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
  */
-int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
+int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
 				const char *cells_name, int index,
-				struct device_node **out_node,
-				const void **out_args)
+				struct of_phandle_args *out_args)
 {
-	int ret = -EINVAL;
-	const __be32 *list;
-	const __be32 *list_end;
-	int size;
-	int cur_index = 0;
+	const __be32 *list, *list_end;
+	int size, cur_index = 0;
+	uint32_t count = 0;
 	struct device_node *node = NULL;
-	const void *args = NULL;
+	phandle phandle;
 
+	/* Retrieve the phandle list property */
 	list = of_get_property(np, list_name, &size);
-	if (!list) {
-		ret = -ENOENT;
-		goto err0;
-	}
+	if (!list)
+		return -EINVAL;
 	list_end = list + size / sizeof(*list);
 
+	/* Loop over the phandles until the requested entry is found */
 	while (list < list_end) {
-		const __be32 *cells;
-		phandle phandle;
+		count = 0;
 
+		/*
+		 * If phandle is 0, then it is an empty entry with no
+		 * arguments.  Skip forward to the next entry.
+		 */
 		phandle = be32_to_cpup(list++);
-		args = list;
+		if (phandle) {
+			/*
+			 * Find the provider node and parse the #*-cells
+			 * property to determine the argument length
+			 */
+			node = of_find_node_by_phandle(phandle);
+			if (!node) {
+				pr_err("%s: could not find phandle\n",
+					 np->full_name);
+				break;
+			}
+			if (of_property_read_u32(node, cells_name, &count)) {
+				pr_err("%s: could not get %s for %s\n",
+					 np->full_name, cells_name,
+					 node->full_name);
+				break;
+			}
 
-		/* one cell hole in the list = <>; */
-		if (!phandle)
-			goto next;
-
-		node = of_find_node_by_phandle(phandle);
-		if (!node) {
-			pr_debug("%s: could not find phandle\n",
-				 np->full_name);
-			goto err0;
+			/*
+			 * Make sure that the arguments actually fit in the
+			 * remaining property data length
+			 */
+			if (list + count > list_end) {
+				pr_err("%s: arguments longer than property\n",
+					 np->full_name);
+				break;
+			}
 		}
 
-		cells = of_get_property(node, cells_name, &size);
-		if (!cells || size != sizeof(*cells)) {
-			pr_debug("%s: could not get %s for %s\n",
-				 np->full_name, cells_name, node->full_name);
-			goto err1;
-		}
+		/*
+		 * All of the error cases above bail out of the loop, so at
+		 * this point, the parsing is successful. If the requested
+		 * index matches, then fill the out_args structure and return,
+		 * or return -ENOENT for an empty entry.
+		 */
+		if (cur_index == index) {
+			if (!phandle)
+				return -ENOENT;
 
-		list += be32_to_cpup(cells);
-		if (list > list_end) {
-			pr_debug("%s: insufficient arguments length\n",
-				 np->full_name);
-			goto err1;
+			if (out_args) {
+				int i;
+				if (WARN_ON(count > MAX_PHANDLE_ARGS))
+					count = MAX_PHANDLE_ARGS;
+				out_args->np = node;
+				out_args->args_count = count;
+				for (i = 0; i < count; i++)
+					out_args->args[i] = be32_to_cpup(list++);
+			}
+			return 0;
 		}
-next:
-		if (cur_index == index)
-			break;
 
 		of_node_put(node);
 		node = NULL;
-		args = NULL;
+		list += count;
 		cur_index++;
 	}
 
-	if (!node) {
-		/*
-		 * args w/o node indicates that the loop above has stopped at
-		 * the 'hole' cell. Report this differently.
-		 */
-		if (args)
-			ret = -EEXIST;
-		else
-			ret = -ENOENT;
-		goto err0;
-	}
-
-	if (out_node)
-		*out_node = node;
-	if (out_args)
-		*out_args = args;
-
-	return 0;
-err1:
-	of_node_put(node);
-err0:
-	pr_debug("%s failed with status %d\n", __func__, ret);
-	return ret;
+	/* Loop exited without finding a valid entry; return an error */
+	if (node)
+		of_node_put(node);
+	return -EINVAL;
 }
-EXPORT_SYMBOL(of_parse_phandles_with_args);
+EXPORT_SYMBOL(of_parse_phandle_with_args);
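
    Editor's note: a minimal consumer sketch of the reworked API, for
    orientation only. use_provider() is a hypothetical consumer, and "list"
    and "#list-cells" refer to the kernel-doc example above:

	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(np, "list", "#list-cells", 1, &args);
	if (ret)
		return ret;
	/* args.np is the provider node; args.args[0..args_count-1] hold its cells */
	use_provider(args.np, args.args, args.args_count);	/* hypothetical */
	of_node_put(args.np);	/* the caller must drop the node reference */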
 
 /**
  * prom_add_property - Add a property to a node
@@ -1159,7 +1161,7 @@
 	if (!of_aliases)
 		return;
 
-	for_each_property(pp, of_aliases->properties) {
+	for_each_property_of_node(of_aliases, pp) {
 		const char *start = pp->name;
 		const char *end = start + strlen(start);
 		struct device_node *np;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index fd85fa2..91a375f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,6 +18,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #ifdef CONFIG_PPC
 #include <asm/machdep.h>
 #endif /* CONFIG_PPC */
@@ -107,7 +108,7 @@
  * of_fdt_match - Return true if node matches a list of compatible values
  */
 int of_fdt_match(struct boot_param_header *blob, unsigned long node,
-                 const char **compat)
+                 const char *const *compat)
 {
 	unsigned int tmp, score = 0;
 
@@ -541,7 +542,7 @@
 /**
  * of_flat_dt_match - Return true if node matches a list of compatible values
  */
-int __init of_flat_dt_match(unsigned long node, const char **compat)
+int __init of_flat_dt_match(unsigned long node, const char *const *compat)
 {
 	return of_fdt_match(initial_boot_params, node, compat);
 }
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index ef0105f..7e62d15 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -35,32 +35,27 @@
                            int index, enum of_gpio_flags *flags)
 {
 	int ret;
-	struct device_node *gpio_np;
 	struct gpio_chip *gc;
-	int size;
-	const void *gpio_spec;
-	const __be32 *gpio_cells;
+	struct of_phandle_args gpiospec;
 
-	ret = of_parse_phandles_with_args(np, propname, "#gpio-cells", index,
-					  &gpio_np, &gpio_spec);
+	ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
+					 &gpiospec);
 	if (ret) {
 		pr_debug("%s: can't parse gpios property\n", __func__);
 		goto err0;
 	}
 
-	gc = of_node_to_gpiochip(gpio_np);
+	gc = of_node_to_gpiochip(gpiospec.np);
 	if (!gc) {
 		pr_debug("%s: gpio controller %s isn't registered\n",
-			 np->full_name, gpio_np->full_name);
+			 np->full_name, gpiospec.np->full_name);
 		ret = -ENODEV;
 		goto err1;
 	}
 
-	gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
-	if (!gpio_cells || size != sizeof(*gpio_cells) ||
-			be32_to_cpup(gpio_cells) != gc->of_gpio_n_cells) {
+	if (gpiospec.args_count != gc->of_gpio_n_cells) {
 		pr_debug("%s: wrong #gpio-cells for %s\n",
-			 np->full_name, gpio_np->full_name);
+			 np->full_name, gpiospec.np->full_name);
 		ret = -EINVAL;
 		goto err1;
 	}
@@ -69,13 +64,13 @@
 	if (flags)
 		*flags = 0;
 
-	ret = gc->of_xlate(gc, np, gpio_spec, flags);
+	ret = gc->of_xlate(gc, &gpiospec, flags);
 	if (ret < 0)
 		goto err1;
 
 	ret += gc->base;
 err1:
-	of_node_put(gpio_np);
+	of_node_put(gpiospec.np);
 err0:
 	pr_debug("%s exited with status %d\n", __func__, ret);
 	return ret;
@@ -105,8 +100,8 @@
 	do {
 		int ret;
 
-		ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells",
-						  cnt, NULL, NULL);
+		ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells",
+						 cnt, NULL);
 		/* A hole in the gpios = <> counts anyway. */
 		if (ret < 0 && ret != -EEXIST)
 			break;
@@ -127,12 +122,9 @@
  * gpio chips. This function performs only one sanity check: whether gpio
  * is less than ngpios (that is specified in the gpio_chip).
  */
-int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
-			 const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc,
+			 const struct of_phandle_args *gpiospec, u32 *flags)
 {
-	const __be32 *gpio = gpio_spec;
-	const u32 n = be32_to_cpup(gpio);
-
 	/*
 	 * We're discouraging gpio_cells < 2, since that way you'll have to
 	 * write your own xlate function (that will have to retrieve the GPIO
@@ -144,13 +136,16 @@
 		return -EINVAL;
 	}
 
-	if (n > gc->ngpio)
+	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
+		return -EINVAL;
+
+	if (gpiospec->args[0] > gc->ngpio)
 		return -EINVAL;
 
 	if (flags)
-		*flags = be32_to_cpu(gpio[1]);
+		*flags = gpiospec->args[1];
 
-	return n;
+	return gpiospec->args[0];
 }
 EXPORT_SYMBOL(of_gpio_simple_xlate);
 
@@ -198,8 +193,6 @@
 	if (ret)
 		goto err2;
 
-	pr_debug("%s: registered as generic GPIO chip, base is %d\n",
-		 np->full_name, gc->base);
 	return 0;
 err2:
 	iounmap(mm_gc->regs);
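
    Editor's note: to make the reworked of_gpio_simple_xlate() path concrete,
    a hedged example with purely illustrative values. For a property such as
    gpios = <&gpioA 5 1> on a controller with #gpio-cells = <2>, the parsed
    of_phandle_args would look roughly like this (gpioA_node and gc are
    assumed to be the controller's device_node and gpio_chip):

	struct of_phandle_args gpiospec = {
		.np = gpioA_node,	/* assumed controller node */
		.args_count = 2,
		.args = { 5, 1 },	/* cells from gpios = <&gpioA 5 1> */
	};
	u32 flags;
	int offset = of_gpio_simple_xlate(gc, &gpiospec, &flags);
	/* offset == 5, flags == 1; of_get_named_gpio_flags() then adds gc->base */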
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index bc5b399..07cc1d6 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -229,7 +229,7 @@
 	return ret;
 }
 
-static void *kernel_tree_alloc(u64 size, u64 align)
+static void * __init kernel_tree_alloc(u64 size, u64 align)
 {
 	return prom_early_alloc(size);
 }
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index cbd5d70..63b3ec4 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -314,7 +314,7 @@
 	if (!lookup)
 		return NULL;
 
-	for(; lookup->name != NULL; lookup++) {
+	for(; lookup->compatible != NULL; lookup++) {
 		if (!of_device_is_compatible(np, lookup->compatible))
 			continue;
 		if (of_address_to_resource(np, 0, &res))
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
new file mode 100644
index 0000000..9d2b480
--- /dev/null
+++ b/drivers/of/selftest.c
@@ -0,0 +1,139 @@
+/*
+ * Self tests for device tree subsystem
+ */
+
+#define pr_fmt(fmt) "### %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+static bool selftest_passed = true;
+#define selftest(result, fmt, ...) { \
+	selftest_passed &= (result); \
+	if (!(result)) \
+		pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
+}
+
+static void __init of_selftest_parse_phandle_with_args(void)
+{
+	struct device_node *np;
+	struct of_phandle_args args;
+	int rc, i;
+	bool passed_all = true;
+
+	pr_info("start\n");
+	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+	if (!np) {
+		pr_err("missing testcase data\n");
+		return;
+	}
+
+	for (i = 0; i < 7; i++) {
+		bool passed = true;
+		rc = of_parse_phandle_with_args(np, "phandle-list",
+						"#phandle-cells", i, &args);
+
+		/* Test the values from tests-phandle.dtsi */
+		switch (i) {
+		case 0:
+			passed &= !rc;
+			passed &= (args.args_count == 1);
+			passed &= (args.args[0] == (i + 1));
+			break;
+		case 1:
+			passed &= !rc;
+			passed &= (args.args_count == 2);
+			passed &= (args.args[0] == (i + 1));
+			passed &= (args.args[1] == 0);
+			break;
+		case 2:
+			passed &= (rc == -ENOENT);
+			break;
+		case 3:
+			passed &= !rc;
+			passed &= (args.args_count == 3);
+			passed &= (args.args[0] == (i + 1));
+			passed &= (args.args[1] == 4);
+			passed &= (args.args[2] == 3);
+			break;
+		case 4:
+			passed &= !rc;
+			passed &= (args.args_count == 2);
+			passed &= (args.args[0] == (i + 1));
+			passed &= (args.args[1] == 100);
+			break;
+		case 5:
+			passed &= !rc;
+			passed &= (args.args_count == 0);
+			break;
+		case 6:
+			passed &= !rc;
+			passed &= (args.args_count == 1);
+			passed &= (args.args[0] == (i + 1));
+			break;
+		case 7:
+			passed &= (rc == -EINVAL);
+			break;
+		default:
+			passed = false;
+		}
+
+		if (!passed) {
+			int j;
+			pr_err("index %i - data error on node %s rc=%i regs=[",
+				i, args.np->full_name, rc);
+			for (j = 0; j < args.args_count; j++)
+				printk(" %i", args.args[j]);
+			printk(" ]\n");
+
+			passed_all = false;
+		}
+	}
+
+	/* Check for missing list property */
+	rc = of_parse_phandle_with_args(np, "phandle-list-missing",
+					"#phandle-cells", 0, &args);
+	passed_all &= (rc == -EINVAL);
+
+	/* Check for missing cells property */
+	rc = of_parse_phandle_with_args(np, "phandle-list",
+					"#phandle-cells-missing", 0, &args);
+	passed_all &= (rc == -EINVAL);
+
+	/* Check for bad phandle in list */
+	rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
+					"#phandle-cells", 0, &args);
+	passed_all &= (rc == -EINVAL);
+
+	/* Check for incorrectly formed argument list */
+	rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
+					"#phandle-cells", 1, &args);
+	passed_all &= (rc == -EINVAL);
+
+	pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
+}
+
+static int __init of_selftest(void)
+{
+	struct device_node *np;
+
+	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+	if (!np) {
+		pr_info("No testcase data in device tree; not running tests\n");
+		return 0;
+	}
+	of_node_put(np);
+
+	pr_info("start of selftest - you will see error messages\n");
+	of_selftest_parse_phandle_with_args();
+	pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+	return 0;
+}
+late_initcall(of_selftest);
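
    Editor's note: a quick sketch of how the selftest() macro defined above is
    meant to be used (hypothetical check; the test function above accumulates
    its results through passed_all instead):

	rc = of_parse_phandle_with_args(np, "phandle-list", "#phandle-cells",
					0, &args);
	selftest(rc == 0, "parse at index 0 returned %i\n", rc);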
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
new file mode 100644
index 0000000..76f1c93
--- /dev/null
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -0,0 +1,173 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+	.type           = PERF_TYPE_HARDWARE,
+	.config         = PERF_COUNT_HW_CPU_CYCLES,
+	.size           = sizeof(struct perf_event_attr),
+	.pinned         = 1,
+	.disabled       = 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+			       struct perf_sample_data *data,
+			       struct pt_regs *regs)
+{
+	event->hw.interrupts = 0;       /* don't throttle interrupts */
+	oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+	if (!event) {
+		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+							 nmi_timer_callback, NULL);
+		if (IS_ERR(event))
+			return PTR_ERR(event);
+		per_cpu(nmi_timer_events, cpu) = event;
+	}
+
+	if (event && ctr_running)
+		perf_event_enable(event);
+
+	return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+	if (event && ctr_running)
+		perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+				  void *data)
+{
+	int cpu = (unsigned long)data;
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		nmi_timer_start_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		nmi_timer_stop_cpu(cpu);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+	.notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	ctr_running = 1;
+	for_each_online_cpu(cpu)
+		nmi_timer_start_cpu(cpu);
+	put_online_cpus();
+
+	return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		nmi_timer_stop_cpu(cpu);
+	ctr_running = 0;
+	put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+	struct perf_event *event;
+	int cpu;
+
+	get_online_cpus();
+	unregister_cpu_notifier(&nmi_timer_cpu_nb);
+	for_each_possible_cpu(cpu) {
+		event = per_cpu(nmi_timer_events, cpu);
+		if (!event)
+			continue;
+		perf_event_disable(event);
+		per_cpu(nmi_timer_events, cpu) = NULL;
+		perf_event_release_kernel(event);
+	}
+
+	put_online_cpus();
+}
+
+static int nmi_timer_setup(void)
+{
+	int cpu, err;
+	u64 period;
+
+	/* clock cycles per tick: */
+	period = (u64)cpu_khz * 1000;
+	do_div(period, HZ);
+	nmi_timer_attr.sample_period = period;
+
+	get_online_cpus();
+	err = register_cpu_notifier(&nmi_timer_cpu_nb);
+	if (err)
+		goto out;
+	/* can't attach events to offline cpus: */
+	for_each_online_cpu(cpu) {
+		err = nmi_timer_start_cpu(cpu);
+		if (err)
+			break;
+	}
+	if (err)
+		nmi_timer_shutdown();
+out:
+	put_online_cpus();
+	return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+	int err = 0;
+
+	err = nmi_timer_setup();
+	if (err)
+		return err;
+	nmi_timer_shutdown();		/* only check, don't alloc */
+
+	ops->create_files	= NULL;
+	ops->setup		= nmi_timer_setup;
+	ops->shutdown		= nmi_timer_shutdown;
+	ops->start		= nmi_timer_start;
+	ops->stop		= nmi_timer_stop;
+	ops->cpu_type		= "timer";
+
+	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+	return 0;
+}
+
+#endif
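
    Editor's note: the sample period set up in nmi_timer_setup() is simply the
    number of CPU cycles per timer tick. A worked example with assumed values
    (cpu_khz == 2000000, i.e. a 2 GHz CPU, and HZ == 1000):

	u64 period = (u64)2000000 * 1000;	/* 2,000,000,000 cycles per second */
	do_div(period, 1000);			/* divide by HZ -> 2,000,000 cycles per tick */
	/* each online CPU then takes one NMI-driven sample per jiffy */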
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f8c752e..ed2c3ec 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -246,37 +246,31 @@
 	int err;
 
 	/* always init architecture to setup backtrace support */
+	timer_mode = 0;
 	err = oprofile_arch_init(&oprofile_ops);
+	if (!err) {
+		if (!timer && !oprofilefs_register())
+			return 0;
+		oprofile_arch_exit();
+	}
 
-	timer_mode = err || timer;	/* fall back to timer mode on errors */
-	if (timer_mode) {
-		if (!err)
-			oprofile_arch_exit();
+	/* setup timer mode: */
+	timer_mode = 1;
+	/* no nmi timer mode if oprofile.timer is set */
+	if (timer || op_nmi_timer_init(&oprofile_ops)) {
 		err = oprofile_timer_init(&oprofile_ops);
 		if (err)
 			return err;
 	}
 
-	err = oprofilefs_register();
-	if (!err)
-		return 0;
-
-	/* failed */
-	if (timer_mode)
-		oprofile_timer_exit();
-	else
-		oprofile_arch_exit();
-
-	return err;
+	return oprofilefs_register();
 }
 
 
 static void __exit oprofile_exit(void)
 {
 	oprofilefs_unregister();
-	if (timer_mode)
-		oprofile_timer_exit();
-	else
+	if (!timer_mode)
 		oprofile_arch_exit();
 }
 
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 177b73d..d32ef81 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -35,7 +35,15 @@
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root);
 int oprofile_timer_init(struct oprofile_operations *ops);
-void oprofile_timer_exit(void);
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+int op_nmi_timer_init(struct oprofile_operations *ops);
+#else
+static inline int op_nmi_timer_init(struct oprofile_operations *ops)
+{
+	return -ENODEV;
+}
+#endif
+
 
 int oprofile_set_ulong(unsigned long *addr, unsigned long val);
 int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 89f6345..84a208d 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -45,7 +45,7 @@
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
+	retval = 0;
 	if (val)
 		retval = oprofile_start();
 	else
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d0de6cc..2f0aa0f 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -60,6 +60,13 @@
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows the write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
 	char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@
 	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
 	*val = simple_strtoul(tmpbuf, NULL, 0);
 	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-	return 0;
+	return count;
 }
 
 
@@ -99,7 +106,7 @@
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&value, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	retval = oprofile_set_ulong(file->private_data, value);
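
    Editor's note: with oprofilefs_ulong_from_user() now returning the number
    of bytes consumed, callers only act on a positive return; 0 (a zero-length
    write) and negative errno values are passed straight back. A minimal caller
    sketch under that assumption:

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)
		return retval;	/* 0 for an empty write, negative errno on error */
	/* only here is 'val' guaranteed to have been filled in */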
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 878fba1..93404f7 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,24 +97,24 @@
 	.notifier_call = oprofile_cpu_notify,
 };
 
-int oprofile_timer_init(struct oprofile_operations *ops)
+static int oprofile_hrtimer_setup(void)
 {
-	int rc;
-
-	rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
-	if (rc)
-		return rc;
-	ops->create_files = NULL;
-	ops->setup = NULL;
-	ops->shutdown = NULL;
-	ops->start = oprofile_hrtimer_start;
-	ops->stop = oprofile_hrtimer_stop;
-	ops->cpu_type = "timer";
-	printk(KERN_INFO "oprofile: using timer interrupt.\n");
-	return 0;
+	return register_hotcpu_notifier(&oprofile_cpu_notifier);
 }
 
-void oprofile_timer_exit(void)
+static void oprofile_hrtimer_shutdown(void)
 {
 	unregister_hotcpu_notifier(&oprofile_cpu_notifier);
 }
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+	ops->create_files	= NULL;
+	ops->setup		= oprofile_hrtimer_setup;
+	ops->shutdown		= oprofile_hrtimer_shutdown;
+	ops->start		= oprofile_hrtimer_start;
+	ops->stop		= oprofile_hrtimer_stop;
+	ops->cpu_type		= "timer";
+	printk(KERN_INFO "oprofile: using timer interrupt.\n");
+	return 0;
+}
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 553a990..6202649 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -108,13 +108,6 @@
 	depends on IOMMU_SBA || IOMMU_CCIO
 	default y
 
-#config PCI_EPIC
-#	bool "EPIC/SAGA PCI support"
-#	depends on PCI
-#	default y
-#	help
-#	  Say Y here for V-class PCI, DMA/IOMMU, IRQ subsystem support.
-
 source "drivers/pcmcia/Kconfig"
 
 source "drivers/pci/hotplug/Kconfig"
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 844f613..7c5d866 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -420,18 +420,7 @@
 	.resume		= parport_ax88796_resume,
 };
 
-static int __init parport_ax88796_init(void)
-{
-	return platform_driver_register(&axdrv);
-}
-
-static void __exit parport_ax88796_exit(void)
-{
-	platform_driver_unregister(&axdrv);
-}
-
-module_init(parport_ax88796_init)
-module_exit(parport_ax88796_exit)
+module_platform_driver(axdrv);
 
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 362db31..1c0c642 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -397,7 +397,7 @@
 
 
 MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
-MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Paralllel Port");
+MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
 MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 910c5a2..9390a53 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -391,21 +391,10 @@
 	.remove		= __devexit_p(bpp_remove),
 };
 
-static int __init parport_sunbpp_init(void)
-{
-	return platform_driver_register(&bpp_sbus_driver);
-}
-
-static void __exit parport_sunbpp_exit(void)
-{
-	platform_driver_unregister(&bpp_sbus_driver);
-}
+module_platform_driver(bpp_sbus_driver);
 
 MODULE_AUTHOR("Derrick J Brashear");
 MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
 MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
 MODULE_VERSION("2.0");
 MODULE_LICENSE("GPL");
-
-module_init(parport_sunbpp_init)
-module_exit(parport_sunbpp_exit)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f02b523..37856f7 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,11 +98,11 @@
 	  If unsure, say N.
 
 config PCI_IOAPIC
-	bool
+	tristate "PCI IO-APIC hotplug support" if X86
 	depends on PCI
 	depends on ACPI
 	depends on HOTPLUG
-	default y
+	default !X86
 
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb..b0dd08e 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/pci-ats.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "pci.h"
 
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fce1c54..9ddf69e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@
 	if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
 		return AE_OK;
 
+	pdev = pbus->self;
+	if (pdev && pci_is_pcie(pdev)) {
+		tmp = acpi_find_root_bridge_handle(pdev);
+		if (tmp) {
+			struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+			if (root && (root->osc_control_set &
+					OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+				return AE_OK;
+		}
+	}
+
 	acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
 	device = (adr >> 16) & 0xffff;
 	function = adr & 0xffff;
@@ -213,7 +225,6 @@
 
 	pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
 	if (pdev) {
-		pdev->current_state = PCI_D0;
 		slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
 		pci_dev_put(pdev);
 	}
@@ -459,17 +470,8 @@
 {
 	acpi_status status;
 	unsigned long long tmp;
-	struct acpi_pci_root *root;
 	acpi_handle dummy_handle;
 
-	/*
-	 * We shouldn't use this bridge if PCIe native hotplug control has been
-	 * granted by the BIOS for it.
-	 */
-	root = acpi_pci_find_root(handle);
-	if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-		return -ENODEV;
-
 	/* if the bridge doesn't have _STA, we assume it is always there */
 	status = acpi_get_handle(handle, "_STA", &dummy_handle);
 	if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@
 static acpi_status
 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-	struct acpi_pci_root *root;
 	int *count = (int *)context;
 
 	if (!acpi_is_root_bridge(handle))
 		return AE_OK;
 
-	root = acpi_pci_find_root(handle);
-	if (!root)
-		return AE_OK;
-
-	if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
-		return AE_OK;
-
 	(*count)++;
 	acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
 				    handle_hotplug_event_bridge, NULL);
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 5775638..205af8d 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -17,7 +17,7 @@
  */
 
 #include <linux/pci.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
@@ -27,7 +27,7 @@
 	u32		gsi_base;
 };
 
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 {
 	acpi_handle handle;
 	acpi_status status;
@@ -88,7 +88,7 @@
 	return -ENODEV;
 }
 
-static void ioapic_remove(struct pci_dev *dev)
+static void __devexit ioapic_remove(struct pci_dev *dev)
 {
 	struct ioapic *ioapic = pci_get_drvdata(dev);
 
@@ -99,13 +99,12 @@
 }
 
 
-static struct pci_device_id ioapic_devices[] = {
-	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-	  PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
-	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-	  PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
+static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
+	{ PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
 	{ }
 };
+MODULE_DEVICE_TABLE(pci, ioapic_devices);
 
 static struct pci_driver ioapic_driver = {
 	.name		= "ioapic",
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155..1969a3e 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@
 	struct resource *res;
 	struct pci_dev *pdev;
 	struct pci_sriov *iov = dev->sriov;
+	int bars = 0;
 
 	if (!nr_virtfn)
 		return 0;
@@ -307,6 +308,7 @@
 
 	nres = 0;
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+		bars |= (1 << (i + PCI_IOV_RESOURCES));
 		res = dev->resource + PCI_IOV_RESOURCES + i;
 		if (res->parent)
 			nres++;
@@ -324,6 +326,11 @@
 		return -ENOMEM;
 	}
 
+	if (pci_enable_resources(dev, bars)) {
+		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+		return -ENOMEM;
+	}
+
 	if (iov->link != dev->devfn) {
 		pdev = pci_get_slot(dev->bus, iov->link);
 		if (!pdev)
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 81525ae..edaed6f 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -89,7 +89,7 @@
 	return 0;
 }
 
-static mode_t
+static umode_t
 smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
 			     int n)
 {
@@ -275,7 +275,7 @@
 	return FALSE;
 }
 
-static mode_t
+static umode_t
 acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
 {
 	struct device *dev;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73..6d4a531 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@
 		error = platform_pci_set_power_state(dev, state);
 		if (!error)
 			pci_update_current_state(dev, state);
+		/* Fall back to PCI_D0 if native PM is not supported */
+		if (!dev->pm_cap)
+			dev->current_state = PCI_D0;
 	} else {
 		error = -ENODEV;
 		/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@
 	if (atomic_add_return(1, &dev->enable_cnt) > 1)
 		return 0;		/* already enabled */
 
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	/* only skip sriov related */
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+		if (dev->resource[i].flags & flags)
+			bars |= (1 << i);
+	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
 		if (dev->resource[i].flags & flags)
 			bars |= (1 << i);
 
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index edaccad9..b7944f9 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1477,7 +1477,7 @@
 	NULL
 };
 
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
 				    struct attribute *attr,
 				    int idx)
 {
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index d1049ee..72d731c 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -992,7 +992,7 @@
 	NULL
 };
 
-static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
 					  struct attribute *attr, int idx)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -1357,7 +1357,7 @@
 	NULL
 };
 
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
 				    struct attribute *attr, int idx)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index d9312b3..6f966d6 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1053,7 +1053,7 @@
 };
 
 static int
-asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
+asus_proc_add(char *name, const struct file_operations *proc_fops, umode_t mode,
 		     struct acpi_device *device)
 {
 	struct proc_dir_entry *proc;
@@ -1072,7 +1072,7 @@
 static int asus_hotk_add_fs(struct acpi_device *device)
 {
 	struct proc_dir_entry *proc;
-	mode_t mode;
+	umode_t mode;
 
 	if ((asus_uid == 0) && (asus_gid == 0)) {
 		mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 811d436..42a7d60 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -28,7 +28,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/mutex.h>
@@ -165,22 +164,22 @@
 	return ret;
 }
 
-static ssize_t rtl_show_version(struct sysdev_class * dev,
-                                struct sysdev_class_attribute *attr,
+static ssize_t rtl_show_version(struct device *dev,
+                                struct device_attribute *attr,
                                 char *buf)
 {
 	return sprintf(buf, "%d\n", (int)ioread8(&rtl_table->version));
 }
 
-static ssize_t rtl_show_state(struct sysdev_class *dev,
-                              struct sysdev_class_attribute *attr,
+static ssize_t rtl_show_state(struct device *dev,
+                              struct device_attribute *attr,
                               char *buf)
 {
 	return sprintf(buf, "%d\n", ioread8(&rtl_table->rt_status));
 }
 
-static ssize_t rtl_set_state(struct sysdev_class *dev,
-                             struct sysdev_class_attribute *attr,
+static ssize_t rtl_set_state(struct device *dev,
+                             struct device_attribute *attr,
                              const char *buf,
                              size_t count)
 {
@@ -205,27 +204,28 @@
 	return ret;
 }
 
-static struct sysdev_class class_rtl = {
+static struct bus_type rtl_subsys = {
 	.name = "ibm_rtl",
+	.dev_name = "ibm_rtl",
 };
 
-static SYSDEV_CLASS_ATTR(version, S_IRUGO, rtl_show_version, NULL);
-static SYSDEV_CLASS_ATTR(state, 0600, rtl_show_state, rtl_set_state);
+static DEVICE_ATTR(version, S_IRUGO, rtl_show_version, NULL);
+static DEVICE_ATTR(state, 0600, rtl_show_state, rtl_set_state);
 
-static struct sysdev_class_attribute *rtl_attributes[] = {
-	&attr_version,
-	&attr_state,
+static struct device_attribute *rtl_attributes[] = {
+	&dev_attr_version,
+	&dev_attr_state,
 	NULL
 };
 
 
 static int rtl_setup_sysfs(void) {
 	int ret, i;
-	ret = sysdev_class_register(&class_rtl);
 
+	ret = subsys_system_register(&rtl_subsys, NULL);
 	if (!ret) {
 		for (i = 0; rtl_attributes[i]; i ++)
-			sysdev_class_create_file(&class_rtl, rtl_attributes[i]);
+			device_create_file(rtl_subsys.dev_root, rtl_attributes[i]);
 	}
 	return ret;
 }
@@ -233,8 +233,8 @@
 static void rtl_teardown_sysfs(void) {
 	int i;
 	for (i = 0; rtl_attributes[i]; i ++)
-		sysdev_class_remove_file(&class_rtl, rtl_attributes[i]);
-	sysdev_class_unregister(&class_rtl);
+		device_remove_file(rtl_subsys.dev_root, rtl_attributes[i]);
+	bus_unregister(&rtl_subsys);
 }
 
 
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a36addf..ac902f7 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -368,7 +368,7 @@
 	NULL
 };
 
-static mode_t ideapad_is_visible(struct kobject *kobj,
+static umode_t ideapad_is_visible(struct kobject *kobj,
 				 struct attribute *attr,
 				 int idx)
 {
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index abddc83..3271ac8 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -389,7 +389,7 @@
 	return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled");
 }
 
-static int intel_menlow_add_one_attribute(char *name, int mode, void *show,
+static int intel_menlow_add_one_attribute(char *name, umode_t mode, void *show,
 					  void *store, struct device *dev,
 					  acpi_handle handle)
 {
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 48870e5..f00d0d1 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -19,7 +19,7 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/pm.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b82868..62533c1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -297,7 +297,7 @@
 	char param[32];
 
 	int (*init) (struct ibm_init_struct *);
-	mode_t base_procfs_mode;
+	umode_t base_procfs_mode;
 	struct ibm_struct *data;
 };
 
@@ -2456,8 +2456,9 @@
 	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
 	unsigned int poll_freq;
+	bool was_frozen;
 
 	mutex_lock(&hotkey_thread_mutex);
 
@@ -2488,14 +2489,14 @@
 				t = 100;	/* should never happen... */
 		}
 		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
 			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
 			continue;
 
 		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
 			/* forget old state on thaw or config change */
 			si = so;
 			t = 0;
@@ -2528,10 +2529,6 @@
 static void hotkey_poll_stop_sync(void)
 {
 	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
 		kthread_stop(tpacpi_hotkey_task);
 		tpacpi_hotkey_task = NULL;
 		mutex_lock(&hotkey_thread_mutex);
@@ -8542,7 +8539,7 @@
 		"%s installed\n", ibm->name);
 
 	if (ibm->read) {
-		mode_t mode = iibm->base_procfs_mode;
+		umode_t mode = iibm->base_procfs_mode;
 
 		if (!mode)
 			mode = S_IRUGO;
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index e15d4c9..e95cd65 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -176,13 +176,13 @@
 static struct attribute *
 __power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
 
-static mode_t power_supply_attr_is_visible(struct kobject *kobj,
+static umode_t power_supply_attr_is_visible(struct kobject *kobj,
 					   struct attribute *attr,
 					   int attrno)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct power_supply *psy = dev_get_drvdata(dev);
-	mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+	umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
 	int i;
 
 	if (attrno == POWER_SUPPLY_PROP_TYPE)
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index fa4d9f3..8e28625 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,20 +319,6 @@
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
-	int err;
-
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->set_alarm)
-		err = -EINVAL;
-	else
-		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
-	return err;
-}
-
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	struct rtc_time tm;
@@ -356,7 +342,14 @@
 	 * over right here, before we set the alarm.
 	 */
 
-	return ___rtc_set_alarm(rtc, alarm);
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->set_alarm)
+		err = -EINVAL;
+	else
+		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+	return err;
 }
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -770,20 +763,6 @@
 	return 0;
 }
 
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
-	struct rtc_wkalrm alarm;
-	struct rtc_time tm;
-
-	__rtc_read_time(rtc, &tm);
-
-	alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
-				     ktime_set(300, 0)));
-	alarm.enabled = 0;
-
-	___rtc_set_alarm(rtc, &alarm);
-}
-
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
@@ -805,10 +784,8 @@
 		struct rtc_wkalrm alarm;
 		int err;
 		next = timerqueue_getnext(&rtc->timerqueue);
-		if (!next) {
-			rtc_alarm_disable(rtc);
+		if (!next)
 			return;
-		}
 		alarm.time = rtc_ktime_to_tm(next->expires);
 		alarm.enabled = 1;
 		err = __rtc_set_alarm(rtc, &alarm);
@@ -870,8 +847,7 @@
 		err = __rtc_set_alarm(rtc, &alarm);
 		if (err == -ETIME)
 			goto again;
-	} else
-		rtc_alarm_disable(rtc);
+	}
 
 	mutex_unlock(&rtc->ops_lock);
 }
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index eda128f..64aedd8 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -357,10 +357,19 @@
 static struct rtc_class_ops m41t80_rtc_ops = {
 	.read_time = m41t80_rtc_read_time,
 	.set_time = m41t80_rtc_set_time,
+	/*
+	 * XXX - m41t80 alarm functionality is reported broken.
+	 * Until it is fixed, don't register alarm functions.
+	 *
 	.read_alarm = m41t80_rtc_read_alarm,
 	.set_alarm = m41t80_rtc_set_alarm,
+	*/
 	.proc = m41t80_rtc_proc,
+	/*
+	 * See above comment on broken alarm
+	 *
 	.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+	*/
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 1d28d44..02111fe 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -174,6 +174,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl030_ids);
+
 static struct amba_driver pl030_driver = {
 	.drv		= {
 		.name	= "rtc-pl030",
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index ff1b84b..a952c8d 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -420,6 +420,8 @@
 	{0, 0},
 };
 
+MODULE_DEVICE_TABLE(amba, pl031_ids);
+
 static struct amba_driver pl031_driver = {
 	.drv = {
 		.name = "rtc-pl031",
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 65894f0..eef27a1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -17,7 +17,6 @@
 #include <linux/ctype.h>
 #include <linux/major.h>
 #include <linux/slab.h>
-#include <linux/buffer_head.h>
 #include <linux/hdreg.h>
 #include <linux/async.h>
 #include <linux/mutex.h>
@@ -1073,7 +1072,7 @@
 static void dasd_profile_init(struct dasd_profile *profile,
 			      struct dentry *base_dentry)
 {
-	mode_t mode;
+	umode_t mode;
 	struct dentry *pde;
 
 	if (!base_dentry)
@@ -1112,7 +1111,7 @@
 
 static void dasd_statistics_createroot(void)
 {
-	mode_t mode;
+	umode_t mode;
 	struct dentry *pde;
 
 	dasd_debugfs_root_entry = NULL;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 87a0cf1..0326571 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1718,7 +1718,7 @@
 	erp->startdev = device;
 	erp->memdev = device;
 	erp->magic = default_erp->magic;
-	erp->expires = 0;
+	erp->expires = default_erp->expires;
 	erp->retries = 256;
 	erp->buildclk = get_clock();
 	erp->status = DASD_CQR_FILLED;
@@ -2363,7 +2363,7 @@
 	erp->memdev   = device;
 	erp->block    = cqr->block;
 	erp->magic    = cqr->magic;
-	erp->expires  = 0;
+	erp->expires  = cqr->expires;
 	erp->retries  = 256;
 	erp->buildclk = get_clock();
 	erp->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c388eda..553b3c5 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -705,6 +705,16 @@
 	if (lcu->pav == NO_PAV ||
 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
 		return NULL;
+	if (unlikely(!(private->features.feature[8] & 0x01))) {
+		/*
+		 * PAV is enabled but the prefix command is not; this is very
+		 * unlikely and seems to be a lost path group.
+		 * Use the base device to do I/O.
+		 */
+		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+			      "Prefix not enabled with PAV enabled\n");
+		return NULL;
+	}
 
 	spin_lock_irqsave(&lcu->lock, flags);
 	alias_device = group->next;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6ab2968..bbcd5e9 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -752,24 +752,13 @@
 		return sizes_trk0[recid];
 	return LABEL_SIZE;
 }
-
-/*
- * Generate device unique id that specifies the physical device.
- */
-static int dasd_eckd_generate_uid(struct dasd_device *device)
+/* create unique id from private structure. */
+static void create_uid(struct dasd_eckd_private *private)
 {
-	struct dasd_eckd_private *private;
-	struct dasd_uid *uid;
 	int count;
-	unsigned long flags;
+	struct dasd_uid *uid;
 
-	private = (struct dasd_eckd_private *) device->private;
-	if (!private)
-		return -ENODEV;
-	if (!private->ned || !private->gneq)
-		return -ENODEV;
 	uid = &private->uid;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	memset(uid, 0, sizeof(struct dasd_uid));
 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
 	       sizeof(uid->vendor) - 1);
@@ -792,6 +781,23 @@
 				private->vdsneq->uit[count]);
 		}
 	}
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (!private)
+		return -ENODEV;
+	if (!private->ned || !private->gneq)
+		return -ENODEV;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	create_uid(private);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	return 0;
 }
@@ -811,6 +817,21 @@
 	return -EINVAL;
 }
 
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+				      struct dasd_eckd_private *private)
+{
+	struct dasd_uid device_uid;
+
+	create_uid(private);
+	dasd_eckd_get_uid(device, &device_uid);
+
+	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+}
+
 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
 				   struct dasd_ccw_req *cqr,
 				   __u8 *rcd_buffer,
@@ -1005,59 +1026,120 @@
 	int conf_len, conf_data_saved;
 	int rc;
 	__u8 lpm, opm;
-	struct dasd_eckd_private *private;
+	struct dasd_eckd_private *private, path_private;
 	struct dasd_path *path_data;
+	struct dasd_uid *uid;
+	char print_path_uid[60], print_device_uid[60];
 
 	private = (struct dasd_eckd_private *) device->private;
 	path_data = &device->path_data;
 	opm = ccw_device_get_path_mask(device->cdev);
-	lpm = 0x80;
 	conf_data_saved = 0;
 	/* get configuration data per operational path */
 	for (lpm = 0x80; lpm; lpm>>= 1) {
-		if (lpm & opm) {
-			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
-						     &conf_len, lpm);
-			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
-				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
-					  "Read configuration data returned "
-					  "error %d", rc);
-				return rc;
-			}
-			if (conf_data == NULL) {
-				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
-						"No configuration data "
-						"retrieved");
-				/* no further analysis possible */
-				path_data->opm |= lpm;
-				continue;	/* no error */
-			}
-			/* save first valid configuration data */
-			if (!conf_data_saved) {
-				kfree(private->conf_data);
-				private->conf_data = conf_data;
-				private->conf_len = conf_len;
-				if (dasd_eckd_identify_conf_parts(private)) {
-					private->conf_data = NULL;
-					private->conf_len = 0;
-					kfree(conf_data);
-					continue;
-				}
-				conf_data_saved++;
-			}
-			switch (dasd_eckd_path_access(conf_data, conf_len)) {
-			case 0x02:
-				path_data->npm |= lpm;
-				break;
-			case 0x03:
-				path_data->ppm |= lpm;
-				break;
-			}
-			path_data->opm |= lpm;
-			if (conf_data != private->conf_data)
-				kfree(conf_data);
+		if (!(lpm & opm))
+			continue;
+		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+					     &conf_len, lpm);
+		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data returned "
+					"error %d", rc);
+			return rc;
 		}
+		if (conf_data == NULL) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"No configuration data "
+					"retrieved");
+			/* no further analysis possible */
+			path_data->opm |= lpm;
+			continue;	/* no error */
+		}
+		/* save first valid configuration data */
+		if (!conf_data_saved) {
+			kfree(private->conf_data);
+			private->conf_data = conf_data;
+			private->conf_len = conf_len;
+			if (dasd_eckd_identify_conf_parts(private)) {
+				private->conf_data = NULL;
+				private->conf_len = 0;
+				kfree(conf_data);
+				continue;
+			}
+			/*
+			 * build the device UID so that the data from the
+			 * other paths can be compared against it
+			 */
+			dasd_eckd_generate_uid(device);
+			conf_data_saved++;
+		} else {
+			path_private.conf_data = conf_data;
+			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+			if (dasd_eckd_identify_conf_parts(
+				    &path_private)) {
+				path_private.conf_data = NULL;
+				path_private.conf_len = 0;
+				kfree(conf_data);
+				continue;
+			}
+
+			if (dasd_eckd_compare_path_uid(
+				    device, &path_private)) {
+				uid = &path_private.uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_path_uid,
+						 sizeof(print_path_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_path_uid,
+						 sizeof(print_path_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				uid = &private->uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_device_uid,
+						 sizeof(print_device_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_device_uid,
+						 sizeof(print_device_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				dev_err(&device->cdev->dev,
+					"Not all channel paths lead to "
+					"the same device, path %02X leads to "
+					"device %s instead of %s\n", lpm,
+					print_path_uid, print_device_uid);
+				return -EINVAL;
+			}
+
+			path_private.conf_data = NULL;
+			path_private.conf_len = 0;
+		}
+		switch (dasd_eckd_path_access(conf_data, conf_len)) {
+		case 0x02:
+			path_data->npm |= lpm;
+			break;
+		case 0x03:
+			path_data->ppm |= lpm;
+			break;
+		}
+		path_data->opm |= lpm;
+
+		if (conf_data != private->conf_data)
+			kfree(conf_data);
 	}
+
 	return 0;
 }
 
@@ -1090,12 +1172,61 @@
 	return 0;
 }
 
+static int rebuild_device_uid(struct dasd_device *device,
+			      struct path_verification_work_data *data)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_path *path_data;
+	__u8 lpm, opm;
+	int rc;
+
+	rc = -ENODEV;
+	private = (struct dasd_eckd_private *) device->private;
+	path_data = &device->path_data;
+	opm = device->path_data.opm;
+
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (!(lpm & opm))
+			continue;
+		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+		memset(&data->cqr, 0, sizeof(data->cqr));
+		data->cqr.cpaddr = &data->ccw;
+		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+						     data->rcd_buffer,
+						     lpm);
+
+		if (rc) {
+			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+				continue;
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data "
+					"returned error %d", rc);
+			break;
+		}
+		memcpy(private->conf_data, data->rcd_buffer,
+		       DASD_ECKD_RCD_DATA_SIZE);
+		if (dasd_eckd_identify_conf_parts(private)) {
+			rc = -ENODEV;
+		} else /* first valid path is enough */
+			break;
+	}
+
+	if (!rc)
+		rc = dasd_eckd_generate_uid(device);
+
+	return rc;
+}
+
 static void do_path_verification_work(struct work_struct *work)
 {
 	struct path_verification_work_data *data;
 	struct dasd_device *device;
+	struct dasd_eckd_private path_private;
+	struct dasd_uid *uid;
+	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
 	__u8 lpm, opm, npm, ppm, epm;
 	unsigned long flags;
+	char print_uid[60];
 	int rc;
 
 	data = container_of(work, struct path_verification_work_data, worker);
@@ -1112,64 +1243,129 @@
 	ppm = 0;
 	epm = 0;
 	for (lpm = 0x80; lpm; lpm >>= 1) {
-		if (lpm & data->tbvpm) {
-			memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
-			memset(&data->cqr, 0, sizeof(data->cqr));
-			data->cqr.cpaddr = &data->ccw;
-			rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
-							     data->rcd_buffer,
-							     lpm);
-			if (!rc) {
-				switch (dasd_eckd_path_access(data->rcd_buffer,
-						     DASD_ECKD_RCD_DATA_SIZE)) {
-				case 0x02:
-					npm |= lpm;
-					break;
-				case 0x03:
-					ppm |= lpm;
-					break;
-				}
-				opm |= lpm;
-			} else if (rc == -EOPNOTSUPP) {
-				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
-				       "path verification: No configuration "
-				       "data retrieved");
-				opm |= lpm;
-			} else if (rc == -EAGAIN) {
-				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+		if (!(lpm & data->tbvpm))
+			continue;
+		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+		memset(&data->cqr, 0, sizeof(data->cqr));
+		data->cqr.cpaddr = &data->ccw;
+		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+						     data->rcd_buffer,
+						     lpm);
+		if (!rc) {
+			switch (dasd_eckd_path_access(data->rcd_buffer,
+						      DASD_ECKD_RCD_DATA_SIZE)
+				) {
+			case 0x02:
+				npm |= lpm;
+				break;
+			case 0x03:
+				ppm |= lpm;
+				break;
+			}
+			opm |= lpm;
+		} else if (rc == -EOPNOTSUPP) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"path verification: No configuration "
+					"data retrieved");
+			opm |= lpm;
+		} else if (rc == -EAGAIN) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
 					"path verification: device is stopped,"
 					" try again later");
-				epm |= lpm;
-			} else {
-				dev_warn(&device->cdev->dev,
-					 "Reading device feature codes failed "
-					 "(rc=%d) for new path %x\n", rc, lpm);
-				continue;
-			}
-			if (verify_fcx_max_data(device, lpm)) {
+			epm |= lpm;
+		} else {
+			dev_warn(&device->cdev->dev,
+				 "Reading device feature codes failed "
+				 "(rc=%d) for new path %x\n", rc, lpm);
+			continue;
+		}
+		if (verify_fcx_max_data(device, lpm)) {
+			opm &= ~lpm;
+			npm &= ~lpm;
+			ppm &= ~lpm;
+			continue;
+		}
+
+		/*
+		 * save conf_data for comparison after
+		 * rebuild_device_uid may have changed
+		 * the original data
+		 */
+		memcpy(&path_rcd_buf, data->rcd_buffer,
+		       DASD_ECKD_RCD_DATA_SIZE);
+		path_private.conf_data = (void *) &path_rcd_buf;
+		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+		if (dasd_eckd_identify_conf_parts(&path_private)) {
+			path_private.conf_data = NULL;
+			path_private.conf_len = 0;
+			continue;
+		}
+
+		/*
+		 * compare path UID with device UID only if at least
+		 * one valid path is left
+		 * in other case the device UID may have changed and
+		 * the first working path UID will be used as device UID
+		 */
+		if (device->path_data.opm &&
+		    dasd_eckd_compare_path_uid(device, &path_private)) {
+			/*
+			 * the comparison was not successful
+			 * rebuild the device UID with at least one
+			 * known path in case a z/VM hyperswap command
+			 * has changed the device
+			 *
+			 * after this compare again
+			 *
+			 * if either the rebuild or the recompare fails
+			 * the path can not be used
+			 */
+			if (rebuild_device_uid(device, data) ||
+			    dasd_eckd_compare_path_uid(
+				    device, &path_private)) {
+				uid = &path_private.uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_uid, sizeof(print_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_uid, sizeof(print_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				dev_err(&device->cdev->dev,
+					"The newly added channel path %02X "
+					"will not be used because it leads "
+					"to a different device %s\n",
+					lpm, print_uid);
 				opm &= ~lpm;
 				npm &= ~lpm;
 				ppm &= ~lpm;
+				continue;
 			}
 		}
+
+		/*
+		 * There is a small chance that a path is lost again between
+		 * above path verification and the following modification of
+		 * the device opm mask. We could avoid that race here by using
+		 * yet another path mask, but we rather deal with this unlikely
+		 * situation in dasd_start_IO.
+		 */
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (!device->path_data.opm && opm) {
+			device->path_data.opm = opm;
+			dasd_generic_path_operational(device);
+		} else
+			device->path_data.opm |= opm;
+		device->path_data.npm |= npm;
+		device->path_data.ppm |= ppm;
+		device->path_data.tbvpm |= epm;
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
-	/*
-	 * There is a small chance that a path is lost again between
-	 * above path verification and the following modification of
-	 * the device opm mask. We could avoid that race here by using
-	 * yet another path mask, but we rather deal with this unlikely
-	 * situation in dasd_start_IO.
-	 */
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	if (!device->path_data.opm && opm) {
-		device->path_data.opm = opm;
-		dasd_generic_path_operational(device);
-	} else
-		device->path_data.opm |= opm;
-	device->path_data.npm |= npm;
-	device->path_data.ppm |= ppm;
-	device->path_data.tbvpm |= epm;
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
 	dasd_put_device(device);
 	if (data->isglobal)
@@ -1441,11 +1637,6 @@
 			device->default_expires = value;
 	}
 
-	/* Generate device unique id */
-	rc = dasd_eckd_generate_uid(device);
-	if (rc)
-		goto out_err1;
-
 	dasd_eckd_get_uid(device, &temp_uid);
 	if (temp_uid.type == UA_BASE_DEVICE) {
 		block = dasd_alloc_block();
@@ -2206,7 +2397,7 @@
 					   sizeof(struct PFX_eckd_data));
 	} else {
 		if (define_extent(ccw++, cqr->data, first_trk,
-				  last_trk, cmd, startdev) == -EAGAIN) {
+				  last_trk, cmd, basedev) == -EAGAIN) {
 			/* Clock not in sync and XRC is enabled.
 			 * Try again later.
 			 */
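
The per-path handling added above follows one pattern: read the configuration data on each candidate path, derive a UID from it, and only merge the path into the operational mask if that UID matches the device, rebuilding the device UID once in case a z/VM hyperswap changed the underlying device. A condensed sketch of that decision; read_path_conf(), path_uid_matches() and rebuild_uid_from_known_path() are hypothetical helpers, not functions from this driver:

	/* Sketch only: hypothetical helpers, same decision flow as above. */
	static int accept_new_path(struct dasd_device *device, __u8 lpm)
	{
		struct dasd_eckd_private path_private;

		if (read_path_conf(device, lpm, &path_private))
			return -ENODEV;		/* no usable configuration data */

		/* Compare UIDs only while at least one path is still operational;
		 * otherwise the first working path (re)defines the device UID. */
		if (device->path_data.opm &&
		    !path_uid_matches(device, &path_private)) {
			/* A hyperswap may have replaced the device: rebuild the
			 * device UID from a known path and compare once more. */
			if (rebuild_uid_from_known_path(device) ||
			    !path_uid_matches(device, &path_private))
				return -EINVAL;	/* path leads to a different device */
		}
		return 0;			/* caller may add lpm to opm/npm/ppm */
	}
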
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 98f3e4a..690c333 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -36,7 +36,7 @@
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
 #include <linux/hdreg.h>  /* HDIO_GETGEO */
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/bio.h>
 #include <linux/suspend.h>
 #include <linux/platform_device.h>
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 95b909a..3c03c10 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -11,7 +11,7 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/cpu.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/workqueue.h>
 #include <asm/smp.h>
 
@@ -31,14 +31,14 @@
 static void sclp_cpu_capability_notify(struct work_struct *work)
 {
 	int cpu;
-	struct sys_device *sysdev;
+	struct device *dev;
 
 	s390_adjust_jiffies();
 	pr_warning("cpu capability changed.\n");
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
-		sysdev = get_cpu_sysdev(cpu);
-		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	put_online_cpus();
 }
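
The sclp_config conversion above replaces the old sysdev interface with the generic CPU device; the resulting pattern is just get_cpu_device() plus a uevent. A minimal sketch (kernel context, names other than the API calls are placeholders):

	#include <linux/cpu.h>
	#include <linux/kobject.h>

	static void notify_cpu_capability_change(void)
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct device *dev = get_cpu_device(cpu); /* replaces get_cpu_sysdev() */

			if (dev)
				kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		}
		put_online_cpus();
	}
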
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 9e32780..ba2092f 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/major.h>
-#include <linux/kobj_map.h>
 #include <linux/cdev.h>
 
 #include <linux/device.h>
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 2acc01f..452989a 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -22,12 +22,9 @@
 static struct kmem_cache *qdio_q_cache;
 static struct kmem_cache *qdio_aob_cache;
 
-struct qaob *qdio_allocate_aob()
+struct qaob *qdio_allocate_aob(void)
 {
-	struct qaob *aob;
-
-	aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
-	return aob;
+	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(qdio_allocate_aob);
 
@@ -180,7 +177,8 @@
 		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
 		q->is_input_q = 1;
-		q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+				qdio_init->queue_start_poll_array[i] : NULL;
 
 		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
 		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index dd47378..077b7d1 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -56,11 +56,6 @@
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
 
 #define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
-#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
-#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
-#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
-
-#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
 
 #define PCIXCC_CLEANUP_TIME	(15*HZ)
 
@@ -265,7 +260,7 @@
  * @ap_msg: pointer to AP message
  * @xcRB: pointer to user input data
  *
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or -EFAULT, -EINVAL.
  */
 struct type86_fmt2_msg {
 	struct type86_hdr hdr;
@@ -295,19 +290,12 @@
 		CEIL4(xcRB->request_control_blk_length) +
 		xcRB->request_data_length;
 	if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
-		return -EFAULT;
-	if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
-		return -EFAULT;
-	if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
-		return -EFAULT;
-	replylen = CEIL4(xcRB->reply_control_blk_length) +
-		CEIL4(xcRB->reply_data_length) +
-		sizeof(struct type86_fmt2_msg);
-	if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
-		xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
-			(sizeof(struct type86_fmt2_msg) +
-			    CEIL4(xcRB->reply_data_length));
-	}
+		return -EINVAL;
+	replylen = sizeof(struct type86_fmt2_msg) +
+		CEIL4(xcRB->reply_control_blk_length) +
+		xcRB->reply_data_length;
+	if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
+		return -EINVAL;
 
 	/* prepare type6 header */
 	msg->hdr = static_type6_hdrX;
@@ -326,7 +314,7 @@
 		return -EFAULT;
 	if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
 	    xcRB->request_control_blk_length)
-		return -EFAULT;
+		return -EINVAL;
 	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
 	memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
 
@@ -678,7 +666,7 @@
 			break;
 		case PCIXCC_RESPONSE_TYPE_XCRB:
 			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
-			length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
+			length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
 			memcpy(msg->message, reply->message, length);
 			break;
 		default:
@@ -1043,7 +1031,7 @@
 	struct zcrypt_device *zdev;
 	int rc = 0;
 
-	zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
+	zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
 	if (!zdev)
 		return -ENOMEM;
 	zdev->ap_dev = ap_dev;
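
The zcrypt change above also tightens the error-code convention: -EFAULT stays reserved for a failed user-space copy, while request or reply lengths that exceed PCIXCC_MAX_XCRB_MESSAGE_SIZE are rejected with -EINVAL. A generic sketch of that convention (not zcrypt code; struct req_hdr and max_len are placeholders):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct req_hdr { __u32 length; };	/* placeholder layout */

	static int check_request(void __user *uptr, size_t max_len)
	{
		struct req_hdr req;

		if (copy_from_user(&req, uptr, sizeof(req)))
			return -EFAULT;		/* the copy itself failed */
		if (req.length > max_len)
			return -EINVAL;		/* caller supplied an out-of-range value */
		return 0;
	}
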
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 94f49ff..8af868b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -263,6 +263,11 @@
 	return PTR_ERR(vqs[i]);
 }
 
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+	return "";
+}
+
 /*
  * The config ops structure as defined by virtio config
  */
@@ -276,6 +281,7 @@
 	.reset = kvm_reset,
 	.find_vqs = kvm_find_vqs,
 	.del_vqs = kvm_del_vqs,
+	.bus_name = kvm_bus_name,
 };
 
 /*
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b6a6356..8160591 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -63,6 +63,7 @@
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/ebcdic.h>
 
 #include <net/iucv/iucv.h>
 #include "fsm.h"
@@ -75,7 +76,7 @@
  * Debug Facility stuff
  */
 #define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
 #define IUCV_DBF_SETUP_PAGES 2
 #define IUCV_DBF_SETUP_NR_AREAS 1
 #define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@
 	struct net_device         *netdev;
 	struct connection_profile prof;
 	char                      userid[9];
+	char			  userdata[17];
 };
 
 /**
@@ -263,7 +265,7 @@
 };
 
 #define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX      32768
+#define NETIUCV_BUFSIZE_MAX	 65537
 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 #define NETIUCV_MTU_DEFAULT      9216
@@ -288,7 +290,12 @@
 	return test_and_set_bit(0, &priv->tbusy);
 }
 
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 };
@@ -301,18 +308,38 @@
  *
  * @returns The printable string (static data!!)
  */
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
 {
-	static char tmp[9];
+	static char tmp[17];
 	char *p = tmp;
-	memcpy(tmp, name, 8);
-	tmp[8] = '\0';
-	while (*p && (!isspace(*p)))
+	memcpy(tmp, name, len);
+	tmp[len] = '\0';
+	while (*p && ((p - tmp) < len) && (!isspace(*p)))
 		p++;
 	*p = '\0';
 	return tmp;
 }
 
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+	static char tmp_uid[9];
+	static char tmp_udat[17];
+	static char buf[100];
+
+	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+		tmp_uid[8] = '\0';
+		tmp_udat[16] = '\0';
+		memcpy(tmp_uid, conn->userid, 8);
+		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+		memcpy(tmp_udat, conn->userdata, 16);
+		EBCASC(tmp_udat, 16);
+		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+		return buf;
+	} else
+		return netiucv_printname(conn->userid, 8);
+}
+
 /**
  * States of the interface statemachine.
  */
@@ -563,15 +590,18 @@
 {
 	struct iucv_connection *conn = path->private;
 	struct iucv_event ev;
+	static char tmp_user[9];
+	static char tmp_udat[17];
 	int rc;
 
-	if (memcmp(iucvMagic, ipuser, 16))
-		/* ipuser must match iucvMagic. */
-		return -EINVAL;
 	rc = -EINVAL;
+	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+	memcpy(tmp_udat, ipuser, 16);
+	EBCASC(tmp_udat, 16);
 	read_lock_bh(&iucv_connection_rwlock);
 	list_for_each_entry(conn, &iucv_connection_list, list) {
-		if (strncmp(ipvmid, conn->userid, 8))
+		if (strncmp(ipvmid, conn->userid, 8) ||
+		    strncmp(ipuser, conn->userdata, 16))
 			continue;
 		/* Found a matching connection for this path. */
 		conn->path = path;
@@ -580,6 +610,8 @@
 		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
 		rc = 0;
 	}
+	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+		       tmp_user, netiucv_printname(tmp_udat, 16));
 	read_unlock_bh(&iucv_connection_rwlock);
 	return rc;
 }
@@ -816,7 +848,7 @@
 	conn->path = path;
 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
 	path->flags = 0;
-	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
 	if (rc) {
 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 		return;
@@ -854,7 +886,7 @@
 
 	IUCV_DBF_TEXT(trace, 3, __func__);
 	fsm_deltimer(&conn->timer);
-	iucv_path_sever(conn->path, NULL);
+	iucv_path_sever(conn->path, conn->userdata);
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 }
 
@@ -867,9 +899,9 @@
 	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_deltimer(&conn->timer);
-	iucv_path_sever(conn->path, NULL);
-	dev_info(privptr->dev, "The peer interface of the IUCV device"
-		" has closed the connection\n");
+	iucv_path_sever(conn->path, conn->userdata);
+	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+			       "connection\n", netiucv_printuser(conn));
 	IUCV_DBF_TEXT(data, 2,
 		      "conn_action_connsever: Remote dropped connection\n");
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@
 	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
-	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
-		netdev->name, conn->userid);
 
 	/*
 	 * We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@
 
 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+		netdev->name, netiucv_printuser(conn));
+
 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
-			       NULL, iucvMagic, conn);
+			       NULL, conn->userdata, conn);
 	switch (rc) {
 	case 0:
 		netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@
 	case 11:
 		dev_warn(privptr->dev,
 			"The IUCV device failed to connect to z/VM guest %s\n",
-			netiucv_printname(conn->userid));
+			netiucv_printname(conn->userid, 8));
 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 		break;
 	case 12:
 		dev_warn(privptr->dev,
 			"The IUCV device failed to connect to the peer on z/VM"
-			" guest %s\n", netiucv_printname(conn->userid));
+			" guest %s\n", netiucv_printname(conn->userid, 8));
 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 		break;
 	case 13:
@@ -927,7 +960,7 @@
 		dev_err(privptr->dev,
 			"z/VM guest %s has too many IUCV connections"
 			" to connect with the IUCV device\n",
-			netiucv_printname(conn->userid));
+			netiucv_printname(conn->userid, 8));
 		fsm_newstate(fi, CONN_STATE_CONNERR);
 		break;
 	case 15:
@@ -972,7 +1005,7 @@
 	netiucv_purge_skb_queue(&conn->collect_queue);
 	if (conn->path) {
 		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
-		iucv_path_sever(conn->path, iucvMagic);
+		iucv_path_sever(conn->path, conn->userdata);
 		kfree(conn->path);
 		conn->path = NULL;
 	}
@@ -1090,7 +1123,8 @@
 			fsm_newstate(fi, DEV_STATE_RUNNING);
 			dev_info(privptr->dev,
 				"The IUCV device has been connected"
-				" successfully to %s\n", privptr->conn->userid);
+				" successfully to %s\n",
+				netiucv_printuser(privptr->conn));
 			IUCV_DBF_TEXT(setup, 3,
 				"connection is up and running\n");
 			break;
@@ -1452,7 +1486,55 @@
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+}
+
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+			      char *userdata)
+{
+	const char *p;
+	int i;
+
+	p = strchr(buf, '.');
+	if ((p && ((count > 26) ||
+		   ((p - buf) > 8) ||
+		   (buf + count - p > 18))) ||
+	    (!p && (count > 9))) {
+		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+		return -EINVAL;
+	}
+
+	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+		if (isalnum(*p) || *p == '$') {
+			username[i] = toupper(*p);
+			continue;
+		}
+		if (*p == '\n')
+			/* trailing lf, grr */
+			break;
+		IUCV_DBF_TEXT_(setup, 2,
+			       "conn_write: invalid character %02x\n", *p);
+		return -EINVAL;
+	}
+	while (i < 8)
+		username[i++] = ' ';
+	username[8] = '\0';
+
+	if (*p == '.') {
+		p++;
+		for (i = 0; i < 16 && *p; i++, p++) {
+			if (*p == '\n')
+				break;
+			userdata[i] = toupper(*p);
+		}
+		while (i > 0 && i < 16)
+			userdata[i++] = ' ';
+	} else
+		memcpy(userdata, iucvMagic_ascii, 16);
+	userdata[16] = '\0';
+	ASCEBC(userdata, 16);
+
+	return 0;
 }
 
 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
@@ -1460,36 +1542,15 @@
 {
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->conn->netdev;
-	char    *p;
-	char    *tmp;
-	char 	username[9];
-	int 	i;
+	char	username[9];
+	char	userdata[17];
+	int	rc;
 	struct iucv_connection *cp;
 
 	IUCV_DBF_TEXT(trace, 3, __func__);
-	if (count > 9) {
-		IUCV_DBF_TEXT_(setup, 2,
-			       "%d is length of username\n", (int) count);
-		return -EINVAL;
-	}
-
-	tmp = strsep((char **) &buf, "\n");
-	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
-		if (isalnum(*p) || (*p == '$')) {
-			username[i]= toupper(*p);
-			continue;
-		}
-		if (*p == '\n') {
-			/* trailing lf, grr */
-			break;
-		}
-		IUCV_DBF_TEXT_(setup, 2,
-			       "username: invalid character %c\n", *p);
-		return -EINVAL;
-	}
-	while (i < 8)
-		username[i++] = ' ';
-	username[8] = '\0';
+	rc = netiucv_check_user(buf, count, username, userdata);
+	if (rc)
+		return rc;
 
 	if (memcmp(username, priv->conn->userid, 9) &&
 	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
@@ -1499,15 +1560,17 @@
 	}
 	read_lock_bh(&iucv_connection_rwlock);
 	list_for_each_entry(cp, &iucv_connection_list, list) {
-		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+		if (!strncmp(username, cp->userid, 9) &&
+		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
 			read_unlock_bh(&iucv_connection_rwlock);
-			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
-				"to %s already exists\n", username);
+			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+				"already exists\n", netiucv_printuser(cp));
 			return -EEXIST;
 		}
 	}
 	read_unlock_bh(&iucv_connection_rwlock);
 	memcpy(priv->conn->userid, username, 9);
+	memcpy(priv->conn->userdata, userdata, 17);
 	return count;
 }
 
@@ -1537,7 +1600,8 @@
 	bs1 = simple_strtoul(buf, &e, 0);
 
 	if (e && (!isspace(*e))) {
-		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+			*e);
 		return -EINVAL;
 	}
 	if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@
  * Add it to the list of netiucv connections;
  */
 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
-						      char *username)
+						      char *username,
+						      char *userdata)
 {
 	struct iucv_connection *conn;
 
@@ -1893,6 +1958,8 @@
 	fsm_settimer(conn->fsm, &conn->timer);
 	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
 
+	if (userdata)
+		memcpy(conn->userdata, userdata, 17);
 	if (username) {
 		memcpy(conn->userid, username, 9);
 		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@
  */
 static void netiucv_remove_connection(struct iucv_connection *conn)
 {
+
 	IUCV_DBF_TEXT(trace, 3, __func__);
 	write_lock_bh(&iucv_connection_rwlock);
 	list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@
 	fsm_deltimer(&conn->timer);
 	netiucv_purge_skb_queue(&conn->collect_queue);
 	if (conn->path) {
-		iucv_path_sever(conn->path, iucvMagic);
+		iucv_path_sever(conn->path, conn->userdata);
 		kfree(conn->path);
 		conn->path = NULL;
 	}
@@ -1985,7 +2053,7 @@
 /**
  * Allocate and initialize everything of a net device.
  */
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 {
 	struct netiucv_priv *privptr;
 	struct net_device *dev;
@@ -2004,7 +2072,7 @@
 	if (!privptr->fsm)
 		goto out_netdev;
 
-	privptr->conn = netiucv_new_connection(dev, username);
+	privptr->conn = netiucv_new_connection(dev, username, userdata);
 	if (!privptr->conn) {
 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
 		goto out_fsm;
@@ -2022,47 +2090,31 @@
 static ssize_t conn_write(struct device_driver *drv,
 			  const char *buf, size_t count)
 {
-	const char *p;
 	char username[9];
-	int i, rc;
+	char userdata[17];
+	int rc;
 	struct net_device *dev;
 	struct netiucv_priv *priv;
 	struct iucv_connection *cp;
 
 	IUCV_DBF_TEXT(trace, 3, __func__);
-	if (count>9) {
-		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
-		return -EINVAL;
-	}
-
-	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
-		if (isalnum(*p) || *p == '$') {
-			username[i] = toupper(*p);
-			continue;
-		}
-		if (*p == '\n')
-			/* trailing lf, grr */
-			break;
-		IUCV_DBF_TEXT_(setup, 2,
-			       "conn_write: invalid character %c\n", *p);
-		return -EINVAL;
-	}
-	while (i < 8)
-		username[i++] = ' ';
-	username[8] = '\0';
+	rc = netiucv_check_user(buf, count, username, userdata);
+	if (rc)
+		return rc;
 
 	read_lock_bh(&iucv_connection_rwlock);
 	list_for_each_entry(cp, &iucv_connection_list, list) {
-		if (!strncmp(username, cp->userid, 9)) {
+		if (!strncmp(username, cp->userid, 9) &&
+		    !strncmp(userdata, cp->userdata, 17)) {
 			read_unlock_bh(&iucv_connection_rwlock);
-			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
-				"to %s already exists\n", username);
+			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+				"already exists\n", netiucv_printuser(cp));
 			return -EEXIST;
 		}
 	}
 	read_unlock_bh(&iucv_connection_rwlock);
 
-	dev = netiucv_init_netdevice(username);
+	dev = netiucv_init_netdevice(username, userdata);
 	if (!dev) {
 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
 		return -ENODEV;
@@ -2083,8 +2135,9 @@
 	if (rc)
 		goto out_unreg;
 
-	dev_info(priv->dev, "The IUCV interface to %s has been"
-		" established successfully\n", netiucv_printname(username));
+	dev_info(priv->dev, "The IUCV interface to %s has been established "
+			    "successfully\n",
+		netiucv_printuser(priv->conn));
 
 	return count;
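
netiucv_check_user() above splits the string written to the connection attribute at the first '.' into an 8-character, blank-padded, upper-cased z/VM user ID and an optional 16-character user data field (converted to EBCDIC with ASCEBC before use). A small user-space illustration of the padding rules only; it is not driver code and skips the iucvMagic default and the validation:

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustrative only: pad and upper-case "guest[.userdata]". */
	static void split_user(const char *in, char uid[9], char udat[17])
	{
		const char *dot = strchr(in, '.');
		size_t i;

		memset(uid, ' ', 8);   uid[8] = '\0';
		memset(udat, ' ', 16); udat[16] = '\0';

		for (i = 0; i < 8 && in[i] && in[i] != '.' && in[i] != '\n'; i++)
			uid[i] = toupper((unsigned char)in[i]);
		if (dot)
			for (i = 0; i < 16 && dot[1 + i] && dot[1 + i] != '\n'; i++)
				udat[i] = toupper((unsigned char)dot[1 + i]);
	}

	int main(void)
	{
		char uid[9], udat[17];

		split_user("lnxguest.mydata", uid, udat);
		printf("'%s' '%s'\n", uid, udat);  /* 'LNXGUEST' 'MYDATA          ' */
		return 0;
	}
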
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fff57de..9c3f38d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -66,7 +66,7 @@
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 static inline const char *qeth_get_cardname(struct qeth_card *card)
 {
@@ -363,6 +363,9 @@
 static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
 	int bidx, int forced_cleanup)
 {
+	if (q->card->options.cq != QETH_CQ_ENABLED)
+		return;
+
 	if (q->bufs[bidx]->next_pending != NULL) {
 		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
 		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@
 
 		}
 	}
+	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+					QETH_QDIO_BUF_HANDLED_DELAYED)) {
+		/* for recovery situations */
+		q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+		qeth_init_qdio_out_buf(q, bidx);
+		QETH_CARD_TEXT(q->card, 2, "clprecov");
+	}
 }
 
 
@@ -412,7 +422,6 @@
 		notification = TX_NOTIFY_OK;
 	} else {
 		BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
 		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
 		notification = TX_NOTIFY_DELAYED_OK;
 	}
@@ -425,7 +434,8 @@
 
 	buffer->aob = NULL;
 	qeth_clear_output_buffer(buffer->q, buffer,
-				QETH_QDIO_BUF_HANDLED_DELAYED);
+				 QETH_QDIO_BUF_HANDLED_DELAYED);
+
 	/* from here on: do not touch buffer anymore */
 	qdio_release_aob(aob);
 }
@@ -1113,11 +1123,25 @@
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 {
 	struct sk_buff *skb;
+	struct iucv_sock *iucv;
+	int notify_general_error = 0;
+
+	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+		notify_general_error = 1;
+
+	/* release may never happen from within CQ tasklet scope */
+	BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
 	skb = skb_dequeue(&buf->skb_list);
 	while (skb) {
 		QETH_CARD_TEXT(buf->q->card, 5, "skbr");
 		QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+		if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+			if (skb->sk) {
+				iucv = iucv_sk(skb->sk);
+				iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+			}
+		}
 		atomic_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
@@ -1160,7 +1184,7 @@
 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
 		if (!q->bufs[j])
 			continue;
-		qeth_cleanup_handled_pending(q, j, free);
+		qeth_cleanup_handled_pending(q, j, 1);
 		qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
 		if (free) {
 			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1207,7 +1231,7 @@
 	qeth_free_cq(card);
 	cancel_delayed_work_sync(&card->buffer_reclaim_work);
 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-		kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+		dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
 	kfree(card->qdio.in_q);
 	card->qdio.in_q = NULL;
 	/* inbound buffer pool */
@@ -1329,6 +1353,7 @@
 
 static void qeth_start_kernel_thread(struct work_struct *work)
 {
+	struct task_struct *ts;
 	struct qeth_card *card = container_of(work, struct qeth_card,
 					kernel_thread_starter);
 	QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1336,9 +1361,15 @@
 	if (card->read.state != CH_STATE_UP &&
 	    card->write.state != CH_STATE_UP)
 		return;
-	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
-		kthread_run(card->discipline.recover, (void *) card,
+	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+		ts = kthread_run(card->discipline.recover, (void *)card,
 				"qeth_recover");
+		if (IS_ERR(ts)) {
+			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+			qeth_clear_thread_running_bit(card,
+				QETH_RECOVER_THREAD);
+		}
+	}
 }
 
 static int qeth_setup_card(struct qeth_card *card)
@@ -4521,7 +4552,7 @@
 	init_data.no_output_qs           = card->qdio.no_out_queues;
 	init_data.input_handler          = card->discipline.input_handler;
 	init_data.output_handler         = card->discipline.output_handler;
-	init_data.queue_start_poll       = queue_start_poll;
+	init_data.queue_start_poll_array = queue_start_poll;
 	init_data.int_parm               = (unsigned long) card;
 	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
 	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
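
qeth_start_kernel_thread() above now checks the kthread_run() result; kthread_run() reports failure through an ERR_PTR, never NULL, so the thread-start bits must be rolled back on error. The general pattern looks like this (worker_fn and the rollback are placeholders):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static int start_worker(int (*worker_fn)(void *), void *data)
	{
		struct task_struct *ts;

		ts = kthread_run(worker_fn, data, "worker");
		if (IS_ERR(ts))
			return PTR_ERR(ts);	/* undo any "thread starting" state here */
		return 0;
	}
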
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a21ae3d..c129671 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -301,21 +301,21 @@
 	spin_unlock_bh(&card->vlanlock);
 }
 
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct qeth_vlan_vid *id;
 
 	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
 	if (!vid)
-		return;
+		return 0;
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
 		QETH_CARD_TEXT(card, 3, "aidOSM");
-		return;
+		return 0;
 	}
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "aidREC");
-		return;
+		return 0;
 	}
 	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
 	if (id) {
@@ -324,10 +324,13 @@
 		spin_lock_bh(&card->vlanlock);
 		list_add_tail(&id->list, &card->vid_list);
 		spin_unlock_bh(&card->vlanlock);
+	} else {
+		return -ENOMEM;
 	}
+	return 0;
 }
 
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_vlan_vid *id, *tmpid = NULL;
 	struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@
 	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
 		QETH_CARD_TEXT(card, 3, "kidOSM");
-		return;
+		return 0;
 	}
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "kidREC");
-		return;
+		return 0;
 	}
 	spin_lock_bh(&card->vlanlock);
 	list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@
 		kfree(tmpid);
 	}
 	qeth_l2_set_multicast_list(card->dev);
+	return 0;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@
 static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	qeth_set_allowed_threads(card, 0, 1);
 	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 	qeth_qdio_clear_card(card, 0);
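
The qeth_l2 VLAN callback changes above track the net_device_ops update in this kernel generation, where ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid return an int so errors such as -ENOMEM can propagate to the 8021q layer. A sketch of the corresponding ops wiring (my_* names are placeholders):

	#include <linux/netdevice.h>

	static int my_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
	{
		/* program the VID into hardware; return 0 or a negative errno */
		return 0;
	}

	static int my_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
	{
		return 0;
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_vlan_rx_add_vid	= my_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid	= my_vlan_rx_kill_vid,
	};
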
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4d5307d..9648e4e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1869,15 +1869,15 @@
 	qeth_l3_free_vlan_addresses6(card, vid);
 }
 
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 
 	set_bit(vid, card->active_vlans);
-	return;
+	return 0;
 }
 
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 	unsigned long flags;
@@ -1885,7 +1885,7 @@
 	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "kidREC");
-		return;
+		return 0;
 	}
 	spin_lock_irqsave(&card->vlanlock, flags);
 	/* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@
 	clear_bit(vid, card->active_vlans);
 	spin_unlock_irqrestore(&card->vlanlock, flags);
 	qeth_l3_set_multicast_list(card->dev);
+	return 0;
 }
 
 static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2759,7 +2760,7 @@
 	rcu_read_lock();
 	dst = skb_dst(skb);
 	if (dst)
-		n = dst_get_neighbour(dst);
+		n = dst_get_neighbour_noref(dst);
 	if (n) {
 		cast_type = n->type;
 		rcu_read_unlock();
@@ -2855,7 +2856,7 @@
 	rcu_read_lock();
 	dst = skb_dst(skb);
 	if (dst)
-		n = dst_get_neighbour(dst);
+		n = dst_get_neighbour_noref(dst);
 	if (ipv == 4) {
 		/* IPv4 */
 		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -3209,7 +3210,8 @@
 	return 0;
 }
 
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct qeth_card *card = dev->ml_priv;
 
@@ -3223,7 +3225,8 @@
 	return features;
 }
 
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct qeth_card *card = dev->ml_priv;
 	u32 changed = dev->features ^ features;
@@ -3489,14 +3492,13 @@
 	else
 		netif_carrier_off(card->dev);
 	if (recover_flag == CARD_STATE_RECOVER) {
+		rtnl_lock();
 		if (recovery_mode)
 			__qeth_l3_open(card->dev);
-		else {
-			rtnl_lock();
+		else
 			dev_open(card->dev);
-			rtnl_unlock();
-		}
 		qeth_l3_set_multicast_list(card->dev);
+		rtnl_unlock();
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3542,6 +3544,11 @@
 		card->info.hwtrap = 1;
 	}
 	qeth_l3_stop_card(card, recovery_mode);
+	if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+		rtnl_lock();
+		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+		rtnl_unlock();
+	}
 	rc  = ccw_device_set_offline(CARD_DDEV(card));
 	rc2 = ccw_device_set_offline(CARD_WDEV(card));
 	rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3596,6 +3603,7 @@
 static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	qeth_set_allowed_threads(card, 0, 1);
 	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 	qeth_qdio_clear_card(card, 0);
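
dst_get_neighbour_noref(), used above instead of dst_get_neighbour(), hands back a neighbour that is only valid under RCU and may be NULL, so callers bracket the access with rcu_read_lock()/rcu_read_unlock() and test the result. A sketch of that pattern for this kernel generation (get_cast_type() and the RTN_UNICAST fall-back are illustrative):

	#include <linux/rtnetlink.h>
	#include <linux/skbuff.h>
	#include <net/dst.h>
	#include <net/neighbour.h>

	static int get_cast_type(struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n;
		int cast_type = RTN_UNICAST;	/* fall-back when no neighbour is known */

		rcu_read_lock();
		n = dst ? dst_get_neighbour_noref(dst) : NULL;
		if (n)
			cast_type = n->type;
		rcu_read_unlock();
		return cast_type;
	}
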
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f8..b79576b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+	/* if previous slave_alloc returned early, there is nothing to do */
+	if (!zfcp_sdev->port)
+		return;
+
 	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
 	put_device(&zfcp_sdev->port->dev);
 }
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22..5426682 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@
 	int ret = 0;
 
 	while (len > 0) {
-		int err = bbc_i2c_writeb(client, *buf, off);
-
-		if (err < 0) {
-			ret = err;
+		ret = bbc_i2c_writeb(client, *buf, off);
+		if (ret < 0)
 			break;
-		}
-
 		len--;
 		buf++;
 		off++;
@@ -253,11 +249,9 @@
 	int ret = 0;
 
 	while (len > 0) {
-		int err = bbc_i2c_readb(client, buf, off);
-		if (err < 0) {
-			ret = err;
+		ret = bbc_i2c_readb(client, buf, off);
+		if (ret < 0)
 			break;
-		}
 		len--;
 		buf++;
 		off++;
@@ -422,17 +416,6 @@
 	.remove		= __devexit_p(bbc_i2c_remove),
 };
 
-static int __init bbc_i2c_init(void)
-{
-	return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
-	platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
 
 MODULE_LICENSE("GPL");
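
module_platform_driver(), used above for bbc_i2c and for the other sbus char drivers below, removes the init/exit boilerplate. For a driver declared as static struct platform_driver my_driver, the macro expands to roughly:

	static int __init my_driver_init(void)
	{
		return platform_driver_register(&my_driver);
	}
	module_init(my_driver_init);

	static void __exit my_driver_exit(void)
	{
		platform_driver_unregister(&my_driver);
	}
	module_exit(my_driver_exit);
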
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fc..4b99397 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@
 	.remove		= __devexit_p(d7s_remove),
 };
 
-static int __init d7s_init(void)
-{
-	return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
-	platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e5..339fd6f 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@
 	.remove		= __devexit_p(envctrl_remove),
 };
 
-static int __init envctrl_init(void)
-{
-	return platform_driver_register(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
 
-static void __exit envctrl_exit(void)
-{
-	platform_driver_unregister(&envctrl_driver);
-}
-
-module_init(envctrl_init);
-module_exit(envctrl_exit);
 MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7..826157f 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@
 	.remove		= __devexit_p(flash_remove),
 };
 
-static int __init flash_init(void)
-{
-	return platform_driver_register(&flash_driver);
-}
+module_platform_driver(flash_driver);
 
-static void __exit flash_cleanup(void)
-{
-	platform_driver_unregister(&flash_driver);
-}
-
-module_init(flash_init);
-module_exit(flash_cleanup);
 MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce963..0b31658 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@
 };
 
 
-static int __init uctrl_init(void)
-{
-	return platform_driver_register(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
 
-static void __exit uctrl_exit(void)
-{
-	platform_driver_unregister(&uctrl_driver);
-}
-
-module_init(uctrl_init);
-module_exit(uctrl_exit);
 MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 8a0b330..0bd38da 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -650,6 +650,7 @@
 				     AAC_OPT_NEW_COMM) ?
 				      (dev->scsi_host_ptr->max_sectors << 9) :
 				      65536)) {
+					kfree(usg);
 					rcode = -EINVAL;
 					goto cleanup;
 				}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index e4a7787..2e3117a 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -1,5 +1,5 @@
 /*
- * Aic7xxx SCSI host adapter firmware asssembler
+ * Aic7xxx SCSI host adapter firmware assembler
  *
  * Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs.
  * Copyright (c) 2001, 2002 Adaptec Inc.
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8b002f6..33c8f09 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -733,7 +733,7 @@
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
 }
 
-mode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t be2iscsi_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 4a1f2e3..5c45be1 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -26,7 +26,7 @@
 #define BE2_IPV4  0x1
 #define BE2_IPV6  0x10
 
-mode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t be2iscsi_attr_is_visible(int param_type, int param);
 
 void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 				struct beiscsi_offload_params *params);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 379c696..797a439 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,9 +325,9 @@
 }
 
 
-static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
 {
-	int rc;
+	umode_t rc;
 
 	switch (type) {
 	case ISCSI_BOOT_TGT_NAME:
@@ -348,9 +348,9 @@
 	return rc;
 }
 
-static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
 {
-	int rc;
+	umode_t rc;
 
 	switch (type) {
 	case ISCSI_BOOT_INI_INITIATOR_NAME:
@@ -364,9 +364,9 @@
 }
 
 
-static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
 {
-	int rc;
+	umode_t rc;
 
 	switch (type) {
 	case ISCSI_BOOT_ETH_FLAGS:
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index dee1a09..caca9b7 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -472,7 +472,7 @@
 
 struct bfad_debugfs_entry {
 	const char *name;
-	mode_t	mode;
+	umode_t	mode;
 	const struct file_operations *fops;
 };
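
The mode_t to umode_t conversions in the iSCSI and FC drivers above and below track the kernel-wide switch for permission bits: an is_visible callback now returns umode_t, i.e. permission bits such as S_IRUGO, or 0 to hide the attribute. A sketch of the callback shape (attr_is_supported() is a hypothetical predicate):

	#include <linux/kobject.h>
	#include <linux/stat.h>
	#include <linux/sysfs.h>

	static umode_t my_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int i)
	{
		if (attr_is_supported(attr))	/* hypothetical predicate */
			return S_IRUGO;		/* expose read-only */
		return 0;			/* hide this attribute */
	}
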
 
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4..1ad0b82 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@
 	spin_lock(&session->lock);
 	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
 				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-	if (!task) {
+	if (!task || !task->sc) {
 		spin_unlock(&session->lock);
 		return -EINVAL;
 	}
 	sc = task->sc;
-	spin_unlock(&session->lock);
 
 	if (!blk_rq_cpu_valid(sc->request))
 		cpu = smp_processor_id();
 	else
 		cpu = sc->request->cpu;
 
+	spin_unlock(&session->lock);
+
 	p = &per_cpu(bnx2i_percpu, cpu);
 	spin_lock(&p->p_work_lock);
 	if (unlikely(!p->iothread)) {
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index d1e6971..1a44b45 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2177,7 +2177,7 @@
 	return 0;
 }
 
-static mode_t bnx2i_attr_is_visible(int param_type, int param)
+static umode_t bnx2i_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 000294a..36739da 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,7 @@
 		csk->saddr.sin_addr.s_addr = chba->ipv4addr;
 
 	csk->rss_qid = 0;
-	csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+	csk->l2t = t3_l2t_get(t3dev, dst, ndev);
 	if (!csk->l2t) {
 		pr_err("NO l2t available.\n");
 		return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ac7a9b1..5a4a3bf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,6 +1127,7 @@
 	struct net_device *ndev = cdev->ports[csk->port_id];
 	struct port_info *pi = netdev_priv(ndev);
 	struct sk_buff *skb = NULL;
+	struct neighbour *n;
 	unsigned int step;
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
 
-	csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+	n = dst_get_neighbour_noref(csk->dst);
+	if (!n) {
+		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+		goto rel_resource;
+	}
+	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
 	if (!csk->l2t) {
 		pr_err("%s, cannot alloc l2t.\n", ndev->name);
 		goto rel_resource;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c10f74a..c5360ff 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -472,6 +472,7 @@
 	struct net_device *ndev;
 	struct cxgbi_device *cdev;
 	struct rtable *rt = NULL;
+	struct neighbour *n;
 	struct flowi4 fl4;
 	struct cxgbi_sock *csk = NULL;
 	unsigned int mtu = 0;
@@ -493,7 +494,12 @@
 		goto err_out;
 	}
 	dst = &rt->dst;
-	ndev = dst_get_neighbour(dst)->dev;
+	n = dst_get_neighbour_noref(dst);
+	if (!n) {
+		err = -ENODEV;
+		goto rel_rt;
+	}
+	ndev = n->dev;
 
 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@
 		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
 		mtu = ndev->mtu;
 		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
-			dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+			n->dev->name, ndev->name, mtu);
 	}
 
 	cdev = cxgbi_device_find_by_netdev(ndev, &port);
@@ -2569,7 +2575,7 @@
 }
 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
 
-mode_t cxgbi_attr_is_visible(int param_type, int param)
+umode_t cxgbi_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 20c88279..80fa99b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,7 +709,7 @@
 
 void cxgbi_cleanup_task(struct iscsi_task *task);
 
-mode_t cxgbi_attr_is_visible(int param_type, int param);
+umode_t cxgbi_attr_is_visible(int param_type, int param);
 void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
 int cxgbi_set_conn_param(struct iscsi_cls_conn *,
 			enum iscsi_param, char *, int);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44..8d67467 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
 			   unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+				     ulong event, void *ptr);
 
 static bool fcoe_match(struct net_device *netdev);
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@
 	.notifier_call = fcoe_cpu_callback,
 };
 
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+	.notifier_call = fcoe_dcb_app_notification,
+};
+
 static struct scsi_transport_template *fcoe_nport_scsi_transport;
 static struct scsi_transport_template *fcoe_vport_scsi_transport;
 
@@ -1522,6 +1531,8 @@
 	skb_reset_network_header(skb);
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
+	skb->priority = port->priority;
+
 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
 	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
 		skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@
 	stats->InvalidCRCCount++;
 	if (stats->InvalidCRCCount < 5)
 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+	put_cpu();
 	return -EINVAL;
 }
 
@@ -1746,6 +1758,7 @@
  */
 static void fcoe_dev_setup(void)
 {
+	register_dcbevent_notifier(&dcb_notifier);
 	register_netdevice_notifier(&fcoe_notifier);
 }
 
@@ -1754,9 +1767,69 @@
  */
 static void fcoe_dev_cleanup(void)
 {
+	unregister_dcbevent_notifier(&dcb_notifier);
 	unregister_netdevice_notifier(&fcoe_notifier);
 }
 
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *real_dev;
+
+	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+		if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+			real_dev = vlan_dev_real_dev(fcoe->netdev);
+		else
+			real_dev = fcoe->netdev;
+
+		if (netdev == real_dev)
+			return fcoe;
+	}
+	return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+				     ulong event, void *ptr)
+{
+	struct dcb_app_type *entry = ptr;
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+	struct fcoe_port *port;
+	int prio;
+
+	if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+		return NOTIFY_OK;
+
+	netdev = dev_get_by_index(&init_net, entry->ifindex);
+	if (!netdev)
+		return NOTIFY_OK;
+
+	fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+	dev_put(netdev);
+	if (!fcoe)
+		return NOTIFY_OK;
+
+	if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+		prio = ffs(entry->app.priority) - 1;
+	else
+		prio = entry->app.priority;
+
+	if (prio < 0)
+		return NOTIFY_OK;
+
+	if (entry->app.protocol == ETH_P_FIP ||
+	    entry->app.protocol == ETH_P_FCOE)
+		fcoe->ctlr.priority = prio;
+
+	if (entry->app.protocol == ETH_P_FCOE) {
+		port = lport_priv(fcoe->ctlr.lp);
+		port->priority = prio;
+	}
+
+	return NOTIFY_OK;
+}
+
 /**
  * fcoe_device_notification() - Handler for net device events
  * @notifier: The context of the notification
@@ -1965,6 +2038,46 @@
 }
 
 /**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ * @
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+	int dcbx;
+	u8 fup, up;
+	struct net_device *netdev = fcoe->realdev;
+	struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+	struct dcb_app app = {
+				.priority = 0,
+				.protocol = ETH_P_FCOE
+			     };
+
+	/* setup DCB priority attributes. */
+	if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+		if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+			app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+			up = dcb_ieee_getapp_mask(netdev, &app);
+			app.protocol = ETH_P_FIP;
+			fup = dcb_ieee_getapp_mask(netdev, &app);
+		} else {
+			app.selector = DCB_APP_IDTYPE_ETHTYPE;
+			up = dcb_getapp(netdev, &app);
+			app.protocol = ETH_P_FIP;
+			fup = dcb_getapp(netdev, &app);
+		}
+
+		port->priority = ffs(up) ? ffs(up) - 1 : 0;
+		fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+	}
+#endif
+}
+
+/**
  * fcoe_create() - Create a fcoe interface
  * @netdev  : The net_device object the Ethernet interface to create on
  * @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@
 	/* Make this the "master" N_Port */
 	fcoe->ctlr.lp = lport;
 
+	/* setup DCB priority attributes. */
+	fcoe_dcb_create(fcoe);
+
 	/* add to lports list */
 	fcoe_hostlist_add(lport);
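
In the CEE branches of fcoe_dcb_app_notification() and fcoe_dcb_create() above, the DCB APP priority arrives as a bit mask, so the code converts it to a priority number with ffs() - 1; under IEEE DCBX it is already a plain value. A tiny user-space illustration of the mask-to-priority arithmetic:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int cee_mask = 0x08;	/* example: only bit 3 set */

		/* CEE carries a priority bit mask; ffs() - 1 yields the priority. */
		printf("priority %d\n", ffs(cee_mask) - 1);	/* prints: priority 3 */
		return 0;
	}
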
 
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8..e7522dc 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@
 
 	skb_put(skb, sizeof(*sol));
 	skb->protocol = htons(ETH_P_FIP);
+	skb->priority = fip->priority;
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	fip->send(fip, skb);
@@ -474,6 +475,7 @@
 	}
 	skb_put(skb, len);
 	skb->protocol = htons(ETH_P_FIP);
+	skb->priority = fip->priority;
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	fip->send(fip, skb);
@@ -566,6 +568,7 @@
 	cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
 
 	skb->protocol = htons(ETH_P_FIP);
+	skb->priority = fip->priority;
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	return 0;
@@ -1911,6 +1914,7 @@
 
 	skb_put(skb, len);
 	skb->protocol = htons(ETH_P_FIP);
+	skb->priority = fip->priority;
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index d969855..d3e4d7c 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -359,7 +359,7 @@
     u32     cmd_buff_addr2;     /* physical address of cmd buffer 1 */   
     u32     cmd_buff_u_addr2;   /* reserved for 64 bit addressing */
     u32     cmd_buff_indx2;     /* cmd buf addr1 unique identifier */
-    u32     cmd_buff_size;      /* size of each cmd bufer in bytes */
+    u32     cmd_buff_size;      /* size of each cmd buffer in bytes */
     u32     reserved1;
     u32     reserved2;
 } __attribute__((packed)) gdth_perf_modes;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 218f71a..d77891e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -4494,7 +4494,7 @@
 /*                                                                          */
 /*   Initialize a CCB to default values                                     */
 /*                                                                          */
-/* ASSUMED to be callled from within a lock                                 */
+/* ASSUMED to be called from within a lock                                 */
 /*                                                                          */
 /****************************************************************************/
 static ips_scb_t *
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 89700cb..14c1c8f 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -112,7 +112,7 @@
 	NULL
 };
 
-static mode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
 					     struct attribute *attr, int i)
 {
 	struct iscsi_boot_kobj *boot_kobj =
@@ -193,7 +193,7 @@
 	NULL
 };
 
-static mode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
 					     struct attribute *attr, int i)
 {
 	struct iscsi_boot_kobj *boot_kobj =
@@ -265,7 +265,7 @@
 	NULL
 };
 
-static mode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
 					     struct attribute *attr, int i)
 {
 	struct iscsi_boot_kobj *boot_kobj =
@@ -306,7 +306,7 @@
 		       struct attribute_group *attr_group,
 		       const char *name, int index, void *data,
 		       ssize_t (*show) (void *data, int type, char *buf),
-		       mode_t (*is_visible) (void *data, int type),
+		       umode_t (*is_visible) (void *data, int type),
 		       void (*release) (void *data))
 {
 	struct iscsi_boot_kobj *boot_kobj;
@@ -369,7 +369,7 @@
 iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
 			 void *data,
 			 ssize_t (*show) (void *data, int type, char *buf),
-			 mode_t (*is_visible) (void *data, int type),
+			 umode_t (*is_visible) (void *data, int type),
 			 void (*release) (void *data))
 {
 	return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
@@ -394,7 +394,7 @@
 iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
 			    void *data,
 			    ssize_t (*show) (void *data, int type, char *buf),
-			    mode_t (*is_visible) (void *data, int type),
+			    umode_t (*is_visible) (void *data, int type),
 			    void (*release) (void *data))
 {
 	return iscsi_boot_create_kobj(boot_kset,
@@ -420,7 +420,7 @@
 iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
 			   void *data,
 			   ssize_t (*show) (void *data, int type, char *buf),
-			   mode_t (*is_visible) (void *data, int type),
+			   umode_t (*is_visible) (void *data, int type),
 			   void (*release) (void *data))
 {
 	return iscsi_boot_create_kobj(boot_kset,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7c34d8e..db47158 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -873,7 +873,7 @@
 	iscsi_host_free(shost);
 }
 
-static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
+static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 08e26d4..27cfb0c 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
 /* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
  *
- * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 590ce1e..4ceeace 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -25,6 +25,7 @@
 #include <asm/dma.h>
 #include <asm/macints.h>
 #include <asm/macintosh.h>
+#include <asm/mac_via.h>
 
 #include <scsi/scsi_host.h>
 
@@ -149,7 +150,7 @@
 
 	do {
 		if (mep->pdma_regs == NULL) {
-			if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+			if (via2_scsi_drq_pending())
 				return 0;
 		} else {
 			if (nubus_readl(mep->pdma_regs) & 0x200)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6..d570573 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4335,7 +4335,7 @@
 	/* insert into event log */
 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-	event_reply = kzalloc(sz, GFP_KERNEL);
+	event_reply = kzalloc(sz, GFP_ATOMIC);
 	if (!event_reply) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 5163edb..ea8a0b4 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4105,7 +4105,7 @@
 	hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
 
 	if (!hdr) {
-		pmcraid_err("faile to allocate memory for ioctl header\n");
+		pmcraid_err("failed to allocate memory for ioctl header\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c4..6465dae 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1762,12 +1762,31 @@
 	scsi_qla_host_t *vha = shost_priv(shost);
 	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-	if (!base_vha->flags.online)
+	if (!base_vha->flags.online) {
 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-	else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
-		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
-	else
+		return;
+	}
+
+	switch (atomic_read(&base_vha->loop_state)) {
+	case LOOP_UPDATE:
+		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+		break;
+	case LOOP_DOWN:
+		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+		else
+			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+		break;
+	case LOOP_DEAD:
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+		break;
+	case LOOP_READY:
 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+		break;
+	default:
+		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+		break;
+	}
 }
 
 static int
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787..f3cddd5 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,17 +12,17 @@
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
  * | Module Init and Probe        |       0x0116       |  		|
- * | Mailbox commands             |       0x1129       |		|
+ * | Mailbox commands             |       0x112b       |		|
  * | Device Discovery             |       0x2083       |		|
  * | Queue Command and IO tracing |       0x302e       |     0x3008     |
  * | DPC Thread                   |       0x401c       |		|
  * | Async Events                 |       0x5059       |		|
- * | Timer Routines               |       0x600d       |		|
+ * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
  * | User Space Interactions      |       0x709d       |		|
- * | Task Management              |       0x8041       |    		|
+ * | Task Management              |       0x8041       | 0x800b         |
  * | AER/EEH                      |       0x900f       |		|
  * | Virtual Port                 |       0xa007       |		|
- * | ISP82XX Specific             |       0xb051       |    		|
+ * | ISP82XX Specific             |       0xb052       |    		|
  * | MultiQ                       |       0xc00b       |		|
  * | Misc                         |       0xd00b       |		|
  * ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d81..c0c11af 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -578,6 +578,7 @@
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f..54ea68c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1509,7 +1509,8 @@
 				    &ha->fw_xcb_count, NULL, NULL,
 				    &ha->max_npiv_vports, NULL);
 
-				if (!fw_major_version && ql2xallocfwdump)
+				if (!fw_major_version && ql2xallocfwdump
+				    && !IS_QLA82XX(ha))
 					qla2x00_alloc_fw_dump(vha);
 			}
 		} else {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec896..a4b267e 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -120,11 +120,10 @@
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 {
 	cont_a64_entry_t *cont_pkt;
 
-	struct req_que *req = vha->req;
 	/* Adjust ring index. */
 	req->ring_index++;
 	if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@
 			 * Five DSDs are available in the Continuation
 			 * Type 1 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 		}
@@ -684,7 +683,7 @@
 			 * Five DSDs are available in the Continuation
 			 * Type 1 IOCB.
 			 */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 		}
@@ -2070,7 +2069,8 @@
 			* Five DSDs are available in the Cont.
 			* Type 1 IOCB.
 			       */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+			    vha->hw->req_q_map[0]);
 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 			cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@
 	int index;
 	uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
 	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
 	int loop_iterartion = 0;
 	int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@
 			* Five DSDs are available in the Cont.
 			* Type 1 IOCB.
 			       */
-			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+			    ha->req_q_map[0]);
 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
 			avail_dsds = 5;
 			cont_iocb_prsnt = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf..7b91b29 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1741,7 +1741,7 @@
 				    resid, scsi_bufflen(cp));
 
 				cp->result = DID_ERROR << 16 | lscsi_status;
-				break;
+				goto check_scsi_status;
 			}
 
 			if (!lscsi_status &&
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9..82a3353 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@
 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
 		ql_log(ql_log_warn, base_vha, 0x1004,
 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
-		rval = QLA_FUNCTION_FAILED;
-		goto premature_exit;
+		return QLA_FUNCTION_TIMEOUT;
 	}
 
 	/*
@@ -163,6 +162,7 @@
 				HINT_MBX_INT_PENDING) {
 				spin_unlock_irqrestore(&ha->hardware_lock,
 					flags);
+				ha->flags.mbox_busy = 0;
 				ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
 				    "Pending mailbox timeout, exiting.\n");
 				rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@
 				HINT_MBX_INT_PENDING) {
 				spin_unlock_irqrestore(&ha->hardware_lock,
 					flags);
+				ha->flags.mbox_busy = 0;
 				ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
 				    "Pending mailbox timeout, exiting.\n");
 				rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@
 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+				if (IS_QLA82XX(ha)) {
+					ql_dbg(ql_dbg_mbx, vha, 0x112a,
+					    "disabling pause transmit on port "
+					    "0 & 1.\n");
+					qla82xx_wr_32(ha,
+					    QLA82XX_CRB_NIU + 0x98,
+					    CRB_NIU_XG_PAUSE_CTL_P0|
+					    CRB_NIU_XG_PAUSE_CTL_P1);
+				}
 				ql_log(ql_log_info, base_vha, 0x101c,
 				    "Mailbox cmd timeout occured. "
 				    "Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@
 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+				if (IS_QLA82XX(ha)) {
+					ql_dbg(ql_dbg_mbx, vha, 0x112b,
+					    "disabling pause transmit on port "
+					    "0 & 1.\n");
+					qla82xx_wr_32(ha,
+					    QLA82XX_CRB_NIU + 0x98,
+					    CRB_NIU_XG_PAUSE_CTL_P0|
+					    CRB_NIU_XG_PAUSE_CTL_P1);
+				}
 				ql_log(ql_log_info, base_vha, 0x101e,
 				    "Mailbox cmd timeout occured. "
 				    "Scheduling ISP abort.\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5..1873940 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1055,7 +1055,7 @@
 		    "ROM lock failed.\n");
 		return -1;
 	}
-	return 0;;
+	return 0;
 }
 
 static int
@@ -3817,6 +3817,20 @@
 	return rval;
 }
 
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->flags.mbox_busy) {
+		ha->flags.mbox_int = 1;
+		ha->flags.mbox_busy = 0;
+		ql_log(ql_log_warn, vha, 0x6010,
+		    "Doing premature completion of mbx command.\n");
+		if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+			complete(&ha->mbx_intr_comp);
+	}
+}
+
 void qla82xx_watchdog(scsi_qla_host_t *vha)
 {
 	uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@
 			qla2xxx_wake_dpc(vha);
 		} else {
 			if (qla82xx_check_fw_alive(vha)) {
+				ql_dbg(ql_dbg_timer, vha, 0x6011,
+				    "disabling pause transmit on port 0 & 1.\n");
+				qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+				    CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
 				halt_status = qla82xx_rd_32(ha,
 				    QLA82XX_PEG_HALT_STATUS1);
-				ql_dbg(ql_dbg_timer, vha, 0x6005,
+				ql_log(ql_log_info, vha, 0x6005,
 				    "dumping hw/fw registers:.\n "
 				    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
 				    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@
 					    QLA82XX_CRB_PEG_NET_3 + 0x3c),
 				    qla82xx_rd_32(ha,
 					    QLA82XX_CRB_PEG_NET_4 + 0x3c));
+				if (LSW(MSB(halt_status)) == 0x67)
+					ql_log(ql_log_warn, vha, 0xb052,
+					    "Firmware aborted with "
+					    "error code 0x00006700. Device is "
+					    "being reset.\n");
 				if (halt_status & HALT_STATUS_UNRECOVERABLE) {
 					set_bit(ISP_UNRECOVERABLE,
 					    &vha->dpc_flags);
@@ -3869,16 +3892,8 @@
 				}
 				qla2xxx_wake_dpc(vha);
 				ha->flags.isp82xx_fw_hung = 1;
-				if (ha->flags.mbox_busy) {
-					ha->flags.mbox_int = 1;
-					ql_log(ql_log_warn, vha, 0x6007,
-					    "Due to FW hung, doing "
-					    "premature completion of mbx "
-					    "command.\n");
-					if (test_bit(MBX_INTR_WAIT,
-					    &ha->mbx_cmd_flags))
-						complete(&ha->mbx_intr_comp);
-				}
+				ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+				qla82xx_clear_pending_mbx(vha);
 			}
 		}
 	}
@@ -4073,10 +4088,7 @@
 			msleep(1000);
 			if (qla82xx_check_fw_alive(vha)) {
 				ha->flags.isp82xx_fw_hung = 1;
-				if (ha->flags.mbox_busy) {
-					ha->flags.mbox_int = 1;
-					complete(&ha->mbx_intr_comp);
-				}
+				qla82xx_clear_pending_mbx(vha);
 				break;
 			}
 		}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c1..57a226b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@
 
 static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
 	0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0        0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1        0x8
+
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7b..f9e5b85 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -201,12 +201,12 @@
 		"Set the Minidump driver capture mask level. "
 		"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
-int ql2xmdenable;
+int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdenable,
 		"Enable/disable MiniDump. "
-		"0 (Default) - MiniDump disabled. "
-		"1 - MiniDump enabled.");
+		"0 - MiniDump disabled. "
+		"1 (Default) - MiniDump enabled.");
 
 /*
  * SCSI host template entry points
@@ -423,6 +423,7 @@
 	qla25xx_delete_queues(vha);
 	destroy_workqueue(ha->wq);
 	ha->wq = NULL;
+	vha->req = ha->req_q_map[0];
 fail:
 	ha->mqenable = 0;
 	kfree(ha->req_q_map);
@@ -814,49 +815,6 @@
 	return return_status;
 }
 
-/*
- * qla2x00_wait_for_loop_ready
- *    Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- *    to be in LOOP_READY state.
- * Input:
- *     ha - pointer to host adapter structure
- *
- * Note:
- *    Does context switching-Release SPIN_LOCK
- *    (if any) before calling this routine.
- *
- *
- * Return:
- *    Success (LOOP_READY) : 0
- *    Failed  (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
-	int 	 return_status = QLA_SUCCESS;
-	unsigned long loop_timeout ;
-	struct qla_hw_data *ha = vha->hw;
-	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
-	/* wait for 5 min at the max for loop to be ready */
-	loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
-	while ((!atomic_read(&base_vha->loop_down_timer) &&
-	    atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
-	    atomic_read(&base_vha->loop_state) != LOOP_READY) {
-		if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
-			return_status = QLA_FUNCTION_FAILED;
-			break;
-		}
-		msleep(1000);
-		if (time_after_eq(jiffies, loop_timeout)) {
-			return_status = QLA_FUNCTION_FAILED;
-			break;
-		}
-	}
-	return (return_status);
-}
-
 static void
 sp_get(struct srb *sp)
 {
@@ -1035,12 +993,6 @@
 		    "Wait for hba online failed for cmd=%p.\n", cmd);
 		goto eh_reset_failed;
 	}
-	err = 1;
-	if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
-		ql_log(ql_log_warn, vha, 0x800b,
-		    "Wait for loop ready failed for cmd=%p.\n", cmd);
-		goto eh_reset_failed;
-	}
 	err = 2;
 	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
 		!= QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@
 		goto eh_bus_reset_done;
 	}
 
-	if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
-		if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
-			ret = SUCCESS;
-	}
+	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+		ret = SUCCESS;
+
 	if (ret == FAILED)
 		goto eh_bus_reset_done;
 
@@ -1206,15 +1157,6 @@
 	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
 		goto eh_host_reset_lock;
 
-	/*
-	 * Fixme-may be dpc thread is active and processing
-	 * loop_resync,so wait a while for it to
-	 * be completed and then issue big hammer.Otherwise
-	 * it may cause I/O failure as big hammer marks the
-	 * devices as lost kicking of the port_down_timer
-	 * while dpc is stuck for the mailbox to complete.
-	 */
-	qla2x00_wait_for_loop_ready(vha);
 	if (vha != base_vha) {
 		if (qla2x00_vp_abort_isp(vha))
 			goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@
 		atomic_set(&vha->loop_state, LOOP_DOWN);
 		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 		qla2x00_mark_all_devices_lost(vha, 0);
-		qla2x00_wait_for_loop_ready(vha);
 	}
 
 	if (ha->flags.enable_lip_reset) {
 		ret = qla2x00_lip_reset(vha);
-		if (ret != QLA_SUCCESS) {
+		if (ret != QLA_SUCCESS)
 			ql_dbg(ql_dbg_taskm, vha, 0x802e,
 			    "lip_reset failed (%d).\n", ret);
-		} else
-			qla2x00_wait_for_loop_ready(vha);
 	}
 
 	/* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@
 		/* For ISP82XX complete any pending mailbox cmd */
 		if (IS_QLA82XX(ha)) {
 			ha->flags.isp82xx_fw_hung = 1;
-			if (ha->flags.mbox_busy) {
-				ha->flags.mbox_int = 1;
-				ql_dbg(ql_dbg_aer, vha, 0x9001,
-				    "Due to pci channel io frozen, doing premature "
-				    "completion of mbx command.\n");
-				complete(&ha->mbx_intr_comp);
-			}
+			ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+			qla82xx_clear_pending_mbx(vha);
 		}
 		qla2x00_free_irqs(vha);
 		pci_disable_device(pdev);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357..23f33a6 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.07-k"
+#define QLA2XXX_VERSION      "8.03.07.12-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637b..fd5edc6 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
 #define ISCSI_ALIAS_SIZE		32	/* ISCSI Alias name size */
 #define ISCSI_NAME_SIZE			0xE0	/* ISCSI Name size */
 
-#define QL4_SESS_RECOVERY_TMO		30	/* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO		120	/* iSCSI session */
 						/* recovery timeout */
 
 #define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
 #define ISNS_DEREG_TOV			5
 #define HBA_ONLINE_TOV			30
 #define DISABLE_ACB_TOV			30
+#define IP_CONFIG_TOV			30
+#define LOGIN_TOV			12
 
 #define MAX_RESET_HA_RETRIES		2
 
@@ -240,6 +242,45 @@
 
 	uint16_t fw_ddb_index;	/* DDB firmware index */
 	uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+	uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+	struct dev_db_entry fw_ddb_entry;
+	int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+	int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			  struct ddb_entry *ddb_entry, uint32_t state);
+
+	/* Driver Re-login  */
+	unsigned long flags;		  /* DDB Flags */
+	uint16_t default_relogin_timeout; /*  Max time to wait for
+					   *  relogin to complete */
+	atomic_t retry_relogin_timer;	  /* Min Time between relogins
+					   * (4000 only) */
+	atomic_t relogin_timer;		  /* Max Time to wait for
+					   * relogin to complete */
+	atomic_t relogin_retry_count;	  /* Num of times relogin has been
+					   * retried */
+	uint32_t default_time2wait;	  /* Default Min time between
+					   * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+	struct list_head list;
+	uint16_t fw_ddb_idx;
+	struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+	int port;
+	int tpgt;
+	char ip_addr[DDB_IPADDR_LEN];
+	char iscsi_name[ISCSI_NAME_SIZE];
+	uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
 };
 
 /*
@@ -411,7 +452,7 @@
 #define AF_FW_RECOVERY			19 /* 0x00080000 */
 #define AF_EEH_BUSY			20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE	21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST		22 /* 0x00400000 */
 	unsigned long dpc_flags;
 
 #define DPC_RESET_HA			1 /* 0x00000002 */
@@ -604,6 +645,7 @@
 	uint16_t bootload_minor;
 	uint16_t bootload_patch;
 	uint16_t bootload_build;
+	uint16_t def_timeout; /* Default login timeout */
 
 	uint32_t flash_state;
 #define	QLFLASH_WAITING		0
@@ -623,6 +665,11 @@
 	uint16_t iscsi_pci_func_cnt;
 	uint8_t model_name[16];
 	struct completion disable_acb_comp;
+	struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+	uint16_t pri_ddb_idx;
+	uint16_t sec_ddb_idx;
+	int is_reset;
 };
 
 struct ql4_task_data {
@@ -835,6 +882,10 @@
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER    0
+#define RESET_ADAPTER   1
+
 #define PRESERVE_DDB_LIST	0
 #define REBUILD_DDB_LIST	1
 
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20..7825c14 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
 #define MAX_PRST_DEV_DB_ENTRIES		64
 #define MIN_DISC_DEV_DB_ENTRY		MAX_PRST_DEV_DB_ENTRIES
 #define MAX_DEV_DB_ENTRIES		512
+#define MAX_DEV_DB_ENTRIES_40XX		256
 
 /*************************************************************************
  *
@@ -604,6 +605,13 @@
 	uint8_t res14[140];	/* 274-2FF */
 };
 
+#define IP_ADDR_COUNT	4 /* Total of 4 IP addresses supported on one interface:
+			   * One IPv4, one IPv6 link local and 2 IPv6
+			   */
+
+#define IP_STATE_MASK	0x0F000000
+#define IP_STATE_SHIFT	24
+
 struct init_fw_ctrl_blk {
 	struct addr_ctrl_blk pri;
 /*	struct addr_ctrl_blk sec;*/
@@ -744,7 +752,7 @@
 	uint8_t res4[0x36];	/* 8A-BF */
 	uint8_t iscsi_name[0xE0];	/* C0-19F : xxzzy Make this a
 					 * pointer to a string so we
-					 * don't have to reserve soooo
+					 * don't have to reserve so
 					 * much RAM */
 	uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
 	uint8_t res5[0x10];	/* 1B0-1BF */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d..d0dd4b3 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@
 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
 int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
 
@@ -153,10 +153,13 @@
 			  uint32_t *mbx_sts);
 int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
 int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
 int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
 			  uint16_t stats_size, dma_addr_t stats_dma);
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 				       struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+					     struct ddb_entry *ddb_entry);
 int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
 			    struct dev_db_entry *fw_ddb_entry,
 			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@
 int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
 				     uint32_t region, uint32_t field0,
 				     uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			     struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+		       struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fba..1bdfa81 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@
  * be freed so that when login happens from user space there are free DDB
  * indices available.
  **/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
 {
 	int max_ddbs;
 	int ret;
 	uint32_t idx = 0, next_idx = 0;
 	uint32_t state = 0, conn_err = 0;
 
-	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
 				     MAX_DEV_DB_ENTRIES;
 
 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
 		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
 					      &next_idx, &state, &conn_err,
 						NULL, NULL);
-		if (ret == QLA_ERROR)
+		if (ret == QLA_ERROR) {
+			next_idx++;
 			continue;
+		}
 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
 		    state == DDB_DS_SESSION_FAILED) {
 			DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@
 	}
 }
 
-
 /**
  * qla4xxx_initialize_adapter - initiailizes hba
  * @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@
  * This routine parforms all of the steps necessary to initialize the adapter.
  *
  **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
 {
 	int status = QLA_ERROR;
 
@@ -840,7 +841,8 @@
 	if (status == QLA_ERROR)
 		goto exit_init_hba;
 
-	qla4xxx_free_ddb_index(ha);
+	if (is_reset == RESET_ADAPTER)
+		qla4xxx_build_ddb_list(ha, is_reset);
 
 	set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
@@ -855,38 +857,12 @@
 	return status;
 }
 
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
-		uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+		       struct ddb_entry *ddb_entry, uint32_t state)
 {
-	struct ddb_entry * ddb_entry;
 	uint32_t old_fw_ddb_device_state;
 	int status = QLA_ERROR;
 
-	/* check for out of range index */
-	if (fw_ddb_index >= MAX_DDB_ENTRIES)
-		goto exit_ddb_event;
-
-	/* Get the corresponging ddb entry */
-	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
-	/* Device does not currently exist in our database. */
-	if (ddb_entry == NULL) {
-		ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
-			   __func__, fw_ddb_index);
-
-		if (state == DDB_DS_NO_CONNECTION_ACTIVE)
-			clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
-		goto exit_ddb_event;
-	}
-
 	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
 	DEBUG2(ql4_printk(KERN_INFO, ha,
 			  "%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@
 		switch (state) {
 		case DDB_DS_SESSION_ACTIVE:
 		case DDB_DS_DISCOVERY:
-			iscsi_conn_start(ddb_entry->conn);
-			iscsi_conn_login_event(ddb_entry->conn,
-					       ISCSI_CONN_STATE_LOGGED_IN);
+			ddb_entry->unblock_sess(ddb_entry->sess);
 			qla4xxx_update_session_conn_param(ha, ddb_entry);
 			status = QLA_SUCCESS;
 			break;
@@ -936,9 +910,7 @@
 		switch (state) {
 		case DDB_DS_SESSION_ACTIVE:
 		case DDB_DS_DISCOVERY:
-			iscsi_conn_start(ddb_entry->conn);
-			iscsi_conn_login_event(ddb_entry->conn,
-					       ISCSI_CONN_STATE_LOGGED_IN);
+			ddb_entry->unblock_sess(ddb_entry->sess);
 			qla4xxx_update_session_conn_param(ha, ddb_entry);
 			status = QLA_SUCCESS;
 			break;
@@ -954,7 +926,198 @@
 				__func__));
 		break;
 	}
+	return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+	/*
+	 * This triggers a relogin.  After the relogin_timer
+	 * expires, the relogin gets scheduled.  We must wait a
+	 * minimum amount of time since receiving an 0x8014 AEN
+	 * with failed device_state or a logout response before
+	 * we can issue another relogin.
+	 *
+	 * Firmware pads this timeout: (time2wait +1).
+	 * The driver's relogin retry interval should be longer than
+	 * the firmware's; otherwise the firmware will fail the
+	 * set_ddb() mbx cmd with 0x4005 since it is still
+	 * counting down its time2wait.
+	 */
+	atomic_set(&ddb_entry->relogin_timer, 0);
+	atomic_set(&ddb_entry->retry_relogin_timer,
+		   ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			     struct ddb_entry *ddb_entry, uint32_t state)
+{
+	uint32_t old_fw_ddb_device_state;
+	int status = QLA_ERROR;
+
+	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: DDB - old state = 0x%x, new state = 0x%x for "
+			  "index [%d]\n", __func__,
+			  ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+	ddb_entry->fw_ddb_device_state = state;
+
+	switch (old_fw_ddb_device_state) {
+	case DDB_DS_LOGIN_IN_PROCESS:
+	case DDB_DS_NO_CONNECTION_ACTIVE:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+			ddb_entry->unblock_sess(ddb_entry->sess);
+			qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+			iscsi_block_session(ddb_entry->sess);
+			if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+				qla4xxx_arm_relogin_timer(ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_ACTIVE:
+		switch (state) {
+		case DDB_DS_SESSION_FAILED:
+			iscsi_block_session(ddb_entry->sess);
+			if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+				qla4xxx_arm_relogin_timer(ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_FAILED:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+			ddb_entry->unblock_sess(ddb_entry->sess);
+			qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+			if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+				qla4xxx_arm_relogin_timer(ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	default:
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+				  __func__));
+		break;
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+				uint32_t fw_ddb_index,
+				uint32_t state, uint32_t conn_err)
+{
+	struct ddb_entry *ddb_entry;
+	int status = QLA_ERROR;
+
+	/* check for out of range index */
+	if (fw_ddb_index >= MAX_DDB_ENTRIES)
+		goto exit_ddb_event;
+
+	/* Get the corresponding ddb entry */
+	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+	/* Device does not currently exist in our database. */
+	if (ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+			   __func__, fw_ddb_index);
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+			clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+		goto exit_ddb_event;
+	}
+
+	ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
 
 exit_ddb_event:
 	return status;
 }
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logins to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_dma;
+	uint32_t mbx_sts = 0;
+	int ret;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha =  ddb_entry->ha;
+
+	if (!test_bit(AF_LINK_UP, &ha->flags))
+		return;
+
+	if (ddb_entry->ddb_type != FLASH_DDB) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Skipping login to non FLASH DB"));
+		goto exit_login;
+	}
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_login;
+	}
+
+	if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+		ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+		if (ret == QLA_ERROR)
+			goto exit_login;
+
+		ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+		ha->tot_ddbs++;
+	}
+
+	memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+	       sizeof(struct dev_db_entry));
+	ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+	ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+				    fw_ddb_dma, &mbx_sts);
+	if (ret == QLA_ERROR) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+		goto exit_login;
+	}
+
+	ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+	ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+	if (ret == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+			   sess->targetname);
+		goto exit_login;
+	}
+
+exit_login:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b848..c259378 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@
 		return status;
 	}
 
+	if (is_qla40XX(ha)) {
+		if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+					  "prematurely completing mbx cmd as "
+					  "adapter removal detected\n",
+					  ha->host_no, __func__));
+			return status;
+		}
+	}
+
 	if (is_qla8022(ha)) {
 		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
 			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@
 	memcpy(ha->name_string, init_fw_cb->iscsi_name,
 		min(sizeof(ha->name_string),
 		sizeof(init_fw_cb->iscsi_name)));
+	ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
 	/*memcpy(ha->alias, init_fw_cb->Alias,
 	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b1..78bf700 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -31,6 +32,13 @@
 /*
  * Module parameter information and variables
  */
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+		"Set to disable exporting boot targets to sysfs\n"
+		" 0 - Export boot targets\n"
+		" 1 - Do not export boot targets (Default)");
+
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
 		"Target Session Recovery Timeout.\n"
-		" Default: 30 sec.");
+		" Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -120,7 +128,7 @@
 static int qla4xxx_slave_alloc(struct scsi_device *device);
 static int qla4xxx_slave_configure(struct scsi_device *device);
 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static mode_t ql4_attr_is_visible(int param_type, int param);
+static umode_t ql4_attr_is_visible(int param_type, int param);
 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
 
 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
@@ -189,7 +197,7 @@
 
 static struct scsi_transport_template *qla4xxx_scsi_transport;
 
-static mode_t ql4_attr_is_visible(int param_type, int param)
+static umode_t ql4_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_HOST_PARAM:
@@ -415,7 +423,7 @@
 	qla_ep = ep->dd_data;
 	ha = to_qla_host(qla_ep->host);
 
-	if (adapter_up(ha))
+	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
 		ret = 1;
 
 	return ret;
@@ -975,6 +983,150 @@
 
 }
 
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+	uint32_t mbx_sts = 0;
+	uint16_t tmp_ddb_index;
+	int ret;
+
+get_ddb_index:
+	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Free DDB index not available\n"));
+		ret = QLA_ERROR;
+		goto exit_get_ddb_index;
+	}
+
+	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+		goto get_ddb_index;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Found a free DDB index at %d\n", tmp_ddb_index));
+	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+	if (ret == QLA_ERROR) {
+		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+			ql4_printk(KERN_INFO, ha,
+				   "DDB index = %d not available trying next\n",
+				   tmp_ddb_index);
+			goto get_ddb_index;
+		}
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Free FW DDB not available\n"));
+	}
+
+	*ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+	return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+				   struct ddb_entry *ddb_entry,
+				   char *existing_ipaddr,
+				   char *user_ipaddr)
+{
+	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+	char formatted_ipaddr[DDB_IPADDR_LEN];
+	int status = QLA_SUCCESS, ret = 0;
+
+	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+			       '\0', NULL);
+		if (ret == 0) {
+			status = QLA_ERROR;
+			goto out_match;
+		}
+		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+	} else {
+		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+			       '\0', NULL);
+		if (ret == 0) {
+			status = QLA_ERROR;
+			goto out_match;
+		}
+		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+	}
+
+	if (strcmp(existing_ipaddr, formatted_ipaddr))
+		status = QLA_ERROR;
+
+out_match:
+	return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+				      struct iscsi_cls_conn *cls_conn)
+{
+	int idx = 0, max_ddbs, rval;
+	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+	struct iscsi_session *sess, *existing_sess;
+	struct iscsi_conn *conn, *existing_conn;
+	struct ddb_entry *ddb_entry;
+
+	sess = cls_sess->dd_data;
+	conn = cls_conn->dd_data;
+
+	if (sess->targetname == NULL ||
+	    conn->persistent_address == NULL ||
+	    conn->persistent_port == 0)
+		return QLA_ERROR;
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+
+	for (idx = 0; idx < max_ddbs; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		if (ddb_entry->ddb_type != FLASH_DDB)
+			continue;
+
+		existing_sess = ddb_entry->sess->dd_data;
+		existing_conn = ddb_entry->conn->dd_data;
+
+		if (existing_sess->targetname == NULL ||
+		    existing_conn->persistent_address == NULL ||
+		    existing_conn->persistent_port == 0)
+			continue;
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "IQN = %s User IQN = %s\n",
+				  existing_sess->targetname,
+				  sess->targetname));
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "IP = %s User IP = %s\n",
+				  existing_conn->persistent_address,
+				  conn->persistent_address));
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Port = %d User Port = %d\n",
+				  existing_conn->persistent_port,
+				  conn->persistent_port));
+
+		if (strcmp(existing_sess->targetname, sess->targetname))
+			continue;
+		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+					existing_conn->persistent_address,
+					conn->persistent_address);
+		if (rval == QLA_ERROR)
+			continue;
+		if (existing_conn->persistent_port != conn->persistent_port)
+			continue;
+		break;
+	}
+
+	if (idx == max_ddbs)
+		return QLA_ERROR;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Match found in fwdb sessions\n"));
+	return QLA_SUCCESS;
+}
+
 static struct iscsi_cls_session *
 qla4xxx_session_create(struct iscsi_endpoint *ep,
 			uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@
 	struct scsi_qla_host *ha;
 	struct qla_endpoint *qla_ep;
 	struct ddb_entry *ddb_entry;
-	uint32_t ddb_index;
-	uint32_t mbx_sts = 0;
+	uint16_t ddb_index;
 	struct iscsi_session *sess;
 	struct sockaddr *dst_addr;
 	int ret;
@@ -1000,32 +1151,9 @@
 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
 	ha = to_qla_host(qla_ep->host);
 
-get_ddb_index:
-	ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
-	if (ddb_index >= MAX_DDB_ENTRIES) {
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-				  "Free DDB index not available\n"));
+	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+	if (ret == QLA_ERROR)
 		return NULL;
-	}
-
-	if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
-		goto get_ddb_index;
-
-	DEBUG2(ql4_printk(KERN_INFO, ha,
-			  "Found a free DDB index at %d\n", ddb_index));
-	ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
-	if (ret == QLA_ERROR) {
-		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
-			ql4_printk(KERN_INFO, ha,
-				   "DDB index = %d not available trying next\n",
-				   ddb_index);
-			goto get_ddb_index;
-		}
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-				  "Free FW DDB not available\n"));
-		return NULL;
-	}
 
 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
 				       cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@
 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
 	ddb_entry->ha = ha;
 	ddb_entry->sess = cls_sess;
+	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+	ddb_entry->ddb_change = qla4xxx_ddb_change;
 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
 	ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@
 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
 				    conn_idx);
+	if (!cls_conn)
+		return NULL;
+
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@
 	struct iscsi_session *sess;
 	struct ddb_entry *ddb_entry;
 	struct scsi_qla_host *ha;
-	struct dev_db_entry *fw_ddb_entry;
+	struct dev_db_entry *fw_ddb_entry = NULL;
 	dma_addr_t fw_ddb_entry_dma;
 	uint32_t mbx_sts = 0;
 	int ret = 0;
@@ -1120,12 +1253,25 @@
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
 
+	/* Check if we have a matching FW DDB; if so, do not log in to
+	 * this target, as that could cause the target to log out the
+	 * previous connection.
+	 */
+	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+	if (ret == QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha,
+			   "Session already exists in FW.\n");
+		ret = -EEXIST;
+		goto exit_conn_start;
+	}
+
 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
 					  &fw_ddb_entry_dma, GFP_KERNEL);
 	if (!fw_ddb_entry) {
 		ql4_printk(KERN_ERR, ha,
 			   "%s: Unable to allocate dma buffer\n", __func__);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto exit_conn_start;
 	}
 
 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@
 		if (mbx_sts)
 			if (ddb_entry->fw_ddb_device_state ==
 						DDB_DS_SESSION_ACTIVE) {
-				iscsi_conn_start(ddb_entry->conn);
-				iscsi_conn_login_event(ddb_entry->conn,
-						ISCSI_CONN_STATE_LOGGED_IN);
+				ddb_entry->unblock_sess(ddb_entry->sess);
 				goto exit_set_param;
 			}
 
@@ -1167,8 +1311,9 @@
 	ret = 0;
 
 exit_conn_start:
-	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-			  fw_ddb_entry, fw_ddb_entry_dma);
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
 	return ret;
 }
 
@@ -1344,6 +1489,101 @@
 	return -ENOSYS;
 }
 
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+				     struct dev_db_entry *fw_ddb_entry,
+				     struct iscsi_cls_session *cls_sess,
+				     struct iscsi_cls_conn *cls_conn)
+{
+	int buflen = 0;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	char ip_addr[DDB_IPADDR_LEN];
+	uint16_t options = 0;
+
+	sess = cls_sess->dd_data;
+	conn = cls_conn->dd_data;
+
+	conn->max_recv_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+	conn->max_xmit_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+	sess->initial_r2t_en =
+			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+	sess->first_burst = BYTE_UNITS *
+			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+	sess->max_burst = BYTE_UNITS *
+				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE)
+		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+	else
+		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+			(char *)fw_ddb_entry->iscsi_name, buflen);
+	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+			(char *)ha->name_string, buflen);
+	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+			(char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+					     struct ddb_entry *ddb_entry)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_cls_conn *cls_conn;
+	uint32_t ddb_state;
+	dma_addr_t fw_ddb_entry_dma;
+	struct dev_db_entry *fw_ddb_entry;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto exit_session_conn_fwddb_param;
+	}
+
+	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+				    NULL, NULL, NULL) == QLA_ERROR) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+				  "get_ddb_entry for fw_ddb_index %d\n",
+				  ha->host_no, __func__,
+				  ddb_entry->fw_ddb_index));
+		goto exit_session_conn_fwddb_param;
+	}
+
+	cls_sess = ddb_entry->sess;
+
+	cls_conn = ddb_entry->conn;
+
+	/* Update params */
+	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+}
+
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 				       struct ddb_entry *ddb_entry)
 {
@@ -1360,7 +1600,7 @@
 	if (!fw_ddb_entry) {
 		ql4_printk(KERN_ERR, ha,
 			   "%s: Unable to allocate dma buffer\n", __func__);
-		return;
+		goto exit_session_conn_param;
 	}
 
 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@
 				  "get_ddb_entry for fw_ddb_index %d\n",
 				  ha->host_no, __func__,
 				  ddb_entry->fw_ddb_index));
-		return;
+		goto exit_session_conn_param;
 	}
 
 	cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@
 	cls_conn = ddb_entry->conn;
 	conn = cls_conn->dd_data;
 
+	/* Update timers after login */
+	ddb_entry->default_relogin_timeout =
+				le16_to_cpu(fw_ddb_entry->def_timeout);
+	ddb_entry->default_time2wait =
+				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
 	/* Update params */
 	conn->max_recv_dlength = BYTE_UNITS *
 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@
 
 	memcpy(sess->initiatorname, ha->name_string,
 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 /*
@@ -1607,6 +1858,9 @@
 		vfree(ha->chap_list);
 	ha->chap_list = NULL;
 
+	if (ha->fw_ddb_dma_pool)
+		dma_pool_destroy(ha->fw_ddb_dma_pool);
+
 	/* release io space registers  */
 	if (is_qla8022(ha)) {
 		if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@
 		goto mem_alloc_error_exit;
 	}
 
+	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+					      DDB_DMA_BLOCK_SIZE, 8, 0);
+
+	if (ha->fw_ddb_dma_pool == NULL) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: fw_ddb_dma_pool allocation failed.\n",
+			   __func__);
+		goto mem_alloc_error_exit;
+	}
+
 	return QLA_SUCCESS;
 
 mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@
 	}
 }
 
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (!(ddb_entry->ddb_type == FLASH_DDB))
+		return;
+
+	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+	    !iscsi_is_session_online(cls_sess)) {
+		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+		    INVALID_ENTRY) {
+			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+					0) {
+				atomic_set(&ddb_entry->retry_relogin_timer,
+					   INVALID_ENTRY);
+				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+				set_bit(DF_RELOGIN, &ddb_entry->flags);
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+				       "%s: index [%d] login device\n",
+					__func__, ddb_entry->fw_ddb_index));
+			} else
+				atomic_dec(&ddb_entry->retry_relogin_timer);
+		}
+	}
+
+	/* Wait for relogin to timeout */
+	if (atomic_read(&ddb_entry->relogin_timer) &&
+	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+		/*
+		 * If the relogin times out and the device is
+		 * still NOT ONLINE then try and relogin again.
+		 */
+		if (!iscsi_is_session_online(cls_sess)) {
+			/* Reset retry relogin timer */
+			atomic_inc(&ddb_entry->relogin_retry_count);
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+				"%s: index[%d] relogin timed out-retrying"
+				" relogin (%d), retry (%d)\n", __func__,
+				ddb_entry->fw_ddb_index,
+				atomic_read(&ddb_entry->relogin_retry_count),
+				ddb_entry->default_time2wait + 4));
+			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+			atomic_set(&ddb_entry->retry_relogin_timer,
+				   ddb_entry->default_time2wait + 4);
+		}
+	}
+}
+
 /**
  * qla4xxx_timer - checks every second for work to do.
  * @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@
 	int start_dpc = 0;
 	uint16_t w;
 
+	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
 	/* If we are in the middle of AER/EEH processing
 	 * skip any processing and reschedule the timer
 	 */
@@ -2078,7 +2398,12 @@
 	sess = cls_session->dd_data;
 	ddb_entry = sess->dd_data;
 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
-	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+	if (ddb_entry->ddb_type == FLASH_DDB)
+		iscsi_block_session(ddb_entry->sess);
+	else
+		iscsi_session_failure(cls_session->dd_data,
+				      ISCSI_ERR_CONN_FAILED);
 }
 
 /**
@@ -2163,7 +2488,7 @@
 
 		/* NOTE: AF_ONLINE flag set upon successful completion of
 		 *       qla4xxx_initialize_adapter */
-		status = qla4xxx_initialize_adapter(ha);
+		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
 	}
 
 	/* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@
 			iscsi_unblock_session(ddb_entry->sess);
 		} else {
 			/* Trigger relogin */
-			iscsi_session_failure(cls_session->dd_data,
-					      ISCSI_ERR_CONN_FAILED);
+			if (ddb_entry->ddb_type == FLASH_DDB) {
+				if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+					qla4xxx_arm_relogin_timer(ddb_entry);
+			} else
+				iscsi_session_failure(cls_session->dd_data,
+						      ISCSI_ERR_CONN_FAILED);
 		}
 	}
 }
 
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+		   " unblock session\n", ha->host_no, __func__,
+		   ddb_entry->fw_ddb_index);
+
+	iscsi_unblock_session(ddb_entry->sess);
+
+	/* Start scan target */
+	if (test_bit(AF_ONLINE, &ha->flags)) {
+		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+			   " start scan\n", ha->host_no, __func__,
+			   ddb_entry->fw_ddb_index);
+		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+	}
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+		   " unblock user space session\n", ha->host_no, __func__,
+		   ddb_entry->fw_ddb_index);
+	iscsi_conn_start(ddb_entry->conn);
+	iscsi_conn_login_event(ddb_entry->conn,
+			       ISCSI_CONN_STATE_LOGGED_IN);
+
+	return QLA_SUCCESS;
+}
+
 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
 {
 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
 }
 
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+	uint16_t relogin_timer;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	relogin_timer = max(ddb_entry->default_relogin_timeout,
+			    (uint16_t)RELOGIN_TOV);
+	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+			  ddb_entry->fw_ddb_index, relogin_timer));
+
+	qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (!(ddb_entry->ddb_type == FLASH_DDB))
+		return;
+
+	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+	    !iscsi_is_session_online(cls_sess)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "relogin issued\n"));
+		qla4xxx_relogin_flash_ddb(cls_sess);
+	}
+}
+
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
 	if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@
 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
 		qla4xxx_get_dhcp_ip_address(ha);
 
+	/* ---- relogin device? --- */
+	if (adapter_up(ha) &&
+	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+	}
+
 	/* ---- link change? --- */
 	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@
 			 * fatal error recovery.  Therefore, the driver must
 			 * manually relogin to devices when recovering from
 			 * connection failures, logouts, expired KATO, etc. */
-
-			qla4xxx_relogin_all_devices(ha);
+			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+				qla4xxx_build_ddb_list(ha, ha->is_reset);
+				iscsi_host_for_each_session(ha->host,
+						qla4xxx_login_flash_ddb);
+			} else
+				qla4xxx_relogin_all_devices(ha);
 		}
 	}
 }
@@ -2613,7 +3039,7 @@
 	return rc;
 }
 
-static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
 {
 	int rc;
 
@@ -2647,7 +3073,7 @@
 	return rc;
 }
 
-static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
 {
 	int rc;
 
@@ -2734,7 +3160,7 @@
 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
 }
 
-static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
 {
 	int rc;
 
@@ -2867,6 +3293,9 @@
 			  " target ID %d\n", __func__, ddb_index[0],
 			  ddb_index[1]));
 
+	ha->pri_ddb_idx = ddb_index[0];
+	ha->sec_ddb_idx = ddb_index[1];
+
 exit_boot_info_free:
 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
 exit_boot_info:
@@ -3034,6 +3463,9 @@
 		return ret;
 	}
 
+	if (ql4xdisablesysfsboot)
+		return QLA_SUCCESS;
+
 	if (ddb_index[0] == 0xffff)
 		goto sec_target;
 
@@ -3066,7 +3498,15 @@
 	struct iscsi_boot_kobj *boot_kobj;
 
 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
-		return 0;
+		return QLA_ERROR;
+
+	if (ql4xdisablesysfsboot) {
+		ql4_printk(KERN_INFO, ha,
+			   "%s: sysfsboot disabled - driver will trigger login "
+			   "and publish session for discovery.\n", __func__);
+		return QLA_SUCCESS;
+	}
+
 
 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
 	if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@
 	if (!boot_kobj)
 		goto put_host;
 
-	return 0;
+	return QLA_SUCCESS;
 
 put_host:
 	scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@
 exit_chap_list:
 	dma_free_coherent(&ha->pdev->dev, chap_size,
 			chap_flash_data, chap_dma);
-	return;
 }
 
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+				  struct ql4_tuple_ddb *tddb)
+{
+	struct scsi_qla_host *ha;
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+
+	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	ha = ddb_entry->ha;
+	cls_sess = ddb_entry->sess;
+	sess = cls_sess->dd_data;
+	cls_conn = ddb_entry->conn;
+	conn = cls_conn->dd_data;
+
+	tddb->tpgt = sess->tpgt;
+	tddb->port = conn->persistent_port;
+	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+				      struct ql4_tuple_ddb *tddb)
+{
+	uint16_t options = 0;
+
+	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE)
+		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+	else
+		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+	tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+				     struct ql4_tuple_ddb *old_tddb,
+				     struct ql4_tuple_ddb *new_tddb)
+{
+	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+		return QLA_ERROR;
+
+	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+		return QLA_ERROR;
+
+	if (old_tddb->port != new_tddb->port)
+		return QLA_ERROR;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+			  new_tddb->ip_addr, new_tddb->iscsi_name));
+
+	return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+				     struct dev_db_entry *fw_ddb_entry)
+{
+	struct ddb_entry *ddb_entry;
+	struct ql4_tuple_ddb *fw_tddb = NULL;
+	struct ql4_tuple_ddb *tmp_tddb = NULL;
+	int idx;
+	int ret = QLA_ERROR;
+
+	fw_tddb = vzalloc(sizeof(*fw_tddb));
+	if (!fw_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+	if (!tmp_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+			ret = QLA_SUCCESS; /* found */
+			goto exit_check;
+		}
+	}
+
+exit_check:
+	if (fw_tddb)
+		vfree(fw_tddb);
+	if (tmp_tddb)
+		vfree(tmp_tddb);
+	return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+				       struct list_head *list_nt,
+				       struct dev_db_entry *fw_ddb_entry)
+{
+	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+	struct ql4_tuple_ddb *fw_tddb = NULL;
+	struct ql4_tuple_ddb *tmp_tddb = NULL;
+	int ret = QLA_ERROR;
+
+	fw_tddb = vzalloc(sizeof(*fw_tddb));
+	if (!fw_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+	if (!tmp_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+			ret = QLA_SUCCESS; /* found */
+			goto exit_check;
+		}
+	}
+
+exit_check:
+	if (fw_tddb)
+		vfree(fw_tddb);
+	if (tmp_tddb)
+		vfree(tmp_tddb);
+	return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+	/* Free up the normaltargets list */
+	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+		list_del_init(&nt_ddb_idx->list);
+		vfree(nt_ddb_idx);
+	}
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+					struct dev_db_entry *fw_ddb_entry)
+{
+	struct iscsi_endpoint *ep;
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+	struct sockaddr *dst_addr;
+	char *ip;
+
+	/* TODO: the iscsi_endpoint needs to be destroyed on unload */
+	dst_addr = vmalloc(sizeof(*dst_addr));
+	if (!dst_addr)
+		return NULL;
+
+	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+		dst_addr->sa_family = AF_INET6;
+		addr6 = (struct sockaddr_in6 *)dst_addr;
+		ip = (char *)&addr6->sin6_addr;
+		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+	} else {
+		dst_addr->sa_family = AF_INET;
+		addr = (struct sockaddr_in *)dst_addr;
+		ip = (char *)&addr->sin_addr;
+		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+	}
+
+	ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+	vfree(dst_addr);
+	return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+	if (ql4xdisablesysfsboot)
+		return QLA_SUCCESS;
+	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+		return QLA_ERROR;
+	return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+					  struct ddb_entry *ddb_entry)
+{
+	ddb_entry->ddb_type = FLASH_DDB;
+	ddb_entry->fw_ddb_index = INVALID_ENTRY;
+	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+	ddb_entry->ha = ha;
+	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+	atomic_set(&ddb_entry->relogin_timer, 0);
+	atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+	ddb_entry->default_relogin_timeout =
+		le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+	ddb_entry->default_time2wait =
+		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+	uint32_t idx = 0;
+	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+	uint32_t sts[MBOX_REG_COUNT];
+	uint32_t ip_state;
+	unsigned long wtime;
+	int ret;
+
+	wtime = jiffies + (HZ * IP_CONFIG_TOV);
+	do {
+		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+			if (ip_idx[idx] == -1)
+				continue;
+
+			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+			if (ret == QLA_ERROR) {
+				ip_idx[idx] = -1;
+				continue;
+			}
+
+			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Waiting for IP state for idx = %d, state = 0x%x\n",
+					  ip_idx[idx], ip_state));
+			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+			    ip_state == IP_ADDRSTATE_INVALID ||
+			    ip_state == IP_ADDRSTATE_PREFERRED ||
+			    ip_state == IP_ADDRSTATE_DEPRICATED ||
+			    ip_state == IP_ADDRSTATE_DISABLING)
+				ip_idx[idx] = -1;
+
+		}
+
+		/* Break if all IP states checked */
+		if ((ip_idx[0] == -1) &&
+		    (ip_idx[1] == -1) &&
+		    (ip_idx[2] == -1) &&
+		    (ip_idx[3] == -1))
+			break;
+		schedule_timeout_uninterruptible(HZ);
+	} while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+	int max_ddbs;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t conn_id;
+	struct dev_db_entry *fw_ddb_entry;
+	struct ddb_entry *ddb_entry = NULL;
+	dma_addr_t fw_ddb_dma;
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_session *sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_endpoint *ep;
+	uint16_t cmds_max = 32, tmo = 0;
+	uint32_t initial_cmdsn = 0;
+	struct list_head list_st, list_nt; /* List of sendtargets */
+	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+	int fw_idx_size;
+	unsigned long wtime;
+	struct qla_ddb_index  *nt_ddb_idx;
+
+	if (!test_bit(AF_LINK_UP, &ha->flags)) {
+		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+		ha->is_reset = is_reset;
+		return;
+	}
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_ddb_list;
+	}
+
+	INIT_LIST_HEAD(&list_st);
+	INIT_LIST_HEAD(&list_nt);
+	fw_idx_size = sizeof(struct qla_ddb_index);
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+					      fw_ddb_dma, NULL,
+					      &next_idx, &state, &conn_err,
+					      NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+			goto continue_next_st;
+
+		/* Check if ST, add to the list_st */
+		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+			goto continue_next_st;
+
+		st_ddb_idx = vzalloc(fw_idx_size);
+		if (!st_ddb_idx)
+			break;
+
+		st_ddb_idx->fw_ddb_idx = idx;
+
+		list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+		if (next_idx == 0)
+			break;
+	}
+
+	/* Before issuing the conn open mbox, ensure all IP states are
+	 * configured. Note: conn open fails if the IPs are not configured.
+	 */
+	qla4xxx_wait_for_ip_configuration(ha);
+
+	/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+	}
+
+	/* Wait for all sendtargets to complete; wait a minimum of 12 sec */
+	tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Default time to wait for build ddb %d\n", tmo));
+
+	wtime = jiffies + (HZ * tmo);
+	do {
+		list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+					 list) {
+			ret = qla4xxx_get_fwddb_entry(ha,
+						      st_ddb_idx->fw_ddb_idx,
+						      NULL, 0, NULL, &next_idx,
+						      &state, &conn_err, NULL,
+						      NULL);
+			if (ret == QLA_ERROR)
+				continue;
+
+			if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+			    state == DDB_DS_SESSION_FAILED) {
+				list_del_init(&st_ddb_idx->list);
+				vfree(st_ddb_idx);
+			}
+		}
+		schedule_timeout_uninterruptible(HZ / 10);
+	} while (time_after(wtime, jiffies));
+
+	/* Free up the sendtargets list */
+	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+		list_del_init(&st_ddb_idx->list);
+		vfree(st_ddb_idx);
+	}
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+					      fw_ddb_dma, NULL,
+					      &next_idx, &state, &conn_err,
+					      NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+			goto continue_next_nt;
+
+		/* Check if NT, then add it to the list */
+		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+			goto continue_next_nt;
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Adding  DDB to session = 0x%x\n",
+					  idx));
+			if (is_reset == INIT_ADAPTER) {
+				nt_ddb_idx = vmalloc(fw_idx_size);
+				if (!nt_ddb_idx)
+					break;
+
+				nt_ddb_idx->fw_ddb_idx = idx;
+
+				memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+				       sizeof(struct dev_db_entry));
+
+				if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+						fw_ddb_entry) == QLA_SUCCESS) {
+					vfree(nt_ddb_idx);
+					goto continue_next_nt;
+				}
+				list_add_tail(&nt_ddb_idx->list, &list_nt);
+			} else if (is_reset == RESET_ADAPTER) {
+				if (qla4xxx_is_session_exists(ha,
+						   fw_ddb_entry) == QLA_SUCCESS)
+					goto continue_next_nt;
+			}
+
+			/* Create the session object with INVALID_ENTRY;
+			 * the target_id gets set when we issue the login.
+			 */
+			cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+						ha->host, cmds_max,
+						sizeof(struct ddb_entry),
+						sizeof(struct ql4_task_data),
+						initial_cmdsn, INVALID_ENTRY);
+			if (!cls_sess)
+				goto exit_ddb_list;
+
+			/*
+			 * iscsi_session_setup increments the driver reference
+			 * count, which would prevent the driver from being
+			 * unloaded, so call module_put to decrement the
+			 * reference count.
+			 */
+			module_put(qla4xxx_iscsi_transport.owner);
+			sess = cls_sess->dd_data;
+			ddb_entry = sess->dd_data;
+			ddb_entry->sess = cls_sess;
+
+			cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+			memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+			       sizeof(struct dev_db_entry));
+
+			qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+			cls_conn = iscsi_conn_setup(cls_sess,
+						    sizeof(struct qla_conn),
+						    conn_id);
+			if (!cls_conn)
+				goto exit_ddb_list;
+
+			ddb_entry->conn = cls_conn;
+
+			/* Setup ep, for displaying attributes in sysfs */
+			ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+			if (ep) {
+				ep->conn = cls_conn;
+				cls_conn->ep = ep;
+			} else {
+				DEBUG2(ql4_printk(KERN_ERR, ha,
+						  "Unable to get ep\n"));
+			}
+
+			/* Update sess/conn params */
+			qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+						 cls_conn);
+
+			if (is_reset == RESET_ADAPTER) {
+				iscsi_block_session(cls_sess);
+				/* Use the relogin path to discover new devices
+				 * by short-circuiting the relogin timer logic:
+				 * instead, set the flags to initiate login
+				 * right away.
+				 */
+				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+				set_bit(DF_RELOGIN, &ddb_entry->flags);
+			}
+		}
+continue_next_nt:
+		if (next_idx == 0)
+			break;
+	}
+exit_ddb_list:
+	qla4xxx_free_nt_list(&list_nt);
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+	qla4xxx_free_ddb_index(ha);
+}
+
+
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@
 	 * firmware
 	 * NOTE: interrupts enabled upon successful completion
 	 */
-	status = qla4xxx_initialize_adapter(ha);
+	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
 	    init_retry_count++ < MAX_INIT_RETRIES) {
 
@@ -3319,7 +4257,7 @@
 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
 			continue;
 
-		status = qla4xxx_initialize_adapter(ha);
+		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
 	}
 
 	if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@
 	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
 	       ha->patch_number, ha->build_number);
 
-	qla4xxx_create_chap_list(ha);
-
 	if (qla4xxx_setup_boot_info(ha))
 		ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
 			   __func__);
 
+	/* Build the DDB list and log in to each entry */
+	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+	qla4xxx_create_chap_list(ha);
+
 	qla4xxx_create_ifaces(ha);
 	return 0;
 
@@ -3449,6 +4391,38 @@
 	}
 }
 
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+	struct ddb_entry *ddb_entry;
+	int options;
+	int idx;
+
+	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if ((ddb_entry != NULL) &&
+		    (ddb_entry->ddb_type == FLASH_DDB)) {
+
+			options = LOGOUT_OPTION_CLOSE_SESSION;
+			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+			    == QLA_ERROR)
+				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+					   __func__);
+
+			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+			/*
+			 * We decremented the driver reference count when we
+			 * set up the session so that driver unload is
+			 * seamless without actually destroying the session;
+			 * take the reference back before tearing it down.
+			 */
+			try_module_get(qla4xxx_iscsi_transport.owner);
+			iscsi_destroy_endpoint(ddb_entry->conn->ep);
+			qla4xxx_free_ddb(ha, ddb_entry);
+			iscsi_session_teardown(ddb_entry->sess);
+		}
+	}
+}
 /**
  * qla4xxx_remove_adapter - calback function to remove adapter.
  * @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@
 	/* destroy iface from sysfs */
 	qla4xxx_destroy_ifaces(ha);
 
-	if (ha->boot_kset)
+	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
 		iscsi_boot_destroy_kset(ha->boot_kset);
 
+	qla4xxx_destroy_fw_ddb_session(ha);
+
 	scsi_remove_host(ha->host);
 
 	qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@
 
 		qla4_8xxx_idc_unlock(ha);
 		clear_bit(AF_FW_RECOVERY, &ha->flags);
-		rval = qla4xxx_initialize_adapter(ha);
+		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
 		qla4_8xxx_idc_lock(ha);
 
 		if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@
 		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
 		    QLA82XX_DEV_READY)) {
 			clear_bit(AF_FW_RECOVERY, &ha->flags);
-			rval = qla4xxx_initialize_adapter(ha);
+			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
 			if (rval == QLA_SUCCESS) {
 				ret = qla4xxx_request_irqs(ha);
 				if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d..5254e57 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k9"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 96029e6..e8447fb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -328,7 +328,7 @@
 iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
 iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
 
-static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
 					  struct attribute *attr, int i)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -2199,7 +2199,7 @@
 	NULL,
 };
 
-static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
 					 struct attribute *attr, int i)
 {
 	struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2370,7 +2370,7 @@
 	NULL,
 };
 
-static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
 					    struct attribute *attr, int i)
 {
 	struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2468,7 +2468,7 @@
 	NULL,
 };
 
-static mode_t iscsi_host_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
 					 struct attribute *attr, int i)
 {
 	struct device *cdev = container_of(kobj, struct device, kobj);
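The mode_t to umode_t conversions here (and in the qla4xxx, spi transport, sg and IIO hunks elsewhere in this series) follow the sysfs core, whose .is_visible callbacks now return umode_t. A self-contained sketch of the callback shape, with made-up attribute names:

#include <linux/sysfs.h>
#include <linux/device.h>

static struct attribute *example_attrs[] = {
	NULL,	/* attributes would be listed here */
};

/* Sketch: .is_visible returns umode_t rather than mode_t. */
static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int i)
{
	return attr->mode;	/* return 0 to hide the attribute */
}

static const struct attribute_group example_group = {
	.attrs      = example_attrs,
	.is_visible = example_attr_is_visible,
};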
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 5fbeadd..a2715c3 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1434,7 +1434,7 @@
 	(si->f->show_##name ? S_IRUGO : 0) | \
 	(si->f->set_##name ? S_IWUSR : 0)
 
-static mode_t target_attribute_is_visible(struct kobject *kobj,
+static umode_t target_attribute_is_visible(struct kobject *kobj,
 					  struct attribute *attr, int i)
 {
 	struct device *cdev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 6803b1e..92d24d6 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -16,7 +16,6 @@
 #include <linux/genhd.h>
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <asm/unaligned.h>
 
 #include <scsi/scsicam.h>
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 441a1c5..02d9998 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2325,16 +2325,15 @@
 static int
 sg_proc_init(void)
 {
-	int k, mask;
 	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
-	struct sg_proc_leaf * leaf;
+	int k;
 
 	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
 	if (!sg_proc_sgp)
 		return 1;
 	for (k = 0; k < num_leaves; ++k) {
-		leaf = &sg_proc_leaf_arr[k];
-		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+		struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
 		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
 	}
 	return 0;
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 9acc2b2..cf51432 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -51,7 +51,7 @@
 
 #include "53c700.h"
 
-MODULE_AUTHOR("Thomas Bogendörfer");
+MODULE_AUTHOR("Thomas Bogendörfer");
 MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:snirm_53c710");
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index a18996d..7264116 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1144,7 +1144,7 @@
  *
  * These are statically allocated.  Trying to be clever was not worth it.
  *
- * Dynamic allocation can fail, and we can't go deeep into the memory
+ * Dynamic allocation can fail, and we can't go deep into the memory
  * allocator, since we're a SCSI driver, and trying too hard to allocate
  * memory might generate disk I/O.  We also don't want to fail disk I/O
  * in that case because we can't get an allocation - the I/O could be
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8b7a141..e53e449 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,7 +25,7 @@
 #include <linux/stat.h>
 #include <linux/interrupt.h>
 #include <linux/sh_intc.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/syscore_ops.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -354,6 +354,8 @@
 	if (desc->force_enable)
 		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
 
+	d->skip_suspend = desc->skip_syscore_suspend;
+
 	nr_intc_controllers++;
 
 	return 0;
@@ -386,6 +388,9 @@
 	list_for_each_entry(d, &intc_list, list) {
 		int irq;
 
+		if (d->skip_suspend)
+			continue;
+
 		/* enable wakeup irqs belonging to this intc controller */
 		for_each_active_irq(irq) {
 			struct irq_data *data;
@@ -409,6 +414,9 @@
 	list_for_each_entry(d, &intc_list, list) {
 		int irq;
 
+		if (d->skip_suspend)
+			continue;
+
 		for_each_active_irq(irq) {
 			struct irq_data *data;
 			struct irq_chip *chip;
@@ -434,46 +442,47 @@
 	.resume		= intc_resume,
 };
 
-struct sysdev_class intc_sysdev_class = {
+struct bus_type intc_subsys = {
 	.name		= "intc",
+	.dev_name	= "intc",
 };
 
 static ssize_t
-show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+show_intc_name(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct intc_desc_int *d;
 
-	d = container_of(dev, struct intc_desc_int, sysdev);
+	d = container_of(dev, struct intc_desc_int, dev);
 
 	return sprintf(buf, "%s\n", d->chip.name);
 }
 
-static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+static DEVICE_ATTR(name, S_IRUGO, show_intc_name, NULL);
 
-static int __init register_intc_sysdevs(void)
+static int __init register_intc_devs(void)
 {
 	struct intc_desc_int *d;
 	int error;
 
 	register_syscore_ops(&intc_syscore_ops);
 
-	error = sysdev_class_register(&intc_sysdev_class);
+	error = subsys_system_register(&intc_subsys, NULL);
 	if (!error) {
 		list_for_each_entry(d, &intc_list, list) {
-			d->sysdev.id = d->index;
-			d->sysdev.cls = &intc_sysdev_class;
-			error = sysdev_register(&d->sysdev);
+			d->dev.id = d->index;
+			d->dev.bus = &intc_subsys;
+			error = device_register(&d->dev);
 			if (error == 0)
-				error = sysdev_create_file(&d->sysdev,
-							   &attr_name);
+				error = device_create_file(&d->dev,
+							   &dev_attr_name);
 			if (error)
 				break;
 		}
 	}
 
 	if (error)
-		pr_err("sysdev registration error\n");
+		pr_err("device registration error\n");
 
 	return error;
 }
-device_initcall(register_intc_sysdevs);
+device_initcall(register_intc_devs);
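subsys_system_register() replaces the old sysdev class registration: it creates /sys/devices/system/<name>, registers the bus, and fills in the bus's dev_root device, which the userimask change below relies on for device_create_file(). A minimal sketch using a hypothetical subsystem name:

#include <linux/device.h>

/* Sketch: hypothetical "example" system subsystem. */
static struct bus_type example_subsys = {
	.name		= "example",
	.dev_name	= "example",
};

static int __init example_subsys_init(void)
{
	/* the second argument is an optional list of attribute groups */
	return subsys_system_register(&example_subsys, NULL);
}
device_initcall(example_subsys_init);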
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 5b93485..b0e9155 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/radix-tree.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
 	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -51,7 +51,7 @@
 
 struct intc_desc_int {
 	struct list_head list;
-	struct sys_device sysdev;
+	struct device dev;
 	struct radix_tree_root tree;
 	raw_spinlock_t lock;
 	unsigned int index;
@@ -67,6 +67,7 @@
 	struct intc_window *window;
 	unsigned int nr_windows;
 	struct irq_chip chip;
+	bool skip_suspend;
 };
 
 
@@ -157,7 +158,7 @@
 extern struct list_head intc_list;
 extern raw_spinlock_t intc_big_lock;
 extern unsigned int nr_intc_controllers;
-extern struct sysdev_class intc_sysdev_class;
+extern struct bus_type intc_subsys;
 
 unsigned int intc_get_dfl_prio_level(void);
 unsigned int intc_get_prio_level(unsigned int irq);
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 56bf933..e649cea 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -10,7 +10,7 @@
 #define pr_fmt(fmt) "intc: " fmt
 
 #include <linux/errno.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/stat.h>
@@ -20,15 +20,15 @@
 static void __iomem *uimask;
 
 static ssize_t
-show_intc_userimask(struct sysdev_class *cls,
-		    struct sysdev_class_attribute *attr, char *buf)
+show_intc_userimask(struct device *dev,
+		    struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
 }
 
 static ssize_t
-store_intc_userimask(struct sysdev_class *cls,
-		     struct sysdev_class_attribute *attr,
+store_intc_userimask(struct device *dev,
+		     struct device_attribute *attr,
 		     const char *buf, size_t count)
 {
 	unsigned long level;
@@ -55,8 +55,8 @@
 	return count;
 }
 
-static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
-			 show_intc_userimask, store_intc_userimask);
+static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR,
+		   show_intc_userimask, store_intc_userimask);
 
 
 static int __init userimask_sysdev_init(void)
@@ -64,7 +64,7 @@
 	if (unlikely(!uimask))
 		return -ENXIO;
 
-	return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
+	return device_create_file(intc_subsys.dev_root, &dev_attr_userimask);
 }
 late_initcall(userimask_sysdev_init);
 
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8ba4510..56abf55 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -174,8 +174,7 @@
 
 config SPI_MPC52xx
 	tristate "Freescale MPC52xx SPI (non-PSC) controller support"
-	depends on PPC_MPC52xx && SPI
-	select SPI_MASTER_OF
+	depends on PPC_MPC52xx
 	help
 	  This drivers supports the MPC52xx SPI controller in master SPI
 	  mode.
@@ -346,14 +345,14 @@
 	  serial port.
 
 config SPI_TOPCLIFF_PCH
-	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
+	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
 	depends on PCI
 	help
 	  SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
 	  used in some x86 embedded processors.
 
-	  This driver also supports the ML7213, a companion chip for the
-	  Atom E6xx series and compatible with the Intel EG20T PCH.
+	  This driver also supports the ML7213/ML7223/ML7831, a companion chip
+	  for the Atom E6xx series and compatible with the Intel EG20T PCH.
 
 config SPI_TXX9
 	tristate "Toshiba TXx9 SPI controller"
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 322be7a..0b0dfb7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -121,6 +121,7 @@
 	/* SPI1 has 4 channels, while SPI2 has 2 */
 	struct omap2_mcspi_dma	*dma_channels;
 	struct  device		*dev;
+	struct workqueue_struct *wq;
 };
 
 struct omap2_mcspi_cs {
@@ -143,8 +144,6 @@
 
 static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
 
-static struct workqueue_struct *omap2_mcspi_wq;
-
 #define MOD_REG_BIT(val, mask, set) do { \
 	if (set) \
 		val |= mask; \
@@ -1043,7 +1042,7 @@
 
 	spin_lock_irqsave(&mcspi->lock, flags);
 	list_add_tail(&m->queue, &mcspi->msg_queue);
-	queue_work(omap2_mcspi_wq, &mcspi->work);
+	queue_work(mcspi->wq, &mcspi->work);
 	spin_unlock_irqrestore(&mcspi->lock, flags);
 
 	return 0;
@@ -1088,6 +1087,7 @@
 	struct omap2_mcspi	*mcspi;
 	struct resource		*r;
 	int			status = 0, i;
+	char			wq_name[20];
 
 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
 	if (master == NULL) {
@@ -1111,10 +1111,17 @@
 	mcspi = spi_master_get_devdata(master);
 	mcspi->master = master;
 
+	sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
+	mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
+	if (mcspi->wq == NULL) {
+		status = -ENOMEM;
+		goto free_master;
+	}
+
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (r == NULL) {
 		status = -ENODEV;
-		goto err1;
+		goto free_master;
 	}
 
 	r->start += pdata->regs_offset;
@@ -1123,14 +1130,14 @@
 	if (!request_mem_region(r->start, resource_size(r),
 				dev_name(&pdev->dev))) {
 		status = -EBUSY;
-		goto err1;
+		goto free_master;
 	}
 
 	mcspi->base = ioremap(r->start, resource_size(r));
 	if (!mcspi->base) {
 		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
 		status = -ENOMEM;
-		goto err2;
+		goto release_region;
 	}
 
 	mcspi->dev = &pdev->dev;
@@ -1145,7 +1152,7 @@
 			GFP_KERNEL);
 
 	if (mcspi->dma_channels == NULL)
-		goto err2;
+		goto unmap_io;
 
 	for (i = 0; i < master->num_chipselect; i++) {
 		char dma_ch_name[14];
@@ -1175,25 +1182,33 @@
 		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
 	}
 
+	if (status < 0)
+		goto dma_chnl_free;
+
 	pm_runtime_enable(&pdev->dev);
 
 	if (status || omap2_mcspi_master_setup(mcspi) < 0)
-		goto err3;
+		goto disable_pm;
 
 	status = spi_register_master(master);
 	if (status < 0)
-		goto err4;
+		goto err_spi_register;
 
 	return status;
 
-err4:
+err_spi_register:
 	spi_master_put(master);
-err3:
+disable_pm:
+	pm_runtime_disable(&pdev->dev);
+dma_chnl_free:
 	kfree(mcspi->dma_channels);
-err2:
-	release_mem_region(r->start, resource_size(r));
+unmap_io:
 	iounmap(mcspi->base);
-err1:
+release_region:
+	release_mem_region(r->start, resource_size(r));
+free_master:
+	kfree(master);
+	platform_set_drvdata(pdev, NULL);
 	return status;
 }
 
@@ -1210,6 +1225,7 @@
 	dma_channels = mcspi->dma_channels;
 
 	omap2_mcspi_disable_clocks(mcspi);
+	pm_runtime_disable(&pdev->dev);
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(r->start, resource_size(r));
 
@@ -1217,6 +1233,8 @@
 	spi_unregister_master(master);
 	iounmap(base);
 	kfree(dma_channels);
+	destroy_workqueue(mcspi->wq);
+	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
@@ -1275,10 +1293,6 @@
 
 static int __init omap2_mcspi_init(void)
 {
-	omap2_mcspi_wq = create_singlethread_workqueue(
-				omap2_mcspi_driver.driver.name);
-	if (omap2_mcspi_wq == NULL)
-		return -1;
 	return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
 }
 subsys_initcall(omap2_mcspi_init);
@@ -1287,7 +1301,6 @@
 {
 	platform_driver_unregister(&omap2_mcspi_driver);
 
-	destroy_workqueue(omap2_mcspi_wq);
 }
 module_exit(omap2_mcspi_exit);
 
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5559b22..f1f5efb 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -340,6 +340,10 @@
  * @cur_msg: Pointer to current spi_message being processed
  * @cur_transfer: Pointer to current spi_transfer
  * @cur_chip: pointer to current clients chip(assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue has been examined
+ *  and found to use the same chip select as the previous message, so
+ *  the chip select was left active after the previous transfer and is
+ *  still active.
  * @tx: current position in TX buffer to be read
  * @tx_end: end position in TX buffer to be read
  * @rx: current position in RX buffer to be written
@@ -373,6 +377,7 @@
 	struct spi_message		*cur_msg;
 	struct spi_transfer		*cur_transfer;
 	struct chip_data		*cur_chip;
+	bool				next_msg_cs_active;
 	void				*tx;
 	void				*tx_end;
 	void				*rx;
@@ -445,23 +450,9 @@
 	struct spi_transfer *last_transfer;
 	unsigned long flags;
 	struct spi_message *msg;
-	void (*curr_cs_control) (u32 command);
+	pl022->next_msg_cs_active = false;
 
-	/*
-	 * This local reference to the chip select function
-	 * is needed because we set curr_chip to NULL
-	 * as a step toward termininating the message.
-	 */
-	curr_cs_control = pl022->cur_chip->cs_control;
-	spin_lock_irqsave(&pl022->queue_lock, flags);
-	msg = pl022->cur_msg;
-	pl022->cur_msg = NULL;
-	pl022->cur_transfer = NULL;
-	pl022->cur_chip = NULL;
-	queue_work(pl022->workqueue, &pl022->pump_messages);
-	spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
-	last_transfer = list_entry(msg->transfers.prev,
+	last_transfer = list_entry(pl022->cur_msg->transfers.prev,
 					struct spi_transfer,
 					transfer_list);
 
@@ -473,18 +464,13 @@
 		 */
 		udelay(last_transfer->delay_usecs);
 
-	/*
-	 * Drop chip select UNLESS cs_change is true or we are returning
-	 * a message with an error, or next message is for another chip
-	 */
-	if (!last_transfer->cs_change)
-		curr_cs_control(SSP_CHIP_DESELECT);
-	else {
+	if (!last_transfer->cs_change) {
 		struct spi_message *next_msg;
 
-		/* Holding of cs was hinted, but we need to make sure
-		 * the next message is for the same chip.  Don't waste
-		 * time with the following tests unless this was hinted.
+		/*
+		 * cs_change was not set. We can keep the chip select
+		 * enabled if there is a message in the queue and it is
+		 * for the same spi device.
 		 *
 		 * We cannot postpone this until pump_messages, because
 		 * after calling msg->complete (below) the driver that
@@ -501,19 +487,29 @@
 					struct spi_message, queue);
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
-		/* see if the next and current messages point
-		 * to the same chip
+		/*
+		 * see if the next and current messages point
+		 * to the same spi device.
 		 */
-		if (next_msg && next_msg->spi != msg->spi)
+		if (next_msg && next_msg->spi != pl022->cur_msg->spi)
 			next_msg = NULL;
-		if (!next_msg || msg->state == STATE_ERROR)
-			curr_cs_control(SSP_CHIP_DESELECT);
+		if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
+			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+		else
+			pl022->next_msg_cs_active = true;
 	}
+
+	spin_lock_irqsave(&pl022->queue_lock, flags);
+	msg = pl022->cur_msg;
+	pl022->cur_msg = NULL;
+	pl022->cur_transfer = NULL;
+	pl022->cur_chip = NULL;
+	queue_work(pl022->workqueue, &pl022->pump_messages);
+	spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
 	msg->state = NULL;
 	if (msg->complete)
 		msg->complete(msg->context);
-	/* This message is completed, so let's turn off the clocks & power */
-	pm_runtime_put(&pl022->adev->dev);
 }
 
 /**
@@ -1244,9 +1240,9 @@
 
 	if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
 		flag = 1;
-		/* Disable Transmit interrupt */
-		writew(readw(SSP_IMSC(pl022->virtbase)) &
-		       (~SSP_IMSC_MASK_TXIM),
+		/* Disable Transmit interrupt, enable receive interrupt */
+		writew((readw(SSP_IMSC(pl022->virtbase)) &
+		       ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
 		       SSP_IMSC(pl022->virtbase));
 	}
 
@@ -1352,7 +1348,7 @@
 			 */
 			udelay(previous->delay_usecs);
 
-		/* Drop chip select only if cs_change is requested */
+		/* Reselect chip select only if cs_change was requested */
 		if (previous->cs_change)
 			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
 	} else {
@@ -1379,15 +1375,22 @@
 	}
 
 err_config_dma:
-	writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+	/* enable all interrupts except RX */
+	writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
 }
 
 static void do_interrupt_dma_transfer(struct pl022 *pl022)
 {
-	u32 irqflags = ENABLE_ALL_INTERRUPTS;
+	/*
+	 * Default is to enable all interrupts except RX -
+	 * this will be enabled once TX is complete
+	 */
+	u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM;
 
-	/* Enable target chip */
-	pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+	/* Enable target chip, if not already active */
+	if (!pl022->next_msg_cs_active)
+		pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+
 	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
 		/* Error path */
 		pl022->cur_msg->state = STATE_ERROR;
@@ -1442,7 +1445,8 @@
 		} else {
 			/* STATE_START */
 			message->state = STATE_RUNNING;
-			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+			if (!pl022->next_msg_cs_active)
+				pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
 		}
 
 		/* Configuration Changing Per Transfer */
@@ -1504,14 +1508,28 @@
 	struct pl022 *pl022 =
 		container_of(work, struct pl022, pump_messages);
 	unsigned long flags;
+	bool was_busy = false;
 
 	/* Lock queue and check for queue work */
 	spin_lock_irqsave(&pl022->queue_lock, flags);
 	if (list_empty(&pl022->queue) || !pl022->running) {
+		if (pl022->busy) {
+			/* nothing more to do - disable spi/ssp and power off */
+			writew((readw(SSP_CR1(pl022->virtbase)) &
+				(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+			if (pl022->master_info->autosuspend_delay > 0) {
+				pm_runtime_mark_last_busy(&pl022->adev->dev);
+				pm_runtime_put_autosuspend(&pl022->adev->dev);
+			} else {
+				pm_runtime_put(&pl022->adev->dev);
+			}
+		}
 		pl022->busy = false;
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 		return;
 	}
+
 	/* Make sure we are not already running a message */
 	if (pl022->cur_msg) {
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1522,7 +1540,10 @@
 	    list_entry(pl022->queue.next, struct spi_message, queue);
 
 	list_del_init(&pl022->cur_msg->queue);
-	pl022->busy = true;
+	if (pl022->busy)
+		was_busy = true;
+	else
+		pl022->busy = true;
 	spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
 	/* Initial message state */
@@ -1532,12 +1553,14 @@
 
 	/* Setup the SPI using the per chip configuration */
 	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
-	/*
-	 * We enable the core voltage and clocks here, then the clocks
-	 * and core will be disabled when giveback() is called in each method
-	 * (poll/interrupt/DMA)
-	 */
-	pm_runtime_get_sync(&pl022->adev->dev);
+	if (!was_busy)
+		/*
+		 * We enable the core voltage and clocks here, then the clocks
+		 * and core will be disabled when this workqueue is run again
+		 * and there is no more work to be done.
+		 */
+		pm_runtime_get_sync(&pl022->adev->dev);
+
 	restore_state(pl022);
 	flush(pl022);
 
@@ -1582,6 +1605,7 @@
 	pl022->cur_msg = NULL;
 	pl022->cur_transfer = NULL;
 	pl022->cur_chip = NULL;
+	pl022->next_msg_cs_active = false;
 	spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
 	queue_work(pl022->workqueue, &pl022->pump_messages);
@@ -1881,7 +1905,7 @@
 {
 	struct pl022_config_chip const *chip_info;
 	struct chip_data *chip;
-	struct ssp_clock_params clk_freq = {0, };
+	struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
 	int status = 0;
 	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
 	unsigned int bits = spi->bits_per_word;
@@ -2231,7 +2255,17 @@
 	dev_dbg(dev, "probe succeeded\n");
 
 	/* let runtime pm put suspend */
-	pm_runtime_put(dev);
+	if (platform_info->autosuspend_delay > 0) {
+		dev_info(&adev->dev,
+			"will use autosuspend for runtime pm, delay %dms\n",
+			platform_info->autosuspend_delay);
+		pm_runtime_set_autosuspend_delay(dev,
+			platform_info->autosuspend_delay);
+		pm_runtime_use_autosuspend(dev);
+		pm_runtime_put_autosuspend(dev);
+	} else {
+		pm_runtime_put(dev);
+	}
 	return 0;
 
  err_spi_register:
@@ -2305,11 +2339,6 @@
 		return status;
 	}
 
-	amba_vcore_enable(pl022->adev);
-	amba_pclk_enable(pl022->adev);
-	load_ssp_default_config(pl022);
-	amba_pclk_disable(pl022->adev);
-	amba_vcore_disable(pl022->adev);
 	dev_dbg(dev, "suspended\n");
 	return 0;
 }
@@ -2432,6 +2461,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl022_ids);
+
 static struct amba_driver pl022_driver = {
 	.drv = {
 		.name	= "ssp-pl022",
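The new MODULE_DEVICE_TABLE(amba, pl022_ids) line exports the AMBA id table as module alias information so udev/modprobe can autoload the driver when a matching primecell is discovered. A sketch of the pattern; the id value shown is the PL022 primecell id, but treat the table contents as illustrative:

#include <linux/amba/bus.h>
#include <linux/module.h>

/* Sketch: exporting an AMBA id table for module autoloading. */
static struct amba_id example_ids[] = {
	{
		.id	= 0x00041022,	/* PL022 primecell id */
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, example_ids);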
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 6a80749..7086583 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1,7 +1,7 @@
 /*
  * SPI bus driver for the Topcliff PCH used by Intel SoCs
  *
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -95,16 +95,18 @@
 #define PCH_CLOCK_HZ		50000000
 #define PCH_MAX_SPBR		1023
 
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
 #define PCI_VENDOR_ID_ROHM		0x10DB
 #define PCI_DEVICE_ID_ML7213_SPI	0x802c
 #define PCI_DEVICE_ID_ML7223_SPI	0x800F
+#define PCI_DEVICE_ID_ML7831_SPI	0x8816
 
 /*
  * Set the number of SPI instance max
  * Intel EG20T PCH :		1ch
- * OKI SEMICONDUCTOR ML7213 IOH :	2ch
- * OKI SEMICONDUCTOR ML7223 IOH :	1ch
+ * LAPIS Semiconductor ML7213 IOH :	2ch
+ * LAPIS Semiconductor ML7223 IOH :	1ch
+ * LAPIS Semiconductor ML7831 IOH :	1ch
 */
 #define PCH_SPI_MAX_DEV			2
 
@@ -218,6 +220,7 @@
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),    1, },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
 	{ }
 };
 
@@ -1753,4 +1756,4 @@
 		 "to use DMA for data transfers pass 1 else 0; default 1");
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77eae99..b2ccdea 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -319,7 +319,7 @@
 	}
 
 	spi->master = master;
-	spi->dev.parent = dev;
+	spi->dev.parent = &master->dev;
 	spi->dev.bus = &spi_bus_type;
 	spi->dev.release = spidev_release;
 	device_initialize(&spi->dev);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c..520e828 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@
 
 static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
-	ssb_pcicore_fix_sprom_core_index(pc);
+	struct ssb_device *pdev = pc->dev;
+	struct ssb_bus *bus = pdev->bus;
+
+	if (bus->bustype == SSB_BUSTYPE_PCI)
+		ssb_pcicore_fix_sprom_core_index(pc);
 
 	/* Disable PCI interrupts. */
-	ssb_write32(pc->dev, SSB_INTVEC, 0);
+	ssb_write32(pdev, SSB_INTVEC, 0);
 
 	/* Additional PCIe always once-executed workarounds */
 	if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 34c3bab..973223f 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -607,6 +607,29 @@
 	memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
 	       sizeof(out->antenna_gain.ghz5));
 
+	/* Extract FEM info */
+	SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+		SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+	SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G,
+		SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+	SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G,
+		SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+	SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G,
+		SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+	SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G,
+		SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+	SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G,
+		SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+	SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G,
+		SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+	SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G,
+		SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+	SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G,
+		SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+	SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+		SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
 	sprom_extract_r458(out, in);
 
 	/* TODO - get remaining rev 8 stuff needed */
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
index 2d8b8a3..1487638 100644
--- a/drivers/staging/bcm/target_params.h
+++ b/drivers/staging/bcm/target_params.h
@@ -72,8 +72,8 @@
 	// removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
 
     //BAMC Related Parameters
-    //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
-    //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
+    //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
+    //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
 	B_UINT32 m_u32BandAMCEnable;
 
 } stTargetParams,TARGET_PARAMS,*PTARGET_PARAMS, STARGETPARAMS, *PSTARGETPARAMS;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index c75a1a1..f9545b0 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -3598,7 +3598,7 @@
 			n = comedi_buf_write_alloc(s->async,
 				(7 + 12) * sizeof(unsigned int));
 
-			/*  If not enougth memory available, event is set to Comedi Buffer Errror */
+			/*  If not enough memory available, event is set to Comedi Buffer Error */
 			if (n > ((7 + 12) * sizeof(unsigned int))) {
 				printk("\ncomedi_buf_write_alloc n = %i", n);
 				s->async->events |= COMEDI_CB_ERROR;
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
index 5c99646..ae8f06d 100644
--- a/drivers/staging/cxt1e1/libsbew.h
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -323,7 +323,7 @@
 #define CFG_CH_DINV_TX      0x02
 
 
-/* Posssible resettable chipsets/functions */
+/* Possible resettable chipsets/functions */
 #define RESET_DEV_TEMUX     1
 #define RESET_DEV_TECT3     RESET_DEV_TEMUX
 #define RESET_DEV_PLL       2
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 2babb03..d8efed6 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -867,30 +867,4 @@
 	.id_table = usb_alphatrack_table,
 };
 
-/**
- *	usb_alphatrack_init
- */
-static int __init usb_alphatrack_init(void)
-{
-	int retval;
-
-	/* register this driver with the USB subsystem */
-	retval = usb_register(&usb_alphatrack_driver);
-	if (retval)
-		err("usb_register failed for the " __FILE__
-		    " driver. Error number %d\n", retval);
-
-	return retval;
-}
-
-/**
- *	usb_alphatrack_exit
- */
-static void __exit usb_alphatrack_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&usb_alphatrack_driver);
-}
-
-module_init(usb_alphatrack_init);
-module_exit(usb_alphatrack_exit);
+module_usb_driver(usb_alphatrack_driver);
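module_usb_driver() (used here and in the tranzport and ft1000 conversions below) generates the registration boilerplate that the removed init/exit functions used to spell out. Roughly, the macro expands to the following (a sketch of the generic module_driver() expansion, not a verbatim copy of the header):

static int __init usb_alphatrack_driver_init(void)
{
	return usb_register(&usb_alphatrack_driver);
}
module_init(usb_alphatrack_driver_init);

static void __exit usb_alphatrack_driver_exit(void)
{
	usb_deregister(&usb_alphatrack_driver);
}
module_exit(usb_alphatrack_driver_exit);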
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 8894ab1..c263284 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -971,29 +971,4 @@
 	.id_table = usb_tranzport_table,
 };
 
-/**
- *	usb_tranzport_init
- */
-static int __init usb_tranzport_init(void)
-{
-	int retval;
-
-	/* register this driver with the USB subsystem */
-	retval = usb_register(&usb_tranzport_driver);
-	if (retval)
-		err("usb_register failed for the " __FILE__
-			" driver. Error number %d\n", retval);
-	return retval;
-}
-/**
- *	usb_tranzport_exit
- */
-
-static void __exit usb_tranzport_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&usb_tranzport_driver);
-}
-
-module_init(usb_tranzport_init);
-module_exit(usb_tranzport_exit);
+module_usb_driver(usb_tranzport_driver);
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index b3d743a..917bbb0 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -344,10 +344,10 @@
 	}
 	mdelay(1);
 	if (info->AsicID == ELECTRABUZZ_ID) {
-		// set watermark to -1 in order to not generate an interrrupt
+		// set watermark to -1 in order to not generate an interrupt
 		ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
 	} else {
-		// set watermark to -1 in order to not generate an interrrupt
+		// set watermark to -1 in order to not generate an interrupt
 		ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
 	}
 	// clear interrupts
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index aaf44c3..43b1d36 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -601,7 +601,7 @@
 
 	mdelay(1);
 
-	/* set watermark to -1 in order to not generate an interrrupt */
+	/* set watermark to -1 in order to not generate an interrupt */
 	ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_MAG_WATERMARK);
 
 	/* clear interrupts */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 79482ac..84c38d5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -263,24 +263,4 @@
 	.id_table = id_table,
 };
 
-static int __init usb_ft1000_init(void)
-{
-	int ret = 0;
-
-	DEBUG("Initialize and register the driver\n");
-
-	ret = usb_register(&ft1000_usb_driver);
-	if (ret)
-		err("usb_register failed. Error number %d", ret);
-
-	return ret;
-}
-
-static void __exit usb_ft1000_exit(void)
-{
-	DEBUG("Deregister the driver\n");
-	usb_deregister(&ft1000_usb_driver);
-}
-
-module_init(usb_ft1000_init);
-module_exit(usb_ft1000_exit);
+module_usb_driver(ft1000_usb_driver);
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 1c5dad5..97f747e 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -549,18 +549,7 @@
 	.probe = adis16201_probe,
 	.remove = __devexit_p(adis16201_remove),
 };
-
-static __init int adis16201_init(void)
-{
-	return spi_register_driver(&adis16201_driver);
-}
-module_init(adis16201_init);
-
-static __exit void adis16201_exit(void)
-{
-	spi_unregister_driver(&adis16201_driver);
-}
-module_exit(adis16201_exit);
+module_spi_driver(adis16201_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16201 Programmable Digital Vibration Sensor driver");
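The module_spi_driver() conversions in these IIO drivers (and the module_i2c_driver() one in ad7291 below) follow the same pattern as module_usb_driver() above: the macro emits the init/exit pair around spi_register_driver()/spi_unregister_driver(). Roughly, for this driver:

static int __init adis16201_driver_init(void)
{
	return spi_register_driver(&adis16201_driver);
}
module_init(adis16201_driver_init);

static void __exit adis16201_driver_exit(void)
{
	spi_unregister_driver(&adis16201_driver);
}
module_exit(adis16201_driver_exit);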
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index 8a33374..a6d6d27 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -504,18 +504,7 @@
 	.probe = adis16203_probe,
 	.remove = __devexit_p(adis16203_remove),
 };
-
-static __init int adis16203_init(void)
-{
-	return spi_register_driver(&adis16203_driver);
-}
-module_init(adis16203_init);
-
-static __exit void adis16203_exit(void)
-{
-	spi_unregister_driver(&adis16203_driver);
-}
-module_exit(adis16203_exit);
+module_spi_driver(adis16203_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 644ac8e..7ac5b4c 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -577,18 +577,7 @@
 	.probe = adis16204_probe,
 	.remove = __devexit_p(adis16204_remove),
 };
-
-static __init int adis16204_init(void)
-{
-	return spi_register_driver(&adis16204_driver);
-}
-module_init(adis16204_init);
-
-static __exit void adis16204_exit(void)
-{
-	spi_unregister_driver(&adis16204_driver);
-}
-module_exit(adis16204_exit);
+module_spi_driver(adis16204_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 0a8571b..c03afbf 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -553,18 +553,7 @@
 	.probe = adis16209_probe,
 	.remove = __devexit_p(adis16209_remove),
 };
-
-static __init int adis16209_init(void)
-{
-	return spi_register_driver(&adis16209_driver);
-}
-module_init(adis16209_init);
-
-static __exit void adis16209_exit(void)
-{
-	spi_unregister_driver(&adis16209_driver);
-}
-module_exit(adis16209_exit);
+module_spi_driver(adis16209_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 6d4503d..73298e7 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -708,18 +708,7 @@
 	.probe = adis16220_probe,
 	.remove = __devexit_p(adis16220_remove),
 };
-
-static __init int adis16220_init(void)
-{
-	return spi_register_driver(&adis16220_driver);
-}
-module_init(adis16220_init);
-
-static __exit void adis16220_exit(void)
-{
-	spi_unregister_driver(&adis16220_driver);
-}
-module_exit(adis16220_exit);
+module_spi_driver(adis16220_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index b8be292..88881b9 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -606,18 +606,7 @@
 	.probe = adis16240_probe,
 	.remove = __devexit_p(adis16240_remove),
 };
-
-static __init int adis16240_init(void)
-{
-	return spi_register_driver(&adis16240_driver);
-}
-module_init(adis16240_init);
-
-static __exit void adis16240_exit(void)
-{
-	spi_unregister_driver(&adis16240_driver);
-}
-module_exit(adis16240_exit);
+module_spi_driver(adis16240_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index 5238503..cfce21c 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -280,18 +280,7 @@
 	.remove = __devexit_p(kxsd9_remove),
 	.id_table = kxsd9_id,
 };
-
-static __init int kxsd9_spi_init(void)
-{
-	return spi_register_driver(&kxsd9_driver);
-}
-module_init(kxsd9_spi_init);
-
-static __exit void kxsd9_spi_exit(void)
-{
-	spi_unregister_driver(&kxsd9_driver);
-}
-module_exit(kxsd9_spi_exit);
+module_spi_driver(kxsd9_driver);
 
 MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
 MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 559545a..6877521 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -804,18 +804,7 @@
 	.probe = lis3l02dq_probe,
 	.remove = __devexit_p(lis3l02dq_remove),
 };
-
-static __init int lis3l02dq_init(void)
-{
-	return spi_register_driver(&lis3l02dq_driver);
-}
-module_init(lis3l02dq_init);
-
-static __exit void lis3l02dq_exit(void)
-{
-	spi_unregister_driver(&lis3l02dq_driver);
-}
-module_exit(lis3l02dq_exit);
+module_spi_driver(lis3l02dq_driver);
 
 MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
 MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index a44a705..6c35907 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -1250,18 +1250,7 @@
 	.remove = __devexit_p(sca3000_remove),
 	.id_table = sca3000_id,
 };
-
-static __init int sca3000_init(void)
-{
-	return spi_register_driver(&sca3000_driver);
-}
-module_init(sca3000_init);
-
-static __exit void sca3000_exit(void)
-{
-	spi_unregister_driver(&sca3000_driver);
-}
-module_exit(sca3000_exit);
+module_spi_driver(sca3000_driver);
 
 MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
 MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 31c376b..797e65c 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -838,14 +838,14 @@
 	NULL
 };
 
-static mode_t ad7192_attr_is_visible(struct kobject *kobj,
+static umode_t ad7192_attr_is_visible(struct kobject *kobj,
 				     struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad7192_state *st = iio_priv(indio_dev);
 
-	mode_t mode = attr->mode;
+	umode_t mode = attr->mode;
 
 	if ((st->devid != ID_AD7195) &&
 		(attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
@@ -1161,18 +1161,7 @@
 	.remove		= __devexit_p(ad7192_remove),
 	.id_table	= ad7192_id,
 };
-
-static int __init ad7192_init(void)
-{
-	return spi_register_driver(&ad7192_driver);
-}
-module_init(ad7192_init);
-
-static void __exit ad7192_exit(void)
-{
-	spi_unregister_driver(&ad7192_driver);
-}
-module_exit(ad7192_exit);
+module_spi_driver(ad7192_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 372d059..dbaeae8 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -979,18 +979,7 @@
 	.remove		= __devexit_p(ad7280_remove),
 	.id_table	= ad7280_id,
 };
-
-static int __init ad7280_init(void)
-{
-	return spi_register_driver(&ad7280_driver);
-}
-module_init(ad7280_init);
-
-static void __exit ad7280_exit(void)
-{
-	spi_unregister_driver(&ad7280_driver);
-}
-module_exit(ad7280_exit);
+module_spi_driver(ad7280_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7280A");
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 10e79e8..aa44a52 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -700,20 +700,8 @@
 	.remove = __devexit_p(ad7291_remove),
 	.id_table = ad7291_id,
 };
-
-static __init int ad7291_init(void)
-{
-	return i2c_add_driver(&ad7291_driver);
-}
-
-static __exit void ad7291_exit(void)
-{
-	i2c_del_driver(&ad7291_driver);
-}
+module_i2c_driver(ad7291_driver);
 
 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
 MODULE_DESCRIPTION("Analog Devices AD7291 ADC driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(ad7291_init);
-module_exit(ad7291_exit);
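The I2C drivers get the same treatment via module_i2c_driver(), which pairs i2c_add_driver()/i2c_del_driver() instead of the SPI calls. A sketch of the equivalent expansion for the ad7291 case above:

	static int __init ad7291_driver_init(void)
	{
		return i2c_add_driver(&ad7291_driver);
	}
	module_init(ad7291_driver_init);

	static void __exit ad7291_driver_exit(void)
	{
		i2c_del_driver(&ad7291_driver);
	}
	module_exit(ad7291_driver_exit);

A visible side effect in several of these hunks is that stray blank lines and module_init()/module_exit() calls placed after the MODULE_* macros disappear, since the helper keeps registration in one place.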
diff --git a/drivers/staging/iio/adc/ad7298_core.c b/drivers/staging/iio/adc/ad7298_core.c
index c1de73a..a799bd1 100644
--- a/drivers/staging/iio/adc/ad7298_core.c
+++ b/drivers/staging/iio/adc/ad7298_core.c
@@ -276,18 +276,7 @@
 	.remove		= __devexit_p(ad7298_remove),
 	.id_table	= ad7298_id,
 };
-
-static int __init ad7298_init(void)
-{
-	return spi_register_driver(&ad7298_driver);
-}
-module_init(ad7298_init);
-
-static void __exit ad7298_exit(void)
-{
-	spi_unregister_driver(&ad7298_driver);
-}
-module_exit(ad7298_exit);
+module_spi_driver(ad7298_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index fd79fac..0b58520 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -248,18 +248,7 @@
 	.remove		= __devexit_p(ad7476_remove),
 	.id_table	= ad7476_id,
 };
-
-static int __init ad7476_init(void)
-{
-	return spi_register_driver(&ad7476_driver);
-}
-module_init(ad7476_init);
-
-static void __exit ad7476_exit(void)
-{
-	spi_unregister_driver(&ad7476_driver);
-}
-module_exit(ad7476_exit);
+module_spi_driver(ad7476_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7475/6/7/8(A) AD7466/7/8 ADC");
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 54423ab..e3ecd3d 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -205,14 +205,14 @@
 	NULL,
 };
 
-static mode_t ad7606_attr_is_visible(struct kobject *kobj,
+static umode_t ad7606_attr_is_visible(struct kobject *kobj,
 				     struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad7606_state *st = iio_priv(indio_dev);
 
-	mode_t mode = attr->mode;
+	umode_t mode = attr->mode;
 
 	if (!(gpio_is_valid(st->pdata->gpio_os0) &&
 	      gpio_is_valid(st->pdata->gpio_os1) &&
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index aede1ba..b984bd2 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -109,18 +109,7 @@
 	.remove = __devexit_p(ad7606_spi_remove),
 	.id_table = ad7606_id,
 };
-
-static int __init ad7606_spi_init(void)
-{
-	return spi_register_driver(&ad7606_driver);
-}
-module_init(ad7606_spi_init);
-
-static void __exit ad7606_spi_exit(void)
-{
-	spi_unregister_driver(&ad7606_driver);
-}
-module_exit(ad7606_spi_exit);
+module_spi_driver(ad7606_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 7a579a1..ec90261 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -283,18 +283,7 @@
 	.remove		= __devexit_p(ad7780_remove),
 	.id_table	= ad7780_id,
 };
-
-static int __init ad7780_init(void)
-{
-	return spi_register_driver(&ad7780_driver);
-}
-module_init(ad7780_init);
-
-static void __exit ad7780_exit(void)
-{
-	spi_unregister_driver(&ad7780_driver);
-}
-module_exit(ad7780_exit);
+module_spi_driver(ad7780_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7780/1 ADC");
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 999f8f7..1c5588e 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -1045,18 +1045,7 @@
 	.remove		= __devexit_p(ad7793_remove),
 	.id_table	= ad7793_id,
 };
-
-static int __init ad7793_init(void)
-{
-	return spi_register_driver(&ad7793_driver);
-}
-module_init(ad7793_init);
-
-static void __exit ad7793_exit(void)
-{
-	spi_unregister_driver(&ad7793_driver);
-}
-module_exit(ad7793_exit);
+module_spi_driver(ad7793_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7792/3 ADC");
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index bdb9049..acbf936 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -466,21 +466,9 @@
 	.remove = __devexit_p(ad7816_remove),
 	.id_table = ad7816_id,
 };
-
-static __init int ad7816_init(void)
-{
-	return spi_register_driver(&ad7816_driver);
-}
-
-static __exit void ad7816_exit(void)
-{
-	spi_unregister_driver(&ad7816_driver);
-}
+module_spi_driver(ad7816_driver);
 
 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
 MODULE_DESCRIPTION("Analog Devices AD7816/7/8 digital"
 			" temperature sensor driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(ad7816_init);
-module_exit(ad7816_exit);
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 609dcd5..91b8fb0 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -257,18 +257,7 @@
 	.remove		= __devexit_p(ad7887_remove),
 	.id_table	= ad7887_id,
 };
-
-static int __init ad7887_init(void)
-{
-	return spi_register_driver(&ad7887_driver);
-}
-module_init(ad7887_init);
-
-static void __exit ad7887_exit(void)
-{
-	spi_unregister_driver(&ad7887_driver);
-}
-module_exit(ad7887_exit);
+module_spi_driver(ad7887_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index ee6cd79..c0d2f88 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -929,21 +929,9 @@
 	.remove = __devexit_p(ad799x_remove),
 	.id_table = ad799x_id,
 };
-
-static __init int ad799x_init(void)
-{
-	return i2c_add_driver(&ad799x_driver);
-}
-
-static __exit void ad799x_exit(void)
-{
-	i2c_del_driver(&ad799x_driver);
-}
+module_i2c_driver(ad799x_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD799x ADC");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("i2c:ad799x");
-
-module_init(ad799x_init);
-module_exit(ad799x_exit);
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index c9e0be3..bc307f3 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -884,21 +884,9 @@
 	.remove = __devexit_p(adt7310_remove),
 	.id_table = adt7310_id,
 };
-
-static __init int adt7310_init(void)
-{
-	return spi_register_driver(&adt7310_driver);
-}
-
-static __exit void adt7310_exit(void)
-{
-	spi_unregister_driver(&adt7310_driver);
-}
+module_spi_driver(adt7310_driver);
 
 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
 MODULE_DESCRIPTION("Analog Devices ADT7310 digital"
 			" temperature sensor driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(adt7310_init);
-module_exit(adt7310_exit);
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index a289e42..3481cf6 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -844,21 +844,9 @@
 	.remove = __devexit_p(adt7410_remove),
 	.id_table = adt7410_id,
 };
-
-static __init int adt7410_init(void)
-{
-	return i2c_add_driver(&adt7410_driver);
-}
-
-static __exit void adt7410_exit(void)
-{
-	i2c_del_driver(&adt7410_driver);
-}
+module_i2c_driver(adt7410_driver);
 
 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
 MODULE_DESCRIPTION("Analog Devices ADT7410 digital"
 			" temperature sensor driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(adt7410_init);
-module_exit(adt7410_exit);
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index eb699ad..3f28f1a 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -1410,20 +1410,8 @@
 	.remove = max1363_remove,
 	.id_table = max1363_id,
 };
-
-static __init int max1363_init(void)
-{
-	return i2c_add_driver(&max1363_driver);
-}
-
-static __exit void max1363_exit(void)
-{
-	i2c_del_driver(&max1363_driver);
-}
+module_i2c_driver(max1363_driver);
 
 MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
 MODULE_DESCRIPTION("Maxim 1363 ADC");
 MODULE_LICENSE("GPL v2");
-
-module_init(max1363_init);
-module_exit(max1363_exit);
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 07d718e..2c03a39 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -151,21 +151,9 @@
 	.resume = adt7316_i2c_resume,
 	.id_table = adt7316_i2c_id,
 };
-
-static __init int adt7316_i2c_init(void)
-{
-	return i2c_add_driver(&adt7316_driver);
-}
-
-static __exit void adt7316_i2c_exit(void)
-{
-	i2c_del_driver(&adt7316_driver);
-}
+module_i2c_driver(adt7316_driver);
 
 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
 MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/9 and"
 			"ADT7516/7/8 digital temperature sensor, ADC and DAC");
 MODULE_LICENSE("GPL v2");
-
-module_init(adt7316_i2c_init);
-module_exit(adt7316_i2c_exit);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 369d4d0..1e93c7b 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -160,21 +160,9 @@
 	.resume = adt7316_spi_resume,
 	.id_table = adt7316_spi_id,
 };
-
-static __init int adt7316_spi_init(void)
-{
-	return spi_register_driver(&adt7316_driver);
-}
-
-static __exit void adt7316_spi_exit(void)
-{
-	spi_unregister_driver(&adt7316_driver);
-}
+module_spi_driver(adt7316_driver);
 
 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
 MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and"
 			"ADT7516/7/9 digital temperature sensor, ADC and DAC");
 MODULE_LICENSE("GPL v2");
-
-module_init(adt7316_spi_init);
-module_exit(adt7316_spi_exit);
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index a761ca9..4718187 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -657,20 +657,8 @@
 	.remove = __devexit_p(ad7150_remove),
 	.id_table = ad7150_id,
 };
-
-static __init int ad7150_init(void)
-{
-	return i2c_add_driver(&ad7150_driver);
-}
-
-static __exit void ad7150_exit(void)
-{
-	i2c_del_driver(&ad7150_driver);
-}
+module_i2c_driver(ad7150_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices AD7150/1/6 capacitive sensor driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(ad7150_init);
-module_exit(ad7150_exit);
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 662584d..152d3be 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -540,20 +540,8 @@
 	.remove = __devexit_p(ad7152_remove),
 	.id_table = ad7152_id,
 };
-
-static __init int ad7152_init(void)
-{
-	return i2c_add_driver(&ad7152_driver);
-}
-
-static __exit void ad7152_exit(void)
-{
-	i2c_del_driver(&ad7152_driver);
-}
+module_i2c_driver(ad7152_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices AD7152/3 capacitive sensor driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(ad7152_init);
-module_exit(ad7152_exit);
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 2867943..9df5908 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -788,20 +788,8 @@
 	.remove = __devexit_p(ad7746_remove),
 	.id_table = ad7746_id,
 };
-
-static __init int ad7746_init(void)
-{
-	return i2c_add_driver(&ad7746_driver);
-}
-
-static __exit void ad7746_exit(void)
-{
-	i2c_del_driver(&ad7746_driver);
-}
+module_i2c_driver(ad7746_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
 MODULE_LICENSE("GPL v2");
-
-module_init(ad7746_init);
-module_exit(ad7746_exit);
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 24279f2..39cfe6c 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -445,18 +445,7 @@
 	.remove = __devexit_p(ad5064_remove),
 	.id_table = ad5064_id,
 };
-
-static __init int ad5064_spi_init(void)
-{
-	return spi_register_driver(&ad5064_driver);
-}
-module_init(ad5064_spi_init);
-
-static __exit void ad5064_spi_exit(void)
-{
-	spi_unregister_driver(&ad5064_driver);
-}
-module_exit(ad5064_spi_exit);
+module_spi_driver(ad5064_driver);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index 72d0f3f..bc0459e 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -563,18 +563,7 @@
 	.remove = __devexit_p(ad5360_remove),
 	.id_table = ad5360_ids,
 };
-
-static __init int ad5360_spi_init(void)
-{
-	return spi_register_driver(&ad5360_driver);
-}
-module_init(ad5360_spi_init);
-
-static __exit void ad5360_spi_exit(void)
-{
-	spi_unregister_driver(&ad5360_driver);
-}
-module_exit(ad5360_spi_exit);
+module_spi_driver(ad5360_driver);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC");
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e1c204d..ec701e9 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -197,14 +197,14 @@
 	NULL,
 };
 
-static mode_t ad5446_attr_is_visible(struct kobject *kobj,
+static umode_t ad5446_attr_is_visible(struct kobject *kobj,
 				     struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad5446_state *st = iio_priv(indio_dev);
 
-	mode_t mode = attr->mode;
+	umode_t mode = attr->mode;
 
 	if (!st->chip_info->store_pwr_down &&
 		(attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
@@ -465,18 +465,7 @@
 	.remove		= __devexit_p(ad5446_remove),
 	.id_table	= ad5446_id,
 };
-
-static int __init ad5446_init(void)
-{
-	return spi_register_driver(&ad5446_driver);
-}
-module_init(ad5446_init);
-
-static void __exit ad5446_exit(void)
-{
-	spi_unregister_driver(&ad5446_driver);
-}
-module_exit(ad5446_exit);
+module_spi_driver(ad5446_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
diff --git a/drivers/staging/iio/dac/ad5504.c b/drivers/staging/iio/dac/ad5504.c
index 60dd640..57539ce 100644
--- a/drivers/staging/iio/dac/ad5504.c
+++ b/drivers/staging/iio/dac/ad5504.c
@@ -377,18 +377,7 @@
 	.remove = __devexit_p(ad5504_remove),
 	.id_table = ad5504_id,
 };
-
-static __init int ad5504_spi_init(void)
-{
-	return spi_register_driver(&ad5504_driver);
-}
-module_init(ad5504_spi_init);
-
-static __exit void ad5504_spi_exit(void)
-{
-	spi_unregister_driver(&ad5504_driver);
-}
-module_exit(ad5504_spi_exit);
+module_spi_driver(ad5504_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD5501/AD5501 DAC");
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
index 284d8790..6e05f0d 100644
--- a/drivers/staging/iio/dac/ad5624r_spi.c
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -317,18 +317,7 @@
 	.remove = __devexit_p(ad5624r_remove),
 	.id_table = ad5624r_id,
 };
-
-static __init int ad5624r_spi_init(void)
-{
-	return spi_register_driver(&ad5624r_driver);
-}
-module_init(ad5624r_spi_init);
-
-static __exit void ad5624r_spi_exit(void)
-{
-	spi_unregister_driver(&ad5624r_driver);
-}
-module_exit(ad5624r_spi_exit);
+module_spi_driver(ad5624r_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices AD5624/44/64R DAC spi driver");
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index 974c6f5..e72db2f 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -447,18 +447,7 @@
 	.remove = __devexit_p(ad5686_remove),
 	.id_table = ad5686_id,
 };
-
-static __init int ad5686_spi_init(void)
-{
-	return spi_register_driver(&ad5686_driver);
-}
-module_init(ad5686_spi_init);
-
-static __exit void ad5686_spi_exit(void)
-{
-	spi_unregister_driver(&ad5686_driver);
-}
-module_exit(ad5686_spi_exit);
+module_spi_driver(ad5686_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
diff --git a/drivers/staging/iio/dac/ad5791.c b/drivers/staging/iio/dac/ad5791.c
index 6fbca8d..4a80fd8 100644
--- a/drivers/staging/iio/dac/ad5791.c
+++ b/drivers/staging/iio/dac/ad5791.c
@@ -410,18 +410,7 @@
 	.remove = __devexit_p(ad5791_remove),
 	.id_table = ad5791_id,
 };
-
-static __init int ad5791_spi_init(void)
-{
-	return spi_register_driver(&ad5791_driver);
-}
-module_init(ad5791_spi_init);
-
-static __exit void ad5791_spi_exit(void)
-{
-	spi_unregister_driver(&ad5791_driver);
-}
-module_exit(ad5791_spi_exit);
+module_spi_driver(ad5791_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5791 DAC");
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index adfbd20..a4df6d7 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -280,20 +280,8 @@
 	.resume		= max517_resume,
 	.id_table	= max517_id,
 };
-
-static int __init max517_init(void)
-{
-	return i2c_add_driver(&max517_driver);
-}
-
-static void __exit max517_exit(void)
-{
-	i2c_del_driver(&max517_driver);
-}
+module_i2c_driver(max517_driver);
 
 MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
 MODULE_DESCRIPTION("MAX517/MAX518/MAX519 8-bit DAC");
 MODULE_LICENSE("GPL");
-
-module_init(max517_init);
-module_exit(max517_exit);
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
index f5e368b..4a360d0 100644
--- a/drivers/staging/iio/dds/ad5930.c
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -143,18 +143,7 @@
 	.probe = ad5930_probe,
 	.remove = __devexit_p(ad5930_remove),
 };
-
-static __init int ad5930_spi_init(void)
-{
-	return spi_register_driver(&ad5930_driver);
-}
-module_init(ad5930_spi_init);
-
-static __exit void ad5930_spi_exit(void)
-{
-	spi_unregister_driver(&ad5930_driver);
-}
-module_exit(ad5930_spi_exit);
+module_spi_driver(ad5930_driver);
 
 MODULE_AUTHOR("Cliff Cai");
 MODULE_DESCRIPTION("Analog Devices ad5930 driver");
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
index 9b4ff60..cc32fd6 100644
--- a/drivers/staging/iio/dds/ad9832.c
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -355,18 +355,7 @@
 	.remove		= __devexit_p(ad9832_remove),
 	.id_table	= ad9832_id,
 };
-
-static int __init ad9832_init(void)
-{
-	return spi_register_driver(&ad9832_driver);
-}
-module_init(ad9832_init);
-
-static void __exit ad9832_exit(void)
-{
-	spi_unregister_driver(&ad9832_driver);
-}
-module_exit(ad9832_exit);
+module_spi_driver(ad9832_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD9832/AD9835 DDS");
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index c468f69..51fda6f 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -281,14 +281,14 @@
 	NULL,
 };
 
-static mode_t ad9834_attr_is_visible(struct kobject *kobj,
+static umode_t ad9834_attr_is_visible(struct kobject *kobj,
 				     struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct ad9834_state *st = iio_priv(indio_dev);
 
-	mode_t mode = attr->mode;
+	umode_t mode = attr->mode;
 
 	if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
 		((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
@@ -446,18 +446,7 @@
 	.remove		= __devexit_p(ad9834_remove),
 	.id_table	= ad9834_id,
 };
-
-static int __init ad9834_init(void)
-{
-	return spi_register_driver(&ad9834_driver);
-}
-module_init(ad9834_init);
-
-static void __exit ad9834_exit(void)
-{
-	spi_unregister_driver(&ad9834_driver);
-}
-module_exit(ad9834_exit);
+module_spi_driver(ad9834_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
index a14771b..f9c96af 100644
--- a/drivers/staging/iio/dds/ad9850.c
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -129,18 +129,7 @@
 	.probe = ad9850_probe,
 	.remove = __devexit_p(ad9850_remove),
 };
-
-static __init int ad9850_spi_init(void)
-{
-	return spi_register_driver(&ad9850_driver);
-}
-module_init(ad9850_spi_init);
-
-static __exit void ad9850_spi_exit(void)
-{
-	spi_unregister_driver(&ad9850_driver);
-}
-module_exit(ad9850_spi_exit);
+module_spi_driver(ad9850_driver);
 
 MODULE_AUTHOR("Cliff Cai");
 MODULE_DESCRIPTION("Analog Devices ad9850 driver");
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
index cfceaa6..9fc73fd 100644
--- a/drivers/staging/iio/dds/ad9852.c
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -280,18 +280,7 @@
 	.probe = ad9852_probe,
 	.remove = __devexit_p(ad9852_remove),
 };
-
-static __init int ad9852_spi_init(void)
-{
-	return spi_register_driver(&ad9852_driver);
-}
-module_init(ad9852_spi_init);
-
-static __exit void ad9852_spi_exit(void)
-{
-	spi_unregister_driver(&ad9852_driver);
-}
-module_exit(ad9852_spi_exit);
+module_spi_driver(ad9852_driver);
 
 MODULE_AUTHOR("Cliff Cai");
 MODULE_DESCRIPTION("Analog Devices ad9852 driver");
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
index da83d2b..57046b0 100644
--- a/drivers/staging/iio/dds/ad9910.c
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -413,18 +413,7 @@
 	.probe = ad9910_probe,
 	.remove = __devexit_p(ad9910_remove),
 };
-
-static __init int ad9910_spi_init(void)
-{
-	return spi_register_driver(&ad9910_driver);
-}
-module_init(ad9910_spi_init);
-
-static __exit void ad9910_spi_exit(void)
-{
-	spi_unregister_driver(&ad9910_driver);
-}
-module_exit(ad9910_spi_exit);
+module_spi_driver(ad9910_driver);
 
 MODULE_AUTHOR("Cliff Cai");
 MODULE_DESCRIPTION("Analog Devices ad9910 driver");
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
index 20c1825..d29130e 100644
--- a/drivers/staging/iio/dds/ad9951.c
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -224,18 +224,7 @@
 	.probe = ad9951_probe,
 	.remove = __devexit_p(ad9951_remove),
 };
-
-static __init int ad9951_spi_init(void)
-{
-	return spi_register_driver(&ad9951_driver);
-}
-module_init(ad9951_spi_init);
-
-static __exit void ad9951_spi_exit(void)
-{
-	spi_unregister_driver(&ad9951_driver);
-}
-module_exit(ad9951_spi_exit);
+module_spi_driver(ad9951_driver);
 
 MODULE_AUTHOR("Cliff Cai");
 MODULE_DESCRIPTION("Analog Devices ad9951 driver");
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index 5d7a906..9405f2d 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -189,18 +189,7 @@
 	.probe = adis16080_probe,
 	.remove = __devexit_p(adis16080_remove),
 };
-
-static __init int adis16080_init(void)
-{
-	return spi_register_driver(&adis16080_driver);
-}
-module_init(adis16080_init);
-
-static __exit void adis16080_exit(void)
-{
-	spi_unregister_driver(&adis16080_driver);
-}
-module_exit(adis16080_exit);
+module_spi_driver(adis16080_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
index 749240d..c9aaca9 100644
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -168,18 +168,7 @@
 	.probe = adis16130_probe,
 	.remove = __devexit_p(adis16130_remove),
 };
-
-static __init int adis16130_init(void)
-{
-	return spi_register_driver(&adis16130_driver);
-}
-module_init(adis16130_init);
-
-static __exit void adis16130_exit(void)
-{
-	spi_unregister_driver(&adis16130_driver);
-}
-module_exit(adis16130_exit);
+module_spi_driver(adis16130_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate");
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index aaa3967..886dddf 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -711,18 +711,7 @@
 	.remove = __devexit_p(adis16260_remove),
 	.id_table = adis16260_id,
 };
-
-static __init int adis16260_init(void)
-{
-	return spi_register_driver(&adis16260_driver);
-}
-module_init(adis16260_init);
-
-static __exit void adis16260_exit(void)
-{
-	spi_unregister_driver(&adis16260_driver);
-}
-module_exit(adis16260_exit);
+module_spi_driver(adis16260_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/staging/iio/gyro/adxrs450_core.c
index 3c3ef79..70fd468 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/staging/iio/gyro/adxrs450_core.c
@@ -381,18 +381,7 @@
 	.probe = adxrs450_probe,
 	.remove = __devexit_p(adxrs450_remove),
 };
-
-static __init int adxrs450_init(void)
-{
-	return spi_register_driver(&adxrs450_driver);
-}
-module_init(adxrs450_init);
-
-static __exit void adxrs450_exit(void)
-{
-	spi_unregister_driver(&adxrs450_driver);
-}
-module_exit(adxrs450_exit);
+module_spi_driver(adxrs450_driver);
 
 MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>");
 MODULE_DESCRIPTION("Analog Devices ADXRS450 Gyroscope SPI driver");
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 1086e0b..454d131 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -796,18 +796,7 @@
 	.remove = __devexit_p(ad5933_remove),
 	.id_table = ad5933_id,
 };
-
-static __init int ad5933_init(void)
-{
-	return i2c_add_driver(&ad5933_driver);
-}
-module_init(ad5933_init);
-
-static __exit void ad5933_exit(void)
-{
-	i2c_del_driver(&ad5933_driver);
-}
-module_exit(ad5933_exit);
+module_i2c_driver(ad5933_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index d082a37..efc0f65 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -1128,18 +1128,7 @@
 	.probe = adis16400_probe,
 	.remove = __devexit_p(adis16400_remove),
 };
-
-static __init int adis16400_init(void)
-{
-	return spi_register_driver(&adis16400_driver);
-}
-module_init(adis16400_init);
-
-static __exit void adis16400_exit(void)
-{
-	spi_unregister_driver(&adis16400_driver);
-}
-module_exit(adis16400_exit);
+module_spi_driver(adis16400_driver);
 
 MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
 MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 2c626e0..68a4d4e 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -295,7 +295,7 @@
 EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
 
 /**
- * iio_trigger_read_currrent() - trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger
  *
  * For trigger consumers the current_trigger interface allows the trigger
  * used by the device to be queried.
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 9dc9e63..4763836 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -603,19 +603,7 @@
 	.remove	 = __devexit_p(isl29018_remove),
 	.id_table = isl29018_id,
 };
-
-static int __init isl29018_init(void)
-{
-	return i2c_add_driver(&isl29018_driver);
-}
-
-static void __exit isl29018_exit(void)
-{
-	i2c_del_driver(&isl29018_driver);
-}
-
-module_init(isl29018_init);
-module_exit(isl29018_exit);
+module_i2c_driver(isl29018_driver);
 
 MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 7e984bc..1942db1 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -866,20 +866,8 @@
 	.remove		= __devexit_p(tsl2563_remove),
 	.id_table	= tsl2563_id,
 };
-
-static int __init tsl2563_init(void)
-{
-	return i2c_add_driver(&tsl2563_i2c_driver);
-}
-
-static void __exit tsl2563_exit(void)
-{
-	i2c_del_driver(&tsl2563_i2c_driver);
-}
+module_i2c_driver(tsl2563_i2c_driver);
 
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_DESCRIPTION("tsl2563 light sensor driver");
 MODULE_LICENSE("GPL");
-
-module_init(tsl2563_init);
-module_exit(tsl2563_exit);
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 80f77cf..3836f73 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -933,19 +933,7 @@
 	.probe = taos_probe,
 	.remove = __devexit_p(taos_remove),
 };
-
-static int __init taos_init(void)
-{
-	return i2c_add_driver(&taos_driver);
-}
-
-static void __exit taos_exit(void)
-{
-	i2c_del_driver(&taos_driver);
-}
-
-module_init(taos_init);
-module_exit(taos_exit);
+module_i2c_driver(taos_driver);
 
 MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
 MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 8b01712..db31d6d 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -572,19 +572,7 @@
 	.remove		= __devexit_p(ak8975_remove),
 	.id_table	= ak8975_id,
 };
-
-static int __init ak8975_init(void)
-{
-	return i2c_add_driver(&ak8975_driver);
-}
-
-static void __exit ak8975_exit(void)
-{
-	i2c_del_driver(&ak8975_driver);
-}
-
-module_init(ak8975_init);
-module_exit(ak8975_exit);
+module_i2c_driver(ak8975_driver);
 
 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
 MODULE_DESCRIPTION("AK8975 magnetometer driver");
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index fc9ee97..7bb1bc6 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -618,20 +618,8 @@
 	.suspend	= hmc5843_suspend,
 	.resume		= hmc5843_resume,
 };
-
-static int __init hmc5843_init(void)
-{
-	return i2c_add_driver(&hmc5843_driver);
-}
-
-static void __exit hmc5843_exit(void)
-{
-	i2c_del_driver(&hmc5843_driver);
-}
+module_i2c_driver(hmc5843_driver);
 
 MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com");
 MODULE_DESCRIPTION("HMC5843 driver");
 MODULE_LICENSE("GPL");
-
-module_init(hmc5843_init);
-module_exit(hmc5843_exit);
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 940fef6..4c7b0cb 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -577,18 +577,7 @@
 	.probe = ade7753_probe,
 	.remove = __devexit_p(ade7753_remove),
 };
-
-static __init int ade7753_init(void)
-{
-	return spi_register_driver(&ade7753_driver);
-}
-module_init(ade7753_init);
-
-static __exit void ade7753_exit(void)
-{
-	spi_unregister_driver(&ade7753_driver);
-}
-module_exit(ade7753_exit);
+module_spi_driver(ade7753_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Meter");
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 33f0d32..15c98cd 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -600,18 +600,7 @@
 	.probe = ade7754_probe,
 	.remove = __devexit_p(ade7754_remove),
 };
-
-static __init int ade7754_init(void)
-{
-	return spi_register_driver(&ade7754_driver);
-}
-module_init(ade7754_init);
-
-static __exit void ade7754_exit(void)
-{
-	spi_unregister_driver(&ade7754_driver);
-}
-module_exit(ade7754_exit);
+module_spi_driver(ade7754_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index c5dafbdf..39338bc 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -853,18 +853,7 @@
 	.remove = __devexit_p(ade7758_remove),
 	.id_table = ade7758_id,
 };
-
-static __init int ade7758_init(void)
-{
-	return spi_register_driver(&ade7758_driver);
-}
-module_init(ade7758_init);
-
-static __exit void ade7758_exit(void)
-{
-	spi_unregister_driver(&ade7758_driver);
-}
-module_exit(ade7758_exit);
+module_spi_driver(ade7758_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index b691f10..cfa2a5e 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -521,18 +521,7 @@
 	.probe = ade7759_probe,
 	.remove = __devexit_p(ade7759_remove),
 };
-
-static __init int ade7759_init(void)
-{
-	return spi_register_driver(&ade7759_driver);
-}
-module_init(ade7759_init);
-
-static __exit void ade7759_exit(void)
-{
-	spi_unregister_driver(&ade7759_driver);
-}
-module_exit(ade7759_exit);
+module_spi_driver(ade7759_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index cbca3fd..1e1faa0 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -253,19 +253,7 @@
 	.remove   = __devexit_p(ade7854_i2c_remove),
 	.id_table = ade7854_id,
 };
-
-static __init int ade7854_i2c_init(void)
-{
-	return i2c_add_driver(&ade7854_i2c_driver);
-}
-module_init(ade7854_i2c_init);
-
-static __exit void ade7854_i2c_exit(void)
-{
-	i2c_del_driver(&ade7854_i2c_driver);
-}
-module_exit(ade7854_i2c_exit);
-
+module_i2c_driver(ade7854_i2c_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC I2C Driver");
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index cfa23ba..c485a79 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -353,18 +353,7 @@
 	.remove = __devexit_p(ade7854_spi_remove),
 	.id_table = ade7854_id,
 };
-
-static __init int ade7854_init(void)
-{
-	return spi_register_driver(&ade7854_driver);
-}
-module_init(ade7854_init);
-
-static __exit void ade7854_exit(void)
-{
-	spi_unregister_driver(&ade7854_driver);
-}
-module_exit(ade7854_exit);
+module_spi_driver(ade7854_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 SPI Driver");
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index d7ad46a..1c6a02b 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -170,18 +170,7 @@
 	.remove = __devexit_p(ad2s1200_remove),
 	.id_table = ad2s1200_id,
 };
-
-static __init int ad2s1200_spi_init(void)
-{
-	return spi_register_driver(&ad2s1200_driver);
-}
-module_init(ad2s1200_spi_init);
-
-static __exit void ad2s1200_spi_exit(void)
-{
-	spi_unregister_driver(&ad2s1200_driver);
-}
-module_exit(ad2s1200_spi_exit);
+module_spi_driver(ad2s1200_driver);
 
 MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6401a62..ff1b331 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -759,18 +759,7 @@
 	.remove = __devexit_p(ad2s1210_remove),
 	.id_table = ad2s1210_id,
 };
-
-static __init int ad2s1210_spi_init(void)
-{
-	return spi_register_driver(&ad2s1210_driver);
-}
-module_init(ad2s1210_spi_init);
-
-static __exit void ad2s1210_spi_exit(void)
-{
-	spi_unregister_driver(&ad2s1210_driver);
-}
-module_exit(ad2s1210_spi_exit);
+module_spi_driver(ad2s1210_driver);
 
 MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index a9200d9..6d07943 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -119,18 +119,7 @@
 	.remove = __devexit_p(ad2s90_remove),
 	.id_table = ad2s90_id,
 };
-
-static __init int ad2s90_spi_init(void)
-{
-	return spi_register_driver(&ad2s90_driver);
-}
-module_init(ad2s90_spi_init);
-
-static __exit void ad2s90_spi_exit(void)
-{
-	spi_unregister_driver(&ad2s90_driver);
-}
-module_exit(ad2s90_spi_exit);
+module_spi_driver(ad2s90_driver);
 
 MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
 MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 31f7813..cc49038 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -148,7 +148,7 @@
 {
 	WORD len, bn;
 
-	//if (Check_D_MediaPower())        ; ¦b 6250 don't care
+	//if (Check_D_MediaPower())        ; ¦b 6250 don't care
 	//    return(ErrCode);
 	//if (Check_D_MediaFmt(fdoExt))    ;
 	//    return(ErrCode);
@@ -594,7 +594,7 @@
 //    if (Check_D_CardStsChg())
 //        MediaChange = ERROR;
 //    //usleep(56*1024);
-//    if ((!Check_D_CntPower())&&(!MediaChange))  // ¦³ power & Media ¨S³Q change, «h return success
+//    if ((!Check_D_CntPower())&&(!MediaChange))  // ¦³ power & Media ¨S³Q change, «h return success
 //        return(SMSUCCESS);
 //    //usleep(56*1024);
 //
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index 66aad3a..4833034 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -701,26 +701,4 @@
 	.soft_unbind =	1,
 };
 
-//----- usb_stor_init() ---------------------
-static int __init usb_stor_init(void)
-{
-	int retval;
-	pr_info("usb --- usb_stor_init start\n");
-
-	retval = usb_register(&usb_storage_driver);
-	if (retval == 0)
-		pr_info("ENE USB Mass Storage support registered.\n");
-
-	return retval;
-}
-
-//----- usb_stor_exit() ---------------------
-static void __exit usb_stor_exit(void)
-{
-	pr_info("usb --- usb_stor_exit\n");
-
-	usb_deregister(&usb_storage_driver) ;
-}
-
-module_init(usb_stor_init);
-module_exit(usb_stor_exit);
+module_usb_driver(usb_storage_driver);
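USB drivers follow with module_usb_driver(), built on usb_register()/usb_deregister(). The only functional difference in these conversions is that the ad-hoc load/unload printk banners are dropped, which is intentional. The helper reduces to roughly:

	static int __init usb_storage_driver_init(void)
	{
		return usb_register(&usb_storage_driver);
	}
	module_init(usb_storage_driver_init);

	static void __exit usb_storage_driver_exit(void)
	{
		usb_deregister(&usb_storage_driver);
	}
	module_exit(usb_storage_driver_exit);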
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 3db3b0a..b7175fe 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1272,17 +1272,4 @@
 	.id_table	= go7007_usb_id_table,
 };
 
-static int __init go7007_usb_init(void)
-{
-	return usb_register(&go7007_usb_driver);
-}
-
-static void __exit go7007_usb_cleanup(void)
-{
-	usb_deregister(&go7007_usb_driver);
-}
-
-module_init(go7007_usb_init);
-module_exit(go7007_usb_cleanup);
-
-MODULE_LICENSE("GPL v2");
+module_usb_driver(go7007_usb_driver);
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 0dc2c2b..6cd4cd6 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -541,26 +541,7 @@
 	.id_table =	igorplugusb_remote_id_table
 };
 
-static int __init igorplugusb_remote_init(void)
-{
-	int ret = 0;
-
-	dprintk(DRIVER_NAME ": loaded, debug mode enabled\n");
-
-	ret = usb_register(&igorplugusb_remote_driver);
-	if (ret)
-		printk(KERN_ERR DRIVER_NAME ": usb register failed!\n");
-
-	return ret;
-}
-
-static void __exit igorplugusb_remote_exit(void)
-{
-	usb_deregister(&igorplugusb_remote_driver);
-}
-
-module_init(igorplugusb_remote_init);
-module_exit(igorplugusb_remote_exit);
+module_usb_driver(igorplugusb_remote_driver);
 
 #include <linux/vermagic.h>
 MODULE_INFO(vermagic, VERMAGIC_STRING);
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index f5308d5..f682180 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -1025,26 +1025,4 @@
 	return rc;
 }
 
-static int __init imon_init(void)
-{
-	int rc;
-
-	printk(KERN_INFO MOD_NAME ": " MOD_DESC ", v" MOD_VERSION "\n");
-
-	rc = usb_register(&imon_driver);
-	if (rc) {
-		err("%s: usb register failed(%d)", __func__, rc);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void __exit imon_exit(void)
-{
-	usb_deregister(&imon_driver);
-	printk(KERN_INFO MOD_NAME ": module removed. Goodbye!\n");
-}
-
-module_init(imon_init);
-module_exit(imon_exit);
+module_usb_driver(imon_driver);
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index a2d18b0..7855baa 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -913,27 +913,4 @@
 	mutex_unlock(&disconnect_lock);
 }
 
-static int __init sasem_init(void)
-{
-	int rc;
-
-	printk(KERN_INFO MOD_DESC ", v" MOD_VERSION "\n");
-	printk(KERN_INFO MOD_AUTHOR "\n");
-
-	rc = usb_register(&sasem_driver);
-	if (rc < 0) {
-		err("%s: usb register failed (%d)", __func__, rc);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static void __exit sasem_exit(void)
-{
-	usb_deregister(&sasem_driver);
-	printk(KERN_INFO "module removed. Goodbye!\n");
-}
-
-
-module_init(sasem_init);
-module_exit(sasem_exit);
+module_usb_driver(sasem_driver);
diff --git a/drivers/staging/media/lirc/lirc_ttusbir.c b/drivers/staging/media/lirc/lirc_ttusbir.c
index e4b329b..7950887 100644
--- a/drivers/staging/media/lirc/lirc_ttusbir.c
+++ b/drivers/staging/media/lirc/lirc_ttusbir.c
@@ -372,24 +372,4 @@
 	kfree(ttusbir);
 }
 
-static int ttusbir_init_module(void)
-{
-	int result;
-
-	DPRINTK(KERN_DEBUG "Module ttusbir init\n");
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&usb_driver);
-	if (result)
-		err("usb_register failed. Error number %d", result);
-	return result;
-}
-
-static void ttusbir_exit_module(void)
-{
-	printk(KERN_DEBUG "Module ttusbir exit\n");
-	usb_deregister(&usb_driver);
-}
-
-module_init(ttusbir_init_module);
-module_exit(ttusbir_exit_module);
+module_usb_driver(usb_driver);
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 7598e77..2ee4491 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -590,13 +590,13 @@
  * during writeback for given inode.
  */
 struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
-	struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode)
+	struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode)
 {
 	struct pohmelfs_inode *npi;
 	int err = -ENOMEM;
 	struct netfs_inode_info info;
 
-	dprintk("%s: name: '%s', mode: %o, start: %llu.\n",
+	dprintk("%s: name: '%s', mode: %ho, start: %llu.\n",
 			__func__, str->name, mode, start);
 
 	info.mode = mode;
@@ -630,7 +630,8 @@
 /*
  * Create local object and bind it to dentry.
  */
-static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 start, int mode)
+static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry,
+				 u64 start, umode_t mode)
 {
 	struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
 	struct pohmelfs_inode *npi, *parent;
@@ -661,13 +662,13 @@
 /*
  * VFS create and mkdir callbacks.
  */
-static int pohmelfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int pohmelfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	return pohmelfs_create_entry(dir, dentry, 0, mode);
 }
 
-static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int err;
 
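The pohmelfs changes track a VFS-wide signature change in this release: the create and mkdir inode operations now take umode_t rather than int. Abridged to the two members touched here, the inode_operations prototypes the hunks above implement are:

	int (*create)(struct inode *, struct dentry *, umode_t,
		      struct nameidata *);
	int (*mkdir)(struct inode *, struct dentry *, umode_t);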
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7a19555..807e3f3 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -830,7 +830,6 @@
 static void pohmelfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
 }
 
@@ -1370,9 +1369,9 @@
 	return 0;
 }
 
-static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int pohmelfs_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
+	struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
 
 	seq_printf(seq, ",idx=%u", psb->idx);
 	seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
@@ -1760,11 +1759,11 @@
 	return err;
 }
 
-static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+static int pohmelfs_show_stats(struct seq_file *m, struct dentry *root)
 {
 	struct netfs_state *st;
 	struct pohmelfs_ctl *ctl;
-	struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
+	struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
 	struct pohmelfs_config *c;
 
 	mutex_lock(&psb->state_lock);
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
index 985b6b7..f26894f 100644
--- a/drivers/staging/pohmelfs/netfs.h
+++ b/drivers/staging/pohmelfs/netfs.h
@@ -776,7 +776,7 @@
 void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi);
 
 struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
-	struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode);
+	struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode);
 
 int pohmelfs_write_create_inode(struct pohmelfs_inode *pi);
 
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c..5385da2 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@
 	{USB_DEVICE(0x0DF6, 0x0045)},
 	{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
 	{USB_DEVICE(0x0DF6, 0x004B)},
+	{USB_DEVICE(0x0DF6, 0x005D)},
 	{USB_DEVICE(0x0DF6, 0x0063)},
 	/* Sweex */
 	{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index d9cee6d..2b9f785 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -934,34 +934,4 @@
 	.soft_unbind = 1,
 };
 
-static int __init rts51x_init(void)
-{
-	int retval;
-
-	printk(KERN_INFO "Initializing %s USB card reader driver...\n",
-	       RTS51X_NAME);
-
-	/* register the driver, return usb_register return code if error */
-	retval = usb_register(&rts51x_driver);
-	if (retval == 0) {
-		printk(KERN_INFO
-		       "Realtek %s USB card reader support registered.\n",
-		       RTS51X_NAME);
-	}
-	return retval;
-}
-
-static void __exit rts51x_exit(void)
-{
-	RTS51X_DEBUGP("rts51x_exit() called\n");
-
-	/* Deregister the driver
-	 * This will cause disconnect() to be called for each
-	 * attached unit
-	 */
-	RTS51X_DEBUGP("-- calling usb_deregister()\n");
-	usb_deregister(&rts51x_driver);
-}
-
-module_init(rts51x_init);
-module_exit(rts51x_exit);
+module_usb_driver(rts51x_driver);
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 115635f..a7feb3e 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@
 	struct rtsx_chip *chip = dev->chip;
 	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		if (wait_for_completion_interruptible(&dev->cmnd_ready))
 			break;
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 8ac3fae..f47571e 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -1235,7 +1235,7 @@
 	/* Counter of lli array entry */
 	u32 array_counter;
 
-	/* Init currrent table data size and lli array entry counter */
+	/* Init current table data size and lli array entry counter */
 	curr_table_data_size = 0;
 	array_counter = 0;
 	*num_table_entries_ptr = 1;
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 93de4f2..21a559e 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -78,7 +78,7 @@
 	bool "Notify power errors"
 	depends on TIDSPBRIDGE
 	help
-	  Enable notifications to registered clients on the event of power errror
+	  Enable notifications to registered clients on the event of power error
 	  trying to suspend bridge driver. Say Y, to signal this event as a fatal
 	  error, this will require a bridge restart to recover.
 
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c..7eb5617 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
 
 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS		4
 
 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@
  */
 void dsp_clk_exit(void)
 {
+	int i;
+
 	dsp_clock_disable_all(dsp_clocks);
 
+	for (i = 0; i < DM_TIMER_CLOCKS; i++)
+		omap_dm_timer_free(timer[i]);
+
 	clk_put(iva2_clk);
 	clk_put(ssi.sst_fck);
 	clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@
 void dsp_clk_init(void)
 {
 	static struct platform_device dspbridge_device;
+	int i, id;
 
 	dspbridge_device.dev.bus = &platform_bus_type;
 
+	for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+		timer[i] = omap_dm_timer_request_specific(id);
+
 	iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
 	if (IS_ERR(iva2_clk))
 		dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@
 		clk_enable(iva2_clk);
 		break;
 	case GPT_CLK:
-		timer[clk_id - 1] =
-				omap_dm_timer_request_specific(DMT_ID(clk_id));
+		status = omap_dm_timer_start(timer[clk_id - 1]);
 		break;
 #ifdef CONFIG_OMAP_MCBSP
 	case MCBSP_CLK:
@@ -281,7 +290,7 @@
 		clk_disable(iva2_clk);
 		break;
 	case GPT_CLK:
-		omap_dm_timer_free(timer[clk_id - 1]);
+		status = omap_dm_timer_stop(timer[clk_id - 1]);
 		break;
 #ifdef CONFIG_OMAP_MCBSP
 	case MCBSP_CLK:
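The dsp-clock change is more than a cleanup: GP timers are now requested once in dsp_clk_init() and released in dsp_clk_exit(), while dsp_clk_enable()/dsp_clk_disable() merely start and stop them, instead of requesting and freeing a timer on every enable/disable cycle. A compressed sketch of the new lifetime, using the dmtimer calls from the hunks above (requires <plat/dmtimer.h>; error handling omitted):

	static void gpt_clock_lifetime_sketch(void)
	{
		struct omap_dm_timer *t;

		t = omap_dm_timer_request_specific(DMT_ID(1));	/* dsp_clk_init()           */
		omap_dm_timer_start(t);				/* dsp_clk_enable(GPT_CLK)  */
		omap_dm_timer_stop(t);				/* dsp_clk_disable(GPT_CLK) */
		omap_dm_timer_free(t);				/* dsp_clk_exit()           */
	}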
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index a7e407e..fda2402 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -285,7 +285,7 @@
 			enum_refs = 0;
 
 			/*
-			 * TODO: Revisit, this is not an errror case but code
+			 * TODO: Revisit, this is not an error case but code
 			 * expects non-zero value.
 			 */
 			status = ENODATA;
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3..76cfc6e 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 27521b6..541f9aa 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -2103,16 +2103,4 @@
 #endif /* CONFIG_PM */
 };
 
-static int __init vt6656_init_module(void)
-{
-    printk(KERN_NOTICE DEVICE_FULL_DRV_NAM " " DEVICE_VERSION);
-    return usb_register(&vt6656_driver);
-}
-
-static void __exit vt6656_cleanup_module(void)
-{
-	usb_deregister(&vt6656_driver);
-}
-
-module_init(vt6656_init_module);
-module_exit(vt6656_cleanup_module);
+module_usb_driver(vt6656_driver);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index f958eb4..c3751a7 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -865,15 +865,4 @@
 	.disconnect	= wb35_disconnect,
 };
 
-static int __init wb35_init(void)
-{
-	return usb_register(&wb35_driver);
-}
-
-static void __exit wb35_exit(void)
-{
-	usb_deregister(&wb35_driver);
-}
-
-module_init(wb35_init);
-module_exit(wb35_exit);
+module_usb_driver(wb35_driver);
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 8d5dddf..811698f 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/dhfcfg.h b/drivers/staging/wlags49_h2/dhfcfg.h
index 75c279f..147f4c8 100644
--- a/drivers/staging/wlags49_h2/dhfcfg.h
+++ b/drivers/staging/wlags49_h2/dhfcfg.h
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 7dc176a..b0087733 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -32,9 +32,9 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * COPYRIGHT © 1994 - 1995   by AT&T.                All Rights Reserved
- * COPYRIGHT © 1996 - 2000 by Lucent Technologies.   All Rights Reserved
- * COPYRIGHT © 2001 - 2004   by Agere Systems Inc.   All Rights Reserved
+ * COPYRIGHT © 1994 - 1995   by AT&T.                All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies.   All Rights Reserved
+ * COPYRIGHT © 2001 - 2004   by Agere Systems Inc.   All Rights Reserved
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcf.h b/drivers/staging/wlags49_h2/hcf.h
index 0009947..95527b5 100644
--- a/drivers/staging/wlags49_h2/hcf.h
+++ b/drivers/staging/wlags49_h2/hcf.h
@@ -40,9 +40,9 @@
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
-* COPYRIGHT © 1994 - 1995	by AT&T.				All Rights Reserved
-* COPYRIGHT © 1996 - 2000 by Lucent Technologies.	All Rights Reserved
-* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
+* COPYRIGHT © 1994 - 1995	by AT&T.				All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies.	All Rights Reserved
+* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfcfg.h b/drivers/staging/wlags49_h2/hcfcfg.h
index 7545bc5..ef60da8 100644
--- a/drivers/staging/wlags49_h2/hcfcfg.h
+++ b/drivers/staging/wlags49_h2/hcfcfg.h
@@ -64,9 +64,9 @@
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
-* COPYRIGHT © 1994 - 1995	by AT&T.				All Rights Reserved
-* COPYRIGHT © 1996 - 2000 by Lucent Technologies.	All Rights Reserved
-* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
+* COPYRIGHT © 1994 - 1995	by AT&T.				All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies.	All Rights Reserved
+* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfdef.h b/drivers/staging/wlags49_h2/hcfdef.h
index a62b53a..30744e1 100644
--- a/drivers/staging/wlags49_h2/hcfdef.h
+++ b/drivers/staging/wlags49_h2/hcfdef.h
@@ -33,9 +33,9 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * COPYRIGHT © 1994 - 1995   by AT&T.                All Rights Reserved
- * COPYRIGHT © 1996 - 2000 by Lucent Technologies.   All Rights Reserved
- * COPYRIGHT © 2001 - 2004   by Agere Systems Inc.   All Rights Reserved
+ * COPYRIGHT © 1994 - 1995   by AT&T.                All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies.   All Rights Reserved
+ * COPYRIGHT © 2001 - 2004   by Agere Systems Inc.   All Rights Reserved
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mdd.h b/drivers/staging/wlags49_h2/mdd.h
index b02e3ea..5f951ef 100644
--- a/drivers/staging/wlags49_h2/mdd.h
+++ b/drivers/staging/wlags49_h2/mdd.h
@@ -33,9 +33,9 @@
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
-* COPYRIGHT © 1994 - 1995	by AT&T.				All Rights Reserved
-* COPYRIGHT © 1996 - 2000 by Lucent Technologies.	All Rights Reserved
-* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
+* COPYRIGHT © 1994 - 1995	by AT&T.				All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies.	All Rights Reserved
+* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.c b/drivers/staging/wlags49_h2/mmd.c
index de138c4..c8f5210 100644
--- a/drivers/staging/wlags49_h2/mmd.c
+++ b/drivers/staging/wlags49_h2/mmd.c
@@ -35,7 +35,7 @@
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
-* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
+* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.h b/drivers/staging/wlags49_h2/mmd.h
index 06890c1..9149525 100644
--- a/drivers/staging/wlags49_h2/mmd.h
+++ b/drivers/staging/wlags49_h2/mmd.h
@@ -33,7 +33,7 @@
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
-* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
+* COPYRIGHT © 2001 - 2004	by Agere Systems Inc.	All Rights Reserved
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 21f17be..a7ab579 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 26cf548..4c6f776 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.h b/drivers/staging/wlags49_h2/wl_enc.h
index b4f54d8..46629f3 100644
--- a/drivers/staging/wlags49_h2/wl_enc.h
+++ b/drivers/staging/wlags49_h2/wl_enc.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_if.h b/drivers/staging/wlags49_h2/wl_if.h
index ed2b413..6a26130 100644
--- a/drivers/staging/wlags49_h2/wl_if.h
+++ b/drivers/staging/wlags49_h2/wl_if.h
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index 5753408..553601f 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 483eee1..dab603e 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.h b/drivers/staging/wlags49_h2/wl_main.h
index d593ae5..3b5acdf 100644
--- a/drivers/staging/wlags49_h2/wl_main.h
+++ b/drivers/staging/wlags49_h2/wl_main.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 5a2b334..9c16f54 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.h b/drivers/staging/wlags49_h2/wl_netdev.h
index 632ab2e..61f040f 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.h
+++ b/drivers/staging/wlags49_h2/wl_netdev.h
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 28ae9dd..1f1d986 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
index cea04c4..86831f1 100644
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ b/drivers/staging/wlags49_h2/wl_pci.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 260d4f0..f30e5ee 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.h b/drivers/staging/wlags49_h2/wl_priv.h
index 9b02544..b647bfd 100644
--- a/drivers/staging/wlags49_h2/wl_priv.h
+++ b/drivers/staging/wlags49_h2/wl_priv.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index a459e48..b8c96cf 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.h b/drivers/staging/wlags49_h2/wl_profile.h
index 81db8e8..f81df51 100644
--- a/drivers/staging/wlags49_h2/wl_profile.h
+++ b/drivers/staging/wlags49_h2/wl_profile.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 3b6f5a5..b748a3f 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.h b/drivers/staging/wlags49_h2/wl_util.h
index 2661bcd..946b1b6 100644
--- a/drivers/staging/wlags49_h2/wl_util.h
+++ b/drivers/staging/wlags49_h2/wl_util.h
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index fd37040..3deacfa 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -23,7 +23,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 8ac5e10..7ff0a10 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -18,7 +18,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -39,7 +39,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index a713058..029da52 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -22,7 +22,7 @@
  * software indicates your acceptance of these terms and conditions.  If you do
  * not agree with these terms and conditions, do not use the software.
  *
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
  * All rights reserved.
  *
  * Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
  *
  * Disclaimer
  *
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 4efa027..b1aed1f 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -358,16 +358,4 @@
 	/* fops, minor? */
 };
 
-static int __init prism2usb_init(void)
-{
-	/* This call will result in calls to prism2sta_probe_usb. */
-	return usb_register(&prism2_usb_driver);
-};
-
-static void __exit prism2usb_cleanup(void)
-{
-	usb_deregister(&prism2_usb_driver);
-};
-
-module_init(prism2usb_init);
-module_exit(prism2usb_cleanup);
+module_usb_driver(prism2_usb_driver);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cea5603..a09ce3e 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -417,7 +417,7 @@
 				__FILE__,__LINE__,tbuf,tbuf->count);
 			
 		/* Send the next block of data to device */
-		tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 		actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
 
 		/* rollback was possible and has been done */
@@ -459,7 +459,7 @@
 	}
 	
 	if (!tbuf)
-		tty->flags  &= ~(1 << TTY_DO_WRITE_WAKEUP);
+		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 	
 	/* Clear the re-entry flag */
 	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
@@ -491,7 +491,7 @@
 		return;
 
 	if (tty != n_hdlc->tty) {
-		tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 		return;
 	}
 
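n_hdlc stops open-coding bit updates on tty->flags and uses the atomic bitops instead; the flag word is shared with other paths, and a plain read-modify-write can lose a concurrent update of another bit. A minimal sketch of the two forms:

	#include <linux/tty.h>
	#include <linux/bitops.h>

	static void wakeup_flag_example(struct tty_struct *tty)
	{
		/* old form: non-atomic read-modify-write on tty->flags */
		tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);

		/* new form: atomic per-bit updates, safe against concurrent bitops */
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	}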
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 39d6ab6..d2256d0 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -61,7 +61,7 @@
  * controlling the space in the read buffer.
  */
 #define TTY_THRESHOLD_THROTTLE		128 /* now based on remaining room */
-#define TTY_THRESHOLD_UNTHROTTLE 	128
+#define TTY_THRESHOLD_UNTHROTTLE	128
 
 /*
  * Special byte codes used in the echo buffer to represent operations
@@ -405,7 +405,7 @@
 				    const unsigned char *buf, unsigned int nr)
 {
 	int	space;
-	int 	i;
+	int	i;
 	const unsigned char *cp;
 
 	mutex_lock(&tty->output_lock);
@@ -1607,7 +1607,7 @@
 }
 
 /**
- * 	copy_from_read_buf	-	copy read data directly
+ *	copy_from_read_buf	-	copy read data directly
  *	@tty: terminal device
  *	@b: user data
  *	@nr: size of data
@@ -1909,7 +1909,7 @@
 		if (nr)
 			clear_bit(TTY_PUSH, &tty->flags);
 	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
-		 goto do_it_again;
+		goto do_it_again;
 
 	n_tty_set_room(tty);
 	return retval;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e18604b..d8653ab 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,19 +446,8 @@
 int pty_limit = NR_UNIX98_PTY_DEFAULT;
 static int pty_limit_min;
 static int pty_limit_max = NR_UNIX98_PTY_MAX;
-static int tty_count;
 static int pty_count;
 
-static inline void pty_inc_count(void)
-{
-	pty_count = (++tty_count) / 2;
-}
-
-static inline void pty_dec_count(void)
-{
-	pty_count = (--tty_count) / 2;
-}
-
 static struct cdev ptmx_cdev;
 
 static struct ctl_table pty_table[] = {
@@ -600,8 +589,7 @@
 	 */
 	tty_driver_kref_get(driver);
 	tty->count++;
-	pty_inc_count(); /* tty */
-	pty_inc_count(); /* tty->link */
+	pty_count++;
 	return 0;
 err_free_mem:
 	deinitialize_tty_struct(o_tty);
@@ -613,15 +601,19 @@
 	return -ENOMEM;
 }
 
-static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+static void ptm_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-	pty_dec_count();
+	pty_count--;
+}
+
+static void pts_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
 }
 
 static const struct tty_operations ptm_unix98_ops = {
 	.lookup = ptm_unix98_lookup,
 	.install = pty_unix98_install,
-	.remove = pty_unix98_remove,
+	.remove = ptm_unix98_remove,
 	.open = pty_open,
 	.close = pty_close,
 	.write = pty_write,
@@ -638,7 +630,7 @@
 static const struct tty_operations pty_unix98_ops = {
 	.lookup = pts_unix98_lookup,
 	.install = pty_unix98_install,
-	.remove = pty_unix98_remove,
+	.remove = pts_unix98_remove,
 	.open = pty_open,
 	.close = pty_close,
 	.write = pty_write,
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index eeadf1b..9f50c4e 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -129,32 +129,6 @@
 static unsigned int probe_rsa_count;
 #endif /* CONFIG_SERIAL_8250_RSA  */
 
-struct uart_8250_port {
-	struct uart_port	port;
-	struct timer_list	timer;		/* "no irq" timer */
-	struct list_head	list;		/* ports on this IRQ */
-	unsigned short		capabilities;	/* port capabilities */
-	unsigned short		bugs;		/* port bugs */
-	unsigned int		tx_loadsz;	/* transmit fifo load size */
-	unsigned char		acr;
-	unsigned char		ier;
-	unsigned char		lcr;
-	unsigned char		mcr;
-	unsigned char		mcr_mask;	/* mask of user bits */
-	unsigned char		mcr_force;	/* mask of forced bits */
-	unsigned char		cur_iotype;	/* Running I/O type */
-
-	/*
-	 * Some bits in registers are cleared on a read, so they must
-	 * be saved whenever the register is read but the bits will not
-	 * be immediately processed.
-	 */
-#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
-	unsigned char		lsr_saved_flags;
-#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
-	unsigned char		msr_saved_flags;
-};
-
 struct irq_info {
 	struct			hlist_node node;
 	int			irq;
@@ -1326,8 +1300,6 @@
 	}
 }
 
-static void transmit_chars(struct uart_8250_port *up);
-
 static void serial8250_start_tx(struct uart_port *port)
 {
 	struct uart_8250_port *up =
@@ -1344,7 +1316,7 @@
 			if ((up->port.type == PORT_RM9000) ?
 				(lsr & UART_LSR_THRE) :
 				(lsr & UART_LSR_TEMT))
-				transmit_chars(up);
+				serial8250_tx_chars(up);
 		}
 	}
 
@@ -1401,11 +1373,16 @@
 	} while (1);
 }
 
-static void
-receive_chars(struct uart_8250_port *up, unsigned int *status)
+/*
+ * serial8250_rx_chars: processes according to the passed in LSR
+ * value, and returns the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
 {
 	struct tty_struct *tty = up->port.state->port.tty;
-	unsigned char ch, lsr = *status;
+	unsigned char ch;
 	int max_count = 256;
 	char flag;
 
@@ -1481,10 +1458,11 @@
 	spin_unlock(&up->port.lock);
 	tty_flip_buffer_push(tty);
 	spin_lock(&up->port.lock);
-	*status = lsr;
+	return lsr;
 }
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
 
-static void transmit_chars(struct uart_8250_port *up)
+void serial8250_tx_chars(struct uart_8250_port *up)
 {
 	struct circ_buf *xmit = &up->port.state->xmit;
 	int count;
@@ -1521,8 +1499,9 @@
 	if (uart_circ_empty(xmit))
 		__stop_tx(up);
 }
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
 
-static unsigned int check_modem_status(struct uart_8250_port *up)
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
 {
 	unsigned int status = serial_in(up, UART_MSR);
 
@@ -1544,14 +1523,20 @@
 
 	return status;
 }
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
 
 /*
  * This handles the interrupt from one port.
  */
-static void serial8250_handle_port(struct uart_8250_port *up)
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
 {
-	unsigned int status;
+	unsigned char status;
 	unsigned long flags;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
+
+	if (iir & UART_IIR_NO_INT)
+		return 0;
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
@@ -1560,25 +1545,13 @@
 	DEBUG_INTR("status = %x...", status);
 
 	if (status & (UART_LSR_DR | UART_LSR_BI))
-		receive_chars(up, &status);
-	check_modem_status(up);
+		status = serial8250_rx_chars(up, status);
+	serial8250_modem_status(up);
 	if (status & UART_LSR_THRE)
-		transmit_chars(up);
+		serial8250_tx_chars(up);
 
 	spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
-	struct uart_8250_port *up =
-		container_of(port, struct uart_8250_port, port);
-
-	if (!(iir & UART_IIR_NO_INT)) {
-		serial8250_handle_port(up);
-		return 1;
-	}
-
-	return 0;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(serial8250_handle_irq);
 
@@ -1619,11 +1592,13 @@
 	do {
 		struct uart_8250_port *up;
 		struct uart_port *port;
+		bool skip;
 
 		up = list_entry(l, struct uart_8250_port, list);
 		port = &up->port;
+		skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
 
-		if (port->handle_irq(port)) {
+		if (!skip && port->handle_irq(port)) {
 			handled = 1;
 			end = NULL;
 		} else if (end == NULL)
@@ -1758,11 +1733,8 @@
 static void serial8250_timeout(unsigned long data)
 {
 	struct uart_8250_port *up = (struct uart_8250_port *)data;
-	unsigned int iir;
 
-	iir = serial_in(up, UART_IIR);
-	if (!(iir & UART_IIR_NO_INT))
-		serial8250_handle_port(up);
+	up->port.handle_irq(&up->port);
 	mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
 }
 
@@ -1801,7 +1773,7 @@
 	}
 
 	if (!(iir & UART_IIR_NO_INT))
-		transmit_chars(up);
+		serial8250_tx_chars(up);
 
 	if (is_real_interrupt(up->port.irq))
 		serial_out(up, UART_IER, ier);
@@ -1835,7 +1807,7 @@
 	unsigned int status;
 	unsigned int ret;
 
-	status = check_modem_status(up);
+	status = serial8250_modem_status(up);
 
 	ret = 0;
 	if (status & UART_MSR_DCD)
@@ -2000,7 +1972,7 @@
 		serial_outp(up, UART_IER, 0);
 		serial_outp(up, UART_LCR, 0);
 		serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
-		serial_outp(up, UART_LCR, 0xBF);
+		serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
 		serial_outp(up, UART_EFR, UART_EFR_ECB);
 		serial_outp(up, UART_LCR, 0);
 	}
@@ -2848,7 +2820,7 @@
 
 	local_irq_save(flags);
 	if (up->port.sysrq) {
-		/* serial8250_handle_port() already took the lock */
+		/* serial8250_handle_irq() already took the lock */
 		locked = 0;
 	} else if (oops_in_progress) {
 		locked = spin_trylock(&up->port.lock);
@@ -2882,7 +2854,7 @@
 	 *	while processing with interrupts off.
 	 */
 	if (up->msr_saved_flags)
-		check_modem_status(up);
+		serial8250_modem_status(up);
 
 	if (locked)
 		spin_unlock(&up->port.lock);
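The 8250 core hunks above export the receive, transmit and modem-status helpers (serial8250_rx_chars(), serial8250_tx_chars(), serial8250_modem_status()) and fold the old serial8250_handle_port() into serial8250_handle_irq(), which now returns 0 when UART_IIR_NO_INT is set and 1 once the port has been serviced. The shared-interrupt loop also learns to skip ports flagged UPF_IIR_ONCE after the first pass (the flag is set for the Intel Patsburg KT port added in 8250_pci.c below). Since && binds looser than &, the new skip test is equivalent to:

	skip = pass_counter && (up->port.flags & UPF_IIR_ONCE);
	/* i.e. a UPF_IIR_ONCE port is polled only on the first pass of the loop */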
diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
index 6edf4a6..ae027be 100644
--- a/drivers/tty/serial/8250.h
+++ b/drivers/tty/serial/8250.h
@@ -13,6 +13,32 @@
 
 #include <linux/serial_8250.h>
 
+struct uart_8250_port {
+	struct uart_port	port;
+	struct timer_list	timer;		/* "no irq" timer */
+	struct list_head	list;		/* ports on this IRQ */
+	unsigned short		capabilities;	/* port capabilities */
+	unsigned short		bugs;		/* port bugs */
+	unsigned int		tx_loadsz;	/* transmit fifo load size */
+	unsigned char		acr;
+	unsigned char		ier;
+	unsigned char		lcr;
+	unsigned char		mcr;
+	unsigned char		mcr_mask;	/* mask of user bits */
+	unsigned char		mcr_force;	/* mask of forced bits */
+	unsigned char		cur_iotype;	/* Running I/O type */
+
+	/*
+	 * Some bits in registers are cleared on a read, so they must
+	 * be saved whenever the register is read but the bits will not
+	 * be immediately processed.
+	 */
+#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
+	unsigned char		lsr_saved_flags;
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+	unsigned char		msr_saved_flags;
+};
+
 struct old_serial_port {
 	unsigned int uart;
 	unsigned int baud_base;
diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250_dw.c
index bf1fba6..f574eef 100644
--- a/drivers/tty/serial/8250_dw.c
+++ b/drivers/tty/serial/8250_dw.c
@@ -177,17 +177,7 @@
 	.remove			= __devexit_p(dw8250_remove),
 };
 
-static int __init dw8250_init(void)
-{
-	return platform_driver_register(&dw8250_platform_driver);
-}
-module_init(dw8250_init);
-
-static void __exit dw8250_exit(void)
-{
-	platform_driver_unregister(&dw8250_platform_driver);
-}
-module_exit(dw8250_exit);
+module_platform_driver(dw8250_platform_driver);
 
 MODULE_AUTHOR("Jamie Iles");
 MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_fsl.c b/drivers/tty/serial/8250_fsl.c
new file mode 100644
index 0000000..f4d3c47
--- /dev/null
+++ b/drivers/tty/serial/8250_fsl.c
@@ -0,0 +1,63 @@
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+
+#include "8250.h"
+
+/*
+ * Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This isn't a full driver; it just provides an alternate IRQ
+ * handler to deal with an errata.  Everything else is just
+ * using the bog standard 8250 support.
+ *
+ * We follow code flow of serial8250_default_handle_irq() but add
+ * a check for a break and insert a dummy read on the Rx for the
+ * immediately following IRQ event.
+ *
+ * We re-use the already existing "bug handling" lsr_saved_flags
+ * field to carry the "what we just did" information from the one
+ * IRQ event to the next one.
+ */
+
+int fsl8250_handle_irq(struct uart_port *port)
+{
+	unsigned char lsr, orig_lsr;
+	unsigned long flags;
+	unsigned int iir;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
+
+	spin_lock_irqsave(&up->port.lock, flags);
+
+	iir = port->serial_in(port, UART_IIR);
+	if (iir & UART_IIR_NO_INT) {
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		return 0;
+	}
+
+	/* This is the WAR; if last event was BRK, then read and return */
+	if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+		up->lsr_saved_flags &= ~UART_LSR_BI;
+		port->serial_in(port, UART_RX);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		return 1;
+	}
+
+	lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
+
+	if (lsr & (UART_LSR_DR | UART_LSR_BI))
+		lsr = serial8250_rx_chars(up, lsr);
+
+	serial8250_modem_status(up);
+
+	if (lsr & UART_LSR_THRE)
+		serial8250_tx_chars(up);
+
+	up->lsr_saved_flags = orig_lsr;
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	return 1;
+}
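The new 8250_fsl.c builds its break-errata handler entirely from the helpers exported above; how it gets attached to a port is not part of this diff. A platform would point the port's handle_irq hook at it, roughly as below (the wrapper function and its call site are illustrative assumptions; only the handle_irq assignment reflects how the hook is consumed in serial8250_interrupt() and serial8250_timeout()):

	#include <linux/serial_core.h>

	/* Illustrative only: real wiring lives in the platform's 8250 setup code. */
	extern int fsl8250_handle_irq(struct uart_port *port);	/* mirrors the definition above */

	static void example_wire_fsl_handler(struct uart_port *port)
	{
		port->handle_irq = fsl8250_handle_irq;	/* overrides the default 8250 IRQ path */
	}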
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 825937a..da2b0b0 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1092,6 +1092,14 @@
 	return pci_default_setup(priv, board, port, idx);
 }
 
+static int kt_serial_setup(struct serial_private *priv,
+			   const struct pciserial_board *board,
+			   struct uart_port *port, int idx)
+{
+	port->flags |= UPF_IIR_ONCE;
+	return skip_tx_en_setup(priv, board, port, idx);
+}
+
 static int pci_eg20t_init(struct pci_dev *dev)
 {
 #if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
@@ -1110,7 +1118,18 @@
 	return pci_default_setup(priv, board, port, idx);
 }
 
-/* This should be in linux/pci_ids.h */
+static int try_enable_msi(struct pci_dev *dev)
+{
+	/* use msi if available, but fallback to legacy otherwise */
+	pci_enable_msi(dev);
+	return 0;
+}
+
+static void disable_msi(struct pci_dev *dev)
+{
+	pci_disable_msi(dev);
+}
+
 #define PCI_VENDOR_ID_SBSMODULARIO	0x124B
 #define PCI_SUBVENDOR_ID_SBSMODULARIO	0x124B
 #define PCI_DEVICE_ID_OCTPRO		0x0001
@@ -1133,9 +1152,14 @@
 #define PCI_DEVICE_ID_TITAN_800E	0xA014
 #define PCI_DEVICE_ID_TITAN_200EI	0xA016
 #define PCI_DEVICE_ID_TITAN_200EISI	0xA017
+#define PCI_DEVICE_ID_TITAN_400V3	0xA310
+#define PCI_DEVICE_ID_TITAN_410V3	0xA312
+#define PCI_DEVICE_ID_TITAN_800V3	0xA314
+#define PCI_DEVICE_ID_TITAN_800V3B	0xA315
 #define PCI_DEVICE_ID_OXSEMI_16PCI958	0x9538
 #define PCIE_DEVICE_ID_NEO_2_OX_IBM	0x00F6
 #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA	0xc001
+#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
 
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
@@ -1220,6 +1244,15 @@
 		.subdevice	= PCI_ANY_ID,
 		.setup		= ce4100_serial_setup,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_PATSBURG_KT,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.init		= try_enable_msi,
+		.setup		= kt_serial_setup,
+		.exit		= disable_msi,
+	},
 	/*
 	 * ITE
 	 */
@@ -3414,6 +3447,18 @@
 	{	PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_oxsemi_2_4000000 },
+	{	PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_4_921600 },
+	{	PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_4_921600 },
+	{	PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_4_921600 },
+	{	PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_4_921600 },
 
 	{	PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 925a1e5..113fccf 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -97,6 +97,11 @@
 	  This builds standard PNP serial support. You may be able to
 	  disable this feature if you only need legacy serial support.
 
+config SERIAL_8250_FSL
+	bool
+	depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
+	default PPC
+
 config SERIAL_8250_HP300
 	tristate
 	depends on SERIAL_8250 && HP300
@@ -535,6 +540,27 @@
 	help
 	  Serial port support for Samsung's S5P Family of SoC's
 
+config SERIAL_SIRFSOC
+        tristate "SiRF SoC Platform Serial port support"
+        depends on ARM && ARCH_PRIMA2
+        select SERIAL_CORE
+        help
+          Support for the on-chip UART on the CSR SiRFprimaII series,
+          providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+          provide all of these ports, depending on how the serial port
+          pins are configured).
+
+config SERIAL_SIRFSOC_CONSOLE
+        bool "Support for console on SiRF SoC serial port"
+        depends on SERIAL_SIRFSOC=y
+        select SERIAL_CORE_CONSOLE
+        help
+          Even if you say Y here, the currently visible virtual console
+          (/dev/tty0) will still be used as the system console by default, but
+          you can alter that using a kernel command line option such as
+          "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+          your boot loader about how to pass options to the kernel at
+          boot time.)
 
 config SERIAL_MAX3100
 	tristate "MAX3100 support"
@@ -808,7 +834,7 @@
 
 config SERIAL_IMX
 	bool "IMX serial port support"
-	depends on ARM && (ARCH_IMX || ARCH_MXC)
+	depends on ARCH_MXC
 	select SERIAL_CORE
 	select RATIONAL
 	help
@@ -1324,7 +1350,7 @@
 
 config SERIAL_OMAP
 	tristate "OMAP serial port support"
-	depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+	depends on ARCH_OMAP2PLUS
 	select SERIAL_CORE
 	help
 	  If you have a machine based on an Texas Instruments OMAP CPU you
@@ -1575,6 +1601,15 @@
 	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
 	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
+config SERIAL_PCH_UART_CONSOLE
+	bool "Support for console on Intel EG20T PCH UART/OKI SEMICONDUCTOR ML7213 IOH"
+	depends on SERIAL_PCH_UART=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  Say Y here if you wish to use the PCH UART as the system console
+	  (the system  console is the device which receives all kernel messages and
+	  warnings and which allows logins in single user mode).
+
 config SERIAL_MSM_SMD
 	bool "Enable tty device interface for some SMD ports"
 	default n
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index e10cf5b..75eadb8d 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
 obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
 obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
+obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
 obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
 obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
 obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
@@ -94,3 +95,4 @@
 obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
 obj-$(CONFIG_SERIAL_LANTIQ)	+= lantiq.o
 obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index efdf92c..0d91a54 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -795,6 +795,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl010_ids);
+
 static struct amba_driver pl010_driver = {
 	.drv = {
 		.name	= "uart-pl010",
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 00233af..6958594 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1994,6 +1994,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, pl011_ids);
+
 static struct amba_driver pl011_driver = {
 	.drv = {
 		.name	= "uart-pl011",
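Both PL010 and PL011 gain a MODULE_DEVICE_TABLE() entry. The macro exports the AMBA ID table into the module's modinfo so depmod/udev can autoload the driver when a matching primecell turns up; without it the table only serves in-kernel matching. The general shape (table name and periphid/mask values here are illustrative, not the drivers' exact entries):

	#include <linux/module.h>
	#include <linux/amba/bus.h>

	static struct amba_id example_ids[] = {
		{ .id = 0x00041010, .mask = 0x000fffff },	/* illustrative periphid/mask */
		{ 0, 0 },
	};
	MODULE_DEVICE_TABLE(amba, example_ids);	/* emits modaliases for autoloading */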
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4c823f3..10605ec 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -212,8 +212,9 @@
 {
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 	unsigned int mode;
+	unsigned long flags;
 
-	spin_lock(&port->lock);
+	spin_lock_irqsave(&port->lock, flags);
 
 	/* Disable interrupts */
 	UART_PUT_IDR(port, atmel_port->tx_done_mask);
@@ -244,7 +245,7 @@
 	/* Enable interrupts */
 	UART_PUT_IER(port, atmel_port->tx_done_mask);
 
-	spin_unlock(&port->lock);
+	spin_unlock_irqrestore(&port->lock, flags);
 
 }
 
@@ -1256,12 +1257,7 @@
 
 static void atmel_set_ldisc(struct uart_port *port, int new)
 {
-	int line = port->line;
-
-	if (line >= port->state->port.tty->driver->num)
-		return;
-
-	if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+	if (new == N_PPS) {
 		port->flags |= UPF_HARDPPS_CD;
 		atmel_enable_ms(port);
 	} else {
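The first atmel hunk converts a plain spin_lock(&port->lock) into the irqsave variant. The rule being applied is general: port->lock is also taken from the UART interrupt handler, so a process-context path that grabs it without disabling local interrupts can deadlock against its own CPU's ISR. A minimal sketch of the pattern:

	#include <linux/serial_core.h>

	static void touch_port_registers(struct uart_port *port)
	{
		unsigned long flags;

		spin_lock_irqsave(&port->lock, flags);
		/* ... program the hardware / update state shared with the ISR ... */
		spin_unlock_irqrestore(&port->lock, flags);
	}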
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index ee101c0..7fbc3a0 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -299,8 +299,13 @@
 			dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. So, disable it.\n");
 		}
 	}
-	if (up->rts_pin >= 0)
-		gpio_direction_output(up->rts_pin, 0);
+	if (up->rts_pin >= 0) {
+		if (gpio_request(up->rts_pin, DRV_NAME)) {
+			dev_info(port->dev, "fail to request RTS PIN at GPIO_%d\n", up->rts_pin);
+			up->rts_pin = -1;
+		} else
+			gpio_direction_output(up->rts_pin, 0);
+	}
 #endif
 
 	return 0;
@@ -445,6 +450,8 @@
 #ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
 	if (up->cts_pin >= 0)
 		free_irq(gpio_to_irq(up->cts_pin), up);
+	if (up->rts_pin >= 0)
+		gpio_free(up->rts_pin);
 #endif
 }
 
@@ -803,17 +810,16 @@
 		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 		if (res == NULL)
 			sport->cts_pin = -1;
-		else
+		else {
 			sport->cts_pin = res->start;
+			sport->port.flags |= ASYNC_CTS_FLOW;
+		}
 
 		res = platform_get_resource(pdev, IORESOURCE_IO, 1);
 		if (res == NULL)
 			sport->rts_pin = -1;
 		else
 			sport->rts_pin = res->start;
-
-		if (sport->rts_pin >= 0)
-			gpio_request(sport->rts_pin, DRV_NAME);
 #endif
 	}
 
@@ -853,10 +859,6 @@
 
 	if (sport) {
 		uart_remove_one_port(&sport_uart_reg, &sport->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-		if (sport->rts_pin >= 0)
-			gpio_free(sport->rts_pin);
-#endif
 		iounmap(sport->port.membase);
 		peripheral_free_list(
 			(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1..e4510ea 100644
--- a/drivers/tty/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -45,11 +45,12 @@
 #define SPORT_GET_RX32(sport) \
 ({ \
 	unsigned int __ret; \
+	unsigned long flags; \
 	if (ANOMALY_05000473) \
-		local_irq_disable(); \
+		local_irq_save(flags); \
 	__ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
 	if (ANOMALY_05000473) \
-		local_irq_enable(); \
+		local_irq_restore(flags); \
 	__ret; \
 })
 #define SPORT_GET_RCR1(sport)		bfin_read16(((sport)->port.membase + OFFSET_RCR1))
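SPORT_GET_RX32() switches from local_irq_disable()/local_irq_enable() to local_irq_save()/local_irq_restore(). The save/restore pair matters because the macro can be invoked from contexts that already run with interrupts off: it puts the caller's interrupt state back instead of unconditionally re-enabling interrupts. A sketch of the idiom (generic readl() used here instead of the Blackfin accessor):

	#include <linux/io.h>
	#include <linux/irqflags.h>

	static unsigned int read_with_irqs_off(void __iomem *reg)
	{
		unsigned long flags;
		unsigned int val;

		local_irq_save(flags);		/* works whether IRQs were on or off */
		val = readl(reg);
		local_irq_restore(flags);	/* restores exactly the prior state */

		return val;
	}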
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 66afb98..26953bf 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -116,15 +116,22 @@
 static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
 {
 	struct bfin_serial_port *uart = dev_id;
-	unsigned int status;
-
-	status = bfin_serial_get_mctrl(&uart->port);
-	uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
+	unsigned int status = bfin_serial_get_mctrl(&uart->port);
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	uart->scts = 1;
+	struct tty_struct *tty = uart->port.state->port.tty;
+
 	UART_CLEAR_SCTS(uart);
-	UART_CLEAR_IER(uart, EDSSI);
+	if (tty->hw_stopped) {
+		if (status) {
+			tty->hw_stopped = 0;
+			uart_write_wakeup(&uart->port);
+		}
+	} else {
+		if (!status)
+			tty->hw_stopped = 1;
+	}
 #endif
+	uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
 
 	return IRQ_HANDLED;
 }
@@ -175,13 +182,6 @@
 	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
 	struct tty_struct *tty = uart->port.state->port.tty;
 
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
-		uart->scts = 0;
-		uart_handle_cts_change(&uart->port, uart->scts);
-	}
-#endif
-
 	/*
 	 * To avoid losting RX interrupt, we reset IR function
 	 * before sending data.
@@ -380,12 +380,6 @@
 {
 	struct bfin_serial_port *uart = dev_id;
 
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
-		uart->scts = 0;
-		uart_handle_cts_change(&uart->port, uart->scts);
-	}
-#endif
 	spin_lock(&uart->port.lock);
 	if (UART_GET_LSR(uart) & THRE)
 		bfin_serial_tx_chars(uart);
@@ -531,13 +525,6 @@
 	struct bfin_serial_port *uart = dev_id;
 	struct circ_buf *xmit = &uart->port.state->xmit;
 
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
-		uart->scts = 0;
-		uart_handle_cts_change(&uart->port, uart->scts);
-	}
-#endif
-
 	spin_lock(&uart->port.lock);
 	if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
 		disable_dma(uart->tx_dma_channel);
@@ -739,20 +726,26 @@
 			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
 		}
 	}
-	if (uart->rts_pin >= 0)
-		gpio_direction_output(uart->rts_pin, 0);
+	if (uart->rts_pin >= 0) {
+		if (gpio_request(uart->rts_pin, DRIVER_NAME)) {
+			pr_info("fail to request RTS PIN at GPIO_%d\n", uart->rts_pin);
+			uart->rts_pin = -1;
+		} else
+			gpio_direction_output(uart->rts_pin, 0);
+	}
 #endif
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
-		bfin_serial_mctrl_cts_int,
-		0, "BFIN_UART_MODEM_STATUS", uart)) {
-		uart->cts_pin = -1;
-		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
-	}
+	if (uart->cts_pin >= 0) {
+		if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
+			IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+			uart->cts_pin = -1;
+			dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
+		}
 
-	/* CTS RTS PINs are negative assertive. */
-	UART_PUT_MCR(uart, ACTS);
-	UART_SET_IER(uart, EDSSI);
+		/* CTS RTS PINs are negative assertive. */
+		UART_PUT_MCR(uart, ACTS);
+		UART_SET_IER(uart, EDSSI);
+	}
 #endif
 
 	UART_SET_IER(uart, ERBFI);
@@ -792,6 +785,8 @@
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
 	if (uart->cts_pin >= 0)
 		free_irq(gpio_to_irq(uart->cts_pin), uart);
+	if (uart->rts_pin >= 0)
+		gpio_free(uart->rts_pin);
 #endif
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
 	if (uart->cts_pin >= 0)
@@ -1370,18 +1365,18 @@
 		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 		if (res == NULL)
 			uart->cts_pin = -1;
-		else
+		else {
 			uart->cts_pin = res->start;
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+			uart->port.flags |= ASYNC_CTS_FLOW;
+#endif
+		}
 
 		res = platform_get_resource(pdev, IORESOURCE_IO, 1);
 		if (res == NULL)
 			uart->rts_pin = -1;
 		else
 			uart->rts_pin = res->start;
-# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
-		if (uart->rts_pin >= 0)
-			gpio_request(uart->rts_pin, DRIVER_NAME);
-# endif
 #endif
 	}
 
@@ -1421,10 +1416,6 @@
 
 	if (uart) {
 		uart_remove_one_port(&bfin_serial_reg, &uart->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-		if (uart->rts_pin >= 0)
-			gpio_free(uart->rts_pin);
-#endif
 		iounmap(uart->port.membase);
 		peripheral_free_list(
 			(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 426434e..7e925e2 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1334,7 +1334,6 @@
 static const struct spi_driver ifx_spi_driver = {
 	.driver = {
 		.name = DRVNAME,
-		.bus = &spi_bus_type,
 		.pm = &ifx_spi_pm,
 		.owner = THIS_MODULE},
 	.probe = ifx_spi_spi_probe,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 163fc90..0b7fed7 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -102,6 +102,7 @@
 #define  UCR2_STPB       (1<<6)	 /* Stop */
 #define  UCR2_WS         (1<<5)	 /* Word size */
 #define  UCR2_RTSEN      (1<<4)	 /* Request to send interrupt enable */
+#define  UCR2_ATEN       (1<<3)  /* Aging Timer Enable */
 #define  UCR2_TXEN       (1<<2)	 /* Transmitter enabled */
 #define  UCR2_RXEN       (1<<1)	 /* Receiver enabled */
 #define  UCR2_SRST 	 (1<<0)	 /* SW reset */
@@ -207,6 +208,12 @@
 	struct imx_uart_data	*devdata;
 };
 
+struct imx_port_ucrs {
+	unsigned int	ucr1;
+	unsigned int	ucr2;
+	unsigned int	ucr3;
+};
+
 #ifdef CONFIG_IRDA
 #define USE_IRDA(sport)	((sport)->use_irda)
 #else
@@ -260,6 +267,27 @@
 }
 
 /*
+ * Save and restore functions for UCR1, UCR2 and UCR3 registers
+ */
+static void imx_port_ucrs_save(struct uart_port *port,
+			       struct imx_port_ucrs *ucr)
+{
+	/* save control registers */
+	ucr->ucr1 = readl(port->membase + UCR1);
+	ucr->ucr2 = readl(port->membase + UCR2);
+	ucr->ucr3 = readl(port->membase + UCR3);
+}
+
+static void imx_port_ucrs_restore(struct uart_port *port,
+				  struct imx_port_ucrs *ucr)
+{
+	/* restore control registers */
+	writel(ucr->ucr1, port->membase + UCR1);
+	writel(ucr->ucr2, port->membase + UCR2);
+	writel(ucr->ucr3, port->membase + UCR3);
+}
+
+/*
  * Handle any change of modem status signal since we were last called.
  */
 static void imx_mctrl_check(struct imx_port *sport)
@@ -566,6 +594,9 @@
 	if (sts & USR1_RTSD)
 		imx_rtsint(irq, dev_id);
 
+	if (sts & USR1_AWAKE)
+		writel(USR1_AWAKE, sport->port.membase + USR1);
+
 	return IRQ_HANDLED;
 }
 
@@ -901,6 +932,8 @@
 			ucr2 |= UCR2_PROE;
 	}
 
+	del_timer_sync(&sport->timer);
+
 	/*
 	 * Ask the core to calculate the divisor for us.
 	 */
@@ -931,8 +964,6 @@
 			sport->port.ignore_status_mask |= URXD_OVRRUN;
 	}
 
-	del_timer_sync(&sport->timer);
-
 	/*
 	 * Update the per-port timeout.
 	 */
@@ -1079,6 +1110,70 @@
 	return ret;
 }
 
+#if defined(CONFIG_CONSOLE_POLL)
+static int imx_poll_get_char(struct uart_port *port)
+{
+	struct imx_port_ucrs old_ucr;
+	unsigned int status;
+	unsigned char c;
+
+	/* save control registers */
+	imx_port_ucrs_save(port, &old_ucr);
+
+	/* disable interrupts */
+	writel(UCR1_UARTEN, port->membase + UCR1);
+	writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+	       port->membase + UCR2);
+	writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+	       port->membase + UCR3);
+
+	/* poll */
+	do {
+		status = readl(port->membase + USR2);
+	} while (~status & USR2_RDR);
+
+	/* read */
+	c = readl(port->membase + URXD0);
+
+	/* restore control registers */
+	imx_port_ucrs_restore(port, &old_ucr);
+
+	return c;
+}
+
+static void imx_poll_put_char(struct uart_port *port, unsigned char c)
+{
+	struct imx_port_ucrs old_ucr;
+	unsigned int status;
+
+	/* save control registers */
+	imx_port_ucrs_save(port, &old_ucr);
+
+	/* disable interrupts */
+	writel(UCR1_UARTEN, port->membase + UCR1);
+	writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+	       port->membase + UCR2);
+	writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+	       port->membase + UCR3);
+
+	/* drain */
+	do {
+		status = readl(port->membase + USR1);
+	} while (~status & USR1_TRDY);
+
+	/* write */
+	writel(c, port->membase + URTX0);
+
+	/* flush */
+	do {
+		status = readl(port->membase + USR2);
+	} while (~status & USR2_TXDC);
+
+	/* restore control registers */
+	imx_port_ucrs_restore(port, &old_ucr);
+}
+#endif
+
 static struct uart_ops imx_pops = {
 	.tx_empty	= imx_tx_empty,
 	.set_mctrl	= imx_set_mctrl,
@@ -1096,6 +1191,10 @@
 	.request_port	= imx_request_port,
 	.config_port	= imx_config_port,
 	.verify_port	= imx_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+	.poll_get_char  = imx_poll_get_char,
+	.poll_put_char  = imx_poll_put_char,
+#endif
 };
 
 static struct imx_port *imx_ports[UART_NR];
@@ -1118,13 +1217,14 @@
 imx_console_write(struct console *co, const char *s, unsigned int count)
 {
 	struct imx_port *sport = imx_ports[co->index];
-	unsigned int old_ucr1, old_ucr2, ucr1;
+	struct imx_port_ucrs old_ucr;
+	unsigned int ucr1;
 
 	/*
-	 *	First, save UCR1/2 and then disable interrupts
+	 *	First, save UCR1/2/3 and then disable interrupts
 	 */
-	ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
-	old_ucr2 = readl(sport->port.membase + UCR2);
+	imx_port_ucrs_save(&sport->port, &old_ucr);
+	ucr1 = old_ucr.ucr1;
 
 	if (is_imx1_uart(sport))
 		ucr1 |= IMX1_UCR1_UARTCLKEN;
@@ -1133,18 +1233,17 @@
 
 	writel(ucr1, sport->port.membase + UCR1);
 
-	writel(old_ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
+	writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
 
 	uart_console_write(&sport->port, s, count, imx_console_putchar);
 
 	/*
 	 *	Finally, wait for transmitter to become empty
-	 *	and restore UCR1/2
+	 *	and restore UCR1/2/3
 	 */
 	while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
 
-	writel(old_ucr1, sport->port.membase + UCR1);
-	writel(old_ucr2, sport->port.membase + UCR2);
+	imx_port_ucrs_restore(&sport->port, &old_ucr);
 }
 
 /*
@@ -1269,6 +1368,12 @@
 static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
 {
 	struct imx_port *sport = platform_get_drvdata(dev);
+	unsigned int val;
+
+	/* enable wakeup from i.MX UART */
+	val = readl(sport->port.membase + UCR3);
+	val |= UCR3_AWAKEN;
+	writel(val, sport->port.membase + UCR3);
 
 	if (sport)
 		uart_suspend_port(&imx_reg, &sport->port);
@@ -1279,6 +1384,12 @@
 static int serial_imx_resume(struct platform_device *dev)
 {
 	struct imx_port *sport = platform_get_drvdata(dev);
+	unsigned int val;
+
+	/* disable wakeup from i.MX UART */
+	val = readl(sport->port.membase + UCR3);
+	val &= ~UCR3_AWAKEN;
+	writel(val, sport->port.membase + UCR3);
 
 	if (sport)
 		uart_resume_port(&imx_reg, &sport->port);
@@ -1287,6 +1398,10 @@
 }
 
 #ifdef CONFIG_OF
+/*
+ * This function returns 1 if pdev isn't a device instantiated from the
+ * device tree, 0 on success, or a negative errno on failure.
+ */
 static int serial_imx_probe_dt(struct imx_port *sport,
 		struct platform_device *pdev)
 {
@@ -1296,12 +1411,13 @@
 	int ret;
 
 	if (!np)
-		return -ENODEV;
+		/* no device tree device */
+		return 1;
 
 	ret = of_alias_get_id(np, "serial");
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
-		return -ENODEV;
+		return ret;
 	}
 	sport->port.line = ret;
 
@@ -1319,7 +1435,7 @@
 static inline int serial_imx_probe_dt(struct imx_port *sport,
 		struct platform_device *pdev)
 {
-	return -ENODEV;
+	return 1;
 }
 #endif
 
@@ -1354,8 +1470,10 @@
 		return -ENOMEM;
 
 	ret = serial_imx_probe_dt(sport, pdev);
-	if (ret == -ENODEV)
+	if (ret > 0)
 		serial_imx_probe_pdata(sport, pdev);
+	else if (ret < 0)
+		goto free;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -1476,7 +1594,7 @@
 	if (ret != 0)
 		uart_unregister_driver(&imx_reg);
 
-	return 0;
+	return ret;
 }
 
 static void __exit imx_serial_exit(void)
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 0801893..94a6792 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -1000,11 +1000,8 @@
 		init_timer(&up->timer);
 		up->timer.function = m32r_sio_timeout;
 
-		/*
-		 * ALPHA_KLUDGE_MCR needs to be killed.
-		 */
-		up->mcr_mask = ~ALPHA_KLUDGE_MCR;
-		up->mcr_force = ALPHA_KLUDGE_MCR;
+		up->mcr_mask = ~0;
+		up->mcr_force = 0;
 
 		uart_add_one_port(drv, &up->port);
 	}
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 8a6cc8c..b4902b9 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -901,7 +901,6 @@
 static struct spi_driver max3100_driver = {
 	.driver = {
 		.name		= "max3100",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
 
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index 90c40f2..aae772a 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -315,7 +315,6 @@
 static struct spi_driver max3107_driver = {
 	.driver = {
 		.name		= "aava-max3107",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
 	.probe		= max3107_probe_aava,
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 7827000..17c7ba8 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1181,7 +1181,6 @@
 static struct spi_driver max3107_driver = {
 	.driver = {
 		.name		= "max3107",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
 	.probe		= max3107_probe_generic,
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index e272d39..a9234ba8 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -1154,7 +1154,6 @@
 	int bits = 8;
 	int parity = 'n';
 	int flow = 'n';
-	int ret;
 
 	if (co->index == -1 || co->index >= serial_hsu_reg.nr)
 		co->index = 0;
@@ -1165,9 +1164,7 @@
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
-	ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
-
-	return ret;
+	return uart_set_options(&up->port, co, baud, parity, bits, flow);
 }
 
 static struct console serial_hsu_console = {
@@ -1176,9 +1173,13 @@
 	.device		= uart_console_device,
 	.setup		= serial_hsu_console_setup,
 	.flags		= CON_PRINTBUFFER,
-	.index		= 2,
+	.index		= -1,
 	.data		= &serial_hsu_reg,
 };
+
+#define SERIAL_HSU_CONSOLE	(&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE	NULL
 #endif
 
 struct uart_ops serial_hsu_pops = {
@@ -1208,6 +1209,7 @@
 	.major		= TTY_MAJOR,
 	.minor		= 128,
 	.nr		= 3,
+	.cons		= SERIAL_HSU_CONSOLE,
 };
 
 #ifdef CONFIG_PM
@@ -1342,12 +1344,6 @@
 		}
 		uart_add_one_port(&serial_hsu_reg, &uport->port);
 
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
-		if (index == 2) {
-			register_console(&serial_hsu_console);
-			uport->port.cons = &serial_hsu_console;
-		}
-#endif
 		pci_set_drvdata(pdev, uport);
 	}
 
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 4c309e8..df2a224 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -876,7 +876,6 @@
 static struct spi_driver uart_max3110_driver = {
 	.driver = {
 			.name	= "spi_max3111",
-			.bus	= &spi_bus_type,
 			.owner	= THIS_MODULE,
 	},
 	.probe		= serial_m3110_probe,
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 60c6eb8..5e85e1e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -422,9 +422,9 @@
 		      msm_uport->rx.rbuffer);
 	dma_pool_destroy(msm_uport->rx.pool);
 
-	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *),
+	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
 			 DMA_TO_DEVICE);
-	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *),
+	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
 			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
 			 DMA_TO_DEVICE);
@@ -812,7 +812,7 @@
 	*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
 
 	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
-				   sizeof(u32 *), DMA_TO_DEVICE);
+				   sizeof(u32), DMA_TO_DEVICE);
 
 	/* Save tx_count to use in Callback */
 	tx->tx_count = tx_count;
@@ -1087,12 +1087,10 @@
 }
 
 /*  Handle CTS changes (Called from interrupt handler) */
-static void msm_hs_handle_delta_cts(struct uart_port *uport)
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
 {
-	unsigned long flags;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
-	spin_lock_irqsave(&uport->lock, flags);
 	clk_enable(msm_uport->clk);
 
 	/* clear interrupt */
@@ -1100,7 +1098,6 @@
 	uport->icount.cts++;
 
 	clk_disable(msm_uport->clk);
-	spin_unlock_irqrestore(&uport->lock, flags);
 
 	/* clear the IOCTL TIOCMIWAIT if called */
 	wake_up_interruptible(&uport->state->port.delta_msr_wait);
@@ -1248,7 +1245,7 @@
 
 	/* Change in CTS interrupt */
 	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
-		msm_hs_handle_delta_cts(uport);
+		msm_hs_handle_delta_cts_locked(uport);
 
 	spin_unlock_irqrestore(&uport->lock, flags);
 
@@ -1537,7 +1534,7 @@
 	if (!tx->command_ptr)
 		return -ENOMEM;
 
-	tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+	tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
 	if (!tx->command_ptr_ptr) {
 		ret = -ENOMEM;
 		goto err_tx_command_ptr_ptr;
@@ -1547,7 +1544,7 @@
 					    sizeof(dmov_box), DMA_TO_DEVICE);
 	tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
 						tx->command_ptr_ptr,
-						sizeof(u32 *), DMA_TO_DEVICE);
+						sizeof(u32), DMA_TO_DEVICE);
 	tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
 
 	init_waitqueue_head(&rx->wait);
@@ -1575,7 +1572,7 @@
 		goto err_rx_command_ptr;
 	}
 
-	rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+	rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
 	if (!rx->command_ptr_ptr) {
 		pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
 		ret = -ENOMEM;
@@ -1593,7 +1590,7 @@
 	*rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
 
 	rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
-					    sizeof(u32 *), DMA_TO_DEVICE);
+					    sizeof(u32), DMA_TO_DEVICE);
 	rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
 
 	INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
@@ -1609,7 +1606,7 @@
 	dma_pool_destroy(msm_uport->rx.pool);
 err_dma_pool_create:
 	dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
-				sizeof(u32 *), DMA_TO_DEVICE);
+				sizeof(u32), DMA_TO_DEVICE);
 	dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
 				sizeof(dmov_box), DMA_TO_DEVICE);
 	kfree(msm_uport->tx.command_ptr_ptr);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 7e02c9c..076169f 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -145,11 +145,12 @@
 			writel(xmit->buf[xmit->tail],
 				     s->port.membase + AUART_DATA);
 			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
-				uart_write_wakeup(&s->port);
 		} else
 			break;
 	}
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&s->port);
+
 	if (uart_circ_empty(&(s->port.state->xmit)))
 		writel(AUART_INTR_TXIEN,
 			     s->port.membase + AUART_INTR_CLR);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 5e713d3..f2a1380 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -399,7 +399,7 @@
 static unsigned int serial_omap_get_mctrl(struct uart_port *port)
 {
 	struct uart_omap_port *up = (struct uart_omap_port *)port;
-	unsigned char status;
+	unsigned int status;
 	unsigned int ret = 0;
 
 	status = check_modem_status(up);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d6aba8c..de0f613 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -25,6 +25,9 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/dmi.h>
+#include <linux/console.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
 
 #include <linux/dmaengine.h>
 #include <linux/pch_dma.h>
@@ -198,6 +201,10 @@
 
 #define PCI_VENDOR_ID_ROHM		0x10DB
 
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+#define DEFAULT_BAUD_RATE 1843200 /* 1.8432MHz */
+
 struct pch_uart_buffer {
 	unsigned char *buf;
 	int size;
@@ -276,6 +283,9 @@
 	[pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
 };
 
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+static struct eg20t_port *pch_uart_ports[PCH_UART_NR];
+#endif
 static unsigned int default_baud = 9600;
 static const int trigger_level_256[4] = { 1, 64, 128, 224 };
 static const int trigger_level_64[4] = { 1, 16, 32, 56 };
@@ -1385,6 +1395,143 @@
 	.verify_port = pch_uart_verify_port
 };
 
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+
+/*
+ *	Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct eg20t_port *up, int bits)
+{
+	unsigned int status, tmout = 10000;
+
+	/* Wait up to 10ms for the character(s) to be sent. */
+	for (;;) {
+		status = ioread8(up->membase + UART_LSR);
+
+		if ((status & bits) == bits)
+			break;
+		if (--tmout == 0)
+			break;
+		udelay(1);
+	}
+
+	/* Wait up to 1s for flow control if necessary */
+	if (up->port.flags & UPF_CONS_FLOW) {
+		unsigned int tmout;
+		for (tmout = 1000000; tmout; tmout--) {
+			unsigned int msr = ioread8(up->membase + UART_MSR);
+			if (msr & UART_MSR_CTS)
+				break;
+			udelay(1);
+			touch_nmi_watchdog();
+		}
+	}
+}
+
+static void pch_console_putchar(struct uart_port *port, int ch)
+{
+	struct eg20t_port *priv =
+		container_of(port, struct eg20t_port, port);
+
+	wait_for_xmitr(priv, UART_LSR_THRE);
+	iowrite8(ch, priv->membase + PCH_UART_THR);
+}
+
+/*
+ *	Print a string to the serial port trying not to disturb
+ *	any possible real use of the port...
+ *
+ *	The console_lock must be held when we get here.
+ */
+static void
+pch_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct eg20t_port *priv;
+
+	unsigned long flags;
+	u8 ier;
+	int locked = 1;
+
+	priv = pch_uart_ports[co->index];
+
+	touch_nmi_watchdog();
+
+	local_irq_save(flags);
+	if (priv->port.sysrq) {
+		/* the port lock is already held by the interrupt handler */
+		locked = 0;
+	} else if (oops_in_progress) {
+		locked = spin_trylock(&priv->port.lock);
+	} else
+		spin_lock(&priv->port.lock);
+
+	/*
+	 *	First save the IER then disable the interrupts
+	 */
+	ier = ioread8(priv->membase + UART_IER);
+
+	pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+
+	uart_console_write(&priv->port, s, count, pch_console_putchar);
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and restore the IER
+	 */
+	wait_for_xmitr(priv, BOTH_EMPTY);
+	iowrite8(ier, priv->membase + UART_IER);
+
+	if (locked)
+		spin_unlock(&priv->port.lock);
+	local_irq_restore(flags);
+}
+
+static int __init pch_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port;
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	/*
+	 * Check whether an invalid uart number has been specified, and
+	 * if so, fall back to the first port.
+	 */
+	if (co->index >= PCH_UART_NR)
+		co->index = 0;
+	if (!pch_uart_ports[co->index])
+		return -ENODEV;
+	port = &pch_uart_ports[co->index]->port;
+
+	if (!port->iobase && !port->membase)
+		return -ENODEV;
+
+	/* setup uartclock */
+	port->uartclk = DEFAULT_BAUD_RATE;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pch_uart_driver;
+
+static struct console pch_console = {
+	.name		= PCH_UART_DRIVER_DEVICE,
+	.write		= pch_console_write,
+	.device		= uart_console_device,
+	.setup		= pch_console_setup,
+	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
+	.index		= -1,
+	.data		= &pch_uart_driver,
+};
+
+#define PCH_CONSOLE	(&pch_console)
+#else
+#define PCH_CONSOLE	NULL
+#endif
+
 static struct uart_driver pch_uart_driver = {
 	.owner = THIS_MODULE,
 	.driver_name = KBUILD_MODNAME,
@@ -1392,6 +1539,7 @@
 	.major = 0,
 	.minor = 0,
 	.nr = PCH_UART_NR,
+	.cons = PCH_CONSOLE,
 };
 
 static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
@@ -1418,7 +1566,7 @@
 	if (!rxbuf)
 		goto init_port_free_txbuf;
 
-	base_baud = 1843200; /* 1.8432MHz */
+	base_baud = DEFAULT_BAUD_RATE;
 
 	/* quirk for CM-iTC board */
 	board_name = dmi_get_system_info(DMI_BOARD_NAME);
@@ -1468,6 +1616,9 @@
 	pci_set_drvdata(pdev, priv);
 	pch_uart_hal_request(pdev, fifosize, base_baud);
 
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+	pch_uart_ports[board->line_no] = priv;
+#endif
 	ret = uart_add_one_port(&pch_uart_driver, &priv->port);
 	if (ret < 0)
 		goto init_port_hal_free;
@@ -1475,6 +1626,9 @@
 	return priv;
 
 init_port_hal_free:
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+	pch_uart_ports[board->line_no] = NULL;
+#endif
 	free_page((unsigned long)rxbuf);
 init_port_free_txbuf:
 	kfree(priv);
@@ -1497,6 +1651,10 @@
 	priv = (struct eg20t_port *)pci_get_drvdata(pdev);
 
 	pci_disable_msi(pdev);
+
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+	pch_uart_ports[priv->port.line] = NULL;
+#endif
 	pch_uart_exit_port(priv);
 	pci_disable_device(pdev);
 	kfree(priv);
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5acd24a..e9c2dfe4 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -99,6 +99,9 @@
 #define PMACZILOG_NAME		"ttyPZ"
 #endif
 
+#define pmz_debug(fmt, arg...)	pr_debug("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_error(fmt, arg...)	pr_err("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_info(fmt, arg...)	pr_info("ttyPZ%d: " fmt, uap->port.line, ## arg)
 
 /*
  * For the sake of early serial console, we can do a pre-probe
@@ -106,7 +109,6 @@
  */
 static struct uart_pmac_port	pmz_ports[MAX_ZS_PORTS];
 static int			pmz_ports_count;
-static DEFINE_MUTEX(pmz_irq_mutex);
 
 static struct uart_driver pmz_uart_reg = {
 	.owner		=	THIS_MODULE,
@@ -126,9 +128,6 @@
 {
 	int i;
 
-	if (ZS_IS_ASLEEP(uap))
-		return;
-
 	/* Let pending transmits finish.  */
 	for (i = 0; i < 1000; i++) {
 		unsigned char stat = read_zsreg(uap, R1);
@@ -216,32 +215,24 @@
 	}
 }
 
+static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable)
+{
+	if (enable) {
+		uap->curregs[1] |= INT_ALL_Rx | TxINT_ENAB;
+		if (!ZS_IS_EXTCLK(uap))
+			uap->curregs[1] |= EXT_INT_ENAB;
+	} else {
+		uap->curregs[1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+	}
+	write_zsreg(uap, R1, uap->curregs[1]);
+}
+
 static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
 {
 	struct tty_struct *tty = NULL;
 	unsigned char ch, r1, drop, error, flag;
 	int loops = 0;
 
-	/* The interrupt can be enabled when the port isn't open, typically
-	 * that happens when using one port is open and the other closed (stale
-	 * interrupt) or when one port is used as a console.
-	 */
-	if (!ZS_IS_OPEN(uap)) {
-		pmz_debug("pmz: draining input\n");
-		/* Port is closed, drain input data */
-		for (;;) {
-			if ((++loops) > 1000)
-				goto flood;
-			(void)read_zsreg(uap, R1);
-			write_zsreg(uap, R0, ERR_RES);
-			(void)read_zsdata(uap);
-			ch = read_zsreg(uap, R0);
-			if (!(ch & Rx_CH_AV))
-				break;
-		}
-		return NULL;
-	}
-
 	/* Sanity check, make sure the old bug is no longer happening */
 	if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
 		WARN_ON(1);
@@ -339,9 +330,7 @@
 
 	return tty;
  flood:
-	uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
-	write_zsreg(uap, R1, uap->curregs[R1]);
-	zssync(uap);
+	pmz_interrupt_control(uap, 0);
 	pmz_error("pmz: rx irq flood !\n");
 	return tty;
 }
@@ -383,8 +372,6 @@
 {
 	struct circ_buf *xmit;
 
-	if (ZS_IS_ASLEEP(uap))
-		return;
 	if (ZS_IS_CONS(uap)) {
 		unsigned char status = read_zsreg(uap, R0);
 
@@ -481,6 +468,10 @@
 	/* Channel A */
 	tty = NULL;
 	if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+		if (!ZS_IS_OPEN(uap_a)) {
+			pmz_debug("ChanA interrupt while open !\n");
+			goto skip_a;
+		}
 		write_zsreg(uap_a, R0, RES_H_IUS);
 		zssync(uap_a);		
 		if (r3 & CHAEXT)
@@ -491,16 +482,21 @@
 			pmz_transmit_chars(uap_a);
 		rc = IRQ_HANDLED;
 	}
+ skip_a:
 	spin_unlock(&uap_a->port.lock);
 	if (tty != NULL)
 		tty_flip_buffer_push(tty);
 
-	if (uap_b->node == NULL)
+	if (!uap_b)
 		goto out;
 
 	spin_lock(&uap_b->port.lock);
 	tty = NULL;
 	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+		if (!ZS_IS_OPEN(uap_b)) {
+			pmz_debug("ChanB interrupt while not open!\n");
+			goto skip_b;
+		}
 		write_zsreg(uap_b, R0, RES_H_IUS);
 		zssync(uap_b);
 		if (r3 & CHBEXT)
@@ -511,14 +507,12 @@
 			pmz_transmit_chars(uap_b);
 		rc = IRQ_HANDLED;
 	}
+ skip_b:
 	spin_unlock(&uap_b->port.lock);
 	if (tty != NULL)
 		tty_flip_buffer_push(tty);
 
  out:
-#ifdef DEBUG_HARD
-	pmz_debug("irq done.\n");
-#endif
 	return rc;
 }
 
@@ -543,12 +537,8 @@
  */
 static unsigned int pmz_tx_empty(struct uart_port *port)
 {
-	struct uart_pmac_port *uap = to_pmz(port);
 	unsigned char status;
 
-	if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
-		return TIOCSER_TEMT;
-
 	status = pmz_peek_status(to_pmz(port));
 	if (status & Tx_BUF_EMP)
 		return TIOCSER_TEMT;
@@ -570,8 +560,7 @@
 	if (ZS_IS_IRDA(uap))
 		return;
 	/* We get called during boot with a port not up yet */
-	if (ZS_IS_ASLEEP(uap) ||
-	    !(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
+	if (!(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
 		return;
 
 	set_bits = clear_bits = 0;
@@ -590,8 +579,7 @@
 	/* NOTE: Not subject to 'transmitter active' rule.  */ 
 	uap->curregs[R5] |= set_bits;
 	uap->curregs[R5] &= ~clear_bits;
-	if (ZS_IS_ASLEEP(uap))
-		return;
+
 	write_zsreg(uap, R5, uap->curregs[R5]);
 	pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n",
 		  set_bits, clear_bits, uap->curregs[R5]);
@@ -609,9 +597,6 @@
 	unsigned char status;
 	unsigned int ret;
 
-	if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
-		return 0;
-
 	status = read_zsreg(uap, R0);
 
 	ret = 0;
@@ -649,9 +634,6 @@
 	uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
 	uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
 
-	if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
-		return;
-
 	status = read_zsreg(uap, R0);
 
 	/* TX busy?  Just wait for the TX done interrupt.  */
@@ -690,9 +672,6 @@
 {
 	struct uart_pmac_port *uap = to_pmz(port);
 
-	if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
-		return;
-
 	pmz_debug("pmz: stop_rx()()\n");
 
 	/* Disable all RX interrupts.  */
@@ -711,14 +690,12 @@
 	struct uart_pmac_port *uap = to_pmz(port);
 	unsigned char new_reg;
 
-	if (ZS_IS_IRDA(uap) || uap->node == NULL)
+	if (ZS_IS_IRDA(uap))
 		return;
 	new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
 	if (new_reg != uap->curregs[R15]) {
 		uap->curregs[R15] = new_reg;
 
-		if (ZS_IS_ASLEEP(uap))
-			return;
 		/* NOTE: Not subject to 'transmitter active' rule. */
 		write_zsreg(uap, R15, uap->curregs[R15]);
 	}
@@ -734,8 +711,6 @@
 	unsigned char set_bits, clear_bits, new_reg;
 	unsigned long flags;
 
-	if (uap->node == NULL)
-		return;
 	set_bits = clear_bits = 0;
 
 	if (break_state)
@@ -748,12 +723,6 @@
 	new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
 	if (new_reg != uap->curregs[R5]) {
 		uap->curregs[R5] = new_reg;
-
-		/* NOTE: Not subject to 'transmitter active' rule. */
-		if (ZS_IS_ASLEEP(uap)) {
-			spin_unlock_irqrestore(&port->lock, flags);
-			return;
-		}
 		write_zsreg(uap, R5, uap->curregs[R5]);
 	}
 
@@ -927,14 +896,21 @@
 
 static void pmz_irda_reset(struct uart_pmac_port *uap)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&uap->port.lock, flags);
 	uap->curregs[R5] |= DTR;
 	write_zsreg(uap, R5, uap->curregs[R5]);
 	zssync(uap);
-	mdelay(110);
+	spin_unlock_irqrestore(&uap->port.lock, flags);
+	msleep(110);
+
+	spin_lock_irqsave(&uap->port.lock, flags);
 	uap->curregs[R5] &= ~DTR;
 	write_zsreg(uap, R5, uap->curregs[R5]);
 	zssync(uap);
-	mdelay(10);
+	spin_unlock_irqrestore(&uap->port.lock, flags);
+	msleep(10);
 }
 
 /*
@@ -949,13 +925,6 @@
 
 	pmz_debug("pmz: startup()\n");
 
-	if (ZS_IS_ASLEEP(uap))
-		return -EAGAIN;
-	if (uap->node == NULL)
-		return -ENODEV;
-
-	mutex_lock(&pmz_irq_mutex);
-
 	uap->flags |= PMACZILOG_FLAG_IS_OPEN;
 
 	/* A console is never powered down. Else, power up and
@@ -966,18 +935,14 @@
 		pwr_delay = __pmz_startup(uap);
 		spin_unlock_irqrestore(&port->lock, flags);
 	}	
-
-	pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
+	sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
 	if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
-			"SCC", uap)) {
+			uap->irq_name, uap)) {
 		pmz_error("Unable to register zs interrupt handler.\n");
 		pmz_set_scc_power(uap, 0);
-		mutex_unlock(&pmz_irq_mutex);
 		return -ENXIO;
 	}
 
-	mutex_unlock(&pmz_irq_mutex);
-
 	/* Right now, we deal with delay by blocking here, I'll be
 	 * smarter later on
 	 */
@@ -990,12 +955,9 @@
 	if (ZS_IS_IRDA(uap))
 		pmz_irda_reset(uap);
 
-	/* Enable interrupts emission from the chip */
+	/* Enable interrupt requests for the channel */
 	spin_lock_irqsave(&port->lock, flags);
-	uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
-	if (!ZS_IS_EXTCLK(uap))
-		uap->curregs[R1] |= EXT_INT_ENAB;
-	write_zsreg(uap, R1, uap->curregs[R1]);
+	pmz_interrupt_control(uap, 1);
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	pmz_debug("pmz: startup() done.\n");
@@ -1010,10 +972,22 @@
 
 	pmz_debug("pmz: shutdown()\n");
 
-	if (uap->node == NULL)
-		return;
+	spin_lock_irqsave(&port->lock, flags);
 
-	mutex_lock(&pmz_irq_mutex);
+	/* Disable interrupt requests for the channel */
+	pmz_interrupt_control(uap, 0);
+
+	if (!ZS_IS_CONS(uap)) {
+		/* Disable receiver and transmitter */
+		uap->curregs[R3] &= ~RxENABLE;
+		uap->curregs[R5] &= ~TxENABLE;
+
+		/* Disable break assertion */
+		uap->curregs[R5] &= ~SND_BRK;
+		pmz_maybe_update_regs(uap);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
 
 	/* Release interrupt handler */
 	free_irq(uap->port.irq, uap);
@@ -1022,37 +996,11 @@
 
 	uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
 
-	if (!ZS_IS_OPEN(uap->mate))
-		pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
-
-	/* Disable interrupts */
-	if (!ZS_IS_ASLEEP(uap)) {
-		uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
-		write_zsreg(uap, R1, uap->curregs[R1]);
-		zssync(uap);
-	}
-
-	if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
-		spin_unlock_irqrestore(&port->lock, flags);
-		mutex_unlock(&pmz_irq_mutex);
-		return;
-	}
-
-	/* Disable receiver and transmitter.  */
-	uap->curregs[R3] &= ~RxENABLE;
-	uap->curregs[R5] &= ~TxENABLE;
-
-	/* Disable all interrupts and BRK assertion.  */
-	uap->curregs[R5] &= ~SND_BRK;
-	pmz_maybe_update_regs(uap);
-
-	/* Shut the chip down */
-	pmz_set_scc_power(uap, 0);
+	if (!ZS_IS_CONS(uap))
+		pmz_set_scc_power(uap, 0);	/* Shut the chip down */
 
 	spin_unlock_irqrestore(&port->lock, flags);
 
-	mutex_unlock(&pmz_irq_mutex);
-
 	pmz_debug("pmz: shutdown() done.\n");
 }
 
@@ -1300,9 +1248,6 @@
 
 	pmz_debug("pmz: set_termios()\n");
 
-	if (ZS_IS_ASLEEP(uap))
-		return;
-
 	memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
 
 	/* XXX Check which revs of machines actually allow 1 and 4Mb speeds
@@ -1352,19 +1297,15 @@
 	spin_lock_irqsave(&port->lock, flags);	
 
 	/* Disable IRQs on the port */
-	uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
-	write_zsreg(uap, R1, uap->curregs[R1]);
+	pmz_interrupt_control(uap, 0);
 
 	/* Setup new port configuration */
 	__pmz_set_termios(port, termios, old);
 
 	/* Re-enable IRQs on the port */
-	if (ZS_IS_OPEN(uap)) {
-		uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
-		if (!ZS_IS_EXTCLK(uap))
-			uap->curregs[R1] |= EXT_INT_ENAB;
-		write_zsreg(uap, R1, uap->curregs[R1]);
-	}
+	if (ZS_IS_OPEN(uap))
+		pmz_interrupt_control(uap, 1);
+
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -1604,25 +1545,34 @@
  */
 static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
+	struct uart_pmac_port *uap;
 	int i;
 	
 	/* Iterate the pmz_ports array to find a matching entry
 	 */
 	for (i = 0; i < MAX_ZS_PORTS; i++)
-		if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
-			struct uart_pmac_port *uap = &pmz_ports[i];
+		if (pmz_ports[i].node == mdev->ofdev.dev.of_node)
+			break;
+	if (i >= MAX_ZS_PORTS)
+		return -ENODEV;
 
-			uap->dev = mdev;
-			dev_set_drvdata(&mdev->ofdev.dev, uap);
-			if (macio_request_resources(uap->dev, "pmac_zilog"))
-				printk(KERN_WARNING "%s: Failed to request resource"
-				       ", port still active\n",
-				       uap->node->name);
-			else
-				uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;				
-			return 0;
-		}
-	return -ENODEV;
+
+	uap = &pmz_ports[i];
+	uap->dev = mdev;
+	uap->port.dev = &mdev->ofdev.dev;
+	dev_set_drvdata(&mdev->ofdev.dev, uap);
+
+	/* We still activate the port even when failing to request resources
+	 * to work around bugs in ancient Apple device-trees
+	 */
+	if (macio_request_resources(uap->dev, "pmac_zilog"))
+		printk(KERN_WARNING "%s: Failed to request resource"
+		       ", port still active\n",
+		       uap->node->name);
+	else
+		uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
+
+	return uart_add_one_port(&pmz_uart_reg, &uap->port);
 }
 
 /*
@@ -1636,12 +1586,15 @@
 	if (!uap)
 		return -ENODEV;
 
+	uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
 	if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) {
 		macio_release_resources(uap->dev);
 		uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED;
 	}
 	dev_set_drvdata(&mdev->ofdev.dev, NULL);
 	uap->dev = NULL;
+	uap->port.dev = NULL;
 	
 	return 0;
 }
@@ -1650,59 +1603,13 @@
 static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
 {
 	struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
-	struct uart_state *state;
-	unsigned long flags;
 
 	if (uap == NULL) {
 		printk("HRM... pmz_suspend with NULL uap\n");
 		return 0;
 	}
 
-	if (pm_state.event == mdev->ofdev.dev.power.power_state.event)
-		return 0;
-
-	pmz_debug("suspend, switching to state %d\n", pm_state.event);
-
-	state = pmz_uart_reg.state + uap->port.line;
-
-	mutex_lock(&pmz_irq_mutex);
-	mutex_lock(&state->port.mutex);
-
-	spin_lock_irqsave(&uap->port.lock, flags);
-
-	if (ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)) {
-		/* Disable receiver and transmitter.  */
-		uap->curregs[R3] &= ~RxENABLE;
-		uap->curregs[R5] &= ~TxENABLE;
-
-		/* Disable all interrupts and BRK assertion.  */
-		uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
-		uap->curregs[R5] &= ~SND_BRK;
-		pmz_load_zsregs(uap, uap->curregs);
-		uap->flags |= PMACZILOG_FLAG_IS_ASLEEP;
-		mb();
-	}
-
-	spin_unlock_irqrestore(&uap->port.lock, flags);
-
-	if (ZS_IS_OPEN(uap) || ZS_IS_OPEN(uap->mate))
-		if (ZS_IS_ASLEEP(uap->mate) && ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
-			pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
-			disable_irq(uap->port.irq);
-		}
-
-	if (ZS_IS_CONS(uap))
-		uap->port.cons->flags &= ~CON_ENABLED;
-
-	/* Shut the chip down */
-	pmz_set_scc_power(uap, 0);
-
-	mutex_unlock(&state->port.mutex);
-	mutex_unlock(&pmz_irq_mutex);
-
-	pmz_debug("suspend, switching complete\n");
-
-	mdev->ofdev.dev.power.power_state = pm_state;
+	uart_suspend_port(&pmz_uart_reg, &uap->port);
 
 	return 0;
 }
@@ -1711,76 +1618,20 @@
 static int pmz_resume(struct macio_dev *mdev)
 {
 	struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
-	struct uart_state *state;
-	unsigned long flags;
-	int pwr_delay = 0;
 
 	if (uap == NULL)
 		return 0;
 
-	if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
-		return 0;
-	
-	pmz_debug("resume, switching to state 0\n");
-
-	state = pmz_uart_reg.state + uap->port.line;
-
-	mutex_lock(&pmz_irq_mutex);
-	mutex_lock(&state->port.mutex);
-
-	spin_lock_irqsave(&uap->port.lock, flags);
-	if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) {
-		spin_unlock_irqrestore(&uap->port.lock, flags);
-		goto bail;
-	}
-	pwr_delay = __pmz_startup(uap);
-
-	/* Take care of config that may have changed while asleep */
-	__pmz_set_termios(&uap->port, &uap->termios_cache, NULL);
-
-	if (ZS_IS_OPEN(uap)) {
-		/* Enable interrupts */		
-		uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
-		if (!ZS_IS_EXTCLK(uap))
-			uap->curregs[R1] |= EXT_INT_ENAB;
-		write_zsreg(uap, R1, uap->curregs[R1]);
-	}
-
-	spin_unlock_irqrestore(&uap->port.lock, flags);
-
-	if (ZS_IS_CONS(uap))
-		uap->port.cons->flags |= CON_ENABLED;
-
-	/* Re-enable IRQ on the controller */
-	if (ZS_IS_OPEN(uap) && !ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
-		pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
-		enable_irq(uap->port.irq);
-	}
-
- bail:
-	mutex_unlock(&state->port.mutex);
-	mutex_unlock(&pmz_irq_mutex);
-
-	/* Right now, we deal with delay by blocking here, I'll be
-	 * smarter later on
-	 */
-	if (pwr_delay != 0) {
-		pmz_debug("pmz: delaying %d ms\n", pwr_delay);
-		msleep(pwr_delay);
-	}
-
-	pmz_debug("resume, switching complete\n");
-
-	mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
+	uart_resume_port(&pmz_uart_reg, &uap->port);
 
 	return 0;
 }
 
 /*
  * Probe all ports in the system and build the ports array, we register
- * with the serial layer at this point, the macio-type probing is only
- * used later to "attach" to the sysfs tree so we get power management
- * events
+ * with the serial layer later, so we get a proper struct device which
+ * allows the tty to attach properly. This is later than it used to be
+ * but the tty layer really wants it that way.
  */
 static int __init pmz_probe(void)
 {
@@ -1816,8 +1667,10 @@
 		/*
 		 * Fill basic fields in the port structures
 		 */
-		pmz_ports[count].mate		= &pmz_ports[count+1];
-		pmz_ports[count+1].mate		= &pmz_ports[count];
+		if (node_b != NULL) {
+			pmz_ports[count].mate		= &pmz_ports[count+1];
+			pmz_ports[count+1].mate		= &pmz_ports[count];
+		}
 		pmz_ports[count].flags		= PMACZILOG_FLAG_IS_CHANNEL_A;
 		pmz_ports[count].node		= node_a;
 		pmz_ports[count+1].node		= node_b;
@@ -1855,8 +1708,8 @@
 	struct resource *r_ports;
 	int irq;
 
-	r_ports = platform_get_resource(uap->node, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(uap->node, 0);
+	r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(uap->pdev, 0);
 	if (!r_ports || !irq)
 		return -ENODEV;
 
@@ -1885,19 +1738,19 @@
 
 	pmz_ports_count = 0;
 
-	pmz_ports[0].mate      = &pmz_ports[1];
 	pmz_ports[0].port.line = 0;
 	pmz_ports[0].flags     = PMACZILOG_FLAG_IS_CHANNEL_A;
-	pmz_ports[0].node      = &scc_a_pdev;
+	pmz_ports[0].pdev      = &scc_a_pdev;
 	err = pmz_init_port(&pmz_ports[0]);
 	if (err)
 		return err;
 	pmz_ports_count++;
 
+	pmz_ports[0].mate      = &pmz_ports[1];
 	pmz_ports[1].mate      = &pmz_ports[0];
 	pmz_ports[1].port.line = 1;
 	pmz_ports[1].flags     = 0;
-	pmz_ports[1].node      = &scc_b_pdev;
+	pmz_ports[1].pdev      = &scc_b_pdev;
 	err = pmz_init_port(&pmz_ports[1]);
 	if (err)
 		return err;
@@ -1913,16 +1766,35 @@
 
 static int __init pmz_attach(struct platform_device *pdev)
 {
+	struct uart_pmac_port *uap;
 	int i;
 
+	/* Iterate the pmz_ports array to find a matching entry */
 	for (i = 0; i < pmz_ports_count; i++)
-		if (pmz_ports[i].node == pdev)
-			return 0;
-	return -ENODEV;
+		if (pmz_ports[i].pdev == pdev)
+			break;
+	if (i >= pmz_ports_count)
+		return -ENODEV;
+
+	uap = &pmz_ports[i];
+	uap->port.dev = &pdev->dev;
+	platform_set_drvdata(pdev, uap);
+
+	return uart_add_one_port(&pmz_uart_reg, &uap->port);
 }
 
 static int __exit pmz_detach(struct platform_device *pdev)
 {
+	struct uart_pmac_port *uap = platform_get_drvdata(pdev);
+
+	if (!uap)
+		return -ENODEV;
+
+	uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
+	platform_set_drvdata(pdev, NULL);
+	uap->port.dev = NULL;
+
 	return 0;
 }
 
@@ -1954,38 +1826,13 @@
  */
 static int __init pmz_register(void)
 {
-	int i, rc;
-	
 	pmz_uart_reg.nr = pmz_ports_count;
 	pmz_uart_reg.cons = PMACZILOG_CONSOLE;
 
 	/*
 	 * Register this driver with the serial core
 	 */
-	rc = uart_register_driver(&pmz_uart_reg);
-	if (rc)
-		return rc;
-
-	/*
-	 * Register each port with the serial core
-	 */
-	for (i = 0; i < pmz_ports_count; i++) {
-		struct uart_pmac_port *uport = &pmz_ports[i];
-		/* NULL node may happen on wallstreet */
-		if (uport->node != NULL)
-			rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
-		if (rc)
-			goto err_out;
-	}
-
-	return 0;
-err_out:
-	while (i-- > 0) {
-		struct uart_pmac_port *uport = &pmz_ports[i];
-		uart_remove_one_port(&pmz_uart_reg, &uport->port);
-	}
-	uart_unregister_driver(&pmz_uart_reg);
-	return rc;
+	return uart_register_driver(&pmz_uart_reg);
 }
 
 #ifdef CONFIG_PPC_PMAC
@@ -2084,10 +1931,13 @@
 
 	for (i = 0; i < pmz_ports_count; i++) {
 		struct uart_pmac_port *uport = &pmz_ports[i];
-		if (uport->node != NULL) {
-			uart_remove_one_port(&pmz_uart_reg, &uport->port);
+#ifdef CONFIG_PPC_PMAC
+		if (uport->node != NULL)
 			pmz_dispose_port(uport);
-		}
+#else
+		if (uport->pdev != NULL)
+			pmz_dispose_port(uport);
+#endif
 	}
 	/* Unregister UART driver */
 	uart_unregister_driver(&pmz_uart_reg);
@@ -2114,8 +1964,6 @@
 	struct uart_pmac_port *uap = &pmz_ports[con->index];
 	unsigned long flags;
 
-	if (ZS_IS_ASLEEP(uap))
-		return;
 	spin_lock_irqsave(&uap->port.lock, flags);
 
 	/* Turn of interrupts and enable the transmitter. */
@@ -2160,8 +2008,13 @@
 	if (co->index >= pmz_ports_count)
 		co->index = 0;
 	uap = &pmz_ports[co->index];
+#ifdef CONFIG_PPC_PMAC
 	if (uap->node == NULL)
 		return -ENODEV;
+#else
+	if (uap->pdev == NULL)
+		return -ENODEV;
+#endif
 	port = &uap->port;
 
 	/*
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fb..3483242 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -1,16 +1,6 @@
 #ifndef __PMAC_ZILOG_H__
 #define __PMAC_ZILOG_H__
 
-#ifdef CONFIG_PPC_PMAC
-#define pmz_debug(fmt, arg...)	dev_dbg(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_error(fmt, arg...)	dev_err(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_info(fmt, arg...)	dev_info(&uap->dev->ofdev.dev, fmt, ## arg)
-#else
-#define pmz_debug(fmt, arg...)	dev_dbg(&uap->node->dev, fmt, ## arg)
-#define pmz_error(fmt, arg...)	dev_err(&uap->node->dev, fmt, ## arg)
-#define pmz_info(fmt, arg...)	dev_info(&uap->node->dev, fmt, ## arg)
-#endif
-
 /*
  * At most 2 ESCCs with 2 ports each
  */
@@ -35,7 +25,7 @@
 	 */
 	struct device_node		*node;
 #else
-	struct platform_device		*node;
+	struct platform_device		*pdev;
 #endif
 
 	/* Port type as obtained from device tree (IRDA, modem, ...) */
@@ -50,14 +40,11 @@
 #define PMACZILOG_FLAG_REGS_HELD	0x00000010
 #define PMACZILOG_FLAG_TX_STOPPED	0x00000020
 #define PMACZILOG_FLAG_TX_ACTIVE	0x00000040
-#define PMACZILOG_FLAG_ENABLED          0x00000080
 #define PMACZILOG_FLAG_IS_IRDA		0x00000100
 #define PMACZILOG_FLAG_IS_INTMODEM	0x00000200
 #define PMACZILOG_FLAG_HAS_DMA		0x00000400
 #define PMACZILOG_FLAG_RSRC_REQUESTED	0x00000800
-#define PMACZILOG_FLAG_IS_ASLEEP	0x00001000
 #define PMACZILOG_FLAG_IS_OPEN		0x00002000
-#define PMACZILOG_FLAG_IS_IRQ_ON	0x00004000
 #define PMACZILOG_FLAG_IS_EXTCLK	0x00008000
 #define PMACZILOG_FLAG_BREAK		0x00010000
 
@@ -74,6 +61,8 @@
 	volatile struct dbdma_regs	__iomem *rx_dma_regs;
 #endif
 
+	unsigned char			irq_name[8];
+
 	struct ktermios			termios_cache;
 };
 
@@ -388,9 +377,7 @@
 #define ZS_IS_IRDA(UP)			((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
 #define ZS_IS_INTMODEM(UP)		((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
 #define ZS_HAS_DMA(UP)			((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
-#define ZS_IS_ASLEEP(UP)		((UP)->flags & PMACZILOG_FLAG_IS_ASLEEP)
 #define ZS_IS_OPEN(UP)			((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
-#define ZS_IS_IRQ_ON(UP)		((UP)->flags & PMACZILOG_FLAG_IS_IRQ_ON)
 #define ZS_IS_EXTCLK(UP)		((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
 
 #endif /* __PMAC_ZILOG_H__ */
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad..e0b4b0a 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -736,19 +736,7 @@
 	},
 };
 
-static int __init sc26xx_init(void)
-{
-	return platform_driver_register(&sc26xx_driver);
-}
-
-static void __exit sc26xx_exit(void)
-{
-	platform_driver_unregister(&sc26xx_driver);
-}
-
-module_init(sc26xx_init);
-module_exit(sc26xx_exit);
-
+module_platform_driver(sc26xx_driver);
 
 MODULE_AUTHOR("Thomas Bogendörfer");
 MODULE_DESCRIPTION("SC681/SC2692 serial driver");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0406d7f..c7bf31a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -22,6 +22,7 @@
  */
 #include <linux/module.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/console.h>
@@ -60,6 +61,8 @@
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
 static void uart_change_pm(struct uart_state *state, int pm_state);
 
+static void uart_port_shutdown(struct tty_port *port);
+
 /*
  * This routine is used by the interrupt handler to schedule processing in
  * the software interrupt portion of the driver.
@@ -128,25 +131,16 @@
  * Startup the port.  This will be called once per open.  All calls
  * will be serialised by the per-port mutex.
  */
-static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
+static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
+		int init_hw)
 {
 	struct uart_port *uport = state->uart_port;
 	struct tty_port *port = &state->port;
 	unsigned long page;
 	int retval = 0;
 
-	if (port->flags & ASYNC_INITIALIZED)
-		return 0;
-
-	/*
-	 * Set the TTY IO error marker - we will only clear this
-	 * once we have successfully opened the port.  Also set
-	 * up the tty->alt_speed kludge
-	 */
-	set_bit(TTY_IO_ERROR, &tty->flags);
-
 	if (uport->type == PORT_UNKNOWN)
-		return 0;
+		return 1;
 
 	/*
 	 * Initialise and allocate the transmit and temporary
@@ -188,10 +182,6 @@
 				tty->hw_stopped = 1;
 			spin_unlock_irq(&uport->lock);
 		}
-
-		set_bit(ASYNCB_INITIALIZED, &port->flags);
-
-		clear_bit(TTY_IO_ERROR, &tty->flags);
 	}
 
 	/*
@@ -200,6 +190,31 @@
 	 * now.
 	 */
 	if (retval && capable(CAP_SYS_ADMIN))
+		return 1;
+
+	return retval;
+}
+
+static int uart_startup(struct tty_struct *tty, struct uart_state *state,
+		int init_hw)
+{
+	struct tty_port *port = &state->port;
+	int retval;
+
+	if (port->flags & ASYNC_INITIALIZED)
+		return 0;
+
+	/*
+	 * Set the TTY IO error marker - we will only clear this
+	 * once we have successfully opened the port.
+	 */
+	set_bit(TTY_IO_ERROR, &tty->flags);
+
+	retval = uart_port_startup(tty, state, init_hw);
+	if (!retval) {
+		set_bit(ASYNCB_INITIALIZED, &port->flags);
+		clear_bit(TTY_IO_ERROR, &tty->flags);
+	} else if (retval > 0)
 		retval = 0;
 
 	return retval;
@@ -228,24 +243,7 @@
 		if (!tty || (tty->termios->c_cflag & HUPCL))
 			uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
 
-		/*
-		 * clear delta_msr_wait queue to avoid mem leaks: we may free
-		 * the irq here so the queue might never be woken up.  Note
-		 * that we won't end up waiting on delta_msr_wait again since
-		 * any outstanding file descriptors should be pointing at
-		 * hung_up_tty_fops now.
-		 */
-		wake_up_interruptible(&port->delta_msr_wait);
-
-		/*
-		 * Free the IRQ and disable the port.
-		 */
-		uport->ops->shutdown(uport);
-
-		/*
-		 * Ensure that the IRQ handler isn't running on another CPU.
-		 */
-		synchronize_irq(uport->irq);
+		uart_port_shutdown(port);
 	}
 
 	/*
@@ -423,7 +421,7 @@
 	if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
 		quot = port->custom_divisor;
 	else
-		quot = (port->uartclk + (8 * baud)) / (16 * baud);
+		quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);
 
 	return quot;
 }
@@ -658,10 +656,10 @@
 	tmp.flags	    = uport->flags;
 	tmp.xmit_fifo_size  = uport->fifosize;
 	tmp.baud_base	    = uport->uartclk / 16;
-	tmp.close_delay	    = port->close_delay / 10;
+	tmp.close_delay	    = jiffies_to_msecs(port->close_delay) / 10;
 	tmp.closing_wait    = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
 				ASYNC_CLOSING_WAIT_NONE :
-				port->closing_wait / 10;
+				jiffies_to_msecs(port->closing_wait) / 10;
 	tmp.custom_divisor  = uport->custom_divisor;
 	tmp.hub6	    = uport->hub6;
 	tmp.io_type         = uport->iotype;
@@ -695,9 +693,10 @@
 		new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
 
 	new_serial.irq = irq_canonicalize(new_serial.irq);
-	close_delay = new_serial.close_delay * 10;
+	close_delay = msecs_to_jiffies(new_serial.close_delay * 10);
 	closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
-			ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+			ASYNC_CLOSING_WAIT_NONE :
+			msecs_to_jiffies(new_serial.closing_wait * 10);
 
 	/*
 	 * This semaphore protects port->count.  It is also
@@ -1265,47 +1264,8 @@
 
 	pr_debug("uart_close(%d) called\n", uport->line);
 
-	spin_lock_irqsave(&port->lock, flags);
-
-	if (tty_hung_up_p(filp)) {
-		spin_unlock_irqrestore(&port->lock, flags);
+	if (tty_port_close_start(port, tty, filp) == 0)
 		return;
-	}
-
-	if ((tty->count == 1) && (port->count != 1)) {
-		/*
-		 * Uh, oh.  tty->count is 1, which means that the tty
-		 * structure will be freed.  port->count should always
-		 * be one in these conditions.  If it's greater than
-		 * one, we've got real problems, since it means the
-		 * serial port won't be shutdown.
-		 */
-		printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, "
-		       "port->count is %d\n", port->count);
-		port->count = 1;
-	}
-	if (--port->count < 0) {
-		printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n",
-		       tty->name, port->count);
-		port->count = 0;
-	}
-	if (port->count) {
-		spin_unlock_irqrestore(&port->lock, flags);
-		return;
-	}
-
-	/*
-	 * Now we wait for the transmit buffer to clear; and we notify
-	 * the line discipline to only process XON/XOFF characters by
-	 * setting tty->closing.
-	 */
-	set_bit(ASYNCB_CLOSING, &port->flags);
-	tty->closing = 1;
-	spin_unlock_irqrestore(&port->lock, flags);
-
-	if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
-		tty_wait_until_sent_from_close(tty,
-				msecs_to_jiffies(port->closing_wait));
 
 	/*
 	 * At this point, we stop accepting input.  To do this, we
@@ -1337,7 +1297,8 @@
 	if (port->blocked_open) {
 		spin_unlock_irqrestore(&port->lock, flags);
 		if (port->close_delay)
-			msleep_interruptible(port->close_delay);
+			msleep_interruptible(
+					jiffies_to_msecs(port->close_delay));
 		spin_lock_irqsave(&port->lock, flags);
 	} else if (!uart_console(uport)) {
 		spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1402,36 @@
 	mutex_unlock(&port->mutex);
 }
 
+static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+	return 0;
+}
+
+static void uart_port_shutdown(struct tty_port *port)
+{
+	struct uart_state *state = container_of(port, struct uart_state, port);
+	struct uart_port *uport = state->uart_port;
+
+	/*
+	 * clear delta_msr_wait queue to avoid mem leaks: we may free
+	 * the irq here so the queue might never be woken up.  Note
+	 * that we won't end up waiting on delta_msr_wait again since
+	 * any outstanding file descriptors should be pointing at
+	 * hung_up_tty_fops now.
+	 */
+	wake_up_interruptible(&port->delta_msr_wait);
+
+	/*
+	 * Free the IRQ and disable the port.
+	 */
+	uport->ops->shutdown(uport);
+
+	/*
+	 * Ensure that the IRQ handler isn't running on another CPU.
+	 */
+	synchronize_irq(uport->irq);
+}
+
 static int uart_carrier_raised(struct tty_port *port)
 {
 	struct uart_state *state = container_of(port, struct uart_state, port);
@@ -1466,33 +1457,6 @@
 		uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
 }
 
-static struct uart_state *uart_get(struct uart_driver *drv, int line)
-{
-	struct uart_state *state;
-	struct tty_port *port;
-	int ret = 0;
-
-	state = drv->state + line;
-	port = &state->port;
-	if (mutex_lock_interruptible(&port->mutex)) {
-		ret = -ERESTARTSYS;
-		goto err;
-	}
-
-	port->count++;
-	if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
-		ret = -ENXIO;
-		goto err_unlock;
-	}
-	return state;
-
- err_unlock:
-	port->count--;
-	mutex_unlock(&port->mutex);
- err:
-	return ERR_PTR(ret);
-}
-
 /*
  * calls to uart_open are serialised by the BKL in
  *   fs/char_dev.c:chrdev_open()
@@ -1506,26 +1470,29 @@
 static int uart_open(struct tty_struct *tty, struct file *filp)
 {
 	struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
-	struct uart_state *state;
-	struct tty_port *port;
 	int retval, line = tty->index;
+	struct uart_state *state = drv->state + line;
+	struct tty_port *port = &state->port;
 
 	pr_debug("uart_open(%d) called\n", line);
 
 	/*
-	 * We take the semaphore inside uart_get to guarantee that we won't
-	 * be re-entered while allocating the state structure, or while we
-	 * request any IRQs that the driver may need.  This also has the nice
-	 * side-effect that it delays the action of uart_hangup, so we can
-	 * guarantee that state->port.tty will always contain something
-	 * reasonable.
+	 * We take the semaphore here to guarantee that we won't be re-entered
+	 * while allocating the state structure, or while we request any IRQs
+	 * that the driver may need.  This also has the nice side-effect that
+	 * it delays the action of uart_hangup, so we can guarantee that
+	 * state->port.tty will always contain something reasonable.
 	 */
-	state = uart_get(drv, line);
-	if (IS_ERR(state)) {
-		retval = PTR_ERR(state);
-		goto fail;
+	if (mutex_lock_interruptible(&port->mutex)) {
+		retval = -ERESTARTSYS;
+		goto end;
 	}
-	port = &state->port;
+
+	port->count++;
+	if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+		retval = -ENXIO;
+		goto err_dec_count;
+	}
 
 	/*
 	 * Once we set tty->driver_data here, we are guaranteed that
@@ -1535,7 +1502,6 @@
 	tty->driver_data = state;
 	state->uart_port->state = state;
 	tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
-	tty->alt_speed = 0;
 	tty_port_tty_set(port, tty);
 
 	/*
@@ -1543,9 +1509,7 @@
 	 */
 	if (tty_hung_up_p(filp)) {
 		retval = -EAGAIN;
-		port->count--;
-		mutex_unlock(&port->mutex);
-		goto fail;
+		goto err_dec_count;
 	}
 
 	/*
@@ -1566,8 +1530,12 @@
 	if (retval == 0)
 		retval = tty_port_block_til_ready(port, tty, filp);
 
-fail:
+end:
 	return retval;
+err_dec_count:
+	port->count--;
+	mutex_unlock(&port->mutex);
+	goto end;
 }
 
 static const char *uart_type(struct uart_port *port)
@@ -1858,6 +1826,14 @@
 EXPORT_SYMBOL_GPL(uart_set_options);
 #endif /* CONFIG_SERIAL_CORE_CONSOLE */
 
+/**
+ * uart_change_pm - set power state of the port
+ *
+ * @state: port descriptor
+ * @pm_state: new state
+ *
+ * Locking: port->mutex has to be held
+ */
 static void uart_change_pm(struct uart_state *state, int pm_state)
 {
 	struct uart_port *port = state->uart_port;
@@ -2214,6 +2190,8 @@
 };
 
 static const struct tty_port_operations uart_port_ops = {
+	.activate	= uart_port_activate,
+	.shutdown	= uart_port_shutdown,
 	.carrier_raised = uart_carrier_raised,
 	.dtr_rts	= uart_dtr_rts,
 };
@@ -2275,8 +2253,8 @@
 
 		tty_port_init(port);
 		port->ops = &uart_port_ops;
-		port->close_delay     = 500;	/* .5 seconds */
-		port->closing_wait    = 30000;	/* 30 seconds */
+		port->close_delay     = HZ / 2;	/* .5 seconds */
+		port->closing_wait    = 30 * HZ;/* 30 seconds */
 	}
 
 	retval = tty_register_driver(normal);
@@ -2467,6 +2445,99 @@
 }
 EXPORT_SYMBOL(uart_match_port);
 
+/**
+ *	uart_handle_dcd_change - handle a change of carrier detect state
+ *	@uport: uart_port structure for the open port
+ *	@status: new carrier detect status, nonzero if active
+ */
+void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
+{
+	struct uart_state *state = uport->state;
+	struct tty_port *port = &state->port;
+	struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
+	struct pps_event_time ts;
+
+	if (ld && ld->ops->dcd_change)
+		pps_get_ts(&ts);
+
+	uport->icount.dcd++;
+#ifdef CONFIG_HARD_PPS
+	if ((uport->flags & UPF_HARDPPS_CD) && status)
+		hardpps();
+#endif
+
+	if (port->flags & ASYNC_CHECK_CD) {
+		if (status)
+			wake_up_interruptible(&port->open_wait);
+		else if (port->tty)
+			tty_hangup(port->tty);
+	}
+
+	if (ld && ld->ops->dcd_change)
+		ld->ops->dcd_change(port->tty, status, &ts);
+	if (ld)
+		tty_ldisc_deref(ld);
+}
+EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
+
+/**
+ *	uart_handle_cts_change - handle a change of clear-to-send state
+ *	@uport: uart_port structure for the open port
+ *	@status: new clear to send status, nonzero if active
+ */
+void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
+{
+	struct tty_port *port = &uport->state->port;
+	struct tty_struct *tty = port->tty;
+
+	uport->icount.cts++;
+
+	if (port->flags & ASYNC_CTS_FLOW) {
+		if (tty->hw_stopped) {
+			if (status) {
+				tty->hw_stopped = 0;
+				uport->ops->start_tx(uport);
+				uart_write_wakeup(uport);
+			}
+		} else {
+			if (!status) {
+				tty->hw_stopped = 1;
+				uport->ops->stop_tx(uport);
+			}
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(uart_handle_cts_change);
+
+/**
+ * uart_insert_char - push a char to the uart layer
+ *
+ * The caller is responsible for calling tty_flip_buffer_push() when they are
+ * done with insertion.
+ *
+ * @port: corresponding port
+ * @status: state of the serial port RX buffer (LSR for 8250)
+ * @overrun: mask of overrun bits in @status
+ * @ch: character to push
+ * @flag: flag for the character (see TTY_NORMAL and friends)
+ */
+void uart_insert_char(struct uart_port *port, unsigned int status,
+		 unsigned int overrun, unsigned int ch, unsigned int flag)
+{
+	struct tty_struct *tty = port->state->port.tty;
+
+	if ((status & port->ignore_status_mask & ~overrun) == 0)
+		tty_insert_flip_char(tty, ch, flag);
+
+	/*
+	 * Overrun is special.  Since it's reported immediately,
+	 * it doesn't affect the current character.
+	 */
+	if (status & ~port->ignore_status_mask & overrun)
+		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+}
+EXPORT_SYMBOL_GPL(uart_insert_char);
+
 EXPORT_SYMBOL(uart_write_wakeup);
 EXPORT_SYMBOL(uart_register_driver);
 EXPORT_SYMBOL(uart_unregister_driver);
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index eef736f..8609060 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -317,7 +317,7 @@
 	info->p_dev = link;
 	link->priv = info;
 
-	link->config_flags |= CONF_ENABLE_IRQ;
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
 	if (do_sound)
 		link->config_flags |= CONF_ENABLE_SPKR;
 
@@ -445,7 +445,7 @@
 
 	/* First pass: look for a config entry that looks normal.
 	 * Two tries: without IO aliases, then with aliases */
-	link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_SET_IO;
+	link->config_flags |= CONF_AUTO_SET_VPP;
 	for (try = 0; try < 4; try++)
 		if (!pcmcia_loop_config(link, simple_config_check, &try))
 			goto found_port;
@@ -501,7 +501,8 @@
 {
 	int *base2 = priv_data;
 
-	if (!p_dev->resource[0]->end || !p_dev->resource[1]->end)
+	if (!p_dev->resource[0]->end || !p_dev->resource[1]->end ||
+		p_dev->resource[0]->start + 8 != p_dev->resource[1]->start)
 		return -ENODEV;
 
 	p_dev->resource[0]->end = p_dev->resource[1]->end = 8;
@@ -520,7 +521,6 @@
 	struct serial_info *info = link->priv;
 	int i, base2 = 0;
 
-	link->config_flags |= CONF_AUTO_SET_IO;
 	/* First, look for a generic full-sized window */
 	if (!pcmcia_loop_config(link, multi_config_check, &info->multi))
 		base2 = link->resource[0]->start + 8;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
new file mode 100644
index 0000000..a60523f
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -0,0 +1,783 @@
+/*
+ * Driver for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "sirfsoc_uart.h"
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
+static struct uart_driver sirfsoc_uart_drv;
+
+static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
+	{4000000, 2359296},
+	{3500000, 1310721},
+	{3000000, 1572865},
+	{2500000, 1245186},
+	{2000000, 1572866},
+	{1500000, 1245188},
+	{1152000, 1638404},
+	{1000000, 1572869},
+	{921600, 1114120},
+	{576000, 1245196},
+	{500000, 1245198},
+	{460800, 1572876},
+	{230400, 1310750},
+	{115200, 1310781},
+	{57600, 1310843},
+	{38400, 1114328},
+	{19200, 1114545},
+	{9600, 1114979},
+};
+
+static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
+	[0] = {
+		.port = {
+			.iotype		= UPIO_MEM,
+			.flags		= UPF_BOOT_AUTOCONF,
+			.line		= 0,
+		},
+	},
+	[1] = {
+		.port = {
+			.iotype		= UPIO_MEM,
+			.flags		= UPF_BOOT_AUTOCONF,
+			.line		= 1,
+		},
+	},
+	[2] = {
+		.port = {
+			.iotype		= UPIO_MEM,
+			.flags		= UPF_BOOT_AUTOCONF,
+			.line		= 2,
+		},
+	},
+};
+
+static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
+{
+	return container_of(port, struct sirfsoc_uart_port, port);
+}
+
+static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
+{
+	unsigned long reg;
+	reg = rd_regl(port, SIRFUART_TX_FIFO_STATUS);
+	if (reg & SIRFUART_FIFOEMPTY_MASK(port))
+		return TIOCSER_TEMT;
+	else
+		return 0;
+}
+
+static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	if (!(sirfport->ms_enabled)) {
+		goto cts_asserted;
+	} else if (sirfport->hw_flow_ctrl) {
+		if (!(rd_regl(port, SIRFUART_AFC_CTRL) &
+						SIRFUART_CTS_IN_STATUS))
+			goto cts_asserted;
+		else
+			goto cts_deasserted;
+	}
+cts_deasserted:
+	return TIOCM_CAR | TIOCM_DSR;
+cts_asserted:
+	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	unsigned int assert = mctrl & TIOCM_RTS;
+	unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
+	unsigned int current_val;
+	if (sirfport->hw_flow_ctrl) {
+		current_val = rd_regl(port, SIRFUART_AFC_CTRL) & ~0xFF;
+		val |= current_val;
+		wr_regl(port, SIRFUART_AFC_CTRL, val);
+	}
+}
+
+static void sirfsoc_uart_stop_tx(struct uart_port *port)
+{
+	unsigned int regv;
+	regv = rd_regl(port, SIRFUART_INT_EN);
+	wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_TX_INT_EN);
+}
+
+void sirfsoc_uart_start_tx(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	unsigned long regv;
+	sirfsoc_uart_pio_tx_chars(sirfport, 1);
+	wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);
+	regv = rd_regl(port, SIRFUART_INT_EN);
+	wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_TX_INT_EN);
+}
+
+static void sirfsoc_uart_stop_rx(struct uart_port *port)
+{
+	unsigned long regv;
+	wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+	regv = rd_regl(port, SIRFUART_INT_EN);
+	wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_RX_IO_INT_EN);
+}
+
+static void sirfsoc_uart_disable_ms(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	unsigned long reg;
+	sirfport->ms_enabled = 0;
+	if (!sirfport->hw_flow_ctrl)
+		return;
+	reg = rd_regl(port, SIRFUART_AFC_CTRL);
+	wr_regl(port, SIRFUART_AFC_CTRL, reg & ~0x3FF);
+	reg = rd_regl(port, SIRFUART_INT_EN);
+	wr_regl(port, SIRFUART_INT_EN, reg & ~SIRFUART_CTS_INT_EN);
+}
+
+static void sirfsoc_uart_enable_ms(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	unsigned long reg;
+	unsigned long flg;
+	if (!sirfport->hw_flow_ctrl)
+		return;
+	flg = SIRFUART_AFC_RX_EN | SIRFUART_AFC_TX_EN;
+	reg = rd_regl(port, SIRFUART_AFC_CTRL);
+	wr_regl(port, SIRFUART_AFC_CTRL, reg | flg);
+	reg = rd_regl(port, SIRFUART_INT_EN);
+	wr_regl(port, SIRFUART_INT_EN, reg | SIRFUART_CTS_INT_EN);
+	uart_handle_cts_change(port,
+		!(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS));
+	sirfport->ms_enabled = 1;
+}
+
+static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
+{
+	unsigned long ulcon = rd_regl(port, SIRFUART_LINE_CTRL);
+	if (break_state)
+		ulcon |= SIRFUART_SET_BREAK;
+	else
+		ulcon &= ~SIRFUART_SET_BREAK;
+	wr_regl(port, SIRFUART_LINE_CTRL, ulcon);
+}
+
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
+{
+	unsigned int ch, rx_count = 0;
+	struct tty_struct *tty;
+
+	tty = tty_port_tty_get(&port->state->port);
+	if (!tty)
+		return -ENODEV;
+
+	while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
+					SIRFUART_FIFOEMPTY_MASK(port))) {
+		ch = rd_regl(port, SIRFUART_RX_FIFO_DATA) | SIRFUART_DUMMY_READ;
+		if (unlikely(uart_handle_sysrq_char(port, ch)))
+			continue;
+		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
+		rx_count++;
+		if (rx_count >= max_rx_count)
+			break;
+	}
+
+	port->icount.rx += rx_count;
+	tty_flip_buffer_push(tty);
+	tty_kref_put(tty);
+
+	return rx_count;
+}
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
+{
+	struct uart_port *port = &sirfport->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned int num_tx = 0;
+	while (!uart_circ_empty(xmit) &&
+		!(rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+					SIRFUART_FIFOFULL_MASK(port)) &&
+		count--) {
+		wr_regl(port, SIRFUART_TX_FIFO_DATA, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+		num_tx++;
+	}
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+	return num_tx;
+}
+
+static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
+{
+	unsigned long intr_status;
+	unsigned long cts_status;
+	unsigned long flag = TTY_NORMAL;
+	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+	struct uart_port *port = &sirfport->port;
+	struct uart_state *state = port->state;
+	struct circ_buf *xmit = &port->state->xmit;
+	intr_status = rd_regl(port, SIRFUART_INT_STATUS);
+	wr_regl(port, SIRFUART_INT_STATUS, intr_status);
+	intr_status &= rd_regl(port, SIRFUART_INT_EN);
+	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT))) {
+		if (intr_status & SIRFUART_RXD_BREAK) {
+			if (uart_handle_break(port))
+				goto recv_char;
+			uart_insert_char(port, intr_status,
+					SIRFUART_RX_OFLOW, 0, TTY_BREAK);
+			return IRQ_HANDLED;
+		}
+		if (intr_status & SIRFUART_RX_OFLOW)
+			port->icount.overrun++;
+		if (intr_status & SIRFUART_FRM_ERR) {
+			port->icount.frame++;
+			flag = TTY_FRAME;
+		}
+		if (intr_status & SIRFUART_PARITY_ERR)
+			flag = TTY_PARITY;
+		wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+		wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+		wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+		intr_status &= port->read_status_mask;
+		uart_insert_char(port, intr_status,
+					SIRFUART_RX_OFLOW_INT, 0, flag);
+	}
+recv_char:
+	if (intr_status & SIRFUART_CTS_INT_EN) {
+		cts_status = !(rd_regl(port, SIRFUART_AFC_CTRL) &
+							SIRFUART_CTS_IN_STATUS);
+		if (cts_status != 0) {
+			uart_handle_cts_change(port, 1);
+		} else {
+			uart_handle_cts_change(port, 0);
+			wake_up_interruptible(&state->port.delta_msr_wait);
+		}
+	}
+	if (intr_status & SIRFUART_RX_IO_INT_EN)
+		sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
+	if (intr_status & SIRFUART_TX_INT_EN) {
+		if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+			return IRQ_HANDLED;
+		} else {
+			sirfsoc_uart_pio_tx_chars(sirfport,
+					SIRFSOC_UART_IO_TX_REASONABLE_CNT);
+			if ((uart_circ_empty(xmit)) &&
+				(rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+						SIRFUART_FIFOEMPTY_MASK(port)))
+				sirfsoc_uart_stop_tx(port);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+static void sirfsoc_uart_start_rx(struct uart_port *port)
+{
+	unsigned long regv;
+	regv = rd_regl(port, SIRFUART_INT_EN);
+	wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_RX_IO_INT_EN);
+	wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+	wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+	wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+}
+
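+/*
+ * Editorial note: for baud rates not covered by the table above, the divisor
+ * is found by brute force: for every sample_div in [0xf, 0x3f] compute the
+ * ioclk_div giving the closest rate (baud = ioclk / ((ioclk_div + 1) *
+ * (sample_div + 1))), keep the pair with the smallest error, and pack both
+ * fields into the value written to the DIVISOR register.
+ */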
+static unsigned int
+sirfsoc_calc_sample_div(unsigned long baud_rate,
+			unsigned long ioclk_rate, unsigned long *setted_baud)
+{
+	unsigned long min_delta = ~0UL;
+	unsigned short sample_div;
+	unsigned int regv = 0;
+	unsigned long ioclk_div;
+	unsigned long baud_tmp;
+	int temp_delta;
+
+	for (sample_div = SIRF_MIN_SAMPLE_DIV;
+			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
+		ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
+		if (ioclk_div > SIRF_IOCLK_DIV_MAX)
+			continue;
+		baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
+		temp_delta = baud_tmp - baud_rate;
+		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
+		if (temp_delta < min_delta) {
+			regv = regv & (~SIRF_IOCLK_DIV_MASK);
+			regv = regv | ioclk_div;
+			regv = regv & (~SIRF_SAMPLE_DIV_MASK);
+			regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
+			min_delta = temp_delta;
+			*setted_baud = baud_tmp;
+		}
+	}
+	return regv;
+}
+
+static void sirfsoc_uart_set_termios(struct uart_port *port,
+				       struct ktermios *termios,
+				       struct ktermios *old)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	unsigned long	ioclk_rate;
+	unsigned long	config_reg = 0;
+	unsigned long	baud_rate;
+	unsigned long	setted_baud;
+	unsigned long	flags;
+	unsigned long	ic;
+	unsigned int	clk_div_reg = 0;
+	unsigned long	temp_reg_val;
+	unsigned long	rx_time_out;
+	int		threshold_div;
+	int		temp;
+
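+	/*
+	 * Editorial note: the UART input clock is assumed to be a fixed
+	 * 150 MHz here rather than being queried via the clk API.
+	 */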
+	ioclk_rate = 150000000;
+	switch (termios->c_cflag & CSIZE) {
+	default:
+	case CS8:
+		config_reg |= SIRFUART_DATA_BIT_LEN_8;
+		break;
+	case CS7:
+		config_reg |= SIRFUART_DATA_BIT_LEN_7;
+		break;
+	case CS6:
+		config_reg |= SIRFUART_DATA_BIT_LEN_6;
+		break;
+	case CS5:
+		config_reg |= SIRFUART_DATA_BIT_LEN_5;
+		break;
+	}
+	if (termios->c_cflag & CSTOPB)
+		config_reg |= SIRFUART_STOP_BIT_LEN_2;
+	baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
+	spin_lock_irqsave(&port->lock, flags);
+	port->read_status_mask = SIRFUART_RX_OFLOW_INT;
+	port->ignore_status_mask = 0;
+	/* read flags */
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |=
+			SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		port->read_status_mask |= SIRFUART_RXD_BREAK_INT;
+	/* ignore flags */
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |=
+			SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |= SIRFUART_DUMMY_READ;
+	/* enable parity if PARENB is set */
+	if (termios->c_cflag & PARENB) {
+		if (termios->c_cflag & CMSPAR) {
+			if (termios->c_cflag & PARODD)
+				config_reg |= SIRFUART_STICK_BIT_MARK;
+			else
+				config_reg |= SIRFUART_STICK_BIT_SPACE;
+		} else if (termios->c_cflag & PARODD) {
+			config_reg |= SIRFUART_STICK_BIT_ODD;
+		} else {
+			config_reg |= SIRFUART_STICK_BIT_EVEN;
+		}
+	}
+	/* Hardware Flow Control Settings */
+	if (UART_ENABLE_MS(port, termios->c_cflag)) {
+		if (!sirfport->ms_enabled)
+			sirfsoc_uart_enable_ms(port);
+	} else {
+		if (sirfport->ms_enabled)
+			sirfsoc_uart_disable_ms(port);
+	}
+
+	/* common rate: fast calculation */
+	for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
+		if (baud_rate == baudrate_to_regv[ic].baud_rate)
+			clk_div_reg = baudrate_to_regv[ic].reg_val;
+	setted_baud = baud_rate;
+	/* arbitrary rate setting */
+	if (unlikely(clk_div_reg == 0))
+		clk_div_reg = sirfsoc_calc_sample_div(baud_rate, ioclk_rate,
+								&setted_baud);
+	wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
+
+	if (tty_termios_baud_rate(termios))
+		tty_termios_encode_baud_rate(termios, setted_baud, setted_baud);
+
+	/* set receive timeout */
+	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(baud_rate, 20000);
+	rx_time_out = (rx_time_out > 0xFFFF) ? 0xFFFF : rx_time_out;
+	config_reg |= SIRFUART_RECV_TIMEOUT(rx_time_out);
+	temp_reg_val = rd_regl(port, SIRFUART_TX_FIFO_OP);
+	wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+	wr_regl(port, SIRFUART_TX_FIFO_OP,
+				temp_reg_val & ~SIRFUART_TX_FIFO_START);
+	wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, SIRFUART_TX_MODE_IO);
+	wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, SIRFUART_RX_MODE_IO);
+	wr_regl(port, SIRFUART_LINE_CTRL, config_reg);
+
+	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
+	if (baud_rate < 1000000)
+		threshold_div = 1;
+	else
+		threshold_div = 2;
+	temp = port->line == 1 ? 16 : 64;
+	wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp / threshold_div);
+	wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp / threshold_div);
+	temp_reg_val |= SIRFUART_TX_FIFO_START;
+	wr_regl(port, SIRFUART_TX_FIFO_OP, temp_reg_val);
+	uart_update_timeout(port, termios->c_cflag, baud_rate);
+	sirfsoc_uart_start_rx(port);
+	wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_TX_EN | SIRFUART_RX_EN);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void startup_uart_controller(struct uart_port *port)
+{
+	unsigned long temp_regv;
+	int temp;
+	temp_regv = rd_regl(port, SIRFUART_TX_DMA_IO_CTRL);
+	wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, temp_regv | SIRFUART_TX_MODE_IO);
+	temp_regv = rd_regl(port, SIRFUART_RX_DMA_IO_CTRL);
+	wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, temp_regv | SIRFUART_RX_MODE_IO);
+	wr_regl(port, SIRFUART_TX_DMA_IO_LEN, 0);
+	wr_regl(port, SIRFUART_RX_DMA_IO_LEN, 0);
+	wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_RX_EN | SIRFUART_TX_EN);
+	wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_RESET);
+	wr_regl(port, SIRFUART_TX_FIFO_OP, 0);
+	wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+	wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+	temp = port->line == 1 ? 16 : 64;
+	wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp);
+	wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp);
+}
+
+static int sirfsoc_uart_startup(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport	= to_sirfport(port);
+	unsigned int index			= port->line;
+	int ret;
+	set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
+	ret = request_irq(port->irq,
+				sirfsoc_uart_isr,
+				0,
+				SIRFUART_PORT_NAME,
+				sirfport);
+	if (ret != 0) {
+		dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
+							index, port->irq);
+		goto irq_err;
+	}
+	startup_uart_controller(port);
+	enable_irq(port->irq);
+irq_err:
+	return ret;
+}
+
+static void sirfsoc_uart_shutdown(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	wr_regl(port, SIRFUART_INT_EN, 0);
+	free_irq(port->irq, sirfport);
+	if (sirfport->ms_enabled) {
+		sirfsoc_uart_disable_ms(port);
+		sirfport->ms_enabled = 0;
+	}
+}
+
+static const char *sirfsoc_uart_type(struct uart_port *port)
+{
+	return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
+}
+
+static int sirfsoc_uart_request_port(struct uart_port *port)
+{
+	void *ret;
+	ret = request_mem_region(port->mapbase,
+				SIRFUART_MAP_SIZE, SIRFUART_PORT_NAME);
+	return ret ? 0 : -EBUSY;
+}
+
+static void sirfsoc_uart_release_port(struct uart_port *port)
+{
+	release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
+}
+
+static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
+{
+	if (flags & UART_CONFIG_TYPE) {
+		port->type = SIRFSOC_PORT_TYPE;
+		sirfsoc_uart_request_port(port);
+	}
+}
+
+static struct uart_ops sirfsoc_uart_ops = {
+	.tx_empty	= sirfsoc_uart_tx_empty,
+	.get_mctrl	= sirfsoc_uart_get_mctrl,
+	.set_mctrl	= sirfsoc_uart_set_mctrl,
+	.stop_tx	= sirfsoc_uart_stop_tx,
+	.start_tx	= sirfsoc_uart_start_tx,
+	.stop_rx	= sirfsoc_uart_stop_rx,
+	.enable_ms	= sirfsoc_uart_enable_ms,
+	.break_ctl	= sirfsoc_uart_break_ctl,
+	.startup	= sirfsoc_uart_startup,
+	.shutdown	= sirfsoc_uart_shutdown,
+	.set_termios	= sirfsoc_uart_set_termios,
+	.type		= sirfsoc_uart_type,
+	.release_port	= sirfsoc_uart_release_port,
+	.request_port	= sirfsoc_uart_request_port,
+	.config_port	= sirfsoc_uart_config_port,
+};
+
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+static int __init sirfsoc_uart_console_setup(struct console *co, char *options)
+{
+	unsigned int baud = 115200;
+	unsigned int bits = 8;
+	unsigned int parity = 'n';
+	unsigned int flow = 'n';
+	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+
+	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
+		return -EINVAL;
+
+	if (!port->mapbase)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	port->cons = co;
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
+{
+	while (rd_regl(port,
+		SIRFUART_TX_FIFO_STATUS) & SIRFUART_FIFOFULL_MASK(port))
+		cpu_relax();
+	wr_regb(port, SIRFUART_TX_FIFO_DATA, ch);
+}
+
+static void sirfsoc_uart_console_write(struct console *co, const char *s,
+							unsigned int count)
+{
+	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+	uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
+}
+
+static struct console sirfsoc_uart_console = {
+	.name		= SIRFSOC_UART_NAME,
+	.device		= uart_console_device,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.write		= sirfsoc_uart_console_write,
+	.setup		= sirfsoc_uart_console_setup,
+	.data           = &sirfsoc_uart_drv,
+};
+
+static int __init sirfsoc_uart_console_init(void)
+{
+	register_console(&sirfsoc_uart_console);
+	return 0;
+}
+console_initcall(sirfsoc_uart_console_init);
+#endif
+
+static struct uart_driver sirfsoc_uart_drv = {
+	.owner		= THIS_MODULE,
+	.driver_name	= SIRFUART_PORT_NAME,
+	.nr		= SIRFSOC_UART_NR,
+	.dev_name	= SIRFSOC_UART_NAME,
+	.major		= SIRFSOC_UART_MAJOR,
+	.minor		= SIRFSOC_UART_MINOR,
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+	.cons			= &sirfsoc_uart_console,
+#else
+	.cons			= NULL,
+#endif
+};
+
+int sirfsoc_uart_probe(struct platform_device *pdev)
+{
+	struct sirfsoc_uart_port *sirfport;
+	struct uart_port *port;
+	struct resource *res;
+	int ret;
+
+	if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
+		dev_err(&pdev->dev,
+			"Unable to find cell-index in uart node.\n");
+		ret = -EFAULT;
+		goto err;
+	}
+
+	sirfport = &sirfsoc_uart_ports[pdev->id];
+	port = &sirfport->port;
+	port->dev = &pdev->dev;
+	port->private_data = sirfport;
+
+	if (of_find_property(pdev->dev.of_node, "hw_flow_ctrl", NULL))
+		sirfport->hw_flow_ctrl = 1;
+
+	if (of_property_read_u32(pdev->dev.of_node,
+			"fifosize",
+			&port->fifosize)) {
+		dev_err(&pdev->dev,
+			"Unable to find fifosize in uart node.\n");
+		ret = -EFAULT;
+		goto err;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Insufficient resources.\n");
+		ret = -EFAULT;
+		goto err;
+	}
+	port->mapbase = res->start;
+	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!port->membase) {
+		dev_err(&pdev->dev, "Cannot remap resource.\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Insufficient resources.\n");
+		ret = -EFAULT;
+		goto irq_err;
+	}
+	port->irq = res->start;
+
+	if (sirfport->hw_flow_ctrl) {
+		sirfport->pmx = pinmux_get(&pdev->dev, NULL);
+		ret = IS_ERR(sirfport->pmx);
+		if (ret)
+			goto pmx_err;
+
+		pinmux_enable(sirfport->pmx);
+	}
+
+	port->ops = &sirfsoc_uart_ops;
+	spin_lock_init(&port->lock);
+
+	platform_set_drvdata(pdev, sirfport);
+	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
+		goto port_err;
+	}
+
+	return 0;
+
+port_err:
+	platform_set_drvdata(pdev, NULL);
+	if (sirfport->hw_flow_ctrl) {
+		pinmux_disable(sirfport->pmx);
+		pinmux_put(sirfport->pmx);
+	}
+pmx_err:
+irq_err:
+	devm_iounmap(&pdev->dev, port->membase);
+err:
+	return ret;
+}
+
+static int sirfsoc_uart_remove(struct platform_device *pdev)
+{
+	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+	struct uart_port *port = &sirfport->port;
+	platform_set_drvdata(pdev, NULL);
+	if (sirfport->hw_flow_ctrl) {
+		pinmux_disable(sirfport->pmx);
+		pinmux_put(sirfport->pmx);
+	}
+	devm_iounmap(&pdev->dev, port->membase);
+	uart_remove_one_port(&sirfsoc_uart_drv, port);
+	return 0;
+}
+
+static int
+sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+	struct uart_port *port = &sirfport->port;
+	uart_suspend_port(&sirfsoc_uart_drv, port);
+	return 0;
+}
+
+static int sirfsoc_uart_resume(struct platform_device *pdev)
+{
+	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+	struct uart_port *port = &sirfport->port;
+	uart_resume_port(&sirfsoc_uart_drv, port);
+	return 0;
+}
+
+static struct of_device_id sirfsoc_uart_ids[] __devinitdata = {
+	{ .compatible = "sirf,prima2-uart", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
+
+static struct platform_driver sirfsoc_uart_driver = {
+	.probe		= sirfsoc_uart_probe,
+	.remove		= __devexit_p(sirfsoc_uart_remove),
+	.suspend	= sirfsoc_uart_suspend,
+	.resume		= sirfsoc_uart_resume,
+	.driver		= {
+		.name	= SIRFUART_PORT_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = sirfsoc_uart_ids,
+	},
+};
+
+static int __init sirfsoc_uart_init(void)
+{
+	int ret = 0;
+
+	ret = uart_register_driver(&sirfsoc_uart_drv);
+	if (ret)
+		goto out;
+
+	ret = platform_driver_register(&sirfsoc_uart_driver);
+	if (ret)
+		uart_unregister_driver(&sirfsoc_uart_drv);
+out:
+	return ret;
+}
+module_init(sirfsoc_uart_init);
+
+static void __exit sirfsoc_uart_exit(void)
+{
+	platform_driver_unregister(&sirfsoc_uart_driver);
+	uart_unregister_driver(&sirfsoc_uart_drv);
+}
+module_exit(sirfsoc_uart_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
+MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
new file mode 100644
index 0000000..fc64260
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -0,0 +1,185 @@
+/*
+ * Driver for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include <linux/bitops.h>
+
+/* UART Register Offset Define */
+#define SIRFUART_LINE_CTRL			0x0040
+#define SIRFUART_TX_RX_EN			0x004c
+#define SIRFUART_DIVISOR			0x0050
+#define SIRFUART_INT_EN				0x0054
+#define SIRFUART_INT_STATUS			0x0058
+#define SIRFUART_TX_DMA_IO_CTRL			0x0100
+#define SIRFUART_TX_DMA_IO_LEN			0x0104
+#define SIRFUART_TX_FIFO_CTRL			0x0108
+#define SIRFUART_TX_FIFO_LEVEL_CHK		0x010C
+#define SIRFUART_TX_FIFO_OP			0x0110
+#define SIRFUART_TX_FIFO_STATUS			0x0114
+#define SIRFUART_TX_FIFO_DATA			0x0118
+#define SIRFUART_RX_DMA_IO_CTRL			0x0120
+#define SIRFUART_RX_DMA_IO_LEN			0x0124
+#define SIRFUART_RX_FIFO_CTRL			0x0128
+#define SIRFUART_RX_FIFO_LEVEL_CHK		0x012C
+#define SIRFUART_RX_FIFO_OP			0x0130
+#define SIRFUART_RX_FIFO_STATUS			0x0134
+#define SIRFUART_RX_FIFO_DATA			0x0138
+#define SIRFUART_AFC_CTRL			0x0140
+#define SIRFUART_SWH_DMA_IO			0x0148
+
+/* UART Line Control Register */
+#define SIRFUART_DATA_BIT_LEN_MASK		0x3
+#define SIRFUART_DATA_BIT_LEN_5			0
+#define SIRFUART_DATA_BIT_LEN_6			1
+#define SIRFUART_DATA_BIT_LEN_7			2
+#define SIRFUART_DATA_BIT_LEN_8			3
+#define SIRFUART_STOP_BIT_LEN_1			0
+#define SIRFUART_STOP_BIT_LEN_2			BIT(2)
+#define SIRFUART_PARITY_EN			BIT(3)
+#define SIRFUART_EVEN_BIT			BIT(4)
+#define SIRFUART_STICK_BIT_MASK			(7 << 3)
+#define SIRFUART_STICK_BIT_NONE			(0 << 3)
+#define SIRFUART_STICK_BIT_EVEN			BIT(3)
+#define SIRFUART_STICK_BIT_ODD			(3 << 3)
+#define SIRFUART_STICK_BIT_MARK			(5 << 3)
+#define SIRFUART_STICK_BIT_SPACE		(7 << 3)
+#define SIRFUART_SET_BREAK			BIT(6)
+#define SIRFUART_LOOP_BACK			BIT(7)
+#define SIRFUART_PARITY_MASK			(7 << 3)
+#define SIRFUART_DUMMY_READ			BIT(16)
+
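+/*
+ * Editorial note: SIRFSOC_UART_RX_TIMEOUT() converts a timeout given in
+ * microseconds (rounded up to whole milliseconds) into bit periods at the
+ * given baud rate, which appears to be the unit counted by the RECV_TIMEOUT
+ * field of the line control register.
+ */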
+#define SIRFSOC_UART_RX_TIMEOUT(br, to)	(((br) * (((to) + 999) / 1000)) / 1000)
+#define SIRFUART_RECV_TIMEOUT_MASK	(0xFFFF << 16)
+#define SIRFUART_RECV_TIMEOUT(x)	(((x) & 0xFFFF) << 16)
+
+/* UART Auto Flow Control */
+#define SIRFUART_AFC_RX_THD_MASK		0x000000FF
+#define SIRFUART_AFC_RX_EN			BIT(8)
+#define SIRFUART_AFC_TX_EN			BIT(9)
+#define SIRFUART_CTS_CTRL			BIT(10)
+#define SIRFUART_RTS_CTRL			BIT(11)
+#define SIRFUART_CTS_IN_STATUS			BIT(12)
+#define SIRFUART_RTS_OUT_STATUS			BIT(13)
+
+/* UART Interrupt Enable Register */
+#define SIRFUART_RX_DONE_INT			BIT(0)
+#define SIRFUART_TX_DONE_INT			BIT(1)
+#define SIRFUART_RX_OFLOW_INT			BIT(2)
+#define SIRFUART_TX_ALLOUT_INT			BIT(3)
+#define SIRFUART_RX_IO_DMA_INT			BIT(4)
+#define SIRFUART_TX_IO_DMA_INT			BIT(5)
+#define SIRFUART_RXFIFO_FULL_INT		BIT(6)
+#define SIRFUART_TXFIFO_EMPTY_INT		BIT(7)
+#define SIRFUART_RXFIFO_THD_INT			BIT(8)
+#define SIRFUART_TXFIFO_THD_INT			BIT(9)
+#define SIRFUART_FRM_ERR_INT			BIT(10)
+#define SIRFUART_RXD_BREAK_INT			BIT(11)
+#define SIRFUART_RX_TIMEOUT_INT			BIT(12)
+#define SIRFUART_PARITY_ERR_INT			BIT(13)
+#define SIRFUART_CTS_INT_EN			BIT(14)
+#define SIRFUART_RTS_INT_EN			BIT(15)
+
+/* UART Interrupt Status Register */
+#define SIRFUART_RX_DONE			BIT(0)
+#define SIRFUART_TX_DONE			BIT(1)
+#define SIRFUART_RX_OFLOW			BIT(2)
+#define SIRFUART_TX_ALL_EMPTY			BIT(3)
+#define SIRFUART_DMA_IO_RX_DONE			BIT(4)
+#define SIRFUART_DMA_IO_TX_DONE			BIT(5)
+#define SIRFUART_RXFIFO_FULL			BIT(6)
+#define SIRFUART_TXFIFO_EMPTY			BIT(7)
+#define SIRFUART_RXFIFO_THD_REACH		BIT(8)
+#define SIRFUART_TXFIFO_THD_REACH		BIT(9)
+#define SIRFUART_FRM_ERR			BIT(10)
+#define SIRFUART_RXD_BREAK			BIT(11)
+#define SIRFUART_RX_TIMEOUT			BIT(12)
+#define SIRFUART_PARITY_ERR			BIT(13)
+#define SIRFUART_CTS_CHANGE			BIT(14)
+#define SIRFUART_RTS_CHANGE			BIT(15)
+#define SIRFUART_PLUG_IN			BIT(16)
+
+#define SIRFUART_ERR_INT_STAT					\
+				(SIRFUART_RX_OFLOW |		\
+				SIRFUART_FRM_ERR |		\
+				SIRFUART_RXD_BREAK |		\
+				SIRFUART_PARITY_ERR)
+#define SIRFUART_ERR_INT_EN					\
+				(SIRFUART_RX_OFLOW_INT |	\
+				SIRFUART_FRM_ERR_INT |		\
+				SIRFUART_RXD_BREAK_INT |	\
+				SIRFUART_PARITY_ERR_INT)
+#define SIRFUART_TX_INT_EN	SIRFUART_TXFIFO_EMPTY_INT
+#define SIRFUART_RX_IO_INT_EN					\
+				(SIRFUART_RX_TIMEOUT_INT |	\
+				SIRFUART_RXFIFO_THD_INT |	\
+				SIRFUART_RXFIFO_FULL_INT |	\
+				SIRFUART_ERR_INT_EN)
+
+/* UART FIFO Register */
+#define SIRFUART_TX_FIFO_STOP			0x0
+#define SIRFUART_TX_FIFO_RESET			0x1
+#define SIRFUART_TX_FIFO_START			0x2
+#define SIRFUART_RX_FIFO_STOP			0x0
+#define SIRFUART_RX_FIFO_RESET			0x1
+#define SIRFUART_RX_FIFO_START			0x2
+#define SIRFUART_TX_MODE_DMA			0
+#define SIRFUART_TX_MODE_IO			1
+#define SIRFUART_RX_MODE_DMA			0
+#define SIRFUART_RX_MODE_IO			1
+
+#define SIRFUART_RX_EN				0x1
+#define SIRFUART_TX_EN				0x2
+
+/* Generic Definitions */
+#define SIRFSOC_UART_NAME			"ttySiRF"
+#define SIRFSOC_UART_MAJOR			0
+#define SIRFSOC_UART_MINOR			0
+#define SIRFUART_PORT_NAME			"sirfsoc-uart"
+#define SIRFUART_MAP_SIZE			0x200
+#define SIRFSOC_UART_NR				3
+#define SIRFSOC_PORT_TYPE			0xa5
+
+/* Baud Rate Calculation */
+#define SIRF_MIN_SAMPLE_DIV			0xf
+#define SIRF_MAX_SAMPLE_DIV			0x3f
+#define SIRF_IOCLK_DIV_MAX			0xffff
+#define SIRF_SAMPLE_DIV_SHIFT			16
+#define SIRF_IOCLK_DIV_MASK			0xffff
+#define SIRF_SAMPLE_DIV_MASK			0x3f0000
+#define SIRF_BAUD_RATE_SUPPORT_NR		18
+
+/* For Fast Baud Rate Calculation */
+struct sirfsoc_baudrate_to_regv {
+	unsigned int baud_rate;
+	unsigned int reg_val;
+};
+
+struct sirfsoc_uart_port {
+	unsigned char			hw_flow_ctrl;
+	unsigned char			ms_enabled;
+
+	struct uart_port		port;
+	struct pinmux			*pmx;
+};
+
+/* Hardware Flow Control */
+#define SIRFUART_AFC_CTRL_RX_THD	0x70
+
+/* Register Access Control */
+#define portaddr(port, reg)		((port)->membase + (reg))
+#define rd_regb(port, reg)		(__raw_readb(portaddr(port, reg)))
+#define rd_regl(port, reg)		(__raw_readl(portaddr(port, reg)))
+#define wr_regb(port, reg, val)		__raw_writeb(val, portaddr(port, reg))
+#define wr_regl(port, reg, val)		__raw_writel(val, portaddr(port, reg))
+
+/* UART Port Mask */
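+/*
+ * Editorial note: port 1 appears to have a 32-entry FIFO (5-bit level field)
+ * while ports 0 and 2 have 128-entry FIFOs (7-bit level field); the FULL and
+ * EMPTY flags sit just above the level field, hence the per-port masks.
+ */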
+#define SIRFUART_FIFOLEVEL_MASK(port)	((port->line == 1) ? (0x1f) : (0x7f))
+#define SIRFUART_FIFOFULL_MASK(port)	((port->line == 1) ? (0x20) : (0x80))
+#define SIRFUART_FIFOEMPTY_MASK(port)	((port->line == 1) ? (0x40) : (0x100))
+
+/* I/O Mode */
+#define SIRFSOC_UART_IO_RX_MAX_CNT		256
+#define SIRFSOC_UART_IO_TX_REASONABLE_CNT	6
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index e76c8b7..70f97491 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -513,20 +513,7 @@
 	.remove		= __devexit_p(timbuart_remove),
 };
 
-/*--------------------------------------------------------------------------*/
-
-static int __init timbuart_init(void)
-{
-	return platform_driver_register(&timbuart_platform_driver);
-}
-
-static void __exit timbuart_exit(void)
-{
-	platform_driver_unregister(&timbuart_platform_driver);
-}
-
-module_init(timbuart_init);
-module_exit(timbuart_exit);
+module_platform_driver(timbuart_platform_driver);
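+/*
+ * Editorial note: module_platform_driver() expands to the module_init()/
+ * module_exit() boilerplate removed above, registering and unregistering
+ * the given platform_driver.
+ */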
 
 MODULE_DESCRIPTION("Timberdale UART driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index cea8918..2ebe606 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -963,6 +963,9 @@
 	/* Do we really need a spinlock here? */
 	spin_lock_irqsave(&port->lock, flags);
 
+	/* Update the per-port timeout. */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
 	out_be16(&uccp->upsmr, upsmr);
 	if (soft_uart) {
 		out_be16(&uccup->supsmr, supsmr);
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab..83148e7 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -961,18 +961,7 @@
 	},
 };
 
-static int __init vr41xx_siu_init(void)
-{
-	return platform_driver_register(&siu_device_driver);
-}
-
-static void __exit vr41xx_siu_exit(void)
-{
-	platform_driver_unregister(&siu_device_driver);
-}
-
-module_init(vr41xx_siu_init);
-module_exit(vr41xx_siu_exit);
+module_platform_driver(siu_device_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 43db715..7867b7c 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -32,7 +32,6 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/writeback.h>
-#include <linux/buffer_head.h>		/* for fsync_bdev() */
 #include <linux/swap.h>
 #include <linux/spinlock.h>
 #include <linux/vt_kern.h>
@@ -41,6 +40,7 @@
 #include <linux/oom.h>
 #include <linux/slab.h>
 #include <linux/input.h>
+#include <linux/uaccess.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 05085be..e41b9bb 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -790,19 +790,24 @@
 void disassociate_ctty(int on_exit)
 {
 	struct tty_struct *tty;
-	struct pid *tty_pgrp = NULL;
 
 	if (!current->signal->leader)
 		return;
 
 	tty = get_current_tty();
 	if (tty) {
-		tty_pgrp = get_pid(tty->pgrp);
+		struct pid *tty_pgrp = get_pid(tty->pgrp);
 		if (on_exit) {
 			if (tty->driver->type != TTY_DRIVER_TYPE_PTY)
 				tty_vhangup(tty);
 		}
 		tty_kref_put(tty);
+		if (tty_pgrp) {
+			kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+			if (!on_exit)
+				kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+			put_pid(tty_pgrp);
+		}
 	} else if (on_exit) {
 		struct pid *old_pgrp;
 		spin_lock_irq(&current->sighand->siglock);
@@ -816,12 +821,6 @@
 		}
 		return;
 	}
-	if (tty_pgrp) {
-		kill_pgrp(tty_pgrp, SIGHUP, on_exit);
-		if (!on_exit)
-			kill_pgrp(tty_pgrp, SIGCONT, on_exit);
-		put_pid(tty_pgrp);
-	}
 
 	spin_lock_irq(&current->sighand->siglock);
 	put_pid(current->signal->tty_old_pgrp);
@@ -1558,6 +1557,59 @@
 }
 
 /**
+ *	tty_release_checks - check a tty before real release
+ *	@tty: tty to check
+ *	@o_tty: link of @tty (if any)
+ *	@idx: index of the tty
+ *
+ *	Performs some paranoid checking before true release of the @tty.
+ *	This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ */
+static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
+		int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+	if (idx < 0 || idx >= tty->driver->num) {
+		printk(KERN_DEBUG "%s: bad idx when trying to free (%s)\n",
+				__func__, tty->name);
+		return -1;
+	}
+
+	/* not much to check for devpts */
+	if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+		return 0;
+
+	if (tty != tty->driver->ttys[idx]) {
+		printk(KERN_DEBUG "%s: driver.table[%d] not tty for (%s)\n",
+				__func__, idx, tty->name);
+		return -1;
+	}
+	if (tty->termios != tty->driver->termios[idx]) {
+		printk(KERN_DEBUG "%s: driver.termios[%d] not termios for (%s)\n",
+				__func__, idx, tty->name);
+		return -1;
+	}
+	if (tty->driver->other) {
+		if (o_tty != tty->driver->other->ttys[idx]) {
+			printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
+					__func__, idx, tty->name);
+			return -1;
+		}
+		if (o_tty->termios != tty->driver->other->termios[idx]) {
+			printk(KERN_DEBUG "%s: other->termios[%d] not o_termios for (%s)\n",
+					__func__, idx, tty->name);
+			return -1;
+		}
+		if (o_tty->link != tty) {
+			printk(KERN_DEBUG "%s: bad pty pointers\n", __func__);
+			return -1;
+		}
+	}
+#endif
+	return 0;
+}
+
+/**
  *	tty_release		-	vfs callback for close
  *	@inode: inode of tty
  *	@filp: file pointer for handle to tty
@@ -1585,11 +1637,11 @@
 	int	idx;
 	char	buf[64];
 
-	if (tty_paranoia_check(tty, inode, "tty_release_dev"))
+	if (tty_paranoia_check(tty, inode, __func__))
 		return 0;
 
 	tty_lock();
-	check_tty_count(tty, "tty_release_dev");
+	check_tty_count(tty, __func__);
 
 	__tty_fasync(-1, filp, 0);
 
@@ -1599,59 +1651,16 @@
 	devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
 	o_tty = tty->link;
 
-#ifdef TTY_PARANOIA_CHECK
-	if (idx < 0 || idx >= tty->driver->num) {
-		printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
-				  "free (%s)\n", tty->name);
+	if (tty_release_checks(tty, o_tty, idx)) {
 		tty_unlock();
 		return 0;
 	}
-	if (!devpts) {
-		if (tty != tty->driver->ttys[idx]) {
-			tty_unlock();
-			printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
-			       "for (%s)\n", idx, tty->name);
-			return 0;
-		}
-		if (tty->termios != tty->driver->termios[idx]) {
-			tty_unlock();
-			printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
-			       "for (%s)\n",
-			       idx, tty->name);
-			return 0;
-		}
-	}
-#endif
 
 #ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "tty_release_dev of %s (tty count=%d)...",
-	       tty_name(tty, buf), tty->count);
+	printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
+			tty_name(tty, buf), tty->count);
 #endif
 
-#ifdef TTY_PARANOIA_CHECK
-	if (tty->driver->other &&
-	     !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
-		if (o_tty != tty->driver->other->ttys[idx]) {
-			tty_unlock();
-			printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
-					  "not o_tty for (%s)\n",
-			       idx, tty->name);
-			return 0 ;
-		}
-		if (o_tty->termios != tty->driver->other->termios[idx]) {
-			tty_unlock();
-			printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
-					  "not o_termios for (%s)\n",
-			       idx, tty->name);
-			return 0;
-		}
-		if (o_tty->link != tty) {
-			tty_unlock();
-			printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
-			return 0;
-		}
-	}
-#endif
 	if (tty->ops->close)
 		tty->ops->close(tty, filp);
 
@@ -1707,8 +1716,8 @@
 		if (!do_sleep)
 			break;
 
-		printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
-				    "active!\n", tty_name(tty, buf));
+		printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
+				__func__, tty_name(tty, buf));
 		tty_unlock();
 		mutex_unlock(&tty_mutex);
 		schedule();
@@ -1721,15 +1730,14 @@
 	 */
 	if (pty_master) {
 		if (--o_tty->count < 0) {
-			printk(KERN_WARNING "tty_release_dev: bad pty slave count "
-					    "(%d) for %s\n",
-			       o_tty->count, tty_name(o_tty, buf));
+			printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n",
+				__func__, o_tty->count, tty_name(o_tty, buf));
 			o_tty->count = 0;
 		}
 	}
 	if (--tty->count < 0) {
-		printk(KERN_WARNING "tty_release_dev: bad tty->count (%d) for %s\n",
-		       tty->count, tty_name(tty, buf));
+		printk(KERN_WARNING "%s: bad tty->count (%d) for %s\n",
+				__func__, tty->count, tty_name(tty, buf));
 		tty->count = 0;
 	}
 
@@ -1778,7 +1786,7 @@
 	}
 
 #ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "freeing tty structure...");
+	printk(KERN_DEBUG "%s: freeing tty structure...\n", __func__);
 #endif
 	/*
 	 * Ask the line discipline code to release its structures
@@ -1798,6 +1806,83 @@
 }
 
 /**
+ *	tty_open_current_tty - get tty of current task for open
+ *	@device: device number
+ *	@filp: file pointer to tty
+ *	@return: tty of the current task iff @device is /dev/tty
+ *
+ *	Unlike the other nodes, we cannot return a driver and index here,
+ *	because devpts expects its inodes to come from the devpts filesystem.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+	struct tty_struct *tty;
+
+	if (device != MKDEV(TTYAUX_MAJOR, 0))
+		return NULL;
+
+	tty = get_current_tty();
+	if (!tty)
+		return ERR_PTR(-ENXIO);
+
+	filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+	/* noctty = 1; */
+	tty_kref_put(tty);
+	/* FIXME: we put a reference and return a TTY! */
+	return tty;
+}
+
+/**
+ *	tty_lookup_driver - lookup a tty driver for a given device file
+ *	@device: device number
+ *	@filp: file pointer to tty
+ *	@noctty: set if the device should not become a controlling tty
+ *	@index: index for the device in the @return driver
+ *	@return: driver for this inode (with increased refcount)
+ *
+ * 	If @return is not erroneous, the caller is responsible for decrementing
+ * 	the refcount via tty_driver_kref_put().
+ *
+ *	Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+		int *noctty, int *index)
+{
+	struct tty_driver *driver;
+
+	switch (device) {
+#ifdef CONFIG_VT
+	case MKDEV(TTY_MAJOR, 0): {
+		extern struct tty_driver *console_driver;
+		driver = tty_driver_kref_get(console_driver);
+		*index = fg_console;
+		*noctty = 1;
+		break;
+	}
+#endif
+	case MKDEV(TTYAUX_MAJOR, 1): {
+		struct tty_driver *console_driver = console_device(index);
+		if (console_driver) {
+			driver = tty_driver_kref_get(console_driver);
+			if (driver) {
+				/* Don't let /dev/console block */
+				filp->f_flags |= O_NONBLOCK;
+				*noctty = 1;
+				break;
+			}
+		}
+		return ERR_PTR(-ENODEV);
+	}
+	default:
+		driver = get_tty_driver(device, index);
+		if (!driver)
+			return ERR_PTR(-ENODEV);
+		break;
+	}
+	return driver;
+}
+
+/**
  *	tty_open		-	open a tty device
  *	@inode: inode of device file
  *	@filp: file pointer to tty
@@ -1813,16 +1898,16 @@
  *	The termios state of a pty is reset on first open so that
  *	settings don't persist across reuse.
  *
- *	Locking: tty_mutex protects tty, get_tty_driver and tty_init_dev work.
+ *	Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
  *		 tty->count should protect the rest.
  *		 ->siglock protects ->signal/->sighand
  */
 
 static int tty_open(struct inode *inode, struct file *filp)
 {
-	struct tty_struct *tty = NULL;
+	struct tty_struct *tty;
 	int noctty, retval;
-	struct tty_driver *driver;
+	struct tty_driver *driver = NULL;
 	int index;
 	dev_t device = inode->i_rdev;
 	unsigned saved_flags = filp->f_flags;
@@ -1841,66 +1926,22 @@
 	mutex_lock(&tty_mutex);
 	tty_lock();
 
-	if (device == MKDEV(TTYAUX_MAJOR, 0)) {
-		tty = get_current_tty();
-		if (!tty) {
-			tty_unlock();
-			mutex_unlock(&tty_mutex);
-			tty_free_file(filp);
-			return -ENXIO;
+	tty = tty_open_current_tty(device, filp);
+	if (IS_ERR(tty)) {
+		retval = PTR_ERR(tty);
+		goto err_unlock;
+	} else if (!tty) {
+		driver = tty_lookup_driver(device, filp, &noctty, &index);
+		if (IS_ERR(driver)) {
+			retval = PTR_ERR(driver);
+			goto err_unlock;
 		}
-		driver = tty_driver_kref_get(tty->driver);
-		index = tty->index;
-		filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
-		/* noctty = 1; */
-		/* FIXME: Should we take a driver reference ? */
-		tty_kref_put(tty);
-		goto got_driver;
-	}
-#ifdef CONFIG_VT
-	if (device == MKDEV(TTY_MAJOR, 0)) {
-		extern struct tty_driver *console_driver;
-		driver = tty_driver_kref_get(console_driver);
-		index = fg_console;
-		noctty = 1;
-		goto got_driver;
-	}
-#endif
-	if (device == MKDEV(TTYAUX_MAJOR, 1)) {
-		struct tty_driver *console_driver = console_device(&index);
-		if (console_driver) {
-			driver = tty_driver_kref_get(console_driver);
-			if (driver) {
-				/* Don't let /dev/console block */
-				filp->f_flags |= O_NONBLOCK;
-				noctty = 1;
-				goto got_driver;
-			}
-		}
-		tty_unlock();
-		mutex_unlock(&tty_mutex);
-		tty_free_file(filp);
-		return -ENODEV;
-	}
 
-	driver = get_tty_driver(device, &index);
-	if (!driver) {
-		tty_unlock();
-		mutex_unlock(&tty_mutex);
-		tty_free_file(filp);
-		return -ENODEV;
-	}
-got_driver:
-	if (!tty) {
 		/* check whether we're reopening an existing tty */
 		tty = tty_driver_lookup_tty(driver, inode, index);
-
 		if (IS_ERR(tty)) {
-			tty_unlock();
-			mutex_unlock(&tty_mutex);
-			tty_driver_kref_put(driver);
-			tty_free_file(filp);
-			return PTR_ERR(tty);
+			retval = PTR_ERR(tty);
+			goto err_unlock;
 		}
 	}
 
@@ -1912,21 +1953,22 @@
 		tty = tty_init_dev(driver, index, 0);
 
 	mutex_unlock(&tty_mutex);
-	tty_driver_kref_put(driver);
+	if (driver)
+		tty_driver_kref_put(driver);
 	if (IS_ERR(tty)) {
 		tty_unlock();
-		tty_free_file(filp);
-		return PTR_ERR(tty);
+		retval = PTR_ERR(tty);
+		goto err_file;
 	}
 
 	tty_add_file(tty, filp);
 
-	check_tty_count(tty, "tty_open");
+	check_tty_count(tty, __func__);
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_MASTER)
 		noctty = 1;
 #ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "opening %s...", tty->name);
+	printk(KERN_DEBUG "%s: opening %s...\n", __func__, tty->name);
 #endif
 	if (tty->ops->open)
 		retval = tty->ops->open(tty, filp);
@@ -1940,8 +1982,8 @@
 
 	if (retval) {
 #ifdef TTY_DEBUG_HANGUP
-		printk(KERN_DEBUG "error %d in opening %s...", retval,
-		       tty->name);
+		printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
+				retval, tty->name);
 #endif
 		tty_unlock(); /* need to call tty_release without BTM */
 		tty_release(inode, filp);
@@ -1976,6 +2018,15 @@
 	tty_unlock();
 	mutex_unlock(&tty_mutex);
 	return 0;
+err_unlock:
+	tty_unlock();
+	mutex_unlock(&tty_mutex);
+	/* after locks to avoid deadlock */
+	if (!IS_ERR_OR_NULL(driver))
+		tty_driver_kref_put(driver);
+err_file:
+	tty_free_file(filp);
+	return retval;
 }
 
 
@@ -3267,7 +3318,7 @@
 	}
 }
 
-static char *tty_devnode(struct device *dev, mode_t *mode)
+static char *tty_devnode(struct device *dev, umode_t *mode)
 {
 	if (!mode)
 		return NULL;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 8e0924f..24b95db 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -1,19 +1,11 @@
 #include <linux/types.h>
-#include <linux/major.h>
 #include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
+#include <linux/kmod.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
 #include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/slab.h>
@@ -24,18 +16,8 @@
 #include <linux/device.h>
 #include <linux/wait.h>
 #include <linux/bitops.h>
-#include <linux/delay.h>
 #include <linux/seq_file.h>
-
 #include <linux/uaccess.h>
-#include <asm/system.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-
-#include <linux/kmod.h>
-#include <linux/nsproxy.h>
 #include <linux/ratelimit.h>
 
 /*
@@ -558,8 +540,6 @@
 	long ret;
 	ret = wait_event_timeout(tty_ldisc_idle,
 			atomic_read(&tty->ldisc->users) == 1, timeout);
-	if (ret < 0)
-		return ret;
 	return ret > 0 ? 0 : -EBUSY;
 }
 
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 45d3e80..a0f3d6c 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -584,7 +584,7 @@
 			return 0;
 		dflt->refcount++;
 		*vc->vc_uni_pagedir_loc = (unsigned long)dflt;
-		if (p && --p->refcount) {
+		if (p && !--p->refcount) {
 			con_release_unimap(p);
 			kfree(p);
 		}
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
index ff50595..72d3646 100644
--- a/drivers/uio/uio_pdrv.c
+++ b/drivers/uio/uio_pdrv.c
@@ -104,17 +104,7 @@
 	},
 };
 
-static int __init uio_pdrv_init(void)
-{
-	return platform_driver_register(&uio_pdrv);
-}
-
-static void __exit uio_pdrv_exit(void)
-{
-	platform_driver_unregister(&uio_pdrv);
-}
-module_init(uio_pdrv_init);
-module_exit(uio_pdrv_exit);
+module_platform_driver(uio_pdrv);
 
 MODULE_AUTHOR("Uwe Kleine-Koenig");
 MODULE_DESCRIPTION("Userspace I/O platform driver");
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 25de302..b98371d 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -273,18 +273,7 @@
 	},
 };
 
-static int __init uio_pdrv_genirq_init(void)
-{
-	return platform_driver_register(&uio_pdrv_genirq);
-}
-
-static void __exit uio_pdrv_genirq_exit(void)
-{
-	platform_driver_unregister(&uio_pdrv_genirq);
-}
-
-module_init(uio_pdrv_genirq_init);
-module_exit(uio_pdrv_genirq_exit);
+module_platform_driver(uio_pdrv_genirq);
 
 MODULE_AUTHOR("Magnus Damm");
 MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index e67b566..33a7a27 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -227,19 +227,7 @@
 		   },
 };
 
-static int __init pruss_init_module(void)
-{
-	return platform_driver_register(&pruss_driver);
-}
-
-module_init(pruss_init_module);
-
-static void __exit pruss_exit_module(void)
-{
-	platform_driver_unregister(&pruss_driver);
-}
-
-module_exit(pruss_exit_module);
+module_platform_driver(pruss_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index a845f8b..98b89fe 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1372,18 +1372,7 @@
 	.id_table	= cxacru_usb_ids
 };
 
-static int __init cxacru_init(void)
-{
-	return usb_register(&cxacru_usb_driver);
-}
-
-static void __exit cxacru_cleanup(void)
-{
-	usb_deregister(&cxacru_usb_driver);
-}
-
-module_init(cxacru_init);
-module_exit(cxacru_cleanup);
+module_usb_driver(cxacru_usb_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 0842cfb..b42092e 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -953,22 +953,7 @@
 	return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver);
 }
 
-static int __init speedtch_usb_init(void)
-{
-	dbg("%s: driver version %s", __func__, DRIVER_VERSION);
-
-	return usb_register(&speedtch_usb_driver);
-}
-
-static void __exit speedtch_usb_cleanup(void)
-{
-	dbg("%s", __func__);
-
-	usb_deregister(&speedtch_usb_driver);
-}
-
-module_init(speedtch_usb_init);
-module_exit(speedtch_usb_cleanup);
+module_usb_driver(speedtch_usb_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 428f368..00f171a 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2753,36 +2753,7 @@
 
 MODULE_DEVICE_TABLE(usb, uea_ids);
 
-/**
- * uea_init - Initialize the module.
- *      Register to USB subsystem
- */
-static int __init uea_init(void)
-{
-	printk(KERN_INFO "[ueagle-atm] driver " EAGLEUSBVERSION " loaded\n");
-
-	usb_register(&uea_driver);
-
-	return 0;
-}
-
-module_init(uea_init);
-
-/**
- * uea_exit  -  Destroy module
- *    Deregister with USB subsystem
- */
-static void __exit uea_exit(void)
-{
-	/*
-	 * This calls automatically the uea_disconnect method if necessary:
-	 */
-	usb_deregister(&uea_driver);
-
-	printk(KERN_INFO "[ueagle-atm] driver unloaded\n");
-}
-
-module_exit(uea_exit);
+module_usb_driver(uea_driver);
 
 MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
 MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 57f2e10..9543b19 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1502,6 +1502,16 @@
 	},
 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
 	},
+	/* Motorola H24 HSPA module: */
+	{ USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+	{ USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+	{ USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+	{ USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+	{ USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+	{ USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+	{ USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+	{ USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
 	{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
 	.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
 					   data interface instead of
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index efe6849..1c50baf 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -895,24 +895,7 @@
 	.supports_autosuspend = 1,
 };
 
-/* --- low level module stuff --- */
-
-static int __init wdm_init(void)
-{
-	int rv;
-
-	rv = usb_register(&wdm_driver);
-
-	return rv;
-}
-
-static void __exit wdm_exit(void)
-{
-	usb_deregister(&wdm_driver);
-}
-
-module_init(wdm_init);
-module_exit(wdm_exit);
+module_usb_driver(wdm_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index cb3a932..a68c1a6 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1045,7 +1045,7 @@
 	.llseek =	noop_llseek,
 };
 
-static char *usblp_devnode(struct device *dev, mode_t *mode)
+static char *usblp_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
 }
@@ -1412,18 +1412,7 @@
 	.supports_autosuspend =	1,
 };
 
-static int __init usblp_init(void)
-{
-	return usb_register(&usblp_driver);
-}
-
-static void __exit usblp_exit(void)
-{
-	usb_deregister(&usblp_driver);
-}
-
-module_init(usblp_init);
-module_exit(usblp_exit);
+module_usb_driver(usblp_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 12cf5e7..70d69d0 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1116,21 +1116,6 @@
 	.resume		= usbtmc_resume,
 };
 
-static int __init usbtmc_init(void)
-{
-	int retcode;
-
-	retcode = usb_register(&usbtmc_driver);
-	if (retcode)
-		printk(KERN_ERR KBUILD_MODNAME": Unable to register driver\n");
-	return retcode;
-}
-module_init(usbtmc_init);
-
-static void __exit usbtmc_exit(void)
-{
-	usb_deregister(&usbtmc_driver);
-}
-module_exit(usbtmc_exit);
+module_usb_driver(usbtmc_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 99458c8..d95760d 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -66,7 +66,7 @@
 	struct class *class;
 } *usb_class;
 
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
 {
 	struct usb_class_driver *drv;
 
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2278dad..9e186f3 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -65,7 +65,7 @@
 static umode_t busmode = USBFS_DEFAULT_BUSMODE;
 static umode_t listmode = USBFS_DEFAULT_LISTMODE;
 
-static int usbfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int usbfs_show_options(struct seq_file *seq, struct dentry *root)
 {
 	if (devuid != 0)
 		seq_printf(seq, ",devuid=%u", devuid);
@@ -264,21 +264,19 @@
 		return -EINVAL;
 	}
 
-	if (usbfs_mount && usbfs_mount->mnt_sb)
+	if (usbfs_mount)
 		update_sb(usbfs_mount->mnt_sb);
 
 	return 0;
 }
 
-static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t dev)
+static struct inode *usbfs_get_inode (struct super_block *sb, umode_t mode, dev_t dev)
 {
 	struct inode *inode = new_inode(sb);
 
 	if (inode) {
 		inode->i_ino = get_next_ino();
-		inode->i_mode = mode;
-		inode->i_uid = current_fsuid();
-		inode->i_gid = current_fsgid();
+		inode_init_owner(inode, NULL, mode);
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		switch (mode & S_IFMT) {
 		default:
@@ -300,7 +298,7 @@
 }
 
 /* SMP-safe */
-static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
+static int usbfs_mknod (struct inode *dir, struct dentry *dentry, umode_t mode,
 			dev_t dev)
 {
 	struct inode *inode = usbfs_get_inode(dir->i_sb, mode, dev);
@@ -317,7 +315,7 @@
 	return error;
 }
 
-static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int res;
 
@@ -328,7 +326,7 @@
 	return res;
 }
 
-static int usbfs_create (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_create (struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	mode = (mode & S_IALLUGO) | S_IFREG;
 	return usbfs_mknod (dir, dentry, mode, 0);
@@ -489,7 +487,7 @@
  *
  * This function handles both regular files and directories.
  */
-static int fs_create_by_name (const char *name, mode_t mode,
+static int fs_create_by_name (const char *name, umode_t mode,
 			      struct dentry *parent, struct dentry **dentry)
 {
 	int error = 0;
@@ -500,9 +498,8 @@
 	 * have around.
 	 */
 	if (!parent ) {
-		if (usbfs_mount && usbfs_mount->mnt_sb) {
-			parent = usbfs_mount->mnt_sb->s_root;
-		}
+		if (usbfs_mount)
+			parent = usbfs_mount->mnt_root;
 	}
 
 	if (!parent) {
@@ -514,7 +511,7 @@
 	mutex_lock(&parent->d_inode->i_mutex);
 	*dentry = lookup_one_len(name, parent, strlen(name));
 	if (!IS_ERR(*dentry)) {
-		if ((mode & S_IFMT) == S_IFDIR)
+		if (S_ISDIR(mode))
 			error = usbfs_mkdir (parent->d_inode, *dentry, mode);
 		else 
 			error = usbfs_create (parent->d_inode, *dentry, mode);
@@ -525,7 +522,7 @@
 	return error;
 }
 
-static struct dentry *fs_create_file (const char *name, mode_t mode,
+static struct dentry *fs_create_file (const char *name, umode_t mode,
 				      struct dentry *parent, void *data,
 				      const struct file_operations *fops,
 				      uid_t uid, gid_t gid)
@@ -608,7 +605,7 @@
 
 	ignore_mount = 0;
 
-	parent = usbfs_mount->mnt_sb->s_root;
+	parent = usbfs_mount->mnt_root;
 	devices_usbfs_dentry = fs_create_file ("devices",
 					       listmode | S_IFREG, parent,
 					       NULL, &usbfs_devices_fops,
@@ -662,7 +659,7 @@
 
 	sprintf (name, "%03d", bus->busnum);
 
-	parent = usbfs_mount->mnt_sb->s_root;
+	parent = usbfs_mount->mnt_root;
 	bus->usbfs_dentry = fs_create_file (name, busmode | S_IFDIR, parent,
 					    bus, NULL, busuid, busgid);
 	if (bus->usbfs_dentry == NULL) {
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 662c0cf..9e491ca 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -642,7 +642,7 @@
 	NULL
 };
 
-static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
+static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -877,7 +877,7 @@
 	NULL,
 };
 
-static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
+static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 73cd900..1382c90 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -326,7 +326,7 @@
 #endif	/* CONFIG_PM */
 
 
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
 {
 	struct usb_device *usb_dev;
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 455bb1e..7c9df63 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -326,7 +326,7 @@
 		ret = -ENODEV;
 		goto err0;
 	}
-	dwc->revision = reg & DWC3_GSNPSREV_MASK;
+	dwc->revision = reg;
 
 	dwc3_core_soft_reset(dwc);
 
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 87d403d..433c97c 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -51,18 +51,13 @@
 #include "io.h"
 #include "debug.h"
 
-struct dwc3_register {
-	const char	*name;
-	u32		offset;
-};
-
 #define dump_register(nm)				\
 {							\
 	.name	= __stringify(nm),			\
 	.offset	= DWC3_ ##nm,				\
 }
 
-static const struct dwc3_register dwc3_regs[] = {
+static const struct debugfs_reg32 dwc3_regs[] = {
 	dump_register(GSBUSCFG0),
 	dump_register(GSBUSCFG1),
 	dump_register(GTXTHRCFG),
@@ -382,15 +377,10 @@
 static int dwc3_regdump_show(struct seq_file *s, void *unused)
 {
 	struct dwc3		*dwc = s->private;
-	int			i;
 
 	seq_printf(s, "DesignWare USB3 Core Register Dump\n");
-
-	for (i = 0; i < ARRAY_SIZE(dwc3_regs); i++) {
-		seq_printf(s, "%-20s :    %08x\n", dwc3_regs[i].name,
-				dwc3_readl(dwc->regs, dwc3_regs[i].offset));
-	}
-
+	debugfs_print_regs32(s, dwc3_regs, ARRAY_SIZE(dwc3_regs),
+			     dwc->regs, "");
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1fe87aa..7ecb68a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -263,7 +263,6 @@
 config USB_S3C_HSOTG
 	tristate "S3C HS/OtG USB Device controller"
 	depends on S3C_DEV_USB_HSOTG
-	select USB_GADGET_S3C_HSOTG_PIO
 	select USB_GADGET_DUALSPEED
 	help
 	  The Samsung S3C64XX USB2.0 high-speed gadget controller
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 38bcbfb..753aa06 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -130,9 +130,6 @@
 			num_req_streams = ep_comp->bmAttributes & 0x1f;
 			if (num_req_streams > ep->max_streams)
 				return 0;
-			/* Update the ep_comp descriptor if needed */
-			if (num_req_streams != ep->max_streams)
-				ep_comp->bmAttributes = ep->max_streams;
 		}
 
 	}
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index da9d048..f63dc6c 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1037,7 +1037,6 @@
 {
 	struct ffs_sb_fill_data *data = _data;
 	struct inode	*inode;
-	struct dentry	*d;
 	struct ffs_data	*ffs;
 
 	ENTER();
@@ -1045,7 +1044,7 @@
 	/* Initialise data */
 	ffs = ffs_data_new();
 	if (unlikely(!ffs))
-		goto enomem0;
+		goto Enomem;
 
 	ffs->sb              = sb;
 	ffs->dev_name        = data->dev_name;
@@ -1065,26 +1064,21 @@
 				  &simple_dir_inode_operations,
 				  &data->perms);
 	if (unlikely(!inode))
-		goto enomem1;
-	d = d_alloc_root(inode);
-	if (unlikely(!d))
-		goto enomem2;
-	sb->s_root = d;
+		goto Enomem;
+	sb->s_root = d_alloc_root(inode);
+	if (unlikely(!sb->s_root)) {
+		iput(inode);
+		goto Enomem;
+	}
 
 	/* EP0 file */
 	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
 					 &ffs_ep0_operations, NULL)))
-		goto enomem3;
+		goto Enomem;
 
 	return 0;
 
-enomem3:
-	dput(d);
-enomem2:
-	iput(inode);
-enomem1:
-	ffs_data_put(ffs);
-enomem0:
+Enomem:
 	return -ENOMEM;
 }
 
@@ -1196,14 +1190,11 @@
 static void
 ffs_fs_kill_sb(struct super_block *sb)
 {
-	void *ptr;
-
 	ENTER();
 
 	kill_litter_super(sb);
-	ptr = xchg(&sb->s_fs_info, NULL);
-	if (ptr)
-		ffs_data_put(ptr);
+	if (sb->s_fs_info)
+		ffs_data_put(sb->s_fs_info);
 }
 
 static struct file_system_type ffs_fs_type = {
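
Note: the collapsed error path above (and the matching gadgetfs change further down) relies on two VFS properties of this era: d_alloc_root() consumes the inode reference on success but not on failure, and once sb->s_root is set, the later ffs_fs_kill_sb()/kill_litter_super() teardown releases everything hung off the superblock. A minimal sketch of the idiom, assuming those semantics (not code from this patch):

static int example_fill_super(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;
	/* d_alloc_root() takes over the inode reference on success ... */
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);		/* ... so only the failure path puts it */
		return -ENOMEM;
	}
	/* anything created under sb->s_root from here on is torn down by the
	 * filesystem's kill_sb(), so no chain of unwind labels is needed */
	return 0;
}
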
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a18ebee..6353eca 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2987,6 +2987,7 @@
 	fsg_common_put(common);
 	usb_free_descriptors(fsg->function.descriptors);
 	usb_free_descriptors(fsg->function.hs_descriptors);
+	usb_free_descriptors(fsg->function.ss_descriptors);
 	kfree(fsg);
 }
 
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 16a509a..7cdcb63 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -298,11 +298,10 @@
 static int
 pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 {
-	struct net_device *dev = fp->dev;
 	struct page *page;
 	int err;
 
-	page = __netdev_alloc_page(dev, gfp_flags);
+	page = alloc_page(gfp_flags);
 	if (!page)
 		return -ENOMEM;
 
@@ -312,7 +311,7 @@
 
 	err = usb_ep_queue(fp->out_ep, req, gfp_flags);
 	if (unlikely(err))
-		netdev_free_page(dev, page);
+		put_page(page);
 	return err;
 }
 
@@ -374,9 +373,9 @@
 	}
 
 	if (page)
-		netdev_free_page(dev, page);
+		put_page(page);
 	if (req)
-		pn_rx_submit(fp, req, GFP_ATOMIC);
+		pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@
 
 			netif_carrier_on(dev);
 			for (i = 0; i < phonet_rxq_size; i++)
-				pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+				pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
 		}
 		spin_unlock(&port->lock);
 		return 0;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 9361251..ae04266 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2035,7 +2035,6 @@
 gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
 {
 	struct inode	*inode;
-	struct dentry	*d;
 	struct dev_data	*dev;
 
 	if (the_device)
@@ -2058,24 +2057,27 @@
 			NULL, &simple_dir_operations,
 			S_IFDIR | S_IRUGO | S_IXUGO);
 	if (!inode)
-		goto enomem0;
+		goto Enomem;
 	inode->i_op = &simple_dir_inode_operations;
-	if (!(d = d_alloc_root (inode)))
-		goto enomem1;
-	sb->s_root = d;
+	if (!(sb->s_root = d_alloc_root (inode))) {
+		iput(inode);
+		goto Enomem;
+	}
 
 	/* the ep0 file is named after the controller we expect;
 	 * user mode code can use it for sanity checks, like we do.
 	 */
 	dev = dev_new ();
 	if (!dev)
-		goto enomem2;
+		goto Enomem;
 
 	dev->sb = sb;
 	if (!gadgetfs_create_file (sb, CHIP,
 				dev, &dev_init_operations,
-				&dev->dentry))
-		goto enomem3;
+				&dev->dentry)) {
+		put_dev(dev);
+		goto Enomem;
+	}
 
 	/* other endpoint files are available after hardware setup,
 	 * from binding to a controller.
@@ -2083,13 +2085,7 @@
 	the_device = dev;
 	return 0;
 
-enomem3:
-	put_dev (dev);
-enomem2:
-	dput (d);
-enomem1:
-	iput (inode);
-enomem0:
+Enomem:
 	return -ENOMEM;
 }
 
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 4d860e9..3f87cb9 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -3,7 +3,7 @@
  *
  * Samsung S3C24xx series on-chip full speed USB device controllers
  *
- * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
  *	Additional cleanups by Ben Dooks <ben-linux@fluff.org>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@
 
 #define DRIVER_DESC	"S3C2410 USB Device Controller Gadget"
 #define DRIVER_VERSION	"29 Apr 2007"
-#define DRIVER_AUTHOR	"Herbert Pötzl <herbert@13thfloor.at>, " \
+#define DRIVER_AUTHOR	"Herbert Pötzl <herbert@13thfloor.at>, " \
 			"Arnaud Patard <arnaud.patard@rtp-net.org>"
 
 static const char		gadget_name[] = "s3c2410_udc";
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
index a48f619..1653bae 100644
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -2,7 +2,7 @@
  * linux/drivers/usb/gadget/s3c2410_udc.h
  * Samsung on-chip full speed USB device controllers
  *
- * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
  *	Additional cleanups by Ben Dooks <ben-linux@fluff.org>
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 565d79f..104730d 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -481,7 +481,7 @@
 		encryption_value = 0;
 	}
 
-	/* Set the encryption type for commmunicating with the device */
+	/* Set the encryption type for communicating with the device */
 	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
 			USB_REQ_SET_ENCRYPTION,
 			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
@@ -836,18 +836,7 @@
 	.id_table =	hwahc_id_table,
 };
 
-static int __init hwahc_driver_init(void)
-{
-	return usb_register(&hwahc_driver);
-}
-module_init(hwahc_driver_init);
-
-static void __exit hwahc_driver_exit(void)
-{
-	usb_deregister(&hwahc_driver);
-}
-module_exit(hwahc_driver_exit);
-
+module_usb_driver(hwahc_driver);
 
 MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
 MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
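
Note: this is the first of many hunks in this merge that drop hand-written init/exit boilerplate in favour of module_usb_driver(). Assuming the helper matches the macro added to include/linux/usb.h for this series (a sketch, not quoted from the diff), the replacement generates the same registration pair each driver used to spell out, for example:

/* module_usb_driver(hwahc_driver) expands to code equivalent to: */
static int __init hwahc_driver_init(void)
{
	return usb_register(&hwahc_driver);
}
module_init(hwahc_driver_init);

static void __exit hwahc_driver_exit(void)
{
	usb_deregister(&hwahc_driver);
}
module_exit(hwahc_driver_exit);

The only visible difference in the conversions below is that the per-driver banner printk()s and failure messages disappear.
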
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 6923bcb..ff471c1 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -473,7 +473,7 @@
 /* End handling 				*/
 /* ===========================================	*/
 
-/* Endpoint now idle - release it's ETD(s) or asssign to queued request */
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
 {
 	int i;
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 61de62c..4592dc1 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
 	struct usb_hcd *hcd;
 	int rst_gpio;
@@ -442,7 +442,7 @@
 	ret = platform_driver_register(&isp1760_plat_driver);
 	if (!ret)
 		any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 	ret = platform_driver_register(&isp1760_of_driver);
 	if (!ret)
 		any_ret = 0;
@@ -462,7 +462,7 @@
 static void __exit isp1760_exit(void)
 {
 	platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 	platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 27e209a..9c0f8ca 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -809,19 +809,7 @@
 	kfree(desc);
 }
 
-
-static int __init microtek_drv_init(void)
-{
-	return usb_register(&mts_usb_driver);
-}
-
-static void __exit microtek_drv_exit(void)
-{
-	usb_deregister(&mts_usb_driver);
-}
-
-module_init(microtek_drv_init);
-module_exit(microtek_drv_exit);
+module_usb_driver(mts_usb_driver);
 
 MODULE_AUTHOR( DRIVER_AUTHOR );
 MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index fe85871..284b854 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -885,40 +885,7 @@
 	.id_table = device_table,
 };
 
-static int __init adu_init(void)
-{
-	int result;
-
-	dbg(2," %s : enter", __func__);
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&adu_driver);
-	if (result < 0) {
-		printk(KERN_ERR "usb_register failed for the "__FILE__
-		       " driver. Error number %d\n", result);
-		goto exit;
-	}
-
-	printk(KERN_INFO "adutux " DRIVER_DESC " " DRIVER_VERSION "\n");
-	printk(KERN_INFO "adutux is an experimental driver. "
-	       "Use at your own risk\n");
-
-exit:
-	dbg(2," %s : leave, return value %d", __func__, result);
-
-	return result;
-}
-
-static void __exit adu_exit(void)
-{
-	dbg(2," %s : enter", __func__);
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&adu_driver);
-	dbg(2," %s : leave", __func__);
-}
-
-module_init(adu_init);
-module_exit(adu_exit);
+module_usb_driver(adu_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 9251773..3f7c1a9 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -271,27 +271,7 @@
 	.id_table = cypress_table,
 };
 
-static int __init cypress_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&cypress_driver);
-	if (result)
-		printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
-		       "Error number: %d\n", result);
-
-	return result;
-}
-
-static void __exit cypress_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&cypress_driver);
-}
-
-module_init(cypress_init);
-module_exit(cypress_exit);
+module_usb_driver(cypress_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 1d7251b..5b9831b 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -417,31 +417,7 @@
 	dev_info(&interface->dev, "Cypress thermometer now disconnected\n");
 }
 
-
-static int __init usb_cytherm_init(void)
-{
-	int result;
-
-	result = usb_register(&cytherm_driver);
-	if (result) {
-		printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
-		       "Error number: %d\n", result);
-		return result;
-	}
-
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-	return 0;
-}
-
-static void __exit usb_cytherm_exit(void)
-{
-	usb_deregister(&cytherm_driver);
-}
-
-
-module_init (usb_cytherm_init);
-module_exit (usb_cytherm_exit);
+module_usb_driver(cytherm_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index a6521c95..d9b6a03 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -276,18 +276,7 @@
 	.id_table	= id_table,
 };
 
-static int __init emi26_init (void)
-{
-	return usb_register(&emi26_driver);
-}
-
-static void __exit emi26_exit (void)
-{
-	usb_deregister (&emi26_driver);
-}
-
-module_init(emi26_init);
-module_exit(emi26_exit);
+module_usb_driver(emi26_driver);
 
 MODULE_AUTHOR("Tapio Laxström");
 MODULE_DESCRIPTION("Emagic EMI 2|6 firmware loader.");
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index fc15ad4..9f39062 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -290,22 +290,7 @@
 	.id_table	= id_table,
 };
 
-static int __init emi62_init (void)
-{
-	int retval;
-	retval = usb_register (&emi62_driver);
-	if (retval)
-		printk(KERN_ERR "adi-emi: registration failed\n");
-	return retval;
-}
-
-static void __exit emi62_exit (void)
-{
-	usb_deregister (&emi62_driver);
-}
-
-module_init(emi62_init);
-module_exit(emi62_exit);
+module_usb_driver(emi62_driver);
 
 MODULE_AUTHOR("Tapio Laxström");
 MODULE_DESCRIPTION("Emagic EMI 6|2m firmware loader.");
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 515b67f..0dee246 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -428,29 +428,7 @@
 	dev_info(&interface->dev, "disconnected\n");
 }
 
-static int __init usb_idmouse_init(void)
-{
-	int result;
-
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&idmouse_driver);
-	if (result)
-		err("Unable to register device (error %d).", result);
-
-	return result;
-}
-
-static void __exit usb_idmouse_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&idmouse_driver);
-}
-
-module_init(usb_idmouse_init);
-module_exit(usb_idmouse_exit);
+module_usb_driver(idmouse_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 8145790..2453a39 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -734,7 +734,7 @@
 	.llseek = noop_llseek,
 };
 
-static char *iowarrior_devnode(struct device *dev, mode_t *mode)
+static char *iowarrior_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
 }
@@ -927,15 +927,4 @@
 	.id_table = iowarrior_ids,
 };
 
-static int __init iowarrior_init(void)
-{
-	return usb_register(&iowarrior_driver);
-}
-
-static void __exit iowarrior_exit(void)
-{
-	usb_deregister(&iowarrior_driver);
-}
-
-module_init(iowarrior_init);
-module_exit(iowarrior_exit);
+module_usb_driver(iowarrior_driver);
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index 8f725f6..1c61830 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -128,18 +128,7 @@
 	.id_table = id_table,
 };
 
-static int __init isight_firmware_init(void)
-{
-	return usb_register(&isight_firmware_driver);
-}
-
-static void __exit isight_firmware_exit(void)
-{
-	usb_deregister(&isight_firmware_driver);
-}
-
-module_init(isight_firmware_init);
-module_exit(isight_firmware_exit);
+module_usb_driver(isight_firmware_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 48c166f..5db4ab5 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -821,30 +821,5 @@
 	.id_table =	ld_usb_table,
 };
 
-/**
- *	ld_usb_init
- */
-static int __init ld_usb_init(void)
-{
-	int retval;
-
-	/* register this driver with the USB subsystem */
-	retval = usb_register(&ld_usb_driver);
-	if (retval)
-		err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
-
-	return retval;
-}
-
-/**
- *	ld_usb_exit
- */
-static void __exit ld_usb_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&ld_usb_driver);
-}
-
-module_init(ld_usb_init);
-module_exit(ld_usb_exit);
+module_usb_driver(ld_usb_driver);
 
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index a989356..5752220 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -269,7 +269,7 @@
 	.llseek =	tower_llseek,
 };
 
-static char *legousbtower_devnode(struct device *dev, mode_t *mode)
+static char *legousbtower_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
 }
@@ -1043,51 +1043,7 @@
 	dbg(2, "%s: leave", __func__);
 }
 
-
-
-/**
- *	lego_usb_tower_init
- */
-static int __init lego_usb_tower_init(void)
-{
-	int result;
-	int retval = 0;
-
-	dbg(2, "%s: enter", __func__);
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&tower_driver);
-	if (result < 0) {
-		err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
-		retval = -1;
-		goto exit;
-	}
-
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-
-exit:
-	dbg(2, "%s: leave, return value %d", __func__, retval);
-
-	return retval;
-}
-
-
-/**
- *	lego_usb_tower_exit
- */
-static void __exit lego_usb_tower_exit(void)
-{
-	dbg(2, "%s: enter", __func__);
-
-	/* deregister this driver with the USB subsystem */
-	usb_deregister (&tower_driver);
-
-	dbg(2, "%s: leave", __func__);
-}
-
-module_init (lego_usb_tower_init);
-module_exit (lego_usb_tower_exit);
+module_usb_driver(tower_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 4e23d38..487a8ce 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -531,33 +531,7 @@
 	.id_table =	rio_table,
 };
 
-static int __init usb_rio_init(void)
-{
-	int retval;
-	retval = usb_register(&rio_driver);
-	if (retval)
-		goto out;
-
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-
-out:
-	return retval;
-}
-
-
-static void __exit usb_rio_cleanup(void)
-{
-	struct rio_usb_data *rio = &rio_instance;
-
-	rio->present = 0;
-	usb_deregister(&rio_driver);
-
-
-}
-
-module_init(usb_rio_init);
-module_exit(usb_rio_cleanup);
+module_usb_driver(rio_driver);
 
 MODULE_AUTHOR( DRIVER_AUTHOR );
 MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index f63776a..741efed 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -137,26 +137,7 @@
 	.id_table =	id_table,
 };
 
-static int __init tv_init(void)
-{
-	int retval = usb_register(&tv_driver);
-	if (retval) {
-		err("usb_register failed. Error number %d", retval);
-		return retval;
-	}
-
-	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
-	       DRIVER_DESC "\n");
-	return 0;
-}
-
-static void __exit tv_exit(void)
-{
-	usb_deregister(&tv_driver);
-}
-
-module_init (tv_init);
-module_exit (tv_exit);
+module_usb_driver(tv_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 1871cdf..e2b4bd3 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -450,25 +450,7 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init usb_lcd_init(void)
-{
-	int result;
-
-	result = usb_register(&lcd_driver);
-	if (result)
-		err("usb_register failed. Error number %d", result);
-
-	return result;
-}
-
-
-static void __exit usb_lcd_exit(void)
-{
-	usb_deregister(&lcd_driver);
-}
-
-module_init(usb_lcd_init);
-module_exit(usb_lcd_exit);
+module_usb_driver(lcd_driver);
 
 MODULE_AUTHOR("Georges Toth <g.toth@e-biz.lu>");
 MODULE_DESCRIPTION(DRIVER_VERSION);
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 43f84e5..4af56fb 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -231,23 +231,7 @@
 	.id_table =	id_table,
 };
 
-static int __init usb_led_init(void)
-{
-	int retval = 0;
-
-	retval = usb_register(&led_driver);
-	if (retval)
-		err("usb_register failed. Error number %d", retval);
-	return retval;
-}
-
-static void __exit usb_led_exit(void)
-{
-	usb_deregister(&led_driver);
-}
-
-module_init(usb_led_init);
-module_exit(usb_led_exit);
+module_usb_driver(led_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 417b8f2..107bf13 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -437,23 +437,7 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init usb_sevseg_init(void)
-{
-	int rc = 0;
-
-	rc = usb_register(&sevseg_driver);
-	if (rc)
-		err("usb_register failed. Error number %d", rc);
-	return rc;
-}
-
-static void __exit usb_sevseg_exit(void)
-{
-	usb_deregister(&sevseg_driver);
-}
-
-module_init(usb_sevseg_init);
-module_exit(usb_sevseg_exit);
+module_usb_driver(sevseg_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index ac5bfd6..897edda 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -539,26 +539,6 @@
 	.llseek =	default_llseek,
 };
 
-
-static int __init usb_yurex_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&yurex_driver);
-	if (result)
-		err("usb_register failed. Error number %d", result);
-
-	return result;
-}
-
-static void __exit usb_yurex_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&yurex_driver);
-}
-
-module_init(usb_yurex_init);
-module_exit(usb_yurex_exit);
+module_usb_driver(yurex_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 318fb4e..53be7ae 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -513,7 +513,7 @@
 		if (!(val & MUSB_RXCSR_H_REQPKT)) {
 			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
 			musb_writew(regs, MUSB_RXCSR, val);
-			/* flush writebufer */
+			/* flush writebuffer */
 			val = musb_readw(regs, MUSB_RXCSR);
 		}
 	}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 60ddba8..79cb0af 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -774,6 +774,10 @@
 			if (musb->double_buffer_not_ok)
 				musb_writew(epio, MUSB_TXMAXP,
 						hw_ep->max_packet_sz_tx);
+			else if (can_bulk_split(musb, qh->type))
+				musb_writew(epio, MUSB_TXMAXP, packet_sz
+					| ((hw_ep->max_packet_sz_tx /
+						packet_sz) - 1) << 11);
 			else
 				musb_writew(epio, MUSB_TXMAXP,
 						qh->maxpacket |
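
Note: the new can_bulk_split() branch packs two values into TXMAXP: the low bits keep the USB packet size, and the field starting at bit 11 holds how many such packets fit into the endpoint FIFO, minus one. Purely as an illustration (the 4 KiB FIFO size is an assumption, not taken from this hunk, and the bit-11 interpretation follows the shift used in the code), the value works out as:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t packet_sz = 512;	/* wMaxPacketSize of a high-speed bulk endpoint */
	uint16_t fifo_sz   = 4096;	/* assumed hw_ep->max_packet_sz_tx */
	uint16_t txmaxp    = packet_sz | (((fifo_sz / packet_sz) - 1) << 11);

	printf("TXMAXP = 0x%04x\n", txmaxp);	/* 0x3a00: 512-byte packets, split factor 8 */
	return 0;
}
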
diff --git a/drivers/usb/otg/fsl_otg.c b/drivers/usb/otg/fsl_otg.c
index 2a52ff1..a190850 100644
--- a/drivers/usb/otg/fsl_otg.c
+++ b/drivers/usb/otg/fsl_otg.c
@@ -639,7 +639,7 @@
  * Delayed pin detect interrupt processing.
  *
  * When the Mini-A cable is disconnected from the board,
- * the pin-detect interrupt happens before the disconnnect
+ * the pin-detect interrupt happens before the disconnect
  * interrupts for the connected device(s).  In order to
  * process the disconnect interrupt(s) prior to switching
  * roles, the pin-detect interrupts are delayed, and handled
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index f382e43..1b97fb1 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -351,7 +351,7 @@
 		if (mod->irq_attch)
 			intenb1 |= ATTCHE;
 
-		if (mod->irq_attch)
+		if (mod->irq_dtch)
 			intenb1 |= DTCHE;
 
 		if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index df8363d..1834cf5 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1515,6 +1515,7 @@
 		dev_err(dev, "Failed to create hcd\n");
 		return -ENOMEM;
 	}
+	hcd->has_tt = 1; /* for low/full speed */
 
 	/*
 	 * CAUTION
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 055b64e..df1d7da 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -2,7 +2,7 @@
  * vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
  * Please keep numerically sorted within individual areas, thanks!
  *
- * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
  * from Rudolf Gugler
  *
  */
@@ -78,7 +78,7 @@
  */
 #define FTDI_ASK_RDR400_PID	0xC991	/* ASK RDR 400 series card reader */
 
-/* www.starting-point-systems.com µChameleon device */
+/* www.starting-point-systems.com µChameleon device */
 #define FTDI_MICRO_CHAMELEON_PID	0xCAA0	/* Product Id */
 
 /*
@@ -291,7 +291,7 @@
 
 /*
  * Teratronik product ids.
- * Submitted by O. Wölfelschneider.
+ * Submitted by O. Wölfelschneider.
  */
 #define FTDI_TERATRONIK_VCP_PID	 0xEC88	/* Teratronik device (preferring VCP driver on windows) */
 #define FTDI_TERATRONIK_D2XX_PID 0xEC89	/* Teratronik device (preferring D2XX driver on windows) */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3745d14..c96b6b6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -667,7 +667,12 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 9ce3bba..51af2fe 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -1278,15 +1278,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init alauda_init(void)
-{
-	return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
-	usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
+module_usb_driver(alauda_driver);
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 740bfe6..387cbd47 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -274,15 +274,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init cypress_init(void)
-{
-	return usb_register(&cypress_driver);
-}
-
-static void __exit cypress_exit(void)
-{
-	usb_deregister(&cypress_driver);
-}
-
-module_init(cypress_init);
-module_exit(cypress_exit);
+module_usb_driver(cypress_driver);
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index 0d8d97c..15d41f2 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -753,15 +753,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init datafab_init(void)
-{
-	return usb_register(&datafab_driver);
-}
-
-static void __exit datafab_exit(void)
-{
-	usb_deregister(&datafab_driver);
-}
-
-module_init(datafab_init);
-module_exit(datafab_exit);
+module_usb_driver(datafab_driver);
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index b990726..a6ade40 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -2409,15 +2409,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init ene_ub6250_init(void)
-{
-	return usb_register(&ene_ub6250_driver);
-}
-
-static void __exit ene_ub6250_exit(void)
-{
-	usb_deregister(&ene_ub6250_driver);
-}
-
-module_init(ene_ub6250_init);
-module_exit(ene_ub6250_exit);
+module_usb_driver(ene_ub6250_driver);
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 8cf16f8..fa16157 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -555,15 +555,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init freecom_init(void)
-{
-	return usb_register(&freecom_driver);
-}
-
-static void __exit freecom_exit(void)
-{
-	usb_deregister(&freecom_driver);
-}
-
-module_init(freecom_init);
-module_exit(freecom_exit);
+module_usb_driver(freecom_driver);
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 7b81303..bd55027 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -61,7 +61,7 @@
 #include "scsiglue.h"
 
 MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC");
-MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>");
+MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>");
 MODULE_LICENSE("GPL");
 
 static int isd200_Initialization(struct us_data *us);
@@ -1568,15 +1568,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init isd200_init(void)
-{
-	return usb_register(&isd200_driver);
-}
-
-static void __exit isd200_exit(void)
-{
-	usb_deregister(&isd200_driver);
-}
-
-module_init(isd200_init);
-module_exit(isd200_exit);
+module_usb_driver(isd200_driver);
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 5ef55c7..a19211b 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -679,15 +679,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init jumpshot_init(void)
-{
-	return usb_register(&jumpshot_driver);
-}
-
-static void __exit jumpshot_exit(void)
-{
-	usb_deregister(&jumpshot_driver);
-}
-
-module_init(jumpshot_init);
-module_exit(jumpshot_exit);
+module_usb_driver(jumpshot_driver);
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index fb5bfb0..e720f8e 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -232,15 +232,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init karma_init(void)
-{
-	return usb_register(&karma_driver);
-}
-
-static void __exit karma_exit(void)
-{
-	usb_deregister(&karma_driver);
-}
-
-module_init(karma_init);
-module_exit(karma_exit);
+module_usb_driver(karma_driver);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index d29be3e..d75155c 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -314,15 +314,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init onetouch_init(void)
-{
-	return usb_register(&onetouch_driver);
-}
-
-static void __exit onetouch_exit(void)
-{
-	usb_deregister(&onetouch_driver);
-}
-
-module_init(onetouch_init);
-module_exit(onetouch_exit);
+module_usb_driver(onetouch_driver);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 7114767..1f62723 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -1102,15 +1102,4 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init realtek_cr_init(void)
-{
-	return usb_register(&realtek_cr_driver);
-}
-
-static void __exit realtek_cr_exit(void)
-{
-	usb_deregister(&realtek_cr_driver);
-}
-
-module_init(realtek_cr_init);
-module_exit(realtek_cr_exit);
+module_usb_driver(realtek_cr_driver);
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 6ecbf44..425df7d 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1789,15 +1789,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init sddr09_init(void)
-{
-	return usb_register(&sddr09_driver);
-}
-
-static void __exit sddr09_exit(void)
-{
-	usb_deregister(&sddr09_driver);
-}
-
-module_init(sddr09_init);
-module_exit(sddr09_exit);
+module_usb_driver(sddr09_driver);
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index f293044..e4ca5fc 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -1008,15 +1008,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init sddr55_init(void)
-{
-	return usb_register(&sddr55_driver);
-}
-
-static void __exit sddr55_exit(void)
-{
-	usb_deregister(&sddr55_driver);
-}
-
-module_init(sddr55_init);
-module_exit(sddr55_exit);
+module_usb_driver(sddr55_driver);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 7d642c8..1369d25 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1865,15 +1865,4 @@
 	.soft_unbind =	1,
 };
 
-static int __init usbat_init(void)
-{
-	return usb_register(&usbat_driver);
-}
-
-static void __exit usbat_exit(void)
-{
-	usb_deregister(&usbat_driver);
-}
-
-module_init(usbat_init);
-module_exit(usbat_exit);
+module_usb_driver(usbat_driver);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 1d10d5b..a33ead5 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -760,18 +760,7 @@
 	.id_table = uas_usb_ids,
 };
 
-static int uas_init(void)
-{
-	return usb_register(&uas_driver);
-}
-
-static void uas_exit(void)
-{
-	usb_deregister(&uas_driver);
-}
-
-module_init(uas_init);
-module_exit(uas_exit);
+module_usb_driver(uas_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 24caba7..856ad92 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1914,7 +1914,7 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_INITIAL_READ10 ),
 
-/* Patch by Richard Schütz <r.schtz@t-online.de>
+/* Patch by Richard Schütz <r.schtz@t-online.de>
  * This external hard drive enclosure uses a JMicron chip which
  * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
 UNUSUAL_DEV(  0x1e68, 0x001b, 0x0000, 0x0000,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 9e069ef..3dd7da9 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@
 
 	dev_dbg(dev, "device found\n");
 
-	set_freezable_with_signal();
+	set_freezable();
+
 	/*
 	 * Wait for the timeout to expire or for a disconnect
 	 *
@@ -839,16 +840,16 @@
 	 * fail to freeze, but we can't be non-freezable either. Nor can
 	 * khubd freeze while waiting for scanning to complete as it may
 	 * hold the device lock, causing a hang when suspending devices.
-	 * So we request a fake signal when freezing and use
-	 * interruptible sleep to kick us out of our wait early when
-	 * freezing happens.
+	 * So instead of using wait_event_freezable(), explicitly test
+	 * for (DONT_SCAN || freezing) in interruptible wait and proceed
+	 * if any of DONT_SCAN, freezing or timeout has happened.
 	 */
 	if (delay_use > 0) {
 		dev_dbg(dev, "waiting for device to settle "
 				"before scanning\n");
 		wait_event_interruptible_timeout(us->delay_wait,
-				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-				delay_use * HZ);
+				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+				freezing(current), delay_use * HZ);
 	}
 
 	/* If the device is still connected, perform the scanning */
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index d9a9584..8efeae2 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -690,25 +690,6 @@
 	.supports_autosuspend = 1,
 };
 
-static int __init usb_skel_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&skel_driver);
-	if (result)
-		err("usb_register failed. Error number %d", result);
-
-	return result;
-}
-
-static void __exit usb_skel_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&skel_driver);
-}
-
-module_init(usb_skel_init);
-module_exit(usb_skel_exit);
+module_usb_driver(skel_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 200fd7c..7f78f30 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -655,17 +655,7 @@
 	.disconnect =	cbaf_disconnect,
 };
 
-static int __init cbaf_driver_init(void)
-{
-	return usb_register(&cbaf_driver);
-}
-module_init(cbaf_driver_init);
-
-static void __exit cbaf_driver_exit(void)
-{
-	usb_deregister(&cbaf_driver);
-}
-module_exit(cbaf_driver_exit);
+module_usb_driver(cbaf_driver);
 
 MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
 MODULE_DESCRIPTION("Wireless USB Cable Based Association");
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 2babcd4..66797e9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -914,17 +914,7 @@
 	.post_reset =   hwarc_post_reset,
 };
 
-static int __init hwarc_driver_init(void)
-{
-	return usb_register(&hwarc_driver);
-}
-module_init(hwarc_driver_init);
-
-static void __exit hwarc_driver_exit(void)
-{
-	usb_deregister(&hwarc_driver);
-}
-module_exit(hwarc_driver_exit);
+module_usb_driver(hwarc_driver);
 
 MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
 MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index ba86643..2bfc846 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -104,7 +104,7 @@
  *
  * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
  * so we copy it to the local i1480 buffer before proceeding. In any
- * case, we have a max size we can send, soooo.
+ * case, we have a max size we can send.
  */
 static
 int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
@@ -451,25 +451,7 @@
 	.disconnect =	NULL,
 };
 
-
-/*
- * Initialize the i1480 DFU driver.
- *
- * We also need to register our function for guessing event sizes.
- */
-static int __init i1480_dfu_driver_init(void)
-{
-	return usb_register(&i1480_dfu_driver);
-}
-module_init(i1480_dfu_driver_init);
-
-
-static void __exit i1480_dfu_driver_exit(void)
-{
-	usb_deregister(&i1480_dfu_driver);
-}
-module_exit(i1480_dfu_driver_exit);
-
+module_usb_driver(i1480_dfu_driver);
 
 MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
 MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 2cda6ba..0a2cce7 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -621,6 +621,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, clcdfb_id_table);
+
 static struct amba_driver clcd_driver = {
 	.drv 		= {
 		.name	= "clcd-pl11x",
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 56720fb..46b03f5 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -4,7 +4,7 @@
  * Author:       Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
  * Created:
- * Description:  ADSP-BF54x Framebufer driver
+ * Description:  ADSP-BF54x Framebuffer driver
  *
  *
  * Modified:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index d5e1267..7a0c05f 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -4,7 +4,7 @@
  * Author:       Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
  * Created:
- * Description:  Blackfin LCD Framebufer driver
+ * Description:  Blackfin LCD Framebuffer driver
  *
  *
  * Modified:
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 2209e35..c2d11fe 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -66,8 +66,6 @@
           Say Y here if you want the console on the Newport aka XL graphics
           card of your Indy.  Most people say Y here.
 
-#  bool 'IODC console' CONFIG_IODC_CONSOLE
-
 config DUMMY_CONSOLE
 	bool
 	depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y 
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index cb163a53..0c4f343 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -41,13 +41,14 @@
 /* Supported palette hacks */
 enum {
 	cmap_unknown,
-	cmap_m64,		/* ATI Mach64 */
+	cmap_simple,		/* ATI Mach64 */
 	cmap_r128,		/* ATI Rage128 */
 	cmap_M3A,		/* ATI Rage Mobility M3 Head A */
 	cmap_M3B,		/* ATI Rage Mobility M3 Head B */
 	cmap_radeon,		/* ATI Radeon */
 	cmap_gxt2000,		/* IBM GXT2000 */
 	cmap_avivo,		/* ATI R5xx */
+	cmap_qemu,		/* qemu vga */
 };
 
 struct offb_par {
@@ -100,36 +101,32 @@
 			  u_int transp, struct fb_info *info)
 {
 	struct offb_par *par = (struct offb_par *) info->par;
-	int i, depth;
-	u32 *pal = info->pseudo_palette;
 
-	depth = info->var.bits_per_pixel;
-	if (depth == 16)
-		depth = (info->var.green.length == 5) ? 15 : 16;
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+		u32 *pal = info->pseudo_palette;
+		u32 cr = red >> (16 - info->var.red.length);
+		u32 cg = green >> (16 - info->var.green.length);
+		u32 cb = blue >> (16 - info->var.blue.length);
+		u32 value;
 
-	if (regno > 255 ||
-	    (depth == 16 && regno > 63) ||
-	    (depth == 15 && regno > 31))
-		return 1;
+		if (regno >= 16)
+			return -EINVAL;
 
-	if (regno < 16) {
-		switch (depth) {
-		case 15:
-			pal[regno] = (regno << 10) | (regno << 5) | regno;
-			break;
-		case 16:
-			pal[regno] = (regno << 11) | (regno << 5) | regno;
-			break;
-		case 24:
-			pal[regno] = (regno << 16) | (regno << 8) | regno;
-			break;
-		case 32:
-			i = (regno << 8) | regno;
-			pal[regno] = (i << 16) | i;
-			break;
+		value = (cr << info->var.red.offset) |
+			(cg << info->var.green.offset) |
+			(cb << info->var.blue.offset);
+		if (info->var.transp.length > 0) {
+			u32 mask = (1 << info->var.transp.length) - 1;
+			mask <<= info->var.transp.offset;
+			value |= mask;
 		}
+		pal[regno] = value;
+		return 0;
 	}
 
+	if (regno > 255)
+		return -EINVAL;
+
 	red >>= 8;
 	green >>= 8;
 	blue >>= 8;
@@ -138,7 +135,7 @@
 		return 0;
 
 	switch (par->cmap_type) {
-	case cmap_m64:
+	case cmap_simple:
 		writeb(regno, par->cmap_adr);
 		writeb(red, par->cmap_data);
 		writeb(green, par->cmap_data);
@@ -208,7 +205,7 @@
 	if (blank)
 		for (i = 0; i < 256; i++) {
 			switch (par->cmap_type) {
-			case cmap_m64:
+			case cmap_simple:
 				writeb(i, par->cmap_adr);
 				for (j = 0; j < 3; j++)
 					writeb(0, par->cmap_data);
@@ -350,7 +347,7 @@
 		par->cmap_adr =
 			ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
 		par->cmap_data = par->cmap_adr + 1;
-		par->cmap_type = cmap_m64;
+		par->cmap_type = cmap_simple;
 	} else if (dp && (of_device_is_compatible(dp, "pci1014,b7") ||
 			  of_device_is_compatible(dp, "pci1014,21c"))) {
 		par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
@@ -371,6 +368,16 @@
 				par->cmap_type = cmap_avivo;
 		}
 		of_node_put(pciparent);
+	} else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
+		const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+		u64 io_addr = of_translate_address(dp, io_of_addr);
+		if (io_addr != OF_BAD_ADDR) {
+			par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
+			if (par->cmap_adr) {
+				par->cmap_type = cmap_simple;
+				par->cmap_data = par->cmap_adr + 1;
+			}
+		}
 	}
 	info->fix.visual = (par->cmap_type != cmap_unknown) ?
 		FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR;
@@ -381,7 +388,7 @@
 				int pitch, unsigned long address,
 				int foreign_endian, struct device_node *dp)
 {
-	unsigned long res_size = pitch * height * (depth + 7) / 8;
+	unsigned long res_size = pitch * height;
 	struct offb_par *par = &default_par;
 	unsigned long res_start = address;
 	struct fb_fix_screeninfo *fix;
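
Note: the rewritten offb_setcolreg() stops approximating the first 16 palette entries with a grey ramp built from regno and instead packs the requested red/green/blue into the pseudo-palette using the var bitfields, forcing any transparency bits on and rejecting out-of-range entries with -EINVAL instead of 1. As a worked example with an assumed RGB565 layout (red 5 bits at offset 11, green 6 at 5, blue 5 at 0; the layout is illustrative, not part of this patch), a full-intensity entry becomes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t red = 0xffff, green = 0xffff, blue = 0xffff;	/* 16-bit fb colour inputs */
	uint32_t cr = red   >> (16 - 5);	/* 0x1f */
	uint32_t cg = green >> (16 - 6);	/* 0x3f */
	uint32_t cb = blue  >> (16 - 5);	/* 0x1f */
	uint32_t value = (cr << 11) | (cg << 5) | cb;

	printf("pal[regno] = 0x%04x\n", value);	/* 0xffff: white in RGB565 */
	return 0;
}
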
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index 0c6981f..2c1a340 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -2,7 +2,7 @@
  * OMAP2 Remote Frame Buffer Interface support
  *
  * Copyright (C) 2005 Nokia Corporation
- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
  *	   Imre Deak <imre.deak@nokia.com>
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 8fb7c70..f79c137 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -2,7 +2,7 @@
  * OMAP1 Special OptimiSed Screen Interface support
  *
  * Copyright (C) 2004-2005 Nokia Corporation
- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index aaccffa..3c22994 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -1792,24 +1792,7 @@
 	.id_table = id_table,
 };
 
-static int __init ufx_module_init(void)
-{
-	int res;
-
-	res = usb_register(&ufx_driver);
-	if (res)
-		err("usb_register failed. Error number %d", res);
-
-	return res;
-}
-
-static void __exit ufx_module_exit(void)
-{
-	usb_deregister(&ufx_driver);
-}
-
-module_init(ufx_module_init);
-module_exit(ufx_module_exit);
+module_usb_driver(ufx_driver);
 
 static void ufx_urb_completion(struct urb *urb)
 {
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 3473e75..1f868d0 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1761,24 +1761,7 @@
 	.id_table = id_table,
 };
 
-static int __init dlfb_module_init(void)
-{
-	int res;
-
-	res = usb_register(&dlfb_driver);
-	if (res)
-		err("usb_register failed. Error number %d", res);
-
-	return res;
-}
-
-static void __exit dlfb_module_exit(void)
-{
-	usb_deregister(&dlfb_driver);
-}
-
-module_init(dlfb_module_init);
-module_exit(dlfb_module_exit);
+module_usb_driver(dlfb_driver);
 
 static void dlfb_urb_completion(struct urb *urb)
 {
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 7317dc2e..0269717 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -361,7 +361,12 @@
 	return 0;
 }
 
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 
+	return vm_dev->pdev->name;
+}
 
 static struct virtio_config_ops virtio_mmio_config_ops = {
 	.get		= vm_get,
@@ -373,6 +378,7 @@
 	.del_vqs	= vm_del_vqs,
 	.get_features	= vm_get_features,
 	.finalize_features = vm_finalize_features,
+	.bus_name	= vm_bus_name,
 };
 
 
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 03d1984..baabb79 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -598,6 +598,13 @@
 				  false, false);
 }
 
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+	return pci_name(vp_dev->pci_dev);
+}
+
 static struct virtio_config_ops virtio_pci_config_ops = {
 	.get		= vp_get,
 	.set		= vp_set,
@@ -608,6 +615,7 @@
 	.del_vqs	= vp_del_vqs,
 	.get_features	= vp_get_features,
 	.finalize_features = vp_finalize_features,
+	.bus_name	= vp_bus_name,
 };
 
 static void virtio_pci_release_dev(struct device *_d)
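
Note: both virtio transports now expose the name of the underlying platform or PCI device through a bus_name() config operation. How a virtio driver consumes it is not part of this diff; a plausible caller-side sketch (the helper name and the ethtool-style use are assumptions) looks like:

/* hypothetical helper a driver might use; not defined in this diff */
static inline const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);	/* e.g. "0000:00:03.0" for virtio-pci */
}

which would let a driver report the real bus device name instead of a bare "virtio" placeholder.
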
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index b5abaae..4f7e1d7 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1002,26 +1002,7 @@
 	kfree(dev);
 }
 
-static int ds_init(void)
-{
-	int err;
-
-	err = usb_register(&ds_driver);
-	if (err) {
-		printk(KERN_INFO "Failed to register DS9490R USB device: err=%d.\n", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static void ds_fini(void)
-{
-	usb_deregister(&ds_driver);
-}
-
-module_init(ds_init);
-module_exit(ds_fini);
+module_usb_driver(ds_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index a1ef9b5..ff29ae7 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -175,11 +175,13 @@
 {
 	struct w1_slave *sl = dev_to_w1_slave(device);
 	struct w1_master *dev = sl->master;
-	u8 rom[9], crc, verdict;
+	u8 rom[9], crc, verdict, external_power;
 	int i, max_trying = 10;
 	ssize_t c = PAGE_SIZE;
 
-	mutex_lock(&dev->mutex);
+	i = mutex_lock_interruptible(&dev->mutex);
+	if (i != 0)
+		return i;
 
 	memset(rom, 0, sizeof(rom));
 
@@ -190,13 +192,37 @@
 		if (!w1_reset_select_slave(sl)) {
 			int count = 0;
 			unsigned int tm = 750;
+			unsigned long sleep_rem;
+
+			w1_write_8(dev, W1_READ_PSUPPLY);
+			external_power = w1_read_8(dev);
+
+			if (w1_reset_select_slave(sl))
+				continue;
 
 			/* 750ms strong pullup (or delay) after the convert */
-			if (w1_strong_pullup)
+			if (!external_power && w1_strong_pullup)
 				w1_next_pullup(dev, tm);
+
 			w1_write_8(dev, W1_CONVERT_TEMP);
-			if (!w1_strong_pullup)
-				msleep(tm);
+
+			if (external_power) {
+				mutex_unlock(&dev->mutex);
+
+				sleep_rem = msleep_interruptible(tm);
+				if (sleep_rem != 0)
+					return -EINTR;
+
+				i = mutex_lock_interruptible(&dev->mutex);
+				if (i != 0)
+					return i;
+			} else if (!w1_strong_pullup) {
+				sleep_rem = msleep_interruptible(tm);
+				if (sleep_rem != 0) {
+					mutex_unlock(&dev->mutex);
+					return -EINTR;
+				}
+			}
 
 			if (!w1_reset_select_slave(sl)) {
 
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c374978..9761950 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -892,6 +892,16 @@
 			break;
 		}
 
+		/* Do fast search on single slave bus */
+		if (dev->max_slave_count == 1) {
+			w1_write_8(dev, W1_READ_ROM);
+
+			if (w1_read_block(dev, (u8 *)&rn, 8) == 8 && rn)
+				cb(dev, rn);
+
+			break;
+		}
+
 		/* Start the search */
 		w1_write_8(dev, search_type);
 		for (i = 0; i < 64; ++i) {
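
Note: on a bus whose master declares max_slave_count == 1, the new block replaces the 64-step Search ROM walk below it with a single Read ROM exchange (0x33 in the 1-Wire command set), which returns the slave's whole 64-bit registration number in eight byte reads; the rn != 0 test simply skips an all-zero read. A sketch of how that id is laid out, using a hypothetical DS18B20-style value (not taken from this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical id; the real top byte would be the CRC8 of the low 56 bits */
	uint64_t rn = 0x8d000004b81fa628ULL;

	printf("family: 0x%02x\n", (unsigned)(rn & 0xff));	/* 0x28 = DS18B20 */
	printf("serial: 0x%012llx\n",
	       (unsigned long long)((rn >> 8) & 0xffffffffffffULL));
	printf("crc   : 0x%02x\n", (unsigned)(rn >> 56));
	return 0;
}
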
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 03f449a..5b89f7d 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -76,8 +76,6 @@
 static void __iomem *virtbase;
 static unsigned long coh901327_users;
 static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
 static struct device *parent;
 
 /*
@@ -461,6 +459,10 @@
 }
 
 #ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
 static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3774c9b..8464ea1 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -231,6 +231,7 @@
 
 	cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
 
+	set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
 	asminline_call(&cmn_regs, bios32_entrypoint);
 
 	if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@
 		if ((physical_bios_base + physical_bios_offset)) {
 			cru_rom_addr =
 				ioremap(cru_physical_address, cru_length);
-			if (cru_rom_addr)
+			if (cru_rom_addr) {
+				set_memory_x((unsigned long)cru_rom_addr, cru_length);
 				retval = 0;
+			}
 		}
 
 		printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index ba6ad66..99796c5 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -384,10 +384,10 @@
 	"Watchdog cannot be stopped once started (default="
 				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
 module_param(turn_SMI_watchdog_clear_off, int, 0);
 MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
-	"Turn off SMI clearing watchdog (default=0)");
+	"Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
 
 /*
  * Some TCO specific functions
@@ -813,7 +813,7 @@
 		ret = -EIO;
 		goto out_unmap;
 	}
-	if (turn_SMI_watchdog_clear_off) {
+	if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
 		/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
 		val32 = inl(SMI_EN);
 		val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 748a74b..d8de1dd 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -827,37 +827,4 @@
 	printk(KERN_INFO PFX "USB PC Watchdog disconnected\n");
 }
 
-
-
-/**
- *	usb_pcwd_init
- */
-static int __init usb_pcwd_init(void)
-{
-	int result;
-
-	/* register this driver with the USB subsystem */
-	result = usb_register(&usb_pcwd_driver);
-	if (result) {
-		printk(KERN_ERR PFX "usb_register failed. Error number %d\n",
-		    result);
-		return result;
-	}
-
-	printk(KERN_INFO PFX DRIVER_DESC " v" DRIVER_VERSION "\n");
-	return 0;
-}
-
-
-/**
- *	usb_pcwd_exit
- */
-static void __exit usb_pcwd_exit(void)
-{
-	/* deregister this driver with the USB subsystem */
-	usb_deregister(&usb_pcwd_driver);
-}
-
-
-module_init(usb_pcwd_init);
-module_exit(usb_pcwd_exit);
+module_usb_driver(usb_pcwd_driver);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index cc2cfbe..eef1524 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -351,7 +351,7 @@
 	return 0;
 }
 
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
 	{
 		.id	= 0x00141805,
 		.mask	= 0x00ffffff,
@@ -359,6 +359,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, sp805_wdt_ids);
+
 static struct amba_driver sp805_wdt_driver = {
 	.drv = {
 		.name	= MODULE_NAME,
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index dd5d675..576a388 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -4,7 +4,7 @@
  *	(c) Copyright 2007 Vlad Drukker <vlad@storewiz.com>
  *		added support for W83627THF.
  *
- *	(c) Copyright 2003,2007 Pádraig Brady <P@draigBrady.com>
+ *	(c) Copyright 2003,2007 Pádraig Brady <P@draigBrady.com>
  *
  *	Based on advantechwdt.c which is based on wdt.c.
  *	Original copyright messages:
@@ -401,6 +401,6 @@
 module_exit(wdt_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>");
+MODULE_AUTHOR("Pádraig  Brady <P@draigBrady.com>");
 MODULE_DESCRIPTION("w83627hf/thf WDT driver");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b9..284798a 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem(bytes);
+	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
 	if (!xen_io_tlb_start) {
 		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
 		goto error;
@@ -179,7 +179,7 @@
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), bytes);
+		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
 		m = "Failed to get contiguous memory for DMA from Xen!\n"\
 		    "You either: don't have the permissions, do not have"\
 		    " enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 9cc2259..3832e30 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -32,7 +32,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/sysdev.h>
 #include <linux/capability.h>
 
 #include <xen/xen.h>
@@ -46,9 +45,9 @@
 
 #define BALLOON_CLASS_NAME "xen_memory"
 
-static struct sys_device balloon_sysdev;
+static struct device balloon_dev;
 
-static int register_balloon(struct sys_device *sysdev);
+static int register_balloon(struct device *dev);
 
 /* React to a change in the target key */
 static void watch_target(struct xenbus_watch *watch,
@@ -98,9 +97,9 @@
 
 	pr_info("xen-balloon: Initialising balloon driver.\n");
 
-	register_balloon(&balloon_sysdev);
+	register_balloon(&balloon_dev);
 
-	register_xen_selfballooning(&balloon_sysdev);
+	register_xen_selfballooning(&balloon_dev);
 
 	register_xenstore_notifier(&xenstore_notifier);
 
@@ -117,31 +116,31 @@
 module_exit(balloon_exit);
 
 #define BALLOON_SHOW(name, format, args...)				\
-	static ssize_t show_##name(struct sys_device *dev,		\
-				   struct sysdev_attribute *attr,	\
+	static ssize_t show_##name(struct device *dev,			\
+				   struct device_attribute *attr,	\
 				   char *buf)				\
 	{								\
 		return sprintf(buf, format, ##args);			\
 	}								\
-	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
 BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
 BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
 BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
 
-static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
-static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
-static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
-static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
+static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
+static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
+static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
 
-static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
 			      char *buf)
 {
 	return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
 }
 
-static ssize_t store_target_kb(struct sys_device *dev,
-			       struct sysdev_attribute *attr,
+static ssize_t store_target_kb(struct device *dev,
+			       struct device_attribute *attr,
 			       const char *buf,
 			       size_t count)
 {
@@ -158,11 +157,11 @@
 	return count;
 }
 
-static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(target_kb, S_IRUGO | S_IWUSR,
 		   show_target_kb, store_target_kb);
 
 
-static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_target(struct device *dev, struct device_attribute *attr,
 			      char *buf)
 {
 	return sprintf(buf, "%llu\n",
@@ -170,8 +169,8 @@
 		       << PAGE_SHIFT);
 }
 
-static ssize_t store_target(struct sys_device *dev,
-			    struct sysdev_attribute *attr,
+static ssize_t store_target(struct device *dev,
+			    struct device_attribute *attr,
 			    const char *buf,
 			    size_t count)
 {
@@ -188,23 +187,23 @@
 	return count;
 }
 
-static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(target, S_IRUGO | S_IWUSR,
 		   show_target, store_target);
 
 
-static struct sysdev_attribute *balloon_attrs[] = {
-	&attr_target_kb,
-	&attr_target,
-	&attr_schedule_delay.attr,
-	&attr_max_schedule_delay.attr,
-	&attr_retry_count.attr,
-	&attr_max_retry_count.attr
+static struct device_attribute *balloon_attrs[] = {
+	&dev_attr_target_kb,
+	&dev_attr_target,
+	&dev_attr_schedule_delay.attr,
+	&dev_attr_max_schedule_delay.attr,
+	&dev_attr_retry_count.attr,
+	&dev_attr_max_retry_count.attr
 };
 
 static struct attribute *balloon_info_attrs[] = {
-	&attr_current_kb.attr,
-	&attr_low_kb.attr,
-	&attr_high_kb.attr,
+	&dev_attr_current_kb.attr,
+	&dev_attr_low_kb.attr,
+	&dev_attr_high_kb.attr,
 	NULL
 };
 
@@ -213,34 +212,35 @@
 	.attrs = balloon_info_attrs
 };
 
-static struct sysdev_class balloon_sysdev_class = {
-	.name = BALLOON_CLASS_NAME
+static struct bus_type balloon_subsys = {
+	.name = BALLOON_CLASS_NAME,
+	.dev_name = BALLOON_CLASS_NAME,
 };
 
-static int register_balloon(struct sys_device *sysdev)
+static int register_balloon(struct device *dev)
 {
 	int i, error;
 
-	error = sysdev_class_register(&balloon_sysdev_class);
+	error = bus_register(&balloon_subsys);
 	if (error)
 		return error;
 
-	sysdev->id = 0;
-	sysdev->cls = &balloon_sysdev_class;
+	dev->id = 0;
+	dev->bus = &balloon_subsys;
 
-	error = sysdev_register(sysdev);
+	error = device_register(dev);
 	if (error) {
-		sysdev_class_unregister(&balloon_sysdev_class);
+		bus_unregister(&balloon_subsys);
 		return error;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
-		error = sysdev_create_file(sysdev, balloon_attrs[i]);
+		error = device_create_file(dev, balloon_attrs[i]);
 		if (error)
 			goto fail;
 	}
 
-	error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+	error = sysfs_create_group(&dev->kobj, &balloon_info_group);
 	if (error)
 		goto fail;
 
@@ -248,9 +248,9 @@
 
  fail:
 	while (--i >= 0)
-		sysdev_remove_file(sysdev, balloon_attrs[i]);
-	sysdev_unregister(sysdev);
-	sysdev_class_unregister(&balloon_sysdev_class);
+		device_remove_file(dev, balloon_attrs[i]);
+	device_unregister(dev);
+	bus_unregister(&balloon_subsys);
 	return error;
 }
 
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index d93c708..767ff65 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -74,6 +74,7 @@
 #include <linux/mman.h>
 #include <linux/module.h>
 #include <linux/workqueue.h>
+#include <linux/device.h>
 #include <xen/balloon.h>
 #include <xen/tmem.h>
 #include <xen/xen.h>
@@ -266,21 +267,20 @@
 
 #ifdef CONFIG_SYSFS
 
-#include <linux/sysdev.h>
 #include <linux/capability.h>
 
 #define SELFBALLOON_SHOW(name, format, args...)				\
-	static ssize_t show_##name(struct sys_device *dev,	\
-					   struct sysdev_attribute *attr, \
-					   char *buf) \
+	static ssize_t show_##name(struct device *dev,	\
+					  struct device_attribute *attr, \
+					  char *buf) \
 	{ \
 		return sprintf(buf, format, ##args); \
 	}
 
 SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
 
-static ssize_t store_selfballooning(struct sys_device *dev,
-			    struct sysdev_attribute *attr,
+static ssize_t store_selfballooning(struct device *dev,
+			    struct device_attribute *attr,
 			    const char *buf,
 			    size_t count)
 {
@@ -303,13 +303,13 @@
 	return count;
 }
 
-static SYSDEV_ATTR(selfballooning, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballooning, S_IRUGO | S_IWUSR,
 		   show_selfballooning, store_selfballooning);
 
 SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
 
-static ssize_t store_selfballoon_interval(struct sys_device *dev,
-					  struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_interval(struct device *dev,
+					  struct device_attribute *attr,
 					  const char *buf,
 					  size_t count)
 {
@@ -325,13 +325,13 @@
 	return count;
 }
 
-static SYSDEV_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
 		   show_selfballoon_interval, store_selfballoon_interval);
 
 SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
 
-static ssize_t store_selfballoon_downhys(struct sys_device *dev,
-					 struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_downhys(struct device *dev,
+					 struct device_attribute *attr,
 					 const char *buf,
 					 size_t count)
 {
@@ -347,14 +347,14 @@
 	return count;
 }
 
-static SYSDEV_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
 		   show_selfballoon_downhys, store_selfballoon_downhys);
 
 
 SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
 
-static ssize_t store_selfballoon_uphys(struct sys_device *dev,
-				       struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_uphys(struct device *dev,
+				       struct device_attribute *attr,
 				       const char *buf,
 				       size_t count)
 {
@@ -370,14 +370,14 @@
 	return count;
 }
 
-static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
 		   show_selfballoon_uphys, store_selfballoon_uphys);
 
 SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
 				selfballoon_min_usable_mb);
 
-static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
-					       struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_min_usable_mb(struct device *dev,
+					       struct device_attribute *attr,
 					       const char *buf,
 					       size_t count)
 {
@@ -393,7 +393,7 @@
 	return count;
 }
 
-static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
 		   show_selfballoon_min_usable_mb,
 		   store_selfballoon_min_usable_mb);
 
@@ -401,8 +401,8 @@
 #ifdef CONFIG_FRONTSWAP
 SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
 
-static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
-					     struct sysdev_attribute *attr,
+static ssize_t store_frontswap_selfshrinking(struct device *dev,
+					     struct device_attribute *attr,
 					     const char *buf,
 					     size_t count)
 {
@@ -424,13 +424,13 @@
 	return count;
 }
 
-static SYSDEV_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
 		   show_frontswap_selfshrinking, store_frontswap_selfshrinking);
 
 SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
 
-static ssize_t store_frontswap_inertia(struct sys_device *dev,
-				       struct sysdev_attribute *attr,
+static ssize_t store_frontswap_inertia(struct device *dev,
+				       struct device_attribute *attr,
 				       const char *buf,
 				       size_t count)
 {
@@ -447,13 +447,13 @@
 	return count;
 }
 
-static SYSDEV_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
 		   show_frontswap_inertia, store_frontswap_inertia);
 
 SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
 
-static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
-					  struct sysdev_attribute *attr,
+static ssize_t store_frontswap_hysteresis(struct device *dev,
+					  struct device_attribute *attr,
 					  const char *buf,
 					  size_t count)
 {
@@ -469,21 +469,21 @@
 	return count;
 }
 
-static SYSDEV_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
 		   show_frontswap_hysteresis, store_frontswap_hysteresis);
 
 #endif /* CONFIG_FRONTSWAP */
 
 static struct attribute *selfballoon_attrs[] = {
-	&attr_selfballooning.attr,
-	&attr_selfballoon_interval.attr,
-	&attr_selfballoon_downhysteresis.attr,
-	&attr_selfballoon_uphysteresis.attr,
-	&attr_selfballoon_min_usable_mb.attr,
+	&dev_attr_selfballooning.attr,
+	&dev_attr_selfballoon_interval.attr,
+	&dev_attr_selfballoon_downhysteresis.attr,
+	&dev_attr_selfballoon_uphysteresis.attr,
+	&dev_attr_selfballoon_min_usable_mb.attr,
 #ifdef CONFIG_FRONTSWAP
-	&attr_frontswap_selfshrinking.attr,
-	&attr_frontswap_hysteresis.attr,
-	&attr_frontswap_inertia.attr,
+	&dev_attr_frontswap_selfshrinking.attr,
+	&dev_attr_frontswap_hysteresis.attr,
+	&dev_attr_frontswap_inertia.attr,
 #endif
 	NULL
 };
@@ -494,12 +494,12 @@
 };
 #endif
 
-int register_xen_selfballooning(struct sys_device *sysdev)
+int register_xen_selfballooning(struct device *dev)
 {
 	int error = -1;
 
 #ifdef CONFIG_SYSFS
-	error = sysfs_create_group(&sysdev->kobj, &selfballoon_group);
+	error = sysfs_create_group(&dev->kobj, &selfballoon_group);
 #endif
 	return error;
 }
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b3b8f2f..ede860f 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -621,15 +621,6 @@
 	return NULL;
 }
 
-static void xs_reset_watches(void)
-{
-	int err;
-
-	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
-	if (err && err != -EEXIST)
-		printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
 /* Register callback to watch this node. */
 int register_xenbus_watch(struct xenbus_watch *watch)
 {
@@ -906,9 +897,5 @@
 	if (IS_ERR(task))
 		return PTR_ERR(task);
 
-	/* shutdown watches for kexec boot */
-	if (xen_hvm_domain())
-		xs_reset_watches();
-
 	return 0;
 }
diff --git a/drivers/zorro/zorro.ids b/drivers/zorro/zorro.ids
index de24e3d..119abea 100644
--- a/drivers/zorro/zorro.ids
+++ b/drivers/zorro/zorro.ids
@@ -351,7 +351,7 @@
 	0200  EGS 28/24 Spectrum [Graphics Card]
 0892  Apollo
 	0100  A1200 [FPU and RAM Expansion]
-0893  Ingenieurbüro Helfrich
+0893  Ingenieurbüro Helfrich
 	0500  Piccolo RAM [Graphics Card]
 	0600  Piccolo [Graphics Card]
 	0700  PeggyPlus MPEG [Video Card]
diff --git a/firmware/README.AddingFirmware b/firmware/README.AddingFirmware
index e24cd89..ea78c3a 100644
--- a/firmware/README.AddingFirmware
+++ b/firmware/README.AddingFirmware
@@ -12,7 +12,7 @@
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well various new firmware images which
@@ -22,6 +22,7 @@
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 410ffd6..dc95a25 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -54,9 +54,9 @@
 
 struct inode *v9fs_alloc_inode(struct super_block *sb);
 void v9fs_destroy_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
+struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t);
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-		    struct inode *inode, int mode, dev_t);
+		    struct inode *inode, umode_t mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 879ed88..e0f20de 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -59,15 +59,13 @@
  *
  */
 
-static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
+static u32 unixmode2p9mode(struct v9fs_session_info *v9ses, umode_t mode)
 {
 	int res;
 	res = mode & 0777;
 	if (S_ISDIR(mode))
 		res |= P9_DMDIR;
 	if (v9fs_proto_dotu(v9ses)) {
-		if (S_ISLNK(mode))
-			res |= P9_DMSYMLINK;
 		if (v9ses->nodev == 0) {
 			if (S_ISSOCK(mode))
 				res |= P9_DMSOCKET;
@@ -85,10 +83,7 @@
 			res |= P9_DMSETGID;
 		if ((mode & S_ISVTX) == S_ISVTX)
 			res |= P9_DMSETVTX;
-		if ((mode & P9_DMLINK))
-			res |= P9_DMLINK;
 	}
-
 	return res;
 }
 
@@ -99,11 +94,11 @@
  * @rdev: major number, minor number in case of device files.
  *
  */
-static int p9mode2unixmode(struct v9fs_session_info *v9ses,
-			   struct p9_wstat *stat, dev_t *rdev)
+static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
+			       struct p9_wstat *stat, dev_t *rdev)
 {
 	int res;
-	int mode = stat->mode;
+	u32 mode = stat->mode;
 
 	res = mode & S_IALLUGO;
 	*rdev = 0;
@@ -251,7 +246,6 @@
 static void v9fs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
 }
 
@@ -261,7 +255,7 @@
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-		    struct inode *inode, int mode, dev_t rdev)
+		    struct inode *inode, umode_t mode, dev_t rdev)
 {
 	int err = 0;
 
@@ -335,7 +329,7 @@
 
 		break;
 	default:
-		P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
+		P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n",
 			   mode, mode & S_IFMT);
 		err = -EINVAL;
 		goto error;
@@ -352,13 +346,13 @@
  *
  */
 
-struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
+struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
 {
 	int err;
 	struct inode *inode;
 	struct v9fs_session_info *v9ses = sb->s_fs_info;
 
-	P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
+	P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode);
 
 	inode = new_inode(sb);
 	if (!inode) {
@@ -492,7 +486,8 @@
 				   int new)
 {
 	dev_t rdev;
-	int retval, umode;
+	int retval;
+	umode_t umode;
 	unsigned long i_ino;
 	struct inode *inode;
 	struct v9fs_session_info *v9ses = sb->s_fs_info;
@@ -703,7 +698,7 @@
  */
 
 static int
-v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	int err;
@@ -786,7 +781,7 @@
  *
  */
 
-static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int err;
 	u32 perm;
@@ -1131,7 +1126,7 @@
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
 	struct super_block *sb)
 {
-	mode_t mode;
+	umode_t mode;
 	char ext[32];
 	char tag_name[14];
 	unsigned int i_nlink;
@@ -1304,9 +1299,8 @@
  */
 
 static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
-	int mode, const char *extension)
+	u32 perm, const char *extension)
 {
-	u32 perm;
 	struct p9_fid *fid;
 	struct v9fs_session_info *v9ses;
 
@@ -1316,7 +1310,6 @@
 		return -EPERM;
 	}
 
-	perm = unixmode2p9mode(v9ses, mode);
 	fid = v9fs_create(v9ses, dir, dentry, (char *) extension, perm,
 								P9_OREAD);
 	if (IS_ERR(fid))
@@ -1343,7 +1336,7 @@
 	P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino,
 					dentry->d_name.name, symname);
 
-	return v9fs_vfs_mkspecial(dir, dentry, S_IFLNK, symname);
+	return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname);
 }
 
 /**
@@ -1398,13 +1391,15 @@
  */
 
 static int
-v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
+	struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
 	int retval;
 	char *name;
+	u32 perm;
 
 	P9_DPRINTK(P9_DEBUG_VFS,
-		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+		" %lu,%s mode: %hx MAJOR: %u MINOR: %u\n", dir->i_ino,
 		dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
 
 	if (!new_valid_dev(rdev))
@@ -1427,7 +1422,8 @@
 		return -EINVAL;
 	}
 
-	retval = v9fs_vfs_mkspecial(dir, dentry, mode, name);
+	perm = unixmode2p9mode(v9ses, mode);
+	retval = v9fs_vfs_mkspecial(dir, dentry, perm, name);
 	__putname(name);
 
 	return retval;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 0b5745e..8ef152a 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -48,7 +48,7 @@
 #include "acl.h"
 
 static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
 		    dev_t rdev);
 
 /**
@@ -253,7 +253,7 @@
  */
 
 static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
 		struct nameidata *nd)
 {
 	int err = 0;
@@ -284,7 +284,7 @@
 
 	name = (char *) dentry->d_name.name;
 	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
-			"mode:0x%x\n", name, flags, omode);
+			"mode:0x%hx\n", name, flags, omode);
 
 	dfid = v9fs_fid_lookup(dentry->d_parent);
 	if (IS_ERR(dfid)) {
@@ -395,7 +395,7 @@
  */
 
 static int v9fs_vfs_mkdir_dotl(struct inode *dir,
-			       struct dentry *dentry, int omode)
+			       struct dentry *dentry, umode_t omode)
 {
 	int err;
 	struct v9fs_session_info *v9ses;
@@ -594,7 +594,7 @@
 void
 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
 {
-	mode_t mode;
+	umode_t mode;
 	struct v9fs_inode *v9inode = V9FS_I(inode);
 
 	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -799,7 +799,7 @@
  *
  */
 static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
 		dev_t rdev)
 {
 	int err;
@@ -814,7 +814,7 @@
 	struct posix_acl *dacl = NULL, *pacl = NULL;
 
 	P9_DPRINTK(P9_DEBUG_VFS,
-		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+		" %lu,%s mode: %hx MAJOR: %u MINOR: %u\n", dir->i_ino,
 		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
 
 	if (!new_valid_dev(rdev))
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index c70251d..f68ff65 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -117,7 +117,7 @@
 	struct inode *inode = NULL;
 	struct dentry *root = NULL;
 	struct v9fs_session_info *v9ses = NULL;
-	int mode = S_IRWXUGO | S_ISVTX;
+	umode_t mode = S_IRWXUGO | S_ISVTX;
 	struct p9_fid *fid;
 	int retval = 0;
 
diff --git a/fs/Kconfig b/fs/Kconfig
index 5f4c45d..30145d8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -266,14 +266,6 @@
 
 endif # NETWORK_FILESYSTEMS
 
-if BLOCK
-menu "Partition Types"
-
-source "fs/partitions/Kconfig"
-
-endmenu
-endif
-
 source "fs/nls/Kconfig"
 source "fs/dlm/Kconfig"
 
diff --git a/fs/Makefile b/fs/Makefile
index d2c3353..93804d4 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -19,6 +19,8 @@
 obj-y +=	no-block.o
 endif
 
+obj-$(CONFIG_PROC_FS) += proc_namespace.o
+
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
 obj-y				+= notify/
 obj-$(CONFIG_EPOLL)		+= eventpoll.o
@@ -52,7 +54,6 @@
 obj-y				+= quota/
 
 obj-$(CONFIG_PROC_FS)		+= proc/
-obj-y				+= partitions/
 obj-$(CONFIG_SYSFS)		+= sysfs/
 obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-y				+= devpts/
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index c8bf36a..8e3b36a 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -126,9 +126,9 @@
 	sb->s_fs_info = NULL;
 }
 
-static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int adfs_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct adfs_sb_info *asb = ADFS_SB(mnt->mnt_sb);
+	struct adfs_sb_info *asb = ADFS_SB(root->d_sb);
 
 	if (asb->s_uid != 0)
 		seq_printf(seq, ",uid=%u", asb->s_uid);
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index c2b9c79..45a0ce4 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -136,7 +136,7 @@
 extern u32	affs_checksum_block(struct super_block *sb, struct buffer_head *bh);
 extern void	affs_fix_checksum(struct super_block *sb, struct buffer_head *bh);
 extern void	secs_to_datestamp(time_t secs, struct affs_date *ds);
-extern mode_t	prot_to_mode(u32 prot);
+extern umode_t	prot_to_mode(u32 prot);
 extern void	mode_to_prot(struct inode *inode);
 extern void	affs_error(struct super_block *sb, const char *function, const char *fmt, ...);
 extern void	affs_warning(struct super_block *sb, const char *function, const char *fmt, ...);
@@ -156,8 +156,8 @@
 extern int	affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len);
 extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *);
 extern int	affs_unlink(struct inode *dir, struct dentry *dentry);
-extern int	affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *);
-extern int	affs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+extern int	affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *);
+extern int	affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 extern int	affs_rmdir(struct inode *dir, struct dentry *dentry);
 extern int	affs_link(struct dentry *olddentry, struct inode *dir,
 			  struct dentry *dentry);
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index de37ec8..52a6407 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -390,10 +390,10 @@
 	ds->ticks = cpu_to_be32(secs * 50);
 }
 
-mode_t
+umode_t
 prot_to_mode(u32 prot)
 {
-	int mode = 0;
+	umode_t mode = 0;
 
 	if (!(prot & FIBF_NOWRITE))
 		mode |= S_IWUSR;
@@ -421,7 +421,7 @@
 mode_to_prot(struct inode *inode)
 {
 	u32 prot = AFFS_I(inode)->i_protect;
-	mode_t mode = inode->i_mode;
+	umode_t mode = inode->i_mode;
 
 	if (!(mode & S_IXUSR))
 		prot |= FIBF_NOEXECUTE;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 780a11d..4780694 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -255,13 +255,13 @@
 }
 
 int
-affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
 {
 	struct super_block *sb = dir->i_sb;
 	struct inode	*inode;
 	int		 error;
 
-	pr_debug("AFFS: create(%lu,\"%.*s\",0%o)\n",dir->i_ino,(int)dentry->d_name.len,
+	pr_debug("AFFS: create(%lu,\"%.*s\",0%ho)\n",dir->i_ino,(int)dentry->d_name.len,
 		 dentry->d_name.name,mode);
 
 	inode = affs_new_inode(dir);
@@ -285,12 +285,12 @@
 }
 
 int
-affs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode		*inode;
 	int			 error;
 
-	pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%o)\n",dir->i_ino,
+	pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%ho)\n",dir->i_ino,
 		 (int)dentry->d_name.len,dentry->d_name.name,mode);
 
 	inode = affs_new_inode(dir);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index b31507d..8ba73fe 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -98,7 +98,6 @@
 static void affs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
 }
 
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 1b0b195..e22dc4b 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -28,9 +28,9 @@
 static void afs_d_release(struct dentry *dentry);
 static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
 				  loff_t fpos, u64 ino, unsigned dtype);
-static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		      struct nameidata *nd);
-static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 static int afs_rmdir(struct inode *dir, struct dentry *dentry);
 static int afs_unlink(struct inode *dir, struct dentry *dentry);
 static int afs_link(struct dentry *from, struct inode *dir,
@@ -764,7 +764,7 @@
 /*
  * create a directory on an AFS filesystem
  */
-static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct afs_file_status status;
 	struct afs_callback cb;
@@ -777,7 +777,7 @@
 
 	dvnode = AFS_FS_I(dir);
 
-	_enter("{%x:%u},{%s},%o",
+	_enter("{%x:%u},{%s},%ho",
 	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
 
 	ret = -ENAMETOOLONG;
@@ -948,7 +948,7 @@
 /*
  * create a regular file on an AFS filesystem
  */
-static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		      struct nameidata *nd)
 {
 	struct afs_file_status status;
@@ -962,7 +962,7 @@
 
 	dvnode = AFS_FS_I(dir);
 
-	_enter("{%x:%u},{%s},%o,",
+	_enter("{%x:%u},{%s},%ho,",
 	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
 
 	ret = -ENAMETOOLONG;
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index aa59184..8f4ce26 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -242,7 +242,7 @@
 {
 	struct vfsmount *newmnt;
 
-	_enter("{%s,%s}", path->mnt->mnt_devname, path->dentry->d_name.name);
+	_enter("{%s}", path->dentry->d_name.name);
 
 	newmnt = afs_mntpt_do_automount(path->dentry);
 	if (IS_ERR(newmnt))
@@ -252,7 +252,7 @@
 	mnt_set_expiry(newmnt, &afs_vfsmounts);
 	queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
 			   afs_mntpt_expiry_timeout * HZ);
-	_leave(" = %p {%s}", newmnt, newmnt->mnt_devname);
+	_leave(" = %p", newmnt);
 	return newmnt;
 }
 
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 356dcf0..983ec59 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -495,7 +495,6 @@
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct afs_vnode *vnode = AFS_FS_I(inode);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(afs_inode_cachep, vnode);
 }
 
diff --git a/fs/attr.c b/fs/attr.c
index 7ee7ba4..95053ad 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -166,7 +166,7 @@
 int notify_change(struct dentry * dentry, struct iattr * attr)
 {
 	struct inode *inode = dentry->d_inode;
-	mode_t mode = inode->i_mode;
+	umode_t mode = inode->i_mode;
 	int error;
 	struct timespec now;
 	unsigned int ia_valid = attr->ia_valid;
@@ -177,7 +177,7 @@
 	}
 
 	if ((ia_valid & ATTR_MODE)) {
-		mode_t amode = attr->ia_mode;
+		umode_t amode = attr->ia_mode;
 		/* Flag setting protected by i_mutex */
 		if (is_sxid(amode))
 			inode->i_flags &= ~S_NOSEC;
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 326dc08..5869d4e 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -155,7 +155,7 @@
 	return 0;
 }
 
-struct inode *autofs4_get_inode(struct super_block *, mode_t);
+struct inode *autofs4_get_inode(struct super_block *, umode_t);
 void autofs4_free_ino(struct autofs_info *);
 
 /* Expiration */
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 509fe1e..76741d8 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -194,7 +194,7 @@
 		return err;
 	err = -ENOENT;
 	while (path.dentry == path.mnt->mnt_root) {
-		if (path.mnt->mnt_sb->s_magic == AUTOFS_SUPER_MAGIC) {
+		if (path.dentry->d_sb->s_magic == AUTOFS_SUPER_MAGIC) {
 			if (test(&path, data)) {
 				path_get(&path);
 				if (!err) /* already found some */
@@ -212,7 +212,7 @@
 
 static int test_by_dev(struct path *path, void *p)
 {
-	return path->mnt->mnt_sb->s_dev == *(dev_t *)p;
+	return path->dentry->d_sb->s_dev == *(dev_t *)p;
 }
 
 static int test_by_type(struct path *path, void *p)
@@ -538,11 +538,11 @@
 			err = find_autofs_mount(name, &path, test_by_type, &type);
 		if (err)
 			goto out;
-		devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
+		devid = new_encode_dev(path.dentry->d_sb->s_dev);
 		err = 0;
 		if (path.mnt->mnt_root == path.dentry) {
 			err = 1;
-			magic = path.mnt->mnt_sb->s_magic;
+			magic = path.dentry->d_sb->s_magic;
 		}
 	} else {
 		dev_t dev = sbi->sb->s_dev;
@@ -556,7 +556,7 @@
 		err = have_submounts(path.dentry);
 
 		if (follow_down_one(&path))
-			magic = path.mnt->mnt_sb->s_magic;
+			magic = path.dentry->d_sb->s_magic;
 	}
 
 	param->ismountpoint.out.devid = devid;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 8179f1a..2ba44c7 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -70,10 +70,10 @@
 	kill_litter_super(sb);
 }
 
-static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int autofs4_show_options(struct seq_file *m, struct dentry *root)
 {
-	struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb);
-	struct inode *root_inode = mnt->mnt_sb->s_root->d_inode;
+	struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+	struct inode *root_inode = root->d_sb->s_root->d_inode;
 
 	if (!sbi)
 		return 0;
@@ -326,7 +326,7 @@
 	return -EINVAL;
 }
 
-struct inode *autofs4_get_inode(struct super_block *sb, mode_t mode)
+struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode)
 {
 	struct inode *inode = new_inode(sb);
 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index f55ae23..75e5f1c 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -26,7 +26,7 @@
 static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
 static int autofs4_dir_unlink(struct inode *,struct dentry *);
 static int autofs4_dir_rmdir(struct inode *,struct dentry *);
-static int autofs4_dir_mkdir(struct inode *,struct dentry *,int);
+static int autofs4_dir_mkdir(struct inode *,struct dentry *,umode_t);
 static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
 #ifdef CONFIG_COMPAT
 static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
@@ -699,7 +699,7 @@
 	return 0;
 }
 
-static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9205cf2..22e9a78 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -173,7 +173,7 @@
 };
 
 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
-		int mode, struct nameidata *nd)
+		umode_t mode, struct nameidata *nd)
 {
 	return -EIO;
 }
@@ -202,7 +202,7 @@
 }
 
 static int bad_inode_mkdir(struct inode *dir, struct dentry *dentry,
-			int mode)
+			umode_t mode)
 {
 	return -EIO;
 }
@@ -213,7 +213,7 @@
 }
 
 static int bad_inode_mknod (struct inode *dir, struct dentry *dentry,
-			int mode, dev_t rdev)
+			umode_t mode, dev_t rdev)
 {
 	return -EIO;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 8342ca6..6e6d536 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -286,7 +286,6 @@
 static void befs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
         kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 9cc07401..d12c796 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -84,7 +84,7 @@
 
 extern void dump_imap(const char *, struct super_block *);
 
-static int bfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 						struct nameidata *nd)
 {
 	int err;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 697af5b..b0391bc 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -251,7 +251,6 @@
 static void bfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
 }
 
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 1e9edbd..a9198df 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -560,7 +560,7 @@
 			break;
 		case 2: set_bit(Enabled, &e->flags);
 			break;
-		case 3: root = dget(file->f_path.mnt->mnt_sb->s_root);
+		case 3: root = dget(file->f_path.dentry->d_sb->s_root);
 			mutex_lock(&root->d_inode->i_mutex);
 
 			kill_node(e);
@@ -587,7 +587,7 @@
 	Node *e;
 	struct inode *inode;
 	struct dentry *root, *dentry;
-	struct super_block *sb = file->f_path.mnt->mnt_sb;
+	struct super_block *sb = file->f_path.dentry->d_sb;
 	int err = 0;
 
 	e = create_entry(buffer, count);
@@ -666,7 +666,7 @@
 	switch (res) {
 		case 1: enabled = 0; break;
 		case 2: enabled = 1; break;
-		case 3: root = dget(file->f_path.mnt->mnt_sb->s_root);
+		case 3: root = dget(file->f_path.dentry->d_sb->s_root);
 			mutex_lock(&root->d_inode->i_mutex);
 
 			while (!list_empty(&entries))
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b07f1da..69a5b6f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/buffer_head.h>
+#include <linux/swap.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
@@ -25,6 +26,7 @@
 #include <linux/namei.h>
 #include <linux/log2.h>
 #include <linux/kmemleak.h>
+#include <linux/cleancache.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -82,13 +84,35 @@
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
-static void kill_bdev(struct block_device *bdev)
+void kill_bdev(struct block_device *bdev)
 {
-	if (bdev->bd_inode->i_mapping->nrpages == 0)
+	struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+	if (mapping->nrpages == 0)
 		return;
+
 	invalidate_bh_lrus();
-	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+	truncate_inode_pages(mapping, 0);
 }	
+EXPORT_SYMBOL(kill_bdev);
+
+/* Invalidate clean unused buffers and pagecache. */
+void invalidate_bdev(struct block_device *bdev)
+{
+	struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+	if (mapping->nrpages == 0)
+		return;
+
+	invalidate_bh_lrus();
+	lru_add_drain_all();	/* make sure all lru add caches are flushed */
+	invalidate_mapping_pages(mapping, 0, -1);
+	/* 99% of the time, we don't need to flush the cleancache on the bdev.
+	 * But, for the strange corners, lets be cautious
+	 */
+	cleancache_flush_inode(mapping);
+}
+EXPORT_SYMBOL(invalidate_bdev);
 
 int set_blocksize(struct block_device *bdev, int size)
 {
@@ -425,7 +449,6 @@
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct bdev_inode *bdi = BDEV_I(inode);
 
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
@@ -493,7 +516,7 @@
 	.kill_sb	= kill_anon_super,
 };
 
-struct super_block *blockdev_superblock __read_mostly;
+static struct super_block *blockdev_superblock __read_mostly;
 
 void __init bdev_cache_init(void)
 {
@@ -639,6 +662,11 @@
 	return bdev;
 }
 
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+	return sb == blockdev_superblock;
+}
+
 /* Call when you free inode */
 
 void bd_forget(struct inode *inode)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec1409..0cc20b3 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@
 	int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time.  It will actually throttle on page writeback,
@@ -88,27 +90,10 @@
 {
 	struct worker_start *start;
 	start = container_of(work, struct worker_start, work);
-	btrfs_start_workers(start->queue, 1);
+	__btrfs_start_workers(start->queue);
 	kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-	struct worker_start *start;
-	int ret;
-
-	start = kzalloc(sizeof(*start), GFP_NOFS);
-	if (!start)
-		return -ENOMEM;
-
-	start->work.func = start_new_worker_func;
-	start->queue = queue;
-	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-	if (ret)
-		kfree(start);
-	return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
 	struct btrfs_workers *workers = worker->workers;
+	struct worker_start *start;
 	unsigned long flags;
 
 	rmb();
 	if (!workers->atomic_start_pending)
 		return;
 
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return;
+
+	start->work.func = start_new_worker_func;
+	start->queue = workers;
+
 	spin_lock_irqsave(&workers->lock, flags);
 	if (!workers->atomic_start_pending)
 		goto out;
@@ -170,10 +163,11 @@
 
 	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	start_new_worker(workers);
+	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
 	return;
 
 out:
+	kfree(start);
 	spin_unlock_irqrestore(&workers->lock, flags);
 }
 
@@ -331,7 +325,7 @@
 			run_ordered_completions(worker->workers, work);
 
 			check_pending_worker_creates(worker);
-
+			cond_resched();
 		}
 
 		spin_lock_irq(&worker->lock);
@@ -340,7 +334,7 @@
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			spin_unlock_irq(&worker->lock);
 			if (!kthread_should_stop()) {
@@ -462,56 +456,55 @@
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-				 int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
-	int i;
 
-	for (i = 0; i < num_workers; i++) {
-		worker = kzalloc(sizeof(*worker), GFP_NOFS);
-		if (!worker) {
-			ret = -ENOMEM;
-			goto fail;
-		}
-
-		INIT_LIST_HEAD(&worker->pending);
-		INIT_LIST_HEAD(&worker->prio_pending);
-		INIT_LIST_HEAD(&worker->worker_list);
-		spin_lock_init(&worker->lock);
-
-		atomic_set(&worker->num_pending, 0);
-		atomic_set(&worker->refs, 1);
-		worker->workers = workers;
-		worker->task = kthread_run(worker_loop, worker,
-					   "btrfs-%s-%d", workers->name,
-					   workers->num_workers + i);
-		if (IS_ERR(worker->task)) {
-			ret = PTR_ERR(worker->task);
-			kfree(worker);
-			goto fail;
-		}
-		spin_lock_irq(&workers->lock);
-		list_add_tail(&worker->worker_list, &workers->idle_list);
-		worker->idle = 1;
-		workers->num_workers++;
-		workers->num_workers_starting--;
-		WARN_ON(workers->num_workers_starting < 0);
-		spin_unlock_irq(&workers->lock);
+	worker = kzalloc(sizeof(*worker), GFP_NOFS);
+	if (!worker) {
+		ret = -ENOMEM;
+		goto fail;
 	}
+
+	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
+	INIT_LIST_HEAD(&worker->worker_list);
+	spin_lock_init(&worker->lock);
+
+	atomic_set(&worker->num_pending, 0);
+	atomic_set(&worker->refs, 1);
+	worker->workers = workers;
+	worker->task = kthread_run(worker_loop, worker,
+				   "btrfs-%s-%d", workers->name,
+				   workers->num_workers + 1);
+	if (IS_ERR(worker->task)) {
+		ret = PTR_ERR(worker->task);
+		kfree(worker);
+		goto fail;
+	}
+	spin_lock_irq(&workers->lock);
+	list_add_tail(&worker->worker_list, &workers->idle_list);
+	worker->idle = 1;
+	workers->num_workers++;
+	workers->num_workers_starting--;
+	WARN_ON(workers->num_workers_starting < 0);
+	spin_unlock_irq(&workers->lock);
+
 	return 0;
 fail:
-	btrfs_stop_workers(workers);
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting--;
+	spin_unlock_irq(&workers->lock);
 	return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
 	spin_lock_irq(&workers->lock);
-	workers->num_workers_starting += num_workers;
+	workers->num_workers_starting++;
 	spin_unlock_irq(&workers->lock);
-	return __btrfs_start_workers(workers, num_workers);
+	return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,9 +561,10 @@
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
 	struct list_head *fallback;
+	int ret;
 
-again:
 	spin_lock_irqsave(&workers->lock, flags);
+again:
 	worker = next_worker(workers);
 
 	if (!worker) {
@@ -584,7 +578,10 @@
 			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			__btrfs_start_workers(workers, 1);
+			ret = __btrfs_start_workers(workers);
+			spin_lock_irqsave(&workers->lock, flags);
+			if (ret)
+				goto fallback;
 			goto again;
 		}
 	}
@@ -665,7 +662,7 @@
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
@@ -673,7 +670,7 @@
 
 	/* don't requeue something already on a list */
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;
 
 	worker = find_worker(workers);
 	if (workers->ordered) {
@@ -712,7 +709,4 @@
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-	return 0;
 }
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 5077746..f34cc31 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@
 	char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 			struct btrfs_workers *async_starter);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 50634abe..6738503 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2692,7 +2692,8 @@
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5b16357..9c1eccc 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -640,8 +640,8 @@
 	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 	 * we're accounted for.
 	 */
-	if (!trans->bytes_reserved &&
-	    src_rsv != &root->fs_info->delalloc_block_rsv) {
+	if (!src_rsv || (!trans->bytes_reserved &&
+	    src_rsv != &root->fs_info->delalloc_block_rsv)) {
 		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
 		/*
 		 * Since we're under a transaction reserve_metadata_bytes could
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 632f8f3..f99a099 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1579,9 +1579,7 @@
 			btrfs_run_defrag_inodes(root->fs_info);
 		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1635,9 +1633,7 @@
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    !btrfs_transaction_blocked(root->fs_info))
@@ -2194,19 +2190,27 @@
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
 	fs_info->readahead_workers.idle_thresh = 2;
 
-	btrfs_start_workers(&fs_info->workers, 1);
-	btrfs_start_workers(&fs_info->generic_worker, 1);
-	btrfs_start_workers(&fs_info->submit_workers, 1);
-	btrfs_start_workers(&fs_info->delalloc_workers, 1);
-	btrfs_start_workers(&fs_info->fixup_workers, 1);
-	btrfs_start_workers(&fs_info->endio_workers, 1);
-	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
-	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
-	btrfs_start_workers(&fs_info->endio_write_workers, 1);
-	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
-	btrfs_start_workers(&fs_info->delayed_workers, 1);
-	btrfs_start_workers(&fs_info->caching_workers, 1);
-	btrfs_start_workers(&fs_info->readahead_workers, 1);
+	/*
+	 * btrfs_start_workers can really only fail because of ENOMEM so just
+	 * return -ENOMEM if any of these fail.
+	 */
+	ret = btrfs_start_workers(&fs_info->workers);
+	ret |= btrfs_start_workers(&fs_info->generic_worker);
+	ret |= btrfs_start_workers(&fs_info->submit_workers);
+	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+	ret |= btrfs_start_workers(&fs_info->fixup_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+	ret |= btrfs_start_workers(&fs_info->delayed_workers);
+	ret |= btrfs_start_workers(&fs_info->caching_workers);
+	ret |= btrfs_start_workers(&fs_info->readahead_workers);
+	if (ret) {
+		ret = -ENOMEM;
+		goto fail_sb_buffer;
+	}
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2ad8136..f5fbe57 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2822,7 +2822,7 @@
 	btrfs_release_path(path);
 out:
 	spin_lock(&block_group->lock);
-	if (!ret)
+	if (!ret && dcs == BTRFS_DC_SETUP)
 		block_group->cache_generation = trans->transid;
 	block_group->disk_cache_state = dcs;
 	spin_unlock(&block_group->lock);
@@ -4204,12 +4204,17 @@
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
 	u64 to_reserve = 0;
+	u64 csum_bytes;
 	unsigned nr_extents = 0;
+	int extra_reserve = 0;
 	int flush = 1;
 	int ret;
 
+	/* Need to be holding the i_mutex here if we aren't free space cache */
 	if (btrfs_is_free_space_inode(root, inode))
 		flush = 0;
+	else
+		WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
 	if (flush && btrfs_transaction_in_commit(root->fs_info))
 		schedule_timeout(1);
@@ -4220,11 +4225,9 @@
 	BTRFS_I(inode)->outstanding_extents++;
 
 	if (BTRFS_I(inode)->outstanding_extents >
-	    BTRFS_I(inode)->reserved_extents) {
+	    BTRFS_I(inode)->reserved_extents)
 		nr_extents = BTRFS_I(inode)->outstanding_extents -
 			BTRFS_I(inode)->reserved_extents;
-		BTRFS_I(inode)->reserved_extents += nr_extents;
-	}
 
 	/*
 	 * Add an item to reserve for updating the inode when we complete the
@@ -4232,11 +4235,12 @@
 	 */
 	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
 		nr_extents++;
-		BTRFS_I(inode)->delalloc_meta_reserved = 1;
+		extra_reserve = 1;
 	}
 
 	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+	csum_bytes = BTRFS_I(inode)->csum_bytes;
 	spin_unlock(&BTRFS_I(inode)->lock);
 
 	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4246,22 +4250,35 @@
 
 		spin_lock(&BTRFS_I(inode)->lock);
 		dropped = drop_outstanding_extent(inode);
-		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
-		spin_unlock(&BTRFS_I(inode)->lock);
-		to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
 		/*
-		 * Somebody could have come in and twiddled with the
-		 * reservation, so if we have to free more than we would have
-		 * reserved from this reservation go ahead and release those
-		 * bytes.
+		 * If the inodes csum_bytes is the same as the original
+		 * csum_bytes then we know we haven't raced with any free()ers
+		 * so we can just reduce our inodes csum bytes and carry on.
+		 * Otherwise we have to do the normal free thing to account for
+		 * the case that the free side didn't free up its reserve
+		 * because of this outstanding reservation.
 		 */
-		to_free -= to_reserve;
+		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+			calc_csum_metadata_size(inode, num_bytes, 0);
+		else
+			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+		spin_unlock(&BTRFS_I(inode)->lock);
+		if (dropped)
+			to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
 		if (to_free)
 			btrfs_block_rsv_release(root, block_rsv, to_free);
 		return ret;
 	}
 
+	spin_lock(&BTRFS_I(inode)->lock);
+	if (extra_reserve) {
+		BTRFS_I(inode)->delalloc_meta_reserved = 1;
+		nr_extents--;
+	}
+	BTRFS_I(inode)->reserved_extents += nr_extents;
+	spin_unlock(&BTRFS_I(inode)->lock);
+
 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
 	return 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa0..97fbe93 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1167,6 +1167,8 @@
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
+	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+	nrptrs = max(nrptrs, 8);
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
@@ -1387,7 +1389,11 @@
 		goto out;
 	}
 
-	file_update_time(file);
+	err = btrfs_update_time(file);
+	if (err) {
+		mutex_unlock(&inode->i_mutex);
+		goto out;
+	}
 	BTRFS_I(inode)->sequence++;
 
 	start_pos = round_down(pos, root->sectorsize);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ec23d43..9a897bf 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -423,7 +423,7 @@
 	}
 
 	if (index == 0)
-		offset = sizeof(u32) * io_ctl->num_pages;;
+		offset = sizeof(u32) * io_ctl->num_pages;
 
 	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
 			      PAGE_CACHE_SIZE - offset);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2c984f7..81b235a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -38,6 +38,7 @@
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/mount.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -1943,7 +1944,7 @@
 };
 
 /*
- * This is called in transaction commmit time. If there are no orphan
+ * This is called in transaction commit time. If there are no orphan
  * files in the subvolume, it removes orphan item and frees block_rsv
  * structure.
  */
@@ -2031,7 +2032,7 @@
 	/* insert an orphan item to track this unlinked/truncated file */
 	if (insert >= 1) {
 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
-		BUG_ON(ret);
+		BUG_ON(ret && ret != -EEXIST);
 	}
 
 	/* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@
 		if (ret && ret != -ESTALE)
 			goto out;
 
+		if (ret == -ESTALE && root == root->fs_info->tree_root) {
+			struct btrfs_root *dead_root;
+			struct btrfs_fs_info *fs_info = root->fs_info;
+			int is_dead_root = 0;
+
+			/*
+			 * this is an orphan in the tree root. Currently these
+			 * could come from 2 sources:
+			 *  a) a snapshot deletion in progress
+			 *  b) a free space cache inode
+			 * We need to distinguish those two, as the snapshot
+			 * orphan must not get deleted.
+			 * find_dead_roots already ran before us, so if this
+			 * is a snapshot deletion, we should find the root
+			 * in the dead_roots list
+			 */
+			spin_lock(&fs_info->trans_lock);
+			list_for_each_entry(dead_root, &fs_info->dead_roots,
+					    root_list) {
+				if (dead_root->root_key.objectid ==
+				    found_key.objectid) {
+					is_dead_root = 1;
+					break;
+				}
+			}
+			spin_unlock(&fs_info->trans_lock);
+			if (is_dead_root) {
+				/* prevent this orphan from being found again */
+				key.offset = found_key.objectid - 1;
+				continue;
+			}
+		}
 		/*
 		 * Inode is already gone but the orphan item is still there,
 		 * kill the orphan item.
@@ -2191,7 +2224,14 @@
 				continue;
 			}
 			nr_truncate++;
+			/*
+			 * Need to hold the imutex for reservation purposes, not
+			 * a huge deal here but I have a WARN_ON in
+			 * btrfs_delalloc_reserve_space to catch offenders.
+			 */
+			mutex_lock(&inode->i_mutex);
 			ret = btrfs_truncate(inode);
+			mutex_unlock(&inode->i_mutex);
 		} else {
 			nr_unlink++;
 		}
@@ -3327,7 +3367,7 @@
 			u64 hint_byte = 0;
 			hole_size = last_byte - cur_offset;
 
-			trans = btrfs_start_transaction(root, 2);
+			trans = btrfs_start_transaction(root, 3);
 			if (IS_ERR(trans)) {
 				err = PTR_ERR(trans);
 				break;
@@ -3337,6 +3377,7 @@
 						 cur_offset + hole_size,
 						 &hint_byte, 1);
 			if (err) {
+				btrfs_update_inode(trans, root, inode);
 				btrfs_end_transaction(trans, root);
 				break;
 			}
@@ -3346,6 +3387,7 @@
 					0, hole_size, 0, hole_size,
 					0, 0, 0);
 			if (err) {
+				btrfs_update_inode(trans, root, inode);
 				btrfs_end_transaction(trans, root);
 				break;
 			}
@@ -3353,6 +3395,7 @@
 			btrfs_drop_extent_cache(inode, hole_start,
 					last_byte - 1, 0);
 
+			btrfs_update_inode(trans, root, inode);
 			btrfs_end_transaction(trans, root);
 		}
 		free_extent_map(em);
@@ -3370,6 +3413,8 @@
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize)
 {
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
 	loff_t oldsize = i_size_read(inode);
 	int ret;
 
@@ -3377,16 +3422,19 @@
 		return 0;
 
 	if (newsize > oldsize) {
-		i_size_write(inode, newsize);
-		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
 		truncate_pagecache(inode, oldsize, newsize);
 		ret = btrfs_cont_expand(inode, oldsize, newsize);
-		if (ret) {
-			btrfs_setsize(inode, oldsize);
+		if (ret)
 			return ret;
-		}
 
-		mark_inode_dirty(inode);
+		trans = btrfs_start_transaction(root, 1);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
+
+		i_size_write(inode, newsize);
+		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+		ret = btrfs_update_inode(trans, root, inode);
+		btrfs_end_transaction_throttle(trans, root);
 	} else {
 
 		/*
@@ -3426,9 +3474,9 @@
 
 	if (attr->ia_valid) {
 		setattr_copy(inode, attr);
-		mark_inode_dirty(inode);
+		err = btrfs_dirty_inode(inode);
 
-		if (attr->ia_valid & ATTR_MODE)
+		if (!err && attr->ia_valid & ATTR_MODE)
 			err = btrfs_acl_chmod(inode);
 	}
 
@@ -4204,42 +4252,80 @@
  * FIXME, needs more benchmarking...there are no reasons other than performance
  * to keep or drop this code.
  */
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
 	int ret;
 
 	if (BTRFS_I(inode)->dummy_inode)
-		return;
+		return 0;
 
 	trans = btrfs_join_transaction(root);
-	BUG_ON(IS_ERR(trans));
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 
 	ret = btrfs_update_inode(trans, root, inode);
 	if (ret && ret == -ENOSPC) {
 		/* whoops, lets try again with the full transaction */
 		btrfs_end_transaction(trans, root);
 		trans = btrfs_start_transaction(root, 1);
-		if (IS_ERR(trans)) {
-			printk_ratelimited(KERN_ERR "btrfs: fail to "
-				       "dirty  inode %llu error %ld\n",
-				       (unsigned long long)btrfs_ino(inode),
-				       PTR_ERR(trans));
-			return;
-		}
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
 
 		ret = btrfs_update_inode(trans, root, inode);
-		if (ret) {
-			printk_ratelimited(KERN_ERR "btrfs: fail to "
-				       "dirty  inode %llu error %d\n",
-				       (unsigned long long)btrfs_ino(inode),
-				       ret);
-		}
 	}
 	btrfs_end_transaction(trans, root);
 	if (BTRFS_I(inode)->delayed_node)
 		btrfs_balance_delayed_items(root);
+
+	return ret;
+}
+
+/*
+ * This is a copy of file_update_time.  We need this so we can return error on
+ * ENOSPC for updating the inode in the case of file writes and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct timespec now;
+	int ret;
+	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+	/* First try to exhaust all avenues to not sync */
+	if (IS_NOCMTIME(inode))
+		return 0;
+
+	now = current_fs_time(inode->i_sb);
+	if (!timespec_equal(&inode->i_mtime, &now))
+		sync_it = S_MTIME;
+
+	if (!timespec_equal(&inode->i_ctime, &now))
+		sync_it |= S_CTIME;
+
+	if (IS_I_VERSION(inode))
+		sync_it |= S_VERSION;
+
+	if (!sync_it)
+		return 0;
+
+	/* Finally allowed to write? Takes lock. */
+	if (mnt_want_write_file(file))
+		return 0;
+
+	/* Only change inode inside the lock region */
+	if (sync_it & S_VERSION)
+		inode_inc_iversion(inode);
+	if (sync_it & S_CTIME)
+		inode->i_ctime = now;
+	if (sync_it & S_MTIME)
+		inode->i_mtime = now;
+	ret = btrfs_dirty_inode(inode);
+	if (!ret)
+		mark_inode_dirty_sync(inode);
+	mnt_drop_write(file->f_path.mnt);
+	return ret;
 }
 
 /*
@@ -4326,8 +4412,8 @@
 				     struct btrfs_root *root,
 				     struct inode *dir,
 				     const char *name, int name_len,
-				     u64 ref_objectid, u64 objectid, int mode,
-				     u64 *index)
+				     u64 ref_objectid, u64 objectid,
+				     umode_t mode, u64 *index)
 {
 	struct inode *inode;
 	struct btrfs_inode_item *inode_item;
@@ -4504,17 +4590,13 @@
 	int err = btrfs_add_link(trans, dir, inode,
 				 dentry->d_name.name, dentry->d_name.len,
 				 backref, index);
-	if (!err) {
-		d_instantiate(dentry, inode);
-		return 0;
-	}
 	if (err > 0)
 		err = -EEXIST;
 	return err;
 }
 
 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
-			int mode, dev_t rdev)
+			umode_t mode, dev_t rdev)
 {
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -4555,13 +4637,21 @@
 		goto out_unlock;
 	}
 
+	/*
+	 * If the active LSM wants to access the inode during
+	 * d_instantiate it needs these. Smack checks to see
+	 * if the filesystem supports xattrs by looking at the
+	 * ops vector.
+	 */
+
+	inode->i_op = &btrfs_special_inode_operations;
 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
 	if (err)
 		drop_inode = 1;
 	else {
-		inode->i_op = &btrfs_special_inode_operations;
 		init_special_inode(inode, inode->i_mode, rdev);
 		btrfs_update_inode(trans, root, inode);
+		d_instantiate(dentry, inode);
 	}
 out_unlock:
 	nr = trans->blocks_used;
@@ -4575,7 +4665,7 @@
 }
 
 static int btrfs_create(struct inode *dir, struct dentry *dentry,
-			int mode, struct nameidata *nd)
+			umode_t mode, struct nameidata *nd)
 {
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -4613,15 +4703,23 @@
 		goto out_unlock;
 	}
 
+	/*
+	 * If the active LSM wants to access the inode during
+	 * d_instantiate it needs these. Smack checks to see
+	 * if the filesystem supports xattrs by looking at the
+	 * ops vector.
+	 */
+	inode->i_fop = &btrfs_file_operations;
+	inode->i_op = &btrfs_file_inode_operations;
+
 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
 	if (err)
 		drop_inode = 1;
 	else {
 		inode->i_mapping->a_ops = &btrfs_aops;
 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-		inode->i_fop = &btrfs_file_operations;
-		inode->i_op = &btrfs_file_inode_operations;
 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+		d_instantiate(dentry, inode);
 	}
 out_unlock:
 	nr = trans->blocks_used;
@@ -4679,6 +4777,7 @@
 		struct dentry *parent = dentry->d_parent;
 		err = btrfs_update_inode(trans, root, inode);
 		BUG_ON(err);
+		d_instantiate(dentry, inode);
 		btrfs_log_new_name(trans, inode, NULL, parent);
 	}
 
@@ -4693,7 +4792,7 @@
 	return err;
 }
 
-static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode = NULL;
 	struct btrfs_trans_handle *trans;
@@ -6303,7 +6402,12 @@
 	u64 page_start;
 	u64 page_end;
 
+	/* Need this to keep space reservations serialized */
+	mutex_lock(&inode->i_mutex);
 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+	mutex_unlock(&inode->i_mutex);
+	if (!ret)
+		ret = btrfs_update_time(vma->vm_file);
 	if (ret) {
 		if (ret == -ENOMEM)
 			ret = VM_FAULT_OOM;
@@ -6515,8 +6619,9 @@
 			/* Just need the 1 for updating the inode */
 			trans = btrfs_start_transaction(root, 1);
 			if (IS_ERR(trans)) {
-				err = PTR_ERR(trans);
-				goto out;
+				ret = err = PTR_ERR(trans);
+				trans = NULL;
+				break;
 			}
 		}
 
@@ -6656,7 +6761,6 @@
 static void btrfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
 
@@ -7076,14 +7180,21 @@
 		goto out_unlock;
 	}
 
+	/*
+	 * If the active LSM wants to access the inode during
+	 * d_instantiate it needs these. Smack checks to see
+	 * if the filesystem supports xattrs by looking at the
+	 * ops vector.
+	 */
+	inode->i_fop = &btrfs_file_operations;
+	inode->i_op = &btrfs_file_inode_operations;
+
 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
 	if (err)
 		drop_inode = 1;
 	else {
 		inode->i_mapping->a_ops = &btrfs_aops;
 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-		inode->i_fop = &btrfs_file_operations;
-		inode->i_op = &btrfs_file_inode_operations;
 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
 	}
 	if (drop_inode)
@@ -7132,6 +7243,8 @@
 		drop_inode = 1;
 
 out_unlock:
+	if (!err)
+		d_instantiate(dentry, inode);
 	nr = trans->blocks_used;
 	btrfs_end_transaction_throttle(trans, root);
 	if (drop_inode) {
@@ -7353,6 +7466,7 @@
 	.follow_link	= page_follow_link_light,
 	.put_link	= page_put_link,
 	.getattr	= btrfs_getattr,
+	.setattr	= btrfs_setattr,
 	.permission	= btrfs_permission,
 	.setxattr	= btrfs_setxattr,
 	.getxattr	= btrfs_getxattr,
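
The mknod, create and symlink hunks above all set inode->i_op (and i_fop where relevant) before btrfs_add_nondir() and only call d_instantiate() once the inode is fully set up, so that an active LSM such as Smack can probe the xattr handlers at instantiation time. A minimal sketch of the resulting ordering, using the names from the hunks above with error handling trimmed:

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;	/* LSM may inspect the ops vector */

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (!err)
		d_instantiate(dentry, inode);		/* inode is fully wired up by now */
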
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 72d4616..5441ff1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -201,7 +201,7 @@
 		}
 	}
 
-	ret = mnt_want_write(file->f_path.mnt);
+	ret = mnt_want_write_file(file);
 	if (ret)
 		goto out_unlock;
 
@@ -252,14 +252,14 @@
 	trans = btrfs_join_transaction(root);
 	BUG_ON(IS_ERR(trans));
 
+	btrfs_update_iflags(inode);
+	inode->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, inode);
 	BUG_ON(ret);
 
-	btrfs_update_iflags(inode);
-	inode->i_ctime = CURRENT_TIME;
 	btrfs_end_transaction(trans, root);
 
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 
 	ret = 0;
  out_unlock:
@@ -858,8 +858,10 @@
 		return 0;
 	file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
+	mutex_lock(&inode->i_mutex);
 	ret = btrfs_delalloc_reserve_space(inode,
 					   num_pages << PAGE_CACHE_SHIFT);
+	mutex_unlock(&inode->i_mutex);
 	if (ret)
 		return ret;
 again:
@@ -1853,7 +1855,7 @@
 		goto out;
 	}
 
-	err = mnt_want_write(file->f_path.mnt);
+	err = mnt_want_write_file(file);
 	if (err)
 		goto out;
 
@@ -1969,7 +1971,7 @@
 	dput(dentry);
 out_unlock_dir:
 	mutex_unlock(&dir->i_mutex);
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 out:
 	kfree(vol_args);
 	return err;
@@ -1985,7 +1987,7 @@
 	if (btrfs_root_readonly(root))
 		return -EROFS;
 
-	ret = mnt_want_write(file->f_path.mnt);
+	ret = mnt_want_write_file(file);
 	if (ret)
 		return ret;
 
@@ -2038,7 +2040,7 @@
 		ret = -EINVAL;
 	}
 out:
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 	return ret;
 }
 
@@ -2193,7 +2195,7 @@
 	if (btrfs_root_readonly(root))
 		return -EROFS;
 
-	ret = mnt_want_write(file->f_path.mnt);
+	ret = mnt_want_write_file(file);
 	if (ret)
 		return ret;
 
@@ -2508,7 +2510,7 @@
 out_fput:
 	fput(src_file);
 out_drop_write:
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 	return ret;
 }
 
@@ -2547,7 +2549,7 @@
 	if (btrfs_root_readonly(root))
 		goto out;
 
-	ret = mnt_want_write(file->f_path.mnt);
+	ret = mnt_want_write_file(file);
 	if (ret)
 		goto out;
 
@@ -2563,7 +2565,7 @@
 
 out_drop:
 	atomic_dec(&root->fs_info->open_ioctl_trans);
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 out:
 	return ret;
 }
@@ -2798,7 +2800,7 @@
 
 	atomic_dec(&root->fs_info->open_ioctl_trans);
 
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 	return 0;
 }
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index dff29d5..cfb5543 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2947,7 +2947,9 @@
 	index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
 	last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
 	while (index <= last_index) {
+		mutex_lock(&inode->i_mutex);
 		ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+		mutex_unlock(&inode->i_mutex);
 		if (ret)
 			goto out;
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c27bcb6..ddf2c90 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1535,18 +1535,22 @@
 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	int ret = 0;
 
 	mutex_lock(&fs_info->scrub_lock);
 	if (fs_info->scrub_workers_refcnt == 0) {
 		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
 			   fs_info->thread_pool_size, &fs_info->generic_worker);
 		fs_info->scrub_workers.idle_thresh = 4;
-		btrfs_start_workers(&fs_info->scrub_workers, 1);
+		ret = btrfs_start_workers(&fs_info->scrub_workers);
+		if (ret)
+			goto out;
 	}
 	++fs_info->scrub_workers_refcnt;
+out:
 	mutex_unlock(&fs_info->scrub_lock);
 
-	return 0;
+	return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index e28ad4b..ae488aa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -40,7 +40,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/cleancache.h>
-#include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
@@ -661,9 +661,9 @@
 	return ret;
 }
 
-static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 {
-	struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
+	struct btrfs_root *root = btrfs_sb(dentry->d_sb);
 	struct btrfs_fs_info *info = root->fs_info;
 	char *compress_type;
 
@@ -1053,7 +1053,7 @@
 	u64 avail_space;
 	u64 used_space;
 	u64 min_stripe_size;
-	int min_stripes = 1;
+	int min_stripes = 1, num_stripes = 1;
 	int i = 0, nr_devices;
 	int ret;
 
@@ -1067,12 +1067,16 @@
 
 	/* calc min stripe number for data space alloction */
 	type = btrfs_get_alloc_profile(root, 1);
-	if (type & BTRFS_BLOCK_GROUP_RAID0)
+	if (type & BTRFS_BLOCK_GROUP_RAID0) {
 		min_stripes = 2;
-	else if (type & BTRFS_BLOCK_GROUP_RAID1)
+		num_stripes = nr_devices;
+	} else if (type & BTRFS_BLOCK_GROUP_RAID1) {
 		min_stripes = 2;
-	else if (type & BTRFS_BLOCK_GROUP_RAID10)
+		num_stripes = 2;
+	} else if (type & BTRFS_BLOCK_GROUP_RAID10) {
 		min_stripes = 4;
+		num_stripes = 4;
+	}
 
 	if (type & BTRFS_BLOCK_GROUP_DUP)
 		min_stripe_size = 2 * BTRFS_STRIPE_LEN;
@@ -1141,13 +1145,16 @@
 	i = nr_devices - 1;
 	avail_space = 0;
 	while (nr_devices >= min_stripes) {
+		if (num_stripes > nr_devices)
+			num_stripes = nr_devices;
+
 		if (devices_info[i].max_avail >= min_stripe_size) {
 			int j;
 			u64 alloc_size;
 
-			avail_space += devices_info[i].max_avail * min_stripes;
+			avail_space += devices_info[i].max_avail * num_stripes;
 			alloc_size = devices_info[i].max_avail;
-			for (j = i + 1 - min_stripes; j <= i; j++)
+			for (j = i + 1 - num_stripes; j <= i; j++)
 				devices_info[j].max_avail -= alloc_size;
 		}
 		i--;
@@ -1264,6 +1271,16 @@
 	return 0;
 }
 
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+	int ret;
+
+	ret = btrfs_dirty_inode(inode);
+	if (ret)
+		printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+				   "error %d\n", btrfs_ino(inode), ret);
+}
+
 static const struct super_operations btrfs_super_ops = {
 	.drop_inode	= btrfs_drop_inode,
 	.evict_inode	= btrfs_evict_inode,
@@ -1271,7 +1288,7 @@
 	.sync_fs	= btrfs_sync_fs,
 	.show_options	= btrfs_show_options,
 	.write_inode	= btrfs_write_inode,
-	.dirty_inode	= btrfs_dirty_inode,
+	.dirty_inode	= btrfs_fs_dirty_inode,
 	.alloc_inode	= btrfs_alloc_inode,
 	.destroy_inode	= btrfs_destroy_inode,
 	.statfs		= btrfs_statfs,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0a8c8f8..f4b839f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -295,6 +295,12 @@
 			btrfs_requeue_work(&device->work);
 			goto done;
 		}
+		/* unplug every 64 requests just for good measure */
+		if (batch_run % 64 == 0) {
+			blk_finish_plug(&plug);
+			blk_start_plug(&plug);
+			sync_pending = 0;
+		}
 	}
 
 	cond_resched();
@@ -3258,7 +3264,7 @@
 		 */
 		if (atomic_read(&bbio->error) > bbio->max_errors) {
 			err = -EIO;
-		} else if (err) {
+		} else {
 			/*
 			 * this bio is actually up to date, we didn't
 			 * go over the max number of errors
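
The first volumes.c hunk above breaks and re-opens the request plug every 64 submissions so that queued bios are actually issued while the worker keeps batching. A minimal sketch of that plugging pattern (struct blk_plug and the plug helpers come from <linux/blkdev.h>); pending_work() and submit_one_request() are hypothetical stand-ins for the worker's own loop:

	struct blk_plug plug;
	int batch_run = 0;

	blk_start_plug(&plug);
	while (pending_work()) {		/* hypothetical: more bios to send? */
		submit_one_request();		/* hypothetical: issue one queued bio */
		if (++batch_run % 64 == 0) {
			/* flush what has been queued, then start a fresh plug */
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
		}
	}
	blk_finish_plug(&plug);
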
diff --git a/fs/buffer.c b/fs/buffer.c
index 19d8eb7..1a30db7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,7 +41,6 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
-#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -231,55 +230,6 @@
 	return ret;
 }
 
-/* If invalidate_buffers() will trash dirty buffers, it means some kind
-   of fs corruption is going on. Trashing dirty data always imply losing
-   information that was supposed to be just stored on the physical layer
-   by the user.
-
-   Thus invalidate_buffers in general usage is not allwowed to trash
-   dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
-   be preserved.  These buffers are simply skipped.
-  
-   We also skip buffers which are still in use.  For example this can
-   happen if a userspace program is reading the block device.
-
-   NOTE: In the case where the user removed a removable-media-disk even if
-   there's still dirty data not synced on disk (due a bug in the device driver
-   or due an error of the user), by not destroying the dirty buffers we could
-   generate corruption also on the next media inserted, thus a parameter is
-   necessary to handle this case in the most safe way possible (trying
-   to not corrupt also the new disk inserted with the data belonging to
-   the old now corrupted disk). Also for the ramdisk the natural thing
-   to do in order to release the ramdisk memory is to destroy dirty buffers.
-
-   These are two special cases. Normal usage imply the device driver
-   to issue a sync on the device (without waiting I/O completion) and
-   then an invalidate_buffers call that doesn't trash dirty buffers.
-
-   For handling cache coherency with the blkdev pagecache the 'update' case
-   is been introduced. It is needed to re-read from disk any pinned
-   buffer. NOTE: re-reading from disk is destructive so we can do it only
-   when we assume nobody is changing the buffercache under our I/O and when
-   we think the disk contains more recent information than the buffercache.
-   The update == 1 pass marks the buffers we need to update, the update == 2
-   pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev)
-{
-	struct address_space *mapping = bdev->bd_inode->i_mapping;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	invalidate_bh_lrus();
-	lru_add_drain_all();	/* make sure all lru add caches are flushed */
-	invalidate_mapping_pages(mapping, 0, -1);
-	/* 99% of the time, we don't need to flush the cleancache on the bdev.
-	 * But, for the strange corners, lets be cautious
-	 */
-	cleancache_flush_inode(mapping);
-}
-EXPORT_SYMBOL(invalidate_bdev);
-
 /*
  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 1064805..67bef6d 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -11,7 +11,6 @@
 
 #include <linux/slab.h>
 #include <linux/mount.h>
-#include <linux/buffer_head.h>
 #include "internal.h"
 
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4144caf..173b1d2 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@
 	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
 
 	/* dirty the head */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_head_snapc == NULL)
 		ci->i_head_snapc = ceph_get_snap_context(snapc);
 	++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@
 	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
 	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
 	     snapc, snapc->seq, snapc->num_snaps);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	/* now adjust page */
 	spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@
 	struct ceph_snap_context *snapc = NULL;
 	struct ceph_cap_snap *capsnap = NULL;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
 		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
 		     capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@
 		dout(" head snapc %p has %d dirty pages\n",
 		     snapc, ci->i_wrbuffer_ref_head);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return snapc;
 }
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0f327c6..b60fc8bf 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -309,7 +309,7 @@
 /*
  * Find ceph_cap for given mds, if any.
  *
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
  */
 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
@@ -332,9 +332,9 @@
 {
 	struct ceph_cap *cap;
 
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ci, mds);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return cap;
 }
 
@@ -361,15 +361,16 @@
 
 int ceph_get_cap_mds(struct inode *inode)
 {
+	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds;
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	mds = __ceph_get_cap_mds(ceph_inode(inode));
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return mds;
 }
 
 /*
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static void __insert_cap_node(struct ceph_inode_info *ci,
 			      struct ceph_cap *new)
@@ -415,7 +416,7 @@
  *
  * If I_FLUSH is set, leave the inode at the front of the list.
  *
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
  *    -> we take mdsc->cap_delay_lock
  */
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@
 /*
  * Cancel delayed work on cap.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
 			       struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@
 		wanted |= ceph_caps_for_mode(fmode);
 
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ci, mds);
 	if (!cap) {
 		if (new_cap) {
 			cap = new_cap;
 			new_cap = NULL;
 		} else {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			new_cap = get_cap(mdsc, caps_reservation);
 			if (new_cap == NULL)
 				return -ENOMEM;
@@ -625,7 +626,7 @@
 
 	if (fmode >= 0)
 		__ceph_get_fmode(ci, fmode);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	wake_up_all(&ci->i_cap_wq);
 	return 0;
 }
@@ -792,7 +793,7 @@
 	struct rb_node *p;
 	int ret = 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 		cap = rb_entry(p, struct ceph_cap, ci_node);
 		if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@
 			break;
 		}
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	dout("ceph_caps_revoking %p %s = %d\n", inode,
 	     ceph_cap_string(mask), ret);
 	return ret;
@@ -855,7 +856,7 @@
 }
 
 /*
- * called under i_lock
+ * called under i_ceph_lock
  */
 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 {
@@ -865,7 +866,7 @@
 /*
  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
  *
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
 void __ceph_remove_cap(struct ceph_cap *cap)
@@ -927,7 +928,7 @@
 			u64 size, u64 max_size,
 			struct timespec *mtime, struct timespec *atime,
 			u64 time_warp_seq,
-			uid_t uid, gid_t gid, mode_t mode,
+			uid_t uid, gid_t gid, umode_t mode,
 			u64 xattr_version,
 			struct ceph_buffer *xattrs_buf,
 			u64 follows)
@@ -1028,7 +1029,7 @@
 
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
@@ -1049,7 +1050,7 @@
 
 /*
  * Send a cap msg on the given inode.  Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
  *
  * Make note of max_size reported/requested from mds, revoked caps
  * that have now been implemented.
@@ -1061,13 +1062,13 @@
  * Return non-zero if delayed release, or we experienced an error
  * such that the caller should requeue + retry later.
  *
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
  * caller should hold snap_rwsem (read), s_mutex.
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 		      int op, int used, int want, int retain, int flushing,
 		      unsigned *pflush_tid)
-	__releases(cap->ci->vfs_inode->i_lock)
+	__releases(cap->ci->i_ceph_lock)
 {
 	struct ceph_inode_info *ci = cap->ci;
 	struct inode *inode = &ci->vfs_inode;
@@ -1077,7 +1078,7 @@
 	u64 size, max_size;
 	struct timespec mtime, atime;
 	int wake = 0;
-	mode_t mode;
+	umode_t mode;
 	uid_t uid;
 	gid_t gid;
 	struct ceph_mds_session *session;
@@ -1170,7 +1171,7 @@
 		xattr_version = ci->i_xattrs.version;
 	}
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
 		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@
  * Unless @again is true, skip cap_snaps that were already sent to
  * the MDS (i.e., during this session).
  *
- * Called under i_lock.  Takes s_mutex as needed.
+ * Called under i_ceph_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
 			struct ceph_mds_session **psession,
 			int again)
-		__releases(ci->vfs_inode->i_lock)
-		__acquires(ci->vfs_inode->i_lock)
+		__releases(ci->i_ceph_lock)
+		__acquires(ci->i_ceph_lock)
 {
 	struct inode *inode = &ci->vfs_inode;
 	int mds;
@@ -1261,7 +1262,7 @@
 			session = NULL;
 		}
 		if (!session) {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			mutex_lock(&mdsc->mutex);
 			session = __ceph_lookup_mds_session(mdsc, mds);
 			mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@
 			 * deletion or migration.  retry, and we'll
 			 * get a better @mds value next time.
 			 */
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			goto retry;
 		}
 
@@ -1285,7 +1286,7 @@
 			list_del_init(&capsnap->flushing_item);
 		list_add_tail(&capsnap->flushing_item,
 			      &session->s_cap_snaps_flushing);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 
 		dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
 		     inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@
 		next_follows = capsnap->follows + 1;
 		ceph_put_cap_snap(capsnap);
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		goto retry;
 	}
 
@@ -1322,11 +1323,9 @@
 
 static void ceph_flush_snaps(struct ceph_inode_info *ci)
 {
-	struct inode *inode = &ci->vfs_inode;
-
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__ceph_flush_snaps(ci, NULL, 0);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -1373,7 +1372,7 @@
  * Add dirty inode to the flushing list.  Assigned a seq number so we
  * can wait for caps to flush without starving.
  *
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static int __mark_caps_flushing(struct inode *inode,
 				 struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	u32 invalidating_gen = ci->i_rdcache_gen;
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	invalidate_mapping_pages(&inode->i_data, 0, -1);
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	if (inode->i_data.nrpages == 0 &&
 	    invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@
 	if (mdsc->stopping)
 		is_delayed = 1;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	if (ci->i_ceph_flags & CEPH_I_FLUSH)
 		flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@
 		__ceph_flush_snaps(ci, &session, 0);
 	goto retry_locked;
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 retry_locked:
 	file_wanted = __ceph_caps_file_wanted(ci);
 	used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@
 			if (mutex_trylock(&session->s_mutex) == 0) {
 				dout("inverting session/ino locks on %p\n",
 				     session);
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&ci->i_ceph_lock);
 				if (took_snap_rwsem) {
 					up_read(&mdsc->snap_rwsem);
 					took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@
 			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
 				dout("inverting snap/in locks on %p\n",
 				     inode);
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&ci->i_ceph_lock);
 				down_read(&mdsc->snap_rwsem);
 				took_snap_rwsem = 1;
 				goto retry;
@@ -1664,10 +1663,10 @@
 		mds = cap->mds;  /* remember mds, so we don't repeat */
 		sent++;
 
-		/* __send_cap drops i_lock */
+		/* __send_cap drops i_ceph_lock */
 		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
 				      retain, flushing, NULL);
-		goto retry; /* retake i_lock and restart our cap scan. */
+		goto retry; /* retake i_ceph_lock and restart our cap scan. */
 	}
 
 	/*
@@ -1681,7 +1680,7 @@
 	else if (!is_delayed || force_requeue)
 		__cap_delay_requeue(mdsc, ci);
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (queue_invalidate)
 		ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@
 	int flushing = 0;
 
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
 		goto out;
@@ -1716,7 +1715,7 @@
 		int delayed;
 
 		if (!session) {
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			session = cap->session;
 			mutex_lock(&session->s_mutex);
 			goto retry;
@@ -1727,18 +1726,18 @@
 
 		flushing = __mark_caps_flushing(inode, session);
 
-		/* __send_cap drops i_lock */
+		/* __send_cap drops i_ceph_lock */
 		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
 				     cap->issued | cap->implemented, flushing,
 				     flush_tid);
 		if (!delayed)
 			goto out_unlocked;
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		__cap_delay_requeue(mdsc, ci);
 	}
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 out_unlocked:
 	if (session && unlock_session)
 		mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int i, ret = 1;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	for (i = 0; i < CEPH_CAP_BITS; i++)
 		if ((ci->i_flushing_caps & (1 << i)) &&
 		    ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@
 			ret = 0;
 			break;
 		}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return ret;
 }
 
@@ -1868,10 +1867,10 @@
 		struct ceph_mds_client *mdsc =
 			ceph_sb_to_client(inode->i_sb)->mdsc;
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (__ceph_caps_dirty(ci))
 			__cap_delay_requeue_front(mdsc, ci);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
 	return err;
 }
@@ -1894,7 +1893,7 @@
 		struct inode *inode = &ci->vfs_inode;
 		struct ceph_cap *cap;
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		cap = ci->i_auth_cap;
 		if (cap && cap->session == session) {
 			dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@
 			pr_err("%p auth cap %p not mds%d ???\n", inode,
 			       cap, session->s_mds);
 		}
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
 }
 
@@ -1921,7 +1920,7 @@
 		struct ceph_cap *cap;
 		int delayed = 0;
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		cap = ci->i_auth_cap;
 		if (cap && cap->session == session) {
 			dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@
 					     cap->issued | cap->implemented,
 					     ci->i_flushing_caps, NULL);
 			if (delayed) {
-				spin_lock(&inode->i_lock);
+				spin_lock(&ci->i_ceph_lock);
 				__cap_delay_requeue(mdsc, ci);
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&ci->i_ceph_lock);
 			}
 		} else {
 			pr_err("%p auth cap %p not mds%d ???\n", inode,
 			       cap, session->s_mds);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 		}
 	}
 }
@@ -1952,7 +1951,7 @@
 	struct ceph_cap *cap;
 	int delayed = 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = ci->i_auth_cap;
 	dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
 	     ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@
 				     cap->issued | cap->implemented,
 				     ci->i_flushing_caps, NULL);
 		if (delayed) {
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			__cap_delay_requeue(mdsc, ci);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 		}
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
 }
 
@@ -1978,7 +1977,7 @@
  * Take references to capabilities we hold, so that we don't release
  * them to the MDS prematurely.
  *
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
  */
 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
 {
@@ -2016,7 +2015,7 @@
 
 	dout("get_cap_refs %p need %s want %s\n", inode,
 	     ceph_cap_string(need), ceph_cap_string(want));
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	/* make sure file is actually open */
 	file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@
 		     ceph_cap_string(have), ceph_cap_string(need));
 	}
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	dout("get_cap_refs %p ret %d got %s\n", inode,
 	     ret, ceph_cap_string(*got));
 	return ret;
@@ -2094,7 +2093,7 @@
 	int check = 0;
 
 	/* do we need to explicitly request a larger max_size? */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if ((endoff >= ci->i_max_size ||
 	     endoff > (inode->i_size << 1)) &&
 	    endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@
 		ci->i_wanted_max_size = endoff;
 		check = 1;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (check)
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
@@ -2140,9 +2139,9 @@
  */
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__take_cap_refs(ci, caps);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2160,7 +2159,7 @@
 	int last = 0, put = 0, flushsnaps = 0, wake = 0;
 	struct ceph_cap_snap *capsnap;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (had & CEPH_CAP_PIN)
 		--ci->i_pin_ref;
 	if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@
 				}
 			}
 		}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
 	     last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@
 	int found = 0;
 	struct ceph_cap_snap *capsnap = NULL;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_wrbuffer_ref -= nr;
 	last = !ci->i_wrbuffer_ref;
 
@@ -2274,7 +2273,7 @@
 		}
 	}
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (last) {
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
  *
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
  *
  * return value:
  *  0 - ok
@@ -2302,7 +2301,7 @@
 			     struct ceph_mds_session *session,
 			     struct ceph_cap *cap,
 			     struct ceph_buffer *xattr_buf)
-		__releases(inode->i_lock)
+		__releases(ci->i_ceph_lock)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds = session->s_mds;
@@ -2453,7 +2452,7 @@
 	}
 	BUG_ON(cap->issued & ~cap->implemented);
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (writeback)
 		/*
 		 * queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@
 				 struct ceph_mds_caps *m,
 				 struct ceph_mds_session *session,
 				 struct ceph_cap *cap)
-	__releases(inode->i_lock)
+	__releases(ci->i_ceph_lock)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@
 	wake_up_all(&ci->i_cap_wq);
 
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (drop)
 		iput(inode);
 }
@@ -2562,7 +2561,7 @@
 	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
 	     inode, ci, session->s_mds, follows);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
 		if (capsnap->follows == follows) {
 			if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@
 			     capsnap, capsnap->follows);
 		}
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (drop)
 		iput(inode);
 }
@@ -2598,7 +2597,7 @@
 static void handle_cap_trunc(struct inode *inode,
 			     struct ceph_mds_caps *trunc,
 			     struct ceph_mds_session *session)
-	__releases(inode->i_lock)
+	__releases(ci->i_ceph_lock)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds = session->s_mds;
@@ -2617,7 +2616,7 @@
 	     inode, mds, seq, truncate_size, truncate_seq);
 	queue_trunc = ceph_fill_file_size(inode, issued,
 					  truncate_seq, truncate_size, size);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (queue_trunc)
 		ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@
 	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
 	     inode, ci, mds, mseq);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	/* make sure we haven't seen a higher mseq */
 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@
 	}
 	/* else, we already released it */
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2745,9 +2744,9 @@
 	up_read(&mdsc->snap_rwsem);
 
 	/* make sure we re-request max_size, if necessary */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_requested_max_size = 0;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2762,6 +2761,7 @@
 	struct ceph_mds_client *mdsc = session->s_mdsc;
 	struct super_block *sb = mdsc->fsc->sb;
 	struct inode *inode;
+	struct ceph_inode_info *ci;
 	struct ceph_cap *cap;
 	struct ceph_mds_caps *h;
 	int mds = session->s_mds;
@@ -2815,6 +2815,7 @@
 
 	/* lookup ino */
 	inode = ceph_find_inode(sb, vino);
+	ci = ceph_inode(inode);
 	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
 	     vino.snap, inode);
 	if (!inode) {
@@ -2844,16 +2845,16 @@
 	}
 
 	/* the rest require a cap */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ceph_inode(inode), mds);
 	if (!cap) {
 		dout(" no cap on %p ino %llx.%llx from mds%d\n",
 		     inode, ceph_ino(inode), ceph_snap(inode), mds);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		goto flush_cap_releases;
 	}
 
-	/* note that each of these drops i_lock for us */
+	/* note that each of these drops i_ceph_lock for us */
 	switch (op) {
 	case CEPH_CAP_OP_REVOKE:
 	case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@
 		break;
 
 	default:
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
 		       ceph_cap_op_name(op));
 	}
@@ -2962,13 +2963,13 @@
 	struct inode *inode = &ci->vfs_inode;
 	int last = 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
 	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
 	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
 	if (--ci->i_nr_by_mode[fmode] == 0)
 		last++;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (last && ci->i_vino.snap == CEPH_NOSNAP)
 		ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@
 	int used, dirty;
 	int ret = 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	used = __ceph_caps_used(ci);
 	dirty = __ceph_caps_dirty(ci);
 
@@ -3046,7 +3047,7 @@
 			     inode, cap, ceph_cap_string(cap->issued));
 		}
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return ret;
 }
 
@@ -3061,7 +3062,7 @@
 
 	/*
 	 * force an record for the directory caps if we have a dentry lease.
-	 * this is racy (can't take i_lock and d_lock together), but it
+	 * this is racy (can't take i_ceph_lock and d_lock together), but it
 	 * doesn't have to be perfect; the mds will revoke anything we don't
 	 * release.
 	 */
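
Throughout the ceph hunks the VFS inode->i_lock is no longer overloaded to protect ceph-private state; a dedicated ci->i_ceph_lock takes over (it is initialized in ceph_alloc_inode in the fs/ceph/inode.c hunk further down). A minimal before/after sketch of the converted locking pattern:

	struct ceph_inode_info *ci = ceph_inode(inode);

	/* was: spin_lock(&inode->i_lock); */
	spin_lock(&ci->i_ceph_lock);
	/* ... inspect or update caps, snap context, sizes ... */
	spin_unlock(&ci->i_ceph_lock);
	/* was: spin_unlock(&inode->i_lock); */
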
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index bca3948..74fd747 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -281,18 +281,18 @@
 	}
 
 	/* can we use the dcache? */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if ((filp->f_pos == 2 || fi->dentry) &&
 	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
 	    ceph_snap(inode) != CEPH_SNAPDIR &&
 	    ceph_dir_test_complete(inode) &&
 	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		err = __dcache_readdir(filp, dirent, filldir);
 		if (err != -EAGAIN)
 			return err;
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
 	if (fi->dentry) {
 		err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@
 	 * were released during the whole readdir, and we should have
 	 * the complete dir contents in our cache.
 	 */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_release_count == fi->dir_release_count) {
 		ceph_dir_set_complete(inode);
 		ci->i_max_offset = filp->f_pos;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	dout("readdir %p filp %p done.\n", inode, filp);
 	return 0;
@@ -607,7 +607,7 @@
 		struct ceph_inode_info *ci = ceph_inode(dir);
 		struct ceph_dentry_info *di = ceph_dentry(dentry);
 
-		spin_lock(&dir->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
 		if (strncmp(dentry->d_name.name,
 			    fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@
 		    !is_root_ceph_dentry(dir, dentry) &&
 		    ceph_dir_test_complete(dir) &&
 		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
-			spin_unlock(&dir->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			dout(" dir %p complete, -ENOENT\n", dir);
 			d_add(dentry, NULL);
 			di->lease_shared_gen = ci->i_shared_gen;
 			return NULL;
 		}
-		spin_unlock(&dir->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
 
 	op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -666,7 +666,7 @@
 }
 
 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
-		      int mode, dev_t rdev)
+		      umode_t mode, dev_t rdev)
 {
 	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 	struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -676,7 +676,7 @@
 	if (ceph_snap(dir) != CEPH_NOSNAP)
 		return -EROFS;
 
-	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
+	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
 	     dir, dentry, mode, rdev);
 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
 	if (IS_ERR(req)) {
@@ -699,7 +699,7 @@
 	return err;
 }
 
-static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		       struct nameidata *nd)
 {
 	dout("create in dir %p dentry %p name '%.*s'\n",
@@ -753,7 +753,7 @@
 	return err;
 }
 
-static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 	struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -767,7 +767,7 @@
 		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
 		     dentry->d_name.len, dentry->d_name.name, dentry);
 	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
-		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
+		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
 		op = CEPH_MDS_OP_MKDIR;
 	} else {
 		goto out;
@@ -841,12 +841,12 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (inode->i_nlink == 1) {
 		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
 		ci->i_ceph_flags |= CEPH_I_NODELAY;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return drop;
 }
 
@@ -870,7 +870,7 @@
 	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
 		dout("unlink/rmdir dir %p dn %p inode %p\n",
 		     dir, dentry, inode);
-		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
+		op = S_ISDIR(dentry->d_inode->i_mode) ?
 			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
 	} else
 		goto out;
@@ -1015,10 +1015,10 @@
 	struct ceph_dentry_info *di = ceph_dentry(dentry);
 	int valid = 0;
 
-	spin_lock(&dir->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_shared_gen == di->lease_shared_gen)
 		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
-	spin_unlock(&dir->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
 	     dir, (unsigned)ci->i_shared_gen, dentry,
 	     (unsigned)di->lease_shared_gen, valid);
@@ -1094,42 +1094,19 @@
 /*
  * Set/clear/test dir complete flag on the dir's dentry.
  */
-static struct dentry * __d_find_any_alias(struct inode *inode)
-{
-	struct dentry *alias;
-
-	if (list_empty(&inode->i_dentry))
-		return NULL;
-	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
-	return alias;
-}
-
 void ceph_dir_set_complete(struct inode *inode)
 {
-	struct dentry *dentry = __d_find_any_alias(inode);
-	
-	if (dentry && ceph_dentry(dentry)) {
-		dout(" marking %p (%p) complete\n", inode, dentry);
-		set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-	}
+	/* not yet implemented */
 }
 
 void ceph_dir_clear_complete(struct inode *inode)
 {
-	struct dentry *dentry = __d_find_any_alias(inode);
-
-	if (dentry && ceph_dentry(dentry)) {
-		dout(" marking %p (%p) NOT complete\n", inode, dentry);
-		clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-	}
+	/* not yet implemented */
 }
 
 bool ceph_dir_test_complete(struct inode *inode)
 {
-	struct dentry *dentry = __d_find_any_alias(inode);
-
-	if (dentry && ceph_dentry(dentry))
-		return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
+	/* not yet implemented */
 	return false;
 }
 
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce549d3..ed72428 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -147,9 +147,9 @@
 
 	/* trivially open snapdir */
 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		__ceph_get_fmode(ci, fmode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return ceph_init_file(inode, file, fmode);
 	}
 
@@ -158,7 +158,7 @@
 	 * write) or any MDS (for read).  Update wanted set
 	 * asynchronously.
 	 */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (__ceph_is_any_real_caps(ci) &&
 	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
 		int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@
 		     inode, fmode, ceph_cap_string(wanted),
 		     ceph_cap_string(issued));
 		__ceph_get_fmode(ci, fmode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 
 		/* adjust wanted? */
 		if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@
 	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
 		   (ci->i_snap_caps & wanted) == wanted) {
 		__ceph_get_fmode(ci, fmode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return ceph_init_file(inode, file, fmode);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
 	req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@
 		 */
 		int dirty;
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		ceph_put_cap_refs(ci, got);
 
 		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@
 
 	if (ret >= 0) {
 		int dirty;
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		if (dirty)
 			__mark_inode_dirty(inode, dirty);
 	}
@@ -797,7 +797,8 @@
 
 	mutex_lock(&inode->i_mutex);
 	__ceph_do_pending_vmtruncate(inode);
-	if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+	if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
 		if (ret < 0) {
 			offset = ret;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 116f365..25283e7 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@
 
 	dout("alloc_inode %p\n", &ci->vfs_inode);
 
+	spin_lock_init(&ci->i_ceph_lock);
+
 	ci->i_version = 0;
 	ci->i_time_warp_seq = 0;
 	ci->i_ceph_flags = 0;
@@ -382,7 +384,6 @@
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -583,7 +584,7 @@
 			       iinfo->xattr_len);
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	/*
 	 * provided version will be odd if inode value is projected,
@@ -680,7 +681,7 @@
 			char *sym;
 
 			BUG_ON(symlen != inode->i_size);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 
 			err = -ENOMEM;
 			sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +690,7 @@
 			memcpy(sym, iinfo->symlink, symlen);
 			sym[symlen] = 0;
 
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			if (!ci->i_symlink)
 				ci->i_symlink = sym;
 			else
@@ -715,7 +716,7 @@
 	}
 
 no_change:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	/* queue truncate if we saw i_size decrease */
 	if (queue_trunc)
@@ -750,13 +751,13 @@
 				     info->cap.flags,
 				     caps_reservation);
 		} else {
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			dout(" %p got snap_caps %s\n", inode,
 			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
 			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
 			if (cap_fmode >= 0)
 				__ceph_get_fmode(ci, cap_fmode);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 		}
 	} else if (cap_fmode >= 0) {
 		pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +850,20 @@
 {
 	struct dentry *dir = dn->d_parent;
 	struct inode *inode = dir->d_inode;
+	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_dentry_info *di;
 
 	BUG_ON(!inode);
 
 	di = ceph_dentry(dn);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (!ceph_dir_test_complete(inode)) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return;
 	}
 	di->offset = ceph_inode(inode)->i_max_offset++;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	spin_lock(&dir->d_lock);
 	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1310,7 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int ret = 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
 	inode->i_size = size;
 	inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1320,7 @@
 	    (ci->i_reported_size << 1) < ci->i_max_size)
 		ret = 1;
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return ret;
 }
 
@@ -1376,20 +1378,20 @@
 	u32 orig_gen;
 	int check = 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
 		/* nevermind! */
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		goto out;
 	}
 	orig_gen = ci->i_rdcache_gen;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	truncate_inode_pages(&inode->i_data, 0);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (orig_gen == ci->i_rdcache_gen &&
 	    orig_gen == ci->i_rdcache_revoking) {
 		dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1401,7 +1403,7 @@
 		     inode, orig_gen, ci->i_rdcache_gen,
 		     ci->i_rdcache_revoking);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (check)
 		ceph_check_caps(ci, 0, NULL);
@@ -1460,10 +1462,10 @@
 	int wrbuffer_refs, wake = 0;
 
 retry:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_truncate_pending == 0) {
 		dout("__do_pending_vmtruncate %p none pending\n", inode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		return;
 	}
 
@@ -1474,7 +1476,7 @@
 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
 		     inode);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		filemap_write_and_wait_range(&inode->i_data, 0,
 					     inode->i_sb->s_maxbytes);
 		goto retry;
@@ -1484,15 +1486,15 @@
 	wrbuffer_refs = ci->i_wrbuffer_ref;
 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
 	     ci->i_truncate_pending, to);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	truncate_inode_pages(inode->i_mapping, to);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_truncate_pending--;
 	if (ci->i_truncate_pending == 0)
 		wake = 1;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (wrbuffer_refs == 0)
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1547,7 +1549,7 @@
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	issued = __ceph_caps_issued(ci, NULL);
 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
@@ -1695,7 +1697,7 @@
 	}
 
 	release &= issued;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (inode_dirty_flags)
 		__mark_inode_dirty(inode, inode_dirty_flags);
@@ -1717,7 +1719,7 @@
 	__ceph_do_pending_vmtruncate(inode);
 	return err;
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	ceph_mdsc_put_request(req);
 	return err;
 }
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 5a14c29..790914a59 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -241,11 +241,11 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
 	if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		ci->i_nr_by_mode[fi->fmode]--;
 		fi->fmode |= CEPH_FILE_MODE_LAZY;
 		ci->i_nr_by_mode[fi->fmode]++;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		dout("ioctl_layzio: file %p marked lazy\n", file);
 
 		ceph_check_caps(ci, 0, NULL);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 264ab70..6203d80 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -732,21 +732,21 @@
 		}
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap = NULL;
 	if (mode == USE_AUTH_MDS)
 		cap = ci->i_auth_cap;
 	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
 		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
 	if (!cap) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		goto random;
 	}
 	mds = cap->session->s_mds;
 	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
 	     inode, ceph_vinop(inode), mds,
 	     cap == ci->i_auth_cap ? "auth " : "", cap);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return mds;
 
 random:
@@ -951,7 +951,7 @@
 
 	dout("removing cap %p, ci is %p, inode is %p\n",
 	     cap, ci, &ci->vfs_inode);
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__ceph_remove_cap(cap);
 	if (!__ceph_is_any_real_caps(ci)) {
 		struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@
 		}
 		spin_unlock(&mdsc->cap_dirty_lock);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	while (drop--)
 		iput(inode);
 	return 0;
@@ -1015,10 +1015,10 @@
 
 	wake_up_all(&ci->i_cap_wq);
 	if (arg) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		ci->i_wanted_max_size = 0;
 		ci->i_requested_max_size = 0;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 	}
 	return 0;
 }
@@ -1151,7 +1151,7 @@
 	if (session->s_trim_caps <= 0)
 		return -1;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	mine = cap->issued | cap->implemented;
 	used = __ceph_caps_used(ci);
 	oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@
 		__ceph_remove_cap(cap);
 	} else {
 		/* try to drop referring dentries */
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		d_prune_aliases(inode);
 		dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
 		     inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@
 	}
 
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return 0;
 }
 
@@ -1296,7 +1296,7 @@
 					   i_flushing_item);
 			struct inode *inode = &ci->vfs_inode;
 
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			if (ci->i_cap_flush_seq <= want_flush_seq) {
 				dout("check_cap_flush still flushing %p "
 				     "seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@
 				     session->s_mds);
 				ret = 0;
 			}
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 		}
 		mutex_unlock(&session->s_mutex);
 		ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@
 			     pos, temp);
 		} else if (stop_on_nosnap && inode &&
 			   ceph_snap(inode) == CEPH_NOSNAP) {
+			spin_unlock(&temp->d_lock);
 			break;
 		} else {
 			pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
 	dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ceph_dir_clear_complete(inode);
 	ci->i_release_count++;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (req->r_dentry)
 		ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@
 	if (err)
 		goto out_free;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	cap->seq = 0;        /* reset cap seq */
 	cap->issue_seq = 0;  /* and issue_seq */
 
@@ -2445,7 +2446,7 @@
 		rec.v1.pathbase = cpu_to_le64(pathbase);
 		reclen = sizeof(rec.v1);
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 
 	if (recon_state->flock) {
 		int num_fcntl_locks, num_flock_locks;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4bb2399..a50ca0e 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -20,7 +20,7 @@
  *
  *         mdsc->snap_rwsem
  *
- *         inode->i_lock
+ *         ci->i_ceph_lock
  *                 mdsc->snap_flush_lock
  *                 mdsc->cap_delay_lock
  *
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e264371..a559c80 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -446,7 +446,7 @@
 		return;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	used = __ceph_caps_used(ci);
 	dirty = __ceph_caps_dirty(ci);
 
@@ -528,7 +528,7 @@
 		kfree(capsnap);
 	}
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -537,7 +537,7 @@
  *
  * If capsnap can now be flushed, add to snap_flush list, and return 1.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
 			    struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@
 		inode = &ci->vfs_inode;
 		ihold(inode);
 		spin_unlock(&mdsc->snap_flush_lock);
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		__ceph_flush_snaps(ci, &session, 0);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		iput(inode);
 		spin_lock(&mdsc->snap_flush_lock);
 	}
@@ -847,7 +847,7 @@
 				continue;
 			ci = ceph_inode(inode);
 
-			spin_lock(&inode->i_lock);
+			spin_lock(&ci->i_ceph_lock);
 			if (!ci->i_snap_realm)
 				goto skip_inode;
 			/*
@@ -876,7 +876,7 @@
 			oldrealm = ci->i_snap_realm;
 			ci->i_snap_realm = realm;
 			spin_unlock(&realm->inodes_with_caps_lock);
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 
 			ceph_get_snap_realm(mdsc, realm);
 			ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@
 			continue;
 
 skip_inode:
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&ci->i_ceph_lock);
 			iput(inode);
 		}
 
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 8dc73a5..11bd0fc 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -341,11 +341,11 @@
 /**
  * ceph_show_options - Show mount options in /proc/mounts
  * @m: seq_file to write to
- * @mnt: mount descriptor
+ * @root: root of that (sub)tree
  */
-static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int ceph_show_options(struct seq_file *m, struct dentry *root)
 {
-	struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
+	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
 	struct ceph_mount_options *fsopt = fsc->mount_options;
 	struct ceph_options *opt = fsc->client->options;
 
@@ -383,7 +383,7 @@
 	if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
 		seq_printf(m, ",rsize=%d", fsopt->rsize);
 	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-		seq_printf(m, ",rasize=%d", fsopt->rsize);
+		seq_printf(m, ",rasize=%d", fsopt->rasize);
 	if (fsopt->congestion_kb != default_congestion_kb())
 		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
 	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 01bf189..cb3652b 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -136,7 +136,7 @@
 	int issued, dirty;
 	struct ceph_snap_context *context;
 
-	mode_t mode;
+	umode_t mode;
 	uid_t uid;
 	gid_t gid;
 
@@ -220,7 +220,7 @@
  * The locking for D_COMPLETE is a bit odd:
  *  - we can clear it at almost any time (see ceph_d_prune)
  *  - it is only meaningful if:
- *    - we hold dir inode i_lock
+ *    - we hold dir inode i_ceph_lock
  *    - we hold dir FILE_SHARED caps
  *    - the dentry D_COMPLETE is set
  */
@@ -250,6 +250,8 @@
 struct ceph_inode_info {
 	struct ceph_vino i_vino;   /* ceph ino + snap */
 
+	spinlock_t i_ceph_lock;
+
 	u64 i_version;
 	u32 i_time_warp_seq;
 
@@ -271,7 +273,7 @@
 
 	struct ceph_inode_xattrs_info i_xattrs;
 
-	/* capabilities.  protected _both_ by i_lock and cap->session's
+	/* capabilities.  protected _both_ by i_ceph_lock and cap->session's
 	 * s_mutex. */
 	struct rb_root i_caps;           /* cap list */
 	struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
@@ -437,18 +439,18 @@
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_ceph_flags &= ~mask;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline void ceph_i_set(struct inode *inode, unsigned mask)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	ci->i_ceph_flags |= mask;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	bool r;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	r = (ci->i_ceph_flags & mask) == mask;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return r;
 }
 
@@ -508,9 +510,9 @@
 static inline int ceph_caps_issued(struct ceph_inode_info *ci)
 {
 	int issued;
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	issued = __ceph_caps_issued(ci, NULL);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return issued;
 }
 
@@ -518,9 +520,9 @@
 					int touch)
 {
 	int r;
-	spin_lock(&ci->vfs_inode.i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	r = __ceph_caps_issued_mask(ci, mask, touch);
-	spin_unlock(&ci->vfs_inode.i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return r;
 }
 
@@ -743,10 +745,9 @@
 extern void __ceph_remove_cap(struct ceph_cap *cap);
 static inline void ceph_remove_cap(struct ceph_cap *cap)
 {
-	struct inode *inode = &cap->ci->vfs_inode;
-	spin_lock(&inode->i_lock);
+	spin_lock(&cap->ci->i_ceph_lock);
 	__ceph_remove_cap(cap);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&cap->ci->i_ceph_lock);
 }
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
 			 struct ceph_cap *cap);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 96c6739..a5e36e4 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -343,8 +343,8 @@
 }
 
 static int __build_xattrs(struct inode *inode)
-	__releases(inode->i_lock)
-	__acquires(inode->i_lock)
+	__releases(ci->i_ceph_lock)
+	__acquires(ci->i_ceph_lock)
 {
 	u32 namelen;
 	u32 numattr = 0;
@@ -372,7 +372,7 @@
 		end = p + ci->i_xattrs.blob->vec.iov_len;
 		ceph_decode_32_safe(&p, end, numattr, bad);
 		xattr_version = ci->i_xattrs.version;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 
 		xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
 				 GFP_NOFS);
@@ -387,7 +387,7 @@
 				goto bad_lock;
 		}
 
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (ci->i_xattrs.version != xattr_version) {
 			/* lost a race, retry */
 			for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@
 
 	return err;
 bad_lock:
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 bad:
 	if (xattrs) {
 		for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@
 	if (vxattrs)
 		vxattr = ceph_match_vxattr(vxattrs, name);
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
 	     ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -520,14 +520,14 @@
 	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
 		goto get_xattr;
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		/* get xattrs from mds (if we don't already have them) */
 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
 		if (err)
 			return err;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	if (vxattr && vxattr->readonly) {
 		err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@
 	memcpy(value, xattr->val, xattr->val_len);
 
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return err;
 }
 
@@ -573,7 +573,7 @@
 	u32 len;
 	int i;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
 	     ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -581,13 +581,13 @@
 	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
 		goto list_xattr;
 	} else {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
 		if (err)
 			return err;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 
 	err = __build_xattrs(inode);
 	if (err < 0)
@@ -619,7 +619,7 @@
 		}
 
 out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	return err;
 }
 
@@ -739,7 +739,7 @@
 	if (!xattr)
 		goto out;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 retry:
 	issued = __ceph_caps_issued(ci, NULL);
 	if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@
 	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
 		struct ceph_buffer *blob = NULL;
 
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ci->i_ceph_lock);
 		dout(" preaallocating new blob size=%d\n", required_blob_size);
 		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
 		if (!blob)
 			goto out;
-		spin_lock(&inode->i_lock);
+		spin_lock(&ci->i_ceph_lock);
 		if (ci->i_xattrs.prealloc_blob)
 			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
 		ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@
 	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
 	ci->i_xattrs.dirty = true;
 	inode->i_ctime = CURRENT_TIME;
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (dirty)
 		__mark_inode_dirty(inode, dirty);
 	return err;
 
 do_sync:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
 	kfree(newname);
@@ -833,7 +833,7 @@
 			return -EOPNOTSUPP;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ci->i_ceph_lock);
 	__build_xattrs(inode);
 	issued = __ceph_caps_issued(ci, NULL);
 	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@
 	ci->i_xattrs.dirty = true;
 	inode->i_ctime = CURRENT_TIME;
 
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	if (dirty)
 		__mark_inode_dirty(inode, dirty);
 	return err;
 do_sync:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ci->i_ceph_lock);
 	err = ceph_send_removexattr(dentry, name);
 	return err;
 }
diff --git a/fs/char_dev.c b/fs/char_dev.c
index dca9e5e..3f152b9 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -272,7 +272,7 @@
 	cd = __register_chrdev_region(major, baseminor, count, name);
 	if (IS_ERR(cd))
 		return PTR_ERR(cd);
-	
+
 	cdev = cdev_alloc();
 	if (!cdev)
 		goto out2;
@@ -280,7 +280,7 @@
 	cdev->owner = fops->owner;
 	cdev->ops = fops;
 	kobject_set_name(&cdev->kobj, "%s", name);
-		
+
 	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
 	if (err)
 		goto out;
@@ -405,7 +405,7 @@
 		goto out_cdev_put;
 
 	if (filp->f_op->open) {
-		ret = filp->f_op->open(inode,filp);
+		ret = filp->f_op->open(inode, filp);
 		if (ret)
 			goto out_cdev_put;
 	}
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 500d658..c865bfd 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -59,8 +59,8 @@
 	gid_t	mnt_gid;
 	uid_t	mnt_backupuid;
 	gid_t	mnt_backupgid;
-	mode_t	mnt_file_mode;
-	mode_t	mnt_dir_mode;
+	umode_t	mnt_file_mode;
+	umode_t	mnt_dir_mode;
 	unsigned int mnt_cifs_flags;
 	char   *mountdata; /* options received at mount time or via DFS refs */
 	struct backing_dev_info bdi;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8f1fe32..b1fd382 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -343,9 +343,9 @@
  * ones are.
  */
 static int
-cifs_show_options(struct seq_file *s, struct vfsmount *m)
+cifs_show_options(struct seq_file *s, struct dentry *root)
 {
-	struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 	struct sockaddr *srcaddr;
 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
@@ -393,7 +393,7 @@
 	cifs_show_address(s, tcon->ses->server);
 
 	if (!tcon->unix_ext)
-		seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
+		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
 					   cifs_sb->mnt_file_mode,
 					   cifs_sb->mnt_dir_mode);
 	if (tcon->seal)
@@ -430,7 +430,7 @@
 		seq_printf(s, ",cifsacl");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
 		seq_printf(s, ",dynperm");
-	if (m->mnt_sb->s_flags & MS_POSIXACL)
+	if (root->d_sb->s_flags & MS_POSIXACL)
 		seq_printf(s, ",acl");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
 		seq_printf(s, ",mfsymlinks");
@@ -488,7 +488,7 @@
 }
 
 #ifdef CONFIG_CIFS_STATS2
-static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
+static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 {
 	/* BB FIXME */
 	return 0;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 30ff560..fe5ecf1 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -44,14 +44,14 @@
 /* Functions related to inodes */
 extern const struct inode_operations cifs_dir_inode_ops;
 extern struct inode *cifs_root_iget(struct super_block *);
-extern int cifs_create(struct inode *, struct dentry *, int,
+extern int cifs_create(struct inode *, struct dentry *, umode_t,
 		       struct nameidata *);
 extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
 				  struct nameidata *);
 extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
 extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
-extern int cifs_mknod(struct inode *, struct dentry *, int, dev_t);
-extern int cifs_mkdir(struct inode *, struct dentry *, int);
+extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+extern int cifs_mkdir(struct inode *, struct dentry *, umode_t);
 extern int cifs_rmdir(struct inode *, struct dentry *);
 extern int cifs_rename(struct inode *, struct dentry *, struct inode *,
 		       struct dentry *);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8238aa1..ba53c1c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -169,8 +169,8 @@
 	gid_t linux_gid;
 	uid_t backupuid;
 	gid_t backupgid;
-	mode_t file_mode;
-	mode_t dir_mode;
+	umode_t file_mode;
+	umode_t dir_mode;
 	unsigned secFlg;
 	bool retry:1;
 	bool intr:1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8cd4b52..4666780 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -282,7 +282,7 @@
 	byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
 	byte_count += total_in_buf2;
 	/* don't allow buffer to overflow */
-	if (byte_count > CIFSMaxBufSize)
+	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
 		return -ENOBUFS;
 	pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
 
@@ -2122,7 +2122,7 @@
 		warned_on_ntlm = true;
 		cERROR(1, "default security mechanism requested.  The default "
 			"security mechanism will be upgraded from ntlm to "
-			"ntlmv2 in kernel release 3.2");
+			"ntlmv2 in kernel release 3.3");
 	}
 	ses->overrideSecFlg = volume_info->secFlg;
 
@@ -2819,7 +2819,7 @@
 		cifs_sb->mnt_backupgid = pvolume_info->backupgid;
 	cifs_sb->mnt_file_mode = pvolume_info->file_mode;
 	cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
-	cFYI(1, "file mode: 0x%x  dir mode: 0x%x",
+	cFYI(1, "file mode: 0x%hx  dir mode: 0x%hx",
 		cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
 
 	cifs_sb->actimeo = pvolume_info->actimeo;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d7eeb9d..df8fecb 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -136,7 +136,7 @@
 /* Inode operations in similar order to how they appear in Linux file fs.h */
 
 int
-cifs_create(struct inode *inode, struct dentry *direntry, int mode,
+cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
 		struct nameidata *nd)
 {
 	int rc = -ENOENT;
@@ -355,7 +355,7 @@
 	return rc;
 }
 
-int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
+int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
 		dev_t device_number)
 {
 	int rc = -EPERM;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index e851d5b..a5f54b7 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1264,7 +1264,7 @@
 	return rc;
 }
 
-int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
+int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
 {
 	int rc = 0, tmprc;
 	int xid;
@@ -1275,7 +1275,7 @@
 	struct inode *newinode = NULL;
 	struct cifs_fattr fattr;
 
-	cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode);
+	cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode);
 
 	cifs_sb = CIFS_SB(inode->i_sb);
 	tlink = cifs_sb_tlink(cifs_sb);
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 28e7e13..83d2fd8 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -30,14 +30,14 @@
 #include "coda_int.h"
 
 /* dir inode-ops */
-static int coda_create(struct inode *dir, struct dentry *new, int mode, struct nameidata *nd);
+static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, struct nameidata *nd);
 static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, struct nameidata *nd);
 static int coda_link(struct dentry *old_dentry, struct inode *dir_inode, 
 		     struct dentry *entry);
 static int coda_unlink(struct inode *dir_inode, struct dentry *entry);
 static int coda_symlink(struct inode *dir_inode, struct dentry *entry,
 			const char *symname);
-static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, int mode);
+static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, umode_t mode);
 static int coda_rmdir(struct inode *dir_inode, struct dentry *entry);
 static int coda_rename(struct inode *old_inode, struct dentry *old_dentry, 
                        struct inode *new_inode, struct dentry *new_dentry);
@@ -191,7 +191,7 @@
 }
 
 /* creation routines: create, mknod, mkdir, link, symlink */
-static int coda_create(struct inode *dir, struct dentry *de, int mode, struct nameidata *nd)
+static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, struct nameidata *nd)
 {
 	int error;
 	const char *name=de->d_name.name;
@@ -223,7 +223,7 @@
 	return error;
 }
 
-static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
+static int coda_mkdir(struct inode *dir, struct dentry *de, umode_t mode)
 {
 	struct inode *inode;
 	struct coda_vattr attrs;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 871b277..1c08a8c 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -58,7 +58,6 @@
 static void coda_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(coda_inode_cachep, ITOC(inode));
 }
 
diff --git a/fs/compat.c b/fs/compat.c
index c987875..fa9d721 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -342,16 +342,9 @@
  */
 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
 {
-	struct super_block *sb;
 	struct compat_ustat tmp;
 	struct kstatfs sbuf;
-	int err;
-
-	sb = user_get_super(new_decode_dev(dev));
-	if (!sb)
-		return -EINVAL;
-	err = statfs_by_dentry(sb->s_root, &sbuf);
-	drop_super(sb);
+	int err = vfs_ustat(new_decode_dev(dev), &sbuf);
 	if (err)
 		return err;
 
@@ -1288,7 +1281,7 @@
  * O_LARGEFILE flag.
  */
 asmlinkage long
-compat_sys_open(const char __user *filename, int flags, int mode)
+compat_sys_open(const char __user *filename, int flags, umode_t mode)
 {
 	return do_sys_open(AT_FDCWD, filename, flags, mode);
 }
@@ -1298,7 +1291,7 @@
  * O_LARGEFILE flag.
  */
 asmlinkage long
-compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, int mode)
+compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, umode_t mode)
 {
 	return do_sys_open(dfd, filename, flags, mode);
 }
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 51352de..a10e428 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1506,35 +1506,6 @@
 	return -ENOIOCTLCMD;
 }
 
-static void compat_ioctl_error(struct file *filp, unsigned int fd,
-		unsigned int cmd, unsigned long arg)
-{
-	char buf[10];
-	char *fn = "?";
-	char *path;
-
-	/* find the name of the device. */
-	path = (char *)__get_free_page(GFP_KERNEL);
-	if (path) {
-		fn = d_path(&filp->f_path, path, PAGE_SIZE);
-		if (IS_ERR(fn))
-			fn = "?";
-	}
-
-	 sprintf(buf,"'%c'", (cmd>>_IOC_TYPESHIFT) & _IOC_TYPEMASK);
-	if (!isprint(buf[1]))
-		sprintf(buf, "%02x", buf[1]);
-	compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
-			"cmd(%08x){t:%s;sz:%u} arg(%08x) on %s\n",
-			current->comm, current->pid,
-			(int)fd, (unsigned int)cmd, buf,
-			(cmd >> _IOC_SIZESHIFT) & _IOC_SIZEMASK,
-			(unsigned int)arg, fn);
-
-	if (path)
-		free_page((unsigned long)path);
-}
-
 static int compat_ioctl_check_table(unsigned int xcmd)
 {
 	int i;
@@ -1621,13 +1592,8 @@
 		goto found_handler;
 
 	error = do_ioctl_trans(fd, cmd, arg, filp);
-	if (error == -ENOIOCTLCMD) {
-		static int count;
-
-		if (++count <= 50)
-			compat_ioctl_error(filp, fd, cmd, arg);
-		error = -EINVAL;
-	}
+	if (error == -ENOIOCTLCMD)
+		error = -ENOTTY;
 
 	goto out_fput;
 
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 82bda8f..ede857d 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -63,8 +63,8 @@
 
 extern int configfs_is_root(struct config_item *item);
 
-extern struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent *);
-extern int configfs_create(struct dentry *, int mode, int (*init)(struct inode *));
+extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *);
+extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
 extern int configfs_inode_init(void);
 extern void configfs_inode_exit(void);
 
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 9a37a9b..5ddd7eb 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -311,8 +311,8 @@
 
 	if (item->ci_parent)
 		parent = item->ci_parent->ci_dentry;
-	else if (configfs_mount && configfs_mount->mnt_sb)
-		parent = configfs_mount->mnt_sb->s_root;
+	else if (configfs_mount)
+		parent = configfs_mount->mnt_root;
 	else
 		return -EFAULT;
 
@@ -1170,7 +1170,7 @@
 }
 EXPORT_SYMBOL(configfs_undepend_item);
 
-static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int ret = 0;
 	int module_got = 0;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index ca418aa..3ee36d4 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -116,7 +116,7 @@
 	return error;
 }
 
-static inline void set_default_inode_attr(struct inode * inode, mode_t mode)
+static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
 {
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -132,7 +132,7 @@
 	inode->i_ctime = iattr->ia_ctime;
 }
 
-struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
+struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent * sd)
 {
 	struct inode * inode = new_inode(configfs_sb);
 	if (inode) {
@@ -185,7 +185,7 @@
 
 #endif /* CONFIG_LOCKDEP */
 
-int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *))
+int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
 {
 	int error = 0;
 	struct inode * inode = NULL;
@@ -292,7 +292,7 @@
 	return bdi_init(&configfs_backing_dev_info);
 }
 
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
 {
 	bdi_destroy(&configfs_backing_dev_info);
 }
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index ecc6217..276e15c 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -143,28 +143,26 @@
 		goto out;
 
 	config_kobj = kobject_create_and_add("config", kernel_kobj);
-	if (!config_kobj) {
-		kmem_cache_destroy(configfs_dir_cachep);
-		configfs_dir_cachep = NULL;
-		goto out;
-	}
-
-	err = register_filesystem(&configfs_fs_type);
-	if (err) {
-		printk(KERN_ERR "configfs: Unable to register filesystem!\n");
-		kobject_put(config_kobj);
-		kmem_cache_destroy(configfs_dir_cachep);
-		configfs_dir_cachep = NULL;
-		goto out;
-	}
+	if (!config_kobj)
+		goto out2;
 
 	err = configfs_inode_init();
-	if (err) {
-		unregister_filesystem(&configfs_fs_type);
-		kobject_put(config_kobj);
-		kmem_cache_destroy(configfs_dir_cachep);
-		configfs_dir_cachep = NULL;
-	}
+	if (err)
+		goto out3;
+
+	err = register_filesystem(&configfs_fs_type);
+	if (err)
+		goto out4;
+
+	return 0;
+out4:
+	printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+	configfs_inode_exit();
+out3:
+	kobject_put(config_kobj);
+out2:
+	kmem_cache_destroy(configfs_dir_cachep);
+	configfs_dir_cachep = NULL;
 out:
 	return err;
 }
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 739fb59..a2ee8f9 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -20,7 +20,6 @@
 #include <linux/cramfs_fs.h>
 #include <linux/slab.h>
 #include <linux/cramfs_fs_sb.h>
-#include <linux/buffer_head.h>
 #include <linux/vfs.h>
 #include <linux/mutex.h>
 
@@ -378,7 +377,7 @@
 		unsigned long nextoffset;
 		char *name;
 		ino_t ino;
-		mode_t mode;
+		umode_t mode;
 		int namelen, error;
 
 		mutex_lock(&read_mutex);
diff --git a/fs/dcache.c b/fs/dcache.c
index 89509b5..9791b1e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -38,6 +38,7 @@
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
 #include "internal.h"
+#include "mount.h"
 
 /*
  * Usage:
@@ -2451,6 +2452,7 @@
 {
 	struct dentry *dentry = path->dentry;
 	struct vfsmount *vfsmnt = path->mnt;
+	struct mount *mnt = real_mount(vfsmnt);
 	bool slash = false;
 	int error = 0;
 
@@ -2460,11 +2462,11 @@
 
 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
 			/* Global root? */
-			if (vfsmnt->mnt_parent == vfsmnt) {
+			if (!mnt_has_parent(mnt))
 				goto global_root;
-			}
-			dentry = vfsmnt->mnt_mountpoint;
-			vfsmnt = vfsmnt->mnt_parent;
+			dentry = mnt->mnt_mountpoint;
+			mnt = mnt->mnt_parent;
+			vfsmnt = &mnt->mnt;
 			continue;
 		}
 		parent = dentry->d_parent;
@@ -2501,7 +2503,7 @@
 	if (!slash)
 		error = prepend(buffer, buflen, "/", 1);
 	if (!error)
-		error = vfsmnt->mnt_ns ? 1 : 2;
+		error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
 	goto out;
 }
 
@@ -2853,31 +2855,6 @@
 	return result;
 }
 
-int path_is_under(struct path *path1, struct path *path2)
-{
-	struct vfsmount *mnt = path1->mnt;
-	struct dentry *dentry = path1->dentry;
-	int res;
-
-	br_read_lock(vfsmount_lock);
-	if (mnt != path2->mnt) {
-		for (;;) {
-			if (mnt->mnt_parent == mnt) {
-				br_read_unlock(vfsmount_lock);
-				return 0;
-			}
-			if (mnt->mnt_parent == path2->mnt)
-				break;
-			mnt = mnt->mnt_parent;
-		}
-		dentry = mnt->mnt_mountpoint;
-	}
-	res = is_subdir(dentry, path2->dentry);
-	br_read_unlock(vfsmount_lock);
-	return res;
-}
-EXPORT_SYMBOL(path_is_under);
-
 void d_genocide(struct dentry *root)
 {
 	struct dentry *this_parent;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 90f7657..f65d445 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -15,9 +15,11 @@
 
 #include <linux/module.h>
 #include <linux/fs.h>
+#include <linux/seq_file.h>
 #include <linux/pagemap.h>
 #include <linux/namei.h>
 #include <linux/debugfs.h>
+#include <linux/io.h>
 
 static ssize_t default_read_file(struct file *file, char __user *buf,
 				 size_t count, loff_t *ppos)
@@ -95,7 +97,7 @@
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
 				 struct dentry *parent, u8 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -147,7 +149,7 @@
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
 				  struct dentry *parent, u16 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -199,7 +201,7 @@
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
 				 struct dentry *parent, u32 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -252,7 +254,7 @@
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
 				 struct dentry *parent, u64 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -298,7 +300,7 @@
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
 				 struct dentry *parent, u8 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -322,7 +324,7 @@
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
 				 struct dentry *parent, u16 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -346,7 +348,7 @@
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
 				 struct dentry *parent, u32 *value)
 {
 	/* if there are no write bits set, make read only */
@@ -370,7 +372,7 @@
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
 				 struct dentry *parent, u64 *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_x64);
@@ -401,7 +403,7 @@
  * @value: a pointer to the variable that the file should read to and write
  *         from.
  */
-struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
 				     struct dentry *parent, size_t *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_size_t);
@@ -473,7 +475,7 @@
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
 				   struct dentry *parent, u32 *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_bool);
@@ -518,10 +520,103 @@
  * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
  * code.
  */
-struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 				   struct dentry *parent,
 				   struct debugfs_blob_wrapper *blob)
 {
 	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_blob);
+
+#ifdef CONFIG_HAS_IOMEM
+
+/*
+ * The regset32 stuff is used to print 32-bit registers using the
+ * seq_file utilities. We offer printing a register set in an already-opened
+ * sequential file or create a debugfs file that only prints a regset32.
+ */
+
+/**
+ * debugfs_print_regs32 - use seq_printf to describe a set of registers
+ * @s: the seq_file structure being used to generate output
+ * @regs: an array of struct debugfs_reg32 structures
+ * @nregs: the length of the above array
+ * @base: the base address to be used in reading the registers
+ * @prefix: a string to be prefixed to every output line
+ *
+ * This function outputs a text block describing the current values of
+ * some 32-bit hardware registers. It is meant to be used within debugfs
+ * files based on seq_file that need to show registers, intermixed with other
+ * information. The prefix argument may be used to specify a leading string,
+ * because some peripherals have several blocks of identical registers,
+ * for example configuration of dma channels
+ */
+int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+			   int nregs, void __iomem *base, char *prefix)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < nregs; i++, regs++) {
+		if (prefix)
+			ret += seq_printf(s, "%s", prefix);
+		ret += seq_printf(s, "%s = 0x%08x\n", regs->name,
+				  readl(base + regs->offset));
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(debugfs_print_regs32);
+
+static int debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct debugfs_regset32 *regset = s->private;
+
+	debugfs_print_regs32(s, regset->regs, regset->nregs, regset->base, "");
+	return 0;
+}
+
+static int debugfs_open_regset32(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations fops_regset32 = {
+	.open =		debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+/**
+ * debugfs_create_regset32 - create a debugfs file that returns register values
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @regset: a pointer to a struct debugfs_regset32, which contains a pointer
+ *          to an array of register definitions, the array size and the base
+ *          address where the register bank is to be found.
+ *
+ * This function creates a file in debugfs with the given name that reports
+ * the names and values of a set of 32-bit registers. If the @mode variable
+ * is so set it can be read from. Writing is not supported.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, %NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
+ */
+struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
+				       struct dentry *parent,
+				       struct debugfs_regset32 *regset)
+{
+	return debugfs_create_file(name, mode, parent, regset, &fops_regset32);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_regset32);
+
+#endif /* CONFIG_HAS_IOMEM */
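
As a rough usage sketch for the new regset32 interface (not part of this series; the "foo" device, register names and offsets are purely illustrative), a driver would describe its registers once and hand the set to debugfs_create_regset32():

	#include <linux/debugfs.h>

	static const struct debugfs_reg32 foo_regs[] = {
		{ .name = "ctrl",   .offset = 0x00 },
		{ .name = "status", .offset = 0x04 },
		{ .name = "irq",    .offset = 0x08 },
	};

	static struct debugfs_regset32 foo_regset;

	static void foo_debugfs_init(struct dentry *parent, void __iomem *base)
	{
		foo_regset.regs  = foo_regs;
		foo_regset.nregs = ARRAY_SIZE(foo_regs);
		foo_regset.base  = base;

		/* read-only "registers" file under the caller's debugfs directory */
		debugfs_create_regset32("registers", S_IRUGO, parent, &foo_regset);
	}

Reading the resulting file simply walks the array and emits one "name = 0x%08x" line per register via the seq_file helpers added above.
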
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index f3a257d..956d5dd 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -30,7 +30,7 @@
 static int debugfs_mount_count;
 static bool debugfs_registered;
 
-static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev,
+static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev_t dev,
 				       void *data, const struct file_operations *fops)
 
 {
@@ -69,7 +69,7 @@
 
 /* SMP-safe */
 static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
-			 int mode, dev_t dev, void *data,
+			 umode_t mode, dev_t dev, void *data,
 			 const struct file_operations *fops)
 {
 	struct inode *inode;
@@ -87,7 +87,7 @@
 	return error;
 }
 
-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode,
 			 void *data, const struct file_operations *fops)
 {
 	int res;
@@ -101,14 +101,14 @@
 	return res;
 }
 
-static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_link(struct inode *dir, struct dentry *dentry, umode_t mode,
 			void *data, const struct file_operations *fops)
 {
 	mode = (mode & S_IALLUGO) | S_IFLNK;
 	return debugfs_mknod(dir, dentry, mode, 0, data, fops);
 }
 
-static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			  void *data, const struct file_operations *fops)
 {
 	int res;
@@ -146,7 +146,7 @@
 	.kill_sb =	kill_litter_super,
 };
 
-static int debugfs_create_by_name(const char *name, mode_t mode,
+static int debugfs_create_by_name(const char *name, umode_t mode,
 				  struct dentry *parent,
 				  struct dentry **dentry,
 				  void *data,
@@ -160,7 +160,7 @@
 	 * have around.
 	 */
 	if (!parent)
-		parent = debugfs_mount->mnt_sb->s_root;
+		parent = debugfs_mount->mnt_root;
 
 	*dentry = NULL;
 	mutex_lock(&parent->d_inode->i_mutex);
@@ -214,7 +214,7 @@
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
  */
-struct dentry *debugfs_create_file(const char *name, mode_t mode,
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
 				   struct dentry *parent, void *data,
 				   const struct file_operations *fops)
 {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index d5d5297..c4e2a58 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -246,9 +246,9 @@
 	return err;
 }
 
-static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int devpts_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb);
+	struct pts_fs_info *fsi = DEVPTS_SB(root->d_sb);
 	struct pts_mount_opts *opts = &fsi->mount_opts;
 
 	if (opts->setuid)
@@ -301,7 +301,7 @@
 
 	inode = new_inode(s);
 	if (!inode)
-		goto free_fsi;
+		goto fail;
 	inode->i_ino = 1;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
@@ -316,8 +316,6 @@
 	printk(KERN_ERR "devpts: get root dentry failed\n");
 	iput(inode);
 
-free_fsi:
-	kfree(s->s_fs_info);
 fail:
 	return -ENOMEM;
 }
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 990626e..0b3109e 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -281,7 +281,7 @@
 	} else {
 		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
 		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
-		ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
+		ret6->sin6_addr = in6->sin6_addr;
 	}
 
 	return 0;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 32f90a3a..19a8ca4 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -144,24 +144,6 @@
 }
 
 /**
- * ecryptfs_create_underlying_file
- * @lower_dir_inode: inode of the parent in the lower fs of the new file
- * @dentry: New file's dentry
- * @mode: The mode of the new file
- *
- * Creates the file in the lower file system.
- *
- * Returns zero on success; non-zero on error condition
- */
-static int
-ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
-				struct dentry *dentry, int mode)
-{
-	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	return vfs_create(lower_dir_inode, lower_dentry, mode, NULL);
-}
-
-/**
  * ecryptfs_do_create
  * @directory_inode: inode of the new file's dentry's parent in ecryptfs
  * @ecryptfs_dentry: New file's dentry in ecryptfs
@@ -176,7 +158,7 @@
  */
 static struct inode *
 ecryptfs_do_create(struct inode *directory_inode,
-		   struct dentry *ecryptfs_dentry, int mode)
+		   struct dentry *ecryptfs_dentry, umode_t mode)
 {
 	int rc;
 	struct dentry *lower_dentry;
@@ -191,8 +173,7 @@
 		inode = ERR_CAST(lower_dir_dentry);
 		goto out;
 	}
-	rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
-					     ecryptfs_dentry, mode);
+	rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, NULL);
 	if (rc) {
 		printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
 		       "rc = [%d]\n", __func__, rc);
@@ -267,7 +248,7 @@
  */
 static int
 ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
-		int mode, struct nameidata *nd)
+		umode_t mode, struct nameidata *nd)
 {
 	struct inode *ecryptfs_inode;
 	int rc;
@@ -559,7 +540,7 @@
 	return rc;
 }
 
-static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int rc;
 	struct dentry *lower_dentry;
@@ -607,7 +588,7 @@
 }
 
 static int
-ecryptfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	int rc;
 	struct dentry *lower_dentry;
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index dbd52d40..9df7fd6 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -69,7 +69,6 @@
 	struct ecryptfs_inode_info *inode_info;
 	inode_info = ecryptfs_inode_to_private(inode);
 
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
 }
 
@@ -132,9 +131,9 @@
  * Prints the mount options for a given superblock.
  * Returns zero; does not fail.
  */
-static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int ecryptfs_show_options(struct seq_file *m, struct dentry *root)
 {
-	struct super_block *sb = mnt->mnt_sb;
+	struct super_block *sb = root->d_sb;
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
 		&ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
 	struct ecryptfs_global_auth_tok *walker;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 0f31acb..9811064 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -68,7 +68,6 @@
 static void efs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
 }
 
diff --git a/fs/exec.c b/fs/exec.c
index 3625464..3f64b9f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1225,7 +1225,7 @@
  * - the caller must hold ->cred_guard_mutex to protect against
  *   PTRACE_ATTACH
  */
-int check_unsafe_exec(struct linux_binprm *bprm)
+static int check_unsafe_exec(struct linux_binprm *bprm)
 {
 	struct task_struct *p = current, *t;
 	unsigned n_fs;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index d0941c6..8040583 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -234,7 +234,7 @@
 static inline
 void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode)
 {
-	mode_t mode = inode->i_mode;
+	umode_t mode = inode->i_mode;
 	de->file_type = exofs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 }
 
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 51f4b4c..ca9d496 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -154,7 +154,7 @@
 		loff_t pos, unsigned len, unsigned flags,
 		struct page **pagep, void **fsdata);
 extern struct inode *exofs_iget(struct super_block *, unsigned long);
-struct inode *exofs_new_inode(struct inode *, int);
+struct inode *exofs_new_inode(struct inode *, umode_t);
 extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
 extern void exofs_evict_inode(struct inode *);
 
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index f6dbf77..ea5e1f9 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1276,7 +1276,7 @@
 /*
  * Set up a new inode and create an object for it on the OSD
  */
-struct inode *exofs_new_inode(struct inode *dir, int mode)
+struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct exofs_sb_info *sbi = sb->s_fs_info;
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index b54c437..9dbf0c3 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -59,7 +59,7 @@
 	return d_splice_alias(inode, dentry);
 }
 
-static int exofs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			 struct nameidata *nd)
 {
 	struct inode *inode = exofs_new_inode(dir, mode);
@@ -74,7 +74,7 @@
 	return err;
 }
 
-static int exofs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int exofs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 		       dev_t rdev)
 {
 	struct inode *inode;
@@ -153,7 +153,7 @@
 	return exofs_add_nondir(dentry, inode);
 }
 
-static int exofs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int exofs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 	int err = -EMLINK;
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index e6085ec..d22cd16 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -166,7 +166,6 @@
 static void exofs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
 }
 
@@ -839,6 +838,8 @@
 	ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
 	if (ret) {
 		EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
+		dput(sb->s_root);
+		sb->s_root = NULL;
 		goto free_sbi;
 	}
 
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 47cda41..d37df35 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -279,7 +279,7 @@
 
 static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
 {
-	mode_t mode = inode->i_mode;
+	umode_t mode = inode->i_mode;
 	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
 		de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
 	else
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 9a4e5e20..75ad433 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -110,7 +110,7 @@
 extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *, int);
 
 /* ialloc.c */
-extern struct inode * ext2_new_inode (struct inode *, int, const struct qstr *);
+extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
 extern void ext2_free_inode (struct inode *);
 extern unsigned long ext2_count_free_inodes (struct super_block *);
 extern void ext2_check_inodes_bitmap (struct super_block *);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c4e81df..cd7f5f4 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -429,7 +429,7 @@
 	return group;
 }
 
-struct inode *ext2_new_inode(struct inode *dir, int mode,
+struct inode *ext2_new_inode(struct inode *dir, umode_t mode,
 			     const struct qstr *qstr)
 {
 	struct super_block *sb;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index f81e250..1089f76 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -35,7 +35,7 @@
 	case EXT2_IOC_SETFLAGS: {
 		unsigned int oldflags;
 
-		ret = mnt_want_write(filp->f_path.mnt);
+		ret = mnt_want_write_file(filp);
 		if (ret)
 			return ret;
 
@@ -83,7 +83,7 @@
 		inode->i_ctime = CURRENT_TIME_SEC;
 		mark_inode_dirty(inode);
 setflags_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return ret;
 	}
 	case EXT2_IOC_GETVERSION:
@@ -91,7 +91,7 @@
 	case EXT2_IOC_SETVERSION:
 		if (!inode_owner_or_capable(inode))
 			return -EPERM;
-		ret = mnt_want_write(filp->f_path.mnt);
+		ret = mnt_want_write_file(filp);
 		if (ret)
 			return ret;
 		if (get_user(inode->i_generation, (int __user *) arg)) {
@@ -100,7 +100,7 @@
 			inode->i_ctime = CURRENT_TIME_SEC;
 			mark_inode_dirty(inode);
 		}
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return ret;
 	case EXT2_IOC_GETRSVSZ:
 		if (test_opt(inode->i_sb, RESERVATION)
@@ -121,7 +121,7 @@
 		if (get_user(rsv_window_size, (int __user *)arg))
 			return -EFAULT;
 
-		ret = mnt_want_write(filp->f_path.mnt);
+		ret = mnt_want_write_file(filp);
 		if (ret)
 			return ret;
 
@@ -145,7 +145,7 @@
 			rsv->rsv_goal_size = rsv_window_size;
 		}
 		mutex_unlock(&ei->truncate_mutex);
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return 0;
 	}
 	default:
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 761fde8..0804198 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -94,7 +94,7 @@
  * If the create succeeds, we fill in the inode information
  * with d_instantiate(). 
  */
-static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
+static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
 {
 	struct inode *inode;
 
@@ -119,7 +119,7 @@
 	return ext2_add_nondir(dentry, inode);
 }
 
-static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	struct inode * inode;
 	int err;
@@ -214,7 +214,7 @@
 	return err;
 }
 
-static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 {
 	struct inode * inode;
 	int err = -EMLINK;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index bd8ac16..9b403f0 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -173,7 +173,6 @@
 static void ext2_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
 }
 
@@ -211,9 +210,9 @@
 	kmem_cache_destroy(ext2_inode_cachep);
 }
 
-static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ext2_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct super_block *sb = vfs->mnt_sb;
+	struct super_block *sb = root->d_sb;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	struct ext2_super_block *es = sbi->s_es;
 	unsigned long def_mount_opts;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 5c866e0..92cc86d 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -371,7 +371,7 @@
  * group to find a free inode.
  */
 struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
-			     const struct qstr *qstr, int mode)
+			     const struct qstr *qstr, umode_t mode)
 {
 	struct super_block *sb;
 	struct buffer_head *bitmap_bh = NULL;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 85fe655..15cb470 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2490,7 +2490,7 @@
  * transaction, and VFS/VM ensures that ext3_truncate() cannot run
  * simultaneously on behalf of the same inode.
  *
- * As we work through the truncate and commmit bits of it to the journal there
+ * As we work through the truncate and commit bits of it to the journal there
  * is one core, guiding principle: the file's tree must always be consistent on
  * disk.  We must be able to restart the truncate after a crash.
  *
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index ba1b54e..8e37c41 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -44,7 +44,7 @@
 		if (get_user(flags, (int __user *) arg))
 			return -EFAULT;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -110,7 +110,7 @@
 			err = ext3_change_inode_journal_flag(inode, jflag);
 flags_out:
 		mutex_unlock(&inode->i_mutex);
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case EXT3_IOC_GETVERSION:
@@ -126,7 +126,7 @@
 		if (!inode_owner_or_capable(inode))
 			return -EPERM;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 		if (get_user(generation, (int __user *) arg)) {
@@ -147,7 +147,7 @@
 		}
 		ext3_journal_stop(handle);
 setversion_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case EXT3_IOC_GETRSVSZ:
@@ -164,7 +164,7 @@
 		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
 			return -ENOTTY;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -195,7 +195,7 @@
 		}
 		mutex_unlock(&ei->truncate_mutex);
 setrsvsz_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case EXT3_IOC_GROUP_EXTEND: {
@@ -206,7 +206,7 @@
 		if (!capable(CAP_SYS_RESOURCE))
 			return -EPERM;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -221,7 +221,7 @@
 		if (err == 0)
 			err = err2;
 group_extend_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case EXT3_IOC_GROUP_ADD: {
@@ -232,7 +232,7 @@
 		if (!capable(CAP_SYS_RESOURCE))
 			return -EPERM;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -249,7 +249,7 @@
 		if (err == 0)
 			err = err2;
 group_add_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case FITRIM: {
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 642dc6d..d269821 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1698,7 +1698,7 @@
  * If the create succeeds, we fill in the inode information
  * with d_instantiate().
  */
-static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
+static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	handle_t *handle;
@@ -1732,7 +1732,7 @@
 }
 
 static int ext3_mknod (struct inode * dir, struct dentry *dentry,
-			int mode, dev_t rdev)
+			umode_t mode, dev_t rdev)
 {
 	handle_t *handle;
 	struct inode *inode;
@@ -1768,7 +1768,7 @@
 	return err;
 }
 
-static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 {
 	handle_t *handle;
 	struct inode * inode;
@@ -2272,7 +2272,7 @@
 			err = PTR_ERR(handle);
 			goto err_drop_inode;
 		}
-		inc_nlink(inode);
+		set_nlink(inode, 1);
 		err = ext3_orphan_del(handle, inode);
 		if (err) {
 			ext3_journal_stop(handle);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 922d289..3a10b88 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -511,7 +511,6 @@
 static void ext3_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
 }
 
@@ -611,9 +610,9 @@
  *  - it's set to a non-default value OR
  *  - if the per-sb default is different from the global default
  */
-static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ext3_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct super_block *sb = vfs->mnt_sb;
+	struct super_block *sb = root->d_sb;
 	struct ext3_sb_info *sbi = EXT3_SB(sb);
 	struct ext3_super_block *es = sbi->s_es;
 	unsigned long def_mount_opts;
@@ -2910,7 +2909,7 @@
 		return -EINVAL;
 
 	/* Quotafile not on the same filesystem? */
-	if (path->mnt->mnt_sb != sb)
+	if (path->dentry->d_sb != sb)
 		return -EXDEV;
 	/* Journaling quota? */
 	if (EXT3_SB(sb)->s_qf_names[type]) {
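
The super.c hunks above follow two related VFS interface changes seen throughout this merge: ->show_options() now receives the filesystem's root dentry instead of a vfsmount, and superblock comparisons go through path->dentry->d_sb rather than path->mnt->mnt_sb. A hedged sketch of a converted ->show_options() handler (the filesystem name and options are invented):

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    /* Hypothetical handler, for illustration only. */
    static int examplefs_show_options(struct seq_file *seq, struct dentry *root)
    {
            struct super_block *sb = root->d_sb;    /* was vfs->mnt_sb */

            if (sb->s_flags & MS_SYNCHRONOUS)
                    seq_puts(seq, ",sync");
            seq_printf(seq, ",blocksize=%lu", sb->s_blocksize);
            return 0;
    }
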
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 5b0e26a..1554b15 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1819,7 +1819,7 @@
 			  dx_hash_info *hinfo);
 
 /* ialloc.c */
-extern struct inode *ext4_new_inode(handle_t *, struct inode *, int,
+extern struct inode *ext4_new_inode(handle_t *, struct inode *, umode_t,
 				    const struct qstr *qstr, __u32 goal,
 				    uid_t *owner);
 extern void ext4_free_inode(handle_t *, struct inode *);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 61fa9e1..607b155 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1095,7 +1095,7 @@
 		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
 		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
 
-	neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+	neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
 	ext4_mark_inode_dirty(handle, inode);
 out:
 	brelse(bh);
@@ -2955,7 +2955,6 @@
 	/* Pre-conditions */
 	BUG_ON(!ext4_ext_is_uninitialized(ex));
 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
-	BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
 
 	/*
 	 * Attempt to transfer newly initialized blocks from the currently
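
The eh_depth change above fixes an endianness bug: the field is stored little-endian on disk, so adding 1 to the raw value only happens to give the right answer on little-endian hosts. A small user-space sketch (not ext4 code) of the wrong and right way to bump such a field, using byte-swapping stand-ins for cpu_to_le16()/le16_to_cpu() as they behave on a big-endian machine:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for cpu_to_le16()/le16_to_cpu() on a big-endian host:
     * both directions are a byte swap. */
    static uint16_t to_le16(uint16_t v)
    {
            return (uint16_t)(((v & 0xff) << 8) | (v >> 8));
    }
    static uint16_t from_le16(uint16_t v)
    {
            return to_le16(v);      /* the swap is its own inverse */
    }

    int main(void)
    {
            uint16_t depth_on_disk = to_le16(1);    /* tree depth 1, stored LE */

            /* Wrong: arithmetic on the raw on-disk representation. */
            uint16_t broken = to_le16((uint16_t)(depth_on_disk + 1));

            /* Right: convert to host order, increment, convert back. */
            uint16_t fixed = to_le16((uint16_t)(from_le16(depth_on_disk) + 1));

            printf("broken=%u fixed=%u\n",
                   (unsigned)from_le16(broken), (unsigned)from_le16(fixed));
            return 0;       /* prints broken=257 fixed=2 */
    }
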
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 00beb4f..4637af0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -351,7 +351,7 @@
  */
 
 static int find_group_orlov(struct super_block *sb, struct inode *parent,
-			    ext4_group_t *group, int mode,
+			    ext4_group_t *group, umode_t mode,
 			    const struct qstr *qstr)
 {
 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
@@ -497,7 +497,7 @@
 }
 
 static int find_group_other(struct super_block *sb, struct inode *parent,
-			    ext4_group_t *group, int mode)
+			    ext4_group_t *group, umode_t mode)
 {
 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
 	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
@@ -602,7 +602,7 @@
  */
 static int ext4_claim_inode(struct super_block *sb,
 			struct buffer_head *inode_bitmap_bh,
-			unsigned long ino, ext4_group_t group, int mode)
+			unsigned long ino, ext4_group_t group, umode_t mode)
 {
 	int free = 0, retval = 0, count;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -690,7 +690,7 @@
  * For other inodes, search forward from the parent directory's block
  * group to find a free inode.
  */
-struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
+struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
 			     const struct qstr *qstr, __u32 goal, uid_t *owner)
 {
 	struct super_block *sb;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 848f436..7dbcc3e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1339,8 +1339,11 @@
 					clear_buffer_unwritten(bh);
 				}
 
-				/* skip page if block allocation undone */
-				if (buffer_delay(bh) || buffer_unwritten(bh))
+				/*
+				 * skip page if block allocation undone and
+				 * block is dirty
+				 */
+				if (ext4_bh_delay_or_unwritten(NULL, bh))
 					skip_page = 1;
 				bh = bh->b_this_page;
 				block_start += bh->b_size;
@@ -1878,7 +1881,7 @@
  * a[0] = 'a';
  * truncate(f, 4096);
  * we have in the page first buffer_head mapped via page_mkwrite call back
- * but other bufer_heads would be unmapped but dirty(dirty done via the
+ * but other buffer_heads would be unmapped but dirty (dirty done via the
  * do_wp_page). So writepage should write the first block. If we modify
  * the mmap area beyond 1024 we will again get a page_fault and the
  * page_mkwrite callback will do the block allocation and mark the
@@ -2387,7 +2390,6 @@
 	pgoff_t index;
 	struct inode *inode = mapping->host;
 	handle_t *handle;
-	loff_t page_len;
 
 	index = pos >> PAGE_CACHE_SHIFT;
 
@@ -2434,13 +2436,6 @@
 		 */
 		if (pos + len > inode->i_size)
 			ext4_truncate_failed_write(inode);
-	} else {
-		page_len = pos & (PAGE_CACHE_SIZE - 1);
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers_no_lock(handle,
-				inode, page, pos - page_len, page_len,
-				EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-		}
 	}
 
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2483,7 +2478,6 @@
 	loff_t new_i_size;
 	unsigned long start, end;
 	int write_mode = (int)(unsigned long)fsdata;
-	loff_t page_len;
 
 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
 		if (ext4_should_order_data(inode)) {
@@ -2508,7 +2502,7 @@
 	 */
 
 	new_i_size = pos + copied;
-	if (new_i_size > EXT4_I(inode)->i_disksize) {
+	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
 		if (ext4_da_should_update_i_disksize(page, end)) {
 			down_write(&EXT4_I(inode)->i_data_sem);
 			if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2532,16 +2526,6 @@
 	}
 	ret2 = generic_write_end(file, mapping, pos, len, copied,
 							page, fsdata);
-
-	page_len = PAGE_CACHE_SIZE -
-			((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
-	if (page_len > 0) {
-		ret = ext4_discard_partial_page_buffers_no_lock(handle,
-			inode, page, pos + copied - 1, page_len,
-			EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-	}
-
 	copied = ret2;
 	if (ret2 < 0)
 		ret = ret2;
@@ -2781,10 +2765,11 @@
  		  iocb->private, io_end->inode->i_ino, iocb, offset,
 		  size);
 
+	iocb->private = NULL;
+
 	/* if not aio dio with unwritten extents, just free io and return */
 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
 		ext4_free_io_end(io_end);
-		iocb->private = NULL;
 out:
 		if (is_async)
 			aio_complete(iocb, ret, 0);
@@ -2807,7 +2792,6 @@
 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
 	/* queue the work to convert unwritten extents to written */
-	iocb->private = NULL;
 	queue_work(wq, &io_end->work);
 
 	/* XXX: probably should move into the real I/O completion handler */
@@ -3203,26 +3187,8 @@
 
 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
-	if (!page_has_buffers(page)) {
-		/*
-		 * If the range to be discarded covers a partial block
-		 * we need to get the page buffers.  This is because
-		 * partial blocks cannot be released and the page needs
-		 * to be updated with the contents of the block before
-		 * we write the zeros on top of it.
-		 */
-		if ((from & (blocksize - 1)) ||
-		    ((from + length) & (blocksize - 1))) {
-			create_empty_buffers(page, blocksize, 0);
-		} else {
-			/*
-			 * If there are no partial blocks,
-			 * there is nothing to update,
-			 * so we can return now
-			 */
-			return 0;
-		}
-	}
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
 
 	/* Find the buffer that contains "offset" */
 	bh = page_buffers(page);
@@ -3503,7 +3469,7 @@
  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
  * simultaneously on behalf of the same inode.
  *
- * As we work through the truncate and commmit bits of it to the journal there
+ * As we work through the truncate and commit bits of it to the journal there
  * is one core, guiding principle: the file's tree must always be consistent on
  * disk.  We must be able to restart the truncate after a crash.
  *
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a567968..d37b3bb 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -45,7 +45,7 @@
 		if (get_user(flags, (int __user *) arg))
 			return -EFAULT;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -134,7 +134,7 @@
 			err = ext4_ext_migrate(inode);
 flags_out:
 		mutex_unlock(&inode->i_mutex);
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case EXT4_IOC_GETVERSION:
@@ -150,7 +150,7 @@
 		if (!inode_owner_or_capable(inode))
 			return -EPERM;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 		if (get_user(generation, (int __user *) arg)) {
@@ -171,7 +171,7 @@
 		}
 		ext4_journal_stop(handle);
 setversion_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	case EXT4_IOC_GROUP_EXTEND: {
@@ -192,7 +192,7 @@
 			return -EOPNOTSUPP;
 		}
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -204,7 +204,7 @@
 		}
 		if (err == 0)
 			err = err2;
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		ext4_resize_end(sb);
 
 		return err;
@@ -240,13 +240,13 @@
 			return -EOPNOTSUPP;
 		}
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			goto mext_out;
 
 		err = ext4_move_extents(filp, donor_filp, me.orig_start,
 					me.donor_start, me.len, &me.moved_len);
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		if (me.moved_len > 0)
 			file_remove_suid(donor_filp);
 
@@ -277,7 +277,7 @@
 			return -EOPNOTSUPP;
 		}
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -289,7 +289,7 @@
 		}
 		if (err == 0)
 			err = err2;
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		ext4_resize_end(sb);
 
 		return err;
@@ -301,7 +301,7 @@
 		if (!inode_owner_or_capable(inode))
 			return -EACCES;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 		/*
@@ -313,7 +313,7 @@
 		mutex_lock(&(inode->i_mutex));
 		err = ext4_ext_migrate(inode);
 		mutex_unlock(&(inode->i_mutex));
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 
@@ -323,11 +323,11 @@
 		if (!inode_owner_or_capable(inode))
 			return -EACCES;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 		err = ext4_alloc_da_blocks(inode);
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index aa4c782..2043f48 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1736,7 +1736,7 @@
  * If the create succeeds, we fill in the inode information
  * with d_instantiate().
  */
-static int ext4_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		       struct nameidata *nd)
 {
 	handle_t *handle;
@@ -1770,7 +1770,7 @@
 }
 
 static int ext4_mknod(struct inode *dir, struct dentry *dentry,
-		      int mode, dev_t rdev)
+		      umode_t mode, dev_t rdev)
 {
 	handle_t *handle;
 	struct inode *inode;
@@ -1806,7 +1806,7 @@
 	return err;
 }
 
-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	handle_t *handle;
 	struct inode *inode;
@@ -2315,7 +2315,7 @@
 			err = PTR_ERR(handle);
 			goto err_drop_inode;
 		}
-		inc_nlink(inode);
+		set_nlink(inode, 1);
 		err = ext4_orphan_del(handle, inode);
 		if (err) {
 			ext4_journal_stop(handle);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7ce1d0b..7e106c8 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -385,6 +385,18 @@
 
 		block_end = block_start + blocksize;
 		if (block_start >= len) {
+			/*
+			 * Comments copied from block_write_full_page_endio:
+			 *
+			 * The page straddles i_size.  It must be zeroed out on
+			 * each and every writepage invocation because it may
+			 * be mmapped.  "A file is mapped in multiples of the
+			 * page size.  For a file that is not a multiple of
+			 * the  page size, the remaining memory is zeroed when
+			 * mapped, and writes to that region are not written
+			 * out to the file."
+			 */
+			zero_user_segment(page, block_start, block_end);
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			continue;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3858767..64e2529 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -930,7 +930,6 @@
 static void ext4_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
 }
 
@@ -1033,11 +1032,11 @@
  *  - it's set to a non-default value OR
  *  - if the per-sb default is different from the global default
  */
-static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ext4_show_options(struct seq_file *seq, struct dentry *root)
 {
 	int def_errors;
 	unsigned long def_mount_opts;
-	struct super_block *sb = vfs->mnt_sb;
+	struct super_block *sb = root->d_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
 
@@ -1155,9 +1154,9 @@
 		seq_puts(seq, ",block_validity");
 
 	if (!test_opt(sb, INIT_INODE_TABLE))
-		seq_puts(seq, ",noinit_inode_table");
+		seq_puts(seq, ",noinit_itable");
 	else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
-		seq_printf(seq, ",init_inode_table=%u",
+		seq_printf(seq, ",init_itable=%u",
 			   (unsigned) sbi->s_li_wait_mult);
 
 	ext4_show_quota_options(seq, sb);
@@ -1333,8 +1332,7 @@
 	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
 	Opt_inode_readahead_blks, Opt_journal_ioprio,
 	Opt_dioread_nolock, Opt_dioread_lock,
-	Opt_discard, Opt_nodiscard,
-	Opt_init_inode_table, Opt_noinit_inode_table,
+	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
 };
 
 static const match_table_t tokens = {
@@ -1407,9 +1405,9 @@
 	{Opt_dioread_lock, "dioread_lock"},
 	{Opt_discard, "discard"},
 	{Opt_nodiscard, "nodiscard"},
-	{Opt_init_inode_table, "init_itable=%u"},
-	{Opt_init_inode_table, "init_itable"},
-	{Opt_noinit_inode_table, "noinit_itable"},
+	{Opt_init_itable, "init_itable=%u"},
+	{Opt_init_itable, "init_itable"},
+	{Opt_noinit_itable, "noinit_itable"},
 	{Opt_err, NULL},
 };
 
@@ -1892,7 +1890,7 @@
 		case Opt_dioread_lock:
 			clear_opt(sb, DIOREAD_NOLOCK);
 			break;
-		case Opt_init_inode_table:
+		case Opt_init_itable:
 			set_opt(sb, INIT_INODE_TABLE);
 			if (args[0].from) {
 				if (match_int(&args[0], &option))
@@ -1903,7 +1901,7 @@
 				return 0;
 			sbi->s_li_wait_mult = option;
 			break;
-		case Opt_noinit_inode_table:
+		case Opt_noinit_itable:
 			clear_opt(sb, INIT_INODE_TABLE);
 			break;
 		default:
@@ -2884,8 +2882,7 @@
 		}
 		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
 		cur = jiffies;
 		if ((time_after_eq(cur, next_wakeup)) ||
@@ -4783,7 +4780,7 @@
 		return -EINVAL;
 
 	/* Quotafile not on the same filesystem? */
-	if (path->mnt->mnt_sb != sb)
+	if (path->dentry->d_sb != sb)
 		return -EXDEV;
 	/* Journaling quota? */
 	if (EXT4_SB(sb)->s_qf_names[type]) {
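
The option-table churn in super.c above is a token rename (Opt_init_inode_table becomes Opt_init_itable) so the enum names match the user-visible mount options, plus the switch from the open-coded freezing(current)/refrigerator() pair to try_to_freeze() in the lazyinit thread. For reference, a hedged sketch of how such a match_table_t is declared and consumed with match_token()/match_int(); the names and the default multiplier here are invented:

    #include <linux/parser.h>

    enum { Opt_init_itable, Opt_noinit_itable, Opt_err };

    static const match_table_t example_tokens = {
            {Opt_init_itable,   "init_itable=%u"},
            {Opt_init_itable,   "init_itable"},
            {Opt_noinit_itable, "noinit_itable"},
            {Opt_err, NULL},
    };

    /* Parse one comma-separated mount option into a wait multiplier. */
    static int example_parse_one(char *p, unsigned int *wait_mult)
    {
            substring_t args[MAX_OPT_ARGS];
            int option;

            switch (match_token(p, example_tokens, args)) {
            case Opt_init_itable:
                    if (args[0].from && match_int(&args[0], &option))
                            return -EINVAL;
                    *wait_mult = args[0].from ? option : 10; /* assumed default */
                    return 0;
            case Opt_noinit_itable:
                    *wait_mult = 0;
                    return 0;
            default:
                    return -EINVAL;
            }
    }
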
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 1510a4d..66994f3 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -141,7 +141,7 @@
 static inline int fat_mode_can_hold_ro(struct inode *inode)
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
-	mode_t mask;
+	umode_t mask;
 
 	if (S_ISDIR(inode->i_mode)) {
 		if (!sbi->options.rodir)
@@ -156,8 +156,8 @@
 }
 
 /* Convert attribute bits and a mask to the UNIX mode. */
-static inline mode_t fat_make_mode(struct msdos_sb_info *sbi,
-				   u8 attrs, mode_t mode)
+static inline umode_t fat_make_mode(struct msdos_sb_info *sbi,
+				   u8 attrs, umode_t mode)
 {
 	if (attrs & ATTR_RO && !((attrs & ATTR_DIR) && !sbi->options.rodir))
 		mode &= ~S_IWUGO;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index c118acf..a71fe37 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -44,7 +44,7 @@
 		goto out;
 
 	mutex_lock(&inode->i_mutex);
-	err = mnt_want_write(file->f_path.mnt);
+	err = mnt_want_write_file(file);
 	if (err)
 		goto out_unlock_inode;
 
@@ -108,7 +108,7 @@
 	fat_save_attrs(inode, attr);
 	mark_inode_dirty(inode);
 out_drop_write:
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 out_unlock_inode:
 	mutex_unlock(&inode->i_mutex);
 out:
@@ -314,7 +314,7 @@
 static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
 			     struct inode *inode, umode_t *mode_ptr)
 {
-	mode_t mask, perm;
+	umode_t mask, perm;
 
 	/*
 	 * Note, the basic check is already done by a caller of
@@ -351,7 +351,7 @@
 
 static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
 {
-	mode_t allow_utime = sbi->options.allow_utime;
+	umode_t allow_utime = sbi->options.allow_utime;
 
 	if (current_fsuid() != inode->i_uid) {
 		if (in_group_p(inode->i_gid))
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 808cac7..3ab8410 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -518,7 +518,6 @@
 static void fat_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
 }
 
@@ -672,7 +671,7 @@
 
 EXPORT_SYMBOL_GPL(fat_sync_inode);
 
-static int fat_show_options(struct seq_file *m, struct vfsmount *mnt);
+static int fat_show_options(struct seq_file *m, struct dentry *root);
 static const struct super_operations fat_sops = {
 	.alloc_inode	= fat_alloc_inode,
 	.destroy_inode	= fat_destroy_inode,
@@ -811,9 +810,9 @@
 	.get_parent	= fat_get_parent,
 };
 
-static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int fat_show_options(struct seq_file *m, struct dentry *root)
 {
-	struct msdos_sb_info *sbi = MSDOS_SB(mnt->mnt_sb);
+	struct msdos_sb_info *sbi = MSDOS_SB(root->d_sb);
 	struct fat_mount_options *opts = &sbi->options;
 	int isvfat = opts->isvfat;
 
@@ -898,7 +897,7 @@
 	Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
 	Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
 	Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
-	Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
+	Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
 	Opt_err_panic, Opt_err_ro, Opt_discard, Opt_err,
 };
 
@@ -928,17 +927,17 @@
 	{Opt_err_panic, "errors=panic"},
 	{Opt_err_ro, "errors=remount-ro"},
 	{Opt_discard, "discard"},
-	{Opt_obsolate, "conv=binary"},
-	{Opt_obsolate, "conv=text"},
-	{Opt_obsolate, "conv=auto"},
-	{Opt_obsolate, "conv=b"},
-	{Opt_obsolate, "conv=t"},
-	{Opt_obsolate, "conv=a"},
-	{Opt_obsolate, "fat=%u"},
-	{Opt_obsolate, "blocksize=%u"},
-	{Opt_obsolate, "cvf_format=%20s"},
-	{Opt_obsolate, "cvf_options=%100s"},
-	{Opt_obsolate, "posix"},
+	{Opt_obsolete, "conv=binary"},
+	{Opt_obsolete, "conv=text"},
+	{Opt_obsolete, "conv=auto"},
+	{Opt_obsolete, "conv=b"},
+	{Opt_obsolete, "conv=t"},
+	{Opt_obsolete, "conv=a"},
+	{Opt_obsolete, "fat=%u"},
+	{Opt_obsolete, "blocksize=%u"},
+	{Opt_obsolete, "cvf_format=%20s"},
+	{Opt_obsolete, "cvf_options=%100s"},
+	{Opt_obsolete, "posix"},
 	{Opt_err, NULL},
 };
 static const match_table_t msdos_tokens = {
@@ -1170,7 +1169,7 @@
 			break;
 
 		/* obsolete mount options */
-		case Opt_obsolate:
+		case Opt_obsolete:
 			fat_msg(sb, KERN_INFO, "\"%s\" option is obsolete, "
 			       "not supported now", p);
 			break;
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 216b419..c5938c9 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -264,7 +264,7 @@
 }
 
 /***** Create a file */
-static int msdos_create(struct inode *dir, struct dentry *dentry, int mode,
+static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			struct nameidata *nd)
 {
 	struct super_block *sb = dir->i_sb;
@@ -346,7 +346,7 @@
 }
 
 /***** Make a directory */
-static int msdos_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct fat_slot_info sinfo;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c25cf15..a81eb23 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -782,7 +782,7 @@
 	return ERR_PTR(err);
 }
 
-static int vfat_create(struct inode *dir, struct dentry *dentry, int mode,
+static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		       struct nameidata *nd)
 {
 	struct super_block *sb = dir->i_sb;
@@ -871,7 +871,7 @@
 	return err;
 }
 
-static int vfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct inode *inode;
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 6b08864..a48e4a1 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -10,6 +10,7 @@
 #include <linux/personality.h>
 #include <asm/uaccess.h>
 #include "internal.h"
+#include "mount.h"
 
 static long do_sys_name_to_handle(struct path *path,
 				  struct file_handle __user *ufh,
@@ -24,8 +25,8 @@
 	 * We need t make sure wether the file system
 	 * support decoding of the file handle
 	 */
-	if (!path->mnt->mnt_sb->s_export_op ||
-	    !path->mnt->mnt_sb->s_export_op->fh_to_dentry)
+	if (!path->dentry->d_sb->s_export_op ||
+	    !path->dentry->d_sb->s_export_op->fh_to_dentry)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
@@ -66,7 +67,8 @@
 	} else
 		retval = 0;
 	/* copy the mount id */
-	if (copy_to_user(mnt_id, &path->mnt->mnt_id, sizeof(*mnt_id)) ||
+	if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
+			 sizeof(*mnt_id)) ||
 	    copy_to_user(ufh, handle,
 			 sizeof(struct file_handle) + handle_bytes))
 		retval = -EFAULT;
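
The fhandle hunks reflect the struct vfsmount/struct mount split that runs through this series: fields such as mnt_id now live in struct mount (fs/mount.h, core VFS only) and are reached via real_mount(), while filesystem-neutral checks compare path->dentry->d_sb rather than dereferencing the mount. A rough sketch, assuming code that lives inside fs/ and may include the internal header:

    #include <linux/fs.h>
    #include <linux/path.h>
    #include "mount.h"      /* fs-internal; not available to filesystems */

    /* Same-superblock test without touching the vfsmount. */
    static bool example_same_sb(const struct path *a, const struct path *b)
    {
            return a->dentry->d_sb == b->dentry->d_sb;
    }

    /* mnt_id has moved into struct mount; go through real_mount(). */
    static int example_mount_id(const struct path *path)
    {
            return real_mount(path->mnt)->mnt_id;
    }
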
diff --git a/fs/file_table.c b/fs/file_table.c
index c322794..20002e3 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -474,29 +474,6 @@
 
 #endif
 
-int fs_may_remount_ro(struct super_block *sb)
-{
-	struct file *file;
-	/* Check that no files are currently opened for writing. */
-	lg_global_lock(files_lglock);
-	do_file_list_for_each_entry(sb, file) {
-		struct inode *inode = file->f_path.dentry->d_inode;
-
-		/* File with pending delete? */
-		if (inode->i_nlink == 0)
-			goto too_bad;
-
-		/* Writeable file? */
-		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
-			goto too_bad;
-	} while_file_list_for_each_entry;
-	lg_global_unlock(files_lglock);
-	return 1; /* Tis' cool bro. */
-too_bad:
-	lg_global_unlock(files_lglock);
-	return 0;
-}
-
 /**
  *	mark_files_ro - mark all files read-only
  *	@sb: superblock in question
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 0845f84..96f2428 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -74,7 +74,6 @@
 	BUG_ON(strchr(fs->name, '.'));
 	if (fs->next)
 		return -EBUSY;
-	INIT_LIST_HEAD(&fs->fs_supers);
 	write_lock(&file_systems_lock);
 	p = find_filesystem(fs->name, strlen(fs->name));
 	if (*p)
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 7b2af5a..cf9ef91 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -187,10 +187,10 @@
  *  vxfs_transmod returns a Linux mode_t for a given
  *  VxFS inode structure.
  */
-static __inline__ mode_t
+static __inline__ umode_t
 vxfs_transmod(struct vxfs_inode_info *vip)
 {
-	mode_t			ret = vip->vii_mode & ~VXFS_TYPE_MASK;
+	umode_t			ret = vip->vii_mode & ~VXFS_TYPE_MASK;
 
 	if (VXFS_ISFIFO(vip))
 		ret |= S_IFIFO;
@@ -340,7 +340,6 @@
 static void vxfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(vxfs_inode_cachep, inode->i_private);
 }
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 73c3992..e295150 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -25,7 +25,6 @@
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
 #include <linux/tracepoint.h>
 #include "internal.h"
 
@@ -47,17 +46,6 @@
 	struct completion *done;	/* set if the caller waits */
 };
 
-const char *wb_reason_name[] = {
-	[WB_REASON_BACKGROUND]		= "background",
-	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
-	[WB_REASON_SYNC]		= "sync",
-	[WB_REASON_PERIODIC]		= "periodic",
-	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
-	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
-	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
-	[WB_REASON_FORKER_THREAD]	= "forker_thread"
-};
-
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
@@ -156,6 +144,7 @@
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
  * @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Description:
  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -947,7 +936,7 @@
 
 	trace_writeback_thread_start(bdi);
 
-	while (!kthread_should_stop()) {
+	while (!kthread_freezable_should_stop(NULL)) {
 		/*
 		 * Remove own delayed wake-up timer, since we are already awake
 		 * and we'll take care of the preriodic write-back.
@@ -977,8 +966,6 @@
 			 */
 			schedule();
 		}
-
-		try_to_freeze();
 	}
 
 	/* Flush any work that raced with us exiting */
@@ -1223,6 +1210,7 @@
  * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1239,7 @@
 /**
  * writeback_inodes_sb	-	writeback dirty inodes from given super_block
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1254,7 @@
 /**
  * writeback_inodes_sb_if_idle	-	start writeback if none underway
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1275,7 @@
  * writeback_inodes_sb_if_idle	-	start writeback if none underway
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
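
The fs-writeback.c changes above, together with the refrigerator() removals in ext4 and gfs2, are one freezer cleanup: kthread_freezable_should_stop() checks for kthread_stop() and handles freezing in a single call, and try_to_freeze() replaces the open-coded freezing(current)/refrigerator() sequence. A minimal sketch of a freezable worker loop in the new style (the per-iteration work is a placeholder):

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/sched.h>

    static int example_flusher_thread(void *data)
    {
            set_freezable();        /* opt this kthread in to the freezer */

            while (!kthread_freezable_should_stop(NULL)) {
                    /* ... do one round of writeback-style work here ... */

                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
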
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5cb8614..2aaf3ea 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1512,7 +1512,7 @@
 	else if (outarg->offset + num > file_size)
 		num = file_size - outarg->offset;
 
-	while (num) {
+	while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
 		struct page *page;
 		unsigned int this_num;
 
@@ -1526,6 +1526,7 @@
 
 		num -= this_num;
 		total_len += this_num;
+		index++;
 	}
 	req->misc.retrieve_in.offset = outarg->offset;
 	req->misc.retrieve_in.size = total_len;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 9f63e49..5ddd6ea 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -369,8 +369,8 @@
  * If the filesystem doesn't support this, then fall back to separate
  * 'mknod' + 'open' requests.
  */
-static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
-			    struct nameidata *nd)
+static int fuse_create_open(struct inode *dir, struct dentry *entry,
+			    umode_t mode, struct nameidata *nd)
 {
 	int err;
 	struct inode *inode;
@@ -480,7 +480,7 @@
  */
 static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
 			    struct inode *dir, struct dentry *entry,
-			    int mode)
+			    umode_t mode)
 {
 	struct fuse_entry_out outarg;
 	struct inode *inode;
@@ -547,7 +547,7 @@
 	return err;
 }
 
-static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
+static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
 		      dev_t rdev)
 {
 	struct fuse_mknod_in inarg;
@@ -573,7 +573,7 @@
 	return create_new_entry(fc, req, dir, entry, mode);
 }
 
-static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
+static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
 		       struct nameidata *nd)
 {
 	if (nd) {
@@ -585,7 +585,7 @@
 	return fuse_mknod(dir, entry, mode, 0);
 }
 
-static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
+static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
 {
 	struct fuse_mkdir_in inarg;
 	struct fuse_conn *fc = get_fuse_conn(dir);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 594f07a..0c84100 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1556,7 +1556,7 @@
 	struct inode *inode = file->f_path.dentry->d_inode;
 
 	mutex_lock(&inode->i_mutex);
-	if (origin != SEEK_CUR || origin != SEEK_SET) {
+	if (origin != SEEK_CUR && origin != SEEK_SET) {
 		retval = fuse_update_attributes(inode, NULL, file, NULL);
 		if (retval)
 			goto exit;
@@ -1567,6 +1567,10 @@
 		offset += i_size_read(inode);
 		break;
 	case SEEK_CUR:
+		if (offset == 0) {
+			retval = file->f_pos;
+			goto exit;
+		}
 		offset += file->f_pos;
 		break;
 	case SEEK_DATA:
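
The llseek hunk above fixes a condition that was always true: for any origin, at least one of origin != SEEK_CUR and origin != SEEK_SET holds, so attributes were refreshed even for the cases that do not need i_size; the intended test is the conjunction, and the new SEEK_CUR/offset==0 shortcut avoids an update for a position that cannot change. A trivial user-space illustration of the predicate (not fuse code):

    #include <stdio.h>

    int main(void)
    {
            for (int origin = SEEK_SET; origin <= SEEK_END; origin++) {
                    int buggy = (origin != SEEK_CUR || origin != SEEK_SET); /* always 1 */
                    int fixed = (origin != SEEK_CUR && origin != SEEK_SET); /* SEEK_END only */
                    printf("origin=%d buggy=%d fixed=%d\n", origin, buggy, fixed);
            }
            return 0;
    }
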
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index cf6db0a..1964da0 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -80,7 +80,7 @@
 
 	/** The sticky bit in inode->i_mode may have been removed, so
 	    preserve the original mode */
-	mode_t orig_i_mode;
+	umode_t orig_i_mode;
 
 	/** Version of last attribute change */
 	u64 attr_version;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3e6d727..64cf8d0 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -107,7 +107,6 @@
 static void fuse_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(fuse_inode_cachep, inode);
 }
 
@@ -498,9 +497,10 @@
 	return 1;
 }
 
-static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int fuse_show_options(struct seq_file *m, struct dentry *root)
 {
-	struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb);
+	struct super_block *sb = root->d_sb;
+	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
 	seq_printf(m, ",user_id=%u", fc->user_id);
 	seq_printf(m, ",group_id=%u", fc->group_id);
@@ -510,9 +510,8 @@
 		seq_puts(m, ",allow_other");
 	if (fc->max_read != ~0)
 		seq_printf(m, ",max_read=%u", fc->max_read);
-	if (mnt->mnt_sb->s_bdev &&
-	    mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
-		seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize);
+	if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
+		seq_printf(m, ",blksize=%lu", sb->s_blocksize);
 	return 0;
 }
 
@@ -1138,28 +1137,28 @@
 {
 	int err;
 
-	err = register_filesystem(&fuse_fs_type);
-	if (err)
-		goto out;
-
-	err = register_fuseblk();
-	if (err)
-		goto out_unreg;
-
 	fuse_inode_cachep = kmem_cache_create("fuse_inode",
 					      sizeof(struct fuse_inode),
 					      0, SLAB_HWCACHE_ALIGN,
 					      fuse_inode_init_once);
 	err = -ENOMEM;
 	if (!fuse_inode_cachep)
-		goto out_unreg2;
+		goto out;
+
+	err = register_fuseblk();
+	if (err)
+		goto out2;
+
+	err = register_filesystem(&fuse_fs_type);
+	if (err)
+		goto out3;
 
 	return 0;
 
- out_unreg2:
+ out3:
 	unregister_fuseblk();
- out_unreg:
-	unregister_filesystem(&fuse_fs_type);
+ out2:
+	kmem_cache_destroy(fuse_inode_cachep);
  out:
 	return err;
 }
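
The fuse_fs_init() reordering follows the usual module-init rule: allocate internal resources first and register externally visible entry points last, so nothing can mount the filesystem before its inode cache exists, and the error path unwinds in exactly the reverse order (the old ordering also leaked the cache on failure). A hedged sketch of the pattern with hypothetical names:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/slab.h>

    /* All names below are hypothetical. */
    struct example_inode { struct inode vfs_inode; };

    static struct kmem_cache *example_inode_cachep;
    static struct file_system_type example_fs_type = {
            .owner = THIS_MODULE,
            .name  = "examplefs",
    };

    static int __init example_fs_init(void)
    {
            int err = -ENOMEM;

            example_inode_cachep = kmem_cache_create("example_inode",
                                                     sizeof(struct example_inode),
                                                     0, SLAB_HWCACHE_ALIGN, NULL);
            if (!example_inode_cachep)
                    goto out;

            err = register_filesystem(&example_fs_type);    /* visible last */
            if (err)
                    goto out_cache;

            return 0;

    out_cache:
            kmem_cache_destroy(example_inode_cachep);       /* undo in reverse */
    out:
            return err;
    }
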
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 65978d7..230eb0f 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -38,8 +38,9 @@
 	return NULL;
 }
 
-static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
+struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
 {
+	struct gfs2_inode *ip = GFS2_I(inode);
 	struct posix_acl *acl;
 	const char *name;
 	char *data;
@@ -67,11 +68,6 @@
 	return acl;
 }
 
-struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
-{
-	return gfs2_acl_get(GFS2_I(inode), type);
-}
-
 static int gfs2_set_mode(struct inode *inode, umode_t mode)
 {
 	int error = 0;
@@ -125,7 +121,7 @@
 	if (S_ISLNK(inode->i_mode))
 		return 0;
 
-	acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
+	acl = gfs2_get_acl(&dip->i_inode, ACL_TYPE_DEFAULT);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (!acl) {
@@ -166,7 +162,7 @@
 	unsigned int len;
 	int error;
 
-	acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
+	acl = gfs2_get_acl(&ip->i_inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (!acl)
@@ -216,7 +212,7 @@
 	if (type < 0)
 		return type;
 
-	acl = gfs2_acl_get(GFS2_I(inode), type);
+	acl = gfs2_get_acl(inode, type);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl == NULL)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 4858e1f..501e5cb 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -615,7 +615,7 @@
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 	int alloc_required;
 	int error = 0;
-	struct gfs2_alloc *al = NULL;
+	struct gfs2_qadata *qa = NULL;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
 	struct page *page;
@@ -639,8 +639,8 @@
 		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
 
 	if (alloc_required) {
-		al = gfs2_alloc_get(ip);
-		if (!al) {
+		qa = gfs2_qadata_get(ip);
+		if (!qa) {
 			error = -ENOMEM;
 			goto out_unlock;
 		}
@@ -649,8 +649,7 @@
 		if (error)
 			goto out_alloc_put;
 
-		al->al_requested = data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip);
+		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
 		if (error)
 			goto out_qunlock;
 	}
@@ -711,7 +710,7 @@
 out_qunlock:
 		gfs2_quota_unlock(ip);
 out_alloc_put:
-		gfs2_alloc_put(ip);
+		gfs2_qadata_put(ip);
 	}
 out_unlock:
 	if (&ip->i_inode == sdp->sd_rindex) {
@@ -848,7 +847,7 @@
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	struct buffer_head *dibh;
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_qadata *qa = ip->i_qadata;
 	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
 	unsigned int to = from + len;
 	int ret;
@@ -880,10 +879,11 @@
 	brelse(dibh);
 failed:
 	gfs2_trans_end(sdp);
-	if (al) {
+	if (ip->i_res)
 		gfs2_inplace_release(ip);
+	if (qa) {
 		gfs2_quota_unlock(ip);
-		gfs2_alloc_put(ip);
+		gfs2_qadata_put(ip);
 	}
 	if (inode == sdp->sd_rindex) {
 		gfs2_glock_dq(&m_ip->i_gh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 41d494d..14a7040 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -133,7 +133,7 @@
 		   and write it out to disk */
 
 		unsigned int n = 1;
-		error = gfs2_alloc_block(ip, &block, &n);
+		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 		if (error)
 			goto out_brelse;
 		if (isdir) {
@@ -503,7 +503,7 @@
 	do {
 		int error;
 		n = blks - alloced;
-		error = gfs2_alloc_block(ip, &bn, &n);
+		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 		if (error)
 			return error;
 		alloced += n;
@@ -743,9 +743,6 @@
 	else if (ip->i_depth)
 		revokes = sdp->sd_inptrs;
 
-	if (error)
-		return error;
-
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 	bstart = 0;
 	blen = 0;
@@ -1044,7 +1041,7 @@
 		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
 
 	find_metapath(sdp, lblock, &mp, ip->i_height);
-	if (!gfs2_alloc_get(ip))
+	if (!gfs2_qadata_get(ip))
 		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -1064,7 +1061,7 @@
 	gfs2_quota_unhold(ip);
 
 out:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 	return error;
 }
 
@@ -1166,21 +1163,20 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct buffer_head *dibh;
-	struct gfs2_alloc *al = NULL;
+	struct gfs2_qadata *qa = NULL;
 	int error;
 
 	if (gfs2_is_stuffed(ip) &&
 	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
-		al = gfs2_alloc_get(ip);
-		if (al == NULL)
+		qa = gfs2_qadata_get(ip);
+		if (qa == NULL)
 			return -ENOMEM;
 
 		error = gfs2_quota_lock_check(ip);
 		if (error)
 			goto do_grow_alloc_put;
 
-		al->al_requested = 1;
-		error = gfs2_inplace_reserve(ip);
+		error = gfs2_inplace_reserve(ip, 1);
 		if (error)
 			goto do_grow_qunlock;
 	}
@@ -1189,7 +1185,7 @@
 	if (error)
 		goto do_grow_release;
 
-	if (al) {
+	if (qa) {
 		error = gfs2_unstuff_dinode(ip, NULL);
 		if (error)
 			goto do_end_trans;
@@ -1208,12 +1204,12 @@
 do_end_trans:
 	gfs2_trans_end(sdp);
 do_grow_release:
-	if (al) {
+	if (qa) {
 		gfs2_inplace_release(ip);
 do_grow_qunlock:
 		gfs2_quota_unlock(ip);
 do_grow_alloc_put:
-		gfs2_alloc_put(ip);
+		gfs2_qadata_put(ip);
 	}
 	return error;
 }
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 8ccad24..c35573a 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -76,6 +76,8 @@
 #define IS_LEAF     1 /* Hashed (leaf) directory */
 #define IS_DINODE   2 /* Linear (stuffed dinode block) directory */
 
+#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */
+
 #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
 #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
 
@@ -821,7 +823,7 @@
 	struct gfs2_dirent *dent;
 	struct qstr name = { .name = "", .len = 0, .hash = 0 };
 
-	error = gfs2_alloc_block(ip, &bn, &n);
+	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 	if (error)
 		return NULL;
 	bh = gfs2_meta_new(ip->i_gl, bn);
@@ -1376,6 +1378,52 @@
 	return error;
 }
 
+/**
+ * gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
+ *
+ * Note: we can't calculate each index like dir_e_read can because we don't
+ * have the leaf, and therefore we don't have the depth, and therefore we
+ * don't have the length. So we have to just read enough ahead to make up
+ * for the loss of information.
+ */
+static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
+			       struct file_ra_state *f_ra)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_glock *gl = ip->i_gl;
+	struct buffer_head *bh;
+	u64 blocknr = 0, last;
+	unsigned count;
+
+	/* First check if we've already read-ahead for the whole range. */
+	if (index + MAX_RA_BLOCKS < f_ra->start)
+		return;
+
+	f_ra->start = max((pgoff_t)index, f_ra->start);
+	for (count = 0; count < MAX_RA_BLOCKS; count++) {
+		if (f_ra->start >= hsize) /* if exceeded the hash table */
+			break;
+
+		last = blocknr;
+		blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
+		f_ra->start++;
+		if (blocknr == last)
+			continue;
+
+		bh = gfs2_getbuf(gl, blocknr, 1);
+		if (trylock_buffer(bh)) {
+			if (buffer_uptodate(bh)) {
+				unlock_buffer(bh);
+				brelse(bh);
+				continue;
+			}
+			bh->b_end_io = end_buffer_read_sync;
+			submit_bh(READA | REQ_META, bh);
+			continue;
+		}
+		brelse(bh);
+	}
+}
 
 /**
  * dir_e_read - Reads the entries from a directory into a filldir buffer
@@ -1388,7 +1436,7 @@
  */
 
 static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
-		      filldir_t filldir)
+		      filldir_t filldir, struct file_ra_state *f_ra)
 {
 	struct gfs2_inode *dip = GFS2_I(inode);
 	u32 hsize, len = 0;
@@ -1402,10 +1450,14 @@
 	hash = gfs2_dir_offset2hash(*offset);
 	index = hash >> (32 - dip->i_depth);
 
+	if (dip->i_hash_cache == NULL)
+		f_ra->start = 0;
 	lp = gfs2_dir_get_hash_table(dip);
 	if (IS_ERR(lp))
 		return PTR_ERR(lp);
 
+	gfs2_dir_readahead(inode, hsize, index, f_ra);
+
 	while (index < hsize) {
 		error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
 					   &copied, &depth,
@@ -1423,7 +1475,7 @@
 }
 
 int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
-		  filldir_t filldir)
+		  filldir_t filldir, struct file_ra_state *f_ra)
 {
 	struct gfs2_inode *dip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1437,7 +1489,7 @@
 		return 0;
 
 	if (dip->i_diskflags & GFS2_DIF_EXHASH)
-		return dir_e_read(inode, offset, opaque, filldir);
+		return dir_e_read(inode, offset, opaque, filldir, f_ra);
 
 	if (!gfs2_is_stuffed(dip)) {
 		gfs2_consist_inode(dip);
@@ -1798,7 +1850,7 @@
 	if (!ht)
 		return -ENOMEM;
 
-	if (!gfs2_alloc_get(dip)) {
+	if (!gfs2_qadata_get(dip)) {
 		error = -ENOMEM;
 		goto out;
 	}
@@ -1887,7 +1939,7 @@
 	gfs2_rlist_free(&rlist);
 	gfs2_quota_unhold(dip);
 out_put:
-	gfs2_alloc_put(dip);
+	gfs2_qadata_put(dip);
 out:
 	kfree(ht);
 	return error;
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index ff5772f..98c960b 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -25,7 +25,7 @@
 			const struct gfs2_inode *ip);
 extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
 extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
-			 filldir_t filldir);
+			 filldir_t filldir, struct file_ra_state *f_ra);
 extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
 			  const struct gfs2_inode *nip, unsigned int new_type);
 
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index fe9945f..70ba891 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -99,6 +99,7 @@
 	struct gfs2_holder gh;
 	u64 offset = 0;
 	int error;
+	struct file_ra_state f_ra = { .start = 0 };
 
 	if (!dir)
 		return -EINVAL;
@@ -118,7 +119,7 @@
 	if (error)
 		return error;
 
-	error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);
+	error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);
 
 	gfs2_glock_dq_uninit(&gh);
 
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index ce36a56..c5fb359 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -105,7 +105,7 @@
 		return error;
 	}
 
-	error = gfs2_dir_read(dir, &offset, dirent, filldir);
+	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
 
 	gfs2_glock_dq_uninit(&d_gh);
 
@@ -223,7 +223,7 @@
 	int error;
 	u32 new_flags, flags;
 
-	error = mnt_want_write(filp->f_path.mnt);
+	error = mnt_want_write_file(filp);
 	if (error)
 		return error;
 
@@ -285,7 +285,7 @@
 out:
 	gfs2_glock_dq_uninit(&gh);
 out_drop_write:
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 	return error;
 }
 
@@ -365,7 +365,7 @@
 	u64 pos = page->index << PAGE_CACHE_SHIFT;
 	unsigned int data_blocks, ind_blocks, rblocks;
 	struct gfs2_holder gh;
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	loff_t size;
 	int ret;
 
@@ -393,16 +393,15 @@
 	}
 
 	ret = -ENOMEM;
-	al = gfs2_alloc_get(ip);
-	if (al == NULL)
+	qa = gfs2_qadata_get(ip);
+	if (qa == NULL)
 		goto out_unlock;
 
 	ret = gfs2_quota_lock_check(ip);
 	if (ret)
 		goto out_alloc_put;
 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
-	al->al_requested = data_blocks + ind_blocks;
-	ret = gfs2_inplace_reserve(ip);
+	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
 	if (ret)
 		goto out_quota_unlock;
 
@@ -448,7 +447,7 @@
 out_quota_unlock:
 	gfs2_quota_unlock(ip);
 out_alloc_put:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 out_unlock:
 	gfs2_glock_dq(&gh);
 out:
@@ -609,7 +608,7 @@
 	struct inode *inode = mapping->host;
 	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
 	struct gfs2_inode *ip = GFS2_I(inode);
-	int ret, ret1 = 0;
+	int ret = 0, ret1 = 0;
 
 	if (mapping->nrpages) {
 		ret1 = filemap_fdatawrite_range(mapping, start, end);
@@ -750,8 +749,10 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 	loff_t bytes, max_bytes;
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	int error;
+	const loff_t pos = offset;
+	const loff_t count = len;
 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
@@ -782,8 +783,8 @@
 	while (len > 0) {
 		if (len < bytes)
 			bytes = len;
-		al = gfs2_alloc_get(ip);
-		if (!al) {
+		qa = gfs2_qadata_get(ip);
+		if (!qa) {
 			error = -ENOMEM;
 			goto out_unlock;
 		}
@@ -795,8 +796,7 @@
 retry:
 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 
-		al->al_requested = data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip);
+		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
 		if (error) {
 			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
 				bytes >>= 1;
@@ -810,7 +810,6 @@
 		max_bytes = bytes;
 		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
 				&max_bytes, &data_blocks, &ind_blocks);
-		al->al_requested = data_blocks + ind_blocks;
 
 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
 			  RES_RG_HDR + gfs2_rg_blocks(ip);
@@ -832,8 +831,11 @@
 		offset += max_bytes;
 		gfs2_inplace_release(ip);
 		gfs2_quota_unlock(ip);
-		gfs2_alloc_put(ip);
+		gfs2_qadata_put(ip);
 	}
+
+	if (error == 0)
+		error = generic_write_sync(file, pos, count);
 	goto out_unlock;
 
 out_trans_fail:
@@ -841,7 +843,7 @@
 out_qunlock:
 	gfs2_quota_unlock(ip);
 out_alloc_put:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 out_unlock:
 	gfs2_glock_dq(&ip->i_gh);
 out_uninit:
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 7389dfd..e1d3bb5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -244,17 +244,16 @@
 
 #define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
 
-struct gfs2_alloc {
+struct gfs2_qadata { /* quota allocation data */
 	/* Quota stuff */
-	struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
-	struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
-	unsigned int al_qd_num;
+	struct gfs2_quota_data *qa_qd[2*MAXQUOTAS];
+	struct gfs2_holder qa_qd_ghs[2*MAXQUOTAS];
+	unsigned int qa_qd_num;
+};
 
-	u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
-	u32 al_alloced; /* Filled in by gfs2_alloc_*() */
-
-	/* Filled in by gfs2_inplace_reserve() */
-	struct gfs2_holder al_rgd_gh;
+struct gfs2_blkreserv {
+	u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
+	struct gfs2_holder rs_rgd_gh; /* Filled in by gfs2_inplace_reserve() */
 };
 
 enum {
@@ -275,7 +274,8 @@
 	struct gfs2_glock *i_gl; /* Move into i_gh? */
 	struct gfs2_holder i_iopen_gh;
 	struct gfs2_holder i_gh; /* for prepare/commit_write only */
-	struct gfs2_alloc *i_alloc;
+	struct gfs2_qadata *i_qadata; /* quota allocation data */
+	struct gfs2_blkreserv *i_res; /* resource group block reservation */
 	struct gfs2_rgrpd *i_rgd;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
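
The incore.h hunk above is the core of the gfs2 rework in this merge: the old struct gfs2_alloc is split into quota bookkeeping (struct gfs2_qadata) and a block reservation (struct gfs2_blkreserv), and gfs2_inplace_reserve() now takes the requested block count as an argument instead of reading al->al_requested. The callers converted above and below all follow the same shape; roughly, a write path that used to do

	al = gfs2_alloc_get(ip);
	al->al_requested = data_blocks + ind_blocks;
	error = gfs2_inplace_reserve(ip);

now does

	qa = gfs2_qadata_get(ip);
	error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);

with gfs2_qadata_put(ip) on the unwind path where gfs2_alloc_put(ip) used to be.
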
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index cfd4959..017960c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -333,7 +333,7 @@
  */
 
 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
-		     unsigned int mode)
+		     umode_t mode)
 {
 	int error;
 
@@ -364,7 +364,7 @@
 	return 0;
 }
 
-static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
+static void munge_mode_uid_gid(struct gfs2_inode *dip, umode_t *mode,
 			       unsigned int *uid, unsigned int *gid)
 {
 	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
@@ -389,12 +389,13 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	int error;
+	int dblocks = 1;
 
-	if (gfs2_alloc_get(dip) == NULL)
-		return -ENOMEM;
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		fs_warn(sdp, "rindex update returns %d\n", error);
 
-	dip->i_alloc->al_requested = RES_DINODE;
-	error = gfs2_inplace_reserve(dip);
+	error = gfs2_inplace_reserve(dip, RES_DINODE);
 	if (error)
 		goto out;
 
@@ -402,14 +403,13 @@
 	if (error)
 		goto out_ipreserv;
 
-	error = gfs2_alloc_di(dip, no_addr, generation);
+	error = gfs2_alloc_blocks(dip, no_addr, &dblocks, 1, generation);
 
 	gfs2_trans_end(sdp);
 
 out_ipreserv:
 	gfs2_inplace_release(dip);
 out:
-	gfs2_alloc_put(dip);
 	return error;
 }
 
@@ -447,7 +447,7 @@
  */
 
 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
-			const struct gfs2_inum_host *inum, unsigned int mode,
+			const struct gfs2_inum_host *inum, umode_t mode,
 			unsigned int uid, unsigned int gid,
 			const u64 *generation, dev_t dev, const char *symname,
 			unsigned size, struct buffer_head **bhp)
@@ -516,7 +516,7 @@
 }
 
 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
-		       unsigned int mode, const struct gfs2_inum_host *inum,
+		       umode_t mode, const struct gfs2_inum_host *inum,
 		       const u64 *generation, dev_t dev, const char *symname,
 		       unsigned int size, struct buffer_head **bhp)
 {
@@ -525,7 +525,7 @@
 	int error;
 
 	munge_mode_uid_gid(dip, &mode, &uid, &gid);
-	if (!gfs2_alloc_get(dip))
+	if (!gfs2_qadata_get(dip))
 		return -ENOMEM;
 
 	error = gfs2_quota_lock(dip, uid, gid);
@@ -547,7 +547,7 @@
 out_quota:
 	gfs2_quota_unlock(dip);
 out:
-	gfs2_alloc_put(dip);
+	gfs2_qadata_put(dip);
 	return error;
 }
 
@@ -555,13 +555,13 @@
 		       struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	int alloc_required;
 	struct buffer_head *dibh;
 	int error;
 
-	al = gfs2_alloc_get(dip);
-	if (!al)
+	qa = gfs2_qadata_get(dip);
+	if (!qa)
 		return -ENOMEM;
 
 	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -576,9 +576,7 @@
 		if (error)
 			goto fail_quota_locks;
 
-		al->al_requested = sdp->sd_max_dirres;
-
-		error = gfs2_inplace_reserve(dip);
+		error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
 		if (error)
 			goto fail_quota_locks;
 
@@ -619,11 +617,11 @@
 	gfs2_quota_unlock(dip);
 
 fail:
-	gfs2_alloc_put(dip);
+	gfs2_qadata_put(dip);
 	return error;
 }
 
-int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
 		    void *fs_info)
 {
 	const struct xattr *xattr;
@@ -659,7 +657,7 @@
  */
 
 static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
-			     unsigned int mode, dev_t dev, const char *symname,
+			     umode_t mode, dev_t dev, const char *symname,
 			     unsigned int size, int excl)
 {
 	const struct qstr *name = &dentry->d_name;
@@ -728,9 +726,12 @@
 		brelse(bh);
 
 	gfs2_trans_end(sdp);
-	gfs2_inplace_release(dip);
+	/* Check if we reserved space in the rgrp. Function link_dinode may
+	   not, depending on whether alloc is required. */
+	if (dip->i_res)
+		gfs2_inplace_release(dip);
 	gfs2_quota_unlock(dip);
-	gfs2_alloc_put(dip);
+	gfs2_qadata_put(dip);
 	mark_inode_dirty(inode);
 	gfs2_glock_dq_uninit_m(2, ghs);
 	d_instantiate(dentry, inode);
@@ -760,7 +761,7 @@
  */
 
 static int gfs2_create(struct inode *dir, struct dentry *dentry,
-		       int mode, struct nameidata *nd)
+		       umode_t mode, struct nameidata *nd)
 {
 	int excl = 0;
 	if (nd && (nd->flags & LOOKUP_EXCL))
@@ -875,8 +876,9 @@
 	error = 0;
 
 	if (alloc_required) {
-		struct gfs2_alloc *al = gfs2_alloc_get(dip);
-		if (!al) {
+		struct gfs2_qadata *qa = gfs2_qadata_get(dip);
+
+		if (!qa) {
 			error = -ENOMEM;
 			goto out_gunlock;
 		}
@@ -885,9 +887,7 @@
 		if (error)
 			goto out_alloc;
 
-		al->al_requested = sdp->sd_max_dirres;
-
-		error = gfs2_inplace_reserve(dip);
+		error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
 		if (error)
 			goto out_gunlock_q;
 
@@ -930,7 +930,7 @@
 		gfs2_quota_unlock(dip);
 out_alloc:
 	if (alloc_required)
-		gfs2_alloc_put(dip);
+		gfs2_qadata_put(dip);
 out_gunlock:
 	gfs2_glock_dq(ghs + 1);
 out_child:
@@ -1037,12 +1037,14 @@
 	struct buffer_head *bh;
 	struct gfs2_holder ghs[3];
 	struct gfs2_rgrpd *rgd;
-	int error;
+	int error = -EROFS;
 
 	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 	gfs2_holder_init(ip->i_gl,  LM_ST_EXCLUSIVE, 0, ghs + 1);
 
 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
+	if (!rgd)
+		goto out_inodes;
 	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
 
 
@@ -1088,12 +1090,13 @@
 out_gunlock:
 	gfs2_glock_dq(ghs + 2);
 out_rgrp:
-	gfs2_holder_uninit(ghs + 2);
 	gfs2_glock_dq(ghs + 1);
 out_child:
-	gfs2_holder_uninit(ghs + 1);
 	gfs2_glock_dq(ghs);
 out_parent:
+	gfs2_holder_uninit(ghs + 2);
+out_inodes:
+	gfs2_holder_uninit(ghs + 1);
 	gfs2_holder_uninit(ghs);
 	return error;
 }
@@ -1129,7 +1132,7 @@
  * Returns: errno
  */
 
-static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, 0, 0);
 }
@@ -1143,7 +1146,7 @@
  *
  */
 
-static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 		      dev_t dev)
 {
 	return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0);
@@ -1350,8 +1353,9 @@
 	error = 0;
 
 	if (alloc_required) {
-		struct gfs2_alloc *al = gfs2_alloc_get(ndip);
-		if (!al) {
+		struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
+
+		if (!qa) {
 			error = -ENOMEM;
 			goto out_gunlock;
 		}
@@ -1360,9 +1364,7 @@
 		if (error)
 			goto out_alloc;
 
-		al->al_requested = sdp->sd_max_dirres;
-
-		error = gfs2_inplace_reserve(ndip);
+		error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres);
 		if (error)
 			goto out_gunlock_q;
 
@@ -1423,7 +1425,7 @@
 		gfs2_quota_unlock(ndip);
 out_alloc:
 	if (alloc_required)
-		gfs2_alloc_put(ndip);
+		gfs2_qadata_put(ndip);
 out_gunlock:
 	while (x--) {
 		gfs2_glock_dq(ghs + x);
@@ -1584,7 +1586,7 @@
 	if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
 		ogid = ngid = NO_QUOTA_CHANGE;
 
-	if (!gfs2_alloc_get(ip))
+	if (!gfs2_qadata_get(ip))
 		return -ENOMEM;
 
 	error = gfs2_quota_lock(ip, nuid, ngid);
@@ -1616,7 +1618,7 @@
 out_gunlock_q:
 	gfs2_quota_unlock(ip);
 out_alloc:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 	return error;
 }
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 5986464..756fae9 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -626,7 +626,7 @@
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
 		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
 	else
-		submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
+		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
 	wait_on_buffer(bh);
 
 	if (!buffer_uptodate(bh))
@@ -951,8 +951,8 @@
 			wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
 		do {
 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 8a139ff..c150298 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -40,7 +40,8 @@
 	inode_init_once(&ip->i_inode);
 	init_rwsem(&ip->i_rw_mutex);
 	INIT_LIST_HEAD(&ip->i_trunc_list);
-	ip->i_alloc = NULL;
+	ip->i_qadata = NULL;
+	ip->i_res = NULL;
 	ip->i_hash_cache = NULL;
 }
 
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index be29858..181586e 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -435,7 +435,7 @@
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
+		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
 
 	dblock++;
 	extlen--;
@@ -444,7 +444,7 @@
 		bh = gfs2_getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
-			ll_rw_block(READA, 1, &bh);
+			ll_rw_block(READA | REQ_META, 1, &bh);
 		brelse(bh);
 		dblock++;
 		extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cb23c2b..fe72e79 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@
 
 	bio->bi_end_io = end_bio_io_page;
 	bio->bi_private = page;
-	submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
+	submit_bio(READ_SYNC | REQ_META, bio);
 	wait_on_page_locked(page);
 	bio_put(bio);
 	if (!PageUptodate(page)) {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 7e528dc..a45b21b 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -494,11 +494,11 @@
 int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
-	struct gfs2_quota_data **qd = al->al_qd;
+	struct gfs2_qadata *qa = ip->i_qadata;
+	struct gfs2_quota_data **qd = qa->qa_qd;
 	int error;
 
-	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
+	if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
 	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 		return -EIO;
 
@@ -508,20 +508,20 @@
 	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
 	if (error)
 		goto out;
-	al->al_qd_num++;
+	qa->qa_qd_num++;
 	qd++;
 
 	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
 	if (error)
 		goto out;
-	al->al_qd_num++;
+	qa->qa_qd_num++;
 	qd++;
 
 	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
 		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
 		if (error)
 			goto out;
-		al->al_qd_num++;
+		qa->qa_qd_num++;
 		qd++;
 	}
 
@@ -529,7 +529,7 @@
 		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
 		if (error)
 			goto out;
-		al->al_qd_num++;
+		qa->qa_qd_num++;
 		qd++;
 	}
 
@@ -542,16 +542,16 @@
 void gfs2_quota_unhold(struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_qadata *qa = ip->i_qadata;
 	unsigned int x;
 
 	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 
-	for (x = 0; x < al->al_qd_num; x++) {
-		qdsb_put(al->al_qd[x]);
-		al->al_qd[x] = NULL;
+	for (x = 0; x < qa->qa_qd_num; x++) {
+		qdsb_put(qa->qa_qd[x]);
+		qa->qa_qd[x] = NULL;
 	}
-	al->al_qd_num = 0;
+	qa->qa_qd_num = 0;
 }
 
 static int sort_qd(const void *a, const void *b)
@@ -712,7 +712,7 @@
 		set_buffer_uptodate(bh);
 
 	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+		ll_rw_block(READ | REQ_META, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh))
 			goto unlock_out;
@@ -762,7 +762,6 @@
 	struct gfs2_quota_data *qd;
 	loff_t offset;
 	unsigned int nalloc = 0, blocks;
-	struct gfs2_alloc *al = NULL;
 	int error;
 
 	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
@@ -792,26 +791,19 @@
 			nalloc++;
 	}
 
-	al = gfs2_alloc_get(ip);
-	if (!al) {
-		error = -ENOMEM;
-		goto out_gunlock;
-	}
 	/* 
 	 * 1 blk for unstuffing inode if stuffed. We add this extra
 	 * block to the reservation unconditionally. If the inode
 	 * doesn't need unstuffing, the block will be released to the 
 	 * rgrp since it won't be allocated during the transaction
 	 */
-	al->al_requested = 1;
 	/* +3 in the end for unstuffing block, inode size update block
 	 * and another block in case quota straddles page boundary and 
 	 * two blocks need to be updated instead of 1 */
 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
-	if (nalloc)
-		al->al_requested += nalloc * (data_blocks + ind_blocks);		
-	error = gfs2_inplace_reserve(ip);
+	error = gfs2_inplace_reserve(ip, 1 +
+				     (nalloc * (data_blocks + ind_blocks)));
 	if (error)
 		goto out_alloc;
 
@@ -840,8 +832,6 @@
 out_ipres:
 	gfs2_inplace_release(ip);
 out_alloc:
-	gfs2_alloc_put(ip);
-out_gunlock:
 	gfs2_glock_dq_uninit(&i_gh);
 out:
 	while (qx--)
@@ -925,7 +915,7 @@
 int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_qadata *qa = ip->i_qadata;
 	struct gfs2_quota_data *qd;
 	unsigned int x;
 	int error = 0;
@@ -938,15 +928,15 @@
 	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 		return 0;
 
-	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
+	sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
 	     sort_qd, NULL);
 
-	for (x = 0; x < al->al_qd_num; x++) {
+	for (x = 0; x < qa->qa_qd_num; x++) {
 		int force = NO_FORCE;
-		qd = al->al_qd[x];
+		qd = qa->qa_qd[x];
 		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 			force = FORCE;
-		error = do_glock(qd, force, &al->al_qd_ghs[x]);
+		error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
 		if (error)
 			break;
 	}
@@ -955,7 +945,7 @@
 		set_bit(GIF_QD_LOCKED, &ip->i_flags);
 	else {
 		while (x--)
-			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
+			gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
 		gfs2_quota_unhold(ip);
 	}
 
@@ -1000,7 +990,7 @@
 
 void gfs2_quota_unlock(struct gfs2_inode *ip)
 {
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_qadata *qa = ip->i_qadata;
 	struct gfs2_quota_data *qda[4];
 	unsigned int count = 0;
 	unsigned int x;
@@ -1008,14 +998,14 @@
 	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
 		goto out;
 
-	for (x = 0; x < al->al_qd_num; x++) {
+	for (x = 0; x < qa->qa_qd_num; x++) {
 		struct gfs2_quota_data *qd;
 		int sync;
 
-		qd = al->al_qd[x];
+		qd = qa->qa_qd[x];
 		sync = need_sync(qd);
 
-		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
+		gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
 
 		if (sync && qd_trylock(qd))
 			qda[count++] = qd;
@@ -1048,7 +1038,7 @@
 int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_qadata *qa = ip->i_qadata;
 	struct gfs2_quota_data *qd;
 	s64 value;
 	unsigned int x;
@@ -1060,8 +1050,8 @@
         if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                 return 0;
 
-	for (x = 0; x < al->al_qd_num; x++) {
-		qd = al->al_qd[x];
+	for (x = 0; x < qa->qa_qd_num; x++) {
+		qd = qa->qa_qd[x];
 
 		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
 		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
@@ -1099,7 +1089,7 @@
 void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
 		       u32 uid, u32 gid)
 {
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_qadata *qa = ip->i_qadata;
 	struct gfs2_quota_data *qd;
 	unsigned int x;
 
@@ -1108,8 +1098,8 @@
 	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
 		return;
 
-	for (x = 0; x < al->al_qd_num; x++) {
-		qd = al->al_qd[x];
+	for (x = 0; x < qa->qa_qd_num; x++) {
+		qd = qa->qa_qd[x];
 
 		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
 		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
@@ -1427,8 +1417,8 @@
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
@@ -1529,7 +1519,6 @@
 	unsigned int data_blocks, ind_blocks;
 	unsigned int blocks = 0;
 	int alloc_required;
-	struct gfs2_alloc *al;
 	loff_t offset;
 	int error;
 
@@ -1594,15 +1583,12 @@
 	if (gfs2_is_stuffed(ip))
 		alloc_required = 1;
 	if (alloc_required) {
-		al = gfs2_alloc_get(ip);
-		if (al == NULL)
-			goto out_i;
 		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 				       &data_blocks, &ind_blocks);
-		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip);
+		blocks = 1 + data_blocks + ind_blocks;
+		error = gfs2_inplace_reserve(ip, blocks);
 		if (error)
-			goto out_alloc;
+			goto out_i;
 		blocks += gfs2_rg_blocks(ip);
 	}
 
@@ -1617,11 +1603,8 @@
 
 	gfs2_trans_end(sdp);
 out_release:
-	if (alloc_required) {
+	if (alloc_required)
 		gfs2_inplace_release(ip);
-out_alloc:
-		gfs2_alloc_put(ip);
-	}
 out_i:
 	gfs2_glock_dq_uninit(&i_gh);
 out_q:
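
Across the gfs2 hunks in this series, the old struct gfs2_alloc is split in two: quota bookkeeping moves to struct gfs2_qadata (ip->i_qadata with qa_qd[], qa_qd_num and qa_qd_ghs[]), while block reservations move to struct gfs2_blkreserv (ip->i_res, introduced in rgrp.c below). A minimal sketch of the resulting quota bracket, pieced together from the callers updated in super.c and xattr.c further down (error handling trimmed, so treat it as illustrative rather than a verbatim call site):

	int error;
	struct gfs2_qadata *qa = gfs2_qadata_get(ip);	/* allocates ip->i_qadata */
	if (!qa)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (!error) {
		/* ... deallocate or modify blocks charged to ip ... */
		gfs2_quota_unhold(ip);
	}
	gfs2_qadata_put(ip);	/* frees and clears ip->i_qadata */
	return error;
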
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 96bd6d75..2223462 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -65,8 +65,8 @@
 };
 
 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
-                        unsigned char old_state, unsigned char new_state,
-			unsigned int *n);
+			unsigned char old_state,
+			struct gfs2_bitmap **rbi);
 
 /**
  * gfs2_setbit - Set a bit in the bitmaps
@@ -860,22 +860,36 @@
 }
 
 /**
- * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
+ * gfs2_qadata_get - get the struct gfs2_qadata structure for an inode
  * @ip: the incore GFS2 inode structure
  *
- * Returns: the struct gfs2_alloc
+ * Returns: the struct gfs2_qadata
  */
 
-struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
+struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	int error;
-	BUG_ON(ip->i_alloc != NULL);
-	ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS);
+	BUG_ON(ip->i_qadata != NULL);
+	ip->i_qadata = kzalloc(sizeof(struct gfs2_qadata), GFP_NOFS);
 	error = gfs2_rindex_update(sdp);
 	if (error)
 		fs_warn(sdp, "rindex update returns %d\n", error);
-	return ip->i_alloc;
+	return ip->i_qadata;
+}
+
+/**
+ * gfs2_blkrsv_get - get the struct gfs2_blkreserv structure for an inode
+ * @ip: the incore GFS2 inode structure
+ *
+ * Returns: the struct gfs2_blkreserv
+ */
+
+static struct gfs2_blkreserv *gfs2_blkrsv_get(struct gfs2_inode *ip)
+{
+	BUG_ON(ip->i_res != NULL);
+	ip->i_res = kzalloc(sizeof(struct gfs2_blkreserv), GFP_NOFS);
+	return ip->i_res;
 }
 
 /**
@@ -890,15 +904,20 @@
 
 static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
 {
-	const struct gfs2_alloc *al = ip->i_alloc;
+	const struct gfs2_blkreserv *rs = ip->i_res;
 
 	if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
 		return 0;
-	if (rgd->rd_free_clone >= al->al_requested)
+	if (rgd->rd_free_clone >= rs->rs_requested)
 		return 1;
 	return 0;
 }
 
+static inline u32 gfs2_bi2rgd_blk(struct gfs2_bitmap *bi, u32 blk)
+{
+	return (bi->bi_start * GFS2_NBBY) + blk;
+}
+
 /**
  * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
  * @rgd: The rgrp
@@ -912,20 +931,20 @@
 	u32 goal = 0, block;
 	u64 no_addr;
 	struct gfs2_sbd *sdp = rgd->rd_sbd;
-	unsigned int n;
 	struct gfs2_glock *gl;
 	struct gfs2_inode *ip;
 	int error;
 	int found = 0;
+	struct gfs2_bitmap *bi;
 
 	while (goal < rgd->rd_data) {
 		down_write(&sdp->sd_log_flush_lock);
-		n = 1;
-		block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
-				     GFS2_BLKST_UNLINKED, &n);
+		block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi);
 		up_write(&sdp->sd_log_flush_lock);
 		if (block == BFITNOENT)
 			break;
+
+		block = gfs2_bi2rgd_blk(bi, block);
 		/* rgblk_search can return a block < goal, so we need to
 		   keep it marching forward. */
 		no_addr = block + rgd->rd_data0;
@@ -977,8 +996,8 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *rgd, *begin = NULL;
-	struct gfs2_alloc *al = ip->i_alloc;
-	int error, rg_locked;
+	struct gfs2_blkreserv *rs = ip->i_res;
+	int error, rg_locked, flags = LM_FLAG_TRY;
 	int loops = 0;
 
 	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
@@ -997,7 +1016,7 @@
 			error = 0;
 		} else {
 			error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
-						   LM_FLAG_TRY, &al->al_rgd_gh);
+						   flags, &rs->rs_rgd_gh);
 		}
 		switch (error) {
 		case 0:
@@ -1008,12 +1027,14 @@
 			if (rgd->rd_flags & GFS2_RDF_CHECK)
 				try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
 			if (!rg_locked)
-				gfs2_glock_dq_uninit(&al->al_rgd_gh);
+				gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
 			/* fall through */
 		case GLR_TRYFAILED:
 			rgd = gfs2_rgrpd_get_next(rgd);
-			if (rgd == begin)
+			if (rgd == begin) {
+				flags = 0;
 				loops++;
+			}
 			break;
 		default:
 			return error;
@@ -1023,6 +1044,13 @@
 	return -ENOSPC;
 }
 
+static void gfs2_blkrsv_put(struct gfs2_inode *ip)
+{
+	BUG_ON(ip->i_res == NULL);
+	kfree(ip->i_res);
+	ip->i_res = NULL;
+}
+
 /**
  * gfs2_inplace_reserve - Reserve space in the filesystem
  * @ip: the inode to reserve space for
@@ -1030,16 +1058,23 @@
  * Returns: errno
  */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_blkreserv *rs;
 	int error = 0;
 	u64 last_unlinked = NO_BLOCK;
 	int tries = 0;
 
-	if (gfs2_assert_warn(sdp, al->al_requested))
-		return -EINVAL;
+	rs = gfs2_blkrsv_get(ip);
+	if (!rs)
+		return -ENOMEM;
+
+	rs->rs_requested = requested;
+	if (gfs2_assert_warn(sdp, requested)) {
+		error = -EINVAL;
+		goto out;
+	}
 
 	do {
 		error = get_local_rgrp(ip, &last_unlinked);
@@ -1056,6 +1091,9 @@
 		gfs2_log_flush(sdp, NULL);
 	} while (tries++ < 3);
 
+out:
+	if (error)
+		gfs2_blkrsv_put(ip);
 	return error;
 }
 
@@ -1068,10 +1106,11 @@
 
 void gfs2_inplace_release(struct gfs2_inode *ip)
 {
-	struct gfs2_alloc *al = ip->i_alloc;
+	struct gfs2_blkreserv *rs = ip->i_res;
 
-	if (al->al_rgd_gh.gh_gl)
-		gfs2_glock_dq_uninit(&al->al_rgd_gh);
+	gfs2_blkrsv_put(ip);
+	if (rs->rs_rgd_gh.gh_gl)
+		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
 }
 
 /**
@@ -1108,39 +1147,35 @@
 }
 
 /**
- * rgblk_search - find a block in @old_state, change allocation
- *           state to @new_state
+ * rgblk_search - find a block in @state
  * @rgd: the resource group descriptor
  * @goal: the goal block within the RG (start here to search for avail block)
- * @old_state: GFS2_BLKST_XXX the before-allocation state to find
- * @new_state: GFS2_BLKST_XXX the after-allocation block state
- * @n: The extent length
+ * @state: GFS2_BLKST_XXX the before-allocation state to find
+ * @dinode: TRUE if the first block we allocate is for a dinode
+ * @rbi: address of the pointer to the bitmap containing the block found
  *
- * Walk rgrp's bitmap to find bits that represent a block in @old_state.
- * Add the found bitmap buffer to the transaction.
- * Set the found bits to @new_state to change block's allocation state.
+ * Walk rgrp's bitmap to find bits that represent a block in @state.
  *
  * This function never fails, because we wouldn't call it unless we
  * know (from reservation results, etc.) that a block is available.
  *
- * Scope of @goal and returned block is just within rgrp, not the whole
- * filesystem.
+ * Scope of @goal is just within rgrp, not the whole filesystem.
+ * Scope of @returned block is just within bitmap, not the whole filesystem.
  *
- * Returns:  the block number allocated
+ * Returns: the block number found relative to the bitmap rbi
  */
 
 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
-			unsigned char old_state, unsigned char new_state,
-			unsigned int *n)
+			unsigned char state,
+			struct gfs2_bitmap **rbi)
 {
 	struct gfs2_bitmap *bi = NULL;
 	const u32 length = rgd->rd_length;
 	u32 blk = BFITNOENT;
 	unsigned int buf, x;
-	const unsigned int elen = *n;
 	const u8 *buffer = NULL;
 
-	*n = 0;
+	*rbi = NULL;
 	/* Find bitmap block that contains bits for goal block */
 	for (buf = 0; buf < length; buf++) {
 		bi = rgd->rd_bits + buf;
@@ -1163,21 +1198,21 @@
 		bi = rgd->rd_bits + buf;
 
 		if (test_bit(GBF_FULL, &bi->bi_flags) &&
-		    (old_state == GFS2_BLKST_FREE))
+		    (state == GFS2_BLKST_FREE))
 			goto skip;
 
 		/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
 		   bitmaps, so we must search the originals for that. */
 		buffer = bi->bi_bh->b_data + bi->bi_offset;
 		WARN_ON(!buffer_uptodate(bi->bi_bh));
-		if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
 			buffer = bi->bi_clone + bi->bi_offset;
 
-		blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
+		blk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
 		if (blk != BFITNOENT)
 			break;
 
-		if ((goal == 0) && (old_state == GFS2_BLKST_FREE))
+		if ((goal == 0) && (state == GFS2_BLKST_FREE))
 			set_bit(GBF_FULL, &bi->bi_flags);
 
 		/* Try next bitmap block (wrap back to rgrp header if at end) */
@@ -1187,16 +1222,37 @@
 		goal = 0;
 	}
 
-	if (blk == BFITNOENT)
-		return blk;
+	if (blk != BFITNOENT)
+		*rbi = bi;
 
-	*n = 1;
-	if (old_state == new_state)
-		goto out;
+	return blk;
+}
 
+/**
+ * gfs2_alloc_extent - allocate an extent from a given bitmap
+ * @rgd: the resource group descriptor
+ * @bi: the bitmap within the rgrp
+ * @blk: the block within the bitmap
+ * @dinode: TRUE if the first block we allocate is for a dinode
+ * @n: The extent length
+ *
+ * Add the found bitmap buffer to the transaction.
+ * Set the found bits to the new state (GFS2_BLKST_DINODE or GFS2_BLKST_USED).
+ * Returns: starting block number of the extent (fs scope)
+ */
+static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
+			     u32 blk, bool dinode, unsigned int *n)
+{
+	const unsigned int elen = *n;
+	u32 goal;
+	const u8 *buffer = NULL;
+
+	*n = 0;
+	buffer = bi->bi_bh->b_data + bi->bi_offset;
 	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
 	gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
-		    bi, blk, new_state);
+		    bi, blk, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
+	(*n)++;
 	goal = blk;
 	while (*n < elen) {
 		goal++;
@@ -1206,11 +1262,12 @@
 		    GFS2_BLKST_FREE)
 			break;
 		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
-			    bi, goal, new_state);
+			    bi, goal, GFS2_BLKST_USED);
 		(*n)++;
 	}
-out:
-	return (bi->bi_start * GFS2_NBBY) + blk;
+	blk = gfs2_bi2rgd_blk(bi, blk);
+	rgd->rd_last_alloc = blk + *n - 1;
+	return rgd->rd_data0 + blk;
 }
 
 /**
@@ -1298,121 +1355,93 @@
 }
 
 /**
- * gfs2_alloc_block - Allocate one or more blocks
+ * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
  * @ip: the inode to allocate the block for
  * @bn: Used to return the starting block number
- * @n: requested number of blocks/extent length (value/result)
+ * @nblocks: requested number of blocks/extent length (value/result)
+ * @dinode: 1 if we're allocating a dinode block, else 0
+ * @generation: the generation number of the inode
  *
  * Returns: 0 or error
  */
 
-int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
+int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
+		      bool dinode, u64 *generation)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head *dibh;
-	struct gfs2_alloc *al = ip->i_alloc;
 	struct gfs2_rgrpd *rgd;
-	u32 goal, blk;
-	u64 block;
+	unsigned int ndata;
+	u32 goal, blk; /* block, within the rgrp scope */
+	u64 block; /* block, within the file system scope */
 	int error;
+	struct gfs2_bitmap *bi;
 
 	/* Only happens if there is a bug in gfs2, return something distinctive
 	 * to ensure that it is noticed.
 	 */
-	if (al == NULL)
+	if (ip->i_res == NULL)
 		return -ECANCELED;
 
 	rgd = ip->i_rgd;
 
-	if (rgrp_contains_block(rgd, ip->i_goal))
+	if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
 		goal = ip->i_goal - rgd->rd_data0;
 	else
 		goal = rgd->rd_last_alloc;
 
-	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
+	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
 
 	/* Since all blocks are reserved in advance, this shouldn't happen */
 	if (blk == BFITNOENT)
 		goto rgrp_error;
 
-	rgd->rd_last_alloc = blk;
-	block = rgd->rd_data0 + blk;
-	ip->i_goal = block + *n - 1;
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (error == 0) {
-		struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
-		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal);
-		brelse(dibh);
+	block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+	ndata = *nblocks;
+	if (dinode)
+		ndata--;
+
+	if (!dinode) {
+		ip->i_goal = block + ndata - 1;
+		error = gfs2_meta_inode_buffer(ip, &dibh);
+		if (error == 0) {
+			struct gfs2_dinode *di =
+				(struct gfs2_dinode *)dibh->b_data;
+			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+			di->di_goal_meta = di->di_goal_data =
+				cpu_to_be64(ip->i_goal);
+			brelse(dibh);
+		}
 	}
-	if (rgd->rd_free < *n)
+	if (rgd->rd_free < *nblocks)
 		goto rgrp_error;
 
-	rgd->rd_free -= *n;
-
-	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
-
-	al->al_alloced += *n;
-
-	gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
-	gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
-
-	rgd->rd_free_clone -= *n;
-	trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED);
-	*bn = block;
-	return 0;
-
-rgrp_error:
-	gfs2_rgrp_error(rgd);
-	return -EIO;
-}
-
-/**
- * gfs2_alloc_di - Allocate a dinode
- * @dip: the directory that the inode is going in
- * @bn: the block number which is allocated
- * @generation: the generation number of the inode
- *
- * Returns: 0 on success or error
- */
-
-int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-	struct gfs2_alloc *al = dip->i_alloc;
-	struct gfs2_rgrpd *rgd = dip->i_rgd;
-	u32 blk;
-	u64 block;
-	unsigned int n = 1;
-
-	blk = rgblk_search(rgd, rgd->rd_last_alloc,
-			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
-
-	/* Since all blocks are reserved in advance, this shouldn't happen */
-	if (blk == BFITNOENT)
-		goto rgrp_error;
-
-	rgd->rd_last_alloc = blk;
-	block = rgd->rd_data0 + blk;
-	if (rgd->rd_free == 0)
-		goto rgrp_error;
-
-	rgd->rd_free--;
-	rgd->rd_dinodes++;
-	*generation = rgd->rd_igeneration++;
-	if (*generation == 0)
+	rgd->rd_free -= *nblocks;
+	if (dinode) {
+		rgd->rd_dinodes++;
 		*generation = rgd->rd_igeneration++;
+		if (*generation == 0)
+			*generation = rgd->rd_igeneration++;
+	}
+
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
-	al->al_alloced++;
+	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
+	if (dinode)
+		gfs2_trans_add_unrevoke(sdp, block, 1);
 
-	gfs2_statfs_change(sdp, 0, -1, +1);
-	gfs2_trans_add_unrevoke(sdp, block, 1);
+	/*
+	 * This needs reviewing to see why we cannot do the quota change
+	 * at this point in the dinode case.
+	 */
+	if (ndata)
+		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
+				  ip->i_inode.i_gid);
 
-	rgd->rd_free_clone--;
-	trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE);
+	rgd->rd_free_clone -= *nblocks;
+	trace_gfs2_block_alloc(ip, block, *nblocks,
+			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
 	*bn = block;
 	return 0;
 
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index cf5c501..ceec910 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -28,19 +28,19 @@
 extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
 extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
 
-extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
-static inline void gfs2_alloc_put(struct gfs2_inode *ip)
+extern struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip);
+static inline void gfs2_qadata_put(struct gfs2_inode *ip)
 {
-	BUG_ON(ip->i_alloc == NULL);
-	kfree(ip->i_alloc);
-	ip->i_alloc = NULL;
+	BUG_ON(ip->i_qadata == NULL);
+	kfree(ip->i_qadata);
+	ip->i_qadata = NULL;
 }
 
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
-extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
-extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
+extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
+			     bool dinode, u64 *generation);
 
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
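
Taken together, the rgrp.c and rgrp.h hunks above change the block-allocation interface: callers no longer attach a struct gfs2_alloc and set al_requested by hand, and the separate dinode allocator gfs2_alloc_di() folds into gfs2_alloc_blocks() behind the new dinode flag. Roughly, and only as a sketch of the calling convention (locking, quota and transaction setup omitted; variable names illustrative):

	/* before */
	al = gfs2_alloc_get(ip);
	al->al_requested = blocks;
	error = gfs2_inplace_reserve(ip);
	...
	error = gfs2_alloc_block(ip, &bn, &n);		/* data/metadata extent */
	error = gfs2_alloc_di(dip, &bn, &generation);	/* dinode */
	...
	gfs2_inplace_release(ip);
	gfs2_alloc_put(ip);

	/* after */
	error = gfs2_inplace_reserve(ip, blocks);	/* allocates ip->i_res, drops it again on error */
	...
	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);		/* data/metadata extent */
	error = gfs2_alloc_blocks(ip, &bn, &n, 1, &generation);	/* first block is a dinode */
	...
	gfs2_inplace_release(ip);	/* also frees ip->i_res via gfs2_blkrsv_put() */
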
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 71e4209..4553ce5 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1284,18 +1284,18 @@
 /**
  * gfs2_show_options - Show mount options for /proc/mounts
  * @s: seq_file structure
- * @mnt: vfsmount
+ * @root: root of this (sub)tree
  *
  * Returns: 0 on success or error code
  */
 
-static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int gfs2_show_options(struct seq_file *s, struct dentry *root)
 {
-	struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
+	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
 	struct gfs2_args *args = &sdp->sd_args;
 	int val;
 
-	if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
+	if (is_ancestor(root, sdp->sd_master_dir))
 		seq_printf(s, ",meta");
 	if (args->ar_lockproto[0])
 		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
@@ -1399,8 +1399,9 @@
 static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	struct gfs2_rgrpd *rgd;
+	struct gfs2_holder gh;
 	int error;
 
 	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
@@ -1408,8 +1409,8 @@
 		return -EIO;
 	}
 
-	al = gfs2_alloc_get(ip);
-	if (!al)
+	qa = gfs2_qadata_get(ip);
+	if (!qa)
 		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -1423,8 +1424,7 @@
 		goto out_qs;
 	}
 
-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
-				   &al->al_rgd_gh);
+	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
 	if (error)
 		goto out_qs;
 
@@ -1440,11 +1440,11 @@
 	gfs2_trans_end(sdp);
 
 out_rg_gunlock:
-	gfs2_glock_dq_uninit(&al->al_rgd_gh);
+	gfs2_glock_dq_uninit(&gh);
 out_qs:
 	gfs2_quota_unhold(ip);
 out:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 	return error;
 }
 
@@ -1582,7 +1582,6 @@
 static void gfs2_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(gfs2_inode_cachep, inode);
 }
 
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index f8f101e..125d457 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -30,9 +30,9 @@
  * block, or all of the blocks in the rg, whichever is smaller */
 static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
 {
-	const struct gfs2_alloc *al = ip->i_alloc;
-	if (al->al_requested < ip->i_rgd->rd_length)
-		return al->al_requested + 1;
+	const struct gfs2_blkreserv *rs = ip->i_res;
+	if (rs->rs_requested < ip->i_rgd->rd_length)
+		return rs->rs_requested + 1;
 	return ip->i_rgd->rd_length;
 }
 
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 71d7bf8..e963659 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -321,11 +321,11 @@
 			       struct gfs2_ea_header *ea,
 			       struct gfs2_ea_header *prev, int leave)
 {
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	int error;
 
-	al = gfs2_alloc_get(ip);
-	if (!al)
+	qa = gfs2_qadata_get(ip);
+	if (!qa)
 		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -336,7 +336,7 @@
 
 	gfs2_quota_unhold(ip);
 out_alloc:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 	return error;
 }
 
@@ -549,9 +549,10 @@
 		goto out;
 
 	error = gfs2_ea_get_copy(ip, &el, data, len);
-	if (error == 0)
-		error = len;
-	*ppdata = data;
+	if (error < 0)
+		kfree(data);
+	else
+		*ppdata = data;
 out:
 	brelse(el.el_bh);
 	return error;
@@ -609,7 +610,7 @@
 	u64 block;
 	int error;
 
-	error = gfs2_alloc_block(ip, &block, &n);
+	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 	if (error)
 		return error;
 	gfs2_trans_add_unrevoke(sdp, block, 1);
@@ -671,7 +672,7 @@
 			int mh_size = sizeof(struct gfs2_meta_header);
 			unsigned int n = 1;
 
-			error = gfs2_alloc_block(ip, &block, &n);
+			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 			if (error)
 				return error;
 			gfs2_trans_add_unrevoke(sdp, block, 1);
@@ -708,21 +709,19 @@
 			     unsigned int blks,
 			     ea_skeleton_call_t skeleton_call, void *private)
 {
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	struct buffer_head *dibh;
 	int error;
 
-	al = gfs2_alloc_get(ip);
-	if (!al)
+	qa = gfs2_qadata_get(ip);
+	if (!qa)
 		return -ENOMEM;
 
 	error = gfs2_quota_lock_check(ip);
 	if (error)
 		goto out;
 
-	al->al_requested = blks;
-
-	error = gfs2_inplace_reserve(ip);
+	error = gfs2_inplace_reserve(ip, blks);
 	if (error)
 		goto out_gunlock_q;
 
@@ -751,7 +750,7 @@
 out_gunlock_q:
 	gfs2_quota_unlock(ip);
 out:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 	return error;
 }
 
@@ -991,7 +990,7 @@
 	} else {
 		u64 blk;
 		unsigned int n = 1;
-		error = gfs2_alloc_block(ip, &blk, &n);
+		error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
 		if (error)
 			return error;
 		gfs2_trans_add_unrevoke(sdp, blk, 1);
@@ -1435,9 +1434,9 @@
 static int ea_dealloc_block(struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct gfs2_alloc *al = ip->i_alloc;
 	struct gfs2_rgrpd *rgd;
 	struct buffer_head *dibh;
+	struct gfs2_holder gh;
 	int error;
 
 	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
@@ -1446,8 +1445,7 @@
 		return -EIO;
 	}
 
-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
-				   &al->al_rgd_gh);
+	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
 	if (error)
 		return error;
 
@@ -1471,7 +1469,7 @@
 	gfs2_trans_end(sdp);
 
 out_gunlock:
-	gfs2_glock_dq_uninit(&al->al_rgd_gh);
+	gfs2_glock_dq_uninit(&gh);
 	return error;
 }
 
@@ -1484,11 +1482,11 @@
 
 int gfs2_ea_dealloc(struct gfs2_inode *ip)
 {
-	struct gfs2_alloc *al;
+	struct gfs2_qadata *qa;
 	int error;
 
-	al = gfs2_alloc_get(ip);
-	if (!al)
+	qa = gfs2_qadata_get(ip);
+	if (!qa)
 		return -ENOMEM;
 
 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -1510,7 +1508,7 @@
 out_quota:
 	gfs2_quota_unhold(ip);
 out_alloc:
-	gfs2_alloc_put(ip);
+	gfs2_qadata_put(ip);
 	return error;
 }
 
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index bce4eef..62fc14e 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -186,7 +186,7 @@
  * a directory and return a corresponding inode, given the inode for
  * the directory and the name (and its length) of the new file.
  */
-static int hfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		      struct nameidata *nd)
 {
 	struct inode *inode;
@@ -216,7 +216,7 @@
  * in a directory, given the inode for the parent directory and the
  * name (and its length) of the new directory.
  */
-static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 	int res;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index ad97c2d..1bf967c 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -184,7 +184,7 @@
 extern const struct address_space_operations hfs_aops;
 extern const struct address_space_operations hfs_btree_aops;
 
-extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int);
+extern struct inode *hfs_new_inode(struct inode *, struct qstr *, umode_t);
 extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
 extern int hfs_write_inode(struct inode *, struct writeback_control *);
 extern int hfs_inode_setattr(struct dentry *, struct iattr *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a1a9fdc..737dbeb 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -169,7 +169,7 @@
 /*
  * hfs_new_inode
  */
-struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
+struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct inode *inode = new_inode(sb);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 1b55f70..8137fb3 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -133,9 +133,9 @@
 	return 0;
 }
 
-static int hfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int hfs_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct hfs_sb_info *sbi = HFS_SB(mnt->mnt_sb);
+	struct hfs_sb_info *sbi = HFS_SB(root->d_sb);
 
 	if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
 		seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
@@ -170,7 +170,6 @@
 static void hfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
 }
 
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 4536cd3..88e155f 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -424,7 +424,7 @@
 }
 
 static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
-			 int mode, dev_t rdev)
+			 umode_t mode, dev_t rdev)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
 	struct inode *inode;
@@ -453,13 +453,13 @@
 	return res;
 }
 
-static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
+static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			  struct nameidata *nd)
 {
 	return hfsplus_mknod(dir, dentry, mode, 0);
 }
 
-static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
 }
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d7674d0..21a5b7f 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -402,7 +402,7 @@
 void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
 int hfsplus_cat_read_inode(struct inode *, struct hfs_find_data *);
 int hfsplus_cat_write_inode(struct inode *);
-struct inode *hfsplus_new_inode(struct super_block *, int);
+struct inode *hfsplus_new_inode(struct super_block *, umode_t);
 void hfsplus_delete_inode(struct inode *);
 int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
 		       int datasync);
@@ -419,7 +419,7 @@
 int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
 int hfsplus_parse_options_remount(char *input, int *force);
 void hfsplus_fill_defaults(struct hfsplus_sb_info *);
-int hfsplus_show_options(struct seq_file *, struct vfsmount *);
+int hfsplus_show_options(struct seq_file *, struct dentry *);
 
 /* super.c */
 struct inode *hfsplus_iget(struct super_block *, unsigned long);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 40e1413..6643b24 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -378,7 +378,7 @@
 	.unlocked_ioctl = hfsplus_ioctl,
 };
 
-struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
+struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct inode *inode = new_inode(sb);
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index fbaa669..f66c765 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -43,7 +43,7 @@
 	unsigned int flags;
 	int err = 0;
 
-	err = mnt_want_write(file->f_path.mnt);
+	err = mnt_want_write_file(file);
 	if (err)
 		goto out;
 
@@ -94,7 +94,7 @@
 out_unlock_inode:
 	mutex_unlock(&inode->i_mutex);
 out_drop_write:
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 out:
 	return err;
 }
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index bb62a5882..06fa561 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -206,9 +206,9 @@
 	return 1;
 }
 
-int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
+int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb);
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
 
 	if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
 		seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d24a9b6..edf0a80 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -558,7 +558,6 @@
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
 }
 
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index bf15a43..3cbfa93 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -39,7 +39,7 @@
 
 struct hostfs_iattr {
 	unsigned int	ia_valid;
-	mode_t		ia_mode;
+	unsigned short	ia_mode;
 	uid_t		ia_uid;
 	gid_t		ia_gid;
 	loff_t		ia_size;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2f72da5..e130bd4 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -250,7 +250,6 @@
 static void hostfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kfree(HOSTFS_I(inode));
 }
 
@@ -259,9 +258,9 @@
 	call_rcu(&inode->i_rcu, hostfs_i_callback);
 }
 
-static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
 {
-	const char *root_path = vfs->mnt_sb->s_fs_info;
+	const char *root_path = root->d_sb->s_fs_info;
 	size_t offset = strlen(root_ino) + 1;
 
 	if (strlen(root_path) > offset)
@@ -552,7 +551,7 @@
 	return 0;
 }
 
-int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
+int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		  struct nameidata *nd)
 {
 	struct inode *inode;
@@ -677,7 +676,7 @@
 	return err;
 }
 
-int hostfs_mkdir(struct inode *ino, struct dentry *dentry, int mode)
+int hostfs_mkdir(struct inode *ino, struct dentry *dentry, umode_t mode)
 {
 	char *file;
 	int err;
@@ -701,7 +700,7 @@
 	return err;
 }
 
-int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	struct inode *inode;
 	char *name;
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index ea91fcb..30dd7b1 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -8,7 +8,7 @@
 #include <linux/sched.h>
 #include "hpfs_fn.h"
 
-static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	const unsigned char *name = dentry->d_name.name;
 	unsigned len = dentry->d_name.len;
@@ -115,7 +115,7 @@
 	return err;
 }
 
-static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
 {
 	const unsigned char *name = dentry->d_name.name;
 	unsigned len = dentry->d_name.len;
@@ -201,7 +201,7 @@
 	return err;
 }
 
-static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	const unsigned char *name = dentry->d_name.name;
 	unsigned len = dentry->d_name.len;
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 98580a3..3690467c9 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -181,7 +181,6 @@
 static void hpfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
 }
 
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index f590b11..d92f4ce 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -622,7 +622,6 @@
 static void hppfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kfree(HPPFS_I(inode));
 }
 
@@ -726,7 +725,7 @@
 	sb->s_fs_info = proc_mnt;
 
 	err = -ENOMEM;
-	root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root));
+	root_inode = get_inode(sb, dget(proc_mnt->mnt_root));
 	if (!root_inode)
 		goto out_mntput;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0be5a78..e425ad9 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -447,8 +447,8 @@
 	return 0;
 }
 
-static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, 
-					gid_t gid, int mode, dev_t dev)
+static struct inode *hugetlbfs_get_root(struct super_block *sb,
+					struct hugetlbfs_config *config)
 {
 	struct inode *inode;
 
@@ -456,9 +456,31 @@
 	if (inode) {
 		struct hugetlbfs_inode_info *info;
 		inode->i_ino = get_next_ino();
-		inode->i_mode = mode;
-		inode->i_uid = uid;
-		inode->i_gid = gid;
+		inode->i_mode = S_IFDIR | config->mode;
+		inode->i_uid = config->uid;
+		inode->i_gid = config->gid;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		info = HUGETLBFS_I(inode);
+		mpol_shared_policy_init(&info->policy, NULL);
+		inode->i_op = &hugetlbfs_dir_inode_operations;
+		inode->i_fop = &simple_dir_operations;
+		/* directory inodes start off with i_nlink == 2 (for "." entry) */
+		inc_nlink(inode);
+	}
+	return inode;
+}
+
+static struct inode *hugetlbfs_get_inode(struct super_block *sb,
+					struct inode *dir,
+					umode_t mode, dev_t dev)
+{
+	struct inode *inode;
+
+	inode = new_inode(sb);
+	if (inode) {
+		struct hugetlbfs_inode_info *info;
+		inode->i_ino = get_next_ino();
+		inode_init_owner(inode, dir, mode);
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
 		inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -500,20 +522,12 @@
  * File creation. Allocate an inode, and we're done..
  */
 static int hugetlbfs_mknod(struct inode *dir,
-			struct dentry *dentry, int mode, dev_t dev)
+			struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	struct inode *inode;
 	int error = -ENOSPC;
-	gid_t gid;
 
-	if (dir->i_mode & S_ISGID) {
-		gid = dir->i_gid;
-		if (S_ISDIR(mode))
-			mode |= S_ISGID;
-	} else {
-		gid = current_fsgid();
-	}
-	inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), gid, mode, dev);
+	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
 	if (inode) {
 		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 		d_instantiate(dentry, inode);
@@ -523,7 +537,7 @@
 	return error;
 }
 
-static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
 	if (!retval)
@@ -531,7 +545,7 @@
 	return retval;
 }
 
-static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
 {
 	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
 }
@@ -541,15 +555,8 @@
 {
 	struct inode *inode;
 	int error = -ENOSPC;
-	gid_t gid;
 
-	if (dir->i_mode & S_ISGID)
-		gid = dir->i_gid;
-	else
-		gid = current_fsgid();
-
-	inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(),
-					gid, S_IFLNK|S_IRWXUGO, 0);
+	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
 	if (inode) {
 		int l = strlen(symname)+1;
 		error = page_symlink(inode, symname, l);
@@ -666,7 +673,6 @@
 static void hugetlbfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
 }
 
@@ -858,8 +864,7 @@
 	sb->s_magic = HUGETLBFS_MAGIC;
 	sb->s_op = &hugetlbfs_ops;
 	sb->s_time_gran = 1;
-	inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
-					S_IFDIR | config.mode, 0);
+	inode = hugetlbfs_get_root(sb, &config);
 	if (!inode)
 		goto out_free;
 
@@ -957,8 +962,7 @@
 
 	path.mnt = mntget(hugetlbfs_vfsmount);
 	error = -ENOSPC;
-	inode = hugetlbfs_get_inode(root->d_sb, current_fsuid(),
-				current_fsgid(), S_IFREG | S_IRWXUGO, 0);
+	inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
 	if (!inode)
 		goto out_dentry;
 
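
The hugetlbfs hunks drop the hand-rolled S_ISGID group inheritance in favour of inode_init_owner(), which (as the fs/inode.c hunk below shows for its prototype) takes the parent directory and the umode_t mode and fills in i_uid, i_gid and i_mode itself. A sketch of the pattern for a simple mknod-style path, with fs-specific details left out:

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);	/* uid, gid and S_ISGID handling */
		/* ... fs-specific setup ... */
	}
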
diff --git a/fs/inode.c b/fs/inode.c
index ee4e66b..8753575 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -26,6 +26,7 @@
 #include <linux/ima.h>
 #include <linux/cred.h>
 #include <linux/buffer_head.h> /* for inode_has_buffers */
+#include <linux/ratelimit.h>
 #include "internal.h"
 
 /*
@@ -191,6 +192,7 @@
 	}
 	inode->i_private = NULL;
 	inode->i_mapping = mapping;
+	INIT_LIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
 #ifdef CONFIG_FS_POSIX_ACL
 	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
 #endif
@@ -241,6 +243,11 @@
 	BUG_ON(inode_has_buffers(inode));
 	security_inode_free(inode);
 	fsnotify_inode_delete(inode);
+	if (!inode->i_nlink) {
+		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
+		atomic_long_dec(&inode->i_sb->s_remove_count);
+	}
+
 #ifdef CONFIG_FS_POSIX_ACL
 	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
 		posix_acl_release(inode->i_acl);
@@ -254,7 +261,6 @@
 static void i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(inode_cachep, inode);
 }
 
@@ -268,6 +274,85 @@
 		call_rcu(&inode->i_rcu, i_callback);
 }
 
+/**
+ * drop_nlink - directly drop an inode's link count
+ * @inode: inode
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink.  In cases
+ * where we are attempting to track writes to the
+ * filesystem, a decrement to zero means an imminent
+ * write when the file is truncated and actually unlinked
+ * on the filesystem.
+ */
+void drop_nlink(struct inode *inode)
+{
+	WARN_ON(inode->i_nlink == 0);
+	inode->__i_nlink--;
+	if (!inode->i_nlink)
+		atomic_long_inc(&inode->i_sb->s_remove_count);
+}
+EXPORT_SYMBOL(drop_nlink);
+
+/**
+ * clear_nlink - directly zero an inode's link count
+ * @inode: inode
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink.  See
+ * drop_nlink() for why we care about i_nlink hitting zero.
+ */
+void clear_nlink(struct inode *inode)
+{
+	if (inode->i_nlink) {
+		inode->__i_nlink = 0;
+		atomic_long_inc(&inode->i_sb->s_remove_count);
+	}
+}
+EXPORT_SYMBOL(clear_nlink);
+
+/**
+ * set_nlink - directly set an inode's link count
+ * @inode: inode
+ * @nlink: new nlink (should be non-zero)
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink.
+ */
+void set_nlink(struct inode *inode, unsigned int nlink)
+{
+	if (!nlink) {
+		printk_ratelimited(KERN_INFO
+			"set_nlink() clearing i_nlink on %s inode %li\n",
+			inode->i_sb->s_type->name, inode->i_ino);
+		clear_nlink(inode);
+	} else {
+		/* Yes, some filesystems do change nlink from zero to one */
+		if (inode->i_nlink == 0)
+			atomic_long_dec(&inode->i_sb->s_remove_count);
+
+		inode->__i_nlink = nlink;
+	}
+}
+EXPORT_SYMBOL(set_nlink);
+
+/**
+ * inc_nlink - directly increment an inode's link count
+ * @inode: inode
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink.  Currently,
+ * it is only here for parity with dec_nlink().
+ */
+void inc_nlink(struct inode *inode)
+{
+	if (WARN_ON(inode->i_nlink == 0))
+		atomic_long_dec(&inode->i_sb->s_remove_count);
+
+	inode->__i_nlink++;
+}
+EXPORT_SYMBOL(inc_nlink);
+
 void address_space_init_once(struct address_space *mapping)
 {
 	memset(mapping, 0, sizeof(*mapping));
@@ -290,7 +375,6 @@
 {
 	memset(inode, 0, sizeof(*inode));
 	INIT_HLIST_NODE(&inode->i_hash);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
 	INIT_LIST_HEAD(&inode->i_wb_list);
 	INIT_LIST_HEAD(&inode->i_lru);
@@ -1508,7 +1592,7 @@
 	if (sync_it & S_MTIME)
 		inode->i_mtime = now;
 	mark_inode_dirty_sync(inode);
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 }
 EXPORT_SYMBOL(file_update_time);
 
@@ -1647,7 +1731,7 @@
  * @mode: mode of the new inode
  */
 void inode_init_owner(struct inode *inode, const struct inode *dir,
-			mode_t mode)
+			umode_t mode)
 {
 	inode->i_uid = current_fsuid();
 	if (dir && dir->i_mode & S_ISGID) {
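
The new link-count helpers above (set_nlink(), inc_nlink(), drop_nlink(), clear_nlink()) keep the per-superblock s_remove_count consistent with i_nlink, which only works if filesystems stop writing i_nlink (now backed by __i_nlink) directly. A minimal sketch of the intended usage when reading an on-disk inode or unlinking one — the raw_inode structure and its field name are made up for illustration:

	/* instead of:  inode->i_nlink = le32_to_cpu(raw_inode->i_links_count); */
	set_nlink(inode, le32_to_cpu(raw_inode->i_links_count));

	/* link/unlink paths go through the helpers as before */
	inc_nlink(dir);		/* e.g. a new subdirectory's ".." entry */
	drop_nlink(inode);	/* bumps sb->s_remove_count once the count reaches zero */
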
diff --git a/fs/internal.h b/fs/internal.h
index fe327c2..9962c59 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -15,19 +15,14 @@
 struct file_system_type;
 struct linux_binprm;
 struct path;
+struct mount;
 
 /*
  * block_dev.c
  */
 #ifdef CONFIG_BLOCK
-extern struct super_block *blockdev_superblock;
 extern void __init bdev_cache_init(void);
 
-static inline int sb_is_blkdev_sb(struct super_block *sb)
-{
-	return sb == blockdev_superblock;
-}
-
 extern int __sync_blockdev(struct block_device *bdev, int wait);
 
 #else
@@ -35,11 +30,6 @@
 {
 }
 
-static inline int sb_is_blkdev_sb(struct super_block *sb)
-{
-	return 0;
-}
-
 static inline int __sync_blockdev(struct block_device *bdev, int wait)
 {
 	return 0;
@@ -52,28 +42,17 @@
 extern void __init chrdev_init(void);
 
 /*
- * exec.c
- */
-extern int check_unsafe_exec(struct linux_binprm *);
-
-/*
  * namespace.c
  */
 extern int copy_mount_options(const void __user *, unsigned long *);
 extern int copy_mount_string(const void __user *, char **);
 
-extern unsigned int mnt_get_count(struct vfsmount *mnt);
-extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
 extern struct vfsmount *lookup_mnt(struct path *);
-extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
-				struct vfsmount *);
-extern void release_mounts(struct list_head *);
-extern void umount_tree(struct vfsmount *, int, struct list_head *);
-extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
 extern int finish_automount(struct vfsmount *, struct path *);
 
 extern void mnt_make_longterm(struct vfsmount *);
 extern void mnt_make_shortterm(struct vfsmount *);
+extern int sb_prepare_remount_readonly(struct super_block *);
 
 extern void __init mnt_init(void);
 
@@ -98,10 +77,9 @@
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
 extern bool grab_super_passive(struct super_block *sb);
-extern void __put_super(struct super_block *sb);
-extern void put_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
 			       int, const char *, void *);
+extern struct super_block *user_get_super(dev_t);
 
 /*
  * open.c
@@ -111,7 +89,7 @@
 extern void release_open_intent(struct nameidata *);
 struct open_flags {
 	int open_flag;
-	int mode;
+	umode_t mode;
 	int acc_mode;
 	int intent;
 };
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1d9b9fc..066836e 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -42,7 +42,7 @@
 
 	error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
 	if (error == -ENOIOCTLCMD)
-		error = -EINVAL;
+		error = -ENOTTY;
  out:
 	return error;
 }
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index f950059..7b99f5f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -85,7 +85,6 @@
 static void isofs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
 }
 
@@ -170,8 +169,8 @@
 	unsigned char map;
 	unsigned char check;
 	unsigned int blocksize;
-	mode_t fmode;
-	mode_t dmode;
+	umode_t fmode;
+	umode_t dmode;
 	gid_t gid;
 	uid_t uid;
 	char *iocharset;
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 7d33de8..0e73f63 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -50,14 +50,14 @@
 	unsigned int  s_uid_set:1;
 	unsigned int  s_gid_set:1;
 
-	mode_t s_fmode;
-	mode_t s_dmode;
+	umode_t s_fmode;
+	umode_t s_dmode;
 	gid_t s_gid;
 	uid_t s_uid;
 	struct nls_table *s_nls_iocharset; /* Native language support table */
 };
 
-#define ISOFS_INVALID_MODE ((mode_t) -1)
+#define ISOFS_INVALID_MODE ((umode_t) -1)
 
 static inline struct isofs_sb_info *ISOFS_SB(struct super_block *sb)
 {
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index f94fc48..5d1a00a 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -537,7 +537,7 @@
  * them.
  *
  * Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
  */
 
 static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index fea8dd6..a96cff0 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -166,7 +166,7 @@
 		 */
 		jbd_debug(1, "Now suspending kjournald\n");
 		spin_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&journal->j_state_lock);
 	} else {
 		/*
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 16a698b..d49d202 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -565,7 +565,7 @@
  *
  * Called with the journal locked.
  * Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
  */
 
 static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0fa0123..c0a5f9f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -173,7 +173,7 @@
 		 */
 		jbd_debug(1, "Now suspending kjournald2\n");
 		write_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		write_lock(&journal->j_state_lock);
 	} else {
 		/*
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index be6169b..973ac58 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -22,16 +22,16 @@
 
 static int jffs2_readdir (struct file *, void *, filldir_t);
 
-static int jffs2_create (struct inode *,struct dentry *,int,
+static int jffs2_create (struct inode *,struct dentry *,umode_t,
 			 struct nameidata *);
 static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
 				    struct nameidata *);
 static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
 static int jffs2_unlink (struct inode *,struct dentry *);
 static int jffs2_symlink (struct inode *,struct dentry *,const char *);
-static int jffs2_mkdir (struct inode *,struct dentry *,int);
+static int jffs2_mkdir (struct inode *,struct dentry *,umode_t);
 static int jffs2_rmdir (struct inode *,struct dentry *);
-static int jffs2_mknod (struct inode *,struct dentry *,int,dev_t);
+static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t);
 static int jffs2_rename (struct inode *, struct dentry *,
 			 struct inode *, struct dentry *);
 
@@ -169,8 +169,8 @@
 /***********************************************************************/
 
 
-static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
-			struct nameidata *nd)
+static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
+			umode_t mode, struct nameidata *nd)
 {
 	struct jffs2_raw_inode *ri;
 	struct jffs2_inode_info *f, *dir_f;
@@ -450,7 +450,7 @@
 }
 
 
-static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode)
 {
 	struct jffs2_inode_info *f, *dir_f;
 	struct jffs2_sb_info *c;
@@ -618,7 +618,7 @@
 	return ret;
 }
 
-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, dev_t rdev)
+static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	struct jffs2_inode_info *f, *dir_f;
 	struct jffs2_sb_info *c;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index e7e9744..8be4925 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -45,7 +45,6 @@
 static void jffs2_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
 }
 
@@ -97,9 +96,9 @@
 	}
 }
 
-static int jffs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int jffs2_show_options(struct seq_file *s, struct dentry *root)
 {
-	struct jffs2_sb_info *c = JFFS2_SB_INFO(mnt->mnt_sb);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(root->d_sb);
 	struct jffs2_mount_opts *opts = &c->mount_opts;
 
 	if (opts->override_compr)
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 6f98a18..f19d1e0 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -68,7 +68,7 @@
 		unsigned int oldflags;
 		int err;
 
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
@@ -120,7 +120,7 @@
 		inode->i_ctime = CURRENT_TIME_SEC;
 		mark_inode_dirty(inode);
 setflags_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return err;
 	}
 	default:
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index cc5f811..2eb952c 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2349,7 +2349,7 @@
 
 		if (freezing(current)) {
 			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&log_redrive_lock);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index af96060..bb8b661 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2800,7 +2800,7 @@
 
 		if (freezing(current)) {
 			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@
 
 		if (freezing(current)) {
 			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			TXN_UNLOCK();
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a112ad9..5f7c160 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -72,7 +72,7 @@
  * RETURN:	Errors from subroutines
  *
  */
-static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
+static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	int rc = 0;
@@ -205,7 +205,7 @@
  * note:
  * EACCESS: user needs search+write permission on the parent directory
  */
-static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
+static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
 {
 	int rc = 0;
 	tid_t tid;		/* transaction id */
@@ -1353,7 +1353,7 @@
  * FUNCTION:	Create a special file (device)
  */
 static int jfs_mknod(struct inode *dir, struct dentry *dentry,
-		int mode, dev_t rdev)
+		umode_t mode, dev_t rdev)
 {
 	struct jfs_inode_info *jfs_ip;
 	struct btstack btstack;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index a44eff076..682bca6 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -119,7 +119,6 @@
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct jfs_inode_info *ji = JFS_IP(inode);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(jfs_inode_cachep, ji);
 }
 
@@ -609,9 +608,9 @@
 	return 0;
 }
 
-static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int jfs_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);
+	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);
 
 	if (sbi->uid != -1)
 		seq_printf(seq, ",uid=%d", sbi->uid);
diff --git a/fs/libfs.c b/fs/libfs.c
index f6d411e..5b2dbb3 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -12,7 +12,7 @@
 #include <linux/mutex.h>
 #include <linux/exportfs.h>
 #include <linux/writeback.h>
-#include <linux/buffer_head.h>
+#include <linux/buffer_head.h> /* sync_mapping_buffers */
 
 #include <asm/uaccess.h>
 
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 1ca0679..2240d38 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -403,7 +403,7 @@
 {
 	struct super_block *sb = datap;
 
-	return sb == file->f_file->f_path.mnt->mnt_sb;
+	return sb == file->f_file->f_path.dentry->d_sb;
 }
 
 /**
diff --git a/fs/locks.c b/fs/locks.c
index 3b0d05d..637694b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1205,6 +1205,8 @@
 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+	if (IS_ERR(new_fl))
+		return PTR_ERR(new_fl);
 
 	lock_flocks();
 
@@ -1221,12 +1223,6 @@
 		if (fl->fl_owner == current->files)
 			i_have_this_lease = 1;
 
-	if (IS_ERR(new_fl) && !i_have_this_lease
-			&& ((mode & O_NONBLOCK) == 0)) {
-		error = PTR_ERR(new_fl);
-		goto out;
-	}
-
 	break_time = 0;
 	if (lease_break_time > 0) {
 		break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@
 
 out:
 	unlock_flocks();
-	if (!IS_ERR(new_fl))
-		locks_free_lock(new_fl);
+	locks_free_lock(new_fl);
 	return error;
 }
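
The fs/locks.c change above moves the IS_ERR() test on the freshly allocated lease to the very top of the function, so every later path can assume new_fl is valid and the exit path can free it unconditionally. The check relies on the kernel's ERR_PTR encoding, where a small negative errno travels inside the pointer value; a minimal userspace model of that encoding (simplified constants, hypothetical lease_alloc_model()) looks like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Simplified userspace model of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers:
     * small negative errno values are encoded at the top of the pointer range. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)      { return (void *)error; }
    static long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static int   IS_ERR(const void *ptr)
    {
    	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *lease_alloc_model(int fail)
    {
    	if (fail)
    		return ERR_PTR(-ENOMEM);   /* allocation failure travels as a pointer */
    	return malloc(16);
    }

    int main(void)
    {
    	void *fl = lease_alloc_model(1);

    	/* Mirrors the reordered check: bail out immediately instead of
    	 * deferring the IS_ERR() test until after the lock is taken. */
    	if (IS_ERR(fl)) {
    		printf("lease_alloc failed: %ld\n", PTR_ERR(fl));
    		return 1;
    	}
    	free(fl);
    	return 0;
    }
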
 
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index b7d7f67..501043e 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -482,7 +482,7 @@
 	return ret;
 }
 
-static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int logfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 
@@ -501,7 +501,7 @@
 	return __logfs_create(dir, dentry, inode, NULL, 0);
 }
 
-static int logfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int logfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	struct inode *inode;
@@ -517,7 +517,7 @@
 	return __logfs_create(dir, dentry, inode, NULL, 0);
 }
 
-static int logfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int logfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 		dev_t rdev)
 {
 	struct inode *inode;
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 7e441ad..388df1a 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -144,7 +144,6 @@
 static void logfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
 }
 
@@ -324,7 +323,7 @@
 	mutex_unlock(&super->s_journal_mutex);
 }
 
-struct inode *logfs_new_inode(struct inode *dir, int mode)
+struct inode *logfs_new_inode(struct inode *dir, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct inode *inode;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 398ecff6..9263738 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -520,7 +520,7 @@
 struct inode *logfs_iget(struct super_block *sb, ino_t ino);
 struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *cookie);
 void logfs_safe_iput(struct inode *inode, int cookie);
-struct inode *logfs_new_inode(struct inode *dir, int mode);
+struct inode *logfs_new_inode(struct inode *dir, umode_t mode);
 struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino);
 struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino);
 int logfs_init_inode_cache(void);
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index ef175cb..4bc50da 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -209,7 +209,7 @@
 	mark_buffer_dirty(bh);
 }
 
-struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
+struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
 {
 	struct super_block *sb = dir->i_sb;
 	struct minix_sb_info *sbi = minix_sb(sb);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1d9e339..fa8b612 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -71,7 +71,6 @@
 static void minix_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(minix_inode_cachep, minix_i(inode));
 }
 
@@ -263,23 +262,6 @@
 		goto out_no_root;
 	}
 
-	ret = -ENOMEM;
-	s->s_root = d_alloc_root(root_inode);
-	if (!s->s_root)
-		goto out_iput;
-
-	if (!(s->s_flags & MS_RDONLY)) {
-		if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
-			ms->s_state &= ~MINIX_VALID_FS;
-		mark_buffer_dirty(bh);
-	}
-	if (!(sbi->s_mount_state & MINIX_VALID_FS))
-		printk("MINIX-fs: mounting unchecked file system, "
-			"running fsck is recommended\n");
- 	else if (sbi->s_mount_state & MINIX_ERROR_FS)
-		printk("MINIX-fs: mounting file system with errors, "
-			"running fsck is recommended\n");
-
 	/* Apparently minix can create filesystems that allocate more blocks for
 	 * the bitmaps than needed.  We simply ignore that, but verify it didn't
 	 * create one with not enough blocks and bail out if so.
@@ -300,6 +282,23 @@
 		goto out_iput;
 	}
 
+	ret = -ENOMEM;
+	s->s_root = d_alloc_root(root_inode);
+	if (!s->s_root)
+		goto out_iput;
+
+	if (!(s->s_flags & MS_RDONLY)) {
+		if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
+			ms->s_state &= ~MINIX_VALID_FS;
+		mark_buffer_dirty(bh);
+	}
+	if (!(sbi->s_mount_state & MINIX_VALID_FS))
+		printk("MINIX-fs: mounting unchecked file system, "
+			"running fsck is recommended\n");
+	else if (sbi->s_mount_state & MINIX_ERROR_FS)
+		printk("MINIX-fs: mounting file system with errors, "
+			"running fsck is recommended\n");
+
 	return 0;
 
 out_iput:
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 26bbd55..c889ef0 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -46,7 +46,7 @@
 extern struct inode *minix_iget(struct super_block *, unsigned long);
 extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
-extern struct inode * minix_new_inode(const struct inode *, int, int *);
+extern struct inode * minix_new_inode(const struct inode *, umode_t, int *);
 extern void minix_free_inode(struct inode * inode);
 extern unsigned long minix_count_free_inodes(struct super_block *sb);
 extern int minix_new_block(struct inode * inode);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 6e6777f..2f76e38 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -36,7 +36,7 @@
 	return NULL;
 }
 
-static int minix_mknod(struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	int error;
 	struct inode *inode;
@@ -54,7 +54,7 @@
 	return error;
 }
 
-static int minix_create(struct inode * dir, struct dentry *dentry, int mode,
+static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	return minix_mknod(dir, dentry, mode, 0);
@@ -103,7 +103,7 @@
 	return add_nondir(dentry, inode);
 }
 
-static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
+static int minix_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode * inode;
 	int err = -EMLINK;
diff --git a/fs/mount.h b/fs/mount.h
new file mode 100644
index 0000000..4ef36d9
--- /dev/null
+++ b/fs/mount.h
@@ -0,0 +1,76 @@
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+
+struct mnt_namespace {
+	atomic_t		count;
+	struct mount *	root;
+	struct list_head	list;
+	wait_queue_head_t poll;
+	int event;
+};
+
+struct mnt_pcp {
+	int mnt_count;
+	int mnt_writers;
+};
+
+struct mount {
+	struct list_head mnt_hash;
+	struct mount *mnt_parent;
+	struct dentry *mnt_mountpoint;
+	struct vfsmount mnt;
+#ifdef CONFIG_SMP
+	struct mnt_pcp __percpu *mnt_pcp;
+	atomic_t mnt_longterm;		/* how many of the refs are longterm */
+#else
+	int mnt_count;
+	int mnt_writers;
+#endif
+	struct list_head mnt_mounts;	/* list of children, anchored here */
+	struct list_head mnt_child;	/* and going through their mnt_child */
+	struct list_head mnt_instance;	/* mount instance on sb->s_mounts */
+	const char *mnt_devname;	/* Name of device e.g. /dev/dsk/hda1 */
+	struct list_head mnt_list;
+	struct list_head mnt_expire;	/* link in fs-specific expiry list */
+	struct list_head mnt_share;	/* circular list of shared mounts */
+	struct list_head mnt_slave_list;/* list of slave mounts */
+	struct list_head mnt_slave;	/* slave list entry */
+	struct mount *mnt_master;	/* slave is on master->mnt_slave_list */
+	struct mnt_namespace *mnt_ns;	/* containing namespace */
+#ifdef CONFIG_FSNOTIFY
+	struct hlist_head mnt_fsnotify_marks;
+	__u32 mnt_fsnotify_mask;
+#endif
+	int mnt_id;			/* mount identifier */
+	int mnt_group_id;		/* peer group identifier */
+	int mnt_expiry_mark;		/* true if marked for expiry */
+	int mnt_pinned;
+	int mnt_ghosts;
+};
+
+static inline struct mount *real_mount(struct vfsmount *mnt)
+{
+	return container_of(mnt, struct mount, mnt);
+}
+
+static inline int mnt_has_parent(struct mount *mnt)
+{
+	return mnt != mnt->mnt_parent;
+}
+
+extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
+
+static inline void get_mnt_ns(struct mnt_namespace *ns)
+{
+	atomic_inc(&ns->count);
+}
+
+struct proc_mounts {
+	struct seq_file m; /* must be the first element */
+	struct mnt_namespace *ns;
+	struct path root;
+	int (*show)(struct seq_file *, struct vfsmount *);
+};
+
+extern const struct seq_operations mounts_op;
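
The new fs/mount.h keeps struct vfsmount as the small public handle and embeds it inside the private struct mount; real_mount() recovers the container with container_of(), which is why the following hunks mechanically replace vfsmount pointers with mount pointers and add &mnt->mnt / real_mount() conversions at the boundaries. A self-contained userspace sketch of the same embed-and-recover pattern (mount_like and vfsmount_like are illustrative names, not kernel types):

    #include <stddef.h>
    #include <stdio.h>

    /* container_of() as the kernel defines it (simplified): recover the address
     * of the enclosing structure from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct vfsmount_like { int mnt_flags; };

    struct mount_like {
    	int mnt_id;
    	struct vfsmount_like mnt;   /* public part embedded in the private one */
    };

    static struct mount_like *real_mount_like(struct vfsmount_like *m)
    {
    	return container_of(m, struct mount_like, mnt);
    }

    int main(void)
    {
    	struct mount_like mount = { .mnt_id = 42, .mnt = { .mnt_flags = 0 } };
    	struct vfsmount_like *public_view = &mount.mnt;

    	/* Internal code can always get back to the full object. */
    	printf("mnt_id = %d\n", real_mount_like(public_view)->mnt_id);
    	return 0;
    }
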
diff --git a/fs/namei.c b/fs/namei.c
index 5008f01..c283a1e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -36,6 +36,7 @@
 #include <asm/uaccess.h>
 
 #include "internal.h"
+#include "mount.h"
 
 /* [Feb-1997 T. Schoebel-Theuer]
  * Fundamental changes in the pathname lookup mechanisms (namei)
@@ -676,36 +677,38 @@
 
 static int follow_up_rcu(struct path *path)
 {
-	struct vfsmount *parent;
+	struct mount *mnt = real_mount(path->mnt);
+	struct mount *parent;
 	struct dentry *mountpoint;
 
-	parent = path->mnt->mnt_parent;
-	if (parent == path->mnt)
+	parent = mnt->mnt_parent;
+	if (&parent->mnt == path->mnt)
 		return 0;
-	mountpoint = path->mnt->mnt_mountpoint;
+	mountpoint = mnt->mnt_mountpoint;
 	path->dentry = mountpoint;
-	path->mnt = parent;
+	path->mnt = &parent->mnt;
 	return 1;
 }
 
 int follow_up(struct path *path)
 {
-	struct vfsmount *parent;
+	struct mount *mnt = real_mount(path->mnt);
+	struct mount *parent;
 	struct dentry *mountpoint;
 
 	br_read_lock(vfsmount_lock);
-	parent = path->mnt->mnt_parent;
-	if (parent == path->mnt) {
+	parent = mnt->mnt_parent;
+	if (&parent->mnt == path->mnt) {
 		br_read_unlock(vfsmount_lock);
 		return 0;
 	}
-	mntget(parent);
-	mountpoint = dget(path->mnt->mnt_mountpoint);
+	mntget(&parent->mnt);
+	mountpoint = dget(mnt->mnt_mountpoint);
 	br_read_unlock(vfsmount_lock);
 	dput(path->dentry);
 	path->dentry = mountpoint;
 	mntput(path->mnt);
-	path->mnt = parent;
+	path->mnt = &parent->mnt;
 	return 1;
 }
 
@@ -884,7 +887,7 @@
 			       struct inode **inode)
 {
 	for (;;) {
-		struct vfsmount *mounted;
+		struct mount *mounted;
 		/*
 		 * Don't forget we might have a non-mountpoint managed dentry
 		 * that wants to block transit.
@@ -898,8 +901,8 @@
 		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
 		if (!mounted)
 			break;
-		path->mnt = mounted;
-		path->dentry = mounted->mnt_root;
+		path->mnt = &mounted->mnt;
+		path->dentry = mounted->mnt.mnt_root;
 		nd->flags |= LOOKUP_JUMPED;
 		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
 		/*
@@ -915,12 +918,12 @@
 static void follow_mount_rcu(struct nameidata *nd)
 {
 	while (d_mountpoint(nd->path.dentry)) {
-		struct vfsmount *mounted;
+		struct mount *mounted;
 		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
 		if (!mounted)
 			break;
-		nd->path.mnt = mounted;
-		nd->path.dentry = mounted->mnt_root;
+		nd->path.mnt = &mounted->mnt;
+		nd->path.dentry = mounted->mnt.mnt_root;
 		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
 	}
 }
@@ -1976,7 +1979,7 @@
 	}
 }
 
-int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	int error = may_create(dir, dentry);
@@ -2177,7 +2180,7 @@
 
 	/* Negative dentry, just create the file */
 	if (!dentry->d_inode) {
-		int mode = op->mode;
+		umode_t mode = op->mode;
 		if (!IS_POSIXACL(dir->d_inode))
 			mode &= ~current_umask();
 		/*
@@ -2444,7 +2447,7 @@
 }
 EXPORT_SYMBOL(user_path_create);
 
-int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	int error = may_create(dir, dentry);
 
@@ -2472,7 +2475,7 @@
 	return error;
 }
 
-static int may_mknod(mode_t mode)
+static int may_mknod(umode_t mode)
 {
 	switch (mode & S_IFMT) {
 	case S_IFREG:
@@ -2489,7 +2492,7 @@
 	}
 }
 
-SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
 		unsigned, dev)
 {
 	struct dentry *dentry;
@@ -2536,12 +2539,12 @@
 	return error;
 }
 
-SYSCALL_DEFINE3(mknod, const char __user *, filename, int, mode, unsigned, dev)
+SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
 {
 	return sys_mknodat(AT_FDCWD, filename, mode, dev);
 }
 
-int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int error = may_create(dir, dentry);
 
@@ -2562,7 +2565,7 @@
 	return error;
 }
 
-SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
 {
 	struct dentry *dentry;
 	struct path path;
@@ -2590,7 +2593,7 @@
 	return error;
 }
 
-SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
+SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
 {
 	return sys_mkdirat(AT_FDCWD, pathname, mode);
 }
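
The namei.c hunks are part of the tree-wide switch of mode arguments from int to umode_t, the narrower type already stored in struct inode, so the syscall entry points (mknod, mknodat, mkdir, mkdirat) now carry the same type all the way down to vfs_create/vfs_mkdir/vfs_mknod. As a small illustration of the file-type check those paths perform, here is a userspace model of may_mknod(), with negative return values standing in for -EPERM/-EINVAL:

    #include <stdio.h>
    #include <sys/stat.h>

    typedef unsigned short umode_t;   /* matches the kernel's definition */

    /* Userspace model of may_mknod(): only these file types may be created
     * through mknod(); directories use mkdir(), symlinks use symlink(). */
    static int may_mknod_model(umode_t mode)
    {
    	switch (mode & S_IFMT) {
    	case S_IFREG:
    	case S_IFCHR:
    	case S_IFBLK:
    	case S_IFIFO:
    	case S_IFSOCK:
    	case 0:         /* zero file type means a regular file */
    		return 0;
    	case S_IFDIR:
    		return -1;  /* stand-in for -EPERM */
    	default:
    		return -2;  /* stand-in for -EINVAL */
    	}
    }

    int main(void)
    {
    	printf("fifo: %d, dir: %d\n",
    	       may_mknod_model(S_IFIFO | 0644),
    	       may_mknod_model(S_IFDIR | 0755));
    	return 0;
    }
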
diff --git a/fs/namespace.c b/fs/namespace.c
index cfc6d44..e608199 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -9,30 +9,17 @@
  */
 
 #include <linux/syscalls.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/percpu.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/acct.h>
+#include <linux/export.h>
 #include <linux/capability.h>
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/sysfs.h>
-#include <linux/seq_file.h>
 #include <linux/mnt_namespace.h>
 #include <linux/namei.h>
-#include <linux/nsproxy.h>
 #include <linux/security.h>
-#include <linux/mount.h>
-#include <linux/ramfs.h>
-#include <linux/log2.h>
 #include <linux/idr.h>
-#include <linux/fs_struct.h>
-#include <linux/fsnotify.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
+#include <linux/acct.h>		/* acct_auto_close_mnt */
+#include <linux/ramfs.h>	/* init_rootfs */
+#include <linux/fs_struct.h>	/* get_fs_root et.al. */
+#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
+#include <linux/uaccess.h>
 #include "pnode.h"
 #include "internal.h"
 
@@ -78,7 +65,7 @@
  * allocation is serialized by namespace_sem, but we need the spinlock to
  * serialize with freeing.
  */
-static int mnt_alloc_id(struct vfsmount *mnt)
+static int mnt_alloc_id(struct mount *mnt)
 {
 	int res;
 
@@ -95,7 +82,7 @@
 	return res;
 }
 
-static void mnt_free_id(struct vfsmount *mnt)
+static void mnt_free_id(struct mount *mnt)
 {
 	int id = mnt->mnt_id;
 	spin_lock(&mnt_id_lock);
@@ -110,7 +97,7 @@
  *
  * mnt_group_ida is protected by namespace_sem
  */
-static int mnt_alloc_group_id(struct vfsmount *mnt)
+static int mnt_alloc_group_id(struct mount *mnt)
 {
 	int res;
 
@@ -129,7 +116,7 @@
 /*
  * Release a peer group ID
  */
-void mnt_release_group_id(struct vfsmount *mnt)
+void mnt_release_group_id(struct mount *mnt)
 {
 	int id = mnt->mnt_group_id;
 	ida_remove(&mnt_group_ida, id);
@@ -141,7 +128,7 @@
 /*
  * vfsmount lock must be held for read
  */
-static inline void mnt_add_count(struct vfsmount *mnt, int n)
+static inline void mnt_add_count(struct mount *mnt, int n)
 {
 #ifdef CONFIG_SMP
 	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
@@ -152,35 +139,10 @@
 #endif
 }
 
-static inline void mnt_set_count(struct vfsmount *mnt, int n)
-{
-#ifdef CONFIG_SMP
-	this_cpu_write(mnt->mnt_pcp->mnt_count, n);
-#else
-	mnt->mnt_count = n;
-#endif
-}
-
-/*
- * vfsmount lock must be held for read
- */
-static inline void mnt_inc_count(struct vfsmount *mnt)
-{
-	mnt_add_count(mnt, 1);
-}
-
-/*
- * vfsmount lock must be held for read
- */
-static inline void mnt_dec_count(struct vfsmount *mnt)
-{
-	mnt_add_count(mnt, -1);
-}
-
 /*
  * vfsmount lock must be held for write
  */
-unsigned int mnt_get_count(struct vfsmount *mnt)
+unsigned int mnt_get_count(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
 	unsigned int count = 0;
@@ -196,9 +158,9 @@
 #endif
 }
 
-static struct vfsmount *alloc_vfsmnt(const char *name)
+static struct mount *alloc_vfsmnt(const char *name)
 {
-	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
+	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
 	if (mnt) {
 		int err;
 
@@ -277,7 +239,7 @@
 }
 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
 
-static inline void mnt_inc_writers(struct vfsmount *mnt)
+static inline void mnt_inc_writers(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
 	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
@@ -286,7 +248,7 @@
 #endif
 }
 
-static inline void mnt_dec_writers(struct vfsmount *mnt)
+static inline void mnt_dec_writers(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
 	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
@@ -295,7 +257,7 @@
 #endif
 }
 
-static unsigned int mnt_get_writers(struct vfsmount *mnt)
+static unsigned int mnt_get_writers(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
 	unsigned int count = 0;
@@ -311,6 +273,15 @@
 #endif
 }
 
+static int mnt_is_readonly(struct vfsmount *mnt)
+{
+	if (mnt->mnt_sb->s_readonly_remount)
+		return 1;
+	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
+	smp_rmb();
+	return __mnt_is_readonly(mnt);
+}
+
 /*
  * Most r/o checks on a fs are for operations that take
  * discrete amounts of time, like a write() or unlink().
@@ -321,7 +292,7 @@
  */
 /**
  * mnt_want_write - get write access to a mount
- * @mnt: the mount on which to take a write
+ * @m: the mount on which to take a write
  *
  * This tells the low-level filesystem that a write is
  * about to be performed to it, and makes sure that
@@ -329,8 +300,9 @@
  * the write operation is finished, mnt_drop_write()
  * must be called.  This is effectively a refcount.
  */
-int mnt_want_write(struct vfsmount *mnt)
+int mnt_want_write(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	int ret = 0;
 
 	preempt_disable();
@@ -341,7 +313,7 @@
 	 * incremented count after it has set MNT_WRITE_HOLD.
 	 */
 	smp_mb();
-	while (mnt->mnt_flags & MNT_WRITE_HOLD)
+	while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
 		cpu_relax();
 	/*
 	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
@@ -349,12 +321,10 @@
 	 * MNT_WRITE_HOLD is cleared.
 	 */
 	smp_rmb();
-	if (__mnt_is_readonly(mnt)) {
+	if (mnt_is_readonly(m)) {
 		mnt_dec_writers(mnt);
 		ret = -EROFS;
-		goto out;
 	}
-out:
 	preempt_enable();
 	return ret;
 }
@@ -378,7 +348,7 @@
 	if (__mnt_is_readonly(mnt))
 		return -EROFS;
 	preempt_disable();
-	mnt_inc_writers(mnt);
+	mnt_inc_writers(real_mount(mnt));
 	preempt_enable();
 	return 0;
 }
@@ -412,17 +382,23 @@
 void mnt_drop_write(struct vfsmount *mnt)
 {
 	preempt_disable();
-	mnt_dec_writers(mnt);
+	mnt_dec_writers(real_mount(mnt));
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(mnt_drop_write);
 
-static int mnt_make_readonly(struct vfsmount *mnt)
+void mnt_drop_write_file(struct file *file)
+{
+	mnt_drop_write(file->f_path.mnt);
+}
+EXPORT_SYMBOL(mnt_drop_write_file);
+
+static int mnt_make_readonly(struct mount *mnt)
 {
 	int ret = 0;
 
 	br_write_lock(vfsmount_lock);
-	mnt->mnt_flags |= MNT_WRITE_HOLD;
+	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
 	 * should be visible before we do.
@@ -448,25 +424,61 @@
 	if (mnt_get_writers(mnt) > 0)
 		ret = -EBUSY;
 	else
-		mnt->mnt_flags |= MNT_READONLY;
+		mnt->mnt.mnt_flags |= MNT_READONLY;
 	/*
 	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
 	 * that become unheld will see MNT_READONLY.
 	 */
 	smp_wmb();
-	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
+	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
 	br_write_unlock(vfsmount_lock);
 	return ret;
 }
 
-static void __mnt_unmake_readonly(struct vfsmount *mnt)
+static void __mnt_unmake_readonly(struct mount *mnt)
 {
 	br_write_lock(vfsmount_lock);
-	mnt->mnt_flags &= ~MNT_READONLY;
+	mnt->mnt.mnt_flags &= ~MNT_READONLY;
 	br_write_unlock(vfsmount_lock);
 }
 
-static void free_vfsmnt(struct vfsmount *mnt)
+int sb_prepare_remount_readonly(struct super_block *sb)
+{
+	struct mount *mnt;
+	int err = 0;
+
+	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
+	if (atomic_long_read(&sb->s_remove_count))
+		return -EBUSY;
+
+	br_write_lock(vfsmount_lock);
+	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
+		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
+			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
+			smp_mb();
+			if (mnt_get_writers(mnt) > 0) {
+				err = -EBUSY;
+				break;
+			}
+		}
+	}
+	if (!err && atomic_long_read(&sb->s_remove_count))
+		err = -EBUSY;
+
+	if (!err) {
+		sb->s_readonly_remount = 1;
+		smp_wmb();
+	}
+	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
+		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
+			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+	}
+	br_write_unlock(vfsmount_lock);
+
+	return err;
+}
+
+static void free_vfsmnt(struct mount *mnt)
 {
 	kfree(mnt->mnt_devname);
 	mnt_free_id(mnt);
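
The hunk above introduces sb_prepare_remount_readonly(), which walks every mount on sb->s_mounts, temporarily sets MNT_WRITE_HOLD on the writable ones and refuses the read-only remount if any of them still has active writers. Ignoring the s_remove_count checks, the s_readonly_remount flag and the locking and memory barriers the real code needs, the control flow reduces to roughly the following (mount_model and the return codes are illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative outline of the remount-read-only handshake: mark every
     * writable mount as "held", then refuse if any still has writers. */
    struct mount_model { bool readonly; bool write_hold; int writers; };

    static int prepare_remount_readonly_model(struct mount_model *mnts, int n)
    {
    	int i, err = 0;

    	for (i = 0; i < n; i++) {
    		if (!mnts[i].readonly) {
    			mnts[i].write_hold = true;   /* new writers must now wait */
    			if (mnts[i].writers > 0) {   /* an existing writer wins */
    				err = -1;            /* stand-in for -EBUSY */
    				break;
    			}
    		}
    	}
    	for (i = 0; i < n; i++)                      /* always drop the hold */
    		mnts[i].write_hold = false;
    	return err;
    }

    int main(void)
    {
    	struct mount_model mnts[2] = { { false, false, 0 }, { false, false, 1 } };
    	printf("remount ro: %d\n", prepare_remount_readonly_model(mnts, 2));
    	return 0;
    }
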
@@ -481,20 +493,20 @@
  * @dir. If @dir is set return the first mount else return the last mount.
  * vfsmount_lock must be held for read or write.
  */
-struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
+struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
 			      int dir)
 {
 	struct list_head *head = mount_hashtable + hash(mnt, dentry);
 	struct list_head *tmp = head;
-	struct vfsmount *p, *found = NULL;
+	struct mount *p, *found = NULL;
 
 	for (;;) {
 		tmp = dir ? tmp->next : tmp->prev;
 		p = NULL;
 		if (tmp == head)
 			break;
-		p = list_entry(tmp, struct vfsmount, mnt_hash);
-		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
+		p = list_entry(tmp, struct mount, mnt_hash);
+		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
 			found = p;
 			break;
 		}
@@ -508,16 +520,21 @@
  */
 struct vfsmount *lookup_mnt(struct path *path)
 {
-	struct vfsmount *child_mnt;
+	struct mount *child_mnt;
 
 	br_read_lock(vfsmount_lock);
-	if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
-		mntget(child_mnt);
-	br_read_unlock(vfsmount_lock);
-	return child_mnt;
+	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
+	if (child_mnt) {
+		mnt_add_count(child_mnt, 1);
+		br_read_unlock(vfsmount_lock);
+		return &child_mnt->mnt;
+	} else {
+		br_read_unlock(vfsmount_lock);
+		return NULL;
+	}
 }
 
-static inline int check_mnt(struct vfsmount *mnt)
+static inline int check_mnt(struct mount *mnt)
 {
 	return mnt->mnt_ns == current->nsproxy->mnt_ns;
 }
@@ -548,12 +565,12 @@
  * Clear dentry's mounted state if it has no remaining mounts.
  * vfsmount_lock must be held for write.
  */
-static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
+static void dentry_reset_mounted(struct dentry *dentry)
 {
 	unsigned u;
 
 	for (u = 0; u < HASH_SIZE; u++) {
-		struct vfsmount *p;
+		struct mount *p;
 
 		list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
 			if (p->mnt_mountpoint == dentry)
@@ -568,25 +585,26 @@
 /*
  * vfsmount lock must be held for write
  */
-static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
+static void detach_mnt(struct mount *mnt, struct path *old_path)
 {
 	old_path->dentry = mnt->mnt_mountpoint;
-	old_path->mnt = mnt->mnt_parent;
+	old_path->mnt = &mnt->mnt_parent->mnt;
 	mnt->mnt_parent = mnt;
-	mnt->mnt_mountpoint = mnt->mnt_root;
+	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	list_del_init(&mnt->mnt_child);
 	list_del_init(&mnt->mnt_hash);
-	dentry_reset_mounted(old_path->mnt, old_path->dentry);
+	dentry_reset_mounted(old_path->dentry);
 }
 
 /*
  * vfsmount lock must be held for write
  */
-void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
-			struct vfsmount *child_mnt)
+void mnt_set_mountpoint(struct mount *mnt, struct dentry *dentry,
+			struct mount *child_mnt)
 {
-	child_mnt->mnt_parent = mntget(mnt);
+	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
 	child_mnt->mnt_mountpoint = dget(dentry);
+	child_mnt->mnt_parent = mnt;
 	spin_lock(&dentry->d_lock);
 	dentry->d_flags |= DCACHE_MOUNTED;
 	spin_unlock(&dentry->d_lock);
@@ -595,15 +613,15 @@
 /*
  * vfsmount lock must be held for write
  */
-static void attach_mnt(struct vfsmount *mnt, struct path *path)
+static void attach_mnt(struct mount *mnt, struct path *path)
 {
-	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
+	mnt_set_mountpoint(real_mount(path->mnt), path->dentry, mnt);
 	list_add_tail(&mnt->mnt_hash, mount_hashtable +
 			hash(path->mnt, path->dentry));
-	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
+	list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts);
 }
 
-static inline void __mnt_make_longterm(struct vfsmount *mnt)
+static inline void __mnt_make_longterm(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
 	atomic_inc(&mnt->mnt_longterm);
@@ -611,7 +629,7 @@
 }
 
 /* needs vfsmount lock for write */
-static inline void __mnt_make_shortterm(struct vfsmount *mnt)
+static inline void __mnt_make_shortterm(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
 	atomic_dec(&mnt->mnt_longterm);
@@ -621,10 +639,10 @@
 /*
  * vfsmount lock must be held for write
  */
-static void commit_tree(struct vfsmount *mnt)
+static void commit_tree(struct mount *mnt)
 {
-	struct vfsmount *parent = mnt->mnt_parent;
-	struct vfsmount *m;
+	struct mount *parent = mnt->mnt_parent;
+	struct mount *m;
 	LIST_HEAD(head);
 	struct mnt_namespace *n = parent->mnt_ns;
 
@@ -639,12 +657,12 @@
 	list_splice(&head, n->list.prev);
 
 	list_add_tail(&mnt->mnt_hash, mount_hashtable +
-				hash(parent, mnt->mnt_mountpoint));
+				hash(&parent->mnt, mnt->mnt_mountpoint));
 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 	touch_mnt_namespace(n);
 }
 
-static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
+static struct mount *next_mnt(struct mount *p, struct mount *root)
 {
 	struct list_head *next = p->mnt_mounts.next;
 	if (next == &p->mnt_mounts) {
@@ -657,14 +675,14 @@
 			p = p->mnt_parent;
 		}
 	}
-	return list_entry(next, struct vfsmount, mnt_child);
+	return list_entry(next, struct mount, mnt_child);
 }
 
-static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
+static struct mount *skip_mnt_tree(struct mount *p)
 {
 	struct list_head *prev = p->mnt_mounts.prev;
 	while (prev != &p->mnt_mounts) {
-		p = list_entry(prev, struct vfsmount, mnt_child);
+		p = list_entry(prev, struct mount, mnt_child);
 		prev = p->mnt_mounts.prev;
 	}
 	return p;
@@ -673,7 +691,7 @@
 struct vfsmount *
 vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
 {
-	struct vfsmount *mnt;
+	struct mount *mnt;
 	struct dentry *root;
 
 	if (!type)
@@ -684,7 +702,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	if (flags & MS_KERNMOUNT)
-		mnt->mnt_flags = MNT_INTERNAL;
+		mnt->mnt.mnt_flags = MNT_INTERNAL;
 
 	root = mount_fs(type, flags, name, data);
 	if (IS_ERR(root)) {
@@ -692,19 +710,22 @@
 		return ERR_CAST(root);
 	}
 
-	mnt->mnt_root = root;
-	mnt->mnt_sb = root->d_sb;
-	mnt->mnt_mountpoint = mnt->mnt_root;
+	mnt->mnt.mnt_root = root;
+	mnt->mnt.mnt_sb = root->d_sb;
+	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	return mnt;
+	br_write_lock(vfsmount_lock);
+	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
+	br_write_unlock(vfsmount_lock);
+	return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
 
-static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
+static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 					int flag)
 {
-	struct super_block *sb = old->mnt_sb;
-	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
+	struct super_block *sb = old->mnt.mnt_sb;
+	struct mount *mnt = alloc_vfsmnt(old->mnt_devname);
 
 	if (mnt) {
 		if (flag & (CL_SLAVE | CL_PRIVATE))
@@ -718,12 +739,15 @@
 				goto out_free;
 		}
 
-		mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
+		mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
 		atomic_inc(&sb->s_active);
-		mnt->mnt_sb = sb;
-		mnt->mnt_root = dget(root);
-		mnt->mnt_mountpoint = mnt->mnt_root;
+		mnt->mnt.mnt_sb = sb;
+		mnt->mnt.mnt_root = dget(root);
+		mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 		mnt->mnt_parent = mnt;
+		br_write_lock(vfsmount_lock);
+		list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
+		br_write_unlock(vfsmount_lock);
 
 		if (flag & CL_SLAVE) {
 			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -753,9 +777,10 @@
 	return NULL;
 }
 
-static inline void mntfree(struct vfsmount *mnt)
+static inline void mntfree(struct mount *mnt)
 {
-	struct super_block *sb = mnt->mnt_sb;
+	struct vfsmount *m = &mnt->mnt;
+	struct super_block *sb = m->mnt_sb;
 
 	/*
 	 * This probably indicates that somebody messed
@@ -768,32 +793,32 @@
 	 * so mnt_get_writers() below is safe.
 	 */
 	WARN_ON(mnt_get_writers(mnt));
-	fsnotify_vfsmount_delete(mnt);
-	dput(mnt->mnt_root);
+	fsnotify_vfsmount_delete(m);
+	dput(m->mnt_root);
 	free_vfsmnt(mnt);
 	deactivate_super(sb);
 }
 
-static void mntput_no_expire(struct vfsmount *mnt)
+static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
 #ifdef CONFIG_SMP
 	br_read_lock(vfsmount_lock);
 	if (likely(atomic_read(&mnt->mnt_longterm))) {
-		mnt_dec_count(mnt);
+		mnt_add_count(mnt, -1);
 		br_read_unlock(vfsmount_lock);
 		return;
 	}
 	br_read_unlock(vfsmount_lock);
 
 	br_write_lock(vfsmount_lock);
-	mnt_dec_count(mnt);
+	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
 		br_write_unlock(vfsmount_lock);
 		return;
 	}
 #else
-	mnt_dec_count(mnt);
+	mnt_add_count(mnt, -1);
 	if (likely(mnt_get_count(mnt)))
 		return;
 	br_write_lock(vfsmount_lock);
@@ -802,9 +827,10 @@
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
 		br_write_unlock(vfsmount_lock);
-		acct_auto_close_mnt(mnt);
+		acct_auto_close_mnt(&mnt->mnt);
 		goto put_again;
 	}
+	list_del(&mnt->mnt_instance);
 	br_write_unlock(vfsmount_lock);
 	mntfree(mnt);
 }
@@ -812,10 +838,11 @@
 void mntput(struct vfsmount *mnt)
 {
 	if (mnt) {
+		struct mount *m = real_mount(mnt);
 		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
-		if (unlikely(mnt->mnt_expiry_mark))
-			mnt->mnt_expiry_mark = 0;
-		mntput_no_expire(mnt);
+		if (unlikely(m->mnt_expiry_mark))
+			m->mnt_expiry_mark = 0;
+		mntput_no_expire(m);
 	}
 }
 EXPORT_SYMBOL(mntput);
@@ -823,7 +850,7 @@
 struct vfsmount *mntget(struct vfsmount *mnt)
 {
 	if (mnt)
-		mnt_inc_count(mnt);
+		mnt_add_count(real_mount(mnt), 1);
 	return mnt;
 }
 EXPORT_SYMBOL(mntget);
@@ -831,16 +858,17 @@
 void mnt_pin(struct vfsmount *mnt)
 {
 	br_write_lock(vfsmount_lock);
-	mnt->mnt_pinned++;
+	real_mount(mnt)->mnt_pinned++;
 	br_write_unlock(vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_pin);
 
-void mnt_unpin(struct vfsmount *mnt)
+void mnt_unpin(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	br_write_lock(vfsmount_lock);
 	if (mnt->mnt_pinned) {
-		mnt_inc_count(mnt);
+		mnt_add_count(mnt, 1);
 		mnt->mnt_pinned--;
 	}
 	br_write_unlock(vfsmount_lock);
@@ -858,12 +886,12 @@
  *
  * See also save_mount_options().
  */
-int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
+int generic_show_options(struct seq_file *m, struct dentry *root)
 {
 	const char *options;
 
 	rcu_read_lock();
-	options = rcu_dereference(mnt->mnt_sb->s_options);
+	options = rcu_dereference(root->d_sb->s_options);
 
 	if (options != NULL && options[0]) {
 		seq_putc(m, ',');
@@ -907,10 +935,10 @@
 EXPORT_SYMBOL(replace_mount_options);
 
 #ifdef CONFIG_PROC_FS
-/* iterator */
+/* iterator; we want it to have access to namespace_sem, thus here... */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-	struct proc_mounts *p = m->private;
+	struct proc_mounts *p = container_of(m, struct proc_mounts, m);
 
 	down_read(&namespace_sem);
 	return seq_list_start(&p->ns->list, *pos);
@@ -918,7 +946,7 @@
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct proc_mounts *p = m->private;
+	struct proc_mounts *p = container_of(m, struct proc_mounts, m);
 
 	return seq_list_next(v, &p->ns->list, pos);
 }
@@ -928,219 +956,18 @@
 	up_read(&namespace_sem);
 }
 
-int mnt_had_events(struct proc_mounts *p)
+static int m_show(struct seq_file *m, void *v)
 {
-	struct mnt_namespace *ns = p->ns;
-	int res = 0;
-
-	br_read_lock(vfsmount_lock);
-	if (p->m.poll_event != ns->event) {
-		p->m.poll_event = ns->event;
-		res = 1;
-	}
-	br_read_unlock(vfsmount_lock);
-
-	return res;
-}
-
-struct proc_fs_info {
-	int flag;
-	const char *str;
-};
-
-static int show_sb_opts(struct seq_file *m, struct super_block *sb)
-{
-	static const struct proc_fs_info fs_info[] = {
-		{ MS_SYNCHRONOUS, ",sync" },
-		{ MS_DIRSYNC, ",dirsync" },
-		{ MS_MANDLOCK, ",mand" },
-		{ 0, NULL }
-	};
-	const struct proc_fs_info *fs_infop;
-
-	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
-		if (sb->s_flags & fs_infop->flag)
-			seq_puts(m, fs_infop->str);
-	}
-
-	return security_sb_show_options(m, sb);
-}
-
-static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
-{
-	static const struct proc_fs_info mnt_info[] = {
-		{ MNT_NOSUID, ",nosuid" },
-		{ MNT_NODEV, ",nodev" },
-		{ MNT_NOEXEC, ",noexec" },
-		{ MNT_NOATIME, ",noatime" },
-		{ MNT_NODIRATIME, ",nodiratime" },
-		{ MNT_RELATIME, ",relatime" },
-		{ 0, NULL }
-	};
-	const struct proc_fs_info *fs_infop;
-
-	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
-		if (mnt->mnt_flags & fs_infop->flag)
-			seq_puts(m, fs_infop->str);
-	}
-}
-
-static void show_type(struct seq_file *m, struct super_block *sb)
-{
-	mangle(m, sb->s_type->name);
-	if (sb->s_subtype && sb->s_subtype[0]) {
-		seq_putc(m, '.');
-		mangle(m, sb->s_subtype);
-	}
-}
-
-static int show_vfsmnt(struct seq_file *m, void *v)
-{
-	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
-	int err = 0;
-	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
-
-	if (mnt->mnt_sb->s_op->show_devname) {
-		err = mnt->mnt_sb->s_op->show_devname(m, mnt);
-		if (err)
-			goto out;
-	} else {
-		mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
-	}
-	seq_putc(m, ' ');
-	seq_path(m, &mnt_path, " \t\n\\");
-	seq_putc(m, ' ');
-	show_type(m, mnt->mnt_sb);
-	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
-	err = show_sb_opts(m, mnt->mnt_sb);
-	if (err)
-		goto out;
-	show_mnt_opts(m, mnt);
-	if (mnt->mnt_sb->s_op->show_options)
-		err = mnt->mnt_sb->s_op->show_options(m, mnt);
-	seq_puts(m, " 0 0\n");
-out:
-	return err;
+	struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+	struct mount *r = list_entry(v, struct mount, mnt_list);
+	return p->show(m, &r->mnt);
 }
 
 const struct seq_operations mounts_op = {
 	.start	= m_start,
 	.next	= m_next,
 	.stop	= m_stop,
-	.show	= show_vfsmnt
-};
-
-static int show_mountinfo(struct seq_file *m, void *v)
-{
-	struct proc_mounts *p = m->private;
-	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
-	struct super_block *sb = mnt->mnt_sb;
-	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
-	struct path root = p->root;
-	int err = 0;
-
-	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
-		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
-	if (sb->s_op->show_path)
-		err = sb->s_op->show_path(m, mnt);
-	else
-		seq_dentry(m, mnt->mnt_root, " \t\n\\");
-	if (err)
-		goto out;
-	seq_putc(m, ' ');
-
-	/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
-	err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
-	if (err)
-		goto out;
-
-	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
-	show_mnt_opts(m, mnt);
-
-	/* Tagged fields ("foo:X" or "bar") */
-	if (IS_MNT_SHARED(mnt))
-		seq_printf(m, " shared:%i", mnt->mnt_group_id);
-	if (IS_MNT_SLAVE(mnt)) {
-		int master = mnt->mnt_master->mnt_group_id;
-		int dom = get_dominating_id(mnt, &p->root);
-		seq_printf(m, " master:%i", master);
-		if (dom && dom != master)
-			seq_printf(m, " propagate_from:%i", dom);
-	}
-	if (IS_MNT_UNBINDABLE(mnt))
-		seq_puts(m, " unbindable");
-
-	/* Filesystem specific data */
-	seq_puts(m, " - ");
-	show_type(m, sb);
-	seq_putc(m, ' ');
-	if (sb->s_op->show_devname)
-		err = sb->s_op->show_devname(m, mnt);
-	else
-		mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
-	if (err)
-		goto out;
-	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
-	err = show_sb_opts(m, sb);
-	if (err)
-		goto out;
-	if (sb->s_op->show_options)
-		err = sb->s_op->show_options(m, mnt);
-	seq_putc(m, '\n');
-out:
-	return err;
-}
-
-const struct seq_operations mountinfo_op = {
-	.start	= m_start,
-	.next	= m_next,
-	.stop	= m_stop,
-	.show	= show_mountinfo,
-};
-
-static int show_vfsstat(struct seq_file *m, void *v)
-{
-	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
-	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
-	int err = 0;
-
-	/* device */
-	if (mnt->mnt_sb->s_op->show_devname) {
-		seq_puts(m, "device ");
-		err = mnt->mnt_sb->s_op->show_devname(m, mnt);
-	} else {
-		if (mnt->mnt_devname) {
-			seq_puts(m, "device ");
-			mangle(m, mnt->mnt_devname);
-		} else
-			seq_puts(m, "no device");
-	}
-
-	/* mount point */
-	seq_puts(m, " mounted on ");
-	seq_path(m, &mnt_path, " \t\n\\");
-	seq_putc(m, ' ');
-
-	/* file system type */
-	seq_puts(m, "with fstype ");
-	show_type(m, mnt->mnt_sb);
-
-	/* optional statistics */
-	if (mnt->mnt_sb->s_op->show_stats) {
-		seq_putc(m, ' ');
-		if (!err)
-			err = mnt->mnt_sb->s_op->show_stats(m, mnt);
-	}
-
-	seq_putc(m, '\n');
-	return err;
-}
-
-const struct seq_operations mountstats_op = {
-	.start	= m_start,
-	.next	= m_next,
-	.stop	= m_stop,
-	.show	= show_vfsstat,
+	.show	= m_show,
 };
 #endif  /* CONFIG_PROC_FS */
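
With this hunk the per-format show_vfsmnt/show_mountinfo/show_vfsstat routines leave namespace.c; the seq_file iterator stays here because it needs namespace_sem, and m_show() simply dispatches through the ->show callback stored in struct proc_mounts (recovered with container_of, which works because the seq_file is the structure's first member). A tiny userspace model of that dispatch, with seq_ctx and proc_mounts_model as illustrative stand-ins:

    #include <stdio.h>

    /* Minimal model of the m_show() dispatch: the iterator is shared, the
     * formatting callback is supplied by whoever opened the file. */
    struct seq_ctx { const char *tag; };

    struct proc_mounts_model {
    	struct seq_ctx m;                         /* must stay first, as in the kernel */
    	int (*show)(struct seq_ctx *, int mnt_id);
    };

    static int show_mounts(struct seq_ctx *m, int mnt_id)
    {
    	printf("[%s] mount %d\n", m->tag, mnt_id);
    	return 0;
    }

    static int m_show_model(struct seq_ctx *m, int mnt_id)
    {
    	/* Casting back works here because m is the first member; the kernel
    	 * uses container_of() for the same recovery. */
    	struct proc_mounts_model *p = (struct proc_mounts_model *)m;
    	return p->show(m, mnt_id);
    }

    int main(void)
    {
    	struct proc_mounts_model p = { .m = { .tag = "mounts" }, .show = show_mounts };
    	return m_show_model(&p.m, 1);
    }
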
 
@@ -1152,11 +979,13 @@
  * open files, pwds, chroots or sub mounts that are
  * busy.
  */
-int may_umount_tree(struct vfsmount *mnt)
+int may_umount_tree(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	int actual_refs = 0;
 	int minimum_refs = 0;
-	struct vfsmount *p;
+	struct mount *p;
+	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
 	br_write_lock(vfsmount_lock);
@@ -1192,7 +1021,7 @@
 	int ret = 1;
 	down_read(&namespace_sem);
 	br_write_lock(vfsmount_lock);
-	if (propagate_mount_busy(mnt, 2))
+	if (propagate_mount_busy(real_mount(mnt), 2))
 		ret = 0;
 	br_write_unlock(vfsmount_lock);
 	up_read(&namespace_sem);
@@ -1203,25 +1032,25 @@
 
 void release_mounts(struct list_head *head)
 {
-	struct vfsmount *mnt;
+	struct mount *mnt;
 	while (!list_empty(head)) {
-		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
+		mnt = list_first_entry(head, struct mount, mnt_hash);
 		list_del_init(&mnt->mnt_hash);
-		if (mnt->mnt_parent != mnt) {
+		if (mnt_has_parent(mnt)) {
 			struct dentry *dentry;
-			struct vfsmount *m;
+			struct mount *m;
 
 			br_write_lock(vfsmount_lock);
 			dentry = mnt->mnt_mountpoint;
 			m = mnt->mnt_parent;
-			mnt->mnt_mountpoint = mnt->mnt_root;
+			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 			mnt->mnt_parent = mnt;
 			m->mnt_ghosts--;
 			br_write_unlock(vfsmount_lock);
 			dput(dentry);
-			mntput(m);
+			mntput(&m->mnt);
 		}
-		mntput(mnt);
+		mntput(&mnt->mnt);
 	}
 }
 
@@ -1229,10 +1058,10 @@
  * vfsmount lock must be held for write
  * namespace_sem must be held for write
  */
-void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
+void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
 {
 	LIST_HEAD(tmp_list);
-	struct vfsmount *p;
+	struct mount *p;
 
 	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
@@ -1247,24 +1076,24 @@
 		p->mnt_ns = NULL;
 		__mnt_make_shortterm(p);
 		list_del_init(&p->mnt_child);
-		if (p->mnt_parent != p) {
+		if (mnt_has_parent(p)) {
 			p->mnt_parent->mnt_ghosts++;
-			dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
+			dentry_reset_mounted(p->mnt_mountpoint);
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
 	}
 	list_splice(&tmp_list, kill);
 }
 
-static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
+static void shrink_submounts(struct mount *mnt, struct list_head *umounts);
 
-static int do_umount(struct vfsmount *mnt, int flags)
+static int do_umount(struct mount *mnt, int flags)
 {
-	struct super_block *sb = mnt->mnt_sb;
+	struct super_block *sb = mnt->mnt.mnt_sb;
 	int retval;
 	LIST_HEAD(umount_list);
 
-	retval = security_sb_umount(mnt, flags);
+	retval = security_sb_umount(&mnt->mnt, flags);
 	if (retval)
 		return retval;
 
@@ -1275,7 +1104,7 @@
 	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
 	 */
 	if (flags & MNT_EXPIRE) {
-		if (mnt == current->fs->root.mnt ||
+		if (&mnt->mnt == current->fs->root.mnt ||
 		    flags & (MNT_FORCE | MNT_DETACH))
 			return -EINVAL;
 
@@ -1317,7 +1146,7 @@
 	 * /reboot - static binary that would close all descriptors and
 	 * call reboot(9). Then init(8) could umount root and exec /reboot.
 	 */
-	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
+	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
 		/*
 		 * Special case for "unmounting" root ...
 		 * we just try to remount it readonly.
@@ -1359,6 +1188,7 @@
 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
 {
 	struct path path;
+	struct mount *mnt;
 	int retval;
 	int lookup_flags = 0;
 
@@ -1371,21 +1201,22 @@
 	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
 	if (retval)
 		goto out;
+	mnt = real_mount(path.mnt);
 	retval = -EINVAL;
 	if (path.dentry != path.mnt->mnt_root)
 		goto dput_and_out;
-	if (!check_mnt(path.mnt))
+	if (!check_mnt(mnt))
 		goto dput_and_out;
 
 	retval = -EPERM;
 	if (!capable(CAP_SYS_ADMIN))
 		goto dput_and_out;
 
-	retval = do_umount(path.mnt, flags);
+	retval = do_umount(mnt, flags);
 dput_and_out:
 	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
 	dput(path.dentry);
-	mntput_no_expire(path.mnt);
+	mntput_no_expire(mnt);
 out:
 	return retval;
 }
@@ -1420,10 +1251,10 @@
 #endif
 }
 
-struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
+struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 					int flag)
 {
-	struct vfsmount *res, *p, *q, *r, *s;
+	struct mount *res, *p, *q, *r;
 	struct path path;
 
 	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
@@ -1436,6 +1267,7 @@
 
 	p = mnt;
 	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
+		struct mount *s;
 		if (!is_subdir(r->mnt_mountpoint, dentry))
 			continue;
 
@@ -1449,9 +1281,9 @@
 				q = q->mnt_parent;
 			}
 			p = s;
-			path.mnt = q;
+			path.mnt = &q->mnt;
 			path.dentry = p->mnt_mountpoint;
-			q = clone_mnt(p, p->mnt_root, flag);
+			q = clone_mnt(p, p->mnt.mnt_root, flag);
 			if (!q)
 				goto Enomem;
 			br_write_lock(vfsmount_lock);
@@ -1474,11 +1306,12 @@
 
 struct vfsmount *collect_mounts(struct path *path)
 {
-	struct vfsmount *tree;
+	struct mount *tree;
 	down_write(&namespace_sem);
-	tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
+	tree = copy_tree(real_mount(path->mnt), path->dentry,
+			 CL_COPY_ALL | CL_PRIVATE);
 	up_write(&namespace_sem);
-	return tree;
+	return tree ? &tree->mnt : NULL;
 }
 
 void drop_collected_mounts(struct vfsmount *mnt)
@@ -1486,7 +1319,7 @@
 	LIST_HEAD(umount_list);
 	down_write(&namespace_sem);
 	br_write_lock(vfsmount_lock);
-	umount_tree(mnt, 0, &umount_list);
+	umount_tree(real_mount(mnt), 0, &umount_list);
 	br_write_unlock(vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
@@ -1495,21 +1328,21 @@
 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
 		   struct vfsmount *root)
 {
-	struct vfsmount *mnt;
+	struct mount *mnt;
 	int res = f(root, arg);
 	if (res)
 		return res;
-	list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
-		res = f(mnt, arg);
+	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
+		res = f(&mnt->mnt, arg);
 		if (res)
 			return res;
 	}
 	return 0;
 }
 
-static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
+static void cleanup_group_ids(struct mount *mnt, struct mount *end)
 {
-	struct vfsmount *p;
+	struct mount *p;
 
 	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
@@ -1517,9 +1350,9 @@
 	}
 }
 
-static int invent_group_ids(struct vfsmount *mnt, bool recurse)
+static int invent_group_ids(struct mount *mnt, bool recurse)
 {
-	struct vfsmount *p;
+	struct mount *p;
 
 	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
@@ -1597,13 +1430,13 @@
  * Must be called without spinlocks held, since this function can sleep
  * in allocations.
  */
-static int attach_recursive_mnt(struct vfsmount *source_mnt,
+static int attach_recursive_mnt(struct mount *source_mnt,
 			struct path *path, struct path *parent_path)
 {
 	LIST_HEAD(tree_list);
-	struct vfsmount *dest_mnt = path->mnt;
+	struct mount *dest_mnt = real_mount(path->mnt);
 	struct dentry *dest_dentry = path->dentry;
-	struct vfsmount *child, *p;
+	struct mount *child, *p;
 	int err;
 
 	if (IS_MNT_SHARED(dest_mnt)) {
@@ -1624,7 +1457,7 @@
 	if (parent_path) {
 		detach_mnt(source_mnt, parent_path);
 		attach_mnt(source_mnt, path);
-		touch_mnt_namespace(parent_path->mnt->mnt_ns);
+		touch_mnt_namespace(source_mnt->mnt_ns);
 	} else {
 		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
 		commit_tree(source_mnt);
@@ -1672,13 +1505,13 @@
 	mutex_unlock(&path->dentry->d_inode->i_mutex);
 }
 
-static int graft_tree(struct vfsmount *mnt, struct path *path)
+static int graft_tree(struct mount *mnt, struct path *path)
 {
-	if (mnt->mnt_sb->s_flags & MS_NOUSER)
+	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
 		return -EINVAL;
 
 	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
-	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
+	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
 		return -ENOTDIR;
 
 	if (d_unlinked(path->dentry))
@@ -1709,7 +1542,8 @@
  */
 static int do_change_type(struct path *path, int flag)
 {
-	struct vfsmount *m, *mnt = path->mnt;
+	struct mount *m;
+	struct mount *mnt = real_mount(path->mnt);
 	int recurse = flag & MS_REC;
 	int type;
 	int err = 0;
@@ -1749,7 +1583,7 @@
 {
 	LIST_HEAD(umount_list);
 	struct path old_path;
-	struct vfsmount *mnt = NULL;
+	struct mount *mnt = NULL, *old;
 	int err = mount_is_safe(path);
 	if (err)
 		return err;
@@ -1763,18 +1597,20 @@
 	if (err)
 		goto out;
 
+	old = real_mount(old_path.mnt);
+
 	err = -EINVAL;
-	if (IS_MNT_UNBINDABLE(old_path.mnt))
+	if (IS_MNT_UNBINDABLE(old))
 		goto out2;
 
-	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
+	if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
 		goto out2;
 
 	err = -ENOMEM;
 	if (recurse)
-		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
+		mnt = copy_tree(old, old_path.dentry, 0);
 	else
-		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);
+		mnt = clone_mnt(old, old_path.dentry, 0);
 
 	if (!mnt)
 		goto out2;
@@ -1804,9 +1640,9 @@
 		return 0;
 
 	if (readonly_request)
-		error = mnt_make_readonly(mnt);
+		error = mnt_make_readonly(real_mount(mnt));
 	else
-		__mnt_unmake_readonly(mnt);
+		__mnt_unmake_readonly(real_mount(mnt));
 	return error;
 }
 
@@ -1820,11 +1656,12 @@
 {
 	int err;
 	struct super_block *sb = path->mnt->mnt_sb;
+	struct mount *mnt = real_mount(path->mnt);
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!check_mnt(path->mnt))
+	if (!check_mnt(mnt))
 		return -EINVAL;
 
 	if (path->dentry != path->mnt->mnt_root)
@@ -1841,22 +1678,22 @@
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
 		br_write_lock(vfsmount_lock);
-		mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
-		path->mnt->mnt_flags = mnt_flags;
+		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
+		mnt->mnt.mnt_flags = mnt_flags;
 		br_write_unlock(vfsmount_lock);
 	}
 	up_write(&sb->s_umount);
 	if (!err) {
 		br_write_lock(vfsmount_lock);
-		touch_mnt_namespace(path->mnt->mnt_ns);
+		touch_mnt_namespace(mnt->mnt_ns);
 		br_write_unlock(vfsmount_lock);
 	}
 	return err;
 }
 
-static inline int tree_contains_unbindable(struct vfsmount *mnt)
+static inline int tree_contains_unbindable(struct mount *mnt)
 {
-	struct vfsmount *p;
+	struct mount *p;
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		if (IS_MNT_UNBINDABLE(p))
 			return 1;
@@ -1867,7 +1704,8 @@
 static int do_move_mount(struct path *path, char *old_name)
 {
 	struct path old_path, parent_path;
-	struct vfsmount *p;
+	struct mount *p;
+	struct mount *old;
 	int err = 0;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1881,8 +1719,11 @@
 	if (err < 0)
 		goto out;
 
+	old = real_mount(old_path.mnt);
+	p = real_mount(path->mnt);
+
 	err = -EINVAL;
-	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
+	if (!check_mnt(p) || !check_mnt(old))
 		goto out1;
 
 	if (d_unlinked(path->dentry))
@@ -1892,7 +1733,7 @@
 	if (old_path.dentry != old_path.mnt->mnt_root)
 		goto out1;
 
-	if (old_path.mnt == old_path.mnt->mnt_parent)
+	if (!mnt_has_parent(old))
 		goto out1;
 
 	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
@@ -1901,28 +1742,26 @@
 	/*
 	 * Don't move a mount residing in a shared parent.
 	 */
-	if (old_path.mnt->mnt_parent &&
-	    IS_MNT_SHARED(old_path.mnt->mnt_parent))
+	if (IS_MNT_SHARED(old->mnt_parent))
 		goto out1;
 	/*
 	 * Don't move a mount tree containing unbindable mounts to a destination
 	 * mount which is shared.
 	 */
-	if (IS_MNT_SHARED(path->mnt) &&
-	    tree_contains_unbindable(old_path.mnt))
+	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
 		goto out1;
 	err = -ELOOP;
-	for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
-		if (p == old_path.mnt)
+	for (; mnt_has_parent(p); p = p->mnt_parent)
+		if (p == old)
 			goto out1;
 
-	err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
+	err = attach_recursive_mnt(old, path, &parent_path);
 	if (err)
 		goto out1;
 
 	/* if the mount is moved, it should no longer be expire
 	 * automatically */
-	list_del_init(&old_path.mnt->mnt_expire);
+	list_del_init(&old->mnt_expire);
 out1:
 	unlock_mount(path);
 out:
@@ -1955,7 +1794,7 @@
 	return ERR_PTR(err);
 }
 
-struct vfsmount *
+static struct vfsmount *
 do_kern_mount(const char *fstype, int flags, const char *name, void *data)
 {
 	struct file_system_type *type = get_fs_type(fstype);
@@ -1969,12 +1808,11 @@
 	put_filesystem(type);
 	return mnt;
 }
-EXPORT_SYMBOL_GPL(do_kern_mount);
 
 /*
  * add a mount into a namespace's mount tree
  */
-static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
+static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
 {
 	int err;
 
@@ -1985,20 +1823,20 @@
 		return err;
 
 	err = -EINVAL;
-	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
+	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
 		goto unlock;
 
 	/* Refuse the same filesystem on the same mount point */
 	err = -EBUSY;
-	if (path->mnt->mnt_sb == newmnt->mnt_sb &&
+	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
 	    path->mnt->mnt_root == path->dentry)
 		goto unlock;
 
 	err = -EINVAL;
-	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
+	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
 		goto unlock;
 
-	newmnt->mnt_flags = mnt_flags;
+	newmnt->mnt.mnt_flags = mnt_flags;
 	err = graft_tree(newmnt, path);
 
 unlock:
@@ -2027,7 +1865,7 @@
 	if (IS_ERR(mnt))
 		return PTR_ERR(mnt);
 
-	err = do_add_mount(mnt, path, mnt_flags);
+	err = do_add_mount(real_mount(mnt), path, mnt_flags);
 	if (err)
 		mntput(mnt);
 	return err;
@@ -2035,11 +1873,12 @@
 
 int finish_automount(struct vfsmount *m, struct path *path)
 {
+	struct mount *mnt = real_mount(m);
 	int err;
 	/* The new mount record should have at least 2 refs to prevent it being
 	 * expired before we get a chance to add it
 	 */
-	BUG_ON(mnt_get_count(m) < 2);
+	BUG_ON(mnt_get_count(mnt) < 2);
 
 	if (m->mnt_sb == path->mnt->mnt_sb &&
 	    m->mnt_root == path->dentry) {
@@ -2047,15 +1886,15 @@
 		goto fail;
 	}
 
-	err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
+	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
 	if (!err)
 		return 0;
 fail:
 	/* remove m from any expiration list it may be on */
-	if (!list_empty(&m->mnt_expire)) {
+	if (!list_empty(&mnt->mnt_expire)) {
 		down_write(&namespace_sem);
 		br_write_lock(vfsmount_lock);
-		list_del_init(&m->mnt_expire);
+		list_del_init(&mnt->mnt_expire);
 		br_write_unlock(vfsmount_lock);
 		up_write(&namespace_sem);
 	}
@@ -2074,7 +1913,7 @@
 	down_write(&namespace_sem);
 	br_write_lock(vfsmount_lock);
 
-	list_add_tail(&mnt->mnt_expire, expiry_list);
+	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
 	br_write_unlock(vfsmount_lock);
 	up_write(&namespace_sem);
@@ -2088,7 +1927,7 @@
  */
 void mark_mounts_for_expiry(struct list_head *mounts)
 {
-	struct vfsmount *mnt, *next;
+	struct mount *mnt, *next;
 	LIST_HEAD(graveyard);
 	LIST_HEAD(umounts);
 
@@ -2111,7 +1950,7 @@
 		list_move(&mnt->mnt_expire, &graveyard);
 	}
 	while (!list_empty(&graveyard)) {
-		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
+		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1, &umounts);
 	}
@@ -2129,9 +1968,9 @@
  * search the list of submounts for a given mountpoint, and move any
  * shrinkable submounts to the 'graveyard' list.
  */
-static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
+static int select_submounts(struct mount *parent, struct list_head *graveyard)
 {
-	struct vfsmount *this_parent = parent;
+	struct mount *this_parent = parent;
 	struct list_head *next;
 	int found = 0;
 
@@ -2140,10 +1979,10 @@
 resume:
 	while (next != &this_parent->mnt_mounts) {
 		struct list_head *tmp = next;
-		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);
+		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
 
 		next = tmp->next;
-		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
+		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
 			continue;
 		/*
 		 * Descend a level if the d_mounts list is non-empty.
@@ -2175,15 +2014,15 @@
  *
  * vfsmount_lock must be held for write
  */
-static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
+static void shrink_submounts(struct mount *mnt, struct list_head *umounts)
 {
 	LIST_HEAD(graveyard);
-	struct vfsmount *m;
+	struct mount *m;
 
 	/* extract submounts of 'mountpoint' from the expiration list */
 	while (select_submounts(mnt, &graveyard)) {
 		while (!list_empty(&graveyard)) {
-			m = list_first_entry(&graveyard, struct vfsmount,
+			m = list_first_entry(&graveyard, struct mount,
 						mnt_expire);
 			touch_mnt_namespace(m->mnt_ns);
 			umount_tree(m, 1, umounts);
@@ -2370,12 +2209,13 @@
 
 void mnt_make_longterm(struct vfsmount *mnt)
 {
-	__mnt_make_longterm(mnt);
+	__mnt_make_longterm(real_mount(mnt));
 }
 
-void mnt_make_shortterm(struct vfsmount *mnt)
+void mnt_make_shortterm(struct vfsmount *m)
 {
 #ifdef CONFIG_SMP
+	struct mount *mnt = real_mount(m);
 	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
 		return;
 	br_write_lock(vfsmount_lock);
@@ -2393,7 +2233,9 @@
 {
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
-	struct vfsmount *p, *q;
+	struct mount *p, *q;
+	struct mount *old = mnt_ns->root;
+	struct mount *new;
 
 	new_ns = alloc_mnt_ns();
 	if (IS_ERR(new_ns))
@@ -2401,15 +2243,15 @@
 
 	down_write(&namespace_sem);
 	/* First pass: copy the tree topology */
-	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
-					CL_COPY_ALL | CL_EXPIRE);
-	if (!new_ns->root) {
+	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
+	if (!new) {
 		up_write(&namespace_sem);
 		kfree(new_ns);
 		return ERR_PTR(-ENOMEM);
 	}
+	new_ns->root = new;
 	br_write_lock(vfsmount_lock);
-	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
+	list_add_tail(&new_ns->list, &new->mnt_list);
 	br_write_unlock(vfsmount_lock);
 
 	/*
@@ -2417,27 +2259,27 @@
 	 * as belonging to new namespace.  We have already acquired a private
 	 * fs_struct, so tsk->fs->lock is not needed.
 	 */
-	p = mnt_ns->root;
-	q = new_ns->root;
+	p = old;
+	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
 		__mnt_make_longterm(q);
 		if (fs) {
-			if (p == fs->root.mnt) {
-				fs->root.mnt = mntget(q);
+			if (&p->mnt == fs->root.mnt) {
+				fs->root.mnt = mntget(&q->mnt);
 				__mnt_make_longterm(q);
-				mnt_make_shortterm(p);
-				rootmnt = p;
+				mnt_make_shortterm(&p->mnt);
+				rootmnt = &p->mnt;
 			}
-			if (p == fs->pwd.mnt) {
-				fs->pwd.mnt = mntget(q);
+			if (&p->mnt == fs->pwd.mnt) {
+				fs->pwd.mnt = mntget(&q->mnt);
 				__mnt_make_longterm(q);
-				mnt_make_shortterm(p);
-				pwdmnt = p;
+				mnt_make_shortterm(&p->mnt);
+				pwdmnt = &p->mnt;
 			}
 		}
-		p = next_mnt(p, mnt_ns->root);
-		q = next_mnt(q, new_ns->root);
+		p = next_mnt(p, old);
+		q = next_mnt(q, new);
 	}
 	up_write(&namespace_sem);
 
@@ -2470,22 +2312,20 @@
  * create_mnt_ns - creates a private namespace and adds a root filesystem
  * @mnt: pointer to the new root filesystem mountpoint
  */
-struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
+static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
 {
-	struct mnt_namespace *new_ns;
-
-	new_ns = alloc_mnt_ns();
+	struct mnt_namespace *new_ns = alloc_mnt_ns();
 	if (!IS_ERR(new_ns)) {
+		struct mount *mnt = real_mount(m);
 		mnt->mnt_ns = new_ns;
 		__mnt_make_longterm(mnt);
 		new_ns->root = mnt;
-		list_add(&new_ns->list, &new_ns->root->mnt_list);
+		list_add(&new_ns->list, &mnt->mnt_list);
 	} else {
-		mntput(mnt);
+		mntput(m);
 	}
 	return new_ns;
 }
-EXPORT_SYMBOL(create_mnt_ns);
 
 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
 {
@@ -2559,6 +2399,31 @@
 }
 
 /*
+ * Return true if path is reachable from root
+ *
+ * namespace_sem or vfsmount_lock is held
+ */
+bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
+			 const struct path *root)
+{
+	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
+		dentry = mnt->mnt_mountpoint;
+		mnt = mnt->mnt_parent;
+	}
+	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
+}
+
+int path_is_under(struct path *path1, struct path *path2)
+{
+	int res;
+	br_read_lock(vfsmount_lock);
+	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
+	br_read_unlock(vfsmount_lock);
+	return res;
+}
+EXPORT_SYMBOL(path_is_under);
+
+/*
  * pivot_root Semantics:
  * Moves the root file system of the current process to the directory put_old,
  * makes new_root as the new root file system of the current process, and sets
@@ -2586,8 +2451,8 @@
 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 		const char __user *, put_old)
 {
-	struct vfsmount *tmp;
 	struct path new, old, parent_path, root_parent, root;
+	struct mount *new_mnt, *root_mnt;
 	int error;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -2611,11 +2476,13 @@
 		goto out3;
 
 	error = -EINVAL;
-	if (IS_MNT_SHARED(old.mnt) ||
-		IS_MNT_SHARED(new.mnt->mnt_parent) ||
-		IS_MNT_SHARED(root.mnt->mnt_parent))
+	new_mnt = real_mount(new.mnt);
+	root_mnt = real_mount(root.mnt);
+	if (IS_MNT_SHARED(real_mount(old.mnt)) ||
+		IS_MNT_SHARED(new_mnt->mnt_parent) ||
+		IS_MNT_SHARED(root_mnt->mnt_parent))
 		goto out4;
-	if (!check_mnt(root.mnt) || !check_mnt(new.mnt))
+	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
 		goto out4;
 	error = -ENOENT;
 	if (d_unlinked(new.dentry))
@@ -2629,33 +2496,22 @@
 	error = -EINVAL;
 	if (root.mnt->mnt_root != root.dentry)
 		goto out4; /* not a mountpoint */
-	if (root.mnt->mnt_parent == root.mnt)
+	if (!mnt_has_parent(root_mnt))
 		goto out4; /* not attached */
 	if (new.mnt->mnt_root != new.dentry)
 		goto out4; /* not a mountpoint */
-	if (new.mnt->mnt_parent == new.mnt)
+	if (!mnt_has_parent(new_mnt))
 		goto out4; /* not attached */
 	/* make sure we can reach put_old from new_root */
-	tmp = old.mnt;
-	if (tmp != new.mnt) {
-		for (;;) {
-			if (tmp->mnt_parent == tmp)
-				goto out4; /* already mounted on put_old */
-			if (tmp->mnt_parent == new.mnt)
-				break;
-			tmp = tmp->mnt_parent;
-		}
-		if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
-			goto out4;
-	} else if (!is_subdir(old.dentry, new.dentry))
+	if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
 		goto out4;
 	br_write_lock(vfsmount_lock);
-	detach_mnt(new.mnt, &parent_path);
-	detach_mnt(root.mnt, &root_parent);
+	detach_mnt(new_mnt, &parent_path);
+	detach_mnt(root_mnt, &root_parent);
 	/* mount old root on put_old */
-	attach_mnt(root.mnt, &old);
+	attach_mnt(root_mnt, &old);
 	/* mount new_root on / */
-	attach_mnt(new.mnt, &root_parent);
+	attach_mnt(new_mnt, &root_parent);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
 	br_write_unlock(vfsmount_lock);
 	chroot_fs_refs(&root, &new);
@@ -2693,8 +2549,8 @@
 	init_task.nsproxy->mnt_ns = ns;
 	get_mnt_ns(ns);
 
-	root.mnt = ns->root;
-	root.dentry = ns->root->mnt_root;
+	root.mnt = mnt;
+	root.dentry = mnt->mnt_root;
 
 	set_fs_pwd(current->fs, &root);
 	set_fs_root(current->fs, &root);
@@ -2707,7 +2563,7 @@
 
 	init_rwsem(&namespace_sem);
 
-	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
+	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
 	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
@@ -2747,7 +2603,6 @@
 	release_mounts(&umount_list);
 	kfree(ns);
 }
-EXPORT_SYMBOL(put_mnt_ns);
 
 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
 {
@@ -2776,5 +2631,5 @@
 
 bool our_mnt(struct vfsmount *mnt)
 {
-	return check_mnt(mnt);
+	return check_mnt(real_mount(mnt));
 }
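
The fs/namespace.c hunks above are all part of the vfsmount/mount split: code that used to manipulate struct vfsmount directly now works on struct mount, reaching the filesystem-visible part through &mnt->mnt and converting back with real_mount(). A simplified sketch of the relationship these hunks assume (field list abridged, not the actual fs/mount.h declarations):

/* Abridged sketch, kernel context assumed; the real struct lives in fs/mount.h. */
struct mount {
	struct list_head mnt_hash;
	struct mount *mnt_parent;	/* mount we are attached to */
	struct dentry *mnt_mountpoint;	/* dentry we are mounted on */
	struct vfsmount mnt;		/* the part filesystems still see */
	/* ... expiry list, namespace pointer, fsnotify marks, ... */
};

static inline struct mount *real_mount(struct vfsmount *mnt)
{
	return container_of(mnt, struct mount, mnt);
}

static inline int mnt_has_parent(struct mount *mnt)
{
	return mnt != mnt->mnt_parent;	/* a detached mount is its own parent */
}
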
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 9c51f62..aeed93a 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -30,15 +30,15 @@
 
 static int ncp_readdir(struct file *, void *, filldir_t);
 
-static int ncp_create(struct inode *, struct dentry *, int, struct nameidata *);
+static int ncp_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
 static struct dentry *ncp_lookup(struct inode *, struct dentry *, struct nameidata *);
 static int ncp_unlink(struct inode *, struct dentry *);
-static int ncp_mkdir(struct inode *, struct dentry *, int);
+static int ncp_mkdir(struct inode *, struct dentry *, umode_t);
 static int ncp_rmdir(struct inode *, struct dentry *);
 static int ncp_rename(struct inode *, struct dentry *,
 	  	      struct inode *, struct dentry *);
 static int ncp_mknod(struct inode * dir, struct dentry *dentry,
-		     int mode, dev_t rdev);
+		     umode_t mode, dev_t rdev);
 #if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
 extern int ncp_symlink(struct inode *, struct dentry *, const char *);
 #else
@@ -919,7 +919,7 @@
 	goto out;
 }
 
-int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
+int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
 		   dev_t rdev, __le32 attributes)
 {
 	struct ncp_server *server = NCP_SERVER(dir);
@@ -928,7 +928,7 @@
 	int opmode;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 	
-	PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
+	PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name, mode);
 
 	ncp_age_dentry(server, dentry);
@@ -979,13 +979,13 @@
 	return error;
 }
 
-static int ncp_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ncp_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	return ncp_create_new(dir, dentry, mode, 0, 0);
 }
 
-static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct ncp_entry_info finfo;
 	struct ncp_server *server = NCP_SERVER(dir);
@@ -1201,12 +1201,12 @@
 }
 
 static int ncp_mknod(struct inode * dir, struct dentry *dentry,
-		     int mode, dev_t rdev)
+		     umode_t mode, dev_t rdev)
 {
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 	if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber)) {
-		DPRINTK(KERN_DEBUG "ncp_mknod: mode = 0%o\n", mode);
+		DPRINTK(KERN_DEBUG "ncp_mknod: mode = 0%ho\n", mode);
 		return ncp_create_new(dir, dentry, mode, rdev, 0);
 	}
 	return -EPERM; /* Strange, but true */
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 5b5fa33..3d1e34f 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -44,7 +44,7 @@
 static void ncp_evict_inode(struct inode *);
 static void ncp_put_super(struct super_block *);
 static int  ncp_statfs(struct dentry *, struct kstatfs *);
-static int  ncp_show_options(struct seq_file *, struct vfsmount *);
+static int  ncp_show_options(struct seq_file *, struct dentry *);
 
 static struct kmem_cache * ncp_inode_cachep;
 
@@ -60,7 +60,6 @@
 static void ncp_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
 }
 
@@ -323,9 +322,9 @@
 		flush_work_sync(&server->timeout_tq);
 }
 
-static int  ncp_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int  ncp_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct ncp_server *server = NCP_SBP(mnt->mnt_sb);
+	struct ncp_server *server = NCP_SBP(root->d_sb);
 	unsigned int tmp;
 
 	if (server->m.uid != 0)
@@ -548,7 +547,7 @@
 
 	error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
 	if (error)
-		goto out_bdi;
+		goto out_fput;
 
 	server->ncp_filp = ncp_filp;
 	server->ncp_sock = sock;
@@ -559,7 +558,7 @@
 		error = -EBADF;
 		server->info_filp = fget(data.info_fd);
 		if (!server->info_filp)
-			goto out_fput;
+			goto out_bdi;
 		error = -ENOTSOCK;
 		sock_inode = server->info_filp->f_path.dentry->d_inode;
 		if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +745,9 @@
 out_fput2:
 	if (server->info_filp)
 		fput(server->info_filp);
-out_fput:
-	bdi_destroy(&server->bdi);
 out_bdi:
+	bdi_destroy(&server->bdi);
+out_fput:
 	/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
 	 * 
 	 * The previously used put_filp(ncp_filp); was bogus, since
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 790e92a..6958adf 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -901,7 +901,7 @@
 	ret = __ncp_ioctl(inode, cmd, arg);
 outDropWrite:
 	if (need_drop_write)
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 out:
 	return ret;
 }
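
This ioctl hunk, like the nilfs2, ocfs2 and nfsd ones later in the diff, replaces mnt_want_write(filp->f_path.mnt)/mnt_drop_write(...) with the file-based wrappers. Assuming those wrappers are the thin helpers their names suggest, the conversion is purely mechanical; a sketch of the assumed equivalence:

/* Sketch only: names prefixed to avoid claiming the real definitions. */
static inline int sketch_mnt_want_write_file(struct file *file)
{
	return mnt_want_write(file->f_path.mnt);
}

static inline void sketch_mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
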
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 09881e6..32c0658 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -114,7 +114,7 @@
 int ncp_dirhandle_free(struct ncp_server *, __u8 dirhandle);
 
 int ncp_create_new(struct inode *dir, struct dentry *dentry,
-                          int mode, dev_t rdev, __le32 attributes);
+                          umode_t mode, dev_t rdev, __le32 attributes);
 
 static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int volnum) {
 #ifdef CONFIG_NCPFS_NFS_NS
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index 661f861..52439dd 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -108,7 +108,7 @@
 	char *rawlink;
 	int length, err, i, outlen;
 	int kludge;
-	int mode;
+	umode_t mode;
 	__le32 attr;
 	unsigned int hdr;
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ac28990..fd9a872 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -47,13 +47,13 @@
 static int nfs_closedir(struct inode *, struct file *);
 static int nfs_readdir(struct file *, void *, filldir_t);
 static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
-static int nfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-static int nfs_mkdir(struct inode *, struct dentry *, int);
+static int nfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+static int nfs_mkdir(struct inode *, struct dentry *, umode_t);
 static int nfs_rmdir(struct inode *, struct dentry *);
 static int nfs_unlink(struct inode *, struct dentry *);
 static int nfs_symlink(struct inode *, struct dentry *, const char *);
 static int nfs_link(struct dentry *, struct inode *, struct dentry *);
-static int nfs_mknod(struct inode *, struct dentry *, int, dev_t);
+static int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
 static int nfs_rename(struct inode *, struct dentry *,
 		      struct inode *, struct dentry *);
 static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
@@ -112,7 +112,7 @@
 #ifdef CONFIG_NFS_V4
 
 static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
-static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd);
+static int nfs_open_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd);
 const struct inode_operations nfs4_dir_inode_operations = {
 	.create		= nfs_open_create,
 	.lookup		= nfs_atomic_lookup,
@@ -1368,18 +1368,7 @@
 
 static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags)
 {
-	struct nfs_open_context *ctx;
-	struct rpc_cred *cred;
-	fmode_t fmode = flags_to_mode(open_flags);
-
-	cred = rpc_lookup_cred();
-	if (IS_ERR(cred))
-		return ERR_CAST(cred);
-	ctx = alloc_nfs_open_context(dentry, cred, fmode);
-	put_rpccred(cred);
-	if (ctx == NULL)
-		return ERR_PTR(-ENOMEM);
-	return ctx;
+	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags));
 }
 
 static int do_open(struct inode *inode, struct file *filp)
@@ -1584,8 +1573,8 @@
 	return nfs_lookup_revalidate(dentry, nd);
 }
 
-static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode,
-		struct nameidata *nd)
+static int nfs_open_create(struct inode *dir, struct dentry *dentry,
+		umode_t mode, struct nameidata *nd)
 {
 	struct nfs_open_context *ctx = NULL;
 	struct iattr attr;
@@ -1675,8 +1664,8 @@
  * that the operation succeeded on the server, but an error in the
  * reply path made it appear to have failed.
  */
-static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
-		struct nameidata *nd)
+static int nfs_create(struct inode *dir, struct dentry *dentry,
+		umode_t mode, struct nameidata *nd)
 {
 	struct iattr attr;
 	int error;
@@ -1704,7 +1693,7 @@
  * See comments for nfs_proc_create regarding failed operations.
  */
 static int
-nfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	struct iattr attr;
 	int status;
@@ -1730,7 +1719,7 @@
 /*
  * See comments for nfs_proc_create regarding failed operations.
  */
-static int nfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct iattr attr;
 	int error;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eca56d4..606ef0f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -147,7 +147,7 @@
 	 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
 	 * the cached file length
 	 */
-	if (origin != SEEK_SET || origin != SEEK_CUR) {
+	if (origin != SEEK_SET && origin != SEEK_CUR) {
 		struct inode *inode = filp->f_mapping->host;
 
 		int retval = nfs_revalidate_file_size(inode, filp);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 50a15fa..81db25e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,6 +38,7 @@
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/freezer.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -77,7 +78,7 @@
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
 
@@ -629,23 +630,28 @@
 	nfs_revalidate_inode(server, inode);
 }
 
-struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode)
+struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
 {
 	struct nfs_open_context *ctx;
+	struct rpc_cred *cred = rpc_lookup_cred();
+	if (IS_ERR(cred))
+		return ERR_CAST(cred);
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-	if (ctx != NULL) {
-		nfs_sb_active(dentry->d_sb);
-		ctx->dentry = dget(dentry);
-		ctx->cred = get_rpccred(cred);
-		ctx->state = NULL;
-		ctx->mode = f_mode;
-		ctx->flags = 0;
-		ctx->error = 0;
-		nfs_init_lock_context(&ctx->lock_context);
-		ctx->lock_context.open_context = ctx;
-		INIT_LIST_HEAD(&ctx->list);
+	if (!ctx) {
+		put_rpccred(cred);
+		return ERR_PTR(-ENOMEM);
 	}
+	nfs_sb_active(dentry->d_sb);
+	ctx->dentry = dget(dentry);
+	ctx->cred = cred;
+	ctx->state = NULL;
+	ctx->mode = f_mode;
+	ctx->flags = 0;
+	ctx->error = 0;
+	nfs_init_lock_context(&ctx->lock_context);
+	ctx->lock_context.open_context = ctx;
+	INIT_LIST_HEAD(&ctx->list);
 	return ctx;
 }
 
@@ -738,15 +744,10 @@
 int nfs_open(struct inode *inode, struct file *filp)
 {
 	struct nfs_open_context *ctx;
-	struct rpc_cred *cred;
 
-	cred = rpc_lookup_cred();
-	if (IS_ERR(cred))
-		return PTR_ERR(cred);
-	ctx = alloc_nfs_open_context(filp->f_path.dentry, cred, filp->f_mode);
-	put_rpccred(cred);
-	if (ctx == NULL)
-		return -ENOMEM;
+	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
 	nfs_file_set_open_context(filp, ctx);
 	put_nfs_open_context(ctx);
 	nfs_fscache_set_inode_cookie(inode, filp);
@@ -1464,7 +1465,6 @@
 static void nfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
 }
 
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d4bc9ed9..91943953 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -17,6 +17,7 @@
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
 #include <linux/nfs_mount.h>
+#include <linux/freezer.h>
 
 #include "iostat.h"
 #include "internal.h"
@@ -32,7 +33,7 @@
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index be2bbac..dcda0ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -39,6 +39,8 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
@@ -53,6 +55,7 @@
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
 #include <linux/utsname.h>
+#include <linux/freezer.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -241,7 +244,7 @@
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	schedule_timeout_killable(*timeout);
+	freezable_schedule_timeout_killable(*timeout);
 	if (fatal_signal_pending(current))
 		res = -ERESTARTSYS;
 	*timeout <<= 1;
@@ -894,6 +897,8 @@
 
 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 {
+	if (delegation == NULL)
+		return 0;
 	if ((delegation->type & fmode) != fmode)
 		return 0;
 	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1041,7 @@
 		}
 		rcu_read_lock();
 		delegation = rcu_dereference(nfsi->delegation);
-		if (delegation == NULL ||
-		    !can_open_delegated(delegation, fmode)) {
+		if (!can_open_delegated(delegation, fmode)) {
 			rcu_read_unlock();
 			break;
 		}
@@ -1091,7 +1095,12 @@
 		if (delegation)
 			delegation_flags = delegation->flags;
 		rcu_read_unlock();
-		if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+					"returning a delegation for "
+					"OPEN(CLAIM_DELEGATE_CUR)\n",
+					NFS_CLIENT(inode)->cl_server);
+		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
 			nfs_inode_set_delegation(state->inode,
 					data->owner->so_cred,
 					&data->o_res);
@@ -1423,11 +1432,9 @@
 			goto out_no_action;
 		rcu_read_lock();
 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-		if (delegation != NULL &&
-		    test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
-			rcu_read_unlock();
-			goto out_no_action;
-		}
+		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+		    can_open_delegated(delegation, data->o_arg.fmode))
+			goto unlock_no_action;
 		rcu_read_unlock();
 	}
 	/* Update sequence id. */
@@ -1444,6 +1451,8 @@
 		return;
 	rpc_call_start(task);
 	return;
+unlock_no_action:
+	rcu_read_unlock();
 out_no_action:
 	task->tk_action = NULL;
 
@@ -3950,7 +3959,7 @@
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_killable(timeout);
+	freezable_schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 39914be..6a7107a 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1156,11 +1156,13 @@
 		if (status >= 0) {
 			status = nfs4_reclaim_locks(state, ops);
 			if (status >= 0) {
+				spin_lock(&state->state_lock);
 				list_for_each_entry(lock, &state->lock_states, ls_locks) {
 					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
 						printk("%s: Lock reclaim failed!\n",
 							__func__);
 				}
+				spin_unlock(&state->state_lock);
 				nfs4_put_open_state(state);
 				goto restart;
 			}
@@ -1224,10 +1226,12 @@
 	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
 	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
+	spin_lock(&state->state_lock);
 	list_for_each_entry(lock, &state->lock_states, ls_locks) {
 		lock->ls_seqid.flags = 0;
 		lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
 	}
+	spin_unlock(&state->state_lock);
 }
 
 static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@
 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
 	switch (error) {
+		case 0:
+			break;
 		case -NFS4ERR_CB_PATH_DOWN:
 			nfs_handle_cb_pathdown(clp);
-			return 0;
+			break;
 		case -NFS4ERR_NO_GRACE:
 			nfs4_state_end_reclaim_reboot(clp);
-			return 0;
+			break;
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_LEASE_MOVED:
 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@
 		case -NFS4ERR_SEQ_MISORDERED:
 			set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
 			/* Zero session reset errors */
-			return 0;
+			break;
 		case -EKEYEXPIRED:
 			/* Nothing we can do */
 			nfs4_warn_keyexpired(clp->cl_hostname);
-			return 0;
+			break;
+		default:
+			return error;
 	}
-	return error;
+	return 0;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1428,7 +1436,7 @@
 	struct rpc_cred *cred;
 	const struct nfs4_state_maintenance_ops *ops =
 		clp->cl_mvops->state_renewal_ops;
-	int status = -NFS4ERR_EXPIRED;
+	int status;
 
 	/* Is the client already known to have an expired lease? */
 	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@
 	spin_unlock(&clp->cl_lock);
 	if (cred == NULL) {
 		cred = nfs4_get_setclientid_cred(clp);
+		status = -ENOKEY;
 		if (cred == NULL)
 			goto out;
 	}
@@ -1525,16 +1534,16 @@
 {
 	if (!flags)
 		return;
-	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
 		nfs41_handle_server_reboot(clp);
-	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
 			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
 			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
 			    SEQ4_STATUS_LEASE_MOVED))
 		nfs41_handle_state_revoked(clp);
-	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
 		nfs41_handle_recallable_state_revoked(clp);
-	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+	if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
 			    SEQ4_STATUS_BACKCHANNEL_FAULT |
 			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
 		nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@
 
 		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
 			status = nfs4_check_lease(clp);
+			if (status < 0)
+				goto out_error;
 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
 				continue;
-			if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
-				goto out_error;
 		}
 
 		/* Initialize or reset the session */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f48125d..0c672588 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -41,6 +41,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_page.h>
 #include <linux/lockd/bind.h>
+#include <linux/freezer.h>
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
@@ -59,7 +60,7 @@
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EKEYEXPIRED)
 			break;
-		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
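
The schedule_timeout_killable() to freezable_schedule_timeout_killable() conversions in nfs3proc.c, nfs4proc.c and here let a task retrying against an unresponsive server be frozen during suspend/hibernate instead of blocking it. The freezable variant is assumed to bracket the sleep with the freezer bookkeeping, roughly:

/* Rough sketch of what the freezable sleep helper is assumed to do;
 * the real definition lives in include/linux/freezer.h. */
static inline long sketch_freezable_sleep(long timeout)
{
	long ret;

	freezer_do_not_count();			/* tell the freezer not to wait for us */
	ret = schedule_timeout_killable(timeout);
	freezer_count();			/* re-join, freezing here if requested */
	return ret;
}
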
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 1347774..e463967 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -41,7 +41,6 @@
 #include <linux/lockd/bind.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
-#include <linux/mnt_namespace.h>
 #include <linux/namei.h>
 #include <linux/nfs_idmap.h>
 #include <linux/vfs.h>
@@ -263,10 +262,10 @@
 
 static void nfs_umount_begin(struct super_block *);
 static int  nfs_statfs(struct dentry *, struct kstatfs *);
-static int  nfs_show_options(struct seq_file *, struct vfsmount *);
-static int  nfs_show_devname(struct seq_file *, struct vfsmount *);
-static int  nfs_show_path(struct seq_file *, struct vfsmount *);
-static int  nfs_show_stats(struct seq_file *, struct vfsmount *);
+static int  nfs_show_options(struct seq_file *, struct dentry *);
+static int  nfs_show_devname(struct seq_file *, struct dentry *);
+static int  nfs_show_path(struct seq_file *, struct dentry *);
+static int  nfs_show_stats(struct seq_file *, struct dentry *);
 static struct dentry *nfs_fs_mount(struct file_system_type *,
 		int, const char *, void *);
 static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -721,9 +720,9 @@
 /*
  * Describe the mount options on this VFS mountpoint
  */
-static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_options(struct seq_file *m, struct dentry *root)
 {
-	struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+	struct nfs_server *nfss = NFS_SB(root->d_sb);
 
 	nfs_show_mount_options(m, nfss, 0);
 
@@ -761,14 +760,14 @@
 #endif
 #endif
 
-static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_devname(struct seq_file *m, struct dentry *root)
 {
 	char *page = (char *) __get_free_page(GFP_KERNEL);
 	char *devname, *dummy;
 	int err = 0;
 	if (!page)
 		return -ENOMEM;
-	devname = nfs_path(&dummy, mnt->mnt_root, page, PAGE_SIZE);
+	devname = nfs_path(&dummy, root, page, PAGE_SIZE);
 	if (IS_ERR(devname))
 		err = PTR_ERR(devname);
 	else
@@ -777,7 +776,7 @@
 	return err;
 }
 
-static int nfs_show_path(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_path(struct seq_file *m, struct dentry *dentry)
 {
 	seq_puts(m, "/");
 	return 0;
@@ -786,10 +785,10 @@
 /*
  * Present statistical information for this VFS mountpoint
  */
-static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_stats(struct seq_file *m, struct dentry *root)
 {
 	int i, cpu;
-	struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+	struct nfs_server *nfss = NFS_SB(root->d_sb);
 	struct rpc_auth *auth = nfss->client->cl_auth;
 	struct nfs_iostats totals = { };
 
@@ -799,10 +798,10 @@
 	 * Display all mount option settings
 	 */
 	seq_printf(m, "\n\topts:\t");
-	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? "ro" : "rw");
-	seq_puts(m, mnt->mnt_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
-	seq_puts(m, mnt->mnt_sb->s_flags & MS_NOATIME ? ",noatime" : "");
-	seq_puts(m, mnt->mnt_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+	seq_puts(m, root->d_sb->s_flags & MS_RDONLY ? "ro" : "rw");
+	seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
+	seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
+	seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
 	nfs_show_mount_options(m, nfss, 1);
 
 	seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
@@ -2788,11 +2787,15 @@
 		const char *export_path)
 {
 	struct dentry *dentry;
-	int ret = nfs_referral_loop_protect();
+	int err;
 
-	if (ret) {
+	if (IS_ERR(root_mnt))
+		return ERR_CAST(root_mnt);
+
+	err = nfs_referral_loop_protect();
+	if (err) {
 		mntput(root_mnt);
-		return ERR_PTR(ret);
+		return ERR_PTR(err);
 	}
 
 	dentry = mount_subtree(root_mnt, export_path);
@@ -2816,9 +2819,7 @@
 			data->nfs_server.hostname);
 	data->nfs_server.export_path = export_path;
 
-	res = ERR_CAST(root_mnt);
-	if (!IS_ERR(root_mnt))
-		res = nfs_follow_remote_path(root_mnt, export_path);
+	res = nfs_follow_remote_path(root_mnt, export_path);
 
 	dfprintk(MOUNT, "<-- nfs4_try_mount() = %ld%s\n",
 			IS_ERR(res) ? PTR_ERR(res) : 0,
@@ -3079,9 +3080,7 @@
 			flags, data, data->hostname);
 	data->mnt_path = export_path;
 
-	res = ERR_CAST(root_mnt);
-	if (!IS_ERR(root_mnt))
-		res = nfs_follow_remote_path(root_mnt, export_path);
+	res = nfs_follow_remote_path(root_mnt, export_path);
 	dprintk("<-- nfs4_referral_mount() = %ld%s\n",
 			IS_ERR(res) ? PTR_ERR(res) : 0,
 			IS_ERR(res) ? " [error]" : "");
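
The ncpfs, nfs, nilfs2, ntfs and ocfs2 hunks in this diff all track the same super_operations change: ->show_options() now receives the root dentry of the mount being displayed instead of a struct vfsmount, and derives the superblock from root->d_sb. A minimal implementation sketch (filesystem name and fields hypothetical):

/* Hypothetical per-superblock data, only to make the sketch self-contained. */
struct examplefs_sb_info {
	unsigned int timeout;
	bool verbose;
};

static int examplefs_show_options(struct seq_file *m, struct dentry *root)
{
	struct examplefs_sb_info *sbi = root->d_sb->s_fs_info;

	seq_printf(m, ",timeout=%u", sbi->timeout);
	if (sbi->verbose)
		seq_puts(m, ",verbose");
	return 0;
}
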
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index fa38336..c5e28ed8 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -838,7 +838,7 @@
 			return status;
 		}
 	}
-	status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
+	status = fh_want_write(&cstate->current_fh);
 	if (status)
 		return status;
 	status = nfs_ok;
@@ -856,7 +856,7 @@
 	status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
 				0, (time_t)0);
 out:
-	mnt_drop_write(cstate->current_fh.fh_export->ex_path.mnt);
+	fh_drop_write(&cstate->current_fh);
 	return status;
 }
 
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ed083b9..80a0be9 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -147,11 +147,11 @@
 	status = -EEXIST;
 	if (dentry->d_inode)
 		goto out_put;
-	status = mnt_want_write(rec_file->f_path.mnt);
+	status = mnt_want_write_file(rec_file);
 	if (status)
 		goto out_put;
 	status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU);
-	mnt_drop_write(rec_file->f_path.mnt);
+	mnt_drop_write_file(rec_file);
 out_put:
 	dput(dentry);
 out_unlock:
@@ -268,7 +268,7 @@
 	if (!rec_file || !clp->cl_firststate)
 		return;
 
-	status = mnt_want_write(rec_file->f_path.mnt);
+	status = mnt_want_write_file(rec_file);
 	if (status)
 		goto out;
 	clp->cl_firststate = 0;
@@ -281,7 +281,7 @@
 	nfs4_reset_creds(original_cred);
 	if (status == 0)
 		vfs_fsync(rec_file, 0);
-	mnt_drop_write(rec_file->f_path.mnt);
+	mnt_drop_write_file(rec_file);
 out:
 	if (status)
 		printk("NFSD: Failed to remove expired client state directory"
@@ -311,13 +311,13 @@
 
 	if (!rec_file)
 		return;
-	status = mnt_want_write(rec_file->f_path.mnt);
+	status = mnt_want_write_file(rec_file);
 	if (status)
 		goto out;
 	status = nfsd4_list_rec_dir(purge_old);
 	if (status == 0)
 		vfs_fsync(rec_file, 0);
-	mnt_drop_write(rec_file->f_path.mnt);
+	mnt_drop_write_file(rec_file);
 out:
 	if (status)
 		printk("nfsd4: failed to purge old clients from recovery"
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 47e94e3..9ca16dc 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -658,7 +658,7 @@
 /*
  * XXX: If we run out of reserved DRC memory we could (up to a point)
  * re-negotiate active sessions and reduce their slot usage to make
- * rooom for new connections. For now we just fail the create session.
+ * room for new connections. For now we just fail the create session.
  */
 static int nfsd4_get_drc_mem(int slotsize, u32 num)
 {
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index c45a2ea..bb4a11d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -272,7 +272,7 @@
 	 * 2.  Is that directory a mount point, or
 	 * 3.  Is that directory the root of an exported file system?
 	 */
-	error = nlmsvc_unlock_all_by_sb(path.mnt->mnt_sb);
+	error = nlmsvc_unlock_all_by_sb(path.dentry->d_sb);
 
 	path_put(&path);
 	return error;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index c763de5..68454e7 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -59,7 +59,7 @@
  * the write call).
  */
 static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int requested)
+nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, umode_t requested)
 {
 	mode &= S_IFMT;
 
@@ -293,7 +293,7 @@
  * include/linux/nfsd/nfsd.h.
  */
 __be32
-fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
 {
 	struct svc_export *exp;
 	struct dentry	*dentry;
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index c16f8d8..e5e6707 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -102,7 +102,7 @@
 /*
  * Function prototypes
  */
-__be32	fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
+__be32	fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
 __be32	fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
 __be32	fh_update(struct svc_fh *);
 void	fh_put(struct svc_fh *);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7a2e442..d25a723 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -307,7 +307,7 @@
 	struct dentry	*dentry;
 	struct inode	*inode;
 	int		accmode = NFSD_MAY_SATTR;
-	int		ftype = 0;
+	umode_t		ftype = 0;
 	__be32		err;
 	int		host_err;
 	int		size_change = 0;
@@ -730,7 +730,7 @@
  * N.B. After this call fhp needs an fh_put
  */
 __be32
-nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
 			int access, struct file **filp)
 {
 	struct dentry	*dentry;
@@ -1300,7 +1300,7 @@
 		goto out;
 	}
 
-	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+	host_err = fh_want_write(fhp);
 	if (host_err)
 		goto out_nfserr;
 
@@ -1325,7 +1325,7 @@
 		break;
 	}
 	if (host_err < 0) {
-		mnt_drop_write(fhp->fh_export->ex_path.mnt);
+		fh_drop_write(fhp);
 		goto out_nfserr;
 	}
 
@@ -1339,7 +1339,7 @@
 	err2 = nfserrno(commit_metadata(fhp));
 	if (err2)
 		err = err2;
-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+	fh_drop_write(fhp);
 	/*
 	 * Update the file handle to get the new inode info.
 	 */
@@ -1430,7 +1430,7 @@
 		v_atime = verifier[1]&0x7fffffff;
 	}
 	
-	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+	host_err = fh_want_write(fhp);
 	if (host_err)
 		goto out_nfserr;
 	if (dchild->d_inode) {
@@ -1469,13 +1469,13 @@
 		case NFS3_CREATE_GUARDED:
 			err = nfserr_exist;
 		}
-		mnt_drop_write(fhp->fh_export->ex_path.mnt);
+		fh_drop_write(fhp);
 		goto out;
 	}
 
 	host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
 	if (host_err < 0) {
-		mnt_drop_write(fhp->fh_export->ex_path.mnt);
+		fh_drop_write(fhp);
 		goto out_nfserr;
 	}
 	if (created)
@@ -1503,7 +1503,7 @@
 	if (!err)
 		err = nfserrno(commit_metadata(fhp));
 
-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+	fh_drop_write(fhp);
 	/*
 	 * Update the filehandle to get the new inode info.
 	 */
@@ -1600,7 +1600,7 @@
 	if (IS_ERR(dnew))
 		goto out_nfserr;
 
-	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+	host_err = fh_want_write(fhp);
 	if (host_err)
 		goto out_nfserr;
 
@@ -1621,7 +1621,7 @@
 		err = nfserrno(commit_metadata(fhp));
 	fh_unlock(fhp);
 
-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+	fh_drop_write(fhp);
 
 	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
 	dput(dnew);
@@ -1674,7 +1674,7 @@
 
 	dold = tfhp->fh_dentry;
 
-	host_err = mnt_want_write(tfhp->fh_export->ex_path.mnt);
+	host_err = fh_want_write(tfhp);
 	if (host_err) {
 		err = nfserrno(host_err);
 		goto out_dput;
@@ -1699,7 +1699,7 @@
 			err = nfserrno(host_err);
 	}
 out_drop_write:
-	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
+	fh_drop_write(tfhp);
 out_dput:
 	dput(dnew);
 out_unlock:
@@ -1776,7 +1776,7 @@
 	host_err = -EXDEV;
 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
 		goto out_dput_new;
-	host_err = mnt_want_write(ffhp->fh_export->ex_path.mnt);
+	host_err = fh_want_write(ffhp);
 	if (host_err)
 		goto out_dput_new;
 
@@ -1795,7 +1795,7 @@
 			host_err = commit_metadata(ffhp);
 	}
 out_drop_write:
-	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
+	fh_drop_write(ffhp);
  out_dput_new:
 	dput(ndentry);
  out_dput_old:
@@ -1854,7 +1854,7 @@
 	if (!type)
 		type = rdentry->d_inode->i_mode & S_IFMT;
 
-	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+	host_err = fh_want_write(fhp);
 	if (host_err)
 		goto out_put;
 
@@ -1868,7 +1868,7 @@
 	if (!host_err)
 		host_err = commit_metadata(fhp);
 out_drop_write:
-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+	fh_drop_write(fhp);
 out_put:
 	dput(rdentry);
 
@@ -2270,7 +2270,7 @@
 	} else
 		size = 0;
 
-	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
+	error = fh_want_write(fhp);
 	if (error)
 		goto getout;
 	if (size)
@@ -2284,7 +2284,7 @@
 				error = 0;
 		}
 	}
-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+	fh_drop_write(fhp);
 
 getout:
 	kfree(value);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 3f54ad0..1dcd238 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -66,7 +66,7 @@
 __be32		nfsd_commit(struct svc_rqst *, struct svc_fh *,
 				loff_t, unsigned long);
 #endif /* CONFIG_NFSD_V3 */
-__be32		nfsd_open(struct svc_rqst *, struct svc_fh *, int,
+__be32		nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
 				int, struct file **);
 void		nfsd_close(struct file *);
 __be32 		nfsd_read(struct svc_rqst *, struct svc_fh *,
@@ -106,4 +106,14 @@
 int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
 #endif
 
+static inline int fh_want_write(struct svc_fh *fh)
+{
+	return mnt_want_write(fh->fh_export->ex_path.mnt);
+}
+
+static inline void fh_drop_write(struct svc_fh *fh)
+{
+	mnt_drop_write(fh->fh_export->ex_path.mnt);
+}
+
 #endif /* LINUX_NFSD_VFS_H */
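
The fh_want_write()/fh_drop_write() helpers just added shorten the repeated mnt_want_write(fhp->fh_export->ex_path.mnt) pattern used throughout nfsd; the nfs4proc.c, nfs4recover.c and vfs.c hunks earlier in this diff convert the call sites accordingly. A sketch of a converted call site (function name hypothetical, error paths simplified):

/* Sketch of a converted nfsd modification path. */
static __be32 sketch_nfsd_modify(struct svc_fh *fhp)
{
	int host_err = fh_want_write(fhp);

	if (host_err)
		return nfserrno(host_err);
	/* ... create, rename or unlink on the exported filesystem ... */
	fh_drop_write(fhp);
	return nfs_ok;
}
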
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 3a19239..ca35b3a 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -251,7 +251,7 @@
 
 static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
 {
-	mode_t mode = inode->i_mode;
+	umode_t mode = inode->i_mode;
 
 	de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
 }
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b50ffb7..8f7b95a 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -291,7 +291,7 @@
 	.is_partially_uptodate  = block_is_partially_uptodate,
 };
 
-struct inode *nilfs_new_inode(struct inode *dir, int mode)
+struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct the_nilfs *nilfs = sb->s_fs_info;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 41d6743..8866496 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -27,7 +27,7 @@
 #include <linux/uaccess.h>	/* copy_from_user(), copy_to_user() */
 #include <linux/vmalloc.h>
 #include <linux/compat.h>	/* compat_ptr() */
-#include <linux/mount.h>	/* mnt_want_write(), mnt_drop_write() */
+#include <linux/mount.h>	/* mnt_want_write_file(), mnt_drop_write_file() */
 #include <linux/buffer_head.h>
 #include <linux/nilfs2_fs.h>
 #include "nilfs.h"
@@ -119,7 +119,7 @@
 	if (get_user(flags, (int __user *)argp))
 		return -EFAULT;
 
-	ret = mnt_want_write(filp->f_path.mnt);
+	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
@@ -154,7 +154,7 @@
 	ret = nilfs_transaction_commit(inode->i_sb);
 out:
 	mutex_unlock(&inode->i_mutex);
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 	return ret;
 }
 
@@ -174,7 +174,7 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	ret = mnt_want_write(filp->f_path.mnt);
+	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
@@ -194,7 +194,7 @@
 
 	up_read(&inode->i_sb->s_umount);
 out:
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 	return ret;
 }
 
@@ -210,7 +210,7 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	ret = mnt_want_write(filp->f_path.mnt);
+	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
@@ -225,7 +225,7 @@
 	else
 		nilfs_transaction_commit(inode->i_sb); /* never fails */
 out:
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 	return ret;
 }
 
@@ -591,7 +591,7 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	ret = mnt_want_write(filp->f_path.mnt);
+	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
@@ -625,6 +625,9 @@
 		if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
 			goto out_free;
 
+		if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+			goto out_free;
+
 		len = argv[n].v_size * argv[n].v_nmembs;
 		base = (void __user *)(unsigned long)argv[n].v_base;
 		if (len == 0) {
@@ -672,7 +675,7 @@
 		vfree(kbufs[n]);
 	kfree(kbufs[4]);
 out:
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 	return ret;
 }
 
@@ -707,7 +710,7 @@
 	if (!capable(CAP_SYS_ADMIN))
 		goto out;
 
-	ret = mnt_want_write(filp->f_path.mnt);
+	ret = mnt_want_write_file(filp);
 	if (ret)
 		goto out;
 
@@ -718,7 +721,7 @@
 	ret = nilfs_resize_fs(inode->i_sb, newsize);
 
 out_drop_write:
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 out:
 	return ret;
 }
@@ -842,6 +845,19 @@
 	case FS_IOC32_GETVERSION:
 		cmd = FS_IOC_GETVERSION;
 		break;
+	case NILFS_IOCTL_CHANGE_CPMODE:
+	case NILFS_IOCTL_DELETE_CHECKPOINT:
+	case NILFS_IOCTL_GET_CPINFO:
+	case NILFS_IOCTL_GET_CPSTAT:
+	case NILFS_IOCTL_GET_SUINFO:
+	case NILFS_IOCTL_GET_SUSTAT:
+	case NILFS_IOCTL_GET_VINFO:
+	case NILFS_IOCTL_GET_BDESCS:
+	case NILFS_IOCTL_CLEAN_SEGMENTS:
+	case NILFS_IOCTL_SYNC:
+	case NILFS_IOCTL_RESIZE:
+	case NILFS_IOCTL_SET_ALLOC_RANGE:
+		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
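
Two notes on the nilfs2 ioctl hunks above. The new bound check guards the later multiplication len = v_size * v_nmembs against 32-bit wraparound: for a non-zero v_size, v_nmembs < UINT_MAX / v_size (integer division) implies v_nmembs * v_size <= (UINT_MAX / v_size) * v_size <= UINT_MAX, so rejecting v_nmembs >= UINT_MAX / v_size is sufficient to keep len from overflowing an unsigned int. The added compat_ioctl cases simply pass the nilfs-specific commands through to the native handler, presumably because their argument layouts are the same for 32-bit and 64-bit callers.
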
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 768982d..1cd3f62 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -84,7 +84,7 @@
  * If the create succeeds, we fill in the inode information
  * with d_instantiate().
  */
-static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int nilfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			struct nameidata *nd)
 {
 	struct inode *inode;
@@ -112,7 +112,7 @@
 }
 
 static int
-nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+nilfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	struct inode *inode;
 	struct nilfs_transaction_info ti;
@@ -213,7 +213,7 @@
 	return err;
 }
 
-static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 	struct nilfs_transaction_info ti;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 3777d13..250add8 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -246,7 +246,7 @@
 /* inode.c */
 void nilfs_inode_add_blocks(struct inode *inode, int n);
 void nilfs_inode_sub_blocks(struct inode *inode, int n);
-extern struct inode *nilfs_new_inode(struct inode *, int);
+extern struct inode *nilfs_new_inode(struct inode *, umode_t);
 extern void nilfs_free_inode(struct inode *);
 extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 extern void nilfs_set_inode_flags(struct inode *);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bb24ab6..0e72ad6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2470,7 +2470,7 @@
 
 	if (freezing(current)) {
 		spin_unlock(&sci->sc_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&sci->sc_state_lock);
 	} else {
 		DEFINE_WAIT(wait);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8351c44..08e3d4f 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -175,8 +175,6 @@
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
-	INIT_LIST_HEAD(&inode->i_dentry);
-
 	if (mdi) {
 		kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
 		kfree(mdi);
@@ -650,11 +648,11 @@
 	return 0;
 }
 
-static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry)
 {
-	struct super_block *sb = vfs->mnt_sb;
+	struct super_block *sb = dentry->d_sb;
 	struct the_nilfs *nilfs = sb->s_fs_info;
-	struct nilfs_root *root = NILFS_I(vfs->mnt_root->d_inode)->i_root;
+	struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root;
 
 	if (!nilfs_test_opt(nilfs, BARRIER))
 		seq_puts(seq, ",nobarrier");
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9fde1c0..3568c8a 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -16,6 +16,8 @@
 
 #include <asm/ioctls.h>
 
+#include "../../mount.h"
+
 #define FANOTIFY_DEFAULT_MAX_EVENTS	16384
 #define FANOTIFY_DEFAULT_MAX_MARKS	8192
 #define FANOTIFY_DEFAULT_MAX_LISTENERS	128
@@ -546,7 +548,7 @@
 
 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
 	fsnotify_put_mark(fsn_mark);
-	if (removed & mnt->mnt_fsnotify_mask)
+	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
 		fsnotify_recalc_vfsmount_mask(mnt);
 
 	return 0;
@@ -623,7 +625,7 @@
 	}
 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
 
-	if (added & ~mnt->mnt_fsnotify_mask)
+	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
 		fsnotify_recalc_vfsmount_mask(mnt);
 err:
 	fsnotify_put_mark(fsn_mark);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 79b47cb..ccb14d3 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -26,6 +26,7 @@
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
+#include "../mount.h"
 
 /*
  * Clear all of the marks on an inode when it is being evicted from core
@@ -205,13 +206,13 @@
 	struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
 	struct fsnotify_group *inode_group, *vfsmount_group;
 	struct fsnotify_event *event = NULL;
-	struct vfsmount *mnt;
+	struct mount *mnt;
 	int idx, ret = 0;
 	/* global tests shouldn't care about events on child only the specific event */
 	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
 
 	if (data_is == FSNOTIFY_EVENT_PATH)
-		mnt = ((struct path *)data)->mnt;
+		mnt = real_mount(((struct path *)data)->mnt);
 	else
 		mnt = NULL;
 
@@ -262,11 +263,11 @@
 			/* we didn't use the vfsmount_mark */
 			vfsmount_group = NULL;
 		} else if (vfsmount_group > inode_group) {
-			ret = send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
+			ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data,
 					    data_is, cookie, file_name, &event);
 			inode_group = NULL;
 		} else {
-			ret = send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
+			ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark,
 					    mask, data, data_is, cookie, file_name,
 					    &event);
 		}
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 778fe6c..b7b4b0e 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -28,15 +28,17 @@
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
+#include "../mount.h"
 
 void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
 {
 	struct fsnotify_mark *mark, *lmark;
 	struct hlist_node *pos, *n;
+	struct mount *m = real_mount(mnt);
 	LIST_HEAD(free_list);
 
 	spin_lock(&mnt->mnt_root->d_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
 		list_add(&mark->m.free_m_list, &free_list);
 		hlist_del_init_rcu(&mark->m.m_list);
 		fsnotify_get_mark(mark);
@@ -59,15 +61,16 @@
  */
 static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
 {
+	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&mnt->mnt_root->d_lock);
 
-	hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
+	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
 		new_mask |= mark->mask;
-	mnt->mnt_fsnotify_mask = new_mask;
+	m->mnt_fsnotify_mask = new_mask;
 }
 
 /*
@@ -101,12 +104,13 @@
 static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
 								struct vfsmount *mnt)
 {
+	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 
 	assert_spin_locked(&mnt->mnt_root->d_lock);
 
-	hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -140,6 +144,7 @@
 			       struct fsnotify_group *group, struct vfsmount *mnt,
 			       int allow_dups)
 {
+	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *lmark;
 	struct hlist_node *node, *last = NULL;
 	int ret = 0;
@@ -154,13 +159,13 @@
 	mark->m.mnt = mnt;
 
 	/* is mark the first mark? */
-	if (hlist_empty(&mnt->mnt_fsnotify_marks)) {
-		hlist_add_head_rcu(&mark->m.m_list, &mnt->mnt_fsnotify_marks);
+	if (hlist_empty(&m->mnt_fsnotify_marks)) {
+		hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);
 		goto out;
 	}
 
 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &mnt->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
 		last = node;
 
 		if ((lmark->group == group) && !allow_dups) {
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 97e2dac..2eaa666 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -335,7 +335,6 @@
 static void ntfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
 }
 
@@ -2301,16 +2300,16 @@
 /**
  * ntfs_show_options - show mount options in /proc/mounts
  * @sf:		seq_file in which to write our mount options
- * @mnt:	vfs mount whose mount options to display
+ * @root:	root of the mounted tree whose mount options to display
  *
  * Called by the VFS once for each mounted ntfs volume when someone reads
  * /proc/mounts in order to display the NTFS specific mount options of each
- * mount. The mount options of the vfs mount @mnt are written to the seq file
+ * mount. The mount options of fs specified by @root are written to the seq file
  * @sf and success is returned.
  */
-int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt)
+int ntfs_show_options(struct seq_file *sf, struct dentry *root)
 {
-	ntfs_volume *vol = NTFS_SB(mnt->mnt_sb);
+	ntfs_volume *vol = NTFS_SB(root->d_sb);
 	int i;
 
 	seq_printf(sf, ",uid=%i", vol->uid);
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index fe8e7e9..db29695 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -298,7 +298,7 @@
 
 extern int ntfs_read_inode_mount(struct inode *vi);
 
-extern int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt);
+extern int ntfs_show_options(struct seq_file *sf, struct dentry *root);
 
 #ifdef NTFS_RW
 
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index b52706d..608be45 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -104,7 +104,7 @@
 	int errors = 0, sloppy = 0;
 	uid_t uid = (uid_t)-1;
 	gid_t gid = (gid_t)-1;
-	mode_t fmask = (mode_t)-1, dmask = (mode_t)-1;
+	umode_t fmask = (umode_t)-1, dmask = (umode_t)-1;
 	int mft_zone_multiplier = -1, on_errors = -1;
 	int show_sys_files = -1, case_sensitive = -1, disable_sparse = -1;
 	struct nls_table *nls_map = NULL, *old_nls;
@@ -287,9 +287,9 @@
 		vol->uid = uid;
 	if (gid != (gid_t)-1)
 		vol->gid = gid;
-	if (fmask != (mode_t)-1)
+	if (fmask != (umode_t)-1)
 		vol->fmask = fmask;
-	if (dmask != (mode_t)-1)
+	if (dmask != (umode_t)-1)
 		vol->dmask = dmask;
 	if (show_sys_files != -1) {
 		if (show_sys_files)
diff --git a/fs/ntfs/volume.h b/fs/ntfs/volume.h
index 406ab55..15e3ba8 100644
--- a/fs/ntfs/volume.h
+++ b/fs/ntfs/volume.h
@@ -48,8 +48,8 @@
 	unsigned long flags;		/* Miscellaneous flags, see below. */
 	uid_t uid;			/* uid that files will be mounted as. */
 	gid_t gid;			/* gid that files will be mounted as. */
-	mode_t fmask;			/* The mask for file permissions. */
-	mode_t dmask;			/* The mask for directory
+	umode_t fmask;			/* The mask for file permissions. */
+	umode_t dmask;			/* The mask for directory
 					   permissions. */
 	u8 mft_zone_multiplier;		/* Initial mft zone multiplier. */
 	u8 on_errors;			/* What to do on filesystem errors. */
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index dc45deb..73ba819 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -553,7 +553,7 @@
 
 int o2net_debugfs_init(void)
 {
-	mode_t mode = S_IFREG|S_IRUSR;
+	umode_t mode = S_IFREG|S_IRUSR;
 
 	o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
 	if (o2net_dentry)
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index b420767..abfac0d 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -354,7 +354,6 @@
 static void dlmfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
 }
 
@@ -401,16 +400,14 @@
 static struct inode *dlmfs_get_root_inode(struct super_block *sb)
 {
 	struct inode *inode = new_inode(sb);
-	int mode = S_IFDIR | 0755;
+	umode_t mode = S_IFDIR | 0755;
 	struct dlmfs_inode_private *ip;
 
 	if (inode) {
 		ip = DLMFS_I(inode);
 
 		inode->i_ino = get_next_ino();
-		inode->i_mode = mode;
-		inode->i_uid = current_fsuid();
-		inode->i_gid = current_fsgid();
+		inode_init_owner(inode, NULL, mode);
 		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inc_nlink(inode);
@@ -424,7 +421,7 @@
 
 static struct inode *dlmfs_get_inode(struct inode *parent,
 				     struct dentry *dentry,
-				     int mode)
+				     umode_t mode)
 {
 	struct super_block *sb = parent->i_sb;
 	struct inode * inode = new_inode(sb);
@@ -434,9 +431,7 @@
 		return NULL;
 
 	inode->i_ino = get_next_ino();
-	inode->i_mode = mode;
-	inode->i_uid = current_fsuid();
-	inode->i_gid = current_fsgid();
+	inode_init_owner(inode, parent, mode);
 	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
@@ -473,13 +468,6 @@
 		inc_nlink(inode);
 		break;
 	}
-
-	if (parent->i_mode & S_ISGID) {
-		inode->i_gid = parent->i_gid;
-		if (S_ISDIR(mode))
-			inode->i_mode |= S_ISGID;
-	}
-
 	return inode;
 }
 
@@ -489,7 +477,7 @@
 /* SMP-safe */
 static int dlmfs_mkdir(struct inode * dir,
 		       struct dentry * dentry,
-		       int mode)
+		       umode_t mode)
 {
 	int status;
 	struct inode *inode = NULL;
@@ -537,7 +525,7 @@
 
 static int dlmfs_create(struct inode *dir,
 			struct dentry *dentry,
-			int mode,
+			umode_t mode,
 			struct nameidata *nd)
 {
 	int status = 0;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 6e39668..061591a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2128,7 +2128,7 @@
 		 * remove_suid() calls ->setattr without any hint that
 		 * we may have already done our cluster locking. Since
 		 * ocfs2_setattr() *must* take cluster locks to
-		 * proceeed, this will lead us to recursively lock the
+		 * proceed, this will lead us to recursively lock the
 		 * inode. There's also the dinode i_size state which
 		 * can be lost via setattr during extending writes (we
 		 * set inode->i_size at the end of a write. */
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 726ff26..a6fda3c 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -906,12 +906,12 @@
 		if (get_user(flags, (int __user *) arg))
 			return -EFAULT;
 
-		status = mnt_want_write(filp->f_path.mnt);
+		status = mnt_want_write_file(filp);
 		if (status)
 			return status;
 		status = ocfs2_set_inode_attr(inode, flags,
 			OCFS2_FL_MODIFIABLE);
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		return status;
 	case OCFS2_IOC_RESVSP:
 	case OCFS2_IOC_RESVSP64:
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 184c76b..b1e3fce 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -1059,7 +1059,7 @@
 	struct ocfs2_move_extents range;
 	struct ocfs2_move_extents_context *context = NULL;
 
-	status = mnt_want_write(filp->f_path.mnt);
+	status = mnt_want_write_file(filp);
 	if (status)
 		return status;
 
@@ -1145,7 +1145,7 @@
 
 	kfree(context);
 
-	mnt_drop_write(filp->f_path.mnt);
+	mnt_drop_write_file(filp);
 
 	return status;
 }
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a8b2bfe..be24469 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -185,7 +185,7 @@
 	return ret;
 }
 
-static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode)
+static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
 {
 	struct inode *inode;
 
@@ -207,7 +207,7 @@
 
 static int ocfs2_mknod(struct inode *dir,
 		       struct dentry *dentry,
-		       int mode,
+		       umode_t mode,
 		       dev_t dev)
 {
 	int status = 0;
@@ -602,7 +602,7 @@
 
 static int ocfs2_mkdir(struct inode *dir,
 		       struct dentry *dentry,
-		       int mode)
+		       umode_t mode)
 {
 	int ret;
 
@@ -617,7 +617,7 @@
 
 static int ocfs2_create(struct inode *dir,
 			struct dentry *dentry,
-			int mode,
+			umode_t mode,
 			struct nameidata *nd)
 {
 	int ret;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4994f8b..604e12c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -108,7 +108,7 @@
 			       int is_remount);
 static int ocfs2_check_set_options(struct super_block *sb,
 				   struct mount_options *options);
-static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt);
+static int ocfs2_show_options(struct seq_file *s, struct dentry *root);
 static void ocfs2_put_super(struct super_block *sb);
 static int ocfs2_mount_volume(struct super_block *sb);
 static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
@@ -569,7 +569,6 @@
 static void ocfs2_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
 }
 
@@ -1534,9 +1533,9 @@
 	return status;
 }
 
-static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
 {
-	struct ocfs2_super *osb = OCFS2_SB(mnt->mnt_sb);
+	struct ocfs2_super *osb = OCFS2_SB(root->d_sb);
 	unsigned long opts = osb->s_mount_opt;
 	unsigned int local_alloc_megs;
 
@@ -1568,8 +1567,7 @@
 	if (osb->preferred_slot != OCFS2_INVALID_SLOT)
 		seq_printf(s, ",preferred_slot=%d", osb->preferred_slot);
 
-	if (!(mnt->mnt_flags & MNT_NOATIME) && !(mnt->mnt_flags & MNT_RELATIME))
-		seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
+	seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
 
 	if (osb->osb_commit_interval)
 		seq_printf(s, ",commit=%u",
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index aa9e877..0ba9ea1 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -623,7 +623,7 @@
 
 int ocfs2_calc_xattr_init(struct inode *dir,
 			  struct buffer_head *dir_bh,
-			  int mode,
+			  umode_t mode,
 			  struct ocfs2_security_xattr_info *si,
 			  int *want_clusters,
 			  int *xattr_credits,
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index d63cfb7..e5c7f15 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -68,7 +68,7 @@
 			     struct ocfs2_security_xattr_info *,
 			     int *, int *, struct ocfs2_alloc_context **);
 int ocfs2_calc_xattr_init(struct inode *, struct buffer_head *,
-			  int, struct ocfs2_security_xattr_info *,
+			  umode_t, struct ocfs2_security_xattr_info *,
 			  int *, int *, int *);
 
 /*
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 98e5442..f00576e 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -255,7 +255,7 @@
 	return 0;
 }
 
-static int omfs_add_node(struct inode *dir, struct dentry *dentry, int mode)
+static int omfs_add_node(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int err;
 	struct inode *inode = omfs_new_inode(dir, mode);
@@ -279,12 +279,12 @@
 	return err;
 }
 
-static int omfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	return omfs_add_node(dir, dentry, mode | S_IFDIR);
 }
 
-static int omfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	return omfs_add_node(dir, dentry, mode | S_IFREG);
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index e043c4c..6065bb0 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -28,7 +28,7 @@
 	return sb_bread(sb, clus_to_blk(sbi, block));
 }
 
-struct inode *omfs_new_inode(struct inode *dir, int mode)
+struct inode *omfs_new_inode(struct inode *dir, umode_t mode)
 {
 	struct inode *inode;
 	u64 new_block;
diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h
index 7d414fe..8941f12 100644
--- a/fs/omfs/omfs.h
+++ b/fs/omfs/omfs.h
@@ -60,7 +60,7 @@
 /* inode.c */
 extern struct buffer_head *omfs_bread(struct super_block *sb, sector_t block);
 extern struct inode *omfs_iget(struct super_block *sb, ino_t inode);
-extern struct inode *omfs_new_inode(struct inode *dir, int mode);
+extern struct inode *omfs_new_inode(struct inode *dir, umode_t mode);
 extern int omfs_reserve_block(struct super_block *sb, sector_t block);
 extern int omfs_find_empty_block(struct super_block *sb, int mode, ino_t *ino);
 extern int omfs_sync_inode(struct inode *inode);
diff --git a/fs/open.c b/fs/open.c
index 22c41b5..77becc0 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -456,7 +456,7 @@
 	if (error)
 		return error;
 	mutex_lock(&inode->i_mutex);
-	error = security_path_chmod(path->dentry, path->mnt, mode);
+	error = security_path_chmod(path, mode);
 	if (error)
 		goto out_unlock;
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
@@ -468,7 +468,7 @@
 	return error;
 }
 
-SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
+SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
 {
 	struct file * file;
 	int err = -EBADF;
@@ -482,7 +482,7 @@
 	return err;
 }
 
-SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
+SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode)
 {
 	struct path path;
 	int error;
@@ -495,7 +495,7 @@
 	return error;
 }
 
-SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
+SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
 {
 	return sys_fchmodat(AT_FDCWD, filename, mode);
 }
@@ -608,7 +608,7 @@
 	dentry = file->f_path.dentry;
 	audit_inode(NULL, dentry);
 	error = chown_common(&file->f_path, user, group);
-	mnt_drop_write(file->f_path.mnt);
+	mnt_drop_write_file(file);
 out_fput:
 	fput(file);
 out:
@@ -877,7 +877,7 @@
 
 EXPORT_SYMBOL(fd_install);
 
-static inline int build_open_flags(int flags, int mode, struct open_flags *op)
+static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
 {
 	int lookup_flags = 0;
 	int acc_mode;
@@ -948,7 +948,7 @@
  * have to.  But in generally you should not do this, so please move
  * along, nothing to see here..
  */
-struct file *filp_open(const char *filename, int flags, int mode)
+struct file *filp_open(const char *filename, int flags, umode_t mode)
 {
 	struct open_flags op;
 	int lookup = build_open_flags(flags, mode, &op);
@@ -970,7 +970,7 @@
 }
 EXPORT_SYMBOL(file_open_root);
 
-long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
+long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
 {
 	struct open_flags op;
 	int lookup = build_open_flags(flags, mode, &op);
@@ -994,7 +994,7 @@
 	return fd;
 }
 
-SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, int, mode)
+SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
 {
 	long ret;
 
@@ -1008,7 +1008,7 @@
 }
 
 SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
-		int, mode)
+		umode_t, mode)
 {
 	long ret;
 
@@ -1027,7 +1027,7 @@
  * For backward compatibility?  Maybe this should be moved
  * into arch/i386 instead?
  */
-SYSCALL_DEFINE2(creat, const char __user *, pathname, int, mode)
+SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
 {
 	return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
 }
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index e4e0ff7..a88c03b 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -346,7 +346,6 @@
 static void openprom_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(op_inode_cachep, OP_I(inode));
 }
 
diff --git a/fs/pipe.c b/fs/pipe.c
index 4065f07..f0e485d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1290,11 +1290,4 @@
 	return err;
 }
 
-static void __exit exit_pipe_fs(void)
-{
-	kern_unmount(pipe_mnt);
-	unregister_filesystem(&pipe_fs_type);
-}
-
 fs_initcall(init_pipe_fs);
-module_exit(exit_pipe_fs);
diff --git a/fs/pnode.c b/fs/pnode.c
index d42514e..ab5fa9e 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -13,45 +13,30 @@
 #include "pnode.h"
 
 /* return the next shared peer mount of @p */
-static inline struct vfsmount *next_peer(struct vfsmount *p)
+static inline struct mount *next_peer(struct mount *p)
 {
-	return list_entry(p->mnt_share.next, struct vfsmount, mnt_share);
+	return list_entry(p->mnt_share.next, struct mount, mnt_share);
 }
 
-static inline struct vfsmount *first_slave(struct vfsmount *p)
+static inline struct mount *first_slave(struct mount *p)
 {
-	return list_entry(p->mnt_slave_list.next, struct vfsmount, mnt_slave);
+	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
 }
 
-static inline struct vfsmount *next_slave(struct vfsmount *p)
+static inline struct mount *next_slave(struct mount *p)
 {
-	return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave);
+	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
 }
 
-/*
- * Return true if path is reachable from root
- *
- * namespace_sem is held, and mnt is attached
- */
-static bool is_path_reachable(struct vfsmount *mnt, struct dentry *dentry,
-			 const struct path *root)
+static struct mount *get_peer_under_root(struct mount *mnt,
+					 struct mnt_namespace *ns,
+					 const struct path *root)
 {
-	while (mnt != root->mnt && mnt->mnt_parent != mnt) {
-		dentry = mnt->mnt_mountpoint;
-		mnt = mnt->mnt_parent;
-	}
-	return mnt == root->mnt && is_subdir(dentry, root->dentry);
-}
-
-static struct vfsmount *get_peer_under_root(struct vfsmount *mnt,
-					    struct mnt_namespace *ns,
-					    const struct path *root)
-{
-	struct vfsmount *m = mnt;
+	struct mount *m = mnt;
 
 	do {
 		/* Check the namespace first for optimization */
-		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt_root, root))
+		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
 			return m;
 
 		m = next_peer(m);
@@ -66,12 +51,12 @@
  *
  * Caller must hold namespace_sem
  */
-int get_dominating_id(struct vfsmount *mnt, const struct path *root)
+int get_dominating_id(struct mount *mnt, const struct path *root)
 {
-	struct vfsmount *m;
+	struct mount *m;
 
 	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
-		struct vfsmount *d = get_peer_under_root(m, mnt->mnt_ns, root);
+		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
 		if (d)
 			return d->mnt_group_id;
 	}
@@ -79,10 +64,10 @@
 	return 0;
 }
 
-static int do_make_slave(struct vfsmount *mnt)
+static int do_make_slave(struct mount *mnt)
 {
-	struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master;
-	struct vfsmount *slave_mnt;
+	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
+	struct mount *slave_mnt;
 
 	/*
 	 * slave 'mnt' to a peer mount that has the
@@ -90,7 +75,7 @@
 	 * slave it to anything that is available.
 	 */
 	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
-	       peer_mnt->mnt_root != mnt->mnt_root) ;
+	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;
 
 	if (peer_mnt == mnt) {
 		peer_mnt = next_peer(mnt);
@@ -116,7 +101,7 @@
 		struct list_head *p = &mnt->mnt_slave_list;
 		while (!list_empty(p)) {
                         slave_mnt = list_first_entry(p,
-					struct vfsmount, mnt_slave);
+					struct mount, mnt_slave);
 			list_del_init(&slave_mnt->mnt_slave);
 			slave_mnt->mnt_master = NULL;
 		}
@@ -129,7 +114,7 @@
 /*
  * vfsmount lock must be held for write
  */
-void change_mnt_propagation(struct vfsmount *mnt, int type)
+void change_mnt_propagation(struct mount *mnt, int type)
 {
 	if (type == MS_SHARED) {
 		set_mnt_shared(mnt);
@@ -140,9 +125,9 @@
 		list_del_init(&mnt->mnt_slave);
 		mnt->mnt_master = NULL;
 		if (type == MS_UNBINDABLE)
-			mnt->mnt_flags |= MNT_UNBINDABLE;
+			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
 		else
-			mnt->mnt_flags &= ~MNT_UNBINDABLE;
+			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
 	}
 }
 
@@ -156,20 +141,19 @@
  * vfsmount found while iterating with propagation_next() is
  * a peer of one we'd found earlier.
  */
-static struct vfsmount *propagation_next(struct vfsmount *m,
-					 struct vfsmount *origin)
+static struct mount *propagation_next(struct mount *m,
+					 struct mount *origin)
 {
 	/* are there any slaves of this mount? */
 	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
 		return first_slave(m);
 
 	while (1) {
-		struct vfsmount *next;
-		struct vfsmount *master = m->mnt_master;
+		struct mount *master = m->mnt_master;
 
 		if (master == origin->mnt_master) {
-			next = next_peer(m);
-			return ((next == origin) ? NULL : next);
+			struct mount *next = next_peer(m);
+			return (next == origin) ? NULL : next;
 		} else if (m->mnt_slave.next != &master->mnt_slave_list)
 			return next_slave(m);
 
@@ -187,13 +171,13 @@
  * @type	return CL_SLAVE if the new mount has to be
  * 		cloned as a slave.
  */
-static struct vfsmount *get_source(struct vfsmount *dest,
-					struct vfsmount *last_dest,
-					struct vfsmount *last_src,
-					int *type)
+static struct mount *get_source(struct mount *dest,
+				struct mount *last_dest,
+				struct mount *last_src,
+				int *type)
 {
-	struct vfsmount *p_last_src = NULL;
-	struct vfsmount *p_last_dest = NULL;
+	struct mount *p_last_src = NULL;
+	struct mount *p_last_dest = NULL;
 
 	while (last_dest != dest->mnt_master) {
 		p_last_dest = last_dest;
@@ -233,33 +217,33 @@
  * @source_mnt: source mount.
  * @tree_list : list of heads of trees to be attached.
  */
-int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
-		    struct vfsmount *source_mnt, struct list_head *tree_list)
+int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
+		    struct mount *source_mnt, struct list_head *tree_list)
 {
-	struct vfsmount *m, *child;
+	struct mount *m, *child;
 	int ret = 0;
-	struct vfsmount *prev_dest_mnt = dest_mnt;
-	struct vfsmount *prev_src_mnt  = source_mnt;
+	struct mount *prev_dest_mnt = dest_mnt;
+	struct mount *prev_src_mnt  = source_mnt;
 	LIST_HEAD(tmp_list);
 	LIST_HEAD(umount_list);
 
 	for (m = propagation_next(dest_mnt, dest_mnt); m;
 			m = propagation_next(m, dest_mnt)) {
 		int type;
-		struct vfsmount *source;
+		struct mount *source;
 
 		if (IS_MNT_NEW(m))
 			continue;
 
 		source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);
 
-		if (!(child = copy_tree(source, source->mnt_root, type))) {
+		if (!(child = copy_tree(source, source->mnt.mnt_root, type))) {
 			ret = -ENOMEM;
 			list_splice(tree_list, tmp_list.prev);
 			goto out;
 		}
 
-		if (is_subdir(dest_dentry, m->mnt_root)) {
+		if (is_subdir(dest_dentry, m->mnt.mnt_root)) {
 			mnt_set_mountpoint(m, dest_dentry, child);
 			list_add_tail(&child->mnt_hash, tree_list);
 		} else {
@@ -275,7 +259,7 @@
 out:
 	br_write_lock(vfsmount_lock);
 	while (!list_empty(&tmp_list)) {
-		child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
+		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
 		umount_tree(child, 0, &umount_list);
 	}
 	br_write_unlock(vfsmount_lock);
@@ -286,7 +270,7 @@
 /*
  * return true if the refcount is greater than count
  */
-static inline int do_refcount_check(struct vfsmount *mnt, int count)
+static inline int do_refcount_check(struct mount *mnt, int count)
 {
 	int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
 	return (mycount > count);
@@ -302,10 +286,10 @@
  *
  * vfsmount lock must be held for write
  */
-int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
+int propagate_mount_busy(struct mount *mnt, int refcnt)
 {
-	struct vfsmount *m, *child;
-	struct vfsmount *parent = mnt->mnt_parent;
+	struct mount *m, *child;
+	struct mount *parent = mnt->mnt_parent;
 	int ret = 0;
 
 	if (mnt == parent)
@@ -321,7 +305,7 @@
 
 	for (m = propagation_next(parent, parent); m;
 	     		m = propagation_next(m, parent)) {
-		child = __lookup_mnt(m, mnt->mnt_mountpoint, 0);
+		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
 		if (child && list_empty(&child->mnt_mounts) &&
 		    (ret = do_refcount_check(child, 1)))
 			break;
@@ -333,17 +317,17 @@
  * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
  * parent propagates to.
  */
-static void __propagate_umount(struct vfsmount *mnt)
+static void __propagate_umount(struct mount *mnt)
 {
-	struct vfsmount *parent = mnt->mnt_parent;
-	struct vfsmount *m;
+	struct mount *parent = mnt->mnt_parent;
+	struct mount *m;
 
 	BUG_ON(parent == mnt);
 
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
 
-		struct vfsmount *child = __lookup_mnt(m,
+		struct mount *child = __lookup_mnt(&m->mnt,
 					mnt->mnt_mountpoint, 0);
 		/*
 		 * umount the child only if the child has no
@@ -363,7 +347,7 @@
  */
 int propagate_umount(struct list_head *list)
 {
-	struct vfsmount *mnt;
+	struct mount *mnt;
 
 	list_for_each_entry(mnt, list, mnt_hash)
 		__propagate_umount(mnt);
diff --git a/fs/pnode.h b/fs/pnode.h
index 1ea4ae1..65c6097 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -9,13 +9,13 @@
 #define _LINUX_PNODE_H
 
 #include <linux/list.h>
-#include <linux/mount.h>
+#include "mount.h"
 
-#define IS_MNT_SHARED(mnt) (mnt->mnt_flags & MNT_SHARED)
-#define IS_MNT_SLAVE(mnt) (mnt->mnt_master)
-#define IS_MNT_NEW(mnt)  (!mnt->mnt_ns)
-#define CLEAR_MNT_SHARED(mnt) (mnt->mnt_flags &= ~MNT_SHARED)
-#define IS_MNT_UNBINDABLE(mnt) (mnt->mnt_flags & MNT_UNBINDABLE)
+#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
+#define IS_MNT_SLAVE(m) ((m)->mnt_master)
+#define IS_MNT_NEW(m)  (!(m)->mnt_ns)
+#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
+#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
 
 #define CL_EXPIRE    		0x01
 #define CL_SLAVE     		0x02
@@ -23,17 +23,25 @@
 #define CL_MAKE_SHARED 		0x08
 #define CL_PRIVATE 		0x10
 
-static inline void set_mnt_shared(struct vfsmount *mnt)
+static inline void set_mnt_shared(struct mount *mnt)
 {
-	mnt->mnt_flags &= ~MNT_SHARED_MASK;
-	mnt->mnt_flags |= MNT_SHARED;
+	mnt->mnt.mnt_flags &= ~MNT_SHARED_MASK;
+	mnt->mnt.mnt_flags |= MNT_SHARED;
 }
 
-void change_mnt_propagation(struct vfsmount *, int);
-int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *,
+void change_mnt_propagation(struct mount *, int);
+int propagate_mnt(struct mount *, struct dentry *, struct mount *,
 		struct list_head *);
 int propagate_umount(struct list_head *);
-int propagate_mount_busy(struct vfsmount *, int);
-void mnt_release_group_id(struct vfsmount *);
-int get_dominating_id(struct vfsmount *mnt, const struct path *root);
+int propagate_mount_busy(struct mount *, int);
+void mnt_release_group_id(struct mount *);
+int get_dominating_id(struct mount *mnt, const struct path *root);
+unsigned int mnt_get_count(struct mount *mnt);
+void mnt_set_mountpoint(struct mount *, struct dentry *,
+			struct mount *);
+void release_mounts(struct list_head *);
+void umount_tree(struct mount *, int, struct list_head *);
+struct mount *copy_tree(struct mount *, struct dentry *, int);
+bool is_path_reachable(struct mount *, struct dentry *,
+			 const struct path *root);
 #endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 3a1dafd..8c344f0 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -394,8 +394,8 @@
 
 	sigemptyset(&sigign);
 	sigemptyset(&sigcatch);
-	cutime = cstime = utime = stime = cputime_zero;
-	cgtime = gtime = cputime_zero;
+	cutime = cstime = utime = stime = 0;
+	cgtime = gtime = 0;
 
 	if (lock_task_sighand(task, &flags)) {
 		struct signal_struct *sig = task->signal;
@@ -423,14 +423,14 @@
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				gtime = cputime_add(gtime, t->gtime);
+				gtime += t->gtime;
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
 			thread_group_times(task, &utime, &stime);
-			gtime = cputime_add(gtime, sig->gtime);
+			gtime += sig->gtime;
 		}
 
 		sid = task_session_nr_ns(task, ns);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 851ba3d..a1dddda 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -101,7 +101,7 @@
 struct pid_entry {
 	char *name;
 	int len;
-	mode_t mode;
+	umode_t mode;
 	const struct inode_operations *iop;
 	const struct file_operations *fop;
 	union proc_op op;
@@ -631,120 +631,6 @@
 	.setattr	= proc_setattr,
 };
 
-static int mounts_open_common(struct inode *inode, struct file *file,
-			      const struct seq_operations *op)
-{
-	struct task_struct *task = get_proc_task(inode);
-	struct nsproxy *nsp;
-	struct mnt_namespace *ns = NULL;
-	struct path root;
-	struct proc_mounts *p;
-	int ret = -EINVAL;
-
-	if (task) {
-		rcu_read_lock();
-		nsp = task_nsproxy(task);
-		if (nsp) {
-			ns = nsp->mnt_ns;
-			if (ns)
-				get_mnt_ns(ns);
-		}
-		rcu_read_unlock();
-		if (ns && get_task_root(task, &root) == 0)
-			ret = 0;
-		put_task_struct(task);
-	}
-
-	if (!ns)
-		goto err;
-	if (ret)
-		goto err_put_ns;
-
-	ret = -ENOMEM;
-	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
-	if (!p)
-		goto err_put_path;
-
-	file->private_data = &p->m;
-	ret = seq_open(file, op);
-	if (ret)
-		goto err_free;
-
-	p->m.private = p;
-	p->ns = ns;
-	p->root = root;
-	p->m.poll_event = ns->event;
-
-	return 0;
-
- err_free:
-	kfree(p);
- err_put_path:
-	path_put(&root);
- err_put_ns:
-	put_mnt_ns(ns);
- err:
-	return ret;
-}
-
-static int mounts_release(struct inode *inode, struct file *file)
-{
-	struct proc_mounts *p = file->private_data;
-	path_put(&p->root);
-	put_mnt_ns(p->ns);
-	return seq_release(inode, file);
-}
-
-static unsigned mounts_poll(struct file *file, poll_table *wait)
-{
-	struct proc_mounts *p = file->private_data;
-	unsigned res = POLLIN | POLLRDNORM;
-
-	poll_wait(file, &p->ns->poll, wait);
-	if (mnt_had_events(p))
-		res |= POLLERR | POLLPRI;
-
-	return res;
-}
-
-static int mounts_open(struct inode *inode, struct file *file)
-{
-	return mounts_open_common(inode, file, &mounts_op);
-}
-
-static const struct file_operations proc_mounts_operations = {
-	.open		= mounts_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= mounts_release,
-	.poll		= mounts_poll,
-};
-
-static int mountinfo_open(struct inode *inode, struct file *file)
-{
-	return mounts_open_common(inode, file, &mountinfo_op);
-}
-
-static const struct file_operations proc_mountinfo_operations = {
-	.open		= mountinfo_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= mounts_release,
-	.poll		= mounts_poll,
-};
-
-static int mountstats_open(struct inode *inode, struct file *file)
-{
-	return mounts_open_common(inode, file, &mountstats_op);
-}
-
-static const struct file_operations proc_mountstats_operations = {
-	.open		= mountstats_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= mounts_release,
-};
-
 #define PROC_BLOCK_SIZE	(3*1024)		/* 4K page size but our output routines use some slack for overruns */
 
 static ssize_t proc_info_read(struct file * file, char __user * buf,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 10090d9..2edf34f 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -597,7 +597,7 @@
 
 static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
 					  const char *name,
-					  mode_t mode,
+					  umode_t mode,
 					  nlink_t nlink)
 {
 	struct proc_dir_entry *ent = NULL;
@@ -659,7 +659,7 @@
 }
 EXPORT_SYMBOL(proc_symlink);
 
-struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
+struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
 		struct proc_dir_entry *parent)
 {
 	struct proc_dir_entry *ent;
@@ -699,7 +699,7 @@
 }
 EXPORT_SYMBOL(proc_mkdir);
 
-struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
 					 struct proc_dir_entry *parent)
 {
 	struct proc_dir_entry *ent;
@@ -728,7 +728,7 @@
 }
 EXPORT_SYMBOL(create_proc_entry);
 
-struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
 					struct proc_dir_entry *parent,
 					const struct file_operations *proc_fops,
 					void *data)
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 7737c54..51a1766 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -77,7 +77,6 @@
 static void proc_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
 }
 
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index be177f7..27da860 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -9,7 +9,6 @@
 #include <linux/file.h>
 #include <linux/utsname.h>
 #include <net/net_namespace.h>
-#include <linux/mnt_namespace.h>
 #include <linux/ipc_namespace.h>
 #include <linux/pid_namespace.h>
 #include "internal.h"
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index f738024..06e1cc1 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -179,7 +179,7 @@
 
 
 struct proc_dir_entry *proc_net_fops_create(struct net *net,
-	const char *name, mode_t mode, const struct file_operations *fops)
+	const char *name, umode_t mode, const struct file_operations *fops)
 {
 	return proc_create(name, mode, net->proc_net, fops);
 }
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9a8a2b7..03102d9 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -91,20 +91,18 @@
 
 void __init proc_root_init(void)
 {
-	struct vfsmount *mnt;
 	int err;
 
 	proc_init_inodecache();
 	err = register_filesystem(&proc_fs_type);
 	if (err)
 		return;
-	mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
-	if (IS_ERR(mnt)) {
+	err = pid_ns_prepare_proc(&init_pid_ns);
+	if (err) {
 		unregister_filesystem(&proc_fs_type);
 		return;
 	}
 
-	init_pid_ns.proc_mnt = mnt;
 	proc_symlink("mounts", NULL, "self/mounts");
 
 	proc_net_init();
@@ -209,5 +207,5 @@
 
 void pid_ns_release_proc(struct pid_namespace *ns)
 {
-	mntput(ns->proc_mnt);
+	kern_unmount(ns->proc_mnt);
 }
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 2a30d67..d76ca6a 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -22,31 +22,29 @@
 #define arch_idle_time(cpu) 0
 #endif
 
-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
 {
-	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-	cputime64_t idle;
+	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
 
 	if (idle_time == -1ULL) {
 		/* !NO_HZ so we can rely on cpustat.idle */
-		idle = kstat_cpu(cpu).cpustat.idle;
-		idle = cputime64_add(idle, arch_idle_time(cpu));
+		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+		idle += arch_idle_time(cpu);
 	} else
-		idle = nsecs_to_jiffies64(1000 * idle_time);
+		idle = usecs_to_cputime64(idle_time);
 
 	return idle;
 }
 
-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
 {
-	u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
-	cputime64_t iowait;
+	u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
 
 	if (iowait_time == -1ULL)
 		/* !NO_HZ so we can rely on cpustat.iowait */
-		iowait = kstat_cpu(cpu).cpustat.iowait;
+		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 	else
-		iowait = nsecs_to_jiffies64(1000 * iowait_time);
+		iowait = usecs_to_cputime64(iowait_time);
 
 	return iowait;
 }
@@ -55,33 +53,30 @@
 {
 	int i, j;
 	unsigned long jif;
-	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
-	cputime64_t guest, guest_nice;
+	u64 user, nice, system, idle, iowait, irq, softirq, steal;
+	u64 guest, guest_nice;
 	u64 sum = 0;
 	u64 sum_softirq = 0;
 	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
 	struct timespec boottime;
 
 	user = nice = system = idle = iowait =
-		irq = softirq = steal = cputime64_zero;
-	guest = guest_nice = cputime64_zero;
+		irq = softirq = steal = 0;
+	guest = guest_nice = 0;
 	getboottime(&boottime);
 	jif = boottime.tv_sec;
 
 	for_each_possible_cpu(i) {
-		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
-		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
-		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
-		idle = cputime64_add(idle, get_idle_time(i));
-		iowait = cputime64_add(iowait, get_iowait_time(i));
-		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
-		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
-		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-		guest_nice = cputime64_add(guest_nice,
-			kstat_cpu(i).cpustat.guest_nice);
-		sum += kstat_cpu_irqs_sum(i);
-		sum += arch_irq_stat_cpu(i);
+		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
+		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+		idle += get_idle_time(i);
+		iowait += get_iowait_time(i);
+		irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+		softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+		steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+		guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+		guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
 
 		for (j = 0; j < NR_SOFTIRQS; j++) {
 			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
@@ -106,16 +101,16 @@
 		(unsigned long long)cputime64_to_clock_t(guest_nice));
 	for_each_online_cpu(i) {
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
-		user = kstat_cpu(i).cpustat.user;
-		nice = kstat_cpu(i).cpustat.nice;
-		system = kstat_cpu(i).cpustat.system;
+		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
+		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+		system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
 		idle = get_idle_time(i);
 		iowait = get_iowait_time(i);
-		irq = kstat_cpu(i).cpustat.irq;
-		softirq = kstat_cpu(i).cpustat.softirq;
-		steal = kstat_cpu(i).cpustat.steal;
-		guest = kstat_cpu(i).cpustat.guest;
-		guest_nice = kstat_cpu(i).cpustat.guest_nice;
+		irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+		softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+		steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
 		seq_printf(p,
 			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
 			"%llu\n",
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 766b1d4..9610ac7 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -11,15 +11,20 @@
 {
 	struct timespec uptime;
 	struct timespec idle;
+	u64 idletime;
+	u64 nsec;
+	u32 rem;
 	int i;
-	cputime_t idletime = cputime_zero;
 
+	idletime = 0;
 	for_each_possible_cpu(i)
-		idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+		idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
 
 	do_posix_clock_monotonic_gettime(&uptime);
 	monotonic_to_bootbased(&uptime);
-	cputime_to_timespec(idletime, &idle);
+	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
+	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+	idle.tv_nsec = rem;
 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
 			(unsigned long) uptime.tv_sec,
 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
new file mode 100644
index 0000000..1241285
--- /dev/null
+++ b/fs/proc_namespace.c
@@ -0,0 +1,333 @@
+/*
+ * fs/proc_namespace.c - handling of /proc/<pid>/{mounts,mountinfo,mountstats}
+ *
+ * In fact, that's a piece of procfs; it's *almost* isolated from
+ * the rest of fs/proc, but has rather close relationships with
+ * fs/namespace.c, thus here instead of fs/proc
+ *
+ */
+#include <linux/mnt_namespace.h>
+#include <linux/nsproxy.h>
+#include <linux/security.h>
+#include <linux/fs_struct.h>
+#include "proc/internal.h" /* only for get_proc_task() in ->open() */
+
+#include "pnode.h"
+#include "internal.h"
+
+static unsigned mounts_poll(struct file *file, poll_table *wait)
+{
+	struct proc_mounts *p = file->private_data;
+	struct mnt_namespace *ns = p->ns;
+	unsigned res = POLLIN | POLLRDNORM;
+
+	poll_wait(file, &p->ns->poll, wait);
+
+	br_read_lock(vfsmount_lock);
+	if (p->m.poll_event != ns->event) {
+		p->m.poll_event = ns->event;
+		res |= POLLERR | POLLPRI;
+	}
+	br_read_unlock(vfsmount_lock);
+
+	return res;
+}
+
+struct proc_fs_info {
+	int flag;
+	const char *str;
+};
+
+static int show_sb_opts(struct seq_file *m, struct super_block *sb)
+{
+	static const struct proc_fs_info fs_info[] = {
+		{ MS_SYNCHRONOUS, ",sync" },
+		{ MS_DIRSYNC, ",dirsync" },
+		{ MS_MANDLOCK, ",mand" },
+		{ 0, NULL }
+	};
+	const struct proc_fs_info *fs_infop;
+
+	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+		if (sb->s_flags & fs_infop->flag)
+			seq_puts(m, fs_infop->str);
+	}
+
+	return security_sb_show_options(m, sb);
+}
+
+static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
+{
+	static const struct proc_fs_info mnt_info[] = {
+		{ MNT_NOSUID, ",nosuid" },
+		{ MNT_NODEV, ",nodev" },
+		{ MNT_NOEXEC, ",noexec" },
+		{ MNT_NOATIME, ",noatime" },
+		{ MNT_NODIRATIME, ",nodiratime" },
+		{ MNT_RELATIME, ",relatime" },
+		{ 0, NULL }
+	};
+	const struct proc_fs_info *fs_infop;
+
+	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+		if (mnt->mnt_flags & fs_infop->flag)
+			seq_puts(m, fs_infop->str);
+	}
+}
+
+static inline void mangle(struct seq_file *m, const char *s)
+{
+	seq_escape(m, s, " \t\n\\");
+}
+
+static void show_type(struct seq_file *m, struct super_block *sb)
+{
+	mangle(m, sb->s_type->name);
+	if (sb->s_subtype && sb->s_subtype[0]) {
+		seq_putc(m, '.');
+		mangle(m, sb->s_subtype);
+	}
+}
+
+static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
+{
+	struct mount *r = real_mount(mnt);
+	int err = 0;
+	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+	struct super_block *sb = mnt_path.dentry->d_sb;
+
+	if (sb->s_op->show_devname) {
+		err = sb->s_op->show_devname(m, mnt_path.dentry);
+		if (err)
+			goto out;
+	} else {
+		mangle(m, r->mnt_devname ? r->mnt_devname : "none");
+	}
+	seq_putc(m, ' ');
+	seq_path(m, &mnt_path, " \t\n\\");
+	seq_putc(m, ' ');
+	show_type(m, sb);
+	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
+	err = show_sb_opts(m, sb);
+	if (err)
+		goto out;
+	show_mnt_opts(m, mnt);
+	if (sb->s_op->show_options)
+		err = sb->s_op->show_options(m, mnt_path.dentry);
+	seq_puts(m, " 0 0\n");
+out:
+	return err;
+}
+
+static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
+{
+	struct proc_mounts *p = m->private;
+	struct mount *r = real_mount(mnt);
+	struct super_block *sb = mnt->mnt_sb;
+	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+	struct path root = p->root;
+	int err = 0;
+
+	seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
+		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
+	if (sb->s_op->show_path)
+		err = sb->s_op->show_path(m, mnt->mnt_root);
+	else
+		seq_dentry(m, mnt->mnt_root, " \t\n\\");
+	if (err)
+		goto out;
+	seq_putc(m, ' ');
+
+	/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+	err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+	if (err)
+		goto out;
+
+	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
+	show_mnt_opts(m, mnt);
+
+	/* Tagged fields ("foo:X" or "bar") */
+	if (IS_MNT_SHARED(r))
+		seq_printf(m, " shared:%i", r->mnt_group_id);
+	if (IS_MNT_SLAVE(r)) {
+		int master = r->mnt_master->mnt_group_id;
+		int dom = get_dominating_id(r, &p->root);
+		seq_printf(m, " master:%i", master);
+		if (dom && dom != master)
+			seq_printf(m, " propagate_from:%i", dom);
+	}
+	if (IS_MNT_UNBINDABLE(r))
+		seq_puts(m, " unbindable");
+
+	/* Filesystem specific data */
+	seq_puts(m, " - ");
+	show_type(m, sb);
+	seq_putc(m, ' ');
+	if (sb->s_op->show_devname)
+		err = sb->s_op->show_devname(m, mnt->mnt_root);
+	else
+		mangle(m, r->mnt_devname ? r->mnt_devname : "none");
+	if (err)
+		goto out;
+	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
+	err = show_sb_opts(m, sb);
+	if (err)
+		goto out;
+	if (sb->s_op->show_options)
+		err = sb->s_op->show_options(m, mnt->mnt_root);
+	seq_putc(m, '\n');
+out:
+	return err;
+}
+
+static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
+{
+	struct mount *r = real_mount(mnt);
+	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+	struct super_block *sb = mnt_path.dentry->d_sb;
+	int err = 0;
+
+	/* device */
+	if (sb->s_op->show_devname) {
+		seq_puts(m, "device ");
+		err = sb->s_op->show_devname(m, mnt_path.dentry);
+	} else {
+		if (r->mnt_devname) {
+			seq_puts(m, "device ");
+			mangle(m, r->mnt_devname);
+		} else
+			seq_puts(m, "no device");
+	}
+
+	/* mount point */
+	seq_puts(m, " mounted on ");
+	seq_path(m, &mnt_path, " \t\n\\");
+	seq_putc(m, ' ');
+
+	/* file system type */
+	seq_puts(m, "with fstype ");
+	show_type(m, sb);
+
+	/* optional statistics */
+	if (sb->s_op->show_stats) {
+		seq_putc(m, ' ');
+		if (!err)
+			err = sb->s_op->show_stats(m, mnt_path.dentry);
+	}
+
+	seq_putc(m, '\n');
+	return err;
+}
+
+static int mounts_open_common(struct inode *inode, struct file *file,
+			      int (*show)(struct seq_file *, struct vfsmount *))
+{
+	struct task_struct *task = get_proc_task(inode);
+	struct nsproxy *nsp;
+	struct mnt_namespace *ns = NULL;
+	struct path root;
+	struct proc_mounts *p;
+	int ret = -EINVAL;
+
+	if (!task)
+		goto err;
+
+	rcu_read_lock();
+	nsp = task_nsproxy(task);
+	if (!nsp) {
+		rcu_read_unlock();
+		put_task_struct(task);
+		goto err;
+	}
+	ns = nsp->mnt_ns;
+	if (!ns) {
+		rcu_read_unlock();
+		put_task_struct(task);
+		goto err;
+	}
+	get_mnt_ns(ns);
+	rcu_read_unlock();
+	task_lock(task);
+	if (!task->fs) {
+		task_unlock(task);
+		put_task_struct(task);
+		ret = -ENOENT;
+		goto err_put_ns;
+	}
+	get_fs_root(task->fs, &root);
+	task_unlock(task);
+	put_task_struct(task);
+
+	ret = -ENOMEM;
+	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+	if (!p)
+		goto err_put_path;
+
+	file->private_data = &p->m;
+	ret = seq_open(file, &mounts_op);
+	if (ret)
+		goto err_free;
+
+	p->m.private = p;
+	p->ns = ns;
+	p->root = root;
+	p->m.poll_event = ns->event;
+	p->show = show;
+
+	return 0;
+
+ err_free:
+	kfree(p);
+ err_put_path:
+	path_put(&root);
+ err_put_ns:
+	put_mnt_ns(ns);
+ err:
+	return ret;
+}
+
+static int mounts_release(struct inode *inode, struct file *file)
+{
+	struct proc_mounts *p = file->private_data;
+	path_put(&p->root);
+	put_mnt_ns(p->ns);
+	return seq_release(inode, file);
+}
+
+static int mounts_open(struct inode *inode, struct file *file)
+{
+	return mounts_open_common(inode, file, show_vfsmnt);
+}
+
+static int mountinfo_open(struct inode *inode, struct file *file)
+{
+	return mounts_open_common(inode, file, show_mountinfo);
+}
+
+static int mountstats_open(struct inode *inode, struct file *file)
+{
+	return mounts_open_common(inode, file, show_vfsstat);
+}
+
+const struct file_operations proc_mounts_operations = {
+	.open		= mounts_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= mounts_release,
+	.poll		= mounts_poll,
+};
+
+const struct file_operations proc_mountinfo_operations = {
+	.open		= mountinfo_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= mounts_release,
+	.poll		= mounts_poll,
+};
+
+const struct file_operations proc_mountstats_operations = {
+	.open		= mountstats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= mounts_release,
+};
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 379a02d..b3b426e 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -80,7 +80,8 @@
 {
 	struct pstore_private *p = dentry->d_inode->i_private;
 
-	p->psi->erase(p->type, p->id, p->psi);
+	if (p->psi->erase)
+		p->psi->erase(p->type, p->id, p->psi);
 
 	return simple_unlink(dir, dentry);
 }
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 57bbf90..9ec22d3 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -122,7 +122,7 @@
 		memcpy(dst, s1 + s1_start, l1_cpy);
 		memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
 
-		ret = psinfo->write(PSTORE_TYPE_DMESG, &id, part,
+		ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
 				   hsize + l1_cpy + l2_cpy, psinfo);
 		if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
 			pstore_new_entry = 1;
@@ -207,8 +207,7 @@
 		return;
 
 	mutex_lock(&psi->read_mutex);
-	rc = psi->open(psi);
-	if (rc)
+	if (psi->open && psi->open(psi))
 		goto out;
 
 	while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
@@ -219,7 +218,8 @@
 		if (rc && (rc != -EEXIST || !quiet))
 			failed++;
 	}
-	psi->close(psi);
+	if (psi->close)
+		psi->close(psi);
 out:
 	mutex_unlock(&psi->read_mutex);
 
@@ -243,33 +243,5 @@
 	mod_timer(&pstore_timer, jiffies + PSTORE_INTERVAL);
 }
 
-/*
- * Call platform driver to write a record to the
- * persistent store.
- */
-int pstore_write(enum pstore_type_id type, char *buf, size_t size)
-{
-	u64		id;
-	int		ret;
-	unsigned long	flags;
-
-	if (!psinfo)
-		return -ENODEV;
-
-	if (size > psinfo->bufsize)
-		return -EFBIG;
-
-	spin_lock_irqsave(&psinfo->buf_lock, flags);
-	memcpy(psinfo->buf, buf, size);
-	ret = psinfo->write(type, &id, 0, size, psinfo);
-	if (ret == 0 && pstore_is_mounted())
-		pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id, psinfo->buf,
-			      size, CURRENT_TIME, psinfo);
-	spin_unlock_irqrestore(&psinfo->buf_lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(pstore_write);
-
 module_param(backend, charp, 0444);
 MODULE_PARM_DESC(backend, "Pstore backend to use");
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 3bdd214..2bfd987 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -199,12 +199,13 @@
 					if (!strcmp(rootdir->di_fname,
 						    QNX4_BMNAME)) {
 						found = 1;
-						qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
+						qnx4_sb(sb)->BitMap = kmemdup(rootdir,
+									      sizeof(struct qnx4_inode_entry),
+									      GFP_KERNEL);
 						if (!qnx4_sb(sb)->BitMap) {
 							brelse (bh);
 							return "not enough memory for bitmap inode";
-						}
-						memcpy( qnx4_sb(sb)->BitMap, rootdir, sizeof( struct qnx4_inode_entry ) );	/* keep bitmap inode known */
+						}/* keep bitmap inode known */
 						break;
 					}
 				}
@@ -427,7 +428,6 @@
 static void qnx4_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
 }
 
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5b572c8..5ec59b2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -73,7 +73,6 @@
 #include <linux/security.h>
 #include <linux/kmod.h>
 #include <linux/namei.h>
-#include <linux/buffer_head.h>
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include "../internal.h" /* ugh */
@@ -2199,7 +2198,7 @@
 	if (error)
 		return error;
 	/* Quota file not on the same filesystem? */
-	if (path->mnt->mnt_sb != sb)
+	if (path->dentry->d_sb != sb)
 		error = -EXDEV;
 	else
 		error = vfs_load_quota_inode(path->dentry->d_inode, type,
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 35f4b0e..7898cd6 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
-#include <linux/buffer_head.h>
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/types.h>
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 462ceb3..aec766a 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -52,7 +52,7 @@
 };
 
 struct inode *ramfs_get_inode(struct super_block *sb,
-				const struct inode *dir, int mode, dev_t dev)
+				const struct inode *dir, umode_t mode, dev_t dev)
 {
 	struct inode * inode = new_inode(sb);
 
@@ -92,7 +92,7 @@
  */
 /* SMP-safe */
 static int
-ramfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ramfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	struct inode * inode = ramfs_get_inode(dir->i_sb, dir, mode, dev);
 	int error = -ENOSPC;
@@ -106,7 +106,7 @@
 	return error;
 }
 
-static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 {
 	int retval = ramfs_mknod(dir, dentry, mode | S_IFDIR, 0);
 	if (!retval)
@@ -114,7 +114,7 @@
 	return retval;
 }
 
-static int ramfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
 {
 	return ramfs_mknod(dir, dentry, mode | S_IFREG, 0);
 }
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index d1aca1df..a945cd2 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -13,6 +13,7 @@
 #include <linux/reiserfs_fs_sb.h>
 #include <linux/reiserfs_fs_i.h>
 #include <linux/quotaops.h>
+#include <linux/seq_file.h>
 
 #define PREALLOCATION_SIZE 9
 
@@ -634,6 +635,96 @@
 	return 0;
 }
 
+static void print_sep(struct seq_file *seq, int *first)
+{
+	if (!*first)
+		seq_puts(seq, ":");
+	else
+		*first = 0;
+}
+
+void show_alloc_options(struct seq_file *seq, struct super_block *s)
+{
+	int first = 1;
+
+	if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) |
+		(1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups)))
+		return;
+
+	seq_puts(seq, ",alloc=");
+
+	if (TEST_OPTION(concentrating_formatted_nodes, s)) {
+		print_sep(seq, &first);
+		if (REISERFS_SB(s)->s_alloc_options.border != 10) {
+			seq_printf(seq, "concentrating_formatted_nodes=%d",
+				100 / REISERFS_SB(s)->s_alloc_options.border);
+		} else
+			seq_puts(seq, "concentrating_formatted_nodes");
+	}
+	if (TEST_OPTION(displacing_large_files, s)) {
+		print_sep(seq, &first);
+		if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) {
+			seq_printf(seq, "displacing_large_files=%lu",
+			    REISERFS_SB(s)->s_alloc_options.large_file_size);
+		} else
+			seq_puts(seq, "displacing_large_files");
+	}
+	if (TEST_OPTION(displacing_new_packing_localities, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "displacing_new_packing_localities");
+	}
+	if (TEST_OPTION(old_hashed_relocation, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "old_hashed_relocation");
+	}
+	if (TEST_OPTION(new_hashed_relocation, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "new_hashed_relocation");
+	}
+	if (TEST_OPTION(dirid_groups, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "dirid_groups");
+	}
+	if (TEST_OPTION(oid_groups, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "oid_groups");
+	}
+	if (TEST_OPTION(packing_groups, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "packing_groups");
+	}
+	if (TEST_OPTION(hashed_formatted_nodes, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "hashed_formatted_nodes");
+	}
+	if (TEST_OPTION(skip_busy, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "skip_busy");
+	}
+	if (TEST_OPTION(hundredth_slices, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "hundredth_slices");
+	}
+	if (TEST_OPTION(old_way, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "old_way");
+	}
+	if (TEST_OPTION(displace_based_on_dirid, s)) {
+		print_sep(seq, &first);
+		seq_puts(seq, "displace_based_on_dirid");
+	}
+	if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) {
+		print_sep(seq, &first);
+		seq_printf(seq, "preallocmin=%d",
+				REISERFS_SB(s)->s_alloc_options.preallocmin);
+	}
+	if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) {
+		print_sep(seq, &first);
+		seq_printf(seq, "preallocsize=%d",
+				REISERFS_SB(s)->s_alloc_options.preallocsize);
+	}
+}
+
 static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
 {
 	char *hash_in;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 950f13a..9e8cd5a 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1766,7 +1766,7 @@
    for the fresh inode.  This can only be done outside a transaction, so
    if we return non-zero, we also end the transaction.  */
 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
-		       struct inode *dir, int mode, const char *symname,
+		       struct inode *dir, umode_t mode, const char *symname,
 		       /* 0 for regular, EMTRY_DIR_SIZE for dirs,
 		          strlen (symname) for symlinks) */
 		       loff_t i_size, struct dentry *dentry,
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 4e15305..950e3d1 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -55,7 +55,7 @@
 				break;
 			}
 
-			err = mnt_want_write(filp->f_path.mnt);
+			err = mnt_want_write_file(filp);
 			if (err)
 				break;
 
@@ -96,7 +96,7 @@
 			inode->i_ctime = CURRENT_TIME_SEC;
 			mark_inode_dirty(inode);
 setflags_out:
-			mnt_drop_write(filp->f_path.mnt);
+			mnt_drop_write_file(filp);
 			break;
 		}
 	case REISERFS_IOC_GETVERSION:
@@ -107,7 +107,7 @@
 			err = -EPERM;
 			break;
 		}
-		err = mnt_want_write(filp->f_path.mnt);
+		err = mnt_want_write_file(filp);
 		if (err)
 			break;
 		if (get_user(inode->i_generation, (int __user *)arg)) {
@@ -117,7 +117,7 @@
 		inode->i_ctime = CURRENT_TIME_SEC;
 		mark_inode_dirty(inode);
 setversion_out:
-		mnt_drop_write(filp->f_path.mnt);
+		mnt_drop_write_file(filp);
 		break;
 	default:
 		err = -ENOTTY;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 80058e8..1463788 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -559,7 +559,7 @@
 ** outside of a transaction, so we had to pull some bits of
 ** reiserfs_new_inode out into this func.
 */
-static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
+static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode)
 {
 	/* Make inode invalid - just in case we are going to drop it before
 	 * the initialization happens */
@@ -572,7 +572,7 @@
 	return 0;
 }
 
-static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			   struct nameidata *nd)
 {
 	int retval;
@@ -643,7 +643,7 @@
 	return retval;
 }
 
-static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 			  dev_t rdev)
 {
 	int retval;
@@ -721,7 +721,7 @@
 	return retval;
 }
 
-static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int retval;
 	struct inode *inode;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 14363b9..19c454e 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -28,6 +28,7 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/crc32.h>
+#include <linux/seq_file.h>
 
 struct file_system_type reiserfs_fs_type;
 
@@ -61,6 +62,7 @@
 
 static int reiserfs_remount(struct super_block *s, int *flags, char *data);
 static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
+void show_alloc_options(struct seq_file *seq, struct super_block *s);
 
 static int reiserfs_sync_fs(struct super_block *s, int wait)
 {
@@ -532,7 +534,6 @@
 static void reiserfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
 }
 
@@ -597,6 +598,82 @@
 	reiserfs_write_unlock_once(inode->i_sb, lock_depth);
 }
 
+static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+	struct super_block *s = root->d_sb;
+	struct reiserfs_journal *journal = SB_JOURNAL(s);
+	long opts = REISERFS_SB(s)->s_mount_opt;
+
+	if (opts & (1 << REISERFS_LARGETAIL))
+		seq_puts(seq, ",tails=on");
+	else if (!(opts & (1 << REISERFS_SMALLTAIL)))
+		seq_puts(seq, ",notail");
+	/* tails=small is default so we don't show it */
+
+	if (!(opts & (1 << REISERFS_BARRIER_FLUSH)))
+		seq_puts(seq, ",barrier=none");
+	/* barrier=flush is default so we don't show it */
+
+	if (opts & (1 << REISERFS_ERROR_CONTINUE))
+		seq_puts(seq, ",errors=continue");
+	else if (opts & (1 << REISERFS_ERROR_PANIC))
+		seq_puts(seq, ",errors=panic");
+	/* errors=ro is default so we don't show it */
+
+	if (opts & (1 << REISERFS_DATA_LOG))
+		seq_puts(seq, ",data=journal");
+	else if (opts & (1 << REISERFS_DATA_WRITEBACK))
+		seq_puts(seq, ",data=writeback");
+	/* data=ordered is default so we don't show it */
+
+	if (opts & (1 << REISERFS_ATTRS))
+		seq_puts(seq, ",attrs");
+
+	if (opts & (1 << REISERFS_XATTRS_USER))
+		seq_puts(seq, ",user_xattr");
+
+	if (opts & (1 << REISERFS_EXPOSE_PRIVROOT))
+		seq_puts(seq, ",expose_privroot");
+
+	if (opts & (1 << REISERFS_POSIXACL))
+		seq_puts(seq, ",acl");
+
+	if (REISERFS_SB(s)->s_jdev)
+		seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);
+
+	if (journal->j_max_commit_age != journal->j_default_max_commit_age)
+		seq_printf(seq, ",commit=%d", journal->j_max_commit_age);
+
+#ifdef CONFIG_QUOTA
+	if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
+		seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
+	else if (opts & (1 << REISERFS_USRQUOTA))
+		seq_puts(seq, ",usrquota");
+	if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
+		seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
+	else if (opts & (1 << REISERFS_GRPQUOTA))
+		seq_puts(seq, ",grpquota");
+	if (REISERFS_SB(s)->s_jquota_fmt) {
+		if (REISERFS_SB(s)->s_jquota_fmt == QFMT_VFS_OLD)
+			seq_puts(seq, ",jqfmt=vfsold");
+		else if (REISERFS_SB(s)->s_jquota_fmt == QFMT_VFS_V0)
+			seq_puts(seq, ",jqfmt=vfsv0");
+	}
+#endif
+
+	/* Block allocator options */
+	if (opts & (1 << REISERFS_NO_BORDER))
+		seq_puts(seq, ",block-allocator=noborder");
+	if (opts & (1 << REISERFS_NO_UNHASHED_RELOCATION))
+		seq_puts(seq, ",block-allocator=no_unhashed_relocation");
+	if (opts & (1 << REISERFS_HASHED_RELOCATION))
+		seq_puts(seq, ",block-allocator=hashed_relocation");
+	if (opts & (1 << REISERFS_TEST4))
+		seq_puts(seq, ",block-allocator=test4");
+	show_alloc_options(seq, s);
+	return 0;
+}
+
 #ifdef CONFIG_QUOTA
 static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
 				    size_t, loff_t);
@@ -617,7 +694,7 @@
 	.unfreeze_fs = reiserfs_unfreeze,
 	.statfs = reiserfs_statfs,
 	.remount_fs = reiserfs_remount,
-	.show_options = generic_show_options,
+	.show_options = reiserfs_show_options,
 #ifdef CONFIG_QUOTA
 	.quota_read = reiserfs_quota_read,
 	.quota_write = reiserfs_quota_write,
@@ -915,9 +992,9 @@
 		{"jdev",.arg_required = 'j',.values = NULL},
 		{"nolargeio",.arg_required = 'w',.values = NULL},
 		{"commit",.arg_required = 'c',.values = NULL},
-		{"usrquota",.setmask = 1 << REISERFS_QUOTA},
-		{"grpquota",.setmask = 1 << REISERFS_QUOTA},
-		{"noquota",.clrmask = 1 << REISERFS_QUOTA},
+		{"usrquota",.setmask = 1 << REISERFS_USRQUOTA},
+		{"grpquota",.setmask = 1 << REISERFS_GRPQUOTA},
+		{"noquota",.clrmask = 1 << REISERFS_USRQUOTA | 1 << REISERFS_GRPQUOTA},
 		{"errors",.arg_required = 'e',.values = error_actions},
 		{"usrjquota",.arg_required =
 		 'u' | (1 << REISERFS_OPT_ALLOWEMPTY),.values = NULL},
@@ -1031,12 +1108,19 @@
 					return 0;
 				}
 				strcpy(qf_names[qtype], arg);
-				*mount_options |= 1 << REISERFS_QUOTA;
+				if (qtype == USRQUOTA)
+					*mount_options |= 1 << REISERFS_USRQUOTA;
+				else
+					*mount_options |= 1 << REISERFS_GRPQUOTA;
 			} else {
 				if (qf_names[qtype] !=
 				    REISERFS_SB(s)->s_qf_names[qtype])
 					kfree(qf_names[qtype]);
 				qf_names[qtype] = NULL;
+				if (qtype == USRQUOTA)
+					*mount_options &= ~(1 << REISERFS_USRQUOTA);
+				else
+					*mount_options &= ~(1 << REISERFS_GRPQUOTA);
 			}
 		}
 		if (c == 'f') {
@@ -1075,9 +1159,10 @@
 				 "journaled quota format not specified.");
 		return 0;
 	}
-	/* This checking is not precise wrt the quota type but for our purposes it is sufficient */
-	if (!(*mount_options & (1 << REISERFS_QUOTA))
-	    && sb_any_quota_loaded(s)) {
+	if ((!(*mount_options & (1 << REISERFS_USRQUOTA)) &&
+	       sb_has_quota_loaded(s, USRQUOTA)) ||
+	    (!(*mount_options & (1 << REISERFS_GRPQUOTA)) &&
+	       sb_has_quota_loaded(s, GRPQUOTA))) {
 		reiserfs_warning(s, "super-6516", "quota options must "
 				 "be present when quota is turned on.");
 		return 0;
@@ -1225,7 +1310,8 @@
 	safe_mask |= 1 << REISERFS_ERROR_RO;
 	safe_mask |= 1 << REISERFS_ERROR_CONTINUE;
 	safe_mask |= 1 << REISERFS_ERROR_PANIC;
-	safe_mask |= 1 << REISERFS_QUOTA;
+	safe_mask |= 1 << REISERFS_USRQUOTA;
+	safe_mask |= 1 << REISERFS_GRPQUOTA;
 
 	/* Update the bitmask, taking care to keep
 	 * the bits we're not allowed to change here */
@@ -1672,6 +1758,14 @@
 	     &commit_max_age, qf_names, &qfmt) == 0) {
 		goto error;
 	}
+	if (jdev_name && jdev_name[0]) {
+		REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
+		if (!REISERFS_SB(s)->s_jdev) {
+			SWARN(silent, s, "", "Cannot allocate memory for "
+				"journal device name");
+			goto error;
+		}
+	}
 #ifdef CONFIG_QUOTA
 	handle_quota_files(s, qf_names, &qfmt);
 #endif
@@ -2054,12 +2148,13 @@
 	int err;
 	struct inode *inode;
 	struct reiserfs_transaction_handle th;
+	int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
 
-	if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
+	if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
 		return -EINVAL;
 
 	/* Quotafile not on the same filesystem? */
-	if (path->mnt->mnt_sb != sb) {
+	if (path->dentry->d_sb != sb) {
 		err = -EXDEV;
 		goto out;
 	}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6bc346c..c24deda 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -66,7 +66,7 @@
 }
 #endif
 
-static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int xattr_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	BUG_ON(!mutex_is_locked(&dir->i_mutex));
 	return dir->i_op->mkdir(dir, dentry, mode);
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 8b4089f..bb36ab7 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -403,7 +403,6 @@
 static void romfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
 }
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index dba43c3..4023d6b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -397,7 +397,7 @@
  *      Returns pointer past last written character in @s, or NULL in case of
  *      failure.
  */
-char *mangle_path(char *s, char *p, char *esc)
+char *mangle_path(char *s, const char *p, const char *esc)
 {
 	while (s <= p) {
 		char c = *p++;
@@ -427,7 +427,7 @@
  * return the absolute path of 'path', as represented by the
  * dentry / mnt pair in the path parameter.
  */
-int seq_path(struct seq_file *m, struct path *path, char *esc)
+int seq_path(struct seq_file *m, const struct path *path, const char *esc)
 {
 	char *buf;
 	size_t size = seq_get_buf(m, &buf);
@@ -450,8 +450,8 @@
 /*
  * Same as seq_path, but relative to supplied root.
  */
-int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
-		  char *esc)
+int seq_path_root(struct seq_file *m, const struct path *path,
+		  const struct path *root, const char *esc)
 {
 	char *buf;
 	size_t size = seq_get_buf(m, &buf);
@@ -480,7 +480,7 @@
 /*
  * returns the path of the 'dentry' from the root of its filesystem.
  */
-int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
+int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
 {
 	char *buf;
 	size_t size = seq_get_buf(m, &buf);
diff --git a/fs/splice.c b/fs/splice.c
index fa2defa..1ec0493 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -25,7 +25,6 @@
 #include <linux/mm_inline.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/buffer_head.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
 #include <linux/uio.h>
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 2da1715..d0858c2 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -464,7 +464,6 @@
 static void squashfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
 }
 
diff --git a/fs/statfs.c b/fs/statfs.c
index 9cf04a1..2aa6a22 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -7,6 +7,7 @@
 #include <linux/statfs.h>
 #include <linux/security.h>
 #include <linux/uaccess.h>
+#include "internal.h"
 
 static int flags_by_mnt(int mnt_flags)
 {
@@ -45,7 +46,7 @@
 		flags_by_sb(mnt->mnt_sb->s_flags);
 }
 
-int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
+static int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
 {
 	int retval;
 
@@ -205,19 +206,23 @@
 	return error;
 }
 
-SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
+int vfs_ustat(dev_t dev, struct kstatfs *sbuf)
 {
-	struct super_block *s;
-	struct ustat tmp;
-	struct kstatfs sbuf;
+	struct super_block *s = user_get_super(dev);
 	int err;
-
-	s = user_get_super(new_decode_dev(dev));
 	if (!s)
 		return -EINVAL;
 
-	err = statfs_by_dentry(s->s_root, &sbuf);
+	err = statfs_by_dentry(s->s_root, sbuf);
 	drop_super(s);
+	return err;
+}
+
+SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
+{
+	struct ustat tmp;
+	struct kstatfs sbuf;
+	int err = vfs_ustat(new_decode_dev(dev), &sbuf);
 	if (err)
 		return err;
 
diff --git a/fs/super.c b/fs/super.c
index afd0f1a..de41e1e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -136,12 +136,13 @@
 		INIT_LIST_HEAD(&s->s_files);
 #endif
 		s->s_bdi = &default_backing_dev_info;
-		INIT_LIST_HEAD(&s->s_instances);
+		INIT_HLIST_NODE(&s->s_instances);
 		INIT_HLIST_BL_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
 		INIT_LIST_HEAD(&s->s_dentry_lru);
 		INIT_LIST_HEAD(&s->s_inode_lru);
 		spin_lock_init(&s->s_inode_lru_lock);
+		INIT_LIST_HEAD(&s->s_mounts);
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -200,6 +201,7 @@
 	free_percpu(s->s_files);
 #endif
 	security_sb_free(s);
+	WARN_ON(!list_empty(&s->s_mounts));
 	kfree(s->s_subtype);
 	kfree(s->s_options);
 	kfree(s);
@@ -210,7 +212,7 @@
 /*
  * Drop a superblock's refcount.  The caller must hold sb_lock.
  */
-void __put_super(struct super_block *sb)
+static void __put_super(struct super_block *sb)
 {
 	if (!--sb->s_count) {
 		list_del_init(&sb->s_list);
@@ -225,7 +227,7 @@
  *	Drops a temporary reference, frees superblock if there's no
  *	references left.
  */
-void put_super(struct super_block *sb)
+static void put_super(struct super_block *sb)
 {
 	spin_lock(&sb_lock);
 	__put_super(sb);
@@ -328,7 +330,7 @@
 bool grab_super_passive(struct super_block *sb)
 {
 	spin_lock(&sb_lock);
-	if (list_empty(&sb->s_instances)) {
+	if (hlist_unhashed(&sb->s_instances)) {
 		spin_unlock(&sb_lock);
 		return false;
 	}
@@ -337,7 +339,7 @@
 	spin_unlock(&sb_lock);
 
 	if (down_read_trylock(&sb->s_umount)) {
-		if (sb->s_root)
+		if (sb->s_root && (sb->s_flags & MS_BORN))
 			return true;
 		up_read(&sb->s_umount);
 	}
@@ -400,7 +402,7 @@
 	}
 	spin_lock(&sb_lock);
 	/* should be initialized for __put_super_and_need_restart() */
-	list_del_init(&sb->s_instances);
+	hlist_del_init(&sb->s_instances);
 	spin_unlock(&sb_lock);
 	up_write(&sb->s_umount);
 }
@@ -420,13 +422,14 @@
 			void *data)
 {
 	struct super_block *s = NULL;
+	struct hlist_node *node;
 	struct super_block *old;
 	int err;
 
 retry:
 	spin_lock(&sb_lock);
 	if (test) {
-		list_for_each_entry(old, &type->fs_supers, s_instances) {
+		hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
 			if (!test(old, data))
 				continue;
 			if (!grab_super(old))
@@ -462,7 +465,7 @@
 	s->s_type = type;
 	strlcpy(s->s_id, type->name, sizeof(s->s_id));
 	list_add_tail(&s->s_list, &super_blocks);
-	list_add(&s->s_instances, &type->fs_supers);
+	hlist_add_head(&s->s_instances, &type->fs_supers);
 	spin_unlock(&sb_lock);
 	get_filesystem(type);
 	register_shrinker(&s->s_shrink);
@@ -497,14 +500,14 @@
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_op->write_super && sb->s_dirt) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
 
 			down_read(&sb->s_umount);
-			if (sb->s_root && sb->s_dirt)
+			if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
 				sb->s_op->write_super(sb);
 			up_read(&sb->s_umount);
 
@@ -533,13 +536,13 @@
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
 		down_read(&sb->s_umount);
-		if (sb->s_root)
+		if (sb->s_root && (sb->s_flags & MS_BORN))
 			f(sb, arg);
 		up_read(&sb->s_umount);
 
@@ -566,14 +569,15 @@
 	void (*f)(struct super_block *, void *), void *arg)
 {
 	struct super_block *sb, *p = NULL;
+	struct hlist_node *node;
 
 	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &type->fs_supers, s_instances) {
+	hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
 		down_read(&sb->s_umount);
-		if (sb->s_root)
+		if (sb->s_root && (sb->s_flags & MS_BORN))
 			f(sb, arg);
 		up_read(&sb->s_umount);
 
@@ -607,14 +611,14 @@
 	spin_lock(&sb_lock);
 rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_bdev == bdev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
 			down_read(&sb->s_umount);
 			/* still alive? */
-			if (sb->s_root)
+			if (sb->s_root && (sb->s_flags & MS_BORN))
 				return sb;
 			up_read(&sb->s_umount);
 			/* nope, got unmounted */
@@ -647,7 +651,7 @@
 restart:
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_bdev == bdev) {
 			if (grab_super(sb)) /* drops sb_lock */
@@ -667,14 +671,14 @@
 	spin_lock(&sb_lock);
 rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_dev ==  dev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
 			down_read(&sb->s_umount);
 			/* still alive? */
-			if (sb->s_root)
+			if (sb->s_root && (sb->s_flags & MS_BORN))
 				return sb;
 			up_read(&sb->s_umount);
 			/* nope, got unmounted */
@@ -719,23 +723,29 @@
 	/* If we are remounting RDONLY and current sb is read/write,
 	   make sure there are no rw files opened */
 	if (remount_ro) {
-		if (force)
+		if (force) {
 			mark_files_ro(sb);
-		else if (!fs_may_remount_ro(sb))
-			return -EBUSY;
+		} else {
+			retval = sb_prepare_remount_readonly(sb);
+			if (retval)
+				return retval;
+		}
 	}
 
 	if (sb->s_op->remount_fs) {
 		retval = sb->s_op->remount_fs(sb, &flags, data);
 		if (retval) {
 			if (!force)
-				return retval;
+				goto cancel_readonly;
 			/* If forced remount, go ahead despite any errors */
 			WARN(1, "forced remount of a %s fs returned %i\n",
 			     sb->s_type->name, retval);
 		}
 	}
 	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
+	/* Needs to be ordered wrt mnt_is_readonly() */
+	smp_wmb();
+	sb->s_readonly_remount = 0;
 
 	/*
 	 * Some filesystems modify their metadata via some other path than the
@@ -748,6 +758,10 @@
 	if (remount_ro && sb->s_bdev)
 		invalidate_bdev(sb->s_bdev);
 	return 0;
+
+cancel_readonly:
+	sb->s_readonly_remount = 0;
+	return retval;
 }
 
 static void do_emergency_remount(struct work_struct *work)
@@ -756,12 +770,13 @@
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 		down_write(&sb->s_umount);
-		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
+		if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
+		    !(sb->s_flags & MS_RDONLY)) {
 			/*
 			 * What lock protects sb->s_flags??
 			 */
@@ -1144,6 +1159,11 @@
 		return -EBUSY;
 	}
 
+	if (!(sb->s_flags & MS_BORN)) {
+		up_write(&sb->s_umount);
+		return 0;	/* sic - it's "nothing to do" */
+	}
+
 	if (sb->s_flags & MS_RDONLY) {
 		sb->s_frozen = SB_FREEZE_TRANS;
 		smp_wmb();
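The fs/super.c hunks above convert sb->s_instances from a list_head to an hlist_node and gate the superblock iterators on MS_BORN, so callers only act on superblocks whose mount has fully completed. A minimal sketch of the resulting walk over one filesystem type's superblocks, assuming the 3.3-era four-argument hlist_for_each_entry() and a struct file_system_type *type in scope (illustrative only, mirroring iterate_supers_type() above):

	struct super_block *sb, *p = NULL;
	struct hlist_node *node;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
		sb->s_count++;			/* pin the sb before dropping sb_lock */
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN)) {
			/* act only on fully born, still-mounted superblocks */
		}
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);		/* drop the previous pin under sb_lock */
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);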
diff --git a/fs/sync.c b/fs/sync.c
index 101b8ef..f3501ef 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -14,7 +14,6 @@
 #include <linux/linkage.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
 #include "internal.h"
 
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index d4e6080..62f4fb3 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -518,7 +518,7 @@
 }
 
 int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
-			const struct attribute *attr, int type, mode_t amode)
+			const struct attribute *attr, int type, umode_t amode)
 {
 	umode_t mode = (amode & S_IALLUGO) | S_IFREG;
 	struct sysfs_addrm_cxt acxt;
@@ -618,7 +618,7 @@
  *
  */
 int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
-		     mode_t mode)
+		     umode_t mode)
 {
 	struct sysfs_dirent *sd;
 	struct iattr newattrs;
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 194414f..dd1701c 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -33,7 +33,7 @@
 	int error = 0, i;
 
 	for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
-		mode_t mode = 0;
+		umode_t mode = 0;
 
 		/* in update mode, we're changing the permissions or
 		 * visibility.  Do this by first removing then
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index c81b22f3..4a802b4 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -187,7 +187,7 @@
 	return error;
 }
 
-static inline void set_default_inode_attr(struct inode * inode, mode_t mode)
+static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
 {
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ce29e28..7484a36 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -79,7 +79,7 @@
 	};
 
 	unsigned int		s_flags;
-	unsigned short		s_mode;
+	umode_t 		s_mode;
 	ino_t			s_ino;
 	struct sysfs_inode_attrs *s_iattr;
 };
@@ -229,7 +229,7 @@
 		   const struct attribute *attr, int type);
 
 int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
-			const struct attribute *attr, int type, mode_t amode);
+			const struct attribute *attr, int type, umode_t amode);
 /*
  * bin.c
  */
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 0c96c98..8233b02 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -132,7 +132,7 @@
 	brelse(bh);
 }
 
-struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
+struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct sysv_sb_info *sbi = SYSV_SB(sb);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 25ffb3e..3da5ce2 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -336,7 +336,6 @@
 static void sysv_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
 }
 
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index fa8d43c..90b54b4 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -442,7 +442,7 @@
 
 int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
-	struct super_block *s = mnt->mnt_sb;
+	struct super_block *s = dentry->d_sb;
 	generic_fillattr(dentry->d_inode, stat);
 	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
 	stat->blksize = s->s_blocksize;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index e474fbc..b217797 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -61,7 +61,7 @@
 	return NULL;
 }
 
-static int sysv_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t rdev)
+static int sysv_mknod(struct inode * dir, struct dentry * dentry, umode_t mode, dev_t rdev)
 {
 	struct inode * inode;
 	int err;
@@ -80,7 +80,7 @@
 	return err;
 }
 
-static int sysv_create(struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
+static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
 {
 	return sysv_mknod(dir, dentry, mode, 0);
 }
@@ -131,7 +131,7 @@
 	return add_nondir(dentry, inode);
 }
 
-static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
+static int sysv_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode * inode;
 	int err = -EMLINK;
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index bb55cdb..0e4b821 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -125,7 +125,7 @@
 /* ialloc.c */
 extern struct sysv_inode *sysv_raw_inode(struct super_block *, unsigned,
 			struct buffer_head **);
-extern struct inode * sysv_new_inode(const struct inode *, mode_t);
+extern struct inode * sysv_new_inode(const struct inode *, umode_t);
 extern void sysv_free_inode(struct inode *);
 extern unsigned long sysv_count_free_inodes(struct super_block *);
 
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 6834920..d6fe1c7 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -56,7 +56,7 @@
  *
  * This function returns the inherited flags.
  */
-static int inherit_flags(const struct inode *dir, int mode)
+static int inherit_flags(const struct inode *dir, umode_t mode)
 {
 	int flags;
 	const struct ubifs_inode *ui = ubifs_inode(dir);
@@ -86,7 +86,7 @@
  * case of failure.
  */
 struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
-			      int mode)
+			      umode_t mode)
 {
 	struct inode *inode;
 	struct ubifs_inode *ui;
@@ -253,7 +253,7 @@
 	return ERR_PTR(err);
 }
 
-static int ubifs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 			struct nameidata *nd)
 {
 	struct inode *inode;
@@ -268,7 +268,7 @@
 	 * parent directory inode.
 	 */
 
-	dbg_gen("dent '%.*s', mode %#x in dir ino %lu",
+	dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
 		dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
 
 	err = ubifs_budget_space(c, &req);
@@ -712,7 +712,7 @@
 	return err;
 }
 
-static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
@@ -725,7 +725,7 @@
 	 * directory inode.
 	 */
 
-	dbg_gen("dent '%.*s', mode %#x in dir ino %lu",
+	dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
 		dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
 
 	err = ubifs_budget_space(c, &req);
@@ -769,7 +769,7 @@
 }
 
 static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
-		       int mode, dev_t rdev)
+		       umode_t mode, dev_t rdev)
 {
 	struct inode *inode;
 	struct ubifs_inode *ui;
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 548acf4..1a7e2d8 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -173,12 +173,12 @@
 		 * Make sure the file-system is read-write and make sure it
 		 * will not become read-only while we are changing the flags.
 		 */
-		err = mnt_want_write(file->f_path.mnt);
+		err = mnt_want_write_file(file);
 		if (err)
 			return err;
 		dbg_gen("set flags: %#x, i_flags %#x", flags, inode->i_flags);
 		err = setflags(inode, flags);
-		mnt_drop_write(file->f_path.mnt);
+		mnt_drop_write_file(file);
 		return err;
 	}
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 20403dc..63765d5 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -276,7 +276,6 @@
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ubifs_inode_slab, ui);
 }
 
@@ -420,9 +419,9 @@
 	return 0;
 }
 
-static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int ubifs_show_options(struct seq_file *s, struct dentry *root)
 {
-	struct ubifs_info *c = mnt->mnt_sb->s_fs_info;
+	struct ubifs_info *c = root->d_sb->s_fs_info;
 
 	if (c->mount_opts.unmount_mode == 2)
 		seq_printf(s, ",fast_unmount");
@@ -2264,19 +2263,12 @@
 		return -EINVAL;
 	}
 
-	err = register_filesystem(&ubifs_fs_type);
-	if (err) {
-		ubifs_err("cannot register file system, error %d", err);
-		return err;
-	}
-
-	err = -ENOMEM;
 	ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
 				sizeof(struct ubifs_inode), 0,
 				SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
 				&inode_slab_ctor);
 	if (!ubifs_inode_slab)
-		goto out_reg;
+		return -ENOMEM;
 
 	register_shrinker(&ubifs_shrinker_info);
 
@@ -2288,15 +2280,20 @@
 	if (err)
 		goto out_compr;
 
+	err = register_filesystem(&ubifs_fs_type);
+	if (err) {
+		ubifs_err("cannot register file system, error %d", err);
+		goto out_dbg;
+	}
 	return 0;
 
+out_dbg:
+	dbg_debugfs_exit();
 out_compr:
 	ubifs_compressors_exit();
 out_shrinker:
 	unregister_shrinker(&ubifs_shrinker_info);
 	kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
-	unregister_filesystem(&ubifs_fs_type);
 	return err;
 }
 /* late_initcall to let compressors initialize first */
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 27f2255..12e9477 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1734,7 +1734,7 @@
 
 /* dir.c */
 struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
-			      int mode);
+			      umode_t mode);
 int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		  struct kstat *stat);
 
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 6fb7e0a..05ab481 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -46,7 +46,7 @@
 	udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
 
-struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
 {
 	struct super_block *sb = dir->i_sb;
 	struct udf_sb_info *sbi = UDF_SB(sb);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 4fd1d80..4598904 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -48,7 +48,7 @@
 
 #define EXTENT_MERGE_SIZE 5
 
-static mode_t udf_convert_permissions(struct fileEntry *);
+static umode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
 static void udf_fill_inode(struct inode *, struct buffer_head *);
 static int udf_sync_inode(struct inode *inode);
@@ -1452,9 +1452,9 @@
 	return 0;
 }
 
-static mode_t udf_convert_permissions(struct fileEntry *fe)
+static umode_t udf_convert_permissions(struct fileEntry *fe)
 {
-	mode_t mode;
+	umode_t mode;
 	uint32_t permissions;
 	uint32_t flags;
 
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 4639e13..08bf46e 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -552,7 +552,7 @@
 	return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
 }
 
-static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		      struct nameidata *nd)
 {
 	struct udf_fileident_bh fibh;
@@ -596,7 +596,7 @@
 	return 0;
 }
 
-static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 		     dev_t rdev)
 {
 	struct inode *inode;
@@ -640,7 +640,7 @@
 	return err;
 }
 
-static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 	struct udf_fileident_bh fibh;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e185253..0c33225 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -89,7 +89,7 @@
 static void udf_close_lvid(struct super_block *);
 static unsigned int udf_count_free(struct super_block *);
 static int udf_statfs(struct dentry *, struct kstatfs *);
-static int udf_show_options(struct seq_file *, struct vfsmount *);
+static int udf_show_options(struct seq_file *, struct dentry *);
 
 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
 {
@@ -138,7 +138,6 @@
 static void udf_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 }
 
@@ -196,11 +195,11 @@
 	unsigned int fileset;
 	unsigned int rootdir;
 	unsigned int flags;
-	mode_t umask;
+	umode_t umask;
 	gid_t gid;
 	uid_t uid;
-	mode_t fmode;
-	mode_t dmode;
+	umode_t fmode;
+	umode_t dmode;
 	struct nls_table *nls_map;
 };
 
@@ -250,9 +249,9 @@
 	return 0;
 }
 
-static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int udf_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct super_block *sb = mnt->mnt_sb;
+	struct super_block *sb = root->d_sb;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 
 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
@@ -280,11 +279,11 @@
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
 		seq_printf(seq, ",gid=%u", sbi->s_gid);
 	if (sbi->s_umask != 0)
-		seq_printf(seq, ",umask=%o", sbi->s_umask);
+		seq_printf(seq, ",umask=%ho", sbi->s_umask);
 	if (sbi->s_fmode != UDF_INVALID_MODE)
-		seq_printf(seq, ",mode=%o", sbi->s_fmode);
+		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
 	if (sbi->s_dmode != UDF_INVALID_MODE)
-		seq_printf(seq, ",dmode=%o", sbi->s_dmode);
+		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
 		seq_printf(seq, ",session=%u", sbi->s_session);
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 5142a82..42ad69a 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -50,7 +50,7 @@
 #define UDF_SPARABLE_MAP15		0x1522U
 #define UDF_METADATA_MAP25		0x2511U
 
-#define UDF_INVALID_MODE		((mode_t)-1)
+#define UDF_INVALID_MODE		((umode_t)-1)
 
 #pragma pack(1) /* XXX(hch): Why?  This file just defines in-core structures */
 
@@ -127,11 +127,11 @@
 	struct buffer_head	*s_lvid_bh;
 
 	/* Default permissions */
-	mode_t			s_umask;
+	umode_t			s_umask;
 	gid_t			s_gid;
 	uid_t			s_uid;
-	mode_t			s_fmode;
-	mode_t			s_dmode;
+	umode_t			s_fmode;
+	umode_t			s_dmode;
 	/* Lock protecting consistency of above permission settings */
 	rwlock_t		s_cred_lock;
 
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index f34e6fc..ebe1031 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -215,7 +215,7 @@
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, int, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
 
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 78a4c70..4ec5c10 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -170,7 +170,7 @@
  * For other inodes, search forward from the parent directory's block
  * group to find a free inode.
  */
-struct inode * ufs_new_inode(struct inode * dir, int mode)
+struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
 {
 	struct super_block * sb;
 	struct ufs_sb_info * sbi;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 879b134..9094e1d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -583,7 +583,7 @@
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
-	mode_t mode;
+	umode_t mode;
 
 	/*
 	 * Copy data to the in-core inode.
@@ -630,7 +630,7 @@
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
-	mode_t mode;
+	umode_t mode;
 
 	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
 	/*
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 639d491..38cac19 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -70,7 +70,7 @@
  * If the create succeeds, we fill in the inode information
  * with d_instantiate(). 
  */
-static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
+static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	struct inode *inode;
@@ -94,7 +94,7 @@
 	return err;
 }
 
-static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
 {
 	struct inode *inode;
 	int err;
@@ -180,7 +180,7 @@
 	return error;
 }
 
-static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 {
 	struct inode * inode;
 	int err = -EMLINK;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 3915ade..5246ee3 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1351,9 +1351,9 @@
 	return 0;
 }
 
-static int ufs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ufs_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct ufs_sb_info *sbi = UFS_SB(vfs->mnt_sb);
+	struct ufs_sb_info *sbi = UFS_SB(root->d_sb);
 	unsigned mval = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE;
 	const struct match_token *tp = tokens;
 
@@ -1425,7 +1425,6 @@
 static void ufs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
 }
 
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index c26f2bce..528750b 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -104,7 +104,7 @@
 
 /* ialloc.c */
 extern void ufs_free_inode (struct inode *inode);
-extern struct inode * ufs_new_inode (struct inode *, int);
+extern struct inode * ufs_new_inode (struct inode *, umode_t);
 
 /* inode.c */
 extern struct inode *ufs_iget(struct super_block *, unsigned long);
diff --git a/fs/xattr.c b/fs/xattr.c
index 67583de..82f4337 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -397,7 +397,7 @@
 	error = mnt_want_write_file(f);
 	if (!error) {
 		error = setxattr(dentry, name, value, size, flags);
-		mnt_drop_write(f->f_path.mnt);
+		mnt_drop_write_file(f);
 	}
 	fput(f);
 	return error;
@@ -624,7 +624,7 @@
 	error = mnt_want_write_file(f);
 	if (!error) {
 		error = removexattr(dentry, name);
-		mnt_drop_write(f->f_path.mnt);
+		mnt_drop_write_file(f);
 	}
 	fput(f);
 	return error;
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 76e4266..ac702a6 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -39,7 +39,7 @@
 	struct posix_acl_entry *acl_e;
 	struct posix_acl *acl;
 	struct xfs_acl_entry *ace;
-	int count, i;
+	unsigned int count, i;
 
 	count = be32_to_cpu(aclp->acl_cnt);
 	if (count > XFS_ACL_MAX_ENTRIES)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index cf0ac05..4dff85c 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1370,7 +1370,7 @@
 			goto restart;
 		}
 		/*
-		 * clear the LRU reference count so the bufer doesn't get
+		 * clear the LRU reference count so the buffer doesn't get
 		 * ignored in xfs_buf_rele().
 		 */
 		atomic_set(&bp->b_lru_ref, 0);
@@ -1701,12 +1701,8 @@
 		struct list_head tmp;
 		struct blk_plug plug;
 
-		if (unlikely(freezing(current))) {
-			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
-		} else {
-			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-		}
+		if (unlikely(freezing(current)))
+			try_to_freeze();
 
 		/* sleep for a long time if there is nothing to do. */
 		if (list_empty(&target->bt_delwri_queue))
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 5bab046..df7ffb0 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -90,8 +90,7 @@
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }
 
 typedef enum {
-	XBT_FORCE_SLEEP = 0,
-	XBT_FORCE_FLUSH = 1,
+	XBT_FORCE_FLUSH = 0,
 } xfs_buftarg_flags_t;
 
 typedef struct xfs_buftarg {
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 25d7280..b4ff40b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -39,20 +39,19 @@
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 
-
 /*
-   LOCK ORDER
-
-   inode lock		    (ilock)
-   dquot hash-chain lock    (hashlock)
-   xqm dquot freelist lock  (freelistlock
-   mount's dquot list lock  (mplistlock)
-   user dquot lock - lock ordering among dquots is based on the uid or gid
-   group dquot lock - similar to udquots. Between the two dquots, the udquot
-		      has to be locked first.
-   pin lock - the dquot lock must be held to take this lock.
-   flush lock - ditto.
-*/
+ * Lock order:
+ *
+ * ip->i_lock
+ *   qh->qh_lock
+ *     qi->qi_dqlist_lock
+ *       dquot->q_qlock (xfs_dqlock() and friends)
+ *         dquot->q_flush (xfs_dqflock() and friends)
+ *         xfs_Gqm->qm_dqfrlist_lock
+ *
+ * If two dquots need to be locked the order is user before group/project,
+ * otherwise by the lowest id first, see xfs_dqlock2.
+ */
 
 #ifdef DEBUG
 xfs_buftarg_t *xfs_dqerror_target;
@@ -155,24 +154,6 @@
 }
 
 /*
- * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
- */
-STATIC void
-xfs_qm_dqinit_core(
-	xfs_dqid_t	id,
-	uint		type,
-	xfs_dqblk_t	*d)
-{
-	/*
-	 * Caller has zero'd the entire dquot 'chunk' already.
-	 */
-	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
-	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
-	d->dd_diskdq.d_id = cpu_to_be32(id);
-	d->dd_diskdq.d_flags = type;
-}
-
-/*
  * If default limits are in force, push them into the dquot now.
  * We overwrite the dquot limits only if they are zero and this
  * is not the root dquot.
@@ -328,8 +309,13 @@
 	curid = id - (id % q->qi_dqperchunk);
 	ASSERT(curid >= 0);
 	memset(d, 0, BBTOB(q->qi_dqchunklen));
-	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
-		xfs_qm_dqinit_core(curid, type, d);
+	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
+		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
+		d->dd_diskdq.d_id = cpu_to_be32(curid);
+		d->dd_diskdq.d_flags = type;
+	}
+
 	xfs_trans_dquot_buf(tp, bp,
 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
@@ -564,36 +550,62 @@
  * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
  * and release the buffer immediately.
  *
+ * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
  */
-/* ARGSUSED */
-STATIC int
+int
 xfs_qm_dqread(
-	xfs_trans_t	**tpp,
-	xfs_dqid_t	id,
-	xfs_dquot_t	*dqp,	/* dquot to get filled in */
-	uint		flags)
+	struct xfs_mount	*mp,
+	xfs_dqid_t		id,
+	uint			type,
+	uint			flags,
+	struct xfs_dquot	**O_dqpp)
 {
-	xfs_disk_dquot_t *ddqp;
-	xfs_buf_t	 *bp;
-	int		 error;
-	xfs_trans_t	 *tp;
+	struct xfs_dquot	*dqp;
+	struct xfs_disk_dquot	*ddqp;
+	struct xfs_buf		*bp;
+	struct xfs_trans	*tp = NULL;
+	int			error;
+	int			cancelflags = 0;
 
-	ASSERT(tpp);
+	dqp = xfs_qm_dqinit(mp, id, type);
 
 	trace_xfs_dqread(dqp);
 
+	if (flags & XFS_QMOPT_DQALLOC) {
+		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
+		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
+				XFS_WRITE_LOG_RES(mp) +
+				/*
+				 * Round the chunklen up to the next multiple
+				 * of 128 (buf log item chunk size).
+				 */
+				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
+				0,
+				XFS_TRANS_PERM_LOG_RES,
+				XFS_WRITE_LOG_COUNT);
+		if (error)
+			goto error1;
+		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
+	}
+
 	/*
 	 * get a pointer to the on-disk dquot and the buffer containing it
 	 * dqp already knows its own type (GROUP/USER).
 	 */
-	if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
-		return (error);
+	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
+	if (error) {
+		/*
+		 * This can happen if quotas got turned off (ESRCH),
+		 * or if the dquot didn't exist on disk and we ask to
+		 * allocate (ENOENT).
+		 */
+		trace_xfs_dqread_fail(dqp);
+		cancelflags |= XFS_TRANS_ABORT;
+		goto error1;
 	}
-	tp = *tpp;
 
 	/* copy everything from disk dquot to the incore dquot */
 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
-	ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
 	xfs_qm_dquot_logitem_init(dqp);
 
 	/*
@@ -622,77 +634,22 @@
 	ASSERT(xfs_buf_islocked(bp));
 	xfs_trans_brelse(tp, bp);
 
-	return (error);
-}
-
-
-/*
- * allocate an incore dquot from the kernel heap,
- * and fill its core with quota information kept on disk.
- * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk
- * if it wasn't already allocated.
- */
-STATIC int
-xfs_qm_idtodq(
-	xfs_mount_t	*mp,
-	xfs_dqid_t	id,	 /* gid or uid, depending on type */
-	uint		type,	 /* UDQUOT or GDQUOT */
-	uint		flags,	 /* DQALLOC, DQREPAIR */
-	xfs_dquot_t	**O_dqpp)/* OUT : incore dquot, not locked */
-{
-	xfs_dquot_t	*dqp;
-	int		error;
-	xfs_trans_t	*tp;
-	int		cancelflags=0;
-
-	dqp = xfs_qm_dqinit(mp, id, type);
-	tp = NULL;
-	if (flags & XFS_QMOPT_DQALLOC) {
-		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
-		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
-				XFS_WRITE_LOG_RES(mp) +
-				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
-				128,
-				0,
-				XFS_TRANS_PERM_LOG_RES,
-				XFS_WRITE_LOG_COUNT);
-		if (error) {
-			cancelflags = 0;
-			goto error0;
-		}
-		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
-	}
-
-	/*
-	 * Read it from disk; xfs_dqread() takes care of
-	 * all the necessary initialization of dquot's fields (locks, etc)
-	 */
-	if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
-		/*
-		 * This can happen if quotas got turned off (ESRCH),
-		 * or if the dquot didn't exist on disk and we ask to
-		 * allocate (ENOENT).
-		 */
-		trace_xfs_dqread_fail(dqp);
-		cancelflags |= XFS_TRANS_ABORT;
-		goto error0;
-	}
 	if (tp) {
-		if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES)))
-			goto error1;
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+		if (error)
+			goto error0;
 	}
 
 	*O_dqpp = dqp;
-	return (0);
+	return error;
 
- error0:
-	ASSERT(error);
+error1:
 	if (tp)
 		xfs_trans_cancel(tp, cancelflags);
- error1:
+error0:
 	xfs_qm_dqdestroy(dqp);
 	*O_dqpp = NULL;
-	return (error);
+	return error;
 }
 
 /*
@@ -710,12 +667,9 @@
 	xfs_dquot_t		**O_dqpp)
 {
 	xfs_dquot_t		*dqp;
-	uint			flist_locked;
 
 	ASSERT(mutex_is_locked(&qh->qh_lock));
 
-	flist_locked = B_FALSE;
-
 	/*
 	 * Traverse the hashchain looking for a match
 	 */
@@ -725,70 +679,31 @@
 		 * dqlock to look at the id field of the dquot, since the
 		 * id can't be modified without the hashlock anyway.
 		 */
-		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
-			trace_xfs_dqlookup_found(dqp);
+		if (be32_to_cpu(dqp->q_core.d_id) != id || dqp->q_mount != mp)
+			continue;
 
-			/*
-			 * All in core dquots must be on the dqlist of mp
-			 */
-			ASSERT(!list_empty(&dqp->q_mplist));
+		trace_xfs_dqlookup_found(dqp);
 
-			xfs_dqlock(dqp);
-			if (dqp->q_nrefs == 0) {
-				ASSERT(!list_empty(&dqp->q_freelist));
-				if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
-					trace_xfs_dqlookup_want(dqp);
-
-					/*
-					 * We may have raced with dqreclaim_one()
-					 * (and lost). So, flag that we don't
-					 * want the dquot to be reclaimed.
-					 */
-					dqp->dq_flags |= XFS_DQ_WANT;
-					xfs_dqunlock(dqp);
-					mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-					xfs_dqlock(dqp);
-					dqp->dq_flags &= ~(XFS_DQ_WANT);
-				}
-				flist_locked = B_TRUE;
-			}
-
-			/*
-			 * id couldn't have changed; we had the hashlock all
-			 * along
-			 */
-			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
-
-			if (flist_locked) {
-				if (dqp->q_nrefs != 0) {
-					mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-					flist_locked = B_FALSE;
-				} else {
-					/* take it off the freelist */
-					trace_xfs_dqlookup_freelist(dqp);
-					list_del_init(&dqp->q_freelist);
-					xfs_Gqm->qm_dqfrlist_cnt--;
-				}
-			}
-
-			XFS_DQHOLD(dqp);
-
-			if (flist_locked)
-				mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			/*
-			 * move the dquot to the front of the hashchain
-			 */
-			ASSERT(mutex_is_locked(&qh->qh_lock));
-			list_move(&dqp->q_hashlist, &qh->qh_list);
-			trace_xfs_dqlookup_done(dqp);
-			*O_dqpp = dqp;
-			return 0;
+		xfs_dqlock(dqp);
+		if (dqp->dq_flags & XFS_DQ_FREEING) {
+			*O_dqpp = NULL;
+			xfs_dqunlock(dqp);
+			return -1;
 		}
+
+		dqp->q_nrefs++;
+
+		/*
+		 * move the dquot to the front of the hashchain
+		 */
+		list_move(&dqp->q_hashlist, &qh->qh_list);
+		trace_xfs_dqlookup_done(dqp);
+		*O_dqpp = dqp;
+		return 0;
 	}
 
 	*O_dqpp = NULL;
-	ASSERT(mutex_is_locked(&qh->qh_lock));
-	return (1);
+	return 1;
 }
 
 /*
@@ -829,11 +744,7 @@
 			return (EIO);
 		}
 	}
-#endif
 
- again:
-
-#ifdef DEBUG
 	ASSERT(type == XFS_DQ_USER ||
 	       type == XFS_DQ_PROJ ||
 	       type == XFS_DQ_GROUP);
@@ -845,13 +756,21 @@
 			ASSERT(ip->i_gdquot == NULL);
 	}
 #endif
+
+restart:
 	mutex_lock(&h->qh_lock);
 
 	/*
 	 * Look in the cache (hashtable).
 	 * The chain is kept locked during lookup.
 	 */
-	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
+	switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) {
+	case -1:
+		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+		mutex_unlock(&h->qh_lock);
+		delay(1);
+		goto restart;
+	case 0:
 		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
 		/*
 		 * The dquot was found, moved to the front of the chain,
@@ -862,9 +781,11 @@
 		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
 		mutex_unlock(&h->qh_lock);
 		trace_xfs_dqget_hit(*O_dqpp);
-		return (0);	/* success */
+		return 0;	/* success */
+	default:
+		XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
+		break;
 	}
-	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
 
 	/*
 	 * Dquot cache miss. We don't want to keep the inode lock across
@@ -882,41 +803,18 @@
 	version = h->qh_version;
 	mutex_unlock(&h->qh_lock);
 
-	/*
-	 * Allocate the dquot on the kernel heap, and read the ondisk
-	 * portion off the disk. Also, do all the necessary initialization
-	 * This can return ENOENT if dquot didn't exist on disk and we didn't
-	 * ask it to allocate; ESRCH if quotas got turned off suddenly.
-	 */
-	if ((error = xfs_qm_idtodq(mp, id, type,
-				  flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
-					   XFS_QMOPT_DOWARN),
-				  &dqp))) {
-		if (ip)
-			xfs_ilock(ip, XFS_ILOCK_EXCL);
-		return (error);
-	}
+	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
 
-	/*
-	 * See if this is mount code calling to look at the overall quota limits
-	 * which are stored in the id == 0 user or group's dquot.
-	 * Since we may not have done a quotacheck by this point, just return
-	 * the dquot without attaching it to any hashtables, lists, etc, or even
-	 * taking a reference.
-	 * The caller must dqdestroy this once done.
-	 */
-	if (flags & XFS_QMOPT_DQSUSER) {
-		ASSERT(id == 0);
-		ASSERT(! ip);
-		goto dqret;
-	}
+	if (ip)
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	if (error)
+		return error;
 
 	/*
 	 * Dquot lock comes after hashlock in the lock ordering
 	 */
 	if (ip) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-
 		/*
 		 * A dquot could be attached to this inode by now, since
 		 * we had dropped the ilock.
@@ -961,16 +859,21 @@
 		 * lock order between the two dquots here since dqp isn't
 		 * on any findable lists yet.
 		 */
-		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
+		switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) {
+		case 0:
+		case -1:
 			/*
-			 * Duplicate found. Just throw away the new dquot
-			 * and start over.
+			 * Duplicate found, either in cache or on its way out.
+			 * Just throw away the new dquot and start over.
 			 */
-			xfs_qm_dqput(tmpdqp);
+			if (tmpdqp)
+				xfs_qm_dqput(tmpdqp);
 			mutex_unlock(&h->qh_lock);
 			xfs_qm_dqdestroy(dqp);
 			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
-			goto again;
+			goto restart;
+		default:
+			break;
 		}
 	}
 
@@ -1015,67 +918,49 @@
  */
 void
 xfs_qm_dqput(
-	xfs_dquot_t	*dqp)
+	struct xfs_dquot	*dqp)
 {
-	xfs_dquot_t	*gdqp;
+	struct xfs_dquot	*gdqp;
 
 	ASSERT(dqp->q_nrefs > 0);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
 	trace_xfs_dqput(dqp);
 
-	if (dqp->q_nrefs != 1) {
-		dqp->q_nrefs--;
+recurse:
+	if (--dqp->q_nrefs > 0) {
 		xfs_dqunlock(dqp);
 		return;
 	}
 
-	/*
-	 * drop the dqlock and acquire the freelist and dqlock
-	 * in the right order; but try to get it out-of-order first
-	 */
-	if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
-		trace_xfs_dqput_wait(dqp);
-		xfs_dqunlock(dqp);
-		mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-		xfs_dqlock(dqp);
-	}
+	trace_xfs_dqput_free(dqp);
 
-	while (1) {
-		gdqp = NULL;
-
-		/* We can't depend on nrefs being == 1 here */
-		if (--dqp->q_nrefs == 0) {
-			trace_xfs_dqput_free(dqp);
-
-			list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
-			xfs_Gqm->qm_dqfrlist_cnt++;
-
-			/*
-			 * If we just added a udquot to the freelist, then
-			 * we want to release the gdquot reference that
-			 * it (probably) has. Otherwise it'll keep the
-			 * gdquot from getting reclaimed.
-			 */
-			if ((gdqp = dqp->q_gdquot)) {
-				/*
-				 * Avoid a recursive dqput call
-				 */
-				xfs_dqlock(gdqp);
-				dqp->q_gdquot = NULL;
-			}
-		}
-		xfs_dqunlock(dqp);
-
-		/*
-		 * If we had a group quota inside the user quota as a hint,
-		 * release it now.
-		 */
-		if (! gdqp)
-			break;
-		dqp = gdqp;
+	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
+	if (list_empty(&dqp->q_freelist)) {
+		list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
+		xfs_Gqm->qm_dqfrlist_cnt++;
 	}
 	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+
+	/*
+	 * If we just added a udquot to the freelist, then we want to release
+	 * the gdquot reference that it (probably) has. Otherwise it'll keep
+	 * the gdquot from getting reclaimed.
+	 */
+	gdqp = dqp->q_gdquot;
+	if (gdqp) {
+		xfs_dqlock(gdqp);
+		dqp->q_gdquot = NULL;
+	}
+	xfs_dqunlock(dqp);
+
+	/*
+	 * If we had a group quota hint, release it now.
+	 */
+	if (gdqp) {
+		dqp = gdqp;
+		goto recurse;
+	}
 }
 
 /*
@@ -1169,7 +1054,7 @@
 	 * If not dirty, or it's pinned and we are not supposed to block, nada.
 	 */
 	if (!XFS_DQ_IS_DIRTY(dqp) ||
-	    (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
+	    ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
 		xfs_dqfunlock(dqp);
 		return 0;
 	}
@@ -1257,40 +1142,17 @@
 
 }
 
-int
-xfs_qm_dqlock_nowait(
-	xfs_dquot_t *dqp)
-{
-	return mutex_trylock(&dqp->q_qlock);
-}
-
-void
-xfs_dqlock(
-	xfs_dquot_t *dqp)
-{
-	mutex_lock(&dqp->q_qlock);
-}
-
 void
 xfs_dqunlock(
 	xfs_dquot_t *dqp)
 {
-	mutex_unlock(&(dqp->q_qlock));
+	xfs_dqunlock_nonotify(dqp);
 	if (dqp->q_logitem.qli_dquot == dqp) {
-		/* Once was dqp->q_mount, but might just have been cleared */
 		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
-					(xfs_log_item_t*)&(dqp->q_logitem));
+					&dqp->q_logitem.qli_item);
 	}
 }
 
-
-void
-xfs_dqunlock_nonotify(
-	xfs_dquot_t *dqp)
-{
-	mutex_unlock(&(dqp->q_qlock));
-}
-
 /*
  * Lock two xfs_dquot structures.
  *
@@ -1319,43 +1181,18 @@
 	}
 }
 
-
 /*
- * Take a dquot out of the mount's dqlist as well as the hashlist.
- * This is called via unmount as well as quotaoff, and the purge
- * will always succeed unless there are soft (temp) references
- * outstanding.
- *
- * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
- * that we're returning! XXXsup - not cool.
+ * Take a dquot out of the mount's dqlist as well as the hashlist.  This is
+ * called via unmount as well as quotaoff, and the purge will always succeed.
  */
-/* ARGSUSED */
-int
+void
 xfs_qm_dqpurge(
-	xfs_dquot_t	*dqp)
+	struct xfs_dquot	*dqp)
 {
-	xfs_dqhash_t	*qh = dqp->q_hash;
-	xfs_mount_t	*mp = dqp->q_mount;
-
-	ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
-	ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
+	struct xfs_mount	*mp = dqp->q_mount;
+	struct xfs_dqhash	*qh = dqp->q_hash;
 
 	xfs_dqlock(dqp);
-	/*
-	 * We really can't afford to purge a dquot that is
-	 * referenced, because these are hard refs.
-	 * It shouldn't happen in general because we went thru _all_ inodes in
-	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
-	 * However it is possible that we have dquots with temporary
-	 * references that are not attached to an inode. e.g. see xfs_setattr().
-	 */
-	if (dqp->q_nrefs != 0) {
-		xfs_dqunlock(dqp);
-		mutex_unlock(&dqp->q_hash->qh_lock);
-		return (1);
-	}
-
-	ASSERT(!list_empty(&dqp->q_freelist));
 
 	/*
 	 * If we're turning off quotas, we have to make sure that, for
@@ -1370,23 +1207,18 @@
 		 * Block on the flush lock after nudging dquot buffer,
 		 * if it is incore.
 		 */
-		xfs_qm_dqflock_pushbuf_wait(dqp);
+		xfs_dqflock_pushbuf_wait(dqp);
 	}
 
 	/*
-	 * XXXIf we're turning this type of quotas off, we don't care
+	 * If we are turning this type of quotas off, we don't care
 	 * about the dirty metadata sitting in this dquot. OTOH, if
 	 * we're unmounting, we do care, so we flush it and wait.
 	 */
 	if (XFS_DQ_IS_DIRTY(dqp)) {
 		int	error;
 
-		/* dqflush unlocks dqflock */
 		/*
-		 * Given that dqpurge is a very rare occurrence, it is OK
-		 * that we're holding the hashlist and mplist locks
-		 * across the disk write. But, ... XXXsup
-		 *
 		 * We don't care about getting disk errors here. We need
 		 * to purge this dquot anyway, so we go ahead regardless.
 		 */
@@ -1396,38 +1228,44 @@
 				__func__, dqp);
 		xfs_dqflock(dqp);
 	}
+
 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
 
+	xfs_dqfunlock(dqp);
+	xfs_dqunlock(dqp);
+
+	mutex_lock(&qh->qh_lock);
 	list_del_init(&dqp->q_hashlist);
 	qh->qh_version++;
+	mutex_unlock(&qh->qh_lock);
+
+	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
 	list_del_init(&dqp->q_mplist);
 	mp->m_quotainfo->qi_dqreclaims++;
 	mp->m_quotainfo->qi_dquots--;
+	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+
 	/*
-	 * XXX Move this to the front of the freelist, if we can get the
-	 * freelist lock.
+	 * We move dquots to the freelist as soon as their reference count
+	 * hits zero, so it really should be on the freelist here.
 	 */
+	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 	ASSERT(!list_empty(&dqp->q_freelist));
+	list_del_init(&dqp->q_freelist);
+	xfs_Gqm->qm_dqfrlist_cnt--;
+	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 
-	dqp->q_mount = NULL;
-	dqp->q_hash = NULL;
-	dqp->dq_flags = XFS_DQ_INACTIVE;
-	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
-	xfs_dqfunlock(dqp);
-	xfs_dqunlock(dqp);
-	mutex_unlock(&qh->qh_lock);
-	return (0);
+	xfs_qm_dqdestroy(dqp);
 }
 
-
 /*
  * Give the buffer a little push if it is incore and
  * wait on the flush lock.
  */
 void
-xfs_qm_dqflock_pushbuf_wait(
+xfs_dqflock_pushbuf_wait(
 	xfs_dquot_t	*dqp)
 {
 	xfs_mount_t	*mp = dqp->q_mount;
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 34b7e94..a1d91d8 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -80,8 +80,6 @@
 	XFS_QLOCK_NESTED,
 };
 
-#define XFS_DQHOLD(dqp)		((dqp)->q_nrefs++)
-
 /*
  * Manage the q_flush completion queue embedded in the dquot.  This completion
  * queue synchronizes processes attempting to flush the in-core dquot back to
@@ -102,6 +100,21 @@
 	complete(&dqp->q_flush);
 }
 
+static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
+{
+	return mutex_trylock(&dqp->q_qlock);
+}
+
+static inline void xfs_dqlock(struct xfs_dquot *dqp)
+{
+	mutex_lock(&dqp->q_qlock);
+}
+
+static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp)
+{
+	mutex_unlock(&dqp->q_qlock);
+}
+
 #define XFS_DQ_IS_LOCKED(dqp)	(mutex_is_locked(&((dqp)->q_qlock)))
 #define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
 #define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
@@ -116,12 +129,12 @@
 				     (XFS_IS_UQUOTA_ON((d)->q_mount)) : \
 				     (XFS_IS_OQUOTA_ON((d)->q_mount))))
 
+extern int		xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint,
+					uint, struct xfs_dquot	**);
 extern void		xfs_qm_dqdestroy(xfs_dquot_t *);
 extern int		xfs_qm_dqflush(xfs_dquot_t *, uint);
-extern int		xfs_qm_dqpurge(xfs_dquot_t *);
+extern void		xfs_qm_dqpurge(xfs_dquot_t *);
 extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
-extern int		xfs_qm_dqlock_nowait(xfs_dquot_t *);
-extern void		xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
 extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
 					xfs_disk_dquot_t *);
 extern void		xfs_qm_adjust_dqlimits(xfs_mount_t *,
@@ -129,9 +142,17 @@
 extern int		xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
 					xfs_dqid_t, uint, uint, xfs_dquot_t **);
 extern void		xfs_qm_dqput(xfs_dquot_t *);
-extern void		xfs_dqlock(xfs_dquot_t *);
-extern void		xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
-extern void		xfs_dqunlock(xfs_dquot_t *);
-extern void		xfs_dqunlock_nonotify(xfs_dquot_t *);
+
+extern void		xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
+extern void		xfs_dqunlock(struct xfs_dquot *);
+extern void		xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp);
+
+static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
+{
+	xfs_dqlock(dqp);
+	dqp->q_nrefs++;
+	xfs_dqunlock(dqp);
+	return dqp;
+}
 
 #endif /* __XFS_DQUOT_H__ */
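With the XFS_DQHOLD() macro removed, the xfs_dquot.h changes above route reference counting through the new xfs_qm_dqhold() inline, which takes q_qlock around the q_nrefs increment, while xfs_qm_dqput() still expects to be called with the dquot locked and parks it on the global freelist once the count reaches zero. A hedged sketch of the resulting caller pattern, assuming ip->i_udquot already points at a user dquot attached via xfs_qm_dqget() (illustrative only):

	struct xfs_dquot *dqp = xfs_qm_dqhold(ip->i_udquot);	/* lock, q_nrefs++, unlock */

	/* ... use dqp without holding q_qlock ... */

	xfs_dqlock(dqp);
	xfs_qm_dqput(dqp);	/* unlocks; moves the dquot to the freelist at q_nrefs == 0 */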
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 0dee0b7..34baeae 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -73,7 +73,6 @@
 	logvec->i_len  = sizeof(xfs_disk_dquot_t);
 	logvec->i_type = XLOG_REG_TYPE_DQUOT;
 
-	ASSERT(2 == lip->li_desc->lid_size);
 	qlip->qli_format.qlf_size = 2;
 
 }
@@ -134,7 +133,7 @@
 	 * lock without sleeping, then there must not have been
 	 * anyone in the process of flushing the dquot.
 	 */
-	error = xfs_qm_dqflush(dqp, 0);
+	error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
 	if (error)
 		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
 			__func__, error, dqp);
@@ -237,7 +236,7 @@
 	if (atomic_read(&dqp->q_pincount) > 0)
 		return XFS_ITEM_PINNED;
 
-	if (!xfs_qm_dqlock_nowait(dqp))
+	if (!xfs_dqlock_nowait(dqp))
 		return XFS_ITEM_LOCKED;
 
 	if (!xfs_dqflock_nowait(dqp)) {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 753ed9b..f675f3d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -209,10 +209,10 @@
 
 	/*
 	 * First check if the VFS inode is marked dirty.  All the dirtying
-	 * of non-transactional updates no goes through mark_inode_dirty*,
-	 * which allows us to distinguish beteeen pure timestamp updates
+	 * of non-transactional updates do not go through mark_inode_dirty*,
+	 * which allows us to distinguish between pure timestamp updates
 	 * and i_size updates which need to be caught for fdatasync.
-	 * After that also theck for the dirty state in the XFS inode, which
+	 * After that also check for the dirty state in the XFS inode, which
 	 * might gets cleared when the inode gets written out via the AIL
 	 * or xfs_iflush_cluster.
 	 */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 169380e..dad1a31 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -447,7 +447,7 @@
 xfs_ialloc_ag_select(
 	xfs_trans_t	*tp,		/* transaction pointer */
 	xfs_ino_t	parent,		/* parent directory inode number */
-	mode_t		mode,		/* bits set to indicate file type */
+	umode_t		mode,		/* bits set to indicate file type */
 	int		okalloc)	/* ok to allocate more space */
 {
 	xfs_buf_t	*agbp;		/* allocation group header buffer */
@@ -640,7 +640,7 @@
 xfs_dialloc(
 	xfs_trans_t	*tp,		/* transaction pointer */
 	xfs_ino_t	parent,		/* parent inode (directory) */
-	mode_t		mode,		/* mode bits for new inode */
+	umode_t		mode,		/* mode bits for new inode */
 	int		okalloc,	/* ok to allocate more space */
 	xfs_buf_t	**IO_agbp,	/* in/out ag header's buffer */
 	boolean_t	*alloc_done,	/* true if we needed to replenish
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index bb53854..666a037 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -81,7 +81,7 @@
 xfs_dialloc(
 	struct xfs_trans *tp,		/* transaction pointer */
 	xfs_ino_t	parent,		/* parent inode (directory) */
-	mode_t		mode,		/* mode bits for new inode */
+	umode_t		mode,		/* mode bits for new inode */
 	int		okalloc,	/* ok to allocate more space */
 	struct xfs_buf	**agbp,		/* buf for a.g. inode header */
 	boolean_t	*alloc_done,	/* an allocation was done to replenish
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0fa98b1..3960a06 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -107,7 +107,6 @@
 	struct inode		*inode = container_of(head, struct inode, i_rcu);
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_zone_free(xfs_inode_zone, ip);
 }
 
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 755ee81..9dda7cc 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -961,7 +961,7 @@
 xfs_ialloc(
 	xfs_trans_t	*tp,
 	xfs_inode_t	*pip,
-	mode_t		mode,
+	umode_t		mode,
 	xfs_nlink_t	nlink,
 	xfs_dev_t	rdev,
 	prid_t		prid,
@@ -1002,7 +1002,7 @@
 		return error;
 	ASSERT(ip != NULL);
 
-	ip->i_d.di_mode = (__uint16_t)mode;
+	ip->i_d.di_mode = mode;
 	ip->i_d.di_onlink = 0;
 	ip->i_d.di_nlink = nlink;
 	ASSERT(ip->i_d.di_nlink == nlink);
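
The (__uint16_t) cast dropped above is no longer needed because umode_t is an unsigned short in the kernel headers, so di_mode can be assigned from it directly. A small illustrative sketch (the mode value is made up):

	umode_t		mode = S_IFDIR | 0755;	/* file type and permission bits together */

	ASSERT((mode & S_IFMT) == S_IFDIR);
	ip->i_d.di_mode = mode;			/* no narrowing cast required */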
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index b4cd473..f0e6b15 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -481,7 +481,7 @@
 /*
  * xfs_inode.c prototypes.
  */
-int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t,
+int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
 			   xfs_nlink_t, xfs_dev_t, prid_t, int,
 			   struct xfs_buf **, boolean_t *, xfs_inode_t **);
 
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index abaafdb..cfd6c7f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -437,7 +437,6 @@
 	 * Assert that no attribute-related log flags are set.
 	 */
 	if (!XFS_IFORK_Q(ip)) {
-		ASSERT(nvecs == lip->li_desc->lid_size);
 		iip->ili_format.ilf_size = nvecs;
 		ASSERT(!(iip->ili_format.ilf_fields &
 			 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
@@ -521,7 +520,6 @@
 		break;
 	}
 
-	ASSERT(nvecs == lip->li_desc->lid_size);
 	iip->ili_format.ilf_size = nvecs;
 }
 
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d99a905..76f3ca5 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -559,23 +559,23 @@
 					ops[i].am_flags);
 			break;
 		case ATTR_OP_SET:
-			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+			ops[i].am_error = mnt_want_write_file(parfilp);
 			if (ops[i].am_error)
 				break;
 			ops[i].am_error = xfs_attrmulti_attr_set(
 					dentry->d_inode, attr_name,
 					ops[i].am_attrvalue, ops[i].am_length,
 					ops[i].am_flags);
-			mnt_drop_write(parfilp->f_path.mnt);
+			mnt_drop_write_file(parfilp);
 			break;
 		case ATTR_OP_REMOVE:
-			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+			ops[i].am_error = mnt_want_write_file(parfilp);
 			if (ops[i].am_error)
 				break;
 			ops[i].am_error = xfs_attrmulti_attr_remove(
 					dentry->d_inode, attr_name,
 					ops[i].am_flags);
-			mnt_drop_write(parfilp->f_path.mnt);
+			mnt_drop_write_file(parfilp);
 			break;
 		default:
 			ops[i].am_error = EINVAL;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 54e623b..f9ccb7b 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -454,23 +454,23 @@
 					&ops[i].am_length, ops[i].am_flags);
 			break;
 		case ATTR_OP_SET:
-			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+			ops[i].am_error = mnt_want_write_file(parfilp);
 			if (ops[i].am_error)
 				break;
 			ops[i].am_error = xfs_attrmulti_attr_set(
 					dentry->d_inode, attr_name,
 					compat_ptr(ops[i].am_attrvalue),
 					ops[i].am_length, ops[i].am_flags);
-			mnt_drop_write(parfilp->f_path.mnt);
+			mnt_drop_write_file(parfilp);
 			break;
 		case ATTR_OP_REMOVE:
-			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+			ops[i].am_error = mnt_want_write_file(parfilp);
 			if (ops[i].am_error)
 				break;
 			ops[i].am_error = xfs_attrmulti_attr_remove(
 					dentry->d_inode, attr_name,
 					ops[i].am_flags);
-			mnt_drop_write(parfilp->f_path.mnt);
+			mnt_drop_write_file(parfilp);
 			break;
 		default:
 			ops[i].am_error = EINVAL;
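
Both the native and compat ioctl paths switch from the vfsmount-based write-access helpers to the struct-file-based ones; the conversion is mechanical and identical at every site. A sketch of the pattern (filp stands in for parfilp):

	error = mnt_want_write_file(filp);	/* was: mnt_want_write(filp->f_path.mnt) */
	if (error)
		return error;
	/* ... modify the filesystem ... */
	mnt_drop_write_file(filp);		/* was: mnt_drop_write(filp->f_path.mnt) */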
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 23ce927..f9babd1 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -168,7 +168,7 @@
 xfs_vn_mknod(
 	struct inode	*dir,
 	struct dentry	*dentry,
-	int		mode,
+	umode_t		mode,
 	dev_t		rdev)
 {
 	struct inode	*inode;
@@ -231,7 +231,7 @@
 xfs_vn_create(
 	struct inode	*dir,
 	struct dentry	*dentry,
-	int		mode,
+	umode_t		mode,
 	struct nameidata *nd)
 {
 	return xfs_vn_mknod(dir, dentry, mode, 0);
@@ -241,7 +241,7 @@
 xfs_vn_mkdir(
 	struct inode	*dir,
 	struct dentry	*dentry,
-	int		mode)
+	umode_t		mode)
 {
 	return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
 }
@@ -366,7 +366,7 @@
 	struct xfs_inode *cip = NULL;
 	struct xfs_name	name;
 	int		error;
-	mode_t		mode;
+	umode_t		mode;
 
 	mode = S_IFLNK |
 		(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 34817ad..e2cc356 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -760,38 +760,6 @@
 	INIT_LIST_HEAD(&item->li_cil);
 }
 
-/*
- * Write region vectors to log.  The write happens using the space reservation
- * of the ticket (tic).  It is not a requirement that all writes for a given
- * transaction occur with one call to xfs_log_write(). However, it is important
- * to note that the transaction reservation code makes an assumption about the
- * number of log headers a transaction requires that may be violated if you
- * don't pass all the transaction vectors in one call....
- */
-int
-xfs_log_write(
-	struct xfs_mount	*mp,
-	struct xfs_log_iovec	reg[],
-	int			nentries,
-	struct xlog_ticket	*tic,
-	xfs_lsn_t		*start_lsn)
-{
-	struct log		*log = mp->m_log;
-	int			error;
-	struct xfs_log_vec	vec = {
-		.lv_niovecs = nentries,
-		.lv_iovecp = reg,
-	};
-
-	if (XLOG_FORCED_SHUTDOWN(log))
-		return XFS_ERROR(EIO);
-
-	error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
-	if (error)
-		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
-	return error;
-}
-
 void
 xfs_log_move_tail(xfs_mount_t	*mp,
 		  xfs_lsn_t	tail_lsn)
@@ -1685,7 +1653,7 @@
 	};
 
 	xfs_warn(mp,
-		"xfs_log_write: reservation summary:\n"
+		"xlog_write: reservation summary:\n"
 		"  trans type  = %s (%u)\n"
 		"  unit res    = %d bytes\n"
 		"  current res = %d bytes\n"
@@ -1714,7 +1682,7 @@
 	}
 
 	xfs_alert_tag(mp, XFS_PTAG_LOGRES,
-		"xfs_log_write: reservation ran out. Need to up reservation");
+		"xlog_write: reservation ran out. Need to up reservation");
 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 }
 
@@ -1968,23 +1936,21 @@
 	*start_lsn = 0;
 
 	len = xlog_write_calc_vec_length(ticket, log_vector);
-	if (log->l_cilp) {
-		/*
-		 * Region headers and bytes are already accounted for.
-		 * We only need to take into account start records and
-		 * split regions in this function.
-		 */
-		if (ticket->t_flags & XLOG_TIC_INITED)
-			ticket->t_curr_res -= sizeof(xlog_op_header_t);
 
-		/*
-		 * Commit record headers need to be accounted for. These
-		 * come in as separate writes so are easy to detect.
-		 */
-		if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
-			ticket->t_curr_res -= sizeof(xlog_op_header_t);
-	} else
-		ticket->t_curr_res -= len;
+	/*
+	 * Region headers and bytes are already accounted for.
+	 * We only need to take into account start records and
+	 * split regions in this function.
+	 */
+	if (ticket->t_flags & XLOG_TIC_INITED)
+		ticket->t_curr_res -= sizeof(xlog_op_header_t);
+
+	/*
+	 * Commit record headers need to be accounted for. These
+	 * come in as separate writes so are easy to detect.
+	 */
+	if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
+		ticket->t_curr_res -= sizeof(xlog_op_header_t);
 
 	if (ticket->t_curr_res < 0)
 		xlog_print_tic_res(log->l_mp, ticket);
@@ -2931,8 +2897,7 @@
 
 	XFS_STATS_INC(xs_log_force);
 
-	if (log->l_cilp)
-		xlog_cil_force(log);
+	xlog_cil_force(log);
 
 	spin_lock(&log->l_icloglock);
 
@@ -3081,11 +3046,9 @@
 
 	XFS_STATS_INC(xs_log_force);
 
-	if (log->l_cilp) {
-		lsn = xlog_cil_force_lsn(log, lsn);
-		if (lsn == NULLCOMMITLSN)
-			return 0;
-	}
+	lsn = xlog_cil_force_lsn(log, lsn);
+	if (lsn == NULLCOMMITLSN)
+		return 0;
 
 try_again:
 	spin_lock(&log->l_icloglock);
@@ -3653,7 +3616,7 @@
 	 * completed transactions are flushed to disk with the xfs_log_force()
 	 * call below.
 	 */
-	if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
+	if (!logerror)
 		xlog_cil_force(log);
 
 	/*
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 3f7bf45..2aee3b2 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -174,11 +174,6 @@
 			  __uint8_t	   clientid,
 			  uint		   flags,
 			  uint		   t_type);
-int	  xfs_log_write(struct xfs_mount *mp,
-			xfs_log_iovec_t  region[],
-			int		 nentries,
-			struct xlog_ticket *ticket,
-			xfs_lsn_t	 *start_lsn);
 int	  xfs_log_unmount_write(struct xfs_mount *mp);
 void      xfs_log_unmount(struct xfs_mount *mp);
 int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);
@@ -189,8 +184,7 @@
 struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
 void	  xfs_log_ticket_put(struct xlog_ticket *ticket);
 
-void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
-				struct xfs_log_vec *log_vector,
+int	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
 				xfs_lsn_t *commit_lsn, int flags);
 bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
 
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index c7755d5..d4fadbe 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -32,10 +32,7 @@
 #include "xfs_discard.h"
 
 /*
- * Perform initial CIL structure initialisation. If the CIL is not
- * enabled in this filesystem, ensure the log->l_cilp is null so
- * we can check this conditional to determine if we are doing delayed
- * logging or not.
+ * Perform initial CIL structure initialisation.
  */
 int
 xlog_cil_init(
@@ -44,10 +41,6 @@
 	struct xfs_cil	*cil;
 	struct xfs_cil_ctx *ctx;
 
-	log->l_cilp = NULL;
-	if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
-		return 0;
-
 	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
 	if (!cil)
 		return ENOMEM;
@@ -80,9 +73,6 @@
 xlog_cil_destroy(
 	struct log	*log)
 {
-	if (!log->l_cilp)
-		return;
-
 	if (log->l_cilp->xc_ctx) {
 		if (log->l_cilp->xc_ctx->ticket)
 			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
@@ -137,9 +127,6 @@
 xlog_cil_init_post_recovery(
 	struct log	*log)
 {
-	if (!log->l_cilp)
-		return;
-
 	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
 	log->l_cilp->xc_ctx->sequence = 1;
 	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
@@ -172,37 +159,73 @@
  * format the regions into the iclog as though they are being formatted
  * directly out of the objects themselves.
  */
-static void
-xlog_cil_format_items(
-	struct log		*log,
-	struct xfs_log_vec	*log_vector)
+static struct xfs_log_vec *
+xlog_cil_prepare_log_vecs(
+	struct xfs_trans	*tp)
 {
-	struct xfs_log_vec *lv;
+	struct xfs_log_item_desc *lidp;
+	struct xfs_log_vec	*lv = NULL;
+	struct xfs_log_vec	*ret_lv = NULL;
 
-	ASSERT(log_vector);
-	for (lv = log_vector; lv; lv = lv->lv_next) {
+
+	/* Bail out if we didn't find a log item.  */
+	if (list_empty(&tp->t_items)) {
+		ASSERT(0);
+		return NULL;
+	}
+
+	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+		struct xfs_log_vec *new_lv;
 		void	*ptr;
 		int	index;
 		int	len = 0;
+		uint	niovecs;
+
+		/* Skip items which aren't dirty in this transaction. */
+		if (!(lidp->lid_flags & XFS_LID_DIRTY))
+			continue;
+
+		/* Skip items that do not have any vectors for writing */
+		niovecs = IOP_SIZE(lidp->lid_item);
+		if (!niovecs)
+			continue;
+
+		new_lv = kmem_zalloc(sizeof(*new_lv) +
+				niovecs * sizeof(struct xfs_log_iovec),
+				KM_SLEEP);
+
+		/* The allocated iovec region lies beyond the log vector. */
+		new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
+		new_lv->lv_niovecs = niovecs;
+		new_lv->lv_item = lidp->lid_item;
 
 		/* build the vector array and calculate its length */
-		IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
-		for (index = 0; index < lv->lv_niovecs; index++)
-			len += lv->lv_iovecp[index].i_len;
+		IOP_FORMAT(new_lv->lv_item, new_lv->lv_iovecp);
+		for (index = 0; index < new_lv->lv_niovecs; index++)
+			len += new_lv->lv_iovecp[index].i_len;
 
-		lv->lv_buf_len = len;
-		lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
-		ptr = lv->lv_buf;
+		new_lv->lv_buf_len = len;
+		new_lv->lv_buf = kmem_alloc(new_lv->lv_buf_len,
+				KM_SLEEP|KM_NOFS);
+		ptr = new_lv->lv_buf;
 
-		for (index = 0; index < lv->lv_niovecs; index++) {
-			struct xfs_log_iovec *vec = &lv->lv_iovecp[index];
+		for (index = 0; index < new_lv->lv_niovecs; index++) {
+			struct xfs_log_iovec *vec = &new_lv->lv_iovecp[index];
 
 			memcpy(ptr, vec->i_addr, vec->i_len);
 			vec->i_addr = ptr;
 			ptr += vec->i_len;
 		}
-		ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
+		ASSERT(ptr == new_lv->lv_buf + new_lv->lv_buf_len);
+
+		if (!ret_lv)
+			ret_lv = new_lv;
+		else
+			lv->lv_next = new_lv;
+		lv = new_lv;
 	}
+
+	return ret_lv;
 }
 
 /*
@@ -256,7 +279,7 @@
  * Insert the log items into the CIL and calculate the difference in space
  * consumed by the item. Add the space to the checkpoint ticket and calculate
  * if the change requires additional log metadata. If it does, take that space
- * as well. Remove the amount of space we addded to the checkpoint ticket from
+ * as well. Remove the amount of space we added to the checkpoint ticket from
  * the current transaction ticket so that the accounting works out correctly.
  */
 static void
@@ -635,28 +658,30 @@
  * background commit, returns without it held once background commits are
  * allowed again.
  */
-void
+int
 xfs_log_commit_cil(
 	struct xfs_mount	*mp,
 	struct xfs_trans	*tp,
-	struct xfs_log_vec	*log_vector,
 	xfs_lsn_t		*commit_lsn,
 	int			flags)
 {
 	struct log		*log = mp->m_log;
 	int			log_flags = 0;
 	int			push = 0;
+	struct xfs_log_vec	*log_vector;
 
 	if (flags & XFS_TRANS_RELEASE_LOG_RES)
 		log_flags = XFS_LOG_REL_PERM_RESERV;
 
 	/*
-	 * do all the hard work of formatting items (including memory
+	 * Do all the hard work of formatting items (including memory
 	 * allocation) outside the CIL context lock. This prevents stalling CIL
 	 * pushes when we are low on memory and a transaction commit spends a
 	 * lot of time in memory reclaim.
 	 */
-	xlog_cil_format_items(log, log_vector);
+	log_vector = xlog_cil_prepare_log_vecs(tp);
+	if (!log_vector)
+		return ENOMEM;
 
 	/* lock out background commit */
 	down_read(&log->l_cilp->xc_ctx_lock);
@@ -709,6 +734,7 @@
 	 */
 	if (push)
 		xlog_cil_push(log, 0);
+	return 0;
 }
 
 /*
@@ -786,8 +812,6 @@
 {
 	struct xfs_cil_ctx *ctx;
 
-	if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
-		return false;
 	if (list_empty(&lip->li_cil))
 		return false;
 
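
xlog_cil_prepare_log_vecs() sizes each allocation so the iovec array sits in the same memory block, directly after the log vector header; lv_iovecp is simply pointed past the struct. Restating that layout trick from the hunk above with comments added:

	/* one allocation: [ struct xfs_log_vec ][ niovecs * struct xfs_log_iovec ] */
	new_lv = kmem_zalloc(sizeof(*new_lv) +
			     niovecs * sizeof(struct xfs_log_iovec), KM_SLEEP);
	new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];	/* iovecs follow the header */
	new_lv->lv_niovecs = niovecs;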
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bb24dac..19f69e2 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -219,7 +219,6 @@
 #define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
 						   must be synchronous except
 						   for space allocations */
-#define XFS_MOUNT_DELAYLOG	(1ULL << 1)	/* delayed logging is enabled */
 #define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
 #define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
 						   operations, typically for
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 0bbb1a4..671f37e 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -154,12 +154,17 @@
 xfs_qm_destroy(
 	struct xfs_qm	*xqm)
 {
-	struct xfs_dquot *dqp, *n;
 	int		hsize, i;
 
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
+
 	unregister_shrinker(&xfs_qm_shaker);
+
+	mutex_lock(&xqm->qm_dqfrlist_lock);
+	ASSERT(list_empty(&xqm->qm_dqfrlist));
+	mutex_unlock(&xqm->qm_dqfrlist_lock);
+
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
@@ -171,17 +176,6 @@
 	xqm->qm_grp_dqhtable = NULL;
 	xqm->qm_dqhashmask = 0;
 
-	/* frlist cleanup */
-	mutex_lock(&xqm->qm_dqfrlist_lock);
-	list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
-		xfs_dqlock(dqp);
-		list_del_init(&dqp->q_freelist);
-		xfs_Gqm->qm_dqfrlist_cnt--;
-		xfs_dqunlock(dqp);
-		xfs_qm_dqdestroy(dqp);
-	}
-	mutex_unlock(&xqm->qm_dqfrlist_lock);
-	mutex_destroy(&xqm->qm_dqfrlist_lock);
 	kmem_free(xqm);
 }
 
@@ -232,34 +226,10 @@
 xfs_qm_rele_quotafs_ref(
 	struct xfs_mount *mp)
 {
-	xfs_dquot_t	*dqp, *n;
-
 	ASSERT(xfs_Gqm);
 	ASSERT(xfs_Gqm->qm_nrefs > 0);
 
 	/*
-	 * Go thru the freelist and destroy all inactive dquots.
-	 */
-	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-
-	list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
-		xfs_dqlock(dqp);
-		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-			ASSERT(dqp->q_mount == NULL);
-			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-			ASSERT(list_empty(&dqp->q_hashlist));
-			ASSERT(list_empty(&dqp->q_mplist));
-			list_del_init(&dqp->q_freelist);
-			xfs_Gqm->qm_dqfrlist_cnt--;
-			xfs_dqunlock(dqp);
-			xfs_qm_dqdestroy(dqp);
-		} else {
-			xfs_dqunlock(dqp);
-		}
-	}
-	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-
-	/*
 	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
 	 * be restarted.
 	 */
@@ -415,8 +385,7 @@
  */
 STATIC int
 xfs_qm_dqflush_all(
-	struct xfs_mount	*mp,
-	int			sync_mode)
+	struct xfs_mount	*mp)
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	int			recl;
@@ -429,7 +398,8 @@
 	mutex_lock(&q->qi_dqlist_lock);
 	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
 		xfs_dqlock(dqp);
-		if (! XFS_DQ_IS_DIRTY(dqp)) {
+		if ((dqp->dq_flags & XFS_DQ_FREEING) ||
+		    !XFS_DQ_IS_DIRTY(dqp)) {
 			xfs_dqunlock(dqp);
 			continue;
 		}
@@ -444,14 +414,14 @@
 			 * out immediately.  We'll be able to acquire
 			 * the flush lock when the I/O completes.
 			 */
-			xfs_qm_dqflock_pushbuf_wait(dqp);
+			xfs_dqflock_pushbuf_wait(dqp);
 		}
 		/*
 		 * Let go of the mplist lock. We don't want to hold it
 		 * across a disk write.
 		 */
 		mutex_unlock(&q->qi_dqlist_lock);
-		error = xfs_qm_dqflush(dqp, sync_mode);
+		error = xfs_qm_dqflush(dqp, 0);
 		xfs_dqunlock(dqp);
 		if (error)
 			return error;
@@ -468,6 +438,7 @@
 	/* return ! busy */
 	return 0;
 }
+
 /*
  * Release the group dquot pointers the user dquots may be
  * carrying around as a hint. mplist is locked on entry and exit.
@@ -478,31 +449,26 @@
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_dquot	*dqp, *gdqp;
-	int			nrecl;
 
  again:
 	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
 	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
 		xfs_dqlock(dqp);
-		if ((gdqp = dqp->q_gdquot)) {
-			xfs_dqlock(gdqp);
-			dqp->q_gdquot = NULL;
+		if (dqp->dq_flags & XFS_DQ_FREEING) {
+			xfs_dqunlock(dqp);
+			mutex_unlock(&q->qi_dqlist_lock);
+			delay(1);
+			mutex_lock(&q->qi_dqlist_lock);
+			goto again;
 		}
+
+		gdqp = dqp->q_gdquot;
+		if (gdqp)
+			dqp->q_gdquot = NULL;
 		xfs_dqunlock(dqp);
 
-		if (gdqp) {
-			/*
-			 * Can't hold the mplist lock across a dqput.
-			 * XXXmust convert to marker based iterations here.
-			 */
-			nrecl = q->qi_dqreclaims;
-			mutex_unlock(&q->qi_dqlist_lock);
-			xfs_qm_dqput(gdqp);
-
-			mutex_lock(&q->qi_dqlist_lock);
-			if (nrecl != q->qi_dqreclaims)
-				goto again;
-		}
+		if (gdqp)
+			xfs_qm_dqrele(gdqp);
 	}
 }
 
@@ -520,8 +486,8 @@
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_dquot	*dqp, *n;
 	uint			dqtype;
-	int			nrecl;
-	int			nmisses;
+	int			nmisses = 0;
+	LIST_HEAD		(dispose_list);
 
 	if (!q)
 		return 0;
@@ -540,47 +506,26 @@
 	 */
 	xfs_qm_detach_gdquots(mp);
 
-      again:
-	nmisses = 0;
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
 	/*
-	 * Try to get rid of all of the unwanted dquots. The idea is to
-	 * get them off mplist and hashlist, but leave them on freelist.
+	 * Try to get rid of all of the unwanted dquots.
 	 */
 	list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
-		/*
-		 * It's OK to look at the type without taking dqlock here.
-		 * We're holding the mplist lock here, and that's needed for
-		 * a dqreclaim.
-		 */
-		if ((dqp->dq_flags & dqtype) == 0)
-			continue;
-
-		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
-			nrecl = q->qi_dqreclaims;
-			mutex_unlock(&q->qi_dqlist_lock);
-			mutex_lock(&dqp->q_hash->qh_lock);
-			mutex_lock(&q->qi_dqlist_lock);
-
-			/*
-			 * XXXTheoretically, we can get into a very long
-			 * ping pong game here.
-			 * No one can be adding dquots to the mplist at
-			 * this point, but somebody might be taking things off.
-			 */
-			if (nrecl != q->qi_dqreclaims) {
-				mutex_unlock(&dqp->q_hash->qh_lock);
-				goto again;
-			}
+		xfs_dqlock(dqp);
+		if ((dqp->dq_flags & dqtype) != 0 &&
+		    !(dqp->dq_flags & XFS_DQ_FREEING)) {
+			if (dqp->q_nrefs == 0) {
+				dqp->dq_flags |= XFS_DQ_FREEING;
+				list_move_tail(&dqp->q_mplist, &dispose_list);
+			} else
+				nmisses++;
 		}
-
-		/*
-		 * Take the dquot off the mplist and hashlist. It may remain on
-		 * freelist in INACTIVE state.
-		 */
-		nmisses += xfs_qm_dqpurge(dqp);
+		xfs_dqunlock(dqp);
 	}
 	mutex_unlock(&q->qi_dqlist_lock);
+
+	list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist)
+		xfs_qm_dqpurge(dqp);
+
 	return nmisses;
 }
 
@@ -648,12 +593,9 @@
 		 */
 		dqp = udqhint->q_gdquot;
 		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
-			xfs_dqlock(dqp);
-			XFS_DQHOLD(dqp);
 			ASSERT(*IO_idqpp == NULL);
-			*IO_idqpp = dqp;
 
-			xfs_dqunlock(dqp);
+			*IO_idqpp = xfs_qm_dqhold(dqp);
 			xfs_dqunlock(udqhint);
 			return 0;
 		}
@@ -693,11 +635,7 @@
 
 /*
  * Given a udquot and gdquot, attach a ptr to the group dquot in the
- * udquot as a hint for future lookups. The idea sounds simple, but the
- * execution isn't, because the udquot might have a group dquot attached
- * already and getting rid of that gets us into lock ordering constraints.
- * The process is complicated more by the fact that the dquots may or may not
- * be locked on entry.
+ * udquot as a hint for future lookups.
  */
 STATIC void
 xfs_qm_dqattach_grouphint(
@@ -708,45 +646,17 @@
 
 	xfs_dqlock(udq);
 
-	if ((tmp = udq->q_gdquot)) {
-		if (tmp == gdq) {
-			xfs_dqunlock(udq);
-			return;
-		}
+	tmp = udq->q_gdquot;
+	if (tmp) {
+		if (tmp == gdq)
+			goto done;
 
 		udq->q_gdquot = NULL;
-		/*
-		 * We can't keep any dqlocks when calling dqrele,
-		 * because the freelist lock comes before dqlocks.
-		 */
-		xfs_dqunlock(udq);
-		/*
-		 * we took a hard reference once upon a time in dqget,
-		 * so give it back when the udquot no longer points at it
-		 * dqput() does the unlocking of the dquot.
-		 */
 		xfs_qm_dqrele(tmp);
-
-		xfs_dqlock(udq);
-		xfs_dqlock(gdq);
-
-	} else {
-		ASSERT(XFS_DQ_IS_LOCKED(udq));
-		xfs_dqlock(gdq);
 	}
 
-	ASSERT(XFS_DQ_IS_LOCKED(udq));
-	ASSERT(XFS_DQ_IS_LOCKED(gdq));
-	/*
-	 * Somebody could have attached a gdquot here,
-	 * when we dropped the uqlock. If so, just do nothing.
-	 */
-	if (udq->q_gdquot == NULL) {
-		XFS_DQHOLD(gdq);
-		udq->q_gdquot = gdq;
-	}
-
-	xfs_dqunlock(gdq);
+	udq->q_gdquot = xfs_qm_dqhold(gdq);
+done:
 	xfs_dqunlock(udq);
 }
 
@@ -813,17 +723,13 @@
 		ASSERT(ip->i_gdquot);
 
 		/*
-		 * We may or may not have the i_udquot locked at this point,
-		 * but this check is OK since we don't depend on the i_gdquot to
-		 * be accurate 100% all the time. It is just a hint, and this
-		 * will succeed in general.
+		 * We do not have i_udquot locked at this point, but this check
+		 * is OK since we don't depend on the i_gdquot to be accurate
+		 * 100% all the time. It is just a hint, and this will
+		 * succeed in general.
 		 */
-		if (ip->i_udquot->q_gdquot == ip->i_gdquot)
-			goto done;
-		/*
-		 * Attach i_gdquot to the gdquot hint inside the i_udquot.
-		 */
-		xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
+		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
+			xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
 	}
 
  done:
@@ -879,100 +785,6 @@
 	}
 }
 
-int
-xfs_qm_sync(
-	struct xfs_mount	*mp,
-	int			flags)
-{
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	int			recl, restarts;
-	struct xfs_dquot	*dqp;
-	int			error;
-
-	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
-		return 0;
-
-	restarts = 0;
-
-  again:
-	mutex_lock(&q->qi_dqlist_lock);
-	/*
-	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
-	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
-	 * when we have the mplist lock, we know that dquots will be consistent
-	 * as long as we have it locked.
-	 */
-	if (!XFS_IS_QUOTA_ON(mp)) {
-		mutex_unlock(&q->qi_dqlist_lock);
-		return 0;
-	}
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
-	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
-		/*
-		 * If this is vfs_sync calling, then skip the dquots that
-		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
-		 * This is very similar to what xfs_sync does with inodes.
-		 */
-		if (flags & SYNC_TRYLOCK) {
-			if (!XFS_DQ_IS_DIRTY(dqp))
-				continue;
-			if (!xfs_qm_dqlock_nowait(dqp))
-				continue;
-		} else {
-			xfs_dqlock(dqp);
-		}
-
-		/*
-		 * Now, find out for sure if this dquot is dirty or not.
-		 */
-		if (! XFS_DQ_IS_DIRTY(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
-
-		/* XXX a sentinel would be better */
-		recl = q->qi_dqreclaims;
-		if (!xfs_dqflock_nowait(dqp)) {
-			if (flags & SYNC_TRYLOCK) {
-				xfs_dqunlock(dqp);
-				continue;
-			}
-			/*
-			 * If we can't grab the flush lock then if the caller
-			 * really wanted us to give this our best shot, so
-			 * see if we can give a push to the buffer before we wait
-			 * on the flush lock. At this point, we know that
-			 * even though the dquot is being flushed,
-			 * it has (new) dirty data.
-			 */
-			xfs_qm_dqflock_pushbuf_wait(dqp);
-		}
-		/*
-		 * Let go of the mplist lock. We don't want to hold it
-		 * across a disk write
-		 */
-		mutex_unlock(&q->qi_dqlist_lock);
-		error = xfs_qm_dqflush(dqp, flags);
-		xfs_dqunlock(dqp);
-		if (error && XFS_FORCED_SHUTDOWN(mp))
-			return 0;	/* Need to prevent umount failure */
-		else if (error)
-			return error;
-
-		mutex_lock(&q->qi_dqlist_lock);
-		if (recl != q->qi_dqreclaims) {
-			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
-				break;
-
-			mutex_unlock(&q->qi_dqlist_lock);
-			goto again;
-		}
-	}
-
-	mutex_unlock(&q->qi_dqlist_lock);
-	return 0;
-}
-
 /*
  * The hash chains and the mplist use the same xfs_dqhash structure as
  * their list head, but we can take the mplist qh_lock and one of the
@@ -1034,18 +846,21 @@
 	/*
 	 * We try to get the limits from the superuser's limits fields.
 	 * This is quite hacky, but it is standard quota practice.
+	 *
 	 * We look at the USR dquot with id == 0 first, but if user quotas
 	 * are not enabled we go to the GRP dquot with id == 0.
 	 * We don't really care to keep separate default limits for user
 	 * and group quotas, at least not at this point.
+	 *
+	 * Since we may not have done a quotacheck by this point, just read
+	 * the dquot without attaching it to any hashtables or lists.
 	 */
-	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
-			     XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : 
-			     (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
-				XFS_DQ_PROJ),
-			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
-			     &dqp);
-	if (! error) {
+	error = xfs_qm_dqread(mp, 0,
+			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
+			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
+			  XFS_DQ_PROJ),
+			XFS_QMOPT_DOWARN, &dqp);
+	if (!error) {
 		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
 
 		/*
@@ -1072,11 +887,6 @@
 		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
 		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
  
-		/*
-		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
-		 * we don't want this dquot cached. We haven't done a
-		 * quotacheck yet, and quotacheck doesn't like incore dquots.
-		 */
 		xfs_qm_dqdestroy(dqp);
 	} else {
 		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -1661,7 +1471,7 @@
 	 * successfully.
 	 */
 	if (!error)
-		error = xfs_qm_dqflush_all(mp, 0);
+		error = xfs_qm_dqflush_all(mp);
 
 	/*
 	 * We can get this error if we couldn't do a dquot allocation inside
@@ -1793,59 +1603,33 @@
 
 
 /*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
+ * Pop the least recently used dquot off the freelist and recycle it.
  */
-STATIC xfs_dquot_t *
+STATIC struct xfs_dquot *
 xfs_qm_dqreclaim_one(void)
 {
-	xfs_dquot_t	*dqpout;
-	xfs_dquot_t	*dqp;
-	int		restarts;
-	int		startagain;
+	struct xfs_dquot	*dqp;
+	int			restarts = 0;
 
-	restarts = 0;
-	dqpout = NULL;
-
-	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
-again:
-	startagain = 0;
 	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-
+restart:
 	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
 		struct xfs_mount *mp = dqp->q_mount;
-		xfs_dqlock(dqp);
+
+		if (!xfs_dqlock_nowait(dqp))
+			continue;
 
 		/*
-		 * We are racing with dqlookup here. Naturally we don't
-		 * want to reclaim a dquot that lookup wants. We release the
-		 * freelist lock and start over, so that lookup will grab
-		 * both the dquot and the freelistlock.
+		 * This dquot has already been grabbed by dqlookup.
+		 * Remove it from the freelist and try again.
 		 */
-		if (dqp->dq_flags & XFS_DQ_WANT) {
-			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-
+		if (dqp->q_nrefs) {
 			trace_xfs_dqreclaim_want(dqp);
 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-			restarts++;
-			startagain = 1;
-			goto dqunlock;
-		}
 
-		/*
-		 * If the dquot is inactive, we are assured that it is
-		 * not on the mplist or the hashlist, and that makes our
-		 * life easier.
-		 */
-		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-			ASSERT(mp == NULL);
-			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-			ASSERT(list_empty(&dqp->q_hashlist));
-			ASSERT(list_empty(&dqp->q_mplist));
 			list_del_init(&dqp->q_freelist);
 			xfs_Gqm->qm_dqfrlist_cnt--;
-			dqpout = dqp;
-			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
+			restarts++;
 			goto dqunlock;
 		}
 
@@ -1874,64 +1658,49 @@
 			 * We flush it delayed write, so don't bother
 			 * releasing the freelist lock.
 			 */
-			error = xfs_qm_dqflush(dqp, 0);
+			error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
 			if (error) {
 				xfs_warn(mp, "%s: dquot %p flush failed",
 					__func__, dqp);
 			}
 			goto dqunlock;
 		}
+		xfs_dqfunlock(dqp);
 
 		/*
-		 * We're trying to get the hashlock out of order. This races
-		 * with dqlookup; so, we giveup and goto the next dquot if
-		 * we couldn't get the hashlock. This way, we won't starve
-		 * a dqlookup process that holds the hashlock that is
-		 * waiting for the freelist lock.
+		 * Prevent lookup now that we are going to reclaim the dquot.
+		 * Once XFS_DQ_FREEING is set lookup won't touch the dquot,
+		 * thus we can drop the lock now.
 		 */
-		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
-			restarts++;
-			goto dqfunlock;
-		}
+		dqp->dq_flags |= XFS_DQ_FREEING;
+		xfs_dqunlock(dqp);
 
-		/*
-		 * This races with dquot allocation code as well as dqflush_all
-		 * and reclaim code. So, if we failed to grab the mplist lock,
-		 * giveup everything and start over.
-		 */
-		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
-			restarts++;
-			startagain = 1;
-			goto qhunlock;
-		}
+		mutex_lock(&dqp->q_hash->qh_lock);
+		list_del_init(&dqp->q_hashlist);
+		dqp->q_hash->qh_version++;
+		mutex_unlock(&dqp->q_hash->qh_lock);
 
-		ASSERT(dqp->q_nrefs == 0);
+		mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
 		list_del_init(&dqp->q_mplist);
 		mp->m_quotainfo->qi_dquots--;
 		mp->m_quotainfo->qi_dqreclaims++;
-		list_del_init(&dqp->q_hashlist);
-		dqp->q_hash->qh_version++;
+		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+
+		ASSERT(dqp->q_nrefs == 0);
 		list_del_init(&dqp->q_freelist);
 		xfs_Gqm->qm_dqfrlist_cnt--;
-		dqpout = dqp;
-		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
-qhunlock:
-		mutex_unlock(&dqp->q_hash->qh_lock);
-dqfunlock:
-		xfs_dqfunlock(dqp);
+
+		mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+		return dqp;
 dqunlock:
 		xfs_dqunlock(dqp);
-		if (dqpout)
-			break;
 		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
 			break;
-		if (startagain) {
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			goto again;
-		}
+		goto restart;
 	}
+
 	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-	return dqpout;
+	return NULL;
 }
 
 /*
@@ -2151,10 +1920,7 @@
 			 * this to caller
 			 */
 			ASSERT(ip->i_udquot);
-			uq = ip->i_udquot;
-			xfs_dqlock(uq);
-			XFS_DQHOLD(uq);
-			xfs_dqunlock(uq);
+			uq = xfs_qm_dqhold(ip->i_udquot);
 		}
 	}
 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
@@ -2175,10 +1941,7 @@
 			xfs_ilock(ip, lockflags);
 		} else {
 			ASSERT(ip->i_gdquot);
-			gq = ip->i_gdquot;
-			xfs_dqlock(gq);
-			XFS_DQHOLD(gq);
-			xfs_dqunlock(gq);
+			gq = xfs_qm_dqhold(ip->i_gdquot);
 		}
 	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
 		if (xfs_get_projid(ip) != prid) {
@@ -2198,10 +1961,7 @@
 			xfs_ilock(ip, lockflags);
 		} else {
 			ASSERT(ip->i_gdquot);
-			gq = ip->i_gdquot;
-			xfs_dqlock(gq);
-			XFS_DQHOLD(gq);
-			xfs_dqunlock(gq);
+			gq = xfs_qm_dqhold(ip->i_gdquot);
 		}
 	}
 	if (uq)
@@ -2251,14 +2011,10 @@
 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
 
 	/*
-	 * Take an extra reference, because the inode
-	 * is going to keep this dquot pointer even
-	 * after the trans_commit.
+	 * Take an extra reference, because the inode is going to keep
+	 * this dquot pointer even after the trans_commit.
 	 */
-	xfs_dqlock(newdq);
-	XFS_DQHOLD(newdq);
-	xfs_dqunlock(newdq);
-	*IO_olddq = newdq;
+	*IO_olddq = xfs_qm_dqhold(newdq);
 
 	return prevdq;
 }
@@ -2390,25 +2146,21 @@
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
 	if (udqp) {
-		xfs_dqlock(udqp);
-		XFS_DQHOLD(udqp);
-		xfs_dqunlock(udqp);
 		ASSERT(ip->i_udquot == NULL);
-		ip->i_udquot = udqp;
 		ASSERT(XFS_IS_UQUOTA_ON(mp));
 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
+
+		ip->i_udquot = xfs_qm_dqhold(udqp);
 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
 	if (gdqp) {
-		xfs_dqlock(gdqp);
-		XFS_DQHOLD(gdqp);
-		xfs_dqunlock(gdqp);
 		ASSERT(ip->i_gdquot == NULL);
-		ip->i_gdquot = gdqp;
 		ASSERT(XFS_IS_OQUOTA_ON(mp));
 		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
 			ip->i_d.di_gid : xfs_get_projid(ip)) ==
 				be32_to_cpu(gdqp->q_core.d_id));
+
+		ip->i_gdquot = xfs_qm_dqhold(gdqp);
 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
 }
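
The XFS_DQ_FREEING flag introduced in this series replaces the old XFS_DQ_WANT/XFS_DQ_INACTIVE handshake: reclaim and purge set it under the dquot lock, and every other dquot walker backs off when it sees the flag. The check is the same everywhere; a sketch matching the xfs_qm_dqflush_all() hunk:

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		/* dquot is being torn down by reclaim/purge - leave it alone */
		xfs_dqunlock(dqp);
		continue;
	}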
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 43b9abe..9b4f3ad 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -33,12 +33,6 @@
 extern kmem_zone_t	*qm_dqtrxzone;
 
 /*
- * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
- * iterate over the mountpt's dquot list in one call.
- */
-#define XFS_QM_SYNC_MAX_RESTARTS	7
-
-/*
  * Ditto, for xfs_qm_dqreclaim_one.
  */
 #define XFS_QM_RECLAIM_MAX_RESTARTS	4
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index a595f29..8a0807e 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -87,8 +87,7 @@
 #define XFS_DQ_PROJ		0x0002		/* project quota */
 #define XFS_DQ_GROUP		0x0004		/* a group quota */
 #define XFS_DQ_DIRTY		0x0008		/* dquot is dirty */
-#define XFS_DQ_WANT		0x0010		/* for lookup/reclaim race */
-#define XFS_DQ_INACTIVE		0x0020		/* dq off mplist & hashlist */
+#define XFS_DQ_FREEING		0x0010		/* dquot is being torn down */
 
 #define XFS_DQ_ALLTYPES		(XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
 
@@ -97,8 +96,7 @@
 	{ XFS_DQ_PROJ,		"PROJ" }, \
 	{ XFS_DQ_GROUP,		"GROUP" }, \
 	{ XFS_DQ_DIRTY,		"DIRTY" }, \
-	{ XFS_DQ_WANT,		"WANT" }, \
-	{ XFS_DQ_INACTIVE,	"INACTIVE" }
+	{ XFS_DQ_FREEING,	"FREEING" }
 
 /*
  * In the worst case, when both user and group quotas are on,
@@ -199,7 +197,6 @@
 #define XFS_QMOPT_UQUOTA	0x0000004 /* user dquot requested */
 #define XFS_QMOPT_PQUOTA	0x0000008 /* project dquot requested */
 #define XFS_QMOPT_FORCE_RES	0x0000010 /* ignore quota limits */
-#define XFS_QMOPT_DQSUSER	0x0000020 /* don't cache super users dquot */
 #define XFS_QMOPT_SBVERSION	0x0000040 /* change superblock version num */
 #define XFS_QMOPT_DOWARN        0x0000400 /* increase warning cnt if needed */
 #define XFS_QMOPT_DQREPAIR	0x0001000 /* repair dquot if damaged */
@@ -326,7 +323,6 @@
 extern void xfs_qm_dqdetach(struct xfs_inode *);
 extern void xfs_qm_dqrele(struct xfs_dquot *);
 extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
-extern int xfs_qm_sync(struct xfs_mount *, int);
 extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
 extern void xfs_qm_mount_quotas(struct xfs_mount *);
 extern void xfs_qm_unmount(struct xfs_mount *);
@@ -366,10 +362,6 @@
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
-{
-	return 0;
-}
 #define xfs_qm_newmount(mp, a, b)					(0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 3eca58f..281961c 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -199,7 +199,6 @@
 	mp->m_flags |= XFS_MOUNT_BARRIER;
 	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
 	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
-	mp->m_flags |= XFS_MOUNT_DELAYLOG;
 
 	/*
 	 * These can be overridden by the mount option parsing.
@@ -353,11 +352,11 @@
 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
 		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
-			mp->m_flags |= XFS_MOUNT_DELAYLOG;
-		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
-			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
 			xfs_warn(mp,
-	"nodelaylog is deprecated and will be removed in Linux 3.3");
+	"delaylog is the default now, option is deprecated.");
+		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
+			xfs_warn(mp,
+	"nodelaylog support has been removed, option is deprecated.");
 		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
 			mp->m_flags |= XFS_MOUNT_DISCARD;
 		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
@@ -395,13 +394,6 @@
 		return EINVAL;
 	}
 
-	if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
-	    !(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
-		xfs_warn(mp,
-	"the discard option is incompatible with the nodelaylog option");
-		return EINVAL;
-	}
-
 #ifndef CONFIG_XFS_QUOTA
 	if (XFS_IS_QUOTA_RUNNING(mp)) {
 		xfs_warn(mp, "quota support not available in this kernel.");
@@ -501,7 +493,6 @@
 		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
 		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
 		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
-		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
 		{ XFS_MOUNT_DISCARD,		"," MNTOPT_DISCARD },
 		{ 0, NULL }
 	};
@@ -869,27 +860,6 @@
 }
 
 STATIC int
-xfs_log_inode(
-	struct xfs_inode	*ip)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_trans	*tp;
-	int			error;
-
-	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp, 0);
-		return error;
-	}
-
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	return xfs_trans_commit(tp, 0);
-}
-
-STATIC int
 xfs_fs_write_inode(
 	struct inode		*inode,
 	struct writeback_control *wbc)
@@ -902,10 +872,8 @@
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -XFS_ERROR(EIO);
-	if (!ip->i_update_core)
-		return 0;
 
-	if (wbc->sync_mode == WB_SYNC_ALL) {
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
 		/*
 		 * Make sure the inode has made it into the log.  Instead
 		 * of forcing it all the way to stable storage using a
@@ -913,11 +881,14 @@
 		 * ->sync_fs call do that for us, which reduces the number
 		 * of synchronous log forces dramatically.
 		 */
-		error = xfs_log_inode(ip);
+		error = xfs_log_dirty_inode(ip, NULL, 0);
 		if (error)
 			goto out;
 		return 0;
 	} else {
+		if (!ip->i_update_core)
+			return 0;
+
 		/*
 		 * We make this non-blocking if the inode is contended, return
 		 * EAGAIN to indicate to the caller that they did not succeed.
@@ -1034,17 +1005,10 @@
 	int			error;
 
 	/*
-	 * Not much we can do for the first async pass.  Writing out the
-	 * superblock would be counter-productive as we are going to redirty
-	 * when writing out other data and metadata (and writing out a single
-	 * block is quite fast anyway).
-	 *
-	 * Try to asynchronously kick off quota syncing at least.
+	 * Doing anything during the async pass would be counterproductive.
 	 */
-	if (!wait) {
-		xfs_qm_sync(mp, SYNC_TRYLOCK);
+	if (!wait)
 		return 0;
-	}
 
 	error = xfs_quiesce_data(mp);
 	if (error)
@@ -1258,9 +1222,9 @@
 STATIC int
 xfs_fs_show_options(
 	struct seq_file		*m,
-	struct vfsmount		*mnt)
+	struct dentry		*root)
 {
-	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
+	return -xfs_showargs(XFS_M(root->d_sb), m);
 }
 
 /*
@@ -1641,12 +1605,12 @@
 xfs_init_workqueues(void)
 {
 	/*
-	 * max_active is set to 8 to give enough concurency to allow
-	 * multiple work operations on each CPU to run. This allows multiple
-	 * filesystems to be running sync work concurrently, and scales with
-	 * the number of CPUs in the system.
+	 * We never want the same work item to run twice: reclaiming inodes
+	 * or idling the log is not going to get any faster with multiple CPUs
+	 * competing for resources.  Use the default large max_active value
+	 * so that even lots of filesystems can perform these tasks in parallel.
 	 */
-	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
 	if (!xfs_syncd_wq)
 		return -ENOMEM;
 	return 0;
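
The workqueue change relies on two alloc_workqueue() conventions: a max_active of 0 asks for the default concurrency limit, and WQ_NON_REENTRANT (still a distinct flag in this kernel) keeps a given work item from running on two CPUs at once. The same call with those assumptions spelled out as comments:

	/* max_active = 0 -> default limit; WQ_NON_REENTRANT -> one execution of an item at a time */
	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
	if (!xfs_syncd_wq)
		return -ENOMEM;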
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index be5c51d..72c01a1 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -336,6 +336,32 @@
 	return error;
 }
 
+int
+xfs_log_dirty_inode(
+	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
+	int			flags)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+
+	if (!ip->i_update_core)
+		return 0;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	return xfs_trans_commit(tp, 0);
+}
+
 /*
  * When remounting a filesystem read-only or freezing the filesystem, we have
  * two phases to execute. This first phase is syncing the data before we
@@ -359,10 +385,17 @@
 {
 	int			error, error2 = 0;
 
-	xfs_qm_sync(mp, SYNC_TRYLOCK);
-	xfs_qm_sync(mp, SYNC_WAIT);
+	/*
+	 * Log all pending size and timestamp updates.  The vfs writeback
+	 * code is supposed to do this, but due to its overagressive
+	 * code is supposed to do this, but due to its overaggressive
+	 * were written out in the first non-blocking sync phase if their
+	 * completion took long enough that it happened after taking the
+	 * timestamp for the cut-off in the blocking phase.
+	 */
+	xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
 
-	/* force out the newly dirtied log buffers */
+	/* force out the log */
 	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	/* write superblock and hoover up shutdown errors */
@@ -470,7 +503,6 @@
 			error = xfs_fs_log_dummy(mp);
 		else
 			xfs_log_force(mp, 0);
-		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 
 		/* start pushing all the metadata that is currently dirty */
 		xfs_ail_push_all(mp->m_ail);
diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h
index 941202e..fa96547 100644
--- a/fs/xfs/xfs_sync.h
+++ b/fs/xfs/xfs_sync.h
@@ -34,6 +34,8 @@
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 int xfs_reclaim_inodes_count(struct xfs_mount *mp);
 void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 4940357..a9d5b1e 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -743,8 +743,6 @@
 DEFINE_DQUOT_EVENT(xfs_dqread);
 DEFINE_DQUOT_EVENT(xfs_dqread_fail);
 DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
 DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
 DEFINE_DQUOT_EVENT(xfs_dqget_hit);
 DEFINE_DQUOT_EVENT(xfs_dqget_miss);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 1f35b2f..329b06a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1158,7 +1158,6 @@
 
 	lidp->lid_item = lip;
 	lidp->lid_flags = 0;
-	lidp->lid_size = 0;
 	list_add_tail(&lidp->lid_trans, &tp->t_items);
 
 	lip->li_desc = lidp;
@@ -1210,219 +1209,6 @@
 	}
 }
 
-/*
- * Unlock the items associated with a transaction.
- *
- * Items which were not logged should be freed.  Those which were logged must
- * still be tracked so they can be unpinned when the transaction commits.
- */
-STATIC void
-xfs_trans_unlock_items(
-	struct xfs_trans	*tp,
-	xfs_lsn_t		commit_lsn)
-{
-	struct xfs_log_item_desc *lidp, *next;
-
-	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
-		struct xfs_log_item	*lip = lidp->lid_item;
-
-		lip->li_desc = NULL;
-
-		if (commit_lsn != NULLCOMMITLSN)
-			IOP_COMMITTING(lip, commit_lsn);
-		IOP_UNLOCK(lip);
-
-		/*
-		 * Free the descriptor if the item is not dirty
-		 * within this transaction.
-		 */
-		if (!(lidp->lid_flags & XFS_LID_DIRTY))
-			xfs_trans_free_item_desc(lidp);
-	}
-}
-
-/*
- * Total up the number of log iovecs needed to commit this
- * transaction.  The transaction itself needs one for the
- * transaction header.  Ask each dirty item in turn how many
- * it needs to get the total.
- */
-static uint
-xfs_trans_count_vecs(
-	struct xfs_trans	*tp)
-{
-	int			nvecs;
-	struct xfs_log_item_desc *lidp;
-
-	nvecs = 1;
-
-	/* In the non-debug case we need to start bailing out if we
-	 * didn't find a log_item here, return zero and let trans_commit
-	 * deal with it.
-	 */
-	if (list_empty(&tp->t_items)) {
-		ASSERT(0);
-		return 0;
-	}
-
-	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-		/*
-		 * Skip items which aren't dirty in this transaction.
-		 */
-		if (!(lidp->lid_flags & XFS_LID_DIRTY))
-			continue;
-		lidp->lid_size = IOP_SIZE(lidp->lid_item);
-		nvecs += lidp->lid_size;
-	}
-
-	return nvecs;
-}
-
-/*
- * Fill in the vector with pointers to data to be logged
- * by this transaction.  The transaction header takes
- * the first vector, and then each dirty item takes the
- * number of vectors it indicated it needed in xfs_trans_count_vecs().
- *
- * As each item fills in the entries it needs, also pin the item
- * so that it cannot be flushed out until the log write completes.
- */
-static void
-xfs_trans_fill_vecs(
-	struct xfs_trans	*tp,
-	struct xfs_log_iovec	*log_vector)
-{
-	struct xfs_log_item_desc *lidp;
-	struct xfs_log_iovec	*vecp;
-	uint			nitems;
-
-	/*
-	 * Skip over the entry for the transaction header, we'll
-	 * fill that in at the end.
-	 */
-	vecp = log_vector + 1;
-
-	nitems = 0;
-	ASSERT(!list_empty(&tp->t_items));
-	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-		/* Skip items which aren't dirty in this transaction. */
-		if (!(lidp->lid_flags & XFS_LID_DIRTY))
-			continue;
-
-		/*
-		 * The item may be marked dirty but not log anything.  This can
-		 * be used to get called when a transaction is committed.
-		 */
-		if (lidp->lid_size)
-			nitems++;
-		IOP_FORMAT(lidp->lid_item, vecp);
-		vecp += lidp->lid_size;
-		IOP_PIN(lidp->lid_item);
-	}
-
-	/*
-	 * Now that we've counted the number of items in this transaction, fill
-	 * in the transaction header. Note that the transaction header does not
-	 * have a log item.
-	 */
-	tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
-	tp->t_header.th_type = tp->t_type;
-	tp->t_header.th_num_items = nitems;
-	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
-	log_vector->i_len = sizeof(xfs_trans_header_t);
-	log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
-}
-
-/*
- * The committed item processing consists of calling the committed routine of
- * each logged item, updating the item's position in the AIL if necessary, and
- * unpinning each item.  If the committed routine returns -1, then do nothing
- * further with the item because it may have been freed.
- *
- * Since items are unlocked when they are copied to the incore log, it is
- * possible for two transactions to be completing and manipulating the same
- * item simultaneously.  The AIL lock will protect the lsn field of each item.
- * The value of this field can never go backwards.
- *
- * We unpin the items after repositioning them in the AIL, because otherwise
- * they could be immediately flushed and we'd have to race with the flusher
- * trying to pull the item from the AIL as we add it.
- */
-static void
-xfs_trans_item_committed(
-	struct xfs_log_item	*lip,
-	xfs_lsn_t		commit_lsn,
-	int			aborted)
-{
-	xfs_lsn_t		item_lsn;
-	struct xfs_ail		*ailp;
-
-	if (aborted)
-		lip->li_flags |= XFS_LI_ABORTED;
-	item_lsn = IOP_COMMITTED(lip, commit_lsn);
-
-	/* item_lsn of -1 means the item needs no further processing */
-	if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
-		return;
-
-	/*
-	 * If the returned lsn is greater than what it contained before, update
-	 * the location of the item in the AIL.  If it is not, then do nothing.
-	 * Items can never move backwards in the AIL.
-	 *
-	 * While the new lsn should usually be greater, it is possible that a
-	 * later transaction completing simultaneously with an earlier one
-	 * using the same item could complete first with a higher lsn.  This
-	 * would cause the earlier transaction to fail the test below.
-	 */
-	ailp = lip->li_ailp;
-	spin_lock(&ailp->xa_lock);
-	if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
-		/*
-		 * This will set the item's lsn to item_lsn and update the
-		 * position of the item in the AIL.
-		 *
-		 * xfs_trans_ail_update() drops the AIL lock.
-		 */
-		xfs_trans_ail_update(ailp, lip, item_lsn);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
-
-	/*
-	 * Now that we've repositioned the item in the AIL, unpin it so it can
-	 * be flushed. Pass information about buffer stale state down from the
-	 * log item flags, if anyone else stales the buffer we do not want to
-	 * pay any attention to it.
-	 */
-	IOP_UNPIN(lip, 0);
-}
-
-/*
- * This is typically called by the LM when a transaction has been fully
- * committed to disk.  It needs to unpin the items which have
- * been logged by the transaction and update their positions
- * in the AIL if necessary.
- *
- * This also gets called when the transactions didn't get written out
- * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
- */
-STATIC void
-xfs_trans_committed(
-	void			*arg,
-	int			abortflag)
-{
-	struct xfs_trans	*tp = arg;
-	struct xfs_log_item_desc *lidp, *next;
-
-	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
-		xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
-		xfs_trans_free_item_desc(lidp);
-	}
-
-	xfs_trans_free(tp);
-}
-
 static inline void
 xfs_log_item_batch_insert(
 	struct xfs_ail		*ailp,
@@ -1538,258 +1324,6 @@
 }
 
 /*
- * Called from the trans_commit code when we notice that the filesystem is in
- * the middle of a forced shutdown.
- *
- * When we are called here, we have already pinned all the items in the
- * transaction. However, neither IOP_COMMITTING or IOP_UNLOCK has been called
- * so we can simply walk the items in the transaction, unpin them with an abort
- * flag and then free the items. Note that unpinning the items can result in
- * them being freed immediately, so we need to use a safe list traversal method
- * here.
- */
-STATIC void
-xfs_trans_uncommit(
-	struct xfs_trans	*tp,
-	uint			flags)
-{
-	struct xfs_log_item_desc *lidp, *n;
-
-	list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) {
-		if (lidp->lid_flags & XFS_LID_DIRTY)
-			IOP_UNPIN(lidp->lid_item, 1);
-	}
-
-	xfs_trans_unreserve_and_mod_sb(tp);
-	xfs_trans_unreserve_and_mod_dquots(tp);
-
-	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
-	xfs_trans_free(tp);
-}
-
-/*
- * Format the transaction direct to the iclog. This isolates the physical
- * transaction commit operation from the logical operation and hence allows
- * other methods to be introduced without affecting the existing commit path.
- */
-static int
-xfs_trans_commit_iclog(
-	struct xfs_mount	*mp,
-	struct xfs_trans	*tp,
-	xfs_lsn_t		*commit_lsn,
-	int			flags)
-{
-	int			shutdown;
-	int			error;
-	int			log_flags = 0;
-	struct xlog_in_core	*commit_iclog;
-#define XFS_TRANS_LOGVEC_COUNT  16
-	struct xfs_log_iovec	log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
-	struct xfs_log_iovec	*log_vector;
-	uint			nvec;
-
-
-	/*
-	 * Ask each log item how many log_vector entries it will
-	 * need so we can figure out how many to allocate.
-	 * Try to avoid the kmem_alloc() call in the common case
-	 * by using a vector from the stack when it fits.
-	 */
-	nvec = xfs_trans_count_vecs(tp);
-	if (nvec == 0) {
-		return ENOMEM;	/* triggers a shutdown! */
-	} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
-		log_vector = log_vector_fast;
-	} else {
-		log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
-						   sizeof(xfs_log_iovec_t),
-						   KM_SLEEP);
-	}
-
-	/*
-	 * Fill in the log_vector and pin the logged items, and
-	 * then write the transaction to the log.
-	 */
-	xfs_trans_fill_vecs(tp, log_vector);
-
-	if (flags & XFS_TRANS_RELEASE_LOG_RES)
-		log_flags = XFS_LOG_REL_PERM_RESERV;
-
-	error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));
-
-	/*
-	 * The transaction is committed incore here, and can go out to disk
-	 * at any time after this call.  However, all the items associated
-	 * with the transaction are still locked and pinned in memory.
-	 */
-	*commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
-
-	tp->t_commit_lsn = *commit_lsn;
-	trace_xfs_trans_commit_lsn(tp);
-
-	if (nvec > XFS_TRANS_LOGVEC_COUNT)
-		kmem_free(log_vector);
-
-	/*
-	 * If we got a log write error. Unpin the logitems that we
-	 * had pinned, clean up, free trans structure, and return error.
-	 */
-	if (error || *commit_lsn == -1) {
-		current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
-		return XFS_ERROR(EIO);
-	}
-
-	/*
-	 * Once the transaction has committed, unused
-	 * reservations need to be released and changes to
-	 * the superblock need to be reflected in the in-core
-	 * version.  Do that now.
-	 */
-	xfs_trans_unreserve_and_mod_sb(tp);
-
-	/*
-	 * Tell the LM to call the transaction completion routine
-	 * when the log write with LSN commit_lsn completes (e.g.
-	 * when the transaction commit really hits the on-disk log).
-	 * After this call we cannot reference tp, because the call
-	 * can happen at any time and the call will free the transaction
-	 * structure pointed to by tp.  The only case where we call
-	 * the completion routine (xfs_trans_committed) directly is
-	 * if the log is turned off on a debug kernel or we're
-	 * running in simulation mode (the log is explicitly turned
-	 * off).
-	 */
-	tp->t_logcb.cb_func = xfs_trans_committed;
-	tp->t_logcb.cb_arg = tp;
-
-	/*
-	 * We need to pass the iclog buffer which was used for the
-	 * transaction commit record into this function, and attach
-	 * the callback to it. The callback must be attached before
-	 * the items are unlocked to avoid racing with other threads
-	 * waiting for an item to unlock.
-	 */
-	shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));
-
-	/*
-	 * Mark this thread as no longer being in a transaction
-	 */
-	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-
-	/*
-	 * Once all the items of the transaction have been copied
-	 * to the in core log and the callback is attached, the
-	 * items can be unlocked.
-	 *
-	 * This will free descriptors pointing to items which were
-	 * not logged since there is nothing more to do with them.
-	 * For items which were logged, we will keep pointers to them
-	 * so they can be unpinned after the transaction commits to disk.
-	 * This will also stamp each modified meta-data item with
-	 * the commit lsn of this transaction for dependency tracking
-	 * purposes.
-	 */
-	xfs_trans_unlock_items(tp, *commit_lsn);
-
-	/*
-	 * If we detected a log error earlier, finish committing
-	 * the transaction now (unpin log items, etc).
-	 *
-	 * Order is critical here, to avoid using the transaction
-	 * pointer after its been freed (by xfs_trans_committed
-	 * either here now, or as a callback).  We cannot do this
-	 * step inside xfs_log_notify as was done earlier because
-	 * of this issue.
-	 */
-	if (shutdown)
-		xfs_trans_committed(tp, XFS_LI_ABORTED);
-
-	/*
-	 * Now that the xfs_trans_committed callback has been attached,
-	 * and the items are released we can finally allow the iclog to
-	 * go to disk.
-	 */
-	return xfs_log_release_iclog(mp, commit_iclog);
-}
-
-/*
- * Walk the log items and allocate log vector structures for
- * each item large enough to fit all the vectors they require.
- * Note that this format differs from the old log vector format in
- * that there is no transaction header in these log vectors.
- */
-STATIC struct xfs_log_vec *
-xfs_trans_alloc_log_vecs(
-	xfs_trans_t	*tp)
-{
-	struct xfs_log_item_desc *lidp;
-	struct xfs_log_vec	*lv = NULL;
-	struct xfs_log_vec	*ret_lv = NULL;
-
-
-	/* Bail out if we didn't find a log item.  */
-	if (list_empty(&tp->t_items)) {
-		ASSERT(0);
-		return NULL;
-	}
-
-	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-		struct xfs_log_vec *new_lv;
-
-		/* Skip items which aren't dirty in this transaction. */
-		if (!(lidp->lid_flags & XFS_LID_DIRTY))
-			continue;
-
-		/* Skip items that do not have any vectors for writing */
-		lidp->lid_size = IOP_SIZE(lidp->lid_item);
-		if (!lidp->lid_size)
-			continue;
-
-		new_lv = kmem_zalloc(sizeof(*new_lv) +
-				lidp->lid_size * sizeof(struct xfs_log_iovec),
-				KM_SLEEP);
-
-		/* The allocated iovec region lies beyond the log vector. */
-		new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
-		new_lv->lv_niovecs = lidp->lid_size;
-		new_lv->lv_item = lidp->lid_item;
-		if (!ret_lv)
-			ret_lv = new_lv;
-		else
-			lv->lv_next = new_lv;
-		lv = new_lv;
-	}
-
-	return ret_lv;
-}
-
-static int
-xfs_trans_commit_cil(
-	struct xfs_mount	*mp,
-	struct xfs_trans	*tp,
-	xfs_lsn_t		*commit_lsn,
-	int			flags)
-{
-	struct xfs_log_vec	*log_vector;
-
-	/*
-	 * Get each log item to allocate a vector structure for
-	 * the log item to to pass to the log write code. The
-	 * CIL commit code will format the vector and save it away.
-	 */
-	log_vector = xfs_trans_alloc_log_vecs(tp);
-	if (!log_vector)
-		return ENOMEM;
-
-	xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
-
-	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-	xfs_trans_free(tp);
-	return 0;
-}
-
-/*
  * Commit the given transaction to the log.
  *
  * XFS disk error handling mechanism is not based on a typical
@@ -1845,17 +1379,16 @@
 		xfs_trans_apply_sb_deltas(tp);
 	xfs_trans_apply_dquot_deltas(tp);
 
-	if (mp->m_flags & XFS_MOUNT_DELAYLOG)
-		error = xfs_trans_commit_cil(mp, tp, &commit_lsn, flags);
-	else
-		error = xfs_trans_commit_iclog(mp, tp, &commit_lsn, flags);
-
+	error = xfs_log_commit_cil(mp, tp, &commit_lsn, flags);
 	if (error == ENOMEM) {
 		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 		error = XFS_ERROR(EIO);
 		goto out_unreserve;
 	}
 
+	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+	xfs_trans_free(tp);
+
 	/*
 	 * If the transaction needs to be synchronous, then force the
 	 * log out now and wait for it.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 3ae713c..f611870 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -163,9 +163,8 @@
  */
 struct xfs_log_item_desc {
 	struct xfs_log_item	*lid_item;
-	ushort			lid_size;
-	unsigned char		lid_flags;
 	struct list_head	lid_trans;
+	unsigned char		lid_flags;
 };
 
 #define XFS_LID_DIRTY		0x1
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 8b32d1a..89dbb4a 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -53,7 +53,7 @@
 					   output: may be a new transaction. */
 	xfs_inode_t	*dp,		/* directory within whose allocate
 					   the inode. */
-	mode_t		mode,
+	umode_t		mode,
 	xfs_nlink_t	nlink,
 	xfs_dev_t	rdev,
 	prid_t		prid,		/* project id */
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index 456fca3..5eeab46 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,7 +18,7 @@
 #ifndef __XFS_UTILS_H__
 #define __XFS_UTILS_H__
 
-extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
+extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, umode_t, xfs_nlink_t,
 				xfs_dev_t, prid_t, int, xfs_inode_t **, int *);
 extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *);
 extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index ce9268a..f2fea86 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -822,7 +822,7 @@
 xfs_create(
 	xfs_inode_t		*dp,
 	struct xfs_name		*name,
-	mode_t			mode,
+	umode_t			mode,
 	xfs_dev_t		rdev,
 	xfs_inode_t		**ipp)
 {
@@ -1481,7 +1481,7 @@
 	xfs_inode_t		*dp,
 	struct xfs_name		*link_name,
 	const char		*target_path,
-	mode_t			mode,
+	umode_t			mode,
 	xfs_inode_t		**ipp)
 {
 	xfs_mount_t		*mp = dp->i_mount;
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 35d3d51..0c877cb 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -26,7 +26,7 @@
 int xfs_inactive(struct xfs_inode *ip);
 int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
 		struct xfs_inode **ipp, struct xfs_name *ci_name);
-int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode,
+int xfs_create(struct xfs_inode *dp, struct xfs_name *name, umode_t mode,
 		xfs_dev_t rdev, struct xfs_inode **ipp);
 int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
 		struct xfs_inode *ip);
@@ -35,7 +35,7 @@
 int xfs_readdir(struct xfs_inode	*dp, void *dirent, size_t bufsize,
 		       xfs_off_t *offset, filldir_t filldir);
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
-		const char *target_path, mode_t mode, struct xfs_inode **ipp);
+		const char *target_path, umode_t mode, struct xfs_inode **ipp);
 int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
 int xfs_change_file_space(struct xfs_inode *ip, int cmd,
 		xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 62ce682..9a62937 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,70 +4,66 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
 
-#define cputime_zero			(0UL)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) +  (__b))
-#define cputime_sub(__a, __b)		((__a) -  (__b))
-#define cputime_div(__a, __n)		((__a) /  (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) >  (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) <  (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-#define cputime_to_jiffies(__ct)	(__ct)
+#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct)		(__ct)
-#define jiffies_to_cputime(__hz)	(__hz)
+#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
 
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime64_zero (0ULL)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime64_sub(__a, __b)		((__a) - (__b))
-#define cputime64_to_jiffies64(__ct)	(__ct)
-#define jiffies64_to_cputime64(__jif)	(__jif)
-#define cputime_to_cputime64(__ct)	((u64) __ct)
-#define cputime64_gt(__a, __b)		((__a) >  (__b))
+#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
 
-#define nsecs_to_cputime64(__ct)	nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
 
 
 /*
  * Convert cputime to microseconds and back.
  */
-#define cputime_to_usecs(__ct)		jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs)	usecs_to_jiffies(__msecs)
+#define cputime_to_usecs(__ct)		\
+	jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec)	\
+	jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
 
 /*
  * Convert cputime to seconds and back.
  */
-#define cputime_to_secs(jif)		((jif) / HZ)
-#define secs_to_cputime(sec)		((sec) * HZ)
+#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
 
 /*
  * Convert cputime to timespec and back.
  */
-#define timespec_to_cputime(__val)	timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val)	jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val)	\
+	jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val)	\
+	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to timeval and back.
  */
-#define timeval_to_cputime(__val)	timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val)	jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val)	\
+	jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val)	\
+	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to clock and back.
  */
-#define cputime_to_clock_t(__ct)	jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x)		clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct)	\
+	jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)		\
+	jiffies_to_cputime(clock_t_to_jiffies(__x))
 
 /*
  * Convert cputime64 to clock.
  */
-#define cputime64_to_clock_t(__ct)	jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct)	\
+	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
 
 #endif
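
The conversion above turns cputime_t and cputime64_t into __nocast types so that sparse can catch code mixing raw jiffies with cputime values; the __force casts inside the helpers are the one sanctioned escape hatch. A minimal sketch of the intended usage, assuming a kernel context where this header is in effect (the function name is illustrative):

static inline u64 example_busy_usecs(unsigned long start_jiffies)
{
	/* convert explicitly: a bare assignment from a jiffies value to a
	 * cputime_t would now be flagged by sparse */
	cputime_t delta = jiffies_to_cputime(jiffies - start_jiffies);

	return cputime_to_usecs(delta);
}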
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 8c86210..d466c8d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#include <linux/of.h>
 
 #ifdef CONFIG_GPIOLIB
 
@@ -128,13 +129,14 @@
 	 */
 	struct device_node *of_node;
 	int of_gpio_n_cells;
-	int (*of_xlate)(struct gpio_chip *gc, struct device_node *np,
-		        const void *gpio_spec, u32 *flags);
+	int (*of_xlate)(struct gpio_chip *gc,
+		        const struct of_phandle_args *gpiospec, u32 *flags);
 #endif
 };
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 			unsigned offset);
+extern struct gpio_chip *gpio_to_chip(unsigned gpio);
 extern int __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
index 9a6115e..49c1704 100644
--- a/include/asm-generic/socket.h
+++ b/include/asm-generic/socket.h
@@ -64,4 +64,7 @@
 #define SO_DOMAIN		39
 
 #define SO_RXQ_OVFL             40
+
+#define SO_WIFI_STATUS		41
+#define SCM_WIFI_STATUS	SO_WIFI_STATUS
 #endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
index 7a0f69e..bd39806 100644
--- a/include/asm-generic/types.h
+++ b/include/asm-generic/types.h
@@ -6,10 +6,4 @@
  */
 #include <asm-generic/int-ll64.h>
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* _ASM_GENERIC_TYPES_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1f9e951..e8acca89 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -820,7 +820,7 @@
 	 * Specifically, the timestamp in @vblank_time should correspond as
 	 * closely as possible to the time when the first video scanline of
 	 * the video frame after the end of VBLANK will start scanning out,
-	 * the time immmediately after end of the VBLANK interval. If the
+	 * the time immediately after end of the VBLANK interval. If the
 	 * @crtc is currently inside VBLANK, this will be a time in the future.
 	 * If the @crtc is currently scanning out a frame, this will be the
 	 * past start time of the current scanout. This is meant to adhere
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 4e4fbb8..14b6cd0 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -182,8 +182,11 @@
 	{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -195,8 +198,10 @@
 	{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -246,6 +251,7 @@
 	{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -488,6 +494,8 @@
 	{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
 	{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -502,6 +510,8 @@
 	{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 619b565..c94e717 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -185,6 +185,7 @@
 header-y += if_pppox.h
 header-y += if_slip.h
 header-y += if_strip.h
+header-y += if_team.h
 header-y += if_tr.h
 header-y += if_tun.h
 header-y += if_tunnel.h
@@ -194,7 +195,9 @@
 header-y += in.h
 header-y += in6.h
 header-y += in_route.h
+header-y += sock_diag.h
 header-y += inet_diag.h
+header-y += unix_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fcbbe71..724c69c 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/err.h>
 #include <linux/resource.h>
 #include <linux/regulator/consumer.h>
@@ -35,12 +36,6 @@
 	unsigned int		irq[AMBA_NR_IRQS];
 };
 
-struct amba_id {
-	unsigned int		id;
-	unsigned int		mask;
-	void			*data;
-};
-
 struct amba_driver {
 	struct device_driver	drv;
 	int			(*probe)(struct amba_device *, const struct amba_id *);
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 4ce98f5..572f637 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -238,6 +238,9 @@
  * @enable_dma: if true enables DMA driven transfers.
  * @dma_rx_param: parameter to locate an RX DMA channel.
  * @dma_tx_param: parameter to locate a TX DMA channel.
+ * @autosuspend_delay: delay in ms following transfer completion before the
+ *     runtime power management system suspends the device. A setting of 0
+ *     indicates no delay and the device will be suspended immediately.
  */
 struct pl022_ssp_controller {
 	u16 bus_id;
@@ -246,6 +249,7 @@
 	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
 	void *dma_rx_param;
 	void *dma_tx_param;
+	int autosuspend_delay;
 };
 
 /**
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 49a83ca..f4ff882 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -445,16 +445,6 @@
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
-/*
- * This is approximately the algorithm used by alloc_skb.
- *
- */
-
-static inline int atm_guess_pdu2truesize(int size)
-{
-	return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
-}
-
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 2f81c6f..426ab9f 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -468,13 +468,13 @@
 #define audit_get_sessionid(t) ((t)->sessionid)
 extern void audit_log_task_context(struct audit_buffer *ab);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
-extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
+extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern int audit_bprm(struct linux_binprm *bprm);
 extern void audit_socketcall(int nargs, unsigned long *args);
 extern int audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
 extern int audit_set_macxattr(const char *name);
-extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr);
+extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
 extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
 extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
 extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
@@ -494,12 +494,12 @@
 	if (unlikely(!audit_dummy_context()))
 		__audit_fd_pair(fd1, fd2);
 }
-static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
+static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
 {
 	if (unlikely(!audit_dummy_context()))
 		__audit_ipc_set_perm(qbytes, uid, gid, mode);
 }
-static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
+static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
 {
 	if (unlikely(!audit_dummy_context()))
 		__audit_mq_open(oflag, mode, attr);
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 4d4b59d..f4b8346 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,61 +205,82 @@
 	struct ssb_sprom sprom;
 };
 
-extern inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read8(core, offset);
 }
-extern inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read16(core, offset);
 }
-extern inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read32(core, offset);
 }
-extern inline
+static inline
 void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write8(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write16(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write32(core, offset, value);
 }
 #ifdef CONFIG_BCMA_BLOCKIO
-extern inline void bcma_block_read(struct bcma_device *core, void *buffer,
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
 				   size_t count, u16 offset, u8 reg_width)
 {
 	core->bus->ops->block_read(core, buffer, count, offset, reg_width);
 }
-extern inline void bcma_block_write(struct bcma_device *core, const void *buffer,
-				    size_t count, u16 offset, u8 reg_width)
+static inline void bcma_block_write(struct bcma_device *core,
+				    const void *buffer, size_t count,
+				    u16 offset, u8 reg_width)
 {
 	core->bus->ops->block_write(core, buffer, count, offset, reg_width);
 }
 #endif
-extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->aread32(core, offset);
 }
-extern inline
+static inline
 void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->awrite32(core, offset, value);
 }
 
-#define bcma_mask32(cc, offset, mask) \
-	bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask))
-#define bcma_set32(cc, offset, set) \
-	bcma_write32(cc, offset, bcma_read32(cc, offset) | (set))
-#define bcma_maskset32(cc, offset, mask, set) \
-	bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set))
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+	bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+	bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+				  u16 offset, u32 mask, u32 set)
+{
+	bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+	bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+	bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+				  u16 offset, u16 mask, u16 set)
+{
+	bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
 
 extern bool bcma_core_is_enabled(struct bcma_device *core);
 extern void bcma_core_disable(struct bcma_device *core, u32 flags);
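
Turning the accessors into static inlines (and the mask/set macros into typed helpers) gives real type checking on the core pointer and the register width. A hedged sketch of how a driver might use the new read-modify-write helpers, reusing the PMU control bits defined in bcma_driver_chipcommon.h just below; the function name and divider handling are illustrative:

static void example_set_ilp_divider(struct bcma_device *core, u32 div)
{
	/* clear the ILP divider field, then merge in the new value */
	bcma_maskset32(core, BCMA_CC_PMU_CTL,
		       ~BCMA_CC_PMU_CTL_ILP_DIV,
		       (div << BCMA_CC_PMU_CTL_ILP_DIV_SHIFT) &
		       BCMA_CC_PMU_CTL_ILP_DIV);
}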
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1526d96..a33086a 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -203,6 +203,7 @@
 #define BCMA_CC_PMU_CTL			0x0600 /* PMU control */
 #define  BCMA_CC_PMU_CTL_ILP_DIV	0xFFFF0000 /* ILP div mask */
 #define  BCMA_CC_PMU_CTL_ILP_DIV_SHIFT	16
+#define  BCMA_CC_PMU_CTL_PLL_UPD	0x00000400
 #define  BCMA_CC_PMU_CTL_NOILPONW	0x00000200 /* No ILP on wait */
 #define  BCMA_CC_PMU_CTL_HTREQEN	0x00000100 /* HT req enable */
 #define  BCMA_CC_PMU_CTL_ALPREQEN	0x00000080 /* ALP req enable */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a..3c1063a 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@
 #include <asm/bitops.h>
 
 #define for_each_set_bit(bit, addr, size) \
-	for ((bit) = find_first_bit((addr), (size)); \
-	     (bit) < (size); \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 static __inline__ int get_bitmask_order(unsigned int count)
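
A hedged sketch of the difference between the two iterators: for_each_set_bit() always restarts the scan at bit 0, whereas the new for_each_set_bit_cont() resumes from whatever value 'bit' already holds. The function and variable names below are illustrative:

static void example_scan(const unsigned long *mask, unsigned int size)
{
	unsigned int bit;

	for_each_set_bit(bit, mask, size)
		pr_info("full scan: bit %u is set\n", bit);

	bit = size / 2;		/* pick up an interrupted scan mid-way */
	for_each_set_bit_cont(bit, mask, size)
		pr_info("continued scan: bit %u is set\n", bit);
}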
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b..94acd81 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-							   request_fn_proc *,
-							   spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 						      request_fn_proc *, spinlock_t *);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a5..66d3e95 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@
 				       unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
 
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644
index 0000000..7702641
--- /dev/null
+++ b/include/linux/can/platform/cc770.h
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN	0x01	/* Clock Out Enable */
+#define CPUIF_MUX	0x04	/* Multiplex */
+#define CPUIF_SLP	0x08	/* Sleep */
+#define CPUIF_PWD	0x10	/* Power Down Mode */
+#define CPUIF_DMC	0x20	/* Divide Memory Clock */
+#define CPUIF_DSC	0x40	/* Divide System Clock */
+#define CPUIF_RST	0x80	/* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK  0x0f	/* Clock Divider mask */
+#define CLKOUT_SL_MASK	0x30	/* Slew Rate mask */
+#define CLKOUT_SL_SHIFT	4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0	0x01	/* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1	0x02	/* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1	0x08	/* Disconnect TX1 Output */
+#define BUSCFG_POL	0x20	/* Polarity dominant or recessive */
+#define BUSCFG_CBY	0x40	/* Input Comparator Bypass */
+
+struct cc770_platform_data {
+	u32 osc_freq;	/* CAN bus oscillator frequency in Hz */
+
+	u8 cir;		/* CPU Interface Register */
+	u8 cor;		/* Clock Out Register */
+	u8 bcr;		/* Bus Configuration Register */
+};
+
+#endif	/* !_CAN_PLATFORM_CC770_H_ */
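
A hedged board-code sketch of how the new platform data might be filled in; the oscillator frequency and register settings below are illustrative assumptions, not values taken from the patch:

static struct cc770_platform_data example_cc770_pdata = {
	.osc_freq = 16000000,			/* 16 MHz CAN oscillator */
	.cir	  = CPUIF_CEN | CPUIF_DSC,	/* clock out enabled, sysclk/2 */
	.cor	  = 0x01,			/* clock out divider */
	.bcr	  = BUSCFG_CBY,			/* bypass input comparator */
};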
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1b7f9d5..a17becc 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -319,7 +319,7 @@
 	 * If not 0, file mode is set to this value, otherwise it will
 	 * be figured out automatically
 	 */
-	mode_t mode;
+	umode_t mode;
 
 	/*
 	 * If non-zero, defines the maximum length of string that can
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ac663c1..0bd390c 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -59,8 +59,16 @@
 SUBSYS(blkio)
 #endif
 
+/* */
+
 #ifdef CONFIG_CGROUP_PERF
 SUBSYS(perf)
 #endif
 
 /* */
+
+#ifdef CONFIG_NETPRIO_CGROUP
+SUBSYS(net_prio)
+#endif
+
+/* */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c86c940..081147d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -71,7 +71,7 @@
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:		Pointer to cycle counter.
+ * @cc:		Pointer to cycle counter.
  * @cycles:	Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@
  *                        time base as values returned by
  *                        timecounter_read()
  * @tc:		Pointer to time counter.
- * @cycle:	a value returned by tc->cc->read()
+ * @cycle_tstamp:	a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@
  * @mult:		cycle to nanosecond multiplier
  * @shift:		cycle to nanosecond divisor (power of two)
  * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
- * @maxadj		maximum adjustment value to mult (~11%)
+ * @maxadj:		maximum adjustment value to mult (~11%)
  * @flags:		flags describing special properties
  * @archdata:		arch-specific data
  * @suspend:		suspend function for the clocksource, if necessary
  * @resume:		resume function for the clocksource, if necessary
+ * @cycle_last:		most recent cycle counter value seen by ::read()
  */
 struct clocksource {
 	/*
@@ -187,6 +188,7 @@
 	void (*suspend)(struct clocksource *cs);
 	void (*resume)(struct clocksource *cs);
 
+	/* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
 	struct list_head wd_list;
@@ -261,6 +263,9 @@
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:	cycles
+ * @mult:	cycle to nanosecond multiplier
+ * @shift:	cycle to nanosecond divisor (power of two)
  *
  * Converts cycles to nanoseconds, using the given mult and shift.
  *
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 66ed067..41c9f65 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -422,9 +422,9 @@
 asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
 				    unsigned int nr_segs, unsigned int flags);
 asmlinkage long compat_sys_open(const char __user *filename, int flags,
-				int mode);
+				umode_t mode);
 asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
-				  int flags, int mode);
+				  int flags, umode_t mode);
 asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
 					     struct file_handle __user *handle,
 					     int flags);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 3081c58..34025df 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -124,7 +124,7 @@
 struct configfs_attribute {
 	const char		*ca_name;
 	struct module 		*ca_owner;
-	mode_t			ca_mode;
+	umode_t			ca_mode;
 };
 
 /*
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index f932093..cf68ca4 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -35,8 +35,8 @@
  * @theta: angle in degrees for which i/q coordinate is to be calculated.
  * @coord: function output parameter holding the i/q coordinate.
  *
- * The function calculates the i/q coordinate for a given angle using
- * cordic algorithm. The coordinate consists of a real (i) and an
+ * The function calculates the i/q coordinate for a given angle using the
+ * CORDIC algorithm. The coordinate consists of a real (i) and an
  * imaginary (q) part. The real part is essentially the cosine of the
  * angle and the imaginary part is the sine of the angle. The returned
  * values are scaled by 2^16 for precision. The range for theta is
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd..1f65875 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -14,7 +14,7 @@
 #ifndef _LINUX_CPU_H_
 #define _LINUX_CPU_H_
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
@@ -22,19 +22,20 @@
 struct cpu {
 	int node_id;		/* The node which contains the CPU */
 	int hotpluggable;	/* creates sysfs control file if hotpluggable */
-	struct sys_device sysdev;
+	struct device dev;
 };
 
 extern int register_cpu(struct cpu *cpu, int num);
-extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+extern struct device *get_cpu_device(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
 
-extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
-extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
+extern int cpu_add_dev_attr(struct device_attribute *attr);
+extern void cpu_remove_dev_attr(struct device_attribute *attr);
 
-extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
-extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+extern int sched_create_sysfs_power_savings_entries(struct device *dev);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
@@ -160,7 +161,7 @@
 }
 
 #endif /* CONFIG_SMP */
-extern struct sysdev_class cpu_sysdev_class;
+extern struct bus_type cpu_subsys;
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 7408af8..23f81de 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -130,7 +130,6 @@
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
 extern int cpuidle_idle_call(void);
-
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 struct cpuidle_driver *cpuidle_get_driver(void);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
@@ -145,7 +144,6 @@
 #else
 static inline void disable_cpuidle(void) { }
 static inline int cpuidle_idle_call(void) { return -ENODEV; }
-
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index e7d9b20..6169c26 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -16,6 +16,7 @@
 #define _DEBUGFS_H_
 
 #include <linux/fs.h>
+#include <linux/seq_file.h>
 
 #include <linux/types.h>
 
@@ -26,6 +27,17 @@
 	unsigned long size;
 };
 
+struct debugfs_reg32 {
+	char *name;
+	unsigned long offset;
+};
+
+struct debugfs_regset32 {
+	struct debugfs_reg32 *regs;
+	int nregs;
+	void __iomem *base;
+};
+
 extern struct dentry *arch_debugfs_dir;
 
 #if defined(CONFIG_DEBUG_FS)
@@ -34,7 +46,7 @@
 extern const struct file_operations debugfs_file_operations;
 extern const struct inode_operations debugfs_link_operations;
 
-struct dentry *debugfs_create_file(const char *name, mode_t mode,
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
 				   struct dentry *parent, void *data,
 				   const struct file_operations *fops);
 
@@ -49,31 +61,38 @@
 struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
                 struct dentry *new_dir, const char *new_name);
 
-struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
 				 struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
 				  struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
 				  struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
 				  struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
 				 struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
 				  struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
 				  struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
 				  struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
 				     struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
 				  struct dentry *parent, u32 *value);
 
-struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 				  struct dentry *parent,
 				  struct debugfs_blob_wrapper *blob);
 
+struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
+				     struct dentry *parent,
+				     struct debugfs_regset32 *regset);
+
+int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+			 int nregs, void __iomem *base, char *prefix);
+
 bool debugfs_initialized(void);
 
 #else
@@ -86,7 +105,7 @@
  * want to duplicate the design decision mistakes of procfs and devfs again.
  */
 
-static inline struct dentry *debugfs_create_file(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
 					struct dentry *parent, void *data,
 					const struct file_operations *fops)
 {
@@ -118,76 +137,83 @@
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
 					       struct dentry *parent,
 					       u8 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
 						struct dentry *parent,
 						u16 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
 						struct dentry *parent,
 						u32 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
 						struct dentry *parent,
 						u64 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
 					       struct dentry *parent,
 					       u8 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
 						struct dentry *parent,
 						u16 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
 						struct dentry *parent,
 						u32 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
 				     struct dentry *parent,
 				     size_t *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
 						 struct dentry *parent,
 						 u32 *value)
 {
 	return ERR_PTR(-ENODEV);
 }
 
-static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 				  struct dentry *parent,
 				  struct debugfs_blob_wrapper *blob)
 {
 	return ERR_PTR(-ENODEV);
 }
 
+static inline struct dentry *debugfs_create_regset32(const char *name,
+				   umode_t mode, struct dentry *parent,
+				   struct debugfs_regset32 *regset)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline bool debugfs_initialized(void)
 {
 	return false;
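
A hedged sketch of how a driver might describe a register window for the new debugfs_create_regset32() helper; the register names, offsets and the "regdump" file name are illustrative:

static struct debugfs_reg32 example_regs[] = {
	{ .name = "ctrl",    .offset = 0x00 },
	{ .name = "status",  .offset = 0x04 },
	{ .name = "irqmask", .offset = 0x08 },
};

static struct debugfs_regset32 example_regset = {
	.regs  = example_regs,
	.nregs = ARRAY_SIZE(example_regs),
};

/* in probe(), once the register block has been ioremap()ed:
 *
 *	example_regset.base = base;
 *	debugfs_create_regset32("regdump", S_IRUSR, parent, &example_regset);
 */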
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b8..0e5f578 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@
  *			fails
  * @fixup_free:		fixup function, which is called when the free check
  *			fails
+ * @fixup_assert_init:  fixup function, which is called when the assert_init
+ *			check fails
  */
 struct debug_obj_descr {
 	const char		*name;
@@ -54,6 +56,7 @@
 	int (*fixup_activate)	(void *addr, enum debug_obj_state state);
 	int (*fixup_destroy)	(void *addr, enum debug_obj_state state);
 	int (*fixup_free)	(void *addr, enum debug_obj_state state);
+	int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
 };
 
 #ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@
 extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
 
 /*
  * Active state:
@@ -89,6 +93,8 @@
 debug_object_destroy   (void *addr, struct debug_obj_descr *descr) { }
 static inline void
 debug_object_free      (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
 
 static inline void debug_objects_early_init(void) { }
 static inline void debug_objects_mem_init(void) { }
diff --git a/include/linux/device.h b/include/linux/device.h
index 3136ede..5b3adb8 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -53,6 +53,8 @@
  * struct bus_type - The bus type of the device
  *
  * @name:	The name of the bus.
+ * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @dev_root:	Default device to use as the parent.
  * @bus_attrs:	Default attributes of the bus.
  * @dev_attrs:	Default attributes of the devices on the bus.
  * @drv_attrs:	Default attributes of the device drivers on the bus.
@@ -86,6 +88,8 @@
  */
 struct bus_type {
 	const char		*name;
+	const char		*dev_name;
+	struct device		*dev_root;
 	struct bus_attribute	*bus_attrs;
 	struct device_attribute	*dev_attrs;
 	struct driver_attribute	*drv_attrs;
@@ -106,12 +110,30 @@
 	struct subsys_private *p;
 };
 
-extern int __must_check bus_register(struct bus_type *bus);
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define bus_register(subsys)			\
+({						\
+	static struct lock_class_key __key;	\
+	__bus_register(subsys, &__key);	\
+})
+extern int __must_check __bus_register(struct bus_type *bus,
+				       struct lock_class_key *key);
 extern void bus_unregister(struct bus_type *bus);
 
 extern int __must_check bus_rescan_devices(struct bus_type *bus);
 
 /* iterator helpers for buses */
+struct subsys_dev_iter {
+	struct klist_iter		ki;
+	const struct device_type	*type;
+};
+void subsys_dev_iter_init(struct subsys_dev_iter *iter,
+			 struct bus_type *subsys,
+			 struct device *start,
+			 const struct device_type *type);
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
 
 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
 		     int (*fn)(struct device *dev, void *data));
@@ -121,10 +143,10 @@
 struct device *bus_find_device_by_name(struct bus_type *bus,
 				       struct device *start,
 				       const char *name);
-
+struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
+					struct device *hint);
 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
 		     void *data, int (*fn)(struct device_driver *, void *));
-
 void bus_sort_breadthfirst(struct bus_type *bus,
 			   int (*compare)(const struct device *a,
 					  const struct device *b));
@@ -256,6 +278,33 @@
 				  int (*match)(struct device *dev, void *data));
 
 /**
+ * struct subsys_interface - interfaces to device functions
+ * @name:	name of the device function
+ * @subsys:	subsystem of the devices to attach to
+ * @node:	the list of functions registered at the subsystem
+ * @add_dev:	device hookup to device function handler
+ * @remove_dev:	device hookup to device function handler
+ *
+ * Simple interfaces attached to a subsystem. Multiple interfaces can
+ * attach to a subsystem and its devices. Unlike drivers, they do not
+ * exclusively claim or control devices. Interfaces usually represent
+ * a specific functionality of a subsystem/class of devices.
+ */
+struct subsys_interface {
+	const char *name;
+	struct bus_type *subsys;
+	struct list_head node;
+	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
+	int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+};
+
+int subsys_interface_register(struct subsys_interface *sif);
+void subsys_interface_unregister(struct subsys_interface *sif);
+
+int subsys_system_register(struct bus_type *subsys,
+			   const struct attribute_group **groups);
+
+/**
  * struct class - device classes
  * @name:	Name of the class.
  * @owner:	The module owner.
@@ -294,7 +343,7 @@
 	struct kobject			*dev_kobj;
 
 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
-	char *(*devnode)(struct device *dev, mode_t *mode);
+	char *(*devnode)(struct device *dev, umode_t *mode);
 
 	void (*class_release)(struct class *class);
 	void (*dev_release)(struct device *dev);
@@ -423,7 +472,7 @@
 	const char *name;
 	const struct attribute_group **groups;
 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
-	char *(*devnode)(struct device *dev, mode_t *mode);
+	char *(*devnode)(struct device *dev, umode_t *mode);
 	void (*release)(struct device *dev);
 
 	const struct dev_pm_ops *pm;
@@ -438,11 +487,31 @@
 			 const char *buf, size_t count);
 };
 
-#define DEVICE_ATTR(_name, _mode, _show, _store) \
-struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+struct dev_ext_attribute {
+	struct device_attribute attr;
+	void *var;
+};
 
-extern int __must_check device_create_file(struct device *device,
-					const struct device_attribute *entry);
+ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
+			  char *buf);
+ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count);
+ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
+			char *buf);
+ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count);
+
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
+	struct dev_ext_attribute dev_attr_##_name = \
+		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+#define DEVICE_INT_ATTR(_name, _mode, _var) \
+	struct dev_ext_attribute dev_attr_##_name = \
+		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+extern int device_create_file(struct device *device,
+			      const struct device_attribute *entry);
 extern void device_remove_file(struct device *dev,
 			       const struct device_attribute *attr);
 extern int __must_check device_create_bin_file(struct device *dev,
@@ -490,6 +559,9 @@
 extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
 extern void devm_kfree(struct device *dev, void *p);
 
+void __iomem *devm_request_and_ioremap(struct device *dev,
+			struct resource *res);
+
 struct device_dma_parameters {
 	/*
 	 * a low level driver may set these to teach IOMMU code about
@@ -600,6 +672,7 @@
 	struct device_node	*of_node; /* associated device tree node */
 
 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
+	u32			id;	/* device instance */
 
 	spinlock_t		devres_lock;
 	struct list_head	devres_head;
@@ -720,7 +793,7 @@
 extern int device_move(struct device *dev, struct device *new_parent,
 		       enum dpm_order dpm_order);
 extern const char *device_get_devnode(struct device *dev,
-				      mode_t *mode, const char **tmp);
+				      umode_t *mode, const char **tmp);
 extern void *dev_get_drvdata(const struct device *dev);
 extern int dev_set_drvdata(struct device *dev, void *data);
 
@@ -924,4 +997,25 @@
 #define sysfs_deprecated 0
 #endif
 
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister) \
+static int __init __driver##_init(void) \
+{ \
+	return __register(&(__driver)); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+	__unregister(&(__driver)); \
+} \
+module_exit(__driver##_exit);
+
 #endif /* _DEVICE_H_ */
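
A hedged sketch of how module_driver() is meant to be used: a bus wraps it once in its own helper, and individual drivers then lose their module_init()/module_exit() boilerplate. The "foo" bus, struct foo_driver and foo_driver_register()/foo_driver_unregister() are hypothetical names, not part of the patch:

/* in the (hypothetical) foo bus header */
#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)

/* an individual driver then needs only this */
static struct foo_driver example_driver = {
	/* ... probe, remove, id table ... */
};
module_foo_driver(example_driver);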
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
new file mode 100644
index 0000000..f8ac076
--- /dev/null
+++ b/include/linux/dma-buf.h
@@ -0,0 +1,176 @@
+/*
+ * Header file for dma buffer sharing framework.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and especially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_BUF_H__
+#define __DMA_BUF_H__
+
+#include <linux/file.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+struct dma_buf;
+struct dma_buf_attachment;
+
+/**
+ * struct dma_buf_ops - operations possible on struct dma_buf
+ * @attach: [optional] allows different devices to 'attach' themselves to the
+ *	    given buffer. It might return -EBUSY to signal that backing storage
+ *	    is already allocated and incompatible with the requirements
+ *	    of the requesting device.
+ * @detach: [optional] detach a given device from this buffer.
+ * @map_dma_buf: returns list of scatter pages allocated, increases usecount
+ *		 of the buffer. Requires at least one attach to be called
+ *		 before. Returned sg list should already be mapped into
+ *		 _device_ address space. This call may sleep. May also return
+ *		 -EINTR. Should return -EINVAL if attach hasn't been called yet.
+ * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
+ *		   pages.
+ * @release: release this buffer; to be called after the last dma_buf_put.
+ */
+struct dma_buf_ops {
+	int (*attach)(struct dma_buf *, struct device *,
+			struct dma_buf_attachment *);
+
+	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
+
+	/* For {map,unmap}_dma_buf below, any specific buffer attributes
+	 * required should get added to device_dma_parameters accessible
+	 * via dev->dma_params.
+	 */
+	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
+						enum dma_data_direction);
+	void (*unmap_dma_buf)(struct dma_buf_attachment *,
+						struct sg_table *);
+	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
+	 * if the call would block.
+	 */
+
+	/* after final dma_buf_put() */
+	void (*release)(struct dma_buf *);
+
+};
+
+/**
+ * struct dma_buf - shared buffer object
+ * @size: size of the buffer
+ * @file: file pointer used for sharing buffers across, and for refcounting.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @ops: dma_buf_ops associated with this buffer object.
+ * @priv: exporter specific private data for this buffer object.
+ */
+struct dma_buf {
+	size_t size;
+	struct file *file;
+	struct list_head attachments;
+	const struct dma_buf_ops *ops;
+	/* mutex to serialize list manipulation and other ops */
+	struct mutex lock;
+	void *priv;
+};
+
+/**
+ * struct dma_buf_attachment - holds device-buffer attachment data
+ * @dmabuf: buffer for this attachment.
+ * @dev: device attached to the buffer.
+ * @node: list of dma_buf_attachment.
+ * @priv: exporter specific attachment data.
+ *
+ * This structure holds the attachment information between the dma_buf buffer
+ * and its user device(s). The list contains one attachment struct per device
+ * attached to the buffer.
+ */
+struct dma_buf_attachment {
+	struct dma_buf *dmabuf;
+	struct device *dev;
+	struct list_head node;
+	void *priv;
+};
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+							struct device *dev);
+void dma_buf_detach(struct dma_buf *dmabuf,
+				struct dma_buf_attachment *dmabuf_attach);
+struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+			size_t size, int flags);
+int dma_buf_fd(struct dma_buf *dmabuf);
+struct dma_buf *dma_buf_get(int fd);
+void dma_buf_put(struct dma_buf *dmabuf);
+
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
+					enum dma_data_direction);
+void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *);
+#else
+
+static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+							struct device *dev)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void dma_buf_detach(struct dma_buf *dmabuf,
+				  struct dma_buf_attachment *dmabuf_attach)
+{
+	return;
+}
+
+static inline struct dma_buf *dma_buf_export(void *priv,
+						struct dma_buf_ops *ops,
+						size_t size, int flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int dma_buf_fd(struct dma_buf *dmabuf)
+{
+	return -ENODEV;
+}
+
+static inline struct dma_buf *dma_buf_get(int fd)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void dma_buf_put(struct dma_buf *dmabuf)
+{
+	return;
+}
+
+static inline struct sg_table *dma_buf_map_attachment(
+	struct dma_buf_attachment *attach, enum dma_data_direction write)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+						struct sg_table *sg)
+{
+	return;
+}
+
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+#endif /* __DMA_BUF_H__ */
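
A hedged sketch of the importer side of the API above: look up a shared buffer by fd, attach a device, map it, and tear everything down again. Error paths are kept minimal, and 'dev' and 'fd' are assumed to be provided by the caller:

static int example_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	/* ... program the importing device with the sg list ... */

	dma_buf_unmap_attachment(attach, sgt);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
	return 0;
}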
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd..57c9a8a 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@
 {
 }
 #define dmar_disabled	(1)
+#define intel_iommu_enabled (0)
 #endif
 
 
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644
index 0000000..5621547
--- /dev/null
+++ b/include/linux/dynamic_queue_limits.h
@@ -0,0 +1,97 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue).  Such a queue would have these general properties:
+ *
+ *   1) Objects are queued up to some limit specified as number of objects.
+ *   2) Periodically a completion process executes which retires consumed
+ *      objects.
+ *   3) Starvation occurs when limit has been reached, all queued data has
+ *      actually been consumed, but completion processing has not yet run
+ *      so queuing new data is blocked.
+ *   4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ *    dql_queued - called when objects are enqueued to record number of objects
+ *    dql_avail - returns how many objects are available to be queued based
+ *      on the object limit and how many objects are already enqueued
+ *    dql_completed - called at completion time to indicate how many objects
+ *      were retired from the queue
+ *
+ * The dql implementation does not implement any locking for the dql data
+ * structures; the higher layer should provide this.  dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed.  However, dql_queued and dql_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+	/* Fields accessed in enqueue path (dql_queued) */
+	unsigned int	num_queued;		/* Total ever queued */
+	unsigned int	adj_limit;		/* limit + num_completed */
+	unsigned int	last_obj_cnt;		/* Count at last queuing */
+
+	/* Fields accessed only by completion path (dql_completed) */
+
+	unsigned int	limit ____cacheline_aligned_in_smp; /* Current limit */
+	unsigned int	num_completed;		/* Total ever completed */
+
+	unsigned int	prev_ovlimit;		/* Previous over limit */
+	unsigned int	prev_num_queued;	/* Previous queue total */
+	unsigned int	prev_last_obj_cnt;	/* Previous queuing cnt */
+
+	unsigned int	lowest_slack;		/* Lowest slack found */
+	unsigned long	slack_start_time;	/* Time slacks seen */
+
+	/* Configuration */
+	unsigned int	max_limit;		/* Max limit */
+	unsigned int	min_limit;		/* Minimum limit */
+	unsigned int	slack_hold_time;	/* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+	BUG_ON(count > DQL_MAX_OBJECT);
+
+	dql->num_queued += count;
+	dql->last_obj_cnt = count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+	return dql->adj_limit - dql->num_queued;
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DQL_H */
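A rough sketch of the calling pattern described in the header comment above (illustration only; the example_ring structure and the stop/wake decisions are hypothetical, and dql_init() is assumed to have already been called):

#include <linux/dynamic_queue_limits.h>

struct example_ring {
	struct dql dql;
	/* ... hardware descriptors, locks, ... */
};

/* Enqueue path (caller serializes as described above): record what was
 * just queued, then check the remaining headroom. */
static bool example_xmit(struct example_ring *ring, unsigned int count)
{
	dql_queued(&ring->dql, count);
	return dql_avail(&ring->dql) >= 0;	/* false: caller should stop queuing */
}

/* Completion path: report what was retired; dql_completed() recalculates
 * the limit from the observed slack. */
static bool example_tx_done(struct example_ring *ring, unsigned int count)
{
	dql_completed(&ring->dql, count);
	return dql_avail(&ring->dql) >= 0;	/* true: caller may resume queuing */
}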
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 055b248..1cd3947 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -13,7 +13,7 @@
 #define _LINUX_EDAC_H_
 
 #include <linux/atomic.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 #define EDAC_OPSTATE_INVAL	-1
 #define EDAC_OPSTATE_POLL	0
@@ -23,12 +23,12 @@
 extern int edac_op_state;
 extern int edac_err_assert;
 extern atomic_t edac_handlers;
-extern struct sysdev_class edac_class;
+extern struct bus_type edac_subsys;
 
 extern int edac_handler_set(void);
 extern void edac_atomic_assert_error(void);
-extern struct sysdev_class *edac_get_sysfs_class(void);
-extern void edac_put_sysfs_class(void);
+extern struct bus_type *edac_get_sysfs_subsys(void);
+extern void edac_put_sysfs_subsys(void);
 
 static inline void opstate_init(void)
 {
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index c4627cb..e50f98b 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -33,6 +33,7 @@
 #define PCI_EEPROM_WIDTH_93C86	8
 #define PCI_EEPROM_WIDTH_OPCODE	3
 #define PCI_EEPROM_WRITE_OPCODE	0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
 #define PCI_EEPROM_READ_OPCODE	0x06
 #define PCI_EEPROM_EWDS_OPCODE	0x10
 #define PCI_EEPROM_EWEN_OPCODE	0x13
@@ -46,6 +47,7 @@
  * @register_write(struct eeprom_93cx6 *eeprom): handler to
  * write to the eeprom register by using all reg_* fields.
  * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
  * @reg_data_in: register field to indicate data input
  * @reg_data_out: register field to indicate data output
  * @reg_data_clock: register field to set the data clock
@@ -62,6 +64,7 @@
 
 	int width;
 
+	char drive_data;
 	char reg_data_in;
 	char reg_data_out;
 	char reg_data_clock;
@@ -72,3 +75,8 @@
 	const u8 word, u16 *data);
 extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
 	const u8 word, __le16 *data, const u16 words);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+			       u8 addr, u16 data);
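For illustration (not part of the patch; example_store_word() and its arguments are hypothetical), the new write helper is meant to be bracketed by write-enable/disable calls:

#include <linux/eeprom_93cx6.h>

static void example_store_word(struct eeprom_93cx6 *eeprom, u8 addr, u16 value)
{
	eeprom_93cx6_wren(eeprom, true);	/* enable writes */
	eeprom_93cx6_write(eeprom, addr, value);
	eeprom_93cx6_wren(eeprom, false);	/* disable writes again */
}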
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 034072c..fd0628b 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -17,14 +17,15 @@
 #define SO_EE_ORIGIN_LOCAL	1
 #define SO_EE_ORIGIN_ICMP	2
 #define SO_EE_ORIGIN_ICMP6	3
-#define SO_EE_ORIGIN_TIMESTAMPING 4
+#define SO_EE_ORIGIN_TXSTATUS	4
+#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
 
 #define SO_EE_OFFENDER(ee)	((struct sockaddr*)((ee)+1))
 
 #ifdef __KERNEL__
 
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -33,7 +34,7 @@
 struct sock_exterr_skb {
 	union {
 		struct inet_skb_parm	h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index de33de1..da5b2de 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -489,7 +489,10 @@
  * on return.
  *
  * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
- * rules on return.
+ * rules on return.  If @data is non-zero on return then it is the
+ * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the
+ * driver supports any special location values.  If that flag is not
+ * set in @data then special location values should not be used.
  *
  * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
  * existing rule on entry and @fs contains the rule on return.
@@ -501,10 +504,23 @@
  * must use the second parameter to get_rxnfc() instead of @rule_locs.
  *
  * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
- * @fs.@location specifies the location to use and must not be ignored.
+ * @fs.@location either specifies the location to use or is a special
+ * location value with %RX_CLS_LOC_SPECIAL flag set.  On return,
+ * @fs.@location is the actual rule location.
  *
  * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
  * existing rule on entry.
+ *
+ * A driver supporting the special location values for
+ * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
+ * location, and may remove a rule at a later location (lower
+ * priority) that matches exactly the same set of flows.  The special
+ * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
+ * priority); and %RX_CLS_LOC_LAST, selecting the last suitable
+ * location (minimum priority).  Additional special values may be
+ * defined in future and drivers must return -%EINVAL for any
+ * unrecognised value.
  */
 struct ethtool_rxnfc {
 	__u32				cmd;
@@ -543,9 +559,15 @@
 /**
  * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
  * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
- * @size: On entry, the array size of the user buffer.  On return from
- *	%ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
+ * @size: On entry, the array size of the user buffer, which may be zero.
+ *	On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware
+ *	indirection table.
  * @ring_index: RX ring/queue index for each hash value
+ *
+ * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size
+ * should be returned.  For %ETHTOOL_SRXFHINDIR, a @size of zero means
+ * the table should be reset to default values.  This last feature
+ * is not supported by the original implementations.
  */
 struct ethtool_rxfh_indir {
 	__u32	cmd;
@@ -724,9 +746,6 @@
 
 #include <linux/rculist.h>
 
-/* needed by dev_disable_lro() */
-extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
-
 extern int __ethtool_get_settings(struct net_device *dev,
 				  struct ethtool_cmd *cmd);
 
@@ -750,19 +769,18 @@
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_tx_csum(struct net_device *dev);
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
-u32 ethtool_op_get_sg(struct net_device *dev);
-int ethtool_op_set_sg(struct net_device *dev, u32 data);
-u32 ethtool_op_get_tso(struct net_device *dev);
-int ethtool_op_set_tso(struct net_device *dev, u32 data);
-u32 ethtool_op_get_ufo(struct net_device *dev);
-int ethtool_op_set_ufo(struct net_device *dev, u32 data);
-u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
+
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+	return index % n_rx_rings;
+}
 
 /**
  * struct ethtool_ops - optional netdev operations
@@ -807,22 +825,6 @@
  * @get_pauseparam: Report pause parameters
  * @set_pauseparam: Set pause parameters.  Returns a negative error code
  *	or zero.
- * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM.
- *	Report whether receive checksums are turned on or off.
- * @set_rx_csum: Deprecated in favour of generic netdev features.  Turn
- *	receive checksum on or off.  Returns a negative error code or zero.
- * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
- *	are turned on or off.
- * @set_tx_csum: Deprecated in favour of generic netdev features.  Turn
- *	transmit checksums on or off.  Returns a negative error code or zero.
- * @get_sg: Deprecated as redundant.  Report whether scatter-gather is
- *	enabled.  
- * @set_sg: Deprecated in favour of generic netdev features.  Turn
- *	scatter-gather on or off. Returns a negative error code or zero.
- * @get_tso: Deprecated as redundant.  Report whether TCP segmentation
- *	offload is enabled.
- * @set_tso: Deprecated in favour of generic netdev features.  Turn TCP
- *	segmentation offload on or off.  Returns a negative error code or zero.
  * @self_test: Run specified self-tests
  * @get_strings: Return a set of strings that describe the requested objects
  * @set_phys_id: Identify the physical devices, e.g. by flashing an LED
@@ -844,15 +846,6 @@
  *	negative error code or zero.
  * @complete: Function to be called after any other operation except
  *	@begin.  Will be called even if the other operation failed.
- * @get_ufo: Deprecated as redundant.  Report whether UDP fragmentation
- *	offload is enabled.
- * @set_ufo: Deprecated in favour of generic netdev features.  Turn UDP
- *	fragmentation offload on or off.  Returns a negative error code or zero.
- * @get_flags: Deprecated as redundant.  Report features included in
- *	&enum ethtool_flags that are enabled.  
- * @set_flags: Deprecated in favour of generic netdev features.  Turn
- *	features included in &enum ethtool_flags on or off.  Returns a
- *	negative error code or zero.
  * @get_priv_flags: Report driver-specific feature flags.
  * @set_priv_flags: Set driver-specific feature flags.  Returns a negative
  *	error code or zero.
@@ -866,11 +859,13 @@
  * @reset: Reset (part of) the device, as specified by a bitmask of
  *	flags from &enum ethtool_reset_flags.  Returns a negative
  *	error code or zero.
- * @set_rx_ntuple: Set an RX n-tuple rule.  Returns a negative error code
- *	or zero.
+ * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
+ *	Returns zero if not supported for this specific device.
  * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
+ *	Will not be called if @get_rxfh_indir_size returns zero.
  *	Returns a negative error code or zero.
  * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
+ *	Will not be called if @get_rxfh_indir_size returns zero.
  *	Returns a negative error code or zero.
  * @get_channels: Get number of channels.
  * @set_channels: Set number of channels.  Returns a negative error code or
@@ -917,14 +912,6 @@
 				  struct ethtool_pauseparam*);
 	int	(*set_pauseparam)(struct net_device *,
 				  struct ethtool_pauseparam*);
-	u32	(*get_rx_csum)(struct net_device *);
-	int	(*set_rx_csum)(struct net_device *, u32);
-	u32	(*get_tx_csum)(struct net_device *);
-	int	(*set_tx_csum)(struct net_device *, u32);
-	u32	(*get_sg)(struct net_device *);
-	int	(*set_sg)(struct net_device *, u32);
-	u32	(*get_tso)(struct net_device *);
-	int	(*set_tso)(struct net_device *, u32);
 	void	(*self_test)(struct net_device *, struct ethtool_test *, u64 *);
 	void	(*get_strings)(struct net_device *, u32 stringset, u8 *);
 	int	(*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
@@ -932,10 +919,6 @@
 				     struct ethtool_stats *, u64 *);
 	int	(*begin)(struct net_device *);
 	void	(*complete)(struct net_device *);
-	u32	(*get_ufo)(struct net_device *);
-	int	(*set_ufo)(struct net_device *, u32);
-	u32	(*get_flags)(struct net_device *);
-	int	(*set_flags)(struct net_device *, u32);
 	u32	(*get_priv_flags)(struct net_device *);
 	int	(*set_priv_flags)(struct net_device *, u32);
 	int	(*get_sset_count)(struct net_device *, int);
@@ -944,12 +927,9 @@
 	int	(*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
 	int	(*flash_device)(struct net_device *, struct ethtool_flash *);
 	int	(*reset)(struct net_device *, u32 *);
-	int	(*set_rx_ntuple)(struct net_device *,
-				 struct ethtool_rx_ntuple *);
-	int	(*get_rxfh_indir)(struct net_device *,
-				  struct ethtool_rxfh_indir *);
-	int	(*set_rxfh_indir)(struct net_device *,
-				  const struct ethtool_rxfh_indir *);
+	u32	(*get_rxfh_indir_size)(struct net_device *);
+	int	(*get_rxfh_indir)(struct net_device *, u32 *);
+	int	(*set_rxfh_indir)(struct net_device *, const u32 *);
 	void	(*get_channels)(struct net_device *, struct ethtool_channels *);
 	int	(*set_channels)(struct net_device *, struct ethtool_channels *);
 	int	(*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -1173,6 +1153,12 @@
 
 #define	RX_CLS_FLOW_DISC	0xffffffffffffffffULL
 
+/* Special RX classification rule insert location values */
+#define RX_CLS_LOC_SPECIAL	0x80000000	/* flag */
+#define RX_CLS_LOC_ANY		0xffffffff
+#define RX_CLS_LOC_FIRST	0xfffffffe
+#define RX_CLS_LOC_LAST		0xfffffffd
+
 /* Reset flags */
 /* The reset() operation must clear the flags for the components which
  * were actually reset.  On successful return, the flags indicate the
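As a sketch of how a driver might wire up the reworked indirection-table ops above (illustrative only; the table size, the ring count of 4 and the example_* names are hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_INDIR_SIZE 128	/* hypothetical hardware table size */

static u32 example_get_rxfh_indir_size(struct net_device *dev)
{
	return EXAMPLE_INDIR_SIZE;	/* non-zero, so the ops below get called */
}

static int example_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	unsigned int i;

	/* Report the default spread across, say, 4 RX rings. */
	for (i = 0; i < EXAMPLE_INDIR_SIZE; i++)
		indir[i] = ethtool_rxfh_indir_default(i, 4);
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_rxfh_indir_size	= example_get_rxfh_indir_size,
	.get_rxfh_indir		= example_get_rxfh_indir,
};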
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index dec9911..f957085 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -884,7 +884,7 @@
 
 /* ialloc.c */
 extern struct inode * ext3_new_inode (handle_t *, struct inode *,
-				      const struct qstr *, int);
+				      const struct qstr *, umode_t);
 extern void ext3_free_inode (handle_t *, struct inode *);
 extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
 extern unsigned long ext3_count_free_inodes (struct super_block *);
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3..0ab54e1 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
+extern bool freezing_slow_path(struct task_struct *p);
+
 /*
  * Check if there is a request to freeze a process
  */
-static inline int freezing(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
-{
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -80,33 +67,27 @@
  * appropriately in case the child has exited before the freezing of tasks is
  * complete.  However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents.  Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
  */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
  */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
 }
 
 /*
@@ -118,21 +99,29 @@
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want to allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither returns any clear indication of whether a freeze event happened
+ * while inside them.
  */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()						\
+({									\
+	freezer_do_not_count();						\
+	schedule();							\
+	freezer_count();						\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)			\
+({									\
+	long __retval;							\
+	freezer_do_not_count();						\
+	__retval = schedule_timeout_killable(timeout);			\
+	freezer_count();						\
+	__retval;							\
+})
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +141,51 @@
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq, 		\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
-
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval); 				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
-#else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
 
-static inline void refrigerator(void) {}
+#else /* !CONFIG_FREEZER */
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
+
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)			\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)
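A minimal sketch of how the reworked freezer API is meant to be used from a freezable kernel thread (illustration only; the wait queue, the pending flag and example_thread() are hypothetical):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_work_pending;

static int example_thread(void *unused)
{
	set_freezable();	/* opt this kthread in to freezing */

	while (!kthread_should_stop()) {
		/* Sleeps interruptibly; if a freeze is requested, the loop
		 * inside wait_event_freezable() calls try_to_freeze(). */
		wait_event_freezable(example_wq,
				     example_work_pending ||
				     kthread_should_stop());

		if (example_work_pending) {
			example_work_pending = false;
			/* ... do the actual work ... */
		}
	}
	return 0;
}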
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e0bc4ff..7aacf31 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1428,6 +1428,7 @@
 #else
 	struct list_head	s_files;
 #endif
+	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
 	/* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
@@ -1440,7 +1441,7 @@
 	struct block_device	*s_bdev;
 	struct backing_dev_info *s_bdi;
 	struct mtd_info		*s_mtd;
-	struct list_head	s_instances;
+	struct hlist_node	s_instances;
 	struct quota_info	s_dquot;	/* Diskquota specific options */
 
 	int			s_frozen;
@@ -1481,6 +1482,12 @@
 	int cleancache_poolid;
 
 	struct shrinker s_shrink;	/* per-sb shrinker handle */
+
+	/* Number of inodes with nlink == 0 but still referenced */
+	atomic_long_t s_remove_count;
+
+	/* Being remounted read-only */
+	int s_readonly_remount;
 };
 
 /* superblock cache pruning functions */
@@ -1516,9 +1523,9 @@
 /*
  * VFS helper functions..
  */
-extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-extern int vfs_mkdir(struct inode *, struct dentry *, int);
-extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
+extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
+extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
 extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
 extern int vfs_rmdir(struct inode *, struct dentry *);
@@ -1534,7 +1541,7 @@
  * VFS file helper functions.
  */
 extern void inode_init_owner(struct inode *inode, const struct inode *dir,
-			mode_t mode);
+			umode_t mode);
 /*
  * VFS FS_IOC_FIEMAP helper definitions.
  */
@@ -1619,13 +1626,13 @@
 	int (*readlink) (struct dentry *, char __user *,int);
 	void (*put_link) (struct dentry *, struct nameidata *, void *);
 
-	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+	int (*create) (struct inode *,struct dentry *,umode_t,struct nameidata *);
 	int (*link) (struct dentry *,struct inode *,struct dentry *);
 	int (*unlink) (struct inode *,struct dentry *);
 	int (*symlink) (struct inode *,struct dentry *,const char *);
-	int (*mkdir) (struct inode *,struct dentry *,int);
+	int (*mkdir) (struct inode *,struct dentry *,umode_t);
 	int (*rmdir) (struct inode *,struct dentry *);
-	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *);
 	void (*truncate) (struct inode *);
@@ -1672,10 +1679,10 @@
 	int (*remount_fs) (struct super_block *, int *, char *);
 	void (*umount_begin) (struct super_block *);
 
-	int (*show_options)(struct seq_file *, struct vfsmount *);
-	int (*show_devname)(struct seq_file *, struct vfsmount *);
-	int (*show_path)(struct seq_file *, struct vfsmount *);
-	int (*show_stats)(struct seq_file *, struct vfsmount *);
+	int (*show_options)(struct seq_file *, struct dentry *);
+	int (*show_devname)(struct seq_file *, struct dentry *);
+	int (*show_path)(struct seq_file *, struct dentry *);
+	int (*show_stats)(struct seq_file *, struct dentry *);
 #ifdef CONFIG_QUOTA
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
@@ -1764,31 +1771,10 @@
 	__mark_inode_dirty(inode, I_DIRTY_SYNC);
 }
 
-/**
- * set_nlink - directly set an inode's link count
- * @inode: inode
- * @nlink: new nlink (should be non-zero)
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.
- */
-static inline void set_nlink(struct inode *inode, unsigned int nlink)
-{
-	inode->__i_nlink = nlink;
-}
-
-/**
- * inc_nlink - directly increment an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.  Currently,
- * it is only here for parity with dec_nlink().
- */
-static inline void inc_nlink(struct inode *inode)
-{
-	inode->__i_nlink++;
-}
+extern void inc_nlink(struct inode *inode);
+extern void drop_nlink(struct inode *inode);
+extern void clear_nlink(struct inode *inode);
+extern void set_nlink(struct inode *inode, unsigned int nlink);
 
 static inline void inode_inc_link_count(struct inode *inode)
 {
@@ -1796,35 +1782,6 @@
 	mark_inode_dirty(inode);
 }
 
-/**
- * drop_nlink - directly drop an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.  In cases
- * where we are attempting to track writes to the
- * filesystem, a decrement to zero means an imminent
- * write when the file is truncated and actually unlinked
- * on the filesystem.
- */
-static inline void drop_nlink(struct inode *inode)
-{
-	inode->__i_nlink--;
-}
-
-/**
- * clear_nlink - directly zero an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.  See
- * drop_nlink() for why we care about i_nlink hitting zero.
- */
-static inline void clear_nlink(struct inode *inode)
-{
-	inode->__i_nlink = 0;
-}
-
 static inline void inode_dec_link_count(struct inode *inode)
 {
 	drop_nlink(inode);
@@ -1864,7 +1821,7 @@
 	void (*kill_sb) (struct super_block *);
 	struct module *owner;
 	struct file_system_type * next;
-	struct list_head fs_supers;
+	struct hlist_head fs_supers;
 
 	struct lock_class_key s_lock_key;
 	struct lock_class_key s_umount_key;
@@ -1939,7 +1896,7 @@
 extern int vfs_statfs(struct path *, struct kstatfs *);
 extern int user_statfs(const char __user *, struct kstatfs *);
 extern int fd_statfs(int, struct kstatfs *);
-extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
+extern int vfs_ustat(dev_t, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
 extern bool our_mnt(struct vfsmount *mnt);
@@ -2054,8 +2011,8 @@
 extern int do_fallocate(struct file *file, int mode, loff_t offset,
 			loff_t len);
 extern long do_sys_open(int dfd, const char __user *filename, int flags,
-			int mode);
-extern struct file *filp_open(const char *, int, int);
+			umode_t mode);
+extern struct file *filp_open(const char *, int, umode_t);
 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
 				   const char *, int);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
@@ -2092,6 +2049,7 @@
 extern void bdput(struct block_device *);
 extern void invalidate_bdev(struct block_device *);
 extern int sync_blockdev(struct block_device *bdev);
+extern void kill_bdev(struct block_device *);
 extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
@@ -2099,6 +2057,7 @@
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
+static inline void kill_bdev(struct block_device *bdev) {}
 static inline void invalidate_bdev(struct block_device *bdev) {}
 
 static inline struct super_block *freeze_bdev(struct block_device *sb)
@@ -2191,8 +2150,6 @@
 extern const struct file_operations write_pipefifo_fops;
 extern const struct file_operations rdwr_pipefifo_fops;
 
-extern int fs_may_remount_ro(struct super_block *);
-
 #ifdef CONFIG_BLOCK
 /*
  * return READ, READA, or WRITE
@@ -2415,6 +2372,7 @@
 				unsigned long nr_segs, loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
 			int datasync);
+extern void block_sync_page(struct page *page);
 
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
@@ -2531,7 +2489,6 @@
 extern struct file_system_type *get_fs_type(const char *name);
 extern struct super_block *get_super(struct block_device *);
 extern struct super_block *get_active_super(struct block_device *bdev);
-extern struct super_block *user_get_super(dev_t);
 extern void drop_super(struct super_block *sb);
 extern void iterate_supers(void (*)(struct super_block *, void *), void *);
 extern void iterate_supers_type(struct file_system_type *,
@@ -2590,7 +2547,7 @@
 
 extern void file_update_time(struct file *file);
 
-extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
+extern int generic_show_options(struct seq_file *m, struct dentry *root);
 extern void save_mount_options(struct super_block *sb, char *options);
 extern void replace_mount_options(struct super_block *sb, char *options);
 
@@ -2691,7 +2648,7 @@
 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
 					    (flag & __FMODE_NONOTIFY)))
 
-static inline int is_sxid(mode_t mode)
+static inline int is_sxid(umode_t mode)
 {
 	return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
 }
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 61549b2..73c28de 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -85,6 +85,30 @@
 /* All generic netlink requests are serialized by a global lock.  */
 extern void genl_lock(void);
 extern void genl_unlock(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_genl_is_held(void);
+#endif
+
+/**
+ * rcu_dereference_genl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check that the caller holds either rcu_read_lock()
+ * or the genl mutex. Note: please prefer genl_dereference() or rcu_dereference().
+ */
+#define rcu_dereference_genl(p)					\
+	rcu_dereference_check(p, lockdep_genl_is_held())
+
+/**
+ * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds genl mutex.
+ */
+#define genl_dereference(p)					\
+	rcu_dereference_protected(p, lockdep_genl_is_held())
 
 #endif /* __KERNEL__ */
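For illustration (not part of the patch; example_state and example_ptr are hypothetical), the two helpers above are chosen by locking context:

#include <linux/genetlink.h>
#include <linux/rcupdate.h>

struct example_state;
static struct example_state __rcu *example_ptr;

/* Reader path: either rcu_read_lock() or the genl mutex satisfies the check. */
static struct example_state *example_read(void)
{
	return rcu_dereference_genl(example_ptr);
}

/* Update path: the caller holds the genl mutex (genl_lock()), so the cheaper
 * genl_dereference() form is appropriate. */
static struct example_state *example_update_side(void)
{
	return genl_dereference(example_ptr);
}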
 
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6d18f35..fe23ee7 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -163,7 +163,7 @@
                                          * disks that can't be partitioned. */
 
 	char disk_name[DISK_NAME_LEN];	/* name of major driver */
-	char *(*devnode)(struct gendisk *gd, mode_t *mode);
+	char *(*devnode)(struct gendisk *gd, umode_t *mode);
 
 	unsigned int events;		/* supported events */
 	unsigned int async_events;	/* async events, subset of all */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883..bb7f309 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@
 extern void account_system_vtime(struct task_struct *tsk);
 #endif
 
-#if defined(CONFIG_NO_HZ)
 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-static inline void rcu_irq_enter(void)
-{
-	rcu_exit_nohz();
-}
-
-static inline void rcu_irq_exit(void)
-{
-	rcu_enter_nohz();
-}
 
 static inline void rcu_nmi_enter(void)
 {
@@ -163,17 +150,9 @@
 }
 
 #else
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
 extern void rcu_nmi_exit(void);
 #endif
-#else
-# define rcu_irq_enter() do { } while (0)
-# define rcu_irq_exit() do { } while (0)
-# define rcu_nmi_enter() do { } while (0)
-# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 07d103a..8e25a91 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -482,6 +482,19 @@
 {
 	return adap->nr;
 }
+
+/**
+ * module_i2c_driver() - Helper macro for registering an I2C driver
+ * @__i2c_driver: i2c_driver struct
+ *
+ * Helper macro for I2C drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_i2c_driver(__i2c_driver) \
+	module_driver(__i2c_driver, i2c_add_driver, \
+			i2c_del_driver)
+
 #endif /* I2C */
 #endif /* __KERNEL__ */
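A sketch of the boilerplate the macro removes (illustrative only; the "example-chip" driver and its callbacks are hypothetical):

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id example_ids[] = {
	{ "example-chip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_ids);

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	return 0;	/* ... set up the device ... */
}

static int example_remove(struct i2c_client *client)
{
	return 0;	/* ... tear down the device ... */
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example-chip" },
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
};

/* Expands to the usual module_init()/module_exit() pair calling
 * i2c_add_driver()/i2c_del_driver(). */
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");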
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 4255785..501370b 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -920,7 +920,7 @@
 
 typedef struct {
 	const char	*name;
-	mode_t		mode;
+	umode_t		mode;
 	const struct file_operations *proc_fops;
 } ide_proc_entry_t;
 
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 48363c3..210e2c3 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -128,6 +128,7 @@
 #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK	0x0020
 #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL	0x0040
 #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK	0x0060
+#define IEEE80211_QOS_CTL_ACK_POLICY_MASK	0x0060
 /* A-MSDU 802.11n */
 #define IEEE80211_QOS_CTL_A_MSDU_PRESENT	0x0080
 /* Mesh Control 802.11s */
@@ -543,6 +544,15 @@
 	       cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
 }
 
+/**
+ * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
+ * @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+{
+	return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
+}
+
 struct ieee80211s_hdr {
 	u8 flags;
 	u8 ttl;
@@ -770,6 +780,9 @@
 	} u;
 } __attribute__ ((packed));
 
+/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+#define BSS_MEMBERSHIP_SELECTOR_HT_PHY	127
+
 /* mgmt header + 1 byte category code */
 #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
 
@@ -1552,6 +1565,8 @@
 #define WLAN_CIPHER_SUITE_WEP104	0x000FAC05
 #define WLAN_CIPHER_SUITE_AES_CMAC	0x000FAC06
 
+#define WLAN_CIPHER_SUITE_SMS4		0x00147201
+
 /* AKM suite selectors */
 #define WLAN_AKM_SUITE_8021X		0x000FAC01
 #define WLAN_AKM_SUITE_PSK		0x000FAC02
@@ -1689,6 +1704,23 @@
 }
 
 /**
+ * ieee80211_is_public_action - check if frame is a public action frame
+ * @hdr: the frame
+ * @len: length of the frame
+ */
+static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
+					      size_t len)
+{
+	struct ieee80211_mgmt *mgmt = (void *)hdr;
+
+	if (len < IEEE80211_MIN_ACTION_SIZE)
+		return false;
+	if (!ieee80211_is_action(hdr->frame_control))
+		return false;
+	return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
+}
+
+/**
  * ieee80211_fhss_chan_to_freq - get channel frequency
  * @channel: the FHSS channel
  *
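A small illustration of the new helper (not part of the patch; example_wants_frame() and the skb handling are hypothetical):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

static bool example_wants_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* Accept only public action frames; the minimum-length check is
	 * performed inside ieee80211_is_public_action(). */
	return ieee80211_is_public_action(hdr, skb->len);
}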
diff --git a/include/linux/if.h b/include/linux/if.h
index db20bd4..06b6ef6 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -79,6 +79,7 @@
 #define IFF_TX_SKB_SHARING	0x10000	/* The interface supports sharing
 					 * skbs on transmit */
 #define IFF_UNICAST_FLT	0x20000		/* Supports unicast filtering	*/
+#define IFF_TEAM_PORT	0x40000		/* device used as team port */
 
 #define IF_GET_IFACE	0x0001		/* for querying only */
 #define IF_GET_PROTO	0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index e473003..56d907a 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -79,6 +79,7 @@
 #define ETH_P_PAE	0x888E		/* Port Access Entity (IEEE 802.1X) */
 #define ETH_P_AOE	0x88A2		/* ATA over Ethernet		*/
 #define ETH_P_8021AD	0x88A8          /* 802.1ad Service VLAN		*/
+#define ETH_P_802_EX1	0x88B5		/* 802.1 Local Experimental 1.  */
 #define ETH_P_TIPC	0x88CA		/* TIPC 			*/
 #define ETH_P_8021AH	0x88E7          /* 802.1ah Backbone Service Tag */
 #define ETH_P_1588	0x88F7		/* IEEE 1588 Timesync */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
new file mode 100644
index 0000000..828181f
--- /dev/null
+++ b/include/linux/if_team.h
@@ -0,0 +1,242 @@
+/*
+ * include/linux/if_team.h - Network team device driver header
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_TEAM_H_
+#define _LINUX_IF_TEAM_H_
+
+#ifdef __KERNEL__
+
+struct team_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			rx_multicast;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			rx_dropped;
+	u32			tx_dropped;
+};
+
+struct team;
+
+struct team_port {
+	struct net_device *dev;
+	struct hlist_node hlist; /* node in hash list */
+	struct list_head list; /* node in ordinary list */
+	struct team *team;
+	int index;
+
+	/*
+	 * A place for storing original values of the device before it
+	 * A place for storing the original values of the device before it
+	 * became a port.
+	struct {
+		unsigned char dev_addr[MAX_ADDR_LEN];
+		unsigned int mtu;
+	} orig;
+
+	bool linkup;
+	u32 speed;
+	u8 duplex;
+
+	struct rcu_head rcu;
+};
+
+struct team_mode_ops {
+	int (*init)(struct team *team);
+	void (*exit)(struct team *team);
+	rx_handler_result_t (*receive)(struct team *team,
+				       struct team_port *port,
+				       struct sk_buff *skb);
+	bool (*transmit)(struct team *team, struct sk_buff *skb);
+	int (*port_enter)(struct team *team, struct team_port *port);
+	void (*port_leave)(struct team *team, struct team_port *port);
+	void (*port_change_mac)(struct team *team, struct team_port *port);
+};
+
+enum team_option_type {
+	TEAM_OPTION_TYPE_U32,
+	TEAM_OPTION_TYPE_STRING,
+};
+
+struct team_option {
+	struct list_head list;
+	const char *name;
+	enum team_option_type type;
+	int (*getter)(struct team *team, void *arg);
+	int (*setter)(struct team *team, void *arg);
+};
+
+struct team_mode {
+	struct list_head list;
+	const char *kind;
+	struct module *owner;
+	size_t priv_size;
+	const struct team_mode_ops *ops;
+};
+
+#define TEAM_PORT_HASHBITS 4
+#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
+
+#define TEAM_MODE_PRIV_LONGS 4
+#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
+
+struct team {
+	struct net_device *dev; /* associated netdevice */
+	struct team_pcpu_stats __percpu *pcpu_stats;
+
+	struct mutex lock; /* used for overall locking, e.g. port lists write */
+
+	/*
+	 * port lists with port count
+	 */
+	int port_count;
+	struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
+	struct list_head port_list;
+
+	struct list_head option_list;
+
+	const struct team_mode *mode;
+	struct team_mode_ops ops;
+	long mode_priv[TEAM_MODE_PRIV_LONGS];
+};
+
+static inline struct hlist_head *team_port_index_hash(struct team *team,
+						      int port_index)
+{
+	return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+}
+
+static inline struct team_port *team_get_port_by_index(struct team *team,
+						       int port_index)
+{
+	struct hlist_node *p;
+	struct team_port *port;
+	struct hlist_head *head = team_port_index_hash(team, port_index);
+
+	hlist_for_each_entry(port, p, head, hlist)
+		if (port->index == port_index)
+			return port;
+	return NULL;
+}
+static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
+							   int port_index)
+{
+	struct hlist_node *p;
+	struct team_port *port;
+	struct hlist_head *head = team_port_index_hash(team, port_index);
+
+	hlist_for_each_entry_rcu(port, p, head, hlist)
+		if (port->index == port_index)
+			return port;
+	return NULL;
+}
+
+extern int team_port_set_team_mac(struct team_port *port);
+extern int team_options_register(struct team *team,
+				 const struct team_option *option,
+				 size_t option_count);
+extern void team_options_unregister(struct team *team,
+				    const struct team_option *option,
+				    size_t option_count);
+extern int team_mode_register(struct team_mode *mode);
+extern int team_mode_unregister(struct team_mode *mode);
+
+#endif /* __KERNEL__ */
+
+#define TEAM_STRING_MAX_LEN 32
+
+/**********************************
+ * NETLINK_GENERIC netlink family.
+ **********************************/
+
+enum {
+	TEAM_CMD_NOOP,
+	TEAM_CMD_OPTIONS_SET,
+	TEAM_CMD_OPTIONS_GET,
+	TEAM_CMD_PORT_LIST_GET,
+
+	__TEAM_CMD_MAX,
+	TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1),
+};
+
+enum {
+	TEAM_ATTR_UNSPEC,
+	TEAM_ATTR_TEAM_IFINDEX,		/* u32 */
+	TEAM_ATTR_LIST_OPTION,		/* nest */
+	TEAM_ATTR_LIST_PORT,		/* nest */
+
+	__TEAM_ATTR_MAX,
+	TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1,
+};
+
+/* Nested layout of get/set msg:
+ *
+ *	[TEAM_ATTR_LIST_OPTION]
+ *		[TEAM_ATTR_ITEM_OPTION]
+ *			[TEAM_ATTR_OPTION_*], ...
+ *		[TEAM_ATTR_ITEM_OPTION]
+ *			[TEAM_ATTR_OPTION_*], ...
+ *		...
+ *	[TEAM_ATTR_LIST_PORT]
+ *		[TEAM_ATTR_ITEM_PORT]
+ *			[TEAM_ATTR_PORT_*], ...
+ *		[TEAM_ATTR_ITEM_PORT]
+ *			[TEAM_ATTR_PORT_*], ...
+ *		...
+ */
+
+enum {
+	TEAM_ATTR_ITEM_OPTION_UNSPEC,
+	TEAM_ATTR_ITEM_OPTION,		/* nest */
+
+	__TEAM_ATTR_ITEM_OPTION_MAX,
+	TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1,
+};
+
+enum {
+	TEAM_ATTR_OPTION_UNSPEC,
+	TEAM_ATTR_OPTION_NAME,		/* string */
+	TEAM_ATTR_OPTION_CHANGED,	/* flag */
+	TEAM_ATTR_OPTION_TYPE,		/* u8 */
+	TEAM_ATTR_OPTION_DATA,		/* dynamic */
+
+	__TEAM_ATTR_OPTION_MAX,
+	TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
+};
+
+enum {
+	TEAM_ATTR_ITEM_PORT_UNSPEC,
+	TEAM_ATTR_ITEM_PORT,		/* nest */
+
+	__TEAM_ATTR_ITEM_PORT_MAX,
+	TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1,
+};
+
+enum {
+	TEAM_ATTR_PORT_UNSPEC,
+	TEAM_ATTR_PORT_IFINDEX,		/* u32 */
+	TEAM_ATTR_PORT_CHANGED,		/* flag */
+	TEAM_ATTR_PORT_LINKUP,		/* flag */
+	TEAM_ATTR_PORT_SPEED,		/* u32 */
+	TEAM_ATTR_PORT_DUPLEX,		/* u8 */
+
+	__TEAM_ATTR_PORT_MAX,
+	TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
+};
+
+/*
+ * NETLINK_GENERIC related info
+ */
+#define TEAM_GENL_NAME "team"
+#define TEAM_GENL_VERSION 0x1
+#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
+
+#endif /* _LINUX_IF_TEAM_H_ */
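To illustrate what a mode module built on these hooks could look like, a partial, hypothetical skeleton (not one of the patch's own modes; the "example" kind, the trivial transmit policy and the example_* names are invented):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>

/* Placeholder policy: always transmit on the first port, if there is one. */
static bool example_transmit(struct team *team, struct sk_buff *skb)
{
	struct team_port *port;

	port = team_get_port_by_index_rcu(team, 0);
	if (!port) {
		dev_kfree_skb_any(skb);
		return false;
	}
	skb->dev = port->dev;
	return dev_queue_xmit(skb) == 0;
}

static const struct team_mode_ops example_mode_ops = {
	.transmit	= example_transmit,
};

static struct team_mode example_mode = {
	.kind		= "example",
	.owner		= THIS_MODULE,
	.ops		= &example_mode_ops,
};

static int __init example_mode_init(void)
{
	return team_mode_register(&example_mode);
}
module_init(example_mode_init);

static void __exit example_mode_exit(void)
{
	team_mode_unregister(&example_mode);
}
module_exit(example_mode_exit);

MODULE_LICENSE("GPL");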
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 12d5543..13aff1e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -74,22 +74,7 @@
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-/* if this changes, algorithm will have to be reworked because this
- * depends on completely exhausting the VLAN identifier space.  Thus
- * it gives constant time look-up, but in many cases it wastes memory.
- */
-#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
-#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
-
-struct vlan_group {
-	struct net_device	*real_dev; /* The ethernet(like) device
-					    * the vlan is attached to.
-					    */
-	unsigned int		nr_vlans;
-	struct hlist_node	hlist;	/* linked list */
-	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
-	struct rcu_head		rcu;
-};
+struct vlan_info;
 
 static inline int is_vlan_dev(struct net_device *dev)
 {
@@ -109,6 +94,13 @@
 extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 
+extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+				const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+				 const struct net_device *by_dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -139,6 +131,26 @@
 {
 	return skb;
 }
+
+static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+	return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+				       const struct net_device *by_dev)
+{
+	return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+					const struct net_device *by_dev)
+{
+}
 #endif
 
 /**
@@ -310,6 +322,40 @@
 
 	return protocol;
 }
+
+static inline void vlan_set_encap_proto(struct sk_buff *skb,
+					struct vlan_hdr *vhdr)
+{
+	__be16 proto;
+	unsigned char *rawp;
+
+	/*
+	 * This was a VLAN packet; grab the encapsulated protocol, which is
+	 * what the layer three protocols care about.
+	 */
+
+	proto = vhdr->h_vlan_encapsulated_proto;
+	if (ntohs(proto) >= 1536) {
+		skb->protocol = proto;
+		return;
+	}
+
+	rawp = skb->data;
+	if (*(unsigned short *) rawp == 0xFFFF)
+		/*
+		 * This is a magic hack to spot IPX packets. Older Novell
+		 * breaks the protocol design and runs IPX over 802.3 without
+		 * an 802.2 LLC layer. We look for FFFF which isn't a used
+		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+		 * but does for the rest.
+		 */
+		skb->protocol = htons(ETH_P_802_3);
+	else
+		/*
+		 * Real 802.2 LLC
+		 */
+		skb->protocol = htons(ETH_P_802_2);
+}
 #endif /* __KERNEL__ */
 
 /* VLAN IOCTLs are found in sockios.h */
@@ -352,7 +398,7 @@
 		unsigned int skb_priority;
 		unsigned int name_type;
 		unsigned int bind_type;
-		unsigned int flag; /* Matches vlan_dev_info flags */
+		unsigned int flag; /* Matches vlan_dev_priv flags */
         } u;
 
 	short vlan_qos;   
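A sketch of how an aggregating driver (bond- or team-like) might use the new VLAN filter helpers when adding and removing a lower device (illustration only; the example_* functions and the master/port naming are hypothetical):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static int example_enslave(struct net_device *master, struct net_device *port)
{
	int err;

	/* Replay every VLAN ID already configured on the master onto the
	 * newly added lower device. */
	err = vlan_vids_add_by_dev(port, master);
	if (err)
		return err;

	/* ... rest of the enslave sequence ... */
	return 0;
}

static void example_release(struct net_device *master, struct net_device *port)
{
	/* Drop the VLAN ID references taken above. */
	vlan_vids_del_by_dev(port, master);
}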
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index abf5028..34e8d52 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -22,7 +22,7 @@
 
 /* Request structure */
 
-struct inet_diag_req {
+struct inet_diag_req_compat {
 	__u8	idiag_family;		/* Family of addresses. */
 	__u8	idiag_src_len;
 	__u8	idiag_dst_len;
@@ -34,6 +34,15 @@
 	__u32	idiag_dbs;		/* Tables to dump (NI) */
 };
 
+struct inet_diag_req {
+	__u8	sdiag_family;
+	__u8	sdiag_protocol;
+	__u8	idiag_ext;
+	__u8	pad;
+	__u32	idiag_states;
+	struct inet_diag_sockid id;
+};
+
 enum {
 	INET_DIAG_REQ_NONE,
 	INET_DIAG_REQ_BYTECODE,
@@ -99,9 +108,10 @@
 	INET_DIAG_CONG,
 	INET_DIAG_TOS,
 	INET_DIAG_TCLASS,
+	INET_DIAG_SKMEMINFO,
 };
 
-#define INET_DIAG_MAX INET_DIAG_TCLASS
+#define INET_DIAG_MAX INET_DIAG_SKMEMINFO
 
 
 /* INET_DIAG_MEM */
@@ -125,16 +135,41 @@
 #ifdef __KERNEL__
 struct sock;
 struct inet_hashinfo;
+struct nlattr;
+struct nlmsghdr;
+struct sk_buff;
+struct netlink_callback;
 
 struct inet_diag_handler {
-	struct inet_hashinfo    *idiag_hashinfo;
+	void			(*dump)(struct sk_buff *skb,
+					struct netlink_callback *cb,
+					struct inet_diag_req *r,
+					struct nlattr *bc);
+
+	int			(*dump_one)(struct sk_buff *in_skb,
+					const struct nlmsghdr *nlh,
+					struct inet_diag_req *req);
+
 	void			(*idiag_get_info)(struct sock *sk,
 						  struct inet_diag_msg *r,
 						  void *info);
-	__u16                   idiag_info_size;
 	__u16                   idiag_type;
 };
 
+struct inet_connection_sock;
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+			      struct sk_buff *skb, struct inet_diag_req *req,
+			      u32 pid, u32 seq, u16 nlmsg_flags,
+			      const struct nlmsghdr *unlh);
+void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+		struct netlink_callback *cb, struct inet_diag_req *r,
+		struct nlattr *bc);
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+		struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+		struct inet_diag_req *req);
+
+int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* __KERNEL__ */
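A sketch of how a protocol module might plug into the reworked handler (illustrative only; example_hashinfo and the example_* wrappers are hypothetical, and a real handler would also fill in .idiag_get_info):

#include <linux/in.h>
#include <linux/init.h>
#include <linux/inet_diag.h>

extern struct inet_hashinfo example_hashinfo;	/* hypothetical */

static void example_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			      struct inet_diag_req *r, struct nlattr *bc)
{
	inet_diag_dump_icsk(&example_hashinfo, skb, cb, r, bc);
}

static int example_diag_dump_one(struct sk_buff *in_skb,
				 const struct nlmsghdr *nlh,
				 struct inet_diag_req *req)
{
	return inet_diag_dump_one_icsk(&example_hashinfo, in_skb, nlh, req);
}

static const struct inet_diag_handler example_diag_handler = {
	.dump		= example_diag_dump,
	.dump_one	= example_diag_dump_one,
	.idiag_type	= IPPROTO_TCP,	/* whichever protocol the module covers */
};

static int __init example_diag_init(void)
{
	return inet_diag_register(&example_diag_handler);
}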
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 3b1594d..30e8161 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -93,7 +93,7 @@
 	gid_t		gid;
 	uid_t		cuid;
 	gid_t		cgid;
-	mode_t		mode; 
+	umode_t		mode; 
 	unsigned long	seq;
 	void		*security;
 };
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0c99776..6318268 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -404,7 +404,7 @@
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
 	return inet_sk(__sk)->pinet6;
@@ -515,7 +515,7 @@
 #define inet6_rcv_saddr(__sk)	NULL
 #define tcp_twsk_ipv6only(__sk)		0
 #define inet_v6_ipv6only(__sk)		0
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
 	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&& \
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 99834e58..bd4272b 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -91,10 +91,11 @@
 
 extern void irq_domain_add(struct irq_domain *domain);
 extern void irq_domain_del(struct irq_domain *domain);
+
+extern struct irq_domain_ops irq_domain_simple_ops;
 #endif /* CONFIG_IRQ_DOMAIN */
 
 #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
-extern struct irq_domain_ops irq_domain_simple_ops;
 extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
 extern void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start);
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index f0a2f8b..2a8b1659 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -91,7 +91,7 @@
 	 * The enum of the type. This can be any value of the above
 	 * properties.
 	 */
-	mode_t (*is_visible) (void *data, int type);
+	umode_t (*is_visible) (void *data, int type);
 
 	/*
 	 * Driver specific release function.
@@ -110,20 +110,20 @@
 iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
 			    void *data,
 			    ssize_t (*show) (void *data, int type, char *buf),
-			    mode_t (*is_visible) (void *data, int type),
+			    umode_t (*is_visible) (void *data, int type),
 			    void (*release) (void *data));
 
 struct iscsi_boot_kobj *
 iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
 			   void *data,
 			   ssize_t (*show) (void *data, int type, char *buf),
-			   mode_t (*is_visible) (void *data, int type),
+			   umode_t (*is_visible) (void *data, int type),
 			   void (*release) (void *data));
 struct iscsi_boot_kobj *
 iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
 			 void *data,
 			 ssize_t (*show) (void *data, int type, char *buf),
-			 mode_t (*is_visible) (void *data, int type),
+			 umode_t (*is_visible) (void *data, int type),
 			 void (*release) (void *data));
 
 struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d4..5ce8b14 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -14,6 +15,12 @@
 #endif
 };
 
+struct jump_label_key_deferred {
+	struct jump_label_key key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
 #endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@
 extern int jump_label_text_reserved(void *start, void *end);
 extern void jump_label_inc(struct jump_label_key *key);
 extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
 extern bool jump_label_enabled(struct jump_label_key *key);
 extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl);
 
 #else  /* !HAVE_JUMP_LABEL */
 
@@ -68,6 +78,10 @@
 {
 }
 
+struct jump_label_key_deferred {
+	struct jump_label_key  key;
+};
+
 static __always_inline bool static_branch(struct jump_label_key *key)
 {
 	if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@
 	atomic_dec(&key->enabled);
 }
 
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+	jump_label_dec(&key->key);
+}
+
 static inline int jump_label_text_reserved(void *start, void *end)
 {
 	return 0;
@@ -102,6 +121,14 @@
 {
 	return 0;
 }
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl)
+{
+}
 #endif	/* HAVE_JUMP_LABEL */
 
+#define jump_label_key_enabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
+#define jump_label_key_disabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+
 #endif	/* _LINUX_JUMP_LABEL_H */
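
The new deferred key is intended for sites where rapid enable/disable toggling is expensive; jump_label_rate_limit() batches the disables through the embedded delayed work. A hedged sketch assuming a driver-local key (names are hypothetical):

    static struct jump_label_key_deferred my_events_key;

    /* allow at most one real disable per HZ; later decrements are deferred */
    jump_label_rate_limit(&my_events_key, HZ);

    jump_label_inc(&my_events_key.key);        /* enable the static branch */
    jump_label_dec_deferred(&my_events_key);   /* disable, rate limited    */
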
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0cce2db..2fbd905 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/cputime.h>
 
@@ -15,21 +16,25 @@
  * used by rstatd/perfmeter
  */
 
-struct cpu_usage_stat {
-	cputime64_t user;
-	cputime64_t nice;
-	cputime64_t system;
-	cputime64_t softirq;
-	cputime64_t irq;
-	cputime64_t idle;
-	cputime64_t iowait;
-	cputime64_t steal;
-	cputime64_t guest;
-	cputime64_t guest_nice;
+enum cpu_usage_stat {
+	CPUTIME_USER,
+	CPUTIME_NICE,
+	CPUTIME_SYSTEM,
+	CPUTIME_SOFTIRQ,
+	CPUTIME_IRQ,
+	CPUTIME_IDLE,
+	CPUTIME_IOWAIT,
+	CPUTIME_STEAL,
+	CPUTIME_GUEST,
+	CPUTIME_GUEST_NICE,
+	NR_STATS,
+};
+
+struct kernel_cpustat {
+	u64 cpustat[NR_STATS];
 };
 
 struct kernel_stat {
-	struct cpu_usage_stat	cpustat;
 #ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];
 #endif
@@ -38,10 +43,13 @@
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
-#define kstat_cpu(cpu)	per_cpu(kstat, cpu)
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu	__get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
 
 extern unsigned long long nr_context_switches(void);
 
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index b16f653..722f477 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -117,5 +117,7 @@
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
 
 #endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ad81e1c..fc615a9 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -191,8 +191,6 @@
 }
 
 extern struct kobject *kset_find_obj(struct kset *, const char *);
-extern struct kobject *kset_find_obj_hinted(struct kset *, const char *,
-						struct kobject *);
 
 /* The global /sys/kernel/ kobject for people to chain off of */
 extern struct kobject *kernel_kobj;
diff --git a/include/linux/kref.h b/include/linux/kref.h
index d4a62ab..abc0120 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -15,16 +15,81 @@
 #ifndef _KREF_H_
 #define _KREF_H_
 
-#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
 
 struct kref {
 	atomic_t refcount;
 };
 
-void kref_init(struct kref *kref);
-void kref_get(struct kref *kref);
-int kref_put(struct kref *kref, void (*release) (struct kref *kref));
-int kref_sub(struct kref *kref, unsigned int count,
-	     void (*release) (struct kref *kref));
+/**
+ * kref_init - initialize object.
+ * @kref: object in question.
+ */
+static inline void kref_init(struct kref *kref)
+{
+	atomic_set(&kref->refcount, 1);
+}
 
+/**
+ * kref_get - increment refcount for object.
+ * @kref: object.
+ */
+static inline void kref_get(struct kref *kref)
+{
+	WARN_ON(!atomic_read(&kref->refcount));
+	atomic_inc(&kref->refcount);
+}
+
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.  If the caller does pass kfree to this
+ *	     function, you will be publicly mocked mercilessly by the kref
+ *	     maintainer, and anyone else who happens to notice it.  You have
+ *	     been warned.
+ *
+ * Subtract @count from the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0.  Beware, if this
+ * function returns 0, you still can not count on the kref from remaining in
+ * memory.  Only use the return value if you want to see if the kref is now
+ * gone, not present.
+ */
+static inline int kref_sub(struct kref *kref, unsigned int count,
+	     void (*release)(struct kref *kref))
+{
+	WARN_ON(release == NULL);
+
+	if (atomic_sub_and_test((int) count, &kref->refcount)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * kref_put - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.  If the caller does pass kfree to this
+ *	     function, you will be publicly mocked mercilessly by the kref
+ *	     maintainer, and anyone else who happens to notice it.  You have
+ *	     been warned.
+ *
+ * Decrement the refcount, and if it reaches 0, call release().
+ * Return 1 if the object was removed, otherwise return 0.  Beware: even if
+ * this function returns 0, you cannot count on the kref still being in
+ * memory afterwards.  Only use the return value to check whether the kref
+ * is now gone.
+ */
+static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+	return kref_sub(kref, 1, release);
+}
 #endif /* _KREF_H_ */
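
Moving the kref helpers inline does not change the calling convention; the usual wrap-in-a-structure pattern still applies. A minimal sketch with a hypothetical struct foo (not part of this patch):

    struct foo {
            struct kref ref;
            /* ... payload ... */
    };

    static void foo_release(struct kref *kref)
    {
            struct foo *f = container_of(kref, struct foo, ref);
            kfree(f);
    }

    struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

    kref_init(&f->ref);               /* refcount = 1                 */
    kref_get(&f->ref);                /* refcount = 2                 */
    kref_put(&f->ref, foo_release);   /* back to 1, release not run   */
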
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 5cac19b..0714b24 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -35,6 +35,7 @@
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c3892fc..68e67e5 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -557,6 +557,7 @@
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index b0e9989..e23121f 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
 #define _INCLUDE_GUARD_LATENCYTOP_H_
 
 #include <linux/compiler.h>
+struct task_struct;
+
 #ifdef CONFIG_LATENCYTOP
 
 #define LT_SAVECOUNT		32
@@ -23,7 +25,6 @@
 };
 
 
-struct task_struct;
 
 extern int latencytop_enabled;
 void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056..87f402c 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)	name##_lock_init()
@@ -72,9 +73,31 @@
 
 #define DEFINE_LGLOCK(name)						\
 									\
+ DEFINE_SPINLOCK(name##_cpu_lock);					\
+ cpumask_t name##_cpus __read_mostly;					\
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
  DEFINE_LGLOCK_LOCKDEP(name);						\
 									\
+ static int								\
+ name##_lg_cpu_callback(struct notifier_block *nb,			\
+				unsigned long action, void *hcpu)	\
+ {									\
+	switch (action & ~CPU_TASKS_FROZEN) {				\
+	case CPU_UP_PREPARE:						\
+		spin_lock(&name##_cpu_lock);				\
+		cpu_set((unsigned long)hcpu, name##_cpus);		\
+		spin_unlock(&name##_cpu_lock);				\
+		break;							\
+	case CPU_UP_CANCELED: case CPU_DEAD:				\
+		spin_lock(&name##_cpu_lock);				\
+		cpu_clear((unsigned long)hcpu, name##_cpus);		\
+		spin_unlock(&name##_cpu_lock);				\
+	}								\
+	return NOTIFY_OK;						\
+ }									\
+ static struct notifier_block name##_lg_cpu_notifier = {		\
+	.notifier_call = name##_lg_cpu_callback,			\
+ };									\
  void name##_lock_init(void) {						\
 	int i;								\
 	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
 		lock = &per_cpu(name##_lock, i);			\
 		*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;	\
 	}								\
+	register_hotcpu_notifier(&name##_lg_cpu_notifier);		\
+	get_online_cpus();						\
+	for_each_online_cpu(i)						\
+		cpu_set(i, name##_cpus);				\
+	put_online_cpus();						\
  }									\
  EXPORT_SYMBOL(name##_lock_init);					\
 									\
@@ -124,9 +152,9 @@
 									\
  void name##_global_lock_online(void) {					\
 	int i;								\
-	preempt_disable();						\
+	spin_lock(&name##_cpu_lock);					\
 	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
-	for_each_online_cpu(i) {					\
+	for_each_cpu(i, &name##_cpus) {					\
 		arch_spinlock_t *lock;					\
 		lock = &per_cpu(name##_lock, i);			\
 		arch_spin_lock(lock);					\
@@ -137,12 +165,12 @@
  void name##_global_unlock_online(void) {				\
 	int i;								\
 	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
-	for_each_online_cpu(i) {					\
+	for_each_cpu(i, &name##_cpus) {					\
 		arch_spinlock_t *lock;					\
 		lock = &per_cpu(name##_lock, i);			\
 		arch_spin_unlock(lock);					\
 	}								\
-	preempt_enable();						\
+	spin_unlock(&name##_cpu_lock);					\
  }									\
  EXPORT_SYMBOL(name##_global_unlock_online);				\
 									\
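
Because global_lock_online() now iterates the notifier-maintained name##_cpus mask under name##_cpu_lock rather than the online mask with preemption disabled, CPU hotplug during the write side can no longer leave a CPU's spinlock untouched. A hedged usage sketch (the lock name is hypothetical):

    DEFINE_LGLOCK(my_lglock);

    my_lglock_lock_init();             /* also registers the hotplug notifier */

    my_lglock_global_lock_online();    /* takes every tracked CPU's lock */
    /* ... write-side critical section ... */
    my_lglock_global_unlock_online();
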
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index ff9abff..90b0656 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -301,7 +301,7 @@
 	return ipv4_is_loopback(sin->sin_addr.s_addr);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -314,12 +314,12 @@
 
 	return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
 }
-#else	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else	/* IS_ENABLED(CONFIG_IPV6) */
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
 	return 0;
 }
-#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif	/* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Ensure incoming requests are from local privileged callers.
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b6a56e3..d36619e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -343,6 +343,8 @@
 
 #define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
 
+#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -392,6 +394,8 @@
 
 #define lockdep_assert_held(l)			do { } while (0)
 
+#define lockdep_recursing(tsk)			(0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 25b8086..fd7ff3d 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -185,7 +185,6 @@
 #define rounddown_pow_of_two(n)			\
 (						\
 	__builtin_constant_p(n) ? (		\
-		(n == 1) ? 0 :			\
 		(1UL << ilog2(n))) :		\
 	__rounddown_pow_of_two(n)		\
  )
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 0fe00cd..76f52bb 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -32,6 +32,8 @@
 
 struct mdiobb_ctrl {
 	const struct mdiobb_ops *ops;
+	/* reset callback */
+	int (*reset)(struct mii_bus *bus);
 };
 
 /* The returned bus is not yet registered with the phy layer. */
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index e9d3fdf..7c9fe3c 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -20,6 +20,8 @@
 
 	unsigned int phy_mask;
 	int irqs[PHY_MAX_ADDR];
+	/* reset callback */
+	int (*reset)(struct mii_bus *bus);
 };
 
 #endif /* __LINUX_MDIO_GPIO_H */
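
Both bitbanged MDIO structures gain an optional bus reset hook. A hedged board-file sketch (the GPIO numbers and names are hypothetical, and the platform-data layout is assumed from mdio-gpio.h):

    /* hypothetical board file: wire a PHY bus reset hook into the pdata */
    static int board_mdio_reset(struct mii_bus *bus)
    {
            /* toggle a board-specific PHY reset line here */
            return 0;
    }

    static struct mdio_gpio_platform_data board_mdio_pdata = {
            .mdc   = 10,               /* hypothetical GPIO numbers */
            .mdio  = 11,
            .reset = board_mdio_reset,
    };
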
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e..a6bb102 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
-#define MEMBLOCK_ERROR	0
-
 #ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
@@ -19,81 +17,161 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 
-#include <asm/memblock.h>
-
 #define INIT_MEMBLOCK_REGIONS	128
 
 struct memblock_region {
 	phys_addr_t base;
 	phys_addr_t size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+	int nid;
+#endif
 };
 
 struct memblock_type {
 	unsigned long cnt;	/* number of regions */
 	unsigned long max;	/* size of the allocated array */
+	phys_addr_t total_size;	/* size of all regions */
 	struct memblock_region *regions;
 };
 
 struct memblock {
 	phys_addr_t current_limit;
-	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
 	struct memblock_type memory;
 	struct memblock_type reserved;
 };
 
 extern struct memblock memblock;
 extern int memblock_debug;
-extern int memblock_can_resize;
 
 #define memblock_dbg(fmt, ...) \
 	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 
-u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
+				phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+				   phys_addr_t size, phys_addr_t align);
 int memblock_free_reserved_regions(void);
 int memblock_reserve_reserved_regions(void);
 
-extern void memblock_init(void);
-extern void memblock_analyze(void);
-extern long memblock_add(phys_addr_t base, phys_addr_t size);
-extern long memblock_remove(phys_addr_t base, phys_addr_t size);
-extern long memblock_free(phys_addr_t base, phys_addr_t size);
-extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
 
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+			  unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
  */
-extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
-					int nid);
-extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
-					    int nid);
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
+			   phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock.  Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0,							\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
+
+void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
+			       phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order.  Available as soon as memblock is initialized.
+ */
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
+	for (i = (u64)ULLONG_MAX,					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+	r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
 
 /* Flags for memblock_alloc_base() and __memblock_alloc_base() */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 
-extern phys_addr_t memblock_alloc_base(phys_addr_t size,
-					 phys_addr_t align,
-					 phys_addr_t max_addr);
-extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
-					   phys_addr_t align,
-					   phys_addr_t max_addr);
-extern phys_addr_t memblock_phys_mem_size(void);
-extern phys_addr_t memblock_start_of_DRAM(void);
-extern phys_addr_t memblock_end_of_DRAM(void);
-extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
-extern int memblock_is_memory(phys_addr_t addr);
-extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-extern int memblock_is_reserved(phys_addr_t addr);
-extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+				  phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
 
-extern void memblock_dump_all(void);
+extern void __memblock_dump_all(void);
 
-/* Provided by the architecture */
-extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
-extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
-				   phys_addr_t addr2, phys_addr_t size2);
+static inline void memblock_dump_all(void)
+{
+	if (memblock_debug)
+		__memblock_dump_all();
+}
 
 /**
  * memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@
  *                         accessible during boot
  * @limit: New limit value (physical address)
  */
-extern void memblock_set_current_limit(phys_addr_t limit);
+void memblock_set_current_limit(phys_addr_t limit);
 
 
 /*
@@ -154,9 +232,9 @@
 	     region++)
 
 
-#ifdef ARCH_DISCARD_MEMBLOCK
-#define __init_memblock __init
-#define __initdata_memblock __initdata
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -165,7 +243,7 @@
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-	return MEMBLOCK_ERROR;
+	return 0;
 }
 
 #endif /* CONFIG_HAVE_MEMBLOCK */
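
The new free-range iterators make it possible to walk usable memory without going through a separate early allocator. A small hedged example of the forward iterator (assuming a caller that only logs the ranges):

    phys_addr_t start, end;
    u64 i;

    /* walk all free (memory && !reserved) ranges on every node */
    for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
            pr_info("free range: [%#llx-%#llx]\n",
                    (unsigned long long)start, (unsigned long long)end);
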
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b87068a..9b296ea 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -85,6 +85,9 @@
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
+extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
@@ -381,5 +384,25 @@
 }
 #endif
 
+enum {
+	UNDER_LIMIT,
+	SOFT_LIMIT,
+	OVER_LIMIT,
+};
+
+#ifdef CONFIG_INET
+struct sock;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+void sock_update_memcg(struct sock *sk);
+void sock_release_memcg(struct sock *sk);
+#else
+static inline void sock_update_memcg(struct sock *sk)
+{
+}
+static inline void sock_release_memcg(struct sock *sk)
+{
+}
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_INET */
 #endif /* _LINUX_MEMCONTROL_H */
 
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 935699b..1ac7f6e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -15,7 +15,6 @@
 #ifndef _LINUX_MEMORY_H_
 #define _LINUX_MEMORY_H_
 
-#include <linux/sysdev.h>
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/mutex.h>
@@ -38,7 +37,7 @@
 	int phys_device;		/* to which fru does this belong? */
 	void *hw;			/* optional pointer to fw/hw data */
 	int (*phys_callback)(struct memory_block *);
-	struct sys_device sysdev;
+	struct device dev;
 };
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
new file mode 100644
index 0000000..5702d1b
--- /dev/null
+++ b/include/linux/mfd/da9052/da9052.h
@@ -0,0 +1,131 @@
+/*
+ * da9052 declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __MFD_DA9052_DA9052_H
+#define __MFD_DA9052_DA9052_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_IRQ_DCIN	0
+#define DA9052_IRQ_VBUS	1
+#define DA9052_IRQ_DCINREM	2
+#define DA9052_IRQ_VBUSREM	3
+#define DA9052_IRQ_VDDLOW	4
+#define DA9052_IRQ_ALARM	5
+#define DA9052_IRQ_SEQRDY	6
+#define DA9052_IRQ_COMP1V2	7
+#define DA9052_IRQ_NONKEY	8
+#define DA9052_IRQ_IDFLOAT	9
+#define DA9052_IRQ_IDGND	10
+#define DA9052_IRQ_CHGEND	11
+#define DA9052_IRQ_TBAT	12
+#define DA9052_IRQ_ADC_EOM	13
+#define DA9052_IRQ_PENDOWN	14
+#define DA9052_IRQ_TSIREADY	15
+#define DA9052_IRQ_GPI0	16
+#define DA9052_IRQ_GPI1	17
+#define DA9052_IRQ_GPI2	18
+#define DA9052_IRQ_GPI3	19
+#define DA9052_IRQ_GPI4	20
+#define DA9052_IRQ_GPI5	21
+#define DA9052_IRQ_GPI6	22
+#define DA9052_IRQ_GPI7	23
+#define DA9052_IRQ_GPI8	24
+#define DA9052_IRQ_GPI9	25
+#define DA9052_IRQ_GPI10	26
+#define DA9052_IRQ_GPI11	27
+#define DA9052_IRQ_GPI12	28
+#define DA9052_IRQ_GPI13	29
+#define DA9052_IRQ_GPI14	30
+#define DA9052_IRQ_GPI15	31
+
+enum da9052_chip_id {
+	DA9052,
+	DA9053_AA,
+	DA9053_BA,
+	DA9053_BB,
+};
+
+struct da9052_pdata;
+
+struct da9052 {
+	struct mutex io_lock;
+
+	struct device *dev;
+	struct regmap *regmap;
+
+	int irq_base;
+	u8 chip_id;
+
+	int chip_irq;
+};
+
+/* Device I/O API */
+static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
+{
+	int val, ret;
+
+	ret = regmap_read(da9052->regmap, reg, &val);
+	if (ret < 0)
+		return ret;
+	return val;
+}
+
+static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg,
+				    unsigned char val)
+{
+	return regmap_write(da9052->regmap, reg, val);
+}
+
+static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
+				     unsigned reg_cnt, unsigned char *val)
+{
+	return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
+}
+
+static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
+				      unsigned reg_cnt, unsigned char *val)
+{
+	return regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
+}
+
+static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
+				     unsigned char bit_mask,
+				     unsigned char reg_val)
+{
+	return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val);
+}
+
+int da9052_device_init(struct da9052 *da9052, u8 chip_id);
+void da9052_device_exit(struct da9052 *da9052);
+
+extern struct regmap_config da9052_regmap_config;
+
+#endif /* __MFD_DA9052_DA9052_H */
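
The inline I/O helpers are thin wrappers around regmap, so drivers for the DA9052 function blocks can do read-modify-write without open-coding locking. A hypothetical sub-driver fragment (not from this series) that enables LDO1 via the mask-based update helper:

    int ret;

    ret = da9052_reg_update(da9052, DA9052_LDO1_REG,
                            DA9052_LDO1_LDO1EN, DA9052_LDO1_LDO1EN);
    if (ret < 0)
            dev_err(da9052->dev, "failed to enable LDO1: %d\n", ret);
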
diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h
new file mode 100644
index 0000000..62c5c3c
--- /dev/null
+++ b/include/linux/mfd/da9052/pdata.h
@@ -0,0 +1,40 @@
+/*
+ * Platform data declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __MFD_DA9052_PDATA_H__
+#define __MFD_DA9052_PDATA_H__
+
+#define DA9052_MAX_REGULATORS	14
+
+struct da9052;
+
+struct da9052_pdata {
+	struct led_platform_data *pled;
+	int (*init) (struct da9052 *da9052);
+	int irq_base;
+	int gpio_base;
+	int use_for_apm;
+	struct regulator_init_data *regulators[DA9052_MAX_REGULATORS];
+};
+
+#endif
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
new file mode 100644
index 0000000..b97f730
--- /dev/null
+++ b/include/linux/mfd/da9052/reg.h
@@ -0,0 +1,749 @@
+/*
+ * Register declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_DA9052_REG_H
+#define __LINUX_MFD_DA9052_REG_H
+
+/* PAGE REGISTERS */
+#define DA9052_PAGE0_CON_REG		0
+#define DA9052_PAGE1_CON_REG		128
+
+/* STATUS REGISTERS */
+#define DA9052_STATUS_A_REG		1
+#define DA9052_STATUS_B_REG		2
+#define DA9052_STATUS_C_REG		3
+#define DA9052_STATUS_D_REG		4
+
+/* EVENT REGISTERS */
+#define DA9052_EVENT_A_REG		5
+#define DA9052_EVENT_B_REG		6
+#define DA9052_EVENT_C_REG		7
+#define DA9052_EVENT_D_REG		8
+#define DA9052_FAULTLOG_REG		9
+
+/* IRQ REGISTERS */
+#define DA9052_IRQ_MASK_A_REG		10
+#define DA9052_IRQ_MASK_B_REG		11
+#define DA9052_IRQ_MASK_C_REG		12
+#define DA9052_IRQ_MASK_D_REG		13
+
+/* CONTROL REGISTERS */
+#define DA9052_CONTROL_A_REG		14
+#define DA9052_CONTROL_B_REG		15
+#define DA9052_CONTROL_C_REG		16
+#define DA9052_CONTROL_D_REG		17
+
+#define DA9052_PDDIS_REG		18
+#define DA9052_INTERFACE_REG		19
+#define DA9052_RESET_REG		20
+
+/* GPIO REGISTERS */
+#define DA9052_GPIO_0_1_REG		21
+#define DA9052_GPIO_2_3_REG		22
+#define DA9052_GPIO_4_5_REG		23
+#define DA9052_GPIO_6_7_REG		24
+#define DA9052_GPIO_14_15_REG		28
+
+/* POWER SEQUENCER CONTROL REGISTERS */
+#define DA9052_ID_0_1_REG		29
+#define DA9052_ID_2_3_REG		30
+#define DA9052_ID_4_5_REG		31
+#define DA9052_ID_6_7_REG		32
+#define DA9052_ID_8_9_REG		33
+#define DA9052_ID_10_11_REG		34
+#define DA9052_ID_12_13_REG		35
+#define DA9052_ID_14_15_REG		36
+#define DA9052_ID_16_17_REG		37
+#define DA9052_ID_18_19_REG		38
+#define DA9052_ID_20_21_REG		39
+#define DA9052_SEQ_STATUS_REG		40
+#define DA9052_SEQ_A_REG		41
+#define DA9052_SEQ_B_REG		42
+#define DA9052_SEQ_TIMER_REG		43
+
+/* LDO AND BUCK REGISTERS */
+#define DA9052_BUCKA_REG		44
+#define DA9052_BUCKB_REG		45
+#define DA9052_BUCKCORE_REG		46
+#define DA9052_BUCKPRO_REG		47
+#define DA9052_BUCKMEM_REG		48
+#define DA9052_BUCKPERI_REG		49
+#define DA9052_LDO1_REG		50
+#define DA9052_LDO2_REG		51
+#define DA9052_LDO3_REG		52
+#define DA9052_LDO4_REG		53
+#define DA9052_LDO5_REG		54
+#define DA9052_LDO6_REG		55
+#define DA9052_LDO7_REG		56
+#define DA9052_LDO8_REG		57
+#define DA9052_LDO9_REG		58
+#define DA9052_LDO10_REG		59
+#define DA9052_SUPPLY_REG		60
+#define DA9052_PULLDOWN_REG		61
+#define DA9052_CHGBUCK_REG		62
+#define DA9052_WAITCONT_REG		63
+#define DA9052_ISET_REG		64
+#define DA9052_BATCHG_REG		65
+
+/* BATTERY CONTROL REGISTERS */
+#define DA9052_CHG_CONT_REG		66
+#define DA9052_INPUT_CONT_REG		67
+#define DA9052_CHG_TIME_REG		68
+#define DA9052_BBAT_CONT_REG		69
+
+/* LED CONTROL REGISTERS */
+#define DA9052_BOOST_REG		70
+#define DA9052_LED_CONT_REG		71
+#define DA9052_LEDMIN123_REG		72
+#define DA9052_LED1_CONF_REG		73
+#define DA9052_LED2_CONF_REG		74
+#define DA9052_LED3_CONF_REG		75
+#define DA9052_LED1CONT_REG		76
+#define DA9052_LED2CONT_REG		77
+#define DA9052_LED3CONT_REG		78
+#define DA9052_LED_CONT_4_REG		79
+#define DA9052_LED_CONT_5_REG		80
+
+/* ADC CONTROL REGISTERS */
+#define DA9052_ADC_MAN_REG		81
+#define DA9052_ADC_CONT_REG		82
+#define DA9052_ADC_RES_L_REG		83
+#define DA9052_ADC_RES_H_REG		84
+#define DA9052_VDD_RES_REG		85
+#define DA9052_VDD_MON_REG		86
+
+#define DA9052_ICHG_AV_REG		87
+#define DA9052_ICHG_THD_REG		88
+#define DA9052_ICHG_END_REG		89
+#define DA9052_TBAT_RES_REG		90
+#define DA9052_TBAT_HIGHP_REG		91
+#define DA9052_TBAT_HIGHN_REG		92
+#define DA9052_TBAT_LOW_REG		93
+#define DA9052_T_OFFSET_REG		94
+
+#define DA9052_ADCIN4_RES_REG		95
+#define DA9052_AUTO4_HIGH_REG		96
+#define DA9052_AUTO4_LOW_REG		97
+#define DA9052_ADCIN5_RES_REG		98
+#define DA9052_AUTO5_HIGH_REG		99
+#define DA9052_AUTO5_LOW_REG		100
+#define DA9052_ADCIN6_RES_REG		101
+#define DA9052_AUTO6_HIGH_REG		102
+#define DA9052_AUTO6_LOW_REG		103
+
+#define DA9052_TJUNC_RES_REG		104
+
+/* TSI CONTROL REGISTERS */
+#define DA9052_TSI_CONT_A_REG		105
+#define DA9052_TSI_CONT_B_REG		106
+#define DA9052_TSI_X_MSB_REG		107
+#define DA9052_TSI_Y_MSB_REG		108
+#define DA9052_TSI_LSB_REG		109
+#define DA9052_TSI_Z_MSB_REG		110
+
+/* RTC COUNT REGISTERS */
+#define DA9052_COUNT_S_REG		111
+#define DA9052_COUNT_MI_REG		112
+#define DA9052_COUNT_H_REG		113
+#define DA9052_COUNT_D_REG		114
+#define DA9052_COUNT_MO_REG		115
+#define DA9052_COUNT_Y_REG		116
+
+/* RTC CONTROL REGISTERS */
+#define DA9052_ALARM_MI_REG		117
+#define DA9052_ALARM_H_REG		118
+#define DA9052_ALARM_D_REG		119
+#define DA9052_ALARM_MO_REG		120
+#define DA9052_ALARM_Y_REG		121
+#define DA9052_SECOND_A_REG		122
+#define DA9052_SECOND_B_REG		123
+#define DA9052_SECOND_C_REG		124
+#define DA9052_SECOND_D_REG		125
+
+/* PAGE CONFIGURATION BIT */
+#define DA9052_PAGE_CONF		0X80
+
+/* STATUS REGISTER A BITS */
+#define DA9052_STATUSA_VDATDET		0X80
+#define DA9052_STATUSA_VBUSSEL		0X40
+#define DA9052_STATUSA_DCINSEL		0X20
+#define DA9052_STATUSA_VBUSDET		0X10
+#define DA9052_STATUSA_DCINDET		0X08
+#define DA9052_STATUSA_IDGND		0X04
+#define DA9052_STATUSA_IDFLOAT		0X02
+#define DA9052_STATUSA_NONKEY		0X01
+
+/* STATUS REGISTER B BITS */
+#define DA9052_STATUSB_COMPDET		0X80
+#define DA9052_STATUSB_SEQUENCING	0X40
+#define DA9052_STATUSB_GPFB2		0X20
+#define DA9052_STATUSB_CHGTO		0X10
+#define DA9052_STATUSB_CHGEND		0X08
+#define DA9052_STATUSB_CHGLIM		0X04
+#define DA9052_STATUSB_CHGPRE		0X02
+#define DA9052_STATUSB_CHGATT		0X01
+
+/* STATUS REGISTER C BITS */
+#define DA9052_STATUSC_GPI7		0X80
+#define DA9052_STATUSC_GPI6		0X40
+#define DA9052_STATUSC_GPI5		0X20
+#define DA9052_STATUSC_GPI4		0X10
+#define DA9052_STATUSC_GPI3		0X08
+#define DA9052_STATUSC_GPI2		0X04
+#define DA9052_STATUSC_GPI1		0X02
+#define DA9052_STATUSC_GPI0		0X01
+
+/* STATUS REGISTER D BITS */
+#define DA9052_STATUSD_GPI15		0X80
+#define DA9052_STATUSD_GPI14		0X40
+#define DA9052_STATUSD_GPI13		0X20
+#define DA9052_STATUSD_GPI12		0X10
+#define DA9052_STATUSD_GPI11		0X08
+#define DA9052_STATUSD_GPI10		0X04
+#define DA9052_STATUSD_GPI9		0X02
+#define DA9052_STATUSD_GPI8		0X01
+
+/* EVENT REGISTER A BITS */
+#define DA9052_EVENTA_ECOMP1V2		0X80
+#define DA9052_EVENTA_ESEQRDY		0X40
+#define DA9052_EVENTA_EALRAM		0X20
+#define DA9052_EVENTA_EVDDLOW		0X10
+#define DA9052_EVENTA_EVBUSREM		0X08
+#define DA9052_EVENTA_EDCINREM		0X04
+#define DA9052_EVENTA_EVBUSDET		0X02
+#define DA9052_EVENTA_EDCINDET		0X01
+
+/* EVENT REGISTER B BITS */
+#define DA9052_EVENTB_ETSIREADY	0X80
+#define DA9052_EVENTB_EPENDOWN		0X40
+#define DA9052_EVENTB_EADCEOM		0X20
+#define DA9052_EVENTB_ETBAT		0X10
+#define DA9052_EVENTB_ECHGEND		0X08
+#define DA9052_EVENTB_EIDGND		0X04
+#define DA9052_EVENTB_EIDFLOAT		0X02
+#define DA9052_EVENTB_ENONKEY		0X01
+
+/* EVENT REGISTER C BITS */
+#define DA9052_EVENTC_EGPI7		0X80
+#define DA9052_EVENTC_EGPI6		0X40
+#define DA9052_EVENTC_EGPI5		0X20
+#define DA9052_EVENTC_EGPI4		0X10
+#define DA9052_EVENTC_EGPI3		0X08
+#define DA9052_EVENTC_EGPI2		0X04
+#define DA9052_EVENTC_EGPI1		0X02
+#define DA9052_EVENTC_EGPI0		0X01
+
+/* EVENT REGISTER D BITS */
+#define DA9052_EVENTD_EGPI15		0X80
+#define DA9052_EVENTD_EGPI14		0X40
+#define DA9052_EVENTD_EGPI13		0X20
+#define DA9052_EVENTD_EGPI12		0X10
+#define DA9052_EVENTD_EGPI11		0X08
+#define DA9052_EVENTD_EGPI10		0X04
+#define DA9052_EVENTD_EGPI9		0X02
+#define DA9052_EVENTD_EGPI8		0X01
+
+/* IRQ MASK REGISTERS BITS */
+#define DA9052_M_NONKEY		0X0100
+
+/* TSI EVENT REGISTERS BITS */
+#define DA9052_E_PEN_DOWN		0X4000
+#define DA9052_E_TSI_READY		0X8000
+
+/* FAULT LOG REGISTER BITS */
+#define DA9052_FAULTLOG_WAITSET	0X80
+#define DA9052_FAULTLOG_NSDSET		0X40
+#define DA9052_FAULTLOG_KEYSHUT	0X20
+#define DA9052_FAULTLOG_TEMPOVER	0X08
+#define DA9052_FAULTLOG_VDDSTART	0X04
+#define DA9052_FAULTLOG_VDDFAULT	0X02
+#define DA9052_FAULTLOG_TWDERROR	0X01
+
+/* CONTROL REGISTER A BITS */
+#define DA9052_CONTROLA_GPIV		0X80
+#define DA9052_CONTROLA_PMOTYPE	0X20
+#define DA9052_CONTROLA_PMOV		0X10
+#define DA9052_CONTROLA_PMIV		0X08
+#define DA9052_CONTROLA_PMIFV		0X08
+#define DA9052_CONTROLA_PWR1EN		0X04
+#define DA9052_CONTROLA_PWREN		0X02
+#define DA9052_CONTROLA_SYSEN		0X01
+
+/* CONTROL REGISTER B BITS */
+#define DA9052_CONTROLB_SHUTDOWN	0X80
+#define DA9052_CONTROLB_DEEPSLEEP	0X40
+#define DA9052_CONTROL_B_WRITEMODE	0X20
+#define DA9052_CONTROLB_BBATEN		0X10
+#define DA9052_CONTROLB_OTPREADEN	0X08
+#define DA9052_CONTROLB_AUTOBOOT	0X04
+#define DA9052_CONTROLB_ACTDIODE	0X02
+#define DA9052_CONTROLB_BUCKMERGE	0X01
+
+/* CONTROL REGISTER C BITS */
+#define DA9052_CONTROLC_BLINKDUR	0X80
+#define DA9052_CONTROLC_BLINKFRQ	0X60
+#define DA9052_CONTROLC_DEBOUNCING	0X1C
+#define DA9052_CONTROLC_PMFB2PIN	0X02
+#define DA9052_CONTROLC_PMFB1PIN	0X01
+
+/* CONTROL REGISTER D BITS */
+#define DA9052_CONTROLD_WATCHDOG	0X80
+#define DA9052_CONTROLD_ACCDETEN	0X40
+#define DA9052_CONTROLD_GPI1415SD	0X20
+#define DA9052_CONTROLD_NONKEYSD	0X10
+#define DA9052_CONTROLD_KEEPACTEN	0X08
+#define DA9052_CONTROLD_TWDSCALE	0X07
+
+/* POWER DOWN DISABLE REGISTER BITS */
+#define DA9052_PDDIS_PMCONTPD		0X80
+#define DA9052_PDDIS_OUT32KPD		0X40
+#define DA9052_PDDIS_CHGBBATPD		0X20
+#define DA9052_PDDIS_CHGPD		0X10
+#define DA9052_PDDIS_HS2WIREPD		0X08
+#define DA9052_PDDIS_PMIFPD		0X04
+#define DA9052_PDDIS_GPADCPD		0X02
+#define DA9052_PDDIS_GPIOPD		0X01
+
+/* INTERFACE REGISTER BITS */
+#define DA9052_INTERFACE_IFBASEADDR	0XE0
+#define DA9052_INTERFACE_NCSPOL	0X10
+#define DA9052_INTERFACE_RWPOL		0X08
+#define DA9052_INTERFACE_CPHA		0X04
+#define DA9052_INTERFACE_CPOL		0X02
+#define DA9052_INTERFACE_IFTYPE	0X01
+
+/* RESET REGISTER BITS */
+#define DA9052_RESET_RESETEVENT	0XC0
+#define DA9052_RESET_RESETTIMER	0X3F
+
+/* GPIO REGISTERS */
+/* GPIO CONTROL REGISTER BITS */
+#define DA9052_GPIO_EVEN_PORT_PIN	0X03
+#define DA9052_GPIO_EVEN_PORT_TYPE	0X04
+#define DA9052_GPIO_EVEN_PORT_MODE	0X08
+
+#define DA9052_GPIO_ODD_PORT_PIN	0X30
+#define DA9052_GPIO_ODD_PORT_TYPE	0X40
+#define DA9052_GPIO_ODD_PORT_MODE	0X80
+
+/* POWER SEQUENCER REGISTER BITS */
+/* SEQ CONTROL REGISTER BITS FOR ID 0 AND 1 */
+#define DA9052_ID01_LDO1STEP		0XF0
+#define DA9052_ID01_SYSPRE		0X04
+#define DA9052_ID01_DEFSUPPLY		0X02
+#define DA9052_ID01_NRESMODE		0X01
+
+/* SEQ CONTROL REGISTER BITS FOR ID 2 AND 3 */
+#define DA9052_ID23_LDO3STEP		0XF0
+#define DA9052_ID23_LDO2STEP		0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 4 AND 5 */
+#define DA9052_ID45_LDO5STEP		0XF0
+#define DA9052_ID45_LDO4STEP		0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 6 AND 7 */
+#define DA9052_ID67_LDO7STEP		0XF0
+#define DA9052_ID67_LDO6STEP		0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 8 AND 9 */
+#define DA9052_ID89_LDO9STEP		0XF0
+#define DA9052_ID89_LDO8STEP		0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 10 AND 11 */
+#define DA9052_ID1011_PDDISSTEP	0XF0
+#define DA9052_ID1011_LDO10STEP	0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 12 AND 13 */
+#define DA9052_ID1213_VMEMSWSTEP	0XF0
+#define DA9052_ID1213_VPERISWSTEP	0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 14 AND 15 */
+#define DA9052_ID1415_BUCKPROSTEP	0XF0
+#define DA9052_ID1415_BUCKCORESTEP	0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 16 AND 17 */
+#define DA9052_ID1617_BUCKPERISTEP	0XF0
+#define DA9052_ID1617_BUCKMEMSTEP	0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 18 AND 19 */
+#define DA9052_ID1819_GPRISE2STEP	0XF0
+#define DA9052_ID1819_GPRISE1STEP	0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 20 AND 21 */
+#define DA9052_ID2021_GPFALL2STEP	0XF0
+#define DA9052_ID2021_GPFALL1STEP	0X0F
+
+/* POWER SEQ STATUS REGISTER BITS */
+#define DA9052_SEQSTATUS_SEQPOINTER	0XF0
+#define DA9052_SEQSTATUS_WAITSTEP	0X0F
+
+/* POWER SEQ A REGISTER BITS */
+#define DA9052_SEQA_POWEREND		0XF0
+#define DA9052_SEQA_SYSTEMEND		0X0F
+
+/* POWER SEQ B REGISTER BITS */
+#define DA9052_SEQB_PARTDOWN		0XF0
+#define DA9052_SEQB_MAXCOUNT		0X0F
+
+/* POWER SEQ TIMER REGISTER BITS */
+#define DA9052_SEQTIMER_SEQDUMMY	0XF0
+#define DA9052_SEQTIMER_SEQTIME	0X0F
+
+/* POWER SUPPLY CONTROL REGISTER BITS */
+/* BUCK REGISTER A BITS */
+#define DA9052_BUCKA_BPROILIM		0XC0
+#define DA9052_BUCKA_BPROMODE		0X30
+#define DA9052_BUCKA_BCOREILIM		0X0C
+#define DA9052_BUCKA_BCOREMODE		0X03
+
+/* BUCK REGISTER B BITS */
+#define DA9052_BUCKB_BERIILIM		0XC0
+#define DA9052_BUCKB_BPERIMODE		0X30
+#define DA9052_BUCKB_BMEMILIM		0X0C
+#define DA9052_BUCKB_BMEMMODE		0X03
+
+/* BUCKCORE REGISTER BITS */
+#define DA9052_BUCKCORE_BCORECONF	0X80
+#define DA9052_BUCKCORE_BCOREEN	0X40
+#define DA9052_BUCKCORE_VBCORE		0X3F
+
+/* BUCKPRO REGISTER BITS */
+#define DA9052_BUCKPRO_BPROCONF	0X80
+#define DA9052_BUCKPRO_BPROEN		0X40
+#define DA9052_BUCKPRO_VBPRO		0X3F
+
+/* BUCKMEM REGISTER BITS */
+#define DA9052_BUCKMEM_BMEMCONF	0X80
+#define DA9052_BUCKMEM_BMEMEN		0X40
+#define DA9052_BUCKMEM_VBMEM		0X3F
+
+/* BUCKPERI REGISTER BITS */
+#define DA9052_BUCKPERI_BPERICONF	0X80
+#define DA9052_BUCKPERI_BPERIEN	0X40
+#define DA9052_BUCKPERI_BPERIHS	0X20
+#define DA9052_BUCKPERI_VBPERI		0X1F
+
+/* LDO1 REGISTER BITS */
+#define DA9052_LDO1_LDO1CONF		0X80
+#define DA9052_LDO1_LDO1EN		0X40
+#define DA9052_LDO1_VLDO1		0X1F
+
+/* LDO2 REGISTER BITS */
+#define DA9052_LDO2_LDO2CONF		0X80
+#define DA9052_LDO2_LDO2EN		0X40
+#define DA9052_LDO2_VLDO2		0X3F
+
+/* LDO3 REGISTER BITS */
+#define DA9052_LDO3_LDO3CONF		0X80
+#define DA9052_LDO3_LDO3EN		0X40
+#define DA9052_LDO3_VLDO3		0X3F
+
+/* LDO4 REGISTER BITS */
+#define DA9052_LDO4_LDO4CONF		0X80
+#define DA9052_LDO4_LDO4EN		0X40
+#define DA9052_LDO4_VLDO4		0X3F
+
+/* LDO5 REGISTER BITS */
+#define DA9052_LDO5_LDO5CONF		0X80
+#define DA9052_LDO5_LDO5EN		0X40
+#define DA9052_LDO5_VLDO5		0X3F
+
+/* LDO6 REGISTER BITS */
+#define DA9052_LDO6_LDO6CONF		0X80
+#define DA9052_LDO6_LDO6EN		0X40
+#define DA9052_LDO6_VLDO6		0X3F
+
+/* LDO7 REGISTER BITS */
+#define DA9052_LDO7_LDO7CONF		0X80
+#define DA9052_LDO7_LDO7EN		0X40
+#define DA9052_LDO7_VLDO7		0X3F
+
+/* LDO8 REGISTER BITS */
+#define DA9052_LDO8_LDO8CONF		0X80
+#define DA9052_LDO8_LDO8EN		0X40
+#define DA9052_LDO8_VLDO8		0X3F
+
+/* LDO9 REGISTER BITS */
+#define DA9052_LDO9_LDO9CONF		0X80
+#define DA9052_LDO9_LDO9EN		0X40
+#define DA9052_LDO9_VLDO9		0X3F
+
+/* LDO10 REGISTER BITS */
+#define DA9052_LDO10_LDO10CONF		0X80
+#define DA9052_LDO10_LDO10EN		0X40
+#define DA9052_LDO10_VLDO10		0X3F
+
+/* SUPPLY REGISTER BITS */
+#define DA9052_SUPPLY_VLOCK		0X80
+#define DA9052_SUPPLY_VMEMSWEN		0X40
+#define DA9052_SUPPLY_VPERISWEN	0X20
+#define DA9052_SUPPLY_VLDO3GO		0X10
+#define DA9052_SUPPLY_VLDO2GO		0X08
+#define DA9052_SUPPLY_VBMEMGO		0X04
+#define DA9052_SUPPLY_VBPROGO		0X02
+#define DA9052_SUPPLY_VBCOREGO		0X01
+
+/* PULLDOWN REGISTER BITS */
+#define DA9052_PULLDOWN_LDO5PDDIS	0X20
+#define DA9052_PULLDOWN_LDO2PDDIS	0X10
+#define DA9052_PULLDOWN_LDO1PDDIS	0X08
+#define DA9052_PULLDOWN_MEMPDDIS	0X04
+#define DA9052_PULLDOWN_PROPDDIS	0X02
+#define DA9052_PULLDOWN_COREPDDIS	0X01
+
+/* BAT CHARGER REGISTER BITS */
+/* CHARGER BUCK REGISTER BITS */
+#define DA9052_CHGBUCK_CHGTEMP		0X80
+#define DA9052_CHGBUCK_CHGUSBILIM	0X40
+#define DA9052_CHGBUCK_CHGBUCKLP	0X20
+#define DA9052_CHGBUCK_CHGBUCKEN	0X10
+#define DA9052_CHGBUCK_ISETBUCK	0X0F
+
+/* WAIT COUNTER REGISTER BITS */
+#define DA9052_WAITCONT_WAITDIR	0X80
+#define DA9052_WAITCONT_RTCCLOCK	0X40
+#define DA9052_WAITCONT_WAITMODE	0X20
+#define DA9052_WAITCONT_EN32KOUT	0X10
+#define DA9052_WAITCONT_DELAYTIME	0X0F
+
+/* ISET CONTROL REGISTER BITS */
+#define DA9052_ISET_ISETDCIN		0XF0
+#define DA9052_ISET_ISETVBUS		0X0F
+
+/* BATTERY CHARGER CONTROL REGISTER BITS */
+#define DA9052_BATCHG_ICHGPRE		0XC0
+#define DA9052_BATCHG_ICHGBAT		0X3F
+
+/* CHARGER COUNTER REGISTER BITS */
+#define DA9052_CHG_CONT_VCHG_BAT	0XF8
+#define DA9052_CHG_CONT_TCTR		0X07
+
+/* INPUT CONTROL REGISTER BITS */
+#define DA9052_INPUT_CONT_TCTR_MODE	0X80
+#define DA9052_INPUT_CONT_VBUS_SUSP	0X10
+#define DA9052_INPUT_CONT_DCIN_SUSP	0X08
+
+/* CHARGING TIME REGISTER BITS */
+#define DA9052_CHGTIME_CHGTIME		0XFF
+
+/* BACKUP BATTERY CONTROL REGISTER BITS */
+#define DA9052_BBATCONT_BCHARGERISET	0XF0
+#define DA9052_BBATCONT_BCHARGERVSET	0X0F
+
+/* LED REGISTERS BITS */
+/* LED BOOST REGISTER BITS */
+#define DA9052_BOOST_EBFAULT		0X80
+#define DA9052_BOOST_MBFAULT		0X40
+#define DA9052_BOOST_BOOSTFRQ		0X20
+#define DA9052_BOOST_BOOSTILIM		0X10
+#define DA9052_BOOST_LED3INEN		0X08
+#define DA9052_BOOST_LED2INEN		0X04
+#define DA9052_BOOST_LED1INEN		0X02
+#define DA9052_BOOST_BOOSTEN		0X01
+
+/* LED CONTROL REGISTER BITS */
+#define DA9052_LEDCONT_SELLEDMODE	0X80
+#define DA9052_LEDCONT_LED3ICONT	0X40
+#define DA9052_LEDCONT_LED3RAMP	0X20
+#define DA9052_LEDCONT_LED3EN		0X10
+#define DA9052_LEDCONT_LED2RAMP	0X08
+#define DA9052_LEDCONT_LED2EN		0X04
+#define DA9052_LEDCONT_LED1RAMP	0X02
+#define DA9052_LEDCONT_LED1EN		0X01
+
+/* LEDMIN123 REGISTER BIT */
+#define DA9052_LEDMIN123_LEDMINCURRENT	0XFF
+
+/* LED1CONF REGISTER BIT */
+#define DA9052_LED1CONF_LED1CURRENT	0XFF
+
+/* LED2CONF REGISTER BIT */
+#define DA9052_LED2CONF_LED2CURRENT	0XFF
+
+/* LED3CONF REGISTER BIT */
+#define DA9052_LED3CONF_LED3CURRENT	0XFF
+
+/* LED CONT REGISTER BIT */
+#define DA9052_LED_CONT_DIM		0X80
+
+/* ADC MAN REGISTERS BITS */
+#define DA9052_ADC_MAN_MAN_CONV	0X10
+#define DA9052_ADC_MAN_MUXSEL_VDDOUT	0X00
+#define DA9052_ADC_MAN_MUXSEL_ICH	0X01
+#define DA9052_ADC_MAN_MUXSEL_TBAT	0X02
+#define DA9052_ADC_MAN_MUXSEL_VBAT	0X03
+#define DA9052_ADC_MAN_MUXSEL_AD4	0X04
+#define DA9052_ADC_MAN_MUXSEL_AD5	0X05
+#define DA9052_ADC_MAN_MUXSEL_AD6	0X06
+#define DA9052_ADC_MAN_MUXSEL_VBBAT	0X09
+
+/* ADC CONTROL REGISTER BITS */
+#define DA9052_ADCCONT_COMP1V2EN	0X80
+#define DA9052_ADCCONT_ADCMODE		0X40
+#define DA9052_ADCCONT_TBATISRCEN	0X20
+#define DA9052_ADCCONT_AD4ISRCEN	0X10
+#define DA9052_ADCCONT_AUTOAD6EN	0X08
+#define DA9052_ADCCONT_AUTOAD5EN	0X04
+#define DA9052_ADCCONT_AUTOAD4EN	0X02
+#define DA9052_ADCCONT_AUTOVDDEN	0X01
+
+/* ADC 10 BIT MANUAL CONVERSION RESULT LOW REGISTER */
+#define DA9052_ADC_RES_LSB		0X03
+
+/* ADC 10 BIT MANUAL CONVERSION RESULT HIGH REGISTER */
+#define DA9052_ADCRESH_ADCRESMSB	0XFF
+
+/* VDD RES REGISTER BIT */
+#define DA9052_VDDRES_VDDOUTRES	0XFF
+
+/* VDD MON REGISTER BIT */
+#define DA9052_VDDMON_VDDOUTMON	0XFF
+
+/* ICHG_AV REGISTER BIT */
+#define DA9052_ICHGAV_ICHGAV		0XFF
+
+/* ICHG_THD REGISTER BIT */
+#define DA9052_ICHGTHD_ICHGTHD		0XFF
+
+/* ICHG_END REGISTER BIT */
+#define DA9052_ICHGEND_ICHGEND		0XFF
+
+/* TBAT_RES REGISTER BIT */
+#define DA9052_TBATRES_TBATRES		0XFF
+
+/* TBAT_HIGHP REGISTER BIT */
+#define DA9052_TBATHIGHP_TBATHIGHP	0XFF
+
+/* TBAT_HIGHN REGISTER BIT */
+#define DA9052_TBATHIGHN_TBATHIGHN	0XFF
+
+/* TBAT_LOW REGISTER BIT */
+#define DA9052_TBATLOW_TBATLOW		0XFF
+
+/* T_OFFSET REGISTER BIT */
+#define DA9052_TOFFSET_TOFFSET		0XFF
+
+/* ADCIN4_RES REGISTER BIT */
+#define DA9052_ADCIN4RES_ADCIN4RES	0XFF
+
+/* ADCIN4_HIGH REGISTER BIT */
+#define DA9052_AUTO4HIGH_AUTO4HIGH	0XFF
+
+/* ADCIN4_LOW REGISTER BIT */
+#define DA9052_AUTO4LOW_AUTO4LOW	0XFF
+
+/* ADCIN5_RES REGISTER BIT */
+#define DA9052_ADCIN5RES_ADCIN5RES	0XFF
+
+/* ADCIN5_HIGH REGISTER BIT */
+#define DA9052_AUTO5HIGH_AUTOHIGH	0XFF
+
+/* ADCIN5_LOW REGISTER BIT */
+#define DA9052_AUTO5LOW_AUTO5LOW	0XFF
+
+/* ADCIN6_RES REGISTER BIT */
+#define DA9052_ADCIN6RES_ADCIN6RES	0XFF
+
+/* ADCIN6_HIGH REGISTER BIT */
+#define DA9052_AUTO6HIGH_AUTO6HIGH	0XFF
+
+/* ADCIN6_LOW REGISTER BIT */
+#define DA9052_AUTO6LOW_AUTO6LOW	0XFF
+
+/* TJUNC_RES REGISTER BIT */
+#define DA9052_TJUNCRES_TJUNCRES	0XFF
+
+/* TSI REGISTER */
+/* TSI CONTROL REGISTER A BITS */
+#define DA9052_TSICONTA_TSIDELAY	0XC0
+#define DA9052_TSICONTA_TSISKIP	0X38
+#define DA9052_TSICONTA_TSIMODE	0X04
+#define DA9052_TSICONTA_PENDETEN	0X02
+#define DA9052_TSICONTA_AUTOTSIEN	0X01
+
+/* TSI CONTROL REGISTER B BITS */
+#define DA9052_TSICONTB_ADCREF		0X80
+#define DA9052_TSICONTB_TSIMAN		0X40
+#define DA9052_TSICONTB_TSIMUX		0X30
+#define DA9052_TSICONTB_TSISEL3	0X08
+#define DA9052_TSICONTB_TSISEL2	0X04
+#define DA9052_TSICONTB_TSISEL1	0X02
+#define DA9052_TSICONTB_TSISEL0	0X01
+
+/* TSI X CO-ORDINATE MSB RESULT REGISTER BITS */
+#define DA9052_TSIXMSB_TSIXM		0XFF
+
+/* TSI Y CO-ORDINATE MSB RESULT REGISTER BITS */
+#define DA9052_TSIYMSB_TSIYM		0XFF
+
+/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
+#define DA9052_TSILSB_PENDOWN		0X40
+#define DA9052_TSILSB_TSIZL		0X30
+#define DA9052_TSILSB_TSIYL		0X0C
+#define DA9052_TSILSB_TSIXL		0X03
+
+/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
+#define DA9052_TSIZMSB_TSIZM		0XFF
+
+/* RTC REGISTER */
+/* RTC TIMER SECONDS REGISTER BITS */
+#define DA9052_COUNTS_MONITOR		0X40
+#define DA9052_RTC_SEC			0X3F
+
+/* RTC TIMER MINUTES REGISTER BIT */
+#define DA9052_RTC_MIN			0X3F
+
+/* RTC TIMER HOUR REGISTER BIT */
+#define DA9052_RTC_HOUR		0X1F
+
+/* RTC TIMER DAYS REGISTER BIT */
+#define DA9052_RTC_DAY			0X1F
+
+/* RTC TIMER MONTHS REGISTER BIT */
+#define DA9052_RTC_MONTH		0X0F
+
+/* RTC TIMER YEARS REGISTER BIT */
+#define DA9052_RTC_YEAR		0X3F
+
+/* RTC ALARM MINUTES REGISTER BITS */
+#define DA9052_ALARMM_I_TICK_TYPE	0X80
+#define DA9052_ALARMMI_ALARMTYPE	0X40
+
+/* RTC ALARM YEARS REGISTER BITS */
+#define DA9052_ALARM_Y_TICK_ON		0X80
+#define DA9052_ALARM_Y_ALARM_ON	0X40
+
+/* RTC SECONDS REGISTER A BITS */
+#define DA9052_SECONDA_SECONDSA	0XFF
+
+/* RTC SECONDS REGISTER B BITS */
+#define DA9052_SECONDB_SECONDSB	0XFF
+
+/* RTC SECONDS REGISTER C BITS */
+#define DA9052_SECONDC_SECONDSC	0XFF
+
+/* RTC SECONDS REGISTER D BITS */
+#define DA9052_SECONDD_SECONDSD	0XFF
+
+#endif
+/* __LINUX_MFD_DA9052_REG_H */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 2774823..2783eca 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -9,6 +9,7 @@
 #define __LINUX_MII_H__
 
 #include <linux/types.h>
+#include <linux/ethtool.h>
 
 /* Generic MII registers. */
 #define MII_BMCR		0x00	/* Basic mode control register */
@@ -240,6 +241,205 @@
 }
 
 /**
+ * ethtool_adv_to_mii_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
+{
+	u32 result = 0;
+
+	if (ethadv & ADVERTISED_10baseT_Half)
+		result |= ADVERTISE_10HALF;
+	if (ethadv & ADVERTISED_10baseT_Full)
+		result |= ADVERTISE_10FULL;
+	if (ethadv & ADVERTISED_100baseT_Half)
+		result |= ADVERTISE_100HALF;
+	if (ethadv & ADVERTISED_100baseT_Full)
+		result |= ADVERTISE_100FULL;
+	if (ethadv & ADVERTISED_Pause)
+		result |= ADVERTISE_PAUSE_CAP;
+	if (ethadv & ADVERTISED_Asym_Pause)
+		result |= ADVERTISE_PAUSE_ASYM;
+
+	return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_t
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to ethtool advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
+{
+	u32 result = 0;
+
+	if (adv & ADVERTISE_10HALF)
+		result |= ADVERTISED_10baseT_Half;
+	if (adv & ADVERTISE_10FULL)
+		result |= ADVERTISED_10baseT_Full;
+	if (adv & ADVERTISE_100HALF)
+		result |= ADVERTISED_100baseT_Half;
+	if (adv & ADVERTISE_100FULL)
+		result |= ADVERTISED_100baseT_Full;
+	if (adv & ADVERTISE_PAUSE_CAP)
+		result |= ADVERTISED_Pause;
+	if (adv & ADVERTISE_PAUSE_ASYM)
+		result |= ADVERTISED_Asym_Pause;
+
+	return result;
+}
+
+/**
+ * ethtool_adv_to_mii_ctrl1000_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
+{
+	u32 result = 0;
+
+	if (ethadv & ADVERTISED_1000baseT_Half)
+		result |= ADVERTISE_1000HALF;
+	if (ethadv & ADVERTISED_1000baseT_Full)
+		result |= ADVERTISE_1000FULL;
+
+	return result;
+}
+
+/**
+ * mii_ctrl1000_to_ethtool_adv_t
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
+{
+	u32 result = 0;
+
+	if (adv & ADVERTISE_1000HALF)
+		result |= ADVERTISED_1000baseT_Half;
+	if (adv & ADVERTISE_1000FULL)
+		result |= ADVERTISED_1000baseT_Full;
+
+	return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-T mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
+{
+	u32 result = 0;
+
+	if (lpa & LPA_LPACK)
+		result |= ADVERTISED_Autoneg;
+
+	return result | mii_adv_to_ethtool_adv_t(lpa);
+}
+
+/**
+ * mii_stat1000_to_ethtool_lpa_t
+ * @lpa: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
+{
+	u32 result = 0;
+
+	if (lpa & LPA_1000HALF)
+		result |= ADVERTISED_1000baseT_Half;
+	if (lpa & LPA_1000FULL)
+		result |= ADVERTISED_1000baseT_Full;
+
+	return result;
+}
+
+/**
+ * ethtool_adv_to_mii_adv_x
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000Base-X mode.
+ */
+static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
+{
+	u32 result = 0;
+
+	if (ethadv & ADVERTISED_1000baseT_Half)
+		result |= ADVERTISE_1000XHALF;
+	if (ethadv & ADVERTISED_1000baseT_Full)
+		result |= ADVERTISE_1000XFULL;
+	if (ethadv & ADVERTISED_Pause)
+		result |= ADVERTISE_1000XPAUSE;
+	if (ethadv & ADVERTISED_Asym_Pause)
+		result |= ADVERTISE_1000XPSE_ASYM;
+
+	return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_x
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-X mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
+{
+	u32 result = 0;
+
+	if (adv & ADVERTISE_1000XHALF)
+		result |= ADVERTISED_1000baseT_Half;
+	if (adv & ADVERTISE_1000XFULL)
+		result |= ADVERTISED_1000baseT_Full;
+	if (adv & ADVERTISE_1000XPAUSE)
+		result |= ADVERTISED_Pause;
+	if (adv & ADVERTISE_1000XPSE_ASYM)
+		result |= ADVERTISED_Asym_Pause;
+
+	return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_x
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-X mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+{
+	u32 result = 0;
+
+	if (lpa & LPA_LPACK)
+		result |= ADVERTISED_Autoneg;
+
+	return result | mii_adv_to_ethtool_adv_x(lpa);
+}
+
+/**
  * mii_advertise_flowctrl - get flow control advertisement flags
  * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
  */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index c41d727..3208524 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -54,7 +54,7 @@
 	struct device *parent;
 	struct device *this_device;
 	const char *nodename;
-	mode_t mode;
+	umode_t mode;
 };
 
 extern int misc_register(struct miscdevice * misc);
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index b56e458..9958ff2 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -59,12 +59,15 @@
 	MLX4_CMD_HW_HEALTH_CHECK = 0x50,
 	MLX4_CMD_SET_PORT	 = 0xc,
 	MLX4_CMD_SET_NODE	 = 0x5a,
+	MLX4_CMD_QUERY_FUNC	 = 0x56,
 	MLX4_CMD_ACCESS_DDR	 = 0x2e,
 	MLX4_CMD_MAP_ICM	 = 0xffa,
 	MLX4_CMD_UNMAP_ICM	 = 0xff9,
 	MLX4_CMD_MAP_ICM_AUX	 = 0xffc,
 	MLX4_CMD_UNMAP_ICM_AUX	 = 0xffb,
 	MLX4_CMD_SET_ICM_SIZE	 = 0xffd,
+	/* master notifies FW when a slave's FLR is finished */
+	MLX4_CMD_INFORM_FLR_DONE = 0x5b,
 
 	/* TPT commands */
 	MLX4_CMD_SW2HW_MPT	 = 0xd,
@@ -119,6 +122,26 @@
 	/* miscellaneous commands */
 	MLX4_CMD_DIAG_RPRT	 = 0x30,
 	MLX4_CMD_NOP		 = 0x31,
+	MLX4_CMD_ACCESS_MEM	 = 0x2e,
+	MLX4_CMD_SET_VEP	 = 0x52,
+
+	/* Ethernet specific commands */
+	MLX4_CMD_SET_VLAN_FLTR	 = 0x47,
+	MLX4_CMD_SET_MCAST_FLTR	 = 0x48,
+	MLX4_CMD_DUMP_ETH_STATS	 = 0x49,
+
+	/* Communication channel commands */
+	MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
+	MLX4_CMD_GEN_EQE	 = 0x58,
+
+	/* virtual commands */
+	MLX4_CMD_ALLOC_RES	 = 0xf00,
+	MLX4_CMD_FREE_RES	 = 0xf01,
+	MLX4_CMD_MCAST_ATTACH	 = 0xf05,
+	MLX4_CMD_UCAST_ATTACH	 = 0xf06,
+	MLX4_CMD_PROMISC         = 0xf08,
+	MLX4_CMD_QUERY_FUNC_CAP  = 0xf0a,
+	MLX4_CMD_QP_ATTACH	 = 0xf0b,
 
 	/* debug commands */
 	MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -126,6 +149,7 @@
 
 	/* statistics commands */
 	MLX4_CMD_QUERY_IF_STAT	 = 0X54,
+	MLX4_CMD_SET_IF_STAT	 = 0X55,
 };
 
 enum {
@@ -135,7 +159,8 @@
 };
 
 enum {
-	MLX4_MAILBOX_SIZE	=  4096
+	MLX4_MAILBOX_SIZE	= 4096,
+	MLX4_ACCESS_MEM_ALIGN	= 256,
 };
 
 enum {
@@ -148,6 +173,11 @@
 	MLX4_SET_PORT_GID_TABLE = 0x5,
 };
 
+enum {
+	MLX4_CMD_WRAPPED,
+	MLX4_CMD_NATIVE
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
@@ -157,23 +187,24 @@
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	       int out_is_imm, u32 in_modifier, u8 op_modifier,
-	       u16 op, unsigned long timeout);
+	       u16 op, unsigned long timeout, int native);
 
 /* Invoke a command with no output parameter */
 static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
-			   u8 op_modifier, u16 op, unsigned long timeout)
+			   u8 op_modifier, u16 op, unsigned long timeout,
+			   int native)
 {
 	return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
-			  op_modifier, op, timeout);
+			  op_modifier, op, timeout, native);
 }
 
 /* Invoke a command with an output mailbox */
 static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 			       u32 in_modifier, u8 op_modifier, u16 op,
-			       unsigned long timeout)
+			       unsigned long timeout, int native)
 {
 	return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
-			  op_modifier, op, timeout);
+			  op_modifier, op, timeout, native);
 }
 
 /*
@@ -183,13 +214,17 @@
  */
 static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 			       u32 in_modifier, u8 op_modifier, u16 op,
-			       unsigned long timeout)
+			       unsigned long timeout, int native)
 {
 	return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
-			  op_modifier, op, timeout);
+			  op_modifier, op, timeout, native);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
 
+u32 mlx4_comm_get_version(void);
+
+#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+
 #endif /* MLX4_CMD_H */
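A hedged illustration of the new calling convention: every mlx4_cmd*() caller now states whether the command may be wrapped for SR-IOV multifunction handling or must go straight to firmware. The opcode and timeout below are only examples, not taken from this patch:

static int example_send_nop(struct mlx4_dev *dev)
{
	/* a PF-only command bypasses the multifunction command wrapper */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}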
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 84b0b18..5c4fe8e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -47,6 +47,9 @@
 enum {
 	MLX4_FLAG_MSI_X		= 1 << 0,
 	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
+	MLX4_FLAG_MASTER	= 1 << 2,
+	MLX4_FLAG_SLAVE		= 1 << 3,
+	MLX4_FLAG_SRIOV		= 1 << 4,
 };
 
 enum {
@@ -58,6 +61,15 @@
 };
 
 enum {
+	MLX4_MAX_NUM_PF		= 16,
+	MLX4_MAX_NUM_VF		= 64,
+	MLX4_MFUNC_MAX		= 80,
+	MLX4_MFUNC_EQ_NUM	= 4,
+	MLX4_MFUNC_MAX_EQES     = 8,
+	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
+enum {
 	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
 	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
 	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
@@ -77,11 +89,13 @@
 	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
 	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
 	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
-	MLX4_DEV_CAP_FLAG_WOL		= 1LL << 38,
+	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37,
+	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38,
 	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
 	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
 	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
-	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48
+	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
+	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
 };
 
 #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
@@ -116,7 +130,11 @@
 	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
 	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
 	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
-	MLX4_EVENT_TYPE_CMD		   = 0x0a
+	MLX4_EVENT_TYPE_CMD		   = 0x0a,
+	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
+	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
+	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
+	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
 
 enum {
@@ -183,6 +201,7 @@
 };
 
 enum mlx4_port_type {
+	MLX4_PORT_TYPE_NONE	= 0,
 	MLX4_PORT_TYPE_IB	= 1,
 	MLX4_PORT_TYPE_ETH	= 2,
 	MLX4_PORT_TYPE_AUTO	= 3
@@ -215,6 +234,7 @@
 
 struct mlx4_caps {
 	u64			fw_ver;
+	u32			function;
 	int			num_ports;
 	int			vl_cap[MLX4_MAX_PORTS + 1];
 	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
@@ -229,6 +249,7 @@
 	u64			trans_code[MLX4_MAX_PORTS + 1];
 	int			local_ca_ack_delay;
 	int			num_uars;
+	u32			uar_page_size;
 	int			bf_reg_size;
 	int			bf_regs_per_page;
 	int			max_sq_sg;
@@ -252,8 +273,7 @@
 	int			num_comp_vectors;
 	int			comp_pool;
 	int			num_mpts;
-	int			num_mtt_segs;
-	int			mtts_per_seg;
+	int			num_mtts;
 	int			fmr_reserved_mtts;
 	int			reserved_mtts;
 	int			reserved_mrws;
@@ -283,7 +303,9 @@
 	int                     log_num_prios;
 	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
 	u8			supported_type[MLX4_MAX_PORTS + 1];
-	u32			port_mask;
+	u8                      suggested_type[MLX4_MAX_PORTS + 1];
+	u8                      default_sense[MLX4_MAX_PORTS + 1];
+	u32			port_mask[MLX4_MAX_PORTS + 1];
 	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
 	u32			max_counters;
 	u8			ext_port_cap[MLX4_MAX_PORTS + 1];
@@ -303,7 +325,7 @@
 };
 
 struct mlx4_mtt {
-	u32			first_seg;
+	u32			offset;
 	int			order;
 	int			page_shift;
 };
@@ -465,10 +487,12 @@
 struct mlx4_dev {
 	struct pci_dev	       *pdev;
 	unsigned long		flags;
+	unsigned long		num_slaves;
 	struct mlx4_caps	caps;
 	struct radix_tree_root	qp_table_tree;
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
+	int			num_vfs;
 };
 
 struct mlx4_init_port_param {
@@ -487,14 +511,32 @@
 
 #define mlx4_foreach_port(port, dev, type)				\
 	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
-		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
-		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+		if ((type) == (dev)->caps.port_mask[(port)])
 
-#define mlx4_foreach_ib_transport_port(port, dev)			\
-	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
-		if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||	\
-		    ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+#define mlx4_foreach_ib_transport_port(port, dev)                         \
+	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
+		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
+			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
 
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+	return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
+{
+	return (qpn < dev->caps.sqp_start + 8);
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
+}
+
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+	return dev->flags & MLX4_FLAG_SLAVE;
+}
 
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		   struct mlx4_buf *buf);
@@ -560,6 +602,10 @@
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			enum mlx4_protocol prot);
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  int block_mcast_loopback, enum mlx4_protocol protocol);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
@@ -570,9 +616,11 @@
 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
 
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
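A small sketch (assumed caller, not part of the patch) of the new multifunction helpers and the per-port port_mask[] iteration:

static void example_report(struct mlx4_dev *dev)
{
	int port;

	if (mlx4_is_mfunc(dev))
		pr_info("mlx4: %s function, %d VFs\n",
			mlx4_is_master(dev) ? "master" : "slave",
			dev->num_vfs);

	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
		pr_info("mlx4: port %d is Ethernet\n", port);
}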
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 48cc4cb..bee8fa2 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -97,6 +97,33 @@
 	MLX4_QP_BIT_RIC				= 1 <<	4,
 };
 
+enum {
+	MLX4_RSS_HASH_XOR			= 0,
+	MLX4_RSS_HASH_TOP			= 1,
+
+	MLX4_RSS_UDP_IPV6			= 1 << 0,
+	MLX4_RSS_UDP_IPV4			= 1 << 1,
+	MLX4_RSS_TCP_IPV6			= 1 << 2,
+	MLX4_RSS_IPV6				= 1 << 3,
+	MLX4_RSS_TCP_IPV4			= 1 << 4,
+	MLX4_RSS_IPV4				= 1 << 5,
+
+	/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
+	MLX4_RSS_OFFSET_IN_QPC_PRI_PATH		= 0x24,
+	/* offset of the RSS indirection QP flag within mlx4_qp_context.flags */
+	MLX4_RSS_QPC_FLAG_OFFSET		= 13,
+};
+
+struct mlx4_rss_context {
+	__be32			base_qpn;
+	__be32			default_qpn;
+	u16			reserved;
+	u8			hash_fn;
+	u8			flags;
+	__be32			rss_key[10];
+	__be32			base_qpn_udp;
+};
+
 struct mlx4_qp_path {
 	u8			fl;
 	u8			reserved1[2];
@@ -183,6 +210,7 @@
 	 * [4]   IP checksum
 	 * [3:2] C (generate completion queue entry)
 	 * [1]   SE (solicited event)
+	 * [0]   FL (force loopback)
 	 */
 	__be32			srcrb_flags;
 	/*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd1..5d9b4c9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,41 +1253,34 @@
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 							 max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
@@ -1300,14 +1293,11 @@
 						unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
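The registration flow described in the mm.h comment above, as a hedged sketch; example_base, example_size and example_max_low_pfn are placeholders an architecture would supply:

static void __init example_arch_zones_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	/* register memory with memblock instead of add_active_range() */
	memblock_add_node(example_base, example_size, 0);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = example_max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}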
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 415f2db..c8ef9bc 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -218,6 +218,7 @@
 #define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
 #define MMC_QUIRK_BLK_NO_CMD23	(1<<7)		/* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)	/* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9)		/* Data read time > CSD says */
 						/* byte mode */
 	unsigned int    poweroff_notify_state;	/* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION	0
@@ -433,6 +434,11 @@
 	return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
 }
 
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
 #define mmc_card_name(c)	((c)->cid.prod_name)
 #define mmc_card_id(c)		(dev_name(&(c)->dev))
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2f..3ac040f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -598,13 +598,13 @@
 #endif
 };
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
 	int nid;
 };
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -938,7 +938,7 @@
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 2930485..5a8e390 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -2,39 +2,16 @@
 #define _NAMESPACE_H_
 #ifdef __KERNEL__
 
-#include <linux/path.h>
-#include <linux/seq_file.h>
-#include <linux/wait.h>
-
-struct mnt_namespace {
-	atomic_t		count;
-	struct vfsmount *	root;
-	struct list_head	list;
-	wait_queue_head_t poll;
-	int event;
-};
-
-struct proc_mounts {
-	struct seq_file m; /* must be the first element */
-	struct mnt_namespace *ns;
-	struct path root;
-};
-
+struct mnt_namespace;
 struct fs_struct;
 
-extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
 extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
 		struct fs_struct *);
 extern void put_mnt_ns(struct mnt_namespace *ns);
-static inline void get_mnt_ns(struct mnt_namespace *ns)
-{
-	atomic_inc(&ns->count);
-}
 
-extern const struct seq_operations mounts_op;
-extern const struct seq_operations mountinfo_op;
-extern const struct seq_operations mountstats_op;
-extern int mnt_had_events(struct proc_mounts *);
+extern const struct file_operations proc_mounts_operations;
+extern const struct file_operations proc_mountinfo_operations;
+extern const struct file_operations proc_mountstats_operations;
 
 #endif
 #endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 468819c..83ac071 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -542,4 +542,22 @@
 	kernel_ulong_t driver_data;	/* data private to the driver */
 };
 
+/**
+ * struct amba_id - identifies a device on an AMBA bus
+ * @id: The significant bits of the hardware device ID
+ * @mask: Bitmask specifying which bits of the id field are significant when
+ *	matching.  A driver binds to a device when ((hardware device ID) & mask)
+ *	== id.
+ * @data: Private data used by the driver.
+ */
+struct amba_id {
+	unsigned int		id;
+	unsigned int		mask;
+#ifndef __KERNEL__
+	kernel_ulong_t		data;
+#else
+	void			*data;
+#endif
+};
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
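A minimal sketch of a driver match table using the documented id/mask semantics; the ID values are made up for illustration:

static const struct amba_id example_amba_ids[] = {
	{
		.id	= 0x00041010,	/* hypothetical peripheral ID */
		.mask	= 0x000fffff,	/* match all ID bits, ignore the rest */
	},
	{ 0, 0 },
};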
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 33fe53d..d7029f4 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -47,45 +47,10 @@
 
 #define MNT_INTERNAL	0x4000
 
-struct mnt_pcp {
-	int mnt_count;
-	int mnt_writers;
-};
-
 struct vfsmount {
-	struct list_head mnt_hash;
-	struct vfsmount *mnt_parent;	/* fs we are mounted on */
-	struct dentry *mnt_mountpoint;	/* dentry of mountpoint */
 	struct dentry *mnt_root;	/* root of the mounted tree */
 	struct super_block *mnt_sb;	/* pointer to superblock */
-#ifdef CONFIG_SMP
-	struct mnt_pcp __percpu *mnt_pcp;
-	atomic_t mnt_longterm;		/* how many of the refs are longterm */
-#else
-	int mnt_count;
-	int mnt_writers;
-#endif
-	struct list_head mnt_mounts;	/* list of children, anchored here */
-	struct list_head mnt_child;	/* and going through their mnt_child */
 	int mnt_flags;
-	/* 4 bytes hole on 64bits arches without fsnotify */
-#ifdef CONFIG_FSNOTIFY
-	__u32 mnt_fsnotify_mask;
-	struct hlist_head mnt_fsnotify_marks;
-#endif
-	const char *mnt_devname;	/* Name of device e.g. /dev/dsk/hda1 */
-	struct list_head mnt_list;
-	struct list_head mnt_expire;	/* link in fs-specific expiry list */
-	struct list_head mnt_share;	/* circular list of shared mounts */
-	struct list_head mnt_slave_list;/* list of slave mounts */
-	struct list_head mnt_slave;	/* slave list entry */
-	struct vfsmount *mnt_master;	/* slave is on master->mnt_slave_list */
-	struct mnt_namespace *mnt_ns;	/* containing namespace */
-	int mnt_id;			/* mount identifier */
-	int mnt_group_id;		/* peer group identifier */
-	int mnt_expiry_mark;		/* true if marked for expiry */
-	int mnt_pinned;
-	int mnt_ghosts;
 };
 
 struct file; /* forward dec */
@@ -94,15 +59,13 @@
 extern int mnt_want_write_file(struct file *file);
 extern int mnt_clone_write(struct vfsmount *mnt);
 extern void mnt_drop_write(struct vfsmount *mnt);
+extern void mnt_drop_write_file(struct file *file);
 extern void mntput(struct vfsmount *mnt);
 extern struct vfsmount *mntget(struct vfsmount *mnt);
 extern void mnt_pin(struct vfsmount *mnt);
 extern void mnt_unpin(struct vfsmount *mnt);
 extern int __mnt_is_readonly(struct vfsmount *mnt);
 
-extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
-				      const char *name, void *data);
-
 struct file_system_type;
 extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
 				      int flags, const char *name,
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index a7003b7..b188f68 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -116,6 +116,7 @@
 	NDTPA_PROXY_DELAY,		/* u64, msecs */
 	NDTPA_PROXY_QLEN,		/* u32 */
 	NDTPA_LOCKTIME,			/* u64, msecs */
+	NDTPA_QUEUE_LENBYTES,		/* u32 */
 	__NDTPA_MAX
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
new file mode 100644
index 0000000..77f5202
--- /dev/null
+++ b/include/linux/netdev_features.h
@@ -0,0 +1,146 @@
+/*
+ * Network device features.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NETDEV_FEATURES_H
+#define _LINUX_NETDEV_FEATURES_H
+
+#include <linux/types.h>
+
+typedef u64 netdev_features_t;
+
+enum {
+	NETIF_F_SG_BIT,			/* Scatter/gather IO. */
+	NETIF_F_IP_CSUM_BIT,		/* Can checksum TCP/UDP over IPv4. */
+	__UNUSED_NETIF_F_1,
+	NETIF_F_HW_CSUM_BIT,		/* Can checksum all the packets. */
+	NETIF_F_IPV6_CSUM_BIT,		/* Can checksum TCP/UDP over IPV6 */
+	NETIF_F_HIGHDMA_BIT,		/* Can DMA to high memory. */
+	NETIF_F_FRAGLIST_BIT,		/* Scatter/gather IO. */
+	NETIF_F_HW_VLAN_TX_BIT,		/* Transmit VLAN hw acceleration */
+	NETIF_F_HW_VLAN_RX_BIT,		/* Receive VLAN hw acceleration */
+	NETIF_F_HW_VLAN_FILTER_BIT,	/* Receive filtering on VLAN */
+	NETIF_F_VLAN_CHALLENGED_BIT,	/* Device cannot handle VLAN packets */
+	NETIF_F_GSO_BIT,		/* Enable software GSO. */
+	NETIF_F_LLTX_BIT,		/* LockLess TX - deprecated. Please */
+					/* do not use LLTX in new drivers */
+	NETIF_F_NETNS_LOCAL_BIT,	/* Does not change network namespaces */
+	NETIF_F_GRO_BIT,		/* Generic receive offload */
+	NETIF_F_LRO_BIT,		/* large receive offload */
+
+	/**/NETIF_F_GSO_SHIFT,		/* keep the order of SKB_GSO_* bits */
+	NETIF_F_TSO_BIT			/* ... TCPv4 segmentation */
+		= NETIF_F_GSO_SHIFT,
+	NETIF_F_UFO_BIT,		/* ... UDPv4 fragmentation */
+	NETIF_F_GSO_ROBUST_BIT,		/* ... ->SKB_GSO_DODGY */
+	NETIF_F_TSO_ECN_BIT,		/* ... TCP ECN support */
+	NETIF_F_TSO6_BIT,		/* ... TCPv6 segmentation */
+	NETIF_F_FSO_BIT,		/* ... FCoE segmentation */
+	NETIF_F_GSO_RESERVED1,		/* ... free (fill GSO_MASK to 8 bits) */
+	/**/NETIF_F_GSO_LAST,		/* [can't be last bit, see GSO_MASK] */
+	NETIF_F_GSO_RESERVED2		/* ... free (fill GSO_MASK to 8 bits) */
+		= NETIF_F_GSO_LAST,
+
+	NETIF_F_FCOE_CRC_BIT,		/* FCoE CRC32 */
+	NETIF_F_SCTP_CSUM_BIT,		/* SCTP checksum offload */
+	NETIF_F_FCOE_MTU_BIT,		/* Supports max FCoE MTU, 2158 bytes*/
+	NETIF_F_NTUPLE_BIT,		/* N-tuple filters supported */
+	NETIF_F_RXHASH_BIT,		/* Receive hashing offload */
+	NETIF_F_RXCSUM_BIT,		/* Receive checksumming offload */
+	NETIF_F_NOCACHE_COPY_BIT,	/* Use no-cache copyfromuser */
+	NETIF_F_LOOPBACK_BIT,		/* Enable loopback */
+
+	/*
+	 * Add your fresh new feature above and remember to update
+	 * netdev_features_strings[] in net/core/ethtool.c and maybe
+	 * some feature mask #defines below. Please also describe it
+	 * in Documentation/networking/netdev-features.txt.
+	 */
+
+	/**/NETDEV_FEATURE_COUNT
+};
+
+/* copy'n'paste compression ;) */
+#define __NETIF_F_BIT(bit)	((netdev_features_t)1 << (bit))
+#define __NETIF_F(name)		__NETIF_F_BIT(NETIF_F_##name##_BIT)
+
+#define NETIF_F_FCOE_CRC	__NETIF_F(FCOE_CRC)
+#define NETIF_F_FCOE_MTU	__NETIF_F(FCOE_MTU)
+#define NETIF_F_FRAGLIST	__NETIF_F(FRAGLIST)
+#define NETIF_F_FSO		__NETIF_F(FSO)
+#define NETIF_F_GRO		__NETIF_F(GRO)
+#define NETIF_F_GSO		__NETIF_F(GSO)
+#define NETIF_F_GSO_ROBUST	__NETIF_F(GSO_ROBUST)
+#define NETIF_F_HIGHDMA		__NETIF_F(HIGHDMA)
+#define NETIF_F_HW_CSUM		__NETIF_F(HW_CSUM)
+#define NETIF_F_HW_VLAN_FILTER	__NETIF_F(HW_VLAN_FILTER)
+#define NETIF_F_HW_VLAN_RX	__NETIF_F(HW_VLAN_RX)
+#define NETIF_F_HW_VLAN_TX	__NETIF_F(HW_VLAN_TX)
+#define NETIF_F_IP_CSUM		__NETIF_F(IP_CSUM)
+#define NETIF_F_IPV6_CSUM	__NETIF_F(IPV6_CSUM)
+#define NETIF_F_LLTX		__NETIF_F(LLTX)
+#define NETIF_F_LOOPBACK	__NETIF_F(LOOPBACK)
+#define NETIF_F_LRO		__NETIF_F(LRO)
+#define NETIF_F_NETNS_LOCAL	__NETIF_F(NETNS_LOCAL)
+#define NETIF_F_NOCACHE_COPY	__NETIF_F(NOCACHE_COPY)
+#define NETIF_F_NTUPLE		__NETIF_F(NTUPLE)
+#define NETIF_F_RXCSUM		__NETIF_F(RXCSUM)
+#define NETIF_F_RXHASH		__NETIF_F(RXHASH)
+#define NETIF_F_SCTP_CSUM	__NETIF_F(SCTP_CSUM)
+#define NETIF_F_SG		__NETIF_F(SG)
+#define NETIF_F_TSO6		__NETIF_F(TSO6)
+#define NETIF_F_TSO_ECN		__NETIF_F(TSO_ECN)
+#define NETIF_F_TSO		__NETIF_F(TSO)
+#define NETIF_F_UFO		__NETIF_F(UFO)
+#define NETIF_F_VLAN_CHALLENGED	__NETIF_F(VLAN_CHALLENGED)
+
+/* Features valid for ethtool to change */
+/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE	(NETIF_F_VLAN_CHALLENGED | \
+				 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+
+/* remember that ((t)1 << t_BITS) is undefined in C99 */
+#define NETIF_F_ETHTOOL_BITS	((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
+		(__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \
+		~NETIF_F_NEVER_CHANGE)
+
+/* Segmentation offload feature mask */
+#define NETIF_F_GSO_MASK	(__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
+		__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
+
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
+				 NETIF_F_TSO6 | NETIF_F_UFO)
+
+#define NETIF_F_GEN_CSUM	NETIF_F_HW_CSUM
+#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+
+#define NETIF_F_ALL_TSO 	(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+				 NETIF_F_FSO)
+
+/*
+ * If one device supports one of these features, then enable them
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
+				 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+/*
+ * If one device doesn't support one of these features, then disable it
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ALL_FOR_ALL	(NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
+
+/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES	(NETIF_F_GSO | NETIF_F_GRO)
+
+#endif	/* _LINUX_NETDEV_FEATURES_H */
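A short sketch of how a (hypothetical) driver advertises offloads with the 64-bit netdev_features_t type introduced here:

static void example_set_features(struct net_device *dev)
{
	netdev_features_t hw = NETIF_F_SG | NETIF_F_IP_CSUM |
			       NETIF_F_TSO | NETIF_F_RXCSUM;

	dev->hw_features = hw;			/* user-toggleable via ethtool */
	dev->features = hw | NETIF_F_HIGHDMA;	/* currently active set */
}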
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a82ad4d..a1d1095 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,7 @@
 #include <linux/rculist.h>
 #include <linux/dmaengine.h>
 #include <linux/workqueue.h>
+#include <linux/dynamic_queue_limits.h>
 
 #include <linux/ethtool.h>
 #include <net/net_namespace.h>
@@ -50,8 +51,10 @@
 #ifdef CONFIG_DCB
 #include <net/dcbnl.h>
 #endif
+#include <net/netprio_cgroup.h>
 
-struct vlan_group;
+#include <linux/netdev_features.h>
+
 struct netpoll_info;
 struct phy_device;
 /* 802.11 specific */
@@ -141,22 +144,20 @@
  *	used.
  */
 
-#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
 # if defined(CONFIG_MAC80211_MESH)
 #  define LL_MAX_HEADER 128
 # else
 #  define LL_MAX_HEADER 96
 # endif
-#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#elif IS_ENABLED(CONFIG_TR)
 # define LL_MAX_HEADER 48
 #else
 # define LL_MAX_HEADER 32
 #endif
 
-#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
-    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
-    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
-    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
+#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
+    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
 #define MAX_HEADER LL_MAX_HEADER
 #else
 #define MAX_HEADER (LL_MAX_HEADER + 48)
@@ -212,6 +213,11 @@
 #include <linux/cache.h>
 #include <linux/skbuff.h>
 
+#ifdef CONFIG_RPS
+#include <linux/jump_label.h>
+extern struct jump_label_key rps_needed;
+#endif
+
 struct neighbour;
 struct neigh_parms;
 struct sk_buff;
@@ -272,16 +278,11 @@
  *
  * We could use other alignment values, but we must maintain the
  * relationship HH alignment <= LL alignment.
- *
- * LL_ALLOCATED_SPACE also takes into account the tailroom the device
- * may need.
  */
 #define LL_RESERVED_SPACE(dev) \
 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
-#define LL_ALLOCATED_SPACE(dev) \
-	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
 struct header_ops {
 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
@@ -516,11 +517,23 @@
 #endif
 
 enum netdev_queue_state_t {
-	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_DRV_XOFF,
+	__QUEUE_STATE_STACK_XOFF,
 	__QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \
-				    (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
+			      (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
+					(1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
+ * netif_tx_* functions below are used to manipulate this flag.  The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently.  The netif_xmit_*stopped functions below are called
+ * to check whether the queue has been stopped by the driver or the stack
+ * (either of the XOFF bits is set in the state).  Drivers should not need to
+ * call the netif_xmit_*stopped functions; they should only use netif_tx_*.
+ */
 
 struct netdev_queue {
 /*
@@ -528,9 +541,8 @@
  */
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
-	unsigned long		state;
 	struct Qdisc		*qdisc_sleeping;
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
 	struct kobject		kobj;
 #endif
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -545,6 +557,18 @@
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
+
+	/*
+	 * Number of TX timeouts for this queue
+	 * (/sys/class/net/DEV/Q/trans_timeout)
+	 */
+	unsigned long		trans_timeout;
+
+	unsigned long		state;
+
+#ifdef CONFIG_BQL
+	struct dql		dql;
+#endif
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -573,7 +597,7 @@
 	struct rcu_head rcu;
 	u16 cpus[0];
 };
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
 
 /*
  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
@@ -597,7 +621,7 @@
 	struct rps_dev_flow flows[0];
 };
 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
-    (_num * sizeof(struct rps_dev_flow)))
+    ((_num) * sizeof(struct rps_dev_flow)))
 
 /*
  * The rps_sock_flow_table contains mappings of flows to the last CPU
@@ -608,7 +632,7 @@
 	u16 ents[0];
 };
 #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
-    (_num * sizeof(u16)))
+    ((_num) * sizeof(u16)))
 
 #define RPS_NO_CPU 0xffff
 
@@ -660,7 +684,7 @@
 	struct rcu_head rcu;
 	u16 queues[0];
 };
-#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
     / sizeof(u16))
 
@@ -683,6 +707,23 @@
 	u16 offset;
 };
 
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+/*
+ * This structure is to hold information about the device
+ * configured to run FCoE protocol stack.
+ */
+struct netdev_fcoe_hbainfo {
+	char	manufacturer[64];
+	char	serial_number[64];
+	char	hardware_version[64];
+	char	driver_version[64];
+	char	optionrom_version[64];
+	char	firmware_version[64];
+	char	model[256];
+	char	model_description[256];
+};
+#endif
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -767,11 +808,11 @@
  *	3. Update dev->stats asynchronously and atomically, and define
  *	   neither operation.
  *
- * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *	this function is called when a VLAN id is registered.
  *
- * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *	this function is called when a VLAN id is unregistered.
  *
@@ -823,6 +864,13 @@
  *	perform necessary setup and returns 1 to indicate the device is set up
  *	successfully to perform DDP on this I/O, otherwise this returns 0.
  *
+ * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ *			       struct netdev_fcoe_hbainfo *hbainfo);
+ *	Called when the FCoE Protocol stack wants information on the underlying
+ *	device. This information is utilized by the FCoE protocol stack to
+ *	register attributes with the Fibre Channel management service as per the
+ *	FC-GS Fabric Device Management Information (FDMI) specification.
+ *
  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
  *	Called when the underlying device wants to override default World Wide
  *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
@@ -845,12 +893,13 @@
  *	Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
- * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *		netdev_features_t features);
  *	Adjusts the requested feature flags according to device-specific
  *	constraints, and returns the resulting flags. Must not modify
  *	the device state.
  *
- * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
  *	Must return >0 or -errno if it changed dev->features itself.
@@ -885,9 +934,9 @@
 						     struct rtnl_link_stats64 *storage);
 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
-	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
+	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
 						       unsigned short vid);
-	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
 						        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void                    (*ndo_poll_controller)(struct net_device *dev);
@@ -912,7 +961,7 @@
 	int			(*ndo_get_vf_port)(struct net_device *dev,
 						   int vf, struct sk_buff *skb);
 	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 	int			(*ndo_fcoe_disable)(struct net_device *dev);
 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
@@ -925,9 +974,11 @@
 						       u16 xid,
 						       struct scatterlist *sgl,
 						       unsigned int sgc);
+	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+							struct netdev_fcoe_hbainfo *hbainfo);
 #endif
 
-#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE)
+#if IS_ENABLED(CONFIG_LIBFCOE)
 #define NETDEV_FCOE_WWNN 0
 #define NETDEV_FCOE_WWPN 1
 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
@@ -944,10 +995,12 @@
 						 struct net_device *slave_dev);
 	int			(*ndo_del_slave)(struct net_device *dev,
 						 struct net_device *slave_dev);
-	u32			(*ndo_fix_features)(struct net_device *dev,
-						    u32 features);
+	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
+						    netdev_features_t features);
 	int			(*ndo_set_features)(struct net_device *dev,
-						    u32 features);
+						    netdev_features_t features);
+	int			(*ndo_neigh_construct)(struct neighbour *n);
+	void			(*ndo_neigh_destroy)(struct neighbour *n);
 };
 
 /*
@@ -997,91 +1050,13 @@
 	struct list_head	unreg_list;
 
 	/* currently active device features */
-	u32			features;
+	netdev_features_t	features;
 	/* user-changeable features */
-	u32			hw_features;
+	netdev_features_t	hw_features;
 	/* user-requested features */
-	u32			wanted_features;
+	netdev_features_t	wanted_features;
 	/* mask of features inheritable by VLAN devices */
-	u32			vlan_features;
-
-	/* Net device feature bits; if you change something,
-	 * also update netdev_features_strings[] in ethtool.c */
-
-#define NETIF_F_SG		1	/* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
-#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */
-#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
-#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
-#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
-#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
-#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
-#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
-#define NETIF_F_GSO		2048	/* Enable software GSO. */
-#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
-					/* do not use LLTX in new drivers */
-#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
-#define NETIF_F_GRO		16384	/* Generic receive offload */
-#define NETIF_F_LRO		32768	/* large receive offload */
-
-/* the GSO_MASK reserves bits 16 through 23 */
-#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
-#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
-#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
-#define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
-#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
-#define NETIF_F_RXCSUM		(1 << 29) /* Receive checksumming offload */
-#define NETIF_F_NOCACHE_COPY	(1 << 30) /* Use no-cache copyfromuser */
-#define NETIF_F_LOOPBACK	(1 << 31) /* Enable loopback */
-
-	/* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT	16
-#define NETIF_F_GSO_MASK	0x00ff0000
-#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
-#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
-
-	/* Features valid for ethtool to change */
-	/* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE	(NETIF_F_VLAN_CHALLENGED | \
-				  NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
-#define NETIF_F_ETHTOOL_BITS	(0xff3fffff & ~NETIF_F_NEVER_CHANGE)
-
-	/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
-				 NETIF_F_TSO6 | NETIF_F_UFO)
-
-
-#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
-
-#define NETIF_F_ALL_TSO 	(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-#define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
-				 NETIF_F_FSO)
-
-	/*
-	 * If one device supports one of these features, then enable them
-	 * for all in netdev_increment_features.
-	 */
-#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
-				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
-				 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
-	/*
-	 * If one device doesn't support one of these features, then disable it
-	 * for all in netdev_increment_features.
-	 */
-#define NETIF_F_ALL_FOR_ALL	(NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
-
-	/* changeable features with no special hardware requirements */
-#define NETIF_F_SOFT_FEATURES	(NETIF_F_GSO | NETIF_F_GRO)
+	netdev_features_t	vlan_features;
 
 	/* Interface index. Unique device identifier	*/
 	int			ifindex;
@@ -1132,6 +1107,7 @@
 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
 	unsigned char		addr_assign_type; /* hw address assignment type */
 	unsigned char		addr_len;	/* hardware address length	*/
+	unsigned char		neigh_priv_len;
 	unsigned short          dev_id;		/* for shared network cards */
 
 	spinlock_t		addr_list_lock;
@@ -1144,11 +1120,11 @@
 
 	/* Protocol specific pointers */
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-	struct vlan_group __rcu	*vlgrp;		/* VLAN group */
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
 #endif
-#ifdef CONFIG_NET_DSA
-	void			*dsa_ptr;	/* dsa specific data */
+#if IS_ENABLED(CONFIG_NET_DSA)
+	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
 #endif
 	void 			*atalk_ptr;	/* AppleTalk link 	*/
 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
@@ -1184,9 +1160,11 @@
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
 	struct kset		*queues_kset;
+#endif
 
+#ifdef CONFIG_RPS
 	struct netdev_rx_queue	*_rx;
 
 	/* Number of RX queues allocated at register_netdev() time */
@@ -1308,10 +1286,13 @@
 	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 	u8 prio_tc_map[TC_BITMASK + 1];
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	/* max exchange id for FCoE LRO by ddp */
 	unsigned int		fcoe_ddp_xid;
 #endif
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+	struct netprio_map __rcu *priomap;
+#endif
 	/* phy device may attach itself for hardware timestamping */
 	struct phy_device *phydev;
 
@@ -1515,7 +1496,7 @@
 					 struct packet_type *,
 					 struct net_device *);
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
-						u32 features);
+						netdev_features_t features);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 					       struct sk_buff *skb);
@@ -1783,7 +1764,7 @@
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
 		__netif_schedule(txq->qdisc);
 }
 
@@ -1797,7 +1778,7 @@
 
 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1829,7 +1810,7 @@
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
 		__netif_schedule(dev_queue->qdisc);
 }
 
@@ -1861,7 +1842,7 @@
 		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
 		return;
 	}
-	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1888,7 +1869,7 @@
 
 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1902,9 +1883,68 @@
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+					unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+	dql_queued(&dev_queue->dql, bytes);
+	if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
+		set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+		if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+			clear_bit(__QUEUE_STATE_STACK_XOFF,
+			    &dev_queue->state);
+	}
+#endif
+}
+
+static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
+{
+	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+					     unsigned pkts, unsigned bytes)
+{
+#ifdef CONFIG_BQL
+	if (likely(bytes)) {
+		dql_completed(&dev_queue->dql, bytes);
+		if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
+		    &dev_queue->state) &&
+		    dql_avail(&dev_queue->dql) >= 0)) {
+			if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
+			     &dev_queue->state))
+				netif_schedule_queue(dev_queue);
+		}
+	}
+#endif
+}
+
+static inline void netdev_completed_queue(struct net_device *dev,
+					  unsigned pkts, unsigned bytes)
+{
+	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
+}
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+#ifdef CONFIG_BQL
+	dql_reset(&q->dql);
+#endif
+}
+
+static inline void netdev_reset_queue(struct net_device *dev_queue)
+{
+	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 }
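A hedged sketch of how a driver is expected to bracket its TX ring with the byte queue limit hooks above; the hardware posting and completion accounting are assumed, not part of this patch:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...post skb to the hardware ring here... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	/* may restart the queue if __QUEUE_STATE_STACK_XOFF was set */
	netdev_completed_queue(dev, pkts, bytes);
}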
 
 /**
@@ -1991,7 +2031,7 @@
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
 		__netif_schedule(txq->qdisc);
 }
 
@@ -2520,7 +2560,8 @@
 extern int netdev_set_bond_master(struct net_device *dev,
 				  struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+	netdev_features_t features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2549,11 +2590,13 @@
 
 extern void linkwatch_run_queue(void);
 
-static inline u32 netdev_get_wanted_features(struct net_device *dev)
+static inline netdev_features_t netdev_get_wanted_features(
+	struct net_device *dev)
 {
 	return (dev->features & ~dev->hw_features) | dev->wanted_features;
 }
-u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+netdev_features_t netdev_increment_features(netdev_features_t all,
+	netdev_features_t one, netdev_features_t mask);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
@@ -2561,21 +2604,31 @@
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-u32 netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(u32 features, int gso_type)
+static inline int net_gso_ok(netdev_features_t features, int gso_type)
 {
-	int feature = gso_type << NETIF_F_GSO_SHIFT;
+	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+
+	/* check flags correspondence */
+	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+
 	return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
+static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 {
 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct sk_buff *skb, int features)
+static inline int netif_needs_gso(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
@@ -2594,22 +2647,6 @@
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
-static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
-{
-	if (dev->features & NETIF_F_RXCSUM)
-		return 1;
-	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
-		return 0;
-	return dev->ethtool_ops->get_rx_csum(dev);
-}
-
-static inline u32 dev_ethtool_get_flags(struct net_device *dev)
-{
-	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
-		return 0;
-	return dev->ethtool_ops->get_flags(dev);
-}
-
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* netdev_printk helpers, similar to dev_printk */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 857f502..b809265 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -162,6 +162,24 @@
 
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
+#if defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+	if (__builtin_constant_p(pf) &&
+	    __builtin_constant_p(hook))
+		return static_branch(&nf_hooks_needed[pf][hook]);
+
+	return !list_empty(&nf_hooks[pf][hook]);
+}
+#else
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+	return !list_empty(&nf_hooks[pf][hook]);
+}
+#endif
+
 int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
 		 struct net_device *indev, struct net_device *outdev,
 		 int (*okfn)(struct sk_buff *), int thresh);
@@ -179,11 +197,9 @@
 				 struct net_device *outdev,
 				 int (*okfn)(struct sk_buff *), int thresh)
 {
-#ifndef CONFIG_NETFILTER_DEBUG
-	if (list_empty(&nf_hooks[pf][hook]))
-		return 1;
-#endif
-	return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+	if (nf_hooks_active(pf, hook))
+		return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+	return 1;
 }
 
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index a1b410c..e144f54 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -5,7 +5,9 @@
 header-y += nf_conntrack_sctp.h
 header-y += nf_conntrack_tcp.h
 header-y += nf_conntrack_tuple_common.h
+header-y += nf_nat.h
 header-y += nfnetlink.h
+header-y += nfnetlink_acct.h
 header-y += nfnetlink_compat.h
 header-y += nfnetlink_conntrack.h
 header-y += nfnetlink_log.h
@@ -21,6 +23,7 @@
 header-y += xt_IDLETIMER.h
 header-y += xt_LED.h
 header-y += xt_MARK.h
+header-y += xt_nfacct.h
 header-y += xt_NFLOG.h
 header-y += xt_NFQUEUE.h
 header-y += xt_RATEEST.h
@@ -40,6 +43,7 @@
 header-y += xt_dccp.h
 header-y += xt_devgroup.h
 header-y += xt_dscp.h
+header-y += xt_ecn.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
 header-y += xt_helper.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 0d3dd66..9e3a283 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -83,6 +83,10 @@
 	/* Conntrack is a fake untracked entry */
 	IPS_UNTRACKED_BIT = 12,
 	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+
+	/* Conntrack has a userspace helper. */
+	IPS_USERSPACE_HELPER_BIT = 13,
+	IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT),
 };
 
 /* Connection tracking event types */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
index 2ea22b0..2f6bbc5 100644
--- a/include/linux/netfilter/nf_conntrack_tuple_common.h
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -7,6 +7,33 @@
 	IP_CT_DIR_MAX
 };
 
+/* The protocol-specific manipulable parts of the tuple: always in
+ * network order
+ */
+union nf_conntrack_man_proto {
+	/* Add other protocols here. */
+	__be16 all;
+
+	struct {
+		__be16 port;
+	} tcp;
+	struct {
+		__be16 port;
+	} udp;
+	struct {
+		__be16 id;
+	} icmp;
+	struct {
+		__be16 port;
+	} dccp;
+	struct {
+		__be16 port;
+	} sctp;
+	struct {
+		__be16 key;	/* GRE key is 32bit, PPtP only uses 16bit */
+	} gre;
+};
+
 #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
 
 #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h
new file mode 100644
index 0000000..8df2d13
--- /dev/null
+++ b/include/linux/netfilter/nf_nat.h
@@ -0,0 +1,25 @@
+#ifndef _NETFILTER_NF_NAT_H
+#define _NETFILTER_NF_NAT_H
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_NAT_RANGE_MAP_IPS		1
+#define NF_NAT_RANGE_PROTO_SPECIFIED	2
+#define NF_NAT_RANGE_PROTO_RANDOM	4
+#define NF_NAT_RANGE_PERSISTENT		8
+
+struct nf_nat_ipv4_range {
+	unsigned int			flags;
+	__be32				min_ip;
+	__be32				max_ip;
+	union nf_conntrack_man_proto	min;
+	union nf_conntrack_man_proto	max;
+};
+
+struct nf_nat_ipv4_multi_range_compat {
+	unsigned int			rangesize;
+	struct nf_nat_ipv4_range	range[1];
+};
+
+#endif /* _NETFILTER_NF_NAT_H */
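An illustrative way to fill the renamed range structure for a single-address mapping with an explicit source-port window; whatever consumes the range (for instance a NAT target) is outside this patch:

static void example_fill_range(struct nf_nat_ipv4_range *r, __be32 addr)
{
	r->flags  = NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED;
	r->min_ip = addr;
	r->max_ip = addr;
	r->min.tcp.port = htons(1024);
	r->max.tcp.port = htons(65535);
}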
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 74d3386..b64454c 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -48,7 +48,8 @@
 #define NFNL_SUBSYS_ULOG		4
 #define NFNL_SUBSYS_OSF			5
 #define NFNL_SUBSYS_IPSET		6
-#define NFNL_SUBSYS_COUNT		7
+#define NFNL_SUBSYS_ACCT		7
+#define NFNL_SUBSYS_COUNT		8
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
new file mode 100644
index 0000000..7c4279b
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -0,0 +1,36 @@
+#ifndef _NFNL_ACCT_H_
+#define _NFNL_ACCT_H_
+
+#ifndef NFACCT_NAME_MAX
+#define NFACCT_NAME_MAX		32
+#endif
+
+enum nfnl_acct_msg_types {
+	NFNL_MSG_ACCT_NEW,
+	NFNL_MSG_ACCT_GET,
+	NFNL_MSG_ACCT_GET_CTRZERO,
+	NFNL_MSG_ACCT_DEL,
+	NFNL_MSG_ACCT_MAX
+};
+
+enum nfnl_acct_type {
+	NFACCT_UNSPEC,
+	NFACCT_NAME,
+	NFACCT_PKTS,
+	NFACCT_BYTES,
+	NFACCT_USE,
+	__NFACCT_MAX
+};
+#define NFACCT_MAX (__NFACCT_MAX - 1)
+
+#ifdef __KERNEL__
+
+struct nf_acct;
+
+extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+extern void nfnl_acct_put(struct nf_acct *acct);
+extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+
+#endif /* __KERNEL__ */
+
+#endif /* _NFNL_ACCT_H */
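A sketch of an assumed in-kernel user of the accounting API declared above: look up an object created over nfnetlink, charge traffic to it, then drop the reference. The object name is purely illustrative:

static struct nf_acct *example_acct;

static int example_bind(void)
{
	example_acct = nfnl_acct_find_get("example-counter");
	return example_acct ? 0 : -ENOENT;
}

static void example_account(const struct sk_buff *skb)
{
	nfnl_acct_update(skb, example_acct);
}

static void example_unbind(void)
{
	nfnl_acct_put(example_acct);
}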
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index b56e768..6390f099 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -3,7 +3,8 @@
 
 #include <linux/types.h>
 
-#define XT_CT_NOTRACK	0x1
+#define XT_CT_NOTRACK		0x1
+#define XT_CT_USERSPACE_HELPER	0x2
 
 struct xt_ct_target_info {
 	__u16 flags;
diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h
new file mode 100644
index 0000000..7158fca
--- /dev/null
+++ b/include/linux/netfilter/xt_ecn.h
@@ -0,0 +1,35 @@
+/* iptables module for matching the ECN header in IPv4 and TCP header
+ *
+ * (C) 2002 Harald Welte <laforge@gnumonks.org>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ * 
+ * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
+*/
+#ifndef _XT_ECN_H
+#define _XT_ECN_H
+
+#include <linux/types.h>
+#include <linux/netfilter/xt_dscp.h>
+
+#define XT_ECN_IP_MASK	(~XT_DSCP_MASK)
+
+#define XT_ECN_OP_MATCH_IP	0x01
+#define XT_ECN_OP_MATCH_ECE	0x10
+#define XT_ECN_OP_MATCH_CWR	0x20
+
+#define XT_ECN_OP_MATCH_MASK	0xce
+
+/* match info */
+struct xt_ecn_info {
+	__u8 operation;
+	__u8 invert;
+	__u8 ip_ect;
+	union {
+		struct {
+			__u8 ect;
+		} tcp;
+	} proto;
+};
+
+#endif /* _XT_ECN_H */
diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h
new file mode 100644
index 0000000..3e19c8a
--- /dev/null
+++ b/include/linux/netfilter/xt_nfacct.h
@@ -0,0 +1,13 @@
+#ifndef _XT_NFACCT_MATCH_H
+#define _XT_NFACCT_MATCH_H
+
+#include <linux/netfilter/nfnetlink_acct.h>
+
+struct nf_acct;
+
+struct xt_nfacct_match_info {
+	char		name[NFACCT_NAME_MAX];
+	struct nf_acct	*nfacct;
+};
+
+#endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h
new file mode 100644
index 0000000..8358d4f
--- /dev/null
+++ b/include/linux/netfilter/xt_rpfilter.h
@@ -0,0 +1,23 @@
+#ifndef _XT_RPATH_H
+#define _XT_RPATH_H
+
+#include <linux/types.h>
+
+enum {
+	XT_RPFILTER_LOOSE = 1 << 0,
+	XT_RPFILTER_VALID_MARK = 1 << 1,
+	XT_RPFILTER_ACCEPT_LOCAL = 1 << 2,
+	XT_RPFILTER_INVERT = 1 << 3,
+#ifdef __KERNEL__
+	XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE |
+				  XT_RPFILTER_VALID_MARK |
+				  XT_RPFILTER_ACCEPT_LOCAL |
+				  XT_RPFILTER_INVERT,
+#endif
+};
+
+struct xt_rpfilter_info {
+	__u8 flags;
+};
+
+#endif
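
A minimal sketch of the kind of flag validation the new reverse-path-filter match can perform with the kernel-only option mask above; the function name is a placeholder:

#include <linux/errno.h>
#include <linux/netfilter/xt_rpfilter.h>

/* Illustrative only: reject match info that sets unknown flag bits. */
static int example_check_flags(const struct xt_rpfilter_info *info)
{
	if (info->flags & ~XT_RPFILTER_OPTION_MASK)
		return -EINVAL;
	return 0;
}
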
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index c3b4548..f9930c8 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -12,4 +12,3 @@
 header-y += ipt_ecn.h
 header-y += ipt_realm.h
 header-y += ipt_ttl.h
-header-y += nf_nat.h
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index eabf95f..0e0c063 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -1,35 +1,15 @@
-/* iptables module for matching the ECN header in IPv4 and TCP header
- *
- * (C) 2002 Harald Welte <laforge@gnumonks.org>
- *
- * This software is distributed under GNU GPL v2, 1991
- * 
- * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
-*/
 #ifndef _IPT_ECN_H
 #define _IPT_ECN_H
 
-#include <linux/types.h>
-#include <linux/netfilter/xt_dscp.h>
+#include <linux/netfilter/xt_ecn.h>
+#define ipt_ecn_info xt_ecn_info
 
-#define IPT_ECN_IP_MASK	(~XT_DSCP_MASK)
-
-#define IPT_ECN_OP_MATCH_IP	0x01
-#define IPT_ECN_OP_MATCH_ECE	0x10
-#define IPT_ECN_OP_MATCH_CWR	0x20
-
-#define IPT_ECN_OP_MATCH_MASK	0xce
-
-/* match info */
-struct ipt_ecn_info {
-	__u8 operation;
-	__u8 invert;
-	__u8 ip_ect;
-	union {
-		struct {
-			__u8 ect;
-		} tcp;
-	} proto;
+enum {
+	IPT_ECN_IP_MASK       = XT_ECN_IP_MASK,
+	IPT_ECN_OP_MATCH_IP   = XT_ECN_OP_MATCH_IP,
+	IPT_ECN_OP_MATCH_ECE  = XT_ECN_OP_MATCH_ECE,
+	IPT_ECN_OP_MATCH_CWR  = XT_ECN_OP_MATCH_CWR,
+	IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK,
 };
 
-#endif /* _IPT_ECN_H */
+#endif /* _IPT_ECN_H */
diff --git a/include/linux/netfilter_ipv4/nf_nat.h b/include/linux/netfilter_ipv4/nf_nat.h
deleted file mode 100644
index 7a861d0..0000000
--- a/include/linux/netfilter_ipv4/nf_nat.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _LINUX_NF_NAT_H
-#define _LINUX_NF_NAT_H
-
-#include <linux/types.h>
-
-#define IP_NAT_RANGE_MAP_IPS 1
-#define IP_NAT_RANGE_PROTO_SPECIFIED 2
-#define IP_NAT_RANGE_PROTO_RANDOM 4
-#define IP_NAT_RANGE_PERSISTENT 8
-
-/* The protocol-specific manipulable parts of the tuple. */
-union nf_conntrack_man_proto {
-	/* Add other protocols here. */
-	__be16 all;
-
-	struct {
-		__be16 port;
-	} tcp;
-	struct {
-		__be16 port;
-	} udp;
-	struct {
-		__be16 id;
-	} icmp;
-	struct {
-		__be16 port;
-	} dccp;
-	struct {
-		__be16 port;
-	} sctp;
-	struct {
-		__be16 key;	/* GRE key is 32bit, PPtP only uses 16bit */
-	} gre;
-};
-
-/* Single range specification. */
-struct nf_nat_range {
-	/* Set to OR of flags above. */
-	unsigned int flags;
-
-	/* Inclusive: network order. */
-	__be32 min_ip, max_ip;
-
-	/* Inclusive: network order */
-	union nf_conntrack_man_proto min, max;
-};
-
-/* For backwards compat: don't use in modern code. */
-struct nf_nat_multi_range_compat {
-	unsigned int rangesize; /* Must be 1. */
-
-	/* hangs off end. */
-	struct nf_nat_range range[1];
-};
-
-#define nf_nat_multi_range nf_nat_multi_range_compat
-
-#endif
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 8374d29..52e4895 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -8,7 +8,7 @@
 #define NETLINK_UNUSED		1	/* Unused number				*/
 #define NETLINK_USERSOCK	2	/* Reserved for user mode socket protocols 	*/
 #define NETLINK_FIREWALL	3	/* Firewalling hook				*/
-#define NETLINK_INET_DIAG	4	/* INET socket monitoring			*/
+#define NETLINK_SOCK_DIAG	4	/* socket monitoring				*/
 #define NETLINK_NFLOG		5	/* netfilter/iptables ULOG */
 #define NETLINK_XFRM		6	/* ipsec */
 #define NETLINK_SELINUX		7	/* SELinux event notifications */
@@ -27,6 +27,8 @@
 #define NETLINK_RDMA		20
 #define NETLINK_CRYPTO		21	/* Crypto layer */
 
+#define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
+
 #define MAX_LINKS 32		
 
 struct sockaddr_nl {
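
The rename keeps existing binaries working through the compatibility define; a hedged userspace sketch using plain socket(2), with nothing new beyond the renamed constant:

#include <sys/socket.h>
#include <linux/netlink.h>

/* Illustrative only: both spellings now select netlink protocol number 4. */
int open_sock_diag(void)
{
	return socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
}
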
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 36cb955..01d4e5d 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -62,6 +62,8 @@
 	NFC_CMD_GET_DEVICE,
 	NFC_CMD_DEV_UP,
 	NFC_CMD_DEV_DOWN,
+	NFC_CMD_DEP_LINK_UP,
+	NFC_CMD_DEP_LINK_DOWN,
 	NFC_CMD_START_POLL,
 	NFC_CMD_STOP_POLL,
 	NFC_CMD_GET_TARGET,
@@ -86,6 +88,9 @@
  * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID
  * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the
  *	target is not NFC-Forum compliant)
+ * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes
+ * @NFC_ATTR_COMM_MODE: Passive or active mode
+ * @NFC_ATTR_RF_MODE: Initiator or target
  */
 enum nfc_attrs {
 	NFC_ATTR_UNSPEC,
@@ -95,6 +100,9 @@
 	NFC_ATTR_TARGET_INDEX,
 	NFC_ATTR_TARGET_SENS_RES,
 	NFC_ATTR_TARGET_SEL_RES,
+	NFC_ATTR_TARGET_NFCID1,
+	NFC_ATTR_COMM_MODE,
+	NFC_ATTR_RF_MODE,
 /* private: internal use only */
 	__NFC_ATTR_AFTER_LAST
 };
@@ -111,6 +119,14 @@
 
 #define NFC_PROTO_MAX		6
 
+/* NFC communication modes */
+#define NFC_COMM_ACTIVE  0
+#define NFC_COMM_PASSIVE 1
+
+/* NFC RF modes */
+#define NFC_RF_INITIATOR 0
+#define NFC_RF_TARGET    1
+
 /* NFC protocols masks used in bitsets */
 #define NFC_PROTO_JEWEL_MASK	(1 << NFC_PROTO_JEWEL)
 #define NFC_PROTO_MIFARE_MASK	(1 << NFC_PROTO_MIFARE)
@@ -125,9 +141,22 @@
 	__u32 nfc_protocol;
 };
 
+#define NFC_LLCP_MAX_SERVICE_NAME 63
+struct sockaddr_nfc_llcp {
+	sa_family_t sa_family;
+	__u32 dev_idx;
+	__u32 target_idx;
+	__u32 nfc_protocol;
+	__u8 dsap; /* Destination SAP, if known */
+	__u8 ssap; /* Source SAP to be bound to */
+	char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */
+	size_t service_name_len;
+};
+
 /* NFC socket protocols */
 #define NFC_SOCKPROTO_RAW	0
-#define NFC_SOCKPROTO_MAX	1
+#define NFC_SOCKPROTO_LLCP	1
+#define NFC_SOCKPROTO_MAX	2
 
 #define NFC_HEADER_SIZE 1
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 92ecf55..8c29950 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -373,7 +373,7 @@
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
 extern void put_nfs_open_context(struct nfs_open_context *ctx);
 extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
-extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode);
+extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
 extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
 extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
 extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 8049bf7..0f5ff37 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -509,6 +509,38 @@
  * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
  * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame.
  *
+ * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP
+ *	(or GO) interface (i.e. hostapd) to ask for unexpected frames to
+ *	implement sending deauth to stations that send unexpected class 3
+ *	frames. Also used as the event sent by the kernel when such a frame
+ *	is received.
+ *	For the event, the %NL80211_ATTR_MAC attribute carries the TA and
+ *	other attributes like the interface index are present.
+ *	If used as the command it must have an interface index and you can
+ *	only unsubscribe from the event by closing the socket. Subscription
+ *	is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events.
+ *
+ * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the
+ *	associated station identified by %NL80211_ATTR_MAC sent a 4addr frame
+ *	and wasn't already in a 4-addr VLAN. The event will be sent similarly
+ *	to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener.
+ *
+ * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
+ *	by sending a null data frame to it and reporting when the frame is
+ *	acknowledged. This is used to allow timing out inactive clients. Uses
+ *	%NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
+ *	direct reply with an %NL80211_ATTR_COOKIE that is later used to match
+ *	up the event with the request. The event includes the same data and
+ *	has %NL80211_ATTR_ACK set if the frame was ACKed.
+ *
+ * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from
+ *	other BSSes when any interfaces are in AP mode. This helps implement
+ *	OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME
+ *	messages. Note that per PHY only one application may register.
+ *
+ * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap indicating, per TID, whether the
+ *      No Acknowledgement Policy should be applied.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -638,6 +670,16 @@
 	NL80211_CMD_TDLS_OPER,
 	NL80211_CMD_TDLS_MGMT,
 
+	NL80211_CMD_UNEXPECTED_FRAME,
+
+	NL80211_CMD_PROBE_CLIENT,
+
+	NL80211_CMD_REGISTER_BEACONS,
+
+	NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+
+	NL80211_CMD_SET_NOACK_MAP,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -658,6 +700,8 @@
 #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
 #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT
 
+#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+
 /* source-level API compatibility */
 #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG
 #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG
@@ -1109,6 +1153,46 @@
  *	%NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be
  *	used for asking the driver to perform a TDLS operation.
  *
+ * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices
+ *	that have AP support to indicate that they have the AP SME integrated
+ *	with support for the features listed in this attribute, see
+ *	&enum nl80211_ap_sme_features.
+ *
+ * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells
+ *	the driver to not wait for an acknowledgement. Note that due to this,
+ *	it will also not give a status callback nor return a cookie. This is
+ *	mostly useful for probe responses to save airtime.
+ *
+ * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
+ *	&enum nl80211_feature_flags and is advertised in wiphy information.
+ * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
+ *	requests while operating in AP-mode.
+ *	This attribute holds a bitmap of the supported protocols for
+ *	offloading (see &enum nl80211_probe_resp_offload_support_attr).
+ *
+ * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
+ *	probe-response frame. The DA field in the 802.11 header is zero-ed out,
+ *	to be filled by the FW.
+ * @NL80211_ATTR_DISABLE_HT:  Force HT capable interfaces to disable
+ *      this feature.  Currently, only supported in mac80211 drivers.
+ * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
+ *      ATTR_HT_CAPABILITY to which attention should be paid.
+ *      Currently, only mac80211 NICs support this feature.
+ *      The values that may be configured are:
+ *       MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40
+ *       AMPDU density and AMPDU factor.
+ *      All values are treated as suggestions and may be ignored
+ *      by the driver as required.  The actual values may be seen in
+ *      the station debugfs ht_caps file.
+ *
+ * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country
+ *    abides by when initiating radiation on DFS channels. A country maps
+ *    to one DFS region.
+ *
+ * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
+ *      up to 16 TIDs.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1337,6 +1421,23 @@
 	NL80211_ATTR_TDLS_SUPPORT,
 	NL80211_ATTR_TDLS_EXTERNAL_SETUP,
 
+	NL80211_ATTR_DEVICE_AP_SME,
+
+	NL80211_ATTR_DONT_WAIT_FOR_ACK,
+
+	NL80211_ATTR_FEATURE_FLAGS,
+
+	NL80211_ATTR_PROBE_RESP_OFFLOAD,
+
+	NL80211_ATTR_PROBE_RESP,
+
+	NL80211_ATTR_DFS_REGION,
+
+	NL80211_ATTR_DISABLE_HT,
+	NL80211_ATTR_HT_CAPABILITY_MASK,
+
+	NL80211_ATTR_NOACK_MAP,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -1371,6 +1472,7 @@
 #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES
 #define NL80211_ATTR_KEY NL80211_ATTR_KEY
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
 #define NL80211_MAX_SUPP_RATES			32
 #define NL80211_MAX_SUPP_REG_RULES		32
@@ -1434,7 +1536,11 @@
  * @NL80211_STA_FLAG_WME: station is WME/QoS capable
  * @NL80211_STA_FLAG_MFP: station uses management frame protection
  * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated
- * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer
+ * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should
+ *	only be used in managed mode (even in the flags mask). Note that the
+ *	flag can't be changed, it is only valid while adding a station, and
+ *	attempts to change it will silently be ignored (rather than rejected
+ *	as errors).
  * @NL80211_STA_FLAG_MAX: highest station flag number currently defined
  * @__NL80211_STA_FLAG_AFTER_LAST: internal use
  */
@@ -1549,6 +1655,7 @@
  *     containing info as possible, see &enum nl80211_sta_bss_param
  * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
  * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update.
+ * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32)
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -1571,6 +1678,7 @@
 	NL80211_STA_INFO_BSS_PARAM,
 	NL80211_STA_INFO_CONNECTED_TIME,
 	NL80211_STA_INFO_STA_FLAGS,
+	NL80211_STA_INFO_BEACON_LOSS,
 
 	/* keep last */
 	__NL80211_STA_INFO_AFTER_LAST,
@@ -1845,6 +1953,21 @@
 };
 
 /**
+ * enum nl80211_dfs_regions - regulatory DFS regions
+ *
+ * @NL80211_DFS_UNSET: Country has no DFS master region specified
+ * @NL80211_DFS_FCC: Country follows DFS master rules from FCC
+ * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI
+ * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec
+ */
+enum nl80211_dfs_regions {
+	NL80211_DFS_UNSET	= 0,
+	NL80211_DFS_FCC		= 1,
+	NL80211_DFS_ETSI	= 2,
+	NL80211_DFS_JP		= 3,
+};
+
+/**
  * enum nl80211_survey_info - survey information
  *
  * These attribute types are used with %NL80211_ATTR_SURVEY_INFO
@@ -1977,6 +2100,10 @@
  * access to a broader network beyond the MBSS.  This is done via Root
  * Announcement frames.
  *
+ * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
+ * TUs) during which a mesh STA can send only one Action frame containing a
+ * PERR element.
+ *
  * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
  *
  * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -2000,6 +2127,7 @@
 	NL80211_MESHCONF_ELEMENT_TTL,
 	NL80211_MESHCONF_HWMP_RANN_INTERVAL,
 	NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+	NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
 
 	/* keep last */
 	__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2650,4 +2778,45 @@
 	NL80211_TDLS_DISABLE_LINK,
 };
 
+/*
+ * enum nl80211_ap_sme_features - device-integrated AP features
+ * Reserved for future use, no bits are defined in
+ * NL80211_ATTR_DEVICE_AP_SME yet.
+enum nl80211_ap_sme_features {
+};
+ */
+
+/**
+ * enum nl80211_feature_flags - device/driver features
+ * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back
+ *	TX status to the socket error queue when requested with the
+ *	socket option.
+ * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
+ */
+enum nl80211_feature_flags {
+	NL80211_FEATURE_SK_TX_STATUS	= 1 << 0,
+	NL80211_FEATURE_HT_IBSS		= 1 << 1,
+};
+
+/**
+ * enum nl80211_probe_resp_offload_support_attr - optional supported
+ *	protocols for probe-response offloading by the driver/FW.
+ *	To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
+ *	Each enum value represents a bit in the bitmap of supported
+ *	protocols. Typically a subset of probe-requests belonging to a
+ *	supported protocol will be excluded from offload and uploaded
+ *	to the host.
+ *
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u
+ */
+enum nl80211_probe_resp_offload_support_attr {
+	NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS =	1<<0,
+	NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 =	1<<1,
+	NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P =	1<<2,
+	NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U =	1<<3,
+};
+
 #endif /* __LINUX_NL80211_H */
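
A hedged userspace sketch of interpreting two of the new nl80211 attributes once their payloads have been parsed out of a message; both helpers are illustrative names, not kernel API:

#include <linux/types.h>
#include <linux/nl80211.h>

/* Illustrative only: NL80211_ATTR_FEATURE_FLAGS carries this bitmap. */
static int supports_ht_ibss(__u32 feature_flags)
{
	return (feature_flags & NL80211_FEATURE_HT_IBSS) != 0;
}

/* Illustrative only: NL80211_ATTR_DFS_REGION carries an nl80211_dfs_regions value. */
static const char *dfs_region_name(__u8 region)
{
	switch (region) {
	case NL80211_DFS_FCC:	return "FCC";
	case NL80211_DFS_ETSI:	return "ETSI";
	case NL80211_DFS_JP:	return "JP";
	default:		return "unset";
	}
}
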
diff --git a/include/linux/node.h b/include/linux/node.h
index 92370e2..624e53c 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -14,12 +14,12 @@
 #ifndef _LINUX_NODE_H_
 #define _LINUX_NODE_H_
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/cpumask.h>
 #include <linux/workqueue.h>
 
 struct node {
-	struct sys_device	sysdev;
+	struct device	dev;
 
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
 	struct work_struct	node_work;
@@ -80,6 +80,6 @@
 }
 #endif
 
-#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
+#define to_node(device) container_of(device, struct node, dev)
 
 #endif /* _LINUX_NODE_H_ */
diff --git a/include/linux/of.h b/include/linux/of.h
index 4948552..a75a831 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -65,6 +65,27 @@
 #endif
 };
 
+#define MAX_PHANDLE_ARGS 8
+struct of_phandle_args {
+	struct device_node *np;
+	int args_count;
+	uint32_t args[MAX_PHANDLE_ARGS];
+};
+
+#if defined(CONFIG_SPARC) || !defined(CONFIG_OF)
+/* Dummy ref counting routines - to be implemented later */
+static inline struct device_node *of_node_get(struct device_node *node)
+{
+	return node;
+}
+static inline void of_node_put(struct device_node *node)
+{
+}
+#else
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+#endif
+
 #ifdef CONFIG_OF
 
 /* Pointer for first entry in chain of all nodes. */
@@ -95,21 +116,6 @@
 
 extern struct device_node *of_find_all_nodes(struct device_node *prev);
 
-#if defined(CONFIG_SPARC)
-/* Dummy ref counting routines - to be implemented later */
-static inline struct device_node *of_node_get(struct device_node *node)
-{
-	return node;
-}
-static inline void of_node_put(struct device_node *node)
-{
-}
-
-#else
-extern struct device_node *of_node_get(struct device_node *node);
-extern void of_node_put(struct device_node *node);
-#endif
-
 /*
  * OF address retrieval & translation
  */
@@ -219,8 +225,8 @@
 extern const void *of_get_property(const struct device_node *node,
 				const char *name,
 				int *lenp);
-#define for_each_property(pp, properties) \
-	for (pp = properties; pp != NULL; pp = pp->next)
+#define for_each_property_of_node(dn, pp) \
+	for (pp = dn->properties; pp != NULL; pp = pp->next)
 
 extern int of_n_addr_cells(struct device_node *np);
 extern int of_n_size_cells(struct device_node *np);
@@ -230,9 +236,9 @@
 extern struct device_node *of_parse_phandle(struct device_node *np,
 					    const char *phandle_name,
 					    int index);
-extern int of_parse_phandles_with_args(struct device_node *np,
+extern int of_parse_phandle_with_args(struct device_node *np,
 	const char *list_name, const char *cells_name, int index,
-	struct device_node **out_node, const void **out_args);
+	struct of_phandle_args *out_args);
 
 extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
 extern int of_alias_get_id(struct device_node *np, const char *stem);
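
A hedged sketch of a caller converted to the new of_parse_phandle_with_args() signature; the property names follow the conventional GPIO binding, and the assumption that spec.np must be released with of_node_put() follows the usual device-tree reference rules:

#include <linux/of.h>

/* Illustrative only: resolve one entry of a phandle+args list. */
static int example_get_first_cell(struct device_node *np, int index)
{
	struct of_phandle_args spec;
	int ret;

	ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells", index, &spec);
	if (ret)
		return ret;

	/* spec.np is the provider node; spec.args[] holds args_count cells. */
	of_node_put(spec.np);
	return spec.args_count ? (int)spec.args[0] : 0;
}
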
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index c84d900..ed136ad 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -71,7 +71,7 @@
 				unsigned long node,
 				const char *compat);
 extern int of_fdt_match(struct boot_param_header *blob, unsigned long node,
-			const char **compat);
+			const char *const *compat);
 extern void of_fdt_unflatten_tree(unsigned long *blob,
 			       struct device_node **mynodes);
 
@@ -88,7 +88,7 @@
 extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
 				 unsigned long *size);
 extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
-extern int of_flat_dt_match(unsigned long node, const char **matches);
+extern int of_flat_dt_match(unsigned long node, const char *const *matches);
 extern unsigned long of_get_flat_dt_root(void);
 
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 52280a2..b254052 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 
 struct device_node;
 
@@ -57,8 +58,9 @@
 extern void of_gpiochip_add(struct gpio_chip *gc);
 extern void of_gpiochip_remove(struct gpio_chip *gc);
 extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
-extern int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
-				const void *gpio_spec, u32 *flags);
+extern int of_gpio_simple_xlate(struct gpio_chip *gc,
+				const struct of_phandle_args *gpiospec,
+				u32 *flags);
 
 #else /* CONFIG_OF_GPIO */
 
@@ -75,8 +77,8 @@
 }
 
 static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
-				       struct device_node *np,
-				       const void *gpio_spec, u32 *flags)
+				       const struct of_phandle_args *gpiospec,
+				       u32 *flags)
 {
 	return -ENOSYS;
 }
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
new file mode 100644
index 0000000..eb1efa5
--- /dev/null
+++ b/include/linux/openvswitch.h
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _LINUX_OPENVSWITCH_H
+#define _LINUX_OPENVSWITCH_H 1
+
+#include <linux/types.h>
+
+/**
+ * struct ovs_header - header for OVS Generic Netlink messages.
+ * @dp_ifindex: ifindex of local port for datapath (0 to make a request not
+ * specific to a datapath).
+ *
+ * Attributes following the header are specific to a particular OVS Generic
+ * Netlink family, but all of the OVS families use this header.
+ */
+
+struct ovs_header {
+	int dp_ifindex;
+};
+
+/* Datapaths. */
+
+#define OVS_DATAPATH_FAMILY  "ovs_datapath"
+#define OVS_DATAPATH_MCGROUP "ovs_datapath"
+#define OVS_DATAPATH_VERSION 0x1
+
+enum ovs_datapath_cmd {
+	OVS_DP_CMD_UNSPEC,
+	OVS_DP_CMD_NEW,
+	OVS_DP_CMD_DEL,
+	OVS_DP_CMD_GET,
+	OVS_DP_CMD_SET
+};
+
+/**
+ * enum ovs_datapath_attr - attributes for %OVS_DP_* commands.
+ * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local
+ * port".  This is the name of the network device whose dp_ifindex is given in
+ * the &struct ovs_header.  Always present in notifications.  Required in
+ * %OVS_DP_NEW requests.  May be used as an alternative to specifying
+ * dp_ifindex in other requests (with a dp_ifindex of 0).
+ * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially
+ * set on the datapath port (for OVS_ACTION_ATTR_MISS).  Only valid on
+ * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
+ * not be sent.
+ * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
+ * datapath.  Always present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_DP_* commands.
+ */
+enum ovs_datapath_attr {
+	OVS_DP_ATTR_UNSPEC,
+	OVS_DP_ATTR_NAME,       /* name of dp_ifindex netdev */
+	OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
+	OVS_DP_ATTR_STATS,      /* struct ovs_dp_stats */
+	__OVS_DP_ATTR_MAX
+};
+
+#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1)
+
+struct ovs_dp_stats {
+	__u64 n_hit;             /* Number of flow table matches. */
+	__u64 n_missed;          /* Number of flow table misses. */
+	__u64 n_lost;            /* Number of misses not sent to userspace. */
+	__u64 n_flows;           /* Number of flows present */
+};
+
+struct ovs_vport_stats {
+	__u64   rx_packets;		/* total packets received       */
+	__u64   tx_packets;		/* total packets transmitted    */
+	__u64   rx_bytes;		/* total bytes received         */
+	__u64   tx_bytes;		/* total bytes transmitted      */
+	__u64   rx_errors;		/* bad packets received         */
+	__u64   tx_errors;		/* packet transmit problems     */
+	__u64   rx_dropped;		/* no space in linux buffers    */
+	__u64   tx_dropped;		/* no space available in linux  */
+};
+
+/* Fixed logical ports. */
+#define OVSP_LOCAL      ((__u16)0)
+
+/* Packet transfer. */
+
+#define OVS_PACKET_FAMILY "ovs_packet"
+#define OVS_PACKET_VERSION 0x1
+
+enum ovs_packet_cmd {
+	OVS_PACKET_CMD_UNSPEC,
+
+	/* Kernel-to-user notifications. */
+	OVS_PACKET_CMD_MISS,    /* Flow table miss. */
+	OVS_PACKET_CMD_ACTION,  /* OVS_ACTION_ATTR_USERSPACE action. */
+
+	/* Userspace commands. */
+	OVS_PACKET_CMD_EXECUTE  /* Apply actions to a packet. */
+};
+
+/**
+ * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands.
+ * @OVS_PACKET_ATTR_PACKET: Present for all notifications.  Contains the entire
+ * packet as received, from the start of the Ethernet header onward.  For
+ * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by
+ * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is
+ * the flow key extracted from the packet as originally received.
+ * @OVS_PACKET_ATTR_KEY: Present for all notifications.  Contains the flow key
+ * extracted from the packet as nested %OVS_KEY_ATTR_* attributes.  This allows
+ * userspace to adapt its flow setup strategy by comparing its notion of the
+ * flow key against the kernel's.
+ * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet.  Used
+ * for %OVS_PACKET_CMD_EXECUTE.  It has nested %OVS_ACTION_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
+ * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
+ * %OVS_USERSPACE_ATTR_USERDATA attribute.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_PACKET_* commands.
+ */
+enum ovs_packet_attr {
+	OVS_PACKET_ATTR_UNSPEC,
+	OVS_PACKET_ATTR_PACKET,      /* Packet data. */
+	OVS_PACKET_ATTR_KEY,         /* Nested OVS_KEY_ATTR_* attributes. */
+	OVS_PACKET_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
+	OVS_PACKET_ATTR_USERDATA,    /* u64 OVS_ACTION_ATTR_USERSPACE arg. */
+	__OVS_PACKET_ATTR_MAX
+};
+
+#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1)
+
+/* Virtual ports. */
+
+#define OVS_VPORT_FAMILY  "ovs_vport"
+#define OVS_VPORT_MCGROUP "ovs_vport"
+#define OVS_VPORT_VERSION 0x1
+
+enum ovs_vport_cmd {
+	OVS_VPORT_CMD_UNSPEC,
+	OVS_VPORT_CMD_NEW,
+	OVS_VPORT_CMD_DEL,
+	OVS_VPORT_CMD_GET,
+	OVS_VPORT_CMD_SET
+};
+
+enum ovs_vport_type {
+	OVS_VPORT_TYPE_UNSPEC,
+	OVS_VPORT_TYPE_NETDEV,   /* network device */
+	OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
+	__OVS_VPORT_TYPE_MAX
+};
+
+#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1)
+
+/**
+ * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands.
+ * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath.
+ * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type
+ * of vport.
+ * @OVS_VPORT_ATTR_NAME: Name of vport.  For a vport based on a network device
+ * this is the name of the network device.  Maximum length %IFNAMSIZ-1 bytes
+ * plus a null terminator.
+ * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
+ * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
+ * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
+ * this port.  A value of zero indicates that upcalls should not be sent.
+ * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
+ * packets sent or received through the vport.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_VPORT_* commands.
+ *
+ * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and
+ * %OVS_VPORT_ATTR_NAME attributes are required.  %OVS_VPORT_ATTR_PORT_NO is
+ * optional; if not specified a free port number is automatically selected.
+ * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type
+ * of vport.  All other attributes are ignored.
+ *
+ * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to
+ * look up the vport to operate on; otherwise dp_idx from the &struct
+ * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport.
+ */
+enum ovs_vport_attr {
+	OVS_VPORT_ATTR_UNSPEC,
+	OVS_VPORT_ATTR_PORT_NO,	/* u32 port number within datapath */
+	OVS_VPORT_ATTR_TYPE,	/* u32 OVS_VPORT_TYPE_* constant. */
+	OVS_VPORT_ATTR_NAME,	/* string name, up to IFNAMSIZ bytes long */
+	OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
+	OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
+	OVS_VPORT_ATTR_STATS,	/* struct ovs_vport_stats */
+	__OVS_VPORT_ATTR_MAX
+};
+
+#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+
+/* Flows. */
+
+#define OVS_FLOW_FAMILY  "ovs_flow"
+#define OVS_FLOW_MCGROUP "ovs_flow"
+#define OVS_FLOW_VERSION 0x1
+
+enum ovs_flow_cmd {
+	OVS_FLOW_CMD_UNSPEC,
+	OVS_FLOW_CMD_NEW,
+	OVS_FLOW_CMD_DEL,
+	OVS_FLOW_CMD_GET,
+	OVS_FLOW_CMD_SET
+};
+
+struct ovs_flow_stats {
+	__u64 n_packets;         /* Number of matched packets. */
+	__u64 n_bytes;           /* Number of matched bytes. */
+};
+
+enum ovs_key_attr {
+	OVS_KEY_ATTR_UNSPEC,
+	OVS_KEY_ATTR_ENCAP,	/* Nested set of encapsulated attributes. */
+	OVS_KEY_ATTR_PRIORITY,  /* u32 skb->priority */
+	OVS_KEY_ATTR_IN_PORT,   /* u32 OVS dp port number */
+	OVS_KEY_ATTR_ETHERNET,  /* struct ovs_key_ethernet */
+	OVS_KEY_ATTR_VLAN,	/* be16 VLAN TCI */
+	OVS_KEY_ATTR_ETHERTYPE,	/* be16 Ethernet type */
+	OVS_KEY_ATTR_IPV4,      /* struct ovs_key_ipv4 */
+	OVS_KEY_ATTR_IPV6,      /* struct ovs_key_ipv6 */
+	OVS_KEY_ATTR_TCP,       /* struct ovs_key_tcp */
+	OVS_KEY_ATTR_UDP,       /* struct ovs_key_udp */
+	OVS_KEY_ATTR_ICMP,      /* struct ovs_key_icmp */
+	OVS_KEY_ATTR_ICMPV6,    /* struct ovs_key_icmpv6 */
+	OVS_KEY_ATTR_ARP,       /* struct ovs_key_arp */
+	OVS_KEY_ATTR_ND,        /* struct ovs_key_nd */
+	__OVS_KEY_ATTR_MAX
+};
+
+#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
+
+/**
+ * enum ovs_frag_type - IPv4 and IPv6 fragment type
+ * @OVS_FRAG_TYPE_NONE: Packet is not a fragment.
+ * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0.
+ * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset.
+ *
+ * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag in
+ * &struct ovs_key_ipv6.
+ */
+enum ovs_frag_type {
+	OVS_FRAG_TYPE_NONE,
+	OVS_FRAG_TYPE_FIRST,
+	OVS_FRAG_TYPE_LATER,
+	__OVS_FRAG_TYPE_MAX
+};
+
+#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1)
+
+struct ovs_key_ethernet {
+	__u8	 eth_src[6];
+	__u8	 eth_dst[6];
+};
+
+struct ovs_key_ipv4 {
+	__be32 ipv4_src;
+	__be32 ipv4_dst;
+	__u8   ipv4_proto;
+	__u8   ipv4_tos;
+	__u8   ipv4_ttl;
+	__u8   ipv4_frag;	/* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_ipv6 {
+	__be32 ipv6_src[4];
+	__be32 ipv6_dst[4];
+	__be32 ipv6_label;	/* 20-bits in least-significant bits. */
+	__u8   ipv6_proto;
+	__u8   ipv6_tclass;
+	__u8   ipv6_hlimit;
+	__u8   ipv6_frag;	/* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_tcp {
+	__be16 tcp_src;
+	__be16 tcp_dst;
+};
+
+struct ovs_key_udp {
+	__be16 udp_src;
+	__be16 udp_dst;
+};
+
+struct ovs_key_icmp {
+	__u8 icmp_type;
+	__u8 icmp_code;
+};
+
+struct ovs_key_icmpv6 {
+	__u8 icmpv6_type;
+	__u8 icmpv6_code;
+};
+
+struct ovs_key_arp {
+	__be32 arp_sip;
+	__be32 arp_tip;
+	__be16 arp_op;
+	__u8   arp_sha[6];
+	__u8   arp_tha[6];
+};
+
+struct ovs_key_nd {
+	__u32 nd_target[4];
+	__u8  nd_sll[6];
+	__u8  nd_tll[6];
+};
+
+/**
+ * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
+ * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
+ * key.  Always present in notifications.  Required for all requests (except
+ * dumps).
+ * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
+ * the actions to take for packets that match the key.  Always present in
+ * notifications.  Required for %OVS_FLOW_CMD_NEW requests, optional for
+ * %OVS_FLOW_CMD_SET requests.
+ * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
+ * flow.  Present in notifications if the stats would be nonzero.  Ignored in
+ * requests.
+ * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the
+ * TCP flags seen on packets in this flow.  Only present in notifications for
+ * TCP flows, and only if it would be nonzero.  Ignored in requests.
+ * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on
+ * the system monotonic clock, at which a packet was last processed for this
+ * flow.  Only present in notifications if a packet has been processed for this
+ * flow.  Ignored in requests.
+ * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
+ * last-used time, accumulated TCP flags, and statistics for this flow.
+ * Otherwise ignored in requests.  Never present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_FLOW_* commands.
+ */
+enum ovs_flow_attr {
+	OVS_FLOW_ATTR_UNSPEC,
+	OVS_FLOW_ATTR_KEY,       /* Sequence of OVS_KEY_ATTR_* attributes. */
+	OVS_FLOW_ATTR_ACTIONS,   /* Nested OVS_ACTION_ATTR_* attributes. */
+	OVS_FLOW_ATTR_STATS,     /* struct ovs_flow_stats. */
+	OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
+	OVS_FLOW_ATTR_USED,      /* u64 msecs last used in monotonic time. */
+	OVS_FLOW_ATTR_CLEAR,     /* Flag to clear stats, tcp_flags, used. */
+	__OVS_FLOW_ATTR_MAX
+};
+
+#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
+
+/**
+ * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
+ * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
+ * @OVS_ACTION_ATTR_SAMPLE.  A value of 0 samples no packets, a value of
+ * %UINT32_MAX samples all packets and intermediate values sample intermediate
+ * fractions of packets.
+ * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event.
+ * Actions are passed as nested attributes.
+ *
+ * Executes the specified actions with the given probability on a per-packet
+ * basis.
+ */
+enum ovs_sample_attr {
+	OVS_SAMPLE_ATTR_UNSPEC,
+	OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
+	OVS_SAMPLE_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
+	__OVS_SAMPLE_ATTR_MAX,
+};
+
+#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
+
+/**
+ * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
+ * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
+ * message should be sent.  Required.
+ * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the
+ * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
+ */
+enum ovs_userspace_attr {
+	OVS_USERSPACE_ATTR_UNSPEC,
+	OVS_USERSPACE_ATTR_PID,	      /* u32 Netlink PID to receive upcalls. */
+	OVS_USERSPACE_ATTR_USERDATA,  /* u64 optional user-specified cookie. */
+	__OVS_USERSPACE_ATTR_MAX
+};
+
+#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
+
+/**
+ * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
+ * @vlan_tpid: Tag protocol identifier (TPID) to push.
+ * @vlan_tci: Tag control identifier (TCI) to push.  The CFI bit must be set
+ * (but it will not be set in the 802.1Q header that is pushed).
+ *
+ * The @vlan_tpid value is typically %ETH_P_8021Q.  The only acceptable TPID
+ * values are those that the kernel module also parses as 802.1Q headers, to
+ * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
+ * from having surprising results.
+ */
+struct ovs_action_push_vlan {
+	__be16 vlan_tpid;	/* 802.1Q TPID. */
+	__be16 vlan_tci;	/* 802.1Q TCI (VLAN ID and priority). */
+};
+
+/**
+ * enum ovs_action_attr - Action types.
+ *
+ * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
+ * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested
+ * %OVS_USERSPACE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header.  The
+ * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
+ * value.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
+ * packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
+ * the nested %OVS_SAMPLE_ATTR_* attributes.
+ *
+ * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
+ * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
+ * type may not be changed.
+ */
+
+enum ovs_action_attr {
+	OVS_ACTION_ATTR_UNSPEC,
+	OVS_ACTION_ATTR_OUTPUT,	      /* u32 port number. */
+	OVS_ACTION_ATTR_USERSPACE,    /* Nested OVS_USERSPACE_ATTR_*. */
+	OVS_ACTION_ATTR_SET,          /* One nested OVS_KEY_ATTR_*. */
+	OVS_ACTION_ATTR_PUSH_VLAN,    /* struct ovs_action_push_vlan. */
+	OVS_ACTION_ATTR_POP_VLAN,     /* No argument. */
+	OVS_ACTION_ATTR_SAMPLE,       /* Nested OVS_SAMPLE_ATTR_*. */
+	__OVS_ACTION_ATTR_MAX
+};
+
+#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
+
+#endif /* _LINUX_OPENVSWITCH_H */
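
A hedged userspace-style sketch of filling one of the fixed-size key structures that back the nested OVS_KEY_ATTR_* attributes above; all addresses and field values are placeholders:

#include <arpa/inet.h>
#include <linux/openvswitch.h>

/* Illustrative only: an IPv4 flow-key fragment for OVS_KEY_ATTR_IPV4. */
static void example_fill_ipv4_key(struct ovs_key_ipv4 *key)
{
	key->ipv4_src   = htonl(0x0a000001);	/* 10.0.0.1, placeholder */
	key->ipv4_dst   = htonl(0x0a000002);	/* 10.0.0.2, placeholder */
	key->ipv4_proto = 6;			/* IPPROTO_TCP */
	key->ipv4_tos   = 0;
	key->ipv4_ttl   = 64;
	key->ipv4_frag  = OVS_FRAG_TYPE_NONE;
}
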
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f8912..0885561 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@
 	PERF_COUNT_HW_BUS_CYCLES		= 6,
 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
+	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,
 
 	PERF_COUNT_HW_MAX,			/* non-ABI */
 };
@@ -890,6 +891,7 @@
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				nr_freq;
 	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
@@ -1063,12 +1065,12 @@
 	}
 }
 
-extern struct jump_label_key perf_sched_events;
+extern struct jump_label_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events))
+	if (static_branch(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1077,7 +1079,7 @@
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events))
+	if (static_branch(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
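
A hedged userspace sketch of requesting the new reference-cycles counter; only the fields needed for the example are shown:

#include <string.h>
#include <linux/perf_event.h>

/* Illustrative only: attribute setup for PERF_COUNT_HW_REF_CPU_CYCLES. */
static void example_fill_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type   = PERF_TYPE_HARDWARE;
	attr->size   = sizeof(*attr);
	attr->config = PERF_COUNT_HW_REF_CPU_CYCLES;
}
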
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index f53a416..f48bfc8 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -38,6 +38,7 @@
 #define PNPIPE_ENCAP		1
 #define PNPIPE_IFINDEX		2
 #define PNPIPE_HANDLE		3
+#define PNPIPE_INITSTATE	4
 
 #define PNADDR_ANY		0
 #define PNADDR_BROADCAST	0xFC
@@ -49,6 +50,7 @@
 
 /* ioctls */
 #define SIOCPNGETOBJECT		(SIOCPROTOPRIVATE + 0)
+#define SIOCPNENABLEPIPE	(SIOCPROTOPRIVATE + 13)
 #define SIOCPNADDRESOURCE	(SIOCPROTOPRIVATE + 14)
 #define SIOCPNDELRESOURCE	(SIOCPROTOPRIVATE + 15)
 
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 7281d5a..8f1b928 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -162,25 +162,24 @@
 	unsigned	flows;		/* Maximal number of flows  */
 };
 
+struct tc_sfq_qopt_v1 {
+	struct tc_sfq_qopt v0;
+	unsigned int	depth;		/* max number of packets per flow */
+	unsigned int	headdrop;
+};
+
+
 struct tc_sfq_xstats {
 	__s32		allot;
 };
 
-/*
- *  NOTE: limit, divisor and flows are hardwired to code at the moment.
- *
- *	limit=flows=128, divisor=1024;
- *
- *	The only reason for this is efficiency, it is possible
- *	to change these parameters in compile time.
- */
-
 /* RED section */
 
 enum {
 	TCA_RED_UNSPEC,
 	TCA_RED_PARMS,
 	TCA_RED_STAB,
+	TCA_RED_MAX_P,
 	__TCA_RED_MAX,
 };
 
@@ -194,8 +193,9 @@
 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
 	unsigned char   Scell_log;	/* cell size for idle damping */
 	unsigned char	flags;
-#define TC_RED_ECN	1
-#define TC_RED_HARDDROP	2
+#define TC_RED_ECN		1
+#define TC_RED_HARDDROP		2
+#define TC_RED_ADAPTATIVE	4
 };
 
 struct tc_red_xstats {
@@ -214,6 +214,7 @@
        TCA_GRED_PARMS,
        TCA_GRED_STAB,
        TCA_GRED_DPS,
+       TCA_GRED_MAX_P,
 	   __TCA_GRED_MAX,
 };
 
@@ -253,6 +254,7 @@
 	TCA_CHOKE_UNSPEC,
 	TCA_CHOKE_PARMS,
 	TCA_CHOKE_STAB,
+	TCA_CHOKE_MAX_P,
 	__TCA_CHOKE_MAX,
 };
 
@@ -465,6 +467,7 @@
 	TCA_NETEM_REORDER,
 	TCA_NETEM_CORRUPT,
 	TCA_NETEM_LOSS,
+	TCA_NETEM_RATE,
 	__TCA_NETEM_MAX,
 };
 
@@ -495,6 +498,13 @@
 	__u32	correlation;
 };
 
+struct tc_netem_rate {
+	__u32	rate;	/* byte/s */
+	__s32	packet_overhead;
+	__u32	cell_size;
+	__s32	cell_overhead;
+};
+
 enum {
 	NETEM_LOSS_UNSPEC,
 	NETEM_LOSS_GI,		/* General Intuitive - 4 state model */
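
A hedged sketch of the payload carried by the new TCA_NETEM_RATE attribute; the rate value is a placeholder:

#include <linux/pkt_sched.h>

/* Illustrative only: 1 Mbit/s shaping with no per-packet or cell adjustments. */
static void example_fill_netem_rate(struct tc_netem_rate *r)
{
	r->rate            = 125000;	/* bytes per second, placeholder */
	r->packet_overhead = 0;
	r->cell_size       = 0;		/* 0 disables cell-based accounting */
	r->cell_overhead   = 0;
}
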
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 2a23f7d..60e9994 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -63,7 +63,7 @@
 		u64 dma_mask;
 };
 extern struct platform_device *platform_device_register_full(
-		struct platform_device_info *pdevinfo);
+		const struct platform_device_info *pdevinfo);
 
 /**
  * platform_device_register_resndata - add a platform-level device with
@@ -196,16 +196,8 @@
  * calling it replaces module_init() and module_exit()
  */
 #define module_platform_driver(__platform_driver) \
-static int __init __platform_driver##_init(void) \
-{ \
-	return platform_driver_register(&(__platform_driver)); \
-} \
-module_init(__platform_driver##_init); \
-static void __exit __platform_driver##_exit(void) \
-{ \
-	platform_driver_unregister(&(__platform_driver)); \
-} \
-module_exit(__platform_driver##_exit);
+	module_driver(__platform_driver, platform_driver_register, \
+			platform_driver_unregister)
 
 extern struct platform_device *platform_create_bundle(struct platform_driver *driver,
 					int (*probe)(struct platform_device *),
@@ -264,62 +256,34 @@
 }
 #endif /* MODULE */
 
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare	NULL
-#define platform_pm_complete	NULL
-#endif
-
 #ifdef CONFIG_SUSPEND
 extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
 extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
 #else
 #define platform_pm_suspend		NULL
 #define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
 #endif
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
 extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
 extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
 extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
 #else
 #define platform_pm_freeze		NULL
 #define platform_pm_thaw		NULL
 #define platform_pm_poweroff		NULL
 #define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
-	.prepare = platform_pm_prepare, \
-	.complete = platform_pm_complete, \
 	.suspend = platform_pm_suspend, \
 	.resume = platform_pm_resume, \
 	.freeze = platform_pm_freeze, \
 	.thaw = platform_pm_thaw, \
 	.poweroff = platform_pm_poweroff, \
-	.restore = platform_pm_restore, \
-	.suspend_noirq = platform_pm_suspend_noirq, \
-	.resume_noirq = platform_pm_resume_noirq, \
-	.freeze_noirq = platform_pm_freeze_noirq, \
-	.thaw_noirq = platform_pm_thaw_noirq, \
-	.poweroff_noirq = platform_pm_poweroff_noirq, \
-	.restore_noirq = platform_pm_restore_noirq,
+	.restore = platform_pm_restore,
 #else
 #define USE_PLATFORM_PM_SLEEP_OPS
 #endif
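
A hedged sketch of a driver converted to the new one-line registration helper; every name here is a placeholder, not an existing driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

/* Replaces the old hand-written module_init()/module_exit() pair. */
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
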
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3f3ed83..e4982ac 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -300,19 +300,6 @@
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS	(&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS	NULL
-#endif
-
 /**
  * PM_EVENT_ messages
  *
@@ -521,6 +508,8 @@
 	unsigned long		active_jiffies;
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
+	ktime_t			suspend_time;
+	s64			max_time_suspended_ns;
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 	struct pm_qos_constraints *constraints;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 65633e5..a03a0ad 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -10,6 +10,7 @@
 #define _LINUX_PM_DOMAIN_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -21,6 +22,23 @@
 
 struct dev_power_governor {
 	bool (*power_down_ok)(struct dev_pm_domain *domain);
+	bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+	int (*start)(struct device *dev);
+	int (*stop)(struct device *dev);
+	int (*save_state)(struct device *dev);
+	int (*restore_state)(struct device *dev);
+	int (*suspend)(struct device *dev);
+	int (*suspend_late)(struct device *dev);
+	int (*resume_early)(struct device *dev);
+	int (*resume)(struct device *dev);
+	int (*freeze)(struct device *dev);
+	int (*freeze_late)(struct device *dev);
+	int (*thaw_early)(struct device *dev);
+	int (*thaw)(struct device *dev);
+	bool (*active_wakeup)(struct device *dev);
 };
 
 struct generic_pm_domain {
@@ -32,6 +50,7 @@
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
+	char *name;
 	unsigned int in_progress;	/* Number of devices being suspended now */
 	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
@@ -44,10 +63,13 @@
 	bool suspend_power_off;	/* Power status before system suspend */
 	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
 	int (*power_off)(struct generic_pm_domain *domain);
+	s64 power_off_latency_ns;
 	int (*power_on)(struct generic_pm_domain *domain);
-	int (*start_device)(struct device *dev);
-	int (*stop_device)(struct device *dev);
-	bool (*active_wakeup)(struct device *dev);
+	s64 power_on_latency_ns;
+	struct gpd_dev_ops dev_ops;
+	s64 break_even_ns;	/* Power break even for the entire domain. */
+	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
+	ktime_t power_off_time;
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@
 	struct list_head slave_node;
 };
 
+struct gpd_timing_data {
+	s64 stop_latency_ns;
+	s64 start_latency_ns;
+	s64 save_state_latency_ns;
+	s64 restore_state_latency_ns;
+	s64 break_even_ns;
+};
+
 struct generic_pm_domain_data {
 	struct pm_domain_data base;
+	struct gpd_dev_ops ops;
+	struct gpd_timing_data td;
 	bool need_restore;
 };
 
@@ -73,18 +105,54 @@
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
-			       struct device *dev);
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+	return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct dev_power_governor simple_qos_governor;
+
+extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+				 struct device *dev,
+				 struct gpd_timing_data *td);
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+				      struct device *dev)
+{
+	return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 				  struct device *dev);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *new_subdomain);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				     struct generic_pm_domain *target);
+extern int pm_genpd_add_callbacks(struct device *dev,
+				  struct gpd_dev_ops *ops,
+				  struct gpd_timing_data *td);
+extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
+
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+
+extern bool default_stop_ok(struct device *dev);
+
+extern struct dev_power_governor pm_domain_always_on_gov;
 #else
+
+static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+					struct device *dev,
+					struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
 				      struct device *dev)
 {
@@ -105,14 +173,35 @@
 {
 	return -ENOSYS;
 }
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
-				 struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_add_callbacks(struct device *dev,
+					 struct gpd_dev_ops *ops,
+					 struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
+static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+				 struct dev_power_governor *gov, bool is_off)
+{
+}
 static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
 	return -ENOSYS;
 }
+static inline bool default_stop_ok(struct device *dev)
+{
+	return false;
+}
+#define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_remove_callbacks(struct device *dev)
+{
+	return __pm_genpd_remove_callbacks(dev, true);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
 extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
 extern void pm_genpd_poweroff_unused(void);
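
A hedged sketch of attaching a device to a PM domain together with the new per-device timing data; every latency value is a placeholder:

#include <linux/pm_domain.h>

/* Illustrative only: register a device along with its measured latencies. */
static int example_attach(struct generic_pm_domain *genpd, struct device *dev)
{
	static struct gpd_timing_data td = {
		.stop_latency_ns          = 20000,
		.start_latency_ns         = 50000,
		.save_state_latency_ns    = 100000,
		.restore_state_latency_ns = 150000,
		.break_even_ns            = 300000,
	};

	return __pm_genpd_add_device(genpd, dev, &td);
}
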
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 83b0ea3..e5bbcba 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -78,6 +78,7 @@
 int pm_qos_request_active(struct pm_qos_request *req);
 s32 pm_qos_read_value(struct pm_qos_constraints *c);
 
+s32 __dev_pm_qos_read_value(struct device *dev);
 s32 dev_pm_qos_read_value(struct device *dev);
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   s32 value);
@@ -91,6 +92,8 @@
 int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value);
 #else
 static inline int pm_qos_update_target(struct pm_qos_constraints *c,
 				       struct plist_node *node,
@@ -119,6 +122,8 @@
 static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
 			{ return 0; }
 
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+			{ return 0; }
 static inline s32 dev_pm_qos_read_value(struct device *dev)
 			{ return 0; }
 static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@
 {
 	dev->power.power_state = PMSG_INVALID;
 }
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value)
+			{ return 0; }
 #endif
 
 #endif
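
A hedged sketch of the new ancestor-request helper; the constraint value and its unit are placeholders and depend on the device PM QoS class in use:

#include <linux/pm_qos.h>

/* Illustrative only: place a request on the nearest capable ancestor device. */
static int example_request(struct device *dev, struct dev_pm_qos_request *req)
{
	return dev_pm_qos_add_ancestor_request(dev, req, 100);
}
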
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d3085e7..609daae 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -45,6 +45,8 @@
 extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
 extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+						 s64 delta_ns);
 
 static inline bool pm_children_suspended(struct device *dev)
 {
@@ -148,6 +150,9 @@
 static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 
+static inline void pm_runtime_update_max_time_suspended(struct device *dev,
+							s64 delta_ns) {}
+
 #endif /* !CONFIG_PM_RUNTIME */
 
 static inline int pm_runtime_idle(struct device *dev)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 79159de..2110a81 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -40,12 +40,6 @@
 #define	RED_INACTIVE	0x09F911029D74E35BULL	/* when obj is inactive */
 #define	RED_ACTIVE	0xD84156C5635688C0ULL	/* when obj is active */
 
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-#define MEMBLOCK_INACTIVE	0x3a84fb0144c9e71bULL
-#else
-#define MEMBLOCK_INACTIVE	0x44c9e71bUL
-#endif
-
 #define SLUB_RED_INACTIVE	0xbb
 #define SLUB_RED_ACTIVE		0xcc
 
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 643b96c..6d9e575 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -50,7 +50,7 @@
 
 struct proc_dir_entry {
 	unsigned int low_ino;
-	mode_t mode;
+	umode_t mode;
 	nlink_t nlink;
 	uid_t uid;
 	gid_t gid;
@@ -106,9 +106,9 @@
 
 void proc_flush_task(struct task_struct *task);
 
-extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+extern struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
 						struct proc_dir_entry *parent);
-struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
 				struct proc_dir_entry *parent,
 				const struct file_operations *proc_fops,
 				void *data);
@@ -146,17 +146,17 @@
 extern struct proc_dir_entry *proc_symlink(const char *,
 		struct proc_dir_entry *, const char *);
 extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *);
-extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
+extern struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
 			struct proc_dir_entry *parent);
 
-static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
 	struct proc_dir_entry *parent, const struct file_operations *proc_fops)
 {
 	return proc_create_data(name, mode, parent, proc_fops, NULL);
 }
 
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
-	mode_t mode, struct proc_dir_entry *base, 
+	umode_t mode, struct proc_dir_entry *base, 
 	read_proc_t *read_proc, void * data)
 {
 	struct proc_dir_entry *res=create_proc_entry(name,mode,base);
@@ -168,7 +168,7 @@
 }
  
 extern struct proc_dir_entry *proc_net_fops_create(struct net *net,
-	const char *name, mode_t mode, const struct file_operations *fops);
+	const char *name, umode_t mode, const struct file_operations *fops);
 extern void proc_net_remove(struct net *net, const char *name);
 extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
 	struct proc_dir_entry *parent);
@@ -185,15 +185,15 @@
 }
 
 static inline struct proc_dir_entry *create_proc_entry(const char *name,
-	mode_t mode, struct proc_dir_entry *parent) { return NULL; }
+	umode_t mode, struct proc_dir_entry *parent) { return NULL; }
 static inline struct proc_dir_entry *proc_create(const char *name,
-	mode_t mode, struct proc_dir_entry *parent,
+	umode_t mode, struct proc_dir_entry *parent,
 	const struct file_operations *proc_fops)
 {
 	return NULL;
 }
 static inline struct proc_dir_entry *proc_create_data(const char *name,
-	mode_t mode, struct proc_dir_entry *parent,
+	umode_t mode, struct proc_dir_entry *parent,
 	const struct file_operations *proc_fops, void *data)
 {
 	return NULL;
@@ -205,10 +205,10 @@
 static inline struct proc_dir_entry *proc_mkdir(const char *name,
 	struct proc_dir_entry *parent) {return NULL;}
 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
-	mode_t mode, struct proc_dir_entry *parent) { return NULL; }
+	umode_t mode, struct proc_dir_entry *parent) { return NULL; }
 
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
-	mode_t mode, struct proc_dir_entry *base, 
+	umode_t mode, struct proc_dir_entry *base, 
 	read_proc_t *read_proc, void * data) { return NULL; }
 
 struct tty_driver;
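
All procfs creation helpers now take umode_t instead of mode_t; callers passing octal literals need no change. A self-contained sketch of the common proc_create() pattern under the new prototype (the entry name, show routine and file_operations below are invented for illustration):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_proc_init(void)
{
	/* the mode argument is now umode_t; 0444 still works as before */
	if (!proc_create("example", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}
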
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 2ca8cde..e1461e1 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,6 +22,9 @@
 #ifndef _LINUX_PSTORE_H
 #define _LINUX_PSTORE_H
 
+#include <linux/time.h>
+#include <linux/kmsg_dump.h>
+
 /* types */
 enum pstore_type_id {
 	PSTORE_TYPE_DMESG	= 0,
@@ -41,7 +44,8 @@
 	ssize_t		(*read)(u64 *id, enum pstore_type_id *type,
 			struct timespec *time, char **buf,
 			struct pstore_info *psi);
-	int		(*write)(enum pstore_type_id type, u64 *id,
+	int		(*write)(enum pstore_type_id type,
+			enum kmsg_dump_reason reason, u64 *id,
 			unsigned int part, size_t size, struct pstore_info *psi);
 	int		(*erase)(enum pstore_type_id type, u64 id,
 			struct pstore_info *psi);
@@ -50,18 +54,12 @@
 
 #ifdef CONFIG_PSTORE
 extern int pstore_register(struct pstore_info *);
-extern int pstore_write(enum pstore_type_id type, char *buf, size_t size);
 #else
 static inline int
 pstore_register(struct pstore_info *psi)
 {
 	return -ENODEV;
 }
-static inline int
-pstore_write(enum pstore_type_id type, char *buf, size_t size)
-{
-	return -ENODEV;
-}
 #endif
 
 #endif /*_LINUX_PSTORE_H*/
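
With the extra kmsg_dump_reason argument, a pstore backend can decide per dump whether to persist the record. A hypothetical backend write hook using the new signature (only the prototype comes from this patch; the filtering policy and the backend itself are assumptions):

#include <linux/pstore.h>

static int my_pstore_write(enum pstore_type_id type,
			   enum kmsg_dump_reason reason, u64 *id,
			   unsigned int part, size_t size,
			   struct pstore_info *psi)
{
	/* e.g. keep real crashes only, skip clean reboot/halt dumps */
	if (type == PSTORE_TYPE_DMESG &&
	    reason != KMSG_DUMP_OOPS && reason != KMSG_DUMP_PANIC)
		return -EINVAL;

	/* ... copy 'size' bytes from psi->buf into the backing store ... */
	*id = part;
	return 0;
}
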
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index 9e65d9e..6f6df86 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -277,7 +277,10 @@
 					   */
 #define	MD_FEATURE_RESHAPE_ACTIVE	4
 #define	MD_FEATURE_BAD_BLOCKS		8 /* badblock list is not empty */
-
-#define	MD_FEATURE_ALL			(1|2|4|8)
+#define	MD_FEATURE_REPLACEMENT		16 /* This device is replacing an
+					    * active device with same 'role'.
+					    * 'recovery_offset' is also set.
+					    */
+#define	MD_FEATURE_ALL			(1|2|4|8|16)
 
 #endif 
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 2b59cc8..53272e9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -132,7 +132,7 @@
 						     PROT_READ|PROT_WRITE,   \
 						     MAP_PRIVATE|MAP_ANONYMOUS,\
 						     0, 0))
-# define free_pages(x, y)	munmap((void *)(x), (y)*PAGE_SIZE)
+# define free_pages(x, y)	munmap((void *)(x), PAGE_SIZE << (y))
 
 static inline void cpu_relax(void)
 {
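
The user-space wrapper previously mapped an allocation order to y * PAGE_SIZE bytes, but an order-y allocation actually spans PAGE_SIZE << y bytes, so munmap() was handed the wrong length for any order other than 1. A quick user-space check of the arithmetic (illustrative only):

#include <assert.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the example */

int main(void)
{
	unsigned int order = 2;

	/* an order-2 allocation covers four pages ... */
	assert((PAGE_SIZE << order) == 4 * PAGE_SIZE);
	/* ... while the old expression only accounted for two */
	assert(order * PAGE_SIZE == 2 * PAGE_SIZE);
	return 0;
}
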
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 3a8f0c9..5bf5500 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -2,7 +2,7 @@
 #define _LINUX_RAMFS_H
 
 struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
-	 int mode, dev_t dev);
+	 umode_t mode, dev_t dev);
 extern struct dentry *ramfs_mount(struct file_system_type *fs_type,
 	 int flags, const char *dev_name, void *data);
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226..81c04f4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,6 +51,8 @@
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 extern void rcutorture_record_test_transition(void);
 extern void rcutorture_record_progress(unsigned long vernum);
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+				      struct rcu_head *rhp);
 #else
 static inline void rcutorture_record_test_transition(void)
 {
@@ -58,6 +60,12 @@
 static inline void rcutorture_record_progress(unsigned long vernum)
 {
 }
+#ifdef CONFIG_RCU_TRACE
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+				      struct rcu_head *rhp);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
 #endif
 
 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
@@ -177,23 +185,10 @@
 extern void rcu_bh_qs(int cpu);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
+extern void rcu_idle_enter(void);
+extern void rcu_idle_exit(void);
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
 
 /*
  * Infrastructure to implement the synchronize_() primitives in
@@ -233,22 +228,30 @@
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
+#ifdef CONFIG_PROVE_RCU
+extern int rcu_is_cpu_idle(void);
+#else /* !CONFIG_PROVE_RCU */
+static inline int rcu_is_cpu_idle(void)
+{
+	return 0;
+}
+#endif /* else !CONFIG_PROVE_RCU */
+
+static inline void rcu_lock_acquire(struct lockdep_map *map)
+{
+	WARN_ON_ONCE(rcu_is_cpu_idle());
+	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+}
+
+static inline void rcu_lock_release(struct lockdep_map *map)
+{
+	WARN_ON_ONCE(rcu_is_cpu_idle());
+	lock_release(map, 1, _THIS_IP_);
+}
+
 extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
-		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-
 extern struct lockdep_map rcu_bh_lock_map;
-# define rcu_read_acquire_bh() \
-		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
-
 extern struct lockdep_map rcu_sched_lock_map;
-# define rcu_read_acquire_sched() \
-		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_sched() \
-		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
-
 extern int debug_lockdep_rcu_enabled(void);
 
 /**
@@ -262,11 +265,18 @@
  *
  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
+ *
+ * Note that rcu_read_lock() and the matching rcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock() in process context if the matching rcu_read_lock()
+ * was invoked from within an irq handler.
  */
 static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
+	if (rcu_is_cpu_idle())
+		return 0;
 	return lock_is_held(&rcu_lock_map);
 }
 
@@ -290,6 +300,19 @@
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes.  This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way we can
+ * notice an extended quiescent state to other CPUs that started a grace
+ * period. Otherwise we would delay any grace period as long as we run in
+ * the idle task.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -298,6 +321,8 @@
 
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
+	if (rcu_is_cpu_idle())
+		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -311,12 +336,8 @@
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-# define rcu_read_acquire()		do { } while (0)
-# define rcu_read_release()		do { } while (0)
-# define rcu_read_acquire_bh()		do { } while (0)
-# define rcu_read_release_bh()		do { } while (0)
-# define rcu_read_acquire_sched()	do { } while (0)
-# define rcu_read_release_sched()	do { } while (0)
+# define rcu_lock_acquire(a)		do { } while (0)
+# define rcu_lock_release(a)		do { } while (0)
 
 static inline int rcu_read_lock_held(void)
 {
@@ -637,7 +658,7 @@
 {
 	__rcu_read_lock();
 	__acquire(RCU);
-	rcu_read_acquire();
+	rcu_lock_acquire(&rcu_lock_map);
 }
 
 /*
@@ -657,7 +678,7 @@
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_read_release();
+	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
 	__rcu_read_unlock();
 }
@@ -673,12 +694,17 @@
  * critical sections in interrupt context can use just rcu_read_lock(),
  * though this should at least be commented to avoid confusing people
  * reading the code.
+ *
+ * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
+ * was invoked from some other task.
  */
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
 	__acquire(RCU_BH);
-	rcu_read_acquire_bh();
+	rcu_lock_acquire(&rcu_bh_lock_map);
 }
 
 /*
@@ -688,7 +714,7 @@
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_read_release_bh();
+	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
 	local_bh_enable();
 }
@@ -700,12 +726,17 @@
  * are being done using call_rcu_sched() or synchronize_rcu_sched().
  * Read-side critical sections can also be introduced by anything that
  * disables preemption, including local_irq_disable() and friends.
+ *
+ * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_sched() from process context if the matching
+ * rcu_read_lock_sched() was invoked from an NMI handler.
  */
 static inline void rcu_read_lock_sched(void)
 {
 	preempt_disable();
 	__acquire(RCU_SCHED);
-	rcu_read_acquire_sched();
+	rcu_lock_acquire(&rcu_sched_lock_map);
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -722,7 +753,7 @@
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_read_release_sched();
+	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
 	preempt_enable();
 }
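
For ordinary readers nothing changes: rcu_read_lock()/rcu_read_unlock() pairs are written exactly as before, but under CONFIG_DEBUG_LOCK_ALLOC they now go through rcu_lock_acquire()/rcu_lock_release(), which warn if the CPU is idle from RCU's point of view (between rcu_idle_enter() and rcu_idle_exit()). A sketch of a conventional reader, with the pointer and struct invented for illustration:

#include <linux/rcupdate.h>

struct my_data {
	int val;
};

static struct my_data __rcu *my_ptr;	/* hypothetical RCU-protected pointer */

static int read_val(void)
{
	struct my_data *p;
	int val = -1;

	rcu_read_lock();		/* fine in process or irq context ... */
	p = rcu_dereference(my_ptr);	/* ... but would now trigger the
					 * rcu_is_cpu_idle() warning if the
					 * CPU were in the idle loop */
	if (p)
		val = p->val;
	rcu_read_unlock();
	return val;
}
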
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 690276a..eb93921 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -23,9 +23,8 @@
 /* An enum of all the supported cache types */
 enum regcache_type {
 	REGCACHE_NONE,
-	REGCACHE_INDEXED,
 	REGCACHE_RBTREE,
-	REGCACHE_LZO
+	REGCACHE_COMPRESSED
 };
 
 /**
@@ -83,7 +82,7 @@
 	bool (*precious_reg)(struct device *dev, unsigned int reg);
 
 	unsigned int max_register;
-	struct reg_default *reg_defaults;
+	const struct reg_default *reg_defaults;
 	unsigned int num_reg_defaults;
 	enum regcache_type cache_type;
 	const void *reg_defaults_raw;
@@ -129,6 +128,8 @@
 			       const struct regmap_config *config);
 
 void regmap_exit(struct regmap *map);
+int regmap_reinit_cache(struct regmap *map,
+			const struct regmap_config *config);
 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
 int regmap_raw_write(struct regmap *map, unsigned int reg,
 		     const void *val, size_t val_len);
@@ -139,9 +140,61 @@
 		     size_t val_count);
 int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val);
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+			     unsigned int mask, unsigned int val,
+			     bool *change);
 
 int regcache_sync(struct regmap *map);
 void regcache_cache_only(struct regmap *map, bool enable);
 void regcache_cache_bypass(struct regmap *map, bool enable);
+void regcache_mark_dirty(struct regmap *map);
+
+/**
+ * Description of an IRQ for the generic regmap irq_chip.
+ *
+ * @reg_offset: Offset of the status/mask register within the bank
+ * @mask:       Mask used to flag/control the register.
+ */
+struct regmap_irq {
+	unsigned int reg_offset;
+	unsigned int mask;
+};
+
+/**
+ * Description of a generic regmap irq_chip.  This is not intended to
+ * handle every possible interrupt controller, but it should handle a
+ * substantial proportion of those that are found in the wild.
+ *
+ * @name:        Descriptive name for IRQ controller.
+ *
+ * @status_base: Base status register address.
+ * @mask_base:   Base mask register address.
+ * @ack_base:    Base ack address.  If zero then the chip is clear on read.
+ *
+ * @num_regs:    Number of registers in each control bank.
+ * @irqs:        Descriptors for individual IRQs.  Interrupt numbers are
+ *               assigned based on the index in the array of the interrupt.
+ * @num_irqs:    Number of descriptors.
+ */
+struct regmap_irq_chip {
+	const char *name;
+
+	unsigned int status_base;
+	unsigned int mask_base;
+	unsigned int ack_base;
+
+	int num_regs;
+
+	const struct regmap_irq *irqs;
+	int num_irqs;
+};
+
+struct regmap_irq_chip_data;
+
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+			int irq_base, struct regmap_irq_chip *chip,
+			struct regmap_irq_chip_data **data);
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
 
 #endif
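
With the new regmap_irq_chip support a device driver only has to describe its status/mask/ack registers and the per-interrupt bits; the generic code does the rest. A hypothetical wiring sketch (register addresses, bit layout and names are invented; only the structures and calls come from this header):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq my_irqs[] = {
	{ .reg_offset = 0, .mask = 0x01 },	/* IRQ 0, e.g. "alarm" */
	{ .reg_offset = 0, .mask = 0x02 },	/* IRQ 1, e.g. "gpio"  */
};

static struct regmap_irq_chip my_irq_chip = {
	.name		= "my-chip",
	.status_base	= 0x10,		/* assumed register addresses */
	.mask_base	= 0x14,
	.ack_base	= 0x18,
	.num_regs	= 1,
	.irqs		= my_irqs,
	.num_irqs	= ARRAY_SIZE(my_irqs),
};

static int my_chip_setup_irqs(struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *irq_data;
	int ret;

	/* -1 asks the core to allocate the interrupt range dynamically */
	ret = regmap_add_irq_chip(map, irq, IRQF_ONESHOT, -1,
				  &my_irq_chip, &irq_data);
	if (ret)
		return ret;

	/* sub-drivers then request regmap_irq_chip_get_base(irq_data) + n */
	return 0;
}
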
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 96d465f..2213ddc 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1759,13 +1759,14 @@
 					      REISERFS_QUOTA_TRANS_BLOCKS(sb)))
 
 #ifdef CONFIG_QUOTA
+#define REISERFS_QUOTA_OPTS ((1 << REISERFS_USRQUOTA) | (1 << REISERFS_GRPQUOTA))
 /* We need to update data and inode (atime) */
-#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0)
+#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? 2 : 0)
 /* 1 balancing, 1 bitmap, 1 data per write + stat data update */
-#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \
+#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
 (DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0)
 /* same as with INIT */
-#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \
+#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
 (DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0)
 #else
 #define REISERFS_QUOTA_TRANS_BLOCKS(s) 0
@@ -2056,7 +2057,7 @@
 
 struct reiserfs_security_handle;
 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
-		       struct inode *dir, int mode,
+		       struct inode *dir, umode_t mode,
 		       const char *symname, loff_t i_size,
 		       struct dentry *dentry, struct inode *inode,
 		       struct reiserfs_security_handle *security);
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 52c83b6..8c9e85c 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -417,6 +417,7 @@
 	char *s_qf_names[MAXQUOTAS];
 	int s_jquota_fmt;
 #endif
+	char *s_jdev;		/* Stored jdev for mount option showing */
 #ifdef CONFIG_REISERFS_CHECK
 
 	struct tree_balance *cur_tb;	/*
@@ -482,7 +483,8 @@
 	REISERFS_ERROR_RO,
 	REISERFS_ERROR_CONTINUE,
 
-	REISERFS_QUOTA,		/* Some quota option specified */
+	REISERFS_USRQUOTA,	/* User quota option specified */
+	REISERFS_GRPQUOTA,	/* Group quota option specified */
 
 	REISERFS_TEST1,
 	REISERFS_TEST2,
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 14a86bc..a822fd7 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -144,7 +144,7 @@
 	 */
 	struct dentry *(*create_buf_file)(const char *filename,
 					  struct dentry *parent,
-					  int mode,
+					  umode_t mode,
 					  struct rchan_buf *buf,
 					  int *is_global);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c4f3e9..ad93e1e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -273,9 +273,11 @@
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
 #else
 static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 /*
@@ -483,8 +485,8 @@
 
 #define INIT_CPUTIME	\
 	(struct task_cputime) {					\
-		.utime = cputime_zero,				\
-		.stime = cputime_zero,				\
+		.utime = 0,					\
+		.stime = 0,					\
 		.sum_exec_runtime = 0,				\
 	}
 
@@ -901,6 +903,10 @@
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
 };
 
 struct sched_group {
@@ -925,6 +931,15 @@
 	return to_cpumask(sg->cpumask);
 }
 
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -1315,8 +1330,8 @@
 	 * older sibling, respectively.  (p->father can be replaced with 
 	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process */
-	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+	struct task_struct __rcu *real_parent; /* real parent process */
+	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
 	 * children/sibling forms the list of my natural children
 	 */
@@ -1772,7 +1787,6 @@
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1788,7 +1802,6 @@
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -2070,6 +2083,14 @@
 extern int sched_setscheduler_nocheck(struct task_struct *, int,
 				      const struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
+/**
+ * is_idle_task - is the specified task an idle task?
+ * @p: the task in question.
+ */
+static inline bool is_idle_task(struct task_struct *p)
+{
+	return p->pid == 0;
+}
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
 
diff --git a/include/linux/security.h b/include/linux/security.h
index 19d8e04..98112cf 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -186,7 +186,7 @@
  * Security module identifier.
  *
  * @name:
- *	A string that acts as a unique identifeir for the LSM with max number
+ *	A string that acts as a unique identifier for the LSM with max number
  *	of characters = SECURITY_NAME_MAX.
  *
  * Security hooks for program execution operations.
@@ -275,7 +275,7 @@
  *	@copy copied data which will be passed to the security module.
  *	Returns 0 if the copy was successful.
  * @sb_remount:
- *	Extracts security system specifc mount options and verifys no changes
+ *	Extracts security system specific mount options and verifies no changes
  *	are being made to those options.
  *	@sb superblock being remounted
  *	@data contains the filesystem-specific data.
@@ -380,15 +380,15 @@
  *	Return 0 if permission is granted.
  * @inode_mkdir:
  *	Check permissions to create a new directory in the existing directory
- *	associated with inode strcture @dir.
- *	@dir containst the inode structure of parent of the directory to be created.
+ *	associated with inode structure @dir.
+ *	@dir contains the inode structure of parent of the directory to be created.
  *	@dentry contains the dentry structure of new directory.
  *	@mode contains the mode of new directory.
  *	Return 0 if permission is granted.
  * @path_mkdir:
  *	Check permissions to create a new directory in the existing directory
- *	associated with path strcture @path.
- *	@dir containst the path structure of parent of the directory
+ *	associated with path structure @path.
+ *	@dir contains the path structure of parent of the directory
  *	to be created.
  *	@dentry contains the dentry structure of new directory.
  *	@mode contains the mode of new directory.
@@ -578,7 +578,7 @@
  *	@file contains the file structure.
  *	@cmd contains the operation to perform.
  *	@arg contains the operational arguments.
- *	Check permission for an ioctl operation on @file.  Note that @arg can
+ *	Check permission for an ioctl operation on @file.  Note that @arg
  *	sometimes represents a user space pointer; in other cases, it may be a
  *	simple integer value.  When @arg represents a user space pointer, it
  *	should never be used by the security module.
@@ -606,7 +606,7 @@
  *	Return 0 if permission is granted.
  * @file_fcntl:
  *	Check permission before allowing the file operation specified by @cmd
- *	from being performed on the file @file.  Note that @arg can sometimes
+ *	from being performed on the file @file.  Note that @arg sometimes
  *	represents a user space pointer; in other cases, it may be a simple
  *	integer value.  When @arg represents a user space pointer, it should
  *	never be used by the security module.
@@ -793,7 +793,7 @@
  *	information can be saved using the eff_cap field of the
  *	netlink_skb_parms structure.  Also may be used to provide fine
  *	grained control over message transmission.
- *	@sk associated sock of task sending the message.,
+ *	@sk associated sock of task sending the message.
  *	@skb contains the sk_buff structure for the netlink message.
  *	Return 0 if the information was successfully saved and message
  *	is allowed to be transmitted.
@@ -1080,9 +1080,9 @@
  *	should free it.
  *	@key points to the key to be queried.
  *	@_buffer points to a pointer that should be set to point to the
- *	 resulting string (if no label or an error occurs).
+ *	resulting string (if no label or an error occurs).
  *	Return the length of the string (including terminating NUL) or -ve if
- *      an error.
+ *	an error.
  *	May also return 0 (and a NULL buffer pointer) if there is no label.
  *
  * Security hooks affecting all System V IPC operations.
@@ -1268,7 +1268,7 @@
  *	credentials.
  *	@tsk contains the task_struct for the process.
  *	@cred contains the credentials to use.
- *      @ns contains the user namespace we want the capability in
+ *	@ns contains the user namespace we want the capability in
  *	@cap contains the capability <include/linux/capability.h>.
  *	@audit: Whether to write an audit message or not
  *	Return 0 if the capability is granted for @tsk.
@@ -1370,7 +1370,7 @@
  * 	@ctxlen contains the length of @ctx.
  *
  * @inode_getsecctx:
- * 	Returns a string containing all relavent security context information
+ *	Returns a string containing all relevant security context information
  *
  * 	@inode we wish to get the security context of.
  *	@ctx is a pointer in which to place the allocated security context.
@@ -1424,9 +1424,9 @@
 
 #ifdef CONFIG_SECURITY_PATH
 	int (*path_unlink) (struct path *dir, struct dentry *dentry);
-	int (*path_mkdir) (struct path *dir, struct dentry *dentry, int mode);
+	int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode);
 	int (*path_rmdir) (struct path *dir, struct dentry *dentry);
-	int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode,
+	int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode,
 			   unsigned int dev);
 	int (*path_truncate) (struct path *path);
 	int (*path_symlink) (struct path *dir, struct dentry *dentry,
@@ -1435,8 +1435,7 @@
 			  struct dentry *new_dentry);
 	int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
 			    struct path *new_dir, struct dentry *new_dentry);
-	int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt,
-			   mode_t mode);
+	int (*path_chmod) (struct path *path, umode_t mode);
 	int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
 	int (*path_chroot) (struct path *path);
 #endif
@@ -1447,16 +1446,16 @@
 				    const struct qstr *qstr, char **name,
 				    void **value, size_t *len);
 	int (*inode_create) (struct inode *dir,
-			     struct dentry *dentry, int mode);
+			     struct dentry *dentry, umode_t mode);
 	int (*inode_link) (struct dentry *old_dentry,
 			   struct inode *dir, struct dentry *new_dentry);
 	int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
 	int (*inode_symlink) (struct inode *dir,
 			      struct dentry *dentry, const char *old_name);
-	int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode);
+	int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
 	int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
 	int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
-			    int mode, dev_t dev);
+			    umode_t mode, dev_t dev);
 	int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
 			     struct inode *new_dir, struct dentry *new_dentry);
 	int (*inode_readlink) (struct dentry *dentry);
@@ -1716,15 +1715,15 @@
 int security_old_inode_init_security(struct inode *inode, struct inode *dir,
 				     const struct qstr *qstr, char **name,
 				     void **value, size_t *len);
-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
 int security_inode_symlink(struct inode *dir, struct dentry *dentry,
 			   const char *old_name);
-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev);
 int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
 			  struct inode *new_dir, struct dentry *new_dentry);
 int security_inode_readlink(struct dentry *dentry);
@@ -2056,12 +2055,12 @@
 						   char **name, void **value,
 						   size_t *len)
 {
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline int security_inode_create(struct inode *dir,
 					 struct dentry *dentry,
-					 int mode)
+					 umode_t mode)
 {
 	return 0;
 }
@@ -2855,9 +2854,9 @@
 
 #ifdef CONFIG_SECURITY_PATH
 int security_path_unlink(struct path *dir, struct dentry *dentry);
-int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode);
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode);
 int security_path_rmdir(struct path *dir, struct dentry *dentry);
-int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
 			unsigned int dev);
 int security_path_truncate(struct path *path);
 int security_path_symlink(struct path *dir, struct dentry *dentry,
@@ -2866,8 +2865,7 @@
 		       struct dentry *new_dentry);
 int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
 			 struct path *new_dir, struct dentry *new_dentry);
-int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
-			mode_t mode);
+int security_path_chmod(struct path *path, umode_t mode);
 int security_path_chown(struct path *path, uid_t uid, gid_t gid);
 int security_path_chroot(struct path *path);
 #else	/* CONFIG_SECURITY_PATH */
@@ -2877,7 +2875,7 @@
 }
 
 static inline int security_path_mkdir(struct path *dir, struct dentry *dentry,
-				      int mode)
+				      umode_t mode)
 {
 	return 0;
 }
@@ -2888,7 +2886,7 @@
 }
 
 static inline int security_path_mknod(struct path *dir, struct dentry *dentry,
-				      int mode, unsigned int dev)
+				      umode_t mode, unsigned int dev)
 {
 	return 0;
 }
@@ -2919,9 +2917,7 @@
 	return 0;
 }
 
-static inline int security_path_chmod(struct dentry *dentry,
-				      struct vfsmount *mnt,
-				      mode_t mode)
+static inline int security_path_chmod(struct path *path, umode_t mode)
 {
 	return 0;
 }
@@ -3010,7 +3006,7 @@
 
 #ifdef CONFIG_SECURITYFS
 
-extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
+extern struct dentry *securityfs_create_file(const char *name, umode_t mode,
 					     struct dentry *parent, void *data,
 					     const struct file_operations *fops);
 extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
@@ -3025,7 +3021,7 @@
 }
 
 static inline struct dentry *securityfs_create_file(const char *name,
-						    mode_t mode,
+						    umode_t mode,
 						    struct dentry *parent,
 						    void *data,
 						    const struct file_operations *fops)
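
The path-based chmod hook now receives a single struct path plus a umode_t instead of a dentry/vfsmount pair. A sketch of an LSM implementation against the new prototype (the module name and the deny-world-writable policy are placeholders, not real kernel code):

#include <linux/errno.h>
#include <linux/security.h>
#include <linux/stat.h>

static int my_lsm_path_chmod(struct path *path, umode_t mode)
{
	/* placeholder policy: refuse to make anything world-writable */
	if (mode & S_IWOTH)
		return -EPERM;
	return 0;
}
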
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 0b69a46..44f1514 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -74,7 +74,7 @@
 	}
 }
 
-char *mangle_path(char *s, char *p, char *esc);
+char *mangle_path(char *s, const char *p, const char *esc);
 int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
@@ -86,10 +86,10 @@
 
 __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
 
-int seq_path(struct seq_file *, struct path *, char *);
-int seq_dentry(struct seq_file *, struct dentry *, char *);
-int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
-		  char *esc);
+int seq_path(struct seq_file *, const struct path *, const char *);
+int seq_dentry(struct seq_file *, struct dentry *, const char *);
+int seq_path_root(struct seq_file *m, const struct path *path,
+		  const struct path *root, const char *esc);
 int seq_bitmap(struct seq_file *m, const unsigned long *bits,
 				   unsigned int nr_bits);
 static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 1f05bbe..8f012f8 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -66,6 +66,7 @@
  * dependent on the 8250 driver.
  */
 struct uart_port;
+struct uart_8250_port;
 
 int serial8250_register_port(struct uart_port *);
 void serial8250_unregister_port(int line);
@@ -81,7 +82,11 @@
 		struct ktermios *termios, struct ktermios *old);
 extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
 			     unsigned int oldstate);
+extern int fsl8250_handle_irq(struct uart_port *port);
 int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
+unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
+void serial8250_tx_chars(struct uart_8250_port *up);
+unsigned int serial8250_modem_status(struct uart_8250_port *up);
 
 extern void serial8250_set_isa_configurator(void (*v)
 					(int port, struct uart_port *up,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index eadf33d..b67305e 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -351,6 +351,7 @@
 #define UPF_CONS_FLOW		((__force upf_t) (1 << 23))
 #define UPF_SHARE_IRQ		((__force upf_t) (1 << 24))
 #define UPF_EXAR_EFR		((__force upf_t) (1 << 25))
+#define UPF_IIR_ONCE		((__force upf_t) (1 << 26))
 /* The exact UART type is known and should not be probed.  */
 #define UPF_FIXED_TYPE		((__force upf_t) (1 << 27))
 #define UPF_BOOT_AUTOCONF	((__force upf_t) (1 << 28))
@@ -483,10 +484,19 @@
 /*
  * The following are helper functions for the low level drivers.
  */
+
+extern void uart_handle_dcd_change(struct uart_port *uport,
+		unsigned int status);
+extern void uart_handle_cts_change(struct uart_port *uport,
+		unsigned int status);
+
+extern void uart_insert_char(struct uart_port *port, unsigned int status,
+		 unsigned int overrun, unsigned int ch, unsigned int flag);
+
+#ifdef SUPPORT_SYSRQ
 static inline int
 uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 {
-#ifdef SUPPORT_SYSRQ
 	if (port->sysrq) {
 		if (ch && time_before(jiffies, port->sysrq)) {
 			handle_sysrq(ch);
@@ -495,11 +505,10 @@
 		}
 		port->sysrq = 0;
 	}
-#endif
 	return 0;
 }
-#ifndef SUPPORT_SYSRQ
-#define uart_handle_sysrq_char(port,ch) uart_handle_sysrq_char(port, 0)
+#else
+#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
 #endif
 
 /*
@@ -522,89 +531,6 @@
 	return 0;
 }
 
-/**
- *	uart_handle_dcd_change - handle a change of carrier detect state
- *	@uport: uart_port structure for the open port
- *	@status: new carrier detect status, nonzero if active
- */
-static inline void
-uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
-{
-	struct uart_state *state = uport->state;
-	struct tty_port *port = &state->port;
-	struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
-	struct pps_event_time ts;
-
-	if (ld && ld->ops->dcd_change)
-		pps_get_ts(&ts);
-
-	uport->icount.dcd++;
-#ifdef CONFIG_HARD_PPS
-	if ((uport->flags & UPF_HARDPPS_CD) && status)
-		hardpps();
-#endif
-
-	if (port->flags & ASYNC_CHECK_CD) {
-		if (status)
-			wake_up_interruptible(&port->open_wait);
-		else if (port->tty)
-			tty_hangup(port->tty);
-	}
-
-	if (ld && ld->ops->dcd_change)
-		ld->ops->dcd_change(port->tty, status, &ts);
-	if (ld)
-		tty_ldisc_deref(ld);
-}
-
-/**
- *	uart_handle_cts_change - handle a change of clear-to-send state
- *	@uport: uart_port structure for the open port
- *	@status: new clear to send status, nonzero if active
- */
-static inline void
-uart_handle_cts_change(struct uart_port *uport, unsigned int status)
-{
-	struct tty_port *port = &uport->state->port;
-	struct tty_struct *tty = port->tty;
-
-	uport->icount.cts++;
-
-	if (port->flags & ASYNC_CTS_FLOW) {
-		if (tty->hw_stopped) {
-			if (status) {
-				tty->hw_stopped = 0;
-				uport->ops->start_tx(uport);
-				uart_write_wakeup(uport);
-			}
-		} else {
-			if (!status) {
-				tty->hw_stopped = 1;
-				uport->ops->stop_tx(uport);
-			}
-		}
-	}
-}
-
-#include <linux/tty_flip.h>
-
-static inline void
-uart_insert_char(struct uart_port *port, unsigned int status,
-		 unsigned int overrun, unsigned int ch, unsigned int flag)
-{
-	struct tty_struct *tty = port->state->port.tty;
-
-	if ((status & port->ignore_status_mask & ~overrun) == 0)
-		tty_insert_flip_char(tty, ch, flag);
-
-	/*
-	 * Overrun is special.  Since it's reported immediately,
-	 * it doesn't affect the current character.
-	 */
-	if (status & ~port->ignore_status_mask & overrun)
-		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-}
-
 /*
  *	UART_ENABLE_MS - determine if port should enable modem status irqs
  */
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 5812fef..b160645 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -95,6 +95,7 @@
 	unsigned int num_resources;
 	intc_enum force_enable;
 	intc_enum force_disable;
+	bool skip_syscore_suspend;
 	struct intc_hw_desc hw;
 };
 
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9291ac3..e4c711c 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -30,7 +30,7 @@
 	spinlock_t stat_lock;	    /* Serialize shmem_sb_info changes */
 	uid_t uid;		    /* Mount uid for root directory */
 	gid_t gid;		    /* Mount gid for root directory */
-	mode_t mode;		    /* Mount mode for root directory */
+	umode_t mode;		    /* Mount mode for root directory */
 	struct mempolicy *mpol;     /* default memory policy for mappings */
 };
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe86488..50db9b0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -30,6 +30,7 @@
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -87,7 +88,6 @@
  *	at device setup time.
  *	NETIF_F_HW_CSUM	- it is clever device, it is able to checksum
  *			  everything.
- *	NETIF_F_NO_CSUM - loopback or reliable single hop media.
  *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
  *			  TCP/UDP over IPv4. Sigh. Vendors like this
  *			  way by an unknown reason. Though, see comment above
@@ -128,13 +128,17 @@
 
 struct sk_buff;
 
-/* To allow 64K frame to be packed as single skb without frag_list. Since
- * GRO uses frags we allocate at least 16 regardless of page size.
+/* To allow 64K frame to be packed as single skb without frag_list we
+ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
+ * buffers which do not start on a page boundary.
+ *
+ * Since GRO uses frags we allocate at least 16 regardless of page
+ * size.
  */
-#if (65536/PAGE_SIZE + 2) < 16
+#if (65536/PAGE_SIZE + 1) < 16
 #define MAX_SKB_FRAGS 16UL
 #else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
 
 typedef struct skb_frag_struct skb_frag_t;
@@ -218,6 +222,9 @@
 
 	/* device driver supports TX zero-copy buffers */
 	SKBTX_DEV_ZEROCOPY = 1 << 4,
+
+	/* generate wifi status information (where possible) */
+	SKBTX_WIFI_STATUS = 1 << 5,
 };
 
 /*
@@ -235,15 +242,15 @@
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
-	unsigned short	nr_frags;
+	unsigned char	nr_frags;
+	__u8		tx_flags;
 	unsigned short	gso_size;
 	/* Warning: this field is not always filled in (UFO)! */
 	unsigned short	gso_segs;
 	unsigned short  gso_type;
-	__be32          ip6_frag_id;
-	__u8		tx_flags;
 	struct sk_buff	*frag_list;
 	struct skb_shared_hwtstamps hwtstamps;
+	__be32          ip6_frag_id;
 
 	/*
 	 * Warning : all fields before dataref are cleared in __alloc_skb()
@@ -352,6 +359,8 @@
  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
  *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
  *		ports.
+ *	@wifi_acked_valid: wifi_acked was set
+ *	@wifi_acked: whether frame was acked on wifi or not
  *	@dma_cookie: a cookie to one of several possible DMA operations
  *		done by skb DMA functions
  *	@secmark: security marking
@@ -445,10 +454,11 @@
 #endif
 	__u8			ooo_okay:1;
 	__u8			l4_rxhash:1;
+	__u8			wifi_acked_valid:1;
+	__u8			wifi_acked:1;
+	/* 10/12 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
-	/* 0/13 bit hole */
-
 #ifdef CONFIG_NET_DMA
 	dma_cookie_t		dma_cookie;
 #endif
@@ -540,6 +550,7 @@
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
+extern struct sk_buff *build_skb(void *data);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
@@ -561,8 +572,9 @@
 				 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
 				gfp_t priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-				 gfp_t gfp_mask);
+extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
+				 int headroom, gfp_t gfp_mask);
+
 extern int	       pskb_expand_head(struct sk_buff *skb,
 					int nhead, int ntail,
 					gfp_t gfp_mask);
@@ -1662,38 +1674,6 @@
 }
 
 /**
- *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
- *	@dev: network device to receive on
- *	@gfp_mask: alloc_pages_node mask
- *
- * 	Allocate a new page. dev currently unused.
- *
- * 	%NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- *	netdev_alloc_page - allocate a page for ps-rx on a specific device
- *	@dev: network device to receive on
- *
- * 	Allocate a new page. dev currently unused.
- *
- * 	%NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
-	return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
-	__free_page(page);
-}
-
-/**
  * skb_frag_page - retrieve the page refered to by a paged fragment
  * @frag: the paged fragment
  *
@@ -1824,6 +1804,12 @@
 			    frag->page_offset + offset, size, dir);
 }
 
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+					gfp_t gfp_mask)
+{
+	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
 /**
  *	skb_clone_writable - is the header of a clone writable
  *	@skb: buffer to check
@@ -2105,7 +2091,8 @@
 extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
 				 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb,
+				   netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
@@ -2263,6 +2250,15 @@
 	sw_tx_timestamp(skb);
 }
 
+/**
+ * skb_complete_wifi_ack - deliver skb with wifi status
+ *
+ * @skb: the original outgoing packet
+ * @acked: ack status
+ *
+ */
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
+
 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
new file mode 100644
index 0000000..ce718cb
--- /dev/null
+++ b/include/linux/smscphy.h
@@ -0,0 +1,25 @@
+#ifndef __LINUX_SMSCPHY_H__
+#define __LINUX_SMSCPHY_H__
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM  30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+	(MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+	 MII_LAN83C185_ISF_INT7)
+
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#define MII_LAN83C185_ENERGYON  (1 << 1)  /* ENERGYON */
+
+#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644
index 0000000..251729a
--- /dev/null
+++ b/include/linux/sock_diag.h
@@ -0,0 +1,48 @@
+#ifndef __SOCK_DIAG_H__
+#define __SOCK_DIAG_H__
+
+#include <linux/types.h>
+
+#define SOCK_DIAG_BY_FAMILY 20
+
+struct sock_diag_req {
+	__u8	sdiag_family;
+	__u8	sdiag_protocol;
+};
+
+enum {
+	SK_MEMINFO_RMEM_ALLOC,
+	SK_MEMINFO_RCVBUF,
+	SK_MEMINFO_WMEM_ALLOC,
+	SK_MEMINFO_SNDBUF,
+	SK_MEMINFO_FWD_ALLOC,
+	SK_MEMINFO_WMEM_QUEUED,
+	SK_MEMINFO_OPTMEM,
+
+	SK_MEMINFO_VARS,
+};
+
+#ifdef __KERNEL__
+struct sk_buff;
+struct nlmsghdr;
+struct sock;
+
+struct sock_diag_handler {
+	__u8 family;
+	int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+int sock_diag_register(struct sock_diag_handler *h);
+void sock_diag_unregister(struct sock_diag_handler *h);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie);
+void sock_diag_save_cookie(void *sk, __u32 *cookie);
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
+
+extern struct sock *sock_diag_nlsk;
+#endif /* KERNEL */
+#endif
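
A protocol family plugs into the new SOCK_DIAG_BY_FAMILY netlink request by registering a sock_diag_handler. A hypothetical registration sketch (only the structure and the register/unregister calls come from this header; the family choice and dump body are invented):

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>

static int my_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* parse the sock_diag_req following nlh and send replies here */
	return 0;
}

static struct sock_diag_handler my_diag_handler = {
	.family	= AF_INET,	/* the real AF_INET handler lives in inet_diag */
	.dump	= my_diag_dump,
};

static int __init my_diag_init(void)
{
	return sock_diag_register(&my_diag_handler);
}

static void __exit my_diag_exit(void)
{
	sock_diag_unregister(&my_diag_handler);
}

module_init(my_diag_init);
module_exit(my_diag_exit);
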
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index bb4f5fb..176fce9 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -200,6 +200,17 @@
 		driver_unregister(&sdrv->driver);
 }
 
+/**
+ * module_spi_driver() - Helper macro for registering a SPI driver
+ * @__spi_driver: spi_driver struct
+ *
+ * Helper macro for SPI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_spi_driver(__spi_driver) \
+	module_driver(__spi_driver, spi_register_driver, \
+			spi_unregister_driver)
 
 /**
  * struct spi_master - interface to SPI master controller
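
The module_spi_driver() helper collapses the usual registration boilerplate into one line. A minimal sketch of a driver converted to it ("foo" and its probe/remove functions are placeholders):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int foo_probe(struct spi_device *spi)
{
	return 0;
}

static int foo_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
	.probe	= foo_probe,
	.remove	= foo_remove,
};

/* expands to a module_init()/module_exit() pair calling
 * spi_register_driver()/spi_unregister_driver() */
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");
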
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 58971e8..e1b0059 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -28,6 +28,7 @@
 #define _LINUX_SRCU_H
 
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 
 struct srcu_struct_array {
 	int c[2];
@@ -60,18 +61,10 @@
 	__init_srcu_struct((sp), #sp, &__srcu_key); \
 })
 
-# define srcu_read_acquire(sp) \
-		lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define srcu_read_release(sp) \
-		lock_release(&(sp)->dep_map, 1, _THIS_IP_)
-
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 int init_srcu_struct(struct srcu_struct *sp);
 
-# define srcu_read_acquire(sp)  do { } while (0)
-# define srcu_read_release(sp)  do { } while (0)
-
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 void cleanup_srcu_struct(struct srcu_struct *sp);
@@ -90,12 +83,32 @@
  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an SRCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of view
+ * (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then srcu_read_lock_held() returns false even if
+ * the CPU did an srcu_read_lock().  The reason for this is that RCU
+ * ignores CPUs that are in such a section, considering these as in
+ * extended quiescent state, so such a CPU is effectively never in an
+ * RCU read-side critical section regardless of what RCU primitives it
+ * invokes.  This state of affairs is required --- we need to keep an
+ * RCU-free window in idle where the CPU may possibly enter into low
+ * power mode. This way we can notice an extended quiescent state to
+ * other CPUs that started a grace period. Otherwise we would delay any
+ * grace period as long as we run in the idle task.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
-	if (debug_locks)
-		return lock_is_held(&sp->dep_map);
-	return 1;
+	if (rcu_is_cpu_idle())
+		return 0;
+
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+
+	return lock_is_held(&sp->dep_map);
 }
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -145,12 +158,17 @@
  * one way to indirectly wait on an SRCU grace period is to acquire
  * a mutex that is held elsewhere while calling synchronize_srcu() or
  * synchronize_srcu_expedited().
+ *
+ * Note that srcu_read_lock() and the matching srcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
+ * was invoked in process context.
  */
 static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 {
 	int retval = __srcu_read_lock(sp);
 
-	srcu_read_acquire(sp);
+	rcu_lock_acquire(&(sp)->dep_map);
 	return retval;
 }
 
@@ -164,8 +182,51 @@
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	__releases(sp)
 {
-	srcu_read_release(sp);
+	rcu_lock_release(&(sp)->dep_map);
 	__srcu_read_unlock(sp, idx);
 }
 
+/**
+ * srcu_read_lock_raw - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section.  Similar to srcu_read_lock(),
+ * but avoids the RCU-lockdep checking.  This means that it is legal to
+ * use srcu_read_lock_raw() in one context, for example, in an exception
+ * handler, and then have the matching srcu_read_unlock_raw() in another
+ * context, for example in the task that took the exception.
+ *
+ * However, the entire SRCU read-side critical section must reside within a
+ * single task.  For example, beware of using srcu_read_lock_raw() in
+ * a device interrupt handler and srcu_read_unlock() in the interrupted
+ * task:  This will not work if interrupts are threaded.
+ */
+static inline int srcu_read_lock_raw(struct srcu_struct *sp)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret =  __srcu_read_lock(sp);
+	local_irq_restore(flags);
+	return ret;
+}
+
+/**
+ * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock_raw().
+ *
+ * Exit an SRCU read-side critical section without lockdep-RCU checking.
+ * See srcu_read_lock_raw() for more details.
+ */
+static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__srcu_read_unlock(sp, idx);
+	local_irq_restore(flags);
+}
+
 #endif
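
srcu_read_lock_raw()/srcu_read_unlock_raw() trade lockdep-RCU checking for the freedom to start and end the critical section in different contexts of the same task. A sketch of the intended pairing (the exception-entry/-exit hooks and my_srcu are invented, and init_srcu_struct() is assumed to have run earlier):

#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at boot */
static int my_srcu_idx;

void my_exception_entry(void)		/* e.g. runs in an exception handler */
{
	my_srcu_idx = srcu_read_lock_raw(&my_srcu);
}

void my_exception_exit(void)		/* later, in the interrupted task */
{
	srcu_read_unlock_raw(&my_srcu, my_srcu_idx);
}
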
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 061e560..dcf35b0 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -94,6 +94,15 @@
 		} ghz5;		/* 5GHz band */
 	} antenna_gain;
 
+	struct {
+		struct {
+			u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+		} ghz2;
+		struct {
+			u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+		} ghz5;
+	} fem;
+
 	/* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */
 };
 
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 9894120..c814ae6 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -432,6 +432,23 @@
 #define  SSB_SPROM8_RXPO2G		0x00FF	/* 2GHz RX power offset */
 #define  SSB_SPROM8_RXPO5G		0xFF00	/* 5GHz RX power offset */
 #define  SSB_SPROM8_RXPO5G_SHIFT	8
+#define SSB_SPROM8_FEM2G		0x00AE
+#define SSB_SPROM8_FEM5G		0x00B0
+#define  SSB_SROM8_FEM_TSSIPOS		0x0001
+#define  SSB_SROM8_FEM_TSSIPOS_SHIFT	0
+#define  SSB_SROM8_FEM_EXTPA_GAIN	0x0006
+#define  SSB_SROM8_FEM_EXTPA_GAIN_SHIFT	1
+#define  SSB_SROM8_FEM_PDET_RANGE	0x00F8
+#define  SSB_SROM8_FEM_PDET_RANGE_SHIFT	3
+#define  SSB_SROM8_FEM_TR_ISO		0x0700
+#define  SSB_SROM8_FEM_TR_ISO_SHIFT	8
+#define  SSB_SROM8_FEM_ANTSWLUT		0xF800
+#define  SSB_SROM8_FEM_ANTSWLUT_SHIFT	11
+#define SSB_SPROM8_THERMAL		0x00B2
+#define SSB_SPROM8_MPWR_RAWTS		0x00B4
+#define SSB_SPROM8_TS_SLP_OPT_CORRX	0x00B6
+#define SSB_SPROM8_FOC_HWIQ_IQSWP	0x00B8
+#define SSB_SPROM8_PHYCAL_TEMPDELTA	0x00BA
 #define SSB_SPROM8_MAXP_BG		0x00C0  /* Max Power 2GHz in path 1 */
 #define  SSB_SPROM8_MAXP_BG_MASK	0x00FF  /* Mask for Max Power 2GHz */
 #define  SSB_SPROM8_ITSSI_BG		0xFF00	/* Mask for path 1 itssi_bg */
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 5efd8ce..57531f8 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -203,7 +203,7 @@
 extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
 
 extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
-					mode_t, struct cache_detail *);
+					umode_t, struct cache_detail *);
 extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
 
 extern void qword_add(char **bpp, int *lp, char *str);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3d8f9c4..2c5993a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -215,7 +215,7 @@
 	return true;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
 				   const struct sockaddr *sap2)
 {
@@ -237,10 +237,10 @@
 	struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
 
 	dsin6->sin6_family = ssin6->sin6_family;
-	ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr);
+	dsin6->sin6_addr = ssin6->sin6_addr;
 	return true;
 }
-#else	/* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#else	/* !IS_ENABLED(CONFIG_IPV6) */
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
 				   const struct sockaddr *sap2)
 {
@@ -252,7 +252,7 @@
 {
 	return false;
 }
-#endif	/* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#endif	/* !IS_ENABLED(CONFIG_IPV6) */
 
 /**
  * rpc_cmp_addr - compare the address portion of two sockaddrs.
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index e4ea430..2bb03d7 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -55,7 +55,7 @@
 struct cache_detail;
 extern struct dentry *rpc_create_cache_dir(struct dentry *,
 					   struct qstr *,
-					   mode_t umode,
+					   umode_t umode,
 					   struct cache_detail *);
 extern void rpc_remove_cache_dir(struct dentry *);
 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 57a6924..95040cc 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <linux/freezer.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_VT
@@ -331,6 +332,8 @@
 #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
 #define PM_POST_RESTORE		0x0006 /* Restore failed */
 
+extern struct mutex pm_mutex;
+
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -351,6 +354,19 @@
 extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+	freezer_do_not_count();
+	mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+	mutex_unlock(&pm_mutex);
+	freezer_count();
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@
 #define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
 
-extern struct mutex pm_mutex;
-
-#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
-	mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-	mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
 /*
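
lock_system_sleep()/unlock_system_sleep() now live under CONFIG_PM_SLEEP and wrap the pm_mutex acquisition with freezer_do_not_count()/freezer_count(), so a task blocked on the mutex does not hold up the freezer during suspend. A hedged sketch of a subsystem (e.g. memory hot-add) excluding system sleep around its work; example_memory_hotadd and do_the_hotadd are placeholders:

    #include <linux/suspend.h>

    static int example_memory_hotadd(void)
    {
            int ret;

            lock_system_sleep();    /* freezer_do_not_count() + mutex_lock(&pm_mutex) */
            ret = do_the_hotadd();  /* placeholder for the real work */
            unlock_system_sleep();  /* mutex_unlock(&pm_mutex) + freezer_count() */

            return ret;
    }
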
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 86a24b11..515669f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -475,7 +475,7 @@
 asmlinkage long sys_pivot_root(const char __user *new_root,
 				const char __user *put_old);
 asmlinkage long sys_chroot(const char __user *filename);
-asmlinkage long sys_mknod(const char __user *filename, int mode,
+asmlinkage long sys_mknod(const char __user *filename, umode_t mode,
 				unsigned dev);
 asmlinkage long sys_link(const char __user *oldname,
 				const char __user *newname);
@@ -483,8 +483,8 @@
 asmlinkage long sys_unlink(const char __user *pathname);
 asmlinkage long sys_rename(const char __user *oldname,
 				const char __user *newname);
-asmlinkage long sys_chmod(const char __user *filename, mode_t mode);
-asmlinkage long sys_fchmod(unsigned int fd, mode_t mode);
+asmlinkage long sys_chmod(const char __user *filename, umode_t mode);
+asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
 
 asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
 #if BITS_PER_LONG == 32
@@ -517,9 +517,9 @@
 			       loff_t __user *offset, size_t count);
 asmlinkage long sys_readlink(const char __user *path,
 				char __user *buf, int bufsiz);
-asmlinkage long sys_creat(const char __user *pathname, int mode);
+asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
 asmlinkage long sys_open(const char __user *filename,
-				int flags, int mode);
+				int flags, umode_t mode);
 asmlinkage long sys_close(unsigned int fd);
 asmlinkage long sys_access(const char __user *filename, int mode);
 asmlinkage long sys_vhangup(void);
@@ -582,7 +582,7 @@
 asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
 			    unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
 asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-asmlinkage long sys_mkdir(const char __user *pathname, int mode);
+asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
 asmlinkage long sys_chdir(const char __user *filename);
 asmlinkage long sys_fchdir(unsigned int fd);
 asmlinkage long sys_rmdir(const char __user *pathname);
@@ -679,7 +679,7 @@
 asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
 		unsigned long third, void __user *ptr, long fifth);
 
-asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr);
+asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
 asmlinkage long sys_mq_unlink(const char __user *name);
 asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
 asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
@@ -753,11 +753,11 @@
 asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
 				 __u32 __user *ustatus);
 asmlinkage long sys_spu_create(const char __user *name,
-		unsigned int flags, mode_t mode, int fd);
+		unsigned int flags, umode_t mode, int fd);
 
-asmlinkage long sys_mknodat(int dfd, const char __user * filename, int mode,
+asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
 			    unsigned dev);
-asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, int mode);
+asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
 asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag);
 asmlinkage long sys_symlinkat(const char __user * oldname,
 			      int newdfd, const char __user * newname);
@@ -769,11 +769,11 @@
 			      struct timeval __user *utimes);
 asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
 asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
-			     mode_t mode);
+			     umode_t mode);
 asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
 			     gid_t group, int flag);
 asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
-			   int mode);
+			   umode_t mode);
 asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
 			       struct stat __user *statbuf, int flag);
 asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 703cfa3..bb9127d 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -1038,7 +1038,7 @@
 	const char *procname;		/* Text ID for /proc/sys, or zero */
 	void *data;
 	int maxlen;
-	mode_t mode;
+	umode_t mode;
 	struct ctl_table *child;
 	struct ctl_table *parent;	/* Automatically set */
 	proc_handler *proc_handler;	/* Callback for text formatting */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index dac0859..0010009 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -25,7 +25,7 @@
 
 struct attribute {
 	const char		*name;
-	mode_t			mode;
+	umode_t			mode;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key	*key;
 	struct lock_class_key	skey;
@@ -55,7 +55,7 @@
 
 struct attribute_group {
 	const char		*name;
-	mode_t			(*is_visible)(struct kobject *,
+	umode_t			(*is_visible)(struct kobject *,
 					      struct attribute *, int);
 	struct attribute	**attrs;
 };
@@ -133,7 +133,7 @@
 int __must_check sysfs_create_files(struct kobject *kobj,
 				   const struct attribute **attr);
 int __must_check sysfs_chmod_file(struct kobject *kobj,
-				  const struct attribute *attr, mode_t mode);
+				  const struct attribute *attr, umode_t mode);
 void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
 void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
 
@@ -221,7 +221,7 @@
 }
 
 static inline int sysfs_chmod_file(struct kobject *kobj,
-				   const struct attribute *attr, mode_t mode)
+				   const struct attribute *attr, umode_t mode)
 {
 	return 0;
 }
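
These hunks are part of the tree-wide switch from mode_t to umode_t for file-mode values (syscalls, sysctl tables, sysfs attributes). A minimal sketch of a driver-local attribute group adapted to the new is_visible prototype; all example_* names are made up for illustration:

    static umode_t example_attr_visible(struct kobject *kobj,
                                        struct attribute *attr, int index)
    {
            return attr->mode;      /* return 0 here to hide an entry instead */
    }

    static struct attribute *example_attrs[] = {
            NULL,
    };

    static const struct attribute_group example_group = {
            .is_visible     = example_attr_visible,
            .attrs          = example_attrs,
    };
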
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7f59ee9..46a85c9 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -238,6 +238,11 @@
 	u32	end_seq;
 };
 
+/* These are used to set the sack_ok field in struct tcp_options_received */
+#define TCP_SACK_SEEN     (1 << 0)   /* 1 = peer is SACK capable */
+#define TCP_FACK_ENABLED  (1 << 1)   /* 1 = FACK is enabled locally */
+#define TCP_DSACK_SEEN    (1 << 2)   /* 1 = DSACK was received from peer */
+
 struct tcp_options_received {
 /*	PAWS/RTTM data	*/
 	long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
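
The sack_ok field packs three independent flags; the named bits above replace open-coded constants in the TCP input path. A small sketch of helpers one might write around them (the helpers are hypothetical, only the bit names come from the header):

    #include <linux/tcp.h>

    static inline void example_mark_sack_seen(struct tcp_options_received *opt)
    {
            opt->sack_ok |= TCP_SACK_SEEN;          /* peer advertised SACK */
    }

    static inline bool example_dsack_seen(const struct tcp_options_received *opt)
    {
            return opt->sack_ok & TCP_DSACK_SEEN;   /* a D-SACK block was received */
    }
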
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc..ab8be90 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -7,6 +7,7 @@
 #define _LINUX_TICK_H
 
 #include <linux/clockchips.h>
+#include <linux/irqflags.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -121,14 +122,16 @@
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
-extern void tick_nohz_stop_sched_tick(int inidle);
-extern void tick_nohz_restart_sched_tick(void);
+extern void tick_nohz_idle_enter(void);
+extern void tick_nohz_idle_exit(void);
+extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
-static inline void tick_nohz_stop_sched_tick(int inidle) { }
-static inline void tick_nohz_restart_sched_tick(void) { }
+static inline void tick_nohz_idle_enter(void) { }
+static inline void tick_nohz_idle_exit(void) { }
+
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
 	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
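
The old tick_nohz_stop_sched_tick(inidle)/tick_nohz_restart_sched_tick() pair becomes tick_nohz_idle_enter()/tick_nohz_idle_exit(), with tick_nohz_irq_exit() covering the interrupt-exit path. A rough, simplified sketch of an arch idle loop after the rename; arch_safe_halt() stands in for the real arch-specific idle hook and details such as RCU idle hooks are omitted:

    void cpu_idle(void)
    {
            while (1) {
                    tick_nohz_idle_enter();         /* was tick_nohz_stop_sched_tick(1) */
                    while (!need_resched())
                            arch_safe_halt();       /* placeholder for the arch idle hook */
                    tick_nohz_idle_exit();          /* was tick_nohz_restart_sched_tick() */
                    schedule();
            }
    }
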
diff --git a/include/linux/types.h b/include/linux/types.h
index 57a9723..e5fa503 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -24,6 +24,7 @@
 typedef __kernel_dev_t		dev_t;
 typedef __kernel_ino_t		ino_t;
 typedef __kernel_mode_t		mode_t;
+typedef unsigned short		umode_t;
 typedef __kernel_nlink_t	nlink_t;
 typedef __kernel_off_t		off_t;
 typedef __kernel_pid_t		pid_t;
@@ -188,7 +189,7 @@
  * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
  * common 32/64-bit compat problems.
  * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
- * architectures) and to 8-byte boundaries on 64-bit architetures.  The new
+ * architectures) and to 8-byte boundaries on 64-bit architectures.  The new
  * aligned_64 type enforces 8-byte alignment so that structs containing
  * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
  * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h
new file mode 100644
index 0000000..b1d2bf1
--- /dev/null
+++ b/include/linux/unix_diag.h
@@ -0,0 +1,54 @@
+#ifndef __UNIX_DIAG_H__
+#define __UNIX_DIAG_H__
+
+#include <linux/types.h>
+
+struct unix_diag_req {
+	__u8	sdiag_family;
+	__u8	sdiag_protocol;
+	__u16	pad;
+	__u32	udiag_states;
+	__u32	udiag_ino;
+	__u32	udiag_show;
+	__u32	udiag_cookie[2];
+};
+
+#define UDIAG_SHOW_NAME		0x00000001	/* show name (not path) */
+#define UDIAG_SHOW_VFS		0x00000002	/* show VFS inode info */
+#define UDIAG_SHOW_PEER		0x00000004	/* show peer socket info */
+#define UDIAG_SHOW_ICONS	0x00000008	/* show pending connections */
+#define UDIAG_SHOW_RQLEN	0x00000010	/* show skb receive queue len */
+#define UDIAG_SHOW_MEMINFO	0x00000020	/* show memory info of a socket */
+
+struct unix_diag_msg {
+	__u8	udiag_family;
+	__u8	udiag_type;
+	__u8	udiag_state;
+	__u8	pad;
+
+	__u32	udiag_ino;
+	__u32	udiag_cookie[2];
+};
+
+enum {
+	UNIX_DIAG_NAME,
+	UNIX_DIAG_VFS,
+	UNIX_DIAG_PEER,
+	UNIX_DIAG_ICONS,
+	UNIX_DIAG_RQLEN,
+	UNIX_DIAG_MEMINFO,
+
+	UNIX_DIAG_MAX,
+};
+
+struct unix_diag_vfs {
+	__u32	udiag_vfs_ino;
+	__u32	udiag_vfs_dev;
+};
+
+struct unix_diag_rqlen {
+	__u32	udiag_rqueue;
+	__u32	udiag_wqueue;
+};
+
+#endif
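
This new header is the userspace ABI for AF_UNIX sock_diag queries. A hedged userspace sketch of filling the request; the netlink socket setup and message framing around it are omitted and example_fill_req is illustrative:

    #include <linux/unix_diag.h>
    #include <string.h>
    #include <sys/socket.h>

    static void example_fill_req(struct unix_diag_req *req)
    {
            memset(req, 0, sizeof(*req));
            req->sdiag_family = AF_UNIX;
            req->udiag_states = -1;         /* match sockets in any state */
            req->udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER | UDIAG_SHOW_RQLEN;
    }
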
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 4269c3f..27a4e16 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -935,7 +935,7 @@
  */
 struct usb_class_driver {
 	char *name;
-	char *(*devnode)(struct device *dev, mode_t *mode);
+	char *(*devnode)(struct device *dev, umode_t *mode);
 	const struct file_operations *fops;
 	int minor_base;
 };
@@ -953,6 +953,18 @@
 
 extern void usb_deregister(struct usb_driver *);
 
+/**
+ * module_usb_driver() - Helper macro for registering a USB driver
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for USB drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_usb_driver(__usb_driver) \
+	module_driver(__usb_driver, usb_register, \
+		       usb_deregister)
+
 extern int usb_register_device_driver(struct usb_device_driver *,
 			struct module *);
 extern void usb_deregister_device_driver(struct usb_device_driver *);
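
module_usb_driver() expands to module_driver() with usb_register()/usb_deregister(), so a driver whose module_init()/module_exit() only registered and deregistered the usb_driver can drop that boilerplate. A sketch, assuming example_probe(), example_disconnect() and example_id_table exist elsewhere in the driver:

    static struct usb_driver example_driver = {
            .name           = "example",
            .probe          = example_probe,
            .disconnect     = example_disconnect,
            .id_table       = example_id_table,
    };

    module_usb_driver(example_driver);  /* replaces the module_init()/module_exit() pair */
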
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e9e72bd..5206d65 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -102,6 +102,10 @@
  *	vdev: the virtio_device
  *	This gives the final feature bits for the device: it can change
  *	the dev->feature bits if it wants.
+ * @bus_name: return the bus name associated with the device
+ *	vdev: the virtio_device
+ *      This returns a pointer to the bus name a la pci_name from which
+ *      the caller can then copy.
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
@@ -119,6 +123,7 @@
 	void (*del_vqs)(struct virtio_device *);
 	u32 (*get_features)(struct virtio_device *vdev);
 	void (*finalize_features)(struct virtio_device *vdev);
+	const char *(*bus_name)(struct virtio_device *vdev);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */
@@ -184,5 +189,14 @@
 		return ERR_PTR(err);
 	return vq;
 }
+
+static inline
+const char *virtio_bus_name(struct virtio_device *vdev)
+{
+	if (!vdev->config->bus_name)
+		return "virtio";
+	return vdev->config->bus_name(vdev);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_VIRTIO_CONFIG_H */
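
Transports can now report a human-readable bus name, and virtio_bus_name() falls back to "virtio" when the op is not provided. A minimal sketch of a driver using it for diagnostics; example_probe is illustrative:

    static int example_probe(struct virtio_device *vdev)
    {
            dev_info(&vdev->dev, "probing on bus %s\n", virtio_bus_name(vdev));
            return 0;
    }
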
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4bde182..dcdfc2b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -131,6 +131,7 @@
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #ifdef CONFIG_SMP
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3efc9f3..a9ce45e 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,13 +77,13 @@
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
 	{ .flags = word, .bit_nr = bit, }
 
-extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
 
 #define init_waitqueue_head(q)				\
 	do {						\
 		static struct lock_class_key __key;	\
 							\
-		__init_waitqueue_head((q), &__key);	\
+		__init_waitqueue_head((q), #q, &__key);	\
 	} while (0)
 
 #ifdef CONFIG_LOCKDEP
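
__init_waitqueue_head() now also takes a name, and the init_waitqueue_head() wrapper stringizes its argument so lockdep reports can identify the wait queue. Roughly, for a caller:

    wait_queue_head_t my_wq;

    init_waitqueue_head(&my_wq);
    /* expands approximately to:
     *      static struct lock_class_key __key;
     *      __init_waitqueue_head((&my_wq), "&my_wq", &__key);
     */
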
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index e0aa396..3157cc1 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -309,7 +309,7 @@
 #define WANOPT_EVEN	2
 
 /* CHDLC Protocol Options */
-/* DF Commmented out for now.
+/* DF Commented out for now.
 
 #define WANOPT_CHDLC_NO_DCD		IGNORE_DCD_FOR_LINK_STAT
 #define WANOPT_CHDLC_NO_CTS		IGNORE_CTS_FOR_LINK_STAT
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index 4b69739..0d63731 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -54,6 +54,9 @@
 	int board_ref_clock;
 	int board_tcxo_clock;
 	unsigned long platform_quirks;
+	bool pwr_in_suspend;
+
+	struct wl1271_if_operations *ops;
 };
 
 /* Platform does not support level trigger interrupts */
@@ -73,6 +76,6 @@
 
 #endif
 
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void);
+struct wl12xx_platform_data *wl12xx_get_platform_data(void);
 
 #endif
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index b1377b9..5fb2c3d 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -254,7 +254,7 @@
 static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
 {
 	struct v4l2_subdev *sd = i2c_get_clientdata(client);
-	struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
 	return icd ? icd->vdev : NULL;
 }
 
@@ -279,6 +279,11 @@
 	return container_of(vq, struct soc_camera_device, vb_vidq);
 }
 
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+	return (icd->iface << 8) | (icd->devnum + 1);
+}
+
 void soc_camera_lock(struct vb2_queue *vq);
 void soc_camera_unlock(struct vb2_queue *vq);
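
The subdev group id is now computed from the host interface number and device number instead of stuffing the icd pointer into grp_id, and the icd back-pointer travels via the subdev host data. A hedged sketch of how a camera host might tag a bound subdev under the new scheme; example_bind is illustrative:

    static void example_bind(struct v4l2_subdev *sd, struct soc_camera_device *icd)
    {
            sd->grp_id = soc_camera_grp_id(icd);    /* instead of casting the icd pointer */
            v4l2_set_subdev_hostdata(sd, icd);      /* lets soc_camera_i2c_to_vdev() recover icd */
    }
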
 
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index cbc6bb0..f68dce2 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -151,7 +151,8 @@
 			       const struct in6_addr *src_addr);
 extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
 
-extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len);
+extern void addrconf_prefix_rcv(struct net_device *dev,
+				u8 *opt, int len, bool sllao);
 
 /*
  *	anycast prototypes (anycast.c)
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 91ab5b0..5a4e29b 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -11,10 +11,13 @@
 extern void unix_gc(void);
 extern void wait_for_unix_gc(void);
 extern struct sock *unix_get_socket(struct file *filp);
+extern struct sock *unix_peer_get(struct sock *);
 
 #define UNIX_HASH_SIZE	256
 
 extern unsigned int unix_tot_inflight;
+extern spinlock_t unix_table_lock;
+extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 
 struct unix_address {
 	atomic_t	refcnt;
@@ -63,6 +66,9 @@
 
 #define peer_wait peer_wq.wait
 
+long unix_inq_len(struct sock *sk);
+long unix_outq_len(struct sock *sk);
+
 #ifdef CONFIG_SYSCTL
 extern int unix_sysctl_register(struct net *net);
 extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/arp.h b/include/net/arp.h
index 4979af8b..0013dc8 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -23,7 +23,7 @@
 
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
-	hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+	hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
 	     n != NULL;
 	     n = rcu_dereference_bh(n->next)) {
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index 497ef64..5865924 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -15,7 +15,6 @@
 
 
 #define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
-#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
 
 struct sk_buff;
 
@@ -36,24 +35,18 @@
 
 
 struct atmarp_entry {
-	__be32		ip;		/* IP address */
 	struct clip_vcc	*vccs;		/* active VCCs; NULL if resolution is
 					   pending */
 	unsigned long	expires;	/* entry expiration time */
 	struct neighbour *neigh;	/* neighbour back-pointer */
 };
 
-
 #define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
 
-
 struct clip_priv {
 	int number;			/* for convenience ... */
 	spinlock_t xoff_lock;		/* ensures that pop is atomic (SMP) */
 	struct net_device *next;	/* next CLIP interface */
 };
 
-
-extern struct neigh_table *clip_tbl_hook;
-
 #endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e86af08..abaad6e 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -36,6 +36,11 @@
 #define PF_BLUETOOTH	AF_BLUETOOTH
 #endif
 
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1	1
+#define BLUETOOTH_VER_1_2	2
+#define BLUETOOTH_VER_2_0	3
+
 /* Reserv for core and drivers use */
 #define BT_SKB_RESERVE	8
 
@@ -77,6 +82,33 @@
 #define BT_POWER_FORCE_ACTIVE_OFF 0
 #define BT_POWER_FORCE_ACTIVE_ON  1
 
+#define BT_CHANNEL_POLICY	10
+
+/* BR/EDR only (default policy)
+ *   AMP controllers cannot be used.
+ *   Channel move requests from the remote device are denied.
+ *   If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_BREDR_ONLY		0
+
+/* BR/EDR Preferred
+ *   Allow use of AMP controllers.
+ *   If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ *   Channel move requests from the remote device are allowed.
+ */
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED	1
+
+/* AMP Preferred
+ *   Allow use of AMP controllers
+ *   If the L2CAP channel is currently on BR/EDR and AMP controller
+ *     resources are available, initiate a channel move to AMP.
+ *   Channel move requests from the remote device are allowed.
+ *   If the L2CAP socket has not been connected yet, try to create
+ *     and configure the channel directly on an AMP controller rather
+ *     than BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_AMP_PREFERRED		2
+
 __printf(2, 3)
 int bt_printk(const char *level, const char *fmt, ...);
 
@@ -158,7 +190,7 @@
 	__u8 pkt_type;
 	__u8 incoming;
 	__u16 expect;
-	__u8 tx_seq;
+	__u16 tx_seq;
 	__u8 retries;
 	__u8 sar;
 	unsigned short channel;
@@ -218,32 +250,10 @@
 
 extern struct dentry *bt_debugfs;
 
-#ifdef CONFIG_BT_L2CAP
 int l2cap_init(void);
 void l2cap_exit(void);
-#else
-static inline int l2cap_init(void)
-{
-	return 0;
-}
 
-static inline void l2cap_exit(void)
-{
-}
-#endif
-
-#ifdef CONFIG_BT_SCO
 int sco_init(void);
 void sco_exit(void);
-#else
-static inline int sco_init(void)
-{
-	return 0;
-}
-
-static inline void sco_exit(void)
-{
-}
-#endif
 
 #endif /* __BLUETOOTH_H */
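
BT_CHANNEL_POLICY is a new SOL_BLUETOOTH socket option next to BT_SECURITY and BT_POWER. A userspace sketch of selecting the default BR/EDR-only policy, assuming an open L2CAP socket and that the option takes a 32-bit value like its neighbours:

    #include <stdint.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>    /* assumed to provide SOL_BLUETOOTH */

    static int example_set_policy(int fd)
    {
            uint32_t policy = BT_CHANNEL_POLICY_BREDR_ONLY;

            return setsockopt(fd, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
                              &policy, sizeof(policy));
    }
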
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index aaf79af..5b2fed5 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -88,6 +88,14 @@
 	HCI_RESET,
 };
 
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+	HCI_LE_SCAN,
+};
+
 /* HCI ioctl defines */
 #define HCIDEVUP	_IOW('H', 201, int)
 #define HCIDEVDOWN	_IOW('H', 202, int)
@@ -202,6 +210,7 @@
 
 #define LMP_EV4		0x01
 #define LMP_EV5		0x02
+#define LMP_NO_BREDR	0x20
 #define LMP_LE		0x40
 
 #define LMP_SNIFF_SUBR	0x02
@@ -264,6 +273,17 @@
 #define HCI_LK_SMP_IRK			0x82
 #define HCI_LK_SMP_CSRK			0x83
 
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_AUTH_FAILURE		0x05
+#define HCI_ERROR_REJ_BAD_ADDR		0x0f
+#define HCI_ERROR_REMOTE_USER_TERM	0x13
+#define HCI_ERROR_LOCAL_HOST_TERM	0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED	0x18
+
+/* Flow control modes */
+#define HCI_FLOW_CTL_MODE_PACKET_BASED	0x00
+#define HCI_FLOW_CTL_MODE_BLOCK_BASED	0x01
+
 /* -----  HCI Commands ---- */
 #define HCI_OP_NOP			0x0000
 
@@ -446,6 +466,14 @@
 
 #define HCI_OP_USER_CONFIRM_NEG_REPLY	0x042d
 
+#define HCI_OP_USER_PASSKEY_REPLY		0x042e
+struct hci_cp_user_passkey_reply {
+	bdaddr_t bdaddr;
+	__le32	passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY	0x042f
+
 #define HCI_OP_REMOTE_OOB_DATA_REPLY	0x0430
 struct hci_cp_remote_oob_data_reply {
 	bdaddr_t bdaddr;
@@ -662,6 +690,12 @@
 
 #define HCI_OP_READ_INQ_RSP_TX_POWER	0x0c58
 
+#define HCI_OP_READ_FLOW_CONTROL_MODE	0x0c66
+struct hci_rp_read_flow_control_mode {
+	__u8     status;
+	__u8     mode;
+} __packed;
+
 #define HCI_OP_WRITE_LE_HOST_SUPPORTED	0x0c6d
 struct hci_cp_write_le_host_supported {
 	__u8 le;
@@ -716,6 +750,14 @@
 	bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_OP_READ_DATA_BLOCK_SIZE	0x100a
+struct hci_rp_read_data_block_size {
+	__u8     status;
+	__le16   max_acl_len;
+	__le16   block_len;
+	__le16   num_blocks;
+} __packed;
+
 #define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY	0x0c1c
 struct hci_cp_write_page_scan_activity {
 	__le16   interval;
@@ -726,6 +768,21 @@
 	#define PAGE_SCAN_TYPE_STANDARD		0x00
 	#define PAGE_SCAN_TYPE_INTERLACED	0x01
 
+#define HCI_OP_READ_LOCAL_AMP_INFO	0x1409
+struct hci_rp_read_local_amp_info {
+	__u8     status;
+	__u8     amp_status;
+	__le32   total_bw;
+	__le32   max_bw;
+	__le32   min_latency;
+	__le32   max_pdu;
+	__u8     amp_type;
+	__le16   pal_cap;
+	__le16   max_assoc_size;
+	__le32   max_flush_to;
+	__le32   be_flush_to;
+} __packed;
+
 #define HCI_OP_LE_SET_EVENT_MASK	0x2001
 struct hci_cp_le_set_event_mask {
 	__u8     mask[8];
@@ -738,6 +795,18 @@
 	__u8     le_max_pkt;
 } __packed;
 
+#define HCI_OP_LE_SET_SCAN_PARAM	0x200b
+struct hci_cp_le_set_scan_param {
+	__u8    type;
+	__le16  interval;
+	__le16  window;
+	__u8    own_address_type;
+	__u8    filter_policy;
+} __packed;
+
+#define LE_SCANNING_DISABLED		0x00
+#define LE_SCANNING_ENABLED		0x01
+
 #define HCI_OP_LE_SET_SCAN_ENABLE	0x200c
 struct hci_cp_le_set_scan_enable {
 	__u8     enable;
@@ -913,9 +982,14 @@
 } __packed;
 
 #define HCI_EV_NUM_COMP_PKTS		0x13
+struct hci_comp_pkts_info {
+	__le16   handle;
+	__le16   count;
+} __packed;
+
 struct hci_ev_num_comp_pkts {
 	__u8     num_hndl;
-	/* variable length part */
+	struct hci_comp_pkts_info handles[0];
 } __packed;
 
 #define HCI_EV_MODE_CHANGE		0x14
@@ -1054,6 +1128,11 @@
 	__le32		passkey;
 } __packed;
 
+#define HCI_EV_USER_PASSKEY_REQUEST	0x34
+struct hci_ev_user_passkey_req {
+	bdaddr_t	bdaddr;
+} __packed;
+
 #define HCI_EV_REMOTE_OOB_DATA_REQUEST	0x35
 struct hci_ev_remote_oob_data_request {
 	bdaddr_t bdaddr;
@@ -1309,4 +1388,6 @@
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
+extern int enable_hs;
+
 #endif /* __HCI_H */
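
The Number of Completed Packets event now exposes its per-handle entries as a proper flexible array instead of an untyped "variable length part". A minimal sketch of walking it; example_handle_num_comp_pkts is illustrative:

    static void example_handle_num_comp_pkts(struct hci_ev_num_comp_pkts *ev)
    {
            int i;

            for (i = 0; i < ev->num_hndl; i++) {
                    __u16 handle = __le16_to_cpu(ev->handles[i].handle);
                    __u16 count  = __le16_to_cpu(ev->handles[i].count);

                    /* credit 'count' completed packets back to connection 'handle' */
                    (void)handle;
                    (void)count;
            }
    }
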
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3779ea3..5e2e984 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -28,9 +28,8 @@
 #include <linux/interrupt.h>
 #include <net/bluetooth/hci.h>
 
-/* HCI upper protocols */
-#define HCI_PROTO_L2CAP	0
-#define HCI_PROTO_SCO	1
+/* HCI priority */
+#define HCI_PRIO_MAX	7
 
 /* HCI Core structures */
 struct inquiry_data {
@@ -51,14 +50,12 @@
 };
 
 struct inquiry_cache {
-	spinlock_t		lock;
 	__u32			timestamp;
 	struct inquiry_entry	*list;
 };
 
 struct hci_conn_hash {
 	struct list_head list;
-	spinlock_t       lock;
 	unsigned int     acl_num;
 	unsigned int     sco_num;
 	unsigned int     le_num;
@@ -115,7 +112,7 @@
 #define NUM_REASSEMBLY 4
 struct hci_dev {
 	struct list_head list;
-	spinlock_t	lock;
+	struct mutex	lock;
 	atomic_t	refcnt;
 
 	char		name[8];
@@ -150,6 +147,19 @@
 	__u16		sniff_min_interval;
 	__u16		sniff_max_interval;
 
+	__u8		amp_status;
+	__u32		amp_total_bw;
+	__u32		amp_max_bw;
+	__u32		amp_min_latency;
+	__u32		amp_max_pdu;
+	__u8		amp_type;
+	__u16		amp_pal_cap;
+	__u16		amp_assoc_size;
+	__u32		amp_max_flush_to;
+	__u32		amp_be_flush_to;
+
+	__u8		flow_ctl_mode;
+
 	unsigned int	auto_accept_delay;
 
 	unsigned long	quirks;
@@ -166,6 +176,11 @@
 	unsigned int	sco_pkts;
 	unsigned int	le_pkts;
 
+	__u16		block_len;
+	__u16		block_mtu;
+	__u16		num_blocks;
+	__u16		block_cnt;
+
 	unsigned long	acl_last_tx;
 	unsigned long	sco_last_tx;
 	unsigned long	le_last_tx;
@@ -173,13 +188,18 @@
 	struct workqueue_struct	*workqueue;
 
 	struct work_struct	power_on;
-	struct work_struct	power_off;
-	struct timer_list	off_timer;
+	struct delayed_work	power_off;
+
+	__u16			discov_timeout;
+	struct delayed_work	discov_off;
+
+	struct delayed_work	service_cache;
 
 	struct timer_list	cmd_timer;
-	struct tasklet_struct	cmd_task;
-	struct tasklet_struct	rx_task;
-	struct tasklet_struct	tx_task;
+
+	struct work_struct	rx_work;
+	struct work_struct	cmd_work;
+	struct work_struct	tx_work;
 
 	struct sk_buff_head	rx_q;
 	struct sk_buff_head	raw_q;
@@ -195,6 +215,8 @@
 
 	__u16			init_last_cmd;
 
+	struct list_head	mgmt_pending;
+
 	struct inquiry_cache	inq_cache;
 	struct hci_conn_hash	conn_hash;
 	struct list_head	blacklist;
@@ -206,7 +228,7 @@
 	struct list_head	remote_oob_data;
 
 	struct list_head	adv_entries;
-	struct timer_list	adv_timer;
+	struct delayed_work	adv_work;
 
 	struct hci_dev_stats	stat;
 
@@ -226,6 +248,8 @@
 
 	struct module		*owner;
 
+	unsigned long		dev_flags;
+
 	int (*open)(struct hci_dev *hdev);
 	int (*close)(struct hci_dev *hdev);
 	int (*flush)(struct hci_dev *hdev);
@@ -273,20 +297,19 @@
 	unsigned int	sent;
 
 	struct sk_buff_head data_q;
+	struct list_head chan_list;
 
-	struct timer_list disc_timer;
+	struct delayed_work disc_work;
 	struct timer_list idle_timer;
 	struct timer_list auto_accept_timer;
 
-	struct work_struct work_add;
-	struct work_struct work_del;
-
 	struct device	dev;
 	atomic_t	devref;
 
 	struct hci_dev	*hdev;
 	void		*l2cap_data;
 	void		*sco_data;
+	void		*smp_conn;
 
 	struct hci_conn	*link;
 
@@ -295,25 +318,39 @@
 	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
 };
 
-extern struct hci_proto *hci_proto[];
+struct hci_chan {
+	struct list_head list;
+
+	struct hci_conn *conn;
+	struct sk_buff_head data_q;
+	unsigned int	sent;
+};
+
 extern struct list_head hci_dev_list;
 extern struct list_head hci_cb_list;
 extern rwlock_t hci_dev_list_lock;
 extern rwlock_t hci_cb_list_lock;
 
+/* ----- HCI interface to upper protocols ----- */
+extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern int l2cap_disconn_ind(struct hci_conn *hcon);
+extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+
 /* ----- Inquiry cache ----- */
 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
 #define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */
 
-#define inquiry_cache_lock(c)		spin_lock(&c->lock)
-#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
-#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
-#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)
-
 static inline void inquiry_cache_init(struct hci_dev *hdev)
 {
 	struct inquiry_cache *c = &hdev->inq_cache;
-	spin_lock_init(&c->lock);
 	c->list = NULL;
 }
 
@@ -353,15 +390,15 @@
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
 	INIT_LIST_HEAD(&h->list);
-	spin_lock_init(&h->lock);
 	h->acl_num = 0;
 	h->sco_num = 0;
+	h->le_num = 0;
 }
 
 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	list_add(&c->list, &h->list);
+	list_add_rcu(&c->list, &h->list);
 	switch (c->type) {
 	case ACL_LINK:
 		h->acl_num++;
@@ -379,7 +416,10 @@
 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	list_del(&c->list);
+
+	list_del_rcu(&c->list);
+	synchronize_rcu();
+
 	switch (c->type) {
 	case ACL_LINK:
 		h->acl_num--;
@@ -414,14 +454,18 @@
 								__u16 handle)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
 	struct hci_conn  *c;
 
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
-		if (c->handle == handle)
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->handle == handle) {
+			rcu_read_unlock();
 			return c;
+		}
 	}
+	rcu_read_unlock();
+
 	return NULL;
 }
 
@@ -429,14 +473,19 @@
 							__u8 type, bdaddr_t *ba)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
 	struct hci_conn  *c;
 
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
-		if (c->type == type && !bacmp(&c->dst, ba))
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == type && !bacmp(&c->dst, ba)) {
+			rcu_read_unlock();
 			return c;
+		}
 	}
+
+	rcu_read_unlock();
+
 	return NULL;
 }
 
@@ -444,14 +493,19 @@
 							__u8 type, __u16 state)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
 	struct hci_conn  *c;
 
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
-		if (c->type == type && c->state == state)
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == type && c->state == state) {
+			rcu_read_unlock();
 			return c;
+		}
 	}
+
+	rcu_read_unlock();
+
 	return NULL;
 }
 
@@ -466,6 +520,10 @@
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+int hci_chan_del(struct hci_chan *chan);
+void hci_chan_list_flush(struct hci_conn *conn);
+
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
 						__u8 sec_level, __u8 auth_type);
 int hci_conn_check_link_mode(struct hci_conn *conn);
@@ -475,7 +533,6 @@
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_conn_enter_sniff_mode(struct hci_conn *conn);
 
 void hci_conn_hold_device(struct hci_conn *conn);
 void hci_conn_put_device(struct hci_conn *conn);
@@ -483,7 +540,7 @@
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
 	atomic_inc(&conn->refcnt);
-	del_timer(&conn->disc_timer);
+	cancel_delayed_work_sync(&conn->disc_work);
 }
 
 static inline void hci_conn_put(struct hci_conn *conn)
@@ -502,7 +559,9 @@
 		} else {
 			timeo = msecs_to_jiffies(10);
 		}
-		mod_timer(&conn->disc_timer, jiffies + timeo);
+		cancel_delayed_work_sync(&conn->disc_work);
+		queue_delayed_work(conn->hdev->workqueue,
+					&conn->disc_work, jiffies + timeo);
 	}
 }
 
@@ -534,10 +593,8 @@
 	try_module_get(d->owner) ? __hci_dev_hold(d) : NULL;	\
 })
 
-#define hci_dev_lock(d)		spin_lock(&d->lock)
-#define hci_dev_unlock(d)	spin_unlock(&d->lock)
-#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
-#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)
+#define hci_dev_lock(d)		mutex_lock(&d->lock)
+#define hci_dev_unlock(d)	mutex_unlock(&d->lock)
 
 struct hci_dev *hci_dev_get(int index);
 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@@ -545,7 +602,7 @@
 struct hci_dev *hci_alloc_dev(void);
 void hci_free_dev(struct hci_dev *hdev);
 int hci_register_dev(struct hci_dev *hdev);
-int hci_unregister_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
 int hci_suspend_dev(struct hci_dev *hdev);
 int hci_resume_dev(struct hci_dev *hdev);
 int hci_dev_open(__u16 dev);
@@ -599,8 +656,9 @@
 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
 
-int hci_register_sysfs(struct hci_dev *hdev);
-void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_init_sysfs(struct hci_dev *hdev);
+int hci_add_sysfs(struct hci_dev *hdev);
+void hci_del_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
 void hci_conn_add_sysfs(struct hci_conn *conn);
 void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -621,53 +679,40 @@
 #define lmp_host_le_capable(dev)   ((dev)->extfeatures[0] & LMP_HOST_LE)
 
 /* ----- HCI protocols ----- */
-struct hci_proto {
-	char		*name;
-	unsigned int	id;
-	unsigned long	flags;
-
-	void		*priv;
-
-	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr,
-								__u8 type);
-	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
-	int (*disconn_ind)	(struct hci_conn *conn);
-	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
-	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb,
-								__u16 flags);
-	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
-	int (*security_cfm)	(struct hci_conn *conn, __u8 status,
-								__u8 encrypt);
-};
-
 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
 								__u8 type)
 {
-	register struct hci_proto *hp;
-	int mask = 0;
+	switch (type) {
+	case ACL_LINK:
+		return l2cap_connect_ind(hdev, bdaddr);
 
-	hp = hci_proto[HCI_PROTO_L2CAP];
-	if (hp && hp->connect_ind)
-		mask |= hp->connect_ind(hdev, bdaddr, type);
+	case SCO_LINK:
+	case ESCO_LINK:
+		return sco_connect_ind(hdev, bdaddr);
 
-	hp = hci_proto[HCI_PROTO_SCO];
-	if (hp && hp->connect_ind)
-		mask |= hp->connect_ind(hdev, bdaddr, type);
-
-	return mask;
+	default:
+		BT_ERR("unknown link type %d", type);
+		return -EINVAL;
+	}
 }
 
 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
 {
-	register struct hci_proto *hp;
+	switch (conn->type) {
+	case ACL_LINK:
+	case LE_LINK:
+		l2cap_connect_cfm(conn, status);
+		break;
 
-	hp = hci_proto[HCI_PROTO_L2CAP];
-	if (hp && hp->connect_cfm)
-		hp->connect_cfm(conn, status);
+	case SCO_LINK:
+	case ESCO_LINK:
+		sco_connect_cfm(conn, status);
+		break;
 
-	hp = hci_proto[HCI_PROTO_SCO];
-	if (hp && hp->connect_cfm)
-		hp->connect_cfm(conn, status);
+	default:
+		BT_ERR("unknown link type %d", conn->type);
+		break;
+	}
 
 	if (conn->connect_cfm_cb)
 		conn->connect_cfm_cb(conn, status);
@@ -675,31 +720,29 @@
 
 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
 {
-	register struct hci_proto *hp;
-	int reason = 0x13;
+	if (conn->type != ACL_LINK && conn->type != LE_LINK)
+		return HCI_ERROR_REMOTE_USER_TERM;
 
-	hp = hci_proto[HCI_PROTO_L2CAP];
-	if (hp && hp->disconn_ind)
-		reason = hp->disconn_ind(conn);
-
-	hp = hci_proto[HCI_PROTO_SCO];
-	if (hp && hp->disconn_ind)
-		reason = hp->disconn_ind(conn);
-
-	return reason;
+	return l2cap_disconn_ind(conn);
 }
 
 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
 {
-	register struct hci_proto *hp;
+	switch (conn->type) {
+	case ACL_LINK:
+	case LE_LINK:
+		l2cap_disconn_cfm(conn, reason);
+		break;
 
-	hp = hci_proto[HCI_PROTO_L2CAP];
-	if (hp && hp->disconn_cfm)
-		hp->disconn_cfm(conn, reason);
+	case SCO_LINK:
+	case ESCO_LINK:
+		sco_disconn_cfm(conn, reason);
+		break;
 
-	hp = hci_proto[HCI_PROTO_SCO];
-	if (hp && hp->disconn_cfm)
-		hp->disconn_cfm(conn, reason);
+	default:
+		BT_ERR("unknown link type %d", conn->type);
+		break;
+	}
 
 	if (conn->disconn_cfm_cb)
 		conn->disconn_cfm_cb(conn, reason);
@@ -707,21 +750,16 @@
 
 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
 {
-	register struct hci_proto *hp;
 	__u8 encrypt;
 
+	if (conn->type != ACL_LINK && conn->type != LE_LINK)
+		return;
+
 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
 		return;
 
 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
-
-	hp = hci_proto[HCI_PROTO_L2CAP];
-	if (hp && hp->security_cfm)
-		hp->security_cfm(conn, status, encrypt);
-
-	hp = hci_proto[HCI_PROTO_SCO];
-	if (hp && hp->security_cfm)
-		hp->security_cfm(conn, status, encrypt);
+	l2cap_security_cfm(conn, status, encrypt);
 
 	if (conn->security_cfm_cb)
 		conn->security_cfm_cb(conn, status);
@@ -730,23 +768,15 @@
 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
 								__u8 encrypt)
 {
-	register struct hci_proto *hp;
+	if (conn->type != ACL_LINK && conn->type != LE_LINK)
+		return;
 
-	hp = hci_proto[HCI_PROTO_L2CAP];
-	if (hp && hp->security_cfm)
-		hp->security_cfm(conn, status, encrypt);
-
-	hp = hci_proto[HCI_PROTO_SCO];
-	if (hp && hp->security_cfm)
-		hp->security_cfm(conn, status, encrypt);
+	l2cap_security_cfm(conn, status, encrypt);
 
 	if (conn->security_cfm_cb)
 		conn->security_cfm_cb(conn, status);
 }
 
-int hci_register_proto(struct hci_proto *hproto);
-int hci_unregister_proto(struct hci_proto *hproto);
-
 /* ----- HCI callbacks ----- */
 struct hci_cb {
 	struct list_head list;
@@ -771,13 +801,13 @@
 
 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
 
-	read_lock_bh(&hci_cb_list_lock);
+	read_lock(&hci_cb_list_lock);
 	list_for_each(p, &hci_cb_list) {
 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
 		if (cb->security_cfm)
 			cb->security_cfm(conn, status, encrypt);
 	}
-	read_unlock_bh(&hci_cb_list_lock);
+	read_unlock(&hci_cb_list_lock);
 }
 
 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -793,26 +823,26 @@
 
 	hci_proto_encrypt_cfm(conn, status, encrypt);
 
-	read_lock_bh(&hci_cb_list_lock);
+	read_lock(&hci_cb_list_lock);
 	list_for_each(p, &hci_cb_list) {
 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
 		if (cb->security_cfm)
 			cb->security_cfm(conn, status, encrypt);
 	}
-	read_unlock_bh(&hci_cb_list_lock);
+	read_unlock(&hci_cb_list_lock);
 }
 
 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
 {
 	struct list_head *p;
 
-	read_lock_bh(&hci_cb_list_lock);
+	read_lock(&hci_cb_list_lock);
 	list_for_each(p, &hci_cb_list) {
 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
 		if (cb->key_change_cfm)
 			cb->key_change_cfm(conn, status);
 	}
-	read_unlock_bh(&hci_cb_list_lock);
+	read_unlock(&hci_cb_list_lock);
 }
 
 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -820,13 +850,13 @@
 {
 	struct list_head *p;
 
-	read_lock_bh(&hci_cb_list_lock);
+	read_lock(&hci_cb_list_lock);
 	list_for_each(p, &hci_cb_list) {
 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
 		if (cb->role_switch_cfm)
 			cb->role_switch_cfm(conn, status, role);
 	}
-	read_unlock_bh(&hci_cb_list_lock);
+	read_unlock(&hci_cb_list_lock);
 }
 
 int hci_register_cb(struct hci_cb *hcb);
@@ -836,7 +866,7 @@
 int hci_unregister_notifier(struct notifier_block *nb);
 
 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
 
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
@@ -849,44 +879,63 @@
 
 /* Management interface */
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(u16 index);
-int mgmt_index_removed(u16 index);
-int mgmt_powered(u16 index, u8 powered);
-int mgmt_discoverable(u16 index, u8 discoverable);
-int mgmt_connectable(u16 index, u8 connectable);
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent);
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type);
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
-int mgmt_disconnect_failed(u16 index);
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure);
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
-							u8 confirm_hint);
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
+int mgmt_index_added(struct hci_dev *hdev);
+int mgmt_index_removed(struct hci_dev *hdev);
+int mgmt_powered(struct hci_dev *hdev, u8 powered);
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+								u8 persistent);
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+								u8 addr_type);
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+								u8 addr_type);
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+						u8 addr_type, u8 status);
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 								u8 status);
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 								u8 status);
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
-								u8 *eir);
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name);
-int mgmt_discovering(u16 index, u8 discovering);
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr);
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+						__le32 value, u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+						bdaddr_t *bdaddr, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+						bdaddr_t *bdaddr, u8 status);
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+						u8 *randomizer, u8 status);
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+				u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
 
+/* HCI socket flags */
+#define HCI_PI_MGMT_INIT	0
+
 struct hci_pinfo {
 	struct bt_sock    bt;
 	struct hci_dev    *hdev;
 	struct hci_filter filter;
 	__u32             cmsg_mask;
 	unsigned short   channel;
+	unsigned long     flags;
 };
 
 /* HCI security filter */
@@ -915,4 +964,7 @@
 void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
 void hci_le_ltk_neg_reply(struct hci_conn *conn);
 
+int hci_do_inquiry(struct hci_dev *hdev, u8 length);
+int hci_cancel_inquiry(struct hci_dev *hdev);
+
 #endif /* __HCI_CORE_H */
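
Two themes run through the hci_core.h changes: the hci_proto registration table is gone, with the hci_proto_*() helpers dispatching directly to the L2CAP and SCO entry points by link type, and hdev->lock becomes a mutex while the connection hash is walked under RCU. A hedged sketch of a caller under the new locking; example_acl_connected is illustrative:

    static bool example_acl_connected(struct hci_dev *hdev, bdaddr_t *dst)
    {
            struct hci_conn *conn;
            bool up;

            hci_dev_lock(hdev);     /* now mutex_lock(&hdev->lock) */
            conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);    /* RCU list walk inside */
            up = conn && conn->state == BT_CONNECTED;
            hci_dev_unlock(hdev);

            return up;
    }
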
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 6cc18f3..68f5891 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -27,17 +27,23 @@
 #ifndef __L2CAP_H
 #define __L2CAP_H
 
+#include <asm/unaligned.h>
+
 /* L2CAP defaults */
 #define L2CAP_DEFAULT_MTU		672
 #define L2CAP_DEFAULT_MIN_MTU		48
 #define L2CAP_DEFAULT_FLUSH_TO		0xffff
 #define L2CAP_DEFAULT_TX_WINDOW		63
+#define L2CAP_DEFAULT_EXT_WINDOW	0x3FFF
 #define L2CAP_DEFAULT_MAX_TX		3
 #define L2CAP_DEFAULT_RETRANS_TO	2000    /* 2 seconds */
 #define L2CAP_DEFAULT_MONITOR_TO	12000   /* 12 seconds */
 #define L2CAP_DEFAULT_MAX_PDU_SIZE	1009    /* Sized for 3-DH5 packet */
 #define L2CAP_DEFAULT_ACK_TO		200
 #define L2CAP_LE_DEFAULT_MTU		23
+#define L2CAP_DEFAULT_MAX_SDU_SIZE	0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME		0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT		0xFFFFFFFF
 
 #define L2CAP_DISC_TIMEOUT             (100)
 #define L2CAP_DISC_REJ_TIMEOUT         (5000)  /*  5 seconds */
@@ -91,52 +97,82 @@
 #define L2CAP_ECHO_RSP		0x09
 #define L2CAP_INFO_REQ		0x0a
 #define L2CAP_INFO_RSP		0x0b
+#define L2CAP_CREATE_CHAN_REQ	0x0c
+#define L2CAP_CREATE_CHAN_RSP	0x0d
+#define L2CAP_MOVE_CHAN_REQ	0x0e
+#define L2CAP_MOVE_CHAN_RSP	0x0f
+#define L2CAP_MOVE_CHAN_CFM	0x10
+#define L2CAP_MOVE_CHAN_CFM_RSP	0x11
 #define L2CAP_CONN_PARAM_UPDATE_REQ	0x12
 #define L2CAP_CONN_PARAM_UPDATE_RSP	0x13
 
-/* L2CAP feature mask */
+/* L2CAP extended feature mask */
 #define L2CAP_FEAT_FLOWCTL	0x00000001
 #define L2CAP_FEAT_RETRANS	0x00000002
+#define L2CAP_FEAT_BIDIR_QOS	0x00000004
 #define L2CAP_FEAT_ERTM		0x00000008
 #define L2CAP_FEAT_STREAMING	0x00000010
 #define L2CAP_FEAT_FCS		0x00000020
+#define L2CAP_FEAT_EXT_FLOW	0x00000040
 #define L2CAP_FEAT_FIXED_CHAN	0x00000080
+#define L2CAP_FEAT_EXT_WINDOW	0x00000100
+#define L2CAP_FEAT_UCD		0x00000200
 
 /* L2CAP checksum option */
 #define L2CAP_FCS_NONE		0x00
 #define L2CAP_FCS_CRC16		0x01
 
-/* L2CAP Control Field bit masks */
-#define L2CAP_CTRL_SAR               0xC000
-#define L2CAP_CTRL_REQSEQ            0x3F00
-#define L2CAP_CTRL_TXSEQ             0x007E
-#define L2CAP_CTRL_RETRANS           0x0080
-#define L2CAP_CTRL_FINAL             0x0080
-#define L2CAP_CTRL_POLL              0x0010
-#define L2CAP_CTRL_SUPERVISE         0x000C
-#define L2CAP_CTRL_FRAME_TYPE        0x0001 /* I- or S-Frame */
+/* L2CAP fixed channels */
+#define L2CAP_FC_L2CAP		0x02
+#define L2CAP_FC_A2MP		0x08
 
-#define L2CAP_CTRL_TXSEQ_SHIFT      1
-#define L2CAP_CTRL_REQSEQ_SHIFT     8
-#define L2CAP_CTRL_SAR_SHIFT       14
+/* L2CAP Control Field bit masks */
+#define L2CAP_CTRL_SAR			0xC000
+#define L2CAP_CTRL_REQSEQ		0x3F00
+#define L2CAP_CTRL_TXSEQ		0x007E
+#define L2CAP_CTRL_SUPERVISE		0x000C
+
+#define L2CAP_CTRL_RETRANS		0x0080
+#define L2CAP_CTRL_FINAL		0x0080
+#define L2CAP_CTRL_POLL			0x0010
+#define L2CAP_CTRL_FRAME_TYPE		0x0001 /* I- or S-Frame */
+
+#define L2CAP_CTRL_TXSEQ_SHIFT		1
+#define L2CAP_CTRL_SUPER_SHIFT		2
+#define L2CAP_CTRL_REQSEQ_SHIFT		8
+#define L2CAP_CTRL_SAR_SHIFT		14
+
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ		0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR		0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE	0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ		0x0000FFFC
+
+#define L2CAP_EXT_CTRL_POLL		0x00040000
+#define L2CAP_EXT_CTRL_FINAL		0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE	0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT	2
+#define L2CAP_EXT_CTRL_SAR_SHIFT	16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT	16
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT	18
 
 /* L2CAP Supervisory Function */
-#define L2CAP_SUPER_RCV_READY           0x0000
-#define L2CAP_SUPER_REJECT              0x0004
-#define L2CAP_SUPER_RCV_NOT_READY       0x0008
-#define L2CAP_SUPER_SELECT_REJECT       0x000C
+#define L2CAP_SUPER_RR		0x00
+#define L2CAP_SUPER_REJ		0x01
+#define L2CAP_SUPER_RNR		0x02
+#define L2CAP_SUPER_SREJ	0x03
 
 /* L2CAP Segmentation and Reassembly */
-#define L2CAP_SDU_UNSEGMENTED       0x0000
-#define L2CAP_SDU_START             0x4000
-#define L2CAP_SDU_END               0x8000
-#define L2CAP_SDU_CONTINUE          0xC000
+#define L2CAP_SAR_UNSEGMENTED	0x00
+#define L2CAP_SAR_START		0x01
+#define L2CAP_SAR_END		0x02
+#define L2CAP_SAR_CONTINUE	0x03
 
 /* L2CAP Command rej. reasons */
-#define L2CAP_REJ_NOT_UNDERSTOOD      0x0000
-#define L2CAP_REJ_MTU_EXCEEDED        0x0001
-#define L2CAP_REJ_INVALID_CID         0x0002
-
+#define L2CAP_REJ_NOT_UNDERSTOOD	0x0000
+#define L2CAP_REJ_MTU_EXCEEDED		0x0001
+#define L2CAP_REJ_INVALID_CID		0x0002
 
 /* L2CAP structures */
 struct l2cap_hdr {
@@ -144,6 +180,12 @@
 	__le16     cid;
 } __packed;
 #define L2CAP_HDR_SIZE		4
+#define L2CAP_ENH_HDR_SIZE	6
+#define L2CAP_EXT_HDR_SIZE	8
+
+#define L2CAP_FCS_SIZE		2
+#define L2CAP_SDULEN_SIZE	2
+#define L2CAP_PSMLEN_SIZE	2
 
 struct l2cap_cmd_hdr {
 	__u8       code;
@@ -188,14 +230,15 @@
 #define L2CAP_CID_DYN_START	0x0040
 #define L2CAP_CID_DYN_END	0xffff
 
-/* connect result */
+/* connect/create channel results */
 #define L2CAP_CR_SUCCESS	0x0000
 #define L2CAP_CR_PEND		0x0001
 #define L2CAP_CR_BAD_PSM	0x0002
 #define L2CAP_CR_SEC_BLOCK	0x0003
 #define L2CAP_CR_NO_MEM		0x0004
+#define L2CAP_CR_BAD_AMP	0x0005
 
-/* connect status */
+/* connect/create channel status */
 #define L2CAP_CS_NO_INFO	0x0000
 #define L2CAP_CS_AUTHEN_PEND	0x0001
 #define L2CAP_CS_AUTHOR_PEND	0x0002
@@ -217,6 +260,8 @@
 #define L2CAP_CONF_UNACCEPT	0x0001
 #define L2CAP_CONF_REJECT	0x0002
 #define L2CAP_CONF_UNKNOWN	0x0003
+#define L2CAP_CONF_PENDING	0x0004
+#define L2CAP_CONF_EFS_REJECT	0x0005
 
 struct l2cap_conf_opt {
 	__u8       type;
@@ -233,6 +278,8 @@
 #define L2CAP_CONF_QOS		0x03
 #define L2CAP_CONF_RFC		0x04
 #define L2CAP_CONF_FCS		0x05
+#define L2CAP_CONF_EFS		0x06
+#define L2CAP_CONF_EWS		0x07
 
 #define L2CAP_CONF_MAX_SIZE	22
 
@@ -251,6 +298,21 @@
 #define L2CAP_MODE_ERTM		0x03
 #define L2CAP_MODE_STREAMING	0x04
 
+struct l2cap_conf_efs {
+	__u8	id;
+	__u8	stype;
+	__le16	msdu;
+	__le32	sdu_itime;
+	__le32	acc_lat;
+	__le32	flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC	0x00
+#define L2CAP_SERV_BESTEFFORT	0x01
+#define L2CAP_SERV_GUARANTEED	0x02
+
+#define L2CAP_BESTEFFORT_ID	0x01
+
 struct l2cap_disconn_req {
 	__le16     dcid;
 	__le16     scid;
@@ -271,14 +333,57 @@
 	__u8        data[0];
 } __packed;
 
+struct l2cap_create_chan_req {
+	__le16      psm;
+	__le16      scid;
+	__u8        amp_id;
+} __packed;
+
+struct l2cap_create_chan_rsp {
+	__le16      dcid;
+	__le16      scid;
+	__le16      result;
+	__le16      status;
+} __packed;
+
+struct l2cap_move_chan_req {
+	__le16      icid;
+	__u8        dest_amp_id;
+} __packed;
+
+struct l2cap_move_chan_rsp {
+	__le16      icid;
+	__le16      result;
+} __packed;
+
+#define L2CAP_MR_SUCCESS	0x0000
+#define L2CAP_MR_PEND		0x0001
+#define L2CAP_MR_BAD_ID		0x0002
+#define L2CAP_MR_SAME_ID	0x0003
+#define L2CAP_MR_NOT_SUPP	0x0004
+#define L2CAP_MR_COLLISION	0x0005
+#define L2CAP_MR_NOT_ALLOWED	0x0006
+
+struct l2cap_move_chan_cfm {
+	__le16      icid;
+	__le16      result;
+} __packed;
+
+#define L2CAP_MC_CONFIRMED	0x0000
+#define L2CAP_MC_UNCONFIRMED	0x0001
+
+struct l2cap_move_chan_cfm_rsp {
+	__le16      icid;
+} __packed;
+
 /* info type */
-#define L2CAP_IT_CL_MTU     0x0001
-#define L2CAP_IT_FEAT_MASK  0x0002
-#define L2CAP_IT_FIXED_CHAN 0x0003
+#define L2CAP_IT_CL_MTU		0x0001
+#define L2CAP_IT_FEAT_MASK	0x0002
+#define L2CAP_IT_FIXED_CHAN	0x0003
 
 /* info result */
-#define L2CAP_IR_SUCCESS    0x0000
-#define L2CAP_IR_NOTSUPP    0x0001
+#define L2CAP_IR_SUCCESS	0x0000
+#define L2CAP_IR_NOTSUPP	0x0001
 
 struct l2cap_conn_param_update_req {
 	__le16      min;
@@ -297,7 +402,7 @@
 
 /* ----- L2CAP channels and connections ----- */
 struct srej_list {
-	__u8	tx_seq;
+	__u16	tx_seq;
 	struct list_head list;
 };
 
@@ -319,14 +424,11 @@
 	__u16		flush_to;
 	__u8		mode;
 	__u8		chan_type;
+	__u8		chan_policy;
 
 	__le16		sport;
 
 	__u8		sec_level;
-	__u8		role_switch;
-	__u8		force_reliable;
-	__u8		flushable;
-	__u8		force_active;
 
 	__u8		ident;
 
@@ -337,7 +439,8 @@
 
 	__u8		fcs;
 
-	__u8		tx_win;
+	__u16		tx_win;
+	__u16		tx_win_max;
 	__u8		max_tx;
 	__u16		retrans_timeout;
 	__u16		monitor_timeout;
@@ -345,29 +448,45 @@
 
 	unsigned long	conf_state;
 	unsigned long	conn_state;
+	unsigned long	flags;
 
-	__u8		next_tx_seq;
-	__u8		expected_ack_seq;
-	__u8		expected_tx_seq;
-	__u8		buffer_seq;
-	__u8		buffer_seq_srej;
-	__u8		srej_save_reqseq;
-	__u8		frames_sent;
-	__u8		unacked_frames;
+	__u16		next_tx_seq;
+	__u16		expected_ack_seq;
+	__u16		expected_tx_seq;
+	__u16		buffer_seq;
+	__u16		buffer_seq_srej;
+	__u16		srej_save_reqseq;
+	__u16		frames_sent;
+	__u16		unacked_frames;
 	__u8		retry_count;
 	__u8		num_acked;
 	__u16		sdu_len;
 	struct sk_buff	*sdu;
 	struct sk_buff	*sdu_last_frag;
 
-	__u8		remote_tx_win;
+	__u16		remote_tx_win;
 	__u8		remote_max_tx;
 	__u16		remote_mps;
 
-	struct timer_list	chan_timer;
-	struct timer_list	retrans_timer;
-	struct timer_list	monitor_timer;
-	struct timer_list	ack_timer;
+	__u8		local_id;
+	__u8		local_stype;
+	__u16		local_msdu;
+	__u32		local_sdu_itime;
+	__u32		local_acc_lat;
+	__u32		local_flush_to;
+
+	__u8		remote_id;
+	__u8		remote_stype;
+	__u16		remote_msdu;
+	__u32		remote_sdu_itime;
+	__u32		remote_acc_lat;
+	__u32		remote_flush_to;
+
+	struct delayed_work	chan_timer;
+	struct delayed_work	retrans_timer;
+	struct delayed_work	monitor_timer;
+	struct delayed_work	ack_timer;
+
 	struct sk_buff		*tx_send_head;
 	struct sk_buff_head	tx_q;
 	struct sk_buff_head	srej_q;
@@ -391,6 +510,7 @@
 
 struct l2cap_conn {
 	struct hci_conn	*hcon;
+	struct hci_chan	*hchan;
 
 	bdaddr_t	*dst;
 	bdaddr_t	*src;
@@ -402,7 +522,7 @@
 	__u8		info_state;
 	__u8		info_ident;
 
-	struct timer_list info_timer;
+	struct delayed_work info_timer;
 
 	spinlock_t	lock;
 
@@ -412,11 +532,11 @@
 
 	__u8		disc_reason;
 
-	struct timer_list security_timer;
+	struct delayed_work  security_timer;
 	struct smp_chan *smp_chan;
 
 	struct list_head chan_l;
-	rwlock_t	chan_lock;
+	struct mutex	chan_lock;
 };
 
 #define L2CAP_INFO_CL_MTU_REQ_SENT	0x01
@@ -445,6 +565,9 @@
 	CONF_CONNECT_PEND,
 	CONF_NO_FCS_RECV,
 	CONF_STATE2_DEVICE,
+	CONF_EWS_RECV,
+	CONF_LOC_CONF_PEND,
+	CONF_REM_CONF_PEND,
 };
 
 #define L2CAP_CONF_MAX_CONF_REQ 2
@@ -462,6 +585,44 @@
 	CONN_RNR_SENT,
 };
 
+/* Definitions for flags in l2cap_chan */
+enum {
+	FLAG_ROLE_SWITCH,
+	FLAG_FORCE_ACTIVE,
+	FLAG_FORCE_RELIABLE,
+	FLAG_FLUSHABLE,
+	FLAG_EXT_CTRL,
+	FLAG_EFS_ENABLE,
+};
+
+static inline void l2cap_chan_hold(struct l2cap_chan *c)
+{
+	atomic_inc(&c->refcnt);
+}
+
+static inline void l2cap_chan_put(struct l2cap_chan *c)
+{
+	if (atomic_dec_and_test(&c->refcnt))
+		kfree(c);
+}
+
+static inline void l2cap_set_timer(struct l2cap_chan *chan,
+					struct delayed_work *work, long timeout)
+{
+	BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
+
+	if (!__cancel_delayed_work(work))
+		l2cap_chan_hold(chan);
+	schedule_delayed_work(work, timeout);
+}
+
+static inline void l2cap_clear_timer(struct l2cap_chan *chan,
+					struct delayed_work *work)
+{
+	if (__cancel_delayed_work(work))
+		l2cap_chan_put(chan);
+}
+
 #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
 #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
 #define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
@@ -474,6 +635,22 @@
 		L2CAP_DEFAULT_ACK_TO);
 #define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
 
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+	int offset;
+
+	offset = (seq1 - seq2) % (chan->tx_win_max + 1);
+	if (offset < 0)
+		offset += (chan->tx_win_max + 1);
+
+	return offset;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+	return (seq + 1) % (chan->tx_win_max + 1);
+}
+
 static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
 {
 	int sub;
@@ -486,13 +663,164 @@
 	return sub == ch->remote_tx_win;
 }
 
-#define __get_txseq(ctrl)	(((ctrl) & L2CAP_CTRL_TXSEQ) >> 1)
-#define __get_reqseq(ctrl)	(((ctrl) & L2CAP_CTRL_REQSEQ) >> 8)
-#define __is_iframe(ctrl)	(!((ctrl) & L2CAP_CTRL_FRAME_TYPE))
-#define __is_sframe(ctrl)	((ctrl) & L2CAP_CTRL_FRAME_TYPE)
-#define __is_sar_start(ctrl)	(((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START)
+static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
+						L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+	else
+		return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+}
 
-extern int disable_ertm;
+static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
+							L2CAP_EXT_CTRL_REQSEQ;
+	else
+		return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
+}
+
+static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
+						L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+	else
+		return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+}
+
+static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
+							L2CAP_EXT_CTRL_TXSEQ;
+	else
+		return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
+}
+
+static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
+	else
+		return ctrl & L2CAP_CTRL_FRAME_TYPE;
+}
+
+static inline __u32 __set_sframe(struct l2cap_chan *chan)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return L2CAP_EXT_CTRL_FRAME_TYPE;
+	else
+		return L2CAP_CTRL_FRAME_TYPE;
+}
+
+static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+	else
+		return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+}
+
+static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
+	else
+		return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
+}
+
+static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
+{
+	return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
+}
+
+static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return L2CAP_EXT_CTRL_SAR;
+	else
+		return L2CAP_CTRL_SAR;
+}
+
+static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
+						L2CAP_EXT_CTRL_SUPER_SHIFT;
+	else
+		return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+}
+
+static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
+						L2CAP_EXT_CTRL_SUPERVISE;
+	else
+		return (super << L2CAP_CTRL_SUPER_SHIFT) &
+							L2CAP_CTRL_SUPERVISE;
+}
+
+static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return L2CAP_EXT_CTRL_FINAL;
+	else
+		return L2CAP_CTRL_FINAL;
+}
+
+static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return ctrl & L2CAP_EXT_CTRL_FINAL;
+	else
+		return ctrl & L2CAP_CTRL_FINAL;
+}
+
+static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return L2CAP_EXT_CTRL_POLL;
+	else
+		return L2CAP_CTRL_POLL;
+}
+
+static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return ctrl & L2CAP_EXT_CTRL_POLL;
+	else
+		return ctrl & L2CAP_CTRL_POLL;
+}
+
+static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return get_unaligned_le32(p);
+	else
+		return get_unaligned_le16(p);
+}
+
+static inline void __put_control(struct l2cap_chan *chan, __u32 control,
+								void *p)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return put_unaligned_le32(control, p);
+	else
+		return put_unaligned_le16(control, p);
+}
+
+static inline __u8 __ctrl_size(struct l2cap_chan *chan)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
+	else
+		return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
+}
+
+extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
 void l2cap_cleanup_sockets(void);
@@ -506,8 +834,11 @@
 struct l2cap_chan *l2cap_chan_create(struct sock *sk);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
 void l2cap_chan_destroy(struct l2cap_chan *chan);
-int l2cap_chan_connect(struct l2cap_chan *chan);
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+								bdaddr_t *dst);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+								u32 priority);
 void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan);
 
 #endif /* __L2CAP_H */
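
As a rough illustration of the arithmetic behind the new __seq_offset()/__next_seq() helpers above: sequence numbers now wrap modulo (tx_win_max + 1), so the offset between two sequence numbers is taken in that modulus and negative intermediate results are folded back. The standalone C sketch below mirrors the helpers with placeholder types; the 14-bit sequence space is assumed for the extended control field and is not taken from this patch.

#include <stdio.h>

struct chan {
	unsigned short tx_win_max;	/* stand-in for l2cap_chan->tx_win_max */
};

/* mirrors __seq_offset(): distance from seq2 to seq1, modulo (tx_win_max + 1) */
static int seq_offset(const struct chan *c, unsigned short seq1, unsigned short seq2)
{
	int offset = (seq1 - seq2) % (c->tx_win_max + 1);

	if (offset < 0)
		offset += c->tx_win_max + 1;
	return offset;
}

/* mirrors __next_seq(): successor with wraparound */
static unsigned short next_seq(const struct chan *c, unsigned short seq)
{
	return (seq + 1) % (c->tx_win_max + 1);
}

int main(void)
{
	struct chan c = { .tx_win_max = 0x3fff };	/* assumed 14-bit sequence space */

	printf("offset(2, 0x3ffe) = %d\n", seq_offset(&c, 2, 0x3ffe));	/* 4, wrapped */
	printf("next(0x3fff) = %u\n", next_seq(&c, 0x3fff));		/* 0 */
	return 0;
}
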
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d66da0f..be65d34 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -23,6 +23,23 @@
 
 #define MGMT_INDEX_NONE			0xFFFF
 
+#define MGMT_STATUS_SUCCESS		0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND	0x01
+#define MGMT_STATUS_NOT_CONNECTED	0x02
+#define MGMT_STATUS_FAILED		0x03
+#define MGMT_STATUS_CONNECT_FAILED	0x04
+#define MGMT_STATUS_AUTH_FAILED		0x05
+#define MGMT_STATUS_NOT_PAIRED		0x06
+#define MGMT_STATUS_NO_RESOURCES	0x07
+#define MGMT_STATUS_TIMEOUT		0x08
+#define MGMT_STATUS_ALREADY_CONNECTED	0x09
+#define MGMT_STATUS_BUSY		0x0a
+#define MGMT_STATUS_REJECTED		0x0b
+#define MGMT_STATUS_NOT_SUPPORTED	0x0c
+#define MGMT_STATUS_INVALID_PARAMS	0x0d
+#define MGMT_STATUS_DISCONNECTED	0x0e
+#define MGMT_STATUS_NOT_POWERED		0x0f
+
 struct mgmt_hdr {
 	__le16 opcode;
 	__le16 index;
@@ -44,22 +61,29 @@
 /* Reserve one extra byte for names in management messages so that they
  * are always guaranteed to be nul-terminated */
 #define MGMT_MAX_NAME_LENGTH		(HCI_MAX_NAME_LENGTH + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH	(10 + 1)
+
+#define MGMT_SETTING_POWERED		0x00000001
+#define MGMT_SETTING_CONNECTABLE	0x00000002
+#define MGMT_SETTING_FAST_CONNECTABLE	0x00000004
+#define MGMT_SETTING_DISCOVERABLE	0x00000008
+#define MGMT_SETTING_PAIRABLE		0x00000010
+#define MGMT_SETTING_LINK_SECURITY	0x00000020
+#define MGMT_SETTING_SSP		0x00000040
+#define MGMT_SETTING_BREDR		0x00000080
+#define MGMT_SETTING_HS			0x00000100
+#define MGMT_SETTING_LE			0x00000200
 
 #define MGMT_OP_READ_INFO		0x0004
 struct mgmt_rp_read_info {
-	__u8 type;
-	__u8 powered;
-	__u8 connectable;
-	__u8 discoverable;
-	__u8 pairable;
-	__u8 sec_mode;
 	bdaddr_t bdaddr;
+	__u8 version;
+	__le16 manufacturer;
+	__le32 supported_settings;
+	__le32 current_settings;
 	__u8 dev_class[3];
-	__u8 features[8];
-	__u16 manufacturer;
-	__u8 hci_ver;
-	__u16 hci_rev;
 	__u8 name[MGMT_MAX_NAME_LENGTH];
+	__u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
 } __packed;
 
 struct mgmt_mode {
@@ -69,70 +93,97 @@
 #define MGMT_OP_SET_POWERED		0x0005
 
 #define MGMT_OP_SET_DISCOVERABLE	0x0006
+struct mgmt_cp_set_discoverable {
+	__u8 val;
+	__u16 timeout;
+} __packed;
 
 #define MGMT_OP_SET_CONNECTABLE		0x0007
 
-#define MGMT_OP_SET_PAIRABLE		0x0008
+#define MGMT_OP_SET_FAST_CONNECTABLE	0x0008
 
-#define MGMT_OP_ADD_UUID		0x0009
-struct mgmt_cp_add_uuid {
-	__u8 uuid[16];
-	__u8 svc_hint;
-} __packed;
+#define MGMT_OP_SET_PAIRABLE		0x0009
 
-#define MGMT_OP_REMOVE_UUID		0x000A
-struct mgmt_cp_remove_uuid {
-	__u8 uuid[16];
-} __packed;
+#define MGMT_OP_SET_LINK_SECURITY	0x000A
 
-#define MGMT_OP_SET_DEV_CLASS		0x000B
+#define MGMT_OP_SET_SSP			0x000B
+
+#define MGMT_OP_SET_HS			0x000C
+
+#define MGMT_OP_SET_LE			0x000D
+
+#define MGMT_OP_SET_DEV_CLASS		0x000E
 struct mgmt_cp_set_dev_class {
 	__u8 major;
 	__u8 minor;
 } __packed;
 
-#define MGMT_OP_SET_SERVICE_CACHE	0x000C
-struct mgmt_cp_set_service_cache {
-	__u8 enable;
+#define MGMT_OP_SET_LOCAL_NAME		0x000F
+struct mgmt_cp_set_local_name {
+	__u8 name[MGMT_MAX_NAME_LENGTH];
 } __packed;
 
-struct mgmt_key_info {
+#define MGMT_OP_ADD_UUID		0x0010
+struct mgmt_cp_add_uuid {
+	__u8 uuid[16];
+	__u8 svc_hint;
+} __packed;
+
+#define MGMT_OP_REMOVE_UUID		0x0011
+struct mgmt_cp_remove_uuid {
+	__u8 uuid[16];
+} __packed;
+
+struct mgmt_link_key_info {
 	bdaddr_t bdaddr;
 	u8 type;
 	u8 val[16];
 	u8 pin_len;
-	u8 dlen;
-	u8 data[0];
 } __packed;
 
-#define MGMT_OP_LOAD_KEYS		0x000D
-struct mgmt_cp_load_keys {
+#define MGMT_OP_LOAD_LINK_KEYS		0x0012
+struct mgmt_cp_load_link_keys {
 	__u8 debug_keys;
 	__le16 key_count;
-	struct mgmt_key_info keys[0];
+	struct mgmt_link_key_info keys[0];
 } __packed;
 
-#define MGMT_OP_REMOVE_KEY		0x000E
-struct mgmt_cp_remove_key {
+#define MGMT_OP_REMOVE_KEYS		0x0013
+struct mgmt_cp_remove_keys {
 	bdaddr_t bdaddr;
 	__u8 disconnect;
 } __packed;
+struct mgmt_rp_remove_keys {
+	bdaddr_t bdaddr;
+	__u8 status;
+};
 
-#define MGMT_OP_DISCONNECT		0x000F
+#define MGMT_OP_DISCONNECT		0x0014
 struct mgmt_cp_disconnect {
 	bdaddr_t bdaddr;
 } __packed;
 struct mgmt_rp_disconnect {
 	bdaddr_t bdaddr;
+	__u8 status;
 } __packed;
 
-#define MGMT_OP_GET_CONNECTIONS		0x0010
+#define MGMT_ADDR_BREDR			0x00
+#define MGMT_ADDR_LE_PUBLIC		0x01
+#define MGMT_ADDR_LE_RANDOM		0x02
+#define MGMT_ADDR_INVALID		0xff
+
+struct mgmt_addr_info {
+	bdaddr_t bdaddr;
+	__u8 type;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS		0x0015
 struct mgmt_rp_get_connections {
 	__le16 conn_count;
-	bdaddr_t conn[0];
+	struct mgmt_addr_info addr[0];
 } __packed;
 
-#define MGMT_OP_PIN_CODE_REPLY		0x0011
+#define MGMT_OP_PIN_CODE_REPLY		0x0016
 struct mgmt_cp_pin_code_reply {
 	bdaddr_t bdaddr;
 	__u8 pin_len;
@@ -143,27 +194,27 @@
 	uint8_t status;
 } __packed;
 
-#define MGMT_OP_PIN_CODE_NEG_REPLY	0x0012
+#define MGMT_OP_PIN_CODE_NEG_REPLY	0x0017
 struct mgmt_cp_pin_code_neg_reply {
 	bdaddr_t bdaddr;
 } __packed;
 
-#define MGMT_OP_SET_IO_CAPABILITY	0x0013
+#define MGMT_OP_SET_IO_CAPABILITY	0x0018
 struct mgmt_cp_set_io_capability {
 	__u8 io_capability;
 } __packed;
 
-#define MGMT_OP_PAIR_DEVICE		0x0014
+#define MGMT_OP_PAIR_DEVICE		0x0019
 struct mgmt_cp_pair_device {
-	bdaddr_t bdaddr;
+	struct mgmt_addr_info addr;
 	__u8 io_cap;
 } __packed;
 struct mgmt_rp_pair_device {
-	bdaddr_t bdaddr;
+	struct mgmt_addr_info addr;
 	__u8 status;
 } __packed;
 
-#define MGMT_OP_USER_CONFIRM_REPLY	0x0015
+#define MGMT_OP_USER_CONFIRM_REPLY	0x001A
 struct mgmt_cp_user_confirm_reply {
 	bdaddr_t bdaddr;
 } __packed;
@@ -172,50 +223,71 @@
 	__u8 status;
 } __packed;
 
-#define MGMT_OP_USER_CONFIRM_NEG_REPLY	0x0016
-
-#define MGMT_OP_SET_LOCAL_NAME		0x0017
-struct mgmt_cp_set_local_name {
-	__u8 name[MGMT_MAX_NAME_LENGTH];
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY	0x001B
+struct mgmt_cp_user_confirm_neg_reply {
+	bdaddr_t bdaddr;
 } __packed;
 
-#define MGMT_OP_READ_LOCAL_OOB_DATA	0x0018
+#define MGMT_OP_USER_PASSKEY_REPLY	0x001C
+struct mgmt_cp_user_passkey_reply {
+	bdaddr_t bdaddr;
+	__le32 passkey;
+} __packed;
+struct mgmt_rp_user_passkey_reply {
+	bdaddr_t bdaddr;
+	__u8 status;
+} __packed;
+
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY	0x001D
+struct mgmt_cp_user_passkey_neg_reply {
+	bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_READ_LOCAL_OOB_DATA	0x001E
 struct mgmt_rp_read_local_oob_data {
 	__u8 hash[16];
 	__u8 randomizer[16];
 } __packed;
 
-#define MGMT_OP_ADD_REMOTE_OOB_DATA	0x0019
+#define MGMT_OP_ADD_REMOTE_OOB_DATA	0x001F
 struct mgmt_cp_add_remote_oob_data {
 	bdaddr_t bdaddr;
 	__u8 hash[16];
 	__u8 randomizer[16];
 } __packed;
 
-#define MGMT_OP_REMOVE_REMOTE_OOB_DATA	0x001A
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA	0x0020
 struct mgmt_cp_remove_remote_oob_data {
 	bdaddr_t bdaddr;
 } __packed;
 
-#define MGMT_OP_START_DISCOVERY		0x001B
+#define MGMT_OP_START_DISCOVERY		0x0021
+struct mgmt_cp_start_discovery {
+	__u8 type;
+} __packed;
 
-#define MGMT_OP_STOP_DISCOVERY		0x001C
+#define MGMT_OP_STOP_DISCOVERY		0x0022
 
-#define MGMT_OP_BLOCK_DEVICE		0x001D
+#define MGMT_OP_CONFIRM_NAME		0x0023
+struct mgmt_cp_confirm_name {
+	bdaddr_t bdaddr;
+	__u8 name_known;
+} __packed;
+struct mgmt_rp_confirm_name {
+	bdaddr_t bdaddr;
+	__u8 status;
+} __packed;
+
+#define MGMT_OP_BLOCK_DEVICE		0x0024
 struct mgmt_cp_block_device {
 	bdaddr_t bdaddr;
 } __packed;
 
-#define MGMT_OP_UNBLOCK_DEVICE		0x001E
+#define MGMT_OP_UNBLOCK_DEVICE		0x0025
 struct mgmt_cp_unblock_device {
 	bdaddr_t bdaddr;
 } __packed;
 
-#define MGMT_OP_SET_FAST_CONNECTABLE	0x001F
-struct mgmt_cp_set_fast_connectable {
-	__u8 enable;
-} __packed;
-
 #define MGMT_EV_CMD_COMPLETE		0x0001
 struct mgmt_ev_cmd_complete {
 	__le16 opcode;
@@ -237,83 +309,82 @@
 
 #define MGMT_EV_INDEX_REMOVED		0x0005
 
-#define MGMT_EV_POWERED			0x0006
+#define MGMT_EV_NEW_SETTINGS		0x0006
 
-#define MGMT_EV_DISCOVERABLE		0x0007
+#define MGMT_EV_CLASS_OF_DEV_CHANGED	0x0007
+struct mgmt_ev_class_of_dev_changed {
+	__u8 dev_class[3];
+};
 
-#define MGMT_EV_CONNECTABLE		0x0008
+#define MGMT_EV_LOCAL_NAME_CHANGED	0x0008
+struct mgmt_ev_local_name_changed {
+	__u8 name[MGMT_MAX_NAME_LENGTH];
+	__u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
 
-#define MGMT_EV_PAIRABLE		0x0009
-
-#define MGMT_EV_NEW_KEY			0x000A
-struct mgmt_ev_new_key {
+#define MGMT_EV_NEW_LINK_KEY		0x0009
+struct mgmt_ev_new_link_key {
 	__u8 store_hint;
-	struct mgmt_key_info key;
+	struct mgmt_link_key_info key;
 } __packed;
 
-#define MGMT_EV_CONNECTED		0x000B
-struct mgmt_ev_connected {
-	bdaddr_t bdaddr;
-	__u8 link_type;
-} __packed;
+#define MGMT_EV_CONNECTED		0x000A
 
-#define MGMT_EV_DISCONNECTED		0x000C
-struct mgmt_ev_disconnected {
-	bdaddr_t bdaddr;
-} __packed;
+#define MGMT_EV_DISCONNECTED		0x000B
 
-#define MGMT_EV_CONNECT_FAILED		0x000D
+#define MGMT_EV_CONNECT_FAILED		0x000C
 struct mgmt_ev_connect_failed {
-	bdaddr_t bdaddr;
+	struct mgmt_addr_info addr;
 	__u8 status;
 } __packed;
 
-#define MGMT_EV_PIN_CODE_REQUEST	0x000E
+#define MGMT_EV_PIN_CODE_REQUEST	0x000D
 struct mgmt_ev_pin_code_request {
 	bdaddr_t bdaddr;
 	__u8 secure;
 } __packed;
 
-#define MGMT_EV_USER_CONFIRM_REQUEST	0x000F
+#define MGMT_EV_USER_CONFIRM_REQUEST	0x000E
 struct mgmt_ev_user_confirm_request {
 	bdaddr_t bdaddr;
 	__u8 confirm_hint;
 	__le32 value;
 } __packed;
 
+#define MGMT_EV_USER_PASSKEY_REQUEST	0x000F
+struct mgmt_ev_user_passkey_request {
+	bdaddr_t bdaddr;
+} __packed;
+
 #define MGMT_EV_AUTH_FAILED		0x0010
 struct mgmt_ev_auth_failed {
 	bdaddr_t bdaddr;
 	__u8 status;
 } __packed;
 
-#define MGMT_EV_LOCAL_NAME_CHANGED	0x0011
-struct mgmt_ev_local_name_changed {
-	__u8 name[MGMT_MAX_NAME_LENGTH];
-} __packed;
-
-#define MGMT_EV_DEVICE_FOUND		0x0012
+#define MGMT_EV_DEVICE_FOUND		0x0011
 struct mgmt_ev_device_found {
-	bdaddr_t bdaddr;
+	struct mgmt_addr_info addr;
 	__u8 dev_class[3];
 	__s8 rssi;
+	__u8 confirm_name;
 	__u8 eir[HCI_MAX_EIR_LENGTH];
 } __packed;
 
-#define MGMT_EV_REMOTE_NAME		0x0013
+#define MGMT_EV_REMOTE_NAME		0x0012
 struct mgmt_ev_remote_name {
 	bdaddr_t bdaddr;
 	__u8 name[MGMT_MAX_NAME_LENGTH];
 } __packed;
 
-#define MGMT_EV_DISCOVERING		0x0014
+#define MGMT_EV_DISCOVERING		0x0013
 
-#define MGMT_EV_DEVICE_BLOCKED		0x0015
+#define MGMT_EV_DEVICE_BLOCKED		0x0014
 struct mgmt_ev_device_blocked {
 	bdaddr_t bdaddr;
 } __packed;
 
-#define MGMT_EV_DEVICE_UNBLOCKED	0x0016
+#define MGMT_EV_DEVICE_UNBLOCKED	0x0015
 struct mgmt_ev_device_unblocked {
 	bdaddr_t bdaddr;
 } __packed;
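
The pairing command now carries a struct mgmt_addr_info (address plus address type) instead of a bare bdaddr. Below is a hedged userspace sketch of how a management client might fill the reworked MGMT_OP_PAIR_DEVICE parameters; the structs are re-declared locally so the snippet builds on its own, the peer address and io_cap value are invented for illustration, and the mgmt_hdr framing and HCI control socket setup are omitted.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MGMT_ADDR_BREDR		0x00
#define MGMT_OP_PAIR_DEVICE	0x0019

struct mgmt_addr_info {
	uint8_t bdaddr[6];		/* bdaddr_t, little-endian on the wire */
	uint8_t type;			/* MGMT_ADDR_BREDR / MGMT_ADDR_LE_* */
} __attribute__((packed));

struct mgmt_cp_pair_device {
	struct mgmt_addr_info addr;
	uint8_t io_cap;
} __attribute__((packed));

int main(void)
{
	struct mgmt_cp_pair_device cp;
	const uint8_t peer[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };	/* made up */

	memset(&cp, 0, sizeof(cp));
	memcpy(cp.addr.bdaddr, peer, sizeof(peer));
	cp.addr.type = MGMT_ADDR_BREDR;
	cp.io_cap = 0x03;		/* NoInputNoOutput, value assumed for illustration */

	printf("opcode 0x%04x, parameter length %zu\n",
	       MGMT_OP_PAIR_DEVICE, sizeof(cp));
	return 0;
}
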
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 15b97d5..aeaf5fa 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -115,6 +115,10 @@
 #define SMP_MIN_ENC_KEY_SIZE		7
 #define SMP_MAX_ENC_KEY_SIZE		16
 
+#define SMP_FLAG_TK_VALID	1
+#define SMP_FLAG_CFM_PENDING	2
+#define SMP_FLAG_MITM_AUTH	3
+
 struct smp_chan {
 	struct l2cap_conn *conn;
 	u8		preq[7]; /* SMP Pairing Request */
@@ -124,6 +128,7 @@
 	u8		pcnf[16]; /* SMP Pairing Confirm */
 	u8		tk[16]; /* SMP Temporary Key */
 	u8		smp_key_size;
+	unsigned long	smp_flags;
 	struct crypto_blkcipher	*tfm;
 	struct work_struct confirm;
 	struct work_struct random;
@@ -134,6 +139,7 @@
 int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
 int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
 int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
 
 void smp_chan_destroy(struct l2cap_conn *conn);
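
The new SMP_FLAG_* values are bit numbers for the unsigned long smp_flags field added to struct smp_chan, so kernel code would typically drive them with set_bit()/test_bit(). A small standalone illustration of the same bookkeeping follows; the helper names are invented for the example.

#include <stdio.h>

#define SMP_FLAG_TK_VALID	1
#define SMP_FLAG_CFM_PENDING	2
#define SMP_FLAG_MITM_AUTH	3

static void flag_set(unsigned long *flags, int bit) { *flags |= (1UL << bit); }
static int flag_test(unsigned long flags, int bit) { return !!(flags & (1UL << bit)); }

int main(void)
{
	unsigned long smp_flags = 0;

	flag_set(&smp_flags, SMP_FLAG_TK_VALID);	/* TK has been derived */
	flag_set(&smp_flags, SMP_FLAG_CFM_PENDING);	/* confirm value still owed */

	printf("tk_valid=%d cfm_pending=%d mitm=%d\n",
	       flag_test(smp_flags, SMP_FLAG_TK_VALID),
	       flag_test(smp_flags, SMP_FLAG_CFM_PENDING),
	       flag_test(smp_flags, SMP_FLAG_MITM_AUTH));
	return 0;
}
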
 
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index c011281..ef2dd94 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -9,6 +9,7 @@
 
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/caif_device.h>
 #include <linux/caif/caif_socket.h>
 #include <linux/if.h>
 #include <linux/net.h>
@@ -104,4 +105,24 @@
  */
 void caif_free_client(struct cflayer *adap_layer);
 
+/**
+ * caif_enroll_dev() - Enroll a net-device as a CAIF Link layer
+ * @dev:		Network device to enroll.
+ * @caifdev:		Configuration information from CAIF Link Layer
+ * @link_support:	Link layer support layer
+ * @head_room:		Head room needed by link support layer
+ * @layer:		Lowest layer in CAIF stack
+ * @rcv_func:		Receive function for the CAIF stack.
+ *
+ * This function enrolls a CAIF link layer into the CAIF stack and
+ * expects the interface to be able to handle CAIF payload.
+ * The link_support layer is used to add any Link Layer specific
+ * framing.
+ */
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+			struct cflayer *link_support, int head_room,
+			struct cflayer **layer, int (**rcv_func)(
+				struct sk_buff *, struct net_device *,
+				struct packet_type *, struct net_device *));
+
 #endif /* CAIF_DEV_H_ */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 35bc788..0f3a391 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -121,9 +121,7 @@
 * @transmit:	Packet transmit function.
  * @ctrlcmd:	Used for control signalling upwards in the stack.
  * @modemcmd:	Used for control signaling downwards in the stack.
- * @prio:	Priority of this layer.
  * @id:		The identity of this layer
- * @type:	The type of this layer
  * @name:	Name of the layer.
  *
  *  This structure defines the layered structure in CAIF.
@@ -230,9 +228,7 @@
 	 */
 	int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
 
-	unsigned short prio;
 	unsigned int id;
-	unsigned int type;
 	char name[CAIF_LAYER_NAME_SZ];
 };
 
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index 87c3d11..aa6a485 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -55,8 +55,8 @@
 struct cfspi_xfer {
 	u16 tx_dma_len;
 	u16 rx_dma_len;
-	void *va_tx;
-	dma_addr_t pa_tx;
+	void *va_tx[2];
+	dma_addr_t pa_tx[2];
 	void *va_rx;
 	dma_addr_t pa_rx;
 };
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 3e93a4a..90b4ff8 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -14,18 +14,6 @@
 struct cfcnfg;
 
 /**
- * enum cfcnfg_phy_type -  Types of physical layers defined in CAIF Stack
- *
- * @CFPHYTYPE_FRAG:	Fragmented frames physical interface.
- * @CFPHYTYPE_CAIF:	Generic CAIF physical interface
- */
-enum cfcnfg_phy_type {
-	CFPHYTYPE_FRAG = 1,
-	CFPHYTYPE_CAIF,
-	CFPHYTYPE_MAX
-};
-
-/**
  * enum cfcnfg_phy_preference - Physical preference HW Abstraction
  *
  * @CFPHYPREF_UNSPECIFIED:	Default physical interface
@@ -66,21 +54,20 @@
  * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
  * @cnfg:	Pointer to a CAIF configuration object, created by
  *		cfcnfg_create().
- * @phy_type:	Specifies the type of physical interface, e.g.
- *			CFPHYTYPE_FRAG.
  * @dev:	Pointer to link layer device
  * @phy_layer:	Specify the physical layer. The transmit function
  *		MUST be set in the structure.
  * @pref:	The phy (link layer) preference.
+ * @link_support: Protocol implementation for link layer specific protocol.
  * @fcs:	Specify if checksum is used in CAIF Framing Layer.
- * @stx:	Specify if Start Of Frame eXtention is used.
+ * @head_room:	Head space needed by link specific protocol.
  */
-
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 		     struct net_device *dev, struct cflayer *phy_layer,
 		     enum cfcnfg_phy_preference pref,
-		     bool fcs, bool stx);
+		     struct cflayer *link_support,
+		     bool fcs, int head_room);
 
 /**
  * cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack.
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index b837432..f121299 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -8,5 +8,5 @@
 #define CFSERL_H_
 #include <net/caif/caif_layer.h>
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx);
-#endif				/* CFSERL_H_ */
+struct cflayer *cfserl_create(int instance, bool use_stx);
+#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 95852e3..15f4be7 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -391,6 +391,8 @@
  * @assocresp_ies: extra information element(s) to add into (Re)Association
  *	Response frames or %NULL
  * @assocresp_ies_len: length of assocresp_ies in octets
+ * @probe_resp_len: length of probe response template (@probe_resp)
+ * @probe_resp: probe response template (AP mode only)
  */
 struct beacon_parameters {
 	u8 *head, *tail;
@@ -408,6 +410,8 @@
 	size_t proberesp_ies_len;
 	const u8 *assocresp_ies;
 	size_t assocresp_ies_len;
+	int probe_resp_len;
+	u8 *probe_resp;
 };
 
 /**
@@ -501,6 +505,7 @@
  * @STATION_INFO_CONNECTED_TIME: @connected_time filled
  * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
  * @STATION_INFO_STA_FLAGS: @sta_flags filled
+ * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
  */
 enum station_info_flags {
 	STATION_INFO_INACTIVE_TIME	= 1<<0,
@@ -521,7 +526,8 @@
 	STATION_INFO_BSS_PARAM          = 1<<15,
 	STATION_INFO_CONNECTED_TIME	= 1<<16,
 	STATION_INFO_ASSOC_REQ_IES	= 1<<17,
-	STATION_INFO_STA_FLAGS		= 1<<18
+	STATION_INFO_STA_FLAGS		= 1<<18,
+	STATION_INFO_BEACON_LOSS_COUNT	= 1<<19
 };
 
 /**
@@ -619,6 +625,7 @@
  *	the cfg80211_new_sta() calls to notify user space of the IEs.
  * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
  * @sta_flags: station flags mask & values
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
  */
 struct station_info {
 	u32 filled;
@@ -646,6 +653,8 @@
 	const u8 *assoc_req_ies;
 	size_t assoc_req_ies_len;
 
+	u32 beacon_loss_count;
+
 	/*
 	 * Note: Add a new enum station_info_flags value for each new field and
 	 * use it to check which fields are initialized.
@@ -778,6 +787,7 @@
 	u16 min_discovery_timeout;
 	u32 dot11MeshHWMPactivePathTimeout;
 	u16 dot11MeshHWMPpreqMinInterval;
+	u16 dot11MeshHWMPperrMinInterval;
 	u16 dot11MeshHWMPnetDiameterTraversalTime;
 	u8  dot11MeshHWMPRootMode;
 	u16 dot11MeshHWMPRannInterval;
@@ -798,6 +808,7 @@
  * @ie_len: length of vendor information elements
  * @is_authenticated: this mesh requires authentication
  * @is_secure: this mesh uses security
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
  *
  * These parameters are fixed when the mesh is created.
  */
@@ -810,6 +821,7 @@
 	u8 ie_len;
 	bool is_authenticated;
 	bool is_secure;
+	int mcast_rate[IEEE80211_NUM_BANDS];
 };
 
 /**
@@ -1040,6 +1052,15 @@
 };
 
 /**
+ * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
+ *
+ * @ASSOC_REQ_DISABLE_HT:  Disable HT (802.11n)
+ */
+enum cfg80211_assoc_req_flags {
+	ASSOC_REQ_DISABLE_HT		= BIT(0),
+};
+
+/**
  * struct cfg80211_assoc_request - (Re)Association request data
  *
  * This structure provides information needed to complete IEEE 802.11
@@ -1050,6 +1071,10 @@
  * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
  * @crypto: crypto settings
  * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @flags:  See &enum cfg80211_assoc_req_flags
+ * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
+ *   will be used in ht_capa.  Un-supported values will be ignored.
+ * @ht_capa_mask:  The bits of ht_capa which are to be used.
  */
 struct cfg80211_assoc_request {
 	struct cfg80211_bss *bss;
@@ -1057,6 +1082,9 @@
 	size_t ie_len;
 	struct cfg80211_crypto_settings crypto;
 	bool use_mfp;
+	u32 flags;
+	struct ieee80211_ht_cap ht_capa;
+	struct ieee80211_ht_cap ht_capa_mask;
 };
 
 /**
@@ -1126,6 +1154,7 @@
 	u8 *ssid;
 	u8 *bssid;
 	struct ieee80211_channel *channel;
+	enum nl80211_channel_type channel_type;
 	u8 *ie;
 	u8 ssid_len, ie_len;
 	u16 beacon_interval;
@@ -1155,6 +1184,10 @@
  * @key_len: length of WEP key for shared key authentication
  * @key_idx: index of WEP key for shared key authentication
  * @key: WEP key for shared key authentication
+ * @flags:  See &enum cfg80211_assoc_req_flags
+ * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
+ *   will be used in ht_capa.  Un-supported values will be ignored.
+ * @ht_capa_mask:  The bits of ht_capa which are to be used.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -1168,6 +1201,9 @@
 	struct cfg80211_crypto_settings crypto;
 	const u8 *key;
 	u8 key_len, key_idx;
+	u32 flags;
+	struct ieee80211_ht_cap ht_capa;
+	struct ieee80211_ht_cap ht_capa_mask;
 };
 
 /**
@@ -1315,7 +1351,12 @@
  *
  * @add_station: Add a new station.
  * @del_station: Remove a station; @mac may be NULL to remove all stations.
- * @change_station: Modify a given station.
+ * @change_station: Modify a given station. Note that flags changes are not much
+ *	validated in cfg80211, in particular the auth/assoc/authorized flags
+ *	might come to the driver in invalid combinations -- make sure to check
+ *	them, also against the existing state! Also, supported_rates changes are
+ *	not checked in station mode -- drivers need to reject (or ignore) them
+ *	for anything but TDLS peers.
  * @get_station: get station information for the station identified by @mac
  * @dump_station: dump station callback -- resume dump at index @idx
  *
@@ -1342,6 +1383,9 @@
  *	doesn't verify much. Note, however, that the passed netdev may be
  *	%NULL as well if the user requested changing the channel for the
  *	device itself, or for a monitor interface.
+ * @get_channel: Get the current operating channel; should return %NULL if
+ *	there is no single defined operating channel, for example when the
+ *	device implements channel hopping for multi-channel virtual interfaces.
  *
  * @scan: Request to do a scan. If returning zero, the scan request is given
  *	the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1369,7 +1413,8 @@
  *	have changed. The actual parameter values are available in
  *	struct wiphy. If returning an error, no value should be changed.
  *
- * @set_tx_power: set the transmit power according to the parameters
+ * @set_tx_power: set the transmit power according to the parameters;
+ *	the power passed is in mBm (use MBM_TO_DBM() to convert to dBm).
  * @get_tx_power: store the current TX power into the dbm variable;
  *	return 0 if successful
  *
@@ -1432,6 +1477,11 @@
  *
  * @tdls_mgmt: Transmit a TDLS management frame.
  * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
+ *
+ * @probe_client: probe an associated client, must return a cookie that it
+ *	later passes to cfg80211_probe_status().
+ *
+ * @set_noack_map: Set the NoAck Map for the TIDs.
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1585,7 +1635,7 @@
 			  enum nl80211_channel_type channel_type,
 			  bool channel_type_valid, unsigned int wait,
 			  const u8 *buf, size_t len, bool no_cck,
-			  u64 *cookie);
+			  bool dont_wait_for_ack, u64 *cookie);
 	int	(*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
 				       struct net_device *dev,
 				       u64 cookie);
@@ -1621,6 +1671,15 @@
 			     u16 status_code, const u8 *buf, size_t len);
 	int	(*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
 			     u8 *peer, enum nl80211_tdls_operation oper);
+
+	int	(*probe_client)(struct wiphy *wiphy, struct net_device *dev,
+				const u8 *peer, u64 *cookie);
+
+	int	(*set_noack_map)(struct wiphy *wiphy,
+				  struct net_device *dev,
+				  u16 noack_map);
+
+	struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
 };
 
 /*
@@ -1645,7 +1704,9 @@
  *	regulatory domain no user regulatory domain can enable these channels
  *	at a later time. This can be used for devices which do not have
  *	calibration information guaranteed for frequencies or settings
- *	outside of its regulatory domain.
+ *	outside of its regulatory domain. If used in combination with
+ *	WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
+ *	will be followed.
  * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
  *	that passive scan flags and beaconing flags may not be lifted by
  *	cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -1679,6 +1740,14 @@
  *	teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
  *	command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
  *	used for asking the driver/firmware to perform a TDLS operation.
+ * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
+ * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
+ *	when there are virtual interfaces in AP mode by calling
+ *	cfg80211_report_obss_beacon().
+ * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
+ *	responds to probe-requests in hardware.
+ * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
+ * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
  */
 enum wiphy_flags {
 	WIPHY_FLAG_CUSTOM_REGULATORY		= BIT(0),
@@ -1697,6 +1766,11 @@
 	WIPHY_FLAG_AP_UAPSD			= BIT(14),
 	WIPHY_FLAG_SUPPORTS_TDLS		= BIT(15),
 	WIPHY_FLAG_TDLS_EXTERNAL_SETUP		= BIT(16),
+	WIPHY_FLAG_HAVE_AP_SME			= BIT(17),
+	WIPHY_FLAG_REPORTS_OBSS			= BIT(18),
+	WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD	= BIT(19),
+	WIPHY_FLAG_OFFCHAN_TX			= BIT(20),
+	WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL	= BIT(21),
 };
 
 /**
@@ -1869,6 +1943,7 @@
  * @software_iftypes: bitmask of software interface types, these are not
  *	subject to any restrictions since they are purely managed in SW.
  * @flags: wiphy flags, see &enum wiphy_flags
+ * @features: features advertised to nl80211, see &enum nl80211_feature_flags.
  * @bss_priv_size: each BSS struct has private data allocated with it,
  *	this variable determines its size
  * @max_scan_ssids: maximum number of SSIDs the device can scan for in
@@ -1907,6 +1982,10 @@
  *	may request, if implemented.
  *
  * @wowlan: WoWLAN support information
+ *
+ * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
+ * @ht_capa_mod_mask:  Specify what ht_cap values can be over-ridden.
+ *	If null, then none can be over-ridden.
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -1928,7 +2007,9 @@
 	/* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
 	u16 interface_modes;
 
-	u32 flags;
+	u32 flags, features;
+
+	u32 ap_sme_capa;
 
 	enum cfg80211_signal_type signal_type;
 
@@ -1960,6 +2041,13 @@
 	u32 available_antennas_tx;
 	u32 available_antennas_rx;
 
+	/*
+	 * Bitmap of supported protocols for probe response offloading
+	 * see &enum nl80211_probe_resp_offload_support_attr. Only valid
+	 * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+	 */
+	u32 probe_resp_offload;
+
 	/* If multiple wiphys are registered and you're handed e.g.
 	 * a regular netdev with assigned ieee80211_ptr, you won't
 	 * know whether it points to a wiphy your driver has registered
@@ -1987,6 +2075,8 @@
 	/* dir in debugfs: ieee80211/<wiphyname> */
 	struct dentry *debugfsdir;
 
+	const struct ieee80211_ht_cap *ht_capa_mod_mask;
+
 #ifdef CONFIG_NET_NS
 	/* the network namespace this phy lives in currently */
 	struct net *_net;
@@ -2183,6 +2273,8 @@
 
 	int beacon_interval;
 
+	u32 ap_unexpected_nlpid;
+
 #ifdef CONFIG_CFG80211_WEXT
 	/* wext data */
 	struct {
@@ -2349,69 +2441,6 @@
 extern const unsigned char rfc1042_header[6];
 extern const unsigned char bridge_tunnel_header[6];
 
-/* Parsed Information Elements */
-struct ieee802_11_elems {
-	u8 *ie_start;
-	size_t total_len;
-
-	/* pointers to IEs */
-	u8 *ssid;
-	u8 *supp_rates;
-	u8 *fh_params;
-	u8 *ds_params;
-	u8 *cf_params;
-	struct ieee80211_tim_ie *tim;
-	u8 *ibss_params;
-	u8 *challenge;
-	u8 *wpa;
-	u8 *rsn;
-	u8 *erp_info;
-	u8 *ext_supp_rates;
-	u8 *wmm_info;
-	u8 *wmm_param;
-	struct ieee80211_ht_cap *ht_cap_elem;
-	struct ieee80211_ht_info *ht_info_elem;
-	struct ieee80211_meshconf_ie *mesh_config;
-	u8 *mesh_id;
-	u8 *peering;
-	u8 *preq;
-	u8 *prep;
-	u8 *perr;
-	struct ieee80211_rann_ie *rann;
-	u8 *ch_switch_elem;
-	u8 *country_elem;
-	u8 *pwr_constr_elem;
-	u8 *quiet_elem;	/* first quite element */
-	u8 *timeout_int;
-
-	/* length of them, respectively */
-	u8 ssid_len;
-	u8 supp_rates_len;
-	u8 fh_params_len;
-	u8 ds_params_len;
-	u8 cf_params_len;
-	u8 tim_len;
-	u8 ibss_params_len;
-	u8 challenge_len;
-	u8 wpa_len;
-	u8 rsn_len;
-	u8 erp_info_len;
-	u8 ext_supp_rates_len;
-	u8 wmm_info_len;
-	u8 wmm_param_len;
-	u8 mesh_id_len;
-	u8 peering_len;
-	u8 preq_len;
-	u8 prep_len;
-	u8 perr_len;
-	u8 ch_switch_elem_len;
-	u8 country_elem_len;
-	u8 pwr_constr_elem_len;
-	u8 quiet_elem_len;
-	u8 num_of_quiet_elem;	/* can be more the one */
-	u8 timeout_int_len;
-};
-
 /**
  * ieee80211_get_hdrlen_from_skb - get header length from data
  *
@@ -2636,8 +2665,10 @@
  *
  * This informs cfg80211 that BSS information was found and
  * the BSS should be updated/added.
+ *
+ * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
  */
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
 			  struct ieee80211_channel *channel,
 			  struct ieee80211_mgmt *mgmt, size_t len,
@@ -2659,8 +2690,10 @@
  *
  * This informs cfg80211 that BSS information was found and
  * the BSS should be updated/added.
+ *
+ * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
  */
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
 cfg80211_inform_bss(struct wiphy *wiphy,
 		    struct ieee80211_channel *channel,
 		    const u8 *bssid,
@@ -3043,6 +3076,32 @@
 		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
 
 /**
+ * cfg80211_roamed_bss - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @bss: entry of bss to which STA got roamed
+ * @req_ie: association request IEs (maybe be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @gfp: allocation flags
+ *
+ * This is just a wrapper to notify cfg80211 of a roaming event, with the
+ * driver passing the bss entry to avoid a race in its timeout. It should be
+ * called by the underlying driver whenever it roams from one AP to another
+ * while connected. Drivers which have roaming implemented in firmware
+ * may use this function to avoid a race in bss entry timeout where the bss
+ * entry of the new AP is seen in the driver, but gets timed out by the time
+ * it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * rdev->event_work. In case of any failures, the reference is released
+ * either in cfg80211_roamed_bss() or in __cfg80211_roamed(). Otherwise,
+ * it will be released while disconnecting from the current bss.
+ */
+void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
+			 const u8 *req_ie, size_t req_ie_len,
+			 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+
+/**
  * cfg80211_disconnected - notify cfg80211 that connection was dropped
  *
  * @dev: network device
@@ -3189,6 +3248,74 @@
 void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
 				     const u8 *bssid, bool preauth, gfp_t gfp);
 
+/**
+ * cfg80211_rx_spurious_frame - inform userspace about a spurious frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * a spurious class 3 frame was received, to be able to deauth the
+ * sender.
+ * Returns %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+				const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * an associated station sent a 4addr frame but that wasn't expected.
+ * It is allowed and desirable to send this event only once for each
+ * station to avoid event flooding.
+ * Returns %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+					const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_probe_status - notify userspace about probe status
+ * @dev: the device the probe was sent on
+ * @addr: the address of the peer
+ * @cookie: the cookie filled in @probe_client previously
+ * @acked: indicates whether probe was acked or not
+ * @gfp: allocation flags
+ */
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+			   u64 cookie, bool acked, gfp_t gfp);
+
+/**
+ * cfg80211_report_obss_beacon - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on
+ * @gfp: allocation flags
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+				 const u8 *frame, size_t len,
+				 int freq, gfp_t gfp);
+
+/**
+ * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
+ * @wiphy: the wiphy
+ * @chan: main channel
+ * @channel_type: HT mode
+ */
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+				 struct ieee80211_channel *chan,
+				 enum nl80211_channel_type channel_type);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
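
Because cfg80211_inform_bss() and cfg80211_inform_bss_frame() are now __must_check and hand back a referenced entry, every caller has to balance that reference with cfg80211_put_bss(), as the added NOTEs say. A rough driver-side sketch of the expected pattern follows; the surrounding function and variable names are invented for illustration and error handling is minimal.

/* Sketch only: locking and the rest of a real driver's RX path omitted. */
static void example_rx_beacon(struct wiphy *wiphy,
			      struct ieee80211_channel *chan,
			      struct ieee80211_mgmt *mgmt, size_t len,
			      s32 signal_mbm)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, chan, mgmt, len,
					signal_mbm, GFP_KERNEL);
	if (!bss)
		return;			/* allocation or parse failure */

	/* ... use bss, e.g. remember bss->bssid ... */

	cfg80211_put_bss(bss);		/* balance the reference taken above */
}
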
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 839f768..7828ebf 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -11,6 +11,11 @@
 #ifndef __LINUX_NET_DSA_H
 #define __LINUX_NET_DSA_H
 
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
 #define DSA_MAX_SWITCHES	4
 #define DSA_MAX_PORTS		12
 
@@ -54,8 +59,143 @@
 	struct dsa_chip_data	*chip;
 };
 
-extern bool dsa_uses_dsa_tags(void *dsa_ptr);
-extern bool dsa_uses_trailer_tags(void *dsa_ptr);
+struct dsa_switch_tree {
+	/*
+	 * Configuration data for the platform device that owns
+	 * this dsa switch tree instance.
+	 */
+	struct dsa_platform_data	*pd;
 
+	/*
+	 * Reference to network device to use, and which tagging
+	 * protocol to use.
+	 */
+	struct net_device	*master_netdev;
+	__be16			tag_protocol;
+
+	/*
+	 * The switch and port to which the CPU is attached.
+	 */
+	s8			cpu_switch;
+	s8			cpu_port;
+
+	/*
+	 * Link state polling.
+	 */
+	int			link_poll_needed;
+	struct work_struct	link_poll_work;
+	struct timer_list	link_poll_timer;
+
+	/*
+	 * Data for the individual switch chips.
+	 */
+	struct dsa_switch	*ds[DSA_MAX_SWITCHES];
+};
+
+struct dsa_switch {
+	/*
+	 * Parent switch tree, and switch index.
+	 */
+	struct dsa_switch_tree	*dst;
+	int			index;
+
+	/*
+	 * Configuration data for this switch.
+	 */
+	struct dsa_chip_data	*pd;
+
+	/*
+	 * The used switch driver.
+	 */
+	struct dsa_switch_driver	*drv;
+
+	/*
+	 * Reference to mii bus to use.
+	 */
+	struct mii_bus		*master_mii_bus;
+
+	/*
+	 * Slave mii_bus and devices for the individual ports.
+	 */
+	u32			dsa_port_mask;
+	u32			phys_port_mask;
+	struct mii_bus		*slave_mii_bus;
+	struct net_device	*ports[DSA_MAX_PORTS];
+};
+
+static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
+{
+	return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+}
+
+static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+{
+	struct dsa_switch_tree *dst = ds->dst;
+
+	/*
+	 * If this is the root switch (i.e. the switch that connects
+	 * to the CPU), return the cpu port number on this switch.
+	 * Else return the (DSA) port number that connects to the
+	 * switch that is one hop closer to the cpu.
+	 */
+	if (dst->cpu_switch == ds->index)
+		return dst->cpu_port;
+	else
+		return ds->pd->rtable[dst->cpu_switch];
+}
+
+struct dsa_switch_driver {
+	struct list_head	list;
+
+	__be16			tag_protocol;
+	int			priv_size;
+
+	/*
+	 * Probing and setup.
+	 */
+	char	*(*probe)(struct mii_bus *bus, int sw_addr);
+	int	(*setup)(struct dsa_switch *ds);
+	int	(*set_addr)(struct dsa_switch *ds, u8 *addr);
+
+	/*
+	 * Access to the switch's PHY registers.
+	 */
+	int	(*phy_read)(struct dsa_switch *ds, int port, int regnum);
+	int	(*phy_write)(struct dsa_switch *ds, int port,
+			     int regnum, u16 val);
+
+	/*
+	 * Link state polling and IRQ handling.
+	 */
+	void	(*poll_link)(struct dsa_switch *ds);
+
+	/*
+	 * ethtool hardware statistics.
+	 */
+	void	(*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
+	void	(*get_ethtool_stats)(struct dsa_switch *ds,
+				     int port, uint64_t *data);
+	int	(*get_sset_count)(struct dsa_switch *ds);
+};
+
+void register_switch_driver(struct dsa_switch_driver *type);
+void unregister_switch_driver(struct dsa_switch_driver *type);
+
+/*
+ * The original DSA tag format and some other tag formats have no
+ * ethertype, which means that we need to add a little hack to the
+ * networking receive path to make sure that received frames get
+ * the right ->protocol assigned to them when one of those tag
+ * formats is in use.
+ */
+static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
+{
+	return !!(dst->tag_protocol == htons(ETH_P_DSA));
+}
+
+static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+{
+	return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+}
 
 #endif
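
With struct dsa_switch_driver and register_switch_driver()/unregister_switch_driver() now exposed in this header, a switch driver fills in its ops and registers itself at module init. A hedged sketch of the minimal boilerplate; the example_* names, the trailer tagging choice and the empty op bodies are assumptions for illustration, not something this patch adds.

/* Sketch of a minimal DSA switch driver registration (kernel context). */
static char *example_dsa_probe(struct mii_bus *bus, int sw_addr)
{
	/* Return a human-readable chip name if the device at sw_addr is ours. */
	return NULL;	/* "not found" in this stub */
}

static int example_dsa_setup(struct dsa_switch *ds)
{
	return 0;
}

static int example_dsa_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}

static struct dsa_switch_driver example_dsa_driver = {
	.tag_protocol	= cpu_to_be16(ETH_P_TRAILER),
	.probe		= example_dsa_probe,
	.setup		= example_dsa_setup,
	.set_addr	= example_dsa_set_addr,
};

static int __init example_dsa_init(void)
{
	register_switch_driver(&example_dsa_driver);
	return 0;
}
module_init(example_dsa_init);

static void __exit example_dsa_exit(void)
{
	unregister_switch_driver(&example_dsa_driver);
}
module_exit(example_dsa_exit);
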
diff --git a/include/net/dst.h b/include/net/dst.h
index 6faec1a..344c8dd 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -53,6 +53,7 @@
 #define DST_NOHASH		0x0008
 #define DST_NOCACHE		0x0010
 #define DST_NOCOUNT		0x0020
+#define DST_NOPEER		0x0040
 
 	short			error;
 	short			obsolete;
@@ -86,12 +87,12 @@
 	};
 };
 
-static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
 {
 	return rcu_dereference(dst->_neighbour);
 }
 
-static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
 {
 	return rcu_dereference_raw(dst->_neighbour);
 }
@@ -392,7 +393,7 @@
 		struct neighbour *n;
 
 		rcu_read_lock();
-		n = dst_get_neighbour(dst);
+		n = dst_get_neighbour_noref(dst);
 		neigh_confirm(n);
 		rcu_read_unlock();
 	}
diff --git a/include/net/flow.h b/include/net/flow.h
index a094477..da1f064 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -59,8 +59,11 @@
 #define flowi4_proto		__fl_common.flowic_proto
 #define flowi4_flags		__fl_common.flowic_flags
 #define flowi4_secid		__fl_common.flowic_secid
-	__be32			daddr;
+
+	/* (saddr,daddr) must be grouped, same order as in IP header */
 	__be32			saddr;
+	__be32			daddr;
+
 	union flowi_uli		uli;
 #define fl4_sport		uli.ports.sport
 #define fl4_dport		uli.ports.dport
@@ -207,6 +210,7 @@
 		u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
new file mode 100644
index 0000000..80461c1
--- /dev/null
+++ b/include/net/flow_keys.h
@@ -0,0 +1,16 @@
+#ifndef _NET_FLOW_KEYS_H
+#define _NET_FLOW_KEYS_H
+
+struct flow_keys {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+	__be32 src;
+	__be32 dst;
+	union {
+		__be32 ports;
+		__be16 port16[2];
+	};
+	u8 ip_proto;
+};
+
+extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+#endif
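
The new flow dissector yields a compact (src, dst, ports, ip_proto) tuple whose fields are grouped so they hash well together. Roughly how an rxhash or queue-selection style caller might use it; this is a kernel-context sketch, and the surrounding function name and hashrnd seed are placeholders.

/* Sketch: hash a packet's flow using the new dissector (kernel context). */
#include <net/flow_keys.h>
#include <linux/jhash.h>

static u32 example_flow_hash(const struct sk_buff *skb, u32 hashrnd)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return 0;	/* not a dissectable IPv4/IPv6 packet */

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;	/* reserve 0 for "no hash computed" */
	return hash;
}
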
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 82d8d09..7db3299 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -128,6 +128,8 @@
 				  struct genl_multicast_group *grp);
 extern void genl_unregister_mc_group(struct genl_family *family,
 				     struct genl_multicast_group *grp);
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+			u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
 /**
  * genlmsg_put - Add generic netlink header to netlink message
diff --git a/include/net/icmp.h b/include/net/icmp.h
index f0698b9..75d6156 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -31,8 +31,8 @@
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 7e2c4d4..7139254 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -271,14 +271,6 @@
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC		0x10
 
 
-/* Ugly macro to convert literal channel numbers into their mhz equivalents
- * There are certianly some conditions that will break this (like feeding it '30')
- * but they shouldn't arise since nothing talks on channel 30. */
-#define ieee80211chan2mhz(x) \
-	(((x) <= 14) ? \
-	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
-	((x) + 1000) * 5)
-
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
 {
diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h
index d52685d..ee59f8b 100644
--- a/include/net/ieee802154.h
+++ b/include/net/ieee802154.h
@@ -21,11 +21,14 @@
  * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
  * Maxim Osipov <maxim.osipov@siemens.com>
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  */
 
 #ifndef NET_IEEE802154_H
 #define NET_IEEE802154_H
 
+#define IEEE802154_MTU			127
+
 #define IEEE802154_FC_TYPE_BEACON	0x0	/* Frame is beacon */
 #define	IEEE802154_FC_TYPE_DATA		0x1	/* Frame is data */
 #define IEEE802154_FC_TYPE_ACK		0x2	/* Frame is acknowledgment */
@@ -56,6 +59,9 @@
 	(((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
 
 
+/* MAC footer size */
+#define IEEE802154_MFR_SIZE	2 /* 2 octets */
+
 /* MAC's Command Frames Identifiers */
 #define IEEE802154_CMD_ASSOCIATION_REQ		0x01
 #define IEEE802154_CMD_ASSOCIATION_RESP		0x02
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index e46674d..00cbb43 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -15,7 +15,7 @@
 #define _INET6_HASHTABLES_H
 
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #include <linux/ipv6.h>
 #include <linux/types.h>
@@ -110,5 +110,5 @@
 				 const struct in6_addr *saddr, const __be16 sport,
 				 const struct in6_addr *daddr, const __be16 dport,
 				 const int dif);
-#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
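
IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built in or built as a module, which is exactly what the old two-symbol test spelled out by hand, so the conversions in this and the following headers are mechanical. For reference (illustrative snippet, not from the patch):

/* Old style: enumerate both the built-in and the module symbol. */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* ... IPv6-only declarations ... */
#endif

/* New style: one macro covers CONFIG_IPV6=y and CONFIG_IPV6=m. */
#if IS_ENABLED(CONFIG_IPV6)
/* ... IPv6-only declarations ... */
#endif
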
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e6db62e..dbf9aab 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -143,9 +143,9 @@
 	return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone(struct sock *sk,
-				   const struct request_sock *req,
-				   const gfp_t priority);
+extern struct sock *inet_csk_clone_lock(const struct sock *sk,
+					const struct request_sock *req,
+					const gfp_t priority);
 
 enum inet_csk_ack_state_t {
 	ICSK_ACK_SCHED	= 1,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index f941964..e3e4051 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -71,7 +71,7 @@
 
 struct inet_request_sock {
 	struct request_sock	req;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	u16			inet6_rsk_offset;
 #endif
 	__be16			loc_port;
@@ -139,7 +139,7 @@
 struct inet_sock {
 	/* sk and pinet6 has to be the first two members of inet_sock */
 	struct sock		sk;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6_pinfo	*pinet6;
 #endif
 	/* Socket demultiplex comparisons on incoming packets. */
@@ -188,7 +188,7 @@
 	memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
 	       sk_from->sk_prot->obj_size - ancestor_size);
 }
-#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
+#if !(IS_ENABLED(CONFIG_IPV6))
 static inline void inet_sk_copy_descendant(struct sock *sk_to,
 					   const struct sock *sk_from)
 {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index e8c25b9..ba52c83 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -218,20 +218,12 @@
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
 {
-#ifdef CONFIG_NET_NS
-	return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
-						  /* reference counting, */
-						  /* initialization, or RCU. */
-#else
-	return &init_net;
-#endif
+	return read_pnet(&twsk->tw_net);
 }
 
 static inline
 void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-	rcu_assign_pointer(twsk->tw_net, net);
-#endif
+	write_pnet(&twsk->tw_net, net);
 }
 #endif	/* _INET_TIMEWAIT_SOCK_ */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index e9ff3fc..06b795d 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -87,7 +87,7 @@
 {
 	struct inetpeer_addr daddr;
 
-	ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
+	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
 	daddr.family = AF_INET6;
 	return inet_getpeer(&daddr, create);
 }
diff --git a/include/net/ip.h b/include/net/ip.h
index eca0ef7..775009f 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -353,14 +353,14 @@
 		memcpy(buf, &naddr, sizeof(naddr));
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
 static __inline__ void inet_reset_saddr(struct sock *sk)
 {
 	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (sk->sk_family == PF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -379,7 +379,7 @@
 	switch (sk->sk_family) {
 	case AF_INET:
 		return inet_sk(sk)->mc_loop;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		return inet6_sk(sk)->mc_loop;
 #endif
@@ -450,7 +450,7 @@
  *	Functions provided by ip_sockglue.c
  */
 
-extern int	ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+extern void	ipv4_pktinfo_prepare(struct sk_buff *skb);
 extern void	ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
 extern int	ip_cmsg_send(struct net *net,
 			     struct msghdr *msg, struct ipcm_cookie *ipc);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5735a0f..b26bb81 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -86,9 +86,6 @@
 struct rt6_info {
 	struct dst_entry		dst;
 
-#define rt6i_dev			dst.dev
-#define rt6i_expires			dst.expires
-
 	/*
 	 * Tail elements of dst_entry (__refcnt etc.)
 	 * and these elements (rarely used in hot path) are in
@@ -202,6 +199,10 @@
 					     const struct in6_addr *daddr, int dst_len,
 					     const struct in6_addr *saddr, int src_len);
 
+extern void			fib6_clean_all_ro(struct net *net,
+					       int (*func)(struct rt6_info *, void *arg),
+					       int prune, void *arg);
+
 extern void			fib6_clean_all(struct net *net,
 					       int (*func)(struct rt6_info *, void *arg),
 					       int prune, void *arg);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5e91b72..2ad92ca 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -70,6 +70,8 @@
 extern struct dst_entry *	ip6_route_output(struct net *net,
 						 const struct sock *sk,
 						 struct flowi6 *fl6);
+extern struct dst_entry *	ip6_route_lookup(struct net *net,
+						 struct flowi6 *fl6, int flags);
 
 extern int			ip6_route_init(void);
 extern void			ip6_route_cleanup(void);
@@ -95,14 +97,14 @@
 
 extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 					 struct neighbour *neigh,
-					 const struct in6_addr *addr);
+					 struct flowi6 *fl6);
 extern int icmp6_dst_gc(void);
 
 extern void fib6_force_start_gc(struct net *net);
 
 extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 					   const struct in6_addr *addr,
-					   int anycast);
+					   bool anycast);
 
 extern int			ip6_dst_hoplimit(struct dst_entry *dst);
 
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 873d5be..ebe517f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -21,7 +21,7 @@
 #include <linux/netfilter.h>		/* for union nf_inet_addr */
 #include <linux/ip.h>
 #include <linux/ipv6.h>			/* for struct ipv6hdr */
-#include <net/ipv6.h>			/* for ipv6_addr_copy */
+#include <net/ipv6.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -119,8 +119,8 @@
 		const struct ipv6hdr *iph = nh;
 		iphdr->len = sizeof(struct ipv6hdr);
 		iphdr->protocol = iph->nexthdr;
-		ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
-		ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
+		iphdr->saddr.in6 = iph->saddr;
+		iphdr->daddr.in6 = iph->daddr;
 	} else
 #endif
 	{
@@ -137,7 +137,7 @@
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6)
-		ipv6_addr_copy(&dst->in6, &src->in6);
+		dst->in6 = src->in6;
 	else
 #endif
 	dst->ip = src->ip;
@@ -1207,7 +1207,7 @@
 extern struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
 		__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
-		__u16 protocol, __u32 fwmark);
+		__u16 protocol, __u32 fwmark, __u32 flags);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a366a8a..e4170a2 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -132,6 +132,15 @@
 	SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
 })
 
+/* per device and per net counters are atomic_long_t */
+#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)		\
+({									\
+	struct inet6_dev *_idev = (idev);				\
+	if (likely(_idev != NULL))					\
+		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
+})
+
 #define _DEVADD(net, statname, modifier, idev, field, val)		\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
@@ -168,11 +177,11 @@
 		_DEVINCATOMIC(net, icmpv6, _BH, idev, field)
 
 #define ICMP6MSGOUT_INC_STATS(net, idev, field)		\
-	_DEVINCATOMIC(net, icmpv6msg, , idev, field +256)
+	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
 #define ICMP6MSGOUT_INC_STATS_BH(net, idev, field)	\
-	_DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256)
+	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
 #define ICMP6MSGIN_INC_STATS_BH(net, idev, field)	\
-	_DEVINCATOMIC(net, icmpv6msg, _BH, idev, field)
+	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 
 struct ip6_ra_chain {
 	struct ip6_ra_chain	*next;
@@ -300,11 +309,6 @@
 		  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
 }
 
-static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
-{
-	memcpy(a1, a2, sizeof(struct in6_addr));
-}
-
 static inline void ipv6_addr_prefix(struct in6_addr *pfx, 
 				    const struct in6_addr *addr,
 				    int plen)
@@ -554,7 +558,7 @@
 						    u8 *proto);
 
 extern int			ipv6_skip_exthdr(const struct sk_buff *, int start,
-					         u8 *nexthdrp);
+					         u8 *nexthdrp, __be16 *frag_offp);
 
 extern int 			ipv6_ext_hdr(u8 nexthdr);
 
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f2419cf..0954ec9 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -27,7 +27,6 @@
 	IUCV_OPEN,
 	IUCV_BOUND,
 	IUCV_LISTEN,
-	IUCV_SEVERED,
 	IUCV_DISCONN,
 	IUCV_CLOSING,
 	IUCV_CLOSED
@@ -146,7 +145,6 @@
 			    poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int  iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
 void iucv_accept_unlink(struct sock *sk);
 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 72eddd1..d49928b 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -166,6 +166,7 @@
  *	that it is only ever disabled for station mode.
  * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
+ * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
  */
 enum ieee80211_bss_change {
 	BSS_CHANGED_ASSOC		= 1<<0,
@@ -184,6 +185,7 @@
 	BSS_CHANGED_QOS			= 1<<13,
 	BSS_CHANGED_IDLE		= 1<<14,
 	BSS_CHANGED_SSID		= 1<<15,
+	BSS_CHANGED_AP_PROBE_RESP	= 1<<16,
 
 	/* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -518,7 +520,7 @@
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
  * @antenna_sel_tx: antenna to use, 0 for automatic diversity
- * @pad: padding, ignore
+ * @ack_frame_id: internal frame ID for TX status, used internally
  * @control: union for control data
  * @status: union for status data
  * @driver_data: array of driver_data pointers
@@ -535,8 +537,7 @@
 
 	u8 antenna_sel_tx;
 
-	/* 2 byte hole */
-	u8 pad[2];
+	u16 ack_frame_id;
 
 	union {
 		struct {
@@ -901,6 +902,10 @@
  * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
  *	CCMP key if it requires CCMP encryption of management frames (MFP) to
  *	be done in software.
+ * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
+ *	for a CCMP key if space should be prepared for the IV, but the IV
+ *	itself should not be generated. Do not set together with
+ *	@IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
  */
 enum ieee80211_key_flags {
 	IEEE80211_KEY_FLAG_WMM_STA	= 1<<0,
@@ -908,6 +913,7 @@
 	IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
 	IEEE80211_KEY_FLAG_PAIRWISE	= 1<<3,
 	IEEE80211_KEY_FLAG_SW_MGMT	= 1<<4,
+	IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
 };
 
 /**
@@ -1304,6 +1310,16 @@
 }
 
 /**
+ * ieee80211_free_txskb - free TX skb
+ * @hw: the hardware
+ * @skb: the skb
+ *
+ * Free a transmit skb. Use this function when some failure
+ * to transmit happened and thus status cannot be reported.
+ */
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+/**
  * DOC: Hardware crypto acceleration
  *
  * mac80211 is capable of taking advantage of many hardware
@@ -1423,7 +1439,7 @@
  * DOC: Beacon filter support
  *
  * Some hardware have beacon filter support to reduce host cpu wakeups
- * which will reduce system power consumption. It usuallly works so that
+ * which will reduce system power consumption. It usually works so that
  * the firmware creates a checksum of the beacon but omits all constantly
  * changing elements (TSF, TIM etc). Whenever the checksum changes the
  * beacon is forwarded to the host, otherwise it will be just dropped. That
@@ -1744,11 +1760,21 @@
  *	skb contains the buffer starting from the IEEE 802.11 header.
  *	The low-level driver should send the frame out based on
  *	configuration in the TX control data. This handler should,
- *	preferably, never fail and stop queues appropriately, more
- *	importantly, however, it must never fail for A-MPDU-queues.
- *	This function should return NETDEV_TX_OK except in very
- *	limited cases.
- *	Must be implemented and atomic.
+ *	preferably, never fail and stop queues appropriately.
+ *	This must be implemented if @tx_frags is not.
+ *	Must be atomic.
+ *
+ * @tx_frags: Called to transmit multiple fragments of a single MSDU.
+ *	This handler must consume all fragments, sending out some of
+ *	them only is useless and it can't ask for some of them to be
+ *	queued again. If the frame is not fragmented the queue has a
+ *	single SKB only. To avoid issues with the networking stack
+ *	when TX status is reported the frames should be removed from
+ *	the skb queue.
+ *	If this is used, the tx_info @vif and @sta pointers will be
+ *	invalid -- you must not use them in that case.
+ *	This must be implemented if @tx isn't.
+ *	Must be atomic.
  *
  * @start: Called before the first netdevice attached to the hardware
  *	is enabled. This should turn on the hardware and must turn on
@@ -2085,6 +2111,8 @@
  */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+	void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta, struct sk_buff_head *skbs);
 	int (*start)(struct ieee80211_hw *hw);
 	void (*stop)(struct ieee80211_hw *hw);
 #ifdef CONFIG_PM
@@ -2661,6 +2689,19 @@
 }
 
 /**
+ * ieee80211_proberesp_get - retrieve a Probe Response template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Probe Response template which can, for example, be uploaded to
+ * hardware. The destination address should be set by the caller.
+ *
+ * Can only be called in AP mode.
+ */
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif);
+
+/**
  * ieee80211_pspoll_get - retrieve a PS Poll template
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -3461,9 +3502,12 @@
  *
  * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have
  *	changed, rate control algorithm can update its internal state if needed.
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate
+ *	control algorithm needs to adjust accordingly.
  */
 enum rate_control_changed {
-	IEEE80211_RC_HT_CHANGED = BIT(0)
+	IEEE80211_RC_HT_CHANGED		= BIT(0),
+	IEEE80211_RC_SMPS_CHANGED	= BIT(1),
 };
 
 /**
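
For illustration, a hypothetical driver .tx handler using the new
ieee80211_free_txskb() declared above when a frame has to be dropped before
it ever reaches the hardware; the driver structure and the queue-full check
are made up for the sketch:

    struct mydrv_priv {				/* hypothetical driver state */
    	bool queue_full;
    };

    static void mydrv_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
    {
    	struct mydrv_priv *priv = hw->priv;

    	if (priv->queue_full) {
    		/* No TX status will be reported for this frame. */
    		ieee80211_free_txskb(hw, skb);
    		return;
    	}
    	/* ... otherwise hand the frame to the hardware ... */
    }
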
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 62beeb9..e3133c2 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -79,6 +79,42 @@
 	__u8		nd_opt_len;
 } __packed;
 
+static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
+{
+	const u32 *p32 = pkey;
+
+	return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+		(p32[1] * hash_rnd[1]) +
+		(p32[2] * hash_rnd[2]) +
+		(p32[3] * hash_rnd[3]));
+}
+
+static inline struct neighbour *__ipv6_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, const void *pkey)
+{
+	struct neigh_hash_table *nht;
+	const u32 *p32 = pkey;
+	struct neighbour *n;
+	u32 hash_val;
+
+	rcu_read_lock_bh();
+	nht = rcu_dereference_bh(tbl->nht);
+	hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+	     n != NULL;
+	     n = rcu_dereference_bh(n->next)) {
+		u32 *n32 = (u32 *) n->primary_key;
+		if (n->dev == dev &&
+		    ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+		     (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) {
+			if (!atomic_inc_not_zero(&n->refcnt))
+				n = NULL;
+			break;
+		}
+	}
+	rcu_read_unlock_bh();
+
+	return n;
+}
 
 extern int			ndisc_init(void);
 
@@ -145,13 +181,4 @@
 extern void 			inet6_ifinfo_notify(int event,
 						    struct inet6_dev *idev);
 
-static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr)
-{
-
-	if (dev)
-		return __neigh_lookup_errno(&nd_tbl, addr, dev);
-
-	return ERR_PTR(-ENODEV);
-}
-
 #endif
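
The new __ipv6_neigh_lookup() takes a reference via atomic_inc_not_zero(),
so a caller that gets a non-NULL neighbour must release it. A minimal usage
sketch (illustrative only):

    static void toy_inspect_nexthop(struct sk_buff *skb, const struct in6_addr *nexthop)
    {
    	struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, skb->dev, nexthop);

    	if (n) {
    		/* n->ha, n->nud_state, ... can be inspected here */
    		neigh_release(n);	/* drop the reference taken by the lookup */
    	}
    }
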
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 2720884..34c996f 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -59,7 +59,7 @@
 	int	reachable_time;
 	int	delay_probe_time;
 
-	int	queue_len;
+	int	queue_len_bytes;
 	int	ucast_probes;
 	int	app_probes;
 	int	mcast_probes;
@@ -99,6 +99,7 @@
 	rwlock_t		lock;
 	atomic_t		refcnt;
 	struct sk_buff_head	arp_queue;
+	unsigned int		arp_queue_len_bytes;
 	struct timer_list	timer;
 	unsigned long		used;
 	atomic_t		probes;
@@ -138,10 +139,12 @@
  *	neighbour table manipulation
  */
 
+#define NEIGH_NUM_HASH_RND	4
+
 struct neigh_hash_table {
 	struct neighbour __rcu	**hash_buckets;
 	unsigned int		hash_shift;
-	__u32			hash_rnd;
+	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
 	struct rcu_head		rcu;
 };
 
@@ -153,7 +156,7 @@
 	int			key_len;
 	__u32			(*hash)(const void *pkey,
 					const struct net_device *dev,
-					__u32 hash_rnd);
+					__u32 *hash_rnd);
 	int			(*constructor)(struct neighbour *);
 	int			(*pconstructor)(struct pneigh_entry *);
 	void			(*pdestructor)(struct pneigh_entry *);
@@ -172,12 +175,18 @@
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	struct kmem_cache	*kmem_cachep;
 	struct neigh_statistics	__percpu *stats;
 	struct neigh_hash_table __rcu *nht;
 	struct pneigh_entry	**phash_buckets;
 };
 
+#define NEIGH_PRIV_ALIGN	sizeof(long long)
+
+static inline void *neighbour_priv(const struct neighbour *n)
+{
+	return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN);
+}
+
 /* flags for neigh_update() */
 #define NEIGH_UPDATE_F_OVERRIDE			0x00000001
 #define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
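
neighbour_priv() returns the area reserved behind the neighbour key, aligned
to NEIGH_PRIV_ALIGN. A rough sketch of reaching per-neighbour private data
through it; the structure is hypothetical and reserving the space is up to
the owner of the table:

    struct my_neigh_data {			/* hypothetical per-neighbour data */
    	unsigned long last_refresh;
    };

    static void my_neigh_touch(struct neighbour *n)
    {
    	struct my_neigh_data *data = neighbour_priv(n);

    	data->last_refresh = jiffies;
    }
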
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 3bb6fa0..ee547c1 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -77,7 +77,7 @@
 	struct netns_packet	packet;
 	struct netns_unix	unx;
 	struct netns_ipv4	ipv4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netns_ipv6	ipv6;
 #endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 4e9c63a..463ae8e 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -15,8 +15,8 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 
 struct nf_conn_counter {
-	u_int64_t packets;
-	u_int64_t bytes;
+	atomic64_t packets;
+	atomic64_t bytes;
 };
 
 static inline
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 0f8a8c5..4619caa 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -91,7 +91,6 @@
 
 void nf_ct_remove_expectations(struct nf_conn *ct);
 void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
-void nf_ct_remove_userspace_expectations(void);
 
 /* Allocate space for an expectation: this is mandatory before calling
    nf_ct_expect_related.  You will have to call put afterwards. */
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 2f8fb77..aea3f82 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -12,7 +12,6 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_conntrack_tuple_common.h>
-#include <linux/netfilter_ipv4/nf_nat.h>
 #include <linux/list_nulls.h>
 
 /* A `tuple' is a structure containing the information to uniquely
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index b8872df..b4de990 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,14 +1,12 @@
 #ifndef _NF_NAT_H
 #define _NF_NAT_H
 #include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/nf_nat.h>
+#include <linux/netfilter/nf_nat.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
-
 enum nf_nat_manip_type {
-	IP_NAT_MANIP_SRC,
-	IP_NAT_MANIP_DST
+	NF_NAT_MANIP_SRC,
+	NF_NAT_MANIP_DST
 };
 
 /* SRC manip occurs POST_ROUTING or LOCAL_IN */
@@ -52,7 +50,7 @@
 
 /* Set up the info structure to map into this range. */
 extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-				      const struct nf_nat_range *range,
+				      const struct nf_nat_ipv4_range *range,
 				      enum nf_nat_manip_type maniptype);
 
 /* Is this tuple already taken? (not by us)*/
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 3dc7b98..b13d8d1 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -20,7 +20,7 @@
 static inline int nf_nat_initialized(struct nf_conn *ct,
 				     enum nf_nat_manip_type manip)
 {
-	if (manip == IP_NAT_MANIP_SRC)
+	if (manip == NF_NAT_MANIP_SRC)
 		return ct->status & IPS_SRC_NAT_DONE;
 	else
 		return ct->status & IPS_DST_NAT_DONE;
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
index 93cc90d..7b0b511 100644
--- a/include/net/netfilter/nf_nat_protocol.h
+++ b/include/net/netfilter/nf_nat_protocol.h
@@ -4,14 +4,12 @@
 #include <net/netfilter/nf_nat.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
-struct nf_nat_range;
+struct nf_nat_ipv4_range;
 
 struct nf_nat_protocol {
 	/* Protocol number. */
 	unsigned int protonum;
 
-	struct module *me;
-
 	/* Translate a packet to the target according to manip type.
 	   Return true if succeeded. */
 	bool (*manip_pkt)(struct sk_buff *skb,
@@ -30,15 +28,12 @@
 	   possible.  Per-protocol part of tuple is initialized to the
 	   incoming packet. */
 	void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
-			     const struct nf_nat_range *range,
+			     const struct nf_nat_ipv4_range *range,
 			     enum nf_nat_manip_type maniptype,
 			     const struct nf_conn *ct);
 
-	int (*range_to_nlattr)(struct sk_buff *skb,
-			       const struct nf_nat_range *range);
-
 	int (*nlattr_to_range)(struct nlattr *tb[],
-			       struct nf_nat_range *range);
+			       struct nf_nat_ipv4_range *range);
 };
 
 /* Protocol registration. */
@@ -61,14 +56,12 @@
 				  const union nf_conntrack_man_proto *max);
 
 extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-				      const struct nf_nat_range *range,
+				      const struct nf_nat_ipv4_range *range,
 				      enum nf_nat_manip_type maniptype,
 				      const struct nf_conn *ct,
 				      u_int16_t *rover);
 
-extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
-					const struct nf_nat_range *range);
 extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-					struct nf_nat_range *range);
+					struct nf_nat_ipv4_range *range);
 
 #endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index e505358..75ca929 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -131,7 +131,7 @@
 	return sk;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct sock *
 nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
 		      const struct in6_addr *saddr, const struct in6_addr *daddr,
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d786b4f..bbd023a 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -55,6 +55,7 @@
 	int current_rt_cache_rebuild_count;
 
 	unsigned int sysctl_ping_group_range[2];
+	long sysctl_tcp_mem[3];
 
 	atomic_t rt_genid;
 	atomic_t dev_addr_genid;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 0b44112..d542a4b 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -10,15 +10,15 @@
 	DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
 	DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
 	DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
-	DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+	DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct proc_dir_entry *proc_net_devsnmp6;
 	DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
 	DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
 	DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
 	DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
-	DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics);
+	DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
 #endif
 #ifdef CONFIG_XFRM_STATISTICS
 	DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 748f91f..5299e69 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -56,7 +56,7 @@
 #endif
 
 	struct dst_ops		xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct dst_ops		xfrm6_dst_ops;
 #endif
 };
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644
index 0000000..e503b87
--- /dev/null
+++ b/include/net/netprio_cgroup.h
@@ -0,0 +1,57 @@
+/*
+ * netprio_cgroup.h			Control Group Priority set
+ *
+ *
+ * Authors:	Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _NETPRIO_CGROUP_H
+#define _NETPRIO_CGROUP_H
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+
+struct netprio_map {
+	struct rcu_head rcu;
+	u32 priomap_len;
+	u32 priomap[];
+};
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup_netprio_state {
+	struct cgroup_subsys_state css;
+	u32 prioidx;
+};
+
+#ifndef CONFIG_NETPRIO_CGROUP
+extern int net_prio_subsys_id;
+#endif
+
+extern void sock_update_netprioidx(struct sock *sk);
+
+static inline struct cgroup_netprio_state
+		*task_netprio_state(struct task_struct *p)
+{
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+	return container_of(task_subsys_state(p, net_prio_subsys_id),
+			    struct cgroup_netprio_state, css);
+#else
+	return NULL;
+#endif
+}
+
+#else
+
+#define sock_update_netprioidx(sk)
+#endif
+
+#endif  /* _NETPRIO_CGROUP_H */
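
A rough sketch of how a transmit path could consult a netprio_map for the
socket's cgroup index (sk_cgrp_prioidx is added to struct sock later in this
series); the priomap pointer on the net_device is an assumption of the sketch:

    /* Caller is assumed to hold rcu_read_lock(). */
    static u32 toy_netprio(struct net_device *dev, const struct sock *sk)
    {
    	struct netprio_map *map = rcu_dereference(dev->priomap);	/* assumed field */
    	u32 idx = sk->sk_cgrp_prioidx;

    	if (map && idx < map->priomap_len)
    		return map->priomap[idx];
    	return 0;
    }
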
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 39b85bc..2be95e2 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -34,32 +34,30 @@
 #define NCI_MAX_NUM_CONN					10
 
 /* NCI Status Codes */
-#define	NCI_STATUS_OK						0x00
-#define	NCI_STATUS_REJECTED					0x01
-#define	NCI_STATUS_MESSAGE_CORRUPTED				0x02
-#define	NCI_STATUS_BUFFER_FULL					0x03
-#define	NCI_STATUS_FAILED					0x04
-#define	NCI_STATUS_NOT_INITIALIZED				0x05
-#define	NCI_STATUS_SYNTAX_ERROR					0x06
-#define	NCI_STATUS_SEMANTIC_ERROR				0x07
-#define	NCI_STATUS_UNKNOWN_GID					0x08
-#define	NCI_STATUS_UNKNOWN_OID					0x09
-#define	NCI_STATUS_INVALID_PARAM				0x0a
-#define	NCI_STATUS_MESSAGE_SIZE_EXCEEDED			0x0b
+#define NCI_STATUS_OK						0x00
+#define NCI_STATUS_REJECTED					0x01
+#define NCI_STATUS_RF_FRAME_CORRUPTED				0x02
+#define NCI_STATUS_FAILED					0x03
+#define NCI_STATUS_NOT_INITIALIZED				0x04
+#define NCI_STATUS_SYNTAX_ERROR					0x05
+#define NCI_STATUS_SEMANTIC_ERROR				0x06
+#define NCI_STATUS_UNKNOWN_GID					0x07
+#define NCI_STATUS_UNKNOWN_OID					0x08
+#define NCI_STATUS_INVALID_PARAM				0x09
+#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED			0x0a
 /* Discovery Specific Status Codes */
-#define	NCI_STATUS_DISCOVERY_ALREADY_STARTED			0xa0
-#define	NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED		0xa1
+#define NCI_STATUS_DISCOVERY_ALREADY_STARTED			0xa0
+#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED		0xa1
+#define NCI_STATUS_DISCOVERY_TEAR_DOWN				0xa2
 /* RF Interface Specific Status Codes */
-#define	NCI_STATUS_RF_TRANSMISSION_ERROR			0xb0
-#define	NCI_STATUS_RF_PROTOCOL_ERROR				0xb1
-#define	NCI_STATUS_RF_TIMEOUT_ERROR				0xb2
-#define	NCI_STATUS_RF_LINK_LOSS_ERROR				0xb3
+#define NCI_STATUS_RF_TRANSMISSION_ERROR			0xb0
+#define NCI_STATUS_RF_PROTOCOL_ERROR				0xb1
+#define NCI_STATUS_RF_TIMEOUT_ERROR				0xb2
 /* NFCEE Interface Specific Status Codes */
-#define	NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED		0xc0
-#define	NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED		0xc1
-#define	NCI_STATUS_NFCEE_TRANSMISSION_ERROR			0xc2
-#define	NCI_STATUS_NFCEE_PROTOCOL_ERROR				0xc3
-#define NCI_STATUS_NFCEE_TIMEOUT_ERROR				0xc4
+#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED		0xc0
+#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR			0xc1
+#define NCI_STATUS_NFCEE_PROTOCOL_ERROR				0xc2
+#define NCI_STATUS_NFCEE_TIMEOUT_ERROR				0xc3
 
 /* NCI RF Technology and Mode */
 #define NCI_NFC_A_PASSIVE_POLL_MODE				0x00
@@ -67,11 +65,28 @@
 #define NCI_NFC_F_PASSIVE_POLL_MODE				0x02
 #define NCI_NFC_A_ACTIVE_POLL_MODE				0x03
 #define NCI_NFC_F_ACTIVE_POLL_MODE				0x05
+#define NCI_NFC_15693_PASSIVE_POLL_MODE				0x06
 #define NCI_NFC_A_PASSIVE_LISTEN_MODE				0x80
 #define NCI_NFC_B_PASSIVE_LISTEN_MODE				0x81
 #define NCI_NFC_F_PASSIVE_LISTEN_MODE				0x82
 #define NCI_NFC_A_ACTIVE_LISTEN_MODE				0x83
 #define NCI_NFC_F_ACTIVE_LISTEN_MODE				0x85
+#define NCI_NFC_15693_PASSIVE_LISTEN_MODE			0x86
+
+/* NCI RF Technologies */
+#define NCI_NFC_RF_TECHNOLOGY_A					0x00
+#define NCI_NFC_RF_TECHNOLOGY_B					0x01
+#define NCI_NFC_RF_TECHNOLOGY_F					0x02
+#define NCI_NFC_RF_TECHNOLOGY_15693				0x03
+
+/* NCI Bit Rates */
+#define NCI_NFC_BIT_RATE_106					0x00
+#define NCI_NFC_BIT_RATE_212					0x01
+#define NCI_NFC_BIT_RATE_424					0x02
+#define NCI_NFC_BIT_RATE_848					0x03
+#define NCI_NFC_BIT_RATE_1695					0x04
+#define NCI_NFC_BIT_RATE_3390					0x05
+#define NCI_NFC_BIT_RATE_6780					0x06
 
 /* NCI RF Protocols */
 #define NCI_RF_PROTOCOL_UNKNOWN					0x00
@@ -82,37 +97,30 @@
 #define NCI_RF_PROTOCOL_NFC_DEP					0x05
 
 /* NCI RF Interfaces */
-#define NCI_RF_INTERFACE_RFU					0x00
-#define	NCI_RF_INTERFACE_FRAME					0x01
-#define	NCI_RF_INTERFACE_ISO_DEP				0x02
-#define	NCI_RF_INTERFACE_NFC_DEP				0x03
+#define NCI_RF_INTERFACE_NFCEE_DIRECT				0x00
+#define NCI_RF_INTERFACE_FRAME					0x01
+#define NCI_RF_INTERFACE_ISO_DEP				0x02
+#define NCI_RF_INTERFACE_NFC_DEP				0x03
+
+/* NCI Reset types */
+#define NCI_RESET_TYPE_KEEP_CONFIG				0x00
+#define NCI_RESET_TYPE_RESET_CONFIG				0x01
+
+/* NCI Static RF connection ID */
+#define NCI_STATIC_RF_CONN_ID					0x00
+
+/* NCI Data Flow Control */
+#define NCI_DATA_FLOW_CONTROL_NOT_USED				0xff
 
 /* NCI RF_DISCOVER_MAP_CMD modes */
 #define NCI_DISC_MAP_MODE_POLL					0x01
 #define NCI_DISC_MAP_MODE_LISTEN				0x02
-#define NCI_DISC_MAP_MODE_BOTH					0x03
-
-/* NCI Discovery Types */
-#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE			0x00
-#define	NCI_DISCOVERY_TYPE_POLL_B_PASSIVE			0x01
-#define	NCI_DISCOVERY_TYPE_POLL_F_PASSIVE			0x02
-#define	NCI_DISCOVERY_TYPE_POLL_A_ACTIVE			0x03
-#define	NCI_DISCOVERY_TYPE_POLL_F_ACTIVE			0x05
-#define	NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE			0x06
-#define	NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE			0x07
-#define	NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE			0x09
-#define	NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE			0x80
-#define	NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE			0x81
-#define	NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE			0x82
-#define	NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE			0x83
-#define	NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE			0x85
 
 /* NCI Deactivation Type */
-#define	NCI_DEACTIVATE_TYPE_IDLE_MODE				0x00
-#define	NCI_DEACTIVATE_TYPE_SLEEP_MODE				0x01
-#define	NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE			0x02
-#define	NCI_DEACTIVATE_TYPE_RF_LINK_LOSS			0x03
-#define	NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR			0x04
+#define NCI_DEACTIVATE_TYPE_IDLE_MODE				0x00
+#define NCI_DEACTIVATE_TYPE_SLEEP_MODE				0x01
+#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE			0x02
+#define NCI_DEACTIVATE_TYPE_DISCOVERY				0x03
 
 /* Message Type (MT) */
 #define NCI_MT_DATA_PKT						0x00
@@ -144,10 +152,10 @@
 #define nci_conn_id(hdr)		(__u8)(((hdr)[0])&0x0f)
 
 /* GID values */
-#define	NCI_GID_CORE						0x0
-#define	NCI_GID_RF_MGMT						0x1
-#define	NCI_GID_NFCEE_MGMT					0x2
-#define	NCI_GID_PROPRIETARY					0xf
+#define NCI_GID_CORE						0x0
+#define NCI_GID_RF_MGMT						0x1
+#define NCI_GID_NFCEE_MGMT					0x2
+#define NCI_GID_PROPRIETARY					0xf
 
 /* ---- NCI Packet structures ---- */
 #define NCI_CTRL_HDR_SIZE					3
@@ -169,24 +177,17 @@
 /* -----  NCI Commands ---- */
 /* ------------------------ */
 #define NCI_OP_CORE_RESET_CMD		nci_opcode_pack(NCI_GID_CORE, 0x00)
-
-#define NCI_OP_CORE_INIT_CMD		nci_opcode_pack(NCI_GID_CORE, 0x01)
-
-#define NCI_OP_CORE_SET_CONFIG_CMD	nci_opcode_pack(NCI_GID_CORE, 0x02)
-
-#define NCI_OP_CORE_CONN_CREATE_CMD	nci_opcode_pack(NCI_GID_CORE, 0x04)
-struct nci_core_conn_create_cmd {
-	__u8	target_handle;
-	__u8	num_target_specific_params;
+struct nci_core_reset_cmd {
+	__u8	reset_type;
 } __packed;
 
-#define NCI_OP_CORE_CONN_CLOSE_CMD	nci_opcode_pack(NCI_GID_CORE, 0x06)
+#define NCI_OP_CORE_INIT_CMD		nci_opcode_pack(NCI_GID_CORE, 0x01)
 
 #define NCI_OP_RF_DISCOVER_MAP_CMD	nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 struct disc_map_config {
 	__u8	rf_protocol;
 	__u8	mode;
-	__u8	rf_interface_type;
+	__u8	rf_interface;
 } __packed;
 
 struct nci_rf_disc_map_cmd {
@@ -197,7 +198,7 @@
 
 #define NCI_OP_RF_DISCOVER_CMD		nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
 struct disc_config {
-	__u8	type;
+	__u8	rf_tech_and_mode;
 	__u8	frequency;
 } __packed;
 
@@ -218,6 +219,7 @@
 struct nci_core_reset_rsp {
 	__u8	status;
 	__u8	nci_ver;
+	__u8	config_status;
 } __packed;
 
 #define NCI_OP_CORE_INIT_RSP		nci_opcode_pack(NCI_GID_CORE, 0x01)
@@ -232,24 +234,12 @@
 struct nci_core_init_rsp_2 {
 	__u8	max_logical_connections;
 	__le16	max_routing_table_size;
-	__u8	max_control_packet_payload_length;
-	__le16	rf_sending_buffer_size;
-	__le16	rf_receiving_buffer_size;
-	__le16	manufacturer_id;
+	__u8	max_ctrl_pkt_payload_len;
+	__le16	max_size_for_large_params;
+	__u8	manufact_id;
+	__le32	manufact_specific_info;
 } __packed;
 
-#define NCI_OP_CORE_SET_CONFIG_RSP	nci_opcode_pack(NCI_GID_CORE, 0x02)
-
-#define NCI_OP_CORE_CONN_CREATE_RSP	nci_opcode_pack(NCI_GID_CORE, 0x04)
-struct nci_core_conn_create_rsp {
-	__u8	status;
-	__u8	max_pkt_payload_size;
-	__u8	initial_num_credits;
-	__u8	conn_id;
-} __packed;
-
-#define NCI_OP_CORE_CONN_CLOSE_RSP	nci_opcode_pack(NCI_GID_CORE, 0x06)
-
 #define NCI_OP_RF_DISCOVER_MAP_RSP	nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 
 #define NCI_OP_RF_DISCOVER_RSP		nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -259,7 +249,7 @@
 /* --------------------------- */
 /* ---- NCI Notifications ---- */
 /* --------------------------- */
-#define NCI_OP_CORE_CONN_CREDITS_NTF	nci_opcode_pack(NCI_GID_CORE, 0x07)
+#define NCI_OP_CORE_CONN_CREDITS_NTF	nci_opcode_pack(NCI_GID_CORE, 0x06)
 struct conn_credit_entry {
 	__u8	conn_id;
 	__u8	credits;
@@ -270,12 +260,13 @@
 	struct conn_credit_entry	conn_entries[NCI_MAX_NUM_CONN];
 } __packed;
 
-#define NCI_OP_RF_FIELD_INFO_NTF	nci_opcode_pack(NCI_GID_CORE, 0x08)
-struct nci_rf_field_info_ntf {
-	__u8	rf_field_status;
+#define NCI_OP_CORE_INTF_ERROR_NTF	nci_opcode_pack(NCI_GID_CORE, 0x08)
+struct nci_core_intf_error_ntf {
+	__u8	status;
+	__u8	conn_id;
 } __packed;
 
-#define NCI_OP_RF_ACTIVATE_NTF		nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
+#define NCI_OP_RF_INTF_ACTIVATED_NTF	nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
 struct rf_tech_specific_params_nfca_poll {
 	__u16	sens_res;
 	__u8	nfcid1_len;	/* 0, 4, 7, or 10 Bytes */
@@ -289,17 +280,22 @@
 	__u8	rats_res[20];
 };
 
-struct nci_rf_activate_ntf {
-	__u8	target_handle;
+struct nci_rf_intf_activated_ntf {
+	__u8	rf_discovery_id;
+	__u8	rf_interface;
 	__u8	rf_protocol;
-	__u8	rf_tech_and_mode;
+	__u8	activation_rf_tech_and_mode;
+	__u8	max_data_pkt_payload_size;
+	__u8	initial_num_credits;
 	__u8	rf_tech_specific_params_len;
 
 	union {
 		struct rf_tech_specific_params_nfca_poll nfca_poll;
 	} rf_tech_specific_params;
 
-	__u8	rf_interface_type;
+	__u8	data_exch_rf_tech_and_mode;
+	__u8	data_exch_tx_bit_rate;
+	__u8	data_exch_rx_bit_rate;
 	__u8	activation_params_len;
 
 	union {
@@ -309,5 +305,9 @@
 } __packed;
 
 #define NCI_OP_RF_DEACTIVATE_NTF	nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_ntf {
+	__u8	type;
+	__u8	reason;
+} __packed;
 
 #endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index b8b4bbd..bccd89e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -109,15 +109,14 @@
 				[NCI_MAX_SUPPORTED_RF_INTERFACES];
 	__u8			max_logical_connections;
 	__u16			max_routing_table_size;
-	__u8			max_control_packet_payload_length;
-	__u16			rf_sending_buffer_size;
-	__u16			rf_receiving_buffer_size;
-	__u16			manufacturer_id;
+	__u8			max_ctrl_pkt_payload_len;
+	__u16			max_size_for_large_params;
+	__u8			manufact_id;
+	__u32			manufact_specific_info;
 
-	/* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */
-	__u8			max_pkt_payload_size;
+	/* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
+	__u8			max_data_pkt_payload_size;
 	__u8			initial_num_credits;
-	__u8			conn_id;
 
 	/* stored during nci_data_exchange */
 	data_exchange_cb_t	data_exchange_cb;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 6a7f602..8696b77 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -52,6 +52,9 @@
 	int (*dev_down)(struct nfc_dev *dev);
 	int (*start_poll)(struct nfc_dev *dev, u32 protocols);
 	void (*stop_poll)(struct nfc_dev *dev);
+	int (*dep_link_up)(struct nfc_dev *dev, int target_idx,
+				u8 comm_mode, u8 rf_mode);
+	int (*dep_link_down)(struct nfc_dev *dev);
 	int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
 							u32 protocol);
 	void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx);
@@ -60,11 +63,17 @@
 							void *cb_context);
 };
 
+#define NFC_TARGET_IDX_ANY -1
+#define NFC_MAX_GT_LEN 48
+#define NFC_MAX_NFCID1_LEN 10
+
 struct nfc_target {
 	u32 idx;
 	u32 supported_protocols;
 	u16 sens_res;
 	u8 sel_res;
+	u8 nfcid1_len;
+	u8 nfcid1[NFC_MAX_NFCID1_LEN];
 };
 
 struct nfc_genl_data {
@@ -83,6 +92,8 @@
 	bool dev_up;
 	bool polling;
 	bool remote_activated;
+	bool dep_link_up;
+	u32 dep_rf_mode;
 	struct nfc_genl_data genl_data;
 	u32 supported_protocols;
 
@@ -157,9 +168,20 @@
 	return dev_name(&dev->dev);
 }
 
-struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp);
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+					unsigned int flags, unsigned int size,
+					unsigned int *err);
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
+
+int nfc_set_remote_general_bytes(struct nfc_dev *dev,
+					u8 *gt, u8 gt_len);
+
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len);
 
 int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
 							int ntargets);
 
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+		       u8 comm_mode, u8 rf_mode);
+
 #endif /* __NET_NFC_H */
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 6f7eb80..875f489 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -25,7 +25,7 @@
 #define _PROTOCOL_H
 
 #include <linux/in6.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -38,7 +38,7 @@
 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
-					       u32 features);
+					       netdev_features_t features);
 	struct sk_buff	      **(*gro_receive)(struct sk_buff **head,
 					       struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb);
@@ -46,7 +46,7 @@
 				netns_ok:1;
 };
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
 	int	(*handler)(struct sk_buff *skb);
 
@@ -57,7 +57,7 @@
 
 	int	(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-				       u32 features);
+				       netdev_features_t features);
 	struct sk_buff **(*gro_receive)(struct sk_buff **head,
 					struct sk_buff *skb);
 	int	(*gro_complete)(struct sk_buff *skb);
@@ -91,7 +91,7 @@
 
 extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
@@ -100,7 +100,7 @@
 extern void	inet_register_protosw(struct inet_protosw *p);
 extern void	inet_unregister_protosw(struct inet_protosw *p);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern int	inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int	inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int	inet6_register_protosw(struct inet_protosw *p);
diff --git a/include/net/red.h b/include/net/red.h
index b72a3b8..baab385 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -5,6 +5,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
 
 /*	Random Early Detection (RED) algorithm.
 	=======================================
@@ -87,6 +88,29 @@
 	etc.
  */
 
+/*
+ * Adaptive RED : An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
+ *
+ * Every 500 ms:
+ *  if (avg > target and max_p <= 0.5)
+ *   increase max_p : max_p += alpha;
+ *  else if (avg < target and max_p >= 0.01)
+ *   decrease max_p : max_p *= beta;
+ *
+ * target :[qth_min + 0.4*(qth_max - qth_min),
+ *          qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (with 32 bits mantissa)
+ * max_P between 0.01 and 0.5 (1% - 50%) [ It's no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
 #define RED_STAB_SIZE	256
 #define RED_STAB_MASK	(RED_STAB_SIZE - 1)
 
@@ -101,76 +125,109 @@
 
 struct red_parms {
 	/* Parameters */
-	u32		qth_min;	/* Min avg length threshold: A scaled */
-	u32		qth_max;	/* Max avg length threshold: A scaled */
+	u32		qth_min;	/* Min avg length threshold: Wlog scaled */
+	u32		qth_max;	/* Max avg length threshold: Wlog scaled */
 	u32		Scell_max;
-	u32		Rmask;		/* Cached random mask, see red_rmask */
+	u32		max_P;		/* probability, [0 .. 1.0] 32 scaled */
+	u32		max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
+	u32		qth_delta;	/* max_th - min_th */
+	u32		target_min;	/* min_th + 0.4*(max_th - min_th) */
+	u32		target_max;	/* min_th + 0.6*(max_th - min_th) */
 	u8		Scell_log;
 	u8		Wlog;		/* log(W)		*/
 	u8		Plog;		/* random number bits	*/
 	u8		Stab[RED_STAB_SIZE];
+};
 
+struct red_vars {
 	/* Variables */
 	int		qcount;		/* Number of packets since last random
 					   number generation */
 	u32		qR;		/* Cached random number */
 
-	unsigned long	qavg;		/* Average queue length: A scaled */
+	unsigned long	qavg;		/* Average queue length: Wlog scaled */
 	ktime_t		qidlestart;	/* Start of current idle period */
 };
 
-static inline u32 red_rmask(u8 Plog)
+static inline u32 red_maxp(u8 Plog)
 {
-	return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+	return Plog < 32 ? (~0U >> Plog) : ~0U;
 }
 
-static inline void red_set_parms(struct red_parms *p,
-				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
-				 u8 Scell_log, u8 *stab)
+static inline void red_set_vars(struct red_vars *v)
 {
 	/* Reset average queue length, the value is strictly bound
 	 * to the parameters below, reseting hurts a bit but leaving
 	 * it might result in an unreasonable qavg for a while. --TGR
 	 */
-	p->qavg		= 0;
+	v->qavg		= 0;
 
-	p->qcount	= -1;
+	v->qcount	= -1;
+}
+
+static inline void red_set_parms(struct red_parms *p,
+				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
+				 u8 Scell_log, u8 *stab, u32 max_P)
+{
+	int delta = qth_max - qth_min;
+	u32 max_p_delta;
+
 	p->qth_min	= qth_min << Wlog;
 	p->qth_max	= qth_max << Wlog;
 	p->Wlog		= Wlog;
 	p->Plog		= Plog;
-	p->Rmask	= red_rmask(Plog);
+	if (delta < 0)
+		delta = 1;
+	p->qth_delta	= delta;
+	if (!max_P) {
+		max_P = red_maxp(Plog);
+		max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+	}
+	p->max_P = max_P;
+	max_p_delta = max_P / delta;
+	max_p_delta = max(max_p_delta, 1U);
+	p->max_P_reciprocal  = reciprocal_value(max_p_delta);
+
+	/* RED Adaptative target :
+	 * [min_th + 0.4*(max_th - min_th),
+	 *  min_th + 0.6*(max_th - min_th)].
+	 */
+	delta /= 5;
+	p->target_min = qth_min + 2*delta;
+	p->target_max = qth_min + 3*delta;
+
 	p->Scell_log	= Scell_log;
 	p->Scell_max	= (255 << Scell_log);
 
 	memcpy(p->Stab, stab, sizeof(p->Stab));
 }
 
-static inline int red_is_idling(struct red_parms *p)
+static inline int red_is_idling(const struct red_vars *v)
 {
-	return p->qidlestart.tv64 != 0;
+	return v->qidlestart.tv64 != 0;
 }
 
-static inline void red_start_of_idle_period(struct red_parms *p)
+static inline void red_start_of_idle_period(struct red_vars *v)
 {
-	p->qidlestart = ktime_get();
+	v->qidlestart = ktime_get();
 }
 
-static inline void red_end_of_idle_period(struct red_parms *p)
+static inline void red_end_of_idle_period(struct red_vars *v)
 {
-	p->qidlestart.tv64 = 0;
+	v->qidlestart.tv64 = 0;
 }
 
-static inline void red_restart(struct red_parms *p)
+static inline void red_restart(struct red_vars *v)
 {
-	red_end_of_idle_period(p);
-	p->qavg = 0;
-	p->qcount = -1;
+	red_end_of_idle_period(v);
+	v->qavg = 0;
+	v->qcount = -1;
 }
 
-static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
+							 const struct red_vars *v)
 {
-	s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+	s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
 	long us_idle = min_t(s64, delta, p->Scell_max);
 	int  shift;
 
@@ -197,7 +254,7 @@
 	shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
 
 	if (shift)
-		return p->qavg >> shift;
+		return v->qavg >> shift;
 	else {
 		/* Approximate initial part of exponent with linear function:
 		 *
@@ -206,16 +263,17 @@
 		 * Seems, it is the best solution to
 		 * problem of too coarse exponent tabulation.
 		 */
-		us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log;
+		us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
 
-		if (us_idle < (p->qavg >> 1))
-			return p->qavg - us_idle;
+		if (us_idle < (v->qavg >> 1))
+			return v->qavg - us_idle;
 		else
-			return p->qavg >> 1;
+			return v->qavg >> 1;
 	}
 }
 
-static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
+						       const struct red_vars *v,
 						       unsigned int backlog)
 {
 	/*
@@ -227,42 +285,46 @@
 	 *
 	 * --ANK (980924)
 	 */
-	return p->qavg + (backlog - (p->qavg >> p->Wlog));
+	return v->qavg + (backlog - (v->qavg >> p->Wlog));
 }
 
-static inline unsigned long red_calc_qavg(struct red_parms *p,
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
+					  const struct red_vars *v,
 					  unsigned int backlog)
 {
-	if (!red_is_idling(p))
-		return red_calc_qavg_no_idle_time(p, backlog);
+	if (!red_is_idling(v))
+		return red_calc_qavg_no_idle_time(p, v, backlog);
 	else
-		return red_calc_qavg_from_idle_time(p);
+		return red_calc_qavg_from_idle_time(p, v);
 }
 
-static inline u32 red_random(struct red_parms *p)
+
+static inline u32 red_random(const struct red_parms *p)
 {
-	return net_random() & p->Rmask;
+	return reciprocal_divide(net_random(), p->max_P_reciprocal);
 }
 
-static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p,
+				       const struct red_vars *v,
+				       unsigned long qavg)
 {
 	/* The formula used below causes questions.
 
-	   OK. qR is random number in the interval 0..Rmask
+	   OK. qR is random number in the interval
+		(0..1/max_P)*(qth_max-qth_min)
 	   i.e. 0..(2^Plog). If we used floating point
 	   arithmetics, it would be: (2^Plog)*rnd_num,
 	   where rnd_num is less 1.
 
 	   Taking into account, that qavg have fixed
-	   point at Wlog, and Plog is related to max_P by
-	   max_P = (qth_max-qth_min)/2^Plog; two lines
+	   point at Wlog, two lines
 	   below have the following floating point equivalent:
 
 	   max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
 
 	   Any questions? --ANK (980924)
 	 */
-	return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR);
+	return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
 }
 
 enum {
@@ -271,7 +333,7 @@
 	RED_ABOVE_MAX_TRESH,
 };
 
-static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg)
+static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
 {
 	if (qavg < p->qth_min)
 		return RED_BELOW_MIN_THRESH;
@@ -287,27 +349,29 @@
 	RED_HARD_MARK,
 };
 
-static inline int red_action(struct red_parms *p, unsigned long qavg)
+static inline int red_action(const struct red_parms *p,
+			     struct red_vars *v,
+			     unsigned long qavg)
 {
 	switch (red_cmp_thresh(p, qavg)) {
 		case RED_BELOW_MIN_THRESH:
-			p->qcount = -1;
+			v->qcount = -1;
 			return RED_DONT_MARK;
 
 		case RED_BETWEEN_TRESH:
-			if (++p->qcount) {
-				if (red_mark_probability(p, qavg)) {
-					p->qcount = 0;
-					p->qR = red_random(p);
+			if (++v->qcount) {
+				if (red_mark_probability(p, v, qavg)) {
+					v->qcount = 0;
+					v->qR = red_random(p);
 					return RED_PROB_MARK;
 				}
 			} else
-				p->qR = red_random(p);
+				v->qR = red_random(p);
 
 			return RED_DONT_MARK;
 
 		case RED_ABOVE_MAX_TRESH:
-			p->qcount = -1;
+			v->qcount = -1;
 			return RED_HARD_MARK;
 	}
 
@@ -315,4 +379,25 @@
 	return RED_DONT_MARK;
 }
 
+static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
+{
+	unsigned long qavg;
+	u32 max_p_delta;
+
+	qavg = v->qavg;
+	if (red_is_idling(v))
+		qavg = red_calc_qavg_from_idle_time(p, v);
+
+	/* p->qavg is fixed point number with point at Wlog */
+	qavg >>= p->Wlog;
+
+	if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+		p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+	else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+		p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+	max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+	max_p_delta = max(max_p_delta, 1U);
+	p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
 #endif
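
The adaptative update is meant to run roughly every 500 ms. A minimal sketch
of a qdisc driving red_adaptative_algo() with the new red_parms/red_vars
split; the private structure and timer are hypothetical:

    struct toy_red_sched_data {			/* hypothetical qdisc private data */
    	struct red_parms parms;
    	struct red_vars vars;
    	struct timer_list adapt_timer;
    };

    static void toy_red_adapt(unsigned long arg)
    {
    	struct toy_red_sched_data *q = (struct toy_red_sched_data *)arg;

    	/* Serialization against the enqueue path is left out of the sketch. */
    	red_adaptative_algo(&q->parms, &q->vars);
    	mod_timer(&q->adapt_timer, jiffies + HZ / 2);	/* every 500 ms */
    }
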
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index eb7d3c2..a5f7993 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -48,6 +48,10 @@
  * 	99 - built by driver but a specific alpha2 cannot be determined
  * 	98 - result of an intersection between two regulatory domains
  *	97 - regulatory domain has not yet been configured
+ * @dfs_region: If CRDA responded with a regulatory domain that requires
+ *	DFS master operation on a known DFS region (NL80211_DFS_*),
+ *	dfs_region represents that region. Drivers can use this and the
+ *	@alpha2 to adjust their device's DFS parameters as required.
  * @intersect: indicates whether the wireless core should intersect
  * 	the requested regulatory domain with the presently set regulatory
  * 	domain.
@@ -67,6 +71,7 @@
 	int wiphy_idx;
 	enum nl80211_reg_initiator initiator;
 	char alpha2[2];
+	u8 dfs_region;
 	bool intersect;
 	bool processed;
 	enum environment_cap country_ie_env;
@@ -93,6 +98,7 @@
 struct ieee80211_regdomain {
 	u32 n_reg_rules;
 	char alpha2[2];
+	u8 dfs_region;
 	struct ieee80211_reg_rule reg_rules[];
 };
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 6a72a58..d368561 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -71,7 +71,7 @@
 #include <linux/jiffies.h>
 #include <linux/idr.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #endif
@@ -383,7 +383,7 @@
 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 void sctp_v6_pf_init(void);
 void sctp_v6_pf_exit(void);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e90e7a9..88949a9 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -235,12 +235,15 @@
 
 	/* Flag to indicate whether computing and verifying checksum
 	 * is disabled. */
-        int checksum_disable;
+        bool checksum_disable;
 
 	/* Threshold for rwnd update SACKS.  Receive buffer shifted this many
 	 * bits is an indicator of when to send and window update SACK.
 	 */
 	int rwnd_update_shift;
+
+	/* Threshold for autoclose timeout, in seconds. */
+	unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial		(sctp_globals.rto_initial)
@@ -281,6 +284,7 @@
 #define sctp_auth_enable		(sctp_globals.auth_enable)
 #define sctp_checksum_disable		(sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift		(sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose		(sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -365,7 +369,7 @@
        return (struct sock *)sp;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct sctp6_sock {
        struct sctp_sock  sctp;
        struct ipv6_pinfo inet6;
@@ -1085,6 +1089,7 @@
 unsigned long sctp_transport_timeout(struct sctp_transport *);
 void sctp_transport_reset(struct sctp_transport *);
 void sctp_transport_update_pmtu(struct sctp_transport *, u32);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
 
 
 /* This is the structure we use to queue packets as they come into
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8f0f9ac..2f65e16 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -67,7 +67,7 @@
 
 #define ICMPMSG_MIB_MAX	__ICMPMSG_MIB_MAX
 struct icmpmsg_mib {
-	unsigned long	mibs[ICMPMSG_MIB_MAX];
+	atomic_long_t	mibs[ICMPMSG_MIB_MAX];
 };
 
 /* ICMP6 (IPv6-ICMP) */
@@ -84,7 +84,7 @@
 #define ICMP6MSG_MIB_MAX  __ICMP6MSG_MIB_MAX
 /* per network ns counters */
 struct icmpv6msg_mib {
-	unsigned long	mibs[ICMP6MSG_MIB_MAX];
+	atomic_long_t	mibs[ICMP6MSG_MIB_MAX];
 };
 /* per device counters, (shared on all cpus) */
 struct icmpv6msg_mib_device {
diff --git a/include/net/sock.h b/include/net/sock.h
index abb6e0f..bb972d2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -53,6 +53,8 @@
 #include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -62,6 +64,22 @@
 #include <net/dst.h>
 #include <net/checksum.h>
 
+struct cgroup;
+struct cgroup_subsys;
+#ifdef CONFIG_NET
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+#else
+static inline
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+	return 0;
+}
+static inline
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+}
+#endif
 /*
  * This structure really needs to be cleaned up.
  * Most of it is for TCP, and not used by any of
@@ -167,6 +185,7 @@
 	/* public: */
 };
 
+struct cg_proto;
 /**
   *	struct sock - network layer representation of sockets
   *	@__sk_common: shared layout with inet_timewait_sock
@@ -227,6 +246,7 @@
   *	@sk_security: used by security modules
   *	@sk_mark: generic packet mark
   *	@sk_classid: this socket's cgroup classid
+  *	@sk_cgrp: this socket's cgroup-specific proto data
   *	@sk_write_pending: a write to stream socket waits to start
   *	@sk_state_change: callback to indicate change in the state of the sock
   *	@sk_data_ready: callback to indicate there is data to be processed
@@ -306,8 +326,8 @@
 	kmemcheck_bitfield_end(flags);
 	int			sk_wmem_queued;
 	gfp_t			sk_allocation;
-	int			sk_route_caps;
-	int			sk_route_nocaps;
+	netdev_features_t	sk_route_caps;
+	netdev_features_t	sk_route_nocaps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
 	int			sk_rcvlowat;
@@ -320,6 +340,9 @@
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
+#ifdef CONFIG_CGROUPS
+	__u32			sk_cgrp_prioidx;
+#endif
 	struct pid		*sk_peer_pid;
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
@@ -338,6 +361,7 @@
 #endif
 	__u32			sk_mark;
 	u32			sk_classid;
+	struct cg_proto		*sk_cgrp;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 	void			(*sk_write_space)(struct sock *sk);
@@ -563,6 +587,7 @@
 	SOCK_FASYNC, /* fasync() active */
 	SOCK_RXQ_OVFL,
 	SOCK_ZEROCOPY, /* buffers from userspace */
+	SOCK_WIFI_STATUS, /* push wifi status to userspace */
 };
 
 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -637,12 +662,14 @@
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize + skb->truesize > sk->sk_rcvbuf;
+	return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
@@ -833,6 +860,37 @@
 #ifdef SOCK_REFCNT_DEBUG
 	atomic_t		socks;
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+	/*
+	 * cgroup specific init/deinit functions. Called once for all
+	 * protocols that implement it, from cgroups populate function.
+	 * This function has to set up any files the protocol wants to
+	 * appear in the kmem cgroup filesystem.
+	 */
+	int			(*init_cgroup)(struct cgroup *cgrp,
+					       struct cgroup_subsys *ss);
+	void			(*destroy_cgroup)(struct cgroup *cgrp,
+						  struct cgroup_subsys *ss);
+	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+struct cg_proto {
+	void			(*enter_memory_pressure)(struct sock *sk);
+	struct res_counter	*memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
+	int			*memory_pressure;
+	long			*sysctl_mem;
+	/*
+	 * memcg field is used to find which memcg we belong directly
+	 * Each memcg struct can hold more than one cg_proto, so container_of
+	 * won't really cut it.
+	 *
+	 * The elegant solution would be having an inverse function to
+	 * proto_cgroup in struct proto, but that means polluting the structure
+	 * for everybody, instead of just for memcg users.
+	 */
+	struct mem_cgroup	*memcg;
 };
 
 extern int proto_register(struct proto *prot, int alloc_slab);
@@ -851,7 +909,7 @@
 	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
 }
 
-static inline void sk_refcnt_debug_release(const struct sock *sk)
+inline void sk_refcnt_debug_release(const struct sock *sk)
 {
 	if (atomic_read(&sk->sk_refcnt) != 1)
 		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
@@ -863,6 +921,208 @@
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+extern struct jump_label_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+					       struct cg_proto *cg_proto)
+{
+	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+					       struct cg_proto *cg_proto)
+{
+	return NULL;
+}
+#endif
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+	return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+	if (!sk->sk_prot->memory_pressure)
+		return false;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return !!*sk->sk_cgrp->memory_pressure;
+
+	return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+	int *memory_pressure = sk->sk_prot->memory_pressure;
+
+	if (!memory_pressure)
+		return;
+
+	if (*memory_pressure)
+		*memory_pressure = 0;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+		struct proto *prot = sk->sk_prot;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			if (*cg_proto->memory_pressure)
+				*cg_proto->memory_pressure = 0;
+	}
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+	if (!sk->sk_prot->enter_memory_pressure)
+		return;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+		struct proto *prot = sk->sk_prot;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			cg_proto->enter_memory_pressure(sk);
+	}
+
+	sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+	long *prot = sk->sk_prot->sysctl_mem;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		prot = sk->sk_cgrp->sysctl_mem;
+	return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+					      unsigned long amt,
+					      int *parent_status)
+{
+	struct res_counter *fail;
+	int ret;
+
+	ret = res_counter_charge(prot->memory_allocated,
+				 amt << PAGE_SHIFT, &fail);
+
+	if (ret < 0)
+		*parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+					      unsigned long amt)
+{
+	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+	u64 ret;
+	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+	return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return memcg_memory_allocated_read(sk->sk_cgrp);
+
+	return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+		/* update the root cgroup regardless */
+		atomic_long_add_return(amt, prot->memory_allocated);
+		return memcg_memory_allocated_read(sk->sk_cgrp);
+	}
+
+	return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+	    parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+		memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+	atomic_long_sub(amt, prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			percpu_counter_dec(cg_proto->sockets_allocated);
+	}
+
+	percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			percpu_counter_inc(cg_proto->sockets_allocated);
+	}
+
+	percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+
+	return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+	return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+	return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+	if (!prot->memory_pressure)
+		return false;
+	return !!*prot->memory_pressure;
+}
+
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
@@ -1089,8 +1349,8 @@
 					  struct proto *prot);
 extern void			sk_free(struct sock *sk);
 extern void			sk_release_kernel(struct sock *sk);
-extern struct sock		*sk_clone(const struct sock *sk,
-					  const gfp_t priority);
+extern struct sock		*sk_clone_lock(const struct sock *sk,
+					       const gfp_t priority);
 
 extern struct sk_buff		*sock_wmalloc(struct sock *sk,
 					      unsigned long size, int force,
@@ -1393,7 +1653,7 @@
 
 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_nocaps_add(struct sock *sk, int flags)
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
 	sk->sk_route_nocaps |= flags;
 	sk->sk_route_caps &= ~flags;
@@ -1670,7 +1930,7 @@
 
 	page = alloc_pages(sk->sk_allocation, 0);
 	if (!page) {
-		sk->sk_prot->enter_memory_pressure(sk);
+		sk_enter_memory_pressure(sk);
 		sk_stream_moderate_sndbuf(sk);
 	}
 	return page;
@@ -1714,6 +1974,8 @@
 
 extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 	struct sk_buff *skb);
+extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+	struct sk_buff *skb);
 
 static __inline__ void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -1741,6 +2003,9 @@
 		__sock_recv_timestamp(msg, sk, skb);
 	else
 		sk->sk_stamp = kt;
+
+	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+		__sock_recv_wifi_status(msg, sk, skb);
 }
 
 extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
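
The helpers added above hide the cgroup split from protocol code: every
query or update goes through the socket, and the memcg counters are only
consulted when mem_cgroup_sockets_enabled and sk->sk_cgrp are set.  A
minimal sketch of a charge path built on them follows; the function name,
the error policy and the limit indices used are illustrative assumptions,
not part of this patch (in the tree the consumer is __sk_mem_schedule()).

/*
 * Illustrative only: charge 'amt' pages for 'sk', honouring per-memcg
 * limits transparently.  UNDER_LIMIT/OVER_LIMIT are res_counter charge
 * states; memcg_memory_allocated_add() above sets OVER_LIMIT on failure.
 */
static int my_proto_charge(struct sock *sk, int amt)
{
	int parent_status = UNDER_LIMIT;
	long allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	if (parent_status == OVER_LIMIT ||
	    allocated > sk_prot_mem_limits(sk, 2)) {	/* hard limit */
		sk_enter_memory_pressure(sk);
		sk_memory_allocated_sub(sk, amt, parent_status);
		return -ENOMEM;
	}

	if (allocated <= sk_prot_mem_limits(sk, 0))	/* low watermark */
		sk_leave_memory_pressure(sk);

	return 0;
}
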
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb18c4d..0118ea9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -44,6 +44,7 @@
 #include <net/dst.h>
 
 #include <linux/seq_file.h>
+#include <linux/memcontrol.h>
 
 extern struct inet_hashinfo tcp_hashinfo;
 
@@ -229,7 +230,6 @@
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -285,7 +285,7 @@
 	}
 
 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 		return true;
 	return false;
 }
@@ -628,7 +628,7 @@
 struct tcp_skb_cb {
 	union {
 		struct inet_skb_parm	h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;	/* For incoming frames		*/
@@ -773,12 +773,12 @@
 
 static inline int tcp_is_fack(const struct tcp_sock *tp)
 {
-	return tp->rx_opt.sack_ok & 2;
+	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
 
 static inline void tcp_enable_fack(struct tcp_sock *tp)
 {
-	tp->rx_opt.sack_ok |= 2;
+	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 }
 
 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
@@ -834,6 +834,14 @@
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending if not using sysctl_tcp_tso_win_divisor.
+ */
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+{
+	return 3;
+}
+
 /* Slow start with delack produces 3 packets of burst, so that
  * it is safe "de facto".  This will be the default - same as
  * the default reordering threshold - but if reordering increases,
@@ -1144,7 +1152,7 @@
 /* - sock block */
 struct tcp_md5sig_info {
 	struct tcp4_md5sig_key	*keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct tcp6_md5sig_key	*keys6;
 	u32			entries6;
 	u32			alloced6;
@@ -1171,7 +1179,7 @@
 
 union tcp_md5sum_block {
 	struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct tcp6_pseudohdr ip6;
 #endif
 };
@@ -1430,7 +1438,8 @@
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+				       netdev_features_t features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
 					struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644
index 0000000..3512082
--- /dev/null
+++ b/include/net/tcp_memcontrol.h
@@ -0,0 +1,19 @@
+#ifndef _TCP_MEMCG_H
+#define _TCP_MEMCG_H
+
+struct tcp_memcontrol {
+	struct cg_proto cg_proto;
+	/* per-cgroup tcp memory pressure knobs */
+	struct res_counter tcp_memory_allocated;
+	struct percpu_counter tcp_sockets_allocated;
+	/* those two are read-mostly, leave them at the end */
+	long tcp_prot_mem[3];
+	int tcp_memory_pressure;
+};
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
+#endif /* _TCP_MEMCG_H */
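
Going the other way from the generic socket code is straightforward:
because struct tcp_memcontrol embeds its cg_proto, a cg_proto pointer
handed around by sock.h can be turned back into the TCP-specific counters
with container_of().  This is the direction the cg_proto comment in
sock.h notes is easy; it is the memcg-to-cg_proto mapping that needs the
proto_cgroup() callback.  Hypothetical helper, for illustration only:

static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
{
	/* cg_proto is embedded in tcp_memcontrol, so this is safe */
	return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
}
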
diff --git a/include/net/udp.h b/include/net/udp.h
index 3b285f4..e39592f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -41,7 +41,7 @@
 struct udp_skb_cb {
 	union {
 		struct inet_skb_parm	h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;
@@ -194,9 +194,15 @@
 extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 				    __be32 daddr, __be16 dport,
 				    int dif);
+extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+				    __be32 daddr, __be16 dport,
+				    int dif, struct udp_table *tbl);
 extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 				    const struct in6_addr *daddr, __be16 dport,
 				    int dif);
+extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+				    const struct in6_addr *daddr, __be16 dport,
+				    int dif, struct udp_table *tbl);
 
 /*
  * 	SNMP statistics for UDP and UDP-Lite
@@ -217,7 +223,7 @@
 	else	    SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define UDPX_INC_STATS_BH(sk, field) \
 	do { \
 		if ((sk)->sk_family == AF_INET) \
@@ -258,5 +264,6 @@
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+	netdev_features_t features);
 #endif	/* _UDP_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b203e14..89174e29 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -827,6 +827,14 @@
 	return true;
 }
 
+static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
+{
+	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
+	if (prefixlen == 0)
+		return true;
+	return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
+}
+
 static __inline__
 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
 {
@@ -1209,8 +1217,8 @@
 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
 		break;
 	case AF_INET6:
-		ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr);
-		ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr);
+		*(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
+		*(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
 		break;
 	}
 }
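
The prefixlen == 0 special case in addr4_match() is not cosmetic: a
32-bit shift by 32 is undefined behaviour, and on x86 the shift count is
truncated modulo 32, so 0xFFFFFFFFu << 32 would evaluate to 0xFFFFFFFF
and a /0 selector would stop matching everything.  A small user-space
model of the masking logic (plain C99, not kernel code):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool addr4_match(uint32_t a1, uint32_t a2, uint8_t prefixlen)
{
	if (prefixlen == 0)	/* avoid the undefined 32-bit shift */
		return true;
	return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
}

int main(void)
{
	uint32_t a = htonl(0xC0A80101);		/* 192.168.1.1 */
	uint32_t b = htonl(0xC0A801FE);		/* 192.168.1.254 */

	printf("/24: %d\n", addr4_match(a, b, 24));	/* 1 */
	printf("/32: %d\n", addr4_match(a, b, 32));	/* 0 */
	printf("/0:  %d\n", addr4_match(a, b, 0));	/* 1 */
	return 0;
}
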
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 639a449..9996539 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -281,7 +281,7 @@
 static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_802_1Q_VLAN ?
-		vlan_dev_real_dev(dev) : 0;
+		vlan_dev_real_dev(dev) : NULL;
 }
 
 #endif /* IB_ADDR_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c8f94e8..83f77ac 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -38,6 +38,9 @@
 #include <rdma/ib_mad.h>
 #include <rdma/ib_sa.h>
 
+/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
+extern struct class cm_class;
+
 enum ib_cm_state {
 	IB_CM_IDLE,
 	IB_CM_LISTEN,
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index d1e95c6..5a35a2a 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -147,6 +147,7 @@
 	u8 map_dest;
 	u8 spma;
 	u8 probe_tries;
+	u8 priority;
 	u8 dest_addr[ETH_ALEN];
 	u8 ctl_src_addr[ETH_ALEN];
 
@@ -301,6 +302,7 @@
  * @lport:		       The associated local port
  * @fcoe_pending_queue:	       The pending Rx queue of skbs
  * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority:		       Packet priority (DCB)
  * @max_queue_depth:	       Max queue depth of pending queue
  * @min_queue_depth:	       Min queue depth of pending queue
  * @timer:		       The queue timer
@@ -316,6 +318,7 @@
 	struct fc_lport	      *lport;
 	struct sk_buff_head   fcoe_pending_queue;
 	u8		      fcoe_pending_queue_active;
+	u8		      priority;
 	u32		      max_queue_depth;
 	u32		      min_queue_depth;
 	struct timer_list     timer;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 5994bcc..87f34c3 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -142,7 +142,7 @@
 	int (*get_iface_param) (struct iscsi_iface *iface,
 				enum iscsi_param_type param_type,
 				int param, char *buf);
-	mode_t (*attr_is_visible)(int param_type, int param);
+	umode_t (*attr_is_visible)(int param_type, int param);
 	int (*bsg_request)(struct bsg_job *job);
 };
 
diff --git a/include/sound/info.h b/include/sound/info.h
index 5492cc4..9ca1a49 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -72,7 +72,7 @@
 
 struct snd_info_entry {
 	const char *name;
-	mode_t mode;
+	umode_t mode;
 	long size;
 	unsigned short content;
 	union {
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 669fbd6..d2d88be 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -241,24 +241,73 @@
 
 /*
  * Tracepoint for dyntick-idle entry/exit events.  These take a string
- * as argument: "Start" for entering dyntick-idle mode and "End" for
- * leaving it.
+ * as argument: "Start" for entering dyntick-idle mode, "End" for
+ * leaving it, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.  "Error on entry: not idle task" and "Error on
+ * exit: not idle task" indicate that a non-idle task is erroneously
+ * toying with the idle loop.
+ *
+ * These events also take a pair of numbers, which indicate the nesting
+ * depth before and after the event of interest.  Note that task-related
+ * events use the upper bits of each number, while interrupt-related
+ * events use the lower bits.
  */
 TRACE_EVENT(rcu_dyntick,
 
-	TP_PROTO(char *polarity),
+	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
 
-	TP_ARGS(polarity),
+	TP_ARGS(polarity, oldnesting, newnesting),
 
 	TP_STRUCT__entry(
 		__field(char *, polarity)
+		__field(long long, oldnesting)
+		__field(long long, newnesting)
 	),
 
 	TP_fast_assign(
 		__entry->polarity = polarity;
+		__entry->oldnesting = oldnesting;
+		__entry->newnesting = newnesting;
 	),
 
-	TP_printk("%s", __entry->polarity)
+	TP_printk("%s %llx %llx", __entry->polarity,
+		  __entry->oldnesting, __entry->newnesting)
+);
+
+/*
+ * Tracepoint for RCU preparation for idle, the goal being to get RCU
+ * processing done so that the current CPU can shut off its scheduling
+ * clock and enter dyntick-idle mode.  One way to accomplish this is
+ * to drain all RCU callbacks from this CPU, and the other is to have
+ * done everything RCU requires for the current grace period.  In this
+ * latter case, the CPU will be awakened at the end of the current grace
+ * period in order to process the remainder of its callbacks.
+ *
+ * These tracepoints take a string as argument:
+ *
+ *	"No callbacks": Nothing to do, no callbacks on this CPU.
+ *	"In holdoff": Nothing to do, holding off after unsuccessful attempt.
+ *	"Begin holdoff": Attempt failed, don't retry until next jiffy.
+ *	"Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *	"More callbacks": Still more callbacks, try again to clear them out.
+ *	"Callbacks drained": All callbacks processed, off to dyntick idle!
+ *	"Timer": Timer fired to cause CPU to continue processing callbacks.
+ */
+TRACE_EVENT(rcu_prep_idle,
+
+	TP_PROTO(char *reason),
+
+	TP_ARGS(reason),
+
+	TP_STRUCT__entry(
+		__field(char *, reason)
+	),
+
+	TP_fast_assign(
+		__entry->reason = reason;
+	),
+
+	TP_printk("%s", __entry->reason)
 );
 
 /*
@@ -412,27 +461,71 @@
 
 /*
  * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
- * invoked.  The first argument is the name of the RCU flavor and
- * the second argument is number of callbacks actually invoked.
+ * invoked.  The first argument is the name of the RCU flavor,
+ * the second argument is number of callbacks actually invoked,
+ * the third argument (cb) is whether or not any of the callbacks that
+ * were ready to invoke at the beginning of this batch are still
+ * queued, the fourth argument (nr) is the return value of need_resched(),
+ * the fifth argument (iit) is 1 if the current task is the idle task,
+ * and the sixth argument (risk) is the return value from
+ * rcu_is_callbacks_kthread().
  */
 TRACE_EVENT(rcu_batch_end,
 
-	TP_PROTO(char *rcuname, int callbacks_invoked),
+	TP_PROTO(char *rcuname, int callbacks_invoked,
+		 bool cb, bool nr, bool iit, bool risk),
 
-	TP_ARGS(rcuname, callbacks_invoked),
+	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
 
 	TP_STRUCT__entry(
 		__field(char *, rcuname)
 		__field(int, callbacks_invoked)
+		__field(bool, cb)
+		__field(bool, nr)
+		__field(bool, iit)
+		__field(bool, risk)
 	),
 
 	TP_fast_assign(
 		__entry->rcuname = rcuname;
 		__entry->callbacks_invoked = callbacks_invoked;
+		__entry->cb = cb;
+		__entry->nr = nr;
+		__entry->iit = iit;
+		__entry->risk = risk;
 	),
 
-	TP_printk("%s CBs-invoked=%d",
-		  __entry->rcuname, __entry->callbacks_invoked)
+	TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
+		  __entry->rcuname, __entry->callbacks_invoked,
+		  __entry->cb ? 'C' : '.',
+		  __entry->nr ? 'S' : '.',
+		  __entry->iit ? 'I' : '.',
+		  __entry->risk ? 'R' : '.')
+);
+
+/*
+ * Tracepoint for rcutorture readers.  The first argument is the name
+ * of the RCU flavor from rcutorture's viewpoint and the second argument
+ * is the callback address.
+ */
+TRACE_EVENT(rcu_torture_read,
+
+	TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+
+	TP_ARGS(rcutorturename, rhp),
+
+	TP_STRUCT__entry(
+		__field(char *, rcutorturename)
+		__field(struct rcu_head *, rhp)
+	),
+
+	TP_fast_assign(
+		__entry->rcutorturename = rcutorturename;
+		__entry->rhp = rhp;
+	),
+
+	TP_printk("%s torture read %p",
+		  __entry->rcutorturename, __entry->rhp)
 );
 
 #else /* #ifdef CONFIG_RCU_TRACE */
@@ -443,13 +536,16 @@
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#define trace_rcu_prep_idle(reason) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
 #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
-#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
+	do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
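
For reference, the four new flags of rcu_batch_end render through the
TP_printk() format above as single characters.  An illustrative call
with hypothetical values (the real call site lives in the RCU core's
callback-invocation path):

static void example_report_batch(void)
{
	bool cb = true;		/* ready callbacks still queued  -> 'C' */
	bool nr = false;	/* need_resched() returned false -> '.' */
	bool iit = false;	/* not running in the idle task  -> '.' */
	bool risk = true;	/* running in the callbacks kthread -> 'R' */

	trace_rcu_batch_end("rcu_sched", 16, cb, nr, iit, risk);
	/* renders as: rcu_sched CBs-invoked=16 idle=C..R */
}
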
 
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 1e3193b..12fbf43 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -55,6 +55,15 @@
 
 );
 
+DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+
+	TP_PROTO(struct device *dev, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(dev, reg, val)
+
+);
+
 DECLARE_EVENT_CLASS(regmap_block,
 
 	TP_PROTO(struct device *dev, unsigned int reg, int count),
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 959ff18..6ba596b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -331,6 +331,13 @@
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for accounting blocked time (time the task spends in uninterruptible sleep).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay));
+
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
@@ -363,6 +370,56 @@
 			(unsigned long long)__entry->vruntime)
 );
 
+#ifdef CREATE_TRACE_POINTS
+static inline u64 trace_get_sleeptime(struct task_struct *tsk)
+{
+#ifdef CONFIG_SCHEDSTATS
+	u64 block, sleep;
+
+	block = tsk->se.statistics.block_start;
+	sleep = tsk->se.statistics.sleep_start;
+	tsk->se.statistics.block_start = 0;
+	tsk->se.statistics.sleep_start = 0;
+
+	return block ? block : sleep ? sleep : 0;
+#else
+	return 0;
+#endif
+}
+#endif
+
+/*
+ * Tracepoint for accounting sleeptime (time the task is sleeping
+ * or waiting for I/O).
+ */
+TRACE_EVENT(sched_stat_sleeptime,
+
+	TP_PROTO(struct task_struct *tsk, u64 now),
+
+	TP_ARGS(tsk, now),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( u64,	sleeptime		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->sleeptime = trace_get_sleeptime(tsk);
+		__entry->sleeptime = __entry->sleeptime ?
+				now - __entry->sleeptime : 0;
+	)
+	TP_perf_assign(
+		__perf_count(__entry->sleeptime);
+	),
+
+	TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->sleeptime)
+);
+
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index b99caa8..99d1d0d 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -21,6 +21,16 @@
 		{I_REFERENCED,		"I_REFERENCED"}		\
 	)
 
+#define WB_WORK_REASON							\
+		{WB_REASON_BACKGROUND,		"background"},		\
+		{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"},	\
+		{WB_REASON_SYNC,		"sync"},		\
+		{WB_REASON_PERIODIC,		"periodic"},		\
+		{WB_REASON_LAPTOP_TIMER,	"laptop_timer"},	\
+		{WB_REASON_FREE_MORE_MEM,	"free_more_memory"},	\
+		{WB_REASON_FS_FREE_SPACE,	"fs_free_space"},	\
+		{WB_REASON_FORKER_THREAD,	"forker_thread"}
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@
 		  __entry->for_kupdate,
 		  __entry->range_cyclic,
 		  __entry->for_background,
-		  wb_reason_name[__entry->reason]
+		  __print_symbolic(__entry->reason, WB_WORK_REASON)
 	)
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@
 		__entry->older,	/* older_than_this in jiffies */
 		__entry->age,	/* older_than_this in relative milliseconds */
 		__entry->moved,
-		wb_reason_name[__entry->reason])
+		__print_symbolic(__entry->reason, WB_WORK_REASON)
+	)
 );
 
 TRACE_EVENT(global_dirty_state,
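
Switching from the wb_reason_name[] array to __print_symbolic() lets the
value-to-string table travel with the event format, so user-space tools
can decode the reason without the removed array.  A user-space model of
the lookup __print_symbolic() performs (the table entries here are
illustrative; the real mapping is the WB_WORK_REASON list above):

#include <stdio.h>

struct trace_sym { unsigned long value; const char *name; };

static const struct trace_sym reasons[] = {	/* hypothetical values */
	{ 0, "background" },
	{ 1, "try_to_free_pages" },
};

static const char *print_symbolic(unsigned long v,
				  const struct trace_sym *tbl, int n)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].value == v)
			return tbl[i].name;
	return "?";
}

int main(void)
{
	printf("%s\n", print_symbolic(1, reasons, 2));	/* try_to_free_pages */
	return 0;
}
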
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index d29c153..cc2e1a7 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -29,11 +29,11 @@
 		bool highmem);
 void free_xenballooned_pages(int nr_pages, struct page **pages);
 
-struct sys_device;
+struct device;
 #ifdef CONFIG_XEN_SELFBALLOONING
-extern int register_xen_selfballooning(struct sys_device *sysdev);
+extern int register_xen_selfballooning(struct device *dev);
 #else
-static inline int register_xen_selfballooning(struct sys_device *sysdev)
+static inline int register_xen_selfballooning(struct device *dev)
 {
 	return -ENOSYS;
 }
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index f0b6890..f6f07aa 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -29,8 +29,7 @@
     XS_IS_DOMAIN_INTRODUCED,
     XS_RESUME,
     XS_SET_TARGET,
-    XS_RESTRICT,
-    XS_RESET_WATCHES
+    XS_RESTRICT
 };
 
 #define XS_WRITE_NONE "NONE"
diff --git a/init/Kconfig b/init/Kconfig
index 43298f9..a075765 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -469,14 +469,14 @@
 
 config RCU_FAST_NO_HZ
 	bool "Accelerate last non-dyntick-idle CPU's grace periods"
-	depends on TREE_RCU && NO_HZ && SMP
+	depends on NO_HZ && SMP
 	default n
 	help
 	  This option causes RCU to attempt to accelerate grace periods
-	  in order to allow the final CPU to enter dynticks-idle state
-	  more quickly.  On the other hand, this option increases the
-	  overhead of the dynticks-idle checking, particularly on systems
-	  with large numbers of CPUs.
+	  in order to allow CPUs to enter dynticks-idle state more
+	  quickly.  On the other hand, this option increases the overhead
+	  of the dynticks-idle checking, particularly on systems with
+	  large numbers of CPUs.
 
 	  Say Y if energy efficiency is critically important, particularly
 	  	if you have relatively few CPUs.
@@ -689,6 +689,17 @@
 	  For those who want to have the feature enabled by default should
 	  select this option (if, for some reason, they need to disable it
 	  then swapaccount=0 does the trick).
+config CGROUP_MEM_RES_CTLR_KMEM
+	bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
+	depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL
+	default n
+	help
+	  The Kernel Memory extension for Memory Resource Controller can limit
+	  the amount of memory used by kernel objects in the system. Those are
+	  fundamentally different from the entities handled by the standard
+	  Memory Controller, which are page-based, and can be swapped. Users of
+	  the kmem extension can use it to guarantee that no group of processes
+	  will ever exhaust kernel resources alone.
 
 config CGROUP_PERF
 	bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 0f6e1d9..b2eee02 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -325,17 +325,19 @@
 
 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
 {
+	struct super_block *s;
 	int err = sys_mount(name, "/root", fs, flags, data);
 	if (err)
 		return err;
 
 	sys_chdir((const char __user __force *)"/root");
-	ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
+	s = current->fs->pwd.dentry->d_sb;
+	ROOT_DEV = s->s_dev;
 	printk(KERN_INFO
 	       "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
-	       current->fs->pwd.mnt->mnt_sb->s_type->name,
-	       current->fs->pwd.mnt->mnt_sb->s_flags & MS_RDONLY ?
-	       " readonly" : "", MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
+	       s->s_type->name,
+	       s->s_flags & MS_RDONLY ?  " readonly" : "",
+	       MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
 	return 0;
 }
 
diff --git a/init/initramfs.c b/init/initramfs.c
index 2531811..8216c30 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -22,7 +22,7 @@
 
 static __initdata struct hash {
 	int ino, minor, major;
-	mode_t mode;
+	umode_t mode;
 	struct hash *next;
 	char name[N_ALIGN(PATH_MAX)];
 } *head[32];
@@ -35,7 +35,7 @@
 }
 
 static char __init *find_link(int major, int minor, int ino,
-			      mode_t mode, char *name)
+			      umode_t mode, char *name)
 {
 	struct hash **p, *q;
 	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
@@ -120,7 +120,7 @@
 /* cpio header parsing */
 
 static __initdata unsigned long ino, major, minor, nlink;
-static __initdata mode_t mode;
+static __initdata umode_t mode;
 static __initdata unsigned long body_len, name_len;
 static __initdata uid_t uid;
 static __initdata gid_t gid;
@@ -276,7 +276,7 @@
 	return 0;
 }
 
-static void __init clean_path(char *path, mode_t mode)
+static void __init clean_path(char *path, umode_t mode)
 {
 	struct stat st;
 
diff --git a/init/main.c b/init/main.c
index 217ed23..2c76efb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -469,13 +469,12 @@
 	char * command_line;
 	extern const struct kernel_param __start___param[], __stop___param[];
 
-	smp_setup_processor_id();
-
 	/*
 	 * Need to run as early as possible, to initialize the
 	 * lockdep hash:
 	 */
 	lockdep_init();
+	smp_setup_processor_id();
 	debug_objects_early_init();
 
 	/*
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 2e0ecfc..9a142a2 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -108,7 +108,7 @@
 }
 
 static struct inode *mqueue_get_inode(struct super_block *sb,
-		struct ipc_namespace *ipc_ns, int mode,
+		struct ipc_namespace *ipc_ns, umode_t mode,
 		struct mq_attr *attr)
 {
 	struct user_struct *u = current_user();
@@ -243,7 +243,6 @@
 static void mqueue_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
 }
 
@@ -296,7 +295,7 @@
 }
 
 static int mqueue_create(struct inode *dir, struct dentry *dentry,
-				int mode, struct nameidata *nd)
+				umode_t mode, struct nameidata *nd)
 {
 	struct inode *inode;
 	struct mq_attr *attr = dentry->d_fsdata;
@@ -611,7 +610,7 @@
  * Invoked when creating a new queue via sys_mq_open
  */
 static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
-			struct dentry *dentry, int oflag, mode_t mode,
+			struct dentry *dentry, int oflag, umode_t mode,
 			struct mq_attr *attr)
 {
 	const struct cred *cred = current_cred();
@@ -680,7 +679,7 @@
 	return ERR_PTR(ret);
 }
 
-SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
+SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
 		struct mq_attr __user *, u_attr)
 {
 	struct dentry *dentry;
@@ -1269,7 +1268,7 @@
 
 void mq_put_mnt(struct ipc_namespace *ns)
 {
-	mntput(ns->mq_mnt);
+	kern_unmount(ns->mq_mnt);
 }
 
 static int __init init_mqueue_fs(void)
@@ -1291,11 +1290,9 @@
 
 	spin_lock_init(&mq_lock);
 
-	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
-	if (IS_ERR(init_ipc_ns.mq_mnt)) {
-		error = PTR_ERR(init_ipc_ns.mq_mnt);
+	error = mq_init_ns(&init_ipc_ns);
+	if (error)
 		goto out_filesystem;
-	}
 
 	return 0;
 
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 8b5ce5d3..5652101 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -27,11 +27,6 @@
  */
 struct ipc_namespace init_ipc_ns = {
 	.count		= ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
-	.mq_queues_max   = DFLT_QUEUESMAX,
-	.mq_msg_max      = DFLT_MSGMAX,
-	.mq_msgsize_max  = DFLT_MSGSIZEMAX,
-#endif
 	.user_ns = &init_user_ns,
 };
 
diff --git a/kernel/Makefile b/kernel/Makefile
index e898c5b..f70396e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,16 +2,15 @@
 # Makefile for the linux kernel.
 #
 
-obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
+obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
-	    notifier.o ksysfs.o sched_clock.o cred.o \
-	    async.o range.o
-obj-y += groups.o
+	    notifier.o ksysfs.o cred.o \
+	    async.o range.o groups.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -20,10 +19,11 @@
 CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
-CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
+obj-y += sched/
+
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
@@ -99,7 +99,6 @@
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
-obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 
@@ -110,15 +109,6 @@
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 
-ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
-# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
-# needed for x86 only.  Why this used to be enabled for all architectures is beyond
-# me.  I suspect most platforms don't need this, but until we know that for sure
-# I turn this off for IA-64 only.  Andreas Schwab says it's also needed on m68k
-# to get a correct value for the wait-channel (WCHAN in ps). --davidm
-CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
-endif
-
 $(obj)/configs.o: $(obj)/config_data.h
 
 # config_data.h contains the same information as ikconfig.h but gzipped.
diff --git a/kernel/acct.c b/kernel/acct.c
index fa7eb3d..02e6167 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -84,11 +84,10 @@
  * the cache line to have the data after getting the lock.
  */
 struct bsd_acct_struct {
-	volatile int		active;
-	volatile int		needcheck;
+	int			active;
+	unsigned long		needcheck;
 	struct file		*file;
 	struct pid_namespace	*ns;
-	struct timer_list	timer;
 	struct list_head	list;
 };
 
@@ -96,15 +95,6 @@
 static LIST_HEAD(acct_list);
 
 /*
- * Called whenever the timer says to check the free space.
- */
-static void acct_timeout(unsigned long x)
-{
-	struct bsd_acct_struct *acct = (struct bsd_acct_struct *)x;
-	acct->needcheck = 1;
-}
-
-/*
  * Check the amount of free space and suspend/resume accordingly.
  */
 static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
@@ -112,12 +102,12 @@
 	struct kstatfs sbuf;
 	int res;
 	int act;
-	sector_t resume;
-	sector_t suspend;
+	u64 resume;
+	u64 suspend;
 
 	spin_lock(&acct_lock);
 	res = acct->active;
-	if (!file || !acct->needcheck)
+	if (!file || time_is_before_jiffies(acct->needcheck))
 		goto out;
 	spin_unlock(&acct_lock);
 
@@ -127,8 +117,8 @@
 	suspend = sbuf.f_blocks * SUSPEND;
 	resume = sbuf.f_blocks * RESUME;
 
-	sector_div(suspend, 100);
-	sector_div(resume, 100);
+	do_div(suspend, 100);
+	do_div(resume, 100);
 
 	if (sbuf.f_bavail <= suspend)
 		act = -1;
@@ -160,10 +150,7 @@
 		}
 	}
 
-	del_timer(&acct->timer);
-	acct->needcheck = 0;
-	acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
-	add_timer(&acct->timer);
+	acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
 	res = acct->active;
 out:
 	spin_unlock(&acct_lock);
@@ -185,9 +172,7 @@
 	if (acct->file) {
 		old_acct = acct->file;
 		old_ns = acct->ns;
-		del_timer(&acct->timer);
 		acct->active = 0;
-		acct->needcheck = 0;
 		acct->file = NULL;
 		acct->ns = NULL;
 		list_del(&acct->list);
@@ -195,13 +180,9 @@
 	if (file) {
 		acct->file = file;
 		acct->ns = ns;
-		acct->needcheck = 0;
+		acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
 		acct->active = 1;
 		list_add(&acct->list, &acct_list);
-		/* It's been deleted if it was used before so this is safe */
-		setup_timer(&acct->timer, acct_timeout, (unsigned long)acct);
-		acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
-		add_timer(&acct->timer);
 	}
 	if (old_acct) {
 		mnt_unpin(old_acct->f_path.mnt);
@@ -334,7 +315,7 @@
 	spin_lock(&acct_lock);
 restart:
 	list_for_each_entry(acct, &acct_list, list)
-		if (acct->file && acct->file->f_path.mnt->mnt_sb == sb) {
+		if (acct->file && acct->file->f_path.dentry->d_sb == sb) {
 			acct_file_reopen(acct, NULL, NULL);
 			goto restart;
 		}
@@ -348,7 +329,6 @@
 	if (acct == NULL)
 		return;
 
-	del_timer_sync(&acct->timer);
 	spin_lock(&acct_lock);
 	if (acct->file != NULL)
 		acct_file_reopen(acct, NULL, NULL);
@@ -498,7 +478,7 @@
 	 * Fill the accounting struct with the needed info as recorded
 	 * by the different kernel functions.
 	 */
-	memset((caddr_t)&ac, 0, sizeof(acct_t));
+	memset(&ac, 0, sizeof(acct_t));
 
 	ac.ac_version = ACCT_VERSION | ACCT_BYTEORDER;
 	strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
@@ -613,8 +593,8 @@
 		pacct->ac_flag |= ACORE;
 	if (current->flags & PF_SIGNALED)
 		pacct->ac_flag |= AXSIG;
-	pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime);
-	pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime);
+	pacct->ac_utime += current->utime;
+	pacct->ac_stime += current->stime;
 	pacct->ac_minflt += current->min_flt;
 	pacct->ac_majflt += current->maj_flt;
 	spin_unlock_irq(&current->sighand->siglock);
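
Two details of the acct.c rework are easy to miss: do_div() divides a
64-bit value in place (returning the remainder), which is why suspend and
resume become plain u64 and drop sector_div(); and the watchdog timer is
replaced by a jiffies deadline tested with time_is_before_jiffies().  A
user-space model of the threshold arithmetic, assuming the default
percentages of 2 (suspend) and 4 (resume):

#include <stdint.h>
#include <stdio.h>

#define SUSPEND	2	/* assumed: suspend accounting below 2% free */
#define RESUME	4	/* assumed: resume accounting above 4% free */

int main(void)
{
	uint64_t f_blocks = 2621440;	/* hypothetical fs: 10 GiB of 4 KiB blocks */
	uint64_t suspend = f_blocks * SUSPEND;
	uint64_t resume = f_blocks * RESUME;

	/* do_div(x, 100) in the kernel leaves the quotient in x */
	suspend /= 100;
	resume /= 100;

	printf("suspend below %llu free blocks, resume above %llu\n",
	       (unsigned long long)suspend, (unsigned long long)resume);
	return 0;
}
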
diff --git a/kernel/audit.c b/kernel/audit.c
index 09fae26..2c1d6ab 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1260,12 +1260,13 @@
 		avail = audit_expand(ab,
 			max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
 		if (!avail)
-			goto out;
+			goto out_va_end;
 		len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
 	}
-	va_end(args2);
 	if (len > 0)
 		skb_put(skb, len);
+out_va_end:
+	va_end(args2);
 out:
 	return;
 }
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 47b7fc1..e7fe2b0 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -210,12 +210,12 @@
 		struct {
 			uid_t			uid;
 			gid_t			gid;
-			mode_t			mode;
+			umode_t			mode;
 			u32			osid;
 			int			has_perm;
 			uid_t			perm_uid;
 			gid_t			perm_gid;
-			mode_t			perm_mode;
+			umode_t			perm_mode;
 			unsigned long		qbytes;
 		} ipc;
 		struct {
@@ -234,7 +234,7 @@
 		} mq_sendrecv;
 		struct {
 			int			oflag;
-			mode_t			mode;
+			umode_t			mode;
 			struct mq_attr		attr;
 		} mq_open;
 		struct {
@@ -308,7 +308,7 @@
 static int audit_match_filetype(struct audit_context *ctx, int which)
 {
 	unsigned index = which & ~S_IFMT;
-	mode_t mode = which & S_IFMT;
+	umode_t mode = which & S_IFMT;
 
 	if (unlikely(!ctx))
 		return 0;
@@ -1249,7 +1249,7 @@
 	case AUDIT_IPC: {
 		u32 osid = context->ipc.osid;
 
-		audit_log_format(ab, "ouid=%u ogid=%u mode=%#o",
+		audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho",
 			 context->ipc.uid, context->ipc.gid, context->ipc.mode);
 		if (osid) {
 			char *ctx = NULL;
@@ -1267,7 +1267,7 @@
 			ab = audit_log_start(context, GFP_KERNEL,
 					     AUDIT_IPC_SET_PERM);
 			audit_log_format(ab,
-				"qbytes=%lx ouid=%u ogid=%u mode=%#o",
+				"qbytes=%lx ouid=%u ogid=%u mode=%#ho",
 				context->ipc.qbytes,
 				context->ipc.perm_uid,
 				context->ipc.perm_gid,
@@ -1278,7 +1278,7 @@
 		break; }
 	case AUDIT_MQ_OPEN: {
 		audit_log_format(ab,
-			"oflag=0x%x mode=%#o mq_flags=0x%lx mq_maxmsg=%ld "
+			"oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld "
 			"mq_msgsize=%ld mq_curmsgs=%ld",
 			context->mq_open.oflag, context->mq_open.mode,
 			context->mq_open.attr.mq_flags,
@@ -1502,7 +1502,7 @@
 
 		if (n->ino != (unsigned long)-1) {
 			audit_log_format(ab, " inode=%lu"
-					 " dev=%02x:%02x mode=%#o"
+					 " dev=%02x:%02x mode=%#ho"
 					 " ouid=%u ogid=%u rdev=%02x:%02x",
 					 n->ino,
 					 MAJOR(n->dev),
@@ -2160,7 +2160,7 @@
  * @attr: queue attributes
  *
  */
-void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
+void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
 {
 	struct audit_context *context = current->audit_context;
 
@@ -2260,7 +2260,7 @@
  *
  * Called only after audit_ipc_obj().
  */
-void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
+void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
 {
 	struct audit_context *context = current->audit_context;
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648..7cab65f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -760,7 +760,7 @@
  * -> cgroup_mkdir.
  */
 
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
@@ -775,7 +775,7 @@
 static int alloc_css_id(struct cgroup_subsys *ss,
 			struct cgroup *parent, struct cgroup *child);
 
-static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
+static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
 {
 	struct inode *inode = new_inode(sb);
 
@@ -1038,9 +1038,9 @@
 	return 0;
 }
 
-static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
 {
-	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
+	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
 	struct cgroup_subsys *ss;
 
 	mutex_lock(&cgroup_mutex);
@@ -2098,11 +2098,6 @@
 			continue;
 		/* get old css_set pointer */
 		task_lock(tsk);
-		if (tsk->flags & PF_EXITING) {
-			/* ignore this task if it's going away */
-			task_unlock(tsk);
-			continue;
-		}
 		oldcg = tsk->cgroups;
 		get_css_set(oldcg);
 		task_unlock(tsk);
@@ -2590,7 +2585,7 @@
 	return __d_cft(file->f_dentry);
 }
 
-static int cgroup_create_file(struct dentry *dentry, mode_t mode,
+static int cgroup_create_file(struct dentry *dentry, umode_t mode,
 				struct super_block *sb)
 {
 	struct inode *inode;
@@ -2631,7 +2626,7 @@
  * @mode: mode to set on new directory.
  */
 static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
-				mode_t mode)
+				umode_t mode)
 {
 	struct dentry *parent;
 	int error = 0;
@@ -2658,9 +2653,9 @@
  * returns S_IRUGO if it has only a read handler
  * returns S_IWUSR if it has only a write hander
  */
-static mode_t cgroup_file_mode(const struct cftype *cft)
+static umode_t cgroup_file_mode(const struct cftype *cft)
 {
-	mode_t mode = 0;
+	umode_t mode = 0;
 
 	if (cft->mode)
 		return cft->mode;
@@ -2683,7 +2678,7 @@
 	struct dentry *dir = cgrp->dentry;
 	struct dentry *dentry;
 	int error;
-	mode_t mode;
+	umode_t mode;
 
 	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
 	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
@@ -3757,7 +3752,7 @@
  * Must be called with the mutex on the parent inode held
  */
 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
-			     mode_t mode)
+			     umode_t mode)
 {
 	struct cgroup *cgrp;
 	struct cgroupfs_root *root = parent->root;
@@ -3851,7 +3846,7 @@
 	return err;
 }
 
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct cgroup *c_parent = dentry->d_parent->d_fsdata;
 
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 213c035..fcb93fc 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,19 +48,17 @@
 			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 /*
@@ -102,9 +100,6 @@
  * freezer_can_attach():
  * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
  * freezer->lock
  *  sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@
  *   write_lock css_set_lock (cgroup iterator start)
  *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
- *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  *     sighand->siglock
  */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@
 static void freezer_destroy(struct cgroup_subsys *ss,
 			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@
 
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@
 
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 }
 
@@ -238,7 +231,7 @@
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (is_task_frozen_enough(task))
+		if (freezing(task) && is_task_frozen_enough(task))
 			nfrozen++;
 	}
 
@@ -286,10 +279,9 @@
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
 		if (is_task_frozen_enough(task))
 			continue;
@@ -307,12 +299,9 @@
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
 	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@
 	spin_lock_irq(&freezer->lock);
 
 	update_if_frozen(cgroup, freezer);
-	if (goal_state == freezer->state)
-		goto out;
 
 	switch (goal_state) {
 	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
 		unfreeze_cgroup(cgroup, freezer);
 		break;
 	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
 		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
 		BUG();
 	}
-out:
+
 	spin_unlock_irq(&freezer->lock);
 
 	return retval;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563f136..2060c6e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -178,8 +178,7 @@
 	write_lock_irq(&tasklist_lock);
 	for_each_process(p) {
 		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-		    (!cputime_eq(p->utime, cputime_zero) ||
-		     !cputime_eq(p->stime, cputime_zero)))
+		    (p->utime || p->stime))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
 				"(state = %ld, flags = %x)\n",
 				p->comm, task_pid_nr(p), cpu,
@@ -380,6 +379,7 @@
 	cpu_maps_update_done();
 	return err;
 }
+EXPORT_SYMBOL_GPL(cpu_up);
 
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
@@ -470,7 +470,7 @@
 	cpu_maps_update_done();
 }
 
-static int alloc_frozen_cpus(void)
+static int __init alloc_frozen_cpus(void)
 {
 	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
 		return -ENOMEM;
@@ -543,7 +543,7 @@
 }
 
 
-int cpu_hotplug_pm_sync_init(void)
+static int __init cpu_hotplug_pm_sync_init(void)
 {
 	pm_notifier(cpu_hotplug_pm_callback, 0);
 	return 0;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c4..0b1712d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@
 			    struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+	return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
-	bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+	bool need_loop;
 
 repeat:
 	/*
@@ -962,6 +975,14 @@
 		return;
 
 	task_lock(tsk);
+	/*
+	 * Determine if a loop is necessary if another thread is doing
+	 * get_mems_allowed().  If at least one node remains unchanged and
+	 * tsk does not have a mempolicy, then an empty nodemask will not be
+	 * possible when mems_allowed is larger than a word.
+	 */
+	need_loop = task_has_mempolicy(tsk) ||
+			!nodes_intersects(*newmems, tsk->mems_allowed);
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@
 
 	/*
 	 * Allocation of memory is very fast, we needn't sleep when waiting
-	 * for the read-side.  No wait is necessary, however, if at least one
-	 * node remains unchanged.
+	 * for the read-side.
 	 */
-	while (masks_disjoint &&
-			ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 		task_unlock(tsk);
 		if (!task_curr(tsk))
 			yield();
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 5532dd3..7d6fb40 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -636,7 +636,7 @@
 		(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
 		(p->exit_state & EXIT_DEAD) ? 'E' :
 		(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
-	if (p->pid == 0) {
+	if (is_idle_task(p)) {
 		/* Idle task.  Is it really idle, apart from the kdb
 		 * interrupt? */
 		if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 89e5e8a..22d901f 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@
 CFLAGS_REMOVE_core.o = -pg
 endif
 
-obj-y := core.o ring_buffer.o
+obj-y := core.o ring_buffer.o callchain.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
new file mode 100644
index 0000000..057e24b
--- /dev/null
+++ b/kernel/events/callchain.c
@@ -0,0 +1,191 @@
+/*
+ * Performance events callchain code, extracted from core.c:
+ *
+ *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct callchain_cpus_entries {
+	struct rcu_head			rcu_head;
+	struct perf_callchain_entry	*cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+static struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+				  struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+				struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+	struct callchain_cpus_entries *entries;
+	int cpu;
+
+	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+	for_each_possible_cpu(cpu)
+		kfree(entries->cpu_entries[cpu]);
+
+	kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+	struct callchain_cpus_entries *entries;
+
+	entries = callchain_cpus_entries;
+	rcu_assign_pointer(callchain_cpus_entries, NULL);
+	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+	int cpu;
+	int size;
+	struct callchain_cpus_entries *entries;
+
+	/*
+	 * We can't use the percpu allocation API for data that can be
+	 * accessed from NMI. Use a temporary manual per cpu allocation
+	 * until that gets sorted out.
+	 */
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
+
+	entries = kzalloc(size, GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+	for_each_possible_cpu(cpu) {
+		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+							 cpu_to_node(cpu));
+		if (!entries->cpu_entries[cpu])
+			goto fail;
+	}
+
+	rcu_assign_pointer(callchain_cpus_entries, entries);
+
+	return 0;
+
+fail:
+	for_each_possible_cpu(cpu)
+		kfree(entries->cpu_entries[cpu]);
+	kfree(entries);
+
+	return -ENOMEM;
+}
+
+int get_callchain_buffers(void)
+{
+	int err = 0;
+	int count;
+
+	mutex_lock(&callchain_mutex);
+
+	count = atomic_inc_return(&nr_callchain_events);
+	if (WARN_ON_ONCE(count < 1)) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (count > 1) {
+		/* If the allocation failed, give up */
+		if (!callchain_cpus_entries)
+			err = -ENOMEM;
+		goto exit;
+	}
+
+	err = alloc_callchain_buffers();
+	if (err)
+		release_callchain_buffers();
+exit:
+	mutex_unlock(&callchain_mutex);
+
+	return err;
+}
+
+void put_callchain_buffers(void)
+{
+	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+		release_callchain_buffers();
+		mutex_unlock(&callchain_mutex);
+	}
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+	int cpu;
+	struct callchain_cpus_entries *entries;
+
+	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	if (*rctx == -1)
+		return NULL;
+
+	entries = rcu_dereference(callchain_cpus_entries);
+	if (!entries)
+		return NULL;
+
+	cpu = smp_processor_id();
+
+	return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	int rctx;
+	struct perf_callchain_entry *entry;
+
+
+	entry = get_callchain_entry(&rctx);
+	if (rctx == -1)
+		return NULL;
+
+	if (!entry)
+		goto exit_put;
+
+	entry->nr = 0;
+
+	if (!user_mode(regs)) {
+		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(entry, regs);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs) {
+		perf_callchain_store(entry, PERF_CONTEXT_USER);
+		perf_callchain_user(entry, regs);
+	}
+
+exit_put:
+	put_callchain_entry(rctx);
+
+	return entry;
+}
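
The buffers above are reference-counted: the first get_callchain_buffers()
caller allocates the per-cpu entries, later callers only bump
nr_callchain_events, and the last put_callchain_buffers() schedules the
RCU-deferred free.  A sketch of the intended pairing and of consuming a
callchain (the function names are hypothetical; in the tree the callers
sit in the perf event init/destroy and sampling paths):

/* Illustrative pairing only. */
static int example_event_init(void)
{
	return get_callchain_buffers();	/* first user allocates the buffers */
}

static void example_event_destroy(void)
{
	put_callchain_buffers();	/* last user triggers the deferred free */
}

static void example_sample(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = perf_callchain(regs);
	u64 i;

	if (!entry)
		return;

	for (i = 0; i < entry->nr; i++)
		;	/* entry->ip[i] holds one frame address */
}
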
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3b9df5..3afc68c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4,7 +4,7 @@
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
  */
@@ -128,7 +128,7 @@
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key perf_sched_events __read_mostly;
+struct jump_label_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -1130,6 +1130,8 @@
 	if (!is_software_event(event))
 		cpuctx->active_oncpu--;
 	ctx->nr_active--;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
 }
@@ -1325,6 +1327,7 @@
 	}
 	raw_spin_unlock_irq(&ctx->lock);
 }
+EXPORT_SYMBOL_GPL(perf_event_disable);
 
 static void perf_set_shadow_time(struct perf_event *event,
 				 struct perf_event_context *ctx,
@@ -1406,6 +1409,8 @@
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq++;
 
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
@@ -1662,8 +1667,7 @@
  * Note: this works for group members as well as group leaders
  * since the non-leader members' sibling_lists will be empty.
  */
-static void __perf_event_mark_enabled(struct perf_event *event,
-					struct perf_event_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event)
 {
 	struct perf_event *sub;
 	u64 tstamp = perf_event_time(event);
@@ -1701,7 +1705,7 @@
 	 */
 	perf_cgroup_set_timestamp(current, ctx);
 
-	__perf_event_mark_enabled(event, ctx);
+	__perf_event_mark_enabled(event);
 
 	if (!event_filter_match(event)) {
 		if (is_cgroup_event(event))
@@ -1782,7 +1786,7 @@
 
 retry:
 	if (!ctx->is_active) {
-		__perf_event_mark_enabled(event, ctx);
+		__perf_event_mark_enabled(event);
 		goto out;
 	}
 
@@ -1809,6 +1813,7 @@
 out:
 	raw_spin_unlock_irq(&ctx->lock);
 }
+EXPORT_SYMBOL_GPL(perf_event_enable);
 
 int perf_event_refresh(struct perf_event *event, int refresh)
 {
@@ -2327,6 +2332,9 @@
 	u64 interrupts, now;
 	s64 delta;
 
+	if (!ctx->nr_freq)
+		return;
+
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -2382,12 +2390,14 @@
 {
 	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0, remove = 1;
+	int rotate = 0, remove = 1, freq = 0;
 
 	if (cpuctx->ctx.nr_events) {
 		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
+		if (cpuctx->ctx.nr_freq)
+			freq = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
@@ -2395,33 +2405,40 @@
 		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
+		if (ctx->nr_freq)
+			freq = 1;
 	}
 
+	if (!rotate && !freq)
+		goto done;
+
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
-	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-	if (ctx)
-		perf_ctx_adjust_freq(ctx, interval);
 
-	if (!rotate)
-		goto done;
+	if (freq) {
+		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+		if (ctx)
+			perf_ctx_adjust_freq(ctx, interval);
+	}
 
-	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	if (ctx)
-		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	if (rotate) {
+		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+		if (ctx)
+			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-	rotate_ctx(&cpuctx->ctx);
-	if (ctx)
-		rotate_ctx(ctx);
+		rotate_ctx(&cpuctx->ctx);
+		if (ctx)
+			rotate_ctx(ctx);
 
-	perf_event_sched_in(cpuctx, ctx, current);
+		perf_event_sched_in(cpuctx, ctx, current);
+	}
+
+	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
-
-	perf_pmu_enable(cpuctx->ctx.pmu);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 void perf_event_task_tick(void)
@@ -2448,7 +2465,7 @@
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	__perf_event_mark_enabled(event, ctx);
+	__perf_event_mark_enabled(event);
 
 	return 1;
 }
@@ -2480,13 +2497,7 @@
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
 
-	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-		ret = event_enable_on_exec(event, ctx);
-		if (ret)
-			enabled = 1;
-	}
-
-	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+	list_for_each_entry(event, &ctx->event_list, event_entry) {
 		ret = event_enable_on_exec(event, ctx);
 		if (ret)
 			enabled = 1;
@@ -2574,215 +2585,6 @@
 }
 
 /*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
-	struct rcu_head			rcu_head;
-	struct perf_callchain_entry	*cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
-				  struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
-				struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
-	struct callchain_cpus_entries *entries;
-	int cpu;
-
-	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
-	for_each_possible_cpu(cpu)
-		kfree(entries->cpu_entries[cpu]);
-
-	kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
-	struct callchain_cpus_entries *entries;
-
-	entries = callchain_cpus_entries;
-	rcu_assign_pointer(callchain_cpus_entries, NULL);
-	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
-	int cpu;
-	int size;
-	struct callchain_cpus_entries *entries;
-
-	/*
-	 * We can't use the percpu allocation API for data that can be
-	 * accessed from NMI. Use a temporary manual per cpu allocation
-	 * until that gets sorted out.
-	 */
-	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
-	entries = kzalloc(size, GFP_KERNEL);
-	if (!entries)
-		return -ENOMEM;
-
-	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
-	for_each_possible_cpu(cpu) {
-		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
-							 cpu_to_node(cpu));
-		if (!entries->cpu_entries[cpu])
-			goto fail;
-	}
-
-	rcu_assign_pointer(callchain_cpus_entries, entries);
-
-	return 0;
-
-fail:
-	for_each_possible_cpu(cpu)
-		kfree(entries->cpu_entries[cpu]);
-	kfree(entries);
-
-	return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
-	int err = 0;
-	int count;
-
-	mutex_lock(&callchain_mutex);
-
-	count = atomic_inc_return(&nr_callchain_events);
-	if (WARN_ON_ONCE(count < 1)) {
-		err = -EINVAL;
-		goto exit;
-	}
-
-	if (count > 1) {
-		/* If the allocation failed, give up */
-		if (!callchain_cpus_entries)
-			err = -ENOMEM;
-		goto exit;
-	}
-
-	err = alloc_callchain_buffers();
-	if (err)
-		release_callchain_buffers();
-exit:
-	mutex_unlock(&callchain_mutex);
-
-	return err;
-}
-
-static void put_callchain_buffers(void)
-{
-	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
-		release_callchain_buffers();
-		mutex_unlock(&callchain_mutex);
-	}
-}
-
-static int get_recursion_context(int *recursion)
-{
-	int rctx;
-
-	if (in_nmi())
-		rctx = 3;
-	else if (in_irq())
-		rctx = 2;
-	else if (in_softirq())
-		rctx = 1;
-	else
-		rctx = 0;
-
-	if (recursion[rctx])
-		return -1;
-
-	recursion[rctx]++;
-	barrier();
-
-	return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
-	barrier();
-	recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
-	int cpu;
-	struct callchain_cpus_entries *entries;
-
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
-	if (*rctx == -1)
-		return NULL;
-
-	entries = rcu_dereference(callchain_cpus_entries);
-	if (!entries)
-		return NULL;
-
-	cpu = smp_processor_id();
-
-	return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	int rctx;
-	struct perf_callchain_entry *entry;
-
-
-	entry = get_callchain_entry(&rctx);
-	if (rctx == -1)
-		return NULL;
-
-	if (!entry)
-		goto exit_put;
-
-	entry->nr = 0;
-
-	if (!user_mode(regs)) {
-		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
-	}
-
-exit_put:
-	put_callchain_entry(rctx);
-
-	return entry;
-}
-
-/*
  * Initialize the perf_event context in a task_struct:
  */
 static void __perf_event_init_context(struct perf_event_context *ctx)
@@ -2946,7 +2748,7 @@
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_dec(&perf_sched_events);
+			jump_label_dec_deferred(&perf_sched_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -2957,7 +2759,7 @@
 			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			jump_label_dec(&perf_sched_events);
+			jump_label_dec_deferred(&perf_sched_events);
 		}
 	}
 
@@ -3558,9 +3360,13 @@
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+	if (!rb)
+		goto unlock;
+
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
 		wake_up_all(&event->waitq);
-	}
+
+unlock:
 	rcu_read_unlock();
 }
 
@@ -4816,7 +4622,6 @@
 	struct hw_perf_event *hwc = &event->hw;
 	int throttle = 0;
 
-	data->period = event->hw.last_period;
 	if (!overflow)
 		overflow = perf_swevent_set_period(event);
 
@@ -4850,6 +4655,12 @@
 	if (!is_sampling_event(event))
 		return;
 
+	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+		data->period = nr;
+		return perf_swevent_overflow(event, 1, data, regs);
+	} else
+		data->period = event->hw.last_period;
+
 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
 		return perf_swevent_overflow(event, 1, data, regs);
 
@@ -5362,7 +5173,7 @@
 	regs = get_irq_regs();
 
 	if (regs && !perf_exclude_event(event, regs)) {
-		if (!(event->attr.exclude_idle && current->pid == 0))
+		if (!(event->attr.exclude_idle && is_idle_task(current)))
 			if (perf_event_overflow(event, &data, regs))
 				ret = HRTIMER_NORESTART;
 	}
@@ -5977,7 +5788,7 @@
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_inc(&perf_sched_events);
+			jump_label_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -6215,7 +6026,7 @@
 		 * - that may need work on context switch
 		 */
 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		jump_label_inc(&perf_sched_events);
+		jump_label_inc(&perf_sched_events.key);
 	}
 
 	/*
@@ -7061,6 +6872,9 @@
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+
+	/* do not patch jump label more than once per second */
+	jump_label_rate_limit(&perf_sched_events, HZ);
 }
 
 static int __init perf_event_sysfs_init(void)
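
The nr_freq counters added to the context above act purely as a fast-path
gate: perf_ctx_adjust_freq() is skipped, and with the restructured
perf_rotate_context() the context locks are not even taken, when no active
event uses frequency-based sampling. A toy userspace model of that
bookkeeping follows; the names are illustrative, not the kernel's.

/* Sketch of the ctx->nr_freq bookkeeping: events in frequency-based
 * sampling mode are counted in and out, so the tick can skip the
 * period-adjustment pass when the count is zero. */
#include <stdio.h>
#include <stdbool.h>

struct ctx { int nr_active; int nr_freq; };

static void event_sched_in(struct ctx *c, bool freq_mode)
{
	c->nr_active++;
	if (freq_mode)
		c->nr_freq++;
}

static void event_sched_out(struct ctx *c, bool freq_mode)
{
	c->nr_active--;
	if (freq_mode)
		c->nr_freq--;
}

static void tick(struct ctx *c)
{
	if (!c->nr_freq)
		return;			/* nothing to adjust: stay cheap */
	printf("adjusting periods for %d freq event(s)\n", c->nr_freq);
}

int main(void)
{
	struct ctx c = { 0, 0 };

	event_sched_in(&c, false);	/* fixed-period event */
	tick(&c);			/* skipped            */
	event_sched_in(&c, true);	/* freq event         */
	tick(&c);			/* runs               */
	event_sched_out(&c, true);
	tick(&c);			/* skipped again      */
	return 0;
}
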
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 64568a6..b0b107f 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -1,6 +1,10 @@
 #ifndef _KERNEL_EVENTS_INTERNAL_H
 #define _KERNEL_EVENTS_INTERNAL_H
 
+#include <linux/hardirq.h>
+
+/* Buffer handling */
+
 #define RING_BUFFER_WRITABLE		0x01
 
 struct ring_buffer {
@@ -67,7 +71,7 @@
 }
 #endif
 
-static unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct ring_buffer *rb)
 {
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
@@ -96,4 +100,37 @@
 	} while (len);
 }
 
+/* Callchain handling */
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
+
+static inline int get_recursion_context(int *recursion)
+{
+	int rctx;
+
+	if (in_nmi())
+		rctx = 3;
+	else if (in_irq())
+		rctx = 2;
+	else if (in_softirq())
+		rctx = 1;
+	else
+		rctx = 0;
+
+	if (recursion[rctx])
+		return -1;
+
+	recursion[rctx]++;
+	barrier();
+
+	return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+	barrier();
+	recursion[rctx]--;
+}
+
 #endif /* _KERNEL_EVENTS_INTERNAL_H */
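
get_recursion_context()/put_recursion_context(), now shared via this header,
give each execution context (task, softirq, hardirq, NMI) its own recursion
slot, so a callchain capture that interrupts another one on the same CPU bails
out instead of scribbling over the buffer already in use. Below is a
standalone sketch of the same guard; since userspace has no
in_nmi()/in_irq(), the context index is passed in explicitly and the helper
names are made up.

/* Userspace model of the per-context recursion guard above.  The kernel
 * derives 'rctx' from in_nmi()/in_irq()/in_softirq(); here the caller
 * supplies it. */
#include <stdio.h>

#define NR_CONTEXTS	4	/* task, softirq, hardirq, nmi */

static int recursion[NR_CONTEXTS];

static int get_ctx(int rctx)
{
	if (recursion[rctx])
		return -1;	/* already capturing in this context */
	recursion[rctx]++;
	return rctx;
}

static void put_ctx(int rctx)
{
	recursion[rctx]--;
}

int main(void)
{
	int a = get_ctx(0);	/* task context: granted     */
	int b = get_ctx(0);	/* nested in same ctx: denied */
	int c = get_ctx(3);	/* "NMI" context: granted     */

	printf("first=%d nested=%d nmi=%d\n", a, b, c);
	if (a >= 0) put_ctx(a);
	if (c >= 0) put_ctx(c);
	return 0;
}
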
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 7f3011c..6ddaba4 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -4,7 +4,7 @@
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- *  Copyright  Â©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
  */
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d98..d9eab2e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -121,9 +121,9 @@
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+		sig->utime += tsk->utime;
+		sig->stime += tsk->stime;
+		sig->gtime += tsk->gtime;
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
@@ -679,8 +679,6 @@
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
@@ -1040,6 +1038,7 @@
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return".  */
@@ -1255,19 +1254,9 @@
 		spin_lock_irq(&p->real_parent->sighand->siglock);
 		psig = p->real_parent->signal;
 		sig = p->signal;
-		psig->cutime =
-			cputime_add(psig->cutime,
-			cputime_add(tgutime,
-				    sig->cutime));
-		psig->cstime =
-			cputime_add(psig->cstime,
-			cputime_add(tgstime,
-				    sig->cstime));
-		psig->cgtime =
-			cputime_add(psig->cgtime,
-			cputime_add(p->gtime,
-			cputime_add(sig->gtime,
-				    sig->cgtime)));
+		psig->cutime += tgutime + sig->cutime;
+		psig->cstime += tgstime + sig->cstime;
+		psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
 		psig->cmin_flt +=
 			p->min_flt + sig->min_flt + sig->cmin_flt;
 		psig->cmaj_flt +=
@@ -1540,8 +1529,15 @@
 	}
 
 	/* dead body doesn't have much to contribute */
-	if (p->exit_state == EXIT_DEAD)
+	if (unlikely(p->exit_state == EXIT_DEAD)) {
+		/*
+		 * But do not ignore this task until the tracer does
+		 * wait_task_zombie()->do_notify_parent().
+		 */
+		if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+			wo->notask_error = 0;
 		return 0;
+	}
 
 	/* slay zombie? */
 	if (p->exit_state == EXIT_ZOMBIE) {
diff --git a/kernel/fork.c b/kernel/fork.c
index da4a6a1..f34f894 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -992,7 +992,6 @@
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
@@ -1023,8 +1022,8 @@
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
-	tsk->cputime_expires.prof_exp = cputime_zero;
-	tsk->cputime_expires.virt_exp = cputime_zero;
+	tsk->cputime_expires.prof_exp = 0;
+	tsk->cputime_expires.virt_exp = 0;
 	tsk->cputime_expires.sched_exp = 0;
 	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
 	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1132,14 +1131,10 @@
 
 	init_sigpending(&p->pending);
 
-	p->utime = cputime_zero;
-	p->stime = cputime_zero;
-	p->gtime = cputime_zero;
-	p->utimescaled = cputime_zero;
-	p->stimescaled = cputime_zero;
+	p->utime = p->stime = p->gtime = 0;
+	p->utimescaled = p->stimescaled = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	p->prev_utime = cputime_zero;
-	p->prev_stime = cputime_zero;
+	p->prev_utime = p->prev_stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7be56c5..9815b8d 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state.  Can be
+ * called under any context.  The freezers are responsible for ensuring the
+ * target tasks see the updated state.
  */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning.  The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
- *	freeze_task - send a freeze request to given task
- *	@p: task to send the request to
- *	@sig_only: if set, the request will only be sent if the task has the
- *		PF_FREEZER_NOSIG flag unset
- *	Return value: 'false', if @sig_only is set and the task has
- *		PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
  *
- *	The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- *	either sending a fake signal to it or waking it up, depending on whether
- *	or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
- *	has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- *	TIF_FREEZE flag will not be set.
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
+ *
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
  */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
+	unsigned long flags;
 
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug("  clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
-}
-
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails.  Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int thaw_process(struct task_struct *p)
-{
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
+	/*
+	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb.  Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
 		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
+	spin_unlock_irqrestore(&freezer_lock, flags);
 }
-EXPORT_SYMBOL(thaw_process);
+
+/**
+ * set_freezable - make %current freezable
+ *
+ * Mark %current freezable and enter refrigerator if necessary.
+ */
+bool set_freezable(void)
+{
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock.  This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
+}
+EXPORT_SYMBOL(set_freezable);
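
freezing_slow_path() above reduces to a small decision table: PF_NOFREEZE
always wins, pm_nosig_freezing or cgroup freezing catches everything else, and
plain PM freezing only applies to user tasks. A standalone sketch of that
table follows; the flag names mirror the kernel's but the function itself is
illustrative.

/* Userspace model of the freezing_slow_path() decision table. */
#include <stdio.h>
#include <stdbool.h>

#define PF_NOFREEZE	0x1
#define PF_KTHREAD	0x2

static bool pm_freezing;
static bool pm_nosig_freezing;

static bool should_freeze(unsigned int flags, bool cgroup_frozen)
{
	if (flags & PF_NOFREEZE)
		return false;
	if (pm_nosig_freezing || cgroup_frozen)
		return true;
	if (pm_freezing && !(flags & PF_KTHREAD))
		return true;
	return false;
}

int main(void)
{
	pm_freezing = true;	/* a suspend is in progress */

	printf("user task      -> %d\n", should_freeze(0, false));
	printf("kthread        -> %d\n", should_freeze(PF_KTHREAD, false));
	printf("PF_NOFREEZE    -> %d\n", should_freeze(PF_NOFREEZE, false));
	printf("frozen cgroup  -> %d\n", should_freeze(PF_KTHREAD, true));
	return 0;
}
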
diff --git a/kernel/futex.c b/kernel/futex.c
index ea87f4d..1614be2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -314,17 +314,29 @@
 #endif
 
 	lock_page(page_head);
+
+	/*
+	 * If page_head->mapping is NULL, then it cannot be a PageAnon
+	 * page; but it might be the ZERO_PAGE or in the gate area or
+	 * in a special mapping (all cases which we are happy to fail);
+	 * or it may have been a good file page when get_user_pages_fast
+	 * found it, but truncated or holepunched or subjected to
+	 * invalidate_complete_page2 before we got the page lock (also
+	 * cases which we are happy to fail).  And we hold a reference,
+	 * so refcount care in invalidate_complete_page's remove_mapping
+	 * prevents drop_caches from setting mapping to NULL beneath us.
+	 *
+	 * The case we do have to guard against is when memory pressure made
+	 * shmem_writepage move it from filecache to swapcache beneath us:
+	 * an unlikely race, but we do need to retry for page_head->mapping.
+	 */
 	if (!page_head->mapping) {
+		int shmem_swizzled = PageSwapCache(page_head);
 		unlock_page(page_head);
 		put_page(page_head);
-		/*
-		* ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-		* trying to find one. RW mapping would have COW'd (and thus
-		* have a mapping) so this page is RO and won't ever change.
-		*/
-		if ((page_head == ZERO_PAGE(address)))
-			return -EFAULT;
-		goto again;
+		if (shmem_swizzled)
+			goto again;
+		return -EFAULT;
 	}
 
 	/*
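
The long comment above amounts to a three-way policy on a NULL
page_head->mapping: retry only when the page looks like shmem just moved it to
swapcache under us, otherwise fail with -EFAULT. A toy model of that
classification follows; the names are illustrative, not kernel code.

/* Toy model of the retry policy described in the futex key comment. */
#include <stdio.h>
#include <stdbool.h>

enum key_result { KEY_OK, KEY_RETRY, KEY_EFAULT };

static enum key_result classify(bool has_mapping, bool in_swapcache)
{
	if (has_mapping)
		return KEY_OK;		/* normally backed page            */
	if (in_swapcache)
		return KEY_RETRY;	/* shmem swizzled it under us      */
	return KEY_EFAULT;		/* ZERO_PAGE, gate area, truncated */
}

int main(void)
{
	printf("mapped:%d swizzled:%d neither:%d\n",
	       classify(true, false), classify(false, true),
	       classify(false, false));
	return 0;
}
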
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 8b1748d..2e48ec0 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -74,11 +74,17 @@
 
 	/*
 	 * Ensure the task is not frozen.
-	 * Also, when a freshly created task is scheduled once, changes
-	 * its state to TASK_UNINTERRUPTIBLE without having ever been
-	 * switched out once, it musn't be checked.
+	 * Also, skip vfork and any other user process that freezer should skip.
 	 */
-	if (unlikely(t->flags & PF_FROZEN || !switch_count))
+	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+	    return;
+
+	/*
+	 * When a freshly created task is scheduled once, it changes its state
+	 * to TASK_UNINTERRUPTIBLE without ever having been switched out, so it
+	 * mustn't be checked here.
+	 */
+	if (unlikely(!switch_count))
 		return;
 
 	if (switch_count != t->last_switch_count) {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 200ce83..1f9e265 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -135,6 +135,9 @@
 		return -EINVAL;
 	if (intsize < 1)
 		return -EINVAL;
+	if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
+	    (intspec[0] >= d->hwirq_base + d->nr_irq)))
+		return -EINVAL;
 
 	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;
@@ -143,11 +146,6 @@
 	return 0;
 }
 
-struct irq_domain_ops irq_domain_simple_ops = {
-	.dt_translate = irq_domain_simple_dt_translate,
-};
-EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
-
 /**
  * irq_domain_create_simple() - Set up a 'simple' translation range
  */
@@ -182,3 +180,10 @@
 }
 EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
 #endif /* CONFIG_OF_IRQ */
+
+struct irq_domain_ops irq_domain_simple_ops = {
+#ifdef CONFIG_OF_IRQ
+	.dt_translate = irq_domain_simple_dt_translate,
+#endif /* CONFIG_OF_IRQ */
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1da999f..a9a9dbe 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1292,7 +1292,7 @@
  *	and to set up the interrupt handler in the right order.
  *
  *	If you want to set up a threaded irq handler for your device
- *	then you need to supply @handler and @thread_fn. @handler ist
+ *	then you need to supply @handler and @thread_fn. @handler is
  *	still called in hard interrupt context and has to check
  *	whether the interrupt originates from the device. If yes it
  *	needs to disable the interrupt on the device and return
diff --git a/kernel/itimer.c b/kernel/itimer.c
index d802883..22000c3 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -52,22 +52,22 @@
 
 	cval = it->expires;
 	cinterval = it->incr;
-	if (!cputime_eq(cval, cputime_zero)) {
+	if (cval) {
 		struct task_cputime cputime;
 		cputime_t t;
 
 		thread_group_cputimer(tsk, &cputime);
 		if (clock_id == CPUCLOCK_PROF)
-			t = cputime_add(cputime.utime, cputime.stime);
+			t = cputime.utime + cputime.stime;
 		else
 			/* CPUCLOCK_VIRT */
 			t = cputime.utime;
 
-		if (cputime_le(cval, t))
+		if (cval < t)
 			/* about to fire */
 			cval = cputime_one_jiffy;
 		else
-			cval = cputime_sub(cval, t);
+			cval = cval - t;
 	}
 
 	spin_unlock_irq(&tsk->sighand->siglock);
@@ -161,10 +161,9 @@
 
 	cval = it->expires;
 	cinterval = it->incr;
-	if (!cputime_eq(cval, cputime_zero) ||
-	    !cputime_eq(nval, cputime_zero)) {
-		if (cputime_gt(nval, cputime_zero))
-			nval = cputime_add(nval, cputime_one_jiffy);
+	if (cval || nval) {
+		if (nval > 0)
+			nval += cputime_one_jiffy;
 		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
 	}
 	it->expires = nval;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 66ff710..30c3c77 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -72,15 +72,46 @@
 	jump_label_unlock();
 }
 
-void jump_label_dec(struct jump_label_key *key)
+static void __jump_label_dec(struct jump_label_key *key,
+		unsigned long rate_limit, struct delayed_work *work)
 {
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
 		return;
 
-	jump_label_update(key, JUMP_LABEL_DISABLE);
+	if (rate_limit) {
+		atomic_inc(&key->enabled);
+		schedule_delayed_work(work, rate_limit);
+	} else
+		jump_label_update(key, JUMP_LABEL_DISABLE);
+
 	jump_label_unlock();
 }
 
+static void jump_label_update_timeout(struct work_struct *work)
+{
+	struct jump_label_key_deferred *key =
+		container_of(work, struct jump_label_key_deferred, work.work);
+	__jump_label_dec(&key->key, 0, NULL);
+}
+
+void jump_label_dec(struct jump_label_key *key)
+{
+	__jump_label_dec(key, 0, NULL);
+}
+
+void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+	__jump_label_dec(&key->key, key->timeout, &key->work);
+}
+
+
+void jump_label_rate_limit(struct jump_label_key_deferred *key,
+		unsigned long rl)
+{
+	key->timeout = rl;
+	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
+}
+
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
 	if (entry->code <= (unsigned long)end &&
@@ -111,7 +142,7 @@
  * running code can override this to make the non-live update case
  * cheaper.
  */
-void __weak arch_jump_label_transform_static(struct jump_entry *entry,
+void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
 					    enum jump_label_type type)
 {
 	arch_jump_label_transform(entry, type);	
@@ -217,8 +248,13 @@
 	if (iter_start == iter_stop)
 		return;
 
-	for (iter = iter_start; iter < iter_stop; iter++)
-		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+	for (iter = iter_start; iter < iter_stop; iter++) {
+		struct jump_label_key *iterk;
+
+		iterk = (struct jump_label_key *)(unsigned long)iter->key;
+		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
+				JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+	}
 }
 
 static int jump_label_add_module(struct module *mod)
@@ -258,8 +294,7 @@
 		key->next = jlm;
 
 		if (jump_label_enabled(key))
-			__jump_label_update(key, iter, iter_stop,
-					    JUMP_LABEL_ENABLE);
+			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
 	}
 
 	return 0;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index dc7bc08..090ee10 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1523,7 +1523,7 @@
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
-		mutex_lock(&pm_mutex);
+		lock_system_sleep();
 		pm_prepare_console();
 		error = freeze_processes();
 		if (error) {
@@ -1576,7 +1576,7 @@
 		thaw_processes();
  Restore_console:
 		pm_restore_console();
-		mutex_unlock(&pm_mutex);
+		unlock_system_sleep();
 	}
 #endif
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a4bea97..a0a8854 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -36,6 +36,7 @@
 #include <linux/resource.h>
 #include <linux/notifier.h>
 #include <linux/suspend.h>
+#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -50,6 +51,7 @@
 static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
 static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
 static DEFINE_SPINLOCK(umh_sysctl_lock);
+static DECLARE_RWSEM(umhelper_sem);
 
 #ifdef CONFIG_MODULES
 
@@ -275,6 +277,7 @@
  * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
  * (used for preventing user land processes from being created after the user
  * land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
  */
 static int usermodehelper_disabled = 1;
 
@@ -282,17 +285,29 @@
 static atomic_t running_helpers = ATOMIC_INIT(0);
 
 /*
- * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * Wait queue head used by usermodehelper_disable() to wait for all running
  * helpers to finish.
  */
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
  * Time to wait for running_helpers to become zero before the setting of
- * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ * usermodehelper_disabled in usermodehelper_disable() fails
  */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
+void read_lock_usermodehelper(void)
+{
+	down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+	up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
 /**
  * usermodehelper_disable - prevent new helpers from being started
  */
@@ -300,8 +315,10 @@
 {
 	long retval;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 1;
-	smp_mb();
+	up_write(&umhelper_sem);
+
 	/*
 	 * From now on call_usermodehelper_exec() won't start any new
 	 * helpers, so it is sufficient if running_helpers turns out to
@@ -314,7 +331,9 @@
 	if (retval)
 		return 0;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 	return -EAGAIN;
 }
 
@@ -323,7 +342,9 @@
  */
 void usermodehelper_enable(void)
 {
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 }
 
 /**
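
The new umhelper_sem turns the disable flag into properly ordered state:
readers (such as the firmware loader) hold the semaphore shared around their
check of usermodehelper_disabled, while the PM code flips the flag under the
write lock, replacing the old bare smp_mb(). A pthread sketch of the same
pattern follows; the helper bodies are placeholders and the userspace names
are, of course, not the kernel's (build with -lpthread).

/* pthread model of the rwsem-protected usermodehelper_disabled flag. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t umhelper_sem = PTHREAD_RWLOCK_INITIALIZER;
static int usermodehelper_disabled = 1;

static void helpers_disable(void)	/* like usermodehelper_disable() */
{
	pthread_rwlock_wrlock(&umhelper_sem);
	usermodehelper_disabled = 1;
	pthread_rwlock_unlock(&umhelper_sem);
	/* ... then wait for already-running helpers to drain ... */
}

static void helpers_enable(void)	/* like usermodehelper_enable() */
{
	pthread_rwlock_wrlock(&umhelper_sem);
	usermodehelper_disabled = 0;
	pthread_rwlock_unlock(&umhelper_sem);
}

static int start_helper(const char *path)	/* reader side */
{
	int ret;

	pthread_rwlock_rdlock(&umhelper_sem);	/* read_lock_usermodehelper()   */
	if (usermodehelper_disabled) {
		ret = -1;			/* -EBUSY in the kernel         */
	} else {
		printf("would exec %s\n", path);
		ret = 0;
	}
	pthread_rwlock_unlock(&umhelper_sem);	/* read_unlock_usermodehelper() */
	return ret;
}

int main(void)
{
	printf("while disabled: %d\n", start_helper("/sbin/hotplug"));
	helpers_enable();
	printf("after enable:   %d\n", start_helper("/sbin/hotplug"));
	helpers_disable();
	return 0;
}
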
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b6d216a..3d3de633 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -59,6 +59,31 @@
 EXPORT_SYMBOL(kthread_should_stop);
 
 /**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary.  This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	bool frozen = false;
+
+	might_sleep();
+
+	if (unlikely(freezing(current)))
+		frozen = __refrigerator(true);
+
+	if (was_frozen)
+		*was_frozen = frozen;
+
+	return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
+/**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
  *
@@ -257,7 +282,7 @@
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
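
kthread_freezable_should_stop() is meant to replace open-coded try_to_freeze()
in freezable kthread loops, so the freezer and kthread_stop() can no longer
deadlock against each other. The fragment below sketches the intended call
pattern; it is assembled from the interfaces shown in this patch
(kthread_freezable_should_stop(), set_freezable()), is not code taken from
it, and only builds as kernel code.

/* Illustrative freezable-kthread main loop (kernel-code fragment). */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/printk.h>

static int example_thread(void *data)
{
	bool was_frozen;

	set_freezable();	/* kthreads start with PF_NOFREEZE set */

	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("example: back from the refrigerator\n");

		/* ... do one unit of work ... */

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
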
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b2e08c9..8889f7d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,6 +431,7 @@
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static const char *lock_init_error;
 static unsigned long lockdep_init_trace_data[20];
 static struct stack_trace lockdep_init_trace = {
 	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
@@ -499,36 +500,32 @@
 	usage[i] = '\0';
 }
 
-static int __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct lock_class *class)
 {
 	char str[KSYM_NAME_LEN];
 	const char *name;
 
 	name = class->name;
-	if (!name)
-		name = __get_key_name(class->key, str);
-
-	return printk("%s", name);
-}
-
-static void print_lock_name(struct lock_class *class)
-{
-	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
-	const char *name;
-
-	get_usage_chars(class, usage);
-
-	name = class->name;
 	if (!name) {
 		name = __get_key_name(class->key, str);
-		printk(" (%s", name);
+		printk("%s", name);
 	} else {
-		printk(" (%s", name);
+		printk("%s", name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
+}
+
+static void print_lock_name(struct lock_class *class)
+{
+	char usage[LOCK_USAGE_CHARS];
+
+	get_usage_chars(class, usage);
+
+	printk(" (");
+	__print_lock_name(class);
 	printk("){%s}", usage);
 }
 
@@ -568,11 +565,12 @@
 	}
 }
 
-static void print_kernel_version(void)
+static void print_kernel_ident(void)
 {
-	printk("%s %.*s\n", init_utsname()->release,
+	printk("%s %.*s %s\n", init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version,
+		print_tainted());
 }
 
 static int very_verbose(struct lock_class *class)
@@ -656,6 +654,7 @@
 	if (unlikely(!lockdep_initialized)) {
 		lockdep_init();
 		lockdep_init_error = 1;
+		lock_init_error = lock->name;
 		save_stack_trace(&lockdep_init_trace);
 	}
 #endif
@@ -723,7 +722,7 @@
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
-		return class;
+		goto out_set_class_cache;
 
 	/*
 	 * Debug-check: all keys must be persistent!
@@ -808,6 +807,7 @@
 	graph_unlock();
 	raw_local_irq_restore(flags);
 
+out_set_class_cache:
 	if (!subclass || force)
 		lock->class_cache[0] = class;
 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
@@ -1149,7 +1149,7 @@
 	printk("\n");
 	printk("======================================================\n");
 	printk("[ INFO: possible circular locking dependency detected ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
@@ -1488,7 +1488,7 @@
 	printk("======================================================\n");
 	printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
 		irqclass, irqclass);
-	print_kernel_version();
+	print_kernel_ident();
 	printk("------------------------------------------------------\n");
 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
@@ -1717,7 +1717,7 @@
 	printk("\n");
 	printk("=============================================\n");
 	printk("[ INFO: possible recursive locking detected ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("---------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
@@ -2224,7 +2224,7 @@
 	printk("\n");
 	printk("=================================\n");
 	printk("[ INFO: inconsistent lock state ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("---------------------------------\n");
 
 	printk("inconsistent {%s} -> {%s} usage.\n",
@@ -2289,7 +2289,7 @@
 	printk("\n");
 	printk("=========================================================\n");
 	printk("[ INFO: possible irq lock inversion dependency detected ]\n");
-	print_kernel_version();
+	print_kernel_ident();
 	printk("---------------------------------------------------------\n");
 	printk("%s/%d just changed the state of lock:\n",
 		curr->comm, task_pid_nr(curr));
@@ -3175,6 +3175,7 @@
 	printk("\n");
 	printk("=====================================\n");
 	printk("[ BUG: bad unlock balance detected! ]\n");
+	print_kernel_ident();
 	printk("-------------------------------------\n");
 	printk("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
@@ -3619,6 +3620,7 @@
 	printk("\n");
 	printk("=================================\n");
 	printk("[ BUG: bad contention detected! ]\n");
+	print_kernel_ident();
 	printk("---------------------------------\n");
 	printk("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
@@ -3974,7 +3976,8 @@
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 	if (lockdep_init_error) {
-		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+		printk("WARNING: lockdep init error! lock-%s was acquired "
+			"before lockdep_init\n", lock_init_error);
 		printk("Call stack leading to lockdep invocation was:\n");
 		print_stack_trace(&lockdep_init_trace, 0);
 	}
@@ -3993,6 +3996,7 @@
 	printk("\n");
 	printk("=========================\n");
 	printk("[ BUG: held lock freed! ]\n");
+	print_kernel_ident();
 	printk("-------------------------\n");
 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
@@ -4050,6 +4054,7 @@
 	printk("\n");
 	printk("=====================================\n");
 	printk("[ BUG: lock held at task exit time! ]\n");
+	print_kernel_ident();
 	printk("-------------------------------------\n");
 	printk("%s/%d is exiting with locks still held!\n",
 		curr->comm, task_pid_nr(curr));
@@ -4147,6 +4152,7 @@
 		printk("\n");
 		printk("================================================\n");
 		printk("[ BUG: lock held when returning to user space! ]\n");
+		print_kernel_ident();
 		printk("------------------------------------------------\n");
 		printk("%s/%d is leaving the kernel with locks still held!\n",
 				curr->comm, curr->pid);
@@ -4166,10 +4172,33 @@
 	printk("\n");
 	printk("===============================\n");
 	printk("[ INFO: suspicious RCU usage. ]\n");
+	print_kernel_ident();
 	printk("-------------------------------\n");
 	printk("%s:%d %s!\n", file, line, s);
 	printk("\nother info that might help us debug this:\n\n");
 	printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
+
+	/*
+	 * If a CPU is in the RCU-free window in idle (ie: in the section
+	 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
+	 * considers that CPU to be in an "extended quiescent state",
+	 * which means that RCU will be completely ignoring that CPU.
+	 * Therefore, rcu_read_lock() and friends have absolutely no
+	 * effect on a CPU running in that state. In other words, even if
+	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
+	 * delete data structures out from under it.  RCU really has no
+	 * choice here: we need to keep an RCU-free window in idle where
+	 * the CPU may possibly enter into low power mode. This way we can
+	 * notice an extended quiescent state to other CPUs that started a grace
+	 * period. Otherwise we would delay any grace period as long as we run
+	 * in the idle task.
+	 *
+	 * So complain bitterly if someone does call rcu_read_lock(),
+	 * rcu_read_lock_bh() and so on from extended quiescent states.
+	 */
+	if (rcu_is_cpu_idle())
+		printk("RCU used illegally from extended quiescent state!\n");
+
 	lockdep_print_held_locks(curr);
 	printk("\nstack backtrace:\n");
 	dump_stack();
diff --git a/kernel/panic.c b/kernel/panic.c
index b2659360..3458469 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -237,11 +237,20 @@
 	 * Can't trust the integrity of the kernel anymore.
 	 * We don't call directly debug_locks_off() because the issue
 	 * is not necessarily serious enough to set oops_in_progress to 1
-	 * Also we want to keep up lockdep for staging development and
-	 * post-warning case.
+	 * Also we want to keep up lockdep for staging/out-of-tree
+	 * development and post-warning case.
 	 */
-	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+	switch (flag) {
+	case TAINT_CRAP:
+	case TAINT_OOT_MODULE:
+	case TAINT_WARN:
+	case TAINT_FIRMWARE_WORKAROUND:
+		break;
+
+	default:
+		if (__debug_locks_off())
+			printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+	}
 
 	set_bit(flag, &tainted_mask);
 }
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e7cb76d..125cb67 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -78,7 +78,7 @@
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 		return now.sched < then.sched;
 	}  else {
-		return cputime_lt(now.cpu, then.cpu);
+		return now.cpu < then.cpu;
 	}
 }
 static inline void cpu_time_add(const clockid_t which_clock,
@@ -88,7 +88,7 @@
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 		acc->sched += val.sched;
 	}  else {
-		acc->cpu = cputime_add(acc->cpu, val.cpu);
+		acc->cpu += val.cpu;
 	}
 }
 static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
@@ -98,25 +98,12 @@
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 		a.sched -= b.sched;
 	}  else {
-		a.cpu = cputime_sub(a.cpu, b.cpu);
+		a.cpu -= b.cpu;
 	}
 	return a;
 }
 
 /*
- * Divide and limit the result to res >= 1
- *
- * This is necessary to prevent signal delivery starvation, when the result of
- * the division would be rounded down to 0.
- */
-static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
-{
-	cputime_t res = cputime_div(time, div);
-
-	return max_t(cputime_t, res, 1);
-}
-
-/*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
@@ -148,28 +135,26 @@
 	} else {
 		cputime_t delta, incr;
 
-		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
+		if (now.cpu < timer->it.cpu.expires.cpu)
 			return;
 		incr = timer->it.cpu.incr.cpu;
-		delta = cputime_sub(cputime_add(now.cpu, incr),
-				    timer->it.cpu.expires.cpu);
+		delta = now.cpu + incr - timer->it.cpu.expires.cpu;
 		/* Don't use (incr*2 < delta), incr*2 might overflow. */
-		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
-			     incr = cputime_add(incr, incr);
-		for (; i >= 0; incr = cputime_halve(incr), i--) {
-			if (cputime_lt(delta, incr))
+		for (i = 0; incr < delta - incr; i++)
+			     incr += incr;
+		for (; i >= 0; incr = incr >> 1, i--) {
+			if (delta < incr)
 				continue;
-			timer->it.cpu.expires.cpu =
-				cputime_add(timer->it.cpu.expires.cpu, incr);
+			timer->it.cpu.expires.cpu += incr;
 			timer->it_overrun += 1 << i;
-			delta = cputime_sub(delta, incr);
+			delta -= incr;
 		}
 	}
 }
 
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
-	return cputime_add(p->utime, p->stime);
+	return p->utime + p->stime;
 }
 static inline cputime_t virt_ticks(struct task_struct *p)
 {
@@ -248,8 +233,8 @@
 
 	t = tsk;
 	do {
-		times->utime = cputime_add(times->utime, t->utime);
-		times->stime = cputime_add(times->stime, t->stime);
+		times->utime += t->utime;
+		times->stime += t->stime;
 		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
@@ -258,10 +243,10 @@
 
 static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
 {
-	if (cputime_gt(b->utime, a->utime))
+	if (b->utime > a->utime)
 		a->utime = b->utime;
 
-	if (cputime_gt(b->stime, a->stime))
+	if (b->stime > a->stime)
 		a->stime = b->stime;
 
 	if (b->sum_exec_runtime > a->sum_exec_runtime)
@@ -306,7 +291,7 @@
 		return -EINVAL;
 	case CPUCLOCK_PROF:
 		thread_group_cputime(p, &cputime);
-		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		cpu->cpu = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
 		thread_group_cputime(p, &cputime);
@@ -470,26 +455,24 @@
 			   unsigned long long sum_exec_runtime)
 {
 	struct cpu_timer_list *timer, *next;
-	cputime_t ptime = cputime_add(utime, stime);
+	cputime_t ptime = utime + stime;
 
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (cputime_lt(timer->expires.cpu, ptime)) {
-			timer->expires.cpu = cputime_zero;
+		if (timer->expires.cpu < ptime) {
+			timer->expires.cpu = 0;
 		} else {
-			timer->expires.cpu = cputime_sub(timer->expires.cpu,
-							 ptime);
+			timer->expires.cpu -= ptime;
 		}
 	}
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (cputime_lt(timer->expires.cpu, utime)) {
-			timer->expires.cpu = cputime_zero;
+		if (timer->expires.cpu < utime) {
+			timer->expires.cpu = 0;
 		} else {
-			timer->expires.cpu = cputime_sub(timer->expires.cpu,
-							 utime);
+			timer->expires.cpu -= utime;
 		}
 	}
 
@@ -520,8 +503,7 @@
 	struct signal_struct *const sig = tsk->signal;
 
 	cleanup_timers(tsk->signal->cpu_timers,
-		       cputime_add(tsk->utime, sig->utime),
-		       cputime_add(tsk->stime, sig->stime),
+		       tsk->utime + sig->utime, tsk->stime + sig->stime,
 		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
@@ -540,8 +522,7 @@
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 {
-	return cputime_eq(expires, cputime_zero) ||
-	       cputime_gt(expires, new_exp);
+	return expires == 0 || expires > new_exp;
 }
 
 /*
@@ -651,7 +632,7 @@
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		cpu->cpu = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
 		cpu->cpu = cputime.utime;
@@ -918,12 +899,12 @@
 	unsigned long soft;
 
 	maxfire = 20;
-	tsk->cputime_expires.prof_exp = cputime_zero;
+	tsk->cputime_expires.prof_exp = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
 			tsk->cputime_expires.prof_exp = t->expires.cpu;
 			break;
 		}
@@ -933,12 +914,12 @@
 
 	++timers;
 	maxfire = 20;
-	tsk->cputime_expires.virt_exp = cputime_zero;
+	tsk->cputime_expires.virt_exp = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
 			tsk->cputime_expires.virt_exp = t->expires.cpu;
 			break;
 		}
@@ -1009,20 +990,19 @@
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 			     cputime_t *expires, cputime_t cur_time, int signo)
 {
-	if (cputime_eq(it->expires, cputime_zero))
+	if (!it->expires)
 		return;
 
-	if (cputime_ge(cur_time, it->expires)) {
-		if (!cputime_eq(it->incr, cputime_zero)) {
-			it->expires = cputime_add(it->expires, it->incr);
+	if (cur_time >= it->expires) {
+		if (it->incr) {
+			it->expires += it->incr;
 			it->error += it->incr_error;
 			if (it->error >= onecputick) {
-				it->expires = cputime_sub(it->expires,
-							  cputime_one_jiffy);
+				it->expires -= cputime_one_jiffy;
 				it->error -= onecputick;
 			}
 		} else {
-			it->expires = cputime_zero;
+			it->expires = 0;
 		}
 
 		trace_itimer_expire(signo == SIGPROF ?
@@ -1031,9 +1011,7 @@
 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
 	}
 
-	if (!cputime_eq(it->expires, cputime_zero) &&
-	    (cputime_eq(*expires, cputime_zero) ||
-	     cputime_lt(it->expires, *expires))) {
+	if (it->expires && (!*expires || it->expires < *expires)) {
 		*expires = it->expires;
 	}
 }
@@ -1048,9 +1026,7 @@
  */
 static inline int task_cputime_zero(const struct task_cputime *cputime)
 {
-	if (cputime_eq(cputime->utime, cputime_zero) &&
-	    cputime_eq(cputime->stime, cputime_zero) &&
-	    cputime->sum_exec_runtime == 0)
+	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
 		return 1;
 	return 0;
 }
@@ -1076,15 +1052,15 @@
 	 */
 	thread_group_cputimer(tsk, &cputime);
 	utime = cputime.utime;
-	ptime = cputime_add(utime, cputime.stime);
+	ptime = utime + cputime.stime;
 	sum_sched_runtime = cputime.sum_exec_runtime;
 	maxfire = 20;
-	prof_expires = cputime_zero;
+	prof_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
+		if (!--maxfire || ptime < tl->expires.cpu) {
 			prof_expires = tl->expires.cpu;
 			break;
 		}
@@ -1094,12 +1070,12 @@
 
 	++timers;
 	maxfire = 20;
-	virt_expires = cputime_zero;
+	virt_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
+		if (!--maxfire || utime < tl->expires.cpu) {
 			virt_expires = tl->expires.cpu;
 			break;
 		}
@@ -1154,8 +1130,7 @@
 			}
 		}
 		x = secs_to_cputime(soft);
-		if (cputime_eq(prof_expires, cputime_zero) ||
-		    cputime_lt(x, prof_expires)) {
+		if (!prof_expires || x < prof_expires) {
 			prof_expires = x;
 		}
 	}
@@ -1249,12 +1224,9 @@
 static inline int task_cputime_expired(const struct task_cputime *sample,
 					const struct task_cputime *expires)
 {
-	if (!cputime_eq(expires->utime, cputime_zero) &&
-	    cputime_ge(sample->utime, expires->utime))
+	if (expires->utime && sample->utime >= expires->utime)
 		return 1;
-	if (!cputime_eq(expires->stime, cputime_zero) &&
-	    cputime_ge(cputime_add(sample->utime, sample->stime),
-		       expires->stime))
+	if (expires->stime && sample->utime + sample->stime >= expires->stime)
 		return 1;
 	if (expires->sum_exec_runtime != 0 &&
 	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
@@ -1389,18 +1361,18 @@
 		 * it to be relative, *newval argument is relative and we update
 		 * it to be absolute.
 		 */
-		if (!cputime_eq(*oldval, cputime_zero)) {
-			if (cputime_le(*oldval, now.cpu)) {
+		if (*oldval) {
+			if (*oldval <= now.cpu) {
 				/* Just about to fire. */
 				*oldval = cputime_one_jiffy;
 			} else {
-				*oldval = cputime_sub(*oldval, now.cpu);
+				*oldval -= now.cpu;
 			}
 		}
 
-		if (cputime_eq(*newval, cputime_zero))
+		if (!*newval)
 			return;
-		*newval = cputime_add(*newval, now.cpu);
+		*newval += now.cpu;
 	}
 
 	/*
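
With cputime_t arithmetic reduced to plain operators, the catch-up loop in
bump_cpu_timer() reads as straightforward integer math: double the increment
until it exceeds half the lag (written as incr < delta - incr so the doubled
value is never computed and cannot overflow), then walk back down, charging
2^i overruns per step taken. A standalone sketch of that loop with made-up
numbers follows; the helper is hypothetical, not kernel code.

/* Userspace model of the bump_cpu_timer() catch-up loop: advance
 * 'expires' past 'now' in multiples of 'incr', returning how many
 * periods were skipped (the overrun count). */
#include <stdio.h>

static unsigned long long bump(unsigned long long *expires,
			       unsigned long long incr,
			       unsigned long long now)
{
	unsigned long long delta, step = incr, overrun = 0;
	int i;

	if (now < *expires)
		return 0;			/* not expired yet */

	delta = now + incr - *expires;
	for (i = 0; step < delta - step; i++)	/* avoids computing step * 2 */
		step += step;
	for (; i >= 0; step >>= 1, i--) {
		if (delta < step)
			continue;
		*expires += step;
		overrun += 1ULL << i;
		delta -= step;
	}
	return overrun;
}

int main(void)
{
	unsigned long long expires = 100, incr = 30, now = 1000;
	unsigned long long o = bump(&expires, incr, now);

	/* prints: new expires=1030 overruns=31 */
	printf("new expires=%llu overruns=%llu\n", expires, o);
	return 0;
}
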
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a6b0503..6d6d288 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -43,8 +43,6 @@
 enum {
 	HIBERNATION_INVALID,
 	HIBERNATION_PLATFORM,
-	HIBERNATION_TEST,
-	HIBERNATION_TESTPROC,
 	HIBERNATION_SHUTDOWN,
 	HIBERNATION_REBOOT,
 	/* keep last */
@@ -55,7 +53,7 @@
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static bool freezer_test_done;
+bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
 
@@ -71,14 +69,14 @@
 		WARN_ON(1);
 		return;
 	}
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	hibernation_ops = ops;
 	if (ops)
 		hibernation_mode = HIBERNATION_PLATFORM;
 	else if (hibernation_mode == HIBERNATION_PLATFORM)
 		hibernation_mode = HIBERNATION_SHUTDOWN;
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 
 static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@
 	mdelay(5000);
 }
 
-static int hibernation_testmode(int mode)
-{
-	if (hibernation_mode == mode) {
-		hibernation_debug_sleep();
-		return 1;
-	}
-	return 0;
-}
-
 static int hibernation_test(int level)
 {
 	if (pm_test_level == level) {
@@ -114,7 +103,6 @@
 	return 0;
 }
 #else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
 static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
 
@@ -278,8 +266,7 @@
 		goto Platform_finish;
 
 	error = disable_nonboot_cpus();
-	if (error || hibernation_test(TEST_CPUS)
-	    || hibernation_testmode(HIBERNATION_TEST))
+	if (error || hibernation_test(TEST_CPUS))
 		goto Enable_cpus;
 
 	local_irq_disable();
@@ -333,7 +320,7 @@
  */
 int hibernation_snapshot(int platform_mode)
 {
-	pm_message_t msg = PMSG_RECOVER;
+	pm_message_t msg;
 	int error;
 
 	error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@
 	if (error)
 		goto Cleanup;
 
-	if (hibernation_test(TEST_FREEZER) ||
-		hibernation_testmode(HIBERNATION_TESTPROC)) {
+	if (hibernation_test(TEST_FREEZER)) {
 
 		/*
 		 * Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@
 
 	error = dpm_prepare(PMSG_FREEZE);
 	if (error) {
-		dpm_complete(msg);
+		dpm_complete(PMSG_RECOVER);
 		goto Cleanup;
 	}
 
 	suspend_console();
 	pm_restrict_gfp_mask();
+
 	error = dpm_suspend(PMSG_FREEZE);
-	if (error)
-		goto Recover_platform;
 
-	if (hibernation_test(TEST_DEVICES))
-		goto Recover_platform;
+	if (error || hibernation_test(TEST_DEVICES))
+		platform_recover(platform_mode);
+	else
+		error = create_image(platform_mode);
 
-	error = create_image(platform_mode);
 	/*
-	 * Control returns here (1) after the image has been created or the
+	 * In the case that we call create_image() above, the control
+	 * returns here (1) after the image has been created or the
 	 * image creation has failed and (2) after a successful restore.
 	 */
 
- Resume_devices:
 	/* We may need to release the preallocated image pages here. */
 	if (error || !in_suspend)
 		swsusp_free();
@@ -399,10 +385,6 @@
 	platform_end(platform_mode);
 	return error;
 
- Recover_platform:
-	platform_recover(platform_mode);
-	goto Resume_devices;
-
  Cleanup:
 	swsusp_free();
 	goto Close;
@@ -590,9 +572,6 @@
 static void power_down(void)
 {
 	switch (hibernation_mode) {
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
-		break;
 	case HIBERNATION_REBOOT:
 		kernel_restart(NULL);
 		break;
@@ -611,17 +590,6 @@
 	while(1);
 }
 
-static int prepare_processes(void)
-{
-	int error = 0;
-
-	if (freeze_processes()) {
-		error = -EBUSY;
-		thaw_processes();
-	}
-	return error;
-}
-
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -629,7 +597,7 @@
 {
 	int error;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -654,7 +622,7 @@
 	sys_sync();
 	printk("done.\n");
 
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error)
 		goto Finish;
 
@@ -697,7 +665,7 @@
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error;
 }
 
@@ -811,11 +779,13 @@
 		goto close_finish;
 
 	error = create_basic_memory_bitmaps();
-	if (error)
+	if (error) {
+		usermodehelper_enable();
 		goto close_finish;
+	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;
@@ -855,8 +825,6 @@
 	[HIBERNATION_PLATFORM]	= "platform",
 	[HIBERNATION_SHUTDOWN]	= "shutdown",
 	[HIBERNATION_REBOOT]	= "reboot",
-	[HIBERNATION_TEST]	= "test",
-	[HIBERNATION_TESTPROC]	= "testproc",
 };
 
 /*
@@ -865,17 +833,15 @@
  * Hibernation can be handled in several ways.  There are a few different ways
  * to put the system into the sleep state: using the platform driver (e.g. ACPI
  * or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
  *
  * The sysfs file /sys/power/disk provides an interface for selecting the
  * hibernation mode to use.  Reading from this file causes the available modes
- * to be printed.  There are 5 modes that can be supported:
+ * to be printed.  There are 3 modes that can be supported:
  *
  *	'platform'
  *	'shutdown'
  *	'reboot'
- *	'test'
- *	'testproc'
  *
  * If a platform hibernation driver is in use, 'platform' will be supported
  * and will be used by default.  Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@
 		switch (i) {
 		case HIBERNATION_SHUTDOWN:
 		case HIBERNATION_REBOOT:
-		case HIBERNATION_TEST:
-		case HIBERNATION_TESTPROC:
 			break;
 		case HIBERNATION_PLATFORM:
 			if (hibernation_ops)
@@ -929,7 +893,7 @@
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
 		if (len == strlen(hibernation_modes[i])
 		    && !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@
 		switch (mode) {
 		case HIBERNATION_SHUTDOWN:
 		case HIBERNATION_REBOOT:
-		case HIBERNATION_TEST:
-		case HIBERNATION_TESTPROC:
 			hibernation_mode = mode;
 			break;
 		case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@
 	if (!error)
 		pr_debug("PM: Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error ? error : n;
 }
 
@@ -984,9 +946,9 @@
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	swsusp_resume_device = res;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	printk(KERN_INFO "PM: Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
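
The hunks above replace every open-coded mutex_lock(&pm_mutex)/mutex_unlock(&pm_mutex) pair with the lock_system_sleep()/unlock_system_sleep() helpers. A minimal sketch of the shape of those helpers, assuming they are plain wrappers around pm_mutex as the one-for-one substitution implies (the in-tree definitions live in include/linux/suspend.h and may carry additional freezer bookkeeping):

/* Hedged sketch only: drop-in wrappers around pm_mutex, as implied by the
 * substitutions in the hunks above; the real helpers may do more. */
static inline void lock_system_sleep(void)
{
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
}
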
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 36e0f09..9824b41e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2003 Patrick Mochel
  * Copyright (c) 2003 Open Source Development Lab
- * 
+ *
  * This file is released under the GPLv2
  *
  */
@@ -116,7 +116,7 @@
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	level = TEST_FIRST;
 	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@
 			break;
 		}
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return error ? error : n;
 }
@@ -240,7 +240,7 @@
  *	'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
  *	'disk' (Suspend-to-Disk).
  *
- *	store() accepts one of those strings, translates it into the 
+ *	store() accepts one of those strings, translates it into the
  *	proper enumerated value, and initiates a suspend transition.
  */
 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@
 	/* First, check if we are requested to hibernate */
 	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
-  goto Exit;
+		goto Exit;
 	}
 
 #ifdef CONFIG_SUSPEND
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 23a2db1..0c4defe 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -50,6 +50,8 @@
 #define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
 
 /* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
 extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index addbbe5..77274c9 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,16 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezable(struct task_struct * p)
-{
-	if ((p == current) ||
-	    (p->flags & PF_NOFREEZE) ||
-	    (p->exit_state != 0))
-		return 0;
-	return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -46,17 +37,14 @@
 
 	end_time = jiffies + TIMEOUT;
 
-	if (!sig_only)
+	if (!user_only)
 		freeze_workqueues_begin();
 
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezable(p))
-				continue;
-
-			if (!freeze_task(p, sig_only))
+			if (p == current || !freeze_task(p))
 				continue;
 
 			/*
@@ -77,7 +65,7 @@
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 
-		if (!sig_only) {
+		if (!user_only) {
 			wq_busy = freeze_workqueues_busy();
 			todo += wq_busy;
 		}
@@ -103,11 +91,6 @@
 	elapsed_csecs = elapsed_csecs64;
 
 	if (todo) {
-		/* This does not unfreeze processes that are already frozen
-		 * (we have slightly ugly calling convention in that respect,
-		 * and caller must call thaw_processes() if something fails),
-		 * but it cleans up leftover PF_FREEZE requests.
-		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@
 		       elapsed_csecs / 100, elapsed_csecs % 100,
 		       todo - wq_busy, wq_busy);
 
-		thaw_workqueues();
-
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (!wakeup && !freezer_should_skip(p) &&
+			    p != current && freezing(p) && !frozen(p))
 				sched_show_task(p);
-			cancel_freezing(p);
-			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	} else {
@@ -136,12 +115,18 @@
 
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_processes(void)
 {
 	int error;
 
+	if (!pm_freezing)
+		atomic_inc(&system_freezing_cnt);
+
 	printk("Freezing user space processes ... ");
+	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
@@ -150,17 +135,22 @@
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_kernel_threads(void)
 {
 	int error;
 
 	printk("Freezing remaining freezable tasks ... ");
+	pm_nosig_freezing = true;
 	error = try_to_freeze_tasks(false);
 	if (!error)
 		printk("done.");
@@ -168,37 +158,32 @@
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
-static void thaw_tasks(bool nosig_only)
-{
-	struct task_struct *g, *p;
-
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		if (!freezable(p))
-			continue;
-
-		if (nosig_only && should_send_signal(p))
-			continue;
-
-		if (cgroup_freezing_or_frozen(p))
-			continue;
-
-		thaw_process(p);
-	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
-}
-
 void thaw_processes(void)
 {
+	struct task_struct *g, *p;
+
+	if (pm_freezing)
+		atomic_dec(&system_freezing_cnt);
+	pm_freezing = false;
+	pm_nosig_freezing = false;
+
 	oom_killer_enable();
 
 	printk("Restarting tasks ... ");
+
 	thaw_workqueues();
-	thaw_tasks(true);
-	thaw_tasks(false);
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
 	schedule();
 	printk("done.\n");
 }
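
The process.c rework above also changes the freezer calling convention: freeze_processes() and freeze_kernel_threads() now thaw everything themselves when they fail, so callers no longer pair them with thaw_processes() on the error path. A hedged caller sketch (the function name is illustrative, not a real call site):

/* Hedged sketch: how a sleep-entry path reads under the new convention. */
static int example_enter_sleep(void)
{
	int error;

	error = freeze_processes();
	if (error)
		return error;	/* freezer has already thawed everything */

	/* ... do the actual suspend/hibernate work here ... */

	thaw_processes();	/* explicit thaw only on the success path */
	return 0;
}
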
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4953dc0..4fd51be 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -42,9 +42,9 @@
  */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	suspend_ops = ops;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 EXPORT_SYMBOL_GPL(suspend_set_ops);
 
@@ -106,13 +106,11 @@
 		goto Finish;
 
 	error = suspend_freeze_processes();
-	if (error) {
-		suspend_stats.failed_freeze++;
-		dpm_save_failed_step(SUSPEND_FREEZE);
-	} else
+	if (!error)
 		return 0;
 
-	suspend_thaw_processes();
+	suspend_stats.failed_freeze++;
+	dpm_save_failed_step(SUSPEND_FREEZE);
 	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 11a594c..3739ecc 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -18,7 +18,6 @@
 #include <linux/bitops.h>
 #include <linux/genhd.h>
 #include <linux/device.h>
-#include <linux/buffer_head.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/swap.h>
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6d8f535..6b1ab7a 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -21,6 +21,7 @@
 #include <linux/swapops.h>
 #include <linux/pm.h>
 #include <linux/fs.h>
+#include <linux/compat.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
@@ -30,28 +31,6 @@
 
 #include "power.h"
 
-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future.  They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE	_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS		_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE	1
-#define PMOPS_ENTER	2
-#define PMOPS_FINISH	3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones.  They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT	_IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE		_IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP		_IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE		_IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
 
 #define SNAPSHOT_MINOR	231
 
@@ -71,7 +50,7 @@
 	struct snapshot_data *data;
 	int error;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -123,7 +102,7 @@
 	data->platform_support = 0;
 
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return error;
 }
@@ -132,7 +111,7 @@
 {
 	struct snapshot_data *data;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	swsusp_free();
 	free_basic_memory_bitmaps();
@@ -146,7 +125,7 @@
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
 	atomic_inc(&snapshot_device_available);
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return 0;
 }
@@ -158,7 +137,7 @@
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	data = filp->private_data;
 	if (!data->ready) {
@@ -179,7 +158,7 @@
 		*offp += res;
 
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return res;
 }
@@ -191,7 +170,7 @@
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	data = filp->private_data;
 
@@ -208,20 +187,11 @@
 	if (res > 0)
 		*offp += res;
 unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return res;
 }
 
-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
-	if (printk_ratelimit())
-		printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
-				"be removed soon, update your suspend-to-disk "
-				"utilities\n",
-				__builtin_return_address(0), cmd);
-}
-
 static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 							unsigned long arg)
 {
@@ -257,11 +227,9 @@
 			break;
 
 		error = freeze_processes();
-		if (error) {
-			thaw_processes();
+		if (error)
 			usermodehelper_enable();
-		}
-		if (!error)
+		else
 			data->frozen = 1;
 		break;
 
@@ -274,8 +242,6 @@
 		data->frozen = 0;
 		break;
 
-	case SNAPSHOT_ATOMIC_SNAPSHOT:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_CREATE_IMAGE:
 		if (data->mode != O_RDONLY || !data->frozen  || data->ready) {
 			error = -EPERM;
@@ -283,10 +249,15 @@
 		}
 		pm_restore_gfp_mask();
 		error = hibernation_snapshot(data->platform_support);
-		if (!error)
+		if (!error) {
 			error = put_user(in_suspend, (int __user *)arg);
-		if (!error)
-			data->ready = 1;
+			if (!error && !freezer_test_done)
+				data->ready = 1;
+			if (freezer_test_done) {
+				freezer_test_done = false;
+				thaw_processes();
+			}
+		}
 		break;
 
 	case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +276,6 @@
 		data->ready = 0;
 		break;
 
-	case SNAPSHOT_SET_IMAGE_SIZE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_PREF_IMAGE_SIZE:
 		image_size = arg;
 		break;
@@ -321,16 +290,12 @@
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_AVAIL_SWAP:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_AVAIL_SWAP_SIZE:
 		size = count_swap_pages(data->swap, 1);
 		size <<= PAGE_SHIFT;
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_GET_SWAP_PAGE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_ALLOC_SWAP_PAGE:
 		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
 			error = -ENODEV;
@@ -353,27 +318,6 @@
 		free_all_swap_pages(data->swap);
 		break;
 
-	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		if (!swsusp_swap_in_use()) {
-			/*
-			 * User space encodes device types as two-byte values,
-			 * so we need to recode them
-			 */
-			if (old_decode_dev(arg)) {
-				data->swap = swap_type_of(old_decode_dev(arg),
-							0, NULL);
-				if (data->swap < 0)
-					error = -ENODEV;
-			} else {
-				data->swap = -1;
-				error = -EINVAL;
-			}
-		} else {
-			error = -EPERM;
-		}
-		break;
-
 	case SNAPSHOT_S2RAM:
 		if (!data->frozen) {
 			error = -EPERM;
@@ -396,33 +340,6 @@
 			error = hibernation_platform_enter();
 		break;
 
-	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		error = -EINVAL;
-
-		switch (arg) {
-
-		case PMOPS_PREPARE:
-			data->platform_support = 1;
-			error = 0;
-			break;
-
-		case PMOPS_ENTER:
-			if (data->platform_support)
-				error = hibernation_platform_enter();
-			break;
-
-		case PMOPS_FINISH:
-			if (data->platform_support)
-				error = 0;
-			break;
-
-		default:
-			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
-		}
-		break;
-
 	case SNAPSHOT_SET_SWAP_AREA:
 		if (swsusp_swap_in_use()) {
 			error = -EPERM;
@@ -464,6 +381,66 @@
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+
+struct compat_resume_swap_area {
+	compat_loff_t offset;
+	u32 dev;
+} __packed;
+
+static long
+snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+	switch (cmd) {
+	case SNAPSHOT_GET_IMAGE_SIZE:
+	case SNAPSHOT_AVAIL_SWAP_SIZE:
+	case SNAPSHOT_ALLOC_SWAP_PAGE: {
+		compat_loff_t __user *uoffset = compat_ptr(arg);
+		loff_t offset;
+		mm_segment_t old_fs;
+		int err;
+
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
+		set_fs(old_fs);
+		if (!err && put_user(offset, uoffset))
+			err = -EFAULT;
+		return err;
+	}
+
+	case SNAPSHOT_CREATE_IMAGE:
+		return snapshot_ioctl(file, cmd,
+				      (unsigned long) compat_ptr(arg));
+
+	case SNAPSHOT_SET_SWAP_AREA: {
+		struct compat_resume_swap_area __user *u_swap_area =
+			compat_ptr(arg);
+		struct resume_swap_area swap_area;
+		mm_segment_t old_fs;
+		int err;
+
+		err = get_user(swap_area.offset, &u_swap_area->offset);
+		err |= get_user(swap_area.dev, &u_swap_area->dev);
+		if (err)
+			return -EFAULT;
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
+				     (unsigned long) &swap_area);
+		set_fs(old_fs);
+		return err;
+	}
+
+	default:
+		return snapshot_ioctl(file, cmd, arg);
+	}
+}
+
+#endif /* CONFIG_COMPAT */
+
 static const struct file_operations snapshot_fops = {
 	.open = snapshot_open,
 	.release = snapshot_release,
@@ -471,6 +448,9 @@
 	.write = snapshot_write,
 	.llseek = no_llseek,
 	.unlocked_ioctl = snapshot_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = snapshot_compat_ioctl,
+#endif
 };
 
 static struct miscdevice snapshot_device = {
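
With snapshot_compat_ioctl() in place, 32-bit suspend utilities can drive /dev/snapshot on a 64-bit kernel; the handler bounces loff_t and pointer arguments through kernel buffers via set_fs(KERNEL_DS). A hedged 32-bit user-space sketch (the device path and error handling are assumptions; the ioctl name comes from linux/suspend_ioctls.h):

/* Hedged example: query the image size from a 32-bit process. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	long long size;		/* 64 bits, matching the kernel's loff_t */
	int fd = open("/dev/snapshot", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size) == 0)
		printf("image size: %lld bytes\n", size);
	return 0;
}
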
diff --git a/kernel/printk.c b/kernel/printk.c
index 7982a0a..989e4a5 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -199,7 +199,7 @@
 		unsigned long mem;
 
 		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
-		if (mem == MEMBLOCK_ERROR)
+		if (!mem)
 			return;
 		new_log_buf = __va(mem);
 	} else {
@@ -688,6 +688,7 @@
 
 	oops_timestamp = jiffies;
 
+	debug_locks_off();
 	/* If a crash is occurring, make sure we can't deadlock */
 	raw_spin_lock_init(&logbuf_lock);
 	/* And make sure that we print immediately */
@@ -840,9 +841,8 @@
 	boot_delay_msec();
 	printk_delay();
 
-	preempt_disable();
 	/* This stops the holder of console_sem just where we want him */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	this_cpu = smp_processor_id();
 
 	/*
@@ -856,7 +856,7 @@
 		 * recursion and return - but flag the recursion so that
 		 * it can be printed at the next appropriate moment:
 		 */
-		if (!oops_in_progress) {
+		if (!oops_in_progress && !lockdep_recursing(current)) {
 			recursion_bug = 1;
 			goto out_restore_irqs;
 		}
@@ -962,9 +962,8 @@
 
 	lockdep_on();
 out_restore_irqs:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
-	preempt_enable();
 	return printed_len;
 }
 EXPORT_SYMBOL(printk);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 24d0447..78ab24a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -96,9 +96,20 @@
 	 */
 	if (!(child->flags & PF_EXITING) &&
 	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
-	     child->signal->group_stop_count))
+	     child->signal->group_stop_count)) {
 		child->jobctl |= JOBCTL_STOP_PENDING;
 
+		/*
+		 * This is only possible if this thread was cloned by the
+		 * traced task running in the stopped group; set the signal
+		 * for future reports.
+		 * FIXME: we should change ptrace_init_task() to handle this
+		 * case.
+		 */
+		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+			child->jobctl |= SIGSTOP;
+	}
+
 	/*
 	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 	 * @child in the butt.  Note that @resume should be used iff @child
diff --git a/kernel/rcu.h b/kernel/rcu.h
index f600868..aa88baa 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -30,6 +30,13 @@
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
 /*
+ * Process-level increment to ->dynticks_nesting field.  This allows for
+ * architectures that use half-interrupts and half-exceptions from
+ * process context.
+ */
+#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
+
+/*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
  * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c5b98e5..2bc4e13 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -93,6 +93,8 @@
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
+	if (rcu_is_cpu_idle())
+		return 0;
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
@@ -316,3 +318,13 @@
 };
 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
+void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
+{
+	trace_rcu_torture_read(rcutorturename, rhp);
+}
+EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 636af6d..977296d 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,31 +53,137 @@
 
 #include "rcutiny_plugin.h"
 
-#ifdef CONFIG_NO_HZ
+static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
 
-static long rcu_dynticks_nesting = 1;
-
-/*
- * Enter dynticks-idle mode, which is an extended quiescent state
- * if we have fully entered that mode (i.e., if the new value of
- * dynticks_nesting is zero).
- */
-void rcu_enter_nohz(void)
+/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
+static void rcu_idle_enter_common(long long oldval)
 {
-	if (--rcu_dynticks_nesting == 0)
-		rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+	if (rcu_dynticks_nesting) {
+		RCU_TRACE(trace_rcu_dyntick("--=",
+					    oldval, rcu_dynticks_nesting));
+		return;
+	}
+	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
+	if (!is_idle_task(current)) {
+		struct task_struct *idle = idle_task(smp_processor_id());
+
+		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+					    oldval, rcu_dynticks_nesting));
+		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
+	}
+	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
 }
 
 /*
- * Exit dynticks-idle mode, so that we are no longer in an extended
- * quiescent state.
+ * Enter idle, which is an extended quiescent state if we have fully
+ * entered that mode (i.e., if the new value of dynticks_nesting is zero).
  */
-void rcu_exit_nohz(void)
+void rcu_idle_enter(void)
 {
+	unsigned long flags;
+	long long oldval;
+
+	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
+	rcu_dynticks_nesting = 0;
+	rcu_idle_enter_common(oldval);
+	local_irq_restore(flags);
+}
+
+/*
+ * Exit an interrupt handler towards idle.
+ */
+void rcu_irq_exit(void)
+{
+	unsigned long flags;
+	long long oldval;
+
+	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
+	rcu_dynticks_nesting--;
+	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
+	rcu_idle_enter_common(oldval);
+	local_irq_restore(flags);
+}
+
+/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
+static void rcu_idle_exit_common(long long oldval)
+{
+	if (oldval) {
+		RCU_TRACE(trace_rcu_dyntick("++=",
+					    oldval, rcu_dynticks_nesting));
+		return;
+	}
+	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+	if (!is_idle_task(current)) {
+		struct task_struct *idle = idle_task(smp_processor_id());
+
+		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+			  oldval, rcu_dynticks_nesting));
+		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
+	}
+}
+
+/*
+ * Exit idle, so that we are no longer in an extended quiescent state.
+ */
+void rcu_idle_exit(void)
+{
+	unsigned long flags;
+	long long oldval;
+
+	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
+	WARN_ON_ONCE(oldval != 0);
+	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
+	rcu_idle_exit_common(oldval);
+	local_irq_restore(flags);
+}
+
+/*
+ * Enter an interrupt handler, moving away from idle.
+ */
+void rcu_irq_enter(void)
+{
+	unsigned long flags;
+	long long oldval;
+
+	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
 	rcu_dynticks_nesting++;
+	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
+	rcu_idle_exit_common(oldval);
+	local_irq_restore(flags);
 }
 
-#endif /* #ifdef CONFIG_NO_HZ */
+#ifdef CONFIG_PROVE_RCU
+
+/*
+ * Test whether RCU thinks that the current CPU is idle.
+ */
+int rcu_is_cpu_idle(void)
+{
+	return !rcu_dynticks_nesting;
+}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
+/*
+ * Test whether the current CPU was interrupted from idle.  Nested
+ * interrupts don't count; we must be running at the first interrupt
+ * level.
+ */
+int rcu_is_cpu_rrupt_from_idle(void)
+{
+	return rcu_dynticks_nesting <= 0;
+}
 
 /*
  * Helper function for rcu_sched_qs() and rcu_bh_qs().
@@ -126,14 +232,13 @@
 
 /*
  * Check to see if the scheduling-clock interrupt came from an extended
- * quiescent state, and, if so, tell RCU about it.
+ * quiescent state, and, if so, tell RCU about it.  This function must
+ * be called from hardirq context.  It is normally called from the
+ * scheduling-clock interrupt.
  */
 void rcu_check_callbacks(int cpu, int user)
 {
-	if (user ||
-	    (idle_cpu(cpu) &&
-	     !in_softirq() &&
-	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
+	if (user || rcu_is_cpu_rrupt_from_idle())
 		rcu_sched_qs(cpu);
 	else if (!in_softirq())
 		rcu_bh_qs(cpu);
@@ -154,7 +259,11 @@
 	/* If no RCU callbacks ready to invoke, just return. */
 	if (&rcp->rcucblist == rcp->donetail) {
 		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
-		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
+		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
+					      ACCESS_ONCE(rcp->rcucblist),
+					      need_resched(),
+					      is_idle_task(current),
+					      rcu_is_callbacks_kthread()));
 		return;
 	}
 
@@ -183,7 +292,9 @@
 		RCU_TRACE(cb_count++);
 	}
 	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
-	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
+	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+				      is_idle_task(current),
+				      rcu_is_callbacks_kthread()));
 }
 
 static void rcu_process_callbacks(struct softirq_action *unused)
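
The rewritten TINY_RCU idle tracking drops the CONFIG_NO_HZ dependency and keeps a signed nesting count that is zero only when the CPU is truly idle; DYNTICK_TASK_NESTING marks ordinary task context. A small user-space model of the transitions above (only the constant and the transition pattern are taken from the patch; the rest is illustrative):

#include <assert.h>
#include <limits.h>

/* Mirrors DYNTICK_TASK_NESTING from kernel/rcu.h in this series. */
#define DYNTICK_TASK_NESTING	(LLONG_MAX / 2 - 1)

static long long nesting = DYNTICK_TASK_NESTING;	/* task context */

static int cpu_is_idle(void)
{
	return !nesting;	/* analogue of rcu_is_cpu_idle() above */
}

int main(void)
{
	nesting = 0;				/* rcu_idle_enter()  */
	assert(cpu_is_idle());
	nesting++;				/* rcu_irq_enter()   */
	assert(!cpu_is_idle());
	nesting--;				/* rcu_irq_exit()    */
	assert(cpu_is_idle());
	nesting = DYNTICK_TASK_NESTING;		/* rcu_idle_exit()   */
	assert(!cpu_is_idle());
	return 0;
}
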
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 2b0484a..9cb1ae4 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -312,8 +312,8 @@
 	rt_mutex_lock(&mtx);
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
 
-	return rcu_preempt_ctrlblk.boost_tasks != NULL ||
-	       rcu_preempt_ctrlblk.exp_tasks != NULL;
+	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
+	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
 }
 
 /*
@@ -885,6 +885,19 @@
 	wake_up(&rcu_kthread_wq);
 }
 
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+	return rcu_kthread_task == current;
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
 /*
  * This kthread invokes RCU callbacks whose grace periods have
  * elapsed.  It is awakened as needed, and takes the place of the
@@ -938,6 +951,18 @@
 	raise_softirq(RCU_SOFTIRQ);
 }
 
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * There is no callback kthread, so this thread is never it.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+	return false;
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
 void rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 764825c..88f17b8 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -61,9 +61,11 @@
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5;		/* Start/stop testing interval (in sec) */
 static int irqreader = 1;	/* RCU readers from irq (timers). */
-static int fqs_duration = 0;	/* Duration of bursts (us), 0 to disable. */
-static int fqs_holdoff = 0;	/* Hold time within burst (us). */
+static int fqs_duration;	/* Duration of bursts (us), 0 to disable. */
+static int fqs_holdoff;		/* Hold time within burst (us). */
 static int fqs_stutter = 3;	/* Wait time between bursts (s). */
+static int onoff_interval;	/* Wait time between CPU hotplugs, 0=disable. */
+static int shutdown_secs;	/* Shutdown time (s).  <=0 for no shutdown. */
 static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
 static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
 static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
@@ -91,6 +93,10 @@
 MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
 module_param(fqs_stutter, int, 0444);
 MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
+module_param(onoff_interval, int, 0444);
+MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+module_param(shutdown_secs, int, 0444);
+MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
 module_param(test_boost, int, 0444);
 MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
 module_param(test_boost_interval, int, 0444);
@@ -119,6 +125,10 @@
 static struct task_struct *stutter_task;
 static struct task_struct *fqs_task;
 static struct task_struct *boost_tasks[NR_CPUS];
+static struct task_struct *shutdown_task;
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *onoff_task;
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 #define RCU_TORTURE_PIPE_LEN 10
 
@@ -149,6 +159,10 @@
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
 static long n_rcu_torture_timers;
+static long n_offline_attempts;
+static long n_offline_successes;
+static long n_online_attempts;
+static long n_online_successes;
 static struct list_head rcu_torture_removed;
 static cpumask_var_t shuffle_tmp_mask;
 
@@ -160,6 +174,8 @@
 #define RCUTORTURE_RUNNABLE_INIT 0
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+module_param(rcutorture_runnable, int, 0444);
+MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
 
 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
 #define rcu_can_boost() 1
@@ -167,6 +183,7 @@
 #define rcu_can_boost() 0
 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
 
+static unsigned long shutdown_time;	/* jiffies to system shutdown. */
 static unsigned long boost_starttime;	/* jiffies of next boost test start. */
 DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
 					/*  and boost task create/destroy. */
@@ -182,6 +199,9 @@
  */
 static DEFINE_MUTEX(fullstop_mutex);
 
+/* Forward reference. */
+static void rcu_torture_cleanup(void);
+
 /*
  * Detect and respond to a system shutdown.
  */
@@ -612,6 +632,30 @@
 	.name		= "srcu"
 };
 
+static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
+{
+	return srcu_read_lock_raw(&srcu_ctl);
+}
+
+static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
+{
+	srcu_read_unlock_raw(&srcu_ctl, idx);
+}
+
+static struct rcu_torture_ops srcu_raw_ops = {
+	.init		= srcu_torture_init,
+	.cleanup	= srcu_torture_cleanup,
+	.readlock	= srcu_torture_read_lock_raw,
+	.read_delay	= srcu_read_delay,
+	.readunlock	= srcu_torture_read_unlock_raw,
+	.completed	= srcu_torture_completed,
+	.deferred_free	= rcu_sync_torture_deferred_free,
+	.sync		= srcu_torture_synchronize,
+	.cb_barrier	= NULL,
+	.stats		= srcu_torture_stats,
+	.name		= "srcu_raw"
+};
+
 static void srcu_torture_synchronize_expedited(void)
 {
 	synchronize_srcu_expedited(&srcu_ctl);
@@ -913,6 +957,18 @@
 	return 0;
 }
 
+void rcutorture_trace_dump(void)
+{
+	static atomic_t beenhere = ATOMIC_INIT(0);
+
+	if (atomic_read(&beenhere))
+		return;
+	if (atomic_xchg(&beenhere, 1) != 0)
+		return;
+	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
+	ftrace_dump(DUMP_ALL);
+}
+
 /*
  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
  * incrementing the corresponding element of the pipeline array.  The
@@ -934,6 +990,7 @@
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
 				  srcu_read_lock_held(&srcu_ctl));
+	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
 	if (p == NULL) {
 		/* Leave because rcu_torture_writer is not yet underway */
 		cur_ops->readunlock(idx);
@@ -951,6 +1008,8 @@
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
+	if (pipe_count > 1)
+		rcutorture_trace_dump();
 	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
@@ -994,6 +1053,7 @@
 					  rcu_read_lock_bh_held() ||
 					  rcu_read_lock_sched_held() ||
 					  srcu_read_lock_held(&srcu_ctl));
+		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
 		if (p == NULL) {
 			/* Wait for rcu_torture_writer to get underway */
 			cur_ops->readunlock(idx);
@@ -1009,6 +1069,8 @@
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
+		if (pipe_count > 1)
+			rcutorture_trace_dump();
 		__this_cpu_inc(rcu_torture_count[pipe_count]);
 		completed = cur_ops->completed() - completed;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
@@ -1056,7 +1118,8 @@
 	cnt += sprintf(&page[cnt],
 		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
 		       "rtmbe: %d rtbke: %ld rtbre: %ld "
-		       "rtbf: %ld rtb: %ld nt: %ld",
+		       "rtbf: %ld rtb: %ld nt: %ld "
+		       "onoff: %ld/%ld:%ld/%ld",
 		       rcu_torture_current,
 		       rcu_torture_current_version,
 		       list_empty(&rcu_torture_freelist),
@@ -1068,7 +1131,11 @@
 		       n_rcu_torture_boost_rterror,
 		       n_rcu_torture_boost_failure,
 		       n_rcu_torture_boosts,
-		       n_rcu_torture_timers);
+		       n_rcu_torture_timers,
+		       n_online_successes,
+		       n_online_attempts,
+		       n_offline_successes,
+		       n_offline_attempts);
 	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
 	    n_rcu_torture_boost_ktrerror != 0 ||
 	    n_rcu_torture_boost_rterror != 0 ||
@@ -1232,12 +1299,14 @@
 		"shuffle_interval=%d stutter=%d irqreader=%d "
 		"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
 		"test_boost=%d/%d test_boost_interval=%d "
-		"test_boost_duration=%d\n",
+		"test_boost_duration=%d shutdown_secs=%d "
+		"onoff_interval=%d\n",
 		torture_type, tag, nrealreaders, nfakewriters,
 		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
 		stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
 		test_boost, cur_ops->can_boost,
-		test_boost_interval, test_boost_duration);
+		test_boost_interval, test_boost_duration, shutdown_secs,
+		onoff_interval);
 }
 
 static struct notifier_block rcutorture_shutdown_nb = {
@@ -1287,6 +1356,131 @@
 	return 0;
 }
 
+/*
+ * Cause the rcutorture test to shutdown the system after the test has
+ * run for the time specified by the shutdown_secs module parameter.
+ */
+static int
+rcu_torture_shutdown(void *arg)
+{
+	long delta;
+	unsigned long jiffies_snap;
+
+	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
+	jiffies_snap = ACCESS_ONCE(jiffies);
+	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
+	       !kthread_should_stop()) {
+		delta = shutdown_time - jiffies_snap;
+		if (verbose)
+			printk(KERN_ALERT "%s" TORTURE_FLAG
+			       "rcu_torture_shutdown task: %lu "
+			       "jiffies remaining\n",
+			       torture_type, delta);
+		schedule_timeout_interruptible(delta);
+		jiffies_snap = ACCESS_ONCE(jiffies);
+	}
+	if (kthread_should_stop()) {
+		VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
+		return 0;
+	}
+
+	/* OK, shut down the system. */
+
+	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
+	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
+	rcu_torture_cleanup();	/* Get the success/failure message. */
+	kernel_power_off();	/* Shut down the system. */
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Execute random CPU-hotplug operations at the interval specified
+ * by the onoff_interval.
+ */
+static int
+rcu_torture_onoff(void *arg)
+{
+	int cpu;
+	int maxcpu = -1;
+	DEFINE_RCU_RANDOM(rand);
+
+	VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
+	for_each_online_cpu(cpu)
+		maxcpu = cpu;
+	WARN_ON(maxcpu < 0);
+	while (!kthread_should_stop()) {
+		cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
+		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
+			if (verbose)
+				printk(KERN_ALERT "%s" TORTURE_FLAG
+				       "rcu_torture_onoff task: offlining %d\n",
+				       torture_type, cpu);
+			n_offline_attempts++;
+			if (cpu_down(cpu) == 0) {
+				if (verbose)
+					printk(KERN_ALERT "%s" TORTURE_FLAG
+					       "rcu_torture_onoff task: "
+					       "offlined %d\n",
+					       torture_type, cpu);
+				n_offline_successes++;
+			}
+		} else if (cpu_is_hotpluggable(cpu)) {
+			if (verbose)
+				printk(KERN_ALERT "%s" TORTURE_FLAG
+				       "rcu_torture_onoff task: onlining %d\n",
+				       torture_type, cpu);
+			n_online_attempts++;
+			if (cpu_up(cpu) == 0) {
+				if (verbose)
+					printk(KERN_ALERT "%s" TORTURE_FLAG
+					       "rcu_torture_onoff task: "
+					       "onlined %d\n",
+					       torture_type, cpu);
+				n_online_successes++;
+			}
+		}
+		schedule_timeout_interruptible(onoff_interval * HZ);
+	}
+	VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
+	return 0;
+}
+
+static int
+rcu_torture_onoff_init(void)
+{
+	if (onoff_interval <= 0)
+		return 0;
+	onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
+	if (IS_ERR(onoff_task)) {
+		onoff_task = NULL;
+		return PTR_ERR(onoff_task);
+	}
+	return 0;
+}
+
+static void rcu_torture_onoff_cleanup(void)
+{
+	if (onoff_task == NULL)
+		return;
+	VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
+	kthread_stop(onoff_task);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void
+rcu_torture_onoff_init(void)
+{
+}
+
+static void rcu_torture_onoff_cleanup(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
 static int rcutorture_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
 {
@@ -1391,6 +1585,11 @@
 		for_each_possible_cpu(i)
 			rcutorture_booster_cleanup(i);
 	}
+	if (shutdown_task != NULL) {
+		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
+		kthread_stop(shutdown_task);
+	}
+	rcu_torture_onoff_cleanup();
 
 	/* Wait for all RCU callbacks to fire.  */
 
@@ -1416,7 +1615,7 @@
 	static struct rcu_torture_ops *torture_ops[] =
 		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
 		  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
-		  &srcu_ops, &srcu_expedited_ops,
+		  &srcu_ops, &srcu_raw_ops, &srcu_expedited_ops,
 		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
 
 	mutex_lock(&fullstop_mutex);
@@ -1607,6 +1806,18 @@
 			}
 		}
 	}
+	if (shutdown_secs > 0) {
+		shutdown_time = jiffies + shutdown_secs * HZ;
+		shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
+					    "rcu_torture_shutdown");
+		if (IS_ERR(shutdown_task)) {
+			firsterr = PTR_ERR(shutdown_task);
+			VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
+			shutdown_task = NULL;
+			goto unwind;
+		}
+	}
+	rcu_torture_onoff_init();
 	register_reboot_notifier(&rcutorture_shutdown_nb);
 	rcutorture_record_test_transition();
 	mutex_unlock(&fullstop_mutex);
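
The two new module parameters above combine naturally for unattended testing; a hedged invocation (parameter names taken from the hunks above, values arbitrary) would be "modprobe rcutorture onoff_interval=3 shutdown_secs=600", which offlines and onlines a random hotpluggable CPU every three seconds and powers the machine off after ten minutes, printing the usual success/failure summary via rcu_torture_cleanup() first.
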
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6b76d81..6c4a672 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -69,7 +69,7 @@
 		NUM_RCU_LVL_3, \
 		NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_GP_IDLE, \
+	.fqs_state = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
@@ -195,12 +195,10 @@
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-#ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = 1,
+	.dynticks_nesting = DYNTICK_TASK_NESTING,
 	.dynticks = ATOMIC_INIT(1),
 };
-#endif /* #ifdef CONFIG_NO_HZ */
 
 static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
 static int qhimark = 10000;	/* If this many pending, ignore blimit. */
@@ -328,11 +326,11 @@
 		return 1;
 	}
 
-	/* If preemptible RCU, no point in sending reschedule IPI. */
-	if (rdp->preemptible)
-		return 0;
-
-	/* The CPU is online, so send it a reschedule IPI. */
+	/*
+	 * The CPU is online, so send it a reschedule IPI.  This forces
+	 * it through the scheduler, and (inefficiently) also handles cases
+	 * where idle loops fail to inform RCU about the CPU being idle.
+	 */
 	if (rdp->cpu != smp_processor_id())
 		smp_send_reschedule(rdp->cpu);
 	else
@@ -343,59 +341,181 @@
 
 #endif /* #ifdef CONFIG_SMP */
 
-#ifdef CONFIG_NO_HZ
-
-/**
- * rcu_enter_nohz - inform RCU that current CPU is entering nohz
+/*
+ * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
  *
- * Enter nohz mode, in other words, -leave- the mode in which RCU
- * read-side critical sections can occur.  (Though RCU read-side
- * critical sections can occur in irq handlers in nohz mode, a possibility
- * handled by rcu_irq_enter() and rcu_irq_exit()).
+ * If the new value of the ->dynticks_nesting counter now is zero,
+ * we really have entered idle, and must do the appropriate accounting.
+ * The caller must have disabled interrupts.
  */
-void rcu_enter_nohz(void)
+static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 {
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
+	trace_rcu_dyntick("Start", oldval, 0);
+	if (!is_idle_task(current)) {
+		struct task_struct *idle = idle_task(smp_processor_id());
 
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (--rdtp->dynticks_nesting) {
-		local_irq_restore(flags);
-		return;
+		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
+		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
 	}
-	trace_rcu_dyntick("Start");
+	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
 	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
-	local_irq_restore(flags);
 }
 
-/*
- * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
+/**
+ * rcu_idle_enter - inform RCU that current CPU is entering idle
  *
- * Exit nohz mode, in other words, -enter- the mode in which RCU
- * read-side critical sections normally occur.
+ * Enter idle mode, in other words, -leave- the mode in which RCU
+ * read-side critical sections can occur.  (Though RCU read-side
+ * critical sections can occur in irq handlers in idle, a possibility
+ * handled by irq_enter() and irq_exit().)
+ *
+ * We crowbar the ->dynticks_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
  */
-void rcu_exit_nohz(void)
+void rcu_idle_enter(void)
 {
 	unsigned long flags;
+	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (rdtp->dynticks_nesting++) {
-		local_irq_restore(flags);
-		return;
-	}
+	oldval = rdtp->dynticks_nesting;
+	rdtp->dynticks_nesting = 0;
+	rcu_idle_enter_common(rdtp, oldval);
+	local_irq_restore(flags);
+}
+
+/**
+ * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
+ *
+ * Exit from an interrupt handler, which might possibly result in entering
+ * idle mode, in other words, leaving the mode in which read-side critical
+ * sections can occur.
+ *
+ * This code assumes that the idle loop never does anything that might
+ * result in unbalanced calls to irq_enter() and irq_exit().  If your
+ * architecture violates this assumption, RCU will give you what you
+ * deserve, good and hard.  But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ */
+void rcu_irq_exit(void)
+{
+	unsigned long flags;
+	long long oldval;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	rdtp->dynticks_nesting--;
+	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
+	if (rdtp->dynticks_nesting)
+		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+	else
+		rcu_idle_enter_common(rdtp, oldval);
+	local_irq_restore(flags);
+}
+
+/*
+ * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ *
+ * If the new value of the ->dynticks_nesting counter was previously zero,
+ * we really have exited idle, and must do the appropriate accounting.
+ * The caller must have disabled interrupts.
+ */
+static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+{
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-	trace_rcu_dyntick("End");
+	rcu_cleanup_after_idle(smp_processor_id());
+	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+	if (!is_idle_task(current)) {
+		struct task_struct *idle = idle_task(smp_processor_id());
+
+		trace_rcu_dyntick("Error on exit: not idle task",
+				  oldval, rdtp->dynticks_nesting);
+		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
+	}
+}
+
+/**
+ * rcu_idle_exit - inform RCU that current CPU is leaving idle
+ *
+ * Exit idle mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections can occur.
+ *
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * allow for the possibility of usermode upcalls messing up our count
+ * of interrupt nesting level during the busy period that is just
+ * now starting.
+ */
+void rcu_idle_exit(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+	long long oldval;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE(oldval != 0);
+	rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
+	rcu_idle_exit_common(rdtp, oldval);
+	local_irq_restore(flags);
+}
+
+/**
+ * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
+ *
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur.
+ *
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to
+ * user mode!  This code assumes that the idle loop never does upcalls to
+ * user mode.  If your architecture does do upcalls from the idle loop (or
+ * does anything else that results in unbalanced calls to the irq_enter()
+ * and irq_exit() functions), RCU will give you what you deserve, good
+ * and hard.  But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ */
+void rcu_irq_enter(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+	long long oldval;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	rdtp->dynticks_nesting++;
+	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
+	if (oldval)
+		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+	else
+		rcu_idle_exit_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
 
@@ -442,27 +562,37 @@
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
-/**
- * rcu_irq_enter - inform RCU of entry to hard irq context
- *
- * If the CPU was idle with dynamic ticks active, this updates the
- * rdtp->dynticks to let the RCU handling know that the CPU is active.
- */
-void rcu_irq_enter(void)
-{
-	rcu_exit_nohz();
-}
+#ifdef CONFIG_PROVE_RCU
 
 /**
- * rcu_irq_exit - inform RCU of exit from hard irq context
+ * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
  *
- * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
- * to put let the RCU handling be aware that the CPU is going back to idle
- * with no ticks.
+ * If the current CPU is in its idle loop and is not in an interrupt
+ * or NMI handler, return true.
  */
-void rcu_irq_exit(void)
+int rcu_is_cpu_idle(void)
 {
-	rcu_enter_nohz();
+	int ret;
+
+	preempt_disable();
+	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
+/**
+ * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
+ *
+ * If the current CPU is idle or running at a first-level (not nested)
+ * interrupt from idle, return true.  The caller must have at least
+ * disabled preemption.
+ */
+int rcu_is_cpu_rrupt_from_idle(void)
+{
+	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
 }
 
 #ifdef CONFIG_SMP
@@ -475,7 +605,7 @@
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
-	return 0;
+	return (rdp->dynticks_snap & 0x1) == 0;
 }
 
 /*
@@ -512,26 +642,6 @@
 
 #endif /* #ifdef CONFIG_SMP */
 
-#else /* #ifdef CONFIG_NO_HZ */
-
-#ifdef CONFIG_SMP
-
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
-{
-	return 0;
-}
-
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
-{
-	return rcu_implicit_offline_qs(rdp);
-}
-
-#endif /* #ifdef CONFIG_SMP */
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-int rcu_cpu_stall_suppress __read_mostly;
-
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
 	rsp->gp_start = jiffies;
@@ -866,8 +976,8 @@
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
-	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
-	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
+	WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
+	rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
 
@@ -877,7 +987,7 @@
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
 		rnp->completed = rsp->completed;
-		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
+		rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
 		rcu_start_gp_per_cpu(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
@@ -927,7 +1037,7 @@
 
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
-	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
@@ -991,7 +1101,7 @@
 
 	rsp->completed = rsp->gpnum;  /* Declare the grace period complete. */
 	trace_rcu_grace_period(rsp->name, rsp->completed, "end");
-	rsp->signaled = RCU_GP_IDLE;
+	rsp->fqs_state = RCU_GP_IDLE;
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
 }
 
@@ -1221,7 +1331,7 @@
 	else
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
-		rcu_report_exp_rnp(rsp, rnp);
+		rcu_report_exp_rnp(rsp, rnp, true);
 	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
@@ -1263,7 +1373,9 @@
 	/* If no callbacks are ready, just return.*/
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
 		trace_rcu_batch_start(rsp->name, 0, 0);
-		trace_rcu_batch_end(rsp->name, 0);
+		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+				    need_resched(), is_idle_task(current),
+				    rcu_is_callbacks_kthread());
 		return;
 	}
 
@@ -1291,12 +1403,17 @@
 		debug_rcu_head_unqueue(list);
 		__rcu_reclaim(rsp->name, list);
 		list = next;
-		if (++count >= bl)
+		/* Stop only if limit reached and CPU has something to do. */
+		if (++count >= bl &&
+		    (need_resched() ||
+		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
 			break;
 	}
 
 	local_irq_save(flags);
-	trace_rcu_batch_end(rsp->name, count);
+	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
+			    is_idle_task(current),
+			    rcu_is_callbacks_kthread());
 
 	/* Update count, and requeue any remaining callbacks. */
 	rdp->qlen -= count;
@@ -1334,16 +1451,14 @@
  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
  * Also schedule RCU core processing.
  *
- * This function must be called with hardirqs disabled.  It is normally
+ * This function must be called from hardirq context.  It is normally
  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
  * false, there is no point in invoking rcu_check_callbacks().
  */
 void rcu_check_callbacks(int cpu, int user)
 {
 	trace_rcu_utilization("Start scheduler-tick");
-	if (user ||
-	    (idle_cpu(cpu) && rcu_scheduler_active &&
-	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	if (user || rcu_is_cpu_rrupt_from_idle()) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user
@@ -1457,7 +1572,7 @@
 		goto unlock_fqs_ret;  /* no GP in progress, time updated. */
 	}
 	rsp->fqs_active = 1;
-	switch (rsp->signaled) {
+	switch (rsp->fqs_state) {
 	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
@@ -1473,7 +1588,7 @@
 		force_qs_rnp(rsp, dyntick_save_progress_counter);
 		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
 		if (rcu_gp_in_progress(rsp))
-			rsp->signaled = RCU_FORCE_QS;
+			rsp->fqs_state = RCU_FORCE_QS;
 		break;
 
 	case RCU_FORCE_QS:
@@ -1812,7 +1927,7 @@
  * by the current CPU, even if none need be done immediately, returning
  * 1 if so.
  */
-static int rcu_needs_cpu_quick_check(int cpu)
+static int rcu_cpu_has_callbacks(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
 	return per_cpu(rcu_sched_data, cpu).nxtlist ||
@@ -1913,9 +2028,9 @@
 	for (i = 0; i < RCU_NEXT_SIZE; i++)
 		rdp->nxttail[i] = &rdp->nxtlist;
 	rdp->qlen = 0;
-#ifdef CONFIG_NO_HZ
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
+	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1942,6 +2057,10 @@
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
+	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
+	atomic_set(&rdp->dynticks->dynticks,
+		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+	rcu_prepare_for_idle_init(cpu);
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 	/*
@@ -2023,6 +2142,7 @@
 		rcu_send_cbs_to_online(&rcu_bh_state);
 		rcu_send_cbs_to_online(&rcu_sched_state);
 		rcu_preempt_send_cbs_to_online();
+		rcu_cleanup_after_idle(cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
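
TREE_RCU keeps the even/odd convention for the per-CPU ->dynticks counter (even while idle, odd otherwise) and, with CONFIG_NO_HZ no longer guarding the code, samples it unconditionally in dyntick_save_progress_counter() and rcu_is_cpu_idle(). A tiny user-space model of that parity check, assuming the initial value of 1 used in the definitions above:

#include <assert.h>

int main(void)
{
	int dynticks = 1;		/* boot value: task context, odd */

	dynticks++;			/* idle entry -> even */
	assert((dynticks & 0x1) == 0);	/* the "CPU is idle" test above */
	dynticks++;			/* idle exit  -> odd  */
	assert((dynticks & 0x1) != 0);
	return 0;
}
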
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 849ce9e..fddff92 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,9 +84,10 @@
  * Dynticks per-CPU state.
  */
 struct rcu_dynticks {
-	int dynticks_nesting;	/* Track irq/process nesting level. */
-	int dynticks_nmi_nesting; /* Track NMI nesting level. */
-	atomic_t dynticks;	/* Even value for dynticks-idle, else odd. */
+	long long dynticks_nesting; /* Track irq/process nesting level. */
+				    /* Process level is worth LLONG_MAX/2. */
+	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
+	atomic_t dynticks;	    /* Even value for idle, else odd. */
 };
 
 /* RCU's kthread states for tracing. */
@@ -274,16 +275,12 @@
 					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
 
-#ifdef CONFIG_NO_HZ
 	/* 3) dynticks interface. */
 	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
 	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-#endif /* #ifdef CONFIG_NO_HZ */
 
 	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
 	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
 	unsigned long offline_fqs;	/* Kicked due to being offline. */
 	unsigned long resched_ipi;	/* Sent a resched IPI. */
 
@@ -302,16 +299,12 @@
 	struct rcu_state *rsp;
 };
 
-/* Values for signaled field in struct rcu_state. */
+/* Values for fqs_state field in struct rcu_state. */
 #define RCU_GP_IDLE		0	/* No grace period in progress. */
 #define RCU_GP_INIT		1	/* Grace period being initialized. */
 #define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
 #define RCU_FORCE_QS		3	/* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT		RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
 
 #define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
 
@@ -361,7 +354,7 @@
 
 	/* The following fields are guarded by the root rcu_node's lock. */
 
-	u8	signaled ____cacheline_internodealigned_in_smp;
+	u8	fqs_state ____cacheline_internodealigned_in_smp;
 						/* Force QS state. */
 	u8	fqs_active;			/* force_quiescent_state() */
 						/*  is running. */
@@ -451,7 +444,8 @@
 static void rcu_preempt_process_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake);
 #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
 static int rcu_preempt_pending(int cpu);
 static int rcu_preempt_needs_cpu(int cpu);
@@ -461,6 +455,7 @@
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
+static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
@@ -473,5 +468,8 @@
 #endif /* #ifdef CONFIG_RCU_BOOST */
 static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
 static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_for_idle_init(int cpu);
+static void rcu_cleanup_after_idle(int cpu);
+static void rcu_prepare_for_idle(int cpu);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
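As a rough sketch (not the kernel code) of how the renamed ->fqs_state field steps through the values defined above, following the switch shown in the rcutree.c hunk earlier; the real code holds rnp->lock and scans dyntick state between the steps:

/* Compressed model of the force-quiescent-state phases. */
enum fqs_state { RCU_GP_IDLE, RCU_GP_INIT, RCU_SAVE_DYNTICK, RCU_FORCE_QS };

static enum fqs_state advance_fqs(enum fqs_state s, int gp_in_progress)
{
	switch (s) {
	case RCU_GP_IDLE:
	case RCU_GP_INIT:
		return s;		/* no grace period to push yet */
	case RCU_SAVE_DYNTICK:
		/* scan dyntick state, then advance if the GP is still live */
		return gp_in_progress ? RCU_FORCE_QS : s;
	case RCU_FORCE_QS:
	default:
		return RCU_FORCE_QS;	/* keep forcing quiescent states */
	}
}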
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 4b9b9f8..8bb35d7 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -312,6 +312,7 @@
 {
 	int empty;
 	int empty_exp;
+	int empty_exp_now;
 	unsigned long flags;
 	struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
@@ -382,8 +383,10 @@
 		/*
 		 * If this was the last task on the current list, and if
 		 * we aren't waiting on any CPUs, report the quiescent state.
-		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
+		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
+		 * so we must take a snapshot of the expedited state.
 		 */
+		empty_exp_now = !rcu_preempted_readers_exp(rnp);
 		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
 			trace_rcu_quiescent_state_report("preempt_rcu",
 							 rnp->gpnum,
@@ -406,8 +409,8 @@
 		 * If this was the last task on the expedited lists,
 		 * then we need to report up the rcu_node hierarchy.
 		 */
-		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
-			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+		if (!empty_exp && empty_exp_now)
+			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -729,9 +732,13 @@
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
  * Caller must hold sync_rcu_preempt_exp_mutex.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -744,7 +751,8 @@
 		}
 		if (rnp->parent == NULL) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			wake_up(&sync_rcu_preempt_exp_wq);
+			if (wake)
+				wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
 		mask = rnp->grpmask;
@@ -777,7 +785,7 @@
 		must_wait = 1;
 	}
 	if (!must_wait)
-		rcu_report_exp_rnp(rsp, rnp);
+		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
 /*
@@ -1069,9 +1077,9 @@
  * report on tasks preempted in RCU read-side critical sections during
  * expedited RCU grace periods.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake)
 {
-	return;
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
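A minimal sketch (with invented names) of the "wake" flag convention introduced above: callers that may themselves be the waiter pass false, so the task that initiated the expedited grace period does not issue a pointless self-wakeup.

#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_exp_wq);

/* Report completion up the hierarchy; wake the waiter only when the
 * caller is not the task that started the expedited grace period. */
static void example_report_exp_done(bool wake)
{
	if (wake)
		wake_up(&example_exp_wq);
}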
@@ -1157,8 +1165,6 @@
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-static struct lock_class_key rcu_boost_class;
-
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1221,15 +1227,13 @@
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
-	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
-	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
-				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
 
-	return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
+	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
+	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
 
 /*
@@ -1329,6 +1333,15 @@
 }
 
 /*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+	return __get_cpu_var(rcu_cpu_kthread_task) == current;
+}
+
+/*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
  * kthread.
@@ -1772,6 +1785,11 @@
 	WARN_ON_ONCE(1);
 }
 
+static bool rcu_is_callbacks_kthread(void)
+{
+	return false;
+}
+
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
@@ -1907,7 +1925,7 @@
 		 * grace period works for us.
 		 */
 		get_online_cpus();
-		snap = atomic_read(&sync_sched_expedited_started) - 1;
+		snap = atomic_read(&sync_sched_expedited_started);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
 
@@ -1939,88 +1957,243 @@
  * 1 if so.  This function is part of the RCU implementation; it is -not-
  * an exported member of the RCU API.
  *
- * Because we have preemptible RCU, just check whether this CPU needs
- * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
- * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
+ * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
+ * any flavor of RCU.
  */
 int rcu_needs_cpu(int cpu)
 {
-	return rcu_needs_cpu_quick_check(cpu);
+	return rcu_cpu_has_callbacks(cpu);
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+}
+
+/*
+ * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
+ * is nothing.
+ */
+static void rcu_prepare_for_idle(int cpu)
+{
 }
 
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#define RCU_NEEDS_CPU_FLUSHES 5
+/*
+ * This code is invoked when a CPU goes idle, at which point we want
+ * to have the CPU do everything required for RCU so that it can enter
+ * the energy-efficient dyntick-idle mode.  This is handled by a
+ * state machine implemented by rcu_prepare_for_idle() below.
+ *
+ * The following three preprocessor symbols control this state machine:
+ *
+ * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
+ *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
+ *	scheduling-clock interrupt than to loop through the state machine
+ *	at full power.
+ * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
+ *	optional if RCU does not need anything immediately from this
+ *	CPU, even if this CPU still has RCU callbacks queued.  The first
+ *	times through the state machine are mandatory: we need to give
+ *	few times through the state machine are mandatory: we need to give
+ *	to the RCU core.
+ * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
+ *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
+ *	is sized to be roughly one RCU grace period.  Those energy-efficiency
+ *	benchmarkers who might otherwise be tempted to set this to a large
+ *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
+ *	system.  And if you are -that- concerned about energy efficiency,
+ *	just power the system down and be done with it!
+ *
+ * The values below work well in practice.  If future workloads require
+ * adjustment, they can be converted into kernel config parameters, though
+ * making the state machine smarter might be a better option.
+ */
+#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
+#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
+#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
+
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
+static ktime_t rcu_idle_gp_wait;
 
 /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so.  This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ */
+int rcu_needs_cpu(int cpu)
+{
+	/* If no callbacks, RCU doesn't need the CPU. */
+	if (!rcu_cpu_has_callbacks(cpu))
+		return 0;
+	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
+	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
+}
+
+/*
+ * Timer handler used to force CPU to start pushing its remaining RCU
+ * callbacks in the case where it entered dyntick-idle mode with callbacks
+ * pending.  The handler doesn't really need to do anything because the
+ * real work is done upon re-entry to idle, or by the next scheduling-clock
+ * interrupt should idle not be re-entered.
+ */
+static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+{
+	trace_rcu_prep_idle("Timer");
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialize the timer used to pull CPUs out of dyntick-idle mode.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+	static int firsttime = 1;
+	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+
+	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtp->function = rcu_idle_gp_timer_func;
+	if (firsttime) {
+		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
+
+		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
+		firsttime = 0;
+	}
+}
+
+/*
+ * Clean up for exit from idle.  Because we are exiting from idle, there
+ * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * do nothing if this timer is not active, so just cancel it unconditionally.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+}
+
+/*
+ * Check to see if any RCU-related work can be done by the current CPU,
+ * and if so, schedule a softirq to get it done.  This function is part
+ * of the RCU implementation; it is -not- an exported member of the RCU API.
  *
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode.  This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
+ * The idea is for the current CPU to clear out all work required by the
+ * RCU core for the current grace period, so that this CPU can be permitted
+ * to enter dyntick-idle mode.  In some cases, it will need to be awakened
+ * at the end of the grace period by whatever CPU ends the grace period.
+ * This allows CPUs to go dyntick-idle more quickly, and to reduce the
+ * number of wakeups by a modest integer factor.
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then use
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ *
+ * The caller must have disabled interrupts.
  */
-int rcu_needs_cpu(int cpu)
+static void rcu_prepare_for_idle(int cpu)
 {
-	int c = 0;
-	int snap;
-	int thatcpu;
+	unsigned long flags;
 
-	/* Check for being in the holdoff period. */
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
-		return rcu_needs_cpu_quick_check(cpu);
+	local_irq_save(flags);
 
-	/* Don't bother unless we are the last non-dyntick-idle CPU. */
-	for_each_online_cpu(thatcpu) {
-		if (thatcpu == cpu)
-			continue;
-		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
-						     thatcpu).dynticks);
-		smp_mb(); /* Order sampling of snap with end of grace period. */
-		if ((snap & 0x1) != 0) {
-			per_cpu(rcu_dyntick_drain, cpu) = 0;
-			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-			return rcu_needs_cpu_quick_check(cpu);
-		}
+	/*
+	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
+	 * Also reset state to avoid prejudicing later attempts.
+	 */
+	if (!rcu_cpu_has_callbacks(cpu)) {
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		local_irq_restore(flags);
+		trace_rcu_prep_idle("No callbacks");
+		return;
+	}
+
+	/*
+	 * If in holdoff mode, just return.  We will presumably have
+	 * refrained from disabling the scheduling-clock tick.
+	 */
+	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+		local_irq_restore(flags);
+		trace_rcu_prep_idle("In holdoff");
+		return;
 	}
 
 	/* Check and update the rcu_dyntick_drain sequencing. */
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* First time through, initialize the counter. */
-		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
+	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+		   !rcu_pending(cpu)) {
+		/* Can we go dyntick-idle despite still having callbacks? */
+		trace_rcu_prep_idle("Dyntick with callbacks");
+		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+		hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+			      rcu_idle_gp_wait, HRTIMER_MODE_REL);
+		return; /* Nothing more to do immediately. */
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		return rcu_needs_cpu_quick_check(cpu);
+		local_irq_restore(flags);
+		trace_rcu_prep_idle("Begin holdoff");
+		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
+		return;
 	}
 
-	/* Do one step pushing remaining RCU callbacks through. */
+	/*
+	 * Do one step of pushing the remaining RCU callbacks through
+	 * the RCU core state machine.
+	 */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+		local_irq_restore(flags);
+		rcu_preempt_qs(cpu);
+		force_quiescent_state(&rcu_preempt_state, 0);
+		local_irq_save(flags);
+	}
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+		local_irq_restore(flags);
 		rcu_sched_qs(cpu);
 		force_quiescent_state(&rcu_sched_state, 0);
-		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+		local_irq_save(flags);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+		local_irq_restore(flags);
 		rcu_bh_qs(cpu);
 		force_quiescent_state(&rcu_bh_state, 0);
-		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
+		local_irq_save(flags);
 	}
 
-	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c)
+	/*
+	 * If RCU callbacks are still pending, RCU still needs this CPU.
+	 * So try forcing the callbacks through the grace period.
+	 */
+	if (rcu_cpu_has_callbacks(cpu)) {
+		local_irq_restore(flags);
+		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	return c;
+	} else {
+		local_irq_restore(flags);
+		trace_rcu_prep_idle("Callbacks drained");
+	}
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
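To summarize the decision tree that rcu_prepare_for_idle() now implements, here is a userspace-style sketch with plain variables standing in for the per-CPU state; the names, parameters, and return values are illustrative only:

#define RCU_IDLE_FLUSHES	5	/* mirrors the constants above */
#define RCU_IDLE_OPT_FLUSHES	3

enum idle_decision {
	GO_IDLE_NO_CBS,		/* no callbacks: enter dyntick-idle at once */
	IN_HOLDOFF,		/* recently tried and failed: keep the tick */
	GO_IDLE_WITH_CBS,	/* nothing urgent: arm the timer and sleep */
	BEGIN_HOLDOFF,		/* hit the flush limit: give up for this jiffy */
	PUSH_CALLBACKS,		/* take one more pass through the RCU core */
};

static enum idle_decision prepare_for_idle(int *drain, unsigned long *holdoff,
					   unsigned long now, int has_cbs,
					   int work_pending)
{
	if (!has_cbs) {
		*holdoff = now - 1;		/* reset for the next attempt */
		*drain = 0;
		return GO_IDLE_NO_CBS;
	}
	if (*holdoff == now)
		return IN_HOLDOFF;
	if (*drain <= 0) {
		*drain = RCU_IDLE_FLUSHES;	/* first time through */
	} else if (*drain <= RCU_IDLE_OPT_FLUSHES && !work_pending) {
		*drain = 0;
		*holdoff = now - 1;
		return GO_IDLE_WITH_CBS;
	} else if (--(*drain) <= 0) {
		*holdoff = now;
		return BEGIN_HOLDOFF;
	}
	return PUSH_CALLBACKS;
}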
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9feffa4..654cfe6 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -67,13 +67,11 @@
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
 		   rdp->qs_pending);
-#ifdef CONFIG_NO_HZ
-	seq_printf(m, " dt=%d/%d/%d df=%lu",
+	seq_printf(m, " dt=%d/%llx/%d df=%lu",
 		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
 		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
-#endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
 	seq_printf(m, " ql=%ld qs=%c%c%c%c",
 		   rdp->qlen,
@@ -141,13 +139,11 @@
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
 		   rdp->qs_pending);
-#ifdef CONFIG_NO_HZ
-	seq_printf(m, ",%d,%d,%d,%lu",
+	seq_printf(m, ",%d,%llx,%d,%lu",
 		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
 		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
-#endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
 	seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
 		   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
@@ -171,9 +167,7 @@
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
 	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
-#ifdef CONFIG_NO_HZ
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
-#endif /* #ifdef CONFIG_NO_HZ */
 	seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
 #ifdef CONFIG_RCU_BOOST
 	seq_puts(m, "\"kt\",\"ktl\"");
@@ -278,7 +272,7 @@
 	gpnum = rsp->gpnum;
 	seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
 		      "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
-		   rsp->completed, gpnum, rsp->signaled,
+		   rsp->completed, gpnum, rsp->fqs_state,
 		   (long)(rsp->jiffies_force_qs - jiffies),
 		   (int)(jiffies & 0xffff),
 		   rsp->n_force_qs, rsp->n_force_qs_ngp,
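The trace format strings above switch from %d to %llx because ->dynticks_nesting is now a long long; a quick userspace reminder (values arbitrary, for illustration only) of why the conversion specifier must follow the type:

#include <stdio.h>

int main(void)
{
	/* An arbitrary large nesting value, purely for illustration. */
	long long nesting = 0x4000000000000000LL;

	/* %llx consumes the full 64-bit argument; printing a long long
	 * with %d would truncate it and is undefined behaviour. */
	printf("dt nesting=%llx\n", nesting);
	return 0;
}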
diff --git a/kernel/relay.c b/kernel/relay.c
index 226fade..4335e1d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -302,7 +302,7 @@
  */
 static struct dentry *create_buf_file_default_callback(const char *filename,
 						       struct dentry *parent,
-						       int mode,
+						       umode_t mode,
 						       struct rchan_buf *buf,
 						       int *is_global)
 {
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 8eafd1b..16502d3 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -101,6 +101,7 @@
 
 	printk("\n============================================\n");
 	printk(  "[ BUG: circular locking deadlock detected! ]\n");
+	printk("%s\n", print_tainted());
 	printk(  "--------------------------------------------\n");
 	printk("%s/%d is deadlocking current task %s/%d\n\n",
 	       task->comm, task_pid_nr(task),
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 3d9f31c..98ec494 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -6,11 +6,11 @@
  *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *
  */
+#include <linux/device.h>
 #include <linux/kthread.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
 
@@ -27,7 +27,7 @@
 	int			opdata;
 	int			mutexes[MAX_RT_TEST_MUTEXES];
 	int			event;
-	struct sys_device	sysdev;
+	struct device		dev;
 };
 
 static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
@@ -271,7 +271,7 @@
  *
  * opcode:data
  */
-static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t sysfs_test_command(struct device *dev, struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
 	struct sched_param schedpar;
@@ -279,8 +279,8 @@
 	char cmdbuf[32];
 	int op, dat, tid, ret;
 
-	td = container_of(dev, struct test_thread_data, sysdev);
-	tid = td->sysdev.id;
+	td = container_of(dev, struct test_thread_data, dev);
+	tid = td->dev.id;
 
 	/* strings from sysfs write are not 0 terminated! */
 	if (count >= sizeof(cmdbuf))
@@ -334,7 +334,7 @@
  * @dev:	thread to query
  * @buf:	char buffer to be filled with thread status info
  */
-static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *attr,
 				 char *buf)
 {
 	struct test_thread_data *td;
@@ -342,8 +342,8 @@
 	char *curr = buf;
 	int i;
 
-	td = container_of(dev, struct test_thread_data, sysdev);
-	tsk = threads[td->sysdev.id];
+	td = container_of(dev, struct test_thread_data, dev);
+	tsk = threads[td->dev.id];
 
 	spin_lock(&rttest_lock);
 
@@ -360,28 +360,29 @@
 	spin_unlock(&rttest_lock);
 
 	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
-			mutexes[td->sysdev.id].owner);
+			mutexes[td->dev.id].owner);
 
 	return curr - buf;
 }
 
-static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
-static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
+static DEVICE_ATTR(status, 0600, sysfs_test_status, NULL);
+static DEVICE_ATTR(command, 0600, NULL, sysfs_test_command);
 
-static struct sysdev_class rttest_sysclass = {
+static struct bus_type rttest_subsys = {
 	.name = "rttest",
+	.dev_name = "rttest",
 };
 
 static int init_test_thread(int id)
 {
-	thread_data[id].sysdev.cls = &rttest_sysclass;
-	thread_data[id].sysdev.id = id;
+	thread_data[id].dev.bus = &rttest_subsys;
+	thread_data[id].dev.id = id;
 
 	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
 	if (IS_ERR(threads[id]))
 		return PTR_ERR(threads[id]);
 
-	return sysdev_register(&thread_data[id].sysdev);
+	return device_register(&thread_data[id].dev);
 }
 
 static int init_rttest(void)
@@ -393,7 +394,7 @@
 	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
 		rt_mutex_init(&mutexes[i]);
 
-	ret = sysdev_class_register(&rttest_sysclass);
+	ret = subsys_system_register(&rttest_subsys, NULL);
 	if (ret)
 		return ret;
 
@@ -401,10 +402,10 @@
 		ret = init_test_thread(i);
 		if (ret)
 			break;
-		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
+		ret = device_create_file(&thread_data[i].dev, &dev_attr_status);
 		if (ret)
 			break;
-		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
+		ret = device_create_file(&thread_data[i].dev, &dev_attr_command);
 		if (ret)
 			break;
 	}
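A self-contained sketch (demo names are invented, not from the patch) of the same sysdev-to-device conversion pattern: a bus registered with subsys_system_register(), one device on it, and one attribute file created with DEVICE_ATTR().

#include <linux/device.h>
#include <linux/module.h>

static struct bus_type demo_subsys = {
	.name		= "demo",
	.dev_name	= "demo",	/* devices get named demo0, demo1, ... */
};

static struct device demo_dev = {
	.id	= 0,
	.bus	= &demo_subsys,
};

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "device %d is alive\n", dev->id);
}
static DEVICE_ATTR(status, 0400, status_show, NULL);

static int __init demo_init(void)
{
	int ret;

	ret = subsys_system_register(&demo_subsys, NULL);
	if (ret)
		return ret;
	ret = device_register(&demo_dev);
	if (ret)
		return ret;
	return device_create_file(&demo_dev, &dev_attr_status);
}
module_init(demo_init);
MODULE_LICENSE("GPL");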
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index f9d8482..a242e69 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,7 +579,6 @@
 		    struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
-	int was_disabled;
 
 	for (;;) {
 		/* Try to acquire the lock: */
@@ -602,17 +601,10 @@
 
 		raw_spin_unlock(&lock->wait_lock);
 
-		was_disabled = irqs_disabled();
-		if (was_disabled)
-			local_irq_enable();
-
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule_rt_mutex(lock);
 
-		if (was_disabled)
-			local_irq_disable();
-
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
new file mode 100644
index 0000000..9a7dd35
--- /dev/null
+++ b/kernel/sched/Makefile
@@ -0,0 +1,20 @@
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_clock.o = -pg
+endif
+
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
+# needed for x86 only.  Why this used to be enabled for all architectures is beyond
+# me.  I suspect most platforms don't need this, but until we know that for sure
+# I turn this off for IA-64 only.  Andreas Schwab says it's also needed on m68k
+# to get a correct value for the wait-channel (WCHAN in ps). --davidm
+CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+endif
+
+obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o
+obj-$(CONFIG_SMP) += cpupri.o
+obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+obj-$(CONFIG_SCHEDSTATS) += stats.o
+obj-$(CONFIG_SCHED_DEBUG) += debug.o
+
+
diff --git a/kernel/sched_autogroup.c b/kernel/sched/auto_group.c
similarity index 87%
rename from kernel/sched_autogroup.c
rename to kernel/sched/auto_group.c
index 429242f..e8a1f83 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched/auto_group.c
@@ -1,15 +1,19 @@
 #ifdef CONFIG_SCHED_AUTOGROUP
 
+#include "sched.h"
+
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
+#include <linux/security.h>
+#include <linux/export.h>
 
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
-static void __init autogroup_init(struct task_struct *init_task)
+void __init autogroup_init(struct task_struct *init_task)
 {
 	autogroup_default.tg = &root_task_group;
 	kref_init(&autogroup_default.kref);
@@ -17,7 +21,7 @@
 	init_task->signal->autogroup = &autogroup_default;
 }
 
-static inline void autogroup_free(struct task_group *tg)
+void autogroup_free(struct task_group *tg)
 {
 	kfree(tg->autogroup);
 }
@@ -59,10 +63,6 @@
 	return ag;
 }
 
-#ifdef CONFIG_RT_GROUP_SCHED
-static void free_rt_sched_group(struct task_group *tg);
-#endif
-
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -108,8 +108,7 @@
 	return autogroup_kref_get(&autogroup_default);
 }
 
-static inline bool
-task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
 	if (tg != &root_task_group)
 		return false;
@@ -127,22 +126,6 @@
 	return true;
 }
 
-static inline bool task_group_is_autogroup(struct task_group *tg)
-{
-	return !!tg->autogroup;
-}
-
-static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg)
-{
-	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
-	if (enabled && task_wants_autogroup(p, tg))
-		return p->signal->autogroup->tg;
-
-	return tg;
-}
-
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -263,7 +246,7 @@
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SCHED_DEBUG
-static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
+int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
 	if (!task_group_is_autogroup(tg))
 		return 0;
diff --git a/kernel/sched_autogroup.h b/kernel/sched/auto_group.h
similarity index 66%
rename from kernel/sched_autogroup.h
rename to kernel/sched/auto_group.h
index c2f0e72..8bd0471 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched/auto_group.h
@@ -1,5 +1,8 @@
 #ifdef CONFIG_SCHED_AUTOGROUP
 
+#include <linux/kref.h>
+#include <linux/rwsem.h>
+
 struct autogroup {
 	/*
 	 * reference doesn't mean how many thread attach to this
@@ -13,9 +16,28 @@
 	int			nice;
 };
 
-static inline bool task_group_is_autogroup(struct task_group *tg);
+extern void autogroup_init(struct task_struct *init_task);
+extern void autogroup_free(struct task_group *tg);
+
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return !!tg->autogroup;
+}
+
+extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
+
 static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg);
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (enabled && task_wants_autogroup(p, tg))
+		return p->signal->autogroup->tg;
+
+	return tg;
+}
+
+extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
 
 #else /* !CONFIG_SCHED_AUTOGROUP */
 
diff --git a/kernel/sched_clock.c b/kernel/sched/clock.c
similarity index 100%
rename from kernel/sched_clock.c
rename to kernel/sched/clock.c
diff --git a/kernel/sched.c b/kernel/sched/core.c
similarity index 78%
rename from kernel/sched.c
rename to kernel/sched/core.c
index d6b149c..0ac0f81 100644
--- a/kernel/sched.c
+++ b/kernel/sched/core.c
@@ -1,5 +1,5 @@
 /*
- *  kernel/sched.c
+ *  kernel/sched/core.c
  *
  *  Kernel scheduler and related syscalls
  *
@@ -56,7 +56,6 @@
 #include <linux/percpu.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/stop_machine.h>
 #include <linux/sysctl.h>
 #include <linux/syscalls.h>
 #include <linux/times.h>
@@ -75,129 +74,17 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
-#include <asm/mutex.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
 
-#include "sched_cpupri.h"
-#include "workqueue_sched.h"
-#include "sched_autogroup.h"
+#include "sched.h"
+#include "../workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
-
-/*
- * Helpers for converting nanosecond timing to jiffy resolution
- */
-#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
-
-#define NICE_0_LOAD		SCHED_LOAD_SCALE
-#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
-
-/*
- * These are the 'tuning knobs' of the scheduler:
- *
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define DEF_TIMESLICE		(100 * HZ / 1000)
-
-/*
- * single value that denotes runtime == period, ie unlimited time.
- */
-#define RUNTIME_INF	((u64)~0ULL)
-
-static inline int rt_policy(int policy)
-{
-	if (policy == SCHED_FIFO || policy == SCHED_RR)
-		return 1;
-	return 0;
-}
-
-static inline int task_has_rt_policy(struct task_struct *p)
-{
-	return rt_policy(p->policy);
-}
-
-/*
- * This is the priority-queue data structure of the RT scheduling class:
- */
-struct rt_prio_array {
-	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
-	struct list_head queue[MAX_RT_PRIO];
-};
-
-struct rt_bandwidth {
-	/* nests inside the rq lock: */
-	raw_spinlock_t		rt_runtime_lock;
-	ktime_t			rt_period;
-	u64			rt_runtime;
-	struct hrtimer		rt_period_timer;
-};
-
-static struct rt_bandwidth def_rt_bandwidth;
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
-
-static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
-{
-	struct rt_bandwidth *rt_b =
-		container_of(timer, struct rt_bandwidth, rt_period_timer);
-	ktime_t now;
-	int overrun;
-	int idle = 0;
-
-	for (;;) {
-		now = hrtimer_cb_get_time(timer);
-		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
-
-		if (!overrun)
-			break;
-
-		idle = do_sched_rt_period_timer(rt_b, overrun);
-	}
-
-	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
-}
-
-static
-void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
-{
-	rt_b->rt_period = ns_to_ktime(period);
-	rt_b->rt_runtime = runtime;
-
-	raw_spin_lock_init(&rt_b->rt_runtime_lock);
-
-	hrtimer_init(&rt_b->rt_period_timer,
-			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	rt_b->rt_period_timer.function = sched_rt_period_timer;
-}
-
-static inline int rt_bandwidth_enabled(void)
-{
-	return sysctl_sched_rt_runtime >= 0;
-}
-
-static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
+void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
 	unsigned long delta;
 	ktime_t soft, hard, now;
@@ -217,580 +104,12 @@
 	}
 }
 
-static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
-{
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return;
-
-	if (hrtimer_active(&rt_b->rt_period_timer))
-		return;
-
-	raw_spin_lock(&rt_b->rt_runtime_lock);
-	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
-	raw_spin_unlock(&rt_b->rt_runtime_lock);
-}
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
-{
-	hrtimer_cancel(&rt_b->rt_period_timer);
-}
-#endif
-
-/*
- * sched_domains_mutex serializes calls to init_sched_domains,
- * detach_destroy_domains and partition_sched_domains.
- */
-static DEFINE_MUTEX(sched_domains_mutex);
-
-#ifdef CONFIG_CGROUP_SCHED
-
-#include <linux/cgroup.h>
-
-struct cfs_rq;
-
-static LIST_HEAD(task_groups);
-
-struct cfs_bandwidth {
-#ifdef CONFIG_CFS_BANDWIDTH
-	raw_spinlock_t lock;
-	ktime_t period;
-	u64 quota, runtime;
-	s64 hierarchal_quota;
-	u64 runtime_expires;
-
-	int idle, timer_active;
-	struct hrtimer period_timer, slack_timer;
-	struct list_head throttled_cfs_rq;
-
-	/* statistics */
-	int nr_periods, nr_throttled;
-	u64 throttled_time;
-#endif
-};
-
-/* task group related information */
-struct task_group {
-	struct cgroup_subsys_state css;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	/* schedulable entities of this group on each cpu */
-	struct sched_entity **se;
-	/* runqueue "owned" by this group on each cpu */
-	struct cfs_rq **cfs_rq;
-	unsigned long shares;
-
-	atomic_t load_weight;
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	struct sched_rt_entity **rt_se;
-	struct rt_rq **rt_rq;
-
-	struct rt_bandwidth rt_bandwidth;
-#endif
-
-	struct rcu_head rcu;
-	struct list_head list;
-
-	struct task_group *parent;
-	struct list_head siblings;
-	struct list_head children;
-
-#ifdef CONFIG_SCHED_AUTOGROUP
-	struct autogroup *autogroup;
-#endif
-
-	struct cfs_bandwidth cfs_bandwidth;
-};
-
-/* task_group_lock serializes the addition/removal of task groups */
-static DEFINE_SPINLOCK(task_group_lock);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
-
-/*
- * A weight of 0 or 1 can cause arithmetics problems.
- * A weight of a cfs_rq is the sum of weights of which entities
- * are queued on this cfs_rq, so a weight of a entity should not be
- * too large, so as the shares value of a task group.
- * (The default weight is 1024 - so there's no practical
- *  limitation from this.)
- */
-#define MIN_SHARES	(1UL <<  1)
-#define MAX_SHARES	(1UL << 18)
-
-static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
-#endif
-
-/* Default task group.
- *	Every task in system belong to this group at bootup.
- */
-struct task_group root_task_group;
-
-#endif	/* CONFIG_CGROUP_SCHED */
-
-/* CFS-related fields in a runqueue */
-struct cfs_rq {
-	struct load_weight load;
-	unsigned long nr_running, h_nr_running;
-
-	u64 exec_clock;
-	u64 min_vruntime;
-#ifndef CONFIG_64BIT
-	u64 min_vruntime_copy;
-#endif
-
-	struct rb_root tasks_timeline;
-	struct rb_node *rb_leftmost;
-
-	struct list_head tasks;
-	struct list_head *balance_iterator;
-
-	/*
-	 * 'curr' points to currently running entity on this cfs_rq.
-	 * It is set to NULL otherwise (i.e when none are currently running).
-	 */
-	struct sched_entity *curr, *next, *last, *skip;
-
-#ifdef	CONFIG_SCHED_DEBUG
-	unsigned int nr_spread_over;
-#endif
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
-
-	/*
-	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
-	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
-	 * (like users, containers etc.)
-	 *
-	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
-	 * list is used during load balance.
-	 */
-	int on_list;
-	struct list_head leaf_cfs_rq_list;
-	struct task_group *tg;	/* group that "owns" this runqueue */
-
-#ifdef CONFIG_SMP
-	/*
-	 * the part of load.weight contributed by tasks
-	 */
-	unsigned long task_weight;
-
-	/*
-	 *   h_load = weight * f(tg)
-	 *
-	 * Where f(tg) is the recursive weight fraction assigned to
-	 * this group.
-	 */
-	unsigned long h_load;
-
-	/*
-	 * Maintaining per-cpu shares distribution for group scheduling
-	 *
-	 * load_stamp is the last time we updated the load average
-	 * load_last is the last time we updated the load average and saw load
-	 * load_unacc_exec_time is currently unaccounted execution time
-	 */
-	u64 load_avg;
-	u64 load_period;
-	u64 load_stamp, load_last, load_unacc_exec_time;
-
-	unsigned long load_contribution;
-#endif
-#ifdef CONFIG_CFS_BANDWIDTH
-	int runtime_enabled;
-	u64 runtime_expires;
-	s64 runtime_remaining;
-
-	u64 throttled_timestamp;
-	int throttled, throttle_count;
-	struct list_head throttled_list;
-#endif
-#endif
-};
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_CFS_BANDWIDTH
-static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
-{
-	return &tg->cfs_bandwidth;
-}
-
-static inline u64 default_cfs_period(void);
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
-static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
-
-static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
-{
-	struct cfs_bandwidth *cfs_b =
-		container_of(timer, struct cfs_bandwidth, slack_timer);
-	do_sched_cfs_slack_timer(cfs_b);
-
-	return HRTIMER_NORESTART;
-}
-
-static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
-{
-	struct cfs_bandwidth *cfs_b =
-		container_of(timer, struct cfs_bandwidth, period_timer);
-	ktime_t now;
-	int overrun;
-	int idle = 0;
-
-	for (;;) {
-		now = hrtimer_cb_get_time(timer);
-		overrun = hrtimer_forward(timer, now, cfs_b->period);
-
-		if (!overrun)
-			break;
-
-		idle = do_sched_cfs_period_timer(cfs_b, overrun);
-	}
-
-	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
-}
-
-static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
-{
-	raw_spin_lock_init(&cfs_b->lock);
-	cfs_b->runtime = 0;
-	cfs_b->quota = RUNTIME_INF;
-	cfs_b->period = ns_to_ktime(default_cfs_period());
-
-	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
-	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	cfs_b->period_timer.function = sched_cfs_period_timer;
-	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	cfs_b->slack_timer.function = sched_cfs_slack_timer;
-}
-
-static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
-{
-	cfs_rq->runtime_enabled = 0;
-	INIT_LIST_HEAD(&cfs_rq->throttled_list);
-}
-
-/* requires cfs_b->lock, may release to reprogram timer */
-static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
-{
-	/*
-	 * The timer may be active because we're trying to set a new bandwidth
-	 * period or because we're racing with the tear-down path
-	 * (timer_active==0 becomes visible before the hrtimer call-back
-	 * terminates).  In either case we ensure that it's re-programmed
-	 */
-	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
-		raw_spin_unlock(&cfs_b->lock);
-		/* ensure cfs_b->lock is available while we wait */
-		hrtimer_cancel(&cfs_b->period_timer);
-
-		raw_spin_lock(&cfs_b->lock);
-		/* if someone else restarted the timer then we're done */
-		if (cfs_b->timer_active)
-			return;
-	}
-
-	cfs_b->timer_active = 1;
-	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
-}
-
-static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
-{
-	hrtimer_cancel(&cfs_b->period_timer);
-	hrtimer_cancel(&cfs_b->slack_timer);
-}
-#else
-static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
-static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-
-static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
-{
-	return NULL;
-}
-#endif /* CONFIG_CFS_BANDWIDTH */
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
-/* Real-Time classes' related field in a runqueue: */
-struct rt_rq {
-	struct rt_prio_array active;
-	unsigned long rt_nr_running;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	struct {
-		int curr; /* highest queued rt task prio */
-#ifdef CONFIG_SMP
-		int next; /* next highest */
-#endif
-	} highest_prio;
-#endif
-#ifdef CONFIG_SMP
-	unsigned long rt_nr_migratory;
-	unsigned long rt_nr_total;
-	int overloaded;
-	struct plist_head pushable_tasks;
-#endif
-	int rt_throttled;
-	u64 rt_time;
-	u64 rt_runtime;
-	/* Nests inside the rq lock: */
-	raw_spinlock_t rt_runtime_lock;
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	unsigned long rt_nr_boosted;
-
-	struct rq *rq;
-	struct list_head leaf_rt_rq_list;
-	struct task_group *tg;
-#endif
-};
-
-#ifdef CONFIG_SMP
-
-/*
- * We add the notion of a root-domain which will be used to define per-domain
- * variables. Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset. Whenever a new
- * exclusive cpuset is created, we also create and attach a new root-domain
- * object.
- *
- */
-struct root_domain {
-	atomic_t refcount;
-	atomic_t rto_count;
-	struct rcu_head rcu;
-	cpumask_var_t span;
-	cpumask_var_t online;
-
-	/*
-	 * The "RT overload" flag: it gets set if a CPU has more than
-	 * one runnable RT task.
-	 */
-	cpumask_var_t rto_mask;
-	struct cpupri cpupri;
-};
-
-/*
- * By default the system creates a single root-domain with all cpus as
- * members (mimicking the global state we have today).
- */
-static struct root_domain def_root_domain;
-
-#endif /* CONFIG_SMP */
-
-/*
- * This is the main, per-CPU runqueue data structure.
- *
- * Locking rule: those places that want to lock multiple runqueues
- * (such as the load balancing or the thread migration code), lock
- * acquire operations must be ordered by ascending &runqueue.
- */
-struct rq {
-	/* runqueue lock: */
-	raw_spinlock_t lock;
-
-	/*
-	 * nr_running and cpu_load should be in the same cacheline because
-	 * remote CPUs use both these fields when doing load calculation.
-	 */
-	unsigned long nr_running;
-	#define CPU_LOAD_IDX_MAX 5
-	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
-	unsigned long last_load_update_tick;
-#ifdef CONFIG_NO_HZ
-	u64 nohz_stamp;
-	unsigned char nohz_balance_kick;
-#endif
-	int skip_clock_update;
-
-	/* capture load from *all* tasks on this cpu: */
-	struct load_weight load;
-	unsigned long nr_load_updates;
-	u64 nr_switches;
-
-	struct cfs_rq cfs;
-	struct rt_rq rt;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	/* list of leaf cfs_rq on this cpu: */
-	struct list_head leaf_cfs_rq_list;
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	struct list_head leaf_rt_rq_list;
-#endif
-
-	/*
-	 * This is part of a global counter where only the total sum
-	 * over all CPUs matters. A task can increase this counter on
-	 * one CPU and if it got migrated afterwards it may decrease
-	 * it on another CPU. Always updated under the runqueue lock:
-	 */
-	unsigned long nr_uninterruptible;
-
-	struct task_struct *curr, *idle, *stop;
-	unsigned long next_balance;
-	struct mm_struct *prev_mm;
-
-	u64 clock;
-	u64 clock_task;
-
-	atomic_t nr_iowait;
-
-#ifdef CONFIG_SMP
-	struct root_domain *rd;
-	struct sched_domain *sd;
-
-	unsigned long cpu_power;
-
-	unsigned char idle_balance;
-	/* For active balancing */
-	int post_schedule;
-	int active_balance;
-	int push_cpu;
-	struct cpu_stop_work active_balance_work;
-	/* cpu of this runqueue: */
-	int cpu;
-	int online;
-
-	u64 rt_avg;
-	u64 age_stamp;
-	u64 idle_stamp;
-	u64 avg_idle;
-#endif
-
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-	u64 prev_irq_time;
-#endif
-#ifdef CONFIG_PARAVIRT
-	u64 prev_steal_time;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	u64 prev_steal_time_rq;
-#endif
-
-	/* calc_load related fields */
-	unsigned long calc_load_update;
-	long calc_load_active;
-
-#ifdef CONFIG_SCHED_HRTICK
-#ifdef CONFIG_SMP
-	int hrtick_csd_pending;
-	struct call_single_data hrtick_csd;
-#endif
-	struct hrtimer hrtick_timer;
-#endif
-
-#ifdef CONFIG_SCHEDSTATS
-	/* latency stats */
-	struct sched_info rq_sched_info;
-	unsigned long long rq_cpu_time;
-	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-
-	/* sys_sched_yield() stats */
-	unsigned int yld_count;
-
-	/* schedule() stats */
-	unsigned int sched_switch;
-	unsigned int sched_count;
-	unsigned int sched_goidle;
-
-	/* try_to_wake_up() stats */
-	unsigned int ttwu_count;
-	unsigned int ttwu_local;
-#endif
-
-#ifdef CONFIG_SMP
-	struct llist_head wake_list;
-#endif
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-
-static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-
-static inline int cpu_of(struct rq *rq)
-{
-#ifdef CONFIG_SMP
-	return rq->cpu;
-#else
-	return 0;
-#endif
-}
-
-#define rcu_dereference_check_sched_domain(p) \
-	rcu_dereference_check((p), \
-			      lockdep_is_held(&sched_domains_mutex))
-
-/*
- * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
- * See detach_destroy_domains: synchronize_sched for details.
- *
- * The domain tree of any CPU may only be accessed from within
- * preempt-disabled sections.
- */
-#define for_each_domain(cpu, __sd) \
-	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
-
-#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
-#define task_rq(p)		cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-#define raw_rq()		(&__raw_get_cpu_var(runqueues))
-
-#ifdef CONFIG_CGROUP_SCHED
-
-/*
- * Return the group to which this tasks belongs.
- *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
- */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	struct task_group *tg;
-	struct cgroup_subsys_state *css;
-
-	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock) ||
-			lockdep_is_held(&task_rq(p)->lock));
-	tg = container_of(css, struct task_group, css);
-
-	return autogroup_task_group(p, tg);
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-	p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
-	p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-}
-
-#else /* CONFIG_CGROUP_SCHED */
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_CGROUP_SCHED */
+DEFINE_MUTEX(sched_domains_mutex);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 static void update_rq_clock_task(struct rq *rq, s64 delta);
 
-static void update_rq_clock(struct rq *rq)
+void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
@@ -803,44 +122,14 @@
 }
 
 /*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
- */
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug static const
-#endif
-
-/**
- * runqueue_is_locked - Returns true if the current cpu runqueue is locked
- * @cpu: the processor in question.
- *
- * This interface allows printk to be called with the runqueue lock
- * held and know whether or not it is OK to wake up the klogd.
- */
-int runqueue_is_locked(int cpu)
-{
-	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
-}
-
-/*
  * Debugging: various feature bits
  */
 
 #define SCHED_FEAT(name, enabled)	\
-	__SCHED_FEAT_##name ,
-
-enum {
-#include "sched_features.h"
-};
-
-#undef SCHED_FEAT
-
-#define SCHED_FEAT(name, enabled)	\
 	(1UL << __SCHED_FEAT_##name) * enabled |
 
 const_debug unsigned int sysctl_sched_features =
-#include "sched_features.h"
+#include "features.h"
 	0;
 
 #undef SCHED_FEAT
@@ -850,7 +139,7 @@
 	#name ,
 
 static __read_mostly char *sched_feat_names[] = {
-#include "sched_features.h"
+#include "features.h"
 	NULL
 };
 
@@ -860,7 +149,7 @@
 {
 	int i;
 
-	for (i = 0; sched_feat_names[i]; i++) {
+	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 		if (!(sysctl_sched_features & (1UL << i)))
 			seq_puts(m, "NO_");
 		seq_printf(m, "%s ", sched_feat_names[i]);
@@ -870,6 +159,36 @@
 	return 0;
 }
 
+#ifdef HAVE_JUMP_LABEL
+
+#define jump_label_key__true  jump_label_key_enabled
+#define jump_label_key__false jump_label_key_disabled
+
+#define SCHED_FEAT(name, enabled)	\
+	jump_label_key__##enabled ,
+
+struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+#include "features.h"
+};
+
+#undef SCHED_FEAT
+
+static void sched_feat_disable(int i)
+{
+	if (jump_label_enabled(&sched_feat_keys[i]))
+		jump_label_dec(&sched_feat_keys[i]);
+}
+
+static void sched_feat_enable(int i)
+{
+	if (!jump_label_enabled(&sched_feat_keys[i]))
+		jump_label_inc(&sched_feat_keys[i]);
+}
+#else
+static void sched_feat_disable(int i) { };
+static void sched_feat_enable(int i) { };
+#endif /* HAVE_JUMP_LABEL */
+
 static ssize_t
 sched_feat_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
@@ -893,17 +212,20 @@
 		cmp += 3;
 	}
 
-	for (i = 0; sched_feat_names[i]; i++) {
+	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 		if (strcmp(cmp, sched_feat_names[i]) == 0) {
-			if (neg)
+			if (neg) {
 				sysctl_sched_features &= ~(1UL << i);
-			else
+				sched_feat_disable(i);
+			} else {
 				sysctl_sched_features |= (1UL << i);
+				sched_feat_enable(i);
+			}
 			break;
 		}
 	}
 
-	if (!sched_feat_names[i])
+	if (i == __SCHED_FEAT_NR)
 		return -EINVAL;
 
 	*ppos += cnt;
@@ -932,10 +254,7 @@
 	return 0;
 }
 late_initcall(sched_init_debug);
-
-#endif
-
-#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#endif /* CONFIG_SCHED_DEBUG */
 
 /*
  * Number of tasks to iterate in a single balance run.
@@ -957,7 +276,7 @@
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
-static __read_mostly int scheduler_running;
+__read_mostly int scheduler_running;
 
 /*
  * part of the period that we allow rt tasks to run in us.
@@ -965,112 +284,7 @@
  */
 int sysctl_sched_rt_runtime = 950000;
 
-static inline u64 global_rt_period(void)
-{
-	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
-}
 
-static inline u64 global_rt_runtime(void)
-{
-	if (sysctl_sched_rt_runtime < 0)
-		return RUNTIME_INF;
-
-	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
-}
-
-#ifndef prepare_arch_switch
-# define prepare_arch_switch(next)	do { } while (0)
-#endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev)	do { } while (0)
-#endif
-
-static inline int task_current(struct rq *rq, struct task_struct *p)
-{
-	return rq->curr == p;
-}
-
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->on_cpu;
-#else
-	return task_current(rq, p);
-#endif
-}
-
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 */
-	smp_wmb();
-	prev->on_cpu = 0;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = current;
-#endif
-	/*
-	 * If we are tracking spinlock dependencies then we have to
-	 * fix up the runqueue lock - which gets 'carried over' from
-	 * prev into current:
-	 */
-	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
-	raw_spin_unlock_irq(&rq->lock);
-}
-
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->on_cpu = 1;
-#endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	raw_spin_unlock_irq(&rq->lock);
-#else
-	raw_spin_unlock(&rq->lock);
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 */
-	smp_wmb();
-	prev->on_cpu = 0;
-#endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_enable();
-#endif
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
  * __task_rq_lock - lock the rq @p resides on.
@@ -1153,20 +367,6 @@
  * rq->lock.
  */
 
-/*
- * Use hrtick when:
- *  - enabled by features
- *  - hrtimer is actually high res
- */
-static inline int hrtick_enabled(struct rq *rq)
-{
-	if (!sched_feat(HRTICK))
-		return 0;
-	if (!cpu_active(cpu_of(rq)))
-		return 0;
-	return hrtimer_is_hres_active(&rq->hrtick_timer);
-}
-
 static void hrtick_clear(struct rq *rq)
 {
 	if (hrtimer_active(&rq->hrtick_timer))
@@ -1210,7 +410,7 @@
  *
  * called with rq->lock held and irqs disabled
  */
-static void hrtick_start(struct rq *rq, u64 delay)
+void hrtick_start(struct rq *rq, u64 delay)
 {
 	struct hrtimer *timer = &rq->hrtick_timer;
 	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
@@ -1254,7 +454,7 @@
  *
  * called with rq->lock held and irqs disabled
  */
-static void hrtick_start(struct rq *rq, u64 delay)
+void hrtick_start(struct rq *rq, u64 delay)
 {
 	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
 			HRTIMER_MODE_REL_PINNED, 0);
@@ -1305,7 +505,7 @@
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 #endif
 
-static void resched_task(struct task_struct *p)
+void resched_task(struct task_struct *p)
 {
 	int cpu;
 
@@ -1326,7 +526,7 @@
 		smp_send_reschedule(cpu);
 }
 
-static void resched_cpu(int cpu)
+void resched_cpu(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -1407,7 +607,8 @@
 
 static inline bool got_nohz_idle_kick(void)
 {
-	return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick;
+	int cpu = smp_processor_id();
+	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 }
 
 #else /* CONFIG_NO_HZ */
@@ -1419,12 +620,7 @@
 
 #endif /* CONFIG_NO_HZ */
 
-static u64 sched_avg_period(void)
-{
-	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
-static void sched_avg_update(struct rq *rq)
+void sched_avg_update(struct rq *rq)
 {
 	s64 period = sched_avg_period();
 
@@ -1440,193 +636,23 @@
 	}
 }
 
-static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
-	rq->rt_avg += rt_delta;
-	sched_avg_update(rq);
-}
-
 #else /* !CONFIG_SMP */
-static void resched_task(struct task_struct *p)
+void resched_task(struct task_struct *p)
 {
 	assert_raw_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
 }
-
-static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
-}
-
-static void sched_avg_update(struct rq *rq)
-{
-}
 #endif /* CONFIG_SMP */
 
-#if BITS_PER_LONG == 32
-# define WMULT_CONST	(~0UL)
-#else
-# define WMULT_CONST	(1UL << 32)
-#endif
-
-#define WMULT_SHIFT	32
-
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
-
-/*
- * delta *= weight / lw
- */
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
-		struct load_weight *lw)
-{
-	u64 tmp;
-
-	/*
-	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
-	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
-	 * 2^SCHED_LOAD_RESOLUTION.
-	 */
-	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
-		tmp = (u64)delta_exec * scale_load_down(weight);
-	else
-		tmp = (u64)delta_exec;
-
-	if (!lw->inv_weight) {
-		unsigned long w = scale_load_down(lw->weight);
-
-		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
-			lw->inv_weight = 1;
-		else if (unlikely(!w))
-			lw->inv_weight = WMULT_CONST;
-		else
-			lw->inv_weight = WMULT_CONST / w;
-	}
-
-	/*
-	 * Check whether we'd overflow the 64-bit multiplication:
-	 */
-	if (unlikely(tmp > WMULT_CONST))
-		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
-			WMULT_SHIFT/2);
-	else
-		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
-
-	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
-}
-
-static inline void update_load_add(struct load_weight *lw, unsigned long inc)
-{
-	lw->weight += inc;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
-{
-	lw->weight -= dec;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_set(struct load_weight *lw, unsigned long w)
-{
-	lw->weight = w;
-	lw->inv_weight = 0;
-}
-
-/*
- * To aid in avoiding the subversion of "niceness" due to uneven distribution
- * of tasks with abnormal "nice" values across CPUs the contribution that
- * each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
- * scaled version of the new time slice allocation that they receive on time
- * slice expiry etc.
- */
-
-#define WEIGHT_IDLEPRIO                3
-#define WMULT_IDLEPRIO         1431655765
-
-/*
- * Nice levels are multiplicative, with a gentle 10% change for every
- * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
- * nice 1, it will get ~10% less CPU time than another CPU-bound task
- * that remained on nice 0.
- *
- * The "10% effect" is relative and cumulative: from _any_ nice level,
- * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
- * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
- * If a task goes up by ~10% and another task goes down by ~10% then
- * the relative distance between them is ~25%.)
- */
-static const int prio_to_weight[40] = {
- /* -20 */     88761,     71755,     56483,     46273,     36291,
- /* -15 */     29154,     23254,     18705,     14949,     11916,
- /* -10 */      9548,      7620,      6100,      4904,      3906,
- /*  -5 */      3121,      2501,      1991,      1586,      1277,
- /*   0 */      1024,       820,       655,       526,       423,
- /*   5 */       335,       272,       215,       172,       137,
- /*  10 */       110,        87,        70,        56,        45,
- /*  15 */        36,        29,        23,        18,        15,
-};
-
-/*
- * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
- *
- * In cases where the weight does not change often, we can use the
- * precalculated inverse to speed up arithmetics by turning divisions
- * into multiplications:
- */
-static const u32 prio_to_wmult[40] = {
- /* -20 */     48388,     59856,     76040,     92818,    118348,
- /* -15 */    147320,    184698,    229616,    287308,    360437,
- /* -10 */    449829,    563644,    704093,    875809,   1099582,
- /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
- /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
- /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
- /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
- /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
-};
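/*
 * Editor's illustrative sketch (userspace, values taken from the tables
 * above): each nice step multiplies the weight by ~1.25, so two CPU-bound
 * tasks one nice level apart split the CPU roughly 55%/45% -- the "10%
 * effect" -- and prio_to_wmult[] is simply 2^32 / prio_to_weight[].
 */
#include <stdio.h>

int main(void)
{
	unsigned long nice0 = 1024, nice1 = 820;	/* nice 0 and nice 1 weights */

	double share0 = (double)nice0 / (nice0 + nice1);
	double share1 = (double)nice1 / (nice0 + nice1);

	printf("nice 0 gets %.1f%%, nice 1 gets %.1f%% of the CPU\n",
	       100.0 * share0, 100.0 * share1);
	printf("weight ratio %.3f, 2^32/1024 = %llu (nice 0 entry: 4194304)\n",
	       (double)nice0 / nice1, (1ULL << 32) / nice0);
	return 0;
}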
-
-/* Time spent by the tasks of the cpu accounting group executing in ... */
-enum cpuacct_stat_index {
-	CPUACCT_STAT_USER,	/* ... user mode */
-	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */
-
-	CPUACCT_STAT_NSTATS,
-};
-
-#ifdef CONFIG_CGROUP_CPUACCT
-static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
-static void cpuacct_update_stats(struct task_struct *tsk,
-		enum cpuacct_stat_index idx, cputime_t val);
-#else
-static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
-static inline void cpuacct_update_stats(struct task_struct *tsk,
-		enum cpuacct_stat_index idx, cputime_t val) {}
-#endif
-
-static inline void inc_cpu_load(struct rq *rq, unsigned long load)
-{
-	update_load_add(&rq->load, load);
-}
-
-static inline void dec_cpu_load(struct rq *rq, unsigned long load)
-{
-	update_load_sub(&rq->load, load);
-}
-
 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
 			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
-typedef int (*tg_visitor)(struct task_group *, void *);
-
 /*
  * Iterate task_group tree rooted at *from, calling @down when first entering a
  * node and @up when leaving it for the final time.
  *
  * Caller must hold rcu_lock or sufficient equivalent.
  */
-static int walk_tg_tree_from(struct task_group *from,
+int walk_tg_tree_from(struct task_group *from,
 			     tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
@@ -1657,270 +683,13 @@
 	return ret;
 }
 
-/*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
- *
- * Caller must hold rcu_lock or sufficient equivalent.
- */
-
-static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
-{
-	return walk_tg_tree_from(&root_task_group, down, up, data);
-}
-
-static int tg_nop(struct task_group *tg, void *data)
+int tg_nop(struct task_group *tg, void *data)
 {
 	return 0;
 }
 #endif
 
-#ifdef CONFIG_SMP
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
-	return cpu_rq(cpu)->load.weight;
-}
-
-/*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return max(rq->cpu_load[type-1], total);
-}
-
-static unsigned long power_of(int cpu)
-{
-	return cpu_rq(cpu)->cpu_power;
-}
-
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
-
-	if (nr_running)
-		return rq->load.weight / nr_running;
-
-	return 0;
-}
-
-#ifdef CONFIG_PREEMPT
-
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
-/*
- * fair double_lock_balance: Safely acquires both rq->locks in a fair
- * way at the expense of forcing extra atomic operations in all
- * invocations.  This assures that the double_lock is acquired using the
- * same underlying policy as the spinlock_t on this architecture, which
- * reduces latency compared to the unfair variant below.  However, it
- * also adds more overhead and therefore may reduce throughput.
- */
-static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	raw_spin_unlock(&this_rq->lock);
-	double_rq_lock(this_rq, busiest);
-
-	return 1;
-}
-
-#else
-/*
- * Unfair double_lock_balance: Optimizes throughput at the expense of
- * latency by eliminating extra atomic operations when the locks are
- * already in proper order on entry.  This favors lower cpu-ids and will
- * grant the double lock to lower cpus over higher ids under contention,
- * regardless of entry order into the function.
- */
-static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			raw_spin_unlock(&this_rq->lock);
-			raw_spin_lock(&busiest->lock);
-			raw_spin_lock_nested(&this_rq->lock,
-					      SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			raw_spin_lock_nested(&busiest->lock,
-					      SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-#endif /* CONFIG_PREEMPT */
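/*
 * Editor's illustrative sketch (userspace pthreads analogue, hypothetical
 * helper name): the "unfair" double-lock above takes the two runqueue locks
 * in a fixed (address) order when the trylock fails, which is the standard
 * way to avoid an AB/BA deadlock between two lock holders.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rq_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq_b = PTHREAD_MUTEX_INITIALIZER;

/* this_lock is already held; safely acquire busiest_lock as well */
static void double_lock_demo(pthread_mutex_t *this_lock,
			     pthread_mutex_t *busiest_lock)
{
	if (pthread_mutex_trylock(busiest_lock) != 0) {
		if ((uintptr_t)busiest_lock < (uintptr_t)this_lock) {
			/* wrong order: back off and retake both in order */
			pthread_mutex_unlock(this_lock);
			pthread_mutex_lock(busiest_lock);
			pthread_mutex_lock(this_lock);
		} else {
			pthread_mutex_lock(busiest_lock);
		}
	}
}

int main(void)
{
	pthread_mutex_lock(&rq_a);
	double_lock_demo(&rq_a, &rq_b);
	printf("both locks held\n");
	pthread_mutex_unlock(&rq_b);
	pthread_mutex_unlock(&rq_a);
	return 0;
}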
-
-/*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-{
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		raw_spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-
-	return _double_lock_balance(this_rq, busiest);
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	raw_spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static void double_rq_lock(struct rq *rq1, struct rq *rq2)
-	__acquires(rq1->lock)
-	__acquires(rq2->lock)
-{
-	BUG_ON(!irqs_disabled());
-	if (rq1 == rq2) {
-		raw_spin_lock(&rq1->lock);
-		__acquire(rq2->lock);	/* Fake it out ;) */
-	} else {
-		if (rq1 < rq2) {
-			raw_spin_lock(&rq1->lock);
-			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
-		} else {
-			raw_spin_lock(&rq2->lock);
-			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
-		}
-	}
-}
-
-/*
- * double_rq_unlock - safely unlock two runqueues
- *
- * Note this does not restore interrupts like task_rq_unlock,
- * you need to do so manually after calling.
- */
-static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
-	__releases(rq1->lock)
-	__releases(rq2->lock)
-{
-	raw_spin_unlock(&rq1->lock);
-	if (rq1 != rq2)
-		raw_spin_unlock(&rq2->lock);
-	else
-		__release(rq2->lock);
-}
-
-#else /* CONFIG_SMP */
-
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static void double_rq_lock(struct rq *rq1, struct rq *rq2)
-	__acquires(rq1->lock)
-	__acquires(rq2->lock)
-{
-	BUG_ON(!irqs_disabled());
-	BUG_ON(rq1 != rq2);
-	raw_spin_lock(&rq1->lock);
-	__acquire(rq2->lock);	/* Fake it out ;) */
-}
-
-/*
- * double_rq_unlock - safely unlock two runqueues
- *
- * Note this does not restore interrupts like task_rq_unlock,
- * you need to do so manually after calling.
- */
-static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
-	__releases(rq1->lock)
-	__releases(rq2->lock)
-{
-	BUG_ON(rq1 != rq2);
-	raw_spin_unlock(&rq1->lock);
-	__release(rq2->lock);
-}
-
-#endif
-
-static void calc_load_account_idle(struct rq *this_rq);
-static void update_sysctl(void);
-static int get_update_sysctl_factor(void);
-static void update_cpu_load(struct rq *this_rq);
-
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfully executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
-static const struct sched_class rt_sched_class;
-
-#define sched_class_highest (&stop_sched_class)
-#define for_each_class(class) \
-   for (class = sched_class_highest; class; class = class->next)
-
-#include "sched_stats.h"
-
-static void inc_nr_running(struct rq *rq)
-{
-	rq->nr_running++;
-}
-
-static void dec_nr_running(struct rq *rq)
-{
-	rq->nr_running--;
-}
+void update_cpu_load(struct rq *this_rq);
 
 static void set_load_weight(struct task_struct *p)
 {
@@ -1957,7 +726,7 @@
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int flags)
+void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
@@ -1968,7 +737,7 @@
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
+void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
@@ -2159,14 +928,14 @@
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 static int irqtime_account_hi_update(void)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	unsigned long flags;
 	u64 latest_ns;
 	int ret = 0;
 
 	local_irq_save(flags);
 	latest_ns = this_cpu_read(cpu_hardirq_time);
-	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
 		ret = 1;
 	local_irq_restore(flags);
 	return ret;
@@ -2174,14 +943,14 @@
 
 static int irqtime_account_si_update(void)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	unsigned long flags;
 	u64 latest_ns;
 	int ret = 0;
 
 	local_irq_save(flags);
 	latest_ns = this_cpu_read(cpu_softirq_time);
-	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
 		ret = 1;
 	local_irq_restore(flags);
 	return ret;
@@ -2193,15 +962,6 @@
 
 #endif
 
-#include "sched_idletask.c"
-#include "sched_fair.c"
-#include "sched_rt.c"
-#include "sched_autogroup.c"
-#include "sched_stoptask.c"
-#ifdef CONFIG_SCHED_DEBUG
-# include "sched_debug.c"
-#endif
-
 void sched_set_stop_task(int cpu, struct task_struct *stop)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -2299,7 +1059,7 @@
 		p->sched_class->prio_changed(rq, p, oldprio);
 }
 
-static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
 	const struct sched_class *class;
 
@@ -2325,38 +1085,6 @@
 }
 
 #ifdef CONFIG_SMP
-/*
- * Is this task likely cache-hot:
- */
-static int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
-{
-	s64 delta;
-
-	if (p->sched_class != &fair_sched_class)
-		return 0;
-
-	if (unlikely(p->policy == SCHED_IDLE))
-		return 0;
-
-	/*
-	 * Buddy candidates are cache hot:
-	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
-			(&p->se == cfs_rq_of(&p->se)->next ||
-			 &p->se == cfs_rq_of(&p->se)->last))
-		return 1;
-
-	if (sysctl_sched_migration_cost == -1)
-		return 1;
-	if (sysctl_sched_migration_cost == 0)
-		return 0;
-
-	delta = now - p->se.exec_start;
-
-	return delta < (s64)sysctl_sched_migration_cost;
-}
-
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 #ifdef CONFIG_SCHED_DEBUG
@@ -2783,6 +1511,11 @@
 
 }
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+
+static inline int ttwu_share_cache(int this_cpu, int that_cpu)
+{
+	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
+}
 #endif /* CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu)
@@ -2790,7 +1523,7 @@
 	struct rq *rq = cpu_rq(cpu);
 
 #if defined(CONFIG_SMP)
-	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+	if (sched_feat(TTWU_QUEUE) && !ttwu_share_cache(smp_processor_id(), cpu)) {
 		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
@@ -3204,6 +1937,7 @@
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	trace_sched_stat_sleeptime(current, rq->clock);
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
@@ -3439,7 +2173,7 @@
  */
 static atomic_long_t calc_load_tasks_idle;
 
-static void calc_load_account_idle(struct rq *this_rq)
+void calc_load_account_idle(struct rq *this_rq)
 {
 	long delta;
 
@@ -3583,7 +2317,7 @@
 	 */
 }
 #else
-static void calc_load_account_idle(struct rq *this_rq)
+void calc_load_account_idle(struct rq *this_rq)
 {
 }
 
@@ -3726,7 +2460,7 @@
  * scheduler tick (TICK_NSEC). With tickless idle this will not be called
  * every tick. We fix it up based on jiffies.
  */
-static void update_cpu_load(struct rq *this_rq)
+void update_cpu_load(struct rq *this_rq)
 {
 	unsigned long this_load = this_rq->load.weight;
 	unsigned long curr_jiffies = jiffies;
@@ -3804,8 +2538,10 @@
 #endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
  * Return any ns on the sched_clock that have not yet been accounted in
@@ -3858,6 +2594,42 @@
 	return ns;
 }
 
+#ifdef CONFIG_CGROUP_CPUACCT
+struct cgroup_subsys cpuacct_subsys;
+struct cpuacct root_cpuacct;
+#endif
+
+static inline void task_group_account_field(struct task_struct *p, int index,
+					    u64 tmp)
+{
+#ifdef CONFIG_CGROUP_CPUACCT
+	struct kernel_cpustat *kcpustat;
+	struct cpuacct *ca;
+#endif
+	/*
+	 * Since all updates are sure to touch the root cgroup, we
+	 * go ahead and touch it first. If the root cgroup is the
+	 * only cgroup, then nothing else should be necessary.
+	 */
+	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+
+#ifdef CONFIG_CGROUP_CPUACCT
+	if (unlikely(!cpuacct_subsys.active))
+		return;
+
+	rcu_read_lock();
+	ca = task_ca(p);
+	while (ca && (ca != &root_cpuacct)) {
+		kcpustat = this_cpu_ptr(ca->cpustat);
+		kcpustat->cpustat[index] += tmp;
+		ca = parent_ca(ca);
+	}
+	rcu_read_unlock();
+#endif
+}
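/*
 * Editor's illustrative sketch (hypothetical two-level cgroup hierarchy,
 * userspace stand-ins for the per-cpu structures): every tick is charged to
 * the global cpustat first, then walked up the cpuacct ancestors, stopping
 * before the root group since the root's cpustat aliases the global one.
 */
#include <stdio.h>

struct group {
	const char *name;
	unsigned long long cpustat;
	struct group *parent;
};

static unsigned long long global_cpustat;	/* stands in for kernel_cpustat */

static struct group root = { "root", 0, NULL };
static struct group child = { "child", 0, &root };

static void account_field_demo(struct group *ca, unsigned long long tmp)
{
	global_cpustat += tmp;			/* root is always charged */

	while (ca && ca != &root) {		/* then the non-root ancestors */
		ca->cpustat += tmp;
		ca = ca->parent;
	}
}

int main(void)
{
	account_field_demo(&child, 1000);	/* one 1000 ns tick, say */
	printf("global=%llu child=%llu\n", global_cpustat, child.cpustat);
	return 0;
}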
+
+
 /*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -3867,22 +2639,18 @@
 void account_user_time(struct task_struct *p, cputime_t cputime,
 		       cputime_t cputime_scaled)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t tmp;
+	int index;
 
 	/* Add user time to process. */
-	p->utime = cputime_add(p->utime, cputime);
-	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+	p->utime += cputime;
+	p->utimescaled += cputime_scaled;
 	account_group_user_time(p, cputime);
 
-	/* Add user time to cpustat. */
-	tmp = cputime_to_cputime64(cputime);
-	if (TASK_NICE(p) > 0)
-		cpustat->nice = cputime64_add(cpustat->nice, tmp);
-	else
-		cpustat->user = cputime64_add(cpustat->user, tmp);
+	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 
-	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
+	/* Add user time to cpustat. */
+	task_group_account_field(p, index, (__force u64) cputime);
+
 	/* Account for user time used */
 	acct_update_integrals(p);
 }
@@ -3896,24 +2664,21 @@
 static void account_guest_time(struct task_struct *p, cputime_t cputime,
 			       cputime_t cputime_scaled)
 {
-	cputime64_t tmp;
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-
-	tmp = cputime_to_cputime64(cputime);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	/* Add guest time to process. */
-	p->utime = cputime_add(p->utime, cputime);
-	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+	p->utime += cputime;
+	p->utimescaled += cputime_scaled;
 	account_group_user_time(p, cputime);
-	p->gtime = cputime_add(p->gtime, cputime);
+	p->gtime += cputime;
 
 	/* Add guest time to cpustat. */
 	if (TASK_NICE(p) > 0) {
-		cpustat->nice = cputime64_add(cpustat->nice, tmp);
-		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+		cpustat[CPUTIME_NICE] += (__force u64) cputime;
+		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
 	} else {
-		cpustat->user = cputime64_add(cpustat->user, tmp);
-		cpustat->guest = cputime64_add(cpustat->guest, tmp);
+		cpustat[CPUTIME_USER] += (__force u64) cputime;
+		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
 	}
 }
 
@@ -3926,18 +2691,15 @@
  */
 static inline
 void __account_system_time(struct task_struct *p, cputime_t cputime,
-			cputime_t cputime_scaled, cputime64_t *target_cputime64)
+			cputime_t cputime_scaled, int index)
 {
-	cputime64_t tmp = cputime_to_cputime64(cputime);
-
 	/* Add system time to process. */
-	p->stime = cputime_add(p->stime, cputime);
-	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+	p->stime += cputime;
+	p->stimescaled += cputime_scaled;
 	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
-	*target_cputime64 = cputime64_add(*target_cputime64, tmp);
-	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+	task_group_account_field(p, index, (__force u64) cputime);
 
 	/* Account for system time used */
 	acct_update_integrals(p);
@@ -3953,8 +2715,7 @@
 void account_system_time(struct task_struct *p, int hardirq_offset,
 			 cputime_t cputime, cputime_t cputime_scaled)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t *target_cputime64;
+	int index;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
 		account_guest_time(p, cputime, cputime_scaled);
@@ -3962,13 +2723,13 @@
 	}
 
 	if (hardirq_count() - hardirq_offset)
-		target_cputime64 = &cpustat->irq;
+		index = CPUTIME_IRQ;
 	else if (in_serving_softirq())
-		target_cputime64 = &cpustat->softirq;
+		index = CPUTIME_SOFTIRQ;
 	else
-		target_cputime64 = &cpustat->system;
+		index = CPUTIME_SYSTEM;
 
-	__account_system_time(p, cputime, cputime_scaled, target_cputime64);
+	__account_system_time(p, cputime, cputime_scaled, index);
 }
 
 /*
@@ -3977,10 +2738,9 @@
  */
 void account_steal_time(cputime_t cputime)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
-	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
+	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
 }
 
 /*
@@ -3989,14 +2749,13 @@
  */
 void account_idle_time(cputime_t cputime)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	struct rq *rq = this_rq();
 
 	if (atomic_read(&rq->nr_iowait) > 0)
-		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
 	else
-		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
 static __always_inline bool steal_account_process_tick(void)
@@ -4046,16 +2805,15 @@
 						struct rq *rq)
 {
 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
-	cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	if (steal_account_process_tick())
 		return;
 
 	if (irqtime_account_hi_update()) {
-		cpustat->irq = cputime64_add(cpustat->irq, tmp);
+		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
 	} else if (irqtime_account_si_update()) {
-		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
 	} else if (this_cpu_ksoftirqd() == p) {
 		/*
 		 * ksoftirqd time do not get accounted in cpu_softirq_time.
@@ -4063,7 +2821,7 @@
 		 * Also, p->stime needs to be updated for ksoftirqd.
 		 */
 		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					&cpustat->softirq);
+					CPUTIME_SOFTIRQ);
 	} else if (user_tick) {
 		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	} else if (p == rq->idle) {
@@ -4072,7 +2830,7 @@
 		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	} else {
 		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					&cpustat->system);
+					CPUTIME_SYSTEM);
 	}
 }
 
@@ -4171,7 +2929,7 @@
 
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+	cputime_t rtime, utime = p->utime, total = utime + p->stime;
 
 	/*
 	 * Use CFS's precise accounting:
@@ -4179,11 +2937,11 @@
 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		u64 temp = rtime;
+		u64 temp = (__force u64) rtime;
 
-		temp *= utime;
-		do_div(temp, total);
-		utime = (cputime_t)temp;
+		temp *= (__force u64) utime;
+		do_div(temp, (__force u32) total);
+		utime = (__force cputime_t) temp;
 	} else
 		utime = rtime;
 
@@ -4191,7 +2949,7 @@
 	 * Compare with previous values, to keep monotonicity:
 	 */
 	p->prev_utime = max(p->prev_utime, utime);
-	p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
+	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
 
 	*ut = p->prev_utime;
 	*st = p->prev_stime;
@@ -4208,21 +2966,20 @@
 
 	thread_group_cputime(p, &cputime);
 
-	total = cputime_add(cputime.utime, cputime.stime);
+	total = cputime.utime + cputime.stime;
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
 	if (total) {
-		u64 temp = rtime;
+		u64 temp = (__force u64) rtime;
 
-		temp *= cputime.utime;
-		do_div(temp, total);
-		utime = (cputime_t)temp;
+		temp *= (__force u64) cputime.utime;
+		do_div(temp, (__force u32) total);
+		utime = (__force cputime_t) temp;
 	} else
 		utime = rtime;
 
 	sig->prev_utime = max(sig->prev_utime, utime);
-	sig->prev_stime = max(sig->prev_stime,
-			      cputime_sub(rtime, sig->prev_utime));
+	sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
 
 	*ut = sig->prev_utime;
 	*st = sig->prev_stime;
@@ -4321,6 +3078,9 @@
 {
 	struct pt_regs *regs = get_irq_regs();
 
+	if (oops_in_progress)
+		return;
+
 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
 		prev->comm, prev->pid, preempt_count());
 
@@ -5852,6 +4612,13 @@
 		 */
 		if (preempt && rq != p_rq)
 			resched_task(p_rq->curr);
+	} else {
+		/*
+		 * We might have set it in yield_task_fair(), but are
+		 * not going to schedule(), so we don't want to skip
+		 * the next update.
+		 */
+		rq->skip_clock_update = 0;
 	}
 
 out:
@@ -6019,7 +4786,7 @@
 	free = stack_not_used(p);
 #endif
 	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
-		task_pid_nr(p), task_pid_nr(p->real_parent),
+		task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
 		(unsigned long)task_thread_info(p)->flags);
 
 	show_stack(p, NULL);
@@ -6118,53 +4885,6 @@
 #endif
 }
 
-/*
- * Increase the granularity value when there are more CPUs,
- * because with more CPUs the 'effective latency' as visible
- * to users decreases. But the relationship is not linear,
- * so pick a second-best guess by going with the log2 of the
- * number of CPUs.
- *
- * This idea comes from the SD scheduler of Con Kolivas:
- */
-static int get_update_sysctl_factor(void)
-{
-	unsigned int cpus = min_t(int, num_online_cpus(), 8);
-	unsigned int factor;
-
-	switch (sysctl_sched_tunable_scaling) {
-	case SCHED_TUNABLESCALING_NONE:
-		factor = 1;
-		break;
-	case SCHED_TUNABLESCALING_LINEAR:
-		factor = cpus;
-		break;
-	case SCHED_TUNABLESCALING_LOG:
-	default:
-		factor = 1 + ilog2(cpus);
-		break;
-	}
-
-	return factor;
-}
-
-static void update_sysctl(void)
-{
-	unsigned int factor = get_update_sysctl_factor();
-
-#define SET_SYSCTL(name) \
-	(sysctl_##name = (factor) * normalized_sysctl_##name)
-	SET_SYSCTL(sched_min_granularity);
-	SET_SYSCTL(sched_latency);
-	SET_SYSCTL(sched_wakeup_granularity);
-#undef SET_SYSCTL
-}
-
-static inline void sched_init_granularity(void)
-{
-	update_sysctl();
-}
-
 #ifdef CONFIG_SMP
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
@@ -6351,30 +5071,6 @@
 	rq->calc_load_active = 0;
 }
 
-#ifdef CONFIG_CFS_BANDWIDTH
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
-{
-	struct cfs_rq *cfs_rq;
-
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
-
-		if (!cfs_rq->runtime_enabled)
-			continue;
-
-		/*
-		 * clock_task is not advancing so we just need to make sure
-		 * there's some valid quota amount
-		 */
-		cfs_rq->runtime_remaining = cfs_b->quota;
-		if (cfs_rq_throttled(cfs_rq))
-			unthrottle_cfs_rq(cfs_rq);
-	}
-}
-#else
-static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
-#endif
-
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
  * try_to_wake_up()->select_task_rq().
@@ -6480,7 +5176,7 @@
 static void
 set_table_entry(struct ctl_table *entry,
 		const char *procname, void *data, int maxlen,
-		mode_t mode, proc_handler *proc_handler)
+		umode_t mode, proc_handler *proc_handler)
 {
 	entry->procname = procname;
 	entry->data = data;
@@ -6980,6 +5676,12 @@
 	return -ENOMEM;
 }
 
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+struct root_domain def_root_domain;
+
 static void init_defrootdomain(void)
 {
 	init_rootdomain(&def_root_domain);
@@ -7051,6 +5753,31 @@
 }
 
 /*
+ * Keep a special pointer to the highest sched_domain that has
+ * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
+ * allows us to avoid some pointer chasing in select_idle_sibling().
+ *
+ * Also keep a unique ID per domain (we use the first cpu number in
+ * the cpumask of the domain); this allows us to quickly tell whether
+ * two cpus are in the same cache domain, see ttwu_share_cache().
+ */
+DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(int, sd_llc_id);
+
+static void update_top_cache_domain(int cpu)
+{
+	struct sched_domain *sd;
+	int id = cpu;
+
+	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+	if (sd)
+		id = cpumask_first(sched_domain_span(sd));
+
+	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+	per_cpu(sd_llc_id, cpu) = id;
+}
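/*
 * Editor's illustrative sketch (hypothetical 4-CPU topology with two 2-CPU
 * LLC domains): caching the first CPU of each cache domain as an "LLC id"
 * lets ttwu_share_cache() compare two integers instead of walking the
 * sched_domain hierarchy on every wakeup.
 */
#include <stdio.h>

static int sd_llc_id_demo[4];

static void update_top_cache_domain_demo(void)
{
	/* CPUs 0-1 share one last-level cache, CPUs 2-3 share another */
	int llc_first_cpu[4] = { 0, 0, 2, 2 };

	for (int cpu = 0; cpu < 4; cpu++)
		sd_llc_id_demo[cpu] = llc_first_cpu[cpu];
}

static int share_cache_demo(int a, int b)
{
	return sd_llc_id_demo[a] == sd_llc_id_demo[b];
}

int main(void)
{
	update_top_cache_domain_demo();
	printf("0-1 share cache: %d, 1-2 share cache: %d\n",
	       share_cache_demo(0, 1), share_cache_demo(1, 2));
	return 0;
}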
+
+/*
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
@@ -7089,6 +5816,8 @@
 	tmp = rq->sd;
 	rcu_assign_pointer(rq->sd, sd);
 	destroy_sched_domains(tmp, cpu);
+
+	update_top_cache_domain(cpu);
 }
 
 /* cpus with isolated domains */
@@ -7248,7 +5977,7 @@
 			continue;
 
 		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				GFP_KERNEL, cpu_to_node(i));
+				GFP_KERNEL, cpu_to_node(cpu));
 
 		if (!sg)
 			goto fail;
@@ -7386,6 +6115,12 @@
 		return;
 
 	update_group_power(sd, cpu);
+	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
+}
+
+int __weak arch_sd_sibling_asym_packing(void)
+{
+	return 0*SD_ASYM_PACKING;
 }
 
 /*
@@ -7940,54 +6675,52 @@
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
-					   struct sysdev_class_attribute *attr,
-					   char *page)
+static ssize_t sched_mc_power_savings_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
 {
-	return sprintf(page, "%u\n", sched_mc_power_savings);
+	return sprintf(buf, "%u\n", sched_mc_power_savings);
 }
-static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
-					    struct sysdev_class_attribute *attr,
+static ssize_t sched_mc_power_savings_store(struct device *dev,
+					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
 }
-static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
-			 sched_mc_power_savings_show,
-			 sched_mc_power_savings_store);
+static DEVICE_ATTR(sched_mc_power_savings, 0644,
+		   sched_mc_power_savings_show,
+		   sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
-					    struct sysdev_class_attribute *attr,
-					    char *page)
+static ssize_t sched_smt_power_savings_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
 {
-	return sprintf(page, "%u\n", sched_smt_power_savings);
+	return sprintf(buf, "%u\n", sched_smt_power_savings);
 }
-static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
-					     struct sysdev_class_attribute *attr,
+static ssize_t sched_smt_power_savings_store(struct device *dev,
+					    struct device_attribute *attr,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
 }
-static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+static DEVICE_ATTR(sched_smt_power_savings, 0644,
 		   sched_smt_power_savings_show,
 		   sched_smt_power_savings_store);
 #endif
 
-int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+int __init sched_create_sysfs_power_savings_entries(struct device *dev)
 {
 	int err = 0;
 
 #ifdef CONFIG_SCHED_SMT
 	if (smt_capable())
-		err = sysfs_create_file(&cls->kset.kobj,
-					&attr_sched_smt_power_savings.attr);
+		err = device_create_file(dev, &dev_attr_sched_smt_power_savings);
 #endif
 #ifdef CONFIG_SCHED_MC
 	if (!err && mc_capable())
-		err = sysfs_create_file(&cls->kset.kobj,
-					&attr_sched_mc_power_savings.attr);
+		err = device_create_file(dev, &dev_attr_sched_mc_power_savings);
 #endif
 	return err;
 }
@@ -8023,29 +6756,6 @@
 	}
 }
 
-static int update_runtime(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
-{
-	int cpu = (int)(long)hcpu;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		disable_runtime(cpu_rq(cpu));
-		return NOTIFY_OK;
-
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		enable_runtime(cpu_rq(cpu));
-		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -8094,104 +6804,11 @@
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static void init_cfs_rq(struct cfs_rq *cfs_rq)
-{
-	cfs_rq->tasks_timeline = RB_ROOT;
-	INIT_LIST_HEAD(&cfs_rq->tasks);
-	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
-#ifndef CONFIG_64BIT
-	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
-#endif
-}
-
-static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
-{
-	struct rt_prio_array *array;
-	int i;
-
-	array = &rt_rq->active;
-	for (i = 0; i < MAX_RT_PRIO; i++) {
-		INIT_LIST_HEAD(array->queue + i);
-		__clear_bit(i, array->bitmap);
-	}
-	/* delimiter for bitsearch: */
-	__set_bit(MAX_RT_PRIO, array->bitmap);
-
-#if defined CONFIG_SMP
-	rt_rq->highest_prio.curr = MAX_RT_PRIO;
-	rt_rq->highest_prio.next = MAX_RT_PRIO;
-	rt_rq->rt_nr_migratory = 0;
-	rt_rq->overloaded = 0;
-	plist_head_init(&rt_rq->pushable_tasks);
+#ifdef CONFIG_CGROUP_SCHED
+struct task_group root_task_group;
 #endif
 
-	rt_rq->rt_time = 0;
-	rt_rq->rt_throttled = 0;
-	rt_rq->rt_runtime = 0;
-	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
-				struct sched_entity *se, int cpu,
-				struct sched_entity *parent)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	cfs_rq->tg = tg;
-	cfs_rq->rq = rq;
-#ifdef CONFIG_SMP
-	/* allow initial update_cfs_load() to truncate */
-	cfs_rq->load_stamp = 1;
-#endif
-	init_cfs_rq_runtime(cfs_rq);
-
-	tg->cfs_rq[cpu] = cfs_rq;
-	tg->se[cpu] = se;
-
-	/* se could be NULL for root_task_group */
-	if (!se)
-		return;
-
-	if (!parent)
-		se->cfs_rq = &rq->cfs;
-	else
-		se->cfs_rq = parent->my_q;
-
-	se->my_q = cfs_rq;
-	update_load_set(&se->load, 0);
-	se->parent = parent;
-}
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
-		struct sched_rt_entity *rt_se, int cpu,
-		struct sched_rt_entity *parent)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	rt_rq->highest_prio.curr = MAX_RT_PRIO;
-	rt_rq->rt_nr_boosted = 0;
-	rt_rq->rq = rq;
-	rt_rq->tg = tg;
-
-	tg->rt_rq[cpu] = rt_rq;
-	tg->rt_se[cpu] = rt_se;
-
-	if (!rt_se)
-		return;
-
-	if (!parent)
-		rt_se->rt_rq = &rq->rt;
-	else
-		rt_se->rt_rq = parent->my_q;
-
-	rt_se->my_q = rt_rq;
-	rt_se->parent = parent;
-	INIT_LIST_HEAD(&rt_se->run_list);
-}
-#endif
+DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
 void __init sched_init(void)
 {
@@ -8249,9 +6866,17 @@
 #ifdef CONFIG_CGROUP_SCHED
 	list_add(&root_task_group.list, &task_groups);
 	INIT_LIST_HEAD(&root_task_group.children);
+	INIT_LIST_HEAD(&root_task_group.siblings);
 	autogroup_init(&init_task);
+
 #endif /* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_CGROUP_CPUACCT
+	root_cpuacct.cpustat = &kernel_cpustat;
+	root_cpuacct.cpuusage = alloc_percpu(u64);
+	/* Too early, not expected to fail */
+	BUG_ON(!root_cpuacct.cpuusage);
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
@@ -8263,7 +6888,7 @@
 		init_cfs_rq(&rq->cfs);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		root_task_group.shares = root_task_group_load;
+		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
 		 * How much cpu bandwidth does root_task_group get?
@@ -8313,7 +6938,7 @@
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ
-		rq->nohz_balance_kick = 0;
+		rq->nohz_flags = 0;
 #endif
 #endif
 		init_rq_hrtick(rq);
@@ -8326,10 +6951,6 @@
 	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
 #endif
 
-#ifdef CONFIG_SMP
-	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
-#endif
-
 #ifdef CONFIG_RT_MUTEXES
 	plist_head_init(&init_task.pi_waiters);
 #endif
@@ -8357,17 +6978,11 @@
 
 #ifdef CONFIG_SMP
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
-#ifdef CONFIG_NO_HZ
-	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-	alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
-	atomic_set(&nohz.load_balancer, nr_cpu_ids);
-	atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
-	atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
-#endif
 	/* May be allocated at isolcpus cmdline parse time */
 	if (cpu_isolated_map == NULL)
 		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
-#endif /* SMP */
+#endif
+	init_sched_fair_class();
 
 	scheduler_running = 1;
 }
@@ -8519,169 +7134,14 @@
 
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static void free_fair_sched_group(struct task_group *tg)
-{
-	int i;
-
-	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
-
-	for_each_possible_cpu(i) {
-		if (tg->cfs_rq)
-			kfree(tg->cfs_rq[i]);
-		if (tg->se)
-			kfree(tg->se[i]);
-	}
-
-	kfree(tg->cfs_rq);
-	kfree(tg->se);
-}
-
-static
-int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
-{
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se;
-	int i;
-
-	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
-	if (!tg->cfs_rq)
-		goto err;
-	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
-	if (!tg->se)
-		goto err;
-
-	tg->shares = NICE_0_LOAD;
-
-	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
-
-	for_each_possible_cpu(i) {
-		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
-				      GFP_KERNEL, cpu_to_node(i));
-		if (!cfs_rq)
-			goto err;
-
-		se = kzalloc_node(sizeof(struct sched_entity),
-				  GFP_KERNEL, cpu_to_node(i));
-		if (!se)
-			goto err_free_rq;
-
-		init_cfs_rq(cfs_rq);
-		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
-	}
-
-	return 1;
-
-err_free_rq:
-	kfree(cfs_rq);
-err:
-	return 0;
-}
-
-static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long flags;
-
-	/*
-	* Only empty task groups can be destroyed; so we can speculatively
-	* check on_list without danger of it being re-added.
-	*/
-	if (!tg->cfs_rq[cpu]->on_list)
-		return;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-#else /* !CONFIG_FAIR_GROUP_SCHED */
-static inline void free_fair_sched_group(struct task_group *tg)
-{
-}
-
-static inline
-int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
-{
-	return 1;
-}
-
-static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
-{
-}
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
 #ifdef CONFIG_RT_GROUP_SCHED
-static void free_rt_sched_group(struct task_group *tg)
-{
-	int i;
-
-	if (tg->rt_se)
-		destroy_rt_bandwidth(&tg->rt_bandwidth);
-
-	for_each_possible_cpu(i) {
-		if (tg->rt_rq)
-			kfree(tg->rt_rq[i]);
-		if (tg->rt_se)
-			kfree(tg->rt_se[i]);
-	}
-
-	kfree(tg->rt_rq);
-	kfree(tg->rt_se);
-}
-
-static
-int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
-{
-	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se;
-	int i;
-
-	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
-	if (!tg->rt_rq)
-		goto err;
-	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
-	if (!tg->rt_se)
-		goto err;
-
-	init_rt_bandwidth(&tg->rt_bandwidth,
-			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
-
-	for_each_possible_cpu(i) {
-		rt_rq = kzalloc_node(sizeof(struct rt_rq),
-				     GFP_KERNEL, cpu_to_node(i));
-		if (!rt_rq)
-			goto err;
-
-		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
-				     GFP_KERNEL, cpu_to_node(i));
-		if (!rt_se)
-			goto err_free_rq;
-
-		init_rt_rq(rt_rq, cpu_rq(i));
-		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
-	}
-
-	return 1;
-
-err_free_rq:
-	kfree(rt_rq);
-err:
-	return 0;
-}
 #else /* !CONFIG_RT_GROUP_SCHED */
-static inline void free_rt_sched_group(struct task_group *tg)
-{
-}
-
-static inline
-int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
-{
-	return 1;
-}
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
+/* task_group_lock serializes the addition/removal of task groups */
+static DEFINE_SPINLOCK(task_group_lock);
+
 static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
@@ -8787,47 +7247,6 @@
 #endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static DEFINE_MUTEX(shares_mutex);
-
-int sched_group_set_shares(struct task_group *tg, unsigned long shares)
-{
-	int i;
-	unsigned long flags;
-
-	/*
-	 * We can't change the weight of the root cgroup.
-	 */
-	if (!tg->se[0])
-		return -EINVAL;
-
-	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
-
-	mutex_lock(&shares_mutex);
-	if (tg->shares == shares)
-		goto done;
-
-	tg->shares = shares;
-	for_each_possible_cpu(i) {
-		struct rq *rq = cpu_rq(i);
-		struct sched_entity *se;
-
-		se = tg->se[i];
-		/* Propagate contribution to hierarchy */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		for_each_sched_entity(se)
-			update_cfs_shares(group_cfs_rq(se));
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-	}
-
-done:
-	mutex_unlock(&shares_mutex);
-	return 0;
-}
-
-unsigned long sched_group_shares(struct task_group *tg)
-{
-	return tg->shares;
-}
 #endif
 
 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
@@ -8852,7 +7271,7 @@
 	struct task_struct *g, *p;
 
 	do_each_thread(g, p) {
-		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+		if (rt_task(p) && task_rq(p)->rt.tg == tg)
 			return 1;
 	} while_each_thread(g, p);
 
@@ -9203,8 +7622,8 @@
 
 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 {
-	int i, ret = 0, runtime_enabled;
-	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+	int i, ret = 0, runtime_enabled, runtime_was_enabled;
+	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
 	if (tg == &root_task_group)
 		return -EINVAL;
@@ -9231,6 +7650,8 @@
 		goto out_unlock;
 
 	runtime_enabled = quota != RUNTIME_INF;
+	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
+	account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
 	raw_spin_lock_irq(&cfs_b->lock);
 	cfs_b->period = ns_to_ktime(period);
 	cfs_b->quota = quota;
@@ -9246,13 +7667,13 @@
 
 	for_each_possible_cpu(i) {
 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
-		struct rq *rq = rq_of(cfs_rq);
+		struct rq *rq = cfs_rq->rq;
 
 		raw_spin_lock_irq(&rq->lock);
 		cfs_rq->runtime_enabled = runtime_enabled;
 		cfs_rq->runtime_remaining = 0;
 
-		if (cfs_rq_throttled(cfs_rq))
+		if (cfs_rq->throttled)
 			unthrottle_cfs_rq(cfs_rq);
 		raw_spin_unlock_irq(&rq->lock);
 	}
@@ -9266,7 +7687,7 @@
 {
 	u64 quota, period;
 
-	period = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
+	period = ktime_to_ns(tg->cfs_bandwidth.period);
 	if (cfs_quota_us < 0)
 		quota = RUNTIME_INF;
 	else
@@ -9279,10 +7700,10 @@
 {
 	u64 quota_us;
 
-	if (tg_cfs_bandwidth(tg)->quota == RUNTIME_INF)
+	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
 		return -1;
 
-	quota_us = tg_cfs_bandwidth(tg)->quota;
+	quota_us = tg->cfs_bandwidth.quota;
 	do_div(quota_us, NSEC_PER_USEC);
 
 	return quota_us;
@@ -9293,10 +7714,7 @@
 	u64 quota, period;
 
 	period = (u64)cfs_period_us * NSEC_PER_USEC;
-	quota = tg_cfs_bandwidth(tg)->quota;
-
-	if (period <= 0)
-		return -EINVAL;
+	quota = tg->cfs_bandwidth.quota;
 
 	return tg_set_cfs_bandwidth(tg, period, quota);
 }
@@ -9305,7 +7723,7 @@
 {
 	u64 cfs_period_us;
 
-	cfs_period_us = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
+	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
 	do_div(cfs_period_us, NSEC_PER_USEC);
 
 	return cfs_period_us;
@@ -9365,13 +7783,13 @@
 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 {
 	struct cfs_schedulable_data *d = data;
-	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 	s64 quota = 0, parent_quota = -1;
 
 	if (!tg->parent) {
 		quota = RUNTIME_INF;
 	} else {
-		struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
+		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
 
 		quota = normalize_cfs_quota(tg, d);
 		parent_quota = parent_b->hierarchal_quota;
@@ -9415,7 +7833,7 @@
 		struct cgroup_map_cb *cb)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
-	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
 	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
 	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
@@ -9516,38 +7934,16 @@
  * (balbir@in.ibm.com).
  */
 
-/* track cpu usage of a group of tasks and its child groups */
-struct cpuacct {
-	struct cgroup_subsys_state css;
-	/* cpuusage holds pointer to a u64-type object on every cpu */
-	u64 __percpu *cpuusage;
-	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
-	struct cpuacct *parent;
-};
-
-struct cgroup_subsys cpuacct_subsys;
-
-/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
-{
-	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
-/* return cpu accounting group to which this task belongs */
-static inline struct cpuacct *task_ca(struct task_struct *tsk)
-{
-	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
-			    struct cpuacct, css);
-}
-
 /* create a new cpu accounting group */
 static struct cgroup_subsys_state *cpuacct_create(
 	struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
-	int i;
+	struct cpuacct *ca;
 
+	if (!cgrp->parent)
+		return &root_cpuacct.css;
+
+	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
 	if (!ca)
 		goto out;
 
@@ -9555,18 +7951,13 @@
 	if (!ca->cpuusage)
 		goto out_free_ca;
 
-	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
-		if (percpu_counter_init(&ca->cpustat[i], 0))
-			goto out_free_counters;
-
-	if (cgrp->parent)
-		ca->parent = cgroup_ca(cgrp->parent);
+	ca->cpustat = alloc_percpu(struct kernel_cpustat);
+	if (!ca->cpustat)
+		goto out_free_cpuusage;
 
 	return &ca->css;
 
-out_free_counters:
-	while (--i >= 0)
-		percpu_counter_destroy(&ca->cpustat[i]);
+out_free_cpuusage:
 	free_percpu(ca->cpuusage);
 out_free_ca:
 	kfree(ca);
@@ -9579,10 +7970,8 @@
 cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);
-	int i;
 
-	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
-		percpu_counter_destroy(&ca->cpustat[i]);
+	free_percpu(ca->cpustat);
 	free_percpu(ca->cpuusage);
 	kfree(ca);
 }
@@ -9675,16 +8064,31 @@
 };
 
 static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
-		struct cgroup_map_cb *cb)
+			      struct cgroup_map_cb *cb)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);
-	int i;
+	int cpu;
+	s64 val = 0;
 
-	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
-		s64 val = percpu_counter_read(&ca->cpustat[i]);
-		val = cputime64_to_clock_t(val);
-		cb->fill(cb, cpuacct_stat_desc[i], val);
+	for_each_online_cpu(cpu) {
+		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
+		val += kcpustat->cpustat[CPUTIME_USER];
+		val += kcpustat->cpustat[CPUTIME_NICE];
 	}
+	val = cputime64_to_clock_t(val);
+	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
+
+	val = 0;
+	for_each_online_cpu(cpu) {
+		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
+		val += kcpustat->cpustat[CPUTIME_SYSTEM];
+		val += kcpustat->cpustat[CPUTIME_IRQ];
+		val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
+	}
+
+	val = cputime64_to_clock_t(val);
+	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
+
 	return 0;
 }
 
@@ -9714,7 +8118,7 @@
  *
  * called with rq->lock held.
  */
-static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
 	struct cpuacct *ca;
 	int cpu;
@@ -9728,7 +8132,7 @@
 
 	ca = task_ca(tsk);
 
-	for (; ca; ca = ca->parent) {
+	for (; ca; ca = parent_ca(ca)) {
 		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
@@ -9736,45 +8140,6 @@
 	rcu_read_unlock();
 }
 
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
- * in cputime_t units. As a result, cpuacct_update_stats calls
- * percpu_counter_add with values large enough to always overflow the
- * per cpu batch limit causing bad SMP scalability.
- *
- * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
- * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
- * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
- */
-#ifdef CONFIG_SMP
-#define CPUACCT_BATCH	\
-	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
-#else
-#define CPUACCT_BATCH	0
-#endif
-
-/*
- * Charge the system/user time to the task's accounting group.
- */
-static void cpuacct_update_stats(struct task_struct *tsk,
-		enum cpuacct_stat_index idx, cputime_t val)
-{
-	struct cpuacct *ca;
-	int batch = CPUACCT_BATCH;
-
-	if (unlikely(!cpuacct_subsys.active))
-		return;
-
-	rcu_read_lock();
-	ca = task_ca(tsk);
-
-	do {
-		__percpu_counter_add(&ca->cpustat[idx], val, batch);
-		ca = ca->parent;
-	} while (ca);
-	rcu_read_unlock();
-}
-
 struct cgroup_subsys cpuacct_subsys = {
 	.name = "cpuacct",
 	.create = cpuacct_create,
diff --git a/kernel/sched_cpupri.c b/kernel/sched/cpupri.c
similarity index 98%
rename from kernel/sched_cpupri.c
rename to kernel/sched/cpupri.c
index a86cf9d..b0d798e 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -1,5 +1,5 @@
 /*
- *  kernel/sched_cpupri.c
+ *  kernel/sched/cpupri.c
  *
  *  CPU priority management
  *
@@ -28,7 +28,7 @@
  */
 
 #include <linux/gfp.h>
-#include "sched_cpupri.h"
+#include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
 static int convert_prio(int prio)
diff --git a/kernel/sched_cpupri.h b/kernel/sched/cpupri.h
similarity index 100%
rename from kernel/sched_cpupri.h
rename to kernel/sched/cpupri.h
diff --git a/kernel/sched_debug.c b/kernel/sched/debug.c
similarity index 98%
rename from kernel/sched_debug.c
rename to kernel/sched/debug.c
index a6710a1..2a075e1 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched/debug.c
@@ -1,5 +1,5 @@
 /*
- * kernel/time/sched_debug.c
+ * kernel/sched/debug.c
  *
  * Print the CFS rbtree
  *
@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+#include "sched.h"
+
 static DEFINE_SPINLOCK(sched_debug_lock);
 
 /*
@@ -373,7 +375,7 @@
 	return 0;
 }
 
-static void sysrq_sched_debug_show(void)
+void sysrq_sched_debug_show(void)
 {
 	sched_debug_show(NULL, NULL);
 }
diff --git a/kernel/sched_fair.c b/kernel/sched/fair.c
similarity index 86%
rename from kernel/sched_fair.c
rename to kernel/sched/fair.c
index a78ed27..8e42de9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched/fair.c
@@ -23,6 +23,13 @@
 #include <linux/latencytop.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/profile.h>
+#include <linux/interrupt.h>
+
+#include <trace/events/sched.h>
+
+#include "sched.h"
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -103,7 +110,110 @@
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
 
-static const struct sched_class fair_sched_class;
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static int get_update_sysctl_factor(void)
+{
+	unsigned int cpus = min_t(int, num_online_cpus(), 8);
+	unsigned int factor;
+
+	switch (sysctl_sched_tunable_scaling) {
+	case SCHED_TUNABLESCALING_NONE:
+		factor = 1;
+		break;
+	case SCHED_TUNABLESCALING_LINEAR:
+		factor = cpus;
+		break;
+	case SCHED_TUNABLESCALING_LOG:
+	default:
+		factor = 1 + ilog2(cpus);
+		break;
+	}
+
+	return factor;
+}
+
+static void update_sysctl(void)
+{
+	unsigned int factor = get_update_sysctl_factor();
+
+#define SET_SYSCTL(name) \
+	(sysctl_##name = (factor) * normalized_sysctl_##name)
+	SET_SYSCTL(sched_min_granularity);
+	SET_SYSCTL(sched_latency);
+	SET_SYSCTL(sched_wakeup_granularity);
+#undef SET_SYSCTL
+}
+
+void sched_init_granularity(void)
+{
+	update_sysctl();
+}
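/*
 * Editor's illustrative sketch (userspace, assumes the default normalized
 * sched_latency of 6 ms): with SCHED_TUNABLESCALING_LOG the factor grows
 * with log2(nr_cpus), capped at 8 CPUs, so an 8-way box gets a 4x wider
 * latency window than a uniprocessor.
 */
#include <stdio.h>

static int factor_log(int cpus)
{
	int f = 1, c = cpus > 8 ? 8 : cpus;

	while (c > 1) {		/* 1 + ilog2(min(cpus, 8)) */
		c >>= 1;
		f++;
	}
	return f;
}

int main(void)
{
	int cpus[] = { 1, 2, 4, 8, 64 };

	for (int i = 0; i < 5; i++)
		printf("%2d cpus -> factor %d -> sched_latency %d ms\n",
		       cpus[i], factor_log(cpus[i]), 6 * factor_log(cpus[i]));
	return 0;
}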
+
+#if BITS_PER_LONG == 32
+# define WMULT_CONST	(~0UL)
+#else
+# define WMULT_CONST	(1UL << 32)
+#endif
+
+#define WMULT_SHIFT	32
+
+/*
+ * Shift right and round:
+ */
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+
+/*
+ * delta *= weight / lw
+ */
+static unsigned long
+calc_delta_mine(unsigned long delta_exec, unsigned long weight,
+		struct load_weight *lw)
+{
+	u64 tmp;
+
+	/*
+	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
+	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
+	 * 2^SCHED_LOAD_RESOLUTION.
+	 */
+	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
+		tmp = (u64)delta_exec * scale_load_down(weight);
+	else
+		tmp = (u64)delta_exec;
+
+	if (!lw->inv_weight) {
+		unsigned long w = scale_load_down(lw->weight);
+
+		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+			lw->inv_weight = 1;
+		else if (unlikely(!w))
+			lw->inv_weight = WMULT_CONST;
+		else
+			lw->inv_weight = WMULT_CONST / w;
+	}
+
+	/*
+	 * Check whether we'd overflow the 64-bit multiplication:
+	 */
+	if (unlikely(tmp > WMULT_CONST))
+		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+			WMULT_SHIFT/2);
+	else
+		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+
+	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+}
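/*
 * Editor's illustrative sketch (userspace, made-up numbers): calc_delta_mine()
 * avoids a 64-bit division on every update by caching inv_weight = 2^32 / w
 * and turning "delta * weight / lw->weight" into a multiply plus a rounded
 * right shift (SRR). The program below mimics that arithmetic to show the
 * fixed-point result tracks the exact division very closely.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_WMULT_SHIFT	32
#define DEMO_SRR(x, y)	(((x) + (1ULL << ((y) - 1))) >> (y))

int main(void)
{
	uint64_t delta_exec = 3000000;		/* ns of runtime, arbitrary */
	uint64_t weight = 1024;			/* nice-0 task weight */
	uint64_t lw_weight = 1024 + 820;	/* queue load: nice 0 + nice 1 */
	uint64_t inv_weight = (1ULL << 32) / lw_weight;

	uint64_t approx = DEMO_SRR(delta_exec * weight * inv_weight,
				   DEMO_WMULT_SHIFT);
	uint64_t exact = delta_exec * weight / lw_weight;

	printf("approx=%llu exact=%llu\n",
	       (unsigned long long)approx, (unsigned long long)exact);
	return 0;
}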
+
+
+const struct sched_class fair_sched_class;
 
 /**************************************************************
  * CFS operations on generic schedulable entities:
@@ -413,7 +523,7 @@
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
-static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
+struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *left = cfs_rq->rb_leftmost;
 
@@ -434,7 +544,7 @@
 }
 
 #ifdef CONFIG_SCHED_DEBUG
-static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
@@ -684,7 +794,7 @@
 {
 	update_load_add(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se))
-		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
+		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 	if (entity_is_task(se)) {
 		add_cfs_task_weight(cfs_rq, se->load.weight);
 		list_add(&se->group_node, &cfs_rq->tasks);
@@ -697,7 +807,7 @@
 {
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se))
-		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
+		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
 	if (entity_is_task(se)) {
 		add_cfs_task_weight(cfs_rq, -se->load.weight);
 		list_del_init(&se->group_node);
@@ -893,7 +1003,6 @@
 		if (unlikely(delta > se->statistics.sleep_max))
 			se->statistics.sleep_max = delta;
 
-		se->statistics.sleep_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
@@ -910,7 +1019,6 @@
 		if (unlikely(delta > se->statistics.block_max))
 			se->statistics.block_max = delta;
 
-		se->statistics.block_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
@@ -920,6 +1028,8 @@
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
+			trace_sched_stat_blocked(tsk, delta);
+
 			/*
 			 * Blocking time is in units of nanosecs, so shift by
 			 * 20 to get a milliseconds-range estimation of the
@@ -1287,6 +1397,32 @@
  */
 
 #ifdef CONFIG_CFS_BANDWIDTH
+
+#ifdef HAVE_JUMP_LABEL
+static struct jump_label_key __cfs_bandwidth_used;
+
+static inline bool cfs_bandwidth_used(void)
+{
+	return static_branch(&__cfs_bandwidth_used);
+}
+
+void account_cfs_bandwidth_used(int enabled, int was_enabled)
+{
+	/* only need to count groups transitioning between enabled/!enabled */
+	if (enabled && !was_enabled)
+		jump_label_inc(&__cfs_bandwidth_used);
+	else if (!enabled && was_enabled)
+		jump_label_dec(&__cfs_bandwidth_used);
+}
+#else /* HAVE_JUMP_LABEL */
+static bool cfs_bandwidth_used(void)
+{
+	return true;
+}
+
+void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+#endif /* HAVE_JUMP_LABEL */
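
The point of the jump-label pair above is that cfs_bandwidth_used() costs only a patched branch on the hot paths below, and it flips only when the number of bandwidth-enabled groups crosses zero. A userspace sketch of that counting rule, with a plain counter standing in for the static branch (the ex_ names are illustrative, not from the patch):

#include <stdio.h>

static int ex_nr_bw_groups;		/* stands in for __cfs_bandwidth_used */

static int ex_bandwidth_used(void)	/* stands in for cfs_bandwidth_used() */
{
	return ex_nr_bw_groups > 0;
}

static void ex_account(int enabled, int was_enabled)
{
	/* only enabled/!enabled transitions touch the counter */
	if (enabled && !was_enabled)
		ex_nr_bw_groups++;	/* jump_label_inc() */
	else if (!enabled && was_enabled)
		ex_nr_bw_groups--;	/* jump_label_dec() */
}

int main(void)
{
	ex_account(1, 0);	/* a group gets a quota for the first time */
	ex_account(1, 1);	/* quota merely changed: no transition */
	printf("used: %d\n", ex_bandwidth_used());	/* 1 */
	ex_account(0, 1);	/* quota removed again */
	printf("used: %d\n", ex_bandwidth_used());	/* 0 */
	return 0;
}
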
+
 /*
  * default period for cfs group bandwidth.
  * default: 0.1s, units: nanoseconds
@@ -1308,7 +1444,7 @@
  *
  * requires cfs_b->lock
  */
-static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
+void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
 	u64 now;
 
@@ -1320,6 +1456,11 @@
 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
 }
 
+static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+{
+	return &tg->cfs_bandwidth;
+}
+
 /* returns 0 on failure to allocate runtime */
 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -1421,7 +1562,7 @@
 static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 						   unsigned long delta_exec)
 {
-	if (!cfs_rq->runtime_enabled)
+	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 		return;
 
 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
@@ -1429,13 +1570,13 @@
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
-	return cfs_rq->throttled;
+	return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 {
-	return cfs_rq->throttle_count;
+	return cfs_bandwidth_used() && cfs_rq->throttle_count;
 }
 
 /*
@@ -1530,7 +1671,7 @@
 	raw_spin_unlock(&cfs_b->lock);
 }
 
-static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
@@ -1756,6 +1897,9 @@
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
+	if (!cfs_bandwidth_used())
+		return;
+
 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
 		return;
 
@@ -1801,6 +1945,9 @@
  */
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 {
+	if (!cfs_bandwidth_used())
+		return;
+
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -1818,6 +1965,9 @@
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
+	if (!cfs_bandwidth_used())
+		return;
+
 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
 		return;
 
@@ -1830,7 +1980,112 @@
 
 	throttle_cfs_rq(cfs_rq);
 }
-#else
+
+static inline u64 default_cfs_period(void);
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
+static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
+
+static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
+{
+	struct cfs_bandwidth *cfs_b =
+		container_of(timer, struct cfs_bandwidth, slack_timer);
+	do_sched_cfs_slack_timer(cfs_b);
+
+	return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+{
+	struct cfs_bandwidth *cfs_b =
+		container_of(timer, struct cfs_bandwidth, period_timer);
+	ktime_t now;
+	int overrun;
+	int idle = 0;
+
+	for (;;) {
+		now = hrtimer_cb_get_time(timer);
+		overrun = hrtimer_forward(timer, now, cfs_b->period);
+
+		if (!overrun)
+			break;
+
+		idle = do_sched_cfs_period_timer(cfs_b, overrun);
+	}
+
+	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+}
+
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+	raw_spin_lock_init(&cfs_b->lock);
+	cfs_b->runtime = 0;
+	cfs_b->quota = RUNTIME_INF;
+	cfs_b->period = ns_to_ktime(default_cfs_period());
+
+	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
+	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	cfs_b->period_timer.function = sched_cfs_period_timer;
+	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	cfs_b->slack_timer.function = sched_cfs_slack_timer;
+}
+
+static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->runtime_enabled = 0;
+	INIT_LIST_HEAD(&cfs_rq->throttled_list);
+}
+
+/* requires cfs_b->lock, may release to reprogram timer */
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+	/*
+	 * The timer may be active because we're trying to set a new bandwidth
+	 * period or because we're racing with the tear-down path
+	 * (timer_active==0 becomes visible before the hrtimer call-back
+	 * terminates).  In either case we ensure that it's re-programmed
+	 */
+	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
+		raw_spin_unlock(&cfs_b->lock);
+		/* ensure cfs_b->lock is available while we wait */
+		hrtimer_cancel(&cfs_b->period_timer);
+
+		raw_spin_lock(&cfs_b->lock);
+		/* if someone else restarted the timer then we're done */
+		if (cfs_b->timer_active)
+			return;
+	}
+
+	cfs_b->timer_active = 1;
+	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
+}
+
+static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+	hrtimer_cancel(&cfs_b->period_timer);
+	hrtimer_cancel(&cfs_b->slack_timer);
+}
+
+void unthrottle_offline_cfs_rqs(struct rq *rq)
+{
+	struct cfs_rq *cfs_rq;
+
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+
+		if (!cfs_rq->runtime_enabled)
+			continue;
+
+		/*
+		 * clock_task is not advancing so we just need to make sure
+		 * there's some valid quota amount
+		 */
+		cfs_rq->runtime_remaining = cfs_b->quota;
+		if (cfs_rq_throttled(cfs_rq))
+			unthrottle_cfs_rq(cfs_rq);
+	}
+}
+
+#else /* CONFIG_CFS_BANDWIDTH */
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 				     unsigned long delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -1852,8 +2107,22 @@
 {
 	return 0;
 }
+
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 #endif
 
+static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+{
+	return NULL;
+}
+static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
 /**************************************************
  * CFS operations on tasks:
  */
@@ -1866,7 +2135,7 @@
 
 	WARN_ON(task_rq(p) != rq);
 
-	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
+	if (cfs_rq->nr_running > 1) {
 		u64 slice = sched_slice(cfs_rq, se);
 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 		s64 delta = slice - ran;
@@ -1897,7 +2166,7 @@
 {
 	struct task_struct *curr = rq->curr;
 
-	if (curr->sched_class != &fair_sched_class)
+	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
 		return;
 
 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
@@ -2020,6 +2289,61 @@
 }
 
 #ifdef CONFIG_SMP
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+	return cpu_rq(cpu)->load.weight;
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static unsigned long source_load(int cpu, int type)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long total = weighted_cpuload(cpu);
+
+	if (type == 0 || !sched_feat(LB_BIAS))
+		return total;
+
+	return min(rq->cpu_load[type-1], total);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static unsigned long target_load(int cpu, int type)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long total = weighted_cpuload(cpu);
+
+	if (type == 0 || !sched_feat(LB_BIAS))
+		return total;
+
+	return max(rq->cpu_load[type-1], total);
+}
+
+static unsigned long power_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_power;
+}
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+
+	if (nr_running)
+		return rq->load.weight / nr_running;
+
+	return 0;
+}
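
Taken together, weighted_cpuload(), source_load() and target_load() give a deliberately pessimistic view when LB_BIAS is enabled: a candidate source is assumed to be as light as its recent history or its current weight allows, a candidate target as heavy as either suggests, so balancing errs toward not migrating. A toy illustration with made-up numbers (not part of the patch):

#include <stdio.h>

static unsigned long ex_min(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long ex_max(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
	unsigned long cpu_load_hist = 3072;	/* decayed cpu_load[] sample */
	unsigned long weighted_now = 2048;	/* instantaneous rq->load.weight */

	/* prints 2048 and 3072: low guess for sources, high guess for targets */
	printf("source_load -> %lu, target_load -> %lu\n",
	       ex_min(cpu_load_hist, weighted_now),
	       ex_max(cpu_load_hist, weighted_now));
	return 0;
}
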
+
 
 static void task_waking_fair(struct task_struct *p)
 {
@@ -2327,7 +2651,7 @@
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int i, smt = 0;
+	int i;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2347,19 +2671,9 @@
 	 * Otherwise, iterate the domains and find an eligible idle cpu.
 	 */
 	rcu_read_lock();
-again:
-	for_each_domain(target, sd) {
-		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
-			continue;
 
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
-			if (!smt) {
-				smt = 1;
-				goto again;
-			}
-			break;
-		}
-
+	sd = rcu_dereference(per_cpu(sd_llc, target));
+	for_each_lower_domain(sd) {
 		sg = sd->groups;
 		do {
 			if (!cpumask_intersects(sched_group_cpus(sg),
@@ -2406,6 +2720,9 @@
 	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
+	if (p->rt.nr_cpus_allowed == 1)
+		return prev_cpu;
+
 	if (sd_flag & SD_BALANCE_WAKE) {
 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			want_affine = 1;
@@ -2690,7 +3007,8 @@
 	} while (cfs_rq);
 
 	p = task_of(se);
-	hrtick_start_fair(rq, p);
+	if (hrtick_enabled(rq))
+		hrtick_start_fair(rq, p);
 
 	return p;
 }
@@ -2734,6 +3052,12 @@
 		 * Update run-time statistics of the 'current'.
 		 */
 		update_curr(cfs_rq);
+		/*
+		 * Tell update_rq_clock() that we've just updated,
+		 * so we don't do microscopic update in schedule()
+		 * and double the fastpath cost.
+		 */
+		 rq->skip_clock_update = 1;
 	}
 
 	set_skip_buddy(se);
@@ -2774,12 +3098,48 @@
 }
 
 /*
+ * Is this task likely cache-hot:
+ */
+static int
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+{
+	s64 delta;
+
+	if (p->sched_class != &fair_sched_class)
+		return 0;
+
+	if (unlikely(p->policy == SCHED_IDLE))
+		return 0;
+
+	/*
+	 * Buddy candidates are cache hot:
+	 */
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
+		return 1;
+
+	if (sysctl_sched_migration_cost == -1)
+		return 1;
+	if (sysctl_sched_migration_cost == 0)
+		return 0;
+
+	delta = now - p->se.exec_start;
+
+	return delta < (s64)sysctl_sched_migration_cost;
+}
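
For scale: sysctl_sched_migration_cost usually defaults to 500000 ns (0.5 ms; an assumption about the usual default, not something this patch sets), so with neither of the -1/0 overrides a fair task that began running within the last half millisecond is treated as cache hot. A trivial sketch of that cutoff:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t migration_cost = 500000;	/* ns; assumed default */
	uint64_t now = 10000000;		/* made-up rq clock, ns */
	uint64_t exec_start = 9800000;		/* last started running 0.2 ms ago */

	/* prints 1: the 200000 ns delta is below the migration cost */
	printf("cache hot: %d\n", (int64_t)(now - exec_start) < migration_cost);
	return 0;
}
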
+
+#define LBF_ALL_PINNED	0x01
+#define LBF_NEED_BREAK	0x02
+#define LBF_ABORT	0x04
+
+/*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
 static
 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 		     struct sched_domain *sd, enum cpu_idle_type idle,
-		     int *all_pinned)
+		     int *lb_flags)
 {
 	int tsk_cache_hot = 0;
 	/*
@@ -2792,7 +3152,7 @@
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
-	*all_pinned = 0;
+	*lb_flags &= ~LBF_ALL_PINNED;
 
 	if (task_running(rq, p)) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
@@ -2866,7 +3226,7 @@
 static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
+	      enum cpu_idle_type idle, int *lb_flags,
 	      struct cfs_rq *busiest_cfs_rq)
 {
 	int loops = 0, pulled = 0;
@@ -2877,12 +3237,14 @@
 		goto out;
 
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
-		if (loops++ > sysctl_sched_nr_migrate)
+		if (loops++ > sysctl_sched_nr_migrate) {
+			*lb_flags |= LBF_NEED_BREAK;
 			break;
+		}
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
 		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
-				      all_pinned))
+				      lb_flags))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2895,8 +3257,10 @@
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (idle == CPU_NEWLY_IDLE)
+		if (idle == CPU_NEWLY_IDLE) {
+			*lb_flags |= LBF_ABORT;
 			break;
+		}
 #endif
 
 		/*
@@ -3001,7 +3365,7 @@
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned)
+		  int *lb_flags)
 {
 	long rem_load_move = max_load_move;
 	struct cfs_rq *busiest_cfs_rq;
@@ -3014,6 +3378,9 @@
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
 
+		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+			break;
+
 		/*
 		 * empty group or part of a throttled hierarchy
 		 */
@@ -3025,7 +3392,7 @@
 		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
 		moved_load = balance_tasks(this_rq, this_cpu, busiest,
-				rem_load, sd, idle, all_pinned,
+				rem_load, sd, idle, lb_flags,
 				busiest_cfs_rq);
 
 		if (!moved_load)
@@ -3051,10 +3418,10 @@
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned)
+		  int *lb_flags)
 {
 	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, all_pinned,
+			max_load_move, sd, idle, lb_flags,
 			&busiest->cfs);
 }
 #endif
@@ -3069,29 +3436,30 @@
 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_load_move,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *all_pinned)
+		      int *lb_flags)
 {
 	unsigned long total_load_moved = 0, load_moved;
 
 	do {
 		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
-				sd, idle, all_pinned);
+				sd, idle, lb_flags);
 
 		total_load_moved += load_moved;
 
+		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+			break;
+
 #ifdef CONFIG_PREEMPT
 		/*
 		 * NEWIDLE balancing is a source of latency, so preemptible
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
+			*lb_flags |= LBF_ABORT;
 			break;
-
-		if (raw_spin_is_contended(&this_rq->lock) ||
-				raw_spin_is_contended(&busiest->lock))
-			break;
+		}
 #endif
 	} while (load_moved && max_load_move > total_load_moved);
 
@@ -3153,15 +3521,6 @@
 };
 
 /**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
-/**
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
  * @idle: The idle status of the CPU for whose sd load_idx is obtained.
@@ -3410,7 +3769,7 @@
 	sdg->sgp->power = power;
 }
 
-static void update_group_power(struct sched_domain *sd, int cpu)
+void update_group_power(struct sched_domain *sd, int cpu)
 {
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
@@ -3676,11 +4035,6 @@
 	} while (sg != sd->groups);
 }
 
-int __weak arch_sd_sibling_asym_packing(void)
-{
-       return 0*SD_ASYM_PACKING;
-}
-
 /**
  * check_asym_packing - Check to see if the group is packed into the
  *			sched domain.
@@ -4044,7 +4398,7 @@
 #define MAX_PINNED_INTERVAL	512
 
 /* Working cpumask for load_balance and load_balance_newidle. */
-static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
 static int need_active_balance(struct sched_domain *sd, int idle,
 			       int busiest_cpu, int this_cpu)
@@ -4095,7 +4449,7 @@
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *balance)
 {
-	int ld_moved, all_pinned = 0, active_balance = 0;
+	int ld_moved, lb_flags = 0, active_balance = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
@@ -4136,11 +4490,11 @@
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
-		all_pinned = 1;
+		lb_flags |= LBF_ALL_PINNED;
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
-				      imbalance, sd, idle, &all_pinned);
+				      imbalance, sd, idle, &lb_flags);
 		double_rq_unlock(this_rq, busiest);
 		local_irq_restore(flags);
 
@@ -4150,8 +4504,16 @@
 		if (ld_moved && this_cpu != smp_processor_id())
 			resched_cpu(this_cpu);
 
+		if (lb_flags & LBF_ABORT)
+			goto out_balanced;
+
+		if (lb_flags & LBF_NEED_BREAK) {
+			lb_flags &= ~LBF_NEED_BREAK;
+			goto redo;
+		}
+
 		/* All tasks on this runqueue were pinned by CPU affinity */
-		if (unlikely(all_pinned)) {
+		if (unlikely(lb_flags & LBF_ALL_PINNED)) {
 			cpumask_clear_cpu(cpu_of(busiest), cpus);
 			if (!cpumask_empty(cpus))
 				goto redo;
@@ -4181,7 +4543,7 @@
 					tsk_cpus_allowed(busiest->curr))) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
-				all_pinned = 1;
+				lb_flags |= LBF_ALL_PINNED;
 				goto out_one_pinned;
 			}
 
@@ -4234,7 +4596,8 @@
 
 out_one_pinned:
 	/* tune up the balancing interval */
-	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+	if (((lb_flags & LBF_ALL_PINNED) &&
+			sd->balance_interval < MAX_PINNED_INTERVAL) ||
 			(sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
 
@@ -4247,7 +4610,7 @@
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static void idle_balance(int this_cpu, struct rq *this_rq)
+void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -4362,28 +4725,16 @@
 #ifdef CONFIG_NO_HZ
 /*
  * idle load balancing details
- * - One of the idle CPUs nominates itself as idle load_balancer, while
- *   entering idle.
- * - This idle load balancer CPU will also go into tickless mode when
- *   it is idle, just like all other idle CPUs
  * - When one of the busy CPUs notices that there may be an idle rebalancing
  *   needed, they will kick the idle load balancer, which then does idle
  *   load balancing for all the idle CPUs.
  */
 static struct {
-	atomic_t load_balancer;
-	atomic_t first_pick_cpu;
-	atomic_t second_pick_cpu;
 	cpumask_var_t idle_cpus_mask;
-	cpumask_var_t grp_idle_mask;
+	atomic_t nr_cpus;
 	unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
 
-int get_nohz_load_balancer(void)
-{
-	return atomic_read(&nohz.load_balancer);
-}
-
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 /**
  * lowest_flag_domain - Return lowest sched_domain containing flag.
@@ -4420,33 +4771,6 @@
 		(sd && (sd->flags & flag)); sd = sd->parent)
 
 /**
- * is_semi_idle_group - Checks if the given sched_group is semi-idle.
- * @ilb_group:	group to be checked for semi-idleness
- *
- * Returns:	1 if the group is semi-idle. 0 otherwise.
- *
- * We define a sched_group to be semi idle if it has atleast one idle-CPU
- * and atleast one non-idle CPU. This helper function checks if the given
- * sched_group is semi-idle or not.
- */
-static inline int is_semi_idle_group(struct sched_group *ilb_group)
-{
-	cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
-					sched_group_cpus(ilb_group));
-
-	/*
-	 * A sched_group is semi-idle when it has atleast one busy cpu
-	 * and atleast one idle cpu.
-	 */
-	if (cpumask_empty(nohz.grp_idle_mask))
-		return 0;
-
-	if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
-		return 0;
-
-	return 1;
-}
-/**
  * find_new_ilb - Finds the optimum idle load balancer for nomination.
  * @cpu:	The cpu which is nominating a new idle_load_balancer.
  *
@@ -4460,9 +4784,9 @@
  */
 static int find_new_ilb(int cpu)
 {
+	int ilb = cpumask_first(nohz.idle_cpus_mask);
+	struct sched_group *ilbg;
 	struct sched_domain *sd;
-	struct sched_group *ilb_group;
-	int ilb = nr_cpu_ids;
 
 	/*
 	 * Have idle load balancer selection from semi-idle packages only
@@ -4480,23 +4804,28 @@
 
 	rcu_read_lock();
 	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
-		ilb_group = sd->groups;
+		ilbg = sd->groups;
 
 		do {
-			if (is_semi_idle_group(ilb_group)) {
-				ilb = cpumask_first(nohz.grp_idle_mask);
+			if (ilbg->group_weight !=
+				atomic_read(&ilbg->sgp->nr_busy_cpus)) {
+				ilb = cpumask_first_and(nohz.idle_cpus_mask,
+							sched_group_cpus(ilbg));
 				goto unlock;
 			}
 
-			ilb_group = ilb_group->next;
+			ilbg = ilbg->next;
 
-		} while (ilb_group != sd->groups);
+		} while (ilbg != sd->groups);
 	}
 unlock:
 	rcu_read_unlock();
 
 out_done:
-	return ilb;
+	if (ilb < nr_cpu_ids && idle_cpu(ilb))
+		return ilb;
+
+	return nr_cpu_ids;
 }
 #else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
 static inline int find_new_ilb(int call_cpu)
@@ -4516,99 +4845,68 @@
 
 	nohz.next_balance++;
 
-	ilb_cpu = get_nohz_load_balancer();
+	ilb_cpu = find_new_ilb(cpu);
 
-	if (ilb_cpu >= nr_cpu_ids) {
-		ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
-		if (ilb_cpu >= nr_cpu_ids)
-			return;
-	}
+	if (ilb_cpu >= nr_cpu_ids)
+		return;
 
-	if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-		cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-
-		smp_mb();
-		/*
-		 * Use smp_send_reschedule() instead of resched_cpu().
-		 * This way we generate a sched IPI on the target cpu which
-		 * is idle. And the softirq performing nohz idle load balance
-		 * will be run before returning from the IPI.
-		 */
-		smp_send_reschedule(ilb_cpu);
-	}
+	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+		return;
+	/*
+	 * Use smp_send_reschedule() instead of resched_cpu().
+	 * This way we generate a sched IPI on the target cpu which
+	 * is idle. And the softirq performing nohz idle load balance
+	 * will be run before returning from the IPI.
+	 */
+	smp_send_reschedule(ilb_cpu);
 	return;
 }
 
+static inline void set_cpu_sd_state_busy(void)
+{
+	struct sched_domain *sd;
+	int cpu = smp_processor_id();
+
+	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+		return;
+	clear_bit(NOHZ_IDLE, nohz_flags(cpu));
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+	rcu_read_unlock();
+}
+
+void set_cpu_sd_state_idle(void)
+{
+	struct sched_domain *sd;
+	int cpu = smp_processor_id();
+
+	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+		return;
+	set_bit(NOHZ_IDLE, nohz_flags(cpu));
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+	rcu_read_unlock();
+}
+
 /*
- * This routine will try to nominate the ilb (idle load balancing)
- * owner among the cpus whose ticks are stopped. ilb owner will do the idle
- * load balancing on behalf of all those cpus.
- *
- * When the ilb owner becomes busy, we will not have new ilb owner until some
- * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
- * idle load balancing by kicking one of the idle CPUs.
- *
- * Ticks are stopped for the ilb owner as well, with busy CPU kicking this
- * ilb owner CPU in future (when there is a need for idle load balancing on
- * behalf of all idle CPUs).
+ * This routine records that this cpu is going idle with its tick stopped.
+ * This info will be used to perform idle load balancing in the future.
  */
 void select_nohz_load_balancer(int stop_tick)
 {
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		if (!cpu_active(cpu)) {
-			if (atomic_read(&nohz.load_balancer) != cpu)
-				return;
-
-			/*
-			 * If we are going offline and still the leader,
-			 * give up!
-			 */
-			if (atomic_cmpxchg(&nohz.load_balancer, cpu,
-					   nr_cpu_ids) != cpu)
-				BUG();
-
+		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 			return;
-		}
 
 		cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
-
-		if (atomic_read(&nohz.first_pick_cpu) == cpu)
-			atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
-		if (atomic_read(&nohz.second_pick_cpu) == cpu)
-			atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
-
-		if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
-			int new_ilb;
-
-			/* make me the ilb owner */
-			if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
-					   cpu) != nr_cpu_ids)
-				return;
-
-			/*
-			 * Check to see if there is a more power-efficient
-			 * ilb.
-			 */
-			new_ilb = find_new_ilb(cpu);
-			if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
-				atomic_set(&nohz.load_balancer, nr_cpu_ids);
-				resched_cpu(new_ilb);
-				return;
-			}
-			return;
-		}
-	} else {
-		if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
-			return;
-
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-
-		if (atomic_read(&nohz.load_balancer) == cpu)
-			if (atomic_cmpxchg(&nohz.load_balancer, cpu,
-					   nr_cpu_ids) != cpu)
-				BUG();
+		atomic_inc(&nohz.nr_cpus);
+		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 	return;
 }
@@ -4622,7 +4920,7 @@
  * Scale the max load_balance interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
  */
-static void update_max_interval(void)
+void update_max_interval(void)
 {
 	max_load_balance_interval = HZ*num_online_cpus()/10;
 }
@@ -4714,11 +5012,12 @@
 	struct rq *rq;
 	int balance_cpu;
 
-	if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
-		return;
+	if (idle != CPU_IDLE ||
+	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
-		if (balance_cpu == this_cpu)
+		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
 			continue;
 
 		/*
@@ -4726,10 +5025,8 @@
 		 * work being done for other cpus. Next load
 		 * balancing owner will pick it up.
 		 */
-		if (need_resched()) {
-			this_rq->nohz_balance_kick = 0;
+		if (need_resched())
 			break;
-		}
 
 		raw_spin_lock_irq(&this_rq->lock);
 		update_rq_clock(this_rq);
@@ -4743,53 +5040,75 @@
 			this_rq->next_balance = rq->next_balance;
 	}
 	nohz.next_balance = this_rq->next_balance;
-	this_rq->nohz_balance_kick = 0;
+end:
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
- * Current heuristic for kicking the idle load balancer
- * - first_pick_cpu is the one of the busy CPUs. It will kick
- *   idle load balancer when it has more than one process active. This
- *   eliminates the need for idle load balancing altogether when we have
- *   only one running process in the system (common case).
- * - If there are more than one busy CPU, idle load balancer may have
- *   to run for active_load_balance to happen (i.e., two busy CPUs are
- *   SMT or core siblings and can run better if they move to different
- *   physical CPUs). So, second_pick_cpu is the second of the busy CPUs
- *   which will kick idle load balancer as soon as it has any load.
+ * Current heuristic for kicking the idle load balancer in the presence
+ * of an idle cpu in the system.
+ *   - This rq has more than one task.
+ *   - At any scheduler domain level, this cpu's scheduler group has multiple
+ *     busy cpus exceeding the group's power.
+ *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
+ *     domain span are idle.
  */
 static inline int nohz_kick_needed(struct rq *rq, int cpu)
 {
 	unsigned long now = jiffies;
-	int ret;
-	int first_pick_cpu, second_pick_cpu;
+	struct sched_domain *sd;
+
+	if (unlikely(idle_cpu(cpu)))
+		return 0;
+
+	/*
+	 * We may have recently been in ticked or tickless idle mode. At the
+	 * first busy tick after returning from idle, we will update the busy
+	 * stats.
+	 */
+	set_cpu_sd_state_busy();
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+	}
+
+	/*
+	 * None are in tickless mode and hence no need for NOHZ idle load
+	 * balancing.
+	 */
+	if (likely(!atomic_read(&nohz.nr_cpus)))
+		return 0;
 
 	if (time_before(now, nohz.next_balance))
 		return 0;
 
-	if (idle_cpu(cpu))
-		return 0;
+	if (rq->nr_running >= 2)
+		goto need_kick;
 
-	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
-	second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
+	rcu_read_lock();
+	for_each_domain(cpu, sd) {
+		struct sched_group *sg = sd->groups;
+		struct sched_group_power *sgp = sg->sgp;
+		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
 
-	if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
-	    second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
-		return 0;
+		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
+			goto need_kick_unlock;
 
-	ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
-	if (ret == nr_cpu_ids || ret == cpu) {
-		atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
-		if (rq->nr_running > 1)
-			return 1;
-	} else {
-		ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
-		if (ret == nr_cpu_ids || ret == cpu) {
-			if (rq->nr_running)
-				return 1;
-		}
+		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
+		    && (cpumask_first_and(nohz.idle_cpus_mask,
+					  sched_domain_span(sd)) < cpu))
+			goto need_kick_unlock;
+
+		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
+			break;
 	}
+	rcu_read_unlock();
 	return 0;
+
+need_kick_unlock:
+	rcu_read_unlock();
+need_kick:
+	return 1;
 }
 #else
 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
@@ -4824,14 +5143,14 @@
 /*
  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  */
-static inline void trigger_load_balance(struct rq *rq, int cpu)
+void trigger_load_balance(struct rq *rq, int cpu)
 {
 	/* Don't need to rebalance while attached to NULL domain */
 	if (time_after_eq(jiffies, rq->next_balance) &&
 	    likely(!on_null_domain(cpu)))
 		raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ
-	else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
+	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
 		nohz_balancer_kick(cpu);
 #endif
 }
@@ -4846,15 +5165,6 @@
 	update_sysctl();
 }
 
-#else	/* CONFIG_SMP */
-
-/*
- * on UP we do not need to balance between CPUs:
- */
-static inline void idle_balance(int cpu, struct rq *rq)
-{
-}
-
 #endif /* CONFIG_SMP */
 
 /*
@@ -4878,8 +5188,8 @@
  */
 static void task_fork_fair(struct task_struct *p)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(current);
-	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se, *curr;
 	int this_cpu = smp_processor_id();
 	struct rq *rq = this_rq();
 	unsigned long flags;
@@ -4888,6 +5198,9 @@
 
 	update_rq_clock(rq);
 
+	cfs_rq = task_cfs_rq(current);
+	curr = cfs_rq->curr;
+
 	if (unlikely(task_cpu(p) != this_cpu)) {
 		rcu_read_lock();
 		__set_task_cpu(p, this_cpu);
@@ -4997,6 +5310,16 @@
 	}
 }
 
+void init_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->tasks_timeline = RB_ROOT;
+	INIT_LIST_HEAD(&cfs_rq->tasks);
+	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
+}
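
The unusual seed above, (u64)(-(1LL << 20)), parks min_vruntime roughly a millisecond's worth of vruntime below the 64-bit wrap point; the usual rationale (an assumption here, not spelled out in the patch) is that the signed-difference comparisons used for vruntime then cross the wrap shortly after boot, so wrap bugs surface early. A small sketch of why the comparison still works across the wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t min_vruntime = (uint64_t)(-(1LL << 20));	/* ~2^64 - 1048576 */
	uint64_t later = min_vruntime + 3000000;		/* 3 ms later, wraps past 0 */

	/* prints 1: the signed 64-bit delta still orders the two correctly */
	printf("later is ahead: %d\n", (int64_t)(later - min_vruntime) > 0);
	return 0;
}
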
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
@@ -5013,13 +5336,182 @@
 	 * to another cgroup's rq. This does somewhat interfere with the
 	 * fair sleeper stuff for the first placement, but who cares.
 	 */
+	/*
+	 * When !on_rq, vruntime of the task has usually NOT been normalized.
+	 * But there are some cases where it has already been normalized:
+	 *
+	 * - Moving a forked child which is waiting to be woken up by
+	 *   wake_up_new_task().
+	 * - Moving a task which has been woken up by try_to_wake_up() and
+	 *   is waiting to actually be woken up by sched_ttwu_pending().
+	 *
+	 * To prevent a boost or penalty in the new cfs_rq caused by the
+	 * min_vruntime delta between the two cfs_rqs, we skip the vruntime
+	 * adjustment.
+	 */
+	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
+		on_rq = 1;
+
 	if (!on_rq)
 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
 	set_task_rq(p, task_cpu(p));
 	if (!on_rq)
 		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
 }
+
+void free_fair_sched_group(struct task_group *tg)
+{
+	int i;
+
+	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
+
+	for_each_possible_cpu(i) {
+		if (tg->cfs_rq)
+			kfree(tg->cfs_rq[i]);
+		if (tg->se)
+			kfree(tg->se[i]);
+	}
+
+	kfree(tg->cfs_rq);
+	kfree(tg->se);
+}
+
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se;
+	int i;
+
+	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
+	if (!tg->cfs_rq)
+		goto err;
+	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
+	if (!tg->se)
+		goto err;
+
+	tg->shares = NICE_0_LOAD;
+
+	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
+
+	for_each_possible_cpu(i) {
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
+		if (!cfs_rq)
+			goto err;
+
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
+		if (!se)
+			goto err_free_rq;
+
+		init_cfs_rq(cfs_rq);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+	}
+
+	return 1;
+
+err_free_rq:
+	kfree(cfs_rq);
+err:
+	return 0;
+}
+
+void unregister_fair_sched_group(struct task_group *tg, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/*
+	 * Only empty task groups can be destroyed; so we can speculatively
+	 * check on_list without danger of it being re-added.
+	 */
+	if (!tg->cfs_rq[cpu]->on_list)
+		return;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+			struct sched_entity *se, int cpu,
+			struct sched_entity *parent)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	cfs_rq->tg = tg;
+	cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+	/* allow initial update_cfs_load() to truncate */
+	cfs_rq->load_stamp = 1;
 #endif
+	init_cfs_rq_runtime(cfs_rq);
+
+	tg->cfs_rq[cpu] = cfs_rq;
+	tg->se[cpu] = se;
+
+	/* se could be NULL for root_task_group */
+	if (!se)
+		return;
+
+	if (!parent)
+		se->cfs_rq = &rq->cfs;
+	else
+		se->cfs_rq = parent->my_q;
+
+	se->my_q = cfs_rq;
+	update_load_set(&se->load, 0);
+	se->parent = parent;
+}
+
+static DEFINE_MUTEX(shares_mutex);
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+	int i;
+	unsigned long flags;
+
+	/*
+	 * We can't change the weight of the root cgroup.
+	 */
+	if (!tg->se[0])
+		return -EINVAL;
+
+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
+
+	mutex_lock(&shares_mutex);
+	if (tg->shares == shares)
+		goto done;
+
+	tg->shares = shares;
+	for_each_possible_cpu(i) {
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se));
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+
+done:
+	mutex_unlock(&shares_mutex);
+	return 0;
+}
+#else /* CONFIG_FAIR_GROUP_SCHED */
+
+void free_fair_sched_group(struct task_group *tg) { }
+
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
+{
+	return 1;
+}
+
+void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
 
 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
 {
@@ -5039,7 +5531,7 @@
 /*
  * All the scheduling class methods:
  */
-static const struct sched_class fair_sched_class = {
+const struct sched_class fair_sched_class = {
 	.next			= &idle_sched_class,
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
@@ -5076,7 +5568,7 @@
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static void print_cfs_stats(struct seq_file *m, int cpu)
+void print_cfs_stats(struct seq_file *m, int cpu)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -5086,3 +5578,15 @@
 	rcu_read_unlock();
 }
 #endif
+
+__init void init_sched_fair_class(void)
+{
+#ifdef CONFIG_SMP
+	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
+
+#ifdef CONFIG_NO_HZ
+	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+#endif
+#endif /* SMP */
+
+}
diff --git a/kernel/sched_features.h b/kernel/sched/features.h
similarity index 74%
rename from kernel/sched_features.h
rename to kernel/sched/features.h
index 8480224..e61fd73 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched/features.h
@@ -3,13 +3,13 @@
  * them to run sooner, but does not allow tons of sleepers to
  * rip the spread apart.
  */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 
 /*
  * Place new tasks ahead so that they do not starve already running
  * tasks
  */
-SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(START_DEBIT, true)
 
 /*
  * Based on load and program behaviour, see if it makes sense to place
@@ -17,54 +17,54 @@
  * improve cache locality. Typically used with SYNC wakeups as
  * generated by pipes and the like, see also SYNC_WAKEUPS.
  */
-SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, true)
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since it's likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, 0)
+SCHED_FEAT(NEXT_BUDDY, false)
 
 /*
  * Prefer to schedule the task that ran last (when we did
  * wake-preempt) as that likely will touch the same data, increases
  * cache locality.
  */
-SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(LAST_BUDDY, true)
 
 /*
  * Consider buddies to be cache hot, decreases the likelihood of a
  * cache buddy being migrated away, increases cache locality.
  */
-SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, true)
 
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, 0)
+SCHED_FEAT(ARCH_POWER, false)
 
-SCHED_FEAT(HRTICK, 0)
-SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(LB_BIAS, 1)
+SCHED_FEAT(HRTICK, false)
+SCHED_FEAT(DOUBLE_TICK, false)
+SCHED_FEAT(LB_BIAS, true)
 
 /*
  * Spin-wait on mutex acquisition when the mutex owner is running on
  * another cpu -- assumes that when the owner is running, it will soon
  * release the lock. Decreases scheduling overhead.
  */
-SCHED_FEAT(OWNER_SPIN, 1)
+SCHED_FEAT(OWNER_SPIN, true)
 
 /*
  * Decrement CPU power based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, 1)
+SCHED_FEAT(NONTASK_POWER, true)
 
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, 1)
+SCHED_FEAT(TTWU_QUEUE, true)
 
-SCHED_FEAT(FORCE_SD_OVERLAP, 0)
-SCHED_FEAT(RT_RUNTIME_SHARE, 1)
+SCHED_FEAT(FORCE_SD_OVERLAP, false)
+SCHED_FEAT(RT_RUNTIME_SHARE, true)
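
For context, features.h is an X-macro include: the scheduler includes it more than once with different SCHED_FEAT() definitions, typically once to build an enum of feature ids and once to build the default feature bitmask that sched_feat() tests. The exact consumer lives in kernel/sched/, so treat the following as an illustrative userspace reconstruction of the pattern rather than the kernel's literal code:

#include <stdio.h>

#define EX_FEATURES(F)			\
	F(GENTLE_FAIR_SLEEPERS, 1)	\
	F(START_DEBIT, 1)		\
	F(HRTICK, 0)

enum {
#define EX_ENUM(name, on)	__EX_FEAT_##name,
	EX_FEATURES(EX_ENUM)
	__EX_FEAT_NR
};

static const unsigned long ex_default_features =
#define EX_BIT(name, on)	((on) ? 1UL << __EX_FEAT_##name : 0) |
	EX_FEATURES(EX_BIT)
	0;

#define ex_sched_feat(x)	(ex_default_features & (1UL << __EX_FEAT_##x))

int main(void)
{
	/* prints 1 and 0: START_DEBIT defaults on, HRTICK defaults off */
	printf("START_DEBIT=%d HRTICK=%d\n",
	       !!ex_sched_feat(START_DEBIT), !!ex_sched_feat(HRTICK));
	return 0;
}
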
diff --git a/kernel/sched_idletask.c b/kernel/sched/idle_task.c
similarity index 96%
rename from kernel/sched_idletask.c
rename to kernel/sched/idle_task.c
index 0a51882..91b4c95 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched/idle_task.c
@@ -1,3 +1,5 @@
+#include "sched.h"
+
 /*
  * idle-task scheduling class.
  *
@@ -71,7 +73,7 @@
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-static const struct sched_class idle_sched_class = {
+const struct sched_class idle_sched_class = {
 	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
diff --git a/kernel/sched_rt.c b/kernel/sched/rt.c
similarity index 89%
rename from kernel/sched_rt.c
rename to kernel/sched/rt.c
index 583a136..3640ebb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched/rt.c
@@ -3,7 +3,92 @@
  * policies)
  */
 
+#include "sched.h"
+
+#include <linux/slab.h>
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
+struct rt_bandwidth def_rt_bandwidth;
+
+static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
+{
+	struct rt_bandwidth *rt_b =
+		container_of(timer, struct rt_bandwidth, rt_period_timer);
+	ktime_t now;
+	int overrun;
+	int idle = 0;
+
+	for (;;) {
+		now = hrtimer_cb_get_time(timer);
+		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
+
+		if (!overrun)
+			break;
+
+		idle = do_sched_rt_period_timer(rt_b, overrun);
+	}
+
+	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+}
+
+void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
+{
+	rt_b->rt_period = ns_to_ktime(period);
+	rt_b->rt_runtime = runtime;
+
+	raw_spin_lock_init(&rt_b->rt_runtime_lock);
+
+	hrtimer_init(&rt_b->rt_period_timer,
+			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rt_b->rt_period_timer.function = sched_rt_period_timer;
+}
+
+static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+		return;
+
+	if (hrtimer_active(&rt_b->rt_period_timer))
+		return;
+
+	raw_spin_lock(&rt_b->rt_runtime_lock);
+	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
+}
+
+void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
+{
+	struct rt_prio_array *array;
+	int i;
+
+	array = &rt_rq->active;
+	for (i = 0; i < MAX_RT_PRIO; i++) {
+		INIT_LIST_HEAD(array->queue + i);
+		__clear_bit(i, array->bitmap);
+	}
+	/* delimiter for bitsearch: */
+	__set_bit(MAX_RT_PRIO, array->bitmap);
+
+#if defined CONFIG_SMP
+	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+	rt_rq->highest_prio.next = MAX_RT_PRIO;
+	rt_rq->rt_nr_migratory = 0;
+	rt_rq->overloaded = 0;
+	plist_head_init(&rt_rq->pushable_tasks);
+#endif
+
+	rt_rq->rt_time = 0;
+	rt_rq->rt_throttled = 0;
+	rt_rq->rt_runtime = 0;
+	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+}
+
 #ifdef CONFIG_RT_GROUP_SCHED
+static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+	hrtimer_cancel(&rt_b->rt_period_timer);
+}
 
 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
 
@@ -25,6 +110,91 @@
 	return rt_se->rt_rq;
 }
 
+void free_rt_sched_group(struct task_group *tg)
+{
+	int i;
+
+	if (tg->rt_se)
+		destroy_rt_bandwidth(&tg->rt_bandwidth);
+
+	for_each_possible_cpu(i) {
+		if (tg->rt_rq)
+			kfree(tg->rt_rq[i]);
+		if (tg->rt_se)
+			kfree(tg->rt_se[i]);
+	}
+
+	kfree(tg->rt_rq);
+	kfree(tg->rt_se);
+}
+
+void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
+		struct sched_rt_entity *rt_se, int cpu,
+		struct sched_rt_entity *parent)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+	rt_rq->rt_nr_boosted = 0;
+	rt_rq->rq = rq;
+	rt_rq->tg = tg;
+
+	tg->rt_rq[cpu] = rt_rq;
+	tg->rt_se[cpu] = rt_se;
+
+	if (!rt_se)
+		return;
+
+	if (!parent)
+		rt_se->rt_rq = &rq->rt;
+	else
+		rt_se->rt_rq = parent->my_q;
+
+	rt_se->my_q = rt_rq;
+	rt_se->parent = parent;
+	INIT_LIST_HEAD(&rt_se->run_list);
+}
+
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
+{
+	struct rt_rq *rt_rq;
+	struct sched_rt_entity *rt_se;
+	int i;
+
+	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
+	if (!tg->rt_rq)
+		goto err;
+	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
+	if (!tg->rt_se)
+		goto err;
+
+	init_rt_bandwidth(&tg->rt_bandwidth,
+			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
+
+	for_each_possible_cpu(i) {
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
+		if (!rt_rq)
+			goto err;
+
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
+		if (!rt_se)
+			goto err_free_rq;
+
+		init_rt_rq(rt_rq, cpu_rq(i));
+		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
+	}
+
+	return 1;
+
+err_free_rq:
+	kfree(rt_rq);
+err:
+	return 0;
+}
+
 #else /* CONFIG_RT_GROUP_SCHED */
 
 #define rt_entity_is_task(rt_se) (1)
@@ -47,6 +217,12 @@
 	return &rq->rt;
 }
 
+void free_rt_sched_group(struct task_group *tg) { }
+
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
+{
+	return 1;
+}
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
@@ -556,6 +732,28 @@
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	int cpu = (int)(long)hcpu;
+
+	switch (action) {
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		disable_runtime(cpu_rq(cpu));
+		return NOTIFY_OK;
+
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		enable_runtime(cpu_rq(cpu));
+		return NOTIFY_OK;
+
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
 static int balance_runtime(struct rt_rq *rt_rq)
 {
 	int more = 0;
@@ -648,7 +846,7 @@
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
-	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+	if (runtime >= sched_rt_period(rt_rq))
 		return 0;
 
 	balance_runtime(rt_rq);
@@ -957,8 +1155,8 @@
 }
 
 /*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
+ * Move the task to the head or the tail of the run list without the
+ * overhead of a dequeue followed by an enqueue.
  */
 static void
 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
@@ -1002,6 +1200,9 @@
 
 	cpu = task_cpu(p);
 
+	if (p->rt.nr_cpus_allowed == 1)
+		goto out;
+
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
 		goto out;
@@ -1178,8 +1379,6 @@
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
-
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
@@ -1653,13 +1852,14 @@
 		pull_rt_task(rq);
 }
 
-static inline void init_sched_rt_class(void)
+void init_sched_rt_class(void)
 {
 	unsigned int i;
 
-	for_each_possible_cpu(i)
+	for_each_possible_cpu(i) {
 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
 					GFP_KERNEL, cpu_to_node(i));
+	}
 }
 #endif /* CONFIG_SMP */
 
@@ -1800,7 +2000,7 @@
 		return 0;
 }
 
-static const struct sched_class rt_sched_class = {
+const struct sched_class rt_sched_class = {
 	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -1835,7 +2035,7 @@
 #ifdef CONFIG_SCHED_DEBUG
 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
 
-static void print_rt_stats(struct seq_file *m, int cpu)
+void print_rt_stats(struct seq_file *m, int cpu)
 {
 	rt_rq_iter_t iter;
 	struct rt_rq *rt_rq;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
new file mode 100644
index 0000000..98c0c26
--- /dev/null
+++ b/kernel/sched/sched.h
@@ -0,0 +1,1166 @@
+
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+
+#include "cpupri.h"
+
+extern __read_mostly int scheduler_running;
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
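
Concretely, with the usual MAX_RT_PRIO of 100 (an assumption, since that constant is defined elsewhere), nice -20, 0 and +19 map to static priorities 100, 120 and 139, i.e. user priorities 0, 20 and 39. A standalone check of the conversions:

#include <stdio.h>

#define EX_MAX_RT_PRIO		100	/* assumed value of MAX_RT_PRIO */
#define EX_NICE_TO_PRIO(nice)	(EX_MAX_RT_PRIO + (nice) + 20)
#define EX_PRIO_TO_NICE(prio)	((prio) - EX_MAX_RT_PRIO - 20)
#define EX_USER_PRIO(prio)	((prio) - EX_MAX_RT_PRIO)

int main(void)
{
	int nices[] = { -20, 0, 19 };
	int i;

	for (i = 0; i < 3; i++) {
		int prio = EX_NICE_TO_PRIO(nices[i]);

		/* e.g. nice 0 -> prio 120 -> user prio 20 -> nice 0 */
		printf("nice %3d -> prio %d -> user prio %d -> nice %d\n",
		       nices[i], prio, EX_USER_PRIO(prio), EX_PRIO_TO_NICE(prio));
	}
	return 0;
}
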
+
+/*
+ * Helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
+
+#define NICE_0_LOAD		SCHED_LOAD_SCALE
+#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define DEF_TIMESLICE		(100 * HZ / 1000)
+
+/*
+ * single value that denotes runtime == period, i.e. unlimited time.
+ */
+#define RUNTIME_INF	((u64)~0ULL)
+
+static inline int rt_policy(int policy)
+{
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		return 1;
+	return 0;
+}
+
+static inline int task_has_rt_policy(struct task_struct *p)
+{
+	return rt_policy(p->policy);
+}
+
+/*
+ * This is the priority-queue data structure of the RT scheduling class:
+ */
+struct rt_prio_array {
+	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+	struct list_head queue[MAX_RT_PRIO];
+};
+
+struct rt_bandwidth {
+	/* nests inside the rq lock: */
+	raw_spinlock_t		rt_runtime_lock;
+	ktime_t			rt_period;
+	u64			rt_runtime;
+	struct hrtimer		rt_period_timer;
+};
+
+extern struct mutex sched_domains_mutex;
+
+#ifdef CONFIG_CGROUP_SCHED
+
+#include <linux/cgroup.h>
+
+struct cfs_rq;
+struct rt_rq;
+
+static LIST_HEAD(task_groups);
+
+struct cfs_bandwidth {
+#ifdef CONFIG_CFS_BANDWIDTH
+	raw_spinlock_t lock;
+	ktime_t period;
+	u64 quota, runtime;
+	s64 hierarchal_quota;
+	u64 runtime_expires;
+
+	int idle, timer_active;
+	struct hrtimer period_timer, slack_timer;
+	struct list_head throttled_cfs_rq;
+
+	/* statistics */
+	int nr_periods, nr_throttled;
+	u64 throttled_time;
+#endif
+};
+
+/* task group related information */
+struct task_group {
+	struct cgroup_subsys_state css;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/* schedulable entities of this group on each cpu */
+	struct sched_entity **se;
+	/* runqueue "owned" by this group on each cpu */
+	struct cfs_rq **cfs_rq;
+	unsigned long shares;
+
+	atomic_t load_weight;
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	struct sched_rt_entity **rt_se;
+	struct rt_rq **rt_rq;
+
+	struct rt_bandwidth rt_bandwidth;
+#endif
+
+	struct rcu_head rcu;
+	struct list_head list;
+
+	struct task_group *parent;
+	struct list_head siblings;
+	struct list_head children;
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+	struct autogroup *autogroup;
+#endif
+
+	struct cfs_bandwidth cfs_bandwidth;
+};
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
+
+/*
+ * A weight of 0 or 1 can cause arithmetic problems.
+ * The weight of a cfs_rq is the sum of the weights of the entities
+ * queued on it, so the weight of an entity should not be too large,
+ * and neither should the shares value of a task group.
+ * (The default weight is 1024 - so there's no practical
+ *  limitation from this.)
+ */
+#define MIN_SHARES	(1UL <<  1)
+#define MAX_SHARES	(1UL << 18)
+#endif
+
+/* Default task group.
+ *	Every task in the system belongs to this group at bootup.
+ */
+extern struct task_group root_task_group;
+
+typedef int (*tg_visitor)(struct task_group *, void *);
+
+extern int walk_tg_tree_from(struct task_group *from,
+			     tg_visitor down, tg_visitor up, void *data);
+
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+	return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
+extern int tg_nop(struct task_group *tg, void *data);
+
+extern void free_fair_sched_group(struct task_group *tg);
+extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
+extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+			struct sched_entity *se, int cpu,
+			struct sched_entity *parent);
+extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+
+extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
+
+extern void free_rt_sched_group(struct task_group *tg);
+extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
+extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
+		struct sched_rt_entity *rt_se, int cpu,
+		struct sched_rt_entity *parent);
+
+#else /* CONFIG_CGROUP_SCHED */
+
+struct cfs_bandwidth { };
+
+#endif	/* CONFIG_CGROUP_SCHED */
+
+/* CFS-related fields in a runqueue */
+struct cfs_rq {
+	struct load_weight load;
+	unsigned long nr_running, h_nr_running;
+
+	u64 exec_clock;
+	u64 min_vruntime;
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
+#endif
+
+	struct rb_root tasks_timeline;
+	struct rb_node *rb_leftmost;
+
+	struct list_head tasks;
+	struct list_head *balance_iterator;
+
+	/*
+	 * 'curr' points to the currently running entity on this cfs_rq.
+	 * It is set to NULL otherwise (i.e. when none are currently running).
+	 */
+	struct sched_entity *curr, *next, *last, *skip;
+
+#ifdef	CONFIG_SCHED_DEBUG
+	unsigned int nr_spread_over;
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
+
+	/*
+	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+	 * (like users, containers etc.)
+	 *
+	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
+	 * list is used during load balance.
+	 */
+	int on_list;
+	struct list_head leaf_cfs_rq_list;
+	struct task_group *tg;	/* group that "owns" this runqueue */
+
+#ifdef CONFIG_SMP
+	/*
+	 * the part of load.weight contributed by tasks
+	 */
+	unsigned long task_weight;
+
+	/*
+	 *   h_load = weight * f(tg)
+	 *
+	 * Where f(tg) is the recursive weight fraction assigned to
+	 * this group.
+	 */
+	unsigned long h_load;
+
+	/*
+	 * Maintaining per-cpu shares distribution for group scheduling
+	 *
+	 * load_stamp is the last time we updated the load average
+	 * load_last is the last time we updated the load average and saw load
+	 * load_unacc_exec_time is currently unaccounted execution time
+	 */
+	u64 load_avg;
+	u64 load_period;
+	u64 load_stamp, load_last, load_unacc_exec_time;
+
+	unsigned long load_contribution;
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_CFS_BANDWIDTH
+	int runtime_enabled;
+	u64 runtime_expires;
+	s64 runtime_remaining;
+
+	u64 throttled_timestamp;
+	int throttled, throttle_count;
+	struct list_head throttled_list;
+#endif /* CONFIG_CFS_BANDWIDTH */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+};
+
+static inline int rt_bandwidth_enabled(void)
+{
+	return sysctl_sched_rt_runtime >= 0;
+}
+
+/* Real-Time classes' related field in a runqueue: */
+struct rt_rq {
+	struct rt_prio_array active;
+	unsigned long rt_nr_running;
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+	struct {
+		int curr; /* highest queued rt task prio */
+#ifdef CONFIG_SMP
+		int next; /* next highest */
+#endif
+	} highest_prio;
+#endif
+#ifdef CONFIG_SMP
+	unsigned long rt_nr_migratory;
+	unsigned long rt_nr_total;
+	int overloaded;
+	struct plist_head pushable_tasks;
+#endif
+	int rt_throttled;
+	u64 rt_time;
+	u64 rt_runtime;
+	/* Nests inside the rq lock: */
+	raw_spinlock_t rt_runtime_lock;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	unsigned long rt_nr_boosted;
+
+	struct rq *rq;
+	struct list_head leaf_rt_rq_list;
+	struct task_group *tg;
+#endif
+};
+
+#ifdef CONFIG_SMP
+
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+	atomic_t refcount;
+	atomic_t rto_count;
+	struct rcu_head rcu;
+	cpumask_var_t span;
+	cpumask_var_t online;
+
+	/*
+	 * The "RT overload" flag: it gets set if a CPU has more than
+	 * one runnable RT task.
+	 */
+	cpumask_var_t rto_mask;
+	struct cpupri cpupri;
+};
+
+extern struct root_domain def_root_domain;
+
+#endif /* CONFIG_SMP */
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: those places that want to lock multiple runqueues
+ * (such as the load balancing or the thread migration code), lock
+ * acquire operations must be ordered by ascending &runqueue.
+ */
+struct rq {
+	/* runqueue lock: */
+	raw_spinlock_t lock;
+
+	/*
+	 * nr_running and cpu_load should be in the same cacheline because
+	 * remote CPUs use both these fields when doing load calculation.
+	 */
+	unsigned long nr_running;
+	#define CPU_LOAD_IDX_MAX 5
+	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+	unsigned long last_load_update_tick;
+#ifdef CONFIG_NO_HZ
+	u64 nohz_stamp;
+	unsigned long nohz_flags;
+#endif
+	int skip_clock_update;
+
+	/* capture load from *all* tasks on this cpu: */
+	struct load_weight load;
+	unsigned long nr_load_updates;
+	u64 nr_switches;
+
+	struct cfs_rq cfs;
+	struct rt_rq rt;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/* list of leaf cfs_rq on this cpu: */
+	struct list_head leaf_cfs_rq_list;
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	struct list_head leaf_rt_rq_list;
+#endif
+
+	/*
+	 * This is part of a global counter where only the total sum
+	 * over all CPUs matters. A task can increase this counter on
+	 * one CPU and if it got migrated afterwards it may decrease
+	 * it on another CPU. Always updated under the runqueue lock:
+	 */
+	unsigned long nr_uninterruptible;
+
+	struct task_struct *curr, *idle, *stop;
+	unsigned long next_balance;
+	struct mm_struct *prev_mm;
+
+	u64 clock;
+	u64 clock_task;
+
+	atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+	struct root_domain *rd;
+	struct sched_domain *sd;
+
+	unsigned long cpu_power;
+
+	unsigned char idle_balance;
+	/* For active balancing */
+	int post_schedule;
+	int active_balance;
+	int push_cpu;
+	struct cpu_stop_work active_balance_work;
+	/* cpu of this runqueue: */
+	int cpu;
+	int online;
+
+	u64 rt_avg;
+	u64 age_stamp;
+	u64 idle_stamp;
+	u64 avg_idle;
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif
+#ifdef CONFIG_PARAVIRT
+	u64 prev_steal_time;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	u64 prev_steal_time_rq;
+#endif
+
+	/* calc_load related fields */
+	unsigned long calc_load_update;
+	long calc_load_active;
+
+#ifdef CONFIG_SCHED_HRTICK
+#ifdef CONFIG_SMP
+	int hrtick_csd_pending;
+	struct call_single_data hrtick_csd;
+#endif
+	struct hrtimer hrtick_timer;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+	/* latency stats */
+	struct sched_info rq_sched_info;
+	unsigned long long rq_cpu_time;
+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
+
+	/* sys_sched_yield() stats */
+	unsigned int yld_count;
+
+	/* schedule() stats */
+	unsigned int sched_switch;
+	unsigned int sched_count;
+	unsigned int sched_goidle;
+
+	/* try_to_wake_up() stats */
+	unsigned int ttwu_count;
+	unsigned int ttwu_local;
+#endif
+
+#ifdef CONFIG_SMP
+	struct llist_head wake_list;
+#endif
+};
+
+static inline int cpu_of(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+	return rq->cpu;
+#else
+	return 0;
+#endif
+}
+
+DECLARE_PER_CPU(struct rq, runqueues);
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		(&__get_cpu_var(runqueues))
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+#define raw_rq()		(&__raw_get_cpu_var(runqueues))
+
+#ifdef CONFIG_SMP
+
+#define rcu_dereference_check_sched_domain(p) \
+	rcu_dereference_check((p), \
+			      lockdep_is_held(&sched_domains_mutex))
+
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See detach_destroy_domains: synchronize_sched for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
+#define for_each_domain(cpu, __sd) \
+	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
+			__sd; __sd = __sd->parent)
+
+#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
+
+/**
+ * highest_flag_domain - Return highest sched_domain containing flag.
+ * @cpu:	The cpu whose highest level of sched domain is to
+ *		be returned.
+ * @flag:	The flag to check for the highest sched_domain
+ *		for the given cpu.
+ *
+ * Returns the highest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd, *hsd = NULL;
+
+	for_each_domain(cpu, sd) {
+		if (!(sd->flags & flag))
+			break;
+		hsd = sd;
+	}
+
+	return hsd;
+}
+
+DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(int, sd_llc_id);
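+
sd_llc and sd_llc_id are only declared here; they are filled in from the sched-domain rebuild path. A sketch of what that updater is expected to look like, built on highest_flag_domain() above (the helper name and call site are assumed rather than shown by this hunk):

	static void update_top_cache_domain(int cpu)
	{
		struct sched_domain *sd;
		int id = cpu;

		/* highest domain whose CPUs share the last-level cache */
		sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
		if (sd)
			id = cpumask_first(sched_domain_span(sd));

		rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
		per_cpu(sd_llc_id, cpu) = id;
	}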
+
+#endif /* CONFIG_SMP */
+
+#include "stats.h"
+#include "auto_group.h"
+
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this task belongs.
+ *
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	struct task_group *tg;
+	struct cgroup_subsys_state *css;
+
+	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+			lockdep_is_held(&p->pi_lock) ||
+			lockdep_is_held(&task_rq(p)->lock));
+	tg = container_of(css, struct task_group, css);
+
+	return autogroup_task_group(p, tg);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
+	struct task_group *tg = task_group(p);
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	p->se.cfs_rq = tg->cfs_rq[cpu];
+	p->se.parent = tg->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	p->rt.rt_rq  = tg->rt_rq[cpu];
+	p->rt.parent = tg->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
+	 * successfully executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
+
+/*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# include <linux/jump_label.h>
+# define const_debug __read_mostly
+#else
+# define const_debug const
+#endif
+
+extern const_debug unsigned int sysctl_sched_features;
+
+#define SCHED_FEAT(name, enabled)	\
+	__SCHED_FEAT_##name ,
+
+enum {
+#include "features.h"
+	__SCHED_FEAT_NR,
+};
+
+#undef SCHED_FEAT
+
+#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+static __always_inline bool static_branch__true(struct jump_label_key *key)
+{
+	return likely(static_branch(key)); /* Not out of line branch. */
+}
+
+static __always_inline bool static_branch__false(struct jump_label_key *key)
+{
+	return unlikely(static_branch(key)); /* Out of line branch. */
+}
+
+#define SCHED_FEAT(name, enabled)					\
+static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+{									\
+	return static_branch__##enabled(key);				\
+}
+
+#include "features.h"
+
+#undef SCHED_FEAT
+
+extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
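+
When SCHED_DEBUG or jump labels are not available, sched_feat() above degenerates to a plain bitmask test on sysctl_sched_features. A minimal userspace sketch of that fallback path; the feature names and the default mask are made up for illustration and do not come from features.h:

	#include <stdio.h>

	#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,
	enum {
		SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)	/* hypothetical entries */
		SCHED_FEAT(HRTICK, false)
		__SCHED_FEAT_NR,
	};
	#undef SCHED_FEAT

	/* demo default: only the first feature enabled */
	static unsigned long sysctl_sched_features =
		1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS;

	#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

	int main(void)
	{
		printf("GENTLE_FAIR_SLEEPERS: %lu\n", sched_feat(GENTLE_FAIR_SLEEPERS));
		printf("HRTICK:               %lu\n", sched_feat(HRTICK));
		return 0;
	}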
+
+static inline u64 global_rt_period(void)
+{
+	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_rt_runtime(void)
+{
+	if (sysctl_sched_rt_runtime < 0)
+		return RUNTIME_INF;
+
+	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+}
+
+
+
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+	return rq->curr == p;
+}
+
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
+	return task_current(rq, p);
+#endif
+}
+
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev)	do { } while (0)
+#endif
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
+	/*
+	 * If we are tracking spinlock dependencies then we have to
+	 * fix up the runqueue lock - which gets 'carried over' from
+	 * prev into current:
+	 */
+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+	raw_spin_unlock_irq(&rq->lock);
+}
+
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	raw_spin_unlock_irq(&rq->lock);
+#else
+	raw_spin_unlock(&rq->lock);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+	lw->weight += inc;
+	lw->inv_weight = 0;
+}
+
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+	lw->weight -= dec;
+	lw->inv_weight = 0;
+}
+
+static inline void update_load_set(struct load_weight *lw, unsigned long w)
+{
+	lw->weight = w;
+	lw->inv_weight = 0;
+}
+
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs, the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+#define WEIGHT_IDLEPRIO                3
+#define WMULT_IDLEPRIO         1431655765
+
+/*
+ * Nice levels are multiplicative, with a gentle 10% change for every
+ * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
+ * nice 1, it will get ~10% less CPU time than another CPU-bound task
+ * that remained on nice 0.
+ *
+ * The "10% effect" is relative and cumulative: from _any_ nice level,
+ * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
+ * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
+ * If a task goes up by ~10% and another task goes down by ~10% then
+ * the relative distance between them is ~25%.)
+ */
+static const int prio_to_weight[40] = {
+ /* -20 */     88761,     71755,     56483,     46273,     36291,
+ /* -15 */     29154,     23254,     18705,     14949,     11916,
+ /* -10 */      9548,      7620,      6100,      4904,      3906,
+ /*  -5 */      3121,      2501,      1991,      1586,      1277,
+ /*   0 */      1024,       820,       655,       526,       423,
+ /*   5 */       335,       272,       215,       172,       137,
+ /*  10 */       110,        87,        70,        56,        45,
+ /*  15 */        36,        29,        23,        18,        15,
+};
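+
To check the "10% effect" numerically (plain userspace C, not kernel code): adjacent table entries differ by the ~1.25 multiplier, and for two competing CPU-bound tasks that translates into roughly the ten-percentage-point gap in CPU share described above.

	#include <stdio.h>

	int main(void)
	{
		double w0 = 1024.0;	/* prio_to_weight[20], nice  0 */
		double w1 = 820.0;	/* prio_to_weight[21], nice +1 */

		/* two CPU-bound tasks: each gets weight / (sum of weights) */
		double share0 = w0 / (w0 + w1);
		double share1 = w1 / (w0 + w1);

		printf("step ratio: %.3f\n", w0 / w1);		/* ~1.249 */
		printf("CPU shares: %.1f%% vs %.1f%%\n",
		       100.0 * share0, 100.0 * share1);		/* ~55.5 vs ~44.5 */
		return 0;
	}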
+
+/*
+ * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
+ *
+ * In cases where the weight does not change often, we can use the
+ * precalculated inverse to speed up arithmetic by turning divisions
+ * into multiplications:
+ */
+static const u32 prio_to_wmult[40] = {
+ /* -20 */     48388,     59856,     76040,     92818,    118348,
+ /* -15 */    147320,    184698,    229616,    287308,    360437,
+ /* -10 */    449829,    563644,    704093,    875809,   1099582,
+ /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
+ /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
+ /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
+ /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
+ /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
+};
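+
A sketch of the arithmetic only, assuming scaling by the nice-0 weight of 1024: multiplying by the precalculated 2^32/weight entry and shifting right by 32 reproduces the division to within rounding, which is what makes the prio_to_wmult[] table worth keeping.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t delta  = 3000000;	/* e.g. a delta_exec value, in ns */
		uint64_t weight = 820;		/* prio_to_weight for nice +1 */
		uint64_t inv    = 5237765;	/* prio_to_wmult for nice +1, ~2^32/820 */

		uint64_t by_div  = (delta * 1024) / weight;	/* with a division */
		uint64_t by_mult = (delta * 1024 * inv) >> 32;	/* multiply + shift */

		printf("div=%llu mult=%llu\n",
		       (unsigned long long)by_div, (unsigned long long)by_mult);
		return 0;
	}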
+
+/* Time spent by the tasks of the cpu accounting group executing in ... */
+enum cpuacct_stat_index {
+	CPUACCT_STAT_USER,	/* ... user mode */
+	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */
+
+	CPUACCT_STAT_NSTATS,
+};
+
+
+#define sched_class_highest (&stop_sched_class)
+#define for_each_class(class) \
+   for (class = sched_class_highest; class; class = class->next)
+
+extern const struct sched_class stop_sched_class;
+extern const struct sched_class rt_sched_class;
+extern const struct sched_class fair_sched_class;
+extern const struct sched_class idle_sched_class;
+
+
+#ifdef CONFIG_SMP
+
+extern void trigger_load_balance(struct rq *rq, int cpu);
+extern void idle_balance(int this_cpu, struct rq *this_rq);
+
+#else	/* CONFIG_SMP */
+
+static inline void idle_balance(int cpu, struct rq *rq)
+{
+}
+
+#endif
+
+extern void sysrq_sched_debug_show(void);
+extern void sched_init_granularity(void);
+extern void update_max_interval(void);
+extern void update_group_power(struct sched_domain *sd, int cpu);
+extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
+extern void init_sched_rt_class(void);
+extern void init_sched_fair_class(void);
+
+extern void resched_task(struct task_struct *p);
+extern void resched_cpu(int cpu);
+
+extern struct rt_bandwidth def_rt_bandwidth;
+extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+
+extern void update_cpu_load(struct rq *this_rq);
+
+#ifdef CONFIG_CGROUP_CPUACCT
+#include <linux/cgroup.h>
+/* track cpu usage of a group of tasks and its child groups */
+struct cpuacct {
+	struct cgroup_subsys_state css;
+	/* cpuusage holds pointer to a u64-type object on every cpu */
+	u64 __percpu *cpuusage;
+	struct kernel_cpustat __percpu *cpustat;
+};
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+{
+	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+static inline struct cpuacct *parent_ca(struct cpuacct *ca)
+{
+	if (!ca || !ca->css.cgroup->parent)
+		return NULL;
+	return cgroup_ca(ca->css.cgroup->parent);
+}
+
+extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
+static inline void inc_nr_running(struct rq *rq)
+{
+	rq->nr_running++;
+}
+
+static inline void dec_nr_running(struct rq *rq)
+{
+	rq->nr_running--;
+}
+
+extern void update_rq_clock(struct rq *rq);
+
+extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
+extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
+
+extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
+
+extern const_debug unsigned int sysctl_sched_time_avg;
+extern const_debug unsigned int sysctl_sched_nr_migrate;
+extern const_debug unsigned int sysctl_sched_migration_cost;
+
+static inline u64 sched_avg_period(void)
+{
+	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
+}
+
+void calc_load_account_idle(struct rq *this_rq);
+
+#ifdef CONFIG_SCHED_HRTICK
+
+/*
+ * Use hrtick when:
+ *  - enabled by features
+ *  - hrtimer is actually high res
+ */
+static inline int hrtick_enabled(struct rq *rq)
+{
+	if (!sched_feat(HRTICK))
+		return 0;
+	if (!cpu_active(cpu_of(rq)))
+		return 0;
+	return hrtimer_is_hres_active(&rq->hrtick_timer);
+}
+
+void hrtick_start(struct rq *rq, u64 delay);
+
+#else
+
+static inline int hrtick_enabled(struct rq *rq)
+{
+	return 0;
+}
+
+#endif /* CONFIG_SCHED_HRTICK */
+
+#ifdef CONFIG_SMP
+extern void sched_avg_update(struct rq *rq);
+static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+	rq->rt_avg += rt_delta;
+	sched_avg_update(rq);
+}
+#else
+static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
+static inline void sched_avg_update(struct rq *rq) { }
+#endif
+
+extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PREEMPT
+
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
+/*
+ * fair double_lock_balance: Safely acquires both rq->locks in a fair
+ * way at the expense of forcing extra atomic operations in all
+ * invocations.  This assures that the double_lock is acquired using the
+ * same underlying policy as the spinlock_t on this architecture, which
+ * reduces latency compared to the unfair variant below.  However, it
+ * also adds more overhead and therefore may reduce throughput.
+ */
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	raw_spin_unlock(&this_rq->lock);
+	double_rq_lock(this_rq, busiest);
+
+	return 1;
+}
+
+#else
+/*
+ * Unfair double_lock_balance: Optimizes throughput at the expense of
+ * latency by eliminating extra atomic operations when the locks are
+ * already in proper order on entry.  This favors lower cpu-ids and will
+ * grant the double lock to lower cpus over higher ids under contention,
+ * regardless of entry order into the function.
+ */
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			raw_spin_unlock(&this_rq->lock);
+			raw_spin_lock(&busiest->lock);
+			raw_spin_lock_nested(&this_rq->lock,
+					      SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			raw_spin_lock_nested(&busiest->lock,
+					      SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+{
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work well under rq->lock */
+		raw_spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+
+	return _double_lock_balance(this_rq, busiest);
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	raw_spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	BUG_ON(!irqs_disabled());
+	if (rq1 == rq2) {
+		raw_spin_lock(&rq1->lock);
+		__acquire(rq2->lock);	/* Fake it out ;) */
+	} else {
+		if (rq1 < rq2) {
+			raw_spin_lock(&rq1->lock);
+			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+		} else {
+			raw_spin_lock(&rq2->lock);
+			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+		}
+	}
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	raw_spin_unlock(&rq1->lock);
+	if (rq1 != rq2)
+		raw_spin_unlock(&rq2->lock);
+	else
+		__release(rq2->lock);
+}
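+
The deadlock-avoidance rule behind double_rq_lock() (and the unfair _double_lock_balance() above) is simply: when both locks are needed, take the lower-addressed one first. A self-contained userspace analogue with pthread mutexes standing in for the rq locks:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	/* Take the lower-addressed lock first so two threads locking the same
	 * pair in opposite argument order can never deadlock ABBA-style. */
	static void lock_pair(pthread_mutex_t *m1, pthread_mutex_t *m2)
	{
		if (m1 == m2) {			/* the rq1 == rq2 case */
			pthread_mutex_lock(m1);
			return;
		}
		if (m1 > m2) {
			pthread_mutex_t *tmp = m1;
			m1 = m2;
			m2 = tmp;
		}
		pthread_mutex_lock(m1);
		pthread_mutex_lock(m2);
	}

	static void unlock_pair(pthread_mutex_t *m1, pthread_mutex_t *m2)
	{
		pthread_mutex_unlock(m1);
		if (m1 != m2)
			pthread_mutex_unlock(m2);
	}

	int main(void)
	{
		lock_pair(&lock_b, &lock_a);	/* argument order doesn't matter */
		unlock_pair(&lock_b, &lock_a);
		printf("pair locked and unlocked in address order\n");
		return 0;
	}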
+
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	BUG_ON(!irqs_disabled());
+	BUG_ON(rq1 != rq2);
+	raw_spin_lock(&rq1->lock);
+	__acquire(rq2->lock);	/* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	BUG_ON(rq1 != rq2);
+	raw_spin_unlock(&rq1->lock);
+	__release(rq2->lock);
+}
+
+#endif
+
+extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
+extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
+extern void print_cfs_stats(struct seq_file *m, int cpu);
+extern void print_rt_stats(struct seq_file *m, int cpu);
+
+extern void init_cfs_rq(struct cfs_rq *cfs_rq);
+extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
+extern void unthrottle_offline_cfs_rqs(struct rq *rq);
+
+extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+
+#ifdef CONFIG_NO_HZ
+enum rq_nohz_flag_bits {
+	NOHZ_TICK_STOPPED,
+	NOHZ_BALANCE_KICK,
+	NOHZ_IDLE,
+};
+
+#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
+#endif
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
new file mode 100644
index 0000000..2a581ba
--- /dev/null
+++ b/kernel/sched/stats.c
@@ -0,0 +1,111 @@
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#include "sched.h"
+
+/*
+ * bump this up when changing the output format or the meaning of an existing
+ * format, so that tools can adapt (or abort)
+ */
+#define SCHEDSTAT_VERSION 15
+
+static int show_schedstat(struct seq_file *seq, void *v)
+{
+	int cpu;
+	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
+	char *mask_str = kmalloc(mask_len, GFP_KERNEL);
+
+	if (mask_str == NULL)
+		return -ENOMEM;
+
+	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+	seq_printf(seq, "timestamp %lu\n", jiffies);
+	for_each_online_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+#ifdef CONFIG_SMP
+		struct sched_domain *sd;
+		int dcount = 0;
+#endif
+
+		/* runqueue-specific stats */
+		seq_printf(seq,
+		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
+		    cpu, rq->yld_count,
+		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
+		    rq->ttwu_count, rq->ttwu_local,
+		    rq->rq_cpu_time,
+		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+
+		seq_printf(seq, "\n");
+
+#ifdef CONFIG_SMP
+		/* domain-specific stats */
+		rcu_read_lock();
+		for_each_domain(cpu, sd) {
+			enum cpu_idle_type itype;
+
+			cpumask_scnprintf(mask_str, mask_len,
+					  sched_domain_span(sd));
+			seq_printf(seq, "domain%d %s", dcount++, mask_str);
+			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+					itype++) {
+				seq_printf(seq, " %u %u %u %u %u %u %u %u",
+				    sd->lb_count[itype],
+				    sd->lb_balanced[itype],
+				    sd->lb_failed[itype],
+				    sd->lb_imbalance[itype],
+				    sd->lb_gained[itype],
+				    sd->lb_hot_gained[itype],
+				    sd->lb_nobusyq[itype],
+				    sd->lb_nobusyg[itype]);
+			}
+			seq_printf(seq,
+				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
+			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
+			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
+			    sd->ttwu_move_balance);
+		}
+		rcu_read_unlock();
+#endif
+	}
+	kfree(mask_str);
+	return 0;
+}
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
+	char *buf = kmalloc(size, GFP_KERNEL);
+	struct seq_file *m;
+	int res;
+
+	if (!buf)
+		return -ENOMEM;
+	res = single_open(file, show_schedstat, NULL);
+	if (!res) {
+		m = file->private_data;
+		m->buf = buf;
+		m->size = size;
+	} else
+		kfree(buf);
+	return res;
+}
+
+static const struct file_operations proc_schedstat_operations = {
+	.open    = schedstat_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static int __init proc_schedstat_init(void)
+{
+	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
+	return 0;
+}
+module_init(proc_schedstat_init);
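+
A consumer of /proc/schedstat is expected to check the version header before parsing the cpuN and domainN rows; a minimal userspace sketch (requiring 15 here, to match SCHEDSTAT_VERSION above, is an assumption about the running kernel):

	#include <stdio.h>

	int main(void)
	{
		unsigned int version = 0;
		FILE *f = fopen("/proc/schedstat", "r");

		if (!f)
			return 1;
		if (fscanf(f, "version %u", &version) != 1 || version != 15)
			fprintf(stderr, "unexpected schedstat format (version %u)\n",
				version);
		fclose(f);
		return 0;
	}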
diff --git a/kernel/sched_stats.h b/kernel/sched/stats.h
similarity index 70%
rename from kernel/sched_stats.h
rename to kernel/sched/stats.h
index 87f9e36..2ef90a5 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched/stats.h
@@ -1,108 +1,5 @@
 
 #ifdef CONFIG_SCHEDSTATS
-/*
- * bump this up when changing the output format or the meaning of an existing
- * format, so that tools can adapt (or abort)
- */
-#define SCHEDSTAT_VERSION 15
-
-static int show_schedstat(struct seq_file *seq, void *v)
-{
-	int cpu;
-	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
-	char *mask_str = kmalloc(mask_len, GFP_KERNEL);
-
-	if (mask_str == NULL)
-		return -ENOMEM;
-
-	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
-	seq_printf(seq, "timestamp %lu\n", jiffies);
-	for_each_online_cpu(cpu) {
-		struct rq *rq = cpu_rq(cpu);
-#ifdef CONFIG_SMP
-		struct sched_domain *sd;
-		int dcount = 0;
-#endif
-
-		/* runqueue-specific stats */
-		seq_printf(seq,
-		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
-		    cpu, rq->yld_count,
-		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
-		    rq->ttwu_count, rq->ttwu_local,
-		    rq->rq_cpu_time,
-		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
-
-		seq_printf(seq, "\n");
-
-#ifdef CONFIG_SMP
-		/* domain-specific stats */
-		rcu_read_lock();
-		for_each_domain(cpu, sd) {
-			enum cpu_idle_type itype;
-
-			cpumask_scnprintf(mask_str, mask_len,
-					  sched_domain_span(sd));
-			seq_printf(seq, "domain%d %s", dcount++, mask_str);
-			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
-					itype++) {
-				seq_printf(seq, " %u %u %u %u %u %u %u %u",
-				    sd->lb_count[itype],
-				    sd->lb_balanced[itype],
-				    sd->lb_failed[itype],
-				    sd->lb_imbalance[itype],
-				    sd->lb_gained[itype],
-				    sd->lb_hot_gained[itype],
-				    sd->lb_nobusyq[itype],
-				    sd->lb_nobusyg[itype]);
-			}
-			seq_printf(seq,
-				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
-			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
-			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
-			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
-			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
-			    sd->ttwu_move_balance);
-		}
-		rcu_read_unlock();
-#endif
-	}
-	kfree(mask_str);
-	return 0;
-}
-
-static int schedstat_open(struct inode *inode, struct file *file)
-{
-	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
-	char *buf = kmalloc(size, GFP_KERNEL);
-	struct seq_file *m;
-	int res;
-
-	if (!buf)
-		return -ENOMEM;
-	res = single_open(file, show_schedstat, NULL);
-	if (!res) {
-		m = file->private_data;
-		m->buf = buf;
-		m->size = size;
-	} else
-		kfree(buf);
-	return res;
-}
-
-static const struct file_operations proc_schedstat_operations = {
-	.open    = schedstat_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = single_release,
-};
-
-static int __init proc_schedstat_init(void)
-{
-	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
-	return 0;
-}
-module_init(proc_schedstat_init);
 
 /*
  * Expects runqueue lock to be held for atomicity of update
@@ -283,8 +180,7 @@
 		return;
 
 	raw_spin_lock(&cputimer->lock);
-	cputimer->cputime.utime =
-		cputime_add(cputimer->cputime.utime, cputime);
+	cputimer->cputime.utime += cputime;
 	raw_spin_unlock(&cputimer->lock);
 }
 
@@ -307,8 +203,7 @@
 		return;
 
 	raw_spin_lock(&cputimer->lock);
-	cputimer->cputime.stime =
-		cputime_add(cputimer->cputime.stime, cputime);
+	cputimer->cputime.stime += cputime;
 	raw_spin_unlock(&cputimer->lock);
 }
 
diff --git a/kernel/sched_stoptask.c b/kernel/sched/stop_task.c
similarity index 96%
rename from kernel/sched_stoptask.c
rename to kernel/sched/stop_task.c
index 8b44e7f..7b386e8 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched/stop_task.c
@@ -1,3 +1,5 @@
+#include "sched.h"
+
 /*
  * stop-task scheduling class.
  *
@@ -80,7 +82,7 @@
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
-static const struct sched_class stop_sched_class = {
+const struct sched_class stop_sched_class = {
 	.next			= &rt_sched_class,
 
 	.enqueue_task		= enqueue_task_stop,
diff --git a/kernel/signal.c b/kernel/signal.c
index b3f78d0..56ce3a6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1629,10 +1629,8 @@
 	info.si_uid = __task_cred(tsk)->uid;
 	rcu_read_unlock();
 
-	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-				tsk->signal->utime));
-	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-				tsk->signal->stime));
+	info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
+	info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
 
 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
@@ -1994,8 +1992,6 @@
 		 */
 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
 			sig->group_exit_code = signr;
-		else
-			WARN_ON_ONCE(!current->ptrace);
 
 		sig->group_stop_count = 0;
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2c71d91..4eb3a0f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -347,12 +347,12 @@
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
-	rcu_irq_exit();
 #ifdef CONFIG_NO_HZ
 	/* Make sure that timer wheel updates are propagated */
 	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
-		tick_nohz_stop_sched_tick(0);
+		tick_nohz_irq_exit();
 #endif
+	rcu_irq_exit();
 	preempt_enable_no_resched();
 }
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 481611f..ddf8155 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1605,7 +1605,7 @@
 	unsigned long maxrss = 0;
 
 	memset((char *) r, 0, sizeof *r);
-	utime = stime = cputime_zero;
+	utime = stime = 0;
 
 	if (who == RUSAGE_THREAD) {
 		task_times(current, &utime, &stime);
@@ -1635,8 +1635,8 @@
 
 		case RUSAGE_SELF:
 			thread_group_times(p, &tgutime, &tgstime);
-			utime = cputime_add(utime, tgutime);
-			stime = cputime_add(stime, tgstime);
+			utime += tgutime;
+			stime += tgstime;
 			r->ru_nvcsw += p->signal->nvcsw;
 			r->ru_nivcsw += p->signal->nivcsw;
 			r->ru_minflt += p->signal->min_flt;
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b51..a650694 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@
 
 	fput(file);
 out_putname:
-	putname(pathname);
+	__putname(pathname);
 out:
 	return result;
 }
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index b26c2228..2cf9cc7 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -25,7 +25,7 @@
 config GENERIC_CLOCKEVENTS_BUILD
 	bool
 	default y
-	depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
+	depends on GENERIC_CLOCKEVENTS
 
 config GENERIC_CLOCKEVENTS_MIN_ADJUST
 	bool
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c4eb71c..9cd928f 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/sysdev.h>
 
 #include "tick-internal.h"
 
@@ -387,7 +386,6 @@
 	 * released list and do a notify add later.
 	 */
 	if (old) {
-		old->event_handler = clockevents_handle_noop;
 		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
 		list_del(&old->list);
 		list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index da2f760..a45ca16 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -23,8 +23,8 @@
  *   o Allow clocksource drivers to be unregistered
  */
 
+#include <linux/device.h>
 #include <linux/clocksource.h>
-#include <linux/sysdev.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
@@ -647,7 +647,7 @@
 
 /**
  * __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t:		clocksource to be registered
+ * @cs:		clocksource to be registered
  * @scale:	Scale factor multiplied against freq to get clocksource hz
  * @freq:	clocksource frequency (cycles per second) divided by scale
  *
@@ -699,7 +699,7 @@
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t:		clocksource to be registered
+ * @cs:		clocksource to be registered
  * @scale:	Scale factor multiplied against freq to get clocksource hz
  * @freq:	clocksource frequency (cycles per second) divided by scale
  *
@@ -727,7 +727,7 @@
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t:		clocksource to be registered
+ * @cs:		clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
@@ -761,6 +761,8 @@
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:		clocksource to be changed
+ * @rating:	new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs:	clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,13 +790,14 @@
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev:	unused
+ * @attr:	unused
  * @buf:	char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
  */
 static ssize_t
-sysfs_show_current_clocksources(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+sysfs_show_current_clocksources(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	ssize_t count = 0;
 
@@ -807,14 +811,15 @@
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev:	unused
+ * @attr:	unused
  * @buf:	name of override clocksource
  * @count:	length of buffer
  *
  * Takes input from sysfs interface for manually overriding the default
  * clocksource selection.
  */
-static ssize_t sysfs_override_clocksource(struct sys_device *dev,
-					  struct sysdev_attribute *attr,
+static ssize_t sysfs_override_clocksource(struct device *dev,
+					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
 	size_t ret = count;
@@ -842,13 +847,14 @@
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev:	unused
+ * @attr:	unused
  * @buf:	char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
  */
 static ssize_t
-sysfs_show_available_clocksources(struct sys_device *dev,
-				  struct sysdev_attribute *attr,
+sysfs_show_available_clocksources(struct device *dev,
+				  struct device_attribute *attr,
 				  char *buf)
 {
 	struct clocksource *src;
@@ -877,35 +883,36 @@
 /*
  * Sysfs setup bits:
  */
-static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
+static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
 		   sysfs_override_clocksource);
 
-static SYSDEV_ATTR(available_clocksource, 0444,
+static DEVICE_ATTR(available_clocksource, 0444,
 		   sysfs_show_available_clocksources, NULL);
 
-static struct sysdev_class clocksource_sysclass = {
+static struct bus_type clocksource_subsys = {
 	.name = "clocksource",
+	.dev_name = "clocksource",
 };
 
-static struct sys_device device_clocksource = {
+static struct device device_clocksource = {
 	.id	= 0,
-	.cls	= &clocksource_sysclass,
+	.bus	= &clocksource_subsys,
 };
 
 static int __init init_clocksource_sysfs(void)
 {
-	int error = sysdev_class_register(&clocksource_sysclass);
+	int error = subsys_system_register(&clocksource_subsys, NULL);
 
 	if (!error)
-		error = sysdev_register(&device_clocksource);
+		error = device_register(&device_clocksource);
 	if (!error)
-		error = sysdev_create_file(
+		error = device_create_file(
 				&device_clocksource,
-				&attr_current_clocksource);
+				&dev_attr_current_clocksource);
 	if (!error)
-		error = sysdev_create_file(
+		error = device_create_file(
 				&device_clocksource,
-				&attr_available_clocksource);
+				&dev_attr_available_clocksource);
 	return error;
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 4042064..7656642 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -275,42 +275,17 @@
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
-	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
-	struct tick_sched *ts;
+	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 	int cpu;
 
-	local_irq_save(flags);
-
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
-	/*
-	 * Call to tick_nohz_start_idle stops the last_update_time from being
-	 * updated. Thus, it must not be called in the event we are called from
-	 * irq_exit() with the prior state different than idle.
-	 */
-	if (!inidle && !ts->inidle)
-		goto end;
-
-	/*
-	 * Set ts->inidle unconditionally. Even if the system did not
-	 * switch to NOHZ mode the cpu frequency governers rely on the
-	 * update of the idle time accounting in tick_nohz_start_idle().
-	 */
-	ts->inidle = 1;
-
 	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
@@ -326,10 +301,10 @@
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-		goto end;
+		return;
 
 	if (need_resched())
-		goto end;
+		return;
 
 	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
 		static int ratelimit;
@@ -339,7 +314,7 @@
 			       (unsigned int) local_softirq_pending());
 			ratelimit++;
 		}
-		goto end;
+		return;
 	}
 
 	ts->idle_calls++;
@@ -434,7 +409,6 @@
 			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
 			ts->tick_stopped = 1;
 			ts->idle_jiffies = last_jiffies;
-			rcu_enter_nohz();
 		}
 
 		ts->idle_sleeps++;
@@ -472,8 +446,64 @@
 	ts->next_jiffies = next_jiffies;
 	ts->last_jiffies = last_jiffies;
 	ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
-	local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick.
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible for calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ *  to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+	struct tick_sched *ts;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	/*
+	 * Update the idle state in the scheduler domain hierarchy
+	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
+	 * State will be updated to busy during the first busy tick after
+	 * exiting idle.
+	 */
+	set_cpu_sd_state_idle();
+
+	local_irq_disable();
+
+	ts = &__get_cpu_var(tick_cpu_sched);
+	/*
+	 * Set ts->inidle unconditionally. Even if the system did not
+	 * switch to NOHZ mode the cpu frequency governors rely on the
+	 * update of the idle time accounting in tick_nohz_start_idle().
+	 */
+	ts->inidle = 1;
+	tick_nohz_stop_sched_tick(ts);
+
+	local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+	if (!ts->inidle)
+		return;
+
+	tick_nohz_stop_sched_tick(ts);
 }
 
 /**
@@ -515,11 +545,13 @@
 }
 
 /**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
  * Restart the idle tick when the CPU is woken up from idle
+ * This also exits the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
  */
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -529,6 +561,7 @@
 	ktime_t now;
 
 	local_irq_disable();
+
 	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
 		now = ktime_get();
 
@@ -543,8 +576,6 @@
 
 	ts->inidle = 0;
 
-	rcu_exit_nohz();
-
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2378413..0c63581 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -131,7 +131,7 @@
 	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
-	/* return delta convert to nanoseconds using ntp adjusted mult. */
+	/* return delta converted to nanoseconds. */
 	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
@@ -813,11 +813,11 @@
 	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
 	 *
 	 * Note we subtract one in the shift, so that error is really error*2.
-	 * This "saves" dividing(shifting) intererval twice, but keeps the
-	 * (error > interval) comparision as still measuring if error is
+	 * This "saves" dividing(shifting) interval twice, but keeps the
+	 * (error > interval) comparison as still measuring if error is
 	 * larger then half an interval.
 	 *
-	 * Note: It does not "save" on aggrivation when reading the code.
+	 * Note: It does not "save" on aggravation when reading the code.
 	 */
 	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
 	if (error > interval) {
@@ -833,7 +833,7 @@
 		 * nanosecond, and store the amount rounded up into
 		 * the error. This causes the likely below to be unlikely.
 		 *
-		 * The properfix is to avoid rounding up by using
+		 * The proper fix is to avoid rounding up by using
 		 * the high precision timekeeper.xtime_nsec instead of
 		 * xtime.tv_nsec everywhere. Fixing this will take some
 		 * time.
diff --git a/kernel/timer.c b/kernel/timer.c
index 9c3c62b..a297ffc 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -427,6 +427,12 @@
 	}
 }
 
+/* Stub timer callback for improperly used timers. */
+static void stub_timer(unsigned long data)
+{
+	WARN_ON(1);
+}
+
 /*
  * fixup_activate is called when:
  * - an active object is activated
@@ -450,7 +456,8 @@
 			debug_object_activate(timer, &timer_debug_descr);
 			return 0;
 		} else {
-			WARN_ON_ONCE(1);
+			setup_timer(timer, stub_timer, 0);
+			return 1;
 		}
 		return 0;
 
@@ -480,12 +487,40 @@
 	}
 }
 
+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */
+static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+	struct timer_list *timer = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_NOTAVAILABLE:
+		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+			/*
+			 * This is not really a fixup. The timer was
+			 * statically initialized. We just make sure that it
+			 * is tracked in the object tracker.
+			 */
+			debug_object_init(timer, &timer_debug_descr);
+			return 0;
+		} else {
+			setup_timer(timer, stub_timer, 0);
+			return 1;
+		}
+	default:
+		return 0;
+	}
+}
+
 static struct debug_obj_descr timer_debug_descr = {
-	.name		= "timer_list",
-	.debug_hint	= timer_debug_hint,
-	.fixup_init	= timer_fixup_init,
-	.fixup_activate	= timer_fixup_activate,
-	.fixup_free	= timer_fixup_free,
+	.name			= "timer_list",
+	.debug_hint		= timer_debug_hint,
+	.fixup_init		= timer_fixup_init,
+	.fixup_activate		= timer_fixup_activate,
+	.fixup_free		= timer_fixup_free,
+	.fixup_assert_init	= timer_fixup_assert_init,
 };
 
 static inline void debug_timer_init(struct timer_list *timer)
@@ -508,6 +543,11 @@
 	debug_object_free(timer, &timer_debug_descr);
 }
 
+static inline void debug_timer_assert_init(struct timer_list *timer)
+{
+	debug_object_assert_init(timer, &timer_debug_descr);
+}
+
 static void __init_timer(struct timer_list *timer,
 			 const char *name,
 			 struct lock_class_key *key);
@@ -531,6 +571,7 @@
 static inline void debug_timer_init(struct timer_list *timer) { }
 static inline void debug_timer_activate(struct timer_list *timer) { }
 static inline void debug_timer_deactivate(struct timer_list *timer) { }
+static inline void debug_timer_assert_init(struct timer_list *timer) { }
 #endif
 
 static inline void debug_init(struct timer_list *timer)
@@ -552,6 +593,11 @@
 	trace_timer_cancel(timer);
 }
 
+static inline void debug_assert_init(struct timer_list *timer)
+{
+	debug_timer_assert_init(timer);
+}
+
 static void __init_timer(struct timer_list *timer,
 			 const char *name,
 			 struct lock_class_key *key)
@@ -902,6 +948,8 @@
 	unsigned long flags;
 	int ret = 0;
 
+	debug_assert_init(timer);
+
 	timer_stats_timer_clear_start_info(timer);
 	if (timer_pending(timer)) {
 		base = lock_timer_base(timer, &flags);
@@ -932,6 +980,8 @@
 	unsigned long flags;
 	int ret = -1;
 
+	debug_assert_init(timer);
+
 	base = lock_timer_base(timer, &flags);
 
 	if (base->running_timer == timer)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 16fc34a..cdea7b5 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -402,7 +402,7 @@
 
 static struct dentry *blk_create_buf_file_callback(const char *filename,
 						   struct dentry *parent,
-						   int mode,
+						   umode_t mode,
 						   struct rchan_buf *buf,
 						   int *is_global)
 {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275..a3f1bc5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -338,7 +338,8 @@
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
+	TRACE_ITER_IRQ_INFO;
 
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@ -426,6 +427,7 @@
 	"record-cmd",
 	"overwrite",
 	"disable_on_free",
+	"irq-info",
 	NULL
 };
 
@@ -1843,6 +1845,33 @@
 	trace_event_read_unlock();
 }
 
+static void
+get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+{
+	unsigned long count;
+	int cpu;
+
+	*total = 0;
+	*entries = 0;
+
+	for_each_tracing_cpu(cpu) {
+		count = ring_buffer_entries_cpu(tr->buffer, cpu);
+		/*
+		 * If this buffer has skipped entries, then we hold all
+		 * entries for the trace and we need to ignore the
+		 * ones before the time stamp.
+		 */
+		if (tr->data[cpu]->skipped_entries) {
+			count -= tr->data[cpu]->skipped_entries;
+			/* total is the same as the entries */
+			*total += count;
+		} else
+			*total += count +
+				ring_buffer_overrun_cpu(tr->buffer, cpu);
+		*entries += count;
+	}
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
 	seq_puts(m, "#                  _------=> CPU#            \n");
@@ -1855,12 +1884,35 @@
 	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
 }
 
-static void print_func_help_header(struct seq_file *m)
+static void print_event_info(struct trace_array *tr, struct seq_file *m)
 {
-	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
+	unsigned long total;
+	unsigned long entries;
+
+	get_total_entries(tr, &total, &entries);
+	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
+		   entries, total, num_online_cpus());
+	seq_puts(m, "#\n");
+}
+
+static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
+{
+	print_event_info(tr, m);
+	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
 	seq_puts(m, "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+{
+	print_event_info(tr, m);
+	seq_puts(m, "#                              _-----=> irqs-off\n");
+	seq_puts(m, "#                             / _----=> need-resched\n");
+	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
+	seq_puts(m, "#                            || / _--=> preempt-depth\n");
+	seq_puts(m, "#                            ||| /     delay\n");
+	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |       |   ||||       |         |\n");
+}
 
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
@@ -1869,32 +1921,14 @@
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long entries = 0;
-	unsigned long total = 0;
-	unsigned long count;
+	unsigned long entries;
+	unsigned long total;
 	const char *name = "preemption";
-	int cpu;
 
 	if (type)
 		name = type->name;
 
-
-	for_each_tracing_cpu(cpu) {
-		count = ring_buffer_entries_cpu(tr->buffer, cpu);
-		/*
-		 * If this buffer has skipped entries, then we hold all
-		 * entries for the trace and we need to ignore the
-		 * ones before the time stamp.
-		 */
-		if (tr->data[cpu]->skipped_entries) {
-			count -= tr->data[cpu]->skipped_entries;
-			/* total is the same as the entries */
-			total += count;
-		} else
-			total += count +
-				ring_buffer_overrun_cpu(tr->buffer, cpu);
-		entries += count;
-	}
+	get_total_entries(tr, &total, &entries);
 
 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -2140,6 +2174,21 @@
 	return print_trace_fmt(iter);
 }
 
+void trace_latency_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	/* print nothing if the buffers are empty */
+	if (trace_empty(iter))
+		return;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+		print_trace_header(m, iter);
+
+	if (!(trace_flags & TRACE_ITER_VERBOSE))
+		print_lat_help_header(m);
+}
+
 void trace_default_header(struct seq_file *m)
 {
 	struct trace_iterator *iter = m->private;
@@ -2155,8 +2204,12 @@
 		if (!(trace_flags & TRACE_ITER_VERBOSE))
 			print_lat_help_header(m);
 	} else {
-		if (!(trace_flags & TRACE_ITER_VERBOSE))
-			print_func_help_header(m);
+		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
+			if (trace_flags & TRACE_ITER_IRQ_INFO)
+				print_func_help_header_irq(iter->tr, m);
+			else
+				print_func_help_header(iter->tr, m);
+		}
 	}
 }
 
@@ -4385,7 +4438,7 @@
 };
 
 struct dentry *trace_create_file(const char *name,
-				 mode_t mode,
+				 umode_t mode,
 				 struct dentry *parent,
 				 void *data,
 				 const struct file_operations *fops)
@@ -4775,6 +4828,7 @@
 {
 	__ftrace_dump(true, oops_dump_mode);
 }
+EXPORT_SYMBOL_GPL(ftrace_dump);
 
 __init static int tracer_alloc_buffers(void)
 {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 092e1f8..b93ecba 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -312,7 +312,7 @@
 void tracing_reset_current_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *trace_create_file(const char *name,
-				 mode_t mode,
+				 umode_t mode,
 				 struct dentry *parent,
 				 void *data,
 				 const struct file_operations *fops);
@@ -370,6 +370,7 @@
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
+void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 int trace_empty(struct trace_iterator *iter);
@@ -654,6 +655,7 @@
 	TRACE_ITER_RECORD_CMD		= 0x100000,
 	TRACE_ITER_OVERWRITE		= 0x200000,
 	TRACE_ITER_STOP_ON_FREE		= 0x400000,
+	TRACE_ITER_IRQ_INFO		= 0x800000,
 };
 
 /*
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 95dc31e..f04cc31 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,6 +27,12 @@
 #include "trace.h"
 #include "trace_output.h"
 
+#define DEFAULT_SYS_FILTER_MESSAGE					\
+	"### global filter ###\n"					\
+	"# Use this to set filters for multiple events.\n"		\
+	"# Only events with the given fields will be affected.\n"	\
+	"# If no events are modified, an error message will be displayed here"
+
 enum filter_op_ids
 {
 	OP_OR,
@@ -646,7 +652,7 @@
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
-		trace_seq_printf(s, "none\n");
+		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
 	mutex_unlock(&event_mutex);
 }
 
@@ -1838,7 +1844,10 @@
 	if (!filter)
 		goto out;
 
-	replace_filter_string(filter, filter_string);
+	/* System filters just show a default message */
+	kfree(filter->filter_string);
+	filter->filter_string = NULL;
+
 	/*
 	 * No event actually uses the system filter
 	 * we can free it without synchronize_sched().
@@ -1848,14 +1857,12 @@
 
 	parse_init(ps, filter_ops, filter_string);
 	err = filter_parse(ps);
-	if (err) {
-		append_filter_err(ps, system->filter);
-		goto out;
-	}
+	if (err)
+		goto err_filter;
 
 	err = replace_system_preds(system, ps, filter_string);
 	if (err)
-		append_filter_err(ps, system->filter);
+		goto err_filter;
 
 out:
 	filter_opstack_clear(ps);
@@ -1865,6 +1872,11 @@
 	mutex_unlock(&event_mutex);
 
 	return err;
+
+err_filter:
+	replace_filter_string(filter, filter_string);
+	append_filter_err(ps, system->filter);
+	goto out;
 }
 
 #ifdef CONFIG_PERF_EVENTS
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 20dad0d..99d20e9 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -280,9 +280,20 @@
 }
 
 static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
-static void irqsoff_print_header(struct seq_file *s) { }
 static void irqsoff_trace_open(struct trace_iterator *iter) { }
 static void irqsoff_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#else
+static void irqsoff_print_header(struct seq_file *s)
+{
+	trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 5199930..0d6ff35 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -627,11 +627,23 @@
 	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
 	unsigned long secs = (unsigned long)t;
 	char comm[TASK_COMM_LEN];
+	int ret;
 
 	trace_find_cmdline(entry->pid, comm);
 
-	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
-				comm, entry->pid, iter->cpu, secs, usec_rem);
+	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
+			       comm, entry->pid, iter->cpu);
+	if (!ret)
+		return 0;
+
+	if (trace_flags & TRACE_ITER_IRQ_INFO) {
+		ret = trace_print_lat_fmt(s, entry);
+		if (!ret)
+			return 0;
+	}
+
+	return trace_seq_printf(s, " %5lu.%06lu: ",
+				secs, usec_rem);
 }
 
 int trace_print_lat_context(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e4a70c0..ff791ea 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -280,9 +280,20 @@
 }
 
 static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s) { }
 static void wakeup_trace_open(struct trace_iterator *iter) { }
 static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_default_header(s);
+}
+#else
+static void wakeup_print_header(struct seq_file *s)
+{
+	trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 5bbfac8..23b4d78 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -127,7 +127,7 @@
 
 		local_irq_save(flags);
 		time = tsk->stime + tsk->utime;
-		dtime = cputime_sub(time, tsk->acct_timexpd);
+		dtime = time - tsk->acct_timexpd;
 		jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
 		delta = value.tv_sec;
 		delta = delta * USEC_PER_SEC + value.tv_usec;
diff --git a/kernel/wait.c b/kernel/wait.c
index 26fa779..7fdd9ea 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,10 +10,10 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 
-void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
 {
 	spin_lock_init(&q->lock);
-	lockdep_set_class(&q->lock, key);
+	lockdep_set_class_and_name(&q->lock, key, name);
 	INIT_LIST_HEAD(&q->task_list);
 }
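
The new __init_waitqueue_head() signature takes the queue's name so lockdep reports identify a wait queue's lock class by name rather than only by key. The matching init_waitqueue_head() wrapper lives in include/linux/wait.h and is not shown in this hunk, so the macro below is only a sketch of how a caller-side wrapper could pass that name; my_init_waitqueue_head is an illustrative name, not part of this patch.

	#define my_init_waitqueue_head(q)				\
		do {							\
			/* one lock class per call site, named after the queue */ \
			static struct lock_class_key __key;		\
									\
			__init_waitqueue_head((q), #q, &__key);		\
		} while (0)
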
 
diff --git a/lib/Kconfig b/lib/Kconfig
index 32f3e5a..f34be64 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -244,6 +244,9 @@
 	bool
 	depends on SMP
 
+config DQL
+	bool
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
@@ -270,10 +273,9 @@
 	  If unsure, say N.
 
 config CORDIC
-	tristate "Cordic function"
+	tristate "CORDIC algorithm"
 	help
-	  The option provides arithmetic function using cordic algorithm
-	  so its calculations are in fixed point. Modules can select this
-	  when they require this function. Module will be called cordic.
+	  This option provides an implementation of the CORDIC algorithm;
+	  calculations are in fixed point. Module will be called cordic.
 
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index a4da283..c0ffaaf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -17,7 +17,7 @@
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
-lib-y	+= kobject.o kref.o klist.o
+lib-y	+= kobject.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
@@ -115,6 +115,8 @@
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
+obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/cordic.c b/lib/cordic.c
index aa27a88..6cf4778 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -96,6 +96,6 @@
 }
 EXPORT_SYMBOL(cordic_calc_iq);
 
-MODULE_DESCRIPTION("Cordic functions");
+MODULE_DESCRIPTION("CORDIC algorithm");
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a78b7c6..77cb245 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -268,12 +268,16 @@
  * Try to repair the damage, so we have a better chance to get useful
  * debug output.
  */
-static void
+static int
 debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
 		   void * addr, enum debug_obj_state state)
 {
+	int fixed = 0;
+
 	if (fixup)
-		debug_objects_fixups += fixup(addr, state);
+		fixed = fixup(addr, state);
+	debug_objects_fixups += fixed;
+	return fixed;
 }
 
 static void debug_object_is_on_stack(void *addr, int onstack)
@@ -386,6 +390,9 @@
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	struct debug_obj o = { .object = addr,
+			       .state = ODEBUG_STATE_NOTAVAILABLE,
+			       .descr = descr };
 
 	if (!debug_objects_enabled)
 		return;
@@ -425,8 +432,9 @@
 	 * let the type specific code decide whether this is
 	 * true or not.
 	 */
-	debug_object_fixup(descr->fixup_activate, addr,
-			   ODEBUG_STATE_NOTAVAILABLE);
+	if (debug_object_fixup(descr->fixup_activate, addr,
+			   ODEBUG_STATE_NOTAVAILABLE))
+		debug_print_object(&o, "activate");
 }
 
 /**
@@ -563,6 +571,44 @@
 }
 
 /**
+ * debug_object_assert_init - debug checks when object should be init-ed
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj) {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		/*
+		 * Maybe the object is static.  Let the type specific
+		 * code decide what to do.
+		 */
+		if (debug_object_fixup(descr->fixup_assert_init, addr,
+				       ODEBUG_STATE_NOTAVAILABLE))
+			debug_print_object(&o, "assert_init");
+		return;
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
  * debug_object_active_state - debug checks object usage state machine
  * @addr:	address of the object
  * @descr:	pointer to an object specific debug description structure
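
debug_object_assert_init() gives a subsystem a way to check, before operating on an object, that the object was initialized or can be fixed up as a statically initialized one. A minimal caller sketch under that reading; struct foo, its is_static flag and the foo_* names are hypothetical, and only debug_object_assert_init(), debug_object_init() and the fixup_assert_init hook come from debugobjects.

	#include <linux/debugobjects.h>

	struct foo { int is_static; /* ... */ };	/* hypothetical object */

	static struct debug_obj_descr foo_debug_descr;

	/* Called for an untracked foo; return 1 if the situation was fixed up. */
	static int foo_fixup_assert_init(void *addr, enum debug_obj_state state)
	{
		struct foo *f = addr;

		if (f->is_static) {			/* assumed test for static objects */
			debug_object_init(f, &foo_debug_descr);
			return 1;
		}
		return 0;
	}

	static struct debug_obj_descr foo_debug_descr = {
		.name			= "foo",
		.fixup_assert_init	= foo_fixup_assert_init,
	};

	void foo_use(struct foo *f)
	{
		/* Invokes the fixup (and reports) if f was never initialized. */
		debug_object_assert_init(f, &foo_debug_descr);
		/* ... */
	}
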
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index a7b80c1..31c5f76 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -1,4 +1,3 @@
-/* vi: set sw = 4 ts = 4: */
 /*	Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
 
 	Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
@@ -691,7 +690,7 @@
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
 
 	if (!outbuf) {
-		error("Could not allocate output bufer");
+		error("Could not allocate output buffer");
 		return RETVAL_OUT_OF_MEMORY;
 	}
 	if (buf)
@@ -699,7 +698,7 @@
 	else
 		inbuf = malloc(BZIP2_IOBUF_SIZE);
 	if (!inbuf) {
-		error("Could not allocate input bufer");
+		error("Could not allocate input buffer");
 		i = RETVAL_OUT_OF_MEMORY;
 		goto exit_0;
 	}
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 476c65a..32adb73 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -562,7 +562,7 @@
 	else
 		inbuf = malloc(LZMA_IOBUF_SIZE);
 	if (!inbuf) {
-		error("Could not allocate input bufer");
+		error("Could not allocate input buffer");
 		goto exit_0;
 	}
 
diff --git a/lib/devres.c b/lib/devres.c
index 7c0e953..4fbc09e 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -85,6 +85,57 @@
 }
 EXPORT_SYMBOL(devm_iounmap);
 
+/**
+ * devm_request_and_ioremap() - Check, request region, and ioremap resource
+ * @dev: Generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
+ * everything is undone on driver detach. Checks arguments, so you can feed
+ * it the result from e.g. platform_get_resource() directly. Returns the
+ * remapped pointer or NULL on error. Usage example:
+ *
+ *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *	base = devm_request_and_ioremap(&pdev->dev, res);
+ *	if (!base)
+ *		return -EADDRNOTAVAIL;
+ */
+void __iomem *devm_request_and_ioremap(struct device *dev,
+			struct resource *res)
+{
+	resource_size_t size;
+	const char *name;
+	void __iomem *dest_ptr;
+
+	BUG_ON(!dev);
+
+	if (!res || resource_type(res) != IORESOURCE_MEM) {
+		dev_err(dev, "invalid resource\n");
+		return NULL;
+	}
+
+	size = resource_size(res);
+	name = res->name ?: dev_name(dev);
+
+	if (!devm_request_mem_region(dev, res->start, size, name)) {
+		dev_err(dev, "can't request region for resource %pR\n", res);
+		return NULL;
+	}
+
+	if (res->flags & IORESOURCE_CACHEABLE)
+		dest_ptr = devm_ioremap(dev, res->start, size);
+	else
+		dest_ptr = devm_ioremap_nocache(dev, res->start, size);
+
+	if (!dest_ptr) {
+		dev_err(dev, "ioremap failed for resource %pR\n", res);
+		devm_release_mem_region(dev, res->start, size);
+	}
+
+	return dest_ptr;
+}
+EXPORT_SYMBOL(devm_request_and_ioremap);
+
 #ifdef CONFIG_HAS_IOPORT
 /*
  * Generic iomap devres
@@ -348,5 +399,5 @@
 	}
 }
 EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif
-#endif
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_HAS_IOPORT */
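
The kernel-doc above already shows the core call sequence; the hypothetical probe below only fills in the surrounding platform-driver boilerplate so the request/ioremap/auto-release pairing is visible end to end. foo_probe and its device are illustrative, not part of this patch.

	#include <linux/platform_device.h>
	#include <linux/io.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_request_and_ioremap(&pdev->dev, res);
		if (!base)
			return -EADDRNOTAVAIL;	/* bad resource, busy region or failed ioremap */

		/* use base; the region and mapping are released automatically on detach */
		return 0;
	}
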
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
new file mode 100644
index 0000000..3d1bdcd
--- /dev/null
+++ b/lib/dynamic_queue_limits.c
@@ -0,0 +1,133 @@
+/*
+ * Dynamic byte queue limits.  See include/linux/dynamic_queue_limits.h
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/dynamic_queue_limits.h>
+
+#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+
+/* Records completed count and recalculates the queue limit */
+void dql_completed(struct dql *dql, unsigned int count)
+{
+	unsigned int inprogress, prev_inprogress, limit;
+	unsigned int ovlimit, all_prev_completed, completed;
+
+	/* Can't complete more than what's in queue */
+	BUG_ON(count > dql->num_queued - dql->num_completed);
+
+	completed = dql->num_completed + count;
+	limit = dql->limit;
+	ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+	inprogress = dql->num_queued - completed;
+	prev_inprogress = dql->prev_num_queued - dql->num_completed;
+	all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+
+	if ((ovlimit && !inprogress) ||
+	    (dql->prev_ovlimit && all_prev_completed)) {
+		/*
+		 * Queue considered starved if:
+		 *   - The queue was over-limit in the last interval,
+		 *     and there is no more data in the queue.
+		 *  OR
+		 *   - The queue was over-limit in the previous interval and
+		 *     when enqueuing it was possible that all queued data
+		 *     had been consumed.  This covers the case where the queue
+		 *     may have become starved between completion processing
+		 *     running and the next time enqueue was scheduled.
+		 *
+		 *     When queue is starved increase the limit by the amount
+		 *     of bytes both sent and completed in the last interval,
+		 *     plus any previous over-limit.
+		 */
+		limit += POSDIFF(completed, dql->prev_num_queued) +
+		     dql->prev_ovlimit;
+		dql->slack_start_time = jiffies;
+		dql->lowest_slack = UINT_MAX;
+	} else if (inprogress && prev_inprogress && !all_prev_completed) {
+		/*
+		 * Queue was not starved, check if the limit can be decreased.
+		 * A decrease is only considered if the queue has been busy in
+		 * the whole interval (the check above).
+		 *
+		 * If there is slack, i.e. the amount of excess data queued above
+		 * the amount needed to prevent starvation, the queue limit
+		 * can be decreased.  To avoid hysteresis we consider the
+		 * minimum amount of slack found over several iterations of the
+		 * completion routine.
+		 */
+		unsigned int slack, slack_last_objs;
+
+		/*
+		 * Slack is the maximum of
+		 *   - The queue limit plus previous over-limit minus twice
+		 *     the number of objects completed.  Note that two times
+		 *     number of completed bytes is a basis for an upper bound
+		 *     of the limit.
+		 *   - The portion of objects in the last queuing operation that
+		 *     was not part of a non-zero previous over-limit.  That is,
+		 *     "round down" by the non-overlimit portion of the last
+		 *     queueing operation.
+		 */
+		slack = POSDIFF(limit + dql->prev_ovlimit,
+		    2 * (completed - dql->num_completed));
+		slack_last_objs = dql->prev_ovlimit ?
+		    POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
+
+		slack = max(slack, slack_last_objs);
+
+		if (slack < dql->lowest_slack)
+			dql->lowest_slack = slack;
+
+		if (time_after(jiffies,
+			       dql->slack_start_time + dql->slack_hold_time)) {
+			limit = POSDIFF(limit, dql->lowest_slack);
+			dql->slack_start_time = jiffies;
+			dql->lowest_slack = UINT_MAX;
+		}
+	}
+
+	/* Enforce bounds on limit */
+	limit = clamp(limit, dql->min_limit, dql->max_limit);
+
+	if (limit != dql->limit) {
+		dql->limit = limit;
+		ovlimit = 0;
+	}
+
+	dql->adj_limit = limit + completed;
+	dql->prev_ovlimit = ovlimit;
+	dql->prev_last_obj_cnt = dql->last_obj_cnt;
+	dql->num_completed = completed;
+	dql->prev_num_queued = dql->num_queued;
+}
+EXPORT_SYMBOL(dql_completed);
+
+void dql_reset(struct dql *dql)
+{
+	/* Reset all dynamic values */
+	dql->limit = 0;
+	dql->num_queued = 0;
+	dql->num_completed = 0;
+	dql->last_obj_cnt = 0;
+	dql->prev_num_queued = 0;
+	dql->prev_last_obj_cnt = 0;
+	dql->prev_ovlimit = 0;
+	dql->lowest_slack = UINT_MAX;
+	dql->slack_start_time = jiffies;
+}
+EXPORT_SYMBOL(dql_reset);
+
+int dql_init(struct dql *dql, unsigned hold_time)
+{
+	dql->max_limit = DQL_MAX_LIMIT;
+	dql->min_limit = 0;
+	dql->slack_hold_time = hold_time;
+	dql_reset(dql);
+	return 0;
+}
+EXPORT_SYMBOL(dql_init);
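
dql_completed(), dql_reset() and dql_init() above are the completion-side half of the API; the enqueue-side helpers dql_queued() and dql_avail() live in include/linux/dynamic_queue_limits.h, which is not shown here. The sketch below is therefore an assumption about how a network driver would wire the two halves together; foo_ring, foo_stop_queue() and foo_wake_queue() are hypothetical.

	#include <linux/dynamic_queue_limits.h>

	struct foo_ring { struct dql dql; /* ... */ };	/* hypothetical TX ring */

	static void foo_xmit(struct foo_ring *ring, unsigned int bytes)
	{
		dql_queued(&ring->dql, bytes);		/* account bytes handed to hardware */
		if (dql_avail(&ring->dql) < 0)
			foo_stop_queue(ring);		/* over the current limit: pause */
	}

	static void foo_tx_complete(struct foo_ring *ring, unsigned int bytes)
	{
		dql_completed(&ring->dql, bytes);	/* feed back completions, retune the limit */
		if (dql_avail(&ring->dql) >= 0)
			foo_wake_queue(ring);		/* back under the limit: resume */
	}
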
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 4f75540..b4801f5 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -149,7 +149,7 @@
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
 
-static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
+static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
 				struct dentry *parent, unsigned long *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_ul);
@@ -169,7 +169,7 @@
 			debugfs_stacktrace_depth_set, "%llu\n");
 
 static struct dentry *debugfs_create_stacktrace_depth(
-	const char *name, mode_t mode,
+	const char *name, umode_t mode,
 	struct dentry *parent, unsigned long *value)
 {
 	return debugfs_create_file(name, mode, parent, value,
@@ -193,7 +193,7 @@
 DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
 			debugfs_atomic_t_set, "%lld\n");
 
-static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
+static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
 				struct dentry *parent, atomic_t *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
@@ -202,7 +202,7 @@
 struct dentry *fault_create_debugfs_attr(const char *name,
 			struct dentry *parent, struct fault_attr *attr)
 {
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 
 	dir = debugfs_create_dir(name, parent);
diff --git a/lib/kobject.c b/lib/kobject.c
index 640bd98..c33d7a1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,43 +746,11 @@
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
-	return kset_find_obj_hinted(kset, name, NULL);
-}
-
-/**
- * kset_find_obj_hinted - search for object in kset given a predecessor hint.
- * @kset: kset we're looking in.
- * @name: object's name.
- * @hint: hint to possible object's predecessor.
- *
- * Check the hint's next object and if it is a match return it directly,
- * otherwise, fall back to the behavior of kset_find_obj().  Either way
- * a reference for the returned object is held and the reference on the
- * hinted object is released.
- */
-struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
-				     struct kobject *hint)
-{
 	struct kobject *k;
 	struct kobject *ret = NULL;
 
 	spin_lock(&kset->list_lock);
 
-	if (!hint)
-		goto slow_search;
-
-	/* end of list detection */
-	if (hint->entry.next == kset->list.next)
-		goto slow_search;
-
-	k = container_of(hint->entry.next, struct kobject, entry);
-	if (!kobject_name(k) || strcmp(kobject_name(k), name))
-		goto slow_search;
-
-	ret = kobject_get(k);
-	goto unlock_exit;
-
-slow_search:
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
 			ret = kobject_get(k);
@@ -790,12 +758,7 @@
 		}
 	}
 
-unlock_exit:
 	spin_unlock(&kset->list_lock);
-
-	if (hint)
-		kobject_put(hint);
-
 	return ret;
 }
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index ad72a03..e66e9b6 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -259,6 +259,9 @@
 		struct sk_buff *skb;
 		size_t len;
 
+		if (!netlink_has_listeners(uevent_sock, 1))
+			continue;
+
 		/* allocate message with the maximum possible size */
 		len = strlen(action_string) + strlen(devpath) + 2;
 		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
diff --git a/lib/kref.c b/lib/kref.c
deleted file mode 100644
index 3efb882..0000000
--- a/lib/kref.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * kref.c - library routines for handling generic reference counted objects
- *
- * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2004 IBM Corp.
- *
- * based on lib/kobject.c which was:
- * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
- *
- * This file is released under the GPLv2.
- *
- */
-
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-/**
- * kref_init - initialize object.
- * @kref: object in question.
- */
-void kref_init(struct kref *kref)
-{
-	atomic_set(&kref->refcount, 1);
-	smp_mb();
-}
-
-/**
- * kref_get - increment refcount for object.
- * @kref: object.
- */
-void kref_get(struct kref *kref)
-{
-	WARN_ON(!atomic_read(&kref->refcount));
-	atomic_inc(&kref->refcount);
-	smp_mb__after_atomic_inc();
-}
-
-/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- *
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0.  Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory.  Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-int kref_put(struct kref *kref, void (*release)(struct kref *kref))
-{
-	WARN_ON(release == NULL);
-	WARN_ON(release == (void (*)(struct kref *))kfree);
-
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		return 1;
-	}
-	return 0;
-}
-
-
-/**
- * kref_sub - subtract a number of refcounts for object.
- * @kref: object.
- * @count: Number of recounts to subtract.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- *
- * Subtract @count from the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0.  Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory.  Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-int kref_sub(struct kref *kref, unsigned int count,
-	     void (*release)(struct kref *kref))
-{
-	WARN_ON(release == NULL);
-	WARN_ON(release == (void (*)(struct kref *))kfree);
-
-	if (atomic_sub_and_test((int) count, &kref->refcount)) {
-		release(kref);
-		return 1;
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL(kref_init);
-EXPORT_SYMBOL(kref_get);
-EXPORT_SYMBOL(kref_put);
-EXPORT_SYMBOL(kref_sub);
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index 6a3bd48..75510e9 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,5 +1,6 @@
 #include <asm/div64.h>
 #include <linux/reciprocal_div.h>
+#include <linux/export.h>
 
 u32 reciprocal_value(u32 k)
 {
@@ -7,3 +8,4 @@
 	do_div(val, k);
 	return (u32)val;
 }
+EXPORT_SYMBOL(reciprocal_value);
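
reciprocal_value() precomputes a 32-bit multiplicative inverse so repeated divisions by the same constant reduce to a multiply and a shift; the matching reciprocal_divide() helper is in include/linux/reciprocal_div.h and is assumed in the sketch below, as is the foo_div10 name.

	#include <linux/reciprocal_div.h>

	static u32 foo_div10(u32 x)
	{
		u32 recip = reciprocal_value(10);	/* in real use, precompute this once */

		return reciprocal_divide(x, recip);	/* foo_div10(12345) == 1234 */
	}
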
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 993599e..8e75003 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -777,6 +777,18 @@
 	return string(buf, end, uuid, spec);
 }
 
+static
+char *netdev_feature_string(char *buf, char *end, const u8 *addr,
+		      struct printf_spec spec)
+{
+	spec.flags |= SPECIAL | SMALL | ZEROPAD;
+	if (spec.field_width == -1)
+		spec.field_width = 2 + 2 * sizeof(netdev_features_t);
+	spec.base = 16;
+
+	return number(buf, end, *(const netdev_features_t *)addr, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -824,6 +836,7 @@
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
  * - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'NF' For a netdev_features_t
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -896,6 +909,12 @@
 		       has_capability_noaudit(current, CAP_SYSLOG))))
 			ptr = NULL;
 		break;
+	case 'N':
+		switch (fmt[1]) {
+		case 'F':
+			return netdev_feature_string(buf, end, ptr, spec);
+		}
+		break;
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
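
As with the other %p extensions, the new 'NF' form takes a pointer to the value rather than the value itself. A usage sketch, assuming netdev is a struct net_device * whose features field is already a netdev_features_t:

	/* Prints the feature mask as a zero-padded hex word, e.g. 0x0000000000004821. */
	pr_info("%s: features = %pNF\n", netdev->name, &netdev->features);
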
diff --git a/mm/Kconfig b/mm/Kconfig
index 011b110..e338407 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -131,6 +131,12 @@
 config HAVE_MEMBLOCK
 	boolean
 
+config HAVE_MEMBLOCK_NODE_MAP
+	boolean
+
+config ARCH_DISCARD_MEMBLOCK
+	boolean
+
 config NO_BOOTMEM
 	boolean
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 71034f4..7ba8fea 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -600,14 +600,10 @@
 
 	/*
 	 * Finally, kill the kernel thread. We don't need to be RCU
-	 * safe anymore, since the bdi is gone from visibility. Force
-	 * unfreeze of the thread before calling kthread_stop(), otherwise
-	 * it would never exet if it is currently stuck in the refrigerator.
+	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task) {
-		thaw_process(bdi->wb.task);
+	if (bdi->wb.task)
 		kthread_stop(bdi->wb.task);
-	}
 }
 
 /*
diff --git a/mm/compaction.c b/mm/compaction.c
index 899d956..1253d7a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -721,23 +721,23 @@
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
-ssize_t sysfs_compact_node(struct sys_device *dev,
-			struct sysdev_attribute *attr,
+ssize_t sysfs_compact_node(struct device *dev,
+			struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	compact_node(dev->id);
 
 	return count;
 }
-static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
+static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
 
 int compaction_register_node(struct node *node)
 {
-	return sysdev_create_file(&node->sysdev, &attr_compact);
+	return device_create_file(&node->dev, &dev_attr_compact);
 }
 
 void compaction_unregister_node(struct node *node)
 {
-	return sysdev_remove_file(&node->sysdev, &attr_compact);
+	return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
diff --git a/mm/failslab.c b/mm/failslab.c
index 0dd7b8f..fefaaba 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -35,7 +35,7 @@
 static int __init failslab_debugfs_init(void)
 {
 	struct dentry *dir;
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 
 	dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
 	if (IS_ERR(dir))
diff --git a/mm/filemap.c b/mm/filemap.c
index c0018f2..a0701e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1828,7 +1828,7 @@
 		page = __page_cache_alloc(gfp | __GFP_COLD);
 		if (!page)
 			return ERR_PTR(-ENOMEM);
-		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+		err = add_to_page_cache_lru(page, mapping, index, gfp);
 		if (unlikely(err)) {
 			page_cache_release(page);
 			if (err == -EEXIST)
@@ -1925,10 +1925,7 @@
  * @gfp:	the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
@@ -1971,7 +1968,7 @@
  */
 int should_remove_suid(struct dentry *dentry)
 {
-	mode_t mode = dentry->d_inode->i_mode;
+	umode_t mode = dentry->d_inode->i_mode;
 	int kill = 0;
 
 	/* suid always must be killed */
@@ -2407,7 +2404,6 @@
 						iov_iter_count(i));
 
 again:
-
 		/*
 		 * Bring in the user page that we will copy from _first_.
 		 * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2459,10 @@
 		written += copied;
 
 		balance_dirty_pages_ratelimited(mapping);
-
+		if (fatal_signal_pending(current)) {
+			status = -EINTR;
+			break;
+		}
 	} while (iov_iter_count(i));
 
 	return written ? written : status;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 73f17c0..7acd125 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -901,7 +901,6 @@
 	h->resv_huge_pages += delta;
 	ret = 0;
 
-	spin_unlock(&hugetlb_lock);
 	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 		if ((--needed) < 0)
@@ -915,6 +914,7 @@
 		VM_BUG_ON(page_count(page));
 		enqueue_huge_page(h, page);
 	}
+	spin_unlock(&hugetlb_lock);
 
 	/* Free unnecessary surplus pages to the buddy allocator */
 free:
@@ -1592,9 +1592,9 @@
 
 /*
  * node_hstate/s - associate per node hstate attributes, via their kobjects,
- * with node sysdevs in node_devices[] using a parallel array.  The array
- * index of a node sysdev or _hstate == node id.
- * This is here to avoid any static dependency of the node sysdev driver, in
+ * with node devices in node_devices[] using a parallel array.  The array
+ * index of a node device or _hstate == node id.
+ * This is here to avoid any static dependency of the node device driver, in
  * the base kernel, on the hugetlb module.
  */
 struct node_hstate {
@@ -1604,7 +1604,7 @@
 struct node_hstate node_hstates[MAX_NUMNODES];
 
 /*
- * A subset of global hstate attributes for node sysdevs
+ * A subset of global hstate attributes for node devices
  */
 static struct attribute *per_node_hstate_attrs[] = {
 	&nr_hugepages_attr.attr,
@@ -1618,7 +1618,7 @@
 };
 
 /*
- * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  * Returns node id via non-NULL nidp.
  */
 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
@@ -1641,13 +1641,13 @@
 }
 
 /*
- * Unregister hstate attributes from a single node sysdev.
+ * Unregister hstate attributes from a single node device.
  * No-op if no hstate attributes attached.
  */
 void hugetlb_unregister_node(struct node *node)
 {
 	struct hstate *h;
-	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+	struct node_hstate *nhs = &node_hstates[node->dev.id];
 
 	if (!nhs->hugepages_kobj)
 		return;		/* no hstate attributes */
@@ -1663,7 +1663,7 @@
 }
 
 /*
- * hugetlb module exit:  unregister hstate attributes from node sysdevs
+ * hugetlb module exit:  unregister hstate attributes from node devices
  * that have them.
  */
 static void hugetlb_unregister_all_nodes(void)
@@ -1671,7 +1671,7 @@
 	int nid;
 
 	/*
-	 * disable node sysdev registrations.
+	 * disable node device registrations.
 	 */
 	register_hugetlbfs_with_node(NULL, NULL);
 
@@ -1683,20 +1683,20 @@
 }
 
 /*
- * Register hstate attributes for a single node sysdev.
+ * Register hstate attributes for a single node device.
  * No-op if attributes already registered.
  */
 void hugetlb_register_node(struct node *node)
 {
 	struct hstate *h;
-	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+	struct node_hstate *nhs = &node_hstates[node->dev.id];
 	int err;
 
 	if (nhs->hugepages_kobj)
 		return;		/* already allocated */
 
 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
-							&node->sysdev.kobj);
+							&node->dev.kobj);
 	if (!nhs->hugepages_kobj)
 		return;
 
@@ -1707,7 +1707,7 @@
 		if (err) {
 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
 					" for node %d\n",
-						h->name, node->sysdev.id);
+						h->name, node->dev.id);
 			hugetlb_unregister_node(node);
 			break;
 		}
@@ -1716,8 +1716,8 @@
 
 /*
  * hugetlb init time:  register hstate attributes for all registered node
- * sysdevs of nodes that have memory.  All on-line nodes should have
- * registered their associated sysdev by this time.
+ * devices of nodes that have memory.  All on-line nodes should have
+ * registered their associated device by this time.
  */
 static void hugetlb_register_all_nodes(void)
 {
@@ -1725,12 +1725,12 @@
 
 	for_each_node_state(nid, N_HIGH_MEMORY) {
 		struct node *node = &node_devices[nid];
-		if (node->sysdev.id == nid)
+		if (node->dev.id == nid)
 			hugetlb_register_node(node);
 	}
 
 	/*
-	 * Let the node sysdev driver know we're here so it can
+	 * Let the node device driver know we're here so it can
 	 * [un]register hstate attributes on node hotplug.
 	 */
 	register_hugetlbfs_with_node(hugetlb_register_node,
diff --git a/mm/memblock.c b/mm/memblock.c
index 84bec49..2f55f19 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,12 +20,23 @@
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
 
-struct memblock memblock __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+
+struct memblock memblock __initdata_memblock = {
+	.memory.regions		= memblock_memory_init_regions,
+	.memory.cnt		= 1,	/* empty dummy entry */
+	.memory.max		= INIT_MEMBLOCK_REGIONS,
+
+	.reserved.regions	= memblock_reserved_init_regions,
+	.reserved.cnt		= 1,	/* empty dummy entry */
+	.reserved.max		= INIT_MEMBLOCK_REGIONS,
+
+	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
+};
 
 int memblock_debug __initdata_memblock;
-int memblock_can_resize __initdata_memblock;
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
+static int memblock_can_resize __initdata_memblock;
 
 /* inline so we don't get a warning when pr_debug is compiled out */
 static inline const char *memblock_type_name(struct memblock_type *type)
@@ -38,20 +49,15 @@
 		return "unknown";
 }
 
+/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
+static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
+{
+	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
+}
+
 /*
  * Address comparison utilities
  */
-
-static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
-{
-	return addr & ~(size - 1);
-}
-
-static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
 				       phys_addr_t base2, phys_addr_t size2)
 {
@@ -73,83 +79,66 @@
 	return (i < type->cnt) ? i : -1;
 }
 
-/*
- * Find, allocate, deallocate or reserve unreserved regions. All allocations
- * are top-down.
+/**
+ * memblock_find_in_range_node - find free area in given range and node
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Find @size free area aligned to @align in the specified range and node.
+ *
+ * RETURNS:
+ * Found address on success, %0 on failure.
  */
-
-static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
-					  phys_addr_t size, phys_addr_t align)
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+					phys_addr_t end, phys_addr_t size,
+					phys_addr_t align, int nid)
 {
-	phys_addr_t base, res_base;
-	long j;
+	phys_addr_t this_start, this_end, cand;
+	u64 i;
 
-	/* In case, huge size is requested */
-	if (end < size)
-		return MEMBLOCK_ERROR;
+	/* align @size to avoid excessive fragmentation on reserved array */
+	size = round_up(size, align);
 
-	base = memblock_align_down((end - size), align);
-
-	/* Prevent allocations returning 0 as it's also used to
-	 * indicate an allocation failure
-	 */
-	if (start == 0)
-		start = PAGE_SIZE;
-
-	while (start <= base) {
-		j = memblock_overlaps_region(&memblock.reserved, base, size);
-		if (j < 0)
-			return base;
-		res_base = memblock.reserved.regions[j].base;
-		if (res_base < size)
-			break;
-		base = memblock_align_down(res_base - size, align);
-	}
-
-	return MEMBLOCK_ERROR;
-}
-
-static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
-			phys_addr_t align, phys_addr_t start, phys_addr_t end)
-{
-	long i;
-
-	BUG_ON(0 == size);
-
-	/* Pump up max_addr */
+	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
 
-	/* We do a top-down search, this tends to limit memory
-	 * fragmentation by keeping early boot allocs near the
-	 * top of memory
-	 */
-	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		phys_addr_t memblockbase = memblock.memory.regions[i].base;
-		phys_addr_t memblocksize = memblock.memory.regions[i].size;
-		phys_addr_t bottom, top, found;
+	/* adjust @start to avoid underflow and allocating the first page */
+	start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+	end = max(start, end);
 
-		if (memblocksize < size)
-			continue;
-		if ((memblockbase + memblocksize) <= start)
-			break;
-		bottom = max(memblockbase, start);
-		top = min(memblockbase + memblocksize, end);
-		if (bottom >= top)
-			continue;
-		found = memblock_find_region(bottom, top, size, align);
-		if (found != MEMBLOCK_ERROR)
-			return found;
+	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+		this_start = clamp(this_start, start, end);
+		this_end = clamp(this_end, start, end);
+
+		cand = round_down(this_end - size, align);
+		if (cand >= this_start)
+			return cand;
 	}
-	return MEMBLOCK_ERROR;
+	return 0;
 }
 
-/*
- * Find a free area with specified alignment in a specific range.
+/**
+ * memblock_find_in_range - find free area in given range
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ *
+ * Find @size free area aligned to @align in the specified range.
+ *
+ * RETURNS:
+ * Found address on success, %0 on failure.
  */
-u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
+phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
+					phys_addr_t end, phys_addr_t size,
+					phys_addr_t align)
 {
-	return memblock_find_base(size, align, start, end);
+	return memblock_find_in_range_node(start, end, size, align,
+					   MAX_NUMNODES);
 }
 
 /*
@@ -178,25 +167,21 @@
 
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
-	unsigned long i;
-
-	for (i = r; i < type->cnt - 1; i++) {
-		type->regions[i].base = type->regions[i + 1].base;
-		type->regions[i].size = type->regions[i + 1].size;
-	}
+	type->total_size -= type->regions[r].size;
+	memmove(&type->regions[r], &type->regions[r + 1],
+		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
 	type->cnt--;
 
 	/* Special case for empty arrays */
 	if (type->cnt == 0) {
+		WARN_ON(type->total_size != 0);
 		type->cnt = 1;
 		type->regions[0].base = 0;
 		type->regions[0].size = 0;
+		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
 	}
 }
 
-/* Defined below but needed now */
-static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
-
 static int __init_memblock memblock_double_array(struct memblock_type *type)
 {
 	struct memblock_region *new_array, *old_array;
@@ -226,10 +211,10 @@
 	 */
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
-		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+		addr = new_array ? __pa(new_array) : 0;
 	} else
-		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
-	if (addr == MEMBLOCK_ERROR) {
+		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+	if (!addr) {
 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
 		       memblock_type_name(type), type->max, type->max * 2);
 		return -1;
@@ -254,7 +239,7 @@
 		return 0;
 
 	/* Add the new reserved region now. Should not fail ! */
-	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));
+	BUG_ON(memblock_reserve(addr, new_size));
 
 	/* If the array wasn't our static init one, then free it. We only do
 	 * that before SLAB is available as later on, we don't know whether
@@ -268,233 +253,496 @@
 	return 0;
 }
 
-int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
-					  phys_addr_t addr2, phys_addr_t size2)
+/**
+ * memblock_merge_regions - merge neighboring compatible regions
+ * @type: memblock type to scan
+ *
+ * Scan @type and merge neighboring compatible regions.
+ */
+static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 {
-	return 1;
-}
+	int i = 0;
 
-static long __init_memblock memblock_add_region(struct memblock_type *type,
-						phys_addr_t base, phys_addr_t size)
-{
-	phys_addr_t end = base + size;
-	int i, slot = -1;
+	/* cnt never goes below 1 */
+	while (i < type->cnt - 1) {
+		struct memblock_region *this = &type->regions[i];
+		struct memblock_region *next = &type->regions[i + 1];
 
-	/* First try and coalesce this MEMBLOCK with others */
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rend = rgn->base + rgn->size;
-
-		/* Exit if there's no possible hits */
-		if (rgn->base > end || rgn->size == 0)
-			break;
-
-		/* Check if we are fully enclosed within an existing
-		 * block
-		 */
-		if (rgn->base <= base && rend >= end)
-			return 0;
-
-		/* Check if we overlap or are adjacent with the bottom
-		 * of a block.
-		 */
-		if (base < rgn->base && end >= rgn->base) {
-			/* If we can't coalesce, create a new block */
-			if (!memblock_memory_can_coalesce(base, size,
-							  rgn->base,
-							  rgn->size)) {
-				/* Overlap & can't coalesce are mutually
-				 * exclusive, if you do that, be prepared
-				 * for trouble
-				 */
-				WARN_ON(end != rgn->base);
-				goto new_block;
-			}
-			/* We extend the bottom of the block down to our
-			 * base
-			 */
-			rgn->base = base;
-			rgn->size = rend - base;
-
-			/* Return if we have nothing else to allocate
-			 * (fully coalesced)
-			 */
-			if (rend >= end)
-				return 0;
-
-			/* We continue processing from the end of the
-			 * coalesced block.
-			 */
-			base = rend;
-			size = end - base;
-		}
-
-		/* Now check if we overlap or are adjacent with the
-		 * top of a block
-		 */
-		if (base <= rend && end >= rend) {
-			/* If we can't coalesce, create a new block */
-			if (!memblock_memory_can_coalesce(rgn->base,
-							  rgn->size,
-							  base, size)) {
-				/* Overlap & can't coalesce are mutually
-				 * exclusive, if you do that, be prepared
-				 * for trouble
-				 */
-				WARN_ON(rend != base);
-				goto new_block;
-			}
-			/* We adjust our base down to enclose the
-			 * original block and destroy it. It will be
-			 * part of our new allocation. Since we've
-			 * freed an entry, we know we won't fail
-			 * to allocate one later, so we won't risk
-			 * losing the original block allocation.
-			 */
-			size += (base - rgn->base);
-			base = rgn->base;
-			memblock_remove_region(type, i--);
-		}
-	}
-
-	/* If the array is empty, special case, replace the fake
-	 * filler region and return
-	 */
-	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
-		type->regions[0].base = base;
-		type->regions[0].size = size;
-		return 0;
-	}
-
- new_block:
-	/* If we are out of space, we fail. It's too late to resize the array
-	 * but then this shouldn't have happened in the first place.
-	 */
-	if (WARN_ON(type->cnt >= type->max))
-		return -1;
-
-	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
-	for (i = type->cnt - 1; i >= 0; i--) {
-		if (base < type->regions[i].base) {
-			type->regions[i+1].base = type->regions[i].base;
-			type->regions[i+1].size = type->regions[i].size;
-		} else {
-			type->regions[i+1].base = base;
-			type->regions[i+1].size = size;
-			slot = i + 1;
-			break;
-		}
-	}
-	if (base < type->regions[0].base) {
-		type->regions[0].base = base;
-		type->regions[0].size = size;
-		slot = 0;
-	}
-	type->cnt++;
-
-	/* The array is full ? Try to resize it. If that fails, we undo
-	 * our allocation and return an error
-	 */
-	if (type->cnt == type->max && memblock_double_array(type)) {
-		BUG_ON(slot < 0);
-		memblock_remove_region(type, slot);
-		return -1;
-	}
-
-	return 0;
-}
-
-long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
-{
-	return memblock_add_region(&memblock.memory, base, size);
-
-}
-
-static long __init_memblock __memblock_remove(struct memblock_type *type,
-					      phys_addr_t base, phys_addr_t size)
-{
-	phys_addr_t end = base + size;
-	int i;
-
-	/* Walk through the array for collisions */
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rend = rgn->base + rgn->size;
-
-		/* Nothing more to do, exit */
-		if (rgn->base > end || rgn->size == 0)
-			break;
-
-		/* If we fully enclose the block, drop it */
-		if (base <= rgn->base && end >= rend) {
-			memblock_remove_region(type, i--);
+		if (this->base + this->size != next->base ||
+		    memblock_get_region_node(this) !=
+		    memblock_get_region_node(next)) {
+			BUG_ON(this->base + this->size > next->base);
+			i++;
 			continue;
 		}
 
-		/* If we are fully enclosed within a block
-		 * then we need to split it and we are done
-		 */
-		if (base > rgn->base && end < rend) {
-			rgn->size = base - rgn->base;
-			if (!memblock_add_region(type, end, rend - end))
-				return 0;
-			/* Failure to split is bad, we at least
-			 * restore the block before erroring
-			 */
-			rgn->size = rend - rgn->base;
-			WARN_ON(1);
-			return -1;
-		}
-
-		/* Check if we need to trim the bottom of a block */
-		if (rgn->base < end && rend > end) {
-			rgn->size -= end - rgn->base;
-			rgn->base = end;
-			break;
-		}
-
-		/* And check if we need to trim the top of a block */
-		if (base < rend)
-			rgn->size -= rend - base;
-
+		this->size += next->size;
+		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+		type->cnt--;
 	}
+}
+
+/**
+ * memblock_insert_region - insert new memblock region
+ * @type: memblock type to insert into
+ * @idx: index for the insertion point
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * @type must already have extra room to accommodate the new region.
+ */
+static void __init_memblock memblock_insert_region(struct memblock_type *type,
+						   int idx, phys_addr_t base,
+						   phys_addr_t size, int nid)
+{
+	struct memblock_region *rgn = &type->regions[idx];
+
+	BUG_ON(type->cnt >= type->max);
+	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
+	rgn->base = base;
+	rgn->size = size;
+	memblock_set_region_node(rgn, nid);
+	type->cnt++;
+	type->total_size += size;
+}
+
+/**
+ * memblock_add_region - add new memblock region
+ * @type: memblock type to add new region into
+ * @base: base address of the new region
+ * @size: size of the new region
+ * @nid: nid of the new region
+ *
+ * Add new memblock region [@base,@base+@size) into @type.  The new region
+ * is allowed to overlap with existing ones - overlaps don't affect already
+ * existing regions.  @type is guaranteed to be minimal (all neighbouring
+ * compatible regions are merged) after the addition.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_add_region(struct memblock_type *type,
+				phys_addr_t base, phys_addr_t size, int nid)
+{
+	bool insert = false;
+	phys_addr_t obase = base;
+	phys_addr_t end = base + memblock_cap_size(base, &size);
+	int i, nr_new;
+
+	/* special case for empty array */
+	if (type->regions[0].size == 0) {
+		WARN_ON(type->cnt != 1 || type->total_size);
+		type->regions[0].base = base;
+		type->regions[0].size = size;
+		memblock_set_region_node(&type->regions[0], nid);
+		type->total_size = size;
+		return 0;
+	}
+repeat:
+	/*
+	 * The following is executed twice.  Once with %false @insert and
+	 * then with %true.  The first counts the number of regions needed
+	 * to accommodate the new area.  The second actually inserts them.
+	 */
+	base = obase;
+	nr_new = 0;
+
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		phys_addr_t rbase = rgn->base;
+		phys_addr_t rend = rbase + rgn->size;
+
+		if (rbase >= end)
+			break;
+		if (rend <= base)
+			continue;
+		/*
+		 * @rgn overlaps.  If it separates the lower part of new
+		 * area, insert that portion.
+		 */
+		if (rbase > base) {
+			nr_new++;
+			if (insert)
+				memblock_insert_region(type, i++, base,
+						       rbase - base, nid);
+		}
+		/* area below @rend is dealt with, forget about it */
+		base = min(rend, end);
+	}
+
+	/* insert the remaining portion */
+	if (base < end) {
+		nr_new++;
+		if (insert)
+			memblock_insert_region(type, i, base, end - base, nid);
+	}
+
+	/*
+	 * If this was the first round, resize array and repeat for actual
+	 * insertions; otherwise, merge and return.
+	 */
+	if (!insert) {
+		while (type->cnt + nr_new > type->max)
+			if (memblock_double_array(type) < 0)
+				return -ENOMEM;
+		insert = true;
+		goto repeat;
+	} else {
+		memblock_merge_regions(type);
+		return 0;
+	}
+}
+
+int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
+				       int nid)
+{
+	return memblock_add_region(&memblock.memory, base, size, nid);
+}
+
+int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+{
+	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
+}
+
+/**
+ * memblock_isolate_range - isolate given range into disjoint memblocks
+ * @type: memblock type to isolate range for
+ * @base: base of range to isolate
+ * @size: size of range to isolate
+ * @start_rgn: out parameter for the start of isolated region
+ * @end_rgn: out parameter for the end of isolated region
+ *
+ * Walk @type and ensure that regions don't cross the boundaries defined by
+ * [@base,@base+@size).  Crossing regions are split at the boundaries,
+ * which may create at most two more regions.  The index of the first
+ * region inside the range is returned in *@start_rgn and end in *@end_rgn.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_isolate_range(struct memblock_type *type,
+					phys_addr_t base, phys_addr_t size,
+					int *start_rgn, int *end_rgn)
+{
+	phys_addr_t end = base + memblock_cap_size(base, &size);
+	int i;
+
+	*start_rgn = *end_rgn = 0;
+
+	/* we'll create at most two more regions */
+	while (type->cnt + 2 > type->max)
+		if (memblock_double_array(type) < 0)
+			return -ENOMEM;
+
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		phys_addr_t rbase = rgn->base;
+		phys_addr_t rend = rbase + rgn->size;
+
+		if (rbase >= end)
+			break;
+		if (rend <= base)
+			continue;
+
+		if (rbase < base) {
+			/*
+			 * @rgn intersects from below.  Split and continue
+			 * to process the next region - the new top half.
+			 */
+			rgn->base = base;
+			rgn->size -= base - rbase;
+			type->total_size -= base - rbase;
+			memblock_insert_region(type, i, rbase, base - rbase,
+					       memblock_get_region_node(rgn));
+		} else if (rend > end) {
+			/*
+			 * @rgn intersects from above.  Split and redo the
+			 * current region - the new bottom half.
+			 */
+			rgn->base = end;
+			rgn->size -= end - rbase;
+			type->total_size -= end - rbase;
+			memblock_insert_region(type, i--, rbase, end - rbase,
+					       memblock_get_region_node(rgn));
+		} else {
+			/* @rgn is fully contained, record it */
+			if (!*end_rgn)
+				*start_rgn = i;
+			*end_rgn = i + 1;
+		}
+	}
+
 	return 0;
 }
 
-long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
+static int __init_memblock __memblock_remove(struct memblock_type *type,
+					     phys_addr_t base, phys_addr_t size)
+{
+	int start_rgn, end_rgn;
+	int i, ret;
+
+	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+	if (ret)
+		return ret;
+
+	for (i = end_rgn - 1; i >= start_rgn; i--)
+		memblock_remove_region(type, i);
+	return 0;
+}
+
+int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.memory, base, size);
 }
 
-long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
+	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
+		     (unsigned long long)base,
+		     (unsigned long long)base + size,
+		     (void *)_RET_IP_);
+
 	return __memblock_remove(&memblock.reserved, base, size);
 }
 
-long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	struct memblock_type *_rgn = &memblock.reserved;
 
+	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+		     (unsigned long long)base,
+		     (unsigned long long)base + size,
+		     (void *)_RET_IP_);
 	BUG_ON(0 == size);
 
-	return memblock_add_region(_rgn, base, size);
+	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
+}
+
+/**
+ * __next_free_mem_range - next function for for_each_free_mem_range()
+ * @idx: pointer to u64 loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Find the first free area from *@idx which matches @nid, fill the out
+ * parameters, and update *@idx for the next iteration.  The lower 32bit of
+ * *@idx contains index into memory region and the upper 32bit indexes the
+ * areas before each reserved region.  For example, if reserved regions
+ * look like the following,
+ *
+ *	0:[0-16), 1:[32-48), 2:[128-130)
+ *
+ * The upper 32bit indexes the following regions.
+ *
+ *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
+ *
+ * As both region arrays are sorted, the function advances the two indices
+ * in lockstep and returns each intersection.
+ */
+void __init_memblock __next_free_mem_range(u64 *idx, int nid,
+					   phys_addr_t *out_start,
+					   phys_addr_t *out_end, int *out_nid)
+{
+	struct memblock_type *mem = &memblock.memory;
+	struct memblock_type *rsv = &memblock.reserved;
+	int mi = *idx & 0xffffffff;
+	int ri = *idx >> 32;
+
+	for ( ; mi < mem->cnt; mi++) {
+		struct memblock_region *m = &mem->regions[mi];
+		phys_addr_t m_start = m->base;
+		phys_addr_t m_end = m->base + m->size;
+
+		/* only memory regions are associated with nodes, check it */
+		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+			continue;
+
+		/* scan areas before each reservation for intersection */
+		for ( ; ri < rsv->cnt + 1; ri++) {
+			struct memblock_region *r = &rsv->regions[ri];
+			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
+			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+
+			/* if ri advanced past mi, break out to advance mi */
+			if (r_start >= m_end)
+				break;
+			/* if the two regions intersect, we're done */
+			if (m_start < r_end) {
+				if (out_start)
+					*out_start = max(m_start, r_start);
+				if (out_end)
+					*out_end = min(m_end, r_end);
+				if (out_nid)
+					*out_nid = memblock_get_region_node(m);
+				/*
+				 * The region which ends first is advanced
+				 * for the next iteration.
+				 */
+				if (m_end <= r_end)
+					mi++;
+				else
+					ri++;
+				*idx = (u32)mi | (u64)ri << 32;
+				return;
+			}
+		}
+	}
+
+	/* signal end of iteration */
+	*idx = ULLONG_MAX;
+}
+
+/**
+ * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
+ * @idx: pointer to u64 loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Reverse of __next_free_mem_range().
+ */
+void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
+					   phys_addr_t *out_start,
+					   phys_addr_t *out_end, int *out_nid)
+{
+	struct memblock_type *mem = &memblock.memory;
+	struct memblock_type *rsv = &memblock.reserved;
+	int mi = *idx & 0xffffffff;
+	int ri = *idx >> 32;
+
+	if (*idx == (u64)ULLONG_MAX) {
+		mi = mem->cnt - 1;
+		ri = rsv->cnt;
+	}
+
+	for ( ; mi >= 0; mi--) {
+		struct memblock_region *m = &mem->regions[mi];
+		phys_addr_t m_start = m->base;
+		phys_addr_t m_end = m->base + m->size;
+
+		/* only memory regions are associated with nodes, check it */
+		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+			continue;
+
+		/* scan areas before each reservation for intersection */
+		for ( ; ri >= 0; ri--) {
+			struct memblock_region *r = &rsv->regions[ri];
+			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
+			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+
+			/* if ri advanced past mi, break out to advance mi */
+			if (r_end <= m_start)
+				break;
+			/* if the two regions intersect, we're done */
+			if (m_end > r_start) {
+				if (out_start)
+					*out_start = max(m_start, r_start);
+				if (out_end)
+					*out_end = min(m_end, r_end);
+				if (out_nid)
+					*out_nid = memblock_get_region_node(m);
+
+				if (m_start >= r_start)
+					mi--;
+				else
+					ri--;
+				*idx = (u32)mi | (u64)ri << 32;
+				return;
+			}
+		}
+	}
+
+	*idx = ULLONG_MAX;
+}
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+/*
+ * Common iterator interface used to define for_each_mem_range().
+ */
+void __init_memblock __next_mem_pfn_range(int *idx, int nid,
+				unsigned long *out_start_pfn,
+				unsigned long *out_end_pfn, int *out_nid)
+{
+	struct memblock_type *type = &memblock.memory;
+	struct memblock_region *r;
+
+	while (++*idx < type->cnt) {
+		r = &type->regions[*idx];
+
+		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
+			continue;
+		if (nid == MAX_NUMNODES || nid == r->nid)
+			break;
+	}
+	if (*idx >= type->cnt) {
+		*idx = -1;
+		return;
+	}
+
+	if (out_start_pfn)
+		*out_start_pfn = PFN_UP(r->base);
+	if (out_end_pfn)
+		*out_end_pfn = PFN_DOWN(r->base + r->size);
+	if (out_nid)
+		*out_nid = r->nid;
+}
+
+/**
+ * memblock_set_node - set node ID on memblock regions
+ * @base: base of area to set node ID for
+ * @size: size of area to set node ID for
+ * @nid: node ID to set
+ *
+ * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
+ * Regions which cross the area boundaries are split as necessary.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
+				      int nid)
+{
+	struct memblock_type *type = &memblock.memory;
+	int start_rgn, end_rgn;
+	int i, ret;
+
+	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+	if (ret)
+		return ret;
+
+	for (i = start_rgn; i < end_rgn; i++)
+		type->regions[i].nid = nid;
+
+	memblock_merge_regions(type);
+	return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
+					phys_addr_t align, phys_addr_t max_addr,
+					int nid)
+{
+	phys_addr_t found;
+
+	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
+	if (found && !memblock_reserve(found, size))
+		return found;
+
+	return 0;
+}
+
+phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
+{
+	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
 phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	phys_addr_t found;
-
-	/* We align the size to limit fragmentation. Without this, a lot of
-	 * small allocs quickly eat up the whole reserve array on sparc
-	 */
-	size = memblock_align_up(size, align);
-
-	found = memblock_find_base(size, align, 0, max_addr);
-	if (found != MEMBLOCK_ERROR &&
-	    !memblock_add_region(&memblock.reserved, found, size))
-		return found;
-
-	return 0;
+	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
 }
 
 phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
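
Free ranges built from these two sorted arrays are normally walked through the for_each_free_mem_range() / for_each_free_mem_range_reverse() iterators layered on the two __next_free_mem_range*() helpers above; the macros themselves sit in include/linux/memblock.h and are not part of this hunk, so the loop below is a sketch under that assumption, with foo_dump_free_ranges an illustrative name.

	#include <linux/memblock.h>

	static void __init foo_dump_free_ranges(void)
	{
		phys_addr_t start, end;
		int nid;
		u64 i;

		/* every (memory minus reserved) range, on any node */
		for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, &nid)
			pr_info("free: [%#018llx-%#018llx) node %d\n",
				(unsigned long long)start, (unsigned long long)end, nid);
	}
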
@@ -515,105 +763,13 @@
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-
-/*
- * Additional node-local allocators. Search for node memory is bottom up
- * and walks memblock regions within that node bottom-up as well, but allocation
- * within an memblock region is top-down. XXX I plan to fix that at some stage
- *
- * WARNING: Only available after early_node_map[] has been populated,
- * on some architectures, that is after all the calls to add_active_range()
- * have been done to populate it.
- */
-
-phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
-{
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-	/*
-	 * This code originates from sparc which really wants use to walk by addresses
-	 * and returns the nid. This is not very convenient for early_pfn_map[] users
-	 * as the map isn't sorted yet, and it really wants to be walked by nid.
-	 *
-	 * For now, I implement the inefficient method below which walks the early
-	 * map multiple times. Eventually we may want to use an ARCH config option
-	 * to implement a completely different method for both case.
-	 */
-	unsigned long start_pfn, end_pfn;
-	int i;
-
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
-		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
-			continue;
-		*nid = i;
-		return min(end, PFN_PHYS(end_pfn));
-	}
-#endif
-	*nid = 0;
-
-	return end;
-}
-
-static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
-					       phys_addr_t size,
-					       phys_addr_t align, int nid)
-{
-	phys_addr_t start, end;
-
-	start = mp->base;
-	end = start + mp->size;
-
-	start = memblock_align_up(start, align);
-	while (start < end) {
-		phys_addr_t this_end;
-		int this_nid;
-
-		this_end = memblock_nid_range(start, end, &this_nid);
-		if (this_nid == nid) {
-			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
-			if (ret != MEMBLOCK_ERROR &&
-			    !memblock_add_region(&memblock.reserved, ret, size))
-				return ret;
-		}
-		start = this_end;
-	}
-
-	return MEMBLOCK_ERROR;
-}
-
-phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
-{
-	struct memblock_type *mem = &memblock.memory;
-	int i;
-
-	BUG_ON(0 == size);
-
-	/* We align the size to limit fragmentation. Without this, a lot of
-	 * small allocs quickly eat up the whole reserve array on sparc
-	 */
-	size = memblock_align_up(size, align);
-
-	/* We do a bottom-up search for a region with the right
-	 * nid since that's easier considering how memblock_nid_range()
-	 * works
-	 */
-	for (i = 0; i < mem->cnt; i++) {
-		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
-					       size, align, nid);
-		if (ret != MEMBLOCK_ERROR)
-			return ret;
-	}
-
-	return 0;
-}
-
 phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
 	phys_addr_t res = memblock_alloc_nid(size, align, nid);
 
 	if (res)
 		return res;
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
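
A rough usage sketch for the node-affine allocators above; the one-megabyte size and the variable names are illustrative, and nid is assumed to hold a valid node id:

	/* prefer memory on 'nid', fall back to any accessible range */
	phys_addr_t pa = memblock_alloc_try_nid(1UL << 20, PAGE_SIZE, nid);
	/* memblock_alloc_base() panics on failure, so pa is non-zero here */
	void *buf = phys_to_virt(pa);

	memset(buf, 0, 1UL << 20);
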
 
 
@@ -621,10 +777,9 @@
  * Remaining API functions
  */
 
-/* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
-	return memblock.memory_size;
+	return memblock.memory.total_size;
 }
 
 /* lowest address */
@@ -640,45 +795,28 @@
 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
 }
 
-/* You must call memblock_analyze() after this. */
-void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
+void __init memblock_enforce_memory_limit(phys_addr_t limit)
 {
 	unsigned long i;
-	phys_addr_t limit;
-	struct memblock_region *p;
+	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
 
-	if (!memory_limit)
+	if (!limit)
 		return;
 
-	/* Truncate the memblock regions to satisfy the memory limit. */
-	limit = memory_limit;
+	/* find out max address */
 	for (i = 0; i < memblock.memory.cnt; i++) {
-		if (limit > memblock.memory.regions[i].size) {
-			limit -= memblock.memory.regions[i].size;
-			continue;
-		}
+		struct memblock_region *r = &memblock.memory.regions[i];
 
-		memblock.memory.regions[i].size = limit;
-		memblock.memory.cnt = i + 1;
-		break;
+		if (limit <= r->size) {
+			max_addr = r->base + limit;
+			break;
+		}
+		limit -= r->size;
 	}
 
-	memory_limit = memblock_end_of_DRAM();
-
-	/* And truncate any reserves above the limit also. */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		p = &memblock.reserved.regions[i];
-
-		if (p->base > memory_limit)
-			p->size = 0;
-		else if ((p->base + p->size) > memory_limit)
-			p->size = memory_limit - p->base;
-
-		if (p->size == 0) {
-			memblock_remove_region(&memblock.reserved, i);
-			i--;
-		}
-	}
+	/* truncate both memory and reserved regions */
+	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
+	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
 }
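
Roughly how an architecture's "mem=" handling might drive the rewritten function; the parser plumbing and the function name are assumed rather than taken from this hunk:

	static phys_addr_t memory_limit;	/* filled in by an early "mem=" parser */

	void __init apply_memory_limit(void)
	{
		/* trims both memblock.memory and memblock.reserved above the cutoff */
		memblock_enforce_memory_limit(memory_limit);
	}
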
 
 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
@@ -712,16 +850,18 @@
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.memory, base);
+	phys_addr_t end = base + memblock_cap_size(base, &size);
 
 	if (idx == -1)
 		return 0;
 	return memblock.memory.regions[idx].base <= base &&
 		(memblock.memory.regions[idx].base +
-		 memblock.memory.regions[idx].size) >= (base + size);
+		 memblock.memory.regions[idx].size) >= end;
 }
 
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
+	memblock_cap_size(base, &size);
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
@@ -731,86 +871,45 @@
 	memblock.current_limit = limit;
 }
 
-static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
+static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
 {
 	unsigned long long base, size;
 	int i;
 
-	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);
+	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
 
-	for (i = 0; i < region->cnt; i++) {
-		base = region->regions[i].base;
-		size = region->regions[i].size;
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		char nid_buf[32] = "";
 
-		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
-		    name, i, base, base + size - 1, size);
+		base = rgn->base;
+		size = rgn->size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
+			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
+				 memblock_get_region_node(rgn));
+#endif
+		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
+			name, i, base, base + size - 1, size, nid_buf);
 	}
 }
 
-void __init_memblock memblock_dump_all(void)
+void __init_memblock __memblock_dump_all(void)
 {
-	if (!memblock_debug)
-		return;
-
 	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
+	pr_info(" memory size = %#llx reserved size = %#llx\n",
+		(unsigned long long)memblock.memory.total_size,
+		(unsigned long long)memblock.reserved.total_size);
 
 	memblock_dump(&memblock.memory, "memory");
 	memblock_dump(&memblock.reserved, "reserved");
 }
 
-void __init memblock_analyze(void)
+void __init memblock_allow_resize(void)
 {
-	int i;
-
-	/* Check marker in the unused last array entry */
-	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= MEMBLOCK_INACTIVE);
-	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= MEMBLOCK_INACTIVE);
-
-	memblock.memory_size = 0;
-
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory_size += memblock.memory.regions[i].size;
-
-	/* We allow resizing from there */
 	memblock_can_resize = 1;
 }
 
-void __init memblock_init(void)
-{
-	static int init_done __initdata = 0;
-
-	if (init_done)
-		return;
-	init_done = 1;
-
-	/* Hookup the initial arrays */
-	memblock.memory.regions	= memblock_memory_init_regions;
-	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
-	memblock.reserved.regions	= memblock_reserved_init_regions;
-	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;
-
-	/* Write a marker in the unused last array entry */
-	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
-	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
-
-	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
-	 * This simplifies the memblock_add() code below...
-	 */
-	memblock.memory.regions[0].base = 0;
-	memblock.memory.regions[0].size = 0;
-	memblock.memory.cnt = 1;
-
-	/* Ditto. */
-	memblock.reserved.regions[0].base = 0;
-	memblock.reserved.regions[0].size = 0;
-	memblock.reserved.cnt = 1;
-
-	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
-}
-
 static int __init early_memblock(char *p)
 {
 	if (p && strstr(p, "debug"))
@@ -819,7 +918,7 @@
 }
 early_param("memblock", early_memblock);
 
-#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
 
 static int memblock_debug_show(struct seq_file *m, void *private)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6aff93c..94da8ee 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -50,6 +50,8 @@
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include "internal.h"
+#include <net/sock.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -286,6 +288,10 @@
 	 */
 	struct mem_cgroup_stat_cpu nocpu_base;
 	spinlock_t pcp_counter_lock;
+
+#ifdef CONFIG_INET
+	struct tcp_memcontrol tcp_mem;
+#endif
 };
 
 /* Stuffs for move charges at task migration. */
@@ -365,7 +371,58 @@
 
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+
+/* Writing them here to avoid exposing memcg's inner layout */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_INET
+#include <net/sock.h>
+#include <net/ip.h>
+
+static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
+void sock_update_memcg(struct sock *sk)
+{
+	/* A socket spends its whole life in the same cgroup */
+	if (sk->sk_cgrp) {
+		WARN_ON(1);
+		return;
+	}
+	if (static_branch(&memcg_socket_limit_enabled)) {
+		struct mem_cgroup *memcg;
+
+		BUG_ON(!sk->sk_prot->proto_cgroup);
+
+		rcu_read_lock();
+		memcg = mem_cgroup_from_task(current);
+		if (!mem_cgroup_is_root(memcg)) {
+			mem_cgroup_get(memcg);
+			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+		}
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+	if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+		struct mem_cgroup *memcg;
+		WARN_ON(!sk->sk_cgrp->memcg);
+		memcg = sk->sk_cgrp->memcg;
+		mem_cgroup_put(memcg);
+	}
+}
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
+{
+	if (!memcg || mem_cgroup_is_root(memcg))
+		return NULL;
+
+	return &memcg->tcp_mem.cg_proto;
+}
+EXPORT_SYMBOL(tcp_proto_cgroup);
+#endif /* CONFIG_INET */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
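
A sketch of the intended pairing of the two hooks above; the wrapper names are hypothetical, only sock_update_memcg() and sock_release_memcg() come from this hunk:

	/* once a socket has been fully set up by its protocol family */
	static void my_proto_attach_memcg(struct sock *sk)
	{
		sock_update_memcg(sk);		/* takes a reference on current's memcg */
	}

	/* on the matching socket teardown path */
	static void my_proto_detach_memcg(struct sock *sk)
	{
		sock_release_memcg(sk);		/* drops that reference again */
	}
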
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -745,7 +802,7 @@
 	preempt_enable();
 }
 
-static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
 	return container_of(cgroup_subsys_state(cont,
 				mem_cgroup_subsys_id), struct mem_cgroup,
@@ -4612,6 +4669,36 @@
 }
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+	/*
+	 * Part of this would be better living in a separate allocation
+	 * function, leaving us with just the cgroup tree population work.
+	 * We, however, depend on state such as network's proto_list that
+	 * is only initialized after cgroup creation. The least cumbersome
+	 * way I found to deal with it is to defer it all to populate time.
+	 */
+	return mem_cgroup_sockets_init(cont, ss);
+};
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+				struct cgroup *cont)
+{
+	mem_cgroup_sockets_destroy(cont, ss);
+}
+#else
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+	return 0;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+				struct cgroup *cont)
+{
+}
+#endif
+
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -4843,12 +4930,13 @@
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
 	if (!memcg->res.parent)
 		return NULL;
 	return mem_cgroup_from_res_counter(memcg->res.parent, res);
 }
+EXPORT_SYMBOL(parent_mem_cgroup);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -4907,9 +4995,9 @@
 		int cpu;
 		enable_swap_cgroup();
 		parent = NULL;
-		root_mem_cgroup = memcg;
 		if (mem_cgroup_soft_limit_tree_init())
 			goto free_out;
+		root_mem_cgroup = memcg;
 		for_each_possible_cpu(cpu) {
 			struct memcg_stock_pcp *stock =
 						&per_cpu(memcg_stock, cpu);
@@ -4948,7 +5036,6 @@
 	return &memcg->css;
 free_out:
 	__mem_cgroup_free(memcg);
-	root_mem_cgroup = NULL;
 	return ERR_PTR(error);
 }
 
@@ -4965,6 +5052,8 @@
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+	kmem_cgroup_destroy(ss, cont);
+
 	mem_cgroup_put(memcg);
 }
 
@@ -4978,6 +5067,10 @@
 
 	if (!ret)
 		ret = register_memsw_files(cont, ss);
+
+	if (!ret)
+		ret = register_kmem_files(cont, ss);
+
 	return ret;
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index adc3954..c3fdbcb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -636,6 +636,7 @@
 	struct vm_area_struct *prev;
 	struct vm_area_struct *vma;
 	int err = 0;
+	pgoff_t pgoff;
 	unsigned long vmstart;
 	unsigned long vmend;
 
@@ -643,13 +644,21 @@
 	if (!vma || vma->vm_start > start)
 		return -EFAULT;
 
+	if (start > vma->vm_start)
+		prev = vma;
+
 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 		next = vma->vm_next;
 		vmstart = max(start, vma->vm_start);
 		vmend   = min(end, vma->vm_end);
 
+		if (mpol_equal(vma_policy(vma), new_pol))
+			continue;
+
+		pgoff = vma->vm_pgoff +
+			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-				  vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+				  vma->anon_vma, vma->vm_file, pgoff,
 				  new_pol);
 		if (prev) {
 			vma = prev;
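
The pgoff now passed to vma_merge() accounts for the pages of the VMA that lie below vmstart; a worked example with invented numbers, assuming 4KB pages (PAGE_SHIFT == 12):

	/* vma->vm_start = 0x400000, vma->vm_pgoff = 10, vmstart = 0x404000 */
	pgoff = 10 + ((0x404000 - 0x400000) >> 12);	/* = 10 + 4 = 14 */
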
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7fa41b4..24f0fc1 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -41,14 +41,13 @@
 	if (limit > memblock.current_limit)
 		limit = memblock.current_limit;
 
-	addr = find_memory_core_early(nid, size, align, goal, limit);
-
-	if (addr == MEMBLOCK_ERROR)
+	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
+	if (!addr)
 		return NULL;
 
 	ptr = phys_to_virt(addr);
 	memset(ptr, 0, size);
-	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+	memblock_reserve(addr, size);
 	/*
 	 * The min_count is set to 0 so that bootmem allocated blocks
 	 * are never reported as leaks.
@@ -107,23 +106,27 @@
 		__free_pages_bootmem(pfn_to_page(i), 0);
 }
 
-unsigned long __init free_all_memory_core_early(int nodeid)
+unsigned long __init free_low_memory_core_early(int nodeid)
 {
-	int i;
-	u64 start, end;
 	unsigned long count = 0;
-	struct range *range = NULL;
-	int nr_range;
+	phys_addr_t start, end;
+	u64 i;
 
-	nr_range = get_free_all_memory_range(&range, nodeid);
+	/* free reserved array temporarily so that it's treated as free area */
+	memblock_free_reserved_regions();
 
-	for (i = 0; i < nr_range; i++) {
-		start = range[i].start;
-		end = range[i].end;
-		count += end - start;
-		__free_pages_memory(start, end);
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+		unsigned long start_pfn = PFN_UP(start);
+		unsigned long end_pfn = min_t(unsigned long,
+					      PFN_DOWN(end), max_low_pfn);
+		if (start_pfn < end_pfn) {
+			__free_pages_memory(start_pfn, end_pfn);
+			count += end_pfn - start_pfn;
+		}
 	}
 
+	/* put the reserved region array back so it is tracked again */
+	memblock_reserve_reserved_regions();
 	return count;
 }
 
@@ -137,7 +140,7 @@
 {
 	register_page_bootmem_info_node(pgdat);
 
-	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+	/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
 	return 0;
 }
 
@@ -155,7 +158,7 @@
 	 * Using MAX_NUMNODES makes sure all ranges in early_node_map[]
 	 * will be used instead of only those related to node 0
 	 */
-	return free_all_memory_core_early(MAX_NUMNODES);
+	return free_low_memory_core_early(MAX_NUMNODES);
 }
 
 /**
@@ -172,7 +175,7 @@
 			      unsigned long size)
 {
 	kmemleak_free_part(__va(physaddr), size);
-	memblock_x86_free_range(physaddr, physaddr + size);
+	memblock_free(physaddr, size);
 }
 
 /**
@@ -187,7 +190,7 @@
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 	kmemleak_free_part(__va(addr), size);
-	memblock_x86_free_range(addr, addr + size);
+	memblock_free(addr, size);
 }
 
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 76f2c5a..eeb27e2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 		      const nodemask_t *nodemask, unsigned long totalpages)
 {
-	int points;
+	long points;
 
 	if (oom_unkillable_task(p, mem, nodemask))
 		return 0;
@@ -328,7 +328,7 @@
 		 */
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
-				thaw_process(p);
+				__thaw_task(p);
 			return ERR_PTR(-1UL);
 		}
 		if (!p->mm)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7125248..8616ef3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -32,7 +32,7 @@
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
-#include <linux/buffer_head.h>
+#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
 #include <linux/pagevec.h>
 #include <trace/events/writeback.h>
 
@@ -411,8 +411,13 @@
  *
  * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
  * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() only treats it as a hard limit when
+ * sleeping max_pause per page is not enough to keep the dirty pages under
+ * control, for example when the device is completely stalled due to some
+ * error condition, or when there are 1000 dd tasks writing to a slow 10MB/s
+ * USB key. In normal situations it acts more gently, throttling the tasks
+ * harder (rather than blocking them completely) as the bdi dirty pages grow.
  *
  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
@@ -594,6 +599,13 @@
 	 */
 	if (unlikely(bdi_thresh > thresh))
 		bdi_thresh = thresh;
+	/*
+	 * It's very possible that bdi_thresh is close to 0 not because the
+	 * device is slow, but because it has remained inactive for a long time.
+	 * Grant such devices a reasonably good (hopefully IO efficient)
+	 * threshold, so that occasional writes won't be blocked and active
+	 * writes can ramp up the threshold quickly.
+	 */
 	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
 	/*
 	 * scale global setpoint to bdi's:
@@ -977,8 +989,7 @@
 	 *
 	 * 8 serves as the safety ratio.
 	 */
-	if (bdi_dirty)
-		t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+	t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 
 	/*
 	 * The pause time will be settled within range (max_pause/4, max_pause).
@@ -1136,6 +1147,19 @@
 		if (task_ratelimit)
 			break;
 
+		/*
+		 * When an NFS server is unresponsive and its dirty
+		 * pages exceed dirty_thresh, give the other good bdi's a pipe
+		 * to go through, so that tasks on them still remain responsive.
+		 *
+		 * In theory 1 page is enough to keep the consumer-producer
+		 * pipe going: the flusher cleans 1 page => the task dirties 1
+		 * more page. However bdi_dirty has accounting errors.  So use
+		 * the larger and more IO friendly bdi_stat_error.
+		 */
+		if (bdi_dirty <= bdi_stat_error(bdi))
+			break;
+
 		if (fatal_signal_pending(current))
 			break;
 	}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2b8ba3a..7990ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -181,39 +181,17 @@
 static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-  /*
-   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
-   * ranges of memory (RAM) that may be registered with add_active_range().
-   * Ranges passed to add_active_range() will be merged if possible
-   * so the number of times add_active_range() can be called is
-   * related to the number of nodes and the number of holes
-   */
-  #ifdef CONFIG_MAX_ACTIVE_REGIONS
-    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
-    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
-  #else
-    #if MAX_NUMNODES >= 32
-      /* If there can be many nodes, allow up to 50 holes per node */
-      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
-    #else
-      /* By default, allow up to 256 distinct regions */
-      #define MAX_ACTIVE_REGIONS 256
-    #endif
-  #endif
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_movablecore;
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
-  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
-  static int __meminitdata nr_nodemap_entries;
-  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
-  static unsigned long __initdata required_kernelcore;
-  static unsigned long __initdata required_movablecore;
-  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-
-  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
-  int movable_zone;
-  EXPORT_SYMBOL(movable_zone);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+int movable_zone;
+EXPORT_SYMBOL(movable_zone);
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
@@ -333,8 +311,8 @@
  *
  * The remaining PAGE_SIZE pages are called "tail pages".
  *
- * All pages have PG_compound set.  All pages have their ->private pointing at
- * the head page (even the head page has this).
+ * All pages have PG_compound set.  All tail pages have their ->first_page
+ * pointing at the head page.
  *
  * The first tail page's ->lru.next holds the address of the compound page's
  * put_page() function.  Its ->lru.prev holds the order of allocation.
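
A sketch mirroring how compound_head() resolves a tail page under the description above; illustration only, not part of this patch:

	static inline struct page *head_of(struct page *page)
	{
		/* tail pages point back at the head via ->first_page */
		return PageTail(page) ? page->first_page : page;
	}
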
@@ -706,10 +684,10 @@
 		int loop;
 
 		prefetchw(page);
-		for (loop = 0; loop < BITS_PER_LONG; loop++) {
+		for (loop = 0; loop < (1 << order); loop++) {
 			struct page *p = &page[loop];
 
-			if (loop + 1 < BITS_PER_LONG)
+			if (loop + 1 < (1 << order))
 				prefetchw(p + 1);
 			__ClearPageReserved(p);
 			set_page_count(p, 0);
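
To see what the new loop bound fixes, take an order-10 block on a 64-bit build: 1 << 10 = 1024 pages need PG_reserved cleared and their counts reset, but the old loop stopped after BITS_PER_LONG = 64 of them, leaving the remaining 960 pages reserved.
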
@@ -1408,7 +1386,7 @@
 
 static int __init fail_page_alloc_debugfs(void)
 {
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 
 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
@@ -3737,35 +3715,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-/*
- * Basic iterator support. Return the first range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns first region regardless of node
- */
-static int __meminit first_active_region_index_in_nid(int nid)
-{
-	int i;
-
-	for (i = 0; i < nr_nodemap_entries; i++)
-		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit next_active_region_index_in_nid(int index, int nid)
-{
-	for (index = index + 1; index < nr_nodemap_entries; index++)
-		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-			return index;
-
-	return -1;
-}
-
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3775,15 +3725,12 @@
  */
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		unsigned long start_pfn = early_node_map[i].start_pfn;
-		unsigned long end_pfn = early_node_map[i].end_pfn;
-
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 		if (start_pfn <= pfn && pfn < end_pfn)
-			return early_node_map[i].nid;
-	}
+			return nid;
 	/* This is a memory hole */
 	return -1;
 }
@@ -3812,11 +3759,6 @@
 }
 #endif
 
-/* Basic iterator support to walk early_node_map[] */
-#define for_each_active_range_index_in_nid(i, nid) \
-	for (i = first_active_region_index_in_nid(nid); i != -1; \
-				i = next_active_region_index_in_nid(i, nid))
-
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3826,122 +3768,34 @@
  * add_active_ranges() contain no holes and may be freed, this
  * function may be used instead of calling free_bootmem() manually.
  */
-void __init free_bootmem_with_active_regions(int nid,
-						unsigned long max_low_pfn)
+void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, this_nid;
 
-	for_each_active_range_index_in_nid(i, nid) {
-		unsigned long size_pages = 0;
-		unsigned long end_pfn = early_node_map[i].end_pfn;
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
+		start_pfn = min(start_pfn, max_low_pfn);
+		end_pfn = min(end_pfn, max_low_pfn);
 
-		if (early_node_map[i].start_pfn >= max_low_pfn)
-			continue;
-
-		if (end_pfn > max_low_pfn)
-			end_pfn = max_low_pfn;
-
-		size_pages = end_pfn - early_node_map[i].start_pfn;
-		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
-				PFN_PHYS(early_node_map[i].start_pfn),
-				size_pages << PAGE_SHIFT);
+		if (start_pfn < end_pfn)
+			free_bootmem_node(NODE_DATA(this_nid),
+					  PFN_PHYS(start_pfn),
+					  (end_pfn - start_pfn) << PAGE_SHIFT);
 	}
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
-/*
- * Basic iterator support. Return the last range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns last region regardless of node
- */
-static int __meminit last_active_region_index_in_nid(int nid)
-{
-	int i;
-
-	for (i = nr_nodemap_entries - 1; i >= 0; i--)
-		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Basic iterator support. Return the previous active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit previous_active_region_index_in_nid(int index, int nid)
-{
-	for (index = index - 1; index >= 0; index--)
-		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-			return index;
-
-	return -1;
-}
-
-#define for_each_active_range_index_in_nid_reverse(i, nid) \
-	for (i = last_active_region_index_in_nid(nid); i != -1; \
-				i = previous_active_region_index_in_nid(i, nid))
-
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit)
-{
-	int i;
-
-	/* Need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid_reverse(i, nid) {
-		u64 addr;
-		u64 ei_start, ei_last;
-		u64 final_start, final_end;
-
-		ei_last = early_node_map[i].end_pfn;
-		ei_last <<= PAGE_SHIFT;
-		ei_start = early_node_map[i].start_pfn;
-		ei_start <<= PAGE_SHIFT;
-
-		final_start = max(ei_start, goal);
-		final_end = min(ei_last, limit);
-
-		if (final_start >= final_end)
-			continue;
-
-		addr = memblock_find_in_range(final_start, final_end, size, align);
-
-		if (addr == MEMBLOCK_ERROR)
-			continue;
-
-		return addr;
-	}
-
-	return MEMBLOCK_ERROR;
-}
-#endif
-
 int __init add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid)
 {
+	unsigned long start_pfn, end_pfn;
 	int i;
-	u64 start, end;
 
 	/* need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid(i, nid) {
-		start = early_node_map[i].start_pfn;
-		end = early_node_map[i].end_pfn;
-		nr_range = add_range(range, az, nr_range, start, end);
-	}
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
+		nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
 	return nr_range;
 }
 
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
-	int i;
-	int ret;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		ret = work_fn(early_node_map[i].start_pfn,
-			      early_node_map[i].end_pfn, data);
-		if (ret)
-			break;
-	}
-}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -3952,12 +3806,11 @@
  */
 void __init sparse_memory_present_with_active_regions(int nid)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, this_nid;
 
-	for_each_active_range_index_in_nid(i, nid)
-		memory_present(early_node_map[i].nid,
-				early_node_map[i].start_pfn,
-				early_node_map[i].end_pfn);
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
+		memory_present(this_nid, start_pfn, end_pfn);
 }
 
 /**
@@ -3974,13 +3827,15 @@
 void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
+	unsigned long this_start_pfn, this_end_pfn;
 	int i;
+
 	*start_pfn = -1UL;
 	*end_pfn = 0;
 
-	for_each_active_range_index_in_nid(i, nid) {
-		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
-		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
+	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+		*start_pfn = min(*start_pfn, this_start_pfn);
+		*end_pfn = max(*end_pfn, this_end_pfn);
 	}
 
 	if (*start_pfn == -1UL)
@@ -4083,46 +3938,16 @@
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
-	int i = 0;
-	unsigned long prev_end_pfn = 0, hole_pages = 0;
-	unsigned long start_pfn;
+	unsigned long nr_absent = range_end_pfn - range_start_pfn;
+	unsigned long start_pfn, end_pfn;
+	int i;
 
-	/* Find the end_pfn of the first active range of pfns in the node */
-	i = first_active_region_index_in_nid(nid);
-	if (i == -1)
-		return 0;
-
-	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
-
-	/* Account for ranges before physical memory on this node */
-	if (early_node_map[i].start_pfn > range_start_pfn)
-		hole_pages = prev_end_pfn - range_start_pfn;
-
-	/* Find all holes for the zone within the node */
-	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
-
-		/* No need to continue if prev_end_pfn is outside the zone */
-		if (prev_end_pfn >= range_end_pfn)
-			break;
-
-		/* Make sure the end of the zone is not within the hole */
-		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
-		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
-
-		/* Update the hole size cound and move on */
-		if (start_pfn > range_start_pfn) {
-			BUG_ON(prev_end_pfn > start_pfn);
-			hole_pages += start_pfn - prev_end_pfn;
-		}
-		prev_end_pfn = early_node_map[i].end_pfn;
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+		nr_absent -= end_pfn - start_pfn;
 	}
-
-	/* Account for ranges past physical memory on this node */
-	if (range_end_pfn > prev_end_pfn)
-		hole_pages += range_end_pfn -
-				max(range_start_pfn, prev_end_pfn);
-
-	return hole_pages;
+	return nr_absent;
 }
 
 /**
@@ -4143,14 +3968,14 @@
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
+	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
+	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
 	unsigned long node_start_pfn, node_end_pfn;
 	unsigned long zone_start_pfn, zone_end_pfn;
 
 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
-	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
-							node_start_pfn);
-	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
-							node_end_pfn);
+	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
+	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
 	adjust_zone_range_for_zone_movable(nid, zone_type,
 			node_start_pfn, node_end_pfn,
@@ -4158,7 +3983,7 @@
 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
-#else
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *zones_size)
@@ -4176,7 +4001,7 @@
 	return zholes_size[zone_type];
 }
 
-#endif
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
@@ -4399,10 +4224,10 @@
 	 */
 	if (pgdat == NODE_DATA(0)) {
 		mem_map = NODE_DATA(0)->node_mem_map;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 	}
 #endif
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
@@ -4427,7 +4252,7 @@
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
 #if MAX_NUMNODES > 1
 /*
@@ -4449,170 +4274,6 @@
 #endif
 
 /**
- * add_active_range - Register a range of PFNs backed by physical memory
- * @nid: The node ID the range resides on
- * @start_pfn: The start PFN of the available physical memory
- * @end_pfn: The end PFN of the available physical memory
- *
- * These ranges are stored in an early_node_map[] and later used by
- * free_area_init_nodes() to calculate zone sizes and holes. If the
- * range spans a memory hole, it is up to the architecture to ensure
- * the memory is not freed by the bootmem allocator. If possible
- * the range being registered will be merged with existing ranges.
- */
-void __init add_active_range(unsigned int nid, unsigned long start_pfn,
-						unsigned long end_pfn)
-{
-	int i;
-
-	mminit_dprintk(MMINIT_TRACE, "memory_register",
-			"Entering add_active_range(%d, %#lx, %#lx) "
-			"%d entries of %d used\n",
-			nid, start_pfn, end_pfn,
-			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
-
-	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
-
-	/* Merge with existing active regions if possible */
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		if (early_node_map[i].nid != nid)
-			continue;
-
-		/* Skip if an existing region covers this new one */
-		if (start_pfn >= early_node_map[i].start_pfn &&
-				end_pfn <= early_node_map[i].end_pfn)
-			return;
-
-		/* Merge forward if suitable */
-		if (start_pfn <= early_node_map[i].end_pfn &&
-				end_pfn > early_node_map[i].end_pfn) {
-			early_node_map[i].end_pfn = end_pfn;
-			return;
-		}
-
-		/* Merge backward if suitable */
-		if (start_pfn < early_node_map[i].start_pfn &&
-				end_pfn >= early_node_map[i].start_pfn) {
-			early_node_map[i].start_pfn = start_pfn;
-			return;
-		}
-	}
-
-	/* Check that early_node_map is large enough */
-	if (i >= MAX_ACTIVE_REGIONS) {
-		printk(KERN_CRIT "More than %d memory regions, truncating\n",
-							MAX_ACTIVE_REGIONS);
-		return;
-	}
-
-	early_node_map[i].nid = nid;
-	early_node_map[i].start_pfn = start_pfn;
-	early_node_map[i].end_pfn = end_pfn;
-	nr_nodemap_entries = i + 1;
-}
-
-/**
- * remove_active_range - Shrink an existing registered range of PFNs
- * @nid: The node id the range is on that should be shrunk
- * @start_pfn: The new PFN of the range
- * @end_pfn: The new PFN of the range
- *
- * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept near the end physical page range that has already been
- * registered. This function allows an arch to shrink an existing registered
- * range.
- */
-void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
-				unsigned long end_pfn)
-{
-	int i, j;
-	int removed = 0;
-
-	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
-			  nid, start_pfn, end_pfn);
-
-	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid) {
-		if (early_node_map[i].start_pfn >= start_pfn &&
-		    early_node_map[i].end_pfn <= end_pfn) {
-			/* clear it */
-			early_node_map[i].start_pfn = 0;
-			early_node_map[i].end_pfn = 0;
-			removed = 1;
-			continue;
-		}
-		if (early_node_map[i].start_pfn < start_pfn &&
-		    early_node_map[i].end_pfn > start_pfn) {
-			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
-			early_node_map[i].end_pfn = start_pfn;
-			if (temp_end_pfn > end_pfn)
-				add_active_range(nid, end_pfn, temp_end_pfn);
-			continue;
-		}
-		if (early_node_map[i].start_pfn >= start_pfn &&
-		    early_node_map[i].end_pfn > end_pfn &&
-		    early_node_map[i].start_pfn < end_pfn) {
-			early_node_map[i].start_pfn = end_pfn;
-			continue;
-		}
-	}
-
-	if (!removed)
-		return;
-
-	/* remove the blank ones */
-	for (i = nr_nodemap_entries - 1; i > 0; i--) {
-		if (early_node_map[i].nid != nid)
-			continue;
-		if (early_node_map[i].end_pfn)
-			continue;
-		/* we found it, get rid of it */
-		for (j = i; j < nr_nodemap_entries - 1; j++)
-			memcpy(&early_node_map[j], &early_node_map[j+1],
-				sizeof(early_node_map[j]));
-		j = nr_nodemap_entries - 1;
-		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
-		nr_nodemap_entries--;
-	}
-}
-
-/**
- * remove_all_active_ranges - Remove all currently registered regions
- *
- * During discovery, it may be found that a table like SRAT is invalid
- * and an alternative discovery method must be used. This function removes
- * all currently registered regions.
- */
-void __init remove_all_active_ranges(void)
-{
-	memset(early_node_map, 0, sizeof(early_node_map));
-	nr_nodemap_entries = 0;
-}
-
-/* Compare two active node_active_regions */
-static int __init cmp_node_active_region(const void *a, const void *b)
-{
-	struct node_active_region *arange = (struct node_active_region *)a;
-	struct node_active_region *brange = (struct node_active_region *)b;
-
-	/* Done this way to avoid overflows */
-	if (arange->start_pfn > brange->start_pfn)
-		return 1;
-	if (arange->start_pfn < brange->start_pfn)
-		return -1;
-
-	return 0;
-}
-
-/* sort the node_map by start_pfn */
-void __init sort_node_map(void)
-{
-	sort(early_node_map, (size_t)nr_nodemap_entries,
-			sizeof(struct node_active_region),
-			cmp_node_active_region, NULL);
-}
-
-/**
  * node_map_pfn_alignment - determine the maximum internode alignment
  *
  * This function should be called after node map is populated and sorted.
@@ -4634,15 +4295,11 @@
 unsigned long __init node_map_pfn_alignment(void)
 {
 	unsigned long accl_mask = 0, last_end = 0;
+	unsigned long start, end, mask;
 	int last_nid = -1;
-	int i;
+	int i, nid;
 
-	for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
-		int nid = early_node_map[i].nid;
-		unsigned long start = early_node_map[i].start_pfn;
-		unsigned long end = early_node_map[i].end_pfn;
-		unsigned long mask;
-
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (!start || last_nid < 0 || last_nid == nid) {
 			last_nid = nid;
 			last_end = end;
@@ -4669,12 +4326,12 @@
 /* Find the lowest pfn for a node */
 static unsigned long __init find_min_pfn_for_node(int nid)
 {
-	int i;
 	unsigned long min_pfn = ULONG_MAX;
+	unsigned long start_pfn;
+	int i;
 
-	/* Assuming a sorted map, the first range found has the starting pfn */
-	for_each_active_range_index_in_nid(i, nid)
-		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
+	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
+		min_pfn = min(min_pfn, start_pfn);
 
 	if (min_pfn == ULONG_MAX) {
 		printk(KERN_WARNING
@@ -4703,15 +4360,16 @@
  */
 static unsigned long __init early_calculate_totalpages(void)
 {
-	int i;
 	unsigned long totalpages = 0;
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		unsigned long pages = early_node_map[i].end_pfn -
-						early_node_map[i].start_pfn;
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		unsigned long pages = end_pfn - start_pfn;
+
 		totalpages += pages;
 		if (pages)
-			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
+			node_set_state(nid, N_HIGH_MEMORY);
 	}
   	return totalpages;
 }
@@ -4766,6 +4424,8 @@
 	/* Spread kernelcore memory as evenly as possible throughout nodes */
 	kernelcore_node = required_kernelcore / usable_nodes;
 	for_each_node_state(nid, N_HIGH_MEMORY) {
+		unsigned long start_pfn, end_pfn;
+
 		/*
 		 * Recalculate kernelcore_node if the division per node
 		 * now exceeds what is necessary to satisfy the requested
@@ -4782,13 +4442,10 @@
 		kernelcore_remaining = kernelcore_node;
 
 		/* Go through each range of PFNs within this node */
-		for_each_active_range_index_in_nid(i, nid) {
-			unsigned long start_pfn, end_pfn;
+		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 			unsigned long size_pages;
 
-			start_pfn = max(early_node_map[i].start_pfn,
-						zone_movable_pfn[nid]);
-			end_pfn = early_node_map[i].end_pfn;
+			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
 			if (start_pfn >= end_pfn)
 				continue;
 
@@ -4890,11 +4547,8 @@
  */
 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 {
-	unsigned long nid;
-	int i;
-
-	/* Sort early_node_map as initialisation assumes it is sorted */
-	sort_node_map();
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
 	/* Record where the zone boundaries are */
 	memset(arch_zone_lowest_possible_pfn, 0,
@@ -4941,11 +4595,9 @@
 	}
 
 	/* Print out the early_node_map[] */
-	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
-	for (i = 0; i < nr_nodemap_entries; i++)
-		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
-						early_node_map[i].start_pfn,
-						early_node_map[i].end_pfn);
+	printk("Early memory PFN ranges\n");
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
@@ -4998,7 +4650,7 @@
 early_param("kernelcore", cmdline_parse_kernelcore);
 early_param("movablecore", cmdline_parse_movablecore);
 
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 /**
  * set_dma_reserve - set the specified number of pages reserved in the first zone
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a..716eb4a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1023,9 +1023,11 @@
 		if (!is_vmalloc_addr(addr))
 			return __pa(addr);
 		else
-			return page_to_phys(vmalloc_to_page(addr));
+			return page_to_phys(vmalloc_to_page(addr)) +
+			       offset_in_page(addr);
 	} else
-		return page_to_phys(pcpu_addr_to_page(addr));
+		return page_to_phys(pcpu_addr_to_page(addr)) +
+		       offset_in_page(addr);
 }
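
A worked example of the fix with an invented address, assuming 4KB pages: for a per-cpu object at vmalloc address 0xf0001234, the old code returned page_to_phys() of the containing page only; offset_in_page() now adds back the 0x234 bytes into that page, so callers get the physical address of the object itself rather than of its page frame.
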
 
 /**
diff --git a/mm/shmem.c b/mm/shmem.c
index d6722506..feead19 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1092,7 +1092,7 @@
 }
 
 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
-				     int mode, dev_t dev, unsigned long flags)
+				     umode_t mode, dev_t dev, unsigned long flags)
 {
 	struct inode *inode;
 	struct shmem_inode_info *info;
@@ -1456,7 +1456,7 @@
  * File creation. Allocate an inode, and we're done..
  */
 static int
-shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	struct inode *inode;
 	int error = -ENOSPC;
@@ -1489,7 +1489,7 @@
 	return error;
 }
 
-static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int error;
 
@@ -1499,7 +1499,7 @@
 	return 0;
 }
 
-static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
+static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		struct nameidata *nd)
 {
 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
@@ -2118,9 +2118,9 @@
 	return error;
 }
 
-static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int shmem_show_options(struct seq_file *seq, struct dentry *root)
 {
-	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
 
 	if (sbinfo->max_blocks != shmem_default_max_blocks())
 		seq_printf(seq, ",size=%luk",
@@ -2128,7 +2128,7 @@
 	if (sbinfo->max_inodes != shmem_default_max_inodes())
 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
-		seq_printf(seq, ",mode=%03o", sbinfo->mode);
+		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
 	if (sbinfo->uid != 0)
 		seq_printf(seq, ",uid=%u", sbinfo->uid);
 	if (sbinfo->gid != 0)
@@ -2234,13 +2234,12 @@
 static void shmem_destroy_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
 static void shmem_destroy_inode(struct inode *inode)
 {
-	if ((inode->i_mode & S_IFMT) == S_IFREG)
+	if (S_ISREG(inode->i_mode))
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
 }
diff --git a/mm/slub.c b/mm/slub.c
index ed3334d..09ccee8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -368,7 +368,7 @@
 	VM_BUG_ON(!irqs_disabled());
 #ifdef CONFIG_CMPXCHG_DOUBLE
 	if (s->flags & __CMPXCHG_DOUBLE) {
-		if (cmpxchg_double(&page->freelist,
+		if (cmpxchg_double(&page->freelist, &page->counters,
 			freelist_old, counters_old,
 			freelist_new, counters_new))
 		return 1;
@@ -402,7 +402,7 @@
 {
 #ifdef CONFIG_CMPXCHG_DOUBLE
 	if (s->flags & __CMPXCHG_DOUBLE) {
-		if (cmpxchg_double(&page->freelist,
+		if (cmpxchg_double(&page->freelist, &page->counters,
 			freelist_old, counters_old,
 			freelist_new, counters_new))
 		return 1;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 78cc4d1..ea6b32d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -13,7 +13,6 @@
 #include <linux/swapops.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1d8b32f..21fdf46 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1118,6 +1118,32 @@
 EXPORT_SYMBOL(vm_map_ram);
 
 /**
+ * vm_area_add_early - add vmap area early during boot
+ * @vm: vm_struct to add
+ *
+ * This function is used to add a fixed kernel vm area to vmlist before
+ * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
+ * should contain proper values and the other fields should be zero.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_add_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			BUG_ON(tmp->addr < vm->addr + vm->size);
+			break;
+		} else
+			BUG_ON(tmp->addr + tmp->size > vm->addr);
+	}
+	vm->next = *p;
+	*p = vm;
+}
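
A rough example of the intended caller, an __init arch path registering a fixed mapping before vmalloc_init(); the address, size and the variable name are invented:

	static struct vm_struct early_console_vm;	/* stays on vmlist, so not __initdata */

	static void __init map_early_console(void)
	{
		early_console_vm.addr  = (void *)0xfee00000;
		early_console_vm.size  = PAGE_SIZE;
		early_console_vm.flags = VM_IOREMAP;
		vm_area_add_early(&early_console_vm);
	}
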
+
+/**
  * vm_area_register_early - register vmap area early during boot
  * @vm: vm_struct to register
  * @align: requested alignment
@@ -1139,8 +1165,7 @@
 
 	vm->addr = (void *)addr;
 
-	vm->next = vmlist;
-	vmlist = vm;
+	vm_area_add_early(vm);
 }
 
 void __init vmalloc_init(void)
@@ -1290,7 +1315,7 @@
 		unsigned long align, unsigned long flags, unsigned long start,
 		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
-	static struct vmap_area *va;
+	struct vmap_area *va;
 	struct vm_struct *area;
 
 	BUG_ON(in_interrupt());
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f54a05b..11adc890 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3475,16 +3475,16 @@
  * a specified node's per zone unevictable lists for evictable pages.
  */
 
-static ssize_t read_scan_unevictable_node(struct sys_device *dev,
-					  struct sysdev_attribute *attr,
+static ssize_t read_scan_unevictable_node(struct device *dev,
+					  struct device_attribute *attr,
 					  char *buf)
 {
 	warn_scan_unevictable_pages();
 	return sprintf(buf, "0\n");	/* always zero; should fit... */
 }
 
-static ssize_t write_scan_unevictable_node(struct sys_device *dev,
-					   struct sysdev_attribute *attr,
+static ssize_t write_scan_unevictable_node(struct device *dev,
+					   struct device_attribute *attr,
 					const char *buf, size_t count)
 {
 	warn_scan_unevictable_pages();
@@ -3492,17 +3492,17 @@
 }
 
 
-static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
 			read_scan_unevictable_node,
 			write_scan_unevictable_node);
 
 int scan_unevictable_register_node(struct node *node)
 {
-	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 
 void scan_unevictable_unregister_node(struct node *node)
 {
-	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 #endif
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 5471628..efea35b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -51,27 +51,6 @@
 
 /* End of global variables definitions. */
 
-static void vlan_group_free(struct vlan_group *grp)
-{
-	int i;
-
-	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
-		kfree(grp->vlan_devices_arrays[i]);
-	kfree(grp);
-}
-
-static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
-{
-	struct vlan_group *grp;
-
-	grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
-	if (!grp)
-		return NULL;
-
-	grp->real_dev = real_dev;
-	return grp;
-}
-
 static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 {
 	struct net_device **array;
@@ -92,32 +71,29 @@
 	return 0;
 }
 
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
-	vlan_group_free(container_of(rcu, struct vlan_group, rcu));
-}
-
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
-	const struct net_device_ops *ops = real_dev->netdev_ops;
+	struct vlan_info *vlan_info;
 	struct vlan_group *grp;
 	u16 vlan_id = vlan->vlan_id;
 
 	ASSERT_RTNL();
 
-	grp = rtnl_dereference(real_dev->vlgrp);
-	BUG_ON(!grp);
+	vlan_info = rtnl_dereference(real_dev->vlan_info);
+	BUG_ON(!vlan_info);
+
+	grp = &vlan_info->grp;
 
 	/* Take it out of our own structures, but be sure to interlock with
 	 * HW accelerating devices or SW vlan input packet processing if
 	 * VLAN is not 0 (leave it there for 802.1p).
 	 */
-	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
-		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+	if (vlan_id)
+		vlan_vid_del(real_dev, vlan_id);
 
-	grp->nr_vlans--;
+	grp->nr_vlan_devs--;
 
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_leave(dev);
@@ -129,16 +105,9 @@
 	 */
 	unregister_netdevice_queue(dev, head);
 
-	/* If the group is now empty, kill off the group. */
-	if (grp->nr_vlans == 0) {
+	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
 
-		RCU_INIT_POINTER(real_dev->vlgrp, NULL);
-
-		/* Free the group, after all cpu's are done. */
-		call_rcu(&grp->rcu, vlan_rcu_free);
-	}
-
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
@@ -167,21 +136,26 @@
 
 int register_vlan_dev(struct net_device *dev)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
-	const struct net_device_ops *ops = real_dev->netdev_ops;
 	u16 vlan_id = vlan->vlan_id;
-	struct vlan_group *grp, *ngrp = NULL;
+	struct vlan_info *vlan_info;
+	struct vlan_group *grp;
 	int err;
 
-	grp = rtnl_dereference(real_dev->vlgrp);
-	if (!grp) {
-		ngrp = grp = vlan_group_alloc(real_dev);
-		if (!grp)
-			return -ENOBUFS;
+	err = vlan_vid_add(real_dev, vlan_id);
+	if (err)
+		return err;
+
+	vlan_info = rtnl_dereference(real_dev->vlan_info);
+	/* vlan_info should be there now. vlan_vid_add took care of it */
+	BUG_ON(!vlan_info);
+
+	grp = &vlan_info->grp;
+	if (grp->nr_vlan_devs == 0) {
 		err = vlan_gvrp_init_applicant(real_dev);
 		if (err < 0)
-			goto out_free_group;
+			goto out_vid_del;
 	}
 
 	err = vlan_group_prealloc_vid(grp, vlan_id);
@@ -192,7 +166,7 @@
 	if (err < 0)
 		goto out_uninit_applicant;
 
-	/* Account for reference in struct vlan_dev_info */
+	/* Account for reference in struct vlan_dev_priv */
 	dev_hold(real_dev);
 
 	netif_stacked_transfer_operstate(real_dev, dev);
@@ -202,24 +176,15 @@
 	 * it into our local structure.
 	 */
 	vlan_group_set_device(grp, vlan_id, dev);
-	grp->nr_vlans++;
-
-	if (ngrp) {
-		rcu_assign_pointer(real_dev->vlgrp, ngrp);
-	}
-	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
-		ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+	grp->nr_vlan_devs++;
 
 	return 0;
 
 out_uninit_applicant:
-	if (ngrp)
+	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
-out_free_group:
-	if (ngrp) {
-		/* Free the group, after all cpu's are done. */
-		call_rcu(&ngrp->rcu, vlan_rcu_free);
-	}
+out_vid_del:
+	vlan_vid_del(real_dev, vlan_id);
 	return err;
 }
 
@@ -267,7 +232,7 @@
 		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
 	}
 
-	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
+	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
 
 	if (new_dev == NULL)
 		return -ENOBUFS;
@@ -278,10 +243,10 @@
 	 */
 	new_dev->mtu = real_dev->mtu;
 
-	vlan_dev_info(new_dev)->vlan_id = vlan_id;
-	vlan_dev_info(new_dev)->real_dev = real_dev;
-	vlan_dev_info(new_dev)->dent = NULL;
-	vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+	vlan_dev_priv(new_dev)->vlan_id = vlan_id;
+	vlan_dev_priv(new_dev)->real_dev = real_dev;
+	vlan_dev_priv(new_dev)->dent = NULL;
+	vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
 
 	new_dev->rtnl_link_ops = &vlan_link_ops;
 	err = register_vlan_dev(new_dev);
@@ -298,7 +263,7 @@
 static void vlan_sync_address(struct net_device *dev,
 			      struct net_device *vlandev)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
 	/* May be called without an actual change */
 	if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
@@ -360,25 +325,26 @@
 {
 	struct net_device *dev = ptr;
 	struct vlan_group *grp;
+	struct vlan_info *vlan_info;
 	int i, flgs;
 	struct net_device *vlandev;
-	struct vlan_dev_info *vlan;
+	struct vlan_dev_priv *vlan;
 	LIST_HEAD(list);
 
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
 
 	if ((event == NETDEV_UP) &&
-	    (dev->features & NETIF_F_HW_VLAN_FILTER) &&
-	    dev->netdev_ops->ndo_vlan_rx_add_vid) {
+	    (dev->features & NETIF_F_HW_VLAN_FILTER)) {
 		pr_info("adding VLAN 0 to HW filter on device %s\n",
 			dev->name);
-		dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+		vlan_vid_add(dev, 0);
 	}
 
-	grp = rtnl_dereference(dev->vlgrp);
-	if (!grp)
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
 		goto out;
+	grp = &vlan_info->grp;
 
 	/* It is OK that we do not hold the group lock right now,
 	 * as we run under the RTNL lock.
@@ -447,7 +413,7 @@
 			if (!(flgs & IFF_UP))
 				continue;
 
-			vlan = vlan_dev_info(vlandev);
+			vlan = vlan_dev_priv(vlandev);
 			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 				dev_change_flags(vlandev, flgs & ~IFF_UP);
 			netif_stacked_transfer_operstate(dev, vlandev);
@@ -465,7 +431,7 @@
 			if (flgs & IFF_UP)
 				continue;
 
-			vlan = vlan_dev_info(vlandev);
+			vlan = vlan_dev_priv(vlandev);
 			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 				dev_change_flags(vlandev, flgs | IFF_UP);
 			netif_stacked_transfer_operstate(dev, vlandev);
@@ -482,9 +448,9 @@
 			if (!vlandev)
 				continue;
 
-			/* unregistration of last vlan destroys group, abort
+			/* removal of last vid destroys vlan_info, abort
 			 * afterwards */
-			if (grp->nr_vlans == 1)
+			if (vlan_info->nr_vids == 1)
 				i = VLAN_N_VID;
 
 			unregister_vlan_dev(vlandev, &list);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9fd45f3..a4886d9 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -3,6 +3,7 @@
 
 #include <linux/if_vlan.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/list.h>
 
 
 /**
@@ -40,8 +41,10 @@
 	u32			tx_dropped;
 };
 
+struct netpoll;
+
 /**
- *	struct vlan_dev_info - VLAN private device data
+ *	struct vlan_dev_priv - VLAN private device data
  *	@nr_ingress_mappings: number of ingress priority mappings
  *	@ingress_priority_map: ingress priority mappings
  *	@nr_egress_mappings: number of egress priority mappings
@@ -53,7 +56,7 @@
  *	@dent: proc dir entry
  *	@vlan_pcpu_stats: ptr to percpu rx stats
  */
-struct vlan_dev_info {
+struct vlan_dev_priv {
 	unsigned int				nr_ingress_mappings;
 	u32					ingress_priority_map[8];
 	unsigned int				nr_egress_mappings;
@@ -67,13 +70,39 @@
 
 	struct proc_dir_entry			*dent;
 	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll				*netpoll;
+#endif
 };
 
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 {
 	return netdev_priv(dev);
 }
 
+/* If this changes, the look-up algorithm will have to be reworked,
+ * because it depends on completely exhausting the VLAN identifier
+ * space.  It gives constant-time look-up, but in many cases wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
+#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+struct vlan_group {
+	unsigned int		nr_vlan_devs;
+	struct hlist_node	hlist;	/* linked list */
+	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+	struct net_device	*real_dev; /* The ethernet(like) device
+					    * the vlan is attached to.
+					    */
+	struct vlan_group	grp;
+	struct list_head	vid_list;
+	unsigned int		nr_vids;
+	struct rcu_head		rcu;
+};
+
 static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
 						       u16 vlan_id)
 {
@@ -97,10 +126,10 @@
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
 					       u16 vlan_id)
 {
-	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-	if (grp)
-		return vlan_group_get_device(grp, vlan_id);
+	if (vlan_info)
+		return vlan_group_get_device(&vlan_info->grp, vlan_id);
 
 	return NULL;
 }
@@ -121,7 +150,7 @@
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
 {
-	struct vlan_dev_info *vip = vlan_dev_info(dev);
+	struct vlan_dev_priv *vip = vlan_dev_priv(dev);
 
 	return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f5ffc02..4d39d80 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -36,7 +36,7 @@
 			skb->pkt_type = PACKET_HOST;
 	}
 
-	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
 		unsigned int offset = skb->data - skb_mac_header(skb);
 
 		/*
@@ -55,7 +55,7 @@
 	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
+	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
 
 	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
@@ -71,10 +71,10 @@
 struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
 					u16 vlan_id)
 {
-	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-	if (grp) {
-		return vlan_group_get_device(grp, vlan_id);
+	if (vlan_info) {
+		return vlan_group_get_device(&vlan_info->grp, vlan_id);
 	} else {
 		/*
 		 * Bonding slaves do not have grp assigned to themselves.
@@ -90,13 +90,13 @@
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-	return vlan_dev_info(dev)->real_dev;
+	return vlan_dev_priv(dev)->real_dev;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
-	return vlan_dev_info(dev)->vlan_id;
+	return vlan_dev_priv(dev)->vlan_id;
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
@@ -110,39 +110,6 @@
 	return skb;
 }
 
-static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
-{
-	__be16 proto;
-	unsigned char *rawp;
-
-	/*
-	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
-	 * three protocols care about.
-	 */
-
-	proto = vhdr->h_vlan_encapsulated_proto;
-	if (ntohs(proto) >= 1536) {
-		skb->protocol = proto;
-		return;
-	}
-
-	rawp = skb->data;
-	if (*(unsigned short *) rawp == 0xFFFF)
-		/*
-		 * This is a magic hack to spot IPX packets. Older Novell
-		 * breaks the protocol design and runs IPX over 802.3 without
-		 * an 802.2 LLC layer. We look for FFFF which isn't a used
-		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
-		 * but does for the rest.
-		 */
-		skb->protocol = htons(ETH_P_802_3);
-	else
-		/*
-		 * Real 802.2 LLC
-		 */
-		skb->protocol = htons(ETH_P_802_2);
-}
-
 struct sk_buff *vlan_untag(struct sk_buff *skb)
 {
 	struct vlan_hdr *vhdr;
@@ -179,3 +146,226 @@
 	kfree_skb(skb);
 	return NULL;
 }
+
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+	int i;
+
+	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+		kfree(grp->vlan_devices_arrays[i]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+	vlan_group_free(&vlan_info->grp);
+	kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+	struct vlan_info *vlan_info;
+
+	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+	if (!vlan_info)
+		return NULL;
+
+	vlan_info->real_dev = dev;
+	INIT_LIST_HEAD(&vlan_info->vid_list);
+	return vlan_info;
+}
+
+struct vlan_vid_info {
+	struct list_head list;
+	unsigned short vid;
+	int refcount;
+};
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+					       unsigned short vid)
+{
+	struct vlan_vid_info *vid_info;
+
+	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+		if (vid_info->vid == vid)
+			return vid_info;
+	}
+	return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+{
+	struct vlan_vid_info *vid_info;
+
+	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+	if (!vid_info)
+		return NULL;
+	vid_info->vid = vid;
+
+	return vid_info;
+}
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+			  struct vlan_vid_info **pvid_info)
+{
+	struct net_device *dev = vlan_info->real_dev;
+	const struct net_device_ops *ops = dev->netdev_ops;
+	struct vlan_vid_info *vid_info;
+	int err;
+
+	vid_info = vlan_vid_info_alloc(vid);
+	if (!vid_info)
+		return -ENOMEM;
+
+	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+	    ops->ndo_vlan_rx_add_vid) {
+		err = ops->ndo_vlan_rx_add_vid(dev, vid);
+		if (err) {
+			kfree(vid_info);
+			return err;
+		}
+	}
+	list_add(&vid_info->list, &vlan_info->vid_list);
+	vlan_info->nr_vids++;
+	*pvid_info = vid_info;
+	return 0;
+}
+
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+	struct vlan_info *vlan_info;
+	struct vlan_vid_info *vid_info;
+	bool vlan_info_created = false;
+	int err;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info) {
+		vlan_info = vlan_info_alloc(dev);
+		if (!vlan_info)
+			return -ENOMEM;
+		vlan_info_created = true;
+	}
+	vid_info = vlan_vid_info_get(vlan_info, vid);
+	if (!vid_info) {
+		err = __vlan_vid_add(vlan_info, vid, &vid_info);
+		if (err)
+			goto out_free_vlan_info;
+	}
+	vid_info->refcount++;
+
+	if (vlan_info_created)
+		rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+	return 0;
+
+out_free_vlan_info:
+	if (vlan_info_created)
+		kfree(vlan_info);
+	return err;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+			   struct vlan_vid_info *vid_info)
+{
+	struct net_device *dev = vlan_info->real_dev;
+	const struct net_device_ops *ops = dev->netdev_ops;
+	unsigned short vid = vid_info->vid;
+	int err;
+
+	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+	    ops->ndo_vlan_rx_kill_vid) {
+		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+		if (err) {
+			pr_warn("failed to kill vid %d for device %s\n",
+				vid, dev->name);
+		}
+	}
+	list_del(&vid_info->list);
+	kfree(vid_info);
+	vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+	struct vlan_info *vlan_info;
+	struct vlan_vid_info *vid_info;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
+		return;
+
+	vid_info = vlan_vid_info_get(vlan_info, vid);
+	if (!vid_info)
+		return;
+	vid_info->refcount--;
+	if (vid_info->refcount == 0) {
+		__vlan_vid_del(vlan_info, vid_info);
+		if (vlan_info->nr_vids == 0) {
+			RCU_INIT_POINTER(dev->vlan_info, NULL);
+			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+		}
+	}
+}
+EXPORT_SYMBOL(vlan_vid_del);
+
+int vlan_vids_add_by_dev(struct net_device *dev,
+			 const struct net_device *by_dev)
+{
+	struct vlan_vid_info *vid_info;
+	struct vlan_info *vlan_info;
+	int err;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(by_dev->vlan_info);
+	if (!vlan_info)
+		return 0;
+
+	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+		err = vlan_vid_add(dev, vid_info->vid);
+		if (err)
+			goto unwind;
+	}
+	return 0;
+
+unwind:
+	list_for_each_entry_continue_reverse(vid_info,
+					     &vlan_info->vid_list,
+					     list) {
+		vlan_vid_del(dev, vid_info->vid);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(vlan_vids_add_by_dev);
+
+void vlan_vids_del_by_dev(struct net_device *dev,
+			  const struct net_device *by_dev)
+{
+	struct vlan_vid_info *vid_info;
+	struct vlan_info *vlan_info;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(by_dev->vlan_info);
+	if (!vlan_info)
+		return;
+
+	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+		vlan_vid_del(dev, vid_info->vid);
+}
+EXPORT_SYMBOL(vlan_vids_del_by_dev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index bc25286..9988d4a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -33,6 +33,7 @@
 #include "vlan.h"
 #include "vlanproc.h"
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 
 /*
  *	Rebuild the Ethernet MAC header. This is called after an ARP
@@ -72,7 +73,7 @@
 {
 	struct vlan_priority_tci_mapping *mp;
 
-	mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+	mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
 	while (mp) {
 		if (mp->priority == skb->priority) {
 			return mp->vlan_qos; /* This should already be shifted
@@ -103,10 +104,10 @@
 	u16 vlan_tci = 0;
 	int rc;
 
-	if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+	if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
 		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
-		vlan_tci = vlan_dev_info(dev)->vlan_id;
+		vlan_tci = vlan_dev_priv(dev)->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 		vhdr->h_vlan_TCI = htons(vlan_tci);
 
@@ -129,7 +130,7 @@
 		saddr = dev->dev_addr;
 
 	/* Now make the underlying real hard header */
-	dev = vlan_dev_info(dev)->real_dev;
+	dev = vlan_dev_priv(dev)->real_dev;
 	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
 	if (rc > 0)
 		rc += vhdrlen;
@@ -149,27 +150,29 @@
 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
 	 */
 	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
-	    vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+	    vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
-		vlan_tci = vlan_dev_info(dev)->vlan_id;
+		vlan_tci = vlan_dev_priv(dev)->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 	}
 
-	skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
+	skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
 	len = skb->len;
+	if (netpoll_tx_running(dev))
+		return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
 	ret = dev_queue_xmit(skb);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 		struct vlan_pcpu_stats *stats;
 
-		stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+		stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
 		stats->tx_bytes += len;
 		u64_stats_update_end(&stats->syncp);
 	} else {
-		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+		this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
 	}
 
 	return ret;
@@ -180,7 +183,7 @@
 	/* TODO: gotta make sure the underlying layer can handle it,
 	 * maybe an IFF_VLAN_CAPABLE flag for devices?
 	 */
-	if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
+	if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
 		return -ERANGE;
 
 	dev->mtu = new_mtu;
@@ -191,7 +194,7 @@
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
 	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
 		vlan->nr_ingress_mappings--;
@@ -204,7 +207,7 @@
 int vlan_dev_set_egress_priority(const struct net_device *dev,
 				 u32 skb_prio, u16 vlan_prio)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct vlan_priority_tci_mapping *mp = NULL;
 	struct vlan_priority_tci_mapping *np;
 	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
@@ -241,7 +244,7 @@
 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
 int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	u32 old_flags = vlan->flags;
 
 	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
@@ -261,12 +264,12 @@
 
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 {
-	strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
+	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
 static int vlan_dev_open(struct net_device *dev)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
 	int err;
 
@@ -313,7 +316,7 @@
 
 static int vlan_dev_stop(struct net_device *dev)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
 
 	dev_mc_unsync(real_dev, dev);
@@ -332,7 +335,7 @@
 
 static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	struct sockaddr *addr = p;
 	int err;
 
@@ -358,7 +361,7 @@
 
 static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	struct ifreq ifrr;
 	int err = -EOPNOTSUPP;
@@ -383,7 +386,7 @@
 
 static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int err = 0;
 
@@ -397,7 +400,7 @@
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
 				   struct scatterlist *sgl, unsigned int sgc)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int rc = 0;
 
@@ -409,7 +412,7 @@
 
 static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int len = 0;
 
@@ -421,7 +424,7 @@
 
 static int vlan_dev_fcoe_enable(struct net_device *dev)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int rc = -EINVAL;
 
@@ -432,7 +435,7 @@
 
 static int vlan_dev_fcoe_disable(struct net_device *dev)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int rc = -EINVAL;
 
@@ -443,7 +446,7 @@
 
 static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int rc = -EINVAL;
 
@@ -455,7 +458,7 @@
 static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
 				    struct scatterlist *sgl, unsigned int sgc)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 	int rc = 0;
 
@@ -468,7 +471,7 @@
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 
 	if (dev->flags & IFF_UP) {
 		if (change & IFF_ALLMULTI)
@@ -480,8 +483,8 @@
 
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-	dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-	dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -519,7 +522,7 @@
 
 static int vlan_dev_init(struct net_device *dev)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	int subclass = 0;
 
 	netif_carrier_off(dev);
@@ -568,8 +571,8 @@
 
 	vlan_dev_set_lockdep_class(dev, subclass);
 
-	vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
-	if (!vlan_dev_info(dev)->vlan_pcpu_stats)
+	vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
 		return -ENOMEM;
 
 	return 0;
@@ -578,7 +581,7 @@
 static void vlan_dev_uninit(struct net_device *dev)
 {
 	struct vlan_priority_tci_mapping *pm;
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	int i;
 
 	free_percpu(vlan->vlan_pcpu_stats);
@@ -591,18 +594,17 @@
 	}
 }
 
-static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	u32 old_features = features;
 
-	features &= real_dev->features;
 	features &= real_dev->vlan_features;
+	features |= NETIF_F_RXCSUM;
+	features &= real_dev->features;
 
 	features |= old_features & NETIF_F_SOFT_FEATURES;
-
-	if (dev_ethtool_get_rx_csum(real_dev))
-		features |= NETIF_F_RXCSUM;
 	features |= NETIF_F_LLTX;
 
 	return features;
@@ -611,7 +613,7 @@
 static int vlan_ethtool_get_settings(struct net_device *dev,
 				     struct ethtool_cmd *cmd)
 {
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
 	return __ethtool_get_settings(vlan->real_dev, cmd);
 }
@@ -627,7 +629,7 @@
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
-	if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+	if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
 		struct vlan_pcpu_stats *p;
 		u32 rx_errors = 0, tx_dropped = 0;
 		int i;
@@ -636,7 +638,7 @@
 			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
 			unsigned int start;
 
-			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
+			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 			do {
 				start = u64_stats_fetch_begin_bh(&p->syncp);
 				rxpackets	= p->rx_packets;
@@ -661,6 +663,57 @@
 	return stats;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+	return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+	struct vlan_dev_priv *info = vlan_dev_priv(dev);
+	struct net_device *real_dev = info->real_dev;
+	struct netpoll *netpoll;
+	int err = 0;
+
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!netpoll)
+		goto out;
+
+	netpoll->dev = real_dev;
+	strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
+
+	err = __netpoll_setup(netpoll);
+	if (err) {
+		kfree(netpoll);
+		goto out;
+	}
+
+	info->netpoll = netpoll;
+
+out:
+	return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+	struct vlan_dev_priv *info = vlan_dev_priv(dev);
+	struct netpoll *netpoll = info->netpoll;
+
+	if (!netpoll)
+		return;
+
+	info->netpoll = NULL;
+
+	/* Wait for transmitting packets to finish before freeing. */
+	synchronize_rcu_bh();
+
+	__netpoll_cleanup(netpoll);
+	kfree(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings	        = vlan_ethtool_get_settings,
 	.get_drvinfo	        = vlan_ethtool_get_drvinfo,
@@ -689,6 +742,11 @@
 	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
 	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
 #endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= vlan_dev_poll_controller,
+	.ndo_netpoll_setup	= vlan_dev_netpoll_setup,
+	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
+#endif
 	.ndo_fix_features	= vlan_dev_fix_features,
 };
 
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
index 061cece..6f975535 100644
--- a/net/8021q/vlan_gvrp.c
+++ b/net/8021q/vlan_gvrp.c
@@ -29,7 +29,7 @@
 
 int vlan_gvrp_request_join(const struct net_device *dev)
 {
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
 	return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
@@ -38,7 +38,7 @@
 
 void vlan_gvrp_request_leave(const struct net_device *dev)
 {
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
 	garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 235c219..5071136 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -105,7 +105,7 @@
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev;
 	int err;
 
@@ -149,7 +149,7 @@
 
 static size_t vlan_get_size(const struct net_device *dev)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
 	return nla_total_size(2) +	/* IFLA_VLAN_ID */
 	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
@@ -159,14 +159,14 @@
 
 static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct vlan_priority_tci_mapping *pm;
 	struct ifla_vlan_flags f;
 	struct ifla_vlan_qos_mapping m;
 	struct nlattr *nest;
 	unsigned int i;
 
-	NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
+	NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
 	if (vlan->flags) {
 		f.flags = vlan->flags;
 		f.mask  = ~0;
@@ -218,7 +218,7 @@
 	.kind		= "vlan",
 	.maxtype	= IFLA_VLAN_MAX,
 	.policy		= vlan_policy,
-	.priv_size	= sizeof(struct vlan_dev_info),
+	.priv_size	= sizeof(struct vlan_dev_priv),
 	.setup		= vlan_setup,
 	.validate	= vlan_validate,
 	.newlink	= vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d34b6da..c718fd3 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -168,13 +168,13 @@
 
 int vlan_proc_add_dev(struct net_device *vlandev)
 {
-	struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 	struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
-	dev_info->dent =
+	vlan->dent =
 		proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
 				 vn->proc_vlan_dir, &vlandev_fops, vlandev);
-	if (!dev_info->dent)
+	if (!vlan->dent)
 		return -ENOBUFS;
 	return 0;
 }
@@ -187,10 +187,10 @@
 	struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
 	/** NOTE:  This will consume the memory pointed to by dent, it seems. */
-	if (vlan_dev_info(vlandev)->dent) {
-		remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
+	if (vlan_dev_priv(vlandev)->dent) {
+		remove_proc_entry(vlan_dev_priv(vlandev)->dent->name,
 				  vn->proc_vlan_dir);
-		vlan_dev_info(vlandev)->dent = NULL;
+		vlan_dev_priv(vlandev)->dent = NULL;
 	}
 	return 0;
 }
@@ -268,10 +268,10 @@
 			   nmtype ? nmtype :  "UNKNOWN");
 	} else {
 		const struct net_device *vlandev = v;
-		const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+		const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
 		seq_printf(seq, "%-15s| %d  | %s\n",  vlandev->name,
-			   dev_info->vlan_id,    dev_info->real_dev->name);
+			   vlan->vlan_id,    vlan->real_dev->name);
 	}
 	return 0;
 }
@@ -279,7 +279,7 @@
 static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
 	struct net_device *vlandev = (struct net_device *) seq->private;
-	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 	struct rtnl_link_stats64 temp;
 	const struct rtnl_link_stats64 *stats;
 	static const char fmt64[] = "%30s %12llu\n";
@@ -291,8 +291,8 @@
 	stats = dev_get_stats(vlandev, &temp);
 	seq_printf(seq,
 		   "%s  VID: %d	 REORDER_HDR: %i  dev->priv_flags: %hx\n",
-		   vlandev->name, dev_info->vlan_id,
-		   (int)(dev_info->flags & 1), vlandev->priv_flags);
+		   vlandev->name, vlan->vlan_id,
+		   (int)(vlan->flags & 1), vlandev->priv_flags);
 
 	seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
 	seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
@@ -300,23 +300,23 @@
 	seq_puts(seq, "\n");
 	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
 	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-	seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+	seq_printf(seq, "Device: %s", vlan->real_dev->name);
 	/* now show all PRIORITY mappings relating to this VLAN */
 	seq_printf(seq, "\nINGRESS priority mappings: "
 			"0:%u  1:%u  2:%u  3:%u  4:%u  5:%u  6:%u 7:%u\n",
-		   dev_info->ingress_priority_map[0],
-		   dev_info->ingress_priority_map[1],
-		   dev_info->ingress_priority_map[2],
-		   dev_info->ingress_priority_map[3],
-		   dev_info->ingress_priority_map[4],
-		   dev_info->ingress_priority_map[5],
-		   dev_info->ingress_priority_map[6],
-		   dev_info->ingress_priority_map[7]);
+		   vlan->ingress_priority_map[0],
+		   vlan->ingress_priority_map[1],
+		   vlan->ingress_priority_map[2],
+		   vlan->ingress_priority_map[3],
+		   vlan->ingress_priority_map[4],
+		   vlan->ingress_priority_map[5],
+		   vlan->ingress_priority_map[6],
+		   vlan->ingress_priority_map[7]);
 
 	seq_printf(seq, " EGRESS priority mappings: ");
 	for (i = 0; i < 16; i++) {
 		const struct vlan_priority_tci_mapping *mp
-			= dev_info->egress_priority_map[i];
+			= vlan->egress_priority_map[i];
 		while (mp) {
 			seq_printf(seq, "%u:%hu ",
 				   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
diff --git a/net/Kconfig b/net/Kconfig
index a073148..e07272d 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -215,6 +215,7 @@
 source "net/dcb/Kconfig"
 source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
+source "net/openvswitch/Kconfig"
 
 config RPS
 	boolean
@@ -232,6 +233,19 @@
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
+config NETPRIO_CGROUP
+	tristate "Network priority cgroup"
+	depends on CGROUPS
+	---help---
+	  Cgroup subsystem for use in assigning processes to network
+	  priorities on a per-interface basis.
+
+config BQL
+	boolean
+	depends on SYSFS
+	select DQL
+	default y
+
 config HAVE_BPF_JIT
 	bool
 
diff --git a/net/Makefile b/net/Makefile
index acdde49..ad432fa 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -69,3 +69,4 @@
 obj-$(CONFIG_CEPH_LIB)		+= ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
 obj-$(CONFIG_NFC)		+= nfc/
+obj-$(CONFIG_OPENVSWITCH)	+= openvswitch/
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index f41f026..876fbe8 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -26,7 +26,7 @@
 				 gfp_t gfp_flags)
 {
 	struct sock *sk = sk_atm(vcc);
-	int guess = atm_guess_pdu2truesize(pdu_size);
+	int guess = SKB_TRUESIZE(pdu_size);
 
 	atm_force_charge(vcc, guess);
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index d07223c..353fccf 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -53,6 +53,7 @@
 static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
 static const unsigned char llc_oui_pid_pad[] =
 			{ LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
+static const unsigned char pad[] = { PAD_BRIDGED };
 static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
 static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
 
@@ -202,7 +203,10 @@
 {
 	struct br2684_dev *brdev = BRPRIV(dev);
 	struct atm_vcc *atmvcc;
-	int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2;
+	int minheadroom = (brvcc->encaps == e_llc) ?
+		((brdev->payload == p_bridged) ?
+			sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
+		((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
 
 	if (skb_headroom(skb) < minheadroom) {
 		struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
@@ -450,7 +454,7 @@
 			skb->pkt_type = PACKET_HOST;
 		} else { /* p_bridged */
 			/* first 2 chars should be 0 */
-			if (*((u16 *) (skb->data)) != 0)
+			if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
 				goto error;
 			skb_pull(skb, BR2684_PAD_LEN);
 			skb->protocol = eth_type_trans(skb, net_dev);
@@ -489,15 +493,11 @@
  */
 static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 {
-	struct sk_buff_head queue;
-	int err;
 	struct br2684_vcc *brvcc;
-	struct sk_buff *skb, *tmp;
-	struct sk_buff_head *rq;
 	struct br2684_dev *brdev;
 	struct net_device *net_dev;
 	struct atm_backend_br2684 be;
-	unsigned long flags;
+	int err;
 
 	if (copy_from_user(&be, arg, sizeof be))
 		return -EFAULT;
@@ -550,23 +550,6 @@
 	atmvcc->push = br2684_push;
 	atmvcc->pop = br2684_pop;
 
-	__skb_queue_head_init(&queue);
-	rq = &sk_atm(atmvcc)->sk_receive_queue;
-
-	spin_lock_irqsave(&rq->lock, flags);
-	skb_queue_splice_init(rq, &queue);
-	spin_unlock_irqrestore(&rq->lock, flags);
-
-	skb_queue_walk_safe(&queue, skb, tmp) {
-		struct net_device *dev;
-
-		br2684_push(atmvcc, skb);
-		dev = skb->dev;
-
-		dev->stats.rx_bytes -= skb->len;
-		dev->stats.rx_packets--;
-	}
-
 	/* initialize netdev carrier state */
 	if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
 		netif_carrier_off(net_dev);
@@ -574,6 +557,10 @@
 		netif_carrier_on(net_dev);
 
 	__module_get(THIS_MODULE);
+
+	/* re-process everything received between connection setup and
+	 * backend setup */
+	vcc_process_recv_queue(atmvcc);
 	return 0;
 
 error:
@@ -600,6 +587,7 @@
 	struct br2684_dev *brdev = BRPRIV(netdev);
 
 	ether_setup(netdev);
+	netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
 	brdev->net_dev = netdev;
 
 	netdev->netdev_ops = &br2684_netdev_ops;
@@ -612,7 +600,7 @@
 	struct br2684_dev *brdev = BRPRIV(netdev);
 
 	brdev->net_dev = netdev;
-	netdev->hard_header_len = 0;
+	netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
 	netdev->netdev_ops = &br2684_netdev_ops_routed;
 	netdev->addr_len = 0;
 	netdev->mtu = 1500;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 8523940..c12c258 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <net/route.h> /* for struct rtable and routing */
 #include <net/icmp.h> /* icmp_send */
+#include <net/arp.h>
 #include <linux/param.h> /* for HZ */
 #include <linux/uaccess.h>
 #include <asm/byteorder.h> /* for htons etc. */
@@ -119,7 +120,7 @@
 /* The neighbour entry n->lock is held. */
 static int neigh_check_cb(struct neighbour *n)
 {
-	struct atmarp_entry *entry = NEIGH2ENTRY(n);
+	struct atmarp_entry *entry = neighbour_priv(n);
 	struct clip_vcc *cv;
 
 	for (cv = entry->vccs; cv; cv = cv->next) {
@@ -189,6 +190,13 @@
 	struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
 
 	pr_debug("\n");
+
+	if (!clip_devs) {
+		atm_return(vcc, skb->truesize);
+		kfree_skb(skb);
+		return;
+	}
+
 	if (!skb) {
 		pr_debug("removing VCC %p\n", clip_vcc);
 		if (clip_vcc->entry)
@@ -255,8 +263,10 @@
 
 static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
+	__be32 *ip = (__be32 *) neigh->primary_key;
+
 	pr_debug("(neigh %p, skb %p)\n", neigh, skb);
-	to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
+	to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip);
 }
 
 static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
@@ -277,72 +287,24 @@
 
 static int clip_constructor(struct neighbour *neigh)
 {
-	struct atmarp_entry *entry = NEIGH2ENTRY(neigh);
-	struct net_device *dev = neigh->dev;
-	struct in_device *in_dev;
-	struct neigh_parms *parms;
+	struct atmarp_entry *entry = neighbour_priv(neigh);
 
-	pr_debug("(neigh %p, entry %p)\n", neigh, entry);
-	neigh->type = inet_addr_type(&init_net, entry->ip);
+	if (neigh->tbl->family != AF_INET)
+		return -EINVAL;
+
 	if (neigh->type != RTN_UNICAST)
 		return -EINVAL;
 
-	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(dev);
-	if (!in_dev) {
-		rcu_read_unlock();
-		return -EINVAL;
-	}
-
-	parms = in_dev->arp_parms;
-	__neigh_parms_put(neigh->parms);
-	neigh->parms = neigh_parms_clone(parms);
-	rcu_read_unlock();
-
+	neigh->nud_state = NUD_NONE;
 	neigh->ops = &clip_neigh_ops;
-	neigh->output = neigh->nud_state & NUD_VALID ?
-	    neigh->ops->connected_output : neigh->ops->output;
+	neigh->output = neigh->ops->output;
 	entry->neigh = neigh;
 	entry->vccs = NULL;
 	entry->expires = jiffies - 1;
+
 	return 0;
 }
 
-static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd)
-{
-	return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd);
-}
-
-static struct neigh_table clip_tbl = {
-	.family 	= AF_INET,
-	.entry_size 	= sizeof(struct neighbour)+sizeof(struct atmarp_entry),
-	.key_len 	= 4,
-	.hash 		= clip_hash,
-	.constructor 	= clip_constructor,
-	.id 		= "clip_arp_cache",
-
-	/* parameters are copied from ARP ... */
-	.parms = {
-		.tbl 			= &clip_tbl,
-		.base_reachable_time 	= 30 * HZ,
-		.retrans_time 		= 1 * HZ,
-		.gc_staletime 		= 60 * HZ,
-		.reachable_time 	= 30 * HZ,
-		.delay_probe_time 	= 5 * HZ,
-		.queue_len 		= 3,
-		.ucast_probes 		= 3,
-		.mcast_probes 		= 3,
-		.anycast_delay 		= 1 * HZ,
-		.proxy_delay 		= (8 * HZ) / 10,
-		.proxy_qlen 		= 64,
-		.locktime 		= 1 * HZ,
-	},
-	.gc_interval 	= 30 * HZ,
-	.gc_thresh1 	= 128,
-	.gc_thresh2 	= 512,
-	.gc_thresh3 	= 1024,
-};
-
 /* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
 
 /*
@@ -376,28 +338,19 @@
 		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
-	n = dst_get_neighbour(dst);
+	n = dst_get_neighbour_noref(dst);
 	if (!n) {
-#if 0
-		n = clip_find_neighbour(skb_dst(skb), 1);
-		if (!n) {
-			dev_kfree_skb(skb);	/* lost that one */
-			dev->stats.tx_dropped++;
-			return 0;
-		}
-		dst_set_neighbour(dst, n);
-#endif
 		pr_err("NO NEIGHBOUR !\n");
 		dev_kfree_skb(skb);
 		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
-	entry = NEIGH2ENTRY(n);
+	entry = neighbour_priv(n);
 	if (!entry->vccs) {
 		if (time_after(jiffies, entry->expires)) {
 			/* should be resolved */
 			entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
-			to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
+			to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key));
 		}
 		if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
 			skb_queue_tail(&entry->neigh->arp_queue, skb);
@@ -448,10 +401,7 @@
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
-	struct sk_buff_head *rq, queue;
 	struct clip_vcc *clip_vcc;
-	struct sk_buff *skb, *tmp;
-	unsigned long flags;
 
 	if (!vcc->push)
 		return -EBADFD;
@@ -472,29 +422,9 @@
 	vcc->push = clip_push;
 	vcc->pop = clip_pop;
 
-	__skb_queue_head_init(&queue);
-	rq = &sk_atm(vcc)->sk_receive_queue;
-
-	spin_lock_irqsave(&rq->lock, flags);
-	skb_queue_splice_init(rq, &queue);
-	spin_unlock_irqrestore(&rq->lock, flags);
-
 	/* re-process everything received between connection setup and MKIP */
-	skb_queue_walk_safe(&queue, skb, tmp) {
-		if (!clip_devs) {
-			atm_return(vcc, skb->truesize);
-			kfree_skb(skb);
-		} else {
-			struct net_device *dev = skb->dev;
-			unsigned int len = skb->len;
+	vcc_process_recv_queue(vcc);
 
-			skb_get(skb);
-			clip_push(vcc, skb);
-			dev->stats.rx_packets--;
-			dev->stats.rx_bytes -= len;
-			kfree_skb(skb);
-		}
-	}
 	return 0;
 }
 
@@ -523,11 +453,11 @@
 	rt = ip_route_output(&init_net, ip, 0, 1, 0);
 	if (IS_ERR(rt))
 		return PTR_ERR(rt);
-	neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
+	neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
 	ip_rt_put(rt);
 	if (!neigh)
 		return -ENOMEM;
-	entry = NEIGH2ENTRY(neigh);
+	entry = neighbour_priv(neigh);
 	if (entry != clip_vcc->entry) {
 		if (!clip_vcc->entry)
 			pr_debug("add\n");
@@ -544,13 +474,15 @@
 }
 
 static const struct net_device_ops clip_netdev_ops = {
-	.ndo_start_xmit = clip_start_xmit,
+	.ndo_start_xmit		= clip_start_xmit,
+	.ndo_neigh_construct	= clip_constructor,
 };
 
 static void clip_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &clip_netdev_ops;
 	dev->type = ARPHRD_ATM;
+	dev->neigh_priv_len = sizeof(struct atmarp_entry);
 	dev->hard_header_len = RFC1483LLC_LEN;
 	dev->mtu = RFC1626_MTU;
 	dev->tx_queue_len = 100;	/* "normal" queue (packets) */
@@ -604,10 +536,8 @@
 	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
 
-	if (event == NETDEV_UNREGISTER) {
-		neigh_ifdown(&clip_tbl, dev);
+	if (event == NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
-	}
 
 	/* ignore non-CLIP devices */
 	if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops)
@@ -787,9 +717,10 @@
 /* This means the neighbour entry has no attached VCC objects. */
 #define SEQ_NO_VCC_TOKEN	((void *) 2)
 
-static void atmarp_info(struct seq_file *seq, struct net_device *dev,
+static void atmarp_info(struct seq_file *seq, struct neighbour *n,
 			struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
 {
+	struct net_device *dev = n->dev;
 	unsigned long exp;
 	char buf[17];
 	int svc, llc, off;
@@ -809,8 +740,7 @@
 	seq_printf(seq, "%-6s%-4s%-4s%5ld ",
 		   dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
 
-	off = scnprintf(buf, sizeof(buf) - 1, "%pI4",
-			&entry->ip);
+	off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key);
 	while (off < 16)
 		buf[off++] = ' ';
 	buf[off] = '\0';
@@ -881,14 +811,17 @@
 {
 	struct clip_seq_state *state = (struct clip_seq_state *)_state;
 
-	return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
+	if (n->dev->type != ARPHRD_ATM)
+		return NULL;
+
+	return clip_seq_vcc_walk(state, neighbour_priv(n), pos);
 }
 
 static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
 {
 	struct clip_seq_state *state = seq->private;
 	state->ns.neigh_sub_iter = clip_seq_sub_iter;
-	return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
+	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
 }
 
 static int clip_seq_show(struct seq_file *seq, void *v)
@@ -900,10 +833,10 @@
 		seq_puts(seq, atm_arp_banner);
 	} else {
 		struct clip_seq_state *state = seq->private;
-		struct neighbour *n = v;
 		struct clip_vcc *vcc = state->vcc;
+		struct neighbour *n = v;
 
-		atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
+		atmarp_info(seq, n, neighbour_priv(n), vcc);
 	}
 	return 0;
 }
@@ -934,9 +867,6 @@
 
 static int __init atm_clip_init(void)
 {
-	neigh_table_init_no_netlink(&clip_tbl);
-
-	clip_tbl_hook = &clip_tbl;
 	register_atm_ioctl(&clip_ioctl_ops);
 	register_netdevice_notifier(&clip_dev_notifier);
 	register_inetaddr_notifier(&clip_inet_notifier);
@@ -973,12 +903,6 @@
 	 */
 	del_timer_sync(&idle_timer);
 
-	/* Next, purge the table, so that the device
-	 * unregister loop below does not hang due to
-	 * device references remaining in the table.
-	 */
-	neigh_ifdown(&clip_tbl, NULL);
-
 	dev = clip_devs;
 	while (dev) {
 		next = PRIV(dev)->next;
@@ -986,11 +910,6 @@
 		free_netdev(dev);
 		dev = next;
 	}
-
-	/* Now it is safe to fully shutdown whole table. */
-	neigh_table_clear(&clip_tbl);
-
-	clip_tbl_hook = NULL;
 }
 
 static void __exit atm_clip_exit(void)
diff --git a/net/atm/common.c b/net/atm/common.c
index 14ff9fe..b4b44db 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -214,6 +214,26 @@
 }
 EXPORT_SYMBOL(vcc_release_async);
 
+void vcc_process_recv_queue(struct atm_vcc *vcc)
+{
+	struct sk_buff_head queue, *rq;
+	struct sk_buff *skb, *tmp;
+	unsigned long flags;
+
+	__skb_queue_head_init(&queue);
+	rq = &sk_atm(vcc)->sk_receive_queue;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	skb_queue_splice_init(rq, &queue);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	skb_queue_walk_safe(&queue, skb, tmp) {
+		__skb_unlink(skb, &queue);
+		vcc->push(vcc, skb);
+	}
+}
+EXPORT_SYMBOL(vcc_process_recv_queue);
+
 void atm_dev_signal_change(struct atm_dev *dev, char signal)
 {
 	pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
@@ -502,8 +522,11 @@
 
 	if (sock->state != SS_CONNECTED)
 		return -ENOTCONN;
-	if (flags & ~MSG_DONTWAIT)		/* only handle MSG_DONTWAIT */
+
+	/* only handle MSG_DONTWAIT and MSG_PEEK */
+	if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
 		return -EOPNOTSUPP;
+
 	vcc = ATM_SD(sock);
 	if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
 	    test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -524,8 +547,13 @@
 	if (error)
 		return error;
 	sock_recv_ts_and_drops(msg, sk, skb);
-	pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
-	atm_return(vcc, skb->truesize);
+
+	if (!(flags & MSG_PEEK)) {
+		pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
+			 skb->truesize);
+		atm_return(vcc, skb->truesize);
+	}
+
 	skb_free_datagram(sk, skb);
 	return copied;
 }
diff --git a/net/atm/common.h b/net/atm/common.h
index f48a76b..cc3c2da 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -24,6 +24,7 @@
 		   char __user *optval, unsigned int optlen);
 int vcc_getsockopt(struct socket *sock, int level, int optname,
 		   char __user *optval, int __user *optlen);
+void vcc_process_recv_queue(struct atm_vcc *vcc);
 
 int atmpvc_init(void);
 void atmpvc_exit(void);
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index db4a11c..df35d9a 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -303,6 +303,10 @@
 	atmvcc->push = pppoatm_push;
 	atmvcc->pop = pppoatm_pop;
 	__module_get(THIS_MODULE);
+
+	/* re-process everything received between connection setup and
+	 * backend setup */
+	vcc_process_recv_queue(atmvcc);
 	return 0;
 }
 
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index e7c69f4..3cd0a0d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -402,14 +402,14 @@
 		break;
 
 	case AX25_T1:
-		if (ax25_ctl.arg < 1)
+		if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
 			goto einval_put;
 		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
 		ax25->t1  = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_T2:
-		if (ax25_ctl.arg < 1)
+		if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
 			goto einval_put;
 		ax25->t2 = ax25_ctl.arg * HZ;
 		break;
@@ -422,10 +422,15 @@
 		break;
 
 	case AX25_T3:
+		if (ax25_ctl.arg > ULONG_MAX / HZ)
+			goto einval_put;
 		ax25->t3 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_IDLE:
+		if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
+			goto einval_put;
+
 		ax25->idle = ax25_ctl.arg * 60 * HZ;
 		break;
 
@@ -540,15 +545,16 @@
 	ax25_cb *ax25;
 	struct net_device *dev;
 	char devname[IFNAMSIZ];
-	int opt, res = 0;
+	unsigned long opt;
+	int res = 0;
 
 	if (level != SOL_AX25)
 		return -ENOPROTOOPT;
 
-	if (optlen < sizeof(int))
+	if (optlen < sizeof(unsigned int))
 		return -EINVAL;
 
-	if (get_user(opt, (int __user *)optval))
+	if (get_user(opt, (unsigned int __user *)optval))
 		return -EFAULT;
 
 	lock_sock(sk);
@@ -571,7 +577,7 @@
 		break;
 
 	case AX25_T1:
-		if (opt < 1) {
+		if (opt < 1 || opt > ULONG_MAX / HZ) {
 			res = -EINVAL;
 			break;
 		}
@@ -580,7 +586,7 @@
 		break;
 
 	case AX25_T2:
-		if (opt < 1) {
+		if (opt < 1 || opt > ULONG_MAX / HZ) {
 			res = -EINVAL;
 			break;
 		}
@@ -596,7 +602,7 @@
 		break;
 
 	case AX25_T3:
-		if (opt < 1) {
+		if (opt < 1 || opt > ULONG_MAX / HZ) {
 			res = -EINVAL;
 			break;
 		}
@@ -604,7 +610,7 @@
 		break;
 
 	case AX25_IDLE:
-		if (opt < 0) {
+		if (opt > ULONG_MAX / (60 * HZ)) {
 			res = -EINVAL;
 			break;
 		}
@@ -612,7 +618,7 @@
 		break;
 
 	case AX25_BACKOFF:
-		if (opt < 0 || opt > 2) {
+		if (opt > 2) {
 			res = -EINVAL;
 			break;
 		}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index b8a7414..c25492f 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -174,7 +174,7 @@
 	unsigned long uint_val;
 	int ret;
 
-	ret = strict_strtoul(buff, 10, &uint_val);
+	ret = kstrtoul(buff, 10, &uint_val);
 	if (ret) {
 		bat_info(net_dev,
 			 "%s: Invalid parameter received: %s\n",
@@ -239,7 +239,7 @@
 	unsigned long val;
 	int ret, vis_mode_tmp = -1;
 
-	ret = strict_strtoul(buff, 10, &val);
+	ret = kstrtoul(buff, 10, &val);
 
 	if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
 	    (strncmp(buff, "client", 6) == 0) ||
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 0be9ff3..9bc63b2 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -155,7 +155,7 @@
 	/* sequence number is much newer, probably missed a lot of packets */
 
 	if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
-		|| (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+		&& (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"We missed a lot of packets (%i) !\n",
 			seq_num_diff - 1);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 619fb73..24403a7 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -25,6 +25,7 @@
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "originator.h"
+#include "translation-table.h"
 #include "routing.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -572,108 +573,142 @@
 	return ret;
 }
 
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
-		 struct orig_node *old_gw)
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
 	struct ethhdr *ethhdr;
 	struct iphdr *iphdr;
 	struct ipv6hdr *ipv6hdr;
 	struct udphdr *udphdr;
-	struct gw_node *curr_gw;
-	struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
-	unsigned int header_len = 0;
-	int ret = 1;
-
-	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
-		return 0;
 
 	/* check for ethernet header */
-	if (!pskb_may_pull(skb, header_len + ETH_HLEN))
-		return 0;
+	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
+		return false;
 	ethhdr = (struct ethhdr *)skb->data;
-	header_len += ETH_HLEN;
+	*header_len += ETH_HLEN;
 
 	/* check for initial vlan header */
 	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-		if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
-			return 0;
+		if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
+			return false;
 		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
-		header_len += VLAN_HLEN;
+		*header_len += VLAN_HLEN;
 	}
 
 	/* check for ip header */
 	switch (ntohs(ethhdr->h_proto)) {
 	case ETH_P_IP:
-		if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
-			return 0;
-		iphdr = (struct iphdr *)(skb->data + header_len);
-		header_len += iphdr->ihl * 4;
+		if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
+			return false;
+		iphdr = (struct iphdr *)(skb->data + *header_len);
+		*header_len += iphdr->ihl * 4;
 
 		/* check for udp header */
 		if (iphdr->protocol != IPPROTO_UDP)
-			return 0;
+			return false;
 
 		break;
 	case ETH_P_IPV6:
-		if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
-			return 0;
-		ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
-		header_len += sizeof(*ipv6hdr);
+		if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
+			return false;
+		ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
+		*header_len += sizeof(*ipv6hdr);
 
 		/* check for udp header */
 		if (ipv6hdr->nexthdr != IPPROTO_UDP)
-			return 0;
+			return false;
 
 		break;
 	default:
-		return 0;
+		return false;
 	}
 
-	if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
-		return 0;
-	udphdr = (struct udphdr *)(skb->data + header_len);
-	header_len += sizeof(*udphdr);
+	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+		return false;
+	udphdr = (struct udphdr *)(skb->data + *header_len);
+	*header_len += sizeof(*udphdr);
 
 	/* check for bootp port */
 	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
 	     (ntohs(udphdr->dest) != 67))
-		return 0;
+		return false;
 
 	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
 	    (ntohs(udphdr->dest) != 547))
-		return 0;
+		return false;
 
-	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
-		return -1;
+	return true;
+}
 
-	curr_gw = gw_get_selected_gw_node(bat_priv);
-	if (!curr_gw)
-		return 0;
+bool gw_out_of_range(struct bat_priv *bat_priv,
+		     struct sk_buff *skb, struct ethhdr *ethhdr)
+{
+	struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+	struct orig_node *orig_dst_node = NULL;
+	struct gw_node *curr_gw = NULL;
+	bool ret, out_of_range = false;
+	unsigned int header_len = 0;
+	uint8_t curr_tq_avg;
 
-	/* If old_gw != NULL then this packet is unicast.
-	 * So, at this point we have to check the message type: if it is a
-	 * DHCPREQUEST we have to decide whether to drop it or not */
-	if (old_gw && curr_gw->orig_node != old_gw) {
-		if (is_type_dhcprequest(skb, header_len)) {
-			/* If the dhcp packet has been sent to a different gw,
-			 * we have to evaluate whether the old gw is still
-			 * reliable enough */
-			neigh_curr = find_router(bat_priv, curr_gw->orig_node,
-						 NULL);
-			neigh_old = find_router(bat_priv, old_gw, NULL);
-			if (!neigh_curr || !neigh_old)
-				goto free_neigh;
-			if (neigh_curr->tq_avg - neigh_old->tq_avg <
-								GW_THRESHOLD)
-				ret = -1;
-		}
+	ret = gw_is_dhcp_target(skb, &header_len);
+	if (!ret)
+		goto out;
+
+	orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
+					  ethhdr->h_dest);
+	if (!orig_dst_node)
+		goto out;
+
+	if (!orig_dst_node->gw_flags)
+		goto out;
+
+	ret = is_type_dhcprequest(skb, header_len);
+	if (!ret)
+		goto out;
+
+	switch (atomic_read(&bat_priv->gw_mode)) {
+	case GW_MODE_SERVER:
+		/* If we are a GW then we are our best GW. We can artificially
+		 * set the tq towards ourselves to the maximum value */
+		curr_tq_avg = TQ_MAX_VALUE;
+		break;
+	case GW_MODE_CLIENT:
+		curr_gw = gw_get_selected_gw_node(bat_priv);
+		if (!curr_gw)
+			goto out;
+
+		/* packet is going to our gateway */
+		if (curr_gw->orig_node == orig_dst_node)
+			goto out;
+
+		/* If the dhcp packet has been sent to a different gw,
+		 * we have to evaluate whether the old gw is still
+		 * reliable enough */
+		neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+		if (!neigh_curr)
+			goto out;
+
+		curr_tq_avg = neigh_curr->tq_avg;
+		break;
+	case GW_MODE_OFF:
+	default:
+		goto out;
 	}
-free_neigh:
+
+	neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+	if (!neigh_old)
+		goto out;
+
+	if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+		out_of_range = true;
+
+out:
+	if (orig_dst_node)
+		orig_node_free_ref(orig_dst_node);
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 	if (neigh_old)
 		neigh_node_free_ref(neigh_old);
 	if (neigh_curr)
 		neigh_node_free_ref(neigh_curr);
-	if (curr_gw)
-		gw_node_free_ref(curr_gw);
-	return ret;
+	return out_of_range;
 }
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index b9b983c..e1edba0 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,7 +31,8 @@
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_purge(struct bat_priv *bat_priv);
 int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
-		 struct orig_node *old_gw);
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool gw_out_of_range(struct bat_priv *bat_priv,
+		     struct sk_buff *skb, struct ethhdr *ethhdr);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 18661af..c4ac7b0 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -97,7 +97,7 @@
 			*tmp_ptr = '\0';
 	}
 
-	ret = strict_strtol(buff, 10, &ldown);
+	ret = kstrtol(buff, 10, &ldown);
 	if (ret) {
 		bat_err(net_dev,
 			"Download speed of gateway mode invalid: %s\n",
@@ -122,7 +122,7 @@
 				*tmp_ptr = '\0';
 		}
 
-		ret = strict_strtol(slash_ptr + 1, 10, &lup);
+		ret = kstrtol(slash_ptr + 1, 10, &lup);
 		if (ret) {
 			bat_err(net_dev,
 				"Upload speed of gateway mode invalid: "
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 2a17250..d1da29d 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -25,7 +25,7 @@
 /* clears the hash */
 static void hash_init(struct hashtable_t *hash)
 {
-	int i;
+	uint32_t i;
 
 	for (i = 0 ; i < hash->size; i++) {
 		INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@
 }
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size)
+struct hashtable_t *hash_new(uint32_t size)
 {
 	struct hashtable_t *hash;
 
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d20aa71..4768717 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -33,17 +33,17 @@
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
-typedef int (*hashdata_choose_cb)(const void *, int);
+typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
 typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
 	struct hlist_head *table;   /* the hashtable itself with the buckets */
 	spinlock_t *list_locks;     /* spinlock for each hash list entry */
-	int size;		    /* size of hashtable */
+	uint32_t size;		    /* size of hashtable */
 };
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size);
+struct hashtable_t *hash_new(uint32_t size);
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
@@ -57,7 +57,7 @@
 	struct hlist_head *head;
 	struct hlist_node *node, *node_tmp;
 	spinlock_t *list_lock; /* spinlock to protect write access */
-	int i;
+	uint32_t i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -93,7 +93,8 @@
 			   hashdata_choose_cb choose,
 			   const void *data, struct hlist_node *data_node)
 {
-	int index, ret = -1;
+	uint32_t index;
+	int ret = -1;
 	struct hlist_head *head;
 	struct hlist_node *node;
 	spinlock_t *list_lock; /* spinlock to protect write access */
@@ -137,7 +138,7 @@
 				hashdata_compare_cb compare,
 				hashdata_choose_cb choose, void *data)
 {
-	size_t index;
+	uint32_t index;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	void *data_save = NULL;
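
With the int-to-uint32_t conversion, hash indexes and table sizes are unsigned end to end, so a choose callback can no longer signal an error with a negative return; it simply maps a key to a bucket. A minimal sketch of a callback matching the new hashdata_choose_cb signature (the mixing steps here are an arbitrary example, not the hash batman-adv actually uses):

	static uint32_t example_choose(const void *data, uint32_t size)
	{
		const unsigned char *key = data;
		uint32_t hash = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++) {
			hash += key[i];
			hash += (hash << 10);
			hash ^= (hash >> 6);
		}

		return hash % size;
	}
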
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index ac3520e..d9c1e7b 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -136,10 +136,9 @@
 
 	spin_unlock_bh(&socket_client->lock);
 
-	error = __copy_to_user(buf, &socket_packet->icmp_packet,
-			       socket_packet->icmp_len);
+	packet_len = min(count, socket_packet->icmp_len);
+	error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
 
-	packet_len = socket_packet->icmp_len;
 	kfree(socket_packet);
 
 	if (error)
@@ -187,12 +186,7 @@
 	skb_reserve(skb, sizeof(struct ethhdr));
 	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
 
-	if (!access_ok(VERIFY_READ, buff, packet_len)) {
-		len = -EFAULT;
-		goto free_skb;
-	}
-
-	if (__copy_from_user(icmp_packet, buff, packet_len)) {
+	if (copy_from_user(icmp_packet, buff, packet_len)) {
 		len = -EFAULT;
 		goto free_skb;
 	}
@@ -217,7 +211,7 @@
 
 	if (icmp_packet->version != COMPAT_VERSION) {
 		icmp_packet->msg_type = PARAMETER_PROBLEM;
-		icmp_packet->ttl = COMPAT_VERSION;
+		icmp_packet->version = COMPAT_VERSION;
 		bat_socket_add_packet(socket_client, icmp_packet, packet_len);
 		goto free_skb;
 	}
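
The socket read/write paths drop the access_ok()/__copy_* combination in favour of plain copy_to_user()/copy_from_user(), which perform the access check themselves, and the read side now clamps the copied length to the caller's buffer. A minimal sketch of the bounded-copy pattern in a generic read handler (packet/packet_len are placeholders, not the batman-adv fields):

	static ssize_t example_read(char __user *buf, size_t count,
				    const void *packet, size_t packet_len)
	{
		size_t n = min(count, packet_len);

		/* copy_to_user() returns the number of bytes left uncopied */
		if (copy_to_user(buf, packet, n))
			return -EFAULT;

		return n;
	}
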
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 964ad4d..86354e0 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
 #define DRIVER_DEVICE "batman-adv"
 
 #ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.4.0"
+#define SOURCE_VERSION "2012.0.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0e5b772..0bc2045 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -164,7 +164,7 @@
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct orig_node *orig_node;
-	int i;
+	uint32_t i;
 
 	if (!hash)
 		return;
@@ -350,7 +350,7 @@
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct orig_node *orig_node;
-	int i;
+	uint32_t i;
 
 	if (!hash)
 		return;
@@ -413,7 +413,8 @@
 	int batman_count = 0;
 	int last_seen_secs;
 	int last_seen_msecs;
-	int i, ret = 0;
+	uint32_t i;
+	int ret = 0;
 
 	primary_if = primary_if_get_selected(bat_priv);
 
@@ -519,7 +520,8 @@
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct orig_node *orig_node;
-	int i, ret;
+	uint32_t i;
+	int ret;
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 	 * if_num */
@@ -601,7 +603,8 @@
 	struct hlist_head *head;
 	struct hard_iface *hard_iface_tmp;
 	struct orig_node *orig_node;
-	int i, ret;
+	uint32_t i;
+	int ret;
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 	 * if_num */
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index cfc1f60..67765ff 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -42,7 +42,7 @@
 
 /* hashfunction to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(const void *data, int32_t size)
+static inline uint32_t choose_orig(const void *data, uint32_t size)
 {
 	const unsigned char *key = data;
 	uint32_t hash = 0;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index f961cc5..773e606 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -39,7 +39,7 @@
 	struct hlist_head *head;
 	struct orig_node *orig_node;
 	unsigned long *word;
-	int i;
+	uint32_t i;
 	size_t word_index;
 
 	for (i = 0; i < hash->size; i++) {
@@ -578,6 +578,7 @@
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct tt_query_packet *tt_query;
+	uint16_t tt_len;
 	struct ethhdr *ethhdr;
 
 	/* drop packet if it has not necessary minimum size */
@@ -616,13 +617,21 @@
 		}
 		break;
 	case TT_RESPONSE:
-		/* packet needs to be linearized to access the TT changes */
-		if (skb_linearize(skb) < 0)
-			goto out;
+		if (is_my_mac(tt_query->dst)) {
+			/* packet needs to be linearized to access the TT
+			 * changes */
+			if (skb_linearize(skb) < 0)
+				goto out;
 
-		if (is_my_mac(tt_query->dst))
+			tt_len = tt_query->tt_data * sizeof(struct tt_change);
+
+			/* Ensure we have all the claimed data */
+			if (unlikely(skb_headlen(skb) <
+				     sizeof(struct tt_query_packet) + tt_len))
+				goto out;
+
 			handle_tt_response(bat_priv, tt_query);
-		else {
+		} else {
 			bat_dbg(DBG_TT, bat_priv,
 				"Routing TT_RESPONSE to %pM [%c]\n",
 				tt_query->dst,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index f9cc957..987c75a 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -563,10 +563,10 @@
 	struct bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
 	struct softif_neigh *curr_softif_neigh = NULL;
-	struct orig_node *orig_node = NULL;
+	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
 	short vid = -1;
-	bool do_bcast;
+	bool do_bcast = false;
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto dropped;
@@ -598,17 +598,28 @@
 	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
-	orig_node = transtable_search(bat_priv, ethhdr->h_source,
-				      ethhdr->h_dest);
-	do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
-	if (do_bcast || (orig_node && orig_node->gw_flags)) {
-		ret = gw_is_target(bat_priv, skb, orig_node);
+	if (is_multicast_ether_addr(ethhdr->h_dest)) {
+		do_bcast = true;
 
-		if (ret < 0)
-			goto dropped;
-
-		if (ret)
-			do_bcast = false;
+		switch (atomic_read(&bat_priv->gw_mode)) {
+		case GW_MODE_SERVER:
+			/* gateway servers should not send dhcp
+			 * requests into the mesh */
+			ret = gw_is_dhcp_target(skb, &header_len);
+			if (ret)
+				goto dropped;
+			break;
+		case GW_MODE_CLIENT:
+			/* gateway clients should send dhcp requests
+			 * via unicast to their gateway */
+			ret = gw_is_dhcp_target(skb, &header_len);
+			if (ret)
+				do_bcast = false;
+			break;
+		case GW_MODE_OFF:
+		default:
+			break;
+		}
 	}
 
 	/* ethernet packet should be broadcasted */
@@ -644,6 +655,12 @@
 
 	/* unicast packet */
 	} else {
+		if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+			ret = gw_out_of_range(bat_priv, skb, ethhdr);
+			if (ret)
+				goto dropped;
+		}
+
 		ret = unicast_send_skb(skb, bat_priv);
 		if (ret != 0)
 			goto dropped_freed;
@@ -662,8 +679,6 @@
 		softif_neigh_free_ref(curr_softif_neigh);
 	if (primary_if)
 		hardif_free_ref(primary_if);
-	if (orig_node)
-		orig_node_free_ref(orig_node);
 	return NETDEV_TX_OK;
 }
 
@@ -859,7 +874,7 @@
 unreg_sysfs:
 	sysfs_del_meshif(soft_iface);
 unreg_soft_iface:
-	unregister_netdev(soft_iface);
+	unregister_netdevice(soft_iface);
 	return NULL;
 
 free_soft_iface:
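
In the error path above, unregister_netdev() is swapped for unregister_netdevice(). The two differ only in locking: unregister_netdev() takes the RTNL lock itself, while unregister_netdevice() expects the caller to already hold it, which is the usual reason for this substitution in paths that run under rtnl_lock(). A minimal sketch of the distinction (generic net_device code, not taken from batman-adv):

	static void teardown_unlocked(struct net_device *dev)
	{
		/* takes and releases the RTNL lock internally */
		unregister_netdev(dev);
	}

	static void teardown_locked(struct net_device *dev)
	{
		ASSERT_RTNL();		/* caller must already hold rtnl_lock() */
		unregister_netdevice(dev);
	}
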
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7..ab8dea8 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -36,18 +36,9 @@
 static void tt_purge(struct work_struct *work);
 
 /* returns 1 if they are the same mac addr */
-static int compare_ltt(const struct hlist_node *node, const void *data2)
+static int compare_tt(const struct hlist_node *node, const void *data2)
 {
-	const void *data1 = container_of(node, struct tt_local_entry,
-					 hash_entry);
-
-	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* returns 1 if they are the same mac addr */
-static int compare_gtt(const struct hlist_node *node, const void *data2)
-{
-	const void *data1 = container_of(node, struct tt_global_entry,
+	const void *data1 = container_of(node, struct tt_common_entry,
 					 hash_entry);
 
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
@@ -60,14 +51,13 @@
 			   msecs_to_jiffies(5000));
 }
 
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
-						 const void *data)
+static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
+					    const void *data)
 {
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
-	int index;
+	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+	uint32_t index;
 
 	if (!hash)
 		return NULL;
@@ -76,51 +66,46 @@
 	head = &hash->table[index];
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
-		if (!compare_eth(tt_local_entry, data))
+	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+		if (!compare_eth(tt_common_entry, data))
 			continue;
 
-		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
 			continue;
 
-		tt_local_entry_tmp = tt_local_entry;
+		tt_common_entry_tmp = tt_common_entry;
 		break;
 	}
 	rcu_read_unlock();
 
-	return tt_local_entry_tmp;
+	return tt_common_entry_tmp;
+}
+
+static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
+						 const void *data)
+{
+	struct tt_common_entry *tt_common_entry;
+	struct tt_local_entry *tt_local_entry = NULL;
+
+	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+	if (tt_common_entry)
+		tt_local_entry = container_of(tt_common_entry,
+					      struct tt_local_entry, common);
+	return tt_local_entry;
 }
 
 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
 						   const void *data)
 {
-	struct hashtable_t *hash = bat_priv->tt_global_hash;
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct tt_global_entry *tt_global_entry;
-	struct tt_global_entry *tt_global_entry_tmp = NULL;
-	int index;
+	struct tt_common_entry *tt_common_entry;
+	struct tt_global_entry *tt_global_entry = NULL;
 
-	if (!hash)
-		return NULL;
+	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+	if (tt_common_entry)
+		tt_global_entry = container_of(tt_common_entry,
+					       struct tt_global_entry, common);
+	return tt_global_entry;
 
-	index = choose_orig(data, hash->size);
-	head = &hash->table[index];
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
-		if (!compare_eth(tt_global_entry, data))
-			continue;
-
-		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
-			continue;
-
-		tt_global_entry_tmp = tt_global_entry;
-		break;
-	}
-	rcu_read_unlock();
-
-	return tt_global_entry_tmp;
 }
 
 static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
@@ -133,15 +118,18 @@
 
 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
 {
-	if (atomic_dec_and_test(&tt_local_entry->refcount))
-		kfree_rcu(tt_local_entry, rcu);
+	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
+		kfree_rcu(tt_local_entry, common.rcu);
 }
 
 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 {
+	struct tt_common_entry *tt_common_entry;
 	struct tt_global_entry *tt_global_entry;
 
-	tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
+	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+				       common);
 
 	if (tt_global_entry->orig_node)
 		orig_node_free_ref(tt_global_entry->orig_node);
@@ -151,8 +139,9 @@
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-	if (atomic_dec_and_test(&tt_global_entry->refcount))
-		call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
+	if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+		call_rcu(&tt_global_entry->common.rcu,
+			 tt_global_entry_free_rcu);
 }
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -201,6 +190,7 @@
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 	struct tt_local_entry *tt_local_entry = NULL;
 	struct tt_global_entry *tt_global_entry = NULL;
+	int hash_added;
 
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
@@ -217,26 +207,33 @@
 		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
 		(uint8_t)atomic_read(&bat_priv->ttvn));
 
-	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
-	tt_local_entry->last_seen = jiffies;
-	tt_local_entry->flags = NO_FLAGS;
+	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
+	tt_local_entry->common.flags = NO_FLAGS;
 	if (is_wifi_iface(ifindex))
-		tt_local_entry->flags |= TT_CLIENT_WIFI;
-	atomic_set(&tt_local_entry->refcount, 2);
+		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+	atomic_set(&tt_local_entry->common.refcount, 2);
+	tt_local_entry->last_seen = jiffies;
 
 	/* the batman interface mac address should never be purged */
 	if (compare_eth(addr, soft_iface->dev_addr))
-		tt_local_entry->flags |= TT_CLIENT_NOPURGE;
+		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
 
-	tt_local_event(bat_priv, addr, tt_local_entry->flags);
+	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
+			 &tt_local_entry->common,
+			 &tt_local_entry->common.hash_entry);
+
+	if (unlikely(hash_added != 0)) {
+		/* remove the reference for the hash */
+		tt_local_entry_free_ref(tt_local_entry);
+		goto out;
+	}
+
+	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
 
 	/* The local entry has to be marked as NEW to avoid to send it in
 	 * a full table response going out before the next ttvn increment
 	 * (consistency check) */
-	tt_local_entry->flags |= TT_CLIENT_NEW;
-
-	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
-		 tt_local_entry, &tt_local_entry->hash_entry);
+	tt_local_entry->common.flags |= TT_CLIENT_NEW;
 
 	/* remove address from global hash if present */
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
@@ -245,10 +242,11 @@
 	if (tt_global_entry) {
 		/* This node is probably going to update its tt table */
 		tt_global_entry->orig_node->tt_poss_change = true;
-		/* The global entry has to be marked as PENDING and has to be
+		/* The global entry has to be marked as ROAMING and has to be
 		 * kept for consistency purpose */
-		tt_global_entry->flags |= TT_CLIENT_PENDING;
-		send_roam_adv(bat_priv, tt_global_entry->addr,
+		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+		tt_global_entry->roam_at = jiffies;
+		send_roam_adv(bat_priv, tt_global_entry->common.addr,
 			      tt_global_entry->orig_node);
 	}
 out:
@@ -310,13 +308,12 @@
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_local_entry *tt_local_entry;
+	struct tt_common_entry *tt_common_entry;
 	struct hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	size_t buf_size, pos;
-	char *buff;
-	int i, ret = 0;
+	uint32_t i;
+	int ret = 0;
 
 	primary_if = primary_if_get_selected(bat_priv);
 	if (!primary_if) {
@@ -337,51 +334,27 @@
 		   "announced via TT (TTVN: %u):\n",
 		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
 
-	buf_size = 1;
-	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		__hlist_for_each_rcu(node, head)
-			buf_size += 29;
-		rcu_read_unlock();
-	}
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	buff[0] = '\0';
-	pos = 0;
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_local_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
-			pos += snprintf(buff + pos, 30, " * %pM "
-					"[%c%c%c%c%c]\n",
-					tt_local_entry->addr,
-					(tt_local_entry->flags &
+			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+					tt_common_entry->addr,
+					(tt_common_entry->flags &
 					 TT_CLIENT_ROAM ? 'R' : '.'),
-					(tt_local_entry->flags &
+					(tt_common_entry->flags &
 					 TT_CLIENT_NOPURGE ? 'P' : '.'),
-					(tt_local_entry->flags &
+					(tt_common_entry->flags &
 					 TT_CLIENT_NEW ? 'N' : '.'),
-					(tt_local_entry->flags &
+					(tt_common_entry->flags &
 					 TT_CLIENT_PENDING ? 'X' : '.'),
-					(tt_local_entry->flags &
+					(tt_common_entry->flags &
 					 TT_CLIENT_WIFI ? 'W' : '.'));
 		}
 		rcu_read_unlock();
 	}
-
-	seq_printf(seq, "%s", buff);
-	kfree(buff);
 out:
 	if (primary_if)
 		hardif_free_ref(primary_if);
@@ -392,13 +365,13 @@
 				 struct tt_local_entry *tt_local_entry,
 				 uint16_t flags)
 {
-	tt_local_event(bat_priv, tt_local_entry->addr,
-		       tt_local_entry->flags | flags);
+	tt_local_event(bat_priv, tt_local_entry->common.addr,
+		       tt_local_entry->common.flags | flags);
 
 	/* The local client has to be marked as "pending to be removed" but has
 	 * to be kept in the table in order to send it in a full table
 	 * response issued before the net ttvn increment (consistency check) */
-	tt_local_entry->flags |= TT_CLIENT_PENDING;
+	tt_local_entry->common.flags |= TT_CLIENT_PENDING;
 }
 
 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -414,7 +387,7 @@
 			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
 
 	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
-		"%s\n", tt_local_entry->addr, message);
+		"%s\n", tt_local_entry->common.addr, message);
 out:
 	if (tt_local_entry)
 		tt_local_entry_free_ref(tt_local_entry);
@@ -424,23 +397,27 @@
 {
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
 	struct tt_local_entry *tt_local_entry;
+	struct tt_common_entry *tt_common_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	int i;
+	uint32_t i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
-			if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
+			tt_local_entry = container_of(tt_common_entry,
+						      struct tt_local_entry,
+						      common);
+			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
 				continue;
 
 			/* entry already marked for deletion */
-			if (tt_local_entry->flags & TT_CLIENT_PENDING)
+			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
 				continue;
 
 			if (!is_out_of_time(tt_local_entry->last_seen,
@@ -451,7 +428,7 @@
 					     TT_CLIENT_DEL);
 			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
 				"pending to be removed: timed out\n",
-				tt_local_entry->addr);
+				tt_local_entry->common.addr);
 		}
 		spin_unlock_bh(list_lock);
 	}
@@ -462,10 +439,11 @@
 {
 	struct hashtable_t *hash;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_common_entry *tt_common_entry;
 	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	int i;
+	uint32_t i;
 
 	if (!bat_priv->tt_local_hash)
 		return;
@@ -477,9 +455,12 @@
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
 			hlist_del_rcu(node);
+			tt_local_entry = container_of(tt_common_entry,
+						      struct tt_local_entry,
+						      common);
 			tt_local_entry_free_ref(tt_local_entry);
 		}
 		spin_unlock_bh(list_lock);
@@ -527,6 +508,7 @@
 	struct tt_global_entry *tt_global_entry;
 	struct orig_node *orig_node_tmp;
 	int ret = 0;
+	int hash_added;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
@@ -537,18 +519,24 @@
 		if (!tt_global_entry)
 			goto out;
 
-		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+		tt_global_entry->common.flags = NO_FLAGS;
+		atomic_set(&tt_global_entry->common.refcount, 2);
 		/* Assign the new orig_node */
 		atomic_inc(&orig_node->refcount);
 		tt_global_entry->orig_node = orig_node;
 		tt_global_entry->ttvn = ttvn;
-		tt_global_entry->flags = NO_FLAGS;
 		tt_global_entry->roam_at = 0;
-		atomic_set(&tt_global_entry->refcount, 2);
 
-		hash_add(bat_priv->tt_global_hash, compare_gtt,
-			 choose_orig, tt_global_entry,
-			 &tt_global_entry->hash_entry);
+		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
+				 choose_orig, &tt_global_entry->common,
+				 &tt_global_entry->common.hash_entry);
+
+		if (unlikely(hash_added != 0)) {
+			/* remove the reference for the hash */
+			tt_global_entry_free_ref(tt_global_entry);
+			goto out_remove;
+		}
 		atomic_inc(&orig_node->tt_size);
 	} else {
 		if (tt_global_entry->orig_node != orig_node) {
@@ -559,20 +547,21 @@
 			orig_node_free_ref(orig_node_tmp);
 			atomic_inc(&orig_node->tt_size);
 		}
+		tt_global_entry->common.flags = NO_FLAGS;
 		tt_global_entry->ttvn = ttvn;
-		tt_global_entry->flags = NO_FLAGS;
 		tt_global_entry->roam_at = 0;
 	}
 
 	if (wifi)
-		tt_global_entry->flags |= TT_CLIENT_WIFI;
+		tt_global_entry->common.flags |= TT_CLIENT_WIFI;
 
 	bat_dbg(DBG_TT, bat_priv,
 		"Creating new global tt entry: %pM (via %pM)\n",
-		tt_global_entry->addr, orig_node->orig);
+		tt_global_entry->common.addr, orig_node->orig);
 
+out_remove:
 	/* remove address from local hash if present */
-	tt_local_remove(bat_priv, tt_global_entry->addr,
+	tt_local_remove(bat_priv, tt_global_entry->common.addr,
 			"global tt received", roaming);
 	ret = 1;
 out:
@@ -586,13 +575,13 @@
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_common_entry *tt_common_entry;
 	struct tt_global_entry *tt_global_entry;
 	struct hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	size_t buf_size, pos;
-	char *buff;
-	int i, ret = 0;
+	uint32_t i;
+	int ret = 0;
 
 	primary_if = primary_if_get_selected(bat_priv);
 	if (!primary_if) {
@@ -615,53 +604,32 @@
 	seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
 		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
 
-	buf_size = 1;
-	/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
-	 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		__hlist_for_each_rcu(node, head)
-			buf_size += 67;
-		rcu_read_unlock();
-	}
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	buff[0] = '\0';
-	pos = 0;
-
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
-
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_global_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
-			pos += snprintf(buff + pos, 69,
-					" * %pM  (%3u) via %pM     (%3u)   "
-					"[%c%c%c]\n", tt_global_entry->addr,
+			tt_global_entry = container_of(tt_common_entry,
+						       struct tt_global_entry,
+						       common);
+			seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   "
+					"[%c%c%c]\n",
+					tt_global_entry->common.addr,
 					tt_global_entry->ttvn,
 					tt_global_entry->orig_node->orig,
 					(uint8_t) atomic_read(
 						&tt_global_entry->orig_node->
 						last_ttvn),
-					(tt_global_entry->flags &
+					(tt_global_entry->common.flags &
 					 TT_CLIENT_ROAM ? 'R' : '.'),
-					(tt_global_entry->flags &
+					(tt_global_entry->common.flags &
 					 TT_CLIENT_PENDING ? 'X' : '.'),
-					(tt_global_entry->flags &
+					(tt_global_entry->common.flags &
 					 TT_CLIENT_WIFI ? 'W' : '.'));
 		}
 		rcu_read_unlock();
 	}
-
-	seq_printf(seq, "%s", buff);
-	kfree(buff);
 out:
 	if (primary_if)
 		hardif_free_ref(primary_if);
@@ -677,13 +645,13 @@
 
 	bat_dbg(DBG_TT, bat_priv,
 		"Deleting global tt entry %pM (via %pM): %s\n",
-		tt_global_entry->addr, tt_global_entry->orig_node->orig,
+		tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
 		message);
 
 	atomic_dec(&tt_global_entry->orig_node->tt_size);
 
-	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
-		    tt_global_entry->addr);
+	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
+		    tt_global_entry->common.addr);
 out:
 	if (tt_global_entry)
 		tt_global_entry_free_ref(tt_global_entry);
@@ -694,6 +662,7 @@
 		   const char *message, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry = NULL;
+	struct tt_local_entry *tt_local_entry = NULL;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 	if (!tt_global_entry)
@@ -701,22 +670,37 @@
 
 	if (tt_global_entry->orig_node == orig_node) {
 		if (roaming) {
-			tt_global_entry->flags |= TT_CLIENT_ROAM;
-			tt_global_entry->roam_at = jiffies;
-			goto out;
+			/* if we are deleting a global entry due to a roam
+			 * event, there are two possibilities:
+			 * 1) the client roamed from node A to node B => we mark
+			 *    it with TT_CLIENT_ROAM, we start a timer and we
+			 *    wait for node B to claim it. In case of timeout
+			 *    the entry is purged.
+			 * 2) the client roamed to us => we can directly delete
+			 *    the global entry, since it is useless now. */
+			tt_local_entry = tt_local_hash_find(bat_priv,
+							    tt_global_entry->common.addr);
+			if (!tt_local_entry) {
+				tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+				tt_global_entry->roam_at = jiffies;
+				goto out;
+			}
 		}
 		_tt_global_del(bat_priv, tt_global_entry, message);
 	}
 out:
 	if (tt_global_entry)
 		tt_global_entry_free_ref(tt_global_entry);
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
 			struct orig_node *orig_node, const char *message)
 {
 	struct tt_global_entry *tt_global_entry;
-	int i;
+	struct tt_common_entry *tt_common_entry;
+	uint32_t i;
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
 	struct hlist_node *node, *safe;
 	struct hlist_head *head;
@@ -730,14 +714,18 @@
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_global_entry, node, safe,
+		hlist_for_each_entry_safe(tt_common_entry, node, safe,
 					 head, hash_entry) {
+			tt_global_entry = container_of(tt_common_entry,
+						       struct tt_global_entry,
+						       common);
 			if (tt_global_entry->orig_node == orig_node) {
 				bat_dbg(DBG_TT, bat_priv,
 					"Deleting global tt entry %pM "
-					"(via %pM): originator time out\n",
-					tt_global_entry->addr,
-					tt_global_entry->orig_node->orig);
+					"(via %pM): %s\n",
+					tt_global_entry->common.addr,
+					tt_global_entry->orig_node->orig,
+					message);
 				hlist_del_rcu(node);
 				tt_global_entry_free_ref(tt_global_entry);
 			}
@@ -750,20 +738,24 @@
 static void tt_global_roam_purge(struct bat_priv *bat_priv)
 {
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_common_entry *tt_common_entry;
 	struct tt_global_entry *tt_global_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	int i;
+	uint32_t i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
-			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+			tt_global_entry = container_of(tt_common_entry,
+						       struct tt_global_entry,
+						       common);
+			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
 				continue;
 			if (!is_out_of_time(tt_global_entry->roam_at,
 					    TT_CLIENT_ROAM_TIMEOUT * 1000))
@@ -771,7 +763,7 @@
 
 			bat_dbg(DBG_TT, bat_priv, "Deleting global "
 				"tt entry (%pM): Roaming timeout\n",
-				tt_global_entry->addr);
+				tt_global_entry->common.addr);
 			atomic_dec(&tt_global_entry->orig_node->tt_size);
 			hlist_del_rcu(node);
 			tt_global_entry_free_ref(tt_global_entry);
@@ -785,10 +777,11 @@
 {
 	struct hashtable_t *hash;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_common_entry *tt_common_entry;
 	struct tt_global_entry *tt_global_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	int i;
+	uint32_t i;
 
 	if (!bat_priv->tt_global_hash)
 		return;
@@ -800,9 +793,12 @@
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
 			hlist_del_rcu(node);
+			tt_global_entry = container_of(tt_common_entry,
+						       struct tt_global_entry,
+						       common);
 			tt_global_entry_free_ref(tt_global_entry);
 		}
 		spin_unlock_bh(list_lock);
@@ -818,8 +814,8 @@
 {
 	bool ret = false;
 
-	if (tt_local_entry->flags & TT_CLIENT_WIFI &&
-	    tt_global_entry->flags & TT_CLIENT_WIFI)
+	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
+	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
 		ret = true;
 
 	return ret;
@@ -852,7 +848,7 @@
 
 	/* A global client marked as PENDING has already moved from that
 	 * originator */
-	if (tt_global_entry->flags & TT_CLIENT_PENDING)
+	if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
 		goto out;
 
 	orig_node = tt_global_entry->orig_node;
@@ -871,29 +867,34 @@
 {
 	uint16_t total = 0, total_one;
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_common_entry *tt_common_entry;
 	struct tt_global_entry *tt_global_entry;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	int i, j;
+	uint32_t i;
+	int j;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_global_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
+			tt_global_entry = container_of(tt_common_entry,
+						       struct tt_global_entry,
+						       common);
 			if (compare_eth(tt_global_entry->orig_node,
 					orig_node)) {
 				/* Roaming clients are in the global table for
 				 * consistency only. They don't have to be
 				 * taken into account while computing the
 				 * global crc */
-				if (tt_global_entry->flags & TT_CLIENT_ROAM)
+				if (tt_common_entry->flags & TT_CLIENT_ROAM)
 					continue;
 				total_one = 0;
 				for (j = 0; j < ETH_ALEN; j++)
 					total_one = crc16_byte(total_one,
-						tt_global_entry->addr[j]);
+						tt_common_entry->addr[j]);
 				total ^= total_one;
 			}
 		}
@@ -908,25 +909,26 @@
 {
 	uint16_t total = 0, total_one;
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_local_entry *tt_local_entry;
+	struct tt_common_entry *tt_common_entry;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	int i, j;
+	uint32_t i;
+	int j;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_local_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
 			/* not yet committed clients have not to be taken into
 			 * account while computing the CRC */
-			if (tt_local_entry->flags & TT_CLIENT_NEW)
+			if (tt_common_entry->flags & TT_CLIENT_NEW)
 				continue;
 			total_one = 0;
 			for (j = 0; j < ETH_ALEN; j++)
 				total_one = crc16_byte(total_one,
-						   tt_local_entry->addr[j]);
+						   tt_common_entry->addr[j]);
 			total ^= total_one;
 		}
 		rcu_read_unlock();
@@ -1015,21 +1017,25 @@
 /* data_ptr is useless here, but has to be kept to respect the prototype */
 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-	const struct tt_local_entry *tt_local_entry = entry_ptr;
+	const struct tt_common_entry *tt_common_entry = entry_ptr;
 
-	if (tt_local_entry->flags & TT_CLIENT_NEW)
+	if (tt_common_entry->flags & TT_CLIENT_NEW)
 		return 0;
 	return 1;
 }
 
 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-	const struct tt_global_entry *tt_global_entry = entry_ptr;
+	const struct tt_common_entry *tt_common_entry = entry_ptr;
+	const struct tt_global_entry *tt_global_entry;
 	const struct orig_node *orig_node = data_ptr;
 
-	if (tt_global_entry->flags & TT_CLIENT_ROAM)
+	if (tt_common_entry->flags & TT_CLIENT_ROAM)
 		return 0;
 
+	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+				       common);
+
 	return (tt_global_entry->orig_node == orig_node);
 }
 
@@ -1040,7 +1046,7 @@
 							      const void *),
 					      void *cb_data)
 {
-	struct tt_local_entry *tt_local_entry;
+	struct tt_common_entry *tt_common_entry;
 	struct tt_query_packet *tt_response;
 	struct tt_change *tt_change;
 	struct hlist_node *node;
@@ -1048,7 +1054,7 @@
 	struct sk_buff *skb = NULL;
 	uint16_t tt_tot, tt_count;
 	ssize_t tt_query_size = sizeof(struct tt_query_packet);
-	int i;
+	uint32_t i;
 
 	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
 		tt_len = primary_if->soft_iface->mtu - tt_query_size;
@@ -1072,15 +1078,16 @@
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry_rcu(tt_local_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
 			if (tt_count == tt_tot)
 				break;
 
-			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
 				continue;
 
-			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+			memcpy(tt_change->addr, tt_common_entry->addr,
+			       ETH_ALEN);
 			tt_change->flags = NO_FLAGS;
 
 			tt_count++;
@@ -1187,11 +1194,11 @@
 		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
 
 	/* Let's get the orig node of the REAL destination */
-	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
 	if (!req_dst_orig_node)
 		goto out;
 
-	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
 	if (!res_dst_orig_node)
 		goto out;
 
@@ -1317,7 +1324,7 @@
 	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
 	req_ttvn = tt_request->ttvn;
 
-	orig_node = get_orig_node(bat_priv, tt_request->src);
+	orig_node = orig_hash_find(bat_priv, tt_request->src);
 	if (!orig_node)
 		goto out;
 
@@ -1497,7 +1504,7 @@
 		goto out;
 	/* Check if the client has been logically deleted (but is kept for
 	 * consistency purpose) */
-	if (tt_local_entry->flags & TT_CLIENT_PENDING)
+	if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
 		goto out;
 	ret = true;
 out:
@@ -1720,45 +1727,53 @@
 	kfree(bat_priv->tt_buff);
 }
 
-/* This function will reset the specified flags from all the entries in
- * the given hash table and will increment num_local_tt for each involved
- * entry */
-static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
+/* This function will enable or disable the specified flags for all the entries
+ * in the given hash table and returns the number of modified entries */
+static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
+			     bool enable)
 {
-	int i;
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	uint32_t i;
+	uint16_t changed_num = 0;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct tt_local_entry *tt_local_entry;
+	struct tt_common_entry *tt_common_entry;
 
 	if (!hash)
-		return;
+		goto out;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_local_entry, node,
+		hlist_for_each_entry_rcu(tt_common_entry, node,
 					 head, hash_entry) {
-			if (!(tt_local_entry->flags & flags))
-				continue;
-			tt_local_entry->flags &= ~flags;
-			atomic_inc(&bat_priv->num_local_tt);
+			if (enable) {
+				if ((tt_common_entry->flags & flags) == flags)
+					continue;
+				tt_common_entry->flags |= flags;
+			} else {
+				if (!(tt_common_entry->flags & flags))
+					continue;
+				tt_common_entry->flags &= ~flags;
+			}
+			changed_num++;
 		}
 		rcu_read_unlock();
 	}
-
+out:
+	return changed_num;
 }
 
 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 {
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	struct tt_common_entry *tt_common_entry;
 	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	int i;
+	uint32_t i;
 
 	if (!hash)
 		return;
@@ -1768,16 +1783,19 @@
 		list_lock = &hash->list_locks[i];
 
 		spin_lock_bh(list_lock);
-		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
 					  head, hash_entry) {
-			if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
+			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
 				continue;
 
 			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
-				"(%pM): pending\n", tt_local_entry->addr);
+				"(%pM): pending\n", tt_common_entry->addr);
 
 			atomic_dec(&bat_priv->num_local_tt);
 			hlist_del_rcu(node);
+			tt_local_entry = container_of(tt_common_entry,
+						      struct tt_local_entry,
+						      common);
 			tt_local_entry_free_ref(tt_local_entry);
 		}
 		spin_unlock_bh(list_lock);
@@ -1787,7 +1805,11 @@
 
 void tt_commit_changes(struct bat_priv *bat_priv)
 {
-	tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
+	uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
+					    TT_CLIENT_NEW, false);
+	/* all the reset entries have now to be effectively counted as local
+	 * entries */
+	atomic_add(changed_num, &bat_priv->num_local_tt);
 	tt_local_purge_pending_clients(bat_priv);
 
 	/* Increment the TTVN only once per OGM interval */
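
The two *_seq_print_text helpers above no longer pre-size a buffer, kmalloc() it with GFP_ATOMIC and snprintf() every entry into it before one final seq_printf(); each entry is printed directly and the seq_file core handles buffering and restarts, so the intermediate allocation and its failure path disappear. A minimal sketch of the direct-print shape (generic seq_file code; the entry loop is illustrative):

	static int example_seq_print(struct seq_file *seq, void *offset)
	{
		unsigned int i;

		rcu_read_lock();
		for (i = 0; i < 4; i++)
			seq_printf(seq, " * entry %u\n", i);
		rcu_read_unlock();

		return 0;
	}
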
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ab8d0fe..e9eb043 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -222,24 +222,24 @@
 	struct icmp_packet_rr icmp_packet;
 };
 
-struct tt_local_entry {
+struct tt_common_entry {
 	uint8_t addr[ETH_ALEN];
 	struct hlist_node hash_entry;
-	unsigned long last_seen;
 	uint16_t flags;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
 
+struct tt_local_entry {
+	struct tt_common_entry common;
+	unsigned long last_seen;
+};
+
 struct tt_global_entry {
-	uint8_t addr[ETH_ALEN];
-	struct hlist_node hash_entry; /* entry in the global table */
+	struct tt_common_entry common;
 	struct orig_node *orig_node;
 	uint8_t ttvn;
-	uint16_t flags; /* only TT_GLOBAL_ROAM is used */
 	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
-	atomic_t refcount;
-	struct rcu_head rcu;
 };
 
 struct tt_change_node {
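
The types.h change above is what the translation-table refactor hangs on: the fields shared by local and global entries (address, hash linkage, flags, refcount, RCU head) move into an embedded struct tt_common_entry placed first in each entry, the hash stores the common part, and callers recover the enclosing entry with container_of(). A minimal sketch of the embedding pattern in isolation (generic names, not the batman-adv structures):

	struct common {
		struct hlist_node hash_entry;
		atomic_t refcount;
	};

	struct outer {
		struct common common;		/* shared part, embedded */
		unsigned long private_state;	/* type-specific part */
	};

	static struct outer *to_outer(struct common *c)
	{
		/* valid for any embedding offset, not only offset 0 */
		return container_of(c, struct outer, common);
	}
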
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f81a6b6..cc3b9f2 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -66,7 +66,7 @@
 
 /* hash function to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(const void *data, int size)
+static uint32_t vis_info_choose(const void *data, uint32_t size)
 {
 	const struct vis_info *vis_info = data;
 	const struct vis_packet *packet;
@@ -96,7 +96,7 @@
 	struct hlist_head *head;
 	struct hlist_node *node;
 	struct vis_info *vis_info, *vis_info_tmp = NULL;
-	int index;
+	uint32_t index;
 
 	if (!hash)
 		return NULL;
@@ -202,7 +202,8 @@
 	HLIST_HEAD(vis_if_list);
 	struct if_list_entry *entry;
 	struct hlist_node *pos, *n;
-	int i, j, ret = 0;
+	uint32_t i;
+	int j, ret = 0;
 	int vis_server = atomic_read(&bat_priv->vis_mode);
 	size_t buff_pos, buf_size;
 	char *buff;
@@ -556,7 +557,8 @@
 	struct hlist_head *head;
 	struct orig_node *orig_node;
 	struct vis_packet *packet;
-	int best_tq = -1, i;
+	int best_tq = -1;
+	uint32_t i;
 
 	packet = (struct vis_packet *)info->skb_packet->data;
 
@@ -607,8 +609,9 @@
 	struct vis_info *info = bat_priv->my_vis_info;
 	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
 	struct vis_info_entry *entry;
-	struct tt_local_entry *tt_local_entry;
-	int best_tq = -1, i;
+	struct tt_common_entry *tt_common_entry;
+	int best_tq = -1;
+	uint32_t i;
 
 	info->first_seen = jiffies;
 	packet->vis_type = atomic_read(&bat_priv->vis_mode);
@@ -669,13 +672,13 @@
 		head = &hash->table[i];
 
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_local_entry, node, head,
+		hlist_for_each_entry_rcu(tt_common_entry, node, head,
 					 hash_entry) {
 			entry = (struct vis_info_entry *)
 					skb_put(info->skb_packet,
 						sizeof(*entry));
 			memset(entry->src, 0, ETH_ALEN);
-			memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+			memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
 			entry->quality = 0; /* 0 means TT */
 			packet->entries++;
 
@@ -696,7 +699,7 @@
  * held */
 static void purge_vis_packets(struct bat_priv *bat_priv)
 {
-	int i;
+	uint32_t i;
 	struct hashtable_t *hash = bat_priv->vis_hash;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
@@ -733,7 +736,7 @@
 	struct sk_buff *skb;
 	struct hard_iface *hard_iface;
 	uint8_t dstaddr[ETH_ALEN];
-	int i;
+	uint32_t i;
 
 
 	packet = (struct vis_packet *)info->skb_packet->data;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index bfb3dc0..9ec85eb 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,7 +6,11 @@
 	tristate "Bluetooth subsystem support"
 	depends on NET && !S390
 	depends on RFKILL || !RFKILL
+	select CRC16
 	select CRYPTO
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ECB
 	help
 	  Bluetooth is low-cost, low-power, short-range wireless technology.
 	  It was designed as a replacement for cables and other short-range
@@ -15,10 +19,12 @@
 	  Bluetooth can be found at <http://www.bluetooth.com/>.
 
 	  Linux Bluetooth subsystem consist of several layers:
-	     Bluetooth Core (HCI device and connection manager, scheduler)
+	     Bluetooth Core
+		HCI device and connection manager, scheduler
+		SCO audio links
+		L2CAP (Logical Link Control and Adaptation Protocol)
+		SMP (Security Manager Protocol) on LE (Low Energy) links
 	     HCI Device drivers (Interface to the hardware)
-	     SCO Module (SCO audio links)
-	     L2CAP Module (Logical Link Control and Adaptation Protocol)
 	     RFCOMM Module (RFCOMM Protocol)  
 	     BNEP Module (Bluetooth Network Encapsulation Protocol)
 	     CMTP Module (CAPI Message Transport Protocol)
@@ -33,31 +39,6 @@
 	  to Bluetooth kernel modules are provided in the BlueZ packages.  For
 	  more information, see <http://www.bluez.org/>.
 
-if BT != n
-
-config BT_L2CAP
-	bool "L2CAP protocol support"
-	select CRC16
-	select CRYPTO
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_AES
-	select CRYPTO_ECB
-	help
-	  L2CAP (Logical Link Control and Adaptation Protocol) provides
-	  connection oriented and connection-less data transport.  L2CAP
-	  support is required for most Bluetooth applications.
-
-	  Also included is support for SMP (Security Manager Protocol) which
-	  is the security layer on top of LE (Low Energy) links.
-
-config BT_SCO
-	bool "SCO links support"
-	help
-	  SCO link provides voice transport over Bluetooth.  SCO support is
-	  required for voice applications like Headset and Audio.
-
-endif
-
 source "net/bluetooth/rfcomm/Kconfig"
 
 source "net/bluetooth/bnep/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 9b67f3d..2dc5a570 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -8,6 +8,5 @@
 obj-$(CONFIG_BT_CMTP)	+= cmtp/
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
-bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
-bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o smp.o
-bluetooth-$(CONFIG_BT_SCO)	+= sco.o
+bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
+	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 062124c..cdcfcab 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -199,15 +199,14 @@
 
 	BT_DBG("parent %p", parent);
 
-	local_bh_disable();
 	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
 		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
 
-		bh_lock_sock(sk);
+		lock_sock(sk);
 
 		/* FIXME: Is this check still needed */
 		if (sk->sk_state == BT_CLOSED) {
-			bh_unlock_sock(sk);
+			release_sock(sk);
 			bt_accept_unlink(sk);
 			continue;
 		}
@@ -218,14 +217,12 @@
 			if (newsock)
 				sock_graft(sk, newsock);
 
-			bh_unlock_sock(sk);
-			local_bh_enable();
+			release_sock(sk);
 			return sk;
 		}
 
-		bh_unlock_sock(sk);
+		release_sock(sk);
 	}
-	local_bh_enable();
 
 	return NULL;
 }
diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig
index 35158b0..71791fc 100644
--- a/net/bluetooth/bnep/Kconfig
+++ b/net/bluetooth/bnep/Kconfig
@@ -1,6 +1,6 @@
 config BT_BNEP
 	tristate "BNEP protocol support"
-	depends on BT && BT_L2CAP
+	depends on BT
 	select CRC32
 	help
 	  BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a..a779ec7 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -56,8 +56,8 @@
 
 #define VERSION "1.3"
 
-static int compress_src = 1;
-static int compress_dst = 1;
+static bool compress_src = true;
+static bool compress_dst = true;
 
 static LIST_HEAD(bnep_session_list);
 static DECLARE_RWSEM(bnep_session_sem);
@@ -65,31 +65,24 @@
 static struct bnep_session *__bnep_get_session(u8 *dst)
 {
 	struct bnep_session *s;
-	struct list_head *p;
 
 	BT_DBG("");
 
-	list_for_each(p, &bnep_session_list) {
-		s = list_entry(p, struct bnep_session, list);
+	list_for_each_entry(s, &bnep_session_list, list)
 		if (!compare_ether_addr(dst, s->eh.h_source))
 			return s;
-	}
+
 	return NULL;
 }
 
 static void __bnep_link_session(struct bnep_session *s)
 {
-	/* It's safe to call __module_get() here because sessions are added
-	   by the socket layer which has to hold the reference to this module.
-	 */
-	__module_get(THIS_MODULE);
 	list_add(&s->list, &bnep_session_list);
 }
 
 static void __bnep_unlink_session(struct bnep_session *s)
 {
 	list_del(&s->list);
-	module_put(THIS_MODULE);
 }
 
 static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +523,7 @@
 
 	up_write(&bnep_session_sem);
 	free_netdev(dev);
+	module_put_and_exit(0);
 	return 0;
 }
 
@@ -616,9 +610,11 @@
 
 	__bnep_link_session(s);
 
+	__module_get(THIS_MODULE);
 	s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
 	if (IS_ERR(s->task)) {
 		/* Session thread start failed, gotta cleanup. */
+		module_put(THIS_MODULE);
 		unregister_netdev(dev);
 		__bnep_unlink_session(s);
 		err = PTR_ERR(s->task);
@@ -667,17 +663,14 @@
 
 int bnep_get_connlist(struct bnep_connlist_req *req)
 {
-	struct list_head *p;
+	struct bnep_session *s;
 	int err = 0, n = 0;
 
 	down_read(&bnep_session_sem);
 
-	list_for_each(p, &bnep_session_list) {
-		struct bnep_session *s;
+	list_for_each_entry(s, &bnep_session_list, list) {
 		struct bnep_conninfo ci;
 
-		s = list_entry(p, struct bnep_session, list);
-
 		__bnep_copy_ci(&ci, s);
 
 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
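
BNEP (and CMTP below) no longer take the module reference in the session-link helper; instead __module_get() is taken immediately before kthread_run() and the session thread drops it itself with module_put_and_exit(), so the reference lifetime matches the thread rather than the list membership. A minimal sketch of that ownership pattern (generic session code; all names are illustrative):

	struct example_session {
		struct task_struct *task;
	};

	static int example_session_fn(void *arg)
	{
		/* ... serve the session until it is torn down ... */

		module_put_and_exit(0);	/* drop the ref taken by the creator */
		return 0;
	}

	static int example_session_start(struct example_session *s)
	{
		__module_get(THIS_MODULE);
		s->task = kthread_run(example_session_fn, s, "kexampled");
		if (IS_ERR(s->task)) {
			/* thread never ran, so drop the reference here */
			module_put(THIS_MODULE);
			return PTR_ERR(s->task);
		}

		return 0;
	}
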
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index d6b0382..94cbf42 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,6 +1,6 @@
 config BT_CMTP
 	tristate "CMTP protocol support"
-	depends on BT && BT_L2CAP && ISDN_CAPI
+	depends on BT && ISDN_CAPI
 	help
 	  CMTP (CAPI Message Transport Protocol) is a transport layer
 	  for CAPI messages.  CMTP is required for the Bluetooth Common
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf..6c9c1fd 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,28 +53,24 @@
 static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
 {
 	struct cmtp_session *session;
-	struct list_head *p;
 
 	BT_DBG("");
 
-	list_for_each(p, &cmtp_session_list) {
-		session = list_entry(p, struct cmtp_session, list);
+	list_for_each_entry(session, &cmtp_session_list, list)
 		if (!bacmp(bdaddr, &session->bdaddr))
 			return session;
-	}
+
 	return NULL;
 }
 
 static void __cmtp_link_session(struct cmtp_session *session)
 {
-	__module_get(THIS_MODULE);
 	list_add(&session->list, &cmtp_session_list);
 }
 
 static void __cmtp_unlink_session(struct cmtp_session *session)
 {
 	list_del(&session->list);
-	module_put(THIS_MODULE);
 }
 
 static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +323,7 @@
 	up_write(&cmtp_session_sem);
 
 	kfree(session);
+	module_put_and_exit(0);
 	return 0;
 }
 
@@ -376,9 +373,11 @@
 
 	__cmtp_link_session(session);
 
+	__module_get(THIS_MODULE);
 	session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
 								session->num);
 	if (IS_ERR(session->task)) {
+		module_put(THIS_MODULE);
 		err = PTR_ERR(session->task);
 		goto unlink;
 	}
@@ -431,19 +430,16 @@
 
 int cmtp_get_connlist(struct cmtp_connlist_req *req)
 {
-	struct list_head *p;
+	struct cmtp_session *session;
 	int err = 0, n = 0;
 
 	BT_DBG("");
 
 	down_read(&cmtp_session_sem);
 
-	list_for_each(p, &cmtp_session_list) {
-		struct cmtp_session *session;
+	list_for_each_entry(session, &cmtp_session_list, list) {
 		struct cmtp_conninfo ci;
 
-		session = list_entry(p, struct cmtp_session, list);
-
 		__cmtp_copy_session(session, &ci);
 
 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af723..3db4324 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -123,7 +123,7 @@
 
 	BT_DBG("%p", conn);
 
-	if (conn->hdev->hci_ver < 2)
+	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return;
 
 	bacpy(&cp.bdaddr, &conn->dst);
@@ -275,9 +275,10 @@
 	}
 }
 
-static void hci_conn_timeout(unsigned long arg)
+static void hci_conn_timeout(struct work_struct *work)
 {
-	struct hci_conn *conn = (void *) arg;
+	struct hci_conn *conn = container_of(work, struct hci_conn,
+							disc_work.work);
 	struct hci_dev *hdev = conn->hdev;
 	__u8 reason;
 
@@ -311,6 +312,42 @@
 	hci_dev_unlock(hdev);
 }
 
+/* Enter sniff mode */
+static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p mode %d", conn, conn->mode);
+
+	if (test_bit(HCI_RAW, &hdev->flags))
+		return;
+
+	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+		return;
+
+	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+		return;
+
+	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+		struct hci_cp_sniff_subrate cp;
+		cp.handle             = cpu_to_le16(conn->handle);
+		cp.max_latency        = cpu_to_le16(0);
+		cp.min_remote_timeout = cpu_to_le16(0);
+		cp.min_local_timeout  = cpu_to_le16(0);
+		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+	}
+
+	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+		struct hci_cp_sniff_mode cp;
+		cp.handle       = cpu_to_le16(conn->handle);
+		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+		cp.attempt      = cpu_to_le16(4);
+		cp.timeout      = cpu_to_le16(1);
+		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+	}
+}
+
 static void hci_conn_idle(unsigned long arg)
 {
 	struct hci_conn *conn = (void *) arg;
@@ -325,12 +362,8 @@
 	struct hci_conn *conn = (void *) arg;
 	struct hci_dev *hdev = conn->hdev;
 
-	hci_dev_lock(hdev);
-
 	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
 								&conn->dst);
-
-	hci_dev_unlock(hdev);
 }
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -374,7 +407,9 @@
 
 	skb_queue_head_init(&conn->data_q);
 
-	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
+	INIT_LIST_HEAD(&conn->chan_list);
+
+	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
 	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
 	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
 							(unsigned long) conn);
@@ -383,8 +418,6 @@
 
 	hci_dev_hold(hdev);
 
-	tasklet_disable(&hdev->tx_task);
-
 	hci_conn_hash_add(hdev, conn);
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
@@ -393,8 +426,6 @@
 
 	hci_conn_init_sysfs(conn);
 
-	tasklet_enable(&hdev->tx_task);
-
 	return conn;
 }
 
@@ -406,7 +437,7 @@
 
 	del_timer(&conn->idle_timer);
 
-	del_timer(&conn->disc_timer);
+	cancel_delayed_work_sync(&conn->disc_work);
 
 	del_timer(&conn->auto_accept_timer);
 
@@ -430,14 +461,13 @@
 		}
 	}
 
-	tasklet_disable(&hdev->tx_task);
+
+	hci_chan_list_flush(conn);
 
 	hci_conn_hash_del(hdev, conn);
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
 
-	tasklet_enable(&hdev->tx_task);
-
 	skb_queue_purge(&conn->data_q);
 
 	hci_conn_put_device(conn);
@@ -453,16 +483,13 @@
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 {
 	int use_src = bacmp(src, BDADDR_ANY);
-	struct hci_dev *hdev = NULL;
-	struct list_head *p;
+	struct hci_dev *hdev = NULL, *d;
 
 	BT_DBG("%s -> %s", batostr(src), batostr(dst));
 
-	read_lock_bh(&hci_dev_list_lock);
+	read_lock(&hci_dev_list_lock);
 
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
+	list_for_each_entry(d, &hci_dev_list, list) {
 		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
 			continue;
 
@@ -485,7 +512,7 @@
 	if (hdev)
 		hdev = hci_dev_hold(hdev);
 
-	read_unlock_bh(&hci_dev_list_lock);
+	read_unlock(&hci_dev_list_lock);
 	return hdev;
 }
 EXPORT_SYMBOL(hci_get_route);
@@ -673,7 +700,7 @@
 		goto encrypt;
 
 auth:
-	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
 		return 0;
 
 	if (!hci_conn_auth(conn, sec_level, auth_type))
@@ -766,60 +793,18 @@
 			jiffies + msecs_to_jiffies(hdev->idle_timeout));
 }
 
-/* Enter sniff mode */
-void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	BT_DBG("conn %p mode %d", conn, conn->mode);
-
-	if (test_bit(HCI_RAW, &hdev->flags))
-		return;
-
-	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
-		return;
-
-	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
-		return;
-
-	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
-		struct hci_cp_sniff_subrate cp;
-		cp.handle             = cpu_to_le16(conn->handle);
-		cp.max_latency        = cpu_to_le16(0);
-		cp.min_remote_timeout = cpu_to_le16(0);
-		cp.min_local_timeout  = cpu_to_le16(0);
-		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
-	}
-
-	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
-		struct hci_cp_sniff_mode cp;
-		cp.handle       = cpu_to_le16(conn->handle);
-		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
-		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
-		cp.attempt      = cpu_to_le16(4);
-		cp.timeout      = cpu_to_le16(1);
-		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
-	}
-}
-
 /* Drop all connection on the device */
 void hci_conn_hash_flush(struct hci_dev *hdev)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
+	struct hci_conn *c;
 
 	BT_DBG("hdev %s", hdev->name);
 
-	p = h->list.next;
-	while (p != &h->list) {
-		struct hci_conn *c;
-
-		c = list_entry(p, struct hci_conn, list);
-		p = p->next;
-
+	list_for_each_entry_rcu(c, &h->list, list) {
 		c->state = BT_CLOSED;
 
-		hci_proto_disconn_cfm(c, 0x16);
+		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
 		hci_conn_del(c);
 	}
 }
@@ -855,10 +840,10 @@
 
 int hci_get_conn_list(void __user *arg)
 {
+	register struct hci_conn *c;
 	struct hci_conn_list_req req, *cl;
 	struct hci_conn_info *ci;
 	struct hci_dev *hdev;
-	struct list_head *p;
 	int n = 0, size, err;
 
 	if (copy_from_user(&req, arg, sizeof(req)))
@@ -881,11 +866,8 @@
 
 	ci = cl->conn_info;
 
-	hci_dev_lock_bh(hdev);
-	list_for_each(p, &hdev->conn_hash.list) {
-		register struct hci_conn *c;
-		c = list_entry(p, struct hci_conn, list);
-
+	hci_dev_lock(hdev);
+	list_for_each_entry(c, &hdev->conn_hash.list, list) {
 		bacpy(&(ci + n)->bdaddr, &c->dst);
 		(ci + n)->handle = c->handle;
 		(ci + n)->type  = c->type;
@@ -895,7 +877,7 @@
 		if (++n >= req.conn_num)
 			break;
 	}
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	cl->dev_id = hdev->id;
 	cl->conn_num = n;
@@ -919,7 +901,7 @@
 	if (copy_from_user(&req, arg, sizeof(req)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
 	if (conn) {
 		bacpy(&ci.bdaddr, &conn->dst);
@@ -929,7 +911,7 @@
 		ci.state = conn->state;
 		ci.link_mode = conn->link_mode;
 	}
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	if (!conn)
 		return -ENOENT;
@@ -945,14 +927,60 @@
 	if (copy_from_user(&req, arg, sizeof(req)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
 	if (conn)
 		req.type = conn->auth_type;
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	if (!conn)
 		return -ENOENT;
 
 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
 }
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_chan *chan;
+
+	BT_DBG("%s conn %p", hdev->name, conn);
+
+	chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+	if (!chan)
+		return NULL;
+
+	chan->conn = conn;
+	skb_queue_head_init(&chan->data_q);
+
+	list_add_rcu(&chan->list, &conn->chan_list);
+
+	return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+	struct hci_conn *conn = chan->conn;
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+
+	list_del_rcu(&chan->list);
+
+	synchronize_rcu();
+
+	skb_queue_purge(&chan->data_q);
+	kfree(chan);
+
+	return 0;
+}
+
+void hci_chan_list_flush(struct hci_conn *conn)
+{
+	struct hci_chan *chan;
+
+	BT_DBG("conn %p", conn);
+
+	list_for_each_entry_rcu(chan, &conn->chan_list, list)
+		hci_chan_del(chan);
+}
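
Note on the hci_chan helpers added above: the per-connection channel list is walked locklessly by the new TX schedulers in hci_core.c, so writers publish and retire entries with the RCU list primitives and only free a channel after a grace period. A minimal, self-contained sketch of that pattern (the demo_* names are illustrative and not part of this patch):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_chan {
		struct list_head list;
		int id;
	};

	static LIST_HEAD(demo_chans);

	/* Writer: publish a new entry for lockless readers. */
	static struct demo_chan *demo_chan_add(int id)
	{
		struct demo_chan *c = kzalloc(sizeof(*c), GFP_ATOMIC);

		if (!c)
			return NULL;

		c->id = id;
		list_add_rcu(&c->list, &demo_chans);
		return c;
	}

	/* Writer: unlink, wait out readers in flight, then free. */
	static void demo_chan_del(struct demo_chan *c)
	{
		list_del_rcu(&c->list);
		synchronize_rcu();
		kfree(c);
	}

	/* Reader: walk the list without taking a lock. */
	static bool demo_chan_exists(int id)
	{
		struct demo_chan *c;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &demo_chans, list) {
			if (c->id == id) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}

Readers must stay inside rcu_read_lock()/rcu_read_unlock(); hci_chan_sent() and hci_prio_recalculate() in hci_core.c below follow exactly that shape.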
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae3..845da3e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
 /*
    BlueZ - Bluetooth protocol stack for Linux
    Copyright (C) 2000-2001 Qualcomm Incorporated
+   Copyright (C) 2011 ProFUSION Embedded Systems
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -54,11 +55,11 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
+int enable_hs;
 
-static DEFINE_RWLOCK(hci_task_lock);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
 
 /* HCI device list */
 LIST_HEAD(hci_dev_list);
@@ -68,10 +69,6 @@
 LIST_HEAD(hci_cb_list);
 DEFINE_RWLOCK(hci_cb_list_lock);
 
-/* HCI protocols */
-#define HCI_MAX_PROTO	2
-struct hci_proto *hci_proto[HCI_MAX_PROTO];
-
 /* HCI notifiers list */
 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
 
@@ -190,33 +187,20 @@
 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 }
 
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+static void bredr_init(struct hci_dev *hdev)
 {
 	struct hci_cp_delete_stored_link_key cp;
-	struct sk_buff *skb;
 	__le16 param;
 	__u8 flt_type;
 
-	BT_DBG("%s %ld", hdev->name, opt);
-
-	/* Driver initialization */
-
-	/* Special commands */
-	while ((skb = skb_dequeue(&hdev->driver_init))) {
-		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-		skb->dev = (void *) hdev;
-
-		skb_queue_tail(&hdev->cmd_q, skb);
-		tasklet_schedule(&hdev->cmd_task);
-	}
-	skb_queue_purge(&hdev->driver_init);
+	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
 
 	/* Mandatory initialization */
 
 	/* Reset */
 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
-			set_bit(HCI_RESET, &hdev->flags);
-			hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+		set_bit(HCI_RESET, &hdev->flags);
+		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 	}
 
 	/* Read Local Supported Features */
@@ -228,18 +212,6 @@
 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
 
-#if 0
-	/* Host buffer size */
-	{
-		struct hci_cp_host_buffer_size cp;
-		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
-		cp.sco_mtu = HCI_MAX_SCO_SIZE;
-		cp.acl_max_pkt = cpu_to_le16(0xffff);
-		cp.sco_max_pkt = cpu_to_le16(0xffff);
-		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
-	}
-#endif
-
 	/* Read BD Address */
 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 
@@ -267,6 +239,51 @@
 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
 }
 
+static void amp_init(struct hci_dev *hdev)
+{
+	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+
+	/* Reset */
+	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+
+	/* Read Local Version */
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+}
+
+static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+	struct sk_buff *skb;
+
+	BT_DBG("%s %ld", hdev->name, opt);
+
+	/* Driver initialization */
+
+	/* Special commands */
+	while ((skb = skb_dequeue(&hdev->driver_init))) {
+		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+		skb->dev = (void *) hdev;
+
+		skb_queue_tail(&hdev->cmd_q, skb);
+		queue_work(hdev->workqueue, &hdev->cmd_work);
+	}
+	skb_queue_purge(&hdev->driver_init);
+
+	switch (hdev->dev_type) {
+	case HCI_BREDR:
+		bredr_init(hdev);
+		break;
+
+	case HCI_AMP:
+		amp_init(hdev);
+		break;
+
+	default:
+		BT_ERR("Unknown device type %d", hdev->dev_type);
+		break;
+	}
+
+}
+
 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
 {
 	BT_DBG("%s", hdev->name);
@@ -319,8 +336,7 @@
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-	struct hci_dev *hdev = NULL;
-	struct list_head *p;
+	struct hci_dev *hdev = NULL, *d;
 
 	BT_DBG("%d", index);
 
@@ -328,8 +344,7 @@
 		return NULL;
 
 	read_lock(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
+	list_for_each_entry(d, &hci_dev_list, list) {
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
 			break;
@@ -445,14 +460,14 @@
 	if (!hdev)
 		return -ENODEV;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
 				inquiry_cache_empty(hdev) ||
 				ir.flags & IREQ_CACHE_FLUSH) {
 		inquiry_cache_flush(hdev);
 		do_inquiry = 1;
 	}
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	timeo = ir.length * msecs_to_jiffies(2000);
 
@@ -474,9 +489,9 @@
 		goto done;
 	}
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	BT_DBG("num_rsp %d", ir.num_rsp);
 
@@ -523,8 +538,9 @@
 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 		set_bit(HCI_RAW, &hdev->flags);
 
-	/* Treat all non BR/EDR controllers as raw devices for now */
-	if (hdev->dev_type != HCI_BREDR)
+	/* Treat all non BR/EDR controllers as raw devices if
+	   enable_hs is not set */
+	if (hdev->dev_type != HCI_BREDR && !enable_hs)
 		set_bit(HCI_RAW, &hdev->flags);
 
 	if (hdev->open(hdev)) {
@@ -551,13 +567,16 @@
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->flags))
-			mgmt_powered(hdev->id, 1);
+		if (!test_bit(HCI_SETUP, &hdev->flags)) {
+			hci_dev_lock(hdev);
+			mgmt_powered(hdev, 1);
+			hci_dev_unlock(hdev);
+		}
 	} else {
 		/* Init failed, cleanup */
-		tasklet_kill(&hdev->rx_task);
-		tasklet_kill(&hdev->tx_task);
-		tasklet_kill(&hdev->cmd_task);
+		flush_work(&hdev->tx_work);
+		flush_work(&hdev->cmd_work);
+		flush_work(&hdev->rx_work);
 
 		skb_queue_purge(&hdev->cmd_q);
 		skb_queue_purge(&hdev->rx_q);
@@ -593,14 +612,25 @@
 		return 0;
 	}
 
-	/* Kill RX and TX tasks */
-	tasklet_kill(&hdev->rx_task);
-	tasklet_kill(&hdev->tx_task);
+	/* Flush RX and TX works */
+	flush_work(&hdev->tx_work);
+	flush_work(&hdev->rx_work);
 
-	hci_dev_lock_bh(hdev);
+	if (hdev->discov_timeout > 0) {
+		cancel_delayed_work(&hdev->discov_off);
+		hdev->discov_timeout = 0;
+	}
+
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work(&hdev->power_off);
+
+	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+		cancel_delayed_work(&hdev->service_cache);
+
+	hci_dev_lock(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	hci_notify(hdev, HCI_DEV_DOWN);
 
@@ -613,12 +643,12 @@
 	if (!test_bit(HCI_RAW, &hdev->flags)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_request(hdev, hci_reset_req, 0,
-					msecs_to_jiffies(HCI_INIT_TIMEOUT));
+					msecs_to_jiffies(250));
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
-	/* Kill cmd task */
-	tasklet_kill(&hdev->cmd_task);
+	/* Flush cmd work */
+	flush_work(&hdev->cmd_work);
 
 	/* Drop queues */
 	skb_queue_purge(&hdev->rx_q);
@@ -636,7 +666,9 @@
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
 
-	mgmt_powered(hdev->id, 0);
+	hci_dev_lock(hdev);
+	mgmt_powered(hdev, 0);
+	hci_dev_unlock(hdev);
 
 	/* Clear flags */
 	hdev->flags = 0;
@@ -670,7 +702,6 @@
 		return -ENODEV;
 
 	hci_req_lock(hdev);
-	tasklet_disable(&hdev->tx_task);
 
 	if (!test_bit(HCI_UP, &hdev->flags))
 		goto done;
@@ -679,10 +710,10 @@
 	skb_queue_purge(&hdev->rx_q);
 	skb_queue_purge(&hdev->cmd_q);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	if (hdev->flush)
 		hdev->flush(hdev);
@@ -695,7 +726,6 @@
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
 done:
-	tasklet_enable(&hdev->tx_task);
 	hci_req_unlock(hdev);
 	hci_dev_put(hdev);
 	return ret;
@@ -794,9 +824,9 @@
 
 int hci_get_dev_list(void __user *arg)
 {
+	struct hci_dev *hdev;
 	struct hci_dev_list_req *dl;
 	struct hci_dev_req *dr;
-	struct list_head *p;
 	int n = 0, size, err;
 	__u16 dev_num;
 
@@ -814,13 +844,10 @@
 
 	dr = dl->dev_req;
 
-	read_lock_bh(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *hdev;
-
-		hdev = list_entry(p, struct hci_dev, list);
-
-		hci_del_off_timer(hdev);
+	read_lock(&hci_dev_list_lock);
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+			cancel_delayed_work(&hdev->power_off);
 
 		if (!test_bit(HCI_MGMT, &hdev->flags))
 			set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -831,7 +858,7 @@
 		if (++n >= dev_num)
 			break;
 	}
-	read_unlock_bh(&hci_dev_list_lock);
+	read_unlock(&hci_dev_list_lock);
 
 	dl->dev_num = n;
 	size = sizeof(*dl) + n * sizeof(*dr);
@@ -855,7 +882,8 @@
 	if (!hdev)
 		return -ENODEV;
 
-	hci_del_off_timer(hdev);
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work_sync(&hdev->power_off);
 
 	if (!test_bit(HCI_MGMT, &hdev->flags))
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +940,7 @@
 	if (!hdev)
 		return NULL;
 
+	hci_init_sysfs(hdev);
 	skb_queue_head_init(&hdev->driver_init);
 
 	return hdev;
@@ -938,39 +967,41 @@
 		return;
 
 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-		mod_timer(&hdev->off_timer,
-				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+		schedule_delayed_work(&hdev->power_off,
+					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_added(hdev->id);
+		mgmt_index_added(hdev);
 }
 
 static void hci_power_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							power_off.work);
 
 	BT_DBG("%s", hdev->name);
 
+	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
 	hci_dev_close(hdev->id);
 }
 
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) data;
+	struct hci_dev *hdev;
+	u8 scan = SCAN_PAGE;
+
+	hdev = container_of(work, struct hci_dev, discov_off.work);
 
 	BT_DBG("%s", hdev->name);
 
-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+	hci_dev_lock(hdev);
 
-	queue_work(hdev->workqueue, &hdev->power_off);
-}
+	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 
-void hci_del_off_timer(struct hci_dev *hdev)
-{
-	BT_DBG("%s", hdev->name);
+	hdev->discov_timeout = 0;
 
-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
-	del_timer(&hdev->off_timer);
+	hci_dev_unlock(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1038,11 @@
 
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct list_head *p;
+	struct link_key *k;
 
-	list_for_each(p, &hdev->link_keys) {
-		struct link_key *k;
-
-		k = list_entry(p, struct link_key, list);
-
+	list_for_each_entry(k, &hdev->link_keys, list)
 		if (bacmp(bdaddr, &k->bdaddr) == 0)
 			return k;
-	}
 
 	return NULL;
 }
@@ -1138,7 +1164,7 @@
 
 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
 
-	mgmt_new_key(hdev->id, key, persistent);
+	mgmt_new_link_key(hdev, key, persistent);
 
 	if (!persistent) {
 		list_del(&key->list);
@@ -1181,7 +1207,7 @@
 	memcpy(id->rand, rand, sizeof(id->rand));
 
 	if (new_key)
-		mgmt_new_key(hdev->id, key, old_key_type);
+		mgmt_new_link_key(hdev, key, old_key_type);
 
 	return 0;
 }
@@ -1209,7 +1235,7 @@
 
 	BT_ERR("%s command tx timeout", hdev->name);
 	atomic_set(&hdev->cmd_cnt, 1);
-	tasklet_schedule(&hdev->cmd_task);
+	queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
@@ -1279,16 +1305,11 @@
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
 						bdaddr_t *bdaddr)
 {
-	struct list_head *p;
+	struct bdaddr_list *b;
 
-	list_for_each(p, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
-
+	list_for_each_entry(b, &hdev->blacklist, list)
 		if (bacmp(bdaddr, &b->bdaddr) == 0)
 			return b;
-	}
 
 	return NULL;
 }
@@ -1327,31 +1348,30 @@
 
 	list_add(&entry->list, &hdev->blacklist);
 
-	return mgmt_device_blocked(hdev->id, bdaddr);
+	return mgmt_device_blocked(hdev, bdaddr);
 }
 
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct bdaddr_list *entry;
 
-	if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+	if (bacmp(bdaddr, BDADDR_ANY) == 0)
 		return hci_blacklist_clear(hdev);
-	}
 
 	entry = hci_blacklist_lookup(hdev, bdaddr);
-	if (!entry) {
+	if (!entry)
 		return -ENOENT;
-	}
 
 	list_del(&entry->list);
 	kfree(entry);
 
-	return mgmt_device_unblocked(hdev->id, bdaddr);
+	return mgmt_device_unblocked(hdev, bdaddr);
 }
 
-static void hci_clear_adv_cache(unsigned long arg)
+static void hci_clear_adv_cache(struct work_struct *work)
 {
-	struct hci_dev *hdev = (void *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							adv_work.work);
 
 	hci_dev_lock(hdev);
 
@@ -1425,7 +1445,7 @@
 int hci_register_dev(struct hci_dev *hdev)
 {
 	struct list_head *head = &hci_dev_list, *p;
-	int i, id = 0;
+	int i, id, error;
 
 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
 						hdev->bus, hdev->owner);
@@ -1433,7 +1453,12 @@
 	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;
 
-	write_lock_bh(&hci_dev_list_lock);
+	/* Do not allow HCI_AMP devices to register at index 0,
+	 * so the index can be used as the AMP controller ID.
+	 */
+	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
+	write_lock(&hci_dev_list_lock);
 
 	/* Find first available device id */
 	list_for_each(p, &hci_dev_list) {
@@ -1444,12 +1469,13 @@
 
 	sprintf(hdev->name, "hci%d", id);
 	hdev->id = id;
-	list_add(&hdev->list, head);
+	list_add_tail(&hdev->list, head);
 
 	atomic_set(&hdev->refcnt, 1);
-	spin_lock_init(&hdev->lock);
+	mutex_init(&hdev->lock);
 
 	hdev->flags = 0;
+	hdev->dev_flags = 0;
 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
 	hdev->esco_type = (ESCO_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1459,9 +1485,10 @@
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
 
-	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
-	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
-	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+	INIT_WORK(&hdev->rx_work, hci_rx_work);
+	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+	INIT_WORK(&hdev->tx_work, hci_tx_work);
+
 
 	skb_queue_head_init(&hdev->rx_q);
 	skb_queue_head_init(&hdev->cmd_q);
@@ -1479,6 +1506,8 @@
 
 	hci_conn_hash_init(hdev);
 
+	INIT_LIST_HEAD(&hdev->mgmt_pending);
+
 	INIT_LIST_HEAD(&hdev->blacklist);
 
 	INIT_LIST_HEAD(&hdev->uuids);
@@ -1488,24 +1517,29 @@
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
 
 	INIT_LIST_HEAD(&hdev->adv_entries);
-	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
-						(unsigned long) hdev);
 
+	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
 	INIT_WORK(&hdev->power_on, hci_power_on);
-	INIT_WORK(&hdev->power_off, hci_power_off);
-	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
 
 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
 	atomic_set(&hdev->promisc, 0);
 
-	write_unlock_bh(&hci_dev_list_lock);
+	write_unlock(&hci_dev_list_lock);
 
-	hdev->workqueue = create_singlethread_workqueue(hdev->name);
-	if (!hdev->workqueue)
-		goto nomem;
+	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+							WQ_MEM_RECLAIM, 1);
+	if (!hdev->workqueue) {
+		error = -ENOMEM;
+		goto err;
+	}
 
-	hci_register_sysfs(hdev);
+	error = hci_add_sysfs(hdev);
+	if (error < 0)
+		goto err_wqueue;
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1518,31 +1552,33 @@
 
 	set_bit(HCI_AUTO_OFF, &hdev->flags);
 	set_bit(HCI_SETUP, &hdev->flags);
-	queue_work(hdev->workqueue, &hdev->power_on);
+	schedule_work(&hdev->power_on);
 
 	hci_notify(hdev, HCI_DEV_REG);
 
 	return id;
 
-nomem:
-	write_lock_bh(&hci_dev_list_lock);
+err_wqueue:
+	destroy_workqueue(hdev->workqueue);
+err:
+	write_lock(&hci_dev_list_lock);
 	list_del(&hdev->list);
-	write_unlock_bh(&hci_dev_list_lock);
+	write_unlock(&hci_dev_list_lock);
 
-	return -ENOMEM;
+	return error;
 }
 EXPORT_SYMBOL(hci_register_dev);
 
 /* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
 {
 	int i;
 
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-	write_lock_bh(&hci_dev_list_lock);
+	write_lock(&hci_dev_list_lock);
 	list_del(&hdev->list);
-	write_unlock_bh(&hci_dev_list_lock);
+	write_unlock(&hci_dev_list_lock);
 
 	hci_dev_do_close(hdev);
 
@@ -1550,8 +1586,15 @@
 		kfree_skb(hdev->reassembly[i]);
 
 	if (!test_bit(HCI_INIT, &hdev->flags) &&
-					!test_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_removed(hdev->id);
+					!test_bit(HCI_SETUP, &hdev->flags)) {
+		hci_dev_lock(hdev);
+		mgmt_index_removed(hdev);
+		hci_dev_unlock(hdev);
+	}
+
+	/* mgmt_index_removed should take care of emptying the
+	 * pending list */
+	BUG_ON(!list_empty(&hdev->mgmt_pending));
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
@@ -1560,24 +1603,21 @@
 		rfkill_destroy(hdev->rfkill);
 	}
 
-	hci_unregister_sysfs(hdev);
+	hci_del_sysfs(hdev);
 
-	hci_del_off_timer(hdev);
-	del_timer(&hdev->adv_timer);
+	cancel_delayed_work_sync(&hdev->adv_work);
 
 	destroy_workqueue(hdev->workqueue);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	hci_blacklist_clear(hdev);
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
 	hci_adv_entries_clear(hdev);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	__hci_dev_put(hdev);
-
-	return 0;
 }
 EXPORT_SYMBOL(hci_unregister_dev);
 
@@ -1613,9 +1653,8 @@
 	/* Time stamp */
 	__net_timestamp(skb);
 
-	/* Queue frame for rx task */
 	skb_queue_tail(&hdev->rx_q, skb);
-	tasklet_schedule(&hdev->rx_task);
+	queue_work(hdev->workqueue, &hdev->rx_work);
 
 	return 0;
 }
@@ -1787,59 +1826,13 @@
 
 /* ---- Interface to upper protocols ---- */
 
-/* Register/Unregister protocols.
- * hci_task_lock is used to ensure that no tasks are running. */
-int hci_register_proto(struct hci_proto *hp)
-{
-	int err = 0;
-
-	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
-	if (hp->id >= HCI_MAX_PROTO)
-		return -EINVAL;
-
-	write_lock_bh(&hci_task_lock);
-
-	if (!hci_proto[hp->id])
-		hci_proto[hp->id] = hp;
-	else
-		err = -EEXIST;
-
-	write_unlock_bh(&hci_task_lock);
-
-	return err;
-}
-EXPORT_SYMBOL(hci_register_proto);
-
-int hci_unregister_proto(struct hci_proto *hp)
-{
-	int err = 0;
-
-	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
-	if (hp->id >= HCI_MAX_PROTO)
-		return -EINVAL;
-
-	write_lock_bh(&hci_task_lock);
-
-	if (hci_proto[hp->id])
-		hci_proto[hp->id] = NULL;
-	else
-		err = -ENOENT;
-
-	write_unlock_bh(&hci_task_lock);
-
-	return err;
-}
-EXPORT_SYMBOL(hci_unregister_proto);
-
 int hci_register_cb(struct hci_cb *cb)
 {
 	BT_DBG("%p name %s", cb, cb->name);
 
-	write_lock_bh(&hci_cb_list_lock);
+	write_lock(&hci_cb_list_lock);
 	list_add(&cb->list, &hci_cb_list);
-	write_unlock_bh(&hci_cb_list_lock);
+	write_unlock(&hci_cb_list_lock);
 
 	return 0;
 }
@@ -1849,9 +1842,9 @@
 {
 	BT_DBG("%p name %s", cb, cb->name);
 
-	write_lock_bh(&hci_cb_list_lock);
+	write_lock(&hci_cb_list_lock);
 	list_del(&cb->list);
-	write_unlock_bh(&hci_cb_list_lock);
+	write_unlock(&hci_cb_list_lock);
 
 	return 0;
 }
@@ -1912,7 +1905,7 @@
 		hdev->init_last_cmd = opcode;
 
 	skb_queue_tail(&hdev->cmd_q, skb);
-	tasklet_schedule(&hdev->cmd_task);
+	queue_work(hdev->workqueue, &hdev->cmd_work);
 
 	return 0;
 }
@@ -1948,23 +1941,18 @@
 	hdr->dlen   = cpu_to_le16(len);
 }
 
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+				struct sk_buff *skb, __u16 flags)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct sk_buff *list;
 
-	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
-	skb->dev = (void *) hdev;
-	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-	hci_add_acl_hdr(skb, conn->handle, flags);
-
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
 		/* Non fragmented */
 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
 
-		skb_queue_tail(&conn->data_q, skb);
+		skb_queue_tail(queue, skb);
 	} else {
 		/* Fragmented */
 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1960,9 @@
 		skb_shinfo(skb)->frag_list = NULL;
 
 		/* Queue all fragments atomically */
-		spin_lock_bh(&conn->data_q.lock);
+		spin_lock(&queue->lock);
 
-		__skb_queue_tail(&conn->data_q, skb);
+		__skb_queue_tail(queue, skb);
 
 		flags &= ~ACL_START;
 		flags |= ACL_CONT;
@@ -1987,13 +1975,27 @@
 
 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
-			__skb_queue_tail(&conn->data_q, skb);
+			__skb_queue_tail(queue, skb);
 		} while (list);
 
-		spin_unlock_bh(&conn->data_q.lock);
+		spin_unlock(&queue->lock);
 	}
+}
 
-	tasklet_schedule(&hdev->tx_task);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+	struct hci_conn *conn = chan->conn;
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+	skb->dev = (void *) hdev;
+	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+	hci_add_acl_hdr(skb, conn->handle, flags);
+
+	hci_queue_acl(conn, &chan->data_q, skb, flags);
+
+	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 EXPORT_SYMBOL(hci_send_acl);
 
@@ -2016,7 +2018,7 @@
 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 
 	skb_queue_tail(&conn->data_q, skb);
-	tasklet_schedule(&hdev->tx_task);
+	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 EXPORT_SYMBOL(hci_send_sco);
 
@@ -2026,16 +2028,15 @@
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *conn = NULL;
+	struct hci_conn *conn = NULL, *c;
 	int num = 0, min = ~0;
-	struct list_head *p;
 
 	/* We don't have to lock device here. Connections are always
 	 * added and removed with TX task disabled. */
-	list_for_each(p, &h->list) {
-		struct hci_conn *c;
-		c = list_entry(p, struct hci_conn, list);
 
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
 		if (c->type != type || skb_queue_empty(&c->data_q))
 			continue;
 
@@ -2053,6 +2054,8 @@
 			break;
 	}
 
+	rcu_read_unlock();
+
 	if (conn) {
 		int cnt, q;
 
@@ -2084,27 +2087,159 @@
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
-	struct hci_conn  *c;
+	struct hci_conn *c;
 
 	BT_ERR("%s link tx timeout", hdev->name);
 
+	rcu_read_lock();
+
 	/* Kill stalled connections */
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
+	list_for_each_entry_rcu(c, &h->list, list) {
 		if (c->type == type && c->sent) {
 			BT_ERR("%s killing stalled connection %s",
 				hdev->name, batostr(&c->dst));
 			hci_acl_disconn(c, 0x13);
 		}
 	}
+
+	rcu_read_unlock();
+}
+
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+						int *quote)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_chan *chan = NULL;
+	int num = 0, min = ~0, cur_prio = 0;
+	struct hci_conn *conn;
+	int cnt, q, conn_num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(conn, &h->list, list) {
+		struct hci_chan *tmp;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		conn_num++;
+
+		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
+			struct sk_buff *skb;
+
+			if (skb_queue_empty(&tmp->data_q))
+				continue;
+
+			skb = skb_peek(&tmp->data_q);
+			if (skb->priority < cur_prio)
+				continue;
+
+			if (skb->priority > cur_prio) {
+				num = 0;
+				min = ~0;
+				cur_prio = skb->priority;
+			}
+
+			num++;
+
+			if (conn->sent < min) {
+				min  = conn->sent;
+				chan = tmp;
+			}
+		}
+
+		if (hci_conn_num(hdev, type) == conn_num)
+			break;
+	}
+
+	rcu_read_unlock();
+
+	if (!chan)
+		return NULL;
+
+	switch (chan->conn->type) {
+	case ACL_LINK:
+		cnt = hdev->acl_cnt;
+		break;
+	case SCO_LINK:
+	case ESCO_LINK:
+		cnt = hdev->sco_cnt;
+		break;
+	case LE_LINK:
+		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+		break;
+	default:
+		cnt = 0;
+		BT_ERR("Unknown link type");
+	}
+
+	q = cnt / num;
+	*quote = q ? q : 1;
+	BT_DBG("chan %p quote %d", chan, *quote);
+	return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *conn;
+	int num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(conn, &h->list, list) {
+		struct hci_chan *chan;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		num++;
+
+		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
+			struct sk_buff *skb;
+
+			if (chan->sent) {
+				chan->sent = 0;
+				continue;
+			}
+
+			if (skb_queue_empty(&chan->data_q))
+				continue;
+
+			skb = skb_peek(&chan->data_q);
+			if (skb->priority >= HCI_PRIO_MAX - 1)
+				continue;
+
+			skb->priority = HCI_PRIO_MAX - 1;
+
+			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+								skb->priority);
+		}
+
+		if (hci_conn_num(hdev, type) == num)
+			break;
+	}
+
+	rcu_read_unlock();
+
 }
 
 static inline void hci_sched_acl(struct hci_dev *hdev)
 {
-	struct hci_conn *conn;
+	struct hci_chan *chan;
 	struct sk_buff *skb;
 	int quote;
+	unsigned int cnt;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2118,19 +2253,35 @@
 			hci_link_tx_to(hdev, ACL_LINK);
 	}
 
-	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	cnt = hdev->acl_cnt;
 
-			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+	while (hdev->acl_cnt &&
+			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
+
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
+
+			hci_conn_enter_active_mode(chan->conn,
+						bt_cb(skb)->force_active);
 
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
 
 			hdev->acl_cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
+	if (cnt != hdev->acl_cnt)
+		hci_prio_recalculate(hdev, ACL_LINK);
 }
 
 /* Schedule SCO */
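
The rewritten ACL scheduler above drains per-channel queues in priority order and caps each pass with a quota derived from the free controller credits. The quota rule from hci_chan_sent() boils down to the following (a sketch with illustrative numbers, not new kernel code):

	/* Credits per pass: free controller credits split across the
	 * channels queued at the winning priority, at least one each. */
	static int chan_quota(int free_credits, int num_chans)
	{
		int q = free_credits / num_chans;

		return q ? q : 1;
	}

For example, with hdev->acl_cnt = 8 free ACL credits and three channels queued at the top priority, the quota is 8 / 3 = 2 frames per channel per pass; once credits drop below the channel count the quota clamps to 1 so every channel still makes progress, and hci_prio_recalculate() later bumps the priority of queued frames on channels that sent nothing.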
@@ -2182,9 +2333,9 @@
 
 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-	struct hci_conn *conn;
+	struct hci_chan *chan;
 	struct sk_buff *skb;
-	int quote, cnt;
+	int quote, cnt, tmp;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2200,30 +2351,42 @@
 	}
 
 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	tmp = cnt;
+	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
+
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
 
 			hci_send_frame(skb);
 			hdev->le_last_tx = jiffies;
 
 			cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
 	if (hdev->le_pkts)
 		hdev->le_cnt = cnt;
 	else
 		hdev->acl_cnt = cnt;
+
+	if (cnt != tmp)
+		hci_prio_recalculate(hdev, LE_LINK);
 }
 
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
 	struct sk_buff *skb;
 
-	read_lock(&hci_task_lock);
-
 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
 		hdev->sco_cnt, hdev->le_cnt);
 
@@ -2240,8 +2403,6 @@
 	/* Send next queued raw (unknown type) packet */
 	while ((skb = skb_dequeue(&hdev->raw_q)))
 		hci_send_frame(skb);
-
-	read_unlock(&hci_task_lock);
 }
 
 /* ----- HCI RX task (incoming data processing) ----- */
@@ -2268,16 +2429,11 @@
 	hci_dev_unlock(hdev);
 
 	if (conn) {
-		register struct hci_proto *hp;
-
-		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
 
 		/* Send to upper protocol */
-		hp = hci_proto[HCI_PROTO_L2CAP];
-		if (hp && hp->recv_acldata) {
-			hp->recv_acldata(conn, skb, flags);
-			return;
-		}
+		l2cap_recv_acldata(conn, skb, flags);
+		return;
 	} else {
 		BT_ERR("%s ACL packet for unknown connection handle %d",
 			hdev->name, handle);
@@ -2306,14 +2462,9 @@
 	hci_dev_unlock(hdev);
 
 	if (conn) {
-		register struct hci_proto *hp;
-
 		/* Send to upper protocol */
-		hp = hci_proto[HCI_PROTO_SCO];
-		if (hp && hp->recv_scodata) {
-			hp->recv_scodata(conn, skb);
-			return;
-		}
+		sco_recv_scodata(conn, skb);
+		return;
 	} else {
 		BT_ERR("%s SCO packet for unknown connection handle %d",
 			hdev->name, handle);
@@ -2322,15 +2473,13 @@
 	kfree_skb(skb);
 }
 
-static void hci_rx_task(unsigned long arg)
+static void hci_rx_work(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
 	struct sk_buff *skb;
 
 	BT_DBG("%s", hdev->name);
 
-	read_lock(&hci_task_lock);
-
 	while ((skb = skb_dequeue(&hdev->rx_q))) {
 		if (atomic_read(&hdev->promisc)) {
 			/* Send copy to the sockets */
@@ -2355,6 +2504,7 @@
 		/* Process frame */
 		switch (bt_cb(skb)->pkt_type) {
 		case HCI_EVENT_PKT:
+			BT_DBG("%s Event packet", hdev->name);
 			hci_event_packet(hdev, skb);
 			break;
 
@@ -2373,13 +2523,11 @@
 			break;
 		}
 	}
-
-	read_unlock(&hci_task_lock);
 }
 
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
 	struct sk_buff *skb;
 
 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2403,7 +2551,38 @@
 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
 		} else {
 			skb_queue_head(&hdev->cmd_q, skb);
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 		}
 	}
 }
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+	/* General inquiry access code (GIAC) */
+	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+
+	BT_DBG("%s", hdev->name);
+
+	if (test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EINPROGRESS;
+
+	memset(&cp, 0, sizeof(cp));
+	memcpy(&cp.lap, lap, sizeof(cp.lap));
+	cp.length  = length;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+	BT_DBG("%s", hdev->name);
+
+	if (!test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EPERM;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
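
The dominant change in this file is the move from tasklets to per-device work items: hci_rx_task/hci_tx_task/hci_cmd_task become hci_rx_work/hci_tx_work/hci_cmd_work, hci_task_lock disappears, and every tasklet_schedule() turns into queue_work() on hdev->workqueue. A minimal sketch of the conversion pattern (demo_* names are placeholders, not hci_dev fields):

	#include <linux/errno.h>
	#include <linux/printk.h>
	#include <linux/workqueue.h>

	struct demo_dev {
		struct workqueue_struct *wq;
		struct work_struct rx_work;
	};

	/* The handler now runs in process context, so sleeping locks
	 * (e.g. a mutex behind hci_dev_lock) are allowed here. */
	static void demo_rx_work(struct work_struct *work)
	{
		struct demo_dev *dev = container_of(work, struct demo_dev, rx_work);

		pr_debug("draining rx queue for %p\n", dev);
	}

	static int demo_setup(struct demo_dev *dev)
	{
		dev->wq = alloc_workqueue("demo_wq", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
		if (!dev->wq)
			return -ENOMEM;

		INIT_WORK(&dev->rx_work, demo_rx_work);
		return 0;
	}

	/* Old: tasklet_schedule(&dev->rx_task); */
	static void demo_kick_rx(struct demo_dev *dev)
	{
		queue_work(dev->wq, &dev->rx_work);
	}

Because the handlers no longer run in softirq context, callers can drop the _bh suffix from the hci_dev_list_lock and hci_cb_list_lock accesses and hdev->lock can become a mutex, which is what the hci_dev_lock_bh() -> hci_dev_lock() churn throughout this merge reflects.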
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6..4221bd2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,7 +45,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static int enable_le;
+static bool enable_le;
 
 /* Handle HCI Event packets */
 
@@ -55,12 +55,18 @@
 
 	BT_DBG("%s status 0x%x", hdev->name, status);
 
-	if (status)
+	if (status) {
+		hci_dev_lock(hdev);
+		mgmt_stop_discovery_failed(hdev, status);
+		hci_dev_unlock(hdev);
 		return;
+	}
 
-	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-			test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 0);
+	clear_bit(HCI_INQUIRY, &hdev->flags);
+
+	hci_dev_lock(hdev);
+	mgmt_discovering(hdev, 0);
+	hci_dev_unlock(hdev);
 
 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
 
@@ -76,10 +82,6 @@
 	if (status)
 		return;
 
-	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 0);
-
 	hci_conn_check_pending(hdev);
 }
 
@@ -192,6 +194,8 @@
 	clear_bit(HCI_RESET, &hdev->flags);
 
 	hci_req_complete(hdev, HCI_OP_RESET, status);
+
+	hdev->dev_flags = 0;
 }
 
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -205,13 +209,15 @@
 	if (!sent)
 		return;
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_set_local_name_complete(hdev->id, sent, status);
+		mgmt_set_local_name_complete(hdev, sent, status);
 
-	if (status)
-		return;
+	if (status == 0)
+		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 
-	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,7 +280,8 @@
 
 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	__u8 param, status = *((__u8 *) skb->data);
+	int old_pscan, old_iscan;
 	void *sent;
 
 	BT_DBG("%s status 0x%x", hdev->name, status);
@@ -283,28 +290,40 @@
 	if (!sent)
 		return;
 
-	if (!status) {
-		__u8 param = *((__u8 *) sent);
-		int old_pscan, old_iscan;
+	param = *((__u8 *) sent);
 
-		old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
-		old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+	hci_dev_lock(hdev);
 
-		if (param & SCAN_INQUIRY) {
-			set_bit(HCI_ISCAN, &hdev->flags);
-			if (!old_iscan)
-				mgmt_discoverable(hdev->id, 1);
-		} else if (old_iscan)
-			mgmt_discoverable(hdev->id, 0);
-
-		if (param & SCAN_PAGE) {
-			set_bit(HCI_PSCAN, &hdev->flags);
-			if (!old_pscan)
-				mgmt_connectable(hdev->id, 1);
-		} else if (old_pscan)
-			mgmt_connectable(hdev->id, 0);
+	if (status != 0) {
+		mgmt_write_scan_failed(hdev, param, status);
+		hdev->discov_timeout = 0;
+		goto done;
 	}
 
+	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+	if (param & SCAN_INQUIRY) {
+		set_bit(HCI_ISCAN, &hdev->flags);
+		if (!old_iscan)
+			mgmt_discoverable(hdev, 1);
+		if (hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+									to);
+		}
+	} else if (old_iscan)
+		mgmt_discoverable(hdev, 0);
+
+	if (param & SCAN_PAGE) {
+		set_bit(HCI_PSCAN, &hdev->flags);
+		if (!old_pscan)
+			mgmt_connectable(hdev, 1);
+	} else if (old_pscan)
+		mgmt_connectable(hdev, 0);
+
+done:
+	hci_dev_unlock(hdev);
 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
 }
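
The discoverable handling added above hands the switch-off to a delayed work item instead of a timer: hdev->discov_timeout is kept in seconds, so the queueing converts it with msecs_to_jiffies(timeout * 1000), and hci_discov_off() later clears the inquiry scan. A sketch of that delayed-work idiom (illustrative names, not hci_dev fields):

	#include <linux/jiffies.h>
	#include <linux/types.h>
	#include <linux/workqueue.h>

	struct demo_dev {
		struct workqueue_struct *wq;
		struct delayed_work discov_off;
		u16 discov_timeout;	/* seconds */
	};

	static void demo_discov_off(struct work_struct *work)
	{
		struct demo_dev *dev = container_of(work, struct demo_dev,
						    discov_off.work);

		/* ... write the new scan enable setting here ... */
		dev->discov_timeout = 0;
	}

	static void demo_arm_discov_timeout(struct demo_dev *dev)
	{
		unsigned long delay = msecs_to_jiffies(dev->discov_timeout * 1000);

		/* INIT_DELAYED_WORK() is normally done once at device setup. */
		INIT_DELAYED_WORK(&dev->discov_off, demo_discov_off);
		queue_delayed_work(dev->wq, &dev->discov_off, delay);
	}

Note that queue_delayed_work() takes a relative delay in jiffies, not an absolute expiry, which is why conversions from mod_timer(..., jiffies + t) patterns must drop the jiffies base.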
 
@@ -359,11 +378,8 @@
 
 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
 
-	if (hdev->notify) {
-		tasklet_disable(&hdev->tx_task);
+	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
-		tasklet_enable(&hdev->tx_task);
-	}
 }
 
 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -390,11 +406,8 @@
 
 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
 
-	if (hdev->notify) {
-		tasklet_disable(&hdev->tx_task);
+	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
-		tasklet_enable(&hdev->tx_task);
-	}
 }
 
 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -481,7 +494,7 @@
 
 	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
 	 * any event mask for pre 1.2 devices */
-	if (hdev->lmp_ver <= 1)
+	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return;
 
 	events[4] |= 0x01; /* Flow Specification Complete */
@@ -543,9 +556,12 @@
 
 static void hci_setup(struct hci_dev *hdev)
 {
+	if (hdev->dev_type != HCI_BREDR)
+		return;
+
 	hci_setup_event_mask(hdev);
 
-	if (hdev->lmp_ver > 1)
+	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
@@ -700,6 +716,21 @@
 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
 }
 
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hdev->flow_ctl_mode = rp->mode;
+
+	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
+}
+
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -739,6 +770,28 @@
 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
 }
 
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+	hdev->block_len = __le16_to_cpu(rp->block_len);
+	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+
+	hdev->block_cnt = hdev->num_blocks;
+
+	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+					hdev->block_cnt, hdev->block_len);
+
+	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
+}
+
 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -748,6 +801,30 @@
 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
 }
 
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hdev->amp_status = rp->amp_status;
+	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+	hdev->amp_type = rp->amp_type;
+	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
+
 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
 							struct sk_buff *skb)
 {
@@ -804,19 +881,24 @@
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 
 	if (rp->status != 0)
-		return;
+		goto unlock;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
 	if (!cp)
-		return;
+		goto unlock;
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 	if (conn)
 		conn->pin_length = cp->pin_len;
+
+unlock:
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -825,10 +907,15 @@
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
 								rp->status);
+
+	hci_dev_unlock(hdev);
 }
+
 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
 				       struct sk_buff *skb)
 {
@@ -855,9 +942,13 @@
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
 								rp->status);
+
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
@@ -867,9 +958,44 @@
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
 								rp->status);
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	hci_dev_lock(hdev);
+
+	if (test_bit(HCI_MGMT, &hdev->flags))
+		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
+								rp->status);
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	hci_dev_lock(hdev);
+
+	if (test_bit(HCI_MGMT, &hdev->flags))
+		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
+								rp->status);
+
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -879,8 +1005,17 @@
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
-	mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+	hci_dev_lock(hdev);
+	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
 						rp->randomizer, rp->status);
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
 }
 
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -898,14 +1033,28 @@
 	if (!cp)
 		return;
 
-	if (cp->enable == 0x01) {
-		del_timer(&hdev->adv_timer);
+	switch (cp->enable) {
+	case LE_SCANNING_ENABLED:
+		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+		cancel_delayed_work_sync(&hdev->adv_work);
 
 		hci_dev_lock(hdev);
 		hci_adv_entries_clear(hdev);
 		hci_dev_unlock(hdev);
-	} else if (cp->enable == 0x00) {
-		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+		break;
+
+	case LE_SCANNING_DISABLED:
+		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+		cancel_delayed_work_sync(&hdev->adv_work);
+		queue_delayed_work(hdev->workqueue, &hdev->adv_work,
+						 ADV_CLEAR_TIMEOUT);
+		break;
+
+	default:
+		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
+		break;
 	}
 }
 
@@ -955,12 +1104,18 @@
 	if (status) {
 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
 		hci_conn_check_pending(hdev);
+		hci_dev_lock(hdev);
+		if (test_bit(HCI_MGMT, &hdev->flags))
+			mgmt_start_discovery_failed(hdev, status);
+		hci_dev_unlock(hdev);
 		return;
 	}
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 1);
+	set_bit(HCI_INQUIRY, &hdev->flags);
+
+	hci_dev_lock(hdev);
+	mgmt_discovering(hdev, 1);
+	hci_dev_unlock(hdev);
 }
 
 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1339,13 +1494,16 @@
 
 	BT_DBG("%s status %d", hdev->name, status);
 
-	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 0);
-
 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
 
 	hci_conn_check_pending(hdev);
+
+	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+		return;
+
+	hci_dev_lock(hdev);
+	mgmt_discovering(hdev, 0);
+	hci_dev_unlock(hdev);
 }
 
 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1361,12 +1519,6 @@
 
 	hci_dev_lock(hdev);
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-		if (test_bit(HCI_MGMT, &hdev->flags))
-			mgmt_discovering(hdev->id, 1);
-	}
-
 	for (; num_rsp; num_rsp--, info++) {
 		bacpy(&data.bdaddr, &info->bdaddr);
 		data.pscan_rep_mode	= info->pscan_rep_mode;
@@ -1377,8 +1529,8 @@
 		data.rssi		= 0x00;
 		data.ssp_mode		= 0x00;
 		hci_inquiry_cache_update(hdev, &data);
-		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
-									NULL);
+		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+						info->dev_class, 0, NULL);
 	}
 
 	hci_dev_unlock(hdev);
@@ -1412,7 +1564,8 @@
 			conn->state = BT_CONFIG;
 			hci_conn_hold(conn);
 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-			mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+			mgmt_connected(hdev, &ev->bdaddr, conn->type,
+							conn->dst_type);
 		} else
 			conn->state = BT_CONNECTED;
 
@@ -1434,7 +1587,7 @@
 		}
 
 		/* Set packet type for incoming connection */
-		if (!conn->out && hdev->hci_ver < 3) {
+		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
 			struct hci_cp_change_conn_ptype cp;
 			cp.handle = ev->handle;
 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1444,7 +1597,8 @@
 	} else {
 		conn->state = BT_CLOSED;
 		if (conn->type == ACL_LINK)
-			mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+						conn->dst_type, ev->status);
 	}
 
 	if (conn->type == ACL_LINK)
@@ -1531,7 +1685,7 @@
 		struct hci_cp_reject_conn_req cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
-		cp.reason = 0x0f;
+		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
 	}
 }
@@ -1543,24 +1697,27 @@
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
-	if (ev->status) {
-		mgmt_disconnect_failed(hdev->id);
-		return;
-	}
-
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
 	if (!conn)
 		goto unlock;
 
-	conn->state = BT_CLOSED;
+	if (ev->status == 0)
+		conn->state = BT_CLOSED;
 
-	if (conn->type == ACL_LINK || conn->type == LE_LINK)
-		mgmt_disconnected(hdev->id, &conn->dst);
+	if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+		if (ev->status != 0)
+			mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
+		else
+			mgmt_disconnected(hdev, &conn->dst, conn->type,
+							conn->dst_type);
+	}
 
-	hci_proto_disconn_cfm(conn, ev->reason);
-	hci_conn_del(conn);
+	if (ev->status == 0) {
+		hci_proto_disconn_cfm(conn, ev->reason);
+		hci_conn_del(conn);
+	}
 
 unlock:
 	hci_dev_unlock(hdev);
@@ -1588,7 +1745,7 @@
 			conn->sec_level = conn->pending_sec_level;
 		}
 	} else {
-		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+		mgmt_auth_failed(hdev, &conn->dst, ev->status);
 	}
 
 	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1643,7 +1800,7 @@
 	hci_dev_lock(hdev);
 
 	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+		mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
 	if (!conn)
@@ -1894,10 +2051,22 @@
 		hci_cc_read_bd_addr(hdev, skb);
 		break;
 
+	case HCI_OP_READ_DATA_BLOCK_SIZE:
+		hci_cc_read_data_block_size(hdev, skb);
+		break;
+
 	case HCI_OP_WRITE_CA_TIMEOUT:
 		hci_cc_write_ca_timeout(hdev, skb);
 		break;
 
+	case HCI_OP_READ_FLOW_CONTROL_MODE:
+		hci_cc_read_flow_control_mode(hdev, skb);
+		break;
+
+	case HCI_OP_READ_LOCAL_AMP_INFO:
+		hci_cc_read_local_amp_info(hdev, skb);
+		break;
+
 	case HCI_OP_DELETE_STORED_LINK_KEY:
 		hci_cc_delete_stored_link_key(hdev, skb);
 		break;
@@ -1942,6 +2111,17 @@
 		hci_cc_user_confirm_neg_reply(hdev, skb);
 		break;
 
+	case HCI_OP_USER_PASSKEY_REPLY:
+		hci_cc_user_passkey_reply(hdev, skb);
+		break;
+
+	case HCI_OP_USER_PASSKEY_NEG_REPLY:
+		hci_cc_user_passkey_neg_reply(hdev, skb);
+		break;
+
+	case HCI_OP_LE_SET_SCAN_PARAM:
+		hci_cc_le_set_scan_param(hdev, skb);
+		break;
+
 	case HCI_OP_LE_SET_SCAN_ENABLE:
 		hci_cc_le_set_scan_enable(hdev, skb);
 		break;
@@ -1969,7 +2149,7 @@
 	if (ev->ncmd) {
 		atomic_set(&hdev->cmd_cnt, 1);
 		if (!skb_queue_empty(&hdev->cmd_q))
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 	}
 }
 
@@ -2029,7 +2209,7 @@
 
 	case HCI_OP_DISCONNECT:
 		if (ev->status != 0)
-			mgmt_disconnect_failed(hdev->id);
+			mgmt_disconnect_failed(hdev, NULL, ev->status);
 		break;
 
 	case HCI_OP_LE_CREATE_CONN:
@@ -2051,7 +2231,7 @@
 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
 		atomic_set(&hdev->cmd_cnt, 1);
 		if (!skb_queue_empty(&hdev->cmd_q))
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 	}
 }
 
@@ -2084,56 +2264,68 @@
 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
-	__le16 *ptr;
 	int i;
 
 	skb_pull(skb, sizeof(*ev));
 
 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
 
+	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+		return;
+	}
+
 	if (skb->len < ev->num_hndl * 4) {
 		BT_DBG("%s bad parameters", hdev->name);
 		return;
 	}
 
-	tasklet_disable(&hdev->tx_task);
-
-	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+	for (i = 0; i < ev->num_hndl; i++) {
+		struct hci_comp_pkts_info *info = &ev->handles[i];
 		struct hci_conn *conn;
 		__u16  handle, count;
 
-		handle = get_unaligned_le16(ptr++);
-		count  = get_unaligned_le16(ptr++);
+		handle = __le16_to_cpu(info->handle);
+		count  = __le16_to_cpu(info->count);
 
 		conn = hci_conn_hash_lookup_handle(hdev, handle);
-		if (conn) {
-			conn->sent -= count;
+		if (!conn)
+			continue;
 
-			if (conn->type == ACL_LINK) {
+		conn->sent -= count;
+
+		switch (conn->type) {
+		case ACL_LINK:
+			hdev->acl_cnt += count;
+			if (hdev->acl_cnt > hdev->acl_pkts)
+				hdev->acl_cnt = hdev->acl_pkts;
+			break;
+
+		case LE_LINK:
+			if (hdev->le_pkts) {
+				hdev->le_cnt += count;
+				if (hdev->le_cnt > hdev->le_pkts)
+					hdev->le_cnt = hdev->le_pkts;
+			} else {
 				hdev->acl_cnt += count;
 				if (hdev->acl_cnt > hdev->acl_pkts)
 					hdev->acl_cnt = hdev->acl_pkts;
-			} else if (conn->type == LE_LINK) {
-				if (hdev->le_pkts) {
-					hdev->le_cnt += count;
-					if (hdev->le_cnt > hdev->le_pkts)
-						hdev->le_cnt = hdev->le_pkts;
-				} else {
-					hdev->acl_cnt += count;
-					if (hdev->acl_cnt > hdev->acl_pkts)
-						hdev->acl_cnt = hdev->acl_pkts;
-				}
-			} else {
-				hdev->sco_cnt += count;
-				if (hdev->sco_cnt > hdev->sco_pkts)
-					hdev->sco_cnt = hdev->sco_pkts;
 			}
+			break;
+
+		case SCO_LINK:
+			hdev->sco_cnt += count;
+			if (hdev->sco_cnt > hdev->sco_pkts)
+				hdev->sco_cnt = hdev->sco_pkts;
+			break;
+
+		default:
+			BT_ERR("Unknown type %d conn %p", conn->type, conn);
+			break;
 		}
 	}
 
-	tasklet_schedule(&hdev->tx_task);
-
-	tasklet_enable(&hdev->tx_task);
+	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 
 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
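
The Number of Completed Packets handler above now indexes the event's handle/count pairs through struct hci_comp_pkts_info and returns the credits per link type, clamping at the totals the controller advertised at init time. The clamping rule in isolation (a self-contained, illustrative C example, not kernel code):

	#include <stdio.h>

	struct credits {
		unsigned int cnt;	/* credits currently free */
		unsigned int pkts;	/* total advertised by the controller */
	};

	/* Give back 'count' completed packets, never exceeding the total. */
	static void credit_return(struct credits *c, unsigned int count)
	{
		c->cnt += count;
		if (c->cnt > c->pkts)
			c->cnt = c->pkts;
	}

	int main(void)
	{
		struct credits acl = { .cnt = 1, .pkts = 8 };

		credit_return(&acl, 3);		/* 4 free */
		credit_return(&acl, 10);	/* clamped back to 8 */
		printf("acl credits: %u/%u\n", acl.cnt, acl.pkts);
		return 0;
	}

LE links fall back to the ACL pool when the controller reported no dedicated LE buffers (hdev->le_pkts == 0), which is why the LE branch above mirrors the ACL arithmetic.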
@@ -2194,7 +2386,7 @@
 		else
 			secure = 0;
 
-		mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
 	}
 
 unlock:
@@ -2363,12 +2555,6 @@
 
 	hci_dev_lock(hdev);
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-		if (test_bit(HCI_MGMT, &hdev->flags))
-			mgmt_discovering(hdev->id, 1);
-	}
-
 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
 		struct inquiry_info_with_rssi_and_pscan_mode *info;
 		info = (void *) (skb->data + 1);
@@ -2383,7 +2569,7 @@
 			data.rssi		= info->rssi;
 			data.ssp_mode		= 0x00;
 			hci_inquiry_cache_update(hdev, &data);
-			mgmt_device_found(hdev->id, &info->bdaddr,
+			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 						info->dev_class, info->rssi,
 						NULL);
 		}
@@ -2400,7 +2586,7 @@
 			data.rssi		= info->rssi;
 			data.ssp_mode		= 0x00;
 			hci_inquiry_cache_update(hdev, &data);
-			mgmt_device_found(hdev->id, &info->bdaddr,
+			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 						info->dev_class, info->rssi,
 						NULL);
 		}
@@ -2531,12 +2717,6 @@
 	if (!num_rsp)
 		return;
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-		if (test_bit(HCI_MGMT, &hdev->flags))
-			mgmt_discovering(hdev->id, 1);
-	}
-
 	hci_dev_lock(hdev);
 
 	for (; num_rsp; num_rsp--, info++) {
@@ -2549,8 +2729,8 @@
 		data.rssi		= info->rssi;
 		data.ssp_mode		= 0x01;
 		hci_inquiry_cache_update(hdev, &data);
-		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
-						info->rssi, info->data);
+		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+				info->dev_class, info->rssi, info->data);
 	}
 
 	hci_dev_unlock(hdev);
@@ -2614,7 +2794,7 @@
 		struct hci_cp_io_capability_neg_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
-		cp.reason = 0x18; /* Pairing not allowed */
+		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
 							sizeof(cp), &cp);
@@ -2706,13 +2886,28 @@
 	}
 
 confirm:
-	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
 								confirm_hint);
 
 unlock:
 	hci_dev_unlock(hdev);
 }
 
+static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+	BT_DBG("%s", hdev->name);
+
+	hci_dev_lock(hdev);
+
+	if (test_bit(HCI_MGMT, &hdev->flags))
+		mgmt_user_passkey_request(hdev, &ev->bdaddr);
+
+	hci_dev_unlock(hdev);
+}
+
 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
@@ -2732,7 +2927,7 @@
 	 * event gets always produced as initiator and is also mapped to
 	 * the mgmt_auth_failed event */
 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
-		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+		mgmt_auth_failed(hdev, &conn->dst, ev->status);
 
 	hci_conn_put(conn);
 
@@ -2813,14 +3008,15 @@
 	}
 
 	if (ev->status) {
-		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+						conn->dst_type, ev->status);
 		hci_proto_connect_cfm(conn, ev->status);
 		conn->state = BT_CLOSED;
 		hci_conn_del(conn);
 		goto unlock;
 	}
 
-	mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+	mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
 
 	conn->sec_level = BT_SECURITY_LOW;
 	conn->handle = __le16_to_cpu(ev->handle);
@@ -3051,6 +3247,10 @@
 		hci_user_confirm_request_evt(hdev, skb);
 		break;
 
+	case HCI_EV_USER_PASSKEY_REQUEST:
+		hci_user_passkey_request_evt(hdev, skb);
+		break;
+
 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
 		hci_simple_pair_complete_evt(hdev, skb);
 		break;
@@ -3104,5 +3304,5 @@
 	kfree_skb(skb);
 }
 
-module_param(enable_le, bool, 0444);
+module_param(enable_le, bool, 0644);
 MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f6afe3d..6d94616 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -49,7 +49,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static int enable_mgmt;
+static bool enable_mgmt;
 
 /* ----- HCI socket interface ----- */
 
@@ -188,11 +188,11 @@
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_blacklist_add(hdev, &bdaddr);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return err;
 }
@@ -205,11 +205,11 @@
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_blacklist_del(hdev, &bdaddr);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return err;
 }
@@ -343,8 +343,11 @@
 	if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
 		return -EINVAL;
 
-	if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
-		return -EINVAL;
+	if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+		if (!enable_mgmt)
+			return -EINVAL;
+		set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
+	}
 
 	lock_sock(sk);
 
@@ -535,10 +538,10 @@
 
 		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
 			skb_queue_tail(&hdev->raw_q, skb);
-			tasklet_schedule(&hdev->tx_task);
+			queue_work(hdev->workqueue, &hdev->tx_work);
 		} else {
 			skb_queue_tail(&hdev->cmd_q, skb);
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 		}
 	} else {
 		if (!capable(CAP_NET_RAW)) {
@@ -547,7 +550,7 @@
 		}
 
 		skb_queue_tail(&hdev->raw_q, skb);
-		tasklet_schedule(&hdev->tx_task);
+		queue_work(hdev->workqueue, &hdev->tx_work);
 	}
 
 	err = len;
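
The hci_sock.c hunks above are part of the wider move in this series from tasklets (tx_task/cmd_task) to per-hdev workqueue items (tx_work/cmd_work). The following is a rough, self-contained sketch of that conversion pattern using hypothetical names, not anything from hci_core.

/*
 * Sketch of the tasklet -> workqueue conversion; hypothetical names.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct tx_work;

static void example_tx_work(struct work_struct *work)
{
	/* Process context: sleeping locks (mutexes) are allowed here,
	 * which was not the case in the old tasklet handler. */
	pr_info("example: tx work ran\n");
}

static int __init example_wq_init(void)
{
	example_wq = alloc_workqueue("example_wq", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&tx_work, example_tx_work);
	queue_work(example_wq, &tx_work);	/* was: tasklet_schedule(&tx_task) */
	return 0;
}

static void __exit example_wq_exit(void)
{
	cancel_work_sync(&tx_work);
	destroy_workqueue(example_wq);
}

module_init(example_wq_init);
module_exit(example_wq_exit);
MODULE_LICENSE("GPL");

The main behavioural difference is that the deferred handler now runs in process context and may sleep, which fits with the hci_dev_lock_bh() to hci_dev_lock() conversions visible elsewhere in this diff.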
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 661b461..5210956 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -89,11 +89,35 @@
 	.release = bt_link_release,
 };
 
-static void add_conn(struct work_struct *work)
+/*
+ * The rfcomm tty device will possibly retain even when conn
+ * is down, and sysfs doesn't support move zombie device,
+ * so we should move the device before conn device is destroyed.
+ */
+static int __match_tty(struct device *dev, void *data)
 {
-	struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
+	return !strncmp(dev_name(dev), "rfcomm", 6);
+}
+
+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
 	struct hci_dev *hdev = conn->hdev;
 
+	BT_DBG("conn %p", conn);
+
+	conn->dev.type = &bt_link;
+	conn->dev.class = bt_class;
+	conn->dev.parent = &hdev->dev;
+
+	device_initialize(&conn->dev);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p", conn);
+
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
 	dev_set_drvdata(&conn->dev, conn);
@@ -106,19 +130,8 @@
 	hci_dev_hold(hdev);
 }
 
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */
-static int __match_tty(struct device *dev, void *data)
+void hci_conn_del_sysfs(struct hci_conn *conn)
 {
-	return !strncmp(dev_name(dev), "rfcomm", 6);
-}
-
-static void del_conn(struct work_struct *work)
-{
-	struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
 	struct hci_dev *hdev = conn->hdev;
 
 	if (!device_is_registered(&conn->dev))
@@ -140,36 +153,6 @@
 	hci_dev_put(hdev);
 }
 
-void hci_conn_init_sysfs(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	BT_DBG("conn %p", conn);
-
-	conn->dev.type = &bt_link;
-	conn->dev.class = bt_class;
-	conn->dev.parent = &hdev->dev;
-
-	device_initialize(&conn->dev);
-
-	INIT_WORK(&conn->work_add, add_conn);
-	INIT_WORK(&conn->work_del, del_conn);
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
-	BT_DBG("conn %p", conn);
-
-	queue_work(conn->hdev->workqueue, &conn->work_add);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
-	BT_DBG("conn %p", conn);
-
-	queue_work(conn->hdev->workqueue, &conn->work_del);
-}
-
 static inline char *host_bustostr(int bus)
 {
 	switch (bus) {
@@ -403,7 +386,7 @@
 	struct inquiry_cache *cache = &hdev->inq_cache;
 	struct inquiry_entry *e;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	for (e = cache->list; e; e = e->next) {
 		struct inquiry_data *data = &e->data;
@@ -416,7 +399,7 @@
 			   data->rssi, data->ssp_mode, e->timestamp);
 	}
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -436,19 +419,14 @@
 static int blacklist_show(struct seq_file *f, void *p)
 {
 	struct hci_dev *hdev = f->private;
-	struct list_head *l;
+	struct bdaddr_list *b;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
-	list_for_each(l, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(l, struct bdaddr_list, list);
-
+	list_for_each_entry(b, &hdev->blacklist, list)
 		seq_printf(f, "%s\n", batostr(&b->bdaddr));
-	}
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -485,19 +463,14 @@
 static int uuids_show(struct seq_file *f, void *p)
 {
 	struct hci_dev *hdev = f->private;
-	struct list_head *l;
+	struct bt_uuid *uuid;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
-	list_for_each(l, &hdev->uuids) {
-		struct bt_uuid *uuid;
-
-		uuid = list_entry(l, struct bt_uuid, list);
-
+	list_for_each_entry(uuid, &hdev->uuids, list)
 		print_bt_uuid(f, uuid->uuid);
-	}
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -518,11 +491,11 @@
 {
 	struct hci_dev *hdev = data;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hdev->auto_accept_delay = val;
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -531,11 +504,11 @@
 {
 	struct hci_dev *hdev = data;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	*val = hdev->auto_accept_delay;
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -543,22 +516,28 @@
 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
 					auto_accept_delay_set, "%llu\n");
 
-int hci_register_sysfs(struct hci_dev *hdev)
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+	struct device *dev = &hdev->dev;
+
+	dev->type = &bt_host;
+	dev->class = bt_class;
+
+	dev_set_drvdata(dev, hdev);
+	device_initialize(dev);
+}
+
+int hci_add_sysfs(struct hci_dev *hdev)
 {
 	struct device *dev = &hdev->dev;
 	int err;
 
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-	dev->type = &bt_host;
-	dev->class = bt_class;
 	dev->parent = hdev->parent;
-
 	dev_set_name(dev, "%s", hdev->name);
 
-	dev_set_drvdata(dev, hdev);
-
-	err = device_register(dev);
+	err = device_add(dev);
 	if (err < 0)
 		return err;
 
@@ -582,7 +561,7 @@
 	return 0;
 }
 
-void hci_unregister_sysfs(struct hci_dev *hdev)
+void hci_del_sysfs(struct hci_dev *hdev)
 {
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 86a9154..4deaca7 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
+	depends on BT && INPUT && HID_SUPPORT
 	select HID
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 075a3e9..d478be1 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -81,24 +81,20 @@
 static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
 {
 	struct hidp_session *session;
-	struct list_head *p;
 
 	BT_DBG("");
 
-	list_for_each(p, &hidp_session_list) {
-		session = list_entry(p, struct hidp_session, list);
+	list_for_each_entry(session, &hidp_session_list, list) {
 		if (!bacmp(bdaddr, &session->bdaddr))
 			return session;
 	}
+
 	return NULL;
 }
 
 static void __hidp_link_session(struct hidp_session *session)
 {
-	__module_get(THIS_MODULE);
 	list_add(&session->list, &hidp_session_list);
-
-	hci_conn_hold_device(session->conn);
 }
 
 static void __hidp_unlink_session(struct hidp_session *session)
@@ -106,7 +102,6 @@
 	hci_conn_put_device(session->conn);
 
 	list_del(&session->list);
-	module_put(THIS_MODULE);
 }
 
 static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -255,6 +250,9 @@
 
 	BT_DBG("session %p data %p size %d", session, data, size);
 
+	if (atomic_read(&session->terminate))
+		return -EIO;
+
 	skb = alloc_skb(size + 1, GFP_ATOMIC);
 	if (!skb) {
 		BT_ERR("Can't allocate memory for new frame");
@@ -329,6 +327,7 @@
 	struct sk_buff *skb;
 	size_t len;
 	int numbered_reports = hid->report_enum[report_type].numbered;
+	int ret;
 
 	switch (report_type) {
 	case HID_FEATURE_REPORT:
@@ -352,8 +351,9 @@
 	session->waiting_report_number = numbered_reports ? report_number : -1;
 	set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
 	data[0] = report_number;
-	if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
-		goto err_eio;
+	ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
+	if (ret)
+		goto err;
 
 	/* Wait for the return of the report. The returned report
 	   gets put in session->report_return.  */
@@ -365,11 +365,13 @@
 			5*HZ);
 		if (res == 0) {
 			/* timeout */
-			goto err_eio;
+			ret = -EIO;
+			goto err;
 		}
 		if (res < 0) {
 			/* signal */
-			goto err_restartsys;
+			ret = -ERESTARTSYS;
+			goto err;
 		}
 	}
 
@@ -390,14 +392,10 @@
 
 	return len;
 
-err_restartsys:
+err:
 	clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
 	mutex_unlock(&session->report_mutex);
-	return -ERESTARTSYS;
-err_eio:
-	clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
-	mutex_unlock(&session->report_mutex);
-	return -EIO;
+	return ret;
 }
 
 static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
@@ -422,11 +420,10 @@
 
 	/* Set up our wait, and send the report request to the device. */
 	set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
-	if (hidp_send_ctrl_message(hid->driver_data, report_type,
-			data, count)) {
-		ret = -ENOMEM;
+	ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
+									count);
+	if (ret)
 		goto err;
-	}
 
 	/* Wait for the ACK from the device. */
 	while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
@@ -496,10 +493,9 @@
 	case HIDP_HSHK_ERR_INVALID_REPORT_ID:
 	case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
 	case HIDP_HSHK_ERR_INVALID_PARAMETER:
-		if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
-			clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+		if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
 			wake_up_interruptible(&session->report_queue);
-		}
+
 		/* FIXME: Call into SET_ GET_ handlers here */
 		break;
 
@@ -520,10 +516,8 @@
 	}
 
 	/* Wake up the waiting thread. */
-	if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
-		clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+	if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
 		wake_up_interruptible(&session->report_queue);
-	}
 }
 
 static void hidp_process_hid_control(struct hidp_session *session,
@@ -663,7 +657,24 @@
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static void hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_intr_transmit(struct hidp_session *session)
+{
+	struct sk_buff *skb;
+
+	BT_DBG("session %p", session);
+
+	while ((skb = skb_dequeue(&session->intr_transmit))) {
+		if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
+			skb_queue_head(&session->intr_transmit, skb);
+			break;
+		}
+
+		hidp_set_timer(session);
+		kfree_skb(skb);
+	}
+}
+
+static void hidp_process_ctrl_transmit(struct hidp_session *session)
 {
 	struct sk_buff *skb;
 
@@ -678,16 +689,6 @@
 		hidp_set_timer(session);
 		kfree_skb(skb);
 	}
-
-	while ((skb = skb_dequeue(&session->intr_transmit))) {
-		if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
-			skb_queue_head(&session->intr_transmit, skb);
-			break;
-		}
-
-		hidp_set_timer(session);
-		kfree_skb(skb);
-	}
 }
 
 static int hidp_session(void *arg)
@@ -700,6 +701,7 @@
 
 	BT_DBG("session %p", session);
 
+	__module_get(THIS_MODULE);
 	set_user_nice(current, -15);
 
 	init_waitqueue_entry(&ctrl_wait, current);
@@ -714,14 +716,6 @@
 				intr_sk->sk_state != BT_CONNECTED)
 			break;
 
-		while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
-			skb_orphan(skb);
-			if (!skb_linearize(skb))
-				hidp_recv_ctrl_frame(session, skb);
-			else
-				kfree_skb(skb);
-		}
-
 		while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
 			skb_orphan(skb);
 			if (!skb_linearize(skb))
@@ -730,7 +724,17 @@
 				kfree_skb(skb);
 		}
 
-		hidp_process_transmit(session);
+		hidp_process_intr_transmit(session);
+
+		while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+			skb_orphan(skb);
+			if (!skb_linearize(skb))
+				hidp_recv_ctrl_frame(session, skb);
+			else
+				kfree_skb(skb);
+		}
+
+		hidp_process_ctrl_transmit(session);
 
 		schedule();
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -739,6 +743,10 @@
 	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
 
+	clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+	clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+	wake_up_interruptible(&session->report_queue);
+
 	down_write(&hidp_session_sem);
 
 	hidp_del_timer(session);
@@ -772,34 +780,37 @@
 
 	kfree(session->rd_data);
 	kfree(session);
+	module_put_and_exit(0);
 	return 0;
 }
 
-static struct device *hidp_get_device(struct hidp_session *session)
+static struct hci_conn *hidp_get_connection(struct hidp_session *session)
 {
 	bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
 	bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
-	struct device *device = NULL;
+	struct hci_conn *conn;
 	struct hci_dev *hdev;
 
 	hdev = hci_get_route(dst, src);
 	if (!hdev)
 		return NULL;
 
-	session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-	if (session->conn)
-		device = &session->conn->dev;
+	hci_dev_lock(hdev);
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+	if (conn)
+		hci_conn_hold_device(conn);
+	hci_dev_unlock(hdev);
 
 	hci_dev_put(hdev);
 
-	return device;
+	return conn;
 }
 
 static int hidp_setup_input(struct hidp_session *session,
 				struct hidp_connadd_req *req)
 {
 	struct input_dev *input;
-	int err, i;
+	int i;
 
 	input = input_allocate_device();
 	if (!input)
@@ -842,17 +853,10 @@
 		input->relbit[0] |= BIT_MASK(REL_WHEEL);
 	}
 
-	input->dev.parent = hidp_get_device(session);
+	input->dev.parent = &session->conn->dev;
 
 	input->event = hidp_input_event;
 
-	err = input_register_device(input);
-	if (err < 0) {
-		input_free_device(input);
-		session->input = NULL;
-		return err;
-	}
-
 	return 0;
 }
 
@@ -949,7 +953,7 @@
 	strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
 	strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
 
-	hid->dev.parent = hidp_get_device(session);
+	hid->dev.parent = &session->conn->dev;
 	hid->ll_driver = &hidp_hid_driver;
 
 	hid->hid_get_raw_report = hidp_get_raw_report;
@@ -976,18 +980,20 @@
 			bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
 		return -ENOTUNIQ;
 
-	session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
-	if (!session)
-		return -ENOMEM;
-
 	BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
 
 	down_write(&hidp_session_sem);
 
 	s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
 	if (s && s->state == BT_CONNECTED) {
-		err = -EEXIST;
-		goto failed;
+		up_write(&hidp_session_sem);
+		return -EEXIST;
+	}
+
+	session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
+	if (!session) {
+		up_write(&hidp_session_sem);
+		return -ENOMEM;
 	}
 
 	bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
@@ -1003,6 +1009,12 @@
 	session->intr_sock = intr_sock;
 	session->state     = BT_CONNECTED;
 
+	session->conn = hidp_get_connection(session);
+	if (!session->conn) {
+		err = -ENOTCONN;
+		goto failed;
+	}
+
 	setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
 
 	skb_queue_head_init(&session->ctrl_transmit);
@@ -1015,9 +1027,11 @@
 	session->flags   = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
 	session->idle_to = req->idle_to;
 
+	__hidp_link_session(session);
+
 	if (req->rd_size > 0) {
 		err = hidp_setup_hid(session, req);
-		if (err && err != -ENODEV)
+		if (err)
 			goto purge;
 	}
 
@@ -1027,8 +1041,6 @@
 			goto purge;
 	}
 
-	__hidp_link_session(session);
-
 	hidp_set_timer(session);
 
 	if (session->hid) {
@@ -1054,7 +1066,11 @@
 			!session->waiting_for_startup);
 	}
 
-	err = hid_add_device(session->hid);
+	if (session->hid)
+		err = hid_add_device(session->hid);
+	else
+		err = input_register_device(session->input);
+
 	if (err < 0) {
 		atomic_inc(&session->terminate);
 		wake_up_process(session->task);
@@ -1077,8 +1093,6 @@
 unlink:
 	hidp_del_timer(session);
 
-	__hidp_unlink_session(session);
-
 	if (session->input) {
 		input_unregister_device(session->input);
 		session->input = NULL;
@@ -1093,6 +1107,8 @@
 	session->rd_data = NULL;
 
 purge:
+	__hidp_unlink_session(session);
+
 	skb_queue_purge(&session->ctrl_transmit);
 	skb_queue_purge(&session->intr_transmit);
 
@@ -1134,19 +1150,16 @@
 
 int hidp_get_connlist(struct hidp_connlist_req *req)
 {
-	struct list_head *p;
+	struct hidp_session *session;
 	int err = 0, n = 0;
 
 	BT_DBG("");
 
 	down_read(&hidp_session_sem);
 
-	list_for_each(p, &hidp_session_list) {
-		struct hidp_session *session;
+	list_for_each_entry(session, &hidp_session_list, list) {
 		struct hidp_conninfo ci;
 
-		session = list_entry(p, struct hidp_session, list);
-
 		__hidp_copy_session(session, &ci);
 
 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
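
The hidp changes move the module reference from __hidp_link_session() into the session thread itself: hidp_session() now does __module_get() on entry and module_put_and_exit() as its last action. Below is a compilable sketch of that lifetime pattern with invented names; unlike the patch it takes the reference in the creator to sidestep the small window before the thread runs (in hidp the caller's open control socket presumably keeps the module pinned across that window).

/*
 * Sketch of the module-reference lifetime used by a session thread;
 * hypothetical names, reference taken by the creator, dropped by the thread.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>

static int example_session(void *arg)
{
	msleep(5000);			/* stand-in for the real session loop */

	/* Last action: drop the reference taken at creation and exit.
	 * module_put_and_exit() does not return. */
	module_put_and_exit(0);
	return 0;
}

static int __init example_session_init(void)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);	/* owned by the session thread from now on */

	task = kthread_run(example_session, NULL, "example_session");
	if (IS_ERR(task)) {
		module_put(THIS_MODULE);
		return PTR_ERR(task);
	}
	return 0;
}

static void __exit example_session_exit(void)
{
	/* rmmod is refused while the thread still holds its reference. */
}

module_init(example_session_init);
module_exit(example_session_exit);
MODULE_LICENSE("GPL");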
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1..aa78d8c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3,6 +3,7 @@
    Copyright (C) 2000-2001 Qualcomm Incorporated
    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
    Copyright (C) 2010 Google Inc.
+   Copyright (C) 2011 ProFUSION Embedded Systems
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -56,10 +57,10 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
-int disable_ertm;
+bool disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
 
 static LIST_HEAD(chan_list);
 static DEFINE_RWLOCK(chan_list_lock);
@@ -76,38 +77,38 @@
 
 /* ---- L2CAP channels ---- */
 
-static inline void chan_hold(struct l2cap_chan *c)
-{
-	atomic_inc(&c->refcnt);
-}
-
-static inline void chan_put(struct l2cap_chan *c)
-{
-	if (atomic_dec_and_test(&c->refcnt))
-		kfree(c);
-}
-
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
 {
-	struct l2cap_chan *c;
+	struct l2cap_chan *c, *r = NULL;
 
-	list_for_each_entry(c, &conn->chan_l, list) {
-		if (c->dcid == cid)
-			return c;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &conn->chan_l, list) {
+		if (c->dcid == cid) {
+			r = c;
+			break;
+		}
 	}
-	return NULL;
 
+	rcu_read_unlock();
+	return r;
 }
 
 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
 {
-	struct l2cap_chan *c;
+	struct l2cap_chan *c, *r = NULL;
 
-	list_for_each_entry(c, &conn->chan_l, list) {
-		if (c->scid == cid)
-			return c;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &conn->chan_l, list) {
+		if (c->scid == cid) {
+			r = c;
+			break;
+		}
 	}
-	return NULL;
+
+	rcu_read_unlock();
+	return r;
 }
 
 /* Find channel with given SCID.
@@ -116,34 +117,36 @@
 {
 	struct l2cap_chan *c;
 
-	read_lock(&conn->chan_lock);
 	c = __l2cap_get_chan_by_scid(conn, cid);
 	if (c)
-		bh_lock_sock(c->sk);
-	read_unlock(&conn->chan_lock);
+		lock_sock(c->sk);
 	return c;
 }
 
 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
 {
-	struct l2cap_chan *c;
+	struct l2cap_chan *c, *r = NULL;
 
-	list_for_each_entry(c, &conn->chan_l, list) {
-		if (c->ident == ident)
-			return c;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &conn->chan_l, list) {
+		if (c->ident == ident) {
+			r = c;
+			break;
+		}
 	}
-	return NULL;
+
+	rcu_read_unlock();
+	return r;
 }
 
 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
 {
 	struct l2cap_chan *c;
 
-	read_lock(&conn->chan_lock);
 	c = __l2cap_get_chan_by_ident(conn, ident);
 	if (c)
-		bh_lock_sock(c->sk);
-	read_unlock(&conn->chan_lock);
+		lock_sock(c->sk);
 	return c;
 }
 
@@ -153,12 +156,9 @@
 
 	list_for_each_entry(c, &chan_list, global_l) {
 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
-			goto found;
+			return c;
 	}
-
-	c = NULL;
-found:
-	return c;
+	return NULL;
 }
 
 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
@@ -217,45 +217,51 @@
 	return 0;
 }
 
-static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
+static char *state_to_string(int state)
 {
-	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+	switch(state) {
+	case BT_CONNECTED:
+		return "BT_CONNECTED";
+	case BT_OPEN:
+		return "BT_OPEN";
+	case BT_BOUND:
+		return "BT_BOUND";
+	case BT_LISTEN:
+		return "BT_LISTEN";
+	case BT_CONNECT:
+		return "BT_CONNECT";
+	case BT_CONNECT2:
+		return "BT_CONNECT2";
+	case BT_CONFIG:
+		return "BT_CONFIG";
+	case BT_DISCONN:
+		return "BT_DISCONN";
+	case BT_CLOSED:
+		return "BT_CLOSED";
+	}
 
-	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
-		chan_hold(chan);
-}
-
-static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
-{
-	BT_DBG("chan %p state %d", chan, chan->state);
-
-	if (timer_pending(timer) && del_timer(timer))
-		chan_put(chan);
+	return "invalid state";
 }
 
 static void l2cap_state_change(struct l2cap_chan *chan, int state)
 {
+	BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
+						state_to_string(state));
+
 	chan->state = state;
 	chan->ops->state_change(chan->data, state);
 }
 
-static void l2cap_chan_timeout(unsigned long arg)
+static void l2cap_chan_timeout(struct work_struct *work)
 {
-	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
+	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+							chan_timer.work);
 	struct sock *sk = chan->sk;
 	int reason;
 
 	BT_DBG("chan %p state %d", chan, chan->state);
 
-	bh_lock_sock(sk);
-
-	if (sock_owned_by_user(sk)) {
-		/* sk is owned by user. Try again later */
-		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
-		bh_unlock_sock(sk);
-		chan_put(chan);
-		return;
-	}
+	lock_sock(sk);
 
 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
 		reason = ECONNREFUSED;
@@ -267,10 +273,10 @@
 
 	l2cap_chan_close(chan, reason);
 
-	bh_unlock_sock(sk);
+	release_sock(sk);
 
 	chan->ops->close(chan->data);
-	chan_put(chan);
+	l2cap_chan_put(chan);
 }
 
 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
@@ -287,12 +293,14 @@
 	list_add(&chan->global_l, &chan_list);
 	write_unlock_bh(&chan_list_lock);
 
-	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
+	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
 
 	chan->state = BT_OPEN;
 
 	atomic_set(&chan->refcnt, 1);
 
+	BT_DBG("sk %p chan %p", sk, chan);
+
 	return chan;
 }
 
@@ -302,15 +310,15 @@
 	list_del(&chan->global_l);
 	write_unlock_bh(&chan_list_lock);
 
-	chan_put(chan);
+	l2cap_chan_put(chan);
 }
 
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
 			chan->psm, chan->dcid);
 
-	conn->disc_reason = 0x13;
+	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
 
 	chan->conn = conn;
 
@@ -337,9 +345,16 @@
 		chan->omtu = L2CAP_DEFAULT_MTU;
 	}
 
-	chan_hold(chan);
+	chan->local_id		= L2CAP_BESTEFFORT_ID;
+	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
+	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
+	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
+	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
+	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;
 
-	list_add(&chan->list, &conn->chan_l);
+	l2cap_chan_hold(chan);
+
+	list_add_rcu(&chan->list, &conn->chan_l);
 }
 
 /* Delete channel.
@@ -356,10 +371,10 @@
 
 	if (conn) {
 		/* Delete from channel list */
-		write_lock_bh(&conn->chan_lock);
-		list_del(&chan->list);
-		write_unlock_bh(&conn->chan_lock);
-		chan_put(chan);
+		list_del_rcu(&chan->list);
+		synchronize_rcu();
+
+		l2cap_chan_put(chan);
 
 		chan->conn = NULL;
 		hci_conn_put(conn->hcon);
@@ -508,7 +523,7 @@
 }
 
 /* Service level security */
-static inline int l2cap_check_security(struct l2cap_chan *chan)
+int l2cap_chan_check_security(struct l2cap_chan *chan)
 {
 	struct l2cap_conn *conn = chan->conn;
 	__u8 auth_type;
@@ -556,34 +571,58 @@
 		flags = ACL_START;
 
 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+	skb->priority = HCI_PRIO_MAX;
 
-	hci_send_acl(conn->hcon, skb, flags);
+	hci_send_acl(conn->hchan, skb, flags);
 }
 
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+	struct hci_conn *hcon = chan->conn->hcon;
+	u16 flags;
+
+	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+							skb->priority);
+
+	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+					lmp_no_flush_capable(hcon->hdev))
+		flags = ACL_START_NO_FLUSH;
+	else
+		flags = ACL_START;
+
+	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+	hci_send_acl(chan->conn->hchan, skb, flags);
+}
+
+static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
 {
 	struct sk_buff *skb;
 	struct l2cap_hdr *lh;
 	struct l2cap_conn *conn = chan->conn;
-	int count, hlen = L2CAP_HDR_SIZE + 2;
-	u8 flags;
+	int count, hlen;
 
 	if (chan->state != BT_CONNECTED)
 		return;
 
-	if (chan->fcs == L2CAP_FCS_CRC16)
-		hlen += 2;
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		hlen = L2CAP_EXT_HDR_SIZE;
+	else
+		hlen = L2CAP_ENH_HDR_SIZE;
 
-	BT_DBG("chan %p, control 0x%2.2x", chan, control);
+	if (chan->fcs == L2CAP_FCS_CRC16)
+		hlen += L2CAP_FCS_SIZE;
+
+	BT_DBG("chan %p, control 0x%8.8x", chan, control);
 
 	count = min_t(unsigned int, conn->mtu, hlen);
-	control |= L2CAP_CTRL_FRAME_TYPE;
+
+	control |= __set_sframe(chan);
 
 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-		control |= L2CAP_CTRL_FINAL;
+		control |= __set_ctrl_final(chan);
 
 	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
-		control |= L2CAP_CTRL_POLL;
+		control |= __set_ctrl_poll(chan);
 
 	skb = bt_skb_alloc(count, GFP_ATOMIC);
 	if (!skb)
@@ -592,32 +631,27 @@
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
 	lh->cid = cpu_to_le16(chan->dcid);
-	put_unaligned_le16(control, skb_put(skb, 2));
+
+	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
 
 	if (chan->fcs == L2CAP_FCS_CRC16) {
-		u16 fcs = crc16(0, (u8 *)lh, count - 2);
-		put_unaligned_le16(fcs, skb_put(skb, 2));
+		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
 	}
 
-	if (lmp_no_flush_capable(conn->hcon->hdev))
-		flags = ACL_START_NO_FLUSH;
-	else
-		flags = ACL_START;
-
-	bt_cb(skb)->force_active = chan->force_active;
-
-	hci_send_acl(chan->conn->hcon, skb, flags);
+	skb->priority = HCI_PRIO_MAX;
+	l2cap_do_send(chan, skb);
 }
 
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
 {
 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-		control |= L2CAP_SUPER_RCV_NOT_READY;
+		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
 		set_bit(CONN_RNR_SENT, &chan->conn_state);
 	} else
-		control |= L2CAP_SUPER_RCV_READY;
+		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
 
-	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= __set_reqseq(chan, chan->buffer_seq);
 
 	l2cap_send_sframe(chan, control);
 }
@@ -635,7 +669,7 @@
 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
 			return;
 
-		if (l2cap_check_security(chan) &&
+		if (l2cap_chan_check_security(chan) &&
 				__l2cap_no_conn_pending(chan)) {
 			struct l2cap_conn_req req;
 			req.scid = cpu_to_le16(chan->scid);
@@ -654,7 +688,7 @@
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
 		conn->info_ident = l2cap_get_ident(conn);
 
-		mod_timer(&conn->info_timer, jiffies +
+		schedule_delayed_work(&conn->info_timer,
 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
 
 		l2cap_send_cmd(conn, conn->info_ident,
@@ -706,13 +740,13 @@
 /* ---- L2CAP connections ---- */
 static void l2cap_conn_start(struct l2cap_conn *conn)
 {
-	struct l2cap_chan *chan, *tmp;
+	struct l2cap_chan *chan;
 
 	BT_DBG("conn %p", conn);
 
-	read_lock(&conn->chan_lock);
+	rcu_read_lock();
 
-	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
 		struct sock *sk = chan->sk;
 
 		bh_lock_sock(sk);
@@ -725,7 +759,7 @@
 		if (chan->state == BT_CONNECT) {
 			struct l2cap_conn_req req;
 
-			if (!l2cap_check_security(chan) ||
+			if (!l2cap_chan_check_security(chan) ||
 					!__l2cap_no_conn_pending(chan)) {
 				bh_unlock_sock(sk);
 				continue;
@@ -736,9 +770,7 @@
 					&chan->conf_state)) {
 				/* l2cap_chan_close() calls list_del(chan)
 				 * so release the lock */
-				read_unlock(&conn->chan_lock);
 				l2cap_chan_close(chan, ECONNRESET);
-				read_lock(&conn->chan_lock);
 				bh_unlock_sock(sk);
 				continue;
 			}
@@ -758,7 +790,7 @@
 			rsp.scid = cpu_to_le16(chan->dcid);
 			rsp.dcid = cpu_to_le16(chan->scid);
 
-			if (l2cap_check_security(chan)) {
+			if (l2cap_chan_check_security(chan)) {
 				if (bt_sk(sk)->defer_setup) {
 					struct sock *parent = bt_sk(sk)->parent;
 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -794,7 +826,7 @@
 		bh_unlock_sock(sk);
 	}
 
-	read_unlock(&conn->chan_lock);
+	rcu_read_unlock();
 }
 
 /* Find socket with cid and source bdaddr.
@@ -845,7 +877,7 @@
 
 	parent = pchan->sk;
 
-	bh_lock_sock(parent);
+	lock_sock(parent);
 
 	/* Check for backlog size */
 	if (sk_acceptq_is_full(parent)) {
@@ -859,8 +891,6 @@
 
 	sk = chan->sk;
 
-	write_lock_bh(&conn->chan_lock);
-
 	hci_conn_hold(conn->hcon);
 
 	bacpy(&bt_sk(sk)->src, conn->src);
@@ -868,17 +898,15 @@
 
 	bt_accept_enqueue(parent, sk);
 
-	__l2cap_chan_add(conn, chan);
+	l2cap_chan_add(conn, chan);
 
 	__set_chan_timer(chan, sk->sk_sndtimeo);
 
 	l2cap_state_change(chan, BT_CONNECTED);
 	parent->sk_data_ready(parent, 0);
 
-	write_unlock_bh(&conn->chan_lock);
-
 clean:
-	bh_unlock_sock(parent);
+	release_sock(parent);
 }
 
 static void l2cap_chan_ready(struct sock *sk)
@@ -910,9 +938,9 @@
 	if (conn->hcon->out && conn->hcon->type == LE_LINK)
 		smp_conn_security(conn, conn->hcon->pending_sec_level);
 
-	read_lock(&conn->chan_lock);
+	rcu_read_lock();
 
-	list_for_each_entry(chan, &conn->chan_l, list) {
+	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
 		struct sock *sk = chan->sk;
 
 		bh_lock_sock(sk);
@@ -932,7 +960,7 @@
 		bh_unlock_sock(sk);
 	}
 
-	read_unlock(&conn->chan_lock);
+	rcu_read_unlock();
 }
 
 /* Notify sockets that we cannot guaranty reliability anymore */
@@ -942,21 +970,22 @@
 
 	BT_DBG("conn %p", conn);
 
-	read_lock(&conn->chan_lock);
+	rcu_read_lock();
 
-	list_for_each_entry(chan, &conn->chan_l, list) {
+	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
 		struct sock *sk = chan->sk;
 
-		if (chan->force_reliable)
+		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
 			sk->sk_err = err;
 	}
 
-	read_unlock(&conn->chan_lock);
+	rcu_read_unlock();
 }
 
-static void l2cap_info_timeout(unsigned long arg)
+static void l2cap_info_timeout(struct work_struct *work)
 {
-	struct l2cap_conn *conn = (void *) arg;
+	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+							info_timer.work);
 
 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
 	conn->info_ident = 0;
@@ -980,17 +1009,19 @@
 	/* Kill channels */
 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
 		sk = chan->sk;
-		bh_lock_sock(sk);
+		lock_sock(sk);
 		l2cap_chan_del(chan, err);
-		bh_unlock_sock(sk);
+		release_sock(sk);
 		chan->ops->close(chan->data);
 	}
 
+	hci_chan_del(conn->hchan);
+
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-		del_timer_sync(&conn->info_timer);
+		__cancel_delayed_work(&conn->info_timer);
 
 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
-		del_timer(&conn->security_timer);
+		__cancel_delayed_work(&conn->security_timer);
 		smp_chan_destroy(conn);
 	}
 
@@ -998,9 +1029,10 @@
 	kfree(conn);
 }
 
-static void security_timeout(unsigned long arg)
+static void security_timeout(struct work_struct *work)
 {
-	struct l2cap_conn *conn = (void *) arg;
+	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+						security_timer.work);
 
 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
 }
@@ -1008,18 +1040,26 @@
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct hci_chan *hchan;
 
 	if (conn || status)
 		return conn;
 
-	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
-	if (!conn)
+	hchan = hci_chan_create(hcon);
+	if (!hchan)
 		return NULL;
 
+	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
+	if (!conn) {
+		hci_chan_del(hchan);
+		return NULL;
+	}
+
 	hcon->l2cap_data = conn;
 	conn->hcon = hcon;
+	conn->hchan = hchan;
 
-	BT_DBG("hcon %p conn %p", hcon, conn);
+	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
 
 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
 		conn->mtu = hcon->hdev->le_mtu;
@@ -1032,29 +1072,19 @@
 	conn->feat_mask = 0;
 
 	spin_lock_init(&conn->lock);
-	rwlock_init(&conn->chan_lock);
 
 	INIT_LIST_HEAD(&conn->chan_l);
 
 	if (hcon->type == LE_LINK)
-		setup_timer(&conn->security_timer, security_timeout,
-						(unsigned long) conn);
+		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
 	else
-		setup_timer(&conn->info_timer, l2cap_info_timeout,
-						(unsigned long) conn);
+		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
 
-	conn->disc_reason = 0x13;
+	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
 
 	return conn;
 }
 
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
-{
-	write_lock_bh(&conn->chan_lock);
-	__l2cap_chan_add(conn, chan);
-	write_unlock_bh(&conn->chan_lock);
-}
-
 /* ---- Socket interface ---- */
 
 /* Find socket with psm and source bdaddr.
@@ -1090,11 +1120,10 @@
 	return c1;
 }
 
-int l2cap_chan_connect(struct l2cap_chan *chan)
+inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
 {
 	struct sock *sk = chan->sk;
 	bdaddr_t *src = &bt_sk(sk)->src;
-	bdaddr_t *dst = &bt_sk(sk)->dst;
 	struct l2cap_conn *conn;
 	struct hci_conn *hcon;
 	struct hci_dev *hdev;
@@ -1108,7 +1137,62 @@
 	if (!hdev)
 		return -EHOSTUNREACH;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
+
+	lock_sock(sk);
+
+	/* PSM must be odd and lsb of upper byte must be 0 */
+	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	switch (chan->mode) {
+	case L2CAP_MODE_BASIC:
+		break;
+	case L2CAP_MODE_ERTM:
+	case L2CAP_MODE_STREAMING:
+		if (!disable_ertm)
+			break;
+		/* fall through */
+	default:
+		err = -ENOTSUPP;
+		goto done;
+	}
+
+	switch (sk->sk_state) {
+	case BT_CONNECT:
+	case BT_CONNECT2:
+	case BT_CONFIG:
+		/* Already connecting */
+		err = 0;
+		goto done;
+
+	case BT_CONNECTED:
+		/* Already connected */
+		err = -EISCONN;
+		goto done;
+
+	case BT_OPEN:
+	case BT_BOUND:
+		/* Can connect */
+		break;
+
+	default:
+		err = -EBADFD;
+		goto done;
+	}
+
+	/* Set destination address and psm */
+	bacpy(&bt_sk(sk)->dst, dst);
+	chan->psm = psm;
+	chan->dcid = cid;
 
 	auth_type = l2cap_get_auth_type(chan);
 
@@ -1142,7 +1226,7 @@
 	if (hcon->state == BT_CONNECTED) {
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
 			__clear_chan_timer(chan);
-			if (l2cap_check_security(chan))
+			if (l2cap_chan_check_security(chan))
 				l2cap_state_change(chan, BT_CONNECTED);
 		} else
 			l2cap_do_start(chan);
@@ -1151,7 +1235,7 @@
 	err = 0;
 
 done:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -1188,17 +1272,18 @@
 	return err;
 }
 
-static void l2cap_monitor_timeout(unsigned long arg)
+static void l2cap_monitor_timeout(struct work_struct *work)
 {
-	struct l2cap_chan *chan = (void *) arg;
+	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+							monitor_timer.work);
 	struct sock *sk = chan->sk;
 
 	BT_DBG("chan %p", chan);
 
-	bh_lock_sock(sk);
+	lock_sock(sk);
 	if (chan->retry_count >= chan->remote_max_tx) {
 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
-		bh_unlock_sock(sk);
+		release_sock(sk);
 		return;
 	}
 
@@ -1206,24 +1291,25 @@
 	__set_monitor_timer(chan);
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
-	bh_unlock_sock(sk);
+	release_sock(sk);
 }
 
-static void l2cap_retrans_timeout(unsigned long arg)
+static void l2cap_retrans_timeout(struct work_struct *work)
 {
-	struct l2cap_chan *chan = (void *) arg;
+	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+							retrans_timer.work);
 	struct sock *sk = chan->sk;
 
 	BT_DBG("chan %p", chan);
 
-	bh_lock_sock(sk);
+	lock_sock(sk);
 	chan->retry_count = 1;
 	__set_monitor_timer(chan);
 
 	set_bit(CONN_WAIT_F, &chan->conn_state);
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
-	bh_unlock_sock(sk);
+	release_sock(sk);
 }
 
 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1245,60 +1331,46 @@
 		__clear_retrans_timer(chan);
 }
 
-static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
-{
-	struct hci_conn *hcon = chan->conn->hcon;
-	u16 flags;
-
-	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
-
-	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
-		flags = ACL_START_NO_FLUSH;
-	else
-		flags = ACL_START;
-
-	bt_cb(skb)->force_active = chan->force_active;
-	hci_send_acl(hcon, skb, flags);
-}
-
 static void l2cap_streaming_send(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb;
-	u16 control, fcs;
+	u32 control;
+	u16 fcs;
 
 	while ((skb = skb_dequeue(&chan->tx_q))) {
-		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
-		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
+		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
+		control |= __set_txseq(chan, chan->next_tx_seq);
+		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
 
 		if (chan->fcs == L2CAP_FCS_CRC16) {
-			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
-			put_unaligned_le16(fcs, skb->data + skb->len - 2);
+			fcs = crc16(0, (u8 *)skb->data,
+						skb->len - L2CAP_FCS_SIZE);
+			put_unaligned_le16(fcs,
+					skb->data + skb->len - L2CAP_FCS_SIZE);
 		}
 
 		l2cap_do_send(chan, skb);
 
-		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
 	}
 }
 
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
 {
 	struct sk_buff *skb, *tx_skb;
-	u16 control, fcs;
+	u16 fcs;
+	u32 control;
 
 	skb = skb_peek(&chan->tx_q);
 	if (!skb)
 		return;
 
-	do {
-		if (bt_cb(skb)->tx_seq == tx_seq)
-			break;
-
+	while (bt_cb(skb)->tx_seq != tx_seq) {
 		if (skb_queue_is_last(&chan->tx_q, skb))
 			return;
 
-	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
+		skb = skb_queue_next(&chan->tx_q, skb);
+	}
 
 	if (chan->remote_max_tx &&
 			bt_cb(skb)->retries == chan->remote_max_tx) {
@@ -1308,20 +1380,23 @@
 
 	tx_skb = skb_clone(skb, GFP_ATOMIC);
 	bt_cb(skb)->retries++;
-	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-	control &= L2CAP_CTRL_SAR;
+
+	control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+	control &= __get_sar_mask(chan);
 
 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-		control |= L2CAP_CTRL_FINAL;
+		control |= __set_ctrl_final(chan);
 
-	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+	control |= __set_reqseq(chan, chan->buffer_seq);
+	control |= __set_txseq(chan, tx_seq);
 
-	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+	__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
 
 	if (chan->fcs == L2CAP_FCS_CRC16) {
-		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+		fcs = crc16(0, (u8 *)tx_skb->data,
+						tx_skb->len - L2CAP_FCS_SIZE);
+		put_unaligned_le16(fcs,
+				tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
 	}
 
 	l2cap_do_send(chan, tx_skb);
@@ -1330,7 +1405,8 @@
 static int l2cap_ertm_send(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb, *tx_skb;
-	u16 control, fcs;
+	u16 fcs;
+	u32 control;
 	int nsent = 0;
 
 	if (chan->state != BT_CONNECTED)
@@ -1348,20 +1424,22 @@
 
 		bt_cb(skb)->retries++;
 
-		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-		control &= L2CAP_CTRL_SAR;
+		control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+		control &= __get_sar_mask(chan);
 
 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-			control |= L2CAP_CTRL_FINAL;
+			control |= __set_ctrl_final(chan);
 
-		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+		control |= __set_reqseq(chan, chan->buffer_seq);
+		control |= __set_txseq(chan, chan->next_tx_seq);
 
+		__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
 
 		if (chan->fcs == L2CAP_FCS_CRC16) {
-			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
-			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+			fcs = crc16(0, (u8 *)skb->data,
+						tx_skb->len - L2CAP_FCS_SIZE);
+			put_unaligned_le16(fcs, skb->data +
+						tx_skb->len - L2CAP_FCS_SIZE);
 		}
 
 		l2cap_do_send(chan, tx_skb);
@@ -1369,7 +1447,8 @@
 		__set_retrans_timer(chan);
 
 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
-		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+
+		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
 
 		if (bt_cb(skb)->retries == 1)
 			chan->unacked_frames++;
@@ -1401,12 +1480,12 @@
 
 static void l2cap_send_ack(struct l2cap_chan *chan)
 {
-	u16 control = 0;
+	u32 control = 0;
 
-	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= __set_reqseq(chan, chan->buffer_seq);
 
 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-		control |= L2CAP_SUPER_RCV_NOT_READY;
+		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
 		set_bit(CONN_RNR_SENT, &chan->conn_state);
 		l2cap_send_sframe(chan, control);
 		return;
@@ -1415,20 +1494,20 @@
 	if (l2cap_ertm_send(chan) > 0)
 		return;
 
-	control |= L2CAP_SUPER_RCV_READY;
+	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
 	l2cap_send_sframe(chan, control);
 }
 
 static void l2cap_send_srejtail(struct l2cap_chan *chan)
 {
 	struct srej_list *tail;
-	u16 control;
+	u32 control;
 
-	control = L2CAP_SUPER_SELECT_REJECT;
-	control |= L2CAP_CTRL_FINAL;
+	control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+	control |= __set_ctrl_final(chan);
 
 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
-	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= __set_reqseq(chan, tail->tx_seq);
 
 	l2cap_send_sframe(chan, control);
 }
@@ -1456,6 +1535,8 @@
 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
 			return -EFAULT;
 
+		(*frag)->priority = skb->priority;
+
 		sent += count;
 		len  -= count;
 
@@ -1465,15 +1546,17 @@
 	return sent;
 }
 
-static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+						struct msghdr *msg, size_t len,
+						u32 priority)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
 	struct sk_buff *skb;
-	int err, count, hlen = L2CAP_HDR_SIZE + 2;
+	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
 	struct l2cap_hdr *lh;
 
-	BT_DBG("sk %p len %d", sk, (int)len);
+	BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
 
 	count = min_t(unsigned int, (conn->mtu - hlen), len);
 	skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1481,6 +1564,8 @@
 	if (!skb)
 		return ERR_PTR(err);
 
+	skb->priority = priority;
+
 	/* Create L2CAP header */
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->cid = cpu_to_le16(chan->dcid);
@@ -1495,7 +1580,9 @@
 	return skb;
 }
 
-static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+						struct msghdr *msg, size_t len,
+						u32 priority)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
@@ -1511,6 +1598,8 @@
 	if (!skb)
 		return ERR_PTR(err);
 
+	skb->priority = priority;
+
 	/* Create L2CAP header */
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->cid = cpu_to_le16(chan->dcid);
@@ -1526,12 +1615,12 @@
 
 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 						struct msghdr *msg, size_t len,
-						u16 control, u16 sdulen)
+						u32 control, u16 sdulen)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
 	struct sk_buff *skb;
-	int err, count, hlen = L2CAP_HDR_SIZE + 2;
+	int err, count, hlen;
 	struct l2cap_hdr *lh;
 
 	BT_DBG("sk %p len %d", sk, (int)len);
@@ -1539,11 +1628,16 @@
 	if (!conn)
 		return ERR_PTR(-ENOTCONN);
 
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		hlen = L2CAP_EXT_HDR_SIZE;
+	else
+		hlen = L2CAP_ENH_HDR_SIZE;
+
 	if (sdulen)
-		hlen += 2;
+		hlen += L2CAP_SDULEN_SIZE;
 
 	if (chan->fcs == L2CAP_FCS_CRC16)
-		hlen += 2;
+		hlen += L2CAP_FCS_SIZE;
 
 	count = min_t(unsigned int, (conn->mtu - hlen), len);
 	skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1555,9 +1649,11 @@
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-	put_unaligned_le16(control, skb_put(skb, 2));
+
+	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+
 	if (sdulen)
-		put_unaligned_le16(sdulen, skb_put(skb, 2));
+		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
 
 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
 	if (unlikely(err < 0)) {
@@ -1566,7 +1662,7 @@
 	}
 
 	if (chan->fcs == L2CAP_FCS_CRC16)
-		put_unaligned_le16(0, skb_put(skb, 2));
+		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
 
 	bt_cb(skb)->retries = 0;
 	return skb;
@@ -1576,11 +1672,11 @@
 {
 	struct sk_buff *skb;
 	struct sk_buff_head sar_queue;
-	u16 control;
+	u32 control;
 	size_t size = 0;
 
 	skb_queue_head_init(&sar_queue);
-	control = L2CAP_SDU_START;
+	control = __set_ctrl_sar(chan, L2CAP_SAR_START);
 	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
@@ -1593,10 +1689,10 @@
 		size_t buflen;
 
 		if (len > chan->remote_mps) {
-			control = L2CAP_SDU_CONTINUE;
+			control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
 			buflen = chan->remote_mps;
 		} else {
-			control = L2CAP_SDU_END;
+			control = __set_ctrl_sar(chan, L2CAP_SAR_END);
 			buflen = len;
 		}
 
@@ -1617,15 +1713,16 @@
 	return size;
 }
 
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+								u32 priority)
 {
 	struct sk_buff *skb;
-	u16 control;
+	u32 control;
 	int err;
 
 	/* Connectionless channel */
 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
-		skb = l2cap_create_connless_pdu(chan, msg, len);
+		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
 		if (IS_ERR(skb))
 			return PTR_ERR(skb);
 
@@ -1640,7 +1737,7 @@
 			return -EMSGSIZE;
 
 		/* Create a basic PDU */
-		skb = l2cap_create_basic_pdu(chan, msg, len);
+		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
 		if (IS_ERR(skb))
 			return PTR_ERR(skb);
 
@@ -1652,7 +1749,7 @@
 	case L2CAP_MODE_STREAMING:
 		/* Entire SDU fits into one PDU */
 		if (len <= chan->remote_mps) {
-			control = L2CAP_SDU_UNSEGMENTED;
+			control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
 			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
 									0);
 			if (IS_ERR(skb))
@@ -1704,8 +1801,9 @@
 
 	BT_DBG("conn %p", conn);
 
-	read_lock(&conn->chan_lock);
-	list_for_each_entry(chan, &conn->chan_l, list) {
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
 		struct sock *sk = chan->sk;
 		if (chan->chan_type != L2CAP_CHAN_RAW)
 			continue;
@@ -1720,7 +1818,8 @@
 		if (chan->ops->recv(chan->data, nskb))
 			kfree_skb(nskb);
 	}
-	read_unlock(&conn->chan_lock);
+
+	rcu_read_unlock();
 }
 
 /* ---- L2CAP signalling commands ---- */
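
Throughout this file the conn->chan_l list drops its rwlock in favour of RCU: lookups run under rcu_read_lock() with list_for_each_entry_rcu(), while writers use list_add_rcu()/list_del_rcu() plus synchronize_rcu() before freeing (see l2cap_chan_add() and l2cap_chan_del() above). The following is a stripped-down sketch of the same pattern with hypothetical names and a plain spinlock standing in for whatever serializes the real writers.

/*
 * Sketch of an RCU-protected channel list; hypothetical names.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_chan {
	u16 scid;
	struct list_head list;
};

static LIST_HEAD(example_chan_list);
static DEFINE_SPINLOCK(example_chan_lock);	/* writers only */

/* Lock-free lookup, mirroring __l2cap_get_chan_by_scid() above. */
static struct example_chan *example_chan_lookup(u16 scid)
{
	struct example_chan *c, *r = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &example_chan_list, list) {
		if (c->scid == scid) {
			r = c;
			break;
		}
	}

	rcu_read_unlock();

	/* In the real code a refcount (l2cap_chan_hold()) must keep *r
	 * alive once the read-side critical section ends. */
	return r;
}

static void example_chan_del(struct example_chan *c)
{
	spin_lock(&example_chan_lock);
	list_del_rcu(&c->list);
	spin_unlock(&example_chan_lock);

	synchronize_rcu();		/* let readers drain before freeing */
	kfree(c);
}

static int __init example_rcu_init(void)
{
	struct example_chan *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;

	c->scid = 0x0040;

	spin_lock(&example_chan_lock);
	list_add_rcu(&c->list, &example_chan_list);
	spin_unlock(&example_chan_lock);

	pr_info("example: lookup 0x0040 -> %p\n", example_chan_lookup(0x0040));
	example_chan_del(c);
	return 0;
}

static void __exit example_rcu_exit(void)
{
}

module_init(example_rcu_init);
module_exit(example_rcu_exit);
MODULE_LICENSE("GPL");

As in the kernel code, the pointer returned by the lookup is only safe to dereference because something else keeps the object alive after rcu_read_unlock().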
@@ -1850,37 +1949,64 @@
 	*ptr += L2CAP_CONF_OPT_SIZE + len;
 }
 
-static void l2cap_ack_timeout(unsigned long arg)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
 {
-	struct l2cap_chan *chan = (void *) arg;
+	struct l2cap_conf_efs efs;
 
-	bh_lock_sock(chan->sk);
+	switch (chan->mode) {
+	case L2CAP_MODE_ERTM:
+		efs.id		= chan->local_id;
+		efs.stype	= chan->local_stype;
+		efs.msdu	= cpu_to_le16(chan->local_msdu);
+		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
+		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+		efs.flush_to	= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+		break;
+
+	case L2CAP_MODE_STREAMING:
+		efs.id		= 1;
+		efs.stype	= L2CAP_SERV_BESTEFFORT;
+		efs.msdu	= cpu_to_le16(chan->local_msdu);
+		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
+		efs.acc_lat	= 0;
+		efs.flush_to	= 0;
+		break;
+
+	default:
+		return;
+	}
+
+	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+							(unsigned long) &efs);
+}
+
+static void l2cap_ack_timeout(struct work_struct *work)
+{
+	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+							ack_timer.work);
+
+	BT_DBG("chan %p", chan);
+
+	lock_sock(chan->sk);
 	l2cap_send_ack(chan);
-	bh_unlock_sock(chan->sk);
+	release_sock(chan->sk);
 }
 
 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
 {
-	struct sock *sk = chan->sk;
-
 	chan->expected_ack_seq = 0;
 	chan->unacked_frames = 0;
 	chan->buffer_seq = 0;
 	chan->num_acked = 0;
 	chan->frames_sent = 0;
 
-	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
-							(unsigned long) chan);
-	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
-							(unsigned long) chan);
-	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
+	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
 
 	skb_queue_head_init(&chan->srej_q);
 
 	INIT_LIST_HEAD(&chan->srej_l);
-
-
-	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
 }
 
 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -1896,11 +2022,36 @@
 	}
 }
 
+static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+{
+	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+{
+	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+}
+
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+{
+	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+						__l2cap_ews_supported(chan)) {
+		/* use extended control field */
+		set_bit(FLAG_EXT_CTRL, &chan->flags);
+		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+	} else {
+		chan->tx_win = min_t(u16, chan->tx_win,
+						L2CAP_DEFAULT_TX_WINDOW);
+		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+	}
+}
+
 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
 {
 	struct l2cap_conf_req *req = data;
 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
 	void *ptr = req->data;
+	u16 size;
 
 	BT_DBG("chan %p", chan);
 
@@ -1913,6 +2064,9 @@
 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
 			break;
 
+		if (__l2cap_efs_supported(chan))
+			set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
 		/* fall through */
 	default:
 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
@@ -1942,17 +2096,27 @@
 
 	case L2CAP_MODE_ERTM:
 		rfc.mode            = L2CAP_MODE_ERTM;
-		rfc.txwin_size      = chan->tx_win;
 		rfc.max_transmit    = chan->max_tx;
 		rfc.retrans_timeout = 0;
 		rfc.monitor_timeout = 0;
-		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
-			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+						L2CAP_EXT_HDR_SIZE -
+						L2CAP_SDULEN_SIZE -
+						L2CAP_FCS_SIZE);
+		rfc.max_pdu_size = cpu_to_le16(size);
+
+		l2cap_txwin_setup(chan);
+
+		rfc.txwin_size = min_t(u16, chan->tx_win,
+						L2CAP_DEFAULT_TX_WINDOW);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
 							(unsigned long) &rfc);
 
+		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+			l2cap_add_opt_efs(&ptr, chan);
+
 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
 			break;
 
@@ -1961,6 +2125,10 @@
 			chan->fcs = L2CAP_FCS_NONE;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
+
+		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+								chan->tx_win);
 		break;
 
 	case L2CAP_MODE_STREAMING:
@@ -1969,13 +2137,19 @@
 		rfc.max_transmit    = 0;
 		rfc.retrans_timeout = 0;
 		rfc.monitor_timeout = 0;
-		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
-			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+						L2CAP_EXT_HDR_SIZE -
+						L2CAP_SDULEN_SIZE -
+						L2CAP_FCS_SIZE);
+		rfc.max_pdu_size = cpu_to_le16(size);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
 							(unsigned long) &rfc);
 
+		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+			l2cap_add_opt_efs(&ptr, chan);
+
 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
 			break;
 
@@ -2002,8 +2176,11 @@
 	int type, hint, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+	struct l2cap_conf_efs efs;
+	u8 remote_efs = 0;
 	u16 mtu = L2CAP_DEFAULT_MTU;
 	u16 result = L2CAP_CONF_SUCCESS;
+	u16 size;
 
 	BT_DBG("chan %p", chan);
 
@@ -2033,7 +2210,22 @@
 		case L2CAP_CONF_FCS:
 			if (val == L2CAP_FCS_NONE)
 				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+			break;
 
+		case L2CAP_CONF_EFS:
+			remote_efs = 1;
+			if (olen == sizeof(efs))
+				memcpy(&efs, (void *) val, olen);
+			break;
+
+		case L2CAP_CONF_EWS:
+			if (!enable_hs)
+				return -ECONNREFUSED;
+
+			set_bit(FLAG_EXT_CTRL, &chan->flags);
+			set_bit(CONF_EWS_RECV, &chan->conf_state);
+			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+			chan->remote_tx_win = val;
 			break;
 
 		default:
@@ -2058,6 +2250,13 @@
 			break;
 		}
 
+		if (remote_efs) {
+			if (__l2cap_efs_supported(chan))
+				set_bit(FLAG_EFS_ENABLE, &chan->flags);
+			else
+				return -ECONNREFUSED;
+		}
+
 		if (chan->mode != rfc.mode)
 			return -ECONNREFUSED;
 
@@ -2076,7 +2275,6 @@
 					sizeof(rfc), (unsigned long) &rfc);
 	}
 
-
 	if (result == L2CAP_CONF_SUCCESS) {
 		/* Configure output options and let the other side know
 		 * which ones we don't like. */
@@ -2089,6 +2287,26 @@
 		}
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
 
+		if (remote_efs) {
+			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+					efs.stype != L2CAP_SERV_NOTRAFIC &&
+					efs.stype != chan->local_stype) {
+
+				result = L2CAP_CONF_UNACCEPT;
+
+				if (chan->num_conf_req >= 1)
+					return -ECONNREFUSED;
+
+				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+							sizeof(efs),
+							(unsigned long) &efs);
+			} else {
+				/* Send PENDING Conf Rsp */
+				result = L2CAP_CONF_PENDING;
+				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+			}
+		}
+
 		switch (rfc.mode) {
 		case L2CAP_MODE_BASIC:
 			chan->fcs = L2CAP_FCS_NONE;
@@ -2096,13 +2314,20 @@
 			break;
 
 		case L2CAP_MODE_ERTM:
-			chan->remote_tx_win = rfc.txwin_size;
+			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+				chan->remote_tx_win = rfc.txwin_size;
+			else
+				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
+
 			chan->remote_max_tx = rfc.max_transmit;
 
-			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
-				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
-
-			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+						chan->conn->mtu -
+						L2CAP_EXT_HDR_SIZE -
+						L2CAP_SDULEN_SIZE -
+						L2CAP_FCS_SIZE);
+			rfc.max_pdu_size = cpu_to_le16(size);
+			chan->remote_mps = size;
 
 			rfc.retrans_timeout =
 				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
@@ -2114,13 +2339,29 @@
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
 
+			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+				chan->remote_id = efs.id;
+				chan->remote_stype = efs.stype;
+				chan->remote_msdu = le16_to_cpu(efs.msdu);
+				chan->remote_flush_to =
+						le32_to_cpu(efs.flush_to);
+				chan->remote_acc_lat =
+						le32_to_cpu(efs.acc_lat);
+				chan->remote_sdu_itime =
+					le32_to_cpu(efs.sdu_itime);
+				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+					sizeof(efs), (unsigned long) &efs);
+			}
 			break;
 
 		case L2CAP_MODE_STREAMING:
-			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
-				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
-
-			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+						chan->conn->mtu -
+						L2CAP_EXT_HDR_SIZE -
+						L2CAP_SDULEN_SIZE -
+						L2CAP_FCS_SIZE);
+			rfc.max_pdu_size = cpu_to_le16(size);
+			chan->remote_mps = size;
 
 			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
@@ -2152,7 +2393,8 @@
 	void *ptr = req->data;
 	int type, olen;
 	unsigned long val;
-	struct l2cap_conf_rfc rfc;
+	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+	struct l2cap_conf_efs efs;
 
 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2188,6 +2430,26 @@
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
 			break;
+
+		case L2CAP_CONF_EWS:
+			chan->tx_win = min_t(u16, val,
+						L2CAP_DEFAULT_EXT_WINDOW);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+							chan->tx_win);
+			break;
+
+		case L2CAP_CONF_EFS:
+			if (olen == sizeof(efs))
+				memcpy(&efs, (void *)val, olen);
+
+			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+					efs.stype != L2CAP_SERV_NOTRAFIC &&
+					efs.stype != chan->local_stype)
+				return -ECONNREFUSED;
+
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+					sizeof(efs), (unsigned long) &efs);
+			break;
 		}
 	}
 
@@ -2196,13 +2458,23 @@
 
 	chan->mode = rfc.mode;
 
-	if (*result == L2CAP_CONF_SUCCESS) {
+	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
 		switch (rfc.mode) {
 		case L2CAP_MODE_ERTM:
 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
+
+			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+				chan->local_msdu = le16_to_cpu(efs.msdu);
+				chan->local_sdu_itime =
+						le32_to_cpu(efs.sdu_itime);
+				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+				chan->local_flush_to =
+						le32_to_cpu(efs.flush_to);
+			}
 			break;
+
 		case L2CAP_MODE_STREAMING:
 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
 		}
@@ -2271,6 +2543,16 @@
 		}
 	}
 
+	/* Use sane default values in case a misbehaving remote device
+	 * did not send an RFC option.
+	 */
+	rfc.mode = chan->mode;
+	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+	rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+	BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
 	switch (rfc.mode) {
 	case L2CAP_MODE_ERTM:
@@ -2292,7 +2574,7 @@
 
 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
 					cmd->ident == conn->info_ident) {
-		del_timer(&conn->info_timer);
+		__cancel_delayed_work(&conn->info_timer);
 
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
 		conn->info_ident = 0;
@@ -2325,12 +2607,12 @@
 
 	parent = pchan->sk;
 
-	bh_lock_sock(parent);
+	lock_sock(parent);
 
 	/* Check if the ACL is secure enough (if not SDP) */
 	if (psm != cpu_to_le16(0x0001) &&
 				!hci_conn_check_link_mode(conn->hcon)) {
-		conn->disc_reason = 0x05;
+		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
 		result = L2CAP_CR_SEC_BLOCK;
 		goto response;
 	}
@@ -2349,11 +2631,8 @@
 
 	sk = chan->sk;
 
-	write_lock_bh(&conn->chan_lock);
-
 	/* Check if we already have channel with that dcid */
 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
-		write_unlock_bh(&conn->chan_lock);
 		sock_set_flag(sk, SOCK_ZAPPED);
 		chan->ops->close(chan->data);
 		goto response;
@@ -2368,7 +2647,7 @@
 
 	bt_accept_enqueue(parent, sk);
 
-	__l2cap_chan_add(conn, chan);
+	l2cap_chan_add(conn, chan);
 
 	dcid = chan->scid;
 
@@ -2377,7 +2656,7 @@
 	chan->ident = cmd->ident;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-		if (l2cap_check_security(chan)) {
+		if (l2cap_chan_check_security(chan)) {
 			if (bt_sk(sk)->defer_setup) {
 				l2cap_state_change(chan, BT_CONNECT2);
 				result = L2CAP_CR_PEND;
@@ -2399,10 +2678,8 @@
 		status = L2CAP_CS_NO_INFO;
 	}
 
-	write_unlock_bh(&conn->chan_lock);
-
 response:
-	bh_unlock_sock(parent);
+	release_sock(parent);
 
 sendresp:
 	rsp.scid   = cpu_to_le16(scid);
@@ -2418,7 +2695,7 @@
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
 		conn->info_ident = l2cap_get_ident(conn);
 
-		mod_timer(&conn->info_timer, jiffies +
+		schedule_delayed_work(&conn->info_timer,
 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
 
 		l2cap_send_cmd(conn, conn->info_ident,
@@ -2484,19 +2761,11 @@
 		break;
 
 	default:
-		/* don't delete l2cap channel if sk is owned by user */
-		if (sock_owned_by_user(sk)) {
-			l2cap_state_change(chan, BT_DISCONN);
-			__clear_chan_timer(chan);
-			__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
-			break;
-		}
-
 		l2cap_chan_del(chan, ECONNREFUSED);
 		break;
 	}
 
-	bh_unlock_sock(sk);
+	release_sock(sk);
 	return 0;
 }
 
@@ -2602,8 +2871,23 @@
 		chan->num_conf_req++;
 	}
 
+	/* Got Conf Rsp PENDING from remote side and assume we sent
+	   Conf Rsp PENDING in the code above */
+	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+		/* check compatibility */
+
+		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+					l2cap_build_conf_rsp(chan, rsp,
+					L2CAP_CONF_SUCCESS, 0x0000), rsp);
+	}
+
 unlock:
-	bh_unlock_sock(sk);
+	release_sock(sk);
 	return 0;
 }
 
@@ -2631,8 +2915,33 @@
 	switch (result) {
 	case L2CAP_CONF_SUCCESS:
 		l2cap_conf_rfc_get(chan, rsp->data, len);
+		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
 		break;
 
+	case L2CAP_CONF_PENDING:
+		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+
+		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+			char buf[64];
+
+			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+								buf, &result);
+			if (len < 0) {
+				l2cap_send_disconn_req(conn, chan, ECONNRESET);
+				goto done;
+			}
+
+			/* check compatibility */
+
+			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+						l2cap_build_conf_rsp(chan, buf,
+						L2CAP_CONF_SUCCESS, 0x0000), buf);
+		}
+		goto done;
+
 	case L2CAP_CONF_UNACCEPT:
 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
 			char req[64];
@@ -2685,7 +2994,7 @@
 	}
 
 done:
-	bh_unlock_sock(sk);
+	release_sock(sk);
 	return 0;
 }
 
@@ -2714,17 +3023,8 @@
 
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
-	/* don't delete l2cap channel if sk is owned by user */
-	if (sock_owned_by_user(sk)) {
-		l2cap_state_change(chan, BT_DISCONN);
-		__clear_chan_timer(chan);
-		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
-		bh_unlock_sock(sk);
-		return 0;
-	}
-
 	l2cap_chan_del(chan, ECONNRESET);
-	bh_unlock_sock(sk);
+	release_sock(sk);
 
 	chan->ops->close(chan->data);
 	return 0;
@@ -2748,17 +3048,8 @@
 
 	sk = chan->sk;
 
-	/* don't delete l2cap channel if sk is owned by user */
-	if (sock_owned_by_user(sk)) {
-		l2cap_state_change(chan,BT_DISCONN);
-		__clear_chan_timer(chan);
-		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
-		bh_unlock_sock(sk);
-		return 0;
-	}
-
 	l2cap_chan_del(chan, 0);
-	bh_unlock_sock(sk);
+	release_sock(sk);
 
 	chan->ops->close(chan->data);
 	return 0;
@@ -2782,15 +3073,25 @@
 		if (!disable_ertm)
 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
 							 | L2CAP_FEAT_FCS;
+		if (enable_hs)
+			feat_mask |= L2CAP_FEAT_EXT_FLOW
+						| L2CAP_FEAT_EXT_WINDOW;
+
 		put_unaligned_le32(feat_mask, rsp->data);
 		l2cap_send_cmd(conn, cmd->ident,
 					L2CAP_INFO_RSP, sizeof(buf), buf);
 	} else if (type == L2CAP_IT_FIXED_CHAN) {
 		u8 buf[12];
 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+
+		if (enable_hs)
+			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+		else
+			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
-		memcpy(buf + 4, l2cap_fixed_chan, 8);
+		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
 		l2cap_send_cmd(conn, cmd->ident,
 					L2CAP_INFO_RSP, sizeof(buf), buf);
 	} else {
@@ -2819,7 +3120,7 @@
 			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
 		return 0;
 
-	del_timer(&conn->info_timer);
+	__cancel_delayed_work(&conn->info_timer);
 
 	if (result != L2CAP_IR_SUCCESS) {
 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -2857,6 +3158,165 @@
 	return 0;
 }
 
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+					void *data)
+{
+	struct l2cap_create_chan_req *req = data;
+	struct l2cap_create_chan_rsp rsp;
+	u16 psm, scid;
+
+	if (cmd_len != sizeof(*req))
+		return -EPROTO;
+
+	if (!enable_hs)
+		return -EINVAL;
+
+	psm = le16_to_cpu(req->psm);
+	scid = le16_to_cpu(req->scid);
+
+	BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+
+	/* Placeholder: Always reject */
+	rsp.dcid = 0;
+	rsp.scid = cpu_to_le16(scid);
+	rsp.result = L2CAP_CR_NO_MEM;
+	rsp.status = L2CAP_CS_NO_INFO;
+
+	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+		       sizeof(rsp), &rsp);
+
+	return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+					struct l2cap_cmd_hdr *cmd, void *data)
+{
+	BT_DBG("conn %p", conn);
+
+	return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+							u16 icid, u16 result)
+{
+	struct l2cap_move_chan_rsp rsp;
+
+	BT_DBG("icid %d, result %d", icid, result);
+
+	rsp.icid = cpu_to_le16(icid);
+	rsp.result = cpu_to_le16(result);
+
+	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+				struct l2cap_chan *chan, u16 icid, u16 result)
+{
+	struct l2cap_move_chan_cfm cfm;
+	u8 ident;
+
+	BT_DBG("icid %d, result %d", icid, result);
+
+	ident = l2cap_get_ident(conn);
+	if (chan)
+		chan->ident = ident;
+
+	cfm.icid = cpu_to_le16(icid);
+	cfm.result = cpu_to_le16(result);
+
+	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+								u16 icid)
+{
+	struct l2cap_move_chan_cfm_rsp rsp;
+
+	BT_DBG("icid %d", icid);
+
+	rsp.icid = cpu_to_le16(icid);
+	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+	struct l2cap_move_chan_req *req = data;
+	u16 icid = 0;
+	u16 result = L2CAP_MR_NOT_ALLOWED;
+
+	if (cmd_len != sizeof(*req))
+		return -EPROTO;
+
+	icid = le16_to_cpu(req->icid);
+
+	BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+
+	if (!enable_hs)
+		return -EINVAL;
+
+	/* Placeholder: Always refuse */
+	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+
+	return 0;
+}
+
+static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+	struct l2cap_move_chan_rsp *rsp = data;
+	u16 icid, result;
+
+	if (cmd_len != sizeof(*rsp))
+		return -EPROTO;
+
+	icid = le16_to_cpu(rsp->icid);
+	result = le16_to_cpu(rsp->result);
+
+	BT_DBG("icid %d, result %d", icid, result);
+
+	/* Placeholder: Always unconfirmed */
+	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
+
+	return 0;
+}
+
+static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+	struct l2cap_move_chan_cfm *cfm = data;
+	u16 icid, result;
+
+	if (cmd_len != sizeof(*cfm))
+		return -EPROTO;
+
+	icid = le16_to_cpu(cfm->icid);
+	result = le16_to_cpu(cfm->result);
+
+	BT_DBG("icid %d, result %d", icid, result);
+
+	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+	return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+	struct l2cap_move_chan_cfm_rsp *rsp = data;
+	u16 icid;
+
+	if (cmd_len != sizeof(*rsp))
+		return -EPROTO;
+
+	icid = le16_to_cpu(rsp->icid);
+
+	BT_DBG("icid %d", icid);
+
+	return 0;
+}
+
 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
 							u16 to_multiplier)
 {
@@ -2969,6 +3429,30 @@
 		err = l2cap_information_rsp(conn, cmd, data);
 		break;
 
+	case L2CAP_CREATE_CHAN_REQ:
+		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+		break;
+
+	case L2CAP_CREATE_CHAN_RSP:
+		err = l2cap_create_channel_rsp(conn, cmd, data);
+		break;
+
+	case L2CAP_MOVE_CHAN_REQ:
+		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+		break;
+
+	case L2CAP_MOVE_CHAN_RSP:
+		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+		break;
+
+	case L2CAP_MOVE_CHAN_CFM:
+		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+		break;
+
+	case L2CAP_MOVE_CHAN_CFM_RSP:
+		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+		break;
+
 	default:
 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
 		err = -EINVAL;
@@ -3047,10 +3531,15 @@
 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
 {
 	u16 our_fcs, rcv_fcs;
-	int hdr_size = L2CAP_HDR_SIZE + 2;
+	int hdr_size;
+
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		hdr_size = L2CAP_EXT_HDR_SIZE;
+	else
+		hdr_size = L2CAP_ENH_HDR_SIZE;
 
 	if (chan->fcs == L2CAP_FCS_CRC16) {
-		skb_trim(skb, skb->len - 2);
+		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
 
@@ -3062,14 +3551,14 @@
 
 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
 {
-	u16 control = 0;
+	u32 control = 0;
 
 	chan->frames_sent = 0;
 
-	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= __set_reqseq(chan, chan->buffer_seq);
 
 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-		control |= L2CAP_SUPER_RCV_NOT_READY;
+		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
 		l2cap_send_sframe(chan, control);
 		set_bit(CONN_RNR_SENT, &chan->conn_state);
 	}
@@ -3081,12 +3570,12 @@
 
 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
 			chan->frames_sent == 0) {
-		control |= L2CAP_SUPER_RCV_READY;
+		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
 		l2cap_send_sframe(chan, control);
 	}
 }
 
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
 {
 	struct sk_buff *next_skb;
 	int tx_seq_offset, next_tx_seq_offset;
@@ -3095,23 +3584,15 @@
 	bt_cb(skb)->sar = sar;
 
 	next_skb = skb_peek(&chan->srej_q);
-	if (!next_skb) {
-		__skb_queue_tail(&chan->srej_q, skb);
-		return 0;
-	}
 
-	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
-	if (tx_seq_offset < 0)
-		tx_seq_offset += 64;
+	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
 
-	do {
+	while (next_skb) {
 		if (bt_cb(next_skb)->tx_seq == tx_seq)
 			return -EINVAL;
 
-		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
-						chan->buffer_seq) % 64;
-		if (next_tx_seq_offset < 0)
-			next_tx_seq_offset += 64;
+		next_tx_seq_offset = __seq_offset(chan,
+				bt_cb(next_skb)->tx_seq, chan->buffer_seq);
 
 		if (next_tx_seq_offset > tx_seq_offset) {
 			__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3119,9 +3600,10 @@
 		}
 
 		if (skb_queue_is_last(&chan->srej_q, next_skb))
-			break;
-
-	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
+			next_skb = NULL;
+		else
+			next_skb = skb_queue_next(&chan->srej_q, next_skb);
+	}
 
 	__skb_queue_tail(&chan->srej_q, skb);
 
@@ -3147,24 +3629,24 @@
 	skb->truesize += new_frag->truesize;
 }
 
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
 {
 	int err = -EINVAL;
 
-	switch (control & L2CAP_CTRL_SAR) {
-	case L2CAP_SDU_UNSEGMENTED:
+	switch (__get_ctrl_sar(chan, control)) {
+	case L2CAP_SAR_UNSEGMENTED:
 		if (chan->sdu)
 			break;
 
 		err = chan->ops->recv(chan->data, skb);
 		break;
 
-	case L2CAP_SDU_START:
+	case L2CAP_SAR_START:
 		if (chan->sdu)
 			break;
 
 		chan->sdu_len = get_unaligned_le16(skb->data);
-		skb_pull(skb, 2);
+		skb_pull(skb, L2CAP_SDULEN_SIZE);
 
 		if (chan->sdu_len > chan->imtu) {
 			err = -EMSGSIZE;
@@ -3181,7 +3663,7 @@
 		err = 0;
 		break;
 
-	case L2CAP_SDU_CONTINUE:
+	case L2CAP_SAR_CONTINUE:
 		if (!chan->sdu)
 			break;
 
@@ -3195,7 +3677,7 @@
 		err = 0;
 		break;
 
-	case L2CAP_SDU_END:
+	case L2CAP_SAR_END:
 		if (!chan->sdu)
 			break;
 
@@ -3230,14 +3712,14 @@
 
 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
 {
-	u16 control;
+	u32 control;
 
 	BT_DBG("chan %p, Enter local busy", chan);
 
 	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
 
-	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	control |= L2CAP_SUPER_RCV_NOT_READY;
+	control = __set_reqseq(chan, chan->buffer_seq);
+	control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
 	l2cap_send_sframe(chan, control);
 
 	set_bit(CONN_RNR_SENT, &chan->conn_state);
@@ -3247,13 +3729,14 @@
 
 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
 {
-	u16 control;
+	u32 control;
 
 	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
 		goto done;
 
-	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+	control = __set_reqseq(chan, chan->buffer_seq);
+	control |= __set_ctrl_poll(chan);
+	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
 	l2cap_send_sframe(chan, control);
 	chan->retry_count = 1;
 
@@ -3279,10 +3762,10 @@
 	}
 }
 
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
 {
 	struct sk_buff *skb;
-	u16 control;
+	u32 control;
 
 	while ((skb = skb_peek(&chan->srej_q)) &&
 			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
@@ -3292,7 +3775,7 @@
 			break;
 
 		skb = skb_dequeue(&chan->srej_q);
-		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+		control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
 		err = l2cap_reassemble_sdu(chan, skb, control);
 
 		if (err < 0) {
@@ -3300,16 +3783,15 @@
 			break;
 		}
 
-		chan->buffer_seq_srej =
-			(chan->buffer_seq_srej + 1) % 64;
-		tx_seq = (tx_seq + 1) % 64;
+		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
+		tx_seq = __next_seq(chan, tx_seq);
 	}
 }
 
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
 {
 	struct srej_list *l, *tmp;
-	u16 control;
+	u32 control;
 
 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 		if (l->tx_seq == tx_seq) {
@@ -3317,45 +3799,53 @@
 			kfree(l);
 			return;
 		}
-		control = L2CAP_SUPER_SELECT_REJECT;
-		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+		control |= __set_reqseq(chan, l->tx_seq);
 		l2cap_send_sframe(chan, control);
 		list_del(&l->list);
 		list_add_tail(&l->list, &chan->srej_l);
 	}
 }
 
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
 {
 	struct srej_list *new;
-	u16 control;
+	u32 control;
 
 	while (tx_seq != chan->expected_tx_seq) {
-		control = L2CAP_SUPER_SELECT_REJECT;
-		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+		control |= __set_reqseq(chan, chan->expected_tx_seq);
 		l2cap_send_sframe(chan, control);
 
 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+		if (!new)
+			return -ENOMEM;
+
 		new->tx_seq = chan->expected_tx_seq;
-		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
 		list_add_tail(&new->list, &chan->srej_l);
 	}
-	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
+	return 0;
 }
 
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
 {
-	u8 tx_seq = __get_txseq(rx_control);
-	u8 req_seq = __get_reqseq(rx_control);
-	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+	u16 tx_seq = __get_txseq(chan, rx_control);
+	u16 req_seq = __get_reqseq(chan, rx_control);
+	u8 sar = __get_ctrl_sar(chan, rx_control);
 	int tx_seq_offset, expected_tx_seq_offset;
 	int num_to_ack = (chan->tx_win/6) + 1;
 	int err = 0;
 
-	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
 							tx_seq, rx_control);
 
-	if (L2CAP_CTRL_FINAL & rx_control &&
+	if (__is_ctrl_final(chan, rx_control) &&
 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
 		__clear_monitor_timer(chan);
 		if (chan->unacked_frames > 0)
@@ -3366,9 +3856,7 @@
 	chan->expected_ack_seq = req_seq;
 	l2cap_drop_acked_frames(chan);
 
-	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
-	if (tx_seq_offset < 0)
-		tx_seq_offset += 64;
+	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
 
 	/* invalid tx_seq */
 	if (tx_seq_offset >= chan->tx_win) {
@@ -3413,13 +3901,16 @@
 					return 0;
 				}
 			}
-			l2cap_send_srejframe(chan, tx_seq);
+
+			err = l2cap_send_srejframe(chan, tx_seq);
+			if (err < 0) {
+				l2cap_send_disconn_req(chan->conn, chan, -err);
+				return err;
+			}
 		}
 	} else {
-		expected_tx_seq_offset =
-			(chan->expected_tx_seq - chan->buffer_seq) % 64;
-		if (expected_tx_seq_offset < 0)
-			expected_tx_seq_offset += 64;
+		expected_tx_seq_offset = __seq_offset(chan,
+				chan->expected_tx_seq, chan->buffer_seq);
 
 		/* duplicated tx_seq */
 		if (tx_seq_offset < expected_tx_seq_offset)
@@ -3437,14 +3928,18 @@
 
 		set_bit(CONN_SEND_PBIT, &chan->conn_state);
 
-		l2cap_send_srejframe(chan, tx_seq);
+		err = l2cap_send_srejframe(chan, tx_seq);
+		if (err < 0) {
+			l2cap_send_disconn_req(chan->conn, chan, -err);
+			return err;
+		}
 
 		__clear_ack_timer(chan);
 	}
 	return 0;
 
 expected:
-	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
 
 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
 		bt_cb(skb)->tx_seq = tx_seq;
@@ -3454,22 +3949,24 @@
 	}
 
 	err = l2cap_reassemble_sdu(chan, skb, rx_control);
-	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
 	if (err < 0) {
 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 		return err;
 	}
 
-	if (rx_control & L2CAP_CTRL_FINAL) {
+	if (__is_ctrl_final(chan, rx_control)) {
 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
 			l2cap_retransmit_frames(chan);
 	}
 
-	__set_ack_timer(chan);
 
 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
 	if (chan->num_acked == num_to_ack - 1)
 		l2cap_send_ack(chan);
+	else
+		__set_ack_timer(chan);
 
 	return 0;
 
@@ -3478,15 +3975,15 @@
 	return 0;
 }
 
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
 {
-	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
-						rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
+				__get_reqseq(chan, rx_control), rx_control);
 
-	chan->expected_ack_seq = __get_reqseq(rx_control);
+	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
 	l2cap_drop_acked_frames(chan);
 
-	if (rx_control & L2CAP_CTRL_POLL) {
+	if (__is_ctrl_poll(chan, rx_control)) {
 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
 			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
@@ -3499,7 +3996,7 @@
 			l2cap_send_i_or_rr_or_rnr(chan);
 		}
 
-	} else if (rx_control & L2CAP_CTRL_FINAL) {
+	} else if (__is_ctrl_final(chan, rx_control)) {
 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
@@ -3518,18 +4015,18 @@
 	}
 }
 
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
 {
-	u8 tx_seq = __get_reqseq(rx_control);
+	u16 tx_seq = __get_reqseq(chan, rx_control);
 
-	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
 
 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
 	chan->expected_ack_seq = tx_seq;
 	l2cap_drop_acked_frames(chan);
 
-	if (rx_control & L2CAP_CTRL_FINAL) {
+	if (__is_ctrl_final(chan, rx_control)) {
 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
 			l2cap_retransmit_frames(chan);
 	} else {
@@ -3539,15 +4036,15 @@
 			set_bit(CONN_REJ_ACT, &chan->conn_state);
 	}
 }
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
 {
-	u8 tx_seq = __get_reqseq(rx_control);
+	u16 tx_seq = __get_reqseq(chan, rx_control);
 
-	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
 
 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
-	if (rx_control & L2CAP_CTRL_POLL) {
+	if (__is_ctrl_poll(chan, rx_control)) {
 		chan->expected_ack_seq = tx_seq;
 		l2cap_drop_acked_frames(chan);
 
@@ -3560,7 +4057,7 @@
 			chan->srej_save_reqseq = tx_seq;
 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
 		}
-	} else if (rx_control & L2CAP_CTRL_FINAL) {
+	} else if (__is_ctrl_final(chan, rx_control)) {
 		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
 				chan->srej_save_reqseq == tx_seq)
 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -3575,37 +4072,39 @@
 	}
 }
 
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
 {
-	u8 tx_seq = __get_reqseq(rx_control);
+	u16 tx_seq = __get_reqseq(chan, rx_control);
 
-	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
 
 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 	chan->expected_ack_seq = tx_seq;
 	l2cap_drop_acked_frames(chan);
 
-	if (rx_control & L2CAP_CTRL_POLL)
+	if (__is_ctrl_poll(chan, rx_control))
 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
 
 	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
 		__clear_retrans_timer(chan);
-		if (rx_control & L2CAP_CTRL_POLL)
+		if (__is_ctrl_poll(chan, rx_control))
 			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
 		return;
 	}
 
-	if (rx_control & L2CAP_CTRL_POLL)
+	if (__is_ctrl_poll(chan, rx_control)) {
 		l2cap_send_srejtail(chan);
-	else
-		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
+	} else {
+		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
+		l2cap_send_sframe(chan, rx_control);
+	}
 }
 
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
 {
-	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
+	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
 
-	if (L2CAP_CTRL_FINAL & rx_control &&
+	if (__is_ctrl_final(chan, rx_control) &&
 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
 		__clear_monitor_timer(chan);
 		if (chan->unacked_frames > 0)
@@ -3613,20 +4112,20 @@
 		clear_bit(CONN_WAIT_F, &chan->conn_state);
 	}
 
-	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
-	case L2CAP_SUPER_RCV_READY:
+	switch (__get_ctrl_super(chan, rx_control)) {
+	case L2CAP_SUPER_RR:
 		l2cap_data_channel_rrframe(chan, rx_control);
 		break;
 
-	case L2CAP_SUPER_REJECT:
+	case L2CAP_SUPER_REJ:
 		l2cap_data_channel_rejframe(chan, rx_control);
 		break;
 
-	case L2CAP_SUPER_SELECT_REJECT:
+	case L2CAP_SUPER_SREJ:
 		l2cap_data_channel_srejframe(chan, rx_control);
 		break;
 
-	case L2CAP_SUPER_RCV_NOT_READY:
+	case L2CAP_SUPER_RNR:
 		l2cap_data_channel_rnrframe(chan, rx_control);
 		break;
 	}
@@ -3638,12 +4137,12 @@
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	u16 control;
-	u8 req_seq;
+	u32 control;
+	u16 req_seq;
 	int len, next_tx_seq_offset, req_seq_offset;
 
-	control = get_unaligned_le16(skb->data);
-	skb_pull(skb, 2);
+	control = __get_control(chan, skb->data);
+	skb_pull(skb, __ctrl_size(chan));
 	len = skb->len;
 
 	/*
@@ -3654,26 +4153,23 @@
 	if (l2cap_check_fcs(chan, skb))
 		goto drop;
 
-	if (__is_sar_start(control) && __is_iframe(control))
-		len -= 2;
+	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+		len -= L2CAP_SDULEN_SIZE;
 
 	if (chan->fcs == L2CAP_FCS_CRC16)
-		len -= 2;
+		len -= L2CAP_FCS_SIZE;
 
 	if (len > chan->mps) {
 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 		goto drop;
 	}
 
-	req_seq = __get_reqseq(control);
-	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
-	if (req_seq_offset < 0)
-		req_seq_offset += 64;
+	req_seq = __get_reqseq(chan, control);
 
-	next_tx_seq_offset =
-		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
-	if (next_tx_seq_offset < 0)
-		next_tx_seq_offset += 64;
+	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
+
+	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
+						chan->expected_ack_seq);
 
 	/* check for invalid req-seq */
 	if (req_seq_offset > next_tx_seq_offset) {
@@ -3681,7 +4177,7 @@
 		goto drop;
 	}
 
-	if (__is_iframe(control)) {
+	if (!__is_sframe(chan, control)) {
 		if (len < 0) {
 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 			goto drop;
@@ -3709,8 +4205,8 @@
 {
 	struct l2cap_chan *chan;
 	struct sock *sk = NULL;
-	u16 control;
-	u8 tx_seq;
+	u32 control;
+	u16 tx_seq;
 	int len;
 
 	chan = l2cap_get_chan_by_scid(conn, cid);
@@ -3741,33 +4237,28 @@
 		break;
 
 	case L2CAP_MODE_ERTM:
-		if (!sock_owned_by_user(sk)) {
-			l2cap_ertm_data_rcv(sk, skb);
-		} else {
-			if (sk_add_backlog(sk, skb))
-				goto drop;
-		}
+		l2cap_ertm_data_rcv(sk, skb);
 
 		goto done;
 
 	case L2CAP_MODE_STREAMING:
-		control = get_unaligned_le16(skb->data);
-		skb_pull(skb, 2);
+		control = __get_control(chan, skb->data);
+		skb_pull(skb, __ctrl_size(chan));
 		len = skb->len;
 
 		if (l2cap_check_fcs(chan, skb))
 			goto drop;
 
-		if (__is_sar_start(control))
-			len -= 2;
+		if (__is_sar_start(chan, control))
+			len -= L2CAP_SDULEN_SIZE;
 
 		if (chan->fcs == L2CAP_FCS_CRC16)
-			len -= 2;
+			len -= L2CAP_FCS_SIZE;
 
-		if (len > chan->mps || len < 0 || __is_sframe(control))
+		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
 			goto drop;
 
-		tx_seq = __get_txseq(control);
+		tx_seq = __get_txseq(chan, control);
 
 		if (chan->expected_tx_seq != tx_seq) {
 			/* Frame(s) missing - must discard partial SDU */
@@ -3779,7 +4270,7 @@
 			/* TODO: Notify userland of missing data */
 		}
 
-		chan->expected_tx_seq = (tx_seq + 1) % 64;
+		chan->expected_tx_seq = __next_seq(chan, tx_seq);
 
 		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3796,7 +4287,7 @@
 
 done:
 	if (sk)
-		bh_unlock_sock(sk);
+		release_sock(sk);
 
 	return 0;
 }
@@ -3812,7 +4303,7 @@
 
 	sk = chan->sk;
 
-	bh_lock_sock(sk);
+	lock_sock(sk);
 
 	BT_DBG("sk %p, len %d", sk, skb->len);
 
@@ -3830,7 +4321,7 @@
 
 done:
 	if (sk)
-		bh_unlock_sock(sk);
+		release_sock(sk);
 	return 0;
 }
 
@@ -3845,7 +4336,7 @@
 
 	sk = chan->sk;
 
-	bh_lock_sock(sk);
+	lock_sock(sk);
 
 	BT_DBG("sk %p, len %d", sk, skb->len);
 
@@ -3863,7 +4354,7 @@
 
 done:
 	if (sk)
-		bh_unlock_sock(sk);
+		release_sock(sk);
 	return 0;
 }
 
@@ -3913,14 +4404,11 @@
 
 /* ---- L2CAP interface with lower layer (HCI) ---- */
 
-static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	int exact = 0, lm1 = 0, lm2 = 0;
 	struct l2cap_chan *c;
 
-	if (type != ACL_LINK)
-		return -EINVAL;
-
 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
 
 	/* Find listening sockets and check their link_mode */
@@ -3933,12 +4421,12 @@
 
 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
 			lm1 |= HCI_LM_ACCEPT;
-			if (c->role_switch)
+			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
 				lm1 |= HCI_LM_MASTER;
 			exact++;
 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
 			lm2 |= HCI_LM_ACCEPT;
-			if (c->role_switch)
+			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
 				lm2 |= HCI_LM_MASTER;
 		}
 	}
@@ -3947,15 +4435,12 @@
 	return exact ? lm1 : lm2;
 }
 
-static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 {
 	struct l2cap_conn *conn;
 
 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
 
-	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
-		return -EINVAL;
-
 	if (!status) {
 		conn = l2cap_conn_add(hcon, status);
 		if (conn)
@@ -3966,27 +4451,22 @@
 	return 0;
 }
 
-static int l2cap_disconn_ind(struct hci_conn *hcon)
+int l2cap_disconn_ind(struct hci_conn *hcon)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
 
 	BT_DBG("hcon %p", hcon);
 
-	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
-		return 0x13;
-
+	if (!conn)
+		return HCI_ERROR_REMOTE_USER_TERM;
 	return conn->disc_reason;
 }
 
-static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
 	BT_DBG("hcon %p reason %d", hcon, reason);
 
-	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
-		return -EINVAL;
-
 	l2cap_conn_del(hcon, bt_to_errno(reason));
-
 	return 0;
 }
 
@@ -4007,7 +4487,7 @@
 	}
 }
 
-static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
 	struct l2cap_chan *chan;
@@ -4019,12 +4499,12 @@
 
 	if (hcon->type == LE_LINK) {
 		smp_distribute_keys(conn, 0);
-		del_timer(&conn->security_timer);
+		__cancel_delayed_work(&conn->security_timer);
 	}
 
-	read_lock(&conn->chan_lock);
+	rcu_read_lock();
 
-	list_for_each_entry(chan, &conn->chan_l, list) {
+	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
 		struct sock *sk = chan->sk;
 
 		bh_lock_sock(sk);
@@ -4102,12 +4582,12 @@
 		bh_unlock_sock(sk);
 	}
 
-	read_unlock(&conn->chan_lock);
+	rcu_read_unlock();
 
 	return 0;
 }
 
-static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
 
@@ -4168,11 +4648,11 @@
 				BT_ERR("Frame exceeding recv MTU (len %d, "
 							"MTU %d)", len,
 							chan->imtu);
-				bh_unlock_sock(sk);
+				release_sock(sk);
 				l2cap_conn_unreliable(conn, ECOMM);
 				goto drop;
 			}
-			bh_unlock_sock(sk);
+			release_sock(sk);
 		}
 
 		/* Allocate skb for the complete frame (with header) */
@@ -4254,17 +4734,6 @@
 
 static struct dentry *l2cap_debugfs;
 
-static struct hci_proto l2cap_hci_proto = {
-	.name		= "L2CAP",
-	.id		= HCI_PROTO_L2CAP,
-	.connect_ind	= l2cap_connect_ind,
-	.connect_cfm	= l2cap_connect_cfm,
-	.disconn_ind	= l2cap_disconn_ind,
-	.disconn_cfm	= l2cap_disconn_cfm,
-	.security_cfm	= l2cap_security_cfm,
-	.recv_acldata	= l2cap_recv_acldata
-};
-
 int __init l2cap_init(void)
 {
 	int err;
@@ -4273,13 +4742,6 @@
 	if (err < 0)
 		return err;
 
-	err = hci_register_proto(&l2cap_hci_proto);
-	if (err < 0) {
-		BT_ERR("L2CAP protocol registration failed");
-		bt_sock_unregister(BTPROTO_L2CAP);
-		goto error;
-	}
-
 	if (bt_debugfs) {
 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
 					bt_debugfs, NULL, &l2cap_debugfs_fops);
@@ -4288,19 +4750,11 @@
 	}
 
 	return 0;
-
-error:
-	l2cap_cleanup_sockets();
-	return err;
 }
 
 void l2cap_exit(void)
 {
 	debugfs_remove(l2cap_debugfs);
-
-	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
-		BT_ERR("L2CAP protocol unregistration failed");
-
 	l2cap_cleanup_sockets();
 }
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5c406d3..9ca5616 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -3,6 +3,7 @@
    Copyright (C) 2000-2001 Qualcomm Incorporated
    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
    Copyright (C) 2010 Google Inc.
+   Copyright (C) 2011 ProFUSION Embedded Systems
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -122,70 +123,15 @@
 	if (la.l2_cid && la.l2_psm)
 		return -EINVAL;
 
-	lock_sock(sk);
-
-	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
-			&& !(la.l2_psm || la.l2_cid)) {
-		err = -EINVAL;
-		goto done;
-	}
-
-	switch (chan->mode) {
-	case L2CAP_MODE_BASIC:
-		break;
-	case L2CAP_MODE_ERTM:
-	case L2CAP_MODE_STREAMING:
-		if (!disable_ertm)
-			break;
-		/* fall through */
-	default:
-		err = -ENOTSUPP;
-		goto done;
-	}
-
-	switch (sk->sk_state) {
-	case BT_CONNECT:
-	case BT_CONNECT2:
-	case BT_CONFIG:
-		/* Already connecting */
-		goto wait;
-
-	case BT_CONNECTED:
-		/* Already connected */
-		err = -EISCONN;
-		goto done;
-
-	case BT_OPEN:
-	case BT_BOUND:
-		/* Can connect */
-		break;
-
-	default:
-		err = -EBADFD;
-		goto done;
-	}
-
-	/* PSM must be odd and lsb of upper byte must be 0 */
-	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
-					chan->chan_type != L2CAP_CHAN_RAW) {
-		err = -EINVAL;
-		goto done;
-	}
-
-	/* Set destination address and psm */
-	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
-	chan->psm = la.l2_psm;
-	chan->dcid = la.l2_cid;
-
-	err = l2cap_chan_connect(l2cap_pi(sk)->chan);
+	err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
 	if (err)
 		goto done;
 
-wait:
 	err = bt_sock_wait_state(sk, BT_CONNECTED,
 			sock_sndtimeo(sk, flags & O_NONBLOCK));
 done:
-	release_sock(sk);
+	if (sock_owned_by_user(sk))
+		release_sock(sk);
 	return err;
 }
 
@@ -334,7 +280,7 @@
 		opts.mode     = chan->mode;
 		opts.fcs      = chan->fcs;
 		opts.max_tx   = chan->max_tx;
-		opts.txwin_size = (__u16)chan->tx_win;
+		opts.txwin_size = chan->tx_win;
 
 		len = min_t(unsigned int, len, sizeof(opts));
 		if (copy_to_user(optval, (char *) &opts, len))
@@ -359,10 +305,10 @@
 			break;
 		}
 
-		if (chan->role_switch)
+		if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
 			opt |= L2CAP_LM_MASTER;
 
-		if (chan->force_reliable)
+		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
 			opt |= L2CAP_LM_RELIABLE;
 
 		if (put_user(opt, (u32 __user *) optval))
@@ -449,7 +395,8 @@
 		break;
 
 	case BT_FLUSHABLE:
-		if (put_user(chan->flushable, (u32 __user *) optval))
+		if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+						(u32 __user *) optval))
 			err = -EFAULT;
 
 		break;
@@ -461,7 +408,7 @@
 			break;
 		}
 
-		pwr.force_active = chan->force_active;
+		pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
 
 		len = min_t(unsigned int, len, sizeof(pwr));
 		if (copy_to_user(optval, (char *) &pwr, len))
@@ -469,6 +416,16 @@
 
 		break;
 
+	case BT_CHANNEL_POLICY:
+		if (!enable_hs) {
+			err = -ENOPROTOOPT;
+			break;
+		}
+
+		if (put_user(chan->chan_policy, (u32 __user *) optval))
+			err = -EFAULT;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -503,7 +460,7 @@
 		opts.mode     = chan->mode;
 		opts.fcs      = chan->fcs;
 		opts.max_tx   = chan->max_tx;
-		opts.txwin_size = (__u16)chan->tx_win;
+		opts.txwin_size = chan->tx_win;
 
 		len = min_t(unsigned int, sizeof(opts), optlen);
 		if (copy_from_user((char *) &opts, optval, len)) {
@@ -511,7 +468,7 @@
 			break;
 		}
 
-		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+		if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
 			err = -EINVAL;
 			break;
 		}
@@ -535,7 +492,7 @@
 		chan->omtu = opts.omtu;
 		chan->fcs  = opts.fcs;
 		chan->max_tx = opts.max_tx;
-		chan->tx_win = (__u8)opts.txwin_size;
+		chan->tx_win = opts.txwin_size;
 		break;
 
 	case L2CAP_LM:
@@ -551,8 +508,15 @@
 		if (opt & L2CAP_LM_SECURE)
 			chan->sec_level = BT_SECURITY_HIGH;
 
-		chan->role_switch    = (opt & L2CAP_LM_MASTER);
-		chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
+		if (opt & L2CAP_LM_MASTER)
+			set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+		else
+			clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+		if (opt & L2CAP_LM_RELIABLE)
+			set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+		else
+			clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
 		break;
 
 	default:
@@ -608,8 +572,13 @@
 
 		chan->sec_level = sec.level;
 
+		if (!chan->conn)
+			break;
+
 		conn = chan->conn;
-		if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+
+		/* change security for LE channels */
+		if (chan->scid == L2CAP_CID_LE_DATA) {
 			if (!conn->hcon->out) {
 				err = -EINVAL;
 				break;
@@ -617,9 +586,14 @@
 
 			if (smp_conn_security(conn, sec.level))
 				break;
-
-			err = 0;
 			sk->sk_state = BT_CONFIG;
+
+		/* or for ACL link, during the defer_setup period */
+		} else if (sk->sk_state == BT_CONNECT2 &&
+					bt_sk(sk)->defer_setup) {
+			err = l2cap_chan_check_security(chan);
+		} else {
+			err = -EINVAL;
 		}
 		break;
 
@@ -658,7 +632,10 @@
 			}
 		}
 
-		chan->flushable = opt;
+		if (opt)
+			set_bit(FLAG_FLUSHABLE, &chan->flags);
+		else
+			clear_bit(FLAG_FLUSHABLE, &chan->flags);
 		break;
 
 	case BT_POWER:
@@ -675,7 +652,36 @@
 			err = -EFAULT;
 			break;
 		}
-		chan->force_active = pwr.force_active;
+
+		if (pwr.force_active)
+			set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+		else
+			clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+		break;
+
+	case BT_CHANNEL_POLICY:
+		if (!enable_hs) {
+			err = -ENOPROTOOPT;
+			break;
+		}
+
+		if (get_user(opt, (u32 __user *) optval)) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (chan->mode != L2CAP_MODE_ERTM &&
+				chan->mode != L2CAP_MODE_STREAMING) {
+			err = -EOPNOTSUPP;
+			break;
+		}
+
+		chan->chan_policy = (u8) opt;
 		break;
 
 	default:
@@ -709,7 +715,7 @@
 		return -ENOTCONN;
 	}
 
-	err = l2cap_chan_send(chan, msg, len);
+	err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
 
 	release_sock(sk);
 	return err;
@@ -931,11 +937,9 @@
 		chan->fcs  = pchan->fcs;
 		chan->max_tx = pchan->max_tx;
 		chan->tx_win = pchan->tx_win;
+		chan->tx_win_max = pchan->tx_win_max;
 		chan->sec_level = pchan->sec_level;
-		chan->role_switch = pchan->role_switch;
-		chan->force_reliable = pchan->force_reliable;
-		chan->flushable = pchan->flushable;
-		chan->force_active = pchan->force_active;
+		chan->flags = pchan->flags;
 
 		security_sk_clone(parent, sk);
 	} else {
@@ -964,12 +968,10 @@
 		chan->max_tx = L2CAP_DEFAULT_MAX_TX;
 		chan->fcs  = L2CAP_FCS_CRC16;
 		chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
 		chan->sec_level = BT_SECURITY_LOW;
-		chan->role_switch = 0;
-		chan->force_reliable = 0;
-		chan->flushable = BT_FLUSHABLE_OFF;
-		chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
-
+		chan->flags = 0;
+		set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
 	}
 
 	/* Default config options */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 2c76342..2540944 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,6 +22,7 @@
 
 /* Bluetooth HCI Management interface */
 
+#include <linux/kernel.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <asm/unaligned.h>
@@ -29,26 +30,103 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
+#include <net/bluetooth/smp.h>
 
 #define MGMT_VERSION	0
 #define MGMT_REVISION	1
 
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+
+#define SERVICE_CACHE_TIMEOUT (5 * 1000)
+
 struct pending_cmd {
 	struct list_head list;
-	__u16 opcode;
+	u16 opcode;
 	int index;
 	void *param;
 	struct sock *sk;
 	void *user_data;
 };
 
-static LIST_HEAD(cmd_list);
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+	MGMT_STATUS_SUCCESS,
+	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
+	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
+	MGMT_STATUS_FAILED,		/* Hardware Failure */
+	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
+	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
+	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
+	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
+	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
+	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
+	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
+	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
+	MGMT_STATUS_BUSY,		/* Command Disallowed */
+	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
+	MGMT_STATUS_REJECTED,		/* Rejected Security */
+	MGMT_STATUS_REJECTED,		/* Rejected Personal */
+	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
+	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
+	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
+	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
+	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
+	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
+	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
+	MGMT_STATUS_BUSY,		/* Repeated Attempts */
+	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
+	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
+	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
+	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
+	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
+	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
+	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
+	MGMT_STATUS_FAILED,		/* Unspecified Error */
+	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
+	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
+	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
+	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
+	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
+	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
+	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
+	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
+	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
+	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
+	MGMT_STATUS_FAILED,		/* Transaction Collision */
+	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
+	MGMT_STATUS_REJECTED,		/* QoS Rejected */
+	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
+	MGMT_STATUS_REJECTED,		/* Insufficient Security */
+	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
+	MGMT_STATUS_BUSY,		/* Role Switch Pending */
+	MGMT_STATUS_FAILED,		/* Slot Violation */
+	MGMT_STATUS_FAILED,		/* Role Switch Failed */
+	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
+	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
+	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
+	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
+	MGMT_STATUS_BUSY,		/* Controller Busy */
+	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
+	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
+	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
+	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
+	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
+};
+
+static u8 mgmt_status(u8 hci_status)
+{
+	if (hci_status < ARRAY_SIZE(mgmt_status_table))
+		return mgmt_status_table[hci_status];
+
+	return MGMT_STATUS_FAILED;
+}
 
 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
 	struct sk_buff *skb;
 	struct mgmt_hdr *hdr;
 	struct mgmt_ev_cmd_status *ev;
+	int err;
 
 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
 
@@ -66,10 +144,11 @@
 	ev->status = status;
 	put_unaligned_le16(cmd, &ev->opcode);
 
-	if (sock_queue_rcv_skb(sk, skb) < 0)
+	err = sock_queue_rcv_skb(sk, skb);
+	if (err < 0)
 		kfree_skb(skb);
 
-	return 0;
+	return err;
 }
 
 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
@@ -78,6 +157,7 @@
 	struct sk_buff *skb;
 	struct mgmt_hdr *hdr;
 	struct mgmt_ev_cmd_complete *ev;
+	int err;
 
 	BT_DBG("sock %p", sk);
 
@@ -97,10 +177,11 @@
 	if (rp)
 		memcpy(ev->data, rp, rp_len);
 
-	if (sock_queue_rcv_skb(sk, skb) < 0)
+	err = sock_queue_rcv_skb(sk, skb);
+	if (err < 0)
 		kfree_skb(skb);
 
-	return 0;
+	return err;
 }
 
 static int read_version(struct sock *sk)
@@ -120,6 +201,7 @@
 {
 	struct mgmt_rp_read_index_list *rp;
 	struct list_head *p;
+	struct hci_dev *d;
 	size_t rp_len;
 	u16 count;
 	int i, err;
@@ -143,10 +225,9 @@
 	put_unaligned_le16(count, &rp->num_controllers);
 
 	i = 0;
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
-		hci_del_off_timer(d);
+	list_for_each_entry(d, &hci_dev_list, list) {
+		if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
+			cancel_delayed_work(&d->power_off);
 
 		if (test_bit(HCI_SETUP, &d->flags))
 			continue;
@@ -165,382 +246,61 @@
 	return err;
 }
 
-static int read_controller_info(struct sock *sk, u16 index)
+static u32 get_supported_settings(struct hci_dev *hdev)
 {
-	struct mgmt_rp_read_info rp;
-	struct hci_dev *hdev;
+	u32 settings = 0;
 
-	BT_DBG("sock %p hci%u", sk, index);
+	settings |= MGMT_SETTING_POWERED;
+	settings |= MGMT_SETTING_CONNECTABLE;
+	settings |= MGMT_SETTING_FAST_CONNECTABLE;
+	settings |= MGMT_SETTING_DISCOVERABLE;
+	settings |= MGMT_SETTING_PAIRABLE;
 
-	hdev = hci_dev_get(index);
-	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+	if (hdev->features[6] & LMP_SIMPLE_PAIR)
+		settings |= MGMT_SETTING_SSP;
 
-	hci_del_off_timer(hdev);
+	if (!(hdev->features[4] & LMP_NO_BREDR)) {
+		settings |= MGMT_SETTING_BREDR;
+		settings |= MGMT_SETTING_LINK_SECURITY;
+	}
 
-	hci_dev_lock_bh(hdev);
+	if (hdev->features[4] & LMP_LE)
+		settings |= MGMT_SETTING_LE;
 
-	set_bit(HCI_MGMT, &hdev->flags);
+	return settings;
+}
 
-	memset(&rp, 0, sizeof(rp));
+static u32 get_current_settings(struct hci_dev *hdev)
+{
+	u32 settings = 0;
 
-	rp.type = hdev->dev_type;
+	if (test_bit(HCI_UP, &hdev->flags))
+		settings |= MGMT_SETTING_POWERED;
+	else
+		return settings;
 
-	rp.powered = test_bit(HCI_UP, &hdev->flags);
-	rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
-	rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
-	rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
+	if (test_bit(HCI_PSCAN, &hdev->flags))
+		settings |= MGMT_SETTING_CONNECTABLE;
+
+	if (test_bit(HCI_ISCAN, &hdev->flags))
+		settings |= MGMT_SETTING_DISCOVERABLE;
+
+	if (test_bit(HCI_PAIRABLE, &hdev->flags))
+		settings |= MGMT_SETTING_PAIRABLE;
+
+	if (!(hdev->features[4] & LMP_NO_BREDR))
+		settings |= MGMT_SETTING_BREDR;
+
+	if (hdev->extfeatures[0] & LMP_HOST_LE)
+		settings |= MGMT_SETTING_LE;
 
 	if (test_bit(HCI_AUTH, &hdev->flags))
-		rp.sec_mode = 3;
-	else if (hdev->ssp_mode > 0)
-		rp.sec_mode = 4;
-	else
-		rp.sec_mode = 2;
+		settings |= MGMT_SETTING_LINK_SECURITY;
 
-	bacpy(&rp.bdaddr, &hdev->bdaddr);
-	memcpy(rp.features, hdev->features, 8);
-	memcpy(rp.dev_class, hdev->dev_class, 3);
-	put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
-	rp.hci_ver = hdev->hci_ver;
-	put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
+	if (hdev->ssp_mode > 0)
+		settings |= MGMT_SETTING_SSP;
 
-	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
-
-	hci_dev_unlock_bh(hdev);
-	hci_dev_put(hdev);
-
-	return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
-}
-
-static void mgmt_pending_free(struct pending_cmd *cmd)
-{
-	sock_put(cmd->sk);
-	kfree(cmd->param);
-	kfree(cmd);
-}
-
-static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
-						u16 index, void *data, u16 len)
-{
-	struct pending_cmd *cmd;
-
-	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
-	if (!cmd)
-		return NULL;
-
-	cmd->opcode = opcode;
-	cmd->index = index;
-
-	cmd->param = kmalloc(len, GFP_ATOMIC);
-	if (!cmd->param) {
-		kfree(cmd);
-		return NULL;
-	}
-
-	if (data)
-		memcpy(cmd->param, data, len);
-
-	cmd->sk = sk;
-	sock_hold(sk);
-
-	list_add(&cmd->list, &cmd_list);
-
-	return cmd;
-}
-
-static void mgmt_pending_foreach(u16 opcode, int index,
-				void (*cb)(struct pending_cmd *cmd, void *data),
-				void *data)
-{
-	struct list_head *p, *n;
-
-	list_for_each_safe(p, n, &cmd_list) {
-		struct pending_cmd *cmd;
-
-		cmd = list_entry(p, struct pending_cmd, list);
-
-		if (cmd->opcode != opcode)
-			continue;
-
-		if (index >= 0 && cmd->index != index)
-			continue;
-
-		cb(cmd, data);
-	}
-}
-
-static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
-{
-	struct list_head *p;
-
-	list_for_each(p, &cmd_list) {
-		struct pending_cmd *cmd;
-
-		cmd = list_entry(p, struct pending_cmd, list);
-
-		if (cmd->opcode != opcode)
-			continue;
-
-		if (index >= 0 && cmd->index != index)
-			continue;
-
-		return cmd;
-	}
-
-	return NULL;
-}
-
-static void mgmt_pending_remove(struct pending_cmd *cmd)
-{
-	list_del(&cmd->list);
-	mgmt_pending_free(cmd);
-}
-
-static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
-{
-	struct mgmt_mode *cp;
-	struct hci_dev *hdev;
-	struct pending_cmd *cmd;
-	int err, up;
-
-	cp = (void *) data;
-
-	BT_DBG("request for hci%u", index);
-
-	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
-
-	hdev = hci_dev_get(index);
-	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
-
-	hci_dev_lock_bh(hdev);
-
-	up = test_bit(HCI_UP, &hdev->flags);
-	if ((cp->val && up) || (!cp->val && !up)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
-		goto failed;
-	}
-
-	if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
-		goto failed;
-	}
-
-	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
-
-	if (cp->val)
-		queue_work(hdev->workqueue, &hdev->power_on);
-	else
-		queue_work(hdev->workqueue, &hdev->power_off);
-
-	err = 0;
-
-failed:
-	hci_dev_unlock_bh(hdev);
-	hci_dev_put(hdev);
-	return err;
-}
-
-static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
-									u16 len)
-{
-	struct mgmt_mode *cp;
-	struct hci_dev *hdev;
-	struct pending_cmd *cmd;
-	u8 scan;
-	int err;
-
-	cp = (void *) data;
-
-	BT_DBG("request for hci%u", index);
-
-	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
-
-	hdev = hci_dev_get(index);
-	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
-
-	hci_dev_lock_bh(hdev);
-
-	if (!test_bit(HCI_UP, &hdev->flags)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
-		goto failed;
-	}
-
-	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
-			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
-		goto failed;
-	}
-
-	if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
-					test_bit(HCI_PSCAN, &hdev->flags)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
-		goto failed;
-	}
-
-	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
-
-	scan = SCAN_PAGE;
-
-	if (cp->val)
-		scan |= SCAN_INQUIRY;
-
-	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
-
-failed:
-	hci_dev_unlock_bh(hdev);
-	hci_dev_put(hdev);
-
-	return err;
-}
-
-static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
-									u16 len)
-{
-	struct mgmt_mode *cp;
-	struct hci_dev *hdev;
-	struct pending_cmd *cmd;
-	u8 scan;
-	int err;
-
-	cp = (void *) data;
-
-	BT_DBG("request for hci%u", index);
-
-	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
-
-	hdev = hci_dev_get(index);
-	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
-
-	hci_dev_lock_bh(hdev);
-
-	if (!test_bit(HCI_UP, &hdev->flags)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
-		goto failed;
-	}
-
-	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
-			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
-		goto failed;
-	}
-
-	if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
-		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
-		goto failed;
-	}
-
-	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
-
-	if (cp->val)
-		scan = SCAN_PAGE;
-	else
-		scan = 0;
-
-	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
-
-failed:
-	hci_dev_unlock_bh(hdev);
-	hci_dev_put(hdev);
-
-	return err;
-}
-
-static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
-							struct sock *skip_sk)
-{
-	struct sk_buff *skb;
-	struct mgmt_hdr *hdr;
-
-	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
-	if (!skb)
-		return -ENOMEM;
-
-	bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
-
-	hdr = (void *) skb_put(skb, sizeof(*hdr));
-	hdr->opcode = cpu_to_le16(event);
-	hdr->index = cpu_to_le16(index);
-	hdr->len = cpu_to_le16(data_len);
-
-	if (data)
-		memcpy(skb_put(skb, data_len), data, data_len);
-
-	hci_send_to_sock(NULL, skb, skip_sk);
-	kfree_skb(skb);
-
-	return 0;
-}
-
-static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
-{
-	struct mgmt_mode rp;
-
-	rp.val = val;
-
-	return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
-}
-
-static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
-									u16 len)
-{
-	struct mgmt_mode *cp, ev;
-	struct hci_dev *hdev;
-	int err;
-
-	cp = (void *) data;
-
-	BT_DBG("request for hci%u", index);
-
-	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
-
-	hdev = hci_dev_get(index);
-	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
-
-	hci_dev_lock_bh(hdev);
-
-	if (cp->val)
-		set_bit(HCI_PAIRABLE, &hdev->flags);
-	else
-		clear_bit(HCI_PAIRABLE, &hdev->flags);
-
-	err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
-	if (err < 0)
-		goto failed;
-
-	ev.val = cp->val;
-
-	err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
-
-failed:
-	hci_dev_unlock_bh(hdev);
-	hci_dev_put(hdev);
-
-	return err;
+	return settings;
 }
 
 #define EIR_FLAGS		0x01 /* flags */
@@ -587,7 +347,7 @@
 	u16 eir_len = 0;
 	u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
 	int i, truncated = 0;
-	struct list_head *p;
+	struct bt_uuid *uuid;
 	size_t name_len;
 
 	name_len = strlen(hdev->dev_name);
@@ -612,8 +372,7 @@
 	memset(uuid16_list, 0, sizeof(uuid16_list));
 
 	/* Group all UUID16 types */
-	list_for_each(p, &hdev->uuids) {
-		struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+	list_for_each_entry(uuid, &hdev->uuids, list) {
 		u16 uuid16;
 
 		uuid16 = get_uuid16(uuid->uuid);
@@ -689,14 +448,11 @@
 
 static u8 get_service_classes(struct hci_dev *hdev)
 {
-	struct list_head *p;
+	struct bt_uuid *uuid;
 	u8 val = 0;
 
-	list_for_each(p, &hdev->uuids) {
-		struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
-
+	list_for_each_entry(uuid, &hdev->uuids, list)
 		val |= uuid->svc_hint;
-	}
 
 	return val;
 }
@@ -720,6 +476,412 @@
 	return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
 }
 
+static void service_cache_off(struct work_struct *work)
+{
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							service_cache.work);
+
+	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+		return;
+
+	hci_dev_lock(hdev);
+
+	update_eir(hdev);
+	update_class(hdev);
+
+	hci_dev_unlock(hdev);
+}
+
+static void mgmt_init_hdev(struct hci_dev *hdev)
+{
+	if (!test_and_set_bit(HCI_MGMT, &hdev->flags))
+		INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+
+	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags))
+		schedule_delayed_work(&hdev->service_cache,
+				msecs_to_jiffies(SERVICE_CACHE_TIMEOUT));
+}
+
+static int read_controller_info(struct sock *sk, u16 index)
+{
+	struct mgmt_rp_read_info rp;
+	struct hci_dev *hdev;
+
+	BT_DBG("sock %p hci%u", sk, index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_READ_INFO,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work_sync(&hdev->power_off);
+
+	hci_dev_lock(hdev);
+
+	if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
+		mgmt_init_hdev(hdev);
+
+	memset(&rp, 0, sizeof(rp));
+
+	bacpy(&rp.bdaddr, &hdev->bdaddr);
+
+	rp.version = hdev->hci_ver;
+
+	put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+
+	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
+	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
+
+	memcpy(rp.dev_class, hdev->dev_class, 3);
+
+	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+}
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+	sock_put(cmd->sk);
+	kfree(cmd->param);
+	kfree(cmd);
+}
+
+static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+							struct hci_dev *hdev,
+							void *data, u16 len)
+{
+	struct pending_cmd *cmd;
+
+	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+	if (!cmd)
+		return NULL;
+
+	cmd->opcode = opcode;
+	cmd->index = hdev->id;
+
+	cmd->param = kmalloc(len, GFP_ATOMIC);
+	if (!cmd->param) {
+		kfree(cmd);
+		return NULL;
+	}
+
+	if (data)
+		memcpy(cmd->param, data, len);
+
+	cmd->sk = sk;
+	sock_hold(sk);
+
+	list_add(&cmd->list, &hdev->mgmt_pending);
+
+	return cmd;
+}
+
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+				void (*cb)(struct pending_cmd *cmd, void *data),
+				void *data)
+{
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &hdev->mgmt_pending) {
+		struct pending_cmd *cmd;
+
+		cmd = list_entry(p, struct pending_cmd, list);
+
+		if (opcode > 0 && cmd->opcode != opcode)
+			continue;
+
+		cb(cmd, data);
+	}
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+	struct pending_cmd *cmd;
+
+	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+		if (cmd->opcode == opcode)
+			return cmd;
+	}
+
+	return NULL;
+}
+
+static void mgmt_pending_remove(struct pending_cmd *cmd)
+{
+	list_del(&cmd->list);
+	mgmt_pending_free(cmd);
+}
+
+static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+	__le32 settings = cpu_to_le32(get_current_settings(hdev));
+
+	return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings));
+}
+
+static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
+{
+	struct mgmt_mode *cp;
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err, up;
+
+	cp = (void *) data;
+
+	BT_DBG("request for hci%u", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	up = test_bit(HCI_UP, &hdev->flags);
+	if ((cp->val && up) || (!cp->val && !up)) {
+		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
+		goto failed;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+		err = cmd_status(sk, index, MGMT_OP_SET_POWERED,
+							MGMT_STATUS_BUSY);
+		goto failed;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	if (cp->val)
+		schedule_work(&hdev->power_on);
+	else
+		schedule_work(&hdev->power_off.work);
+
+	err = 0;
+
+failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+	return err;
+}
+
+static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
+									u16 len)
+{
+	struct mgmt_cp_set_discoverable *cp;
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	u8 scan;
+	int err;
+
+	cp = (void *) data;
+
+	BT_DBG("request for hci%u", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+						MGMT_STATUS_NOT_POWERED);
+		goto failed;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+							MGMT_STATUS_BUSY);
+		goto failed;
+	}
+
+	if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+					test_bit(HCI_PSCAN, &hdev->flags)) {
+		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+		goto failed;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	scan = SCAN_PAGE;
+
+	if (cp->val)
+		scan |= SCAN_INQUIRY;
+	else
+		cancel_delayed_work(&hdev->discov_off);
+
+	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+	if (cp->val)
+		hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+
+failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
+									u16 len)
+{
+	struct mgmt_mode *cp;
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	u8 scan;
+	int err;
+
+	cp = (void *) data;
+
+	BT_DBG("request for hci%u", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+						MGMT_STATUS_NOT_POWERED);
+		goto failed;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+							MGMT_STATUS_BUSY);
+		goto failed;
+	}
+
+	if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+		goto failed;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	if (cp->val)
+		scan = SCAN_PAGE;
+	else
+		scan = 0;
+
+	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
+					u16 data_len, struct sock *skip_sk)
+{
+	struct sk_buff *skb;
+	struct mgmt_hdr *hdr;
+
+	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+
+	hdr = (void *) skb_put(skb, sizeof(*hdr));
+	hdr->opcode = cpu_to_le16(event);
+	if (hdev)
+		hdr->index = cpu_to_le16(hdev->id);
+	else
+		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+	hdr->len = cpu_to_le16(data_len);
+
+	if (data)
+		memcpy(skb_put(skb, data_len), data, data_len);
+
+	hci_send_to_sock(NULL, skb, skip_sk);
+	kfree_skb(skb);
+
+	return 0;
+}
+
+static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
+									u16 len)
+{
+	struct mgmt_mode *cp;
+	struct hci_dev *hdev;
+	__le32 ev;
+	int err;
+
+	cp = (void *) data;
+
+	BT_DBG("request for hci%u", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	hci_dev_lock(hdev);
+
+	if (cp->val)
+		set_bit(HCI_PAIRABLE, &hdev->flags);
+	else
+		clear_bit(HCI_PAIRABLE, &hdev->flags);
+
+	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
+	if (err < 0)
+		goto failed;
+
+	ev = cpu_to_le32(get_current_settings(hdev));
+
+	err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk);
+
+failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
 static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 {
 	struct mgmt_cp_add_uuid *cp;
@@ -732,13 +894,15 @@
 	BT_DBG("request for hci%u", index);
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
 	if (!uuid) {
@@ -762,7 +926,7 @@
 	err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -781,13 +945,15 @@
 	BT_DBG("request for hci%u", index);
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
 		err = hci_uuids_clear(hdev);
@@ -807,7 +973,8 @@
 	}
 
 	if (found == 0) {
-		err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+		err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+						MGMT_STATUS_INVALID_PARAMS);
 		goto unlock;
 	}
 
@@ -822,7 +989,7 @@
 	err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
 
 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -840,97 +1007,71 @@
 	BT_DBG("request for hci%u", index);
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hdev->major_class = cp->major;
 	hdev->minor_class = cp->minor;
 
+	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) {
+		hci_dev_unlock(hdev);
+		cancel_delayed_work_sync(&hdev->service_cache);
+		hci_dev_lock(hdev);
+		update_eir(hdev);
+	}
+
 	err = update_class(hdev);
 
 	if (err == 0)
 		err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
 }
 
-static int set_service_cache(struct sock *sk, u16 index,  unsigned char *data,
-									u16 len)
+static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
 {
 	struct hci_dev *hdev;
-	struct mgmt_cp_set_service_cache *cp;
-	int err;
-
-	cp = (void *) data;
-
-	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
-
-	hdev = hci_dev_get(index);
-	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
-
-	hci_dev_lock_bh(hdev);
-
-	BT_DBG("hci%u enable %d", index, cp->enable);
-
-	if (cp->enable) {
-		set_bit(HCI_SERVICE_CACHE, &hdev->flags);
-		err = 0;
-	} else {
-		clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
-		err = update_class(hdev);
-		if (err == 0)
-			err = update_eir(hdev);
-	}
-
-	if (err == 0)
-		err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
-									0);
-
-	hci_dev_unlock_bh(hdev);
-	hci_dev_put(hdev);
-
-	return err;
-}
-
-static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
-{
-	struct hci_dev *hdev;
-	struct mgmt_cp_load_keys *cp;
+	struct mgmt_cp_load_link_keys *cp;
 	u16 key_count, expected_len;
 	int i;
 
 	cp = (void *) data;
 
 	if (len < sizeof(*cp))
-		return -EINVAL;
+		return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	key_count = get_unaligned_le16(&cp->key_count);
 
-	expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+	expected_len = sizeof(*cp) + key_count *
+					sizeof(struct mgmt_link_key_info);
 	if (expected_len != len) {
-		BT_ERR("load_keys: expected %u bytes, got %u bytes",
+		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
 							len, expected_len);
-		return -EINVAL;
+		return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+						MGMT_STATUS_INVALID_PARAMS);
 	}
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
 								key_count);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hci_link_keys_clear(hdev);
 
@@ -942,58 +1083,84 @@
 		clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
 
 	for (i = 0; i < key_count; i++) {
-		struct mgmt_key_info *key = &cp->keys[i];
+		struct mgmt_link_key_info *key = &cp->keys[i];
 
 		hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
 								key->pin_len);
 	}
 
-	hci_dev_unlock_bh(hdev);
+	cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
+
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return 0;
 }
 
-static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
 {
 	struct hci_dev *hdev;
-	struct mgmt_cp_remove_key *cp;
+	struct mgmt_cp_remove_keys *cp;
+	struct mgmt_rp_remove_keys rp;
+	struct hci_cp_disconnect dc;
+	struct pending_cmd *cmd;
 	struct hci_conn *conn;
 	int err;
 
 	cp = (void *) data;
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
+
+	memset(&rp, 0, sizeof(rp));
+	bacpy(&rp.bdaddr, &cp->bdaddr);
+	rp.status = MGMT_STATUS_FAILED;
 
 	err = hci_remove_link_key(hdev, &cp->bdaddr);
 	if (err < 0) {
-		err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+		rp.status = MGMT_STATUS_NOT_PAIRED;
 		goto unlock;
 	}
 
-	err = 0;
-
-	if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+	if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) {
+		err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+								sizeof(rp));
 		goto unlock;
+	}
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
-	if (conn) {
-		struct hci_cp_disconnect dc;
-
-		put_unaligned_le16(conn->handle, &dc.handle);
-		dc.reason = 0x13; /* Remote User Terminated Connection */
-		err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+	if (!conn) {
+		err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+								sizeof(rp));
+		goto unlock;
 	}
 
+	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp));
+	if (!cmd) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	put_unaligned_le16(conn->handle, &dc.handle);
+	dc.reason = 0x13; /* Remote User Terminated Connection */
+	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
 unlock:
-	hci_dev_unlock_bh(hdev);
+	if (err < 0)
+		err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+								sizeof(rp));
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1013,21 +1180,25 @@
 	cp = (void *) data;
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
-		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+		err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+						MGMT_STATUS_NOT_POWERED);
 		goto failed;
 	}
 
-	if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
-		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
+		err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+							MGMT_STATUS_BUSY);
 		goto failed;
 	}
 
@@ -1036,11 +1207,12 @@
 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
 
 	if (!conn) {
-		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+		err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+						MGMT_STATUS_NOT_CONNECTED);
 		goto failed;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto failed;
@@ -1054,16 +1226,36 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
 }
 
+static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+{
+	switch (link_type) {
+	case LE_LINK:
+		switch (addr_type) {
+		case ADDR_LE_DEV_PUBLIC:
+			return MGMT_ADDR_LE_PUBLIC;
+		case ADDR_LE_DEV_RANDOM:
+			return MGMT_ADDR_LE_RANDOM;
+		default:
+			return MGMT_ADDR_INVALID;
+		}
+	case ACL_LINK:
+		return MGMT_ADDR_BREDR;
+	default:
+		return MGMT_ADDR_INVALID;
+	}
+}
+
 static int get_connections(struct sock *sk, u16 index)
 {
 	struct mgmt_rp_get_connections *rp;
 	struct hci_dev *hdev;
+	struct hci_conn *c;
 	struct list_head *p;
 	size_t rp_len;
 	u16 count;
@@ -1073,16 +1265,17 @@
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	count = 0;
 	list_for_each(p, &hdev->conn_hash.list) {
 		count++;
 	}
 
-	rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+	rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
 	rp = kmalloc(rp_len, GFP_ATOMIC);
 	if (!rp) {
 		err = -ENOMEM;
@@ -1092,17 +1285,22 @@
 	put_unaligned_le16(count, &rp->conn_count);
 
 	i = 0;
-	list_for_each(p, &hdev->conn_hash.list) {
-		struct hci_conn *c = list_entry(p, struct hci_conn, list);
-
-		bacpy(&rp->conn[i++], &c->dst);
+	list_for_each_entry(c, &hdev->conn_hash.list, list) {
+		bacpy(&rp->addr[i].bdaddr, &c->dst);
+		rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
+		if (rp->addr[i].type == MGMT_ADDR_INVALID)
+			continue;
+		i++;
 	}
 
+	/* Recalculate length in case of filtered SCO connections, etc. */
+	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
 	err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
 
 unlock:
 	kfree(rp);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -1113,7 +1311,7 @@
 	struct pending_cmd *cmd;
 	int err;
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
 								sizeof(*cp));
 	if (!cmd)
 		return -ENOMEM;
@@ -1142,22 +1340,26 @@
 	cp = (void *) data;
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
-		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+						MGMT_STATUS_NOT_POWERED);
 		goto failed;
 	}
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 	if (!conn) {
-		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+						MGMT_STATUS_NOT_CONNECTED);
 		goto failed;
 	}
 
@@ -1169,12 +1371,12 @@
 		err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
 		if (err >= 0)
 			err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
-								EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 		goto failed;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto failed;
@@ -1189,7 +1391,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1208,25 +1410,25 @@
 
 	if (len != sizeof(*cp))
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-									EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-									ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-								ENETDOWN);
+						MGMT_STATUS_NOT_POWERED);
 		goto failed;
 	}
 
 	err = send_pin_code_neg_reply(sk, index, hdev, cp);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1243,20 +1445,22 @@
 	cp = (void *) data;
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hdev->io_capability = cp->io_capability;
 
 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
 							hdev->io_capability);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1265,19 +1469,12 @@
 static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
-	struct list_head *p;
+	struct pending_cmd *cmd;
 
-	list_for_each(p, &cmd_list) {
-		struct pending_cmd *cmd;
-
-		cmd = list_entry(p, struct pending_cmd, list);
-
+	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
 			continue;
 
-		if (cmd->index != hdev->id)
-			continue;
-
 		if (cmd->user_data != conn)
 			continue;
 
@@ -1292,7 +1489,8 @@
 	struct mgmt_rp_pair_device rp;
 	struct hci_conn *conn = cmd->user_data;
 
-	bacpy(&rp.bdaddr, &conn->dst);
+	bacpy(&rp.addr.bdaddr, &conn->dst);
+	rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
 	rp.status = status;
 
 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
@@ -1314,20 +1512,18 @@
 	BT_DBG("status %u", status);
 
 	cmd = find_pairing(conn);
-	if (!cmd) {
+	if (!cmd)
 		BT_DBG("Unable to find a pending command");
-		return;
-	}
-
-	pairing_complete(cmd, status);
+	else
+		pairing_complete(cmd, status);
 }
 
 static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 {
 	struct hci_dev *hdev;
 	struct mgmt_cp_pair_device *cp;
+	struct mgmt_rp_pair_device rp;
 	struct pending_cmd *cmd;
-	struct adv_entry *entry;
 	u8 sec_level, auth_type;
 	struct hci_conn *conn;
 	int err;
@@ -1337,13 +1533,15 @@
 	cp = (void *) data;
 
 	if (len != sizeof(*cp))
-		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	sec_level = BT_SECURITY_MEDIUM;
 	if (cp->io_cap == 0x03)
@@ -1351,26 +1549,33 @@
 	else
 		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 
-	entry = hci_find_adv_entry(hdev, &cp->bdaddr);
-	if (entry)
-		conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+	if (cp->addr.type == MGMT_ADDR_BREDR)
+		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
 								auth_type);
 	else
-		conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
 								auth_type);
 
+	memset(&rp, 0, sizeof(rp));
+	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+	rp.addr.type = cp->addr.type;
+
 	if (IS_ERR(conn)) {
-		err = PTR_ERR(conn);
+		rp.status = -PTR_ERR(conn);
+		err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+							&rp, sizeof(rp));
 		goto unlock;
 	}
 
 	if (conn->connect_cfm_cb) {
 		hci_conn_put(conn);
-		err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+		rp.status = EBUSY;
+		err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+							&rp, sizeof(rp));
 		goto unlock;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
 		hci_conn_put(conn);
@@ -1378,7 +1583,7 @@
 	}
 
 	/* For LE, just connecting isn't a proof that the pairing finished */
-	if (!entry)
+	if (cp->addr.type == MGMT_ADDR_BREDR)
 		conn->connect_cfm_cb = pairing_complete_cb;
 
 	conn->security_cfm_cb = pairing_complete_cb;
@@ -1393,62 +1598,151 @@
 	err = 0;
 
 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
 }
 
-static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
-							u16 len, int success)
+static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
+					u16 mgmt_op, u16 hci_op, __le32 passkey)
 {
-	struct mgmt_cp_user_confirm_reply *cp = (void *) data;
-	u16 mgmt_op, hci_op;
 	struct pending_cmd *cmd;
 	struct hci_dev *hdev;
+	struct hci_conn *conn;
 	int err;
 
-	BT_DBG("");
-
-	if (success) {
-		mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
-		hci_op = HCI_OP_USER_CONFIRM_REPLY;
-	} else {
-		mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
-		hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
-	}
-
-	if (len != sizeof(*cp))
-		return cmd_status(sk, index, mgmt_op, EINVAL);
-
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, mgmt_op, ENODEV);
+		return cmd_status(sk, index, mgmt_op,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
-		err = cmd_status(sk, index, mgmt_op, ENETDOWN);
-		goto failed;
+		err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
+		goto done;
 	}
 
-	cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+	/*
+	 * Check for an existing ACL link, if present pair via
+	 * HCI commands.
+	 *
+	 * If no ACL link is present, check for an LE link and if
+	 * present, pair via the SMP engine.
+	 *
+	 * If neither ACL nor LE links are present, fail with error.
+	 */
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+	if (!conn) {
+		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+		if (!conn) {
+			err = cmd_status(sk, index, mgmt_op,
+						MGMT_STATUS_NOT_CONNECTED);
+			goto done;
+		}
+
+		/* Continue with pairing via SMP */
+		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+
+		if (!err)
+			err = cmd_status(sk, index, mgmt_op,
+							MGMT_STATUS_SUCCESS);
+		else
+			err = cmd_status(sk, index, mgmt_op,
+							MGMT_STATUS_FAILED);
+
+		goto done;
+	}
+
+	cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
 	if (!cmd) {
 		err = -ENOMEM;
-		goto failed;
+		goto done;
 	}
 
-	err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+	/* Continue with pairing via HCI */
+	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+		struct hci_cp_user_passkey_reply cp;
+
+		bacpy(&cp.bdaddr, bdaddr);
+		cp.passkey = passkey;
+		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+	} else
+		err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
+
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
-failed:
-	hci_dev_unlock_bh(hdev);
+done:
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
 }
 
+static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+	struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+	BT_DBG("");
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	return user_pairing_resp(sk, index, &cp->bdaddr,
+			MGMT_OP_USER_CONFIRM_REPLY,
+			HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
+									u16 len)
+{
+	struct mgmt_cp_user_confirm_neg_reply *cp = data;
+
+	BT_DBG("");
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	return user_pairing_resp(sk, index, &cp->bdaddr,
+			MGMT_OP_USER_CONFIRM_NEG_REPLY,
+			HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+	struct mgmt_cp_user_passkey_reply *cp = (void *) data;
+
+	BT_DBG("");
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	return user_pairing_resp(sk, index, &cp->bdaddr,
+			MGMT_OP_USER_PASSKEY_REPLY,
+			HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data,
+									u16 len)
+{
+	struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data;
+
+	BT_DBG("");
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY,
+						MGMT_STATUS_INVALID_PARAMS);
+
+	return user_pairing_resp(sk, index, &cp->bdaddr,
+			MGMT_OP_USER_PASSKEY_NEG_REPLY,
+			HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
 static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
 								u16 len)
 {
@@ -1461,15 +1755,17 @@
 	BT_DBG("");
 
 	if (len != sizeof(*mgmt_cp))
-		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto failed;
@@ -1482,7 +1778,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1499,28 +1795,29 @@
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-									ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-								ENETDOWN);
+						MGMT_STATUS_NOT_POWERED);
 		goto unlock;
 	}
 
 	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
 		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-								EOPNOTSUPP);
+						MGMT_STATUS_NOT_SUPPORTED);
 		goto unlock;
 	}
 
-	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
-		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+							MGMT_STATUS_BUSY);
 		goto unlock;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto unlock;
@@ -1531,7 +1828,7 @@
 		mgmt_pending_remove(cmd);
 
 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1548,24 +1845,25 @@
 
 	if (len != sizeof(*cp))
 		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
-									EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
-									ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
 								cp->randomizer);
 	if (err < 0)
-		err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+		err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+							MGMT_STATUS_FAILED);
 	else
 		err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
 									0);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1582,62 +1880,68 @@
 
 	if (len != sizeof(*cp))
 		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-									EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-									ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
 	if (err < 0)
 		err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-									-err);
+						MGMT_STATUS_INVALID_PARAMS);
 	else
 		err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
 								NULL, 0);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
 }
 
-static int start_discovery(struct sock *sk, u16 index)
+static int start_discovery(struct sock *sk, u16 index,
+						unsigned char *data, u16 len)
 {
-	u8 lap[3] = { 0x33, 0x8b, 0x9e };
-	struct hci_cp_inquiry cp;
+	struct mgmt_cp_start_discovery *cp = (void *) data;
 	struct pending_cmd *cmd;
 	struct hci_dev *hdev;
 	int err;
 
 	BT_DBG("hci%u", index);
 
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+						MGMT_STATUS_INVALID_PARAMS);
+
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+						MGMT_STATUS_NOT_POWERED);
+		goto failed;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto failed;
 	}
 
-	memset(&cp, 0, sizeof(cp));
-	memcpy(&cp.lap, lap, 3);
-	cp.length  = 0x08;
-	cp.num_rsp = 0x00;
-
-	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+	err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1653,22 +1957,23 @@
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
-		return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+		return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
 	if (!cmd) {
 		err = -ENOMEM;
 		goto failed;
 	}
 
-	err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+	err = hci_cancel_inquiry(hdev);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1678,7 +1983,6 @@
 								u16 len)
 {
 	struct hci_dev *hdev;
-	struct pending_cmd *cmd;
 	struct mgmt_cp_block_device *cp = (void *) data;
 	int err;
 
@@ -1686,33 +1990,24 @@
 
 	if (len != sizeof(*cp))
 		return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
-							EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
-							ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
-
-	cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
+	hci_dev_lock(hdev);
 
 	err = hci_blacklist_add(hdev, &cp->bdaddr);
-
 	if (err < 0)
-		err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
+		err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+							MGMT_STATUS_FAILED);
 	else
 		err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
 							NULL, 0);
 
-	mgmt_pending_remove(cmd);
-
-failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1722,7 +2017,6 @@
 								u16 len)
 {
 	struct hci_dev *hdev;
-	struct pending_cmd *cmd;
 	struct mgmt_cp_unblock_device *cp = (void *) data;
 	int err;
 
@@ -1730,33 +2024,25 @@
 
 	if (len != sizeof(*cp))
 		return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
-								EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
-								ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
-	hci_dev_lock_bh(hdev);
-
-	cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
+	hci_dev_lock(hdev);
 
 	err = hci_blacklist_del(hdev, &cp->bdaddr);
 
 	if (err < 0)
-		err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
+		err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+						MGMT_STATUS_INVALID_PARAMS);
 	else
 		err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
 								NULL, 0);
 
-	mgmt_pending_remove(cmd);
-
-failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1766,7 +2052,7 @@
 					unsigned char *data, u16 len)
 {
 	struct hci_dev *hdev;
-	struct mgmt_cp_set_fast_connectable *cp = (void *) data;
+	struct mgmt_mode *cp = (void *) data;
 	struct hci_cp_write_page_scan_activity acp;
 	u8 type;
 	int err;
@@ -1775,16 +2061,16 @@
 
 	if (len != sizeof(*cp))
 		return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-								EINVAL);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hdev = hci_dev_get(index);
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-								ENODEV);
+						MGMT_STATUS_INVALID_PARAMS);
 
 	hci_dev_lock(hdev);
 
-	if (cp->enable) {
+	if (cp->val) {
 		type = PAGE_SCAN_TYPE_INTERLACED;
 		acp.interval = 0x0024;	/* 22.5 msec page scan interval */
 	} else {
@@ -1798,14 +2084,14 @@
 						sizeof(acp), &acp);
 	if (err < 0) {
 		err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-								-err);
+							MGMT_STATUS_FAILED);
 		goto done;
 	}
 
 	err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 	if (err < 0) {
 		err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-								-err);
+							MGMT_STATUS_FAILED);
 		goto done;
 	}
 
@@ -1868,6 +2154,10 @@
 	case MGMT_OP_SET_CONNECTABLE:
 		err = set_connectable(sk, index, buf + sizeof(*hdr), len);
 		break;
+	case MGMT_OP_SET_FAST_CONNECTABLE:
+		err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
+								len);
+		break;
 	case MGMT_OP_SET_PAIRABLE:
 		err = set_pairable(sk, index, buf + sizeof(*hdr), len);
 		break;
@@ -1880,14 +2170,11 @@
 	case MGMT_OP_SET_DEV_CLASS:
 		err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
 		break;
-	case MGMT_OP_SET_SERVICE_CACHE:
-		err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
+	case MGMT_OP_LOAD_LINK_KEYS:
+		err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
 		break;
-	case MGMT_OP_LOAD_KEYS:
-		err = load_keys(sk, index, buf + sizeof(*hdr), len);
-		break;
-	case MGMT_OP_REMOVE_KEY:
-		err = remove_key(sk, index, buf + sizeof(*hdr), len);
+	case MGMT_OP_REMOVE_KEYS:
+		err = remove_keys(sk, index, buf + sizeof(*hdr), len);
 		break;
 	case MGMT_OP_DISCONNECT:
 		err = disconnect(sk, index, buf + sizeof(*hdr), len);
@@ -1908,10 +2195,18 @@
 		err = pair_device(sk, index, buf + sizeof(*hdr), len);
 		break;
 	case MGMT_OP_USER_CONFIRM_REPLY:
-		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
 		break;
 	case MGMT_OP_USER_CONFIRM_NEG_REPLY:
-		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
+		err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
+									len);
+		break;
+	case MGMT_OP_USER_PASSKEY_REPLY:
+		err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
+		break;
+	case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+		err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
+									len);
 		break;
 	case MGMT_OP_SET_LOCAL_NAME:
 		err = set_local_name(sk, index, buf + sizeof(*hdr), len);
@@ -1927,7 +2222,7 @@
 									len);
 		break;
 	case MGMT_OP_START_DISCOVERY:
-		err = start_discovery(sk, index);
+		err = start_discovery(sk, index, buf + sizeof(*hdr), len);
 		break;
 	case MGMT_OP_STOP_DISCOVERY:
 		err = stop_discovery(sk, index);
@@ -1938,13 +2233,10 @@
 	case MGMT_OP_UNBLOCK_DEVICE:
 		err = unblock_device(sk, index, buf + sizeof(*hdr), len);
 		break;
-	case MGMT_OP_SET_FAST_CONNECTABLE:
-		err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
-								len);
-		break;
 	default:
 		BT_DBG("Unknown op %u", opcode);
-		err = cmd_status(sk, index, opcode, 0x01);
+		err = cmd_status(sk, index, opcode,
+						MGMT_STATUS_UNKNOWN_COMMAND);
 		break;
 	}
 
@@ -1958,30 +2250,39 @@
 	return err;
 }
 
-int mgmt_index_added(u16 index)
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
 {
-	return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+	u8 *status = data;
+
+	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+	mgmt_pending_remove(cmd);
 }
 
-int mgmt_index_removed(u16 index)
+int mgmt_index_added(struct hci_dev *hdev)
 {
-	return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+}
+
+int mgmt_index_removed(struct hci_dev *hdev)
+{
+	u8 status = ENODEV;
+
+	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+
+	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
 }
 
 struct cmd_lookup {
 	u8 val;
 	struct sock *sk;
+	struct hci_dev *hdev;
 };
 
-static void mode_rsp(struct pending_cmd *cmd, void *data)
+static void settings_rsp(struct pending_cmd *cmd, void *data)
 {
-	struct mgmt_mode *cp = cmd->param;
 	struct cmd_lookup *match = data;
 
-	if (cp->val != match->val)
-		return;
-
-	send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
 
 	list_del(&cmd->list);
 
@@ -1993,35 +2294,22 @@
 	mgmt_pending_free(cmd);
 }
 
-int mgmt_powered(u16 index, u8 powered)
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
 {
-	struct mgmt_mode ev;
-	struct cmd_lookup match = { powered, NULL };
+	struct cmd_lookup match = { powered, NULL, hdev };
+	__le32 ev;
 	int ret;
 
-	mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
 
-	ev.val = powered;
+	if (!powered) {
+		u8 status = ENETDOWN;
+		mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+	}
 
-	ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+	ev = cpu_to_le32(get_current_settings(hdev));
 
-	if (match.sk)
-		sock_put(match.sk);
-
-	return ret;
-}
-
-int mgmt_discoverable(u16 index, u8 discoverable)
-{
-	struct mgmt_mode ev;
-	struct cmd_lookup match = { discoverable, NULL };
-	int ret;
-
-	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
-
-	ev.val = discoverable;
-
-	ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+	ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
 								match.sk);
 
 	if (match.sk)
@@ -2030,17 +2318,36 @@
 	return ret;
 }
 
-int mgmt_connectable(u16 index, u8 connectable)
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
 {
-	struct mgmt_mode ev;
-	struct cmd_lookup match = { connectable, NULL };
+	struct cmd_lookup match = { discoverable, NULL, hdev };
+	__le32 ev;
 	int ret;
 
-	mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match);
 
-	ev.val = connectable;
+	ev = cpu_to_le32(get_current_settings(hdev));
 
-	ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+	ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
+								match.sk);
+	if (match.sk)
+		sock_put(match.sk);
+
+	return ret;
+}
+
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+{
+	__le32 ev;
+	struct cmd_lookup match = { connectable, NULL, hdev };
+	int ret;
+
+	mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
+								&match);
+
+	ev = cpu_to_le32(get_current_settings(hdev));
+
+	ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk);
 
 	if (match.sk)
 		sock_put(match.sk);
@@ -2048,9 +2355,25 @@
 	return ret;
 }
 
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
-	struct mgmt_ev_new_key ev;
+	u8 mgmt_err = mgmt_status(status);
+
+	if (scan & SCAN_PAGE)
+		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+						cmd_status_rsp, &mgmt_err);
+
+	if (scan & SCAN_INQUIRY)
+		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+						cmd_status_rsp, &mgmt_err);
+
+	return 0;
+}
+
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+								u8 persistent)
+{
+	struct mgmt_ev_new_link_key ev;
 
 	memset(&ev, 0, sizeof(ev));
 
@@ -2060,17 +2383,18 @@
 	memcpy(ev.key.val, key->val, 16);
 	ev.key.pin_len = key->pin_len;
 
-	return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+								u8 addr_type)
 {
-	struct mgmt_ev_connected ev;
+	struct mgmt_addr_info ev;
 
 	bacpy(&ev.bdaddr, bdaddr);
-	ev.link_type = link_type;
+	ev.type = link_to_mgmt(link_type, addr_type);
 
-	return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+	return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
 }
 
 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2080,6 +2404,7 @@
 	struct mgmt_rp_disconnect rp;
 
 	bacpy(&rp.bdaddr, &cp->bdaddr);
+	rp.status = 0;
 
 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
 
@@ -2089,75 +2414,110 @@
 	mgmt_pending_remove(cmd);
 }
 
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+static void remove_keys_rsp(struct pending_cmd *cmd, void *data)
 {
-	struct mgmt_ev_disconnected ev;
+	u8 *status = data;
+	struct mgmt_cp_remove_keys *cp = cmd->param;
+	struct mgmt_rp_remove_keys rp;
+
+	memset(&rp, 0, sizeof(rp));
+	bacpy(&rp.bdaddr, &cp->bdaddr);
+	if (status != NULL)
+		rp.status = *status;
+
+	cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp,
+								sizeof(rp));
+
+	mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+								u8 addr_type)
+{
+	struct mgmt_addr_info ev;
 	struct sock *sk = NULL;
 	int err;
 
-	mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
 	bacpy(&ev.bdaddr, bdaddr);
+	ev.type = link_to_mgmt(link_type, addr_type);
 
-	err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+	err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
 
 	if (sk)
 		sock_put(sk);
 
+	mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL);
+
 	return err;
 }
 
-int mgmt_disconnect_failed(u16 index)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
 {
 	struct pending_cmd *cmd;
+	u8 mgmt_err = mgmt_status(status);
 	int err;
 
-	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
 	if (!cmd)
 		return -ENOENT;
 
-	err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+	if (bdaddr) {
+		struct mgmt_rp_disconnect rp;
+
+		bacpy(&rp.bdaddr, bdaddr);
+		rp.status = status;
+
+		err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+							&rp, sizeof(rp));
+	} else
+		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
+								mgmt_err);
 
 	mgmt_pending_remove(cmd);
 
 	return err;
 }
 
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+						u8 addr_type, u8 status)
 {
 	struct mgmt_ev_connect_failed ev;
 
-	bacpy(&ev.bdaddr, bdaddr);
-	ev.status = status;
+	bacpy(&ev.addr.bdaddr, bdaddr);
+	ev.addr.type = link_to_mgmt(link_type, addr_type);
+	ev.status = mgmt_status(status);
 
-	return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 {
 	struct mgmt_ev_pin_code_request ev;
 
 	bacpy(&ev.bdaddr, bdaddr);
 	ev.secure = secure;
 
-	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
 									NULL);
 }
 
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_pin_code_reply rp;
 	int err;
 
-	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
 	if (!cmd)
 		return -ENOENT;
 
 	bacpy(&rp.bdaddr, bdaddr);
-	rp.status = status;
+	rp.status = mgmt_status(status);
 
-	err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
 								sizeof(rp));
 
 	mgmt_pending_remove(cmd);
@@ -2165,20 +2525,21 @@
 	return err;
 }
 
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_pin_code_reply rp;
 	int err;
 
-	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
 	if (!cmd)
 		return -ENOENT;
 
 	bacpy(&rp.bdaddr, bdaddr);
-	rp.status = status;
+	rp.status = mgmt_status(status);
 
-	err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
 								sizeof(rp));
 
 	mgmt_pending_remove(cmd);
@@ -2186,97 +2547,119 @@
 	return err;
 }
 
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
-							u8 confirm_hint)
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+						__le32 value, u8 confirm_hint)
 {
 	struct mgmt_ev_user_confirm_request ev;
 
-	BT_DBG("hci%u", index);
+	BT_DBG("%s", hdev->name);
 
 	bacpy(&ev.bdaddr, bdaddr);
 	ev.confirm_hint = confirm_hint;
 	put_unaligned_le32(value, &ev.value);
 
-	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
 									NULL);
 }
 
-static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
-								u8 opcode)
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct mgmt_ev_user_passkey_request ev;
+
+	BT_DBG("%s", hdev->name);
+
+	bacpy(&ev.bdaddr, bdaddr);
+
+	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
+									NULL);
+}
+
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+							u8 status, u8 opcode)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_user_confirm_reply rp;
 	int err;
 
-	cmd = mgmt_pending_find(opcode, index);
+	cmd = mgmt_pending_find(opcode, hdev);
 	if (!cmd)
 		return -ENOENT;
 
 	bacpy(&rp.bdaddr, bdaddr);
-	rp.status = status;
-	err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+	rp.status = mgmt_status(status);
+	err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
 
 	mgmt_pending_remove(cmd);
 
 	return err;
 }
 
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								u8 status)
 {
-	return confirm_reply_complete(index, bdaddr, status,
+	return user_pairing_resp_complete(hdev, bdaddr, status,
 						MGMT_OP_USER_CONFIRM_REPLY);
 }
 
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+						bdaddr_t *bdaddr, u8 status)
 {
-	return confirm_reply_complete(index, bdaddr, status,
+	return user_pairing_resp_complete(hdev, bdaddr, status,
 					MGMT_OP_USER_CONFIRM_NEG_REPLY);
 }
 
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								u8 status)
+{
+	return user_pairing_resp_complete(hdev, bdaddr, status,
+						MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+						bdaddr_t *bdaddr, u8 status)
+{
+	return user_pairing_resp_complete(hdev, bdaddr, status,
+					MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
 {
 	struct mgmt_ev_auth_failed ev;
 
 	bacpy(&ev.bdaddr, bdaddr);
-	ev.status = status;
+	ev.status = mgmt_status(status);
 
-	return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
+	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 {
 	struct pending_cmd *cmd;
-	struct hci_dev *hdev;
 	struct mgmt_cp_set_local_name ev;
 	int err;
 
 	memset(&ev, 0, sizeof(ev));
 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
 
-	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
+	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
 	if (!cmd)
 		goto send_event;
 
 	if (status) {
-		err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
+		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+							mgmt_status(status));
 		goto failed;
 	}
 
-	hdev = hci_dev_get(index);
-	if (hdev) {
-		hci_dev_lock_bh(hdev);
-		update_eir(hdev);
-		hci_dev_unlock_bh(hdev);
-		hci_dev_put(hdev);
-	}
+	update_eir(hdev);
 
-	err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
 								sizeof(ev));
 	if (err < 0)
 		goto failed;
 
 send_event:
-	err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+	err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
 							cmd ? cmd->sk : NULL);
 
 failed:
@@ -2285,29 +2668,31 @@
 	return err;
 }
 
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
-								u8 status)
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+						u8 *randomizer, u8 status)
 {
 	struct pending_cmd *cmd;
 	int err;
 
-	BT_DBG("hci%u status %u", index, status);
+	BT_DBG("%s status %u", hdev->name, status);
 
-	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
 	if (!cmd)
 		return -ENOENT;
 
 	if (status) {
-		err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-									EIO);
+		err = cmd_status(cmd->sk, hdev->id,
+						MGMT_OP_READ_LOCAL_OOB_DATA,
+						mgmt_status(status));
 	} else {
 		struct mgmt_rp_read_local_oob_data rp;
 
 		memcpy(rp.hash, hash, sizeof(rp.hash));
 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
 
-		err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-							&rp, sizeof(rp));
+		err = cmd_complete(cmd->sk, hdev->id,
+						MGMT_OP_READ_LOCAL_OOB_DATA,
+						&rp, sizeof(rp));
 	}
 
 	mgmt_pending_remove(cmd);
@@ -2315,14 +2700,15 @@
 	return err;
 }
 
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
-								u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+				u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir)
 {
 	struct mgmt_ev_device_found ev;
 
 	memset(&ev, 0, sizeof(ev));
 
-	bacpy(&ev.bdaddr, bdaddr);
+	bacpy(&ev.addr.bdaddr, bdaddr);
+	ev.addr.type = link_to_mgmt(link_type, addr_type);
 	ev.rssi = rssi;
 
 	if (eir)
@@ -2331,10 +2717,10 @@
 	if (dev_class)
 		memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
 
-	return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
 {
 	struct mgmt_ev_remote_name ev;
 
@@ -2343,37 +2729,79 @@
 	bacpy(&ev.bdaddr, bdaddr);
 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
 
-	return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+	return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_discovering(u16 index, u8 discovering)
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
 {
-	return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
+	struct pending_cmd *cmd;
+	int err;
+
+	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+	if (!cmd)
+		return -ENOENT;
+
+	err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+	mgmt_pending_remove(cmd);
+
+	return err;
+}
+
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+	struct pending_cmd *cmd;
+	int err;
+
+	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+	if (!cmd)
+		return -ENOENT;
+
+	err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
+	mgmt_pending_remove(cmd);
+
+	return err;
+}
+
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+{
+	struct pending_cmd *cmd;
+
+	if (discovering)
+		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+	else
+		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+
+	if (cmd != NULL) {
+		cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
+		mgmt_pending_remove(cmd);
+	}
+
+	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
 						sizeof(discovering), NULL);
 }
 
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_ev_device_blocked ev;
 
-	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
 
 	bacpy(&ev.bdaddr, bdaddr);
 
-	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
-						cmd ? cmd->sk : NULL);
+	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+							cmd ? cmd->sk : NULL);
 }
 
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_ev_device_unblocked ev;
 
-	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
 
 	bacpy(&ev.bdaddr, bdaddr);
 
-	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
-						cmd ? cmd->sk : NULL);
+	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+							cmd ? cmd->sk : NULL);
 }
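
The mgmt hunks above repeat one conversion: helpers that used to take a bare u16 controller index now take a struct hci_dev *, and raw HCI status codes are passed through mgmt_status() before being reported to userspace. A minimal sketch of the resulting calling convention inside mgmt.c (example_op_failed is a hypothetical name; cmd_status(), mgmt_status() and hdev->id are the interfaces the hunks actually use):

/* Sketch only: report a failed operation against a specific controller.
 * hdev->id replaces the old bare index parameter, and the HCI status is
 * translated to a mgmt-layer status before it reaches the socket. */
static int example_op_failed(struct hci_dev *hdev, struct sock *sk,
			     u16 opcode, u8 hci_status)
{
	return cmd_status(sk, hdev->id, opcode, mgmt_status(hci_status));
}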
diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig
index 405a0e6..22e718b 100644
--- a/net/bluetooth/rfcomm/Kconfig
+++ b/net/bluetooth/rfcomm/Kconfig
@@ -1,6 +1,6 @@
 config BT_RFCOMM
 	tristate "RFCOMM protocol support"
-	depends on BT && BT_L2CAP
+	depends on BT
 	help
 	  RFCOMM provides connection oriented stream transport.  RFCOMM
 	  support is required for Dialup Networking, OBEX and other Bluetooth
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18..501649bf 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -51,8 +51,8 @@
 
 #define VERSION "1.11"
 
-static int disable_cfc;
-static int l2cap_ertm;
+static bool disable_cfc;
+static bool l2cap_ertm;
 static int channel_mtu = -1;
 static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
 
@@ -377,13 +377,11 @@
 static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
 {
 	struct rfcomm_dlc *d;
-	struct list_head *p;
 
-	list_for_each(p, &s->dlcs) {
-		d = list_entry(p, struct rfcomm_dlc, list);
+	list_for_each_entry(d, &s->dlcs, list)
 		if (d->dlci == dlci)
 			return d;
-	}
+
 	return NULL;
 }
 
@@ -751,7 +749,6 @@
 /* ---- RFCOMM frame sending ---- */
 static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
 {
-	struct socket *sock = s->sock;
 	struct kvec iv = { data, len };
 	struct msghdr msg;
 
@@ -759,7 +756,14 @@
 
 	memset(&msg, 0, sizeof(msg));
 
-	return kernel_sendmsg(sock, &msg, &iv, 1, len);
+	return kernel_sendmsg(s->sock, &msg, &iv, 1, len);
+}
+
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+	BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+	return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd));
 }
 
 static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
@@ -773,7 +777,7 @@
 	cmd.len  = __len8(0);
 	cmd.fcs  = __fcs2((u8 *) &cmd);
 
-	return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+	return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -787,7 +791,7 @@
 	cmd.len  = __len8(0);
 	cmd.fcs  = __fcs2((u8 *) &cmd);
 
-	return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+	return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -801,7 +805,7 @@
 	cmd.len  = __len8(0);
 	cmd.fcs  = __fcs2((u8 *) &cmd);
 
-	return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+	return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -837,7 +841,7 @@
 	cmd.len  = __len8(0);
 	cmd.fcs  = __fcs2((u8 *) &cmd);
 
-	return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+	return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -1146,6 +1150,7 @@
 			if (list_empty(&s->dlcs)) {
 				s->state = BT_DISCONN;
 				rfcomm_send_disc(s, 0);
+				rfcomm_session_clear_timer(s);
 			}
 
 			break;
@@ -2120,15 +2125,13 @@
 static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
 {
 	struct rfcomm_session *s;
-	struct list_head *pp, *p;
 
 	rfcomm_lock();
 
-	list_for_each(p, &session_list) {
-		s = list_entry(p, struct rfcomm_session, list);
-		list_for_each(pp, &s->dlcs) {
+	list_for_each_entry(s, &session_list, list) {
+		struct rfcomm_dlc *d;
+		list_for_each_entry(d, &s->dlcs, list) {
 			struct sock *sk = s->sock->sk;
-			struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
 
 			seq_printf(f, "%s %s %ld %d %d %d %d\n",
 						batostr(&bt_sk(sk)->src),
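
Several rfcomm hunks (and later bridge ones) replace open-coded list_for_each() plus list_entry() with list_for_each_entry(), which folds the container lookup into the iterator. The shape of the conversion, on a hypothetical struct item:

#include <linux/list.h>

struct item {
	int id;
	struct list_head list;	/* linked into some_list */
};

/* Old style: iterate raw positions, recover the container by hand. */
static struct item *find_old(struct list_head *some_list, int id)
{
	struct list_head *p;
	struct item *it;

	list_for_each(p, some_list) {
		it = list_entry(p, struct item, list);
		if (it->id == id)
			return it;
	}
	return NULL;
}

/* New style: the iterator hands back the container directly. */
static struct item *find_new(struct list_head *some_list, int id)
{
	struct item *it;

	list_for_each_entry(it, some_list, list)
		if (it->id == id)
			return it;

	return NULL;
}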
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5417f61..aea2bdd 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -600,6 +600,8 @@
 			break;
 		}
 
+		skb->priority = sk->sk_priority;
+
 		err = rfcomm_dlc_send(d, skb);
 		if (err < 0) {
 			kfree_skb(skb);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c258796..fa8f4de5 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -34,6 +34,7 @@
 #include <linux/capability.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -65,7 +66,7 @@
 	struct rfcomm_dlc	*dlc;
 	struct tty_struct	*tty;
 	wait_queue_head_t       wait;
-	struct tasklet_struct   wakeup_task;
+	struct work_struct	wakeup_task;
 
 	struct device		*tty_dev;
 
@@ -81,7 +82,7 @@
 static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
 static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
 
-static void rfcomm_tty_wakeup(unsigned long arg);
+static void rfcomm_tty_wakeup(struct work_struct *work);
 
 /* ---- Device functions ---- */
 static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -133,13 +134,10 @@
 static struct rfcomm_dev *__rfcomm_dev_get(int id)
 {
 	struct rfcomm_dev *dev;
-	struct list_head  *p;
 
-	list_for_each(p, &rfcomm_dev_list) {
-		dev = list_entry(p, struct rfcomm_dev, list);
+	list_for_each_entry(dev, &rfcomm_dev_list, list)
 		if (dev->id == id)
 			return dev;
-	}
 
 	return NULL;
 }
@@ -197,7 +195,7 @@
 
 static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
 {
-	struct rfcomm_dev *dev;
+	struct rfcomm_dev *dev, *entry;
 	struct list_head *head = &rfcomm_dev_list, *p;
 	int err = 0;
 
@@ -212,8 +210,8 @@
 	if (req->dev_id < 0) {
 		dev->id = 0;
 
-		list_for_each(p, &rfcomm_dev_list) {
-			if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+		list_for_each_entry(entry, &rfcomm_dev_list, list) {
+			if (entry->id != dev->id)
 				break;
 
 			dev->id++;
@@ -222,9 +220,7 @@
 	} else {
 		dev->id = req->dev_id;
 
-		list_for_each(p, &rfcomm_dev_list) {
-			struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+		list_for_each_entry(entry, &rfcomm_dev_list, list) {
 			if (entry->id == dev->id) {
 				err = -EADDRINUSE;
 				goto out;
@@ -257,7 +253,7 @@
 	atomic_set(&dev->opened, 0);
 
 	init_waitqueue_head(&dev->wait);
-	tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
+	INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
 
 	skb_queue_head_init(&dev->pending);
 
@@ -351,7 +347,7 @@
 	struct rfcomm_dev *dev = (void *) skb->sk;
 	atomic_sub(skb->truesize, &dev->wmem_alloc);
 	if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
-		tasklet_schedule(&dev->wakeup_task);
+		queue_work(system_nrt_wq, &dev->wakeup_task);
 	rfcomm_dev_put(dev);
 }
 
@@ -455,9 +451,9 @@
 
 static int rfcomm_get_dev_list(void __user *arg)
 {
+	struct rfcomm_dev *dev;
 	struct rfcomm_dev_list_req *dl;
 	struct rfcomm_dev_info *di;
-	struct list_head *p;
 	int n = 0, size, err;
 	u16 dev_num;
 
@@ -479,8 +475,7 @@
 
 	read_lock_bh(&rfcomm_dev_lock);
 
-	list_for_each(p, &rfcomm_dev_list) {
-		struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
+	list_for_each_entry(dev, &rfcomm_dev_list, list) {
 		if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
 			continue;
 		(di + n)->id      = dev->id;
@@ -635,9 +630,10 @@
 }
 
 /* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
+static void rfcomm_tty_wakeup(struct work_struct *work)
 {
-	struct rfcomm_dev *dev = (void *) arg;
+	struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
+								wakeup_task);
 	struct tty_struct *tty = dev->tty;
 	if (!tty)
 		return;
@@ -762,7 +758,7 @@
 		rfcomm_dlc_close(dev->dlc, 0);
 
 		clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
-		tasklet_kill(&dev->wakeup_task);
+		cancel_work_sync(&dev->wakeup_task);
 
 		rfcomm_dlc_lock(dev->dlc);
 		tty->driver_data = NULL;
@@ -1155,9 +1151,11 @@
 
 int __init rfcomm_init_ttys(void)
 {
+	int error;
+
 	rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
 	if (!rfcomm_tty_driver)
-		return -1;
+		return -ENOMEM;
 
 	rfcomm_tty_driver->owner	= THIS_MODULE;
 	rfcomm_tty_driver->driver_name	= "rfcomm";
@@ -1172,10 +1170,11 @@
 	rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
 	tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
 
-	if (tty_register_driver(rfcomm_tty_driver)) {
+	error = tty_register_driver(rfcomm_tty_driver);
+	if (error) {
 		BT_ERR("Can't register RFCOMM TTY driver");
 		put_tty_driver(rfcomm_tty_driver);
-		return -1;
+		return error;
 	}
 
 	BT_INFO("RFCOMM TTY layer initialized");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a324b00..5dc2f21 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -51,7 +51,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/sco.h>
 
-static int disable_esco;
+static bool disable_esco;
 
 static const struct proto_ops sco_sock_ops;
 
@@ -189,7 +189,7 @@
 	if (!hdev)
 		return -EHOSTUNREACH;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (lmp_esco_capable(hdev) && !disable_esco)
 		type = ESCO_LINK;
@@ -225,7 +225,7 @@
 	}
 
 done:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -893,15 +893,12 @@
 }
 
 /* ----- SCO interface with lower layer (HCI) ----- */
-static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	register struct sock *sk;
 	struct hlist_node *node;
 	int lm = 0;
 
-	if (type != SCO_LINK && type != ESCO_LINK)
-		return -EINVAL;
-
 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
 
 	/* Find listening sockets */
@@ -921,13 +918,9 @@
 	return lm;
 }
 
-static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
-	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
-		return -EINVAL;
-
 	if (!status) {
 		struct sco_conn *conn;
 
@@ -940,19 +933,15 @@
 	return 0;
 }
 
-static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 {
 	BT_DBG("hcon %p reason %d", hcon, reason);
 
-	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
-		return -EINVAL;
-
 	sco_conn_del(hcon, bt_to_errno(reason));
-
 	return 0;
 }
 
-static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
 {
 	struct sco_conn *conn = hcon->sco_data;
 
@@ -1028,15 +1017,6 @@
 	.create	= sco_sock_create,
 };
 
-static struct hci_proto sco_hci_proto = {
-	.name		= "SCO",
-	.id		= HCI_PROTO_SCO,
-	.connect_ind	= sco_connect_ind,
-	.connect_cfm	= sco_connect_cfm,
-	.disconn_cfm	= sco_disconn_cfm,
-	.recv_scodata	= sco_recv_scodata
-};
-
 int __init sco_init(void)
 {
 	int err;
@@ -1051,13 +1031,6 @@
 		goto error;
 	}
 
-	err = hci_register_proto(&sco_hci_proto);
-	if (err < 0) {
-		BT_ERR("SCO protocol registration failed");
-		bt_sock_unregister(BTPROTO_SCO);
-		goto error;
-	}
-
 	if (bt_debugfs) {
 		sco_debugfs = debugfs_create_file("sco", 0444,
 					bt_debugfs, NULL, &sco_debugfs_fops);
@@ -1081,9 +1054,6 @@
 	if (bt_sock_unregister(BTPROTO_SCO) < 0)
 		BT_ERR("SCO socket unregistration failed");
 
-	if (hci_unregister_proto(&sco_hci_proto) < 0)
-		BT_ERR("SCO protocol unregistration failed");
-
 	proto_unregister(&sco_proto);
 }
 
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 759b635..32c47de 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/mgmt.h>
 #include <net/bluetooth/smp.h>
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
@@ -181,30 +182,53 @@
 	if (!skb)
 		return;
 
-	hci_send_acl(conn->hcon, skb, 0);
+	skb->priority = HCI_PRIO_MAX;
+	hci_send_acl(conn->hchan, skb, 0);
 
-	mod_timer(&conn->security_timer, jiffies +
+	cancel_delayed_work_sync(&conn->security_timer);
+	schedule_delayed_work(&conn->security_timer,
 					msecs_to_jiffies(SMP_TIMEOUT));
 }
 
+static __u8 authreq_to_seclevel(__u8 authreq)
+{
+	if (authreq & SMP_AUTH_MITM)
+		return BT_SECURITY_HIGH;
+	else
+		return BT_SECURITY_MEDIUM;
+}
+
+static __u8 seclevel_to_authreq(__u8 sec_level)
+{
+	switch (sec_level) {
+	case BT_SECURITY_HIGH:
+		return SMP_AUTH_MITM | SMP_AUTH_BONDING;
+	case BT_SECURITY_MEDIUM:
+		return SMP_AUTH_BONDING;
+	default:
+		return SMP_AUTH_NONE;
+	}
+}
+
 static void build_pairing_cmd(struct l2cap_conn *conn,
 				struct smp_cmd_pairing *req,
 				struct smp_cmd_pairing *rsp,
 				__u8 authreq)
 {
-	u8 dist_keys;
+	u8 dist_keys = 0;
 
-	dist_keys = 0;
 	if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
 		dist_keys = SMP_DIST_ENC_KEY;
 		authreq |= SMP_AUTH_BONDING;
+	} else {
+		authreq &= ~SMP_AUTH_BONDING;
 	}
 
 	if (rsp == NULL) {
 		req->io_capability = conn->hcon->io_capability;
 		req->oob_flag = SMP_OOB_NOT_PRESENT;
 		req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
-		req->init_key_dist = dist_keys;
+		req->init_key_dist = 0;
 		req->resp_key_dist = dist_keys;
 		req->auth_req = authreq;
 		return;
@@ -213,7 +237,7 @@
 	rsp->io_capability = conn->hcon->io_capability;
 	rsp->oob_flag = SMP_OOB_NOT_PRESENT;
 	rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
-	rsp->init_key_dist = req->init_key_dist & dist_keys;
+	rsp->init_key_dist = 0;
 	rsp->resp_key_dist = req->resp_key_dist & dist_keys;
 	rsp->auth_req = authreq;
 }
@@ -231,6 +255,107 @@
 	return 0;
 }
 
+static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
+{
+	if (send)
+		smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+								&reason);
+
+	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
+	mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason);
+	cancel_delayed_work_sync(&conn->security_timer);
+	smp_chan_destroy(conn);
+}
+
+#define JUST_WORKS	0x00
+#define JUST_CFM	0x01
+#define REQ_PASSKEY	0x02
+#define CFM_PASSKEY	0x03
+#define REQ_OOB		0x04
+#define OVERLAP		0xFF
+
+static const u8 gen_method[5][5] = {
+	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
+	{ JUST_WORKS,  JUST_CFM,    JUST_WORKS,  JUST_WORKS, JUST_CFM    },
+	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP     },
+};
+
+static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+						u8 local_io, u8 remote_io)
+{
+	struct hci_conn *hcon = conn->hcon;
+	struct smp_chan *smp = conn->smp_chan;
+	u8 method;
+	u32 passkey = 0;
+	int ret = 0;
+
+	/* Initialize key for JUST WORKS */
+	memset(smp->tk, 0, sizeof(smp->tk));
+	clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+
+	BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
+
+	/* If neither side wants MITM, use JUST WORKS */
+	/* If either side has unknown io_caps, use JUST WORKS */
+	/* Otherwise, look up method from the table */
+	if (!(auth & SMP_AUTH_MITM) ||
+			local_io > SMP_IO_KEYBOARD_DISPLAY ||
+			remote_io > SMP_IO_KEYBOARD_DISPLAY)
+		method = JUST_WORKS;
+	else
+		method = gen_method[local_io][remote_io];
+
+	/* If not bonding, don't ask user to confirm a Zero TK */
+	if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
+		method = JUST_WORKS;
+
+	/* If Just Works, Continue with Zero TK */
+	if (method == JUST_WORKS) {
+		set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+		return 0;
+	}
+
+	/* Anything other than Just Works/Confirm results in MITM Authentication */
+	if (method != JUST_CFM)
+		set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags);
+
+	/* If both devices have Keyboard-Display I/O, the master
+	 * Confirms and the slave Enters the passkey.
+	 */
+	if (method == OVERLAP) {
+		if (hcon->link_mode & HCI_LM_MASTER)
+			method = CFM_PASSKEY;
+		else
+			method = REQ_PASSKEY;
+	}
+
+	/* Generate random passkey. Not valid until confirmed. */
+	if (method == CFM_PASSKEY) {
+		u8 key[16];
+
+		memset(key, 0, sizeof(key));
+		get_random_bytes(&passkey, sizeof(passkey));
+		passkey %= 1000000;
+		put_unaligned_le32(passkey, key);
+		swap128(key, smp->tk);
+		BT_DBG("PassKey: %d", passkey);
+	}
+
+	hci_dev_lock(hcon->hdev);
+
+	if (method == REQ_PASSKEY)
+		ret = mgmt_user_passkey_request(hcon->hdev, conn->dst);
+	else
+		ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+						cpu_to_le32(passkey), 0);
+
+	hci_dev_unlock(hcon->hdev);
+
+	return ret;
+}
+
 static void confirm_work(struct work_struct *work)
 {
 	struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
@@ -263,14 +388,15 @@
 		goto error;
 	}
 
+	clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+
 	swap128(res, cp.confirm_val);
 	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
 
 	return;
 
 error:
-	smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-	smp_chan_destroy(conn);
+	smp_failure(conn, reason, 1);
 }
 
 static void random_work(struct work_struct *work)
@@ -353,8 +479,7 @@
 	return;
 
 error:
-	smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-	smp_chan_destroy(conn);
+	smp_failure(conn, reason, 1);
 }
 
 static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -370,6 +495,7 @@
 
 	smp->conn = conn;
 	conn->smp_chan = smp;
+	conn->hcon->smp_conn = conn;
 
 	hci_conn_hold(conn->hcon);
 
@@ -378,19 +504,73 @@
 
 void smp_chan_destroy(struct l2cap_conn *conn)
 {
-	kfree(conn->smp_chan);
+	struct smp_chan *smp = conn->smp_chan;
+
+	clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+
+	if (smp->tfm)
+		crypto_free_blkcipher(smp->tfm);
+
+	kfree(smp);
+	conn->smp_chan = NULL;
+	conn->hcon->smp_conn = NULL;
 	hci_conn_put(conn->hcon);
 }
 
+int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
+{
+	struct l2cap_conn *conn = hcon->smp_conn;
+	struct smp_chan *smp;
+	u32 value;
+	u8 key[16];
+
+	BT_DBG("");
+
+	if (!conn)
+		return -ENOTCONN;
+
+	smp = conn->smp_chan;
+
+	switch (mgmt_op) {
+	case MGMT_OP_USER_PASSKEY_REPLY:
+		value = le32_to_cpu(passkey);
+		memset(key, 0, sizeof(key));
+		BT_DBG("PassKey: %d", value);
+		put_unaligned_le32(value, key);
+		swap128(key, smp->tk);
+		/* Fall Through */
+	case MGMT_OP_USER_CONFIRM_REPLY:
+		set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+		break;
+	case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+	case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+		smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
+		return 0;
+	default:
+		smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
+		return -EOPNOTSUPP;
+	}
+
+	/* If it is our turn to send Pairing Confirm, do so now */
+	if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags))
+		queue_work(hcon->hdev->workqueue, &smp->confirm);
+
+	return 0;
+}
+
 static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
 	struct smp_chan *smp;
 	u8 key_size;
+	u8 auth = SMP_AUTH_NONE;
 	int ret;
 
 	BT_DBG("conn %p", conn);
 
+	if (conn->hcon->link_mode & HCI_LM_MASTER)
+		return SMP_CMD_NOTSUPP;
+
 	if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
 		smp = smp_chan_create(conn);
 
@@ -400,19 +580,16 @@
 	memcpy(&smp->preq[1], req, sizeof(*req));
 	skb_pull(skb, sizeof(*req));
 
-	if (req->oob_flag)
-		return SMP_OOB_NOT_AVAIL;
+	/* We didn't start the pairing, so match remote */
+	if (req->auth_req & SMP_AUTH_BONDING)
+		auth = req->auth_req;
 
-	/* We didn't start the pairing, so no requirements */
-	build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE);
+	build_pairing_cmd(conn, req, &rsp, auth);
 
 	key_size = min(req->max_key_size, rsp.max_key_size);
 	if (check_enc_key_size(conn, key_size))
 		return SMP_ENC_KEY_SIZE;
 
-	/* Just works */
-	memset(smp->tk, 0, sizeof(smp->tk));
-
 	ret = smp_rand(smp->prnd);
 	if (ret)
 		return SMP_UNSPECIFIED;
@@ -422,6 +599,11 @@
 
 	smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
 
+	/* Request setup of TK */
+	ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
+	if (ret)
+		return SMP_UNSPECIFIED;
+
 	return 0;
 }
 
@@ -430,11 +612,14 @@
 	struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
 	struct smp_chan *smp = conn->smp_chan;
 	struct hci_dev *hdev = conn->hcon->hdev;
-	u8 key_size;
+	u8 key_size, auth = SMP_AUTH_NONE;
 	int ret;
 
 	BT_DBG("conn %p", conn);
 
+	if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+		return SMP_CMD_NOTSUPP;
+
 	skb_pull(skb, sizeof(*rsp));
 
 	req = (void *) &smp->preq[1];
@@ -443,12 +628,6 @@
 	if (check_enc_key_size(conn, key_size))
 		return SMP_ENC_KEY_SIZE;
 
-	if (rsp->oob_flag)
-		return SMP_OOB_NOT_AVAIL;
-
-	/* Just works */
-	memset(smp->tk, 0, sizeof(smp->tk));
-
 	ret = smp_rand(smp->prnd);
 	if (ret)
 		return SMP_UNSPECIFIED;
@@ -456,6 +635,22 @@
 	smp->prsp[0] = SMP_CMD_PAIRING_RSP;
 	memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
 
+	if ((req->auth_req & SMP_AUTH_BONDING) &&
+			(rsp->auth_req & SMP_AUTH_BONDING))
+		auth = SMP_AUTH_BONDING;
+
+	auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
+
+	ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+	if (ret)
+		return SMP_UNSPECIFIED;
+
+	set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+
+	/* Can't compose response until we have been confirmed */
+	if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
+		return 0;
+
 	queue_work(hdev->workqueue, &smp->confirm);
 
 	return 0;
@@ -477,8 +672,10 @@
 		swap128(smp->prnd, random);
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
 								random);
-	} else {
+	} else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
 		queue_work(hdev->workqueue, &smp->confirm);
+	} else {
+		set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
 	}
 
 	return 0;
@@ -531,7 +728,7 @@
 
 	BT_DBG("conn %p", conn);
 
-	hcon->pending_sec_level = BT_SECURITY_MEDIUM;
+	hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
 
 	if (smp_ltk_encrypt(conn))
 		return 0;
@@ -558,6 +755,7 @@
 {
 	struct hci_conn *hcon = conn->hcon;
 	struct smp_chan *smp = conn->smp_chan;
+	__u8 authreq;
 
 	BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
 
@@ -578,18 +776,22 @@
 		return 0;
 
 	smp = smp_chan_create(conn);
+	if (!smp)
+		return 1;
+
+	authreq = seclevel_to_authreq(sec_level);
 
 	if (hcon->link_mode & HCI_LM_MASTER) {
 		struct smp_cmd_pairing cp;
 
-		build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE);
+		build_pairing_cmd(conn, &cp, NULL, authreq);
 		smp->preq[0] = SMP_CMD_PAIRING_REQ;
 		memcpy(&smp->preq[1], &cp, sizeof(cp));
 
 		smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
 	} else {
 		struct smp_cmd_security_req cp;
-		cp.auth_req = SMP_AUTH_NONE;
+		cp.auth_req = authreq;
 		smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
 	}
 
@@ -618,7 +820,7 @@
 
 	skb_pull(skb, sizeof(*rp));
 
-	hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size,
+	hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
 						rp->ediv, rp->rand, smp->tk);
 
 	smp_distribute_keys(conn, 1);
@@ -646,6 +848,7 @@
 		break;
 
 	case SMP_CMD_PAIRING_FAIL:
+		smp_failure(conn, skb->data[0], 0);
 		reason = 0;
 		err = -EPERM;
 		break;
@@ -691,8 +894,7 @@
 
 done:
 	if (reason)
-		smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
-								&reason);
+		smp_failure(conn, reason, 1);
 
 	kfree_skb(skb);
 	return err;
@@ -781,7 +983,7 @@
 
 	if (conn->hcon->out || force) {
 		clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
-		del_timer(&conn->security_timer);
+		cancel_delayed_work_sync(&conn->security_timer);
 		smp_chan_destroy(conn);
 	}
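
The new tk_request() above reduces to a small decision: no MITM requirement or IO capabilities outside the table means Just Works; otherwise the local and remote IO capabilities index gen_method[][]; and Just Confirm without bonding also falls back to Just Works. A condensed restatement of that selection, using the same names as the diff:

/* Condensed from tk_request(); gen_method, SMP_AUTH_* and
 * SMP_IO_KEYBOARD_DISPLAY are the values used in the hunks above. */
static u8 select_tk_method(u8 auth, u8 local_io, u8 remote_io)
{
	u8 method;

	/* No MITM requested, or IO caps outside the table: Just Works */
	if (!(auth & SMP_AUTH_MITM) ||
	    local_io > SMP_IO_KEYBOARD_DISPLAY ||
	    remote_io > SMP_IO_KEYBOARD_DISPLAY)
		return JUST_WORKS;

	method = gen_method[local_io][remote_io];

	/* Don't ask the user to confirm an all-zero TK when not bonding */
	if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
		return JUST_WORKS;

	return method;
}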
 
diff --git a/net/bridge/br.c b/net/bridge/br.c
index f20c4fd..ba780cc 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -62,7 +62,7 @@
 
 	brioctl_set(br_ioctl_deviceless_stub);
 
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 	br_fdb_test_addr_hook = br_fdb_test_addr;
 #endif
 
@@ -93,7 +93,7 @@
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
 	br_netfilter_fini();
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 	br_fdb_test_addr_hook = NULL;
 #endif
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index feb77ea..71773b0 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -170,8 +170,11 @@
 		return -EINVAL;
 
 	spin_lock_bh(&br->lock);
-	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-	br_stp_change_bridge_id(br, addr->sa_data);
+	if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+		br_fdb_change_mac_address(br, addr->sa_data);
+		br_stp_change_bridge_id(br, addr->sa_data);
+	}
 	br->flags |= BR_SET_MAC_ADDR;
 	spin_unlock_bh(&br->lock);
 
@@ -186,7 +189,8 @@
 	strcpy(info->bus_info, "N/A");
 }
 
-static u32 br_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t br_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
@@ -341,10 +345,10 @@
 	dev->priv_flags = IFF_EBRIDGE;
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
+			NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
 			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-			   NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
 			   NETIF_F_HW_VLAN_TX;
 
 	br->dev = dev;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c8e7861..f963f6b 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,7 +28,8 @@
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
-static void fdb_notify(const struct net_bridge_fdb_entry *, int);
+static void fdb_notify(struct net_bridge *br,
+		       const struct net_bridge_fdb_entry *, int);
 
 static u32 fdb_salt __read_mostly;
 
@@ -80,10 +81,10 @@
 	kmem_cache_free(br_fdb_cache, ent);
 }
 
-static inline void fdb_delete(struct net_bridge_fdb_entry *f)
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
-	fdb_notify(f, RTM_DELNEIGH);
 	hlist_del_rcu(&f->hlist);
+	fdb_notify(br, f, RTM_DELNEIGH);
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
 
@@ -114,7 +115,7 @@
 				}
 
 				/* delete old one */
-				fdb_delete(f);
+				fdb_delete(br, f);
 				goto insert;
 			}
 		}
@@ -126,6 +127,18 @@
 	spin_unlock_bh(&br->hash_lock);
 }
 
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+	struct net_bridge_fdb_entry *f;
+
+	/* If old entry was unassociated with any port, then delete it. */
+	f = __br_fdb_get(br, br->dev->dev_addr);
+	if (f && f->is_local && !f->dst)
+		fdb_delete(br, f);
+
+	fdb_insert(br, NULL, newaddr);
+}
+
 void br_fdb_cleanup(unsigned long _data)
 {
 	struct net_bridge *br = (struct net_bridge *)_data;
@@ -144,7 +157,7 @@
 				continue;
 			this_timer = f->updated + delay;
 			if (time_before_eq(this_timer, jiffies))
-				fdb_delete(f);
+				fdb_delete(br, f);
 			else if (time_before(this_timer, next_timer))
 				next_timer = this_timer;
 		}
@@ -165,7 +178,7 @@
 		struct hlist_node *h, *n;
 		hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
 			if (!f->is_static)
-				fdb_delete(f);
+				fdb_delete(br, f);
 		}
 	}
 	spin_unlock_bh(&br->hash_lock);
@@ -209,7 +222,7 @@
 				}
 			}
 
-			fdb_delete(f);
+			fdb_delete(br, f);
 		skip_delete: ;
 		}
 	}
@@ -234,7 +247,7 @@
 	return NULL;
 }
 
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 /* Interface used by ATM LANE hook to test
  * if an addr is on some other bridge port */
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
@@ -249,7 +262,7 @@
 		ret = 0;
 	else {
 		fdb = __br_fdb_get(port->br, addr);
-		ret = fdb && fdb->dst->dev != dev &&
+		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
 			fdb->dst->state == BR_STATE_FORWARDING;
 	}
 	rcu_read_unlock();
@@ -281,6 +294,10 @@
 			if (has_expired(br, f))
 				continue;
 
+			/* ignore pseudo entry for local MAC address */
+			if (!f->dst)
+				continue;
+
 			if (skip) {
 				--skip;
 				continue;
@@ -347,7 +364,6 @@
 		fdb->is_static = 0;
 		fdb->updated = fdb->used = jiffies;
 		hlist_add_head_rcu(&fdb->hlist, head);
-		fdb_notify(fdb, RTM_NEWNEIGH);
 	}
 	return fdb;
 }
@@ -371,7 +387,7 @@
 		br_warn(br, "adding interface %s with same address "
 		       "as a received packet\n",
 		       source->dev->name);
-		fdb_delete(fdb);
+		fdb_delete(br, fdb);
 	}
 
 	fdb = fdb_create(head, source, addr);
@@ -379,6 +395,7 @@
 		return -ENOMEM;
 
 	fdb->is_local = fdb->is_static = 1;
+	fdb_notify(br, fdb, RTM_NEWNEIGH);
 	return 0;
 }
 
@@ -424,9 +441,11 @@
 		}
 	} else {
 		spin_lock(&br->hash_lock);
-		if (likely(!fdb_find(head, addr)))
-			fdb_create(head, source, addr);
-
+		if (likely(!fdb_find(head, addr))) {
+			fdb = fdb_create(head, source, addr);
+			if (fdb)
+				fdb_notify(br, fdb, RTM_NEWNEIGH);
+		}
 		/* else  we lose race and someone else inserts
 		 * it first, don't bother updating
 		 */
@@ -446,7 +465,7 @@
 		return NUD_REACHABLE;
 }
 
-static int fdb_fill_info(struct sk_buff *skb,
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 			 const struct net_bridge_fdb_entry *fdb,
 			 u32 pid, u32 seq, int type, unsigned int flags)
 {
@@ -459,14 +478,13 @@
 	if (nlh == NULL)
 		return -EMSGSIZE;
 
-
 	ndm = nlmsg_data(nlh);
 	ndm->ndm_family	 = AF_BRIDGE;
 	ndm->ndm_pad1    = 0;
 	ndm->ndm_pad2    = 0;
 	ndm->ndm_flags	 = 0;
 	ndm->ndm_type	 = 0;
-	ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
 	ndm->ndm_state   = fdb_to_nud(fdb);
 
 	NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
@@ -491,9 +509,10 @@
 		+ nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+static void fdb_notify(struct net_bridge *br,
+		       const struct net_bridge_fdb_entry *fdb, int type)
 {
-	struct net *net = dev_net(fdb->dst->dev);
+	struct net *net = dev_net(br->dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
@@ -501,7 +520,7 @@
 	if (skb == NULL)
 		goto errout;
 
-	err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -538,7 +557,7 @@
 				if (idx < cb->args[0])
 					goto skip;
 
-				if (fdb_fill_info(skb, f,
+				if (fdb_fill_info(skb, br, f,
 						  NETLINK_CB(cb->skb).pid,
 						  cb->nlh->nlmsg_seq,
 						  RTM_NEWNEIGH,
@@ -556,7 +575,7 @@
 	return skb->len;
 }
 
-/* Create new static fdb entry */
+/* Update (create or replace) forwarding database entry */
 static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 			 __u16 state, __u16 flags)
 {
@@ -572,19 +591,25 @@
 		fdb = fdb_create(head, source, addr);
 		if (!fdb)
 			return -ENOMEM;
+		fdb_notify(br, fdb, RTM_NEWNEIGH);
 	} else {
 		if (flags & NLM_F_EXCL)
 			return -EEXIST;
-
-		if (flags & NLM_F_REPLACE)
-			fdb->updated = fdb->used = jiffies;
-		fdb->is_local = fdb->is_static = 0;
 	}
 
-	if (state & NUD_PERMANENT)
-		fdb->is_local = fdb->is_static = 1;
-	else if (state & NUD_NOARP)
-		fdb->is_static = 1;
+	if (fdb_to_nud(fdb) != state) {
+		if (state & NUD_PERMANENT)
+			fdb->is_local = fdb->is_static = 1;
+		else if (state & NUD_NOARP) {
+			fdb->is_local = 0;
+			fdb->is_static = 1;
+		} else
+			fdb->is_local = fdb->is_static = 0;
+
+		fdb->updated = fdb->used = jiffies;
+		fdb_notify(br, fdb, RTM_NEWNEIGH);
+	}
+
 	return 0;
 }
 
@@ -627,6 +652,11 @@
 		return -EINVAL;
 	}
 
+	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+		return -EINVAL;
+	}
+
 	p = br_port_get_rtnl(dev);
 	if (p == NULL) {
 		pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -634,9 +664,15 @@
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&p->br->hash_lock);
-	err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
-	spin_unlock_bh(&p->br->hash_lock);
+	if (ndm->ndm_flags & NTF_USE) {
+		rcu_read_lock();
+		br_fdb_update(p->br, p, addr);
+		rcu_read_unlock();
+	} else {
+		spin_lock_bh(&p->br->hash_lock);
+		err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+		spin_unlock_bh(&p->br->hash_lock);
+	}
 
 	return err;
 }
@@ -651,7 +687,7 @@
 	if (!fdb)
 		return -ENOENT;
 
-	fdb_delete(fdb);
+	fdb_delete(p->br, fdb);
 	return 0;
 }
 
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index ee64287..61f6534 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -98,7 +98,7 @@
 /* called with rcu_read_lock */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	if (should_deliver(to, skb)) {
+	if (to && should_deliver(to, skb)) {
 		__br_deliver(to, skb);
 		return;
 	}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f603e5b..0a942fb 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -296,10 +296,11 @@
 /*
  * Recomputes features using slave's features
  */
-u32 br_features_recompute(struct net_bridge *br, u32 features)
+netdev_features_t br_features_recompute(struct net_bridge *br,
+	netdev_features_t features)
 {
 	struct net_bridge_port *p;
-	u32 mask;
+	netdev_features_t mask;
 
 	if (list_empty(&br->port_list))
 		return features;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a5f4e57..568d5bf 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/mld.h>
 #include <net/addrconf.h>
@@ -36,7 +36,7 @@
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
 	if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
@@ -52,7 +52,7 @@
 	switch (a->proto) {
 	case htons(ETH_P_IP):
 		return a->u.ip4 == b->u.ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
 		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
 #endif
@@ -65,7 +65,7 @@
 	return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
 				const struct in6_addr *ip)
 {
@@ -79,7 +79,7 @@
 	switch (ip->proto) {
 	case htons(ETH_P_IP):
 		return __br_ip4_hash(mdb, ip->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
 		return __br_ip6_hash(mdb, &ip->u.ip6);
 #endif
@@ -121,13 +121,13 @@
 	return br_mdb_ip_get(mdb, &br_dst);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
 {
 	struct br_ip br_dst;
 
-	ipv6_addr_copy(&br_dst.u.ip6, dst);
+	br_dst.u.ip6 = *dst;
 	br_dst.proto = htons(ETH_P_IPV6);
 
 	return br_mdb_ip_get(mdb, &br_dst);
@@ -152,9 +152,9 @@
 	case htons(ETH_P_IP):
 		ip.u.ip4 = ip_hdr(skb)->daddr;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
-		ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+		ip.u.ip6 = ipv6_hdr(skb)->daddr;
 		break;
 #endif
 	default:
@@ -411,7 +411,7 @@
 	return skb;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 						    const struct in6_addr *group)
 {
@@ -474,7 +474,7 @@
 	mldq->mld_cksum = 0;
 	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
 	mldq->mld_reserved = 0;
-	ipv6_addr_copy(&mldq->mld_mca, group);
+	mldq->mld_mca = *group;
 
 	/* checksum */
 	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -496,7 +496,7 @@
 	switch (addr->proto) {
 	case htons(ETH_P_IP):
 		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
 		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
 #endif
@@ -773,7 +773,7 @@
 	return br_multicast_add_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      const struct in6_addr *group)
@@ -783,7 +783,7 @@
 	if (!ipv6_is_transient_multicast(group))
 		return 0;
 
-	ipv6_addr_copy(&br_group.u.ip6, group);
+	br_group.u.ip6 = *group;
 	br_group.proto = htons(ETH_P_IPV6);
 
 	return br_multicast_add_group(br, port, &br_group);
@@ -845,7 +845,7 @@
 	br_group.proto = htons(ETH_P_IP);
 	__br_multicast_send_query(br, port, &br_group);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	br_group.proto = htons(ETH_P_IPV6);
 	__br_multicast_send_query(br, port, &br_group);
 #endif
@@ -989,7 +989,7 @@
 	return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 					struct net_bridge_port *port,
 					struct sk_buff *skb)
@@ -1185,7 +1185,7 @@
 	return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_query(struct net_bridge *br,
 				  struct net_bridge_port *port,
 				  struct sk_buff *skb)
@@ -1334,7 +1334,7 @@
 	br_multicast_leave_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 struct net_bridge_port *port,
 					 const struct in6_addr *group)
@@ -1344,7 +1344,7 @@
 	if (!ipv6_is_transient_multicast(group))
 		return;
 
-	ipv6_addr_copy(&br_group.u.ip6, group);
+	br_group.u.ip6 = *group;
 	br_group.proto = htons(ETH_P_IPV6);
 
 	br_multicast_leave_group(br, port, &br_group);
@@ -1449,7 +1449,7 @@
 	return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_multicast_ipv6_rcv(struct net_bridge *br,
 				 struct net_bridge_port *port,
 				 struct sk_buff *skb)
@@ -1458,6 +1458,7 @@
 	const struct ipv6hdr *ip6h;
 	u8 icmp6_type;
 	u8 nexthdr;
+	__be16 frag_off;
 	unsigned len;
 	int offset;
 	int err;
@@ -1483,7 +1484,7 @@
 		return -EINVAL;
 
 	nexthdr = ip6h->nexthdr;
-	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
 
 	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
 		return 0;
@@ -1595,7 +1596,7 @@
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		return br_multicast_ipv4_rcv(br, port, skb);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
 		return br_multicast_ipv6_rcv(br, port, skb);
 #endif
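
The bridge hunks convert the two-part preprocessor idiom to IS_ENABLED(), which evaluates true whether the option is built in or built as a module:

#include <linux/kconfig.h>

/* Old form: two checks per option. */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* ... IPv6-only code ... */
#endif

/* New form: one macro covering both =y and =m. */
#if IS_ENABLED(CONFIG_IPV6)
/* ... IPv6-only code ... */
#endif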
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d6ec372..8412247 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@
 	return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
 	.family =		AF_INET,
 	.protocol =		cpu_to_be16(ETH_P_IP),
 	.update_pmtu =		fake_update_pmtu,
 	.cow_metrics =		fake_cow_metrics,
 	.neigh_lookup =		fake_neigh_lookup,
+	.mtu =			fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags	= DST_NOXFRM;
+	rt->dst.flags	= DST_NOXFRM | DST_NOPEER;
 	rt->dst.ops = &fake_dst_ops;
 }
 
@@ -356,7 +362,7 @@
 	if (!skb->dev)
 		goto free_skb;
 	dst = skb_dst(skb);
-	neigh = dst_get_neighbour(dst);
+	neigh = dst_get_neighbour_noref(dst);
 	if (neigh->hh.hh_len) {
 		neigh_hh_bridge(&neigh->hh, skb);
 		skb->dev = nf_bridge->physindev;
@@ -807,7 +813,7 @@
 	return NF_STOLEN;
 }
 
-#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	int ret;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d7d6fb0..0b67a63 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -56,7 +56,7 @@
 {
 	union {
 		__be32	ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct in6_addr ip6;
 #endif
 	} u;
@@ -348,6 +348,7 @@
 extern void br_fdb_flush(struct net_bridge *br);
 extern void br_fdb_changeaddr(struct net_bridge_port *p,
 			      const unsigned char *newaddr);
+extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 extern void br_fdb_cleanup(unsigned long arg);
 extern void br_fdb_delete_by_port(struct net_bridge *br,
 				  const struct net_bridge_port *p, int do_all);
@@ -387,7 +388,8 @@
 extern int br_del_if(struct net_bridge *br,
 	      struct net_device *dev);
 extern int br_min_mtu(const struct net_bridge *br);
-extern u32 br_features_recompute(struct net_bridge *br, u32 features);
+extern netdev_features_t br_features_recompute(struct net_bridge *br,
+	netdev_features_t features);
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
@@ -535,7 +537,7 @@
 extern unsigned long br_timer_value(const struct timer_list *timer);
 
 /* br.c */
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
 extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr);
 #endif
 
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 2ed0056..99c8566 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -55,9 +55,10 @@
 		return false;
 	if (info->bitmask & EBT_IP6_PROTO) {
 		uint8_t nexthdr = ih6->nexthdr;
+		__be16 frag_off;
 		int offset_ph;
 
-		offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
+		offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
 		if (offset_ph == -1)
 			return false;
 		if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 6e5a8bb..f88ee53 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -107,12 +107,13 @@
 		goto out;
 	}
 
-#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6)
 	if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
 	   htons(ETH_P_IPV6)) {
 		const struct ipv6hdr *ih;
 		struct ipv6hdr _iph;
 		uint8_t nexthdr;
+		__be16 frag_off;
 		int offset_ph;
 
 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
@@ -123,7 +124,7 @@
 		printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
 		       &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
 		nexthdr = ih->nexthdr;
-		offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
+		offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
 		if (offset_ph == -1)
 			goto out;
 		print_ports(skb, nexthdr, offset_ph);
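
ipv6_skip_exthdr() gains a fourth argument in this series: callers now pass a __be16 pointer that receives the fragment offset, as the ebtables hunks above show. A sketch of a caller using the updated signature (example_find_l4 is a hypothetical name):

#include <linux/ipv6.h>
#include <net/ipv6.h>

/* Walk the IPv6 extension header chain and return the offset of the
 * upper-layer header, or a negative value if the chain is malformed. */
static int example_find_l4(struct sk_buff *skb, u8 *proto)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;
	int offset;

	offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
				  &nexthdr, &frag_off);
	if (offset < 0)
		return offset;

	*proto = nexthdr;	/* upper-layer protocol */
	return offset;		/* byte offset of its header */
}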
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index 529750d..936361e 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -40,3 +40,14 @@
 	If you select to build it as a built-in then the main CAIF device must
 	also be a built-in.
 	If unsure say Y.
+
+config CAIF_USB
+	tristate "CAIF USB support"
+	depends on CAIF
+	default n
+	---help---
+	Say Y if you are using CAIF over USB CDC NCM.
+	This can be either built-in or a loadable module.
+	If you select to build it as a built-in then the main CAIF device must
+	also be a built-in.
+	If unsure say N.
diff --git a/net/caif/Makefile b/net/caif/Makefile
index ebcd4e7..cc2b511 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -10,5 +10,6 @@
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
 obj-$(CONFIG_CAIF) += caif_socket.o
+obj-$(CONFIG_CAIF_USB) += caif_usb.o
 
 export-y := caif.o
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index f1fa1f6..b0ce14f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -24,6 +25,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/cfserl.h>
 
 MODULE_LICENSE("GPL");
 
@@ -33,6 +35,10 @@
 	struct list_head list;
 	struct net_device *netdev;
 	int __percpu *pcpu_refcnt;
+	spinlock_t flow_lock;
+	struct sk_buff *xoff_skb;
+	void (*xoff_skb_dtor)(struct sk_buff *skb);
+	bool xoff;
 };
 
 struct caif_device_entry_list {
@@ -47,13 +53,14 @@
 };
 
 static int caif_net_id;
+static int q_high = 50; /* Percent */
 
 struct cfcnfg *get_cfcnfg(struct net *net)
 {
 	struct caif_net *caifn;
-	BUG_ON(!net);
 	caifn = net_generic(net, caif_net_id);
-	BUG_ON(!caifn);
+	if (!caifn)
+		return NULL;
 	return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -61,9 +68,9 @@
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
 	struct caif_net *caifn;
-	BUG_ON(!net);
 	caifn = net_generic(net, caif_net_id);
-	BUG_ON(!caifn);
+	if (!caifn)
+		return NULL;
 	return &caifn->caifdevs;
 }
 
@@ -92,7 +99,8 @@
 	struct caif_device_entry *caifd;
 
 	caifdevs = caif_device_list(dev_net(dev));
-	BUG_ON(!caifdevs);
+	if (!caifdevs)
+		return NULL;
 
 	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 	if (!caifd)
@@ -112,7 +120,9 @@
 	struct caif_device_entry_list *caifdevs =
 	    caif_device_list(dev_net(dev));
 	struct caif_device_entry *caifd;
-	BUG_ON(!caifdevs);
+	if (!caifdevs)
+		return NULL;
+
 	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
 		if (caifd->netdev == dev)
 			return caifd;
@@ -120,15 +130,106 @@
 	return NULL;
 }
 
+void caif_flow_cb(struct sk_buff *skb)
+{
+	struct caif_device_entry *caifd;
+	void (*dtor)(struct sk_buff *skb) = NULL;
+	bool send_xoff;
+
+	WARN_ON(skb->dev == NULL);
+
+	rcu_read_lock();
+	caifd = caif_get(skb->dev);
+	caifd_hold(caifd);
+	rcu_read_unlock();
+
+	spin_lock_bh(&caifd->flow_lock);
+	send_xoff = caifd->xoff;
+	caifd->xoff = 0;
+	if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
+		WARN_ON(caifd->xoff_skb != skb);
+		dtor = caifd->xoff_skb_dtor;
+		caifd->xoff_skb = NULL;
+		caifd->xoff_skb_dtor = NULL;
+	}
+	spin_unlock_bh(&caifd->flow_lock);
+
+	if (dtor)
+		dtor(skb);
+
+	if (send_xoff)
+		caifd->layer.up->
+			ctrlcmd(caifd->layer.up,
+				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+				caifd->layer.id);
+	caifd_put(caifd);
+}
+
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
-	int err;
+	int err, high = 0, qlen = 0;
+	struct caif_dev_common *caifdev;
 	struct caif_device_entry *caifd =
 	    container_of(layer, struct caif_device_entry, layer);
 	struct sk_buff *skb;
+	struct netdev_queue *txq;
+
+	rcu_read_lock_bh();
 
 	skb = cfpkt_tonative(pkt);
 	skb->dev = caifd->netdev;
+	skb_reset_network_header(skb);
+	skb->protocol = htons(ETH_P_CAIF);
+	caifdev = netdev_priv(caifd->netdev);
+
+	/* Check if we need to handle xoff */
+	if (likely(caifd->netdev->tx_queue_len == 0))
+		goto noxoff;
+
+	if (unlikely(caifd->xoff))
+		goto noxoff;
+
+	if (likely(!netif_queue_stopped(caifd->netdev))) {
+		/* If we run with a TX queue, check if the queue is too long*/
+		txq = netdev_get_tx_queue(skb->dev, 0);
+		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+		if (likely(qlen == 0))
+			goto noxoff;
+
+		high = (caifd->netdev->tx_queue_len * q_high) / 100;
+		if (likely(qlen < high))
+			goto noxoff;
+	}
+
+	/* Hold lock while accessing xoff */
+	spin_lock_bh(&caifd->flow_lock);
+	if (caifd->xoff) {
+		spin_unlock_bh(&caifd->flow_lock);
+		goto noxoff;
+	}
+
+	/*
+	 * Handle flow off: we do this by temporarily hijacking this
+	 * skb's destructor function and replacing it with our own
+	 * flow-on callback. The callback will set flow-on and call
+	 * the original destructor.
+	 */
+
+	pr_debug("queue has stopped (%d) or is full (%d > %d)\n",
+			netif_queue_stopped(caifd->netdev),
+			qlen, high);
+	caifd->xoff = 1;
+	caifd->xoff_skb = skb;
+	caifd->xoff_skb_dtor = skb->destructor;
+	skb->destructor = caif_flow_cb;
+	spin_unlock_bh(&caifd->flow_lock);
+
+	caifd->layer.up->ctrlcmd(caifd->layer.up,
+					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+					caifd->layer.id);
+noxoff:
+	rcu_read_unlock_bh();
 
 	err = dev_queue_xmit(skb);
 	if (err > 0)
@@ -172,7 +273,10 @@
 
 	/* Release reference to stack upwards */
 	caifd_put(caifd);
-	return 0;
+
+	if (err != 0)
+		err = NET_RX_DROP;
+	return err;
 }
 
 static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +307,57 @@
 	caifd_put(caifd);
 }
 
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+			struct cflayer *link_support, int head_room,
+			struct cflayer **layer, int (**rcv_func)(
+				struct sk_buff *, struct net_device *,
+				struct packet_type *, struct net_device *))
+{
+	struct caif_device_entry *caifd;
+	enum cfcnfg_phy_preference pref;
+	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+	struct caif_device_entry_list *caifdevs;
+
+	caifdevs = caif_device_list(dev_net(dev));
+	if (!cfg || !caifdevs)
+		return;
+	caifd = caif_device_alloc(dev);
+	if (!caifd)
+		return;
+	*layer = &caifd->layer;
+	spin_lock_init(&caifd->flow_lock);
+
+	switch (caifdev->link_select) {
+	case CAIF_LINK_HIGH_BANDW:
+		pref = CFPHYPREF_HIGH_BW;
+		break;
+	case CAIF_LINK_LOW_LATENCY:
+		pref = CFPHYPREF_LOW_LAT;
+		break;
+	default:
+		pref = CFPHYPREF_HIGH_BW;
+		break;
+	}
+	mutex_lock(&caifdevs->lock);
+	list_add_rcu(&caifd->list, &caifdevs->list);
+
+	strncpy(caifd->layer.name, dev->name,
+		sizeof(caifd->layer.name) - 1);
+	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+	caifd->layer.transmit = transmit;
+	cfcnfg_add_phy_layer(cfg,
+				dev,
+				&caifd->layer,
+				pref,
+				link_support,
+				caifdev->use_fcs,
+				head_room);
+	mutex_unlock(&caifdevs->lock);
+	if (rcv_func)
+		*rcv_func = receive;
+}
+EXPORT_SYMBOL(caif_enroll_dev);
+
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			      void *arg)
@@ -210,62 +365,40 @@
 	struct net_device *dev = arg;
 	struct caif_device_entry *caifd = NULL;
 	struct caif_dev_common *caifdev;
-	enum cfcnfg_phy_preference pref;
-	enum cfcnfg_phy_type phy_type;
 	struct cfcnfg *cfg;
+	struct cflayer *layer, *link_support;
+	int head_room = 0;
 	struct caif_device_entry_list *caifdevs;
 
-	if (dev->type != ARPHRD_CAIF)
-		return 0;
-
 	cfg = get_cfcnfg(dev_net(dev));
-	if (cfg == NULL)
+	caifdevs = caif_device_list(dev_net(dev));
+	if (!cfg || !caifdevs)
 		return 0;
 
-	caifdevs = caif_device_list(dev_net(dev));
+	caifd = caif_get(dev);
+	if (caifd == NULL && dev->type != ARPHRD_CAIF)
+		return 0;
 
 	switch (what) {
 	case NETDEV_REGISTER:
-		caifd = caif_device_alloc(dev);
-		if (!caifd)
-			return 0;
+		if (caifd != NULL)
+			break;
 
 		caifdev = netdev_priv(dev);
-		caifdev->flowctrl = dev_flowctrl;
 
-		caifd->layer.transmit = transmit;
-
-		if (caifdev->use_frag)
-			phy_type = CFPHYTYPE_FRAG;
-		else
-			phy_type = CFPHYTYPE_CAIF;
-
-		switch (caifdev->link_select) {
-		case CAIF_LINK_HIGH_BANDW:
-			pref = CFPHYPREF_HIGH_BW;
-			break;
-		case CAIF_LINK_LOW_LATENCY:
-			pref = CFPHYPREF_LOW_LAT;
-			break;
-		default:
-			pref = CFPHYPREF_HIGH_BW;
-			break;
+		link_support = NULL;
+		if (caifdev->use_frag) {
+			head_room = 1;
+			link_support = cfserl_create(dev->ifindex,
+							caifdev->use_stx);
+			if (!link_support) {
+				pr_warn("Out of memory\n");
+				break;
+			}
 		}
-		strncpy(caifd->layer.name, dev->name,
-			sizeof(caifd->layer.name) - 1);
-		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
-
-		mutex_lock(&caifdevs->lock);
-		list_add_rcu(&caifd->list, &caifdevs->list);
-
-		cfcnfg_add_phy_layer(cfg,
-				     phy_type,
-				     dev,
-				     &caifd->layer,
-				     pref,
-				     caifdev->use_fcs,
-				     caifdev->use_stx);
-		mutex_unlock(&caifdevs->lock);
+		caif_enroll_dev(dev, caifdev, link_support, head_room,
+				&layer, NULL);
+		caifdev->flowctrl = dev_flowctrl;
 		break;
 
 	case NETDEV_UP:
@@ -277,6 +410,7 @@
 			break;
 		}
 
+		caifd->xoff = 0;
 		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
 		rcu_read_unlock();
 
@@ -298,6 +432,24 @@
 		caifd->layer.up->ctrlcmd(caifd->layer.up,
 					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
 					 caifd->layer.id);
+
+		spin_lock_bh(&caifd->flow_lock);
+
+		/*
+		 * Replace our xoff destructor with the original destructor.
+		 * We trust that skb->destructor is *always* called before
+		 * the skb reference becomes invalid. The hijacked SKB
+		 * destructor takes the flow_lock, so manipulating
+		 * skb->destructor here should be safe.
+		 */
+		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
+			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
+
+		caifd->xoff = 0;
+		caifd->xoff_skb_dtor = NULL;
+		caifd->xoff_skb = NULL;
+
+		spin_unlock_bh(&caifd->flow_lock);
 		caifd_put(caifd);
 		break;
 
@@ -353,15 +505,15 @@
 static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
-	BUG_ON(!caifn);
+	if (WARN_ON(!caifn))
+		return -EINVAL;
+
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
 	mutex_init(&caifn->caifdevs.lock);
 
 	caifn->cfg = cfcnfg_create();
-	if (!caifn->cfg) {
-		pr_warn("can't create cfcnfg\n");
+	if (!caifn->cfg)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -371,17 +523,14 @@
 	struct caif_device_entry *caifd, *tmp;
 	struct caif_device_entry_list *caifdevs =
 	    caif_device_list(net);
-	struct cfcnfg *cfg;
+	struct cfcnfg *cfg = get_cfcnfg(net);
+
+	if (!cfg || !caifdevs)
+		return;
 
 	rtnl_lock();
 	mutex_lock(&caifdevs->lock);
 
-	cfg = get_cfcnfg(net);
-	if (cfg == NULL) {
-		mutex_unlock(&caifdevs->lock);
-		return;
-	}
-
 	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
 		int i = 0;
 		list_del_rcu(&caifd->list);
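
The caif_dev.c changes above implement TX flow control by temporarily borrowing the outgoing skb's destructor: transmit() swaps in caif_flow_cb(), and when the queued packet is eventually freed the callback restores and runs the original destructor and signals flow-on again. The following is a minimal userspace sketch of that callback-hijack pattern, assuming a toy packet type and single-threaded use; it is not the kernel API and omits the flow_lock/refcount handling.

#include <stdio.h>
#include <stdlib.h>

struct packet {
	void (*destructor)(struct packet *p);
};

/* state that the real driver guards with flow_lock */
static void (*saved_dtor)(struct packet *p);
static int xoff;

static void original_dtor(struct packet *p)
{
	printf("original destructor: releasing packet %p\n", (void *)p);
	free(p);
}

/* stand-in for caif_flow_cb(): runs when the hijacked packet is freed */
static void flow_on_cb(struct packet *p)
{
	void (*dtor)(struct packet *p) = saved_dtor;

	saved_dtor = NULL;
	xoff = 0;
	printf("queue drained: signal flow-on\n");
	if (dtor)
		dtor(p);		/* always run the original destructor */
}

int main(void)
{
	struct packet *p = malloc(sizeof(*p));

	p->destructor = original_dtor;

	/* queue grew past the threshold: hijack the destructor, flow-off */
	xoff = 1;
	saved_dtor = p->destructor;
	p->destructor = flow_on_cb;
	printf("queue above threshold: signal flow-off\n");

	/* later, the driver frees the packet -> the callback fires */
	p->destructor(p);
	return 0;
}
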
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644
index 0000000..5fc9eca
--- /dev/null
+++ b/net/caif/caif_usb.c
@@ -0,0 +1,208 @@
+/*
+ * CAIF USB handler
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+#include <net/netns/generic.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+
+#define CFUSB_PAD_DESCR_SZ 1	/* Alignment descriptor length */
+#define CFUSB_ALIGNMENT 4	/* Number of bytes to align. */
+#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
+#define STE_USB_VID 0x04cc	/* USB Vendor ID for ST-Ericsson */
+#define STE_USB_PID_CAIF 0x2306	/* Product id for CAIF Modems */
+
+struct cfusbl {
+	struct cflayer layer;
+	u8 tx_eth_hdr[ETH_HLEN];
+};
+
+static bool pack_added;
+
+static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+	u8 hpad;
+
+	/* Remove padding. */
+	cfpkt_extr_head(pkt, &hpad, 1);
+	cfpkt_extr_head(pkt, NULL, hpad);
+	return layr->up->receive(layr->up, pkt);
+}
+
+static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+	struct caif_payload_info *info;
+	u8 hpad;
+	u8 zeros[CFUSB_ALIGNMENT];
+	struct sk_buff *skb;
+	struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
+
+	skb = cfpkt_tonative(pkt);
+
+	skb_reset_network_header(skb);
+	skb->protocol = htons(ETH_P_IP);
+
+	info = cfpkt_info(pkt);
+	hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
+
+	if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
+		pr_warn("Headroom too small\n");
+		kfree_skb(skb);
+		return -EIO;
+	}
+	memset(zeros, 0, hpad);
+
+	cfpkt_add_head(pkt, zeros, hpad);
+	cfpkt_add_head(pkt, &hpad, 1);
+	cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
+	return layr->dn->transmit(layr->dn, pkt);
+}
+
+static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+					int phyid)
+{
+	if (layr->up && layr->up->ctrlcmd)
+		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
+
+struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+					u8 braddr[ETH_ALEN])
+{
+	struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
+
+	if (!this) {
+		pr_warn("Out of memory\n");
+		return NULL;
+	}
+	caif_assert(offsetof(struct cfusbl, layer) == 0);
+
+	memset(this, 0, sizeof(struct cflayer));
+	this->layer.receive = cfusbl_receive;
+	this->layer.transmit = cfusbl_transmit;
+	this->layer.ctrlcmd = cfusbl_ctrlcmd;
+	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
+	this->layer.id = phyid;
+
+	/*
+	 * Construct TX ethernet header:
+	 *	0-5	destination address
+	 *	5-11	source address
+	 *	6-11	source address
+	 */
+	memcpy(this->tx_eth_hdr, braddr, ETH_ALEN);
+	memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
+	this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff;
+	this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff;
+	pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
+			this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
+			this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
+
+	return (struct cflayer *) this;
+}
+
+static struct packet_type caif_usb_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_802_EX1),
+};
+
+static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+			      void *arg)
+{
+	struct net_device *dev = arg;
+	struct caif_dev_common common;
+	struct cflayer *layer, *link_support;
+	struct usbnet	*usbnet = netdev_priv(dev);
+	struct usb_device	*usbdev = usbnet->udev;
+	struct ethtool_drvinfo drvinfo;
+
+	/*
+	 * Quirks: Hijack ethtool to find out if we have an NCM device,
+	 * and find its VID/PID.
+	 */
+	if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
+		return 0;
+
+	dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+	if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
+		return 0;
+
+	pr_debug("USB CDC NCM device VID:0x%04x PID:0x%04x\n",
+		le16_to_cpu(usbdev->descriptor.idVendor),
+		le16_to_cpu(usbdev->descriptor.idProduct));
+
+	/* Check for VID/PID that supports CAIF */
+	if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
+		le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
+		return 0;
+
+	if (what == NETDEV_UNREGISTER)
+		module_put(THIS_MODULE);
+
+	if (what != NETDEV_REGISTER)
+		return 0;
+
+	__module_get(THIS_MODULE);
+
+	memset(&common, 0, sizeof(common));
+	common.use_frag = false;
+	common.use_fcs = false;
+	common.use_stx = false;
+	common.link_select = CAIF_LINK_HIGH_BANDW;
+	common.flowctrl = NULL;
+
+	link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
+					dev->broadcast);
+
+	if (!link_support)
+		return -ENOMEM;
+
+	if (dev->num_tx_queues > 1)
+		pr_warn("USB device uses more than one tx queue\n");
+
+	caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+			&layer, &caif_usb_type.func);
+	if (!pack_added)
+		dev_add_pack(&caif_usb_type);
+	pack_added = true;
+
+	strncpy(layer->name, dev->name,
+			sizeof(layer->name) - 1);
+	layer->name[sizeof(layer->name) - 1] = 0;
+
+	return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+	.notifier_call = cfusbl_device_notify,
+	.priority = 0,
+};
+
+static int __init cfusbl_init(void)
+{
+	return register_netdevice_notifier(&caif_device_notifier);
+}
+
+static void __exit cfusbl_exit(void)
+{
+	unregister_netdevice_notifier(&caif_device_notifier);
+	dev_remove_pack(&caif_usb_type);
+}
+
+module_init(cfusbl_init);
+module_exit(cfusbl_exit);
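
caif_usb.c frames each packet with a one-byte pad descriptor followed by that many zero bytes, mirroring the hpad calculation in cfusbl_transmit(), so the receive side only has to read the descriptor and discard the padding. Below is a small, self-contained sketch of that framing under simplified assumptions (flat buffers instead of skbs; the function names are made up):

#include <stdio.h>
#include <string.h>

#define PAD_DESCR_SZ 1
#define ALIGNMENT    4

/* Prepend pad descriptor + zero padding; returns the new frame length. */
static size_t add_padding(unsigned char *frame, const unsigned char *payload,
			  size_t len, size_t hdr_len)
{
	unsigned char hpad = (hdr_len + PAD_DESCR_SZ) & (ALIGNMENT - 1);

	frame[0] = hpad;
	memset(frame + 1, 0, hpad);
	memcpy(frame + 1 + hpad, payload, len);
	return 1 + hpad + len;
}

/* Read the descriptor; returns the payload offset within the frame. */
static size_t strip_padding(const unsigned char *frame)
{
	return 1 + frame[0];
}

int main(void)
{
	unsigned char frame[64];
	const unsigned char payload[] = "caif";
	size_t n = add_padding(frame, payload, sizeof(payload), 3 /* hdr_len */);

	printf("frame %zu bytes, payload at offset %zu: %s\n",
	       n, strip_padding(frame), frame + strip_padding(frame));
	return 0;
}
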
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 00523ec..598aafb 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -45,8 +45,8 @@
 	/* Interface index */
 	int ifindex;
 
-	/* Use Start of frame extension */
-	bool use_stx;
+	/* Protocol head room added for CAIF link layer */
+	int head_room;
 
 	/* Use Start of frame checksum */
 	bool use_fcs;
@@ -187,11 +187,11 @@
 	if (channel_id != 0) {
 		struct cflayer *servl;
 		servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+		cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 		if (servl != NULL)
 			layer_set_up(servl, NULL);
 	} else
 		pr_debug("nothing to disconnect\n");
-	cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 
 	/* Do RCU sync before initiating cleanup */
 	synchronize_rcu();
@@ -350,9 +350,7 @@
 
 	*ifindex = phy->ifindex;
 	*proto_tail = 2;
-	*proto_head =
-
-	protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+	*proto_head = protohead[param.linktype] + phy->head_room;
 
 	rcu_read_unlock();
 
@@ -460,13 +458,13 @@
 }
 
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 		     struct net_device *dev, struct cflayer *phy_layer,
 		     enum cfcnfg_phy_preference pref,
-		     bool fcs, bool stx)
+		     struct cflayer *link_support,
+		     bool fcs, int head_room)
 {
 	struct cflayer *frml;
-	struct cflayer *phy_driver = NULL;
 	struct cfcnfg_phyinfo *phyinfo = NULL;
 	int i;
 	u8 phyid;
@@ -482,26 +480,13 @@
 			goto got_phyid;
 	}
 	pr_warn("Too many CAIF Link Layers (max 6)\n");
-	goto out_err;
+	goto out;
 
 got_phyid:
 	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
 	if (!phyinfo)
 		goto out_err;
 
-	switch (phy_type) {
-	case CFPHYTYPE_FRAG:
-		phy_driver =
-		    cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
-		if (!phy_driver)
-			goto out_err;
-		break;
-	case CFPHYTYPE_CAIF:
-		phy_driver = NULL;
-		break;
-	default:
-		goto out_err;
-	}
 	phy_layer->id = phyid;
 	phyinfo->pref = pref;
 	phyinfo->id = phyid;
@@ -509,7 +494,7 @@
 	phyinfo->dev_info.dev = dev;
 	phyinfo->phy_layer = phy_layer;
 	phyinfo->ifindex = dev->ifindex;
-	phyinfo->use_stx = stx;
+	phyinfo->head_room = head_room;
 	phyinfo->use_fcs = fcs;
 
 	frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@
 	phyinfo->frm_layer = frml;
 	layer_set_up(frml, cnfg->mux);
 
-	if (phy_driver != NULL) {
-		phy_driver->id = phyid;
-		layer_set_dn(frml, phy_driver);
-		layer_set_up(phy_driver, frml);
-		layer_set_dn(phy_driver, phy_layer);
-		layer_set_up(phy_layer, phy_driver);
+	if (link_support != NULL) {
+		link_support->id = phyid;
+		layer_set_dn(frml, link_support);
+		layer_set_up(link_support, frml);
+		layer_set_dn(link_support, phy_layer);
+		layer_set_up(phy_layer, link_support);
 	} else {
 		layer_set_dn(frml, phy_layer);
 		layer_set_up(phy_layer, frml);
 	}
 
 	list_add_rcu(&phyinfo->node, &cnfg->phys);
+out:
 	mutex_unlock(&cnfg->lock);
 	return;
 
 out_err:
-	kfree(phy_driver);
 	kfree(phyinfo);
 	mutex_unlock(&cnfg->lock);
 }
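
With this change cfcnfg_add_phy_layer() no longer creates a serial layer itself; the caller hands in an optional link_support layer, which is spliced between the framing layer and the physical layer by re-pointing up/dn on both neighbours. A minimal sketch of that splice, with hypothetical layer names:

#include <stdio.h>

struct layer {
	const char *name;
	struct layer *up, *dn;
};

/* Insert "mid" between "top" and "bottom" in a CAIF-style layer stack. */
static void splice_layer(struct layer *top, struct layer *mid,
			 struct layer *bottom)
{
	if (mid) {
		top->dn = mid;
		mid->up = top;
		mid->dn = bottom;
		bottom->up = mid;
	} else {
		top->dn = bottom;
		bottom->up = top;
	}
}

int main(void)
{
	struct layer frml = { "framing" }, serl = { "link-support" },
		     phy  = { "phy" };

	splice_layer(&frml, &serl, &phy);
	for (struct layer *l = &frml; l; l = l->dn)
		printf("%s\n", l->name);
	return 0;
}
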
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index df08c47..e335ba8 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -63,7 +63,6 @@
 	return (struct cfpkt *) skb;
 }
 
-
 struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
 {
 	struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@
 	kfree_skb(skb);
 }
 
-
 inline bool cfpkt_more(struct cfpkt *pkt)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
 	return skb->len > 0;
 }
 
-
 int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
@@ -144,9 +141,11 @@
 	}
 	from = skb_pull(skb, len);
 	from -= len;
-	memcpy(data, from, len);
+	if (data)
+		memcpy(data, from, len);
 	return 0;
 }
+EXPORT_SYMBOL(cfpkt_extr_head);
 
 int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
 {
@@ -170,13 +169,11 @@
 	return 0;
 }
 
-
 int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
 {
 	return cfpkt_add_body(pkt, NULL, len);
 }
 
-
 int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
@@ -255,21 +252,19 @@
 	memcpy(to, data, len);
 	return 0;
 }
-
+EXPORT_SYMBOL(cfpkt_add_head);
 
 inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
 {
 	return cfpkt_add_body(pkt, data, len);
 }
 
-
 inline u16 cfpkt_getlen(struct cfpkt *pkt)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
 	return skb->len;
 }
 
-
 inline u16 cfpkt_iterate(struct cfpkt *pkt,
 			    u16 (*iter_func)(u16, void *, u16),
 			    u16 data)
@@ -287,7 +282,6 @@
 	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
 }
 
-
 int cfpkt_setlen(struct cfpkt *pkt, u16 len)
 {
 	struct sk_buff *skb = pkt_to_skb(pkt);
@@ -399,3 +393,4 @@
 {
 	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
+EXPORT_SYMBOL(cfpkt_info);
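
cfpkt_extr_head() now treats a NULL destination as "discard these bytes" (which is what the USB receive path uses to skip padding) and the helper is exported. A tiny sketch of an extract-head helper with that optional copy, using a flat buffer instead of an skb:

#include <stdio.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* Pull 'len' bytes off the head; copy them out only if 'dst' is non-NULL. */
static int extr_head(struct buf *b, void *dst, size_t len)
{
	if (b->len < len)
		return -1;
	if (dst)
		memcpy(dst, b->data, len);
	b->data += len;
	b->len -= len;
	return 0;
}

int main(void)
{
	unsigned char raw[] = { 2, 0, 0, 'h', 'i', 0 };
	struct buf b = { raw, sizeof(raw) };
	unsigned char pad;

	extr_head(&b, &pad, 1);		/* read the pad descriptor */
	extr_head(&b, NULL, pad);	/* discard the padding */
	printf("payload: %s\n", b.data);
	return 0;
}
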
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 81660f8..6dc75d4 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -190,7 +190,7 @@
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 797c8d1..8e68b97 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -31,7 +31,7 @@
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 				int phyid);
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+struct cflayer *cfserl_create(int instance, bool use_stx)
 {
 	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
 	if (!this)
@@ -40,7 +40,6 @@
 	this->layer.receive = cfserl_receive;
 	this->layer.transmit = cfserl_transmit;
 	this->layer.ctrlcmd = cfserl_ctrlcmd;
-	this->layer.type = type;
 	this->usestx = use_stx;
 	spin_lock_init(&this->sync);
 	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 42599e3..3a94eae 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -477,7 +477,6 @@
 	int i, j;
 	int numrep;
 	int firstn;
-	int rc = -1;
 
 	BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@
 	 * that this may or may not correspond to the specific types
 	 * referenced by the crush rule.
 	 */
-	if (force >= 0) {
-		if (force >= map->max_devices ||
-		    map->device_parents[force] == 0) {
-			/*dprintk("CRUSH: forcefed device dne\n");*/
-			rc = -1;  /* force fed device dne */
-			goto out;
-		}
-		if (!is_out(map, weight, force, x)) {
-			while (1) {
-				force_context[++force_pos] = force;
-				if (force >= 0)
-					force = map->device_parents[force];
-				else
-					force = map->bucket_parents[-1-force];
-				if (force == 0)
-					break;
-			}
+	if (force >= 0 &&
+	    force < map->max_devices &&
+	    map->device_parents[force] != 0 &&
+	    !is_out(map, weight, force, x)) {
+		while (1) {
+			force_context[++force_pos] = force;
+			if (force >= 0)
+				force = map->device_parents[force];
+			else
+				force = map->bucket_parents[-1-force];
+			if (force == 0)
+				break;
 		}
 	}
 
@@ -600,10 +594,7 @@
 			BUG_ON(1);
 		}
 	}
-	rc = result_len;
-
-out:
-	return rc;
+	return result_len;
 }
 
 
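
The mapper.c rework folds the force-fed device checks into a single condition; when the forced item is usable, the code records it and walks up its parents until it reaches the root (id 0), using device_parents[] for devices and bucket_parents[-1 - id] for buckets (which have negative ids). A standalone sketch of that walk over a made-up two-level map:

#include <stdio.h>

#define MAX_DEVICES 4

/* parent of device i; buckets have negative ids, the parent of bucket b is
 * bucket_parents[-1 - b]; id 0 terminates the walk (the root).
 */
static int device_parents[MAX_DEVICES] = { -1, -1, -2, -2 };
static int bucket_parents[2]           = {  0,  0 };

static int walk_parents(int force, int *chain, int max)
{
	int n = 0;

	while (n < max) {
		chain[n++] = force;
		if (force >= 0)
			force = device_parents[force];
		else
			force = bucket_parents[-1 - force];
		if (force == 0)
			break;
	}
	return n;
}

int main(void)
{
	int chain[8];
	int n = walk_parents(2, chain, 8);

	for (int i = 0; i < n; i++)
		printf("step %d: id %d\n", i, chain[i]);
	return 0;
}
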
diff --git a/net/core/Makefile b/net/core/Makefile
index 0d357b1..674641b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -3,12 +3,13 @@
 #
 
 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
-	 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
+	 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y		     += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
-			neighbour.o rtnetlink.o utils.o link_watch.o filter.o
+			neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+			sock_diag.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
@@ -19,3 +20,4 @@
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
+obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 5a13edf..f494675 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,10 +133,9 @@
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
-#include <linux/if_tunnel.h>
-#include <linux/if_pppox.h>
-#include <linux/ppp_defs.h>
 #include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
+#include <net/flow_keys.h>
 
 #include "net-sysfs.h"
 
@@ -1320,8 +1319,6 @@
  */
 void dev_disable_lro(struct net_device *dev)
 {
-	u32 flags;
-
 	/*
 	 * If we're trying to disable lro on a vlan device
 	 * use the underlying physical device instead
@@ -1329,15 +1326,9 @@
 	if (is_vlan_dev(dev))
 		dev = vlan_dev_real_dev(dev);
 
-	if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
-		flags = dev->ethtool_ops->get_flags(dev);
-	else
-		flags = ethtool_op_get_flags(dev);
+	dev->wanted_features &= ~NETIF_F_LRO;
+	netdev_update_features(dev);
 
-	if (!(flags & ETH_FLAG_LRO))
-		return;
-
-	__ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
 	if (unlikely(dev->features & NETIF_F_LRO))
 		netdev_WARN(dev, "failed to disable LRO!\n");
 }
@@ -1450,34 +1441,55 @@
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
+#ifdef HAVE_JUMP_LABEL
+/* We are not allowed to call jump_label_dec() from irq context.
+ * If net_disable_timestamp() is called from irq context, defer the
+ * jump_label_dec() calls.
+ */
+static atomic_t netstamp_needed_deferred;
+#endif
 
 void net_enable_timestamp(void)
 {
-	atomic_inc(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+	if (deferred) {
+		while (--deferred)
+			jump_label_dec(&netstamp_needed);
+		return;
+	}
+#endif
+	WARN_ON(in_interrupt());
+	jump_label_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
-	atomic_dec(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+	if (in_interrupt()) {
+		atomic_inc(&netstamp_needed_deferred);
+		return;
+	}
+#endif
+	jump_label_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
-	if (atomic_read(&netstamp_needed))
+	skb->tstamp.tv64 = 0;
+	if (static_branch(&netstamp_needed))
 		__net_timestamp(skb);
-	else
-		skb->tstamp.tv64 = 0;
 }
 
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
-	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
-		__net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB)			\
+	if (static_branch(&netstamp_needed)) {		\
+		if ((COND) && !(SKB)->tstamp.tv64)	\
+			__net_timestamp(SKB);		\
+	}						\
 
 static int net_hwtstamp_validate(struct ifreq *ifr)
 {
@@ -1924,7 +1936,8 @@
  *	It may return NULL if the skb requires no segmentation.  This is
  *	only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
@@ -1954,9 +1967,9 @@
 		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
 			dev->ethtool_ops->get_drvinfo(dev, &info);
 
-		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-		     info.driver, dev ? dev->features : 0L,
-		     skb->sk ? skb->sk->sk_route_caps : 0L,
+		WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
+		     info.driver, dev ? &dev->features : NULL,
+		     skb->sk ? &skb->sk->sk_route_caps : NULL,
 		     skb->len, skb->data_len, skb->ip_summed);
 
 		if (skb_header_cloned(skb) &&
@@ -2065,7 +2078,7 @@
  *	This function segments the given skb and stores the list of segments
  *	in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
 	struct sk_buff *segs;
 
@@ -2104,7 +2117,7 @@
 	}
 }
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
 	return ((features & NETIF_F_GEN_CSUM) ||
 		((features & NETIF_F_V4_CSUM) &&
@@ -2115,7 +2128,8 @@
 		 protocol == htons(ETH_P_FCOE)));
 }
 
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+	__be16 protocol, netdev_features_t features)
 {
 	if (!can_checksum_protocol(features, protocol)) {
 		features &= ~NETIF_F_ALL_CSUM;
@@ -2127,10 +2141,10 @@
 	return features;
 }
 
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	u32 features = skb->dev->features;
+	netdev_features_t features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2176,7 +2190,7 @@
 	unsigned int skb_len;
 
 	if (likely(!skb->next)) {
-		u32 features;
+		netdev_features_t features;
 
 		/*
 		 * If device doesn't need skb->dst, release it right now while
@@ -2257,7 +2271,7 @@
 			return rc;
 		}
 		txq_trans_update(txq);
-		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+		if (unlikely(netif_xmit_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -2457,6 +2471,18 @@
 	return rc;
 }
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+
+	if ((!skb->priority) && (skb->sk) && map)
+		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
 static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
@@ -2497,6 +2523,8 @@
 	 */
 	rcu_read_lock_bh();
 
+	skb_update_prio(skb);
+
 	txq = dev_pick_tx(dev, skb);
 	q = rcu_dereference_bh(txq->qdisc);
 
@@ -2531,7 +2559,7 @@
 
 			HARD_TX_LOCK(dev, txq, cpu);
 
-			if (!netif_tx_queue_stopped(txq)) {
+			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
 				rc = dev_hard_start_xmit(skb, dev, txq);
 				__this_cpu_dec(xmit_recursion);
@@ -2592,123 +2620,28 @@
  */
 void __skb_get_rxhash(struct sk_buff *skb)
 {
-	int nhoff, hash = 0, poff;
-	const struct ipv6hdr *ip6;
-	const struct iphdr *ip;
-	const struct vlan_hdr *vlan;
-	u8 ip_proto;
-	u32 addr1, addr2;
-	u16 proto;
-	union {
-		u32 v32;
-		u16 v16[2];
-	} ports;
+	struct flow_keys keys;
+	u32 hash;
 
-	nhoff = skb_network_offset(skb);
-	proto = skb->protocol;
+	if (!skb_flow_dissect(skb, &keys))
+		return;
 
-again:
-	switch (proto) {
-	case __constant_htons(ETH_P_IP):
-ip:
-		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
-			goto done;
-
-		ip = (const struct iphdr *) (skb->data + nhoff);
-		if (ip_is_fragment(ip))
-			ip_proto = 0;
-		else
-			ip_proto = ip->protocol;
-		addr1 = (__force u32) ip->saddr;
-		addr2 = (__force u32) ip->daddr;
-		nhoff += ip->ihl * 4;
-		break;
-	case __constant_htons(ETH_P_IPV6):
-ipv6:
-		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
-			goto done;
-
-		ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
-		ip_proto = ip6->nexthdr;
-		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
-		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
-		nhoff += 40;
-		break;
-	case __constant_htons(ETH_P_8021Q):
-		if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
-			goto done;
-		vlan = (const struct vlan_hdr *) (skb->data + nhoff);
-		proto = vlan->h_vlan_encapsulated_proto;
-		nhoff += sizeof(*vlan);
-		goto again;
-	case __constant_htons(ETH_P_PPP_SES):
-		if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
-			goto done;
-		proto = *((__be16 *) (skb->data + nhoff +
-				      sizeof(struct pppoe_hdr)));
-		nhoff += PPPOE_SES_HLEN;
-		switch (proto) {
-		case __constant_htons(PPP_IP):
-			goto ip;
-		case __constant_htons(PPP_IPV6):
-			goto ipv6;
-		default:
-			goto done;
-		}
-	default:
-		goto done;
-	}
-
-	switch (ip_proto) {
-	case IPPROTO_GRE:
-		if (pskb_may_pull(skb, nhoff + 16)) {
-			u8 *h = skb->data + nhoff;
-			__be16 flags = *(__be16 *)h;
-
-			/*
-			 * Only look inside GRE if version zero and no
-			 * routing
-			 */
-			if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
-				proto = *(__be16 *)(h + 2);
-				nhoff += 4;
-				if (flags & GRE_CSUM)
-					nhoff += 4;
-				if (flags & GRE_KEY)
-					nhoff += 4;
-				if (flags & GRE_SEQ)
-					nhoff += 4;
-				goto again;
-			}
-		}
-		break;
-	case IPPROTO_IPIP:
-		goto again;
-	default:
-		break;
-	}
-
-	ports.v32 = 0;
-	poff = proto_ports_offset(ip_proto);
-	if (poff >= 0) {
-		nhoff += poff;
-		if (pskb_may_pull(skb, nhoff + 4)) {
-			ports.v32 = * (__force u32 *) (skb->data + nhoff);
-			if (ports.v16[1] < ports.v16[0])
-				swap(ports.v16[0], ports.v16[1]);
-			skb->l4_rxhash = 1;
-		}
+	if (keys.ports) {
+		if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
+			swap(keys.port16[0], keys.port16[1]);
+		skb->l4_rxhash = 1;
 	}
 
 	/* get a consistent hash (same value on both flow directions) */
-	if (addr2 < addr1)
-		swap(addr1, addr2);
+	if ((__force u32)keys.dst < (__force u32)keys.src)
+		swap(keys.dst, keys.src);
 
-	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+	hash = jhash_3words((__force u32)keys.dst,
+			    (__force u32)keys.src,
+			    (__force u32)keys.ports, hashrnd);
 	if (!hash)
 		hash = 1;
 
-done:
 	skb->rxhash = hash;
 }
 EXPORT_SYMBOL(__skb_get_rxhash);
@@ -2719,6 +2652,8 @@
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+struct jump_label_key rps_needed __read_mostly;
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	    struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2998,12 +2933,11 @@
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
-	if (netdev_tstamp_prequeue)
-		net_timestamp_check(skb);
+	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	{
+	if (static_branch(&rps_needed))	{
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3018,14 +2952,13 @@
 
 		rcu_read_unlock();
 		preempt_enable();
-	}
-#else
+	} else
+#endif
 	{
 		unsigned int qtail;
 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
 		put_cpu();
 	}
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
@@ -3231,8 +3164,7 @@
 	int ret = NET_RX_DROP;
 	__be16 type;
 
-	if (!netdev_tstamp_prequeue)
-		net_timestamp_check(skb);
+	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
 	trace_netif_receive_skb(skb);
 
@@ -3363,14 +3295,13 @@
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
-	if (netdev_tstamp_prequeue)
-		net_timestamp_check(skb);
+	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	{
+	if (static_branch(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
@@ -3381,16 +3312,12 @@
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
-		} else {
-			rcu_read_unlock();
-			ret = __netif_receive_skb(skb);
+			return ret;
 		}
-
-		return ret;
+		rcu_read_unlock();
 	}
-#else
-	return __netif_receive_skb(skb);
 #endif
+	return __netif_receive_skb(skb);
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -4539,7 +4466,7 @@
 
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
-	unsigned short old_flags = dev->flags;
+	unsigned int old_flags = dev->flags;
 	uid_t uid;
 	gid_t gid;
 
@@ -4596,7 +4523,7 @@
  */
 int dev_set_promiscuity(struct net_device *dev, int inc)
 {
-	unsigned short old_flags = dev->flags;
+	unsigned int old_flags = dev->flags;
 	int err;
 
 	err = __dev_set_promiscuity(dev, inc);
@@ -4623,7 +4550,7 @@
 
 int dev_set_allmulti(struct net_device *dev, int inc)
 {
-	unsigned short old_flags = dev->flags;
+	unsigned int old_flags = dev->flags;
 
 	ASSERT_RTNL();
 
@@ -4726,7 +4653,7 @@
 
 int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-	int old_flags = dev->flags;
+	unsigned int old_flags = dev->flags;
 	int ret;
 
 	ASSERT_RTNL();
@@ -4809,10 +4736,10 @@
  *	Change settings on device based state flags. The flags are
  *	in the userspace exported format.
  */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-	int ret, changes;
-	int old_flags = dev->flags;
+	int ret;
+	unsigned int changes, old_flags = dev->flags;
 
 	ret = __dev_change_flags(dev, flags);
 	if (ret < 0)
@@ -5369,7 +5296,8 @@
 	list_del(&single);
 }
 
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	/* Fix illegal checksum combinations */
 	if ((features & NETIF_F_HW_CSUM) &&
@@ -5378,12 +5306,6 @@
 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 	}
 
-	if ((features & NETIF_F_NO_CSUM) &&
-	    (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-		netdev_warn(dev, "mixed no checksumming and other settings.\n");
-		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-	}
-
 	/* Fix illegal SG+CSUM combinations. */
 	if ((features & NETIF_F_SG) &&
 	    !(features & NETIF_F_ALL_CSUM)) {
@@ -5431,7 +5353,7 @@
 
 int __netdev_update_features(struct net_device *dev)
 {
-	u32 features;
+	netdev_features_t features;
 	int err = 0;
 
 	ASSERT_RTNL();
@@ -5447,16 +5369,16 @@
 	if (dev->features == features)
 		return 0;
 
-	netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
-		dev->features, features);
+	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+		&dev->features, &features);
 
 	if (dev->netdev_ops->ndo_set_features)
 		err = dev->netdev_ops->ndo_set_features(dev, features);
 
 	if (unlikely(err < 0)) {
 		netdev_err(dev,
-			"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
-			err, features, dev->features);
+			"set_features() failed (%d); wanted %pNF, left %pNF\n",
+			err, &features, &dev->features);
 		return -1;
 	}
 
@@ -5555,6 +5477,9 @@
 	queue->xmit_lock_owner = -1;
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
+#ifdef CONFIG_BQL
+	dql_init(&queue->dql, HZ);
+#endif
 }
 
 static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -5640,11 +5565,12 @@
 	dev->wanted_features = dev->features & dev->hw_features;
 
 	/* Turn on no cache copy if HW is doing checksum */
-	dev->hw_features |= NETIF_F_NOCACHE_COPY;
-	if ((dev->features & NETIF_F_ALL_CSUM) &&
-	    !(dev->features & NETIF_F_NO_CSUM)) {
-		dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-		dev->features |= NETIF_F_NOCACHE_COPY;
+	if (!(dev->flags & IFF_LOOPBACK)) {
+		dev->hw_features |= NETIF_F_NOCACHE_COPY;
+		if (dev->features & NETIF_F_ALL_CSUM) {
+			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+			dev->features |= NETIF_F_NOCACHE_COPY;
+		}
 	}
 
 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6380,7 +6306,8 @@
  *	@one to the master device with current feature set @all.  Will not
  *	enable anything that is off in @mask. Returns the new feature set.
  */
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+	netdev_features_t one, netdev_features_t mask)
 {
 	if (mask & NETIF_F_GEN_CSUM)
 		mask |= NETIF_F_ALL_CSUM;
@@ -6389,10 +6316,6 @@
 	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
 	all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-	/* If device needs checksumming, downgrade to it. */
-	if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
-		all &= ~NETIF_F_NO_CSUM;
-
 	/* If one device supports hw checksumming, set for all. */
 	if (all & NETIF_F_GEN_CSUM)
 		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
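
In dev.c, __skb_get_rxhash() now hashes whatever skb_flow_dissect() extracted, first swapping src/dst (and the port pair) into a canonical order so both directions of a flow get the same rxhash. The sketch below shows that direction-invariant hashing in plain C; the mixing function is a stand-in, not the kernel's jhash_3words():

#include <stdio.h>
#include <stdint.h>

/* Toy 3-word mixer standing in for jhash_3words(). */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t h = a * 0x9e3779b1u;

	h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
	h ^= c + 0xc2b2ae35u + (h << 6) + (h >> 2);
	return h ? h : 1;	/* 0 is reserved for "no hash computed" */
}

static uint32_t flow_hash(uint32_t src, uint32_t dst,
			  uint16_t sport, uint16_t dport)
{
	uint32_t ports;

	/* canonical order: same hash for both directions of the flow */
	if (dst < src) {
		uint32_t t = src; src = dst; dst = t;
	}
	if (dport < sport) {
		uint16_t t = sport; sport = dport; dport = t;
	}
	ports = ((uint32_t)dport << 16) | sport;
	return mix3(dst, src, ports);
}

int main(void)
{
	uint32_t a = 0x0a000001, b = 0x0a000002;	/* 10.0.0.1, 10.0.0.2 */

	printf("a->b: %08x\n", flow_hash(a, b, 40000, 80));
	printf("b->a: %08x\n", flow_hash(b, a, 80, 40000));
	return 0;
}
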
diff --git a/net/core/dst.c b/net/core/dst.c
index d5e2c4c..43d94ce 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -366,7 +366,7 @@
 		dev_hold(dst->dev);
 		dev_put(dev);
 		rcu_read_lock();
-		neigh = dst_get_neighbour(dst);
+		neigh = dst_get_neighbour_noref(dst);
 		if (neigh && neigh->dev == dev) {
 			neigh->dev = dst->dev;
 			dev_hold(dst->dev);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f444817..921aa2b 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -36,235 +36,44 @@
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
-u32 ethtool_op_get_tx_csum(struct net_device *dev)
-{
-	return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tx_csum);
-
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
-	if (data)
-		dev->features |= NETIF_F_IP_CSUM;
-	else
-		dev->features &= ~NETIF_F_IP_CSUM;
-
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
-{
-	if (data)
-		dev->features |= NETIF_F_HW_CSUM;
-	else
-		dev->features &= ~NETIF_F_HW_CSUM;
-
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
-{
-	if (data)
-		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-	else
-		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-
-u32 ethtool_op_get_sg(struct net_device *dev)
-{
-	return (dev->features & NETIF_F_SG) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_sg);
-
-int ethtool_op_set_sg(struct net_device *dev, u32 data)
-{
-	if (data)
-		dev->features |= NETIF_F_SG;
-	else
-		dev->features &= ~NETIF_F_SG;
-
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_sg);
-
-u32 ethtool_op_get_tso(struct net_device *dev)
-{
-	return (dev->features & NETIF_F_TSO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tso);
-
-int ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
-	if (data)
-		dev->features |= NETIF_F_TSO;
-	else
-		dev->features &= ~NETIF_F_TSO;
-
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tso);
-
-u32 ethtool_op_get_ufo(struct net_device *dev)
-{
-	return (dev->features & NETIF_F_UFO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-
-int ethtool_op_set_ufo(struct net_device *dev, u32 data)
-{
-	if (data)
-		dev->features |= NETIF_F_UFO;
-	else
-		dev->features &= ~NETIF_F_UFO;
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-
-/* the following list of flags are the same as their associated
- * NETIF_F_xxx values in include/linux/netdevice.h
- */
-static const u32 flags_dup_features =
-	(ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE |
-	 ETH_FLAG_RXHASH);
-
-u32 ethtool_op_get_flags(struct net_device *dev)
-{
-	/* in the future, this function will probably contain additional
-	 * handling for flags which are not so easily handled
-	 * by a simple masking operation
-	 */
-
-	return dev->features & flags_dup_features;
-}
-EXPORT_SYMBOL(ethtool_op_get_flags);
-
-/* Check if device can enable (or disable) particular feature coded in "data"
- * argument. Flags "supported" describe features that can be toggled by device.
- * If feature can not be toggled, it state (enabled or disabled) must match
- * hardcoded device features state, otherwise flags are marked as invalid.
- */
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
-{
-	u32 features = dev->features & flags_dup_features;
-	/* "data" can contain only flags_dup_features bits,
-	 * see __ethtool_set_flags */
-
-	return (features & ~supported) != (data & ~supported);
-}
-EXPORT_SYMBOL(ethtool_invalid_flags);
-
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
-	if (ethtool_invalid_flags(dev, data, supported))
-		return -EINVAL;
-
-	dev->features = ((dev->features & ~flags_dup_features) |
-			 (data & flags_dup_features));
-	return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_flags);
-
 /* Handlers for each ethtool command */
 
-#define ETHTOOL_DEV_FEATURE_WORDS	1
+#define ETHTOOL_DEV_FEATURE_WORDS	((NETDEV_FEATURE_COUNT + 31) / 32)
 
-static void ethtool_get_features_compat(struct net_device *dev,
-	struct ethtool_get_features_block *features)
-{
-	if (!dev->ethtool_ops)
-		return;
+static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
+	[NETIF_F_SG_BIT] =               "tx-scatter-gather",
+	[NETIF_F_IP_CSUM_BIT] =          "tx-checksum-ipv4",
+	[NETIF_F_HW_CSUM_BIT] =          "tx-checksum-ip-generic",
+	[NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
+	[NETIF_F_HIGHDMA_BIT] =          "highdma",
+	[NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
+	[NETIF_F_HW_VLAN_TX_BIT] =       "tx-vlan-hw-insert",
 
-	/* getting RX checksum */
-	if (dev->ethtool_ops->get_rx_csum)
-		if (dev->ethtool_ops->get_rx_csum(dev))
-			features[0].active |= NETIF_F_RXCSUM;
+	[NETIF_F_HW_VLAN_RX_BIT] =       "rx-vlan-hw-parse",
+	[NETIF_F_HW_VLAN_FILTER_BIT] =   "rx-vlan-filter",
+	[NETIF_F_VLAN_CHALLENGED_BIT] =  "vlan-challenged",
+	[NETIF_F_GSO_BIT] =              "tx-generic-segmentation",
+	[NETIF_F_LLTX_BIT] =             "tx-lockless",
+	[NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
+	[NETIF_F_GRO_BIT] =              "rx-gro",
+	[NETIF_F_LRO_BIT] =              "rx-lro",
 
-	/* mark legacy-changeable features */
-	if (dev->ethtool_ops->set_sg)
-		features[0].available |= NETIF_F_SG;
-	if (dev->ethtool_ops->set_tx_csum)
-		features[0].available |= NETIF_F_ALL_CSUM;
-	if (dev->ethtool_ops->set_tso)
-		features[0].available |= NETIF_F_ALL_TSO;
-	if (dev->ethtool_ops->set_rx_csum)
-		features[0].available |= NETIF_F_RXCSUM;
-	if (dev->ethtool_ops->set_flags)
-		features[0].available |= flags_dup_features;
-}
+	[NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
+	[NETIF_F_UFO_BIT] =              "tx-udp-fragmentation",
+	[NETIF_F_GSO_ROBUST_BIT] =       "tx-gso-robust",
+	[NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
+	[NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
+	[NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
 
-static int ethtool_set_feature_compat(struct net_device *dev,
-	int (*legacy_set)(struct net_device *, u32),
-	struct ethtool_set_features_block *features, u32 mask)
-{
-	u32 do_set;
-
-	if (!legacy_set)
-		return 0;
-
-	if (!(features[0].valid & mask))
-		return 0;
-
-	features[0].valid &= ~mask;
-
-	do_set = !!(features[0].requested & mask);
-
-	if (legacy_set(dev, do_set) < 0)
-		netdev_info(dev,
-			"Legacy feature change (%s) failed for 0x%08x\n",
-			do_set ? "set" : "clear", mask);
-
-	return 1;
-}
-
-static int ethtool_set_flags_compat(struct net_device *dev,
-	int (*legacy_set)(struct net_device *, u32),
-	struct ethtool_set_features_block *features, u32 mask)
-{
-	u32 value;
-
-	if (!legacy_set)
-		return 0;
-
-	if (!(features[0].valid & mask))
-		return 0;
-
-	value = dev->features & ~features[0].valid;
-	value |= features[0].requested;
-
-	features[0].valid &= ~mask;
-
-	if (legacy_set(dev, value & mask) < 0)
-		netdev_info(dev, "Legacy flags change failed\n");
-
-	return 1;
-}
-
-static int ethtool_set_features_compat(struct net_device *dev,
-	struct ethtool_set_features_block *features)
-{
-	int compat;
-
-	if (!dev->ethtool_ops)
-		return 0;
-
-	compat  = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
-		features, NETIF_F_SG);
-	compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
-		features, NETIF_F_ALL_CSUM);
-	compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
-		features, NETIF_F_ALL_TSO);
-	compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
-		features, NETIF_F_RXCSUM);
-	compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
-		features, flags_dup_features);
-
-	return compat;
-}
+	[NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
+	[NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",
+	[NETIF_F_FCOE_MTU_BIT] =         "fcoe-mtu",
+	[NETIF_F_NTUPLE_BIT] =           "rx-ntuple-filter",
+	[NETIF_F_RXHASH_BIT] =           "rx-hashing",
+	[NETIF_F_RXCSUM_BIT] =           "rx-checksum",
+	[NETIF_F_NOCACHE_COPY_BIT] =     "tx-nocache-copy",
+	[NETIF_F_LOOPBACK_BIT] =         "loopback",
+};
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
 {
@@ -272,18 +81,21 @@
 		.cmd = ETHTOOL_GFEATURES,
 		.size = ETHTOOL_DEV_FEATURE_WORDS,
 	};
-	struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
-		{
-			.available = dev->hw_features,
-			.requested = dev->wanted_features,
-			.active = dev->features,
-			.never_changed = NETIF_F_NEVER_CHANGE,
-		},
-	};
+	struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
 	u32 __user *sizeaddr;
 	u32 copy_size;
+	int i;
 
-	ethtool_get_features_compat(dev, features);
+	/* in case feature bits run out again */
+	BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
+
+	for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+		features[i].available = (u32)(dev->hw_features >> (32 * i));
+		features[i].requested = (u32)(dev->wanted_features >> (32 * i));
+		features[i].active = (u32)(dev->features >> (32 * i));
+		features[i].never_changed =
+			(u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
+	}
 
 	sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
 	if (get_user(copy_size, sizeaddr))
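
The hunk above converts ethtool_get_features() to shuttle the (potentially wider than 32-bit) netdev_features_t to user space as an array of u32 words, shifting by 32*i per word; ethtool_set_features() below does the reverse. A small sketch of that split/rejoin, assuming a 64-bit feature type purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define FEATURE_COUNT 40
#define FEATURE_WORDS ((FEATURE_COUNT + 31) / 32)

typedef uint64_t features_t;	/* stands in for netdev_features_t */

static void split_features(features_t f, uint32_t words[FEATURE_WORDS])
{
	for (int i = 0; i < FEATURE_WORDS; i++)
		words[i] = (uint32_t)(f >> (32 * i));
}

static features_t join_features(const uint32_t words[FEATURE_WORDS])
{
	features_t f = 0;

	for (int i = 0; i < FEATURE_WORDS; i++)
		f |= (features_t)words[i] << (32 * i);
	return f;
}

int main(void)
{
	features_t f = ((features_t)1 << 35) | (1u << 3);	/* two example bits */
	uint32_t words[FEATURE_WORDS];

	split_features(f, words);
	printf("words: %08x %08x, rejoined ok: %d\n",
	       words[0], words[1], join_features(words) == f);
	return 0;
}
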
@@ -305,7 +117,8 @@
 {
 	struct ethtool_sfeatures cmd;
 	struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
-	int ret = 0;
+	netdev_features_t wanted = 0, valid = 0;
+	int i, ret = 0;
 
 	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
 		return -EFAULT;
@@ -317,65 +130,29 @@
 	if (copy_from_user(features, useraddr, sizeof(features)))
 		return -EFAULT;
 
-	if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
+	for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+		valid |= (netdev_features_t)features[i].valid << (32 * i);
+		wanted |= (netdev_features_t)features[i].requested << (32 * i);
+	}
+
+	if (valid & ~NETIF_F_ETHTOOL_BITS)
 		return -EINVAL;
 
-	if (ethtool_set_features_compat(dev, features))
-		ret |= ETHTOOL_F_COMPAT;
-
-	if (features[0].valid & ~dev->hw_features) {
-		features[0].valid &= dev->hw_features;
+	if (valid & ~dev->hw_features) {
+		valid &= dev->hw_features;
 		ret |= ETHTOOL_F_UNSUPPORTED;
 	}
 
-	dev->wanted_features &= ~features[0].valid;
-	dev->wanted_features |= features[0].valid & features[0].requested;
+	dev->wanted_features &= ~valid;
+	dev->wanted_features |= wanted & valid;
 	__netdev_update_features(dev);
 
-	if ((dev->wanted_features ^ dev->features) & features[0].valid)
+	if ((dev->wanted_features ^ dev->features) & valid)
 		ret |= ETHTOOL_F_WISH;
 
 	return ret;
 }
 
-static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
-	/* NETIF_F_SG */              "tx-scatter-gather",
-	/* NETIF_F_IP_CSUM */         "tx-checksum-ipv4",
-	/* NETIF_F_NO_CSUM */         "tx-checksum-unneeded",
-	/* NETIF_F_HW_CSUM */         "tx-checksum-ip-generic",
-	/* NETIF_F_IPV6_CSUM */       "tx-checksum-ipv6",
-	/* NETIF_F_HIGHDMA */         "highdma",
-	/* NETIF_F_FRAGLIST */        "tx-scatter-gather-fraglist",
-	/* NETIF_F_HW_VLAN_TX */      "tx-vlan-hw-insert",
-
-	/* NETIF_F_HW_VLAN_RX */      "rx-vlan-hw-parse",
-	/* NETIF_F_HW_VLAN_FILTER */  "rx-vlan-filter",
-	/* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
-	/* NETIF_F_GSO */             "tx-generic-segmentation",
-	/* NETIF_F_LLTX */            "tx-lockless",
-	/* NETIF_F_NETNS_LOCAL */     "netns-local",
-	/* NETIF_F_GRO */             "rx-gro",
-	/* NETIF_F_LRO */             "rx-lro",
-
-	/* NETIF_F_TSO */             "tx-tcp-segmentation",
-	/* NETIF_F_UFO */             "tx-udp-fragmentation",
-	/* NETIF_F_GSO_ROBUST */      "tx-gso-robust",
-	/* NETIF_F_TSO_ECN */         "tx-tcp-ecn-segmentation",
-	/* NETIF_F_TSO6 */            "tx-tcp6-segmentation",
-	/* NETIF_F_FSO */             "tx-fcoe-segmentation",
-	"",
-	"",
-
-	/* NETIF_F_FCOE_CRC */        "tx-checksum-fcoe-crc",
-	/* NETIF_F_SCTP_CSUM */       "tx-checksum-sctp",
-	/* NETIF_F_FCOE_MTU */        "fcoe-mtu",
-	/* NETIF_F_NTUPLE */          "rx-ntuple-filter",
-	/* NETIF_F_RXHASH */          "rx-hashing",
-	/* NETIF_F_RXCSUM */          "rx-checksum",
-	/* NETIF_F_NOCACHE_COPY */    "tx-nocache-copy",
-	/* NETIF_F_LOOPBACK */        "loopback",
-};
-
 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
 {
 	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -402,7 +179,7 @@
 		ops->get_strings(dev, stringset, data);
 }
 
-static u32 ethtool_get_feature_mask(u32 eth_cmd)
+static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
 {
 	/* feature masks of legacy discrete ethtool ops */
 
@@ -433,136 +210,82 @@
 	}
 }
 
-static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
-{
-	const struct ethtool_ops *ops = dev->ethtool_ops;
-
-	if (!ops)
-		return NULL;
-
-	switch (ethcmd) {
-	case ETHTOOL_GTXCSUM:
-		return ops->get_tx_csum;
-	case ETHTOOL_GRXCSUM:
-		return ops->get_rx_csum;
-	case ETHTOOL_SSG:
-		return ops->get_sg;
-	case ETHTOOL_STSO:
-		return ops->get_tso;
-	case ETHTOOL_SUFO:
-		return ops->get_ufo;
-	default:
-		return NULL;
-	}
-}
-
-static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
-{
-	return !!(dev->features & NETIF_F_ALL_CSUM);
-}
-
 static int ethtool_get_one_feature(struct net_device *dev,
 	char __user *useraddr, u32 ethcmd)
 {
-	u32 mask = ethtool_get_feature_mask(ethcmd);
+	netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
 	struct ethtool_value edata = {
 		.cmd = ethcmd,
 		.data = !!(dev->features & mask),
 	};
 
-	/* compatibility with discrete get_ ops */
-	if (!(dev->hw_features & mask)) {
-		u32 (*actor)(struct net_device *);
-
-		actor = __ethtool_get_one_feature_actor(dev, ethcmd);
-
-		/* bug compatibility with old get_rx_csum */
-		if (ethcmd == ETHTOOL_GRXCSUM && !actor)
-			actor = __ethtool_get_rx_csum_oldbug;
-
-		if (actor)
-			edata.data = actor(dev);
-	}
-
 	if (copy_to_user(useraddr, &edata, sizeof(edata)))
 		return -EFAULT;
 	return 0;
 }
 
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_sg(struct net_device *dev, u32 data);
-static int __ethtool_set_tso(struct net_device *dev, u32 data);
-static int __ethtool_set_ufo(struct net_device *dev, u32 data);
-
 static int ethtool_set_one_feature(struct net_device *dev,
 	void __user *useraddr, u32 ethcmd)
 {
 	struct ethtool_value edata;
-	u32 mask;
+	netdev_features_t mask;
 
 	if (copy_from_user(&edata, useraddr, sizeof(edata)))
 		return -EFAULT;
 
 	mask = ethtool_get_feature_mask(ethcmd);
 	mask &= dev->hw_features;
-	if (mask) {
-		if (edata.data)
-			dev->wanted_features |= mask;
-		else
-			dev->wanted_features &= ~mask;
-
-		__netdev_update_features(dev);
-		return 0;
-	}
-
-	/* Driver is not converted to ndo_fix_features or does not
-	 * support changing this offload. In the latter case it won't
-	 * have corresponding ethtool_ops field set.
-	 *
-	 * Following part is to be removed after all drivers advertise
-	 * their changeable features in netdev->hw_features and stop
-	 * using discrete offload setting ops.
-	 */
-
-	switch (ethcmd) {
-	case ETHTOOL_STXCSUM:
-		return __ethtool_set_tx_csum(dev, edata.data);
-	case ETHTOOL_SRXCSUM:
-		return __ethtool_set_rx_csum(dev, edata.data);
-	case ETHTOOL_SSG:
-		return __ethtool_set_sg(dev, edata.data);
-	case ETHTOOL_STSO:
-		return __ethtool_set_tso(dev, edata.data);
-	case ETHTOOL_SUFO:
-		return __ethtool_set_ufo(dev, edata.data);
-	default:
+	if (!mask)
 		return -EOPNOTSUPP;
-	}
+
+	if (edata.data)
+		dev->wanted_features |= mask;
+	else
+		dev->wanted_features &= ~mask;
+
+	__netdev_update_features(dev);
+
+	return 0;
 }
 
-int __ethtool_set_flags(struct net_device *dev, u32 data)
-{
-	u32 changed;
+#define ETH_ALL_FLAGS    (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
+			  ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
+#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \
+			  NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH)
 
-	if (data & ~flags_dup_features)
+static u32 __ethtool_get_flags(struct net_device *dev)
+{
+	u32 flags = 0;
+
+	if (dev->features & NETIF_F_LRO)	flags |= ETH_FLAG_LRO;
+	if (dev->features & NETIF_F_HW_VLAN_RX)	flags |= ETH_FLAG_RXVLAN;
+	if (dev->features & NETIF_F_HW_VLAN_TX)	flags |= ETH_FLAG_TXVLAN;
+	if (dev->features & NETIF_F_NTUPLE)	flags |= ETH_FLAG_NTUPLE;
+	if (dev->features & NETIF_F_RXHASH)	flags |= ETH_FLAG_RXHASH;
+
+	return flags;
+}
+
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
+{
+	netdev_features_t features = 0, changed;
+
+	if (data & ~ETH_ALL_FLAGS)
 		return -EINVAL;
 
-	/* legacy set_flags() op */
-	if (dev->ethtool_ops->set_flags) {
-		if (unlikely(dev->hw_features & flags_dup_features))
-			netdev_warn(dev,
-				"driver BUG: mixed hw_features and set_flags()\n");
-		return dev->ethtool_ops->set_flags(dev, data);
-	}
+	if (data & ETH_FLAG_LRO)	features |= NETIF_F_LRO;
+	if (data & ETH_FLAG_RXVLAN)	features |= NETIF_F_HW_VLAN_RX;
+	if (data & ETH_FLAG_TXVLAN)	features |= NETIF_F_HW_VLAN_TX;
+	if (data & ETH_FLAG_NTUPLE)	features |= NETIF_F_NTUPLE;
+	if (data & ETH_FLAG_RXHASH)	features |= NETIF_F_RXHASH;
 
 	/* allow changing only bits set in hw_features */
-	changed = (data ^ dev->features) & flags_dup_features;
+	changed = (features ^ dev->features) & ETH_ALL_FEATURES;
 	if (changed & ~dev->hw_features)
 		return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
 
 	dev->wanted_features =
-		(dev->wanted_features & ~changed) | (data & dev->hw_features);
+		(dev->wanted_features & ~changed) | (features & changed);
 
 	__netdev_update_features(dev);
 
@@ -716,6 +439,7 @@
 {
 	struct ethtool_rxnfc info;
 	size_t info_size = sizeof(info);
+	int rc;
 
 	if (!dev->ethtool_ops->set_rxnfc)
 		return -EOPNOTSUPP;
@@ -731,7 +455,15 @@
 	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
-	return dev->ethtool_ops->set_rxnfc(dev, &info);
+	rc = dev->ethtool_ops->set_rxnfc(dev, &info);
+	if (rc)
+		return rc;
+
+	if (cmd == ETHTOOL_SRXCLSRLINS &&
+	    copy_to_user(useraddr, &info, info_size))
+		return -EFAULT;
+
+	return 0;
 }
 
 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
@@ -792,34 +524,44 @@
 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
 						     void __user *useraddr)
 {
-	struct ethtool_rxfh_indir *indir;
-	u32 table_size;
-	size_t full_size;
+	u32 user_size, dev_size;
+	u32 *indir;
 	int ret;
 
-	if (!dev->ethtool_ops->get_rxfh_indir)
+	if (!dev->ethtool_ops->get_rxfh_indir_size ||
+	    !dev->ethtool_ops->get_rxfh_indir)
+		return -EOPNOTSUPP;
+	dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+	if (dev_size == 0)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&table_size,
+	if (copy_from_user(&user_size,
 			   useraddr + offsetof(struct ethtool_rxfh_indir, size),
-			   sizeof(table_size)))
+			   sizeof(user_size)))
 		return -EFAULT;
 
-	if (table_size >
-	    (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
-		return -ENOMEM;
-	full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-	indir = kzalloc(full_size, GFP_USER);
+	if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
+			 &dev_size, sizeof(dev_size)))
+		return -EFAULT;
+
+	/* If the user buffer size is 0, this is just a query for the
+	 * device table size.  Otherwise, if it's smaller than the
+	 * device table size it's an error.
+	 */
+	if (user_size < dev_size)
+		return user_size == 0 ? 0 : -EINVAL;
+
+	indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
 	if (!indir)
 		return -ENOMEM;
 
-	indir->cmd = ETHTOOL_GRXFHINDIR;
-	indir->size = table_size;
 	ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
 	if (ret)
 		goto out;
 
-	if (copy_to_user(useraddr, indir, full_size))
+	if (copy_to_user(useraddr +
+			 offsetof(struct ethtool_rxfh_indir, ring_index[0]),
+			 indir, dev_size * sizeof(indir[0])))
 		ret = -EFAULT;
 
 out:
@@ -830,30 +572,56 @@
 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
 						     void __user *useraddr)
 {
-	struct ethtool_rxfh_indir *indir;
-	u32 table_size;
-	size_t full_size;
+	struct ethtool_rxnfc rx_rings;
+	u32 user_size, dev_size, i;
+	u32 *indir;
 	int ret;
 
-	if (!dev->ethtool_ops->set_rxfh_indir)
+	if (!dev->ethtool_ops->get_rxfh_indir_size ||
+	    !dev->ethtool_ops->set_rxfh_indir ||
+	    !dev->ethtool_ops->get_rxnfc)
+		return -EOPNOTSUPP;
+	dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+	if (dev_size == 0)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&table_size,
+	if (copy_from_user(&user_size,
 			   useraddr + offsetof(struct ethtool_rxfh_indir, size),
-			   sizeof(table_size)))
+			   sizeof(user_size)))
 		return -EFAULT;
 
-	if (table_size >
-	    (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
-		return -ENOMEM;
-	full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-	indir = kmalloc(full_size, GFP_USER);
+	if (user_size != 0 && user_size != dev_size)
+		return -EINVAL;
+
+	indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
 	if (!indir)
 		return -ENOMEM;
 
-	if (copy_from_user(indir, useraddr, full_size)) {
-		ret = -EFAULT;
+	rx_rings.cmd = ETHTOOL_GRXRINGS;
+	ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL);
+	if (ret)
 		goto out;
+
+	if (user_size == 0) {
+		for (i = 0; i < dev_size; i++)
+			indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+	} else {
+		if (copy_from_user(indir,
+				  useraddr +
+				  offsetof(struct ethtool_rxfh_indir,
+					   ring_index[0]),
+				  dev_size * sizeof(indir[0]))) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		/* Validate ring indices */
+		for (i = 0; i < dev_size; i++) {
+			if (indir[i] >= rx_rings.data) {
+				ret = -EINVAL;
+				goto out;
+			}
+		}
 	}
 
 	ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
@@ -863,58 +631,6 @@
 	return ret;
 }
 
-/*
- * ethtool does not (or did not) set masks for flow parameters that are
- * not specified, so if both value and mask are 0 then this must be
- * treated as equivalent to a mask with all bits set.  Implement that
- * here rather than in drivers.
- */
-static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
-{
-	struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
-	struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
-
-	if (fs->flow_type != TCP_V4_FLOW &&
-	    fs->flow_type != UDP_V4_FLOW &&
-	    fs->flow_type != SCTP_V4_FLOW)
-		return;
-
-	if (!(entry->ip4src | mask->ip4src))
-		mask->ip4src = htonl(0xffffffff);
-	if (!(entry->ip4dst | mask->ip4dst))
-		mask->ip4dst = htonl(0xffffffff);
-	if (!(entry->psrc | mask->psrc))
-		mask->psrc = htons(0xffff);
-	if (!(entry->pdst | mask->pdst))
-		mask->pdst = htons(0xffff);
-	if (!(entry->tos | mask->tos))
-		mask->tos = 0xff;
-	if (!(fs->vlan_tag | fs->vlan_tag_mask))
-		fs->vlan_tag_mask = 0xffff;
-	if (!(fs->data | fs->data_mask))
-		fs->data_mask = 0xffffffffffffffffULL;
-}
-
-static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
-						    void __user *useraddr)
-{
-	struct ethtool_rx_ntuple cmd;
-	const struct ethtool_ops *ops = dev->ethtool_ops;
-
-	if (!ops->set_rx_ntuple)
-		return -EOPNOTSUPP;
-
-	if (!(dev->features & NETIF_F_NTUPLE))
-		return -EINVAL;
-
-	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
-		return -EFAULT;
-
-	rx_ntuple_fix_masks(&cmd.fs);
-
-	return ops->set_rx_ntuple(dev, &cmd);
-}
-
 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 {
 	struct ethtool_regs regs;
@@ -1231,81 +947,6 @@
 	return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
 }
 
-static int __ethtool_set_sg(struct net_device *dev, u32 data)
-{
-	int err;
-
-	if (!dev->ethtool_ops->set_sg)
-		return -EOPNOTSUPP;
-
-	if (data && !(dev->features & NETIF_F_ALL_CSUM))
-		return -EINVAL;
-
-	if (!data && dev->ethtool_ops->set_tso) {
-		err = dev->ethtool_ops->set_tso(dev, 0);
-		if (err)
-			return err;
-	}
-
-	if (!data && dev->ethtool_ops->set_ufo) {
-		err = dev->ethtool_ops->set_ufo(dev, 0);
-		if (err)
-			return err;
-	}
-	return dev->ethtool_ops->set_sg(dev, data);
-}
-
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
-{
-	int err;
-
-	if (!dev->ethtool_ops->set_tx_csum)
-		return -EOPNOTSUPP;
-
-	if (!data && dev->ethtool_ops->set_sg) {
-		err = __ethtool_set_sg(dev, 0);
-		if (err)
-			return err;
-	}
-
-	return dev->ethtool_ops->set_tx_csum(dev, data);
-}
-
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
-{
-	if (!dev->ethtool_ops->set_rx_csum)
-		return -EOPNOTSUPP;
-
-	if (!data)
-		dev->features &= ~NETIF_F_GRO;
-
-	return dev->ethtool_ops->set_rx_csum(dev, data);
-}
-
-static int __ethtool_set_tso(struct net_device *dev, u32 data)
-{
-	if (!dev->ethtool_ops->set_tso)
-		return -EOPNOTSUPP;
-
-	if (data && !(dev->features & NETIF_F_SG))
-		return -EINVAL;
-
-	return dev->ethtool_ops->set_tso(dev, data);
-}
-
-static int __ethtool_set_ufo(struct net_device *dev, u32 data)
-{
-	if (!dev->ethtool_ops->set_ufo)
-		return -EOPNOTSUPP;
-	if (data && !(dev->features & NETIF_F_SG))
-		return -EINVAL;
-	if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
-		(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
-			== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
-		return -EINVAL;
-	return dev->ethtool_ops->set_ufo(dev, data);
-}
-
 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 {
 	struct ethtool_test test;
@@ -1771,9 +1412,7 @@
 		break;
 	case ETHTOOL_GFLAGS:
 		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       (dev->ethtool_ops->get_flags ?
-					dev->ethtool_ops->get_flags :
-					ethtool_op_get_flags));
+					__ethtool_get_flags);
 		break;
 	case ETHTOOL_SFLAGS:
 		rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
@@ -1804,9 +1443,6 @@
 	case ETHTOOL_RESET:
 		rc = ethtool_reset(dev, useraddr);
 		break;
-	case ETHTOOL_SRXNTUPLE:
-		rc = ethtool_set_rx_ntuple(dev, useraddr);
-		break;
 	case ETHTOOL_GSSET_INFO:
 		rc = ethtool_get_sset_info(dev, useraddr);
 		break;
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de..e318c7e 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@
 	put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+	flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+	schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
new file mode 100644
index 0000000..0985b9b
--- /dev/null
+++ b/net/core/flow_dissector.c
@@ -0,0 +1,143 @@
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <net/flow_keys.h>
+
+/* copy saddr & daddr, possibly using a 64-bit load/store
+ * Equivalent to:	flow->src = iph->saddr;
+ *			flow->dst = iph->daddr;
+ */
+static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+{
+	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
+		     offsetof(typeof(*flow), src) + sizeof(flow->src));
+	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+}
+
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+	int poff, nhoff = skb_network_offset(skb);
+	u8 ip_proto;
+	__be16 proto = skb->protocol;
+
+	memset(flow, 0, sizeof(*flow));
+
+again:
+	switch (proto) {
+	case __constant_htons(ETH_P_IP): {
+		const struct iphdr *iph;
+		struct iphdr _iph;
+ip:
+		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+		if (!iph)
+			return false;
+
+		if (ip_is_fragment(iph))
+			ip_proto = 0;
+		else
+			ip_proto = iph->protocol;
+		iph_to_flow_copy_addrs(flow, iph);
+		nhoff += iph->ihl * 4;
+		break;
+	}
+	case __constant_htons(ETH_P_IPV6): {
+		const struct ipv6hdr *iph;
+		struct ipv6hdr _iph;
+ipv6:
+		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+		if (!iph)
+			return false;
+
+		ip_proto = iph->nexthdr;
+		flow->src = iph->saddr.s6_addr32[3];
+		flow->dst = iph->daddr.s6_addr32[3];
+		nhoff += sizeof(struct ipv6hdr);
+		break;
+	}
+	case __constant_htons(ETH_P_8021Q): {
+		const struct vlan_hdr *vlan;
+		struct vlan_hdr _vlan;
+
+		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
+		if (!vlan)
+			return false;
+
+		proto = vlan->h_vlan_encapsulated_proto;
+		nhoff += sizeof(*vlan);
+		goto again;
+	}
+	case __constant_htons(ETH_P_PPP_SES): {
+		struct {
+			struct pppoe_hdr hdr;
+			__be16 proto;
+		} *hdr, _hdr;
+		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+		if (!hdr)
+			return false;
+		proto = hdr->proto;
+		nhoff += PPPOE_SES_HLEN;
+		switch (proto) {
+		case __constant_htons(PPP_IP):
+			goto ip;
+		case __constant_htons(PPP_IPV6):
+			goto ipv6;
+		default:
+			return false;
+		}
+	}
+	default:
+		return false;
+	}
+
+	switch (ip_proto) {
+	case IPPROTO_GRE: {
+		struct gre_hdr {
+			__be16 flags;
+			__be16 proto;
+		} *hdr, _hdr;
+
+		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+		if (!hdr)
+			return false;
+		/*
+		 * Only look inside GRE if version zero and no
+		 * routing
+		 */
+		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
+			proto = hdr->proto;
+			nhoff += 4;
+			if (hdr->flags & GRE_CSUM)
+				nhoff += 4;
+			if (hdr->flags & GRE_KEY)
+				nhoff += 4;
+			if (hdr->flags & GRE_SEQ)
+				nhoff += 4;
+			goto again;
+		}
+		break;
+	}
+	case IPPROTO_IPIP:
+		goto again;
+	default:
+		break;
+	}
+
+	flow->ip_proto = ip_proto;
+	poff = proto_ports_offset(ip_proto);
+	if (poff >= 0) {
+		__be32 *ports, _ports;
+
+		nhoff += poff;
+		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
+		if (ports)
+			flow->ports = *ports;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(skb_flow_dissect);
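
The intended use of the new helper is to extract the canonical addresses, ports and L4 protocol once, then hash or classify on the result. A minimal, hypothetical caller, assuming the struct flow_keys layout (src, dst, ports, ip_proto) from net/flow_keys.h introduced with this file, could look like:

#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <net/flow_keys.h>

static u32 example_flow_hash(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;	/* not a packet we can dissect */

	/* fold addresses, ports and protocol into a single word */
	return jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, 0);
}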
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5ac07d3..e287346 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -238,6 +238,7 @@
 				   it to safe state.
 				 */
 				skb_queue_purge(&n->arp_queue);
+				n->arp_queue_len_bytes = 0;
 				n->output = neigh_blackhole;
 				if (n->nud_state & NUD_VALID)
 					n->nud_state = NUD_NOARP;
@@ -272,7 +273,7 @@
 }
 EXPORT_SYMBOL(neigh_ifdown);
 
-static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 {
 	struct neighbour *n = NULL;
 	unsigned long now = jiffies;
@@ -287,7 +288,15 @@
 			goto out_entries;
 	}
 
-	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+	if (tbl->entry_size)
+		n = kzalloc(tbl->entry_size, GFP_ATOMIC);
+	else {
+		int sz = sizeof(*n) + tbl->key_len;
+
+		sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
+		sz += dev->neigh_priv_len;
+		n = kzalloc(sz, GFP_ATOMIC);
+	}
 	if (!n)
 		goto out_entries;
 
@@ -313,11 +322,18 @@
 	goto out;
 }
 
+static void neigh_get_hash_rnd(u32 *x)
+{
+	get_random_bytes(x, sizeof(*x));
+	*x |= 1;
+}
+
 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 {
 	size_t size = (1 << shift) * sizeof(struct neighbour *);
 	struct neigh_hash_table *ret;
 	struct neighbour __rcu **buckets;
+	int i;
 
 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 	if (!ret)
@@ -334,8 +350,8 @@
 	}
 	ret->hash_buckets = buckets;
 	ret->hash_shift = shift;
-	get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
-	ret->hash_rnd |= 1;
+	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
+		neigh_get_hash_rnd(&ret->hash_rnd[i]);
 	return ret;
 }
 
@@ -462,7 +478,7 @@
 	u32 hash_val;
 	int key_len = tbl->key_len;
 	int error;
-	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
 	struct neigh_hash_table *nht;
 
 	if (!n) {
@@ -480,6 +496,14 @@
 		goto out_neigh_release;
 	}
 
+	if (dev->netdev_ops->ndo_neigh_construct) {
+		error = dev->netdev_ops->ndo_neigh_construct(n);
+		if (error < 0) {
+			rc = ERR_PTR(error);
+			goto out_neigh_release;
+		}
+	}
+
 	/* Device specific setup. */
 	if (n->parms->neigh_setup &&
 	    (error = n->parms->neigh_setup(n)) < 0) {
@@ -677,18 +701,14 @@
 		neigh_parms_destroy(parms);
 }
 
-static void neigh_destroy_rcu(struct rcu_head *head)
-{
-	struct neighbour *neigh = container_of(head, struct neighbour, rcu);
-
-	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
-}
 /*
  *	neighbour must already be out of the table;
  *
  */
 void neigh_destroy(struct neighbour *neigh)
 {
+	struct net_device *dev = neigh->dev;
+
 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 
 	if (!neigh->dead) {
@@ -702,14 +722,18 @@
 		printk(KERN_WARNING "Impossible event.\n");
 
 	skb_queue_purge(&neigh->arp_queue);
+	neigh->arp_queue_len_bytes = 0;
 
-	dev_put(neigh->dev);
+	if (dev->netdev_ops->ndo_neigh_destroy)
+		dev->netdev_ops->ndo_neigh_destroy(neigh);
+
+	dev_put(dev);
 	neigh_parms_put(neigh->parms);
 
 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 
 	atomic_dec(&neigh->tbl->entries);
-	call_rcu(&neigh->rcu, neigh_destroy_rcu);
+	kfree_rcu(neigh, rcu);
 }
 EXPORT_SYMBOL(neigh_destroy);
 
@@ -842,6 +866,7 @@
 		write_lock(&neigh->lock);
 	}
 	skb_queue_purge(&neigh->arp_queue);
+	neigh->arp_queue_len_bytes = 0;
 }
 
 static void neigh_probe(struct neighbour *neigh)
@@ -980,15 +1005,20 @@
 
 	if (neigh->nud_state == NUD_INCOMPLETE) {
 		if (skb) {
-			if (skb_queue_len(&neigh->arp_queue) >=
-			    neigh->parms->queue_len) {
+			while (neigh->arp_queue_len_bytes + skb->truesize >
+			       neigh->parms->queue_len_bytes) {
 				struct sk_buff *buff;
+
 				buff = __skb_dequeue(&neigh->arp_queue);
+				if (!buff)
+					break;
+				neigh->arp_queue_len_bytes -= buff->truesize;
 				kfree_skb(buff);
 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
 			}
 			skb_dst_force(skb);
 			__skb_queue_tail(&neigh->arp_queue, skb);
+			neigh->arp_queue_len_bytes += skb->truesize;
 		}
 		rc = 1;
 	}
@@ -1167,7 +1197,7 @@
 
 			rcu_read_lock();
 			/* On shaper/eql skb->dst->neighbour != neigh :( */
-			if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+			if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
 				n1 = n2;
 			n1->output(n1, skb);
 			rcu_read_unlock();
@@ -1175,6 +1205,7 @@
 			write_lock_bh(&neigh->lock);
 		}
 		skb_queue_purge(&neigh->arp_queue);
+		neigh->arp_queue_len_bytes = 0;
 	}
 out:
 	if (update_isrouter) {
@@ -1477,11 +1508,6 @@
 	tbl->parms.reachable_time =
 			  neigh_rand_reach_time(tbl->parms.base_reachable_time);
 
-	if (!tbl->kmem_cachep)
-		tbl->kmem_cachep =
-			kmem_cache_create(tbl->id, tbl->entry_size, 0,
-					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-					  NULL);
 	tbl->stats = alloc_percpu(struct neigh_statistics);
 	if (!tbl->stats)
 		panic("cannot create neighbour cache statistics");
@@ -1566,9 +1592,6 @@
 	free_percpu(tbl->stats);
 	tbl->stats = NULL;
 
-	kmem_cache_destroy(tbl->kmem_cachep);
-	tbl->kmem_cachep = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL(neigh_table_clear);
@@ -1747,7 +1770,11 @@
 		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
 
 	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
-	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+	NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
+	/* approximate value for deprecated QUEUE_LEN (in packets) */
+	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
+		    DIV_ROUND_UP(parms->queue_len_bytes,
+				 SKB_TRUESIZE(ETH_FRAME_LEN)));
 	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
 	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
 	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
@@ -1808,7 +1835,7 @@
 
 		rcu_read_lock_bh();
 		nht = rcu_dereference_bh(tbl->nht);
-		ndc.ndtc_hash_rnd = nht->hash_rnd;
+		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
 		rcu_read_unlock_bh();
 
@@ -1974,7 +2001,11 @@
 
 			switch (i) {
 			case NDTPA_QUEUE_LEN:
-				p->queue_len = nla_get_u32(tbp[i]);
+				p->queue_len_bytes = nla_get_u32(tbp[i]) *
+						     SKB_TRUESIZE(ETH_FRAME_LEN);
+				break;
+			case NDTPA_QUEUE_LENBYTES:
+				p->queue_len_bytes = nla_get_u32(tbp[i]);
 				break;
 			case NDTPA_PROXY_QLEN:
 				p->proxy_qlen = nla_get_u32(tbp[i]);
@@ -2638,117 +2669,158 @@
 
 #ifdef CONFIG_SYSCTL
 
-#define NEIGH_VARS_MAX 19
+static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
+			   size_t *lenp, loff_t *ppos)
+{
+	int size, ret;
+	ctl_table tmp = *ctl;
+
+	tmp.data = &size;
+	size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
+	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+	if (write && !ret)
+		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
+	return ret;
+}
+
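To make the packets/bytes translation above concrete, a rough worked example follows; the truesize figure is an assumption for a typical 64-bit build, not an exact value:

/*
 * Assuming SKB_TRUESIZE(ETH_FRAME_LEN) is roughly 2048 bytes:
 *
 *	echo 3 > unres_qlen	-> queue_len_bytes ~= 3 * 2048 = 6144
 *	cat unres_qlen		-> DIV_ROUND_UP(6144, 2048)   = 3
 *
 * so small legacy values round-trip even though the real limit is
 * now expressed in bytes.
 */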
+enum {
+	NEIGH_VAR_MCAST_PROBE,
+	NEIGH_VAR_UCAST_PROBE,
+	NEIGH_VAR_APP_PROBE,
+	NEIGH_VAR_RETRANS_TIME,
+	NEIGH_VAR_BASE_REACHABLE_TIME,
+	NEIGH_VAR_DELAY_PROBE_TIME,
+	NEIGH_VAR_GC_STALETIME,
+	NEIGH_VAR_QUEUE_LEN,
+	NEIGH_VAR_QUEUE_LEN_BYTES,
+	NEIGH_VAR_PROXY_QLEN,
+	NEIGH_VAR_ANYCAST_DELAY,
+	NEIGH_VAR_PROXY_DELAY,
+	NEIGH_VAR_LOCKTIME,
+	NEIGH_VAR_RETRANS_TIME_MS,
+	NEIGH_VAR_BASE_REACHABLE_TIME_MS,
+	NEIGH_VAR_GC_INTERVAL,
+	NEIGH_VAR_GC_THRESH1,
+	NEIGH_VAR_GC_THRESH2,
+	NEIGH_VAR_GC_THRESH3,
+	NEIGH_VAR_MAX
+};
 
 static struct neigh_sysctl_table {
 	struct ctl_table_header *sysctl_header;
-	struct ctl_table neigh_vars[NEIGH_VARS_MAX];
+	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
 	char *dev_name;
 } neigh_sysctl_template __read_mostly = {
 	.neigh_vars = {
-		{
+		[NEIGH_VAR_MCAST_PROBE] = {
 			.procname	= "mcast_solicit",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_UCAST_PROBE] = {
 			.procname	= "ucast_solicit",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_APP_PROBE] = {
 			.procname	= "app_solicit",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_RETRANS_TIME] = {
 			.procname	= "retrans_time",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_userhz_jiffies,
 		},
-		{
+		[NEIGH_VAR_BASE_REACHABLE_TIME] = {
 			.procname	= "base_reachable_time",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_jiffies,
 		},
-		{
+		[NEIGH_VAR_DELAY_PROBE_TIME] = {
 			.procname	= "delay_first_probe_time",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_jiffies,
 		},
-		{
+		[NEIGH_VAR_GC_STALETIME] = {
 			.procname	= "gc_stale_time",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_jiffies,
 		},
-		{
+		[NEIGH_VAR_QUEUE_LEN] = {
 			.procname	= "unres_qlen",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
+			.proc_handler	= proc_unres_qlen,
+		},
+		[NEIGH_VAR_QUEUE_LEN_BYTES] = {
+			.procname	= "unres_qlen_bytes",
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_PROXY_QLEN] = {
 			.procname	= "proxy_qlen",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_ANYCAST_DELAY] = {
 			.procname	= "anycast_delay",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_userhz_jiffies,
 		},
-		{
+		[NEIGH_VAR_PROXY_DELAY] = {
 			.procname	= "proxy_delay",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_userhz_jiffies,
 		},
-		{
+		[NEIGH_VAR_LOCKTIME] = {
 			.procname	= "locktime",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_userhz_jiffies,
 		},
-		{
+		[NEIGH_VAR_RETRANS_TIME_MS] = {
 			.procname	= "retrans_time_ms",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_ms_jiffies,
 		},
-		{
+		[NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
 			.procname	= "base_reachable_time_ms",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_ms_jiffies,
 		},
-		{
+		[NEIGH_VAR_GC_INTERVAL] = {
 			.procname	= "gc_interval",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec_jiffies,
 		},
-		{
+		[NEIGH_VAR_GC_THRESH1] = {
 			.procname	= "gc_thresh1",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_GC_THRESH2] = {
 			.procname	= "gc_thresh2",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
 			.proc_handler	= proc_dointvec,
 		},
-		{
+		[NEIGH_VAR_GC_THRESH3] = {
 			.procname	= "gc_thresh3",
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
@@ -2781,47 +2853,49 @@
 	if (!t)
 		goto err;
 
-	t->neigh_vars[0].data  = &p->mcast_probes;
-	t->neigh_vars[1].data  = &p->ucast_probes;
-	t->neigh_vars[2].data  = &p->app_probes;
-	t->neigh_vars[3].data  = &p->retrans_time;
-	t->neigh_vars[4].data  = &p->base_reachable_time;
-	t->neigh_vars[5].data  = &p->delay_probe_time;
-	t->neigh_vars[6].data  = &p->gc_staletime;
-	t->neigh_vars[7].data  = &p->queue_len;
-	t->neigh_vars[8].data  = &p->proxy_qlen;
-	t->neigh_vars[9].data  = &p->anycast_delay;
-	t->neigh_vars[10].data = &p->proxy_delay;
-	t->neigh_vars[11].data = &p->locktime;
-	t->neigh_vars[12].data  = &p->retrans_time;
-	t->neigh_vars[13].data  = &p->base_reachable_time;
+	t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data  = &p->mcast_probes;
+	t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data  = &p->ucast_probes;
+	t->neigh_vars[NEIGH_VAR_APP_PROBE].data  = &p->app_probes;
+	t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data  = &p->retrans_time;
+	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data  = &p->base_reachable_time;
+	t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data  = &p->delay_probe_time;
+	t->neigh_vars[NEIGH_VAR_GC_STALETIME].data  = &p->gc_staletime;
+	t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data  = &p->queue_len_bytes;
+	t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data  = &p->queue_len_bytes;
+	t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data  = &p->proxy_qlen;
+	t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data  = &p->anycast_delay;
+	t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
+	t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
+	t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data  = &p->retrans_time;
+	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data  = &p->base_reachable_time;
 
 	if (dev) {
 		dev_name_source = dev->name;
 		/* Terminate the table early */
-		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
+		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
 		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
-		t->neigh_vars[14].data = (int *)(p + 1);
-		t->neigh_vars[15].data = (int *)(p + 1) + 1;
-		t->neigh_vars[16].data = (int *)(p + 1) + 2;
-		t->neigh_vars[17].data = (int *)(p + 1) + 3;
+		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
+		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
 	}
 
 
 	if (handler) {
 		/* RetransTime */
-		t->neigh_vars[3].proc_handler = handler;
-		t->neigh_vars[3].extra1 = dev;
+		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
+		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
 		/* ReachableTime */
-		t->neigh_vars[4].proc_handler = handler;
-		t->neigh_vars[4].extra1 = dev;
+		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
+		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
 		/* RetransTime (in milliseconds)*/
-		t->neigh_vars[12].proc_handler = handler;
-		t->neigh_vars[12].extra1 = dev;
+		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
+		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
 		/* ReachableTime (in milliseconds) */
-		t->neigh_vars[13].proc_handler = handler;
-		t->neigh_vars[13].extra1 = dev;
+		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
 	}
 
 	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434..abf4393 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -21,6 +21,7 @@
 #include <linux/wireless.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
 #include <net/wext.h>
 
 #include "net-sysfs.h"
@@ -606,9 +607,12 @@
 	rcu_assign_pointer(queue->rps_map, map);
 	spin_unlock(&rps_map_lock);
 
-	if (old_map)
+	if (map)
+		jump_label_inc(&rps_needed);
+	if (old_map) {
 		kfree_rcu(old_map, rcu);
-
+		jump_label_dec(&rps_needed);
+	}
 	free_cpumask_var(mask);
 	return len;
 }
@@ -618,15 +622,15 @@
 					   char *buf)
 {
 	struct rps_dev_flow_table *flow_table;
-	unsigned int val = 0;
+	unsigned long val = 0;
 
 	rcu_read_lock();
 	flow_table = rcu_dereference(queue->rps_flow_table);
 	if (flow_table)
-		val = flow_table->mask + 1;
+		val = (unsigned long)flow_table->mask + 1;
 	rcu_read_unlock();
 
-	return sprintf(buf, "%u\n", val);
+	return sprintf(buf, "%lu\n", val);
 }
 
 static void rps_dev_flow_table_release_work(struct work_struct *work)
@@ -650,33 +654,46 @@
 				     struct rx_queue_attribute *attr,
 				     const char *buf, size_t len)
 {
-	unsigned int count;
-	char *endp;
+	unsigned long mask, count;
 	struct rps_dev_flow_table *table, *old_table;
 	static DEFINE_SPINLOCK(rps_dev_flow_lock);
+	int rc;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	count = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EINVAL;
+	rc = kstrtoul(buf, 0, &count);
+	if (rc < 0)
+		return rc;
 
 	if (count) {
-		int i;
-
-		if (count > 1<<30) {
+		mask = count - 1;
+		/* mask = roundup_pow_of_two(count) - 1;
+		 * without overflows...
+		 */
+		while ((mask | (mask >> 1)) != mask)
+			mask |= (mask >> 1);
+		/* On 64-bit arches, must check mask fits in table->mask (u32),
+		 * and on 32-bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
+		 * doesn't overflow.
+		 */
+#if BITS_PER_LONG > 32
+		if (mask > (unsigned long)(u32)mask)
+			return -EINVAL;
+#else
+		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
+				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		count = roundup_pow_of_two(count);
-		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+#endif
+		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
 		if (!table)
 			return -ENOMEM;
 
-		table->mask = count - 1;
-		for (i = 0; i < count; i++)
-			table->flows[i].cpu = RPS_NO_CPU;
+		table->mask = mask;
+		for (count = 0; count <= mask; count++)
+			table->flows[count].cpu = RPS_NO_CPU;
 	} else
 		table = NULL;
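
The open-coded round-up above replaces roundup_pow_of_two(count) - 1, which could overflow for large counts. Isolated as a stand-alone helper (illustrative only, assuming count >= 1), the loop is:

static unsigned long pow2_mask(unsigned long count)
{
	unsigned long mask = count - 1;

	/* smear the top set bit downwards; the fixed point is
	 * roundup_pow_of_two(count) - 1, and no intermediate value
	 * ever exceeds the final mask, so nothing can overflow.
	 */
	while ((mask | (mask >> 1)) != mask)
		mask |= (mask >> 1);

	return mask;	/* e.g. count = 5 -> 7, count = 9 -> 15 */
}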
 
@@ -780,7 +797,7 @@
 #endif
 }
 
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
 /*
  * netdev_queue sysfs structures and functions.
  */
@@ -826,6 +843,133 @@
 	.store = netdev_queue_attr_store,
 };
 
+static ssize_t show_trans_timeout(struct netdev_queue *queue,
+				  struct netdev_queue_attribute *attribute,
+				  char *buf)
+{
+	unsigned long trans_timeout;
+
+	spin_lock_irq(&queue->_xmit_lock);
+	trans_timeout = queue->trans_timeout;
+	spin_unlock_irq(&queue->_xmit_lock);
+
+	return sprintf(buf, "%lu", trans_timeout);
+}
+
+static struct netdev_queue_attribute queue_trans_timeout =
+	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+
+#ifdef CONFIG_BQL
+/*
+ * Byte queue limits sysfs structures and functions.
+ */
+static ssize_t bql_show(char *buf, unsigned int value)
+{
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t bql_set(const char *buf, const size_t count,
+		       unsigned int *pvalue)
+{
+	unsigned int value;
+	int err;
+
+	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
+		value = DQL_MAX_LIMIT;
+	else {
+		err = kstrtouint(buf, 10, &value);
+		if (err < 0)
+			return err;
+		if (value > DQL_MAX_LIMIT)
+			return -EINVAL;
+	}
+
+	*pvalue = value;
+
+	return count;
+}
+
+static ssize_t bql_show_hold_time(struct netdev_queue *queue,
+				  struct netdev_queue_attribute *attr,
+				  char *buf)
+{
+	struct dql *dql = &queue->dql;
+
+	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
+}
+
+static ssize_t bql_set_hold_time(struct netdev_queue *queue,
+				 struct netdev_queue_attribute *attribute,
+				 const char *buf, size_t len)
+{
+	struct dql *dql = &queue->dql;
+	unsigned value;
+	int err;
+
+	err = kstrtouint(buf, 10, &value);
+	if (err < 0)
+		return err;
+
+	dql->slack_hold_time = msecs_to_jiffies(value);
+
+	return len;
+}
+
+static struct netdev_queue_attribute bql_hold_time_attribute =
+	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
+	    bql_set_hold_time);
+
+static ssize_t bql_show_inflight(struct netdev_queue *queue,
+				 struct netdev_queue_attribute *attr,
+				 char *buf)
+{
+	struct dql *dql = &queue->dql;
+
+	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
+}
+
+static struct netdev_queue_attribute bql_inflight_attribute =
+	__ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL);
+
+#define BQL_ATTR(NAME, FIELD)						\
+static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
+				 struct netdev_queue_attribute *attr,	\
+				 char *buf)				\
+{									\
+	return bql_show(buf, queue->dql.FIELD);				\
+}									\
+									\
+static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
+				struct netdev_queue_attribute *attr,	\
+				const char *buf, size_t len)		\
+{									\
+	return bql_set(buf, len, &queue->dql.FIELD);			\
+}									\
+									\
+static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
+	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
+	    bql_set_ ## NAME);
+
+BQL_ATTR(limit, limit)
+BQL_ATTR(limit_max, max_limit)
+BQL_ATTR(limit_min, min_limit)
+
+static struct attribute *dql_attrs[] = {
+	&bql_limit_attribute.attr,
+	&bql_limit_max_attribute.attr,
+	&bql_limit_min_attribute.attr,
+	&bql_hold_time_attribute.attr,
+	&bql_inflight_attribute.attr,
+	NULL
+};
+
+static struct attribute_group dql_group = {
+	.name  = "byte_queue_limits",
+	.attrs  = dql_attrs,
+};
+#endif /* CONFIG_BQL */
+
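These byte_queue_limits attributes only take effect if the driver accounts bytes on its transmit path. A hypothetical pairing, assuming the netdev_tx_sent_queue()/netdev_tx_completed_queue() helpers added with the BQL core (the driver names are made up), would be:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... post skb to the hardware ring ... */
	netdev_tx_sent_queue(txq, skb->len);	/* account queued bytes */
	return NETDEV_TX_OK;
}

static void foo_tx_clean(struct net_device *dev, unsigned int pkts,
			 unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* release completed bytes; may adjust the limit and restart the queue */
	netdev_tx_completed_queue(txq, pkts, bytes);
}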
+#ifdef CONFIG_XPS
 static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
 	struct net_device *dev = queue->dev;
@@ -890,6 +1034,52 @@
 #define xmap_dereference(P)		\
 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
+static void xps_queue_release(struct netdev_queue *queue)
+{
+	struct net_device *dev = queue->dev;
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	unsigned long index;
+	int i, pos, nonempty = 0;
+
+	index = get_netdev_queue_index(queue);
+
+	mutex_lock(&xps_map_mutex);
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	if (dev_maps) {
+		for_each_possible_cpu(i) {
+			map = xmap_dereference(dev_maps->cpu_map[i]);
+			if (!map)
+				continue;
+
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+
+			if (pos < map->len) {
+				if (map->len > 1)
+					map->queues[pos] =
+					    map->queues[--map->len];
+				else {
+					RCU_INIT_POINTER(dev_maps->cpu_map[i],
+					    NULL);
+					kfree_rcu(map, rcu);
+					map = NULL;
+				}
+			}
+			if (map)
+				nonempty = 1;
+		}
+
+		if (!nonempty) {
+			RCU_INIT_POINTER(dev->xps_maps, NULL);
+			kfree_rcu(dev_maps, rcu);
+		}
+	}
+	mutex_unlock(&xps_map_mutex);
+}
+
 static ssize_t store_xps_map(struct netdev_queue *queue,
 		      struct netdev_queue_attribute *attribute,
 		      const char *buf, size_t len)
@@ -901,7 +1091,7 @@
 	struct xps_map *map, *new_map;
 	struct xps_dev_maps *dev_maps, *new_dev_maps;
 	int nonempty = 0;
-	int numa_node = -2;
+	int numa_node_id = -2;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -944,10 +1134,10 @@
 		need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
 #ifdef CONFIG_NUMA
 		if (need_set) {
-			if (numa_node == -2)
-				numa_node = cpu_to_node(cpu);
-			else if (numa_node != cpu_to_node(cpu))
-				numa_node = -1;
+			if (numa_node_id == -2)
+				numa_node_id = cpu_to_node(cpu);
+			else if (numa_node_id != cpu_to_node(cpu))
+				numa_node_id = -1;
 		}
 #endif
 		if (need_set && pos >= map_len) {
@@ -997,7 +1187,7 @@
 	if (dev_maps)
 		kfree_rcu(dev_maps, rcu);
 
-	netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
+	netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
 					    NUMA_NO_NODE);
 
 	mutex_unlock(&xps_map_mutex);
@@ -1020,58 +1210,23 @@
 
 static struct netdev_queue_attribute xps_cpus_attribute =
     __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+#endif /* CONFIG_XPS */
 
 static struct attribute *netdev_queue_default_attrs[] = {
+	&queue_trans_timeout.attr,
+#ifdef CONFIG_XPS
 	&xps_cpus_attribute.attr,
+#endif
 	NULL
 };
 
 static void netdev_queue_release(struct kobject *kobj)
 {
 	struct netdev_queue *queue = to_netdev_queue(kobj);
-	struct net_device *dev = queue->dev;
-	struct xps_dev_maps *dev_maps;
-	struct xps_map *map;
-	unsigned long index;
-	int i, pos, nonempty = 0;
 
-	index = get_netdev_queue_index(queue);
-
-	mutex_lock(&xps_map_mutex);
-	dev_maps = xmap_dereference(dev->xps_maps);
-
-	if (dev_maps) {
-		for_each_possible_cpu(i) {
-			map = xmap_dereference(dev_maps->cpu_map[i]);
-			if (!map)
-				continue;
-
-			for (pos = 0; pos < map->len; pos++)
-				if (map->queues[pos] == index)
-					break;
-
-			if (pos < map->len) {
-				if (map->len > 1)
-					map->queues[pos] =
-					    map->queues[--map->len];
-				else {
-					RCU_INIT_POINTER(dev_maps->cpu_map[i],
-					    NULL);
-					kfree_rcu(map, rcu);
-					map = NULL;
-				}
-			}
-			if (map)
-				nonempty = 1;
-		}
-
-		if (!nonempty) {
-			RCU_INIT_POINTER(dev->xps_maps, NULL);
-			kfree_rcu(dev_maps, rcu);
-		}
-	}
-
-	mutex_unlock(&xps_map_mutex);
+#ifdef CONFIG_XPS
+	xps_queue_release(queue);
+#endif
 
 	memset(kobj, 0, sizeof(*kobj));
 	dev_put(queue->dev);
@@ -1092,22 +1247,29 @@
 	kobj->kset = net->queues_kset;
 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
 	    "tx-%u", index);
-	if (error) {
-		kobject_put(kobj);
-		return error;
-	}
+	if (error)
+		goto exit;
+
+#ifdef CONFIG_BQL
+	error = sysfs_create_group(kobj, &dql_group);
+	if (error)
+		goto exit;
+#endif
 
 	kobject_uevent(kobj, KOBJ_ADD);
 	dev_hold(queue->dev);
 
+	return 0;
+exit:
+	kobject_put(kobj);
 	return error;
 }
-#endif /* CONFIG_XPS */
+#endif /* CONFIG_SYSFS */
 
 int
 netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 {
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
 	int i;
 	int error = 0;
 
@@ -1119,20 +1281,26 @@
 		}
 	}
 
-	while (--i >= new_num)
-		kobject_put(&net->_tx[i].kobj);
+	while (--i >= new_num) {
+		struct netdev_queue *queue = net->_tx + i;
+
+#ifdef CONFIG_BQL
+		sysfs_remove_group(&queue->kobj, &dql_group);
+#endif
+		kobject_put(&queue->kobj);
+	}
 
 	return error;
 #else
 	return 0;
-#endif
+#endif /* CONFIG_SYSFS */
 }
 
 static int register_queue_kobjects(struct net_device *net)
 {
 	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
 
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
 	net->queues_kset = kset_create_and_add("queues",
 	    NULL, &net->dev.kobj);
 	if (!net->queues_kset)
@@ -1173,7 +1341,7 @@
 
 	net_rx_queue_update_kobjects(net, real_rx, 0);
 	netdev_queue_update_kobjects(net, real_tx, 0);
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
 	kset_unregister(net->queues_kset);
 #endif
 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index cf64c1f..0d38808 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,7 +76,7 @@
 
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_frozen_or_stopped(txq) ||
+		if (netif_xmit_frozen_or_stopped(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
@@ -317,7 +317,7 @@
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
-				if (!netif_tx_queue_stopped(txq)) {
+				if (!netif_xmit_stopped(txq)) {
 					status = ops->ndo_start_xmit(skb, dev);
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
@@ -422,6 +422,7 @@
 	struct sk_buff *send_skb;
 	struct netpoll *np, *tmp;
 	unsigned long flags;
+	int hlen, tlen;
 	int hits = 0;
 
 	if (list_empty(&npinfo->rx_np))
@@ -479,8 +480,9 @@
 		if (tip != np->local_ip)
 			continue;
 
-		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
-				    LL_RESERVED_SPACE(np->dev));
+		hlen = LL_RESERVED_SPACE(np->dev);
+		tlen = np->dev->needed_tailroom;
+		send_skb = find_skb(np, size + hlen + tlen, hlen);
 		if (!send_skb)
 			continue;
 
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
new file mode 100644
index 0000000..3a9fd48
--- /dev/null
+++ b/net/core/netprio_cgroup.c
@@ -0,0 +1,344 @@
+/*
+ * net/core/netprio_cgroup.c	Priority Control Group
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Neil Horman <nhorman@tuxdriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/cgroup.h>
+#include <linux/rcupdate.h>
+#include <linux/atomic.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/netprio_cgroup.h>
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+					       struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+struct cgroup_subsys net_prio_subsys = {
+	.name		= "net_prio",
+	.create		= cgrp_create,
+	.destroy	= cgrp_destroy,
+	.populate	= cgrp_populate,
+#ifdef CONFIG_NETPRIO_CGROUP
+	.subsys_id	= net_prio_subsys_id,
+#endif
+	.module		= THIS_MODULE
+};
+
+#define PRIOIDX_SZ 128
+
+static unsigned long prioidx_map[PRIOIDX_SZ];
+static DEFINE_SPINLOCK(prioidx_map_lock);
+static atomic_t max_prioidx = ATOMIC_INIT(0);
+
+static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
+{
+	return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
+			    struct cgroup_netprio_state, css);
+}
+
+static int get_prioidx(u32 *prio)
+{
+	unsigned long flags;
+	u32 prioidx;
+
+	spin_lock_irqsave(&prioidx_map_lock, flags);
+	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	set_bit(prioidx, prioidx_map);
+	spin_unlock_irqrestore(&prioidx_map_lock, flags);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
+		return -ENOSPC;
+
+	atomic_set(&max_prioidx, prioidx);
+	*prio = prioidx;
+	return 0;
+}
+
+static void put_prioidx(u32 idx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&prioidx_map_lock, flags);
+	clear_bit(idx, prioidx_map);
+	spin_unlock_irqrestore(&prioidx_map_lock, flags);
+}
+
+static void extend_netdev_table(struct net_device *dev, u32 new_len)
+{
+	size_t new_size = sizeof(struct netprio_map) +
+			   ((sizeof(u32) * new_len));
+	struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
+	struct netprio_map *old_priomap;
+	int i;
+
+	old_priomap  = rtnl_dereference(dev->priomap);
+
+	if (!new_priomap) {
+		printk(KERN_WARNING "Unable to alloc new priomap!\n");
+		return;
+	}
+
+	for (i = 0;
+	     old_priomap && (i < old_priomap->priomap_len);
+	     i++)
+		new_priomap->priomap[i] = old_priomap->priomap[i];
+
+	new_priomap->priomap_len = new_len;
+
+	rcu_assign_pointer(dev->priomap, new_priomap);
+	if (old_priomap)
+		kfree_rcu(old_priomap, rcu);
+}
+
+static void update_netdev_tables(void)
+{
+	struct net_device *dev;
+	u32 max_len = atomic_read(&max_prioidx);
+	struct netprio_map *map;
+
+	rtnl_lock();
+	for_each_netdev(&init_net, dev) {
+		map = rtnl_dereference(dev->priomap);
+		if ((!map) ||
+		    (map->priomap_len < max_len))
+			extend_netdev_table(dev, max_len);
+	}
+	rtnl_unlock();
+}
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+						 struct cgroup *cgrp)
+{
+	struct cgroup_netprio_state *cs;
+	int ret;
+
+	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+	if (!cs)
+		return ERR_PTR(-ENOMEM);
+
+	if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
+		kfree(cs);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = get_prioidx(&cs->prioidx);
+	if (ret != 0) {
+		printk(KERN_WARNING "No space in priority index array\n");
+		kfree(cs);
+		return ERR_PTR(ret);
+	}
+
+	return &cs->css;
+}
+
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+	struct cgroup_netprio_state *cs;
+	struct net_device *dev;
+	struct netprio_map *map;
+
+	cs = cgrp_netprio_state(cgrp);
+	rtnl_lock();
+	for_each_netdev(&init_net, dev) {
+		map = rtnl_dereference(dev->priomap);
+		if (map)
+			map->priomap[cs->prioidx] = 0;
+	}
+	rtnl_unlock();
+	put_prioidx(cs->prioidx);
+	kfree(cs);
+}
+
+static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+{
+	return (u64)cgrp_netprio_state(cgrp)->prioidx;
+}
+
+static int read_priomap(struct cgroup *cont, struct cftype *cft,
+			struct cgroup_map_cb *cb)
+{
+	struct net_device *dev;
+	u32 prioidx = cgrp_netprio_state(cont)->prioidx;
+	u32 priority;
+	struct netprio_map *map;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		map = rcu_dereference(dev->priomap);
+		priority = map ? map->priomap[prioidx] : 0;
+		cb->fill(cb, dev->name, priority);
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+			 const char *buffer)
+{
+	char *devname = kstrdup(buffer, GFP_KERNEL);
+	int ret = -EINVAL;
+	u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
+	unsigned long priority;
+	char *priostr;
+	struct net_device *dev;
+	struct netprio_map *map;
+
+	if (!devname)
+		return -ENOMEM;
+
+	/*
+	 * Minimally sized valid priomap string
+	 */
+	if (strlen(devname) < 3)
+		goto out_free_devname;
+
+	priostr = strstr(devname, " ");
+	if (!priostr)
+		goto out_free_devname;
+
+	/*
+	 * Separate the devname from the associated priority
+	 * and advance the priostr pointer to the priority value
+	 */
+	*priostr = '\0';
+	priostr++;
+
+	/*
+	 * If priostr points at the terminating NUL, we're at the end of the
+	 * passed-in string and it's not a valid write
+	 */
+	if (*priostr == '\0')
+		goto out_free_devname;
+
+	ret = kstrtoul(priostr, 10, &priority);
+	if (ret < 0)
+		goto out_free_devname;
+
+	ret = -ENODEV;
+
+	dev = dev_get_by_name(&init_net, devname);
+	if (!dev)
+		goto out_free_devname;
+
+	update_netdev_tables();
+	ret = 0;
+	rcu_read_lock();
+	map = rcu_dereference(dev->priomap);
+	if (map)
+		map->priomap[prioidx] = priority;
+	rcu_read_unlock();
+	dev_put(dev);
+
+out_free_devname:
+	kfree(devname);
+	return ret;
+}
+
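The accepted input is a single "<ifname> <prio>" pair per write. A minimal, hypothetical userspace sketch, assuming a net_prio cgroup mounted at cgroup_dir, is:

#include <stdio.h>

static int set_ifprio(const char *cgroup_dir, const char *ifname,
		      unsigned int prio)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/net_prio.ifpriomap", cgroup_dir);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s %u\n", ifname, prio);	/* e.g. "eth0 5" */
	return fclose(f);
}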
+static struct cftype ss_files[] = {
+	{
+		.name = "prioidx",
+		.read_u64 = read_prioidx,
+	},
+	{
+		.name = "ifpriomap",
+		.read_map = read_priomap,
+		.write_string = write_priomap,
+	},
+};
+
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
+}
+
+static int netprio_device_event(struct notifier_block *unused,
+				unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct netprio_map *old;
+	u32 max_len = atomic_read(&max_prioidx);
+
+	/*
+	 * Note this is called with rtnl_lock held, so we have update-side
+	 * protection on our RCU assignments.
+	 */
+
+	switch (event) {
+
+	case NETDEV_REGISTER:
+		if (max_len)
+			extend_netdev_table(dev, max_len);
+		break;
+	case NETDEV_UNREGISTER:
+		old = rtnl_dereference(dev->priomap);
+		RCU_INIT_POINTER(dev->priomap, NULL);
+		if (old)
+			kfree_rcu(old, rcu);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block netprio_device_notifier = {
+	.notifier_call = netprio_device_event
+};
+
+static int __init init_cgroup_netprio(void)
+{
+	int ret;
+
+	ret = cgroup_load_subsys(&net_prio_subsys);
+	if (ret)
+		goto out;
+#ifndef CONFIG_NETPRIO_CGROUP
+	smp_wmb();
+	net_prio_subsys_id = net_prio_subsys.subsys_id;
+#endif
+
+	register_netdevice_notifier(&netprio_device_notifier);
+
+out:
+	return ret;
+}
+
+static void __exit exit_cgroup_netprio(void)
+{
+	struct netprio_map *old;
+	struct net_device *dev;
+
+	unregister_netdevice_notifier(&netprio_device_notifier);
+
+	cgroup_unload_subsys(&net_prio_subsys);
+
+#ifndef CONFIG_NETPRIO_CGROUP
+	net_prio_subsys_id = -1;
+	synchronize_rcu();
+#endif
+
+	rtnl_lock();
+	for_each_netdev(&init_net, dev) {
+		old = rtnl_dereference(dev->priomap);
+		RCU_INIT_POINTER(dev->priomap, NULL);
+		if (old)
+			kfree_rcu(old, rcu);
+	}
+	rtnl_unlock();
+}
+
+module_init(init_cgroup_netprio);
+module_exit(exit_cgroup_netprio);
+MODULE_LICENSE("GPL v2");
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0001c24..449fe0f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1304,7 +1304,7 @@
 		scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
 
-		ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
+		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
 
 		if (debug)
 			printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
@@ -1327,8 +1327,7 @@
 		scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
 
-		ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
-			       &pkt_dev->min_in6_daddr);
+		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
 		if (debug)
 			printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
 
@@ -1371,7 +1370,7 @@
 		scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
 
-		ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
+		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
 
 		if (debug)
 			printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
@@ -2079,9 +2078,7 @@
 				     ifp = ifp->if_next) {
 					if (ifp->scope == IFA_LINK &&
 					    !(ifp->flags & IFA_F_TENTATIVE)) {
-						ipv6_addr_copy(&pkt_dev->
-							       cur_in6_saddr,
-							       &ifp->addr);
+						pkt_dev->cur_in6_saddr = ifp->addr;
 						err = 0;
 						break;
 					}
@@ -2958,8 +2955,8 @@
 	iph->payload_len = htons(sizeof(struct udphdr) + datalen);
 	iph->nexthdr = IPPROTO_UDP;
 
-	ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
-	ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
+	iph->daddr = pkt_dev->cur_in6_daddr;
+	iph->saddr = pkt_dev->cur_in6_saddr;
 
 	skb->mac_header = (skb->network_header - ETH_HLEN -
 			   pkt_dev->pkt_overhead);
@@ -3345,7 +3342,7 @@
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+	if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9083e82..dbf2dda 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -273,6 +273,17 @@
 
 static LIST_HEAD(link_ops);
 
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+	const struct rtnl_link_ops *ops;
+
+	list_for_each_entry(ops, &link_ops, list) {
+		if (!strcmp(ops->kind, kind))
+			return ops;
+	}
+	return NULL;
+}
+
 /**
  * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
  * @ops: struct rtnl_link_ops * to register
@@ -285,6 +296,9 @@
  */
 int __rtnl_link_register(struct rtnl_link_ops *ops)
 {
+	if (rtnl_link_ops_get(ops->kind))
+		return -EEXIST;
+
 	if (!ops->dellink)
 		ops->dellink = unregister_netdevice_queue;
 
@@ -351,17 +365,6 @@
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
-static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
-{
-	const struct rtnl_link_ops *ops;
-
-	list_for_each_entry(ops, &link_ops, list) {
-		if (!strcmp(ops->kind, kind))
-			return ops;
-	}
-	return NULL;
-}
-
 static size_t rtnl_link_get_size(const struct net_device *dev)
 {
 	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 925991a..6fd4460 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -36,7 +36,7 @@
 }
 #endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
 				   __be16 sport, __be16 dport)
 {
@@ -134,7 +134,7 @@
 EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
 #endif
 
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+#if IS_ENABLED(CONFIG_IP_DCCP)
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 				__be16 sport, __be16 dport)
 {
@@ -156,7 +156,7 @@
 }
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 				  __be16 sport, __be16 dport)
 {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c30ee4..da0c97f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -245,6 +245,55 @@
 EXPORT_SYMBOL(__alloc_skb);
 
 /**
+ * build_skb - build a network buffer
+ * @data: data buffer provided by caller
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc().
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes:
+ *  Before IO, the driver allocates only the data buffer, where the NIC
+ *  puts the incoming frame. The driver should add room at the head
+ *  (NET_SKB_PAD) and MUST add room at the tail
+ *  (SKB_DATA_ALIGN(skb_shared_info)).
+ *  After IO, the driver calls build_skb() to allocate the sk_buff and
+ *  populate it before handing the packet to the stack.
+ *  RX rings only contain data buffers, not full skbs.
+ */
+struct sk_buff *build_skb(void *data)
+{
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+	unsigned int size;
+
+	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->truesize = SKB_TRUESIZE(size);
+	atomic_set(&skb->users, 1);
+	skb->head = data;
+	skb->data = data;
+	skb_reset_tail_pointer(skb);
+	skb->end = skb->tail + size;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	skb->mac_header = ~0U;
+#endif
+
+	/* make sure we initialize shinfo sequentially */
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+	kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+	return skb;
+}
+EXPORT_SYMBOL(build_skb);
+
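A hypothetical RX-path sketch of the contract described in the kernel-doc above; the buffer layout macro MAX_FRAME_SIZE and the surrounding driver code are assumptions, not part of this patch:

static struct sk_buff *example_rx_build(void *data, unsigned int frame_len)
{
	struct sk_buff *skb;

	/* data was kmalloc()ed as NET_SKB_PAD + MAX_FRAME_SIZE +
	 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), and the NIC
	 * wrote the frame at data + NET_SKB_PAD.
	 */
	skb = build_skb(data);
	if (!skb)
		return NULL;	/* caller still owns the data buffer */

	skb_reserve(skb, NET_SKB_PAD);	/* skip the reserved headroom */
	skb_put(skb, frame_len);	/* length written by the NIC */
	return skb;
}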
+/**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
  *	@length: length to allocate
@@ -403,7 +452,7 @@
 		WARN_ON(in_irq());
 		skb->destructor(skb);
 	}
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_conntrack_put(skb->nfct);
 #endif
 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
@@ -553,15 +602,14 @@
 	new->ip_summed		= old->ip_summed;
 	skb_copy_queue_mapping(new, old);
 	new->priority		= old->priority;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
 	new->ipvs_property	= old->ipvs_property;
 #endif
 	new->protocol		= old->protocol;
 	new->mark		= old->mark;
 	new->skb_iif		= old->skb_iif;
 	__nf_copy(new, old);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 	new->nf_trace		= old->nf_trace;
 #endif
 #ifdef CONFIG_NET_SCHED
@@ -791,8 +839,9 @@
 EXPORT_SYMBOL(skb_copy);
 
 /**
- *	pskb_copy	-	create copy of an sk_buff with private head.
+ *	__pskb_copy	-	create copy of an sk_buff with private head.
  *	@skb: buffer to copy
+ *	@headroom: headroom of new skb
  *	@gfp_mask: allocation priority
  *
  *	Make a copy of both an &sk_buff and part of its data, located
@@ -803,16 +852,16 @@
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 {
-	unsigned int size = skb_end_pointer(skb) - skb->head;
+	unsigned int size = skb_headlen(skb) + headroom;
 	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 	if (!n)
 		goto out;
 
 	/* Set the data pointer */
-	skb_reserve(n, skb_headroom(skb));
+	skb_reserve(n, headroom);
 	/* Set the tail pointer and length */
 	skb_put(n, skb_headlen(skb));
 	/* Copy the bytes */
@@ -848,7 +897,7 @@
 out:
 	return n;
 }
-EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(__pskb_copy);
 
 /**
  *	pskb_expand_head - reallocate header of &sk_buff
@@ -2621,7 +2670,7 @@
  *	a pointer to the first in a list of new skbs for the segments.
  *	In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 {
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
@@ -3169,6 +3218,26 @@
 }
 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
 
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+{
+	struct sock *sk = skb->sk;
+	struct sock_exterr_skb *serr;
+	int err;
+
+	skb->wifi_acked_valid = 1;
+	skb->wifi_acked = acked;
+
+	serr = SKB_EXT_ERR(skb);
+	memset(serr, 0, sizeof(*serr));
+	serr->ee.ee_errno = ENOMSG;
+	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+
+	err = sock_queue_err_skb(sk, skb);
+	if (err)
+		kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+
 
 /**
  * skb_partial_csum_set - set up and verify partial csum values for packet
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d..002939c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,6 +111,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
+#include <linux/jump_label.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -125,6 +126,7 @@
 #include <net/xfrm.h>
 #include <linux/ipsec.h>
 #include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
 
 #include <linux/filter.h>
 
@@ -134,6 +136,46 @@
 #include <net/tcp.h>
 #endif
 
+static DEFINE_MUTEX(proto_list_mutex);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+	struct proto *proto;
+	int ret = 0;
+
+	mutex_lock(&proto_list_mutex);
+	list_for_each_entry(proto, &proto_list, node) {
+		if (proto->init_cgroup) {
+			ret = proto->init_cgroup(cgrp, ss);
+			if (ret)
+				goto out;
+		}
+	}
+
+	mutex_unlock(&proto_list_mutex);
+	return ret;
+out:
+	list_for_each_entry_continue_reverse(proto, &proto_list, node)
+		if (proto->destroy_cgroup)
+			proto->destroy_cgroup(cgrp, ss);
+	mutex_unlock(&proto_list_mutex);
+	return ret;
+}
+
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+	struct proto *proto;
+
+	mutex_lock(&proto_list_mutex);
+	list_for_each_entry_reverse(proto, &proto_list, node)
+		if (proto->destroy_cgroup)
+			proto->destroy_cgroup(cgrp, ss);
+	mutex_unlock(&proto_list_mutex);
+}
+#endif
+
 /*
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
@@ -141,6 +183,9 @@
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
+struct jump_label_key memcg_socket_limit_enabled;
+EXPORT_SYMBOL(memcg_socket_limit_enabled);
+
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
@@ -221,10 +266,16 @@
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
-#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+#if defined(CONFIG_CGROUPS)
+#if !defined(CONFIG_NET_CLS_CGROUP)
 int net_cls_subsys_id = -1;
 EXPORT_SYMBOL_GPL(net_cls_subsys_id);
 #endif
+#if !defined(CONFIG_NETPRIO_CGROUP)
+int net_prio_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_prio_subsys_id);
+#endif
+#endif
 
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
@@ -269,14 +320,14 @@
 	}
 }
 
-static void sock_disable_timestamp(struct sock *sk, int flag)
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
+static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 {
-	if (sock_flag(sk, flag)) {
-		sock_reset_flag(sk, flag);
-		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
-		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
+	if (sk->sk_flags & flags) {
+		sk->sk_flags &= ~flags;
+		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 			net_disable_timestamp();
-		}
 	}
 }
 
@@ -288,11 +339,7 @@
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
@@ -682,7 +729,7 @@
 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
 		else
 			sock_disable_timestamp(sk,
-					       SOCK_TIMESTAMPING_RX_SOFTWARE);
+					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
 				  val & SOF_TIMESTAMPING_SOFTWARE);
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
@@ -740,6 +787,11 @@
 	case SO_RXQ_OVFL:
 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
 		break;
+
+	case SO_WIFI_STATUS:
+		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -961,6 +1013,10 @@
 		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
 		break;
 
+	case SO_WIFI_STATUS:
+		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+		break;
+
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -1111,6 +1167,18 @@
 		sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
+
+void sock_update_netprioidx(struct sock *sk)
+{
+	struct cgroup_netprio_state *state;
+	if (in_interrupt())
+		return;
+	rcu_read_lock();
+	state = task_netprio_state(current);
+	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
 
 /**
@@ -1138,6 +1206,7 @@
 		atomic_set(&sk->sk_wmem_alloc, 1);
 
 		sock_update_classid(sk);
+		sock_update_netprioidx(sk);
 	}
 
 	return sk;
@@ -1158,8 +1227,7 @@
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
 	}
 
-	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
-	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
+	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
 	if (atomic_read(&sk->sk_omem_alloc))
 		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
@@ -1204,7 +1272,14 @@
 }
 EXPORT_SYMBOL(sk_release_kernel);
 
-struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+/**
+ *	sk_clone_lock - clone a socket, and lock its clone
+ *	@sk: the socket to clone
+ *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
 	struct sock *newsk;
 
@@ -1288,16 +1363,15 @@
 		newsk->sk_wq = NULL;
 
 		if (newsk->sk_prot->sockets_allocated)
-			percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+			sk_sockets_allocated_inc(newsk);
 
-		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
-		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
 			net_enable_timestamp();
 	}
 out:
 	return newsk;
 }
-EXPORT_SYMBOL_GPL(sk_clone);
+EXPORT_SYMBOL_GPL(sk_clone_lock);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
@@ -1677,30 +1751,34 @@
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
 	long allocated;
+	int parent_status = UNDER_LIMIT;
 
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	allocated = atomic_long_add_return(amt, prot->memory_allocated);
+
+	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
 
 	/* Under limit. */
-	if (allocated <= prot->sysctl_mem[0]) {
-		if (prot->memory_pressure && *prot->memory_pressure)
-			*prot->memory_pressure = 0;
+	if (parent_status == UNDER_LIMIT &&
+			allocated <= sk_prot_mem_limits(sk, 0)) {
+		sk_leave_memory_pressure(sk);
 		return 1;
 	}
 
-	/* Under pressure. */
-	if (allocated > prot->sysctl_mem[1])
-		if (prot->enter_memory_pressure)
-			prot->enter_memory_pressure(sk);
+	/* Under pressure. (we or our parents) */
+	if ((parent_status > SOFT_LIMIT) ||
+			allocated > sk_prot_mem_limits(sk, 1))
+		sk_enter_memory_pressure(sk);
 
-	/* Over hard limit. */
-	if (allocated > prot->sysctl_mem[2])
+	/* Over hard limit (we or our parents) */
+	if ((parent_status == OVER_LIMIT) ||
+			(allocated > sk_prot_mem_limits(sk, 2)))
 		goto suppress_allocation;
 
 	/* guarantee minimum buffer size under pressure */
 	if (kind == SK_MEM_RECV) {
 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
 			return 1;
+
 	} else { /* SK_MEM_SEND */
 		if (sk->sk_type == SOCK_STREAM) {
 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
@@ -1710,13 +1788,13 @@
 				return 1;
 	}
 
-	if (prot->memory_pressure) {
+	if (sk_has_memory_pressure(sk)) {
 		int alloc;
 
-		if (!*prot->memory_pressure)
+		if (!sk_under_memory_pressure(sk))
 			return 1;
-		alloc = percpu_counter_read_positive(prot->sockets_allocated);
-		if (prot->sysctl_mem[2] > alloc *
+		alloc = sk_sockets_allocated_read_positive(sk);
+		if (sk_prot_mem_limits(sk, 2) > alloc *
 		    sk_mem_pages(sk->sk_wmem_queued +
 				 atomic_read(&sk->sk_rmem_alloc) +
 				 sk->sk_forward_alloc))
@@ -1739,7 +1817,9 @@
 
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-	atomic_long_sub(amt, prot->memory_allocated);
+
+	sk_memory_allocated_sub(sk, amt, parent_status);
+
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1750,15 +1830,13 @@
  */
 void __sk_mem_reclaim(struct sock *sk)
 {
-	struct proto *prot = sk->sk_prot;
-
-	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
-		   prot->memory_allocated);
+	sk_memory_allocated_sub(sk,
+				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
-	if (prot->memory_pressure && *prot->memory_pressure &&
-	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
-		*prot->memory_pressure = 0;
+	if (sk_under_memory_pressure(sk) &&
+	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+		sk_leave_memory_pressure(sk);
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
@@ -2129,16 +2207,15 @@
 void sock_enable_timestamp(struct sock *sk, int flag)
 {
 	if (!sock_flag(sk, flag)) {
+		unsigned long previous_flags = sk->sk_flags;
+
 		sock_set_flag(sk, flag);
 		/*
 		 * we just set one of the two flags which require net
 		 * time stamping, but time stamping might have been on
 		 * already because of the other one
 		 */
-		if (!sock_flag(sk,
-				flag == SOCK_TIMESTAMP ?
-				SOCK_TIMESTAMPING_RX_SOFTWARE :
-				SOCK_TIMESTAMP))
+		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
 			net_enable_timestamp();
 	}
 }
@@ -2250,9 +2327,6 @@
 }
 EXPORT_SYMBOL(sk_common_release);
 
-static DEFINE_RWLOCK(proto_list_lock);
-static LIST_HEAD(proto_list);
-
 #ifdef CONFIG_PROC_FS
 #define PROTO_INUSE_NR	64	/* should be enough for the first time */
 struct prot_inuse {
@@ -2401,10 +2475,10 @@
 		}
 	}
 
-	write_lock(&proto_list_lock);
+	mutex_lock(&proto_list_mutex);
 	list_add(&prot->node, &proto_list);
 	assign_proto_idx(prot);
-	write_unlock(&proto_list_lock);
+	mutex_unlock(&proto_list_mutex);
 	return 0;
 
 out_free_timewait_sock_slab_name:
@@ -2427,10 +2501,10 @@
 
 void proto_unregister(struct proto *prot)
 {
-	write_lock(&proto_list_lock);
+	mutex_lock(&proto_list_mutex);
 	release_proto_idx(prot);
 	list_del(&prot->node);
-	write_unlock(&proto_list_lock);
+	mutex_unlock(&proto_list_mutex);
 
 	if (prot->slab != NULL) {
 		kmem_cache_destroy(prot->slab);
@@ -2453,9 +2527,9 @@
 
 #ifdef CONFIG_PROC_FS
 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(proto_list_lock)
+	__acquires(proto_list_mutex)
 {
-	read_lock(&proto_list_lock);
+	mutex_lock(&proto_list_mutex);
 	return seq_list_start_head(&proto_list, *pos);
 }
 
@@ -2465,25 +2539,36 @@
 }
 
 static void proto_seq_stop(struct seq_file *seq, void *v)
-	__releases(proto_list_lock)
+	__releases(proto_list_mutex)
 {
-	read_unlock(&proto_list_lock);
+	mutex_unlock(&proto_list_mutex);
 }
 
 static char proto_method_implemented(const void *method)
 {
 	return method == NULL ? 'n' : 'y';
 }
+static long sock_prot_memory_allocated(struct proto *proto)
+{
+	return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+}
+
+static char *sock_prot_memory_pressure(struct proto *proto)
+{
+	return proto->memory_pressure != NULL ?
+	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
+}
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
+
 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
 		   proto->name,
 		   proto->obj_size,
 		   sock_prot_inuse_get(seq_file_net(seq), proto),
-		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
-		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+		   sock_prot_memory_allocated(proto),
+		   sock_prot_memory_pressure(proto),
 		   proto->max_header,
 		   proto->slab == NULL ? "no" : "yes",
 		   module_name(proto->owner),
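
The accounting wrappers introduced above (sk_memory_allocated_add() and friends) let a protocol charge be judged against both its own sysctl limits and, when CONFIG_CGROUP_MEM_RES_CTLR_KMEM is enabled, the status reported for the cgroup hierarchy. A minimal userspace sketch of that three-way decision; only the enum names are taken from the hunk, everything else is illustrative:

#include <stdio.h>

/* Status the parent cgroup can report, mirroring the names used in the hunk. */
enum { UNDER_LIMIT, SOFT_LIMIT, OVER_LIMIT };

/* Returns 1 if the charge may proceed, 0 if it must be suppressed. */
static int charge_allowed(int parent_status, long allocated,
			  long lim_min, long lim_pressure, long lim_max)
{
	if (parent_status == UNDER_LIMIT && allocated <= lim_min)
		return 1;				/* plainly under limit */

	if (parent_status > SOFT_LIMIT || allocated > lim_pressure)
		printf("  entering memory pressure\n");	/* sk_enter_memory_pressure() */

	if (parent_status == OVER_LIMIT || allocated > lim_max)
		return 0;				/* hard limit reached */

	return 1;					/* under pressure, still allowed */
}

int main(void)
{
	printf("%d\n", charge_allowed(UNDER_LIMIT, 100, 200, 400, 800));	/* 1 */
	printf("%d\n", charge_allowed(UNDER_LIMIT, 500, 200, 400, 800));	/* pressure, then 1 */
	printf("%d\n", charge_allowed(OVER_LIMIT,  100, 200, 400, 800));	/* 0 */
	return 0;
}
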
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
new file mode 100644
index 0000000..b9868e1
--- /dev/null
+++ b/net/core/sock_diag.c
@@ -0,0 +1,192 @@
+#include <linux/mutex.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <net/sock.h>
+
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+static DEFINE_MUTEX(sock_diag_table_mutex);
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie)
+{
+	if ((cookie[0] != INET_DIAG_NOCOOKIE ||
+	     cookie[1] != INET_DIAG_NOCOOKIE) &&
+	    ((u32)(unsigned long)sk != cookie[0] ||
+	     (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
+		return -ESTALE;
+	else
+		return 0;
+}
+EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
+
+void sock_diag_save_cookie(void *sk, __u32 *cookie)
+{
+	cookie[0] = (u32)(unsigned long)sk;
+	cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+}
+EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
+{
+	__u32 *mem;
+
+	mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
+
+	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
+	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+
+	return 0;
+
+rtattr_failure:
+	return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+	mutex_lock(&sock_diag_table_mutex);
+	inet_rcv_compat = fn;
+	mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
+
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+	mutex_lock(&sock_diag_table_mutex);
+	inet_rcv_compat = NULL;
+	mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
+
+int sock_diag_register(struct sock_diag_handler *hndl)
+{
+	int err = 0;
+
+	if (hndl->family >= AF_MAX)
+		return -EINVAL;
+
+	mutex_lock(&sock_diag_table_mutex);
+	if (sock_diag_handlers[hndl->family])
+		err = -EBUSY;
+	else
+		sock_diag_handlers[hndl->family] = hndl;
+	mutex_unlock(&sock_diag_table_mutex);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(sock_diag_register);
+
+void sock_diag_unregister(struct sock_diag_handler *hnld)
+{
+	int family = hnld->family;
+
+	if (family >= AF_MAX)
+		return;
+
+	mutex_lock(&sock_diag_table_mutex);
+	BUG_ON(sock_diag_handlers[family] != hnld);
+	sock_diag_handlers[family] = NULL;
+	mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister);
+
+static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+	if (sock_diag_handlers[family] == NULL)
+		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+				NETLINK_SOCK_DIAG, family);
+
+	mutex_lock(&sock_diag_table_mutex);
+	return sock_diag_handlers[family];
+}
+
+static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+{
+	mutex_unlock(&sock_diag_table_mutex);
+}
+
+static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	int err;
+	struct sock_diag_req *req = NLMSG_DATA(nlh);
+	struct sock_diag_handler *hndl;
+
+	if (nlmsg_len(nlh) < sizeof(*req))
+		return -EINVAL;
+
+	hndl = sock_diag_lock_handler(req->sdiag_family);
+	if (hndl == NULL)
+		err = -ENOENT;
+	else
+		err = hndl->dump(skb, nlh);
+	sock_diag_unlock_handler(hndl);
+
+	return err;
+}
+
+static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	int ret;
+
+	switch (nlh->nlmsg_type) {
+	case TCPDIAG_GETSOCK:
+	case DCCPDIAG_GETSOCK:
+		if (inet_rcv_compat == NULL)
+			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+					NETLINK_SOCK_DIAG, AF_INET);
+
+		mutex_lock(&sock_diag_table_mutex);
+		if (inet_rcv_compat != NULL)
+			ret = inet_rcv_compat(skb, nlh);
+		else
+			ret = -EOPNOTSUPP;
+		mutex_unlock(&sock_diag_table_mutex);
+
+		return ret;
+	case SOCK_DIAG_BY_FAMILY:
+		return __sock_diag_rcv_msg(skb, nlh);
+	default:
+		return -EINVAL;
+	}
+}
+
+static DEFINE_MUTEX(sock_diag_mutex);
+
+static void sock_diag_rcv(struct sk_buff *skb)
+{
+	mutex_lock(&sock_diag_mutex);
+	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
+	mutex_unlock(&sock_diag_mutex);
+}
+
+struct sock *sock_diag_nlsk;
+EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+
+static int __init sock_diag_init(void)
+{
+	sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
+					sock_diag_rcv, NULL, THIS_MODULE);
+	return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __exit sock_diag_exit(void)
+{
+	netlink_kernel_release(sock_diag_nlsk);
+}
+
+module_init(sock_diag_init);
+module_exit(sock_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
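
The cookie helpers at the top of this new file identify a socket across netlink requests by splitting its kernel pointer into two 32-bit words; the double shift (">> 31 >> 1") sidesteps an undefined 32-bit shift on 32-bit builds. A small standalone sketch of the same packing and staleness check, in plain userspace C rather than the kernel API:

#include <stdio.h>
#include <stdint.h>

static void save_cookie(const void *ptr, uint32_t cookie[2])
{
	cookie[0] = (uint32_t)(unsigned long)ptr;
	cookie[1] = (uint32_t)((((unsigned long)ptr) >> 31) >> 1);
}

/* 0 when the cookie still names this object, -1 when it is stale. */
static int check_cookie(const void *ptr, const uint32_t cookie[2])
{
	uint32_t now[2];

	save_cookie(ptr, now);
	return (now[0] == cookie[0] && now[1] == cookie[1]) ? 0 : -1;
}

int main(void)
{
	int object;
	uint32_t cookie[2];

	save_cookie(&object, cookie);
	printf("match: %d\n", check_cookie(&object, cookie));	/* 0 */
	printf("stale: %d\n", check_cookie(&cookie, cookie));	/* -1 */
	return 0;
}
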
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 77a65f0..d05559d 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -68,8 +68,13 @@
 
 		if (sock_table != orig_sock_table) {
 			rcu_assign_pointer(rps_sock_flow_table, sock_table);
-			synchronize_rcu();
-			vfree(orig_sock_table);
+			if (sock_table)
+				jump_label_inc(&rps_needed);
+			if (orig_sock_table) {
+				jump_label_dec(&rps_needed);
+				synchronize_rcu();
+				vfree(orig_sock_table);
+			}
 		}
 	}
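
The hunk above wraps the RPS flow-table pointer update in jump_label_inc()/jump_label_dec() on rps_needed, so the receive fast path can skip the RPS lookup entirely while no table is installed. A toy model of that refcounting, with a plain counter standing in for the jump label (names illustrative):

#include <stdio.h>

static int rps_needed;		/* stand-in for the jump label's refcount */

static void install_table(void *new, void **slot)
{
	void *old = *slot;

	*slot = new;
	if (new)
		rps_needed++;	/* jump_label_inc() in the patch */
	if (old)
		rps_needed--;	/* jump_label_dec() in the patch */
}

int main(void)
{
	void *slot = NULL;
	int a, b;

	install_table(&a, &slot);	/* 0 -> 1: fast path now does RPS */
	install_table(&b, &slot);	/* replacement: stays at 1 */
	install_table(NULL, &slot);	/* 1 -> 0: fast path skips RPS again */
	printf("rps_needed = %d\n", rps_needed);
	return 0;
}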
 
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 67164bb..f053198 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -29,7 +29,7 @@
 
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
-static int ccid2_debug;
+static bool ccid2_debug;
 #define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
 #else
 #define ccid2_pr_debug(format, a...)
@@ -174,7 +174,7 @@
 /*
  *	Congestion window validation (RFC 2861).
  */
-static int ccid2_do_cwv = 1;
+static bool ccid2_do_cwv = true;
 module_param(ccid2_do_cwv, bool, 0644);
 MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
 
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3d604e1..5606273 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -38,7 +38,7 @@
 #include <asm/unaligned.h>
 
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
-static int ccid3_debug;
+static bool ccid3_debug;
 #define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
 #else
 #define ccid3_pr_debug(format, a...)
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 1f94b7e..62b5828 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-int tfrc_debug;
+bool tfrc_debug;
 module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
 #endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index f8ee3f5..ed698c4 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,7 +21,7 @@
 #include "packet_history.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-extern int tfrc_debug;
+extern bool tfrc_debug;
 #define tfrc_pr_debug(format, a...)	DCCP_PR_DEBUG(tfrc_debug, format, ##a)
 #else
 #define tfrc_pr_debug(format, a...)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 583490a..29d6bb6 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -39,7 +39,7 @@
 						  "%s: " fmt, __func__, ##a)
 
 #ifdef CONFIG_IP_DCCP_DEBUG
-extern int dccp_debug;
+extern bool dccp_debug;
 #define dccp_pr_debug(format, a...)	  DCCP_PR_DEBUG(dccp_debug, format, ##a)
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)		  dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
@@ -357,7 +357,7 @@
 struct dccp_skb_cb {
 	union {
 		struct inet_skb_parm	h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index b21f261..8f16257 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -48,11 +48,23 @@
 		dccp_get_info(sk, _info);
 }
 
+static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+		struct inet_diag_req *r, struct nlattr *bc)
+{
+	inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+}
+
+static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+		struct inet_diag_req *req)
+{
+	return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler dccp_diag_handler = {
-	.idiag_hashinfo	 = &dccp_hashinfo,
+	.dump		 = dccp_diag_dump,
+	.dump_one	 = dccp_diag_dump_one,
 	.idiag_get_info	 = dccp_diag_get_info,
-	.idiag_type	 = DCCPDIAG_GETSOCK,
-	.idiag_info_size = sizeof(struct tcp_info),
+	.idiag_type	 = IPPROTO_DCCP,
 };
 
 static int __init dccp_diag_init(void)
@@ -71,4 +83,4 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
 MODULE_DESCRIPTION("DCCP inet_diag handler");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-33 /* AF_INET - IPPROTO_DCCP */);
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 23cea0e..78a2ad7 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -490,8 +490,8 @@
 	new->feat_num	     = feat;
 	new->is_local	     = local;
 	new->state	     = FEAT_INITIALISING;
-	new->needs_confirm   = 0;
-	new->empty_confirm   = 0;
+	new->needs_confirm   = false;
+	new->empty_confirm   = false;
 	new->val	     = *fval;
 	new->needs_mandatory = mandatory;
 
@@ -517,12 +517,12 @@
 	new->feat_num	     = feat;
 	new->is_local	     = local;
 	new->state	     = FEAT_STABLE;	/* transition in 6.6.2 */
-	new->needs_confirm   = 1;
+	new->needs_confirm   = true;
 	new->empty_confirm   = (fval == NULL);
 	new->val.nn	     = 0;		/* zeroes the whole structure */
 	if (!new->empty_confirm)
 		new->val     = *fval;
-	new->needs_mandatory = 0;
+	new->needs_mandatory = false;
 
 	return 0;
 }
@@ -1155,7 +1155,7 @@
 	}
 
 	if (dccp_feat_reconcile(&entry->val, val, len, server, true)) {
-		entry->empty_confirm = 0;
+		entry->empty_confirm = false;
 	} else if (is_mandatory) {
 		return DCCP_RESET_CODE_MANDATORY_ERROR;
 	} else if (entry->state == FEAT_INITIALISING) {
@@ -1171,10 +1171,10 @@
 		defval = dccp_feat_default_value(feat);
 		if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true))
 			return DCCP_RESET_CODE_OPTION_ERROR;
-		entry->empty_confirm = 1;
+		entry->empty_confirm = true;
 	}
-	entry->needs_confirm   = 1;
-	entry->needs_mandatory = 0;
+	entry->needs_confirm   = true;
+	entry->needs_mandatory = false;
 	entry->state	       = FEAT_STABLE;
 	return 0;
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 3f4e541..1c67fe8 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -474,10 +474,11 @@
 					   struct sk_buff *skb)
 {
 	struct rtable *rt;
+	const struct iphdr *iph = ip_hdr(skb);
 	struct flowi4 fl4 = {
 		.flowi4_oif = skb_rtable(skb)->rt_iif,
-		.daddr = ip_hdr(skb)->saddr,
-		.saddr = ip_hdr(skb)->daddr,
+		.daddr = iph->saddr,
+		.saddr = iph->daddr,
 		.flowi4_tos = RT_CONN_FLAGS(sk),
 		.flowi4_proto = sk->sk_protocol,
 		.fl4_sport = dccp_hdr(skb)->dccph_dport,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 17ee85c..ce903f7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -150,8 +150,8 @@
 			 */
 			memset(&fl6, 0, sizeof(fl6));
 			fl6.flowi6_proto = IPPROTO_DCCP;
-			ipv6_addr_copy(&fl6.daddr, &np->daddr);
-			ipv6_addr_copy(&fl6.saddr, &np->saddr);
+			fl6.daddr = np->daddr;
+			fl6.saddr = np->saddr;
 			fl6.flowi6_oif = sk->sk_bound_dev_if;
 			fl6.fl6_dport = inet->inet_dport;
 			fl6.fl6_sport = inet->inet_sport;
@@ -244,8 +244,8 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_DCCP;
-	ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
-	ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+	fl6.daddr = ireq6->rmt_addr;
+	fl6.saddr = ireq6->loc_addr;
 	fl6.flowlabel = 0;
 	fl6.flowi6_oif = ireq6->iif;
 	fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -270,7 +270,7 @@
 		dh->dccph_checksum = dccp_v6_csum_finish(skb,
 							 &ireq6->loc_addr,
 							 &ireq6->rmt_addr);
-		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+		fl6.daddr = ireq6->rmt_addr;
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -313,8 +313,8 @@
 							    &rxip6h->daddr);
 
 	memset(&fl6, 0, sizeof(fl6));
-	ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
-	ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
+	fl6.daddr = rxip6h->saddr;
+	fl6.saddr = rxip6h->daddr;
 
 	fl6.flowi6_proto = IPPROTO_DCCP;
 	fl6.flowi6_oif = inet6_iif(rxskb);
@@ -419,8 +419,8 @@
 		goto drop_and_free;
 
 	ireq6 = inet6_rsk(req);
-	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
-	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+	ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq6->loc_addr = ipv6_hdr(skb)->daddr;
 
 	if (ipv6_opt_accepted(sk, skb) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -491,7 +491,7 @@
 
 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+		newnp->rcv_saddr = newnp->saddr;
 
 		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -526,9 +526,9 @@
 
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = IPPROTO_DCCP;
-		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+		fl6.daddr = ireq6->rmt_addr;
 		final_p = fl6_update_dst(&fl6, opt, &final);
-		ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+		fl6.saddr = ireq6->loc_addr;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
 		fl6.fl6_dport = inet_rsk(req)->rmt_port;
 		fl6.fl6_sport = inet_rsk(req)->loc_port;
@@ -559,9 +559,9 @@
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
-	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
-	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
+	newnp->daddr = ireq6->rmt_addr;
+	newnp->saddr = ireq6->loc_addr;
+	newnp->rcv_saddr = ireq6->loc_addr;
 	newsk->sk_bound_dev_if = ireq6->iif;
 
 	/* Now IPv6 options...
@@ -877,7 +877,7 @@
 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 			if (flowlabel == NULL)
 				return -EINVAL;
-			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+			usin->sin6_addr = flowlabel->dst;
 			fl6_sock_release(flowlabel);
 		}
 	}
@@ -910,7 +910,7 @@
 			return -EINVAL;
 	}
 
-	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+	np->daddr = usin->sin6_addr;
 	np->flow_label = fl6.flowlabel;
 
 	/*
@@ -949,8 +949,8 @@
 		saddr = &np->rcv_saddr;
 
 	fl6.flowi6_proto = IPPROTO_DCCP;
-	ipv6_addr_copy(&fl6.daddr, &np->daddr);
-	ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
+	fl6.daddr = np->daddr;
+	fl6.saddr = saddr ? *saddr : np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
@@ -966,11 +966,11 @@
 
 	if (saddr == NULL) {
 		saddr = &fl6.saddr;
-		ipv6_addr_copy(&np->rcv_saddr, saddr);
+		np->rcv_saddr = *saddr;
 	}
 
 	/* set the source address */
-	ipv6_addr_copy(&np->saddr, saddr);
+	np->saddr = *saddr;
 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 	__ip6_dst_store(sk, dst, NULL, NULL);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index d7041a0..5a7f90b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -53,15 +53,15 @@
 	if (tw != NULL) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
 			const struct ipv6_pinfo *np = inet6_sk(sk);
 			struct inet6_timewait_sock *tw6;
 
 			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
 			tw6 = inet6_twsk((struct sock *)tw);
-			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
-			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+			tw6->tw_v6_daddr = np->daddr;
+			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
 			tw->tw_ipv6only = np->ipv6only;
 		}
 #endif
@@ -100,7 +100,7 @@
 	 *   (* Generate a new socket and switch to that socket *)
 	 *   Set S := new socket for this port pair
 	 */
-	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
 	if (newsk != NULL) {
 		struct dccp_request_sock *dreq = dccp_rsk(req);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 4b2ab65..68fa6b7 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -544,7 +544,7 @@
 	}
 
 	if (unlikely(val == NULL || len == 0))
-		len = repeat_first = 0;
+		len = repeat_first = false;
 	tot_len = 3 + repeat_first + len;
 
 	if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) {
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 33d0e62..0a8d6eb 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,6 +152,17 @@
 	.llseek  = noop_llseek,
 };
 
+static __init int setup_jprobe(void)
+{
+	int ret = register_jprobe(&dccp_send_probe);
+
+	if (ret) {
+		request_module("dccp");
+		ret = register_jprobe(&dccp_send_probe);
+	}
+	return ret;
+}
+
 static __init int dccpprobe_init(void)
 {
 	int ret = -ENOMEM;
@@ -163,8 +174,7 @@
 	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
 		goto err0;
 
-	try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
-				"dccp");
+	ret = setup_jprobe();
 	if (ret)
 		goto err1;
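
setup_jprobe() above replaces try_then_request_module() with an explicit attempt / request_module("dccp") / retry sequence. The same pattern as a self-contained userspace sketch, with made-up stand-ins for the registration and module load:

#include <stdio.h>

static int provider_ready;	/* 0 until the "module" is loaded */

static int try_attach(void)
{
	return provider_ready ? 0 : -2;	/* -2 stands in for a failed registration */
}

static void load_provider(void)		/* stands in for request_module("dccp") */
{
	provider_ready = 1;
}

static int setup(void)
{
	int ret = try_attach();

	if (ret) {
		load_provider();
		ret = try_attach();	/* one retry after loading the provider */
	}
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());	/* 0 after the retry */
	return 0;
}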
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index e742f90..7065c0a 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1099,7 +1099,7 @@
 MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
 
 #ifdef CONFIG_IP_DCCP_DEBUG
-int dccp_debug;
+bool dccp_debug;
 module_param(dccp_debug, bool, 0644);
 MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
 
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7f0eb08..befe426 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -88,9 +88,9 @@
 
 static u32 dn_neigh_hash(const void *pkey,
 			 const struct net_device *dev,
-			 __u32 hash_rnd)
+			 __u32 *hash_rnd)
 {
-	return jhash_2words(*(__u16 *)pkey, 0, hash_rnd);
+	return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
 }
 
 struct neigh_table dn_neigh_table = {
@@ -107,7 +107,7 @@
 		.gc_staletime =	60 * HZ,
 		.reachable_time =		30 * HZ,
 		.delay_probe_time =	5 * HZ,
-		.queue_len =		3,
+		.queue_len_bytes =	64*1024,
 		.ucast_probes =	0,
 		.app_probes =		0,
 		.mcast_probes =	0,
@@ -202,7 +202,7 @@
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *)dst;
-	struct neighbour *neigh = dst_get_neighbour(dst);
+	struct neighbour *neigh = dst_get_neighbour_noref(dst);
 	struct net_device *dev = neigh->dev;
 	char mac_addr[ETH_ALEN];
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 94f4ec0..f31ce72 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -244,7 +244,7 @@
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-	struct neighbour *n = dst_get_neighbour(dst);
+	struct neighbour *n = dst_get_neighbour_noref(dst);
 	u32 min_mtu = 230;
 	struct dn_dev *dn;
 
@@ -713,7 +713,7 @@
 static int dn_to_neigh_output(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *n = dst_get_neighbour(dst);
+	struct neighbour *n = dst_get_neighbour_noref(dst);
 
 	return n->output(n, skb);
 }
@@ -728,7 +728,7 @@
 
 	int err = -EINVAL;
 
-	if ((neigh = dst_get_neighbour(dst)) == NULL)
+	if ((neigh = dst_get_neighbour_noref(dst)) == NULL)
 		goto error;
 
 	skb->dev = dev;
@@ -852,7 +852,7 @@
 	}
 	rt->rt_type = res->type;
 
-	if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
+	if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
 		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c53ded2..274791c 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,5 +1,5 @@
-menuconfig NET_DSA
-	bool "Distributed Switch Architecture support"
+config NET_DSA
+	tristate "Distributed Switch Architecture support"
 	default n
 	depends on EXPERIMENTAL && NETDEVICES && !S390
 	select PHYLIB
@@ -23,38 +23,4 @@
 	bool
 	default n
 
-
-# switch drivers
-config NET_DSA_MV88E6XXX
-	bool
-	default n
-
-config NET_DSA_MV88E6060
-	bool "Marvell 88E6060 ethernet switch chip support"
-	select NET_DSA_TAG_TRAILER
-	---help---
-	  This enables support for the Marvell 88E6060 ethernet switch
-	  chip.
-
-config NET_DSA_MV88E6XXX_NEED_PPU
-	bool
-	default n
-
-config NET_DSA_MV88E6131
-	bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
-	select NET_DSA_MV88E6XXX
-	select NET_DSA_MV88E6XXX_NEED_PPU
-	select NET_DSA_TAG_DSA
-	---help---
-	  This enables support for the Marvell 88E6085/6095/6095F/6131
-	  ethernet switch chips.
-
-config NET_DSA_MV88E6123_61_65
-	bool "Marvell 88E6123/6161/6165 ethernet switch chip support"
-	select NET_DSA_MV88E6XXX
-	select NET_DSA_TAG_EDSA
-	---help---
-	  This enables support for the Marvell 88E6123/6161/6165
-	  ethernet switch chips.
-
 endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 2374faf..7b9fcbb 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,13 +1,8 @@
-# tagging formats
-obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
-obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
-obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
-
-# switch drivers
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
-obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6123_61_65) += mv88e6123_61_65.o
-obj-$(CONFIG_NET_DSA_MV88E6131) += mv88e6131.o
-
 # the core
-obj-$(CONFIG_NET_DSA) += dsa.o slave.o
+obj-$(CONFIG_NET_DSA) += dsa_core.o
+dsa_core-y += dsa.o slave.o
+
+# tagging formats
+dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0dc1589..88e7c2f 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -29,6 +29,7 @@
 	list_add_tail(&drv->list, &dsa_switch_drivers);
 	mutex_unlock(&dsa_switch_drivers_mutex);
 }
+EXPORT_SYMBOL_GPL(register_switch_driver);
 
 void unregister_switch_driver(struct dsa_switch_driver *drv)
 {
@@ -36,6 +37,7 @@
 	list_del_init(&drv->list);
 	mutex_unlock(&dsa_switch_drivers_mutex);
 }
+EXPORT_SYMBOL_GPL(unregister_switch_driver);
 
 static struct dsa_switch_driver *
 dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
@@ -199,29 +201,6 @@
 }
 
 
-/* hooks for ethertype-less tagging formats *********************************/
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-bool dsa_uses_dsa_tags(void *dsa_ptr)
-{
-	struct dsa_switch_tree *dst = dsa_ptr;
-
-	return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-bool dsa_uses_trailer_tags(void *dsa_ptr)
-{
-	struct dsa_switch_tree *dst = dsa_ptr;
-
-	return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
-}
-
-
 /* link polling *************************************************************/
 static void dsa_link_poll_work(struct work_struct *ugly)
 {
@@ -419,12 +398,36 @@
 
 static int __init dsa_init_module(void)
 {
-	return platform_driver_register(&dsa_driver);
+	int rc;
+
+	rc = platform_driver_register(&dsa_driver);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_NET_DSA_TAG_DSA
+	dev_add_pack(&dsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+	dev_add_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+	dev_add_pack(&trailer_packet_type);
+#endif
+	return 0;
 }
 module_init(dsa_init_module);
 
 static void __exit dsa_cleanup_module(void)
 {
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+	dev_remove_pack(&trailer_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+	dev_remove_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_DSA
+	dev_remove_pack(&dsa_packet_type);
+#endif
 	platform_driver_unregister(&dsa_driver);
 }
 module_exit(dsa_cleanup_module);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 4b0ea05..d4cf5cc 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -11,97 +11,9 @@
 #ifndef __DSA_PRIV_H
 #define __DSA_PRIV_H
 
-#include <linux/list.h>
 #include <linux/phy.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
 #include <net/dsa.h>
 
-struct dsa_switch {
-	/*
-	 * Parent switch tree, and switch index.
-	 */
-	struct dsa_switch_tree	*dst;
-	int			index;
-
-	/*
-	 * Configuration data for this switch.
-	 */
-	struct dsa_chip_data	*pd;
-
-	/*
-	 * The used switch driver.
-	 */
-	struct dsa_switch_driver	*drv;
-
-	/*
-	 * Reference to mii bus to use.
-	 */
-	struct mii_bus		*master_mii_bus;
-
-	/*
-	 * Slave mii_bus and devices for the individual ports.
-	 */
-	u32			dsa_port_mask;
-	u32			phys_port_mask;
-	struct mii_bus		*slave_mii_bus;
-	struct net_device	*ports[DSA_MAX_PORTS];
-};
-
-struct dsa_switch_tree {
-	/*
-	 * Configuration data for the platform device that owns
-	 * this dsa switch tree instance.
-	 */
-	struct dsa_platform_data	*pd;
-
-	/*
-	 * Reference to network device to use, and which tagging
-	 * protocol to use.
-	 */
-	struct net_device	*master_netdev;
-	__be16			tag_protocol;
-
-	/*
-	 * The switch and port to which the CPU is attached.
-	 */
-	s8			cpu_switch;
-	s8			cpu_port;
-
-	/*
-	 * Link state polling.
-	 */
-	int			link_poll_needed;
-	struct work_struct	link_poll_work;
-	struct timer_list	link_poll_timer;
-
-	/*
-	 * Data for the individual switch chips.
-	 */
-	struct dsa_switch	*ds[DSA_MAX_SWITCHES];
-};
-
-static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
-{
-	return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
-}
-
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
-{
-	struct dsa_switch_tree *dst = ds->dst;
-
-	/*
-	 * If this is the root switch (i.e. the switch that connects
-	 * to the CPU), return the cpu port number on this switch.
-	 * Else return the (DSA) port number that connects to the
-	 * switch that is one hop closer to the cpu.
-	 */
-	if (dst->cpu_switch == ds->index)
-		return dst->cpu_port;
-	else
-		return ds->pd->rtable[dst->cpu_switch];
-}
-
 struct dsa_slave_priv {
 	/*
 	 * The linux network interface corresponding to this
@@ -123,44 +35,8 @@
 	struct phy_device	*phy;
 };
 
-struct dsa_switch_driver {
-	struct list_head	list;
-
-	__be16			tag_protocol;
-	int			priv_size;
-
-	/*
-	 * Probing and setup.
-	 */
-	char	*(*probe)(struct mii_bus *bus, int sw_addr);
-	int	(*setup)(struct dsa_switch *ds);
-	int	(*set_addr)(struct dsa_switch *ds, u8 *addr);
-
-	/*
-	 * Access to the switch's PHY registers.
-	 */
-	int	(*phy_read)(struct dsa_switch *ds, int port, int regnum);
-	int	(*phy_write)(struct dsa_switch *ds, int port,
-			     int regnum, u16 val);
-
-	/*
-	 * Link state polling and IRQ handling.
-	 */
-	void	(*poll_link)(struct dsa_switch *ds);
-
-	/*
-	 * ethtool hardware statistics.
-	 */
-	void	(*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
-	void	(*get_ethtool_stats)(struct dsa_switch *ds,
-				     int port, uint64_t *data);
-	int	(*get_sset_count)(struct dsa_switch *ds);
-};
-
 /* dsa.c */
 extern char dsa_driver_version[];
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
 
 /* slave.c */
 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -170,12 +46,15 @@
 
 /* tag_dsa.c */
 netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type dsa_packet_type;
 
 /* tag_edsa.c */
 netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type edsa_packet_type;
 
 /* tag_trailer.c */
 netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type trailer_packet_type;
 
 
 #endif
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 98dfe80..cacce1e 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -186,20 +186,7 @@
 	return 0;
 }
 
-static struct packet_type dsa_packet_type __read_mostly = {
+struct packet_type dsa_packet_type __read_mostly = {
 	.type	= cpu_to_be16(ETH_P_DSA),
 	.func	= dsa_rcv,
 };
-
-static int __init dsa_init_module(void)
-{
-	dev_add_pack(&dsa_packet_type);
-	return 0;
-}
-module_init(dsa_init_module);
-
-static void __exit dsa_cleanup_module(void)
-{
-	dev_remove_pack(&dsa_packet_type);
-}
-module_exit(dsa_cleanup_module);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 6f38332..e70c43c 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -205,20 +205,7 @@
 	return 0;
 }
 
-static struct packet_type edsa_packet_type __read_mostly = {
+struct packet_type edsa_packet_type __read_mostly = {
 	.type	= cpu_to_be16(ETH_P_EDSA),
 	.func	= edsa_rcv,
 };
-
-static int __init edsa_init_module(void)
-{
-	dev_add_pack(&edsa_packet_type);
-	return 0;
-}
-module_init(edsa_init_module);
-
-static void __exit edsa_cleanup_module(void)
-{
-	dev_remove_pack(&edsa_packet_type);
-}
-module_exit(edsa_cleanup_module);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d6d7d0a..94bc260 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -114,20 +114,7 @@
 	return 0;
 }
 
-static struct packet_type trailer_packet_type __read_mostly = {
+struct packet_type trailer_packet_type __read_mostly = {
 	.type	= cpu_to_be16(ETH_P_TRAILER),
 	.func	= trailer_rcv,
 };
-
-static int __init trailer_init_module(void)
-{
-	dev_add_pack(&trailer_packet_type);
-	return 0;
-}
-module_init(trailer_init_module);
-
-static void __exit trailer_cleanup_module(void)
-{
-	dev_remove_pack(&trailer_packet_type);
-}
-module_exit(trailer_cleanup_module);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 1c1f26c..7e717cb 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -322,6 +322,7 @@
 		/* Real hardware Econet.  We're not worthy etc. */
 #ifdef CONFIG_ECONET_NATIVE
 		unsigned short proto = 0;
+		int hlen, tlen;
 		int res;
 
 		if (len + 15 > dev->mtu) {
@@ -331,12 +332,14 @@
 
 		dev_hold(dev);
 
-		skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
+		hlen = LL_RESERVED_SPACE(dev);
+		tlen = dev->needed_tailroom;
+		skb = sock_alloc_send_skb(sk, len + hlen + tlen,
 					  msg->msg_flags & MSG_DONTWAIT, &err);
 		if (skb == NULL)
 			goto out_unlock;
 
-		skb_reserve(skb, LL_RESERVED_SPACE(dev));
+		skb_reserve(skb, hlen);
 		skb_reset_network_header(skb);
 
 		eb = (struct ec_cb *)&skb->cb;
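
This is the first of several conversions in this series away from LL_ALLOCATED_SPACE(): the link-layer headroom (LL_RESERVED_SPACE()) and the device's needed_tailroom are now sized separately, and only the headroom is reserved in front of the payload. A small model of the resulting buffer layout; userspace sketch, not the skb API:

#include <stdio.h>

struct buf {
	unsigned int size;	/* total allocation */
	unsigned int data;	/* offset where the payload starts */
};

static struct buf alloc_for(unsigned int payload,
			    unsigned int hlen, unsigned int tlen)
{
	struct buf b;

	b.size = payload + hlen + tlen;	/* header + payload + trailer room */
	b.data = hlen;			/* skb_reserve(skb, hlen) equivalent */
	return b;
}

int main(void)
{
	/* e.g. a device needing 21 bytes of headroom and 2 bytes of tailroom */
	struct buf b = alloc_for(100, 21, 2);

	printf("alloc %u bytes, payload at offset %u\n", b.size, b.data);
	return 0;
}
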
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 19d6aef..e4ecc1e 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -50,8 +50,6 @@
  * SUCH DAMAGE.
  */
 
-#define DEBUG
-
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
 #include <linux/module.h>
@@ -113,6 +111,20 @@
 	struct list_head list;
 };
 
+struct lowpan_fragment {
+	struct sk_buff		*skb;		/* skb to be assembled */
+	spinlock_t		lock;		/* concurrency lock */
+	u16			length;		/* length to be assembled */
+	u32			bytes_rcv;	/* bytes received */
+	u16			tag;		/* current fragment tag */
+	struct timer_list	timer;		/* assembling timer */
+	struct list_head	list;		/* fragments list */
+};
+
+static unsigned short fragment_tag;
+static LIST_HEAD(lowpan_fragments);
+spinlock_t flist_lock;
+
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
@@ -234,6 +246,50 @@
 	return 0;
 }
 
+static void
+lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	pr_debug("(%s): UDP header compression\n", __func__);
+
+	if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
+				LOWPAN_NHC_UDP_4BIT_PORT) &&
+	    ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
+				LOWPAN_NHC_UDP_4BIT_PORT)) {
+		pr_debug("(%s): both ports compression to 4 bits\n", __func__);
+		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
+		*(*hc06_ptr + 1) = /* subtraction is faster */
+		   (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
+		       ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
+		*hc06_ptr += 2;
+	} else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
+			LOWPAN_NHC_UDP_8BIT_PORT) {
+		pr_debug("(%s): remove 8 bits of dest\n", __func__);
+		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
+		memcpy(*hc06_ptr + 1, &uh->source, 2);
+		*(*hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
+		*hc06_ptr += 4;
+	} else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
+			LOWPAN_NHC_UDP_8BIT_PORT) {
+		pr_debug("(%s): remove 8 bits of source\n", __func__);
+		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
+		memcpy(*hc06_ptr + 1, &uh->dest, 2);
+		*(*hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
+		*hc06_ptr += 4;
+	} else {
+		pr_debug("(%s): can't compress header\n", __func__);
+		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
+		memcpy(*hc06_ptr + 1, &uh->source, 2);
+		memcpy(*hc06_ptr + 3, &uh->dest, 2);
+		*hc06_ptr += 5;
+	}
+
+	/* checksum is always inline */
+	memcpy(*hc06_ptr, &uh->check, 2);
+	*hc06_ptr += 2;
+}
+
 static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
 {
 	u8 ret;
@@ -244,6 +300,73 @@
 	return ret;
 }
 
+static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+{
+	u16 ret;
+
+	BUG_ON(!pskb_may_pull(skb, 2));
+
+	ret = skb->data[0] | (skb->data[1] << 8);
+	skb_pull(skb, 2);
+	return ret;
+}
+
+static int
+lowpan_uncompress_udp_header(struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_hdr(skb);
+	u8 tmp;
+
+	tmp = lowpan_fetch_skb_u8(skb);
+
+	if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+		pr_debug("(%s): UDP header uncompression\n", __func__);
+		switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+		case LOWPAN_NHC_UDP_CS_P_00:
+			memcpy(&uh->source, &skb->data[0], 2);
+			memcpy(&uh->dest, &skb->data[2], 2);
+			skb_pull(skb, 4);
+			break;
+		case LOWPAN_NHC_UDP_CS_P_01:
+			memcpy(&uh->source, &skb->data[0], 2);
+			uh->dest =
+			   skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
+			skb_pull(skb, 3);
+			break;
+		case LOWPAN_NHC_UDP_CS_P_10:
+			uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
+			memcpy(&uh->dest, &skb->data[1], 2);
+			skb_pull(skb, 3);
+			break;
+		case LOWPAN_NHC_UDP_CS_P_11:
+			uh->source =
+			   LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
+			uh->dest =
+			   LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
+			skb_pull(skb, 1);
+			break;
+		default:
+			pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+			goto err;
+			break;
+		}
+
+		pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
+					__func__, uh->source, uh->dest);
+
+		/* copy checksum */
+		memcpy(&uh->check, &skb->data[0], 2);
+		skb_pull(skb, 2);
+	} else {
+		pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+		goto err;
+	}
+
+	return 0;
+err:
+	return -EINVAL;
+}
+
 static int lowpan_header_create(struct sk_buff *skb,
 			   struct net_device *dev,
 			   unsigned short type, const void *_daddr,
@@ -342,8 +465,6 @@
 	if (hdr->nexthdr == UIP_PROTO_UDP)
 		iphc0 |= LOWPAN_IPHC_NH_C;
 
-/* TODO: next header compression */
-
 	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
 		*hc06_ptr = hdr->nexthdr;
 		hc06_ptr += 1;
@@ -431,8 +552,9 @@
 		}
 	}
 
-	/* TODO: UDP header compression */
-	/* TODO: Next Header compression */
+	/* UDP header compression */
+	if (hdr->nexthdr == UIP_PROTO_UDP)
+		lowpan_compress_udp_header(&hc06_ptr, skb);
 
 	head[0] = iphc0;
 	head[1] = iphc1;
@@ -467,6 +589,7 @@
 		memcpy(&(sa.hwaddr), saddr, 8);
 
 		mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+
 		return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
 				type, (void *)&da, (void *)&sa, skb->len);
 	}
@@ -511,6 +634,21 @@
 	return stat;
 }
 
+static void lowpan_fragment_timer_expired(unsigned long entry_addr)
+{
+	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
+
+	pr_debug("%s: timer expired for frame with tag %d\n", __func__,
+								entry->tag);
+
+	spin_lock(&flist_lock);
+	list_del(&entry->list);
+	spin_unlock(&flist_lock);
+
+	dev_kfree_skb(entry->skb);
+	kfree(entry);
+}
+
 static int
 lowpan_process_data(struct sk_buff *skb)
 {
@@ -525,6 +663,107 @@
 	if (skb->len < 2)
 		goto drop;
 	iphc0 = lowpan_fetch_skb_u8(skb);
+
+	/* fragments assembling */
+	switch (iphc0 & LOWPAN_DISPATCH_MASK) {
+	case LOWPAN_DISPATCH_FRAG1:
+	case LOWPAN_DISPATCH_FRAGN:
+	{
+		struct lowpan_fragment *frame;
+		u8 len, offset;
+		u16 tag;
+		bool found = false;
+
+		len = lowpan_fetch_skb_u8(skb); /* frame length */
+		tag = lowpan_fetch_skb_u16(skb);
+
+		/*
+		 * check if frame assembling with the same tag is
+		 * already in progress
+		 */
+		spin_lock(&flist_lock);
+
+		list_for_each_entry(frame, &lowpan_fragments, list)
+			if (frame->tag == tag) {
+				found = true;
+				break;
+			}
+
+		/* alloc new frame structure */
+		if (!found) {
+			frame = kzalloc(sizeof(struct lowpan_fragment),
+								GFP_ATOMIC);
+			if (!frame)
+				goto unlock_and_drop;
+
+			INIT_LIST_HEAD(&frame->list);
+
+			frame->length = (iphc0 & 7) | (len << 3);
+			frame->tag = tag;
+
+			/* allocate buffer for frame assembling */
+			frame->skb = alloc_skb(frame->length +
+					sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+			if (!frame->skb) {
+				kfree(frame);
+				goto unlock_and_drop;
+			}
+
+			frame->skb->priority = skb->priority;
+			frame->skb->dev = skb->dev;
+
+			/* reserve headroom for uncompressed ipv6 header */
+			skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+			skb_put(frame->skb, frame->length);
+
+			init_timer(&frame->timer);
+			/* timeout is the same as for IPv6 - 60 sec */
+			frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+			frame->timer.data = (unsigned long)frame;
+			frame->timer.function = lowpan_fragment_timer_expired;
+
+			add_timer(&frame->timer);
+
+			list_add_tail(&frame->list, &lowpan_fragments);
+		}
+
+		if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
+			goto unlock_and_drop;
+
+		offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+
+		/* if payload fits buffer, copy it */
+		if (likely((offset * 8 + skb->len) <= frame->length))
+			skb_copy_to_linear_data_offset(frame->skb, offset * 8,
+							skb->data, skb->len);
+		else
+			goto unlock_and_drop;
+
+		frame->bytes_rcv += skb->len;
+
+		/* frame assembling complete */
+		if ((frame->bytes_rcv == frame->length) &&
+		     frame->timer.expires > jiffies) {
+			/* if the timer hasn't expired, delete it first */
+			del_timer(&frame->timer);
+			list_del(&frame->list);
+			spin_unlock(&flist_lock);
+
+			dev_kfree_skb(skb);
+			skb = frame->skb;
+			kfree(frame);
+			iphc0 = lowpan_fetch_skb_u8(skb);
+			break;
+		}
+		spin_unlock(&flist_lock);
+
+		return kfree_skb(skb), 0;
+	}
+	default:
+		break;
+	}
+
 	iphc1 = lowpan_fetch_skb_u8(skb);
 
 	_saddr = mac_cb(skb)->sa.hwaddr;
@@ -659,7 +898,10 @@
 			goto drop;
 	}
 
-	/* TODO: UDP header parse */
+	/* UDP data uncompression */
+	if (iphc0 & LOWPAN_IPHC_NH_C)
+		if (lowpan_uncompress_udp_header(skb))
+			goto drop;
 
 	/* Not fragmented package */
 	hdr.payload_len = htons(skb->len);
@@ -674,6 +916,9 @@
 	lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
 							sizeof(hdr));
 	return lowpan_skb_deliver(skb, &hdr);
+
+unlock_and_drop:
+	spin_unlock(&flist_lock);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;
@@ -692,18 +937,115 @@
 	return 0;
 }
 
+static int lowpan_get_mac_header_length(struct sk_buff *skb)
+{
+	/*
+	 * Currently long addressing mode is supported only, so the overall
+	 * Currently only long addressing mode is supported, so the overall
+	 * FC SeqNum DPAN DA  SA  Sec
+	 * 2  +  1  +  2 + 8 + 8 + 0  = 21
+	 */
+	return 21;
+}
+
+static int
+lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
+			int mlen, int plen, int offset)
+{
+	struct sk_buff *frag;
+	int hlen, ret;
+
+	/* if payload length is zero, this is the first fragment */
+	hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE :  LOWPAN_FRAGN_HEAD_SIZE);
+
+	lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+
+	frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+	if (!frag)
+		return -ENOMEM;
+
+	frag->priority = skb->priority;
+	frag->dev = skb->dev;
+
+	/* copy header, MFR and payload */
+	memcpy(skb_put(frag, mlen), skb->data, mlen);
+	memcpy(skb_put(frag, hlen), head, hlen);
+
+	if (plen)
+		skb_copy_from_linear_data_offset(skb, offset + mlen,
+					skb_put(frag, plen), plen);
+
+	lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
+								frag->len);
+
+	ret = dev_queue_xmit(frag);
+
+	return ret;
+}
+
+static int
+lowpan_skb_fragmentation(struct sk_buff *skb)
+{
+	int  err, header_length, payload_length, tag, offset = 0;
+	u8 head[5];
+
+	header_length = lowpan_get_mac_header_length(skb);
+	payload_length = skb->len - header_length;
+	tag = fragment_tag++;
+
+	/* first fragment header */
+	head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
+	head[1] = (payload_length >> 3) & 0xff;
+	head[2] = tag & 0xff;
+	head[3] = tag >> 8;
+
+	err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
+
+	/* next fragment header */
+	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
+	head[0] |= LOWPAN_DISPATCH_FRAGN;
+
+	while ((payload_length - offset > 0) && (err >= 0)) {
+		int len = LOWPAN_FRAG_SIZE;
+
+		head[4] = offset / 8;
+
+		if (payload_length - offset < len)
+			len = payload_length - offset;
+
+		err = lowpan_fragment_xmit(skb, head, header_length,
+							len, offset);
+		offset += len;
+	}
+
+	return err;
+}
+
 static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	int err = 0;
+	int err = -1;
 
 	pr_debug("(%s): package xmit\n", __func__);
 
 	skb->dev = lowpan_dev_info(dev)->real_dev;
 	if (skb->dev == NULL) {
 		pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
-		dev_kfree_skb(skb);
-	} else
+		goto error;
+	}
+
+	if (skb->len <= IEEE802154_MTU) {
 		err = dev_queue_xmit(skb);
+		goto out;
+	}
+
+	pr_debug("(%s): frame is too big, fragmentation is needed\n",
+								__func__);
+	err = lowpan_skb_fragmentation(skb);
+error:
+	dev_kfree_skb(skb);
+out:
+	if (err < 0)
+		pr_debug("(%s): ERROR: xmit failed\n", __func__);
 
 	return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
 }
@@ -730,13 +1072,12 @@
 	dev->addr_len		= IEEE802154_ADDR_LEN;
 	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
 	dev->type		= ARPHRD_IEEE802154;
-	dev->features		= NETIF_F_NO_CSUM;
 	/* Frame Control + Sequence Number + Address fields + Security Header */
 	dev->hard_header_len	= 2 + 1 + 20 + 14;
 	dev->needed_tailroom	= 2; /* FCS */
 	dev->mtu		= 1281;
 	dev->tx_queue_len	= 0;
-	dev->flags		= IFF_NOARP | IFF_BROADCAST;
+	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
 	dev->watchdog_timeo	= 0;
 
 	dev->netdev_ops		= &lowpan_netdev_ops;
@@ -765,8 +1106,15 @@
 		goto drop;
 
 	/* check that it's our buffer */
-	if ((skb->data[0] & 0xe0) == 0x60)
+	switch (skb->data[0] & 0xe0) {
+	case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
+	case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
+	case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
 		lowpan_process_data(skb);
+		break;
+	default:
+		break;
+	}
 
 	return NET_RX_SUCCESS;
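
The fragmentation path added above packs an 11-bit datagram size and a 16-bit tag into the FRAG1 header, and FRAGN frames add an offset byte counted in 8-octet units; the receive side undoes exactly the same split. A standalone sketch of that arithmetic, mirroring the hunk rather than any spec text (illustrative only):

#include <stdio.h>
#include <stdint.h>

#define DISPATCH_FRAG1	0xc0
#define DISPATCH_FRAGN	0xe0

static void build_frag1(uint8_t head[4], uint16_t size, uint16_t tag)
{
	head[0] = DISPATCH_FRAG1 | (size & 0x7);	/* low 3 bits of the size */
	head[1] = (size >> 3) & 0xff;			/* high 8 bits of the size */
	head[2] = tag & 0xff;
	head[3] = tag >> 8;
}

static uint16_t parse_size(uint8_t b0, uint8_t b1)
{
	return (b0 & 7) | (b1 << 3);	/* same split the receive path undoes */
}

int main(void)
{
	uint8_t head[4];
	uint16_t size = 1280, tag = 0x1234;

	build_frag1(head, size, tag);
	printf("head: %02x %02x %02x %02x\n", head[0], head[1], head[2], head[3]);
	printf("size round-trips to %u\n", (unsigned)parse_size(head[0], head[1]));
	return 0;
}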
 
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 5d8cf80..aeff3f3 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -159,6 +159,24 @@
 #define LOWPAN_DISPATCH_FRAG1	0xc0 /* 11000xxx */
 #define LOWPAN_DISPATCH_FRAGN	0xe0 /* 11100xxx */
 
+#define LOWPAN_DISPATCH_MASK	0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT	(HZ * 60)	/* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE	0x4
+#define LOWPAN_FRAGN_HEAD_SIZE	0x5
+
+/*
+ * According to the IEEE 802.15.4 standard:
+ *   - MTU is 127 octets
+ *   - maximum MHR size is 37 octets
+ *   - MFR size is 2 octets
+ *
+ * so the minimal payload size that we can guarantee is:
+ *   MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE	88
+
 /*
  * Values of fields within the IPHC encoding first byte
  * (C stands for compressed and I for inline)
@@ -201,6 +219,11 @@
 #define LOWPAN_NHC_UDP_CHECKSUMC	0x04
 #define LOWPAN_NHC_UDP_CHECKSUMI	0x00
 
+#define LOWPAN_NHC_UDP_4BIT_PORT	0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK	0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT	0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK	0xFF00
+
 /* values for port compression, _with checksum_ ie bit 5 set to 0 */
 #define LOWPAN_NHC_UDP_CS_P_00	0xF0 /* all inline */
 #define LOWPAN_NHC_UDP_CS_P_01	0xF1 /* source 16bit inline,
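
The 4-bit and 8-bit port constants above drive the UDP header compression added in 6lowpan.c: when both ports fall in 0xF0B0-0xF0BF they compress to 4 bits each, a port in 0xF000-0xF0FF compresses to 8 bits, and anything else stays inline. A small classification sketch working on host-order port numbers; endianness handling is omitted for clarity:

#include <stdio.h>
#include <stdint.h>

#define UDP_4BIT_PORT	0xF0B0
#define UDP_4BIT_MASK	0xFFF0
#define UDP_8BIT_PORT	0xF000
#define UDP_8BIT_MASK	0xFF00

static const char *classify(uint16_t src, uint16_t dst)
{
	if ((src & UDP_4BIT_MASK) == UDP_4BIT_PORT &&
	    (dst & UDP_4BIT_MASK) == UDP_4BIT_PORT)
		return "P_11: both ports in 4 bits";
	if ((dst & UDP_8BIT_MASK) == UDP_8BIT_PORT)
		return "P_01: source inline, dest in 8 bits";
	if ((src & UDP_8BIT_MASK) == UDP_8BIT_PORT)
		return "P_10: source in 8 bits, dest inline";
	return "P_00: both ports inline";
}

int main(void)
{
	printf("%s\n", classify(0xF0B1, 0xF0B7));	/* 4-bit case */
	printf("%s\n", classify(12345, 0xF023));	/* 8-bit dest */
	printf("%s\n", classify(53, 53));		/* fully inline */
	return 0;
}
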
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index faecf64..1b09eaa 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -209,6 +209,7 @@
 	unsigned mtu;
 	struct sk_buff *skb;
 	struct dgram_sock *ro = dgram_sk(sk);
+	int hlen, tlen;
 	int err;
 
 	if (msg->msg_flags & MSG_OOB) {
@@ -229,13 +230,15 @@
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
-	skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+	hlen = LL_RESERVED_SPACE(dev);
+	tlen = dev->needed_tailroom;
+	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
 			msg->msg_flags & MSG_DONTWAIT,
 			&err);
 	if (!skb)
 		goto out_dev;
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 
 	skb_reset_network_header(skb);
 
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 10970ca..f96bae8 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -108,6 +108,7 @@
 	struct net_device *dev;
 	unsigned mtu;
 	struct sk_buff *skb;
+	int hlen, tlen;
 	int err;
 
 	if (msg->msg_flags & MSG_OOB) {
@@ -137,12 +138,14 @@
 		goto out_dev;
 	}
 
-	skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+	hlen = LL_RESERVED_SPACE(dev);
+	tlen = dev->needed_tailroom;
+	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
 			msg->msg_flags & MSG_DONTWAIT, &err);
 	if (!skb)
 		goto out_dev;
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index cbb505b..335ca7a 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -163,8 +163,6 @@
 	  operating on your network. Read
 	  <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
-# not yet ready..
-#   bool '    IP: ARP support' CONFIG_IP_PNP_ARP
 config NET_IPIP
 	tristate "IP: tunneling"
 	select INET_TUNNEL
@@ -409,6 +407,10 @@
 	depends on INET_DIAG
 	def_tristate INET_DIAG
 
+config INET_UDP_DIAG
+	depends on INET_DIAG
+	def_tristate INET_DIAG && IPV6
+
 menuconfig TCP_CONG_ADVANCED
 	bool "TCP: advanced congestion control"
 	---help---
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f2dc69c..ff75d3b 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_NETFILTER)	+= netfilter.o netfilter/
 obj-$(CONFIG_INET_DIAG) += inet_diag.o 
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
+obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
@@ -47,6 +48,7 @@
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b5096a..f7b5670 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1250,7 +1250,8 @@
 	return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct iphdr *iph;
@@ -1572,9 +1573,9 @@
 			  sizeof(struct icmp_mib),
 			  __alignof__(struct icmp_mib)) < 0)
 		goto err_icmp_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
-			  sizeof(struct icmpmsg_mib),
-			  __alignof__(struct icmpmsg_mib)) < 0)
+	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+					      GFP_KERNEL);
+	if (!net->mib.icmpmsg_statistics)
 		goto err_icmpmsg_mib;
 
 	tcp_mib_init(net);
@@ -1598,7 +1599,7 @@
 
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
-	snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+	kfree(net->mib.icmpmsg_statistics);
 	snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
 	snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
 	snmp_mib_free((void __percpu **)net->mib.udp_statistics);
@@ -1671,6 +1672,8 @@
 	ip_static_sysctl_init();
 #endif
 
+	tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
 	/*
 	 *	Add all the base protocols.
 	 */
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96a164a..59402be 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -112,11 +112,6 @@
 #include <net/arp.h>
 #include <net/ax25.h>
 #include <net/netrom.h>
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-#include <net/atmclip.h>
-struct neigh_table *clip_tbl_hook;
-EXPORT_SYMBOL(clip_tbl_hook);
-#endif
 
 #include <asm/system.h>
 #include <linux/uaccess.h>
@@ -126,7 +121,7 @@
 /*
  *	Interface to generic neighbour cache.
  */
-static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 rnd);
+static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
 static int arp_constructor(struct neighbour *neigh);
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -164,7 +159,6 @@
 
 struct neigh_table arp_tbl = {
 	.family		= AF_INET,
-	.entry_size	= sizeof(struct neighbour) + 4,
 	.key_len	= 4,
 	.hash		= arp_hash,
 	.constructor	= arp_constructor,
@@ -177,7 +171,7 @@
 		.gc_staletime		= 60 * HZ,
 		.reachable_time		= 30 * HZ,
 		.delay_probe_time	= 5 * HZ,
-		.queue_len		= 3,
+		.queue_len_bytes	= 64*1024,
 		.ucast_probes		= 3,
 		.mcast_probes		= 3,
 		.anycast_delay		= 1 * HZ,
@@ -221,9 +215,9 @@
 
 static u32 arp_hash(const void *pkey,
 		    const struct net_device *dev,
-		    __u32 hash_rnd)
+		    __u32 *hash_rnd)
 {
-	return arp_hashfn(*(u32 *)pkey, dev, hash_rnd);
+	return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd);
 }
 
 static int arp_constructor(struct neighbour *neigh)
@@ -283,9 +277,9 @@
 		default:
 			break;
 		case ARPHRD_ROSE:
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 		case ARPHRD_AX25:
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
 		case ARPHRD_NETROM:
 #endif
 			neigh->ops = &arp_broken_ops;
@@ -592,16 +586,18 @@
 	struct sk_buff *skb;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
 
 	/*
 	 *	Allocate a buffer
 	 */
 
-	skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
 	arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
 	skb->dev = dev;
@@ -633,13 +629,13 @@
 		arp->ar_pro = htons(ETH_P_IP);
 		break;
 
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 	case ARPHRD_AX25:
 		arp->ar_hrd = htons(ARPHRD_AX25);
 		arp->ar_pro = htons(AX25_P_IP);
 		break;
 
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
 	case ARPHRD_NETROM:
 		arp->ar_hrd = htons(ARPHRD_NETROM);
 		arp->ar_pro = htons(AX25_P_IP);
@@ -647,13 +643,13 @@
 #endif
 #endif
 
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
 	case ARPHRD_FDDI:
 		arp->ar_hrd = htons(ARPHRD_ETHER);
 		arp->ar_pro = htons(ETH_P_IP);
 		break;
 #endif
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#if IS_ENABLED(CONFIG_TR)
 	case ARPHRD_IEEE802_TR:
 		arp->ar_hrd = htons(ARPHRD_IEEE802);
 		arp->ar_pro = htons(ETH_P_IP);
@@ -1040,7 +1036,7 @@
 			return -EINVAL;
 	}
 	switch (dev->type) {
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
 	case ARPHRD_FDDI:
 		/*
 		 * According to RFC 1390, FDDI devices should accept ARP
@@ -1286,7 +1282,7 @@
 }
 
 #ifdef CONFIG_PROC_FS
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 
 /* ------------------------------------------------------------------------ */
 /*
@@ -1334,7 +1330,7 @@
 
 	read_lock(&n->lock);
 	/* Convert hardware address to XX:XX:XX:XX ... form. */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 	if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
 		ax2asc2((ax25_address *)n->ha, hbuffer);
 	else {
@@ -1347,7 +1343,7 @@
 	if (k != 0)
 		--k;
 	hbuffer[k] = 0;
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 	}
 #endif
 	sprintf(tbuf, "%pI4", n->primary_key);
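The other recurring change in arp.c (and throughout the series) replaces the defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) idiom with IS_ENABLED(CONFIG_FOO). The two spellings are intended to be equivalent; a small side-by-side sketch, assuming the usual <linux/kconfig.h> definitions:

#include <linux/kconfig.h>

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
	/* old spelling: true for built-in (=y) and modular (=m) builds */
#endif

#if IS_ENABLED(CONFIG_AX25)
	/* new spelling: IS_ENABLED() covers both CONFIG_AX25 and CONFIG_AX25_MODULE */
#endif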
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 46339ba..799fc79 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -67,6 +67,7 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(fib_lookup);
 
 static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
 			    int flags, struct fib_lookup_arg *arg)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 37b6711..d04b13a 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1607,6 +1607,7 @@
 	rcu_read_unlock();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(fib_table_lookup);
 
 /*
  * Remove the leaf and return parent.
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b2ca095..fa057d1 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -304,9 +304,11 @@
 	struct igmpv3_report *pig;
 	struct net *net = dev_net(dev);
 	struct flowi4 fl4;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
 
 	while (1) {
-		skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+		skb = alloc_skb(size + hlen + tlen,
 				GFP_ATOMIC | __GFP_NOWARN);
 		if (skb)
 			break;
@@ -327,7 +329,7 @@
 	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 
 	skb_reset_network_header(skb);
 	pip = ip_hdr(skb);
@@ -647,6 +649,7 @@
 	__be32	group = pmc ? pmc->multiaddr : 0;
 	struct flowi4 fl4;
 	__be32	dst;
+	int hlen, tlen;
 
 	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
 		return igmpv3_send_report(in_dev, pmc);
@@ -661,7 +664,9 @@
 	if (IS_ERR(rt))
 		return -1;
 
-	skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+	hlen = LL_RESERVED_SPACE(dev);
+	tlen = dev->needed_tailroom;
+	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
 	if (skb == NULL) {
 		ip_rt_put(rt);
 		return -1;
@@ -669,7 +674,7 @@
 
 	skb_dst_set(skb, &rt->dst);
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
@@ -1574,7 +1579,7 @@
  * Add multicast single-source filter to the interface list
  */
 static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
-	__be32 *psfsrc, int delta)
+	__be32 *psfsrc)
 {
 	struct ip_sf_list *psf, *psf_prev;
 
@@ -1709,7 +1714,7 @@
 		pmc->sfcount[sfmode]++;
 	err = 0;
 	for (i=0; i<sfcount; i++) {
-		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
 		if (err)
 			break;
 	}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c14d88a..2e4e244 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -418,7 +418,7 @@
 	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
 #define AF_INET_FAMILY(fam) 1
@@ -588,10 +588,19 @@
 }
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
 
-struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
-			    const gfp_t priority)
+/**
+ *	inet_csk_clone_lock - clone an inet socket, and lock its clone
+ *	@sk: the socket to clone
+ *	@req: request_sock
+ *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+				 const struct request_sock *req,
+				 const gfp_t priority)
 {
-	struct sock *newsk = sk_clone(sk, priority);
+	struct sock *newsk = sk_clone_lock(sk, priority);
 
 	if (newsk != NULL) {
 		struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -615,7 +624,7 @@
 	}
 	return newsk;
 }
-EXPORT_SYMBOL_GPL(inet_csk_clone);
+EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
 
 /*
  * At this point, there should be no process reference to this
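The rename from inet_csk_clone() to inet_csk_clone_lock() reflects the contract spelled out in the new kernel-doc: the clone comes back with its socket lock held (via sk_clone_lock()), and the caller owns the bh_unlock_sock() on every path, including errors. A hedged caller sketch; example_accept_child() is hypothetical and only stands in for whatever protocol-specific setup a real caller performs:

#include <net/inet_connection_sock.h>
#include <net/sock.h>

static struct sock *example_accept_child(const struct sock *sk,
					 const struct request_sock *req)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		/* ... protocol-specific initialisation of newsk goes here ... */
		bh_unlock_sock(newsk);	/* caller owns the unlock, on success and error alike */
	}
	return newsk;
}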
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ccee270..2240a8e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -33,6 +33,7 @@
 #include <linux/stddef.h>
 
 #include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
 
 static const struct inet_diag_handler **inet_diag_table;
 
@@ -45,24 +46,22 @@
 	u16 userlocks;
 };
 
-static struct sock *idiagnl;
-
 #define INET_DIAG_PUT(skb, attrtype, attrlen) \
 	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
 
-static const struct inet_diag_handler *inet_diag_lock_handler(int type)
+static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
 {
-	if (!inet_diag_table[type])
-		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-			       NETLINK_INET_DIAG, type);
+	if (!inet_diag_table[proto])
+		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+			       NETLINK_SOCK_DIAG, AF_INET, proto);
 
 	mutex_lock(&inet_diag_table_mutex);
-	if (!inet_diag_table[type])
+	if (!inet_diag_table[proto])
 		return ERR_PTR(-ENOENT);
 
-	return inet_diag_table[type];
+	return inet_diag_table[proto];
 }
 
 static inline void inet_diag_unlock_handler(
@@ -71,21 +70,21 @@
 	mutex_unlock(&inet_diag_table_mutex);
 }
 
-static int inet_csk_diag_fill(struct sock *sk,
-			      struct sk_buff *skb,
-			      int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+			      struct sk_buff *skb, struct inet_diag_req *req,
+			      u32 pid, u32 seq, u16 nlmsg_flags,
 			      const struct nlmsghdr *unlh)
 {
 	const struct inet_sock *inet = inet_sk(sk);
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_diag_msg *r;
 	struct nlmsghdr  *nlh;
 	void *info = NULL;
 	struct inet_diag_meminfo  *minfo = NULL;
 	unsigned char	 *b = skb_tail_pointer(skb);
 	const struct inet_diag_handler *handler;
+	int ext = req->idiag_ext;
 
-	handler = inet_diag_table[unlh->nlmsg_type];
+	handler = inet_diag_table[req->sdiag_protocol];
 	BUG_ON(handler == NULL);
 
 	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
@@ -97,25 +96,13 @@
 	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
 		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
 
-	if (ext & (1 << (INET_DIAG_INFO - 1)))
-		info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
-				     handler->idiag_info_size);
-
-	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
-		const size_t len = strlen(icsk->icsk_ca_ops->name);
-
-		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
-		       icsk->icsk_ca_ops->name);
-	}
-
 	r->idiag_family = sk->sk_family;
 	r->idiag_state = sk->sk_state;
 	r->idiag_timer = 0;
 	r->idiag_retrans = 0;
 
 	r->id.idiag_if = sk->sk_bound_dev_if;
-	r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
-	r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+	sock_diag_save_cookie(sk, r->id.idiag_cookie);
 
 	r->id.idiag_sport = inet->inet_sport;
 	r->id.idiag_dport = inet->inet_dport;
@@ -128,20 +115,36 @@
 	if (ext & (1 << (INET_DIAG_TOS - 1)))
 		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (r->idiag_family == AF_INET6) {
 		const struct ipv6_pinfo *np = inet6_sk(sk);
 
+		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
+		*(struct in6_addr *)r->id.idiag_dst = np->daddr;
 		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
 			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
-
-		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-			       &np->rcv_saddr);
-		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-			       &np->daddr);
 	}
 #endif
 
+	r->idiag_uid = sock_i_uid(sk);
+	r->idiag_inode = sock_i_ino(sk);
+
+	if (minfo) {
+		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
+		minfo->idiag_wmem = sk->sk_wmem_queued;
+		minfo->idiag_fmem = sk->sk_forward_alloc;
+		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+	}
+
+	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
+		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
+			goto rtattr_failure;
+
+	if (icsk == NULL) {
+		r->idiag_rqueue = r->idiag_wqueue = 0;
+		goto out;
+	}
+
 #define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
@@ -162,14 +165,14 @@
 	}
 #undef EXPIRES_IN_MS
 
-	r->idiag_uid = sock_i_uid(sk);
-	r->idiag_inode = sock_i_ino(sk);
+	if (ext & (1 << (INET_DIAG_INFO - 1)))
+		info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
 
-	if (minfo) {
-		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
-		minfo->idiag_wmem = sk->sk_wmem_queued;
-		minfo->idiag_fmem = sk->sk_forward_alloc;
-		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+		const size_t len = strlen(icsk->icsk_ca_ops->name);
+
+		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
+		       icsk->icsk_ca_ops->name);
 	}
 
 	handler->idiag_get_info(sk, r, info);
@@ -178,6 +181,7 @@
 	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
 		icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
+out:
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 	return skb->len;
 
@@ -186,10 +190,20 @@
 	nlmsg_trim(skb, b);
 	return -EMSGSIZE;
 }
+EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
+
+static int inet_csk_diag_fill(struct sock *sk,
+			      struct sk_buff *skb, struct inet_diag_req *req,
+			      u32 pid, u32 seq, u16 nlmsg_flags,
+			      const struct nlmsghdr *unlh)
+{
+	return inet_sk_diag_fill(sk, inet_csk(sk),
+			skb, req, pid, seq, nlmsg_flags, unlh);
+}
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-			       struct sk_buff *skb, int ext, u32 pid,
-			       u32 seq, u16 nlmsg_flags,
+			       struct sk_buff *skb, struct inet_diag_req *req,
+			       u32 pid, u32 seq, u16 nlmsg_flags,
 			       const struct nlmsghdr *unlh)
 {
 	long tmo;
@@ -210,8 +224,7 @@
 	r->idiag_family	      = tw->tw_family;
 	r->idiag_retrans      = 0;
 	r->id.idiag_if	      = tw->tw_bound_dev_if;
-	r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
-	r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+	sock_diag_save_cookie(tw, r->id.idiag_cookie);
 	r->id.idiag_sport     = tw->tw_sport;
 	r->id.idiag_dport     = tw->tw_dport;
 	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
@@ -223,15 +236,13 @@
 	r->idiag_wqueue	      = 0;
 	r->idiag_uid	      = 0;
 	r->idiag_inode	      = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (tw->tw_family == AF_INET6) {
 		const struct inet6_timewait_sock *tw6 =
 						inet6_twsk((struct sock *)tw);
 
-		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-			       &tw6->tw_v6_rcv_saddr);
-		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-			       &tw6->tw_v6_daddr);
+		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
+		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
 	}
 #endif
 	nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
@@ -242,42 +253,31 @@
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-			int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+			struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
 			const struct nlmsghdr *unlh)
 {
 	if (sk->sk_state == TCP_TIME_WAIT)
 		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-					   skb, ext, pid, seq, nlmsg_flags,
+					   skb, r, pid, seq, nlmsg_flags,
 					   unlh);
-	return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
+	return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
 }
 
-static int inet_diag_get_exact(struct sk_buff *in_skb,
-			       const struct nlmsghdr *nlh)
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
+		const struct nlmsghdr *nlh, struct inet_diag_req *req)
 {
 	int err;
 	struct sock *sk;
-	struct inet_diag_req *req = NLMSG_DATA(nlh);
 	struct sk_buff *rep;
-	struct inet_hashinfo *hashinfo;
-	const struct inet_diag_handler *handler;
 
-	handler = inet_diag_lock_handler(nlh->nlmsg_type);
-	if (IS_ERR(handler)) {
-		err = PTR_ERR(handler);
-		goto unlock;
-	}
-
-	hashinfo = handler->idiag_hashinfo;
 	err = -EINVAL;
-
-	if (req->idiag_family == AF_INET) {
+	if (req->sdiag_family == AF_INET) {
 		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
 				 req->id.idiag_dport, req->id.idiag_src[0],
 				 req->id.idiag_sport, req->id.idiag_if);
 	}
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-	else if (req->idiag_family == AF_INET6) {
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (req->sdiag_family == AF_INET6) {
 		sk = inet6_lookup(&init_net, hashinfo,
 				  (struct in6_addr *)req->id.idiag_dst,
 				  req->id.idiag_dport,
@@ -287,29 +287,26 @@
 	}
 #endif
 	else {
-		goto unlock;
+		goto out_nosk;
 	}
 
 	err = -ENOENT;
 	if (sk == NULL)
-		goto unlock;
+		goto out_nosk;
 
-	err = -ESTALE;
-	if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
-	     req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
-	    ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
-	     (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+	if (err)
 		goto out;
 
 	err = -ENOMEM;
 	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
 				     sizeof(struct inet_diag_meminfo) +
-				     handler->idiag_info_size + 64)),
+				     sizeof(struct tcp_info) + 64)),
 			GFP_KERNEL);
 	if (!rep)
 		goto out;
 
-	err = sk_diag_fill(sk, rep, req->idiag_ext,
+	err = sk_diag_fill(sk, rep, req,
 			   NETLINK_CB(in_skb).pid,
 			   nlh->nlmsg_seq, 0, nlh);
 	if (err < 0) {
@@ -317,7 +314,7 @@
 		kfree_skb(rep);
 		goto out;
 	}
-	err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
+	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
 			      MSG_DONTWAIT);
 	if (err > 0)
 		err = 0;
@@ -329,8 +326,25 @@
 		else
 			sock_put(sk);
 	}
-unlock:
+out_nosk:
+	return err;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
+
+static int inet_diag_get_exact(struct sk_buff *in_skb,
+			       const struct nlmsghdr *nlh,
+			       struct inet_diag_req *req)
+{
+	const struct inet_diag_handler *handler;
+	int err;
+
+	handler = inet_diag_lock_handler(req->sdiag_protocol);
+	if (IS_ERR(handler))
+		err = PTR_ERR(handler);
+	else
+		err = handler->dump_one(in_skb, nlh, req);
 	inet_diag_unlock_handler(handler);
+
 	return err;
 }
 
@@ -361,9 +375,12 @@
 }
 
 
-static int inet_diag_bc_run(const void *bc, int len,
-			    const struct inet_diag_entry *entry)
+static int inet_diag_bc_run(const struct nlattr *_bc,
+		const struct inet_diag_entry *entry)
 {
+	const void *bc = nla_data(_bc);
+	int len = nla_len(_bc);
+
 	while (len > 0) {
 		int yes = 1;
 		const struct inet_diag_bc_op *op = bc;
@@ -437,6 +454,35 @@
 	return len == 0;
 }
 
+int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+{
+	struct inet_diag_entry entry;
+	struct inet_sock *inet = inet_sk(sk);
+
+	if (bc == NULL)
+		return 1;
+
+	entry.family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+	if (entry.family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		entry.saddr = np->rcv_saddr.s6_addr32;
+		entry.daddr = np->daddr.s6_addr32;
+	} else
+#endif
+	{
+		entry.saddr = &inet->inet_rcv_saddr;
+		entry.daddr = &inet->inet_daddr;
+	}
+	entry.sport = inet->inet_num;
+	entry.dport = ntohs(inet->inet_dport);
+	entry.userlocks = sk->sk_userlocks;
+
+	return inet_diag_bc_run(bc, &entry);
+}
+EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
+
 static int valid_cc(const void *bc, int len, int cc)
 {
 	while (len >= 0) {
@@ -493,57 +539,29 @@
 
 static int inet_csk_diag_dump(struct sock *sk,
 			      struct sk_buff *skb,
-			      struct netlink_callback *cb)
+			      struct netlink_callback *cb,
+			      struct inet_diag_req *r,
+			      const struct nlattr *bc)
 {
-	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+	if (!inet_diag_bc_sk(bc, sk))
+		return 0;
 
-	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-		struct inet_diag_entry entry;
-		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-							  sizeof(*r),
-							  INET_DIAG_REQ_BYTECODE);
-		struct inet_sock *inet = inet_sk(sk);
-
-		entry.family = sk->sk_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-		if (entry.family == AF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(sk);
-
-			entry.saddr = np->rcv_saddr.s6_addr32;
-			entry.daddr = np->daddr.s6_addr32;
-		} else
-#endif
-		{
-			entry.saddr = &inet->inet_rcv_saddr;
-			entry.daddr = &inet->inet_daddr;
-		}
-		entry.sport = inet->inet_num;
-		entry.dport = ntohs(inet->inet_dport);
-		entry.userlocks = sk->sk_userlocks;
-
-		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
-			return 0;
-	}
-
-	return inet_csk_diag_fill(sk, skb, r->idiag_ext,
+	return inet_csk_diag_fill(sk, skb, r,
 				  NETLINK_CB(cb->skb).pid,
 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
 static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
 			       struct sk_buff *skb,
-			       struct netlink_callback *cb)
+			       struct netlink_callback *cb,
+			       struct inet_diag_req *r,
+			       const struct nlattr *bc)
 {
-	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
-	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+	if (bc != NULL) {
 		struct inet_diag_entry entry;
-		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-							  sizeof(*r),
-							  INET_DIAG_REQ_BYTECODE);
 
 		entry.family = tw->tw_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == AF_INET6) {
 			struct inet6_timewait_sock *tw6 =
 						inet6_twsk((struct sock *)tw);
@@ -559,11 +577,11 @@
 		entry.dport = ntohs(tw->tw_dport);
 		entry.userlocks = 0;
 
-		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+		if (!inet_diag_bc_run(bc, &entry))
 			return 0;
 	}
 
-	return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
+	return inet_twsk_diag_fill(tw, skb, r,
 				   NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
@@ -589,8 +607,7 @@
 	r->idiag_retrans = req->retrans;
 
 	r->id.idiag_if = sk->sk_bound_dev_if;
-	r->id.idiag_cookie[0] = (u32)(unsigned long)req;
-	r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
+	sock_diag_save_cookie(req, r->id.idiag_cookie);
 
 	tmo = req->expires - jiffies;
 	if (tmo < 0)
@@ -605,12 +622,10 @@
 	r->idiag_wqueue = 0;
 	r->idiag_uid = sock_i_uid(sk);
 	r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (r->idiag_family == AF_INET6) {
-		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-			       &inet6_rsk(req)->loc_addr);
-		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-			       &inet6_rsk(req)->rmt_addr);
+		*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
+		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
 	}
 #endif
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -623,13 +638,13 @@
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
-			       struct netlink_callback *cb)
+			       struct netlink_callback *cb,
+			       struct inet_diag_req *r,
+			       const struct nlattr *bc)
 {
 	struct inet_diag_entry entry;
-	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt;
-	const struct nlattr *bc = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	int j, s_j;
 	int reqnum, s_reqnum;
@@ -649,9 +664,7 @@
 	if (!lopt || !lopt->qlen)
 		goto out;
 
-	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
-				     INET_DIAG_REQ_BYTECODE);
+	if (bc != NULL) {
 		entry.sport = inet->inet_num;
 		entry.userlocks = sk->sk_userlocks;
 	}
@@ -671,21 +684,20 @@
 
 			if (bc) {
 				entry.saddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 					(entry.family == AF_INET6) ?
 					inet6_rsk(req)->loc_addr.s6_addr32 :
 #endif
 					&ireq->loc_addr;
 				entry.daddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 					(entry.family == AF_INET6) ?
 					inet6_rsk(req)->rmt_addr.s6_addr32 :
 #endif
 					&ireq->rmt_addr;
 				entry.dport = ntohs(ireq->rmt_port);
 
-				if (!inet_diag_bc_run(nla_data(bc),
-						      nla_len(bc), &entry))
+				if (!inet_diag_bc_run(bc, &entry))
 					continue;
 			}
 
@@ -708,19 +720,11 @@
 	return err;
 }
 
-static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
+		struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
 {
 	int i, num;
 	int s_i, s_num;
-	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-	const struct inet_diag_handler *handler;
-	struct inet_hashinfo *hashinfo;
-
-	handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
-	if (IS_ERR(handler))
-		goto unlock;
-
-	hashinfo = handler->idiag_hashinfo;
 
 	s_i = cb->args[1];
 	s_num = num = cb->args[2];
@@ -745,6 +749,10 @@
 					continue;
 				}
 
+				if (r->sdiag_family != AF_UNSPEC &&
+						sk->sk_family != r->sdiag_family)
+					goto next_listen;
+
 				if (r->id.idiag_sport != inet->inet_sport &&
 				    r->id.idiag_sport)
 					goto next_listen;
@@ -754,7 +762,7 @@
 				    cb->args[3] > 0)
 					goto syn_recv;
 
-				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
 					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
@@ -763,7 +771,7 @@
 				if (!(r->idiag_states & TCPF_SYN_RECV))
 					goto next_listen;
 
-				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
+				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
 					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
@@ -785,7 +793,7 @@
 	}
 
 	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
-		goto unlock;
+		goto out;
 
 	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
 		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
@@ -810,13 +818,16 @@
 				goto next_normal;
 			if (!(r->idiag_states & (1 << sk->sk_state)))
 				goto next_normal;
+			if (r->sdiag_family != AF_UNSPEC &&
+					sk->sk_family != r->sdiag_family)
+				goto next_normal;
 			if (r->id.idiag_sport != inet->inet_sport &&
 			    r->id.idiag_sport)
 				goto next_normal;
 			if (r->id.idiag_dport != inet->inet_dport &&
 			    r->id.idiag_dport)
 				goto next_normal;
-			if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+			if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
 				spin_unlock_bh(lock);
 				goto done;
 			}
@@ -832,13 +843,16 @@
 
 				if (num < s_num)
 					goto next_dying;
+				if (r->sdiag_family != AF_UNSPEC &&
+						tw->tw_family != r->sdiag_family)
+					goto next_dying;
 				if (r->id.idiag_sport != tw->tw_sport &&
 				    r->id.idiag_sport)
 					goto next_dying;
 				if (r->id.idiag_dport != tw->tw_dport &&
 				    r->id.idiag_dport)
 					goto next_dying;
-				if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
+				if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
 					spin_unlock_bh(lock);
 					goto done;
 				}
@@ -852,15 +866,85 @@
 done:
 	cb->args[1] = i;
 	cb->args[2] = num;
-unlock:
+out:
+	;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
+
+static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+		struct inet_diag_req *r, struct nlattr *bc)
+{
+	const struct inet_diag_handler *handler;
+
+	handler = inet_diag_lock_handler(r->sdiag_protocol);
+	if (!IS_ERR(handler))
+		handler->dump(skb, cb, r, bc);
 	inet_diag_unlock_handler(handler);
+
 	return skb->len;
 }
 
-static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct nlattr *bc = NULL;
 	int hdrlen = sizeof(struct inet_diag_req);
 
+	if (nlmsg_attrlen(cb->nlh, hdrlen))
+		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+	return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
+}
+
+static inline int inet_diag_type2proto(int type)
+{
+	switch (type) {
+	case TCPDIAG_GETSOCK:
+		return IPPROTO_TCP;
+	case DCCPDIAG_GETSOCK:
+		return IPPROTO_DCCP;
+	default:
+		return 0;
+	}
+}
+
+static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
+	struct inet_diag_req req;
+	struct nlattr *bc = NULL;
+	int hdrlen = sizeof(struct inet_diag_req_compat);
+
+	req.sdiag_family = AF_UNSPEC; /* compatibility */
+	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+	req.idiag_ext = rc->idiag_ext;
+	req.idiag_states = rc->idiag_states;
+	req.id = rc->id;
+
+	if (nlmsg_attrlen(cb->nlh, hdrlen))
+		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+	return __inet_diag_dump(skb, cb, &req, bc);
+}
+
+static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+			       const struct nlmsghdr *nlh)
+{
+	struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
+	struct inet_diag_req req;
+
+	req.sdiag_family = rc->idiag_family;
+	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+	req.idiag_ext = rc->idiag_ext;
+	req.idiag_states = rc->idiag_states;
+	req.id = rc->id;
+
+	return inet_diag_get_exact(in_skb, nlh, &req);
+}
+
+static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	int hdrlen = sizeof(struct inet_diag_req_compat);
+
 	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
@@ -877,28 +961,54 @@
 				return -EINVAL;
 		}
 
-		return netlink_dump_start(idiagnl, skb, nlh,
+		return netlink_dump_start(sock_diag_nlsk, skb, nlh,
+					  inet_diag_dump_compat, NULL, 0);
+	}
+
+	return inet_diag_get_exact_compat(skb, nlh);
+}
+
+static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+	int hdrlen = sizeof(struct inet_diag_req);
+
+	if (nlmsg_len(h) < hdrlen)
+		return -EINVAL;
+
+	if (h->nlmsg_flags & NLM_F_DUMP) {
+		if (nlmsg_attrlen(h, hdrlen)) {
+			struct nlattr *attr;
+			attr = nlmsg_find_attr(h, hdrlen,
+					       INET_DIAG_REQ_BYTECODE);
+			if (attr == NULL ||
+			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
+			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
+				return -EINVAL;
+		}
+
+		return netlink_dump_start(sock_diag_nlsk, skb, h,
 					  inet_diag_dump, NULL, 0);
 	}
 
-	return inet_diag_get_exact(skb, nlh);
+	return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
 }
 
-static DEFINE_MUTEX(inet_diag_mutex);
+static struct sock_diag_handler inet_diag_handler = {
+	.family = AF_INET,
+	.dump = inet_diag_handler_dump,
+};
 
-static void inet_diag_rcv(struct sk_buff *skb)
-{
-	mutex_lock(&inet_diag_mutex);
-	netlink_rcv_skb(skb, &inet_diag_rcv_msg);
-	mutex_unlock(&inet_diag_mutex);
-}
+static struct sock_diag_handler inet6_diag_handler = {
+	.family = AF_INET6,
+	.dump = inet_diag_handler_dump,
+};
 
 int inet_diag_register(const struct inet_diag_handler *h)
 {
 	const __u16 type = h->idiag_type;
 	int err = -EINVAL;
 
-	if (type >= INET_DIAG_GETSOCK_MAX)
+	if (type >= IPPROTO_MAX)
 		goto out;
 
 	mutex_lock(&inet_diag_table_mutex);
@@ -917,7 +1027,7 @@
 {
 	const __u16 type = h->idiag_type;
 
-	if (type >= INET_DIAG_GETSOCK_MAX)
+	if (type >= IPPROTO_MAX)
 		return;
 
 	mutex_lock(&inet_diag_table_mutex);
@@ -928,7 +1038,7 @@
 
 static int __init inet_diag_init(void)
 {
-	const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
+	const int inet_diag_table_size = (IPPROTO_MAX *
 					  sizeof(struct inet_diag_handler *));
 	int err = -ENOMEM;
 
@@ -936,25 +1046,35 @@
 	if (!inet_diag_table)
 		goto out;
 
-	idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
-					inet_diag_rcv, NULL, THIS_MODULE);
-	if (idiagnl == NULL)
-		goto out_free_table;
-	err = 0;
+	err = sock_diag_register(&inet_diag_handler);
+	if (err)
+		goto out_free_nl;
+
+	err = sock_diag_register(&inet6_diag_handler);
+	if (err)
+		goto out_free_inet;
+
+	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
 out:
 	return err;
-out_free_table:
+
+out_free_inet:
+	sock_diag_unregister(&inet_diag_handler);
+out_free_nl:
 	kfree(inet_diag_table);
 	goto out;
 }
 
 static void __exit inet_diag_exit(void)
 {
-	netlink_kernel_release(idiagnl);
+	sock_diag_unregister(&inet6_diag_handler);
+	sock_diag_unregister(&inet_diag_handler);
+	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
 	kfree(inet_diag_table);
 }
 
 module_init(inet_diag_init);
 module_exit(inet_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
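Two refactorings dominate this file: the module-private NETLINK_INET_DIAG socket gives way to the shared sock_diag infrastructure (hence the sock_diag_nlsk uses and the new MODULE_ALIASes), and the open-coded socket-cookie handling becomes sock_diag_save_cookie()/sock_diag_check_cookie(). Those helpers live outside this hunk; a minimal sketch of what the removed inline code did, which is presumably what the helper preserves — the kernel pointer is split into two 32-bit halves, with the double shift avoiding the undefined behaviour of a 32-bit shift on 32-bit builds:

#include <linux/types.h>

static void example_save_cookie(void *ptr, __u32 cookie[2])
{
	cookie[0] = (u32)(unsigned long)ptr;
	cookie[1] = (u32)(((unsigned long)ptr >> 31) >> 1);	/* high half; always 0 on 32-bit kernels */
}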
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fdaabf2..1f23a57 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -392,7 +392,7 @@
 	/* Is this the final fragment? */
 	if ((flags & IP_MF) == 0) {
 		/* If we already have some bits beyond end
-		 * or have different end, the segment is corrrupted.
+		 * or have different end, the segment is corrupted.
 		 */
 		if (end < qp->q.len ||
 		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d55110e..2b53a1f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -46,7 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/gre.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -171,7 +171,7 @@
 	unsigned long	rx_bytes;
 	unsigned long	tx_packets;
 	unsigned long	tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
 {
@@ -729,9 +729,9 @@
 			if ((dst = rt->rt_gateway) == 0)
 				goto tx_error_icmp;
 		}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		else if (skb->protocol == htons(ETH_P_IPV6)) {
-			struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
+			struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
 			const struct in6_addr *addr6;
 			int addr_type;
 
@@ -799,7 +799,7 @@
 			goto tx_error;
 		}
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
 		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -835,6 +835,8 @@
 	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+		if (max_headroom > dev->needed_headroom)
+			dev->needed_headroom = max_headroom;
 		if (!new_skb) {
 			ip_rt_put(rt);
 			dev->stats.tx_dropped++;
@@ -873,7 +875,7 @@
 	if ((iph->ttl = tiph->ttl) == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
 			iph->ttl = old_iph->ttl;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		else if (skb->protocol == htons(ETH_P_IPV6))
 			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 #endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0bc95f3..ff302bd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -206,7 +206,7 @@
 	}
 
 	rcu_read_lock();
-	neigh = dst_get_neighbour(dst);
+	neigh = dst_get_neighbour_noref(dst);
 	if (neigh) {
 		int res = neigh_output(neigh, skb);
 
@@ -319,6 +319,20 @@
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
+/*
+ * copy saddr and daddr, possibly using 64bit load/stores
+ * Equivalent to :
+ *   iph->saddr = fl4->saddr;
+ *   iph->daddr = fl4->daddr;
+ */
+static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
+{
+	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
+		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
+	memcpy(&iph->saddr, &fl4->saddr,
+	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
+}
+
 int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 {
 	struct sock *sk = skb->sk;
@@ -381,8 +395,8 @@
 		iph->frag_off = 0;
 	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 	iph->protocol = sk->sk_protocol;
-	iph->saddr    = fl4->saddr;
-	iph->daddr    = fl4->daddr;
+	ip_copy_addrs(iph, fl4);
+
 	/* Transport layer set skb->h.foo itself. */
 
 	if (inet_opt && inet_opt->opt.optlen) {
@@ -1337,8 +1351,7 @@
 	ip_select_ident(iph, &rt->dst, sk);
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
-	iph->saddr = fl4->saddr;
-	iph->daddr = fl4->daddr;
+	ip_copy_addrs(iph, fl4);
 
 	if (opt) {
 		iph->ihl += opt->optlen>>2;
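ip_copy_addrs() relies on struct flowi4 keeping saddr and daddr adjacent, and the BUILD_BUG_ON() turns that layout assumption into a compile-time check, so the single 8-byte memcpy can never silently copy the wrong fields. The same guard pattern in isolation, with a made-up struct purely for illustration:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_pair {
	__be32 first;
	__be32 second;		/* must directly follow 'first' for the combined copy to be valid */
};

static void example_copy_both(struct example_pair *dst, const struct example_pair *src)
{
	BUILD_BUG_ON(offsetof(struct example_pair, second) !=
		     offsetof(struct example_pair, first) + sizeof(src->first));
	memcpy(&dst->first, &src->first, sizeof(src->first) + sizeof(src->second));
}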
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 09ff51b..8aa87c1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -37,7 +37,7 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/transp_v6.h>
 #endif
 
@@ -55,20 +55,13 @@
 /*
  *	SOL_IP control messages.
  */
+#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))
 
 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
 {
-	struct in_pktinfo info;
-	struct rtable *rt = skb_rtable(skb);
+	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
 
 	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
-	if (rt) {
-		info.ipi_ifindex = rt->rt_iif;
-		info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
-	} else {
-		info.ipi_ifindex = 0;
-		info.ipi_spec_dst.s_addr = 0;
-	}
 
 	put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
 }
@@ -515,7 +508,7 @@
 						sock_owned_by_user(sk));
 		if (inet->is_icsk) {
 			struct inet_connection_sock *icsk = inet_csk(sk);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 			if (sk->sk_family == PF_INET ||
 			    (!((1 << sk->sk_state) &
 			       (TCPF_LISTEN | TCPF_CLOSE)) &&
@@ -526,7 +519,7 @@
 				if (opt)
 					icsk->icsk_ext_hdr_len += opt->opt.optlen;
 				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 			}
 #endif
 		}
@@ -992,20 +985,28 @@
 }
 
 /**
- * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * ipv4_pktinfo_prepare - transfer some info from rtable to skb
  * @sk: socket
  * @skb: buffer
  *
- * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
- * is not set, we drop skb dst entry now, while dst cache line is hot.
+ * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
+ * in skb->cb[] before dst drop.
+ * This way, the receiver doesn't make cache line misses to read rtable.
  */
-int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(struct sk_buff *skb)
 {
-	if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
-		skb_dst_drop(skb);
-	return sock_queue_rcv_skb(sk, skb);
+	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+	const struct rtable *rt = skb_rtable(skb);
+
+	if (rt) {
+		pktinfo->ipi_ifindex = rt->rt_iif;
+		pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+	} else {
+		pktinfo->ipi_ifindex = 0;
+		pktinfo->ipi_spec_dst.s_addr = 0;
+	}
+	skb_dst_drop(skb);
 }
-EXPORT_SYMBOL(ip_queue_rcv_skb);
 
 int ip_setsockopt(struct sock *sk, int level,
 		int optname, char __user *optval, unsigned int optlen)
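With ip_queue_rcv_skb() removed from this file, the data needed for IP_CMSG_PKTINFO is captured into skb->cb[] by ipv4_pktinfo_prepare() while the rtable is still attached, and the dst is dropped immediately afterwards. The call sites are not part of this hunk, so the following is an assumption about the shape of a converted receive path rather than a quote of the real callers:

static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	ipv4_pktinfo_prepare(skb);		/* stash ipi_ifindex/ipi_spec_dst in skb->cb[], drop the dst */
	return sock_queue_rcv_skb(sk, skb);	/* ip_cmsg_recv_pktinfo() later reads PKTINFO_SKB_CB(skb) */
}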
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0da2afc..7e4ec9f 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -253,6 +253,10 @@
 		}
 	}
 
+	/* no point in waiting if we could not bring up at least one device */
+	if (!ic_first_dev)
+		goto have_carrier;
+
 	/* wait for a carrier on at least one device */
 	start = jiffies;
 	while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
@@ -763,13 +767,15 @@
 	struct sk_buff *skb;
 	struct bootp_pkt *b;
 	struct iphdr *h;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
 
 	/* Allocate packet */
-	skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+	skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15,
 			GFP_KERNEL);
 	if (!skb)
 		return;
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 	b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
 	memset(b, 0, sizeof(struct bootp_pkt));
 
@@ -822,8 +828,13 @@
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
 	if (dev_hard_header(skb, dev, ntohs(skb->protocol),
-			    dev->broadcast, dev->dev_addr, skb->len) < 0 ||
-	    dev_queue_xmit(skb) < 0)
+			    dev->broadcast, dev->dev_addr, skb->len) < 0) {
+		kfree_skb(skb);
+		printk("E");
+		return;
+	}
+
+	if (dev_queue_xmit(skb) < 0)
 		printk("E");
 }
 
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd..413ed1b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -148,7 +148,7 @@
 	unsigned long	rx_bytes;
 	unsigned long	tx_packets;
 	unsigned long	tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipip_get_stats(struct net_device *dev)
 {
@@ -285,6 +285,8 @@
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
 
+	strcpy(nt->parms.name, dev->name);
+
 	dev_hold(dev);
 	ipip_tunnel_link(ipn, nt);
 	return nt;
@@ -759,7 +761,6 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 	tunnel->dev = dev;
-	strcpy(tunnel->parms.name, dev->name);
 
 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@
 static int __net_init ipip_init_net(struct net *net)
 {
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
+	struct ip_tunnel *t;
 	int err;
 
 	ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@
 	if ((err = register_netdev(ipn->fb_tunnel_dev)))
 		goto err_reg_dev;
 
+	t = netdev_priv(ipn->fb_tunnel_dev);
+
+	strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
 	return 0;
 
 err_reg_dev:
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 76a7f07..8e54490 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1520,7 +1520,6 @@
 	struct mr_table *mrt;
 	struct vif_device *v;
 	int ct;
-	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
@@ -1529,10 +1528,9 @@
 		v = &mrt->vif_table[0];
 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 			if (v->dev == dev)
-				vif_delete(mrt, ct, 1, &list);
+				vif_delete(mrt, ct, 1, NULL);
 		}
 	}
-	unregister_netdevice_many(&list);
 	return NOTIFY_DONE;
 }
 
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f19f218..74dfc9e 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -27,7 +27,7 @@
 
 config NF_CONNTRACK_PROC_COMPAT
 	bool "proc/sysctl compatibility with old connection tracking"
-	depends on NF_CONNTRACK_IPV4
+	depends on NF_CONNTRACK_PROCFS && NF_CONNTRACK_IPV4
 	default y
 	help
 	  This option enables /proc and sysctl compatibility with the old
@@ -76,11 +76,21 @@
 config IP_NF_MATCH_ECN
 	tristate '"ecn" match support'
 	depends on NETFILTER_ADVANCED
-	help
-	  This option adds a `ECN' match, which allows you to match against
-	  the IPv4 and TCP header ECN fields.
+	select NETFILTER_XT_MATCH_ECN
+	---help---
+	This is a backwards-compat option for the user's convenience
+	(e.g. when running oldconfig). It selects
+	CONFIG_NETFILTER_XT_MATCH_ECN.
+
+config IP_NF_MATCH_RPFILTER
+	tristate '"rpfilter" reverse path filter match support'
+	depends on NETFILTER_ADVANCED
+	---help---
+	  This option allows you to match packets whose replies would
+	  go out via the interface the packet came in.
 
 	  To compile it as a module, choose M here.  If unsure, say N.
+	  The module will be called ipt_rpfilter.
 
 config IP_NF_MATCH_TTL
 	tristate '"ttl" match support'
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index dca2082..213a462 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -49,7 +49,7 @@
 
 # matches
 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
-obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index e59aabd..a057fe6 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -404,6 +404,7 @@
 	int status, type, pid, flags;
 	unsigned int nlmsglen, skblen;
 	struct nlmsghdr *nlh;
+	bool enable_timestamp = false;
 
 	skblen = skb->len;
 	if (skblen < sizeof(*nlh))
@@ -441,12 +442,13 @@
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
-		net_enable_timestamp();
+		enable_timestamp = true;
 		peer_pid = pid;
 	}
 
 	spin_unlock_bh(&queue_lock);
-
+	if (enable_timestamp)
+		net_enable_timestamp();
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
 	if (status < 0)
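The ip_queue change defers net_enable_timestamp() until after queue_lock is released, presumably because that call may sleep and therefore must not run inside a spin_lock_bh() section. The deferral pattern in isolation, with example_lock and first_peer standing in for the real state:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_register_peer(bool first_peer)
{
	bool enable_timestamp = false;

	spin_lock_bh(&example_lock);
	if (first_peer)
		enable_timestamp = true;	/* only record the decision while holding the lock */
	spin_unlock_bh(&example_lock);

	if (enable_timestamp)
		net_enable_timestamp();		/* possibly-sleeping call happens lock-free */
}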
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 9931152..2f210c7 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -30,9 +30,9 @@
 /* FIXME: Multiple targets. --RR */
 static int masquerade_tg_check(const struct xt_tgchk_param *par)
 {
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
-	if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
+	if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
 		pr_debug("bad MAP_IPS.\n");
 		return -EINVAL;
 	}
@@ -49,8 +49,8 @@
 	struct nf_conn *ct;
 	struct nf_conn_nat *nat;
 	enum ip_conntrack_info ctinfo;
-	struct nf_nat_range newrange;
-	const struct nf_nat_multi_range_compat *mr;
+	struct nf_nat_ipv4_range newrange;
+	const struct nf_nat_ipv4_multi_range_compat *mr;
 	const struct rtable *rt;
 	__be32 newsrc;
 
@@ -79,13 +79,13 @@
 	nat->masq_index = par->out->ifindex;
 
 	/* Transfer from original range. */
-	newrange = ((struct nf_nat_range)
-		{ mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+	newrange = ((struct nf_nat_ipv4_range)
+		{ mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
 		  newsrc, newsrc,
 		  mr->range[0].min, mr->range[0].max });
 
 	/* Hand modified range to generic setup. */
-	return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC);
+	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
 }
 
 static int
@@ -139,7 +139,7 @@
 	.name		= "MASQUERADE",
 	.family		= NFPROTO_IPV4,
 	.target		= masquerade_tg,
-	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
 	.table		= "nat",
 	.hooks		= 1 << NF_INET_POST_ROUTING,
 	.checkentry	= masquerade_tg_check,
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 6cdb298..b5bfbba 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -24,9 +24,9 @@
 
 static int netmap_tg_check(const struct xt_tgchk_param *par)
 {
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
-	if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
+	if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
 		pr_debug("bad MAP_IPS.\n");
 		return -EINVAL;
 	}
@@ -43,8 +43,8 @@
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
 	__be32 new_ip, netmask;
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
-	struct nf_nat_range newrange;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+	struct nf_nat_ipv4_range newrange;
 
 	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
 		     par->hooknum == NF_INET_POST_ROUTING ||
@@ -61,8 +61,8 @@
 		new_ip = ip_hdr(skb)->saddr & ~netmask;
 	new_ip |= mr->range[0].min_ip & netmask;
 
-	newrange = ((struct nf_nat_range)
-		{ mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+	newrange = ((struct nf_nat_ipv4_range)
+		{ mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
 		  new_ip, new_ip,
 		  mr->range[0].min, mr->range[0].max });
 
@@ -74,7 +74,7 @@
 	.name 		= "NETMAP",
 	.family		= NFPROTO_IPV4,
 	.target 	= netmap_tg,
-	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
 	.table		= "nat",
 	.hooks		= (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING) |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 18a0656..7c0103a 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -28,9 +28,9 @@
 /* FIXME: Take multiple ranges --RR */
 static int redirect_tg_check(const struct xt_tgchk_param *par)
 {
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
-	if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
+	if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
 		pr_debug("bad MAP_IPS.\n");
 		return -EINVAL;
 	}
@@ -47,8 +47,8 @@
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
 	__be32 newdst;
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
-	struct nf_nat_range newrange;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+	struct nf_nat_ipv4_range newrange;
 
 	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
 		     par->hooknum == NF_INET_LOCAL_OUT);
@@ -76,20 +76,20 @@
 	}
 
 	/* Transfer from original range. */
-	newrange = ((struct nf_nat_range)
-		{ mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+	newrange = ((struct nf_nat_ipv4_range)
+		{ mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
 		  newdst, newdst,
 		  mr->range[0].min, mr->range[0].max });
 
 	/* Hand modified range to generic setup. */
-	return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_DST);
+	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
 }
 
 static struct xt_target redirect_tg_reg __read_mostly = {
 	.name		= "REDIRECT",
 	.family		= NFPROTO_IPV4,
 	.target		= redirect_tg,
-	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
 	.table		= "nat",
 	.hooks		= (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
 	.checkentry	= redirect_tg_check,
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index b550815..ba5756d 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -65,7 +65,7 @@
 module_param(flushtimeout, uint, 0600);
 MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
 
-static int nflog = 1;
+static bool nflog = true;
 module_param(nflog, bool, 0400);
 MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
 
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
deleted file mode 100644
index 2b57e52..0000000
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/* IP tables module for matching the value of the IPv4 and TCP ECN bits
- *
- * (C) 2002 by Harald Welte <laforge@gnumonks.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_ecn.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match for IPv4");
-MODULE_LICENSE("GPL");
-
-static inline bool match_ip(const struct sk_buff *skb,
-			    const struct ipt_ecn_info *einfo)
-{
-	return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
-	       !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
-}
-
-static inline bool match_tcp(const struct sk_buff *skb,
-			     const struct ipt_ecn_info *einfo,
-			     bool *hotdrop)
-{
-	struct tcphdr _tcph;
-	const struct tcphdr *th;
-
-	/* In practice, TCP match does this, so can't fail.  But let's
-	 * be good citizens.
-	 */
-	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
-	if (th == NULL) {
-		*hotdrop = false;
-		return false;
-	}
-
-	if (einfo->operation & IPT_ECN_OP_MATCH_ECE) {
-		if (einfo->invert & IPT_ECN_OP_MATCH_ECE) {
-			if (th->ece == 1)
-				return false;
-		} else {
-			if (th->ece == 0)
-				return false;
-		}
-	}
-
-	if (einfo->operation & IPT_ECN_OP_MATCH_CWR) {
-		if (einfo->invert & IPT_ECN_OP_MATCH_CWR) {
-			if (th->cwr == 1)
-				return false;
-		} else {
-			if (th->cwr == 0)
-				return false;
-		}
-	}
-
-	return true;
-}
-
-static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
-{
-	const struct ipt_ecn_info *info = par->matchinfo;
-
-	if (info->operation & IPT_ECN_OP_MATCH_IP)
-		if (!match_ip(skb, info))
-			return false;
-
-	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-		if (!match_tcp(skb, info, &par->hotdrop))
-			return false;
-	}
-
-	return true;
-}
-
-static int ecn_mt_check(const struct xt_mtchk_param *par)
-{
-	const struct ipt_ecn_info *info = par->matchinfo;
-	const struct ipt_ip *ip = par->entryinfo;
-
-	if (info->operation & IPT_ECN_OP_MATCH_MASK)
-		return -EINVAL;
-
-	if (info->invert & IPT_ECN_OP_MATCH_MASK)
-		return -EINVAL;
-
-	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
-		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct xt_match ecn_mt_reg __read_mostly = {
-	.name		= "ecn",
-	.family		= NFPROTO_IPV4,
-	.match		= ecn_mt,
-	.matchsize	= sizeof(struct ipt_ecn_info),
-	.checkentry	= ecn_mt_check,
-	.me		= THIS_MODULE,
-};
-
-static int __init ecn_mt_init(void)
-{
-	return xt_register_match(&ecn_mt_reg);
-}
-
-static void __exit ecn_mt_exit(void)
-{
-	xt_unregister_match(&ecn_mt_reg);
-}
-
-module_init(ecn_mt_init);
-module_exit(ecn_mt_exit);
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
new file mode 100644
index 0000000..31371be
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * based on fib_frontend.c; Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/route.h>
+
+#include <linux/netfilter/xt_rpfilter.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match");
+
+/* don't try to find route from mcast/bcast/zeronet */
+static __be32 rpfilter_get_saddr(__be32 addr)
+{
+	if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
+	    ipv4_is_zeronet(addr))
+		return 0;
+	return addr;
+}
+
+static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
+				const struct net_device *dev, u8 flags)
+{
+	struct fib_result res;
+	bool dev_match;
+	struct net *net = dev_net(dev);
+	int ret __maybe_unused;
+
+	if (fib_lookup(net, fl4, &res))
+		return false;
+
+	if (res.type != RTN_UNICAST) {
+		if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL))
+			return false;
+	}
+	dev_match = false;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+		struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+		if (nh->nh_dev == dev) {
+			dev_match = true;
+			break;
+		}
+	}
+#else
+	if (FIB_RES_DEV(res) == dev)
+		dev_match = true;
+#endif
+	if (dev_match || flags & XT_RPFILTER_LOOSE)
+		return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
+	return dev_match;
+}
+
+static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_rpfilter_info *info;
+	const struct iphdr *iph;
+	struct flowi4 flow;
+	bool invert;
+
+	info = par->matchinfo;
+	invert = info->flags & XT_RPFILTER_INVERT;
+
+	if (par->in->flags & IFF_LOOPBACK)
+		return true ^ invert;
+
+	iph = ip_hdr(skb);
+	if (ipv4_is_multicast(iph->daddr)) {
+		if (ipv4_is_zeronet(iph->saddr))
+			return ipv4_is_local_multicast(iph->daddr) ^ invert;
+		flow.flowi4_iif = 0;
+	} else {
+		flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+	}
+
+	flow.daddr = iph->saddr;
+	flow.saddr = rpfilter_get_saddr(iph->daddr);
+	flow.flowi4_oif = 0;
+	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+	flow.flowi4_tos = RT_TOS(iph->tos);
+	flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+
+	return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert;
+}
+
+static int rpfilter_check(const struct xt_mtchk_param *par)
+{
+	const struct xt_rpfilter_info *info = par->matchinfo;
+	unsigned int options = ~XT_RPFILTER_OPTION_MASK;
+	if (info->flags & options) {
+		pr_info("unknown options encountered\n");
+		return -EINVAL;
+	}
+
+	if (strcmp(par->table, "mangle") != 0 &&
+	    strcmp(par->table, "raw") != 0) {
+		pr_info("match only valid in the \'raw\' "
+			"or \'mangle\' tables, not \'%s\'.\n", par->table);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_match rpfilter_mt_reg __read_mostly = {
+	.name		= "rpfilter",
+	.family		= NFPROTO_IPV4,
+	.checkentry	= rpfilter_check,
+	.match		= rpfilter_mt,
+	.matchsize	= sizeof(struct xt_rpfilter_info),
+	.hooks		= (1 << NF_INET_PRE_ROUTING),
+	.me		= THIS_MODULE
+};
+
+static int __init rpfilter_mt_init(void)
+{
+	return xt_register_match(&rpfilter_mt_reg);
+}
+
+static void __exit rpfilter_mt_exit(void)
+{
+	xt_unregister_match(&rpfilter_mt_reg);
+}
+
+module_init(rpfilter_mt_init);
+module_exit(rpfilter_mt_exit);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c37641e..0e58f09 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -52,7 +52,7 @@
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
 module_param(forward, bool, 0000);
 
 static int __net_init iptable_filter_net_init(struct net *net)
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 447bc5c..a708933 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -30,7 +30,6 @@
 #include <net/netfilter/nf_nat_helper.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
 static DEFINE_SPINLOCK(nf_nat_lock);
@@ -57,7 +56,7 @@
 	/* Original src, to ensure we map it consistently if poss. */
 	hash = jhash_3words((__force u32)tuple->src.u3.ip,
 			    (__force u32)tuple->src.u.all ^ zone,
-			    tuple->dst.protonum, 0);
+			    tuple->dst.protonum, nf_conntrack_hash_rnd);
 	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
@@ -82,14 +81,14 @@
  * that meet the constraints of range. */
 static int
 in_range(const struct nf_conntrack_tuple *tuple,
-	 const struct nf_nat_range *range)
+	 const struct nf_nat_ipv4_range *range)
 {
 	const struct nf_nat_protocol *proto;
 	int ret = 0;
 
 	/* If we are supposed to map IPs, then we must be in the
 	   range specified, otherwise let this drag us onto a new src IP. */
-	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
+	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
 		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
 		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
 			return 0;
@@ -97,8 +96,8 @@
 
 	rcu_read_lock();
 	proto = __nf_nat_proto_find(tuple->dst.protonum);
-	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
-	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
+	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
+	    proto->in_range(tuple, NF_NAT_MANIP_SRC,
 			    &range->min, &range->max))
 		ret = 1;
 	rcu_read_unlock();
@@ -123,7 +122,7 @@
 find_appropriate_src(struct net *net, u16 zone,
 		     const struct nf_conntrack_tuple *tuple,
 		     struct nf_conntrack_tuple *result,
-		     const struct nf_nat_range *range)
+		     const struct nf_nat_ipv4_range *range)
 {
 	unsigned int h = hash_by_src(net, zone, tuple);
 	const struct nf_conn_nat *nat;
@@ -157,7 +156,7 @@
 */
 static void
 find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
-		    const struct nf_nat_range *range,
+		    const struct nf_nat_ipv4_range *range,
 		    const struct nf_conn *ct,
 		    enum nf_nat_manip_type maniptype)
 {
@@ -166,10 +165,10 @@
 	u_int32_t minip, maxip, j;
 
 	/* No IP mapping?  Do nothing. */
-	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
+	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
 		return;
 
-	if (maniptype == IP_NAT_MANIP_SRC)
+	if (maniptype == NF_NAT_MANIP_SRC)
 		var_ipp = &tuple->src.u3.ip;
 	else
 		var_ipp = &tuple->dst.u3.ip;
@@ -189,7 +188,7 @@
 	minip = ntohl(range->min_ip);
 	maxip = ntohl(range->max_ip);
 	j = jhash_2words((__force u32)tuple->src.u3.ip,
-			 range->flags & IP_NAT_RANGE_PERSISTENT ?
+			 range->flags & NF_NAT_RANGE_PERSISTENT ?
 				0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
 	j = ((u64)j * (maxip - minip + 1)) >> 32;
 	*var_ipp = htonl(minip + j);
@@ -204,7 +203,7 @@
 static void
 get_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_conntrack_tuple *orig_tuple,
-		 const struct nf_nat_range *range,
+		 const struct nf_nat_ipv4_range *range,
 		 struct nf_conn *ct,
 		 enum nf_nat_manip_type maniptype)
 {
@@ -219,8 +218,8 @@
 	   This is only required for source (ie. NAT/masq) mappings.
 	   So far, we don't do local source mappings, so multiple
 	   manips not an issue.  */
-	if (maniptype == IP_NAT_MANIP_SRC &&
-	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
+	if (maniptype == NF_NAT_MANIP_SRC &&
+	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
 		/* try the original tuple first */
 		if (in_range(orig_tuple, range)) {
 			if (!nf_nat_used_tuple(orig_tuple, ct)) {
@@ -247,8 +246,8 @@
 	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
 
 	/* Only bother mapping if it's not already in range and unique */
-	if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-		if (range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) {
+	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
 			if (proto->in_range(tuple, maniptype, &range->min,
 					    &range->max) &&
 			    (range->min.all == range->max.all ||
@@ -267,7 +266,7 @@
 
 unsigned int
 nf_nat_setup_info(struct nf_conn *ct,
-		  const struct nf_nat_range *range,
+		  const struct nf_nat_ipv4_range *range,
 		  enum nf_nat_manip_type maniptype)
 {
 	struct net *net = nf_ct_net(ct);
@@ -284,8 +283,8 @@
 		}
 	}
 
-	NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
-		     maniptype == IP_NAT_MANIP_DST);
+	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
+		     maniptype == NF_NAT_MANIP_DST);
 	BUG_ON(nf_nat_initialized(ct, maniptype));
 
 	/* What we've got will look like inverse of reply. Normally
@@ -306,19 +305,19 @@
 		nf_conntrack_alter_reply(ct, &reply);
 
 		/* Non-atomic: we own this at the moment. */
-		if (maniptype == IP_NAT_MANIP_SRC)
+		if (maniptype == NF_NAT_MANIP_SRC)
 			ct->status |= IPS_SRC_NAT;
 		else
 			ct->status |= IPS_DST_NAT;
 	}
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		unsigned int srchash;
 
 		srchash = hash_by_src(net, nf_ct_zone(ct),
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		spin_lock_bh(&nf_nat_lock);
-		/* nf_conntrack_alter_reply might re-allocate exntension aera */
+		/* nf_conntrack_alter_reply might re-allocate extension area */
 		nat = nfct_nat(ct);
 		nat->ct = ct;
 		hlist_add_head_rcu(&nat->bysource,
@@ -327,7 +326,7 @@
 	}
 
 	/* It's done. */
-	if (maniptype == IP_NAT_MANIP_DST)
+	if (maniptype == NF_NAT_MANIP_DST)
 		ct->status |= IPS_DST_NAT_DONE;
 	else
 		ct->status |= IPS_SRC_NAT_DONE;
@@ -361,7 +360,7 @@
 
 	iph = (void *)skb->data + iphdroff;
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
 		iph->saddr = target->src.u3.ip;
 	} else {
@@ -381,7 +380,7 @@
 	unsigned long statusbit;
 	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
 
-	if (mtype == IP_NAT_MANIP_SRC)
+	if (mtype == NF_NAT_MANIP_SRC)
 		statusbit = IPS_SRC_NAT;
 	else
 		statusbit = IPS_DST_NAT;
@@ -414,8 +413,7 @@
 		struct icmphdr icmp;
 		struct iphdr ip;
 	} *inside;
-	const struct nf_conntrack_l4proto *l4proto;
-	struct nf_conntrack_tuple inner, target;
+	struct nf_conntrack_tuple target;
 	int hdrlen = ip_hdrlen(skb);
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 	unsigned long statusbit;
@@ -447,7 +445,7 @@
 			return 0;
 	}
 
-	if (manip == IP_NAT_MANIP_SRC)
+	if (manip == NF_NAT_MANIP_SRC)
 		statusbit = IPS_SRC_NAT;
 	else
 		statusbit = IPS_DST_NAT;
@@ -463,16 +461,6 @@
 		 "dir %s\n", skb, manip,
 		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
 
-	/* rcu_read_lock()ed by nf_hook_slow */
-	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
-
-	if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
-			     (hdrlen +
-			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
-			     (u_int16_t)AF_INET, inside->ip.protocol,
-			     &inner, l3proto, l4proto))
-		return 0;
-
 	/* Change inner back to look like incoming packet.  We do the
 	   opposite manip on this hook to normal, because it might not
 	   pass all hooks (locally-generated ICMP).  Consider incoming
@@ -575,26 +563,6 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
-	const struct nf_nat_protocol *p;
-
-	rcu_read_lock();
-	p = __nf_nat_proto_find(protonum);
-	if (!try_module_get(p->me))
-		p = &nf_nat_unknown_protocol;
-	rcu_read_unlock();
-
-	return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
-	module_put(p->me);
-}
-
 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
 	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
 	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
@@ -602,7 +570,7 @@
 
 static int nfnetlink_parse_nat_proto(struct nlattr *attr,
 				     const struct nf_conn *ct,
-				     struct nf_nat_range *range)
+				     struct nf_nat_ipv4_range *range)
 {
 	struct nlattr *tb[CTA_PROTONAT_MAX+1];
 	const struct nf_nat_protocol *npt;
@@ -612,21 +580,23 @@
 	if (err < 0)
 		return err;
 
-	npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
+	rcu_read_lock();
+	npt = __nf_nat_proto_find(nf_ct_protonum(ct));
 	if (npt->nlattr_to_range)
 		err = npt->nlattr_to_range(tb, range);
-	nf_nat_proto_put(npt);
+	rcu_read_unlock();
 	return err;
 }
 
 static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
 	[CTA_NAT_MINIP]		= { .type = NLA_U32 },
 	[CTA_NAT_MAXIP]		= { .type = NLA_U32 },
+	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
 };
 
 static int
 nfnetlink_parse_nat(const struct nlattr *nat,
-		    const struct nf_conn *ct, struct nf_nat_range *range)
+		    const struct nf_conn *ct, struct nf_nat_ipv4_range *range)
 {
 	struct nlattr *tb[CTA_NAT_MAX+1];
 	int err;
@@ -646,7 +616,7 @@
 		range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);
 
 	if (range->min_ip)
-		range->flags |= IP_NAT_RANGE_MAP_IPS;
+		range->flags |= NF_NAT_RANGE_MAP_IPS;
 
 	if (!tb[CTA_NAT_PROTO])
 		return 0;
@@ -663,7 +633,7 @@
 			  enum nf_nat_manip_type manip,
 			  const struct nlattr *attr)
 {
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	if (nfnetlink_parse_nat(attr, ct, &range) < 0)
 		return -EINVAL;
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index b9a1136..dc1dd91 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -398,7 +398,7 @@
 static void ip_nat_q931_expect(struct nf_conn *new,
 			       struct nf_conntrack_expect *this)
 {
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	if (this->tuple.src.u3.ip != 0) {	/* Only accept calls from GK */
 		nf_nat_follow_master(new, this);
@@ -409,16 +409,16 @@
 	BUG_ON(new->status & IPS_NAT_DONE_MASK);
 
 	/* Change src to where master sends to */
-	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
-	nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
 	/* For DST manip, map port here to where it's expected. */
-	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
 	range.min = range.max = this->saved_proto;
 	range.min_ip = range.max_ip =
 	    new->master->tuplehash[!this->dir].tuple.src.u3.ip;
-	nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
@@ -496,21 +496,21 @@
 static void ip_nat_callforwarding_expect(struct nf_conn *new,
 					 struct nf_conntrack_expect *this)
 {
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	/* This must be a fresh one. */
 	BUG_ON(new->status & IPS_NAT_DONE_MASK);
 
 	/* Change src to where master sends to */
-	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
-	nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
 	/* For DST manip, map port here to where it's expected. */
-	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
 	range.min = range.max = this->saved_proto;
 	range.min_ip = range.max_ip = this->saved_ip;
-	nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
+	nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index ebc5f88..af65958 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -253,12 +253,6 @@
 	struct udphdr *udph;
 	int datalen, oldlen;
 
-	/* UDP helpers might accidentally mangle the wrong packet */
-	iph = ip_hdr(skb);
-	if (skb->len < iph->ihl*4 + sizeof(*udph) +
-			       match_offset + match_len)
-		return 0;
-
 	if (!skb_make_writable(skb, skb->len))
 		return 0;
 
@@ -430,22 +424,22 @@
 void nf_nat_follow_master(struct nf_conn *ct,
 			  struct nf_conntrack_expect *exp)
 {
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	/* This must be a fresh one. */
 	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
 	/* Change src to where master sends to */
-	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_ip = range.max_ip
 		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
-	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
 	/* For DST manip, map port here to where it's expected. */
-	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
 	range.min = range.max = exp->saved_proto;
 	range.min_ip = range.max_ip
 		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
-	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
 EXPORT_SYMBOL(nf_nat_follow_master);
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 3e8284b..c273d58 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -47,7 +47,7 @@
 	struct nf_conntrack_tuple t;
 	const struct nf_ct_pptp_master *ct_pptp_info;
 	const struct nf_nat_pptp *nat_pptp_info;
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
 	nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
@@ -88,24 +88,24 @@
 	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
 	/* Change src to where master sends to */
-	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_ip = range.max_ip
 		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
 	if (exp->dir == IP_CT_DIR_ORIGINAL) {
-		range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 		range.min = range.max = exp->saved_proto;
 	}
-	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
 	/* For DST manip, map port here to where it's expected. */
-	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_ip = range.max_ip
 		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
 	if (exp->dir == IP_CT_DIR_REPLY) {
-		range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 		range.min = range.max = exp->saved_proto;
 	}
-	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
 
 /* outbound packets == from PNS to PAC */
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index a3d9976..9993bc9 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -26,7 +26,7 @@
 {
 	__be16 port;
 
-	if (maniptype == IP_NAT_MANIP_SRC)
+	if (maniptype == NF_NAT_MANIP_SRC)
 		port = tuple->src.u.all;
 	else
 		port = tuple->dst.u.all;
@@ -37,7 +37,7 @@
 EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
 
 void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-			       const struct nf_nat_range *range,
+			       const struct nf_nat_ipv4_range *range,
 			       enum nf_nat_manip_type maniptype,
 			       const struct nf_conn *ct,
 			       u_int16_t *rover)
@@ -46,15 +46,15 @@
 	__be16 *portptr;
 	u_int16_t off;
 
-	if (maniptype == IP_NAT_MANIP_SRC)
+	if (maniptype == NF_NAT_MANIP_SRC)
 		portptr = &tuple->src.u.all;
 	else
 		portptr = &tuple->dst.u.all;
 
 	/* If no range specified... */
-	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
 		/* If it's dst rewrite, can't change port */
-		if (maniptype == IP_NAT_MANIP_DST)
+		if (maniptype == NF_NAT_MANIP_DST)
 			return;
 
 		if (ntohs(*portptr) < 1024) {
@@ -75,9 +75,9 @@
 		range_size = ntohs(range->max.all) - min + 1;
 	}
 
-	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+	if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
 		off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
-						 maniptype == IP_NAT_MANIP_SRC
+						 maniptype == NF_NAT_MANIP_SRC
 						 ? tuple->dst.u.all
 						 : tuple->src.u.all);
 	else
@@ -87,7 +87,7 @@
 		*portptr = htons(min + off % range_size);
 		if (++i != range_size && nf_nat_used_tuple(tuple, ct))
 			continue;
-		if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
+		if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM))
 			*rover = off;
 		return;
 	}
@@ -96,31 +96,19 @@
 EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
 
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
-				 const struct nf_nat_range *range)
-{
-	NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all);
-	NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all);
-	return 0;
-
-nla_put_failure:
-	return -1;
-}
-EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
-
 int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-				 struct nf_nat_range *range)
+				 struct nf_nat_ipv4_range *range)
 {
 	if (tb[CTA_PROTONAT_PORT_MIN]) {
 		range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
 		range->max.all = range->min.tcp.port;
-		range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
 	if (tb[CTA_PROTONAT_PORT_MAX]) {
 		range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
-		range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr);
+EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
 #endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c
index 570faf2..3f67138 100644
--- a/net/ipv4/netfilter/nf_nat_proto_dccp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c
@@ -24,7 +24,7 @@
 
 static void
 dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
-		  const struct nf_nat_range *range,
+		  const struct nf_nat_ipv4_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
@@ -54,7 +54,7 @@
 	iph = (struct iphdr *)(skb->data + iphdroff);
 	hdr = (struct dccp_hdr *)(skb->data + hdroff);
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		oldip = iph->saddr;
 		newip = tuple->src.u3.ip;
 		newport = tuple->src.u.dccp.port;
@@ -80,12 +80,10 @@
 
 static const struct nf_nat_protocol nf_nat_protocol_dccp = {
 	.protonum		= IPPROTO_DCCP,
-	.me			= THIS_MODULE,
 	.manip_pkt		= dccp_manip_pkt,
 	.in_range		= nf_nat_proto_in_range,
 	.unique_tuple		= dccp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index bc8d83a..46ba0b9 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -39,7 +39,7 @@
 /* generate unique tuple ... */
 static void
 gre_unique_tuple(struct nf_conntrack_tuple *tuple,
-		 const struct nf_nat_range *range,
+		 const struct nf_nat_ipv4_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
@@ -52,12 +52,12 @@
 	if (!ct->master)
 		return;
 
-	if (maniptype == IP_NAT_MANIP_SRC)
+	if (maniptype == NF_NAT_MANIP_SRC)
 		keyptr = &tuple->src.u.gre.key;
 	else
 		keyptr = &tuple->dst.u.gre.key;
 
-	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
 		pr_debug("%p: NATing GRE PPTP\n", ct);
 		min = 1;
 		range_size = 0xffff;
@@ -99,7 +99,7 @@
 
 	/* we only have destination manip of a packet, since 'source key'
 	 * is not present in the packet itself */
-	if (maniptype != IP_NAT_MANIP_DST)
+	if (maniptype != NF_NAT_MANIP_DST)
 		return true;
 	switch (greh->version) {
 	case GRE_VERSION_1701:
@@ -119,12 +119,10 @@
 
 static const struct nf_nat_protocol gre = {
 	.protonum		= IPPROTO_GRE,
-	.me			= THIS_MODULE,
 	.manip_pkt		= gre_manip_pkt,
 	.in_range		= nf_nat_proto_in_range,
 	.unique_tuple		= gre_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 9f4dc12..b351728 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -30,7 +30,7 @@
 
 static void
 icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
-		  const struct nf_nat_range *range,
+		  const struct nf_nat_ipv4_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
@@ -40,7 +40,7 @@
 
 	range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
 	/* If no range specified... */
-	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
+	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
 		range_size = 0xFFFF;
 
 	for (i = 0; ; ++id) {
@@ -74,12 +74,10 @@
 
 const struct nf_nat_protocol nf_nat_protocol_icmp = {
 	.protonum		= IPPROTO_ICMP,
-	.me			= THIS_MODULE,
 	.manip_pkt		= icmp_manip_pkt,
 	.in_range		= icmp_in_range,
 	.unique_tuple		= icmp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index bd5a80a..3cce9b6 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -19,7 +19,7 @@
 
 static void
 sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
-		  const struct nf_nat_range *range,
+		  const struct nf_nat_ipv4_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
@@ -46,7 +46,7 @@
 	iph = (struct iphdr *)(skb->data + iphdroff);
 	hdr = (struct sctphdr *)(skb->data + hdroff);
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		/* Get rid of src ip and src pt */
 		oldip = iph->saddr;
 		newip = tuple->src.u3.ip;
@@ -70,12 +70,10 @@
 
 static const struct nf_nat_protocol nf_nat_protocol_sctp = {
 	.protonum		= IPPROTO_SCTP,
-	.me			= THIS_MODULE,
 	.manip_pkt		= sctp_manip_pkt,
 	.in_range		= nf_nat_proto_in_range,
 	.unique_tuple		= sctp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 0d67bb8..9fb4b4e 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -23,7 +23,7 @@
 
 static void
 tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
-		 const struct nf_nat_range *range,
+		 const struct nf_nat_ipv4_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
@@ -55,7 +55,7 @@
 	iph = (struct iphdr *)(skb->data + iphdroff);
 	hdr = (struct tcphdr *)(skb->data + hdroff);
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		/* Get rid of src ip and src pt */
 		oldip = iph->saddr;
 		newip = tuple->src.u3.ip;
@@ -82,12 +82,10 @@
 
 const struct nf_nat_protocol nf_nat_protocol_tcp = {
 	.protonum		= IPPROTO_TCP,
-	.me			= THIS_MODULE,
 	.manip_pkt		= tcp_manip_pkt,
 	.in_range		= nf_nat_proto_in_range,
 	.unique_tuple		= tcp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 0b1b860..9883336 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -22,7 +22,7 @@
 
 static void
 udp_unique_tuple(struct nf_conntrack_tuple *tuple,
-		 const struct nf_nat_range *range,
+		 const struct nf_nat_ipv4_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
@@ -47,7 +47,7 @@
 	iph = (struct iphdr *)(skb->data + iphdroff);
 	hdr = (struct udphdr *)(skb->data + hdroff);
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		/* Get rid of src ip and src pt */
 		oldip = iph->saddr;
 		newip = tuple->src.u3.ip;
@@ -73,12 +73,10 @@
 
 const struct nf_nat_protocol nf_nat_protocol_udp = {
 	.protonum		= IPPROTO_UDP,
-	.me			= THIS_MODULE,
 	.manip_pkt		= udp_manip_pkt,
 	.in_range		= nf_nat_proto_in_range,
 	.unique_tuple		= udp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
index f83ef23..d24d10a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -21,7 +21,7 @@
 
 static void
 udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
-		     const struct nf_nat_range *range,
+		     const struct nf_nat_ipv4_range *range,
 		     enum nf_nat_manip_type maniptype,
 		     const struct nf_conn *ct)
 {
@@ -47,7 +47,7 @@
 	iph = (struct iphdr *)(skb->data + iphdroff);
 	hdr = (struct udphdr *)(skb->data + hdroff);
 
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == NF_NAT_MANIP_SRC) {
 		/* Get rid of src ip and src pt */
 		oldip = iph->saddr;
 		newip = tuple->src.u3.ip;
@@ -72,12 +72,10 @@
 
 static const struct nf_nat_protocol nf_nat_protocol_udplite = {
 	.protonum		= IPPROTO_UDPLITE,
-	.me			= THIS_MODULE,
 	.manip_pkt		= udplite_manip_pkt,
 	.in_range		= nf_nat_proto_in_range,
 	.unique_tuple		= udplite_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
 	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
index a50f2bc..e0afe81 100644
--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c
@@ -27,7 +27,7 @@
 }
 
 static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
-				 const struct nf_nat_range *range,
+				 const struct nf_nat_ipv4_range *range,
 				 enum nf_nat_manip_type maniptype,
 				 const struct nf_conn *ct)
 {
@@ -46,7 +46,6 @@
 }
 
 const struct nf_nat_protocol nf_nat_unknown_protocol = {
-	/* .me isn't set: getting a ref to this cannot fail. */
 	.manip_pkt		= unknown_manip_pkt,
 	.in_range		= unknown_in_range,
 	.unique_tuple		= unknown_unique_tuple,
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 733c9ab..d2a9dc31 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -44,7 +44,7 @@
 {
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
 	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
 		     par->hooknum == NF_INET_LOCAL_IN);
@@ -56,7 +56,7 @@
 			    ctinfo == IP_CT_RELATED_REPLY));
 	NF_CT_ASSERT(par->out != NULL);
 
-	return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
+	return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC);
 }
 
 static unsigned int
@@ -64,7 +64,7 @@
 {
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
 	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
 		     par->hooknum == NF_INET_LOCAL_OUT);
@@ -74,12 +74,12 @@
 	/* Connection must be valid and new. */
 	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
 
-	return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
+	return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST);
 }
 
 static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
 {
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
 	/* Must be a valid range */
 	if (mr->rangesize != 1) {
@@ -91,7 +91,7 @@
 
 static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
 {
-	const struct nf_nat_multi_range_compat *mr = par->targinfo;
+	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
 	/* Must be a valid range */
 	if (mr->rangesize != 1) {
@@ -105,13 +105,13 @@
 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 {
 	/* Force range to this IP; let proto decide mapping for
-	   per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+	   per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
 	*/
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	range.flags = 0;
 	pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
-		 HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC ?
+		 HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
 		 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
 		 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
 
@@ -140,7 +140,7 @@
 static struct xt_target ipt_snat_reg __read_mostly = {
 	.name		= "SNAT",
 	.target		= ipt_snat_target,
-	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
 	.table		= "nat",
 	.hooks		= (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
 	.checkentry	= ipt_snat_checkentry,
@@ -150,7 +150,7 @@
 static struct xt_target ipt_dnat_reg __read_mostly = {
 	.name		= "DNAT",
 	.target		= ipt_dnat_target,
-	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
 	.table		= "nat",
 	.hooks		= (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
 	.checkentry	= ipt_dnat_checkentry,
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 78844d9..d0319f9 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -249,25 +249,25 @@
 static void ip_nat_sip_expected(struct nf_conn *ct,
 				struct nf_conntrack_expect *exp)
 {
-	struct nf_nat_range range;
+	struct nf_nat_ipv4_range range;
 
 	/* This must be a fresh one. */
 	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 
 	/* For DST manip, map port here to where it's expected. */
-	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
 	range.min = range.max = exp->saved_proto;
 	range.min_ip = range.max_ip = exp->saved_ip;
-	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 
 	/* Change src to where master sends to, but only if the connection
 	 * actually came from the same source. */
 	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
 	    ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
-		range.flags = IP_NAT_RANGE_MAP_IPS;
+		range.flags = NF_NAT_RANGE_MAP_IPS;
 		range.min_ip = range.max_ip
 			= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
-		nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+		nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 	}
 }
 
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 9290048..3828a42 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -137,7 +137,7 @@
 				return ret;
 		} else
 			pr_debug("Already setup manip %s for ct %p\n",
-				 maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
+				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
 				 ct);
 		break;
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 466ea8b..3569d8e 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -56,17 +56,17 @@
 
 	local_bh_disable();
 	orphans = percpu_counter_sum_positive(&tcp_orphan_count);
-	sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+	sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
 	local_bh_enable();
 
 	socket_seq_show(seq);
 	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
 		   sock_prot_inuse_get(net, &tcp_prot), orphans,
 		   tcp_death_row.tw_count, sockets,
-		   atomic_long_read(&tcp_memory_allocated));
+		   proto_memory_allocated(&tcp_prot));
 	seq_printf(seq, "UDP: inuse %d mem %ld\n",
 		   sock_prot_inuse_get(net, &udp_prot),
-		   atomic_long_read(&udp_memory_allocated));
+		   proto_memory_allocated(&udp_prot));
 	seq_printf(seq, "UDPLITE: inuse %d\n",
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
@@ -288,7 +288,7 @@
 
 	count = 0;
 	for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-		val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+		val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
 		if (val) {
 			type[count] = i;
 			vals[count++] = val;
@@ -307,6 +307,7 @@
 {
 	int i;
 	struct net *net = seq->private;
+	atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
 
 	seq_puts(seq, "\nIcmp: InMsgs InErrors");
 	for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@
 		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
 	for (i=0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-				icmpmibmap[i].index));
+			   atomic_long_read(ptr + icmpmibmap[i].index));
 	seq_printf(seq, " %lu %lu",
 		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
 		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
 	for (i=0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-				icmpmibmap[i].index | 0x100));
+			   atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
 }
 
 /*
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 007e2eb..3ccda5a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -292,7 +292,8 @@
 {
 	/* Charge it to the socket. */
 
-	if (ip_queue_rcv_skb(sk, skb) < 0) {
+	ipv4_pktinfo_prepare(skb);
+	if (sock_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -327,6 +328,7 @@
 	unsigned int iphlen;
 	int err;
 	struct rtable *rt = *rtp;
+	int hlen, tlen;
 
 	if (length > rt->dst.dev->mtu) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -336,12 +338,14 @@
 	if (flags&MSG_PROBE)
 		goto out;
 
+	hlen = LL_RESERVED_SPACE(rt->dst.dev);
+	tlen = rt->dst.dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk,
-				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+				  length + hlen + tlen + 15,
 				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+	skb_reserve(skb, hlen);
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 46af623..bcacf54 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -108,7 +109,6 @@
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
-#include <net/atmclip.h>
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
@@ -120,6 +120,7 @@
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
 static int ip_rt_redirect_number __read_mostly	= 9;
 static int ip_rt_redirect_load __read_mostly	= HZ / 50;
@@ -133,6 +134,9 @@
 static int rt_chain_length_max __read_mostly	= 20;
 static int redirect_genid;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *	Interface to generic destination cache.
  */
@@ -420,7 +424,7 @@
 		int len, HHUptod;
 
 		rcu_read_lock();
-		n = dst_get_neighbour(&r->dst);
+		n = dst_get_neighbour_noref(&r->dst);
 		HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
 		rcu_read_unlock();
 
@@ -830,6 +834,97 @@
 	return ONE;
 }
 
+static void rt_check_expire(void)
+{
+	static unsigned int rover;
+	unsigned int i = rover, goal;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
+	unsigned long samples = 0;
+	unsigned long sum = 0, sum2 = 0;
+	unsigned long delta;
+	u64 mult;
+
+	delta = jiffies - expires_ljiffies;
+	expires_ljiffies = jiffies;
+	mult = ((u64)delta) << rt_hash_log;
+	if (ip_rt_gc_timeout > 1)
+		do_div(mult, ip_rt_gc_timeout);
+	goal = (unsigned int)mult;
+	if (goal > rt_hash_mask)
+		goal = rt_hash_mask + 1;
+	for (; goal > 0; goal--) {
+		unsigned long tmo = ip_rt_gc_timeout;
+		unsigned long length;
+
+		i = (i + 1) & rt_hash_mask;
+		rthp = &rt_hash_table[i].chain;
+
+		if (need_resched())
+			cond_resched();
+
+		samples++;
+
+		if (rcu_dereference_raw(*rthp) == NULL)
+			continue;
+		length = 0;
+		spin_lock_bh(rt_hash_lock_addr(i));
+		while ((rth = rcu_dereference_protected(*rthp,
+					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+			prefetch(rth->dst.rt_next);
+			if (rt_is_expired(rth)) {
+				*rthp = rth->dst.rt_next;
+				rt_free(rth);
+				continue;
+			}
+			if (rth->dst.expires) {
+				/* Entry is expired even if it is in use */
+				if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+					tmo >>= 1;
+					rthp = &rth->dst.rt_next;
+					/*
+					 * We only count entries on
+					 * a chain with equal hash inputs once
+					 * so that entries for different QOS
+					 * levels, and other non-hash input
+					 * attributes don't unfairly skew
+					 * the length computation
+					 */
+					length += has_noalias(rt_hash_table[i].chain, rth);
+					continue;
+				}
+			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+				goto nofree;
+
+			/* Cleanup aged off entries. */
+			*rthp = rth->dst.rt_next;
+			rt_free(rth);
+		}
+		spin_unlock_bh(rt_hash_lock_addr(i));
+		sum += length;
+		sum2 += length*length;
+	}
+	if (samples) {
+		unsigned long avg = sum / samples;
+		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+		rt_chain_length_max = max_t(unsigned long,
+					ip_rt_gc_elasticity,
+					(avg + 4*sd) >> FRACT_BITS);
+	}
+	rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+	rt_check_expire();
+	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1019,23 +1114,18 @@
 
 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-	struct neigh_table *tbl = &arp_tbl;
 	static const __be32 inaddr_any = 0;
 	struct net_device *dev = dst->dev;
 	const __be32 *pkey = daddr;
 	struct neighbour *n;
 
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-	if (dev->type == ARPHRD_ATM)
-		tbl = clip_tbl_hook;
-#endif
 	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
 		pkey = &inaddr_any;
 
-	n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
+	n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
 	if (n)
 		return n;
-	return neigh_create(tbl, pkey, dev);
+	return neigh_create(&arp_tbl, pkey, dev);
 }
 
 static int rt_bind_neighbour(struct rtable *rt)
@@ -1271,7 +1361,7 @@
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		if (rt->peer == NULL)
 			rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1282,7 +1372,7 @@
 			iph->id = htons(inet_getid(rt->peer, more));
 			return;
 		}
-	} else
+	} else if (!rt)
 		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
 		       __builtin_return_address(0));
 
@@ -3179,6 +3269,13 @@
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
+		.procname	= "gc_interval",
+		.data		= &ip_rt_gc_interval,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
 		.procname	= "redirect_load",
 		.data		= &ip_rt_redirect_load,
 		.maxlen		= sizeof(int),
@@ -3388,6 +3485,11 @@
 	devinet_init();
 	ip_fib_init();
 
+	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+	expires_ljiffies = jiffies;
+	schedule_delayed_work(&expires_work,
+		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
 	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 90f6544..51fdbb4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -245,7 +245,7 @@
 	if (!sysctl_tcp_timestamps)
 		return false;
 
-	tcp_opt->sack_ok = (options >> 4) & 0x1;
+	tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
 	*ecn_ok = (options >> 5) & 1;
 	if (*ecn_ok && !sysctl_tcp_ecn)
 		return false;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 69fd720..4aa7e9d 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/nsproxy.h>
+#include <linux/swap.h>
 #include <net/snmp.h>
 #include <net/icmp.h>
 #include <net/ip.h>
@@ -23,6 +24,7 @@
 #include <net/cipso_ipv4.h>
 #include <net/inet_frag.h>
 #include <net/ping.h>
+#include <net/tcp_memcontrol.h>
 
 static int zero;
 static int tcp_retr1_max = 255;
@@ -73,7 +75,7 @@
 }
 
 
-void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
 {
 	gid_t *data = table->data;
 	unsigned seq;
@@ -86,7 +88,7 @@
 }
 
 /* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, int range[2])
+static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
 {
 	gid_t *data = table->data;
 	write_seqlock(&sysctl_local_ports.lock);
@@ -174,6 +176,49 @@
 	return ret;
 }
 
+static int ipv4_tcp_mem(ctl_table *ctl, int write,
+			   void __user *buffer, size_t *lenp,
+			   loff_t *ppos)
+{
+	int ret;
+	unsigned long vec[3];
+	struct net *net = current->nsproxy->net_ns;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+	struct mem_cgroup *memcg;
+#endif
+
+	ctl_table tmp = {
+		.data = &vec,
+		.maxlen = sizeof(vec),
+		.mode = ctl->mode,
+	};
+
+	if (!write) {
+		ctl->data = &net->ipv4.sysctl_tcp_mem;
+		return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
+	}
+
+	ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+	rcu_read_lock();
+	memcg = mem_cgroup_from_task(current);
+
+	tcp_prot_mem(memcg, vec[0], 0);
+	tcp_prot_mem(memcg, vec[1], 1);
+	tcp_prot_mem(memcg, vec[2], 2);
+	rcu_read_unlock();
+#endif
+
+	net->ipv4.sysctl_tcp_mem[0] = vec[0];
+	net->ipv4.sysctl_tcp_mem[1] = vec[1];
+	net->ipv4.sysctl_tcp_mem[2] = vec[2];
+
+	return 0;
+}
+
 static struct ctl_table ipv4_table[] = {
 	{
 		.procname	= "tcp_timestamps",
@@ -433,13 +478,6 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "tcp_mem",
-		.data		= &sysctl_tcp_mem,
-		.maxlen		= sizeof(sysctl_tcp_mem),
-		.mode		= 0644,
-		.proc_handler	= proc_doulongvec_minmax
-	},
-	{
 		.procname	= "tcp_wmem",
 		.data		= &sysctl_tcp_wmem,
 		.maxlen		= sizeof(sysctl_tcp_wmem),
@@ -721,6 +759,12 @@
 		.mode		= 0644,
 		.proc_handler	= ipv4_ping_group_range,
 	},
+	{
+		.procname	= "tcp_mem",
+		.maxlen		= sizeof(init_net.ipv4.sysctl_tcp_mem),
+		.mode		= 0644,
+		.proc_handler	= ipv4_tcp_mem,
+	},
 	{ }
 };
 
@@ -734,6 +778,7 @@
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
+	unsigned long limit;
 
 	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
@@ -769,6 +814,12 @@
 
 	net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+	limit = nr_free_buffer_pages() / 8;
+	limit = max(limit, 128UL);
+	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+	net->ipv4.sysctl_tcp_mem[1] = limit;
+	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+
 	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
 			net_ipv4_ctl_path, table);
 	if (net->ipv4.ipv4_hdr == NULL)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 34f5db1..9bcdec3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,11 +282,9 @@
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
-EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -888,18 +886,18 @@
 }
 EXPORT_SYMBOL(tcp_sendpage);
 
-#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
-#define TCP_OFF(sk)	(sk->sk_sndmsg_off)
-
-static inline int select_size(const struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, bool sg)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;
 
 	if (sg) {
-		if (sk_can_gso(sk))
-			tmp = 0;
-		else {
+		if (sk_can_gso(sk)) {
+			/* Small frames won't use a full page:
+			 * Payload will immediately follow tcp header.
+			 */
+			tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+		} else {
 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 
 			if (tmp >= pgbreak &&
@@ -917,9 +915,9 @@
 	struct iovec *iov;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int iovlen, flags;
+	int iovlen, flags, err, copied;
 	int mss_now, size_goal;
-	int sg, err, copied;
+	bool sg;
 	long timeo;
 
 	lock_sock(sk);
@@ -946,7 +944,7 @@
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 		goto out_err;
 
-	sg = sk->sk_route_caps & NETIF_F_SG;
+	sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
 	while (--iovlen >= 0) {
 		size_t seglen = iov->iov_len;
@@ -1005,8 +1003,13 @@
 			} else {
 				int merge = 0;
 				int i = skb_shinfo(skb)->nr_frags;
-				struct page *page = TCP_PAGE(sk);
-				int off = TCP_OFF(sk);
+				struct page *page = sk->sk_sndmsg_page;
+				int off;
+
+				if (page && page_count(page) == 1)
+					sk->sk_sndmsg_off = 0;
+
+				off = sk->sk_sndmsg_off;
 
 				if (skb_can_coalesce(skb, i, page, off) &&
 				    off != PAGE_SIZE) {
@@ -1023,7 +1026,7 @@
 				} else if (page) {
 					if (off == PAGE_SIZE) {
 						put_page(page);
-						TCP_PAGE(sk) = page = NULL;
+						sk->sk_sndmsg_page = page = NULL;
 						off = 0;
 					}
 				} else
@@ -1049,9 +1052,9 @@
 					/* If this page was new, give it to the
 					 * socket so it does not get leaked.
 					 */
-					if (!TCP_PAGE(sk)) {
-						TCP_PAGE(sk) = page;
-						TCP_OFF(sk) = 0;
+					if (!sk->sk_sndmsg_page) {
+						sk->sk_sndmsg_page = page;
+						sk->sk_sndmsg_off = 0;
 					}
 					goto do_error;
 				}
@@ -1061,15 +1064,15 @@
 					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 				} else {
 					skb_fill_page_desc(skb, i, page, off, copy);
-					if (TCP_PAGE(sk)) {
+					if (sk->sk_sndmsg_page) {
 						get_page(page);
 					} else if (off + copy < PAGE_SIZE) {
 						get_page(page);
-						TCP_PAGE(sk) = page;
+						sk->sk_sndmsg_page = page;
 					}
 				}
 
-				TCP_OFF(sk) = off + copy;
+				sk->sk_sndmsg_off = off + copy;
 			}
 
 			if (!copied)
@@ -2653,7 +2656,8 @@
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct tcphdr *th;
@@ -3272,14 +3276,9 @@
 	sysctl_tcp_max_orphans = cnt / 2;
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
-	limit = nr_free_buffer_pages() / 8;
-	limit = max(limit, 128UL);
-	sysctl_tcp_mem[0] = limit / 4 * 3;
-	sysctl_tcp_mem[1] = limit;
-	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
-
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
-	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+	limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
+		<< (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 850c737..fc6d475 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -292,7 +292,7 @@
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
 	    left * tp->mss_cache < sk->sk_gso_max_size)
 		return 1;
-	return left <= tcp_max_burst(tp);
+	return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
 
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 939edb3..8cd357a 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -34,11 +34,23 @@
 		tcp_get_info(sk, info);
 }
 
+static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+		struct inet_diag_req *r, struct nlattr *bc)
+{
+	inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+}
+
+static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+		struct inet_diag_req *req)
+{
+	return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler tcp_diag_handler = {
-	.idiag_hashinfo	 = &tcp_hashinfo,
+	.dump		 = tcp_diag_dump,
+	.dump_one	 = tcp_diag_dump_one,
 	.idiag_get_info	 = tcp_diag_get_info,
-	.idiag_type	 = TCPDIAG_GETSOCK,
-	.idiag_info_size = sizeof(struct tcp_info),
+	.idiag_type	 = IPPROTO_TCP,
 };
 
 static int __init tcp_diag_init(void)
@@ -54,4 +66,4 @@
 module_init(tcp_diag_init);
 module_exit(tcp_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 52b5c2d..2877c3e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -322,7 +322,7 @@
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_memory_pressure) {
+	    !sk_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -411,8 +411,8 @@
 
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-	    !tcp_memory_pressure &&
-	    atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+	    !sk_under_memory_pressure(sk) &&
+	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
 	}
@@ -865,13 +865,13 @@
 	/* RFC3517 uses different metric in lost marker => reset on change */
 	if (tcp_is_fack(tp))
 		tp->lost_skb_hint = NULL;
-	tp->rx_opt.sack_ok &= ~2;
+	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
 }
 
 /* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
-	tp->rx_opt.sack_ok |= 4;
+	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
 /* Initialize metrics on socket. */
@@ -2663,7 +2663,7 @@
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	else if (sk->sk_family == AF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
 		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
@@ -2858,7 +2858,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	int state = TCP_CA_Open;
 
-	if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
+	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
 		state = TCP_CA_Disorder;
 
 	if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2881,7 +2881,8 @@
 
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
-		tcp_moderate_cwnd(tp);
+		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+			tcp_moderate_cwnd(tp);
 	} else {
 		tcp_cwnd_down(sk, flag);
 	}
@@ -3009,11 +3010,11 @@
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-				  int newly_acked_sacked, int flag)
+				  int newly_acked_sacked, bool is_dupack,
+				  int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
 	int fast_rexmit = 0, mib_idx;
@@ -3066,17 +3067,6 @@
 			}
 			break;
 
-		case TCP_CA_Disorder:
-			tcp_try_undo_dsack(sk);
-			if (!tp->undo_marker ||
-			    /* For SACK case do not Open to allow to undo
-			     * catching for all duplicate ACKs. */
-			    tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
-				tp->undo_marker = 0;
-				tcp_set_ca_state(sk, TCP_CA_Open);
-			}
-			break;
-
 		case TCP_CA_Recovery:
 			if (tcp_is_reno(tp))
 				tcp_reset_reno_sack(tp);
@@ -3117,7 +3107,7 @@
 				tcp_add_reno_sack(sk);
 		}
 
-		if (icsk->icsk_ca_state == TCP_CA_Disorder)
+		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
 			tcp_try_undo_dsack(sk);
 
 		if (!tcp_time_to_recover(sk)) {
@@ -3681,10 +3671,12 @@
 	u32 prior_snd_una = tp->snd_una;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
+	bool is_dupack = false;
 	u32 prior_in_flight;
 	u32 prior_fackets;
 	int prior_packets;
 	int prior_sacked = tp->sacked_out;
+	int pkts_acked = 0;
 	int newly_acked_sacked = 0;
 	int frto_cwnd = 0;
 
@@ -3757,6 +3749,7 @@
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+	pkts_acked = prior_packets - tp->packets_out;
 	newly_acked_sacked = (prior_packets - prior_sacked) -
 			     (tp->packets_out - tp->sacked_out);
 
@@ -3771,8 +3764,9 @@
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight);
-		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-				      newly_acked_sacked, flag);
+		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+				      is_dupack, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3784,6 +3778,10 @@
 	return 1;
 
 no_queue:
+	/* If data was DSACKed, see if we can undo a cwnd reduction. */
+	if (flag & FLAG_DSACKING_ACK)
+		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+				      is_dupack, flag);
 	/* If this ack opens up a zero window, clear backoff.  It was
 	 * being used to time the probes, and is probably far higher than
 	 * it needs to be for normal retransmission.
@@ -3797,10 +3795,14 @@
 	return -1;
 
 old_ack:
+	/* If data was SACKed, tag it and see if we should send more data.
+	 * If data was DSACKed, see if we can undo a cwnd reduction.
+	 */
 	if (TCP_SKB_CB(skb)->sacked) {
-		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-		if (icsk->icsk_ca_state == TCP_CA_Open)
-			tcp_try_keep_open(sk);
+		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+		newly_acked_sacked = tp->sacked_out - prior_sacked;
+		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+				      is_dupack, flag);
 	}
 
 	SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
@@ -3876,7 +3878,7 @@
 			case TCPOPT_SACK_PERM:
 				if (opsize == TCPOLEN_SACK_PERM && th->syn &&
 				    !estab && sysctl_tcp_sack) {
-					opt_rx->sack_ok = 1;
+					opt_rx->sack_ok = TCP_SACK_SEEN;
 					tcp_sack_reset(opt_rx);
 				}
 				break;
@@ -4864,7 +4866,7 @@
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
-	else if (tcp_memory_pressure)
+	else if (sk_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
@@ -4930,11 +4932,11 @@
 		return 0;
 
 	/* If we are under global TCP memory pressure, do not expand.  */
-	if (tcp_memory_pressure)
+	if (sk_under_memory_pressure(sk))
 		return 0;
 
 	/* If we are under soft global TCP memory pressure, do not expand.  */
-	if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
 		return 0;
 
 	/* If we filled the congestion window, do not expand.  */
@@ -5809,6 +5811,8 @@
 			goto discard;
 
 		if (th->syn) {
+			if (th->fin)
+				goto discard;
 			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
 				return 1;
 
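
The sack_ok hunks in tcp_input.c above replace bare bit values with named flags; the values the names must carry follow directly from the constants they replace (1, 2 and 4). A minimal sketch of the implied definitions — the authoritative ones live in the tcp headers, so treat this as illustration only:

/* Sketch of the sack_ok bit names used above; values inferred from the
 * magic numbers they replace and may differ in detail from the header.
 */
#define TCP_SACK_SEEN		(1 << 0)	/* peer advertised SACK		*/
#define TCP_FACK_ENABLED	(1 << 1)	/* FACK enabled locally		*/
#define TCP_DSACK_SEEN		(1 << 2)	/* a D-SACK block was received	*/
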
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a9db4b1..1eb4ad5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -73,6 +73,7 @@
 #include <net/xfrm.h>
 #include <net/netdma.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -1511,6 +1512,7 @@
 	return NULL;
 put_and_exit:
 	tcp_clear_xmit_timers(newsk);
+	tcp_cleanup_congestion_control(newsk);
 	bh_unlock_sock(newsk);
 	sock_put(newsk);
 	goto exit;
@@ -1916,7 +1918,8 @@
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
 	local_bh_disable();
-	percpu_counter_inc(&tcp_sockets_allocated);
+	sock_update_memcg(sk);
+	sk_sockets_allocated_inc(sk);
 	local_bh_enable();
 
 	return 0;
@@ -1972,7 +1975,8 @@
 		tp->cookie_values = NULL;
 	}
 
-	percpu_counter_dec(&tcp_sockets_allocated);
+	sk_sockets_allocated_dec(sk);
+	sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -2619,7 +2623,6 @@
 	.orphan_count		= &tcp_orphan_count,
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
-	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
@@ -2633,10 +2636,14 @@
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+	.init_cgroup		= tcp_init_cgroup,
+	.destroy_cgroup		= tcp_destroy_cgroup,
+	.proto_cgroup		= tcp_proto_cgroup,
+#endif
 };
 EXPORT_SYMBOL(tcp_prot);
 
-
 static int __net_init tcp_sk_init(struct net *net)
 {
 	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
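diff --git note: tcp_v4_init_sock()/tcp_v4_destroy_sock() above now charge the socket to its memory cgroup, and the tcp_memory_pressure / tcp_memory_allocated globals dropped throughout this series are reached via sk_*() wrappers instead. A sketch of the idea behind those wrappers; helper and guard names are from memory and purely illustrative:

/* Per-cgroup counters win when memcg socket accounting is active and the
 * socket belongs to a cgroup; otherwise fall back to the protocol-global
 * state, exactly as the pre-memcg code did.
 */
static inline bool example_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return !!*sk->sk_cgrp->memory_pressure;

	return !!*sk->sk_prot->memory_pressure;
}
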
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
new file mode 100644
index 0000000..7fed04f
--- /dev/null
+++ b/net/ipv4/tcp_memcontrol.c
@@ -0,0 +1,272 @@
+#include <net/tcp.h>
+#include <net/tcp_memcontrol.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <linux/nsproxy.h>
+#include <linux/memcontrol.h>
+#include <linux/module.h>
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+			    const char *buffer);
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
+
+static struct cftype tcp_files[] = {
+	{
+		.name = "kmem.tcp.limit_in_bytes",
+		.write_string = tcp_cgroup_write,
+		.read_u64 = tcp_cgroup_read,
+		.private = RES_LIMIT,
+	},
+	{
+		.name = "kmem.tcp.usage_in_bytes",
+		.read_u64 = tcp_cgroup_read,
+		.private = RES_USAGE,
+	},
+	{
+		.name = "kmem.tcp.failcnt",
+		.private = RES_FAILCNT,
+		.trigger = tcp_cgroup_reset,
+		.read_u64 = tcp_cgroup_read,
+	},
+	{
+		.name = "kmem.tcp.max_usage_in_bytes",
+		.private = RES_MAX_USAGE,
+		.trigger = tcp_cgroup_reset,
+		.read_u64 = tcp_cgroup_read,
+	},
+};
+
+static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
+{
+	return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
+}
+
+static void memcg_tcp_enter_memory_pressure(struct sock *sk)
+{
+	if (sk->sk_cgrp->memory_pressure)
+		*sk->sk_cgrp->memory_pressure = 1;
+}
+EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
+
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+	/*
+	 * The root cgroup does not use res_counters, but rather
+	 * relies on the data already collected by the network
+	 * subsystem.
+	 */
+	struct res_counter *res_parent = NULL;
+	struct cg_proto *cg_proto, *parent_cg;
+	struct tcp_memcontrol *tcp;
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+	struct net *net = current->nsproxy->net_ns;
+
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		goto create_files;
+
+	tcp = tcp_from_cgproto(cg_proto);
+
+	tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
+	tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
+	tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
+	tcp->tcp_memory_pressure = 0;
+
+	parent_cg = tcp_prot.proto_cgroup(parent);
+	if (parent_cg)
+		res_parent = parent_cg->memory_allocated;
+
+	res_counter_init(&tcp->tcp_memory_allocated, res_parent);
+	percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+
+	cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
+	cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
+	cg_proto->sysctl_mem = tcp->tcp_prot_mem;
+	cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
+	cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
+	cg_proto->memcg = memcg;
+
+create_files:
+	return cgroup_add_files(cgrp, ss, tcp_files,
+				ARRAY_SIZE(tcp_files));
+}
+EXPORT_SYMBOL(tcp_init_cgroup);
+
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+	struct cg_proto *cg_proto;
+	struct tcp_memcontrol *tcp;
+	u64 val;
+
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		return;
+
+	tcp = tcp_from_cgproto(cg_proto);
+	percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+
+	val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+
+	if (val != RESOURCE_MAX)
+		jump_label_dec(&memcg_socket_limit_enabled);
+}
+EXPORT_SYMBOL(tcp_destroy_cgroup);
+
+static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
+{
+	struct net *net = current->nsproxy->net_ns;
+	struct tcp_memcontrol *tcp;
+	struct cg_proto *cg_proto;
+	u64 old_lim;
+	int i;
+	int ret;
+
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		return -EINVAL;
+
+	if (val > RESOURCE_MAX)
+		val = RESOURCE_MAX;
+
+	tcp = tcp_from_cgproto(cg_proto);
+
+	old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+	ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 3; i++)
+		tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
+					     net->ipv4.sysctl_tcp_mem[i]);
+
+	if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
+		jump_label_dec(&memcg_socket_limit_enabled);
+	else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
+		jump_label_inc(&memcg_socket_limit_enabled);
+
+	return 0;
+}
+
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+			    const char *buffer)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	unsigned long long val;
+	int ret = 0;
+
+	switch (cft->private) {
+	case RES_LIMIT:
+		/* see memcontrol.c */
+		ret = res_counter_memparse_write_strategy(buffer, &val);
+		if (ret)
+			break;
+		ret = tcp_update_limit(memcg, val);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
+{
+	struct tcp_memcontrol *tcp;
+	struct cg_proto *cg_proto;
+
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		return default_val;
+
+	tcp = tcp_from_cgproto(cg_proto);
+	return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+}
+
+static u64 tcp_read_usage(struct mem_cgroup *memcg)
+{
+	struct tcp_memcontrol *tcp;
+	struct cg_proto *cg_proto;
+
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
+
+	tcp = tcp_from_cgproto(cg_proto);
+	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+}
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	u64 val;
+
+	switch (cft->private) {
+	case RES_LIMIT:
+		val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX);
+		break;
+	case RES_USAGE:
+		val = tcp_read_usage(memcg);
+		break;
+	case RES_FAILCNT:
+	case RES_MAX_USAGE:
+		val = tcp_read_stat(memcg, cft->private, 0);
+		break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+{
+	struct mem_cgroup *memcg;
+	struct tcp_memcontrol *tcp;
+	struct cg_proto *cg_proto;
+
+	memcg = mem_cgroup_from_cont(cont);
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		return 0;
+	tcp = tcp_from_cgproto(cg_proto);
+
+	switch (event) {
+	case RES_MAX_USAGE:
+		res_counter_reset_max(&tcp->tcp_memory_allocated);
+		break;
+	case RES_FAILCNT:
+		res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+		break;
+	}
+
+	return 0;
+}
+
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
+{
+	struct tcp_memcontrol *tcp;
+	struct cg_proto *cg_proto;
+
+	cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
+	if (!cg_proto)
+		return 0;
+
+	tcp = tcp_from_cgproto(cg_proto);
+	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+}
+
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
+{
+	struct tcp_memcontrol *tcp;
+	struct cg_proto *cg_proto;
+
+	cg_proto = tcp_prot.proto_cgroup(memcg);
+	if (!cg_proto)
+		return;
+
+	tcp = tcp_from_cgproto(cg_proto);
+
+	tcp->tcp_prot_mem[idx] = val;
+}
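
A worked example of the limit-to-threshold mapping done by tcp_update_limit() above. The memcg core exposes the cftype entries registered here with a "memory." prefix, so the admin-visible file is typically memory.kmem.tcp.limit_in_bytes; the page size and sysctl values below are assumptions for illustration:

/* Assume 4 KiB pages and a host sysctl_tcp_mem of { 92160, 122880, 184320 }.
 * Writing 64M to the cgroup's kmem.tcp.limit_in_bytes gives:
 *
 *	val               = 67108864 bytes
 *	val >> PAGE_SHIFT = 16384 pages
 *	tcp_prot_mem[i]   = min(16384, sysctl_tcp_mem[i]) = 16384 for i = 0..2
 *
 * and, since the old limit was RESOURCE_MAX, jump_label_inc() flips the
 * memcg_socket_limit_enabled static branch on so the sk_*() accounting
 * wrappers start consulting the cgroup's res_counter.
 */
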
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 66363b6..550e755 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -336,15 +336,15 @@
 		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
 		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 			struct inet6_timewait_sock *tw6;
 
 			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
 			tw6 = inet6_twsk((struct sock *)tw);
-			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
-			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+			tw6->tw_v6_daddr = np->daddr;
+			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
 			tw->tw_tclass = np->tclass;
 			tw->tw_ipv6only = np->ipv6only;
 		}
@@ -425,7 +425,7 @@
  */
 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
-	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
 	if (newsk != NULL) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
@@ -495,7 +495,9 @@
 		newtp->frto_counter = 0;
 		newtp->frto_highmark = 0;
 
-		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
+		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
+		    !try_module_get(newicsk->icsk_ca_ops->owner))
+			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
 		tcp_set_ca_state(newsk, TCP_CA_Open);
 		tcp_init_xmit_timers(newsk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 63170e29..8c8de27 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1093,6 +1093,13 @@
 {
 	int i, k, eat;
 
+	eat = min_t(int, len, skb_headlen(skb));
+	if (eat) {
+		__skb_pull(skb, eat);
+		len -= eat;
+		if (!len)
+			return;
+	}
 	eat = len;
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -1124,11 +1131,7 @@
 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 		return -ENOMEM;
 
-	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
-	if (unlikely(len < skb_headlen(skb)))
-		__skb_pull(skb, len);
-	else
-		__pskb_trim_head(skb, len - skb_headlen(skb));
+	__pskb_trim_head(skb, len);
 
 	TCP_SKB_CB(skb)->seq += len;
 	skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1581,7 +1584,7 @@
 		 * frame, so if we have space for more than 3 frames
 		 * then send now.
 		 */
-		if (limit > tcp_max_burst(tp) * tp->mss_cache)
+		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
 			goto send_now;
 	}
 
@@ -1919,7 +1922,7 @@
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
-		if (tcp_memory_pressure)
+		if (sk_under_memory_pressure(sk))
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
 					       4U * tp->advmss);
 
@@ -2147,7 +2150,15 @@
 	 */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+	/* make sure skb->data is aligned on arches that require it */
+	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+						   GFP_ATOMIC);
+		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+			     -ENOBUFS;
+	} else {
+		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+	}
 
 	if (err == 0) {
 		/* Update global TCP statistics. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 2e0f0af..a516d1e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -171,13 +171,13 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int retry_until;
-	bool do_reset, syn_set = 0;
+	bool do_reset, syn_set = false;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
 			dst_negative_advice(sk);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
-		syn_set = 1;
+		syn_set = true;
 	} else {
 		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
 			/* Black hole detection */
@@ -261,7 +261,7 @@
 	}
 
 out:
-	if (tcp_memory_pressure)
+	if (sk_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
 out_unlock:
 	bh_unlock_sock(sk);
@@ -340,7 +340,7 @@
 			       &inet->inet_daddr, ntohs(inet->inet_dport),
 			       inet->inet_num, tp->snd_una, tp->snd_nxt);
 		}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		else if (sk->sk_family == AF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index ac3b3ee..0177598 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -105,7 +105,7 @@
 	return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int tunnel64_rcv(struct sk_buff *skb)
 {
 	struct xfrm_tunnel *handler;
@@ -134,7 +134,7 @@
 			break;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void tunnel64_err(struct sk_buff *skb, u32 info)
 {
 	struct xfrm_tunnel *handler;
@@ -152,7 +152,7 @@
 	.netns_ok	=	1,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static const struct net_protocol tunnel64_protocol = {
 	.handler	=	tunnel64_rcv,
 	.err_handler	=	tunnel64_err,
@@ -167,7 +167,7 @@
 		printk(KERN_ERR "tunnel4 init: can't add protocol\n");
 		return -EAGAIN;
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
 		printk(KERN_ERR "tunnel64 init: can't add protocol\n");
 		inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
@@ -179,7 +179,7 @@
 
 static void __exit tunnel4_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
 		printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
 #endif
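
tunnel4.c, like tcp_input.c, tcp_minisocks.c, tcp_timer.c and xfrm4_tunnel.c elsewhere in this merge, converts the open-coded IPv6 config test to IS_ENABLED(). The two spellings are equivalent, and the helper is also usable in ordinary C expressions, not only in preprocessor tests:

/* Old and new spellings of "IPv6 is built in or modular". */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)	/* before */
#endif

#if IS_ENABLED(CONFIG_IPV6)					/* after  */
#endif
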
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5a65eea..5d075b5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -445,7 +445,7 @@
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
-static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 		__be16 sport, __be32 daddr, __be16 dport,
 		int dif, struct udp_table *udptable)
 {
@@ -512,6 +512,7 @@
 	rcu_read_unlock();
 	return result;
 }
+EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
 
 static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
 						 __be16 sport, __be16 dport,
@@ -1358,7 +1359,7 @@
 	if (inet_sk(sk)->inet_daddr)
 		sock_rps_save_rxhash(sk, skb);
 
-	rc = ip_queue_rcv_skb(sk, skb);
+	rc = sock_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 
@@ -1474,6 +1475,7 @@
 
 	rc = 0;
 
+	ipv4_pktinfo_prepare(skb);
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
@@ -2247,7 +2249,8 @@
 	return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
new file mode 100644
index 0000000..69f8a7c
--- /dev/null
+++ b/net/ipv4/udp_diag.c
@@ -0,0 +1,201 @@
+/*
+ * udp_diag.c	Module for monitoring UDP transport protocol sockets.
+ *
+ * Authors:	Pavel Emelyanov, <xemul@parallels.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
+		struct netlink_callback *cb, struct inet_diag_req *req,
+		struct nlattr *bc)
+{
+	if (!inet_diag_bc_sk(bc, sk))
+		return 0;
+
+	return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+			cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+}
+
+static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
+		const struct nlmsghdr *nlh, struct inet_diag_req *req)
+{
+	int err = -EINVAL;
+	struct sock *sk;
+	struct sk_buff *rep;
+
+	if (req->sdiag_family == AF_INET)
+		sk = __udp4_lib_lookup(&init_net,
+				req->id.idiag_src[0], req->id.idiag_sport,
+				req->id.idiag_dst[0], req->id.idiag_dport,
+				req->id.idiag_if, tbl);
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (req->sdiag_family == AF_INET6)
+		sk = __udp6_lib_lookup(&init_net,
+				(struct in6_addr *)req->id.idiag_src,
+				req->id.idiag_sport,
+				(struct in6_addr *)req->id.idiag_dst,
+				req->id.idiag_dport,
+				req->id.idiag_if, tbl);
+#endif
+	else
+		goto out_nosk;
+
+	err = -ENOENT;
+	if (sk == NULL)
+		goto out_nosk;
+
+	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+	if (err)
+		goto out;
+
+	err = -ENOMEM;
+	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+				     sizeof(struct inet_diag_meminfo) +
+				     64)), GFP_KERNEL);
+	if (!rep)
+		goto out;
+
+	err = inet_sk_diag_fill(sk, NULL, rep, req,
+			   NETLINK_CB(in_skb).pid,
+			   nlh->nlmsg_seq, 0, nlh);
+	if (err < 0) {
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(rep);
+		goto out;
+	}
+	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+			      MSG_DONTWAIT);
+	if (err > 0)
+		err = 0;
+out:
+	if (sk)
+		sock_put(sk);
+out_nosk:
+	return err;
+}
+
+static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
+		struct inet_diag_req *r, struct nlattr *bc)
+{
+	int num, s_num, slot, s_slot;
+
+	s_slot = cb->args[0];
+	num = s_num = cb->args[1];
+
+	for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+		struct sock *sk;
+		struct hlist_nulls_node *node;
+		struct udp_hslot *hslot = &table->hash[slot];
+
+		if (hlist_nulls_empty(&hslot->head))
+			continue;
+
+		spin_lock_bh(&hslot->lock);
+		sk_nulls_for_each(sk, node, &hslot->head) {
+			struct inet_sock *inet = inet_sk(sk);
+
+			if (num < s_num)
+				goto next;
+			if (!(r->idiag_states & (1 << sk->sk_state)))
+				goto next;
+			if (r->sdiag_family != AF_UNSPEC &&
+					sk->sk_family != r->sdiag_family)
+				goto next;
+			if (r->id.idiag_sport != inet->inet_sport &&
+			    r->id.idiag_sport)
+				goto next;
+			if (r->id.idiag_dport != inet->inet_dport &&
+			    r->id.idiag_dport)
+				goto next;
+
+			if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+				spin_unlock_bh(&hslot->lock);
+				goto done;
+			}
+next:
+			num++;
+		}
+		spin_unlock_bh(&hslot->lock);
+	}
+done:
+	cb->args[0] = slot;
+	cb->args[1] = num;
+}
+
+static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+		struct inet_diag_req *r, struct nlattr *bc)
+{
+	udp_dump(&udp_table, skb, cb, r, bc);
+}
+
+static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+		struct inet_diag_req *req)
+{
+	return udp_dump_one(&udp_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udp_diag_handler = {
+	.dump		 = udp_diag_dump,
+	.dump_one	 = udp_diag_dump_one,
+	.idiag_type	 = IPPROTO_UDP,
+};
+
+static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+		struct inet_diag_req *r, struct nlattr *bc)
+{
+	udp_dump(&udplite_table, skb, cb, r, bc);
+}
+
+static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+		struct inet_diag_req *req)
+{
+	return udp_dump_one(&udplite_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udplite_diag_handler = {
+	.dump		 = udplite_diag_dump,
+	.dump_one	 = udplite_diag_dump_one,
+	.idiag_type	 = IPPROTO_UDPLITE,
+};
+
+static int __init udp_diag_init(void)
+{
+	int err;
+
+	err = inet_diag_register(&udp_diag_handler);
+	if (err)
+		goto out;
+	err = inet_diag_register(&udplite_diag_handler);
+	if (err)
+		goto out_lite;
+out:
+	return err;
+out_lite:
+	inet_diag_unregister(&udp_diag_handler);
+	goto out;
+}
+
+static void __exit udp_diag_exit(void)
+{
+	inet_diag_unregister(&udplite_diag_handler);
+	inet_diag_unregister(&udp_diag_handler);
+}
+
+module_init(udp_diag_init);
+module_exit(udp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
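
The new udp_diag module answers SOCK_DIAG_BY_FAMILY dump requests for UDP and UDP-Lite. Below is a userspace sketch that dumps all IPv4 UDP sockets; the request layout mirrors the kernel's struct inet_diag_req above, which later header versions export as struct inet_diag_req_v2 — that name, and anything else not visible in the patch, is an assumption:

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;	/* "inet_diag_req" in this tree */
	} msg = {
		.nlh = {
			.nlmsg_len	= sizeof(msg),
			.nlmsg_type	= SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags	= NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family	= AF_INET,
			.sdiag_protocol	= IPPROTO_UDP,
			.idiag_states	= -1,		/* all socket states */
		},
	};
	char buf[8192];
	ssize_t len;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0) {
		perror("sock_diag");
		return 1;
	}

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct inet_diag_msg *r;

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR)
				goto out;
			r = NLMSG_DATA(h);
			printf("udp sport %u dport %u inode %u\n",
			       ntohs(r->id.idiag_sport),
			       ntohs(r->id.idiag_dport),
			       r->idiag_inode);
		}
	}
out:
	close(fd);
	return 0;
}

Later iproute2 releases issue essentially this request when ss -uan is run against a kernel that carries this module.
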
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 8280645..9247d9d 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -64,7 +64,7 @@
 	.priority	=	2,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
 	.handler	=	xfrm_tunnel_rcv,
 	.err_handler	=	xfrm_tunnel_err,
@@ -84,7 +84,7 @@
 		xfrm_unregister_type(&ipip_type, AF_INET);
 		return -EAGAIN;
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
 		printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n");
 		xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
@@ -97,7 +97,7 @@
 
 static void __exit ipip_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
 		printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n");
 #endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf88df8..0ba0866 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -630,13 +630,13 @@
 		goto out;
 	}
 
-	rt = addrconf_dst_alloc(idev, addr, 0);
+	rt = addrconf_dst_alloc(idev, addr, false);
 	if (IS_ERR(rt)) {
 		err = PTR_ERR(rt);
 		goto out;
 	}
 
-	ipv6_addr_copy(&ifa->addr, addr);
+	ifa->addr = *addr;
 
 	spin_lock_init(&ifa->lock);
 	spin_lock_init(&ifa->state_lock);
@@ -650,16 +650,6 @@
 
 	ifa->rt = rt;
 
-	/*
-	 * part one of RFC 4429, section 3.3
-	 * We should not configure an address as
-	 * optimistic if we do not yet know the link
-	 * layer address of our nexhop router
-	 */
-
-	if (dst_get_neighbour_raw(&rt->dst) == NULL)
-		ifa->flags &= ~IFA_F_OPTIMISTIC;
-
 	ifa->idev = idev;
 	in6_dev_hold(idev);
 	/* For caller */
@@ -807,7 +797,7 @@
 				ip6_del_rt(rt);
 				rt = NULL;
 			} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
-				rt->rt6i_expires = expires;
+				rt->dst.expires = expires;
 				rt->rt6i_flags |= RTF_EXPIRES;
 			}
 		}
@@ -1228,7 +1218,7 @@
 	if (!hiscore->ifa)
 		return -EADDRNOTAVAIL;
 
-	ipv6_addr_copy(saddr, &hiscore->ifa->addr);
+	*saddr = hiscore->ifa->addr;
 	in6_ifa_put(hiscore->ifa);
 	return 0;
 }
@@ -1249,7 +1239,7 @@
 		list_for_each_entry(ifp, &idev->addr_list, if_list) {
 			if (ifp->scope == IFA_LINK &&
 			    !(ifp->flags & banned_flags)) {
-				ipv6_addr_copy(addr, &ifp->addr);
+				*addr = ifp->addr;
 				err = 0;
 				break;
 			}
@@ -1700,7 +1690,7 @@
 		.fc_protocol = RTPROT_KERNEL,
 	};
 
-	ipv6_addr_copy(&cfg.fc_dst, pfx);
+	cfg.fc_dst = *pfx;
 
 	/* Prevent useless cloning on PtP SIT.
 	   This thing is done here expecting that the whole
@@ -1733,7 +1723,7 @@
 	if (!fn)
 		goto out;
 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_dev->ifindex != dev->ifindex)
+		if (rt->dst.dev->ifindex != dev->ifindex)
 			continue;
 		if ((rt->rt6i_flags & flags) != flags)
 			continue;
@@ -1805,14 +1795,15 @@
 		return ERR_PTR(-EACCES);
 
 	/* Add default multicast route */
-	addrconf_add_mroute(dev);
+	if (!(dev->flags & IFF_LOOPBACK))
+		addrconf_add_mroute(dev);
 
 	/* Add link local route */
 	addrconf_add_lroute(dev);
 	return idev;
 }
 
-void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
+void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
 {
 	struct prefix_info *pinfo;
 	__u32 valid_lft;
@@ -1890,11 +1881,11 @@
 				rt = NULL;
 			} else if (addrconf_finite_timeout(rt_expires)) {
 				/* not infinity */
-				rt->rt6i_expires = jiffies + rt_expires;
+				rt->dst.expires = jiffies + rt_expires;
 				rt->rt6i_flags |= RTF_EXPIRES;
 			} else {
 				rt->rt6i_flags &= ~RTF_EXPIRES;
-				rt->rt6i_expires = 0;
+				rt->dst.expires = 0;
 			}
 		} else if (valid_lft) {
 			clock_t expires = 0;
@@ -1943,7 +1934,7 @@
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 			if (in6_dev->cnf.optimistic_dad &&
-			    !net->ipv6.devconf_all->forwarding)
+			    !net->ipv6.devconf_all->forwarding && sllao)
 				addr_flags = IFA_F_OPTIMISTIC;
 #endif
 
@@ -3077,20 +3068,39 @@
 struct if6_iter_state {
 	struct seq_net_private p;
 	int bucket;
+	int offset;
 };
 
-static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
+static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
 {
 	struct inet6_ifaddr *ifa = NULL;
 	struct if6_iter_state *state = seq->private;
 	struct net *net = seq_file_net(seq);
+	int p = 0;
 
-	for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
+	/* initial bucket if pos is 0 */
+	if (pos == 0) {
+		state->bucket = 0;
+		state->offset = 0;
+	}
+
+	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
 		struct hlist_node *n;
 		hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
-					 addr_lst)
+					 addr_lst) {
+			/* sync with offset */
+			if (p < state->offset) {
+				p++;
+				continue;
+			}
+			state->offset++;
 			if (net_eq(dev_net(ifa->idev->dev), net))
 				return ifa;
+		}
+
+		/* prepare for next bucket */
+		state->offset = 0;
+		p = 0;
 	}
 	return NULL;
 }
@@ -3102,13 +3112,17 @@
 	struct net *net = seq_file_net(seq);
 	struct hlist_node *n = &ifa->addr_lst;
 
-	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
+	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+		state->offset++;
 		if (net_eq(dev_net(ifa->idev->dev), net))
 			return ifa;
+	}
 
 	while (++state->bucket < IN6_ADDR_HSIZE) {
+		state->offset = 0;
 		hlist_for_each_entry_rcu_bh(ifa, n,
 				     &inet6_addr_lst[state->bucket], addr_lst) {
+			state->offset++;
 			if (net_eq(dev_net(ifa->idev->dev), net))
 				return ifa;
 		}
@@ -3117,21 +3131,11 @@
 	return NULL;
 }
 
-static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
-{
-	struct inet6_ifaddr *ifa = if6_get_first(seq);
-
-	if (ifa)
-		while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
-			--pos;
-	return pos ? NULL : ifa;
-}
-
 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(rcu_bh)
 {
 	rcu_read_lock_bh();
-	return if6_get_idx(seq, *pos);
+	return if6_get_first(seq, *pos);
 }
 
 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d27c797..273f48d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -347,7 +347,7 @@
 			 */
 			v4addr = LOOPBACK4_IPV6;
 			if (!(addr_type & IPV6_ADDR_MULTICAST))	{
-				if (!inet->transparent &&
+				if (!(inet->freebind || inet->transparent) &&
 				    !ipv6_chk_addr(net, &addr->sin6_addr,
 						   dev, 0)) {
 					err = -EADDRNOTAVAIL;
@@ -361,10 +361,10 @@
 	inet->inet_rcv_saddr = v4addr;
 	inet->inet_saddr = v4addr;
 
-	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+	np->rcv_saddr = addr->sin6_addr;
 
 	if (!(addr_type & IPV6_ADDR_MULTICAST))
-		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+		np->saddr = addr->sin6_addr;
 
 	/* Make sure we are allowed to bind here. */
 	if (sk->sk_prot->get_port(sk, snum)) {
@@ -458,14 +458,14 @@
 		    peer == 1)
 			return -ENOTCONN;
 		sin->sin6_port = inet->inet_dport;
-		ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+		sin->sin6_addr = np->daddr;
 		if (np->sndflow)
 			sin->sin6_flowinfo = np->flow_label;
 	} else {
 		if (ipv6_addr_any(&np->rcv_saddr))
-			ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
+			sin->sin6_addr = np->saddr;
 		else
-			ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr);
+			sin->sin6_addr = np->rcv_saddr;
 
 		sin->sin6_port = inet->inet_sport;
 	}
@@ -660,8 +660,8 @@
 
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = sk->sk_protocol;
-		ipv6_addr_copy(&fl6.daddr, &np->daddr);
-		ipv6_addr_copy(&fl6.saddr, &np->saddr);
+		fl6.daddr = np->daddr;
+		fl6.saddr = np->saddr;
 		fl6.flowlabel = np->flow_label;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
 		fl6.flowi6_mark = sk->sk_mark;
@@ -769,7 +769,8 @@
 	return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct ipv6hdr *ipv6h;
@@ -985,9 +986,9 @@
 			  sizeof(struct icmpv6_mib),
 			  __alignof__(struct icmpv6_mib)) < 0)
 		goto err_icmp_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
-			  sizeof(struct icmpv6msg_mib),
-			  __alignof__(struct icmpv6msg_mib)) < 0)
+	net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
+						GFP_KERNEL);
+	if (!net->mib.icmpv6msg_statistics)
 		goto err_icmpmsg_mib;
 	return 0;
 
@@ -1008,7 +1009,7 @@
 	snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
 	snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
 	snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
-	snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
+	kfree(net->mib.icmpv6msg_statistics);
 }
 
 static int __net_init inet6_net_init(struct net *net)
@@ -1115,6 +1116,8 @@
 	if (err)
 		goto static_sysctl_fail;
 #endif
+	tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
 	/*
 	 *	ipngwg API draft makes clear that the correct semantics
 	 *	for TCP and UDP is to consider one TCP and UDP instance
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 4c0f894..2ae79db 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -193,9 +193,9 @@
 						printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
 					goto bad;
 				}
-				ipv6_addr_copy(&final_addr, &hao->addr);
-				ipv6_addr_copy(&hao->addr, &iph->saddr);
-				ipv6_addr_copy(&iph->saddr, &final_addr);
+				final_addr = hao->addr;
+				hao->addr = iph->saddr;
+				iph->saddr = final_addr;
 			}
 			break;
 		}
@@ -241,13 +241,13 @@
 	segments = rthdr->hdrlen >> 1;
 
 	addrs = ((struct rt0_hdr *)rthdr)->addr;
-	ipv6_addr_copy(&final_addr, addrs + segments - 1);
+	final_addr = addrs[segments - 1];
 
 	addrs += segments - segments_left;
 	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
 
-	ipv6_addr_copy(addrs, &iph->daddr);
-	ipv6_addr_copy(&iph->daddr, &final_addr);
+	addrs[0] = iph->daddr;
+	iph->daddr = final_addr;
 }
 
 static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 674255f..59402b4 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -75,7 +75,7 @@
 	if (pac == NULL)
 		return -ENOMEM;
 	pac->acl_next = NULL;
-	ipv6_addr_copy(&pac->acl_addr, addr);
+	pac->acl_addr = *addr;
 
 	rcu_read_lock();
 	if (ifindex == 0) {
@@ -83,7 +83,7 @@
 
 		rt = rt6_lookup(net, addr, NULL, 0, 0);
 		if (rt) {
-			dev = rt->rt6i_dev;
+			dev = rt->dst.dev;
 			dst_release(&rt->dst);
 		} else if (ishost) {
 			err = -EADDRNOTAVAIL;
@@ -289,14 +289,14 @@
 		goto out;
 	}
 
-	rt = addrconf_dst_alloc(idev, addr, 1);
+	rt = addrconf_dst_alloc(idev, addr, true);
 	if (IS_ERR(rt)) {
 		kfree(aca);
 		err = PTR_ERR(rt);
 		goto out;
 	}
 
-	ipv6_addr_copy(&aca->aca_addr, addr);
+	aca->aca_addr = *addr;
 	aca->aca_idev = idev;
 	aca->aca_rt = rt;
 	aca->aca_users = 1;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e248069..ae08aee 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -71,7 +71,7 @@
 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 			if (flowlabel == NULL)
 				return -EINVAL;
-			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+			usin->sin6_addr = flowlabel->dst;
 		}
 	}
 
@@ -143,7 +143,7 @@
 		}
 	}
 
-	ipv6_addr_copy(&np->daddr, daddr);
+	np->daddr = *daddr;
 	np->flow_label = fl6.flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
@@ -154,8 +154,8 @@
 	 */
 
 	fl6.flowi6_proto = sk->sk_protocol;
-	ipv6_addr_copy(&fl6.daddr, &np->daddr);
-	ipv6_addr_copy(&fl6.saddr, &np->saddr);
+	fl6.daddr = np->daddr;
+	fl6.saddr = np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = inet->inet_dport;
@@ -179,10 +179,10 @@
 	/* source address lookup done in ip6_dst_lookup */
 
 	if (ipv6_addr_any(&np->saddr))
-		ipv6_addr_copy(&np->saddr, &fl6.saddr);
+		np->saddr = fl6.saddr;
 
 	if (ipv6_addr_any(&np->rcv_saddr)) {
-		ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr);
+		np->rcv_saddr = fl6.saddr;
 		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 		if (sk->sk_prot->rehash)
 			sk->sk_prot->rehash(sk);
@@ -257,7 +257,7 @@
 	skb_put(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	iph = ipv6_hdr(skb);
-	ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+	iph->daddr = fl6->daddr;
 
 	serr = SKB_EXT_ERR(skb);
 	serr->ee.ee_errno = err;
@@ -294,7 +294,7 @@
 	skb_put(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	iph = ipv6_hdr(skb);
-	ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+	iph->daddr = fl6->daddr;
 
 	mtu_info = IP6CBMTU(skb);
 
@@ -303,7 +303,7 @@
 	mtu_info->ip6m_addr.sin6_port = 0;
 	mtu_info->ip6m_addr.sin6_flowinfo = 0;
 	mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
-	ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+	mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
 
 	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
 	skb_reset_transport_header(skb);
@@ -354,8 +354,8 @@
 		sin->sin6_port = serr->port;
 		sin->sin6_scope_id = 0;
 		if (skb->protocol == htons(ETH_P_IPV6)) {
-			ipv6_addr_copy(&sin->sin6_addr,
-				  (struct in6_addr *)(nh + serr->addr_offset));
+			sin->sin6_addr =
+				*(struct in6_addr *)(nh + serr->addr_offset);
 			if (np->sndflow)
 				sin->sin6_flowinfo =
 					(*(__be32 *)(nh + serr->addr_offset - 24) &
@@ -376,7 +376,7 @@
 		sin->sin6_flowinfo = 0;
 		sin->sin6_scope_id = 0;
 		if (skb->protocol == htons(ETH_P_IPV6)) {
-			ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
 				datagram_recv_ctl(sk, msg, skb);
 			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -451,7 +451,7 @@
 		sin->sin6_flowinfo = 0;
 		sin->sin6_port = 0;
 		sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
-		ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+		sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
 	}
 
 	put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
@@ -475,7 +475,7 @@
 		struct in6_pktinfo src_info;
 
 		src_info.ipi6_ifindex = opt->iif;
-		ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+		src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
 		put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
 	}
 
@@ -550,7 +550,7 @@
 		struct in6_pktinfo src_info;
 
 		src_info.ipi6_ifindex = opt->iif;
-		ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+		src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
 		put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
 	}
 	if (np->rxopt.bits.rxohlim) {
@@ -584,7 +584,7 @@
 			 */
 
 			sin6.sin6_family = AF_INET6;
-			ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr);
+			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
 			sin6.sin6_port = ports[1];
 			sin6.sin6_flowinfo = 0;
 			sin6.sin6_scope_id = 0;
@@ -654,12 +654,12 @@
 
 			if (addr_type != IPV6_ADDR_ANY) {
 				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-				if (!inet_sk(sk)->transparent &&
+				if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
 				    !ipv6_chk_addr(net, &src_info->ipi6_addr,
 						   strict ? dev : NULL, 0))
 					err = -EINVAL;
 				else
-					ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr);
+					fl6->saddr = src_info->ipi6_addr;
 			}
 
 			rcu_read_unlock();
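
datagram.c, like the other IPv6 files in this merge, replaces ipv6_addr_copy() calls with plain struct assignment. The helper was essentially a memcpy() wrapper, so the two forms are equivalent:

/* Old helper (include/net/ipv6.h) versus the new direct assignment. */
static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
{
	memcpy(a1, a2, sizeof(struct in6_addr));
}

	ipv6_addr_copy(&np->daddr, daddr);	/* before */
	np->daddr = *daddr;			/* after  */
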
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index bf22a22..3d641b6 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -243,9 +243,9 @@
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->ip_summed = CHECKSUM_NONE;
 
-	ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
-	ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
-	ipv6_addr_copy(&hao->addr, &tmp_addr);
+	tmp_addr = ipv6h->saddr;
+	ipv6h->saddr = hao->addr;
+	hao->addr = tmp_addr;
 
 	if (skb->tstamp.tv64 == 0)
 		__net_timestamp(skb);
@@ -461,9 +461,9 @@
 		return -1;
 	}
 
-	ipv6_addr_copy(&daddr, addr);
-	ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
-	ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
+	daddr = *addr;
+	*addr = ipv6_hdr(skb)->daddr;
+	ipv6_hdr(skb)->daddr = daddr;
 
 	skb_dst_drop(skb);
 	ip6_route_input(skb);
@@ -690,7 +690,7 @@
 		memcpy(phdr->addr, ihdr->addr + 1,
 		       (hops - 1) * sizeof(struct in6_addr));
 
-	ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
+	phdr->addr[hops - 1] = **addr_p;
 	*addr_p = ihdr->addr;
 
 	phdr->rt_hdr.nexthdr = *proto;
@@ -888,8 +888,8 @@
 	if (!opt || !opt->srcrt)
 		return NULL;
 
-	ipv6_addr_copy(orig, &fl6->daddr);
-	ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
+	*orig = fl6->daddr;
+	fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
 	return orig;
 }
 
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 37f548b..72957f4 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -57,6 +57,9 @@
  *	    it returns NULL.
  *	  - First fragment header is skipped, not-first ones
  *	    are considered as unparsable.
+ *	  - Reports the offset field of the final fragment header so it is
+ *	    possible to tell whether this is a first fragment, later fragment,
+ *	    or not fragmented.
  *	  - ESP is unparsable for now and considered like
  *	    normal payload protocol.
  *	  - Note also special handling of AUTH header. Thanks to IPsec wizards.
@@ -64,10 +67,13 @@
  * --ANK (980726)
  */
 
-int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
+int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
+		     __be16 *frag_offp)
 {
 	u8 nexthdr = *nexthdrp;
 
+	*frag_offp = 0;
+
 	while (ipv6_ext_hdr(nexthdr)) {
 		struct ipv6_opt_hdr _hdr, *hp;
 		int hdrlen;
@@ -87,7 +93,8 @@
 			if (fp == NULL)
 				return -1;
 
-			if (ntohs(*fp) & ~0x7)
+			*frag_offp = *fp;
+			if (ntohs(*frag_offp) & ~0x7)
 				break;
 			hdrlen = 8;
 		} else if (nexthdr == NEXTHDR_AUTH)
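
ipv6_skip_exthdr() now reports the Fragment Offset field through a third output parameter, letting callers such as icmp.c above tell first fragments from later ones. A caller-side fragment (not a complete function), reusing the same "& ~0x7" mask the code above applies:

	/* frag_off is the raw (big-endian) Fragment Offset/flags field of
	 * the last fragment header seen, or 0 if the packet was not
	 * fragmented at all.
	 */
	__be16 frag_off;
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	int offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
				      &nexthdr, &frag_off);

	if (offset < 0)
		return;			/* unparsable extension headers */
	if (ntohs(frag_off) & ~0x7)
		return;			/* later fragment: no L4 header here */
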
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 2955715..b6c5731 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -96,7 +96,7 @@
 			if (!ipv6_prefix_equal(&saddr, &r->src.addr,
 					       r->src.plen))
 				goto again;
-			ipv6_addr_copy(&flp6->saddr, &saddr);
+			flp6->saddr = saddr;
 		}
 		goto out;
 	}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 90868fb..01d46bf 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -135,11 +135,12 @@
 	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
 	int len = skb->len - ptr;
 	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+	__be16 frag_off;
 
 	if (len < 0)
 		return 1;
 
-	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
+	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
 	if (ptr < 0)
 		return 0;
 	if (nexthdr == IPPROTO_ICMPV6) {
@@ -290,9 +291,9 @@
 		if (likely(off >= 0)) {
 			hao = (struct ipv6_destopt_hao *)
 					(skb_network_header(skb) + off);
-			ipv6_addr_copy(&tmp, &iph->saddr);
-			ipv6_addr_copy(&iph->saddr, &hao->addr);
-			ipv6_addr_copy(&hao->addr, &tmp);
+			tmp = iph->saddr;
+			iph->saddr = hao->addr;
+			hao->addr = tmp;
 		}
 	}
 }
@@ -444,9 +445,9 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_ICMPV6;
-	ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
+	fl6.daddr = hdr->saddr;
 	if (saddr)
-		ipv6_addr_copy(&fl6.saddr, saddr);
+		fl6.saddr = *saddr;
 	fl6.flowi6_oif = iif;
 	fl6.fl6_icmp_type = type;
 	fl6.fl6_icmp_code = code;
@@ -538,9 +539,9 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_ICMPV6;
-	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+	fl6.daddr = ipv6_hdr(skb)->saddr;
 	if (saddr)
-		ipv6_addr_copy(&fl6.saddr, saddr);
+		fl6.saddr = *saddr;
 	fl6.flowi6_oif = skb->dev->ifindex;
 	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -596,6 +597,7 @@
 	int inner_offset;
 	int hash;
 	u8 nexthdr;
+	__be16 frag_off;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		return;
@@ -603,7 +605,8 @@
 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
 	if (ipv6_ext_hdr(nexthdr)) {
 		/* now skip over extension headers */
-		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+						&nexthdr, &frag_off);
 		if (inner_offset<0)
 			return;
 	} else {
@@ -786,8 +789,8 @@
 		      int oif)
 {
 	memset(fl6, 0, sizeof(*fl6));
-	ipv6_addr_copy(&fl6->saddr, saddr);
-	ipv6_addr_copy(&fl6->daddr, daddr);
+	fl6->saddr = *saddr;
+	fl6->daddr = *daddr;
 	fl6->flowi6_proto 	= IPPROTO_ICMPV6;
 	fl6->fl6_icmp_type	= type;
 	fl6->fl6_icmp_code	= 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 1567fb1..02dd203 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -65,9 +65,9 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+	fl6.daddr = treq->rmt_addr;
 	final_p = fl6_update_dst(&fl6, np->opt, &final);
-	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+	fl6.saddr = treq->loc_addr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -157,7 +157,7 @@
 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
 	sin6->sin6_family = AF_INET6;
-	ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
+	sin6->sin6_addr = np->daddr;
 	sin6->sin6_port	= inet_sk(sk)->inet_dport;
 	/* We do not store received flowlabel for TCP */
 	sin6->sin6_flowinfo = 0;
@@ -215,8 +215,8 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = sk->sk_protocol;
-	ipv6_addr_copy(&fl6.daddr, &np->daddr);
-	ipv6_addr_copy(&fl6.saddr, &np->saddr);
+	fl6.daddr = np->daddr;
+	fl6.saddr = np->saddr;
 	fl6.flowlabel = np->flow_label;
 	IP6_ECN_flow_xmit(sk, fl6.flowlabel);
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -246,7 +246,7 @@
 	skb_dst_set_noref(skb, dst);
 
 	/* Restore final destination back after routing done */
-	ipv6_addr_copy(&fl6.daddr, &np->daddr);
+	fl6.daddr = np->daddr;
 
 	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 	rcu_read_unlock();
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 93718f3..b82bcde 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -190,7 +190,7 @@
 	struct fib6_table *table;
 
 	table = kzalloc(sizeof(*table), GFP_ATOMIC);
-	if (table != NULL) {
+	if (table) {
 		table->tb6_id = id;
 		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
 		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
@@ -210,7 +210,7 @@
 		return tb;
 
 	tb = fib6_alloc_table(net, id);
-	if (tb != NULL)
+	if (tb)
 		fib6_link_table(net, tb);
 
 	return tb;
@@ -367,7 +367,7 @@
 	s_e = cb->args[1];
 
 	w = (void *)cb->args[2];
-	if (w == NULL) {
+	if (!w) {
 		/* New dump:
 		 *
 		 * 1. hook callback destructor.
@@ -379,7 +379,7 @@
 		 * 2. allocate and initialize walker.
 		 */
 		w = kzalloc(sizeof(*w), GFP_ATOMIC);
-		if (w == NULL)
+		if (!w)
 			return -ENOMEM;
 		w->func = fib6_dump_node;
 		cb->args[2] = (long)w;
@@ -425,7 +425,8 @@
 
 static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
 				     int addrlen, int plen,
-				     int offset)
+				     int offset, int allow_create,
+				     int replace_required)
 {
 	struct fib6_node *fn, *in, *ln;
 	struct fib6_node *pn = NULL;
@@ -447,8 +448,18 @@
 		 *	Prefix match
 		 */
 		if (plen < fn->fn_bit ||
-		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
+		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
+			if (!allow_create) {
+				if (replace_required) {
+					pr_warn("IPv6: Can't replace route, "
+						"no match found\n");
+					return ERR_PTR(-ENOENT);
+				}
+				pr_warn("IPv6: NLM_F_CREATE should be set "
+					"when creating new route\n");
+			}
 			goto insert_above;
+		}
 
 		/*
 		 *	Exact match ?
@@ -456,7 +467,7 @@
 
 		if (plen == fn->fn_bit) {
 			/* clean up an intermediate node */
-			if ((fn->fn_flags & RTN_RTINFO) == 0) {
+			if (!(fn->fn_flags & RTN_RTINFO)) {
 				rt6_release(fn->leaf);
 				fn->leaf = NULL;
 			}
@@ -477,6 +488,23 @@
 		fn = dir ? fn->right: fn->left;
 	} while (fn);
 
+	if (!allow_create) {
+		/* We should not create a new node because
+		 * NLM_F_REPLACE was specified without NLM_F_CREATE.
+		 * I assume it is safe to require NLM_F_CREATE when the
+		 * REPLACE flag is used! Later we may want to remove the
+		 * check for replace_required, because according
+		 * to the netlink specification, NLM_F_CREATE
+		 * MUST be specified if a new route is created.
+		 * That would keep IPv6 consistent with IPv4.
+		 */
+		if (replace_required) {
+			pr_warn("IPv6: Can't replace route, no match found\n");
+			return ERR_PTR(-ENOENT);
+		}
+		pr_warn("IPv6: NLM_F_CREATE should be set "
+			"when creating new route\n");
+	}
 	/*
 	 *	We walked to the bottom of tree.
 	 *	Create new leaf node without children.
@@ -484,7 +512,7 @@
 
 	ln = node_alloc();
 
-	if (ln == NULL)
+	if (!ln)
 		return NULL;
 	ln->fn_bit = plen;
 
@@ -527,7 +555,7 @@
 		in = node_alloc();
 		ln = node_alloc();
 
-		if (in == NULL || ln == NULL) {
+		if (!in || !ln) {
 			if (in)
 				node_free(in);
 			if (ln)
@@ -581,7 +609,7 @@
 
 		ln = node_alloc();
 
-		if (ln == NULL)
+		if (!ln)
 			return NULL;
 
 		ln->fn_bit = plen;
@@ -614,10 +642,15 @@
 {
 	struct rt6_info *iter = NULL;
 	struct rt6_info **ins;
+	int replace = (info->nlh &&
+		       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
+	int add = (!info->nlh ||
+		   (info->nlh->nlmsg_flags & NLM_F_CREATE));
+	int found = 0;
 
 	ins = &fn->leaf;
 
-	for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
+	for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
 		/*
 		 *	Search for duplicates
 		 */
@@ -626,17 +659,24 @@
 			/*
 			 *	Same priority level
 			 */
+			if (info->nlh &&
+			    (info->nlh->nlmsg_flags & NLM_F_EXCL))
+				return -EEXIST;
+			if (replace) {
+				found++;
+				break;
+			}
 
-			if (iter->rt6i_dev == rt->rt6i_dev &&
+			if (iter->dst.dev == rt->dst.dev &&
 			    iter->rt6i_idev == rt->rt6i_idev &&
 			    ipv6_addr_equal(&iter->rt6i_gateway,
 					    &rt->rt6i_gateway)) {
-				if (!(iter->rt6i_flags&RTF_EXPIRES))
+				if (!(iter->rt6i_flags & RTF_EXPIRES))
 					return -EEXIST;
-				iter->rt6i_expires = rt->rt6i_expires;
-				if (!(rt->rt6i_flags&RTF_EXPIRES)) {
+				iter->dst.expires = rt->dst.expires;
+				if (!(rt->rt6i_flags & RTF_EXPIRES)) {
 					iter->rt6i_flags &= ~RTF_EXPIRES;
-					iter->rt6i_expires = 0;
+					iter->dst.expires = 0;
 				}
 				return -EEXIST;
 			}
@@ -655,17 +695,40 @@
 	/*
 	 *	insert node
 	 */
+	if (!replace) {
+		if (!add)
+			pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
 
-	rt->dst.rt6_next = iter;
-	*ins = rt;
-	rt->rt6i_node = fn;
-	atomic_inc(&rt->rt6i_ref);
-	inet6_rt_notify(RTM_NEWROUTE, rt, info);
-	info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
+add:
+		rt->dst.rt6_next = iter;
+		*ins = rt;
+		rt->rt6i_node = fn;
+		atomic_inc(&rt->rt6i_ref);
+		inet6_rt_notify(RTM_NEWROUTE, rt, info);
+		info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
 
-	if ((fn->fn_flags & RTN_RTINFO) == 0) {
-		info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
-		fn->fn_flags |= RTN_RTINFO;
+		if (!(fn->fn_flags & RTN_RTINFO)) {
+			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+			fn->fn_flags |= RTN_RTINFO;
+		}
+
+	} else {
+		if (!found) {
+			if (add)
+				goto add;
+			pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+			return -ENOENT;
+		}
+		*ins = rt;
+		rt->rt6i_node = fn;
+		rt->dst.rt6_next = iter->dst.rt6_next;
+		atomic_inc(&rt->rt6i_ref);
+		inet6_rt_notify(RTM_NEWROUTE, rt, info);
+		rt6_release(iter);
+		if (!(fn->fn_flags & RTN_RTINFO)) {
+			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+			fn->fn_flags |= RTN_RTINFO;
+		}
 	}
 
 	return 0;
@@ -674,7 +737,7 @@
 static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
 {
 	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
-	    (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
+	    (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
 		mod_timer(&net->ipv6.ip6_fib_timer,
 			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
@@ -696,11 +759,28 @@
 {
 	struct fib6_node *fn, *pn = NULL;
 	int err = -ENOMEM;
+	int allow_create = 1;
+	int replace_required = 0;
+
+	if (info->nlh) {
+		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
+			allow_create = 0;
+		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+			replace_required = 1;
+	}
+	if (!allow_create && !replace_required)
+		pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
 
 	fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
-			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));
+			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
+			allow_create, replace_required);
 
-	if (fn == NULL)
+	if (IS_ERR(fn)) {
+		err = PTR_ERR(fn);
+		fn = NULL;
+	}
+
+	if (!fn)
 		goto out;
 
 	pn = fn;
@@ -709,7 +789,7 @@
 	if (rt->rt6i_src.plen) {
 		struct fib6_node *sn;
 
-		if (fn->subtree == NULL) {
+		if (!fn->subtree) {
 			struct fib6_node *sfn;
 
 			/*
@@ -724,7 +804,7 @@
 
 			/* Create subtree root node */
 			sfn = node_alloc();
-			if (sfn == NULL)
+			if (!sfn)
 				goto st_failure;
 
 			sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
@@ -736,9 +816,10 @@
 
 			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
 					sizeof(struct in6_addr), rt->rt6i_src.plen,
-					offsetof(struct rt6_info, rt6i_src));
+					offsetof(struct rt6_info, rt6i_src),
+					allow_create, replace_required);
 
-			if (sn == NULL) {
+			if (!sn) {
 				/* If it is failed, discard just allocated
 				   root, and then (in st_failure) stale node
 				   in main tree.
@@ -753,13 +834,18 @@
 		} else {
 			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
 					sizeof(struct in6_addr), rt->rt6i_src.plen,
-					offsetof(struct rt6_info, rt6i_src));
+					offsetof(struct rt6_info, rt6i_src),
+					allow_create, replace_required);
 
-			if (sn == NULL)
+			if (IS_ERR(sn)) {
+				err = PTR_ERR(sn);
+				sn = NULL;
+			}
+			if (!sn)
 				goto st_failure;
 		}
 
-		if (fn->leaf == NULL) {
+		if (!fn->leaf) {
 			fn->leaf = rt;
 			atomic_inc(&rt->rt6i_ref);
 		}
@@ -768,10 +854,9 @@
 #endif
 
 	err = fib6_add_rt2node(fn, rt, info);
-
-	if (err == 0) {
+	if (!err) {
 		fib6_start_gc(info->nl_net, rt);
-		if (!(rt->rt6i_flags&RTF_CACHE))
+		if (!(rt->rt6i_flags & RTF_CACHE))
 			fib6_prune_clones(info->nl_net, pn, rt);
 	}
 
@@ -819,7 +904,7 @@
  */
 
 struct lookup_args {
-	int		offset;		/* key offset on rt6_info	*/
+	int			offset;		/* key offset on rt6_info	*/
 	const struct in6_addr	*addr;		/* search key			*/
 };
 
@@ -849,11 +934,10 @@
 			fn = next;
 			continue;
 		}
-
 		break;
 	}
 
-	while(fn) {
+	while (fn) {
 		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
 			struct rt6key *key;
 
@@ -900,8 +984,7 @@
 	};
 
 	fn = fib6_lookup_1(root, daddr ? args : args + 1);
-
-	if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
+	if (!fn || fn->fn_flags & RTN_TL_ROOT)
 		fn = root;
 
 	return fn;
@@ -961,7 +1044,7 @@
 	}
 #endif
 
-	if (fn && fn->fn_flags&RTN_RTINFO)
+	if (fn && fn->fn_flags & RTN_RTINFO)
 		return fn;
 
 	return NULL;
@@ -975,14 +1058,13 @@
 
 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
 {
-	if (fn->fn_flags&RTN_ROOT)
+	if (fn->fn_flags & RTN_ROOT)
 		return net->ipv6.ip6_null_entry;
 
-	while(fn) {
-		if(fn->left)
+	while (fn) {
+		if (fn->left)
 			return fn->left->leaf;
-
-		if(fn->right)
+		if (fn->right)
 			return fn->right->leaf;
 
 		fn = FIB6_SUBTREE(fn);
@@ -1020,12 +1102,12 @@
 		if (children == 3 || FIB6_SUBTREE(fn)
 #ifdef CONFIG_IPV6_SUBTREES
 		    /* Subtree root (i.e. fn) may have one child */
-		    || (children && fn->fn_flags&RTN_ROOT)
+		    || (children && fn->fn_flags & RTN_ROOT)
 #endif
 		    ) {
 			fn->leaf = fib6_find_prefix(net, fn);
 #if RT6_DEBUG >= 2
-			if (fn->leaf==NULL) {
+			if (!fn->leaf) {
 				WARN_ON(!fn->leaf);
 				fn->leaf = net->ipv6.ip6_null_entry;
 			}
@@ -1058,7 +1140,7 @@
 
 		read_lock(&fib6_walker_lock);
 		FOR_WALKERS(w) {
-			if (child == NULL) {
+			if (!child) {
 				if (w->root == fn) {
 					w->root = w->node = NULL;
 					RT6_TRACE("W %p adjusted by delroot 1\n", w);
@@ -1087,7 +1169,7 @@
 		read_unlock(&fib6_walker_lock);
 
 		node_free(fn);
-		if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
+		if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
 			return pn;
 
 		rt6_release(pn->leaf);
@@ -1121,7 +1203,7 @@
 		if (w->state == FWS_C && w->leaf == rt) {
 			RT6_TRACE("walker %p adjusted by delroute\n", w);
 			w->leaf = rt->dst.rt6_next;
-			if (w->leaf == NULL)
+			if (!w->leaf)
 				w->state = FWS_U;
 		}
 	}
@@ -1130,7 +1212,7 @@
 	rt->dst.rt6_next = NULL;
 
 	/* If it was last route, expunge its radix tree node */
-	if (fn->leaf == NULL) {
+	if (!fn->leaf) {
 		fn->fn_flags &= ~RTN_RTINFO;
 		net->ipv6.rt6_stats->fib_route_nodes--;
 		fn = fib6_repair_tree(net, fn);
@@ -1144,7 +1226,7 @@
 		 * to still alive ones.
 		 */
 		while (fn) {
-			if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
+			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
 				fn->leaf = fib6_find_prefix(net, fn);
 				atomic_inc(&fn->leaf->rt6i_ref);
 				rt6_release(rt);
@@ -1171,17 +1253,17 @@
 		return -ENOENT;
 	}
 #endif
-	if (fn == NULL || rt == net->ipv6.ip6_null_entry)
+	if (!fn || rt == net->ipv6.ip6_null_entry)
 		return -ENOENT;
 
 	WARN_ON(!(fn->fn_flags & RTN_RTINFO));
 
-	if (!(rt->rt6i_flags&RTF_CACHE)) {
+	if (!(rt->rt6i_flags & RTF_CACHE)) {
 		struct fib6_node *pn = fn;
 #ifdef CONFIG_IPV6_SUBTREES
 		/* clones of this route might be in another subtree */
 		if (rt->rt6i_src.plen) {
-			while (!(pn->fn_flags&RTN_ROOT))
+			while (!(pn->fn_flags & RTN_ROOT))
 				pn = pn->parent;
 			pn = pn->parent;
 		}
@@ -1232,11 +1314,11 @@
 
 	for (;;) {
 		fn = w->node;
-		if (fn == NULL)
+		if (!fn)
 			return 0;
 
 		if (w->prune && fn != w->root &&
-		    fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
+		    fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
 			w->state = FWS_C;
 			w->leaf = fn->leaf;
 		}
@@ -1265,7 +1347,7 @@
 			w->state = FWS_C;
 			w->leaf = fn->leaf;
 		case FWS_C:
-			if (w->leaf && fn->fn_flags&RTN_RTINFO) {
+			if (w->leaf && fn->fn_flags & RTN_RTINFO) {
 				int err;
 
 				if (w->count < w->skip) {
@@ -1380,6 +1462,26 @@
 	fib6_walk(&c.w);
 }
 
+void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
+		    int prune, void *arg)
+{
+	struct fib6_table *table;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	unsigned int h;
+
+	rcu_read_lock();
+	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+		head = &net->ipv6.fib_table_hash[h];
+		hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+			read_lock_bh(&table->tb6_lock);
+			fib6_clean_tree(net, &table->tb6_root,
+					func, prune, arg);
+			read_unlock_bh(&table->tb6_lock);
+		}
+	}
+	rcu_read_unlock();
+}
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
 		    int prune, void *arg)
 {
@@ -1439,8 +1541,8 @@
 	 *	only if they are not in use now.
 	 */
 
-	if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
-		if (time_after(now, rt->rt6i_expires)) {
+	if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
+		if (time_after(now, rt->dst.expires)) {
 			RT6_TRACE("expiring %p\n", rt);
 			return -1;
 		}
@@ -1451,7 +1553,7 @@
 			RT6_TRACE("aging clone %p\n", rt);
 			return -1;
 		} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
-			   (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
+			   (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) {
 			RT6_TRACE("purging route %p via non-router but gateway\n",
 				  rt);
 			return -1;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 4566dbd..b7867a1 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -386,7 +386,7 @@
 		err = -EINVAL;
 		goto done;
 	}
-	ipv6_addr_copy(&fl->dst, &freq->flr_dst);
+	fl->dst = freq->flr_dst;
 	atomic_set(&fl->users, 1);
 	switch (fl->share) {
 	case IPV6_FL_S_EXCL:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a46c64e..1ca5d45 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -280,6 +280,7 @@
 			u8 *ptr = skb_network_header(skb) + opt->ra;
 			struct icmp6hdr *icmp6;
 			u8 nexthdr = hdr->nexthdr;
+			__be16 frag_off;
 			int offset;
 
 			/* Check if the value of Router Alert
@@ -293,7 +294,7 @@
 					goto out;
 				}
 				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
-							  &nexthdr);
+							  &nexthdr, &frag_off);
 				if (offset < 0)
 					goto out;
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84d0bd5..d97e071 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -136,7 +136,7 @@
 	}
 
 	rcu_read_lock();
-	neigh = dst_get_neighbour(dst);
+	neigh = dst_get_neighbour_noref(dst);
 	if (neigh) {
 		int res = neigh_output(neigh, skb);
 
@@ -238,8 +238,8 @@
 	hdr->nexthdr = proto;
 	hdr->hop_limit = hlimit;
 
-	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
-	ipv6_addr_copy(&hdr->daddr, first_hop);
+	hdr->saddr = fl6->saddr;
+	hdr->daddr = *first_hop;
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
@@ -290,8 +290,8 @@
 	hdr->nexthdr = proto;
 	hdr->hop_limit = np->hop_limit;
 
-	ipv6_addr_copy(&hdr->saddr, saddr);
-	ipv6_addr_copy(&hdr->daddr, daddr);
+	hdr->saddr = *saddr;
+	hdr->daddr = *daddr;
 
 	return 0;
 }
@@ -329,10 +329,11 @@
 {
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	u8 nexthdr = hdr->nexthdr;
+	__be16 frag_off;
 	int offset;
 
 	if (ipv6_ext_hdr(nexthdr)) {
-		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
+		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
 		if (offset < 0)
 			return 0;
 	} else
@@ -462,7 +463,7 @@
 	   send redirects to source routed frames.
 	   We don't send redirects to frames decapsulated from IPsec.
 	 */
-	n = dst_get_neighbour(dst);
+	n = dst_get_neighbour_noref(dst);
 	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
 		struct in6_addr *target = NULL;
 		struct rt6_info *rt;
@@ -603,7 +604,7 @@
 	static atomic_t ipv6_fragmentation_id;
 	int old, new;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		struct inet_peer *peer;
 
 		if (!rt->rt6i_peer)
@@ -631,6 +632,7 @@
 	struct ipv6hdr *tmp_hdr;
 	struct frag_hdr *fh;
 	unsigned int mtu, hlen, left, len;
+	int hroom, troom;
 	__be32 frag_id = 0;
 	int ptr, offset = 0, err=0;
 	u8 *prevhdr, nexthdr = 0;
@@ -797,6 +799,8 @@
 	 */
 
 	*prevhdr = NEXTHDR_FRAGMENT;
+	hroom = LL_RESERVED_SPACE(rt->dst.dev);
+	troom = rt->dst.dev->needed_tailroom;
 
 	/*
 	 *	Keep copying data until we run out.
@@ -815,7 +819,8 @@
 		 *	Allocate buffer.
 		 */
 
-		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
+		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+				      hroom + troom, GFP_ATOMIC)) == NULL) {
 			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				      IPSTATS_MIB_FRAGFAILS);
@@ -828,7 +833,7 @@
 		 */
 
 		ip6_copy_metadata(frag, skb);
-		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
+		skb_reserve(frag, hroom);
 		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 		skb_reset_network_header(frag);
 		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -978,7 +983,7 @@
 	 * dst entry of the nexthop router
 	 */
 	rcu_read_lock();
-	n = dst_get_neighbour(*dst);
+	n = dst_get_neighbour_noref(*dst);
 	if (n && !(n->nud_state & NUD_VALID)) {
 		struct inet6_ifaddr *ifp;
 		struct flowi6 fl_gw6;
@@ -1059,7 +1064,7 @@
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
-		ipv6_addr_copy(&fl6->daddr, final_dst);
+		fl6->daddr = *final_dst;
 	if (can_sleep)
 		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
@@ -1095,7 +1100,7 @@
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
-		ipv6_addr_copy(&fl6->daddr, final_dst);
+		fl6->daddr = *final_dst;
 	if (can_sleep)
 		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
@@ -1588,7 +1593,7 @@
 	if (np->pmtudisc < IPV6_PMTUDISC_DO)
 		skb->local_df = 1;
 
-	ipv6_addr_copy(final_dst, &fl6->daddr);
+	*final_dst = fl6->daddr;
 	__skb_pull(skb, skb_network_header_len(skb));
 	if (opt && opt->opt_flen)
 		ipv6_push_frag_opts(skb, opt, &proto);
@@ -1604,8 +1609,8 @@
 
 	hdr->hop_limit = np->cork.hop_limit;
 	hdr->nexthdr = proto;
-	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
-	ipv6_addr_copy(&hdr->daddr, final_dst);
+	hdr->saddr = fl6->saddr;
+	hdr->daddr = *final_dst;
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4e2e9ff..e1f7761 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -93,7 +93,7 @@
 	unsigned long	rx_bytes;
 	unsigned long	tx_packets;
 	unsigned long	tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
@@ -653,8 +653,8 @@
 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
 				NULL, 0, 0);
 
-		if (rt && rt->rt6i_dev)
-			skb2->dev = rt->rt6i_dev;
+		if (rt && rt->dst.dev)
+			skb2->dev = rt->dst.dev;
 
 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
 
@@ -979,8 +979,8 @@
 	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = proto;
-	ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
-	ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
+	ipv6h->saddr = fl6->saddr;
+	ipv6h->daddr = fl6->daddr;
 	nf_reset(skb);
 	pkt_len = skb->len;
 	err = ip6_local_out(skb);
@@ -1155,8 +1155,8 @@
 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
 
 	/* Set up flowi template */
-	ipv6_addr_copy(&fl6->saddr, &p->laddr);
-	ipv6_addr_copy(&fl6->daddr, &p->raddr);
+	fl6->saddr = p->laddr;
+	fl6->daddr = p->raddr;
 	fl6->flowi6_oif = p->link;
 	fl6->flowlabel = 0;
 
@@ -1185,11 +1185,11 @@
 		if (rt == NULL)
 			return;
 
-		if (rt->rt6i_dev) {
-			dev->hard_header_len = rt->rt6i_dev->hard_header_len +
+		if (rt->dst.dev) {
+			dev->hard_header_len = rt->dst.dev->hard_header_len +
 				sizeof (struct ipv6hdr);
 
-			dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+			dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 				dev->mtu-=8;
 
@@ -1212,8 +1212,8 @@
 static int
 ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
 {
-	ipv6_addr_copy(&t->parms.laddr, &p->laddr);
-	ipv6_addr_copy(&t->parms.raddr, &p->raddr);
+	t->parms.laddr = p->laddr;
+	t->parms.raddr = p->raddr;
 	t->parms.flags = p->flags;
 	t->parms.hop_limit = p->hop_limit;
 	t->parms.encap_limit = p->encap_limit;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 449a918..c7e95c8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1105,8 +1105,8 @@
 		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
 		msg->im6_mif = mrt->mroute_reg_vif_num;
 		msg->im6_pad = 0;
-		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
-		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+		msg->im6_src = ipv6_hdr(pkt)->saddr;
+		msg->im6_dst = ipv6_hdr(pkt)->daddr;
 
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else
@@ -1131,8 +1131,8 @@
 	msg->im6_msgtype = assert;
 	msg->im6_mif = mifi;
 	msg->im6_pad = 0;
-	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
-	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+	msg->im6_src = ipv6_hdr(pkt)->saddr;
+	msg->im6_dst = ipv6_hdr(pkt)->daddr;
 
 	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2181,8 +2181,8 @@
 		iph->payload_len = 0;
 		iph->nexthdr = IPPROTO_NONE;
 		iph->hop_limit = 0;
-		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
-		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
+		iph->saddr = rt->rt6i_src.addr;
+		iph->daddr = rt->rt6i_dst.addr;
 
 		err = ip6mr_cache_unresolved(mrt, vif, skb2);
 		read_unlock(&mrt_lock);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 26cb08c..18a2719 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -435,7 +435,7 @@
 			goto e_inval;
 
 		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
-		ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
+		np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
 		retv = 0;
 		break;
 	}
@@ -980,8 +980,7 @@
 				struct in6_pktinfo src_info;
 				src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
 					np->sticky_pktinfo.ipi6_ifindex;
-				np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
-					ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+				src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
 				put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
 			}
 			if (np->rxopt.bits.rxhlim) {
@@ -992,8 +991,7 @@
 				struct in6_pktinfo src_info;
 				src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
 					np->sticky_pktinfo.ipi6_ifindex;
-				np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
-					ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+				src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
 				put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
 			}
 			if (np->rxopt.bits.rxohlim) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ee7839f..b853f06 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -155,14 +155,14 @@
 		return -ENOMEM;
 
 	mc_lst->next = NULL;
-	ipv6_addr_copy(&mc_lst->addr, addr);
+	mc_lst->addr = *addr;
 
 	rcu_read_lock();
 	if (ifindex == 0) {
 		struct rt6_info *rt;
 		rt = rt6_lookup(net, addr, NULL, 0, 0);
 		if (rt) {
-			dev = rt->rt6i_dev;
+			dev = rt->dst.dev;
 			dst_release(&rt->dst);
 		}
 	} else
@@ -256,7 +256,7 @@
 		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
 
 		if (rt) {
-			dev = rt->rt6i_dev;
+			dev = rt->dst.dev;
 			dev_hold(dev);
 			dst_release(&rt->dst);
 		}
@@ -858,7 +858,7 @@
 
 	setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
 
-	ipv6_addr_copy(&mc->mca_addr, addr);
+	mc->mca_addr = *addr;
 	mc->idev = idev; /* (reference taken) */
 	mc->mca_users = 1;
 	/* mca_stamp should be updated upon changes */
@@ -1343,13 +1343,15 @@
 	struct mld2_report *pmr;
 	struct in6_addr addr_buf;
 	const struct in6_addr *saddr;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
 	int err;
 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
 		     IPV6_TLV_PADN, 0 };
 
 	/* we assume size > sizeof(ra) here */
-	size += LL_ALLOCATED_SPACE(dev);
+	size += hlen + tlen;
 	/* limit our allocations to order-0 page */
 	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
 	skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1357,7 +1359,7 @@
 	if (!skb)
 		return NULL;
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 
 	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
 		/* <draft-ietf-magma-mld-source-05.txt>:
@@ -1408,18 +1410,11 @@
 					   csum_partial(skb_transport_header(skb),
 							mldlen, 0));
 
-	dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-
-	if (!dst) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-
 	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 			 skb->dev->ifindex);
+	dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
 
-	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
 	err = 0;
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
@@ -1723,6 +1718,8 @@
 	struct mld_msg *hdr;
 	const struct in6_addr *snd_addr, *saddr;
 	struct in6_addr addr_buf;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
 	int err, len, payload_len, full_len;
 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
@@ -1744,7 +1741,7 @@
 		      IPSTATS_MIB_OUT, full_len);
 	rcu_read_unlock();
 
-	skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
+	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
 
 	if (skb == NULL) {
 		rcu_read_lock();
@@ -1754,7 +1751,7 @@
 		return;
 	}
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 
 	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
 		/* <draft-ietf-magma-mld-source-05.txt>:
@@ -1772,7 +1769,7 @@
 	hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
 	memset(hdr, 0, sizeof(struct mld_msg));
 	hdr->mld_type = type;
-	ipv6_addr_copy(&hdr->mld_mca, addr);
+	hdr->mld_mca = *addr;
 
 	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
 					 IPPROTO_ICMPV6,
@@ -1781,17 +1778,10 @@
 	rcu_read_lock();
 	idev = __in6_dev_get(skb->dev);
 
-	dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-	if (!dst) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-
 	icmpv6_flow_init(sk, &fl6, type,
 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 			 skb->dev->ifindex);
-
-	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+	dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto err_out;
@@ -1914,7 +1904,7 @@
  * Add multicast single-source filter to the interface list
  */
 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
-	const struct in6_addr *psfsrc, int delta)
+	const struct in6_addr *psfsrc)
 {
 	struct ip6_sf_list *psf, *psf_prev;
 
@@ -2045,7 +2035,7 @@
 		pmc->mca_sfcount[sfmode]++;
 	err = 0;
 	for (i=0; i<sfcount; i++) {
-		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
 		if (err)
 			break;
 	}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 43242e6..7e1e0fb 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -195,8 +195,8 @@
 		mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
 		mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
 		mip6_report_rl.iif = iif;
-		ipv6_addr_copy(&mip6_report_rl.src, src);
-		ipv6_addr_copy(&mip6_report_rl.dst, dst);
+		mip6_report_rl.src = *src;
+		mip6_report_rl.dst = *dst;
 		allow = 1;
 	}
 	spin_unlock_bh(&mip6_report_rl.lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0cb78d7..d8f02ef 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -93,7 +93,7 @@
 
 static u32 ndisc_hash(const void *pkey,
 		      const struct net_device *dev,
-		      __u32 rnd);
+		      __u32 *hash_rnd);
 static int ndisc_constructor(struct neighbour *neigh);
 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -126,7 +126,6 @@
 
 struct neigh_table nd_tbl = {
 	.family =	AF_INET6,
-	.entry_size =	sizeof(struct neighbour) + sizeof(struct in6_addr),
 	.key_len =	sizeof(struct in6_addr),
 	.hash =		ndisc_hash,
 	.constructor =	ndisc_constructor,
@@ -141,7 +140,7 @@
 		.gc_staletime		= 60 * HZ,
 		.reachable_time		= ND_REACHABLE_TIME,
 		.delay_probe_time	= 5 * HZ,
-		.queue_len		= 3,
+		.queue_len_bytes	= 64*1024,
 		.ucast_probes		= 3,
 		.mcast_probes		= 3,
 		.anycast_delay		= 1 * HZ,
@@ -350,16 +349,9 @@
 
 static u32 ndisc_hash(const void *pkey,
 		      const struct net_device *dev,
-		      __u32 hash_rnd)
+		      __u32 *hash_rnd)
 {
-	const u32 *p32 = pkey;
-	u32 addr_hash, i;
-
-	addr_hash = 0;
-	for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
-		addr_hash ^= *p32++;
-
-	return jhash_2words(addr_hash, dev->ifindex, hash_rnd);
+	return ndisc_hashfn(pkey, dev, hash_rnd);
 }
 
 static int ndisc_constructor(struct neighbour *neigh)
@@ -446,6 +438,8 @@
 	struct sock *sk = net->ipv6.ndisc_sk;
 	struct sk_buff *skb;
 	struct icmp6hdr *hdr;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
 	int len;
 	int err;
 	u8 *opt;
@@ -459,7 +453,7 @@
 
 	skb = sock_alloc_send_skb(sk,
 				  (MAX_HEADER + sizeof(struct ipv6hdr) +
-				   len + LL_ALLOCATED_SPACE(dev)),
+				   len + hlen + tlen),
 				  1, &err);
 	if (!skb) {
 		ND_PRINTK0(KERN_ERR
@@ -468,7 +462,7 @@
 		return NULL;
 	}
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 	ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
 
 	skb->transport_header = skb->tail;
@@ -479,7 +473,7 @@
 
 	opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
 	if (target) {
-		ipv6_addr_copy((struct in6_addr *)opt, target);
+		*(struct in6_addr *)opt = *target;
 		opt += sizeof(*target);
 	}
 
@@ -515,14 +509,7 @@
 	type = icmp6h->icmp6_type;
 
 	icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex);
-
-	dst = icmp6_dst_alloc(dev, neigh, daddr);
-	if (!dst) {
-		kfree_skb(skb);
-		return;
-	}
-
-	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+	dst = icmp6_dst_alloc(dev, neigh, &fl6);
 	if (IS_ERR(dst)) {
 		kfree_skb(skb);
 		return;
@@ -1237,7 +1224,7 @@
 	rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
 	if (rt)
-		neigh = dst_get_neighbour(&rt->dst);
+		neigh = dst_get_neighbour_noref(&rt->dst);
 
 	if (rt && lifetime == 0) {
 		neigh_clone(neigh);
@@ -1257,7 +1244,7 @@
 			return;
 		}
 
-		neigh = dst_get_neighbour(&rt->dst);
+		neigh = dst_get_neighbour_noref(&rt->dst);
 		if (neigh == NULL) {
 			ND_PRINTK0(KERN_ERR
 				   "ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1271,7 +1258,7 @@
 	}
 
 	if (rt)
-		rt->rt6i_expires = jiffies + (HZ * lifetime);
+		rt->dst.expires = jiffies + (HZ * lifetime);
 
 	if (ra_msg->icmph.icmp6_hop_limit) {
 		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
@@ -1381,7 +1368,9 @@
 		for (p = ndopts.nd_opts_pi;
 		     p;
 		     p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) {
-			addrconf_prefix_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3);
+			addrconf_prefix_rcv(skb->dev, (u8 *)p,
+					    (p->nd_opt_len) << 3,
+					    ndopts.nd_opts_src_lladdr != NULL);
 		}
 	}
 
@@ -1533,6 +1522,7 @@
 	struct inet6_dev *idev;
 	struct flowi6 fl6;
 	u8 *opt;
+	int hlen, tlen;
 	int rd_len;
 	int err;
 	u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
@@ -1590,9 +1580,11 @@
 	rd_len &= ~0x7;
 	len += rd_len;
 
+	hlen = LL_RESERVED_SPACE(dev);
+	tlen = dev->needed_tailroom;
 	buff = sock_alloc_send_skb(sk,
 				   (MAX_HEADER + sizeof(struct ipv6hdr) +
-				    len + LL_ALLOCATED_SPACE(dev)),
+				    len + hlen + tlen),
 				   1, &err);
 	if (buff == NULL) {
 		ND_PRINTK0(KERN_ERR
@@ -1601,7 +1593,7 @@
 		goto release;
 	}
 
-	skb_reserve(buff, LL_RESERVED_SPACE(dev));
+	skb_reserve(buff, hlen);
 	ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
 		   IPPROTO_ICMPV6, len);
 
@@ -1617,9 +1609,9 @@
 	 */
 
 	addrp = (struct in6_addr *)(icmph + 1);
-	ipv6_addr_copy(addrp, target);
+	*addrp = *target;
 	addrp++;
-	ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr);
+	*addrp = ipv6_hdr(skb)->daddr;
 
 	opt = (u8*) (addrp + 1);
 
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index f792b34..9a68fb5 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -125,6 +125,16 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_MATCH_RPFILTER
+	tristate '"rpfilter" reverse path filter match support'
+	depends on NETFILTER_ADVANCED
+	---help---
+	  This option allows you to match packets whose replies would
+	  go out via the interface the packet came in.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+	  The module will be called ip6t_rpfilter.
+
 config IP6_NF_MATCH_RT
 	tristate '"rt" Routing header match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index abfee91..2eaed96 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
 obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
+obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 
 # targets
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index e63c397..fb80a23 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -405,6 +405,7 @@
 	int status, type, pid, flags;
 	unsigned int nlmsglen, skblen;
 	struct nlmsghdr *nlh;
+	bool enable_timestamp = false;
 
 	skblen = skb->len;
 	if (skblen < sizeof(*nlh))
@@ -442,11 +443,13 @@
 			RCV_SKB_FAIL(-EBUSY);
 		}
 	} else {
-		net_enable_timestamp();
+		enable_timestamp = true;
 		peer_pid = pid;
 	}
 
 	spin_unlock_bh(&queue_lock);
+	if (enable_timestamp)
+		net_enable_timestamp();
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 				  nlmsglen - NLMSG_LENGTH(0));
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5d..aad2fa4 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -49,6 +49,7 @@
 	const __u8 tclass = DEFAULT_TOS_VALUE;
 	struct dst_entry *dst = NULL;
 	u8 proto;
+	__be16 frag_off;
 	struct flowi6 fl6;
 
 	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
@@ -58,7 +59,7 @@
 	}
 
 	proto = oip6h->nexthdr;
-	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
+	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
 
 	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
 		pr_debug("Cannot get TCP header.\n");
@@ -93,8 +94,8 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
-	ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
+	fl6.saddr = oip6h->daddr;
+	fl6.daddr = oip6h->saddr;
 	fl6.fl6_sport = otcph.dest;
 	fl6.fl6_dport = otcph.source;
 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
@@ -129,8 +130,8 @@
 	*(__be32 *)ip6h =  htonl(0x60000000 | (tclass << 20));
 	ip6h->hop_limit = ip6_dst_hoplimit(dst);
 	ip6h->nexthdr = IPPROTO_TCP;
-	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
-	ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
+	ip6h->saddr = oip6h->daddr;
+	ip6h->daddr = oip6h->saddr;
 
 	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
 	/* Truncate to length (no data) */
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
new file mode 100644
index 0000000..5d1d8b0
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2011 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+
+#include <linux/netfilter/xt_rpfilter.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("Xtables: IPv6 reverse path filter match");
+
+static bool rpfilter_addr_unicast(const struct in6_addr *addr)
+{
+	int addr_type = ipv6_addr_type(addr);
+	return addr_type & IPV6_ADDR_UNICAST;
+}
+
+static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
+				     const struct net_device *dev, u8 flags)
+{
+	struct rt6_info *rt;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	bool ret = false;
+	struct flowi6 fl6 = {
+		.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
+		.flowi6_proto = iph->nexthdr,
+		.daddr = iph->saddr,
+	};
+	int lookup_flags;
+
+	if (rpfilter_addr_unicast(&iph->daddr)) {
+		memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr));
+		lookup_flags = RT6_LOOKUP_F_HAS_SADDR;
+	} else {
+		lookup_flags = 0;
+	}
+
+	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+	if ((flags & XT_RPFILTER_LOOSE) == 0) {
+		fl6.flowi6_oif = dev->ifindex;
+		lookup_flags |= RT6_LOOKUP_F_IFACE;
+	}
+
+	rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags);
+	if (rt->dst.error)
+		goto out;
+
+	if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST))
+		goto out;
+
+	if (rt->rt6i_flags & RTF_LOCAL) {
+		ret = flags & XT_RPFILTER_ACCEPT_LOCAL;
+		goto out;
+	}
+
+	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+		ret = true;
+ out:
+	dst_release(&rt->dst);
+	return ret;
+}
+
+static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_rpfilter_info *info = par->matchinfo;
+	int saddrtype;
+	struct ipv6hdr *iph;
+	bool invert = info->flags & XT_RPFILTER_INVERT;
+
+	if (par->in->flags & IFF_LOOPBACK)
+		return true ^ invert;
+
+	iph = ipv6_hdr(skb);
+	saddrtype = ipv6_addr_type(&iph->saddr);
+	if (unlikely(saddrtype == IPV6_ADDR_ANY))
+		return true ^ invert; /* not routable: forward path will drop it */
+
+	return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert;
+}
+
+static int rpfilter_check(const struct xt_mtchk_param *par)
+{
+	const struct xt_rpfilter_info *info = par->matchinfo;
+	unsigned int options = ~XT_RPFILTER_OPTION_MASK;
+
+	if (info->flags & options) {
+		pr_info("unknown options encountered");
+		return -EINVAL;
+	}
+
+	if (strcmp(par->table, "mangle") != 0 &&
+	    strcmp(par->table, "raw") != 0) {
+		pr_info("match only valid in the \'raw\' "
+			"or \'mangle\' tables, not \'%s\'.\n", par->table);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_match rpfilter_mt_reg __read_mostly = {
+	.name		= "rpfilter",
+	.family		= NFPROTO_IPV6,
+	.checkentry	= rpfilter_check,
+	.match		= rpfilter_mt,
+	.matchsize	= sizeof(struct xt_rpfilter_info),
+	.hooks		= (1 << NF_INET_PRE_ROUTING),
+	.me		= THIS_MODULE
+};
+
+static int __init rpfilter_mt_init(void)
+{
+	return xt_register_match(&rpfilter_mt_reg);
+}
+
+static void __exit rpfilter_mt_exit(void)
+{
+	xt_unregister_match(&rpfilter_mt_reg);
+}
+
+module_init(rpfilter_mt_init);
+module_exit(rpfilter_mt_exit);
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index c9e37c8..a8f6da9 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -44,7 +44,7 @@
 static struct nf_hook_ops *filter_ops __read_mostly;
 
 /* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
 module_param(forward, bool, 0000);
 
 static int __net_init ip6table_filter_net_init(struct net *net)
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 1008ce9..fdeb6d0 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -142,11 +142,7 @@
 	SNMP_MIB_SENTINEL
 };
 
-/* can be called either with percpu mib (pcpumib != NULL),
- * or shared one (smib != NULL)
- */
-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
-				     atomic_long_t *smib)
+static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
 {
 	char name[32];
 	int i;
@@ -163,14 +159,14 @@
 		snprintf(name, sizeof(name), "Icmp6%s%s",
 			i & 0x100 ? "Out" : "In", p);
 		seq_printf(seq, "%-32s\t%lu\n", name,
-			pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
+			   atomic_long_read(smib + i));
 	}
 
 	/* print by number (nonzero only) - ICMPMsgStat format */
 	for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
 		unsigned long val;
 
-		val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
+		val = atomic_long_read(smib + i);
 		if (!val)
 			continue;
 		snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -215,8 +211,7 @@
 			    snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
 	snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
 			    NULL, snmp6_icmp6_list);
-	snmp6_seq_show_icmpv6msg(seq,
-			    (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
+	snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
 	snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
 			    NULL, snmp6_udp6_list);
 	snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
@@ -246,7 +241,7 @@
 			    snmp6_ipstats_list);
 	snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
 			    snmp6_icmp6_list);
-	snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
+	snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
 	return 0;
 }
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 331af3b8..a4894f4 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -299,9 +299,9 @@
 	}
 
 	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+	np->rcv_saddr = addr->sin6_addr;
 	if (!(addr_type & IPV6_ADDR_MULTICAST))
-		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+		np->saddr = addr->sin6_addr;
 	err = 0;
 out_unlock:
 	rcu_read_unlock();
@@ -383,7 +383,8 @@
 	}
 
 	/* Charge it to the socket. */
-	if (ip_queue_rcv_skb(sk, skb) < 0) {
+	skb_dst_drop(skb);
+	if (sock_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -494,7 +495,7 @@
 	if (sin6) {
 		sin6->sin6_family = AF_INET6;
 		sin6->sin6_port = 0;
-		ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 		sin6->sin6_flowinfo = 0;
 		sin6->sin6_scope_id = 0;
 		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -610,6 +611,8 @@
 	struct sk_buff *skb;
 	int err;
 	struct rt6_info *rt = (struct rt6_info *)*dstp;
+	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
+	int tlen = rt->dst.dev->needed_tailroom;
 
 	if (length > rt->dst.dev->mtu) {
 		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
@@ -619,11 +622,11 @@
 		goto out;
 
 	skb = sock_alloc_send_skb(sk,
-				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+				  length + hlen + tlen + 15,
 				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+	skb_reserve(skb, hlen);
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
@@ -843,11 +846,11 @@
 		goto out;
 
 	if (!ipv6_addr_any(daddr))
-		ipv6_addr_copy(&fl6.daddr, daddr);
+		fl6.daddr = *daddr;
 	else
 		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
 	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
-		ipv6_addr_copy(&fl6.saddr, &np->saddr);
+		fl6.saddr = np->saddr;
 
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index dfb164e..b69fae7 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -153,8 +153,8 @@
 
 	fq->id = arg->id;
 	fq->user = arg->user;
-	ipv6_addr_copy(&fq->saddr, arg->src);
-	ipv6_addr_copy(&fq->daddr, arg->dst);
+	fq->saddr = *arg->src;
+	fq->daddr = *arg->dst;
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3399dd3..07361df 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -62,17 +62,6 @@
 #include <linux/sysctl.h>
 #endif
 
-/* Set to 3 to get tracing. */
-#define RT6_DEBUG 2
-
-#if RT6_DEBUG >= 3
-#define RDBG(x) printk x
-#define RT6_TRACE(x...) printk(KERN_DEBUG x)
-#else
-#define RDBG(x)
-#define RT6_TRACE(x...) do { ; } while (0)
-#endif
-
 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
 				    const struct in6_addr *dest);
 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
@@ -134,7 +123,23 @@
 
 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-	return __neigh_lookup_errno(&nd_tbl, daddr, dst->dev);
+	struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
+	if (n)
+		return n;
+	return neigh_create(&nd_tbl, daddr, dst->dev);
+}
+
+static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
+{
+	struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway);
+	if (!n) {
+		n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev);
+		if (IS_ERR(n))
+			return PTR_ERR(n);
+	}
+	dst_set_neighbour(&rt->dst, n);
+
+	return 0;
 }
 
 static struct dst_ops ip6_dst_ops_template = {
@@ -247,9 +252,9 @@
 {
 	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
-	if (rt != NULL)
+	if (rt)
 		memset(&rt->rt6i_table, 0,
-			sizeof(*rt) - sizeof(struct dst_entry));
+		       sizeof(*rt) - sizeof(struct dst_entry));
 
 	return rt;
 }
@@ -263,7 +268,7 @@
 	if (!(rt->dst.flags & DST_HOST))
 		dst_destroy_metrics_generic(dst);
 
-	if (idev != NULL) {
+	if (idev) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
 	}
@@ -299,10 +304,10 @@
 	struct net_device *loopback_dev =
 		dev_net(dev)->loopback_dev;
 
-	if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
+	if (dev != loopback_dev && idev && idev->dev == dev) {
 		struct inet6_dev *loopback_idev =
 			in6_dev_get(loopback_dev);
-		if (loopback_idev != NULL) {
+		if (loopback_idev) {
 			rt->rt6i_idev = loopback_idev;
 			in6_dev_put(idev);
 		}
@@ -312,7 +317,7 @@
 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
 {
 	return (rt->rt6i_flags & RTF_EXPIRES) &&
-		time_after(jiffies, rt->rt6i_expires);
+		time_after(jiffies, rt->dst.expires);
 }
 
 static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -338,13 +343,13 @@
 		goto out;
 
 	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
-		struct net_device *dev = sprt->rt6i_dev;
+		struct net_device *dev = sprt->dst.dev;
 
 		if (oif) {
 			if (dev->ifindex == oif)
 				return sprt;
 			if (dev->flags & IFF_LOOPBACK) {
-				if (sprt->rt6i_idev == NULL ||
+				if (!sprt->rt6i_idev ||
 				    sprt->rt6i_idev->dev->ifindex != oif) {
 					if (flags & RT6_LOOKUP_F_IFACE && oif)
 						continue;
@@ -385,7 +390,7 @@
 	 * to no more than one per minute.
 	 */
 	rcu_read_lock();
-	neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+	neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
 	if (!neigh || (neigh->nud_state & NUD_VALID))
 		goto out;
 	read_lock_bh(&neigh->lock);
@@ -399,7 +404,7 @@
 
 		target = (struct in6_addr *)&neigh->primary_key;
 		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
+		ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
 	} else {
 		read_unlock_bh(&neigh->lock);
 	}
@@ -417,7 +422,7 @@
  */
 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 {
-	struct net_device *dev = rt->rt6i_dev;
+	struct net_device *dev = rt->dst.dev;
 	if (!oif || dev->ifindex == oif)
 		return 2;
 	if ((dev->flags & IFF_LOOPBACK) &&
@@ -432,7 +437,7 @@
 	int m;
 
 	rcu_read_lock();
-	neigh = dst_get_neighbour(&rt->dst);
+	neigh = dst_get_neighbour_noref(&rt->dst);
 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
 	    !(rt->rt6i_flags & RTF_GATEWAY))
 		m = 1;
@@ -518,9 +523,6 @@
 	struct rt6_info *match, *rt0;
 	struct net *net;
 
-	RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
-		  __func__, fn->leaf, oif);
-
 	rt0 = fn->rr_ptr;
 	if (!rt0)
 		fn->rr_ptr = rt0 = fn->leaf;
@@ -539,10 +541,7 @@
 			fn->rr_ptr = next;
 	}
 
-	RT6_TRACE("%s() => %p\n",
-		  __func__, match);
-
-	net = dev_net(rt0->rt6i_dev);
+	net = dev_net(rt0->dst.dev);
 	return match ? match : net->ipv6.ip6_null_entry;
 }
 
@@ -611,7 +610,7 @@
 		if (!addrconf_finite_timeout(lifetime)) {
 			rt->rt6i_flags &= ~RTF_EXPIRES;
 		} else {
-			rt->rt6i_expires = jiffies + HZ * lifetime;
+			rt->dst.expires = jiffies + HZ * lifetime;
 			rt->rt6i_flags |= RTF_EXPIRES;
 		}
 		dst_release(&rt->dst);
@@ -636,7 +635,7 @@
 				goto restart; \
 		} \
 	} \
-} while(0)
+} while (0)
 
 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
 					     struct fib6_table *table,
@@ -658,6 +657,13 @@
 
 }
 
+struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+				    int flags)
+{
+	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
+}
+EXPORT_SYMBOL_GPL(ip6_route_lookup);
+
 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
 			    const struct in6_addr *saddr, int oif, int strict)
 {
@@ -706,7 +712,7 @@
 int ip6_ins_rt(struct rt6_info *rt)
 {
 	struct nl_info info = {
-		.nl_net = dev_net(rt->rt6i_dev),
+		.nl_net = dev_net(rt->dst.dev),
 	};
 	return __ip6_ins_rt(rt, &info);
 }
@@ -724,29 +730,27 @@
 	rt = ip6_rt_copy(ort, daddr);
 
 	if (rt) {
-		struct neighbour *neigh;
 		int attempts = !in_softirq();
 
-		if (!(rt->rt6i_flags&RTF_GATEWAY)) {
-			if (rt->rt6i_dst.plen != 128 &&
+		if (!(rt->rt6i_flags & RTF_GATEWAY)) {
+			if (ort->rt6i_dst.plen != 128 &&
 			    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
 				rt->rt6i_flags |= RTF_ANYCAST;
-			ipv6_addr_copy(&rt->rt6i_gateway, daddr);
+			rt->rt6i_gateway = *daddr;
 		}
 
 		rt->rt6i_flags |= RTF_CACHE;
 
 #ifdef CONFIG_IPV6_SUBTREES
 		if (rt->rt6i_src.plen && saddr) {
-			ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
+			rt->rt6i_src.addr = *saddr;
 			rt->rt6i_src.plen = 128;
 		}
 #endif
 
 	retry:
-		neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
-		if (IS_ERR(neigh)) {
-			struct net *net = dev_net(rt->rt6i_dev);
+		if (rt6_bind_neighbour(rt, rt->dst.dev)) {
+			struct net *net = dev_net(rt->dst.dev);
 			int saved_rt_min_interval =
 				net->ipv6.sysctl.ip6_rt_gc_min_interval;
 			int saved_rt_elasticity =
@@ -771,8 +775,6 @@
 			dst_free(&rt->dst);
 			return NULL;
 		}
-		dst_set_neighbour(&rt->dst, neigh);
-
 	}
 
 	return rt;
@@ -785,7 +787,7 @@
 
 	if (rt) {
 		rt->rt6i_flags |= RTF_CACHE;
-		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
+		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
 	}
 	return rt;
 }
@@ -819,7 +821,7 @@
 	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
 
-	if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+	if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
 	else if (!(rt->dst.flags & DST_HOST))
 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -875,7 +877,7 @@
 		.flowi6_iif = skb->dev->ifindex,
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
-		.flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
+		.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
 		.flowi6_mark = skb->mark,
 		.flowi6_proto = iph->nexthdr,
 	};
@@ -932,9 +934,9 @@
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
-		rt->rt6i_expires = 0;
+		rt->dst.expires = 0;
 
-		ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+		rt->rt6i_gateway = ort->rt6i_gateway;
 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
 		rt->rt6i_metric = 0;
 
@@ -997,7 +999,7 @@
 
 	rt = (struct rt6_info *) skb_dst(skb);
 	if (rt) {
-		if (rt->rt6i_flags&RTF_CACHE) {
+		if (rt->rt6i_flags & RTF_CACHE) {
 			dst_set_expires(&rt->dst, 0);
 			rt->rt6i_flags |= RTF_EXPIRES;
 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
@@ -1067,34 +1069,38 @@
 
 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 				  struct neighbour *neigh,
-				  const struct in6_addr *addr)
+				  struct flowi6 *fl6)
 {
+	struct dst_entry *dst;
 	struct rt6_info *rt;
 	struct inet6_dev *idev = in6_dev_get(dev);
 	struct net *net = dev_net(dev);
 
-	if (unlikely(idev == NULL))
+	if (unlikely(!idev))
 		return NULL;
 
 	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
-	if (unlikely(rt == NULL)) {
+	if (unlikely(!rt)) {
 		in6_dev_put(idev);
+		dst = ERR_PTR(-ENOMEM);
 		goto out;
 	}
 
 	if (neigh)
 		neigh_hold(neigh);
 	else {
-		neigh = ndisc_get_neigh(dev, addr);
-		if (IS_ERR(neigh))
-			neigh = NULL;
+		neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
+		if (IS_ERR(neigh)) {
+			dst_free(&rt->dst);
+			return ERR_CAST(neigh);
+		}
 	}
 
 	rt->dst.flags |= DST_HOST;
 	rt->dst.output  = ip6_output;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+	rt->rt6i_dst.addr = fl6->daddr;
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_idev     = idev;
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
@@ -1106,8 +1112,10 @@
 
 	fib6_force_start_gc(net);
 
+	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
+
 out:
-	return &rt->dst;
+	return dst;
 }
 
 int icmp6_dst_gc(void)
@@ -1237,21 +1245,30 @@
 	if (cfg->fc_metric == 0)
 		cfg->fc_metric = IP6_RT_PRIO_USER;
 
-	table = fib6_new_table(net, cfg->fc_table);
-	if (table == NULL) {
-		err = -ENOBUFS;
-		goto out;
+	err = -ENOBUFS;
+	if (cfg->fc_nlinfo.nlh &&
+	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
+		table = fib6_get_table(net, cfg->fc_table);
+		if (!table) {
+			printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+			table = fib6_new_table(net, cfg->fc_table);
+		}
+	} else {
+		table = fib6_new_table(net, cfg->fc_table);
 	}
 
+	if (!table)
+		goto out;
+
 	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
-	if (rt == NULL) {
+	if (!rt) {
 		err = -ENOMEM;
 		goto out;
 	}
 
 	rt->dst.obsolete = -1;
-	rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
+	rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
 				jiffies + clock_t_to_jiffies(cfg->fc_expires) :
 				0;
 
@@ -1294,8 +1311,9 @@
 	   they would result in kernel looping; promote them to reject routes
 	 */
 	if ((cfg->fc_flags & RTF_REJECT) ||
-	    (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
-					      && !(cfg->fc_flags&RTF_LOCAL))) {
+	    (dev && (dev->flags & IFF_LOOPBACK) &&
+	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
+	     !(cfg->fc_flags & RTF_LOCAL))) {
 		/* hold loopback dev/idev if we haven't done so. */
 		if (dev != net->loopback_dev) {
 			if (dev) {
@@ -1322,7 +1340,7 @@
 		int gwa_type;
 
 		gw_addr = &cfg->fc_gateway;
-		ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
+		rt->rt6i_gateway = *gw_addr;
 		gwa_type = ipv6_addr_type(gw_addr);
 
 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -1336,26 +1354,26 @@
 			   some exceptions. --ANK
 			 */
 			err = -EINVAL;
-			if (!(gwa_type&IPV6_ADDR_UNICAST))
+			if (!(gwa_type & IPV6_ADDR_UNICAST))
 				goto out;
 
 			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
 
 			err = -EHOSTUNREACH;
-			if (grt == NULL)
+			if (!grt)
 				goto out;
 			if (dev) {
-				if (dev != grt->rt6i_dev) {
+				if (dev != grt->dst.dev) {
 					dst_release(&grt->dst);
 					goto out;
 				}
 			} else {
-				dev = grt->rt6i_dev;
+				dev = grt->dst.dev;
 				idev = grt->rt6i_idev;
 				dev_hold(dev);
 				in6_dev_hold(grt->rt6i_idev);
 			}
-			if (!(grt->rt6i_flags&RTF_GATEWAY))
+			if (!(grt->rt6i_flags & RTF_GATEWAY))
 				err = 0;
 			dst_release(&grt->dst);
 
@@ -1363,12 +1381,12 @@
 				goto out;
 		}
 		err = -EINVAL;
-		if (dev == NULL || (dev->flags&IFF_LOOPBACK))
+		if (!dev || (dev->flags & IFF_LOOPBACK))
 			goto out;
 	}
 
 	err = -ENODEV;
-	if (dev == NULL)
+	if (!dev)
 		goto out;
 
 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
@@ -1376,18 +1394,15 @@
 			err = -EINVAL;
 			goto out;
 		}
-		ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
+		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
 		rt->rt6i_prefsrc.plen = 128;
 	} else
 		rt->rt6i_prefsrc.plen = 0;
 
 	if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
-		struct neighbour *n = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
-		if (IS_ERR(n)) {
-			err = PTR_ERR(n);
+		err = rt6_bind_neighbour(rt, dev);
+		if (err)
 			goto out;
-		}
-		dst_set_neighbour(&rt->dst, n);
 	}
 
 	rt->rt6i_flags = cfg->fc_flags;
@@ -1433,7 +1448,7 @@
 {
 	int err;
 	struct fib6_table *table;
-	struct net *net = dev_net(rt->rt6i_dev);
+	struct net *net = dev_net(rt->dst.dev);
 
 	if (rt == net->ipv6.ip6_null_entry)
 		return -ENOENT;
@@ -1452,7 +1467,7 @@
 int ip6_del_rt(struct rt6_info *rt)
 {
 	struct nl_info info = {
-		.nl_net = dev_net(rt->rt6i_dev),
+		.nl_net = dev_net(rt->dst.dev),
 	};
 	return __ip6_del_rt(rt, &info);
 }
@@ -1465,7 +1480,7 @@
 	int err = -ESRCH;
 
 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
-	if (table == NULL)
+	if (!table)
 		return err;
 
 	read_lock_bh(&table->tb6_lock);
@@ -1477,8 +1492,8 @@
 	if (fn) {
 		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
 			if (cfg->fc_ifindex &&
-			    (rt->rt6i_dev == NULL ||
-			     rt->rt6i_dev->ifindex != cfg->fc_ifindex))
+			    (!rt->dst.dev ||
+			     rt->dst.dev->ifindex != cfg->fc_ifindex))
 				continue;
 			if (cfg->fc_flags & RTF_GATEWAY &&
 			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
@@ -1540,7 +1555,7 @@
 			continue;
 		if (!(rt->rt6i_flags & RTF_GATEWAY))
 			continue;
-		if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
+		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
 			continue;
 		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
 			continue;
@@ -1573,7 +1588,7 @@
 		},
 	};
 
-	ipv6_addr_copy(&rdfl.gateway, gateway);
+	rdfl.gateway = *gateway;
 
 	if (rt6_need_strict(dest))
 		flags |= RT6_LOOKUP_F_IFACE;
@@ -1618,18 +1633,18 @@
 	dst_confirm(&rt->dst);
 
 	/* Duplicate redirect: silently ignore. */
-	if (neigh == dst_get_neighbour_raw(&rt->dst))
+	if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
 		goto out;
 
 	nrt = ip6_rt_copy(rt, dest);
-	if (nrt == NULL)
+	if (!nrt)
 		goto out;
 
 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
+	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 	dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
 	if (ip6_ins_rt(nrt))
@@ -1639,7 +1654,7 @@
 	netevent.new = &nrt->dst;
 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
-	if (rt->rt6i_flags&RTF_CACHE) {
+	if (rt->rt6i_flags & RTF_CACHE) {
 		ip6_del_rt(rt);
 		return;
 	}
@@ -1660,7 +1675,7 @@
 	int allfrag = 0;
 again:
 	rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
-	if (rt == NULL)
+	if (!rt)
 		return;
 
 	if (rt6_check_expired(rt)) {
@@ -1710,7 +1725,7 @@
 	   1. It is connected route. Action: COW
 	   2. It is gatewayed route or NONEXTHOP route. Action: clone it.
 	 */
-	if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+	if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, daddr, saddr);
 	else
 		nrt = rt6_alloc_clone(rt, daddr);
@@ -1766,7 +1781,7 @@
 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
 				    const struct in6_addr *dest)
 {
-	struct net *net = dev_net(ort->rt6i_dev);
+	struct net *net = dev_net(ort->dst.dev);
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
 					    ort->dst.dev, 0);
 
@@ -1775,7 +1790,7 @@
 		rt->dst.output = ort->dst.output;
 		rt->dst.flags |= DST_HOST;
 
-		ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
+		rt->rt6i_dst.addr = *dest;
 		rt->rt6i_dst.plen = 128;
 		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
@@ -1783,9 +1798,9 @@
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
 		rt->dst.lastuse = jiffies;
-		rt->rt6i_expires = 0;
+		rt->dst.expires = 0;
 
-		ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+		rt->rt6i_gateway = ort->rt6i_gateway;
 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
 		rt->rt6i_metric = 0;
 
@@ -1808,7 +1823,7 @@
 	struct fib6_table *table;
 
 	table = fib6_get_table(net, RT6_TABLE_INFO);
-	if (table == NULL)
+	if (!table)
 		return NULL;
 
 	write_lock_bh(&table->tb6_lock);
@@ -1817,7 +1832,7 @@
 		goto out;
 
 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_dev->ifindex != ifindex)
+		if (rt->dst.dev->ifindex != ifindex)
 			continue;
 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
 			continue;
@@ -1848,8 +1863,8 @@
 		.fc_nlinfo.nl_net = net,
 	};
 
-	ipv6_addr_copy(&cfg.fc_dst, prefix);
-	ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+	cfg.fc_dst = *prefix;
+	cfg.fc_gateway = *gwaddr;
 
 	/* We should treat it as a default route if prefix length is 0. */
 	if (!prefixlen)
@@ -1867,12 +1882,12 @@
 	struct fib6_table *table;
 
 	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
-	if (table == NULL)
+	if (!table)
 		return NULL;
 
 	write_lock_bh(&table->tb6_lock);
 	for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
-		if (dev == rt->rt6i_dev &&
+		if (dev == rt->dst.dev &&
 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
 			break;
@@ -1898,7 +1913,7 @@
 		.fc_nlinfo.nl_net = dev_net(dev),
 	};
 
-	ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+	cfg.fc_gateway = *gwaddr;
 
 	ip6_route_add(&cfg);
 
@@ -1912,7 +1927,7 @@
 
 	/* NOTE: Keep consistent with rt6_get_dflt_router */
 	table = fib6_get_table(net, RT6_TABLE_DFLT);
-	if (table == NULL)
+	if (!table)
 		return;
 
 restart:
@@ -1944,9 +1959,9 @@
 
 	cfg->fc_nlinfo.nl_net = net;
 
-	ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
-	ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
-	ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
+	cfg->fc_dst = rtmsg->rtmsg_dst;
+	cfg->fc_src = rtmsg->rtmsg_src;
+	cfg->fc_gateway = rtmsg->rtmsg_gateway;
 }
 
 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -2045,14 +2060,14 @@
 
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 				    const struct in6_addr *addr,
-				    int anycast)
+				    bool anycast)
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
 					    net->loopback_dev, 0);
-	struct neighbour *neigh;
+	int err;
 
-	if (rt == NULL) {
+	if (!rt) {
 		if (net_ratelimit())
 			pr_warning("IPv6:  Maximum number of routes reached,"
 				   " consider increasing route/max_size.\n");
@@ -2072,15 +2087,13 @@
 		rt->rt6i_flags |= RTF_ANYCAST;
 	else
 		rt->rt6i_flags |= RTF_LOCAL;
-	neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
-	if (IS_ERR(neigh)) {
+	err = rt6_bind_neighbour(rt, rt->dst.dev);
+	if (err) {
 		dst_free(&rt->dst);
-
-		return ERR_CAST(neigh);
+		return ERR_PTR(err);
 	}
-	dst_set_neighbour(&rt->dst, neigh);
 
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+	rt->rt6i_dst.addr = *addr;
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
 
@@ -2098,7 +2111,7 @@
 	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
 	int err = 0;
 	if (rt->rt6i_prefsrc.plen)
-		ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
+		*saddr = rt->rt6i_prefsrc.addr;
 	else
 		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
 					 daddr, prefs, saddr);
@@ -2118,7 +2131,7 @@
 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
 
-	if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
+	if (((void *)rt->dst.dev == dev || !dev) &&
 	    rt != net->ipv6.ip6_null_entry &&
 	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
 		/* remove prefsrc entry */
@@ -2148,11 +2161,10 @@
 	const struct arg_dev_net *adn = arg;
 	const struct net_device *dev = adn->dev;
 
-	if ((rt->rt6i_dev == dev || dev == NULL) &&
-	    rt != adn->net->ipv6.ip6_null_entry) {
-		RT6_TRACE("deleted by ifdown %p\n", rt);
+	if ((rt->dst.dev == dev || !dev) &&
+	    rt != adn->net->ipv6.ip6_null_entry)
 		return -1;
-	}
+
 	return 0;
 }
 
@@ -2185,7 +2197,7 @@
 	*/
 
 	idev = __in6_dev_get(arg->dev);
-	if (idev == NULL)
+	if (!idev)
 		return 0;
 
 	/* For administrative MTU increase, there is no way to discover
@@ -2202,7 +2214,7 @@
 	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
 	   PMTU discouvery.
 	 */
-	if (rt->rt6i_dev == arg->dev &&
+	if (rt->dst.dev == arg->dev &&
 	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
 	    (dst_mtu(&rt->dst) >= arg->mtu ||
 	     (dst_mtu(&rt->dst) < arg->mtu &&
@@ -2351,11 +2363,13 @@
 			 int iif, int type, u32 pid, u32 seq,
 			 int prefix, int nowait, unsigned int flags)
 {
+	const struct inet_peer *peer;
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
 	long expires;
 	u32 table;
 	struct neighbour *n;
+	u32 ts, tsage;
 
 	if (prefix) {	/* user wants prefix routes only */
 		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2365,7 +2379,7 @@
 	}
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
-	if (nlh == NULL)
+	if (!nlh)
 		return -EMSGSIZE;
 
 	rtm = nlmsg_data(nlh);
@@ -2379,25 +2393,25 @@
 		table = RT6_TABLE_UNSPEC;
 	rtm->rtm_table = table;
 	NLA_PUT_U32(skb, RTA_TABLE, table);
-	if (rt->rt6i_flags&RTF_REJECT)
+	if (rt->rt6i_flags & RTF_REJECT)
 		rtm->rtm_type = RTN_UNREACHABLE;
-	else if (rt->rt6i_flags&RTF_LOCAL)
+	else if (rt->rt6i_flags & RTF_LOCAL)
 		rtm->rtm_type = RTN_LOCAL;
-	else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
+	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
 		rtm->rtm_type = RTN_LOCAL;
 	else
 		rtm->rtm_type = RTN_UNICAST;
 	rtm->rtm_flags = 0;
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = rt->rt6i_protocol;
-	if (rt->rt6i_flags&RTF_DYNAMIC)
+	if (rt->rt6i_flags & RTF_DYNAMIC)
 		rtm->rtm_protocol = RTPROT_REDIRECT;
 	else if (rt->rt6i_flags & RTF_ADDRCONF)
 		rtm->rtm_protocol = RTPROT_KERNEL;
-	else if (rt->rt6i_flags&RTF_DEFAULT)
+	else if (rt->rt6i_flags & RTF_DEFAULT)
 		rtm->rtm_protocol = RTPROT_RA;
 
-	if (rt->rt6i_flags&RTF_CACHE)
+	if (rt->rt6i_flags & RTF_CACHE)
 		rtm->rtm_flags |= RTM_F_CLONED;
 
 	if (dst) {
@@ -2437,7 +2451,7 @@
 
 	if (rt->rt6i_prefsrc.plen) {
 		struct in6_addr saddr_buf;
-		ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
+		saddr_buf = rt->rt6i_prefsrc.addr;
 		NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
 
@@ -2445,24 +2459,31 @@
 		goto nla_put_failure;
 
 	rcu_read_lock();
-	n = dst_get_neighbour(&rt->dst);
+	n = dst_get_neighbour_noref(&rt->dst);
 	if (n)
 		NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
 	rcu_read_unlock();
 
 	if (rt->dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
+		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
 
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
 
 	if (!(rt->rt6i_flags & RTF_EXPIRES))
 		expires = 0;
-	else if (rt->rt6i_expires - jiffies < INT_MAX)
-		expires = rt->rt6i_expires - jiffies;
+	else if (rt->dst.expires - jiffies < INT_MAX)
+		expires = rt->dst.expires - jiffies;
 	else
 		expires = INT_MAX;
 
-	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
+	peer = rt->rt6i_peer;
+	ts = tsage = 0;
+	if (peer && peer->tcp_ts_stamp) {
+		ts = peer->tcp_ts;
+		tsage = get_seconds() - peer->tcp_ts_stamp;
+	}
+
+	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
 			       expires, rt->dst.error) < 0)
 		goto nla_put_failure;
 
@@ -2511,14 +2532,14 @@
 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
 			goto errout;
 
-		ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
+		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
 	}
 
 	if (tb[RTA_DST]) {
 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
 			goto errout;
 
-		ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
+		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
 	}
 
 	if (tb[RTA_IIF])
@@ -2537,7 +2558,7 @@
 	}
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-	if (skb == NULL) {
+	if (!skb) {
 		err = -ENOBUFS;
 		goto errout;
 	}
@@ -2572,10 +2593,10 @@
 	int err;
 
 	err = -ENOBUFS;
-	seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
+	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
 	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
-	if (skb == NULL)
+	if (!skb)
 		goto errout;
 
 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
@@ -2642,7 +2663,7 @@
 	seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
 	rcu_read_lock();
-	n = dst_get_neighbour(&rt->dst);
+	n = dst_get_neighbour_noref(&rt->dst);
 	if (n) {
 		seq_printf(m, "%pi6", n->primary_key);
 	} else {
@@ -2652,14 +2673,14 @@
 	seq_printf(m, " %08x %08x %08x %08x %8s\n",
 		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
 		   rt->dst.__use, rt->rt6i_flags,
-		   rt->rt6i_dev ? rt->rt6i_dev->name : "");
+		   rt->dst.dev ? rt->dst.dev->name : "");
 	return 0;
 }
 
 static int ipv6_route_show(struct seq_file *m, void *v)
 {
 	struct net *net = (struct net *)m->private;
-	fib6_clean_all(net, rt6_info_route, 0, m);
+	fib6_clean_all_ro(net, rt6_info_route, 0, m);
 	return 0;
 }
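
Throughout the route.c changes above, ipv6_addr_copy() calls give way to plain structure assignment; struct in6_addr is an ordinary aggregate, so `*dst = *src` copies it just as well and reads more naturally. A minimal user-space sketch of the equivalence, with an illustrative stand-in for the removed helper:

	#include <stdio.h>
	#include <string.h>
	#include <netinet/in.h>

	/* Illustrative stand-in for the old copy helper. */
	static void ipv6_addr_copy_sketch(struct in6_addr *dst,
					  const struct in6_addr *src)
	{
		memcpy(dst, src, sizeof(*dst));
	}

	int main(void)
	{
		struct in6_addr a = IN6ADDR_LOOPBACK_INIT, b, c;

		ipv6_addr_copy_sketch(&b, &a);	/* old style */
		c = a;				/* new style: plain assignment */

		printf("equal: %d\n", memcmp(&b, &c, sizeof(b)) == 0);
		return 0;
	}
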
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a7a1860..3b6dac9 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -91,7 +91,7 @@
 	unsigned long	rx_bytes;
 	unsigned long	tx_packets;
 	unsigned long	tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
 {
@@ -263,6 +263,8 @@
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
 
+	strcpy(nt->parms.name, dev->name);
+
 	dev_hold(dev);
 
 	ipip6_tunnel_link(sitn, nt);
@@ -680,7 +682,7 @@
 		struct neighbour *neigh = NULL;
 
 		if (skb_dst(skb))
-			neigh = dst_get_neighbour(skb_dst(skb));
+			neigh = dst_get_neighbour_noref(skb_dst(skb));
 
 		if (neigh == NULL) {
 			if (net_ratelimit())
@@ -705,7 +707,7 @@
 		struct neighbour *neigh = NULL;
 
 		if (skb_dst(skb))
-			neigh = dst_get_neighbour(skb_dst(skb));
+			neigh = dst_get_neighbour_noref(skb_dst(skb));
 
 		if (neigh == NULL) {
 			if (net_ratelimit())
@@ -914,7 +916,7 @@
 				goto done;
 #ifdef CONFIG_IPV6_SIT_6RD
 		} else {
-			ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
+			ip6rd.prefix = t->ip6rd.prefix;
 			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
 			ip6rd.prefixlen = t->ip6rd.prefixlen;
 			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
@@ -1082,7 +1084,7 @@
 			if (relay_prefix != ip6rd.relay_prefix)
 				goto done;
 
-			ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
+			t->ip6rd.prefix = prefix;
 			t->ip6rd.relay_prefix = relay_prefix;
 			t->ip6rd.prefixlen = ip6rd.prefixlen;
 			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
@@ -1144,7 +1146,6 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 	tunnel->dev = dev;
-	strcpy(tunnel->parms.name, dev->name);
 
 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@
 static int __net_init sit_init_net(struct net *net)
 {
 	struct sit_net *sitn = net_generic(net, sit_net_id);
+	struct ip_tunnel *t;
 	int err;
 
 	sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@
 	if ((err = register_netdev(sitn->fb_tunnel_dev)))
 		goto err_reg_dev;
 
+	t = netdev_priv(sitn->fb_tunnel_dev);
+
+	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
 	return 0;
 
 err_reg_dev:
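
The sit.c per-tunnel statistics structure above gains an explicit aligned attribute. The sketch below only shows how such an attribute is reflected in sizeof and alignof; the structure name and fields are illustrative, not the driver's:

	#include <stdio.h>
	#include <stdalign.h>

	struct pcpu_tstats_sketch {
		unsigned long rx_packets;
		unsigned long rx_bytes;
		unsigned long tx_packets;
		unsigned long tx_bytes;
	} __attribute__((aligned(4 * sizeof(unsigned long))));

	int main(void)
	{
		printf("size=%zu align=%zu\n",
		       sizeof(struct pcpu_tstats_sketch),
		       alignof(struct pcpu_tstats_sketch));
		return 0;
	}
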
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5a0d664..8e951d8 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -200,8 +200,8 @@
 	req->mss = mss;
 	ireq->rmt_port = th->source;
 	ireq->loc_port = th->dest;
-	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
-	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+	ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq6->loc_addr = ipv6_hdr(skb)->daddr;
 	if (ipv6_opt_accepted(sk, skb) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -237,9 +237,9 @@
 		struct flowi6 fl6;
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = IPPROTO_TCP;
-		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+		fl6.daddr = ireq6->rmt_addr;
 		final_p = fl6_update_dst(&fl6, np->opt, &final);
-		ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+		fl6.saddr = ireq6->loc_addr;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
 		fl6.flowi6_mark = sk->sk_mark;
 		fl6.fl6_dport = inet_rsk(req)->rmt_port;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2dea4bb..906c7ca 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -62,6 +62,7 @@
 #include <net/netdma.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -153,7 +154,7 @@
 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 			if (flowlabel == NULL)
 				return -EINVAL;
-			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+			usin->sin6_addr = flowlabel->dst;
 			fl6_sock_release(flowlabel);
 		}
 	}
@@ -195,7 +196,7 @@
 		tp->write_seq = 0;
 	}
 
-	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+	np->daddr = usin->sin6_addr;
 	np->flow_label = fl6.flowlabel;
 
 	/*
@@ -244,9 +245,8 @@
 		saddr = &np->rcv_saddr;
 
 	fl6.flowi6_proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl6.daddr, &np->daddr);
-	ipv6_addr_copy(&fl6.saddr,
-		       (saddr ? saddr : &np->saddr));
+	fl6.daddr = np->daddr;
+	fl6.saddr = saddr ? *saddr : np->saddr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = usin->sin6_port;
@@ -264,11 +264,11 @@
 
 	if (saddr == NULL) {
 		saddr = &fl6.saddr;
-		ipv6_addr_copy(&np->rcv_saddr, saddr);
+		np->rcv_saddr = *saddr;
 	}
 
 	/* set the source address */
-	ipv6_addr_copy(&np->saddr, saddr);
+	np->saddr = *saddr;
 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 	sk->sk_gso_type = SKB_GSO_TCPV6;
@@ -398,8 +398,8 @@
 			 */
 			memset(&fl6, 0, sizeof(fl6));
 			fl6.flowi6_proto = IPPROTO_TCP;
-			ipv6_addr_copy(&fl6.daddr, &np->daddr);
-			ipv6_addr_copy(&fl6.saddr, &np->saddr);
+			fl6.daddr = np->daddr;
+			fl6.saddr = np->saddr;
 			fl6.flowi6_oif = sk->sk_bound_dev_if;
 			fl6.flowi6_mark = sk->sk_mark;
 			fl6.fl6_dport = inet->inet_dport;
@@ -489,8 +489,8 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
-	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+	fl6.daddr = treq->rmt_addr;
+	fl6.saddr = treq->loc_addr;
 	fl6.flowlabel = 0;
 	fl6.flowi6_oif = treq->iif;
 	fl6.flowi6_mark = sk->sk_mark;
@@ -512,7 +512,7 @@
 	if (skb) {
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
-		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+		fl6.daddr = treq->rmt_addr;
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -617,8 +617,7 @@
 			tp->md5sig_info->alloced6++;
 		}
 
-		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
-			       peer);
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
 		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
 		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
 
@@ -750,8 +749,8 @@
 
 	bp = &hp->md5_blk.ip6;
 	/* 1. TCP pseudo-header (RFC2460) */
-	ipv6_addr_copy(&bp->saddr, saddr);
-	ipv6_addr_copy(&bp->daddr, daddr);
+	bp->saddr = *saddr;
+	bp->daddr = *daddr;
 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
 	bp->len = cpu_to_be32(nbytes);
 
@@ -1039,8 +1038,8 @@
 #endif
 
 	memset(&fl6, 0, sizeof(fl6));
-	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
-	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
+	fl6.daddr = ipv6_hdr(skb)->saddr;
+	fl6.saddr = ipv6_hdr(skb)->daddr;
 
 	buff->ip_summed = CHECKSUM_PARTIAL;
 	buff->csum = 0;
@@ -1250,8 +1249,8 @@
 	tcp_openreq_init(req, &tmp_opt, skb);
 
 	treq = inet6_rsk(req);
-	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
-	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
+	treq->rmt_addr = ipv6_hdr(skb)->saddr;
+	treq->loc_addr = ipv6_hdr(skb)->daddr;
 	if (!want_cookie || tmp_opt.tstamp_ok)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
 
@@ -1381,7 +1380,7 @@
 
 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+		newnp->rcv_saddr = newnp->saddr;
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1445,9 +1444,9 @@
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
-	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
-	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
+	newnp->daddr = treq->rmt_addr;
+	newnp->saddr = treq->loc_addr;
+	newnp->rcv_saddr = treq->loc_addr;
 	newsk->sk_bound_dev_if = treq->iif;
 
 	/* Now IPv6 options...
@@ -1996,7 +1995,8 @@
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
 	local_bh_disable();
-	percpu_counter_inc(&tcp_sockets_allocated);
+	sock_update_memcg(sk);
+	sk_sockets_allocated_inc(sk);
 	local_bh_enable();
 
 	return 0;
@@ -2215,7 +2215,6 @@
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
 	.orphan_count		= &tcp_orphan_count,
-	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
@@ -2229,6 +2228,9 @@
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+	.proto_cgroup		= tcp_proto_cgroup,
+#endif
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8c25419..4f96b5c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -238,7 +238,7 @@
 	return result;
 }
 
-static struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(struct net *net,
 				      const struct in6_addr *saddr, __be16 sport,
 				      const struct in6_addr *daddr, __be16 dport,
 				      int dif, struct udp_table *udptable)
@@ -305,6 +305,7 @@
 	rcu_read_unlock();
 	return result;
 }
+EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 
 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 					  __be16 sport, __be16 dport,
@@ -418,8 +419,7 @@
 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 					       &sin6->sin6_addr);
 		else {
-			ipv6_addr_copy(&sin6->sin6_addr,
-				       &ipv6_hdr(skb)->saddr);
+			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 				sin6->sin6_scope_id = IP6CB(skb)->iif;
 		}
@@ -539,7 +539,9 @@
 			goto drop;
 	}
 
-	if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
+	skb_dst_drop(skb);
+	rc = sock_queue_rcv_skb(sk, skb);
+	if (rc < 0) {
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
 			UDP6_INC_STATS_BH(sock_net(sk),
@@ -1114,11 +1116,11 @@
 
 	fl6.flowi6_proto = sk->sk_protocol;
 	if (!ipv6_addr_any(daddr))
-		ipv6_addr_copy(&fl6.daddr, daddr);
+		fl6.daddr = *daddr;
 	else
 		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
 	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
-		ipv6_addr_copy(&fl6.saddr, &np->saddr);
+		fl6.saddr = np->saddr;
 	fl6.fl6_sport = inet->inet_sport;
 
 	final_p = fl6_update_dst(&fl6, opt, &final);
@@ -1299,7 +1301,8 @@
 	return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
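
The udp6_ufo_fragment() prototype above moves from a bare u32 features argument to netdev_features_t, so the device-feature bitmask type can grow past 32 bits without touching every user. A self-contained sketch of the same typedef approach; the type, flag, and helper names here are made up for illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* Growing the underlying type is now a one-line change. */
	typedef uint64_t netdev_features_sketch_t;

	#define FEAT_UFO_SKETCH	((netdev_features_sketch_t)1 << 35)

	static int has_feature(netdev_features_sketch_t features,
			       netdev_features_sketch_t flag)
	{
		return (features & flag) != 0;
	}

	int main(void)
	{
		netdev_features_sketch_t features = FEAT_UFO_SKETCH;

		printf("UFO: %d\n", has_feature(features, FEAT_UFO_SKETCH));
		return 0;
	}
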
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 3437d7d..a81ce94 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -72,8 +72,8 @@
 		top_iph->nexthdr = IPPROTO_BEETPH;
 	}
 
-	ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
-	ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
 	return 0;
 }
 
@@ -99,8 +99,8 @@
 
 	ip6h = ipv6_hdr(skb);
 	ip6h->payload_len = htons(skb->len - size);
-	ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
-	ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
+	ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
+	ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
 	err = 0;
 out:
 	return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 4d6edff..261e6e6 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -55,8 +55,8 @@
 		dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
 	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
-	ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
-	ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
+	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
 	return 0;
 }
 
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index faae417..4eeff89 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -49,7 +49,7 @@
 	struct sock *sk = skb->sk;
 
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+	fl6.daddr = ipv6_hdr(skb)->daddr;
 
 	ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
@@ -60,7 +60,7 @@
 	struct sock *sk = skb->sk;
 
 	fl6.fl6_dport = inet_sk(sk)->inet_dport;
-	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+	fl6.daddr = ipv6_hdr(skb)->daddr;
 
 	ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d879f7e..8ea65e0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -132,8 +132,8 @@
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
 
-	ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
-	ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
+	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
+	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
 
 	while (nh + offset + 1 < skb->data ||
 	       pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f2d72b8..3f2f7c4 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -27,8 +27,8 @@
 
 	/* Initialize temporary selector matching only
 	 * to current session. */
-	ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr);
-	ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr);
+	*(struct in6_addr *)&sel->daddr = fl6->daddr;
+	*(struct in6_addr *)&sel->saddr = fl6->saddr;
 	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
 	sel->dport_mask = htons(0xffff);
 	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c24f25a..bb14c34 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2558,8 +2558,8 @@
 			self->errno = 0;
 			setup_timer(&self->watchdog, irda_discovery_timeout,
 					(unsigned long)self);
-			self->watchdog.expires = jiffies + (val * HZ/1000);
-			add_timer(&(self->watchdog));
+			mod_timer(&self->watchdog,
+				  jiffies + msecs_to_jiffies(val));
 
 			/* Wait for IR-LMP to call us back */
 			__wait_event_interruptible(self->query_wait,
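
The af_irda.c hunk above swaps a hand-computed `val * HZ/1000` plus add_timer() for mod_timer() with msecs_to_jiffies(), which rounds up instead of truncating and is safe on an already-armed timer. A user-space sketch of the rounding difference, assuming HZ=100 purely for illustration:

	#include <stdio.h>

	#define HZ_SKETCH 100

	/* Roughly what msecs_to_jiffies() guarantees: round up, never
	 * return 0 for a non-zero delay. */
	static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
	{
		return (ms * HZ_SKETCH + 999) / 1000;
	}

	int main(void)
	{
		unsigned int ms = 4;

		printf("old: %u jiffies, new: %lu jiffies\n",
		       ms * HZ_SKETCH / 1000,		/* truncates to 0 */
		       msecs_to_jiffies_sketch(ms));	/* rounds up to 1 */
		return 0;
	}
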
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 7791176..579617c 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -67,7 +67,7 @@
 static void *skey;
 
 /* Module parameters */
-static int eth;   /* Use "eth" or "irlan" name for devices */
+static bool eth;   /* Use "eth" or "irlan" name for devices */
 static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 32e3bb0..5c93f29 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1461,14 +1461,12 @@
 	}
 
 	/* Allocate a new instance */
-	new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+	new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
 	if (!new) {
 		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
 		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
 		return NULL;
 	}
-	/* Dup */
-	memcpy(new, orig, sizeof(struct tsap_cb));
 	spin_lock_init(&new->lock);
 
 	/* We don't need the old instance any more */
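
The irttp.c change above folds a kmalloc() followed by memcpy() into a single kmemdup() call. The sketch below is a user-space analogue showing why the two forms are equivalent; the structure and helper names are placeholders:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct tsap_sketch {
		int id;
		char name[16];
	};

	/* User-space analogue of kmemdup(): allocate and copy in one step. */
	static void *memdup_sketch(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}

	int main(void)
	{
		struct tsap_sketch orig = { .id = 7, .name = "orig" };
		struct tsap_sketch *copy = memdup_sketch(&orig, sizeof(orig));

		if (!copy)
			return 1;
		printf("copy: id=%d name=%s\n", copy->id, copy->name);
		free(copy);
		return 0;
	}
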
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 274d150..d5c5b8f 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -130,6 +130,17 @@
        memcpy(&dst[8], src, 8);
 }
 
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		if (skb->dev)
+			dev_put(skb->dev);
+		kfree_skb(skb);
+	}
+}
+
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -164,10 +175,9 @@
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
-		skb_queue_purge(&iucv->send_skb_q);
+		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		switch (sk->sk_state) {
-		case IUCV_SEVERED:
 		case IUCV_DISCONN:
 		case IUCV_CLOSING:
 		case IUCV_CONNECTED:
@@ -212,7 +222,6 @@
 			sk->sk_state_change(sk);
 			break;
 		case IUCV_DISCONN:
-		case IUCV_SEVERED:
 		case IUCV_CLOSING:
 		case IUCV_LISTEN:
 		case IUCV_BOUND:
@@ -366,9 +375,7 @@
 	if (imsg)
 		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-	rcu_read_lock();
-	skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
-	rcu_read_unlock();
+	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
 	if (!skb->dev)
 		return -ENODEV;
 	if (!(skb->dev->flags & IFF_UP))
@@ -388,6 +395,7 @@
 	err = dev_queue_xmit(skb);
 	if (err) {
 		skb_unlink(nskb, &iucv->send_skb_q);
+		dev_put(nskb->dev);
 		kfree_skb(nskb);
 	} else {
 		atomic_sub(confirm_recv, &iucv->msg_recv);
@@ -396,25 +404,6 @@
 	return err;
 }
 
-/* Timers */
-static void iucv_sock_timeout(unsigned long arg)
-{
-	struct sock *sk = (struct sock *)arg;
-
-	bh_lock_sock(sk);
-	sk->sk_err = ETIMEDOUT;
-	sk->sk_state_change(sk);
-	bh_unlock_sock(sk);
-
-	iucv_sock_kill(sk);
-	sock_put(sk);
-}
-
-static void iucv_sock_clear_timer(struct sock *sk)
-{
-	sk_stop_timer(sk, &sk->sk_timer);
-}
-
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
 	struct sock *sk;
@@ -467,7 +456,6 @@
 	int err, blen;
 	struct sk_buff *skb;
 
-	iucv_sock_clear_timer(sk);
 	lock_sock(sk);
 
 	switch (sk->sk_state) {
@@ -481,16 +469,14 @@
 			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
 			skb = sock_alloc_send_skb(sk, blen, 1, &err);
 			if (skb) {
-				skb_reserve(skb,
-					sizeof(struct af_iucv_trans_hdr) +
-					ETH_HLEN);
+				skb_reserve(skb, blen);
 				err = afiucv_hs_send(NULL, sk, skb,
 						     AF_IUCV_FLAG_FIN);
 			}
 			sk->sk_state = IUCV_DISCONN;
 			sk->sk_state_change(sk);
 		}
-	case IUCV_DISCONN:
+	case IUCV_DISCONN:   /* fall through */
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
@@ -520,7 +506,7 @@
 		sk->sk_err = ECONNRESET;
 		sk->sk_state_change(sk);
 
-		skb_queue_purge(&iucv->send_skb_q);
+		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		break;
 
@@ -581,8 +567,6 @@
 	sk->sk_protocol = proto;
 	sk->sk_state	= IUCV_OPEN;
 
-	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
-
 	iucv_sock_link(&iucv_sk_list, sk);
 	return sk;
 }
@@ -675,16 +659,12 @@
 		}
 
 		if (sk->sk_state == IUCV_CONNECTED ||
-		    sk->sk_state == IUCV_SEVERED ||
-		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
+		    sk->sk_state == IUCV_DISCONN ||
 		    !newsock) {
 			iucv_accept_unlink(sk);
 			if (newsock)
 				sock_graft(sk, newsock);
 
-			if (sk->sk_state == IUCV_SEVERED)
-				sk->sk_state = IUCV_DISCONN;
-
 			release_sock(sk);
 			return sk;
 		}
@@ -739,7 +719,7 @@
 		if (!memcmp(dev->perm_addr, uid, 8)) {
 			memcpy(iucv->src_name, sa->siucv_name, 8);
 			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
-			sock->sk->sk_bound_dev_if = dev->ifindex;
+			sk->sk_bound_dev_if = dev->ifindex;
 			sk->sk_state = IUCV_BOUND;
 			iucv->transport = AF_IUCV_TRANS_HIPER;
 			if (!iucv->msglimit)
@@ -774,16 +754,13 @@
 static int iucv_sock_autobind(struct sock *sk)
 {
 	struct iucv_sock *iucv = iucv_sk(sk);
-	char query_buffer[80];
 	char name[12];
 	int err = 0;
 
-	/* Set the userid and name */
-	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
-	if (unlikely(err))
+	if (unlikely(!pr_iucv))
 		return -EPROTO;
 
-	memcpy(iucv->src_user_id, query_buffer, 8);
+	memcpy(iucv->src_user_id, iucv_userid, 8);
 
 	write_lock_bh(&iucv_sk_list.lock);
 
@@ -1225,6 +1202,8 @@
 	return len;
 
 fail:
+	if (skb->dev)
+		dev_put(skb->dev);
 	kfree_skb(skb);
 out:
 	release_sock(sk);
@@ -1357,7 +1336,7 @@
 	int blen;
 	int err = 0;
 
-	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+	if ((sk->sk_state == IUCV_DISCONN) &&
 	    skb_queue_empty(&iucv->backlog_skb_q) &&
 	    skb_queue_empty(&sk->sk_receive_queue) &&
 	    list_empty(&iucv->message_q.list))
@@ -1441,9 +1420,7 @@
 					ETH_HLEN;
 				sskb = sock_alloc_send_skb(sk, blen, 1, &err);
 				if (sskb) {
-					skb_reserve(sskb,
-						sizeof(struct af_iucv_trans_hdr)
-						+ ETH_HLEN);
+					skb_reserve(sskb, blen);
 					err = afiucv_hs_send(NULL, sk, sskb,
 							     AF_IUCV_FLAG_WIN);
 				}
@@ -1506,7 +1483,7 @@
 	if (sk->sk_state == IUCV_CLOSED)
 		mask |= POLLHUP;
 
-	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+	if (sk->sk_state == IUCV_DISCONN)
 		mask |= POLLIN;
 
 	if (sock_writeable(sk))
@@ -1533,7 +1510,6 @@
 	switch (sk->sk_state) {
 	case IUCV_DISCONN:
 	case IUCV_CLOSING:
-	case IUCV_SEVERED:
 	case IUCV_CLOSED:
 		err = -ENOTCONN;
 		goto fail;
@@ -1888,10 +1864,7 @@
 {
 	struct sock *sk = path->private;
 
-	if (!list_empty(&iucv_sk(sk)->accept_q))
-		sk->sk_state = IUCV_SEVERED;
-	else
-		sk->sk_state = IUCV_DISCONN;
+	sk->sk_state = IUCV_DISCONN;
 
 	sk->sk_state_change(sk);
 }
@@ -2051,10 +2024,7 @@
 	/* other end of connection closed */
 	if (iucv) {
 		bh_lock_sock(sk);
-		if (!list_empty(&iucv->accept_q))
-			sk->sk_state = IUCV_SEVERED;
-		else
-			sk->sk_state = IUCV_DISCONN;
+		sk->sk_state = IUCV_DISCONN;
 		sk->sk_state_change(sk);
 		bh_unlock_sock(sk);
 	}
@@ -2209,6 +2179,8 @@
 		break;
 	case 0:
 		/* plain data frame */
+		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
+		       CB_TRGCLS_LEN);
 		err = afiucv_hs_callback_rx(sk, skb);
 		break;
 	default:
@@ -2259,6 +2231,7 @@
 			case TX_NOTIFY_OK:
 				__skb_unlink(this, list);
 				iucv_sock_wake_msglim(sk);
+				dev_put(this->dev);
 				kfree_skb(this);
 				break;
 			case TX_NOTIFY_PENDING:
@@ -2269,6 +2242,7 @@
 				atomic_dec(&iucv->pendings);
 				if (atomic_read(&iucv->pendings) <= 0)
 					iucv_sock_wake_msglim(sk);
+				dev_put(this->dev);
 				kfree_skb(this);
 				break;
 			case TX_NOTIFY_UNREACHABLE:
@@ -2277,11 +2251,9 @@
 			case TX_NOTIFY_GENERALERROR:
 			case TX_NOTIFY_DELAYED_GENERALERROR:
 				__skb_unlink(this, list);
+				dev_put(this->dev);
 				kfree_skb(this);
-				if (!list_empty(&iucv->accept_q))
-					sk->sk_state = IUCV_SEVERED;
-				else
-					sk->sk_state = IUCV_DISCONN;
+				sk->sk_state = IUCV_DISCONN;
 				sk->sk_state_change(sk);
 				break;
 			}
@@ -2291,6 +2263,13 @@
 	}
 	spin_unlock_irqrestore(&list->lock, flags);
 
+	if (sk->sk_state == IUCV_CLOSING) {
+		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+			sk->sk_state = IUCV_CLOSED;
+			sk->sk_state_change(sk);
+		}
+	}
+
 out_unlock:
 	bh_unlock_sock(sk);
 }
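
The af_iucv.c changes above take a real reference on the outgoing device (dev_get_by_index()) for each queued skb and add iucv_skb_queue_purge() so that reference is dropped on every path that frees a queued skb. A minimal sketch of the hold/put invariant being enforced; the structures and helpers are illustrative, not the kernel API:

	#include <stdio.h>

	struct dev_sketch {
		int refcnt;
	};

	struct queued_skb_sketch {
		struct dev_sketch *dev;	/* non-NULL while a reference is held */
	};

	static void dev_hold_sketch(struct dev_sketch *d) { d->refcnt++; }
	static void dev_put_sketch(struct dev_sketch *d)  { d->refcnt--; }

	/* Mirrors the purge helper: drop the reference taken at enqueue
	 * time before the entry is freed. */
	static void purge_sketch(struct queued_skb_sketch *skb)
	{
		if (skb->dev)
			dev_put_sketch(skb->dev);
		skb->dev = NULL;
	}

	int main(void)
	{
		struct dev_sketch dev = { .refcnt = 0 };
		struct queued_skb_sketch skb = { .dev = &dev };

		dev_hold_sketch(&dev);	/* taken when the skb was queued */
		purge_sketch(&skb);	/* dropped on purge */
		printf("refcnt=%d\n", dev.refcnt);
		return 0;
	}
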
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1e733e9..11dbb22 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -375,7 +375,7 @@
 	const struct sadb_address *sp = p;
 	const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
 	const struct sockaddr_in *sin;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	const struct sockaddr_in6 *sin6;
 #endif
 	int len;
@@ -387,7 +387,7 @@
 		    sp->sadb_address_prefixlen > 32)
 			return -EINVAL;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t));
 		if (sp->sadb_address_len != len ||
@@ -469,7 +469,7 @@
 	if (s_addr->sa_family != d_addr->sa_family)
 		return 0;
 	if (s_addr->sa_family != AF_INET
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	    && s_addr->sa_family != AF_INET6
 #endif
 		)
@@ -579,7 +579,7 @@
 	switch (family) {
 	case AF_INET:
 		return sizeof(struct sockaddr_in);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		return sizeof(struct sockaddr_in6);
 #endif
@@ -595,7 +595,7 @@
 		xaddr->a4 =
 			((struct sockaddr_in *)sa)->sin_addr.s_addr;
 		return AF_INET;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		memcpy(xaddr->a6,
 		       &((struct sockaddr_in6 *)sa)->sin6_addr,
@@ -639,7 +639,7 @@
 	case AF_INET:
 		xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
 		break;
@@ -705,14 +705,14 @@
 		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
 		return 32;
 	    }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 	    {
 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
 		sin6->sin6_family = AF_INET6;
 		sin6->sin6_port = port;
 		sin6->sin6_flowinfo = 0;
-		ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
+		sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
 		sin6->sin6_scope_id = 0;
 		return 128;
 	    }
@@ -1311,7 +1311,7 @@
 		xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr;
 		xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr;
 		xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr;
@@ -3146,7 +3146,7 @@
 			return NULL;
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		if (opt != IPV6_IPSEC_POLICY) {
 			*dir = -EOPNOTSUPP;
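
All of the `#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)` guards in af_key.c above collapse into IS_ENABLED(CONFIG_IPV6). The sketch below shows one way such a macro can be built in the preprocessor; it is simplified (the kernel's real helper also covers the modular case) and every name here is illustrative:

	#include <stdio.h>

	/*
	 * A config symbol is either undefined or defined to 1; the macros
	 * below detect "defined to 1" without erroring when it is undefined.
	 */
	#define ARG_PLACEHOLDER_1 0,
	#define take_second_arg(ignored, val, ...) val
	#define is_defined_3(arg1_or_junk) take_second_arg(arg1_or_junk 1, 0, 0)
	#define is_defined_2(val) is_defined_3(ARG_PLACEHOLDER_##val)
	#define is_defined_1(x) is_defined_2(x)
	#define IS_ENABLED_SKETCH(option) is_defined_1(option)

	#define CONFIG_IPV6_SKETCH 1	/* pretend IPv6 is built in */

	int main(void)
	{
		printf("ipv6 enabled: %d\n", IS_ENABLED_SKETCH(CONFIG_IPV6_SKETCH));
		printf("foo enabled:  %d\n", IS_ENABLED_SKETCH(CONFIG_FOO_SKETCH));
		return 0;
	}
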
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a64..a18e6c3 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@
 		copied += used;
 		len -= used;
 
+		/* For non-stream protocols we get one packet per recvmsg call */
+		if (sk->sk_type != SOCK_STREAM)
+			goto copy_uaddr;
+
 		if (!(flags & MSG_PEEK)) {
 			sk_eat_skb(sk, skb, 0);
 			*seq = 0;
 		}
 
-		/* For non stream protcols we get one packet per recvmsg call */
-		if (sk->sk_type != SOCK_STREAM)
-			goto copy_uaddr;
-
 		/* Partial read */
 		if (used + offset < skb->len)
 			continue;
@@ -857,6 +857,12 @@
 	}
 	if (llc_sk(sk)->cmsg_flags)
 		llc_cmsg_rcv(msg, skb);
+
+	if (!(flags & MSG_PEEK)) {
+			sk_eat_skb(sk, skb, 0);
+			*seq = 0;
+	}
+
 	goto out;
 }
 
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 7d3b438..96ddb72 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -247,15 +247,3 @@
 	  and show them in debugfs.
 
 	  If unsure, say N.
-
-config MAC80211_DRIVER_API_TRACER
-	bool "Driver API tracer"
-	depends on MAC80211_DEBUG_MENU
-	depends on EVENT_TRACING
-	help
-	  Say Y here to make mac80211 register with the ftrace
-	  framework for the driver API -- you can then see which
-	  driver methods it is calling and which API functions
-	  drivers are calling by looking at the trace.
-
-	  If unsure, say Y.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index fdb54e6..d540c3b 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -24,7 +24,8 @@
 	util.o \
 	wme.o \
 	event.o \
-	chan.o
+	chan.o \
+	driver-trace.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -41,7 +42,6 @@
 
 mac80211-$(CONFIG_PM) += pm.o
 
-mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o
 CFLAGS_driver-trace.o := -I$(src)
 
 # objects for PID algorithm
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 93b2434..96debba 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -73,8 +73,11 @@
 	RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
-	       sta->sta.addr, tid);
+	printk(KERN_DEBUG
+	       "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
+	       sta->sta.addr, tid,
+	       initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
+	       (int)reason);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
 	if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
@@ -85,7 +88,7 @@
 	/* check if this is a self generated aggregation halt */
 	if (initiator == WLAN_BACK_RECIPIENT && tx)
 		ieee80211_send_delba(sta->sdata, sta->sta.addr,
-				     tid, 0, reason);
+				     tid, WLAN_BACK_RECIPIENT, reason);
 
 	del_timer_sync(&tid_rx->session_timer);
 	del_timer_sync(&tid_rx->reorder_timer);
@@ -109,7 +112,7 @@
 	int i;
 
 	rcu_read_lock();
-	sta = sta_info_get(sdata, addr);
+	sta = sta_info_get_bss(sdata, addr);
 	if (!sta) {
 		rcu_read_unlock();
 		return;
@@ -177,10 +180,13 @@
 	memcpy(mgmt->da, da, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	if (sdata->vif.type == NL80211_IFTYPE_AP ||
-	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
 		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b064e4d..76be6174 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -55,6 +55,8 @@
  * @ampdu_action function will be called with the action
  * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
  * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
+ * Note that the sta can get destroyed before the BA tear down is
+ * complete.
  */
 
 static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
@@ -78,10 +80,13 @@
 	memcpy(mgmt->da, da, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	if (sdata->vif.type == NL80211_IFTYPE_AP ||
-	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
 		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
@@ -102,7 +107,7 @@
 	mgmt->u.action.u.addba_req.start_seq_num =
 					cpu_to_le16(start_seq_num << 4);
 
-	ieee80211_tx_skb(sdata, skb);
+	ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -131,7 +136,7 @@
 	bar->start_seq_num = cpu_to_le16(ssn);
 
 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
-	ieee80211_tx_skb(sdata, skb);
+	ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 EXPORT_SYMBOL(ieee80211_send_bar);
 
@@ -185,6 +190,7 @@
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
+	del_timer_sync(&tid_tx->session_timer);
 
 	/*
 	 * After this packets are no longer handed right through
@@ -303,169 +309,6 @@
 	__release(agg_queue);
 }
 
-void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
-{
-	struct tid_ampdu_tx *tid_tx;
-	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u16 start_seq_num;
-	int ret;
-
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-
-	/*
-	 * While we're asking the driver about the aggregation,
-	 * stop the AC queue so that we don't have to worry
-	 * about frames that came in while we were doing that,
-	 * which would require us to put them to the AC pending
-	 * afterwards which just makes the code more complex.
-	 */
-	ieee80211_stop_queue_agg(local, tid);
-
-	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
-
-	/*
-	 * make sure no packets are being processed to get
-	 * valid starting sequence number
-	 */
-	synchronize_net();
-
-	start_seq_num = sta->tid_seq[tid] >> 4;
-
-	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-			       &sta->sta, tid, &start_seq_num, 0);
-	if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - HW unavailable for"
-					" tid %d\n", tid);
-#endif
-		spin_lock_bh(&sta->lock);
-		ieee80211_assign_tid_tx(sta, tid, NULL);
-		spin_unlock_bh(&sta->lock);
-
-		ieee80211_wake_queue_agg(local, tid);
-		kfree_rcu(tid_tx, rcu_head);
-		return;
-	}
-
-	/* we can take packets again now */
-	ieee80211_wake_queue_agg(local, tid);
-
-	/* activate the timer for the recipient's addBA response */
-	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
-
-	spin_lock_bh(&sta->lock);
-	sta->ampdu_mlme.addba_req_num[tid]++;
-	spin_unlock_bh(&sta->lock);
-
-	/* send AddBA request */
-	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
-				     tid_tx->dialog_token, start_seq_num,
-				     local->hw.max_tx_aggregation_subframes,
-				     tid_tx->timeout);
-}
-
-int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
-				  u16 timeout)
-{
-	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct ieee80211_local *local = sdata->local;
-	struct tid_ampdu_tx *tid_tx;
-	int ret = 0;
-
-	trace_api_start_tx_ba_session(pubsta, tid);
-
-	if (WARN_ON(!local->ops->ampdu_action))
-		return -EINVAL;
-
-	if ((tid >= STA_TID_NUM) ||
-	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
-	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
-		return -EINVAL;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
-	       pubsta->addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
-	/*
-	 * The aggregation code is not prepared to handle
-	 * anything but STA/AP due to the BSSID handling.
-	 * IBSS could work in the code but isn't supported
-	 * by drivers or the standard.
-	 */
-	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
-	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-	    sdata->vif.type != NL80211_IFTYPE_AP)
-		return -EINVAL;
-
-	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA sessions blocked. "
-		       "Denying BA session request\n");
-#endif
-		return -EINVAL;
-	}
-
-	spin_lock_bh(&sta->lock);
-
-	/* we have tried too many times, receiver does not want A-MPDU */
-	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
-		ret = -EBUSY;
-		goto err_unlock_sta;
-	}
-
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-	/* check if the TID is not in aggregation flow already */
-	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - session is not "
-				 "idle on tid %u\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-		ret = -EAGAIN;
-		goto err_unlock_sta;
-	}
-
-	/* prepare A-MPDU MLME for Tx aggregation */
-	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
-	if (!tid_tx) {
-		ret = -ENOMEM;
-		goto err_unlock_sta;
-	}
-
-	skb_queue_head_init(&tid_tx->pending);
-	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
-
-	tid_tx->timeout = timeout;
-
-	/* Tx timer */
-	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
-	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&tid_tx->addba_resp_timer);
-
-	/* assign a dialog token */
-	sta->ampdu_mlme.dialog_token_allocator++;
-	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
-
-	/*
-	 * Finally, assign it to the start array; the work item will
-	 * collect it and move it to the normal array.
-	 */
-	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;
-
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
-
-	/* this flow continues off the work */
- err_unlock_sta:
-	spin_unlock_bh(&sta->lock);
-	return ret;
-}
-EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
-
 /*
  * splice packets from the STA's pending to the local pending,
  * requires a call to ieee80211_agg_splice_finish later
@@ -498,6 +341,228 @@
 	ieee80211_wake_queue_agg(local, tid);
 }
 
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+{
+	struct tid_ampdu_tx *tid_tx;
+	struct ieee80211_local *local = sta->local;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u16 start_seq_num;
+	int ret;
+
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
+	/*
+	 * Start queuing up packets for this aggregation session.
+	 * We're going to release them once the driver is OK with
+	 * that.
+	 */
+	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+	/*
+	 * Make sure no packets are being processed. This ensures that
+	 * we have a valid starting sequence number and that in-flight
+	 * packets have been flushed out and no packets for this TID
+	 * will go into the driver during the ampdu_action call.
+	 */
+	synchronize_net();
+
+	start_seq_num = sta->tid_seq[tid] >> 4;
+
+	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
+			       &sta->sta, tid, &start_seq_num, 0);
+	if (ret) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - HW unavailable for"
+					" tid %d\n", tid);
+#endif
+		spin_lock_bh(&sta->lock);
+		ieee80211_agg_splice_packets(local, tid_tx, tid);
+		ieee80211_assign_tid_tx(sta, tid, NULL);
+		ieee80211_agg_splice_finish(local, tid);
+		spin_unlock_bh(&sta->lock);
+
+		kfree_rcu(tid_tx, rcu_head);
+		return;
+	}
+
+	/* activate the timer for the recipient's addBA response */
+	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+#endif
+
+	spin_lock_bh(&sta->lock);
+	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
+	sta->ampdu_mlme.addba_req_num[tid]++;
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
+	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
+				     tid_tx->dialog_token, start_seq_num,
+				     local->hw.max_tx_aggregation_subframes,
+				     tid_tx->timeout);
+}
+
+/*
+ * After accepting the AddBA Response we activated a timer,
+ * resetting it after each frame that we send.
+ */
+static void sta_tx_agg_session_timer_expired(unsigned long data)
+{
+	/* not an elegant detour, but there is no choice as the timer passes
+	 * only one argument, and various sta_info are needed here, so init
+	 * flow in sta_info_create gives the TID as data, while the timer_to_id
+	 * array gives the sta through container_of */
+	u8 *ptid = (u8 *)data;
+	u8 *timer_to_id = ptid - *ptid;
+	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
+					 timer_to_tid[0]);
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
+#endif
+
+	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+}
+
+int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
+				  u16 timeout)
+{
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+	struct tid_ampdu_tx *tid_tx;
+	int ret = 0;
+
+	trace_api_start_tx_ba_session(pubsta, tid);
+
+	if (WARN_ON(!local->ops->ampdu_action))
+		return -EINVAL;
+
+	if ((tid >= STA_TID_NUM) ||
+	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
+	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
+		return -EINVAL;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
+	       pubsta->addr, tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+	    sdata->vif.type != NL80211_IFTYPE_AP &&
+	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
+		return -EINVAL;
+
+	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA sessions blocked. "
+		       "Denying BA session request\n");
+#endif
+		return -EINVAL;
+	}
+
+	/*
+	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
+	 * member of an IBSS, and has no other existing Block Ack agreement
+	 * with the recipient STA, then the initiating STA shall transmit a
+	 * Probe Request frame to the recipient STA and shall not transmit an
+	 * ADDBA Request frame unless it receives a Probe Response frame
+	 * from the recipient within dot11ADDBAFailureTimeout.
+	 *
+	 * The probe request mechanism for ADDBA is currently not implemented,
+	 * but we only build up Block Ack session with HT STAs. This information
+	 * is set when we receive a bss info from a probe response or a beacon.
+	 */
+	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+	    !sta->sta.ht_cap.ht_supported) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
+		       "does not advertise HT support\n", pubsta->addr);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&sta->lock);
+
+	/* we have tried too many times, receiver does not want A-MPDU */
+	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
+		ret = -EBUSY;
+		goto err_unlock_sta;
+	}
+
+	/*
+	 * if we have tried more than HT_AGG_BURST_RETRIES times we
+	 * will spread our requests in time to avoid stalling connection
+	 * for too long
+	 */
+	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
+	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
+			HT_AGG_RETRIES_PERIOD)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - "
+		       "waiting a grace period after %d failed requests "
+		       "on tid %u\n",
+		       sta->ampdu_mlme.addba_req_num[tid], tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ret = -EBUSY;
+		goto err_unlock_sta;
+	}
+
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+	/* check if the TID is not in aggregation flow already */
+	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "BA request denied - session is not "
+				 "idle on tid %u\n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+		ret = -EAGAIN;
+		goto err_unlock_sta;
+	}
+
+	/* prepare A-MPDU MLME for Tx aggregation */
+	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+	if (!tid_tx) {
+		ret = -ENOMEM;
+		goto err_unlock_sta;
+	}
+
+	skb_queue_head_init(&tid_tx->pending);
+	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+	tid_tx->timeout = timeout;
+
+	/* response timer */
+	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
+	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&tid_tx->addba_resp_timer);
+
+	/* tx timer */
+	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
+	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&tid_tx->session_timer);
+
+	/* assign a dialog token */
+	sta->ampdu_mlme.dialog_token_allocator++;
+	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
+
+	/*
+	 * Finally, assign it to the start array; the work item will
+	 * collect it and move it to the normal array.
+	 */
+	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;
+
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+	/* this flow continues off the work */
+ err_unlock_sta:
+	spin_unlock_bh(&sta->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
+
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 					 struct sta_info *sta, u16 tid)
 {
@@ -551,7 +616,7 @@
 	}
 
 	mutex_lock(&local->sta_mtx);
-	sta = sta_info_get(sdata, ra);
+	sta = sta_info_get_bss(sdata, ra);
 	if (!sta) {
 		mutex_unlock(&local->sta_mtx);
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -680,7 +745,7 @@
 
 	mutex_lock(&local->sta_mtx);
 
-	sta = sta_info_get(sdata, ra);
+	sta = sta_info_get_bss(sdata, ra);
 	if (!sta) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -818,6 +883,11 @@
 			ieee80211_agg_tx_operational(local, sta, tid);
 
 		sta->ampdu_mlme.addba_req_num[tid] = 0;
+
+		if (tid_tx->timeout)
+			mod_timer(&tid_tx->session_timer,
+				  TU_TO_EXP_TIME(tid_tx->timeout));
+
 	} else {
 		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
 						true);
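
The new session-timer callback above recovers its sta_info from a pointer into the timer_to_tid[] array: the byte at *ptid holds the TID, which is also that element's index, so `ptid - *ptid` lands on element 0 and container_of() finishes the job. A user-space sketch of the same trick; the structure and names are illustrative:

	#include <stdio.h>
	#include <stddef.h>

	#define container_of_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct sta_sketch {
		char name[8];
		unsigned char timer_to_tid[16];	/* entry i holds the value i */
	};

	/* Mimics the timer callback: data points at timer_to_tid[tid]. */
	static void timer_cb_sketch(unsigned long data)
	{
		unsigned char *ptid = (unsigned char *)data;
		unsigned char *base = ptid - *ptid;	/* back to element 0 */
		struct sta_sketch *sta =
			container_of_sketch(base, struct sta_sketch, timer_to_tid[0]);

		printf("timer fired for %s, tid %d\n", sta->name, *ptid);
	}

	int main(void)
	{
		struct sta_sketch sta = { .name = "sta0" };
		unsigned int i;

		for (i = 0; i < 16; i++)
			sta.timer_to_tid[i] = i;

		/* arm a "timer" for TID 5 by passing &timer_to_tid[5] as data */
		timer_cb_sketch((unsigned long)&sta.timer_to_tid[5]);
		return 0;
	}
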
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d06c65f..850bb96 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -102,6 +102,16 @@
 	return 0;
 }
 
+static int ieee80211_set_noack_map(struct wiphy *wiphy,
+				  struct net_device *dev,
+				  u16 noack_map)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	sdata->noack_map = noack_map;
+	return 0;
+}
+
 static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 			     u8 key_idx, bool pairwise, const u8 *mac_addr,
 			     struct key_params *params)
@@ -345,7 +355,8 @@
 			STATION_INFO_RX_DROP_MISC |
 			STATION_INFO_BSS_PARAM |
 			STATION_INFO_CONNECTED_TIME |
-			STATION_INFO_STA_FLAGS;
+			STATION_INFO_STA_FLAGS |
+			STATION_INFO_BEACON_LOSS_COUNT;
 
 	do_posix_clock_monotonic_gettime(&uptime);
 	sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -358,6 +369,7 @@
 	sinfo->tx_retries = sta->tx_retry_count;
 	sinfo->tx_failed = sta->tx_retry_failed;
 	sinfo->rx_dropped_misc = sta->rx_dropped;
+	sinfo->beacon_loss_count = sta->beacon_loss_count;
 
 	if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
 	    (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
@@ -411,7 +423,8 @@
 				BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
 				BIT(NL80211_STA_FLAG_WME) |
 				BIT(NL80211_STA_FLAG_MFP) |
-				BIT(NL80211_STA_FLAG_AUTHENTICATED);
+				BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+				BIT(NL80211_STA_FLAG_TDLS_PEER);
 	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
 	if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
@@ -422,6 +435,8 @@
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
 	if (test_sta_flag(sta, WLAN_STA_AUTH))
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
 }
 
 
@@ -488,6 +503,31 @@
 		(params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
 }
 
+static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
+				    u8 *resp, size_t resp_len)
+{
+	struct sk_buff *new, *old;
+
+	if (!resp || !resp_len)
+		return -EINVAL;
+
+	old = rtnl_dereference(sdata->u.ap.probe_resp);
+
+	new = dev_alloc_skb(resp_len);
+	if (!new)
+		return -ENOMEM;
+
+	memcpy(skb_put(new, resp_len), resp, resp_len);
+
+	rcu_assign_pointer(sdata->u.ap.probe_resp, new);
+	synchronize_rcu();
+
+	if (old)
+		dev_kfree_skb(old);
+
+	return 0;
+}
+
 /*
  * This handles both adding a beacon and setting new beacon info
  */
@@ -498,6 +538,7 @@
 	int new_head_len, new_tail_len;
 	int size;
 	int err = -EINVAL;
+	u32 changed = 0;
 
 	old = rtnl_dereference(sdata->u.ap.beacon);
 
@@ -581,11 +622,17 @@
 
 	kfree(old);
 
-	ieee80211_config_ap_ssid(sdata, params);
+	err = ieee80211_set_probe_resp(sdata, params->probe_resp,
+				       params->probe_resp_len);
+	if (!err)
+		changed |= BSS_CHANGED_AP_PROBE_RESP;
 
-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
-						BSS_CHANGED_BEACON |
-						BSS_CHANGED_SSID);
+	ieee80211_config_ap_ssid(sdata, params);
+	changed |= BSS_CHANGED_BEACON_ENABLED |
+		   BSS_CHANGED_BEACON |
+		   BSS_CHANGED_SSID;
+
+	ieee80211_bss_info_change_notify(sdata, changed);
 	return 0;
 }
 
@@ -594,6 +641,8 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 	struct beacon_data *old;
+	struct ieee80211_sub_if_data *vlan;
+	int ret;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
@@ -601,7 +650,24 @@
 	if (old)
 		return -EALREADY;
 
-	return ieee80211_config_beacon(sdata, params);
+	ret = ieee80211_config_beacon(sdata, params);
+	if (ret)
+		return ret;
+
+	/*
+	 * Apply control port protocol, this allows us to
+	 * not encrypt dynamic WEP control frames.
+	 */
+	sdata->control_port_protocol = params->crypto.control_port_ethertype;
+	sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt;
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
+		vlan->control_port_protocol =
+			params->crypto.control_port_ethertype;
+		vlan->control_port_no_encrypt =
+			params->crypto.control_port_no_encrypt;
+	}
+
+	return 0;
 }
 
 static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -682,10 +748,11 @@
 	netif_rx_ni(skb);
 }
 
-static void sta_apply_parameters(struct ieee80211_local *local,
-				 struct sta_info *sta,
-				 struct station_parameters *params)
+static int sta_apply_parameters(struct ieee80211_local *local,
+				struct sta_info *sta,
+				struct station_parameters *params)
 {
+	int ret = 0;
 	u32 rates;
 	int i, j;
 	struct ieee80211_supported_band *sband;
@@ -697,13 +764,59 @@
 	mask = params->sta_flags_mask;
 	set = params->sta_flags_set;
 
+	/*
+	 * In mesh mode, we can clear AUTHENTICATED flag but must
+	 * also make ASSOCIATED follow appropriately for the driver
+	 * API. See also below, after AUTHORIZED changes.
+	 */
+	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+		/* cfg80211 should not allow this in non-mesh modes */
+		if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
+			return -EINVAL;
+
+		if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
+		    !test_sta_flag(sta, WLAN_STA_AUTH)) {
+			ret = sta_info_move_state_checked(sta,
+					IEEE80211_STA_AUTH);
+			if (ret)
+				return ret;
+			ret = sta_info_move_state_checked(sta,
+					IEEE80211_STA_ASSOC);
+			if (ret)
+				return ret;
+		}
+	}
+
 	if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
 		if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
-			set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+			ret = sta_info_move_state_checked(sta,
+					IEEE80211_STA_AUTHORIZED);
 		else
-			clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
+			ret = sta_info_move_state_checked(sta,
+					IEEE80211_STA_ASSOC);
+		if (ret)
+			return ret;
 	}
 
+	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+		/* cfg80211 should not allow this in non-mesh modes */
+		if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
+			return -EINVAL;
+
+		if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
+		    test_sta_flag(sta, WLAN_STA_AUTH)) {
+			ret = sta_info_move_state_checked(sta,
+					IEEE80211_STA_AUTH);
+			if (ret)
+				return ret;
+			ret = sta_info_move_state_checked(sta,
+					IEEE80211_STA_NONE);
+			if (ret)
+				return ret;
+		}
+	}
+
+
 	if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
 		if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
 			set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
@@ -728,13 +841,6 @@
 			clear_sta_flag(sta, WLAN_STA_MFP);
 	}
 
-	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
-		if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
-			set_sta_flag(sta, WLAN_STA_AUTH);
-		else
-			clear_sta_flag(sta, WLAN_STA_AUTH);
-	}
-
 	if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
 		if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 			set_sta_flag(sta, WLAN_STA_TDLS_PEER);
@@ -778,7 +884,7 @@
 	}
 
 	if (params->ht_capa)
-		ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 						  params->ht_capa,
 						  &sta->sta.ht_cap);
 
@@ -806,6 +912,8 @@
 			}
 #endif
 	}
+
+	return 0;
 }
 
 static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
@@ -832,22 +940,25 @@
 	if (is_multicast_ether_addr(mac))
 		return -EINVAL;
 
-	/* Only TDLS-supporting stations can add TDLS peers */
-	if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-	    !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
-	      sdata->vif.type == NL80211_IFTYPE_STATION))
-		return -ENOTSUPP;
-
 	sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
 	if (!sta)
 		return -ENOMEM;
 
-	set_sta_flag(sta, WLAN_STA_AUTH);
-	set_sta_flag(sta, WLAN_STA_ASSOC);
+	sta_info_move_state(sta, IEEE80211_STA_AUTH);
+	sta_info_move_state(sta, IEEE80211_STA_ASSOC);
 
-	sta_apply_parameters(local, sta, params);
+	err = sta_apply_parameters(local, sta, params);
+	if (err) {
+		sta_info_free(local, sta);
+		return err;
+	}
 
-	rate_control_rate_init(sta);
+	/*
+	 * for TDLS, rate control should be initialized only when supported
+	 * rates are known.
+	 */
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+		rate_control_rate_init(sta);
 
 	layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
 		sdata->vif.type == NL80211_IFTYPE_AP;
@@ -891,19 +1002,19 @@
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *vlansdata;
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	sta = sta_info_get_bss(sdata, mac);
 	if (!sta) {
-		rcu_read_unlock();
+		mutex_unlock(&local->sta_mtx);
 		return -ENOENT;
 	}
 
-	/* The TDLS bit cannot be toggled after the STA was added */
-	if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-	    !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
-	    !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
-		rcu_read_unlock();
+	/* in station mode, supported rates are only valid with TDLS */
+	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    params->supported_rates &&
+	    !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+		mutex_unlock(&local->sta_mtx);
 		return -EINVAL;
 	}
 
@@ -912,13 +1023,13 @@
 
 		if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
 		    vlansdata->vif.type != NL80211_IFTYPE_AP) {
-			rcu_read_unlock();
+			mutex_unlock(&local->sta_mtx);
 			return -EINVAL;
 		}
 
 		if (params->vlan->ieee80211_ptr->use_4addr) {
 			if (vlansdata->u.vlan.sta) {
-				rcu_read_unlock();
+				mutex_unlock(&local->sta_mtx);
 				return -EBUSY;
 			}
 
@@ -931,7 +1042,10 @@
 
 	sta_apply_parameters(local, sta, params);
 
-	rcu_read_unlock();
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
+		rate_control_rate_init(sta);
+
+	mutex_unlock(&local->sta_mtx);
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
@@ -1123,6 +1237,8 @@
 {
 	u8 *new_ie;
 	const u8 *old_ie;
+	struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
+					struct ieee80211_sub_if_data, u.mesh);
 
 	/* allocate information elements */
 	new_ie = NULL;
@@ -1149,6 +1265,10 @@
 	if (setup->is_secure)
 		ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
 
+	/* mcast rate setting in Mesh Node */
+	memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
+						sizeof(setup->mcast_rate));
+
 	return 0;
 }
 
@@ -1194,6 +1314,9 @@
 	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask))
 		conf->dot11MeshHWMPpreqMinInterval =
 			nconf->dot11MeshHWMPpreqMinInterval;
+	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask))
+		conf->dot11MeshHWMPperrMinInterval =
+			nconf->dot11MeshHWMPperrMinInterval;
 	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
 			   mask))
 		conf->dot11MeshHWMPnetDiameterTraversalTime =
@@ -1394,7 +1517,7 @@
 	    (old_oper_type != local->_oper_channel_type))
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-	if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+	if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
 	    old_vif_oper_type != sdata->vif.bss_conf.channel_type)
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
 
@@ -1917,7 +2040,7 @@
 			     enum nl80211_channel_type channel_type,
 			     bool channel_type_valid, unsigned int wait,
 			     const u8 *buf, size_t len, bool no_cck,
-			     u64 *cookie)
+			     bool dont_wait_for_ack, u64 *cookie)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
@@ -1925,10 +2048,15 @@
 	struct sta_info *sta;
 	struct ieee80211_work *wk;
 	const struct ieee80211_mgmt *mgmt = (void *)buf;
-	u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
-		    IEEE80211_TX_CTL_REQ_TX_STATUS;
+	u32 flags;
 	bool is_offchan = false;
 
+	if (dont_wait_for_ack)
+		flags = IEEE80211_TX_CTL_NO_ACK;
+	else
+		flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+			IEEE80211_TX_CTL_REQ_TX_STATUS;
+
 	/* Check that we are on the requested channel for transmission */
 	if (chan != local->tmp_channel &&
 	    chan != local->oper_channel)
@@ -2488,6 +2616,82 @@
 	return 0;
 }
 
+static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
+				  const u8 *peer, u64 *cookie)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_qos_hdr *nullfunc;
+	struct sk_buff *skb;
+	int size = sizeof(*nullfunc);
+	__le16 fc;
+	bool qos;
+	struct ieee80211_tx_info *info;
+	struct sta_info *sta;
+
+	rcu_read_lock();
+	sta = sta_info_get(sdata, peer);
+	if (sta) {
+		qos = test_sta_flag(sta, WLAN_STA_WME);
+		rcu_read_unlock();
+	} else {
+		rcu_read_unlock();
+		return -ENOLINK;
+	}
+
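+	/* build a (QoS) null-data frame towards the peer */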
+	if (qos) {
+		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+				 IEEE80211_STYPE_QOS_NULLFUNC |
+				 IEEE80211_FCTL_FROMDS);
+	} else {
+		size -= 2;
+		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+				 IEEE80211_STYPE_NULLFUNC |
+				 IEEE80211_FCTL_FROMDS);
+	}
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb->dev = dev;
+
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	nullfunc = (void *) skb_put(skb, size);
+	nullfunc->frame_control = fc;
+	nullfunc->duration_id = 0;
+	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+	nullfunc->seq_ctrl = 0;
+
+	info = IEEE80211_SKB_CB(skb);
+
+	info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
+		       IEEE80211_TX_INTFL_NL80211_FRAME_TX;
+
+	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+	skb->priority = 7;
+	if (qos)
+		nullfunc->qos_ctrl = cpu_to_le16(7);
+
+	local_bh_disable();
+	ieee80211_xmit(sdata, skb);
+	local_bh_enable();
+
+	*cookie = (unsigned long) skb;
+	return 0;
+}
+
+static struct ieee80211_channel *
+ieee80211_wiphy_get_channel(struct wiphy *wiphy)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+
+	return local->oper_channel;
+}
+
 struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
@@ -2553,4 +2757,7 @@
 	.set_rekey_data = ieee80211_set_rekey_data,
 	.tdls_oper = ieee80211_tdls_oper,
 	.tdls_mgmt = ieee80211_tdls_mgmt,
+	.probe_client = ieee80211_probe_client,
+	.get_channel = ieee80211_wiphy_get_channel,
+	.set_noack_map = ieee80211_set_noack_map,
 };
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 883996b..90baea5 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -97,40 +97,6 @@
 	.llseek = noop_llseek,
 };
 
-static ssize_t noack_read(struct file *file, char __user *user_buf,
-			  size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-
-	return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
-				      local->wifi_wme_noack_test);
-}
-
-static ssize_t noack_write(struct file *file,
-			   const char __user *user_buf,
-			   size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	char buf[10];
-	size_t len;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-	buf[len] = '\0';
-
-	local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
-
-	return count;
-}
-
-static const struct file_operations noack_ops = {
-	.read = noack_read,
-	.write = noack_write,
-	.open = mac80211_open_file_generic,
-	.llseek = default_llseek,
-};
-
 static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
 				 size_t count, loff_t *ppos)
 {
@@ -190,7 +156,7 @@
 		return -EFAULT;
 	buf[len] = '\0';
 
-	ret = strict_strtoul(buf, 0, &val);
+	ret = kstrtoul(buf, 0, &val);
 
 	if (ret)
 		return -EINVAL;
@@ -398,7 +364,6 @@
 	DEBUGFS_ADD(wep_iv);
 	DEBUGFS_ADD(queues);
 	DEBUGFS_ADD_MODE(reset, 0200);
-	DEBUGFS_ADD(noack);
 	DEBUGFS_ADD(uapsd_queues);
 	DEBUGFS_ADD(uapsd_max_sp_len);
 	DEBUGFS_ADD(channel_type);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9352819..176c08f 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -321,6 +321,7 @@
 __IEEE80211_IF_FILE_W(tkip_mic_test);
 
 /* AP attributes */
+IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC);
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
 
@@ -405,6 +406,8 @@
 		u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
 		u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
+		u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
 		u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
@@ -456,6 +459,7 @@
 	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
 	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
+	DEBUGFS_ADD(num_sta_authorized);
 	DEBUGFS_ADD(num_sta_ps);
 	DEBUGFS_ADD(dtim_count);
 	DEBUGFS_ADD(num_buffered_multicast);
@@ -534,6 +538,7 @@
 	MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
 	MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
 	MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
+	MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval);
 	MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
 	MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
 	MESHPARAMS_ADD(path_refresh_time);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3110cbd..2406b3e 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,10 +63,10 @@
 	test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
 
 	int res = scnprintf(buf, sizeof(buf),
-			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
 			    TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
 			    TEST(PS_DRIVER), TEST(AUTHORIZED),
-			    TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+			    TEST(SHORT_PREAMBLE),
 			    TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
 			    TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
 			    TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 5f165d7..e8960ae 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,11 +5,34 @@
 #include "ieee80211_i.h"
 #include "driver-trace.h"
 
+static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
+{
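+	/* catch driver calls for an interface not (yet) added to the driver */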
+	WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
+}
+
+static inline struct ieee80211_sub_if_data *
+get_bss_sdata(struct ieee80211_sub_if_data *sdata)
+{
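+	/* drivers never see AP_VLAN interfaces, map them to the owning AP */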
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+				     u.ap);
+
+	return sdata;
+}
+
 static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
 {
 	local->ops->tx(&local->hw, skb);
 }
 
+static inline void drv_tx_frags(struct ieee80211_local *local,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta,
+				struct sk_buff_head *skbs)
+{
+	local->ops->tx_frags(&local->hw, vif, sta, skbs);
+}
+
 static inline int drv_start(struct ieee80211_local *local)
 {
 	int ret;
@@ -69,15 +92,23 @@
 #endif
 
 static inline int drv_add_interface(struct ieee80211_local *local,
-				    struct ieee80211_vif *vif)
+				    struct ieee80211_sub_if_data *sdata)
 {
 	int ret;
 
 	might_sleep();
 
-	trace_drv_add_interface(local, vif_to_sdata(vif));
-	ret = local->ops->add_interface(&local->hw, vif);
+	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    sdata->vif.type == NL80211_IFTYPE_MONITOR))
+		return -EINVAL;
+
+	trace_drv_add_interface(local, sdata);
+	ret = local->ops->add_interface(&local->hw, &sdata->vif);
 	trace_drv_return_int(local, ret);
+
+	if (ret == 0)
+		sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+
 	return ret;
 }
 
@@ -89,6 +120,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_change_interface(local, sdata, type, p2p);
 	ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
 	trace_drv_return_int(local, ret);
@@ -96,12 +129,15 @@
 }
 
 static inline void drv_remove_interface(struct ieee80211_local *local,
-					struct ieee80211_vif *vif)
+					struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
 
-	trace_drv_remove_interface(local, vif_to_sdata(vif));
-	local->ops->remove_interface(&local->hw, vif);
+	check_sdata_in_driver(sdata);
+
+	trace_drv_remove_interface(local, sdata);
+	local->ops->remove_interface(&local->hw, &sdata->vif);
+	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
 	trace_drv_return_void(local);
 }
 
@@ -124,6 +160,8 @@
 {
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_bss_info_changed(local, sdata, info, changed);
 	if (local->ops->bss_info_changed)
 		local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
@@ -139,6 +177,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_tx_sync(local, sdata, bssid, type);
 	if (local->ops->tx_sync)
 		ret = local->ops->tx_sync(&local->hw, &sdata->vif,
@@ -154,6 +194,8 @@
 {
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_finish_tx_sync(local, sdata, bssid, type);
 	if (local->ops->finish_tx_sync)
 		local->ops->finish_tx_sync(&local->hw, &sdata->vif,
@@ -211,6 +253,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_set_key(local, cmd, sdata, sta, key);
 	ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
 	trace_drv_return_int(local, ret);
@@ -228,6 +272,8 @@
 	if (sta)
 		ista = &sta->sta;
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
 	if (local->ops->update_tkip_key)
 		local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
@@ -243,6 +289,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_hw_scan(local, sdata);
 	ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
 	trace_drv_return_int(local, ret);
@@ -254,6 +302,8 @@
 {
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_cancel_hw_scan(local, sdata);
 	local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
 	trace_drv_return_void(local);
@@ -269,6 +319,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_sched_scan_start(local, sdata);
 	ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
 					      req, ies);
@@ -281,6 +333,8 @@
 {
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_sched_scan_stop(local, sdata);
 	local->ops->sched_scan_stop(&local->hw, &sdata->vif);
 	trace_drv_return_void(local);
@@ -377,6 +431,9 @@
 				  enum sta_notify_cmd cmd,
 				  struct ieee80211_sta *sta)
 {
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
 	trace_drv_sta_notify(local, sdata, cmd, sta);
 	if (local->ops->sta_notify)
 		local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
@@ -391,6 +448,9 @@
 
 	might_sleep();
 
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
 	trace_drv_sta_add(local, sdata, sta);
 	if (local->ops->sta_add)
 		ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
@@ -406,6 +466,9 @@
 {
 	might_sleep();
 
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
 	trace_drv_sta_remove(local, sdata, sta);
 	if (local->ops->sta_remove)
 		local->ops->sta_remove(&local->hw, &sdata->vif, sta);
@@ -421,6 +484,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_conf_tx(local, sdata, queue, params);
 	if (local->ops->conf_tx)
 		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
@@ -436,6 +501,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_get_tsf(local, sdata);
 	if (local->ops->get_tsf)
 		ret = local->ops->get_tsf(&local->hw, &sdata->vif);
@@ -449,6 +516,8 @@
 {
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_set_tsf(local, sdata, tsf);
 	if (local->ops->set_tsf)
 		local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
@@ -460,6 +529,8 @@
 {
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_reset_tsf(local, sdata);
 	if (local->ops->reset_tsf)
 		local->ops->reset_tsf(&local->hw, &sdata->vif);
@@ -489,6 +560,9 @@
 
 	might_sleep();
 
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
 	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
 
 	if (local->ops->ampdu_action)
@@ -644,6 +718,8 @@
 
 	might_sleep();
 
+	check_sdata_in_driver(sdata);
+
 	trace_drv_set_bitrate_mask(local, sdata, mask);
 	if (local->ops->set_bitrate_mask)
 		ret = local->ops->set_bitrate_mask(&local->hw,
@@ -657,6 +733,8 @@
 				      struct ieee80211_sub_if_data *sdata,
 				      struct cfg80211_gtk_rekey_data *data)
 {
+	check_sdata_in_driver(sdata);
+
 	trace_drv_set_rekey_data(local, sdata, data);
 	if (local->ops->set_rekey_data)
 		local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 2af4fca..6e9df8f 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -5,17 +5,6 @@
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
 
-#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(...)
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(evt_class, name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f0fb737..f25fff7 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,7 +19,84 @@
 #include "ieee80211_i.h"
 #include "rate.h"
 
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
+bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
+{
+	const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+	if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
+	    !(sdata->u.mgd.ht_capa.cap_info & flg))
+		return true;
+	return false;
+}
+
+static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+				  struct ieee80211_sta_ht_cap *ht_cap,
+				  u16 flag)
+{
+	__le16 le_flag = cpu_to_le16(flag);
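+	/* clear the bit only if masked and the override disables it */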
+	if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
+		if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+			ht_cap->cap &= ~flag;
+	}
+}
+
+void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_sta_ht_cap *ht_cap)
+{
+	u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
+	u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+	int i;
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION) {
+		/* AP interfaces call this code when adding new stations,
+		 * so just silently ignore non-station interfaces.
+		 */
+		return;
+	}
+
+	/* NOTE:  If you add more over-rides here, update register_hw
+	 * ht_capa_mod_msk logic in main.c as well.
+	 * And, if this method can ever change ht_cap.ht_supported, fix
+	 * the check in ieee80211_add_ht_ie.
+	 */
+
+	/* check for HT over-rides, MCS rates first. */
+	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+		u8 m = smask[i];
+		ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */
+		/* Add back rates that are supported */
+		ht_cap->mcs.rx_mask[i] |= (m & scaps[i]);
+	}
+
+	/* Force removal of HT-40 capabilities? */
+	__check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+	__check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+
+	/* Allow user to disable the max-AMSDU bit. */
+	__check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+
+	/* Allow user to decrease AMPDU factor */
+	if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+	    IEEE80211_HT_AMPDU_PARM_FACTOR) {
+		u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
+			& IEEE80211_HT_AMPDU_PARM_FACTOR;
+		if (n < ht_cap->ampdu_factor)
+			ht_cap->ampdu_factor = n;
+	}
+
+	/* Allow the user to increase AMPDU density. */
+	if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+	    IEEE80211_HT_AMPDU_PARM_DENSITY) {
+		u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+			IEEE80211_HT_AMPDU_PARM_DENSITY)
+			>> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
+		if (n > ht_cap->ampdu_density)
+			ht_cap->ampdu_density = n;
+	}
+}
+
+
+void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211_supported_band *sband,
 				       struct ieee80211_ht_cap *ht_cap_ie,
 				       struct ieee80211_sta_ht_cap *ht_cap)
 {
@@ -103,6 +180,12 @@
 	/* handle MCS rate 32 too */
 	if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
 		ht_cap->mcs.rx_mask[32/8] |= 1;
+
+	/*
+	 * If user has specified capability over-rides, take care
+	 * of that here.
+	 */
+	ieee80211_apply_htcap_overrides(sdata, ht_cap);
 }
 
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx)
@@ -196,10 +279,13 @@
 	memcpy(mgmt->da, da, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	if (sdata->vif.type == NL80211_IFTYPE_AP ||
-	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
 		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
 	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
@@ -214,7 +300,7 @@
 	mgmt->u.action.u.delba.params = cpu_to_le16(params);
 	mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
 
-	ieee80211_tx_skb(sdata, skb);
+	ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 
 void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ede9a8b..f8a32bf 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -77,6 +77,7 @@
 	struct cfg80211_bss *bss;
 	u32 bss_change;
 	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
+	enum nl80211_channel_type channel_type;
 
 	lockdep_assert_held(&ifibss->mtx);
 
@@ -97,6 +98,7 @@
 	/* if merging, indicate to driver that we leave the old IBSS */
 	if (sdata->vif.bss_conf.ibss_joined) {
 		sdata->vif.bss_conf.ibss_joined = false;
+		netif_carrier_off(sdata->dev);
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
 	}
 
@@ -104,8 +106,16 @@
 
 	sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-	local->oper_channel = chan;
-	WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
+	channel_type = ifibss->channel_type;
+	if (channel_type > NL80211_CHAN_HT20 &&
+	    !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+		channel_type = NL80211_CHAN_HT20;
+	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+		/* can only fail due to HT40+/- mismatch */
+		channel_type = NL80211_CHAN_HT20;
+		WARN_ON(!ieee80211_set_channel_type(local, sdata,
+						    NL80211_CHAN_HT20));
+	}
 	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
 	sband = local->hw.wiphy->bands[chan->band];
@@ -171,6 +181,19 @@
 		memcpy(skb_put(skb, ifibss->ie_len),
 		       ifibss->ie, ifibss->ie_len);
 
+	/* add HT capability and information IEs */
+	if (channel_type && sband->ht_cap.ht_supported) {
+		pos = skb_put(skb, 4 +
+				   sizeof(struct ieee80211_ht_cap) +
+				   sizeof(struct ieee80211_ht_info));
+		pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+						sband->ht_cap.cap);
+		pos = ieee80211_ie_build_ht_info(pos,
+						 &sband->ht_cap,
+						 chan,
+						 channel_type);
+	}
+
 	if (local->hw.queues >= 4) {
 		pos = skb_put(skb, 9);
 		*pos++ = WLAN_EID_VENDOR_SPECIFIC;
@@ -194,6 +217,7 @@
 	bss_change |= BSS_CHANGED_BEACON;
 	bss_change |= BSS_CHANGED_BEACON_ENABLED;
 	bss_change |= BSS_CHANGED_BASIC_RATES;
+	bss_change |= BSS_CHANGED_HT;
 	bss_change |= BSS_CHANGED_IBSS;
 	sdata->vif.bss_conf.ibss_joined = true;
 	ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -207,6 +231,7 @@
 	bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
 					mgmt, skb->len, 0, GFP_KERNEL);
 	cfg80211_put_bss(bss);
+	netif_carrier_on(sdata->dev);
 	cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
 }
 
@@ -250,6 +275,80 @@
 				  cbss->tsf);
 }
 
+static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
+	__acquires(RCU)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u8 addr[ETH_ALEN];
+
+	memcpy(addr, sta->sta.addr, ETH_ALEN);
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+	wiphy_debug(sdata->local->hw.wiphy,
+		    "Adding new IBSS station %pM (dev=%s)\n",
+		    addr, sdata->name);
+#endif
+
+	sta_info_move_state(sta, IEEE80211_STA_AUTH);
+	sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+	sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+	rate_control_rate_init(sta);
+
+	/* If it fails, maybe we raced another insertion? */
+	if (sta_info_insert_rcu(sta))
+		return sta_info_get(sdata, addr);
+	return sta;
+}
+
+static struct sta_info *
+ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
+		       const u8 *bssid, const u8 *addr,
+		       u32 supp_rates)
+	__acquires(RCU)
+{
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	int band = local->hw.conf.channel->band;
+
+	/*
+	 * XXX: Consider removing the least recently used entry and
+	 * 	allowing a new one to be added.
+	 */
+	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
+			       sdata->name, addr);
+		rcu_read_lock();
+		return NULL;
+	}
+
+	if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) {
+		rcu_read_lock();
+		return NULL;
+	}
+
+	if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) {
+		rcu_read_lock();
+		return NULL;
+	}
+
+	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
+	if (!sta) {
+		rcu_read_lock();
+		return NULL;
+	}
+
+	sta->last_rx = jiffies;
+
+	/* make sure mandatory rates are always added */
+	sta->sta.supp_rates[band] = supp_rates |
+			ieee80211_mandatory_rates(local, band);
+
+	return ieee80211_ibss_finish_sta(sta);
+}
+
 static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 				  struct ieee80211_mgmt *mgmt,
 				  size_t len,
@@ -266,6 +365,8 @@
 	u64 beacon_timestamp, rx_timestamp;
 	u32 supp_rates = 0;
 	enum ieee80211_band band = rx_status->band;
+	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+	bool rates_updated = false;
 
 	if (elems->ds_params && elems->ds_params_len == 1)
 		freq = ieee80211_channel_to_frequency(elems->ds_params[0],
@@ -305,17 +406,51 @@
 						prev_rates,
 						sta->sta.supp_rates[band]);
 #endif
-					rate_control_rate_init(sta);
+					rates_updated = true;
 				}
-			} else
+			} else {
+				rcu_read_unlock();
 				sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
-						mgmt->sa, supp_rates,
-						GFP_ATOMIC);
+						mgmt->sa, supp_rates);
+			}
 		}
 
 		if (sta && elems->wmm_info)
 			set_sta_flag(sta, WLAN_STA_WME);
 
+		if (sta && elems->ht_info_elem && elems->ht_cap_elem &&
+		    sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
+			/* we both use HT */
+			struct ieee80211_sta_ht_cap sta_ht_cap_new;
+			enum nl80211_channel_type channel_type =
+				ieee80211_ht_info_to_channel_type(
+							elems->ht_info_elem);
+
+			ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+							  elems->ht_cap_elem,
+							  &sta_ht_cap_new);
+
+			/*
+			 * fall back to HT20 if we don't use HT40 ourselves,
+			 * or the peer uses the opposite extension channel
+			 */
+			if ((channel_type == NL80211_CHAN_HT40MINUS ||
+			     channel_type == NL80211_CHAN_HT40PLUS) &&
+			    channel_type != sdata->u.ibss.channel_type)
+				sta_ht_cap_new.cap &=
+					~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+			if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new,
+				   sizeof(sta_ht_cap_new))) {
+				memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
+				       sizeof(sta_ht_cap_new));
+				rates_updated = true;
+			}
+		}
+
+		if (sta && rates_updated)
+			rate_control_rate_init(sta);
+
 		rcu_read_unlock();
 	}
 
@@ -404,21 +539,17 @@
 		ieee80211_sta_join_ibss(sdata, bss);
 		supp_rates = ieee80211_sta_get_rates(local, elems, band);
 		ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
-				       supp_rates, GFP_KERNEL);
+				       supp_rates);
+		rcu_read_unlock();
 	}
 
  put_bss:
 	ieee80211_rx_bss_put(local, bss);
 }
 
-/*
- * Add a new IBSS station, will also be called by the RX code when,
- * in IBSS mode, receiving a frame from a yet-unknown station, hence
- * must be callable in atomic context.
- */
-struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
-					u8 *bssid, u8 *addr, u32 supp_rates,
-					gfp_t gfp)
+void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+			      const u8 *bssid, const u8 *addr,
+			      u32 supp_rates)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
@@ -433,37 +564,29 @@
 		if (net_ratelimit())
 			printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
 			       sdata->name, addr);
-		return NULL;
+		return;
 	}
 
 	if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
-		return NULL;
+		return;
 
 	if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
-		return NULL;
+		return;
 
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n",
-		    addr, sdata->name);
-#endif
-
-	sta = sta_info_alloc(sdata, addr, gfp);
+	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
 	if (!sta)
-		return NULL;
+		return;
 
 	sta->last_rx = jiffies;
-	set_sta_flag(sta, WLAN_STA_AUTHORIZED);
 
 	/* make sure mandatory rates are always added */
 	sta->sta.supp_rates[band] = supp_rates |
 			ieee80211_mandatory_rates(local, band);
 
-	rate_control_rate_init(sta);
-
-	/* If it fails, maybe we raced another insertion? */
-	if (sta_info_insert(sta))
-		return sta_info_get(sdata, addr);
-	return sta;
+	spin_lock(&ifibss->incomplete_lock);
+	list_add(&sta->list, &ifibss->incomplete_stations);
+	spin_unlock(&ifibss->incomplete_lock);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
@@ -802,6 +925,7 @@
 void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct sta_info *sta;
 
 	mutex_lock(&ifibss->mtx);
 
@@ -813,6 +937,19 @@
 	if (!ifibss->ssid_len)
 		goto out;
 
+	spin_lock_bh(&ifibss->incomplete_lock);
+	while (!list_empty(&ifibss->incomplete_stations)) {
+		sta = list_first_entry(&ifibss->incomplete_stations,
+				       struct sta_info, list);
+		list_del(&sta->list);
+		spin_unlock_bh(&ifibss->incomplete_lock);
+
+		ieee80211_ibss_finish_sta(sta);
+		rcu_read_unlock();
+		spin_lock_bh(&ifibss->incomplete_lock);
+	}
+	spin_unlock_bh(&ifibss->incomplete_lock);
+
 	switch (ifibss->state) {
 	case IEEE80211_IBSS_MLME_SEARCH:
 		ieee80211_sta_find_ibss(sdata);
@@ -871,6 +1008,8 @@
 	setup_timer(&ifibss->timer, ieee80211_ibss_timer,
 		    (unsigned long) sdata);
 	mutex_init(&ifibss->mtx);
+	INIT_LIST_HEAD(&ifibss->incomplete_stations);
+	spin_lock_init(&ifibss->incomplete_lock);
 }
 
 /* scan finished notification */
@@ -894,12 +1033,18 @@
 			struct cfg80211_ibss_params *params)
 {
 	struct sk_buff *skb;
+	u32 changed = 0;
 
 	skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
-			    36 /* bitrates */ +
-			    34 /* SSID */ +
-			    3  /* DS params */ +
-			    4  /* IBSS params */ +
+			    sizeof(struct ieee80211_hdr_3addr) +
+			    12 /* struct ieee80211_mgmt.u.beacon */ +
+			    2 + IEEE80211_MAX_SSID_LEN /* max SSID */ +
+			    2 + 8 /* max Supported Rates */ +
+			    3 /* max DS params */ +
+			    4 /* IBSS params */ +
+			    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+			    2 + sizeof(struct ieee80211_ht_cap) +
+			    2 + sizeof(struct ieee80211_ht_info) +
 			    params->ie_len);
 	if (!skb)
 		return -ENOMEM;
@@ -920,13 +1065,18 @@
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
 	sdata->u.ibss.channel = params->channel;
+	sdata->u.ibss.channel_type = params->channel_type;
 	sdata->u.ibss.fixed_channel = params->channel_fixed;
 
 	/* fix ourselves to that channel now already */
 	if (params->channel_fixed) {
 		sdata->local->oper_channel = params->channel;
-		WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata,
-						    NL80211_CHAN_NO_HT));
+		if (!ieee80211_set_channel_type(sdata->local, sdata,
+					       params->channel_type)) {
+			mutex_unlock(&sdata->u.ibss.mtx);
+			kfree_skb(skb);
+			return -EINVAL;
+		}
 	}
 
 	if (params->ie) {
@@ -949,6 +1099,23 @@
 	ieee80211_recalc_idle(sdata->local);
 	mutex_unlock(&sdata->local->mtx);
 
+	/*
+	 * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
+	 * reserved, but an HT STA shall protect HT transmissions as though
+	 * the HT Protection field were set to non-HT mixed mode.
+	 *
+	 * In an IBSS, the RIFS Mode field of the HT Operation element is
+	 * also reserved, but an HT STA shall operate as though this field
+	 * were set to 1.
+	 */
+
+	sdata->vif.bss_conf.ht_operation_mode |=
+		  IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED
+		| IEEE80211_HT_PARAM_RIFS_MODE;
+
+	changed |= BSS_CHANGED_HT;
+	ieee80211_bss_info_change_notify(sdata, changed);
+
 	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
 	return 0;
@@ -962,6 +1129,7 @@
 	struct cfg80211_bss *cbss;
 	u16 capability;
 	int active_ibss;
+	struct sta_info *sta;
 
 	mutex_lock(&sdata->u.ibss.mtx);
 
@@ -991,6 +1159,20 @@
 
 	sta_info_flush(sdata->local, sdata);
 
+	spin_lock_bh(&ifibss->incomplete_lock);
+	while (!list_empty(&ifibss->incomplete_stations)) {
+		sta = list_first_entry(&ifibss->incomplete_stations,
+				       struct sta_info, list);
+		list_del(&sta->list);
+		spin_unlock_bh(&ifibss->incomplete_lock);
+
+		sta_info_free(local, sta);
+		spin_lock_bh(&ifibss->incomplete_lock);
+	}
+	spin_unlock_bh(&ifibss->incomplete_lock);
+
+	netif_carrier_off(sdata->dev);
+
 	/* remove beacon */
 	kfree(sdata->u.ibss.ie);
 	skb = rcu_dereference_protected(sdata->u.ibss.presp,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ea10a51..2f0642d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/etherdevice.h>
 #include <linux/leds.h>
+#include <linux/idr.h>
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
 #include <net/mac80211.h>
@@ -141,6 +142,7 @@
 
 struct ieee80211_tx_data {
 	struct sk_buff *skb;
+	struct sk_buff_head skbs;
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
@@ -184,12 +186,15 @@
  * enum ieee80211_rx_flags - RX data flags
  *
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
+ * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
+ *	to cfg80211_report_obss_beacon().
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
  */
 enum ieee80211_rx_flags {
 	IEEE80211_RX_CMNTR		= BIT(0),
+	IEEE80211_RX_BEACON_REPORTED	= BIT(1),
 };
 
 struct ieee80211_rx_data {
@@ -228,6 +233,7 @@
 
 struct ieee80211_if_ap {
 	struct beacon_data __rcu *beacon;
+	struct sk_buff __rcu *probe_resp;
 
 	struct list_head vlans;
 
@@ -237,6 +243,7 @@
 	u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
 	struct sk_buff_head ps_bc_buf;
 	atomic_t num_sta_ps; /* number of stations in PS mode */
+	atomic_t num_sta_authorized; /* number of authorized stations */
 	int dtim_count;
 	bool dtim_bc_mc;
 };
@@ -443,6 +450,9 @@
 	 */
 	int rssi_min_thold, rssi_max_thold;
 	int last_ave_beacon_signal;
+
+	struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
+	struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
 };
 
 struct ieee80211_if_ibss {
@@ -465,12 +475,16 @@
 	u8 ssid_len, ie_len;
 	u8 *ie;
 	struct ieee80211_channel *channel;
+	enum nl80211_channel_type channel_type;
 
 	unsigned long ibss_join_req;
 	/* probe response/beacon for IBSS */
 	struct sk_buff __rcu *presp;
 	struct sk_buff *skb;
 
+	spinlock_t incomplete_lock;
+	struct list_head incomplete_stations;
+
 	enum {
 		IEEE80211_IBSS_MLME_SEARCH,
 		IEEE80211_IBSS_MLME_JOINED,
@@ -505,7 +519,9 @@
 	atomic_t mpaths;
 	/* Timestamp of last SN update */
 	unsigned long last_sn_update;
-	/* Timestamp of last SN sent */
+	/* Time when it's ok to send next PERR */
+	unsigned long next_perr;
+	/* Timestamp of last PREQ sent */
 	unsigned long last_preq;
 	struct mesh_rmc *rmc;
 	spinlock_t mesh_preq_queue_lock;
@@ -543,6 +559,7 @@
  *	associated stations and deliver multicast frames both
  *	back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
+ * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
  */
 enum ieee80211_sub_if_data_flags {
 	IEEE80211_SDATA_ALLMULTI		= BIT(0),
@@ -550,6 +567,7 @@
 	IEEE80211_SDATA_OPERATING_GMODE		= BIT(2),
 	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
 	IEEE80211_SDATA_DISCONNECT_RESUME	= BIT(4),
+	IEEE80211_SDATA_IN_DRIVER		= BIT(5),
 };
 
 /**
@@ -600,6 +618,9 @@
 	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
 	unsigned int fragment_next;
 
+	/* TID bitmap for NoAck policy */
+	u16 noack_map;
+
 	struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
 	struct ieee80211_key __rcu *default_unicast_key;
 	struct ieee80211_key __rcu *default_multicast_key;
@@ -722,17 +743,16 @@
  *	operating channel
  * @SCAN_SET_CHANNEL: Set the next channel to be scanned
  * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
- * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP
- *	about us leaving the channel and stop all associated STA interfaces
- * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the
- *	AP about us being back and restart all associated STA interfaces
+ * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to
+ *	send out data
+ * @SCAN_RESUME: Resume the scan and scan the next channel
  */
 enum mac80211_scan_state {
 	SCAN_DECISION,
 	SCAN_SET_CHANNEL,
 	SCAN_SEND_PROBE,
-	SCAN_LEAVE_OPER_CHANNEL,
-	SCAN_ENTER_OPER_CHANNEL,
+	SCAN_SUSPEND,
+	SCAN_RESUME,
 };
 
 struct ieee80211_local {
@@ -835,18 +855,15 @@
 
 	/* Station data */
 	/*
-	 * The mutex only protects the list and counter,
-	 * reads are done in RCU.
-	 * Additionally, the lock protects the hash table,
-	 * the pending list and each BSS's TIM bitmap.
+	 * The mutex only protects the list, hash table and
+	 * counter; reads are done with RCU.
 	 */
 	struct mutex sta_mtx;
-	spinlock_t sta_lock;
+	spinlock_t tim_lock;
 	unsigned long num_sta;
-	struct list_head sta_list, sta_pending_list;
+	struct list_head sta_list;
 	struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
 	struct timer_list sta_cleanup;
-	struct work_struct sta_finish_work;
 	int sta_generation;
 
 	struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -951,7 +968,6 @@
 	int total_ps_buffered; /* total number of all buffered unicast and
 				* multicast packets for power saving stations
 				*/
-	int wifi_wme_noack_test;
 	unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
 
 	/*
@@ -1012,6 +1028,9 @@
 	u32 hw_roc_cookie;
 	bool hw_roc_for_tx;
 
+	struct idr ack_status_frames;
+	spinlock_t ack_status_lock;
+
 	/* dummy netdev for use w/ NAPI */
 	struct net_device napi_dev;
 
@@ -1030,6 +1049,69 @@
 	u16 tid;
 };
 
+/* Parsed Information Elements */
+struct ieee802_11_elems {
+	u8 *ie_start;
+	size_t total_len;
+
+	/* pointers to IEs */
+	u8 *ssid;
+	u8 *supp_rates;
+	u8 *fh_params;
+	u8 *ds_params;
+	u8 *cf_params;
+	struct ieee80211_tim_ie *tim;
+	u8 *ibss_params;
+	u8 *challenge;
+	u8 *wpa;
+	u8 *rsn;
+	u8 *erp_info;
+	u8 *ext_supp_rates;
+	u8 *wmm_info;
+	u8 *wmm_param;
+	struct ieee80211_ht_cap *ht_cap_elem;
+	struct ieee80211_ht_info *ht_info_elem;
+	struct ieee80211_meshconf_ie *mesh_config;
+	u8 *mesh_id;
+	u8 *peering;
+	u8 *preq;
+	u8 *prep;
+	u8 *perr;
+	struct ieee80211_rann_ie *rann;
+	u8 *ch_switch_elem;
+	u8 *country_elem;
+	u8 *pwr_constr_elem;
+	u8 *quiet_elem;	/* first quiet element */
+	u8 *timeout_int;
+
+	/* length of them, respectively */
+	u8 ssid_len;
+	u8 supp_rates_len;
+	u8 fh_params_len;
+	u8 ds_params_len;
+	u8 cf_params_len;
+	u8 tim_len;
+	u8 ibss_params_len;
+	u8 challenge_len;
+	u8 wpa_len;
+	u8 rsn_len;
+	u8 erp_info_len;
+	u8 ext_supp_rates_len;
+	u8 wmm_info_len;
+	u8 wmm_param_len;
+	u8 mesh_id_len;
+	u8 peering_len;
+	u8 preq_len;
+	u8 prep_len;
+	u8 perr_len;
+	u8 ch_switch_elem_len;
+	u8 country_elem_len;
+	u8 pwr_constr_elem_len;
+	u8 quiet_elem_len;
+	u8 num_of_quiet_elem;	/* can be more than one */
+	u8 timeout_int_len;
+};
+
 static inline struct ieee80211_local *hw_to_local(
 	struct ieee80211_hw *hw)
 {
@@ -1090,9 +1172,8 @@
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
 void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
-					u8 *bssid, u8 *addr, u32 supp_rates,
-					gfp_t gfp);
+void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+			      const u8 *bssid, const u8 *addr, u32 supp_rates);
 int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 			struct cfg80211_ibss_params *params);
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -1140,13 +1221,9 @@
 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 
 /* off-channel helpers */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
-					bool tell_ap);
 void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
 				    bool offchannel_ps_enable);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-				 bool enable_beaconing,
 				 bool offchannel_ps_disable);
 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
 
@@ -1179,7 +1256,11 @@
 				       struct net_device *dev);
 
 /* HT */
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
+bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
+void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_sta_ht_cap *ht_cap);
+void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211_supported_band *sband,
 				       struct ieee80211_ht_cap *ht_cap_ie,
 				       struct ieee80211_sta_ht_cap *ht_cap);
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
@@ -1266,7 +1347,16 @@
 				     gfp_t gfp);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
 void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+			  struct sk_buff *skb, int tid);
+static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
+				    struct sk_buff *skb)
+{
+	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
+	ieee80211_tx_skb_tid(sdata, skb, 7);
+}
+
 void ieee802_11_parse_elems(u8 *start, size_t len,
 			    struct ieee802_11_elems *elems);
 u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
@@ -1334,6 +1424,12 @@
 size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
 			  const u8 *ids, int n_ids, size_t offset);
 size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+			      u16 cap);
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+				struct ieee80211_sta_ht_cap *ht_cap,
+				struct ieee80211_channel *channel,
+				enum nl80211_channel_type channel_type);
 
 /* internal work items */
 void ieee80211_work_init(struct ieee80211_local *local);
@@ -1362,6 +1458,8 @@
 bool ieee80211_set_channel_type(struct ieee80211_local *local,
 				struct ieee80211_sub_if_data *sdata,
 				enum nl80211_channel_type chantype);
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 30d7355..e47768c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -188,11 +188,22 @@
 		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
 			return -ENOLINK;
 		break;
-	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_AP_VLAN: {
+		struct ieee80211_sub_if_data *master;
+
 		if (!sdata->bss)
 			return -ENOLINK;
+
 		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+
+		master = container_of(sdata->bss,
+				      struct ieee80211_sub_if_data, u.ap);
+		sdata->control_port_protocol =
+			master->control_port_protocol;
+		sdata->control_port_no_encrypt =
+			master->control_port_no_encrypt;
 		break;
+		}
 	case NL80211_IFTYPE_AP:
 		sdata->bss = &sdata->u.ap;
 		break;
@@ -265,7 +276,7 @@
 		break;
 	default:
 		if (coming_up) {
-			res = drv_add_interface(local, &sdata->vif);
+			res = drv_add_interface(local, sdata);
 			if (res)
 				goto err_stop;
 		}
@@ -282,10 +293,18 @@
 		changed |= ieee80211_reset_erp_info(sdata);
 		ieee80211_bss_info_change_notify(sdata, changed);
 
-		if (sdata->vif.type == NL80211_IFTYPE_STATION)
+		if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+		    sdata->vif.type == NL80211_IFTYPE_ADHOC)
 			netif_carrier_off(dev);
 		else
 			netif_carrier_on(dev);
+
+		/*
+		 * set default queue parameters so drivers don't
+		 * need to initialise the hardware if the hardware
+		 * doesn't start up with sane defaults
+		 */
+		ieee80211_set_wmm_default(sdata);
 	}
 
 	set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -299,8 +318,9 @@
 			goto err_del_interface;
 		}
 
-		/* no atomic bitop required since STA is not live yet */
-		set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+		sta_info_move_state(sta, IEEE80211_STA_AUTH);
+		sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+		sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
 
 		res = sta_info_insert(sta);
 		if (res) {
@@ -329,15 +349,8 @@
 	if (coming_up)
 		local->open_count++;
 
-	if (hw_reconf_flags) {
+	if (hw_reconf_flags)
 		ieee80211_hw_config(local, hw_reconf_flags);
-		/*
-		 * set default queue parameters so drivers don't
-		 * need to initialise the hardware if the hardware
-		 * doesn't start up with sane defaults
-		 */
-		ieee80211_set_wmm_default(sdata);
-	}
 
 	ieee80211_recalc_ps(local, -1);
 
@@ -345,7 +358,7 @@
 
 	return 0;
  err_del_interface:
-	drv_remove_interface(local, &sdata->vif);
+	drv_remove_interface(local, sdata);
  err_stop:
 	if (!local->open_count)
 		drv_stop(local);
@@ -450,15 +463,19 @@
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
 		struct beacon_data *old_beacon =
 			rtnl_dereference(sdata->u.ap.beacon);
+		struct sk_buff *old_probe_resp =
+			rtnl_dereference(sdata->u.ap.probe_resp);
 
 		/* sdata_running will return false, so this will disable */
 		ieee80211_bss_info_change_notify(sdata,
 						 BSS_CHANGED_BEACON_ENABLED);
 
-		/* remove beacon */
+		/* remove beacon and probe response */
 		RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+		RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
 		synchronize_rcu();
 		kfree(old_beacon);
+		kfree_skb(old_probe_resp);
 
 		/* down all dependent devices, that is VLANs */
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -520,7 +537,7 @@
 		ieee80211_free_keys(sdata);
 
 		if (going_down)
-			drv_remove_interface(local, &sdata->vif);
+			drv_remove_interface(local, sdata);
 	}
 
 	sdata->bss = NULL;
@@ -656,7 +673,6 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_radiotap_header *rtap = (void *)skb->data;
-	u8 *p;
 
 	if (local->hw.queues < 4)
 		return 0;
@@ -667,19 +683,7 @@
 
 	hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
 
-	if (!ieee80211_is_data(hdr->frame_control)) {
-		skb->priority = 7;
-		return ieee802_1d_to_ac[skb->priority];
-	}
-	if (!ieee80211_is_data_qos(hdr->frame_control)) {
-		skb->priority = 0;
-		return ieee802_1d_to_ac[skb->priority];
-	}
-
-	p = ieee80211_get_qos_ctl(hdr);
-	skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
-
-	return ieee80211_downgrade_queue(local, skb);
+	return ieee80211_select_queue_80211(local, skb, hdr);
 }
 
 static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -850,6 +854,8 @@
 	sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
 	sdata->control_port_no_encrypt = false;
 
+	sdata->noack_map = 0;
+
 	/* only monitor differs */
 	sdata->dev->type = ARPHRD_ETHER;
 
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index fb02ea5..87a8974 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -134,9 +134,13 @@
 		key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
 		if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-		      (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+		      (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+		      (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
 			sdata->crypto_tx_tailroom_needed_cnt--;
 
+		WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+			(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
+
 		return 0;
 	}
 
@@ -179,7 +183,8 @@
 	sdata = key->sdata;
 
 	if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-	      (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+	      (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+	      (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
 		increment_tailroom_need_count(sdata);
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index cae4435..0a0d94a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -47,7 +47,7 @@
 	if (atomic_read(&local->iff_allmultis))
 		new_flags |= FIF_ALLMULTI;
 
-	if (local->monitors || local->scanning)
+	if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning))
 		new_flags |= FIF_BCN_PRBRESP_PROMISC;
 
 	if (local->fif_probe_req || local->probe_req_reg)
@@ -92,18 +92,20 @@
 	ieee80211_configure_filter(local);
 }
 
-/*
- * Returns true if we are logically configured to be on
- * the operating channel AND the hardware-conf is currently
- * configured on the operating channel.  Compares channel-type
- * as well.
- */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 {
-	struct ieee80211_channel *chan, *scan_chan;
+	struct ieee80211_channel *chan;
+	int ret = 0;
+	int power;
 	enum nl80211_channel_type channel_type;
+	u32 offchannel_flag;
 
-	/* This logic needs to match logic in ieee80211_hw_config */
+	might_sleep();
+
+	/* If this off-channel logic ever changes,  ieee80211_on_oper_channel
+	 * may need to change as well.
+	 */
+	offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
 	if (local->scan_channel) {
 		chan = local->scan_channel;
 		/* If scanning on oper channel, use whatever channel-type
@@ -114,52 +116,7 @@
 		else
 			channel_type = NL80211_CHAN_NO_HT;
 	} else if (local->tmp_channel) {
-		chan = scan_chan = local->tmp_channel;
-		channel_type = local->tmp_channel_type;
-	} else {
-		chan = local->oper_channel;
-		channel_type = local->_oper_channel_type;
-	}
-
-	if (chan != local->oper_channel ||
-	    channel_type != local->_oper_channel_type)
-		return false;
-
-	/* Check current hardware-config against oper_channel. */
-	if ((local->oper_channel != local->hw.conf.channel) ||
-	    (local->_oper_channel_type != local->hw.conf.channel_type))
-		return false;
-
-	return true;
-}
-
-int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
-{
-	struct ieee80211_channel *chan, *scan_chan;
-	int ret = 0;
-	int power;
-	enum nl80211_channel_type channel_type;
-	u32 offchannel_flag;
-
-	might_sleep();
-
-	scan_chan = local->scan_channel;
-
-	/* If this off-channel logic ever changes,  ieee80211_on_oper_channel
-	 * may need to change as well.
-	 */
-	offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
-	if (scan_chan) {
-		chan = scan_chan;
-		/* If scanning on oper channel, use whatever channel-type
-		 * is currently in use.
-		 */
-		if (chan == local->oper_channel)
-			channel_type = local->_oper_channel_type;
-		else
-			channel_type = NL80211_CHAN_NO_HT;
-	} else if (local->tmp_channel) {
-		chan = scan_chan = local->tmp_channel;
+		chan = local->tmp_channel;
 		channel_type = local->tmp_channel_type;
 	} else {
 		chan = local->oper_channel;
@@ -193,8 +150,8 @@
 		changed |= IEEE80211_CONF_CHANGE_SMPS;
 	}
 
-	if ((local->scanning & SCAN_SW_SCANNING) ||
-	    (local->scanning & SCAN_HW_SCANNING))
+	if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+	    test_bit(SCAN_HW_SCANNING, &local->scanning))
 		power = chan->max_power;
 	else
 		power = local->power_constr_level ?
@@ -436,9 +393,6 @@
 	sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
 	bss_conf = &sdata->vif.bss_conf;
 
-	if (!ieee80211_sdata_running(sdata))
-		return NOTIFY_DONE;
-
 	/* ARP filtering is only supported in managed mode */
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return NOTIFY_DONE;
@@ -467,7 +421,7 @@
 	}
 	bss_conf->arp_addr_cnt = c;
 
-	/* Configure driver only if associated */
+	/* Configure driver only if associated (which also implies it is up) */
 	if (ifmgd->associated) {
 		bss_conf->arp_filter_enabled = sdata->arp_filter_state;
 		ieee80211_bss_info_change_notify(sdata,
@@ -560,6 +514,19 @@
 	},
 };
 
+static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
+	.ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR |
+			     IEEE80211_HT_AMPDU_PARM_DENSITY,
+
+	.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+				IEEE80211_HT_CAP_MAX_AMSDU |
+				IEEE80211_HT_CAP_SGI_40),
+	.mcs = {
+		.rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
+			     0xff, 0xff, 0xff, 0xff, 0xff, },
+	},
+};
+
 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 					const struct ieee80211_ops *ops)
 {
@@ -595,7 +562,13 @@
 
 	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
 			WIPHY_FLAG_4ADDR_AP |
-			WIPHY_FLAG_4ADDR_STATION;
+			WIPHY_FLAG_4ADDR_STATION |
+			WIPHY_FLAG_REPORTS_OBSS |
+			WIPHY_FLAG_OFFCHAN_TX |
+			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+	wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
+			  NL80211_FEATURE_HT_IBSS;
 
 	if (!ops->set_key)
 		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -608,7 +581,7 @@
 
 	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-	BUG_ON(!ops->tx);
+	BUG_ON(!ops->tx && !ops->tx_frags);
 	BUG_ON(!ops->start);
 	BUG_ON(!ops->stop);
 	BUG_ON(!ops->config);
@@ -628,6 +601,7 @@
 	local->user_power_level = -1;
 	local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
 	local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
+	wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
 
 	INIT_LIST_HEAD(&local->interfaces);
 
@@ -670,6 +644,11 @@
 	INIT_WORK(&local->sched_scan_stopped_work,
 		  ieee80211_sched_scan_stopped_work);
 
+	spin_lock_init(&local->ack_status_lock);
+	idr_init(&local->ack_status_frames);
+	/* preallocate at least one entry */
+	idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
+
 	sta_info_init(local);
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
@@ -1051,6 +1030,13 @@
 }
 EXPORT_SYMBOL(ieee80211_unregister_hw);
 
+static int ieee80211_free_ack_frame(int id, void *p, void *data)
+{
+	WARN_ONCE(1, "Have pending ack frames!\n");
+	kfree_skb(p);
+	return 0;
+}
+
 void ieee80211_free_hw(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
@@ -1061,6 +1047,10 @@
 	if (local->wiphy_ciphers_allocated)
 		kfree(local->hw.wiphy->cipher_suites);
 
+	idr_for_each(&local->ack_status_frames,
+		     ieee80211_free_ack_frame, NULL);
+	idr_destroy(&local->ack_status_frames);
+
 	wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index a7078fd..c707c8b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -76,6 +76,7 @@
 bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
 
 	/*
 	 * As support for each feature is added, check for matching
@@ -87,15 +88,23 @@
 	 *   - MDA enabled
 	 * - Power management control on fc
 	 */
-	if (ifmsh->mesh_id_len == ie->mesh_id_len &&
-		memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
-		(ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
-		(ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
-		(ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
-		(ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
-		(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))
-		return true;
+	if (!(ifmsh->mesh_id_len == ie->mesh_id_len &&
+	     memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
+	     (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+	     (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
+	     (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
+	     (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
+	     (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
+		goto mismatch;
 
+	/* disallow peering with mismatched channel types for now */
+	if (ie->ht_info_elem &&
+	    (local->_oper_channel_type !=
+	     ieee80211_ht_info_to_channel_type(ie->ht_info_elem)))
+		goto mismatch;
+
+	return true;
+mismatch:
 	return false;
 }
 
@@ -341,6 +350,49 @@
 	return 0;
 }
 
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+		       struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_supported_band *sband;
+	u8 *pos;
+
+	sband = local->hw.wiphy->bands[local->oper_channel->band];
+	if (!sband->ht_cap.ht_supported ||
+	    local->_oper_channel_type == NL80211_CHAN_NO_HT)
+		return 0;
+
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+	ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap);
+
+	return 0;
+}
+
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+			struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_channel *channel = local->oper_channel;
+	enum nl80211_channel_type channel_type = local->_oper_channel_type;
+	struct ieee80211_supported_band *sband =
+				local->hw.wiphy->bands[channel->band];
+	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+	u8 *pos;
+
+	if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
+		return 0;
+
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info))
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info));
+	ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type);
+
+	return 0;
+}
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
@@ -697,6 +749,7 @@
 	atomic_set(&ifmsh->mpaths, 0);
 	mesh_rmc_init(sdata);
 	ifmsh->last_preq = jiffies;
+	ifmsh->next_perr = jiffies;
 	/* Allocate all mesh structures when creating the first mesh interface. */
 	if (!mesh_allocated)
 		ieee80211s_init();
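The two helpers added above follow mac80211's usual IE-append pattern: verify skb_tailroom() before skb_put(), then write the element in place. A generic sketch of the same pattern, assuming a placeholder element ID and payload:

#include <linux/errno.h>
#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int add_example_ie(struct sk_buff *skb, const u8 *data, u8 len)
{
	u8 *pos;

	/* 2 bytes for element ID + length, then the payload */
	if (skb_tailroom(skb) < 2 + len)
		return -ENOMEM;

	pos = skb_put(skb, 2 + len);
	*pos++ = WLAN_EID_VENDOR_SPECIFIC;	/* placeholder element ID */
	*pos++ = len;
	memcpy(pos, data, len);

	return 0;
}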
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 8c00e2d..bd14bd2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -31,6 +31,8 @@
  * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
  * 	modified
  * @MESH_PATH_RESOLVED: the mesh path has been resolved
+ * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
+ * already queued up, waiting for the discovery process to start.
  *
  * MESH_PATH_RESOLVED is used by the mesh path timer to
  * decide when to stop or cancel the mesh path discovery.
@@ -41,6 +43,7 @@
 	MESH_PATH_SN_VALID =	BIT(2),
 	MESH_PATH_FIXED	=	BIT(3),
 	MESH_PATH_RESOLVED =	BIT(4),
+	MESH_PATH_REQ_QUEUED =	BIT(5),
 };
 
 /**
@@ -212,6 +215,10 @@
 			struct ieee80211_sub_if_data *sdata);
 int mesh_add_ds_params_ie(struct sk_buff *skb,
 			  struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+		       struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+			struct ieee80211_sub_if_data *sdata);
 void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
@@ -226,6 +233,8 @@
 /* Mesh paths */
 int mesh_nexthop_lookup(struct sk_buff *skb,
 		struct ieee80211_sub_if_data *sdata);
+int mesh_nexthop_resolve(struct sk_buff *skb,
+			 struct ieee80211_sub_if_data *sdata);
 void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
 struct mesh_path *mesh_path_lookup(u8 *dst,
 		struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 174040a..73abb75 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -113,20 +113,20 @@
 		struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
-	u8 *pos;
-	int ie_len;
+	u8 *pos, ie_len;
+	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+		      sizeof(mgmt->u.action.u.mesh_action);
 
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    hdr_len +
+			    2 + 37); /* max HWMP IE */
 	if (!skb)
 		return -1;
 	skb_reserve(skb, local->hw.extra_tx_headroom);
-	/* 25 is the size of the common mgmt part (24) plus the size of the
-	 * common action part (1)
-	 */
-	mgmt = (struct ieee80211_mgmt *)
-		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
-	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
 
@@ -240,20 +240,24 @@
 		       struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+	struct sk_buff *skb;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_mgmt *mgmt;
-	u8 *pos;
-	int ie_len;
+	u8 *pos, ie_len;
+	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+		      sizeof(mgmt->u.action.u.mesh_action);
 
+	if (time_before(jiffies, ifmsh->next_perr))
+		return -EAGAIN;
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    hdr_len +
+			    2 + 15 /* PERR IE */);
 	if (!skb)
 		return -1;
 	skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
-	/* 25 is the size of the common mgmt part (24) plus the size of the
-	 * common action part (1)
-	 */
-	mgmt = (struct ieee80211_mgmt *)
-		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
-	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
 
@@ -290,6 +294,8 @@
 
 	/* see note in function header */
 	prepare_frame_for_deferred_tx(sdata, skb);
+	ifmsh->next_perr = TU_TO_EXP_TIME(
+				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
 	ieee80211_add_pending_skb(local, skb);
 	return 0;
 }
@@ -393,15 +399,13 @@
 		orig_metric = PREQ_IE_METRIC(hwmp_ie);
 		break;
 	case MPATH_PREP:
-		/* Originator here refers to the MP that was the destination in
-		 * the Path Request. The draft refers to that MP as the
-		 * destination address, even though usually it is the origin of
-		 * the PREP frame. We divert from the nomenclature in the draft
+		/* Originator here refers to the MP that was the target in the
+		 * Path Request. We divert from the nomenclature in the draft
 		 * so that we can easily use a single function to gather path
 		 * information from both PREQ and PREP frames.
 		 */
-		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
-		orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
+		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
+		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
 		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
 		orig_metric = PREP_IE_METRIC(hwmp_ie);
 		break;
@@ -562,9 +566,9 @@
 		ttl = ifmsh->mshcfg.element_ttl;
 		if (ttl != 0) {
 			mhwmp_dbg("replying to the PREQ");
-			mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
-				cpu_to_le32(target_sn), 0, orig_addr,
-				cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
+			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
+				cpu_to_le32(orig_sn), 0, target_addr,
+				cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
 				cpu_to_le32(lifetime), cpu_to_le32(metric),
 				0, sdata);
 		} else
@@ -618,14 +622,8 @@
 
 	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
 
-	/* Note that we divert from the draft nomenclature and denominate
-	 * destination to what the draft refers to as origininator. So in this
-	 * function destnation refers to the final destination of the PREP,
-	 * which corresponds with the originator of the PREQ which this PREP
-	 * replies
-	 */
-	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
-	if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
+	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+	if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
 		/* destination, no forwarding required */
 		return;
 
@@ -636,7 +634,7 @@
 	}
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(target_addr, sdata);
+	mpath = mesh_path_lookup(orig_addr, sdata);
 	if (mpath)
 		spin_lock_bh(&mpath->state_lock);
 	else
@@ -651,7 +649,7 @@
 	flags = PREP_IE_FLAGS(prep_elem);
 	lifetime = PREP_IE_LIFETIME(prep_elem);
 	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
-	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
 	target_sn = PREP_IE_TARGET_SN(prep_elem);
 	orig_sn = PREP_IE_ORIG_SN(prep_elem);
 
@@ -867,9 +865,20 @@
 		return;
 	}
 
+	spin_lock(&mpath->state_lock);
+	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
+		spin_unlock(&mpath->state_lock);
+		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+		kfree(preq_node);
+		return;
+	}
+
 	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 	preq_node->flags = flags;
 
+	mpath->flags |= MESH_PATH_REQ_QUEUED;
+	spin_unlock(&mpath->state_lock);
+
 	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
 	++ifmsh->preq_queue_len;
 	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -921,6 +930,7 @@
 		goto enddiscovery;
 
 	spin_lock_bh(&mpath->state_lock);
+	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 	if (preq_node->flags & PREQ_Q_F_START) {
 		if (mpath->flags & MESH_PATH_RESOLVING) {
 			spin_unlock_bh(&mpath->state_lock);
@@ -972,71 +982,97 @@
 	kfree(preq_node);
 }
 
-/**
- * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
+/* mesh_nexthop_resolve - lookup next hop for given skb and start path
+ * discovery if no forwarding information is found.
  *
  * @skb: 802.11 frame to be sent
  * @sdata: network subif the frame will be sent through
  *
- * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
- * found, the function will start a path discovery and queue the frame so it is
- * sent when the path is resolved. This means the caller must not free the skb
- * in this case.
+ * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
+ * skb is freed here if no mpath could be allocated.
  */
-int mesh_nexthop_lookup(struct sk_buff *skb,
-			struct ieee80211_sub_if_data *sdata)
+int mesh_nexthop_resolve(struct sk_buff *skb,
+			 struct ieee80211_sub_if_data *sdata)
 {
-	struct sk_buff *skb_to_free = NULL;
-	struct mesh_path *mpath;
-	struct sta_info *next_hop;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mesh_path *mpath;
+	struct sk_buff *skb_to_free = NULL;
 	u8 *target_addr = hdr->addr3;
 	int err = 0;
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(target_addr, sdata);
+	err = mesh_nexthop_lookup(skb, sdata);
+	if (!err)
+		goto endlookup;
 
+	/* no nexthop found, start resolving */
+	mpath = mesh_path_lookup(target_addr, sdata);
 	if (!mpath) {
 		mesh_path_add(target_addr, sdata);
 		mpath = mesh_path_lookup(target_addr, sdata);
 		if (!mpath) {
-			sdata->u.mesh.mshstats.dropped_frames_no_route++;
+			mesh_path_discard_frame(skb, sdata);
 			err = -ENOSPC;
 			goto endlookup;
 		}
 	}
 
-	if (mpath->flags & MESH_PATH_ACTIVE) {
-		if (time_after(jiffies,
-			       mpath->exp_time -
-			       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
-		    !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
-		    !(mpath->flags & MESH_PATH_RESOLVING) &&
-		    !(mpath->flags & MESH_PATH_FIXED)) {
-			mesh_queue_preq(mpath,
-					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
-		}
-		next_hop = rcu_dereference(mpath->next_hop);
-		if (next_hop)
-			memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
-		else
-			err = -ENOENT;
-	} else {
-		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
-			/* Start discovery only if it is not running yet */
-			mesh_queue_preq(mpath, PREQ_Q_F_START);
-		}
+	if (!(mpath->flags & MESH_PATH_RESOLVING))
+		mesh_queue_preq(mpath, PREQ_Q_F_START);
 
-		if (skb_queue_len(&mpath->frame_queue) >=
-				MESH_FRAME_QUEUE_LEN)
-			skb_to_free = skb_dequeue(&mpath->frame_queue);
+	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
+		skb_to_free = skb_dequeue(&mpath->frame_queue);
 
-		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-		skb_queue_tail(&mpath->frame_queue, skb);
-		if (skb_to_free)
-			mesh_path_discard_frame(skb_to_free, sdata);
-		err = -ENOENT;
+	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+	ieee80211_set_qos_hdr(sdata, skb);
+	skb_queue_tail(&mpath->frame_queue, skb);
+	err = -ENOENT;
+	if (skb_to_free)
+		mesh_path_discard_frame(skb_to_free, sdata);
+
+endlookup:
+	rcu_read_unlock();
+	return err;
+}
+/**
+ * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
+ * this function is considered "using" the associated mpath, so preempt a path
+ * refresh if this mpath expires soon.
+ *
+ * @skb: 802.11 frame to be sent
+ * @sdata: network subif the frame will be sent through
+ *
+ * Returns: 0 if the next hop was found. Nonzero otherwise.
+ */
+int mesh_nexthop_lookup(struct sk_buff *skb,
+			struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath;
+	struct sta_info *next_hop;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	u8 *target_addr = hdr->addr3;
+	int err = -ENOENT;
+
+	rcu_read_lock();
+	mpath = mesh_path_lookup(target_addr, sdata);
+
+	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
+		goto endlookup;
+
+	if (time_after(jiffies,
+		       mpath->exp_time -
+		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
+	    !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
+	    !(mpath->flags & MESH_PATH_RESOLVING) &&
+	    !(mpath->flags & MESH_PATH_FIXED))
+		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+
+	next_hop = rcu_dereference(mpath->next_hop);
+	if (next_hop) {
+		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
+		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+		err = 0;
 	}
 
 endlookup:
@@ -1061,6 +1097,7 @@
 	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
 		++mpath->discovery_retries;
 		mpath->discovery_timeout *= 2;
+		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 		spin_unlock_bh(&mpath->state_lock);
 		mesh_queue_preq(mpath, 0);
 	} else {
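The dot11MeshHWMPperrMinInterval handling above rate-limits PERR transmission by stamping ifmsh->next_perr and returning -EAGAIN until it has passed. A stripped-down sketch of the same jiffies throttle (the field name and the 100 ms interval are illustrative only):

#include <linux/jiffies.h>
#include <linux/errno.h>

static unsigned long next_tx;

static int try_send_frame(void)
{
	if (time_before(jiffies, next_tx))
		return -EAGAIN;		/* too soon; caller may retry later */

	/* ... build and queue the frame here ... */

	next_tx = jiffies + msecs_to_jiffies(100);
	return 0;
}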
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7f54c50..edf167e 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -69,8 +69,6 @@
 		lockdep_is_held(&pathtbl_resize_lock));
 }
 
-static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
-
 /*
  * CAREFUL -- "tbl" must not be an expression,
  * in particular not an rcu_dereference(), since
@@ -213,7 +211,6 @@
 	struct ieee80211_hdr *hdr;
 	struct sk_buff_head tmpq;
 	unsigned long flags;
-	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 
 	rcu_assign_pointer(mpath->next_hop, sta);
 
@@ -224,8 +221,7 @@
 	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
 		hdr = (struct ieee80211_hdr *) skb->data;
 		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
-		skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
-		ieee80211_set_qos_hdr(sdata, skb);
+		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
 		__skb_queue_tail(&tmpq, skb);
 	}
 
@@ -269,6 +265,7 @@
 	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
 	memcpy(hdr->addr1, next_hop, ETH_ALEN);
 	rcu_read_unlock();
+	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
 	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
 }
 
@@ -423,21 +420,18 @@
 }
 
 /**
- * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
- * @mesh_tbl: table which contains known_gates list
- * @mpath: mpath to known mesh gate
- *
- * Returns: 0 on success
- *
+ * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
+ * @mpath: gate path to add to table
  */
-static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+int mesh_path_add_gate(struct mesh_path *mpath)
 {
+	struct mesh_table *tbl;
 	struct mpath_node *gate, *new_gate;
 	struct hlist_node *n;
 	int err;
 
 	rcu_read_lock();
-	tbl = rcu_dereference(tbl);
+	tbl = rcu_dereference(mesh_paths);
 
 	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
 		if (gate->mpath == mpath) {
@@ -481,8 +475,6 @@
 	struct mpath_node *gate;
 	struct hlist_node *p, *q;
 
-	tbl = rcu_dereference(tbl);
-
 	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
 		if (gate->mpath == mpath) {
 			spin_lock_bh(&tbl->gates_lock);
@@ -501,16 +493,6 @@
 }
 
 /**
- *
- * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
- * @mpath: gate path to add to table
- */
-int mesh_path_add_gate(struct mesh_path *mpath)
-{
-	return mesh_gate_add(mesh_paths, mpath);
-}
-
-/**
  * mesh_gate_num - number of gates known to this interface
  * @sdata: subif data
  */
@@ -991,38 +973,11 @@
  * @skb: frame to discard
  * @sdata: network subif the frame was to be sent through
  *
- * If the frame was being forwarded from another MP, a PERR frame will be sent
- * to the precursor.  The precursor's address (i.e. the previous hop) was saved
- * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
- * the destination is successfully resolved.
- *
  * Locking: the function must be called within a rcu_read_lock region
  */
 void mesh_path_discard_frame(struct sk_buff *skb,
 			     struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct mesh_path *mpath;
-	u32 sn = 0;
-	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
-
-	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
-		u8 *ra, *da;
-
-		da = hdr->addr3;
-		ra = hdr->addr1;
-		rcu_read_lock();
-		mpath = mesh_path_lookup(da, sdata);
-		if (mpath) {
-			spin_lock_bh(&mpath->state_lock);
-			sn = ++mpath->sn;
-			spin_unlock_bh(&mpath->state_lock);
-		}
-		rcu_read_unlock();
-		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
-				   cpu_to_le32(sn), reason, ra, sdata);
-	}
-
 	kfree_skb(skb);
 	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
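mesh_path_add_gate() now dereferences the RCU-protected mesh_paths table itself rather than taking it as an argument. A read-side sketch of that access pattern, assuming the declarations in mesh_pathtbl.c and mesh.h:

static bool path_is_known_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *node;
	struct hlist_node *n;
	bool found = false;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	hlist_for_each_entry_rcu(node, n, tbl->known_gates, list)
		if (node->mpath == mpath) {
			found = true;
			break;
		}
	rcu_read_unlock();

	return found;
}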
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7e57f5d..41ef1b4 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -80,11 +80,15 @@
  *       on it in the lifecycle management section!
  */
 static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
-					 u8 *hw_addr, u32 rates)
+					 u8 *hw_addr, u32 rates,
+					 struct ieee802_11_elems *elems)
 {
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_supported_band *sband;
 	struct sta_info *sta;
 
+	sband = local->hw.wiphy->bands[local->oper_channel->band];
+
 	if (local->num_sta >= MESH_MAX_PLINKS)
 		return NULL;
 
@@ -92,10 +96,17 @@
 	if (!sta)
 		return NULL;
 
-	set_sta_flag(sta, WLAN_STA_AUTH);
-	set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+	sta_info_move_state(sta, IEEE80211_STA_AUTH);
+	sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+	sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
 	set_sta_flag(sta, WLAN_STA_WME);
+
 	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	if (elems->ht_cap_elem)
+		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+						  elems->ht_cap_elem,
+						  &sta->sta.ht_cap);
 	rate_control_rate_init(sta);
 
 	return sta;
@@ -153,23 +164,31 @@
 		enum ieee80211_self_protected_actioncode action,
 		u8 *da, __le16 llid, __le16 plid, __le16 reason) {
 	struct ieee80211_local *local = sdata->local;
-	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
-			sdata->u.mesh.ie_len);
+	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 	bool include_plid = false;
-	int ie_len = 4;
 	u16 peering_proto = 0;
-	u8 *pos;
+	u8 *pos, ie_len = 4;
+	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
+		      sizeof(mgmt->u.action.u.self_prot);
 
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    hdr_len +
+			    2 + /* capability info */
+			    2 + /* AID */
+			    2 + 8 + /* supported rates */
+			    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+			    2 + sdata->u.mesh.mesh_id_len +
+			    2 + sizeof(struct ieee80211_meshconf_ie) +
+			    2 + sizeof(struct ieee80211_ht_cap) +
+			    2 + sizeof(struct ieee80211_ht_info) +
+			    2 + 8 + /* peering IE */
+			    sdata->u.mesh.ie_len);
 	if (!skb)
 		return -1;
 	skb_reserve(skb, local->hw.extra_tx_headroom);
-	/* 25 is the size of the common mgmt part (24) plus the size of the
-	 * common action part (1)
-	 */
-	mgmt = (struct ieee80211_mgmt *)
-		skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
-	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
 	memcpy(mgmt->da, da, ETH_ALEN);
@@ -235,6 +254,13 @@
 		memcpy(pos, &reason, 2);
 		pos += 2;
 	}
+
+	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+		if (mesh_add_ht_cap_ie(skb, sdata) ||
+		    mesh_add_ht_info_ie(skb, sdata))
+			return -1;
+	}
+
 	if (mesh_add_vendor_ies(skb, sdata))
 		return -1;
 
@@ -261,7 +287,7 @@
 					elems->ie_start, elems->total_len,
 					GFP_KERNEL);
 		else
-			sta = mesh_plink_alloc(sdata, hw_addr, rates);
+			sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
 		if (!sta)
 			return;
 		if (sta_info_insert_rcu(sta)) {
@@ -552,7 +578,7 @@
 		}
 
 		rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
-		sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
+		sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
 		if (!sta) {
 			mpl_dbg("Mesh plink error: plink table full\n");
 			return;
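The dev_alloc_skb() call above replaces the old "400 bytes should be enough" allocation with an explicit budget: the management header up to the self-protected action fields (via offsetof plus sizeof) plus every IE that may be appended. A small userspace demo of the offsetof-based header sizing, using a toy layout rather than the real struct ieee80211_mgmt:

#include <stddef.h>
#include <stdio.h>

struct mgmt_frame {
	unsigned char header[24];	/* fc, duration, addrs, seq */
	unsigned char category;
	union {
		unsigned char self_prot[3];
		unsigned char mesh_action[2];
	} u;
};

int main(void)
{
	/* Length up to and including the self-protected action fields,
	 * leaving out the unused remainder of the union. */
	size_t hdr_len = offsetof(struct mgmt_frame, u.self_prot) +
			 sizeof(((struct mgmt_frame *)0)->u.self_prot);

	printf("%zu\n", hdr_len);	/* 25 + 3 = 28 */
	return 0;
}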
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b1b1bb3..ecb4c84 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -209,6 +209,7 @@
 		channel_type = NL80211_CHAN_HT20;
 
 		if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
+		    !ieee80111_cfg_override_disables_ht40(sdata) &&
 		    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
 		    (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
 			switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
@@ -818,7 +819,7 @@
 	}
 
 	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-	    (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
+	    !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
 		netif_tx_stop_all_queues(sdata->dev);
 
 		if (drv_tx_frames_pending(local))
@@ -1120,6 +1121,8 @@
 
 	/* on the next assoc, re-program HT parameters */
 	sdata->ht_opmode_valid = false;
+	memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
+	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 
 	local->power_constr_level = 0;
 
@@ -1359,9 +1362,6 @@
 	ieee80211_set_disassoc(sdata, true, true);
 	mutex_unlock(&ifmgd->mtx);
 
-	mutex_lock(&local->mtx);
-	ieee80211_recalc_idle(local);
-	mutex_unlock(&local->mtx);
 	/*
 	 * must be outside lock due to cfg80211,
 	 * but that's not a problem.
@@ -1370,6 +1370,10 @@
 				       IEEE80211_STYPE_DEAUTH,
 				       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 				       NULL, true);
+
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
 }
 
 void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1377,6 +1381,16 @@
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
 			     u.mgd.beacon_connection_loss_work);
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	struct sta_info *sta;
+
+	if (ifmgd->associated) {
+		rcu_read_lock();
+		sta = sta_info_get(sdata, ifmgd->bssid);
+		if (sta)
+			sta->beacon_loss_count++;
+		rcu_read_unlock();
+	}
 
 	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
 		__ieee80211_connection_loss(sdata);
@@ -1468,6 +1482,47 @@
 	return RX_MGMT_CFG80211_DISASSOC;
 }
 
+static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
+				u8 *supp_rates, unsigned int supp_rates_len,
+				u32 *rates, u32 *basic_rates,
+				bool *have_higher_than_11mbit,
+				int *min_rate, int *min_rate_index)
+{
+	int i, j;
+
+	for (i = 0; i < supp_rates_len; i++) {
+		int rate = (supp_rates[i] & 0x7f) * 5;
+		bool is_basic = !!(supp_rates[i] & 0x80);
+
+		if (rate > 110)
+			*have_higher_than_11mbit = true;
+
+		/*
+		 * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009
+		 * 7.3.2.2 as a magic value instead of a rate. Hence, skip it.
+		 *
+		 * Note: Even though the membership selector and the basic
+		 *	 rate flag share the same bit, they are not exactly
+		 *	 the same.
+		 */
+		if (!!(supp_rates[i] & 0x80) &&
+		    (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+			continue;
+
+		for (j = 0; j < sband->n_bitrates; j++) {
+			if (sband->bitrates[j].bitrate == rate) {
+				*rates |= BIT(j);
+				if (is_basic)
+					*basic_rates |= BIT(j);
+				if (rate < *min_rate) {
+					*min_rate = rate;
+					*min_rate_index = j;
+				}
+				break;
+			}
+		}
+	}
+}
 
 static bool ieee80211_assoc_success(struct ieee80211_work *wk,
 				    struct ieee80211_mgmt *mgmt, size_t len)
@@ -1484,7 +1539,7 @@
 	struct ieee802_11_elems elems;
 	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
 	u32 changed = 0;
-	int i, j, err;
+	int err;
 	bool have_higher_than_11mbit = false;
 	u16 ap_ht_cap_flags;
 	int min_rate = INT_MAX, min_rate_index = -1;
@@ -1532,57 +1587,23 @@
 		return false;
 	}
 
-	set_sta_flag(sta, WLAN_STA_AUTH);
-	set_sta_flag(sta, WLAN_STA_ASSOC);
-	set_sta_flag(sta, WLAN_STA_ASSOC_AP);
+	sta_info_move_state(sta, IEEE80211_STA_AUTH);
+	sta_info_move_state(sta, IEEE80211_STA_ASSOC);
 	if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
-		set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+		sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
 
 	rates = 0;
 	basic_rates = 0;
 	sband = local->hw.wiphy->bands[wk->chan->band];
 
-	for (i = 0; i < elems.supp_rates_len; i++) {
-		int rate = (elems.supp_rates[i] & 0x7f) * 5;
-		bool is_basic = !!(elems.supp_rates[i] & 0x80);
+	ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
+			    &rates, &basic_rates, &have_higher_than_11mbit,
+			    &min_rate, &min_rate_index);
 
-		if (rate > 110)
-			have_higher_than_11mbit = true;
-
-		for (j = 0; j < sband->n_bitrates; j++) {
-			if (sband->bitrates[j].bitrate == rate) {
-				rates |= BIT(j);
-				if (is_basic)
-					basic_rates |= BIT(j);
-				if (rate < min_rate) {
-					min_rate = rate;
-					min_rate_index = j;
-				}
-				break;
-			}
-		}
-	}
-
-	for (i = 0; i < elems.ext_supp_rates_len; i++) {
-		int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
-		bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
-
-		if (rate > 110)
-			have_higher_than_11mbit = true;
-
-		for (j = 0; j < sband->n_bitrates; j++) {
-			if (sband->bitrates[j].bitrate == rate) {
-				rates |= BIT(j);
-				if (is_basic)
-					basic_rates |= BIT(j);
-				if (rate < min_rate) {
-					min_rate = rate;
-					min_rate_index = j;
-				}
-				break;
-			}
-		}
-	}
+	ieee80211_get_rates(sband, elems.ext_supp_rates,
+			    elems.ext_supp_rates_len, &rates, &basic_rates,
+			    &have_higher_than_11mbit,
+			    &min_rate, &min_rate_index);
 
 	/*
 	 * some buggy APs don't advertise basic_rates. use the lowest
@@ -1605,7 +1626,7 @@
 		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
 
 	if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
-		ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 				elems.ht_cap_elem, &sta->sta.ht_cap);
 
 	ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -1974,7 +1995,7 @@
 
 		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 
-		ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 				elems.ht_cap_elem, &sta->sta.ht_cap);
 
 		ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -2128,9 +2149,6 @@
 
 	ieee80211_set_disassoc(sdata, true, true);
 	mutex_unlock(&ifmgd->mtx);
-	mutex_lock(&local->mtx);
-	ieee80211_recalc_idle(local);
-	mutex_unlock(&local->mtx);
 	/*
 	 * must be outside lock due to cfg80211,
 	 * but that's not a problem.
@@ -2138,6 +2156,11 @@
 	ieee80211_send_deauth_disassoc(sdata, bssid,
 			IEEE80211_STYPE_DEAUTH, reason,
 			NULL, true);
+
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
+
 	mutex_lock(&ifmgd->mtx);
 }
 
@@ -2358,6 +2381,7 @@
 		    (unsigned long) sdata);
 
 	ifmgd->flags = 0;
+	ifmgd->powersave = sdata->wdev.ps;
 
 	mutex_init(&ifmgd->mtx);
 
@@ -2632,6 +2656,13 @@
 			ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
 
 
+	if (req->flags & ASSOC_REQ_DISABLE_HT)
+		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+
+	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+	memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+	       sizeof(ifmgd->ht_capa_mask));
+
 	if (req->ie && req->ie_len) {
 		memcpy(wk->ie, req->ie, req->ie_len);
 		wk->ie_len = req->ie_len;
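For reference, ieee80211_get_rates() above consolidates the duplicated Supported Rates parsing: each octet carries the rate in 500 kb/s units in its low 7 bits, bit 7 flags a basic rate, and the value 127 with bit 7 set is the HT PHY membership selector that must be skipped. A tiny standalone decoder illustrating the arithmetic:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned char octet = 0x96;		/* example: basic 11 Mb/s */
	int rate = (octet & 0x7f) * 5;		/* in units of 100 kb/s */
	bool is_basic = octet & 0x80;

	printf("%d.%d Mb/s%s\n", rate / 10, rate % 10,
	       is_basic ? " (basic)" : "");
	return 0;
}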
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 3d41441..f054e94 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -138,31 +138,16 @@
 	mutex_unlock(&local->iflist_mtx);
 }
 
-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
-					bool tell_ap)
-{
-	struct ieee80211_sub_if_data *sdata;
-
-	mutex_lock(&local->iflist_mtx);
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata))
-			continue;
-
-		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-		    sdata->u.mgd.associated)
-			ieee80211_offchannel_ps_enable(sdata, tell_ap);
-	}
-	mutex_unlock(&local->iflist_mtx);
-}
-
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-				 bool enable_beaconing,
 				 bool offchannel_ps_disable)
 {
 	struct ieee80211_sub_if_data *sdata;
 
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+			clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
@@ -174,7 +159,6 @@
 		}
 
 		if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
-			clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 			/*
 			 * This may wake up queues even though the driver
 			 * currently has them stopped. This is not very
@@ -188,11 +172,9 @@
 			netif_tx_wake_all_queues(sdata->dev);
 		}
 
-		/* Check to see if we should re-enable beaconing */
-		if (enable_beaconing &&
-		    (sdata->vif.type == NL80211_IFTYPE_AP ||
-		     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-		     sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
+		if (sdata->vif.type == NL80211_IFTYPE_AP ||
+		    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+		    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
 			ieee80211_bss_info_change_notify(
 				sdata, BSS_CHANGED_BEACON_ENABLED);
 	}
@@ -212,8 +194,6 @@
 		return;
 	}
 
-	ieee80211_recalc_idle(local);
-
 	if (local->hw_roc_skb) {
 		sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
 		ieee80211_tx_skb(sdata, local->hw_roc_skb);
@@ -227,6 +207,8 @@
 					  GFP_KERNEL);
 	}
 
+	ieee80211_recalc_idle(local);
+
 	mutex_unlock(&local->mtx);
 }
 
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 9ee7164..596efaf 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -125,7 +125,7 @@
 		ieee80211_bss_info_change_notify(sdata,
 			BSS_CHANGED_BEACON_ENABLED);
 
-		drv_remove_interface(local, &sdata->vif);
+		drv_remove_interface(local, sdata);
 	}
 
 	/* stop hardware - this must stop RX */
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 58a8955..b39dda5 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,8 +334,8 @@
 
 
 static void
-calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
-                    struct minstrel_rate *d, struct ieee80211_rate *rate)
+calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+		    struct ieee80211_rate *rate)
 {
 	int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
 
@@ -402,8 +402,7 @@
 
 		mr->rix = i;
 		mr->bitrate = sband->bitrates[i].bitrate / 5;
-		calc_rate_durations(mi, local, mr,
-				&sband->bitrates[i]);
+		calc_rate_durations(local, mr, &sband->bitrates[i]);
 
 		/* calculate maximum number of retransmissions before
 		 * fallback (based on maximum segment size) */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index cdb2853..ff5f7b8 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -36,8 +36,17 @@
 /* Transmit duration for the raw data part of an average sized packet */
 #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
 
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define GROUP_IDX(_streams, _sgi, _ht40)	\
+	MINSTREL_MAX_STREAMS * 2 * _ht40 +	\
+	MINSTREL_MAX_STREAMS * _sgi +		\
+	_streams - 1
+
 /* MCS rate information for an MCS group */
-#define MCS_GROUP(_streams, _sgi, _ht40) {				\
+#define MCS_GROUP(_streams, _sgi, _ht40)				\
+	[GROUP_IDX(_streams, _sgi, _ht40)] = {				\
 	.streams = _streams,						\
 	.flags =							\
 		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
@@ -58,6 +67,9 @@
  * To enable sufficiently targeted rate sampling, MCS rates are divided into
  * groups, based on the number of streams and flags (HT40, SGI) that they
  * use.
+ *
+ * Sort order has to be fixed for the GROUP_IDX macro to be applicable:
+ * HT40 -> SGI -> #streams
  */
 const struct mcs_group minstrel_mcs_groups[] = {
 	MCS_GROUP(1, 0, 0),
@@ -102,21 +114,9 @@
 static int
 minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
 {
-	int streams = (rate->idx / MCS_GROUP_RATES) + 1;
-	u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
-		if (minstrel_mcs_groups[i].streams != streams)
-			continue;
-		if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
-			continue;
-
-		return i;
-	}
-
-	WARN_ON(1);
-	return 0;
+	return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
+			 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
+			 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
 }
 
 static inline struct minstrel_rate_stats *
@@ -130,7 +130,7 @@
  * Recalculate success probabilities and counters for a rate using EWMA
  */
 static void
-minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
+minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
 {
 	if (unlikely(mr->attempts > 0)) {
 		mr->sample_skipped = 0;
@@ -156,8 +156,7 @@
  * the expected number of retransmissions and their expected length
  */
 static void
-minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
-                    int group, int rate)
+minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
 {
 	struct minstrel_rate_stats *mr;
 	unsigned int usecs;
@@ -226,8 +225,8 @@
 			mr = &mg->rates[i];
 			mr->retry_updated = false;
 			index = MCS_GROUP_RATES * group + i;
-			minstrel_calc_rate_ewma(mp, mr);
-			minstrel_ht_calc_tp(mp, mi, group, i);
+			minstrel_calc_rate_ewma(mr);
+			minstrel_ht_calc_tp(mi, group, i);
 
 			if (!mr->cur_tp)
 				continue;
@@ -300,10 +299,10 @@
 static bool
 minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
 {
-	if (!rate->count)
+	if (rate->idx < 0)
 		return false;
 
-	if (rate->idx < 0)
+	if (!rate->count)
 		return false;
 
 	return !!(rate->flags & IEEE80211_TX_RC_MCS);
@@ -357,7 +356,7 @@
 }
 
 static void
-minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
+minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -455,7 +454,7 @@
 	if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
 		minstrel_ht_update_stats(mp, mi);
 		if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
-			minstrel_aggr_check(mp, sta, skb);
+			minstrel_aggr_check(sta, skb);
 	}
 }
 
@@ -515,7 +514,6 @@
 static void
 minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                      struct ieee80211_tx_rate *rate, int index,
-                     struct ieee80211_tx_rate_control *txrc,
                      bool sample, bool rtscts)
 {
 	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@@ -628,11 +626,11 @@
 	if (sample_idx >= 0) {
 		sample = true;
 		minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
-			txrc, true, false);
+			true, false);
 		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
 	} else {
 		minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
-			txrc, false, false);
+			false, false);
 	}
 
 	if (mp->hw->max_rates >= 3) {
@@ -643,13 +641,13 @@
 		 */
 		if (sample_idx >= 0)
 			minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
-				txrc, false, false);
+				false, false);
 		else
 			minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
-				txrc, false, true);
+				false, true);
 
 		minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
-				     txrc, false, !sample);
+				     false, !sample);
 
 		ar[3].count = 0;
 		ar[3].idx = -1;
@@ -660,7 +658,7 @@
 		 * max_tp_rate -> max_prob_rate by default.
 		 */
 		minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
-				     txrc, false, !sample);
+				     false, !sample);
 
 		ar[2].count = 0;
 		ar[2].idx = -1;
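The GROUP_IDX() macro introduced above makes the MCS group index computable instead of searched for, which only works because minstrel_mcs_groups[] is kept in HT40 -> SGI -> #streams order. A quick standalone check of the arithmetic, assuming MINSTREL_MAX_STREAMS is 3 as in this kernel:

#include <stdio.h>

#define MINSTREL_MAX_STREAMS 3
#define GROUP_IDX(_streams, _sgi, _ht40)	\
	(MINSTREL_MAX_STREAMS * 2 * (_ht40) +	\
	 MINSTREL_MAX_STREAMS * (_sgi) +	\
	 (_streams) - 1)

int main(void)
{
	printf("%d\n", GROUP_IDX(1, 0, 0));	/* 0: 1 stream, LGI, HT20 */
	printf("%d\n", GROUP_IDX(3, 1, 1));	/* 11: 3 streams, SGI, HT40 */
	return 0;
}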
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index aeda654..502d3ec 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -318,7 +318,7 @@
 			rinfo[i].diff = i * pinfo->norm_offset;
 	}
 	for (i = 1; i < sband->n_bitrates; i++) {
-		s = 0;
+		s = false;
 		for (j = 0; j < sband->n_bitrates - i; j++)
 			if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
 				     sband->bitrates[rinfo[j + 1].index].bitrate)) {
@@ -327,7 +327,7 @@
 				rinfo[j + 1].index = tmp;
 				rinfo[rinfo[j].index].rev_index = j;
 				rinfo[rinfo[j + 1].index].rev_index = j + 1;
-				s = 1;
+				s = true;
 			}
 		if (!s)
 			break;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fb123e2..f407427 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -28,6 +28,7 @@
 #include "wpa.h"
 #include "tkip.h"
 #include "wme.h"
+#include "rate.h"
 
 /*
  * monitor mode reception
@@ -748,10 +749,11 @@
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_hw *hw = &local->hw;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct sta_info *sta = rx->sta;
 	struct tid_ampdu_rx *tid_agg_rx;
 	u16 sc;
-	int tid;
+	u8 tid, ack_policy;
 
 	if (!ieee80211_is_data_qos(hdr->frame_control))
 		goto dont_reorder;
@@ -764,6 +766,8 @@
 	if (!sta)
 		goto dont_reorder;
 
+	ack_policy = *ieee80211_get_qos_ctl(hdr) &
+		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
 	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -774,6 +778,15 @@
 	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
 		goto dont_reorder;
 
+	/* not part of a BA session */
+	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+		goto dont_reorder;
+
+	/* not actually part of this BA session */
+	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+		goto dont_reorder;
+
 	/* new, potentially un-ordered, ampdu frame - process it */
 
 	/* reset session timer */
@@ -858,6 +871,13 @@
 			    rx->sdata->control_port_protocol)
 				return RX_CONTINUE;
 		}
+
+		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+		    cfg80211_rx_spurious_frame(rx->sdata->dev,
+					       hdr->addr2,
+					       GFP_ATOMIC))
+			return RX_DROP_UNUSABLE;
+
 		return RX_DROP_MONITOR;
 	}
 
@@ -1327,15 +1347,20 @@
 
 		/*
 		 * If we receive a 4-addr nullfunc frame from a STA
-		 * that was not moved to a 4-addr STA vlan yet, drop
-		 * the frame to the monitor interface, to make sure
-		 * that hostapd sees it
+		 * that was not moved to a 4-addr STA vlan yet, send
+		 * the event to userspace and, for older hostapd, drop
+		 * the frame to the monitor interface.
 		 */
 		if (ieee80211_has_a4(hdr->frame_control) &&
 		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
 		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-		      !rx->sdata->u.vlan.sta)))
+		      !rx->sdata->u.vlan.sta))) {
+			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
+				cfg80211_rx_unexpected_4addr_frame(
+					rx->sdata->dev, sta->sta.addr,
+					GFP_ATOMIC);
 			return RX_DROP_MONITOR;
+		}
 		/*
 		 * Update counter and free packet here to avoid
 		 * counting this as a dropped packed.
@@ -1551,25 +1576,6 @@
 	return RX_CONTINUE;
 }
 
-static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
-{
-	u8 *data = rx->skb->data;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
-
-	if (!ieee80211_is_data_qos(hdr->frame_control))
-		return RX_CONTINUE;
-
-	/* remove the qos control field, update frame type and meta-data */
-	memmove(data + IEEE80211_QOS_CTL_LEN, data,
-		ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
-	hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
-	/* change frame type to non QOS */
-	hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
-
-	return RX_CONTINUE;
-}
-
 static int
 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
 {
@@ -1802,7 +1808,12 @@
 	}
 
 	if (xmit_skb) {
-		/* send to wireless media */
+		/*
+		 * Send to wireless media and increase priority by 256 to
+		 * keep the received priority instead of reclassifying
+		 * the frame (see cfg80211_classify8021d).
+		 */
+		xmit_skb->priority += 256;
 		xmit_skb->protocol = htons(ETH_P_802_3);
 		skb_reset_network_header(xmit_skb);
 		skb_reset_mac_header(xmit_skb);
@@ -1871,13 +1882,16 @@
 static ieee80211_rx_result
 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 {
-	struct ieee80211_hdr *hdr;
+	struct ieee80211_hdr *fwd_hdr, *hdr;
+	struct ieee80211_tx_info *info;
 	struct ieee80211s_hdr *mesh_hdr;
-	unsigned int hdrlen;
 	struct sk_buff *skb = rx->skb, *fwd_skb;
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
+	u16 q, hdrlen;
 
 	hdr = (struct ieee80211_hdr *) skb->data;
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -1893,15 +1907,8 @@
 		return RX_CONTINUE;
 
 	if (!mesh_hdr->ttl)
-		/* illegal frame */
 		return RX_DROP_MONITOR;
 
-	if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
-		IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-						dropped_frames_congestion);
-		return RX_DROP_MONITOR;
-	}
-
 	if (mesh_hdr->flags & MESH_FLAGS_AE) {
 		struct mesh_path *mppath;
 		char *proxied_addr;
@@ -1933,60 +1940,50 @@
 	    compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
 		return RX_CONTINUE;
 
-	mesh_hdr->ttl--;
+	q = ieee80211_select_queue_80211(local, skb, hdr);
+	if (ieee80211_queue_stopped(&local->hw, q)) {
+		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+		return RX_DROP_MONITOR;
+	}
+	skb_set_queue_mapping(skb, q);
 
-	if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
-		if (!mesh_hdr->ttl)
-			IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
-						     dropped_frames_ttl);
-		else {
-			struct ieee80211_hdr *fwd_hdr;
-			struct ieee80211_tx_info *info;
+	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+		goto out;
 
-			fwd_skb = skb_copy(skb, GFP_ATOMIC);
-
-			if (!fwd_skb && net_ratelimit())
-				printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
-						   sdata->name);
-			if (!fwd_skb)
-				goto out;
-
-			fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
-			memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
-			info = IEEE80211_SKB_CB(fwd_skb);
-			memset(info, 0, sizeof(*info));
-			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			info->control.vif = &rx->sdata->vif;
-			if (is_multicast_ether_addr(fwd_hdr->addr1)) {
-				IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-								fwded_mcast);
-				skb_set_queue_mapping(fwd_skb,
-					ieee80211_select_queue(sdata, fwd_skb));
-				ieee80211_set_qos_hdr(sdata, fwd_skb);
-			} else {
-				int err;
-				/*
-				 * Save TA to addr1 to send TA a path error if a
-				 * suitable next hop is not found
-				 */
-				memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
-						ETH_ALEN);
-				err = mesh_nexthop_lookup(fwd_skb, sdata);
-				/* Failed to immediately resolve next hop:
-				 * fwded frame was dropped or will be added
-				 * later to the pending skb queue.  */
-				if (err)
-					return RX_DROP_MONITOR;
-
-				IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-								fwded_unicast);
-			}
-			IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-						     fwded_frames);
-			ieee80211_add_pending_skb(local, fwd_skb);
-		}
+	if (!--mesh_hdr->ttl) {
+		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+		return RX_DROP_MONITOR;
 	}
 
+	fwd_skb = skb_copy(skb, GFP_ATOMIC);
+	if (!fwd_skb) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
+					sdata->name);
+		goto out;
+	}
+
+	fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
+	info = IEEE80211_SKB_CB(fwd_skb);
+	memset(info, 0, sizeof(*info));
+	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+	info->control.vif = &rx->sdata->vif;
+	info->control.jiffies = jiffies;
+	if (is_multicast_ether_addr(fwd_hdr->addr1)) {
+		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
+		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+	} else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
+		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
+	} else {
+		/* unable to resolve next hop */
+		mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
+				    0, reason, fwd_hdr->addr2, sdata);
+		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+		return RX_DROP_MONITOR;
+	}
+
+	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+	ieee80211_add_pending_skb(local, fwd_skb);
  out:
 	if (is_multicast_ether_addr(hdr->addr1) ||
 	    sdata->dev->flags & IFF_PROMISC)
@@ -2014,12 +2011,17 @@
 		return RX_DROP_MONITOR;
 
 	/*
-	 * Allow the cooked monitor interface of an AP to see 4-addr frames so
-	 * that a 4-addr station can be detected and moved into a separate VLAN
+	 * Send unexpected-4addr-frame event to hostapd. For older versions,
+	 * also drop the frame to cooked monitor interfaces.
 	 */
 	if (ieee80211_has_a4(hdr->frame_control) &&
-	    sdata->vif.type == NL80211_IFTYPE_AP)
+	    sdata->vif.type == NL80211_IFTYPE_AP) {
+		if (rx->sta &&
+		    !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
+			cfg80211_rx_unexpected_4addr_frame(
+				rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
 		return RX_DROP_MONITOR;
+	}
 
 	err = __ieee80211_data_to_8023(rx, &port_control);
 	if (unlikely(err))
@@ -2174,6 +2176,18 @@
 	if (!ieee80211_is_mgmt(mgmt->frame_control))
 		return RX_DROP_MONITOR;
 
+	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+	    ieee80211_is_beacon(mgmt->frame_control) &&
+	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
+		struct ieee80211_rx_status *status;
+
+		status = IEEE80211_SKB_RXCB(rx->skb);
+		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
+					    rx->skb->data, rx->skb->len,
+					    status->freq, GFP_ATOMIC);
+		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
+	}
+
 	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
 		return RX_DROP_MONITOR;
 
@@ -2206,16 +2220,69 @@
 		return RX_DROP_UNUSABLE;
 
 	switch (mgmt->u.action.category) {
-	case WLAN_CATEGORY_BACK:
-		/*
-		 * The aggregation code is not prepared to handle
-		 * anything but STA/AP due to the BSSID handling;
-		 * IBSS could work in the code but isn't supported
-		 * by drivers or the standard.
-		 */
+	case WLAN_CATEGORY_HT:
+		/* reject HT action frames from stations not supporting HT */
+		if (!rx->sta->sta.ht_cap.ht_supported)
+			goto invalid;
+
 		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
 		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-		    sdata->vif.type != NL80211_IFTYPE_AP)
+		    sdata->vif.type != NL80211_IFTYPE_AP &&
+		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
+			break;
+
+		/* verify action & smps_control are present */
+		if (len < IEEE80211_MIN_ACTION_SIZE + 2)
+			goto invalid;
+
+		switch (mgmt->u.action.u.ht_smps.action) {
+		case WLAN_HT_ACTION_SMPS: {
+			struct ieee80211_supported_band *sband;
+			u8 smps;
+
+			/* convert to HT capability */
+			switch (mgmt->u.action.u.ht_smps.smps_control) {
+			case WLAN_HT_SMPS_CONTROL_DISABLED:
+				smps = WLAN_HT_CAP_SM_PS_DISABLED;
+				break;
+			case WLAN_HT_SMPS_CONTROL_STATIC:
+				smps = WLAN_HT_CAP_SM_PS_STATIC;
+				break;
+			case WLAN_HT_SMPS_CONTROL_DYNAMIC:
+				smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+				break;
+			default:
+				goto invalid;
+			}
+			smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+			/* if no change do nothing */
+			if ((rx->sta->sta.ht_cap.cap &
+					IEEE80211_HT_CAP_SM_PS) == smps)
+				goto handled;
+
+			rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
+			rx->sta->sta.ht_cap.cap |= smps;
+
+			sband = rx->local->hw.wiphy->bands[status->band];
+
+			rate_control_rate_update(local, sband, rx->sta,
+						 IEEE80211_RC_SMPS_CHANGED,
+						 local->_oper_channel_type);
+			goto handled;
+		}
+		default:
+			goto invalid;
+		}
+
+		break;
+	case WLAN_CATEGORY_BACK:
+		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+		    sdata->vif.type != NL80211_IFTYPE_AP &&
+		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
 			break;
 
 		/* verify action_code is present */
@@ -2493,6 +2560,10 @@
 		goto out_free_skb;
 	rx->flags |= IEEE80211_RX_CMNTR;
 
+	/* If there are no cooked monitor interfaces, just free the SKB */
+	if (!local->cooked_mntrs)
+		goto out_free_skb;
+
 	if (skb_headroom(skb) < sizeof(*rthdr) &&
 	    pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
 		goto out_free_skb;
@@ -2628,7 +2699,6 @@
 		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
 			CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
-		CALL_RXH(ieee80211_rx_h_remove_qos_control)
 		CALL_RXH(ieee80211_rx_h_amsdu)
 		CALL_RXH(ieee80211_rx_h_data)
 		CALL_RXH(ieee80211_rx_h_ctrl);
@@ -2748,8 +2818,8 @@
 				rate_idx = 0; /* TODO: HT rates */
 			else
 				rate_idx = status->rate_idx;
-			rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
-					hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
+			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
+						 BIT(rate_idx));
 		}
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
@@ -2770,10 +2840,17 @@
 				return 0;
 		} else if (!ieee80211_bssid_match(bssid,
 					sdata->vif.addr)) {
+			/*
+			 * Accept public action frames even when the
+			 * BSSID doesn't match, this is used for P2P
+			 * and location updates. Note that mac80211
+			 * itself never looks at these frames.
+			 */
 			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-			    !ieee80211_is_beacon(hdr->frame_control) &&
-			    !(ieee80211_is_action(hdr->frame_control) &&
-			      sdata->vif.p2p))
+			    ieee80211_is_public_action(hdr, skb->len))
+				return 1;
+			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+			    !ieee80211_is_beacon(hdr->frame_control))
 				return 0;
 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
 		}
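The SMPS action handling added above converts the 2-bit SM power save control field into the corresponding HT capability bits before notifying rate control. A standalone illustration of that bit manipulation; the constants mirror the kernel's IEEE80211_HT_CAP_SM_PS definitions but are redefined here to keep the demo self-contained:

#include <stdio.h>

#define HT_CAP_SM_PS		0x000C	/* mirrors IEEE80211_HT_CAP_SM_PS */
#define HT_CAP_SM_PS_SHIFT	2

int main(void)
{
	unsigned short ht_cap = 0;
	unsigned char smps = 3;			/* SM power save disabled */

	ht_cap &= ~HT_CAP_SM_PS;		/* clear the old mode */
	ht_cap |= smps << HT_CAP_SM_PS_SHIFT;	/* install the new one */

	printf("0x%04x\n", ht_cap);		/* 0x000c */
	return 0;
}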
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 105436d..9270771 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -106,7 +106,7 @@
 	/* save the ERP value so that it is available at association time */
 	if (elems->erp_info && elems->erp_info_len >= 1) {
 		bss->erp_value = elems->erp_info[0];
-		bss->has_erp_value = 1;
+		bss->has_erp_value = true;
 	}
 
 	if (elems->tim) {
@@ -213,12 +213,7 @@
 	if (bss)
 		ieee80211_rx_bss_put(sdata->local, bss);
 
-	/* If we are on-operating-channel, and this packet is for the
-	 * current channel, pass the pkt on up the stack so that
-	 * the rest of the stack can make use of it.
-	 */
-	if (ieee80211_cfg_on_oper_channel(sdata->local)
-	    && (channel == sdata->local->oper_channel))
+	if (channel == sdata->local->oper_channel)
 		return RX_CONTINUE;
 
 	dev_kfree_skb(skb);
@@ -264,8 +259,6 @@
 				       bool was_hw_scan)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	bool on_oper_chan;
-	bool enable_beacons = false;
 
 	lockdep_assert_held(&local->mtx);
 
@@ -298,25 +291,13 @@
 	local->scanning = 0;
 	local->scan_channel = NULL;
 
-	on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
-	if (was_hw_scan || !on_oper_chan)
-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-	else
-		/* Set power back to normal operating levels. */
-		ieee80211_hw_config(local, 0);
+	/* Set power back to normal operating levels. */
+	ieee80211_hw_config(local, 0);
 
 	if (!was_hw_scan) {
-		bool on_oper_chan2;
 		ieee80211_configure_filter(local);
 		drv_sw_scan_complete(local);
-		on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
-		/* We should always be on-channel at this point. */
-		WARN_ON(!on_oper_chan2);
-		if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
-			enable_beacons = true;
-
-		ieee80211_offchannel_return(local, enable_beacons, true);
+		ieee80211_offchannel_return(local, true);
 	}
 
 	ieee80211_recalc_idle(local);
@@ -361,11 +342,7 @@
 	local->next_scan_state = SCAN_DECISION;
 	local->scan_channel_idx = 0;
 
-	/* We always want to use off-channel PS, even if we
-	 * are not really leaving oper-channel.  Don't
-	 * tell the AP though, as long as we are on-channel.
-	 */
-	ieee80211_offchannel_enable_all_ps(local, false);
+	ieee80211_offchannel_stop_vifs(local, true);
 
 	ieee80211_configure_filter(local);
 
@@ -373,8 +350,7 @@
 	ieee80211_hw_config(local, 0);
 
 	ieee80211_queue_delayed_work(&local->hw,
-				     &local->scan_work,
-				     IEEE80211_CHANNEL_TIME);
+				     &local->scan_work, 0);
 
 	return 0;
 }
@@ -510,98 +486,41 @@
 
 	next_chan = local->scan_req->channels[local->scan_channel_idx];
 
-	if (ieee80211_cfg_on_oper_channel(local)) {
-		/* We're currently on operating channel. */
-		if (next_chan == local->oper_channel)
-			/* We don't need to move off of operating channel. */
-			local->next_scan_state = SCAN_SET_CHANNEL;
-		else
-			/*
-			 * We do need to leave operating channel, as next
-			 * scan is somewhere else.
-			 */
-			local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
-	} else {
-		/*
-		 * we're currently scanning a different channel, let's
-		 * see if we can scan another channel without interfering
-		 * with the current traffic situation.
-		 *
-		 * Since we don't know if the AP has pending frames for us
-		 * we can only check for our tx queues and use the current
-		 * pm_qos requirements for rx. Hence, if no tx traffic occurs
-		 * at all we will scan as many channels in a row as the pm_qos
-		 * latency allows us to. Additionally we also check for the
-		 * currently negotiated listen interval to prevent losing
-		 * frames unnecessarily.
-		 *
-		 * Otherwise switch back to the operating channel.
-		 */
+	/*
+	 * we're currently scanning a different channel, let's
+	 * see if we can scan another channel without interfering
+	 * with the current traffic situation.
+	 *
+	 * Since we don't know if the AP has pending frames for us
+	 * we can only check for our tx queues and use the current
+	 * pm_qos requirements for rx. Hence, if no tx traffic occurs
+	 * at all we will scan as many channels in a row as the pm_qos
+	 * latency allows us to. Additionally we also check for the
+	 * currently negotiated listen interval to prevent losing
+	 * frames unnecessarily.
+	 *
+	 * Otherwise switch back to the operating channel.
+	 */
 
-		bad_latency = time_after(jiffies +
-				ieee80211_scan_get_channel_time(next_chan),
-				local->leave_oper_channel_time +
-				usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
+	bad_latency = time_after(jiffies +
+			ieee80211_scan_get_channel_time(next_chan),
+			local->leave_oper_channel_time +
+			usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
 
-		listen_int_exceeded = time_after(jiffies +
-				ieee80211_scan_get_channel_time(next_chan),
-				local->leave_oper_channel_time +
-				usecs_to_jiffies(min_beacon_int * 1024) *
-				local->hw.conf.listen_interval);
+	listen_int_exceeded = time_after(jiffies +
+			ieee80211_scan_get_channel_time(next_chan),
+			local->leave_oper_channel_time +
+			usecs_to_jiffies(min_beacon_int * 1024) *
+			local->hw.conf.listen_interval);
 
-		if (associated && ( !tx_empty || bad_latency ||
-		    listen_int_exceeded))
-			local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
-		else
-			local->next_scan_state = SCAN_SET_CHANNEL;
-	}
+	if (associated && (!tx_empty || bad_latency || listen_int_exceeded))
+		local->next_scan_state = SCAN_SUSPEND;
+	else
+		local->next_scan_state = SCAN_SET_CHANNEL;
 
 	*next_delay = 0;
 }
 
-static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
-						    unsigned long *next_delay)
-{
-	/* PS will already be in off-channel mode,
-	 * we do that once at the beginning of scanning.
-	 */
-	ieee80211_offchannel_stop_vifs(local, false);
-
-	/*
-	 * What if the nullfunc frames didn't arrive?
-	 */
-	drv_flush(local, false);
-	if (local->ops->flush)
-		*next_delay = 0;
-	else
-		*next_delay = HZ / 10;
-
-	/* remember when we left the operating channel */
-	local->leave_oper_channel_time = jiffies;
-
-	/* advance to the next channel to be scanned */
-	local->next_scan_state = SCAN_SET_CHANNEL;
-}
-
-static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
-						    unsigned long *next_delay)
-{
-	/* switch back to the operating channel */
-	local->scan_channel = NULL;
-	if (!ieee80211_cfg_on_oper_channel(local))
-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-
-	/*
-	 * Re-enable vifs and beaconing.  Leave PS
-	 * in off-channel state..will put that back
-	 * on-channel at the end of scanning.
-	 */
-	ieee80211_offchannel_return(local, true, false);
-
-	*next_delay = HZ / 5;
-	local->next_scan_state = SCAN_DECISION;
-}
-
 static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
 					     unsigned long *next_delay)
 {
@@ -613,10 +532,8 @@
 
 	local->scan_channel = chan;
 
-	/* Only call hw-config if we really need to change channels. */
-	if (chan != local->hw.conf.channel)
-		if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
-			skip = 1;
+	if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+		skip = 1;
 
 	/* advance state machine to next channel/band */
 	local->scan_channel_idx++;
@@ -673,6 +590,44 @@
 	local->next_scan_state = SCAN_DECISION;
 }
 
+static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
+					 unsigned long *next_delay)
+{
+	/* switch back to the operating channel */
+	local->scan_channel = NULL;
+	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+	/*
+	 * Re-enable vifs and beaconing.  Leave PS
+	 * in off-channel state; we'll put that back
+	 * on-channel at the end of scanning.
+	 */
+	ieee80211_offchannel_return(local, false);
+
+	*next_delay = HZ / 5;
+	/* afterwards, resume scan & go to next channel */
+	local->next_scan_state = SCAN_RESUME;
+}
+
+static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+					unsigned long *next_delay)
+{
+	/* PS already is in off-channel mode */
+	ieee80211_offchannel_stop_vifs(local, false);
+
+	if (local->ops->flush) {
+		drv_flush(local, false);
+		*next_delay = 0;
+	} else
+		*next_delay = HZ / 10;
+
+	/* remember when we left the operating channel */
+	local->leave_oper_channel_time = jiffies;
+
+	/* advance to the next channel to be scanned */
+	local->next_scan_state = SCAN_SET_CHANNEL;
+}
+
 void ieee80211_scan_work(struct work_struct *work)
 {
 	struct ieee80211_local *local =
@@ -743,11 +698,11 @@
 		case SCAN_SEND_PROBE:
 			ieee80211_scan_state_send_probe(local, &next_delay);
 			break;
-		case SCAN_LEAVE_OPER_CHANNEL:
-			ieee80211_scan_state_leave_oper_channel(local, &next_delay);
+		case SCAN_SUSPEND:
+			ieee80211_scan_state_suspend(local, &next_delay);
 			break;
-		case SCAN_ENTER_OPER_CHANNEL:
-			ieee80211_scan_state_enter_oper_channel(local, &next_delay);
+		case SCAN_RESUME:
+			ieee80211_scan_state_resume(local, &next_delay);
 			break;
 		}
 	} while (next_delay == 0);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8eaa746..b197136 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -62,14 +62,14 @@
  * freed before they are done using it.
  */
 
-/* Caller must hold local->sta_lock */
+/* Caller must hold local->sta_mtx */
 static int sta_info_hash_del(struct ieee80211_local *local,
 			     struct sta_info *sta)
 {
 	struct sta_info *s;
 
 	s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
-				      lockdep_is_held(&local->sta_lock));
+				      lockdep_is_held(&local->sta_mtx));
 	if (!s)
 		return -ENOENT;
 	if (s == sta) {
@@ -81,7 +81,7 @@
 	while (rcu_access_pointer(s->hnext) &&
 	       rcu_access_pointer(s->hnext) != sta)
 		s = rcu_dereference_protected(s->hnext,
-					lockdep_is_held(&local->sta_lock));
+					lockdep_is_held(&local->sta_mtx));
 	if (rcu_access_pointer(s->hnext)) {
 		RCU_INIT_POINTER(s->hnext, sta->hnext);
 		return 0;
@@ -98,14 +98,12 @@
 	struct sta_info *sta;
 
 	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if (sta->sdata == sdata && !sta->dummy &&
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
-					    lockdep_is_held(&local->sta_lock) ||
 					    lockdep_is_held(&local->sta_mtx));
 	}
 	return sta;
@@ -119,14 +117,12 @@
 	struct sta_info *sta;
 
 	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if (sta->sdata == sdata &&
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
-					    lockdep_is_held(&local->sta_lock) ||
 					    lockdep_is_held(&local->sta_mtx));
 	}
 	return sta;
@@ -143,7 +139,6 @@
 	struct sta_info *sta;
 
 	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if ((sta->sdata == sdata ||
@@ -152,7 +147,6 @@
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
-					    lockdep_is_held(&local->sta_lock) ||
 					    lockdep_is_held(&local->sta_mtx));
 	}
 	return sta;
@@ -169,7 +163,6 @@
 	struct sta_info *sta;
 
 	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if ((sta->sdata == sdata ||
@@ -177,7 +170,6 @@
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
-					    lockdep_is_held(&local->sta_lock) ||
 					    lockdep_is_held(&local->sta_mtx));
 	}
 	return sta;
@@ -204,16 +196,17 @@
 }
 
 /**
- * __sta_info_free - internal STA free helper
+ * sta_info_free - free STA
  *
  * @local: pointer to the global information
  * @sta: STA info to free
  *
  * This function must undo everything done by sta_info_alloc()
- * that may happen before sta_info_insert().
+ * that may happen before sta_info_insert(). It may only be
+ * called when sta_info_insert() has not been attempted (and
+ * if that fails, the station is freed anyway).
  */
-static void __sta_info_free(struct ieee80211_local *local,
-			    struct sta_info *sta)
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 {
 	if (sta->rate_ctrl) {
 		rate_control_free_sta(sta);
@@ -227,10 +220,11 @@
 	kfree(sta);
 }
 
-/* Caller must hold local->sta_lock */
+/* Caller must hold local->sta_mtx */
 static void sta_info_hash_add(struct ieee80211_local *local,
 			      struct sta_info *sta)
 {
+	lockdep_assert_held(&local->sta_mtx);
 	sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
 	RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
 }
@@ -280,7 +274,7 @@
 }
 
 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
-				u8 *addr, gfp_t gfp)
+				const u8 *addr, gfp_t gfp)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
@@ -338,102 +332,6 @@
 	return sta;
 }
 
-static int sta_info_finish_insert(struct sta_info *sta,
-				bool async, bool dummy_reinsert)
-{
-	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct station_info sinfo;
-	unsigned long flags;
-	int err = 0;
-
-	lockdep_assert_held(&local->sta_mtx);
-
-	if (!sta->dummy || dummy_reinsert) {
-		/* notify driver */
-		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-			sdata = container_of(sdata->bss,
-					     struct ieee80211_sub_if_data,
-					     u.ap);
-		err = drv_sta_add(local, sdata, &sta->sta);
-		if (err) {
-			if (!async)
-				return err;
-			printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
-					  "driver (%d) - keeping it anyway.\n",
-			       sdata->name, sta->sta.addr, err);
-		} else {
-			sta->uploaded = true;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-			if (async)
-				wiphy_debug(local->hw.wiphy,
-					    "Finished adding IBSS STA %pM\n",
-					    sta->sta.addr);
-#endif
-		}
-
-		sdata = sta->sdata;
-	}
-
-	if (!dummy_reinsert) {
-		if (!async) {
-			local->num_sta++;
-			local->sta_generation++;
-			smp_mb();
-
-			/* make the station visible */
-			spin_lock_irqsave(&local->sta_lock, flags);
-			sta_info_hash_add(local, sta);
-			spin_unlock_irqrestore(&local->sta_lock, flags);
-		}
-
-		list_add(&sta->list, &local->sta_list);
-	} else {
-		sta->dummy = false;
-	}
-
-	if (!sta->dummy) {
-		ieee80211_sta_debugfs_add(sta);
-		rate_control_add_sta_debugfs(sta);
-
-		memset(&sinfo, 0, sizeof(sinfo));
-		sinfo.filled = 0;
-		sinfo.generation = local->sta_generation;
-		cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
-	}
-
-	return 0;
-}
-
-static void sta_info_finish_pending(struct ieee80211_local *local)
-{
-	struct sta_info *sta;
-	unsigned long flags;
-
-	spin_lock_irqsave(&local->sta_lock, flags);
-	while (!list_empty(&local->sta_pending_list)) {
-		sta = list_first_entry(&local->sta_pending_list,
-				       struct sta_info, list);
-		list_del(&sta->list);
-		spin_unlock_irqrestore(&local->sta_lock, flags);
-
-		sta_info_finish_insert(sta, true, false);
-
-		spin_lock_irqsave(&local->sta_lock, flags);
-	}
-	spin_unlock_irqrestore(&local->sta_lock, flags);
-}
-
-static void sta_info_finish_work(struct work_struct *work)
-{
-	struct ieee80211_local *local =
-		container_of(work, struct ieee80211_local, sta_finish_work);
-
-	mutex_lock(&local->sta_mtx);
-	sta_info_finish_pending(local);
-	mutex_unlock(&local->sta_mtx);
-}
-
 static int sta_info_insert_check(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -453,50 +351,15 @@
 	return 0;
 }
 
-static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
-{
-	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&local->sta_lock, flags);
-	/* check if STA exists already */
-	if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
-		spin_unlock_irqrestore(&local->sta_lock, flags);
-		rcu_read_lock();
-		return -EEXIST;
-	}
-
-	local->num_sta++;
-	local->sta_generation++;
-	smp_mb();
-	sta_info_hash_add(local, sta);
-
-	list_add_tail(&sta->list, &local->sta_pending_list);
-
-	rcu_read_lock();
-	spin_unlock_irqrestore(&local->sta_lock, flags);
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
-			sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
-
-	ieee80211_queue_work(&local->hw, &local->sta_finish_work);
-
-	return 0;
-}
-
 /*
  * should be called with sta_mtx locked
  * this function replaces the mutex lock
  * with a RCU lock
  */
-static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 {
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	unsigned long flags;
 	struct sta_info *exist_sta;
 	bool dummy_reinsert = false;
 	int err = 0;
@@ -504,19 +367,8 @@
 	lockdep_assert_held(&local->sta_mtx);
 
 	/*
-	 * On first glance, this will look racy, because the code
-	 * in this function, which inserts a station with sleeping,
-	 * unlocks the sta_lock between checking existence in the
-	 * hash table and inserting into it.
-	 *
-	 * However, it is not racy against itself because it keeps
-	 * the mutex locked.
-	 */
-
-	spin_lock_irqsave(&local->sta_lock, flags);
-	/*
 	 * check if STA exists already.
-	 * only accept a scenario of a second call to sta_info_insert_non_ibss
+	 * only accept a scenario of a second call to sta_info_insert_finish
 	 * with a dummy station entry that was inserted earlier
 	 * in that case - assume that the dummy station flag should
 	 * be removed.
@@ -526,20 +378,47 @@
 		if (exist_sta == sta && sta->dummy) {
 			dummy_reinsert = true;
 		} else {
-			spin_unlock_irqrestore(&local->sta_lock, flags);
-			mutex_unlock(&local->sta_mtx);
-			rcu_read_lock();
-			return -EEXIST;
+			err = -EEXIST;
+			goto out_err;
 		}
 	}
 
-	spin_unlock_irqrestore(&local->sta_lock, flags);
+	if (!sta->dummy || dummy_reinsert) {
+		/* notify driver */
+		err = drv_sta_add(local, sdata, &sta->sta);
+		if (err) {
+			if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+				goto out_err;
+			printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+					  "driver (%d) - keeping it anyway.\n",
+			       sdata->name, sta->sta.addr, err);
+		} else
+			sta->uploaded = true;
+	}
 
-	err = sta_info_finish_insert(sta, false, dummy_reinsert);
-	if (err) {
-		mutex_unlock(&local->sta_mtx);
-		rcu_read_lock();
-		return err;
+	if (!dummy_reinsert) {
+		local->num_sta++;
+		local->sta_generation++;
+		smp_mb();
+
+		/* make the station visible */
+		sta_info_hash_add(local, sta);
+
+		list_add(&sta->list, &local->sta_list);
+	} else {
+		sta->dummy = false;
+	}
+
+	if (!sta->dummy) {
+		struct station_info sinfo;
+
+		ieee80211_sta_debugfs_add(sta);
+		rate_control_add_sta_debugfs(sta);
+
+		memset(&sinfo, 0, sizeof(sinfo));
+		sinfo.filled = 0;
+		sinfo.generation = local->sta_generation;
+		cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
 	}
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -555,54 +434,35 @@
 		mesh_accept_plinks_update(sdata);
 
 	return 0;
+ out_err:
+	mutex_unlock(&local->sta_mtx);
+	rcu_read_lock();
+	return err;
 }
 
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 {
 	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	int err = 0;
 
+	might_sleep();
+
 	err = sta_info_insert_check(sta);
 	if (err) {
 		rcu_read_lock();
 		goto out_free;
 	}
 
-	/*
-	 * In ad-hoc mode, we sometimes need to insert stations
-	 * from tasklet context from the RX path. To avoid races,
-	 * always do so in that case -- see the comment below.
-	 */
-	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
-		err = sta_info_insert_ibss(sta);
-		if (err)
-			goto out_free;
-
-		return 0;
-	}
-
-	/*
-	 * It might seem that the function called below is in race against
-	 * the function call above that atomically inserts the station... That,
-	 * however, is not true because the above code can only
-	 * be invoked for IBSS interfaces, and the below code will
-	 * not be -- and the two do not race against each other as
-	 * the hash table also keys off the interface.
-	 */
-
-	might_sleep();
-
 	mutex_lock(&local->sta_mtx);
 
-	err = sta_info_insert_non_ibss(sta);
+	err = sta_info_insert_finish(sta);
 	if (err)
 		goto out_free;
 
 	return 0;
  out_free:
 	BUG_ON(!err);
-	__sta_info_free(local, sta);
+	sta_info_free(local, sta);
 	return err;
 }
 
@@ -629,7 +489,7 @@
 
 	might_sleep();
 
-	err = sta_info_insert_non_ibss(sta);
+	err = sta_info_insert_finish(sta);
 	rcu_read_unlock();
 	return err;
 }
@@ -716,7 +576,7 @@
 	}
 
  done:
-	spin_lock_irqsave(&local->sta_lock, flags);
+	spin_lock_irqsave(&local->tim_lock, flags);
 
 	if (indicate_tim)
 		__bss_tim_set(bss, sta->sta.aid);
@@ -729,7 +589,7 @@
 		local->tim_in_locked_section = false;
 	}
 
-	spin_unlock_irqrestore(&local->sta_lock, flags);
+	spin_unlock_irqrestore(&local->tim_lock, flags);
 }
 
 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
@@ -853,8 +713,8 @@
 {
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
-	unsigned long flags;
 	int ret, i, ac;
+	struct tid_ampdu_tx *tid_tx;
 
 	might_sleep();
 
@@ -873,15 +733,12 @@
 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 	ieee80211_sta_tear_down_BA_sessions(sta, true);
 
-	spin_lock_irqsave(&local->sta_lock, flags);
 	ret = sta_info_hash_del(local, sta);
-	/* this might still be the pending list ... which is fine */
-	if (!ret)
-		list_del(&sta->list);
-	spin_unlock_irqrestore(&local->sta_lock, flags);
 	if (ret)
 		return ret;
 
+	list_del(&sta->list);
+
 	mutex_lock(&local->key_mtx);
 	for (i = 0; i < NUM_DEFAULT_KEYS; i++)
 		__ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
@@ -908,6 +765,9 @@
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
 
+	while (sta->sta_state > IEEE80211_STA_NONE)
+		sta_info_move_state(sta, sta->sta_state - 1);
+
 	if (sta->uploaded) {
 		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 			sdata = container_of(sdata->bss,
@@ -953,7 +813,36 @@
 	}
 #endif
 
-	__sta_info_free(local, sta);
+	/* Destroying the station info before its A-MPDU TX pending queues
+	 * have been freed would leak the frames still queued there.
+	 *
+	 * Make sure that such queues are purged before freeing the station
+	 * info.
+	 * TODO: We have to somehow postpone the full destruction
+	 * until the aggregation stop completes. See
+	 * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936
+	 */
+
+	mutex_lock(&sta->ampdu_mlme.mtx);
+
+	for (i = 0; i < STA_TID_NUM; i++) {
+		tid_tx = rcu_dereference_protected_tid_tx(sta, i);
+		if (!tid_tx)
+			continue;
+		if (skb_queue_len(&tid_tx->pending)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+			wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d "
+				"packets for tid=%d\n",
+				skb_queue_len(&tid_tx->pending), i);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+			__skb_queue_purge(&tid_tx->pending);
+		}
+		kfree_rcu(tid_tx, rcu_head);
+	}
+
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+
+	sta_info_free(local, sta);
 
 	return 0;
 }
@@ -1009,11 +898,9 @@
 
 void sta_info_init(struct ieee80211_local *local)
 {
-	spin_lock_init(&local->sta_lock);
+	spin_lock_init(&local->tim_lock);
 	mutex_init(&local->sta_mtx);
 	INIT_LIST_HEAD(&local->sta_list);
-	INIT_LIST_HEAD(&local->sta_pending_list);
-	INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
 
 	setup_timer(&local->sta_cleanup, sta_info_cleanup,
 		    (unsigned long)local);
@@ -1042,9 +929,6 @@
 	might_sleep();
 
 	mutex_lock(&local->sta_mtx);
-
-	sta_info_finish_pending(local);
-
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
 		if (!sdata || sdata == sta->sdata)
 			WARN_ON(__sta_info_destroy(sta));
@@ -1061,7 +945,11 @@
 	struct sta_info *sta, *tmp;
 
 	mutex_lock(&local->sta_mtx);
-	list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
+
+	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+		if (sdata != sta->sdata)
+			continue;
+
 		if (time_after(jiffies, sta->last_rx + exp_time)) {
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 			printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
@@ -1069,6 +957,8 @@
 #endif
 			WARN_ON(__sta_info_destroy(sta));
 		}
+	}
+
 	mutex_unlock(&local->sta_mtx);
 }
 
@@ -1517,3 +1407,56 @@
 	sta_info_recalc_tim(sta);
 }
 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
+
+int sta_info_move_state_checked(struct sta_info *sta,
+				enum ieee80211_sta_state new_state)
+{
+	might_sleep();
+
+	if (sta->sta_state == new_state)
+		return 0;
+
+	switch (new_state) {
+	case IEEE80211_STA_NONE:
+		if (sta->sta_state == IEEE80211_STA_AUTH)
+			clear_bit(WLAN_STA_AUTH, &sta->_flags);
+		else
+			return -EINVAL;
+		break;
+	case IEEE80211_STA_AUTH:
+		if (sta->sta_state == IEEE80211_STA_NONE)
+			set_bit(WLAN_STA_AUTH, &sta->_flags);
+		else if (sta->sta_state == IEEE80211_STA_ASSOC)
+			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+		else
+			return -EINVAL;
+		break;
+	case IEEE80211_STA_ASSOC:
+		if (sta->sta_state == IEEE80211_STA_AUTH) {
+			set_bit(WLAN_STA_ASSOC, &sta->_flags);
+		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+			if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+				atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
+			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+		} else
+			return -EINVAL;
+		break;
+	case IEEE80211_STA_AUTHORIZED:
+		if (sta->sta_state == IEEE80211_STA_ASSOC) {
+			if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+				atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
+			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+		} else
+			return -EINVAL;
+		break;
+	default:
+		WARN(1, "invalid state %d", new_state);
+		return -EINVAL;
+	}
+
+	printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
+		sta->sdata->name, sta->sta.addr, new_state);
+	sta->sta_state = new_state;
+
+	return 0;
+}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 8c8ce05..6f77f12 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -30,7 +30,6 @@
  *	when virtual port control is not in use.
  * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
  *	frames.
- * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
  * @WLAN_STA_WME: Station is a QoS-STA.
  * @WLAN_STA_WDS: Station is one of our WDS peers.
  * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
@@ -52,6 +51,7 @@
  *	unblocks the station.
  * @WLAN_STA_SP: Station is in a service period, so don't try to
  *	reply to other uAPSD trigger frames or PS-Poll.
+ * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
  */
 enum ieee80211_sta_info_flags {
 	WLAN_STA_AUTH,
@@ -59,7 +59,6 @@
 	WLAN_STA_PS_STA,
 	WLAN_STA_AUTHORIZED,
 	WLAN_STA_SHORT_PREAMBLE,
-	WLAN_STA_ASSOC_AP,
 	WLAN_STA_WME,
 	WLAN_STA_WDS,
 	WLAN_STA_CLEAR_PS_FILT,
@@ -71,11 +70,22 @@
 	WLAN_STA_TDLS_PEER_AUTH,
 	WLAN_STA_UAPSD,
 	WLAN_STA_SP,
+	WLAN_STA_4ADDR_EVENT,
+};
+
+enum ieee80211_sta_state {
+	/* NOTE: These need to be ordered correctly! */
+	IEEE80211_STA_NONE,
+	IEEE80211_STA_AUTH,
+	IEEE80211_STA_ASSOC,
+	IEEE80211_STA_AUTHORIZED,
 };
 
 #define STA_TID_NUM 16
 #define ADDBA_RESP_INTERVAL HZ
-#define HT_AGG_MAX_RETRIES		0x3
+#define HT_AGG_MAX_RETRIES		15
+#define HT_AGG_BURST_RETRIES		3
+#define HT_AGG_RETRIES_PERIOD		(15 * HZ)
 
 #define HT_AGG_STATE_DRV_READY		0
 #define HT_AGG_STATE_RESPONSE_RECEIVED	1
@@ -88,6 +98,7 @@
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
  * @rcu_head: rcu head for freeing structure
+ * @session_timer: check if we keep Tx-ing on the TID (by timeout value)
  * @addba_resp_timer: timer for peer's response to addba request
  * @pending: pending frames queue -- use sta's spinlock to protect
  * @dialog_token: dialog token for aggregation session
@@ -110,6 +121,7 @@
  */
 struct tid_ampdu_tx {
 	struct rcu_head rcu_head;
+	struct timer_list session_timer;
 	struct timer_list addba_resp_timer;
 	struct sk_buff_head pending;
 	unsigned long state;
@@ -169,6 +181,7 @@
  * @tid_tx: aggregation info for Tx per TID
  * @tid_start_tx: sessions where start was requested
  * @addba_req_num: number of times addBA request has been sent.
+ * @last_addba_req_time: timestamp of the last addBA request.
  * @dialog_token_allocator: dialog token enumerator for each new session;
  * @work: work struct for starting/stopping aggregation
  * @tid_rx_timer_expired: bitmap indicating on which TIDs the
@@ -188,6 +201,7 @@
 	struct work_struct work;
 	struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
 	struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM];
+	unsigned long last_addba_req_time[STA_TID_NUM];
 	u8 addba_req_num[STA_TID_NUM];
 	u8 dialog_token_allocator;
 };
@@ -260,6 +274,8 @@
  * @dummy: indicate a dummy station created for receiving
  *	EAP frames before association
  * @sta: station information we share with the driver
+ * @sta_state: duplicates information about station state (for debug)
+ * @beacon_loss_count: number of times beacon loss has triggered
  */
 struct sta_info {
 	/* General information, mostly static */
@@ -281,6 +297,8 @@
 
 	bool uploaded;
 
+	enum ieee80211_sta_state sta_state;
+
 	/* use the accessors defined below */
 	unsigned long _flags;
 
@@ -350,6 +368,7 @@
 #endif
 
 	unsigned int lost_packets;
+	unsigned int beacon_loss_count;
 
 	/* should be right in front of sta to be in the same cache line */
 	bool dummy;
@@ -369,12 +388,18 @@
 static inline void set_sta_flag(struct sta_info *sta,
 				enum ieee80211_sta_info_flags flag)
 {
+	WARN_ON(flag == WLAN_STA_AUTH ||
+		flag == WLAN_STA_ASSOC ||
+		flag == WLAN_STA_AUTHORIZED);
 	set_bit(flag, &sta->_flags);
 }
 
 static inline void clear_sta_flag(struct sta_info *sta,
 				  enum ieee80211_sta_info_flags flag)
 {
+	WARN_ON(flag == WLAN_STA_AUTH ||
+		flag == WLAN_STA_ASSOC ||
+		flag == WLAN_STA_AUTHORIZED);
 	clear_bit(flag, &sta->_flags);
 }
 
@@ -387,9 +412,32 @@
 static inline int test_and_clear_sta_flag(struct sta_info *sta,
 					  enum ieee80211_sta_info_flags flag)
 {
+	WARN_ON(flag == WLAN_STA_AUTH ||
+		flag == WLAN_STA_ASSOC ||
+		flag == WLAN_STA_AUTHORIZED);
 	return test_and_clear_bit(flag, &sta->_flags);
 }
 
+static inline int test_and_set_sta_flag(struct sta_info *sta,
+					enum ieee80211_sta_info_flags flag)
+{
+	WARN_ON(flag == WLAN_STA_AUTH ||
+		flag == WLAN_STA_ASSOC ||
+		flag == WLAN_STA_AUTHORIZED);
+	return test_and_set_bit(flag, &sta->_flags);
+}
+
+int sta_info_move_state_checked(struct sta_info *sta,
+				enum ieee80211_sta_state new_state);
+
+static inline void sta_info_move_state(struct sta_info *sta,
+				       enum ieee80211_sta_state new_state)
+{
+	int ret = sta_info_move_state_checked(sta, new_state);
+	WARN_ON_ONCE(ret);
+}
+
+
 void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
 			     struct tid_ampdu_tx *tid_tx);
 
@@ -480,7 +528,10 @@
  * until sta_info_insert().
  */
 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
-				u8 *addr, gfp_t gfp);
+				const u8 *addr, gfp_t gfp);
+
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
+
 /*
  * Insert STA info into hash table/list; returns zero or
  * -EEXIST if the same MAC address is already present.
@@ -491,7 +542,6 @@
  */
 int sta_info_insert(struct sta_info *sta);
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
-int sta_info_insert_atomic(struct sta_info *sta);
 int sta_info_reinsert(struct sta_info *sta);
 
 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 16518f3..30c265c 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -340,7 +340,6 @@
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	u16 frag, type;
 	__le16 fc;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sub_if_data *sdata;
@@ -476,12 +475,8 @@
 	 * Fragments are passed to low-level drivers as separate skbs, so these
 	 * are actually fragments, not frames. Update frame counters only for
 	 * the first fragment of the frame. */
-
-	frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
-	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
-
 	if (info->flags & IEEE80211_TX_STAT_ACK) {
-		if (frag == 0) {
+		if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
 			local->dot11TransmittedFrameCount++;
 			if (is_multicast_ether_addr(hdr->addr1))
 				local->dot11MulticastTransmittedFrameCount++;
@@ -496,11 +491,11 @@
 		 * with a multicast address in the address 1 field of type Data
 		 * or Management. */
 		if (!is_multicast_ether_addr(hdr->addr1) ||
-		    type == IEEE80211_FTYPE_DATA ||
-		    type == IEEE80211_FTYPE_MGMT)
+		    ieee80211_is_data(fc) ||
+		    ieee80211_is_mgmt(fc))
 			local->dot11TransmittedFragmentCount++;
 	} else {
-		if (frag == 0)
+		if (ieee80211_is_first_frag(hdr->seq_ctrl))
 			local->dot11FailedCount++;
 	}
 
@@ -517,27 +512,54 @@
 	}
 
 	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
-		struct ieee80211_work *wk;
 		u64 cookie = (unsigned long)skb;
 
-		rcu_read_lock();
-		list_for_each_entry_rcu(wk, &local->work_list, list) {
-			if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
-				continue;
-			if (wk->offchan_tx.frame != skb)
-				continue;
-			wk->offchan_tx.status = true;
-			break;
-		}
-		rcu_read_unlock();
-		if (local->hw_roc_skb_for_status == skb) {
-			cookie = local->hw_roc_cookie ^ 2;
-			local->hw_roc_skb_for_status = NULL;
-		}
+		if (ieee80211_is_nullfunc(hdr->frame_control) ||
+		    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+			bool acked = info->flags & IEEE80211_TX_STAT_ACK;
+			cfg80211_probe_status(skb->dev, hdr->addr1,
+					      cookie, acked, GFP_ATOMIC);
+		} else {
+			struct ieee80211_work *wk;
 
-		cfg80211_mgmt_tx_status(
-			skb->dev, cookie, skb->data, skb->len,
-			!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
+			rcu_read_lock();
+			list_for_each_entry_rcu(wk, &local->work_list, list) {
+				if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
+					continue;
+				if (wk->offchan_tx.frame != skb)
+					continue;
+				wk->offchan_tx.status = true;
+				break;
+			}
+			rcu_read_unlock();
+			if (local->hw_roc_skb_for_status == skb) {
+				cookie = local->hw_roc_cookie ^ 2;
+				local->hw_roc_skb_for_status = NULL;
+			}
+
+			cfg80211_mgmt_tx_status(
+				skb->dev, cookie, skb->data, skb->len,
+				!!(info->flags & IEEE80211_TX_STAT_ACK),
+				GFP_ATOMIC);
+		}
+	}
+
+	if (unlikely(info->ack_frame_id)) {
+		struct sk_buff *ack_skb;
+		unsigned long flags;
+
+		spin_lock_irqsave(&local->ack_status_lock, flags);
+		ack_skb = idr_find(&local->ack_status_frames,
+				   info->ack_frame_id);
+		if (ack_skb)
+			idr_remove(&local->ack_status_frames,
+				   info->ack_frame_id);
+		spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+		/* consumes ack_skb */
+		if (ack_skb)
+			skb_complete_wifi_ack(ack_skb,
+				info->flags & IEEE80211_TX_STAT_ACK);
 	}
 
 	/* this was a transmitted frame, but now we want to reuse it */
@@ -545,7 +567,7 @@
 
 	/* Need to make a copy before skb->cb gets cleared */
 	send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
-			(type != IEEE80211_FTYPE_DATA);
+			 !(ieee80211_is_data(fc));
 
 	/*
 	 * This is a bit racy but we can avoid a lot of work
@@ -610,3 +632,29 @@
 				    num_packets, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(ieee80211_report_low_ack);
+
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	if (unlikely(info->ack_frame_id)) {
+		struct sk_buff *ack_skb;
+		unsigned long flags;
+
+		spin_lock_irqsave(&local->ack_status_lock, flags);
+		ack_skb = idr_find(&local->ack_status_frames,
+				   info->ack_frame_id);
+		if (ack_skb)
+			idr_remove(&local->ack_status_frames,
+				   info->ack_frame_id);
+		spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+		/* consumes ack_skb */
+		if (ack_skb)
+			dev_kfree_skb_any(ack_skb);
+	}
+
+	dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ieee80211_free_txskb);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1f8b120..edcd1c7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -36,7 +36,8 @@
 
 /* misc utils */
 
-static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
+static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
+				 struct sk_buff *skb, int group_addr,
 				 int next_frag_len)
 {
 	int rate, mrate, erp, dur, i;
@@ -44,7 +45,7 @@
 	struct ieee80211_local *local = tx->local;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_hdr *hdr;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	/* assume HW handles this */
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
@@ -76,7 +77,7 @@
 	 *   at the highest possible rate belonging to the PHY rates in the
 	 *   BSSBasicRateSet
 	 */
-	hdr = (struct ieee80211_hdr *)tx->skb->data;
+	hdr = (struct ieee80211_hdr *)skb->data;
 	if (ieee80211_is_ctl(hdr->frame_control)) {
 		/* TODO: These control frames are not currently sent by
 		 * mac80211, but should they be implemented, this function
@@ -150,11 +151,15 @@
 		rate = mrate;
 	}
 
-	/* Time needed to transmit ACK
-	 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
-	 * to closest integer */
-
-	dur = ieee80211_frame_duration(local, 10, rate, erp,
+	/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
+	if (ieee80211_is_data_qos(hdr->frame_control) &&
+	    *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+		dur = 0;
+	else
+		/* Time needed to transmit ACK
+		 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
+		 * to closest integer */
+		dur = ieee80211_frame_duration(local, 10, rate, erp,
 				tx->sdata->vif.bss_conf.use_short_preamble);
 
 	if (next_frag_len) {
@@ -290,7 +295,6 @@
 
 	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
 		if (unlikely(!assoc &&
-			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
 			     ieee80211_is_data(hdr->frame_control))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 			printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -300,17 +304,14 @@
 			I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
 			return TX_DROP;
 		}
-	} else {
-		if (unlikely(ieee80211_is_data(hdr->frame_control) &&
-			     tx->local->num_sta == 0 &&
-			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
-			/*
-			 * No associated STAs - no need to send multicast
-			 * frames.
-			 */
-			return TX_DROP;
-		}
-		return TX_CONTINUE;
+	} else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
+			    ieee80211_is_data(hdr->frame_control) &&
+			    !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) {
+		/*
+		 * No associated STAs - no need to send multicast
+		 * frames.
+		 */
+		return TX_DROP;
 	}
 
 	return TX_CONTINUE;
@@ -572,8 +573,6 @@
 		switch (tx->key->conf.cipher) {
 		case WLAN_CIPHER_SUITE_WEP40:
 		case WLAN_CIPHER_SUITE_WEP104:
-			if (ieee80211_is_auth(hdr->frame_control))
-				break;
 		case WLAN_CIPHER_SUITE_TKIP:
 			if (!ieee80211_is_data_present(hdr->frame_control))
 				tx->key = NULL;
@@ -637,6 +636,7 @@
 	else
 		txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
 		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
 
 	/* set up RTS protection if desired */
@@ -844,11 +844,13 @@
 	return TX_CONTINUE;
 }
 
-static int ieee80211_fragment(struct ieee80211_local *local,
+static int ieee80211_fragment(struct ieee80211_tx_data *tx,
 			      struct sk_buff *skb, int hdrlen,
 			      int frag_threshold)
 {
-	struct sk_buff *tail = skb, *tmp;
+	struct ieee80211_local *local = tx->local;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *tmp;
 	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
 	int pos = hdrlen + per_fragm;
 	int rem = skb->len - hdrlen - per_fragm;
@@ -856,6 +858,8 @@
 	if (WARN_ON(rem < 0))
 		return -EINVAL;
 
+	/* first fragment was already added to queue by caller */
+
 	while (rem) {
 		int fraglen = per_fragm;
 
@@ -868,12 +872,21 @@
 				    IEEE80211_ENCRYPT_TAILROOM);
 		if (!tmp)
 			return -ENOMEM;
-		tail->next = tmp;
-		tail = tmp;
+
+		__skb_queue_tail(&tx->skbs, tmp);
+
 		skb_reserve(tmp, local->tx_headroom +
 				 IEEE80211_ENCRYPT_HEADROOM);
 		/* copy control information */
 		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+
+		info = IEEE80211_SKB_CB(tmp);
+		info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
+				 IEEE80211_TX_CTL_FIRST_FRAGMENT);
+
+		if (rem)
+			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
+
 		skb_copy_queue_mapping(tmp, skb);
 		tmp->priority = skb->priority;
 		tmp->dev = skb->dev;
@@ -885,6 +898,7 @@
 		pos += fraglen;
 	}
 
+	/* adjust first fragment's length */
 	skb->len = hdrlen + per_fragm;
 	return 0;
 }
@@ -899,6 +913,10 @@
 	int hdrlen;
 	int fragnum;
 
+	/* no matter what happens, tx->skb moves to tx->skbs */
+	__skb_queue_tail(&tx->skbs, skb);
+	tx->skb = NULL;
+
 	if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
 		return TX_CONTINUE;
 
@@ -927,21 +945,21 @@
 	 * of the fragments then we will simply pretend to accept the skb
 	 * but store it away as pending.
 	 */
-	if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+	if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
 		return TX_DROP;
 
 	/* update duration/seq/flags of fragments */
 	fragnum = 0;
-	do {
+
+	skb_queue_walk(&tx->skbs, skb) {
 		int next_len;
 		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 
 		hdr = (void *)skb->data;
 		info = IEEE80211_SKB_CB(skb);
 
-		if (skb->next) {
+		if (!skb_queue_is_last(&tx->skbs, skb)) {
 			hdr->frame_control |= morefrags;
-			next_len = skb->next->len;
 			/*
 			 * No multi-rate retries for fragmented frames, that
 			 * would completely throw off the NAV at other STAs.
@@ -956,10 +974,9 @@
 			hdr->frame_control &= ~morefrags;
 			next_len = 0;
 		}
-		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
 		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
 		fragnum++;
-	} while ((skb = skb->next));
+	}
 
 	return TX_CONTINUE;
 }
@@ -967,16 +984,16 @@
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
+	struct sk_buff *skb;
 
 	if (!tx->sta)
 		return TX_CONTINUE;
 
 	tx->sta->tx_packets++;
-	do {
+	skb_queue_walk(&tx->skbs, skb) {
 		tx->sta->tx_fragments++;
 		tx->sta->tx_bytes += skb->len;
-	} while ((skb = skb->next));
+	}
 
 	return TX_CONTINUE;
 }
@@ -1015,21 +1032,25 @@
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
+	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
 	int next_len;
 	bool group_addr;
 
-	do {
+	skb_queue_walk(&tx->skbs, skb) {
 		hdr = (void *) skb->data;
 		if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
 			break; /* must not overwrite AID */
-		next_len = skb->next ? skb->next->len : 0;
+		if (!skb_queue_is_last(&tx->skbs, skb)) {
+			struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
+			next_len = next->len;
+		} else
+			next_len = 0;
 		group_addr = is_multicast_ether_addr(hdr->addr1);
 
 		hdr->duration_id =
-			ieee80211_duration(tx, group_addr, next_len);
-	} while ((skb = skb->next));
+			ieee80211_duration(tx, skb, group_addr, next_len);
+	}
 
 	return TX_CONTINUE;
 }
@@ -1043,9 +1064,11 @@
 				  int tid)
 {
 	bool queued = false;
+	bool reset_agg_timer = false;
 
 	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
 		info->flags |= IEEE80211_TX_CTL_AMPDU;
+		reset_agg_timer = true;
 	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
 		/*
 		 * nothing -- this aggregation session is being started
@@ -1077,6 +1100,7 @@
 			/* do nothing, let packet pass through */
 		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
 			info->flags |= IEEE80211_TX_CTL_AMPDU;
+			reset_agg_timer = true;
 		} else {
 			queued = true;
 			info->control.vif = &tx->sdata->vif;
@@ -1086,6 +1110,11 @@
 		spin_unlock(&tx->sta->lock);
 	}
 
+	/* reset session timer */
+	if (reset_agg_timer && tid_tx->timeout)
+		mod_timer(&tid_tx->session_timer,
+			  TU_TO_EXP_TIME(tid_tx->timeout));
+
 	return queued;
 }
 
@@ -1108,6 +1137,7 @@
 	tx->local = local;
 	tx->sdata = sdata;
 	tx->channel = local->hw.conf.channel;
+	__skb_queue_head_init(&tx->skbs);
 
 	/*
 	 * If this flag is set to true anywhere, and we get here,
@@ -1152,16 +1182,8 @@
 	if (is_multicast_ether_addr(hdr->addr1)) {
 		tx->flags &= ~IEEE80211_TX_UNICAST;
 		info->flags |= IEEE80211_TX_CTL_NO_ACK;
-	} else {
+	} else
 		tx->flags |= IEEE80211_TX_UNICAST;
-		if (unlikely(local->wifi_wme_noack_test))
-			info->flags |= IEEE80211_TX_CTL_NO_ACK;
-		/*
-		 * Flags are initialized to 0. Hence, no need to
-		 * explicitly unset IEEE80211_TX_CTL_NO_ACK since
-		 * it might already be set for injected frames.
-		 */
-	}
 
 	if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
 		if (!(tx->flags & IEEE80211_TX_UNICAST) ||
@@ -1180,22 +1202,18 @@
 	return TX_CONTINUE;
 }
 
-/*
- * Returns false if the frame couldn't be transmitted but was queued instead.
- */
-static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
-			   struct sta_info *sta, bool txpending)
+static bool ieee80211_tx_frags(struct ieee80211_local *local,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct sk_buff_head *skbs,
+			       bool txpending)
 {
-	struct sk_buff *skb = *skbp, *next;
+	struct sk_buff *skb, *tmp;
 	struct ieee80211_tx_info *info;
-	struct ieee80211_sub_if_data *sdata;
 	unsigned long flags;
-	int len;
-	bool fragm = false;
 
-	while (skb) {
+	skb_queue_walk_safe(skbs, skb, tmp) {
 		int q = skb_get_queue_mapping(skb);
-		__le16 fc;
 
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 		if (local->queue_stop_reasons[q] ||
@@ -1205,24 +1223,11 @@
 			 * transmission from the tx-pending tasklet when the
 			 * queue is woken again.
 			 */
-
-			do {
-				next = skb->next;
-				skb->next = NULL;
-				/*
-				 * NB: If txpending is true, next must already
-				 * be NULL since we must've gone through this
-				 * loop before already; therefore we can just
-				 * queue the frame to the head without worrying
-				 * about reordering of fragments.
-				 */
-				if (unlikely(txpending))
-					__skb_queue_head(&local->pending[q],
-							 skb);
-				else
-					__skb_queue_tail(&local->pending[q],
-							 skb);
-			} while ((skb = next));
+			if (txpending)
+				skb_queue_splice_init(skbs, &local->pending[q]);
+			else
+				skb_queue_splice_tail_init(skbs,
+							   &local->pending[q]);
 
 			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
 					       flags);
@@ -1231,57 +1236,81 @@
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
 		info = IEEE80211_SKB_CB(skb);
+		info->control.vif = vif;
+		info->control.sta = sta;
 
-		if (fragm)
-			info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
-					 IEEE80211_TX_CTL_FIRST_FRAGMENT);
-
-		next = skb->next;
-		len = skb->len;
-
-		if (next)
-			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
-
-		sdata = vif_to_sdata(info->control.vif);
-
-		switch (sdata->vif.type) {
-		case NL80211_IFTYPE_MONITOR:
-			info->control.vif = NULL;
-			break;
-		case NL80211_IFTYPE_AP_VLAN:
-			info->control.vif = &container_of(sdata->bss,
-				struct ieee80211_sub_if_data, u.ap)->vif;
-			break;
-		default:
-			/* keep */
-			break;
-		}
-
-		if (sta && sta->uploaded)
-			info->control.sta = &sta->sta;
-		else
-			info->control.sta = NULL;
-
-		fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
+		__skb_unlink(skb, skbs);
 		drv_tx(local, skb);
-
-		ieee80211_tpt_led_trig_tx(local, fc, len);
-		*skbp = skb = next;
-		ieee80211_led_tx(local, 1);
-		fragm = true;
 	}
 
 	return true;
 }
 
 /*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local,
+			   struct sk_buff_head *skbs, int led_len,
+			   struct sta_info *sta, bool txpending)
+{
+	struct ieee80211_tx_info *info;
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_vif *vif;
+	struct ieee80211_sta *pubsta;
+	struct sk_buff *skb;
+	bool result = true;
+	__le16 fc;
+
+	if (WARN_ON(skb_queue_empty(skbs)))
+		return true;
+
+	skb = skb_peek(skbs);
+	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
+	info = IEEE80211_SKB_CB(skb);
+	sdata = vif_to_sdata(info->control.vif);
+	if (sta && !sta->uploaded)
+		sta = NULL;
+
+	if (sta)
+		pubsta = &sta->sta;
+	else
+		pubsta = NULL;
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_MONITOR:
+		sdata = NULL;
+		vif = NULL;
+		break;
+	case NL80211_IFTYPE_AP_VLAN:
+		sdata = container_of(sdata->bss,
+				     struct ieee80211_sub_if_data, u.ap);
+		/* fall through */
+	default:
+		vif = &sdata->vif;
+		break;
+	}
+
+	if (local->ops->tx_frags)
+		drv_tx_frags(local, vif, pubsta, skbs);
+	else
+		result = ieee80211_tx_frags(local, vif, pubsta, skbs,
+					    txpending);
+
+	ieee80211_tpt_led_trig_tx(local, fc, led_len);
+	ieee80211_led_tx(local, 1);
+
+	WARN_ON_ONCE(!skb_queue_empty(skbs));
+
+	return result;
+}
+
+/*
  * Invoke TX handlers, return 0 on success and non-zero if the
  * frame was dropped or queued.
  */
 static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 	ieee80211_tx_result res = TX_DROP;
 
 #define CALL_TXH(txh) \
@@ -1299,8 +1328,11 @@
 	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
 		CALL_TXH(ieee80211_tx_h_rate_ctrl);
 
-	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
+	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
+		__skb_queue_tail(&tx->skbs, tx->skb);
+		tx->skb = NULL;
 		goto txh_done;
+	}
 
 	CALL_TXH(ieee80211_tx_h_michael_mic_add);
 	CALL_TXH(ieee80211_tx_h_sequence);
@@ -1315,13 +1347,10 @@
  txh_done:
 	if (unlikely(res == TX_DROP)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
-		while (skb) {
-			struct sk_buff *next;
-
-			next = skb->next;
-			dev_kfree_skb(skb);
-			skb = next;
-		}
+		if (tx->skb)
+			dev_kfree_skb(tx->skb);
+		else
+			__skb_queue_purge(&tx->skbs);
 		return -1;
 	} else if (unlikely(res == TX_QUEUED)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1342,6 +1371,7 @@
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	bool result = true;
+	int led_len;
 
 	if (unlikely(skb->len < 10)) {
 		dev_kfree_skb(skb);
@@ -1351,6 +1381,7 @@
 	rcu_read_lock();
 
 	/* initialises tx */
+	led_len = skb->len;
 	res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
 
 	if (unlikely(res_prepare == TX_DROP)) {
@@ -1364,7 +1395,8 @@
 	info->band = tx.channel->band;
 
 	if (!invoke_tx_handlers(&tx))
-		result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
+		result = __ieee80211_tx(local, &tx.skbs, led_len,
+					tx.sta, txpending);
  out:
 	rcu_read_unlock();
 	return result;
@@ -1431,7 +1463,7 @@
 	if (ieee80211_vif_is_mesh(&sdata->vif) &&
 	    ieee80211_is_data(hdr->frame_control) &&
 		!is_multicast_ether_addr(hdr->addr1))
-			if (mesh_nexthop_lookup(skb, sdata)) {
+			if (mesh_nexthop_resolve(skb, sdata)) {
 				/* skb queued: don't free */
 				rcu_read_unlock();
 				return;
@@ -1685,8 +1717,10 @@
 	int nh_pos, h_pos;
 	struct sta_info *sta = NULL;
 	bool wme_sta = false, authorized = false, tdls_auth = false;
-	struct sk_buff *tmp_skb;
 	bool tdls_direct = false;
+	bool multicast;
+	u32 info_flags = 0;
+	u16 info_id = 0;
 
 	if (unlikely(skb->len < ETH_HLEN)) {
 		ret = NETDEV_TX_OK;
@@ -1873,7 +1907,8 @@
 	 * if it is a multicast address (which can only happen
 	 * in AP mode)
 	 */
-	if (!is_multicast_ether_addr(hdr.addr1)) {
+	multicast = is_multicast_ether_addr(hdr.addr1);
+	if (!multicast) {
 		rcu_read_lock();
 		sta = sta_info_get(sdata, hdr.addr1);
 		if (sta) {
@@ -1914,11 +1949,54 @@
 		goto fail;
 	}
 
+	if (unlikely(!multicast && skb->sk &&
+		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
+		struct sk_buff *orig_skb = skb;
+
+		skb = skb_clone(skb, GFP_ATOMIC);
+		if (skb) {
+			unsigned long flags;
+			int id, r;
+
+			spin_lock_irqsave(&local->ack_status_lock, flags);
+			r = idr_get_new_above(&local->ack_status_frames,
+					      orig_skb, 1, &id);
+			if (r == -EAGAIN) {
+				idr_pre_get(&local->ack_status_frames,
+					    GFP_ATOMIC);
+				r = idr_get_new_above(&local->ack_status_frames,
+						      orig_skb, 1, &id);
+			}
+			if (WARN_ON(!id) || id > 0xffff) {
+				idr_remove(&local->ack_status_frames, id);
+				r = -ERANGE;
+			}
+			spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+			if (!r) {
+				info_id = id;
+				info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+			} else if (skb_shared(skb)) {
+				kfree_skb(orig_skb);
+			} else {
+				kfree_skb(skb);
+				skb = orig_skb;
+			}
+		} else {
+			/* couldn't clone -- lose tx status ... */
+			skb = orig_skb;
+		}
+	}
+
 	/*
 	 * If the skb is shared we need to obtain our own copy.
 	 */
 	if (skb_shared(skb)) {
-		tmp_skb = skb;
+		struct sk_buff *tmp_skb = skb;
+
+		/* can't happen -- skb is a clone if info_id != 0 */
+		WARN_ON(info_id);
+
 		skb = skb_clone(skb, GFP_ATOMIC);
 		kfree_skb(tmp_skb);
 
@@ -2019,6 +2097,10 @@
 	memset(info, 0, sizeof(*info));
 
 	dev->trans_start = jiffies;
+
+	info->flags = info_flags;
+	info->ack_frame_id = info_id;
+
 	ieee80211_xmit(sdata, skb);
 
 	return NETDEV_TX_OK;
@@ -2062,10 +2144,15 @@
 	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
 		result = ieee80211_tx(sdata, skb, true);
 	} else {
+		struct sk_buff_head skbs;
+
+		__skb_queue_head_init(&skbs);
+		__skb_queue_tail(&skbs, skb);
+
 		hdr = (struct ieee80211_hdr *)skb->data;
 		sta = sta_info_get(sdata, hdr->addr1);
 
-		result = __ieee80211_tx(local, &skb, sta, true);
+		result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
 	}
 
 	return result;
@@ -2178,10 +2265,10 @@
 		/* Bitmap control */
 		*pos++ = n1 | aid0;
 		/* Part Virt Bitmap */
+		skb_put(skb, n2 - n1);
 		memcpy(pos, bss->tim + n1, n2 - n1 + 1);
 
 		tim[1] = n2 - n1 + 4;
-		skb_put(skb, n2 - n1);
 	} else {
 		*pos++ = aid0; /* Bitmap control */
 		*pos++ = 0; /* Part Virt Bitmap */
@@ -2246,9 +2333,9 @@
 			} else {
 				unsigned long flags;
 
-				spin_lock_irqsave(&local->sta_lock, flags);
+				spin_lock_irqsave(&local->tim_lock, flags);
 				ieee80211_beacon_add_tim(ap, skb, beacon);
-				spin_unlock_irqrestore(&local->sta_lock, flags);
+				spin_unlock_irqrestore(&local->tim_lock, flags);
 			}
 
 			if (tim_offset)
@@ -2279,22 +2366,31 @@
 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
 		struct ieee80211_mgmt *mgmt;
 		u8 *pos;
+		int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
+			      sizeof(mgmt->u.beacon);
 
 #ifdef CONFIG_MAC80211_MESH
 		if (!sdata->u.mesh.mesh_id_len)
 			goto out;
 #endif
 
-		/* headroom, head length, tail length and maximum TIM length */
-		skb = dev_alloc_skb(local->tx_headroom + 400 +
-				sdata->u.mesh.ie_len);
+		skb = dev_alloc_skb(local->tx_headroom +
+				    hdr_len +
+				    2 + /* NULL SSID */
+				    2 + 8 + /* supported rates */
+				    2 + 3 + /* DS params */
+				    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+				    2 + sizeof(struct ieee80211_ht_cap) +
+				    2 + sizeof(struct ieee80211_ht_info) +
+				    2 + sdata->u.mesh.mesh_id_len +
+				    2 + sizeof(struct ieee80211_meshconf_ie) +
+				    sdata->u.mesh.ie_len);
 		if (!skb)
 			goto out;
 
 		skb_reserve(skb, local->hw.extra_tx_headroom);
-		mgmt = (struct ieee80211_mgmt *)
-			skb_put(skb, 24 + sizeof(mgmt->u.beacon));
-		memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
+		mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+		memset(mgmt, 0, hdr_len);
 		mgmt->frame_control =
 		    cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
 		memset(mgmt->da, 0xff, ETH_ALEN);
@@ -2313,6 +2409,8 @@
 		    mesh_add_ds_params_ie(skb, sdata) ||
 		    ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
 		    mesh_add_rsn_ie(skb, sdata) ||
+		    mesh_add_ht_cap_ie(skb, sdata) ||
+		    mesh_add_ht_info_ie(skb, sdata) ||
 		    mesh_add_meshid_ie(skb, sdata) ||
 		    mesh_add_meshconf_ie(skb, sdata) ||
 		    mesh_add_vendor_ies(skb, sdata)) {
@@ -2355,6 +2453,37 @@
 }
 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
 
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif)
+{
+	struct ieee80211_if_ap *ap = NULL;
+	struct sk_buff *presp = NULL, *skb = NULL;
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
+		return NULL;
+
+	rcu_read_lock();
+
+	ap = &sdata->u.ap;
+	presp = rcu_dereference(ap->probe_resp);
+	if (!presp)
+		goto out;
+
+	skb = skb_copy(presp, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	memset(hdr->addr1, 0, sizeof(hdr->addr1));
+
+out:
+	rcu_read_unlock();
+	return skb;
+}
+EXPORT_SYMBOL(ieee80211_proberesp_get);
+
 struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
 				     struct ieee80211_vif *vif)
 {
@@ -2567,15 +2696,15 @@
 }
 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
 
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+			  struct sk_buff *skb, int tid)
 {
 	skb_set_mac_header(skb, 0);
 	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, 0);
 
-	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
-	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
-	skb->priority = 7;
+	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+	skb->priority = tid;
 
 	/*
 	 * The other path calling ieee80211_xmit is from the tasklet,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d5230ec..9919892 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -20,6 +20,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
 #include <linux/bitmap.h>
+#include <linux/crc32.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
@@ -96,13 +97,13 @@
 
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
+	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
 
-	do {
+	skb_queue_walk(&tx->skbs, skb) {
 		hdr = (struct ieee80211_hdr *) skb->data;
 		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-	} while ((skb = skb->next));
+	}
 }
 
 int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -564,6 +565,172 @@
 }
 EXPORT_SYMBOL(ieee80211_queue_delayed_work);
 
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+			       struct ieee802_11_elems *elems,
+			       u64 filter, u32 crc)
+{
+	size_t left = len;
+	u8 *pos = start;
+	bool calc_crc = filter != 0;
+
+	memset(elems, 0, sizeof(*elems));
+	elems->ie_start = start;
+	elems->total_len = len;
+
+	while (left >= 2) {
+		u8 id, elen;
+
+		id = *pos++;
+		elen = *pos++;
+		left -= 2;
+
+		if (elen > left)
+			break;
+
+		if (calc_crc && id < 64 && (filter & (1ULL << id)))
+			crc = crc32_be(crc, pos - 2, elen + 2);
+
+		switch (id) {
+		case WLAN_EID_SSID:
+			elems->ssid = pos;
+			elems->ssid_len = elen;
+			break;
+		case WLAN_EID_SUPP_RATES:
+			elems->supp_rates = pos;
+			elems->supp_rates_len = elen;
+			break;
+		case WLAN_EID_FH_PARAMS:
+			elems->fh_params = pos;
+			elems->fh_params_len = elen;
+			break;
+		case WLAN_EID_DS_PARAMS:
+			elems->ds_params = pos;
+			elems->ds_params_len = elen;
+			break;
+		case WLAN_EID_CF_PARAMS:
+			elems->cf_params = pos;
+			elems->cf_params_len = elen;
+			break;
+		case WLAN_EID_TIM:
+			if (elen >= sizeof(struct ieee80211_tim_ie)) {
+				elems->tim = (void *)pos;
+				elems->tim_len = elen;
+			}
+			break;
+		case WLAN_EID_IBSS_PARAMS:
+			elems->ibss_params = pos;
+			elems->ibss_params_len = elen;
+			break;
+		case WLAN_EID_CHALLENGE:
+			elems->challenge = pos;
+			elems->challenge_len = elen;
+			break;
+		case WLAN_EID_VENDOR_SPECIFIC:
+			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+			    pos[2] == 0xf2) {
+				/* Microsoft OUI (00:50:F2) */
+
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+
+				if (pos[3] == 1) {
+					/* OUI Type 1 - WPA IE */
+					elems->wpa = pos;
+					elems->wpa_len = elen;
+				} else if (elen >= 5 && pos[3] == 2) {
+					/* OUI Type 2 - WMM IE */
+					if (pos[4] == 0) {
+						elems->wmm_info = pos;
+						elems->wmm_info_len = elen;
+					} else if (pos[4] == 1) {
+						elems->wmm_param = pos;
+						elems->wmm_param_len = elen;
+					}
+				}
+			}
+			break;
+		case WLAN_EID_RSN:
+			elems->rsn = pos;
+			elems->rsn_len = elen;
+			break;
+		case WLAN_EID_ERP_INFO:
+			elems->erp_info = pos;
+			elems->erp_info_len = elen;
+			break;
+		case WLAN_EID_EXT_SUPP_RATES:
+			elems->ext_supp_rates = pos;
+			elems->ext_supp_rates_len = elen;
+			break;
+		case WLAN_EID_HT_CAPABILITY:
+			if (elen >= sizeof(struct ieee80211_ht_cap))
+				elems->ht_cap_elem = (void *)pos;
+			break;
+		case WLAN_EID_HT_INFORMATION:
+			if (elen >= sizeof(struct ieee80211_ht_info))
+				elems->ht_info_elem = (void *)pos;
+			break;
+		case WLAN_EID_MESH_ID:
+			elems->mesh_id = pos;
+			elems->mesh_id_len = elen;
+			break;
+		case WLAN_EID_MESH_CONFIG:
+			if (elen >= sizeof(struct ieee80211_meshconf_ie))
+				elems->mesh_config = (void *)pos;
+			break;
+		case WLAN_EID_PEER_MGMT:
+			elems->peering = pos;
+			elems->peering_len = elen;
+			break;
+		case WLAN_EID_PREQ:
+			elems->preq = pos;
+			elems->preq_len = elen;
+			break;
+		case WLAN_EID_PREP:
+			elems->prep = pos;
+			elems->prep_len = elen;
+			break;
+		case WLAN_EID_PERR:
+			elems->perr = pos;
+			elems->perr_len = elen;
+			break;
+		case WLAN_EID_RANN:
+			if (elen >= sizeof(struct ieee80211_rann_ie))
+				elems->rann = (void *)pos;
+			break;
+		case WLAN_EID_CHANNEL_SWITCH:
+			elems->ch_switch_elem = pos;
+			elems->ch_switch_elem_len = elen;
+			break;
+		case WLAN_EID_QUIET:
+			if (!elems->quiet_elem) {
+				elems->quiet_elem = pos;
+				elems->quiet_elem_len = elen;
+			}
+			elems->num_of_quiet_elem++;
+			break;
+		case WLAN_EID_COUNTRY:
+			elems->country_elem = pos;
+			elems->country_elem_len = elen;
+			break;
+		case WLAN_EID_PWR_CONSTRAINT:
+			elems->pwr_constr_elem = pos;
+			elems->pwr_constr_elem_len = elen;
+			break;
+		case WLAN_EID_TIMEOUT_INTERVAL:
+			elems->timeout_int = pos;
+			elems->timeout_int_len = elen;
+			break;
+		default:
+			break;
+		}
+
+		left -= elen;
+		pos += elen;
+	}
+
+	return crc;
+}
+
 void ieee802_11_parse_elems(u8 *start, size_t len,
 			    struct ieee802_11_elems *elems)
 {
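For reference, the element parser added above treats the frame body as a flat sequence of (ID, length, value) triples and stops as soon as an element claims more bytes than remain. Below is a minimal userspace sketch of that bounds-checked walk; the element IDs and the sample buffer are made up for illustration and none of the kernel structures are used.

/* Illustrative only: same (id, len, value) walk as the parser above. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void walk_elems(const uint8_t *pos, size_t left)
{
	while (left >= 2) {
		uint8_t id = *pos++;
		uint8_t elen = *pos++;

		left -= 2;
		if (elen > left)	/* truncated element: stop parsing */
			break;

		printf("element id %u, %u byte(s) of payload\n",
		       (unsigned)id, (unsigned)elen);

		left -= elen;
		pos += elen;
	}
}

int main(void)
{
	/* SSID-like element (id 0, 4 bytes) followed by a 2-byte element */
	const uint8_t frame[] = { 0x00, 0x04, 't', 'e', 's', 't',
				  0x2a, 0x02, 0x01, 0x02 };

	walk_elems(frame, sizeof(frame));
	return 0;
}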
@@ -812,23 +979,9 @@
 		offset = noffset;
 	}
 
-	if (sband->ht_cap.ht_supported) {
-		u16 cap = sband->ht_cap.cap;
-		__le16 tmp;
-
-		*pos++ = WLAN_EID_HT_CAPABILITY;
-		*pos++ = sizeof(struct ieee80211_ht_cap);
-		memset(pos, 0, sizeof(struct ieee80211_ht_cap));
-		tmp = cpu_to_le16(cap);
-		memcpy(pos, &tmp, sizeof(u16));
-		pos += sizeof(u16);
-		*pos++ = sband->ht_cap.ampdu_factor |
-			 (sband->ht_cap.ampdu_density <<
-				IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
-		memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
-		pos += sizeof(sband->ht_cap.mcs);
-		pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
-	}
+	if (sband->ht_cap.ht_supported)
+		pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+						sband->ht_cap.cap);
 
 	/*
 	 * If adding more here, adjust code in main.c
@@ -989,16 +1142,6 @@
 		 */
 	}
 #endif
-
-	/* setup fragmentation threshold */
-	drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
-
-	/* setup RTS threshold */
-	drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
-
-	/* reset coverage class */
-	drv_set_coverage_class(local, hw->wiphy->coverage_class);
-
 	/* everything else happens only if HW was up & running */
 	if (!local->open_count)
 		goto wake_up;
@@ -1017,6 +1160,15 @@
 		return res;
 	}
 
+	/* setup fragmentation threshold */
+	drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+
+	/* setup RTS threshold */
+	drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
+
+	/* reset coverage class */
+	drv_set_coverage_class(local, hw->wiphy->coverage_class);
+
 	ieee80211_led_radio(local, true);
 	ieee80211_mod_tpt_led_trig(local,
 				   IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
@@ -1026,7 +1178,7 @@
 		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
 		    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
 		    ieee80211_sdata_running(sdata))
-			res = drv_add_interface(local, &sdata->vif);
+			res = drv_add_interface(local, sdata);
 	}
 
 	/* add STAs back */
@@ -1076,11 +1228,13 @@
 			  BSS_CHANGED_BEACON_INT |
 			  BSS_CHANGED_BSSID |
 			  BSS_CHANGED_CQM |
-			  BSS_CHANGED_QOS;
+			  BSS_CHANGED_QOS |
+			  BSS_CHANGED_IDLE;
 
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
-			changed |= BSS_CHANGED_ASSOC;
+			changed |= BSS_CHANGED_ASSOC |
+				   BSS_CHANGED_ARP_FILTER;
 			mutex_lock(&sdata->u.mgd.mtx);
 			ieee80211_bss_info_change_notify(sdata, changed);
 			mutex_unlock(&sdata->u.mgd.mtx);
@@ -1090,6 +1244,10 @@
 			/* fall through */
 		case NL80211_IFTYPE_AP:
 			changed |= BSS_CHANGED_SSID;
+
+			if (sdata->vif.type == NL80211_IFTYPE_AP)
+				changed |= BSS_CHANGED_AP_PROBE_RESP;
+
 			/* fall through */
 		case NL80211_IFTYPE_MESH_POINT:
 			changed |= BSS_CHANGED_BEACON |
@@ -1111,6 +1269,8 @@
 		}
 	}
 
+	ieee80211_recalc_ps(local, -1);
+
 	/*
 	 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
 	 * sessions can be established after a resume.
@@ -1366,6 +1526,108 @@
 }
 EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
 
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+			      u16 cap)
+{
+	__le16 tmp;
+
+	*pos++ = WLAN_EID_HT_CAPABILITY;
+	*pos++ = sizeof(struct ieee80211_ht_cap);
+	memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+
+	/* capability flags */
+	tmp = cpu_to_le16(cap);
+	memcpy(pos, &tmp, sizeof(u16));
+	pos += sizeof(u16);
+
+	/* AMPDU parameters */
+	*pos++ = ht_cap->ampdu_factor |
+		 (ht_cap->ampdu_density <<
+			IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
+
+	/* MCS set */
+	memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs));
+	pos += sizeof(ht_cap->mcs);
+
+	/* extended capabilities */
+	pos += sizeof(__le16);
+
+	/* BF capabilities */
+	pos += sizeof(__le32);
+
+	/* antenna selection */
+	pos += sizeof(u8);
+
+	return pos;
+}
+
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+			       struct ieee80211_sta_ht_cap *ht_cap,
+			       struct ieee80211_channel *channel,
+			       enum nl80211_channel_type channel_type)
+{
+	struct ieee80211_ht_info *ht_info;
+	/* Build HT Information */
+	*pos++ = WLAN_EID_HT_INFORMATION;
+	*pos++ = sizeof(struct ieee80211_ht_info);
+	ht_info = (struct ieee80211_ht_info *)pos;
+	ht_info->control_chan =
+			ieee80211_frequency_to_channel(channel->center_freq);
+	switch (channel_type) {
+	case NL80211_CHAN_HT40MINUS:
+		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+		break;
+	case NL80211_CHAN_HT40PLUS:
+		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		break;
+	case NL80211_CHAN_HT20:
+	default:
+		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+		break;
+	}
+	if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+		ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+
+	/*
+	 * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
+	 * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
+	 */
+	ht_info->operation_mode = 0x0000;
+	ht_info->stbc_param = 0x0000;
+
+	/* It seems that Basic MCS set and Supported MCS set
+	   are identical for the first 10 bytes */
+	memset(&ht_info->basic_set, 0, 16);
+	memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);
+
+	return pos + sizeof(struct ieee80211_ht_info);
+}
+
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
+{
+	enum nl80211_channel_type channel_type;
+
+	if (!ht_info)
+		return NL80211_CHAN_NO_HT;
+
+	switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+		channel_type = NL80211_CHAN_HT20;
+		break;
+	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+		channel_type = NL80211_CHAN_HT40PLUS;
+		break;
+	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+		channel_type = NL80211_CHAN_HT40MINUS;
+		break;
+	default:
+		channel_type = NL80211_CHAN_NO_HT;
+	}
+
+	return channel_type;
+}
+
 int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
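The pointer arithmetic inside ieee80211_ie_build_ht_cap() above adds up to the 26-octet HT Capabilities element body (2 + 1 + 16 + 2 + 4 + 1). A small userspace sketch of that size accounting follows; the packed struct only mirrors the on-air layout for illustration and is not the kernel's struct ieee80211_ht_cap definition.

#include <stdio.h>
#include <stdint.h>

struct ht_cap_body {
	uint16_t cap_info;		/*  2 bytes: capability flags */
	uint8_t  ampdu_params;		/*  1 byte:  A-MPDU factor/density */
	uint8_t  mcs_set[16];		/* 16 bytes: supported MCS set */
	uint16_t ext_cap;		/*  2 bytes: extended capabilities */
	uint32_t tx_bf_cap;		/*  4 bytes: beamforming capabilities */
	uint8_t  antenna_sel;		/*  1 byte:  antenna selection */
} __attribute__((packed));

int main(void)
{
	/* 2 + 1 + 16 + 2 + 4 + 1 == 26 */
	printf("HT Capabilities body: %zu bytes\n", sizeof(struct ht_cap_body));
	return 0;
}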
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a1c6bfd..68ad351 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -330,13 +330,12 @@
 
 	ieee80211_tx_set_protected(tx);
 
-	skb = tx->skb;
-	do {
+	skb_queue_walk(&tx->skbs, skb) {
 		if (wep_encrypt_skb(tx, skb) < 0) {
 			I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
 			return TX_DROP;
 		}
-	} while ((skb = skb->next));
+	}
 
 	return TX_CONTINUE;
 }
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index fd52e69..89511be 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,6 +52,30 @@
 	}
 }
 
+/* Indicate which queue to use for this fully formed 802.11 frame */
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+				 struct sk_buff *skb,
+				 struct ieee80211_hdr *hdr)
+{
+	u8 *p;
+
+	if (local->hw.queues < 4)
+		return 0;
+
+	if (!ieee80211_is_data(hdr->frame_control)) {
+		skb->priority = 7;
+		return ieee802_1d_to_ac[skb->priority];
+	}
+	if (!ieee80211_is_data_qos(hdr->frame_control)) {
+		skb->priority = 0;
+		return ieee802_1d_to_ac[skb->priority];
+	}
+
+	p = ieee80211_get_qos_ctl(hdr);
+	skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+
+	return ieee80211_downgrade_queue(local, skb);
+}
 
 /* Indicate which queue to use. */
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
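ieee80211_select_queue_80211() above maps the 802.1D user priority carried in the QoS control field to a WMM access category, forcing management frames to the highest priority and non-QoS data to priority 0. Below is a userspace sketch of that lookup, assuming the usual WMM mapping (0/3 best effort, 1/2 background, 4/5 video, 6/7 voice); the enum and function names are local to the example.

#include <stdio.h>
#include <stdint.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

/* 802.1D user priority (0..7) -> access category */
static const enum ac up_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

/* Management frames get the highest priority; non-QoS data falls back to 0. */
static enum ac select_ac(int is_data, int is_qos_data, uint8_t qos_tid)
{
	if (!is_data)
		return up_to_ac[7];
	if (!is_qos_data)
		return up_to_ac[0];
	return up_to_ac[qos_tid & 0x7];
}

int main(void)
{
	printf("mgmt frame -> AC %d\n", select_ac(0, 0, 0));
	printf("plain data -> AC %d\n", select_ac(1, 0, 0));
	printf("QoS TID 5  -> AC %d\n", select_ac(1, 1, 5));
	return 0;
}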
@@ -83,7 +107,7 @@
 		break;
 #ifdef CONFIG_MAC80211_MESH
 	case NL80211_IFTYPE_MESH_POINT:
-		ra = skb->data;
+		qos = true;
 		break;
 #endif
 	case NL80211_IFTYPE_STATION:
@@ -139,16 +163,24 @@
 			   struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	/* Fill in the QoS header if there is one. */
 	if (ieee80211_is_data_qos(hdr->frame_control)) {
 		u8 *p = ieee80211_get_qos_ctl(hdr);
-		u8 ack_policy = 0, tid;
+		u8 ack_policy, tid;
 
 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
 
-		if (unlikely(sdata->local->wifi_wme_noack_test))
+		/* preserve EOSP bit */
+		ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
+
+		if (is_multicast_ether_addr(hdr->addr1) ||
+		    sdata->noack_map & BIT(tid)) {
 			ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
+			info->flags |= IEEE80211_TX_CTL_NO_ACK;
+		}
+
 		/* qos header is 2 bytes */
 		*p++ = ack_policy | tid;
 		*p = ieee80211_vif_is_mesh(&sdata->vif) ?
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 34e166f..94edceb 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -15,6 +15,9 @@
 
 extern const int ieee802_1d_to_ac[8];
 
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+				 struct sk_buff *skb,
+				 struct ieee80211_hdr *hdr);
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 			   struct sk_buff *skb);
 void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 6c53b6d..c6dd01a 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -94,7 +94,8 @@
 
 /* frame sending functions */
 
-static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
+static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
+				struct sk_buff *skb, const u8 *ht_info_ie,
 				struct ieee80211_supported_band *sband,
 				struct ieee80211_channel *channel,
 				enum ieee80211_smps_mode smps)
@@ -102,8 +103,10 @@
 	struct ieee80211_ht_info *ht_info;
 	u8 *pos;
 	u32 flags = channel->flags;
-	u16 cap = sband->ht_cap.cap;
-	__le16 tmp;
+	u16 cap;
+	struct ieee80211_sta_ht_cap ht_cap;
+
+	BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
 
 	if (!sband->ht_cap.ht_supported)
 		return;
@@ -114,9 +117,13 @@
 	if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
 		return;
 
+	memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+	ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
 	ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
 
 	/* determine capability flags */
+	cap = ht_cap.cap;
 
 	switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
 	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
@@ -154,34 +161,8 @@
 	}
 
 	/* reserve and fill IE */
-
 	pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
-	*pos++ = WLAN_EID_HT_CAPABILITY;
-	*pos++ = sizeof(struct ieee80211_ht_cap);
-	memset(pos, 0, sizeof(struct ieee80211_ht_cap));
-
-	/* capability flags */
-	tmp = cpu_to_le16(cap);
-	memcpy(pos, &tmp, sizeof(u16));
-	pos += sizeof(u16);
-
-	/* AMPDU parameters */
-	*pos++ = sband->ht_cap.ampdu_factor |
-		 (sband->ht_cap.ampdu_density <<
-			IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
-
-	/* MCS set */
-	memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
-	pos += sizeof(sband->ht_cap.mcs);
-
-	/* extended capabilities */
-	pos += sizeof(__le16);
-
-	/* BF capabilities */
-	pos += sizeof(__le32);
-
-	/* antenna selection */
-	pos += sizeof(u8);
+	ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
@@ -356,7 +337,7 @@
 
 	if (wk->assoc.use_11n && wk->assoc.wmm_used &&
 	    local->hw.queues >= 4)
-		ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
+		ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
 				    sband, wk->chan, wk->assoc.smps);
 
 	/* if present, add any custom non-vendor IEs that go after HT */
@@ -881,44 +862,6 @@
 	kfree_skb(skb);
 }
 
-static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
-				       enum nl80211_channel_type oper_ct)
-{
-	switch (wk_ct) {
-	case NL80211_CHAN_NO_HT:
-		return true;
-	case NL80211_CHAN_HT20:
-		if (oper_ct != NL80211_CHAN_NO_HT)
-			return true;
-		return false;
-	case NL80211_CHAN_HT40MINUS:
-	case NL80211_CHAN_HT40PLUS:
-		return (wk_ct == oper_ct);
-	}
-	WARN_ON(1); /* shouldn't get here */
-	return false;
-}
-
-static enum nl80211_channel_type
-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
-		  enum nl80211_channel_type oper_ct)
-{
-	switch (wk_ct) {
-	case NL80211_CHAN_NO_HT:
-		return oper_ct;
-	case NL80211_CHAN_HT20:
-		if (oper_ct != NL80211_CHAN_NO_HT)
-			return oper_ct;
-		return wk_ct;
-	case NL80211_CHAN_HT40MINUS:
-	case NL80211_CHAN_HT40PLUS:
-		return wk_ct;
-	}
-	WARN_ON(1); /* shouldn't get here */
-	return wk_ct;
-}
-
-
 static void ieee80211_work_timer(unsigned long data)
 {
 	struct ieee80211_local *local = (void *) data;
@@ -969,51 +912,12 @@
 		}
 
 		if (!started && !local->tmp_channel) {
-			bool on_oper_chan;
-			bool tmp_chan_changed = false;
-			bool on_oper_chan2;
-			enum nl80211_channel_type wk_ct;
-			on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
-			/* Work with existing channel type if possible. */
-			wk_ct = wk->chan_type;
-			if (wk->chan == local->hw.conf.channel)
-				wk_ct = ieee80211_calc_ct(wk->chan_type,
-						local->hw.conf.channel_type);
-
-			if (local->tmp_channel)
-				if ((local->tmp_channel != wk->chan) ||
-				    (local->tmp_channel_type != wk_ct))
-					tmp_chan_changed = true;
+			ieee80211_offchannel_stop_vifs(local, true);
 
 			local->tmp_channel = wk->chan;
-			local->tmp_channel_type = wk_ct;
-			/*
-			 * Leave the station vifs in awake mode if they
-			 * happen to be on the same channel as
-			 * the requested channel.
-			 */
-			on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
-			if (on_oper_chan != on_oper_chan2) {
-				if (on_oper_chan2) {
-					/* going off oper channel, PS too */
-					ieee80211_offchannel_stop_vifs(local,
-								       true);
-					ieee80211_hw_config(local, 0);
-				} else {
-					/* going on channel, but leave PS
-					 * off-channel. */
-					ieee80211_hw_config(local, 0);
-					ieee80211_offchannel_return(local,
-								    true,
-								    false);
-				}
-			} else if (tmp_chan_changed)
-				/* Still off-channel, but on some other
-				 * channel, so update hardware.
-				 * PS should already be off-channel.
-				 */
-				ieee80211_hw_config(local, 0);
+			local->tmp_channel_type = wk->chan_type;
+
+			ieee80211_hw_config(local, 0);
 
 			started = true;
 			wk->timeout = jiffies;
@@ -1082,34 +986,17 @@
 	list_for_each_entry(wk, &local->work_list, list) {
 		if (!wk->started)
 			continue;
-		if (wk->chan != local->tmp_channel)
-			continue;
-		if (!ieee80211_work_ct_coexists(wk->chan_type,
-						local->tmp_channel_type))
+		if (wk->chan != local->tmp_channel ||
+		    wk->chan_type != local->tmp_channel_type)
 			continue;
 		remain_off_channel = true;
 	}
 
 	if (!remain_off_channel && local->tmp_channel) {
 		local->tmp_channel = NULL;
-		/* If tmp_channel wasn't operating channel, then
-		 * we need to go back on-channel.
-		 * NOTE:  If we can ever be here while scannning,
-		 * or if the hw_config() channel config logic changes,
-		 * then we may need to do a more thorough check to see if
-		 * we still need to do a hardware config.  Currently,
-		 * we cannot be here while scanning, however.
-		 */
-		if (!ieee80211_cfg_on_oper_channel(local))
-			ieee80211_hw_config(local, 0);
+		ieee80211_hw_config(local, 0);
 
-		/* At the least, we need to disable offchannel_ps,
-		 * so just go ahead and run the entire offchannel
-		 * return logic here.  We *could* skip enabling
-		 * beaconing if we were already on-oper-channel
-		 * as a future optimization.
-		 */
-		ieee80211_offchannel_return(local, true, true);
+		ieee80211_offchannel_return(local, true);
 
 		/* give connection some time to breathe */
 		run_again(local, jiffies + HZ/2);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f614ce7..93aab07 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -223,14 +223,14 @@
 ieee80211_tx_result
 ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
+	struct sk_buff *skb;
 
 	ieee80211_tx_set_protected(tx);
 
-	do {
+	skb_queue_walk(&tx->skbs, skb) {
 		if (tkip_encrypt_skb(tx, skb) < 0)
 			return TX_DROP;
-	} while ((skb = skb->next));
+	}
 
 	return TX_CONTINUE;
 }
@@ -390,7 +390,8 @@
 	u8 scratch[6 * AES_BLOCK_SIZE];
 
 	if (info->control.hw_key &&
-	    !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+	    !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+	    !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
 		/*
 		 * hwaccel has no need for preallocated room for CCMP
 		 * header or MIC fields
@@ -412,6 +413,12 @@
 
 	pos = skb_push(skb, CCMP_HDR_LEN);
 	memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+
+	/* the HW only needs room for the IV, but not the actual IV */
+	if (info->control.hw_key &&
+	    (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+		return 0;
+
 	hdr = (struct ieee80211_hdr *) pos;
 	pos += hdrlen;
 
@@ -442,14 +449,14 @@
 ieee80211_tx_result
 ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
+	struct sk_buff *skb;
 
 	ieee80211_tx_set_protected(tx);
 
-	do {
+	skb_queue_walk(&tx->skbs, skb) {
 		if (ccmp_encrypt_skb(tx, skb) < 0)
 			return TX_DROP;
-	} while ((skb = skb->next));
+	}
 
 	return TX_CONTINUE;
 }
@@ -547,15 +554,22 @@
 ieee80211_tx_result
 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
 {
-	struct sk_buff *skb = tx->skb;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct sk_buff *skb;
+	struct ieee80211_tx_info *info;
 	struct ieee80211_key *key = tx->key;
 	struct ieee80211_mmie *mmie;
 	u8 aad[20];
 	u64 pn64;
 
+	if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+		return TX_DROP;
+
+	skb = skb_peek(&tx->skbs);
+
+	info = IEEE80211_SKB_CB(skb);
+
 	if (info->control.hw_key)
-		return 0;
+		return TX_CONTINUE;
 
 	if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
 		return TX_DROP;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index d5597b7..f8ac4ef 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -4,6 +4,14 @@
 config NETFILTER_NETLINK
 	tristate
 
+config NETFILTER_NETLINK_ACCT
+	tristate "Netfilter NFACCT over NFNETLINK interface"
+	depends on NETFILTER_ADVANCED
+	select NETFILTER_NETLINK
+	help
+	  If this option is enabled, the kernel will include support
+	  for extended accounting via NFNETLINK.
+
 config NETFILTER_NETLINK_QUEUE
 	tristate "Netfilter NFQUEUE over NFNETLINK interface"
 	depends on NETFILTER_ADVANCED
@@ -75,6 +83,16 @@
 
 	  If unsure, say `N'.
 
+config NF_CONNTRACK_PROCFS
+	bool "Supply CT list in procfs (OBSOLETE)"
+	default y
+	depends on PROC_FS
+	---help---
+	  This option enables the list of known conntrack entries to be
+	  shown in procfs under net/netfilter/nf_conntrack. This is
+	  considered obsolete in favor of the conntrack(8) tool, which
+	  uses Netlink.
+
 config NF_CONNTRACK_EVENTS
 	bool "Connection tracking events"
 	depends on NETFILTER_ADVANCED
@@ -770,6 +788,15 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_ECN
+	tristate '"ecn" match support'
+	depends on NETFILTER_ADVANCED
+	---help---
+	  This option adds an "ECN" match, which allows you to match against
+	  the IPv4 and TCP header ECN fields.
+
+	  To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_MATCH_ESP
 	tristate '"esp" match support'
 	depends on NETFILTER_ADVANCED
@@ -879,6 +906,16 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_NFACCT
+	tristate '"nfacct" match support'
+	depends on NETFILTER_ADVANCED
+	select NETFILTER_NETLINK_ACCT
+	help
+	  This option allows you to use the extended accounting through
+	  nfnetlink_acct.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_OSF
 	tristate '"osf" Passive OS fingerprint match'
 	depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853..40f4c3d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_NETFILTER) = netfilter.o
 
 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
+obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
 
@@ -80,6 +81,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
@@ -90,6 +92,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_NFACCT) += xt_nfacct.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index afca6c7..b4e8ff0 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -54,6 +54,12 @@
 
 struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
+
+#if defined(CONFIG_JUMP_LABEL)
+struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+EXPORT_SYMBOL(nf_hooks_needed);
+#endif
+
 static DEFINE_MUTEX(nf_hook_mutex);
 
 int nf_register_hook(struct nf_hook_ops *reg)
@@ -70,6 +76,9 @@
 	}
 	list_add_rcu(&reg->list, elem->list.prev);
 	mutex_unlock(&nf_hook_mutex);
+#if defined(CONFIG_JUMP_LABEL)
+	jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
 	return 0;
 }
 EXPORT_SYMBOL(nf_register_hook);
@@ -79,7 +88,9 @@
 	mutex_lock(&nf_hook_mutex);
 	list_del_rcu(&reg->list);
 	mutex_unlock(&nf_hook_mutex);
-
+#if defined(CONFIG_JUMP_LABEL)
+	jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
 	synchronize_net();
 }
 EXPORT_SYMBOL(nf_unregister_hook);
@@ -218,7 +229,7 @@
 }
 EXPORT_SYMBOL(skb_make_writable);
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
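Several hunks in this series replace the open-coded `defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)` test with IS_ENABLED(CONFIG_FOO), which is true whether the option is built in or built as a module. The sketch below only mimics that behaviour with a plain preprocessor check; it is not the kernel's actual macro machinery, and the CONFIG_DEMO name is invented for the example.

#include <stdio.h>

#define CONFIG_DEMO_MODULE 1	/* pretend the feature is built as a module */

#if defined(CONFIG_DEMO) || defined(CONFIG_DEMO_MODULE)
#define DEMO_IS_ENABLED 1	/* what IS_ENABLED(CONFIG_DEMO) would yield */
#else
#define DEMO_IS_ENABLED 0
#endif

int main(void)
{
	printf("demo feature enabled: %d\n", DEMO_IS_ENABLED);
	return 0;
}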
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 052579f..1f03556 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -109,16 +109,18 @@
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 bool
 ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
 		    __be16 *port, u8 *proto)
 {
 	int protoff;
 	u8 nexthdr;
+	__be16 frag_off;
 
 	nexthdr = ipv6_hdr(skb)->nexthdr;
-	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+				   &frag_off);
 	if (protoff < 0)
 		return false;
 
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index f2d576e..4015fca 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -241,7 +241,7 @@
 static inline void
 hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
 {
-	ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+	dst->ip.in6 = src->ip.in6;
 }
 
 static inline void
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 60d0165..2898819 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -267,7 +267,7 @@
 hash_net6_data_copy(struct hash_net6_elem *dst,
 		    const struct hash_net6_elem *src)
 {
-	ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+	dst->ip.in6 = src->ip.in6;
 	dst->cidr = src->cidr;
 }
 
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 70bd1d0..f987138 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -122,7 +122,6 @@
  
 config	IP_VS_WRR
 	tristate "weighted round-robin scheduling"
-	select GCD
 	---help---
 	  The weighted round-robin scheduling algorithm directs network
 	  connections to different real servers based on server weights
@@ -232,6 +231,21 @@
 	  If you want to compile it in kernel, say Y. To compile it as a
 	  module, choose M here. If unsure, say N.
 
+comment 'IPVS SH scheduler'
+
+config IP_VS_SH_TAB_BITS
+	int "IPVS source hashing table size (the Nth power of 2)"
+	range 4 20
+	default 8
+	---help---
+	  The source hashing scheduler maps source IPs to destinations
+	  stored in a hash table. This table is tiled by each destination
+	  until all slots in the table are filled. When using weights to
+	  allow destinations to receive more connections, the table is
+	  tiled an amount proportional to the weights specified. The table
+	  needs to be large enough to effectively fit all the destinations
+	  multiplied by their respective weights.
+
 comment 'IPVS application helper'
 
 config	IP_VS_FTP
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 12571fb..29fa5ba 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -616,7 +616,7 @@
 	if ((cp) && (!cp->dest)) {
 		dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
 				       cp->dport, &cp->vaddr, cp->vport,
-				       cp->protocol, cp->fwmark);
+				       cp->protocol, cp->fwmark, cp->flags);
 		ip_vs_bind_dest(cp, dest);
 		return dest;
 	} else
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 093cc32..611c335 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -983,7 +983,7 @@
 	if (!cp)
 		return NF_ACCEPT;
 
-	ipv6_addr_copy(&snet.in6, &iph->saddr);
+	snet.in6 = iph->saddr;
 	return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
 				    pp, offset, sizeof(struct ipv6hdr));
 }
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 008bf97..b3afe18 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -85,7 +85,7 @@
 	};
 
 	rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-	if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
+	if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
 		return 1;
 
 	return 0;
@@ -619,15 +619,21 @@
 				   const union nf_inet_addr *daddr,
 				   __be16 dport,
 				   const union nf_inet_addr *vaddr,
-				   __be16 vport, __u16 protocol, __u32 fwmark)
+				   __be16 vport, __u16 protocol, __u32 fwmark,
+				   __u32 flags)
 {
 	struct ip_vs_dest *dest;
 	struct ip_vs_service *svc;
+	__be16 port = dport;
 
 	svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
 	if (!svc)
 		return NULL;
-	dest = ip_vs_lookup_dest(svc, daddr, dport);
+	if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
+		port = 0;
+	dest = ip_vs_lookup_dest(svc, daddr, port);
+	if (!dest)
+		dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
 	if (dest)
 		atomic_inc(&dest->refcnt);
 	ip_vs_service_put(svc);
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 13d607a..1aa5cac 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -108,7 +108,7 @@
 				  struct ip_vs_conn *ct)
 
 {
-	bool ret = 0;
+	bool ret = false;
 
 	if (ct->af == p->af &&
 	    ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&
@@ -121,7 +121,7 @@
 	    ct->protocol == p->protocol &&
 	    ct->pe_data && ct->pe_data_len == p->pe_data_len &&
 	    !memcmp(ct->pe_data, p->pe_data, p->pe_data_len))
-		ret = 1;
+		ret = true;
 
 	IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n",
 		      ip_vs_proto_name(p->protocol),
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 33815f4..069e8d4 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -30,6 +30,11 @@
  * server is dead or overloaded, the load balancer can bypass the cache
  * server and send requests to the original server directly.
  *
+ * The weight destination attribute can be used to control the
+ * distribution of connections to the destinations in servernode. The
+ * greater the weight, the more connections the destination
+ * will receive.
+ *
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -99,9 +104,11 @@
 	struct ip_vs_sh_bucket *b;
 	struct list_head *p;
 	struct ip_vs_dest *dest;
+	int d_count;
 
 	b = tbl;
 	p = &svc->destinations;
+	d_count = 0;
 	for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
 		if (list_empty(p)) {
 			b->dest = NULL;
@@ -113,7 +120,16 @@
 			atomic_inc(&dest->refcnt);
 			b->dest = dest;
 
-			p = p->next;
+			IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
+				      i, IP_VS_DBG_ADDR(svc->af, &dest->addr),
+				      atomic_read(&dest->weight));
+
+			/* Don't move to next dest until filling weight */
+			if (++d_count >= atomic_read(&dest->weight)) {
+				p = p->next;
+				d_count = 0;
+			}
+
 		}
 		b++;
 	}
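The ip_vs_sh change above keeps assigning slots to the same destination until its weight has been consumed, so heavier destinations occupy proportionally more of the source-hash table. A userspace sketch of that fill loop follows; the table size and sample weights are arbitrary.

#include <stdio.h>

#define TAB_SIZE 16

struct dest { const char *name; int weight; };

int main(void)
{
	struct dest dests[] = { { "10.0.0.1", 1 }, { "10.0.0.2", 3 } };
	const int ndests = 2;
	const char *table[TAB_SIZE];
	int d = 0, d_count = 0;

	for (int i = 0; i < TAB_SIZE; i++) {
		table[i] = dests[d].name;

		/* don't move to the next destination until its weight is used up */
		if (++d_count >= dests[d].weight) {
			d = (d + 1) % ndests;
			d_count = 0;
		}
	}

	for (int i = 0; i < TAB_SIZE; i++)
		printf("slot %2d -> %s\n", i, table[i]);
	return 0;
}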
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 3cdd479..8a0d6d6 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -603,9 +603,9 @@
 #ifdef CONFIG_IP_VS_IPV6
 	if (cp->af == AF_INET6) {
 		p += sizeof(struct ip_vs_sync_v6);
-		ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
-		ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
-		ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+		s->v6.caddr = cp->caddr.in6;
+		s->v6.vaddr = cp->vaddr.in6;
+		s->v6.daddr = cp->daddr.in6;
 	} else
 #endif
 	{
@@ -740,7 +740,7 @@
 		 * but still handled.
 		 */
 		dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
-				       param->vport, protocol, fwmark);
+				       param->vport, protocol, fwmark, flags);
 
 		/* Set the appropriate activity flag */
 		if (protocol == IPPROTO_TCP) {
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index aa2d720..7fd66de 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -207,7 +207,7 @@
 
 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
 {
-	return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
+	return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
 }
 
 static struct dst_entry *
@@ -235,7 +235,7 @@
 			goto out_err;
 		}
 	}
-	ipv6_addr_copy(ret_saddr, &fl6.saddr);
+	*ret_saddr = fl6.saddr;
 	return dst;
 
 out_err:
@@ -279,7 +279,7 @@
 				  atomic_read(&rt->dst.__refcnt));
 		}
 		if (ret_saddr)
-			ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
+			*ret_saddr = dest->dst_saddr.in6;
 		spin_unlock(&dest->dst_lock);
 	} else {
 		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -541,7 +541,7 @@
 	 * Avoid duplicate tuple in reply direction for NAT traffic
 	 * to local address when connection is sync-ed
 	 */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
 		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -658,7 +658,7 @@
 	 * Avoid duplicate tuple in reply direction for NAT traffic
 	 * to local address when connection is sync-ed
 	 */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
 		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -705,7 +705,7 @@
 	/* mangle the packet */
 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
 		goto tx_error;
-	ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
+	ipv6_hdr(skb)->daddr = cp->daddr.in6;
 
 	if (!local || !skb->dev) {
 		/* drop the old route when skb is not shared */
@@ -967,8 +967,8 @@
 	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
 	iph->priority		=	old_iph->priority;
 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
-	ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
-	ipv6_addr_copy(&iph->saddr, &saddr);
+	iph->daddr = cp->daddr.in6;
+	iph->saddr = saddr;
 	iph->hop_limit		=	old_iph->hop_limit;
 
 	/* Another hack: avoid icmp_send in ip_fragment */
@@ -1173,7 +1173,7 @@
 	 * Avoid duplicate tuple in reply direction for NAT traffic
 	 * to local address when connection is sync-ed
 	 */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
 		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -1293,7 +1293,7 @@
 	 * Avoid duplicate tuple in reply direction for NAT traffic
 	 * to local address when connection is sync-ed
 	 */
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
 		enum ip_conntrack_info ctinfo;
 		struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 369df3f..f4f8cda 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -18,7 +18,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 
-static int nf_ct_acct __read_mostly;
+static bool nf_ct_acct __read_mostly;
 
 module_param_named(acct, nf_ct_acct, bool, 0644);
 MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
@@ -46,8 +46,8 @@
 		return 0;
 
 	return seq_printf(s, "packets=%llu bytes=%llu ",
-			  (unsigned long long)acct[dir].packets,
-			  (unsigned long long)acct[dir].bytes);
+			  (unsigned long long)atomic64_read(&acct[dir].packets),
+			  (unsigned long long)atomic64_read(&acct[dir].bytes));
 };
 EXPORT_SYMBOL_GPL(seq_print_acct);
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7202b06..e875f89 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -67,6 +67,7 @@
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
 unsigned int nf_conntrack_hash_rnd __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
@@ -1044,10 +1045,8 @@
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
-			spin_lock_bh(&ct->lock);
-			acct[CTINFO2DIR(ctinfo)].packets++;
-			acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
-			spin_unlock_bh(&ct->lock);
+			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+			atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
 		}
 	}
 }
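The accounting change above drops the per-conntrack spinlock around the counter updates and relies on 64-bit atomic adds instead. The sketch below illustrates the same idea with C11 atomics in place of the kernel's atomic64_t; the struct and function names are local to the example.

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

struct conn_counter {
	_Atomic uint64_t packets;
	_Atomic uint64_t bytes;
};

/* Counters can be bumped from several contexts without taking a lock. */
static void account_packet(struct conn_counter *c, uint64_t len)
{
	atomic_fetch_add_explicit(&c->packets, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&c->bytes, len, memory_order_relaxed);
}

int main(void)
{
	struct conn_counter ctr = { 0 };

	account_packet(&ctr, 1500);
	account_packet(&ctr, 64);

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)atomic_load(&ctr.packets),
	       (unsigned long long)atomic_load(&ctr.bytes));
	return 0;
}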
@@ -1063,11 +1062,9 @@
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
-			spin_lock_bh(&ct->lock);
-			acct[CTINFO2DIR(ctinfo)].packets++;
-			acct[CTINFO2DIR(ctinfo)].bytes +=
-				skb->len - skb_network_offset(skb);
-			spin_unlock_bh(&ct->lock);
+			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+			atomic64_add(skb->len - skb_network_offset(skb),
+				     &acct[CTINFO2DIR(ctinfo)].bytes);
 		}
 	}
 
@@ -1087,7 +1084,7 @@
 };
 #endif
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1342,8 +1339,7 @@
 					get_order(sz));
 	if (!hash) {
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-				 PAGE_KERNEL);
+		hash = vzalloc(sz);
 	}
 
 	if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 340c80d..4147ba3 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -38,8 +38,6 @@
 
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
 
-static HLIST_HEAD(nf_ct_userspace_expect_list);
-
 /* nf_conntrack_expect helper functions */
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
 				u32 pid, int report)
@@ -47,14 +45,14 @@
 	struct nf_conn_help *master_help = nfct_help(exp->master);
 	struct net *net = nf_ct_exp_net(exp);
 
+	NF_CT_ASSERT(master_help);
 	NF_CT_ASSERT(!timer_pending(&exp->timeout));
 
 	hlist_del_rcu(&exp->hnode);
 	net->ct.expect_count--;
 
 	hlist_del(&exp->lnode);
-	if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
-		master_help->expecting[exp->class]--;
+	master_help->expecting[exp->class]--;
 
 	nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
 	nf_ct_expect_put(exp);
@@ -314,37 +312,34 @@
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_put);
 
-static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
+static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
 {
 	struct nf_conn_help *master_help = nfct_help(exp->master);
+	struct nf_conntrack_helper *helper;
 	struct net *net = nf_ct_exp_net(exp);
-	const struct nf_conntrack_expect_policy *p;
 	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
 
 	/* two references : one for hash insert, one for the timer */
 	atomic_add(2, &exp->use);
 
-	if (master_help) {
-		hlist_add_head(&exp->lnode, &master_help->expectations);
-		master_help->expecting[exp->class]++;
-	} else if (exp->flags & NF_CT_EXPECT_USERSPACE)
-		hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);
+	hlist_add_head(&exp->lnode, &master_help->expectations);
+	master_help->expecting[exp->class]++;
 
 	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
 	net->ct.expect_count++;
 
 	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
 		    (unsigned long)exp);
-	if (master_help) {
-		p = &rcu_dereference_protected(
-				master_help->helper,
-				lockdep_is_held(&nf_conntrack_lock)
-				)->expect_policy[exp->class];
-		exp->timeout.expires = jiffies + p->timeout * HZ;
+	helper = rcu_dereference_protected(master_help->helper,
+					   lockdep_is_held(&nf_conntrack_lock));
+	if (helper) {
+		exp->timeout.expires = jiffies +
+			helper->expect_policy[exp->class].timeout * HZ;
 	}
 	add_timer(&exp->timeout);
 
 	NF_CT_STAT_INC(net, expect_create);
+	return 0;
 }
 
 /* Race with expectations being used means we could have none to find; OK. */
@@ -389,14 +384,13 @@
 	struct nf_conntrack_expect *i;
 	struct nf_conn *master = expect->master;
 	struct nf_conn_help *master_help = nfct_help(master);
+	struct nf_conntrack_helper *helper;
 	struct net *net = nf_ct_exp_net(expect);
 	struct hlist_node *n;
 	unsigned int h;
 	int ret = 1;
 
-	/* Don't allow expectations created from kernel-space with no helper */
-	if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
-	    (!master_help || (master_help && !master_help->helper))) {
+	if (!master_help) {
 		ret = -ESHUTDOWN;
 		goto out;
 	}
@@ -414,11 +408,10 @@
 		}
 	}
 	/* Will be over limit? */
-	if (master_help) {
-		p = &rcu_dereference_protected(
-			master_help->helper,
-			lockdep_is_held(&nf_conntrack_lock)
-			)->expect_policy[expect->class];
+	helper = rcu_dereference_protected(master_help->helper,
+					   lockdep_is_held(&nf_conntrack_lock));
+	if (helper) {
+		p = &helper->expect_policy[expect->class];
 		if (p->max_expected &&
 		    master_help->expecting[expect->class] >= p->max_expected) {
 			evict_oldest_expect(master, expect);
@@ -450,8 +443,9 @@
 	if (ret <= 0)
 		goto out;
 
-	ret = 0;
-	nf_ct_expect_insert(expect);
+	ret = nf_ct_expect_insert(expect);
+	if (ret < 0)
+		goto out;
 	spin_unlock_bh(&nf_conntrack_lock);
 	nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
 	return ret;
@@ -461,22 +455,7 @@
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
 
-void nf_ct_remove_userspace_expectations(void)
-{
-	struct nf_conntrack_expect *exp;
-	struct hlist_node *n, *next;
-
-	hlist_for_each_entry_safe(exp, n, next,
-				  &nf_ct_userspace_expect_list, lnode) {
-		if (del_timer(&exp->timeout)) {
-			nf_ct_unlink_expect(exp);
-			nf_ct_expect_put(exp);
-		}
-	}
-}
-EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);
-
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
 struct ct_expect_iter_state {
 	struct seq_net_private p;
 	unsigned int bucket;
@@ -604,25 +583,25 @@
 	.llseek  = seq_lseek,
 	.release = seq_release_net,
 };
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 
 static int exp_proc_init(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
 	struct proc_dir_entry *proc;
 
 	proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
 	if (!proc)
 		return -ENOMEM;
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 	return 0;
 }
 
 static void exp_proc_remove(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
 	proc_net_remove(net, "nf_conntrack_expect");
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 }
 
 module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 6f5801e..8c5c95c 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -42,7 +42,7 @@
 static unsigned int ports_c;
 module_param_array(ports, ushort, &ports_c, 0400);
 
-static int loose;
+static bool loose;
 module_param(loose, bool, 0600);
 
 unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index f03c2d4..722291f 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -42,7 +42,7 @@
 module_param(gkrouted_only, int, 0600);
 MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
 
-static int callforward_filter __read_mostly = 1;
+static bool callforward_filter __read_mostly = true;
 module_param(callforward_filter, bool, 0600);
 MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
 				     "if both endpoints are on different sides "
@@ -743,17 +743,16 @@
 		}
 		break;
 	}
-#if defined(CONFIG_NF_CONNTRACK_IPV6) || \
-    defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
 	case AF_INET6: {
 		struct flowi6 fl1, fl2;
 		struct rt6_info *rt1, *rt2;
 
 		memset(&fl1, 0, sizeof(fl1));
-		ipv6_addr_copy(&fl1.daddr, &src->in6);
+		fl1.daddr = src->in6;
 
 		memset(&fl2, 0, sizeof(fl2));
-		ipv6_addr_copy(&fl2.daddr, &dst->in6);
+		fl2.daddr = dst->in6;
 		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
 				   flowi6_to_flowi(&fl1), false)) {
 			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 93c4bdb..c9e0de0 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -121,6 +121,18 @@
 	int ret = 0;
 
 	if (tmpl != NULL) {
+		/* we've got a userspace helper. */
+		if (tmpl->status & IPS_USERSPACE_HELPER) {
+			help = nf_ct_helper_ext_add(ct, flags);
+			if (help == NULL) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			rcu_assign_pointer(help->helper, NULL);
+			__set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
+			ret = 0;
+			goto out;
+		}
 		help = nfct_help(tmpl);
 		if (help != NULL)
 			helper = help->helper;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ef21b22..e07dc3a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -135,7 +135,7 @@
 static inline int
 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	long timeout = (ct->timeout.expires - jiffies) / HZ;
+	long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
 
 	if (timeout < 0)
 		timeout = 0;
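The timeout fix above matters because jiffies-style tick counters are unsigned: once the timer has expired, `expires - jiffies` wraps around to a huge positive value instead of going negative. Casting both operands to a signed type first restores the expected sign, which the existing clamp then turns into zero. A userspace sketch, with an arbitrary HZ value:

#include <stdio.h>
#include <stdint.h>

#define HZ 100	/* ticks per second, arbitrary for this example */

static long remaining_seconds(uint32_t expires, uint32_t now)
{
	/* signed subtraction, then clamp, as in the fixed kernel code */
	long timeout = ((long)expires - (long)now) / HZ;

	return timeout < 0 ? 0 : timeout;
}

int main(void)
{
	uint32_t now = 5000;

	printf("still pending:   %lds\n", remaining_seconds(now + 300, now));
	printf("already expired: %lds\n", remaining_seconds(now - 300, now));
	return 0;
}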
@@ -203,25 +203,18 @@
 }
 
 static int
-ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
-			enum ip_conntrack_dir dir)
+dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
+	      enum ip_conntrack_dir dir)
 {
 	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
 	struct nlattr *nest_count;
-	const struct nf_conn_counter *acct;
-
-	acct = nf_conn_acct_find(ct);
-	if (!acct)
-		return 0;
 
 	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
 	if (!nest_count)
 		goto nla_put_failure;
 
-	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
-		     cpu_to_be64(acct[dir].packets));
-	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
-		     cpu_to_be64(acct[dir].bytes));
+	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
+	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
 
 	nla_nest_end(skb, nest_count);
 
@@ -232,6 +225,27 @@
 }
 
 static int
+ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
+			enum ip_conntrack_dir dir, int type)
+{
+	struct nf_conn_counter *acct;
+	u64 pkts, bytes;
+
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+
+	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
+		pkts = atomic64_xchg(&acct[dir].packets, 0);
+		bytes = atomic64_xchg(&acct[dir].bytes, 0);
+	} else {
+		pkts = atomic64_read(&acct[dir].packets);
+		bytes = atomic64_read(&acct[dir].bytes);
+	}
+	return dump_counters(skb, pkts, bytes, dir);
+}
+
+static int
 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	struct nlattr *nest_count;
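For IPCTNL_MSG_CT_GET_CTRZERO the new ctnetlink_dump_counters() uses atomic64_xchg() so that reading and zeroing a counter happens in one atomic step, rather than the separate memset the dump path used before. The sketch below shows the same read-and-reset pattern with C11 atomics; names are local to the example.

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t packets;

static uint64_t read_counter(int zero_after_read)
{
	if (zero_after_read)
		return atomic_exchange(&packets, 0);	/* read and reset in one step */
	return atomic_load(&packets);
}

int main(void)
{
	atomic_fetch_add(&packets, 42);

	printf("read+zero: %llu\n", (unsigned long long)read_counter(1));
	printf("after:     %llu\n", (unsigned long long)read_counter(0));
	return 0;
}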
@@ -393,15 +407,15 @@
 }
 
 static int
-ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
-		    int event, struct nf_conn *ct)
+ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+		    struct nf_conn *ct)
 {
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
 	struct nlattr *nest_parms;
-	unsigned int flags = pid ? NLM_F_MULTI : 0;
+	unsigned int flags = pid ? NLM_F_MULTI : 0, event;
 
-	event |= NFNL_SUBSYS_CTNETLINK << 8;
+	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
 	if (nlh == NULL)
 		goto nlmsg_failure;
@@ -430,8 +444,8 @@
 
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
 	    ctnetlink_dump_timeout(skb, ct) < 0 ||
-	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
+	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
 	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
 	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
@@ -612,8 +626,10 @@
 		goto nla_put_failure;
 
 	if (events & (1 << IPCT_DESTROY)) {
-		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+		if (ctnetlink_dump_counters(skb, ct,
+					    IP_CT_DIR_ORIGINAL, type) < 0 ||
+		    ctnetlink_dump_counters(skb, ct,
+					    IP_CT_DIR_REPLY, type) < 0 ||
 		    ctnetlink_dump_timestamp(skb, ct) < 0)
 			goto nla_put_failure;
 	} else {
@@ -709,20 +725,13 @@
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
 						cb->nlh->nlmsg_seq,
-						IPCTNL_MSG_CT_NEW, ct) < 0) {
+						NFNL_MSG_TYPE(
+							cb->nlh->nlmsg_type),
+						ct) < 0) {
 				nf_conntrack_get(&ct->ct_general);
 				cb->args[1] = (unsigned long)ct;
 				goto out;
 			}
-
-			if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
-						IPCTNL_MSG_CT_GET_CTRZERO) {
-				struct nf_conn_counter *acct;
-
-				acct = nf_conn_acct_find(ct);
-				if (acct)
-					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
-			}
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -1001,7 +1010,7 @@
 
 	rcu_read_lock();
 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
-				  IPCTNL_MSG_CT_NEW, ct);
+				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
 	rcu_read_unlock();
 	nf_ct_put(ct);
 	if (err <= 0)
@@ -1087,14 +1096,14 @@
 
 	if (cda[CTA_NAT_DST]) {
 		ret = ctnetlink_parse_nat_setup(ct,
-						IP_NAT_MANIP_DST,
+						NF_NAT_MANIP_DST,
 						cda[CTA_NAT_DST]);
 		if (ret < 0)
 			return ret;
 	}
 	if (cda[CTA_NAT_SRC]) {
 		ret = ctnetlink_parse_nat_setup(ct,
-						IP_NAT_MANIP_SRC,
+						NF_NAT_MANIP_SRC,
 						cda[CTA_NAT_SRC]);
 		if (ret < 0)
 			return ret;
@@ -1358,12 +1367,15 @@
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
 			rcu_read_unlock();
+			spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
+				spin_lock_bh(&nf_conntrack_lock);
 				err = -EOPNOTSUPP;
 				goto err1;
 			}
 
+			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname,
 							    nf_ct_l3num(ct),
@@ -1638,7 +1650,7 @@
 			  const struct nf_conntrack_expect *exp)
 {
 	struct nf_conn *master = exp->master;
-	long timeout = (exp->timeout.expires - jiffies) / HZ;
+	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
 	struct nf_conn_help *help;
 
 	if (timeout < 0)
@@ -1847,7 +1859,9 @@
 	if (err < 0)
 		return err;
 
-	if (cda[CTA_EXPECT_MASTER])
+	if (cda[CTA_EXPECT_TUPLE])
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+	else if (cda[CTA_EXPECT_MASTER])
 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
 	else
 		return -EINVAL;
@@ -1869,25 +1883,30 @@
 
 	err = -ENOMEM;
 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (skb2 == NULL)
+	if (skb2 == NULL) {
+		nf_ct_expect_put(exp);
 		goto out;
+	}
 
 	rcu_read_lock();
 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
 				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
 	rcu_read_unlock();
+	nf_ct_expect_put(exp);
 	if (err <= 0)
 		goto free;
 
-	nf_ct_expect_put(exp);
+	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+	if (err < 0)
+		goto out;
 
-	return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+	return 0;
 
 free:
 	kfree_skb(skb2);
 out:
-	nf_ct_expect_put(exp);
-	return err;
+	/* this avoids a loop in nfnetlink. */
+	return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 static int
@@ -2023,6 +2042,10 @@
 	}
 	help = nfct_help(ct);
 	if (!help) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+	if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) {
 		if (!cda[CTA_EXPECT_TIMEOUT]) {
 			err = -EINVAL;
 			goto out;
@@ -2247,7 +2270,6 @@
 {
 	pr_info("ctnetlink: unregistering from nfnetlink.\n");
 
-	nf_ct_remove_userspace_expectations();
 	unregister_pernet_subsys(&ctnetlink_net_ops);
 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
 	nfnetlink_subsys_unregister(&ctnl_subsys);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 2e664a6..d6dde6d 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -629,7 +629,7 @@
 	return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
 			  struct nf_conn *ct)
 {
@@ -770,7 +770,7 @@
 	.error			= dccp_error,
 	.print_tuple		= dccp_print_tuple,
 	.print_conntrack	= dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= dccp_to_nlattr,
 	.nlattr_size		= dccp_nlattr_size,
 	.from_nlattr		= nlattr_to_dccp,
@@ -792,7 +792,7 @@
 	.error			= dccp_error,
 	.print_tuple		= dccp_print_tuple,
 	.print_conntrack	= dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= dccp_to_nlattr,
 	.nlattr_size		= dccp_nlattr_size,
 	.from_nlattr		= nlattr_to_dccp,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index d69facd..f033879 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -291,7 +291,7 @@
 	.new		 = gre_new,
 	.destroy	 = gre_destroy,
 	.me 		 = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
 	.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 6772b11..afa6913 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -461,7 +461,7 @@
 	return true;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -666,7 +666,7 @@
 	.packet 		= sctp_packet,
 	.new 			= sctp_new,
 	.me 			= THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= sctp_to_nlattr,
 	.nlattr_size		= sctp_nlattr_size,
 	.from_nlattr		= nlattr_to_sctp,
@@ -696,7 +696,7 @@
 	.packet 		= sctp_packet,
 	.new 			= sctp_new,
 	.me 			= THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= sctp_to_nlattr,
 	.nlattr_size		= sctp_nlattr_size,
 	.from_nlattr		= nlattr_to_sctp,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 8235b86..97b9f3e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1126,7 +1126,7 @@
 	return true;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1447,7 +1447,7 @@
 	.packet 		= tcp_packet,
 	.new 			= tcp_new,
 	.error			= tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= tcp_to_nlattr,
 	.nlattr_size		= tcp_nlattr_size,
 	.from_nlattr		= nlattr_to_tcp,
@@ -1479,7 +1479,7 @@
 	.packet 		= tcp_packet,
 	.new 			= tcp_new,
 	.error			= tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= tcp_to_nlattr,
 	.nlattr_size		= tcp_nlattr_size,
 	.from_nlattr		= nlattr_to_tcp,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 8289088..5f35757 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -188,7 +188,7 @@
 	.packet			= udp_packet,
 	.new			= udp_new,
 	.error			= udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
@@ -216,7 +216,7 @@
 	.packet			= udp_packet,
 	.new			= udp_new,
 	.error			= udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 263b5a7..f52ca11 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -174,7 +174,7 @@
 	.packet			= udplite_packet,
 	.new			= udplite_new,
 	.error			= udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
@@ -198,7 +198,7 @@
 	.packet			= udplite_packet,
 	.new			= udplite_new,
 	.error			= udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 05e9feb..885f5ab 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -34,7 +34,7 @@
 
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
 int
 print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
             const struct nf_conntrack_l3proto *l3proto,
@@ -396,7 +396,7 @@
 static void nf_conntrack_standalone_fini_proc(struct net *net)
 {
 }
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
 
 /* Sysctl support */
 
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index af7dd31..e8d27af 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -15,7 +15,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 
-static int nf_ct_tstamp __read_mostly;
+static bool nf_ct_tstamp __read_mostly;
 
 module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
 MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
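
Editor's note: the variable becomes bool because the "bool" module parameter type expects a true bool backing variable. A minimal sketch of the pattern (hypothetical parameter, not from this patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_tstamp;	/* must be bool for the "bool" param type */
module_param_named(tstamp, demo_tstamp, bool, 0644);
MODULE_PARM_DESC(tstamp, "Enable the demo timestamping flag.");
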
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
new file mode 100644
index 0000000..11ba013
--- /dev/null
+++ b/net/netfilter/nfnetlink_acct.c
@@ -0,0 +1,361 @@
+/*
+ * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_acct.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
+
+static LIST_HEAD(nfnl_acct_list);
+
+struct nf_acct {
+	atomic64_t		pkts;
+	atomic64_t		bytes;
+	struct list_head	head;
+	atomic_t		refcnt;
+	char			name[NFACCT_NAME_MAX];
+	struct rcu_head		rcu_head;
+};
+
+static int
+nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
+	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	struct nf_acct *nfacct, *matching = NULL;
+	char *acct_name;
+
+	if (!tb[NFACCT_NAME])
+		return -EINVAL;
+
+	acct_name = nla_data(tb[NFACCT_NAME]);
+
+	list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+		if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
+			continue;
+
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+
+		matching = nfacct;
+		break;
+	}
+
+	if (matching) {
+		if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+			/* reset counters if you request a replacement. */
+			atomic64_set(&matching->pkts, 0);
+			atomic64_set(&matching->bytes, 0);
+			return 0;
+		}
+		return -EBUSY;
+	}
+
+	nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL);
+	if (nfacct == NULL)
+		return -ENOMEM;
+
+	strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
+
+	if (tb[NFACCT_BYTES]) {
+		atomic64_set(&nfacct->bytes,
+			     be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+	}
+	if (tb[NFACCT_PKTS]) {
+		atomic64_set(&nfacct->pkts,
+			     be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+	}
+	atomic_set(&nfacct->refcnt, 1);
+	list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+	return 0;
+}
+
+static int
+nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+		   int event, struct nf_acct *acct)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+	unsigned int flags = pid ? NLM_F_MULTI : 0;
+	u64 pkts, bytes;
+
+	event |= NFNL_SUBSYS_ACCT << 8;
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		goto nlmsg_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_UNSPEC;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = 0;
+
+	NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+
+	if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
+		pkts = atomic64_xchg(&acct->pkts, 0);
+		bytes = atomic64_xchg(&acct->bytes, 0);
+	} else {
+		pkts = atomic64_read(&acct->pkts);
+		bytes = atomic64_read(&acct->bytes);
+	}
+	NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
+	NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
+	NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -1;
+}
+
+static int
+nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nf_acct *cur, *last;
+
+	if (cb->args[2])
+		return 0;
+
+	last = (struct nf_acct *)cb->args[1];
+	if (cb->args[1])
+		cb->args[1] = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+		if (last && cur != last)
+			continue;
+
+		if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid,
+				       cb->nlh->nlmsg_seq,
+				       NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+				       NFNL_MSG_ACCT_NEW, cur) < 0) {
+			cb->args[1] = (unsigned long)cur;
+			break;
+		}
+	}
+	if (!cb->args[1])
+		cb->args[2] = 1;
+	rcu_read_unlock();
+	return skb->len;
+}
+
+static int
+nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
+	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	int ret = -ENOENT;
+	struct nf_acct *cur;
+	char *acct_name;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump,
+					  NULL, 0);
+	}
+
+	if (!tb[NFACCT_NAME])
+		return -EINVAL;
+	acct_name = nla_data(tb[NFACCT_NAME]);
+
+	list_for_each_entry(cur, &nfnl_acct_list, head) {
+		struct sk_buff *skb2;
+
+		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+			continue;
+
+		skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		if (skb2 == NULL) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid,
+					 nlh->nlmsg_seq,
+					 NFNL_MSG_TYPE(nlh->nlmsg_type),
+					 NFNL_MSG_ACCT_NEW, cur);
+		if (ret <= 0) {
+			kfree_skb(skb2);
+			break;
+		}
+		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+					MSG_DONTWAIT);
+		if (ret > 0)
+			ret = 0;
+
+		/* this avoids a loop in nfnetlink. */
+		return ret == -EAGAIN ? -ENOBUFS : ret;
+	}
+	return ret;
+}
+
+/* try to delete object, fail if it is still in use. */
+static int nfnl_acct_try_del(struct nf_acct *cur)
+{
+	int ret = 0;
+
+	/* we want to avoid races with nfnl_acct_find_get. */
+	if (atomic_dec_and_test(&cur->refcnt)) {
+		/* We are protected by nfnl mutex. */
+		list_del_rcu(&cur->head);
+		kfree_rcu(cur, rcu_head);
+	} else {
+		/* still in use, restore reference counter. */
+		atomic_inc(&cur->refcnt);
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static int
+nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
+	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+	char *acct_name;
+	struct nf_acct *cur;
+	int ret = -ENOENT;
+
+	if (!tb[NFACCT_NAME]) {
+		list_for_each_entry(cur, &nfnl_acct_list, head)
+			nfnl_acct_try_del(cur);
+
+		return 0;
+	}
+	acct_name = nla_data(tb[NFACCT_NAME]);
+
+	list_for_each_entry(cur, &nfnl_acct_list, head) {
+		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+			continue;
+
+		ret = nfnl_acct_try_del(cur);
+		if (ret < 0)
+			return ret;
+
+		break;
+	}
+	return ret;
+}
+
+static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
+	[NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
+	[NFACCT_BYTES] = { .type = NLA_U64 },
+	[NFACCT_PKTS] = { .type = NLA_U64 },
+};
+
+static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
+	[NFNL_MSG_ACCT_NEW]		= { .call = nfnl_acct_new,
+					    .attr_count = NFACCT_MAX,
+					    .policy = nfnl_acct_policy },
+	[NFNL_MSG_ACCT_GET] 		= { .call = nfnl_acct_get,
+					    .attr_count = NFACCT_MAX,
+					    .policy = nfnl_acct_policy },
+	[NFNL_MSG_ACCT_GET_CTRZERO] 	= { .call = nfnl_acct_get,
+					    .attr_count = NFACCT_MAX,
+					    .policy = nfnl_acct_policy },
+	[NFNL_MSG_ACCT_DEL]		= { .call = nfnl_acct_del,
+					    .attr_count = NFACCT_MAX,
+					    .policy = nfnl_acct_policy },
+};
+
+static const struct nfnetlink_subsystem nfnl_acct_subsys = {
+	.name				= "acct",
+	.subsys_id			= NFNL_SUBSYS_ACCT,
+	.cb_count			= NFNL_MSG_ACCT_MAX,
+	.cb				= nfnl_acct_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
+
+struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+{
+	struct nf_acct *cur, *acct = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+			continue;
+
+		if (!try_module_get(THIS_MODULE))
+			goto err;
+
+		if (!atomic_inc_not_zero(&cur->refcnt)) {
+			module_put(THIS_MODULE);
+			goto err;
+		}
+
+		acct = cur;
+		break;
+	}
+err:
+	rcu_read_unlock();
+	return acct;
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
+
+void nfnl_acct_put(struct nf_acct *acct)
+{
+	atomic_dec(&acct->refcnt);
+	module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_put);
+
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
+{
+	atomic64_inc(&nfacct->pkts);
+	atomic64_add(skb->len, &nfacct->bytes);
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_update);
+
+static int __init nfnl_acct_init(void)
+{
+	int ret;
+
+	pr_info("nfnl_acct: registering with nfnetlink.\n");
+	ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
+	if (ret < 0) {
+		pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
+		goto err_out;
+	}
+	return 0;
+err_out:
+	return ret;
+}
+
+static void __exit nfnl_acct_exit(void)
+{
+	struct nf_acct *cur, *tmp;
+
+	pr_info("nfnl_acct: unregistering from nfnetlink.\n");
+	nfnetlink_subsys_unregister(&nfnl_acct_subsys);
+
+	list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
+		list_del_rcu(&cur->head);
+		/* We are sure that our objects have no clients at this point,
+		 * it's safe to release them all without checking refcnt. */
+		kfree_rcu(cur, rcu_head);
+	}
+}
+
+module_init(nfnl_acct_init);
+module_exit(nfnl_acct_exit);
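
Editor's note: the three exported symbols above form the in-kernel side of the new accounting infrastructure; the xt_nfacct match added later in this diff consumes them. A rough sketch of the intended call pattern (hypothetical caller; the object named "my_acct" is assumed to have been created over nfnetlink beforehand):

#include <linux/skbuff.h>
#include <linux/netfilter/nfnetlink_acct.h>

static struct nf_acct *demo_acct;

static int demo_bind(void)
{
	demo_acct = nfnl_acct_find_get("my_acct");	/* takes a reference */
	return demo_acct ? 0 : -ENOENT;
}

static void demo_account(const struct sk_buff *skb)
{
	nfnl_acct_update(skb, demo_acct);	/* pkts++, bytes += skb->len */
}

static void demo_unbind(void)
{
	nfnl_acct_put(demo_acct);	/* drops the object and module references */
}
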
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index 4bca15a..ba92824 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -98,6 +98,7 @@
 	struct ipv6hdr _ip6h;
 	const struct ipv6hdr *ih;
 	u8 nexthdr;
+	__be16 frag_off;
 	int offset;
 
 	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
@@ -108,7 +109,7 @@
 
 	nexthdr = ih->nexthdr;
 	offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
-				  &nexthdr);
+				  &nexthdr, &frag_off);
 
 	audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
 			 &ih->saddr, &ih->daddr, nexthdr);
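
Editor's note: this hunk adapts the caller to the new ipv6_skip_exthdr() signature, which gained a __be16 *frag_off output so callers can tell whether the packet is a fragment. A sketch of the updated calling convention (hypothetical helper):

#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>

static int demo_transport_offset(const struct sk_buff *skb)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;
	int offset;

	offset = ipv6_skip_exthdr(skb,
				  skb_network_offset(skb) + sizeof(struct ipv6hdr),
				  &nexthdr, &frag_off);
	if (offset < 0)
		return -1;	/* truncated or malformed extension headers */
	return offset;		/* offset of the transport header in the skb */
}
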
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0221d10..8e87123 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -62,8 +62,8 @@
 	int ret = 0;
 	u8 proto;
 
-	if (info->flags & ~XT_CT_NOTRACK)
-		return -EINVAL;
+	if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER))
+		return -EOPNOTSUPP;
 
 	if (info->flags & XT_CT_NOTRACK) {
 		ct = nf_ct_untracked_get();
@@ -92,7 +92,9 @@
 				  GFP_KERNEL))
 		goto err3;
 
-	if (info->helper[0]) {
+	if (info->flags & XT_CT_USERSPACE_HELPER) {
+		__set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
+	} else if (info->helper[0]) {
 		ret = -ENOENT;
 		proto = xt_ct_find_proto(par);
 		if (!proto) {
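
Editor's note: two things change here. Unknown flag bits are now rejected with -EOPNOTSUPP instead of -EINVAL (presumably so userspace can distinguish "option not supported by this kernel" from a malformed rule), and the new XT_CT_USERSPACE_HELPER flag marks the conntrack entry for a helper living in userspace rather than resolving a kernel helper by name. The flag-mask idiom, as a sketch:

/* Reject any bit this revision of the target does not understand. */
static int demo_check_flags(unsigned int flags)
{
	if (flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER))
		return -EOPNOTSUPP;	/* set by a newer iptables than we support */
	return 0;
}
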
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index d4f4b5d..95237c8 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -49,7 +49,7 @@
 	return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 hash_v6(const struct sk_buff *skb)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -74,7 +74,7 @@
 		if (par->family == NFPROTO_IPV4)
 			queue = (((u64) hash_v4(skb) * info->queues_total) >>
 				 32) + queue;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 		else if (par->family == NFPROTO_IPV6)
 			queue = (((u64) hash_v6(skb) * info->queues_total) >>
 				 32) + queue;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9e63b43..190ad37 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -161,7 +161,7 @@
 		struct flowi6 *fl6 = &fl.u.ip6;
 
 		memset(fl6, 0, sizeof(*fl6));
-		ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr);
+		fl6->daddr = ipv6_hdr(skb)->saddr;
 	}
 	rcu_read_lock();
 	ai = nf_get_afinfo(family);
@@ -198,17 +198,18 @@
 	return XT_CONTINUE;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static unsigned int
 tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	u8 nexthdr;
+	__be16 frag_off;
 	int tcphoff;
 	int ret;
 
 	nexthdr = ipv6h->nexthdr;
-	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
 		return NF_DROP;
 	ret = tcpmss_mangle_packet(skb, par->targinfo,
@@ -259,7 +260,7 @@
 	return -EINVAL;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
 {
 	const struct xt_tcpmss_info *info = par->targinfo;
@@ -292,7 +293,7 @@
 		.proto		= IPPROTO_TCP,
 		.me		= THIS_MODULE,
 	},
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	{
 		.family		= NFPROTO_IPV6,
 		.name		= "TCPMSS",
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 9dc9ecf..25fd1c4 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -80,16 +80,17 @@
 	       sizeof(struct iphdr) + sizeof(struct tcphdr));
 }
 
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
 static unsigned int
 tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	int tcphoff;
 	u_int8_t nexthdr;
+	__be16 frag_off;
 
 	nexthdr = ipv6h->nexthdr;
-	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
 		return NF_DROP;
 
@@ -108,7 +109,7 @@
 		.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
 		.me         = THIS_MODULE,
 	},
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
 	{
 		.name       = "TCPOPTSTRIP",
 		.family     = NFPROTO_IPV6,
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 5f054a0..3aae66f 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -25,13 +25,10 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #	define WITH_CONNTRACK 1
 #	include <net/netfilter/nf_conntrack.h>
 #endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#	define WITH_IPV6 1
-#endif
 
 struct xt_tee_priv {
 	struct notifier_block	notifier;
@@ -136,7 +133,7 @@
 	return XT_CONTINUE;
 }
 
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
 static bool
 tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 {
@@ -196,7 +193,7 @@
 	}
 	return XT_CONTINUE;
 }
-#endif /* WITH_IPV6 */
+#endif
 
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
 			    void *ptr)
@@ -276,7 +273,7 @@
 		.destroy    = tee_tg_destroy,
 		.me         = THIS_MODULE,
 	},
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
 	{
 		.name       = "TEE",
 		.revision   = 1,
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index dcfd57e..35a959a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -22,7 +22,7 @@
 
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_TPROXY_HAVE_IPV6 1
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index b77d383..49c5ff7 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -16,7 +16,7 @@
 #include <linux/ip.h>
 #include <net/route.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/ip6_fib.h>
@@ -31,7 +31,7 @@
 MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 			    const struct in6_addr *addr)
 {
@@ -42,7 +42,7 @@
 	int route_err;
 
 	memset(&flow, 0, sizeof(flow));
-	ipv6_addr_copy(&flow.daddr, addr);
+	flow.daddr = *addr;
 	if (dev)
 		flow.flowi6_oif = dev->ifindex;
 
@@ -149,7 +149,7 @@
 	else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
 		dev = par->out;
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	if (par->family == NFPROTO_IPV6)
 		return addrtype_mt6(net, dev, skb, info);
 #endif
@@ -190,7 +190,7 @@
 		return -EINVAL;
 	}
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	if (par->family == NFPROTO_IPV6) {
 		if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
 			pr_err("ipv6 BLACKHOLE matching not supported\n");
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 5b13850..e595e07 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -40,46 +40,46 @@
 	case XT_CONNBYTES_PKTS:
 		switch (sinfo->direction) {
 		case XT_CONNBYTES_DIR_ORIGINAL:
-			what = counters[IP_CT_DIR_ORIGINAL].packets;
+			what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
 			break;
 		case XT_CONNBYTES_DIR_REPLY:
-			what = counters[IP_CT_DIR_REPLY].packets;
+			what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
 			break;
 		case XT_CONNBYTES_DIR_BOTH:
-			what = counters[IP_CT_DIR_ORIGINAL].packets;
-			what += counters[IP_CT_DIR_REPLY].packets;
+			what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
+			what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
 			break;
 		}
 		break;
 	case XT_CONNBYTES_BYTES:
 		switch (sinfo->direction) {
 		case XT_CONNBYTES_DIR_ORIGINAL:
-			what = counters[IP_CT_DIR_ORIGINAL].bytes;
+			what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
 			break;
 		case XT_CONNBYTES_DIR_REPLY:
-			what = counters[IP_CT_DIR_REPLY].bytes;
+			what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
 			break;
 		case XT_CONNBYTES_DIR_BOTH:
-			what = counters[IP_CT_DIR_ORIGINAL].bytes;
-			what += counters[IP_CT_DIR_REPLY].bytes;
+			what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
+			what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
 			break;
 		}
 		break;
 	case XT_CONNBYTES_AVGPKT:
 		switch (sinfo->direction) {
 		case XT_CONNBYTES_DIR_ORIGINAL:
-			bytes = counters[IP_CT_DIR_ORIGINAL].bytes;
-			pkts  = counters[IP_CT_DIR_ORIGINAL].packets;
+			bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
+			pkts  = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
 			break;
 		case XT_CONNBYTES_DIR_REPLY:
-			bytes = counters[IP_CT_DIR_REPLY].bytes;
-			pkts  = counters[IP_CT_DIR_REPLY].packets;
+			bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
+			pkts  = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
 			break;
 		case XT_CONNBYTES_DIR_BOTH:
-			bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
-				counters[IP_CT_DIR_REPLY].bytes;
-			pkts  = counters[IP_CT_DIR_ORIGINAL].packets +
-				counters[IP_CT_DIR_REPLY].packets;
+			bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) +
+				atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
+			pkts  = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) +
+				atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
 			break;
 		}
 		if (pkts != 0)
@@ -87,10 +87,10 @@
 		break;
 	}
 
-	if (sinfo->count.to)
+	if (sinfo->count.to >= sinfo->count.from)
 		return what <= sinfo->count.to && what >= sinfo->count.from;
-	else
-		return what >= sinfo->count.from;
+	else /* inverted */
+		return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
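
Editor's note: the rewritten range test treats a "to" value below "from" as an inverted interval, matching counts outside [to, from] instead of inside [from, to]. The same logic as a standalone sketch:

#include <linux/types.h>

/* Mirrors the test above: normal range when to >= from, inverted otherwise. */
static bool demo_in_range(u64 what, u64 from, u64 to)
{
	if (to >= from)
		return what >= from && what <= to;	/* inside [from, to]  */
	return what < to || what > from;		/* outside [to, from] */
}
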
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c
new file mode 100644
index 0000000..3c831a8
--- /dev/null
+++ b/net/netfilter/xt_ecn.c
@@ -0,0 +1,179 @@
+/*
+ * Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits
+ *
+ * (C) 2002 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_ecn.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ecn");
+MODULE_ALIAS("ip6t_ecn");
+
+static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_ecn_info *einfo = par->matchinfo;
+	struct tcphdr _tcph;
+	const struct tcphdr *th;
+
+	/* In practice, TCP match does this, so can't fail.  But let's
+	 * be good citizens.
+	 */
+	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
+	if (th == NULL)
+		return false;
+
+	if (einfo->operation & XT_ECN_OP_MATCH_ECE) {
+		if (einfo->invert & XT_ECN_OP_MATCH_ECE) {
+			if (th->ece == 1)
+				return false;
+		} else {
+			if (th->ece == 0)
+				return false;
+		}
+	}
+
+	if (einfo->operation & XT_ECN_OP_MATCH_CWR) {
+		if (einfo->invert & XT_ECN_OP_MATCH_CWR) {
+			if (th->cwr == 1)
+				return false;
+		} else {
+			if (th->cwr == 0)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static inline bool match_ip(const struct sk_buff *skb,
+			    const struct xt_ecn_info *einfo)
+{
+	return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^
+	       !!(einfo->invert & XT_ECN_OP_MATCH_IP);
+}
+
+static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_ecn_info *info = par->matchinfo;
+
+	if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info))
+		return false;
+
+	if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+	    !match_tcp(skb, par))
+		return false;
+
+	return true;
+}
+
+static int ecn_mt_check4(const struct xt_mtchk_param *par)
+{
+	const struct xt_ecn_info *info = par->matchinfo;
+	const struct ipt_ip *ip = par->entryinfo;
+
+	if (info->operation & XT_ECN_OP_MATCH_MASK)
+		return -EINVAL;
+
+	if (info->invert & XT_ECN_OP_MATCH_MASK)
+		return -EINVAL;
+
+	if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
+		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline bool match_ipv6(const struct sk_buff *skb,
+			      const struct xt_ecn_info *einfo)
+{
+	return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) ==
+	        einfo->ip_ect) ^
+	       !!(einfo->invert & XT_ECN_OP_MATCH_IP);
+}
+
+static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_ecn_info *info = par->matchinfo;
+
+	if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info))
+		return false;
+
+	if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+	    !match_tcp(skb, par))
+		return false;
+
+	return true;
+}
+
+static int ecn_mt_check6(const struct xt_mtchk_param *par)
+{
+	const struct xt_ecn_info *info = par->matchinfo;
+	const struct ip6t_ip6 *ip = par->entryinfo;
+
+	if (info->operation & XT_ECN_OP_MATCH_MASK)
+		return -EINVAL;
+
+	if (info->invert & XT_ECN_OP_MATCH_MASK)
+		return -EINVAL;
+
+	if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+	    (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
+		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_match ecn_mt_reg[] __read_mostly = {
+	{
+		.name		= "ecn",
+		.family		= NFPROTO_IPV4,
+		.match		= ecn_mt4,
+		.matchsize	= sizeof(struct xt_ecn_info),
+		.checkentry	= ecn_mt_check4,
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "ecn",
+		.family		= NFPROTO_IPV6,
+		.match		= ecn_mt6,
+		.matchsize	= sizeof(struct xt_ecn_info),
+		.checkentry	= ecn_mt_check6,
+		.me		= THIS_MODULE,
+	},
+};
+
+static int __init ecn_mt_init(void)
+{
+	return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
+}
+
+static void __exit ecn_mt_exit(void)
+{
+	xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
+}
+
+module_init(ecn_mt_init);
+module_exit(ecn_mt_exit);
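
Editor's note: the match reads the ECN codepoint from the two least-significant bits of the IPv4 TOS byte and of the IPv6 traffic class (which straddles the first flow-label byte, hence the >> 4 above). As a sketch, the same bits can be extracted with:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/dsfield.h>

static u8 demo_ecn_v4(const struct sk_buff *skb)
{
	return ip_hdr(skb)->tos & 0x03;			/* ECT(0)/ECT(1)/CE bits */
}

static u8 demo_ecn_v6(const struct sk_buff *skb)
{
	return ipv6_get_dsfield(ipv6_hdr(skb)) & 0x03;	/* same two bits of the TC */
}
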
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index dfd52ba..8e49921 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -21,7 +21,7 @@
 #include <linux/mm.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #include <linux/ipv6.h>
 #include <net/ipv6.h>
 #endif
@@ -64,7 +64,7 @@
 			__be32 src;
 			__be32 dst;
 		} ip;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 		struct {
 			__be32 src[4];
 			__be32 dst[4];
@@ -413,7 +413,7 @@
 	return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
 }
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
 {
 	switch (p) {
@@ -445,6 +445,7 @@
 {
 	__be16 _ports[2], *ports;
 	u8 nexthdr;
+	__be16 frag_off;
 	int poff;
 
 	memset(dst, 0, sizeof(*dst));
@@ -463,7 +464,7 @@
 			return 0;
 		nexthdr = ip_hdr(skb)->protocol;
 		break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	case NFPROTO_IPV6:
 		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
 			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
@@ -480,7 +481,7 @@
 		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
 			return 0;
 		nexthdr = ipv6_hdr(skb)->nexthdr;
-		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
 		if ((int)protoff < 0)
 			return -1;
 		break;
@@ -615,7 +616,7 @@
 		.destroy        = hashlimit_mt_destroy,
 		.me             = THIS_MODULE,
 	},
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	{
 		.name           = "hashlimit",
 		.revision       = 1,
@@ -692,7 +693,7 @@
 				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
 				 ent->rateinfo.cost);
 		break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	case NFPROTO_IPV6:
 		res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
 				 (long)(ent->expires - jiffies)/HZ,
@@ -760,7 +761,7 @@
 	hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
 	if (!hashlimit_net->ipt_hashlimit)
 		return -ENOMEM;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
 	if (!hashlimit_net->ip6t_hashlimit) {
 		proc_net_remove(net, "ipt_hashlimit");
@@ -773,7 +774,7 @@
 static void __net_exit hashlimit_proc_net_exit(struct net *net)
 {
 	proc_net_remove(net, "ipt_hashlimit");
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	proc_net_remove(net, "ip6t_hashlimit");
 #endif
 }
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
new file mode 100644
index 0000000..b3be0ef
--- /dev/null
+++ b/net/netfilter/xt_nfacct.c
@@ -0,0 +1,76 @@
+/*
+ * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 (or any
+ * later at your option) as published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nfnetlink_acct.h>
+#include <linux/netfilter/xt_nfacct.h>
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_nfacct");
+MODULE_ALIAS("ip6t_nfacct");
+
+static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_nfacct_match_info *info = par->targinfo;
+
+	nfnl_acct_update(skb, info->nfacct);
+
+	return true;
+}
+
+static int
+nfacct_mt_checkentry(const struct xt_mtchk_param *par)
+{
+	struct xt_nfacct_match_info *info = par->matchinfo;
+	struct nf_acct *nfacct;
+
+	nfacct = nfnl_acct_find_get(info->name);
+	if (nfacct == NULL) {
+		pr_info("xt_nfacct: accounting object with name `%s' "
+			"does not exist\n", info->name);
+		return -ENOENT;
+	}
+	info->nfacct = nfacct;
+	return 0;
+}
+
+static void
+nfacct_mt_destroy(const struct xt_mtdtor_param *par)
+{
+	const struct xt_nfacct_match_info *info = par->matchinfo;
+
+	nfnl_acct_put(info->nfacct);
+}
+
+static struct xt_match nfacct_mt_reg __read_mostly = {
+	.name       = "nfacct",
+	.family     = NFPROTO_UNSPEC,
+	.checkentry = nfacct_mt_checkentry,
+	.match      = nfacct_mt,
+	.destroy    = nfacct_mt_destroy,
+	.matchsize  = sizeof(struct xt_nfacct_match_info),
+	.me         = THIS_MODULE,
+};
+
+static int __init nfacct_mt_init(void)
+{
+	return xt_register_match(&nfacct_mt_reg);
+}
+
+static void __exit nfacct_mt_exit(void)
+{
+	xt_unregister_match(&nfacct_mt_reg);
+}
+
+module_init(nfacct_mt_init);
+module_exit(nfacct_mt_exit);
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index fe39f7e..72bb07f 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -22,7 +22,7 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_SOCKET_HAVE_IPV6 1
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
@@ -30,7 +30,7 @@
 
 #include <linux/netfilter/xt_socket.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #define XT_SOCKET_HAVE_CONNTRACK 1
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -214,6 +214,7 @@
 	struct icmp6hdr *icmph, _icmph;
 	__be16 *ports, _ports[2];
 	u8 inside_nexthdr;
+	__be16 inside_fragoff;
 	int inside_hdrlen;
 
 	icmph = skb_header_pointer(skb, outside_hdrlen,
@@ -229,7 +230,8 @@
 		return 1;
 	inside_nexthdr = inside_iph->nexthdr;
 
-	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), &inside_nexthdr);
+	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+					 &inside_nexthdr, &inside_fragoff);
 	if (inside_hdrlen < 0)
 		return 1; /* hjm: Packet has no/incomplete transport layer headers. */
 
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 96b749d..6f17013 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -96,7 +96,7 @@
 }
 
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_search - Search for a matching IPv6 address entry
  * @addr: IPv6 address
@@ -185,7 +185,7 @@
 	return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_add - Add a new IPv6 address entry to a list
  * @entry: address entry
@@ -263,7 +263,7 @@
 	return entry;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_remove_entry - Remove an IPv6 address entry
  * @entry: address entry
@@ -342,7 +342,7 @@
 	}
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_audit_addr - Audit an IPv6 address
  * @audit_buf: audit buffer
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index fdbc1d2..a1287ce 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -133,7 +133,7 @@
 }
 #endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 #define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list)
 
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 3f905e5..3820411 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -78,7 +78,7 @@
 	struct netlbl_dom_map *ptr;
 	struct netlbl_af4list *iter4;
 	struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *iter6;
 	struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -90,7 +90,7 @@
 			netlbl_af4list_remove_entry(iter4);
 			kfree(netlbl_domhsh_addr4_entry(iter4));
 		}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		netlbl_af6list_foreach_safe(iter6, tmp6,
 					    &ptr->type_def.addrsel->list6) {
 			netlbl_af6list_remove_entry(iter6);
@@ -217,7 +217,7 @@
 			cipsov4 = map4->type_def.cipsov4;
 			netlbl_af4list_audit_addr(audit_buf, 0, NULL,
 						  addr4->addr, addr4->mask);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		} else if (addr6 != NULL) {
 			struct netlbl_domaddr6_map *map6;
 			map6 = netlbl_domhsh_addr6_entry(addr6);
@@ -306,7 +306,7 @@
 	struct netlbl_dom_map *entry_old;
 	struct netlbl_af4list *iter4;
 	struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *iter6;
 	struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -338,7 +338,7 @@
 					       &entry->type_def.addrsel->list4)
 				netlbl_domhsh_audit_add(entry, iter4, NULL,
 							ret_val, audit_info);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 			netlbl_af6list_foreach_rcu(iter6,
 					       &entry->type_def.addrsel->list6)
 				netlbl_domhsh_audit_add(entry, NULL, iter6,
@@ -365,7 +365,7 @@
 				ret_val = -EEXIST;
 				goto add_return;
 			}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		netlbl_af6list_foreach_rcu(iter6,
 					   &entry->type_def.addrsel->list6)
 			if (netlbl_af6list_search_exact(&iter6->addr,
@@ -386,7 +386,7 @@
 			if (ret_val != 0)
 				goto add_return;
 		}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		netlbl_af6list_foreach_safe(iter6, tmp6,
 					    &entry->type_def.addrsel->list6) {
 			netlbl_af6list_remove_entry(iter6);
@@ -510,7 +510,7 @@
 	struct netlbl_dom_map *entry_map;
 	struct netlbl_af4list *entry_addr;
 	struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *iter6;
 #endif /* IPv6 */
 	struct netlbl_domaddr4_map *entry;
@@ -533,7 +533,7 @@
 		goto remove_af4_failure;
 	netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
 		goto remove_af4_single_addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
 		goto remove_af4_single_addr;
 #endif /* IPv6 */
@@ -644,7 +644,7 @@
 	return netlbl_domhsh_addr4_entry(addr_iter);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
  * @domain: the domain name to search for
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index bfcc0f7..90872c4 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -104,7 +104,7 @@
 		     int (*callback) (struct netlbl_dom_map *entry, void *arg),
 		     void *cb_arg);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
 						  const struct in6_addr *addr);
 #endif /* IPv6 */
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 824f184..2560e7b 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -147,7 +147,7 @@
 				goto cfg_unlbl_map_add_failure;
 			break;
 			}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		case AF_INET6: {
 			const struct in6_addr *addr6 = addr;
 			const struct in6_addr *mask6 = mask;
@@ -155,12 +155,12 @@
 			if (map6 == NULL)
 				goto cfg_unlbl_map_add_failure;
 			map6->type = NETLBL_NLTYPE_UNLABELED;
-			ipv6_addr_copy(&map6->list.addr, addr6);
+			map6->list.addr = *addr6;
 			map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
 			map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
 			map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
 			map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
-			ipv6_addr_copy(&map6->list.mask, mask6);
+			map6->list.mask = *mask6;
 			map6->list.valid = 1;
 			ret_val = netlbl_af6list_add(&map6->list,
 						     &addrmap->list6);
@@ -227,7 +227,7 @@
 	case AF_INET:
 		addr_len = sizeof(struct in_addr);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		addr_len = sizeof(struct in6_addr);
 		break;
@@ -270,7 +270,7 @@
 	case AF_INET:
 		addr_len = sizeof(struct in_addr);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		addr_len = sizeof(struct in6_addr);
 		break;
@@ -673,7 +673,7 @@
 			ret_val = -ENOENT;
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		/* since we don't support any IPv6 labeling protocols right
 		 * now we can optimize everything away until we do */
@@ -724,7 +724,7 @@
 	case AF_INET:
 		ret_val = cipso_v4_sock_getattr(sk, secattr);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		ret_val = -ENOMSG;
 		break;
@@ -782,7 +782,7 @@
 			ret_val = -ENOENT;
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		/* since we don't support any IPv6 labeling protocols right
 		 * now we can optimize everything away until we do */
@@ -853,7 +853,7 @@
 			ret_val = -ENOENT;
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		/* since we don't support any IPv6 labeling protocols right
 		 * now we can optimize everything away until we do */
@@ -926,7 +926,7 @@
 			ret_val = -ENOENT;
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		/* since we don't support any IPv6 labeling protocols right
 		 * now we can optimize everything away until we do */
@@ -965,7 +965,7 @@
 		    cipso_v4_skbuff_getattr(skb, secattr) == 0)
 			return 0;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		break;
 #endif /* IPv6 */
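
Editor's note: the ipv6_addr_copy() calls in this file (and in netlabel_mgmt.c and netlabel_unlabeled.c below) become plain structure assignments; struct in6_addr copies by value just fine, so the helper is being retired. The two forms are equivalent:

struct in6_addr dst, src;

ipv6_addr_copy(&dst, &src);	/* old helper, being phased out */
dst = src;			/* plain struct assignment, as in the hunks above */
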
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index bfa5558..4809e2e 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -184,7 +184,7 @@
 
 		entry->type = NETLBL_NLTYPE_ADDRSELECT;
 		entry->type_def.addrsel = addrmap;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	} else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
 		struct in6_addr *addr;
 		struct in6_addr *mask;
@@ -216,12 +216,12 @@
 			ret_val = -ENOMEM;
 			goto add_failure;
 		}
-		ipv6_addr_copy(&map->list.addr, addr);
+		map->list.addr = *addr;
 		map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
 		map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
 		map->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
 		map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
-		ipv6_addr_copy(&map->list.mask, mask);
+		map->list.mask = *mask;
 		map->list.valid = 1;
 		map->type = entry->type;
 
@@ -270,7 +270,7 @@
 	struct nlattr *nla_a;
 	struct nlattr *nla_b;
 	struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *iter6;
 #endif
 
@@ -324,7 +324,7 @@
 
 			nla_nest_end(skb, nla_b);
 		}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		netlbl_af6list_foreach_rcu(iter6,
 					   &entry->type_def.addrsel->list6) {
 			struct netlbl_domaddr6_map *map6;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e251c2c..4b5fa0f 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -170,7 +170,7 @@
 	struct netlbl_unlhsh_iface *iface;
 	struct netlbl_af4list *iter4;
 	struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *iter6;
 	struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -184,7 +184,7 @@
 		netlbl_af4list_remove_entry(iter4);
 		kfree(netlbl_unlhsh_addr4_entry(iter4));
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) {
 		netlbl_af6list_remove_entry(iter6);
 		kfree(netlbl_unlhsh_addr6_entry(iter6));
@@ -274,7 +274,7 @@
 	return ret_val;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
  * @iface: the associated interface entry
@@ -300,12 +300,12 @@
 	if (entry == NULL)
 		return -ENOMEM;
 
-	ipv6_addr_copy(&entry->list.addr, addr);
+	entry->list.addr = *addr;
 	entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
 	entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
 	entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
 	entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
-	ipv6_addr_copy(&entry->list.mask, mask);
+	entry->list.mask = *mask;
 	entry->list.valid = 1;
 	entry->secid = secid;
 
@@ -436,7 +436,7 @@
 						  mask4->s_addr);
 		break;
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case sizeof(struct in6_addr): {
 		const struct in6_addr *addr6 = addr;
 		const struct in6_addr *mask6 = mask;
@@ -531,7 +531,7 @@
 	return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
  * @net: network namespace
@@ -606,14 +606,14 @@
 static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
 {
 	struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *iter6;
 #endif /* IPv6 */
 
 	spin_lock(&netlbl_unlhsh_lock);
 	netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list)
 		goto unlhsh_condremove_failure;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list)
 		goto unlhsh_condremove_failure;
 #endif /* IPv6 */
@@ -680,7 +680,7 @@
 						     iface, addr, mask,
 						     audit_info);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case sizeof(struct in6_addr):
 		ret_val = netlbl_unlhsh_remove_addr6(net,
 						     iface, addr, mask,
@@ -1196,7 +1196,7 @@
 	struct netlbl_unlhsh_iface *iface;
 	struct list_head *iter_list;
 	struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct netlbl_af6list *addr6;
 #endif
 
@@ -1228,7 +1228,7 @@
 					goto unlabel_staticlist_return;
 				}
 			}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 			netlbl_af6list_foreach_rcu(addr6,
 						   &iface->addr6_list) {
 				if (iter_addr6++ < skip_addr6)
@@ -1277,7 +1277,7 @@
 	u32 skip_addr6 = cb->args[1];
 	u32 iter_addr4 = 0;
 	struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	u32 iter_addr6 = 0;
 	struct netlbl_af6list *addr6;
 #endif
@@ -1303,7 +1303,7 @@
 			goto unlabel_staticlistdef_return;
 		}
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
 		if (iter_addr6++ < skip_addr6)
 			continue;
@@ -1494,7 +1494,7 @@
 		secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid;
 		break;
 	}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6: {
 		struct ipv6hdr *hdr6;
 		struct netlbl_af6list *addr6;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1201b6d..629b061 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -139,12 +139,12 @@
 
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
-static u32 netlink_group_mask(u32 group)
+static inline u32 netlink_group_mask(u32 group)
 {
 	return group ? 1 << (group - 1) : 0;
 }
 
-static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
 {
 	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
 }
@@ -226,8 +226,7 @@
 		wake_up(&nl_table_wait);
 }
 
-static inline struct sock *netlink_lookup(struct net *net, int protocol,
-					  u32 pid)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
 {
 	struct nl_pid_hash *hash = &nl_table[protocol].hash;
 	struct hlist_head *head;
@@ -248,7 +247,7 @@
 	return sk;
 }
 
-static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
+static struct hlist_head *nl_pid_hash_zalloc(size_t size)
 {
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_ATOMIC);
@@ -258,7 +257,7 @@
 					 get_order(size));
 }
 
-static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
+static void nl_pid_hash_free(struct hlist_head *table, size_t size)
 {
 	if (size <= PAGE_SIZE)
 		kfree(table);
@@ -578,7 +577,7 @@
 	return err;
 }
 
-static inline int netlink_capable(struct socket *sock, unsigned int flag)
+static inline int netlink_capable(const struct socket *sock, unsigned int flag)
 {
 	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
 	       capable(CAP_NET_ADMIN);
@@ -846,8 +845,7 @@
 	sock_put(sk);
 }
 
-static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
-					   gfp_t allocation)
+static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 {
 	int delta;
 
@@ -871,7 +869,7 @@
 	return skb;
 }
 
-static inline void netlink_rcv_wake(struct sock *sk)
+static void netlink_rcv_wake(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -881,7 +879,7 @@
 		wake_up_interruptible(&nlk->wait);
 }
 
-static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
 {
 	int ret;
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -952,8 +950,7 @@
 }
 EXPORT_SYMBOL_GPL(netlink_has_listeners);
 
-static inline int netlink_broadcast_deliver(struct sock *sk,
-					    struct sk_buff *skb)
+static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -962,7 +959,7 @@
 		skb_set_owner_r(skb, sk);
 		skb_queue_tail(&sk->sk_receive_queue, skb);
 		sk->sk_data_ready(sk, skb->len);
-		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
+		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
 	}
 	return -1;
 }
@@ -982,7 +979,7 @@
 	void *tx_data;
 };
 
-static inline int do_one_broadcast(struct sock *sk,
+static int do_one_broadcast(struct sock *sk,
 				   struct netlink_broadcast_data *p)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -1110,8 +1107,7 @@
 	int code;
 };
 
-static inline int do_one_set_err(struct sock *sk,
-				 struct netlink_set_err_data *p)
+static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	int ret = 0;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 482fa57..a403b61 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -33,6 +33,14 @@
 }
 EXPORT_SYMBOL(genl_unlock);
 
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_genl_is_held(void)
+{
+	return lockdep_is_held(&genl_mutex);
+}
+EXPORT_SYMBOL(lockdep_genl_is_held);
+#endif
+
 #define GENL_FAM_TAB_SIZE	16
 #define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)
 
@@ -98,7 +106,7 @@
 /* Of course we are going to have problems once we hit
  * 2^16 alive types, but that can only happen by year 2K
 */
-static inline u16 genl_generate_id(void)
+static u16 genl_generate_id(void)
 {
 	static u16 id_gen_idx = GENL_MIN_ID;
 	int i;
@@ -784,6 +792,15 @@
 
 		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
 		res = genl_family_find_byname(name);
+#ifdef CONFIG_MODULES
+		if (res == NULL) {
+			genl_unlock();
+			request_module("net-pf-%d-proto-%d-type-%s",
+				       PF_NETLINK, NETLINK_GENERIC, name);
+			genl_lock();
+			res = genl_family_find_byname(name);
+		}
+#endif
 		err = -ENOENT;
 	}
 
@@ -946,3 +963,16 @@
 	return genlmsg_mcast(skb, pid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
+
+void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+		 struct nlmsghdr *nlh, gfp_t flags)
+{
+	struct sock *sk = net->genl_sock;
+	int report = 0;
+
+	if (nlh)
+		report = nlmsg_report(nlh);
+
+	nlmsg_notify(sk, skb, pid, group, report, flags);
+}
+EXPORT_SYMBOL(genl_notify);
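
Editor's note: with the request_module() call added above, a generic netlink family built as a module is loaded on demand when userspace asks the controller for it by name; the family module only has to declare a matching alias. A sketch, assuming a hypothetical family named "myfamily" (PF_NETLINK and NETLINK_GENERIC are both 16):

/* Matches the "net-pf-%d-proto-%d-type-%s" format string used by the
 * request_module() call added to ctrl_getfamily() above. */
MODULE_ALIAS("net-pf-16-proto-16-type-myfamily");
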
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 732152f..7dab229 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -306,26 +306,26 @@
 {
 	struct sock *sk = sock->sk;
 	struct nr_sock *nr = nr_sk(sk);
-	int opt;
+	unsigned long opt;
 
 	if (level != SOL_NETROM)
 		return -ENOPROTOOPT;
 
-	if (optlen < sizeof(int))
+	if (optlen < sizeof(unsigned int))
 		return -EINVAL;
 
-	if (get_user(opt, (int __user *)optval))
+	if (get_user(opt, (unsigned int __user *)optval))
 		return -EFAULT;
 
 	switch (optname) {
 	case NETROM_T1:
-		if (opt < 1)
+		if (opt < 1 || opt > ULONG_MAX / HZ)
 			return -EINVAL;
 		nr->t1 = opt * HZ;
 		return 0;
 
 	case NETROM_T2:
-		if (opt < 1)
+		if (opt < 1 || opt > ULONG_MAX / HZ)
 			return -EINVAL;
 		nr->t2 = opt * HZ;
 		return 0;
@@ -337,13 +337,13 @@
 		return 0;
 
 	case NETROM_T4:
-		if (opt < 1)
+		if (opt < 1 || opt > ULONG_MAX / HZ)
 			return -EINVAL;
 		nr->t4 = opt * HZ;
 		return 0;
 
 	case NETROM_IDLE:
-		if (opt < 0)
+		if (opt > ULONG_MAX / (60 * HZ))
 			return -EINVAL;
 		nr->idle = opt * 60 * HZ;
 		return 0;
@@ -1244,7 +1244,8 @@
 	case SIOCADDRT:
 	case SIOCDELRT:
 	case SIOCNRDECOBS:
-		if (!capable(CAP_NET_ADMIN)) return -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		return nr_rt_ioctl(cmd, argp);
 
 	default:
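
Editor's note: the setsockopt hunks above read the option as unsigned long and cap it before the multiplication, since opt * HZ (or opt * 60 * HZ for NETROM_IDLE) would otherwise overflow and arm a bogus timer. The guard, as a standalone sketch:

#include <linux/kernel.h>
#include <linux/jiffies.h>

static int demo_set_timer(unsigned long opt, unsigned long *timer)
{
	if (opt < 1 || opt > ULONG_MAX / HZ)
		return -EINVAL;		/* opt * HZ would wrap around */
	*timer = opt * HZ;
	return 0;
}
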
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 915a87b..2cf3301 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -670,14 +670,17 @@
 	case SIOCADDRT:
 		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
 			return -EFAULT;
+		if (nr_route.ndigis > AX25_MAX_DIGIS)
+			return -EINVAL;
 		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
 			return -EINVAL;
-		if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
-			dev_put(dev);
-			return -EINVAL;
-		}
 		switch (nr_route.type) {
 		case NETROM_NODE:
+			if (strnlen(nr_route.mnemonic, 7) == 7) {
+				ret = -EINVAL;
+				break;
+			}
+
 			ret = nr_add_node(&nr_route.callsign,
 				nr_route.mnemonic,
 				&nr_route.neighbour,
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 58cddad..44c865b 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,5 +14,6 @@
 	  be called nfc.
 
 source "net/nfc/nci/Kconfig"
+source "net/nfc/llcp/Kconfig"
 
 source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index fbb550f..7b4a6dc 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -6,3 +6,4 @@
 obj-$(CONFIG_NFC_NCI) += nci/
 
 nfc-objs := core.o netlink.o af_nfc.o rawsock.o
+nfc-$(CONFIG_NFC_LLCP)	+= llcp/llcp.o llcp/commands.o llcp/sock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 47e02c1..3ddf6e6 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -21,10 +21,13 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/nfc.h>
 
 #include "nfc.h"
 
@@ -33,25 +36,6 @@
 int nfc_devlist_generation;
 DEFINE_MUTEX(nfc_devlist_mutex);
 
-int nfc_printk(const char *level, const char *format, ...)
-{
-	struct va_format vaf;
-	va_list args;
-	int r;
-
-	va_start(args, format);
-
-	vaf.fmt = format;
-	vaf.va = &args;
-
-	r = printk("%sNFC: %pV\n", level, &vaf);
-
-	va_end(args);
-
-	return r;
-}
-EXPORT_SYMBOL(nfc_printk);
-
 /**
  * nfc_dev_up - turn on the NFC device
  *
@@ -63,7 +47,7 @@
 {
 	int rc = 0;
 
-	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
 	device_lock(&dev->dev);
 
@@ -97,7 +81,7 @@
 {
 	int rc = 0;
 
-	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
 	device_lock(&dev->dev);
 
@@ -139,7 +123,8 @@
 {
 	int rc;
 
-	nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols);
+	pr_debug("dev_name=%s protocols=0x%x\n",
+		 dev_name(&dev->dev), protocols);
 
 	if (!protocols)
 		return -EINVAL;
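
Editor's note: these hunks convert the driver from its private nfc_dbg()/nfc_printk() wrappers to the standard pr_debug()/pr_err() calls, with a pr_fmt() definition supplying a uniform prefix. The convention in sketch form (pr_fmt must be defined before the first include that pulls in printk.h):

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/kernel.h>

static void demo(void)
{
	/* prints "<module name>: demo: something happened" at debug level */
	pr_debug("something happened\n");
}
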
@@ -174,7 +159,7 @@
 {
 	int rc = 0;
 
-	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
 	device_lock(&dev->dev);
 
@@ -196,6 +181,86 @@
 	return rc;
 }
 
+int nfc_dep_link_up(struct nfc_dev *dev, int target_index,
+					u8 comm_mode, u8 rf_mode)
+{
+	int rc = 0;
+
+	pr_debug("dev_name=%s comm:%d rf:%d\n",
+			dev_name(&dev->dev), comm_mode, rf_mode);
+
+	if (!dev->ops->dep_link_up)
+		return -EOPNOTSUPP;
+
+	device_lock(&dev->dev);
+
+	if (!device_is_registered(&dev->dev)) {
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (dev->dep_link_up == true) {
+		rc = -EALREADY;
+		goto error;
+	}
+
+	rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode);
+
+error:
+	device_unlock(&dev->dev);
+	return rc;
+}
+
+int nfc_dep_link_down(struct nfc_dev *dev)
+{
+	int rc = 0;
+
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+
+	if (!dev->ops->dep_link_down)
+		return -EOPNOTSUPP;
+
+	device_lock(&dev->dev);
+
+	if (!device_is_registered(&dev->dev)) {
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (dev->dep_link_up == false) {
+		rc = -EALREADY;
+		goto error;
+	}
+
+	if (dev->dep_rf_mode == NFC_RF_TARGET) {
+		rc = -EOPNOTSUPP;
+		goto error;
+	}
+
+	rc = dev->ops->dep_link_down(dev);
+	if (!rc) {
+		dev->dep_link_up = false;
+		nfc_llcp_mac_is_down(dev);
+		nfc_genl_dep_link_down_event(dev);
+	}
+
+error:
+	device_unlock(&dev->dev);
+	return rc;
+}
+
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+					u8 comm_mode, u8 rf_mode)
+{
+	dev->dep_link_up = true;
+	dev->dep_rf_mode = rf_mode;
+
+	nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
+
+	return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
+}
+EXPORT_SYMBOL(nfc_dep_link_is_up);
+
 /**
  * nfc_activate_target - prepare the target for data exchange
  *
@@ -207,8 +272,8 @@
 {
 	int rc;
 
-	nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev),
-							target_idx, protocol);
+	pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
+		 dev_name(&dev->dev), target_idx, protocol);
 
 	device_lock(&dev->dev);
 
@@ -236,7 +301,8 @@
 {
 	int rc = 0;
 
-	nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx);
+	pr_debug("dev_name=%s target_idx=%u\n",
+		 dev_name(&dev->dev), target_idx);
 
 	device_lock(&dev->dev);
 
@@ -271,8 +337,8 @@
 {
 	int rc;
 
-	nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev),
-							target_idx, skb->len);
+	pr_debug("dev_name=%s target_idx=%u skb->len=%u\n",
+		 dev_name(&dev->dev), target_idx, skb->len);
 
 	device_lock(&dev->dev);
 
@@ -289,13 +355,54 @@
 	return rc;
 }
 
+int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+	pr_debug("dev_name=%s gb_len=%d\n",
+			dev_name(&dev->dev), gb_len);
+
+	if (gb_len > NFC_MAX_GT_LEN)
+		return -EINVAL;
+
+	return nfc_llcp_set_remote_gb(dev, gb, gb_len);
+}
+EXPORT_SYMBOL(nfc_set_remote_general_bytes);
+
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len)
+{
+	return nfc_llcp_general_bytes(dev, gt_len);
+}
+EXPORT_SYMBOL(nfc_get_local_general_bytes);
+
 /**
- * nfc_alloc_skb - allocate a skb for data exchange responses
+ * nfc_alloc_send_skb - allocate a skb for data exchange responses
  *
  * @size: size to allocate
  * @gfp: gfp flags
  */
-struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+					unsigned int flags, unsigned int size,
+					unsigned int *err)
+{
+	struct sk_buff *skb;
+	unsigned int total_size;
+
+	total_size = size +
+		dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+	skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err);
+	if (skb)
+		skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+	return skb;
+}
+
+/**
+ * nfc_alloc_recv_skb - allocate a skb for data exchange responses
+ *
+ * @size: size to allocate
+ * @gfp: gfp flags
+ */
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	unsigned int total_size;
@@ -308,7 +415,7 @@
 
 	return skb;
 }
-EXPORT_SYMBOL(nfc_alloc_skb);
+EXPORT_SYMBOL(nfc_alloc_recv_skb);
 
 /**
  * nfc_targets_found - inform that targets were found
@@ -326,7 +433,7 @@
 {
 	int i;
 
-	nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets);
+	pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
 
 	dev->polling = false;
 
@@ -360,7 +467,7 @@
 {
 	struct nfc_dev *dev = to_nfc_dev(d);
 
-	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
 	nfc_genl_data_exit(&dev->genl_data);
 	kfree(dev->targets);
@@ -446,7 +553,7 @@
 {
 	int rc;
 
-	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
 	mutex_lock(&nfc_devlist_mutex);
 	nfc_devlist_generation++;
@@ -456,11 +563,14 @@
 	if (rc < 0)
 		return rc;
 
+	rc = nfc_llcp_register_device(dev);
+	if (rc)
+		pr_err("Could not register llcp device\n");
+
 	rc = nfc_genl_device_added(dev);
 	if (rc)
-		nfc_dbg("The userspace won't be notified that the device %s was"
-						" added", dev_name(&dev->dev));
-
+		pr_debug("The userspace won't be notified that the device %s was added\n",
+			 dev_name(&dev->dev));
 
 	return 0;
 }
@@ -475,7 +585,7 @@
 {
 	int rc;
 
-	nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
 	mutex_lock(&nfc_devlist_mutex);
 	nfc_devlist_generation++;
@@ -488,10 +598,12 @@
 
 	mutex_unlock(&nfc_devlist_mutex);
 
+	nfc_llcp_unregister_device(dev);
+
 	rc = nfc_genl_device_removed(dev);
 	if (rc)
-		nfc_dbg("The userspace won't be notified that the device %s"
-					" was removed", dev_name(&dev->dev));
+		pr_debug("The userspace won't be notified that the device %s was removed\n",
+			 dev_name(&dev->dev));
 
 }
 EXPORT_SYMBOL(nfc_unregister_device);
@@ -500,7 +612,7 @@
 {
 	int rc;
 
-	nfc_info("NFC Core ver %s", VERSION);
+	pr_info("NFC Core ver %s\n", VERSION);
 
 	rc = class_register(&nfc_class);
 	if (rc)
@@ -517,6 +629,10 @@
 	if (rc)
 		goto err_rawsock;
 
+	rc = nfc_llcp_init();
+	if (rc)
+		goto err_llcp_sock;
+
 	rc = af_nfc_init();
 	if (rc)
 		goto err_af_nfc;
@@ -524,6 +640,8 @@
 	return 0;
 
 err_af_nfc:
+	nfc_llcp_exit();
+err_llcp_sock:
 	rawsock_exit();
 err_rawsock:
 	nfc_genl_exit();
@@ -535,6 +653,7 @@
 static void __exit nfc_exit(void)
 {
 	af_nfc_exit();
+	nfc_llcp_exit();
 	rawsock_exit();
 	nfc_genl_exit();
 	class_unregister(&nfc_class);
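A note on the logging convention used throughout the new NFC code: the custom nfc_printk()/nfc_dbg() helpers are dropped in favour of the generic pr_debug()/pr_err() macros, which expand their format string through pr_fmt(). Defining pr_fmt() before the includes therefore prefixes every message with the module name and the calling function. The following user-space sketch only illustrates the macro mechanics; printf() stands in for printk() and the "nfc" prefix stands in for KBUILD_MODNAME, both assumptions made purely for illustration.

#include <stdio.h>

/* pr_fmt() must be defined before the logging macros are used, as in the patch */
#define pr_fmt(fmt) "nfc" ": %s: " fmt, __func__

/* stand-in for the kernel's pr_debug(), which normally routes through printk() */
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints "nfc: main: dev_name=nfc0" */
	pr_debug("dev_name=%s\n", "nfc0");
	return 0;
}
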
diff --git a/net/nfc/llcp/Kconfig b/net/nfc/llcp/Kconfig
new file mode 100644
index 0000000..fbf5e81
--- /dev/null
+++ b/net/nfc/llcp/Kconfig
@@ -0,0 +1,7 @@
+config NFC_LLCP
+       depends on NFC && EXPERIMENTAL
+       bool "NFC LLCP support (EXPERIMENTAL)"
+       default n
+       help
+	 Say Y here if you want to build support for a kernel NFC LLCP
+	 implementation.
\ No newline at end of file
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
new file mode 100644
index 0000000..151f2ef
--- /dev/null
+++ b/net/nfc/llcp/commands.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include <net/nfc/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static u8 llcp_tlv_length[LLCP_TLV_MAX] = {
+	0,
+	1, /* VERSION */
+	2, /* MIUX */
+	2, /* WKS */
+	1, /* LTO */
+	1, /* RW */
+	0, /* SN */
+	1, /* OPT */
+	0, /* SDREQ */
+	2, /* SDRES */
+
+};
+
+static u8 llcp_tlv8(u8 *tlv, u8 type)
+{
+	if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
+		return 0;
+
+	return tlv[2];
+}
+
+static u16 llcp_tlv16(u8 *tlv, u8 type)
+{
+	if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
+		return 0;
+
+	return be16_to_cpu(*((__be16 *)(tlv + 2)));
+}
+
+
+static u8 llcp_tlv_version(u8 *tlv)
+{
+	return llcp_tlv8(tlv, LLCP_TLV_VERSION);
+}
+
+static u16 llcp_tlv_miux(u8 *tlv)
+{
+	return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f;
+}
+
+static u16 llcp_tlv_wks(u8 *tlv)
+{
+	return llcp_tlv16(tlv, LLCP_TLV_WKS);
+}
+
+static u16 llcp_tlv_lto(u8 *tlv)
+{
+	return llcp_tlv8(tlv, LLCP_TLV_LTO);
+}
+
+static u8 llcp_tlv_opt(u8 *tlv)
+{
+	return llcp_tlv8(tlv, LLCP_TLV_OPT);
+}
+
+static u8 llcp_tlv_rw(u8 *tlv)
+{
+	return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf;
+}
+
+u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
+{
+	u8 *tlv, length;
+
+	pr_debug("type %d\n", type);
+
+	if (type >= LLCP_TLV_MAX)
+		return NULL;
+
+	length = llcp_tlv_length[type];
+	if (length == 0 && value_length == 0)
+		return NULL;
+	else
+		length = value_length;
+
+	*tlv_length = 2 + length;
+	tlv = kzalloc(2 + length, GFP_KERNEL);
+	if (tlv == NULL)
+		return tlv;
+
+	tlv[0] = type;
+	tlv[1] = length;
+	memcpy(tlv + 2, value, length);
+
+	return tlv;
+}
+
+int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
+			u8 *tlv_array, u16 tlv_array_len)
+{
+	u8 *tlv = tlv_array, type, length, offset = 0;
+
+	pr_debug("TLV array length %d\n", tlv_array_len);
+
+	if (local == NULL)
+		return -ENODEV;
+
+	while (offset < tlv_array_len) {
+		type = tlv[0];
+		length = tlv[1];
+
+		pr_debug("type 0x%x length %d\n", type, length);
+
+		switch (type) {
+		case LLCP_TLV_VERSION:
+			local->remote_version = llcp_tlv_version(tlv);
+			break;
+		case LLCP_TLV_MIUX:
+			local->remote_miu = llcp_tlv_miux(tlv) + 128;
+			break;
+		case LLCP_TLV_WKS:
+			local->remote_wks = llcp_tlv_wks(tlv);
+			break;
+		case LLCP_TLV_LTO:
+			local->remote_lto = llcp_tlv_lto(tlv) * 10;
+			break;
+		case LLCP_TLV_OPT:
+			local->remote_opt = llcp_tlv_opt(tlv);
+			break;
+		case LLCP_TLV_RW:
+			local->remote_rw = llcp_tlv_rw(tlv);
+			break;
+		default:
+			pr_err("Invalid gt tlv value 0x%x\n", type);
+			break;
+		}
+
+		offset += length + 2;
+		tlv += length + 2;
+	}
+
+	pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
+		local->remote_version, local->remote_miu,
+		local->remote_lto, local->remote_opt,
+		local->remote_wks, local->remote_rw);
+
+	return 0;
+}
+
+static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
+					u8 dsap, u8 ssap, u8 ptype)
+{
+	u8 header[2];
+
+	pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+
+	header[0] = (u8)((dsap << 2) | (ptype >> 2));
+	header[1] = (u8)((ptype << 6) | ssap);
+
+	pr_debug("header 0x%x 0x%x\n", header[0], header[1]);
+
+	memcpy(skb_put(pdu, LLCP_HEADER_SIZE), header, LLCP_HEADER_SIZE);
+
+	return pdu;
+}
+
+static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length)
+{
+	/* XXX Add an skb length check */
+
+	if (tlv == NULL)
+		return NULL;
+
+	memcpy(skb_put(pdu, tlv_length), tlv, tlv_length);
+
+	return pdu;
+}
+
+static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
+							u8 cmd, u16 size)
+{
+	struct sk_buff *skb;
+	int err;
+
+	if (sock->ssap == 0)
+		return NULL;
+
+	skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+					size + LLCP_HEADER_SIZE, &err);
+	if (skb == NULL) {
+		pr_err("Could not allocate PDU\n");
+		return NULL;
+	}
+
+	skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd);
+
+	return skb;
+}
+
+int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
+{
+	struct sk_buff *skb;
+	struct nfc_dev *dev;
+	struct nfc_llcp_local *local;
+	u16 size = 0;
+
+	pr_debug("Sending DISC\n");
+
+	local = sock->local;
+	if (local == NULL)
+		return -ENODEV;
+
+	dev = sock->dev;
+	if (dev == NULL)
+		return -ENODEV;
+
+	size += LLCP_HEADER_SIZE;
+	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+	skb = alloc_skb(size, GFP_ATOMIC);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+	skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC);
+
+	skb_queue_tail(&local->tx_queue, skb);
+
+	return 0;
+}
+
+int nfc_llcp_send_symm(struct nfc_dev *dev)
+{
+	struct sk_buff *skb;
+	struct nfc_llcp_local *local;
+	u16 size = 0;
+
+	pr_debug("Sending SYMM\n");
+
+	local = nfc_llcp_find_local(dev);
+	if (local == NULL)
+		return -ENODEV;
+
+	size += LLCP_HEADER_SIZE;
+	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+	skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
+
+	return nfc_data_exchange(dev, local->target_idx, skb,
+					nfc_llcp_recv, local);
+}
+
+int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+{
+	struct nfc_llcp_local *local;
+	struct sk_buff *skb;
+	u8 *service_name_tlv = NULL, service_name_tlv_length;
+	int err;
+	u16 size = 0;
+
+	pr_debug("Sending CONNECT\n");
+
+	local = sock->local;
+	if (local == NULL)
+		return -ENODEV;
+
+	if (sock->service_name != NULL) {
+		service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
+					sock->service_name,
+					sock->service_name_len,
+					&service_name_tlv_length);
+		size += service_name_tlv_length;
+	}
+
+	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+
+	skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
+	if (skb == NULL) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
+
+	if (service_name_tlv != NULL)
+		skb = llcp_add_tlv(skb, service_name_tlv,
+					service_name_tlv_length);
+
+	skb_queue_tail(&local->tx_queue, skb);
+
+	return 0;
+
+error_tlv:
+	pr_err("error %d\n", err);
+
+	kfree(service_name_tlv);
+
+	return err;
+}
+
+int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
+{
+	struct nfc_llcp_local *local;
+	struct sk_buff *skb;
+
+	pr_debug("Sending CC\n");
+
+	local = sock->local;
+	if (local == NULL)
+		return -ENODEV;
+
+	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_queue_tail(&local->tx_queue, skb);
+
+	return 0;
+}
+
+int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
+{
+	struct sk_buff *skb;
+	struct nfc_dev *dev;
+	u16 size = 1; /* Reason code */
+
+	pr_debug("Sending DM reason 0x%x\n", reason);
+
+	if (local == NULL)
+		return -ENODEV;
+
+	dev = local->dev;
+	if (dev == NULL)
+		return -ENODEV;
+
+	size += LLCP_HEADER_SIZE;
+	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+	skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM);
+
+	memcpy(skb_put(skb, 1), &reason, 1);
+
+	skb_queue_head(&local->tx_queue, skb);
+
+	return 0;
+}
+
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
+{
+	struct sk_buff *skb;
+	struct nfc_llcp_local *local;
+
+	pr_debug("Send DISC\n");
+
+	local = sock->local;
+	if (local == NULL)
+		return -ENODEV;
+
+	skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_queue_head(&local->tx_queue, skb);
+
+	return 0;
+}
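The commands file above encodes LLCP link parameters as type-length-value (TLV) triplets: one type byte, one length byte, then the value, which is what nfc_llcp_build_tlv() emits and nfc_llcp_parse_tlv() walks. A minimal, self-contained sketch of that layout (user-space C; the two constants are copied from llcp.h, everything else is illustrative):

/* Build two one-byte TLVs and walk them the way nfc_llcp_parse_tlv() does. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LLCP_TLV_VERSION 0x1
#define LLCP_TLV_LTO     0x4

static size_t put_tlv(uint8_t *buf, uint8_t type, const uint8_t *val, uint8_t len)
{
	buf[0] = type;
	buf[1] = len;
	memcpy(buf + 2, val, len);
	return 2 + len;		/* same "2 + length" accounting as the patch */
}

int main(void)
{
	uint8_t gb[16], version = 0x11, lto = 150;
	size_t off = 0;

	off += put_tlv(gb + off, LLCP_TLV_VERSION, &version, 1);
	off += put_tlv(gb + off, LLCP_TLV_LTO, &lto, 1);

	/* walk the array: tlv[0] is the type, tlv[1] the length */
	for (size_t i = 0; i < off; i += 2 + gb[i + 1])
		printf("type 0x%x length %d value 0x%x\n", gb[i], gb[i + 1], gb[i + 2]);

	return 0;
}
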
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
new file mode 100644
index 0000000..1d32680
--- /dev/null
+++ b/net/nfc/llcp/llcp.c
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
+
+static struct list_head llcp_devices;
+
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
+{
+	struct nfc_llcp_sock *parent, *s, *n;
+	struct sock *sk, *parent_sk;
+	int i;
+
+
+	mutex_lock(&local->socket_lock);
+
+	for (i = 0; i < LLCP_MAX_SAP; i++) {
+		parent = local->sockets[i];
+		if (parent == NULL)
+			continue;
+
+		/* Release all child sockets */
+		list_for_each_entry_safe(s, n, &parent->list, list) {
+			list_del(&s->list);
+			sk = &s->sk;
+
+			lock_sock(sk);
+
+			if (sk->sk_state == LLCP_CONNECTED)
+				nfc_put_device(s->dev);
+
+			sk->sk_state = LLCP_CLOSED;
+			sock_set_flag(sk, SOCK_DEAD);
+
+			release_sock(sk);
+		}
+
+		parent_sk = &parent->sk;
+
+		lock_sock(parent_sk);
+
+		if (parent_sk->sk_state == LLCP_LISTEN) {
+			struct nfc_llcp_sock *lsk, *n;
+			struct sock *accept_sk;
+
+			list_for_each_entry_safe(lsk, n, &parent->accept_queue,
+								accept_queue) {
+				accept_sk = &lsk->sk;
+				lock_sock(accept_sk);
+
+				nfc_llcp_accept_unlink(accept_sk);
+
+				accept_sk->sk_state = LLCP_CLOSED;
+				sock_set_flag(accept_sk, SOCK_DEAD);
+
+				release_sock(accept_sk);
+
+				sock_orphan(accept_sk);
+			}
+		}
+
+		if (parent_sk->sk_state == LLCP_CONNECTED)
+			nfc_put_device(parent->dev);
+
+		parent_sk->sk_state = LLCP_CLOSED;
+		sock_set_flag(parent_sk, SOCK_DEAD);
+
+		release_sock(parent_sk);
+	}
+
+	mutex_unlock(&local->socket_lock);
+}
+
+static void nfc_llcp_timeout_work(struct work_struct *work)
+{
+	struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+							timeout_work);
+
+	nfc_dep_link_down(local->dev);
+}
+
+static void nfc_llcp_symm_timer(unsigned long data)
+{
+	struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+	pr_err("SYMM timeout\n");
+
+	queue_work(local->timeout_wq, &local->timeout_work);
+}
+
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
+{
+	struct nfc_llcp_local *local, *n;
+
+	list_for_each_entry_safe(local, n, &llcp_devices, list)
+		if (local->dev == dev)
+			return local;
+
+	pr_debug("No device found\n");
+
+	return NULL;
+}
+
+static char *wks[] = {
+	NULL,
+	NULL, /* SDP */
+	"urn:nfc:sn:ip",
+	"urn:nfc:sn:obex",
+	"urn:nfc:sn:snep",
+};
+
+static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
+{
+	int sap, num_wks;
+
+	pr_debug("%s\n", service_name);
+
+	if (service_name == NULL)
+		return -EINVAL;
+
+	num_wks = ARRAY_SIZE(wks);
+
+	for (sap = 0 ; sap < num_wks; sap++) {
+		if (wks[sap] == NULL)
+			continue;
+
+		if (strncmp(wks[sap], service_name, service_name_len) == 0)
+			return sap;
+	}
+
+	return -EINVAL;
+}
+
+u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+				struct nfc_llcp_sock *sock)
+{
+	mutex_lock(&local->sdp_lock);
+
+	if (sock->service_name != NULL && sock->service_name_len > 0) {
+		int ssap = nfc_llcp_wks_sap(sock->service_name,
+						sock->service_name_len);
+
+		if (ssap > 0) {
+			pr_debug("WKS %d\n", ssap);
+
+			/* This is a WKS, let's check if it's free */
+			if (local->local_wks & BIT(ssap)) {
+				mutex_unlock(&local->sdp_lock);
+
+				return LLCP_SAP_MAX;
+			}
+
+			set_bit(BIT(ssap), &local->local_wks);
+			mutex_unlock(&local->sdp_lock);
+
+			return ssap;
+		}
+
+		/*
+		 * This is not a well known service,
+		 * we should try to find a local SDP free spot
+		 */
+		ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
+		if (ssap == LLCP_SDP_NUM_SAP) {
+			mutex_unlock(&local->sdp_lock);
+
+			return LLCP_SAP_MAX;
+		}
+
+		pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
+
+		set_bit(BIT(ssap), &local->local_sdp);
+		mutex_unlock(&local->sdp_lock);
+
+		return LLCP_WKS_NUM_SAP + ssap;
+
+	} else if (sock->ssap != 0) {
+		if (sock->ssap < LLCP_WKS_NUM_SAP) {
+			if (!(local->local_wks & BIT(sock->ssap))) {
+				set_bit(BIT(sock->ssap), &local->local_wks);
+				mutex_unlock(&local->sdp_lock);
+
+				return sock->ssap;
+			}
+
+		} else if (sock->ssap < LLCP_SDP_NUM_SAP) {
+			if (!(local->local_sdp &
+				BIT(sock->ssap - LLCP_WKS_NUM_SAP))) {
+				set_bit(BIT(sock->ssap - LLCP_WKS_NUM_SAP),
+							&local->local_sdp);
+				mutex_unlock(&local->sdp_lock);
+
+				return sock->ssap;
+			}
+		}
+	}
+
+	mutex_unlock(&local->sdp_lock);
+
+	return LLCP_SAP_MAX;
+}
+
+u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local)
+{
+	u8 local_ssap;
+
+	mutex_lock(&local->sdp_lock);
+
+	local_ssap = find_first_zero_bit(&local->local_sap, LLCP_LOCAL_NUM_SAP);
+	if (local_ssap == LLCP_LOCAL_NUM_SAP) {
+		mutex_unlock(&local->sdp_lock);
+		return LLCP_SAP_MAX;
+	}
+
+	set_bit(BIT(local_ssap), &local->local_sap);
+
+	mutex_unlock(&local->sdp_lock);
+
+	return local_ssap + LLCP_LOCAL_SAP_OFFSET;
+}
+
+void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
+{
+	u8 local_ssap;
+	unsigned long *sdp;
+
+	if (ssap < LLCP_WKS_NUM_SAP) {
+		local_ssap = ssap;
+		sdp = &local->local_wks;
+	} else if (ssap < LLCP_LOCAL_NUM_SAP) {
+		local_ssap = ssap - LLCP_WKS_NUM_SAP;
+		sdp = &local->local_sdp;
+	} else if (ssap < LLCP_MAX_SAP) {
+		local_ssap = ssap - LLCP_LOCAL_NUM_SAP;
+		sdp = &local->local_sap;
+	} else {
+		return;
+	}
+
+	mutex_lock(&local->sdp_lock);
+
+	clear_bit(1 << local_ssap, sdp);
+
+	mutex_unlock(&local->sdp_lock);
+}
+
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len)
+{
+	struct nfc_llcp_local *local;
+
+	local = nfc_llcp_find_local(dev);
+	if (local == NULL) {
+		*general_bytes_len = 0;
+		return NULL;
+	}
+
+	*general_bytes_len = local->gb_len;
+
+	return local->gb;
+}
+
+static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+{
+	u8 *gb_cur, *version_tlv, version, version_length;
+	u8 *lto_tlv, lto, lto_length;
+	u8 *wks_tlv, wks_length;
+	u8 gb_len = 0;
+
+	version = LLCP_VERSION_11;
+	version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+							1, &version_length);
+	gb_len += version_length;
+
+	/* 1500 ms */
+	lto = 150;
+	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
+	gb_len += lto_length;
+
+	pr_debug("Local wks 0x%lx\n", local->local_wks);
+	wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
+								&wks_length);
+	gb_len += wks_length;
+
+	gb_len += ARRAY_SIZE(llcp_magic);
+
+	if (gb_len > NFC_MAX_GT_LEN) {
+		kfree(version_tlv);
+		return -EINVAL;
+	}
+
+	gb_cur = local->gb;
+
+	memcpy(gb_cur, llcp_magic, ARRAY_SIZE(llcp_magic));
+	gb_cur += ARRAY_SIZE(llcp_magic);
+
+	memcpy(gb_cur, version_tlv, version_length);
+	gb_cur += version_length;
+
+	memcpy(gb_cur, lto_tlv, lto_length);
+	gb_cur += lto_length;
+
+	memcpy(gb_cur, wks_tlv, wks_length);
+	gb_cur += wks_length;
+
+	kfree(version_tlv);
+	kfree(lto_tlv);
+
+	local->gb_len = gb_len;
+
+	return 0;
+}
+
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+	struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+
+	if (local == NULL) {
+		pr_err("No LLCP device\n");
+		return -ENODEV;
+	}
+
+	memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
+	memcpy(local->remote_gb, gb, gb_len);
+	local->remote_gb_len = gb_len;
+
+	if (local->remote_gb == NULL ||
+			local->remote_gb_len == 0)
+		return -ENODEV;
+
+	if (memcmp(local->remote_gb, llcp_magic, 3)) {
+		pr_err("MAC does not support LLCP\n");
+		return -EINVAL;
+	}
+
+	return nfc_llcp_parse_tlv(local,
+			&local->remote_gb[3], local->remote_gb_len - 3);
+}
+
+static void nfc_llcp_tx_work(struct work_struct *work)
+{
+	struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+							tx_work);
+	struct sk_buff *skb;
+
+	skb = skb_dequeue(&local->tx_queue);
+	if (skb != NULL) {
+		pr_debug("Sending pending skb\n");
+		nfc_data_exchange(local->dev, local->target_idx,
+					skb, nfc_llcp_recv, local);
+	} else {
+		nfc_llcp_send_symm(local->dev);
+	}
+
+	mod_timer(&local->link_timer,
+			jiffies + msecs_to_jiffies(local->remote_lto));
+}
+
+static u8 nfc_llcp_dsap(struct sk_buff *pdu)
+{
+	return (pdu->data[0] & 0xfc) >> 2;
+}
+
+static u8 nfc_llcp_ptype(struct sk_buff *pdu)
+{
+	return ((pdu->data[0] & 0x03) << 2) | ((pdu->data[1] & 0xc0) >> 6);
+}
+
+static u8 nfc_llcp_ssap(struct sk_buff *pdu)
+{
+	return pdu->data[1] & 0x3f;
+}
+
+static u8 nfc_llcp_ns(struct sk_buff *pdu)
+{
+	return pdu->data[2] >> 4;
+}
+
+static u8 nfc_llcp_nr(struct sk_buff *pdu)
+{
+	return pdu->data[2] & 0xf;
+}
+
+static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
+{
+	pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16);
+	sock->send_n = (sock->send_n + 1) % 16;
+	sock->recv_ack_n = (sock->recv_n - 1) % 16;
+}
+
+static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+						u8 ssap, u8 dsap)
+{
+	struct nfc_llcp_sock *sock, *llcp_sock, *n;
+
+	if (ssap == 0 && dsap == 0)
+		return NULL;
+
+	mutex_lock(&local->socket_lock);
+	sock = local->sockets[ssap];
+	if (sock == NULL) {
+		mutex_unlock(&local->socket_lock);
+		return NULL;
+	}
+
+	pr_debug("root dsap %d (%d)\n", sock->dsap, dsap);
+
+	if (sock->dsap == dsap) {
+		sock_hold(&sock->sk);
+		mutex_unlock(&local->socket_lock);
+		return sock;
+	}
+
+	list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
+		pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
+				&llcp_sock->sk, llcp_sock->dsap);
+		if (llcp_sock->dsap == dsap) {
+			sock_hold(&llcp_sock->sk);
+			mutex_unlock(&local->socket_lock);
+			return llcp_sock;
+		}
+	}
+
+	pr_err("Could not find socket for %d %d\n", ssap, dsap);
+
+	mutex_unlock(&local->socket_lock);
+
+	return NULL;
+}
+
+static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+{
+	sock_put(&sock->sk);
+}
+
+static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
+{
+	u8 *tlv = &skb->data[2], type, length;
+	size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0;
+
+	while (offset < tlv_array_len) {
+		type = tlv[0];
+		length = tlv[1];
+
+		pr_debug("type 0x%x length %d\n", type, length);
+
+		if (type == LLCP_TLV_SN) {
+			*sn_len = length;
+			return &tlv[2];
+		}
+
+		offset += length + 2;
+		tlv += length + 2;
+	}
+
+	return NULL;
+}
+
+static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
+				struct sk_buff *skb)
+{
+	struct sock *new_sk, *parent;
+	struct nfc_llcp_sock *sock, *new_sock;
+	u8 dsap, ssap, bound_sap, reason;
+
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+
+	pr_debug("%d %d\n", dsap, ssap);
+
+	nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
+				skb->len - LLCP_HEADER_SIZE);
+
+	if (dsap != LLCP_SAP_SDP) {
+		bound_sap = dsap;
+
+		mutex_lock(&local->socket_lock);
+		sock = local->sockets[dsap];
+		if (sock == NULL) {
+			mutex_unlock(&local->socket_lock);
+			reason = LLCP_DM_NOBOUND;
+			goto fail;
+		}
+
+		sock_hold(&sock->sk);
+		mutex_unlock(&local->socket_lock);
+
+		lock_sock(&sock->sk);
+
+		if (sock->dsap == LLCP_SAP_SDP &&
+				sock->sk.sk_state == LLCP_LISTEN)
+			goto enqueue;
+	} else {
+		u8 *sn;
+		size_t sn_len;
+
+		sn = nfc_llcp_connect_sn(skb, &sn_len);
+		if (sn == NULL) {
+			reason = LLCP_DM_NOBOUND;
+			goto fail;
+		}
+
+		pr_debug("Service name length %zu\n", sn_len);
+
+		mutex_lock(&local->socket_lock);
+		for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
+								bound_sap++) {
+			sock = local->sockets[bound_sap];
+			if (sock == NULL)
+				continue;
+
+			if (sock->service_name == NULL ||
+				sock->service_name_len == 0)
+					continue;
+
+			if (sock->service_name_len != sn_len)
+				continue;
+
+			if (sock->dsap == LLCP_SAP_SDP &&
+					sock->sk.sk_state == LLCP_LISTEN &&
+					!memcmp(sn, sock->service_name, sn_len)) {
+				pr_debug("Found service name at SAP %d\n",
+								bound_sap);
+				sock_hold(&sock->sk);
+				mutex_unlock(&local->socket_lock);
+
+				lock_sock(&sock->sk);
+
+				goto enqueue;
+			}
+		}
+		mutex_unlock(&local->socket_lock);
+	}
+
+	reason = LLCP_DM_NOBOUND;
+	goto fail;
+
+enqueue:
+	parent = &sock->sk;
+
+	if (sk_acceptq_is_full(parent)) {
+		reason = LLCP_DM_REJ;
+		release_sock(&sock->sk);
+		sock_put(&sock->sk);
+		goto fail;
+	}
+
+	new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type,
+				     GFP_ATOMIC);
+	if (new_sk == NULL) {
+		reason = LLCP_DM_REJ;
+		release_sock(&sock->sk);
+		sock_put(&sock->sk);
+		goto fail;
+	}
+
+	new_sock = nfc_llcp_sock(new_sk);
+	new_sock->dev = local->dev;
+	new_sock->local = local;
+	new_sock->nfc_protocol = sock->nfc_protocol;
+	new_sock->ssap = bound_sap;
+	new_sock->dsap = ssap;
+	new_sock->parent = parent;
+
+	pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
+
+	list_add_tail(&new_sock->list, &sock->list);
+
+	nfc_llcp_accept_enqueue(&sock->sk, new_sk);
+
+	nfc_get_device(local->dev->idx);
+
+	new_sk->sk_state = LLCP_CONNECTED;
+
+	/* Wake the listening processes */
+	parent->sk_data_ready(parent, 0);
+
+	/* Send CC */
+	nfc_llcp_send_cc(new_sock);
+
+	release_sock(&sock->sk);
+	sock_put(&sock->sk);
+
+	return;
+
+fail:
+	/* Send DM */
+	nfc_llcp_send_dm(local, dsap, ssap, reason);
+
+	return;
+
+}
+
+static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
+				struct sk_buff *skb)
+{
+	struct nfc_llcp_sock *llcp_sock;
+	struct sock *sk;
+	u8 dsap, ssap, ptype, ns, nr;
+
+	ptype = nfc_llcp_ptype(skb);
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+	ns = nfc_llcp_ns(skb);
+	nr = nfc_llcp_nr(skb);
+
+	pr_debug("%d %d R %d S %d\n", dsap, ssap, nr, ns);
+
+	llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+	if (llcp_sock == NULL) {
+		nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+		return;
+	}
+
+	sk = &llcp_sock->sk;
+	lock_sock(sk);
+	if (sk->sk_state == LLCP_CLOSED) {
+		release_sock(sk);
+		nfc_llcp_sock_put(llcp_sock);
+	}
+
+	if (ns == llcp_sock->recv_n)
+		llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
+	else
+		pr_err("Received out of sequence I PDU\n");
+
+	/* Pass the payload upstream */
+	if (ptype == LLCP_PDU_I) {
+		pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
+
+		skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
+		if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
+			pr_err("receive queue is full\n");
+			skb_queue_head(&llcp_sock->tx_backlog_queue, skb);
+		}
+	}
+
+	/* Remove skbs from the pending queue */
+	if (llcp_sock->send_ack_n != nr) {
+		struct sk_buff *s, *tmp;
+
+		llcp_sock->send_ack_n = nr;
+
+		skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp)
+			if (nfc_llcp_ns(s) <= nr) {
+				skb_unlink(s, &llcp_sock->tx_pending_queue);
+				kfree_skb(s);
+			}
+	}
+
+	/* Queue some I frames for transmission */
+	while (llcp_sock->remote_ready &&
+		skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) {
+		struct sk_buff *pdu, *pending_pdu;
+
+		pdu = skb_dequeue(&llcp_sock->tx_queue);
+		if (pdu == NULL)
+			break;
+
+		/* Update N(S)/N(R) */
+		nfc_llcp_set_nrns(llcp_sock, pdu);
+
+		pending_pdu = skb_clone(pdu, GFP_KERNEL);
+
+		skb_queue_tail(&local->tx_queue, pdu);
+		skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu);
+	}
+
+	release_sock(sk);
+	nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
+				struct sk_buff *skb)
+{
+	struct nfc_llcp_sock *llcp_sock;
+	struct sock *sk;
+	u8 dsap, ssap;
+
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+
+	llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+	if (llcp_sock == NULL) {
+		nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+		return;
+	}
+
+	sk = &llcp_sock->sk;
+	lock_sock(sk);
+	if (sk->sk_state == LLCP_CLOSED) {
+		release_sock(sk);
+		nfc_llcp_sock_put(llcp_sock);
+	}
+
+
+	if (sk->sk_state == LLCP_CONNECTED) {
+		nfc_put_device(local->dev);
+		sk->sk_state = LLCP_CLOSED;
+		sk->sk_state_change(sk);
+	}
+
+	nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_DISC);
+
+	release_sock(sk);
+	nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
+				struct sk_buff *skb)
+{
+	struct nfc_llcp_sock *llcp_sock;
+	u8 dsap, ssap;
+
+
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+
+	llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+
+	if (llcp_sock == NULL)
+		llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
+
+	if (llcp_sock == NULL) {
+		pr_err("Invalid CC\n");
+		nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+
+		return;
+	}
+
+	llcp_sock->dsap = ssap;
+
+	nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
+				skb->len - LLCP_HEADER_SIZE);
+
+	nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_rx_work(struct work_struct *work)
+{
+	struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+								rx_work);
+	u8 dsap, ssap, ptype;
+	struct sk_buff *skb;
+
+	skb = local->rx_pending;
+	if (skb == NULL) {
+		pr_debug("No pending SKB\n");
+		return;
+	}
+
+	ptype = nfc_llcp_ptype(skb);
+	dsap = nfc_llcp_dsap(skb);
+	ssap = nfc_llcp_ssap(skb);
+
+	pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+
+	switch (ptype) {
+	case LLCP_PDU_SYMM:
+		pr_debug("SYMM\n");
+		break;
+
+	case LLCP_PDU_CONNECT:
+		pr_debug("CONNECT\n");
+		nfc_llcp_recv_connect(local, skb);
+		break;
+
+	case LLCP_PDU_DISC:
+		pr_debug("DISC\n");
+		nfc_llcp_recv_disc(local, skb);
+		break;
+
+	case LLCP_PDU_CC:
+		pr_debug("CC\n");
+		nfc_llcp_recv_cc(local, skb);
+		break;
+
+	case LLCP_PDU_I:
+	case LLCP_PDU_RR:
+		pr_debug("I frame\n");
+		nfc_llcp_recv_hdlc(local, skb);
+		break;
+
+	}
+
+	queue_work(local->tx_wq, &local->tx_work);
+	kfree_skb(local->rx_pending);
+	local->rx_pending = NULL;
+
+	return;
+}
+
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
+{
+	struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+	pr_debug("Received an LLCP PDU\n");
+	if (err < 0) {
+		pr_err("err %d", err);
+		return;
+	}
+
+	local->rx_pending = skb_get(skb);
+	del_timer(&local->link_timer);
+	queue_work(local->rx_wq, &local->rx_work);
+
+	return;
+}
+
+void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+{
+	struct nfc_llcp_local *local;
+
+	local = nfc_llcp_find_local(dev);
+	if (local == NULL)
+		return;
+
+	/* Close and purge all existing sockets */
+	nfc_llcp_socket_release(local);
+}
+
+void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+			u8 comm_mode, u8 rf_mode)
+{
+	struct nfc_llcp_local *local;
+
+	pr_debug("rf mode %d\n", rf_mode);
+
+	local = nfc_llcp_find_local(dev);
+	if (local == NULL)
+		return;
+
+	local->target_idx = target_idx;
+	local->comm_mode = comm_mode;
+	local->rf_mode = rf_mode;
+
+	if (rf_mode == NFC_RF_INITIATOR) {
+		pr_debug("Queueing Tx work\n");
+
+		queue_work(local->tx_wq, &local->tx_work);
+	} else {
+		mod_timer(&local->link_timer,
+			jiffies + msecs_to_jiffies(local->remote_lto));
+	}
+}
+
+int nfc_llcp_register_device(struct nfc_dev *ndev)
+{
+	struct device *dev = &ndev->dev;
+	struct nfc_llcp_local *local;
+	char name[32];
+	int err;
+
+	local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL);
+	if (local == NULL)
+		return -ENOMEM;
+
+	local->dev = ndev;
+	INIT_LIST_HEAD(&local->list);
+	mutex_init(&local->sdp_lock);
+	mutex_init(&local->socket_lock);
+	init_timer(&local->link_timer);
+	local->link_timer.data = (unsigned long) local;
+	local->link_timer.function = nfc_llcp_symm_timer;
+
+	skb_queue_head_init(&local->tx_queue);
+	INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
+	snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
+	local->tx_wq = alloc_workqueue(name,
+			WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (local->tx_wq == NULL) {
+		err = -ENOMEM;
+		goto err_local;
+	}
+
+	local->rx_pending = NULL;
+	INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
+	snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
+	local->rx_wq = alloc_workqueue(name,
+			WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (local->rx_wq == NULL) {
+		err = -ENOMEM;
+		goto err_tx_wq;
+	}
+
+	INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
+	snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
+	local->timeout_wq = alloc_workqueue(name,
+			WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (local->timeout_wq == NULL) {
+		err = -ENOMEM;
+		goto err_rx_wq;
+	}
+
+	nfc_llcp_build_gb(local);
+
+	local->remote_miu = LLCP_DEFAULT_MIU;
+	local->remote_lto = LLCP_DEFAULT_LTO;
+	local->remote_rw = LLCP_DEFAULT_RW;
+
+	list_add(&llcp_devices, &local->list);
+
+	return 0;
+
+err_rx_wq:
+	destroy_workqueue(local->rx_wq);
+
+err_tx_wq:
+	destroy_workqueue(local->tx_wq);
+
+err_local:
+	kfree(local);
+
+	return 0;
+}
+
+void nfc_llcp_unregister_device(struct nfc_dev *dev)
+{
+	struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+
+	if (local == NULL) {
+		pr_debug("No such device\n");
+		return;
+	}
+
+	list_del(&local->list);
+	nfc_llcp_socket_release(local);
+	del_timer_sync(&local->link_timer);
+	skb_queue_purge(&local->tx_queue);
+	destroy_workqueue(local->tx_wq);
+	destroy_workqueue(local->rx_wq);
+	kfree_skb(local->rx_pending);
+	kfree(local);
+}
+
+int __init nfc_llcp_init(void)
+{
+	INIT_LIST_HEAD(&llcp_devices);
+
+	return nfc_llcp_sock_init();
+}
+
+void nfc_llcp_exit(void)
+{
+	nfc_llcp_sock_exit();
+}
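llcp.c packs every PDU behind a two-byte header: the DSAP in the top six bits of the first byte, the four-bit PDU type split across the byte boundary, and the SSAP in the low six bits of the second byte, as implemented by llcp_add_header() and decoded by nfc_llcp_dsap()/nfc_llcp_ptype()/nfc_llcp_ssap(). A small round-trip sketch of that packing (user-space C, illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same bit layout as llcp_add_header() */
static void pack(uint8_t hdr[2], uint8_t dsap, uint8_t ssap, uint8_t ptype)
{
	hdr[0] = (uint8_t)((dsap << 2) | (ptype >> 2));
	hdr[1] = (uint8_t)((ptype << 6) | ssap);
}

int main(void)
{
	uint8_t hdr[2];
	const uint8_t dsap = 0x20, ssap = 0x01, ptype = 0x4; /* CONNECT */

	pack(hdr, dsap, ssap, ptype);

	/* same extraction as nfc_llcp_dsap(), nfc_llcp_ptype(), nfc_llcp_ssap() */
	assert(((hdr[0] & 0xfc) >> 2) == dsap);
	assert((((hdr[0] & 0x03) << 2) | ((hdr[1] & 0xc0) >> 6)) == ptype);
	assert((hdr[1] & 0x3f) == ssap);

	printf("header 0x%02x 0x%02x\n", hdr[0], hdr[1]);
	return 0;
}
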
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
new file mode 100644
index 0000000..0ad2e33
--- /dev/null
+++ b/net/nfc/llcp/llcp.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+enum llcp_state {
+	LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+	LLCP_CLOSED,
+	LLCP_BOUND,
+	LLCP_LISTEN,
+};
+
+#define LLCP_DEFAULT_LTO 100
+#define LLCP_DEFAULT_RW  1
+#define LLCP_DEFAULT_MIU 128
+
+#define LLCP_WKS_NUM_SAP   16
+#define LLCP_SDP_NUM_SAP   16
+#define LLCP_LOCAL_NUM_SAP 32
+#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
+#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)
+
+struct nfc_llcp_sock;
+
+struct nfc_llcp_local {
+	struct list_head list;
+	struct nfc_dev *dev;
+
+	struct mutex sdp_lock;
+	struct mutex socket_lock;
+
+	struct timer_list link_timer;
+	struct sk_buff_head tx_queue;
+	struct workqueue_struct	*tx_wq;
+	struct work_struct	 tx_work;
+	struct workqueue_struct	*rx_wq;
+	struct work_struct	 rx_work;
+	struct sk_buff *rx_pending;
+	struct workqueue_struct	*timeout_wq;
+	struct work_struct	 timeout_work;
+
+	u32 target_idx;
+	u8 rf_mode;
+	u8 comm_mode;
+	unsigned long local_wks;      /* Well known services */
+	unsigned long local_sdp;      /* Local services  */
+	unsigned long local_sap; /* Local SAPs, not available for discovery */
+
+	/* local */
+	u8 gb[NFC_MAX_GT_LEN];
+	u8 gb_len;
+
+	/* remote */
+	u8 remote_gb[NFC_MAX_GT_LEN];
+	u8 remote_gb_len;
+
+	u8  remote_version;
+	u16 remote_miu;
+	u16 remote_lto;
+	u8  remote_opt;
+	u16 remote_wks;
+	u8  remote_rw;
+
+	/* sockets array */
+	struct nfc_llcp_sock *sockets[LLCP_MAX_SAP];
+};
+
+struct nfc_llcp_sock {
+	struct sock sk;
+	struct list_head list;
+	struct nfc_dev *dev;
+	struct nfc_llcp_local *local;
+	u32 target_idx;
+	u32 nfc_protocol;
+
+	u8 ssap;
+	u8 dsap;
+	char *service_name;
+	size_t service_name_len;
+
+	/* Link variables */
+	u8 send_n;
+	u8 send_ack_n;
+	u8 recv_n;
+	u8 recv_ack_n;
+
+	/* Is the remote peer ready to receive */
+	u8 remote_ready;
+
+	struct sk_buff_head tx_queue;
+	struct sk_buff_head tx_pending_queue;
+	struct sk_buff_head tx_backlog_queue;
+
+	struct list_head accept_queue;
+	struct sock *parent;
+};
+
+#define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk))
+#define nfc_llcp_dev(sk)  (nfc_llcp_sock((sk))->dev)
+
+#define LLCP_HEADER_SIZE   2
+#define LLCP_SEQUENCE_SIZE 1
+
+/* LLCP versions: 1.1 is 1.0 plus SDP */
+#define LLCP_VERSION_10 0x10
+#define LLCP_VERSION_11 0x11
+
+/* LLCP PDU types */
+#define LLCP_PDU_SYMM     0x0
+#define LLCP_PDU_PAX      0x1
+#define LLCP_PDU_AGF      0x2
+#define LLCP_PDU_UI       0x3
+#define LLCP_PDU_CONNECT  0x4
+#define LLCP_PDU_DISC     0x5
+#define LLCP_PDU_CC       0x6
+#define LLCP_PDU_DM       0x7
+#define LLCP_PDU_FRMR     0x8
+#define LLCP_PDU_SNL      0x9
+#define LLCP_PDU_I        0xc
+#define LLCP_PDU_RR       0xd
+#define LLCP_PDU_RNR      0xe
+
+/* Parameters TLV types */
+#define LLCP_TLV_VERSION 0x1
+#define LLCP_TLV_MIUX    0x2
+#define LLCP_TLV_WKS     0x3
+#define LLCP_TLV_LTO     0x4
+#define LLCP_TLV_RW      0x5
+#define LLCP_TLV_SN      0x6
+#define LLCP_TLV_OPT     0x7
+#define LLCP_TLV_SDREQ   0x8
+#define LLCP_TLV_SDRES   0x9
+#define LLCP_TLV_MAX     0xa
+
+/* Well known LLCP SAP */
+#define LLCP_SAP_SDP   0x1
+#define LLCP_SAP_IP    0x2
+#define LLCP_SAP_OBEX  0x3
+#define LLCP_SAP_SNEP  0x4
+#define LLCP_SAP_MAX   0xff
+
+/* Disconnection reason code */
+#define LLCP_DM_DISC    0x00
+#define LLCP_DM_NOCONN  0x01
+#define LLCP_DM_NOBOUND 0x02
+#define LLCP_DM_REJ     0x03
+
+
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+				struct nfc_llcp_sock *sock);
+u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
+void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
+
+/* Sock API */
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
+void nfc_llcp_sock_free(struct nfc_llcp_sock *sock);
+void nfc_llcp_accept_unlink(struct sock *sk);
+void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
+struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
+
+/* TLV API */
+int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
+			u8 *tlv_array, u16 tlv_array_len);
+
+/* Commands API */
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
+u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length);
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
+int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_symm(struct nfc_dev *dev);
+int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
+
+/* Socket API */
+int __init nfc_llcp_sock_init(void);
+void nfc_llcp_sock_exit(void);
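The SAP macros above split the 6-bit address space three ways: 0-15 for well-known services, 16-31 for services advertised through SDP, and 32-63 for local SAPs that are not discoverable, which is the same partition nfc_llcp_put_ssap() relies on when returning a SAP to the right bitmap. A short classification sketch (user-space C, constants copied from the header, illustrative only):

#include <stdio.h>

#define LLCP_WKS_NUM_SAP   16
#define LLCP_SDP_NUM_SAP   16
#define LLCP_LOCAL_NUM_SAP 32
#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)

/* mirrors the range checks in nfc_llcp_put_ssap() */
static const char *sap_class(unsigned int sap)
{
	if (sap < LLCP_WKS_NUM_SAP)
		return "well-known";
	if (sap < LLCP_LOCAL_SAP_OFFSET)
		return "SDP advertised";
	if (sap < LLCP_MAX_SAP)
		return "local, not discoverable";
	return "out of range";
}

int main(void)
{
	unsigned int saps[] = { 1, 4, 17, 32, 63, 64 };

	for (unsigned int i = 0; i < sizeof(saps) / sizeof(saps[0]); i++)
		printf("SAP %2u: %s\n", saps[i], sap_class(saps[i]));

	return 0;
}
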
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
new file mode 100644
index 0000000..f738ccd
--- /dev/null
+++ b/net/nfc/llcp/sock.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static struct proto llcp_sock_proto = {
+	.name     = "NFC_LLCP",
+	.owner    = THIS_MODULE,
+	.obj_size = sizeof(struct nfc_llcp_sock),
+};
+
+static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+	struct sock *sk = sock->sk;
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+	struct nfc_llcp_local *local;
+	struct nfc_dev *dev;
+	struct sockaddr_nfc_llcp llcp_addr;
+	int len, ret = 0;
+
+	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
+	if (!addr || addr->sa_family != AF_NFC)
+		return -EINVAL;
+
+	memset(&llcp_addr, 0, sizeof(llcp_addr));
+	len = min_t(unsigned int, sizeof(llcp_addr), alen);
+	memcpy(&llcp_addr, addr, len);
+
+	/* This is going to be a listening socket, dsap must be 0 */
+	if (llcp_addr.dsap != 0)
+		return -EINVAL;
+
+	lock_sock(sk);
+
+	if (sk->sk_state != LLCP_CLOSED) {
+		ret = -EBADFD;
+		goto error;
+	}
+
+	dev = nfc_get_device(llcp_addr.dev_idx);
+	if (dev == NULL) {
+		ret = -ENODEV;
+		goto error;
+	}
+
+	local = nfc_llcp_find_local(dev);
+	if (local == NULL) {
+		ret = -ENODEV;
+		goto put_dev;
+	}
+
+	llcp_sock->dev = dev;
+	llcp_sock->local = local;
+	llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+	llcp_sock->service_name_len = min_t(unsigned int,
+			llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+	llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+				llcp_sock->service_name_len, GFP_KERNEL);
+
+	llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+	if (llcp_sock->ssap == LLCP_MAX_SAP)
+		goto put_dev;
+
+	local->sockets[llcp_sock->ssap] = llcp_sock;
+
+	pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
+
+	sk->sk_state = LLCP_BOUND;
+
+put_dev:
+	nfc_put_device(dev);
+
+error:
+	release_sock(sk);
+	return ret;
+}
+
+static int llcp_sock_listen(struct socket *sock, int backlog)
+{
+	struct sock *sk = sock->sk;
+	int ret = 0;
+
+	pr_debug("sk %p backlog %d\n", sk, backlog);
+
+	lock_sock(sk);
+
+	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+			|| sk->sk_state != LLCP_BOUND) {
+		ret = -EBADFD;
+		goto error;
+	}
+
+	sk->sk_max_ack_backlog = backlog;
+	sk->sk_ack_backlog = 0;
+
+	pr_debug("Socket listening\n");
+	sk->sk_state = LLCP_LISTEN;
+
+error:
+	release_sock(sk);
+
+	return ret;
+}
+
+void nfc_llcp_accept_unlink(struct sock *sk)
+{
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+	pr_debug("state %d\n", sk->sk_state);
+
+	list_del_init(&llcp_sock->accept_queue);
+	sk_acceptq_removed(llcp_sock->parent);
+	llcp_sock->parent = NULL;
+
+	sock_put(sk);
+}
+
+void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
+{
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+	struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent);
+
+	/* Lock will be free from unlink */
+	sock_hold(sk);
+
+	list_add_tail(&llcp_sock->accept_queue,
+			&llcp_sock_parent->accept_queue);
+	llcp_sock->parent = parent;
+	sk_acceptq_added(parent);
+}
+
+struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
+					struct socket *newsock)
+{
+	struct nfc_llcp_sock *lsk, *n, *llcp_parent;
+	struct sock *sk;
+
+	llcp_parent = nfc_llcp_sock(parent);
+
+	list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
+							accept_queue) {
+		sk = &lsk->sk;
+		lock_sock(sk);
+
+		if (sk->sk_state == LLCP_CLOSED) {
+			release_sock(sk);
+			nfc_llcp_accept_unlink(sk);
+			continue;
+		}
+
+		if (sk->sk_state == LLCP_CONNECTED || !newsock) {
+			nfc_llcp_accept_unlink(sk);
+			if (newsock)
+				sock_graft(sk, newsock);
+
+			release_sock(sk);
+
+			pr_debug("Returning sk state %d\n", sk->sk_state);
+
+			return sk;
+		}
+
+		release_sock(sk);
+	}
+
+	return NULL;
+}
+
+static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
+								int flags)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	struct sock *sk = sock->sk, *new_sk;
+	long timeo;
+	int ret = 0;
+
+	pr_debug("parent %p\n", sk);
+
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+	if (sk->sk_state != LLCP_LISTEN) {
+		ret = -EBADFD;
+		goto error;
+	}
+
+	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+	/* Wait for an incoming connection. */
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
+	while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (!timeo) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = sock_intr_errno(timeo);
+			break;
+		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(sk_sleep(sk), &wait);
+
+	if (ret)
+		goto error;
+
+	newsock->state = SS_CONNECTED;
+
+	pr_debug("new socket %p\n", new_sk);
+
+error:
+	release_sock(sk);
+
+	return ret;
+}
+
+static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
+			     int *len, int peer)
+{
+	struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr;
+	struct sock *sk = sock->sk;
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+	pr_debug("%p\n", sk);
+
+	addr->sa_family = AF_NFC;
+	*len = sizeof(struct sockaddr_nfc_llcp);
+
+	llcp_addr->dev_idx = llcp_sock->dev->idx;
+	llcp_addr->dsap = llcp_sock->dsap;
+	llcp_addr->ssap = llcp_sock->ssap;
+	llcp_addr->service_name_len = llcp_sock->service_name_len;
+	memcpy(llcp_addr->service_name, llcp_sock->service_name,
+					llcp_addr->service_name_len);
+
+	return 0;
+}
+
+static inline unsigned int llcp_accept_poll(struct sock *parent)
+{
+	struct nfc_llcp_sock *llcp_sock, *n, *parent_sock;
+	struct sock *sk;
+
+	parent_sock = nfc_llcp_sock(parent);
+
+	list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
+								accept_queue) {
+		sk = &llcp_sock->sk;
+
+		if (sk->sk_state == LLCP_CONNECTED)
+			return POLLIN | POLLRDNORM;
+	}
+
+	return 0;
+}
+
+static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
+							poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	unsigned int mask = 0;
+
+	pr_debug("%p\n", sk);
+
+	sock_poll_wait(file, sk_sleep(sk), wait);
+
+	if (sk->sk_state == LLCP_LISTEN)
+		return llcp_accept_poll(sk);
+
+	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+		mask |= POLLERR;
+
+	if (!skb_queue_empty(&sk->sk_receive_queue))
+		mask |= POLLIN;
+
+	if (sk->sk_state == LLCP_CLOSED)
+		mask |= POLLHUP;
+
+	return mask;
+}
+
+static int llcp_sock_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct nfc_llcp_local *local;
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+	if (!sk)
+		return 0;
+
+	pr_debug("%p\n", sk);
+
+	local = llcp_sock->local;
+	if (local == NULL)
+		return -ENODEV;
+
+	mutex_lock(&local->socket_lock);
+
+	if (llcp_sock == local->sockets[llcp_sock->ssap]) {
+		local->sockets[llcp_sock->ssap] = NULL;
+	} else {
+		struct nfc_llcp_sock *parent, *s, *n;
+
+		parent = local->sockets[llcp_sock->ssap];
+
+		list_for_each_entry_safe(s, n, &parent->list, list)
+			if (llcp_sock == s) {
+				list_del(&s->list);
+				break;
+			}
+
+	}
+
+	mutex_unlock(&local->socket_lock);
+
+	lock_sock(sk);
+
+	/* Send a DISC */
+	if (sk->sk_state == LLCP_CONNECTED)
+		nfc_llcp_disconnect(llcp_sock);
+
+	if (sk->sk_state == LLCP_LISTEN) {
+		struct nfc_llcp_sock *lsk, *n;
+		struct sock *accept_sk;
+
+		list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
+								accept_queue) {
+			accept_sk = &lsk->sk;
+			lock_sock(accept_sk);
+
+			nfc_llcp_disconnect(lsk);
+			nfc_llcp_accept_unlink(accept_sk);
+
+			release_sock(accept_sk);
+
+			sock_set_flag(sk, SOCK_DEAD);
+			sock_orphan(accept_sk);
+			sock_put(accept_sk);
+		}
+	}
+
+	/* Freeing the SAP */
+	if ((sk->sk_state == LLCP_CONNECTED
+			&& llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
+	    sk->sk_state == LLCP_BOUND ||
+	    sk->sk_state == LLCP_LISTEN)
+		nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
+
+	sock_set_flag(sk, SOCK_DEAD);
+
+	release_sock(sk);
+
+	sock_orphan(sk);
+	sock_put(sk);
+
+	return 0;
+}
+
+static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+							int len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+	struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr;
+	struct nfc_dev *dev;
+	struct nfc_llcp_local *local;
+	int ret = 0;
+
+	pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
+
+	if (!addr || len < sizeof(struct sockaddr_nfc) ||
+			addr->sa_family != AF_NFC) {
+		pr_err("Invalid socket\n");
+		return -EINVAL;
+	}
+
+	if (addr->service_name_len == 0 && addr->dsap == 0) {
+		pr_err("Missing service name or dsap\n");
+		return -EINVAL;
+	}
+
+	pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
+					addr->target_idx, addr->nfc_protocol);
+
+	lock_sock(sk);
+
+	if (sk->sk_state == LLCP_CONNECTED) {
+		ret = -EISCONN;
+		goto error;
+	}
+
+	dev = nfc_get_device(addr->dev_idx);
+	if (dev == NULL) {
+		ret = -ENODEV;
+		goto error;
+	}
+
+	local = nfc_llcp_find_local(dev);
+	if (local == NULL) {
+		ret = -ENODEV;
+		goto put_dev;
+	}
+
+	device_lock(&dev->dev);
+	if (dev->dep_link_up == false) {
+		ret = -ENOLINK;
+		device_unlock(&dev->dev);
+		goto put_dev;
+	}
+	device_unlock(&dev->dev);
+
+	if (local->rf_mode == NFC_RF_INITIATOR &&
+			addr->target_idx != local->target_idx) {
+		ret = -ENOLINK;
+		goto put_dev;
+	}
+
+	llcp_sock->dev = dev;
+	llcp_sock->local = local;
+	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+		ret = -ENOMEM;
+		goto put_dev;
+	}
+	if (addr->service_name_len == 0)
+		llcp_sock->dsap = addr->dsap;
+	else
+		llcp_sock->dsap = LLCP_SAP_SDP;
+	llcp_sock->nfc_protocol = addr->nfc_protocol;
+	llcp_sock->service_name_len = min_t(unsigned int,
+			addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+	llcp_sock->service_name = kmemdup(addr->service_name,
+				 llcp_sock->service_name_len, GFP_KERNEL);
+
+	local->sockets[llcp_sock->ssap] = llcp_sock;
+
+	ret = nfc_llcp_send_connect(llcp_sock);
+	if (ret)
+		goto put_dev;
+
+	sk->sk_state = LLCP_CONNECTED;
+
+	release_sock(sk);
+	return 0;
+
+put_dev:
+	nfc_put_device(dev);
+
+error:
+	release_sock(sk);
+	return ret;
+}
+
+static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+			     struct msghdr *msg, size_t len, int flags)
+{
+	int noblock = flags & MSG_DONTWAIT;
+	struct sock *sk = sock->sk;
+	unsigned int copied, rlen;
+	struct sk_buff *skb, *cskb;
+	int err = 0;
+
+	pr_debug("%p %zu\n", sk, len);
+
+	lock_sock(sk);
+
+	if (sk->sk_state == LLCP_CLOSED &&
+			skb_queue_empty(&sk->sk_receive_queue)) {
+		release_sock(sk);
+		return 0;
+	}
+
+	release_sock(sk);
+
+	if (flags & (MSG_OOB))
+		return -EOPNOTSUPP;
+
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	if (!skb) {
+		pr_err("Recv datagram failed state %d %d %d",
+				sk->sk_state, err, sock_error(sk));
+
+		if (sk->sk_shutdown & RCV_SHUTDOWN)
+			return 0;
+
+		return err;
+	}
+
+	rlen   = skb->len;		/* real length of skb */
+	copied = min_t(unsigned int, rlen, len);
+
+	cskb = skb;
+	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+		if (!(flags & MSG_PEEK))
+			skb_queue_head(&sk->sk_receive_queue, skb);
+		return -EFAULT;
+	}
+
+	/* Mark read part of skb as used */
+	if (!(flags & MSG_PEEK)) {
+
+		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
+		if (sk->sk_type == SOCK_STREAM) {
+			skb_pull(skb, copied);
+			if (skb->len) {
+				skb_queue_head(&sk->sk_receive_queue, skb);
+				goto done;
+			}
+		}
+
+		kfree_skb(skb);
+	}
+
+	/* XXX Queue backlogged skbs */
+
+done:
+	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
+	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
+		copied = rlen;
+
+	return copied;
+}
+
+static const struct proto_ops llcp_sock_ops = {
+	.family         = PF_NFC,
+	.owner          = THIS_MODULE,
+	.bind           = llcp_sock_bind,
+	.connect        = llcp_sock_connect,
+	.release        = llcp_sock_release,
+	.socketpair     = sock_no_socketpair,
+	.accept         = llcp_sock_accept,
+	.getname        = llcp_sock_getname,
+	.poll           = llcp_sock_poll,
+	.ioctl          = sock_no_ioctl,
+	.listen         = llcp_sock_listen,
+	.shutdown       = sock_no_shutdown,
+	.setsockopt     = sock_no_setsockopt,
+	.getsockopt     = sock_no_getsockopt,
+	.sendmsg        = sock_no_sendmsg,
+	.recvmsg        = llcp_sock_recvmsg,
+	.mmap           = sock_no_mmap,
+};
+
+static void llcp_sock_destruct(struct sock *sk)
+{
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+	pr_debug("%p\n", sk);
+
+	if (sk->sk_state == LLCP_CONNECTED)
+		nfc_put_device(llcp_sock->dev);
+
+	skb_queue_purge(&sk->sk_receive_queue);
+
+	nfc_llcp_sock_free(llcp_sock);
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		pr_err("Freeing alive NFC LLCP socket %p\n", sk);
+		return;
+	}
+}
+
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
+{
+	struct sock *sk;
+	struct nfc_llcp_sock *llcp_sock;
+
+	sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
+	if (!sk)
+		return NULL;
+
+	llcp_sock = nfc_llcp_sock(sk);
+
+	sock_init_data(sock, sk);
+	sk->sk_state = LLCP_CLOSED;
+	sk->sk_protocol = NFC_SOCKPROTO_LLCP;
+	sk->sk_type = type;
+	sk->sk_destruct = llcp_sock_destruct;
+
+	llcp_sock->ssap = 0;
+	llcp_sock->dsap = LLCP_SAP_SDP;
+	llcp_sock->send_n = llcp_sock->send_ack_n = 0;
+	llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
+	llcp_sock->remote_ready = 1;
+	skb_queue_head_init(&llcp_sock->tx_queue);
+	skb_queue_head_init(&llcp_sock->tx_pending_queue);
+	skb_queue_head_init(&llcp_sock->tx_backlog_queue);
+	INIT_LIST_HEAD(&llcp_sock->list);
+	INIT_LIST_HEAD(&llcp_sock->accept_queue);
+
+	if (sock != NULL)
+		sock->state = SS_UNCONNECTED;
+
+	return sk;
+}
+
+void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
+{
+	kfree(sock->service_name);
+
+	skb_queue_purge(&sock->tx_queue);
+	skb_queue_purge(&sock->tx_pending_queue);
+	skb_queue_purge(&sock->tx_backlog_queue);
+
+	list_del_init(&sock->accept_queue);
+
+	sock->parent = NULL;
+}
+
+static int llcp_sock_create(struct net *net, struct socket *sock,
+				const struct nfc_protocol *nfc_proto)
+{
+	struct sock *sk;
+
+	pr_debug("%p\n", sock);
+
+	if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM)
+		return -ESOCKTNOSUPPORT;
+
+	sock->ops = &llcp_sock_ops;
+
+	sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
+	if (sk == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static const struct nfc_protocol llcp_nfc_proto = {
+	.id	  = NFC_SOCKPROTO_LLCP,
+	.proto    = &llcp_sock_proto,
+	.owner    = THIS_MODULE,
+	.create   = llcp_sock_create
+};
+
+int __init nfc_llcp_sock_init(void)
+{
+	return nfc_proto_register(&llcp_nfc_proto);
+}
+
+void nfc_llcp_sock_exit(void)
+{
+	nfc_proto_unregister(&llcp_nfc_proto);
+}
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 3925c657..7650139 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/completion.h>
@@ -69,7 +71,7 @@
 	__u32 timeout)
 {
 	int rc = 0;
-	unsigned long completion_rc;
+	long completion_rc;
 
 	ndev->req_status = NCI_REQ_PEND;
 
@@ -79,7 +81,7 @@
 							&ndev->req_completion,
 							timeout);
 
-	nfc_dbg("wait_for_completion return %ld", completion_rc);
+	pr_debug("wait_for_completion return %ld\n", completion_rc);
 
 	if (completion_rc > 0) {
 		switch (ndev->req_status) {
@@ -96,8 +98,8 @@
 			break;
 		}
 	} else {
-		nfc_err("wait_for_completion_interruptible_timeout failed %ld",
-			completion_rc);
+		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
+		       completion_rc);
 
 		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
 	}
@@ -126,7 +128,10 @@
 
 static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
 {
-	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+	struct nci_core_reset_cmd cmd;
+
+	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
+	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
 }
 
 static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
@@ -136,17 +141,11 @@
 
 static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
 {
-	struct nci_core_conn_create_cmd conn_cmd;
 	struct nci_rf_disc_map_cmd cmd;
 	struct disc_map_config *cfg = cmd.mapping_configs;
 	__u8 *num = &cmd.num_mapping_configs;
 	int i;
 
-	/* create static rf connection */
-	conn_cmd.target_handle = 0;
-	conn_cmd.num_target_specific_params = 0;
-	nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
-
 	/* set rf mapping configurations */
 	*num = 0;
 
@@ -155,14 +154,16 @@
 		if (ndev->supported_rf_interfaces[i] ==
 			NCI_RF_INTERFACE_ISO_DEP) {
 			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
-			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
-			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
+			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
+				NCI_DISC_MAP_MODE_LISTEN;
+			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
 			(*num)++;
 		} else if (ndev->supported_rf_interfaces[i] ==
 			NCI_RF_INTERFACE_NFC_DEP) {
 			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
-			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
-			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
+			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
+				NCI_DISC_MAP_MODE_LISTEN;
+			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
 			(*num)++;
 		}
 
@@ -187,16 +188,16 @@
 		|| protocols & NFC_PROTO_MIFARE_MASK
 		|| protocols & NFC_PROTO_ISO14443_MASK
 		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
-		cmd.disc_configs[cmd.num_disc_configs].type =
-		NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
+		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+		NCI_NFC_A_PASSIVE_POLL_MODE;
 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
 		cmd.num_disc_configs++;
 	}
 
 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
 		(protocols & NFC_PROTO_ISO14443_MASK)) {
-		cmd.disc_configs[cmd.num_disc_configs].type =
-		NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
+		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+		NCI_NFC_B_PASSIVE_POLL_MODE;
 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
 		cmd.num_disc_configs++;
 	}
@@ -204,8 +205,8 @@
 	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
 		(protocols & NFC_PROTO_FELICA_MASK
 		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
-		cmd.disc_configs[cmd.num_disc_configs].type =
-		NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
+		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+		NCI_NFC_F_PASSIVE_POLL_MODE;
 		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
 		cmd.num_disc_configs++;
 	}
@@ -326,8 +327,6 @@
 {
 	struct nci_dev *ndev = (void *) arg;
 
-	nfc_dbg("entry");
-
 	atomic_set(&ndev->cmd_cnt, 1);
 	queue_work(ndev->cmd_wq, &ndev->cmd_work);
 }
@@ -336,8 +335,6 @@
 {
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dbg("entry");
-
 	return nci_open_device(ndev);
 }
 
@@ -345,8 +342,6 @@
 {
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dbg("entry");
-
 	return nci_close_device(ndev);
 }
 
@@ -355,20 +350,18 @@
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 	int rc;
 
-	nfc_dbg("entry");
-
 	if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
-		nfc_err("unable to start poll, since poll is already active");
+		pr_err("unable to start poll, since poll is already active\n");
 		return -EBUSY;
 	}
 
 	if (ndev->target_active_prot) {
-		nfc_err("there is an active target");
+		pr_err("there is an active target\n");
 		return -EBUSY;
 	}
 
 	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
-		nfc_dbg("target is active, implicitly deactivate...");
+		pr_debug("target is active, implicitly deactivate...\n");
 
 		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
 			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
@@ -389,10 +382,8 @@
 {
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dbg("entry");
-
 	if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
-		nfc_err("unable to stop poll, since poll is not active");
+		pr_err("unable to stop poll, since poll is not active\n");
 		return;
 	}
 
@@ -405,21 +396,21 @@
 {
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+	pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
 
 	if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
-		nfc_err("there is no available target to activate");
+		pr_err("there is no available target to activate\n");
 		return -EINVAL;
 	}
 
 	if (ndev->target_active_prot) {
-		nfc_err("there is already an active target");
+		pr_err("there is already an active target\n");
 		return -EBUSY;
 	}
 
 	if (!(ndev->target_available_prots & (1 << protocol))) {
-		nfc_err("target does not support the requested protocol 0x%x",
-			protocol);
+		pr_err("target does not support the requested protocol 0x%x\n",
+		       protocol);
 		return -EINVAL;
 	}
 
@@ -433,10 +424,10 @@
 {
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-	nfc_dbg("entry, target_idx %d", target_idx);
+	pr_debug("target_idx %d\n", target_idx);
 
 	if (!ndev->target_active_prot) {
-		nfc_err("unable to deactivate target, no active target");
+		pr_err("unable to deactivate target, no active target\n");
 		return;
 	}
 
@@ -456,10 +447,10 @@
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 	int rc;
 
-	nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+	pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
 
 	if (!ndev->target_active_prot) {
-		nfc_err("unable to exchange data, no active target");
+		pr_err("unable to exchange data, no active target\n");
 		return -EINVAL;
 	}
 
@@ -470,7 +461,7 @@
 	ndev->data_exchange_cb = cb;
 	ndev->data_exchange_cb_context = cb_context;
 
-	rc = nci_send_data(ndev, ndev->conn_id, skb);
+	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
 	if (rc)
 		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
 
@@ -502,7 +493,7 @@
 {
 	struct nci_dev *ndev;
 
-	nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+	pr_debug("supported_protocols 0x%x\n", supported_protocols);
 
 	if (!ops->open || !ops->close || !ops->send)
 		return NULL;
@@ -542,8 +533,6 @@
  */
 void nci_free_device(struct nci_dev *ndev)
 {
-	nfc_dbg("entry");
-
 	nfc_free_device(ndev->nfc_dev);
 	kfree(ndev);
 }
@@ -560,8 +549,6 @@
 	struct device *dev = &ndev->nfc_dev->dev;
 	char name[32];
 
-	nfc_dbg("entry");
-
 	rc = nfc_register_device(ndev->nfc_dev);
 	if (rc)
 		goto exit;
@@ -624,8 +611,6 @@
  */
 void nci_unregister_device(struct nci_dev *ndev)
 {
-	nfc_dbg("entry");
-
 	nci_close_device(ndev);
 
 	destroy_workqueue(ndev->cmd_wq);
@@ -645,7 +630,7 @@
 {
 	struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 
-	nfc_dbg("entry, len %d", skb->len);
+	pr_debug("len %d\n", skb->len);
 
 	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
 		&& !test_bit(NCI_INIT, &ndev->flags))) {
@@ -665,7 +650,7 @@
 {
 	struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 
-	nfc_dbg("entry, len %d", skb->len);
+	pr_debug("len %d\n", skb->len);
 
 	if (!ndev) {
 		kfree_skb(skb);
@@ -684,11 +669,11 @@
 	struct nci_ctrl_hdr *hdr;
 	struct sk_buff *skb;
 
-	nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
 
 	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
 	if (!skb) {
-		nfc_err("no memory for command");
+		pr_err("no memory for command\n");
 		return -ENOMEM;
 	}
 
@@ -718,7 +703,7 @@
 	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
 	struct sk_buff *skb;
 
-	nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
 
 	/* Send queued tx data */
 	while (atomic_read(&ndev->credits_cnt)) {
@@ -726,12 +711,15 @@
 		if (!skb)
 			return;
 
-		atomic_dec(&ndev->credits_cnt);
+		/* Check if data flow control is used */
+		if (atomic_read(&ndev->credits_cnt) !=
+				NCI_DATA_FLOW_CONTROL_NOT_USED)
+			atomic_dec(&ndev->credits_cnt);
 
-		nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
-				nci_pbf(skb->data),
-				nci_conn_id(skb->data),
-				nci_plen(skb->data));
+		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+			 nci_pbf(skb->data),
+			 nci_conn_id(skb->data),
+			 nci_plen(skb->data));
 
 		nci_send_frame(skb);
 	}
@@ -760,7 +748,7 @@
 			break;
 
 		default:
-			nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
 			kfree_skb(skb);
 			break;
 		}
@@ -774,7 +762,7 @@
 	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
 	struct sk_buff *skb;
 
-	nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
 
 	/* Send queued command */
 	if (atomic_read(&ndev->cmd_cnt)) {
@@ -784,11 +772,11 @@
 
 		atomic_dec(&ndev->cmd_cnt);
 
-		nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-				nci_pbf(skb->data),
-				nci_opcode_gid(nci_opcode(skb->data)),
-				nci_opcode_oid(nci_opcode(skb->data)),
-				nci_plen(skb->data));
+		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+			 nci_pbf(skb->data),
+			 nci_opcode_gid(nci_opcode(skb->data)),
+			 nci_opcode_oid(nci_opcode(skb->data)),
+			 nci_plen(skb->data));
 
 		nci_send_frame(skb);
 
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index e5ed90f..e5756b3 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -21,6 +21,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
@@ -40,7 +42,7 @@
 	data_exchange_cb_t cb = ndev->data_exchange_cb;
 	void *cb_context = ndev->data_exchange_cb_context;
 
-	nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
+	pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
 
 	if (cb) {
 		ndev->data_exchange_cb = NULL;
@@ -49,7 +51,7 @@
 		/* forward skb to nfc core */
 		cb(cb_context, skb, err);
 	} else if (skb) {
-		nfc_err("no rx callback, dropping rx data...");
+		pr_err("no rx callback, dropping rx data...\n");
 
 		/* no waiting callback, free skb */
 		kfree_skb(skb);
@@ -90,12 +92,13 @@
 	int frag_len;
 	int rc = 0;
 
-	nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
+	pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
 
 	__skb_queue_head_init(&frags_q);
 
 	while (total_len) {
-		frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
+		frag_len =
+			min_t(int, total_len, ndev->max_data_pkt_payload_size);
 
 		skb_frag = nci_skb_alloc(ndev,
 					(NCI_DATA_HDR_SIZE + frag_len),
@@ -118,8 +121,8 @@
 		data += frag_len;
 		total_len -= frag_len;
 
-		nfc_dbg("frag_len %d, remaining total_len %d",
-			frag_len, total_len);
+		pr_debug("frag_len %d, remaining total_len %d\n",
+			 frag_len, total_len);
 	}
 
 	/* queue all fragments atomically */
@@ -148,10 +151,10 @@
 {
 	int rc = 0;
 
-	nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
+	pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
 
 	/* check if the packet needs to be fragmented */
-	if (skb->len <= ndev->max_pkt_payload_size) {
+	if (skb->len <= ndev->max_data_pkt_payload_size) {
 		/* no need to fragment packet */
 		nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
 
@@ -160,7 +163,7 @@
 		/* fragment packet and queue the fragments */
 		rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
 		if (rc) {
-			nfc_err("failed to fragment tx data packet");
+			pr_err("failed to fragment tx data packet\n");
 			goto free_exit;
 		}
 	}
@@ -190,7 +193,7 @@
 
 		/* first, make enough room for the already accumulated data */
 		if (skb_cow_head(skb, reassembly_len)) {
-			nfc_err("error adding room for accumulated rx data");
+			pr_err("error adding room for accumulated rx data\n");
 
 			kfree_skb(skb);
 			skb = 0;
@@ -227,19 +230,19 @@
 {
 	__u8 pbf = nci_pbf(skb->data);
 
-	nfc_dbg("entry, len %d", skb->len);
+	pr_debug("len %d\n", skb->len);
 
-	nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
-			nci_pbf(skb->data),
-			nci_conn_id(skb->data),
-			nci_plen(skb->data));
+	pr_debug("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+		 nci_pbf(skb->data),
+		 nci_conn_id(skb->data),
+		 nci_plen(skb->data));
 
 	/* strip the nci data header */
 	skb_pull(skb, NCI_DATA_HDR_SIZE);
 
 	if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
 		/* frame I/F => remove the status byte */
-		nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
+		pr_debug("NFC_PROTO_MIFARE => remove the status byte\n");
 		skb_trim(skb, (skb->len - 1));
 	}
 
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index b19dc2f..6a63e5e 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -42,12 +42,9 @@
 	case NCI_STATUS_REJECTED:
 		return -EBUSY;
 
-	case NCI_STATUS_MESSAGE_CORRUPTED:
+	case NCI_STATUS_RF_FRAME_CORRUPTED:
 		return -EBADMSG;
 
-	case NCI_STATUS_BUFFER_FULL:
-		return -ENOBUFS;
-
 	case NCI_STATUS_NOT_INITIALIZED:
 		return -EHOSTDOWN;
 
@@ -80,12 +77,6 @@
 	case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
 		return -ETIMEDOUT;
 
-	case NCI_STATUS_RF_LINK_LOSS_ERROR:
-		return -ENOLINK;
-
-	case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
-		return -EDQUOT;
-
 	case NCI_STATUS_FAILED:
 	default:
 		return -ENOSYS;
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 96633f5..b16a8dc 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -43,18 +45,21 @@
 	struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
 	int i;
 
-	nfc_dbg("entry, num_entries %d", ntf->num_entries);
+	pr_debug("num_entries %d\n", ntf->num_entries);
 
 	if (ntf->num_entries > NCI_MAX_NUM_CONN)
 		ntf->num_entries = NCI_MAX_NUM_CONN;
 
 	/* update the credits */
 	for (i = 0; i < ntf->num_entries; i++) {
-		nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
-			ntf->conn_entries[i].conn_id,
-			ntf->conn_entries[i].credits);
+		ntf->conn_entries[i].conn_id =
+			nci_conn_id(&ntf->conn_entries[i].conn_id);
 
-		if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
+		pr_debug("entry[%d]: conn_id %d, credits %d\n",
+			 i, ntf->conn_entries[i].conn_id,
+			 ntf->conn_entries[i].credits);
+
+		if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
 			/* found static rf connection */
 			atomic_add(ntf->conn_entries[i].credits,
 				&ndev->credits_cnt);
@@ -66,31 +71,34 @@
 		queue_work(ndev->tx_wq, &ndev->tx_work);
 }
 
-static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
-					struct sk_buff *skb)
+static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+						struct sk_buff *skb)
 {
-	struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
+	struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
 
-	nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
+	ntf->conn_id = nci_conn_id(&ntf->conn_id);
+
+	pr_debug("status 0x%x, conn_id %d\n", ntf->status, ntf->conn_id);
+
+	/* complete the data exchange transaction, if exists */
+	if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+		nci_data_exchange_complete(ndev, NULL, -EIO);
 }
 
-static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
-			struct nci_rf_activate_ntf *ntf, __u8 *data)
+static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
+			struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
 {
 	struct rf_tech_specific_params_nfca_poll *nfca_poll;
-	struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
 
 	nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
-	nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
 
 	nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
 	data += 2;
 
 	nfca_poll->nfcid1_len = *data++;
 
-	nfc_dbg("sens_res 0x%x, nfcid1_len %d",
-		nfca_poll->sens_res,
-		nfca_poll->nfcid1_len);
+	pr_debug("sens_res 0x%x, nfcid1_len %d\n",
+		 nfca_poll->sens_res, nfca_poll->nfcid1_len);
 
 	memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
 	data += nfca_poll->nfcid1_len;
@@ -100,32 +108,32 @@
 	if (nfca_poll->sel_res_len != 0)
 		nfca_poll->sel_res = *data++;
 
-	ntf->rf_interface_type = *data++;
-	ntf->activation_params_len = *data++;
+	pr_debug("sel_res_len %d, sel_res 0x%x\n",
+		 nfca_poll->sel_res_len,
+		 nfca_poll->sel_res);
 
-	nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
-		nfca_poll->sel_res_len,
-		nfca_poll->sel_res,
-		ntf->rf_interface_type,
-		ntf->activation_params_len);
+	return data;
+}
 
-	switch (ntf->rf_interface_type) {
-	case NCI_RF_INTERFACE_ISO_DEP:
-		nfca_poll_iso_dep->rats_res_len = *data++;
-		if (nfca_poll_iso_dep->rats_res_len > 0) {
-			memcpy(nfca_poll_iso_dep->rats_res,
+static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
+			struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+{
+	struct activation_params_nfca_poll_iso_dep *nfca_poll;
+
+	switch (ntf->activation_rf_tech_and_mode) {
+	case NCI_NFC_A_PASSIVE_POLL_MODE:
+		nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
+		nfca_poll->rats_res_len = *data++;
+		if (nfca_poll->rats_res_len > 0) {
+			memcpy(nfca_poll->rats_res,
 				data,
-				nfca_poll_iso_dep->rats_res_len);
+				nfca_poll->rats_res_len);
 		}
 		break;
 
-	case NCI_RF_INTERFACE_FRAME:
-		/* no activation params */
-		break;
-
 	default:
-		nfc_err("unsupported rf_interface_type 0x%x",
-			ntf->rf_interface_type);
+		pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+		       ntf->activation_rf_tech_and_mode);
 		return -EPROTO;
 	}
 
@@ -133,7 +141,7 @@
 }
 
 static void nci_target_found(struct nci_dev *ndev,
-				struct nci_rf_activate_ntf *ntf)
+				struct nci_rf_intf_activated_ntf *ntf)
 {
 	struct nfc_target nfc_tgt;
 
@@ -141,66 +149,121 @@
 		nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
 	else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)	/* 4A */
 		nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
+	else
+		nfc_tgt.supported_protocols = 0;
 
 	nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
 	nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
+	nfc_tgt.nfcid1_len = ntf->rf_tech_specific_params.nfca_poll.nfcid1_len;
+	if (nfc_tgt.nfcid1_len > 0) {
+		memcpy(nfc_tgt.nfcid1,
+			ntf->rf_tech_specific_params.nfca_poll.nfcid1,
+			nfc_tgt.nfcid1_len);
+	}
 
 	if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
-		nfc_dbg("the target found does not have the desired protocol");
+		pr_debug("the target found does not have the desired protocol\n");
 		return;
 	}
 
-	nfc_dbg("new target found,  supported_protocols 0x%x",
-		nfc_tgt.supported_protocols);
+	pr_debug("new target found,  supported_protocols 0x%x\n",
+		 nfc_tgt.supported_protocols);
 
 	ndev->target_available_prots = nfc_tgt.supported_protocols;
+	ndev->max_data_pkt_payload_size = ntf->max_data_pkt_payload_size;
+	ndev->initial_num_credits = ntf->initial_num_credits;
+
+	/* set the available credits to initial value */
+	atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
 
 	nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
 }
 
-static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
-					struct sk_buff *skb)
+static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+						struct sk_buff *skb)
 {
-	struct nci_rf_activate_ntf ntf;
+	struct nci_rf_intf_activated_ntf ntf;
 	__u8 *data = skb->data;
-	int rc = -1;
+	int err = 0;
 
 	clear_bit(NCI_DISCOVERY, &ndev->flags);
 	set_bit(NCI_POLL_ACTIVE, &ndev->flags);
 
-	ntf.target_handle = *data++;
+	ntf.rf_discovery_id = *data++;
+	ntf.rf_interface = *data++;
 	ntf.rf_protocol = *data++;
-	ntf.rf_tech_and_mode = *data++;
+	ntf.activation_rf_tech_and_mode = *data++;
+	ntf.max_data_pkt_payload_size = *data++;
+	ntf.initial_num_credits = *data++;
 	ntf.rf_tech_specific_params_len = *data++;
 
-	nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
-		ntf.target_handle,
-		ntf.rf_protocol,
-		ntf.rf_tech_and_mode,
-		ntf.rf_tech_specific_params_len);
+	pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
+	pr_debug("rf_interface 0x%x\n", ntf.rf_interface);
+	pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
+	pr_debug("activation_rf_tech_and_mode 0x%x\n",
+		 ntf.activation_rf_tech_and_mode);
+	pr_debug("max_data_pkt_payload_size 0x%x\n",
+		 ntf.max_data_pkt_payload_size);
+	pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits);
+	pr_debug("rf_tech_specific_params_len %d\n",
+		 ntf.rf_tech_specific_params_len);
 
-	switch (ntf.rf_tech_and_mode) {
-	case NCI_NFC_A_PASSIVE_POLL_MODE:
-		rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
-			data);
-		break;
+	if (ntf.rf_tech_specific_params_len > 0) {
+		switch (ntf.activation_rf_tech_and_mode) {
+		case NCI_NFC_A_PASSIVE_POLL_MODE:
+			data = nci_extract_rf_params_nfca_passive_poll(ndev,
+				&ntf, data);
+			break;
 
-	default:
-		nfc_err("unsupported rf_tech_and_mode 0x%x",
-			ntf.rf_tech_and_mode);
-		return;
+		default:
+			pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+			       ntf.activation_rf_tech_and_mode);
+			return;
+		}
 	}
 
-	if (!rc)
+	ntf.data_exch_rf_tech_and_mode = *data++;
+	ntf.data_exch_tx_bit_rate = *data++;
+	ntf.data_exch_rx_bit_rate = *data++;
+	ntf.activation_params_len = *data++;
+
+	pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
+		 ntf.data_exch_rf_tech_and_mode);
+	pr_debug("data_exch_tx_bit_rate 0x%x\n",
+		 ntf.data_exch_tx_bit_rate);
+	pr_debug("data_exch_rx_bit_rate 0x%x\n",
+		 ntf.data_exch_rx_bit_rate);
+	pr_debug("activation_params_len %d\n",
+		 ntf.activation_params_len);
+
+	if (ntf.activation_params_len > 0) {
+		switch (ntf.rf_interface) {
+		case NCI_RF_INTERFACE_ISO_DEP:
+			err = nci_extract_activation_params_iso_dep(ndev,
+				&ntf, data);
+			break;
+
+		case NCI_RF_INTERFACE_FRAME:
+			/* no activation params */
+			break;
+
+		default:
+			pr_err("unsupported rf_interface 0x%x\n",
+			       ntf.rf_interface);
+			return;
+		}
+	}
+
+	if (!err)
 		nci_target_found(ndev, &ntf);
 }
 
 static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
 					struct sk_buff *skb)
 {
-	__u8 type = skb->data[0];
+	struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
 
-	nfc_dbg("entry, type 0x%x", type);
+	pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
 
 	clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
 	ndev->target_active_prot = 0;
@@ -223,11 +286,11 @@
 {
 	__u16 ntf_opcode = nci_opcode(skb->data);
 
-	nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-			nci_pbf(skb->data),
-			nci_opcode_gid(ntf_opcode),
-			nci_opcode_oid(ntf_opcode),
-			nci_plen(skb->data));
+	pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+		 nci_pbf(skb->data),
+		 nci_opcode_gid(ntf_opcode),
+		 nci_opcode_oid(ntf_opcode),
+		 nci_plen(skb->data));
 
 	/* strip the nci control header */
 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -237,12 +300,12 @@
 		nci_core_conn_credits_ntf_packet(ndev, skb);
 		break;
 
-	case NCI_OP_RF_FIELD_INFO_NTF:
-		nci_rf_field_info_ntf_packet(ndev, skb);
+	case NCI_OP_CORE_INTF_ERROR_NTF:
+		nci_core_conn_intf_error_ntf_packet(ndev, skb);
 		break;
 
-	case NCI_OP_RF_ACTIVATE_NTF:
-		nci_rf_activate_ntf_packet(ndev, skb);
+	case NCI_OP_RF_INTF_ACTIVATED_NTF:
+		nci_rf_intf_activated_ntf_packet(ndev, skb);
 		break;
 
 	case NCI_OP_RF_DEACTIVATE_NTF:
@@ -250,7 +313,7 @@
 		break;
 
 	default:
-		nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
+		pr_err("unknown ntf opcode 0x%x\n", ntf_opcode);
 		break;
 	}
 
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 0403d4c..2840ae2 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -40,12 +42,13 @@
 {
 	struct nci_core_reset_rsp *rsp = (void *) skb->data;
 
-	nfc_dbg("entry, status 0x%x", rsp->status);
+	pr_debug("status 0x%x\n", rsp->status);
 
-	if (rsp->status == NCI_STATUS_OK)
+	if (rsp->status == NCI_STATUS_OK) {
 		ndev->nci_ver = rsp->nci_ver;
-
-	nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
+		pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+			 rsp->nci_ver, rsp->config_status);
+	}
 
 	nci_req_complete(ndev, rsp->status);
 }
@@ -55,16 +58,16 @@
 	struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
 	struct nci_core_init_rsp_2 *rsp_2;
 
-	nfc_dbg("entry, status 0x%x", rsp_1->status);
+	pr_debug("status 0x%x\n", rsp_1->status);
 
 	if (rsp_1->status != NCI_STATUS_OK)
-		return;
+		goto exit;
 
 	ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
 	ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
 
 	if (ndev->num_supported_rf_interfaces >
-		NCI_MAX_SUPPORTED_RF_INTERFACES) {
+			NCI_MAX_SUPPORTED_RF_INTERFACES) {
 		ndev->num_supported_rf_interfaces =
 			NCI_MAX_SUPPORTED_RF_INTERFACES;
 	}
@@ -73,76 +76,56 @@
 		rsp_1->supported_rf_interfaces,
 		ndev->num_supported_rf_interfaces);
 
-	rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
+	rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
 
 	ndev->max_logical_connections =
 		rsp_2->max_logical_connections;
 	ndev->max_routing_table_size =
 		__le16_to_cpu(rsp_2->max_routing_table_size);
-	ndev->max_control_packet_payload_length =
-		rsp_2->max_control_packet_payload_length;
-	ndev->rf_sending_buffer_size =
-		__le16_to_cpu(rsp_2->rf_sending_buffer_size);
-	ndev->rf_receiving_buffer_size =
-		__le16_to_cpu(rsp_2->rf_receiving_buffer_size);
-	ndev->manufacturer_id =
-		__le16_to_cpu(rsp_2->manufacturer_id);
+	ndev->max_ctrl_pkt_payload_len =
+		rsp_2->max_ctrl_pkt_payload_len;
+	ndev->max_size_for_large_params =
+		__le16_to_cpu(rsp_2->max_size_for_large_params);
+	ndev->manufact_id =
+		rsp_2->manufact_id;
+	ndev->manufact_specific_info =
+		__le32_to_cpu(rsp_2->manufact_specific_info);
 
-	nfc_dbg("nfcc_features 0x%x",
-		ndev->nfcc_features);
-	nfc_dbg("num_supported_rf_interfaces %d",
-		ndev->num_supported_rf_interfaces);
-	nfc_dbg("supported_rf_interfaces[0] 0x%x",
-		ndev->supported_rf_interfaces[0]);
-	nfc_dbg("supported_rf_interfaces[1] 0x%x",
-		ndev->supported_rf_interfaces[1]);
-	nfc_dbg("supported_rf_interfaces[2] 0x%x",
-		ndev->supported_rf_interfaces[2]);
-	nfc_dbg("supported_rf_interfaces[3] 0x%x",
-		ndev->supported_rf_interfaces[3]);
-	nfc_dbg("max_logical_connections %d",
-		ndev->max_logical_connections);
-	nfc_dbg("max_routing_table_size %d",
-		ndev->max_routing_table_size);
-	nfc_dbg("max_control_packet_payload_length %d",
-		ndev->max_control_packet_payload_length);
-	nfc_dbg("rf_sending_buffer_size %d",
-		ndev->rf_sending_buffer_size);
-	nfc_dbg("rf_receiving_buffer_size %d",
-		ndev->rf_receiving_buffer_size);
-	nfc_dbg("manufacturer_id 0x%x",
-		ndev->manufacturer_id);
+	pr_debug("nfcc_features 0x%x\n",
+		 ndev->nfcc_features);
+	pr_debug("num_supported_rf_interfaces %d\n",
+		 ndev->num_supported_rf_interfaces);
+	pr_debug("supported_rf_interfaces[0] 0x%x\n",
+		 ndev->supported_rf_interfaces[0]);
+	pr_debug("supported_rf_interfaces[1] 0x%x\n",
+		 ndev->supported_rf_interfaces[1]);
+	pr_debug("supported_rf_interfaces[2] 0x%x\n",
+		 ndev->supported_rf_interfaces[2]);
+	pr_debug("supported_rf_interfaces[3] 0x%x\n",
+		 ndev->supported_rf_interfaces[3]);
+	pr_debug("max_logical_connections %d\n",
+		 ndev->max_logical_connections);
+	pr_debug("max_routing_table_size %d\n",
+		 ndev->max_routing_table_size);
+	pr_debug("max_ctrl_pkt_payload_len %d\n",
+		 ndev->max_ctrl_pkt_payload_len);
+	pr_debug("max_size_for_large_params %d\n",
+		 ndev->max_size_for_large_params);
+	pr_debug("manufact_id 0x%x\n",
+		 ndev->manufact_id);
+	pr_debug("manufact_specific_info 0x%x\n",
+		 ndev->manufact_specific_info);
 
+exit:
 	nci_req_complete(ndev, rsp_1->status);
 }
 
-static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
-						struct sk_buff *skb)
-{
-	struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
-
-	nfc_dbg("entry, status 0x%x", rsp->status);
-
-	if (rsp->status != NCI_STATUS_OK)
-		return;
-
-	ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
-	ndev->initial_num_credits = rsp->initial_num_credits;
-	ndev->conn_id = rsp->conn_id;
-
-	atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
-
-	nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
-	nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
-	nfc_dbg("conn_id %d", ndev->conn_id);
-}
-
 static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
 					struct sk_buff *skb)
 {
 	__u8 status = skb->data[0];
 
-	nfc_dbg("entry, status 0x%x", status);
+	pr_debug("status 0x%x\n", status);
 
 	nci_req_complete(ndev, status);
 }
@@ -151,7 +134,7 @@
 {
 	__u8 status = skb->data[0];
 
-	nfc_dbg("entry, status 0x%x", status);
+	pr_debug("status 0x%x\n", status);
 
 	if (status == NCI_STATUS_OK)
 		set_bit(NCI_DISCOVERY, &ndev->flags);
@@ -164,7 +147,7 @@
 {
 	__u8 status = skb->data[0];
 
-	nfc_dbg("entry, status 0x%x", status);
+	pr_debug("status 0x%x\n", status);
 
 	clear_bit(NCI_DISCOVERY, &ndev->flags);
 
@@ -178,11 +161,11 @@
 	/* we got a rsp, stop the cmd timer */
 	del_timer(&ndev->cmd_timer);
 
-	nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-			nci_pbf(skb->data),
-			nci_opcode_gid(rsp_opcode),
-			nci_opcode_oid(rsp_opcode),
-			nci_plen(skb->data));
+	pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+		 nci_pbf(skb->data),
+		 nci_opcode_gid(rsp_opcode),
+		 nci_opcode_oid(rsp_opcode),
+		 nci_plen(skb->data));
 
 	/* strip the nci control header */
 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -196,10 +179,6 @@
 		nci_core_init_rsp_packet(ndev, skb);
 		break;
 
-	case NCI_OP_CORE_CONN_CREATE_RSP:
-		nci_core_conn_create_rsp_packet(ndev, skb);
-		break;
-
 	case NCI_OP_RF_DISCOVER_MAP_RSP:
 		nci_rf_disc_map_rsp_packet(ndev, skb);
 		break;
@@ -213,7 +192,7 @@
 		break;
 
 	default:
-		nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
+		pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
 		break;
 	}
 
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 03f8818..6989dfa 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <net/genetlink.h>
 #include <linux/nfc.h>
 #include <linux/slab.h>
@@ -44,6 +46,8 @@
 	[NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
 				.len = NFC_DEVICE_NAME_MAXSIZE },
 	[NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+	[NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
+	[NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
 };
 
 static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -51,8 +55,6 @@
 {
 	void *hdr;
 
-	nfc_dbg("entry");
-
 	hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
 				&nfc_genl_family, flags, NFC_CMD_GET_TARGET);
 	if (!hdr)
@@ -65,6 +67,9 @@
 				target->supported_protocols);
 	NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
 	NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
+	if (target->nfcid1_len > 0)
+		NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
+				target->nfcid1);
 
 	return genlmsg_end(msg, hdr);
 
@@ -105,8 +110,6 @@
 	struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
 	int rc;
 
-	nfc_dbg("entry");
-
 	if (!dev) {
 		dev = __get_device_from_cb(cb);
 		if (IS_ERR(dev))
@@ -139,8 +142,6 @@
 {
 	struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
 
-	nfc_dbg("entry");
-
 	if (dev)
 		nfc_put_device(dev);
 
@@ -152,8 +153,6 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	nfc_dbg("entry");
-
 	dev->genl_data.poll_req_pid = 0;
 
 	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -183,8 +182,6 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	nfc_dbg("entry");
-
 	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
@@ -216,8 +213,6 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	nfc_dbg("entry");
-
 	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
@@ -249,8 +244,6 @@
 {
 	void *hdr;
 
-	nfc_dbg("entry");
-
 	hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
 							NFC_CMD_GET_DEVICE);
 	if (!hdr)
@@ -277,8 +270,6 @@
 	struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
 	bool first_call = false;
 
-	nfc_dbg("entry");
-
 	if (!iter) {
 		first_call = true;
 		iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
@@ -319,14 +310,81 @@
 {
 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-	nfc_dbg("entry");
-
 	nfc_device_iter_exit(iter);
 	kfree(iter);
 
 	return 0;
 }
 
+int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
+						u8 comm_mode, u8 rf_mode)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	pr_debug("DEP link is up\n");
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+				NFC_CMD_DEP_LINK_UP);
+	if (!hdr)
+		goto free_msg;
+
+	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (rf_mode == NFC_RF_INITIATOR)
+		NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
+	NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
+	NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+
+	genlmsg_end(msg, hdr);
+
+	dev->dep_link_up = true;
+
+	genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+free_msg:
+	nlmsg_free(msg);
+	return -EMSGSIZE;
+}
+
+int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	pr_debug("DEP link is down\n");
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+				NFC_CMD_DEP_LINK_DOWN);
+	if (!hdr)
+		goto free_msg;
+
+	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+free_msg:
+	nlmsg_free(msg);
+	return -EMSGSIZE;
+}
+
 static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
 {
 	struct sk_buff *msg;
@@ -334,8 +392,6 @@
 	u32 idx;
 	int rc = -ENOBUFS;
 
-	nfc_dbg("entry");
-
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
 		return -EINVAL;
 
@@ -373,8 +429,6 @@
 	int rc;
 	u32 idx;
 
-	nfc_dbg("entry");
-
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
 		return -EINVAL;
 
@@ -396,8 +450,6 @@
 	int rc;
 	u32 idx;
 
-	nfc_dbg("entry");
-
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
 		return -EINVAL;
 
@@ -420,7 +472,7 @@
 	u32 idx;
 	u32 protocols;
 
-	nfc_dbg("entry");
+	pr_debug("Poll start\n");
 
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
 		!info->attrs[NFC_ATTR_PROTOCOLS])
@@ -451,8 +503,6 @@
 	int rc;
 	u32 idx;
 
-	nfc_dbg("entry");
-
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
 		return -EINVAL;
 
@@ -478,6 +528,67 @@
 	return rc;
 }
 
+static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	int rc, tgt_idx;
+	u32 idx;
+	u8 comm, rf;
+
+	pr_debug("DEP link up\n");
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+			!info->attrs[NFC_ATTR_COMM_MODE] ||
+			!info->attrs[NFC_ATTR_RF_MODE])
+		return -EINVAL;
+
+	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+	if (!info->attrs[NFC_ATTR_TARGET_INDEX])
+		tgt_idx = NFC_TARGET_IDX_ANY;
+	else
+		tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
+
+	comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
+	rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]);
+
+	if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
+		return -EINVAL;
+
+	if (rf != NFC_RF_INITIATOR && rf != NFC_RF_TARGET)
+		return -EINVAL;
+
+	dev = nfc_get_device(idx);
+	if (!dev)
+		return -ENODEV;
+
+	rc = nfc_dep_link_up(dev, tgt_idx, comm, rf);
+
+	nfc_put_device(dev);
+
+	return rc;
+}
+
+static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	int rc;
+	u32 idx;
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+		return -EINVAL;
+
+	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+	dev = nfc_get_device(idx);
+	if (!dev)
+		return -ENODEV;
+
+	rc = nfc_dep_link_down(dev);
+
+	nfc_put_device(dev);
+	return rc;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
@@ -507,6 +618,16 @@
 		.policy = nfc_genl_policy,
 	},
 	{
+		.cmd = NFC_CMD_DEP_LINK_UP,
+		.doit = nfc_genl_dep_link_up,
+		.policy = nfc_genl_policy,
+	},
+	{
+		.cmd = NFC_CMD_DEP_LINK_DOWN,
+		.doit = nfc_genl_dep_link_down,
+		.policy = nfc_genl_policy,
+	},
+	{
 		.cmd = NFC_CMD_GET_TARGET,
 		.dumpit = nfc_genl_dump_targets,
 		.done = nfc_genl_dump_targets_done,
@@ -524,18 +645,16 @@
 	if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
 		goto out;
 
-	nfc_dbg("NETLINK_URELEASE event from id %d", n->pid);
+	pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
 
 	nfc_device_iter_init(&iter);
 	dev = nfc_device_iter_next(&iter);
 
 	while (dev) {
-		mutex_lock(&dev->genl_data.genl_data_mutex);
 		if (dev->genl_data.poll_req_pid == n->pid) {
 			nfc_stop_poll(dev);
 			dev->genl_data.poll_req_pid = 0;
 		}
-		mutex_unlock(&dev->genl_data.genl_data_mutex);
 		dev = nfc_device_iter_next(&iter);
 	}
 
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index d86583f..6d28d75 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -27,13 +27,6 @@
 #include <net/nfc/nfc.h>
 #include <net/sock.h>
 
-__printf(2, 3)
-int nfc_printk(const char *level, const char *fmt, ...);
-
-#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
-#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg)
-#define nfc_dbg(fmt, arg...) pr_debug(fmt "\n", ##arg)
-
 struct nfc_protocol {
 	int id;
 	struct proto *proto;
@@ -53,6 +46,60 @@
 #define to_rawsock_sk(_tx_work) \
 	((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
 
+#ifdef CONFIG_NFC_LLCP
+
+void nfc_llcp_mac_is_down(struct nfc_dev *dev);
+void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+			u8 comm_mode, u8 rf_mode);
+int nfc_llcp_register_device(struct nfc_dev *dev);
+void nfc_llcp_unregister_device(struct nfc_dev *dev);
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len);
+int __init nfc_llcp_init(void);
+void nfc_llcp_exit(void);
+
+#else
+
+static inline void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+{
+}
+
+static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+			u8 comm_mode, u8 rf_mode)
+{
+}
+
+static inline int nfc_llcp_register_device(struct nfc_dev *dev)
+{
+	return 0;
+}
+
+static inline void nfc_llcp_unregister_device(struct nfc_dev *dev)
+{
+}
+
+static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+	return 0;
+}
+
+static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
+{
+	*gb_len = 0;
+	return NULL;
+}
+
+static inline int nfc_llcp_init(void)
+{
+	return 0;
+}
+
+static inline void nfc_llcp_exit(void)
+{
+}
+
+#endif
+
 int __init rawsock_init(void);
 void rawsock_exit(void);
 
@@ -75,6 +122,10 @@
 int nfc_genl_device_added(struct nfc_dev *dev);
 int nfc_genl_device_removed(struct nfc_dev *dev);
 
+int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
+			       u8 comm_mode, u8 rf_mode);
+int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
+
 struct nfc_dev *nfc_get_device(unsigned idx);
 
 static inline void nfc_put_device(struct nfc_dev *dev)
@@ -109,6 +160,11 @@
 
 int nfc_stop_poll(struct nfc_dev *dev);
 
+int nfc_dep_link_up(struct nfc_dev *dev, int target_idx,
+				u8 comm_mode, u8 rf_mode);
+
+int nfc_dep_link_down(struct nfc_dev *dev);
+
 int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
 
 int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index ee7b2b3..2e2f8c6 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
 #include <net/tcp_states.h>
 #include <linux/nfc.h>
 #include <linux/export.h>
@@ -29,7 +31,7 @@
 
 static void rawsock_write_queue_purge(struct sock *sk)
 {
-	nfc_dbg("sk=%p", sk);
+	pr_debug("sk=%p\n", sk);
 
 	spin_lock_bh(&sk->sk_write_queue.lock);
 	__skb_queue_purge(&sk->sk_write_queue);
@@ -39,7 +41,7 @@
 
 static void rawsock_report_error(struct sock *sk, int err)
 {
-	nfc_dbg("sk=%p err=%d", sk, err);
+	pr_debug("sk=%p err=%d\n", sk, err);
 
 	sk->sk_shutdown = SHUTDOWN_MASK;
 	sk->sk_err = -err;
@@ -52,7 +54,7 @@
 {
 	struct sock *sk = sock->sk;
 
-	nfc_dbg("sock=%p", sock);
+	pr_debug("sock=%p\n", sock);
 
 	sock_orphan(sk);
 	sock_put(sk);
@@ -68,14 +70,14 @@
 	struct nfc_dev *dev;
 	int rc = 0;
 
-	nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags);
+	pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
 
 	if (!addr || len < sizeof(struct sockaddr_nfc) ||
 		addr->sa_family != AF_NFC)
 		return -EINVAL;
 
-	nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx,
-					addr->target_idx, addr->nfc_protocol);
+	pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
+		 addr->dev_idx, addr->target_idx, addr->nfc_protocol);
 
 	lock_sock(sk);
 
@@ -136,7 +138,7 @@
 
 	BUG_ON(in_irq());
 
-	nfc_dbg("sk=%p err=%d", sk, err);
+	pr_debug("sk=%p err=%d\n", sk, err);
 
 	if (err)
 		goto error;
@@ -172,7 +174,7 @@
 	struct sk_buff *skb;
 	int rc;
 
-	nfc_dbg("sk=%p target_idx=%u", sk, target_idx);
+	pr_debug("sk=%p target_idx=%u\n", sk, target_idx);
 
 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
 		rawsock_write_queue_purge(sk);
@@ -198,7 +200,7 @@
 	struct sk_buff *skb;
 	int rc;
 
-	nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len);
+	pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);
 
 	if (msg->msg_namelen)
 		return -EOPNOTSUPP;
@@ -206,13 +208,10 @@
 	if (sock->state != SS_CONNECTED)
 		return -ENOTCONN;
 
-	skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
-					msg->msg_flags & MSG_DONTWAIT, &rc);
-	if (!skb)
+	skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
+	if (skb == NULL)
 		return rc;
 
-	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
-
 	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 	if (rc < 0) {
 		kfree_skb(skb);
@@ -239,7 +238,7 @@
 	int copied;
 	int rc;
 
-	nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags);
+	pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
 
 	skb = skb_recv_datagram(sk, flags, noblock, &rc);
 	if (!skb)
@@ -283,7 +282,7 @@
 
 static void rawsock_destruct(struct sock *sk)
 {
-	nfc_dbg("sk=%p", sk);
+	pr_debug("sk=%p\n", sk);
 
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		nfc_deactivate_target(nfc_rawsock(sk)->dev,
@@ -294,7 +293,7 @@
 	skb_queue_purge(&sk->sk_receive_queue);
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
-		nfc_err("Freeing alive NFC raw socket %p", sk);
+		pr_err("Freeing alive NFC raw socket %p\n", sk);
 		return;
 	}
 }
@@ -304,14 +303,14 @@
 {
 	struct sock *sk;
 
-	nfc_dbg("sock=%p", sock);
+	pr_debug("sock=%p\n", sock);
 
 	if (sock->type != SOCK_SEQPACKET)
 		return -ESOCKTNOSUPPORT;
 
 	sock->ops = &rawsock_ops;
 
-	sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto);
+	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
 	if (!sk)
 		return -ENOMEM;
 
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
new file mode 100644
index 0000000..d9ea33c
--- /dev/null
+++ b/net/openvswitch/Kconfig
@@ -0,0 +1,28 @@
+#
+# Open vSwitch
+#
+
+config OPENVSWITCH
+	tristate "Open vSwitch"
+	---help---
+	  Open vSwitch is a multilayer Ethernet switch targeted at virtualized
+	  environments.  In addition to supporting a variety of features
+	  expected in a traditional hardware switch, it enables fine-grained
+	  programmatic extension and flow-based control of the network.  This
+	  control is useful in a wide variety of applications but is
+	  particularly important in multi-server virtualization deployments,
+	  which are often characterized by highly dynamic endpoints and the
+	  need to maintain logical abstractions for multiple tenants.
+
+	  The Open vSwitch datapath provides an in-kernel fast path for packet
+	  forwarding.  It is complemented by a userspace daemon, ovs-vswitchd,
+	  which is able to accept configuration from a variety of sources and
+	  translate it into packet processing rules.
+
+	  See http://openvswitch.org for more information and userspace
+	  utilities.
+
+	  To compile this code as a module, choose M here: the module will be
+	  called openvswitch.
+
+	  If unsure, say N.
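
The help text above describes the split between the in-kernel datapath (the per-packet fast path) and the userspace ovs-vswitchd daemon (the slow path that installs packet processing rules).  In terms of the code added later in this series (actions.c and datapath.c below), the per-packet decision is: execute the cached actions when a flow entry is attached to the packet, otherwise send an upcall to userspace over generic netlink.  A minimal sketch of that decision, assuming the flow lookup has already run; ovs_dp_upcall(), ovs_execute_actions(), OVS_CB() and struct dp_upcall_info are taken from the datapath code below, everything else is an assumption.

/* Illustrative sketch of the fast-path/slow-path split, not code from this
 * patch.  Assume OVS_CB(skb)->flow was set by a preceding flow-table lookup
 * (NULL means a miss).
 */
static void dp_fast_path_sketch(struct datapath *dp, struct sk_buff *skb)
{
	if (!OVS_CB(skb)->flow) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;	/* no cached flow: punt to ovs-vswitchd */
		upcall.key = NULL;			/* the real code passes the extracted flow key */
		upcall.userdata = NULL;
		upcall.pid = 0;				/* netlink port chosen by the real caller */

		ovs_dp_upcall(dp, skb, &upcall);	/* slow path via generic netlink */
		return;
	}

	ovs_execute_actions(dp, skb);			/* cached actions: in-kernel forwarding */
}
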
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
new file mode 100644
index 0000000..15e7384
--- /dev/null
+++ b/net/openvswitch/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Open vSwitch.
+#
+
+obj-$(CONFIG_OPENVSWITCH) += openvswitch.o
+
+openvswitch-y := \
+	actions.o \
+	datapath.o \
+	dp_notify.o \
+	flow.o \
+	vport.o \
+	vport-internal_dev.o \
+	vport-netdev.o \
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
new file mode 100644
index 0000000..2725d1b
--- /dev/null
+++ b/net/openvswitch/actions.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/openvswitch.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in6.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+			const struct nlattr *attr, int len, bool keep_skb);
+
+static int make_writable(struct sk_buff *skb, int write_len)
+{
+	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+		return 0;
+
+	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/* remove VLAN header from packet and update csum accordingly. */
+static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+{
+	struct vlan_hdr *vhdr;
+	int err;
+
+	err = make_writable(skb, VLAN_ETH_HLEN);
+	if (unlikely(err))
+		return err;
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+					+ ETH_HLEN, VLAN_HLEN, 0));
+
+	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+	*current_tci = vhdr->h_vlan_TCI;
+
+	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+	__skb_pull(skb, VLAN_HLEN);
+
+	vlan_set_encap_proto(skb, vhdr);
+	skb->mac_header += VLAN_HLEN;
+	skb_reset_mac_len(skb);
+
+	return 0;
+}
+
+static int pop_vlan(struct sk_buff *skb)
+{
+	__be16 tci;
+	int err;
+
+	if (likely(vlan_tx_tag_present(skb))) {
+		skb->vlan_tci = 0;
+	} else {
+		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
+			     skb->len < VLAN_ETH_HLEN))
+			return 0;
+
+		err = __pop_vlan_tci(skb, &tci);
+		if (err)
+			return err;
+	}
+	/* move next vlan tag to hw accel tag */
+	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
+		   skb->len < VLAN_ETH_HLEN))
+		return 0;
+
+	err = __pop_vlan_tci(skb, &tci);
+	if (unlikely(err))
+		return err;
+
+	__vlan_hwaccel_put_tag(skb, ntohs(tci));
+	return 0;
+}
+
+static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+{
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		u16 current_tag;
+
+		/* push down current VLAN tag */
+		current_tag = vlan_tx_tag_get(skb);
+
+		if (!__vlan_put_tag(skb, current_tag))
+			return -ENOMEM;
+
+		if (skb->ip_summed == CHECKSUM_COMPLETE)
+			skb->csum = csum_add(skb->csum, csum_partial(skb->data
+					+ ETH_HLEN, VLAN_HLEN, 0));
+
+	}
+	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+	return 0;
+}
+
+static int set_eth_addr(struct sk_buff *skb,
+			const struct ovs_key_ethernet *eth_key)
+{
+	int err;
+	err = make_writable(skb, ETH_HLEN);
+	if (unlikely(err))
+		return err;
+
+	memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
+	memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+
+	return 0;
+}
+
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+				__be32 *addr, __be32 new_addr)
+{
+	int transport_len = skb->len - skb_transport_offset(skb);
+
+	if (nh->protocol == IPPROTO_TCP) {
+		if (likely(transport_len >= sizeof(struct tcphdr)))
+			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+						 *addr, new_addr, 1);
+	} else if (nh->protocol == IPPROTO_UDP) {
+		if (likely(transport_len >= sizeof(struct udphdr)))
+			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
+						 *addr, new_addr, 1);
+	}
+
+	csum_replace4(&nh->check, *addr, new_addr);
+	skb->rxhash = 0;
+	*addr = new_addr;
+}
+
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
+{
+	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
+	nh->ttl = new_ttl;
+}
+
+static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+{
+	struct iphdr *nh;
+	int err;
+
+	err = make_writable(skb, skb_network_offset(skb) +
+				 sizeof(struct iphdr));
+	if (unlikely(err))
+		return err;
+
+	nh = ip_hdr(skb);
+
+	if (ipv4_key->ipv4_src != nh->saddr)
+		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+
+	if (ipv4_key->ipv4_dst != nh->daddr)
+		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
+
+	if (ipv4_key->ipv4_tos != nh->tos)
+		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
+
+	if (ipv4_key->ipv4_ttl != nh->ttl)
+		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+
+	return 0;
+}
+
+/* Must follow make_writable() since that can move the skb data. */
+static void set_tp_port(struct sk_buff *skb, __be16 *port,
+			 __be16 new_port, __sum16 *check)
+{
+	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+	*port = new_port;
+	skb->rxhash = 0;
+}
+
+static int set_udp_port(struct sk_buff *skb,
+			const struct ovs_key_udp *udp_port_key)
+{
+	struct udphdr *uh;
+	int err;
+
+	err = make_writable(skb, skb_transport_offset(skb) +
+				 sizeof(struct udphdr));
+	if (unlikely(err))
+		return err;
+
+	uh = udp_hdr(skb);
+	if (udp_port_key->udp_src != uh->source)
+		set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+
+	if (udp_port_key->udp_dst != uh->dest)
+		set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+
+	return 0;
+}
+
+static int set_tcp_port(struct sk_buff *skb,
+			const struct ovs_key_tcp *tcp_port_key)
+{
+	struct tcphdr *th;
+	int err;
+
+	err = make_writable(skb, skb_transport_offset(skb) +
+				 sizeof(struct tcphdr));
+	if (unlikely(err))
+		return err;
+
+	th = tcp_hdr(skb);
+	if (tcp_port_key->tcp_src != th->source)
+		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
+
+	if (tcp_port_key->tcp_dst != th->dest)
+		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
+
+	return 0;
+}
+
+static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+{
+	struct vport *vport;
+
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	vport = rcu_dereference(dp->ports[out_port]);
+	if (unlikely(!vport)) {
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	ovs_vport_send(vport, skb);
+	return 0;
+}
+
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+			    const struct nlattr *attr)
+{
+	struct dp_upcall_info upcall;
+	const struct nlattr *a;
+	int rem;
+
+	upcall.cmd = OVS_PACKET_CMD_ACTION;
+	upcall.key = &OVS_CB(skb)->flow->key;
+	upcall.userdata = NULL;
+	upcall.pid = 0;
+
+	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+		 a = nla_next(a, &rem)) {
+		switch (nla_type(a)) {
+		case OVS_USERSPACE_ATTR_USERDATA:
+			upcall.userdata = a;
+			break;
+
+		case OVS_USERSPACE_ATTR_PID:
+			upcall.pid = nla_get_u32(a);
+			break;
+		}
+	}
+
+	return ovs_dp_upcall(dp, skb, &upcall);
+}
+
+static int sample(struct datapath *dp, struct sk_buff *skb,
+		  const struct nlattr *attr)
+{
+	const struct nlattr *acts_list = NULL;
+	const struct nlattr *a;
+	int rem;
+
+	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+		 a = nla_next(a, &rem)) {
+		switch (nla_type(a)) {
+		case OVS_SAMPLE_ATTR_PROBABILITY:
+			if (net_random() >= nla_get_u32(a))
+				return 0;
+			break;
+
+		case OVS_SAMPLE_ATTR_ACTIONS:
+			acts_list = a;
+			break;
+		}
+	}
+
+	return do_execute_actions(dp, skb, nla_data(acts_list),
+						 nla_len(acts_list), true);
+}
+
+static int execute_set_action(struct sk_buff *skb,
+				 const struct nlattr *nested_attr)
+{
+	int err = 0;
+
+	switch (nla_type(nested_attr)) {
+	case OVS_KEY_ATTR_PRIORITY:
+		skb->priority = nla_get_u32(nested_attr);
+		break;
+
+	case OVS_KEY_ATTR_ETHERNET:
+		err = set_eth_addr(skb, nla_data(nested_attr));
+		break;
+
+	case OVS_KEY_ATTR_IPV4:
+		err = set_ipv4(skb, nla_data(nested_attr));
+		break;
+
+	case OVS_KEY_ATTR_TCP:
+		err = set_tcp_port(skb, nla_data(nested_attr));
+		break;
+
+	case OVS_KEY_ATTR_UDP:
+		err = set_udp_port(skb, nla_data(nested_attr));
+		break;
+	}
+
+	return err;
+}
+
+/* Execute a list of actions against 'skb'. */
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+			const struct nlattr *attr, int len, bool keep_skb)
+{
+	/* Every output action needs a separate clone of 'skb', but the common
+	 * case is just a single output action, so cloning and then freeing the
+	 * original skbuff would be wasteful.  The following code is slightly
+	 * obscure just to avoid that. */
+	int prev_port = -1;
+	const struct nlattr *a;
+	int rem;
+
+	for (a = attr, rem = len; rem > 0;
+	     a = nla_next(a, &rem)) {
+		int err = 0;
+
+		if (prev_port != -1) {
+			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+			prev_port = -1;
+		}
+
+		switch (nla_type(a)) {
+		case OVS_ACTION_ATTR_OUTPUT:
+			prev_port = nla_get_u32(a);
+			break;
+
+		case OVS_ACTION_ATTR_USERSPACE:
+			output_userspace(dp, skb, a);
+			break;
+
+		case OVS_ACTION_ATTR_PUSH_VLAN:
+			err = push_vlan(skb, nla_data(a));
+			if (unlikely(err)) /* skb already freed. */
+				return err;
+			break;
+
+		case OVS_ACTION_ATTR_POP_VLAN:
+			err = pop_vlan(skb);
+			break;
+
+		case OVS_ACTION_ATTR_SET:
+			err = execute_set_action(skb, nla_data(a));
+			break;
+
+		case OVS_ACTION_ATTR_SAMPLE:
+			err = sample(dp, skb, a);
+			break;
+		}
+
+		if (unlikely(err)) {
+			kfree_skb(skb);
+			return err;
+		}
+	}
+
+	if (prev_port != -1) {
+		if (keep_skb)
+			skb = skb_clone(skb, GFP_ATOMIC);
+
+		do_output(dp, skb, prev_port);
+	} else if (!keep_skb)
+		consume_skb(skb);
+
+	return 0;
+}
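+
+/*
+ * Worked example for the deferred-output pattern above (illustrative only,
+ * not part of the uAPI): for an action list "output:1, output:2" the loop
+ * clones the skb only when it reaches the second output action (flushing
+ * port 1 with the clone), and the original skb -- or a clone, if 'keep_skb'
+ * was requested by sample() -- is sent to port 2 after the loop.  The common
+ * single-output case therefore never clones at all.
+ */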
+
+/* Execute a list of actions against 'skb'. */
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
+{
+	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+	return do_execute_actions(dp, skb, acts->actions,
+					 acts->actions_len, false);
+}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
new file mode 100644
index 0000000..9a27251
--- /dev/null
+++ b/net/openvswitch/datapath.c
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/etherdevice.h>
+#include <linux/genetlink.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/wait.h>
+#include <asm/system.h>
+#include <asm/div64.h>
+#include <linux/highmem.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/inetdevice.h>
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/rculist.h>
+#include <linux/dmi.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "flow.h"
+#include "vport-internal_dev.h"
+
+/**
+ * DOC: Locking:
+ *
+ * Writes to device state (add/remove datapath, port, set operations on vports,
+ * etc.) are protected by RTNL.
+ *
+ * Writes to other state (flow table modifications, set miscellaneous datapath
+ * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
+ * genl_mutex.
+ *
+ * Reads are protected by RCU.
+ *
+ * There are a few special cases (mostly stats) that have their own
+ * synchronization, but they nest under all of the above and don't interact
+ * with each other.
+ */
+ */
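+
+/*
+ * Illustrative sketch (not introduced by this code): a typical write-side
+ * Generic Netlink handler below follows the documented nesting -- the genl
+ * core already holds genl_mutex when the handler runs, and the handler then
+ * takes RTNL itself:
+ *
+ *	rtnl_lock();
+ *	... modify dp->ports[] / port_list under RTNL ...
+ *	rtnl_unlock();
+ *
+ * Fast-path readers use rcu_read_lock() alone.
+ */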
+
+/* Global list of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+static LIST_HEAD(dps);
+
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
+static struct vport *new_vport(const struct vport_parms *);
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+			     const struct dp_upcall_info *);
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+				  const struct dp_upcall_info *);
+
+/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
+static struct datapath *get_dp(int dp_ifindex)
+{
+	struct datapath *dp = NULL;
+	struct net_device *dev;
+
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+	if (dev) {
+		struct vport *vport = ovs_internal_dev_get_vport(dev);
+		if (vport)
+			dp = vport->dp;
+	}
+	rcu_read_unlock();
+
+	return dp;
+}
+
+/* Must be called with rcu_read_lock or RTNL lock. */
+const char *ovs_dp_name(const struct datapath *dp)
+{
+	struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+	return vport->ops->get_name(vport);
+}
+
+static int get_dpifindex(struct datapath *dp)
+{
+	struct vport *local;
+	int ifindex;
+
+	rcu_read_lock();
+
+	local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+	if (local)
+		ifindex = local->ops->get_ifindex(local);
+	else
+		ifindex = 0;
+
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
+static void destroy_dp_rcu(struct rcu_head *rcu)
+{
+	struct datapath *dp = container_of(rcu, struct datapath, rcu);
+
+	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+	free_percpu(dp->stats_percpu);
+	kfree(dp);
+}
+
+/* Called with RTNL lock and genl_lock. */
+static struct vport *new_vport(const struct vport_parms *parms)
+{
+	struct vport *vport;
+
+	vport = ovs_vport_add(parms);
+	if (!IS_ERR(vport)) {
+		struct datapath *dp = parms->dp;
+
+		rcu_assign_pointer(dp->ports[parms->port_no], vport);
+		list_add(&vport->node, &dp->port_list);
+	}
+
+	return vport;
+}
+
+/* Called with RTNL lock. */
+void ovs_dp_detach_port(struct vport *p)
+{
+	ASSERT_RTNL();
+
+	/* First drop references to device. */
+	list_del(&p->node);
+	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+
+	/* Then destroy it. */
+	ovs_vport_del(p);
+}
+
+/* Must be called with rcu_read_lock. */
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+{
+	struct datapath *dp = p->dp;
+	struct sw_flow *flow;
+	struct dp_stats_percpu *stats;
+	struct sw_flow_key key;
+	u64 *stats_counter;
+	int error;
+	int key_len;
+
+	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+	/* Extract flow from 'skb' into 'key'. */
+	error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+	if (unlikely(error)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	/* Look up flow. */
+	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+	if (unlikely(!flow)) {
+		struct dp_upcall_info upcall;
+
+		upcall.cmd = OVS_PACKET_CMD_MISS;
+		upcall.key = &key;
+		upcall.userdata = NULL;
+		upcall.pid = p->upcall_pid;
+		ovs_dp_upcall(dp, skb, &upcall);
+		consume_skb(skb);
+		stats_counter = &stats->n_missed;
+		goto out;
+	}
+
+	OVS_CB(skb)->flow = flow;
+
+	stats_counter = &stats->n_hit;
+	ovs_flow_used(OVS_CB(skb)->flow, skb);
+	ovs_execute_actions(dp, skb);
+
+out:
+	/* Update datapath statistics. */
+	u64_stats_update_begin(&stats->sync);
+	(*stats_counter)++;
+	u64_stats_update_end(&stats->sync);
+}
+
+static struct genl_family dp_packet_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_PACKET_FAMILY,
+	.version = OVS_PACKET_VERSION,
+	.maxattr = OVS_PACKET_ATTR_MAX
+};
+
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+	      const struct dp_upcall_info *upcall_info)
+{
+	struct dp_stats_percpu *stats;
+	int dp_ifindex;
+	int err;
+
+	if (upcall_info->pid == 0) {
+		err = -ENOTCONN;
+		goto err;
+	}
+
+	dp_ifindex = get_dpifindex(dp);
+	if (!dp_ifindex) {
+		err = -ENODEV;
+		goto err;
+	}
+
+	if (!skb_is_gso(skb))
+		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+	else
+		err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+	if (err)
+		goto err;
+
+	return 0;
+
+err:
+	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+	u64_stats_update_begin(&stats->sync);
+	stats->n_lost++;
+	u64_stats_update_end(&stats->sync);
+
+	return err;
+}
+
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+			     const struct dp_upcall_info *upcall_info)
+{
+	struct dp_upcall_info later_info;
+	struct sw_flow_key later_key;
+	struct sk_buff *segs, *nskb;
+	int err;
+
+	segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+	if (IS_ERR(segs))
+		return PTR_ERR(segs);
+
+	/* Queue all of the segments. */
+	skb = segs;
+	do {
+		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+		if (err)
+			break;
+
+		if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+			/* The initial flow key extracted by ovs_flow_extract()
+			 * in this case is for a first fragment, so we need to
+			 * properly mark later fragments.
+			 */
+			later_key = *upcall_info->key;
+			later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+
+			later_info = *upcall_info;
+			later_info.key = &later_key;
+			upcall_info = &later_info;
+		}
+	} while ((skb = skb->next));
+
+	/* Free all of the segments. */
+	skb = segs;
+	do {
+		nskb = skb->next;
+		if (err)
+			kfree_skb(skb);
+		else
+			consume_skb(skb);
+	} while ((skb = nskb));
+	return err;
+}
+
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+				  const struct dp_upcall_info *upcall_info)
+{
+	struct ovs_header *upcall;
+	struct sk_buff *nskb = NULL;
+	struct sk_buff *user_skb; /* to be queued to userspace */
+	struct nlattr *nla;
+	unsigned int len;
+	int err;
+
+	if (vlan_tx_tag_present(skb)) {
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (!nskb)
+			return -ENOMEM;
+
+		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+		if (!nskb)
+			return -ENOMEM;
+
+		nskb->vlan_tci = 0;
+		skb = nskb;
+	}
+
+	if (nla_attr_size(skb->len) > USHRT_MAX) {
+		err = -EFBIG;
+		goto out;
+	}
+
+	len = sizeof(struct ovs_header);
+	len += nla_total_size(skb->len);
+	len += nla_total_size(FLOW_BUFSIZE);
+	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
+		len += nla_total_size(8);
+
+	user_skb = genlmsg_new(len, GFP_ATOMIC);
+	if (!user_skb) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
+			     0, upcall_info->cmd);
+	upcall->dp_ifindex = dp_ifindex;
+
+	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
+	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+	nla_nest_end(user_skb, nla);
+
+	if (upcall_info->userdata)
+		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
+			    nla_get_u64(upcall_info->userdata));
+
+	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+
+	skb_copy_and_csum_dev(skb, nla_data(nla));
+
+	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+
+out:
+	kfree_skb(nskb);
+	return err;
+}
+
+/* Called with genl_mutex. */
+static int flush_flows(int dp_ifindex)
+{
+	struct flow_table *old_table;
+	struct flow_table *new_table;
+	struct datapath *dp;
+
+	dp = get_dp(dp_ifindex);
+	if (!dp)
+		return -ENODEV;
+
+	old_table = genl_dereference(dp->table);
+	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
+	if (!new_table)
+		return -ENOMEM;
+
+	rcu_assign_pointer(dp->table, new_table);
+
+	ovs_flow_tbl_deferred_destroy(old_table);
+	return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+				const struct sw_flow_key *key, int depth);
+
+static int validate_sample(const struct nlattr *attr,
+				const struct sw_flow_key *key, int depth)
+{
+	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+	const struct nlattr *probability, *actions;
+	const struct nlattr *a;
+	int rem;
+
+	memset(attrs, 0, sizeof(attrs));
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+			return -EINVAL;
+		attrs[type] = a;
+	}
+	if (rem)
+		return -EINVAL;
+
+	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+	if (!probability || nla_len(probability) != sizeof(u32))
+		return -EINVAL;
+
+	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+		return -EINVAL;
+	return validate_actions(actions, key, depth + 1);
+}
+
+static int validate_set(const struct nlattr *a,
+			const struct sw_flow_key *flow_key)
+{
+	const struct nlattr *ovs_key = nla_data(a);
+	int key_type = nla_type(ovs_key);
+
+	/* There can be only one key in an action. */
+	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+		return -EINVAL;
+
+	if (key_type > OVS_KEY_ATTR_MAX ||
+	    nla_len(ovs_key) != ovs_key_lens[key_type])
+		return -EINVAL;
+
+	switch (key_type) {
+	const struct ovs_key_ipv4 *ipv4_key;
+
+	case OVS_KEY_ATTR_PRIORITY:
+	case OVS_KEY_ATTR_ETHERNET:
+		break;
+
+	case OVS_KEY_ATTR_IPV4:
+		if (flow_key->eth.type != htons(ETH_P_IP))
+			return -EINVAL;
+
+		if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
+			return -EINVAL;
+
+		ipv4_key = nla_data(ovs_key);
+		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+			return -EINVAL;
+
+		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+			return -EINVAL;
+
+		break;
+
+	case OVS_KEY_ATTR_TCP:
+		if (flow_key->ip.proto != IPPROTO_TCP)
+			return -EINVAL;
+
+		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+			return -EINVAL;
+
+		break;
+
+	case OVS_KEY_ATTR_UDP:
+		if (flow_key->ip.proto != IPPROTO_UDP)
+			return -EINVAL;
+
+		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+			return -EINVAL;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int validate_userspace(const struct nlattr *attr)
+{
+	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =	{
+		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+	};
+	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+	int error;
+
+	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+				 attr, userspace_policy);
+	if (error)
+		return error;
+
+	if (!a[OVS_USERSPACE_ATTR_PID] ||
+	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+				const struct sw_flow_key *key,  int depth)
+{
+	const struct nlattr *a;
+	int rem, err;
+
+	if (depth >= SAMPLE_ACTION_DEPTH)
+		return -EOVERFLOW;
+
+	nla_for_each_nested(a, attr, rem) {
+		/* Expected argument lengths, (u32)-1 for variable length. */
+		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
+			[OVS_ACTION_ATTR_POP_VLAN] = 0,
+			[OVS_ACTION_ATTR_SET] = (u32)-1,
+			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+		};
+		const struct ovs_action_push_vlan *vlan;
+		int type = nla_type(a);
+
+		if (type > OVS_ACTION_ATTR_MAX ||
+		    (action_lens[type] != nla_len(a) &&
+		     action_lens[type] != (u32)-1))
+			return -EINVAL;
+
+		switch (type) {
+		case OVS_ACTION_ATTR_UNSPEC:
+			return -EINVAL;
+
+		case OVS_ACTION_ATTR_USERSPACE:
+			err = validate_userspace(a);
+			if (err)
+				return err;
+			break;
+
+		case OVS_ACTION_ATTR_OUTPUT:
+			if (nla_get_u32(a) >= DP_MAX_PORTS)
+				return -EINVAL;
+			break;
+
+		case OVS_ACTION_ATTR_POP_VLAN:
+			break;
+
+		case OVS_ACTION_ATTR_PUSH_VLAN:
+			vlan = nla_data(a);
+			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+				return -EINVAL;
+			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+				return -EINVAL;
+			break;
+
+		case OVS_ACTION_ATTR_SET:
+			err = validate_set(a, key);
+			if (err)
+				return err;
+			break;
+
+		case OVS_ACTION_ATTR_SAMPLE:
+			err = validate_sample(a, key, depth);
+			if (err)
+				return err;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (rem > 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void clear_stats(struct sw_flow *flow)
+{
+	flow->used = 0;
+	flow->tcp_flags = 0;
+	flow->packet_count = 0;
+	flow->byte_count = 0;
+}
+
+static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
+{
+	struct ovs_header *ovs_header = info->userhdr;
+	struct nlattr **a = info->attrs;
+	struct sw_flow_actions *acts;
+	struct sk_buff *packet;
+	struct sw_flow *flow;
+	struct datapath *dp;
+	struct ethhdr *eth;
+	int len;
+	int err;
+	int key_len;
+
+	err = -EINVAL;
+	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
+	    !a[OVS_PACKET_ATTR_ACTIONS] ||
+	    nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
+		goto err;
+
+	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
+	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
+	err = -ENOMEM;
+	if (!packet)
+		goto err;
+	skb_reserve(packet, NET_IP_ALIGN);
+
+	memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
+
+	skb_reset_mac_header(packet);
+	eth = eth_hdr(packet);
+
+	/* Normally, setting the skb 'protocol' field would be handled by a
+	 * call to eth_type_trans(), but it assumes there's a sending
+	 * device, which we may not have.  Values of h_proto >= 1536 (0x600)
+	 * are EtherTypes; smaller values are 802.3 frame lengths. */
+	if (ntohs(eth->h_proto) >= 1536)
+		packet->protocol = eth->h_proto;
+	else
+		packet->protocol = htons(ETH_P_802_2);
+
+	/* Build an sw_flow for sending this packet. */
+	flow = ovs_flow_alloc();
+	err = PTR_ERR(flow);
+	if (IS_ERR(flow))
+		goto err_kfree_skb;
+
+	err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+	if (err)
+		goto err_flow_free;
+
+	err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+					     &flow->key.phy.in_port,
+					     a[OVS_PACKET_ATTR_KEY]);
+	if (err)
+		goto err_flow_free;
+
+	err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
+	if (err)
+		goto err_flow_free;
+
+	flow->hash = ovs_flow_hash(&flow->key, key_len);
+
+	acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+	err = PTR_ERR(acts);
+	if (IS_ERR(acts))
+		goto err_flow_free;
+	rcu_assign_pointer(flow->sf_acts, acts);
+
+	OVS_CB(packet)->flow = flow;
+	packet->priority = flow->key.phy.priority;
+
+	rcu_read_lock();
+	dp = get_dp(ovs_header->dp_ifindex);
+	err = -ENODEV;
+	if (!dp)
+		goto err_unlock;
+
+	local_bh_disable();
+	err = ovs_execute_actions(dp, packet);
+	local_bh_enable();
+	rcu_read_unlock();
+
+	ovs_flow_free(flow);
+	return err;
+
+err_unlock:
+	rcu_read_unlock();
+err_flow_free:
+	ovs_flow_free(flow);
+err_kfree_skb:
+	kfree_skb(packet);
+err:
+	return err;
+}
+
+static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
+	[OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
+	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_ops dp_packet_genl_ops[] = {
+	{ .cmd = OVS_PACKET_CMD_EXECUTE,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = packet_policy,
+	  .doit = ovs_packet_cmd_execute
+	}
+};
+
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+{
+	int i;
+	struct flow_table *table = genl_dereference(dp->table);
+
+	stats->n_flows = ovs_flow_tbl_count(table);
+
+	stats->n_hit = stats->n_missed = stats->n_lost = 0;
+	for_each_possible_cpu(i) {
+		const struct dp_stats_percpu *percpu_stats;
+		struct dp_stats_percpu local_stats;
+		unsigned int start;
+
+		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+		do {
+			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+			local_stats = *percpu_stats;
+		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+		stats->n_hit += local_stats.n_hit;
+		stats->n_missed += local_stats.n_missed;
+		stats->n_lost += local_stats.n_lost;
+	}
+}
+
+static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
+	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
+static struct genl_family dp_flow_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_FLOW_FAMILY,
+	.version = OVS_FLOW_VERSION,
+	.maxattr = OVS_FLOW_ATTR_MAX
+};
+
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+	.name = OVS_FLOW_MCGROUP
+};
+
+/* Called with genl_lock. */
+static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+				  struct sk_buff *skb, u32 pid,
+				  u32 seq, u32 flags, u8 cmd)
+{
+	const int skb_orig_len = skb->len;
+	const struct sw_flow_actions *sf_acts;
+	struct ovs_flow_stats stats;
+	struct ovs_header *ovs_header;
+	struct nlattr *nla;
+	unsigned long used;
+	u8 tcp_flags;
+	int err;
+
+	sf_acts = rcu_dereference_protected(flow->sf_acts,
+					    lockdep_genl_is_held());
+
+	ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+	if (!ovs_header)
+		return -EMSGSIZE;
+
+	ovs_header->dp_ifindex = get_dpifindex(dp);
+
+	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
+	if (!nla)
+		goto nla_put_failure;
+	err = ovs_flow_to_nlattrs(&flow->key, skb);
+	if (err)
+		goto error;
+	nla_nest_end(skb, nla);
+
+	spin_lock_bh(&flow->lock);
+	used = flow->used;
+	stats.n_packets = flow->packet_count;
+	stats.n_bytes = flow->byte_count;
+	tcp_flags = flow->tcp_flags;
+	spin_unlock_bh(&flow->lock);
+
+	if (used)
+		NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+
+	if (stats.n_packets)
+		NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
+			sizeof(struct ovs_flow_stats), &stats);
+
+	if (tcp_flags)
+		NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
+	 * this is the first flow to be dumped into 'skb'.  This is unusual for
+	 * Netlink, but individual action lists can be longer than
+	 * NLMSG_GOODSIZE and would otherwise be entirely undumpable.
+	 * The userspace caller can always fetch the actions separately if it
+	 * really wants them.  (Most userspace callers in fact don't care.)
+	 *
+	 * This can only fail for dump operations because the skb is always
+	 * properly sized for single flows.
+	 */
+	err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
+		      sf_acts->actions);
+	if (err < 0 && skb_orig_len)
+		goto error;
+
+	return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+	err = -EMSGSIZE;
+error:
+	genlmsg_cancel(skb, ovs_header);
+	return err;
+}
+
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+{
+	const struct sw_flow_actions *sf_acts;
+	int len;
+
+	sf_acts = rcu_dereference_protected(flow->sf_acts,
+					    lockdep_genl_is_held());
+
+	/* OVS_FLOW_ATTR_KEY */
+	len = nla_total_size(FLOW_BUFSIZE);
+	/* OVS_FLOW_ATTR_ACTIONS */
+	len += nla_total_size(sf_acts->actions_len);
+	/* OVS_FLOW_ATTR_STATS */
+	len += nla_total_size(sizeof(struct ovs_flow_stats));
+	/* OVS_FLOW_ATTR_TCP_FLAGS */
+	len += nla_total_size(1);
+	/* OVS_FLOW_ATTR_USED */
+	len += nla_total_size(8);
+
+	len += NLMSG_ALIGN(sizeof(struct ovs_header));
+
+	return genlmsg_new(len, GFP_KERNEL);
+}
+
+static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
+					       struct datapath *dp,
+					       u32 pid, u32 seq, u8 cmd)
+{
+	struct sk_buff *skb;
+	int retval;
+
+	skb = ovs_flow_cmd_alloc_info(flow);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
+	BUG_ON(retval < 0);
+	return skb;
+}
+
+static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct ovs_header *ovs_header = info->userhdr;
+	struct sw_flow_key key;
+	struct sw_flow *flow;
+	struct sk_buff *reply;
+	struct datapath *dp;
+	struct flow_table *table;
+	int error;
+	int key_len;
+
+	/* Extract key. */
+	error = -EINVAL;
+	if (!a[OVS_FLOW_ATTR_KEY])
+		goto error;
+	error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+	if (error)
+		goto error;
+
+	/* Validate actions. */
+	if (a[OVS_FLOW_ATTR_ACTIONS]) {
+		error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
+		if (error)
+			goto error;
+	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
+		error = -EINVAL;
+		goto error;
+	}
+
+	dp = get_dp(ovs_header->dp_ifindex);
+	error = -ENODEV;
+	if (!dp)
+		goto error;
+
+	table = genl_dereference(dp->table);
+	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+	if (!flow) {
+		struct sw_flow_actions *acts;
+
+		/* Bail out if we're not allowed to create a new flow. */
+		error = -ENOENT;
+		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
+			goto error;
+
+		/* Expand table, if necessary, to make room. */
+		if (ovs_flow_tbl_need_to_expand(table)) {
+			struct flow_table *new_table;
+
+			new_table = ovs_flow_tbl_expand(table);
+			if (!IS_ERR(new_table)) {
+				rcu_assign_pointer(dp->table, new_table);
+				ovs_flow_tbl_deferred_destroy(table);
+				table = genl_dereference(dp->table);
+			}
+		}
+
+		/* Allocate flow. */
+		flow = ovs_flow_alloc();
+		if (IS_ERR(flow)) {
+			error = PTR_ERR(flow);
+			goto error;
+		}
+		flow->key = key;
+		clear_stats(flow);
+
+		/* Obtain actions. */
+		acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+		error = PTR_ERR(acts);
+		if (IS_ERR(acts))
+			goto error_free_flow;
+		rcu_assign_pointer(flow->sf_acts, acts);
+
+		/* Put flow in bucket. */
+		flow->hash = ovs_flow_hash(&key, key_len);
+		ovs_flow_tbl_insert(table, flow);
+
+		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+						info->snd_seq,
+						OVS_FLOW_CMD_NEW);
+	} else {
+		/* We found a matching flow. */
+		struct sw_flow_actions *old_acts;
+		struct nlattr *acts_attrs;
+
+		/* Bail out if we're not allowed to modify an existing flow.
+		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+		 * because Generic Netlink treats the latter as a dump
+		 * request.  We also accept NLM_F_EXCL in case that bug ever
+		 * gets fixed.
+		 */
+		error = -EEXIST;
+		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
+		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+			goto error;
+
+		/* Update actions. */
+		old_acts = rcu_dereference_protected(flow->sf_acts,
+						     lockdep_genl_is_held());
+		acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
+		if (acts_attrs &&
+		   (old_acts->actions_len != nla_len(acts_attrs) ||
+		   memcmp(old_acts->actions, nla_data(acts_attrs),
+			  old_acts->actions_len))) {
+			struct sw_flow_actions *new_acts;
+
+			new_acts = ovs_flow_actions_alloc(acts_attrs);
+			error = PTR_ERR(new_acts);
+			if (IS_ERR(new_acts))
+				goto error;
+
+			rcu_assign_pointer(flow->sf_acts, new_acts);
+			ovs_flow_deferred_free_acts(old_acts);
+		}
+
+		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+					       info->snd_seq, OVS_FLOW_CMD_NEW);
+
+		/* Clear stats. */
+		if (a[OVS_FLOW_ATTR_CLEAR]) {
+			spin_lock_bh(&flow->lock);
+			clear_stats(flow);
+			spin_unlock_bh(&flow->lock);
+		}
+	}
+
+	if (!IS_ERR(reply))
+		genl_notify(reply, genl_info_net(info), info->snd_pid,
+			   ovs_dp_flow_multicast_group.id, info->nlhdr,
+			   GFP_KERNEL);
+	else
+		netlink_set_err(init_net.genl_sock, 0,
+				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
+	return 0;
+
+error_free_flow:
+	ovs_flow_free(flow);
+error:
+	return error;
+}
+
+static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct ovs_header *ovs_header = info->userhdr;
+	struct sw_flow_key key;
+	struct sk_buff *reply;
+	struct sw_flow *flow;
+	struct datapath *dp;
+	struct flow_table *table;
+	int err;
+	int key_len;
+
+	if (!a[OVS_FLOW_ATTR_KEY])
+		return -EINVAL;
+	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+	if (err)
+		return err;
+
+	dp = get_dp(ovs_header->dp_ifindex);
+	if (!dp)
+		return -ENODEV;
+
+	table = genl_dereference(dp->table);
+	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+	if (!flow)
+		return -ENOENT;
+
+	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+					info->snd_seq, OVS_FLOW_CMD_NEW);
+	if (IS_ERR(reply))
+		return PTR_ERR(reply);
+
+	return genlmsg_reply(reply, info);
+}
+
+static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct ovs_header *ovs_header = info->userhdr;
+	struct sw_flow_key key;
+	struct sk_buff *reply;
+	struct sw_flow *flow;
+	struct datapath *dp;
+	struct flow_table *table;
+	int err;
+	int key_len;
+
+	if (!a[OVS_FLOW_ATTR_KEY])
+		return flush_flows(ovs_header->dp_ifindex);
+	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+	if (err)
+		return err;
+
+	dp = get_dp(ovs_header->dp_ifindex);
+	if (!dp)
+		return -ENODEV;
+
+	table = genl_dereference(dp->table);
+	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+	if (!flow)
+		return -ENOENT;
+
+	reply = ovs_flow_cmd_alloc_info(flow);
+	if (!reply)
+		return -ENOMEM;
+
+	ovs_flow_tbl_remove(table, flow);
+
+	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
+				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
+	BUG_ON(err < 0);
+
+	ovs_flow_deferred_free(flow);
+
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+	return 0;
+}
+
+static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+	struct datapath *dp;
+	struct flow_table *table;
+
+	dp = get_dp(ovs_header->dp_ifindex);
+	if (!dp)
+		return -ENODEV;
+
+	table = genl_dereference(dp->table);
+
+	for (;;) {
+		struct sw_flow *flow;
+		u32 bucket, obj;
+
+		bucket = cb->args[0];
+		obj = cb->args[1];
+		flow = ovs_flow_tbl_next(table, &bucket, &obj);
+		if (!flow)
+			break;
+
+		if (ovs_flow_cmd_fill_info(flow, dp, skb,
+					   NETLINK_CB(cb->skb).pid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   OVS_FLOW_CMD_NEW) < 0)
+			break;
+
+		cb->args[0] = bucket;
+		cb->args[1] = obj;
+	}
+	return skb->len;
+}
+
+static struct genl_ops dp_flow_genl_ops[] = {
+	{ .cmd = OVS_FLOW_CMD_NEW,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = flow_policy,
+	  .doit = ovs_flow_cmd_new_or_set
+	},
+	{ .cmd = OVS_FLOW_CMD_DEL,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = flow_policy,
+	  .doit = ovs_flow_cmd_del
+	},
+	{ .cmd = OVS_FLOW_CMD_GET,
+	  .flags = 0,		    /* OK for unprivileged users. */
+	  .policy = flow_policy,
+	  .doit = ovs_flow_cmd_get,
+	  .dumpit = ovs_flow_cmd_dump
+	},
+	{ .cmd = OVS_FLOW_CMD_SET,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = flow_policy,
+	  .doit = ovs_flow_cmd_new_or_set,
+	},
+};
+
+static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+};
+
+static struct genl_family dp_datapath_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_DATAPATH_FAMILY,
+	.version = OVS_DATAPATH_VERSION,
+	.maxattr = OVS_DP_ATTR_MAX
+};
+
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+	.name = OVS_DATAPATH_MCGROUP
+};
+
+static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
+				u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+	struct ovs_header *ovs_header;
+	struct ovs_dp_stats dp_stats;
+	int err;
+
+	ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+				   flags, cmd);
+	if (!ovs_header)
+		goto error;
+
+	ovs_header->dp_ifindex = get_dpifindex(dp);
+
+	rcu_read_lock();
+	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
+	rcu_read_unlock();
+	if (err)
+		goto nla_put_failure;
+
+	get_dp_stats(dp, &dp_stats);
+	NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+
+	return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+	genlmsg_cancel(skb, ovs_header);
+error:
+	return -EMSGSIZE;
+}
+
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
+					     u32 seq, u8 cmd)
+{
+	struct sk_buff *skb;
+	int retval;
+
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+	if (retval < 0) {
+		kfree_skb(skb);
+		return ERR_PTR(retval);
+	}
+	return skb;
+}
+
+/* Called with genl_mutex and optionally with RTNL lock also. */
+static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
+{
+	struct datapath *dp;
+
+	if (!a[OVS_DP_ATTR_NAME])
+		dp = get_dp(ovs_header->dp_ifindex);
+	else {
+		struct vport *vport;
+
+		rcu_read_lock();
+		vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
+		rcu_read_unlock();
+	}
+	return dp ? dp : ERR_PTR(-ENODEV);
+}
+
+static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct vport_parms parms;
+	struct sk_buff *reply;
+	struct datapath *dp;
+	struct vport *vport;
+	int err;
+
+	err = -EINVAL;
+	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
+		goto err;
+
+	rtnl_lock();
+	err = -ENODEV;
+	if (!try_module_get(THIS_MODULE))
+		goto err_unlock_rtnl;
+
+	err = -ENOMEM;
+	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+	if (dp == NULL)
+		goto err_put_module;
+	INIT_LIST_HEAD(&dp->port_list);
+
+	/* Allocate table. */
+	err = -ENOMEM;
+	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
+	if (!dp->table)
+		goto err_free_dp;
+
+	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+	if (!dp->stats_percpu) {
+		err = -ENOMEM;
+		goto err_destroy_table;
+	}
+
+	/* Set up our datapath device. */
+	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
+	parms.type = OVS_VPORT_TYPE_INTERNAL;
+	parms.options = NULL;
+	parms.dp = dp;
+	parms.port_no = OVSP_LOCAL;
+	parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+
+	vport = new_vport(&parms);
+	if (IS_ERR(vport)) {
+		err = PTR_ERR(vport);
+		if (err == -EBUSY)
+			err = -EEXIST;
+
+		goto err_destroy_percpu;
+	}
+
+	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+				      info->snd_seq, OVS_DP_CMD_NEW);
+	err = PTR_ERR(reply);
+	if (IS_ERR(reply))
+		goto err_destroy_local_port;
+
+	list_add_tail(&dp->list_node, &dps);
+	rtnl_unlock();
+
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
+		    GFP_KERNEL);
+	return 0;
+
+err_destroy_local_port:
+	ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+err_destroy_percpu:
+	free_percpu(dp->stats_percpu);
+err_destroy_table:
+	ovs_flow_tbl_destroy(genl_dereference(dp->table));
+err_free_dp:
+	kfree(dp);
+err_put_module:
+	module_put(THIS_MODULE);
+err_unlock_rtnl:
+	rtnl_unlock();
+err:
+	return err;
+}
+
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+	struct vport *vport, *next_vport;
+	struct sk_buff *reply;
+	struct datapath *dp;
+	int err;
+
+	rtnl_lock();
+	dp = lookup_datapath(info->userhdr, info->attrs);
+	err = PTR_ERR(dp);
+	if (IS_ERR(dp))
+		goto exit_unlock;
+
+	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+				      info->snd_seq, OVS_DP_CMD_DEL);
+	err = PTR_ERR(reply);
+	if (IS_ERR(reply))
+		goto exit_unlock;
+
+	list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
+		if (vport->port_no != OVSP_LOCAL)
+			ovs_dp_detach_port(vport);
+
+	list_del(&dp->list_node);
+	ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+
+	/* rtnl_unlock() will wait until all the references to devices that
+	 * are pending unregistration have been dropped.  We do it here to
+	 * ensure that any internal devices (which contain DP pointers) are
+	 * fully destroyed before freeing the datapath.
+	 */
+	rtnl_unlock();
+
+	call_rcu(&dp->rcu, destroy_dp_rcu);
+	module_put(THIS_MODULE);
+
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
+		    GFP_KERNEL);
+
+	return 0;
+
+exit_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *reply;
+	struct datapath *dp;
+	int err;
+
+	dp = lookup_datapath(info->userhdr, info->attrs);
+	if (IS_ERR(dp))
+		return PTR_ERR(dp);
+
+	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+				      info->snd_seq, OVS_DP_CMD_NEW);
+	if (IS_ERR(reply)) {
+		err = PTR_ERR(reply);
+		netlink_set_err(init_net.genl_sock, 0,
+				ovs_dp_datapath_multicast_group.id, err);
+		return 0;
+	}
+
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
+		    GFP_KERNEL);
+
+	return 0;
+}
+
+static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *reply;
+	struct datapath *dp;
+
+	dp = lookup_datapath(info->userhdr, info->attrs);
+	if (IS_ERR(dp))
+		return PTR_ERR(dp);
+
+	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+				      info->snd_seq, OVS_DP_CMD_NEW);
+	if (IS_ERR(reply))
+		return PTR_ERR(reply);
+
+	return genlmsg_reply(reply, info);
+}
+
+static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct datapath *dp;
+	int skip = cb->args[0];
+	int i = 0;
+
+	list_for_each_entry(dp, &dps, list_node) {
+		if (i >= skip &&
+		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					 OVS_DP_CMD_NEW) < 0)
+			break;
+		i++;
+	}
+
+	cb->args[0] = i;
+
+	return skb->len;
+}
+
+static struct genl_ops dp_datapath_genl_ops[] = {
+	{ .cmd = OVS_DP_CMD_NEW,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = datapath_policy,
+	  .doit = ovs_dp_cmd_new
+	},
+	{ .cmd = OVS_DP_CMD_DEL,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = datapath_policy,
+	  .doit = ovs_dp_cmd_del
+	},
+	{ .cmd = OVS_DP_CMD_GET,
+	  .flags = 0,		    /* OK for unprivileged users. */
+	  .policy = datapath_policy,
+	  .doit = ovs_dp_cmd_get,
+	  .dumpit = ovs_dp_cmd_dump
+	},
+	{ .cmd = OVS_DP_CMD_SET,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = datapath_policy,
+	  .doit = ovs_dp_cmd_set,
+	},
+};
+
+static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_family dp_vport_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_VPORT_FAMILY,
+	.version = OVS_VPORT_VERSION,
+	.maxattr = OVS_VPORT_ATTR_MAX
+};
+
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+	.name = OVS_VPORT_MCGROUP
+};
+
+/* Called with RTNL lock or RCU read lock. */
+static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+				   u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+	struct ovs_header *ovs_header;
+	struct ovs_vport_stats vport_stats;
+	int err;
+
+	ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
+				 flags, cmd);
+	if (!ovs_header)
+		return -EMSGSIZE;
+
+	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
+
+	NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
+	NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
+	NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
+	NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+
+	ovs_vport_get_stats(vport, &vport_stats);
+	NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+		&vport_stats);
+
+	err = ovs_vport_get_options(vport, skb);
+	if (err == -EMSGSIZE)
+		goto error;
+
+	return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+	err = -EMSGSIZE;
+error:
+	genlmsg_cancel(skb, ovs_header);
+	return err;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
+					 u32 seq, u8 cmd)
+{
+	struct sk_buff *skb;
+	int retval;
+
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
+	if (retval < 0) {
+		kfree_skb(skb);
+		return ERR_PTR(retval);
+	}
+	return skb;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+static struct vport *lookup_vport(struct ovs_header *ovs_header,
+				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
+{
+	struct datapath *dp;
+	struct vport *vport;
+
+	if (a[OVS_VPORT_ATTR_NAME]) {
+		vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+		if (!vport)
+			return ERR_PTR(-ENODEV);
+		return vport;
+	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
+		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+		if (port_no >= DP_MAX_PORTS)
+			return ERR_PTR(-EFBIG);
+
+		dp = get_dp(ovs_header->dp_ifindex);
+		if (!dp)
+			return ERR_PTR(-ENODEV);
+
+		vport = rcu_dereference_rtnl(dp->ports[port_no]);
+		if (!vport)
+			return ERR_PTR(-ENOENT);
+		return vport;
+	} else
+		return ERR_PTR(-EINVAL);
+}
+
+static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct ovs_header *ovs_header = info->userhdr;
+	struct vport_parms parms;
+	struct sk_buff *reply;
+	struct vport *vport;
+	struct datapath *dp;
+	u32 port_no;
+	int err;
+
+	err = -EINVAL;
+	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
+	    !a[OVS_VPORT_ATTR_UPCALL_PID])
+		goto exit;
+
+	rtnl_lock();
+	dp = get_dp(ovs_header->dp_ifindex);
+	err = -ENODEV;
+	if (!dp)
+		goto exit_unlock;
+
+	if (a[OVS_VPORT_ATTR_PORT_NO]) {
+		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+		err = -EFBIG;
+		if (port_no >= DP_MAX_PORTS)
+			goto exit_unlock;
+
+		vport = rtnl_dereference(dp->ports[port_no]);
+		err = -EBUSY;
+		if (vport)
+			goto exit_unlock;
+	} else {
+		for (port_no = 1; ; port_no++) {
+			if (port_no >= DP_MAX_PORTS) {
+				err = -EFBIG;
+				goto exit_unlock;
+			}
+			vport = rtnl_dereference(dp->ports[port_no]);
+			if (!vport)
+				break;
+		}
+	}
+
+	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
+	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
+	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
+	parms.dp = dp;
+	parms.port_no = port_no;
+	parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+	vport = new_vport(&parms);
+	err = PTR_ERR(vport);
+	if (IS_ERR(vport))
+		goto exit_unlock;
+
+	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+					 OVS_VPORT_CMD_NEW);
+	if (IS_ERR(reply)) {
+		err = PTR_ERR(reply);
+		ovs_dp_detach_port(vport);
+		goto exit_unlock;
+	}
+	err = 0;
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+	rtnl_unlock();
+exit:
+	return err;
+}
+
+static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct sk_buff *reply;
+	struct vport *vport;
+	int err;
+
+	rtnl_lock();
+	vport = lookup_vport(info->userhdr, a);
+	err = PTR_ERR(vport);
+	if (IS_ERR(vport))
+		goto exit_unlock;
+
+	err = 0;
+	if (a[OVS_VPORT_ATTR_TYPE] &&
+	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
+		err = -EINVAL;
+
+	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
+		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+	if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+		vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+					 OVS_VPORT_CMD_NEW);
+	if (IS_ERR(reply)) {
+		netlink_set_err(init_net.genl_sock, 0,
+				ovs_dp_vport_multicast_group.id,
+				PTR_ERR(reply));
+		err = 0;
+		goto exit_unlock;
+	}
+
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct sk_buff *reply;
+	struct vport *vport;
+	int err;
+
+	rtnl_lock();
+	vport = lookup_vport(info->userhdr, a);
+	err = PTR_ERR(vport);
+	if (IS_ERR(vport))
+		goto exit_unlock;
+
+	if (vport->port_no == OVSP_LOCAL) {
+		err = -EINVAL;
+		goto exit_unlock;
+	}
+
+	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+					 OVS_VPORT_CMD_DEL);
+	err = PTR_ERR(reply);
+	if (IS_ERR(reply))
+		goto exit_unlock;
+
+	ovs_dp_detach_port(vport);
+
+	err = 0;
+	genl_notify(reply, genl_info_net(info), info->snd_pid,
+		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct ovs_header *ovs_header = info->userhdr;
+	struct sk_buff *reply;
+	struct vport *vport;
+	int err;
+
+	rcu_read_lock();
+	vport = lookup_vport(ovs_header, a);
+	err = PTR_ERR(vport);
+	if (IS_ERR(vport))
+		goto exit_unlock;
+
+	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+					 OVS_VPORT_CMD_NEW);
+	err = PTR_ERR(reply);
+	if (IS_ERR(reply))
+		goto exit_unlock;
+
+	rcu_read_unlock();
+
+	return genlmsg_reply(reply, info);
+
+exit_unlock:
+	rcu_read_unlock();
+	return err;
+}
+
+static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+	struct datapath *dp;
+	u32 port_no;
+	int retval;
+
+	dp = get_dp(ovs_header->dp_ifindex);
+	if (!dp)
+		return -ENODEV;
+
+	rcu_read_lock();
+	for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+		struct vport *vport;
+
+		vport = rcu_dereference(dp->ports[port_no]);
+		if (!vport)
+			continue;
+
+		if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
+					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					    OVS_VPORT_CMD_NEW) < 0)
+			break;
+	}
+	rcu_read_unlock();
+
+	cb->args[0] = port_no;
+	retval = skb->len;
+
+	return retval;
+}
+
+static void rehash_flow_table(struct work_struct *work)
+{
+	struct datapath *dp;
+
+	genl_lock();
+
+	list_for_each_entry(dp, &dps, list_node) {
+		struct flow_table *old_table = genl_dereference(dp->table);
+		struct flow_table *new_table;
+
+		new_table = ovs_flow_tbl_rehash(old_table);
+		if (!IS_ERR(new_table)) {
+			rcu_assign_pointer(dp->table, new_table);
+			ovs_flow_tbl_deferred_destroy(old_table);
+		}
+	}
+
+	genl_unlock();
+
+	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
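+
+/*
+ * Note (illustrative): REHASH_FLOW_INTERVAL is (10 * 60 * HZ), so the work
+ * item above rebuilds each flow table with a fresh hash roughly every ten
+ * minutes, presumably to keep pathological hash collisions from persisting.
+ */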
+
+static struct genl_ops dp_vport_genl_ops[] = {
+	{ .cmd = OVS_VPORT_CMD_NEW,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = vport_policy,
+	  .doit = ovs_vport_cmd_new
+	},
+	{ .cmd = OVS_VPORT_CMD_DEL,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = vport_policy,
+	  .doit = ovs_vport_cmd_del
+	},
+	{ .cmd = OVS_VPORT_CMD_GET,
+	  .flags = 0,		    /* OK for unprivileged users. */
+	  .policy = vport_policy,
+	  .doit = ovs_vport_cmd_get,
+	  .dumpit = ovs_vport_cmd_dump
+	},
+	{ .cmd = OVS_VPORT_CMD_SET,
+	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+	  .policy = vport_policy,
+	  .doit = ovs_vport_cmd_set,
+	},
+};
+
+struct genl_family_and_ops {
+	struct genl_family *family;
+	struct genl_ops *ops;
+	int n_ops;
+	struct genl_multicast_group *group;
+};
+
+static const struct genl_family_and_ops dp_genl_families[] = {
+	{ &dp_datapath_genl_family,
+	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
+	  &ovs_dp_datapath_multicast_group },
+	{ &dp_vport_genl_family,
+	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
+	  &ovs_dp_vport_multicast_group },
+	{ &dp_flow_genl_family,
+	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
+	  &ovs_dp_flow_multicast_group },
+	{ &dp_packet_genl_family,
+	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
+	  NULL },
+};
+
+static void dp_unregister_genl(int n_families)
+{
+	int i;
+
+	for (i = 0; i < n_families; i++)
+		genl_unregister_family(dp_genl_families[i].family);
+}
+
+static int dp_register_genl(void)
+{
+	int n_registered;
+	int err;
+	int i;
+
+	n_registered = 0;
+	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
+		const struct genl_family_and_ops *f = &dp_genl_families[i];
+
+		err = genl_register_family_with_ops(f->family, f->ops,
+						    f->n_ops);
+		if (err)
+			goto error;
+		n_registered++;
+
+		if (f->group) {
+			err = genl_register_mc_group(f->family, f->group);
+			if (err)
+				goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	dp_unregister_genl(n_registered);
+	return err;
+}
+
+static int __init dp_init(void)
+{
+	struct sk_buff *dummy_skb;
+	int err;
+
+	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
+
+	pr_info("Open vSwitch switching datapath\n");
+
+	err = ovs_flow_init();
+	if (err)
+		goto error;
+
+	err = ovs_vport_init();
+	if (err)
+		goto error_flow_exit;
+
+	err = register_netdevice_notifier(&ovs_dp_device_notifier);
+	if (err)
+		goto error_vport_exit;
+
+	err = dp_register_genl();
+	if (err < 0)
+		goto error_unreg_notifier;
+
+	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+
+	return 0;
+
+error_unreg_notifier:
+	unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_vport_exit:
+	ovs_vport_exit();
+error_flow_exit:
+	ovs_flow_exit();
+error:
+	return err;
+}
+
+static void dp_cleanup(void)
+{
+	cancel_delayed_work_sync(&rehash_flow_wq);
+	rcu_barrier();
+	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
+	unregister_netdevice_notifier(&ovs_dp_device_notifier);
+	ovs_vport_exit();
+	ovs_flow_exit();
+}
+
+module_init(dp_init);
+module_exit(dp_cleanup);
+
+MODULE_DESCRIPTION("Open vSwitch switching datapath");
+MODULE_LICENSE("GPL");
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
new file mode 100644
index 0000000..5b9f884
--- /dev/null
+++ b/net/openvswitch/datapath.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef DATAPATH_H
+#define DATAPATH_H 1
+
+#include <asm/page.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/version.h>
+
+#include "flow.h"
+
+struct vport;
+
+#define DP_MAX_PORTS 1024
+#define SAMPLE_ACTION_DEPTH 3
+
+/**
+ * struct dp_stats_percpu - per-cpu packet processing statistics for a given
+ * datapath.
+ * @n_hit: Number of received packets for which a matching flow was found in
+ * the flow table.
+ * @n_missed: Number of received packets that had no matching flow in the
+ * flow table.  The sum of @n_hit and @n_missed is the number of packets that
+ * have been received by the datapath.
+ * @n_lost: Number of received packets with no matching flow that could not
+ * be sent to userspace (normally due to an overflow in one of the
+ * datapath's queues).
+ * @sync: Synchronizes updates to the 64-bit counters above.
+ */
+struct dp_stats_percpu {
+	u64 n_hit;
+	u64 n_missed;
+	u64 n_lost;
+	struct u64_stats_sync sync;
+};
+
+/**
+ * struct datapath - datapath for flow-based packet switching
+ * @rcu: RCU callback head for deferred destruction.
+ * @list_node: Element in global 'dps' list.
+ * @table: Current flow table.  Protected by genl_lock and RCU.
+ * @ports: Map from port number to &struct vport.  %OVSP_LOCAL port
+ * always exists, other ports may be %NULL.  Protected by RTNL and RCU.
+ * @port_list: List of all ports in @ports in arbitrary order.  RTNL required
+ * to iterate or modify.
+ * @stats_percpu: Per-CPU datapath statistics.
+ *
+ * Context: See the comment on locking at the top of datapath.c for additional
+ * locking information.
+ */
+struct datapath {
+	struct rcu_head rcu;
+	struct list_head list_node;
+
+	/* Flow table. */
+	struct flow_table __rcu *table;
+
+	/* Switch ports. */
+	struct vport __rcu *ports[DP_MAX_PORTS];
+	struct list_head port_list;
+
+	/* Stats. */
+	struct dp_stats_percpu __percpu *stats_percpu;
+};
+
+/**
+ * struct ovs_skb_cb - OVS data in skb CB
+ * @flow: The flow associated with this packet.  May be %NULL if no flow.
+ */
+struct ovs_skb_cb {
+	struct sw_flow		*flow;
+};
+#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
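+
+/*
+ * Minimal usage sketch (illustrative): the receive path stores the looked-up
+ * flow in the per-skb control block before executing its actions, e.g.
+ *
+ *	OVS_CB(skb)->flow = flow;
+ *	ovs_execute_actions(dp, skb);
+ */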
+
+/**
+ * struct dp_upcall_info - metadata to include with a packet to send to userspace
+ * @cmd: One of %OVS_PACKET_CMD_*.
+ * @key: Becomes %OVS_PACKET_ATTR_KEY.  Must be nonnull.
+ * @userdata: If nonnull, its u64 value is extracted and passed to userspace as
+ * %OVS_PACKET_ATTR_USERDATA.
+ * @pid: Netlink PID to which packet should be sent.  If @pid is 0 then no
+ * packet is sent and the packet is accounted in the datapath's @n_lost
+ * counter.
+ */
+struct dp_upcall_info {
+	u8 cmd;
+	const struct sw_flow_key *key;
+	const struct nlattr *userdata;
+	u32 pid;
+};
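+
+/*
+ * Illustrative example of how the miss path in datapath.c fills this in
+ * (equivalent to the field-by-field assignments there):
+ *
+ *	struct dp_upcall_info upcall = {
+ *		.cmd		= OVS_PACKET_CMD_MISS,
+ *		.key		= &key,
+ *		.userdata	= NULL,
+ *		.pid		= p->upcall_pid,
+ *	};
+ *	ovs_dp_upcall(dp, skb, &upcall);
+ */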
+
+extern struct notifier_block ovs_dp_device_notifier;
+extern struct genl_multicast_group ovs_dp_vport_multicast_group;
+
+void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_detach_port(struct vport *);
+int ovs_dp_upcall(struct datapath *, struct sk_buff *,
+		  const struct dp_upcall_info *);
+
+const char *ovs_dp_name(const struct datapath *dp);
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
+					 u8 cmd);
+
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
+#endif /* datapath.h */
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
new file mode 100644
index 0000000..4673651
--- /dev/null
+++ b/net/openvswitch/dp_notify.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/netdevice.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+static int dp_device_event(struct notifier_block *unused, unsigned long event,
+			   void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct vport *vport;
+
+	if (ovs_is_internal_dev(dev))
+		vport = ovs_internal_dev_get_vport(dev);
+	else
+		vport = ovs_netdev_get_vport(dev);
+
+	if (!vport)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		if (!ovs_is_internal_dev(dev)) {
+			struct sk_buff *notify;
+
+			notify = ovs_vport_cmd_build_info(vport, 0, 0,
+							  OVS_VPORT_CMD_DEL);
+			ovs_dp_detach_port(vport);
+			if (IS_ERR(notify)) {
+				netlink_set_err(init_net.genl_sock, 0,
+						ovs_dp_vport_multicast_group.id,
+						PTR_ERR(notify));
+				break;
+			}
+
+			genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
+					  GFP_KERNEL);
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+struct notifier_block ovs_dp_device_notifier = {
+	.notifier_call = dp_device_event
+};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
new file mode 100644
index 0000000..fe7f020
--- /dev/null
+++ b/net/openvswitch/flow.c
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+static struct kmem_cache *flow_cache;
+
+static int check_header(struct sk_buff *skb, int len)
+{
+	if (unlikely(skb->len < len))
+		return -EINVAL;
+	if (unlikely(!pskb_may_pull(skb, len)))
+		return -ENOMEM;
+	return 0;
+}
+
+static bool arphdr_ok(struct sk_buff *skb)
+{
+	return pskb_may_pull(skb, skb_network_offset(skb) +
+				  sizeof(struct arp_eth_header));
+}
+
+static int check_iphdr(struct sk_buff *skb)
+{
+	unsigned int nh_ofs = skb_network_offset(skb);
+	unsigned int ip_len;
+	int err;
+
+	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
+	if (unlikely(err))
+		return err;
+
+	ip_len = ip_hdrlen(skb);
+	if (unlikely(ip_len < sizeof(struct iphdr) ||
+		     skb->len < nh_ofs + ip_len))
+		return -EINVAL;
+
+	skb_set_transport_header(skb, nh_ofs + ip_len);
+	return 0;
+}
+
+static bool tcphdr_ok(struct sk_buff *skb)
+{
+	int th_ofs = skb_transport_offset(skb);
+	int tcp_len;
+
+	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
+		return false;
+
+	tcp_len = tcp_hdrlen(skb);
+	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
+		     skb->len < th_ofs + tcp_len))
+		return false;
+
+	return true;
+}
+
+static bool udphdr_ok(struct sk_buff *skb)
+{
+	return pskb_may_pull(skb, skb_transport_offset(skb) +
+				  sizeof(struct udphdr));
+}
+
+static bool icmphdr_ok(struct sk_buff *skb)
+{
+	return pskb_may_pull(skb, skb_transport_offset(skb) +
+				  sizeof(struct icmphdr));
+}
+
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
+{
+	struct timespec cur_ts;
+	u64 cur_ms, idle_ms;
+
+	ktime_get_ts(&cur_ts);
+	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+		 cur_ts.tv_nsec / NSEC_PER_MSEC;
+
+	return cur_ms - idle_ms;
+}
+
+#define SW_FLOW_KEY_OFFSET(field)		\
+	(offsetof(struct sw_flow_key, field) +	\
+	 FIELD_SIZEOF(struct sw_flow_key, field))
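+
+/*
+ * Illustrative expansion (for reference only): SW_FLOW_KEY_OFFSET(ipv4.addr)
+ * evaluates to the offset of the first byte past 'ipv4.addr' in struct
+ * sw_flow_key, i.e. how many leading bytes of the key are valid once the
+ * IPv4 addresses have been extracted.  The parsers below use it to report,
+ * via *key_lenp, how much of the key they filled in.
+ */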
+
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
+			 int *key_lenp)
+{
+	unsigned int nh_ofs = skb_network_offset(skb);
+	unsigned int nh_len;
+	int payload_ofs;
+	struct ipv6hdr *nh;
+	uint8_t nexthdr;
+	__be16 frag_off;
+	int err;
+
+	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
+
+	err = check_header(skb, nh_ofs + sizeof(*nh));
+	if (unlikely(err))
+		return err;
+
+	nh = ipv6_hdr(skb);
+	nexthdr = nh->nexthdr;
+	payload_ofs = (u8 *)(nh + 1) - skb->data;
+
+	key->ip.proto = NEXTHDR_NONE;
+	key->ip.tos = ipv6_get_dsfield(nh);
+	key->ip.ttl = nh->hop_limit;
+	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+	key->ipv6.addr.src = nh->saddr;
+	key->ipv6.addr.dst = nh->daddr;
+
+	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
+	if (unlikely(payload_ofs < 0))
+		return -EINVAL;
+
+	if (frag_off) {
+		if (frag_off & htons(~0x7))
+			key->ip.frag = OVS_FRAG_TYPE_LATER;
+		else
+			key->ip.frag = OVS_FRAG_TYPE_FIRST;
+	}
+
+	nh_len = payload_ofs - nh_ofs;
+	skb_set_transport_header(skb, nh_ofs + nh_len);
+	key->ip.proto = nexthdr;
+	return nh_len;
+}
+
+static bool icmp6hdr_ok(struct sk_buff *skb)
+{
+	return pskb_may_pull(skb, skb_transport_offset(skb) +
+				  sizeof(struct icmp6hdr));
+}
+
+#define TCP_FLAGS_OFFSET 13
+#define TCP_FLAG_MASK 0x3f
+
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+{
+	u8 tcp_flags = 0;
+
+	if (flow->key.eth.type == htons(ETH_P_IP) &&
+	    flow->key.ip.proto == IPPROTO_TCP) {
+		u8 *tcp = (u8 *)tcp_hdr(skb);
+		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
+	}
+
+	spin_lock(&flow->lock);
+	flow->used = jiffies;
+	flow->packet_count++;
+	flow->byte_count += skb->len;
+	flow->tcp_flags |= tcp_flags;
+	spin_unlock(&flow->lock);
+}
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
+{
+	int actions_len = nla_len(actions);
+	struct sw_flow_actions *sfa;
+
+	/* At least DP_MAX_PORTS actions are required to be able to flood a
+	 * packet to every port.  Factor of 2 allows for setting VLAN tags,
+	 * etc. */
+	if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+		return ERR_PTR(-EINVAL);
+
+	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
+	if (!sfa)
+		return ERR_PTR(-ENOMEM);
+
+	sfa->actions_len = actions_len;
+	memcpy(sfa->actions, nla_data(actions), actions_len);
+	return sfa;
+}
+
+struct sw_flow *ovs_flow_alloc(void)
+{
+	struct sw_flow *flow;
+
+	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+	if (!flow)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&flow->lock);
+	flow->sf_acts = NULL;
+
+	return flow;
+}
+
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+{
+	hash = jhash_1word(hash, table->hash_seed);
+	return flex_array_get(table->buckets,
+				(hash & (table->n_buckets - 1)));
+}
+
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+	struct flex_array *buckets;
+	int i, err;
+
+	buckets = flex_array_alloc(sizeof(struct hlist_head *),
+				   n_buckets, GFP_KERNEL);
+	if (!buckets)
+		return NULL;
+
+	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+	if (err) {
+		flex_array_free(buckets);
+		return NULL;
+	}
+
+	for (i = 0; i < n_buckets; i++)
+		INIT_HLIST_HEAD((struct hlist_head *)
+					flex_array_get(buckets, i));
+
+	return buckets;
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+	flex_array_free(buckets);
+}
+
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+
+	if (!table)
+		return NULL;
+
+	table->buckets = alloc_buckets(new_size);
+
+	if (!table->buckets) {
+		kfree(table);
+		return NULL;
+	}
+	table->n_buckets = new_size;
+	table->count = 0;
+	table->node_ver = 0;
+	table->keep_flows = false;
+	get_random_bytes(&table->hash_seed, sizeof(u32));
+
+	return table;
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+	int i;
+
+	if (!table)
+		return;
+
+	if (table->keep_flows)
+		goto skip_flows;
+
+	for (i = 0; i < table->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head = flex_array_get(table->buckets, i);
+		struct hlist_node *node, *n;
+		int ver = table->node_ver;
+
+		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+			hlist_del_rcu(&flow->hash_node[ver]);
+			ovs_flow_free(flow);
+		}
+	}
+
+skip_flows:
+	free_buckets(table->buckets);
+	kfree(table);
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+	struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+
+	ovs_flow_tbl_destroy(table);
+}
+
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+{
+	if (!table)
+		return;
+
+	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+}
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+{
+	struct sw_flow *flow;
+	struct hlist_head *head;
+	struct hlist_node *n;
+	int ver;
+	int i;
+
+	ver = table->node_ver;
+	while (*bucket < table->n_buckets) {
+		i = 0;
+		head = flex_array_get(table->buckets, *bucket);
+		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+			if (i < *last) {
+				i++;
+				continue;
+			}
+			*last = i + 1;
+			return flow;
+		}
+		(*bucket)++;
+		*last = 0;
+	}
+
+	return NULL;
+}
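+
+/*
+ * Illustrative iteration using the (bucket, last) cursor; the real user is
+ * the Netlink flow-dump path, which saves the cursor between dump calls.
+ * Sketch only (the table must be protected by RCU or the caller's lock):
+ *
+ *	u32 bucket = 0, obj = 0;
+ *	struct sw_flow *flow;
+ *
+ *	while ((flow = ovs_flow_tbl_next(table, &bucket, &obj)))
+ *		process_flow(flow);	(process_flow() is a stand-in)
+ */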
+
+static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
+{
+	int old_ver;
+	int i;
+
+	old_ver = old->node_ver;
+	new->node_ver = !old_ver;
+
+	/* Insert in new table. */
+	for (i = 0; i < old->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head;
+		struct hlist_node *n;
+
+		head = flex_array_get(old->buckets, i);
+
+		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+			ovs_flow_tbl_insert(new, flow);
+	}
+	old->keep_flows = true;
+}
+
+static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
+{
+	struct flow_table *new_table;
+
+	new_table = ovs_flow_tbl_alloc(n_buckets);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	flow_table_copy_flows(table, new_table);
+
+	return new_table;
+}
+
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+{
+	return __flow_tbl_rehash(table, table->n_buckets);
+}
+
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
+{
+	return __flow_tbl_rehash(table, table->n_buckets * 2);
+}
+
+void ovs_flow_free(struct sw_flow *flow)
+{
+	if (unlikely(!flow))
+		return;
+
+	kfree((struct sw_flow_actions __force *)flow->sf_acts);
+	kmem_cache_free(flow_cache, flow);
+}
+
+/* RCU callback used by ovs_flow_deferred_free. */
+static void rcu_free_flow_callback(struct rcu_head *rcu)
+{
+	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
+
+	ovs_flow_free(flow);
+}
+
+/* Schedules 'flow' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free(struct sw_flow *flow)
+{
+	call_rcu(&flow->rcu, rcu_free_flow_callback);
+}
+
+/* RCU callback used by ovs_flow_deferred_free_acts. */
+static void rcu_free_acts_callback(struct rcu_head *rcu)
+{
+	struct sw_flow_actions *sf_acts = container_of(rcu,
+			struct sw_flow_actions, rcu);
+	kfree(sf_acts);
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+{
+	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+}
+
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+	struct qtag_prefix {
+		__be16 eth_type; /* ETH_P_8021Q */
+		__be16 tci;
+	};
+	struct qtag_prefix *qp;
+
+	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+		return 0;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
+					 sizeof(__be16))))
+		return -ENOMEM;
+
+	qp = (struct qtag_prefix *) skb->data;
+	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
+	__skb_pull(skb, sizeof(struct qtag_prefix));
+
+	return 0;
+}
+
+static __be16 parse_ethertype(struct sk_buff *skb)
+{
+	struct llc_snap_hdr {
+		u8  dsap;  /* Always 0xAA */
+		u8  ssap;  /* Always 0xAA */
+		u8  ctrl;
+		u8  oui[3];
+		__be16 ethertype;
+	};
+	struct llc_snap_hdr *llc;
+	__be16 proto;
+
+	proto = *(__be16 *) skb->data;
+	__skb_pull(skb, sizeof(__be16));
+
+	if (ntohs(proto) >= 1536)
+		return proto;
+
+	if (skb->len < sizeof(struct llc_snap_hdr))
+		return htons(ETH_P_802_2);
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
+		return htons(0);
+
+	llc = (struct llc_snap_hdr *) skb->data;
+	if (llc->dsap != LLC_SAP_SNAP ||
+	    llc->ssap != LLC_SAP_SNAP ||
+	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
+		return htons(ETH_P_802_2);
+
+	__skb_pull(skb, sizeof(struct llc_snap_hdr));
+	return llc->ethertype;
+}
+
+static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+			int *key_lenp, int nh_len)
+{
+	struct icmp6hdr *icmp = icmp6_hdr(skb);
+	int error = 0;
+	int key_len;
+
+	/* The ICMPv6 type and code fields use the 16-bit transport port
+	 * fields, so we need to store them in 16-bit network byte order.
+	 */
+	key->ipv6.tp.src = htons(icmp->icmp6_type);
+	key->ipv6.tp.dst = htons(icmp->icmp6_code);
+	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+
+	if (icmp->icmp6_code == 0 &&
+	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+		int icmp_len = skb->len - skb_transport_offset(skb);
+		struct nd_msg *nd;
+		int offset;
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+		/* In order to process neighbor discovery options, we need the
+		 * entire packet.
+		 */
+		if (unlikely(icmp_len < sizeof(*nd)))
+			goto out;
+		if (unlikely(skb_linearize(skb))) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		nd = (struct nd_msg *)skb_transport_header(skb);
+		key->ipv6.nd.target = nd->target;
+		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+		icmp_len -= sizeof(*nd);
+		offset = 0;
+		while (icmp_len >= 8) {
+			struct nd_opt_hdr *nd_opt =
+				 (struct nd_opt_hdr *)(nd->opt + offset);
+			int opt_len = nd_opt->nd_opt_len * 8;
+
+			if (unlikely(!opt_len || opt_len > icmp_len))
+				goto invalid;
+
+			/* Store the link layer address if the appropriate
+			 * option is provided.  It is considered an error if
+			 * the same link layer option is specified twice.
+			 */
+			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
+			    && opt_len == 8) {
+				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
+					goto invalid;
+				memcpy(key->ipv6.nd.sll,
+				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
+				   && opt_len == 8) {
+				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
+					goto invalid;
+				memcpy(key->ipv6.nd.tll,
+				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+			}
+
+			icmp_len -= opt_len;
+			offset += opt_len;
+		}
+	}
+
+	goto out;
+
+invalid:
+	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
+	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
+	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
+
+out:
+	*key_lenp = key_len;
+	return error;
+}
+
+/**
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @in_port: port number on which @skb was received.
+ * @key: output flow key
+ * @key_lenp: length of output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header pointers as follows:
+ *
+ *    - skb->mac_header: the Ethernet header.
+ *
+ *    - skb->network_header: just past the Ethernet header, or just past the
+ *      VLAN header, to the first byte of the Ethernet payload.
+ *
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->eth.type values it is left untouched.
+ */
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+		 int *key_lenp)
+{
+	int error = 0;
+	int key_len = SW_FLOW_KEY_OFFSET(eth);
+	struct ethhdr *eth;
+
+	memset(key, 0, sizeof(*key));
+
+	key->phy.priority = skb->priority;
+	key->phy.in_port = in_port;
+
+	skb_reset_mac_header(skb);
+
+	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
+	 * header in the linear data area.
+	 */
+	eth = eth_hdr(skb);
+	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
+	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+
+	__skb_pull(skb, 2 * ETH_ALEN);
+
+	if (vlan_tx_tag_present(skb))
+		key->eth.tci = htons(skb->vlan_tci);
+	else if (eth->h_proto == htons(ETH_P_8021Q))
+		if (unlikely(parse_vlan(skb, key)))
+			return -ENOMEM;
+
+	key->eth.type = parse_ethertype(skb);
+	if (unlikely(key->eth.type == htons(0)))
+		return -ENOMEM;
+
+	skb_reset_network_header(skb);
+	__skb_push(skb, skb->data - skb_mac_header(skb));
+
+	/* Network layer. */
+	if (key->eth.type == htons(ETH_P_IP)) {
+		struct iphdr *nh;
+		__be16 offset;
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+
+		error = check_iphdr(skb);
+		if (unlikely(error)) {
+			if (error == -EINVAL) {
+				skb->transport_header = skb->network_header;
+				error = 0;
+			}
+			goto out;
+		}
+
+		nh = ip_hdr(skb);
+		key->ipv4.addr.src = nh->saddr;
+		key->ipv4.addr.dst = nh->daddr;
+
+		key->ip.proto = nh->protocol;
+		key->ip.tos = nh->tos;
+		key->ip.ttl = nh->ttl;
+
+		offset = nh->frag_off & htons(IP_OFFSET);
+		if (offset) {
+			key->ip.frag = OVS_FRAG_TYPE_LATER;
+			goto out;
+		}
+		if (nh->frag_off & htons(IP_MF) ||
+			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+			key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+		/* Transport layer. */
+		if (key->ip.proto == IPPROTO_TCP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+			if (tcphdr_ok(skb)) {
+				struct tcphdr *tcp = tcp_hdr(skb);
+				key->ipv4.tp.src = tcp->source;
+				key->ipv4.tp.dst = tcp->dest;
+			}
+		} else if (key->ip.proto == IPPROTO_UDP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+			if (udphdr_ok(skb)) {
+				struct udphdr *udp = udp_hdr(skb);
+				key->ipv4.tp.src = udp->source;
+				key->ipv4.tp.dst = udp->dest;
+			}
+		} else if (key->ip.proto == IPPROTO_ICMP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+			if (icmphdr_ok(skb)) {
+				struct icmphdr *icmp = icmp_hdr(skb);
+				/* The ICMP type and code fields use the 16-bit
+				 * transport port fields, so we need to store
+				 * them in 16-bit network byte order. */
+				key->ipv4.tp.src = htons(icmp->type);
+				key->ipv4.tp.dst = htons(icmp->code);
+			}
+		}
+
+	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
+		struct arp_eth_header *arp;
+
+		arp = (struct arp_eth_header *)skb_network_header(skb);
+
+		if (arp->ar_hrd == htons(ARPHRD_ETHER)
+				&& arp->ar_pro == htons(ETH_P_IP)
+				&& arp->ar_hln == ETH_ALEN
+				&& arp->ar_pln == 4) {
+
+			/* We only match on the lower 8 bits of the opcode. */
+			if (ntohs(arp->ar_op) <= 0xff)
+				key->ip.proto = ntohs(arp->ar_op);
+
+			if (key->ip.proto == ARPOP_REQUEST
+					|| key->ip.proto == ARPOP_REPLY) {
+				memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+				memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+			}
+		}
+	} else if (key->eth.type == htons(ETH_P_IPV6)) {
+		int nh_len;             /* IPv6 Header + Extensions */
+
+		nh_len = parse_ipv6hdr(skb, key, &key_len);
+		if (unlikely(nh_len < 0)) {
+			if (nh_len == -EINVAL)
+				skb->transport_header = skb->network_header;
+			else
+				error = nh_len;
+			goto out;
+		}
+
+		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+			goto out;
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+			key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+		/* Transport layer. */
+		if (key->ip.proto == NEXTHDR_TCP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+			if (tcphdr_ok(skb)) {
+				struct tcphdr *tcp = tcp_hdr(skb);
+				key->ipv6.tp.src = tcp->source;
+				key->ipv6.tp.dst = tcp->dest;
+			}
+		} else if (key->ip.proto == NEXTHDR_UDP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+			if (udphdr_ok(skb)) {
+				struct udphdr *udp = udp_hdr(skb);
+				key->ipv6.tp.src = udp->source;
+				key->ipv6.tp.dst = udp->dest;
+			}
+		} else if (key->ip.proto == NEXTHDR_ICMP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+			if (icmp6hdr_ok(skb)) {
+				error = parse_icmpv6(skb, key, &key_len, nh_len);
+				if (error < 0)
+					goto out;
+			}
+		}
+	}
+
+out:
+	*key_lenp = key_len;
+	return error;
+}
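+
+/*
+ * Usage sketch (illustrative only; the real caller is the receive path in
+ * datapath.c and may differ in detail):
+ *
+ *	struct sw_flow_key key;
+ *	int key_len, error;
+ *
+ *	error = ovs_flow_extract(skb, vport->port_no, &key, &key_len);
+ *	if (unlikely(error))
+ *		goto drop;
+ *	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ */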
+
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
+{
+	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+				struct sw_flow_key *key, int key_len)
+{
+	struct sw_flow *flow;
+	struct hlist_node *n;
+	struct hlist_head *head;
+	u32 hash;
+
+	hash = ovs_flow_hash(key, key_len);
+
+	head = find_bucket(table, hash);
+	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+
+		if (flow->hash == hash &&
+		    !memcmp(&flow->key, key, key_len)) {
+			return flow;
+		}
+	}
+	return NULL;
+}
+
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+	struct hlist_head *head;
+
+	head = find_bucket(table, flow->hash);
+	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+	table->count++;
+}
+
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+	hlist_del_rcu(&flow->hash_node[table->node_ver]);
+	table->count--;
+	BUG_ON(table->count < 0);
+}
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
+const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+	[OVS_KEY_ATTR_ENCAP] = -1,		/* variable length, not length-checked here */
+	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+};
+
+static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+				  const struct nlattr *a[], u32 *attrs)
+{
+	const struct ovs_key_icmp *icmp_key;
+	const struct ovs_key_tcp *tcp_key;
+	const struct ovs_key_udp *udp_key;
+
+	switch (swkey->ip.proto) {
+	case IPPROTO_TCP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+		swkey->ipv4.tp.src = tcp_key->tcp_src;
+		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
+		break;
+
+	case IPPROTO_UDP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+		swkey->ipv4.tp.src = udp_key->udp_src;
+		swkey->ipv4.tp.dst = udp_key->udp_dst;
+		break;
+
+	case IPPROTO_ICMP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
+		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
+		break;
+	}
+
+	return 0;
+}
+
+static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+				  const struct nlattr *a[], u32 *attrs)
+{
+	const struct ovs_key_icmpv6 *icmpv6_key;
+	const struct ovs_key_tcp *tcp_key;
+	const struct ovs_key_udp *udp_key;
+
+	switch (swkey->ip.proto) {
+	case IPPROTO_TCP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+		swkey->ipv6.tp.src = tcp_key->tcp_src;
+		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
+		break;
+
+	case IPPROTO_UDP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+		swkey->ipv6.tp.src = udp_key->udp_src;
+		swkey->ipv6.tp.dst = udp_key->udp_dst;
+		break;
+
+	case IPPROTO_ICMPV6:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
+		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+
+		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+			const struct ovs_key_nd *nd_key;
+
+			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
+				return -EINVAL;
+			*attrs &= ~(1 << OVS_KEY_ATTR_ND);
+
+			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
+			       sizeof(swkey->ipv6.nd.target));
+			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
+			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+			      const struct nlattr *a[], u32 *attrsp)
+{
+	const struct nlattr *nla;
+	u32 attrs;
+	int rem;
+
+	attrs = 0;
+	nla_for_each_nested(nla, attr, rem) {
+		u16 type = nla_type(nla);
+		int expected_len;
+
+		if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
+			return -EINVAL;
+
+		expected_len = ovs_key_lens[type];
+		if (nla_len(nla) != expected_len && expected_len != -1)
+			return -EINVAL;
+
+		attrs |= 1 << type;
+		a[type] = nla;
+	}
+	if (rem)
+		return -EINVAL;
+
+	*attrsp = attrs;
+	return 0;
+}
+
+/**
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * @swkey: receives the extracted flow key.
+ * @key_lenp: number of bytes used in @swkey.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ */
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+		      const struct nlattr *attr)
+{
+	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+	const struct ovs_key_ethernet *eth_key;
+	int key_len;
+	u32 attrs;
+	int err;
+
+	memset(swkey, 0, sizeof(struct sw_flow_key));
+	key_len = SW_FLOW_KEY_OFFSET(eth);
+
+	err = parse_flow_nlattrs(attr, a, &attrs);
+	if (err)
+		return err;
+
+	/* Metadata attributes. */
+	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
+		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+	}
+	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+		if (in_port >= DP_MAX_PORTS)
+			return -EINVAL;
+		swkey->phy.in_port = in_port;
+		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+	} else {
+		swkey->phy.in_port = USHRT_MAX;
+	}
+
+	/* Data attributes. */
+	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
+		return -EINVAL;
+	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+
+	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
+	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+
+	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
+	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
+		const struct nlattr *encap;
+		__be16 tci;
+
+		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
+			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
+			      (1 << OVS_KEY_ATTR_ENCAP)))
+			return -EINVAL;
+
+		encap = a[OVS_KEY_ATTR_ENCAP];
+		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+		if (tci & htons(VLAN_TAG_PRESENT)) {
+			swkey->eth.tci = tci;
+
+			err = parse_flow_nlattrs(encap, a, &attrs);
+			if (err)
+				return err;
+		} else if (!tci) {
+			/* Corner case for truncated 802.1Q header. */
+			if (nla_len(encap))
+				return -EINVAL;
+
+			swkey->eth.type = htons(ETH_P_8021Q);
+			*key_lenp = key_len;
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+		if (ntohs(swkey->eth.type) < 1536)
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+	} else {
+		swkey->eth.type = htons(ETH_P_802_2);
+	}
+
+	if (swkey->eth.type == htons(ETH_P_IP)) {
+		const struct ovs_key_ipv4 *ipv4_key;
+
+		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+			return -EINVAL;
+		swkey->ip.proto = ipv4_key->ipv4_proto;
+		swkey->ip.tos = ipv4_key->ipv4_tos;
+		swkey->ip.ttl = ipv4_key->ipv4_ttl;
+		swkey->ip.frag = ipv4_key->ipv4_frag;
+		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
+		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
+
+		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+			if (err)
+				return err;
+		}
+	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+		const struct ovs_key_ipv6 *ipv6_key;
+
+		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
+		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+			return -EINVAL;
+		swkey->ipv6.label = ipv6_key->ipv6_label;
+		swkey->ip.proto = ipv6_key->ipv6_proto;
+		swkey->ip.tos = ipv6_key->ipv6_tclass;
+		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
+		swkey->ip.frag = ipv6_key->ipv6_frag;
+		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
+		       sizeof(swkey->ipv6.addr.src));
+		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
+		       sizeof(swkey->ipv6.addr.dst));
+
+		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+			if (err)
+				return err;
+		}
+	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
+		const struct ovs_key_arp *arp_key;
+
+		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+		swkey->ipv4.addr.src = arp_key->arp_sip;
+		swkey->ipv4.addr.dst = arp_key->arp_tip;
+		if (arp_key->arp_op & htons(0xff00))
+			return -EINVAL;
+		swkey->ip.proto = ntohs(arp_key->arp_op);
+		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
+		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+	}
+
+	if (attrs)
+		return -EINVAL;
+	*key_lenp = key_len;
+
+	return 0;
+}
+
+/**
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * @priority: receives the extracted packet QoS priority.
+ * @in_port: receives the extracted input port.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of it to
+ * get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+			       const struct nlattr *attr)
+{
+	const struct nlattr *nla;
+	int rem;
+
+	*in_port = USHRT_MAX;
+	*priority = 0;
+
+	nla_for_each_nested(nla, attr, rem) {
+		int type = nla_type(nla);
+
+		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
+			if (nla_len(nla) != ovs_key_lens[type])
+				return -EINVAL;
+
+			switch (type) {
+			case OVS_KEY_ATTR_PRIORITY:
+				*priority = nla_get_u32(nla);
+				break;
+
+			case OVS_KEY_ATTR_IN_PORT:
+				if (nla_get_u32(nla) >= DP_MAX_PORTS)
+					return -EINVAL;
+				*in_port = nla_get_u32(nla);
+				break;
+			}
+		}
+	}
+	if (rem)
+		return -EINVAL;
+	return 0;
+}
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+{
+	struct ovs_key_ethernet *eth_key;
+	struct nlattr *nla, *encap;
+
+	if (swkey->phy.priority)
+		NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+
+	if (swkey->phy.in_port != USHRT_MAX)
+		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+
+	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+	if (!nla)
+		goto nla_put_failure;
+	eth_key = nla_data(nla);
+	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
+	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+
+	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+		NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
+		NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+		if (!swkey->eth.tci)
+			goto unencap;
+	} else {
+		encap = NULL;
+	}
+
+	if (swkey->eth.type == htons(ETH_P_802_2))
+		goto unencap;
+
+	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+
+	if (swkey->eth.type == htons(ETH_P_IP)) {
+		struct ovs_key_ipv4 *ipv4_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+		if (!nla)
+			goto nla_put_failure;
+		ipv4_key = nla_data(nla);
+		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
+		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
+		ipv4_key->ipv4_proto = swkey->ip.proto;
+		ipv4_key->ipv4_tos = swkey->ip.tos;
+		ipv4_key->ipv4_ttl = swkey->ip.ttl;
+		ipv4_key->ipv4_frag = swkey->ip.frag;
+	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+		struct ovs_key_ipv6 *ipv6_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+		if (!nla)
+			goto nla_put_failure;
+		ipv6_key = nla_data(nla);
+		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+				sizeof(ipv6_key->ipv6_src));
+		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+				sizeof(ipv6_key->ipv6_dst));
+		ipv6_key->ipv6_label = swkey->ipv6.label;
+		ipv6_key->ipv6_proto = swkey->ip.proto;
+		ipv6_key->ipv6_tclass = swkey->ip.tos;
+		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
+		ipv6_key->ipv6_frag = swkey->ip.frag;
+	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
+		struct ovs_key_arp *arp_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+		if (!nla)
+			goto nla_put_failure;
+		arp_key = nla_data(nla);
+		memset(arp_key, 0, sizeof(struct ovs_key_arp));
+		arp_key->arp_sip = swkey->ipv4.addr.src;
+		arp_key->arp_tip = swkey->ipv4.addr.dst;
+		arp_key->arp_op = htons(swkey->ip.proto);
+		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
+		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+	}
+
+	if ((swkey->eth.type == htons(ETH_P_IP) ||
+	     swkey->eth.type == htons(ETH_P_IPV6)) &&
+	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+		if (swkey->ip.proto == IPPROTO_TCP) {
+			struct ovs_key_tcp *tcp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+			if (!nla)
+				goto nla_put_failure;
+			tcp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				tcp_key->tcp_src = swkey->ipv4.tp.src;
+				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				tcp_key->tcp_src = swkey->ipv6.tp.src;
+				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+			}
+		} else if (swkey->ip.proto == IPPROTO_UDP) {
+			struct ovs_key_udp *udp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+			if (!nla)
+				goto nla_put_failure;
+			udp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				udp_key->udp_src = swkey->ipv4.tp.src;
+				udp_key->udp_dst = swkey->ipv4.tp.dst;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				udp_key->udp_src = swkey->ipv6.tp.src;
+				udp_key->udp_dst = swkey->ipv6.tp.dst;
+			}
+		} else if (swkey->eth.type == htons(ETH_P_IP) &&
+			   swkey->ip.proto == IPPROTO_ICMP) {
+			struct ovs_key_icmp *icmp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+			if (!nla)
+				goto nla_put_failure;
+			icmp_key = nla_data(nla);
+			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
+			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+			   swkey->ip.proto == IPPROTO_ICMPV6) {
+			struct ovs_key_icmpv6 *icmpv6_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+						sizeof(*icmpv6_key));
+			if (!nla)
+				goto nla_put_failure;
+			icmpv6_key = nla_data(nla);
+			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
+			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+
+			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+				struct ovs_key_nd *nd_key;
+
+				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+				if (!nla)
+					goto nla_put_failure;
+				nd_key = nla_data(nla);
+				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+							sizeof(nd_key->nd_target));
+				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
+				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+			}
+		}
+	}
+
+unencap:
+	if (encap)
+		nla_nest_end(skb, encap);
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
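+
+/*
+ * Illustrative caller pattern (sketch; the Netlink flow-dump code in
+ * datapath.c is the real user): the key attributes are emitted inside an
+ * OVS_FLOW_ATTR_KEY nest of the reply message.
+ *
+ *	struct nlattr *nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
+ *
+ *	if (!nla || ovs_flow_to_nlattrs(&flow->key, skb))
+ *		goto nla_put_failure;
+ *	nla_nest_end(skb, nla);
+ */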
+
+/* Initializes the flow module.
+ * Returns zero if successful or a negative error code. */
+int ovs_flow_init(void)
+{
+	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
+					0, NULL);
+	if (flow_cache == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Uninitializes the flow module. */
+void ovs_flow_exit(void)
+{
+	kmem_cache_destroy(flow_cache);
+}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
new file mode 100644
index 0000000..2747dc2
--- /dev/null
+++ b/net/openvswitch/flow.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_H
+#define FLOW_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+#include <net/inet_ecn.h>
+
+struct sk_buff;
+
+struct sw_flow_actions {
+	struct rcu_head rcu;
+	u32 actions_len;
+	struct nlattr actions[];
+};
+
+struct sw_flow_key {
+	struct {
+		u32	priority;	/* Packet QoS priority. */
+		u16	in_port;	/* Input switch port (or USHRT_MAX). */
+	} phy;
+	struct {
+		u8     src[ETH_ALEN];	/* Ethernet source address. */
+		u8     dst[ETH_ALEN];	/* Ethernet destination address. */
+		__be16 tci;		/* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+		__be16 type;		/* Ethernet frame type. */
+	} eth;
+	struct {
+		u8     proto;		/* IP protocol or lower 8 bits of ARP opcode. */
+		u8     tos;		/* IP ToS. */
+		u8     ttl;		/* IP TTL/hop limit. */
+		u8     frag;		/* One of OVS_FRAG_TYPE_*. */
+	} ip;
+	union {
+		struct {
+			struct {
+				__be32 src;	/* IP source address. */
+				__be32 dst;	/* IP destination address. */
+			} addr;
+			union {
+				struct {
+					__be16 src;		/* TCP/UDP source port. */
+					__be16 dst;		/* TCP/UDP destination port. */
+				} tp;
+				struct {
+					u8 sha[ETH_ALEN];	/* ARP source hardware address. */
+					u8 tha[ETH_ALEN];	/* ARP target hardware address. */
+				} arp;
+			};
+		} ipv4;
+		struct {
+			struct {
+				struct in6_addr src;	/* IPv6 source address. */
+				struct in6_addr dst;	/* IPv6 destination address. */
+			} addr;
+			__be32 label;			/* IPv6 flow label. */
+			struct {
+				__be16 src;		/* TCP/UDP source port. */
+				__be16 dst;		/* TCP/UDP destination port. */
+			} tp;
+			struct {
+				struct in6_addr target;	/* ND target address. */
+				u8 sll[ETH_ALEN];	/* ND source link layer address. */
+				u8 tll[ETH_ALEN];	/* ND target link layer address. */
+			} nd;
+		} ipv6;
+	};
+};
+
+struct sw_flow {
+	struct rcu_head rcu;
+	struct hlist_node hash_node[2];
+	u32 hash;
+
+	struct sw_flow_key key;
+	struct sw_flow_actions __rcu *sf_acts;
+
+	spinlock_t lock;	/* Lock for values below. */
+	unsigned long used;	/* Last used time (in jiffies). */
+	u64 packet_count;	/* Number of packets matched. */
+	u64 byte_count;		/* Number of bytes matched. */
+	u8 tcp_flags;		/* Union of seen TCP flags. */
+};
+
+struct arp_eth_header {
+	__be16      ar_hrd;	/* format of hardware address   */
+	__be16      ar_pro;	/* format of protocol address   */
+	unsigned char   ar_hln;	/* length of hardware address   */
+	unsigned char   ar_pln;	/* length of protocol address   */
+	__be16      ar_op;	/* ARP opcode (command)     */
+
+	/* Ethernet+IPv4 specific members. */
+	unsigned char       ar_sha[ETH_ALEN];	/* sender hardware address  */
+	unsigned char       ar_sip[4];		/* sender IP address        */
+	unsigned char       ar_tha[ETH_ALEN];	/* target hardware address  */
+	unsigned char       ar_tip[4];		/* target IP address        */
+} __packed;
+
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
+
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_deferred_free(struct sw_flow *);
+void ovs_flow_free(struct sw_flow *flow);
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
+
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
+		     int *key_lenp);
+void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+u64 ovs_flow_used_time(unsigned long flow_jiffies);
+
+/* Upper bound on the length of an nlattr-formatted flow key.  The longest
+ * nlattr-formatted flow key would be:
+ *
+ *                         struct  pad  nl hdr  total
+ *                         ------  ---  ------  -----
+ *  OVS_KEY_ATTR_PRIORITY      4    --     4      8
+ *  OVS_KEY_ATTR_IN_PORT       4    --     4      8
+ *  OVS_KEY_ATTR_ETHERNET     12    --     4     16
+ *  OVS_KEY_ATTR_8021Q         4    --     4      8
+ *  OVS_KEY_ATTR_ETHERTYPE     2     2     4      8
+ *  OVS_KEY_ATTR_IPV6         40    --     4     44
+ *  OVS_KEY_ATTR_ICMPV6        2     2     4      8
+ *  OVS_KEY_ATTR_ND           28    --     4     32
+ *  -------------------------------------------------
+ *  total                                       132
+ */
+#define FLOW_BUFSIZE 132
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+		      const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+			       const struct nlattr *);
+
+#define TBL_MIN_BUCKETS		1024
+
+struct flow_table {
+	struct flex_array *buckets;
+	unsigned int count, n_buckets;
+	struct rcu_head rcu;
+	int node_ver;
+	u32 hash_seed;
+	bool keep_flows;
+};
+
+static inline int ovs_flow_tbl_count(struct flow_table *table)
+{
+	return table->count;
+}
+
+static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
+{
+	return (table->count > table->n_buckets);
+}
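+
+/*
+ * Typical growth pattern (sketch; the flow-install path in datapath.c is the
+ * real caller, and 'dp->table' stands for the datapath's RCU-protected table
+ * pointer): an over-full table is copied into a larger one, swapped in under
+ * RCU, and the old table is freed after a grace period.
+ *
+ *	if (ovs_flow_tbl_need_to_expand(table)) {
+ *		struct flow_table *new_table = ovs_flow_tbl_expand(table);
+ *
+ *		if (!IS_ERR(new_table)) {
+ *			rcu_assign_pointer(dp->table, new_table);
+ *			ovs_flow_tbl_deferred_destroy(table);
+ *			table = new_table;
+ *		}
+ *	}
+ */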
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+				    struct sw_flow_key *key, int len);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_alloc(int new_size);
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
+
+#endif /* flow.h */
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
new file mode 100644
index 0000000..8fc28b8
--- /dev/null
+++ b/net/openvswitch/vport-internal_dev.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+struct internal_dev {
+	struct vport *vport;
+};
+
+static struct internal_dev *internal_dev_priv(struct net_device *netdev)
+{
+	return netdev_priv(netdev);
+}
+
+/* This function is only called by the kernel network layer. */
+static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
+							struct rtnl_link_stats64 *stats)
+{
+	struct vport *vport = ovs_internal_dev_get_vport(netdev);
+	struct ovs_vport_stats vport_stats;
+
+	ovs_vport_get_stats(vport, &vport_stats);
+
+	/* The tx and rx stats need to be swapped because the
+	 * switch and host OS have opposite perspectives. */
+	stats->rx_packets	= vport_stats.tx_packets;
+	stats->tx_packets	= vport_stats.rx_packets;
+	stats->rx_bytes		= vport_stats.tx_bytes;
+	stats->tx_bytes		= vport_stats.rx_bytes;
+	stats->rx_errors	= vport_stats.tx_errors;
+	stats->tx_errors	= vport_stats.rx_errors;
+	stats->rx_dropped	= vport_stats.tx_dropped;
+	stats->tx_dropped	= vport_stats.rx_dropped;
+
+	return stats;
+}
+
+static int internal_dev_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	return 0;
+}
+
+/* Called with rcu_read_lock_bh. */
+static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	rcu_read_lock();
+	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
+	rcu_read_unlock();
+	return 0;
+}
+
+static int internal_dev_open(struct net_device *netdev)
+{
+	netif_start_queue(netdev);
+	return 0;
+}
+
+static int internal_dev_stop(struct net_device *netdev)
+{
+	netif_stop_queue(netdev);
+	return 0;
+}
+
+static void internal_dev_getinfo(struct net_device *netdev,
+				 struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, "openvswitch");
+}
+
+static const struct ethtool_ops internal_dev_ethtool_ops = {
+	.get_drvinfo	= internal_dev_getinfo,
+	.get_link	= ethtool_op_get_link,
+};
+
+static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	if (new_mtu < 68)	/* 68 is the minimum IPv4 MTU (RFC 791) */
+		return -EINVAL;
+
+	netdev->mtu = new_mtu;
+	return 0;
+}
+
+static void internal_dev_destructor(struct net_device *dev)
+{
+	struct vport *vport = ovs_internal_dev_get_vport(dev);
+
+	ovs_vport_free(vport);
+	free_netdev(dev);
+}
+
+static const struct net_device_ops internal_dev_netdev_ops = {
+	.ndo_open = internal_dev_open,
+	.ndo_stop = internal_dev_stop,
+	.ndo_start_xmit = internal_dev_xmit,
+	.ndo_set_mac_address = internal_dev_mac_addr,
+	.ndo_change_mtu = internal_dev_change_mtu,
+	.ndo_get_stats64 = internal_dev_get_stats,
+};
+
+static void do_setup(struct net_device *netdev)
+{
+	ether_setup(netdev);
+
+	netdev->netdev_ops = &internal_dev_netdev_ops;
+
+	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	netdev->destructor = internal_dev_destructor;
+	SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+	netdev->tx_queue_len = 0;
+
+	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
+				NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+
+	netdev->vlan_features = netdev->features;
+	netdev->features |= NETIF_F_HW_VLAN_TX;
+	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+	random_ether_addr(netdev->dev_addr);
+}
+
+static struct vport *internal_dev_create(const struct vport_parms *parms)
+{
+	struct vport *vport;
+	struct netdev_vport *netdev_vport;
+	struct internal_dev *internal_dev;
+	int err;
+
+	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+				&ovs_internal_vport_ops, parms);
+	if (IS_ERR(vport)) {
+		err = PTR_ERR(vport);
+		goto error;
+	}
+
+	netdev_vport = netdev_vport_priv(vport);
+
+	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
+					 parms->name, do_setup);
+	if (!netdev_vport->dev) {
+		err = -ENOMEM;
+		goto error_free_vport;
+	}
+
+	internal_dev = internal_dev_priv(netdev_vport->dev);
+	internal_dev->vport = vport;
+
+	err = register_netdevice(netdev_vport->dev);
+	if (err)
+		goto error_free_netdev;
+
+	dev_set_promiscuity(netdev_vport->dev, 1);
+	netif_start_queue(netdev_vport->dev);
+
+	return vport;
+
+error_free_netdev:
+	free_netdev(netdev_vport->dev);
+error_free_vport:
+	ovs_vport_free(vport);
+error:
+	return ERR_PTR(err);
+}
+
+static void internal_dev_destroy(struct vport *vport)
+{
+	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+	netif_stop_queue(netdev_vport->dev);
+	dev_set_promiscuity(netdev_vport->dev, -1);
+
+	/* unregister_netdevice() waits for an RCU grace period. */
+	unregister_netdevice(netdev_vport->dev);
+}
+
+static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+{
+	struct net_device *netdev = netdev_vport_priv(vport)->dev;
+	int len;
+
+	len = skb->len;
+	skb->dev = netdev;
+	skb->pkt_type = PACKET_HOST;
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	netif_rx(skb);
+
+	return len;
+}
+
+const struct vport_ops ovs_internal_vport_ops = {
+	.type		= OVS_VPORT_TYPE_INTERNAL,
+	.create		= internal_dev_create,
+	.destroy	= internal_dev_destroy,
+	.get_name	= ovs_netdev_get_name,
+	.get_ifindex	= ovs_netdev_get_ifindex,
+	.send		= internal_dev_recv,
+};
+
+int ovs_is_internal_dev(const struct net_device *netdev)
+{
+	return netdev->netdev_ops == &internal_dev_netdev_ops;
+}
+
+struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
+{
+	if (!ovs_is_internal_dev(netdev))
+		return NULL;
+
+	return internal_dev_priv(netdev)->vport;
+}
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
new file mode 100644
index 0000000..3454447
--- /dev/null
+++ b/net/openvswitch/vport-internal_dev.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_INTERNAL_DEV_H
+#define VPORT_INTERNAL_DEV_H 1
+
+#include "datapath.h"
+#include "vport.h"
+
+int ovs_is_internal_dev(const struct net_device *);
+struct vport *ovs_internal_dev_get_vport(struct net_device *);
+
+#endif /* vport-internal_dev.h */
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
new file mode 100644
index 0000000..c1068ae
--- /dev/null
+++ b/net/openvswitch/vport-netdev.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/if_arp.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/llc.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+#include <net/llc.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+/* Must be called with rcu_read_lock. */
+static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+{
+	if (unlikely(!vport)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	/* Make our own copy of the packet.  Otherwise we will mangle the
+	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
+	 * (No one comes after us, since our rx_handler tells the stack that
+	 * we consumed the packet.) */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return;
+
+	skb_push(skb, ETH_HLEN);
+	ovs_vport_receive(vport, skb);
+}
+
+/* Called with rcu_read_lock and bottom-halves disabled. */
+static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct vport *vport;
+
+	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+		return RX_HANDLER_PASS;
+
+	vport = ovs_netdev_get_vport(skb->dev);
+
+	netdev_port_receive(vport, skb);
+
+	return RX_HANDLER_CONSUMED;
+}
+
+static struct vport *netdev_create(const struct vport_parms *parms)
+{
+	struct vport *vport;
+	struct netdev_vport *netdev_vport;
+	int err;
+
+	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+				&ovs_netdev_vport_ops, parms);
+	if (IS_ERR(vport)) {
+		err = PTR_ERR(vport);
+		goto error;
+	}
+
+	netdev_vport = netdev_vport_priv(vport);
+
+	netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+	if (!netdev_vport->dev) {
+		err = -ENODEV;
+		goto error_free_vport;
+	}
+
+	if (netdev_vport->dev->flags & IFF_LOOPBACK ||
+	    netdev_vport->dev->type != ARPHRD_ETHER ||
+	    ovs_is_internal_dev(netdev_vport->dev)) {
+		err = -EINVAL;
+		goto error_put;
+	}
+
+	err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+					 vport);
+	if (err)
+		goto error_put;
+
+	dev_set_promiscuity(netdev_vport->dev, 1);
+	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+
+	return vport;
+
+error_put:
+	dev_put(netdev_vport->dev);
+error_free_vport:
+	ovs_vport_free(vport);
+error:
+	return ERR_PTR(err);
+}
+
+static void netdev_destroy(struct vport *vport)
+{
+	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+	netdev_rx_handler_unregister(netdev_vport->dev);
+	dev_set_promiscuity(netdev_vport->dev, -1);
+
+	synchronize_rcu();
+
+	dev_put(netdev_vport->dev);
+	ovs_vport_free(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *vport)
+{
+	const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+	return netdev_vport->dev->name;
+}
+
+int ovs_netdev_get_ifindex(const struct vport *vport)
+{
+	const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+	return netdev_vport->dev->ifindex;
+}
+
+static unsigned packet_length(const struct sk_buff *skb)
+{
+	unsigned length = skb->len - ETH_HLEN;
+
+	if (skb->protocol == htons(ETH_P_8021Q))
+		length -= VLAN_HLEN;
+
+	return length;
+}
+
+static int netdev_send(struct vport *vport, struct sk_buff *skb)
+{
+	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+	int mtu = netdev_vport->dev->mtu;
+	int len;
+
+	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+		if (net_ratelimit())
+			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
+				ovs_dp_name(vport->dp), packet_length(skb), mtu);
+		goto error;
+	}
+
+	if (unlikely(skb_warn_if_lro(skb)))
+		goto error;
+
+	skb->dev = netdev_vport->dev;
+	len = skb->len;
+	dev_queue_xmit(skb);
+
+	return len;
+
+error:
+	kfree_skb(skb);
+	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+	return 0;
+}
+
+/* Returns null if this device is not attached to a datapath. */
+struct vport *ovs_netdev_get_vport(struct net_device *dev)
+{
+	if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
+		return (struct vport *)
+			rcu_dereference_rtnl(dev->rx_handler_data);
+	else
+		return NULL;
+}
+
+const struct vport_ops ovs_netdev_vport_ops = {
+	.type		= OVS_VPORT_TYPE_NETDEV,
+	.create		= netdev_create,
+	.destroy	= netdev_destroy,
+	.get_name	= ovs_netdev_get_name,
+	.get_ifindex	= ovs_netdev_get_ifindex,
+	.send		= netdev_send,
+};
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
new file mode 100644
index 0000000..fd9b008
--- /dev/null
+++ b/net/openvswitch/vport-netdev.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_NETDEV_H
+#define VPORT_NETDEV_H 1
+
+#include <linux/netdevice.h>
+
+#include "vport.h"
+
+struct vport *ovs_netdev_get_vport(struct net_device *dev);
+
+struct netdev_vport {
+	struct net_device *dev;
+};
+
+static inline struct netdev_vport *
+netdev_vport_priv(const struct vport *vport)
+{
+	return vport_priv(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *);
+const char *ovs_netdev_get_config(const struct vport *);
+int ovs_netdev_get_ifindex(const struct vport *);
+
+#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
new file mode 100644
index 0000000..7f0ef37
--- /dev/null
+++ b/net/openvswitch/vport.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/dcache.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+
+#include "vport.h"
+#include "vport-internal_dev.h"
+
+/* List of statically compiled vport implementations.  Don't forget to also
+ * add yours to the list at the bottom of vport.h. */
+static const struct vport_ops *vport_ops_list[] = {
+	&ovs_netdev_vport_ops,
+	&ovs_internal_vport_ops,
+};
+
+/* Protected by RCU read lock for reading, RTNL lock for writing. */
+static struct hlist_head *dev_table;
+#define VPORT_HASH_BUCKETS 1024
+
+/**
+ *	ovs_vport_init - initialize vport subsystem
+ *
+ * Called at module load time to initialize the vport subsystem.
+ */
+int ovs_vport_init(void)
+{
+	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+			    GFP_KERNEL);
+	if (!dev_table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ *	ovs_vport_exit - shutdown vport subsystem
+ *
+ * Called at module exit time to shutdown the vport subsystem.
+ */
+void ovs_vport_exit(void)
+{
+	kfree(dev_table);
+}
+
+static struct hlist_head *hash_bucket(const char *name)
+{
+	unsigned int hash = full_name_hash(name, strlen(name));
+	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
+}
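
The bucket selection above masks the name hash with VPORT_HASH_BUCKETS - 1, which is only valid because the bucket count is a power of two. A minimal userspace sketch of the same idea follows; the djb2-style hash is a stand-in for the kernel's full_name_hash(), and the port names are made up for illustration.

#include <stdio.h>

#define VPORT_HASH_BUCKETS 1024	/* must stay a power of two for the mask */

/* djb2-style hash as a stand-in for the kernel's full_name_hash() */
static unsigned int name_hash(const char *name)
{
	unsigned int hash = 5381;

	while (*name)
		hash = hash * 33 + (unsigned char)*name++;
	return hash;
}

int main(void)
{
	const char *names[] = { "eth0", "eth1", "ovs-internal" };

	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-12s -> bucket %u\n", names[i],
		       name_hash(names[i]) & (VPORT_HASH_BUCKETS - 1));
	return 0;
}
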
+
+/**
+ *	ovs_vport_locate - find a port that has already been created
+ *
+ * @name: name of port to find
+ *
+ * Must be called with RTNL or RCU read lock.
+ */
+struct vport *ovs_vport_locate(const char *name)
+{
+	struct hlist_head *bucket = hash_bucket(name);
+	struct vport *vport;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+		if (!strcmp(name, vport->ops->get_name(vport)))
+			return vport;
+
+	return NULL;
+}
+
+/**
+ *	ovs_vport_alloc - allocate and initialize new vport
+ *
+ * @priv_size: Size of private data area to allocate.
+ * @ops: vport device ops
+ *
+ * Allocate and initialize a new vport defined by @ops.  The vport will contain
+ * a private data area of size @priv_size that can be accessed using
+ * vport_priv().  vports that are no longer needed should be released with
+ * vport_free().
+ */
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
+			  const struct vport_parms *parms)
+{
+	struct vport *vport;
+	size_t alloc_size;
+
+	alloc_size = sizeof(struct vport);
+	if (priv_size) {
+		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
+		alloc_size += priv_size;
+	}
+
+	vport = kzalloc(alloc_size, GFP_KERNEL);
+	if (!vport)
+		return ERR_PTR(-ENOMEM);
+
+	vport->dp = parms->dp;
+	vport->port_no = parms->port_no;
+	vport->upcall_pid = parms->upcall_pid;
+	vport->ops = ops;
+
+	vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
+	if (!vport->percpu_stats) {
+		kfree(vport);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&vport->stats_lock);
+
+	return vport;
+}
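
ovs_vport_alloc() reserves priv_size bytes after the aligned struct vport so an implementation can keep its own state there, reachable via vport_priv(). Below is a hedged sketch of how a hypothetical "foo" vport type might use that pairing in its create/destroy callbacks; the names are invented, OVS_VPORT_TYPE_NETDEV is borrowed only as a placeholder type value, and the real netdev vport earlier in this patch follows the same pattern with netdev_vport_priv().

/* Hypothetical "foo" vport; names invented for illustration only. */
struct foo_vport {
	int example_state;		/* implementation-private data */
};

static struct vport *foo_create(const struct vport_parms *parms);
static void foo_destroy(struct vport *vport);

static const struct vport_ops ovs_foo_vport_ops = {
	.type		= OVS_VPORT_TYPE_NETDEV, /* placeholder; a real type needs its own value */
	.create		= foo_create,
	.destroy	= foo_destroy,
};

static struct vport *foo_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct foo_vport *foo;

	vport = ovs_vport_alloc(sizeof(struct foo_vport),
				&ovs_foo_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	foo = vport_priv(vport);	/* the area sized by priv_size above */
	foo->example_state = 0;
	return vport;
}

static void foo_destroy(struct vport *vport)
{
	synchronize_rcu();		/* per the @destroy contract in vport.h */
	ovs_vport_free(vport);
}
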
+
+/**
+ *	ovs_vport_free - uninitialize and free vport
+ *
+ * @vport: vport to free
+ *
+ * Frees a vport allocated with vport_alloc() when it is no longer needed.
+ *
+ * The caller must ensure that an RCU grace period has passed since the last
+ * time @vport was in a datapath.
+ */
+void ovs_vport_free(struct vport *vport)
+{
+	free_percpu(vport->percpu_stats);
+	kfree(vport);
+}
+
+/**
+ *	ovs_vport_add - add vport device (for kernel callers)
+ *
+ * @parms: Information about new vport.
+ *
+ * Creates a new vport with the specified configuration (which is dependent on
+ * device type).  RTNL lock must be held.
+ */
+struct vport *ovs_vport_add(const struct vport_parms *parms)
+{
+	struct vport *vport;
+	int err = 0;
+	int i;
+
+	ASSERT_RTNL();
+
+	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
+		if (vport_ops_list[i]->type == parms->type) {
+			vport = vport_ops_list[i]->create(parms);
+			if (IS_ERR(vport)) {
+				err = PTR_ERR(vport);
+				goto out;
+			}
+
+			hlist_add_head_rcu(&vport->hash_node,
+					   hash_bucket(vport->ops->get_name(vport)));
+			return vport;
+		}
+	}
+
+	err = -EAFNOSUPPORT;
+
+out:
+	return ERR_PTR(err);
+}
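
ovs_vport_add() walks vport_ops_list[] until a registered type matches parms->type, so callers fill a struct vport_parms and check the ERR_PTR result. A hedged sketch of a kernel-side caller follows; the helper name and field values are illustrative, and the datapath code in this series does the equivalent when a port is created over netlink.

/* Hypothetical kernel-side caller; the helper name and values are illustrative. */
static int example_add_port(struct datapath *dp, const char *name,
			    u16 port_no, u32 upcall_pid)
{
	struct vport_parms parms = {
		.name		= name,
		.type		= OVS_VPORT_TYPE_NETDEV,
		.options	= NULL,
		.dp		= dp,
		.port_no	= port_no,
		.upcall_pid	= upcall_pid,
	};
	struct vport *vport;

	ASSERT_RTNL();			/* ovs_vport_add() requires the RTNL lock */

	vport = ovs_vport_add(&parms);
	if (IS_ERR(vport))
		return PTR_ERR(vport);	/* e.g. -EAFNOSUPPORT for an unknown type */

	return 0;
}
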

+
+/**
+ *	ovs_vport_set_options - modify existing vport device (for kernel callers)
+ *
+ * @vport: vport to modify.
+ * @options: New configuration.
+ *
+ * Modifies an existing device with the specified configuration (which is
+ * dependent on device type).  RTNL lock must be held.
+ */
+int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
+{
+	ASSERT_RTNL();
+
+	if (!vport->ops->set_options)
+		return -EOPNOTSUPP;
+	return vport->ops->set_options(vport, options);
+}
+
+/**
+ *	ovs_vport_del - delete existing vport device
+ *
+ * @vport: vport to delete.
+ *
+ * Detaches @vport from its datapath and destroys it.  RTNL lock must be held.
+ */
+void ovs_vport_del(struct vport *vport)
+{
+	ASSERT_RTNL();
+
+	hlist_del_rcu(&vport->hash_node);
+
+	vport->ops->destroy(vport);
+}
+
+/**
+ *	ovs_vport_get_stats - retrieve device stats
+ *
+ * @vport: vport from which to retrieve the stats
+ * @stats: location to store stats
+ *
+ * Retrieves transmit, receive, and error stats for the given device.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
+{
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+
+	/* We potentially have two sources of stats that need to be combined:
+	 * those we have collected (split into err_stats and percpu_stats) from
+	 * set_stats(), and device error stats from netdev->get_stats() (for
+	 * errors that happen downstream and therefore aren't reported through
+	 * our vport_record_error() function).
+	 * Stats from the first source are reported by OVS (OVS_VPORT_ATTR_STATS);
+	 * the netdev stats can be read directly via netlink or ioctl.
+	 */
+
+	spin_lock_bh(&vport->stats_lock);
+
+	stats->rx_errors	= vport->err_stats.rx_errors;
+	stats->tx_errors	= vport->err_stats.tx_errors;
+	stats->tx_dropped	= vport->err_stats.tx_dropped;
+	stats->rx_dropped	= vport->err_stats.rx_dropped;
+
+	spin_unlock_bh(&vport->stats_lock);
+
+	for_each_possible_cpu(i) {
+		const struct vport_percpu_stats *percpu_stats;
+		struct vport_percpu_stats local_stats;
+		unsigned int start;
+
+		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
+
+		do {
+			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+			local_stats = *percpu_stats;
+		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+		stats->rx_bytes		+= local_stats.rx_bytes;
+		stats->rx_packets	+= local_stats.rx_packets;
+		stats->tx_bytes		+= local_stats.tx_bytes;
+		stats->tx_packets	+= local_stats.tx_packets;
+	}
+}
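
The loop above sums one snapshot per possible CPU, retrying a snapshot whenever the u64_stats sequence moved underneath it. A standalone, single-threaded sketch of that retry pattern is shown below; the explicit seq field mimics u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() but omits the memory barriers the real primitives provide, so it is illustrative only.

#include <stdio.h>

#define NR_CPUS 4

struct percpu_stats {
	unsigned int seq;		/* odd would mean a writer is mid-update */
	unsigned long long rx_bytes;
	unsigned long long rx_packets;
};

static struct percpu_stats stats[NR_CPUS];

/* Snapshot one CPU's counters, retrying if the sequence moved (cf.
 * u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh(), minus barriers). */
static void read_one(const struct percpu_stats *s, struct percpu_stats *out)
{
	unsigned int start;

	do {
		start = s->seq;
		*out = *s;
	} while (s->seq != start || (start & 1));
}

int main(void)
{
	unsigned long long bytes = 0, packets = 0;

	stats[0].rx_bytes = 1500; stats[0].rx_packets = 1;
	stats[2].rx_bytes = 60;   stats[2].rx_packets = 1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct percpu_stats snap;

		read_one(&stats[cpu], &snap);
		bytes += snap.rx_bytes;
		packets += snap.rx_packets;
	}
	printf("rx: %llu packets, %llu bytes\n", packets, bytes);
	return 0;
}
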
+
+/**
+ *	ovs_vport_get_options - retrieve device options
+ *
+ * @vport: vport from which to retrieve the options.
+ * @skb: sk_buff where options should be appended.
+ *
+ * Retrieves the configuration of the given device, appending an
+ * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
+ * vport-specific attributes to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
+ * negative error code if a real error occurred.  If an error occurs, @skb is
+ * left unmodified.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+	struct nlattr *nla;
+
+	nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
+	if (!nla)
+		return -EMSGSIZE;
+
+	if (vport->ops->get_options) {
+		int err = vport->ops->get_options(vport, skb);
+		if (err) {
+			nla_nest_cancel(skb, nla);
+			return err;
+		}
+	}
+
+	nla_nest_end(skb, nla);
+	return 0;
+}
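
ovs_vport_get_options() opens the OVS_VPORT_ATTR_OPTIONS nest, lets the implementation append its own attributes, and cancels the nest if the callback fails. A hedged sketch of what such a ->get_options() callback might look like follows; FOO_ATTR_EXAMPLE and its value are invented for illustration, while nla_put_u32() is the standard netlink helper.

#define FOO_ATTR_EXAMPLE 1	/* invented attribute id, illustration only */

/* Hypothetical ->get_options() implementation. */
static int foo_get_options(const struct vport *vport, struct sk_buff *skb)
{
	if (nla_put_u32(skb, FOO_ATTR_EXAMPLE, 42))
		return -EMSGSIZE;	/* ovs_vport_get_options() cancels the nest */
	return 0;
}
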
+
+/**
+ *	ovs_vport_receive - pass up received packet to the datapath for processing
+ *
+ * @vport: vport that received the packet
+ * @skb: skb that was received
+ *
+ * Must be called with rcu_read_lock.  The packet cannot be shared and
+ * skb->data should point to the Ethernet header.  The caller must have already
+ * called compute_ip_summed() to initialize the checksumming fields.
+ */
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+{
+	struct vport_percpu_stats *stats;
+
+	stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+	u64_stats_update_begin(&stats->sync);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->sync);
+
+	ovs_dp_process_received_packet(vport, skb);
+}
+
+/**
+ *	ovs_vport_send - send a packet on a device
+ *
+ * @vport: vport on which to send the packet
+ * @skb: skb to send
+ *
+ * Sends the given packet and returns the length of data sent.  Either RTNL
+ * lock or rcu_read_lock must be held.
+ */
+int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+	int sent = vport->ops->send(vport, skb);
+
+	if (likely(sent)) {
+		struct vport_percpu_stats *stats;
+
+		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+		u64_stats_update_begin(&stats->sync);
+		stats->tx_packets++;
+		stats->tx_bytes += sent;
+		u64_stats_update_end(&stats->sync);
+	}
+	return sent;
+}
+
+/**
+ *	ovs_vport_record_error - indicate device error to generic stats layer
+ *
+ * @vport: vport that encountered the error
+ * @err_type: one of enum vport_err_type types to indicate the error type
+ *
+ * If using the vport generic stats layer, indicate that an error of the given
+ * type has occurred.
+ */
+void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+{
+	spin_lock(&vport->stats_lock);
+
+	switch (err_type) {
+	case VPORT_E_RX_DROPPED:
+		vport->err_stats.rx_dropped++;
+		break;
+
+	case VPORT_E_RX_ERROR:
+		vport->err_stats.rx_errors++;
+		break;
+
+	case VPORT_E_TX_DROPPED:
+		vport->err_stats.tx_dropped++;
+		break;
+
+	case VPORT_E_TX_ERROR:
+		vport->err_stats.tx_errors++;
+		break;
+	}
+
+	spin_unlock(&vport->stats_lock);
+}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
new file mode 100644
index 0000000..1960962
--- /dev/null
+++ b/net/openvswitch/vport.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_H
+#define VPORT_H 1
+
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/u64_stats_sync.h>
+
+#include "datapath.h"
+
+struct vport;
+struct vport_parms;
+
+/* The following definitions are for users of the vport subsystem: */
+
+int ovs_vport_init(void);
+void ovs_vport_exit(void);
+
+struct vport *ovs_vport_add(const struct vport_parms *);
+void ovs_vport_del(struct vport *);
+
+struct vport *ovs_vport_locate(const char *name);
+
+void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
+
+int ovs_vport_set_options(struct vport *, struct nlattr *options);
+int ovs_vport_get_options(const struct vport *, struct sk_buff *);
+
+int ovs_vport_send(struct vport *, struct sk_buff *);
+
+/* The following definitions are for implementers of vport devices: */
+
+struct vport_percpu_stats {
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 tx_bytes;
+	u64 tx_packets;
+	struct u64_stats_sync sync;
+};
+
+struct vport_err_stats {
+	u64 rx_dropped;
+	u64 rx_errors;
+	u64 tx_dropped;
+	u64 tx_errors;
+};
+
+/**
+ * struct vport - one port within a datapath
+ * @rcu: RCU callback head for deferred destruction.
+ * @port_no: Index into @dp's @ports array.
+ * @dp: Datapath to which this port belongs.
+ * @node: Element in @dp's @port_list.
+ * @upcall_pid: The Netlink port to use for packets received on this port that
+ * miss the flow table.
+ * @hash_node: Element in @dev_table hash table in vport.c.
+ * @ops: Class structure.
+ * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+ * @stats_lock: Protects @err_stats.
+ * @err_stats: Points to error statistics used and maintained by vport
+ */
+struct vport {
+	struct rcu_head rcu;
+	u16 port_no;
+	struct datapath	*dp;
+	struct list_head node;
+	u32 upcall_pid;
+
+	struct hlist_node hash_node;
+	const struct vport_ops *ops;
+
+	struct vport_percpu_stats __percpu *percpu_stats;
+
+	spinlock_t stats_lock;
+	struct vport_err_stats err_stats;
+};
+
+/**
+ * struct vport_parms - parameters for creating a new vport
+ *
+ * @name: New vport's name.
+ * @type: New vport's type.
+ * @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if
+ * none was supplied.
+ * @dp: New vport's datapath.
+ * @port_no: New vport's port number.
+ */
+struct vport_parms {
+	const char *name;
+	enum ovs_vport_type type;
+	struct nlattr *options;
+
+	/* For ovs_vport_alloc(). */
+	struct datapath *dp;
+	u16 port_no;
+	u32 upcall_pid;
+};
+
+/**
+ * struct vport_ops - definition of a type of virtual port
+ *
+ * @type: %OVS_VPORT_TYPE_* value for this type of virtual port.
+ * @create: Create a new vport configured as specified.  On success returns
+ * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
+ * @destroy: Destroys a vport.  Must call vport_free() on the vport but not
+ * before an RCU grace period has elapsed.
+ * @set_options: Modify the configuration of an existing vport.  May be %NULL
+ * if modification is not supported.
+ * @get_options: Appends vport-specific attributes for the configuration of an
+ * existing vport to a &struct sk_buff.  May be %NULL for a vport that does not
+ * have any configuration.
+ * @get_name: Get the device's name.
+ * @get_config: Get the device's configuration.
+ * @get_ifindex: Get the system interface index associated with the device.
+ * May be %NULL if the device does not have an ifindex.
+ * @send: Send a packet on the device.  Returns the length of the packet sent.
+ */
+struct vport_ops {
+	enum ovs_vport_type type;
+
+	/* Called with RTNL lock. */
+	struct vport *(*create)(const struct vport_parms *);
+	void (*destroy)(struct vport *);
+
+	int (*set_options)(struct vport *, struct nlattr *);
+	int (*get_options)(const struct vport *, struct sk_buff *);
+
+	/* Called with rcu_read_lock or RTNL lock. */
+	const char *(*get_name)(const struct vport *);
+	void (*get_config)(const struct vport *, void *);
+	int (*get_ifindex)(const struct vport *);
+
+	int (*send)(struct vport *, struct sk_buff *);
+};
+
+enum vport_err_type {
+	VPORT_E_RX_DROPPED,
+	VPORT_E_RX_ERROR,
+	VPORT_E_TX_DROPPED,
+	VPORT_E_TX_ERROR,
+};
+
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
+			      const struct vport_parms *);
+void ovs_vport_free(struct vport *);
+
+#define VPORT_ALIGN 8
+
+/**
+ *	vport_priv - access private data area of vport
+ *
+ * @vport: vport to access
+ *
+ * If a nonzero size was passed in priv_size of vport_alloc(), a private data
+ * area was allocated on creation.  This allows that area to be accessed and
+ * used for any purpose needed by the vport implementer.
+ */
+static inline void *vport_priv(const struct vport *vport)
+{
+	return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+}
+
+/**
+ *	vport_from_priv - lookup vport from private data pointer
+ *
+ * @priv: Start of private data area.
+ *
+ * It is sometimes useful to translate from a pointer to the private data
+ * area to the vport, such as in the case where the private data pointer is
+ * the result of a hash table lookup.  @priv must point to the start of the
+ * private data area.
+ */
+static inline struct vport *vport_from_priv(const void *priv)
+{
+	return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+}
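
Because the private area always starts at the vport address plus sizeof(struct vport) rounded up to VPORT_ALIGN, vport_priv() and vport_from_priv() are exact inverses. The standalone program below demonstrates the arithmetic with stand-in structs; the 44-byte pad is chosen only to make the rounding visible.

#include <stdio.h>
#include <stdlib.h>

#define VPORT_ALIGN 8
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct vport { char pad[44]; };		/* stand-in; size deliberately not a multiple of 8 */
struct foo_priv { int example; };	/* stand-in private area */

static void *vport_priv(struct vport *v)
{
	return (char *)v + ALIGN(sizeof(struct vport), VPORT_ALIGN);
}

static struct vport *vport_from_priv(void *p)
{
	return (struct vport *)((char *)p - ALIGN(sizeof(struct vport), VPORT_ALIGN));
}

int main(void)
{
	size_t total = ALIGN(sizeof(struct vport), VPORT_ALIGN) + sizeof(struct foo_priv);
	struct vport *v = calloc(1, total);
	struct foo_priv *p = vport_priv(v);

	printf("priv offset: %ld (44 rounded up to 48)\n",
	       (long)((char *)p - (char *)v));
	printf("round trip ok: %d\n", vport_from_priv(p) == v);
	free(v);
	return 0;
}
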
+
+void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
+
+/* List of statically compiled vport implementations.  Don't forget to also
+ * add yours to the list at the top of vport.c. */
+extern const struct vport_ops ovs_netdev_vport_ops;
+extern const struct vport_ops ovs_internal_vport_ops;
+
+#endif /* vport.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34..2dbb32b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1499,10 +1499,11 @@
 
 	if (!skb) {
 		size_t reserved = LL_RESERVED_SPACE(dev);
+		int tlen = dev->needed_tailroom;
 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
 
 		rcu_read_unlock();
-		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
 		if (skb == NULL)
 			return -ENOBUFS;
 		/* FIXME: Save some space for broken drivers that write a hard
@@ -1630,8 +1631,7 @@
 	if (snaplen > res)
 		snaplen = res;
 
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto drop_n_acct;
 
 	if (skb_shared(skb)) {
@@ -1762,8 +1762,7 @@
 	if (po->tp_version <= TPACKET_V2) {
 		if (macoff + snaplen > po->rx_ring.frame_size) {
 			if (po->copy_thresh &&
-				atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-				< (unsigned)sk->sk_rcvbuf) {
+			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 				if (skb_shared(skb)) {
 					copy_skb = skb_clone(skb, GFP_ATOMIC);
 				} else {
@@ -1944,7 +1943,7 @@
 
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 		void *frame, struct net_device *dev, int size_max,
-		__be16 proto, unsigned char *addr)
+		__be16 proto, unsigned char *addr, int hlen)
 {
 	union {
 		struct tpacket_hdr *h1;
@@ -1978,7 +1977,7 @@
 		return -EMSGSIZE;
 	}
 
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
 
 	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2053,6 +2052,7 @@
 	unsigned char *addr;
 	int len_sum = 0;
 	int status = 0;
+	int hlen, tlen;
 
 	mutex_lock(&po->pg_vec_lock);
 
@@ -2101,16 +2101,17 @@
 		}
 
 		status = TP_STATUS_SEND_REQUEST;
+		hlen = LL_RESERVED_SPACE(dev);
+		tlen = dev->needed_tailroom;
 		skb = sock_alloc_send_skb(&po->sk,
-				LL_ALLOCATED_SPACE(dev)
-				+ sizeof(struct sockaddr_ll),
+				hlen + tlen + sizeof(struct sockaddr_ll),
 				0, &err);
 
 		if (unlikely(skb == NULL))
 			goto out_status;
 
 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
-				addr);
+				addr, hlen);
 
 		if (unlikely(tp_len < 0)) {
 			if (po->tp_loss) {
@@ -2207,6 +2208,7 @@
 	int vnet_hdr_len;
 	struct packet_sock *po = pkt_sk(sk);
 	unsigned short gso_type = 0;
+	int hlen, tlen;
 
 	/*
 	 *	Get and verify the address.
@@ -2291,8 +2293,9 @@
 		goto out_unlock;
 
 	err = -ENOBUFS;
-	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
-			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
+	hlen = LL_RESERVED_SPACE(dev);
+	tlen = dev->needed_tailroom;
+	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
 			       msg->msg_flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto out_unlock;
@@ -2450,8 +2453,12 @@
 {
 	struct packet_sock *po = pkt_sk(sk);
 
-	if (po->fanout)
+	if (po->fanout) {
+		if (dev)
+			dev_put(dev);
+
 		return -EINVAL;
+	}
 
 	lock_sock(sk);
 
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 2ba6e9f..9f60008 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -534,6 +534,29 @@
 	return pipe_handler_send_created_ind(sk);
 }
 
+static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	struct pnpipehdr *hdr = pnp_hdr(skb);
+
+	if (hdr->error_code != PN_PIPE_NO_ERROR)
+		return -ECONNREFUSED;
+
+	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
+		NULL, 0, GFP_ATOMIC);
+
+}
+
+static void pipe_start_flow_control(struct sock *sk)
+{
+	struct pep_sock *pn = pep_sk(sk);
+
+	if (!pn_flow_safe(pn->tx_fc)) {
+		atomic_set(&pn->tx_credits, 1);
+		sk->sk_write_space(sk);
+	}
+	pipe_grant_credits(sk, GFP_ATOMIC);
+}
+
 /* Queue an skb to an actively connected sock.
  * Socket lock must be held. */
 static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
@@ -579,13 +602,25 @@
 			sk->sk_state = TCP_CLOSE_WAIT;
 			break;
 		}
+		if (pn->init_enable == PN_PIPE_DISABLE)
+			sk->sk_state = TCP_SYN_RECV;
+		else {
+			sk->sk_state = TCP_ESTABLISHED;
+			pipe_start_flow_control(sk);
+		}
+		break;
+
+	case PNS_PEP_ENABLE_RESP:
+		if (sk->sk_state != TCP_SYN_SENT)
+			break;
+
+		if (pep_enableresp_rcv(sk, skb)) {
+			sk->sk_state = TCP_CLOSE_WAIT;
+			break;
+		}
 
 		sk->sk_state = TCP_ESTABLISHED;
-		if (!pn_flow_safe(pn->tx_fc)) {
-			atomic_set(&pn->tx_credits, 1);
-			sk->sk_write_space(sk);
-		}
-		pipe_grant_credits(sk, GFP_ATOMIC);
+		pipe_start_flow_control(sk);
 		break;
 
 	case PNS_PEP_DISCONNECT_RESP:
@@ -864,14 +899,32 @@
 	int err;
 	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 
-	pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
+		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+
 	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
-					PN_PIPE_ENABLE, data, 4);
+				pn->init_enable, data, 4);
 	if (err) {
 		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
 		return err;
 	}
+
 	sk->sk_state = TCP_SYN_SENT;
+
+	return 0;
+}
+
+static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+{
+	int err;
+
+	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
+				NULL, 0);
+	if (err)
+		return err;
+
+	sk->sk_state = TCP_SYN_SENT;
+
 	return 0;
 }
 
@@ -879,11 +932,14 @@
 {
 	struct pep_sock *pn = pep_sk(sk);
 	int answ;
+	int ret = -ENOIOCTLCMD;
 
 	switch (cmd) {
 	case SIOCINQ:
-		if (sk->sk_state == TCP_LISTEN)
-			return -EINVAL;
+		if (sk->sk_state == TCP_LISTEN) {
+			ret = -EINVAL;
+			break;
+		}
 
 		lock_sock(sk);
 		if (sock_flag(sk, SOCK_URGINLINE) &&
@@ -894,10 +950,22 @@
 		else
 			answ = 0;
 		release_sock(sk);
-		return put_user(answ, (int __user *)arg);
+		ret = put_user(answ, (int __user *)arg);
+		break;
+
+	case SIOCPNENABLEPIPE:
+		lock_sock(sk);
+		if (sk->sk_state == TCP_SYN_SENT)
+			ret = -EBUSY;
+		else if (sk->sk_state == TCP_ESTABLISHED)
+			ret = -EISCONN;
+		else
+			ret = pep_sock_enable(sk, NULL, 0);
+		release_sock(sk);
+		break;
 	}
 
-	return -ENOIOCTLCMD;
+	return ret;
 }
 
 static int pep_init(struct sock *sk)
@@ -960,6 +1028,18 @@
 		}
 		goto out_norel;
 
+	case PNPIPE_HANDLE:
+		if ((sk->sk_state == TCP_CLOSE) &&
+			(val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
+			pn->pipe_handle = val;
+		else
+			err = -EINVAL;
+		break;
+
+	case PNPIPE_INITSTATE:
+		pn->init_enable = !!val;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -995,6 +1075,10 @@
 			return -EINVAL;
 		break;
 
+	case PNPIPE_INITSTATE:
+		val = pn->init_enable;
+		break;
+
 	default:
 		return -ENOPROTOOPT;
 	}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 5be1957..354760e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -644,7 +644,7 @@
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	err = strict_strtoul(buf, 0, &state);
+	err = kstrtoul(buf, 0, &state);
 	if (err)
 		return err;
 
@@ -688,7 +688,7 @@
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	err = strict_strtoul(buf, 0, &state);
+	err = kstrtoul(buf, 0, &state);
 	if (err)
 		return err;
 
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 128677d..865adb6 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -105,7 +105,7 @@
 		ret = pdata->gpio_runtime_setup(pdev);
 		if (ret) {
 			pr_warn("%s: can't set up gpio\n", __func__);
-			return ret;
+			goto fail_alloc;
 		}
 	}
 
@@ -220,18 +220,7 @@
 	},
 };
 
-static int __init rfkill_gpio_init(void)
-{
-	return platform_driver_register(&rfkill_gpio_driver);
-}
-
-static void __exit rfkill_gpio_exit(void)
-{
-	platform_driver_unregister(&rfkill_gpio_driver);
-}
-
-module_init(rfkill_gpio_init);
-module_exit(rfkill_gpio_exit);
+module_platform_driver(rfkill_gpio_driver);
 
 MODULE_DESCRIPTION("gpio rfkill");
 MODULE_AUTHOR("NVIDIA");
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 3ca7277..11da301 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -36,12 +36,12 @@
 	if (blocked) {
 		if (rfkill_data->reg_enabled) {
 			regulator_disable(rfkill_data->vcc);
-			rfkill_data->reg_enabled = 0;
+			rfkill_data->reg_enabled = false;
 		}
 	} else {
 		if (!rfkill_data->reg_enabled) {
 			regulator_enable(rfkill_data->vcc);
-			rfkill_data->reg_enabled = 1;
+			rfkill_data->reg_enabled = true;
 		}
 	}
 
@@ -96,7 +96,7 @@
 
 	if (regulator_is_enabled(vcc)) {
 		dev_dbg(&pdev->dev, "Regulator already enabled\n");
-		rfkill_data->reg_enabled = 1;
+		rfkill_data->reg_enabled = true;
 	}
 	rfkill_data->vcc = vcc;
 	rfkill_data->rf_kill = rf_kill;
@@ -144,17 +144,7 @@
 	},
 };
 
-static int __init rfkill_regulator_init(void)
-{
-	return platform_driver_register(&rfkill_regulator_driver);
-}
-module_init(rfkill_regulator_init);
-
-static void __exit rfkill_regulator_exit(void)
-{
-	platform_driver_unregister(&rfkill_regulator_driver);
-}
-module_exit(rfkill_regulator_exit);
+module_platform_driver(rfkill_regulator_driver);
 
 MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index f99cfce..c3126e8 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -195,7 +195,7 @@
 		sp = rxrpc_skb(txb);
 
 		if (sp->need_resend) {
-			sp->need_resend = 0;
+			sp->need_resend = false;
 
 			/* each Tx packet has a new serial number */
 			sp->hdr.serial =
@@ -216,7 +216,7 @@
 		}
 
 		if (time_after_eq(jiffies + 1, sp->resend_at)) {
-			sp->need_resend = 1;
+			sp->need_resend = true;
 			resend |= 1;
 		} else if (resend & 2) {
 			if (time_before(sp->resend_at, resend_at))
@@ -265,7 +265,7 @@
 		if (sp->need_resend) {
 			;
 		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
-			sp->need_resend = 1;
+			sp->need_resend = true;
 			resend |= 1;
 		} else if (resend & 2) {
 			if (time_before(sp->resend_at, resend_at))
@@ -314,11 +314,11 @@
 
 		switch (sacks[loop]) {
 		case RXRPC_ACK_TYPE_ACK:
-			sp->need_resend = 0;
+			sp->need_resend = false;
 			*p_txb |= 1;
 			break;
 		case RXRPC_ACK_TYPE_NACK:
-			sp->need_resend = 1;
+			sp->need_resend = true;
 			*p_txb &= ~1;
 			resend = 1;
 			break;
@@ -344,13 +344,13 @@
 
 		if (*p_txb & 1) {
 			/* packet must have been discarded */
-			sp->need_resend = 1;
+			sp->need_resend = true;
 			*p_txb &= ~1;
 			resend |= 1;
 		} else if (sp->need_resend) {
 			;
 		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
-			sp->need_resend = 1;
+			sp->need_resend = true;
 			resend |= 1;
 		} else if (resend & 2) {
 			if (time_before(sp->resend_at, resend_at))
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 43ea7de..4cba13e 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -306,10 +306,9 @@
 	td->data_len = len;
 
 	if (len > 0) {
-		td->data = kmalloc(len, GFP_KERNEL);
+		td->data = kmemdup(xdr, len, GFP_KERNEL);
 		if (!td->data)
 			return -ENOMEM;
-		memcpy(td->data, xdr, len);
 		len = (len + 3) & ~3;
 		toklen -= len;
 		xdr += len >> 2;
@@ -401,10 +400,9 @@
 	_debug("ticket len %u", len);
 
 	if (len > 0) {
-		*_ticket = kmalloc(len, GFP_KERNEL);
+		*_ticket = kmemdup(xdr, len, GFP_KERNEL);
 		if (!*_ticket)
 			return -ENOMEM;
-		memcpy(*_ticket, xdr, len);
 		len = (len + 3) & ~3;
 		toklen -= len;
 		xdr += len >> 2;
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 338d793..16ae887 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -486,7 +486,7 @@
 	_proto("Tx DATA %%%u { #%u }",
 	       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
 
-	sp->need_resend = 0;
+	sp->need_resend = false;
 	sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
 	if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
 		_debug("run timer");
@@ -508,7 +508,7 @@
 
 	if (ret < 0) {
 		_debug("need instant resend %d", ret);
-		sp->need_resend = 1;
+		sp->need_resend = true;
 		rxrpc_instant_resend(call);
 	}
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 7b58230..1d8bd0d 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -26,6 +26,8 @@
 #include <net/pkt_cls.h>
 #include <net/ip.h>
 #include <net/route.h>
+#include <net/flow_keys.h>
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -66,134 +68,37 @@
 	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
 }
 
-static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	__be32 *data = NULL, hdata;
-
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		data = skb_header_pointer(skb,
-					  nhoff + offsetof(struct iphdr,
-							   saddr),
-					  4, &hdata);
-		break;
-	case htons(ETH_P_IPV6):
-		data = skb_header_pointer(skb,
-					 nhoff + offsetof(struct ipv6hdr,
-							  saddr.s6_addr32[3]),
-					 4, &hdata);
-		break;
-	}
-
-	if (data)
-		return ntohl(*data);
+	if (flow->src)
+		return ntohl(flow->src);
 	return addr_fold(skb->sk);
 }
 
-static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	__be32 *data = NULL, hdata;
-
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		data = skb_header_pointer(skb,
-					  nhoff + offsetof(struct iphdr,
-							   daddr),
-					  4, &hdata);
-		break;
-	case htons(ETH_P_IPV6):
-		data = skb_header_pointer(skb,
-					 nhoff + offsetof(struct ipv6hdr,
-							  daddr.s6_addr32[3]),
-					 4, &hdata);
-		break;
-	}
-
-	if (data)
-		return ntohl(*data);
+	if (flow->dst)
+		return ntohl(flow->dst);
 	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
 
-static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	__u8 *data = NULL, hdata;
-
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		data = skb_header_pointer(skb,
-					  nhoff + offsetof(struct iphdr,
-							   protocol),
-					  1, &hdata);
-		break;
-	case htons(ETH_P_IPV6):
-		data = skb_header_pointer(skb,
-					 nhoff + offsetof(struct ipv6hdr,
-							  nexthdr),
-					 1, &hdata);
-		break;
-	}
-	if (data)
-		return *data;
-	return 0;
+	return flow->ip_proto;
 }
 
-/* helper function to get either src or dst port */
-static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
-				     __be16 *_port, int dst)
+static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	__be16 *port = NULL;
-	int poff;
-
-	switch (skb->protocol) {
-	case htons(ETH_P_IP): {
-		struct iphdr *iph, _iph;
-
-		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-		if (!iph)
-			break;
-		if (ip_is_fragment(iph))
-			break;
-		poff = proto_ports_offset(iph->protocol);
-		if (poff >= 0)
-			port = skb_header_pointer(skb,
-					nhoff + iph->ihl * 4 + poff + dst,
-					sizeof(*_port), _port);
-		break;
-	}
-	case htons(ETH_P_IPV6): {
-		struct ipv6hdr *iph, _iph;
-
-		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-		if (!iph)
-			break;
-		poff = proto_ports_offset(iph->nexthdr);
-		if (poff >= 0)
-			port = skb_header_pointer(skb,
-					nhoff + sizeof(*iph) + poff + dst,
-					sizeof(*_port), _port);
-		break;
-	}
-	}
-
-	return port;
-}
-
-static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
-{
-	__be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
-
-	if (port)
-		return ntohs(*port);
+	if (flow->ports)
+		return ntohs(flow->port16[0]);
 
 	return addr_fold(skb->sk);
 }
 
-static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-	__be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
-
-	if (port)
-		return ntohs(*port);
+	if (flow->ports)
+		return ntohs(flow->port16[1]);
 
 	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
@@ -239,7 +144,7 @@
 })
 #endif
 
-static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
@@ -248,10 +153,10 @@
 		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
 	}
 fallback:
-	return flow_get_src(skb, nhoff);
+	return flow_get_src(skb, flow);
 }
 
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
@@ -260,21 +165,21 @@
 		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
 	}
 fallback:
-	return flow_get_dst(skb, nhoff);
+	return flow_get_dst(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
 	return ntohs(CTTUPLE(skb, src.u.all));
 fallback:
-	return flow_get_proto_src(skb, nhoff);
+	return flow_get_proto_src(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
 	return ntohs(CTTUPLE(skb, dst.u.all));
 fallback:
-	return flow_get_proto_dst(skb, nhoff);
+	return flow_get_proto_dst(skb, flow);
 }
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -314,21 +219,19 @@
 	return skb_get_rxhash(skb);
 }
 
-static u32 flow_key_get(struct sk_buff *skb, int key)
+static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
 {
-	int nhoff = skb_network_offset(skb);
-
 	switch (key) {
 	case FLOW_KEY_SRC:
-		return flow_get_src(skb, nhoff);
+		return flow_get_src(skb, flow);
 	case FLOW_KEY_DST:
-		return flow_get_dst(skb, nhoff);
+		return flow_get_dst(skb, flow);
 	case FLOW_KEY_PROTO:
-		return flow_get_proto(skb, nhoff);
+		return flow_get_proto(skb, flow);
 	case FLOW_KEY_PROTO_SRC:
-		return flow_get_proto_src(skb, nhoff);
+		return flow_get_proto_src(skb, flow);
 	case FLOW_KEY_PROTO_DST:
-		return flow_get_proto_dst(skb, nhoff);
+		return flow_get_proto_dst(skb, flow);
 	case FLOW_KEY_IIF:
 		return flow_get_iif(skb);
 	case FLOW_KEY_PRIORITY:
@@ -338,13 +241,13 @@
 	case FLOW_KEY_NFCT:
 		return flow_get_nfct(skb);
 	case FLOW_KEY_NFCT_SRC:
-		return flow_get_nfct_src(skb, nhoff);
+		return flow_get_nfct_src(skb, flow);
 	case FLOW_KEY_NFCT_DST:
-		return flow_get_nfct_dst(skb, nhoff);
+		return flow_get_nfct_dst(skb, flow);
 	case FLOW_KEY_NFCT_PROTO_SRC:
-		return flow_get_nfct_proto_src(skb, nhoff);
+		return flow_get_nfct_proto_src(skb, flow);
 	case FLOW_KEY_NFCT_PROTO_DST:
-		return flow_get_nfct_proto_dst(skb, nhoff);
+		return flow_get_nfct_proto_dst(skb, flow);
 	case FLOW_KEY_RTCLASSID:
 		return flow_get_rtclassid(skb);
 	case FLOW_KEY_SKUID:
@@ -361,6 +264,16 @@
 	}
 }
 
+#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | 		\
+			  (1 << FLOW_KEY_DST) |			\
+			  (1 << FLOW_KEY_PROTO) |		\
+			  (1 << FLOW_KEY_PROTO_SRC) |		\
+			  (1 << FLOW_KEY_PROTO_DST) | 		\
+			  (1 << FLOW_KEY_NFCT_SRC) |		\
+			  (1 << FLOW_KEY_NFCT_DST) |		\
+			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
+			  (1 << FLOW_KEY_NFCT_PROTO_DST))
+
 static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			 struct tcf_result *res)
 {
@@ -372,17 +285,20 @@
 	int r;
 
 	list_for_each_entry(f, &head->filters, list) {
-		u32 keys[f->nkeys];
+		u32 keys[FLOW_KEY_MAX + 1];
+		struct flow_keys flow_keys;
 
 		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 			continue;
 
 		keymask = f->keymask;
+		if (keymask & FLOW_KEYS_NEEDED)
+			skb_flow_dissect(skb, &flow_keys);
 
 		for (n = 0; n < f->nkeys; n++) {
 			key = ffs(keymask) - 1;
 			keymask &= ~(1 << key);
-			keys[n] = flow_key_get(skb, key);
+			keys[n] = flow_key_get(skb, key, &flow_keys);
 		}
 
 		if (f->mode == FLOW_MODE_HASH)
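
The new classifier path dissects the packet once up front, and only when the configured key mask intersects FLOW_KEYS_NEEDED, then peels configured keys off the mask with ffs(). A small userspace sketch of that mask handling follows; the key numbering is illustrative rather than the real FLOW_KEY_* values.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define KEY_SRC		0
#define KEY_DST		1
#define KEY_IIF		2
#define NEEDS_DISSECT	((1 << KEY_SRC) | (1 << KEY_DST))

int main(void)
{
	unsigned int keymask = (1 << KEY_DST) | (1 << KEY_IIF);

	if (keymask & NEEDS_DISSECT)
		printf("would call skb_flow_dissect() once up front\n");

	while (keymask) {
		int key = ffs(keymask) - 1;	/* lowest configured key */

		keymask &= ~(1 << key);
		printf("fetch key %d\n", key);
	}
	return 0;
}
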
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index dca6c1a..3d8981f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -618,20 +618,24 @@
 }
 EXPORT_SYMBOL(qdisc_class_hash_remove);
 
-/* Allocate an unique handle from space managed by kernel */
-
+/* Allocate an unique handle from space managed by kernel
+ * Possible range is [8000-FFFF]:0000 (0x8000 values)
+ */
 static u32 qdisc_alloc_handle(struct net_device *dev)
 {
-	int i = 0x10000;
+	int i = 0x8000;
 	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
 
 	do {
 		autohandle += TC_H_MAKE(0x10000U, 0);
 		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
 			autohandle = TC_H_MAKE(0x80000000U, 0);
-	} while	(qdisc_lookup(dev, autohandle) && --i > 0);
+		if (!qdisc_lookup(dev, autohandle))
+			return autohandle;
+		cond_resched();
+	} while	(--i > 0);
 
-	return i > 0 ? autohandle : 0;
+	return 0;
 }
 
 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 3422b25..e465064 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -19,10 +19,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
+#include <net/flow_keys.h>
 
 /*
    CHOKe stateless AQM for fair bandwidth allocation
@@ -60,6 +57,7 @@
 	struct red_parms parms;
 
 /* Variables */
+	struct red_vars  vars;
 	struct tcf_proto *filter_list;
 	struct {
 		u32	prob_drop;	/* Early probability drops */
@@ -142,85 +140,10 @@
 	--sch->q.qlen;
 }
 
-/*
- * Compare flow of two packets
- *  Returns true only if source and destination address and port match.
- *          false for special cases
- */
-static bool choke_match_flow(struct sk_buff *skb1,
-			     struct sk_buff *skb2)
-{
-	int off1, off2, poff;
-	const u32 *ports1, *ports2;
-	u8 ip_proto;
-	__u32 hash1;
-
-	if (skb1->protocol != skb2->protocol)
-		return false;
-
-	/* Use hash value as quick check
-	 * Assumes that __skb_get_rxhash makes IP header and ports linear
-	 */
-	hash1 = skb_get_rxhash(skb1);
-	if (!hash1 || hash1 != skb_get_rxhash(skb2))
-		return false;
-
-	/* Probably match, but be sure to avoid hash collisions */
-	off1 = skb_network_offset(skb1);
-	off2 = skb_network_offset(skb2);
-
-	switch (skb1->protocol) {
-	case __constant_htons(ETH_P_IP): {
-		const struct iphdr *ip1, *ip2;
-
-		ip1 = (const struct iphdr *) (skb1->data + off1);
-		ip2 = (const struct iphdr *) (skb2->data + off2);
-
-		ip_proto = ip1->protocol;
-		if (ip_proto != ip2->protocol ||
-		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
-			return false;
-
-		if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
-			ip_proto = 0;
-		off1 += ip1->ihl * 4;
-		off2 += ip2->ihl * 4;
-		break;
-	}
-
-	case __constant_htons(ETH_P_IPV6): {
-		const struct ipv6hdr *ip1, *ip2;
-
-		ip1 = (const struct ipv6hdr *) (skb1->data + off1);
-		ip2 = (const struct ipv6hdr *) (skb2->data + off2);
-
-		ip_proto = ip1->nexthdr;
-		if (ip_proto != ip2->nexthdr ||
-		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
-		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
-			return false;
-		off1 += 40;
-		off2 += 40;
-	}
-
-	default: /* Maybe compare MAC header here? */
-		return false;
-	}
-
-	poff = proto_ports_offset(ip_proto);
-	if (poff < 0)
-		return true;
-
-	off1 += poff;
-	off2 += poff;
-
-	ports1 = (__force u32 *)(skb1->data + off1);
-	ports2 = (__force u32 *)(skb2->data + off2);
-	return *ports1 == *ports2;
-}
-
 struct choke_skb_cb {
-	u16 classid;
+	u16			classid;
+	u8			keys_valid;
+	struct flow_keys	keys;
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -241,6 +164,32 @@
 }
 
 /*
+ * Compare flow of two packets
+ *  Returns true only if source and destination address and port match.
+ *          false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+			     struct sk_buff *skb2)
+{
+	if (skb1->protocol != skb2->protocol)
+		return false;
+
+	if (!choke_skb_cb(skb1)->keys_valid) {
+		choke_skb_cb(skb1)->keys_valid = 1;
+		skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+	}
+
+	if (!choke_skb_cb(skb2)->keys_valid) {
+		choke_skb_cb(skb2)->keys_valid = 1;
+		skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+	}
+
+	return !memcmp(&choke_skb_cb(skb1)->keys,
+		       &choke_skb_cb(skb2)->keys,
+		       sizeof(struct flow_keys));
+}
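
The rewritten choke_match_flow() extracts each packet's flow keys at most once, caches them in the skb control block, and then compares flows with a single memcmp(). A standalone sketch of that memoize-then-compare idea is below; the key structure and extractor are stand-ins, not the kernel's flow_keys or skb_flow_dissect().

#include <stdio.h>
#include <string.h>

struct keys { unsigned int src, dst; unsigned short sport, dport; };

struct pkt {
	int keys_valid;
	struct keys keys;
	/* stand-in for the parsed packet headers */
	unsigned int src, dst;
	unsigned short sport, dport;
};

static void dissect(struct pkt *p)
{
	if (p->keys_valid)
		return;			/* already cached, do the work only once */
	p->keys = (struct keys){ p->src, p->dst, p->sport, p->dport };
	p->keys_valid = 1;
}

static int match_flow(struct pkt *a, struct pkt *b)
{
	dissect(a);
	dissect(b);
	return !memcmp(&a->keys, &b->keys, sizeof(struct keys));
}

int main(void)
{
	struct pkt a = { .src = 1, .dst = 2, .sport = 80, .dport = 1024 };
	struct pkt b = { .src = 1, .dst = 2, .sport = 80, .dport = 1024 };
	struct pkt c = { .src = 9, .dst = 2, .sport = 80, .dport = 1024 };

	printf("a vs b: %d, a vs c: %d\n", match_flow(&a, &b), match_flow(&a, &c));
	return 0;
}
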
+
+/*
  * Classify flow using either:
  *  1. pre-existing classification result in skb
  *  2. fast internal classification
@@ -317,7 +266,7 @@
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
-	struct red_parms *p = &q->parms;
+	const struct red_parms *p = &q->parms;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
 	if (q->filter_list) {
@@ -326,14 +275,15 @@
 			goto other_drop;	/* Packet was eaten by filter */
 	}
 
+	choke_skb_cb(skb)->keys_valid = 0;
 	/* Compute average queue usage (see RED) */
-	p->qavg = red_calc_qavg(p, sch->q.qlen);
-	if (red_is_idling(p))
-		red_end_of_idle_period(p);
+	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	/* Is queue small? */
-	if (p->qavg <= p->qth_min)
-		p->qcount = -1;
+	if (q->vars.qavg <= p->qth_min)
+		q->vars.qcount = -1;
 	else {
 		unsigned int idx;
 
@@ -345,8 +295,8 @@
 		}
 
 		/* Queue is large, always mark/drop */
-		if (p->qavg > p->qth_max) {
-			p->qcount = -1;
+		if (q->vars.qavg > p->qth_max) {
+			q->vars.qcount = -1;
 
 			sch->qstats.overlimits++;
 			if (use_harddrop(q) || !use_ecn(q) ||
@@ -356,10 +306,10 @@
 			}
 
 			q->stats.forced_mark++;
-		} else if (++p->qcount) {
-			if (red_mark_probability(p, p->qavg)) {
-				p->qcount = 0;
-				p->qR = red_random(p);
+		} else if (++q->vars.qcount) {
+			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
+				q->vars.qcount = 0;
+				q->vars.qR = red_random(p);
 
 				sch->qstats.overlimits++;
 				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
@@ -370,7 +320,7 @@
 				q->stats.prob_mark++;
 			}
 		} else
-			p->qR = red_random(p);
+			q->vars.qR = red_random(p);
 	}
 
 	/* Admit new packet */
@@ -404,8 +354,8 @@
 	struct sk_buff *skb;
 
 	if (q->head == q->tail) {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 		return NULL;
 	}
 
@@ -428,8 +378,8 @@
 	if (len > 0)
 		q->stats.other++;
 	else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 
 	return len;
@@ -439,12 +389,13 @@
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
 	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
 	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
+	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
 };
 
 
@@ -466,6 +417,7 @@
 	int err;
 	struct sk_buff **old = NULL;
 	unsigned int mask;
+	u32 max_P;
 
 	if (opt == NULL)
 		return -EINVAL;
@@ -478,6 +430,8 @@
 	    tb[TCA_CHOKE_STAB] == NULL)
 		return -EINVAL;
 
+	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+
 	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
 	if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -527,10 +481,12 @@
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 		      ctl->Plog, ctl->Scell_log,
-		      nla_data(tb[TCA_CHOKE_STAB]));
+		      nla_data(tb[TCA_CHOKE_STAB]),
+		      max_P);
+	red_set_vars(&q->vars);
 
 	if (q->head == q->tail)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	choke_free(old);
@@ -561,6 +517,7 @@
 		goto nla_put_failure;
 
 	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+	NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69fca27..67fc573 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,7 +60,7 @@
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_frozen_or_stopped(txq)) {
+		if (!netif_xmit_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -121,7 +121,7 @@
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_frozen_or_stopped(txq))
+	if (!netif_xmit_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && netif_tx_queue_frozen_or_stopped(txq))
+	if (ret && netif_xmit_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -242,10 +242,11 @@
 				 * old device drivers set dev->trans_start
 				 */
 				trans_start = txq->trans_start ? : dev->trans_start;
-				if (netif_tx_queue_stopped(txq) &&
+				if (netif_xmit_stopped(txq) &&
 				    time_after(jiffies, (trans_start +
 							 dev->watchdog_timeo))) {
 					some_queue_timedout = 1;
+					txq->trans_timeout++;
 					break;
 				}
 			}
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b9493a0..0b15236 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -34,13 +34,14 @@
 
 struct gred_sched_data {
 	u32		limit;		/* HARD maximal queue length	*/
-	u32      	DP;		/* the drop pramaters */
+	u32		DP;		/* the drop parameters */
 	u32		bytesin;	/* bytes seen on virtualQ so far*/
 	u32		packetsin;	/* packets seen on virtualQ so far*/
 	u32		backlog;	/* bytes on the virtualQ */
 	u8		prio;		/* the prio of this vq */
 
 	struct red_parms parms;
+	struct red_vars  vars;
 	struct red_stats stats;
 };
 
@@ -55,7 +56,7 @@
 	u32		red_flags;
 	u32 		DPs;
 	u32 		def;
-	struct red_parms wred_set;
+	struct red_vars wred_set;
 };
 
 static inline int gred_wred_mode(struct gred_sched *table)
@@ -125,17 +126,17 @@
 	return skb->tc_index & GRED_VQ_MASK;
 }
 
-static inline void gred_load_wred_set(struct gred_sched *table,
+static inline void gred_load_wred_set(const struct gred_sched *table,
 				      struct gred_sched_data *q)
 {
-	q->parms.qavg = table->wred_set.qavg;
-	q->parms.qidlestart = table->wred_set.qidlestart;
+	q->vars.qavg = table->wred_set.qavg;
+	q->vars.qidlestart = table->wred_set.qidlestart;
 }
 
 static inline void gred_store_wred_set(struct gred_sched *table,
 				       struct gred_sched_data *q)
 {
-	table->wred_set.qavg = q->parms.qavg;
+	table->wred_set.qavg = q->vars.qavg;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
@@ -170,7 +171,7 @@
 				goto drop;
 		}
 
-		/* fix tc_index? --could be controvesial but needed for
+		/* fix tc_index? --could be controversial but needed for
 		   requeueing */
 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
@@ -181,8 +182,8 @@
 
 		for (i = 0; i < t->DPs; i++) {
 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
-			    !red_is_idling(&t->tab[i]->parms))
-				qavg += t->tab[i]->parms.qavg;
+			    !red_is_idling(&t->tab[i]->vars))
+				qavg += t->tab[i]->vars.qavg;
 		}
 
 	}
@@ -193,15 +194,17 @@
 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
 
-	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     gred_backlog(t, q, sch));
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	if (gred_wred_mode(t))
 		gred_store_wred_set(t, q);
 
-	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -260,7 +263,7 @@
 			q->backlog -= qdisc_pkt_len(skb);
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		return skb;
@@ -293,7 +296,7 @@
 			q->stats.other++;
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		qdisc_drop(skb, sch);
@@ -320,7 +323,7 @@
 		if (!q)
 			continue;
 
-		red_restart(&q->parms);
+		red_restart(&q->vars);
 		q->backlog = 0;
 	}
 }
@@ -379,29 +382,31 @@
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
-				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
+				 struct tc_gred_qopt *ctl, int prio,
+				 u8 *stab, u32 max_P,
+				 struct gred_sched_data **prealloc)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	struct gred_sched_data *q;
+	struct gred_sched_data *q = table->tab[dp];
 
-	if (table->tab[dp] == NULL) {
-		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
-		if (table->tab[dp] == NULL)
+	if (!q) {
+		table->tab[dp] = q = *prealloc;
+		*prealloc = NULL;
+		if (!q)
 			return -ENOMEM;
 	}
 
-	q = table->tab[dp];
 	q->DP = dp;
 	q->prio = prio;
 	q->limit = ctl->limit;
 
 	if (q->backlog == 0)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	red_set_parms(&q->parms,
 		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
-		      ctl->Scell_log, stab);
-
+		      ctl->Scell_log, stab, max_P);
+	red_set_vars(&q->vars);
 	return 0;
 }
 
@@ -409,6 +414,7 @@
 	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
 	[TCA_GRED_STAB]		= { .len = 256 },
 	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
+	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -418,6 +424,8 @@
 	struct nlattr *tb[TCA_GRED_MAX + 1];
 	int err, prio = GRED_DEF_PRIO;
 	u8 *stab;
+	u32 max_P;
+	struct gred_sched_data *prealloc;
 
 	if (opt == NULL)
 		return -EINVAL;
@@ -433,6 +441,8 @@
 	    tb[TCA_GRED_STAB] == NULL)
 		return -EINVAL;
 
+	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+
 	err = -EINVAL;
 	ctl = nla_data(tb[TCA_GRED_PARMS]);
 	stab = nla_data(tb[TCA_GRED_STAB]);
@@ -455,9 +465,10 @@
 			prio = ctl->prio;
 	}
 
+	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 	sch_tree_lock(sch);
 
-	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
 	if (err < 0)
 		goto errout_locked;
 
@@ -471,6 +482,7 @@
 
 errout_locked:
 	sch_tree_unlock(sch);
+	kfree(prealloc);
 errout:
 	return err;
 }
@@ -498,6 +510,7 @@
 	struct gred_sched *table = qdisc_priv(sch);
 	struct nlattr *parms, *opts = NULL;
 	int i;
+	u32 max_p[MAX_DPs];
 	struct tc_gred_sopt sopt = {
 		.DPs	= table->DPs,
 		.def_DP	= table->def,
@@ -509,6 +522,14 @@
 	if (opts == NULL)
 		goto nla_put_failure;
 	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+
+	for (i = 0; i < MAX_DPs; i++) {
+		struct gred_sched_data *q = table->tab[i];
+
+		max_p[i] = q ? q->parms.max_P : 0;
+	}
+	NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+
 	parms = nla_nest_start(skb, TCA_GRED_PARMS);
 	if (parms == NULL)
 		goto nla_put_failure;
@@ -545,12 +566,12 @@
 		opt.bytesin	= q->bytesin;
 
 		if (gred_wred_mode(table)) {
-			q->parms.qidlestart =
-				table->tab[table->def]->parms.qidlestart;
-			q->parms.qavg = table->tab[table->def]->parms.qavg;
+			q->vars.qidlestart =
+				table->tab[table->def]->vars.qidlestart;
+			q->vars.qavg = table->tab[table->def]->vars.qavg;
 		}
 
-		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 
 append_opt:
 		if (nla_append(skb, sizeof(opt), &opt) < 0)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6488e64..9bdca2e 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1368,6 +1368,7 @@
 	struct tc_hfsc_stats xstats;
 
 	cl->qstats.qlen = cl->qdisc->q.qlen;
+	cl->qstats.backlog = cl->qdisc->qstats.backlog;
 	xstats.level   = cl->level;
 	xstats.period  = cl->cl_vtperiod;
 	xstats.work    = cl->cl_total;
@@ -1561,6 +1562,15 @@
 	struct hfsc_sched *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
+	struct hfsc_class *cl;
+	struct hlist_node *n;
+	unsigned int i;
+
+	sch->qstats.backlog = 0;
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+			sch->qstats.backlog += cl->qdisc->qstats.backlog;
+	}
 
 	qopt.defcls = q->defcls;
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f88256c..28de430 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@
 	if (!netif_is_multiqueue(dev))
 		return -EOPNOTSUPP;
 
-	if (nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 
 	qopt = nla_data(opt);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index edc1950..49131d7 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -107,7 +107,8 @@
 		/* Check that target subqueue is available before
 		 * pulling an skb to avoid head-of-line blocking.
 		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+		if (!netif_xmit_stopped(
+		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -138,7 +139,8 @@
 		/* Check that target subqueue is available before
 		 * pulling an skb to avoid head-of-line blocking.
 		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+		if (!netif_xmit_stopped(
+		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
 			qdisc = q->queues[curband];
 			skb = qdisc->ops->peek(qdisc);
 			if (skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index eb3b9a8..e7e1d0b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -22,6 +22,7 @@
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -66,7 +67,11 @@
 */
 
 struct netem_sched_data {
+	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */
+
+	/* optional qdisc for classful handling (NULL at netem init) */
 	struct Qdisc	*qdisc;
+
 	struct qdisc_watchdog watchdog;
 
 	psched_tdiff_t latency;
@@ -79,6 +84,11 @@
 	u32 duplicate;
 	u32 reorder;
 	u32 corrupt;
+	u32 rate;
+	s32 packet_overhead;
+	u32 cell_size;
+	u32 cell_size_reciprocal;
+	s32 cell_overhead;
 
 	struct crndstate {
 		u32 last;
@@ -111,7 +121,9 @@
 
 };
 
-/* Time stamp put into socket buffer control block */
+/* Time stamp put into socket buffer control block
+ * Only valid when skbs are in our internal t(ime)fifo queue.
+ */
 struct netem_skb_cb {
 	psched_time_t	time_to_send;
 };
@@ -298,6 +310,51 @@
 	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
+static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
+{
+	u64 ticks;
+
+	len += q->packet_overhead;
+
+	if (q->cell_size) {
+		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
+
+		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
+			cells++;
+		len = cells * (q->cell_size + q->cell_overhead);
+	}
+
+	ticks = (u64)len * NSEC_PER_SEC;
+
+	do_div(ticks, q->rate);
+	return PSCHED_NS2TICKS(ticks);
+}
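
packet_len_2_sched_time() adds per-packet overhead, rounds the length up to whole cells when a cell size is configured (an ATM-style model), and converts bytes at q->rate bytes per second into scheduler time. The userspace sketch below reproduces the arithmetic with plain division standing in for reciprocal_divide() and do_div(); the example link parameters are made up.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned long long len_to_ns(unsigned int len, unsigned int rate,
				    int packet_overhead, unsigned int cell_size,
				    int cell_overhead)
{
	len += packet_overhead;

	if (cell_size) {
		unsigned int cells = len / cell_size;

		if (len > cells * cell_size)	/* a partial cell still costs one */
			cells++;
		len = cells * (cell_size + cell_overhead);
	}

	return (unsigned long long)len * NSEC_PER_SEC / rate;
}

int main(void)
{
	/* 1500-byte packet at 125000 bytes/s (1 Mbit/s), 48-byte cells, 5 bytes cell overhead */
	printf("%llu ns\n", len_to_ns(1500, 125000, 0, 48, 5));
	return 0;
}
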
+
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+{
+	struct sk_buff_head *list = &sch->q;
+	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
+	struct sk_buff *skb;
+
+	if (likely(skb_queue_len(list) < sch->limit)) {
+		skb = skb_peek_tail(list);
+		/* Optimize for add at tail */
+		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+			return qdisc_enqueue_tail(nskb, sch);
+
+		skb_queue_reverse_walk(list, skb) {
+			if (tnext >= netem_skb_cb(skb)->time_to_send)
+				break;
+		}
+
+		__skb_queue_after(list, skb, nskb);
+		sch->qstats.backlog += qdisc_pkt_len(nskb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	return qdisc_reshape_fail(nskb, sch);
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -371,9 +428,27 @@
 				  &q->delay_cor, q->delay_dist);
 
 		now = psched_get_time();
+
+		if (q->rate) {
+			struct sk_buff_head *list = &sch->q;
+
+			delay += packet_len_2_sched_time(skb->len, q);
+
+			if (!skb_queue_empty(list)) {
+				/*
+				 * Last packet in queue is reference point (now).
+				 * First packet in queue is already in flight,
+				 * calculate this time bonus and subtract
+				 * from delay.
+				 */
+				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
+			}
+		}
+
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = qdisc_enqueue(skb, q->qdisc);
+		ret = tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -382,9 +457,9 @@
 		cb->time_to_send = psched_get_time();
 		q->counter = 0;
 
-		__skb_queue_head(&q->qdisc->q, skb);
-		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
-		q->qdisc->qstats.requeues++;
+		__skb_queue_head(&sch->q, skb);
+		sch->qstats.backlog += qdisc_pkt_len(skb);
+		sch->qstats.requeues++;
 		ret = NET_XMIT_SUCCESS;
 	}
 
@@ -395,19 +470,20 @@
 		}
 	}
 
-	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
 
 static unsigned int netem_drop(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	unsigned int len = 0;
+	unsigned int len;
 
-	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
-		sch->q.qlen--;
+	len = qdisc_queue_drop(sch);
+	if (!len && q->qdisc && q->qdisc->ops->drop)
+		len = q->qdisc->ops->drop(q->qdisc);
+	if (len)
 		sch->qstats.drops++;
-	}
+
 	return len;
 }
 
@@ -419,16 +495,16 @@
 	if (qdisc_is_throttled(sch))
 		return NULL;
 
-	skb = q->qdisc->ops->peek(q->qdisc);
+tfifo_dequeue:
+	skb = qdisc_peek_head(sch);
 	if (skb) {
 		const struct netem_skb_cb *cb = netem_skb_cb(skb);
-		psched_time_t now = psched_get_time();
 
 		/* if more time remaining? */
-		if (cb->time_to_send <= now) {
-			skb = qdisc_dequeue_peeked(q->qdisc);
+		if (cb->time_to_send <= psched_get_time()) {
+			skb = qdisc_dequeue_tail(sch);
 			if (unlikely(!skb))
-				return NULL;
+				goto qdisc_dequeue;
 
 #ifdef CONFIG_NET_CLS_ACT
 			/*
@@ -439,15 +515,37 @@
 				skb->tstamp.tv64 = 0;
 #endif
 
-			sch->q.qlen--;
+			if (q->qdisc) {
+				int err = qdisc_enqueue(skb, q->qdisc);
+
+				if (unlikely(err != NET_XMIT_SUCCESS)) {
+					if (net_xmit_drop_count(err)) {
+						sch->qstats.drops++;
+						qdisc_tree_decrease_qlen(sch, 1);
+					}
+				}
+				goto tfifo_dequeue;
+			}
+deliver:
 			qdisc_unthrottled(sch);
 			qdisc_bstats_update(sch, skb);
 			return skb;
 		}
 
+		if (q->qdisc) {
+			skb = q->qdisc->ops->dequeue(q->qdisc);
+			if (skb)
+				goto deliver;
+		}
 		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 	}
 
+qdisc_dequeue:
+	if (q->qdisc) {
+		skb = q->qdisc->ops->dequeue(q->qdisc);
+		if (skb)
+			goto deliver;
+	}
 	return NULL;
 }
 
@@ -455,8 +553,9 @@
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
-	sch->q.qlen = 0;
+	qdisc_reset_queue(sch);
+	if (q->qdisc)
+		qdisc_reset(q->qdisc);
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
@@ -488,7 +587,7 @@
 		return -EINVAL;
 
 	s = sizeof(struct disttable) + n * sizeof(s16);
-	d = kmalloc(s, GFP_KERNEL);
+	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
 	if (!d)
 		d = vmalloc(s);
 	if (!d)
@@ -501,9 +600,10 @@
 	root_lock = qdisc_root_sleeping_lock(sch);
 
 	spin_lock_bh(root_lock);
-	dist_free(q->delay_dist);
-	q->delay_dist = d;
+	swap(q->delay_dist, d);
 	spin_unlock_bh(root_lock);
+
+	dist_free(d);
 	return 0;
 }
 
@@ -535,6 +635,19 @@
 	init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	const struct tc_netem_rate *r = nla_data(attr);
+
+	q->rate = r->rate;
+	q->packet_overhead = r->packet_overhead;
+	q->cell_size = r->cell_size;
+	if (q->cell_size)
+		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
+	q->cell_overhead = r->cell_overhead;
+}
+
 static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -548,7 +661,7 @@
 		case NETEM_LOSS_GI: {
 			const struct tc_netem_gimodel *gi = nla_data(la);
 
-			if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
 				pr_info("netem: incorrect gi model size\n");
 				return -EINVAL;
 			}
@@ -567,8 +680,8 @@
 		case NETEM_LOSS_GE: {
 			const struct tc_netem_gemodel *ge = nla_data(la);
 
-			if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
-				pr_info("netem: incorrect gi model size\n");
+			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
+				pr_info("netem: incorrect ge model size\n");
 				return -EINVAL;
 			}
 
@@ -594,6 +707,7 @@
 	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
+	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 };
 
@@ -631,11 +745,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = fifo_set_limit(q->qdisc, qopt->limit);
-	if (ret) {
-		pr_info("netem: can't set fifo limit\n");
-		return ret;
-	}
+	sch->limit = qopt->limit;
 
 	q->latency = qopt->latency;
 	q->jitter = qopt->jitter;
@@ -666,6 +776,9 @@
 	if (tb[TCA_NETEM_CORRUPT])
 		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
+	if (tb[TCA_NETEM_RATE])
+		get_rate(sch, tb[TCA_NETEM_RATE]);
+
 	q->loss_model = CLG_RANDOM;
 	if (tb[TCA_NETEM_LOSS])
 		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -673,88 +786,6 @@
 	return ret;
 }
 
-/*
- * Special case version of FIFO queue for use by netem.
- * It queues in order based on timestamps in skb's
- */
-struct fifo_sched_data {
-	u32 limit;
-	psched_time_t oldest;
-};
-
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
-{
-	struct fifo_sched_data *q = qdisc_priv(sch);
-	struct sk_buff_head *list = &sch->q;
-	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
-
-	if (likely(skb_queue_len(list) < q->limit)) {
-		/* Optimize for add at tail */
-		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
-			q->oldest = tnext;
-			return qdisc_enqueue_tail(nskb, sch);
-		}
-
-		skb_queue_reverse_walk(list, skb) {
-			const struct netem_skb_cb *cb = netem_skb_cb(skb);
-
-			if (tnext >= cb->time_to_send)
-				break;
-		}
-
-		__skb_queue_after(list, skb, nskb);
-
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-
-		return NET_XMIT_SUCCESS;
-	}
-
-	return qdisc_reshape_fail(nskb, sch);
-}
-
-static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
-{
-	struct fifo_sched_data *q = qdisc_priv(sch);
-
-	if (opt) {
-		struct tc_fifo_qopt *ctl = nla_data(opt);
-		if (nla_len(opt) < sizeof(*ctl))
-			return -EINVAL;
-
-		q->limit = ctl->limit;
-	} else
-		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
-
-	q->oldest = PSCHED_PASTPERFECT;
-	return 0;
-}
-
-static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
-	struct fifo_sched_data *q = qdisc_priv(sch);
-	struct tc_fifo_qopt opt = { .limit = q->limit };
-
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
-	return skb->len;
-
-nla_put_failure:
-	return -1;
-}
-
-static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
-	.id		=	"tfifo",
-	.priv_size	=	sizeof(struct fifo_sched_data),
-	.enqueue	=	tfifo_enqueue,
-	.dequeue	=	qdisc_dequeue_head,
-	.peek		=	qdisc_peek_head,
-	.drop		=	qdisc_queue_drop,
-	.init		=	tfifo_init,
-	.reset		=	qdisc_reset_queue,
-	.change		=	tfifo_init,
-	.dump		=	tfifo_dump,
-};
-
 static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -766,18 +797,9 @@
 	qdisc_watchdog_init(&q->watchdog, sch);
 
 	q->loss_model = CLG_RANDOM;
-	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
-				     TC_H_MAKE(sch->handle, 1));
-	if (!q->qdisc) {
-		pr_notice("netem: qdisc create tfifo qdisc failed\n");
-		return -ENOMEM;
-	}
-
 	ret = netem_change(sch, opt);
-	if (ret) {
+	if (ret)
 		pr_info("netem: change failed\n");
-		qdisc_destroy(q->qdisc);
-	}
 	return ret;
 }
 
@@ -786,7 +808,8 @@
 	struct netem_sched_data *q = qdisc_priv(sch);
 
 	qdisc_watchdog_cancel(&q->watchdog);
-	qdisc_destroy(q->qdisc);
+	if (q->qdisc)
+		qdisc_destroy(q->qdisc);
 	dist_free(q->delay_dist);
 }
 
@@ -846,6 +869,7 @@
 	struct tc_netem_corr cor;
 	struct tc_netem_reorder reorder;
 	struct tc_netem_corrupt corrupt;
+	struct tc_netem_rate rate;
 
 	qopt.latency = q->latency;
 	qopt.jitter = q->jitter;
@@ -868,6 +892,12 @@
 	corrupt.correlation = q->corrupt_cor.rho;
 	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
+	rate.rate = q->rate;
+	rate.packet_overhead = q->packet_overhead;
+	rate.cell_size = q->cell_size;
+	rate.cell_overhead = q->cell_overhead;
+	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+
 	if (dump_loss_model(q, skb) != 0)
 		goto nla_put_failure;
 
@@ -883,7 +913,7 @@
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 
-	if (cl != 1) 	/* only one class */
+	if (cl != 1 || !q->qdisc) 	/* only one class */
 		return -ENOENT;
 
 	tcm->tcm_handle |= TC_H_MIN(1);
@@ -897,14 +927,13 @@
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 
-	if (new == NULL)
-		new = &noop_qdisc;
-
 	sch_tree_lock(sch);
 	*old = q->qdisc;
 	q->qdisc = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
+	if (*old) {
+		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+		qdisc_reset(*old);
+	}
 	sch_tree_unlock(sch);
 
 	return 0;
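
The TCA_NETEM_RATE hunks above add a per-packet serialization delay: the packet length (plus packet_overhead, optionally rounded up to whole cells of cell_size + cell_overhead) is divided by the configured rate. A minimal userspace sketch of that arithmetic, assuming the rate is expressed in bytes per second and leaving out the final PSCHED tick conversion; the helper name and the values are made up for the example:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of packet_len_2_sched_time(); parameter names follow
 * struct tc_netem_rate, the values below are only illustrative. */
static uint64_t xmit_delay_ns(unsigned int len, uint64_t rate_Bps,
			      int packet_overhead, unsigned int cell_size,
			      int cell_overhead)
{
	len += packet_overhead;
	if (cell_size) {
		unsigned int cells = len / cell_size;

		if (len > cells * cell_size)	/* a partial cell still costs a full one */
			cells++;
		len = cells * (cell_size + cell_overhead);
	}
	return (uint64_t)len * 1000000000ULL / rate_Bps;
}

int main(void)
{
	/* 1500-byte packet at 125000 bytes/s (1 Mbit/s) -> 12 ms of delay */
	printf("%llu ns\n",
	       (unsigned long long)xmit_delay_ns(1500, 125000, 0, 0, 0));
	return 0;
}

The qdisc stacks this delay on top of the emulated latency/jitter, using the last queued packet as the reference point so back-to-back packets serialize one after another instead of overlapping.
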
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1033434..e68cb44 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -211,6 +211,7 @@
 	struct nlattr *tb[TCA_QFQ_MAX + 1];
 	u32 weight, lmax, inv_w;
 	int i, err;
+	int delta_w;
 
 	if (tca[TCA_OPTIONS] == NULL) {
 		pr_notice("qfq: no options\n");
@@ -232,9 +233,10 @@
 
 	inv_w = ONE_FP / weight;
 	weight = ONE_FP / inv_w;
-	if (q->wsum + weight > QFQ_MAX_WSUM) {
+	delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
+	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
 		pr_notice("qfq: total weight out of range (%u + %u)\n",
-			  weight, q->wsum);
+			  delta_w, q->wsum);
 		return -EINVAL;
 	}
 
@@ -256,13 +258,12 @@
 				return err;
 		}
 
-		sch_tree_lock(sch);
-		if (tb[TCA_QFQ_WEIGHT]) {
-			q->wsum = weight - ONE_FP / cl->inv_w;
+		if (inv_w != cl->inv_w) {
+			sch_tree_lock(sch);
+			q->wsum += delta_w;
 			cl->inv_w = inv_w;
+			sch_tree_unlock(sch);
 		}
-		sch_tree_unlock(sch);
-
 		return 0;
 	}
 
@@ -277,7 +278,6 @@
 	i = qfq_calc_index(cl->inv_w, cl->lmax);
 
 	cl->grp = &q->groups[i];
-	q->wsum += weight;
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
@@ -294,6 +294,7 @@
 			return err;
 		}
 	}
+	q->wsum += weight;
 
 	sch_tree_lock(sch);
 	qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -817,11 +818,11 @@
 static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 {
 	unsigned long mask;
-	uint32_t limit, roundedF;
+	u64 limit, roundedF;
 	int slot_shift = cl->grp->slot_shift;
 
 	roundedF = qfq_round_down(cl->F, slot_shift);
-	limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
 
 	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
 		/* timestamp was stale */
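
The qfq_change_class() hunk above now validates and accumulates only the difference between the new and the old weight, so reconfiguring an existing class no longer counts its weight twice against QFQ_MAX_WSUM. A small sketch of the bookkeeping with made-up numbers (the real code works on the ONE_FP fixed-point inverse weights, and MAX_WSUM below is not the kernel constant):

#include <stdio.h>

#define MAX_WSUM 64	/* stand-in for QFQ_MAX_WSUM, not the kernel value */

int main(void)
{
	int wsum  = 10;	/* weight already accounted for other classes */
	int old_w = 3;	/* current weight of the class being changed */
	int new_w = 5;	/* requested weight */
	int delta_w = new_w - old_w;

	/* Only the delta is checked and added; changing 3 -> 5 consumes
	 * 2 units of wsum instead of another 5. */
	if (wsum + delta_w > MAX_WSUM) {
		printf("total weight out of range\n");
		return 1;
	}
	wsum += delta_w;
	printf("wsum = %d\n", wsum);	/* prints 12 */
	return 0;
}
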
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index d617161..a5cc301 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -39,7 +39,9 @@
 struct red_sched_data {
 	u32			limit;		/* HARD maximal queue length */
 	unsigned char		flags;
+	struct timer_list	adapt_timer;
 	struct red_parms	parms;
+	struct red_vars		vars;
 	struct red_stats	stats;
 	struct Qdisc		*qdisc;
 };
@@ -60,12 +62,14 @@
 	struct Qdisc *child = q->qdisc;
 	int ret;
 
-	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     child->qstats.backlog);
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
-	switch (red_action(&q->parms, q->parms.qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -116,8 +120,8 @@
 		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
 	} else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 	return skb;
 }
@@ -143,8 +147,8 @@
 		return len;
 	}
 
-	if (!red_is_idling(&q->parms))
-		red_start_of_idle_period(&q->parms);
+	if (!red_is_idling(&q->vars))
+		red_start_of_idle_period(&q->vars);
 
 	return 0;
 }
@@ -155,18 +159,21 @@
 
 	qdisc_reset(q->qdisc);
 	sch->q.qlen = 0;
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static void red_destroy(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
+
+	del_timer_sync(&q->adapt_timer);
 	qdisc_destroy(q->qdisc);
 }
 
 static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
 	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
 	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
+	[TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt)
@@ -176,6 +183,7 @@
 	struct tc_red_qopt *ctl;
 	struct Qdisc *child = NULL;
 	int err;
+	u32 max_P;
 
 	if (opt == NULL)
 		return -EINVAL;
@@ -188,6 +196,8 @@
 	    tb[TCA_RED_STAB] == NULL)
 		return -EINVAL;
 
+	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+
 	ctl = nla_data(tb[TCA_RED_PARMS]);
 
 	if (ctl->limit > 0) {
@@ -205,22 +215,42 @@
 		q->qdisc = child;
 	}
 
-	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
-				 ctl->Plog, ctl->Scell_log,
-				 nla_data(tb[TCA_RED_STAB]));
+	red_set_parms(&q->parms,
+		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
+		      ctl->Plog, ctl->Scell_log,
+		      nla_data(tb[TCA_RED_STAB]),
+		      max_P);
+	red_set_vars(&q->vars);
+
+	del_timer(&q->adapt_timer);
+	if (ctl->flags & TC_RED_ADAPTATIVE)
+		mod_timer(&q->adapt_timer, jiffies + HZ/2);
 
 	if (!q->qdisc->q.qlen)
-		red_start_of_idle_period(&q->parms);
+		red_start_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	return 0;
 }
 
+static inline void red_adaptative_timer(unsigned long arg)
+{
+	struct Qdisc *sch = (struct Qdisc *)arg;
+	struct red_sched_data *q = qdisc_priv(sch);
+	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+
+	spin_lock(root_lock);
+	red_adaptative_algo(&q->parms, &q->vars);
+	mod_timer(&q->adapt_timer, jiffies + HZ/2);
+	spin_unlock(root_lock);
+}
+
 static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	q->qdisc = &noop_qdisc;
+	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
 	return red_change(sch, opt);
 }
 
@@ -243,6 +273,7 @@
 	if (opts == NULL)
 		goto nla_put_failure;
 	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
+	NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
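
The sch_red.c changes split the read-mostly parameters (struct red_parms) from the per-instance state (struct red_vars) and, when TC_RED_ADAPTATIVE is set, arm a timer every HZ/2 that runs red_adaptative_algo() to tune max_P. A rough userspace sketch of the Adaptive RED idea — keep the average queue between the thresholds by nudging the drop probability — with illustrative constants, not the kernel's exact algorithm or helper names:

#include <stdio.h>

/* Nudge max_p so the average queue settles between qth_min and qth_max. */
static double adapt_max_p(double max_p, double qavg,
			  double qth_min, double qth_max)
{
	double target_lo = qth_min + 0.4 * (qth_max - qth_min);
	double target_hi = qth_min + 0.6 * (qth_max - qth_min);

	if (qavg > target_hi && max_p < 0.5)
		max_p += 0.01;		/* additive increase */
	else if (qavg < target_lo && max_p > 0.001)
		max_p *= 0.9;		/* multiplicative decrease */
	return max_p;
}

int main(void)
{
	double max_p = 0.02;
	double qavg[] = { 10, 40, 80, 90, 30, 15 };

	for (int i = 0; i < 6; i++) {
		max_p = adapt_max_p(max_p, qavg[i], 20, 80);
		printf("qavg=%.0f -> max_p=%.4f\n", qavg[i], max_p);
	}
	return 0;
}
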
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index e83c272..96e42ca 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -26,6 +26,7 @@
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
+#include <net/flow_keys.h>
 
 /*
  * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -286,6 +287,7 @@
 	u32 minqlen = ~0;
 	u32 r, slot, salt, sfbhash;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	struct flow_keys keys;
 
 	if (unlikely(sch->q.qlen >= q->limit)) {
 		sch->qstats.overlimits++;
@@ -309,13 +311,19 @@
 		/* If using external classifiers, get result and record it. */
 		if (!sfb_classify(skb, q, &ret, &salt))
 			goto other_drop;
+		keys.src = salt;
+		keys.dst = 0;
+		keys.ports = 0;
 	} else {
-		salt = skb_get_rxhash(skb);
+		skb_flow_dissect(skb, &keys);
 	}
 
 	slot = q->slot;
 
-	sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+	sfbhash = jhash_3words((__force u32)keys.dst,
+			       (__force u32)keys.src,
+			       (__force u32)keys.ports,
+			       q->bins[slot].perturbation);
 	if (!sfbhash)
 		sfbhash = 1;
 	sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -347,7 +355,10 @@
 	if (unlikely(p_min >= SFB_MAX_PROB)) {
 		/* Inelastic flow */
 		if (q->double_buffering) {
-			sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+			sfbhash = jhash_3words((__force u32)keys.dst,
+					       (__force u32)keys.src,
+					       (__force u32)keys.ports,
+					       q->bins[slot].perturbation);
 			if (!sfbhash)
 				sfbhash = 1;
 			sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 4f5510e..0a79640 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -17,14 +17,13 @@
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/ipv6.h>
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/flow_keys.h>
 
 
 /*	Stochastic Fairness Queuing algorithm.
@@ -67,16 +66,18 @@
 	SFQ is superior for this purpose.
 
 	IMPLEMENTATION:
-	This implementation limits maximal queue length to 128;
-	max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
-	The only goal of this restrictions was that all data
-	fit into one 4K page on 32bit arches.
+	This implementation limits:
+	- maximal queue length per flow to 127 packets,
+	- max mtu to 2^18-1,
+	- max 65408 flows,
+	- number of hash buckets to 65536.
 
 	It is easy to increase these values, but not in flight.  */
 
-#define SFQ_DEPTH		128 /* max number of packets per flow */
-#define SFQ_SLOTS		128 /* max number of flows */
-#define SFQ_EMPTY_SLOT		255
+#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
+#define SFQ_DEFAULT_FLOWS	128
+#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
+#define SFQ_EMPTY_SLOT		0xffff
 #define SFQ_DEFAULT_HASH_DIVISOR 1024
 
 /* We use 16 bits to store allot, and want to handle packets up to 64K
@@ -85,13 +86,13 @@
 #define SFQ_ALLOT_SHIFT		3
 #define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
 
-/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
-typedef unsigned char sfq_index;
+/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
+typedef u16 sfq_index;
 
 /*
  * We don't use pointers to save space.
- * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
- * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
+ * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
+ * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
  * are 'pointers' to dep[] array
  */
 struct sfq_head {
@@ -103,28 +104,38 @@
 	struct sk_buff	*skblist_next;
 	struct sk_buff	*skblist_prev;
 	sfq_index	qlen; /* number of skbs in skblist */
-	sfq_index	next; /* next slot in sfq chain */
+	sfq_index	next; /* next slot in sfq RR chain */
 	struct sfq_head dep; /* anchor in dep[] chains */
 	unsigned short	hash; /* hash value (index in ht[]) */
 	short		allot; /* credit for this slot */
 };
 
 struct sfq_sched_data {
-/* Parameters */
-	int		perturb_period;
-	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
-	int		limit;
+/* frequently used fields */
+	int		limit;		/* limit of total number of packets in this qdisc */
 	unsigned int	divisor;	/* number of slots in hash table */
-/* Variables */
-	struct tcf_proto *filter_list;
-	struct timer_list perturb_timer;
+	unsigned int	maxflows;	/* number of flows in flows array */
+	int		headdrop;
+	int		maxdepth;	/* limit of packets per flow */
+
 	u32		perturbation;
+	struct tcf_proto *filter_list;
 	sfq_index	cur_depth;	/* depth of longest slot */
 	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
 	struct sfq_slot *tail;		/* current slot in round */
-	sfq_index	*ht;		/* Hash table (divisor slots) */
-	struct sfq_slot	slots[SFQ_SLOTS];
-	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
+	sfq_index	*ht;		/* Hash table ('divisor' slots) */
+	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */
+
+	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
+					/* Linked lists of slots, indexed by depth
+					 * dep[0] : list of unused flows
+					 * dep[1] : list of flows with 1 packet
+					 * dep[X] : list of flows with X packets
+					 */
+
+	int		perturb_period;
+	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
+	struct timer_list perturb_timer;
 };
 
 /*
@@ -132,66 +143,36 @@
  */
 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
 {
-	if (val < SFQ_SLOTS)
+	if (val < SFQ_MAX_FLOWS)
 		return &q->slots[val].dep;
-	return &q->dep[val - SFQ_SLOTS];
+	return &q->dep[val - SFQ_MAX_FLOWS];
 }
 
-static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+/*
+ * In order to be able to quickly rehash our queue when timer changes
+ * q->perturbation, we store flow_keys in skb->cb[]
+ */
+struct sfq_skb_cb {
+	struct flow_keys	keys;
+};
+
+static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
 {
-	return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
+	BUILD_BUG_ON(sizeof(skb->cb) <
+		sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
+	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
-static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(const struct sfq_sched_data *q,
+			     const struct sk_buff *skb)
 {
-	u32 h, h2;
+	const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
+	unsigned int hash;
 
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-	{
-		const struct iphdr *iph;
-		int poff;
-
-		if (!pskb_network_may_pull(skb, sizeof(*iph)))
-			goto err;
-		iph = ip_hdr(skb);
-		h = (__force u32)iph->daddr;
-		h2 = (__force u32)iph->saddr ^ iph->protocol;
-		if (ip_is_fragment(iph))
-			break;
-		poff = proto_ports_offset(iph->protocol);
-		if (poff >= 0 &&
-		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
-			iph = ip_hdr(skb);
-			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
-		}
-		break;
-	}
-	case htons(ETH_P_IPV6):
-	{
-		const struct ipv6hdr *iph;
-		int poff;
-
-		if (!pskb_network_may_pull(skb, sizeof(*iph)))
-			goto err;
-		iph = ipv6_hdr(skb);
-		h = (__force u32)iph->daddr.s6_addr32[3];
-		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
-		poff = proto_ports_offset(iph->nexthdr);
-		if (poff >= 0 &&
-		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
-			iph = ipv6_hdr(skb);
-			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
-		}
-		break;
-	}
-	default:
-err:
-		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
-		h2 = (unsigned long)skb->sk;
-	}
-
-	return sfq_fold_hash(q, h, h2);
+	hash = jhash_3words((__force u32)keys->dst,
+			    (__force u32)keys->src ^ keys->ip_proto,
+			    (__force u32)keys->ports, q->perturbation);
+	return hash & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -206,8 +187,10 @@
 	    TC_H_MIN(skb->priority) <= q->divisor)
 		return TC_H_MIN(skb->priority);
 
-	if (!q->filter_list)
+	if (!q->filter_list) {
+		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
 		return sfq_hash(q, skb) + 1;
+	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
@@ -228,18 +211,19 @@
 }
 
 /*
- * x : slot number [0 .. SFQ_SLOTS - 1]
+ * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
  */
 static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
 {
 	sfq_index p, n;
-	int qlen = q->slots[x].qlen;
+	struct sfq_slot *slot = &q->slots[x];
+	int qlen = slot->qlen;
 
-	p = qlen + SFQ_SLOTS;
+	p = qlen + SFQ_MAX_FLOWS;
 	n = q->dep[qlen].next;
 
-	q->slots[x].dep.next = n;
-	q->slots[x].dep.prev = p;
+	slot->dep.next = n;
+	slot->dep.prev = p;
 
 	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
 	sfq_dep_head(q, n)->prev = x;
@@ -304,6 +288,7 @@
 
 static inline void slot_queue_init(struct sfq_slot *slot)
 {
+	memset(slot, 0, sizeof(*slot));
 	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
 }
 
@@ -334,7 +319,7 @@
 		x = q->dep[d].next;
 		slot = &q->slots[x];
 drop:
-		skb = slot_dequeue_tail(slot);
+		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
 		len = qdisc_pkt_len(skb);
 		sfq_dec(q, x);
 		kfree_skb(skb);
@@ -378,16 +363,27 @@
 	slot = &q->slots[x];
 	if (x == SFQ_EMPTY_SLOT) {
 		x = q->dep[0].next; /* get a free slot */
+		if (x >= SFQ_MAX_FLOWS)
+			return qdisc_drop(skb, sch);
 		q->ht[hash] = x;
 		slot = &q->slots[x];
 		slot->hash = hash;
 	}
 
-	/* If selected queue has length q->limit, do simple tail drop,
-	 * i.e. drop _this_ packet.
-	 */
-	if (slot->qlen >= q->limit)
-		return qdisc_drop(skb, sch);
+	if (slot->qlen >= q->maxdepth) {
+		struct sk_buff *head;
+
+		if (!q->headdrop)
+			return qdisc_drop(skb, sch);
+
+		head = slot_dequeue_head(slot);
+		sch->qstats.backlog -= qdisc_pkt_len(head);
+		qdisc_drop(head, sch);
+
+		sch->qstats.backlog += qdisc_pkt_len(skb);
+		slot_queue_add(slot, skb);
+		return NET_XMIT_CN;
+	}
 
 	sch->qstats.backlog += qdisc_pkt_len(skb);
 	slot_queue_add(slot, skb);
@@ -395,11 +391,11 @@
 	if (slot->qlen == 1) {		/* The flow is new */
 		if (q->tail == NULL) {	/* It is the first flow */
 			slot->next = x;
+			q->tail = slot;
 		} else {
 			slot->next = q->tail->next;
 			q->tail->next = x;
 		}
-		q->tail = slot;
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit)
@@ -468,12 +464,83 @@
 		kfree_skb(skb);
 }
 
+/*
+ * When q->perturbation is changed, we rehash all queued skbs
+ * to avoid OOO (Out Of Order) effects.
+ * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
+ * counters.
+ */
+static void sfq_rehash(struct Qdisc *sch)
+{
+	struct sfq_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	int i;
+	struct sfq_slot *slot;
+	struct sk_buff_head list;
+	int dropped = 0;
+
+	__skb_queue_head_init(&list);
+
+	for (i = 0; i < q->maxflows; i++) {
+		slot = &q->slots[i];
+		if (!slot->qlen)
+			continue;
+		while (slot->qlen) {
+			skb = slot_dequeue_head(slot);
+			sfq_dec(q, i);
+			__skb_queue_tail(&list, skb);
+		}
+		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+	}
+	q->tail = NULL;
+
+	while ((skb = __skb_dequeue(&list)) != NULL) {
+		unsigned int hash = sfq_hash(q, skb);
+		sfq_index x = q->ht[hash];
+
+		slot = &q->slots[x];
+		if (x == SFQ_EMPTY_SLOT) {
+			x = q->dep[0].next; /* get a free slot */
+			if (x >= SFQ_MAX_FLOWS) {
+drop:				sch->qstats.backlog -= qdisc_pkt_len(skb);
+				kfree_skb(skb);
+				dropped++;
+				continue;
+			}
+			q->ht[hash] = x;
+			slot = &q->slots[x];
+			slot->hash = hash;
+		}
+		if (slot->qlen >= q->maxdepth)
+			goto drop;
+		slot_queue_add(slot, skb);
+		sfq_inc(q, x);
+		if (slot->qlen == 1) {		/* The flow is new */
+			if (q->tail == NULL) {	/* It is the first flow */
+				slot->next = x;
+			} else {
+				slot->next = q->tail->next;
+				q->tail->next = x;
+			}
+			q->tail = slot;
+			slot->allot = q->scaled_quantum;
+		}
+	}
+	sch->q.qlen -= dropped;
+	qdisc_tree_decrease_qlen(sch, dropped);
+}
+
 static void sfq_perturbation(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
 	struct sfq_sched_data *q = qdisc_priv(sch);
+	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 
+	spin_lock(root_lock);
 	q->perturbation = net_random();
+	if (!q->filter_list && q->tail)
+		sfq_rehash(sch);
+	spin_unlock(root_lock);
 
 	if (q->perturb_period)
 		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
@@ -483,23 +550,39 @@
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct tc_sfq_qopt *ctl = nla_data(opt);
+	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
 	unsigned int qlen;
 
 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
 		return -EINVAL;
-
+	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
+		ctl_v1 = nla_data(opt);
 	if (ctl->divisor &&
 	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
 		return -EINVAL;
 
 	sch_tree_lock(sch);
-	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
-	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+	if (ctl->quantum) {
+		q->quantum = ctl->quantum;
+		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+	}
 	q->perturb_period = ctl->perturb_period * HZ;
-	if (ctl->limit)
-		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-	if (ctl->divisor)
+	if (ctl->flows)
+		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+	if (ctl->divisor) {
 		q->divisor = ctl->divisor;
+		q->maxflows = min_t(u32, q->maxflows, q->divisor);
+	}
+	if (ctl_v1) {
+		if (ctl_v1->depth)
+			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+		q->headdrop = ctl_v1->headdrop;
+	}
+	if (ctl->limit) {
+		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
+		q->maxflows = min_t(u32, q->maxflows, q->limit);
+	}
+
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > q->limit)
 		sfq_drop(sch);
@@ -514,46 +597,77 @@
 	return 0;
 }
 
+static void *sfq_alloc(size_t sz)
+{
+	void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+	if (!ptr)
+		ptr = vmalloc(sz);
+	return ptr;
+}
+
+static void sfq_free(void *addr)
+{
+	if (addr) {
+		if (is_vmalloc_addr(addr))
+			vfree(addr);
+		else
+			kfree(addr);
+	}
+}
+
+static void sfq_destroy(struct Qdisc *sch)
+{
+	struct sfq_sched_data *q = qdisc_priv(sch);
+
+	tcf_destroy_chain(&q->filter_list);
+	q->perturb_period = 0;
+	del_timer_sync(&q->perturb_timer);
+	sfq_free(q->ht);
+	sfq_free(q->slots);
+}
+
 static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	size_t sz;
 	int i;
 
 	q->perturb_timer.function = sfq_perturbation;
 	q->perturb_timer.data = (unsigned long)sch;
 	init_timer_deferrable(&q->perturb_timer);
 
-	for (i = 0; i < SFQ_DEPTH; i++) {
-		q->dep[i].next = i + SFQ_SLOTS;
-		q->dep[i].prev = i + SFQ_SLOTS;
+	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
+		q->dep[i].next = i + SFQ_MAX_FLOWS;
+		q->dep[i].prev = i + SFQ_MAX_FLOWS;
 	}
 
-	q->limit = SFQ_DEPTH - 1;
+	q->limit = SFQ_MAX_DEPTH;
+	q->maxdepth = SFQ_MAX_DEPTH;
 	q->cur_depth = 0;
 	q->tail = NULL;
 	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
-	if (opt == NULL) {
-		q->quantum = psched_mtu(qdisc_dev(sch));
-		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
-		q->perturb_period = 0;
-		q->perturbation = net_random();
-	} else {
+	q->maxflows = SFQ_DEFAULT_FLOWS;
+	q->quantum = psched_mtu(qdisc_dev(sch));
+	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+	q->perturb_period = 0;
+	q->perturbation = net_random();
+
+	if (opt) {
 		int err = sfq_change(sch, opt);
 		if (err)
 			return err;
 	}
 
-	sz = sizeof(q->ht[0]) * q->divisor;
-	q->ht = kmalloc(sz, GFP_KERNEL);
-	if (!q->ht && sz > PAGE_SIZE)
-		q->ht = vmalloc(sz);
-	if (!q->ht)
+	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
+	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
+	if (!q->ht || !q->slots) {
+		sfq_destroy(sch);
 		return -ENOMEM;
+	}
 	for (i = 0; i < q->divisor; i++)
 		q->ht[i] = SFQ_EMPTY_SLOT;
 
-	for (i = 0; i < SFQ_SLOTS; i++) {
+	for (i = 0; i < q->maxflows; i++) {
 		slot_queue_init(&q->slots[i]);
 		sfq_link(q, i);
 	}
@@ -564,31 +678,20 @@
 	return 0;
 }
 
-static void sfq_destroy(struct Qdisc *sch)
-{
-	struct sfq_sched_data *q = qdisc_priv(sch);
-
-	tcf_destroy_chain(&q->filter_list);
-	q->perturb_period = 0;
-	del_timer_sync(&q->perturb_timer);
-	if (is_vmalloc_addr(q->ht))
-		vfree(q->ht);
-	else
-		kfree(q->ht);
-}
-
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
-	struct tc_sfq_qopt opt;
+	struct tc_sfq_qopt_v1 opt;
 
-	opt.quantum = q->quantum;
-	opt.perturb_period = q->perturb_period / HZ;
-
-	opt.limit = q->limit;
-	opt.divisor = q->divisor;
-	opt.flows = q->limit;
+	memset(&opt, 0, sizeof(opt));
+	opt.v0.quantum	= q->quantum;
+	opt.v0.perturb_period = q->perturb_period / HZ;
+	opt.v0.limit	= q->limit;
+	opt.v0.divisor	= q->divisor;
+	opt.v0.flows	= q->maxflows;
+	opt.depth	= q->maxdepth;
+	opt.headdrop	= q->headdrop;
 
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
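
The reworked SFQ hashes the flow keys dissected by skb_flow_dissect() — (dst, src ^ ip_proto, ports) — through jhash with q->perturbation as the seed and masks the result with the power-of-two divisor; because the keys are cached in skb->cb[], the perturbation timer can now rehash packets already queued instead of leaving stale buckets behind. A toy sketch of the idea, using a stand-in mixer rather than jhash_3words() and made-up flow values:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for jhash_3words(a, b, c, initval); only meant to show the
 * shape of the computation, not to match the kernel hash. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
	uint32_t h = initval;

	h = (h ^ a) * 0x9e3779b1u;
	h = (h ^ b) * 0x85ebca77u;
	h = (h ^ c) * 0xc2b2ae3du;
	return h ^ (h >> 16);
}

int main(void)
{
	uint32_t divisor = 1024;		/* must stay a power of two */
	uint32_t dst = 0x08080808, src = 0xc0a80001;
	uint32_t ports = (40000u << 16) | 53;	/* both ports packed into one word */
	uint8_t  ip_proto = 17;			/* UDP */

	uint32_t perturbation = 0x12345678;
	uint32_t slot = mix3(dst, src ^ ip_proto, ports, perturbation) & (divisor - 1);
	printf("bucket %u\n", slot);

	/* A new perturbation moves the same 5-tuple to a different bucket,
	 * which is why sfq_rehash() re-files the packets still queued. */
	perturbation = 0x9abcdef0;
	slot = mix3(dst, src ^ ip_proto, ports, perturbation) & (divisor - 1);
	printf("bucket %u after reseeding\n", slot);
	return 0;
}
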
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1dcfb52..b8e1563 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -346,6 +346,7 @@
 	struct nlattr *nest;
 	struct tc_tbf_qopt opt;
 
+	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 4f4c52c..4532659 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -277,7 +277,7 @@
 		return 0;
 
 	rcu_read_lock();
-	mn = dst_get_neighbour(dst);
+	mn = dst_get_neighbour_noref(dst);
 	res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
 	rcu_read_unlock();
 
@@ -310,7 +310,7 @@
 
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
-		if (__netif_subqueue_stopped(slave, subq) ||
+		if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
@@ -321,7 +321,7 @@
 			if (__netif_tx_trylock(slave_txq)) {
 				unsigned int length = qdisc_pkt_len(skb);
 
-				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+				if (!netif_xmit_frozen_or_stopped(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 					txq_trans_update(slave_txq);
 					__netif_tx_unlock(slave_txq);
@@ -333,7 +333,7 @@
 				}
 				__netif_tx_unlock(slave_txq);
 			}
-			if (netif_queue_stopped(dev))
+			if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
 				busy = 1;
 			break;
 		case 1:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 152b5b3..acd2edb 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-		(unsigned long)sp->autoclose * HZ;
+		min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
 	/* Initializes the timers */
 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index c8cc24e..68a385d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -415,7 +415,7 @@
 	sctp_subtype_t subtype;
 	sctp_state_t state;
 	int error = 0;
-	int first_time = 1;	/* is this the first time through the looop */
+	int first_time = 1;	/* is this the first time through the loop */
 
 	if (ep->base.dead)
 		return;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b7692aa..80f71af 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -105,7 +105,7 @@
 struct sctp_input_cb {
 	union {
 		struct inet_skb_parm	h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8104278..91f4791 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -107,7 +107,7 @@
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
 			addr->a.v6.sin6_port = 0;
-			ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
+			addr->a.v6.sin6_addr = ifa->addr;
 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 			addr->valid = 1;
 			spin_lock_bh(&sctp_local_addr_lock);
@@ -219,8 +219,8 @@
 	/* Fill in the dest address from the route entry passed with the skb
 	 * and the source address from the transport.
 	 */
-	ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
-	ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+	fl6.daddr = transport->ipaddr.v6.sin6_addr;
+	fl6.saddr = transport->saddr.v6.sin6_addr;
 
 	fl6.flowlabel = np->flow_label;
 	IP6_ECN_flow_xmit(sk, fl6.flowlabel);
@@ -231,7 +231,7 @@
 
 	if (np->opt && np->opt->srcrt) {
 		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-		ipv6_addr_copy(&fl6.daddr, rt0->addr);
+		fl6.daddr = *rt0->addr;
 	}
 
 	SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
@@ -265,7 +265,7 @@
 	sctp_scope_t scope;
 
 	memset(fl6, 0, sizeof(struct flowi6));
-	ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
+	fl6->daddr = daddr->v6.sin6_addr;
 	fl6->fl6_dport = daddr->v6.sin6_port;
 	fl6->flowi6_proto = IPPROTO_SCTP;
 	if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -277,7 +277,7 @@
 		fl6->fl6_sport = htons(asoc->base.bind_addr.port);
 
 	if (saddr) {
-		ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
+		fl6->saddr = saddr->v6.sin6_addr;
 		fl6->fl6_sport = saddr->v6.sin6_port;
 		SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
 	}
@@ -334,7 +334,7 @@
 	}
 	rcu_read_unlock();
 	if (baddr) {
-		ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+		fl6->saddr = baddr->v6.sin6_addr;
 		fl6->fl6_sport = baddr->v6.sin6_port;
 		dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
 	}
@@ -375,7 +375,7 @@
 
 	if (t->dst) {
 		saddr->v6.sin6_family = AF_INET6;
-		ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
+		saddr->v6.sin6_addr = fl6->saddr;
 	}
 }
 
@@ -400,7 +400,7 @@
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
 			addr->a.v6.sin6_port = 0;
-			ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
+			addr->a.v6.sin6_addr = ifp->addr;
 			addr->a.v6.sin6_scope_id = dev->ifindex;
 			addr->valid = 1;
 			INIT_LIST_HEAD(&addr->list);
@@ -416,7 +416,6 @@
 static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
 			     int is_saddr)
 {
-	void *from;
 	__be16 *port;
 	struct sctphdr *sh;
 
@@ -428,12 +427,11 @@
 	sh = sctp_hdr(skb);
 	if (is_saddr) {
 		*port  = sh->source;
-		from = &ipv6_hdr(skb)->saddr;
+		addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
 	} else {
 		*port = sh->dest;
-		from = &ipv6_hdr(skb)->daddr;
+		addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
 	}
-	ipv6_addr_copy(&addr->v6.sin6_addr, from);
 }
 
 /* Initialize an sctp_addr from a socket. */
@@ -441,7 +439,7 @@
 {
 	addr->v6.sin6_family = AF_INET6;
 	addr->v6.sin6_port = 0;
-	ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
+	addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -454,7 +452,7 @@
 		inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
 			addr->v4.sin_addr.s_addr;
 	} else {
-		ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
+		inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
 	}
 }
 
@@ -467,7 +465,7 @@
 		inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
 		inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
 	} else {
-		ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
+		inet6_sk(sk)->daddr = addr->v6.sin6_addr;
 	}
 }
 
@@ -479,7 +477,7 @@
 	addr->v6.sin6_family = AF_INET6;
 	addr->v6.sin6_port = port;
 	addr->v6.sin6_flowinfo = 0; /* BUG */
-	ipv6_addr_copy(&addr->v6.sin6_addr, &param->v6.addr);
+	addr->v6.sin6_addr = param->v6.addr;
 	addr->v6.sin6_scope_id = iif;
 }
 
@@ -493,7 +491,7 @@
 
 	param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
 	param->v6.param_hdr.length = htons(length);
-	ipv6_addr_copy(&param->v6.addr, &addr->v6.sin6_addr);
+	param->v6.addr = addr->v6.sin6_addr;
 
 	return length;
 }
@@ -504,7 +502,7 @@
 {
 	addr->sa.sa_family = AF_INET6;
 	addr->v6.sin6_port = port;
-	ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
+	addr->v6.sin6_addr = *saddr;
 }
 
 /* Compare addresses exactly.
@@ -759,7 +757,7 @@
 		}
 
 		sin6from = &asoc->peer.primary_addr.v6;
-		ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
+		sin6->sin6_addr = sin6from->sin6_addr;
 		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 			sin6->sin6_scope_id = sin6from->sin6_scope_id;
 	}
@@ -787,7 +785,7 @@
 		}
 
 		/* Otherwise, just copy the v6 address. */
-		ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
 			struct sctp_ulpevent *ev = sctp_skb2event(skb);
 			sin6->sin6_scope_id = ev->iif;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cea..817174e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -697,13 +697,7 @@
 	/* Keep track of how many bytes are in flight to the receiver. */
 	asoc->outqueue.outstanding_bytes += datasize;
 
-	/* Update our view of the receiver's rwnd. Include sk_buff overhead
-	 * while updating peer.rwnd so that it reduces the chances of a
-	 * receiver running out of receive buffer space even when receive
-	 * window is still open. This can happen when a sender is sending
-	 * sending small messages.
-	 */
-	datasize += sizeof(struct sk_buff);
+	/* Update our view of the receiver's rwnd. */
 	if (datasize < rwnd)
 		rwnd -= datasize;
 	else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 14c2b06..cfeb1d4 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@
 					chunk->transport->flight_size -=
 							sctp_data_size(chunk);
 				q->outstanding_bytes -= sctp_data_size(chunk);
-				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-							sizeof(struct sk_buff));
+				q->asoc->peer.rwnd += sctp_data_size(chunk);
 			}
 			continue;
 		}
@@ -432,8 +431,7 @@
 			 * (Section 7.2.4)), add the data size of those
 			 * chunks to the rwnd.
 			 */
-			q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-						sizeof(struct sk_buff));
+			q->asoc->peer.rwnd += sctp_data_size(chunk);
 			q->outstanding_bytes -= sctp_data_size(chunk);
 			if (chunk->transport)
 				transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 61b9fca..5942d27 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -637,7 +637,7 @@
 		    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
 		    addrw);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		/* Now we send an ASCONF for each association */
 		/* Note. we currently don't handle link local IPv6 addresses */
 		if (addrw->a.sa.sa_family == AF_INET6) {
@@ -1285,6 +1285,9 @@
 	sctp_max_instreams    		= SCTP_DEFAULT_INSTREAMS;
 	sctp_max_outstreams   		= SCTP_DEFAULT_OUTSTREAMS;
 
+	/* Initialize maximum autoclose timeout. */
+	sctp_max_autoclose		= INT_MAX / HZ;
+
 	/* Initialize handle used for association ids. */
 	idr_init(&sctp_assocs_id);
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 0121e0a..a85eeeb 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3400,8 +3400,10 @@
 		asconf_len -= length;
 	}
 
-	if (no_err && asoc->src_out_of_asoc_ok)
+	if (no_err && asoc->src_out_of_asoc_ok) {
 		asoc->src_out_of_asoc_ok = 0;
+		sctp_transport_immediate_rtx(asoc->peer.primary_path);
+	}
 
 	/* Free the cached last sent asconf chunk. */
 	list_del_init(&asconf->transmitted_list);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 76388b0..1ff51c9 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -666,6 +666,7 @@
 				  struct sctp_chunk *chunk)
 {
 	sctp_sender_hb_info_t *hbinfo;
+	int was_unconfirmed = 0;
 
 	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
 	 * HEARTBEAT should clear the error counter of the destination
@@ -692,9 +693,11 @@
 	/* Mark the destination transport address as active if it is not so
 	 * marked.
 	 */
-	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
+	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+		was_unconfirmed = 1;
 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
 					     SCTP_HEARTBEAT_SUCCESS);
+	}
 
 	/* The receiver of the HEARTBEAT ACK should also perform an
 	 * RTT measurement for that destination transport address
@@ -712,6 +715,9 @@
 	/* Update the heartbeat timer.  */
 	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
 		sctp_transport_hold(t);
+
+	if (was_unconfirmed && asoc->peer.transport_count == 1)
+		sctp_transport_immediate_rtx(t);
 }
 
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13bf5fc..408ebd0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -804,7 +804,7 @@
 				struct sockaddr_in6 *sin6;
 
 				sin6 = (struct sockaddr_in6 *)addrs;
-				ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
 			}
 			SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
 			    " at %p\n", asoc, asoc->asconf_addr_del_pending,
@@ -2200,8 +2200,6 @@
 		return -EINVAL;
 	if (copy_from_user(&sp->autoclose, optval, optlen))
 		return -EFAULT;
-	/* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-	sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
 	return 0;
 }
@@ -6841,7 +6839,7 @@
 	.sockets_allocated = &sctp_sockets_allocated,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 struct proto sctpv6_prot = {
 	.name		= "SCTPv6",
@@ -6872,4 +6870,4 @@
 	.memory_allocated = &sctp_memory_allocated,
 	.sockets_allocated = &sctp_sockets_allocated,
 };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b39529..60ffbd0 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+	(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+	? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@
 		.extra1		= &one,
 		.extra2		= &rwnd_scale_max,
 	},
+	{
+		.procname	= "max_autoclose",
+		.data		= &sctp_max_autoclose,
+		.maxlen		= sizeof(unsigned long),
+		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_minmax,
+		.extra1		= &max_autoclose_min,
+		.extra2		= &max_autoclose_max,
+	},
 
 	{ /* sentinel */ }
 };
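
Instead of clamping sp->autoclose at setsockopt time against MAX_SCHEDULE_TIMEOUT, the association timeout is now clamped when the timer value is computed, against the new sctp_max_autoclose sysctl (defaulting to INT_MAX / HZ). A small sketch of the clamp, with HZ assumed to be 250 and the requested value chosen to trigger it:

#include <limits.h>
#include <stdio.h>

#define HZ 250	/* illustrative; the real value is a kernel config choice */

int main(void)
{
	unsigned long sctp_max_autoclose = INT_MAX / HZ;	/* default set in sctp_init() */
	unsigned long autoclose = 20000000UL;	/* seconds requested via SCTP_AUTOCLOSE */
	unsigned long timeout;

	/* min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ */
	timeout = (autoclose < sctp_max_autoclose ? autoclose
						  : sctp_max_autoclose) * HZ;
	printf("autoclose timer: %lu jiffies\n", timeout);	/* capped, no overflow */
	return 0;
}
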
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 394c57c..3889330 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -641,3 +641,19 @@
 	t->cacc.next_tsn_at_change = 0;
 	t->cacc.cacc_saw_newack = 0;
 }
+
+/* Schedule retransmission on the given transport */
+void sctp_transport_immediate_rtx(struct sctp_transport *t)
+{
+	/* Stop pending T3_rtx_timer */
+	if (timer_pending(&t->T3_rtx_timer)) {
+		(void)del_timer(&t->T3_rtx_timer);
+		sctp_transport_put(t);
+	}
+	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+	if (!timer_pending(&t->T3_rtx_timer)) {
+		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
+			sctp_transport_hold(t);
+	}
+	return;
+}
diff --git a/net/socket.c b/net/socket.c
index 2877647..e56162c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -538,6 +538,8 @@
 		*tx_flags |= SKBTX_HW_TSTAMP;
 	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
 		*tx_flags |= SKBTX_SW_TSTAMP;
+	if (sock_flag(sk, SOCK_WIFI_STATUS))
+		*tx_flags |= SKBTX_WIFI_STATUS;
 	return 0;
 }
 EXPORT_SYMBOL(sock_tx_timestamp);
@@ -549,6 +551,8 @@
 
 	sock_update_classid(sock->sk);
 
+	sock_update_netprioidx(sock->sk);
+
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -674,6 +678,22 @@
 }
 EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
 
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+	struct sk_buff *skb)
+{
+	int ack;
+
+	if (!sock_flag(sk, SOCK_WIFI_STATUS))
+		return;
+	if (!skb->wifi_acked_valid)
+		return;
+
+	ack = skb->wifi_acked;
+
+	put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
+}
+EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
+
 static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
 				   struct sk_buff *skb)
 {
@@ -2738,10 +2758,10 @@
 	case ETHTOOL_GRXRINGS:
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
+	case ETHTOOL_SRXCLSRLINS:
 		convert_out = true;
 		/* fall through */
 	case ETHTOOL_SRXCLSRLDEL:
-	case ETHTOOL_SRXCLSRLINS:
 		buf_size += sizeof(struct ethtool_rxnfc);
 		convert_in = true;
 		break;
@@ -2883,7 +2903,7 @@
 
 		return dev_ioctl(net, cmd, uifr);
 	default:
-		return -EINVAL;
+		return -ENOIOCTLCMD;
 	}
 }
 
@@ -3210,20 +3230,6 @@
 		return sock_do_ioctl(net, sock, cmd, arg);
 	}
 
-	/* Prevent warning from compat_sys_ioctl, these always
-	 * result in -EINVAL in the native case anyway. */
-	switch (cmd) {
-	case SIOCRTMSG:
-	case SIOCGIFCOUNT:
-	case SIOCSRARP:
-	case SIOCGRARP:
-	case SIOCDRARP:
-	case SIOCSIFLINK:
-	case SIOCGIFSLAVE:
-	case SIOCSIFSLAVE:
-		return -EINVAL;
-	}
-
 	return -ENOIOCTLCMD;
 }
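
The new SOCK_WIFI_STATUS path delivers the wifi ACK status of a transmitted frame back to userspace as an SCM_WIFI_STATUS control message once the socket has opted in via the matching SO_WIFI_STATUS socket option. A hedged userspace sketch of how a sender might read it; the option/cmsg names come from this series, their numeric values and the helper name are assumptions for the example:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SO_WIFI_STATUS
#define SO_WIFI_STATUS	41		/* assumed value, added by this series */
#endif
#ifndef SCM_WIFI_STATUS
#define SCM_WIFI_STATUS	SO_WIFI_STATUS
#endif

/* Scan the control messages of a received msghdr for the wifi ack status.
 * Returns 1 and fills *acked when the cmsg is present, 0 otherwise. */
static int get_wifi_status(struct msghdr *msg, int *acked)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_WIFI_STATUS) {
			memcpy(acked, CMSG_DATA(cmsg), sizeof(*acked));
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	int one = 1, acked;
	struct msghdr msg = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	/* Opt in; on kernels without the feature this simply fails. */
	if (setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &one, sizeof(one)) < 0)
		perror("SO_WIFI_STATUS");
	/* After recvmsg() on a wifi-backed socket, the helper would extract
	 * the per-packet ack indication; with an empty msghdr it finds none. */
	if (!get_wifi_status(&msg, &acked))
		printf("no wifi status cmsg\n");
	return 0;
}
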
 
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 67a655e..ee77742 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
 				  char *buf, const int buflen)
@@ -91,7 +91,7 @@
 	return len;
 }
 
-#else	/* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#else	/* !IS_ENABLED(CONFIG_IPV6) */
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
 				  char *buf, const int buflen)
@@ -105,7 +105,7 @@
 	return 0;
 }
 
-#endif	/* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#endif	/* !IS_ENABLED(CONFIG_IPV6) */
 
 static int rpc_ntop4(const struct sockaddr *sap,
 		     char *buf, const size_t buflen)
@@ -155,7 +155,7 @@
 	return sizeof(struct sockaddr_in);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int rpc_parse_scope_id(const char *buf, const size_t buflen,
 			      const char *delim, struct sockaddr_in6 *sin6)
 {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 72ad836..03b56bc 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1778,7 +1778,7 @@
 };
 
 int sunrpc_cache_register_pipefs(struct dentry *parent,
-				 const char *name, mode_t umode,
+				 const char *name, umode_t umode,
 				 struct cache_detail *cd)
 {
 	struct qstr q;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index bfddd68..63a7a7a 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -185,7 +185,6 @@
 rpc_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
 }
 
@@ -954,7 +953,7 @@
 }
 
 struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
-				    mode_t umode, struct cache_detail *cd)
+				    umode_t umode, struct cache_detail *cd)
 {
 	return rpc_mkdir_populate(parent, name, umode, NULL,
 			rpc_cachedir_populate, cd);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa5..3341d89 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <linux/sunrpc/clnt.h>
 
@@ -231,7 +232,7 @@
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
 
@@ -590,6 +591,27 @@
 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+	/* Initialize retry counters */
+	task->tk_garb_retry = 2;
+	task->tk_cred_retry = 2;
+	task->tk_rebind_retry = 2;
+
+	/* starting timestamp */
+	task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+	task->tk_timeouts = 0;
+	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+	rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +624,7 @@
 			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
 			xprt_release(task);
+			rpc_reset_task_statistics(task);
 		}
 	}
 }
@@ -804,11 +827,6 @@
 	task->tk_calldata = task_setup_data->callback_data;
 	INIT_LIST_HEAD(&task->tk_task);
 
-	/* Initialize retry counters */
-	task->tk_garb_retry = 2;
-	task->tk_cred_retry = 2;
-	task->tk_rebind_retry = 2;
-
 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 	task->tk_owner = current->tgid;
 
@@ -818,8 +836,7 @@
 	if (task->tk_ops->rpc_call_prepare != NULL)
 		task->tk_action = rpc_prepare_task;
 
-	/* starting timestamp */
-	task->tk_start = ktime_get();
+	rpc_init_task_statistics(task);
 
 	dprintk("RPC:       new task initialized, procpid %u\n",
 				task_pid_nr(current));
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6e03888..9d01d46 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -826,7 +826,7 @@
 	return error;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /*
  * Register an "inet6" protocol family netid with the local
  * rpcbind daemon via an rpcbind v4 SET request.
@@ -872,7 +872,7 @@
 
 	return error;
 }
-#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif	/* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Register a kernel RPC service via rpcbind version 4.
@@ -893,11 +893,11 @@
 		error = __svc_rpcb_register4(program, version,
 						protocol, port);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6:
 		error = __svc_rpcb_register6(program, version,
 						protocol, port);
-#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
 	}
 
 	if (error < 0)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 447cd0e..38649cf 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -179,13 +179,13 @@
 		.sin_addr.s_addr	= htonl(INADDR_ANY),
 		.sin_port		= htons(port),
 	};
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	struct sockaddr_in6 sin6 = {
 		.sin6_family		= AF_INET6,
 		.sin6_addr		= IN6ADDR_ANY_INIT,
 		.sin6_port		= htons(port),
 	};
-#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
 	struct sockaddr *sap;
 	size_t len;
 
@@ -194,12 +194,12 @@
 		sap = (struct sockaddr *)&sin;
 		len = sizeof(sin);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6:
 		sap = (struct sockaddr *)&sin6;
 		len = sizeof(sin6);
 		break;
-#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
 	default:
 		return ERR_PTR(-EAFNOSUPPORT);
 	}
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index ce13632..01153ea 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -134,7 +134,7 @@
 	struct ip_map *item = container_of(citem, struct ip_map, h);
 
 	strcpy(new->m_class, item->m_class);
-	ipv6_addr_copy(&new->m_addr, &item->m_addr);
+	new->m_addr = item->m_addr;
 }
 static void update(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -220,7 +220,7 @@
 		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
 				&sin6.sin6_addr);
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		memcpy(&sin6, &address.s6, sizeof(sin6));
 		break;
@@ -274,7 +274,7 @@
 	}
 	im = container_of(h, struct ip_map, h);
 	/* class addr domain */
-	ipv6_addr_copy(&addr, &im->m_addr);
+	addr = im->m_addr;
 
 	if (test_bit(CACHE_VALID, &h->flags) &&
 	    !test_bit(CACHE_NEGATIVE, &h->flags))
@@ -297,7 +297,7 @@
 	struct cache_head *ch;
 
 	strcpy(ip.m_class, class);
-	ipv6_addr_copy(&ip.m_addr, addr);
+	ip.m_addr = *addr;
 	ch = sunrpc_cache_lookup(cd, &ip.h,
 				 hash_str(class, IP_HASHBITS) ^
 				 hash_ip6(*addr));
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 71bed1c..4653286 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -157,7 +157,7 @@
 			cmh->cmsg_level = SOL_IPV6;
 			cmh->cmsg_type = IPV6_PKTINFO;
 			pki->ipi6_ifindex = daddr->sin6_scope_id;
-			ipv6_addr_copy(&pki->ipi6_addr,	&daddr->sin6_addr);
+			pki->ipi6_addr = daddr->sin6_addr;
 			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
 		}
 		break;
@@ -523,7 +523,7 @@
 		return 0;
 
 	daddr->sin6_family = AF_INET6;
-	ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+	daddr->sin6_addr = pki->ipi6_addr;
 	daddr->sin6_scope_id = pki->ipi6_ifindex;
 	return 1;
 }
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e4..c64c0ef 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-	if (xprt_dynamic_free_slot(xprt, req))
-		return;
-
-	memset(req, 0, sizeof(*req));	/* mark unused */
-
 	spin_lock(&xprt->reserve_lock);
-	list_add(&req->rq_list, &xprt->free);
+	if (!xprt_dynamic_free_slot(xprt, req)) {
+		memset(req, 0, sizeof(*req));	/* mark unused */
+		list_add(&req->rq_list, &xprt->free);
+	}
 	rpc_wake_up_next(&xprt->backlog);
 	spin_unlock(&xprt->reserve_lock);
 }
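
The reworked xprt_free_slot() above performs the dynamic-slot test, the free-list insertion and the backlog wake-up inside one reserve_lock critical section, so a task waiting on the backlog is woken even when the freed slot was dynamically allocated. A toy sketch of that shape, with made-up types and a hypothetical wake helper, only to show where the lock sits:

	struct slot { struct list_head list; };

	struct slot_pool {
		spinlock_t lock;
		struct list_head free;
	};

	static void pool_put(struct slot_pool *pool, struct slot *s, bool dynamic)
	{
		spin_lock(&pool->lock);
		if (dynamic)
			kfree(s);			/* dynamically allocated: just release it */
		else
			list_add(&s->list, &pool->free);/* static slot: back onto the free list */
		pool_wake_waiter(pool);			/* hypothetical: always give a waiter a turn */
		spin_unlock(&pool->lock);
	}
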
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 28908f5..8eb87b1 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -46,7 +46,7 @@
 #define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
 
 /**
- * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
  * @primary: pointer to primary bearer
  * @secondary: pointer to secondary bearer
  *
@@ -54,13 +54,13 @@
  * to be paired.
  */
 
-struct bcbearer_pair {
+struct tipc_bcbearer_pair {
 	struct tipc_bearer *primary;
 	struct tipc_bearer *secondary;
 };
 
 /**
- * struct bcbearer - bearer used by broadcast link
+ * struct tipc_bcbearer - bearer used by broadcast link
  * @bearer: (non-standard) broadcast bearer structure
  * @media: (non-standard) broadcast media structure
  * @bpairs: array of bearer pairs
@@ -74,50 +74,47 @@
  * prevented through use of the spinlock "bc_lock".
  */
 
-struct bcbearer {
+struct tipc_bcbearer {
 	struct tipc_bearer bearer;
-	struct media media;
-	struct bcbearer_pair bpairs[MAX_BEARERS];
-	struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+	struct tipc_media media;
+	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
 	struct tipc_node_map remains;
 	struct tipc_node_map remains_new;
 };
 
 /**
- * struct bclink - link used for broadcast messages
+ * struct tipc_bclink - link used for broadcast messages
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
 
-struct bclink {
-	struct link link;
+struct tipc_bclink {
+	struct tipc_link link;
 	struct tipc_node node;
+	struct tipc_node_map bcast_nodes;
 	struct tipc_node *retransmit_to;
 };
 
+static struct tipc_bcbearer bcast_bearer;
+static struct tipc_bclink bcast_link;
 
-static struct bcbearer *bcbearer;
-static struct bclink *bclink;
-static struct link *bcl;
+static struct tipc_bcbearer *bcbearer = &bcast_bearer;
+static struct tipc_bclink *bclink = &bcast_link;
+static struct tipc_link *bcl = &bcast_link.link;
+
 static DEFINE_SPINLOCK(bc_lock);
 
-/* broadcast-capable node map */
-struct tipc_node_map tipc_bcast_nmap;
-
 const char tipc_bclink_name[] = "broadcast-link";
 
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 			   struct tipc_node_map *nm_b,
 			   struct tipc_node_map *nm_diff);
 
-static u32 buf_seqno(struct sk_buff *buf)
-{
-	return msg_seqno(buf_msg(buf));
-}
-
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
 	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -133,6 +130,19 @@
 	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
+void tipc_bclink_add_node(u32 addr)
+{
+	spin_lock_bh(&bc_lock);
+	tipc_nmap_add(&bclink->bcast_nodes, addr);
+	spin_unlock_bh(&bc_lock);
+}
+
+void tipc_bclink_remove_node(u32 addr)
+{
+	spin_lock_bh(&bc_lock);
+	tipc_nmap_remove(&bclink->bcast_nodes, addr);
+	spin_unlock_bh(&bc_lock);
+}
 
 static void bclink_set_last_sent(void)
 {
@@ -222,14 +232,36 @@
 	struct sk_buff *next;
 	unsigned int released = 0;
 
-	if (less_eq(acked, n_ptr->bclink.acked))
-		return;
-
 	spin_lock_bh(&bc_lock);
 
-	/* Skip over packets that node has previously acknowledged */
-
+	/* Bail out if tx queue is empty (no clean up is required) */
 	crs = bcl->first_out;
+	if (!crs)
+		goto exit;
+
+	/* Determine which messages need to be acknowledged */
+	if (acked == INVALID_LINK_SEQ) {
+		/*
+		 * Contact with specified node has been lost, so need to
+		 * acknowledge sent messages only (if other nodes still exist)
+		 * or both sent and unsent messages (otherwise)
+		 */
+		if (bclink->bcast_nodes.count)
+			acked = bcl->fsm_msg_cnt;
+		else
+			acked = bcl->next_out_no;
+	} else {
+		/*
+		 * Bail out if specified sequence number does not correspond
+		 * to a message that has been sent and not yet acknowledged
+		 */
+		if (less(acked, buf_seqno(crs)) ||
+		    less(bcl->fsm_msg_cnt, acked) ||
+		    less_eq(acked, n_ptr->bclink.acked))
+			goto exit;
+	}
+
+	/* Skip over packets that node has previously acknowledged */
 	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
 		crs = crs->next;
 
@@ -237,7 +269,15 @@
 
 	while (crs && less_eq(buf_seqno(crs), acked)) {
 		next = crs->next;
-		bcbuf_decr_acks(crs);
+
+		if (crs != bcl->next_out)
+			bcbuf_decr_acks(crs);
+		else {
+			bcbuf_set_acks(crs, 0);
+			bcl->next_out = next;
+			bclink_set_last_sent();
+		}
+
 		if (bcbuf_acks(crs) == 0) {
 			bcl->first_out = next;
 			bcl->out_queue_size--;
@@ -256,6 +296,7 @@
 	}
 	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
 		tipc_link_wakeup_ports(bcl, 0);
+exit:
 	spin_unlock_bh(&bc_lock);
 }
 
@@ -267,7 +308,7 @@
 
 static void bclink_send_ack(struct tipc_node *n_ptr)
 {
-	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+	struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
 
 	if (l_ptr != NULL)
 		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
@@ -402,13 +443,19 @@
 
 	spin_lock_bh(&bc_lock);
 
+	if (!bclink->bcast_nodes.count) {
+		res = msg_data_sz(buf_msg(buf));
+		buf_discard(buf);
+		goto exit;
+	}
+
 	res = tipc_link_send_buf(bcl, buf);
-	if (likely(res > 0))
+	if (likely(res >= 0)) {
 		bclink_set_last_sent();
-
-	bcl->stats.queue_sz_counts++;
-	bcl->stats.accu_queue_sz += bcl->out_queue_size;
-
+		bcl->stats.queue_sz_counts++;
+		bcl->stats.accu_queue_sz += bcl->out_queue_size;
+	}
+exit:
 	spin_unlock_bh(&bc_lock);
 	return res;
 }
@@ -572,13 +619,13 @@
 	if (likely(!msg_non_seq(buf_msg(buf)))) {
 		struct tipc_msg *msg;
 
-		bcbuf_set_acks(buf, tipc_bcast_nmap.count);
+		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
 		msg = buf_msg(buf);
 		msg_set_non_seq(msg, 1);
 		msg_set_mc_netid(msg, tipc_net_id);
 		bcl->stats.sent_info++;
 
-		if (WARN_ON(!tipc_bcast_nmap.count)) {
+		if (WARN_ON(!bclink->bcast_nodes.count)) {
 			dump_stack();
 			return 0;
 		}
@@ -586,7 +633,7 @@
 
 	/* Send buffer over bearers until all targets reached */
 
-	bcbearer->remains = tipc_bcast_nmap;
+	bcbearer->remains = bclink->bcast_nodes;
 
 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
 		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -630,8 +677,8 @@
 
 void tipc_bcbearer_sort(void)
 {
-	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
-	struct bcbearer_pair *bp_curr;
+	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+	struct tipc_bcbearer_pair *bp_curr;
 	int b_index;
 	int pri;
 
@@ -752,25 +799,13 @@
 	return 0;
 }
 
-int tipc_bclink_init(void)
+void tipc_bclink_init(void)
 {
-	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
-	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
-	if (!bcbearer || !bclink) {
-		warn("Broadcast link creation failed, no memory\n");
-		kfree(bcbearer);
-		bcbearer = NULL;
-		kfree(bclink);
-		bclink = NULL;
-		return -ENOMEM;
-	}
-
 	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
 	bcbearer->bearer.media = &bcbearer->media;
 	bcbearer->media.send_msg = tipc_bcbearer_send;
 	sprintf(bcbearer->media.name, "tipc-broadcast");
 
-	bcl = &bclink->link;
 	INIT_LIST_HEAD(&bcl->waiting_ports);
 	bcl->next_out_no = 1;
 	spin_lock_init(&bclink->node.lock);
@@ -780,22 +815,16 @@
 	bcl->b_ptr = &bcbearer->bearer;
 	bcl->state = WORKING_WORKING;
 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-
-	return 0;
 }
 
 void tipc_bclink_stop(void)
 {
 	spin_lock_bh(&bc_lock);
-	if (bcbearer) {
-		tipc_link_stop(bcl);
-		bcl = NULL;
-		kfree(bclink);
-		bclink = NULL;
-		kfree(bcbearer);
-		bcbearer = NULL;
-	}
+	tipc_link_stop(bcl);
 	spin_unlock_bh(&bc_lock);
+
+	memset(bclink, 0, sizeof(*bclink));
+	memset(bcbearer, 0, sizeof(*bcbearer));
 }
 
 
@@ -864,9 +893,9 @@
  * tipc_port_list_add - add a port to a port list, ensuring no duplicates
  */
 
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
 {
-	struct port_list *item = pl_ptr;
+	struct tipc_port_list *item = pl_ptr;
 	int i;
 	int item_sz = PLSIZE;
 	int cnt = pl_ptr->count;
@@ -898,10 +927,10 @@
  *
  */
 
-void tipc_port_list_free(struct port_list *pl_ptr)
+void tipc_port_list_free(struct tipc_port_list *pl_ptr)
 {
-	struct port_list *item;
-	struct port_list *next;
+	struct tipc_port_list *item;
+	struct tipc_port_list *next;
 
 	for (item = pl_ptr->next; item; item = next) {
 		next = item->next;
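
The local buf_seqno() helper removed above evidently moves to a shared header, since the link.c changes further down convert their open-coded msg_seqno(buf_msg(...)) calls to use it. Its body is the one-liner restated here for reference:

	static u32 buf_seqno(struct sk_buff *buf)
	{
		return msg_seqno(buf_msg(buf));
	}
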
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 06740da..b009666 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -51,20 +51,18 @@
 	u32 map[MAX_NODES / WSIZE];
 };
 
-extern struct tipc_node_map tipc_bcast_nmap;
-
 #define PLSIZE 32
 
 /**
- * struct port_list - set of node local destination ports
+ * struct tipc_port_list - set of node local destination ports
  * @count: # of ports in set (only valid for first entry in list)
  * @next: pointer to next entry in list
  * @ports: array of port references
  */
 
-struct port_list {
+struct tipc_port_list {
 	int count;
-	struct port_list *next;
+	struct tipc_port_list *next;
 	u32 ports[PLSIZE];
 };
 
@@ -85,11 +83,13 @@
 	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
 
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port);
-void tipc_port_list_free(struct port_list *pl_ptr);
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
+void tipc_port_list_free(struct tipc_port_list *pl_ptr);
 
-int  tipc_bclink_init(void);
+void tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+void tipc_bclink_add_node(u32 addr);
+void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
 int  tipc_bclink_send_msg(struct sk_buff *buf);
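
The broadcast-capable node map now lives inside the broadcast link object and is touched only through the two new accessors, which take bc_lock internally. A minimal usage sketch, assuming a caller in the node up/down path (the surrounding call sites are illustrative, not lifted from this patch):

	/* peer at 'addr' established its first link to us */
	tipc_bclink_add_node(addr);

	/* peer lost: drop it from the map, then flush whatever it had not yet
	 * acknowledged (INVALID_LINK_SEQ triggers the lost-contact handling
	 * added to tipc_bclink_acknowledge() in bcast.c above) */
	tipc_bclink_remove_node(addr);
	tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
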
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e2202de..329fb659 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -41,7 +41,7 @@
 
 #define MAX_ADDR_STR 32
 
-static struct media media_list[MAX_MEDIA];
+static struct tipc_media *media_list[MAX_MEDIA];
 static u32 media_count;
 
 struct tipc_bearer tipc_bearers[MAX_BEARERS];
@@ -65,17 +65,31 @@
 }
 
 /**
- * media_find - locates specified media object by name
+ * tipc_media_find - locates specified media object by name
  */
 
-static struct media *media_find(const char *name)
+struct tipc_media *tipc_media_find(const char *name)
 {
-	struct media *m_ptr;
 	u32 i;
 
-	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-		if (!strcmp(m_ptr->name, name))
-			return m_ptr;
+	for (i = 0; i < media_count; i++) {
+		if (!strcmp(media_list[i]->name, name))
+			return media_list[i];
+	}
+	return NULL;
+}
+
+/**
+ * media_find_id - locates specified media object by type identifier
+ */
+
+static struct tipc_media *media_find_id(u8 type)
+{
+	u32 i;
+
+	for (i = 0; i < media_count; i++) {
+		if (media_list[i]->type_id == type)
+			return media_list[i];
 	}
 	return NULL;
 }
@@ -86,87 +100,34 @@
  * Bearers for this media type must be activated separately at a later stage.
  */
 
-int  tipc_register_media(u32 media_type,
-			 char *name,
-			 int (*enable)(struct tipc_bearer *),
-			 void (*disable)(struct tipc_bearer *),
-			 int (*send_msg)(struct sk_buff *,
-					 struct tipc_bearer *,
-					 struct tipc_media_addr *),
-			 char *(*addr2str)(struct tipc_media_addr *a,
-					   char *str_buf, int str_size),
-			 struct tipc_media_addr *bcast_addr,
-			 const u32 bearer_priority,
-			 const u32 link_tolerance,  /* [ms] */
-			 const u32 send_window_limit)
+int tipc_register_media(struct tipc_media *m_ptr)
 {
-	struct media *m_ptr;
-	u32 media_id;
-	u32 i;
 	int res = -EINVAL;
 
 	write_lock_bh(&tipc_net_lock);
 
-	if (tipc_mode != TIPC_NET_MODE) {
-		warn("Media <%s> rejected, not in networked mode yet\n", name);
+	if (!media_name_valid(m_ptr->name))
 		goto exit;
-	}
-	if (!media_name_valid(name)) {
-		warn("Media <%s> rejected, illegal name\n", name);
+	if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
+	    !m_ptr->bcast_addr.broadcast)
 		goto exit;
-	}
-	if (!bcast_addr) {
-		warn("Media <%s> rejected, no broadcast address\n", name);
+	if (m_ptr->priority > TIPC_MAX_LINK_PRI)
 		goto exit;
-	}
-	if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
-	    (bearer_priority > TIPC_MAX_LINK_PRI)) {
-		warn("Media <%s> rejected, illegal priority (%u)\n", name,
-		     bearer_priority);
+	if ((m_ptr->tolerance < TIPC_MIN_LINK_TOL) ||
+	    (m_ptr->tolerance > TIPC_MAX_LINK_TOL))
 		goto exit;
-	}
-	if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
-	    (link_tolerance > TIPC_MAX_LINK_TOL)) {
-		warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
-		     link_tolerance);
+	if (media_count >= MAX_MEDIA)
 		goto exit;
-	}
+	if (tipc_media_find(m_ptr->name) || media_find_id(m_ptr->type_id))
+		goto exit;
 
-	media_id = media_count++;
-	if (media_id >= MAX_MEDIA) {
-		warn("Media <%s> rejected, media limit reached (%u)\n", name,
-		     MAX_MEDIA);
-		media_count--;
-		goto exit;
-	}
-	for (i = 0; i < media_id; i++) {
-		if (media_list[i].type_id == media_type) {
-			warn("Media <%s> rejected, duplicate type (%u)\n", name,
-			     media_type);
-			media_count--;
-			goto exit;
-		}
-		if (!strcmp(name, media_list[i].name)) {
-			warn("Media <%s> rejected, duplicate name\n", name);
-			media_count--;
-			goto exit;
-		}
-	}
-
-	m_ptr = &media_list[media_id];
-	m_ptr->type_id = media_type;
-	m_ptr->send_msg = send_msg;
-	m_ptr->enable_bearer = enable;
-	m_ptr->disable_bearer = disable;
-	m_ptr->addr2str = addr2str;
-	memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
-	strcpy(m_ptr->name, name);
-	m_ptr->priority = bearer_priority;
-	m_ptr->tolerance = link_tolerance;
-	m_ptr->window = send_window_limit;
+	media_list[media_count] = m_ptr;
+	media_count++;
 	res = 0;
 exit:
 	write_unlock_bh(&tipc_net_lock);
+	if (res)
+		warn("Media <%s> registration error\n", m_ptr->name);
 	return res;
 }
 
@@ -176,27 +137,19 @@
 
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 {
-	struct media *m_ptr;
-	u32 media_type;
-	u32 i;
+	char addr_str[MAX_ADDR_STR];
+	struct tipc_media *m_ptr;
 
-	media_type = ntohl(a->type);
-	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-		if (m_ptr->type_id == media_type)
-			break;
-	}
+	m_ptr = media_find_id(a->media_id);
 
-	if ((i < media_count) && (m_ptr->addr2str != NULL)) {
-		char addr_str[MAX_ADDR_STR];
+	if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
+		tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str);
+	else {
+		u32 i;
 
-		tipc_printf(pb, "%s(%s)", m_ptr->name,
-			    m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
-	} else {
-		unchar *addr = (unchar *)&a->dev_addr;
-
-		tipc_printf(pb, "UNKNOWN(%u)", media_type);
-		for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++)
-			tipc_printf(pb, "-%02x", addr[i]);
+		tipc_printf(pb, "UNKNOWN(%u)", a->media_id);
+		for (i = 0; i < sizeof(a->value); i++)
+			tipc_printf(pb, "-%02x", a->value[i]);
 	}
 }
 
@@ -207,7 +160,6 @@
 struct sk_buff *tipc_media_get_names(void)
 {
 	struct sk_buff *buf;
-	struct media *m_ptr;
 	int i;
 
 	buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
@@ -215,9 +167,10 @@
 		return NULL;
 
 	read_lock_bh(&tipc_net_lock);
-	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-		tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
-				    strlen(m_ptr->name) + 1);
+	for (i = 0; i < media_count; i++) {
+		tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
+				    media_list[i]->name,
+				    strlen(media_list[i]->name) + 1);
 	}
 	read_unlock_bh(&tipc_net_lock);
 	return buf;
@@ -232,7 +185,7 @@
  */
 
 static int bearer_name_validate(const char *name,
-				struct bearer_name *name_parts)
+				struct tipc_bearer_names *name_parts)
 {
 	char name_copy[TIPC_MAX_BEARER_NAME];
 	char *media_name;
@@ -276,10 +229,10 @@
 }
 
 /**
- * bearer_find - locates bearer object with matching bearer name
+ * tipc_bearer_find - locates bearer object with matching bearer name
  */
 
-static struct tipc_bearer *bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(const char *name)
 {
 	struct tipc_bearer *b_ptr;
 	u32 i;
@@ -318,7 +271,6 @@
 struct sk_buff *tipc_bearer_get_names(void)
 {
 	struct sk_buff *buf;
-	struct media *m_ptr;
 	struct tipc_bearer *b_ptr;
 	int i, j;
 
@@ -327,10 +279,10 @@
 		return NULL;
 
 	read_lock_bh(&tipc_net_lock);
-	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+	for (i = 0; i < media_count; i++) {
 		for (j = 0; j < MAX_BEARERS; j++) {
 			b_ptr = &tipc_bearers[j];
-			if (b_ptr->active && (b_ptr->media == m_ptr)) {
+			if (b_ptr->active && (b_ptr->media == media_list[i])) {
 				tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
 						    b_ptr->name,
 						    strlen(b_ptr->name) + 1);
@@ -366,7 +318,7 @@
 static int bearer_push(struct tipc_bearer *b_ptr)
 {
 	u32 res = 0;
-	struct link *ln, *tln;
+	struct tipc_link *ln, *tln;
 
 	if (b_ptr->blocked)
 		return 0;
@@ -412,7 +364,8 @@
  * bearer.lock is busy
  */
 
-static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
+						struct tipc_link *l_ptr)
 {
 	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
 }
@@ -425,7 +378,7 @@
  * bearer.lock is free
  */
 
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
 {
 	spin_lock_bh(&b_ptr->lock);
 	tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
@@ -438,7 +391,8 @@
  * and if there is, try to resolve it before returning.
  * 'tipc_net_lock' is read_locked when this function is called
  */
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
+					struct tipc_link *l_ptr)
 {
 	int res = 1;
 
@@ -457,7 +411,7 @@
  * tipc_bearer_congested - determines if bearer is currently congested
  */
 
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
 {
 	if (unlikely(b_ptr->blocked))
 		return 1;
@@ -473,8 +427,8 @@
 int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 {
 	struct tipc_bearer *b_ptr;
-	struct media *m_ptr;
-	struct bearer_name b_name;
+	struct tipc_media *m_ptr;
+	struct tipc_bearer_names b_names;
 	char addr_string[16];
 	u32 bearer_id;
 	u32 with_this_prio;
@@ -486,7 +440,7 @@
 		     name);
 		return -ENOPROTOOPT;
 	}
-	if (!bearer_name_validate(name, &b_name)) {
+	if (!bearer_name_validate(name, &b_names)) {
 		warn("Bearer <%s> rejected, illegal name\n", name);
 		return -EINVAL;
 	}
@@ -511,10 +465,10 @@
 
 	write_lock_bh(&tipc_net_lock);
 
-	m_ptr = media_find(b_name.media_name);
+	m_ptr = tipc_media_find(b_names.media_name);
 	if (!m_ptr) {
 		warn("Bearer <%s> rejected, media <%s> not registered\n", name,
-		     b_name.media_name);
+		     b_names.media_name);
 		goto exit;
 	}
 
@@ -561,6 +515,8 @@
 
 	b_ptr->identity = bearer_id;
 	b_ptr->media = m_ptr;
+	b_ptr->tolerance = m_ptr->tolerance;
+	b_ptr->window = m_ptr->window;
 	b_ptr->net_plane = bearer_id + 'A';
 	b_ptr->active = 1;
 	b_ptr->priority = priority;
@@ -590,11 +546,11 @@
 int tipc_block_bearer(const char *name)
 {
 	struct tipc_bearer *b_ptr = NULL;
-	struct link *l_ptr;
-	struct link *temp_l_ptr;
+	struct tipc_link *l_ptr;
+	struct tipc_link *temp_l_ptr;
 
 	read_lock_bh(&tipc_net_lock);
-	b_ptr = bearer_find(name);
+	b_ptr = tipc_bearer_find(name);
 	if (!b_ptr) {
 		warn("Attempt to block unknown bearer <%s>\n", name);
 		read_unlock_bh(&tipc_net_lock);
@@ -625,8 +581,8 @@
 
 static void bearer_disable(struct tipc_bearer *b_ptr)
 {
-	struct link *l_ptr;
-	struct link *temp_l_ptr;
+	struct tipc_link *l_ptr;
+	struct tipc_link *temp_l_ptr;
 
 	info("Disabling bearer <%s>\n", b_ptr->name);
 	spin_lock_bh(&b_ptr->lock);
@@ -648,7 +604,7 @@
 	int res;
 
 	write_lock_bh(&tipc_net_lock);
-	b_ptr = bearer_find(name);
+	b_ptr = tipc_bearer_find(name);
 	if (b_ptr == NULL) {
 		warn("Attempt to disable unknown bearer <%s>\n", name);
 		res = -EINVAL;
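
tipc_register_media() now takes a single struct tipc_media that the media driver fills in up front, instead of the long argument list it replaces; the name, broadcast address, priority and tolerance are validated against the structure's fields. A minimal sketch of a registration under the new interface; the ops, type constant and name are placeholders, and the real in-tree instance is eth_media_info in eth_media.c further down:

	static struct tipc_media my_media_info = {
		.send_msg	= my_send_msg,		/* driver-supplied ops (placeholders) */
		.enable_bearer	= my_enable_bearer,
		.disable_bearer	= my_disable_bearer,
		.addr2str	= my_addr2str,
		.str2addr	= my_str2addr,
		.addr2msg	= my_addr2msg,
		.msg2addr	= my_msg2addr,
		.bcast_addr	= { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
				    MY_MEDIA_TYPE, 1 },	/* media_id must match .type_id */
		.priority	= TIPC_DEF_LINK_PRI,
		.tolerance	= TIPC_DEF_LINK_TOL,
		.window		= TIPC_DEF_LINK_WIN,
		.type_id	= MY_MEDIA_TYPE,	/* hypothetical media type identifier */
		.name		= "mymedia"
	};

	res = tipc_register_media(&my_media_info);	/* 0 on success, -EINVAL on rejection */
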
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d696f9e..d3eac56 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -43,32 +43,45 @@
 #define MAX_MEDIA	2
 
 /*
+ * Identifiers associated with TIPC message header media address info
+ *
+ * - address info field is 20 bytes long
+ * - media type identifier located at offset 3
+ * - remaining bytes vary according to media type
+ */
+
+#define TIPC_MEDIA_ADDR_SIZE	20
+#define TIPC_MEDIA_TYPE_OFFSET	3
+
+/*
  * Identifiers of supported TIPC media types
  */
 #define TIPC_MEDIA_TYPE_ETH	1
 
 /*
- * Destination address structure used by TIPC bearers when sending messages
- *
- * IMPORTANT: The fields of this structure MUST be stored using the specified
- * byte order indicated below, as the structure is exchanged between nodes
- * as part of a link setup process.
+ * struct tipc_media_addr - destination address used by TIPC bearers
+ * @value: address info (format defined by media)
+ * @media_id: TIPC media type identifier
+ * @broadcast: non-zero if address is a broadcast address
  */
+
 struct tipc_media_addr {
-	__be32  type;			/* bearer type (network byte order) */
-	union {
-		__u8   eth_addr[6];	/* 48 bit Ethernet addr (byte array) */
-	} dev_addr;
+	u8 value[TIPC_MEDIA_ADDR_SIZE];
+	u8 media_id;
+	u8 broadcast;
 };
 
 struct tipc_bearer;
 
 /**
- * struct media - TIPC media information available to internal users
+ * struct tipc_media - TIPC media information available to internal users
  * @send_msg: routine which handles buffer transmission
  * @enable_bearer: routine which enables a bearer
  * @disable_bearer: routine which disables a bearer
- * @addr2str: routine which converts bearer's address to string form
+ * @addr2str: routine which converts media address to string
+ * @str2addr: routine which converts media address from string
+ * @addr2msg: routine which converts media address to protocol message area
+ * @msg2addr: routine which converts media address from protocol message area
  * @bcast_addr: media address used in broadcasting
  * @priority: default link (and bearer) priority
  * @tolerance: default time (in ms) before declaring link failure
@@ -77,14 +90,16 @@
  * @name: media name
  */
 
-struct media {
+struct tipc_media {
 	int (*send_msg)(struct sk_buff *buf,
 			struct tipc_bearer *b_ptr,
 			struct tipc_media_addr *dest);
 	int (*enable_bearer)(struct tipc_bearer *b_ptr);
 	void (*disable_bearer)(struct tipc_bearer *b_ptr);
-	char *(*addr2str)(struct tipc_media_addr *a,
-			  char *str_buf, int str_size);
+	int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
+	int (*str2addr)(struct tipc_media_addr *a, char *str_buf);
+	int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
+	int (*msg2addr)(struct tipc_media_addr *a, char *msg_area);
 	struct tipc_media_addr bcast_addr;
 	u32 priority;
 	u32 tolerance;
@@ -103,6 +118,8 @@
  * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
  * @priority: default link priority for bearer
+ * @window: default window size for bearer
+ * @tolerance: default link tolerance for bearer
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @links: list of non-congested links associated with bearer
@@ -122,10 +139,12 @@
 	struct tipc_media_addr addr;		/* initialized by media */
 	char name[TIPC_MAX_BEARER_NAME];
 	spinlock_t lock;
-	struct media *media;
+	struct tipc_media *media;
 	u32 priority;
+	u32 window;
+	u32 tolerance;
 	u32 identity;
-	struct link_req *link_req;
+	struct tipc_link_req *link_req;
 	struct list_head links;
 	struct list_head cong_links;
 	int active;
@@ -133,28 +152,19 @@
 	struct tipc_node_map nodes;
 };
 
-struct bearer_name {
+struct tipc_bearer_names {
 	char media_name[TIPC_MAX_MEDIA_NAME];
 	char if_name[TIPC_MAX_IF_NAME];
 };
 
-struct link;
+struct tipc_link;
 
 extern struct tipc_bearer tipc_bearers[];
 
 /*
  * TIPC routines available to supported media types
  */
-int tipc_register_media(u32 media_type,
-		 char *media_name, int (*enable)(struct tipc_bearer *),
-		 void (*disable)(struct tipc_bearer *),
-		 int (*send_msg)(struct sk_buff *,
-			struct tipc_bearer *, struct tipc_media_addr *),
-		 char *(*addr2str)(struct tipc_media_addr *a,
-			char *str_buf, int str_size),
-		 struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
-		 const u32 link_tolerance,  /* [ms] */
-		 const u32 send_window_limit);
+int tipc_register_media(struct tipc_media *m_ptr);
 
 void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
 
@@ -170,16 +180,21 @@
 int  tipc_eth_media_start(void);
 void tipc_eth_media_stop(void);
 
+int tipc_media_set_priority(const char *name, u32 new_value);
+int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
 
 struct sk_buff *tipc_bearer_get_names(void);
 void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
 void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
+struct tipc_bearer *tipc_bearer_find(const char *name);
 struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr);
+struct tipc_media *tipc_media_find(const char *name);
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
+				   struct tipc_link *l_ptr);
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
 void tipc_bearer_stop(void);
 void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
 
diff --git a/net/tipc/config.c b/net/tipc/config.c
index b25a396..4785bf2 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -184,13 +184,12 @@
 						   " (cannot change node address once assigned)");
 
 	/*
-	 * Must release all spinlocks before calling start_net() because
-	 * Linux version of TIPC calls eth_media_start() which calls
-	 * register_netdevice_notifier() which may block!
-	 *
-	 * Temporarily releasing the lock should be harmless for non-Linux TIPC,
-	 * but Linux version of eth_media_start() should really be reworked
-	 * so that it can be called with spinlocks held.
+	 * Must temporarily release configuration spinlock while switching into
+	 * networking mode as it calls tipc_eth_media_start(), which may sleep.
+	 * Releasing the lock is harmless as other locally-issued configuration
+	 * commands won't occur until this one completes, and remotely-issued
+	 * configuration commands can't be received until a local configuration
+	 * command to enable the first bearer is received and processed.
 	 */
 
 	spin_unlock_bh(&config_lock);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index c21331d..2691cd5 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -99,8 +99,8 @@
 
 static void tipc_core_stop_net(void)
 {
-	tipc_eth_media_stop();
 	tipc_net_stop();
+	tipc_eth_media_stop();
 }
 
 /**
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index f2fb96e..a00e5f8 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -45,7 +45,7 @@
 
 
 /**
- * struct link_req - information about an ongoing link setup request
+ * struct tipc_link_req - information about an ongoing link setup request
  * @bearer: bearer issuing requests
  * @dest: destination address for request messages
  * @domain: network domain to which links can be established
@@ -54,7 +54,7 @@
  * @timer: timer governing period between requests
  * @timer_intv: current interval between requests (in ms)
  */
-struct link_req {
+struct tipc_link_req {
 	struct tipc_bearer *bearer;
 	struct tipc_media_addr dest;
 	u32 domain;
@@ -84,7 +84,7 @@
 		msg_set_non_seq(msg, 1);
 		msg_set_dest_domain(msg, dest_domain);
 		msg_set_bc_netid(msg, tipc_net_id);
-		msg_set_media_addr(msg, &b_ptr->addr);
+		b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
 	}
 	return buf;
 }
@@ -120,7 +120,7 @@
 void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 {
 	struct tipc_node *n_ptr;
-	struct link *link;
+	struct tipc_link *link;
 	struct tipc_media_addr media_addr, *addr;
 	struct sk_buff *rbuf;
 	struct tipc_msg *msg = buf_msg(buf);
@@ -130,12 +130,15 @@
 	u32 type = msg_type(msg);
 	int link_fully_up;
 
-	msg_get_media_addr(msg, &media_addr);
+	media_addr.broadcast = 1;
+	b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg));
 	buf_discard(buf);
 
 	/* Validate discovery message from requesting node */
 	if (net_id != tipc_net_id)
 		return;
+	if (media_addr.broadcast)
+		return;
 	if (!tipc_addr_domain_valid(dest))
 		return;
 	if (!tipc_addr_node_valid(orig))
@@ -215,7 +218,7 @@
  * and is either not currently searching or is searching at a slow rate
  */
 
-static void disc_update(struct link_req *req)
+static void disc_update(struct tipc_link_req *req)
 {
 	if (!req->num_nodes) {
 		if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
@@ -231,7 +234,7 @@
  * @req: ptr to link request structure
  */
 
-void tipc_disc_add_dest(struct link_req *req)
+void tipc_disc_add_dest(struct tipc_link_req *req)
 {
 	req->num_nodes++;
 }
@@ -241,7 +244,7 @@
  * @req: ptr to link request structure
  */
 
-void tipc_disc_remove_dest(struct link_req *req)
+void tipc_disc_remove_dest(struct tipc_link_req *req)
 {
 	req->num_nodes--;
 	disc_update(req);
@@ -252,7 +255,7 @@
  * @req: ptr to link request structure
  */
 
-static void disc_send_msg(struct link_req *req)
+static void disc_send_msg(struct tipc_link_req *req)
 {
 	if (!req->bearer->blocked)
 		tipc_bearer_send(req->bearer, req->buf, &req->dest);
@@ -265,7 +268,7 @@
  * Called whenever a link setup request timer associated with a bearer expires.
  */
 
-static void disc_timeout(struct link_req *req)
+static void disc_timeout(struct tipc_link_req *req)
 {
 	int max_delay;
 
@@ -313,7 +316,7 @@
 int tipc_disc_create(struct tipc_bearer *b_ptr,
 		     struct tipc_media_addr *dest, u32 dest_domain)
 {
-	struct link_req *req;
+	struct tipc_link_req *req;
 
 	req = kmalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req)
@@ -342,7 +345,7 @@
  * @req: ptr to link request structure
  */
 
-void tipc_disc_delete(struct link_req *req)
+void tipc_disc_delete(struct tipc_link_req *req)
 {
 	k_cancel_timer(&req->timer);
 	k_term_timer(&req->timer);
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index a3af595..75b67c4 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,13 +37,13 @@
 #ifndef _TIPC_DISCOVER_H
 #define _TIPC_DISCOVER_H
 
-struct link_req;
+struct tipc_link_req;
 
 int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
 		     u32 dest_domain);
-void tipc_disc_delete(struct link_req *req);
-void tipc_disc_add_dest(struct link_req *req);
-void tipc_disc_remove_dest(struct link_req *req);
+void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_add_dest(struct tipc_link_req *req);
+void tipc_disc_remove_dest(struct tipc_link_req *req);
 void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
 
 #endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index e728d4c..527e3f0 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -38,28 +38,45 @@
 #include "bearer.h"
 
 #define MAX_ETH_BEARERS		MAX_BEARERS
-#define ETH_LINK_PRIORITY	TIPC_DEF_LINK_PRI
-#define ETH_LINK_TOLERANCE	TIPC_DEF_LINK_TOL
-#define ETH_LINK_WINDOW		TIPC_DEF_LINK_WIN
+
+#define ETH_ADDR_OFFSET	4	/* message header offset of MAC address */
 
 /**
  * struct eth_bearer - Ethernet bearer data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @cleanup: work item used when disabling bearer
  */
 
 struct eth_bearer {
 	struct tipc_bearer *bearer;
 	struct net_device *dev;
 	struct packet_type tipc_packet_type;
+	struct work_struct cleanup;
 };
 
+static struct tipc_media eth_media_info;
 static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
 static struct notifier_block notifier;
 
 /**
+ * eth_media_addr_set - initialize Ethernet media address structure
+ *
+ * Media-dependent "value" field stores MAC address in first 6 bytes
+ * and zeroes out the remaining bytes.
+ */
+
+static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
+{
+	memcpy(a->value, mac, ETH_ALEN);
+	memset(a->value + ETH_ALEN, 0, sizeof(a->value) - ETH_ALEN);
+	a->media_id = TIPC_MEDIA_TYPE_ETH;
+	a->broadcast = !memcmp(mac, eth_media_info.bcast_addr.value, ETH_ALEN);
+}
+
+/**
  * send_msg - send a TIPC message out over an Ethernet interface
  */
 
@@ -85,7 +102,7 @@
 
 	skb_reset_network_header(clone);
 	clone->dev = dev;
-	dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+	dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
 			dev->dev_addr, clone->len);
 	dev_queue_xmit(clone);
 	return 0;
@@ -172,22 +189,41 @@
 	tb_ptr->usr_handle = (void *)eb_ptr;
 	tb_ptr->mtu = dev->mtu;
 	tb_ptr->blocked = 0;
-	tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
-	memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN);
+	eth_media_addr_set(&tb_ptr->addr, (char *)dev->dev_addr);
 	return 0;
 }
 
 /**
+ * cleanup_bearer - break association between Ethernet bearer and interface
+ *
+ * This routine must be invoked from a work queue because it can sleep.
+ */
+
+static void cleanup_bearer(struct work_struct *work)
+{
+	struct eth_bearer *eb_ptr =
+		container_of(work, struct eth_bearer, cleanup);
+
+	dev_remove_pack(&eb_ptr->tipc_packet_type);
+	dev_put(eb_ptr->dev);
+	eb_ptr->dev = NULL;
+}
+
+/**
  * disable_bearer - detach TIPC bearer from an Ethernet interface
  *
- * We really should do dev_remove_pack() here, but this function can not be
- * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
- * incoming buffers, & postpone dev_remove_pack() to eth_media_stop() on exit.
+ * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
+ * then get worker thread to complete bearer cleanup.  (Can't do cleanup
+ * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
 
 static void disable_bearer(struct tipc_bearer *tb_ptr)
 {
-	((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
+	struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+
+	eb_ptr->bearer = NULL;
+	INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+	schedule_work(&eb_ptr->cleanup);
 }
 
 /**
@@ -246,18 +282,82 @@
  * eth_addr2str - convert Ethernet address to string
  */
 
-static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
 {
-	unchar *addr = (unchar *)&a->dev_addr;
+	if (str_size < 18)	/* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
+		return 1;
 
-	if (str_size < 18)
-		*str_buf = '\0';
-	else
-		sprintf(str_buf, "%pM", addr);
-	return str_buf;
+	sprintf(str_buf, "%pM", a->value);
+	return 0;
 }
 
 /**
+ * eth_str2addr - convert string to Ethernet address
+ */
+
+static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
+{
+	char mac[ETH_ALEN];
+	int r;
+
+	r = sscanf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+		       (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2],
+		       (u32 *)&mac[3], (u32 *)&mac[4], (u32 *)&mac[5]);
+
+	if (r != ETH_ALEN)
+		return 1;
+
+	eth_media_addr_set(a, mac);
+	return 0;
+}
+
+/**
+ * eth_addr2msg - convert Ethernet address format to message header format
+ */
+
+static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
+{
+	memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
+	msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+	memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN);
+	return 0;
+}
+
+/**
+ * eth_msg2addr - convert message header address format to Ethernet format
+ */
+
+static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
+{
+	if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
+		return 1;
+
+	eth_media_addr_set(a, msg_area + ETH_ADDR_OFFSET);
+	return 0;
+}
+
+/*
+ * Ethernet media registration info
+ */
+
+static struct tipc_media eth_media_info = {
+	.send_msg	= send_msg,
+	.enable_bearer	= enable_bearer,
+	.disable_bearer	= disable_bearer,
+	.addr2str	= eth_addr2str,
+	.str2addr	= eth_str2addr,
+	.addr2msg	= eth_addr2msg,
+	.msg2addr	= eth_msg2addr,
+	.bcast_addr	= { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+			    TIPC_MEDIA_TYPE_ETH, 1 },
+	.priority	= TIPC_DEF_LINK_PRI,
+	.tolerance	= TIPC_DEF_LINK_TOL,
+	.window		= TIPC_DEF_LINK_WIN,
+	.type_id	= TIPC_MEDIA_TYPE_ETH,
+	.name		= "eth"
+};
+
+/**
  * tipc_eth_media_start - activate Ethernet bearer support
  *
  * Register Ethernet media type with TIPC bearer code.  Also register
@@ -266,21 +366,12 @@
 
 int tipc_eth_media_start(void)
 {
-	struct tipc_media_addr bcast_addr;
 	int res;
 
 	if (eth_started)
 		return -EINVAL;
 
-	bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
-	memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
-
-	memset(eth_bearers, 0, sizeof(eth_bearers));
-
-	res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
-				  enable_bearer, disable_bearer, send_msg,
-				  eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
-				  ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
+	res = tipc_register_media(&eth_media_info);
 	if (res)
 		return res;
 
@@ -298,22 +389,10 @@
 
 void tipc_eth_media_stop(void)
 {
-	int i;
-
 	if (!eth_started)
 		return;
 
+	flush_scheduled_work();
 	unregister_netdevice_notifier(&notifier);
-	for (i = 0; i < MAX_ETH_BEARERS ; i++) {
-		if (eth_bearers[i].bearer) {
-			eth_bearers[i].bearer->blocked = 1;
-			eth_bearers[i].bearer = NULL;
-		}
-		if (eth_bearers[i].dev) {
-			dev_remove_pack(&eth_bearers[i].tipc_packet_type);
-			dev_put(eth_bearers[i].dev);
-		}
-	}
-	memset(&eth_bearers, 0, sizeof(eth_bearers));
 	eth_started = 0;
 }
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ae98a72..ac1832a 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -71,35 +71,36 @@
 #define START_CHANGEOVER 100000u
 
 /**
- * struct link_name - deconstructed link name
+ * struct tipc_link_name - deconstructed link name
  * @addr_local: network address of node at this end
  * @if_local: name of interface at this end
  * @addr_peer: network address of node at far end
  * @if_peer: name of interface at far end
  */
 
-struct link_name {
+struct tipc_link_name {
 	u32 addr_local;
 	char if_local[TIPC_MAX_IF_NAME];
 	u32 addr_peer;
 	char if_peer[TIPC_MAX_IF_NAME];
 };
 
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 				       struct sk_buff *buf);
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
-static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
+				     struct sk_buff **buf);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
 static int  link_send_sections_long(struct tipc_port *sender,
 				    struct iovec const *msg_sect,
 				    u32 num_sect, unsigned int total_len,
 				    u32 destnode);
-static void link_check_defragm_bufs(struct link *l_ptr);
-static void link_state_event(struct link *l_ptr, u32 event);
-static void link_reset_statistics(struct link *l_ptr);
-static void link_print(struct link *l_ptr, const char *str);
-static void link_start(struct link *l_ptr);
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+static void link_check_defragm_bufs(struct tipc_link *l_ptr);
+static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static void link_reset_statistics(struct tipc_link *l_ptr);
+static void link_print(struct tipc_link *l_ptr, const char *str);
+static void link_start(struct tipc_link *l_ptr);
+static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 
 /*
  *  Simple link routines
@@ -110,7 +111,7 @@
 	return (i + 3) & ~3u;
 }
 
-static void link_init_max_pkt(struct link *l_ptr)
+static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
 	u32 max_pkt;
 
@@ -127,14 +128,14 @@
 	l_ptr->max_pkt_probes = 0;
 }
 
-static u32 link_next_sent(struct link *l_ptr)
+static u32 link_next_sent(struct tipc_link *l_ptr)
 {
 	if (l_ptr->next_out)
-		return msg_seqno(buf_msg(l_ptr->next_out));
+		return buf_seqno(l_ptr->next_out);
 	return mod(l_ptr->next_out_no);
 }
 
-static u32 link_last_sent(struct link *l_ptr)
+static u32 link_last_sent(struct tipc_link *l_ptr)
 {
 	return mod(link_next_sent(l_ptr) - 1);
 }
@@ -143,28 +144,29 @@
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
 
-int tipc_link_is_up(struct link *l_ptr)
+int tipc_link_is_up(struct tipc_link *l_ptr)
 {
 	if (!l_ptr)
 		return 0;
 	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
 }
 
-int tipc_link_is_active(struct link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l_ptr)
 {
 	return	(l_ptr->owner->active_links[0] == l_ptr) ||
 		(l_ptr->owner->active_links[1] == l_ptr);
 }
 
 /**
- * link_name_validate - validate & (optionally) deconstruct link name
+ * link_name_validate - validate & (optionally) deconstruct tipc_link name
  * @name - ptr to link name string
  * @name_parts - ptr to area for link name components (or NULL if not needed)
  *
  * Returns 1 if link name is valid, otherwise 0.
  */
 
-static int link_name_validate(const char *name, struct link_name *name_parts)
+static int link_name_validate(const char *name,
+				struct tipc_link_name *name_parts)
 {
 	char name_copy[TIPC_MAX_LINK_NAME];
 	char *addr_local;
@@ -238,7 +240,7 @@
  * tipc_node_delete() is called.)
  */
 
-static void link_timeout(struct link *l_ptr)
+static void link_timeout(struct tipc_link *l_ptr)
 {
 	tipc_node_lock(l_ptr->owner);
 
@@ -287,7 +289,7 @@
 	tipc_node_unlock(l_ptr->owner);
 }
 
-static void link_set_timer(struct link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *l_ptr, u32 time)
 {
 	k_start_timer(&l_ptr->timer, time);
 }
@@ -301,11 +303,11 @@
  * Returns pointer to link.
  */
 
-struct link *tipc_link_create(struct tipc_node *n_ptr,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 			      struct tipc_bearer *b_ptr,
 			      const struct tipc_media_addr *media_addr)
 {
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct tipc_msg *msg;
 	char *if_name;
 	char addr_string[16];
@@ -343,7 +345,7 @@
 	l_ptr->checkpoint = 1;
 	l_ptr->peer_session = INVALID_SESSION;
 	l_ptr->b_ptr = b_ptr;
-	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+	link_set_supervision_props(l_ptr, b_ptr->tolerance);
 	l_ptr->state = RESET_UNKNOWN;
 
 	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
@@ -355,7 +357,7 @@
 	strcpy((char *)msg_data(msg), if_name);
 
 	l_ptr->priority = b_ptr->priority;
-	tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
+	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
 
 	link_init_max_pkt(l_ptr);
 
@@ -382,7 +384,7 @@
  * to avoid a potential deadlock situation.
  */
 
-void tipc_link_delete(struct link *l_ptr)
+void tipc_link_delete(struct tipc_link *l_ptr)
 {
 	if (!l_ptr) {
 		err("Attempt to delete non-existent link\n");
@@ -401,7 +403,7 @@
 	kfree(l_ptr);
 }
 
-static void link_start(struct link *l_ptr)
+static void link_start(struct tipc_link *l_ptr)
 {
 	tipc_node_lock(l_ptr->owner);
 	link_state_event(l_ptr, STARTING_EVT);
@@ -418,7 +420,7 @@
  * has abated.
  */
 
-static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
 {
 	struct tipc_port *p_ptr;
 
@@ -440,7 +442,7 @@
 	return -ELINKCONG;
 }
 
-void tipc_link_wakeup_ports(struct link *l_ptr, int all)
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
 {
 	struct tipc_port *p_ptr;
 	struct tipc_port *temp_p_ptr;
@@ -475,7 +477,7 @@
  * @l_ptr: pointer to link
  */
 
-static void link_release_outqueue(struct link *l_ptr)
+static void link_release_outqueue(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf = l_ptr->first_out;
 	struct sk_buff *next;
@@ -494,7 +496,7 @@
  * @l_ptr: pointer to link
  */
 
-void tipc_link_reset_fragments(struct link *l_ptr)
+void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf = l_ptr->defragm_buf;
 	struct sk_buff *next;
@@ -512,7 +514,7 @@
  * @l_ptr: pointer to link
  */
 
-void tipc_link_stop(struct link *l_ptr)
+void tipc_link_stop(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf;
 	struct sk_buff *next;
@@ -537,7 +539,7 @@
 	l_ptr->proto_msg_queue = NULL;
 }
 
-void tipc_link_reset(struct link *l_ptr)
+void tipc_link_reset(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf;
 	u32 prev_state = l_ptr->state;
@@ -597,7 +599,7 @@
 }
 
 
-static void link_activate(struct link *l_ptr)
+static void link_activate(struct tipc_link *l_ptr)
 {
 	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
 	tipc_node_link_up(l_ptr->owner, l_ptr);
@@ -610,9 +612,9 @@
  * @event: state machine event to process
  */
 
-static void link_state_event(struct link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned event)
 {
-	struct link *other;
+	struct tipc_link *other;
 	u32 cont_intv = l_ptr->continuity_interval;
 
 	if (!l_ptr->started && (event != STARTING_EVT))
@@ -784,7 +786,7 @@
  * the tail of an existing one.
  */
 
-static int link_bundle_buf(struct link *l_ptr,
+static int link_bundle_buf(struct tipc_link *l_ptr,
 			   struct sk_buff *bundler,
 			   struct sk_buff *buf)
 {
@@ -813,7 +815,7 @@
 	return 1;
 }
 
-static void link_add_to_outqueue(struct link *l_ptr,
+static void link_add_to_outqueue(struct tipc_link *l_ptr,
 				 struct sk_buff *buf,
 				 struct tipc_msg *msg)
 {
@@ -834,7 +836,7 @@
 		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
 }
 
-static void link_add_chain_to_outqueue(struct link *l_ptr,
+static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
 				       struct sk_buff *buf_chain,
 				       u32 long_msgno)
 {
@@ -859,7 +861,7 @@
  * has failed, and from link_send()
  */
 
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
 	u32 size = msg_size(msg);
@@ -954,7 +956,7 @@
 
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 {
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct tipc_node *n_ptr;
 	int res = -ELINKCONG;
 
@@ -988,7 +990,7 @@
 void tipc_link_send_names(struct list_head *message_list, u32 dest)
 {
 	struct tipc_node *n_ptr;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct sk_buff *buf;
 	struct sk_buff *temp_buf;
 
@@ -1027,7 +1029,7 @@
  * Link is locked. Returns user data length.
  */
 
-static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
 			      u32 *used_max_pkt)
 {
 	struct tipc_msg *msg = buf_msg(buf);
@@ -1061,7 +1063,7 @@
  */
 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
 {
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct tipc_node *n_ptr;
 	int res;
 	u32 selector = msg_origport(buf_msg(buf)) & 1;
@@ -1100,7 +1102,7 @@
 				 u32 destaddr)
 {
 	struct tipc_msg *hdr = &sender->phdr;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct sk_buff *buf;
 	struct tipc_node *node;
 	int res;
@@ -1195,7 +1197,7 @@
 				   unsigned int total_len,
 				   u32 destaddr)
 {
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct tipc_node *node;
 	struct tipc_msg *hdr = &sender->phdr;
 	u32 dsz = total_len;
@@ -1342,7 +1344,7 @@
 /*
  * tipc_link_push_packet: Push one unsent packet to the media
  */
-u32 tipc_link_push_packet(struct link *l_ptr)
+u32 tipc_link_push_packet(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf = l_ptr->first_out;
 	u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1354,7 +1356,7 @@
 	if (r_q_size && buf) {
 		u32 last = lesser(mod(r_q_head + r_q_size),
 				  link_last_sent(l_ptr));
-		u32 first = msg_seqno(buf_msg(buf));
+		u32 first = buf_seqno(buf);
 
 		while (buf && less(first, r_q_head)) {
 			first = mod(first + 1);
@@ -1403,7 +1405,7 @@
 	if (buf) {
 		struct tipc_msg *msg = buf_msg(buf);
 		u32 next = msg_seqno(msg);
-		u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+		u32 first = buf_seqno(l_ptr->first_out);
 
 		if (mod(next - first) < l_ptr->queue_limit[0]) {
 			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1426,7 +1428,7 @@
  * push_queue(): push out the unsent messages of a link where
  *               congestion has abated. Node is locked
  */
-void tipc_link_push_queue(struct link *l_ptr)
+void tipc_link_push_queue(struct tipc_link *l_ptr)
 {
 	u32 res;
 
@@ -1470,7 +1472,8 @@
 	read_unlock_bh(&tipc_net_lock);
 }
 
-static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
+static void link_retransmit_failure(struct tipc_link *l_ptr,
+					struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
 
@@ -1514,7 +1517,7 @@
 	}
 }
 
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
+void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
 			  u32 retransmits)
 {
 	struct tipc_msg *msg;
@@ -1558,7 +1561,7 @@
 		} else {
 			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 			l_ptr->stats.bearer_congs++;
-			l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+			l_ptr->retransm_queue_head = buf_seqno(buf);
 			l_ptr->retransm_queue_size = retransmits;
 			return;
 		}
@@ -1571,7 +1574,7 @@
  * link_insert_deferred_queue - insert deferred messages back into receive chain
  */
 
-static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
+static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
 						  struct sk_buff *buf)
 {
 	u32 seq_no;
@@ -1579,7 +1582,7 @@
 	if (l_ptr->oldest_deferred_in == NULL)
 		return buf;
 
-	seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
 	if (seq_no == mod(l_ptr->next_in_no)) {
 		l_ptr->newest_deferred_in->next = buf;
 		buf = l_ptr->oldest_deferred_in;
@@ -1653,7 +1656,7 @@
 	read_lock_bh(&tipc_net_lock);
 	while (head) {
 		struct tipc_node *n_ptr;
-		struct link *l_ptr;
+		struct tipc_link *l_ptr;
 		struct sk_buff *crs;
 		struct sk_buff *buf = head;
 		struct tipc_msg *msg;
@@ -1733,14 +1736,12 @@
 
 		/* Release acked messages */
 
-		if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
-			if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
-				tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-		}
+		if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
+			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
 		crs = l_ptr->first_out;
 		while ((crs != l_ptr->next_out) &&
-		       less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+		       less_eq(buf_seqno(crs), ackd)) {
 			struct sk_buff *next = crs->next;
 
 			buf_discard(crs);
@@ -1863,7 +1864,7 @@
 {
 	struct sk_buff *prev = NULL;
 	struct sk_buff *crs = *head;
-	u32 seq_no = msg_seqno(buf_msg(buf));
+	u32 seq_no = buf_seqno(buf);
 
 	buf->next = NULL;
 
@@ -1874,7 +1875,7 @@
 	}
 
 	/* Last ? */
-	if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+	if (less(buf_seqno(*tail), seq_no)) {
 		(*tail)->next = buf;
 		*tail = buf;
 		return 1;
@@ -1908,10 +1909,10 @@
  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
  */
 
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 				       struct sk_buff *buf)
 {
-	u32 seq_no = msg_seqno(buf_msg(buf));
+	u32 seq_no = buf_seqno(buf);
 
 	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
 		link_recv_proto_msg(l_ptr, buf);
@@ -1946,8 +1947,9 @@
 /*
  * Send protocol message to the other endpoint.
  */
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
-			      u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
+				int probe_msg, u32 gap, u32 tolerance,
+				u32 priority, u32 ack_mtu)
 {
 	struct sk_buff *buf = NULL;
 	struct tipc_msg *msg = l_ptr->pmsg;
@@ -1973,10 +1975,10 @@
 		if (!tipc_link_is_up(l_ptr))
 			return;
 		if (l_ptr->next_out)
-			next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+			next_sent = buf_seqno(l_ptr->next_out);
 		msg_set_next_sent(msg, next_sent);
 		if (l_ptr->oldest_deferred_in) {
-			u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
 			gap = mod(rec - mod(l_ptr->next_in_no));
 		}
 		msg_set_seq_gap(msg, gap);
@@ -2064,7 +2066,7 @@
  * change at any time. The node with lowest address rules
  */
 
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
 	u32 rec_gap = 0;
 	u32 max_pkt_info;
@@ -2197,12 +2199,12 @@
  * tipc_link_tunnel(): Send one message via a link belonging to
  * another bearer. Owner node is locked.
  */
-static void tipc_link_tunnel(struct link *l_ptr,
+static void tipc_link_tunnel(struct tipc_link *l_ptr,
 			     struct tipc_msg *tunnel_hdr,
 			     struct tipc_msg  *msg,
 			     u32 selector)
 {
-	struct link *tunnel;
+	struct tipc_link *tunnel;
 	struct sk_buff *buf;
 	u32 length = msg_size(msg);
 
@@ -2231,11 +2233,11 @@
  *               Owner node is locked.
  */
 
-void tipc_link_changeover(struct link *l_ptr)
+void tipc_link_changeover(struct tipc_link *l_ptr)
 {
 	u32 msgcount = l_ptr->out_queue_size;
 	struct sk_buff *crs = l_ptr->first_out;
-	struct link *tunnel = l_ptr->owner->active_links[0];
+	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
 	struct tipc_msg tunnel_hdr;
 	int split_bundles;
 
@@ -2294,7 +2296,7 @@
 	}
 }
 
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
 {
 	struct sk_buff *iter;
 	struct tipc_msg tunnel_hdr;
@@ -2358,11 +2360,11 @@
  *  via other link. Node is locked. Return extracted buffer.
  */
 
-static int link_recv_changeover_msg(struct link **l_ptr,
+static int link_recv_changeover_msg(struct tipc_link **l_ptr,
 				    struct sk_buff **buf)
 {
 	struct sk_buff *tunnel_buf = *buf;
-	struct link *dest_link;
+	struct tipc_link *dest_link;
 	struct tipc_msg *msg;
 	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
 	u32 msg_typ = msg_type(tunnel_msg);
@@ -2462,7 +2464,7 @@
  * The buffer is complete, inclusive total message length.
  * Returns user data length.
  */
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
 	struct sk_buff *buf_chain = NULL;
 	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
@@ -2591,7 +2593,7 @@
 
 	/* Is there an incomplete message waiting for this fragment? */
 
-	while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
+	while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
 			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
 		prev = pbuf;
 		pbuf = pbuf->next;
@@ -2658,7 +2660,7 @@
  * @l_ptr: pointer to link
  */
 
-static void link_check_defragm_bufs(struct link *l_ptr)
+static void link_check_defragm_bufs(struct tipc_link *l_ptr)
 {
 	struct sk_buff *prev = NULL;
 	struct sk_buff *next = NULL;
@@ -2688,7 +2690,7 @@
 
 
 
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
 {
 	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
 		return;
@@ -2700,7 +2702,7 @@
 }
 
 
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 {
 	/* Data messages from this node, inclusive FIRST_FRAGM */
 	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
@@ -2730,11 +2732,12 @@
  * Returns pointer to link (or 0 if invalid link name).
  */
 
-static struct link *link_find_link(const char *name, struct tipc_node **node)
+static struct tipc_link *link_find_link(const char *name,
+					struct tipc_node **node)
 {
-	struct link_name link_name_parts;
+	struct tipc_link_name link_name_parts;
 	struct tipc_bearer *b_ptr;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 
 	if (!link_name_validate(name, &link_name_parts))
 		return NULL;
@@ -2754,13 +2757,113 @@
 	return l_ptr;
 }
 
+/**
+ * link_value_is_valid -- validate proposed link tolerance/priority/window
+ *
+ * @cmd - value type (TIPC_CMD_SET_LINK_*)
+ * @new_value - the new value
+ *
+ * Returns 1 if value is within range, 0 if not.
+ */
+
+static int link_value_is_valid(u16 cmd, u32 new_value)
+{
+	switch (cmd) {
+	case TIPC_CMD_SET_LINK_TOL:
+		return (new_value >= TIPC_MIN_LINK_TOL) &&
+			(new_value <= TIPC_MAX_LINK_TOL);
+	case TIPC_CMD_SET_LINK_PRI:
+		return (new_value <= TIPC_MAX_LINK_PRI);
+	case TIPC_CMD_SET_LINK_WINDOW:
+		return (new_value >= TIPC_MIN_LINK_WIN) &&
+			(new_value <= TIPC_MAX_LINK_WIN);
+	}
+	return 0;
+}
+
+
+/**
+ * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
+ * @name - ptr to link, bearer, or media name
+ * @new_value - new value of link, bearer, or media setting
+ * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
+ *
+ * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
+ *
+ * Returns 0 if the value was updated, or a negative value on error.
+ */
+
+static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
+{
+	struct tipc_node *node;
+	struct tipc_link *l_ptr;
+	struct tipc_bearer *b_ptr;
+	struct tipc_media *m_ptr;
+
+	l_ptr = link_find_link(name, &node);
+	if (l_ptr) {
+		/*
+		 * acquire node lock for tipc_link_send_proto_msg().
+		 * see "TIPC locking policy" in net.c.
+		 */
+		tipc_node_lock(node);
+		switch (cmd) {
+		case TIPC_CMD_SET_LINK_TOL:
+			link_set_supervision_props(l_ptr, new_value);
+			tipc_link_send_proto_msg(l_ptr,
+				STATE_MSG, 0, 0, new_value, 0, 0);
+			break;
+		case TIPC_CMD_SET_LINK_PRI:
+			l_ptr->priority = new_value;
+			tipc_link_send_proto_msg(l_ptr,
+				STATE_MSG, 0, 0, 0, new_value, 0);
+			break;
+		case TIPC_CMD_SET_LINK_WINDOW:
+			tipc_link_set_queue_limits(l_ptr, new_value);
+			break;
+		}
+		tipc_node_unlock(node);
+		return 0;
+	}
+
+	b_ptr = tipc_bearer_find(name);
+	if (b_ptr) {
+		switch (cmd) {
+		case TIPC_CMD_SET_LINK_TOL:
+			b_ptr->tolerance = new_value;
+			return 0;
+		case TIPC_CMD_SET_LINK_PRI:
+			b_ptr->priority = new_value;
+			return 0;
+		case TIPC_CMD_SET_LINK_WINDOW:
+			b_ptr->window = new_value;
+			return 0;
+		}
+		return -EINVAL;
+	}
+
+	m_ptr = tipc_media_find(name);
+	if (!m_ptr)
+		return -ENODEV;
+	switch (cmd) {
+	case TIPC_CMD_SET_LINK_TOL:
+		m_ptr->tolerance = new_value;
+		return 0;
+	case TIPC_CMD_SET_LINK_PRI:
+		m_ptr->priority = new_value;
+		return 0;
+	case TIPC_CMD_SET_LINK_WINDOW:
+		m_ptr->window = new_value;
+		return 0;
+	}
+	return -EINVAL;
+}
+
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
 				     u16 cmd)
 {
 	struct tipc_link_config *args;
 	u32 new_value;
-	struct link *l_ptr;
-	struct tipc_node *node;
 	int res;
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
@@ -2769,6 +2872,10 @@
 	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
 	new_value = ntohl(args->value);
 
+	if (!link_value_is_valid(cmd, new_value))
+		return tipc_cfg_reply_error_string(
+			"cannot change, value invalid");
+
 	if (!strcmp(args->name, tipc_bclink_name)) {
 		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
 		    (tipc_bclink_set_queue_limits(new_value) == 0))
@@ -2778,43 +2885,7 @@
 	}
 
 	read_lock_bh(&tipc_net_lock);
-	l_ptr = link_find_link(args->name, &node);
-	if (!l_ptr) {
-		read_unlock_bh(&tipc_net_lock);
-		return tipc_cfg_reply_error_string("link not found");
-	}
-
-	tipc_node_lock(node);
-	res = -EINVAL;
-	switch (cmd) {
-	case TIPC_CMD_SET_LINK_TOL:
-		if ((new_value >= TIPC_MIN_LINK_TOL) &&
-		    (new_value <= TIPC_MAX_LINK_TOL)) {
-			link_set_supervision_props(l_ptr, new_value);
-			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-						 0, 0, new_value, 0, 0);
-			res = 0;
-		}
-		break;
-	case TIPC_CMD_SET_LINK_PRI:
-		if ((new_value >= TIPC_MIN_LINK_PRI) &&
-		    (new_value <= TIPC_MAX_LINK_PRI)) {
-			l_ptr->priority = new_value;
-			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-						 0, 0, 0, new_value, 0);
-			res = 0;
-		}
-		break;
-	case TIPC_CMD_SET_LINK_WINDOW:
-		if ((new_value >= TIPC_MIN_LINK_WIN) &&
-		    (new_value <= TIPC_MAX_LINK_WIN)) {
-			tipc_link_set_queue_limits(l_ptr, new_value);
-			res = 0;
-		}
-		break;
-	}
-	tipc_node_unlock(node);
-
+	res = link_cmd_set_value(args->name, new_value, cmd);
 	read_unlock_bh(&tipc_net_lock);
 	if (res)
 		return tipc_cfg_reply_error_string("cannot change link setting");
@@ -2827,7 +2898,7 @@
  * @l_ptr: pointer to link
  */
 
-static void link_reset_statistics(struct link *l_ptr)
+static void link_reset_statistics(struct tipc_link *l_ptr)
 {
 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
 	l_ptr->stats.sent_info = l_ptr->next_out_no;
@@ -2837,7 +2908,7 @@
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
 {
 	char *link_name;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct tipc_node *node;
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
@@ -2885,7 +2956,7 @@
 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 {
 	struct print_buf pb;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct tipc_node *node;
 	char *status;
 	u32 profile_total = 0;
@@ -3007,7 +3078,7 @@
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
 {
 	struct tipc_node *n_ptr;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	u32 res = MAX_PKT_DEFAULT;
 
 	if (dest == tipc_own_addr)
@@ -3026,7 +3097,7 @@
 	return res;
 }
 
-static void link_print(struct link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l_ptr, const char *str)
 {
 	char print_area[256];
 	struct print_buf pb;
@@ -3046,13 +3117,12 @@
 	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
 	tipc_printf(buf, "SQUE");
 	if (l_ptr->first_out) {
-		tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
+		tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
 		if (l_ptr->next_out)
-			tipc_printf(buf, "%u..",
-				    msg_seqno(buf_msg(l_ptr->next_out)));
-		tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
-		if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
-			 msg_seqno(buf_msg(l_ptr->first_out)))
+			tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
+		tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
+		if ((mod(buf_seqno(l_ptr->last_out) -
+			 buf_seqno(l_ptr->first_out))
 		     != (l_ptr->out_queue_size - 1)) ||
 		    (l_ptr->last_out->next != NULL)) {
 			tipc_printf(buf, "\nSend queue inconsistency\n");
@@ -3064,8 +3134,8 @@
 		tipc_printf(buf, "[]");
 	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
 	if (l_ptr->oldest_deferred_in) {
-		u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
-		u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
+		u32 o = buf_seqno(l_ptr->oldest_deferred_in);
+		u32 n = buf_seqno(l_ptr->newest_deferred_in);
 		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
 		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
 			tipc_printf(buf, ":RQSIZ(%u)",
diff --git a/net/tipc/link.h b/net/tipc/link.h
index e56cb53..73c18c1 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -45,6 +45,12 @@
 #define PUSH_FINISHED 2
 
 /*
+ * Out-of-range value for link sequence numbers
+ */
+
+#define INVALID_LINK_SEQ 0x10000
+
+/*
  * Link states
  */
 
@@ -61,7 +67,7 @@
 #define MAX_PKT_DEFAULT 1500
 
 /**
- * struct link - TIPC link data structure
+ * struct tipc_link - TIPC link data structure
  * @addr: network address of link's peer node
  * @name: link name character string
  * @media_addr: media address to use when sending messages over link
@@ -109,7 +115,7 @@
  * @stats: collects statistics regarding link activity
  */
 
-struct link {
+struct tipc_link {
 	u32 addr;
 	char name[TIPC_MAX_LINK_NAME];
 	struct tipc_media_addr media_addr;
@@ -207,24 +213,24 @@
 
 struct tipc_port;
 
-struct link *tipc_link_create(struct tipc_node *n_ptr,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 			      struct tipc_bearer *b_ptr,
 			      const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct link *l_ptr);
-void tipc_link_changeover(struct link *l_ptr);
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
-void tipc_link_reset_fragments(struct link *l_ptr);
-int tipc_link_is_up(struct link *l_ptr);
-int tipc_link_is_active(struct link *l_ptr);
-u32 tipc_link_push_packet(struct link *l_ptr);
-void tipc_link_stop(struct link *l_ptr);
+void tipc_link_delete(struct tipc_link *l_ptr);
+void tipc_link_changeover(struct tipc_link *l_ptr);
+void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *dest);
+void tipc_link_reset_fragments(struct tipc_link *l_ptr);
+int tipc_link_is_up(struct tipc_link *l_ptr);
+int tipc_link_is_active(struct tipc_link *l_ptr);
+u32 tipc_link_push_packet(struct tipc_link *l_ptr);
+void tipc_link_stop(struct tipc_link *l_ptr);
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
-void tipc_link_reset(struct link *l_ptr);
+void tipc_link_reset(struct tipc_link *l_ptr);
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
 void tipc_link_send_names(struct list_head *message_list, u32 dest);
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 int tipc_link_send_sections_fast(struct tipc_port *sender,
 				 struct iovec const *msg_sect,
@@ -235,19 +241,26 @@
 int  tipc_link_recv_fragment(struct sk_buff **pending,
 			     struct sk_buff **fb,
 			     struct tipc_msg **msg);
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
-			      u32 tolerance, u32 priority, u32 acked_mtu);
-void tipc_link_push_queue(struct link *l_ptr);
+void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+			      u32 gap, u32 tolerance, u32 priority,
+			      u32 acked_mtu);
+void tipc_link_push_queue(struct tipc_link *l_ptr);
 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
 		   struct sk_buff *buf);
-void tipc_link_wakeup_ports(struct link *l_ptr, int all);
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
+void tipc_link_retransmit(struct tipc_link *l_ptr,
+			  struct sk_buff *start, u32 retransmits);
 
 /*
  * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
  */
 
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+	return msg_seqno(buf_msg(buf));
+}
+
 static inline u32 mod(u32 x)
 {
 	return x & 0xffffu;
@@ -282,32 +295,32 @@
  * Link status checking routines
  */
 
-static inline int link_working_working(struct link *l_ptr)
+static inline int link_working_working(struct tipc_link *l_ptr)
 {
 	return l_ptr->state == WORKING_WORKING;
 }
 
-static inline int link_working_unknown(struct link *l_ptr)
+static inline int link_working_unknown(struct tipc_link *l_ptr)
 {
 	return l_ptr->state == WORKING_UNKNOWN;
 }
 
-static inline int link_reset_unknown(struct link *l_ptr)
+static inline int link_reset_unknown(struct tipc_link *l_ptr)
 {
 	return l_ptr->state == RESET_UNKNOWN;
 }
 
-static inline int link_reset_reset(struct link *l_ptr)
+static inline int link_reset_reset(struct tipc_link *l_ptr)
 {
 	return l_ptr->state == RESET_RESET;
 }
 
-static inline int link_blocked(struct link *l_ptr)
+static inline int link_blocked(struct tipc_link *l_ptr)
 {
 	return l_ptr->exp_msg_count || l_ptr->blocked;
 }
 
-static inline int link_congested(struct link *l_ptr)
+static inline int link_congested(struct tipc_link *l_ptr)
 {
 	return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
 }
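
The buf_seqno() inline added here replaces the recurring msg_seqno(buf_msg(buf)) pattern seen throughout the link.c hunks above. A minimal sketch of its use together with the new INVALID_LINK_SEQ sentinel (the helper name is hypothetical, not part of the patch):

static u32 first_queued_seqno(struct tipc_link *l_ptr)
{
	/* Same value as msg_seqno(buf_msg(l_ptr->first_out)), just shorter.
	 * INVALID_LINK_SEQ (0x10000) lies outside the mod-2^16 sequence
	 * space, so it can never be mistaken for a real sequence number.
	 */
	if (!l_ptr->first_out)
		return INVALID_LINK_SEQ;
	return buf_seqno(l_ptr->first_out);
}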
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 83d5096..3e4d3e2 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -333,11 +333,14 @@
 	}
 
 	if (msg_user(msg) ==  LINK_CONFIG) {
-		u32 *raw = (u32 *)msg;
-		struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
+		struct tipc_media_addr orig;
+
 		tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
 		tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
-		tipc_media_addr_printf(buf, orig);
+		memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value));
+		orig.media_id = 0;
+		orig.broadcast = 0;
+		tipc_media_addr_printf(buf, &orig);
 	}
 	if (msg_user(msg) == BCAST_PROTOCOL) {
 		tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index d93178f..7b0cda1 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -78,6 +78,8 @@
 
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
 
+#define TIPC_MEDIA_ADDR_OFFSET	5
+
 
 struct tipc_msg {
 	__be32 hdr[15];
@@ -682,6 +684,10 @@
 	msg_set_bits(m, 5, 12, 0x1, r);
 }
 
+static inline char *msg_media_addr(struct tipc_msg *m)
+{
+	return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET];
+}
 
 /*
  * Word 9
@@ -734,14 +740,4 @@
 		   u32 num_sect, unsigned int total_len,
 			    int max_size, int usrmem, struct sk_buff **buf);
 
-static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
-	memcpy(&((int *)m)[5], a, sizeof(*a));
-}
-
-static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
-	memcpy(a, &((int *)m)[5], sizeof(*a));
-}
-
 #endif
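
With msg_set_media_addr() and msg_get_media_addr() removed, callers copy the raw address bytes in and out of header word 5 through msg_media_addr(), as the msg.c hunk above now does on the read side. A sketch of that pattern (the local variable is illustrative):

	struct tipc_media_addr addr;

	/* Only the raw value bytes travel in the message; media_id and
	 * broadcast are not carried on the wire, so the caller fills them
	 * in (here simply zeroed, mirroring the msg.c hunk above).
	 */
	memcpy(addr.value, msg_media_addr(msg), sizeof(addr.value));
	addr.media_id = 0;
	addr.broadcast = 0;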
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index b7ca1bd..98ebb37 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -176,7 +176,7 @@
 void tipc_named_node_up(unsigned long nodearg)
 {
 	struct tipc_node *n_ptr;
-	struct link *l_ptr;
+	struct tipc_link *l_ptr;
 	struct publication *publ;
 	struct distr_item *item = NULL;
 	struct sk_buff *buf = NULL;
@@ -322,10 +322,9 @@
 /**
  * tipc_named_reinit - re-initialize local publication list
  *
- * This routine is called whenever TIPC networking is (re)enabled.
+ * This routine is called whenever TIPC networking is enabled.
  * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's current network address.
- * (If the node's address is unchanged, the update loop terminates immediately.)
+ * are updated to reflect the node's new network address.
  */
 
 void tipc_named_reinit(void)
@@ -333,10 +332,9 @@
 	struct publication *publ;
 
 	write_lock_bh(&tipc_nametbl_lock);
-	list_for_each_entry(publ, &publ_root, local_list) {
-		if (publ->node == tipc_own_addr)
-			break;
+
+	list_for_each_entry(publ, &publ_root, local_list)
 		publ->node = tipc_own_addr;
-	}
+
 	write_unlock_bh(&tipc_nametbl_lock);
 }
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 46e6b6c..89eb562 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -251,8 +251,8 @@
 						    u32 type, u32 lower, u32 upper,
 						    u32 scope, u32 node, u32 port, u32 key)
 {
-	struct subscription *s;
-	struct subscription *st;
+	struct tipc_subscription *s;
+	struct tipc_subscription *st;
 	struct publication *publ;
 	struct sub_seq *sseq;
 	struct name_info *info;
@@ -381,7 +381,7 @@
 	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
 	struct name_info *info;
 	struct sub_seq *free;
-	struct subscription *s, *st;
+	struct tipc_subscription *s, *st;
 	int removed_subseq = 0;
 
 	if (!sseq)
@@ -448,7 +448,8 @@
  * sequence overlapping with the requested sequence
  */
 
-static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+static void tipc_nameseq_subscribe(struct name_seq *nseq,
+					struct tipc_subscription *s)
 {
 	struct sub_seq *sseq = nseq->sseqs;
 
@@ -625,7 +626,7 @@
  */
 
 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-			      struct port_list *dports)
+			      struct tipc_port_list *dports)
 {
 	struct name_seq *seq;
 	struct sub_seq *sseq;
@@ -739,7 +740,7 @@
  * tipc_nametbl_subscribe - add a subscription object to the name table
  */
 
-void tipc_nametbl_subscribe(struct subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
 	u32 type = s->seq.type;
 	struct name_seq *seq;
@@ -763,7 +764,7 @@
  * tipc_nametbl_unsubscribe - remove a subscription object from name table
  */
 
-void tipc_nametbl_unsubscribe(struct subscription *s)
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
 	struct name_seq *seq;
 
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 62d77e5..8086b42 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -39,8 +39,8 @@
 
 #include "node_subscr.h"
 
-struct subscription;
-struct port_list;
+struct tipc_subscription;
+struct tipc_port_list;
 
 /*
  * TIPC name types reserved for internal TIPC use (both current and planned)
@@ -90,7 +90,7 @@
 struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
 u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-			 struct port_list *dports);
+			 struct tipc_port_list *dports);
 int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
 			struct tipc_name_seq const *seq);
 struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
@@ -100,8 +100,8 @@
 					u32 scope, u32 node, u32 ref, u32 key);
 struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
 					u32 node, u32 ref, u32 key);
-void tipc_nametbl_subscribe(struct subscription *s);
-void tipc_nametbl_unsubscribe(struct subscription *s);
+void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
 int tipc_nametbl_init(void);
 void tipc_nametbl_stop(void);
 
diff --git a/net/tipc/net.c b/net/tipc/net.c
index fafef6c..61afee7 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -174,7 +174,6 @@
 int tipc_net_start(u32 addr)
 {
 	char addr_string[16];
-	int res;
 
 	if (tipc_mode != TIPC_NODE_MODE)
 		return -ENOPROTOOPT;
@@ -187,9 +186,7 @@
 	tipc_named_reinit();
 	tipc_port_reinit();
 
-	res = tipc_bclink_init();
-	if (res)
-		return res;
+	tipc_bclink_init();
 
 	tipc_k_signal((Handler)tipc_subscr_start, 0);
 	tipc_k_signal((Handler)tipc_cfg_init, 0);
@@ -207,8 +204,8 @@
 	if (tipc_mode != TIPC_NET_MODE)
 		return;
 	write_lock_bh(&tipc_net_lock);
-	tipc_bearer_stop();
 	tipc_mode = TIPC_NODE_MODE;
+	tipc_bearer_stop();
 	tipc_bclink_stop();
 	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
 		tipc_node_delete(node);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 27b4bb0..6b226fa 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -136,9 +136,9 @@
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
 
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	struct link **active = &n_ptr->active_links[0];
+	struct tipc_link **active = &n_ptr->active_links[0];
 
 	n_ptr->working_links++;
 
@@ -171,14 +171,14 @@
 
 static void node_select_active_links(struct tipc_node *n_ptr)
 {
-	struct link **active = &n_ptr->active_links[0];
+	struct tipc_link **active = &n_ptr->active_links[0];
 	u32 i;
 	u32 highest_prio = 0;
 
 	active[0] = active[1] = NULL;
 
 	for (i = 0; i < MAX_BEARERS; i++) {
-		struct link *l_ptr = n_ptr->links[i];
+		struct tipc_link *l_ptr = n_ptr->links[i];
 
 		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
 		    (l_ptr->priority < highest_prio))
@@ -197,9 +197,9 @@
  * tipc_node_link_down - handle loss of link
  */
 
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	struct link **active;
+	struct tipc_link **active;
 
 	n_ptr->working_links--;
 
@@ -239,14 +239,14 @@
 	return tipc_node_active_links(n_ptr);
 }
 
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
 	atomic_inc(&tipc_num_links);
 	n_ptr->link_cnt++;
 }
 
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
 	atomic_dec(&tipc_num_links);
@@ -307,7 +307,7 @@
 	n_ptr->bclink.acked = tipc_bclink_get_last_sent();
 
 	if (n_ptr->bclink.supported) {
-		tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr);
+		tipc_bclink_add_node(n_ptr->addr);
 		if (n_ptr->addr < tipc_own_addr)
 			tipc_own_tag++;
 	}
@@ -350,9 +350,8 @@
 			n_ptr->bclink.defragm = NULL;
 		}
 
-		tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
-		tipc_bclink_acknowledge(n_ptr,
-					mod(n_ptr->bclink.acked + 10000));
+		tipc_bclink_remove_node(n_ptr->addr);
+		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
 		if (n_ptr->addr < tipc_own_addr)
 			tipc_own_tag--;
 
@@ -361,7 +360,7 @@
 
 	/* Abort link changeover */
 	for (i = 0; i < MAX_BEARERS; i++) {
-		struct link *l_ptr = n_ptr->links[i];
+		struct tipc_link *l_ptr = n_ptr->links[i];
 		if (!l_ptr)
 			continue;
 		l_ptr->reset_checkpoint = l_ptr->next_in_no;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 4f15cb4..0b1c5f8 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -79,8 +79,8 @@
 	struct hlist_node hash;
 	struct list_head list;
 	struct list_head nsub;
-	struct link *active_links[2];
-	struct link *links[MAX_BEARERS];
+	struct tipc_link *active_links[2];
+	struct tipc_link *links[MAX_BEARERS];
 	int link_cnt;
 	int working_links;
 	int block_setup;
@@ -117,10 +117,10 @@
 struct tipc_node *tipc_node_find(u32 addr);
 struct tipc_node *tipc_node_create(u32 addr);
 void tipc_node_delete(struct tipc_node *n_ptr);
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 int tipc_node_active_links(struct tipc_node *n_ptr);
 int tipc_node_redundant_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 54d812a..d91efc6 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -80,7 +80,7 @@
 	struct tipc_msg *hdr;
 	struct sk_buff *buf;
 	struct sk_buff *ibuf = NULL;
-	struct port_list dports = {0, NULL, };
+	struct tipc_port_list dports = {0, NULL, };
 	struct tipc_port *oport = tipc_port_deref(ref);
 	int ext_targets;
 	int res;
@@ -142,11 +142,11 @@
  * If there is no port list, perform a lookup to create one
  */
 
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
 {
 	struct tipc_msg *msg;
-	struct port_list dports = {0, NULL, };
-	struct port_list *item = dp;
+	struct tipc_port_list dports = {0, NULL, };
+	struct tipc_port_list *item = dp;
 	int cnt = 0;
 
 	msg = buf_msg(buf);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index b9aa341..f751807 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -151,7 +151,7 @@
 };
 
 extern spinlock_t tipc_port_list_lock;
-struct port_list;
+struct tipc_port_list;
 
 /*
  * TIPC port manipulation routines
@@ -228,7 +228,7 @@
 			      unsigned int total_len, int err);
 struct sk_buff *tipc_port_get_ports(void);
 void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
 void tipc_port_reinit(void);
 
 /**
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 8311689..9e37b78 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -110,8 +110,7 @@
 
 	/* allocate table & mark all entries as uninitialized */
 
-	table = __vmalloc(actual_size * sizeof(struct reference),
-			  GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	table = vzalloc(actual_size * sizeof(struct reference));
 	if (table == NULL)
 		return -ENOMEM;
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 42b8324..e2f7c5d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -185,9 +185,6 @@
 
 	/* Validate arguments */
 
-	if (!net_eq(net, &init_net))
-		return -EAFNOSUPPORT;
-
 	if (unlikely(protocol != 0))
 		return -EPROTONOSUPPORT;
 
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 1983717..8c49566 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -40,14 +40,14 @@
 #include "subscr.h"
 
 /**
- * struct subscriber - TIPC network topology subscriber
+ * struct tipc_subscriber - TIPC network topology subscriber
  * @port_ref: object reference to server port connecting to subscriber
  * @lock: pointer to spinlock controlling access to subscriber's server port
  * @subscriber_list: adjacent subscribers in top. server's list of subscribers
  * @subscription_list: list of subscription objects for this subscriber
  */
 
-struct subscriber {
+struct tipc_subscriber {
 	u32 port_ref;
 	spinlock_t *lock;
 	struct list_head subscriber_list;
@@ -92,7 +92,7 @@
  *       try to take the lock if the message is rejected and returned!
  */
 
-static void subscr_send_event(struct subscription *sub,
+static void subscr_send_event(struct tipc_subscription *sub,
 			      u32 found_lower,
 			      u32 found_upper,
 			      u32 event,
@@ -118,7 +118,7 @@
  * Returns 1 if there is overlap, otherwise 0.
  */
 
-int tipc_subscr_overlap(struct subscription *sub,
+int tipc_subscr_overlap(struct tipc_subscription *sub,
 			u32 found_lower,
 			u32 found_upper)
 
@@ -138,7 +138,7 @@
  * Protected by nameseq.lock in name_table.c
  */
 
-void tipc_subscr_report_overlap(struct subscription *sub,
+void tipc_subscr_report_overlap(struct tipc_subscription *sub,
 				u32 found_lower,
 				u32 found_upper,
 				u32 event,
@@ -158,7 +158,7 @@
  * subscr_timeout - subscription timeout has occurred
  */
 
-static void subscr_timeout(struct subscription *sub)
+static void subscr_timeout(struct tipc_subscription *sub)
 {
 	struct tipc_port *server_port;
 
@@ -205,7 +205,7 @@
  * Called with subscriber port locked.
  */
 
-static void subscr_del(struct subscription *sub)
+static void subscr_del(struct tipc_subscription *sub)
 {
 	tipc_nametbl_unsubscribe(sub);
 	list_del(&sub->subscription_list);
@@ -224,11 +224,11 @@
  * simply wait for it to be released, then claim it.)
  */
 
-static void subscr_terminate(struct subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscriber *subscriber)
 {
 	u32 port_ref;
-	struct subscription *sub;
-	struct subscription *sub_temp;
+	struct tipc_subscription *sub;
+	struct tipc_subscription *sub_temp;
 
 	/* Invalidate subscriber reference */
 
@@ -278,10 +278,10 @@
  */
 
 static void subscr_cancel(struct tipc_subscr *s,
-			  struct subscriber *subscriber)
+			  struct tipc_subscriber *subscriber)
 {
-	struct subscription *sub;
-	struct subscription *sub_temp;
+	struct tipc_subscription *sub;
+	struct tipc_subscription *sub_temp;
 	int found = 0;
 
 	/* Find first matching subscription, exit if not found */
@@ -314,10 +314,10 @@
  * Called with subscriber port locked.
  */
 
-static struct subscription *subscr_subscribe(struct tipc_subscr *s,
-					     struct subscriber *subscriber)
+static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+					     struct tipc_subscriber *subscriber)
 {
-	struct subscription *sub;
+	struct tipc_subscription *sub;
 	int swap;
 
 	/* Determine subscriber's endianness */
@@ -393,7 +393,7 @@
 				       unsigned int size,
 				       int reason)
 {
-	struct subscriber *subscriber = usr_handle;
+	struct tipc_subscriber *subscriber = usr_handle;
 	spinlock_t *subscriber_lock;
 
 	if (tipc_port_lock(port_ref) == NULL)
@@ -416,9 +416,9 @@
 				  const unchar *data,
 				  u32 size)
 {
-	struct subscriber *subscriber = usr_handle;
+	struct tipc_subscriber *subscriber = usr_handle;
 	spinlock_t *subscriber_lock;
-	struct subscription *sub;
+	struct tipc_subscription *sub;
 
 	/*
 	 * Lock subscriber's server port (& make a local copy of lock pointer,
@@ -471,12 +471,12 @@
 				   struct tipc_portid const *orig,
 				   struct tipc_name_seq const *dest)
 {
-	struct subscriber *subscriber;
+	struct tipc_subscriber *subscriber;
 	u32 server_port_ref;
 
 	/* Create subscriber object */
 
-	subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
+	subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
 	if (subscriber == NULL) {
 		warn("Subscriber rejected, no memory\n");
 		return;
@@ -568,8 +568,8 @@
 
 void tipc_subscr_stop(void)
 {
-	struct subscriber *subscriber;
-	struct subscriber *subscriber_temp;
+	struct tipc_subscriber *subscriber;
+	struct tipc_subscriber *subscriber_temp;
 	spinlock_t *subscriber_lock;
 
 	if (topsrv.setup_port) {
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 4b06ef6..ef6529c 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -37,10 +37,10 @@
 #ifndef _TIPC_SUBSCR_H
 #define _TIPC_SUBSCR_H
 
-struct subscription;
+struct tipc_subscription;
 
 /**
- * struct subscription - TIPC network topology subscription object
+ * struct tipc_subscription - TIPC network topology subscription object
  * @seq: name sequence associated with subscription
  * @timeout: duration of subscription (in ms)
  * @filter: event filtering to be done for subscription
@@ -52,7 +52,7 @@
  * @evt: template for events generated by subscription
  */
 
-struct subscription {
+struct tipc_subscription {
 	struct tipc_name_seq seq;
 	u32 timeout;
 	u32 filter;
@@ -64,11 +64,11 @@
 	struct tipc_event evt;
 };
 
-int tipc_subscr_overlap(struct subscription *sub,
+int tipc_subscr_overlap(struct tipc_subscription *sub,
 			u32 found_lower,
 			u32 found_upper);
 
-void tipc_subscr_report_overlap(struct subscription *sub,
+void tipc_subscr_report_overlap(struct tipc_subscription *sub,
 				u32 found_lower,
 				u32 found_upper,
 				u32 event,
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 5a69733..c2128b1 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -19,3 +19,10 @@
 
 	  Say Y unless you know what you are doing.
 
+config UNIX_DIAG
+	tristate "UNIX: socket monitoring interface"
+	depends on UNIX
+	default UNIX
+	---help---
+	  Support for the UNIX socket monitoring interface used by the ss tool.
+	  If unsure, say Y.
diff --git a/net/unix/Makefile b/net/unix/Makefile
index b852a2b..b663c60 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -6,3 +6,6 @@
 
 unix-y			:= af_unix.o garbage.o
 unix-$(CONFIG_SYSCTL)	+= sysctl_net_unix.o
+
+obj-$(CONFIG_UNIX_DIAG)	+= unix_diag.o
+unix_diag-y		:= diag.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b595a3d..aad8fb6 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,8 +115,10 @@
 #include <net/checksum.h>
 #include <linux/security.h>
 
-static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-static DEFINE_SPINLOCK(unix_table_lock);
+struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+EXPORT_SYMBOL_GPL(unix_socket_table);
+DEFINE_SPINLOCK(unix_table_lock);
+EXPORT_SYMBOL_GPL(unix_table_lock);
 static atomic_long_t unix_nr_socks;
 
 #define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])
@@ -172,7 +174,7 @@
 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
-static struct sock *unix_peer_get(struct sock *s)
+struct sock *unix_peer_get(struct sock *s)
 {
 	struct sock *peer;
 
@@ -183,6 +185,7 @@
 	unix_state_unlock(s);
 	return peer;
 }
+EXPORT_SYMBOL_GPL(unix_peer_get);
 
 static inline void unix_release_addr(struct unix_address *addr)
 {
@@ -847,7 +850,7 @@
 	atomic_set(&addr->refcnt, 1);
 
 	if (sun_path[0]) {
-		unsigned int mode;
+		umode_t mode;
 		err = 0;
 		/*
 		 * Get the parent directory, calculate the hash for last
@@ -2062,6 +2065,36 @@
 	return 0;
 }
 
+long unix_inq_len(struct sock *sk)
+{
+	struct sk_buff *skb;
+	long amount = 0;
+
+	if (sk->sk_state == TCP_LISTEN)
+		return -EINVAL;
+
+	spin_lock(&sk->sk_receive_queue.lock);
+	if (sk->sk_type == SOCK_STREAM ||
+	    sk->sk_type == SOCK_SEQPACKET) {
+		skb_queue_walk(&sk->sk_receive_queue, skb)
+			amount += skb->len;
+	} else {
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb)
+			amount = skb->len;
+	}
+	spin_unlock(&sk->sk_receive_queue.lock);
+
+	return amount;
+}
+EXPORT_SYMBOL_GPL(unix_inq_len);
+
+long unix_outq_len(struct sock *sk)
+{
+	return sk_wmem_alloc_get(sk);
+}
+EXPORT_SYMBOL_GPL(unix_outq_len);
+
 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	struct sock *sk = sock->sk;
@@ -2070,33 +2103,16 @@
 
 	switch (cmd) {
 	case SIOCOUTQ:
-		amount = sk_wmem_alloc_get(sk);
+		amount = unix_outq_len(sk);
 		err = put_user(amount, (int __user *)arg);
 		break;
 	case SIOCINQ:
-		{
-			struct sk_buff *skb;
-
-			if (sk->sk_state == TCP_LISTEN) {
-				err = -EINVAL;
-				break;
-			}
-
-			spin_lock(&sk->sk_receive_queue.lock);
-			if (sk->sk_type == SOCK_STREAM ||
-			    sk->sk_type == SOCK_SEQPACKET) {
-				skb_queue_walk(&sk->sk_receive_queue, skb)
-					amount += skb->len;
-			} else {
-				skb = skb_peek(&sk->sk_receive_queue);
-				if (skb)
-					amount = skb->len;
-			}
-			spin_unlock(&sk->sk_receive_queue.lock);
+		amount = unix_inq_len(sk);
+		if (amount < 0)
+			err = amount;
+		else
 			err = put_user(amount, (int __user *)arg);
-			break;
-		}
-
+		break;
 	default:
 		err = -ENOIOCTLCMD;
 		break;
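
The SIOCINQ/SIOCOUTQ handling above is unchanged from userspace's point of view; the queue-length computations now live in unix_inq_len() and unix_outq_len() so the diag module below can reuse them. A userspace sketch of what those values correspond to (assumes fd is an AF_UNIX socket):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void print_unix_queue_lengths(int fd)
{
	int inq = 0, outq = 0;

	ioctl(fd, SIOCINQ, &inq);	/* -> unix_inq_len(): bytes queued for read */
	ioctl(fd, SIOCOUTQ, &outq);	/* -> unix_outq_len(): written bytes not yet consumed */
	printf("inq=%d outq=%d\n", inq, outq);
}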
diff --git a/net/unix/diag.c b/net/unix/diag.c
new file mode 100644
index 0000000..6b7697fd
--- /dev/null
+++ b/net/unix/diag.c
@@ -0,0 +1,329 @@
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sock_diag.h>
+#include <linux/unix_diag.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/af_unix.h>
+#include <net/tcp_states.h>
+
+#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
+	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
+
+static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
+{
+	struct unix_address *addr = unix_sk(sk)->addr;
+	char *s;
+
+	if (addr) {
+		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
+		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
+	}
+
+	return 0;
+
+rtattr_failure:
+	return -EMSGSIZE;
+}
+
+static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
+{
+	struct dentry *dentry = unix_sk(sk)->dentry;
+	struct unix_diag_vfs *uv;
+
+	if (dentry) {
+		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
+		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
+		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+	}
+
+	return 0;
+
+rtattr_failure:
+	return -EMSGSIZE;
+}
+
+static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
+{
+	struct sock *peer;
+	int ino;
+
+	peer = unix_peer_get(sk);
+	if (peer) {
+		unix_state_lock(peer);
+		ino = sock_i_ino(peer);
+		unix_state_unlock(peer);
+		sock_put(peer);
+
+		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+	}
+
+	return 0;
+rtattr_failure:
+	return -EMSGSIZE;
+}
+
+static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+{
+	struct sk_buff *skb;
+	u32 *buf;
+	int i;
+
+	if (sk->sk_state == TCP_LISTEN) {
+		spin_lock(&sk->sk_receive_queue.lock);
+		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
+				sk->sk_receive_queue.qlen * sizeof(u32));
+		i = 0;
+		skb_queue_walk(&sk->sk_receive_queue, skb) {
+			struct sock *req, *peer;
+
+			req = skb->sk;
+			/*
+			 * A socket's state lock nests outside its own queue
+			 * lock; the queue lock held here belongs to the other
+			 * (listening) socket, so taking req's state lock is safe.
+			 */
+			unix_state_lock_nested(req);
+			peer = unix_sk(req)->peer;
+			buf[i++] = (peer ? sock_i_ino(peer) : 0);
+			unix_state_unlock(req);
+		}
+		spin_unlock(&sk->sk_receive_queue.lock);
+	}
+
+	return 0;
+
+rtattr_failure:
+	spin_unlock(&sk->sk_receive_queue.lock);
+	return -EMSGSIZE;
+}
+
+static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+{
+	struct unix_diag_rqlen *rql;
+
+	rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+
+	if (sk->sk_state == TCP_LISTEN) {
+		rql->udiag_rqueue = sk->sk_receive_queue.qlen;
+		rql->udiag_wqueue = sk->sk_max_ack_backlog;
+	} else {
+		rql->udiag_rqueue = (__u32)unix_inq_len(sk);
+		rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+	}
+
+	return 0;
+
+rtattr_failure:
+	return -EMSGSIZE;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+		u32 pid, u32 seq, u32 flags, int sk_ino)
+{
+	unsigned char *b = skb_tail_pointer(skb);
+	struct nlmsghdr *nlh;
+	struct unix_diag_msg *rep;
+
+	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
+	nlh->nlmsg_flags = flags;
+
+	rep = NLMSG_DATA(nlh);
+
+	rep->udiag_family = AF_UNIX;
+	rep->udiag_type = sk->sk_type;
+	rep->udiag_state = sk->sk_state;
+	rep->udiag_ino = sk_ino;
+	sock_diag_save_cookie(sk, rep->udiag_cookie);
+
+	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
+	    sk_diag_dump_name(sk, skb))
+		goto nlmsg_failure;
+
+	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
+	    sk_diag_dump_vfs(sk, skb))
+		goto nlmsg_failure;
+
+	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
+	    sk_diag_dump_peer(sk, skb))
+		goto nlmsg_failure;
+
+	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
+	    sk_diag_dump_icons(sk, skb))
+		goto nlmsg_failure;
+
+	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
+	    sk_diag_show_rqlen(sk, skb))
+		goto nlmsg_failure;
+
+	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
+	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+		goto nlmsg_failure;
+
+	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+	return skb->len;
+
+nlmsg_failure:
+	nlmsg_trim(skb, b);
+	return -EMSGSIZE;
+}
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+		u32 pid, u32 seq, u32 flags)
+{
+	int sk_ino;
+
+	unix_state_lock(sk);
+	sk_ino = sock_i_ino(sk);
+	unix_state_unlock(sk);
+
+	if (!sk_ino)
+		return 0;
+
+	return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
+}
+
+static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct unix_diag_req *req;
+	int num, s_num, slot, s_slot;
+
+	req = NLMSG_DATA(cb->nlh);
+
+	s_slot = cb->args[0];
+	num = s_num = cb->args[1];
+
+	spin_lock(&unix_table_lock);
+	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+		struct sock *sk;
+		struct hlist_node *node;
+
+		num = 0;
+		sk_for_each(sk, node, &unix_socket_table[slot]) {
+			if (num < s_num)
+				goto next;
+			if (!(req->udiag_states & (1 << sk->sk_state)))
+				goto next;
+			if (sk_diag_dump(sk, skb, req,
+					 NETLINK_CB(cb->skb).pid,
+					 cb->nlh->nlmsg_seq,
+					 NLM_F_MULTI) < 0)
+				goto done;
+next:
+			num++;
+		}
+	}
+done:
+	spin_unlock(&unix_table_lock);
+	cb->args[0] = slot;
+	cb->args[1] = num;
+
+	return skb->len;
+}
+
+static struct sock *unix_lookup_by_ino(int ino)
+{
+	int i;
+	struct sock *sk;
+
+	spin_lock(&unix_table_lock);
+	for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+		struct hlist_node *node;
+
+		sk_for_each(sk, node, &unix_socket_table[i])
+			if (ino == sock_i_ino(sk)) {
+				sock_hold(sk);
+				spin_unlock(&unix_table_lock);
+
+				return sk;
+			}
+	}
+
+	spin_unlock(&unix_table_lock);
+	return NULL;
+}
+
+static int unix_diag_get_exact(struct sk_buff *in_skb,
+			       const struct nlmsghdr *nlh,
+			       struct unix_diag_req *req)
+{
+	int err = -EINVAL;
+	struct sock *sk;
+	struct sk_buff *rep;
+	unsigned int extra_len;
+
+	if (req->udiag_ino == 0)
+		goto out_nosk;
+
+	sk = unix_lookup_by_ino(req->udiag_ino);
+	err = -ENOENT;
+	if (sk == NULL)
+		goto out_nosk;
+
+	err = sock_diag_check_cookie(sk, req->udiag_cookie);
+	if (err)
+		goto out;
+
+	extra_len = 256;
+again:
+	err = -ENOMEM;
+	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
+			GFP_KERNEL);
+	if (!rep)
+		goto out;
+
+	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
+			   nlh->nlmsg_seq, 0, req->udiag_ino);
+	if (err < 0) {
+		kfree_skb(rep);
+		extra_len += 256;
+		if (extra_len >= PAGE_SIZE)
+			goto out;
+
+		goto again;
+	}
+	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+			      MSG_DONTWAIT);
+	if (err > 0)
+		err = 0;
+out:
+	if (sk)
+		sock_put(sk);
+out_nosk:
+	return err;
+}
+
+static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+	int hdrlen = sizeof(struct unix_diag_req);
+
+	if (nlmsg_len(h) < hdrlen)
+		return -EINVAL;
+
+	if (h->nlmsg_flags & NLM_F_DUMP)
+		return netlink_dump_start(sock_diag_nlsk, skb, h,
+					  unix_diag_dump, NULL, 0);
+	else
+		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+}
+
+static struct sock_diag_handler unix_diag_handler = {
+	.family = AF_UNIX,
+	.dump = unix_diag_handler_dump,
+};
+
+static int __init unix_diag_init(void)
+{
+	return sock_diag_register(&unix_diag_handler);
+}
+
+static void __exit unix_diag_exit(void)
+{
+	sock_diag_unregister(&unix_diag_handler);
+}
+
+module_init(unix_diag_init);
+module_exit(unix_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
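
The new module answers SOCK_DIAG_BY_FAMILY requests for AF_UNIX over NETLINK_SOCK_DIAG, which is how the ss tool mentioned in the Kconfig help enumerates unix sockets. A userspace sketch of a dump request (assumes a kernel built with CONFIG_UNIX_DIAG; the reply-parsing loop is omitted):

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <sys/socket.h>
#include <unistd.h>

static int request_unix_diag_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_UNIX,
			.udiag_states = -1,	/* all socket states */
			.udiag_show   = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
					UDIAG_SHOW_RQLEN,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;
	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}
	/* recv() loop parsing unix_diag_msg + attributes omitted */
	close(fd);
	return 0;
}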
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1f1ef70..2e4444f 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -121,15 +121,16 @@
 
 config WIRELESS_EXT_SYSFS
 	bool "Wireless extensions sysfs files"
-	default y
 	depends on WEXT_CORE && SYSFS
 	help
 	  This option enables the deprecated wireless statistics
 	  files in /sys/class/net/*/wireless/. The same information
 	  is available via the ioctls as well.
 
-	  Say Y if you have programs using it, like old versions of
-	  hal.
+	  Say N. If you know you have ancient tools requiring it,
+	  like very old versions of hal (prior to 0.5.12 release),
+	  say Y and update the tools as soon as possible as this
+	  option will be removed soon.
 
 config LIB80211
 	tristate "Common routines for IEEE802.11 drivers"
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 17cd0c0..2fcfe09 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,6 +6,7 @@
  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
  */
 
+#include <linux/export.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -44,9 +45,9 @@
 	return chan;
 }
 
-static bool can_beacon_sec_chan(struct wiphy *wiphy,
-				struct ieee80211_channel *chan,
-				enum nl80211_channel_type channel_type)
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+				  struct ieee80211_channel *chan,
+				  enum nl80211_channel_type channel_type)
 {
 	struct ieee80211_channel *sec_chan;
 	int diff;
@@ -75,6 +76,7 @@
 
 	return true;
 }
+EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
 
 int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
 		      struct wireless_dev *wdev, int freq,
@@ -109,8 +111,8 @@
 		switch (channel_type) {
 		case NL80211_CHAN_HT40PLUS:
 		case NL80211_CHAN_HT40MINUS:
-			if (!can_beacon_sec_chan(&rdev->wiphy, chan,
-						 channel_type)) {
+			if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
+							  channel_type)) {
 				printk(KERN_DEBUG
 				       "cfg80211: Secondary channel not "
 				       "allowed to initiate communication\n");
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 220f3bd..ccdfed8 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -492,6 +492,10 @@
 		    !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
 		return -EINVAL;
 
+	if (WARN_ON(wiphy->ap_sme_capa &&
+		    !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
+		return -EINVAL;
+
 	if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
 		return -EINVAL;
 
diff --git a/net/wireless/core.h b/net/wireless/core.h
index b9ec306..43ad9c8 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -54,6 +54,8 @@
 	int opencount; /* also protected by devlist_mtx */
 	wait_queue_head_t dev_wait;
 
+	u32 ap_beacons_nlpid;
+
 	/* BSSes/scanning */
 	spinlock_t bss_lock;
 	struct list_head bss_list;
@@ -247,12 +249,11 @@
 			u16 status;
 		} cr;
 		struct {
-			struct ieee80211_channel *channel;
-			u8 bssid[ETH_ALEN];
 			const u8 *req_ie;
 			const u8 *resp_ie;
 			size_t req_ie_len;
 			size_t resp_ie_len;
+			struct cfg80211_bss *bss;
 		} rm;
 		struct {
 			const u8 *ie;
@@ -339,13 +340,17 @@
 			  const u8 *bssid, const u8 *prev_bssid,
 			  const u8 *ssid, int ssid_len,
 			  const u8 *ie, int ie_len, bool use_mfp,
-			  struct cfg80211_crypto_settings *crypt);
+			  struct cfg80211_crypto_settings *crypt,
+			  u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+			  struct ieee80211_ht_cap *ht_capa_mask);
 int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 			struct net_device *dev, struct ieee80211_channel *chan,
 			const u8 *bssid, const u8 *prev_bssid,
 			const u8 *ssid, int ssid_len,
 			const u8 *ie, int ie_len, bool use_mfp,
-			struct cfg80211_crypto_settings *crypt);
+			struct cfg80211_crypto_settings *crypt,
+			u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+			struct ieee80211_ht_cap *ht_capa_mask);
 int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 			   struct net_device *dev, const u8 *bssid,
 			   const u8 *ie, int ie_len, u16 reason,
@@ -376,7 +381,9 @@
 			  enum nl80211_channel_type channel_type,
 			  bool channel_type_valid, unsigned int wait,
 			  const u8 *buf, size_t len, bool no_cck,
-			  u64 *cookie);
+			  bool dont_wait_for_ack, u64 *cookie);
+void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
+			       const struct ieee80211_ht_cap *ht_capa_mask);
 
 /* SME */
 int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -395,8 +402,7 @@
 			struct net_device *dev, u16 reason,
 			bool wextev);
 void __cfg80211_roamed(struct wireless_dev *wdev,
-		       struct ieee80211_channel *channel,
-		       const u8 *bssid,
+		       struct cfg80211_bss *bss,
 		       const u8 *req_ie, size_t req_ie_len,
 		       const u8 *resp_ie, size_t resp_ie_len);
 int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index 53c143f..9392f8c 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -7,10 +7,17 @@
 #
 # Copyright 2009 John W. Linville <linville@tuxdriver.com>
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
 #
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 BEGIN {
 	active = 0
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index b7b7868..8c550df 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -20,6 +20,7 @@
  * interface
  */
 #define MESH_PREQ_MIN_INT	10
+#define MESH_PERR_MIN_INT	100
 #define MESH_DIAM_TRAVERSAL_TIME 50
 
 /*
@@ -47,6 +48,7 @@
 	.dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
 	.dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
 	.dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
+	.dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
 	.dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME,
 	.dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
 	.path_refresh_time = MESH_PATH_REFRESH_TIME,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 21fc970..438dfc1 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -501,13 +501,32 @@
 	return err;
 }
 
+/*  Do a logical ht_capa &= ht_capa_mask.  */
+void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
+			       const struct ieee80211_ht_cap *ht_capa_mask)
+{
+	int i;
+	u8 *p1, *p2;
+	if (!ht_capa_mask) {
+		memset(ht_capa, 0, sizeof(*ht_capa));
+		return;
+	}
+
+	p1 = (u8 *)ht_capa;
+	p2 = (u8 *)ht_capa_mask;
+	for (i = 0; i < sizeof(*ht_capa); i++)
+		p1[i] &= p2[i];
+}
+
 int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 			  struct net_device *dev,
 			  struct ieee80211_channel *chan,
 			  const u8 *bssid, const u8 *prev_bssid,
 			  const u8 *ssid, int ssid_len,
 			  const u8 *ie, int ie_len, bool use_mfp,
-			  struct cfg80211_crypto_settings *crypt)
+			  struct cfg80211_crypto_settings *crypt,
+			  u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+			  struct ieee80211_ht_cap *ht_capa_mask)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_assoc_request req;
@@ -537,6 +556,15 @@
 	memcpy(&req.crypto, crypt, sizeof(req.crypto));
 	req.use_mfp = use_mfp;
 	req.prev_bssid = prev_bssid;
+	req.flags = assoc_flags;
+	if (ht_capa)
+		memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
+	if (ht_capa_mask)
+		memcpy(&req.ht_capa_mask, ht_capa_mask,
+		       sizeof(req.ht_capa_mask));
+	cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
+				  rdev->wiphy.ht_capa_mod_mask);
+
 	req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
 				   WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
 	if (!req.bss) {
@@ -574,14 +602,17 @@
 			const u8 *bssid, const u8 *prev_bssid,
 			const u8 *ssid, int ssid_len,
 			const u8 *ie, int ie_len, bool use_mfp,
-			struct cfg80211_crypto_settings *crypt)
+			struct cfg80211_crypto_settings *crypt,
+			u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+			struct ieee80211_ht_cap *ht_capa_mask)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
 	wdev_lock(wdev);
 	err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
-				    ssid, ssid_len, ie, ie_len, use_mfp, crypt);
+				    ssid, ssid_len, ie, ie_len, use_mfp, crypt,
+				    assoc_flags, ht_capa, ht_capa_mask);
 	wdev_unlock(wdev);
 
 	return err;
@@ -879,6 +910,9 @@
 	}
 
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+	if (nlpid == wdev->ap_unexpected_nlpid)
+		wdev->ap_unexpected_nlpid = 0;
 }
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -901,7 +935,7 @@
 			  enum nl80211_channel_type channel_type,
 			  bool channel_type_valid, unsigned int wait,
 			  const u8 *buf, size_t len, bool no_cck,
-			  u64 *cookie)
+			  bool dont_wait_for_ack, u64 *cookie)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const struct ieee80211_mgmt *mgmt;
@@ -992,7 +1026,8 @@
 	/* Transmit the Action frame as requested by user space */
 	return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
 				  channel_type, channel_type_valid,
-				  wait, buf, len, no_cck, cookie);
+				  wait, buf, len, no_cck, dont_wait_for_ack,
+				  cookie);
 }
 
 bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1107,3 +1142,30 @@
 	nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
 }
 EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
+
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+				const u8 *addr, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+		    wdev->iftype != NL80211_IFTYPE_P2P_GO))
+		return false;
+
+	return nl80211_unexpected_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
+
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+					const u8 *addr, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+		    wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+		    wdev->iftype != NL80211_IFTYPE_AP_VLAN))
+		return false;
+
+	return nl80211_unexpected_4addr_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
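
cfg80211_oper_and_ht_capa() clamps a userspace-supplied HT capability override to the bits the driver advertises in wiphy->ht_capa_mod_mask, which is how __cfg80211_mlme_assoc() uses it above. A sketch of the effect (variable names illustrative):

	struct ieee80211_ht_cap mask;

	/* the requested mask arrives via NL80211_ATTR_HT_CAPABILITY_MASK */
	memcpy(&mask, requested_mask, sizeof(mask));
	cfg80211_oper_and_ht_capa(&mask, rdev->wiphy.ht_capa_mod_mask);
	/* only bits the driver allows userspace to modify survive;
	 * a NULL ht_capa_mod_mask zeroes the whole mask.
	 */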
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ffafda5..b3d3cf8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -47,22 +47,21 @@
 };
 
 /* internal helper: get rdev and dev */
-static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
-				       struct cfg80211_registered_device **rdev,
-				       struct net_device **dev)
+static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
+				   struct cfg80211_registered_device **rdev,
+				   struct net_device **dev)
 {
-	struct nlattr **attrs = info->attrs;
 	int ifindex;
 
 	if (!attrs[NL80211_ATTR_IFINDEX])
 		return -EINVAL;
 
 	ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
-	*dev = dev_get_by_index(genl_info_net(info), ifindex);
+	*dev = dev_get_by_index(netns, ifindex);
 	if (!*dev)
 		return -ENODEV;
 
-	*rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex);
+	*rdev = cfg80211_get_dev_from_ifindex(netns, ifindex);
 	if (IS_ERR(*rdev)) {
 		dev_put(*dev);
 		return PTR_ERR(*rdev);
@@ -98,7 +97,7 @@
 	[NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
 	[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
 	[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
-	[NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+	[NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
 	[NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
 
 	[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
@@ -196,6 +195,15 @@
 	[NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
 	[NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
 	[NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
+	[NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
+	[NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
+				      .len = IEEE80211_MAX_DATA_LEN },
+	[NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 },
+	[NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG },
+	[NL80211_ATTR_HT_CAPABILITY_MASK] = {
+		.len = NL80211_HT_CAPABILITY_LEN
+	},
+	[NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
 };
 
 /* policy for the key attributes */
@@ -203,7 +211,7 @@
 	[NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
 	[NL80211_KEY_IDX] = { .type = NLA_U8 },
 	[NL80211_KEY_CIPHER] = { .type = NLA_U32 },
-	[NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+	[NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
 	[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
 	[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
 	[NL80211_KEY_TYPE] = { .type = NLA_U32 },
@@ -758,6 +766,10 @@
 	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
 		    dev->wiphy.available_antennas_rx);
 
+	if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
+		NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+			    dev->wiphy.probe_resp_offload);
+
 	if ((dev->wiphy.available_antennas_tx ||
 	     dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
 		u32 tx_ant = 0, rx_ant = 0;
@@ -874,7 +886,8 @@
 	CMD(set_pmksa, SET_PMKSA);
 	CMD(del_pmksa, DEL_PMKSA);
 	CMD(flush_pmksa, FLUSH_PMKSA);
-	CMD(remain_on_channel, REMAIN_ON_CHANNEL);
+	if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
+		CMD(remain_on_channel, REMAIN_ON_CHANNEL);
 	CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
 	CMD(mgmt_tx, FRAME);
 	CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
@@ -890,6 +903,16 @@
 	}
 	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
 		CMD(sched_scan_start, START_SCHED_SCAN);
+	CMD(probe_client, PROBE_CLIENT);
+	CMD(set_noack_map, SET_NOACK_MAP);
+	if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+		i++;
+		NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+	}
+
+#ifdef CONFIG_NL80211_TESTMODE
+	CMD(testmode_cmd, TESTMODE);
+#endif
 
 #undef CMD
 
@@ -905,11 +928,12 @@
 
 	nla_nest_end(msg, nl_cmds);
 
-	if (dev->ops->remain_on_channel)
+	if (dev->ops->remain_on_channel &&
+	    dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
 		NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
 			    dev->wiphy.max_remain_on_channel_duration);
 
-	if (dev->ops->mgmt_tx_cancel_wait)
+	if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
 
 	if (mgmt_stypes) {
@@ -1007,6 +1031,17 @@
 	if (nl80211_put_iface_combinations(&dev->wiphy, msg))
 		goto nla_put_failure;
 
+	if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
+		NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
+			    dev->wiphy.ap_sme_capa);
+
+	NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+
+	if (dev->wiphy.ht_capa_mod_mask)
+		NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+			sizeof(*dev->wiphy.ht_capa_mod_mask),
+			dev->wiphy.ht_capa_mod_mask);
+
 	return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -1725,6 +1760,23 @@
 	return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
 }
 
+static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	u16 noack_map;
+
+	if (!info->attrs[NL80211_ATTR_NOACK_MAP])
+		return -EINVAL;
+
+	if (!rdev->ops->set_noack_map)
+		return -EOPNOTSUPP;
+
+	noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]);
+
+	return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
+}
+
 struct get_key_cookie {
 	struct sk_buff *msg;
 	int error;
@@ -2155,6 +2207,13 @@
 			nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
 	}
 
+	if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
+		params.probe_resp =
+			nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
+		params.probe_resp_len =
+			nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
+	}
+
 	err = call(&rdev->wiphy, dev, &params);
 	if (!err && params.interval)
 		wdev->beacon_interval = params.interval;
@@ -2187,6 +2246,7 @@
 	[NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
 	[NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
 	[NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
+	[NL80211_STA_FLAG_TDLS_PEER] = { .type = NLA_FLAG },
 };
 
 static int parse_station_flags(struct genl_info *info,
@@ -2330,6 +2390,9 @@
 	if (sinfo->filled & STATION_INFO_TX_FAILED)
 		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
 			    sinfo->tx_failed);
+	if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS,
+			    sinfo->beacon_loss_count);
 	if (sinfo->filled & STATION_INFO_BSS_PARAM) {
 		bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
 		if (!bss_param)
@@ -2453,26 +2516,34 @@
 /*
  * Get vlan interface making sure it is running and on the right wiphy.
  */
-static int get_vlan(struct genl_info *info,
-		    struct cfg80211_registered_device *rdev,
-		    struct net_device **vlan)
+static struct net_device *get_vlan(struct genl_info *info,
+				   struct cfg80211_registered_device *rdev)
 {
 	struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
-	*vlan = NULL;
+	struct net_device *v;
+	int ret;
 
-	if (vlanattr) {
-		*vlan = dev_get_by_index(genl_info_net(info),
-					 nla_get_u32(vlanattr));
-		if (!*vlan)
-			return -ENODEV;
-		if (!(*vlan)->ieee80211_ptr)
-			return -EINVAL;
-		if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
-			return -EINVAL;
-		if (!netif_running(*vlan))
-			return -ENETDOWN;
+	if (!vlanattr)
+		return NULL;
+
+	v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr));
+	if (!v)
+		return ERR_PTR(-ENODEV);
+
+	if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) {
+		ret = -EINVAL;
+		goto error;
 	}
-	return 0;
+
+	if (!netif_running(v)) {
+		ret = -ENETDOWN;
+		goto error;
+	}
+
+	return v;
+ error:
+	dev_put(v);
+	return ERR_PTR(ret);
 }
 
 static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
@@ -2511,6 +2582,9 @@
 		params.ht_capa =
 			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
 
+	if (!rdev->ops->change_station)
+		return -EOPNOTSUPP;
+
 	if (parse_station_flags(info, &params))
 		return -EINVAL;
 
@@ -2522,73 +2596,84 @@
 		params.plink_state =
 		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
 
-	err = get_vlan(info, rdev, &params.vlan);
-	if (err)
-		goto out;
-
-	/* validate settings */
-	err = 0;
-
 	switch (dev->ieee80211_ptr->iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_P2P_GO:
 		/* disallow mesh-specific things */
 		if (params.plink_action)
-			err = -EINVAL;
+			return -EINVAL;
+
+		/* TDLS can't be set, ... */
+		if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+			return -EINVAL;
+		/*
+		 * ... but don't bother the driver with it. This works around
+		 * a hostapd/wpa_supplicant issue -- it always includes the
+		 * TDLS_PEER flag in the mask even for AP mode.
+		 */
+		params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+		/* accept only the listed bits */
+		if (params.sta_flags_mask &
+				~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+				  BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+				  BIT(NL80211_STA_FLAG_WME) |
+				  BIT(NL80211_STA_FLAG_MFP)))
+			return -EINVAL;
+
+		/* must be last in here for error handling */
+		params.vlan = get_vlan(info, rdev);
+		if (IS_ERR(params.vlan))
+			return PTR_ERR(params.vlan);
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
 		/* disallow things sta doesn't support */
 		if (params.plink_action)
-			err = -EINVAL;
-		if (params.vlan)
-			err = -EINVAL;
-		if (params.supported_rates &&
-		    !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
-			err = -EINVAL;
+			return -EINVAL;
 		if (params.ht_capa)
-			err = -EINVAL;
+			return -EINVAL;
 		if (params.listen_interval >= 0)
-			err = -EINVAL;
-		if (params.sta_flags_mask &
-				~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
-				  BIT(NL80211_STA_FLAG_TDLS_PEER)))
-			err = -EINVAL;
-		/* can't change the TDLS bit */
-		if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-		    (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
-			err = -EINVAL;
+			return -EINVAL;
+		/*
+		 * Don't allow userspace to change the TDLS_PEER flag,
+		 * but silently ignore attempts to change it since we
+		 * don't have state here to verify that it doesn't try
+		 * to change the flag.
+		 */
+		params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+		/* reject any changes other than AUTHORIZED */
+		if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+			return -EINVAL;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
 		/* disallow things mesh doesn't support */
 		if (params.vlan)
-			err = -EINVAL;
+			return -EINVAL;
 		if (params.ht_capa)
-			err = -EINVAL;
+			return -EINVAL;
 		if (params.listen_interval >= 0)
-			err = -EINVAL;
+			return -EINVAL;
+		/*
+		 * No special handling for TDLS here -- the userspace
+		 * mesh code doesn't have this bug.
+		 */
 		if (params.sta_flags_mask &
 				~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
 				  BIT(NL80211_STA_FLAG_MFP) |
 				  BIT(NL80211_STA_FLAG_AUTHORIZED)))
-			err = -EINVAL;
+			return -EINVAL;
 		break;
 	default:
-		err = -EINVAL;
+		return -EOPNOTSUPP;
 	}
 
-	if (err)
-		goto out;
-
-	if (!rdev->ops->change_station) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
+	/* be aware of params.vlan when changing code here */
 
 	err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params);
 
- out:
 	if (params.vlan)
 		dev_put(params.vlan);
 
@@ -2643,70 +2728,81 @@
 		params.plink_action =
 		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
 
+	if (!rdev->ops->add_station)
+		return -EOPNOTSUPP;
+
 	if (parse_station_flags(info, &params))
 		return -EINVAL;
 
-	/* parse WME attributes if sta is WME capable */
-	if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
-	    (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
-	    info->attrs[NL80211_ATTR_STA_WME]) {
-		struct nlattr *tb[NL80211_STA_WME_MAX + 1];
-		struct nlattr *nla;
+	switch (dev->ieee80211_ptr->iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
+		/* parse WME attributes if sta is WME capable */
+		if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+		    (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+		    info->attrs[NL80211_ATTR_STA_WME]) {
+			struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+			struct nlattr *nla;
 
-		nla = info->attrs[NL80211_ATTR_STA_WME];
-		err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
-				       nl80211_sta_wme_policy);
-		if (err)
-			return err;
+			nla = info->attrs[NL80211_ATTR_STA_WME];
+			err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+					       nl80211_sta_wme_policy);
+			if (err)
+				return err;
 
-		if (tb[NL80211_STA_WME_UAPSD_QUEUES])
-			params.uapsd_queues =
-			     nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
-		if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+			if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+				params.uapsd_queues =
+				     nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+			if (params.uapsd_queues &
+					~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+				return -EINVAL;
+
+			if (tb[NL80211_STA_WME_MAX_SP])
+				params.max_sp =
+				     nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+
+			if (params.max_sp &
+					~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+				return -EINVAL;
+
+			params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+		}
+		/* TDLS peers cannot be added */
+		if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 			return -EINVAL;
+		/* but don't bother the driver with it */
+		params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
 
-		if (tb[NL80211_STA_WME_MAX_SP])
-			params.max_sp =
-			     nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
-
-		if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+		/* must be last in here for error handling */
+		params.vlan = get_vlan(info, rdev);
+		if (IS_ERR(params.vlan))
+			return PTR_ERR(params.vlan);
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		/* TDLS peers cannot be added */
+		if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 			return -EINVAL;
-
-		params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+		break;
+	case NL80211_IFTYPE_STATION:
+		/* Only TDLS peers can be added */
+		if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+			return -EINVAL;
+		/* Can only add if TDLS ... */
+		if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS))
+			return -EOPNOTSUPP;
+		/* ... with external setup is supported */
+		if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+			return -EOPNOTSUPP;
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
-		return -EINVAL;
-
-	/*
-	 * Only managed stations can add TDLS peers, and only when the
-	 * wiphy supports external TDLS setup.
-	 */
-	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
-	    !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
-	      (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
-	      (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
-		return -EINVAL;
-
-	err = get_vlan(info, rdev, &params.vlan);
-	if (err)
-		goto out;
-
-	/* validate settings */
-	err = 0;
-
-	if (!rdev->ops->add_station) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
+	/* be aware of params.vlan when changing code here */
 
 	err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params);
 
- out:
 	if (params.vlan)
 		dev_put(params.vlan);
 	return err;
@@ -3127,6 +3223,8 @@
 			cur_params.dot11MeshHWMPactivePathTimeout);
 	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
 			cur_params.dot11MeshHWMPpreqMinInterval);
+	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+			cur_params.dot11MeshHWMPperrMinInterval);
 	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
 			cur_params.dot11MeshHWMPnetDiameterTraversalTime);
 	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
@@ -3161,6 +3259,7 @@
 	[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
 	[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
 	[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
+	[NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 },
 	[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
 	[NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
@@ -3235,6 +3334,9 @@
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
 			mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
 			nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
+			mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+			nla_get_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
 			dot11MeshHWMPnetDiameterTraversalTime,
 			mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
@@ -3357,6 +3459,9 @@
 
 	NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
 		cfg80211_regdomain->alpha2);
+	if (cfg80211_regdomain->dfs_region)
+		NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
+			   cfg80211_regdomain->dfs_region);
 
 	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
 	if (!nl_reg_rules)
@@ -3415,6 +3520,7 @@
 	char *alpha2 = NULL;
 	int rem_reg_rules = 0, r = 0;
 	u32 num_rules = 0, rule_idx = 0, size_of_regd;
+	u8 dfs_region = 0;
 	struct ieee80211_regdomain *rd = NULL;
 
 	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
@@ -3425,6 +3531,9 @@
 
 	alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
 
+	if (info->attrs[NL80211_ATTR_DFS_REGION])
+		dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]);
+
 	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
 			rem_reg_rules) {
 		num_rules++;
@@ -3452,6 +3561,13 @@
 	rd->alpha2[0] = alpha2[0];
 	rd->alpha2[1] = alpha2[1];
 
+	/*
+	 * Disable DFS master mode if the DFS region is not
+	 * known or supported by this kernel.
+	 */
+	if (reg_supported_dfs_region(dfs_region))
+		rd->dfs_region = dfs_region;
+
 	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
 			rem_reg_rules) {
 		nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
@@ -4359,6 +4475,9 @@
 	const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
 	int err, ssid_len, ie_len = 0;
 	bool use_mfp = false;
+	u32 flags = 0;
+	struct ieee80211_ht_cap *ht_capa = NULL;
+	struct ieee80211_ht_cap *ht_capa_mask = NULL;
 
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
@@ -4402,11 +4521,25 @@
 	if (info->attrs[NL80211_ATTR_PREV_BSSID])
 		prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
 
+	if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
+		flags |= ASSOC_REQ_DISABLE_HT;
+
+	if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+		ht_capa_mask =
+			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]);
+
+	if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+		if (!ht_capa_mask)
+			return -EINVAL;
+		ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
+	}
+
 	err = nl80211_crypto_settings(rdev, info, &crypto, 1);
 	if (!err)
 		err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
 					  ssid, ssid_len, ie, ie_len, use_mfp,
-					  &crypto);
+					  &crypto, flags, ht_capa,
+					  ht_capa_mask);
 
 	return err;
 }
@@ -4577,13 +4710,41 @@
 		ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
-	ibss.channel = ieee80211_get_channel(wiphy,
-		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+		enum nl80211_channel_type channel_type;
+
+		channel_type = nla_get_u32(
+				info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+		if (channel_type != NL80211_CHAN_NO_HT &&
+		    channel_type != NL80211_CHAN_HT20 &&
+		    channel_type != NL80211_CHAN_HT40MINUS &&
+		    channel_type != NL80211_CHAN_HT40PLUS)
+			return -EINVAL;
+
+		if (channel_type != NL80211_CHAN_NO_HT &&
+		    !(wiphy->features & NL80211_FEATURE_HT_IBSS))
+			return -EINVAL;
+
+		ibss.channel_type = channel_type;
+	} else {
+		ibss.channel_type = NL80211_CHAN_NO_HT;
+	}
+
+	ibss.channel = rdev_freq_to_chan(rdev,
+		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+		ibss.channel_type);
 	if (!ibss.channel ||
 	    ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
 	    ibss.channel->flags & IEEE80211_CHAN_DISABLED)
 		return -EINVAL;
 
+	/* Both channels should be able to initiate communication */
+	if ((ibss.channel_type == NL80211_CHAN_HT40PLUS ||
+	     ibss.channel_type == NL80211_CHAN_HT40MINUS) &&
+	    !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel,
+					  ibss.channel_type))
+		return -EINVAL;
+
 	ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
 	ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
 
@@ -4662,7 +4823,7 @@
 static int nl80211_testmode_dump(struct sk_buff *skb,
 				 struct netlink_callback *cb)
 {
-	struct cfg80211_registered_device *dev;
+	struct cfg80211_registered_device *rdev;
 	int err;
 	long phy_idx;
 	void *data = NULL;
@@ -4680,9 +4841,21 @@
 				  nl80211_policy);
 		if (err)
 			return err;
-		if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY])
-			return -EINVAL;
-		phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+		if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
+			phy_idx = nla_get_u32(
+				nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+		} else {
+			struct net_device *netdev;
+
+			err = get_rdev_dev_by_ifindex(sock_net(skb->sk),
+						      nl80211_fam.attrbuf,
+						      &rdev, &netdev);
+			if (err)
+				return err;
+			dev_put(netdev);
+			phy_idx = rdev->wiphy_idx;
+			cfg80211_unlock_rdev(rdev);
+		}
 		if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
 			cb->args[1] =
 				(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -4694,15 +4867,15 @@
 	}
 
 	mutex_lock(&cfg80211_mutex);
-	dev = cfg80211_rdev_by_wiphy_idx(phy_idx);
-	if (!dev) {
+	rdev = cfg80211_rdev_by_wiphy_idx(phy_idx);
+	if (!rdev) {
 		mutex_unlock(&cfg80211_mutex);
 		return -ENOENT;
 	}
-	cfg80211_lock_rdev(dev);
+	cfg80211_lock_rdev(rdev);
 	mutex_unlock(&cfg80211_mutex);
 
-	if (!dev->ops->testmode_dump) {
+	if (!rdev->ops->testmode_dump) {
 		err = -EOPNOTSUPP;
 		goto out_err;
 	}
@@ -4713,7 +4886,7 @@
 					   NL80211_CMD_TESTMODE);
 		struct nlattr *tmdata;
 
-		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) {
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
 			genlmsg_cancel(skb, hdr);
 			break;
 		}
@@ -4723,8 +4896,8 @@
 			genlmsg_cancel(skb, hdr);
 			break;
 		}
-		err = dev->ops->testmode_dump(&dev->wiphy, skb, cb,
-					      data, data_len);
+		err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb,
+					       data, data_len);
 		nla_nest_end(skb, tmdata);
 
 		if (err == -ENOBUFS || err == -ENOENT) {
@@ -4742,7 +4915,7 @@
 	/* see above */
 	cb->args[0] = phy_idx + 1;
  out_err:
-	cfg80211_unlock_rdev(dev);
+	cfg80211_unlock_rdev(rdev);
 	return err;
 }
 
@@ -4896,6 +5069,22 @@
 			return PTR_ERR(connkeys);
 	}
 
+	if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
+		connect.flags |= ASSOC_REQ_DISABLE_HT;
+
+	if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+		memcpy(&connect.ht_capa_mask,
+		       nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+		       sizeof(connect.ht_capa_mask));
+
+	if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+		if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+			return -EINVAL;
+		memcpy(&connect.ht_capa,
+		       nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+		       sizeof(connect.ht_capa));
+	}
+
 	err = cfg80211_connect(rdev, dev, &connect, connkeys);
 	if (err)
 		kfree(connkeys);
@@ -5083,7 +5272,8 @@
 	    duration > rdev->wiphy.max_remain_on_channel_duration)
 		return -EINVAL;
 
-	if (!rdev->ops->remain_on_channel)
+	if (!rdev->ops->remain_on_channel ||
+	    !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
 		return -EOPNOTSUPP;
 
 	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5271,12 +5461,13 @@
 	bool channel_type_valid = false;
 	u32 freq;
 	int err;
-	void *hdr;
+	void *hdr = NULL;
 	u64 cookie;
-	struct sk_buff *msg;
+	struct sk_buff *msg = NULL;
 	unsigned int wait = 0;
-	bool offchan;
-	bool no_cck;
+	bool offchan, no_cck, dont_wait_for_ack;
+
+	dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
 
 	if (!info->attrs[NL80211_ATTR_FRAME] ||
 	    !info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5295,7 +5486,7 @@
 		return -EOPNOTSUPP;
 
 	if (info->attrs[NL80211_ATTR_DURATION]) {
-		if (!rdev->ops->mgmt_tx_cancel_wait)
+		if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
 			return -EINVAL;
 		wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
 	}
@@ -5313,6 +5504,9 @@
 
 	offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
 
+	if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
+		return -EINVAL;
+
 	no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -5320,29 +5514,36 @@
 	if (chan == NULL)
 		return -EINVAL;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!msg)
-		return -ENOMEM;
+	if (!dont_wait_for_ack) {
+		msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		if (!msg)
+			return -ENOMEM;
 
-	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
-			     NL80211_CMD_FRAME);
+		hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+				     NL80211_CMD_FRAME);
 
-	if (IS_ERR(hdr)) {
-		err = PTR_ERR(hdr);
-		goto free_msg;
+		if (IS_ERR(hdr)) {
+			err = PTR_ERR(hdr);
+			goto free_msg;
+		}
 	}
+
 	err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
 				    channel_type_valid, wait,
 				    nla_data(info->attrs[NL80211_ATTR_FRAME]),
 				    nla_len(info->attrs[NL80211_ATTR_FRAME]),
-				    no_cck, &cookie);
+				    no_cck, dont_wait_for_ack, &cookie);
 	if (err)
 		goto free_msg;
 
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (msg) {
+		NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
 
-	genlmsg_end(msg, hdr);
-	return genlmsg_reply(msg, info);
+		genlmsg_end(msg, hdr);
+		return genlmsg_reply(msg, info);
+	}
+
+	return 0;
 
  nla_put_failure:
 	err = -ENOBUFS;
@@ -5540,6 +5741,11 @@
 	setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
 	setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
 
+	if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
+	    !nl80211_parse_mcast_rate(rdev, setup.mcast_rate,
+			    nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
+			return -EINVAL;
+
 	if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
 		/* parse additional setup parameters if given */
 		err = nl80211_parse_mesh_setup(info, &setup);
@@ -5832,6 +6038,91 @@
 	return err;
 }
 
+static int nl80211_register_unexpected_frame(struct sk_buff *skb,
+					     struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	if (wdev->iftype != NL80211_IFTYPE_AP &&
+	    wdev->iftype != NL80211_IFTYPE_P2P_GO)
+		return -EINVAL;
+
+	if (wdev->ap_unexpected_nlpid)
+		return -EBUSY;
+
+	wdev->ap_unexpected_nlpid = info->snd_pid;
+	return 0;
+}
+
+static int nl80211_probe_client(struct sk_buff *skb,
+				struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct sk_buff *msg;
+	void *hdr;
+	const u8 *addr;
+	u64 cookie;
+	int err;
+
+	if (wdev->iftype != NL80211_IFTYPE_AP &&
+	    wdev->iftype != NL80211_IFTYPE_P2P_GO)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	if (!rdev->ops->probe_client)
+		return -EOPNOTSUPP;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+			     NL80211_CMD_PROBE_CLIENT);
+
+	if (IS_ERR(hdr)) {
+		err = PTR_ERR(hdr);
+		goto free_msg;
+	}
+
+	addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie);
+	if (err)
+		goto free_msg;
+
+	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+	genlmsg_end(msg, hdr);
+
+	return genlmsg_reply(msg, info);
+
+ nla_put_failure:
+	err = -ENOBUFS;
+ free_msg:
+	nlmsg_free(msg);
+	return err;
+}
+
+static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
+		return -EOPNOTSUPP;
+
+	if (rdev->ap_beacons_nlpid)
+		return -EBUSY;
+
+	rdev->ap_beacons_nlpid = info->snd_pid;
+
+	return 0;
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -5859,7 +6150,8 @@
 		}
 		info->user_ptr[0] = rdev;
 	} else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
-		err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+		err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs,
+					      &rdev, &dev);
 		if (err) {
 			if (rtnl)
 				rtnl_unlock();
@@ -6387,6 +6679,39 @@
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_UNEXPECTED_FRAME,
+		.doit = nl80211_register_unexpected_frame,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_PROBE_CLIENT,
+		.doit = nl80211_probe_client,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_REGISTER_BEACONS,
+		.doit = nl80211_register_beacons,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WIPHY |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_SET_NOACK_MAP,
+		.doit = nl80211_set_noack_map,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -6639,10 +6964,7 @@
 	if (wiphy_idx_valid(request->wiphy_idx))
 		NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	rcu_read_lock();
 	genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -6678,10 +7000,7 @@
 	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
 	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -6762,10 +7081,7 @@
 	NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
 	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -6821,10 +7137,7 @@
 	if (resp_ie)
 		NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -6862,10 +7175,7 @@
 	if (resp_ie)
 		NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -6903,10 +7213,7 @@
 	if (ie)
 		NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, GFP_KERNEL);
@@ -6939,10 +7246,7 @@
 	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
 	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -6977,10 +7281,7 @@
 	if (ie_len && ie)
 		NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7019,10 +7320,7 @@
 	if (tsc)
 		NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7073,10 +7371,7 @@
 		goto nla_put_failure;
 	nla_nest_end(msg, nl_freq);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	rcu_read_lock();
 	genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -7119,10 +7414,7 @@
 	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
 		NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7193,10 +7485,7 @@
 	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
 	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7207,13 +7496,68 @@
 	nlmsg_free(msg);
 }
 
+static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
+				       const u8 *addr, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+	u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
+
+	if (!nlpid)
+		return false;
+
+	msg = nlmsg_new(100, gfp);
+	if (!msg)
+		return true;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return true;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+
+	err = genlmsg_end(msg, hdr);
+	if (err < 0) {
+		nlmsg_free(msg);
+		return true;
+	}
+
+	genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+	return true;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+	return true;
+}
+
+bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp)
+{
+	return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
+					  addr, gfp);
+}
+
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+				    const u8 *addr, gfp_t gfp)
+{
+	return __nl80211_unexpected_frame(dev,
+					  NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+					  addr, gfp);
+}
+
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
 		      struct net_device *netdev, u32 nlpid,
 		      int freq, const u8 *buf, size_t len, gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
-	int err;
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 	if (!msg)
@@ -7230,16 +7574,9 @@
 	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
 	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
 
-	err = genlmsg_end(msg, hdr);
-	if (err < 0) {
-		nlmsg_free(msg);
-		return err;
-	}
+	genlmsg_end(msg, hdr);
 
-	err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
-	if (err < 0)
-		return err;
-	return 0;
+	return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
 
  nla_put_failure:
 	genlmsg_cancel(msg, hdr);
@@ -7272,10 +7609,7 @@
 	if (ack)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
 	return;
@@ -7317,10 +7651,7 @@
 
 	nla_nest_end(msg, pinfoattr);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7362,10 +7693,7 @@
 
 	nla_nest_end(msg, rekey_attr);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7408,10 +7736,7 @@
 
 	nla_nest_end(msg, attr);
 
-	if (genlmsg_end(msg, hdr) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
+	genlmsg_end(msg, hdr);
 
 	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
 				nl80211_mlme_mcgrp.id, gfp);
@@ -7453,7 +7778,45 @@
 
 	nla_nest_end(msg, pinfoattr);
 
-	if (genlmsg_end(msg, hdr) < 0) {
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+			   u64 cookie, bool acked, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+	int err;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (acked)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+
+	err = genlmsg_end(msg, hdr);
+	if (err < 0) {
 		nlmsg_free(msg);
 		return;
 	}
@@ -7466,6 +7829,45 @@
 	genlmsg_cancel(msg, hdr);
 	nlmsg_free(msg);
 }
+EXPORT_SYMBOL(cfg80211_probe_status);
+
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+				 const u8 *frame, size_t len,
+				 int freq, gfp_t gfp)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+	u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
+
+	if (!nlpid)
+		return;
+
+	msg = nlmsg_new(len + 100, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	if (freq)
+		NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+	NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_obss_beacon);
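
[Editor's illustration, not part of the patch] Hypothetical driver hooks sketching where the two notification exports above would typically be called; the my_*() wrappers and their signatures are invented for illustration only.

static void my_probe_tx_status(struct net_device *dev, const u8 *peer,
			       u64 cookie, bool acked)
{
	/* completes a pending NL80211_CMD_PROBE_CLIENT request */
	cfg80211_probe_status(dev, peer, cookie, acked, GFP_KERNEL);
}

static void my_rx_other_bss_beacon(struct wiphy *wiphy, const u8 *frame,
				   size_t len, int freq)
{
	/* only useful when the wiphy sets WIPHY_FLAG_REPORTS_OBSS and
	 * a listener registered via NL80211_CMD_REGISTER_BEACONS */
	cfg80211_report_obss_beacon(wiphy, frame, len, freq, GFP_ATOMIC);
}
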
 
 static int nl80211_netlink_notify(struct notifier_block * nb,
 				  unsigned long state,
@@ -7480,9 +7882,12 @@
 
 	rcu_read_lock();
 
-	list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
+	list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
 		list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
 			cfg80211_mlme_unregister_socket(wdev, notify->pid);
+		if (rdev->ap_beacons_nlpid == notify->pid)
+			rdev->ap_beacons_nlpid = 0;
+	}
 
 	rcu_read_unlock();
 
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f24a1fb..12bf4d1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -117,4 +117,9 @@
 				    struct net_device *netdev, int index,
 				    const u8 *bssid, bool preauth, gfp_t gfp);
 
+bool nl80211_unexpected_frame(struct net_device *dev,
+			      const u8 *addr, gfp_t gfp);
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+				    const u8 *addr, gfp_t gfp);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 3302c56..f65feaa 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2,13 +2,22 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2008	Luis R. Rodriguez <lrodriguz@atheros.com>
+ * Copyright 2008-2011	Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+
 /**
  * DOC: Wireless regulatory infrastructure
  *
@@ -873,10 +882,22 @@
 	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
 	chan->max_antenna_gain = min(chan->orig_mag,
 		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
-	if (chan->orig_mpwr)
-		chan->max_power = min(chan->orig_mpwr,
-			(int) MBM_TO_DBM(power_rule->max_eirp));
-	else
+	if (chan->orig_mpwr) {
+		/*
+		 * Devices that have their own custom regulatory domain
+		 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+		 * passed country IE power settings.
+		 */
+		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+		    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
+			chan->max_power =
+				MBM_TO_DBM(power_rule->max_eirp);
+		} else {
+			chan->max_power = min(chan->orig_mpwr,
+				(int) MBM_TO_DBM(power_rule->max_eirp));
+		}
+	} else
 		chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
 }
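
[Editor's illustration, not part of the patch] A minimal userspace sketch of the max_power selection above, with made-up numbers; in the kernel MBM_TO_DBM(x) is simply x / 100.

#include <stdio.h>

#define MBM_TO_DBM(gain) ((gain) / 100)

int main(void)
{
	int orig_mpwr = 20;	/* dBm the channel was registered with */
	int max_eirp = 3000;	/* 30 dBm (3000 mBm) from the reg rule */
	int custom_strict_country_ie = 0;	/* 1 for the CUSTOM+STRICT case */
	int max_power;

	if (custom_strict_country_ie)
		/* follow the country IE power setting as-is */
		max_power = MBM_TO_DBM(max_eirp);
	else
		/* never exceed what the device was registered with */
		max_power = orig_mpwr < MBM_TO_DBM(max_eirp) ?
			    orig_mpwr : MBM_TO_DBM(max_eirp);

	printf("max_power = %d dBm\n", max_power);	/* prints 20 */
	return 0;
}
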
 
@@ -1139,6 +1160,8 @@
 	if (ignore_reg_update(wiphy, initiator))
 		return;
 
+	last_request->dfs_region = cfg80211_regdomain->dfs_region;
+
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		if (wiphy->bands[band])
 			handle_band(wiphy, band, initiator);
@@ -1161,9 +1184,21 @@
 static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
 {
 	struct cfg80211_registered_device *rdev;
+	struct wiphy *wiphy;
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
-		wiphy_update_regulatory(&rdev->wiphy, initiator);
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		wiphy = &rdev->wiphy;
+		wiphy_update_regulatory(wiphy, initiator);
+		/*
+		 * Regulatory updates set by CORE are ignored for custom
+		 * regulatory cards. Notify the driver of the change anyway,
+		 * as some drivers use it to restore their orig_* reg domain.
+		 */
+		if (initiator == NL80211_REGDOM_SET_BY_CORE &&
+		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+		    wiphy->reg_notifier)
+			wiphy->reg_notifier(wiphy, last_request);
+	}
 }
 
 static void handle_channel_custom(struct wiphy *wiphy,
@@ -1454,18 +1489,18 @@
 }
 
 /* This processes *all* regulatory hints */
-static void reg_process_hint(struct regulatory_request *reg_request)
+static void reg_process_hint(struct regulatory_request *reg_request,
+			     enum nl80211_reg_initiator reg_initiator)
 {
 	int r = 0;
 	struct wiphy *wiphy = NULL;
-	enum nl80211_reg_initiator initiator = reg_request->initiator;
 
 	BUG_ON(!reg_request->alpha2);
 
 	if (wiphy_idx_valid(reg_request->wiphy_idx))
 		wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
 
-	if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+	if (reg_initiator == NL80211_REGDOM_SET_BY_DRIVER &&
 	    !wiphy) {
 		kfree(reg_request);
 		return;
@@ -1475,7 +1510,7 @@
 	/* This is required so that the orig_* parameters are saved */
 	if (r == -EALREADY && wiphy &&
 	    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
-		wiphy_update_regulatory(wiphy, initiator);
+		wiphy_update_regulatory(wiphy, reg_initiator);
 		return;
 	}
 
@@ -1484,7 +1519,7 @@
 	 * source of bogus requests.
 	 */
 	if (r != -EALREADY &&
-	    reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
+	    reg_initiator == NL80211_REGDOM_SET_BY_USER)
 		schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
 }
 
@@ -1521,7 +1556,7 @@
 
 	spin_unlock(&reg_requests_lock);
 
-	reg_process_hint(reg_request);
+	reg_process_hint(reg_request, reg_request->initiator);
 
 out:
 	mutex_unlock(&reg_mutex);
@@ -1766,6 +1801,26 @@
 		REG_DBG_PRINT("Restoring regulatory settings\n");
 }
 
+static void restore_custom_reg_settings(struct wiphy *wiphy)
+{
+	struct ieee80211_supported_band *sband;
+	enum ieee80211_band band;
+	struct ieee80211_channel *chan;
+	int i;
+
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		sband = wiphy->bands[band];
+		if (!sband)
+			continue;
+		for (i = 0; i < sband->n_channels; i++) {
+			chan = &sband->channels[i];
+			chan->flags = chan->orig_flags;
+			chan->max_antenna_gain = chan->orig_mag;
+			chan->max_power = chan->orig_mpwr;
+		}
+	}
+}
+
 /*
  * Restoring regulatory settings involves ignoring any
  * possibly stale country IE information and user regulatory
@@ -1784,9 +1839,11 @@
 static void restore_regulatory_settings(bool reset_user)
 {
 	char alpha2[2];
+	char world_alpha2[2];
 	struct reg_beacon *reg_beacon, *btmp;
 	struct regulatory_request *reg_request, *tmp;
 	LIST_HEAD(tmp_reg_req_list);
+	struct cfg80211_registered_device *rdev;
 
 	mutex_lock(&cfg80211_mutex);
 	mutex_lock(&reg_mutex);
@@ -1834,11 +1891,18 @@
 
 	/* First restore to the basic regulatory settings */
 	cfg80211_regdomain = cfg80211_world_regdom;
+	world_alpha2[0] = cfg80211_regdomain->alpha2[0];
+	world_alpha2[1] = cfg80211_regdomain->alpha2[1];
+
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY)
+			restore_custom_reg_settings(&rdev->wiphy);
+	}
 
 	mutex_unlock(&reg_mutex);
 	mutex_unlock(&cfg80211_mutex);
 
-	regulatory_hint_core(cfg80211_regdomain->alpha2);
+	regulatory_hint_core(world_alpha2);
 
 	/*
 	 * This restores the ieee80211_regdom module parameter
@@ -1935,7 +1999,7 @@
 	const struct ieee80211_freq_range *freq_range = NULL;
 	const struct ieee80211_power_rule *power_rule = NULL;
 
-	pr_info("    (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
+	pr_info("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
 
 	for (i = 0; i < rd->n_reg_rules; i++) {
 		reg_rule = &rd->reg_rules[i];
@@ -1947,14 +2011,14 @@
 		 * in certain regions
 		 */
 		if (power_rule->max_antenna_gain)
-			pr_info("    (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
+			pr_info("  (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
 				freq_range->start_freq_khz,
 				freq_range->end_freq_khz,
 				freq_range->max_bandwidth_khz,
 				power_rule->max_antenna_gain,
 				power_rule->max_eirp);
 		else
-			pr_info("    (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
+			pr_info("  (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
 				freq_range->start_freq_khz,
 				freq_range->end_freq_khz,
 				freq_range->max_bandwidth_khz,
@@ -1962,6 +2026,42 @@
 	}
 }
 
+bool reg_supported_dfs_region(u8 dfs_region)
+{
+	switch (dfs_region) {
+	case NL80211_DFS_UNSET:
+	case NL80211_DFS_FCC:
+	case NL80211_DFS_ETSI:
+	case NL80211_DFS_JP:
+		return true;
+	default:
+		REG_DBG_PRINT("Ignoring unknown DFS master region: %d\n",
+			      dfs_region);
+		return false;
+	}
+}
+
+static void print_dfs_region(u8 dfs_region)
+{
+	if (!dfs_region)
+		return;
+
+	switch (dfs_region) {
+	case NL80211_DFS_FCC:
+		pr_info(" DFS Master region FCC");
+		break;
+	case NL80211_DFS_ETSI:
+		pr_info(" DFS Master region ETSI");
+		break;
+	case NL80211_DFS_JP:
+		pr_info(" DFS Master region JP");
+		break;
+	default:
+		pr_info(" DFS Master region Unknown");
+		break;
+	}
+}
+
 static void print_regdomain(const struct ieee80211_regdomain *rd)
 {
 
@@ -1989,6 +2089,7 @@
 			pr_info("Regulatory domain changed to country: %c%c\n",
 				rd->alpha2[0], rd->alpha2[1]);
 	}
+	print_dfs_region(rd->dfs_region);
 	print_rd_rules(rd);
 }
 
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 4a56799..e2aaaf5 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -1,10 +1,26 @@
 #ifndef __NET_WIRELESS_REG_H
 #define __NET_WIRELESS_REG_H
+/*
+ * Copyright 2008-2011	Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
 
 extern const struct ieee80211_regdomain *cfg80211_regdomain;
 
 bool is_world_regdom(const char *alpha2);
 bool reg_is_valid_request(const char *alpha2);
+bool reg_supported_dfs_region(u8 dfs_region);
 
 int regulatory_hint_user(const char *alpha2);
 
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
index 818222c..3279cfc 100644
--- a/net/wireless/regdb.h
+++ b/net/wireless/regdb.h
@@ -1,6 +1,22 @@
 #ifndef __REGDB_H__
 #define __REGDB_H__
 
+/*
+ * Copyright 2009 John W. Linville <linville@tuxdriver.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
 extern const struct ieee80211_regdomain *reg_regdb[];
 extern int reg_regdb_size;
 
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index dc23b31..31119e3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -355,8 +355,8 @@
 	    sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
 }
 
-static int cmp_bss(struct cfg80211_bss *a,
-		   struct cfg80211_bss *b)
+static int cmp_bss_core(struct cfg80211_bss *a,
+			struct cfg80211_bss *b)
 {
 	int r;
 
@@ -378,7 +378,15 @@
 			       b->len_information_elements);
 	}
 
-	r = memcmp(a->bssid, b->bssid, ETH_ALEN);
+	return memcmp(a->bssid, b->bssid, ETH_ALEN);
+}
+
+static int cmp_bss(struct cfg80211_bss *a,
+		   struct cfg80211_bss *b)
+{
+	int r;
+
+	r = cmp_bss_core(a, b);
 	if (r)
 		return r;
 
@@ -389,6 +397,52 @@
 		       b->len_information_elements);
 }
 
+static int cmp_hidden_bss(struct cfg80211_bss *a,
+		   struct cfg80211_bss *b)
+{
+	const u8 *ie1;
+	const u8 *ie2;
+	int i;
+	int r;
+
+	r = cmp_bss_core(a, b);
+	if (r)
+		return r;
+
+	ie1 = cfg80211_find_ie(WLAN_EID_SSID,
+			a->information_elements,
+			a->len_information_elements);
+	ie2 = cfg80211_find_ie(WLAN_EID_SSID,
+			b->information_elements,
+			b->len_information_elements);
+
+	/* The key comparator must use the same algorithm in any rb-tree
+	 * search function (order is important), otherwise the ordering of
+	 * items in the tree is broken and searches give incorrect
+	 * results. This code uses the same order as cmp_ies() does. */
+
+	/* sort missing IE before (left of) present IE */
+	if (!ie1)
+		return -1;
+	if (!ie2)
+		return 1;
+
+	/* zero-size SSID is used as an indication of the hidden bss */
+	if (!ie2[1])
+		return 0;
+
+	/* sort by length first, then by contents */
+	if (ie1[1] != ie2[1])
+		return ie2[1] - ie1[1];
+
+	/* zeroed SSID ie is another indication of a hidden bss */
+	for (i = 0; i < ie2[1]; i++)
+		if (ie2[i + 2])
+			return -1;
+
+	return 0;
+}
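
[Editor's illustration, not part of the patch] The SSID-matching tail of cmp_hidden_bss() above, pulled out into a standalone program to show which SSID IEs count as "hidden" and therefore as a match; ie[0] is the element ID, ie[1] the length.

#include <stdio.h>

/* ie1: the probe response's SSID IE, ie2: the candidate beacon's */
static int hidden_ssid_cmp(const unsigned char *ie1,
			   const unsigned char *ie2)
{
	int i;

	if (!ie1)
		return -1;		/* missing IE sorts first */
	if (!ie2)
		return 1;
	if (!ie2[1])
		return 0;		/* zero-length SSID: hidden, match */
	if (ie1[1] != ie2[1])
		return ie2[1] - ie1[1];	/* different length: different bss */
	for (i = 0; i < ie2[1]; i++)
		if (ie2[i + 2])
			return -1;	/* real, non-matching SSID */
	return 0;			/* all-zero SSID: hidden, match */
}

int main(void)
{
	unsigned char probe[]  = { 0, 5, 'g', 'u', 'e', 's', 't' };
	unsigned char hidden[] = { 0, 5, 0, 0, 0, 0, 0 };
	unsigned char other[]  = { 0, 5, 'o', 't', 'h', 'e', 'r' };

	/* prints "0 -1": the zeroed SSID matches, the real one doesn't */
	printf("%d %d\n", hidden_ssid_cmp(probe, hidden),
	       hidden_ssid_cmp(probe, other));
	return 0;
}
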
+
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
 				      struct ieee80211_channel *channel,
 				      const u8 *bssid,
@@ -505,6 +559,48 @@
 }
 
 static struct cfg80211_internal_bss *
+rb_find_hidden_bss(struct cfg80211_registered_device *dev,
+	    struct cfg80211_internal_bss *res)
+{
+	struct rb_node *n = dev->bss_tree.rb_node;
+	struct cfg80211_internal_bss *bss;
+	int r;
+
+	while (n) {
+		bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
+		r = cmp_hidden_bss(&res->pub, &bss->pub);
+
+		if (r == 0)
+			return bss;
+		else if (r < 0)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+
+	return NULL;
+}
+
+static void
+copy_hidden_ies(struct cfg80211_internal_bss *res,
+		 struct cfg80211_internal_bss *hidden)
+{
+	if (unlikely(res->pub.beacon_ies))
+		return;
+	if (WARN_ON(!hidden->pub.beacon_ies))
+		return;
+
+	res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC);
+	if (unlikely(!res->pub.beacon_ies))
+		return;
+
+	res->beacon_ies_allocated = true;
+	res->pub.len_beacon_ies = hidden->pub.len_beacon_ies;
+	memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies,
+			res->pub.len_beacon_ies);
+}
+
+static struct cfg80211_internal_bss *
 cfg80211_bss_update(struct cfg80211_registered_device *dev,
 		    struct cfg80211_internal_bss *res)
 {
@@ -607,6 +703,21 @@
 
 		kref_put(&res->ref, bss_release);
 	} else {
+		struct cfg80211_internal_bss *hidden;
+
+		/* First check whether this probe response corresponds to
+		 * a hidden bss. If so, copy the beacon ies (which carry a
+		 * nullified ssid) into this probe response entry (which
+		 * has the real ssid). This is needed mainly for PSM
+		 * support (probe responses do not contain a tim ie). */
+
+		/* TODO: The code is not trying to update existing probe
+		 * response bss entries when beacon ies are
+		 * getting changed. */
+		hidden = rb_find_hidden_bss(dev, res);
+		if (hidden)
+			copy_hidden_ies(res, hidden);
+
 		/* this "consumes" the reference */
 		list_add_tail(&res->list, &dev->bss_list);
 		rb_insert_bss(dev, res);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0acfdc9..7b9ecae 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -190,7 +190,9 @@
 					    prev_bssid,
 					    params->ssid, params->ssid_len,
 					    params->ie, params->ie_len,
-					    false, &params->crypto);
+					    false, &params->crypto,
+					    params->flags, &params->ht_capa,
+					    &params->ht_capa_mask);
 		if (err)
 			__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
 					       NULL, 0,
@@ -551,45 +553,35 @@
 EXPORT_SYMBOL(cfg80211_connect_result);
 
 void __cfg80211_roamed(struct wireless_dev *wdev,
-		       struct ieee80211_channel *channel,
-		       const u8 *bssid,
+		       struct cfg80211_bss *bss,
 		       const u8 *req_ie, size_t req_ie_len,
 		       const u8 *resp_ie, size_t resp_ie_len)
 {
-	struct cfg80211_bss *bss;
 #ifdef CONFIG_CFG80211_WEXT
 	union iwreq_data wrqu;
 #endif
-
 	ASSERT_WDEV_LOCK(wdev);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
-		return;
+		goto out;
 
 	if (wdev->sme_state != CFG80211_SME_CONNECTED)
-		return;
+		goto out;
 
 	/* internal error -- how did we get to CONNECTED w/o BSS? */
 	if (WARN_ON(!wdev->current_bss)) {
-		return;
+		goto out;
 	}
 
 	cfg80211_unhold_bss(wdev->current_bss);
 	cfg80211_put_bss(&wdev->current_bss->pub);
 	wdev->current_bss = NULL;
 
-	bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
-			       wdev->ssid, wdev->ssid_len,
-			       WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
-
-	if (WARN_ON(!bss))
-		return;
-
 	cfg80211_hold_bss(bss_from_pub(bss));
 	wdev->current_bss = bss_from_pub(bss);
 
-	nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid,
+	nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid,
 			    req_ie, req_ie_len, resp_ie, resp_ie_len,
 			    GFP_KERNEL);
 
@@ -610,11 +602,15 @@
 
 	memset(&wrqu, 0, sizeof(wrqu));
 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
-	memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+	memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
+	memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN);
 	wdev->wext.prev_bssid_valid = true;
 	wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
 #endif
+
+	return;
+out:
+	cfg80211_put_bss(bss);
 }
 
 void cfg80211_roamed(struct net_device *dev,
@@ -624,32 +620,57 @@
 		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_bss *bss;
+
+	CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
+
+	bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
+			       wdev->ssid_len, WLAN_CAPABILITY_ESS,
+			       WLAN_CAPABILITY_ESS);
+	if (WARN_ON(!bss))
+		return;
+
+	cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie,
+			    resp_ie_len, gfp);
+}
+EXPORT_SYMBOL(cfg80211_roamed);
+
+void cfg80211_roamed_bss(struct net_device *dev,
+			 struct cfg80211_bss *bss, const u8 *req_ie,
+			 size_t req_ie_len, const u8 *resp_ie,
+			 size_t resp_ie_len, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
 	CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
 
-	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
-	if (!ev)
+	if (WARN_ON(!bss))
 		return;
 
+	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+	if (!ev) {
+		cfg80211_put_bss(bss);
+		return;
+	}
+
 	ev->type = EVENT_ROAMED;
-	ev->rm.channel = channel;
-	memcpy(ev->rm.bssid, bssid, ETH_ALEN);
 	ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
 	ev->rm.req_ie_len = req_ie_len;
 	memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len);
 	ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
 	ev->rm.resp_ie_len = resp_ie_len;
 	memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len);
+	ev->rm.bss = bss;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
 	queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_roamed);
+EXPORT_SYMBOL(cfg80211_roamed_bss);
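
[Editor's illustration, not part of the patch] A hedged sketch of how a driver that has already resolved the target BSS might report a roam with the new cfg80211_roamed_bss() instead of cfg80211_roamed(); the my_report_roam() wrapper is hypothetical. Judging from the error paths above, the call takes over the reference obtained from cfg80211_get_bss().

static void my_report_roam(struct net_device *dev,
			   struct ieee80211_channel *chan, const u8 *bssid,
			   const u8 *req_ie, size_t req_ie_len,
			   const u8 *resp_ie, size_t resp_ie_len)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wdev->wiphy, chan, bssid, wdev->ssid,
			       wdev->ssid_len, WLAN_CAPABILITY_ESS,
			       WLAN_CAPABILITY_ESS);
	if (!bss)
		return;

	/* the bss reference is consumed by the roamed event */
	cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len,
			    resp_ie, resp_ie_len, GFP_KERNEL);
}
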
 
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 			     size_t ie_len, u16 reason, bool from_ap)
@@ -774,6 +795,9 @@
 		wdev->connect_keys = NULL;
 	}
 
+	cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
+				  rdev->wiphy.ht_capa_mod_mask);
+
 	if (connkeys && connkeys->def >= 0) {
 		int idx;
 		u32 cipher;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4dde429..9aa9db6 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -7,9 +7,9 @@
 #include <linux/bitops.h>
 #include <linux/etherdevice.h>
 #include <linux/slab.h>
-#include <linux/crc32.h>
 #include <net/cfg80211.h>
 #include <net/ip.h>
+#include <net/dsfield.h>
 #include "core.h"
 
 struct ieee80211_rate *
@@ -240,17 +240,6 @@
 	return 0;
 }
 
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-const unsigned char rfc1042_header[] __aligned(2) =
-	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-EXPORT_SYMBOL(rfc1042_header);
-
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-const unsigned char bridge_tunnel_header[] __aligned(2) =
-	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-EXPORT_SYMBOL(bridge_tunnel_header);
-
 unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
 {
 	unsigned int hdrlen = 24;
@@ -662,7 +651,10 @@
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		dscp = ip_hdr(skb)->tos & 0xfc;
+		dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
+		break;
+	case htons(ETH_P_IPV6):
+		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
 		break;
 	default:
 		return 0;
@@ -752,9 +744,9 @@
 				NULL);
 			break;
 		case EVENT_ROAMED:
-			__cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
-					  ev->rm.req_ie, ev->rm.req_ie_len,
-					  ev->rm.resp_ie, ev->rm.resp_ie_len);
+			__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
+					  ev->rm.req_ie_len, ev->rm.resp_ie,
+					  ev->rm.resp_ie_len);
 			break;
 		case EVENT_DISCONNECTED:
 			__cfg80211_disconnected(wdev->netdev,
@@ -1051,169 +1043,13 @@
 	return 0;
 }
 
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
-			       struct ieee802_11_elems *elems,
-			       u64 filter, u32 crc)
-{
-	size_t left = len;
-	u8 *pos = start;
-	bool calc_crc = filter != 0;
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+const unsigned char rfc1042_header[] __aligned(2) =
+	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+EXPORT_SYMBOL(rfc1042_header);
 
-	memset(elems, 0, sizeof(*elems));
-	elems->ie_start = start;
-	elems->total_len = len;
-
-	while (left >= 2) {
-		u8 id, elen;
-
-		id = *pos++;
-		elen = *pos++;
-		left -= 2;
-
-		if (elen > left)
-			break;
-
-		if (calc_crc && id < 64 && (filter & (1ULL << id)))
-			crc = crc32_be(crc, pos - 2, elen + 2);
-
-		switch (id) {
-		case WLAN_EID_SSID:
-			elems->ssid = pos;
-			elems->ssid_len = elen;
-			break;
-		case WLAN_EID_SUPP_RATES:
-			elems->supp_rates = pos;
-			elems->supp_rates_len = elen;
-			break;
-		case WLAN_EID_FH_PARAMS:
-			elems->fh_params = pos;
-			elems->fh_params_len = elen;
-			break;
-		case WLAN_EID_DS_PARAMS:
-			elems->ds_params = pos;
-			elems->ds_params_len = elen;
-			break;
-		case WLAN_EID_CF_PARAMS:
-			elems->cf_params = pos;
-			elems->cf_params_len = elen;
-			break;
-		case WLAN_EID_TIM:
-			if (elen >= sizeof(struct ieee80211_tim_ie)) {
-				elems->tim = (void *)pos;
-				elems->tim_len = elen;
-			}
-			break;
-		case WLAN_EID_IBSS_PARAMS:
-			elems->ibss_params = pos;
-			elems->ibss_params_len = elen;
-			break;
-		case WLAN_EID_CHALLENGE:
-			elems->challenge = pos;
-			elems->challenge_len = elen;
-			break;
-		case WLAN_EID_VENDOR_SPECIFIC:
-			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
-			    pos[2] == 0xf2) {
-				/* Microsoft OUI (00:50:F2) */
-
-				if (calc_crc)
-					crc = crc32_be(crc, pos - 2, elen + 2);
-
-				if (pos[3] == 1) {
-					/* OUI Type 1 - WPA IE */
-					elems->wpa = pos;
-					elems->wpa_len = elen;
-				} else if (elen >= 5 && pos[3] == 2) {
-					/* OUI Type 2 - WMM IE */
-					if (pos[4] == 0) {
-						elems->wmm_info = pos;
-						elems->wmm_info_len = elen;
-					} else if (pos[4] == 1) {
-						elems->wmm_param = pos;
-						elems->wmm_param_len = elen;
-					}
-				}
-			}
-			break;
-		case WLAN_EID_RSN:
-			elems->rsn = pos;
-			elems->rsn_len = elen;
-			break;
-		case WLAN_EID_ERP_INFO:
-			elems->erp_info = pos;
-			elems->erp_info_len = elen;
-			break;
-		case WLAN_EID_EXT_SUPP_RATES:
-			elems->ext_supp_rates = pos;
-			elems->ext_supp_rates_len = elen;
-			break;
-		case WLAN_EID_HT_CAPABILITY:
-			if (elen >= sizeof(struct ieee80211_ht_cap))
-				elems->ht_cap_elem = (void *)pos;
-			break;
-		case WLAN_EID_HT_INFORMATION:
-			if (elen >= sizeof(struct ieee80211_ht_info))
-				elems->ht_info_elem = (void *)pos;
-			break;
-		case WLAN_EID_MESH_ID:
-			elems->mesh_id = pos;
-			elems->mesh_id_len = elen;
-			break;
-		case WLAN_EID_MESH_CONFIG:
-			if (elen >= sizeof(struct ieee80211_meshconf_ie))
-				elems->mesh_config = (void *)pos;
-			break;
-		case WLAN_EID_PEER_MGMT:
-			elems->peering = pos;
-			elems->peering_len = elen;
-			break;
-		case WLAN_EID_PREQ:
-			elems->preq = pos;
-			elems->preq_len = elen;
-			break;
-		case WLAN_EID_PREP:
-			elems->prep = pos;
-			elems->prep_len = elen;
-			break;
-		case WLAN_EID_PERR:
-			elems->perr = pos;
-			elems->perr_len = elen;
-			break;
-		case WLAN_EID_RANN:
-			if (elen >= sizeof(struct ieee80211_rann_ie))
-				elems->rann = (void *)pos;
-			break;
-		case WLAN_EID_CHANNEL_SWITCH:
-			elems->ch_switch_elem = pos;
-			elems->ch_switch_elem_len = elen;
-			break;
-		case WLAN_EID_QUIET:
-			if (!elems->quiet_elem) {
-				elems->quiet_elem = pos;
-				elems->quiet_elem_len = elen;
-			}
-			elems->num_of_quiet_elem++;
-			break;
-		case WLAN_EID_COUNTRY:
-			elems->country_elem = pos;
-			elems->country_elem_len = elen;
-			break;
-		case WLAN_EID_PWR_CONSTRAINT:
-			elems->pwr_constr_elem = pos;
-			elems->pwr_constr_elem_len = elen;
-			break;
-		case WLAN_EID_TIMEOUT_INTERVAL:
-			elems->timeout_int = pos;
-			elems->timeout_int_len = elen;
-			break;
-		default:
-			break;
-		}
-
-		left -= elen;
-		pos += elen;
-	}
-
-	return crc;
-}
-EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+const unsigned char bridge_tunnel_header[] __aligned(2) =
+	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+EXPORT_SYMBOL(bridge_tunnel_header);
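
The & 0xfc in the classification hunk above keeps only the six DSCP bits of the DS field and discards the two ECN bits. A tiny standalone illustration of that masking, using the EF per-hop behaviour as the example value:

    #include <stdio.h>

    int main(void)
    {
            unsigned char dsfield = 0xb8;           /* IPv4 TOS / IPv6 traffic class byte, EF */
            unsigned char dscp = dsfield & 0xfc;    /* drop the two ECN bits */

            printf("masked field 0x%02x, DSCP %d\n", dscp, dscp >> 2);
            return 0;
    }
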
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6897436..3c24eb9 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -819,12 +819,24 @@
 				 struct iw_freq *freq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct ieee80211_channel *chan;
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_STATION:
 		return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
 	case NL80211_IFTYPE_ADHOC:
 		return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
+	case NL80211_IFTYPE_MONITOR:
+		if (!rdev->ops->get_channel)
+			return -EINVAL;
+
+		chan = rdev->ops->get_channel(wdev->wiphy);
+		if (!chan)
+			return -EINVAL;
+		freq->m = chan->center_freq;
+		freq->e = 6;
+		return 0;
 	default:
 		if (!wdev->channel)
 			return -EINVAL;
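
Wireless-extensions frequencies are reported as a mantissa/exponent pair, m x 10^e Hz, which is why the monitor case above stores the channel's center_freq (in MHz) in freq->m with freq->e = 6. A one-line check of that encoding, using channel 6 as an example:

    #include <stdio.h>

    int main(void)
    {
            int m = 2437;   /* center_freq of 2.4 GHz channel 6, in MHz */
            int e = 6;

            printf("%d x 10^%d = %.0f Hz\n", m, e, m * 1e6); /* m * 1e6 == m x 10^e for e = 6 */
            return 0;
    }
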
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 3e16c6a..a306bc6 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -232,7 +232,7 @@
 		return NOTIFY_DONE;
 
 	if (dev->type == ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
 	 || dev->type == ARPHRD_ETHER
 #endif
 	 ) {
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index fa2b418..f0ce862 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -161,7 +161,7 @@
 		*ptr = X25_IFACE_CONNECT;
 		break;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
 	case ARPHRD_ETHER:
 		return;
 #endif
@@ -180,7 +180,7 @@
 	struct sk_buff *skb;
 	unsigned char *ptr;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
 	if (nb->dev->type == ARPHRD_ETHER)
 		return;
 #endif
@@ -213,7 +213,7 @@
 		*dptr = X25_IFACE_DATA;
 		break;
 
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
 	case ARPHRD_ETHER:
 		kfree_skb(skb);
 		return;
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 97d77c5..cf63662 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -134,7 +134,7 @@
 
 	if (dev &&
 	    (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
 					&& dev->type != ARPHRD_ETHER
 #endif
 					))){
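
IS_ENABLED(CONFIG_FOO), from <linux/kconfig.h>, evaluates to 1 when the option is built in or modular, which is exactly what the open-coded test it replaces throughout these hunks was checking:

    #include <linux/kconfig.h>

    /* The two preprocessor tests below are equivalent: both hold when
     * CONFIG_LLC=y or CONFIG_LLC=m. */
    #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
    /* old spelling */
    #endif

    #if IS_ENABLED(CONFIG_LLC)
    /* new spelling */
    #endif
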
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2118d64..7661576 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -61,8 +61,8 @@
 {
 	const struct flowi4 *fl4 = &fl->u.ip4;
 
-	return  addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
-		addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
+	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
+		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
@@ -1340,7 +1340,7 @@
 	case AF_INET:
 		dst_ops = &net->xfrm.xfrm4_dst_ops;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		dst_ops = &net->xfrm.xfrm6_dst_ops;
 		break;
@@ -1499,7 +1499,7 @@
 		goto free_dst;
 
 	/* Copy neighbour for reachability confirmation */
-	dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
+	dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
 
 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
 	xfrm_init_pmtu(dst_prev);
@@ -2276,8 +2276,6 @@
 {
 	struct dst_entry *head, *next;
 
-	flow_cache_flush();
-
 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
 	head = xfrm_policy_sk_bundles;
 	xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@
 	}
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+	flow_cache_flush();
+	__xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+	flow_cache_flush_deferred();
+	__xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
 	do {
@@ -2422,7 +2432,7 @@
 		if (likely(dst_ops->neigh_lookup == NULL))
 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
 		if (likely(afinfo->garbage_collect == NULL))
-			afinfo->garbage_collect = __xfrm_garbage_collect;
+			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
 		xfrm_policy_afinfo[afinfo->family] = afinfo;
 	}
 	write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2435,7 +2445,7 @@
 		case AF_INET:
 			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
 			break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		case AF_INET6:
 			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
 			break;
@@ -2485,7 +2495,7 @@
 	afinfo = xfrm_policy_afinfo[AF_INET];
 	if (afinfo)
 		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	afinfo = xfrm_policy_afinfo[AF_INET6];
 	if (afinfo)
 		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
@@ -2516,7 +2526,7 @@
 
 	switch (event) {
 	case NETDEV_DOWN:
-		__xfrm_garbage_collect(dev_net(dev));
+		xfrm_garbage_collect(dev_net(dev));
 	}
 	return NOTIFY_DONE;
 }
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9414b9c..5b228f9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1035,16 +1035,12 @@
 			break;
 
 		case AF_INET6:
-			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
-				       (const struct in6_addr *)daddr);
-			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
-				       (const struct in6_addr *)saddr);
+			*(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
+			*(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
 			x->sel.prefixlen_d = 128;
 			x->sel.prefixlen_s = 128;
-			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
-				       (const struct in6_addr *)saddr);
-			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
-				       (const struct in6_addr *)daddr);
+			*(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
+			*(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
 			break;
 		}
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d0a42df..e0d747a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -28,7 +28,7 @@
 #include <net/netlink.h>
 #include <net/ah.h>
 #include <asm/uaccess.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #endif
 
@@ -150,7 +150,7 @@
 		break;
 
 	case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		break;
 #else
 		err = -EAFNOSUPPORT;
@@ -201,7 +201,7 @@
 			goto out;
 		break;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case IPPROTO_DSTOPTS:
 	case IPPROTO_ROUTING:
 		if (attrs[XFRMA_ALG_COMP]	||
@@ -1160,7 +1160,7 @@
 		break;
 
 	case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		break;
 #else
 		return  -EAFNOSUPPORT;
@@ -1231,7 +1231,7 @@
 		switch (ut[i].family) {
 		case AF_INET:
 			break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		case AF_INET6:
 			break;
 #endif
@@ -2604,7 +2604,7 @@
 			return NULL;
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		if (opt != IPV6_XFRM_POLICY) {
 			*dir = -EOPNOTSUPP;
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ba573fe..914833d 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -60,8 +60,8 @@
 	    --directory=$(srctree) --directory=$(objtree)           \
 	    --output $(obj)/config.pot
 	$(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
-	$(Q)ln -fs Kconfig.x86 arch/um/Kconfig
-	$(Q)(for i in `ls $(srctree)/arch/*/Kconfig`;    \
+	$(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
+	    $(srctree)/arch/*/um/Kconfig`;               \
 	    do                                           \
 		echo "  GEN $$i";                        \
 		$(obj)/kxgettext $$i                     \
@@ -69,7 +69,6 @@
 	    done )
 	$(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
 	    --output $(obj)/linux.pot
-	$(Q)rm -f $(srctree)/arch/um/Kconfig
 	$(Q)rm -f $(obj)/config.pot
 
 PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index f936d1f..363ab46 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -880,6 +880,74 @@
 	return 1;
 }
 
+/*
+ * Append a match expression for a single masked hex digit.
+ * outp points to a pointer to the character at which to append.
+ *	*outp is updated on return to point just after the appended text,
+ *	to facilitate further appending.
+ */
+static void append_nibble_mask(char **outp,
+			       unsigned int nibble, unsigned int mask)
+{
+	char *p = *outp;
+	unsigned int i;
+
+	switch (mask) {
+	case 0:
+		*p++ = '?';
+		break;
+
+	case 0xf:
+		p += sprintf(p, "%X",  nibble);
+		break;
+
+	default:
+		/*
+		 * Dumbly emit a match pattern for all possible matching
+		 * digits.  This could be improved in some cases using ranges,
+		 * but it has the advantage of being trivially correct, and is
+		 * often optimal.
+		 */
+		*p++ = '[';
+		for (i = 0; i < 0x10; i++)
+			if ((i & mask) == nibble)
+				p += sprintf(p, "%X", i);
+		*p++ = ']';
+	}
+
+	/* Ensure that the string remains NUL-terminated: */
+	*p = '\0';
+
+	/* Advance the caller's end-of-string pointer: */
+	*outp = p;
+}
+
+/*
+ * looks like: "amba:dN"
+ *
+ * N is exactly 8 digits, where each is an upper-case hex digit, or
+ *	a ? or [] pattern matching exactly one digit.
+ */
+static int do_amba_entry(const char *filename,
+			 struct amba_id *id, char *alias)
+{
+	unsigned int digit;
+	char *p = alias;
+
+	if ((id->id & id->mask) != id->id)
+		fatal("%s: Masked-off bit(s) of AMBA device ID are non-zero: "
+		      "id=0x%08X, mask=0x%08X.  Please fix this driver.\n",
+		      filename, id->id, id->mask);
+
+	p += sprintf(alias, "amba:d");
+	for (digit = 0; digit < 8; digit++)
+		append_nibble_mask(&p,
+				   (id->id >> (4 * (7 - digit))) & 0xf,
+				   (id->mask >> (4 * (7 - digit))) & 0xf);
+
+	return 1;
+}
+
 /* Ignore any prefix, eg. some architectures prepend _ */
 static inline int sym_is(const char *symbol, const char *name)
 {
@@ -1047,6 +1115,10 @@
 		do_table(symval, sym->st_size,
 			sizeof(struct isapnp_device_id), "isa",
 			do_isapnp_entry, mod);
+	else if (sym_is(symname, "__mod_amba_device_table"))
+		do_table(symval, sym->st_size,
+			sizeof(struct amba_id), "amba",
+			do_amba_entry, mod);
 	free(zeros);
 }
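
For a partially significant digit, append_nibble_mask() enumerates every hex digit compatible with the mask into a character class. A small standalone rendition of the same logic (the main() driver is only illustrative) shows that nibble 0x4 under mask 0xc expands to "[4567]":

    #include <stdio.h>

    static void append_nibble_mask(char **outp, unsigned int nibble, unsigned int mask)
    {
            char *p = *outp;
            unsigned int i;

            switch (mask) {
            case 0:
                    *p++ = '?';                     /* digit fully masked off */
                    break;
            case 0xf:
                    p += sprintf(p, "%X", nibble);  /* digit fully significant */
                    break;
            default:
                    *p++ = '[';                     /* partial mask: character class */
                    for (i = 0; i < 0x10; i++)
                            if ((i & mask) == nibble)
                                    p += sprintf(p, "%X", i);
                    *p++ = ']';
            }
            *p = '\0';
            *outp = p;
    }

    int main(void)
    {
            char buf[64], *p = buf;

            append_nibble_mask(&p, 0x4, 0xc);       /* writes "[4567]" */
            puts(buf);
            return 0;
    }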
 
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index bc6aa00..87bf080 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -141,7 +141,7 @@
 help: FORCE
 	@echo '  rpm-pkg             - Build both source and binary RPM kernel packages'
 	@echo '  binrpm-pkg          - Build only the binary kernel package'
-	@echo '  deb-pkg             - Build the kernel as an deb package'
+	@echo '  deb-pkg             - Build the kernel as a deb package'
 	@echo '  tar-pkg             - Build the kernel as an uncompressed tarball'
 	@echo '  targz-pkg           - Build the kernel as a gzip compressed tarball'
 	@echo '  tarbz2-pkg          - Build the kernel as a bzip2 compressed tarball'
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 69ddb47..e39df6d 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -165,7 +165,7 @@
  *
  * Used aafs_remove to remove entries created with this fn.
  */
-static int __init aafs_create(const char *name, int mask,
+static int __init aafs_create(const char *name, umode_t mask,
 			      const struct file_operations *fops)
 {
 	struct dentry *dentry;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 3783202..2c0a0ff 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -262,7 +262,7 @@
 }
 
 static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
-			       int mode)
+			       umode_t mode)
 {
 	return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
 				  S_IFDIR);
@@ -274,7 +274,7 @@
 }
 
 static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
-			       int mode, unsigned int dev)
+			       umode_t mode, unsigned int dev)
 {
 	return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
 }
@@ -344,13 +344,12 @@
 	return error;
 }
 
-static int apparmor_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
-			       mode_t mode)
+static int apparmor_path_chmod(struct path *path, umode_t mode)
 {
-	if (!mediated_filesystem(dentry->d_inode))
+	if (!mediated_filesystem(path->dentry->d_inode))
 		return 0;
 
-	return common_perm_mnt_dentry(OP_CHMOD, mnt, dentry, AA_MAY_CHMOD);
+	return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
 }
 
 static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid)
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index b566eba..9d070a7 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/magic.h>
-#include <linux/mnt_namespace.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/nsproxy.h>
diff --git a/security/capability.c b/security/capability.c
index 2984ea4..3b5883b 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -125,7 +125,7 @@
 }
 
 static int cap_inode_create(struct inode *inode, struct dentry *dentry,
-			    int mask)
+			    umode_t mask)
 {
 	return 0;
 }
@@ -148,7 +148,7 @@
 }
 
 static int cap_inode_mkdir(struct inode *inode, struct dentry *dentry,
-			   int mask)
+			   umode_t mask)
 {
 	return 0;
 }
@@ -159,7 +159,7 @@
 }
 
 static int cap_inode_mknod(struct inode *inode, struct dentry *dentry,
-			   int mode, dev_t dev)
+			   umode_t mode, dev_t dev)
 {
 	return 0;
 }
@@ -235,13 +235,13 @@
 }
 
 #ifdef CONFIG_SECURITY_PATH
-static int cap_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+static int cap_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
 			  unsigned int dev)
 {
 	return 0;
 }
 
-static int cap_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
+static int cap_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
 {
 	return 0;
 }
@@ -279,8 +279,7 @@
 	return 0;
 }
 
-static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
-			  mode_t mode)
+static int cap_path_chmod(struct path *path, umode_t mode)
 {
 	return 0;
 }
diff --git a/security/inode.c b/security/inode.c
index c4df2fb..90a70a6 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -56,7 +56,7 @@
 	.llseek =	noop_llseek,
 };
 
-static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *get_inode(struct super_block *sb, umode_t mode, dev_t dev)
 {
 	struct inode *inode = new_inode(sb);
 
@@ -85,7 +85,7 @@
 
 /* SMP-safe */
 static int mknod(struct inode *dir, struct dentry *dentry,
-			 int mode, dev_t dev)
+			 umode_t mode, dev_t dev)
 {
 	struct inode *inode;
 	int error = -ENOMEM;
@@ -102,7 +102,7 @@
 	return error;
 }
 
-static int mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int res;
 
@@ -113,7 +113,7 @@
 	return res;
 }
 
-static int create(struct inode *dir, struct dentry *dentry, int mode)
+static int create(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	mode = (mode & S_IALLUGO) | S_IFREG;
 	return mknod(dir, dentry, mode, 0);
@@ -145,7 +145,7 @@
 	.kill_sb =	kill_litter_super,
 };
 
-static int create_by_name(const char *name, mode_t mode,
+static int create_by_name(const char *name, umode_t mode,
 			  struct dentry *parent,
 			  struct dentry **dentry)
 {
@@ -159,12 +159,12 @@
 	 * have around.
 	 */
 	if (!parent)
-		parent = mount->mnt_sb->s_root;
+		parent = mount->mnt_root;
 
 	mutex_lock(&parent->d_inode->i_mutex);
 	*dentry = lookup_one_len(name, parent, strlen(name));
 	if (!IS_ERR(*dentry)) {
-		if ((mode & S_IFMT) == S_IFDIR)
+		if (S_ISDIR(mode))
 			error = mkdir(parent->d_inode, *dentry, mode);
 		else
 			error = create(parent->d_inode, *dentry, mode);
@@ -205,7 +205,7 @@
  * If securityfs is not enabled in the kernel, the value %-ENODEV is
  * returned.
  */
-struct dentry *securityfs_create_file(const char *name, mode_t mode,
+struct dentry *securityfs_create_file(const char *name, umode_t mode,
 				   struct dentry *parent, void *data,
 				   const struct file_operations *fops)
 {
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 5dd5b140..8738def 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -27,20 +27,35 @@
 
 struct crypto_shash *hmac_tfm;
 
+static DEFINE_MUTEX(mutex);
+
 static struct shash_desc *init_desc(void)
 {
 	int rc;
 	struct shash_desc *desc;
 
 	if (hmac_tfm == NULL) {
+		mutex_lock(&mutex);
+		if (hmac_tfm)
+			goto out;
 		hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(hmac_tfm)) {
 			pr_err("Can not allocate %s (reason: %ld)\n",
 			       evm_hmac, PTR_ERR(hmac_tfm));
 			rc = PTR_ERR(hmac_tfm);
 			hmac_tfm = NULL;
+			mutex_unlock(&mutex);
 			return ERR_PTR(rc);
 		}
+		rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+		if (rc) {
+			crypto_free_shash(hmac_tfm);
+			hmac_tfm = NULL;
+			mutex_unlock(&mutex);
+			return ERR_PTR(rc);
+		}
+out:
+		mutex_unlock(&mutex);
 	}
 
 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@
 	desc->tfm = hmac_tfm;
 	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
-	if (rc)
-		goto out;
 	rc = crypto_shash_init(desc);
-out:
 	if (rc) {
 		kfree(desc);
 		return ERR_PTR(rc);
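
The reworked init_desc() allocates and keys hmac_tfm exactly once: an unlocked fast-path check, then a second check under the mutex so that only the first caller performs the setup while later callers reuse the existing tfm. A stripped-down userspace sketch of that check/lock/re-check shape (illustrative only; a strictly portable version would also need atomic ordering on the unlocked read):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *tfm;                       /* stands in for hmac_tfm */

    static void *setup(void)                /* stands in for alloc_shash() + setkey() */
    {
            return malloc(1);
    }

    static void *get_tfm(void)
    {
            if (!tfm) {
                    pthread_mutex_lock(&lock);
                    if (!tfm)               /* another caller may have finished setup already */
                            tfm = setup();
                    pthread_mutex_unlock(&lock);
            }
            return tfm;
    }

    int main(void)
    {
            return get_tfm() ? 0 : 1;
    }
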
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 893af8a..7bd6f13 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -114,19 +114,20 @@
 	int offset, ret = 0;
 	struct ipv6hdr *ip6;
 	u8 nexthdr;
+	__be16 frag_off;
 
 	ip6 = ipv6_hdr(skb);
 	if (ip6 == NULL)
 		return -EINVAL;
-	ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
-	ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+	ad->u.net.v6info.saddr = ip6->saddr;
+	ad->u.net.v6info.daddr = ip6->daddr;
 	ret = 0;
 	/* IPv6 can have several extension header before the Transport header
 	 * skip them */
 	offset = skb_network_offset(skb);
 	offset += sizeof(*ip6);
 	nexthdr = ip6->nexthdr;
-	offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
 	if (offset < 0)
 		return 0;
 	if (proto)
diff --git a/security/security.c b/security/security.c
index 0c6cc69..214502c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -381,14 +381,14 @@
 				     void **value, size_t *len)
 {
 	if (unlikely(IS_PRIVATE(inode)))
-		return 0;
+		return -EOPNOTSUPP;
 	return security_ops->inode_init_security(inode, dir, qstr, name, value,
 						 len);
 }
 EXPORT_SYMBOL(security_old_inode_init_security);
 
 #ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
 			unsigned int dev)
 {
 	if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
@@ -397,7 +397,7 @@
 }
 EXPORT_SYMBOL(security_path_mknod);
 
-int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
 {
 	if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
 		return 0;
@@ -454,12 +454,11 @@
 	return security_ops->path_truncate(path);
 }
 
-int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
-			mode_t mode)
+int security_path_chmod(struct path *path, umode_t mode)
 {
-	if (unlikely(IS_PRIVATE(dentry->d_inode)))
+	if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
 		return 0;
-	return security_ops->path_chmod(dentry, mnt, mode);
+	return security_ops->path_chmod(path, mode);
 }
 
 int security_path_chown(struct path *path, uid_t uid, gid_t gid)
@@ -475,7 +474,7 @@
 }
 #endif
 
-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	if (unlikely(IS_PRIVATE(dir)))
 		return 0;
@@ -506,7 +505,7 @@
 	return security_ops->inode_symlink(dir, dentry, old_name);
 }
 
-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	if (unlikely(IS_PRIVATE(dir)))
 		return 0;
@@ -521,7 +520,7 @@
 	return security_ops->inode_rmdir(dir, dentry);
 }
 
-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	if (unlikely(IS_PRIVATE(dir)))
 		return 0;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 1126c10..7cd4c3a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1090,7 +1090,7 @@
 			return SECCLASS_NETLINK_ROUTE_SOCKET;
 		case NETLINK_FIREWALL:
 			return SECCLASS_NETLINK_FIREWALL_SOCKET;
-		case NETLINK_INET_DIAG:
+		case NETLINK_SOCK_DIAG:
 			return SECCLASS_NETLINK_TCPDIAG_SOCKET;
 		case NETLINK_NFLOG:
 			return SECCLASS_NETLINK_NFLOG_SOCKET;
@@ -1740,7 +1740,7 @@
 {
 	u32 av = 0;
 
-	if ((mode & S_IFMT) != S_IFDIR) {
+	if (!S_ISDIR(mode)) {
 		if (mask & MAY_EXEC)
 			av |= FILE__EXECUTE;
 		if (mask & MAY_READ)
@@ -2507,7 +2507,7 @@
 	const struct cred *cred = current_cred();
 
 	if (flags & MS_REMOUNT)
-		return superblock_has_perm(cred, path->mnt->mnt_sb,
+		return superblock_has_perm(cred, path->dentry->d_sb,
 					   FILESYSTEM__REMOUNT, NULL);
 	else
 		return path_has_perm(cred, path, FILE__MOUNTON);
@@ -2598,7 +2598,7 @@
 	return 0;
 }
 
-static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int mask)
+static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	return may_create(dir, dentry, SECCLASS_FILE);
 }
@@ -2618,7 +2618,7 @@
 	return may_create(dir, dentry, SECCLASS_LNK_FILE);
 }
 
-static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, int mask)
+static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mask)
 {
 	return may_create(dir, dentry, SECCLASS_DIR);
 }
@@ -2628,7 +2628,7 @@
 	return may_link(dir, dentry, MAY_RMDIR);
 }
 
-static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	return may_create(dir, dentry, inode_mode_to_security_class(mode));
 }
@@ -3561,19 +3561,20 @@
 	u8 nexthdr;
 	int ret = -EINVAL, offset;
 	struct ipv6hdr _ipv6h, *ip6;
+	__be16 frag_off;
 
 	offset = skb_network_offset(skb);
 	ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
 	if (ip6 == NULL)
 		goto out;
 
-	ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
-	ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+	ad->u.net.v6info.saddr = ip6->saddr;
+	ad->u.net.v6info.daddr = ip6->daddr;
 	ret = 0;
 
 	nexthdr = ip6->nexthdr;
 	offset += sizeof(_ipv6h);
-	offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
 	if (offset < 0)
 		goto out;
 
@@ -3871,7 +3872,7 @@
 		if (family == PF_INET)
 			ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
 		else
-			ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
+			ad.u.net.v6info.saddr = addr6->sin6_addr;
 
 		err = avc_has_perm(sksec->sid, sid,
 				   sksec->sclass, node_perm, &ad);
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 3bf46ab..8636585 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -220,7 +220,7 @@
 	case PF_INET6:
 		ret = security_node_sid(PF_INET6,
 					addr, sizeof(struct in6_addr), sid);
-		ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
+		new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
 		break;
 	default:
 		BUG();
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 0b62bd1..7b9eb1f 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -123,7 +123,9 @@
 	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
 		struct sel_netport *tail;
 		tail = list_entry(
-			rcu_dereference(sel_netport_hash[idx].list.prev),
+			rcu_dereference_protected(
+				sel_netport_hash[idx].list.prev,
+				lockdep_is_held(&sel_netport_lock)),
 			struct sel_netport, list);
 		list_del_rcu(&tail->list);
 		kfree_rcu(tail, rcu);
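
rcu_dereference_protected() is for update-side code that already holds the lock protecting the pointer, and the lockdep_is_held() condition lets lockdep verify that; plain rcu_dereference() is reserved for read-side critical sections. A hedged kernel-style sketch of the same removal pattern, with my_lock and my_list standing in for sel_netport_lock and the hash bucket list:

    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);
    static LIST_HEAD(my_list);

    struct my_entry {
            struct list_head list;
            struct rcu_head rcu;
    };

    static void my_trim_tail(void)          /* assumes the list is non-empty */
    {
            struct my_entry *tail;

            spin_lock(&my_lock);
            tail = list_entry(rcu_dereference_protected(my_list.prev,
                                    lockdep_is_held(&my_lock)),
                              struct my_entry, list);
            list_del_rcu(&tail->list);
            kfree_rcu(tail, rcu);
            spin_unlock(&my_lock);
    }
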
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 7db62b4..e8af5b0b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -406,7 +406,7 @@
 static int smack_sb_mount(char *dev_name, struct path *path,
 			  char *type, unsigned long flags, void *data)
 {
-	struct superblock_smack *sbp = path->mnt->mnt_sb->s_security;
+	struct superblock_smack *sbp = path->dentry->d_sb->s_security;
 	struct smk_audit_info ad;
 
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
@@ -435,7 +435,7 @@
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
 	smk_ad_setfield_u_fs_path(&ad, path);
 
-	sbp = mnt->mnt_sb->s_security;
+	sbp = path.dentry->d_sb->s_security;
 	return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
 }
 
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 075c3a6..5ca47ea 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -112,7 +112,7 @@
  *
  * Returns file type string.
  */
-static inline const char *tomoyo_filetype(const mode_t mode)
+static inline const char *tomoyo_filetype(const umode_t mode)
 {
 	switch (mode & S_IFMT) {
 	case S_IFREG:
@@ -180,7 +180,7 @@
 	for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
 		struct tomoyo_mini_stat *stat;
 		unsigned int dev;
-		mode_t mode;
+		umode_t mode;
 		if (!obj->stat_valid[i])
 			continue;
 		stat = &obj->stat[i];
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index ed311d7..deeab7b 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -564,7 +564,7 @@
 	uid_t uid;
 	gid_t gid;
 	ino_t ino;
-	mode_t mode;
+	umode_t mode;
 	dev_t dev;
 	dev_t rdev;
 };
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index d9f3ced..80a09c3 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -4,15 +4,8 @@
  * Copyright (C) 2005-2011  NTT DATA CORPORATION
  */
 
-#include <linux/types.h>
-#include <linux/mount.h>
-#include <linux/mnt_namespace.h>
-#include <linux/fs_struct.h>
-#include <linux/magic.h>
-#include <linux/slab.h>
-#include <net/sock.h>
 #include "common.h"
-#include "../../fs/internal.h"
+#include <linux/magic.h>
 
 /**
  * tomoyo_encode2 - Encode binary string to ascii string.
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 2672ac4..482b2a5 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -224,7 +224,7 @@
  *
  * Returns nothing.
  */
-static void __init tomoyo_create_entry(const char *name, const mode_t mode,
+static void __init tomoyo_create_entry(const char *name, const umode_t mode,
 				       struct dentry *parent, const u8 key)
 {
 	securityfs_create_file(name, mode, parent, ((u8 *) NULL) + key,
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 4b327b6..620d37c 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -186,7 +186,7 @@
  * Returns 0 on success, negative value otherwise.
  */
 static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
-			     int mode)
+			     umode_t mode)
 {
 	struct path path = { parent->mnt, dentry };
 	return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
@@ -234,7 +234,7 @@
  * Returns 0 on success, negative value otherwise.
  */
 static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
-			     int mode, unsigned int dev)
+			     umode_t mode, unsigned int dev)
 {
 	struct path path = { parent->mnt, dentry };
 	int type = TOMOYO_TYPE_CREATE;
@@ -353,17 +353,14 @@
 /**
  * tomoyo_path_chmod - Target for security_path_chmod().
  *
- * @dentry: Pointer to "struct dentry".
- * @mnt:    Pointer to "struct vfsmount".
- * @mode:   DAC permission mode.
+ * @path: Pointer to "struct path".
+ * @mode: DAC permission mode.
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
-			     mode_t mode)
+static int tomoyo_path_chmod(struct path *path, umode_t mode)
 {
-	struct path path = { mnt, dentry };
-	return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, &path,
+	return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
 				       mode & S_IALLUGO);
 }
 
diff --git a/sound/aoa/codecs/Kconfig b/sound/aoa/codecs/Kconfig
index 808eb11..0c68e32 100644
--- a/sound/aoa/codecs/Kconfig
+++ b/sound/aoa/codecs/Kconfig
@@ -7,14 +7,6 @@
 	codec chip found in the latest Apple machines
 	(most of those with digital audio output).
 
-#config SND_AOA_TOPAZ
-#	tristate "support Topaz chips"
-#	---help---
-#	This option enables support for the Topaz (CS84xx)
-#	codec chips found in the latest Apple machines,
-#	these chips do the digital input and output on
-#	some PowerMacs.
-
 config SND_AOA_TAS
 	tristate "support TAS chips"
 	select I2C
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index e518d38..b37b702a 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -1097,6 +1097,8 @@
 	{ 0, 0 },
 };
 
+MODULE_DEVICE_TABLE(amba, aaci_ids);
+
 static struct amba_driver aaci_driver = {
 	.drv		= {
 		.name	= DRIVER_NAME,
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6e5adde..73516f6 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -899,6 +899,10 @@
 		/* AC97 v2.2 specifications says minimum 1 us. */
 		udelay(2);
 		gpio_set_value(chip->reset_pin, 1);
+	} else {
+		ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+		udelay(2);
+		ac97c_writel(chip, MR, AC97C_MR_ENA);
 	}
 }
 
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 475455c..c15682a 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -5,7 +5,6 @@
 config SND_PCM
 	tristate
 	select SND_TIMER
-	select GCD
 
 config SND_HWDEP
 	tristate
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 6c9e8e8..5849b12 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -521,7 +521,7 @@
 
 config SOUND_VIDC
 	tristate "VIDC 16-bit sound"
-	depends on ARM && (ARCH_ACORN || ARCH_CLPS7500)
+	depends on ARM && ARCH_ACORN
 	help
 	  16-bit support for the VIDC onboard sound hardware found on Acorn
 	  machines.
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7d98240..c2f79e6 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2507,6 +2507,7 @@
 	SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
@@ -2970,7 +2971,8 @@
 	/* SCH */
 	{ PCI_DEVICE(0x8086, 0x811b),
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-	  AZX_DCAPS_BUFSIZE},
+	  AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+	/* ICH */
 	{ PCI_DEVICE(0x8086, 0x2668),
 	  .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
 	  AZX_DCAPS_BUFSIZE },  /* ICH6 */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index eeb25d52..616678f 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4929,6 +4929,12 @@
 				set_hp_led_gpio(codec);
 				return 1;
 			}
+			/* BIOS bug: unfilled OEM string */
+			if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+				set_hp_led_gpio(codec);
+				spec->gpio_led_polarity = 1;
+				return 1;
+			}
 		}
 
 		/*
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4584514..fa787d4 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -33,7 +33,7 @@
 	select SND_SOC_CX20442
 	select SND_SOC_DA7210 if I2C
 	select SND_SOC_DFBMCS320
-	select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+	select SND_SOC_JZ4740_CODEC
 	select SND_SOC_LM4857 if I2C
 	select SND_SOC_MAX98088 if I2C
 	select SND_SOC_MAX98095 if I2C
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index e373f8f..3e1f4e1 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index bfdc523..d3b0a20 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -235,6 +235,7 @@
 	switch (snd_pcm_format_width(params_format(params))) {
 	case 16:
 		iface = 0;
+		break;
 	case 20:
 		iface = 0x10;
 		break;
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0293763..5a14d5c 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -60,6 +60,8 @@
 	}
 
 	if (memcmp(fw->data, "WMFW", 4) != 0) {
+		memcpy(&data32, fw->data, sizeof(data32));
+		data32 = be32_to_cpu(data32);
 		dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
 			name, data32);
 		goto err;
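
The added memcpy()/be32_to_cpu() pair means the error message now prints the actual magic word fetched from the firmware image (byte-swapped and alignment-safe) rather than whatever data32 happened to hold. The same idea in plain userspace C, where ntohl() plays the role of be32_to_cpu():

    #include <arpa/inet.h>  /* ntohl(): big-endian (network order) to host order */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const unsigned char data[] = { 'W', 'M', 'F', 'W' };
            uint32_t magic;

            memcpy(&magic, data, sizeof(magic));    /* avoid an unaligned load */
            magic = ntohl(magic);
            printf("magic %08x\n", magic);          /* prints 574d4657 */
            return 0;
    }
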
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 645c980..a33b04d 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1968,6 +1968,7 @@
 		break;
 	case 24576000:
 		ratediv = WM8996_SYSCLK_DIV;
+		wm8996->sysclk /= 2;
 	case 12288000:
 		snd_soc_update_bits(codec, WM8996_AIF_RATE,
 				    WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index dea5aa4..f39d7dd 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -357,3 +357,6 @@
 	platform_driver_unregister(&mxs_pcm_driver);
 }
 module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 7fbeaec..1c57f66 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -171,3 +171,4 @@
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 65c1248..c664e33 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -209,9 +209,10 @@
 	snd_soc_card_hx4700.dev = &pdev->dev;
 	ret = snd_soc_register_card(&snd_soc_card_hx4700);
 	if (ret)
-		return ret;
+		gpio_free_array(hx4700_audio_gpios,
+				ARRAY_SIZE(hx4700_audio_gpios));
 
-	return 0;
+	return ret;
 }
 
 static int __devexit hx4700_audio_remove(struct platform_device *pdev)
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 1826acf..8e523fd 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -101,7 +101,6 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
-	int err;
 
 	/* These endpoints are not being used. */
 	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@
 	.dai_link	= &jive_dai,
 	.num_links	= 1,
 
-	.dapm_widgtets	= wm8750_dapm_widgets,
+	.dapm_widgets	= wm8750_dapm_widgets,
 	.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
 	.dapm_routes	= audio_map,
 	.num_dapm_routes = ARRAY_SIZE(audio_map),
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 3a0dbfc..8bd1dc5 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -12,6 +12,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <sound/soc.h>
 
 static struct snd_soc_card smdk2443;
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 6ce2778..c6e81fb 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -29,7 +29,7 @@
 MODULE_AUTHOR("Alan Cox");
 MODULE_LICENSE("GPL");
 
-static char *sound_devnode(struct device *dev, mode_t *mode)
+static char *sound_devnode(struct device *dev, umode_t *mode)
 {
 	if (MAJOR(dev->devt) == SOUND_MAJOR)
 		return NULL;
diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
index c7dca7b..ac2d5e1 100644
--- a/sound/usb/6fire/chip.c
+++ b/sound/usb/6fire/chip.c
@@ -211,22 +211,11 @@
 
 MODULE_DEVICE_TABLE(usb, device_table);
 
-static struct usb_driver driver = {
+static struct usb_driver usb_driver = {
 	.name = "snd-usb-6fire",
 	.probe = usb6fire_chip_probe,
 	.disconnect = usb6fire_chip_disconnect,
 	.id_table = device_table,
 };
 
-static int __init usb6fire_chip_init(void)
-{
-	return usb_register(&driver);
-}
-
-static void __exit usb6fire_chip_cleanup(void)
-{
-	usb_deregister(&driver);
-}
-
-module_init(usb6fire_chip_init);
-module_exit(usb6fire_chip_cleanup);
+module_usb_driver(usb_driver);
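
module_usb_driver() generates precisely the registration boilerplate that the deleted init/exit functions open-coded; roughly (paraphrased, the exact expansion may differ between kernel versions) it amounts to:

    static int __init usb_driver_init(void)
    {
            return usb_register(&usb_driver);
    }
    module_init(usb_driver_init);

    static void __exit usb_driver_exit(void)
    {
            usb_deregister(&usb_driver);
    }
    module_exit(usb_driver_exit);
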
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 3eb605b..457fb27 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -538,16 +538,5 @@
 	.id_table 	= snd_usb_id_table,
 };
 
-static int __init snd_module_init(void)
-{
-	return usb_register(&snd_usb_driver);
-}
-
-static void __exit snd_module_exit(void)
-{
-	usb_deregister(&snd_usb_driver);
-}
-
-module_init(snd_module_init)
-module_exit(snd_module_exit)
+module_usb_driver(snd_usb_driver);
 
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index c0609c2..4c11da9 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -1387,16 +1387,4 @@
 #endif
 };
 
-static int __init alsa_card_ua101_init(void)
-{
-	return usb_register(&ua101_driver);
-}
-
-static void __exit alsa_card_ua101_exit(void)
-{
-	usb_deregister(&ua101_driver);
-	mutex_destroy(&devices_mutex);
-}
-
-module_init(alsa_card_ua101_init);
-module_exit(alsa_card_ua101_exit);
+module_usb_driver(ua101_driver);
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 726c1a7..625f7ca 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -772,16 +772,4 @@
 	.supports_autosuspend = 1
 };
 
-
-static int __init snd_us122l_module_init(void)
-{
-	return usb_register(&snd_us122l_usb_driver);
-}
-
-static void __exit snd_us122l_module_exit(void)
-{
-	usb_deregister(&snd_us122l_usb_driver);
-}
-
-module_init(snd_us122l_module_init)
-module_exit(snd_us122l_module_exit)
+module_usb_driver(snd_us122l_usb_driver);
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index cbd37f2..0c738ed 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -459,15 +459,4 @@
 	}
 }
 
-static int __init snd_usX2Y_module_init(void)
-{
-	return usb_register(&snd_usX2Y_usb_driver);
-}
-
-static void __exit snd_usX2Y_module_exit(void)
-{
-	usb_deregister(&snd_usX2Y_usb_driver);
-}
-
-module_init(snd_usX2Y_module_init)
-module_exit(snd_usX2Y_module_exit)
+module_usb_driver(snd_usX2Y_usb_driver);
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index fe6762e..c89f9e1 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -22,7 +22,7 @@
 -------
 -i::
 --input=::
-        Input file name. (default: perf.data)
+        Input file name. (default: perf.data unless stdin is a fifo)
 
 -d::
 --dsos=<dso[,dso...]>::
@@ -66,7 +66,7 @@
 	used. This interface starts by centering on the line with more
 	samples, TAB/UNTAB cycles through the lines with more samples.
 
--c::
+-C::
 --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
 	be provided as a comma-separated list with no space: 0,1. Ranges of
 	CPUs are specified with -: 0-2. Default is to report samples on all
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt
index cc22325..25c52ef 100644
--- a/tools/perf/Documentation/perf-buildid-list.txt
+++ b/tools/perf/Documentation/perf-buildid-list.txt
@@ -26,7 +26,7 @@
         Show only DSOs with hits.
 -i::
 --input=::
-        Input file name. (default: perf.data)
+        Input file name. (default: perf.data unless stdin is a fifo)
 -f::
 --force::
 	Don't do ownership validation.
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 0cada9e..0507ec7 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -18,7 +18,7 @@
 -------
 -i::
 --input=::
-        Input file name. (default: perf.data)
+        Input file name. (default: perf.data unless stdin is a fifo)
 
 SEE ALSO
 --------
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index a52fcde..7c8fbbf 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -23,7 +23,7 @@
 -------
 -i <file>::
 --input=<file>::
-	Select the input file (default: perf.data)
+	Select the input file (default: perf.data unless stdin is a fifo)
 
 --caller::
 	Show per-callsite statistics
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 4a26a2f..d6b2a4f 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -29,7 +29,7 @@
 
 -i::
 --input=<file>::
-        Input file name.
+        Input file name. (default: perf.data unless stdin is a fifo)
 
 -v::
 --verbose::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 5a520f8..2937f7e 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -89,7 +89,7 @@
 
 -m::
 --mmap-pages=::
-	Number of mmap data pages.
+	Number of mmap data pages. Must be a power of two.
 
 -g::
 --call-graph::
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 212f24d..9b430e9 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -19,7 +19,7 @@
 -------
 -i::
 --input=::
-        Input file name. (default: perf.data)
+        Input file name. (default: perf.data unless stdin is a fifo)
 
 -v::
 --verbose::
@@ -39,7 +39,7 @@
 -T::
 --threads::
 	Show per-thread event counters
--C::
+-c::
 --comms=::
 	Only consider symbols in these comms. CSV that understands
 	file://filename entries.
@@ -80,9 +80,10 @@
 --dump-raw-trace::
         Dump raw trace in ASCII.
 
--g [type,min,order]::
+-g [type,min[,limit],order]::
 --call-graph::
-        Display call chains using type, min percent threshold and order.
+        Display call chains using type, min percent threshold, optional print
+	limit and order.
 	type can be either:
 	- flat: single column, linear exposure of call chains.
 	- graph: use a graph tree, displaying absolute overhead rates.
@@ -128,7 +129,7 @@
 --symfs=<directory>::
         Look for files with symbols relative to this directory.
 
--c::
+-C::
 --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
 	be provided as a comma-separated list with no space: 0,1. Ranges of
 	CPUs are specified with -: 0-2. Default is to report samples on all
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 5b212b5..8ff4df9 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -40,7 +40,7 @@
 -------
 -i::
 --input=<file>::
-        Input file name. (default: perf.data)
+        Input file name. (default: perf.data unless stdin is a fifo)
 
 -v::
 --verbose::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index dec87ec..2f6cef4 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -106,7 +106,7 @@
 
 -i::
 --input=::
-        Input file name.
+        Input file name. (default: perf.data unless stdin is a fifo)
 
 -d::
 --debug-mode::
@@ -182,12 +182,17 @@
 --hide-call-graph::
         When printing symbols do not display call chain.
 
--c::
+-C::
 --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
 	be provided as a comma-separated list with no space: 0,1. Ranges of
 	CPUs are specified with -: 0-2. Default is to report samples on all
 	CPUs.
 
+-c::
+--comms=::
+	Only display events for these comms. CSV that understands
+	file://filename entries.
+
 -I::
 --show-info::
 	Display extended information about the perf.data file. This adds
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index 2c3b462..b24ac40 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -8,13 +8,19 @@
 SYNOPSIS
 --------
 [verse]
-'perf test <options>'
+'perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]'
 
 DESCRIPTION
 -----------
 This command does assorted sanity tests, initially through linked routines but
 also will look for a directory with more tests in the form of scripts.
 
+To get a list of available tests use 'perf test list'; specifying a test name
+fragment will show all tests that have it.
+
+To run only specific tests, pass test name fragments or the numbers obtained
+from 'perf test list'.
+
 OPTIONS
 -------
 -v::
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index d7b79e2..1632b0e 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -27,7 +27,7 @@
         Select the output file (default: output.svg)
 -i::
 --input=::
-        Select the input file (default: perf.data)
+        Select the input file (default: perf.data unless stdin is a fifo)
 -w::
 --width=::
         Select the width of the SVG file (default: 1000)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b98e307..ac86d67 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -278,6 +278,7 @@
 LIB_H += util/strlist.h
 LIB_H += util/strfilter.h
 LIB_H += util/svghelper.h
+LIB_H += util/tool.h
 LIB_H += util/run-command.h
 LIB_H += util/sigchain.h
 LIB_H += util/symbol.h
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 48ae0c5..7cdd61d 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -9,7 +9,10 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <stdlib.h>
+#ifndef __UCLIBC__
 #include <libio.h>
+#endif
 #include <dwarf-regs.h>
 
 
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 46b4c24..214ba7f 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,32 +27,32 @@
 #include "util/sort.h"
 #include "util/hist.h"
 #include "util/session.h"
+#include "util/tool.h"
 
 #include <linux/bitmap.h>
 
-static char		const *input_name = "perf.data";
+struct perf_annotate {
+	struct perf_tool tool;
+	char const *input_name;
+	bool	   force, use_tui, use_stdio;
+	bool	   full_paths;
+	bool	   print_line;
+	const char *sym_hist_filter;
+	const char *cpu_list;
+	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+};
 
-static bool		force, use_tui, use_stdio;
-
-static bool		full_paths;
-
-static bool		print_line;
-
-static const char *sym_hist_filter;
-
-static const char	*cpu_list;
-static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
-
-static int perf_evlist__add_sample(struct perf_evlist *evlist,
-				   struct perf_sample *sample,
-				   struct perf_evsel *evsel,
-				   struct addr_location *al)
+static int perf_evsel__add_sample(struct perf_evsel *evsel,
+				  struct perf_sample *sample,
+				  struct addr_location *al,
+				  struct perf_annotate *ann)
 {
 	struct hist_entry *he;
 	int ret;
 
-	if (sym_hist_filter != NULL &&
-	    (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
+	if (ann->sym_hist_filter != NULL &&
+	    (al->sym == NULL ||
+	     strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
 		/* We're only interested in a symbol named sym_hist_filter */
 		if (al->sym != NULL) {
 			rb_erase(&al->sym->rb_node,
@@ -69,8 +69,7 @@
 	ret = 0;
 	if (he->ms.sym != NULL) {
 		struct annotation *notes = symbol__annotation(he->ms.sym);
-		if (notes->src == NULL &&
-		    symbol__alloc_hist(he->ms.sym, evlist->nr_entries) < 0)
+		if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
 			return -ENOMEM;
 
 		ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
@@ -81,25 +80,26 @@
 	return ret;
 }
 
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool,
+				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
+	struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
 	struct addr_location al;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  symbol__annotate_init) < 0) {
 		pr_warning("problem processing %d event, skipping it.\n",
 			   event->header.type);
 		return -1;
 	}
 
-	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+	if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
 		return 0;
 
-	if (!al.filtered &&
-	    perf_evlist__add_sample(session->evlist, sample, evsel, &al)) {
+	if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
 		pr_warning("problem incrementing symbol count, "
 			   "skipping event\n");
 		return -1;
@@ -108,14 +108,15 @@
 	return 0;
 }
 
-static int hist_entry__tty_annotate(struct hist_entry *he, int evidx)
+static int hist_entry__tty_annotate(struct hist_entry *he, int evidx,
+				    struct perf_annotate *ann)
 {
 	return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
-				    print_line, full_paths, 0, 0);
+				    ann->print_line, ann->full_paths, 0, 0);
 }
 
 static void hists__find_annotations(struct hists *self, int evidx,
-				    int nr_events)
+				    struct perf_annotate *ann)
 {
 	struct rb_node *nd = rb_first(&self->entries), *next;
 	int key = K_RIGHT;
@@ -138,8 +139,7 @@
 		}
 
 		if (use_browser > 0) {
-			key = hist_entry__tui_annotate(he, evidx, nr_events,
-						       NULL, NULL, 0);
+			key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0);
 			switch (key) {
 			case K_RIGHT:
 				next = rb_next(nd);
@@ -154,7 +154,7 @@
 			if (next != NULL)
 				nd = next;
 		} else {
-			hist_entry__tty_annotate(he, evidx);
+			hist_entry__tty_annotate(he, evidx, ann);
 			nd = rb_next(nd);
 			/*
 			 * Since we have a hist_entry per IP for the same
@@ -167,33 +167,26 @@
 	}
 }
 
-static struct perf_event_ops event_ops = {
-	.sample	= process_sample_event,
-	.mmap	= perf_event__process_mmap,
-	.comm	= perf_event__process_comm,
-	.fork	= perf_event__process_task,
-	.ordered_samples = true,
-	.ordering_requires_timestamps = true,
-};
-
-static int __cmd_annotate(void)
+static int __cmd_annotate(struct perf_annotate *ann)
 {
 	int ret;
 	struct perf_session *session;
 	struct perf_evsel *pos;
 	u64 total_nr_samples;
 
-	session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
+	session = perf_session__new(ann->input_name, O_RDONLY,
+				    ann->force, false, &ann->tool);
 	if (session == NULL)
 		return -ENOMEM;
 
-	if (cpu_list) {
-		ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+	if (ann->cpu_list) {
+		ret = perf_session__cpu_bitmap(session, ann->cpu_list,
+					       ann->cpu_bitmap);
 		if (ret)
 			goto out_delete;
 	}
 
-	ret = perf_session__process_events(session, &event_ops);
+	ret = perf_session__process_events(session, &ann->tool);
 	if (ret)
 		goto out_delete;
 
@@ -217,13 +210,12 @@
 			total_nr_samples += nr_samples;
 			hists__collapse_resort(hists);
 			hists__output_resort(hists);
-			hists__find_annotations(hists, pos->idx,
-						session->evlist->nr_entries);
+			hists__find_annotations(hists, pos->idx, ann);
 		}
 	}
 
 	if (total_nr_samples == 0) {
-		ui__warning("The %s file has no samples!\n", input_name);
+		ui__warning("The %s file has no samples!\n", session->filename);
 		goto out_delete;
 	}
 out_delete:
@@ -247,29 +239,41 @@
 	NULL
 };
 
-static const struct option options[] = {
-	OPT_STRING('i', "input", &input_name, "file",
+int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+{
+	struct perf_annotate annotate = {
+		.tool = {
+			.sample	= process_sample_event,
+			.mmap	= perf_event__process_mmap,
+			.comm	= perf_event__process_comm,
+			.fork	= perf_event__process_task,
+			.ordered_samples = true,
+			.ordering_requires_timestamps = true,
+		},
+	};
+	const struct option options[] = {
+	OPT_STRING('i', "input", &annotate.input_name, "file",
 		    "input file name"),
 	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
 		   "only consider symbols in these dsos"),
-	OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
+	OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
 		    "symbol to annotate"),
-	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+	OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
 		    "dump raw trace in ASCII"),
-	OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
-	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+	OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
+	OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
 		   "file", "vmlinux pathname"),
 	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
 		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
-	OPT_BOOLEAN('l', "print-line", &print_line,
+	OPT_BOOLEAN('l', "print-line", &annotate.print_line,
 		    "print matching source lines (may be slow)"),
-	OPT_BOOLEAN('P', "full-paths", &full_paths,
+	OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
 		    "Don't shorten the displayed pathnames"),
-	OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+	OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
 		   "Look for files with symbols relative to this directory"),
 	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
@@ -279,15 +283,13 @@
 	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
 		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
 	OPT_END()
-};
+	};
 
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
-{
 	argc = parse_options(argc, argv, options, annotate_usage, 0);
 
-	if (use_stdio)
+	if (annotate.use_stdio)
 		use_browser = 0;
-	else if (use_tui)
+	else if (annotate.use_tui)
 		use_browser = 1;
 
 	setup_browser(true);
@@ -308,7 +310,7 @@
 		if (argc > 1)
 			usage_with_options(annotate_usage, options);
 
-		sym_hist_filter = argv[0];
+		annotate.sym_hist_filter = argv[0];
 	}
 
 	if (field_sep && *field_sep == '.') {
@@ -316,5 +318,5 @@
 		return -1;
 	}
 
-	return __cmd_annotate();
+	return __cmd_annotate(&annotate);
 }
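
The annotate conversion above shows the pattern this merge applies across the perf builtins: options that used to be file-scope globals move into a per-command struct that embeds the callback table (struct perf_tool), and each handler recovers its state from the tool pointer via container_of(). Below is a stand-alone sketch of that mechanism, not perf code; struct annotate_cmd and the other names are invented for illustration, and container_of() is open-coded so the example compiles on its own.

/*
 * Stand-alone illustration of the embedded-ops pattern used throughout
 * this merge.  perf gets container_of() from its own headers; it is
 * open-coded here only so the sketch is self-contained.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*sample)(struct tool *tool, int value);
};

struct annotate_cmd {			/* hypothetical, mirrors struct perf_annotate */
	struct tool	tool;		/* embedded object, not a pointer */
	const char	*sym_filter;	/* former file-scope global, now per command */
	long		nr_samples;
};

static int annotate_sample(struct tool *tool, int value)
{
	/* Recover the per-command state from the generic callback argument. */
	struct annotate_cmd *cmd = container_of(tool, struct annotate_cmd, tool);

	cmd->nr_samples++;
	printf("filter=%s value=%d\n", cmd->sym_filter, value);
	return 0;
}

int main(void)
{
	struct annotate_cmd cmd = {
		.tool		= { .sample = annotate_sample },
		.sym_filter	= "main",
	};

	/* Generic code only ever sees &cmd.tool, yet no globals are needed. */
	return cmd.tool.sample(&cmd.tool, 42);
}

Embedding the tool member by value, rather than holding a pointer to it, is what makes the container_of() arithmetic valid.
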
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index cb690a6..5248046 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -18,7 +18,7 @@
 
 #include <libelf.h>
 
-static char const *input_name = "perf.data";
+static const char *input_name;
 static bool force;
 static bool show_kernel;
 static bool with_hits;
@@ -39,24 +39,6 @@
 	OPT_END()
 };
 
-static int perf_session__list_build_ids(void)
-{
-	struct perf_session *session;
-
-	session = perf_session__new(input_name, O_RDONLY, force, false,
-				    &build_id__mark_dso_hit_ops);
-	if (session == NULL)
-		return -1;
-
-	if (with_hits)
-		perf_session__process_events(session, &build_id__mark_dso_hit_ops);
-
-	perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
-
-	perf_session__delete(session);
-	return 0;
-}
-
 static int sysfs__fprintf_build_id(FILE *fp)
 {
 	u8 kallsyms_build_id[BUILD_ID_SIZE];
@@ -85,18 +67,37 @@
 	return fprintf(fp, "%s\n", sbuild_id);
 }
 
+static int perf_session__list_build_ids(void)
+{
+	struct perf_session *session;
+
+	elf_version(EV_CURRENT);
+
+	session = perf_session__new(input_name, O_RDONLY, force, false,
+				    &build_id__mark_dso_hit_ops);
+	if (session == NULL)
+		return -1;
+
+	/*
+	 * See if this is an ELF file first:
+	 */
+	if (filename__fprintf_build_id(session->filename, stdout))
+		goto out;
+
+	if (with_hits)
+		perf_session__process_events(session, &build_id__mark_dso_hit_ops);
+
+	perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
+out:
+	perf_session__delete(session);
+	return 0;
+}
+
 static int __cmd_buildid_list(void)
 {
 	if (show_kernel)
 		return sysfs__fprintf_build_id(stdout);
 
-	elf_version(EV_CURRENT);
-	/*
- 	 * See if this is an ELF file first:
- 	 */
-	if (filename__fprintf_build_id(input_name, stdout))
-		return 0;
-
 	return perf_session__list_build_ids();
 }
 
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index b39f3a1..4f19513 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -9,7 +9,9 @@
 #include "util/debug.h"
 #include "util/event.h"
 #include "util/hist.h"
+#include "util/evsel.h"
 #include "util/session.h"
+#include "util/tool.h"
 #include "util/sort.h"
 #include "util/symbol.h"
 #include "util/util.h"
@@ -30,14 +32,15 @@
 	return -ENOMEM;
 }
 
-static int diff__process_sample_event(union perf_event *event,
+static int diff__process_sample_event(struct perf_tool *tool __used,
+				      union perf_event *event,
 				      struct perf_sample *sample,
 				      struct perf_evsel *evsel __used,
-				      struct perf_session *session)
+				      struct machine *machine)
 {
 	struct addr_location al;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+	if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
 		pr_warning("problem processing %d event, skipping it.\n",
 			   event->header.type);
 		return -1;
@@ -46,16 +49,16 @@
 	if (al.filtered || al.sym == NULL)
 		return 0;
 
-	if (hists__add_entry(&session->hists, &al, sample->period)) {
+	if (hists__add_entry(&evsel->hists, &al, sample->period)) {
 		pr_warning("problem incrementing symbol period, skipping event\n");
 		return -1;
 	}
 
-	session->hists.stats.total_period += sample->period;
+	evsel->hists.stats.total_period += sample->period;
 	return 0;
 }
 
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_diff = {
 	.sample	= diff__process_sample_event,
 	.mmap	= perf_event__process_mmap,
 	.comm	= perf_event__process_comm,
@@ -145,13 +148,13 @@
 	int ret, i;
 	struct perf_session *session[2];
 
-	session[0] = perf_session__new(input_old, O_RDONLY, force, false, &event_ops);
-	session[1] = perf_session__new(input_new, O_RDONLY, force, false, &event_ops);
+	session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff);
+	session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff);
 	if (session[0] == NULL || session[1] == NULL)
 		return -ENOMEM;
 
 	for (i = 0; i < 2; ++i) {
-		ret = perf_session__process_events(session[i], &event_ops);
+		ret = perf_session__process_events(session[i], &perf_diff);
 		if (ret)
 			goto out_delete;
 	}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 4c5e9e0..2676032 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -15,7 +15,7 @@
 #include "util/parse-options.h"
 #include "util/session.h"
 
-static char const *input_name = "perf.data";
+static const char *input_name;
 
 static int __cmd_evlist(void)
 {
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 8dfc12b..09c1061 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -9,6 +9,7 @@
 
 #include "perf.h"
 #include "util/session.h"
+#include "util/tool.h"
 #include "util/debug.h"
 
 #include "util/parse-options.h"
@@ -16,8 +17,9 @@
 static char		const *input_name = "-";
 static bool		inject_build_ids;
 
-static int perf_event__repipe_synth(union perf_event *event,
-				    struct perf_session *session __used)
+static int perf_event__repipe_synth(struct perf_tool *tool __used,
+				    union perf_event *event,
+				    struct machine *machine __used)
 {
 	uint32_t size;
 	void *buf = event;
@@ -36,41 +38,70 @@
 	return 0;
 }
 
-static int perf_event__repipe(union perf_event *event,
-			      struct perf_sample *sample __used,
-			      struct perf_session *session)
+static int perf_event__repipe_op2_synth(struct perf_tool *tool,
+					union perf_event *event,
+					struct perf_session *session __used)
 {
-	return perf_event__repipe_synth(event, session);
+	return perf_event__repipe_synth(tool, event, NULL);
 }
 
-static int perf_event__repipe_sample(union perf_event *event,
+static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
+					       union perf_event *event)
+{
+	return perf_event__repipe_synth(tool, event, NULL);
+}
+
+static int perf_event__repipe_tracing_data_synth(union perf_event *event,
+						 struct perf_session *session __used)
+{
+	return perf_event__repipe_synth(NULL, event, NULL);
+}
+
+static int perf_event__repipe_attr(union perf_event *event,
+				   struct perf_evlist **pevlist __used)
+{
+	return perf_event__repipe_synth(NULL, event, NULL);
+}
+
+static int perf_event__repipe(struct perf_tool *tool,
+			      union perf_event *event,
+			      struct perf_sample *sample __used,
+			      struct machine *machine)
+{
+	return perf_event__repipe_synth(tool, event, machine);
+}
+
+static int perf_event__repipe_sample(struct perf_tool *tool,
+				     union perf_event *event,
 			      struct perf_sample *sample __used,
 			      struct perf_evsel *evsel __used,
-			      struct perf_session *session)
+			      struct machine *machine)
 {
-	return perf_event__repipe_synth(event, session);
+	return perf_event__repipe_synth(tool, event, machine);
 }
 
-static int perf_event__repipe_mmap(union perf_event *event,
+static int perf_event__repipe_mmap(struct perf_tool *tool,
+				   union perf_event *event,
 				   struct perf_sample *sample,
-				   struct perf_session *session)
+				   struct machine *machine)
 {
 	int err;
 
-	err = perf_event__process_mmap(event, sample, session);
-	perf_event__repipe(event, sample, session);
+	err = perf_event__process_mmap(tool, event, sample, machine);
+	perf_event__repipe(tool, event, sample, machine);
 
 	return err;
 }
 
-static int perf_event__repipe_task(union perf_event *event,
+static int perf_event__repipe_task(struct perf_tool *tool,
+				   union perf_event *event,
 				   struct perf_sample *sample,
-				   struct perf_session *session)
+				   struct machine *machine)
 {
 	int err;
 
-	err = perf_event__process_task(event, sample, session);
-	perf_event__repipe(event, sample, session);
+	err = perf_event__process_task(tool, event, sample, machine);
+	perf_event__repipe(tool, event, sample, machine);
 
 	return err;
 }
@@ -80,7 +111,7 @@
 {
 	int err;
 
-	perf_event__repipe_synth(event, session);
+	perf_event__repipe_synth(NULL, event, NULL);
 	err = perf_event__process_tracing_data(event, session);
 
 	return err;
@@ -100,10 +131,10 @@
 	return -1;
 }
 
-static int dso__inject_build_id(struct dso *self, struct perf_session *session)
+static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
+				struct machine *machine)
 {
 	u16 misc = PERF_RECORD_MISC_USER;
-	struct machine *machine;
 	int err;
 
 	if (dso__read_build_id(self) < 0) {
@@ -111,17 +142,11 @@
 		return -1;
 	}
 
-	machine = perf_session__find_host_machine(session);
-	if (machine == NULL) {
-		pr_err("Can't find machine for session\n");
-		return -1;
-	}
-
 	if (self->kernel)
 		misc = PERF_RECORD_MISC_KERNEL;
 
-	err = perf_event__synthesize_build_id(self, misc, perf_event__repipe,
-					      machine, session);
+	err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe,
+					      machine);
 	if (err) {
 		pr_err("Can't synthesize build_id event for %s\n", self->long_name);
 		return -1;
@@ -130,10 +155,11 @@
 	return 0;
 }
 
-static int perf_event__inject_buildid(union perf_event *event,
+static int perf_event__inject_buildid(struct perf_tool *tool,
+				      union perf_event *event,
 				      struct perf_sample *sample,
 				      struct perf_evsel *evsel __used,
-				      struct perf_session *session)
+				      struct machine *machine)
 {
 	struct addr_location al;
 	struct thread *thread;
@@ -141,21 +167,21 @@
 
 	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-	thread = perf_session__findnew(session, event->ip.pid);
+	thread = machine__findnew_thread(machine, event->ip.pid);
 	if (thread == NULL) {
 		pr_err("problem processing %d event, skipping it.\n",
 		       event->header.type);
 		goto repipe;
 	}
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, event->ip.ip, &al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      event->ip.ip, &al);
 
 	if (al.map != NULL) {
 		if (!al.map->dso->hit) {
 			al.map->dso->hit = 1;
 			if (map__load(al.map, NULL) >= 0) {
-				dso__inject_build_id(al.map->dso, session);
+				dso__inject_build_id(al.map->dso, tool, machine);
 				/*
 				 * If this fails, too bad, let the other side
 				 * account this as unresolved.
@@ -168,24 +194,24 @@
 	}
 
 repipe:
-	perf_event__repipe(event, sample, session);
+	perf_event__repipe(tool, event, sample, machine);
 	return 0;
 }
 
-struct perf_event_ops inject_ops = {
+struct perf_tool perf_inject = {
 	.sample		= perf_event__repipe_sample,
 	.mmap		= perf_event__repipe,
 	.comm		= perf_event__repipe,
 	.fork		= perf_event__repipe,
 	.exit		= perf_event__repipe,
 	.lost		= perf_event__repipe,
-	.read		= perf_event__repipe,
+	.read		= perf_event__repipe_sample,
 	.throttle	= perf_event__repipe,
 	.unthrottle	= perf_event__repipe,
-	.attr		= perf_event__repipe_synth,
-	.event_type 	= perf_event__repipe_synth,
-	.tracing_data 	= perf_event__repipe_synth,
-	.build_id 	= perf_event__repipe_synth,
+	.attr		= perf_event__repipe_attr,
+	.event_type	= perf_event__repipe_event_type_synth,
+	.tracing_data	= perf_event__repipe_tracing_data_synth,
+	.build_id	= perf_event__repipe_op2_synth,
 };
 
 extern volatile int session_done;
@@ -203,17 +229,17 @@
 	signal(SIGINT, sig_handler);
 
 	if (inject_build_ids) {
-		inject_ops.sample	= perf_event__inject_buildid;
-		inject_ops.mmap		= perf_event__repipe_mmap;
-		inject_ops.fork		= perf_event__repipe_task;
-		inject_ops.tracing_data	= perf_event__repipe_tracing_data;
+		perf_inject.sample	 = perf_event__inject_buildid;
+		perf_inject.mmap	 = perf_event__repipe_mmap;
+		perf_inject.fork	 = perf_event__repipe_task;
+		perf_inject.tracing_data = perf_event__repipe_tracing_data;
 	}
 
-	session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops);
+	session = perf_session__new(input_name, O_RDONLY, false, true, &perf_inject);
 	if (session == NULL)
 		return -ENOMEM;
 
-	ret = perf_session__process_events(session, &inject_ops);
+	ret = perf_session__process_events(session, &perf_inject);
 
 	perf_session__delete(session);
 
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 225e963..fe1ad8f 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -7,6 +7,7 @@
 #include "util/thread.h"
 #include "util/header.h"
 #include "util/session.h"
+#include "util/tool.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
@@ -18,7 +19,7 @@
 struct alloc_stat;
 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
 
-static char const		*input_name = "perf.data";
+static const char		*input_name;
 
 static int			alloc_flag;
 static int			caller_flag;
@@ -303,12 +304,13 @@
 	}
 }
 
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel __used,
-				struct perf_session *session)
+				struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
@@ -324,7 +326,7 @@
 	return 0;
 }
 
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_kmem = {
 	.sample			= process_sample_event,
 	.comm			= perf_event__process_comm,
 	.ordered_samples	= true,
@@ -483,7 +485,7 @@
 {
 	int err = -EINVAL;
 	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-							 0, false, &event_ops);
+							 0, false, &perf_kmem);
 	if (session == NULL)
 		return -ENOMEM;
 
@@ -494,7 +496,7 @@
 		goto out_delete;
 
 	setup_pager();
-	err = perf_session__process_events(session, &event_ops);
+	err = perf_session__process_events(session, &perf_kmem);
 	if (err != 0)
 		goto out_delete;
 	sort_result();
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 34d1e85..032324a 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -38,7 +38,7 @@
 	OPT_BOOLEAN(0, "guest", &perf_guest,
 		    "Collect guest os data"),
 	OPT_BOOLEAN(0, "host", &perf_host,
-		    "Collect guest os data"),
+		    "Collect host os data"),
 	OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
 		   "guest mount directory under which every guest os"
 		   " instance has a subdir"),
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 899080a..2296c39 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -12,6 +12,7 @@
 
 #include "util/debug.h"
 #include "util/session.h"
+#include "util/tool.h"
 
 #include <sys/types.h>
 #include <sys/prctl.h>
@@ -325,7 +326,7 @@
 	die("memory allocation failed\n");
 }
 
-static char			const *input_name = "perf.data";
+static const char *input_name;
 
 struct raw_event_sample {
 	u32			size;
@@ -845,12 +846,13 @@
 		die("Unknown type of information\n");
 }
 
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel __used,
-				struct perf_session *s)
+				struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(s, sample->tid);
+	struct thread *thread = machine__findnew_thread(machine, sample->tid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
@@ -863,7 +865,7 @@
 	return 0;
 }
 
-static struct perf_event_ops eops = {
+static struct perf_tool eops = {
 	.sample			= process_sample_event,
 	.comm			= perf_event__process_comm,
 	.ordered_samples	= true,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 710ae3d..59d43ab 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -46,7 +46,6 @@
 
 #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
 #define DEFAULT_FUNC_FILTER "!_*"
-#define MAX_PATH_LEN 256
 
 /* Session management structure */
 static struct {
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6ab58cc..0abfb18 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -22,6 +22,7 @@
 #include "util/evsel.h"
 #include "util/debug.h"
 #include "util/session.h"
+#include "util/tool.h"
 #include "util/symbol.h"
 #include "util/cpumap.h"
 #include "util/thread_map.h"
@@ -35,55 +36,36 @@
 	WRITE_APPEND
 };
 
-static u64			user_interval			= ULLONG_MAX;
-static u64			default_interval		=      0;
+struct perf_record {
+	struct perf_tool	tool;
+	struct perf_record_opts	opts;
+	u64			bytes_written;
+	const char		*output_name;
+	struct perf_evlist	*evlist;
+	struct perf_session	*session;
+	const char		*progname;
+	int			output;
+	unsigned int		page_size;
+	int			realtime_prio;
+	enum write_mode_t	write_mode;
+	bool			no_buildid;
+	bool			no_buildid_cache;
+	bool			force;
+	bool			file_new;
+	bool			append_file;
+	long			samples;
+	off_t			post_processing_offset;
+};
 
-static unsigned int		page_size;
-static unsigned int		mmap_pages			= UINT_MAX;
-static unsigned int		user_freq 			= UINT_MAX;
-static int			freq				=   1000;
-static int			output;
-static int			pipe_output			=      0;
-static const char		*output_name			= NULL;
-static bool			group				=  false;
-static int			realtime_prio			=      0;
-static bool			nodelay				=  false;
-static bool			raw_samples			=  false;
-static bool			sample_id_all_avail		=   true;
-static bool			system_wide			=  false;
-static pid_t			target_pid			=     -1;
-static pid_t			target_tid			=     -1;
-static pid_t			child_pid			=     -1;
-static bool			no_inherit			=  false;
-static enum write_mode_t	write_mode			= WRITE_FORCE;
-static bool			call_graph			=  false;
-static bool			inherit_stat			=  false;
-static bool			no_samples			=  false;
-static bool			sample_address			=  false;
-static bool			sample_time			=  false;
-static bool			no_buildid			=  false;
-static bool			no_buildid_cache		=  false;
-static struct perf_evlist	*evsel_list;
-
-static long			samples				=      0;
-static u64			bytes_written			=      0;
-
-static int			file_new			=      1;
-static off_t			post_processing_offset;
-
-static struct perf_session	*session;
-static const char		*cpu_list;
-static const char               *progname;
-
-static void advance_output(size_t size)
+static void advance_output(struct perf_record *rec, size_t size)
 {
-	bytes_written += size;
+	rec->bytes_written += size;
 }
 
-static void write_output(void *buf, size_t size)
+static void write_output(struct perf_record *rec, void *buf, size_t size)
 {
 	while (size) {
-		int ret = write(output, buf, size);
+		int ret = write(rec->output, buf, size);
 
 		if (ret < 0)
 			die("failed to write");
@@ -91,30 +73,33 @@
 		size -= ret;
 		buf += ret;
 
-		bytes_written += ret;
+		rec->bytes_written += ret;
 	}
 }
 
-static int process_synthesized_event(union perf_event *event,
+static int process_synthesized_event(struct perf_tool *tool,
+				     union perf_event *event,
 				     struct perf_sample *sample __used,
-				     struct perf_session *self __used)
+				     struct machine *machine __used)
 {
-	write_output(event, event->header.size);
+	struct perf_record *rec = container_of(tool, struct perf_record, tool);
+	write_output(rec, event, event->header.size);
 	return 0;
 }
 
-static void mmap_read(struct perf_mmap *md)
+static void perf_record__mmap_read(struct perf_record *rec,
+				   struct perf_mmap *md)
 {
 	unsigned int head = perf_mmap__read_head(md);
 	unsigned int old = md->prev;
-	unsigned char *data = md->base + page_size;
+	unsigned char *data = md->base + rec->page_size;
 	unsigned long size;
 	void *buf;
 
 	if (old == head)
 		return;
 
-	samples++;
+	rec->samples++;
 
 	size = head - old;
 
@@ -123,14 +108,14 @@
 		size = md->mask + 1 - (old & md->mask);
 		old += size;
 
-		write_output(buf, size);
+		write_output(rec, buf, size);
 	}
 
 	buf = &data[old & md->mask];
 	size = head - old;
 	old += size;
 
-	write_output(buf, size);
+	write_output(rec, buf, size);
 
 	md->prev = old;
 	perf_mmap__write_tail(md, old);
@@ -149,17 +134,18 @@
 	signr = sig;
 }
 
-static void sig_atexit(void)
+static void perf_record__sig_exit(int exit_status __used, void *arg)
 {
+	struct perf_record *rec = arg;
 	int status;
 
-	if (child_pid > 0) {
+	if (rec->evlist->workload.pid > 0) {
 		if (!child_finished)
-			kill(child_pid, SIGTERM);
+			kill(rec->evlist->workload.pid, SIGTERM);
 
 		wait(&status);
 		if (WIFSIGNALED(status))
-			psignal(WTERMSIG(status), progname);
+			psignal(WTERMSIG(status), rec->progname);
 	}
 
 	if (signr == -1 || signr == SIGUSR1)
@@ -169,78 +155,6 @@
 	kill(getpid(), signr);
 }
 
-static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
-{
-	struct perf_event_attr *attr = &evsel->attr;
-	int track = !evsel->idx; /* only the first counter needs these */
-
-	attr->disabled		= 1;
-	attr->inherit		= !no_inherit;
-	attr->read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
-				  PERF_FORMAT_TOTAL_TIME_RUNNING |
-				  PERF_FORMAT_ID;
-
-	attr->sample_type	|= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
-
-	if (evlist->nr_entries > 1)
-		attr->sample_type |= PERF_SAMPLE_ID;
-
-	/*
-	 * We default some events to a 1 default interval. But keep
-	 * it a weak assumption overridable by the user.
-	 */
-	if (!attr->sample_period || (user_freq != UINT_MAX &&
-				     user_interval != ULLONG_MAX)) {
-		if (freq) {
-			attr->sample_type	|= PERF_SAMPLE_PERIOD;
-			attr->freq		= 1;
-			attr->sample_freq	= freq;
-		} else {
-			attr->sample_period = default_interval;
-		}
-	}
-
-	if (no_samples)
-		attr->sample_freq = 0;
-
-	if (inherit_stat)
-		attr->inherit_stat = 1;
-
-	if (sample_address) {
-		attr->sample_type	|= PERF_SAMPLE_ADDR;
-		attr->mmap_data = track;
-	}
-
-	if (call_graph)
-		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;
-
-	if (system_wide)
-		attr->sample_type	|= PERF_SAMPLE_CPU;
-
-	if (sample_id_all_avail &&
-	    (sample_time || system_wide || !no_inherit || cpu_list))
-		attr->sample_type	|= PERF_SAMPLE_TIME;
-
-	if (raw_samples) {
-		attr->sample_type	|= PERF_SAMPLE_TIME;
-		attr->sample_type	|= PERF_SAMPLE_RAW;
-		attr->sample_type	|= PERF_SAMPLE_CPU;
-	}
-
-	if (nodelay) {
-		attr->watermark = 0;
-		attr->wakeup_events = 1;
-	}
-
-	attr->mmap		= track;
-	attr->comm		= track;
-
-	if (target_pid == -1 && target_tid == -1 && !system_wide) {
-		attr->disabled = 1;
-		attr->enable_on_exec = 1;
-	}
-}
-
 static bool perf_evlist__equal(struct perf_evlist *evlist,
 			       struct perf_evlist *other)
 {
@@ -260,15 +174,17 @@
 	return true;
 }
 
-static void open_counters(struct perf_evlist *evlist)
+static void perf_record__open(struct perf_record *rec)
 {
 	struct perf_evsel *pos, *first;
-
-	if (evlist->cpus->map[0] < 0)
-		no_inherit = true;
+	struct perf_evlist *evlist = rec->evlist;
+	struct perf_session *session = rec->session;
+	struct perf_record_opts *opts = &rec->opts;
 
 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
 
+	perf_evlist__config_attrs(evlist, opts);
+
 	list_for_each_entry(pos, &evlist->entries, node) {
 		struct perf_event_attr *attr = &pos->attr;
 		struct xyarray *group_fd = NULL;
@@ -286,29 +202,27 @@
 		 */
 		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
 
-		if (group && pos != first)
+		if (opts->group && pos != first)
 			group_fd = first->fd;
-
-		config_attr(pos, evlist);
 retry_sample_id:
-		attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+		attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
-				     group_fd) < 0) {
+		if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
+				     opts->group, group_fd) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
 				ui__error_paranoid();
 				exit(EXIT_FAILURE);
-			} else if (err ==  ENODEV && cpu_list) {
+			} else if (err ==  ENODEV && opts->cpu_list) {
 				die("No such device - did you specify"
 					" an out-of-range profile CPU?\n");
-			} else if (err == EINVAL && sample_id_all_avail) {
+			} else if (err == EINVAL && opts->sample_id_all_avail) {
 				/*
 				 * Old kernel, no attr->sample_id_type_all field
 				 */
-				sample_id_all_avail = false;
-				if (!sample_time && !raw_samples && !time_needed)
+				opts->sample_id_all_avail = false;
+				if (!opts->sample_time && !opts->raw_samples && !time_needed)
 					attr->sample_type &= ~PERF_SAMPLE_TIME;
 
 				goto retry_sample_id;
@@ -358,10 +272,20 @@
 		exit(-1);
 	}
 
-	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
-		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+		if (errno == EPERM)
+			die("Permission error mapping pages.\n"
+			    "Consider increasing "
+			    "/proc/sys/kernel/perf_event_mlock_kb,\n"
+			    "or try again with a smaller value of -m/--mmap_pages.\n"
+			    "(current value: %d)\n", opts->mmap_pages);
+		else if (!is_power_of_2(opts->mmap_pages))
+			die("--mmap_pages/-m value must be a power of two.");
 
-	if (file_new)
+		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+	}
+
+	if (rec->file_new)
 		session->evlist = evlist;
 	else {
 		if (!perf_evlist__equal(session->evlist, evlist)) {
@@ -373,29 +297,32 @@
 	perf_session__update_sample_type(session);
 }
 
-static int process_buildids(void)
+static int process_buildids(struct perf_record *rec)
 {
-	u64 size = lseek(output, 0, SEEK_CUR);
+	u64 size = lseek(rec->output, 0, SEEK_CUR);
 
 	if (size == 0)
 		return 0;
 
-	session->fd = output;
-	return __perf_session__process_events(session, post_processing_offset,
-					      size - post_processing_offset,
+	rec->session->fd = rec->output;
+	return __perf_session__process_events(rec->session, rec->post_processing_offset,
+					      size - rec->post_processing_offset,
 					      size, &build_id__mark_dso_hit_ops);
 }
 
-static void atexit_header(void)
+static void perf_record__exit(int status __used, void *arg)
 {
-	if (!pipe_output) {
-		session->header.data_size += bytes_written;
+	struct perf_record *rec = arg;
 
-		if (!no_buildid)
-			process_buildids();
-		perf_session__write_header(session, evsel_list, output, true);
-		perf_session__delete(session);
-		perf_evlist__delete(evsel_list);
+	if (!rec->opts.pipe_output) {
+		rec->session->header.data_size += rec->bytes_written;
+
+		if (!rec->no_buildid)
+			process_buildids(rec);
+		perf_session__write_header(rec->session, rec->evlist,
+					   rec->output, true);
+		perf_session__delete(rec->session);
+		perf_evlist__delete(rec->evlist);
 		symbol__exit();
 	}
 }
@@ -403,7 +330,7 @@
 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 {
 	int err;
-	struct perf_session *psession = data;
+	struct perf_tool *tool = data;
 
 	if (machine__is_host(machine))
 		return;
@@ -416,8 +343,8 @@
 	 *method is used to avoid symbol missing when the first addr is
 	 *in module instead of in guest kernel.
 	 */
-	err = perf_event__synthesize_modules(process_synthesized_event,
-					     psession, machine);
+	err = perf_event__synthesize_modules(tool, process_synthesized_event,
+					     machine);
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -426,12 +353,11 @@
 	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
 	 * have no _text sometimes.
 	 */
-	err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
-						 psession, machine, "_text");
+	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+						 machine, "_text");
 	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
-							 psession, machine,
-							 "_stext");
+		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+							 machine, "_stext");
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -442,73 +368,71 @@
 	.type = PERF_RECORD_FINISHED_ROUND,
 };
 
-static void mmap_read_all(void)
+static void perf_record__mmap_read_all(struct perf_record *rec)
 {
 	int i;
 
-	for (i = 0; i < evsel_list->nr_mmaps; i++) {
-		if (evsel_list->mmap[i].base)
-			mmap_read(&evsel_list->mmap[i]);
+	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
+		if (rec->evlist->mmap[i].base)
+			perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
 	}
 
-	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
-		write_output(&finished_round_event, sizeof(finished_round_event));
+	if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO))
+		write_output(rec, &finished_round_event, sizeof(finished_round_event));
 }
 
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 {
 	struct stat st;
 	int flags;
-	int err;
+	int err, output;
 	unsigned long waking = 0;
-	int child_ready_pipe[2], go_pipe[2];
 	const bool forks = argc > 0;
-	char buf;
 	struct machine *machine;
+	struct perf_tool *tool = &rec->tool;
+	struct perf_record_opts *opts = &rec->opts;
+	struct perf_evlist *evsel_list = rec->evlist;
+	const char *output_name = rec->output_name;
+	struct perf_session *session;
 
-	progname = argv[0];
+	rec->progname = argv[0];
 
-	page_size = sysconf(_SC_PAGE_SIZE);
+	rec->page_size = sysconf(_SC_PAGE_SIZE);
 
-	atexit(sig_atexit);
+	on_exit(perf_record__sig_exit, rec);
 	signal(SIGCHLD, sig_handler);
 	signal(SIGINT, sig_handler);
 	signal(SIGUSR1, sig_handler);
 
-	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
-		perror("failed to create pipes");
-		exit(-1);
-	}
-
 	if (!output_name) {
 		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
-			pipe_output = 1;
+			opts->pipe_output = true;
 		else
-			output_name = "perf.data";
+			rec->output_name = output_name = "perf.data";
 	}
 	if (output_name) {
 		if (!strcmp(output_name, "-"))
-			pipe_output = 1;
+			opts->pipe_output = true;
 		else if (!stat(output_name, &st) && st.st_size) {
-			if (write_mode == WRITE_FORCE) {
+			if (rec->write_mode == WRITE_FORCE) {
 				char oldname[PATH_MAX];
 				snprintf(oldname, sizeof(oldname), "%s.old",
 					 output_name);
 				unlink(oldname);
 				rename(output_name, oldname);
 			}
-		} else if (write_mode == WRITE_APPEND) {
-			write_mode = WRITE_FORCE;
+		} else if (rec->write_mode == WRITE_APPEND) {
+			rec->write_mode = WRITE_FORCE;
 		}
 	}
 
 	flags = O_CREAT|O_RDWR;
-	if (write_mode == WRITE_APPEND)
-		file_new = 0;
+	if (rec->write_mode == WRITE_APPEND)
+		rec->file_new = 0;
 	else
 		flags |= O_TRUNC;
 
-	if (pipe_output)
+	if (opts->pipe_output)
 		output = STDOUT_FILENO;
 	else
 		output = open(output_name, flags, S_IRUSR | S_IWUSR);
@@ -517,17 +441,21 @@
 		exit(-1);
 	}
 
+	rec->output = output;
+
 	session = perf_session__new(output_name, O_WRONLY,
-				    write_mode == WRITE_FORCE, false, NULL);
+				    rec->write_mode == WRITE_FORCE, false, NULL);
 	if (session == NULL) {
 		pr_err("Not enough memory for reading perf file header\n");
 		return -1;
 	}
 
-	if (!no_buildid)
+	rec->session = session;
+
+	if (!rec->no_buildid)
 		perf_header__set_feat(&session->header, HEADER_BUILD_ID);
 
-	if (!file_new) {
+	if (!rec->file_new) {
 		err = perf_session__read_header(session, output);
 		if (err < 0)
 			goto out_delete_session;
@@ -549,94 +477,57 @@
 	perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
 	perf_header__set_feat(&session->header, HEADER_CPUID);
 
-	/* 512 kiB: default amount of unprivileged mlocked memory */
-	if (mmap_pages == UINT_MAX)
-		mmap_pages = (512 * 1024) / page_size;
-
 	if (forks) {
-		child_pid = fork();
-		if (child_pid < 0) {
-			perror("failed to fork");
-			exit(-1);
+		err = perf_evlist__prepare_workload(evsel_list, opts, argv);
+		if (err < 0) {
+			pr_err("Couldn't run the workload!\n");
+			goto out_delete_session;
 		}
-
-		if (!child_pid) {
-			if (pipe_output)
-				dup2(2, 1);
-			close(child_ready_pipe[0]);
-			close(go_pipe[1]);
-			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
-
-			/*
-			 * Do a dummy execvp to get the PLT entry resolved,
-			 * so we avoid the resolver overhead on the real
-			 * execvp call.
-			 */
-			execvp("", (char **)argv);
-
-			/*
-			 * Tell the parent we're ready to go
-			 */
-			close(child_ready_pipe[1]);
-
-			/*
-			 * Wait until the parent tells us to go.
-			 */
-			if (read(go_pipe[0], &buf, 1) == -1)
-				perror("unable to read pipe");
-
-			execvp(argv[0], (char **)argv);
-
-			perror(argv[0]);
-			kill(getppid(), SIGUSR1);
-			exit(-1);
-		}
-
-		if (!system_wide && target_tid == -1 && target_pid == -1)
-			evsel_list->threads->map[0] = child_pid;
-
-		close(child_ready_pipe[1]);
-		close(go_pipe[0]);
-		/*
-		 * wait for child to settle
-		 */
-		if (read(child_ready_pipe[0], &buf, 1) == -1) {
-			perror("unable to read pipe");
-			exit(-1);
-		}
-		close(child_ready_pipe[0]);
 	}
 
-	open_counters(evsel_list);
+	perf_record__open(rec);
 
 	/*
-	 * perf_session__delete(session) will be called at atexit_header()
+	 * perf_session__delete(session) will be called at perf_record__exit()
 	 */
-	atexit(atexit_header);
+	on_exit(perf_record__exit, rec);
 
-	if (pipe_output) {
+	if (opts->pipe_output) {
 		err = perf_header__write_pipe(output);
 		if (err < 0)
 			return err;
-	} else if (file_new) {
+	} else if (rec->file_new) {
 		err = perf_session__write_header(session, evsel_list,
 						 output, false);
 		if (err < 0)
 			return err;
 	}
 
-	post_processing_offset = lseek(output, 0, SEEK_CUR);
+	if (!!rec->no_buildid
+	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
+		pr_err("Couldn't generating buildids. "
+		       "Use --no-buildid to profile anyway.\n");
+		return -1;
+	}
 
-	if (pipe_output) {
-		err = perf_session__synthesize_attrs(session,
-						     process_synthesized_event);
+	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
+
+	machine = perf_session__find_host_machine(session);
+	if (!machine) {
+		pr_err("Couldn't find native kernel information.\n");
+		return -1;
+	}
+
+	if (opts->pipe_output) {
+		err = perf_event__synthesize_attrs(tool, session,
+						   process_synthesized_event);
 		if (err < 0) {
 			pr_err("Couldn't synthesize attrs.\n");
 			return err;
 		}
 
-		err = perf_event__synthesize_event_types(process_synthesized_event,
-							 session);
+		err = perf_event__synthesize_event_types(tool, process_synthesized_event,
+							 machine);
 		if (err < 0) {
 			pr_err("Couldn't synthesize event_types.\n");
 			return err;
@@ -651,56 +542,49 @@
 			 * return this more properly and also
 			 * propagate errors that now are calling die()
 			 */
-			err = perf_event__synthesize_tracing_data(output, evsel_list,
-								  process_synthesized_event,
-								  session);
+			err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
+								  process_synthesized_event);
 			if (err <= 0) {
 				pr_err("Couldn't record tracing data.\n");
 				return err;
 			}
-			advance_output(err);
+			advance_output(rec, err);
 		}
 	}
 
-	machine = perf_session__find_host_machine(session);
-	if (!machine) {
-		pr_err("Couldn't find native kernel information.\n");
-		return -1;
-	}
-
-	err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
-						 session, machine, "_text");
+	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+						 machine, "_text");
 	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
-							 session, machine, "_stext");
+		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+							 machine, "_stext");
 	if (err < 0)
 		pr_err("Couldn't record kernel reference relocation symbol\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 		       "Check /proc/kallsyms permission or run as root.\n");
 
-	err = perf_event__synthesize_modules(process_synthesized_event,
-					     session, machine);
+	err = perf_event__synthesize_modules(tool, process_synthesized_event,
+					     machine);
 	if (err < 0)
 		pr_err("Couldn't record kernel module information.\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 		       "Check /proc/modules permission or run as root.\n");
 
 	if (perf_guest)
-		perf_session__process_machines(session,
+		perf_session__process_machines(session, tool,
 					       perf_event__synthesize_guest_os);
 
-	if (!system_wide)
-		perf_event__synthesize_thread_map(evsel_list->threads,
+	if (!opts->system_wide)
+		perf_event__synthesize_thread_map(tool, evsel_list->threads,
 						  process_synthesized_event,
-						  session);
+						  machine);
 	else
-		perf_event__synthesize_threads(process_synthesized_event,
-					       session);
+		perf_event__synthesize_threads(tool, process_synthesized_event,
+					       machine);
 
-	if (realtime_prio) {
+	if (rec->realtime_prio) {
 		struct sched_param param;
 
-		param.sched_priority = realtime_prio;
+		param.sched_priority = rec->realtime_prio;
 		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
 			pr_err("Could not set realtime priority.\n");
 			exit(-1);
@@ -713,14 +597,14 @@
 	 * Let the child rip
 	 */
 	if (forks)
-		close(go_pipe[1]);
+		perf_evlist__start_workload(evsel_list);
 
 	for (;;) {
-		int hits = samples;
+		int hits = rec->samples;
 
-		mmap_read_all();
+		perf_record__mmap_read_all(rec);
 
-		if (hits == samples) {
+		if (hits == rec->samples) {
 			if (done)
 				break;
 			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
@@ -741,9 +625,9 @@
 	 */
 	fprintf(stderr,
 		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
-		(double)bytes_written / 1024.0 / 1024.0,
+		(double)rec->bytes_written / 1024.0 / 1024.0,
 		output_name,
-		bytes_written / 24);
+		rec->bytes_written / 24);
 
 	return 0;
 
@@ -758,58 +642,89 @@
 	NULL
 };
 
-static bool force, append_file;
+/*
+ * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
+ * because we need to have access to it in perf_record__exit, that is called
+ * after cmd_record() exits, but since record_options need to be accessible to
+ * builtin-script, leave it here.
+ *
+ * At least we don't touch it in all the other functions here directly.
+ *
+ * Just say no to tons of global variables, sigh.
+ */
+static struct perf_record record = {
+	.opts = {
+		.target_pid	     = -1,
+		.target_tid	     = -1,
+		.mmap_pages	     = UINT_MAX,
+		.user_freq	     = UINT_MAX,
+		.user_interval	     = ULLONG_MAX,
+		.freq		     = 1000,
+		.sample_id_all_avail = true,
+	},
+	.write_mode = WRITE_FORCE,
+	.file_new   = true,
+};
 
+/*
+ * XXX Will stay a global variable till we fix builtin-script.c to stop messing
+ * with it and switch to use the library functions in perf_evlist that came
+ * from builtin-record.c, i.e. use perf_record_opts,
+ * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
+ * using pipes, etc.
+ */
 const struct option record_options[] = {
-	OPT_CALLBACK('e', "event", &evsel_list, "event",
+	OPT_CALLBACK('e', "event", &record.evlist, "event",
 		     "event selector. use 'perf list' to list available events",
 		     parse_events_option),
-	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
+	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
 		     "event filter", parse_filter),
-	OPT_INTEGER('p', "pid", &target_pid,
+	OPT_INTEGER('p', "pid", &record.opts.target_pid,
 		    "record events on existing process id"),
-	OPT_INTEGER('t', "tid", &target_tid,
+	OPT_INTEGER('t', "tid", &record.opts.target_tid,
 		    "record events on existing thread id"),
-	OPT_INTEGER('r', "realtime", &realtime_prio,
+	OPT_INTEGER('r', "realtime", &record.realtime_prio,
 		    "collect data with this RT SCHED_FIFO priority"),
-	OPT_BOOLEAN('D', "no-delay", &nodelay,
+	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
 		    "collect data without buffering"),
-	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
+	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
 		    "collect raw sample records from all opened counters"),
-	OPT_BOOLEAN('a', "all-cpus", &system_wide,
+	OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
 			    "system-wide collection from all CPUs"),
-	OPT_BOOLEAN('A', "append", &append_file,
+	OPT_BOOLEAN('A', "append", &record.append_file,
 			    "append to the output file to do incremental profiling"),
-	OPT_STRING('C', "cpu", &cpu_list, "cpu",
+	OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
 		    "list of cpus to monitor"),
-	OPT_BOOLEAN('f', "force", &force,
+	OPT_BOOLEAN('f', "force", &record.force,
 			"overwrite existing data file (deprecated)"),
-	OPT_U64('c', "count", &user_interval, "event period to sample"),
-	OPT_STRING('o', "output", &output_name, "file",
+	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
+	OPT_STRING('o', "output", &record.output_name, "file",
 		    "output file name"),
-	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
+	OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
 		    "child tasks do not inherit counters"),
-	OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
-	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
-	OPT_BOOLEAN(0, "group", &group,
+	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
+	OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
+		     "number of mmap data pages"),
+	OPT_BOOLEAN(0, "group", &record.opts.group,
 		    "put the counters into a counter group"),
-	OPT_BOOLEAN('g', "call-graph", &call_graph,
+	OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
 		    "do call-graph (stack chain/backtrace) recording"),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
-	OPT_BOOLEAN('s', "stat", &inherit_stat,
+	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
 		    "per thread counts"),
-	OPT_BOOLEAN('d', "data", &sample_address,
+	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
 		    "Sample addresses"),
-	OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"),
-	OPT_BOOLEAN('n', "no-samples", &no_samples,
+	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
+	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
+	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
 		    "don't sample"),
-	OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
+	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
 		    "do not update the buildid cache"),
-	OPT_BOOLEAN('B', "no-buildid", &no_buildid,
+	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
 		    "do not collect buildids in perf.data"),
-	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
 		     "monitor event in cgroup name only",
 		     parse_cgroups),
 	OPT_END()
@@ -819,6 +734,8 @@
 {
 	int err = -ENOMEM;
 	struct perf_evsel *pos;
+	struct perf_evlist *evsel_list;
+	struct perf_record *rec = &record;
 
 	perf_header__set_cmdline(argc, argv);
 
@@ -826,23 +743,25 @@
 	if (evsel_list == NULL)
 		return -ENOMEM;
 
+	rec->evlist = evsel_list;
+
 	argc = parse_options(argc, argv, record_options, record_usage,
 			    PARSE_OPT_STOP_AT_NON_OPTION);
-	if (!argc && target_pid == -1 && target_tid == -1 &&
-		!system_wide && !cpu_list)
+	if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 &&
+		!rec->opts.system_wide && !rec->opts.cpu_list)
 		usage_with_options(record_usage, record_options);
 
-	if (force && append_file) {
+	if (rec->force && rec->append_file) {
 		fprintf(stderr, "Can't overwrite and append at the same time."
 				" You need to choose between -f and -A");
 		usage_with_options(record_usage, record_options);
-	} else if (append_file) {
-		write_mode = WRITE_APPEND;
+	} else if (rec->append_file) {
+		rec->write_mode = WRITE_APPEND;
 	} else {
-		write_mode = WRITE_FORCE;
+		rec->write_mode = WRITE_FORCE;
 	}
 
-	if (nr_cgroups && !system_wide) {
+	if (nr_cgroups && !rec->opts.system_wide) {
 		fprintf(stderr, "cgroup monitoring only available in"
 			" system-wide mode\n");
 		usage_with_options(record_usage, record_options);
@@ -860,7 +779,7 @@
 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
 "even with a suitable vmlinux or kallsyms file.\n\n");
 
-	if (no_buildid_cache || no_buildid)
+	if (rec->no_buildid_cache || rec->no_buildid)
 		disable_buildid_cache();
 
 	if (evsel_list->nr_entries == 0 &&
@@ -869,43 +788,37 @@
 		goto out_symbol_exit;
 	}
 
-	if (target_pid != -1)
-		target_tid = target_pid;
+	if (rec->opts.target_pid != -1)
+		rec->opts.target_tid = rec->opts.target_pid;
 
-	if (perf_evlist__create_maps(evsel_list, target_pid,
-				     target_tid, cpu_list) < 0)
+	if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
+				     rec->opts.target_tid, rec->opts.cpu_list) < 0)
 		usage_with_options(record_usage, record_options);
 
 	list_for_each_entry(pos, &evsel_list->entries, node) {
-		if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
-					 evsel_list->threads->nr) < 0)
-			goto out_free_fd;
 		if (perf_header__push_event(pos->attr.config, event_name(pos)))
 			goto out_free_fd;
 	}
 
-	if (perf_evlist__alloc_pollfd(evsel_list) < 0)
-		goto out_free_fd;
-
-	if (user_interval != ULLONG_MAX)
-		default_interval = user_interval;
-	if (user_freq != UINT_MAX)
-		freq = user_freq;
+	if (rec->opts.user_interval != ULLONG_MAX)
+		rec->opts.default_interval = rec->opts.user_interval;
+	if (rec->opts.user_freq != UINT_MAX)
+		rec->opts.freq = rec->opts.user_freq;
 
 	/*
 	 * User specified count overrides default frequency.
 	 */
-	if (default_interval)
-		freq = 0;
-	else if (freq) {
-		default_interval = freq;
+	if (rec->opts.default_interval)
+		rec->opts.freq = 0;
+	else if (rec->opts.freq) {
+		rec->opts.default_interval = rec->opts.freq;
 	} else {
 		fprintf(stderr, "frequency and count are zero, aborting\n");
 		err = -EINVAL;
 		goto out_free_fd;
 	}
 
-	err = __cmd_record(argc, argv);
+	err = __cmd_record(&record, argc, argv);
 out_free_fd:
 	perf_evlist__delete_maps(evsel_list);
 out_symbol_exit:
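
A smaller detail of the builtin-record conversion: atexit() handlers take no arguments, which is why the removed atexit_header() and sig_atexit() had to reach for globals, whereas the new perf_record__exit() and perf_record__sig_exit() are registered with glibc's on_exit() and receive the struct perf_record pointer directly. A minimal stand-alone sketch of that mechanism follows; everything in it except on_exit() itself is an invented name.

#define _DEFAULT_SOURCE			/* for on_exit() in glibc's <stdlib.h> */
#include <stdio.h>
#include <stdlib.h>

struct record_state {			/* stand-in for struct perf_record */
	const char	*output_name;
	unsigned long	bytes_written;
};

/* on_exit() handlers receive the exit status plus the pointer registered below. */
static void record_exit(int status, void *arg)
{
	struct record_state *rec = arg;

	fprintf(stderr, "[ wrote %lu bytes to %s, exit status %d ]\n",
		rec->bytes_written, rec->output_name, status);
}

int main(void)
{
	/* static: the handler runs after main() returns, so &rec must stay valid */
	static struct record_state rec = { .output_name = "perf.data" };

	if (on_exit(record_exit, &rec))
		return EXIT_FAILURE;

	rec.bytes_written = 4096;	/* pretend some event data was written */
	return 0;			/* record_exit(0, &rec) runs during exit */
}

The registered object has to outlive main(), hence the static storage in the sketch; in the patch above the same role is played by the file-scope struct perf_record record.
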
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4d7c834..25d34d4 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -25,6 +25,7 @@
 #include "util/evsel.h"
 #include "util/header.h"
 #include "util/session.h"
+#include "util/tool.h"
 
 #include "util/parse-options.h"
 #include "util/parse-events.h"
@@ -35,38 +36,35 @@
 
 #include <linux/bitmap.h>
 
-static char		const *input_name = "perf.data";
+struct perf_report {
+	struct perf_tool	tool;
+	struct perf_session	*session;
+	char const		*input_name;
+	bool			force, use_tui, use_stdio;
+	bool			hide_unresolved;
+	bool			dont_use_callchains;
+	bool			show_full_info;
+	bool			show_threads;
+	bool			inverted_callchain;
+	struct perf_read_values	show_threads_values;
+	const char		*pretty_printing_style;
+	symbol_filter_t		annotate_init;
+	const char		*cpu_list;
+	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+};
 
-static bool		force, use_tui, use_stdio;
-static bool		hide_unresolved;
-static bool		dont_use_callchains;
-static bool		show_full_info;
-
-static bool		show_threads;
-static struct perf_read_values	show_threads_values;
-
-static const char	default_pretty_printing_style[] = "normal";
-static const char	*pretty_printing_style = default_pretty_printing_style;
-
-static char		callchain_default_opt[] = "fractal,0.5,callee";
-static bool		inverted_callchain;
-static symbol_filter_t	annotate_init;
-
-static const char	*cpu_list;
-static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
-
-static int perf_session__add_hist_entry(struct perf_session *session,
-					struct addr_location *al,
-					struct perf_sample *sample,
-					struct perf_evsel *evsel)
+static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+				      struct addr_location *al,
+				      struct perf_sample *sample,
+				      struct machine *machine)
 {
 	struct symbol *parent = NULL;
 	int err = 0;
 	struct hist_entry *he;
 
 	if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
-		err = perf_session__resolve_callchain(session, al->thread,
-						      sample->callchain, &parent);
+		err = machine__resolve_callchain(machine, evsel, al->thread,
+						 sample->callchain, &parent);
 		if (err)
 			return err;
 	}
@@ -76,7 +74,8 @@
 		return -ENOMEM;
 
 	if (symbol_conf.use_callchain) {
-		err = callchain_append(he->callchain, &session->callchain_cursor,
+		err = callchain_append(he->callchain,
+				       &evsel->hists.callchain_cursor,
 				       sample->period);
 		if (err)
 			return err;
@@ -92,8 +91,7 @@
 		assert(evsel != NULL);
 
 		err = -ENOMEM;
-		if (notes->src == NULL &&
-		    symbol__alloc_hist(he->ms.sym, session->evlist->nr_entries) < 0)
+		if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
 			goto out;
 
 		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
@@ -106,30 +104,32 @@
 }
 
 
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool,
+				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
+	struct perf_report *rep = container_of(tool, struct perf_report, tool);
 	struct addr_location al;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
-					  annotate_init) < 0) {
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
+					  rep->annotate_init) < 0) {
 		fprintf(stderr, "problem processing %d event, skipping it.\n",
 			event->header.type);
 		return -1;
 	}
 
-	if (al.filtered || (hide_unresolved && al.sym == NULL))
+	if (al.filtered || (rep->hide_unresolved && al.sym == NULL))
 		return 0;
 
-	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
 		return 0;
 
 	if (al.map != NULL)
 		al.map->dso->hit = 1;
 
-	if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
+	if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) {
 		pr_debug("problem incrementing symbol period, skipping event\n");
 		return -1;
 	}
@@ -137,15 +137,17 @@
 	return 0;
 }
 
-static int process_read_event(union perf_event *event,
+static int process_read_event(struct perf_tool *tool,
+			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session)
+			      struct perf_evsel *evsel,
+			      struct machine *machine __used)
 {
-	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist,
-							 event->read.id);
-	if (show_threads) {
+	struct perf_report *rep = container_of(tool, struct perf_report, tool);
+
+	if (rep->show_threads) {
 		const char *name = evsel ? event_name(evsel) : "unknown";
-		perf_read_values_add_value(&show_threads_values,
+		perf_read_values_add_value(&rep->show_threads_values,
 					   event->read.pid, event->read.tid,
 					   event->read.id,
 					   name,
@@ -159,8 +161,10 @@
 	return 0;
 }
 
-static int perf_session__setup_sample_type(struct perf_session *self)
+static int perf_report__setup_sample_type(struct perf_report *rep)
 {
+	struct perf_session *self = rep->session;
+
 	if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
 		if (sort__has_parent) {
 			ui__warning("Selected --sort parent, but no "
@@ -173,7 +177,8 @@
 				    "you call 'perf record' without -g?\n");
 			return -1;
 		}
-	} else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
+	} else if (!rep->dont_use_callchains &&
+		   callchain_param.mode != CHAIN_NONE &&
 		   !symbol_conf.use_callchain) {
 			symbol_conf.use_callchain = true;
 			if (callchain_register_param(&callchain_param) < 0) {
@@ -186,22 +191,6 @@
 	return 0;
 }
 
-static struct perf_event_ops event_ops = {
-	.sample		 = process_sample_event,
-	.mmap		 = perf_event__process_mmap,
-	.comm		 = perf_event__process_comm,
-	.exit		 = perf_event__process_task,
-	.fork		 = perf_event__process_task,
-	.lost		 = perf_event__process_lost,
-	.read		 = process_read_event,
-	.attr		 = perf_event__process_attr,
-	.event_type	 = perf_event__process_event_type,
-	.tracing_data	 = perf_event__process_tracing_data,
-	.build_id	 = perf_event__process_build_id,
-	.ordered_samples = true,
-	.ordering_requires_timestamps = true,
-};
-
 extern volatile int session_done;
 
 static void sig_handler(int sig __used)
@@ -224,6 +213,7 @@
 }
 
 static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
+					 struct perf_report *rep,
 					 const char *help)
 {
 	struct perf_evsel *pos;
@@ -241,18 +231,18 @@
 	    parent_pattern == default_parent_pattern) {
 		fprintf(stdout, "#\n# (%s)\n#\n", help);
 
-		if (show_threads) {
-			bool style = !strcmp(pretty_printing_style, "raw");
-			perf_read_values_display(stdout, &show_threads_values,
+		if (rep->show_threads) {
+			bool style = !strcmp(rep->pretty_printing_style, "raw");
+			perf_read_values_display(stdout, &rep->show_threads_values,
 						 style);
-			perf_read_values_destroy(&show_threads_values);
+			perf_read_values_destroy(&rep->show_threads_values);
 		}
 	}
 
 	return 0;
 }
 
-static int __cmd_report(void)
+static int __cmd_report(struct perf_report *rep)
 {
 	int ret = -EINVAL;
 	u64 nr_samples;
@@ -264,27 +254,31 @@
 
 	signal(SIGINT, sig_handler);
 
-	session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
+	session = perf_session__new(rep->input_name, O_RDONLY,
+				    rep->force, false, &rep->tool);
 	if (session == NULL)
 		return -ENOMEM;
 
-	if (cpu_list) {
-		ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+	rep->session = session;
+
+	if (rep->cpu_list) {
+		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
+					       rep->cpu_bitmap);
 		if (ret)
 			goto out_delete;
 	}
 
 	if (use_browser <= 0)
-		perf_session__fprintf_info(session, stdout, show_full_info);
+		perf_session__fprintf_info(session, stdout, rep->show_full_info);
 
-	if (show_threads)
-		perf_read_values_init(&show_threads_values);
+	if (rep->show_threads)
+		perf_read_values_init(&rep->show_threads_values);
 
-	ret = perf_session__setup_sample_type(session);
+	ret = perf_report__setup_sample_type(rep);
 	if (ret)
 		goto out_delete;
 
-	ret = perf_session__process_events(session, &event_ops);
+	ret = perf_session__process_events(session, &rep->tool);
 	if (ret)
 		goto out_delete;
 
@@ -327,7 +321,7 @@
 	}
 
 	if (nr_samples == 0) {
-		ui__warning("The %s file has no samples!\n", input_name);
+		ui__warning("The %s file has no samples!\n", session->filename);
 		goto out_delete;
 	}
 
@@ -335,7 +329,7 @@
 		perf_evlist__tui_browse_hists(session->evlist, help,
 					      NULL, NULL, 0);
 	} else
-		perf_evlist__tty_browse_hists(session->evlist, help);
+		perf_evlist__tty_browse_hists(session->evlist, rep, help);
 
 out_delete:
 	/*
@@ -354,9 +348,9 @@
 }
 
 static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
-		    int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
+	struct perf_report *rep = (struct perf_report *)opt->value;
 	char *tok, *tok2;
 	char *endptr;
 
@@ -364,7 +358,7 @@
 	 * --no-call-graph
 	 */
 	if (unset) {
-		dont_use_callchains = true;
+		rep->dont_use_callchains = true;
 		return 0;
 	}
 
@@ -412,7 +406,7 @@
 		goto setup;
 
 	if (tok2[0] != 'c') {
-		callchain_param.print_limit = strtod(tok2, &endptr);
+		callchain_param.print_limit = strtoul(tok2, &endptr, 0);
 		tok2 = strtok(NULL, ",");
 		if (!tok2)
 			goto setup;
@@ -433,13 +427,34 @@
 	return 0;
 }
 
-static const char * const report_usage[] = {
-	"perf report [<options>] <command>",
-	NULL
-};
-
-static const struct option options[] = {
-	OPT_STRING('i', "input", &input_name, "file",
+int cmd_report(int argc, const char **argv, const char *prefix __used)
+{
+	struct stat st;
+	char callchain_default_opt[] = "fractal,0.5,callee";
+	const char * const report_usage[] = {
+		"perf report [<options>]",
+		NULL
+	};
+	struct perf_report report = {
+		.tool = {
+			.sample		 = process_sample_event,
+			.mmap		 = perf_event__process_mmap,
+			.comm		 = perf_event__process_comm,
+			.exit		 = perf_event__process_task,
+			.fork		 = perf_event__process_task,
+			.lost		 = perf_event__process_lost,
+			.read		 = process_read_event,
+			.attr		 = perf_event__process_attr,
+			.event_type	 = perf_event__process_event_type,
+			.tracing_data	 = perf_event__process_tracing_data,
+			.build_id	 = perf_event__process_build_id,
+			.ordered_samples = true,
+			.ordering_requires_timestamps = true,
+		},
+		.pretty_printing_style	 = "normal",
+	};
+	const struct option options[] = {
+	OPT_STRING('i', "input", &report.input_name, "file",
 		    "input file name"),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
@@ -449,17 +464,18 @@
 		   "file", "vmlinux pathname"),
 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
 		   "file", "kallsyms pathname"),
-	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+	OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"),
 	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
 		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
 		    "Show a column with the number of samples"),
-	OPT_BOOLEAN('T', "threads", &show_threads,
+	OPT_BOOLEAN('T', "threads", &report.show_threads,
 		    "Show per-thread event counters"),
-	OPT_STRING(0, "pretty", &pretty_printing_style, "key",
+	OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
 		   "pretty printing style key: normal raw"),
-	OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
-	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+	OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+	OPT_BOOLEAN(0, "stdio", &report.use_stdio,
+		    "Use the stdio interface"),
 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
 		   "sort by key(s): pid, comm, dso, symbol, parent"),
 	OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -468,13 +484,14 @@
 		   "regex filter to identify parent, see: '--sort parent'"),
 	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
 		    "Only display entries with parent-match"),
-	OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent, call_order",
-		     "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold and callchain order. "
+	OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order",
+		     "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. "
 		     "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt),
-	OPT_BOOLEAN('G', "inverted", &inverted_callchain, "alias for inverted call graph"),
+	OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
+		    "alias for inverted call graph"),
 	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
 		   "only consider symbols in these dsos"),
-	OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
 		   "only consider symbols in these comms"),
 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
 		   "only consider these symbols"),
@@ -484,12 +501,13 @@
 	OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
 		   "separator for columns, no spaces will be added between "
 		   "columns '.' is reserved."),
-	OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
+	OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
 		    "Only display entries resolved to a symbol"),
 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
 		    "Look for files with symbols relative to this directory"),
-	OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
-	OPT_BOOLEAN('I', "show-info", &show_full_info,
+	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
+		   "list of cpus to profile"),
+	OPT_BOOLEAN('I', "show-info", &report.show_full_info,
 		    "Display extended information about perf.data file"),
 	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
 		    "Interleave source code with assembly code (default)"),
@@ -500,24 +518,30 @@
 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
 		    "Show a column with the sum of periods"),
 	OPT_END()
-};
+	};
 
-int cmd_report(int argc, const char **argv, const char *prefix __used)
-{
 	argc = parse_options(argc, argv, options, report_usage, 0);
 
-	if (use_stdio)
+	if (report.use_stdio)
 		use_browser = 0;
-	else if (use_tui)
+	else if (report.use_tui)
 		use_browser = 1;
 
-	if (inverted_callchain)
+	if (report.inverted_callchain)
 		callchain_param.order = ORDER_CALLER;
 
-	if (strcmp(input_name, "-") != 0)
+	if (!report.input_name || !strlen(report.input_name)) {
+		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+			report.input_name = "-";
+		else
+			report.input_name = "perf.data";
+	}
+
+	if (strcmp(report.input_name, "-") != 0)
 		setup_browser(true);
 	else
 		use_browser = 0;
+
 	/*
 	 * Only in the newt browser we are doing integrated annotation,
 	 * so don't allocate extra space that won't be used in the stdio
@@ -525,7 +549,7 @@
 	 */
 	if (use_browser > 0) {
 		symbol_conf.priv_size = sizeof(struct annotation);
-		annotate_init	      = symbol__annotate_init;
+		report.annotate_init  = symbol__annotate_init;
 		/*
  		 * For searching by name on the "Browse map details".
  		 * providing it only in verbose mode not to bloat too
@@ -572,5 +596,5 @@
 	sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
 	sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
 
-	return __cmd_report();
+	return __cmd_report(&report);
 }
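
For reference, a minimal standalone sketch of the container_of() pattern the report callbacks now rely on: the tool struct is embedded in a larger per-command state object, and each callback recovers that object from the tool pointer it is handed, exactly as process_sample_event() does with struct perf_report above. The struct layout and field names below are illustrative stand-ins, not perf's actual definitions.

/* Sketch only; not part of the patch above. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*sample)(struct tool *tool, int value);
};

struct report {
	struct tool tool;	/* embedded, so container_of() can walk back */
	int hide_unresolved;	/* hypothetical per-report state */
};

static int sample_cb(struct tool *tool, int value)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (rep->hide_unresolved && value < 0)
		return 0;
	printf("value=%d\n", value);
	return 0;
}

int main(void)
{
	struct report rep = {
		.tool		 = { .sample = sample_cb },
		.hide_unresolved = 1,
	};

	return rep.tool.sample(&rep.tool, 42);
}
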
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5177964..fb8b5f8 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2,11 +2,14 @@
 #include "perf.h"
 
 #include "util/util.h"
+#include "util/evlist.h"
 #include "util/cache.h"
+#include "util/evsel.h"
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/header.h"
 #include "util/session.h"
+#include "util/tool.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
@@ -19,7 +22,7 @@
 #include <pthread.h>
 #include <math.h>
 
-static char			const *input_name = "perf.data";
+static const char		*input_name;
 
 static char			default_sort_order[] = "avg, max, switch, runtime";
 static const char		*sort_order = default_sort_order;
@@ -723,21 +726,21 @@
 
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
-			     struct perf_session *,
+			     struct machine *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
 			     struct thread *thread);
 
 	void (*runtime_event)(struct trace_runtime_event *,
-			      struct perf_session *,
+			      struct machine *,
 			      struct event *,
 			      int cpu,
 			      u64 timestamp,
 			      struct thread *thread);
 
 	void (*wakeup_event)(struct trace_wakeup_event *,
-			     struct perf_session *,
+			     struct machine *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
@@ -750,7 +753,7 @@
 			   struct thread *thread);
 
 	void (*migrate_task_event)(struct trace_migrate_task_event *,
-			   struct perf_session *session,
+			   struct machine *machine,
 			   struct event *,
 			   int cpu,
 			   u64 timestamp,
@@ -760,7 +763,7 @@
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
-		    struct perf_session *session __used,
+		    struct machine *machine __used,
 		    struct event *event,
 		    int cpu __used,
 		    u64 timestamp __used,
@@ -787,7 +790,7 @@
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
-		    struct perf_session *session __used,
+		    struct machine *machine __used,
 		    struct event *event,
 		    int cpu,
 		    u64 timestamp,
@@ -1021,7 +1024,7 @@
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
@@ -1045,8 +1048,8 @@
 		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-	sched_out = perf_session__findnew(session, switch_event->prev_pid);
-	sched_in = perf_session__findnew(session, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1074,13 +1077,13 @@
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
 		     struct thread *this_thread __used)
 {
-	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
 	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1097,7 +1100,7 @@
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1111,7 +1114,7 @@
 	if (!wakeup_event->success)
 		return;
 
-	wakee = perf_session__findnew(session, wakeup_event->pid);
+	wakee = machine__findnew_thread(machine, wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1145,7 +1148,7 @@
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1161,7 +1164,7 @@
 	if (profile_cpu == -1)
 		return;
 
-	migrant = perf_session__findnew(session, migrate_task_event->pid);
+	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
 	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(migrant);
@@ -1356,12 +1359,13 @@
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data, struct perf_session *session,
+process_sched_wakeup_event(struct perf_tool *tool __used,
 			   struct event *event,
-			   int cpu __used,
-			   u64 timestamp __used,
-			   struct thread *thread __used)
+			   struct perf_sample *sample,
+			   struct machine *machine,
+			   struct thread *thread)
 {
+	void *data = sample->raw_data;
 	struct trace_wakeup_event wakeup_event;
 
 	FILL_COMMON_FIELDS(wakeup_event, event, data);
@@ -1373,8 +1377,8 @@
 	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
-		trace_handler->wakeup_event(&wakeup_event, session, event,
-					    cpu, timestamp, thread);
+		trace_handler->wakeup_event(&wakeup_event, machine, event,
+					    sample->cpu, sample->time, thread);
 }
 
 /*
@@ -1392,7 +1396,7 @@
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
-		 struct perf_session *session,
+		 struct machine *machine,
 		 struct event *event __used,
 		 int this_cpu,
 		 u64 timestamp,
@@ -1420,8 +1424,8 @@
 		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-	sched_out = perf_session__findnew(session, switch_event->prev_pid);
-	sched_in = perf_session__findnew(session, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1469,14 +1473,15 @@
 	}
 }
 
-
 static void
-process_sched_switch_event(void *data, struct perf_session *session,
+process_sched_switch_event(struct perf_tool *tool __used,
 			   struct event *event,
-			   int this_cpu,
-			   u64 timestamp __used,
-			   struct thread *thread __used)
+			   struct perf_sample *sample,
+			   struct machine *machine,
+			   struct thread *thread)
 {
+	int this_cpu = sample->cpu;
+	void *data = sample->raw_data;
 	struct trace_switch_event switch_event;
 
 	FILL_COMMON_FIELDS(switch_event, event, data);
@@ -1498,19 +1503,20 @@
 			nr_context_switch_bugs++;
 	}
 	if (trace_handler->switch_event)
-		trace_handler->switch_event(&switch_event, session, event,
-					    this_cpu, timestamp, thread);
+		trace_handler->switch_event(&switch_event, machine, event,
+					    this_cpu, sample->time, thread);
 
 	curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data, struct perf_session *session,
-			   struct event *event,
-			   int cpu __used,
-			   u64 timestamp __used,
-			   struct thread *thread __used)
+process_sched_runtime_event(struct perf_tool *tool __used,
+			    struct event *event,
+			    struct perf_sample *sample,
+			    struct machine *machine,
+			    struct thread *thread)
 {
+	void *data = sample->raw_data;
 	struct trace_runtime_event runtime_event;
 
 	FILL_ARRAY(runtime_event, comm, event, data);
@@ -1519,16 +1525,18 @@
 	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
-		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+		trace_handler->runtime_event(&runtime_event, machine, event,
+					     sample->cpu, sample->time, thread);
 }
 
 static void
-process_sched_fork_event(void *data,
+process_sched_fork_event(struct perf_tool *tool __used,
 			 struct event *event,
-			 int cpu __used,
-			 u64 timestamp __used,
-			 struct thread *thread __used)
+			 struct perf_sample *sample,
+			 struct machine *machine __used,
+			 struct thread *thread)
 {
+	void *data = sample->raw_data;
 	struct trace_fork_event fork_event;
 
 	FILL_COMMON_FIELDS(fork_event, event, data);
@@ -1540,13 +1548,14 @@
 
 	if (trace_handler->fork_event)
 		trace_handler->fork_event(&fork_event, event,
-					  cpu, timestamp, thread);
+					  sample->cpu, sample->time, thread);
 }
 
 static void
-process_sched_exit_event(struct event *event,
-			 int cpu __used,
-			 u64 timestamp __used,
+process_sched_exit_event(struct perf_tool *tool __used,
+			 struct event *event,
+			 struct perf_sample *sample __used,
+			 struct machine *machine __used,
 			 struct thread *thread __used)
 {
 	if (verbose)
@@ -1554,12 +1563,13 @@
 }
 
 static void
-process_sched_migrate_task_event(void *data, struct perf_session *session,
-			   struct event *event,
-			   int cpu __used,
-			   u64 timestamp __used,
-			   struct thread *thread __used)
+process_sched_migrate_task_event(struct perf_tool *tool __used,
+				 struct event *event,
+				 struct perf_sample *sample,
+				 struct machine *machine,
+				 struct thread *thread)
 {
+	void *data = sample->raw_data;
 	struct trace_migrate_task_event migrate_task_event;
 
 	FILL_COMMON_FIELDS(migrate_task_event, event, data);
@@ -1570,67 +1580,47 @@
 	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
-		trace_handler->migrate_task_event(&migrate_task_event, session,
-						 event, cpu, timestamp, thread);
+		trace_handler->migrate_task_event(&migrate_task_event, machine,
+						  event, sample->cpu,
+						  sample->time, thread);
 }
 
-static void process_raw_event(union perf_event *raw_event __used,
-			      struct perf_session *session, void *data, int cpu,
-			      u64 timestamp, struct thread *thread)
+typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event,
+				   struct perf_sample *sample,
+				   struct machine *machine,
+				   struct thread *thread);
+
+static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
+						 union perf_event *event __used,
+						 struct perf_sample *sample,
+						 struct perf_evsel *evsel,
+						 struct machine *machine)
 {
-	struct event *event;
-	int type;
+	struct thread *thread = machine__findnew_thread(machine, sample->pid);
 
-
-	type = trace_parse_common_type(data);
-	event = trace_find_event(type);
-
-	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
-	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
-	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
-	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
-	if (!strcmp(event->name, "sched_process_fork"))
-		process_sched_fork_event(data, event, cpu, timestamp, thread);
-	if (!strcmp(event->name, "sched_process_exit"))
-		process_sched_exit_event(event, cpu, timestamp, thread);
-	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
-}
-
-static int process_sample_event(union perf_event *event,
-				struct perf_sample *sample,
-				struct perf_evsel *evsel __used,
-				struct perf_session *session)
-{
-	struct thread *thread;
-
-	if (!(session->sample_type & PERF_SAMPLE_RAW))
-		return 0;
-
-	thread = perf_session__findnew(session, sample->pid);
 	if (thread == NULL) {
-		pr_debug("problem processing %d event, skipping it.\n",
-			 event->header.type);
+		pr_debug("problem processing %s event, skipping it.\n",
+			 evsel->name);
 		return -1;
 	}
 
-	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+	evsel->hists.stats.total_period += sample->period;
+	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 
-	if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
-		return 0;
+	if (evsel->handler.func != NULL) {
+		tracepoint_handler f = evsel->handler.func;
 
-	process_raw_event(event, session, sample->raw_data, sample->cpu,
-			  sample->time, thread);
+		if (evsel->handler.data == NULL)
+			evsel->handler.data = trace_find_event(evsel->attr.config);
+
+		f(tool, evsel->handler.data, sample, machine, thread);
+	}
 
 	return 0;
 }
 
-static struct perf_event_ops event_ops = {
-	.sample			= process_sample_event,
+static struct perf_tool perf_sched = {
+	.sample			= perf_sched__process_tracepoint_sample,
 	.comm			= perf_event__process_comm,
 	.lost			= perf_event__process_lost,
 	.fork			= perf_event__process_task,
@@ -1640,13 +1630,25 @@
 static void read_events(bool destroy, struct perf_session **psession)
 {
 	int err = -EINVAL;
+	const struct perf_evsel_str_handler handlers[] = {
+		{ "sched:sched_switch",	      process_sched_switch_event, },
+		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
+		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
+		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
+		{ "sched:sched_process_fork", process_sched_fork_event, },
+		{ "sched:sched_process_exit", process_sched_exit_event, },
+		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
+	};
 	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-							 0, false, &event_ops);
+							 0, false, &perf_sched);
 	if (session == NULL)
 		die("No Memory");
 
+	err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+	assert(err == 0);
+
 	if (perf_session__has_traces(session, "record -R")) {
-		err = perf_session__process_events(session, &event_ops);
+		err = perf_session__process_events(session, &perf_sched);
 		if (err)
 			die("Failed to process events, error %d", err);
 
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 2f62a29..fd1909a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -7,6 +7,7 @@
 #include "util/header.h"
 #include "util/parse-options.h"
 #include "util/session.h"
+#include "util/tool.h"
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/trace-event.h"
@@ -23,6 +24,7 @@
 extern const struct option	record_options[];
 static bool			no_callchain;
 static bool			show_full_info;
+static bool			system_wide;
 static const char		*cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 
@@ -315,7 +317,7 @@
 
 static void print_sample_addr(union perf_event *event,
 			  struct perf_sample *sample,
-			  struct perf_session *session,
+			  struct machine *machine,
 			  struct thread *thread,
 			  struct perf_event_attr *attr)
 {
@@ -328,11 +330,11 @@
 	if (!sample_addr_correlates_sym(attr))
 		return;
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, sample->addr, &al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      sample->addr, &al);
 	if (!al.map)
-		thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE,
-				      event->ip.pid, sample->addr, &al);
+		thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
+				      sample->addr, &al);
 
 	al.cpu = sample->cpu;
 	al.sym = NULL;
@@ -362,7 +364,7 @@
 static void process_event(union perf_event *event __unused,
 			  struct perf_sample *sample,
 			  struct perf_evsel *evsel,
-			  struct perf_session *session,
+			  struct machine *machine,
 			  struct thread *thread)
 {
 	struct perf_event_attr *attr = &evsel->attr;
@@ -377,15 +379,15 @@
 				  sample->raw_size);
 
 	if (PRINT_FIELD(ADDR))
-		print_sample_addr(event, sample, session, thread, attr);
+		print_sample_addr(event, sample, machine, thread, attr);
 
 	if (PRINT_FIELD(IP)) {
 		if (!symbol_conf.use_callchain)
 			printf(" ");
 		else
 			printf("\n");
-		perf_session__print_ip(event, sample, session,
-					      PRINT_FIELD(SYM), PRINT_FIELD(DSO));
+		perf_event__print_ip(event, sample, machine, evsel,
+				     PRINT_FIELD(SYM), PRINT_FIELD(DSO));
 	}
 
 	printf("\n");
@@ -432,14 +434,16 @@
 	return scripting_ops->stop_script();
 }
 
-static char const		*input_name = "perf.data";
+static const char *input_name;
 
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct addr_location al;
+	struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
@@ -458,16 +462,25 @@
 		return 0;
 	}
 
+	if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) {
+		pr_err("problem processing %d event, skipping it.\n",
+		       event->header.type);
+		return -1;
+	}
+
+	if (al.filtered)
+		return 0;
+
 	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
 		return 0;
 
-	scripting_ops->process_event(event, sample, evsel, session, thread);
+	scripting_ops->process_event(event, sample, evsel, machine, thread);
 
-	session->hists.stats.total_period += sample->period;
+	evsel->hists.stats.total_period += sample->period;
 	return 0;
 }
 
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_script = {
 	.sample		 = process_sample_event,
 	.mmap		 = perf_event__process_mmap,
 	.comm		 = perf_event__process_comm,
@@ -494,7 +507,7 @@
 
 	signal(SIGINT, sig_handler);
 
-	ret = perf_session__process_events(session, &event_ops);
+	ret = perf_session__process_events(session, &perf_script);
 
 	if (debug_mode)
 		pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -523,12 +536,6 @@
 	return s;
 }
 
-static void script_spec__delete(struct script_spec *s)
-{
-	free(s->spec);
-	free(s);
-}
-
 static void script_spec__add(struct script_spec *s)
 {
 	list_add_tail(&s->node, &script_specs);
@@ -554,16 +561,11 @@
 
 	s = script_spec__new(spec, ops);
 	if (!s)
-		goto out_delete_spec;
+		return NULL;
 
 	script_spec__add(s);
 
 	return s;
-
-out_delete_spec:
-	script_spec__delete(s);
-
-	return NULL;
 }
 
 int script_spec_register(const char *spec, struct scripting_ops *ops)
@@ -681,7 +683,8 @@
 			type = PERF_TYPE_RAW;
 		else {
 			fprintf(stderr, "Invalid event type in field string.\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto out;
 		}
 
 		if (output[type].user_set)
@@ -923,6 +926,24 @@
 	return 0;
 }
 
+static char *get_script_root(struct dirent *script_dirent, const char *suffix)
+{
+	char *script_root, *str;
+
+	script_root = strdup(script_dirent->d_name);
+	if (!script_root)
+		return NULL;
+
+	str = (char *)ends_with(script_root, suffix);
+	if (!str) {
+		free(script_root);
+		return NULL;
+	}
+
+	*str = '\0';
+	return script_root;
+}
+
 static int list_available_scripts(const struct option *opt __used,
 				  const char *s __used, int unset __used)
 {
@@ -934,7 +955,6 @@
 	struct script_desc *desc;
 	char first_half[BUFSIZ];
 	char *script_root;
-	char *str;
 
 	snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
 
@@ -950,16 +970,14 @@
 			continue;
 
 		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-			script_root = strdup(script_dirent.d_name);
-			str = (char *)ends_with(script_root, REPORT_SUFFIX);
-			if (str) {
-				*str = '\0';
+			script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+			if (script_root) {
 				desc = script_desc__findnew(script_root);
 				snprintf(script_path, MAXPATHLEN, "%s/%s",
 					 lang_path, script_dirent.d_name);
 				read_script_info(desc, script_path);
+				free(script_root);
 			}
-			free(script_root);
 		}
 	}
 
@@ -981,8 +999,7 @@
 	char script_path[MAXPATHLEN];
 	DIR *scripts_dir, *lang_dir;
 	char lang_path[MAXPATHLEN];
-	char *str, *__script_root;
-	char *path = NULL;
+	char *__script_root;
 
 	snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
 
@@ -998,23 +1015,18 @@
 			continue;
 
 		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-			__script_root = strdup(script_dirent.d_name);
-			str = (char *)ends_with(__script_root, suffix);
-			if (str) {
-				*str = '\0';
-				if (strcmp(__script_root, script_root))
-					continue;
+			__script_root = get_script_root(&script_dirent, suffix);
+			if (__script_root && !strcmp(script_root, __script_root)) {
+				free(__script_root);
 				snprintf(script_path, MAXPATHLEN, "%s/%s",
 					 lang_path, script_dirent.d_name);
-				path = strdup(script_path);
-				free(__script_root);
-				break;
+				return strdup(script_path);
 			}
 			free(__script_root);
 		}
 	}
 
-	return path;
+	return NULL;
 }
 
 static bool is_top_script(const char *script_path)
@@ -1083,7 +1095,11 @@
 	OPT_CALLBACK('f', "fields", NULL, "str",
 		     "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr",
 		     parse_output_fields),
-	OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+	OPT_BOOLEAN('a', "all-cpus", &system_wide,
+		     "system-wide collection from all CPUs"),
+	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+		   "only display events for these comms"),
 	OPT_BOOLEAN('I', "show-info", &show_full_info,
 		    "display extended information from perf.data file"),
 	OPT_END()
@@ -1110,7 +1126,6 @@
 	struct perf_session *session;
 	char *script_path = NULL;
 	const char **__argv;
-	bool system_wide;
 	int i, j, err;
 
 	setup_scripting();
@@ -1178,15 +1193,17 @@
 		}
 
 		if (!pid) {
-			system_wide = true;
 			j = 0;
 
 			dup2(live_pipe[1], 1);
 			close(live_pipe[0]);
 
-			if (!is_top_script(argv[0]))
+			if (is_top_script(argv[0])) {
+				system_wide = true;
+			} else if (!system_wide) {
 				system_wide = !have_cmd(argc - rep_args,
 							&argv[rep_args]);
+			}
 
 			__argv = malloc((argc + 6) * sizeof(const char *));
 			if (!__argv)
@@ -1234,10 +1251,11 @@
 		script_path = rep_script_path;
 
 	if (script_path) {
-		system_wide = false;
 		j = 0;
 
-		if (rec_script_path)
+		if (!rec_script_path)
+			system_wide = false;
+		else if (!system_wide)
 			system_wide = !have_cmd(argc - 1, &argv[1]);
 
 		__argv = malloc((argc + 2) * sizeof(const char *));
@@ -1261,7 +1279,7 @@
 	if (!script_name)
 		setup_pager();
 
-	session = perf_session__new(input_name, O_RDONLY, 0, false, &event_ops);
+	session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script);
 	if (session == NULL)
 		return -ENOMEM;
 
@@ -1287,7 +1305,7 @@
 			return -1;
 		}
 
-		input = open(input_name, O_RDONLY);
+		input = open(session->filename, O_RDONLY);	/* input_name */
 		if (input < 0) {
 			perror("failed to open file");
 			exit(-1);
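
For reference, a standalone restatement of the strdup-then-truncate suffix stripping that get_script_root() factors out above. ends_with() is reimplemented locally so the example compiles on its own; perf carries its own helper with a slightly different signature.

/* Sketch only; not part of the patch above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *ends_with(char *str, const char *suffix)
{
	size_t len = strlen(str), slen = strlen(suffix);

	if (len < slen || strcmp(str + len - slen, suffix))
		return NULL;
	return str + len - slen;
}

static char *get_root(const char *fname, const char *suffix)
{
	char *root = strdup(fname);
	char *str;

	if (!root)
		return NULL;

	str = ends_with(root, suffix);
	if (!str) {
		free(root);
		return NULL;
	}

	*str = '\0';	/* "syscall-counts-report" -> "syscall-counts" */
	return root;
}

int main(void)
{
	char *root = get_root("syscall-counts-report", "-report");

	if (root) {
		printf("%s\n", root);
		free(root);
	}
	return 0;
}
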
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 955930e..f5d2a63 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -578,6 +578,33 @@
 			avg / avg_stats(&walltime_nsecs_stats));
 }
 
+/* used for get_ratio_color() */
+enum grc_type {
+	GRC_STALLED_CYCLES_FE,
+	GRC_STALLED_CYCLES_BE,
+	GRC_CACHE_MISSES,
+	GRC_MAX_NR
+};
+
+static const char *get_ratio_color(enum grc_type type, double ratio)
+{
+	static const double grc_table[GRC_MAX_NR][3] = {
+		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
+		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
+		[GRC_CACHE_MISSES] 	= { 20.0, 10.0, 5.0 },
+	};
+	const char *color = PERF_COLOR_NORMAL;
+
+	if (ratio > grc_table[type][0])
+		color = PERF_COLOR_RED;
+	else if (ratio > grc_table[type][1])
+		color = PERF_COLOR_MAGENTA;
+	else if (ratio > grc_table[type][2])
+		color = PERF_COLOR_YELLOW;
+
+	return color;
+}
+
 static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
 {
 	double total, ratio = 0.0;
@@ -588,13 +615,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 50.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 30.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -611,13 +632,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 75.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 50.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 20.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -634,13 +649,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 20.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 5.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -657,13 +666,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 20.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 5.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -680,13 +683,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 20.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 5.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -703,13 +700,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 20.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 5.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -726,13 +717,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 20.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 5.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -749,13 +734,7 @@
 	if (total)
 		ratio = avg / total * 100.0;
 
-	color = PERF_COLOR_NORMAL;
-	if (ratio > 20.0)
-		color = PERF_COLOR_RED;
-	else if (ratio > 10.0)
-		color = PERF_COLOR_MAGENTA;
-	else if (ratio > 5.0)
-		color = PERF_COLOR_YELLOW;
+	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
 
 	fprintf(output, " #  ");
 	color_fprintf(output, color, "%6.2f%%", ratio);
@@ -1108,22 +1087,13 @@
  */
 static int add_default_attributes(void)
 {
-	struct perf_evsel *pos;
-	size_t attr_nr = 0;
-	size_t c;
-
 	/* Set attrs if no event is selected and !null_run: */
 	if (null_run)
 		return 0;
 
 	if (!evsel_list->nr_entries) {
-		for (c = 0; c < ARRAY_SIZE(default_attrs); c++) {
-			pos = perf_evsel__new(default_attrs + c, c + attr_nr);
-			if (pos == NULL)
-				return -1;
-			perf_evlist__add(evsel_list, pos);
-		}
-		attr_nr += c;
+		if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0)
+			return -1;
 	}
 
 	/* Detailed events get appended to the event list: */
@@ -1132,38 +1102,21 @@
 		return 0;
 
 	/* Append detailed run extra attributes: */
-	for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) {
-		pos = perf_evsel__new(detailed_attrs + c, c + attr_nr);
-		if (pos == NULL)
-			return -1;
-		perf_evlist__add(evsel_list, pos);
-	}
-	attr_nr += c;
+	if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0)
+		return -1;
 
 	if (detailed_run < 2)
 		return 0;
 
 	/* Append very detailed run extra attributes: */
-	for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) {
-		pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr);
-		if (pos == NULL)
-			return -1;
-		perf_evlist__add(evsel_list, pos);
-	}
+	if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0)
+		return -1;
 
 	if (detailed_run < 3)
 		return 0;
 
 	/* Append very, very detailed run extra attributes: */
-	for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) {
-		pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr);
-		if (pos == NULL)
-			return -1;
-		perf_evlist__add(evsel_list, pos);
-	}
-
-
-	return 0;
+	return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs);
 }
 
 int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1267,8 +1220,7 @@
 
 	list_for_each_entry(pos, &evsel_list->entries, node) {
 		if (perf_evsel__alloc_stat_priv(pos) < 0 ||
-		    perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
-		    perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
+		    perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
 			goto out_free_fd;
 	}
 
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 831d1ba..2b9a7f4 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -7,6 +7,7 @@
 
 #include "util/cache.h"
 #include "util/debug.h"
+#include "util/debugfs.h"
 #include "util/evlist.h"
 #include "util/parse-options.h"
 #include "util/parse-events.h"
@@ -14,8 +15,6 @@
 #include "util/thread_map.h"
 #include "../../include/linux/hw_breakpoint.h"
 
-static long page_size;
-
 static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
 {
 	bool *visited = symbol__priv(sym);
@@ -31,6 +30,7 @@
 	struct map *kallsyms_map, *vmlinux_map;
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
+	long page_size = sysconf(_SC_PAGE_SIZE);
 	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
 
 	/*
@@ -247,7 +247,7 @@
 
 	if (asprintf(&filename,
 		     "%s/syscalls/%s/id",
-		     debugfs_path, evname) < 0)
+		     tracing_events_path, evname) < 0)
 		return -1;
 
 	fd = open(filename, O_RDONLY);
@@ -603,7 +603,7 @@
 
 #define TEST_ASSERT_VAL(text, cond) \
 do { \
-	if (!cond) { \
+	if (!(cond)) { \
 		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
 		return -1; \
 	} \
@@ -759,6 +759,103 @@
 	return 0;
 }
 
+static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	return test__checkevent_tracepoint(evlist);
+}
+
+static int
+test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		TEST_ASSERT_VAL("wrong exclude_user",
+				!evsel->attr.exclude_user);
+		TEST_ASSERT_VAL("wrong exclude_kernel",
+				evsel->attr.exclude_kernel);
+		TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+		TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+	}
+
+	return test__checkevent_tracepoint_multi(evlist);
+}
+
+static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+	return test__checkevent_raw(evlist);
+}
+
+static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+	return test__checkevent_numeric(evlist);
+}
+
+static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+	return test__checkevent_symbolic_alias(evlist);
+}
+
+static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+	return test__checkevent_genhw(evlist);
+}
+
 static struct test__event_st {
 	const char *name;
 	__u32 type;
@@ -808,6 +905,34 @@
 		.name  = "mem:0:w",
 		.check = test__checkevent_breakpoint_w,
 	},
+	{
+		.name  = "syscalls:sys_enter_open:k",
+		.check = test__checkevent_tracepoint_modifier,
+	},
+	{
+		.name  = "syscalls:*:u",
+		.check = test__checkevent_tracepoint_multi_modifier,
+	},
+	{
+		.name  = "r1:kp",
+		.check = test__checkevent_raw_modifier,
+	},
+	{
+		.name  = "1:1:hp",
+		.check = test__checkevent_numeric_modifier,
+	},
+	{
+		.name  = "instructions:h",
+		.check = test__checkevent_symbolic_name_modifier,
+	},
+	{
+		.name  = "faults:u",
+		.check = test__checkevent_symbolic_alias_modifier,
+	},
+	{
+		.name  = "L1-dcache-load-miss:kp",
+		.check = test__checkevent_genhw_modifier,
+	},
 };
 
 #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -841,6 +966,336 @@
 
 	return ret;
 }
+
+static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
+					 size_t *sizep)
+{
+	cpu_set_t *mask;
+	size_t size;
+	int i, cpu = -1, nrcpus = 1024;
+realloc:
+	mask = CPU_ALLOC(nrcpus);
+	size = CPU_ALLOC_SIZE(nrcpus);
+	CPU_ZERO_S(size, mask);
+
+	if (sched_getaffinity(pid, size, mask) == -1) {
+		CPU_FREE(mask);
+		if (errno == EINVAL && nrcpus < (1024 << 8)) {
+			nrcpus = nrcpus << 2;
+			goto realloc;
+		}
+		perror("sched_getaffinity");
+			return -1;
+	}
+
+	for (i = 0; i < nrcpus; i++) {
+		if (CPU_ISSET_S(i, size, mask)) {
+			if (cpu == -1) {
+				cpu = i;
+				*maskp = mask;
+				*sizep = size;
+			} else
+				CPU_CLR_S(i, size, mask);
+		}
+	}
+
+	if (cpu == -1)
+		CPU_FREE(mask);
+
+	return cpu;
+}
+
+static int test__PERF_RECORD(void)
+{
+	struct perf_record_opts opts = {
+		.target_pid = -1,
+		.target_tid = -1,
+		.no_delay   = true,
+		.freq	    = 10,
+		.mmap_pages = 256,
+		.sample_id_all_avail = true,
+	};
+	cpu_set_t *cpu_mask = NULL;
+	size_t cpu_mask_size = 0;
+	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+	struct perf_evsel *evsel;
+	struct perf_sample sample;
+	const char *cmd = "sleep";
+	const char *argv[] = { cmd, "1", NULL, };
+	char *bname;
+	u64 sample_type, prev_time = 0;
+	bool found_cmd_mmap = false,
+	     found_libc_mmap = false,
+	     found_vdso_mmap = false,
+	     found_ld_mmap = false;
+	int err = -1, errs = 0, i, wakeups = 0, sample_size;
+	u32 cpu;
+	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
+
+	if (evlist == NULL || argv == NULL) {
+		pr_debug("Not enough memory to create evlist\n");
+		goto out;
+	}
+
+	/*
+	 * We need at least one evsel in the evlist, use the default
+	 * one: "cycles".
+	 */
+	err = perf_evlist__add_default(evlist);
+	if (err < 0) {
+		pr_debug("Not enough memory to create evsel\n");
+		goto out_delete_evlist;
+	}
+
+	/*
+	 * Create maps of threads and cpus to monitor. In this case
+	 * we start with all threads and cpus (-1, -1) but then in
+	 * perf_evlist__prepare_workload we'll fill in the only thread
+	 * we're monitoring, the one forked there.
+	 */
+	err = perf_evlist__create_maps(evlist, opts.target_pid,
+				       opts.target_tid, opts.cpu_list);
+	if (err < 0) {
+		pr_debug("Not enough memory to create thread/cpu maps\n");
+		goto out_delete_evlist;
+	}
+
+	/*
+	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
+	 * for perf_evlist__start_workload() to exec it. This is done this way
+	 * so that we have time to open the evlist (calling sys_perf_event_open
+	 * on all the fds) and then mmap them.
+	 */
+	err = perf_evlist__prepare_workload(evlist, &opts, argv);
+	if (err < 0) {
+		pr_debug("Couldn't run the workload!\n");
+		goto out_delete_evlist;
+	}
+
+	/*
+	 * Config the evsels, setting attr->comm on the first one, etc.
+	 */
+	evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
+	evsel->attr.sample_type |= PERF_SAMPLE_TID;
+	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
+	perf_evlist__config_attrs(evlist, &opts);
+
+	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
+					    &cpu_mask_size);
+	if (err < 0) {
+		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
+		goto out_delete_evlist;
+	}
+
+	cpu = err;
+
+	/*
+	 * So that we can check perf_sample.cpu on all the samples.
+	 */
+	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
+		pr_debug("sched_setaffinity: %s\n", strerror(errno));
+		goto out_free_cpu_mask;
+	}
+
+	/*
+	 * Call sys_perf_event_open on all the fds on all the evsels,
+	 * grouping them if asked to.
+	 */
+	err = perf_evlist__open(evlist, opts.group);
+	if (err < 0) {
+		pr_debug("perf_evlist__open: %s\n", strerror(errno));
+		goto out_delete_evlist;
+	}
+
+	/*
+	 * mmap the first fd on a given CPU and ask for events for the other
+	 * fds in the same CPU to be injected in the same mmap ring buffer
+	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
+	 */
+	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+	if (err < 0) {
+		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+		goto out_delete_evlist;
+	}
+
+	/*
+	 * We'll need these two to parse the PERF_SAMPLE_* fields in each
+	 * event.
+	 */
+	sample_type = perf_evlist__sample_type(evlist);
+	sample_size = __perf_evsel__sample_size(sample_type);
+
+	/*
+	 * Now that all is properly set up, enable the events, they will
+	 * count just on workload.pid, which will start...
+	 */
+	perf_evlist__enable(evlist);
+
+	/*
+	 * Now!
+	 */
+	perf_evlist__start_workload(evlist);
+
+	while (1) {
+		int before = total_events;
+
+		for (i = 0; i < evlist->nr_mmaps; i++) {
+			union perf_event *event;
+
+			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+				const u32 type = event->header.type;
+				const char *name = perf_event__name(type);
+
+				++total_events;
+				if (type < PERF_RECORD_MAX)
+					nr_events[type]++;
+
+				err = perf_event__parse_sample(event, sample_type,
+							       sample_size, true,
+							       &sample, false);
+				if (err < 0) {
+					if (verbose)
+						perf_event__fprintf(event, stderr);
+					pr_debug("Couldn't parse sample\n");
+					goto out_err;
+				}
+
+				if (verbose) {
+					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
+					perf_event__fprintf(event, stderr);
+				}
+
+				if (prev_time > sample.time) {
+					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
+						 name, prev_time, sample.time);
+					++errs;
+				}
+
+				prev_time = sample.time;
+
+				if (sample.cpu != cpu) {
+					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
+						 name, cpu, sample.cpu);
+					++errs;
+				}
+
+				if ((pid_t)sample.pid != evlist->workload.pid) {
+					pr_debug("%s with unexpected pid, expected %d, got %d\n",
+						 name, evlist->workload.pid, sample.pid);
+					++errs;
+				}
+
+				if ((pid_t)sample.tid != evlist->workload.pid) {
+					pr_debug("%s with unexpected tid, expected %d, got %d\n",
+						 name, evlist->workload.pid, sample.tid);
+					++errs;
+				}
+
+				if ((type == PERF_RECORD_COMM ||
+				     type == PERF_RECORD_MMAP ||
+				     type == PERF_RECORD_FORK ||
+				     type == PERF_RECORD_EXIT) &&
+				     (pid_t)event->comm.pid != evlist->workload.pid) {
+					pr_debug("%s with unexpected pid/tid\n", name);
+					++errs;
+				}
+
+				if ((type == PERF_RECORD_COMM ||
+				     type == PERF_RECORD_MMAP) &&
+				     event->comm.pid != event->comm.tid) {
+					pr_debug("%s with different pid/tid!\n", name);
+					++errs;
+				}
+
+				switch (type) {
+				case PERF_RECORD_COMM:
+					if (strcmp(event->comm.comm, cmd)) {
+						pr_debug("%s with unexpected comm!\n", name);
+						++errs;
+					}
+					break;
+				case PERF_RECORD_EXIT:
+					goto found_exit;
+				case PERF_RECORD_MMAP:
+					bname = strrchr(event->mmap.filename, '/');
+					if (bname != NULL) {
+						if (!found_cmd_mmap)
+							found_cmd_mmap = !strcmp(bname + 1, cmd);
+						if (!found_libc_mmap)
+							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
+						if (!found_ld_mmap)
+							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
+					} else if (!found_vdso_mmap)
+						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
+					break;
+
+				case PERF_RECORD_SAMPLE:
+					/* Just ignore samples for now */
+					break;
+				default:
+					pr_debug("Unexpected perf_event->header.type %d!\n",
+						 type);
+					++errs;
+				}
+			}
+		}
+
+		/*
+		 * We don't use poll here because at least at 3.1 times the
+		 * PERF_RECORD_{!SAMPLE} events don't honour
+		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
+		 */
+		if (total_events == before && false)
+			poll(evlist->pollfd, evlist->nr_fds, -1);
+
+		sleep(1);
+		if (++wakeups > 5) {
+			pr_debug("No PERF_RECORD_EXIT event!\n");
+			break;
+		}
+	}
+
+found_exit:
+	if (nr_events[PERF_RECORD_COMM] > 1) {
+		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
+		++errs;
+	}
+
+	if (nr_events[PERF_RECORD_COMM] == 0) {
+		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
+		++errs;
+	}
+
+	if (!found_cmd_mmap) {
+		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
+		++errs;
+	}
+
+	if (!found_libc_mmap) {
+		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
+		++errs;
+	}
+
+	if (!found_ld_mmap) {
+		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
+		++errs;
+	}
+
+	if (!found_vdso_mmap) {
+		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
+		++errs;
+	}
+out_err:
+	perf_evlist__munmap(evlist);
+out_free_cpu_mask:
+	CPU_FREE(cpu_mask);
+out_delete_evlist:
+	perf_evlist__delete(evlist);
+out:
+	return (err < 0 || errs > 0) ? -1 : 0;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -866,45 +1321,89 @@
 		.func = test__parse_events,
 	},
 	{
+		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
+		.func = test__PERF_RECORD,
+	},
+	{
 		.func = NULL,
 	},
 };
 
-static int __cmd_test(void)
+static bool perf_test__matches(int curr, int argc, const char *argv[])
+{
+	int i;
+
+	if (argc == 0)
+		return true;
+
+	for (i = 0; i < argc; ++i) {
+		char *end;
+		long nr = strtoul(argv[i], &end, 10);
+
+		if (*end == '\0') {
+			if (nr == curr + 1)
+				return true;
+			continue;
+		}
+
+		if (strstr(tests[curr].desc, argv[i]))
+			return true;
+	}
+
+	return false;
+}
+
+static int __cmd_test(int argc, const char *argv[])
 {
 	int i = 0;
 
-	page_size = sysconf(_SC_PAGE_SIZE);
-
 	while (tests[i].func) {
-		int err;
-		pr_info("%2d: %s:", i + 1, tests[i].desc);
+		int curr = i++, err;
+
+		if (!perf_test__matches(curr, argc, argv))
+			continue;
+
+		pr_info("%2d: %s:", i, tests[curr].desc);
 		pr_debug("\n--- start ---\n");
-		err = tests[i].func();
-		pr_debug("---- end ----\n%s:", tests[i].desc);
+		err = tests[curr].func();
+		pr_debug("---- end ----\n%s:", tests[curr].desc);
 		pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
-		++i;
 	}
 
 	return 0;
 }
 
-static const char * const test_usage[] = {
-	"perf test [<options>]",
-	NULL,
-};
+static int perf_test__list(int argc, const char **argv)
+{
+	int i = 0;
 
-static const struct option test_options[] = {
-	OPT_INTEGER('v', "verbose", &verbose,
-		    "be more verbose (show symbol address, etc)"),
-	OPT_END()
-};
+	while (tests[i].func) {
+		int curr = i++;
+
+		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
+			continue;
+
+		pr_info("%2d: %s\n", i, tests[curr].desc);
+	}
+
+	return 0;
+}
 
 int cmd_test(int argc, const char **argv, const char *prefix __used)
 {
+	const char * const test_usage[] = {
+	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
+	NULL,
+	};
+	const struct option test_options[] = {
+	OPT_INTEGER('v', "verbose", &verbose,
+		    "be more verbose (show symbol address, etc)"),
+	OPT_END()
+	};
+
 	argc = parse_options(argc, argv, test_options, test_usage, 0);
-	if (argc)
-		usage_with_options(test_usage, test_options);
+	if (argc >= 1 && !strcmp(argv[0], "list"))
+		return perf_test__list(argc, argv);
 
 	symbol_conf.priv_size = sizeof(int);
 	symbol_conf.sort_by_name = true;
@@ -915,5 +1414,5 @@
 
 	setup_pager();
 
-	return __cmd_test();
+	return __cmd_test(argc, argv);
 }
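
For reference, a standalone sketch of the selection logic perf_test__matches() adds above: an argument that parses entirely as a number selects a test by its 1-based index, anything else is treated as a substring of the test description, and no arguments means run everything. The test list below is a stand-in, not the real table.

/* Sketch only; not part of the patch above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static const char *tests[] = {		/* stand-in descriptions */
	"vmlinux symtab matches kallsyms",
	"parse events",
	"Validate PERF_RECORD_* events & perf_sample fields",
};

#define NR_TESTS (sizeof(tests) / sizeof(tests[0]))

static bool matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;	/* no filter: run everything */

	for (i = 0; i < argc; i++) {
		char *end;
		long nr = strtol(argv[i], &end, 10);

		if (*end == '\0') {	/* pure number: 1-based test index */
			if (nr == curr + 1)
				return true;
			continue;
		}
		if (strstr(tests[curr], argv[i]))
			return true;
	}
	return false;
}

int main(int argc, char **argv)
{
	int i;

	for (i = 0; i < (int)NR_TESTS; i++)
		if (matches(i, argc - 1, (const char **)(argv + 1)))
			printf("%2d: %s\n", i + 1, tests[i]);
	return 0;
}
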
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index aa26f4d..3b75b2e 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -19,6 +19,7 @@
 #include "util/color.h"
 #include <linux/list.h>
 #include "util/cache.h"
+#include "util/evsel.h"
 #include <linux/rbtree.h>
 #include "util/symbol.h"
 #include "util/callchain.h"
@@ -31,13 +32,14 @@
 #include "util/event.h"
 #include "util/session.h"
 #include "util/svghelper.h"
+#include "util/tool.h"
 
 #define SUPPORT_OLD_POWER_EVENTS 1
 #define PWR_EVENT_EXIT -1
 
 
-static char		const *input_name = "perf.data";
-static char		const *output_name = "output.svg";
+static const char	*input_name;
+static const char	*output_name = "output.svg";
 
 static unsigned int	numcpus;
 static u64		min_freq;	/* Lowest CPU frequency seen */
@@ -273,25 +275,28 @@
 static u64 cpus_pstate_start_times[MAX_CPUS];
 static u64 cpus_pstate_state[MAX_CPUS];
 
-static int process_comm_event(union perf_event *event,
+static int process_comm_event(struct perf_tool *tool __used,
+			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	pid_set_comm(event->comm.tid, event->comm.comm);
 	return 0;
 }
 
-static int process_fork_event(union perf_event *event,
+static int process_fork_event(struct perf_tool *tool __used,
+			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
 	return 0;
 }
 
-static int process_exit_event(union perf_event *event,
+static int process_exit_event(struct perf_tool *tool __used,
+			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	pid_exit(event->fork.pid, event->fork.time);
 	return 0;
@@ -486,14 +491,15 @@
 }
 
 
-static int process_sample_event(union perf_event *event __used,
+static int process_sample_event(struct perf_tool *tool __used,
+				union perf_event *event __used,
 				struct perf_sample *sample,
-				struct perf_evsel *evsel __used,
-				struct perf_session *session)
+				struct perf_evsel *evsel,
+				struct machine *machine __used)
 {
 	struct trace_entry *te;
 
-	if (session->sample_type & PERF_SAMPLE_TIME) {
+	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 		if (!first_time || first_time > sample->time)
 			first_time = sample->time;
 		if (last_time < sample->time)
@@ -501,7 +507,7 @@
 	}
 
 	te = (void *)sample->raw_data;
-	if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
+	if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
 		char *event_str;
 #ifdef SUPPORT_OLD_POWER_EVENTS
 		struct power_entry_old *peo;
@@ -974,7 +980,7 @@
 	svg_close();
 }
 
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_timechart = {
 	.comm			= process_comm_event,
 	.fork			= process_fork_event,
 	.exit			= process_exit_event,
@@ -985,7 +991,7 @@
 static int __cmd_timechart(void)
 {
 	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-							 0, false, &event_ops);
+							 0, false, &perf_timechart);
 	int ret = -EINVAL;
 
 	if (session == NULL)
@@ -994,7 +1000,7 @@
 	if (!perf_session__has_traces(session, "timechart record"))
 		goto out_delete;
 
-	ret = perf_session__process_events(session, &event_ops);
+	ret = perf_session__process_events(session, &perf_timechart);
 	if (ret)
 		goto out_delete;
 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c9cdedb..4f81eeb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -64,44 +64,6 @@
 #include <linux/unistd.h>
 #include <linux/types.h>
 
-static struct perf_top top = {
-	.count_filter		= 5,
-	.delay_secs		= 2,
-	.target_pid		= -1,
-	.target_tid		= -1,
-	.freq			= 1000, /* 1 KHz */
-};
-
-static bool			system_wide			=  false;
-
-static bool			use_tui, use_stdio;
-
-static bool			sort_has_symbols;
-
-static bool			dont_use_callchains;
-static char			callchain_default_opt[]		= "fractal,0.5,callee";
-
-
-static int			default_interval		=      0;
-
-static bool			kptr_restrict_warned;
-static bool			vmlinux_warned;
-static bool			inherit				=  false;
-static int			realtime_prio			=      0;
-static bool			group				=  false;
-static bool			sample_id_all_avail		=   true;
-static unsigned int		mmap_pages			=    128;
-
-static bool			dump_symtab                     =  false;
-
-static struct winsize		winsize;
-
-static const char		*sym_filter			=   NULL;
-static int			sym_pcnt_filter			=      5;
-
-/*
- * Source functions
- */
 
 void get_term_dimensions(struct winsize *ws)
 {
@@ -125,21 +87,23 @@
 	ws->ws_col = 80;
 }
 
-static void update_print_entries(struct winsize *ws)
+static void perf_top__update_print_entries(struct perf_top *top)
 {
-	top.print_entries = ws->ws_row;
+	top->print_entries = top->winsize.ws_row;
 
-	if (top.print_entries > 9)
-		top.print_entries -= 9;
+	if (top->print_entries > 9)
+		top->print_entries -= 9;
 }
 
-static void sig_winch_handler(int sig __used)
+static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
 {
-	get_term_dimensions(&winsize);
-	update_print_entries(&winsize);
+	struct perf_top *top = arg;
+
+	get_term_dimensions(&top->winsize);
+	perf_top__update_print_entries(top);
 }
 
-static int parse_source(struct hist_entry *he)
+static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 {
 	struct symbol *sym;
 	struct annotation *notes;
@@ -170,7 +134,7 @@
 
 	pthread_mutex_lock(&notes->lock);
 
-	if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+	if (symbol__alloc_hist(sym) < 0) {
 		pthread_mutex_unlock(&notes->lock);
 		pr_err("Not enough memory for annotating '%s' symbol!\n",
 		       sym->name);
@@ -181,7 +145,7 @@
 	err = symbol__annotate(sym, map, 0);
 	if (err == 0) {
 out_assign:
-		top.sym_filter_entry = he;
+		top->sym_filter_entry = he;
 	}
 
 	pthread_mutex_unlock(&notes->lock);
@@ -194,14 +158,16 @@
 	symbol__annotate_zero_histograms(sym);
 }
 
-static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
+static void perf_top__record_precise_ip(struct perf_top *top,
+					struct hist_entry *he,
+					int counter, u64 ip)
 {
 	struct annotation *notes;
 	struct symbol *sym;
 
 	if (he == NULL || he->ms.sym == NULL ||
-	    ((top.sym_filter_entry == NULL ||
-	      top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
+	    ((top->sym_filter_entry == NULL ||
+	      top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
 		return;
 
 	sym = he->ms.sym;
@@ -210,8 +176,7 @@
 	if (pthread_mutex_trylock(&notes->lock))
 		return;
 
-	if (notes->src == NULL &&
-	    symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+	if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
 		pthread_mutex_unlock(&notes->lock);
 		pr_err("Not enough memory for annotating '%s' symbol!\n",
 		       sym->name);
@@ -225,8 +190,9 @@
 	pthread_mutex_unlock(&notes->lock);
 }
 
-static void show_details(struct hist_entry *he)
+static void perf_top__show_details(struct perf_top *top)
 {
+	struct hist_entry *he = top->sym_filter_entry;
 	struct annotation *notes;
 	struct symbol *symbol;
 	int more;
@@ -242,15 +208,15 @@
 	if (notes->src == NULL)
 		goto out_unlock;
 
-	printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
-	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);
+	printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+	printf("  Events  Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
 
-	more = symbol__annotate_printf(symbol, he->ms.map, top.sym_evsel->idx,
-				       0, sym_pcnt_filter, top.print_entries, 4);
-	if (top.zero)
-		symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
+	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
+				       0, top->sym_pcnt_filter, top->print_entries, 4);
+	if (top->zero)
+		symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
 	else
-		symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
+		symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
 	if (more != 0)
 		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
 out_unlock:
@@ -259,11 +225,9 @@
 
 static const char		CONSOLE_CLEAR[] = "";
 
-static struct hist_entry *
-	perf_session__add_hist_entry(struct perf_session *session,
-				     struct addr_location *al,
-				     struct perf_sample *sample,
-				     struct perf_evsel *evsel)
+static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+						     struct addr_location *al,
+						     struct perf_sample *sample)
 {
 	struct hist_entry *he;
 
@@ -271,50 +235,51 @@
 	if (he == NULL)
 		return NULL;
 
-	session->hists.stats.total_period += sample->period;
+	evsel->hists.stats.total_period += sample->period;
 	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 	return he;
 }
 
-static void print_sym_table(void)
+static void perf_top__print_sym_table(struct perf_top *top)
 {
 	char bf[160];
 	int printed = 0;
-	const int win_width = winsize.ws_col - 1;
+	const int win_width = top->winsize.ws_col - 1;
 
 	puts(CONSOLE_CLEAR);
 
-	perf_top__header_snprintf(&top, bf, sizeof(bf));
+	perf_top__header_snprintf(top, bf, sizeof(bf));
 	printf("%s\n", bf);
 
-	perf_top__reset_sample_counters(&top);
+	perf_top__reset_sample_counters(top);
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-	if (top.sym_evsel->hists.stats.nr_lost_warned !=
-	    top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
-		top.sym_evsel->hists.stats.nr_lost_warned =
-			top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+	if (top->sym_evsel->hists.stats.nr_lost_warned !=
+	    top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
+		top->sym_evsel->hists.stats.nr_lost_warned =
+			top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
 		color_fprintf(stdout, PERF_COLOR_RED,
 			      "WARNING: LOST %d chunks, Check IO/CPU overload",
-			      top.sym_evsel->hists.stats.nr_lost_warned);
+			      top->sym_evsel->hists.stats.nr_lost_warned);
 		++printed;
 	}
 
-	if (top.sym_filter_entry) {
-		show_details(top.sym_filter_entry);
+	if (top->sym_filter_entry) {
+		perf_top__show_details(top);
 		return;
 	}
 
-	hists__collapse_resort_threaded(&top.sym_evsel->hists);
-	hists__output_resort_threaded(&top.sym_evsel->hists);
-	hists__decay_entries_threaded(&top.sym_evsel->hists,
-				      top.hide_user_symbols,
-				      top.hide_kernel_symbols);
-	hists__output_recalc_col_len(&top.sym_evsel->hists, winsize.ws_row - 3);
+	hists__collapse_resort_threaded(&top->sym_evsel->hists);
+	hists__output_resort_threaded(&top->sym_evsel->hists);
+	hists__decay_entries_threaded(&top->sym_evsel->hists,
+				      top->hide_user_symbols,
+				      top->hide_kernel_symbols);
+	hists__output_recalc_col_len(&top->sym_evsel->hists,
+				     top->winsize.ws_row - 3);
 	putchar('\n');
-	hists__fprintf(&top.sym_evsel->hists, NULL, false, false,
-		       winsize.ws_row - 4 - printed, win_width, stdout);
+	hists__fprintf(&top->sym_evsel->hists, NULL, false, false,
+		       top->winsize.ws_row - 4 - printed, win_width, stdout);
 }
 
 static void prompt_integer(int *target, const char *msg)
@@ -352,17 +317,17 @@
 		*target = tmp;
 }
 
-static void prompt_symbol(struct hist_entry **target, const char *msg)
+static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
 {
 	char *buf = malloc(0), *p;
-	struct hist_entry *syme = *target, *n, *found = NULL;
+	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
 	struct rb_node *next;
 	size_t dummy = 0;
 
 	/* zero counters of active symbol */
 	if (syme) {
 		__zero_source_counters(syme);
-		*target = NULL;
+		top->sym_filter_entry = NULL;
 	}
 
 	fprintf(stdout, "\n%s: ", msg);
@@ -373,7 +338,7 @@
 	if (p)
 		*p = 0;
 
-	next = rb_first(&top.sym_evsel->hists.entries);
+	next = rb_first(&top->sym_evsel->hists.entries);
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node);
 		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -386,47 +351,46 @@
 	if (!found) {
 		fprintf(stderr, "Sorry, %s is not active.\n", buf);
 		sleep(1);
-		return;
 	} else
-		parse_source(found);
+		perf_top__parse_source(top, found);
 
 out_free:
 	free(buf);
 }
 
-static void print_mapped_keys(void)
+static void perf_top__print_mapped_keys(struct perf_top *top)
 {
 	char *name = NULL;
 
-	if (top.sym_filter_entry) {
-		struct symbol *sym = top.sym_filter_entry->ms.sym;
+	if (top->sym_filter_entry) {
+		struct symbol *sym = top->sym_filter_entry->ms.sym;
 		name = sym->name;
 	}
 
 	fprintf(stdout, "\nMapped keys:\n");
-	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top.delay_secs);
-	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top.print_entries);
+	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
+	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);
 
-	if (top.evlist->nr_entries > 1)
-		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top.sym_evsel));
+	if (top->evlist->nr_entries > 1)
+		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top->sym_evsel));
 
-	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top.count_filter);
+	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
 
-	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
+	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
 	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
 	fprintf(stdout, "\t[S]     stop annotation.\n");
 
 	fprintf(stdout,
 		"\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
-		top.hide_kernel_symbols ? "yes" : "no");
+		top->hide_kernel_symbols ? "yes" : "no");
 	fprintf(stdout,
 		"\t[U]     hide user symbols.               \t(%s)\n",
-		top.hide_user_symbols ? "yes" : "no");
-	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top.zero ? 1 : 0);
+		top->hide_user_symbols ? "yes" : "no");
+	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
 	fprintf(stdout, "\t[qQ]    quit.\n");
 }
 
-static int key_mapped(int c)
+static int perf_top__key_mapped(struct perf_top *top, int c)
 {
 	switch (c) {
 		case 'd':
@@ -442,7 +406,7 @@
 		case 'S':
 			return 1;
 		case 'E':
-			return top.evlist->nr_entries > 1 ? 1 : 0;
+			return top->evlist->nr_entries > 1 ? 1 : 0;
 		default:
 			break;
 	}
@@ -450,13 +414,13 @@
 	return 0;
 }
 
-static void handle_keypress(int c)
+static void perf_top__handle_keypress(struct perf_top *top, int c)
 {
-	if (!key_mapped(c)) {
+	if (!perf_top__key_mapped(top, c)) {
 		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
 		struct termios tc, save;
 
-		print_mapped_keys();
+		perf_top__print_mapped_keys(top);
 		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
 		fflush(stdout);
 
@@ -471,81 +435,86 @@
 		c = getc(stdin);
 
 		tcsetattr(0, TCSAFLUSH, &save);
-		if (!key_mapped(c))
+		if (!perf_top__key_mapped(top, c))
 			return;
 	}
 
 	switch (c) {
 		case 'd':
-			prompt_integer(&top.delay_secs, "Enter display delay");
-			if (top.delay_secs < 1)
-				top.delay_secs = 1;
+			prompt_integer(&top->delay_secs, "Enter display delay");
+			if (top->delay_secs < 1)
+				top->delay_secs = 1;
 			break;
 		case 'e':
-			prompt_integer(&top.print_entries, "Enter display entries (lines)");
-			if (top.print_entries == 0) {
-				sig_winch_handler(SIGWINCH);
-				signal(SIGWINCH, sig_winch_handler);
+			prompt_integer(&top->print_entries, "Enter display entries (lines)");
+			if (top->print_entries == 0) {
+				struct sigaction act = {
+					.sa_sigaction = perf_top__sig_winch,
+					.sa_flags     = SA_SIGINFO,
+				};
+				perf_top__sig_winch(SIGWINCH, NULL, top);
+				sigaction(SIGWINCH, &act, NULL);
 			} else
 				signal(SIGWINCH, SIG_DFL);
 			break;
 		case 'E':
-			if (top.evlist->nr_entries > 1) {
+			if (top->evlist->nr_entries > 1) {
 				/* Select 0 as the default event: */
 				int counter = 0;
 
 				fprintf(stderr, "\nAvailable events:");
 
-				list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
-					fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));
+				list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
 
 				prompt_integer(&counter, "Enter details event counter");
 
-				if (counter >= top.evlist->nr_entries) {
-					top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
-					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
+				if (counter >= top->evlist->nr_entries) {
+					top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
 					sleep(1);
 					break;
 				}
-				list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
-					if (top.sym_evsel->idx == counter)
+				list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+					if (top->sym_evsel->idx == counter)
 						break;
 			} else
-				top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+				top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
 			break;
 		case 'f':
-			prompt_integer(&top.count_filter, "Enter display event count filter");
+			prompt_integer(&top->count_filter, "Enter display event count filter");
 			break;
 		case 'F':
-			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
+			prompt_percent(&top->sym_pcnt_filter,
+				       "Enter details display event filter (percent)");
 			break;
 		case 'K':
-			top.hide_kernel_symbols = !top.hide_kernel_symbols;
+			top->hide_kernel_symbols = !top->hide_kernel_symbols;
 			break;
 		case 'q':
 		case 'Q':
 			printf("exiting.\n");
-			if (dump_symtab)
-				perf_session__fprintf_dsos(top.session, stderr);
+			if (top->dump_symtab)
+				perf_session__fprintf_dsos(top->session, stderr);
 			exit(0);
 		case 's':
-			prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
+			perf_top__prompt_symbol(top, "Enter details symbol");
 			break;
 		case 'S':
-			if (!top.sym_filter_entry)
+			if (!top->sym_filter_entry)
 				break;
 			else {
-				struct hist_entry *syme = top.sym_filter_entry;
+				struct hist_entry *syme = top->sym_filter_entry;
 
-				top.sym_filter_entry = NULL;
+				top->sym_filter_entry = NULL;
 				__zero_source_counters(syme);
 			}
 			break;
 		case 'U':
-			top.hide_user_symbols = !top.hide_user_symbols;
+			top->hide_user_symbols = !top->hide_user_symbols;
 			break;
 		case 'z':
-			top.zero = !top.zero;
+			top->zero = !top->zero;
 			break;
 		default:
 			break;
@@ -563,28 +532,30 @@
 	hists__collapse_resort_threaded(&t->sym_evsel->hists);
 	hists__output_resort_threaded(&t->sym_evsel->hists);
 	hists__decay_entries_threaded(&t->sym_evsel->hists,
-				      top.hide_user_symbols,
-				      top.hide_kernel_symbols);
+				      t->hide_user_symbols,
+				      t->hide_kernel_symbols);
 }
 
-static void *display_thread_tui(void *arg __used)
+static void *display_thread_tui(void *arg)
 {
+	struct perf_top *top = arg;
 	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
 
-	perf_top__sort_new_samples(&top);
-	perf_evlist__tui_browse_hists(top.evlist, help,
+	perf_top__sort_new_samples(top);
+	perf_evlist__tui_browse_hists(top->evlist, help,
 				      perf_top__sort_new_samples,
-				      &top, top.delay_secs);
+				      top, top->delay_secs);
 
 	exit_browser(0);
 	exit(0);
 	return NULL;
 }
 
-static void *display_thread(void *arg __used)
+static void *display_thread(void *arg)
 {
 	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
 	struct termios tc, save;
+	struct perf_top *top = arg;
 	int delay_msecs, c;
 
 	tcgetattr(0, &save);
@@ -595,13 +566,13 @@
 
 	pthread__unblock_sigwinch();
 repeat:
-	delay_msecs = top.delay_secs * 1000;
+	delay_msecs = top->delay_secs * 1000;
 	tcsetattr(0, TCSANOW, &tc);
 	/* trash return*/
 	getc(stdin);
 
 	while (1) {
-		print_sym_table();
+		perf_top__print_sym_table(top);
 		/*
 		 * Either timeout expired or we got an EINTR due to SIGWINCH,
 		 * refresh screen in both cases.
@@ -621,7 +592,7 @@
 	c = getc(stdin);
 	tcsetattr(0, TCSAFLUSH, &save);
 
-	handle_keypress(c);
+	perf_top__handle_keypress(top, c);
 	goto repeat;
 
 	return NULL;
@@ -673,47 +644,17 @@
 	return 0;
 }
 
-static void perf_event__process_sample(const union perf_event *event,
+static void perf_event__process_sample(struct perf_tool *tool,
+				       const union perf_event *event,
 				       struct perf_evsel *evsel,
 				       struct perf_sample *sample,
-				       struct perf_session *session)
+				       struct machine *machine)
 {
+	struct perf_top *top = container_of(tool, struct perf_top, tool);
 	struct symbol *parent = NULL;
 	u64 ip = event->ip.ip;
 	struct addr_location al;
-	struct machine *machine;
 	int err;
-	u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
-	++top.samples;
-
-	switch (origin) {
-	case PERF_RECORD_MISC_USER:
-		++top.us_samples;
-		if (top.hide_user_symbols)
-			return;
-		machine = perf_session__find_host_machine(session);
-		break;
-	case PERF_RECORD_MISC_KERNEL:
-		++top.kernel_samples;
-		if (top.hide_kernel_symbols)
-			return;
-		machine = perf_session__find_host_machine(session);
-		break;
-	case PERF_RECORD_MISC_GUEST_KERNEL:
-		++top.guest_kernel_samples;
-		machine = perf_session__find_machine(session, event->ip.pid);
-		break;
-	case PERF_RECORD_MISC_GUEST_USER:
-		++top.guest_us_samples;
-		/*
-		 * TODO: we don't process guest user from host side
-		 * except simple counting.
-		 */
-		return;
-	default:
-		return;
-	}
 
 	if (!machine && perf_guest) {
 		pr_err("Can't find guest [%d]'s kernel information\n",
@@ -722,14 +663,14 @@
 	}
 
 	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
-		top.exact_samples++;
+		top->exact_samples++;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  symbol_filter) < 0 ||
 	    al.filtered)
 		return;
 
-	if (!kptr_restrict_warned &&
+	if (!top->kptr_restrict_warned &&
 	    symbol_conf.kptr_restrict &&
 	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
 		ui__warning(
@@ -740,7 +681,7 @@
 			  " modules" : "");
 		if (use_browser <= 0)
 			sleep(5);
-		kptr_restrict_warned = true;
+		top->kptr_restrict_warned = true;
 	}
 
 	if (al.sym == NULL) {
@@ -756,7 +697,7 @@
 		 * --hide-kernel-symbols, even if the user specifies an
 		 * invalid --vmlinux ;-)
 		 */
-		if (!kptr_restrict_warned && !vmlinux_warned &&
+		if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
 		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
 		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
 			if (symbol_conf.vmlinux_name) {
@@ -769,7 +710,7 @@
 
 			if (use_browser <= 0)
 				sleep(5);
-			vmlinux_warned = true;
+			top->vmlinux_warned = true;
 		}
 	}
 
@@ -778,70 +719,109 @@
 
 		if ((sort__has_parent || symbol_conf.use_callchain) &&
 		    sample->callchain) {
-			err = perf_session__resolve_callchain(session, al.thread,
-							      sample->callchain, &parent);
+			err = machine__resolve_callchain(machine, evsel, al.thread,
+							 sample->callchain, &parent);
 			if (err)
 				return;
 		}
 
-		he = perf_session__add_hist_entry(session, &al, sample, evsel);
+		he = perf_evsel__add_hist_entry(evsel, &al, sample);
 		if (he == NULL) {
 			pr_err("Problem incrementing symbol period, skipping event\n");
 			return;
 		}
 
 		if (symbol_conf.use_callchain) {
-			err = callchain_append(he->callchain, &session->callchain_cursor,
+			err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
 					       sample->period);
 			if (err)
 				return;
 		}
 
-		if (sort_has_symbols)
-			record_precise_ip(he, evsel->idx, ip);
+		if (top->sort_has_symbols)
+			perf_top__record_precise_ip(top, he, evsel->idx, ip);
 	}
 
 	return;
 }
 
-static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
+static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 {
 	struct perf_sample sample;
 	struct perf_evsel *evsel;
+	struct perf_session *session = top->session;
 	union perf_event *event;
+	struct machine *machine;
+	u8 origin;
 	int ret;
 
-	while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
-		ret = perf_session__parse_sample(self, event, &sample);
+	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
+		ret = perf_session__parse_sample(session, event, &sample);
 		if (ret) {
 			pr_err("Can't parse sample, err = %d\n", ret);
 			continue;
 		}
 
-		evsel = perf_evlist__id2evsel(self->evlist, sample.id);
+		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
 		assert(evsel != NULL);
 
+		origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
 		if (event->header.type == PERF_RECORD_SAMPLE)
-			perf_event__process_sample(event, evsel, &sample, self);
-		else if (event->header.type < PERF_RECORD_MAX) {
+			++top->samples;
+
+		switch (origin) {
+		case PERF_RECORD_MISC_USER:
+			++top->us_samples;
+			if (top->hide_user_symbols)
+				continue;
+			machine = perf_session__find_host_machine(session);
+			break;
+		case PERF_RECORD_MISC_KERNEL:
+			++top->kernel_samples;
+			if (top->hide_kernel_symbols)
+				continue;
+			machine = perf_session__find_host_machine(session);
+			break;
+		case PERF_RECORD_MISC_GUEST_KERNEL:
+			++top->guest_kernel_samples;
+			machine = perf_session__find_machine(session, event->ip.pid);
+			break;
+		case PERF_RECORD_MISC_GUEST_USER:
+			++top->guest_us_samples;
+			/*
+			 * TODO: we don't process guest user from host side
+			 * except simple counting.
+			 */
+			/* Fall thru */
+		default:
+			continue;
+		}
+
+
+		if (event->header.type == PERF_RECORD_SAMPLE) {
+			perf_event__process_sample(&top->tool, event, evsel,
+						   &sample, machine);
+		} else if (event->header.type < PERF_RECORD_MAX) {
 			hists__inc_nr_events(&evsel->hists, event->header.type);
-			perf_event__process(event, &sample, self);
+			perf_event__process(&top->tool, event, &sample, machine);
 		} else
-			++self->hists.stats.nr_unknown_events;
+			++session->hists.stats.nr_unknown_events;
 	}
 }
 
-static void perf_session__mmap_read(struct perf_session *self)
+static void perf_top__mmap_read(struct perf_top *top)
 {
 	int i;
 
-	for (i = 0; i < top.evlist->nr_mmaps; i++)
-		perf_session__mmap_read_idx(self, i);
+	for (i = 0; i < top->evlist->nr_mmaps; i++)
+		perf_top__mmap_read_idx(top, i);
 }
 
-static void start_counters(struct perf_evlist *evlist)
+static void perf_top__start_counters(struct perf_top *top)
 {
 	struct perf_evsel *counter, *first;
+	struct perf_evlist *evlist = top->evlist;
 
 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
 
@@ -849,15 +829,15 @@
 		struct perf_event_attr *attr = &counter->attr;
 		struct xyarray *group_fd = NULL;
 
-		if (group && counter != first)
+		if (top->group && counter != first)
 			group_fd = first->fd;
 
 		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 
-		if (top.freq) {
+		if (top->freq) {
 			attr->sample_type |= PERF_SAMPLE_PERIOD;
 			attr->freq	  = 1;
-			attr->sample_freq = top.freq;
+			attr->sample_freq = top->freq;
 		}
 
 		if (evlist->nr_entries > 1) {
@@ -870,23 +850,23 @@
 
 		attr->mmap = 1;
 		attr->comm = 1;
-		attr->inherit = inherit;
+		attr->inherit = top->inherit;
 retry_sample_id:
-		attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+		attr->sample_id_all = top->sample_id_all_avail ? 1 : 0;
 try_again:
-		if (perf_evsel__open(counter, top.evlist->cpus,
-				     top.evlist->threads, group,
+		if (perf_evsel__open(counter, top->evlist->cpus,
+				     top->evlist->threads, top->group,
 				     group_fd) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
 				ui__error_paranoid();
 				goto out_err;
-			} else if (err == EINVAL && sample_id_all_avail) {
+			} else if (err == EINVAL && top->sample_id_all_avail) {
 				/*
 				 * Old kernel, no attr->sample_id_type_all field
 				 */
-				sample_id_all_avail = false;
+				top->sample_id_all_avail = false;
 				goto retry_sample_id;
 			}
 			/*
@@ -920,7 +900,7 @@
 		}
 	}
 
-	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) {
+	if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
 		ui__warning("Failed to mmap with %d (%s)\n",
 			    errno, strerror(errno));
 		goto out_err;
@@ -933,14 +913,14 @@
 	exit(0);
 }
 
-static int setup_sample_type(void)
+static int perf_top__setup_sample_type(struct perf_top *top)
 {
-	if (!sort_has_symbols) {
+	if (!top->sort_has_symbols) {
 		if (symbol_conf.use_callchain) {
 			ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
 			return -EINVAL;
 		}
-	} else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
+	} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
 		if (callchain_register_param(&callchain_param) < 0) {
 			ui__warning("Can't register callchain params.\n");
 			return -EINVAL;
@@ -950,7 +930,7 @@
 	return 0;
 }
 
-static int __cmd_top(void)
+static int __cmd_top(struct perf_top *top)
 {
 	pthread_t thread;
 	int ret;
@@ -958,39 +938,40 @@
 	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
 	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
 	 */
-	top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
-	if (top.session == NULL)
+	top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
+	if (top->session == NULL)
 		return -ENOMEM;
 
-	ret = setup_sample_type();
+	ret = perf_top__setup_sample_type(top);
 	if (ret)
 		goto out_delete;
 
-	if (top.target_tid != -1)
-		perf_event__synthesize_thread_map(top.evlist->threads,
-						  perf_event__process, top.session);
+	if (top->target_tid != -1)
+		perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
+						  perf_event__process,
+						  &top->session->host_machine);
 	else
-		perf_event__synthesize_threads(perf_event__process, top.session);
-
-	start_counters(top.evlist);
-	top.session->evlist = top.evlist;
-	perf_session__update_sample_type(top.session);
+		perf_event__synthesize_threads(&top->tool, perf_event__process,
+					       &top->session->host_machine);
+	perf_top__start_counters(top);
+	top->session->evlist = top->evlist;
+	perf_session__update_sample_type(top->session);
 
 	/* Wait for a minimal set of events before starting the snapshot */
-	poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+	poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
 
-	perf_session__mmap_read(top.session);
+	perf_top__mmap_read(top);
 
 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
-							     display_thread), NULL)) {
+							    display_thread), top)) {
 		printf("Could not create display thread.\n");
 		exit(-1);
 	}
 
-	if (realtime_prio) {
+	if (top->realtime_prio) {
 		struct sched_param param;
 
-		param.sched_priority = realtime_prio;
+		param.sched_priority = top->realtime_prio;
 		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
 			printf("Could not set realtime priority.\n");
 			exit(-1);
@@ -998,25 +979,25 @@
 	}
 
 	while (1) {
-		u64 hits = top.samples;
+		u64 hits = top->samples;
 
-		perf_session__mmap_read(top.session);
+		perf_top__mmap_read(top);
 
-		if (hits == top.samples)
-			ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+		if (hits == top->samples)
+			ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
 	}
 
 out_delete:
-	perf_session__delete(top.session);
-	top.session = NULL;
+	perf_session__delete(top->session);
+	top->session = NULL;
 
 	return 0;
 }
 
 static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
-		    int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
+	struct perf_top *top = (struct perf_top *)opt->value;
 	char *tok, *tok2;
 	char *endptr;
 
@@ -1024,7 +1005,7 @@
 	 * --no-call-graph
 	 */
 	if (unset) {
-		dont_use_callchains = true;
+		top->dont_use_callchains = true;
 		return 0;
 	}
 
@@ -1052,9 +1033,7 @@
 		symbol_conf.use_callchain = false;
 
 		return 0;
-	}
-
-	else
+	} else
 		return -1;
 
 	/* get the min percentage */
@@ -1098,17 +1077,32 @@
 	NULL
 };
 
-static const struct option options[] = {
+int cmd_top(int argc, const char **argv, const char *prefix __used)
+{
+	struct perf_evsel *pos;
+	int status = -ENOMEM;
+	struct perf_top top = {
+		.count_filter	     = 5,
+		.delay_secs	     = 2,
+		.target_pid	     = -1,
+		.target_tid	     = -1,
+		.freq		     = 1000, /* 1 KHz */
+		.sample_id_all_avail = true,
+		.mmap_pages	     = 128,
+		.sym_pcnt_filter     = 5,
+	};
+	char callchain_default_opt[] = "fractal,0.5,callee";
+	const struct option options[] = {
 	OPT_CALLBACK('e', "event", &top.evlist, "event",
 		     "event selector. use 'perf list' to list available events",
 		     parse_events_option),
-	OPT_INTEGER('c', "count", &default_interval,
+	OPT_INTEGER('c', "count", &top.default_interval,
 		    "event period to sample"),
 	OPT_INTEGER('p', "pid", &top.target_pid,
 		    "profile events on existing process id"),
 	OPT_INTEGER('t', "tid", &top.target_tid,
 		    "profile events on existing thread id"),
-	OPT_BOOLEAN('a', "all-cpus", &system_wide,
+	OPT_BOOLEAN('a', "all-cpus", &top.system_wide,
 			    "system-wide collection from all CPUs"),
 	OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
 		    "list of cpus to monitor"),
@@ -1116,20 +1110,20 @@
 		   "file", "vmlinux pathname"),
 	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
 		    "hide kernel symbols"),
-	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
-	OPT_INTEGER('r', "realtime", &realtime_prio,
+	OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
+	OPT_INTEGER('r', "realtime", &top.realtime_prio,
 		    "collect data with this RT SCHED_FIFO priority"),
 	OPT_INTEGER('d', "delay", &top.delay_secs,
 		    "number of seconds to delay between refreshes"),
-	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
+	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
 			    "dump the symbol table used for profiling"),
 	OPT_INTEGER('f', "count-filter", &top.count_filter,
 		    "only display functions with more events than this"),
-	OPT_BOOLEAN('g', "group", &group,
+	OPT_BOOLEAN('g', "group", &top.group,
 			    "put the counters into a counter group"),
-	OPT_BOOLEAN('i', "inherit", &inherit,
+	OPT_BOOLEAN('i', "inherit", &top.inherit,
 		    "child tasks inherit counters"),
-	OPT_STRING(0, "sym-annotate", &sym_filter, "symbol name",
+	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
 		    "symbol to annotate"),
 	OPT_BOOLEAN('z', "zero", &top.zero,
 		    "zero history across updates"),
@@ -1139,15 +1133,15 @@
 		    "display this many functions"),
 	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
 		    "hide user symbols"),
-	OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
-	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
+	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
 		   "sort by key(s): pid, comm, dso, symbol, parent"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
 		    "Show a column with the number of samples"),
-	OPT_CALLBACK_DEFAULT('G', "call-graph", NULL, "output_type,min_percent, call_order",
+	OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
 		     "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
 		     "Default: fractal,0.5,callee", &parse_callchain_opt,
 		     callchain_default_opt),
@@ -1166,12 +1160,7 @@
 	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
 		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
 	OPT_END()
-};
-
-int cmd_top(int argc, const char **argv, const char *prefix __used)
-{
-	struct perf_evsel *pos;
-	int status = -ENOMEM;
+	};
 
 	top.evlist = perf_evlist__new(NULL, NULL);
 	if (top.evlist == NULL)
@@ -1188,9 +1177,9 @@
 
 	setup_sorting(top_usage, options);
 
-	if (use_stdio)
+	if (top.use_stdio)
 		use_browser = 0;
-	else if (use_tui)
+	else if (top.use_tui)
 		use_browser = 1;
 
 	setup_browser(false);
@@ -1215,38 +1204,31 @@
 		return -ENOMEM;
 	}
 
+	symbol_conf.nr_events = top.evlist->nr_entries;
+
 	if (top.delay_secs < 1)
 		top.delay_secs = 1;
 
 	/*
 	 * User specified count overrides default frequency.
 	 */
-	if (default_interval)
+	if (top.default_interval)
 		top.freq = 0;
 	else if (top.freq) {
-		default_interval = top.freq;
+		top.default_interval = top.freq;
 	} else {
 		fprintf(stderr, "frequency and count are zero, aborting\n");
 		exit(EXIT_FAILURE);
 	}
 
 	list_for_each_entry(pos, &top.evlist->entries, node) {
-		if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
-					 top.evlist->threads->nr) < 0)
-			goto out_free_fd;
 		/*
 		 * Fill in the ones not specifically initialized via -c:
 		 */
-		if (pos->attr.sample_period)
-			continue;
-
-		pos->attr.sample_period = default_interval;
+		if (!pos->attr.sample_period)
+			pos->attr.sample_period = top.default_interval;
 	}
 
-	if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
-	    perf_evlist__alloc_mmap(top.evlist) < 0)
-		goto out_free_fd;
-
 	top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
 
 	symbol_conf.priv_size = sizeof(struct annotation);
@@ -1263,16 +1245,20 @@
 	 * Avoid annotation data structures overhead when symbols aren't on the
 	 * sort list.
 	 */
-	sort_has_symbols = sort_sym.list.next != NULL;
+	top.sort_has_symbols = sort_sym.list.next != NULL;
 
-	get_term_dimensions(&winsize);
+	get_term_dimensions(&top.winsize);
 	if (top.print_entries == 0) {
-		update_print_entries(&winsize);
-		signal(SIGWINCH, sig_winch_handler);
+		struct sigaction act = {
+			.sa_sigaction = perf_top__sig_winch,
+			.sa_flags     = SA_SIGINFO,
+		};
+		perf_top__update_print_entries(&top);
+		sigaction(SIGWINCH, &act, NULL);
 	}
 
-	status = __cmd_top();
-out_free_fd:
+	status = __cmd_top(&top);
+
 	perf_evlist__delete(top.evlist);
 
 	return status;
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 73d0cac..2b2e225 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -29,8 +29,6 @@
 	int val;
 };
 
-static char debugfs_mntpt[MAXPATHLEN];
-
 static int pager_command_config(const char *var, const char *value, void *data)
 {
 	struct pager_config *c = data;
@@ -81,15 +79,6 @@
 	}
 }
 
-static void set_debugfs_path(void)
-{
-	char *path;
-
-	path = getenv(PERF_DEBUGFS_ENVIRONMENT);
-	snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt,
-		 "tracing/events");
-}
-
 static int handle_options(const char ***argv, int *argc, int *envchanged)
 {
 	int handled = 0;
@@ -161,15 +150,14 @@
 				fprintf(stderr, "No directory given for --debugfs-dir.\n");
 				usage(perf_usage_string);
 			}
-			strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN);
-			debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+			debugfs_set_path((*argv)[1]);
 			if (envchanged)
 				*envchanged = 1;
 			(*argv)++;
 			(*argc)--;
 		} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
-			strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN);
-			debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+			debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
+			fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
 			if (envchanged)
 				*envchanged = 1;
 		} else {
@@ -281,7 +269,6 @@
 	if (use_pager == -1 && p->option & USE_PAGER)
 		use_pager = 1;
 	commit_pager_choice();
-	set_debugfs_path();
 
 	status = p->fn(argc, argv, prefix);
 	exit_browser(status);
@@ -416,17 +403,6 @@
 	return done_alias;
 }
 
-/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
-static void get_debugfs_mntpt(void)
-{
-	const char *path = debugfs_mount(NULL);
-
-	if (path)
-		strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
-	else
-		debugfs_mntpt[0] = '\0';
-}
-
 static void pthread__block_sigwinch(void)
 {
 	sigset_t set;
@@ -453,7 +429,7 @@
 	if (!cmd)
 		cmd = "perf-help";
 	/* get debugfs mount point from /proc/mounts */
-	get_debugfs_mntpt();
+	debugfs_mount(NULL);
 	/*
 	 * "perf-xxxx" is the same as "perf xxxx", but we obviously:
 	 *
@@ -476,7 +452,6 @@
 	argc--;
 	handle_options(&argv, &argc, NULL);
 	commit_pager_choice();
-	set_debugfs_path();
 	set_buildid_dir();
 
 	if (argc > 0) {
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 914c895..64f8bee 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -185,4 +185,28 @@
 
 void pthread__unblock_sigwinch(void);
 
+struct perf_record_opts {
+	pid_t	     target_pid;
+	pid_t	     target_tid;
+	bool	     call_graph;
+	bool	     group;
+	bool	     inherit_stat;
+	bool	     no_delay;
+	bool	     no_inherit;
+	bool	     no_samples;
+	bool	     pipe_output;
+	bool	     raw_samples;
+	bool	     sample_address;
+	bool	     sample_time;
+	bool	     sample_id_all_avail;
+	bool	     system_wide;
+	bool	     period;
+	unsigned int freq;
+	unsigned int mmap_pages;
+	unsigned int user_freq;
+	u64	     default_interval;
+	u64	     user_interval;
+	const char   *cpu_list;
+};
+
 #endif
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 119e996..011ed26 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -25,17 +25,17 @@
 	return 0;
 }
 
-int symbol__alloc_hist(struct symbol *sym, int nevents)
+int symbol__alloc_hist(struct symbol *sym)
 {
 	struct annotation *notes = symbol__annotation(sym);
 	size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
 				  (sym->end - sym->start) * sizeof(u64));
 
-	notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist);
+	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
 	if (notes->src == NULL)
 		return -1;
 	notes->src->sizeof_sym_hist = sizeof_sym_hist;
-	notes->src->nr_histograms   = nevents;
+	notes->src->nr_histograms   = symbol_conf.nr_events;
 	INIT_LIST_HEAD(&notes->src->source);
 	return 0;
 }
@@ -334,7 +334,7 @@
 		 disassembler_style ? "-M " : "",
 		 disassembler_style ? disassembler_style : "",
 		 map__rip_2objdump(map, sym->start),
-		 map__rip_2objdump(map, sym->end),
+		 map__rip_2objdump(map, sym->end+1),
 		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
 		 symbol_conf.annotate_src ? "-S" : "",
 		 symfs_filename, filename);
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d907252..efa5dc8 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -72,7 +72,7 @@
 
 int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 			     int evidx, u64 addr);
-int symbol__alloc_hist(struct symbol *sym, int nevents);
+int symbol__alloc_hist(struct symbol *sym);
 void symbol__annotate_zero_histograms(struct symbol *sym);
 
 int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
@@ -99,8 +99,7 @@
 }
 #else
 int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
-			 int nr_events, void(*timer)(void *arg), void *arg,
-			 int delay_secs);
+			 void(*timer)(void *arg), void *arg, int delay_secs);
 #endif
 
 extern const char	*disassembler_style;
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index a91cd99..dff9c7a 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -13,15 +13,18 @@
 #include "symbol.h"
 #include <linux/kernel.h>
 #include "debug.h"
+#include "session.h"
+#include "tool.h"
 
-static int build_id__mark_dso_hit(union perf_event *event,
+static int build_id__mark_dso_hit(struct perf_tool *tool __used,
+				  union perf_event *event,
 				  struct perf_sample *sample __used,
 				  struct perf_evsel *evsel __used,
-				  struct perf_session *session)
+				  struct machine *machine)
 {
 	struct addr_location al;
 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
 
 	if (thread == NULL) {
 		pr_err("problem processing %d event, skipping it.\n",
@@ -29,8 +32,8 @@
 		return -1;
 	}
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, event->ip.ip, &al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      event->ip.ip, &al);
 
 	if (al.map != NULL)
 		al.map->dso->hit = 1;
@@ -38,25 +41,26 @@
 	return 0;
 }
 
-static int perf_event__exit_del_thread(union perf_event *event,
+static int perf_event__exit_del_thread(struct perf_tool *tool __used,
+				       union perf_event *event,
 				       struct perf_sample *sample __used,
-				       struct perf_session *session)
+				       struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(session, event->fork.tid);
+	struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
 
 	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
 		    event->fork.ppid, event->fork.ptid);
 
 	if (thread) {
-		rb_erase(&thread->rb_node, &session->threads);
-		session->last_match = NULL;
+		rb_erase(&thread->rb_node, &machine->threads);
+		machine->last_match = NULL;
 		thread__delete(thread);
 	}
 
 	return 0;
 }
 
-struct perf_event_ops build_id__mark_dso_hit_ops = {
+struct perf_tool build_id__mark_dso_hit_ops = {
 	.sample	= build_id__mark_dso_hit,
 	.mmap	= perf_event__process_mmap,
 	.fork	= perf_event__process_task,
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 5dafb00..a993ba8 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -3,7 +3,7 @@
 
 #include "session.h"
 
-extern struct perf_event_ops build_id__mark_dso_hit_ops;
+extern struct perf_tool build_id__mark_dso_hit_ops;
 
 char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
 
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 9b4ff16c..7f9c0f1 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -101,6 +101,9 @@
 int callchain_merge(struct callchain_cursor *cursor,
 		    struct callchain_root *dst, struct callchain_root *src);
 
+struct ip_callchain;
+union perf_event;
+
 bool ip_callchain__valid(struct ip_callchain *chain,
 			 const union perf_event *event);
 /*
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 96bee5c..dbe2f16 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -3,7 +3,6 @@
 #include "parse-options.h"
 #include "evsel.h"
 #include "cgroup.h"
-#include "debugfs.h" /* MAX_PATH, STR() */
 #include "evlist.h"
 
 int nr_cgroups;
@@ -12,7 +11,7 @@
 cgroupfs_find_mountpoint(char *buf, size_t maxlen)
 {
 	FILE *fp;
-	char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
+	char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
 	char *token, *saved_ptr = NULL;
 	int found = 0;
 
@@ -25,8 +24,8 @@
 	 * and inspect every cgroupfs mount point to find one that has
 	 * perf_event subsystem
 	 */
-	while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %"
-				STR(MAX_PATH)"s %*d %*d\n",
+	while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
+				STR(PATH_MAX)"s %*d %*d\n",
 				mountpoint, type, tokens) == 3) {
 
 		if (!strcmp(type, "cgroup")) {
@@ -57,15 +56,15 @@
 
 static int open_cgroup(char *name)
 {
-	char path[MAX_PATH+1];
-	char mnt[MAX_PATH+1];
+	char path[PATH_MAX + 1];
+	char mnt[PATH_MAX + 1];
 	int fd;
 
 
-	if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1))
+	if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
 		return -1;
 
-	snprintf(path, MAX_PATH, "%s/%s", mnt, name);
+	snprintf(path, PATH_MAX, "%s/%s", mnt, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 80d9598..0deac6a 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -1,5 +1,8 @@
 /*
- * GIT - The information manager from hell
+ * config.c
+ *
+ * Helper functions for parsing config items.
+ * Originally copied from GIT source.
  *
  * Copyright (C) Linus Torvalds, 2005
  * Copyright (C) Johannes Schindelin, 2005
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
index a88fefc..ffc35e7 100644
--- a/tools/perf/util/debugfs.c
+++ b/tools/perf/util/debugfs.c
@@ -2,8 +2,12 @@
 #include "debugfs.h"
 #include "cache.h"
 
+#include <linux/kernel.h>
+#include <sys/mount.h>
+
 static int debugfs_premounted;
-static char debugfs_mountpoint[MAX_PATH+1];
+char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
+char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
 
 static const char *debugfs_known_mountpoints[] = {
 	"/sys/kernel/debug/",
@@ -62,11 +66,9 @@
 	/* give up and parse /proc/mounts */
 	fp = fopen("/proc/mounts", "r");
 	if (fp == NULL)
-		die("Can't open /proc/mounts for read");
+		return NULL;
 
-	while (fscanf(fp, "%*s %"
-		      STR(MAX_PATH)
-		      "s %99s %*s %*d %*d\n",
+	while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
 		      debugfs_mountpoint, type) == 2) {
 		if (strcmp(type, "debugfs") == 0)
 			break;
@@ -106,6 +108,12 @@
 	return 0;
 }
 
+static void debugfs_set_tracing_events_path(const char *mountpoint)
+{
+	snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
+		 mountpoint, "tracing/events");
+}
+
 /* mount the debugfs somewhere if it's not mounted */
 
 char *debugfs_mount(const char *mountpoint)
@@ -113,7 +121,7 @@
 	/* see if it's already mounted */
 	if (debugfs_find_mountpoint()) {
 		debugfs_premounted = 1;
-		return debugfs_mountpoint;
+		goto out;
 	}
 
 	/* if not mounted and no argument */
@@ -129,12 +137,19 @@
 		return NULL;
 
 	/* save the mountpoint */
-	strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
 	debugfs_found = 1;
-
+	strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+out:
+	debugfs_set_tracing_events_path(debugfs_mountpoint);
 	return debugfs_mountpoint;
 }
 
+void debugfs_set_path(const char *mountpoint)
+{
+	snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint);
+	debugfs_set_tracing_events_path(mountpoint);
+}
+
 /* umount the debugfs */
 
 int debugfs_umount(void)
@@ -158,7 +173,7 @@
 
 int debugfs_write(const char *entry, const char *value)
 {
-	char path[MAX_PATH+1];
+	char path[PATH_MAX + 1];
 	int ret, count;
 	int fd;
 
@@ -203,7 +218,7 @@
  */
 int debugfs_read(const char *entry, char *buffer, size_t size)
 {
-	char path[MAX_PATH+1];
+	char path[PATH_MAX + 1];
 	int ret;
 	int fd;
 
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
index 83a0287..4a878f7 100644
--- a/tools/perf/util/debugfs.h
+++ b/tools/perf/util/debugfs.h
@@ -1,25 +1,18 @@
 #ifndef __DEBUGFS_H__
 #define __DEBUGFS_H__
 
-#include <sys/mount.h>
+const char *debugfs_find_mountpoint(void);
+int debugfs_valid_mountpoint(const char *debugfs);
+int debugfs_valid_entry(const char *path);
+char *debugfs_mount(const char *mountpoint);
+int debugfs_umount(void);
+void debugfs_set_path(const char *mountpoint);
+int debugfs_write(const char *entry, const char *value);
+int debugfs_read(const char *entry, char *buffer, size_t size);
+void debugfs_force_cleanup(void);
+int debugfs_make_path(const char *element, char *buffer, int size);
 
-#ifndef MAX_PATH
-# define MAX_PATH 256
-#endif
-
-#ifndef STR
-# define _STR(x) #x
-# define STR(x) _STR(x)
-#endif
-
-extern const char *debugfs_find_mountpoint(void);
-extern int debugfs_valid_mountpoint(const char *debugfs);
-extern int debugfs_valid_entry(const char *path);
-extern char *debugfs_mount(const char *mountpoint);
-extern int debugfs_umount(void);
-extern int debugfs_write(const char *entry, const char *value);
-extern int debugfs_read(const char *entry, char *buffer, size_t size);
-extern void debugfs_force_cleanup(void);
-extern int debugfs_make_path(const char *element, char *buffer, int size);
+extern char debugfs_mountpoint[];
+extern char tracing_events_path[];
 
 #endif /* __DEBUGFS_H__ */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 437f8ca..73ddaf0 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,7 +1,6 @@
 #include <linux/types.h>
 #include "event.h"
 #include "debug.h"
-#include "session.h"
 #include "sort.h"
 #include "string.h"
 #include "strlist.h"
@@ -44,36 +43,27 @@
 	.period	   = 1,
 };
 
-static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
-					 int full, perf_event__handler_t process,
-					 struct perf_session *session)
+static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
 {
 	char filename[PATH_MAX];
 	char bf[BUFSIZ];
 	FILE *fp;
 	size_t size = 0;
-	DIR *tasks;
-	struct dirent dirent, *next;
-	pid_t tgid = 0;
+	pid_t tgid = -1;
 
 	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
 
 	fp = fopen(filename, "r");
 	if (fp == NULL) {
-out_race:
-		/*
-		 * We raced with a task exiting - just return:
-		 */
 		pr_debug("couldn't open %s\n", filename);
 		return 0;
 	}
 
-	memset(&event->comm, 0, sizeof(event->comm));
-
-	while (!event->comm.comm[0] || !event->comm.pid) {
+	while (!comm[0] || (tgid < 0)) {
 		if (fgets(bf, sizeof(bf), fp) == NULL) {
-			pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
-			goto out;
+			pr_warning("couldn't get COMM and pgid, malformed %s\n",
+				   filename);
+			break;
 		}
 
 		if (memcmp(bf, "Name:", 5) == 0) {
@@ -81,33 +71,65 @@
 			while (*name && isspace(*name))
 				++name;
 			size = strlen(name) - 1;
-			memcpy(event->comm.comm, name, size++);
+			if (size >= len)
+				size = len - 1;
+			memcpy(comm, name, size);
+
 		} else if (memcmp(bf, "Tgid:", 5) == 0) {
 			char *tgids = bf + 5;
 			while (*tgids && isspace(*tgids))
 				++tgids;
-			tgid = event->comm.pid = atoi(tgids);
+			tgid = atoi(tgids);
 		}
 	}
 
+	fclose(fp);
+
+	return tgid;
+}
+
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+					 union perf_event *event, pid_t pid,
+					 int full,
+					 perf_event__handler_t process,
+					 struct machine *machine)
+{
+	char filename[PATH_MAX];
+	size_t size;
+	DIR *tasks;
+	struct dirent dirent, *next;
+	pid_t tgid;
+
+	memset(&event->comm, 0, sizeof(event->comm));
+
+	tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+					 sizeof(event->comm.comm));
+	if (tgid < 0)
+		goto out;
+
+	event->comm.pid = tgid;
 	event->comm.header.type = PERF_RECORD_COMM;
+
+	size = strlen(event->comm.comm) + 1;
 	size = ALIGN(size, sizeof(u64));
-	memset(event->comm.comm + size, 0, session->id_hdr_size);
+	memset(event->comm.comm + size, 0, machine->id_hdr_size);
 	event->comm.header.size = (sizeof(event->comm) -
 				(sizeof(event->comm.comm) - size) +
-				session->id_hdr_size);
+				machine->id_hdr_size);
 	if (!full) {
 		event->comm.tid = pid;
 
-		process(event, &synth_sample, session);
+		process(tool, event, &synth_sample, machine);
 		goto out;
 	}
 
 	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
 
 	tasks = opendir(filename);
-	if (tasks == NULL)
-		goto out_race;
+	if (tasks == NULL) {
+		pr_debug("couldn't open %s\n", filename);
+		return 0;
+	}
 
 	while (!readdir_r(tasks, &dirent, &next) && next) {
 		char *end;
@@ -115,22 +137,32 @@
 		if (*end)
 			continue;
 
+		/* already have tgid; just want to update the comm */
+		(void) perf_event__get_comm_tgid(pid, event->comm.comm,
+					 sizeof(event->comm.comm));
+
+		size = strlen(event->comm.comm) + 1;
+		size = ALIGN(size, sizeof(u64));
+		memset(event->comm.comm + size, 0, machine->id_hdr_size);
+		event->comm.header.size = (sizeof(event->comm) -
+					  (sizeof(event->comm.comm) - size) +
+					  machine->id_hdr_size);
+
 		event->comm.tid = pid;
 
-		process(event, &synth_sample, session);
+		process(tool, event, &synth_sample, machine);
 	}
 
 	closedir(tasks);
 out:
-	fclose(fp);
-
 	return tgid;
 }
 
-static int perf_event__synthesize_mmap_events(union perf_event *event,
+static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+					      union perf_event *event,
 					      pid_t pid, pid_t tgid,
 					      perf_event__handler_t process,
-					      struct perf_session *session)
+					      struct machine *machine)
 {
 	char filename[PATH_MAX];
 	FILE *fp;
@@ -193,12 +225,12 @@
 			event->mmap.len -= event->mmap.start;
 			event->mmap.header.size = (sizeof(event->mmap) -
 					        (sizeof(event->mmap.filename) - size));
-			memset(event->mmap.filename + size, 0, session->id_hdr_size);
-			event->mmap.header.size += session->id_hdr_size;
+			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+			event->mmap.header.size += machine->id_hdr_size;
 			event->mmap.pid = tgid;
 			event->mmap.tid = pid;
 
-			process(event, &synth_sample, session);
+			process(tool, event, &synth_sample, machine);
 		}
 	}
 
@@ -206,14 +238,14 @@
 	return 0;
 }
 
-int perf_event__synthesize_modules(perf_event__handler_t process,
-				   struct perf_session *session,
+int perf_event__synthesize_modules(struct perf_tool *tool,
+				   perf_event__handler_t process,
 				   struct machine *machine)
 {
 	struct rb_node *nd;
 	struct map_groups *kmaps = &machine->kmaps;
 	union perf_event *event = zalloc((sizeof(event->mmap) +
-					  session->id_hdr_size));
+					  machine->id_hdr_size));
 	if (event == NULL) {
 		pr_debug("Not enough memory synthesizing mmap event "
 			 "for kernel modules\n");
@@ -243,15 +275,15 @@
 		event->mmap.header.type = PERF_RECORD_MMAP;
 		event->mmap.header.size = (sizeof(event->mmap) -
 				        (sizeof(event->mmap.filename) - size));
-		memset(event->mmap.filename + size, 0, session->id_hdr_size);
-		event->mmap.header.size += session->id_hdr_size;
+		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+		event->mmap.header.size += machine->id_hdr_size;
 		event->mmap.start = pos->start;
 		event->mmap.len   = pos->end - pos->start;
 		event->mmap.pid   = machine->pid;
 
 		memcpy(event->mmap.filename, pos->dso->long_name,
 		       pos->dso->long_name_len + 1);
-		process(event, &synth_sample, session);
+		process(tool, event, &synth_sample, machine);
 	}
 
 	free(event);
@@ -260,40 +292,69 @@
 
 static int __event__synthesize_thread(union perf_event *comm_event,
 				      union perf_event *mmap_event,
-				      pid_t pid, perf_event__handler_t process,
-				      struct perf_session *session)
+				      pid_t pid, int full,
+					  perf_event__handler_t process,
+				      struct perf_tool *tool,
+				      struct machine *machine)
 {
-	pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
-					    session);
+	pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
+						 process, machine);
 	if (tgid == -1)
 		return -1;
-	return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
-					     process, session);
+	return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+						  process, machine);
 }
 
-int perf_event__synthesize_thread_map(struct thread_map *threads,
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+				      struct thread_map *threads,
 				      perf_event__handler_t process,
-				      struct perf_session *session)
+				      struct machine *machine)
 {
 	union perf_event *comm_event, *mmap_event;
-	int err = -1, thread;
+	int err = -1, thread, j;
 
-	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
 	if (comm_event == NULL)
 		goto out;
 
-	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
 	if (mmap_event == NULL)
 		goto out_free_comm;
 
 	err = 0;
 	for (thread = 0; thread < threads->nr; ++thread) {
 		if (__event__synthesize_thread(comm_event, mmap_event,
-					       threads->map[thread],
-					       process, session)) {
+					       threads->map[thread], 0,
+					       process, tool, machine)) {
 			err = -1;
 			break;
 		}
+
+		/*
+		 * comm.pid is set to thread group id by
+		 * perf_event__synthesize_comm
+		 */
+		if ((int) comm_event->comm.pid != threads->map[thread]) {
+			bool need_leader = true;
+
+			/* is thread group leader in thread_map? */
+			for (j = 0; j < threads->nr; ++j) {
+				if ((int) comm_event->comm.pid == threads->map[j]) {
+					need_leader = false;
+					break;
+				}
+			}
+
+			/* if not, generate events for it */
+			if (need_leader &&
+			    __event__synthesize_thread(comm_event,
+						      mmap_event,
+						      comm_event->comm.pid, 0,
+						      process, tool, machine)) {
+				err = -1;
+				break;
+			}
+		}
 	}
 	free(mmap_event);
 out_free_comm:
@@ -302,19 +363,20 @@
 	return err;
 }
 
-int perf_event__synthesize_threads(perf_event__handler_t process,
-				   struct perf_session *session)
+int perf_event__synthesize_threads(struct perf_tool *tool,
+				   perf_event__handler_t process,
+				   struct machine *machine)
 {
 	DIR *proc;
 	struct dirent dirent, *next;
 	union perf_event *comm_event, *mmap_event;
 	int err = -1;
 
-	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
 	if (comm_event == NULL)
 		goto out;
 
-	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
 	if (mmap_event == NULL)
 		goto out_free_comm;
 
@@ -329,8 +391,8 @@
 		if (*end) /* only interested in proper numerical dirents */
 			continue;
 
-		__event__synthesize_thread(comm_event, mmap_event, pid,
-					   process, session);
+		__event__synthesize_thread(comm_event, mmap_event, pid, 1,
+					   process, tool, machine);
 	}
 
 	closedir(proc);
@@ -365,8 +427,8 @@
 	return 1;
 }
 
-int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
-				       struct perf_session *session,
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+				       perf_event__handler_t process,
 				       struct machine *machine,
 				       const char *symbol_name)
 {
@@ -383,7 +445,7 @@
 	 */
 	struct process_symbol_args args = { .name = symbol_name, };
 	union perf_event *event = zalloc((sizeof(event->mmap) +
-					  session->id_hdr_size));
+					  machine->id_hdr_size));
 	if (event == NULL) {
 		pr_debug("Not enough memory synthesizing mmap event "
 			 "for kernel modules\n");
@@ -417,25 +479,32 @@
 	size = ALIGN(size, sizeof(u64));
 	event->mmap.header.type = PERF_RECORD_MMAP;
 	event->mmap.header.size = (sizeof(event->mmap) -
-			(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
+			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
 	event->mmap.pgoff = args.start;
 	event->mmap.start = map->start;
 	event->mmap.len   = map->end - event->mmap.start;
 	event->mmap.pid   = machine->pid;
 
-	err = process(event, &synth_sample, session);
+	err = process(tool, event, &synth_sample, machine);
 	free(event);
 
 	return err;
 }
 
-int perf_event__process_comm(union perf_event *event,
-			     struct perf_sample *sample __used,
-			     struct perf_session *session)
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
 {
-	struct thread *thread = perf_session__findnew(session, event->comm.tid);
+	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+}
 
-	dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
+int perf_event__process_comm(struct perf_tool *tool __used,
+			     union perf_event *event,
+			     struct perf_sample *sample __used,
+			     struct machine *machine)
+{
+	struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
+
+	if (dump_trace)
+		perf_event__fprintf_comm(event, stdout);
 
 	if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
 		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
@@ -445,13 +514,13 @@
 	return 0;
 }
 
-int perf_event__process_lost(union perf_event *event,
+int perf_event__process_lost(struct perf_tool *tool __used,
+			     union perf_event *event,
 			     struct perf_sample *sample __used,
-			     struct perf_session *session)
+			     struct machine *machine __used)
 {
 	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
 		    event->lost.id, event->lost.lost);
-	session->hists.stats.total_lost += event->lost.lost;
 	return 0;
 }
 
@@ -468,21 +537,15 @@
 		maps[MAP__FUNCTION]->end = ~0ULL;
 }
 
-static int perf_event__process_kernel_mmap(union perf_event *event,
-					   struct perf_session *session)
+static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
+					   union perf_event *event,
+					   struct machine *machine)
 {
 	struct map *map;
 	char kmmap_prefix[PATH_MAX];
-	struct machine *machine;
 	enum dso_kernel_type kernel_type;
 	bool is_kernel_mmap;
 
-	machine = perf_session__findnew_machine(session, event->mmap.pid);
-	if (!machine) {
-		pr_err("Can't find id %d's machine\n", event->mmap.pid);
-		goto out_problem;
-	}
-
 	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
 	if (machine__is_host(machine))
 		kernel_type = DSO_TYPE_KERNEL;
@@ -549,9 +612,9 @@
 		 * time /proc/sys/kernel/kptr_restrict was non zero.
 		 */
 		if (event->mmap.pgoff != 0) {
-			perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
-								 symbol_name,
-								 event->mmap.pgoff);
+			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+							 symbol_name,
+							 event->mmap.pgoff);
 		}
 
 		if (machine__is_default_guest(machine)) {
@@ -567,32 +630,35 @@
 	return -1;
 }
 
-int perf_event__process_mmap(union perf_event *event,
-			     struct perf_sample *sample __used,
-			     struct perf_session *session)
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 {
-	struct machine *machine;
+	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
+		       event->mmap.pid, event->mmap.tid, event->mmap.start,
+		       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+}
+
+int perf_event__process_mmap(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample __used,
+			     struct machine *machine)
+{
 	struct thread *thread;
 	struct map *map;
 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 	int ret = 0;
 
-	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
-			event->mmap.pid, event->mmap.tid, event->mmap.start,
-			event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+	if (dump_trace)
+		perf_event__fprintf_mmap(event, stdout);
 
 	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
 	    cpumode == PERF_RECORD_MISC_KERNEL) {
-		ret = perf_event__process_kernel_mmap(event, session);
+		ret = perf_event__process_kernel_mmap(tool, event, machine);
 		if (ret < 0)
 			goto out_problem;
 		return 0;
 	}
 
-	machine = perf_session__find_host_machine(session);
-	if (machine == NULL)
-		goto out_problem;
-	thread = perf_session__findnew(session, event->mmap.pid);
+	thread = machine__findnew_thread(machine, event->mmap.pid);
 	if (thread == NULL)
 		goto out_problem;
 	map = map__new(&machine->user_dsos, event->mmap.start,
@@ -610,18 +676,26 @@
 	return 0;
 }
 
-int perf_event__process_task(union perf_event *event,
-			     struct perf_sample *sample __used,
-			     struct perf_session *session)
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
 {
-	struct thread *thread = perf_session__findnew(session, event->fork.tid);
-	struct thread *parent = perf_session__findnew(session, event->fork.ptid);
+	return fprintf(fp, "(%d:%d):(%d:%d)\n",
+		       event->fork.pid, event->fork.tid,
+		       event->fork.ppid, event->fork.ptid);
+}
 
-	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
-		    event->fork.ppid, event->fork.ptid);
+int perf_event__process_task(struct perf_tool *tool __used,
+			     union perf_event *event,
+			     struct perf_sample *sample __used,
+			     struct machine *machine)
+{
+	struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+	struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
+
+	if (dump_trace)
+		perf_event__fprintf_task(event, stdout);
 
 	if (event->header.type == PERF_RECORD_EXIT) {
-		perf_session__remove_thread(session, thread);
+		machine__remove_thread(machine, thread);
 		return 0;
 	}
 
@@ -634,22 +708,45 @@
 	return 0;
 }
 
-int perf_event__process(union perf_event *event, struct perf_sample *sample,
-			struct perf_session *session)
+size_t perf_event__fprintf(union perf_event *event, FILE *fp)
 {
+	size_t ret = fprintf(fp, "PERF_RECORD_%s",
+			     perf_event__name(event->header.type));
+
 	switch (event->header.type) {
 	case PERF_RECORD_COMM:
-		perf_event__process_comm(event, sample, session);
-		break;
-	case PERF_RECORD_MMAP:
-		perf_event__process_mmap(event, sample, session);
+		ret += perf_event__fprintf_comm(event, fp);
 		break;
 	case PERF_RECORD_FORK:
 	case PERF_RECORD_EXIT:
-		perf_event__process_task(event, sample, session);
+		ret += perf_event__fprintf_task(event, fp);
+		break;
+	case PERF_RECORD_MMAP:
+		ret += perf_event__fprintf_mmap(event, fp);
+		break;
+	default:
+		ret += fprintf(fp, "\n");
+	}
+
+	return ret;
+}
+
+int perf_event__process(struct perf_tool *tool, union perf_event *event,
+			struct perf_sample *sample, struct machine *machine)
+{
+	switch (event->header.type) {
+	case PERF_RECORD_COMM:
+		perf_event__process_comm(tool, event, sample, machine);
+		break;
+	case PERF_RECORD_MMAP:
+		perf_event__process_mmap(tool, event, sample, machine);
+		break;
+	case PERF_RECORD_FORK:
+	case PERF_RECORD_EXIT:
+		perf_event__process_task(tool, event, sample, machine);
 		break;
 	case PERF_RECORD_LOST:
-		perf_event__process_lost(event, sample, session);
+		perf_event__process_lost(tool, event, sample, machine);
 	default:
 		break;
 	}
@@ -658,36 +755,29 @@
 }
 
 void thread__find_addr_map(struct thread *self,
-			   struct perf_session *session, u8 cpumode,
-			   enum map_type type, pid_t pid, u64 addr,
+			   struct machine *machine, u8 cpumode,
+			   enum map_type type, u64 addr,
 			   struct addr_location *al)
 {
 	struct map_groups *mg = &self->mg;
-	struct machine *machine = NULL;
 
 	al->thread = self;
 	al->addr = addr;
 	al->cpumode = cpumode;
 	al->filtered = false;
 
+	if (machine == NULL) {
+		al->map = NULL;
+		return;
+	}
+
 	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
 		al->level = 'k';
-		machine = perf_session__find_host_machine(session);
-		if (machine == NULL) {
-			al->map = NULL;
-			return;
-		}
 		mg = &machine->kmaps;
 	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
 		al->level = '.';
-		machine = perf_session__find_host_machine(session);
 	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
 		al->level = 'g';
-		machine = perf_session__find_machine(session, pid);
-		if (machine == NULL) {
-			al->map = NULL;
-			return;
-		}
 		mg = &machine->kmaps;
 	} else {
 		/*
@@ -733,13 +823,12 @@
 		al->addr = al->map->map_ip(al->map, al->addr);
 }
 
-void thread__find_addr_location(struct thread *self,
-				struct perf_session *session, u8 cpumode,
-				enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+				u8 cpumode, enum map_type type, u64 addr,
 				struct addr_location *al,
 				symbol_filter_t filter)
 {
-	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
+	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
 	if (al->map != NULL)
 		al->sym = map__find_symbol(al->map, al->addr, filter);
 	else
@@ -747,13 +836,13 @@
 }
 
 int perf_event__preprocess_sample(const union perf_event *event,
-				  struct perf_session *session,
+				  struct machine *machine,
 				  struct addr_location *al,
 				  struct perf_sample *sample,
 				  symbol_filter_t filter)
 {
 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
 
 	if (thread == NULL)
 		return -1;
@@ -764,18 +853,18 @@
 
 	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 	/*
-	 * Have we already created the kernel maps for the host machine?
+	 * Have we already created the kernel maps for this machine?
 	 *
 	 * This should have happened earlier, when we processed the kernel MMAP
 	 * events, but for older perf.data files there was no such thing, so do
 	 * it now.
 	 */
 	if (cpumode == PERF_RECORD_MISC_KERNEL &&
-	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
-		machine__create_kernel_maps(&session->host_machine);
+	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+		machine__create_kernel_maps(machine);
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, event->ip.ip, al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      event->ip.ip, al);
 	dump_printf(" ...... dso: %s\n",
 		    al->map ? al->map->dso->long_name :
 			al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -783,13 +872,14 @@
 	al->cpu = sample->cpu;
 
 	if (al->map) {
+		struct dso *dso = al->map->dso;
+
 		if (symbol_conf.dso_list &&
-		    (!al->map || !al->map->dso ||
-		     !(strlist__has_entry(symbol_conf.dso_list,
-					  al->map->dso->short_name) ||
-		       (al->map->dso->short_name != al->map->dso->long_name &&
-			strlist__has_entry(symbol_conf.dso_list,
-					   al->map->dso->long_name)))))
+		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
+						  dso->short_name) ||
+			       (dso->short_name != dso->long_name &&
+				strlist__has_entry(symbol_conf.dso_list,
+						   dso->long_name)))))
 			goto out_filtered;
 
 		al->sym = map__find_symbol(al->map, al->addr, filter);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 357a85b..cbdeaad 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -2,6 +2,7 @@
 #define __PERF_RECORD_H
 
 #include <limits.h>
+#include <stdio.h>
 
 #include "../perf.h"
 #include "map.h"
@@ -141,43 +142,54 @@
 
 void perf_event__print_totals(void);
 
-struct perf_session;
+struct perf_tool;
 struct thread_map;
 
-typedef int (*perf_event__handler_synth_t)(union perf_event *event, 
-					   struct perf_session *session);
-typedef int (*perf_event__handler_t)(union perf_event *event,
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+				     union perf_event *event,
 				     struct perf_sample *sample,
-				      struct perf_session *session);
+				     struct machine *machine);
 
-int perf_event__synthesize_thread_map(struct thread_map *threads,
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+				      struct thread_map *threads,
 				      perf_event__handler_t process,
-				      struct perf_session *session);
-int perf_event__synthesize_threads(perf_event__handler_t process,
-				   struct perf_session *session);
-int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
-				       struct perf_session *session,
+				      struct machine *machine);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+				   perf_event__handler_t process,
+				   struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+				       perf_event__handler_t process,
 				       struct machine *machine,
 				       const char *symbol_name);
 
-int perf_event__synthesize_modules(perf_event__handler_t process,
-				   struct perf_session *session,
+int perf_event__synthesize_modules(struct perf_tool *tool,
+				   perf_event__handler_t process,
 				   struct machine *machine);
 
-int perf_event__process_comm(union perf_event *event, struct perf_sample *sample,
-			     struct perf_session *session);
-int perf_event__process_lost(union perf_event *event, struct perf_sample *sample,
-			     struct perf_session *session);
-int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample,
-			     struct perf_session *session);
-int perf_event__process_task(union perf_event *event, struct perf_sample *sample,
-			     struct perf_session *session);
-int perf_event__process(union perf_event *event, struct perf_sample *sample,
-			struct perf_session *session);
+int perf_event__process_comm(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_task(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+			union perf_event *event,
+			struct perf_sample *sample,
+			struct machine *machine);
 
 struct addr_location;
 int perf_event__preprocess_sample(const union perf_event *self,
-				  struct perf_session *session,
+				  struct machine *machine,
 				  struct addr_location *al,
 				  struct perf_sample *sample,
 				  symbol_filter_t filter);
@@ -187,5 +199,13 @@
 int perf_event__parse_sample(const union perf_event *event, u64 type,
 			     int sample_size, bool sample_id_all,
 			     struct perf_sample *sample, bool swapped);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+				  const struct perf_sample *sample,
+				  bool swapped);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index fbb4b4a..fa18370 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -6,12 +6,16 @@
  *
  * Released under the GPL v2. (and only v2, not any later version)
  */
+#include "util.h"
+#include "debugfs.h"
 #include <poll.h>
 #include "cpumap.h"
 #include "thread_map.h"
 #include "evlist.h"
 #include "evsel.h"
-#include "util.h"
+#include <unistd.h>
+
+#include "parse-events.h"
 
 #include <sys/mman.h>
 
@@ -30,6 +34,7 @@
 		INIT_HLIST_HEAD(&evlist->heads[i]);
 	INIT_LIST_HEAD(&evlist->entries);
 	perf_evlist__set_maps(evlist, cpus, threads);
+	evlist->workload.pid = -1;
 }
 
 struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
@@ -43,6 +48,22 @@
 	return evlist;
 }
 
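+/*
+ * Apply the record options to every evsel on the list; when more than
+ * one event is being recorded, PERF_SAMPLE_ID is forced so samples can
+ * be matched back to the event that generated them.
+ */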
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+			       struct perf_record_opts *opts)
+{
+	struct perf_evsel *evsel;
+
+	if (evlist->cpus->map[0] < 0)
+		opts->no_inherit = true;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		perf_evsel__config(evsel, opts);
+
+		if (evlist->nr_entries > 1)
+			evsel->attr.sample_type |= PERF_SAMPLE_ID;
+	}
+}
+
 static void perf_evlist__purge(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos, *n;
@@ -76,6 +97,14 @@
 	++evlist->nr_entries;
 }
 
+static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+					  struct list_head *list,
+					  int nr_entries)
+{
+	list_splice_tail(list, &evlist->entries);
+	evlist->nr_entries += nr_entries;
+}
+
 int perf_evlist__add_default(struct perf_evlist *evlist)
 {
 	struct perf_event_attr attr = {
@@ -100,6 +129,126 @@
 	return -ENOMEM;
 }
 
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+			   struct perf_event_attr *attrs, size_t nr_attrs)
+{
+	struct perf_evsel *evsel, *n;
+	LIST_HEAD(head);
+	size_t i;
+
+	for (i = 0; i < nr_attrs; i++) {
+		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+		if (evsel == NULL)
+			goto out_delete_partial_list;
+		list_add_tail(&evsel->node, &head);
+	}
+
+	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+
+	return 0;
+
+out_delete_partial_list:
+	list_for_each_entry_safe(evsel, n, &head, node)
+		perf_evsel__delete(evsel);
+	return -1;
+}
+
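+/*
+ * Look up the numeric id of a tracepoint by reading the "id" file under
+ * tracing_events_path; a "subsys:name" spec is rewritten to "subsys/name".
+ */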
+static int trace_event__id(const char *evname)
+{
+	char *filename, *colon;
+	int err = -1, fd;
+
+	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
+		return -1;
+
+	colon = strrchr(filename, ':');
+	if (colon != NULL)
+		*colon = '/';
+
+	fd = open(filename, O_RDONLY);
+	if (fd >= 0) {
+		char id[16];
+		if (read(fd, id, sizeof(id)) > 0)
+			err = atoi(id);
+		close(fd);
+	}
+
+	free(filename);
+	return err;
+}
+
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+				 const char *tracepoints[],
+				 size_t nr_tracepoints)
+{
+	int err;
+	size_t i;
+	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
+
+	if (attrs == NULL)
+		return -1;
+
+	for (i = 0; i < nr_tracepoints; i++) {
+		err = trace_event__id(tracepoints[i]);
+
+		if (err < 0)
+			goto out_free_attrs;
+
+		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
+		attrs[i].config	       = err;
+		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+					  PERF_SAMPLE_CPU);
+		attrs[i].sample_period = 1;
+	}
+
+	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
+out_free_attrs:
+	free(attrs);
+	return err;
+}
+
+static struct perf_evsel *
+	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+{
+	struct perf_evsel *evsel;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
+		    (int)evsel->attr.config == id)
+			return evsel;
+	}
+
+	return NULL;
+}
+
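+/*
+ * Associate handler callbacks with tracepoint evsels already on the
+ * list, matching them by tracepoint id.
+ */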
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+					  const struct perf_evsel_str_handler *assocs,
+					  size_t nr_assocs)
+{
+	struct perf_evsel *evsel;
+	int err;
+	size_t i;
+
+	for (i = 0; i < nr_assocs; i++) {
+		err = trace_event__id(assocs[i].name);
+		if (err < 0)
+			goto out;
+
+		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
+		if (evsel == NULL)
+			continue;
+
+		err = -EEXIST;
+		if (evsel->handler.func != NULL)
+			goto out;
+		evsel->handler.func = assocs[i].handler;
+	}
+
+	err = 0;
+out:
+	return err;
+}
+
 void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
@@ -126,7 +275,7 @@
 	}
 }
 
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
 	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
 	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
@@ -282,7 +431,7 @@
 	evlist->mmap = NULL;
 }
 
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
 	evlist->nr_mmaps = evlist->cpus->nr;
 	if (evlist->cpus->map[0] == -1)
@@ -298,8 +447,10 @@
 	evlist->mmap[idx].mask = mask;
 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
 				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[idx].base == MAP_FAILED)
+	if (evlist->mmap[idx].base == MAP_FAILED) {
+		evlist->mmap[idx].base = NULL;
 		return -1;
+	}
 
 	perf_evlist__add_pollfd(evlist, fd);
 	return 0;
@@ -400,14 +551,22 @@
  *
  * Using perf_evlist__read_on_cpu does this automatically.
  */
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+		      bool overwrite)
 {
 	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-	int mask = pages * page_size - 1;
 	struct perf_evsel *evsel;
 	const struct cpu_map *cpus = evlist->cpus;
 	const struct thread_map *threads = evlist->threads;
-	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+	/* 512 kiB: default amount of unprivileged mlocked memory */
+	if (pages == UINT_MAX)
+		pages = (512 * 1024) / page_size;
+	else if (!is_power_of_2(pages))
+		return -EINVAL;
+
+	mask = pages * page_size - 1;
 
 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 		return -ENOMEM;
@@ -512,6 +671,38 @@
 	return first->attr.sample_type;
 }
 
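+/*
+ * Size of the sample_id_all trailer appended to non-sample events
+ * (COMM, MMAP, ...), derived from the first evsel's sample_type.
+ */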
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
+{
+	struct perf_evsel *first;
+	struct perf_sample *data;
+	u64 sample_type;
+	u16 size = 0;
+
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+	if (!first->attr.sample_id_all)
+		goto out;
+
+	sample_type = first->attr.sample_type;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		size += sizeof(data->tid) * 2;
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		size += sizeof(data->time);
+
+	if (sample_type & PERF_SAMPLE_ID)
+		size += sizeof(data->id);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		size += sizeof(data->stream_id);
+
+	if (sample_type & PERF_SAMPLE_CPU)
+		size += sizeof(data->cpu) * 2;
+out:
+	return size;
+}
+
 bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos, *first;
@@ -569,3 +760,97 @@
 
 	return err;
 }
+
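+/*
+ * Fork the workload but keep it "corked": the child signals readiness
+ * over child_ready_pipe and then blocks on go_pipe until
+ * perf_evlist__start_workload() closes the cork_fd end.
+ */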
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+				  struct perf_record_opts *opts,
+				  const char *argv[])
+{
+	int child_ready_pipe[2], go_pipe[2];
+	char bf;
+
+	if (pipe(child_ready_pipe) < 0) {
+		perror("failed to create 'ready' pipe");
+		return -1;
+	}
+
+	if (pipe(go_pipe) < 0) {
+		perror("failed to create 'go' pipe");
+		goto out_close_ready_pipe;
+	}
+
+	evlist->workload.pid = fork();
+	if (evlist->workload.pid < 0) {
+		perror("failed to fork");
+		goto out_close_pipes;
+	}
+
+	if (!evlist->workload.pid) {
+		if (opts->pipe_output)
+			dup2(2, 1);
+
+		close(child_ready_pipe[0]);
+		close(go_pipe[1]);
+		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+		/*
+		 * Do a dummy execvp to get the PLT entry resolved,
+		 * so we avoid the resolver overhead on the real
+		 * execvp call.
+		 */
+		execvp("", (char **)argv);
+
+		/*
+		 * Tell the parent we're ready to go
+		 */
+		close(child_ready_pipe[1]);
+
+		/*
+		 * Wait until the parent tells us to go.
+		 */
+		if (read(go_pipe[0], &bf, 1) == -1)
+			perror("unable to read pipe");
+
+		execvp(argv[0], (char **)argv);
+
+		perror(argv[0]);
+		kill(getppid(), SIGUSR1);
+		exit(-1);
+	}
+
+	if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
+		evlist->threads->map[0] = evlist->workload.pid;
+
+	close(child_ready_pipe[1]);
+	close(go_pipe[0]);
+	/*
+	 * wait for child to settle
+	 */
+	if (read(child_ready_pipe[0], &bf, 1) == -1) {
+		perror("unable to read pipe");
+		goto out_close_pipes;
+	}
+
+	evlist->workload.cork_fd = go_pipe[1];
+	close(child_ready_pipe[0]);
+	return 0;
+
+out_close_pipes:
+	close(go_pipe[0]);
+	close(go_pipe[1]);
+out_close_ready_pipe:
+	close(child_ready_pipe[0]);
+	close(child_ready_pipe[1]);
+	return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+	if (evlist->workload.cork_fd > 0) {
+		/*
+		 * Remove the cork, let it rip!
+		 */
+		return close(evlist->workload.cork_fd);
+	}
+
+	return 0;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1779ffe..8922aee 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -2,12 +2,16 @@
 #define __PERF_EVLIST_H 1
 
 #include <linux/list.h>
+#include <stdio.h>
 #include "../perf.h"
 #include "event.h"
+#include "util.h"
+#include <unistd.h>
 
 struct pollfd;
 struct thread_map;
 struct cpu_map;
+struct perf_record_opts;
 
 #define PERF_EVLIST__HLIST_BITS 8
 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
@@ -19,6 +23,10 @@
 	int		 nr_fds;
 	int		 nr_mmaps;
 	int		 mmap_len;
+	struct {
+		int	cork_fd;
+		pid_t	pid;
+	} workload;
 	bool		 overwrite;
 	union perf_event event_copy;
 	struct perf_mmap *mmap;
@@ -28,6 +36,11 @@
 	struct perf_evsel *selected;
 };
 
+struct perf_evsel_str_handler {
+	const char *name;
+	void	   *handler;
+};
+
 struct perf_evsel;
 
 struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
@@ -39,11 +52,26 @@
 
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
 int perf_evlist__add_default(struct perf_evlist *evlist);
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+			   struct perf_event_attr *attrs, size_t nr_attrs);
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+				 const char *tracepoints[], size_t nr_tracepoints);
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+					  const struct perf_evsel_str_handler *assocs,
+					  size_t nr_assocs);
+
+#define perf_evlist__add_attrs_array(evlist, array) \
+	perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__add_tracepoints_array(evlist, array) \
+	perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
+	perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
 
 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
 			 int cpu, int thread, u64 id);
 
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
@@ -52,8 +80,16 @@
 
 int perf_evlist__open(struct perf_evlist *evlist, bool group);
 
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+			       struct perf_record_opts *opts);
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+				  struct perf_record_opts *opts,
+				  const char *argv[]);
+int perf_evlist__start_workload(struct perf_evlist *evlist);
+
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+		      bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
 void perf_evlist__disable(struct perf_evlist *evlist);
@@ -77,6 +113,7 @@
 
 u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
 bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
 
 bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
 bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d7915d4..667f3b7 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -63,6 +63,79 @@
 	return evsel;
 }
 
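+/*
+ * Translate the record options into perf_event_attr settings for one
+ * evsel; only the first counter on the list gets the mmap/comm
+ * tracking bits.
+ */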
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+{
+	struct perf_event_attr *attr = &evsel->attr;
+	int track = !evsel->idx; /* only the first counter needs these */
+
+	attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
+	attr->inherit	    = !opts->no_inherit;
+	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
+			      PERF_FORMAT_TOTAL_TIME_RUNNING |
+			      PERF_FORMAT_ID;
+
+	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+	/*
+	 * We default some events to a sample period of 1. But keep it a
+	 * weak assumption, overridable by the user.
+	 */
+	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+				     opts->user_interval != ULLONG_MAX)) {
+		if (opts->freq) {
+			attr->sample_type	|= PERF_SAMPLE_PERIOD;
+			attr->freq		= 1;
+			attr->sample_freq	= opts->freq;
+		} else {
+			attr->sample_period = opts->default_interval;
+		}
+	}
+
+	if (opts->no_samples)
+		attr->sample_freq = 0;
+
+	if (opts->inherit_stat)
+		attr->inherit_stat = 1;
+
+	if (opts->sample_address) {
+		attr->sample_type	|= PERF_SAMPLE_ADDR;
+		attr->mmap_data = track;
+	}
+
+	if (opts->call_graph)
+		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;
+
+	if (opts->system_wide)
+		attr->sample_type	|= PERF_SAMPLE_CPU;
+
+	if (opts->period)
+		attr->sample_type	|= PERF_SAMPLE_PERIOD;
+
+	if (opts->sample_id_all_avail &&
+	    (opts->sample_time || opts->system_wide ||
+	     !opts->no_inherit || opts->cpu_list))
+		attr->sample_type	|= PERF_SAMPLE_TIME;
+
+	if (opts->raw_samples) {
+		attr->sample_type	|= PERF_SAMPLE_TIME;
+		attr->sample_type	|= PERF_SAMPLE_RAW;
+		attr->sample_type	|= PERF_SAMPLE_CPU;
+	}
+
+	if (opts->no_delay) {
+		attr->watermark = 0;
+		attr->wakeup_events = 1;
+	}
+
+	attr->mmap = track;
+	attr->comm = track;
+
+	if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) {
+		attr->disabled = 1;
+		attr->enable_on_exec = 1;
+	}
+}
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
 	int cpu, thread;
@@ -387,7 +460,7 @@
 		u32 val32[2];
 	} u;
 
-
+	memset(data, 0, sizeof(*data));
 	data->cpu = data->pid = data->tid = -1;
 	data->stream_id = data->id = data->time = -1ULL;
 
@@ -504,3 +577,82 @@
 
 	return 0;
 }
+
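+/*
+ * Inverse of perf_event__parse_sample(): lay a perf_sample back out
+ * into the raw u64 array format selected by 'type'.
+ */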
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+				  const struct perf_sample *sample,
+				  bool swapped)
+{
+	u64 *array;
+
+	/*
+	 * used for cross-endian analysis. See git commit 65014ab3
+	 * for why this goofiness is needed.
+	 */
+	union {
+		u64 val64;
+		u32 val32[2];
+	} u;
+
+	array = event->sample.array;
+
+	if (type & PERF_SAMPLE_IP) {
+		event->ip.ip = sample->ip;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_TID) {
+		u.val32[0] = sample->pid;
+		u.val32[1] = sample->tid;
+		if (swapped) {
+			/*
+			 * Inverse of what is done in perf_event__parse_sample
+			 */
+			u.val32[0] = bswap_32(u.val32[0]);
+			u.val32[1] = bswap_32(u.val32[1]);
+			u.val64 = bswap_64(u.val64);
+		}
+
+		*array = u.val64;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_TIME) {
+		*array = sample->time;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_ADDR) {
+		*array = sample->addr;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_ID) {
+		*array = sample->id;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_STREAM_ID) {
+		*array = sample->stream_id;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_CPU) {
+		u.val32[0] = sample->cpu;
+		if (swapped) {
+			/*
+			 * Inverse of what is done in perf_event__parse_sample
+			 */
+			u.val32[0] = bswap_32(u.val32[0]);
+			u.val64 = bswap_64(u.val64);
+		}
+		*array = u.val64;
+		array++;
+	}
+
+	if (type & PERF_SAMPLE_PERIOD) {
+		*array = sample->period;
+		array++;
+	}
+
+	return 0;
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b1d15e6..326b8e4 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -61,12 +61,17 @@
 		off_t		id_offset;
 	};
 	struct cgroup_sel	*cgrp;
+	struct {
+		void		*func;
+		void		*data;
+	} handler;
 	bool 			supported;
 };
 
 struct cpu_map;
 struct thread_map;
 struct perf_evlist;
+struct perf_record_opts;
 
 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
 void perf_evsel__init(struct perf_evsel *evsel,
@@ -74,6 +79,9 @@
 void perf_evsel__exit(struct perf_evsel *evsel);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
+void perf_evsel__config(struct perf_evsel *evsel,
+			struct perf_record_opts *opts);
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 33c17a2..3e7e0b0 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -8,6 +8,7 @@
 #include <stdlib.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
+#include <linux/bitops.h>
 #include <sys/utsname.h>
 
 #include "evlist.h"
@@ -28,9 +29,6 @@
 static u32 header_argc;
 static const char **header_argv;
 
-static int dsos__write_buildid_table(struct perf_header *header, int fd);
-static int perf_session__cache_build_ids(struct perf_session *session);
-
 int perf_header__push_event(u64 id, const char *name)
 {
 	if (strlen(name) > MAX_EVENT_NAME)
@@ -187,6 +185,252 @@
 	return 0;
 }
 
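+/* Iterate over the DSOs on 'head', skipping entries without a build id. */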
+#define dsos__for_each_with_build_id(pos, head)	\
+	list_for_each_entry(pos, head, node)	\
+		if (!pos->has_build_id)		\
+			continue;		\
+		else
+
+static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
+				u16 misc, int fd)
+{
+	struct dso *pos;
+
+	dsos__for_each_with_build_id(pos, head) {
+		int err;
+		struct build_id_event b;
+		size_t len;
+
+		if (!pos->hit)
+			continue;
+		len = pos->long_name_len + 1;
+		len = ALIGN(len, NAME_ALIGN);
+		memset(&b, 0, sizeof(b));
+		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+		b.pid = pid;
+		b.header.misc = misc;
+		b.header.size = sizeof(b) + len;
+		err = do_write(fd, &b, sizeof(b));
+		if (err < 0)
+			return err;
+		err = write_padded(fd, pos->long_name,
+				   pos->long_name_len + 1, len);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+static int machine__write_buildid_table(struct machine *machine, int fd)
+{
+	int err;
+	u16 kmisc = PERF_RECORD_MISC_KERNEL,
+	    umisc = PERF_RECORD_MISC_USER;
+
+	if (!machine__is_host(machine)) {
+		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+		umisc = PERF_RECORD_MISC_GUEST_USER;
+	}
+
+	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
+					  kmisc, fd);
+	if (err == 0)
+		err = __dsos__write_buildid_table(&machine->user_dsos,
+						  machine->pid, umisc, fd);
+	return err;
+}
+
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
+{
+	struct perf_session *session = container_of(header,
+			struct perf_session, header);
+	struct rb_node *nd;
+	int err = machine__write_buildid_table(&session->host_machine, fd);
+
+	if (err)
+		return err;
+
+	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+		struct machine *pos = rb_entry(nd, struct machine, rb_node);
+		err = machine__write_buildid_table(pos, fd);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+			  const char *name, bool is_kallsyms)
+{
+	const size_t size = PATH_MAX;
+	char *realname, *filename = zalloc(size),
+	     *linkname = zalloc(size), *targetname;
+	int len, err = -1;
+
+	if (is_kallsyms) {
+		if (symbol_conf.kptr_restrict) {
+			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+			return 0;
+		}
+		realname = (char *)name;
+	} else
+		realname = realpath(name, NULL);
+
+	if (realname == NULL || filename == NULL || linkname == NULL)
+		goto out_free;
+
+	len = snprintf(filename, size, "%s%s%s",
+		       debugdir, is_kallsyms ? "/" : "", realname);
+	if (mkdir_p(filename, 0755))
+		goto out_free;
+
+	snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+	if (access(filename, F_OK)) {
+		if (is_kallsyms) {
+			 if (copyfile("/proc/kallsyms", filename))
+				goto out_free;
+		} else if (link(realname, filename) && copyfile(name, filename))
+			goto out_free;
+	}
+
+	len = snprintf(linkname, size, "%s/.build-id/%.2s",
+		       debugdir, sbuild_id);
+
+	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+		goto out_free;
+
+	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+	targetname = filename + strlen(debugdir) - 5;
+	memcpy(targetname, "../..", 5);
+
+	if (symlink(targetname, linkname) == 0)
+		err = 0;
+out_free:
+	if (!is_kallsyms)
+		free(realname);
+	free(filename);
+	free(linkname);
+	return err;
+}
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+				 const char *name, const char *debugdir,
+				 bool is_kallsyms)
+{
+	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+	build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+	const size_t size = PATH_MAX;
+	char *filename = zalloc(size),
+	     *linkname = zalloc(size);
+	int err = -1;
+
+	if (filename == NULL || linkname == NULL)
+		goto out_free;
+
+	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+		 debugdir, sbuild_id, sbuild_id + 2);
+
+	if (access(linkname, F_OK))
+		goto out_free;
+
+	if (readlink(linkname, filename, size - 1) < 0)
+		goto out_free;
+
+	if (unlink(linkname))
+		goto out_free;
+
+	/*
+	 * Since the link is relative, we must make it absolute:
+	 */
+	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+		 debugdir, sbuild_id, filename);
+
+	if (unlink(linkname))
+		goto out_free;
+
+	err = 0;
+out_free:
+	free(filename);
+	free(linkname);
+	return err;
+}
+
+static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+{
+	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+
+	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
+				     dso->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+	struct dso *pos;
+	int err = 0;
+
+	dsos__for_each_with_build_id(pos, head)
+		if (dso__cache_build_id(pos, debugdir))
+			err = -1;
+
+	return err;
+}
+
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
+{
+	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
+	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+	return ret;
+}
+
+static int perf_session__cache_build_ids(struct perf_session *session)
+{
+	struct rb_node *nd;
+	int ret;
+	char debugdir[PATH_MAX];
+
+	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+		return -1;
+
+	ret = machine__cache_build_ids(&session->host_machine, debugdir);
+
+	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+		struct machine *pos = rb_entry(nd, struct machine, rb_node);
+		ret |= machine__cache_build_ids(pos, debugdir);
+	}
+	return ret ? -1 : 0;
+}
+
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
+{
+	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
+	return ret;
+}
+
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
+{
+	struct rb_node *nd;
+	bool ret = machine__read_build_ids(&session->host_machine, with_hits);
+
+	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+		struct machine *pos = rb_entry(nd, struct machine, rb_node);
+		ret |= machine__read_build_ids(pos, with_hits);
+	}
+
+	return ret;
+}
+
 static int write_trace_info(int fd, struct perf_header *h __used,
 			    struct perf_evlist *evlist)
 {
@@ -202,6 +446,9 @@
 
 	session = container_of(h, struct perf_session, header);
 
+	if (!perf_session__read_build_ids(session, true))
+		return -1;
+
 	err = dsos__write_buildid_table(h, fd);
 	if (err < 0) {
 		pr_debug("failed to write buildid table\n");
@@ -1065,26 +1312,30 @@
 	bool full_only;
 };
 
-#define FEAT_OPA(n, w, p) \
-	[n] = { .name = #n, .write = w, .print = p }
-#define FEAT_OPF(n, w, p) \
-	[n] = { .name = #n, .write = w, .print = p, .full_only = true }
+#define FEAT_OPA(n, func) \
+	[n] = { .name = #n, .write = write_##func, .print = print_##func }
+#define FEAT_OPF(n, func) \
+	[n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true }
+
+/* feature_ops not implemented: */
+#define print_trace_info		NULL
+#define print_build_id			NULL
 
 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
-	FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL),
-	FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL),
-	FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname),
-	FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease),
-	FEAT_OPA(HEADER_VERSION, write_version, print_version),
-	FEAT_OPA(HEADER_ARCH, write_arch, print_arch),
-	FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus),
-	FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc),
-	FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid),
-	FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem),
-	FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc),
-	FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline),
-	FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology),
-	FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology),
+	FEAT_OPA(HEADER_TRACE_INFO,	trace_info),
+	FEAT_OPA(HEADER_BUILD_ID,	build_id),
+	FEAT_OPA(HEADER_HOSTNAME,	hostname),
+	FEAT_OPA(HEADER_OSRELEASE,	osrelease),
+	FEAT_OPA(HEADER_VERSION,	version),
+	FEAT_OPA(HEADER_ARCH,		arch),
+	FEAT_OPA(HEADER_NRCPUS,		nrcpus),
+	FEAT_OPA(HEADER_CPUDESC,	cpudesc),
+	FEAT_OPA(HEADER_CPUID,		cpuid),
+	FEAT_OPA(HEADER_TOTAL_MEM,	total_mem),
+	FEAT_OPA(HEADER_EVENT_DESC,	event_desc),
+	FEAT_OPA(HEADER_CMDLINE,	cmdline),
+	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
+	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
 };
 
 struct header_print_data {
@@ -1103,9 +1354,9 @@
 				"%d, continuing...\n", section->offset, feat);
 		return 0;
 	}
-	if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) {
+	if (feat >= HEADER_LAST_FEATURE) {
 		pr_warning("unknown feature %d\n", feat);
-		return -1;
+		return 0;
 	}
 	if (!feat_ops[feat].print)
 		return 0;
@@ -1132,252 +1383,6 @@
 	return 0;
 }
 
-#define dsos__for_each_with_build_id(pos, head)	\
-	list_for_each_entry(pos, head, node)	\
-		if (!pos->has_build_id)		\
-			continue;		\
-		else
-
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
-				u16 misc, int fd)
-{
-	struct dso *pos;
-
-	dsos__for_each_with_build_id(pos, head) {
-		int err;
-		struct build_id_event b;
-		size_t len;
-
-		if (!pos->hit)
-			continue;
-		len = pos->long_name_len + 1;
-		len = ALIGN(len, NAME_ALIGN);
-		memset(&b, 0, sizeof(b));
-		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
-		b.pid = pid;
-		b.header.misc = misc;
-		b.header.size = sizeof(b) + len;
-		err = do_write(fd, &b, sizeof(b));
-		if (err < 0)
-			return err;
-		err = write_padded(fd, pos->long_name,
-				   pos->long_name_len + 1, len);
-		if (err < 0)
-			return err;
-	}
-
-	return 0;
-}
-
-static int machine__write_buildid_table(struct machine *machine, int fd)
-{
-	int err;
-	u16 kmisc = PERF_RECORD_MISC_KERNEL,
-	    umisc = PERF_RECORD_MISC_USER;
-
-	if (!machine__is_host(machine)) {
-		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
-		umisc = PERF_RECORD_MISC_GUEST_USER;
-	}
-
-	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
-					  kmisc, fd);
-	if (err == 0)
-		err = __dsos__write_buildid_table(&machine->user_dsos,
-						  machine->pid, umisc, fd);
-	return err;
-}
-
-static int dsos__write_buildid_table(struct perf_header *header, int fd)
-{
-	struct perf_session *session = container_of(header,
-			struct perf_session, header);
-	struct rb_node *nd;
-	int err = machine__write_buildid_table(&session->host_machine, fd);
-
-	if (err)
-		return err;
-
-	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
-		struct machine *pos = rb_entry(nd, struct machine, rb_node);
-		err = machine__write_buildid_table(pos, fd);
-		if (err)
-			break;
-	}
-	return err;
-}
-
-int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
-			  const char *name, bool is_kallsyms)
-{
-	const size_t size = PATH_MAX;
-	char *realname, *filename = zalloc(size),
-	     *linkname = zalloc(size), *targetname;
-	int len, err = -1;
-
-	if (is_kallsyms) {
-		if (symbol_conf.kptr_restrict) {
-			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
-			return 0;
-		}
-		realname = (char *)name;
-	} else
-		realname = realpath(name, NULL);
-
-	if (realname == NULL || filename == NULL || linkname == NULL)
-		goto out_free;
-
-	len = snprintf(filename, size, "%s%s%s",
-		       debugdir, is_kallsyms ? "/" : "", realname);
-	if (mkdir_p(filename, 0755))
-		goto out_free;
-
-	snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
-
-	if (access(filename, F_OK)) {
-		if (is_kallsyms) {
-			 if (copyfile("/proc/kallsyms", filename))
-				goto out_free;
-		} else if (link(realname, filename) && copyfile(name, filename))
-			goto out_free;
-	}
-
-	len = snprintf(linkname, size, "%s/.build-id/%.2s",
-		       debugdir, sbuild_id);
-
-	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
-		goto out_free;
-
-	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
-	targetname = filename + strlen(debugdir) - 5;
-	memcpy(targetname, "../..", 5);
-
-	if (symlink(targetname, linkname) == 0)
-		err = 0;
-out_free:
-	if (!is_kallsyms)
-		free(realname);
-	free(filename);
-	free(linkname);
-	return err;
-}
-
-static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
-				 const char *name, const char *debugdir,
-				 bool is_kallsyms)
-{
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
-	build_id__sprintf(build_id, build_id_size, sbuild_id);
-
-	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
-}
-
-int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
-{
-	const size_t size = PATH_MAX;
-	char *filename = zalloc(size),
-	     *linkname = zalloc(size);
-	int err = -1;
-
-	if (filename == NULL || linkname == NULL)
-		goto out_free;
-
-	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
-		 debugdir, sbuild_id, sbuild_id + 2);
-
-	if (access(linkname, F_OK))
-		goto out_free;
-
-	if (readlink(linkname, filename, size - 1) < 0)
-		goto out_free;
-
-	if (unlink(linkname))
-		goto out_free;
-
-	/*
-	 * Since the link is relative, we must make it absolute:
-	 */
-	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
-		 debugdir, sbuild_id, filename);
-
-	if (unlink(linkname))
-		goto out_free;
-
-	err = 0;
-out_free:
-	free(filename);
-	free(linkname);
-	return err;
-}
-
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
-{
-	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
-
-	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
-				     dso->long_name, debugdir, is_kallsyms);
-}
-
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
-{
-	struct dso *pos;
-	int err = 0;
-
-	dsos__for_each_with_build_id(pos, head)
-		if (dso__cache_build_id(pos, debugdir))
-			err = -1;
-
-	return err;
-}
-
-static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
-{
-	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
-	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
-	return ret;
-}
-
-static int perf_session__cache_build_ids(struct perf_session *session)
-{
-	struct rb_node *nd;
-	int ret;
-	char debugdir[PATH_MAX];
-
-	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
-	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
-		return -1;
-
-	ret = machine__cache_build_ids(&session->host_machine, debugdir);
-
-	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
-		struct machine *pos = rb_entry(nd, struct machine, rb_node);
-		ret |= machine__cache_build_ids(pos, debugdir);
-	}
-	return ret ? -1 : 0;
-}
-
-static bool machine__read_build_ids(struct machine *machine, bool with_hits)
-{
-	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
-	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
-	return ret;
-}
-
-static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
-{
-	struct rb_node *nd;
-	bool ret = machine__read_build_ids(&session->host_machine, with_hits);
-
-	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
-		struct machine *pos = rb_entry(nd, struct machine, rb_node);
-		ret |= machine__read_build_ids(pos, with_hits);
-	}
-
-	return ret;
-}
-
 static int do_write_feat(int fd, struct perf_header *h, int type,
 			 struct perf_file_section **p,
 			 struct perf_evlist *evlist)
@@ -1386,6 +1391,8 @@
 	int ret = 0;
 
 	if (perf_header__has_feat(h, type)) {
+		if (!feat_ops[type].write)
+			return -1;
 
 		(*p)->offset = lseek(fd, 0, SEEK_CUR);
 
@@ -1408,18 +1415,12 @@
 				   struct perf_evlist *evlist, int fd)
 {
 	int nr_sections;
-	struct perf_session *session;
 	struct perf_file_section *feat_sec, *p;
 	int sec_size;
 	u64 sec_start;
+	int feat;
 	int err;
 
-	session = container_of(header, struct perf_session, header);
-
-	if (perf_header__has_feat(header, HEADER_BUILD_ID &&
-	    !perf_session__read_build_ids(session, true)))
-		perf_header__clear_feat(header, HEADER_BUILD_ID);
-
 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
 	if (!nr_sections)
 		return 0;
@@ -1433,64 +1434,11 @@
 	sec_start = header->data_offset + header->data_size;
 	lseek(fd, sec_start + sec_size, SEEK_SET);
 
-	err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist);
-	if (err)
-		goto out_free;
-
-	err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist);
-	if (err) {
-		perf_header__clear_feat(header, HEADER_BUILD_ID);
-		goto out_free;
+	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+		if (do_write_feat(fd, header, feat, &p, evlist))
+			perf_header__clear_feat(header, feat);
 	}
 
-	err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_HOSTNAME);
-
-	err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_OSRELEASE);
-
-	err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_VERSION);
-
-	err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_ARCH);
-
-	err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_NRCPUS);
-
-	err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_CPUDESC);
-
-	err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_CPUID);
-
-	err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_TOTAL_MEM);
-
-	err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_CMDLINE);
-
-	err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_EVENT_DESC);
-
-	err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_CPU_TOPOLOGY);
-
-	err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, &p, evlist);
-	if (err)
-		perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY);
-
 	lseek(fd, sec_start, SEEK_SET);
 	/*
 	 * may write more than needed due to dropped feature, but
@@ -1499,7 +1447,6 @@
 	err = do_write(fd, feat_sec, sec_size);
 	if (err < 0)
 		pr_debug("failed to write feature section\n");
-out_free:
 	free(feat_sec);
 	return err;
 }
@@ -1637,20 +1584,20 @@
 int perf_header__process_sections(struct perf_header *header, int fd,
 				  void *data,
 				  int (*process)(struct perf_file_section *section,
-				  struct perf_header *ph,
-				  int feat, int fd, void *data))
+						 struct perf_header *ph,
+						 int feat, int fd, void *data))
 {
-	struct perf_file_section *feat_sec;
+	struct perf_file_section *feat_sec, *sec;
 	int nr_sections;
 	int sec_size;
-	int idx = 0;
-	int err = -1, feat = 1;
+	int feat;
+	int err;
 
 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
 	if (!nr_sections)
 		return 0;
 
-	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
+	feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
 	if (!feat_sec)
 		return -1;
 
@@ -1658,20 +1605,16 @@
 
 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
 
-	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
+	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+	if (err < 0)
 		goto out_free;
 
-	err = 0;
-	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
-		if (perf_header__has_feat(header, feat)) {
-			struct perf_file_section *sec = &feat_sec[idx++];
-
-			err = process(sec, header, feat, fd, data);
-			if (err < 0)
-				break;
-		}
-		++feat;
+	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+		err = process(sec++, header, feat, fd, data);
+		if (err < 0)
+			goto out_free;
 	}
+	err = 0;
 out_free:
 	free(feat_sec);
 	return err;
@@ -1906,32 +1849,21 @@
 		return 0;
 	}
 
+	if (feat >= HEADER_LAST_FEATURE) {
+		pr_debug("unknown feature %d, continuing...\n", feat);
+		return 0;
+	}
+
 	switch (feat) {
 	case HEADER_TRACE_INFO:
 		trace_report(fd, false);
 		break;
-
 	case HEADER_BUILD_ID:
 		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
 			pr_debug("Failed to read buildids, continuing...\n");
 		break;
-
-	case HEADER_HOSTNAME:
-	case HEADER_OSRELEASE:
-	case HEADER_VERSION:
-	case HEADER_ARCH:
-	case HEADER_NRCPUS:
-	case HEADER_CPUDESC:
-	case HEADER_CPUID:
-	case HEADER_TOTAL_MEM:
-	case HEADER_CMDLINE:
-	case HEADER_EVENT_DESC:
-	case HEADER_CPU_TOPOLOGY:
-	case HEADER_NUMA_TOPOLOGY:
-		break;
-
 	default:
-		pr_debug("unknown feature %d, continuing...\n", feat);
+		break;
 	}
 
 	return 0;
@@ -2041,6 +1973,8 @@
 		lseek(fd, tmp, SEEK_SET);
 	}
 
+	symbol_conf.nr_events = nr_attrs;
+
 	if (f_header.event_types.size) {
 		lseek(fd, f_header.event_types.offset, SEEK_SET);
 		events = malloc(f_header.event_types.size);
@@ -2068,9 +2002,9 @@
 	return -ENOMEM;
 }
 
-int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
-				perf_event__handler_t process,
-				struct perf_session *session)
+int perf_event__synthesize_attr(struct perf_tool *tool,
+				struct perf_event_attr *attr, u16 ids, u64 *id,
+				perf_event__handler_t process)
 {
 	union perf_event *ev;
 	size_t size;
@@ -2092,22 +2026,23 @@
 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
 	ev->attr.header.size = size;
 
-	err = process(ev, NULL, session);
+	err = process(tool, ev, NULL, NULL);
 
 	free(ev);
 
 	return err;
 }
 
-int perf_session__synthesize_attrs(struct perf_session *session,
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+				   struct perf_session *session,
 				   perf_event__handler_t process)
 {
 	struct perf_evsel *attr;
 	int err = 0;
 
 	list_for_each_entry(attr, &session->evlist->entries, node) {
-		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
-						  attr->id, process, session);
+		err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
+						  attr->id, process);
 		if (err) {
 			pr_debug("failed to create perf header attribute\n");
 			return err;
@@ -2118,23 +2053,23 @@
 }
 
 int perf_event__process_attr(union perf_event *event,
-			     struct perf_session *session)
+			     struct perf_evlist **pevlist)
 {
 	unsigned int i, ids, n_ids;
 	struct perf_evsel *evsel;
+	struct perf_evlist *evlist = *pevlist;
 
-	if (session->evlist == NULL) {
-		session->evlist = perf_evlist__new(NULL, NULL);
-		if (session->evlist == NULL)
+	if (evlist == NULL) {
+		*pevlist = evlist = perf_evlist__new(NULL, NULL);
+		if (evlist == NULL)
 			return -ENOMEM;
 	}
 
-	evsel = perf_evsel__new(&event->attr.attr,
-				session->evlist->nr_entries);
+	evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
 	if (evsel == NULL)
 		return -ENOMEM;
 
-	perf_evlist__add(session->evlist, evsel);
+	perf_evlist__add(evlist, evsel);
 
 	ids = event->header.size;
 	ids -= (void *)&event->attr.id - (void *)event;
@@ -2148,18 +2083,16 @@
 		return -ENOMEM;
 
 	for (i = 0; i < n_ids; i++) {
-		perf_evlist__id_add(session->evlist, evsel, 0, i,
-				    event->attr.id[i]);
+		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
 	}
 
-	perf_session__update_sample_type(session);
-
 	return 0;
 }
 
-int perf_event__synthesize_event_type(u64 event_id, char *name,
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+				      u64 event_id, char *name,
 				      perf_event__handler_t process,
-				      struct perf_session *session)
+				      struct machine *machine)
 {
 	union perf_event ev;
 	size_t size = 0;
@@ -2177,13 +2110,14 @@
 	ev.event_type.header.size = sizeof(ev.event_type) -
 		(sizeof(ev.event_type.event_type.name) - size);
 
-	err = process(&ev, NULL, session);
+	err = process(tool, &ev, NULL, machine);
 
 	return err;
 }
 
-int perf_event__synthesize_event_types(perf_event__handler_t process,
-				       struct perf_session *session)
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+				       perf_event__handler_t process,
+				       struct machine *machine)
 {
 	struct perf_trace_event_type *type;
 	int i, err = 0;
@@ -2191,9 +2125,9 @@
 	for (i = 0; i < event_count; i++) {
 		type = &events[i];
 
-		err = perf_event__synthesize_event_type(type->event_id,
+		err = perf_event__synthesize_event_type(tool, type->event_id,
 							type->name, process,
-							session);
+							machine);
 		if (err) {
 			pr_debug("failed to create perf header event type\n");
 			return err;
@@ -2203,8 +2137,8 @@
 	return err;
 }
 
-int perf_event__process_event_type(union perf_event *event,
-				   struct perf_session *session __unused)
+int perf_event__process_event_type(struct perf_tool *tool __unused,
+				   union perf_event *event)
 {
 	if (perf_header__push_event(event->event_type.event_type.event_id,
 				    event->event_type.event_type.name) < 0)
@@ -2213,9 +2147,9 @@
 	return 0;
 }
 
-int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
-					 perf_event__handler_t process,
-				   struct perf_session *session __unused)
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
+					struct perf_evlist *evlist,
+					perf_event__handler_t process)
 {
 	union perf_event ev;
 	struct tracing_data *tdata;
@@ -2246,7 +2180,7 @@
 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
 	ev.tracing_data.size = aligned_size;
 
-	process(&ev, NULL, session);
+	process(tool, &ev, NULL, NULL);
 
 	/*
 	 * The put function will copy all the tracing data
@@ -2288,10 +2222,10 @@
 	return size_read + padding;
 }
 
-int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+				    struct dso *pos, u16 misc,
 				    perf_event__handler_t process,
-				    struct machine *machine,
-				    struct perf_session *session)
+				    struct machine *machine)
 {
 	union perf_event ev;
 	size_t len;
@@ -2311,12 +2245,13 @@
 	ev.build_id.header.size = sizeof(ev.build_id) + len;
 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
 
-	err = process(&ev, NULL, session);
+	err = process(tool, &ev, NULL, machine);
 
 	return err;
 }
 
-int perf_event__process_build_id(union perf_event *event,
+int perf_event__process_build_id(struct perf_tool *tool __used,
+				 union perf_event *event,
 				 struct perf_session *session)
 {
 	__event_process_build_id(&event->build_id,
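
Note on the header.c rework above: the long hand-unrolled chain of do_write_feat() calls becomes a single loop over the set bits of adds_features, and a missing or failing writer simply clears its feature bit instead of aborting the whole header. A minimal standalone sketch of that table-driven pattern follows; the names (feat_writer, writers[], FEAT_*) are illustrative only, not the patch's API.

#include <stdio.h>

enum { FEAT_TRACE = 1, FEAT_BUILD_ID, FEAT_HOSTNAME, FEAT_MAX };

typedef int (*feat_writer)(FILE *out);

static int write_hostname(FILE *out)
{
	return fprintf(out, "hostname: example\n") < 0 ? -1 : 0;
}

/* one optional writer per feature bit; unimplemented slots stay NULL */
static feat_writer writers[FEAT_MAX] = {
	[FEAT_HOSTNAME] = write_hostname,
};

int main(void)
{
	unsigned long features = (1UL << FEAT_BUILD_ID) | (1UL << FEAT_HOSTNAME);
	int feat;

	for (feat = 0; feat < FEAT_MAX; feat++) {
		if (!(features & (1UL << feat)))
			continue;
		/* drop the feature on a missing or failing writer, keep going */
		if (!writers[feat] || writers[feat](stdout))
			features &= ~(1UL << feat);
	}
	printf("surviving feature mask: %#lx\n", features);
	return 0;
}
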
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 3d5a742..ac4ec95 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -10,7 +10,8 @@
 #include <linux/bitmap.h>
 
 enum {
-	HEADER_TRACE_INFO = 1,
+	HEADER_RESERVED		= 0,	/* always cleared */
+	HEADER_TRACE_INFO	= 1,
 	HEADER_BUILD_ID,
 
 	HEADER_HOSTNAME,
@@ -27,10 +28,9 @@
 	HEADER_NUMA_TOPOLOGY,
 
 	HEADER_LAST_FEATURE,
+	HEADER_FEAT_BITS	= 256,
 };
 
-#define HEADER_FEAT_BITS			256
-
 struct perf_file_section {
 	u64 offset;
 	u64 size;
@@ -68,6 +68,7 @@
 };
 
 struct perf_evlist;
+struct perf_session;
 
 int perf_session__read_header(struct perf_session *session, int fd);
 int perf_session__write_header(struct perf_session *session,
@@ -96,32 +97,36 @@
 			  const char *name, bool is_kallsyms);
 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
 
-int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
-				perf_event__handler_t process,
-				struct perf_session *session);
-int perf_session__synthesize_attrs(struct perf_session *session,
-				   perf_event__handler_t process);
-int perf_event__process_attr(union perf_event *event, struct perf_session *session);
+int perf_event__synthesize_attr(struct perf_tool *tool,
+				struct perf_event_attr *attr, u16 ids, u64 *id,
+				perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+				 struct perf_session *session,
+				 perf_event__handler_t process);
+int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);
 
-int perf_event__synthesize_event_type(u64 event_id, char *name,
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+				      u64 event_id, char *name,
 				      perf_event__handler_t process,
-				      struct perf_session *session);
-int perf_event__synthesize_event_types(perf_event__handler_t process,
-				       struct perf_session *session);
-int perf_event__process_event_type(union perf_event *event,
-				   struct perf_session *session);
+				      struct machine *machine);
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+				       perf_event__handler_t process,
+				       struct machine *machine);
+int perf_event__process_event_type(struct perf_tool *tool,
+				   union perf_event *event);
 
-int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
-					perf_event__handler_t process,
-					struct perf_session *session);
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+					int fd, struct perf_evlist *evlist,
+					perf_event__handler_t process);
 int perf_event__process_tracing_data(union perf_event *event,
 				     struct perf_session *session);
 
-int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+				    struct dso *pos, u16 misc,
 				    perf_event__handler_t process,
-				    struct machine *machine,
-				    struct perf_session *session);
-int perf_event__process_build_id(union perf_event *event,
+				    struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+				 union perf_event *event,
 				 struct perf_session *session);
 
 /*
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 89289c8..ff6f9d5 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -117,7 +117,6 @@
 
 static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
 					   int evidx __used,
-					   int nr_events __used,
 					   void(*timer)(void *arg) __used,
 					   void *arg __used,
 					   int delay_secs __used)
@@ -128,7 +127,7 @@
 #define K_RIGHT -2
 #else
 #include "ui/keysyms.h"
-int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
 			     void(*timer)(void *arg), void *arg, int delay_secs);
 
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 305c848..62cdee7 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -9,6 +9,17 @@
 #define BITS_PER_BYTE           8
 #define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
 static inline void set_bit(int nr, unsigned long *addr)
 {
 	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
@@ -30,4 +41,111 @@
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+	int num = 0;
+
+#if BITS_PER_LONG == 64
+	if ((word & 0xffffffff) == 0) {
+		num += 32;
+		word >>= 32;
+	}
+#endif
+	if ((word & 0xffff) == 0) {
+		num += 16;
+		word >>= 16;
+	}
+	if ((word & 0xff) == 0) {
+		num += 8;
+		word >>= 8;
+	}
+	if ((word & 0xf) == 0) {
+		num += 4;
+		word >>= 4;
+	}
+	if ((word & 0x3) == 0) {
+		num += 2;
+		word >>= 2;
+	}
+	if ((word & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+
+	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found:
+	return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
 #endif
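
The for_each_set_bit() macro added above is what drives both the header write loop and perf_header__process_sections(). A self-contained toy program showing how it walks a multi-word bitmap; the find_next_bit() below is a deliberately naive bit-at-a-time stand-in for the word-at-a-time helpers the patch adds.

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(long))

/* naive stand-in: scans one bit per iteration instead of one word */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	for (; offset < size; offset++)
		if (addr[offset / BITS_PER_LONG] &
		    (1UL << (offset % BITS_PER_LONG)))
			return offset;
	return size;
}

#define find_first_bit(addr, size) find_next_bit(addr, size, 0)

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long features[2] = { (1UL << 1) | (1UL << 5), 1UL };
	unsigned long bit;

	/* visits bits 1, 5 and BITS_PER_LONG, in that order */
	for_each_set_bit(bit, features, 2UL * BITS_PER_LONG)
		printf("feature bit %lu is set\n", bit);
	return 0;
}
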
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 78284b1..316aa0a 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -562,6 +562,10 @@
 	INIT_LIST_HEAD(&self->user_dsos);
 	INIT_LIST_HEAD(&self->kernel_dsos);
 
+	self->threads = RB_ROOT;
+	INIT_LIST_HEAD(&self->dead_threads);
+	self->last_match = NULL;
+
 	self->kmaps.machine = self;
 	self->pid	    = pid;
 	self->root_dir      = strdup(root_dir);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 890d855..2b8017f 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -18,9 +18,11 @@
 extern const char *map_type__name[MAP__NR_TYPES];
 
 struct dso;
+struct ip_callchain;
 struct ref_reloc_sym;
 struct map_groups;
 struct machine;
+struct perf_evsel;
 
 struct map {
 	union {
@@ -61,7 +63,11 @@
 struct machine {
 	struct rb_node	  rb_node;
 	pid_t		  pid;
+	u16		  id_hdr_size;
 	char		  *root_dir;
+	struct rb_root	  threads;
+	struct list_head  dead_threads;
+	struct thread	  *last_match;
 	struct list_head  user_dsos;
 	struct list_head  kernel_dsos;
 	struct map_groups kmaps;
@@ -148,6 +154,13 @@
 void machine__exit(struct machine *self);
 void machine__delete(struct machine *self);
 
+int machine__resolve_callchain(struct machine *machine,
+			       struct perf_evsel *evsel, struct thread *thread,
+			       struct ip_callchain *chain,
+			       struct symbol **parent);
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
+				     u64 addr);
+
 /*
  * Default guest kernel is defined by parameter --guestkallsyms
  * and --guestmodules
@@ -190,6 +203,12 @@
 					       struct map **mapp,
 					       symbol_filter_t filter);
 
+
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
+void machine__remove_thread(struct machine *machine, struct thread *th);
+
+size_t machine__fprintf(struct machine *machine, FILE *fp);
+
 static inline
 struct symbol *machine__find_kernel_symbol(struct machine *self,
 					   enum map_type type, u64 addr,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 928918b..531c283 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -25,8 +25,6 @@
 	EVT_HANDLED_ALL
 };
 
-char debugfs_path[MAXPATHLEN];
-
 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
 
@@ -40,6 +38,7 @@
   { CHW(BRANCH_INSTRUCTIONS),		"branch-instructions",		"branches"		},
   { CHW(BRANCH_MISSES),			"branch-misses",		""			},
   { CHW(BUS_CYCLES),			"bus-cycles",			""			},
+  { CHW(REF_CPU_CYCLES),		"ref-cycles",			""			},
 
   { CSW(CPU_CLOCK),			"cpu-clock",			""			},
   { CSW(TASK_CLOCK),			"task-clock",			""			},
@@ -70,6 +69,7 @@
 	"bus-cycles",
 	"stalled-cycles-frontend",
 	"stalled-cycles-backend",
+	"ref-cycles",
 };
 
 static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
@@ -140,7 +140,7 @@
 	char evt_path[MAXPATHLEN];
 	int fd;
 
-	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
 			sys_dir->d_name, evt_dir->d_name);
 	fd = open(evt_path, O_RDONLY);
 	if (fd < 0)
@@ -171,16 +171,16 @@
 	char evt_path[MAXPATHLEN];
 	char dir_path[MAXPATHLEN];
 
-	if (debugfs_valid_mountpoint(debugfs_path))
+	if (debugfs_valid_mountpoint(tracing_events_path))
 		return NULL;
 
-	sys_dir = opendir(debugfs_path);
+	sys_dir = opendir(tracing_events_path);
 	if (!sys_dir)
 		return NULL;
 
 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
 
-		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
 			 sys_dirent.d_name);
 		evt_dir = opendir(dir_path);
 		if (!evt_dir)
@@ -447,7 +447,7 @@
 	u64 id;
 	int fd;
 
-	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
 		 sys_name, evt_name);
 
 	fd = open(evt_path, O_RDONLY);
@@ -485,7 +485,7 @@
 	struct dirent *evt_ent;
 	DIR *evt_dir;
 
-	snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
+	snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
 	evt_dir = opendir(evt_path);
 
 	if (!evt_dir) {
@@ -528,7 +528,7 @@
 	char sys_name[MAX_EVENT_LENGTH];
 	unsigned int sys_length, evt_length;
 
-	if (debugfs_valid_mountpoint(debugfs_path))
+	if (debugfs_valid_mountpoint(tracing_events_path))
 		return 0;
 
 	evt_name = strchr(*strp, ':');
@@ -920,10 +920,10 @@
 	char evt_path[MAXPATHLEN];
 	char dir_path[MAXPATHLEN];
 
-	if (debugfs_valid_mountpoint(debugfs_path))
+	if (debugfs_valid_mountpoint(tracing_events_path))
 		return;
 
-	sys_dir = opendir(debugfs_path);
+	sys_dir = opendir(tracing_events_path);
 	if (!sys_dir)
 		return;
 
@@ -932,7 +932,7 @@
 		    !strglobmatch(sys_dirent.d_name, subsys_glob))
 			continue;
 
-		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
 			 sys_dirent.d_name);
 		evt_dir = opendir(dir_path);
 		if (!evt_dir)
@@ -964,16 +964,16 @@
 	char evt_path[MAXPATHLEN];
 	char dir_path[MAXPATHLEN];
 
-	if (debugfs_valid_mountpoint(debugfs_path))
+	if (debugfs_valid_mountpoint(tracing_events_path))
 		return 0;
 
-	sys_dir = opendir(debugfs_path);
+	sys_dir = opendir(tracing_events_path);
 	if (!sys_dir)
 		return 0;
 
 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
 
-		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
 			 sys_dirent.d_name);
 		evt_dir = opendir(dir_path);
 		if (!evt_dir)
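
The parse-events.c changes swap the hard-coded debugfs_path for the new tracing_events_path when building paths of the form <events>/<sys>/<event>/id. A hedged sketch of that lookup in isolation; the base directory in main() is only a common default used for illustration, while the patch resolves it at runtime.

#include <stdio.h>

/*
 * Toy version of the tracepoint id lookup: build
 * <events_dir>/<sys>/<event>/id and read the numeric id stored there.
 */
static long long read_tracepoint_id(const char *events_dir,
				    const char *sys, const char *evt)
{
	char path[4096];
	long long id = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s/%s/id", events_dir, sys, evt);
	f = fopen(path, "r");
	if (f == NULL)
		return -1;
	if (fscanf(f, "%lld", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}

int main(void)
{
	/* assumed default location; needs debugfs mounted and enough privilege */
	long long id = read_tracepoint_id("/sys/kernel/debug/tracing/events",
					  "sched", "sched_switch");

	printf("sched:sched_switch id = %lld\n", id);
	return 0;
}
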
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 2f8e375..7e0cbe7 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -39,7 +39,6 @@
 int print_hwcache_events(const char *event_glob);
 extern int is_valid_tracepoint(const char *event_string);
 
-extern char debugfs_path[];
 extern int valid_debugfs_mount(const char *debugfs);
 
 #endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 1132c8f..17e94d0 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -5,7 +5,6 @@
 #include "util.h"
 #include "probe-event.h"
 
-#define MAX_PATH_LEN		 256
 #define MAX_PROBE_BUFFER	1024
 #define MAX_PROBES		 128
 
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 74350ff..e30749e 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -27,7 +27,10 @@
 
 #include "../../perf.h"
 #include "../util.h"
+#include "../thread.h"
+#include "../event.h"
 #include "../trace-event.h"
+#include "../evsel.h"
 
 #include <EXTERN.h>
 #include <perl.h>
@@ -245,11 +248,11 @@
 	return event;
 }
 
-static void perl_process_event(union perf_event *pevent __unused,
-			       struct perf_sample *sample,
-			       struct perf_evsel *evsel,
-			       struct perf_session *session __unused,
-			       struct thread *thread)
+static void perl_process_tracepoint(union perf_event *pevent __unused,
+				    struct perf_sample *sample,
+				    struct perf_evsel *evsel,
+				    struct machine *machine __unused,
+				    struct thread *thread)
 {
 	struct format_field *field;
 	static char handler[256];
@@ -265,6 +268,9 @@
 
 	dSP;
 
+	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+		return;
+
 	type = trace_parse_common_type(data);
 
 	event = find_cache_event(type);
@@ -332,6 +338,42 @@
 	LEAVE;
 }
 
+static void perl_process_event_generic(union perf_event *pevent __unused,
+				       struct perf_sample *sample,
+				       struct perf_evsel *evsel __unused,
+				       struct machine *machine __unused,
+				       struct thread *thread __unused)
+{
+	dSP;
+
+	if (!get_cv("process_event", 0))
+		return;
+
+	ENTER;
+	SAVETMPS;
+	PUSHMARK(SP);
+	XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size)));
+	XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr))));
+	XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
+	XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
+	PUTBACK;
+	call_pv("process_event", G_SCALAR);
+	SPAGAIN;
+	PUTBACK;
+	FREETMPS;
+	LEAVE;
+}
+
+static void perl_process_event(union perf_event *pevent,
+			       struct perf_sample *sample,
+			       struct perf_evsel *evsel,
+			       struct machine *machine,
+			       struct thread *thread)
+{
+	perl_process_tracepoint(pevent, sample, evsel, machine, thread);
+	perl_process_event_generic(pevent, sample, evsel, machine, thread);
+}
+
 static void run_start_sub(void)
 {
 	dSP; /* access to Perl stack */
@@ -553,7 +595,28 @@
 	fprintf(ofp, "sub print_header\n{\n"
 		"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
 		"\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t       "
-		"$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}");
+		"$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n");
+
+	fprintf(ofp,
+		"\n# Packed byte string args of process_event():\n"
+		"#\n"
+		"# $event:\tunion perf_event\tutil/event.h\n"
+		"# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n"
+		"# $sample:\tstruct perf_sample\tutil/event.h\n"
+		"# $raw_data:\tperf_sample->raw_data\tutil/event.h\n"
+		"\n"
+		"sub process_event\n"
+		"{\n"
+		"\tmy ($event, $attr, $sample, $raw_data) = @_;\n"
+		"\n"
+		"\tmy @event\t= unpack(\"LSS\", $event);\n"
+		"\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n"
+		"\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n"
+		"\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n"
+		"\n"
+		"\tuse Data::Dumper;\n"
+		"\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n"
+		"}\n");
 
 	fclose(ofp);
 
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 6ccf70e..0b2a487 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -29,6 +29,8 @@
 
 #include "../../perf.h"
 #include "../util.h"
+#include "../event.h"
+#include "../thread.h"
 #include "../trace-event.h"
 
 PyMODINIT_FUNC initperf_trace_context(void);
@@ -207,7 +209,7 @@
 static void python_process_event(union perf_event *pevent __unused,
 				 struct perf_sample *sample,
 				 struct perf_evsel *evsel __unused,
-				 struct perf_session *session __unused,
+				 struct machine *machine __unused,
 				 struct thread *thread)
 {
 	PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0f4555c..b5ca255 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -10,6 +10,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "session.h"
+#include "tool.h"
 #include "sort.h"
 #include "util.h"
 #include "cpumap.h"
@@ -78,39 +79,13 @@
 	return -1;
 }
 
-static void perf_session__id_header_size(struct perf_session *session)
-{
-       struct perf_sample *data;
-       u64 sample_type = session->sample_type;
-       u16 size = 0;
-
-	if (!session->sample_id_all)
-		goto out;
-
-       if (sample_type & PERF_SAMPLE_TID)
-               size += sizeof(data->tid) * 2;
-
-       if (sample_type & PERF_SAMPLE_TIME)
-               size += sizeof(data->time);
-
-       if (sample_type & PERF_SAMPLE_ID)
-               size += sizeof(data->id);
-
-       if (sample_type & PERF_SAMPLE_STREAM_ID)
-               size += sizeof(data->stream_id);
-
-       if (sample_type & PERF_SAMPLE_CPU)
-               size += sizeof(data->cpu) * 2;
-out:
-       session->id_hdr_size = size;
-}
-
 void perf_session__update_sample_type(struct perf_session *self)
 {
 	self->sample_type = perf_evlist__sample_type(self->evlist);
 	self->sample_size = __perf_evsel__sample_size(self->sample_type);
 	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
-	perf_session__id_header_size(self);
+	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
+	self->host_machine.id_hdr_size = self->id_hdr_size;
 }
 
 int perf_session__create_kernel_maps(struct perf_session *self)
@@ -130,18 +105,26 @@
 
 struct perf_session *perf_session__new(const char *filename, int mode,
 				       bool force, bool repipe,
-				       struct perf_event_ops *ops)
+				       struct perf_tool *tool)
 {
-	size_t len = filename ? strlen(filename) + 1 : 0;
-	struct perf_session *self = zalloc(sizeof(*self) + len);
+	struct perf_session *self;
+	struct stat st;
+	size_t len;
+
+	if (!filename || !strlen(filename)) {
+		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+			filename = "-";
+		else
+			filename = "perf.data";
+	}
+
+	len = strlen(filename);
+	self = zalloc(sizeof(*self) + len);
 
 	if (self == NULL)
 		goto out;
 
 	memcpy(self->filename, filename, len);
-	self->threads = RB_ROOT;
-	INIT_LIST_HEAD(&self->dead_threads);
-	self->last_match = NULL;
 	/*
 	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 	 * slices. On 32bit we use 32MB.
@@ -171,10 +154,10 @@
 			goto out_delete;
 	}
 
-	if (ops && ops->ordering_requires_timestamps &&
-	    ops->ordered_samples && !self->sample_id_all) {
+	if (tool && tool->ordering_requires_timestamps &&
+	    tool->ordered_samples && !self->sample_id_all) {
 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
-		ops->ordered_samples = false;
+		tool->ordered_samples = false;
 	}
 
 out:
@@ -184,17 +167,22 @@
 	return NULL;
 }
 
-static void perf_session__delete_dead_threads(struct perf_session *self)
+static void machine__delete_dead_threads(struct machine *machine)
 {
 	struct thread *n, *t;
 
-	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
+	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
 		list_del(&t->node);
 		thread__delete(t);
 	}
 }
 
-static void perf_session__delete_threads(struct perf_session *self)
+static void perf_session__delete_dead_threads(struct perf_session *session)
+{
+	machine__delete_dead_threads(&session->host_machine);
+}
+
+static void machine__delete_threads(struct machine *self)
 {
 	struct rb_node *nd = rb_first(&self->threads);
 
@@ -207,6 +195,11 @@
 	}
 }
 
+static void perf_session__delete_threads(struct perf_session *session)
+{
+	machine__delete_threads(&session->host_machine);
+}
+
 void perf_session__delete(struct perf_session *self)
 {
 	perf_session__destroy_kernel_maps(self);
@@ -217,7 +210,7 @@
 	free(self);
 }
 
-void perf_session__remove_thread(struct perf_session *self, struct thread *th)
+void machine__remove_thread(struct machine *self, struct thread *th)
 {
 	self->last_match = NULL;
 	rb_erase(&th->rb_node, &self->threads);
@@ -236,16 +229,16 @@
 	return 0;
 }
 
-int perf_session__resolve_callchain(struct perf_session *self,
-				    struct thread *thread,
-				    struct ip_callchain *chain,
-				    struct symbol **parent)
+int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+			       struct thread *thread,
+			       struct ip_callchain *chain,
+			       struct symbol **parent)
 {
 	u8 cpumode = PERF_RECORD_MISC_USER;
 	unsigned int i;
 	int err;
 
-	callchain_cursor_reset(&self->callchain_cursor);
+	callchain_cursor_reset(&evsel->hists.callchain_cursor);
 
 	for (i = 0; i < chain->nr; i++) {
 		u64 ip;
@@ -272,7 +265,7 @@
 
 		al.filtered = false;
 		thread__find_addr_location(thread, self, cpumode,
-				MAP__FUNCTION, thread->pid, ip, &al, NULL);
+					   MAP__FUNCTION, ip, &al, NULL);
 		if (al.sym != NULL) {
 			if (sort__has_parent && !*parent &&
 			    symbol__match_parent_regex(al.sym))
@@ -281,7 +274,7 @@
 				break;
 		}
 
-		err = callchain_cursor_append(&self->callchain_cursor,
+		err = callchain_cursor_append(&evsel->hists.callchain_cursor,
 					      ip, al.map, al.sym);
 		if (err)
 			return err;
@@ -290,75 +283,91 @@
 	return 0;
 }
 
-static int process_event_synth_stub(union perf_event *event __used,
-				    struct perf_session *session __used)
+static int process_event_synth_tracing_data_stub(union perf_event *event __used,
+						 struct perf_session *session __used)
 {
 	dump_printf(": unhandled!\n");
 	return 0;
 }
 
-static int process_event_sample_stub(union perf_event *event __used,
+static int process_event_synth_attr_stub(union perf_event *event __used,
+					 struct perf_evlist **pevlist __used)
+{
+	dump_printf(": unhandled!\n");
+	return 0;
+}
+
+static int process_event_sample_stub(struct perf_tool *tool __used,
+				     union perf_event *event __used,
 				     struct perf_sample *sample __used,
 				     struct perf_evsel *evsel __used,
-				     struct perf_session *session __used)
+				     struct machine *machine __used)
 {
 	dump_printf(": unhandled!\n");
 	return 0;
 }
 
-static int process_event_stub(union perf_event *event __used,
+static int process_event_stub(struct perf_tool *tool __used,
+			      union perf_event *event __used,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	dump_printf(": unhandled!\n");
 	return 0;
 }
 
-static int process_finished_round_stub(union perf_event *event __used,
-				       struct perf_session *session __used,
-				       struct perf_event_ops *ops __used)
+static int process_finished_round_stub(struct perf_tool *tool __used,
+				       union perf_event *event __used,
+				       struct perf_session *perf_session __used)
 {
 	dump_printf(": unhandled!\n");
 	return 0;
 }
 
-static int process_finished_round(union perf_event *event,
-				  struct perf_session *session,
-				  struct perf_event_ops *ops);
-
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
+static int process_event_type_stub(struct perf_tool *tool __used,
+				   union perf_event *event __used)
 {
-	if (handler->sample == NULL)
-		handler->sample = process_event_sample_stub;
-	if (handler->mmap == NULL)
-		handler->mmap = process_event_stub;
-	if (handler->comm == NULL)
-		handler->comm = process_event_stub;
-	if (handler->fork == NULL)
-		handler->fork = process_event_stub;
-	if (handler->exit == NULL)
-		handler->exit = process_event_stub;
-	if (handler->lost == NULL)
-		handler->lost = perf_event__process_lost;
-	if (handler->read == NULL)
-		handler->read = process_event_stub;
-	if (handler->throttle == NULL)
-		handler->throttle = process_event_stub;
-	if (handler->unthrottle == NULL)
-		handler->unthrottle = process_event_stub;
-	if (handler->attr == NULL)
-		handler->attr = process_event_synth_stub;
-	if (handler->event_type == NULL)
-		handler->event_type = process_event_synth_stub;
-	if (handler->tracing_data == NULL)
-		handler->tracing_data = process_event_synth_stub;
-	if (handler->build_id == NULL)
-		handler->build_id = process_event_synth_stub;
-	if (handler->finished_round == NULL) {
-		if (handler->ordered_samples)
-			handler->finished_round = process_finished_round;
+	dump_printf(": unhandled!\n");
+	return 0;
+}
+
+static int process_finished_round(struct perf_tool *tool,
+				  union perf_event *event,
+				  struct perf_session *session);
+
+static void perf_tool__fill_defaults(struct perf_tool *tool)
+{
+	if (tool->sample == NULL)
+		tool->sample = process_event_sample_stub;
+	if (tool->mmap == NULL)
+		tool->mmap = process_event_stub;
+	if (tool->comm == NULL)
+		tool->comm = process_event_stub;
+	if (tool->fork == NULL)
+		tool->fork = process_event_stub;
+	if (tool->exit == NULL)
+		tool->exit = process_event_stub;
+	if (tool->lost == NULL)
+		tool->lost = perf_event__process_lost;
+	if (tool->read == NULL)
+		tool->read = process_event_sample_stub;
+	if (tool->throttle == NULL)
+		tool->throttle = process_event_stub;
+	if (tool->unthrottle == NULL)
+		tool->unthrottle = process_event_stub;
+	if (tool->attr == NULL)
+		tool->attr = process_event_synth_attr_stub;
+	if (tool->event_type == NULL)
+		tool->event_type = process_event_type_stub;
+	if (tool->tracing_data == NULL)
+		tool->tracing_data = process_event_synth_tracing_data_stub;
+	if (tool->build_id == NULL)
+		tool->build_id = process_finished_round_stub;
+	if (tool->finished_round == NULL) {
+		if (tool->ordered_samples)
+			tool->finished_round = process_finished_round;
 		else
-			handler->finished_round = process_finished_round_stub;
+			tool->finished_round = process_finished_round_stub;
 	}
 }
 
@@ -490,11 +499,11 @@
 static int perf_session_deliver_event(struct perf_session *session,
 				      union perf_event *event,
 				      struct perf_sample *sample,
-				      struct perf_event_ops *ops,
+				      struct perf_tool *tool,
 				      u64 file_offset);
 
 static void flush_sample_queue(struct perf_session *s,
-			       struct perf_event_ops *ops)
+			       struct perf_tool *tool)
 {
 	struct ordered_samples *os = &s->ordered_samples;
 	struct list_head *head = &os->samples;
@@ -505,7 +514,7 @@
 	unsigned idx = 0, progress_next = os->nr_samples / 16;
 	int ret;
 
-	if (!ops->ordered_samples || !limit)
+	if (!tool->ordered_samples || !limit)
 		return;
 
 	list_for_each_entry_safe(iter, tmp, head, list) {
@@ -516,7 +525,7 @@
 		if (ret)
 			pr_err("Can't parse sample, err = %d\n", ret);
 		else
-			perf_session_deliver_event(s, iter->event, &sample, ops,
+			perf_session_deliver_event(s, iter->event, &sample, tool,
 						   iter->file_offset);
 
 		os->last_flush = iter->timestamp;
@@ -578,11 +587,11 @@
  *      Flush every events below timestamp 7
  *      etc...
  */
-static int process_finished_round(union perf_event *event __used,
-				  struct perf_session *session,
-				  struct perf_event_ops *ops)
+static int process_finished_round(struct perf_tool *tool,
+				  union perf_event *event __used,
+				  struct perf_session *session)
 {
-	flush_sample_queue(session, ops);
+	flush_sample_queue(session, tool);
 	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
 
 	return 0;
@@ -737,13 +746,26 @@
 		callchain__printf(sample);
 }
 
+static struct machine *
+	perf_session__find_machine_for_cpumode(struct perf_session *session,
+					       union perf_event *event)
+{
+	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
+		return perf_session__find_machine(session, event->ip.pid);
+
+	return perf_session__find_host_machine(session);
+}
+
 static int perf_session_deliver_event(struct perf_session *session,
 				      union perf_event *event,
 				      struct perf_sample *sample,
-				      struct perf_event_ops *ops,
+				      struct perf_tool *tool,
 				      u64 file_offset)
 {
 	struct perf_evsel *evsel;
+	struct machine *machine;
 
 	dump_event(session, event, file_offset, sample);
 
@@ -765,6 +787,8 @@
 		hists__inc_nr_events(&evsel->hists, event->header.type);
 	}
 
+	machine = perf_session__find_machine_for_cpumode(session, event);
+
 	switch (event->header.type) {
 	case PERF_RECORD_SAMPLE:
 		dump_sample(session, event, sample);
@@ -772,23 +796,25 @@
 			++session->hists.stats.nr_unknown_id;
 			return -1;
 		}
-		return ops->sample(event, sample, evsel, session);
+		return tool->sample(tool, event, sample, evsel, machine);
 	case PERF_RECORD_MMAP:
-		return ops->mmap(event, sample, session);
+		return tool->mmap(tool, event, sample, machine);
 	case PERF_RECORD_COMM:
-		return ops->comm(event, sample, session);
+		return tool->comm(tool, event, sample, machine);
 	case PERF_RECORD_FORK:
-		return ops->fork(event, sample, session);
+		return tool->fork(tool, event, sample, machine);
 	case PERF_RECORD_EXIT:
-		return ops->exit(event, sample, session);
+		return tool->exit(tool, event, sample, machine);
 	case PERF_RECORD_LOST:
-		return ops->lost(event, sample, session);
+		if (tool->lost == perf_event__process_lost)
+			session->hists.stats.total_lost += event->lost.lost;
+		return tool->lost(tool, event, sample, machine);
 	case PERF_RECORD_READ:
-		return ops->read(event, sample, session);
+		return tool->read(tool, event, sample, evsel, machine);
 	case PERF_RECORD_THROTTLE:
-		return ops->throttle(event, sample, session);
+		return tool->throttle(tool, event, sample, machine);
 	case PERF_RECORD_UNTHROTTLE:
-		return ops->unthrottle(event, sample, session);
+		return tool->unthrottle(tool, event, sample, machine);
 	default:
 		++session->hists.stats.nr_unknown_events;
 		return -1;
@@ -812,24 +838,29 @@
 }
 
 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
-					    struct perf_event_ops *ops, u64 file_offset)
+					    struct perf_tool *tool, u64 file_offset)
 {
+	int err;
+
 	dump_event(session, event, file_offset, NULL);
 
 	/* These events are processed right away */
 	switch (event->header.type) {
 	case PERF_RECORD_HEADER_ATTR:
-		return ops->attr(event, session);
+		err = tool->attr(event, &session->evlist);
+		if (err == 0)
+			perf_session__update_sample_type(session);
+		return err;
 	case PERF_RECORD_HEADER_EVENT_TYPE:
-		return ops->event_type(event, session);
+		return tool->event_type(tool, event);
 	case PERF_RECORD_HEADER_TRACING_DATA:
 		/* setup for reading amidst mmap */
 		lseek(session->fd, file_offset, SEEK_SET);
-		return ops->tracing_data(event, session);
+		return tool->tracing_data(event, session);
 	case PERF_RECORD_HEADER_BUILD_ID:
-		return ops->build_id(event, session);
+		return tool->build_id(tool, event, session);
 	case PERF_RECORD_FINISHED_ROUND:
-		return ops->finished_round(event, session, ops);
+		return tool->finished_round(tool, event, session);
 	default:
 		return -EINVAL;
 	}
@@ -837,7 +868,7 @@
 
 static int perf_session__process_event(struct perf_session *session,
 				       union perf_event *event,
-				       struct perf_event_ops *ops,
+				       struct perf_tool *tool,
 				       u64 file_offset)
 {
 	struct perf_sample sample;
@@ -853,7 +884,7 @@
 	hists__inc_nr_events(&session->hists, event->header.type);
 
 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
-		return perf_session__process_user_event(session, event, ops, file_offset);
+		return perf_session__process_user_event(session, event, tool, file_offset);
 
 	/*
 	 * For all kernel events we get the sample data
@@ -866,14 +897,14 @@
 	if (perf_session__preprocess_sample(session, event, &sample))
 		return 0;
 
-	if (ops->ordered_samples) {
+	if (tool->ordered_samples) {
 		ret = perf_session_queue_event(session, event, &sample,
 					       file_offset);
 		if (ret != -ETIME)
 			return ret;
 	}
 
-	return perf_session_deliver_event(session, event, &sample, ops,
+	return perf_session_deliver_event(session, event, &sample, tool,
 					  file_offset);
 }
 
@@ -884,6 +915,11 @@
 	self->size = bswap_16(self->size);
 }
 
+struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
+{
+	return machine__findnew_thread(&session->host_machine, pid);
+}
+
 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
 {
 	struct thread *thread = perf_session__findnew(self, 0);
@@ -897,9 +933,9 @@
 }
 
 static void perf_session__warn_about_errors(const struct perf_session *session,
-					    const struct perf_event_ops *ops)
+					    const struct perf_tool *tool)
 {
-	if (ops->lost == perf_event__process_lost &&
+	if (tool->lost == perf_event__process_lost &&
 	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
 		ui__warning("Processed %d events and lost %d chunks!\n\n"
 			    "Check IO/CPU overload!\n\n",
@@ -934,7 +970,7 @@
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
-					       struct perf_event_ops *ops)
+					       struct perf_tool *tool)
 {
 	union perf_event event;
 	uint32_t size;
@@ -943,7 +979,7 @@
 	int err;
 	void *p;
 
-	perf_event_ops__fill_defaults(ops);
+	perf_tool__fill_defaults(tool);
 
 	head = 0;
 more:
@@ -979,8 +1015,7 @@
 		}
 	}
 
-	if (size == 0 ||
-	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
+	if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
 		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    head, event.header.size, event.header.type);
 		/*
@@ -1003,7 +1038,7 @@
 done:
 	err = 0;
 out_err:
-	perf_session__warn_about_errors(self, ops);
+	perf_session__warn_about_errors(self, tool);
 	perf_session_free_sample_buffers(self);
 	return err;
 }
@@ -1034,7 +1069,7 @@
 
 int __perf_session__process_events(struct perf_session *session,
 				   u64 data_offset, u64 data_size,
-				   u64 file_size, struct perf_event_ops *ops)
+				   u64 file_size, struct perf_tool *tool)
 {
 	u64 head, page_offset, file_offset, file_pos, progress_next;
 	int err, mmap_prot, mmap_flags, map_idx = 0;
@@ -1043,7 +1078,7 @@
 	union perf_event *event;
 	uint32_t size;
 
-	perf_event_ops__fill_defaults(ops);
+	perf_tool__fill_defaults(tool);
 
 	page_size = sysconf(_SC_PAGESIZE);
 
@@ -1098,7 +1133,7 @@
 	size = event->header.size;
 
 	if (size == 0 ||
-	    perf_session__process_event(session, event, ops, file_pos) < 0) {
+	    perf_session__process_event(session, event, tool, file_pos) < 0) {
 		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    file_offset + head, event->header.size,
 			    event->header.type);
@@ -1127,15 +1162,15 @@
 	err = 0;
 	/* do the final flush for ordered samples */
 	session->ordered_samples.next_flush = ULLONG_MAX;
-	flush_sample_queue(session, ops);
+	flush_sample_queue(session, tool);
 out_err:
-	perf_session__warn_about_errors(session, ops);
+	perf_session__warn_about_errors(session, tool);
 	perf_session_free_sample_buffers(session);
 	return err;
 }
 
 int perf_session__process_events(struct perf_session *self,
-				 struct perf_event_ops *ops)
+				 struct perf_tool *tool)
 {
 	int err;
 
@@ -1146,9 +1181,9 @@
 		err = __perf_session__process_events(self,
 						     self->header.data_offset,
 						     self->header.data_size,
-						     self->size, ops);
+						     self->size, tool);
 	else
-		err = __perf_session__process_pipe_events(self, ops);
+		err = __perf_session__process_pipe_events(self, tool);
 
 	return err;
 }
@@ -1163,9 +1198,8 @@
 	return true;
 }
 
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
-					     const char *symbol_name,
-					     u64 addr)
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
+				     const char *symbol_name, u64 addr)
 {
 	char *bracket;
 	enum map_type i;
@@ -1224,6 +1258,27 @@
 	return ret;
 }
 
+size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
+{
+	/*
+	 * FIXME: Here we have to actually print all the machines in this
+	 * session, not just the host...
+	 */
+	return machine__fprintf(&session->host_machine, fp);
+}
+
+void perf_session__remove_thread(struct perf_session *session,
+				 struct thread *th)
+{
+	/*
+	 * FIXME: This one makes no sense, we need to remove the thread from
+	 * the machine it belongs to, perf_session can have many machines, so
+	 * doing it always on ->host_machine is wrong.  Fix when auditing all
+	 * the 'perf kvm' code.
+	 */
+	machine__remove_thread(&session->host_machine, th);
+}
+
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 					      unsigned int type)
 {
@@ -1236,17 +1291,16 @@
 	return NULL;
 }
 
-void perf_session__print_ip(union perf_event *event,
-			    struct perf_sample *sample,
-			    struct perf_session *session,
-			    int print_sym, int print_dso)
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+			  struct machine *machine, struct perf_evsel *evsel,
+			  int print_sym, int print_dso)
 {
 	struct addr_location al;
 	const char *symname, *dsoname;
-	struct callchain_cursor *cursor = &session->callchain_cursor;
+	struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
 	struct callchain_cursor_node *node;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  NULL) < 0) {
 		error("problem processing %d event, skipping it.\n",
 			event->header.type);
@@ -1255,7 +1309,7 @@
 
 	if (symbol_conf.use_callchain && sample->callchain) {
 
-		if (perf_session__resolve_callchain(session, al.thread,
+		if (machine__resolve_callchain(machine, evsel, al.thread,
 						sample->callchain, NULL) != 0) {
 			if (verbose)
 				error("Failed to resolve callchain. Skipping\n");
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 6e393c9..37bc383 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -30,9 +30,6 @@
 	struct perf_header	header;
 	unsigned long		size;
 	unsigned long		mmap_window;
-	struct rb_root		threads;
-	struct list_head	dead_threads;
-	struct thread		*last_match;
 	struct machine		host_machine;
 	struct rb_root		machines;
 	struct perf_evlist	*evlist;
@@ -53,65 +50,31 @@
 	int			cwdlen;
 	char			*cwd;
 	struct ordered_samples	ordered_samples;
-	struct callchain_cursor	callchain_cursor;
-	char			filename[0];
+	char			filename[1];
 };
 
-struct perf_evsel;
-struct perf_event_ops;
-
-typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample,
-			    struct perf_evsel *evsel, struct perf_session *session);
-typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
-			struct perf_session *session);
-typedef int (*event_synth_op)(union perf_event *self,
-			      struct perf_session *session);
-typedef int (*event_op2)(union perf_event *self, struct perf_session *session,
-			 struct perf_event_ops *ops);
-
-struct perf_event_ops {
-	event_sample	sample;
-	event_op	mmap,
-			comm,
-			fork,
-			exit,
-			lost,
-			read,
-			throttle,
-			unthrottle;
-	event_synth_op	attr,
-			event_type,
-			tracing_data,
-			build_id;
-	event_op2	finished_round;
-	bool		ordered_samples;
-	bool		ordering_requires_timestamps;
-};
+struct perf_tool;
 
 struct perf_session *perf_session__new(const char *filename, int mode,
 				       bool force, bool repipe,
-				       struct perf_event_ops *ops);
+				       struct perf_tool *tool);
 void perf_session__delete(struct perf_session *self);
 
 void perf_event_header__bswap(struct perf_event_header *self);
 
 int __perf_session__process_events(struct perf_session *self,
 				   u64 data_offset, u64 data_size, u64 size,
-				   struct perf_event_ops *ops);
+				   struct perf_tool *tool);
 int perf_session__process_events(struct perf_session *self,
-				 struct perf_event_ops *event_ops);
+				 struct perf_tool *tool);
 
-int perf_session__resolve_callchain(struct perf_session *self,
+int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
 				    struct thread *thread,
 				    struct ip_callchain *chain,
 				    struct symbol **parent);
 
 bool perf_session__has_traces(struct perf_session *self, const char *msg);
 
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
-					     const char *symbol_name,
-					     u64 addr);
-
 void mem_bswap_64(void *src, int byte_size);
 void perf_event__attr_swap(struct perf_event_attr *attr);
 
@@ -144,12 +107,16 @@
 
 static inline
 void perf_session__process_machines(struct perf_session *self,
+				    struct perf_tool *tool,
 				    machine__process_t process)
 {
-	process(&self->host_machine, self);
-	return machines__process(&self->machines, process, self);
+	process(&self->host_machine, tool);
+	return machines__process(&self->machines, process, tool);
 }
 
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
+
 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
 
 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
@@ -167,13 +134,20 @@
 					session->header.needs_swap);
 }
 
+static inline int perf_session__synthesize_sample(struct perf_session *session,
+						  union perf_event *event,
+						  const struct perf_sample *sample)
+{
+	return perf_event__synthesize_sample(event, session->sample_type,
+					     sample, session->header.needs_swap);
+}
+
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 					    unsigned int type);
 
-void perf_session__print_ip(union perf_event *event,
-				 struct perf_sample *sample,
-				 struct perf_session *session,
-				 int print_sym, int print_dso);
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+			  struct machine *machine, struct perf_evsel *evsel,
+			  int print_sym, int print_dso);
 
 int perf_session__cpu_bitmap(struct perf_session *session,
 			     const char *cpu_list, unsigned long *cpu_bitmap);
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 95d3700..36d4c56 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -27,7 +27,8 @@
 perf = Extension('perf',
 		  sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
 			     'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
-			     'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
+			     'util/util.c', 'util/xyarray.c', 'util/cgroup.c',
+			     'util/debugfs.c'],
 		  include_dirs = ['util/include'],
 		  extra_compile_args = cflags,
                  )
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 632b50c..215d50f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1757,7 +1757,7 @@
 		struct stat st;
 
 		/*sshfs might return bad dent->d_type, so we have to stat*/
-		sprintf(path, "%s/%s", dir_name, dent->d_name);
+		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
 		if (stat(path, &st))
 			continue;
 
@@ -1766,8 +1766,6 @@
 			    !strcmp(dent->d_name, ".."))
 				continue;
 
-			snprintf(path, sizeof(path), "%s/%s",
-				 dir_name, dent->d_name);
 			ret = map_groups__set_modules_path_dir(mg, path);
 			if (ret < 0)
 				goto out;
@@ -1788,9 +1786,6 @@
 			if (map == NULL)
 				continue;
 
-			snprintf(path, sizeof(path), "%s/%s",
-				 dir_name, dent->d_name);
-
 			long_name = strdup(path);
 			if (long_name == NULL) {
 				ret = -1;
@@ -2609,10 +2604,10 @@
 	symbol_conf.initialized = true;
 	return 0;
 
-out_free_dso_list:
-	strlist__delete(symbol_conf.dso_list);
 out_free_comm_list:
 	strlist__delete(symbol_conf.comm_list);
+out_free_dso_list:
+	strlist__delete(symbol_conf.dso_list);
 	return -1;
 }
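
The symbol.c error-path hunk swaps the two cleanup labels so the fall-through order matches the reverse order of acquisition, the usual goto-unwind convention. A small standalone sketch of that convention with made-up resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_late)
{
	char *dso_list, *comm_list;

	dso_list = malloc(32);
	if (dso_list == NULL)
		return -1;

	comm_list = malloc(32);
	if (comm_list == NULL)
		goto out_free_dso_list;

	if (fail_late)		/* stand-in for a later step failing */
		goto out_free_comm_list;

	free(comm_list);
	free(dso_list);
	return 0;

out_free_comm_list:
	free(comm_list);	/* acquired last, released first */
out_free_dso_list:
	free(dso_list);
	return -1;
}

int main(void)
{
	printf("setup(0) = %d, setup(1) = %d\n", setup(0), setup(1));
	return 0;
}
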
 
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 29f8d74..123c2e1 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -68,6 +68,7 @@
 
 struct symbol_conf {
 	unsigned short	priv_size;
+	unsigned short	nr_events;
 	bool		try_vmlinux_path,
 			use_modules,
 			sort_by_name,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index d5d3b22..fb4b7ea 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -61,7 +61,7 @@
 	       map_groups__fprintf(&self->mg, verbose, fp);
 }
 
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
+struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
 {
 	struct rb_node **p = &self->threads.rb_node;
 	struct rb_node *parent = NULL;
@@ -125,12 +125,12 @@
 	return 0;
 }
 
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
+size_t machine__fprintf(struct machine *machine, FILE *fp)
 {
 	size_t ret = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
 		struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
 		ret += thread__fprintf(pos, fp);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index e5f2401..70c2c13 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,16 +18,14 @@
 	int			comm_len;
 };
 
-struct perf_session;
+struct machine;
 
 void thread__delete(struct thread *self);
 
 int thread__set_comm(struct thread *self, const char *comm);
 int thread__comm_len(struct thread *self);
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
 void thread__insert_map(struct thread *self, struct map *map);
 int thread__fork(struct thread *self, struct thread *parent);
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
 
 static inline struct map *thread__find_map(struct thread *self,
 					   enum map_type type, u64 addr)
@@ -35,14 +33,12 @@
 	return self ? map_groups__find(&self->mg, type, addr) : NULL;
 }
 
-void thread__find_addr_map(struct thread *self,
-			   struct perf_session *session, u8 cpumode,
-			   enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_map(struct thread *thread, struct machine *machine,
+			   u8 cpumode, enum map_type type, u64 addr,
 			   struct addr_location *al);
 
-void thread__find_addr_location(struct thread *self,
-				struct perf_session *session, u8 cpumode,
-				enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+				u8 cpumode, enum map_type type, u64 addr,
 				struct addr_location *al,
 				symbol_filter_t filter);
 #endif	/* __PERF_THREAD_H */
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
new file mode 100644
index 0000000..b0e1aad
--- /dev/null
+++ b/tools/perf/util/tool.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_TOOL_H
+#define __PERF_TOOL_H
+
+#include <stdbool.h>
+
+struct perf_session;
+union perf_event;
+struct perf_evlist;
+struct perf_evsel;
+struct perf_sample;
+struct perf_tool;
+struct machine;
+
+typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
+			    struct perf_sample *sample,
+			    struct perf_evsel *evsel, struct machine *machine);
+
+typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
+			struct perf_sample *sample, struct machine *machine);
+
+typedef int (*event_attr_op)(union perf_event *event,
+			     struct perf_evlist **pevlist);
+typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event);
+
+typedef int (*event_synth_op)(union perf_event *event,
+			      struct perf_session *session);
+
+typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
+			 struct perf_session *session);
+
+struct perf_tool {
+	event_sample	sample,
+			read;
+	event_op	mmap,
+			comm,
+			fork,
+			exit,
+			lost,
+			throttle,
+			unthrottle;
+	event_attr_op	attr;
+	event_synth_op	tracing_data;
+	event_simple_op	event_type;
+	event_op2	finished_round,
+			build_id;
+	bool		ordered_samples;
+	bool		ordering_requires_timestamps;
+};
+
+#endif /* __PERF_TOOL_H */
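
The new struct perf_tool above turns event handling into a table of callbacks that a builtin embeds and fills in, instead of routing everything through struct perf_session. A minimal sketch of how a consumer might populate it (hypothetical handler name, not part of this patch; assumes the typedefs declared in tool.h):

	static int my_process_sample(struct perf_tool *tool, union perf_event *event,
				     struct perf_sample *sample,
				     struct perf_evsel *evsel, struct machine *machine)
	{
		/* a real tool would account the sample into its histograms here */
		return 0;
	}

	static struct perf_tool my_tool = {
		.sample		 = my_process_sample,
		.ordered_samples = true,	/* ask for timestamp-ordered delivery */
	};

Members left out of the initializer simply stay NULL in this sketch; the embedding pattern is visible in struct perf_top below, which gains a struct perf_tool as its first member.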
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 3996509..a248f3c 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -1,15 +1,17 @@
 #ifndef __PERF_TOP_H
 #define __PERF_TOP_H 1
 
+#include "tool.h"
 #include "types.h"
-#include "../perf.h"
 #include <stddef.h>
+#include <stdbool.h>
 
 struct perf_evlist;
 struct perf_evsel;
 struct perf_session;
 
 struct perf_top {
+	struct perf_tool   tool;
 	struct perf_evlist *evlist;
 	/*
 	 * Symbols will be added here in perf_event__process_sample and will
@@ -23,10 +25,26 @@
 	int		   freq;
 	pid_t		   target_pid, target_tid;
 	bool		   hide_kernel_symbols, hide_user_symbols, zero;
+	bool		   system_wide;
+	bool		   use_tui, use_stdio;
+	bool		   sort_has_symbols;
+	bool		   dont_use_callchains;
+	bool		   kptr_restrict_warned;
+	bool		   vmlinux_warned;
+	bool		   inherit;
+	bool		   group;
+	bool		   sample_id_all_avail;
+	bool		   dump_symtab;
 	const char	   *cpu_list;
 	struct hist_entry  *sym_filter_entry;
 	struct perf_evsel  *sym_evsel;
 	struct perf_session *session;
+	struct winsize	   winsize;
+	unsigned int	   mmap_pages;
+	int		   default_interval;
+	int		   realtime_prio;
+	int		   sym_pcnt_filter;
+	const char	   *sym_filter;
 };
 
 size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d2655f0..ac6830d 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -18,7 +18,8 @@
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
-#define _GNU_SOURCE
+#include <ctype.h>
+#include "util.h"
 #include <dirent.h>
 #include <mntent.h>
 #include <stdio.h>
@@ -31,7 +32,6 @@
 #include <pthread.h>
 #include <fcntl.h>
 #include <unistd.h>
-#include <ctype.h>
 #include <errno.h>
 #include <stdbool.h>
 #include <linux/list.h>
@@ -44,10 +44,6 @@
 
 #define VERSION "0.5"
 
-#define _STR(x) #x
-#define STR(x) _STR(x)
-#define MAX_PATH 256
-
 #define TRACE_CTRL	"tracing_on"
 #define TRACE		"trace"
 #define AVAILABLE	"available_tracers"
@@ -73,26 +69,6 @@
 };
 
 
-
-static void die(const char *fmt, ...)
-{
-	va_list ap;
-	int ret = errno;
-
-	if (errno)
-		perror("perf");
-	else
-		ret = -1;
-
-	va_start(ap, fmt);
-	fprintf(stderr, "  ");
-	vfprintf(stderr, fmt, ap);
-	va_end(ap);
-
-	fprintf(stderr, "\n");
-	exit(ret);
-}
-
 void *malloc_or_die(unsigned int size)
 {
 	void *data;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index c9dcbec..a3fdf55 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -39,7 +39,7 @@
 static void process_event_unsupported(union perf_event *event __unused,
 				      struct perf_sample *sample __unused,
 				      struct perf_evsel *evsel __unused,
-				      struct perf_session *session __unused,
+				      struct machine *machine __unused,
 				      struct thread *thread __unused)
 {
 }
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index a841008..58ae14c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -3,7 +3,11 @@
 
 #include <stdbool.h>
 #include "parse-events.h"
-#include "session.h"
+
+struct machine;
+struct perf_sample;
+union perf_event;
+struct thread;
 
 #define __unused __attribute__((unused))
 
@@ -292,7 +296,7 @@
 	void (*process_event) (union perf_event *event,
 			       struct perf_sample *sample,
 			       struct perf_evsel *evsel,
-			       struct perf_session *session,
+			       struct machine *machine,
 			       struct thread *thread);
 	int (*generate_script) (const char *outfile);
 };
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 0575905..295a9c9 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -224,7 +224,7 @@
 }
 
 static int annotate_browser__run(struct annotate_browser *self, int evidx,
-				 int nr_events, void(*timer)(void *arg),
+				 void(*timer)(void *arg),
 				 void *arg, int delay_secs)
 {
 	struct rb_node *nd = NULL;
@@ -328,8 +328,7 @@
 				notes = symbol__annotation(target);
 				pthread_mutex_lock(&notes->lock);
 
-				if (notes->src == NULL &&
-				    symbol__alloc_hist(target, nr_events) < 0) {
+				if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
 					pthread_mutex_unlock(&notes->lock);
 					ui__warning("Not enough memory for annotating '%s' symbol!\n",
 						    target->name);
@@ -337,7 +336,7 @@
 				}
 
 				pthread_mutex_unlock(&notes->lock);
-				symbol__tui_annotate(target, ms->map, evidx, nr_events,
+				symbol__tui_annotate(target, ms->map, evidx,
 						     timer, arg, delay_secs);
 			}
 			continue;
@@ -358,15 +357,15 @@
 	return key;
 }
 
-int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
 			     void(*timer)(void *arg), void *arg, int delay_secs)
 {
-	return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, nr_events,
+	return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
 				    timer, arg, delay_secs);
 }
 
 int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
-			 int nr_events, void(*timer)(void *arg), void *arg,
+			 void(*timer)(void *arg), void *arg,
 			 int delay_secs)
 {
 	struct objdump_line *pos, *n;
@@ -419,8 +418,7 @@
 	browser.b.nr_entries = browser.nr_entries;
 	browser.b.entries = &notes->src->source,
 	browser.b.width += 18; /* Percentage */
-	ret = annotate_browser__run(&browser, evidx, nr_events,
-				    timer, arg, delay_secs);
+	ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
 	list_for_each_entry_safe(pos, n, &notes->src->source, node) {
 		list_del(&pos->node);
 		objdump_line__free(pos);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index d0c94b4..1212a38 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -1020,7 +1020,7 @@
 			 * Don't let this be freed, say, by hists__decay_entry.
 			 */
 			he->used = true;
-			err = hist_entry__tui_annotate(he, evsel->idx, nr_events,
+			err = hist_entry__tui_annotate(he, evsel->idx,
 						       timer, arg, delay_secs);
 			he->used = false;
 			ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
index 295e366..13aa64e 100644
--- a/tools/perf/util/ui/progress.c
+++ b/tools/perf/util/ui/progress.c
@@ -14,6 +14,9 @@
 	if (use_browser <= 0)
 		return;
 
+	if (total == 0)
+		return;
+
 	ui__refresh_dimensions(true);
 	pthread_mutex_lock(&ui__lock);
 	y = SLtt_Screen_Rows / 2 - 2;
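
The early return added above bails out of the progress update when the total is zero, guarding the later scaling of the bar (a zero total would otherwise end up as a divisor, and there is nothing meaningful to draw anyway). A hedged sketch of the guarded scaling (hypothetical helper, not the perf code):

	/* Scale 'curr' of 'total' onto a bar 'width' cells wide; 0 if total is unknown. */
	static unsigned int bar_cells(unsigned long curr, unsigned long total,
				      unsigned int width)
	{
		if (total == 0)
			return 0;	/* avoid dividing by zero */
		return (unsigned int)((curr * width) / total);
	}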
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index e16bf9a7..d76d1c0 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -1,5 +1,8 @@
 /*
- * GIT - The information manager from hell
+ * usage.c
+ *
+ * Various reporting routines.
+ * Originally copied from GIT source.
  *
  * Copyright (C) Linus Torvalds, 2005
  */
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0128906..37be34d 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -245,4 +245,15 @@
 #define _STR(x) #x
 #define STR(x) _STR(x)
 
+/*
+ *  Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+	return (n != 0 && ((n & (n - 1)) == 0));
+}
+
 #endif
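
The helper added above relies on a standard bit trick: n & (n - 1) clears the lowest set bit, so the expression is zero exactly when n has a single bit set, and the n != 0 test rules out zero. A standalone sketch exercising it (not part of the patch):

	#include <assert.h>
	#include <stdbool.h>

	static inline bool is_power_of_2(unsigned long n)
	{
		/* n & (n - 1) drops the lowest set bit; zero means one bit was set */
		return (n != 0 && ((n & (n - 1)) == 0));
	}

	int main(void)
	{
		assert(is_power_of_2(64));	/* 0x40 & 0x3f == 0 */
		assert(!is_power_of_2(0));	/* zero is explicitly excluded */
		assert(!is_power_of_2(24));	/* 0x18 & 0x17 == 0x10 */
		return 0;
	}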
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index bdd3347..697c8b4 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -32,6 +32,7 @@
 
 	for (i = 0; i < values->threads; i++)
 		free(values->value[i]);
+	free(values->value);
 	free(values->pid);
 	free(values->tid);
 	free(values->counterrawid);
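
The values.c fix above also releases the array of per-thread pointers itself after its elements; previously only the rows were freed, leaking the pointer array. The general pattern, as a hedged standalone helper (hypothetical names, not from the patch):

	#include <stdlib.h>

	static void free_matrix(long **rows, int nr_rows)
	{
		int i;

		for (i = 0; i < nr_rows; i++)
			free(rows[i]);	/* free each row first */
		free(rows);		/* then the array of row pointers */
	}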
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index ff75125..555c69a 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -38,8 +38,8 @@
 .PP
 .SH FIELD DESCRIPTIONS
 .nf
-\fBpkg\fP processor package number.
-\fBcore\fP processor core number.
+\fBpk\fP processor package number.
+\fBcr\fP processor core number.
 \fBCPU\fP Linux CPU (logical processor) number.
 \fB%c0\fP percent of the interval that the CPU retired instructions.
 \fBGHz\fP average clock rate while the CPU was in c0 state.
@@ -58,7 +58,7 @@
 
 .nf
 [root@x980]# ./turbostat
-core CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+cr   CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
           0.04 1.62 3.38   0.11   0.00  99.85   0.00  95.07
   0   0   0.04 1.62 3.38   0.06   0.00  99.90   0.00  95.07
   0   6   0.02 1.62 3.38   0.08   0.00  99.90   0.00  95.07
@@ -102,7 +102,7 @@
 .nf
 [root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
 
-^Ccore CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+^Ccr   CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
            8.49 3.63 3.38  16.23   0.66  74.63   0.00   0.00
    0   0   1.22 3.62 3.38  32.18   0.00  66.60   0.00   0.00
    0   6   0.40 3.61 3.38  33.00   0.00  66.60   0.00   0.00
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index dbedfa1..553c06b 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -950,7 +950,7 @@
 #   TEST_START
 #   TEST_TYPE = config_bisect
 #   CONFIG_BISECT_TYPE = build
-#   CONFIG_BISECT = /home/test/¢onfig-bad
+#   CONFIG_BISECT = /home/test/config-bad
 #   MIN_CONFIG = /home/test/config-min
 #   BISECT_MANUAL = 1
 #
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3ad0925..758e3b3 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -17,6 +17,8 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
 #include "irq.h"
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@
 	return r;
 }
 
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device.  To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs.  PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file.  We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+	int i;
+	bool bar_found = false;
+
+	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+		char *kpath, *syspath;
+		struct path path;
+		struct inode *inode;
+		int r;
+
+		if (!pci_resource_len(dev, i))
+			continue;
+
+		kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+		if (!kpath)
+			return -ENOMEM;
+
+		/* Per sysfs-rules, sysfs is always at /sys */
+		syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+		kfree(kpath);
+		if (!syspath)
+			return -ENOMEM;
+
+		r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+		kfree(syspath);
+		if (r)
+			return r;
+
+		inode = path.dentry->d_inode;
+
+		r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+		path_put(&path);
+		if (r)
+			return r;
+
+		bar_found = true;
+	}
+
+	/* If no resources, probably something special */
+	if (!bar_found)
+		return -EPERM;
+
+	return 0;
+#else
+	return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 				      struct kvm_assigned_pci_dev *assigned_dev)
 {
 	int r = 0, idx;
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
+	u8 header_type;
+
+	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
 	idx = srcu_read_lock(&kvm->srcu);
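
The block comment in probe_sysfs_permissions() above spells out the policy: a caller may assign a PCI device only if it already has read and write access to each of the device's BAR resource files under sysfs, since that access implies it was granted control of the device. A hedged userspace analogue of the same check, using access(2) on a hypothetical sysfs device path (illustration only, not part of the patch):

	#include <stdio.h>
	#include <unistd.h>

	/* Return 0 if every implemented BAR resource file is readable and writable. */
	static int can_use_device(const char *sysfs_dev_path)
	{
		char path[256];
		int i, bar_found = 0;

		for (i = 0; i < 6; i++) {	/* up to six standard PCI BARs */
			snprintf(path, sizeof(path), "%s/resource%d", sysfs_dev_path, i);
			if (access(path, F_OK))
				continue;		/* BAR not implemented */
			if (access(path, R_OK | W_OK))
				return -1;		/* present but not accessible */
			bar_found = 1;
		}
		return bar_found ? 0 : -1;	/* no BARs: probably something special */
	}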
@@ -513,6 +579,18 @@
 		r = -EINVAL;
 		goto out_free;
 	}
+
+	/* Don't allow bridges to be assigned */
+	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+		r = -EPERM;
+		goto out_put;
+	}
+
+	r = probe_sysfs_permissions(dev);
+	if (r)
+		goto out_put;
+
 	if (pci_enable_device(dev)) {
 		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
 		r = -EBUSY;
@@ -544,16 +622,14 @@
 
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
-	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-		if (!kvm->arch.iommu_domain) {
-			r = kvm_iommu_map_guest(kvm);
-			if (r)
-				goto out_list_del;
-		}
-		r = kvm_assign_device(kvm, match);
+	if (!kvm->arch.iommu_domain) {
+		r = kvm_iommu_map_guest(kvm);
 		if (r)
 			goto out_list_del;
 	}
+	r = kvm_assign_device(kvm, match);
+	if (r)
+		goto out_list_del;
 
 out:
 	srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@
 		goto out;
 	}
 
-	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-		kvm_deassign_device(kvm, match);
+	kvm_deassign_device(kvm, match);
 
 	kvm_free_assigned_device(kvm, match);